repo_name
string
path
string
copies
string
size
string
content
string
license
string
civato/P900-Lollipop
arch/arm/mach-s3c24xx/clock-s3c2443.c
4738
5846
/* linux/arch/arm/mach-s3c2443/clock.c
 *
 * Copyright (c) 2007, 2010 Simtec Electronics
 *	Ben Dooks <ben@simtec.co.uk>
 *
 * S3C2443 Clock control support
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/device.h>
#include <linux/clk.h>
#include <linux/mutex.h>
#include <linux/serial_core.h>
#include <linux/io.h>

#include <asm/mach/map.h>

#include <mach/hardware.h>
#include <mach/regs-s3c2443-clock.h>

#include <plat/cpu-freq.h>

#include <plat/s3c2443.h>
#include <plat/clock.h>
#include <plat/clock-clksrc.h>
#include <plat/cpu.h>

/* We currently have to assume that the system is running
 * from the XTPll input, and that all ***REFCLKs are being
 * fed from it, as we cannot read the state of OM[4] from
 * software.
 *
 * It would be possible for each board initialisation to
 * set the correct muxing at initialisation
*/

/* clock selections */

/* armdiv
 *
 * this clock is sourced from msysclk and can have a number of
 * divider values applied to it to then be fed into armclk.
 * The real clock definition is done in s3c2443-clock.c,
 * only the armdiv divisor table must be defined here.
 *
 * The table is indexed by the CLKDIV0 ARMDIV field value (after
 * shifting out S3C2443_CLKDIV0_ARMDIV_SHIFT); each entry gives the
 * actual divisor applied to msysclk.  Field encodings without an
 * initializer stay 0 (i.e. invalid selections).
*/
static unsigned int armdiv[16] = {
	[S3C2443_CLKDIV0_ARMDIV_1 >> S3C2443_CLKDIV0_ARMDIV_SHIFT]	= 1,
	[S3C2443_CLKDIV0_ARMDIV_2 >> S3C2443_CLKDIV0_ARMDIV_SHIFT]	= 2,
	[S3C2443_CLKDIV0_ARMDIV_3 >> S3C2443_CLKDIV0_ARMDIV_SHIFT]	= 3,
	[S3C2443_CLKDIV0_ARMDIV_4 >> S3C2443_CLKDIV0_ARMDIV_SHIFT]	= 4,
	[S3C2443_CLKDIV0_ARMDIV_6 >> S3C2443_CLKDIV0_ARMDIV_SHIFT]	= 6,
	[S3C2443_CLKDIV0_ARMDIV_8 >> S3C2443_CLKDIV0_ARMDIV_SHIFT]	= 8,
	[S3C2443_CLKDIV0_ARMDIV_12 >> S3C2443_CLKDIV0_ARMDIV_SHIFT]	= 12,
	[S3C2443_CLKDIV0_ARMDIV_16 >> S3C2443_CLKDIV0_ARMDIV_SHIFT]	= 16,
};

/* hsspi
 *
 * high-speed spi clock, sourced from esysclk; the divider lives in
 * bits [5:4] of CLKDIV1 and the gate in SCLKCON.
*/

static struct clksrc_clk clk_hsspi = {
	.clk	= {
		.name		= "hsspi-if",
		.parent		= &clk_esysclk.clk,
		.ctrlbit	= S3C2443_SCLKCON_HSSPICLK,
		.enable		= s3c2443_clkcon_enable_s,
	},
	.reg_div = { .reg = S3C2443_CLKDIV1, .size = 2, .shift = 4 },
};


/* clk_hsmcc_div
 *
 * this clock is sourced from epll, and is fed through a divider,
 * to a mux controlled by sclkcon where either it or a extclk can
 * be fed to the hsmmc block
*/

static struct clksrc_clk clk_hsmmc_div = {
	.clk	= {
		.name		= "hsmmc-div",
		.devname	= "s3c-sdhci.1",
		.parent		= &clk_esysclk.clk,
	},
	.reg_div = { .reg = S3C2443_CLKDIV1, .size = 2, .shift = 6 },
};

/* Select the parent for the hsmmc interface clock via the SCLKCON
 * mux: either the EPLL-derived divider output or the external clock.
 * Returns -EINVAL for any other parent.
 *
 * The register is only written while the clock is in use (usage > 0);
 * otherwise just the software parent pointer is updated, and the
 * hardware mux is programmed later by s3c2443_enable_hsmmc().
 */
static int s3c2443_setparent_hsmmc(struct clk *clk, struct clk *parent)
{
	unsigned long clksrc = __raw_readl(S3C2443_SCLKCON);

	clksrc &= ~(S3C2443_SCLKCON_HSMMCCLK_EXT |
		    S3C2443_SCLKCON_HSMMCCLK_EPLL);

	if (parent == &clk_epll)
		clksrc |= S3C2443_SCLKCON_HSMMCCLK_EPLL;
	else if (parent == &clk_ext)
		clksrc |= S3C2443_SCLKCON_HSMMCCLK_EXT;
	else
		return -EINVAL;

	if (clk->usage > 0) {
		__raw_writel(clksrc, S3C2443_SCLKCON);
	}

	clk->parent = parent;

	return 0;
}

/* Enable callback for clk_hsmmc: (re)programs the SCLKCON mux for the
 * currently selected parent.  The 'enable' argument is ignored here;
 * presumably gating is handled via the mux selection itself — the
 * visible code only rewrites the parent selection.
 */
static int s3c2443_enable_hsmmc(struct clk *clk, int enable)
{
	return s3c2443_setparent_hsmmc(clk, clk->parent);
}

static struct clk clk_hsmmc = {
	.name		= "hsmmc-if",
	.devname	= "s3c-sdhci.1",
	.parent		= &clk_hsmmc_div.clk,
	.enable		= s3c2443_enable_hsmmc,
	.ops		= &(struct clk_ops) {
		.set_parent	= s3c2443_setparent_hsmmc,
	},
};

/* standard clock definitions */

static struct clk init_clocks_off[] = {
	{
		.name		= "sdi",
		.parent		= &clk_p,
		.enable		= s3c2443_clkcon_enable_p,
		.ctrlbit	= S3C2443_PCLKCON_SDI,
	}, {
		.name		= "spi",
		.devname	= "s3c2410-spi.0",
		.parent		= &clk_p,
		.enable		= s3c2443_clkcon_enable_p,
		.ctrlbit	= S3C2443_PCLKCON_SPI0,
	}, {
		.name		= "spi",
		.devname	= "s3c2410-spi.1",
		.parent		= &clk_p,
		.enable		= s3c2443_clkcon_enable_p,
		.ctrlbit	= S3C2443_PCLKCON_SPI1,
	}
};

/* clocks to add straight away */

static struct clksrc_clk *clksrcs[] __initdata = {
	&clk_hsspi,
	&clk_hsmmc_div,
};

static struct clk *clks[] __initdata = {
	&clk_hsmmc,
};

/* s3c2443_init_clocks
 *
 * Set up the S3C2443 clock tree: compute the EPLL rate from the
 * current EPLLCON setting and @xtal, run the s3c2443-family common
 * clock initialisation with this SoC's armdiv table, then register
 * the local clocks/clksrcs and disable the ones not needed at boot.
 */
void __init s3c2443_init_clocks(int xtal)
{
	unsigned long epllcon = __raw_readl(S3C2443_EPLLCON);
	int ptr;

	clk_epll.rate = s3c2443_get_epll(epllcon, xtal);
	clk_epll.parent = &clk_epllref.clk;

	s3c2443_common_init_clocks(xtal, s3c2443_get_mpll,
				   armdiv, ARRAY_SIZE(armdiv),
				   S3C2443_CLKDIV0_ARMDIV_MASK);

	s3c24xx_register_clocks(clks, ARRAY_SIZE(clks));

	for (ptr = 0; ptr < ARRAY_SIZE(clksrcs); ptr++)
		s3c_register_clksrc(clksrcs[ptr], 1);

	/* We must be careful disabling the clocks we are not intending to
	 * be using at boot time, as subsystems such as the LCD which do
	 * their own DMA requests to the bus can cause the system to lockup
	 * if they where in the middle of requesting bus access.
	 *
	 * Disabling the LCD clock if the LCD is active is very dangerous,
	 * and therefore the bootloader should be careful to not enable
	 * the LCD clock if it is not needed.
	*/

	/* install (and disable) the clocks we do not need immediately */

	s3c_register_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off));
	s3c_disable_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off));

	s3c_pwmclk_init();
}
gpl-2.0
Gigoo25/android_kernel_samsung_klte
drivers/memstick/core/mspro_block.c
4994
40163
/* * Sony MemoryStick Pro storage support * * Copyright (C) 2007 Alex Dubov <oakad@yahoo.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * Special thanks to Carlos Corbacho for providing various MemoryStick cards * that made this driver possible. * */ #include <linux/blkdev.h> #include <linux/idr.h> #include <linux/hdreg.h> #include <linux/kthread.h> #include <linux/delay.h> #include <linux/slab.h> #include <linux/mutex.h> #include <linux/memstick.h> #include <linux/module.h> #define DRIVER_NAME "mspro_block" static int major; module_param(major, int, 0644); #define MSPRO_BLOCK_MAX_SEGS 32 #define MSPRO_BLOCK_MAX_PAGES ((2 << 16) - 1) #define MSPRO_BLOCK_SIGNATURE 0xa5c3 #define MSPRO_BLOCK_MAX_ATTRIBUTES 41 #define MSPRO_BLOCK_PART_SHIFT 3 enum { MSPRO_BLOCK_ID_SYSINFO = 0x10, MSPRO_BLOCK_ID_MODELNAME = 0x15, MSPRO_BLOCK_ID_MBR = 0x20, MSPRO_BLOCK_ID_PBR16 = 0x21, MSPRO_BLOCK_ID_PBR32 = 0x22, MSPRO_BLOCK_ID_SPECFILEVALUES1 = 0x25, MSPRO_BLOCK_ID_SPECFILEVALUES2 = 0x26, MSPRO_BLOCK_ID_DEVINFO = 0x30 }; struct mspro_sys_attr { size_t size; void *data; unsigned char id; char name[32]; struct device_attribute dev_attr; }; struct mspro_attr_entry { __be32 address; __be32 size; unsigned char id; unsigned char reserved[3]; } __attribute__((packed)); struct mspro_attribute { __be16 signature; unsigned short version; unsigned char count; unsigned char reserved[11]; struct mspro_attr_entry entries[]; } __attribute__((packed)); struct mspro_sys_info { unsigned char class; unsigned char reserved0; __be16 block_size; __be16 block_count; __be16 user_block_count; __be16 page_size; unsigned char reserved1[2]; unsigned char assembly_date[8]; __be32 serial_number; unsigned char assembly_maker_code; unsigned char assembly_model_code[3]; __be16 memory_maker_code; __be16 memory_model_code; unsigned char reserved2[4]; unsigned char vcc; 
unsigned char vpp; __be16 controller_number; __be16 controller_function; __be16 start_sector; __be16 unit_size; unsigned char ms_sub_class; unsigned char reserved3[4]; unsigned char interface_type; __be16 controller_code; unsigned char format_type; unsigned char reserved4; unsigned char device_type; unsigned char reserved5[7]; unsigned char mspro_id[16]; unsigned char reserved6[16]; } __attribute__((packed)); struct mspro_mbr { unsigned char boot_partition; unsigned char start_head; unsigned char start_sector; unsigned char start_cylinder; unsigned char partition_type; unsigned char end_head; unsigned char end_sector; unsigned char end_cylinder; unsigned int start_sectors; unsigned int sectors_per_partition; } __attribute__((packed)); struct mspro_specfile { char name[8]; char ext[3]; unsigned char attr; unsigned char reserved[10]; unsigned short time; unsigned short date; unsigned short cluster; unsigned int size; } __attribute__((packed)); struct mspro_devinfo { __be16 cylinders; __be16 heads; __be16 bytes_per_track; __be16 bytes_per_sector; __be16 sectors_per_track; unsigned char reserved[6]; } __attribute__((packed)); struct mspro_block_data { struct memstick_dev *card; unsigned int usage_count; unsigned int caps; struct gendisk *disk; struct request_queue *queue; struct request *block_req; spinlock_t q_lock; unsigned short page_size; unsigned short cylinders; unsigned short heads; unsigned short sectors_per_track; unsigned char system; unsigned char read_only:1, eject:1, has_request:1, data_dir:1, active:1; unsigned char transfer_cmd; int (*mrq_handler)(struct memstick_dev *card, struct memstick_request **mrq); /* Default request setup function for data access method preferred by * this host instance. 
*/ void (*setup_transfer)(struct memstick_dev *card, u64 offset, size_t length); struct attribute_group attr_group; struct scatterlist req_sg[MSPRO_BLOCK_MAX_SEGS]; unsigned int seg_count; unsigned int current_seg; unsigned int current_page; }; static DEFINE_IDR(mspro_block_disk_idr); static DEFINE_MUTEX(mspro_block_disk_lock); static int mspro_block_complete_req(struct memstick_dev *card, int error); /*** Block device ***/ static int mspro_block_bd_open(struct block_device *bdev, fmode_t mode) { struct gendisk *disk = bdev->bd_disk; struct mspro_block_data *msb = disk->private_data; int rc = -ENXIO; mutex_lock(&mspro_block_disk_lock); if (msb && msb->card) { msb->usage_count++; if ((mode & FMODE_WRITE) && msb->read_only) rc = -EROFS; else rc = 0; } mutex_unlock(&mspro_block_disk_lock); return rc; } static int mspro_block_disk_release(struct gendisk *disk) { struct mspro_block_data *msb = disk->private_data; int disk_id = MINOR(disk_devt(disk)) >> MSPRO_BLOCK_PART_SHIFT; mutex_lock(&mspro_block_disk_lock); if (msb) { if (msb->usage_count) msb->usage_count--; if (!msb->usage_count) { kfree(msb); disk->private_data = NULL; idr_remove(&mspro_block_disk_idr, disk_id); put_disk(disk); } } mutex_unlock(&mspro_block_disk_lock); return 0; } static int mspro_block_bd_release(struct gendisk *disk, fmode_t mode) { return mspro_block_disk_release(disk); } static int mspro_block_bd_getgeo(struct block_device *bdev, struct hd_geometry *geo) { struct mspro_block_data *msb = bdev->bd_disk->private_data; geo->heads = msb->heads; geo->sectors = msb->sectors_per_track; geo->cylinders = msb->cylinders; return 0; } static const struct block_device_operations ms_block_bdops = { .open = mspro_block_bd_open, .release = mspro_block_bd_release, .getgeo = mspro_block_bd_getgeo, .owner = THIS_MODULE }; /*** Information ***/ static struct mspro_sys_attr *mspro_from_sysfs_attr(struct attribute *attr) { struct device_attribute *dev_attr = container_of(attr, struct device_attribute, attr); return 
container_of(dev_attr, struct mspro_sys_attr, dev_attr); } static const char *mspro_block_attr_name(unsigned char tag) { switch (tag) { case MSPRO_BLOCK_ID_SYSINFO: return "attr_sysinfo"; case MSPRO_BLOCK_ID_MODELNAME: return "attr_modelname"; case MSPRO_BLOCK_ID_MBR: return "attr_mbr"; case MSPRO_BLOCK_ID_PBR16: return "attr_pbr16"; case MSPRO_BLOCK_ID_PBR32: return "attr_pbr32"; case MSPRO_BLOCK_ID_SPECFILEVALUES1: return "attr_specfilevalues1"; case MSPRO_BLOCK_ID_SPECFILEVALUES2: return "attr_specfilevalues2"; case MSPRO_BLOCK_ID_DEVINFO: return "attr_devinfo"; default: return NULL; }; } typedef ssize_t (*sysfs_show_t)(struct device *dev, struct device_attribute *attr, char *buffer); static ssize_t mspro_block_attr_show_default(struct device *dev, struct device_attribute *attr, char *buffer) { struct mspro_sys_attr *s_attr = container_of(attr, struct mspro_sys_attr, dev_attr); ssize_t cnt, rc = 0; for (cnt = 0; cnt < s_attr->size; cnt++) { if (cnt && !(cnt % 16)) { if (PAGE_SIZE - rc) buffer[rc++] = '\n'; } rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "%02x ", ((unsigned char *)s_attr->data)[cnt]); } return rc; } static ssize_t mspro_block_attr_show_sysinfo(struct device *dev, struct device_attribute *attr, char *buffer) { struct mspro_sys_attr *x_attr = container_of(attr, struct mspro_sys_attr, dev_attr); struct mspro_sys_info *x_sys = x_attr->data; ssize_t rc = 0; int date_tz = 0, date_tz_f = 0; if (x_sys->assembly_date[0] > 0x80U) { date_tz = (~x_sys->assembly_date[0]) + 1; date_tz_f = date_tz & 3; date_tz >>= 2; date_tz = -date_tz; date_tz_f *= 15; } else if (x_sys->assembly_date[0] < 0x80U) { date_tz = x_sys->assembly_date[0]; date_tz_f = date_tz & 3; date_tz >>= 2; date_tz_f *= 15; } rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "class: %x\n", x_sys->class); rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "block size: %x\n", be16_to_cpu(x_sys->block_size)); rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "block count: %x\n", be16_to_cpu(x_sys->block_count)); rc 
+= scnprintf(buffer + rc, PAGE_SIZE - rc, "user block count: %x\n", be16_to_cpu(x_sys->user_block_count)); rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "page size: %x\n", be16_to_cpu(x_sys->page_size)); rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "assembly date: " "GMT%+d:%d %04u-%02u-%02u %02u:%02u:%02u\n", date_tz, date_tz_f, be16_to_cpup((__be16 *)&x_sys->assembly_date[1]), x_sys->assembly_date[3], x_sys->assembly_date[4], x_sys->assembly_date[5], x_sys->assembly_date[6], x_sys->assembly_date[7]); rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "serial number: %x\n", be32_to_cpu(x_sys->serial_number)); rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "assembly maker code: %x\n", x_sys->assembly_maker_code); rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "assembly model code: " "%02x%02x%02x\n", x_sys->assembly_model_code[0], x_sys->assembly_model_code[1], x_sys->assembly_model_code[2]); rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "memory maker code: %x\n", be16_to_cpu(x_sys->memory_maker_code)); rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "memory model code: %x\n", be16_to_cpu(x_sys->memory_model_code)); rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "vcc: %x\n", x_sys->vcc); rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "vpp: %x\n", x_sys->vpp); rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "controller number: %x\n", be16_to_cpu(x_sys->controller_number)); rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "controller function: %x\n", be16_to_cpu(x_sys->controller_function)); rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "start sector: %x\n", be16_to_cpu(x_sys->start_sector)); rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "unit size: %x\n", be16_to_cpu(x_sys->unit_size)); rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "sub class: %x\n", x_sys->ms_sub_class); rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "interface type: %x\n", x_sys->interface_type); rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "controller code: %x\n", be16_to_cpu(x_sys->controller_code)); rc += scnprintf(buffer + 
rc, PAGE_SIZE - rc, "format type: %x\n", x_sys->format_type); rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "device type: %x\n", x_sys->device_type); rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "mspro id: %s\n", x_sys->mspro_id); return rc; } static ssize_t mspro_block_attr_show_modelname(struct device *dev, struct device_attribute *attr, char *buffer) { struct mspro_sys_attr *s_attr = container_of(attr, struct mspro_sys_attr, dev_attr); return scnprintf(buffer, PAGE_SIZE, "%s", (char *)s_attr->data); } static ssize_t mspro_block_attr_show_mbr(struct device *dev, struct device_attribute *attr, char *buffer) { struct mspro_sys_attr *x_attr = container_of(attr, struct mspro_sys_attr, dev_attr); struct mspro_mbr *x_mbr = x_attr->data; ssize_t rc = 0; rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "boot partition: %x\n", x_mbr->boot_partition); rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "start head: %x\n", x_mbr->start_head); rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "start sector: %x\n", x_mbr->start_sector); rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "start cylinder: %x\n", x_mbr->start_cylinder); rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "partition type: %x\n", x_mbr->partition_type); rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "end head: %x\n", x_mbr->end_head); rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "end sector: %x\n", x_mbr->end_sector); rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "end cylinder: %x\n", x_mbr->end_cylinder); rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "start sectors: %x\n", x_mbr->start_sectors); rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "sectors per partition: %x\n", x_mbr->sectors_per_partition); return rc; } static ssize_t mspro_block_attr_show_specfile(struct device *dev, struct device_attribute *attr, char *buffer) { struct mspro_sys_attr *x_attr = container_of(attr, struct mspro_sys_attr, dev_attr); struct mspro_specfile *x_spfile = x_attr->data; char name[9], ext[4]; ssize_t rc = 0; memcpy(name, x_spfile->name, 8); 
name[8] = 0; memcpy(ext, x_spfile->ext, 3); ext[3] = 0; rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "name: %s\n", name); rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "ext: %s\n", ext); rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "attribute: %x\n", x_spfile->attr); rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "time: %d:%d:%d\n", x_spfile->time >> 11, (x_spfile->time >> 5) & 0x3f, (x_spfile->time & 0x1f) * 2); rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "date: %d-%d-%d\n", (x_spfile->date >> 9) + 1980, (x_spfile->date >> 5) & 0xf, x_spfile->date & 0x1f); rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "start cluster: %x\n", x_spfile->cluster); rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "size: %x\n", x_spfile->size); return rc; } static ssize_t mspro_block_attr_show_devinfo(struct device *dev, struct device_attribute *attr, char *buffer) { struct mspro_sys_attr *x_attr = container_of(attr, struct mspro_sys_attr, dev_attr); struct mspro_devinfo *x_devinfo = x_attr->data; ssize_t rc = 0; rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "cylinders: %x\n", be16_to_cpu(x_devinfo->cylinders)); rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "heads: %x\n", be16_to_cpu(x_devinfo->heads)); rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "bytes per track: %x\n", be16_to_cpu(x_devinfo->bytes_per_track)); rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "bytes per sector: %x\n", be16_to_cpu(x_devinfo->bytes_per_sector)); rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "sectors per track: %x\n", be16_to_cpu(x_devinfo->sectors_per_track)); return rc; } static sysfs_show_t mspro_block_attr_show(unsigned char tag) { switch (tag) { case MSPRO_BLOCK_ID_SYSINFO: return mspro_block_attr_show_sysinfo; case MSPRO_BLOCK_ID_MODELNAME: return mspro_block_attr_show_modelname; case MSPRO_BLOCK_ID_MBR: return mspro_block_attr_show_mbr; case MSPRO_BLOCK_ID_SPECFILEVALUES1: case MSPRO_BLOCK_ID_SPECFILEVALUES2: return mspro_block_attr_show_specfile; case MSPRO_BLOCK_ID_DEVINFO: return 
mspro_block_attr_show_devinfo; default: return mspro_block_attr_show_default; } } /*** Protocol handlers ***/ /* * Functions prefixed with "h_" are protocol callbacks. They can be called from * interrupt context. Return value of 0 means that request processing is still * ongoing, while special error value of -EAGAIN means that current request is * finished (and request processor should come back some time later). */ static int h_mspro_block_req_init(struct memstick_dev *card, struct memstick_request **mrq) { struct mspro_block_data *msb = memstick_get_drvdata(card); *mrq = &card->current_mrq; card->next_request = msb->mrq_handler; return 0; } static int h_mspro_block_default(struct memstick_dev *card, struct memstick_request **mrq) { return mspro_block_complete_req(card, (*mrq)->error); } static int h_mspro_block_default_bad(struct memstick_dev *card, struct memstick_request **mrq) { return -ENXIO; } static int h_mspro_block_get_ro(struct memstick_dev *card, struct memstick_request **mrq) { struct mspro_block_data *msb = memstick_get_drvdata(card); if (!(*mrq)->error) { if ((*mrq)->data[offsetof(struct ms_status_register, status0)] & MEMSTICK_STATUS0_WP) msb->read_only = 1; else msb->read_only = 0; } return mspro_block_complete_req(card, (*mrq)->error); } static int h_mspro_block_wait_for_ced(struct memstick_dev *card, struct memstick_request **mrq) { dev_dbg(&card->dev, "wait for ced: value %x\n", (*mrq)->data[0]); if (!(*mrq)->error) { if ((*mrq)->data[0] & (MEMSTICK_INT_CMDNAK | MEMSTICK_INT_ERR)) (*mrq)->error = -EFAULT; else if (!((*mrq)->data[0] & MEMSTICK_INT_CED)) return 0; } return mspro_block_complete_req(card, (*mrq)->error); } static int h_mspro_block_transfer_data(struct memstick_dev *card, struct memstick_request **mrq) { struct mspro_block_data *msb = memstick_get_drvdata(card); unsigned char t_val = 0; struct scatterlist t_sg = { 0 }; size_t t_offset; if ((*mrq)->error) return mspro_block_complete_req(card, (*mrq)->error); switch ((*mrq)->tpc) { 
case MS_TPC_WRITE_REG: memstick_init_req(*mrq, MS_TPC_SET_CMD, &msb->transfer_cmd, 1); (*mrq)->need_card_int = 1; return 0; case MS_TPC_SET_CMD: t_val = (*mrq)->int_reg; memstick_init_req(*mrq, MS_TPC_GET_INT, NULL, 1); if (msb->caps & MEMSTICK_CAP_AUTO_GET_INT) goto has_int_reg; return 0; case MS_TPC_GET_INT: t_val = (*mrq)->data[0]; has_int_reg: if (t_val & (MEMSTICK_INT_CMDNAK | MEMSTICK_INT_ERR)) { t_val = MSPRO_CMD_STOP; memstick_init_req(*mrq, MS_TPC_SET_CMD, &t_val, 1); card->next_request = h_mspro_block_default; return 0; } if (msb->current_page == (msb->req_sg[msb->current_seg].length / msb->page_size)) { msb->current_page = 0; msb->current_seg++; if (msb->current_seg == msb->seg_count) { if (t_val & MEMSTICK_INT_CED) { return mspro_block_complete_req(card, 0); } else { card->next_request = h_mspro_block_wait_for_ced; memstick_init_req(*mrq, MS_TPC_GET_INT, NULL, 1); return 0; } } } if (!(t_val & MEMSTICK_INT_BREQ)) { memstick_init_req(*mrq, MS_TPC_GET_INT, NULL, 1); return 0; } t_offset = msb->req_sg[msb->current_seg].offset; t_offset += msb->current_page * msb->page_size; sg_set_page(&t_sg, nth_page(sg_page(&(msb->req_sg[msb->current_seg])), t_offset >> PAGE_SHIFT), msb->page_size, offset_in_page(t_offset)); memstick_init_req_sg(*mrq, msb->data_dir == READ ? MS_TPC_READ_LONG_DATA : MS_TPC_WRITE_LONG_DATA, &t_sg); (*mrq)->need_card_int = 1; return 0; case MS_TPC_READ_LONG_DATA: case MS_TPC_WRITE_LONG_DATA: msb->current_page++; if (msb->caps & MEMSTICK_CAP_AUTO_GET_INT) { t_val = (*mrq)->int_reg; goto has_int_reg; } else { memstick_init_req(*mrq, MS_TPC_GET_INT, NULL, 1); return 0; } default: BUG(); } } /*** Transfer setup functions for different access methods. ***/ /** Setup data transfer request for SET_CMD TPC with arguments in card * registers. * * @card Current media instance * @offset Target data offset in bytes * @length Required transfer length in bytes. 
*/ static void h_mspro_block_setup_cmd(struct memstick_dev *card, u64 offset, size_t length) { struct mspro_block_data *msb = memstick_get_drvdata(card); struct mspro_param_register param = { .system = msb->system, .data_count = cpu_to_be16((uint16_t)(length / msb->page_size)), /* ISO C90 warning precludes direct initialization for now. */ .data_address = 0, .tpc_param = 0 }; do_div(offset, msb->page_size); param.data_address = cpu_to_be32((uint32_t)offset); card->next_request = h_mspro_block_req_init; msb->mrq_handler = h_mspro_block_transfer_data; memstick_init_req(&card->current_mrq, MS_TPC_WRITE_REG, &param, sizeof(param)); } /*** Data transfer ***/ static int mspro_block_issue_req(struct memstick_dev *card, int chunk) { struct mspro_block_data *msb = memstick_get_drvdata(card); u64 t_off; unsigned int count; try_again: while (chunk) { msb->current_page = 0; msb->current_seg = 0; msb->seg_count = blk_rq_map_sg(msb->block_req->q, msb->block_req, msb->req_sg); if (!msb->seg_count) { chunk = __blk_end_request_cur(msb->block_req, -ENOMEM); continue; } t_off = blk_rq_pos(msb->block_req); t_off <<= 9; count = blk_rq_bytes(msb->block_req); msb->setup_transfer(card, t_off, count); msb->data_dir = rq_data_dir(msb->block_req); msb->transfer_cmd = msb->data_dir == READ ? MSPRO_CMD_READ_DATA : MSPRO_CMD_WRITE_DATA; memstick_new_req(card->host); return 0; } dev_dbg(&card->dev, "blk_fetch\n"); msb->block_req = blk_fetch_request(msb->queue); if (!msb->block_req) { dev_dbg(&card->dev, "issue end\n"); return -EAGAIN; } dev_dbg(&card->dev, "trying again\n"); chunk = 1; goto try_again; } static int mspro_block_complete_req(struct memstick_dev *card, int error) { struct mspro_block_data *msb = memstick_get_drvdata(card); int chunk, cnt; unsigned int t_len = 0; unsigned long flags; spin_lock_irqsave(&msb->q_lock, flags); dev_dbg(&card->dev, "complete %d, %d\n", msb->has_request ? 
1 : 0, error); if (msb->has_request) { /* Nothing to do - not really an error */ if (error == -EAGAIN) error = 0; if (error || (card->current_mrq.tpc == MSPRO_CMD_STOP)) { if (msb->data_dir == READ) { for (cnt = 0; cnt < msb->current_seg; cnt++) t_len += msb->req_sg[cnt].length / msb->page_size; if (msb->current_page) t_len += msb->current_page - 1; t_len *= msb->page_size; } } else t_len = blk_rq_bytes(msb->block_req); dev_dbg(&card->dev, "transferred %x (%d)\n", t_len, error); if (error && !t_len) t_len = blk_rq_cur_bytes(msb->block_req); chunk = __blk_end_request(msb->block_req, error, t_len); error = mspro_block_issue_req(card, chunk); if (!error) goto out; else msb->has_request = 0; } else { if (!error) error = -EAGAIN; } card->next_request = h_mspro_block_default_bad; complete_all(&card->mrq_complete); out: spin_unlock_irqrestore(&msb->q_lock, flags); return error; } static void mspro_block_stop(struct memstick_dev *card) { struct mspro_block_data *msb = memstick_get_drvdata(card); int rc = 0; unsigned long flags; while (1) { spin_lock_irqsave(&msb->q_lock, flags); if (!msb->has_request) { blk_stop_queue(msb->queue); rc = 1; } spin_unlock_irqrestore(&msb->q_lock, flags); if (rc) break; wait_for_completion(&card->mrq_complete); } } static void mspro_block_start(struct memstick_dev *card) { struct mspro_block_data *msb = memstick_get_drvdata(card); unsigned long flags; spin_lock_irqsave(&msb->q_lock, flags); blk_start_queue(msb->queue); spin_unlock_irqrestore(&msb->q_lock, flags); } static int mspro_block_prepare_req(struct request_queue *q, struct request *req) { if (req->cmd_type != REQ_TYPE_FS && req->cmd_type != REQ_TYPE_BLOCK_PC) { blk_dump_rq_flags(req, "MSPro unsupported request"); return BLKPREP_KILL; } req->cmd_flags |= REQ_DONTPREP; return BLKPREP_OK; } static void mspro_block_submit_req(struct request_queue *q) { struct memstick_dev *card = q->queuedata; struct mspro_block_data *msb = memstick_get_drvdata(card); struct request *req = NULL; if 
(msb->has_request) return; if (msb->eject) { while ((req = blk_fetch_request(q)) != NULL) __blk_end_request_all(req, -ENODEV); return; } msb->has_request = 1; if (mspro_block_issue_req(card, 0)) msb->has_request = 0; } /*** Initialization ***/ static int mspro_block_wait_for_ced(struct memstick_dev *card) { struct mspro_block_data *msb = memstick_get_drvdata(card); card->next_request = h_mspro_block_req_init; msb->mrq_handler = h_mspro_block_wait_for_ced; memstick_init_req(&card->current_mrq, MS_TPC_GET_INT, NULL, 1); memstick_new_req(card->host); wait_for_completion(&card->mrq_complete); return card->current_mrq.error; } static int mspro_block_set_interface(struct memstick_dev *card, unsigned char sys_reg) { struct memstick_host *host = card->host; struct mspro_block_data *msb = memstick_get_drvdata(card); struct mspro_param_register param = { .system = sys_reg, .data_count = 0, .data_address = 0, .tpc_param = 0 }; card->next_request = h_mspro_block_req_init; msb->mrq_handler = h_mspro_block_default; memstick_init_req(&card->current_mrq, MS_TPC_WRITE_REG, &param, sizeof(param)); memstick_new_req(host); wait_for_completion(&card->mrq_complete); return card->current_mrq.error; } static int mspro_block_switch_interface(struct memstick_dev *card) { struct memstick_host *host = card->host; struct mspro_block_data *msb = memstick_get_drvdata(card); int rc = 0; try_again: if (msb->caps & MEMSTICK_CAP_PAR4) rc = mspro_block_set_interface(card, MEMSTICK_SYS_PAR4); else return 0; if (rc) { printk(KERN_WARNING "%s: could not switch to 4-bit mode, error %d\n", dev_name(&card->dev), rc); return 0; } msb->system = MEMSTICK_SYS_PAR4; host->set_param(host, MEMSTICK_INTERFACE, MEMSTICK_PAR4); printk(KERN_INFO "%s: switching to 4-bit parallel mode\n", dev_name(&card->dev)); if (msb->caps & MEMSTICK_CAP_PAR8) { rc = mspro_block_set_interface(card, MEMSTICK_SYS_PAR8); if (!rc) { msb->system = MEMSTICK_SYS_PAR8; host->set_param(host, MEMSTICK_INTERFACE, MEMSTICK_PAR8); 
printk(KERN_INFO "%s: switching to 8-bit parallel mode\n", dev_name(&card->dev)); } else printk(KERN_WARNING "%s: could not switch to 8-bit mode, error %d\n", dev_name(&card->dev), rc); } card->next_request = h_mspro_block_req_init; msb->mrq_handler = h_mspro_block_default; memstick_init_req(&card->current_mrq, MS_TPC_GET_INT, NULL, 1); memstick_new_req(card->host); wait_for_completion(&card->mrq_complete); rc = card->current_mrq.error; if (rc) { printk(KERN_WARNING "%s: interface error, trying to fall back to serial\n", dev_name(&card->dev)); msb->system = MEMSTICK_SYS_SERIAL; host->set_param(host, MEMSTICK_POWER, MEMSTICK_POWER_OFF); msleep(10); host->set_param(host, MEMSTICK_POWER, MEMSTICK_POWER_ON); host->set_param(host, MEMSTICK_INTERFACE, MEMSTICK_SERIAL); rc = memstick_set_rw_addr(card); if (!rc) rc = mspro_block_set_interface(card, msb->system); if (!rc) { msleep(150); rc = mspro_block_wait_for_ced(card); if (rc) return rc; if (msb->caps & MEMSTICK_CAP_PAR8) { msb->caps &= ~MEMSTICK_CAP_PAR8; goto try_again; } } } return rc; } /* Memory allocated for attributes by this function should be freed by * mspro_block_data_clear, no matter if the initialization process succeeded * or failed. */ static int mspro_block_read_attributes(struct memstick_dev *card) { struct mspro_block_data *msb = memstick_get_drvdata(card); struct mspro_attribute *attr = NULL; struct mspro_sys_attr *s_attr = NULL; unsigned char *buffer = NULL; int cnt, rc, attr_count; /* While normally physical device offsets, represented here by * attr_offset and attr_len will be of large numeric types, we can be * sure, that attributes are close enough to the beginning of the * device, to save ourselves some trouble. 
*/ unsigned int addr, attr_offset = 0, attr_len = msb->page_size; attr = kmalloc(msb->page_size, GFP_KERNEL); if (!attr) return -ENOMEM; sg_init_one(&msb->req_sg[0], attr, msb->page_size); msb->seg_count = 1; msb->current_seg = 0; msb->current_page = 0; msb->data_dir = READ; msb->transfer_cmd = MSPRO_CMD_READ_ATRB; msb->setup_transfer(card, attr_offset, attr_len); memstick_new_req(card->host); wait_for_completion(&card->mrq_complete); if (card->current_mrq.error) { rc = card->current_mrq.error; goto out_free_attr; } if (be16_to_cpu(attr->signature) != MSPRO_BLOCK_SIGNATURE) { printk(KERN_ERR "%s: unrecognized device signature %x\n", dev_name(&card->dev), be16_to_cpu(attr->signature)); rc = -ENODEV; goto out_free_attr; } if (attr->count > MSPRO_BLOCK_MAX_ATTRIBUTES) { printk(KERN_WARNING "%s: way too many attribute entries\n", dev_name(&card->dev)); attr_count = MSPRO_BLOCK_MAX_ATTRIBUTES; } else attr_count = attr->count; msb->attr_group.attrs = kzalloc((attr_count + 1) * sizeof(struct attribute), GFP_KERNEL); if (!msb->attr_group.attrs) { rc = -ENOMEM; goto out_free_attr; } msb->attr_group.name = "media_attributes"; buffer = kmalloc(attr_len, GFP_KERNEL); if (!buffer) { rc = -ENOMEM; goto out_free_attr; } memcpy(buffer, (char *)attr, attr_len); for (cnt = 0; cnt < attr_count; ++cnt) { s_attr = kzalloc(sizeof(struct mspro_sys_attr), GFP_KERNEL); if (!s_attr) { rc = -ENOMEM; goto out_free_buffer; } msb->attr_group.attrs[cnt] = &s_attr->dev_attr.attr; addr = be32_to_cpu(attr->entries[cnt].address); s_attr->size = be32_to_cpu(attr->entries[cnt].size); dev_dbg(&card->dev, "adding attribute %d: id %x, address %x, " "size %zx\n", cnt, attr->entries[cnt].id, addr, s_attr->size); s_attr->id = attr->entries[cnt].id; if (mspro_block_attr_name(s_attr->id)) snprintf(s_attr->name, sizeof(s_attr->name), "%s", mspro_block_attr_name(attr->entries[cnt].id)); else snprintf(s_attr->name, sizeof(s_attr->name), "attr_x%02x", attr->entries[cnt].id); 
sysfs_attr_init(&s_attr->dev_attr.attr); s_attr->dev_attr.attr.name = s_attr->name; s_attr->dev_attr.attr.mode = S_IRUGO; s_attr->dev_attr.show = mspro_block_attr_show(s_attr->id); if (!s_attr->size) continue; s_attr->data = kmalloc(s_attr->size, GFP_KERNEL); if (!s_attr->data) { rc = -ENOMEM; goto out_free_buffer; } if (((addr / msb->page_size) == (attr_offset / msb->page_size)) && (((addr + s_attr->size - 1) / msb->page_size) == (attr_offset / msb->page_size))) { memcpy(s_attr->data, buffer + addr % msb->page_size, s_attr->size); continue; } attr_offset = (addr / msb->page_size) * msb->page_size; if ((attr_offset + attr_len) < (addr + s_attr->size)) { kfree(buffer); attr_len = (((addr + s_attr->size) / msb->page_size) + 1 ) * msb->page_size - attr_offset; buffer = kmalloc(attr_len, GFP_KERNEL); if (!buffer) { rc = -ENOMEM; goto out_free_attr; } } sg_init_one(&msb->req_sg[0], buffer, attr_len); msb->seg_count = 1; msb->current_seg = 0; msb->current_page = 0; msb->data_dir = READ; msb->transfer_cmd = MSPRO_CMD_READ_ATRB; dev_dbg(&card->dev, "reading attribute range %x, %x\n", attr_offset, attr_len); msb->setup_transfer(card, attr_offset, attr_len); memstick_new_req(card->host); wait_for_completion(&card->mrq_complete); if (card->current_mrq.error) { rc = card->current_mrq.error; goto out_free_buffer; } memcpy(s_attr->data, buffer + addr % msb->page_size, s_attr->size); } rc = 0; out_free_buffer: kfree(buffer); out_free_attr: kfree(attr); return rc; } static int mspro_block_init_card(struct memstick_dev *card) { struct mspro_block_data *msb = memstick_get_drvdata(card); struct memstick_host *host = card->host; int rc = 0; msb->system = MEMSTICK_SYS_SERIAL; msb->setup_transfer = h_mspro_block_setup_cmd; card->reg_addr.r_offset = offsetof(struct mspro_register, status); card->reg_addr.r_length = sizeof(struct ms_status_register); card->reg_addr.w_offset = offsetof(struct mspro_register, param); card->reg_addr.w_length = sizeof(struct mspro_param_register); if 
(memstick_set_rw_addr(card)) return -EIO; msb->caps = host->caps; msleep(150); rc = mspro_block_wait_for_ced(card); if (rc) return rc; rc = mspro_block_switch_interface(card); if (rc) return rc; dev_dbg(&card->dev, "card activated\n"); if (msb->system != MEMSTICK_SYS_SERIAL) msb->caps |= MEMSTICK_CAP_AUTO_GET_INT; card->next_request = h_mspro_block_req_init; msb->mrq_handler = h_mspro_block_get_ro; memstick_init_req(&card->current_mrq, MS_TPC_READ_REG, NULL, sizeof(struct ms_status_register)); memstick_new_req(card->host); wait_for_completion(&card->mrq_complete); if (card->current_mrq.error) return card->current_mrq.error; dev_dbg(&card->dev, "card r/w status %d\n", msb->read_only ? 0 : 1); msb->page_size = 512; rc = mspro_block_read_attributes(card); if (rc) return rc; dev_dbg(&card->dev, "attributes loaded\n"); return 0; } static int mspro_block_init_disk(struct memstick_dev *card) { struct mspro_block_data *msb = memstick_get_drvdata(card); struct memstick_host *host = card->host; struct mspro_devinfo *dev_info = NULL; struct mspro_sys_info *sys_info = NULL; struct mspro_sys_attr *s_attr = NULL; int rc, disk_id; u64 limit = BLK_BOUNCE_HIGH; unsigned long capacity; if (host->dev.dma_mask && *(host->dev.dma_mask)) limit = *(host->dev.dma_mask); for (rc = 0; msb->attr_group.attrs[rc]; ++rc) { s_attr = mspro_from_sysfs_attr(msb->attr_group.attrs[rc]); if (s_attr->id == MSPRO_BLOCK_ID_DEVINFO) dev_info = s_attr->data; else if (s_attr->id == MSPRO_BLOCK_ID_SYSINFO) sys_info = s_attr->data; } if (!dev_info || !sys_info) return -ENODEV; msb->cylinders = be16_to_cpu(dev_info->cylinders); msb->heads = be16_to_cpu(dev_info->heads); msb->sectors_per_track = be16_to_cpu(dev_info->sectors_per_track); msb->page_size = be16_to_cpu(sys_info->unit_size); mutex_lock(&mspro_block_disk_lock); if (!idr_pre_get(&mspro_block_disk_idr, GFP_KERNEL)) { mutex_unlock(&mspro_block_disk_lock); return -ENOMEM; } rc = idr_get_new(&mspro_block_disk_idr, card, &disk_id); 
mutex_unlock(&mspro_block_disk_lock); if (rc) return rc; if ((disk_id << MSPRO_BLOCK_PART_SHIFT) > 255) { rc = -ENOSPC; goto out_release_id; } msb->disk = alloc_disk(1 << MSPRO_BLOCK_PART_SHIFT); if (!msb->disk) { rc = -ENOMEM; goto out_release_id; } msb->queue = blk_init_queue(mspro_block_submit_req, &msb->q_lock); if (!msb->queue) { rc = -ENOMEM; goto out_put_disk; } msb->queue->queuedata = card; blk_queue_prep_rq(msb->queue, mspro_block_prepare_req); blk_queue_bounce_limit(msb->queue, limit); blk_queue_max_hw_sectors(msb->queue, MSPRO_BLOCK_MAX_PAGES); blk_queue_max_segments(msb->queue, MSPRO_BLOCK_MAX_SEGS); blk_queue_max_segment_size(msb->queue, MSPRO_BLOCK_MAX_PAGES * msb->page_size); msb->disk->major = major; msb->disk->first_minor = disk_id << MSPRO_BLOCK_PART_SHIFT; msb->disk->fops = &ms_block_bdops; msb->usage_count = 1; msb->disk->private_data = msb; msb->disk->queue = msb->queue; msb->disk->driverfs_dev = &card->dev; sprintf(msb->disk->disk_name, "mspblk%d", disk_id); blk_queue_logical_block_size(msb->queue, msb->page_size); capacity = be16_to_cpu(sys_info->user_block_count); capacity *= be16_to_cpu(sys_info->block_size); capacity *= msb->page_size >> 9; set_capacity(msb->disk, capacity); dev_dbg(&card->dev, "capacity set %ld\n", capacity); add_disk(msb->disk); msb->active = 1; return 0; out_put_disk: put_disk(msb->disk); out_release_id: mutex_lock(&mspro_block_disk_lock); idr_remove(&mspro_block_disk_idr, disk_id); mutex_unlock(&mspro_block_disk_lock); return rc; } static void mspro_block_data_clear(struct mspro_block_data *msb) { int cnt; struct mspro_sys_attr *s_attr; if (msb->attr_group.attrs) { for (cnt = 0; msb->attr_group.attrs[cnt]; ++cnt) { s_attr = mspro_from_sysfs_attr(msb->attr_group .attrs[cnt]); kfree(s_attr->data); kfree(s_attr); } kfree(msb->attr_group.attrs); } msb->card = NULL; } static int mspro_block_check_card(struct memstick_dev *card) { struct mspro_block_data *msb = memstick_get_drvdata(card); return (msb->active == 1); } static 
int mspro_block_probe(struct memstick_dev *card) { struct mspro_block_data *msb; int rc = 0; msb = kzalloc(sizeof(struct mspro_block_data), GFP_KERNEL); if (!msb) return -ENOMEM; memstick_set_drvdata(card, msb); msb->card = card; spin_lock_init(&msb->q_lock); rc = mspro_block_init_card(card); if (rc) goto out_free; rc = sysfs_create_group(&card->dev.kobj, &msb->attr_group); if (rc) goto out_free; rc = mspro_block_init_disk(card); if (!rc) { card->check = mspro_block_check_card; card->stop = mspro_block_stop; card->start = mspro_block_start; return 0; } sysfs_remove_group(&card->dev.kobj, &msb->attr_group); out_free: memstick_set_drvdata(card, NULL); mspro_block_data_clear(msb); kfree(msb); return rc; } static void mspro_block_remove(struct memstick_dev *card) { struct mspro_block_data *msb = memstick_get_drvdata(card); unsigned long flags; spin_lock_irqsave(&msb->q_lock, flags); msb->eject = 1; blk_start_queue(msb->queue); spin_unlock_irqrestore(&msb->q_lock, flags); del_gendisk(msb->disk); dev_dbg(&card->dev, "mspro block remove\n"); blk_cleanup_queue(msb->queue); msb->queue = NULL; sysfs_remove_group(&card->dev.kobj, &msb->attr_group); mutex_lock(&mspro_block_disk_lock); mspro_block_data_clear(msb); mutex_unlock(&mspro_block_disk_lock); mspro_block_disk_release(msb->disk); memstick_set_drvdata(card, NULL); } #ifdef CONFIG_PM static int mspro_block_suspend(struct memstick_dev *card, pm_message_t state) { struct mspro_block_data *msb = memstick_get_drvdata(card); unsigned long flags; spin_lock_irqsave(&msb->q_lock, flags); blk_stop_queue(msb->queue); msb->active = 0; spin_unlock_irqrestore(&msb->q_lock, flags); return 0; } static int mspro_block_resume(struct memstick_dev *card) { struct mspro_block_data *msb = memstick_get_drvdata(card); unsigned long flags; int rc = 0; #ifdef CONFIG_MEMSTICK_UNSAFE_RESUME struct mspro_block_data *new_msb; struct memstick_host *host = card->host; struct mspro_sys_attr *s_attr, *r_attr; unsigned char cnt; mutex_lock(&host->lock); 
new_msb = kzalloc(sizeof(struct mspro_block_data), GFP_KERNEL); if (!new_msb) { rc = -ENOMEM; goto out_unlock; } new_msb->card = card; memstick_set_drvdata(card, new_msb); if (mspro_block_init_card(card)) goto out_free; for (cnt = 0; new_msb->attr_group.attrs[cnt] && msb->attr_group.attrs[cnt]; ++cnt) { s_attr = mspro_from_sysfs_attr(new_msb->attr_group.attrs[cnt]); r_attr = mspro_from_sysfs_attr(msb->attr_group.attrs[cnt]); if (s_attr->id == MSPRO_BLOCK_ID_SYSINFO && r_attr->id == s_attr->id) { if (memcmp(s_attr->data, r_attr->data, s_attr->size)) break; msb->active = 1; break; } } out_free: memstick_set_drvdata(card, msb); mspro_block_data_clear(new_msb); kfree(new_msb); out_unlock: mutex_unlock(&host->lock); #endif /* CONFIG_MEMSTICK_UNSAFE_RESUME */ spin_lock_irqsave(&msb->q_lock, flags); blk_start_queue(msb->queue); spin_unlock_irqrestore(&msb->q_lock, flags); return rc; } #else #define mspro_block_suspend NULL #define mspro_block_resume NULL #endif /* CONFIG_PM */ static struct memstick_device_id mspro_block_id_tbl[] = { {MEMSTICK_MATCH_ALL, MEMSTICK_TYPE_PRO, MEMSTICK_CATEGORY_STORAGE_DUO, MEMSTICK_CLASS_DUO}, {} }; static struct memstick_driver mspro_block_driver = { .driver = { .name = DRIVER_NAME, .owner = THIS_MODULE }, .id_table = mspro_block_id_tbl, .probe = mspro_block_probe, .remove = mspro_block_remove, .suspend = mspro_block_suspend, .resume = mspro_block_resume }; static int __init mspro_block_init(void) { int rc = -ENOMEM; rc = register_blkdev(major, DRIVER_NAME); if (rc < 0) { printk(KERN_ERR DRIVER_NAME ": failed to register " "major %d, error %d\n", major, rc); return rc; } if (!major) major = rc; rc = memstick_register_driver(&mspro_block_driver); if (rc) unregister_blkdev(major, DRIVER_NAME); return rc; } static void __exit mspro_block_exit(void) { memstick_unregister_driver(&mspro_block_driver); unregister_blkdev(major, DRIVER_NAME); idr_destroy(&mspro_block_disk_idr); } module_init(mspro_block_init); module_exit(mspro_block_exit); 
MODULE_LICENSE("GPL"); MODULE_AUTHOR("Alex Dubov"); MODULE_DESCRIPTION("Sony MemoryStickPro block device driver"); MODULE_DEVICE_TABLE(memstick, mspro_block_id_tbl);
gpl-2.0
atyoung/android-mason-kernel-ginger
drivers/video/sbuslib.c
4994
7031
/* sbuslib.c: Helper library for SBUS framebuffer drivers. * * Copyright (C) 2003 David S. Miller (davem@redhat.com) */ #include <linux/compat.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/string.h> #include <linux/fb.h> #include <linux/mm.h> #include <linux/uaccess.h> #include <linux/of_device.h> #include <asm/fbio.h> #include "sbuslib.h" void sbusfb_fill_var(struct fb_var_screeninfo *var, struct device_node *dp, int bpp) { memset(var, 0, sizeof(*var)); var->xres = of_getintprop_default(dp, "width", 1152); var->yres = of_getintprop_default(dp, "height", 900); var->xres_virtual = var->xres; var->yres_virtual = var->yres; var->bits_per_pixel = bpp; } EXPORT_SYMBOL(sbusfb_fill_var); static unsigned long sbusfb_mmapsize(long size, unsigned long fbsize) { if (size == SBUS_MMAP_EMPTY) return 0; if (size >= 0) return size; return fbsize * (-size); } int sbusfb_mmap_helper(struct sbus_mmap_map *map, unsigned long physbase, unsigned long fbsize, unsigned long iospace, struct vm_area_struct *vma) { unsigned int size, page, r, map_size; unsigned long map_offset = 0; unsigned long off; int i; if (!(vma->vm_flags & (VM_SHARED | VM_MAYSHARE))) return -EINVAL; size = vma->vm_end - vma->vm_start; if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT)) return -EINVAL; off = vma->vm_pgoff << PAGE_SHIFT; /* To stop the swapper from even considering these pages */ vma->vm_flags |= (VM_IO | VM_RESERVED); vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); /* Each page, see which map applies */ for (page = 0; page < size; ){ map_size = 0; for (i = 0; map[i].size; i++) if (map[i].voff == off+page) { map_size = sbusfb_mmapsize(map[i].size, fbsize); #ifdef __sparc_v9__ #define POFF_MASK (PAGE_MASK|0x1UL) #else #define POFF_MASK (PAGE_MASK) #endif map_offset = (physbase + map[i].poff) & POFF_MASK; break; } if (!map_size){ page += PAGE_SIZE; continue; } if (page + map_size > size) map_size = size - page; r = io_remap_pfn_range(vma, vma->vm_start + page, 
MK_IOSPACE_PFN(iospace, map_offset >> PAGE_SHIFT), map_size, vma->vm_page_prot); if (r) return -EAGAIN; page += map_size; } return 0; } EXPORT_SYMBOL(sbusfb_mmap_helper); int sbusfb_ioctl_helper(unsigned long cmd, unsigned long arg, struct fb_info *info, int type, int fb_depth, unsigned long fb_size) { switch(cmd) { case FBIOGTYPE: { struct fbtype __user *f = (struct fbtype __user *) arg; if (put_user(type, &f->fb_type) || __put_user(info->var.yres, &f->fb_height) || __put_user(info->var.xres, &f->fb_width) || __put_user(fb_depth, &f->fb_depth) || __put_user(0, &f->fb_cmsize) || __put_user(fb_size, &f->fb_cmsize)) return -EFAULT; return 0; } case FBIOPUTCMAP_SPARC: { struct fbcmap __user *c = (struct fbcmap __user *) arg; struct fb_cmap cmap; u16 red, green, blue; u8 red8, green8, blue8; unsigned char __user *ured; unsigned char __user *ugreen; unsigned char __user *ublue; int index, count, i; if (get_user(index, &c->index) || __get_user(count, &c->count) || __get_user(ured, &c->red) || __get_user(ugreen, &c->green) || __get_user(ublue, &c->blue)) return -EFAULT; cmap.len = 1; cmap.red = &red; cmap.green = &green; cmap.blue = &blue; cmap.transp = NULL; for (i = 0; i < count; i++) { int err; if (get_user(red8, &ured[i]) || get_user(green8, &ugreen[i]) || get_user(blue8, &ublue[i])) return -EFAULT; red = red8 << 8; green = green8 << 8; blue = blue8 << 8; cmap.start = index + i; err = fb_set_cmap(&cmap, info); if (err) return err; } return 0; } case FBIOGETCMAP_SPARC: { struct fbcmap __user *c = (struct fbcmap __user *) arg; unsigned char __user *ured; unsigned char __user *ugreen; unsigned char __user *ublue; struct fb_cmap *cmap = &info->cmap; int index, count, i; u8 red, green, blue; if (get_user(index, &c->index) || __get_user(count, &c->count) || __get_user(ured, &c->red) || __get_user(ugreen, &c->green) || __get_user(ublue, &c->blue)) return -EFAULT; if (index + count > cmap->len) return -EINVAL; for (i = 0; i < count; i++) { red = cmap->red[index + i] >> 8; 
green = cmap->green[index + i] >> 8; blue = cmap->blue[index + i] >> 8; if (put_user(red, &ured[i]) || put_user(green, &ugreen[i]) || put_user(blue, &ublue[i])) return -EFAULT; } return 0; } default: return -EINVAL; }; } EXPORT_SYMBOL(sbusfb_ioctl_helper); #ifdef CONFIG_COMPAT static int fbiogetputcmap(struct fb_info *info, unsigned int cmd, unsigned long arg) { struct fbcmap32 __user *argp = (void __user *)arg; struct fbcmap __user *p = compat_alloc_user_space(sizeof(*p)); u32 addr; int ret; ret = copy_in_user(p, argp, 2 * sizeof(int)); ret |= get_user(addr, &argp->red); ret |= put_user(compat_ptr(addr), &p->red); ret |= get_user(addr, &argp->green); ret |= put_user(compat_ptr(addr), &p->green); ret |= get_user(addr, &argp->blue); ret |= put_user(compat_ptr(addr), &p->blue); if (ret) return -EFAULT; return info->fbops->fb_ioctl(info, (cmd == FBIOPUTCMAP32) ? FBIOPUTCMAP_SPARC : FBIOGETCMAP_SPARC, (unsigned long)p); } static int fbiogscursor(struct fb_info *info, unsigned long arg) { struct fbcursor __user *p = compat_alloc_user_space(sizeof(*p)); struct fbcursor32 __user *argp = (void __user *)arg; compat_uptr_t addr; int ret; ret = copy_in_user(p, argp, 2 * sizeof (short) + 2 * sizeof(struct fbcurpos)); ret |= copy_in_user(&p->size, &argp->size, sizeof(struct fbcurpos)); ret |= copy_in_user(&p->cmap, &argp->cmap, 2 * sizeof(int)); ret |= get_user(addr, &argp->cmap.red); ret |= put_user(compat_ptr(addr), &p->cmap.red); ret |= get_user(addr, &argp->cmap.green); ret |= put_user(compat_ptr(addr), &p->cmap.green); ret |= get_user(addr, &argp->cmap.blue); ret |= put_user(compat_ptr(addr), &p->cmap.blue); ret |= get_user(addr, &argp->mask); ret |= put_user(compat_ptr(addr), &p->mask); ret |= get_user(addr, &argp->image); ret |= put_user(compat_ptr(addr), &p->image); if (ret) return -EFAULT; return info->fbops->fb_ioctl(info, FBIOSCURSOR, (unsigned long)p); } int sbusfb_compat_ioctl(struct fb_info *info, unsigned int cmd, unsigned long arg) { switch (cmd) { case 
FBIOGTYPE: case FBIOSATTR: case FBIOGATTR: case FBIOSVIDEO: case FBIOGVIDEO: case FBIOGCURSOR32: /* This is not implemented yet. Later it should be converted... */ case FBIOSCURPOS: case FBIOGCURPOS: case FBIOGCURMAX: return info->fbops->fb_ioctl(info, cmd, arg); case FBIOPUTCMAP32: return fbiogetputcmap(info, cmd, arg); case FBIOGETCMAP32: return fbiogetputcmap(info, cmd, arg); case FBIOSCURSOR32: return fbiogscursor(info, arg); default: return -ENOIOCTLCMD; } } EXPORT_SYMBOL(sbusfb_compat_ioctl); #endif
gpl-2.0
imrehg/vab820-kernel-bsp
arch/mips/lantiq/xway/mach-easy50712.c
7298
1538
/* * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published * by the Free Software Foundation. * * Copyright (C) 2010 John Crispin <blogic@openwrt.org> */ #include <linux/init.h> #include <linux/platform_device.h> #include <linux/mtd/mtd.h> #include <linux/mtd/partitions.h> #include <linux/mtd/physmap.h> #include <linux/input.h> #include <linux/phy.h> #include <lantiq_soc.h> #include <irq.h> #include "../machtypes.h" #include "devices.h" static struct mtd_partition easy50712_partitions[] = { { .name = "uboot", .offset = 0x0, .size = 0x10000, }, { .name = "uboot_env", .offset = 0x10000, .size = 0x10000, }, { .name = "linux", .offset = 0x20000, .size = 0xe0000, }, { .name = "rootfs", .offset = 0x100000, .size = 0x300000, }, }; static struct physmap_flash_data easy50712_flash_data = { .nr_parts = ARRAY_SIZE(easy50712_partitions), .parts = easy50712_partitions, }; static struct ltq_pci_data ltq_pci_data = { .clock = PCI_CLOCK_INT, .gpio = PCI_GNT1 | PCI_REQ1, .irq = { [14] = INT_NUM_IM0_IRL0 + 22, }, }; static struct ltq_eth_data ltq_eth_data = { .mii_mode = PHY_INTERFACE_MODE_MII, }; static void __init easy50712_init(void) { ltq_register_gpio_stp(); ltq_register_nor(&easy50712_flash_data); ltq_register_pci(&ltq_pci_data); ltq_register_etop(&ltq_eth_data); } MIPS_MACHINE(LTQ_MACH_EASY50712, "EASY50712", "EASY50712 Eval Board", easy50712_init);
gpl-2.0
percy-g2/bbbandroid-kernel
drivers/usb/misc/cytherm.c
7554
10846
/* -*- linux-c -*- * Cypress USB Thermometer driver * * Copyright (c) 2004 Erik Rigtorp <erkki@linux.nu> <erik@rigtorp.com> * * This driver works with Elektor magazine USB Interface as published in * issue #291. It should also work with the original starter kit/demo board * from Cypress. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation, version 2. * */ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/usb.h> #define DRIVER_VERSION "v1.0" #define DRIVER_AUTHOR "Erik Rigtorp" #define DRIVER_DESC "Cypress USB Thermometer driver" #define USB_SKEL_VENDOR_ID 0x04b4 #define USB_SKEL_PRODUCT_ID 0x0002 static const struct usb_device_id id_table[] = { { USB_DEVICE(USB_SKEL_VENDOR_ID, USB_SKEL_PRODUCT_ID) }, { } }; MODULE_DEVICE_TABLE (usb, id_table); /* Structure to hold all of our device specific stuff */ struct usb_cytherm { struct usb_device *udev; /* save off the usb device pointer */ struct usb_interface *interface; /* the interface for this device */ int brightness; }; /* local function prototypes */ static int cytherm_probe(struct usb_interface *interface, const struct usb_device_id *id); static void cytherm_disconnect(struct usb_interface *interface); /* usb specific object needed to register this driver with the usb subsystem */ static struct usb_driver cytherm_driver = { .name = "cytherm", .probe = cytherm_probe, .disconnect = cytherm_disconnect, .id_table = id_table, }; /* Vendor requests */ /* They all operate on one byte at a time */ #define PING 0x00 #define READ_ROM 0x01 /* Reads form ROM, value = address */ #define READ_RAM 0x02 /* Reads form RAM, value = address */ #define WRITE_RAM 0x03 /* Write to RAM, value = address, index = data */ #define READ_PORT 0x04 /* Reads from port, value = address */ #define WRITE_PORT 0x05 /* Write to port, value 
= address, index = data */ /* Send a vendor command to device */ static int vendor_command(struct usb_device *dev, unsigned char request, unsigned char value, unsigned char index, void *buf, int size) { return usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), request, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_OTHER, value, index, buf, size, USB_CTRL_GET_TIMEOUT); } #define BRIGHTNESS 0x2c /* RAM location for brightness value */ #define BRIGHTNESS_SEM 0x2b /* RAM location for brightness semaphore */ static ssize_t show_brightness(struct device *dev, struct device_attribute *attr, char *buf) { struct usb_interface *intf = to_usb_interface(dev); struct usb_cytherm *cytherm = usb_get_intfdata(intf); return sprintf(buf, "%i", cytherm->brightness); } static ssize_t set_brightness(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct usb_interface *intf = to_usb_interface(dev); struct usb_cytherm *cytherm = usb_get_intfdata(intf); unsigned char *buffer; int retval; buffer = kmalloc(8, GFP_KERNEL); if (!buffer) { dev_err(&cytherm->udev->dev, "out of memory\n"); return 0; } cytherm->brightness = simple_strtoul(buf, NULL, 10); if (cytherm->brightness > 0xFF) cytherm->brightness = 0xFF; else if (cytherm->brightness < 0) cytherm->brightness = 0; /* Set brightness */ retval = vendor_command(cytherm->udev, WRITE_RAM, BRIGHTNESS, cytherm->brightness, buffer, 8); if (retval) dev_dbg(&cytherm->udev->dev, "retval = %d\n", retval); /* Inform µC that we have changed the brightness setting */ retval = vendor_command(cytherm->udev, WRITE_RAM, BRIGHTNESS_SEM, 0x01, buffer, 8); if (retval) dev_dbg(&cytherm->udev->dev, "retval = %d\n", retval); kfree(buffer); return count; } static DEVICE_ATTR(brightness, S_IRUGO | S_IWUSR | S_IWGRP, show_brightness, set_brightness); #define TEMP 0x33 /* RAM location for temperature */ #define SIGN 0x34 /* RAM location for temperature sign */ static ssize_t show_temp(struct device *dev, struct device_attribute *attr, char 
*buf) { struct usb_interface *intf = to_usb_interface(dev); struct usb_cytherm *cytherm = usb_get_intfdata(intf); int retval; unsigned char *buffer; int temp, sign; buffer = kmalloc(8, GFP_KERNEL); if (!buffer) { dev_err(&cytherm->udev->dev, "out of memory\n"); return 0; } /* read temperature */ retval = vendor_command(cytherm->udev, READ_RAM, TEMP, 0, buffer, 8); if (retval) dev_dbg(&cytherm->udev->dev, "retval = %d\n", retval); temp = buffer[1]; /* read sign */ retval = vendor_command(cytherm->udev, READ_RAM, SIGN, 0, buffer, 8); if (retval) dev_dbg(&cytherm->udev->dev, "retval = %d\n", retval); sign = buffer[1]; kfree(buffer); return sprintf(buf, "%c%i.%i", sign ? '-' : '+', temp >> 1, 5*(temp - ((temp >> 1) << 1))); } static ssize_t set_temp(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { return count; } static DEVICE_ATTR(temp, S_IRUGO, show_temp, set_temp); #define BUTTON 0x7a static ssize_t show_button(struct device *dev, struct device_attribute *attr, char *buf) { struct usb_interface *intf = to_usb_interface(dev); struct usb_cytherm *cytherm = usb_get_intfdata(intf); int retval; unsigned char *buffer; buffer = kmalloc(8, GFP_KERNEL); if (!buffer) { dev_err(&cytherm->udev->dev, "out of memory\n"); return 0; } /* check button */ retval = vendor_command(cytherm->udev, READ_RAM, BUTTON, 0, buffer, 8); if (retval) dev_dbg(&cytherm->udev->dev, "retval = %d\n", retval); retval = buffer[1]; kfree(buffer); if (retval) return sprintf(buf, "1"); else return sprintf(buf, "0"); } static ssize_t set_button(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { return count; } static DEVICE_ATTR(button, S_IRUGO, show_button, set_button); static ssize_t show_port0(struct device *dev, struct device_attribute *attr, char *buf) { struct usb_interface *intf = to_usb_interface(dev); struct usb_cytherm *cytherm = usb_get_intfdata(intf); int retval; unsigned char *buffer; buffer = kmalloc(8, GFP_KERNEL); if 
(!buffer) { dev_err(&cytherm->udev->dev, "out of memory\n"); return 0; } retval = vendor_command(cytherm->udev, READ_PORT, 0, 0, buffer, 8); if (retval) dev_dbg(&cytherm->udev->dev, "retval = %d\n", retval); retval = buffer[1]; kfree(buffer); return sprintf(buf, "%d", retval); } static ssize_t set_port0(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct usb_interface *intf = to_usb_interface(dev); struct usb_cytherm *cytherm = usb_get_intfdata(intf); unsigned char *buffer; int retval; int tmp; buffer = kmalloc(8, GFP_KERNEL); if (!buffer) { dev_err(&cytherm->udev->dev, "out of memory\n"); return 0; } tmp = simple_strtoul(buf, NULL, 10); if (tmp > 0xFF) tmp = 0xFF; else if (tmp < 0) tmp = 0; retval = vendor_command(cytherm->udev, WRITE_PORT, 0, tmp, buffer, 8); if (retval) dev_dbg(&cytherm->udev->dev, "retval = %d\n", retval); kfree(buffer); return count; } static DEVICE_ATTR(port0, S_IRUGO | S_IWUSR | S_IWGRP, show_port0, set_port0); static ssize_t show_port1(struct device *dev, struct device_attribute *attr, char *buf) { struct usb_interface *intf = to_usb_interface(dev); struct usb_cytherm *cytherm = usb_get_intfdata(intf); int retval; unsigned char *buffer; buffer = kmalloc(8, GFP_KERNEL); if (!buffer) { dev_err(&cytherm->udev->dev, "out of memory\n"); return 0; } retval = vendor_command(cytherm->udev, READ_PORT, 1, 0, buffer, 8); if (retval) dev_dbg(&cytherm->udev->dev, "retval = %d\n", retval); retval = buffer[1]; kfree(buffer); return sprintf(buf, "%d", retval); } static ssize_t set_port1(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct usb_interface *intf = to_usb_interface(dev); struct usb_cytherm *cytherm = usb_get_intfdata(intf); unsigned char *buffer; int retval; int tmp; buffer = kmalloc(8, GFP_KERNEL); if (!buffer) { dev_err(&cytherm->udev->dev, "out of memory\n"); return 0; } tmp = simple_strtoul(buf, NULL, 10); if (tmp > 0xFF) tmp = 0xFF; else if (tmp < 0) tmp = 0; 
retval = vendor_command(cytherm->udev, WRITE_PORT, 1, tmp, buffer, 8); if (retval) dev_dbg(&cytherm->udev->dev, "retval = %d\n", retval); kfree(buffer); return count; } static DEVICE_ATTR(port1, S_IRUGO | S_IWUSR | S_IWGRP, show_port1, set_port1); static int cytherm_probe(struct usb_interface *interface, const struct usb_device_id *id) { struct usb_device *udev = interface_to_usbdev(interface); struct usb_cytherm *dev = NULL; int retval = -ENOMEM; dev = kzalloc (sizeof(struct usb_cytherm), GFP_KERNEL); if (dev == NULL) { dev_err (&interface->dev, "Out of memory\n"); goto error_mem; } dev->udev = usb_get_dev(udev); usb_set_intfdata (interface, dev); dev->brightness = 0xFF; retval = device_create_file(&interface->dev, &dev_attr_brightness); if (retval) goto error; retval = device_create_file(&interface->dev, &dev_attr_temp); if (retval) goto error; retval = device_create_file(&interface->dev, &dev_attr_button); if (retval) goto error; retval = device_create_file(&interface->dev, &dev_attr_port0); if (retval) goto error; retval = device_create_file(&interface->dev, &dev_attr_port1); if (retval) goto error; dev_info (&interface->dev, "Cypress thermometer device now attached\n"); return 0; error: device_remove_file(&interface->dev, &dev_attr_brightness); device_remove_file(&interface->dev, &dev_attr_temp); device_remove_file(&interface->dev, &dev_attr_button); device_remove_file(&interface->dev, &dev_attr_port0); device_remove_file(&interface->dev, &dev_attr_port1); usb_set_intfdata (interface, NULL); usb_put_dev(dev->udev); kfree(dev); error_mem: return retval; } static void cytherm_disconnect(struct usb_interface *interface) { struct usb_cytherm *dev; dev = usb_get_intfdata (interface); device_remove_file(&interface->dev, &dev_attr_brightness); device_remove_file(&interface->dev, &dev_attr_temp); device_remove_file(&interface->dev, &dev_attr_button); device_remove_file(&interface->dev, &dev_attr_port0); device_remove_file(&interface->dev, &dev_attr_port1); /* first 
remove the files, then NULL the pointer */ usb_set_intfdata (interface, NULL); usb_put_dev(dev->udev); kfree(dev); dev_info(&interface->dev, "Cypress thermometer now disconnected\n"); } module_usb_driver(cytherm_driver); MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL");
gpl-2.0
Jimmyk422/android_kernel_samsung_iconvmu
arch/powerpc/boot/libfdt-wrapper.c
12674
5163
/* * This file does the necessary interface mapping between the bootwrapper * device tree operations and the interface provided by shared source * files flatdevicetree.[ch]. * * Copyright 2007 David Gibson, IBM Corporation. * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of the * License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA * 02110-1301 USA */ #include <stddef.h> #include <stdio.h> #include <page.h> #include <libfdt.h> #include "ops.h" #define DEBUG 0 #define BAD_ERROR(err) (((err) < 0) \ && ((err) != -FDT_ERR_NOTFOUND) \ && ((err) != -FDT_ERR_EXISTS)) #define check_err(err) \ ({ \ if (BAD_ERROR(err) || ((err < 0) && DEBUG)) \ printf("%s():%d %s\n\r", __func__, __LINE__, \ fdt_strerror(err)); \ if (BAD_ERROR(err)) \ exit(); \ (err < 0) ? -1 : 0; \ }) #define offset_devp(off) \ ({ \ int _offset = (off); \ check_err(_offset) ? NULL : (void *)(_offset+1); \ }) #define devp_offset_find(devp) (((int)(devp))-1) #define devp_offset(devp) (devp ? 
((int)(devp))-1 : 0) static void *fdt; static void *buf; /* = NULL */ #define EXPAND_GRANULARITY 1024 static void expand_buf(int minexpand) { int size = fdt_totalsize(fdt); int rc; size = _ALIGN(size + minexpand, EXPAND_GRANULARITY); buf = platform_ops.realloc(buf, size); if (!buf) fatal("Couldn't find %d bytes to expand device tree\n\r", size); rc = fdt_open_into(fdt, buf, size); if (rc != 0) fatal("Couldn't expand fdt into new buffer: %s\n\r", fdt_strerror(rc)); fdt = buf; } static void *fdt_wrapper_finddevice(const char *path) { return offset_devp(fdt_path_offset(fdt, path)); } static int fdt_wrapper_getprop(const void *devp, const char *name, void *buf, const int buflen) { const void *p; int len; p = fdt_getprop(fdt, devp_offset(devp), name, &len); if (!p) return check_err(len); memcpy(buf, p, min(len, buflen)); return len; } static int fdt_wrapper_setprop(const void *devp, const char *name, const void *buf, const int len) { int rc; rc = fdt_setprop(fdt, devp_offset(devp), name, buf, len); if (rc == -FDT_ERR_NOSPACE) { expand_buf(len + 16); rc = fdt_setprop(fdt, devp_offset(devp), name, buf, len); } return check_err(rc); } static int fdt_wrapper_del_node(const void *devp) { return fdt_del_node(fdt, devp_offset(devp)); } static void *fdt_wrapper_get_parent(const void *devp) { return offset_devp(fdt_parent_offset(fdt, devp_offset(devp))); } static void *fdt_wrapper_create_node(const void *devp, const char *name) { int offset; offset = fdt_add_subnode(fdt, devp_offset(devp), name); if (offset == -FDT_ERR_NOSPACE) { expand_buf(strlen(name) + 16); offset = fdt_add_subnode(fdt, devp_offset(devp), name); } return offset_devp(offset); } static void *fdt_wrapper_find_node_by_prop_value(const void *prev, const char *name, const char *val, int len) { int offset = fdt_node_offset_by_prop_value(fdt, devp_offset_find(prev), name, val, len); return offset_devp(offset); } static void *fdt_wrapper_find_node_by_compatible(const void *prev, const char *val) { int offset = 
fdt_node_offset_by_compatible(fdt, devp_offset_find(prev), val); return offset_devp(offset); } static char *fdt_wrapper_get_path(const void *devp, char *buf, int len) { int rc; rc = fdt_get_path(fdt, devp_offset(devp), buf, len); if (check_err(rc)) return NULL; return buf; } static unsigned long fdt_wrapper_finalize(void) { int rc; rc = fdt_pack(fdt); if (rc != 0) fatal("Couldn't pack flat tree: %s\n\r", fdt_strerror(rc)); return (unsigned long)fdt; } void fdt_init(void *blob) { int err; int bufsize; dt_ops.finddevice = fdt_wrapper_finddevice; dt_ops.getprop = fdt_wrapper_getprop; dt_ops.setprop = fdt_wrapper_setprop; dt_ops.get_parent = fdt_wrapper_get_parent; dt_ops.create_node = fdt_wrapper_create_node; dt_ops.find_node_by_prop_value = fdt_wrapper_find_node_by_prop_value; dt_ops.find_node_by_compatible = fdt_wrapper_find_node_by_compatible; dt_ops.del_node = fdt_wrapper_del_node; dt_ops.get_path = fdt_wrapper_get_path; dt_ops.finalize = fdt_wrapper_finalize; /* Make sure the dt blob is the right version and so forth */ fdt = blob; bufsize = fdt_totalsize(fdt) + EXPAND_GRANULARITY; buf = malloc(bufsize); if(!buf) fatal("malloc failed. can't relocate the device tree\n\r"); err = fdt_open_into(fdt, buf, bufsize); if (err != 0) fatal("fdt_init(): %s\n\r", fdt_strerror(err)); fdt = buf; }
gpl-2.0
gizero/linux-wallya-2.6.33-rc4-psp03.20.00.14
drivers/infiniband/hw/mthca/mthca_pd.c
15490
2597
/* * Copyright (c) 2004 Topspin Communications. All rights reserved. * Copyright (c) 2005 Cisco Systems. All rights reserved. * Copyright (c) 2005 Mellanox Technologies. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ #include <linux/errno.h> #include "mthca_dev.h" int mthca_pd_alloc(struct mthca_dev *dev, int privileged, struct mthca_pd *pd) { int err = 0; pd->privileged = privileged; atomic_set(&pd->sqp_count, 0); pd->pd_num = mthca_alloc(&dev->pd_table.alloc); if (pd->pd_num == -1) return -ENOMEM; if (privileged) { err = mthca_mr_alloc_notrans(dev, pd->pd_num, MTHCA_MPT_FLAG_LOCAL_READ | MTHCA_MPT_FLAG_LOCAL_WRITE, &pd->ntmr); if (err) mthca_free(&dev->pd_table.alloc, pd->pd_num); } return err; } void mthca_pd_free(struct mthca_dev *dev, struct mthca_pd *pd) { if (pd->privileged) mthca_free_mr(dev, &pd->ntmr); mthca_free(&dev->pd_table.alloc, pd->pd_num); } int mthca_init_pd_table(struct mthca_dev *dev) { return mthca_alloc_init(&dev->pd_table.alloc, dev->limits.num_pds, (1 << 24) - 1, dev->limits.reserved_pds); } void mthca_cleanup_pd_table(struct mthca_dev *dev) { /* XXX check if any PDs are still allocated? */ mthca_alloc_cleanup(&dev->pd_table.alloc); }
gpl-2.0
BenHuiHui/linux
fs/namei.c
131
117305
/* * linux/fs/namei.c * * Copyright (C) 1991, 1992 Linus Torvalds */ /* * Some corrections by tytso. */ /* [Feb 1997 T. Schoebel-Theuer] Complete rewrite of the pathname * lookup logic. */ /* [Feb-Apr 2000, AV] Rewrite to the new namespace architecture. */ #include <linux/init.h> #include <linux/export.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/fs.h> #include <linux/namei.h> #include <linux/pagemap.h> #include <linux/fsnotify.h> #include <linux/personality.h> #include <linux/security.h> #include <linux/ima.h> #include <linux/syscalls.h> #include <linux/mount.h> #include <linux/audit.h> #include <linux/capability.h> #include <linux/file.h> #include <linux/fcntl.h> #include <linux/device_cgroup.h> #include <linux/fs_struct.h> #include <linux/posix_acl.h> #include <linux/hash.h> #include <asm/uaccess.h> #include "internal.h" #include "mount.h" /* [Feb-1997 T. Schoebel-Theuer] * Fundamental changes in the pathname lookup mechanisms (namei) * were necessary because of omirr. The reason is that omirr needs * to know the _real_ pathname, not the user-supplied one, in case * of symlinks (and also when transname replacements occur). * * The new code replaces the old recursive symlink resolution with * an iterative one (in case of non-nested symlink chains). It does * this with calls to <fs>_follow_link(). * As a side effect, dir_namei(), _namei() and follow_link() are now * replaced with a single function lookup_dentry() that can handle all * the special cases of the former code. * * With the new dcache, the pathname is stored at each inode, at least as * long as the refcount of the inode is positive. As a side effect, the * size of the dcache depends on the inode cache and thus is dynamic. * * [29-Apr-1998 C. Scott Ananian] Updated above description of symlink * resolution to correspond with current state of the code. * * Note that the symlink resolution is not *completely* iterative. 
* There is still a significant amount of tail- and mid- recursion in * the algorithm. Also, note that <fs>_readlink() is not used in * lookup_dentry(): lookup_dentry() on the result of <fs>_readlink() * may return different results than <fs>_follow_link(). Many virtual * filesystems (including /proc) exhibit this behavior. */ /* [24-Feb-97 T. Schoebel-Theuer] Side effects caused by new implementation: * New symlink semantics: when open() is called with flags O_CREAT | O_EXCL * and the name already exists in form of a symlink, try to create the new * name indicated by the symlink. The old code always complained that the * name already exists, due to not following the symlink even if its target * is nonexistent. The new semantics affects also mknod() and link() when * the name is a symlink pointing to a non-existent name. * * I don't know which semantics is the right one, since I have no access * to standards. But I found by trial that HP-UX 9.0 has the full "new" * semantics implemented, while SunOS 4.1.1 and Solaris (SunOS 5.4) have the * "old" one. Personally, I think the new semantics is much more logical. * Note that "ln old new" where "new" is a symlink pointing to a non-existing * file does succeed in both HP-UX and SunOs, but not in Solaris * and in the old Linux semantics. */ /* [16-Dec-97 Kevin Buhr] For security reasons, we change some symlink * semantics. See the comments in "open_namei" and "do_link" below. * * [10-Sep-98 Alan Modra] Another symlink change. */ /* [Feb-Apr 2000 AV] Complete rewrite. Rules for symlinks: * inside the path - always follow. * in the last component in creation/removal/renaming - never follow. * if LOOKUP_FOLLOW passed - follow. * if the pathname has trailing slashes - follow. * otherwise - don't follow. * (applied in that order). * * [Jun 2000 AV] Inconsistent behaviour of open() in case if flags==O_CREAT * restored for 2.4. This is the last surviving part of old 4.2BSD bug. 
* During the 2.4 we need to fix the userland stuff depending on it - * hopefully we will be able to get rid of that wart in 2.5. So far only * XEmacs seems to be relying on it... */ /* * [Sep 2001 AV] Single-semaphore locking scheme (kudos to David Holland) * implemented. Let's see if raised priority of ->s_vfs_rename_mutex gives * any extra contention... */ /* In order to reduce some races, while at the same time doing additional * checking and hopefully speeding things up, we copy filenames to the * kernel data space before using them.. * * POSIX.1 2.4: an empty pathname is invalid (ENOENT). * PATH_MAX includes the nul terminator --RR. */ #define EMBEDDED_NAME_MAX (PATH_MAX - offsetof(struct filename, iname)) struct filename * getname_flags(const char __user *filename, int flags, int *empty) { struct filename *result; char *kname; int len; result = audit_reusename(filename); if (result) return result; result = __getname(); if (unlikely(!result)) return ERR_PTR(-ENOMEM); /* * First, try to embed the struct filename inside the names_cache * allocation */ kname = (char *)result->iname; result->name = kname; len = strncpy_from_user(kname, filename, EMBEDDED_NAME_MAX); if (unlikely(len < 0)) { __putname(result); return ERR_PTR(len); } /* * Uh-oh. We have a name that's approaching PATH_MAX. Allocate a * separate struct filename so we can dedicate the entire * names_cache allocation for the pathname, and re-do the copy from * userland. */ if (unlikely(len == EMBEDDED_NAME_MAX)) { const size_t size = offsetof(struct filename, iname[1]); kname = (char *)result; /* * size is chosen that way we to guarantee that * result->iname[0] is within the same object and that * kname can't be equal to result->iname, no matter what. 
*/ result = kzalloc(size, GFP_KERNEL); if (unlikely(!result)) { __putname(kname); return ERR_PTR(-ENOMEM); } result->name = kname; len = strncpy_from_user(kname, filename, PATH_MAX); if (unlikely(len < 0)) { __putname(kname); kfree(result); return ERR_PTR(len); } if (unlikely(len == PATH_MAX)) { __putname(kname); kfree(result); return ERR_PTR(-ENAMETOOLONG); } } result->refcnt = 1; /* The empty path is special. */ if (unlikely(!len)) { if (empty) *empty = 1; if (!(flags & LOOKUP_EMPTY)) { putname(result); return ERR_PTR(-ENOENT); } } result->uptr = filename; result->aname = NULL; audit_getname(result); return result; } struct filename * getname(const char __user * filename) { return getname_flags(filename, 0, NULL); } struct filename * getname_kernel(const char * filename) { struct filename *result; int len = strlen(filename) + 1; result = __getname(); if (unlikely(!result)) return ERR_PTR(-ENOMEM); if (len <= EMBEDDED_NAME_MAX) { result->name = (char *)result->iname; } else if (len <= PATH_MAX) { struct filename *tmp; tmp = kmalloc(sizeof(*tmp), GFP_KERNEL); if (unlikely(!tmp)) { __putname(result); return ERR_PTR(-ENOMEM); } tmp->name = (char *)result; result = tmp; } else { __putname(result); return ERR_PTR(-ENAMETOOLONG); } memcpy((char *)result->name, filename, len); result->uptr = NULL; result->aname = NULL; result->refcnt = 1; audit_getname(result); return result; } void putname(struct filename *name) { BUG_ON(name->refcnt <= 0); if (--name->refcnt > 0) return; if (name->name != name->iname) { __putname(name->name); kfree(name); } else __putname(name); } static int check_acl(struct inode *inode, int mask) { #ifdef CONFIG_FS_POSIX_ACL struct posix_acl *acl; if (mask & MAY_NOT_BLOCK) { acl = get_cached_acl_rcu(inode, ACL_TYPE_ACCESS); if (!acl) return -EAGAIN; /* no ->get_acl() calls in RCU mode... 
*/ if (acl == ACL_NOT_CACHED) return -ECHILD; return posix_acl_permission(inode, acl, mask & ~MAY_NOT_BLOCK); } acl = get_acl(inode, ACL_TYPE_ACCESS); if (IS_ERR(acl)) return PTR_ERR(acl); if (acl) { int error = posix_acl_permission(inode, acl, mask); posix_acl_release(acl); return error; } #endif return -EAGAIN; } /* * This does the basic permission checking */ static int acl_permission_check(struct inode *inode, int mask) { unsigned int mode = inode->i_mode; if (likely(uid_eq(current_fsuid(), inode->i_uid))) mode >>= 6; else { if (IS_POSIXACL(inode) && (mode & S_IRWXG)) { int error = check_acl(inode, mask); if (error != -EAGAIN) return error; } if (in_group_p(inode->i_gid)) mode >>= 3; } /* * If the DACs are ok we don't need any capability check. */ if ((mask & ~mode & (MAY_READ | MAY_WRITE | MAY_EXEC)) == 0) return 0; return -EACCES; } /** * generic_permission - check for access rights on a Posix-like filesystem * @inode: inode to check access rights for * @mask: right to check for (%MAY_READ, %MAY_WRITE, %MAY_EXEC, ...) * * Used to check for read/write/execute permissions on a file. * We use "fsuid" for this, letting us set arbitrary permissions * for filesystem access without changing the "normal" uids which * are used for other things. * * generic_permission is rcu-walk aware. It returns -ECHILD in case an rcu-walk * request cannot be satisfied (eg. requires blocking or too much complexity). * It would then be called again in ref-walk mode. */ int generic_permission(struct inode *inode, int mask) { int ret; /* * Do the basic permission checks. */ ret = acl_permission_check(inode, mask); if (ret != -EACCES) return ret; if (S_ISDIR(inode->i_mode)) { /* DACs are overridable for directories */ if (capable_wrt_inode_uidgid(inode, CAP_DAC_OVERRIDE)) return 0; if (!(mask & MAY_WRITE)) if (capable_wrt_inode_uidgid(inode, CAP_DAC_READ_SEARCH)) return 0; return -EACCES; } /* * Read/write DACs are always overridable. 
* Executable DACs are overridable when there is * at least one exec bit set. */ if (!(mask & MAY_EXEC) || (inode->i_mode & S_IXUGO)) if (capable_wrt_inode_uidgid(inode, CAP_DAC_OVERRIDE)) return 0; /* * Searching includes executable on directories, else just read. */ mask &= MAY_READ | MAY_WRITE | MAY_EXEC; if (mask == MAY_READ) if (capable_wrt_inode_uidgid(inode, CAP_DAC_READ_SEARCH)) return 0; return -EACCES; } EXPORT_SYMBOL(generic_permission); /* * We _really_ want to just do "generic_permission()" without * even looking at the inode->i_op values. So we keep a cache * flag in inode->i_opflags, that says "this has not special * permission function, use the fast case". */ static inline int do_inode_permission(struct inode *inode, int mask) { if (unlikely(!(inode->i_opflags & IOP_FASTPERM))) { if (likely(inode->i_op->permission)) return inode->i_op->permission(inode, mask); /* This gets set once for the inode lifetime */ spin_lock(&inode->i_lock); inode->i_opflags |= IOP_FASTPERM; spin_unlock(&inode->i_lock); } return generic_permission(inode, mask); } /** * __inode_permission - Check for access rights to a given inode * @inode: Inode to check permission on * @mask: Right to check for (%MAY_READ, %MAY_WRITE, %MAY_EXEC) * * Check for read/write/execute permissions on an inode. * * When checking for MAY_APPEND, MAY_WRITE must also be set in @mask. * * This does not check for a read-only file system. You probably want * inode_permission(). */ int __inode_permission(struct inode *inode, int mask) { int retval; if (unlikely(mask & MAY_WRITE)) { /* * Nobody gets write access to an immutable file. 
*/ if (IS_IMMUTABLE(inode)) return -EACCES; } retval = do_inode_permission(inode, mask); if (retval) return retval; retval = devcgroup_inode_permission(inode, mask); if (retval) return retval; return security_inode_permission(inode, mask); } EXPORT_SYMBOL(__inode_permission); /** * sb_permission - Check superblock-level permissions * @sb: Superblock of inode to check permission on * @inode: Inode to check permission on * @mask: Right to check for (%MAY_READ, %MAY_WRITE, %MAY_EXEC) * * Separate out file-system wide checks from inode-specific permission checks. */ static int sb_permission(struct super_block *sb, struct inode *inode, int mask) { if (unlikely(mask & MAY_WRITE)) { umode_t mode = inode->i_mode; /* Nobody gets write access to a read-only fs. */ if ((sb->s_flags & MS_RDONLY) && (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))) return -EROFS; } return 0; } /** * inode_permission - Check for access rights to a given inode * @inode: Inode to check permission on * @mask: Right to check for (%MAY_READ, %MAY_WRITE, %MAY_EXEC) * * Check for read/write/execute permissions on an inode. We use fs[ug]id for * this, letting us set arbitrary permissions for filesystem access without * changing the "normal" UIDs which are used for other things. * * When checking for MAY_APPEND, MAY_WRITE must also be set in @mask. */ int inode_permission(struct inode *inode, int mask) { int retval; retval = sb_permission(inode->i_sb, inode, mask); if (retval) return retval; return __inode_permission(inode, mask); } EXPORT_SYMBOL(inode_permission); /** * path_get - get a reference to a path * @path: path to get the reference to * * Given a path increment the reference count to the dentry and the vfsmount. */ void path_get(const struct path *path) { mntget(path->mnt); dget(path->dentry); } EXPORT_SYMBOL(path_get); /** * path_put - put a reference to a path * @path: path to put the reference to * * Given a path decrement the reference count to the dentry and the vfsmount. 
*/ void path_put(const struct path *path) { dput(path->dentry); mntput(path->mnt); } EXPORT_SYMBOL(path_put); #define EMBEDDED_LEVELS 2 struct nameidata { struct path path; struct qstr last; struct path root; struct inode *inode; /* path.dentry.d_inode */ unsigned int flags; unsigned seq, m_seq; int last_type; unsigned depth; int total_link_count; struct saved { struct path link; void *cookie; const char *name; struct inode *inode; unsigned seq; } *stack, internal[EMBEDDED_LEVELS]; struct filename *name; struct nameidata *saved; unsigned root_seq; int dfd; }; static void set_nameidata(struct nameidata *p, int dfd, struct filename *name) { struct nameidata *old = current->nameidata; p->stack = p->internal; p->dfd = dfd; p->name = name; p->total_link_count = old ? old->total_link_count : 0; p->saved = old; current->nameidata = p; } static void restore_nameidata(void) { struct nameidata *now = current->nameidata, *old = now->saved; current->nameidata = old; if (old) old->total_link_count = now->total_link_count; if (now->stack != now->internal) { kfree(now->stack); now->stack = now->internal; } } static int __nd_alloc_stack(struct nameidata *nd) { struct saved *p; if (nd->flags & LOOKUP_RCU) { p= kmalloc(MAXSYMLINKS * sizeof(struct saved), GFP_ATOMIC); if (unlikely(!p)) return -ECHILD; } else { p= kmalloc(MAXSYMLINKS * sizeof(struct saved), GFP_KERNEL); if (unlikely(!p)) return -ENOMEM; } memcpy(p, nd->internal, sizeof(nd->internal)); nd->stack = p; return 0; } /** * path_connected - Verify that a path->dentry is below path->mnt.mnt_root * @path: nameidate to verify * * Rename can sometimes move a file or directory outside of a bind * mount, path_connected allows those cases to be detected. 
*/ static bool path_connected(const struct path *path) { struct vfsmount *mnt = path->mnt; /* Only bind mounts can have disconnected paths */ if (mnt->mnt_root == mnt->mnt_sb->s_root) return true; return is_subdir(path->dentry, mnt->mnt_root); } static inline int nd_alloc_stack(struct nameidata *nd) { if (likely(nd->depth != EMBEDDED_LEVELS)) return 0; if (likely(nd->stack != nd->internal)) return 0; return __nd_alloc_stack(nd); } static void drop_links(struct nameidata *nd) { int i = nd->depth; while (i--) { struct saved *last = nd->stack + i; struct inode *inode = last->inode; if (last->cookie && inode->i_op->put_link) { inode->i_op->put_link(inode, last->cookie); last->cookie = NULL; } } } static void terminate_walk(struct nameidata *nd) { drop_links(nd); if (!(nd->flags & LOOKUP_RCU)) { int i; path_put(&nd->path); for (i = 0; i < nd->depth; i++) path_put(&nd->stack[i].link); if (nd->root.mnt && !(nd->flags & LOOKUP_ROOT)) { path_put(&nd->root); nd->root.mnt = NULL; } } else { nd->flags &= ~LOOKUP_RCU; if (!(nd->flags & LOOKUP_ROOT)) nd->root.mnt = NULL; rcu_read_unlock(); } nd->depth = 0; } /* path_put is needed afterwards regardless of success or failure */ static bool legitimize_path(struct nameidata *nd, struct path *path, unsigned seq) { int res = __legitimize_mnt(path->mnt, nd->m_seq); if (unlikely(res)) { if (res > 0) path->mnt = NULL; path->dentry = NULL; return false; } if (unlikely(!lockref_get_not_dead(&path->dentry->d_lockref))) { path->dentry = NULL; return false; } return !read_seqcount_retry(&path->dentry->d_seq, seq); } static bool legitimize_links(struct nameidata *nd) { int i; for (i = 0; i < nd->depth; i++) { struct saved *last = nd->stack + i; if (unlikely(!legitimize_path(nd, &last->link, last->seq))) { drop_links(nd); nd->depth = i + 1; return false; } } return true; } /* * Path walking has 2 modes, rcu-walk and ref-walk (see * Documentation/filesystems/path-lookup.txt). 
In situations when we can't * continue in RCU mode, we attempt to drop out of rcu-walk mode and grab * normal reference counts on dentries and vfsmounts to transition to rcu-walk * mode. Refcounts are grabbed at the last known good point before rcu-walk * got stuck, so ref-walk may continue from there. If this is not successful * (eg. a seqcount has changed), then failure is returned and it's up to caller * to restart the path walk from the beginning in ref-walk mode. */ /** * unlazy_walk - try to switch to ref-walk mode. * @nd: nameidata pathwalk data * @dentry: child of nd->path.dentry or NULL * @seq: seq number to check dentry against * Returns: 0 on success, -ECHILD on failure * * unlazy_walk attempts to legitimize the current nd->path, nd->root and dentry * for ref-walk mode. @dentry must be a path found by a do_lookup call on * @nd or NULL. Must be called from rcu-walk context. * Nothing should touch nameidata between unlazy_walk() failure and * terminate_walk(). */ static int unlazy_walk(struct nameidata *nd, struct dentry *dentry, unsigned seq) { struct dentry *parent = nd->path.dentry; BUG_ON(!(nd->flags & LOOKUP_RCU)); nd->flags &= ~LOOKUP_RCU; if (unlikely(!legitimize_links(nd))) goto out2; if (unlikely(!legitimize_mnt(nd->path.mnt, nd->m_seq))) goto out2; if (unlikely(!lockref_get_not_dead(&parent->d_lockref))) goto out1; /* * For a negative lookup, the lookup sequence point is the parents * sequence point, and it only needs to revalidate the parent dentry. * * For a positive lookup, we need to move both the parent and the * dentry from the RCU domain to be properly refcounted. And the * sequence number in the dentry validates *both* dentry counters, * since we checked the sequence number of the parent after we got * the child sequence number. So we know the parent must still * be valid if the child sequence number is still valid. 
*/ if (!dentry) { if (read_seqcount_retry(&parent->d_seq, nd->seq)) goto out; BUG_ON(nd->inode != parent->d_inode); } else { if (!lockref_get_not_dead(&dentry->d_lockref)) goto out; if (read_seqcount_retry(&dentry->d_seq, seq)) goto drop_dentry; } /* * Sequence counts matched. Now make sure that the root is * still valid and get it if required. */ if (nd->root.mnt && !(nd->flags & LOOKUP_ROOT)) { if (unlikely(!legitimize_path(nd, &nd->root, nd->root_seq))) { rcu_read_unlock(); dput(dentry); return -ECHILD; } } rcu_read_unlock(); return 0; drop_dentry: rcu_read_unlock(); dput(dentry); goto drop_root_mnt; out2: nd->path.mnt = NULL; out1: nd->path.dentry = NULL; out: rcu_read_unlock(); drop_root_mnt: if (!(nd->flags & LOOKUP_ROOT)) nd->root.mnt = NULL; return -ECHILD; } static int unlazy_link(struct nameidata *nd, struct path *link, unsigned seq) { if (unlikely(!legitimize_path(nd, link, seq))) { drop_links(nd); nd->depth = 0; nd->flags &= ~LOOKUP_RCU; nd->path.mnt = NULL; nd->path.dentry = NULL; if (!(nd->flags & LOOKUP_ROOT)) nd->root.mnt = NULL; rcu_read_unlock(); } else if (likely(unlazy_walk(nd, NULL, 0)) == 0) { return 0; } path_put(link); return -ECHILD; } static inline int d_revalidate(struct dentry *dentry, unsigned int flags) { return dentry->d_op->d_revalidate(dentry, flags); } /** * complete_walk - successful completion of path walk * @nd: pointer nameidata * * If we had been in RCU mode, drop out of it and legitimize nd->path. * Revalidate the final result, unless we'd already done that during * the path walk or the filesystem doesn't ask for it. Return 0 on * success, -error on failure. In case of failure caller does not * need to drop nd->path. 
*/ static int complete_walk(struct nameidata *nd) { struct dentry *dentry = nd->path.dentry; int status; if (nd->flags & LOOKUP_RCU) { if (!(nd->flags & LOOKUP_ROOT)) nd->root.mnt = NULL; if (unlikely(unlazy_walk(nd, NULL, 0))) return -ECHILD; } if (likely(!(nd->flags & LOOKUP_JUMPED))) return 0; if (likely(!(dentry->d_flags & DCACHE_OP_WEAK_REVALIDATE))) return 0; status = dentry->d_op->d_weak_revalidate(dentry, nd->flags); if (status > 0) return 0; if (!status) status = -ESTALE; return status; } static void set_root(struct nameidata *nd) { get_fs_root(current->fs, &nd->root); } static void set_root_rcu(struct nameidata *nd) { struct fs_struct *fs = current->fs; unsigned seq; do { seq = read_seqcount_begin(&fs->seq); nd->root = fs->root; nd->root_seq = __read_seqcount_begin(&nd->root.dentry->d_seq); } while (read_seqcount_retry(&fs->seq, seq)); } static void path_put_conditional(struct path *path, struct nameidata *nd) { dput(path->dentry); if (path->mnt != nd->path.mnt) mntput(path->mnt); } static inline void path_to_nameidata(const struct path *path, struct nameidata *nd) { if (!(nd->flags & LOOKUP_RCU)) { dput(nd->path.dentry); if (nd->path.mnt != path->mnt) mntput(nd->path.mnt); } nd->path.mnt = path->mnt; nd->path.dentry = path->dentry; } /* * Helper to directly jump to a known parsed path from ->follow_link, * caller must have taken a reference to path beforehand. 
*/ void nd_jump_link(struct path *path) { struct nameidata *nd = current->nameidata; path_put(&nd->path); nd->path = *path; nd->inode = nd->path.dentry->d_inode; nd->flags |= LOOKUP_JUMPED; } static inline void put_link(struct nameidata *nd) { struct saved *last = nd->stack + --nd->depth; struct inode *inode = last->inode; if (last->cookie && inode->i_op->put_link) inode->i_op->put_link(inode, last->cookie); if (!(nd->flags & LOOKUP_RCU)) path_put(&last->link); } int sysctl_protected_symlinks __read_mostly = 0; int sysctl_protected_hardlinks __read_mostly = 0; /** * may_follow_link - Check symlink following for unsafe situations * @nd: nameidata pathwalk data * * In the case of the sysctl_protected_symlinks sysctl being enabled, * CAP_DAC_OVERRIDE needs to be specifically ignored if the symlink is * in a sticky world-writable directory. This is to protect privileged * processes from failing races against path names that may change out * from under them by way of other users creating malicious symlinks. * It will permit symlinks to be followed only when outside a sticky * world-writable directory, or when the uid of the symlink and follower * match, or when the directory owner matches the symlink's owner. * * Returns 0 if following the symlink is allowed, -ve on error. */ static inline int may_follow_link(struct nameidata *nd) { const struct inode *inode; const struct inode *parent; if (!sysctl_protected_symlinks) return 0; /* Allowed if owner and follower match. */ inode = nd->stack[0].inode; if (uid_eq(current_cred()->fsuid, inode->i_uid)) return 0; /* Allowed if parent directory not sticky and world-writable. */ parent = nd->inode; if ((parent->i_mode & (S_ISVTX|S_IWOTH)) != (S_ISVTX|S_IWOTH)) return 0; /* Allowed if parent directory and link owner match. 
*/ if (uid_eq(parent->i_uid, inode->i_uid)) return 0; if (nd->flags & LOOKUP_RCU) return -ECHILD; audit_log_link_denied("follow_link", &nd->stack[0].link); return -EACCES; } /** * safe_hardlink_source - Check for safe hardlink conditions * @inode: the source inode to hardlink from * * Return false if at least one of the following conditions: * - inode is not a regular file * - inode is setuid * - inode is setgid and group-exec * - access failure for read and write * * Otherwise returns true. */ static bool safe_hardlink_source(struct inode *inode) { umode_t mode = inode->i_mode; /* Special files should not get pinned to the filesystem. */ if (!S_ISREG(mode)) return false; /* Setuid files should not get pinned to the filesystem. */ if (mode & S_ISUID) return false; /* Executable setgid files should not get pinned to the filesystem. */ if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) return false; /* Hardlinking to unreadable or unwritable sources is dangerous. */ if (inode_permission(inode, MAY_READ | MAY_WRITE)) return false; return true; } /** * may_linkat - Check permissions for creating a hardlink * @link: the source to hardlink from * * Block hardlink when all of: * - sysctl_protected_hardlinks enabled * - fsuid does not match inode * - hardlink source is unsafe (see safe_hardlink_source() above) * - not CAP_FOWNER * * Returns 0 if successful, -ve on error. */ static int may_linkat(struct path *link) { const struct cred *cred; struct inode *inode; if (!sysctl_protected_hardlinks) return 0; cred = current_cred(); inode = link->dentry->d_inode; /* Source inode owner (or CAP_FOWNER) can hardlink all they like, * otherwise, it must be a safe source. 
*/ if (uid_eq(cred->fsuid, inode->i_uid) || safe_hardlink_source(inode) || capable(CAP_FOWNER)) return 0; audit_log_link_denied("linkat", link); return -EPERM; } static __always_inline const char *get_link(struct nameidata *nd) { struct saved *last = nd->stack + nd->depth - 1; struct dentry *dentry = last->link.dentry; struct inode *inode = last->inode; int error; const char *res; if (!(nd->flags & LOOKUP_RCU)) { touch_atime(&last->link); cond_resched(); } else if (atime_needs_update(&last->link, inode)) { if (unlikely(unlazy_walk(nd, NULL, 0))) return ERR_PTR(-ECHILD); touch_atime(&last->link); } error = security_inode_follow_link(dentry, inode, nd->flags & LOOKUP_RCU); if (unlikely(error)) return ERR_PTR(error); nd->last_type = LAST_BIND; res = inode->i_link; if (!res) { if (nd->flags & LOOKUP_RCU) { if (unlikely(unlazy_walk(nd, NULL, 0))) return ERR_PTR(-ECHILD); } res = inode->i_op->follow_link(dentry, &last->cookie); if (IS_ERR_OR_NULL(res)) { last->cookie = NULL; return res; } } if (*res == '/') { if (nd->flags & LOOKUP_RCU) { struct dentry *d; if (!nd->root.mnt) set_root_rcu(nd); nd->path = nd->root; d = nd->path.dentry; nd->inode = d->d_inode; nd->seq = nd->root_seq; if (unlikely(read_seqcount_retry(&d->d_seq, nd->seq))) return ERR_PTR(-ECHILD); } else { if (!nd->root.mnt) set_root(nd); path_put(&nd->path); nd->path = nd->root; path_get(&nd->root); nd->inode = nd->path.dentry->d_inode; } nd->flags |= LOOKUP_JUMPED; while (unlikely(*++res == '/')) ; } if (!*res) res = NULL; return res; } /* * follow_up - Find the mountpoint of path's vfsmount * * Given a path, find the mountpoint of its source file system. * Replace @path with the path of the mountpoint in the parent mount. * Up is towards /. * * Return 1 if we went up a level and 0 if we were already at the * root. 
*/ int follow_up(struct path *path) { struct mount *mnt = real_mount(path->mnt); struct mount *parent; struct dentry *mountpoint; read_seqlock_excl(&mount_lock); parent = mnt->mnt_parent; if (parent == mnt) { read_sequnlock_excl(&mount_lock); return 0; } mntget(&parent->mnt); mountpoint = dget(mnt->mnt_mountpoint); read_sequnlock_excl(&mount_lock); dput(path->dentry); path->dentry = mountpoint; mntput(path->mnt); path->mnt = &parent->mnt; return 1; } EXPORT_SYMBOL(follow_up); /* * Perform an automount * - return -EISDIR to tell follow_managed() to stop and return the path we * were called with. */ static int follow_automount(struct path *path, struct nameidata *nd, bool *need_mntput) { struct vfsmount *mnt; int err; if (!path->dentry->d_op || !path->dentry->d_op->d_automount) return -EREMOTE; /* We don't want to mount if someone's just doing a stat - * unless they're stat'ing a directory and appended a '/' to * the name. * * We do, however, want to mount if someone wants to open or * create a file of any type under the mountpoint, wants to * traverse through the mountpoint or wants to open the * mounted directory. Also, autofs may mark negative dentries * as being automount points. These will need the attentions * of the daemon to instantiate them before they can be used. */ if (!(nd->flags & (LOOKUP_PARENT | LOOKUP_DIRECTORY | LOOKUP_OPEN | LOOKUP_CREATE | LOOKUP_AUTOMOUNT)) && path->dentry->d_inode) return -EISDIR; nd->total_link_count++; if (nd->total_link_count >= 40) return -ELOOP; mnt = path->dentry->d_op->d_automount(path); if (IS_ERR(mnt)) { /* * The filesystem is allowed to return -EISDIR here to indicate * it doesn't want to automount. For instance, autofs would do * this so that its userspace daemon can mount on this dentry. * * However, we can only permit this if it's a terminal point in * the path being looked up; if it wasn't then the remainder of * the path is inaccessible and we should say so. 
		 */
		if (PTR_ERR(mnt) == -EISDIR && (nd->flags & LOOKUP_PARENT))
			return -EREMOTE;
		return PTR_ERR(mnt);
	}

	if (!mnt) /* mount collision */
		return 0;

	if (!*need_mntput) {
		/* lock_mount() may release path->mnt on error */
		mntget(path->mnt);
		*need_mntput = true;
	}
	err = finish_automount(mnt, path);

	switch (err) {
	case -EBUSY:
		/* Someone else made a mount here whilst we were busy */
		return 0;
	case 0:
		/* mount succeeded: step onto the new mount's root */
		path_put(path);
		path->mnt = mnt;
		path->dentry = dget(mnt->mnt_root);
		return 0;
	default:
		return err;
	}

}

/*
 * Handle a dentry that is managed in some way.
 * - Flagged for transit management (autofs)
 * - Flagged as mountpoint
 * - Flagged as automount point
 *
 * This may only be called in refwalk mode.
 *
 * Serialization is taken care of in namespace.c
 */
static int follow_managed(struct path *path, struct nameidata *nd)
{
	struct vfsmount *mnt = path->mnt; /* held by caller, must be left alone */
	unsigned managed;
	bool need_mntput = false;
	int ret = 0;

	/* Given that we're not holding a lock here, we retain the value in a
	 * local variable for each dentry as we look at it so that we don't see
	 * the components of that value change under us */
	while (managed = ACCESS_ONCE(path->dentry->d_flags),
	       managed &= DCACHE_MANAGED_DENTRY,
	       unlikely(managed != 0)) {
		/* Allow the filesystem to manage the transit without i_mutex
		 * being held. */
		if (managed & DCACHE_MANAGE_TRANSIT) {
			BUG_ON(!path->dentry->d_op);
			BUG_ON(!path->dentry->d_op->d_manage);
			ret = path->dentry->d_op->d_manage(path->dentry, false);
			if (ret < 0)
				break;
		}

		/* Transit to a mounted filesystem.
		 */
		if (managed & DCACHE_MOUNTED) {
			struct vfsmount *mounted = lookup_mnt(path);
			if (mounted) {
				dput(path->dentry);
				if (need_mntput)
					mntput(path->mnt);
				path->mnt = mounted;
				path->dentry = dget(mounted->mnt_root);
				need_mntput = true;
				continue;
			}

			/* Something is mounted on this dentry in another
			 * namespace and/or whatever was mounted there in this
			 * namespace got unmounted before lookup_mnt() could
			 * get it */
		}

		/* Handle an automount point */
		if (managed & DCACHE_NEED_AUTOMOUNT) {
			ret = follow_automount(path, nd, &need_mntput);
			if (ret < 0)
				break;
			continue;
		}

		/* We didn't change the current path point */
		break;
	}

	/* only drop the extra ref if we never moved off the caller's mount */
	if (need_mntput && path->mnt == mnt)
		mntput(path->mnt);
	if (ret == -EISDIR)
		ret = 0;
	if (need_mntput)
		nd->flags |= LOOKUP_JUMPED;
	if (unlikely(ret < 0))
		path_put_conditional(path, nd);
	return ret;
}

/*
 * follow_down_one - step through a single mount stacked on this point,
 * if any.  Returns 1 if @path was moved onto the covering mount, 0 if
 * nothing is mounted here.
 */
int follow_down_one(struct path *path)
{
	struct vfsmount *mounted;

	mounted = lookup_mnt(path);
	if (mounted) {
		dput(path->dentry);
		mntput(path->mnt);
		path->mnt = mounted;
		path->dentry = dget(mounted->mnt_root);
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL(follow_down_one);

/* Ask the filesystem (non-blocking) whether transit is allowed; 0 if the
 * dentry is not transit-managed. */
static inline int managed_dentry_rcu(struct dentry *dentry)
{
	return (dentry->d_flags & DCACHE_MANAGE_TRANSIT) ?
		dentry->d_op->d_manage(dentry, true) : 0;
}

/*
 * Try to skip to top of mountpoint pile in rcuwalk mode.  Fail if
 * we meet a managed dentry that would need blocking.
 */
static bool __follow_mount_rcu(struct nameidata *nd, struct path *path,
			       struct inode **inode, unsigned *seqp)
{
	for (;;) {
		struct mount *mounted;
		/*
		 * Don't forget we might have a non-mountpoint managed dentry
		 * that wants to block transit.
		 */
		switch (managed_dentry_rcu(path->dentry)) {
		case -ECHILD:
		default:
			/* would need to block: force a switch to ref-walk */
			return false;
		case -EISDIR:
			return true;
		case 0:
			break;
		}

		if (!d_mountpoint(path->dentry))
			return !(path->dentry->d_flags & DCACHE_NEED_AUTOMOUNT);

		mounted = __lookup_mnt(path->mnt, path->dentry);
		if (!mounted)
			break;
		path->mnt = &mounted->mnt;
		path->dentry = mounted->mnt.mnt_root;
		nd->flags |= LOOKUP_JUMPED;
		*seqp = read_seqcount_begin(&path->dentry->d_seq);
		/*
		 * Update the inode too. We don't need to re-check the
		 * dentry sequence number here after this d_inode read,
		 * because a mount-point is always pinned.
		 */
		*inode = path->dentry->d_inode;
	}
	return !read_seqretry(&mount_lock, nd->m_seq) &&
		!(path->dentry->d_flags & DCACHE_NEED_AUTOMOUNT);
}

/*
 * Handle ".." in RCU-walk mode: step to the parent dentry, or across a
 * mount boundary to the mountpoint in the parent mount, validating every
 * step against the dentry and mount seqcounts (-ECHILD on any mismatch).
 */
static int follow_dotdot_rcu(struct nameidata *nd)
{
	struct inode *inode = nd->inode;
	if (!nd->root.mnt)
		set_root_rcu(nd);

	while (1) {
		if (path_equal(&nd->path, &nd->root))
			break;
		if (nd->path.dentry != nd->path.mnt->mnt_root) {
			/* within one mount: go to the dentry's parent */
			struct dentry *old = nd->path.dentry;
			struct dentry *parent = old->d_parent;
			unsigned seq;

			inode = parent->d_inode;
			seq = read_seqcount_begin(&parent->d_seq);
			if (unlikely(read_seqcount_retry(&old->d_seq, nd->seq)))
				return -ECHILD;
			nd->path.dentry = parent;
			nd->seq = seq;
			if (unlikely(!path_connected(&nd->path)))
				return -ENOENT;
			break;
		} else {
			/* at a mount root: cross to the parent mount */
			struct mount *mnt = real_mount(nd->path.mnt);
			struct mount *mparent = mnt->mnt_parent;
			struct dentry *mountpoint = mnt->mnt_mountpoint;
			struct inode *inode2 = mountpoint->d_inode;
			unsigned seq = read_seqcount_begin(&mountpoint->d_seq);
			if (unlikely(read_seqretry(&mount_lock, nd->m_seq)))
				return -ECHILD;
			if (&mparent->mnt == nd->path.mnt)
				break;
			/* we know that mountpoint was pinned */
			nd->path.dentry = mountpoint;
			nd->path.mnt = &mparent->mnt;
			inode = inode2;
			nd->seq = seq;
		}
	}
	/* then descend through any mounts stacked on the result */
	while (unlikely(d_mountpoint(nd->path.dentry))) {
		struct mount *mounted;
		mounted = __lookup_mnt(nd->path.mnt, nd->path.dentry);
		if (unlikely(read_seqretry(&mount_lock, nd->m_seq)))
			return -ECHILD;
		if (!mounted)
			break;
		nd->path.mnt = &mounted->mnt;
		nd->path.dentry = mounted->mnt.mnt_root;
		inode = nd->path.dentry->d_inode;
		nd->seq = read_seqcount_begin(&nd->path.dentry->d_seq);
	}
	nd->inode = inode;
	return 0;
}

/*
 * Follow down to the covering mount currently visible to userspace.  At each
 * point, the filesystem owning that dentry may be queried as to whether the
 * caller is permitted to proceed or not.
 */
int follow_down(struct path *path)
{
	unsigned managed;
	int ret;

	while (managed = ACCESS_ONCE(path->dentry->d_flags),
	       unlikely(managed & DCACHE_MANAGED_DENTRY)) {
		/* Allow the filesystem to manage the transit without i_mutex
		 * being held.
		 *
		 * We indicate to the filesystem if someone is trying to mount
		 * something here.  This gives autofs the chance to deny anyone
		 * other than its daemon the right to mount on its
		 * superstructure.
		 *
		 * The filesystem may sleep at this point.
		 */
		if (managed & DCACHE_MANAGE_TRANSIT) {
			BUG_ON(!path->dentry->d_op);
			BUG_ON(!path->dentry->d_op->d_manage);
			ret = path->dentry->d_op->d_manage(
				path->dentry, false);
			if (ret < 0)
				return ret == -EISDIR ? 0 : ret;
		}

		/* Transit to a mounted filesystem.
		 */
		if (managed & DCACHE_MOUNTED) {
			struct vfsmount *mounted = lookup_mnt(path);
			if (!mounted)
				break;
			dput(path->dentry);
			mntput(path->mnt);
			path->mnt = mounted;
			path->dentry = dget(mounted->mnt_root);
			continue;
		}

		/* Don't handle automount points here */
		break;
	}
	return 0;
}
EXPORT_SYMBOL(follow_down);

/*
 * Skip to top of mountpoint pile in refwalk mode for follow_dotdot()
 */
static void follow_mount(struct path *path)
{
	while (d_mountpoint(path->dentry)) {
		struct vfsmount *mounted = lookup_mnt(path);
		if (!mounted)
			break;
		dput(path->dentry);
		mntput(path->mnt);
		path->mnt = mounted;
		path->dentry = dget(mounted->mnt_root);
	}
}

/*
 * Handle ".." in ref-walk mode: stop at the root, step to the parent
 * within a mount, or go up across a mount boundary via follow_up(); then
 * descend through any mounts stacked on the result.
 */
static int follow_dotdot(struct nameidata *nd)
{
	if (!nd->root.mnt)
		set_root(nd);

	while(1) {
		struct dentry *old = nd->path.dentry;

		if (nd->path.dentry == nd->root.dentry &&
		    nd->path.mnt == nd->root.mnt) {
			break;
		}
		if (nd->path.dentry != nd->path.mnt->mnt_root) {
			/* rare case of legitimate dget_parent()... */
			nd->path.dentry = dget_parent(nd->path.dentry);
			dput(old);
			if (unlikely(!path_connected(&nd->path)))
				return -ENOENT;
			break;
		}
		if (!follow_up(&nd->path))
			break;
	}
	follow_mount(&nd->path);
	nd->inode = nd->path.dentry->d_inode;
	return 0;
}

/*
 * This looks up the name in dcache, possibly revalidates the old dentry and
 * allocates a new one if not found or not valid.  In the need_lookup argument
 * returns whether i_op->lookup is necessary.
 *
 * dir->d_inode->i_mutex must be held
 */
static struct dentry *lookup_dcache(struct qstr *name, struct dentry *dir,
				    unsigned int flags, bool *need_lookup)
{
	struct dentry *dentry;
	int error;

	*need_lookup = false;
	dentry = d_lookup(dir, name);
	if (dentry) {
		if (dentry->d_flags & DCACHE_OP_REVALIDATE) {
			error = d_revalidate(dentry, flags);
			if (unlikely(error <= 0)) {
				if (error < 0) {
					dput(dentry);
					return ERR_PTR(error);
				} else {
					/* stale: drop it and fall through to
					 * allocating a fresh dentry */
					d_invalidate(dentry);
					dput(dentry);
					dentry = NULL;
				}
			}
		}
	}

	if (!dentry) {
		dentry = d_alloc(dir, name);
		if (unlikely(!dentry))
			return ERR_PTR(-ENOMEM);

		*need_lookup = true;
	}
	return dentry;
}

/*
 * Call i_op->lookup on the dentry.  The dentry must be negative and
 * unhashed.
 *
 * dir->d_inode->i_mutex must be held
 */
static struct dentry *lookup_real(struct inode *dir, struct dentry *dentry,
				  unsigned int flags)
{
	struct dentry *old;

	/* Don't create child dentry for a dead directory. */
	if (unlikely(IS_DEADDIR(dir))) {
		dput(dentry);
		return ERR_PTR(-ENOENT);
	}

	old = dir->i_op->lookup(dir, dentry, flags);
	if (unlikely(old)) {
		/* filesystem returned a different dentry: use it instead */
		dput(dentry);
		dentry = old;
	}
	return dentry;
}

/* dcache lookup, falling back to i_op->lookup only when needed;
 * caller holds base->d_inode->i_mutex */
static struct dentry *__lookup_hash(struct qstr *name,
		struct dentry *base, unsigned int flags)
{
	bool need_lookup;
	struct dentry *dentry;

	dentry = lookup_dcache(name, base, flags, &need_lookup);
	if (!need_lookup)
		return dentry;

	return lookup_real(base->d_inode, dentry, flags);
}

/*
 *  It's more convoluted than I'd like it to be, but... it's still fairly
 *  small and for now I'd prefer to have fast path as straight as possible.
 *  It _is_ time-critical.
 */
static int lookup_fast(struct nameidata *nd,
		       struct path *path, struct inode **inode,
		       unsigned *seqp)
{
	struct vfsmount *mnt = nd->path.mnt;
	struct dentry *dentry, *parent = nd->path.dentry;
	int need_reval = 1;
	int status = 1;
	int err;

	/*
	 * Rename seqlock is not required here because in the off chance
	 * of a false negative due to a concurrent rename, we're going to
	 * do the non-racy lookup, below.
	 */
	if (nd->flags & LOOKUP_RCU) {
		unsigned seq;
		bool negative;
		dentry = __d_lookup_rcu(parent, &nd->last, &seq);
		if (!dentry)
			goto unlazy;

		/*
		 * This sequence count validates that the inode matches
		 * the dentry name information from lookup.
		 */
		*inode = d_backing_inode(dentry);
		negative = d_is_negative(dentry);
		if (read_seqcount_retry(&dentry->d_seq, seq))
			return -ECHILD;
		if (negative)
			return -ENOENT;

		/*
		 * This sequence count validates that the parent had no
		 * changes while we did the lookup of the dentry above.
		 *
		 * The memory barrier in read_seqcount_begin of child is
		 * enough, we can use __read_seqcount_retry here.
		 */
		if (__read_seqcount_retry(&parent->d_seq, nd->seq))
			return -ECHILD;

		*seqp = seq;
		if (unlikely(dentry->d_flags & DCACHE_OP_REVALIDATE)) {
			status = d_revalidate(dentry, nd->flags);
			if (unlikely(status <= 0)) {
				/* revalidation done unless it wanted to block;
				 * remember that for the ref-walk path below */
				if (status != -ECHILD)
					need_reval = 0;
				goto unlazy;
			}
		}
		path->mnt = mnt;
		path->dentry = dentry;
		if (likely(__follow_mount_rcu(nd, path, inode, seqp)))
			return 0;
unlazy:
		/* switch from RCU-walk to ref-walk and retry below */
		if (unlazy_walk(nd, dentry, seq))
			return -ECHILD;
	} else {
		dentry = __d_lookup(parent, &nd->last);
	}

	if (unlikely(!dentry))
		goto need_lookup;

	if (unlikely(dentry->d_flags & DCACHE_OP_REVALIDATE) && need_reval)
		status = d_revalidate(dentry, nd->flags);
	if (unlikely(status <= 0)) {
		if (status < 0) {
			dput(dentry);
			return status;
		}
		d_invalidate(dentry);
		dput(dentry);
		goto need_lookup;
	}

	if (unlikely(d_is_negative(dentry))) {
		dput(dentry);
		return -ENOENT;
	}
	path->mnt = mnt;
	path->dentry = dentry;
	err = follow_managed(path, nd);
	if (likely(!err))
		*inode = d_backing_inode(path->dentry);
	return err;

need_lookup:
	/* positive return: caller must do the slow lookup */
	return 1;
}

/* Fast lookup failed, do it the slow way */
static int lookup_slow(struct nameidata *nd, struct path *path)
{
	struct dentry *dentry, *parent;

	parent = nd->path.dentry;
	BUG_ON(nd->inode != parent->d_inode);

	mutex_lock(&parent->d_inode->i_mutex);
	dentry = __lookup_hash(&nd->last, parent, nd->flags);
	mutex_unlock(&parent->d_inode->i_mutex);
	if (IS_ERR(dentry))
		return
			PTR_ERR(dentry);
	path->mnt = nd->path.mnt;
	path->dentry = dentry;
	return follow_managed(path, nd);
}

/* MAY_EXEC check on the current directory; in RCU mode retries in
 * ref-walk if the non-blocking check could not decide */
static inline int may_lookup(struct nameidata *nd)
{
	if (nd->flags & LOOKUP_RCU) {
		int err = inode_permission(nd->inode, MAY_EXEC|MAY_NOT_BLOCK);
		if (err != -ECHILD)
			return err;
		if (unlazy_walk(nd, NULL, 0))
			return -ECHILD;
	}
	return inode_permission(nd->inode, MAY_EXEC);
}

/* Dispatch ".." to the mode-appropriate handler; "." is a no-op */
static inline int handle_dots(struct nameidata *nd, int type)
{
	if (type == LAST_DOTDOT) {
		if (nd->flags & LOOKUP_RCU) {
			return follow_dotdot_rcu(nd);
		} else
			return follow_dotdot(nd);
	}
	return 0;
}

/*
 * Push a symlink onto nd->stack for later traversal.  Takes over the
 * caller's reference on @link (grabbing an extra mnt reference when the
 * link is on the same mount), growing the stack if needed.  Returns 1 on
 * success, -ELOOP past MAXSYMLINKS, or a negative error.
 */
static int pick_link(struct nameidata *nd, struct path *link,
		     struct inode *inode, unsigned seq)
{
	int error;
	struct saved *last;
	if (unlikely(nd->total_link_count++ >= MAXSYMLINKS)) {
		path_to_nameidata(link, nd);
		return -ELOOP;
	}
	if (!(nd->flags & LOOKUP_RCU)) {
		if (link->mnt == nd->path.mnt)
			mntget(link->mnt);
	}
	error = nd_alloc_stack(nd);
	if (unlikely(error)) {
		if (error == -ECHILD) {
			/* allocation must block: legitimize and retry */
			if (unlikely(unlazy_link(nd, link, seq)))
				return -ECHILD;
			error = nd_alloc_stack(nd);
		}
		if (error) {
			path_put(link);
			return error;
		}
	}

	last = nd->stack + nd->depth++;
	last->link = *link;
	last->cookie = NULL;
	last->inode = inode;
	last->seq = seq;
	return 1;
}

/*
 * Do we need to follow links? We _really_ want to be able
 * to do this check without having to look at inode->i_op,
 * so we keep a cache of "no, this doesn't need follow_link"
 * for the common case.
 */
static inline int should_follow_link(struct nameidata *nd, struct path *link,
				     int follow,
				     struct inode *inode, unsigned seq)
{
	if (likely(!d_is_symlink(link->dentry)))
		return 0;
	if (!follow)
		return 0;
	return pick_link(nd, link, inode, seq);
}

/* walk_component() flags: grab a found symlink / drop the current one */
enum {WALK_GET = 1, WALK_PUT = 2};

static int walk_component(struct nameidata *nd, int flags)
{
	struct path path;
	struct inode *inode;
	unsigned seq;
	int err;
	/*
	 * "." and ".." are special - ".." especially so because it has
	 * to be able to know about the current root directory and
	 * parent relationships.
	 */
	if (unlikely(nd->last_type != LAST_NORM)) {
		err = handle_dots(nd, nd->last_type);
		if (flags & WALK_PUT)
			put_link(nd);
		return err;
	}
	err = lookup_fast(nd, &path, &inode, &seq);
	if (unlikely(err)) {
		if (err < 0)
			return err;

		/* err > 0: fast path punted; do the locked lookup */
		err = lookup_slow(nd, &path);
		if (err < 0)
			return err;

		inode = d_backing_inode(path.dentry);
		seq = 0;	/* we are already out of RCU mode */
		err = -ENOENT;
		if (d_is_negative(path.dentry))
			goto out_path_put;
	}

	if (flags & WALK_PUT)
		put_link(nd);
	err = should_follow_link(nd, &path, flags & WALK_GET, inode, seq);
	if (unlikely(err))
		return err;
	path_to_nameidata(&path, nd);
	nd->inode = inode;
	nd->seq = seq;
	return 0;

out_path_put:
	path_to_nameidata(&path, nd);
	return err;
}

/*
 * We can do the critical dentry name comparison and hashing
 * operations one word at a time, but we are limited to:
 *
 * - Architectures with fast unaligned word accesses. We could
 *   do a "get_unaligned()" if this helps and is sufficiently
 *   fast.
 *
 * - non-CONFIG_DEBUG_PAGEALLOC configurations (so that we
 *   do not trap on the (extremely unlikely) case of a page
 *   crossing operation.
 *
 * - Furthermore, we need an efficient 64-bit compile for the
 *   64-bit case in order to generate the "number of bytes in
 *   the final mask". Again, that could be replaced with a
 *   efficient population count instruction or similar.
 */
#ifdef CONFIG_DCACHE_WORD_ACCESS

#include <asm/word-at-a-time.h>

#ifdef CONFIG_64BIT

/* Fold a 64-bit running hash down to the 32 bits the dcache uses */
static inline unsigned int fold_hash(unsigned long hash)
{
	return hash_64(hash, 32);
}

#else	/* 32-bit case */

#define fold_hash(x) (x)

#endif

/* Hash a name of known length, one word at a time; the final partial
 * word is masked down to the remaining bytes */
unsigned int full_name_hash(const unsigned char *name, unsigned int len)
{
	unsigned long a, mask;
	unsigned long hash = 0;

	for (;;) {
		a = load_unaligned_zeropad(name);
		if (len < sizeof(unsigned long))
			break;
		hash += a;
		hash *= 9;
		name += sizeof(unsigned long);
		len -= sizeof(unsigned long);
		if (!len)
			goto done;
	}
	mask = bytemask_from_count(len);
	hash += mask & a;
done:
	return fold_hash(hash);
}
EXPORT_SYMBOL(full_name_hash);

/*
 * Calculate the length and hash of the path component, and
 * return the "hash_len" as the result.
 */
static inline u64 hash_name(const char *name)
{
	unsigned long a, b, adata, bdata, mask, hash, len;
	const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;

	hash = a = 0;
	len = -sizeof(unsigned long);
	do {
		hash = (hash + a) * 9;
		len += sizeof(unsigned long);
		a = load_unaligned_zeropad(name+len);
		/* b has a zero byte wherever the name has a '/' */
		b = a ^ REPEAT_BYTE('/');
	} while (!(has_zero(a, &adata, &constants) | has_zero(b, &bdata, &constants)));

	adata = prep_zero_mask(a, adata, &constants);
	bdata = prep_zero_mask(b, bdata, &constants);

	mask = create_zero_mask(adata | bdata);

	hash += a & zero_bytemask(mask);
	len += find_zero(mask);
	return hashlen_create(fold_hash(hash), len);
}

#else

/* Byte-at-a-time fallback for architectures without word-at-a-time */
unsigned int full_name_hash(const unsigned char *name, unsigned int len)
{
	unsigned long hash = init_name_hash();
	while (len--)
		hash = partial_name_hash(*name++, hash);
	return end_name_hash(hash);
}
EXPORT_SYMBOL(full_name_hash);

/*
 * We know there's a real path component here of at least
 * one character.
 */
static inline u64 hash_name(const char *name)
{
	unsigned long hash = init_name_hash();
	unsigned long len = 0, c;

	c = (unsigned char)*name;
	do {
		len++;
		hash = partial_name_hash(c, hash);
		c = (unsigned char)name[len];
	} while (c && c != '/');	/* stop at NUL or the next slash */
	return hashlen_create(end_name_hash(hash), len);
}

#endif

/*
 * Name resolution.
 * This is the basic name resolution function, turning a pathname into
 * the final dentry. We expect 'base' to be positive and a directory.
 *
 * Returns 0 and nd will have valid dentry and mnt on success.
 * Returns error and drops reference to input namei data on failure.
 */
static int link_path_walk(const char *name, struct nameidata *nd)
{
	int err;

	while (*name=='/')
		name++;
	if (!*name)
		return 0;

	/* At this point we know we have a real path component. */
	for(;;) {
		u64 hash_len;
		int type;

		err = may_lookup(nd);
		if (err)
			return err;

		hash_len = hash_name(name);

		/* classify the component: ".", "..", or a normal name */
		type = LAST_NORM;
		if (name[0] == '.') switch (hashlen_len(hash_len)) {
			case 2:
				if (name[1] == '.') {
					type = LAST_DOTDOT;
					nd->flags |= LOOKUP_JUMPED;
				}
				break;
			case 1:
				type = LAST_DOT;
		}
		if (likely(type == LAST_NORM)) {
			struct dentry *parent = nd->path.dentry;
			nd->flags &= ~LOOKUP_JUMPED;
			if (unlikely(parent->d_flags & DCACHE_OP_HASH)) {
				/* filesystem provides its own name hash */
				struct qstr this = { { .hash_len = hash_len }, .name = name };
				err = parent->d_op->d_hash(parent, &this);
				if (err < 0)
					return err;
				hash_len = this.hash_len;
				name = this.name;
			}
		}

		nd->last.hash_len = hash_len;
		nd->last.name = name;
		nd->last_type = type;

		name += hashlen_len(hash_len);
		if (!*name)
			goto OK;
		/*
		 * If it wasn't NUL, we know it was '/'. Skip that
		 * slash, and continue until no more slashes.
		 */
		do {
			name++;
		} while (unlikely(*name == '/'));
		if (unlikely(!*name)) {
OK:
			/* pathname body, done */
			if (!nd->depth)
				return 0;
			name = nd->stack[nd->depth - 1].name;
			/* trailing symlink, done */
			if (!name)
				return 0;
			/* last component of nested symlink */
			err = walk_component(nd, WALK_GET | WALK_PUT);
		} else {
			err = walk_component(nd, WALK_GET);
		}
		if (err < 0)
			return err;

		if (err) {
			/* positive return: we stepped onto a symlink */
			const char *s = get_link(nd);

			if (unlikely(IS_ERR(s)))
				return PTR_ERR(s);
			err = 0;
			if (unlikely(!s)) {
				/* jumped */
				put_link(nd);
			} else {
				/* save our place, walk the link body instead */
				nd->stack[nd->depth - 1].name = name;
				name = s;
				continue;
			}
		}
		if (unlikely(!d_can_lookup(nd->path.dentry))) {
			if (nd->flags & LOOKUP_RCU) {
				if (unlazy_walk(nd, NULL, 0))
					return -ECHILD;
			}
			return -ENOTDIR;
		}
	}
}

/*
 * Set up nd for a walk: pick the starting point (explicit root for
 * LOOKUP_ROOT, "/" for absolute paths, cwd for AT_FDCWD, otherwise the
 * dfd's path) in either RCU or ref-walk mode.  Returns the name to walk
 * or an ERR_PTR.
 */
static const char *path_init(struct nameidata *nd, unsigned flags)
{
	int retval = 0;
	const char *s = nd->name->name;

	nd->last_type = LAST_ROOT; /* if there are only slashes... */
	nd->flags = flags | LOOKUP_JUMPED | LOOKUP_PARENT;
	nd->depth = 0;
	nd->total_link_count = 0;
	if (flags & LOOKUP_ROOT) {
		struct dentry *root = nd->root.dentry;
		struct inode *inode = root->d_inode;
		if (*s) {
			if (!d_can_lookup(root))
				return ERR_PTR(-ENOTDIR);
			retval = inode_permission(inode, MAY_EXEC);
			if (retval)
				return ERR_PTR(retval);
		}
		nd->path = nd->root;
		nd->inode = inode;
		if (flags & LOOKUP_RCU) {
			rcu_read_lock();
			nd->seq = __read_seqcount_begin(&nd->path.dentry->d_seq);
			nd->root_seq = nd->seq;
			nd->m_seq = read_seqbegin(&mount_lock);
		} else {
			path_get(&nd->path);
		}
		return s;
	}

	nd->root.mnt = NULL;

	nd->m_seq = read_seqbegin(&mount_lock);
	if (*s == '/') {
		if (flags & LOOKUP_RCU) {
			rcu_read_lock();
			set_root_rcu(nd);
			nd->seq = nd->root_seq;
		} else {
			set_root(nd);
			path_get(&nd->root);
		}
		nd->path = nd->root;
	} else if (nd->dfd == AT_FDCWD) {
		if (flags & LOOKUP_RCU) {
			struct fs_struct *fs = current->fs;
			unsigned seq;

			rcu_read_lock();

			/* sample cwd consistently against fs->seq */
			do {
				seq = read_seqcount_begin(&fs->seq);
				nd->path = fs->pwd;
				nd->seq = __read_seqcount_begin(&nd->path.dentry->d_seq);
			} while
				(read_seqcount_retry(&fs->seq, seq));
		} else {
			get_fs_pwd(current->fs, &nd->path);
		}
	} else {
		/* Caller must check execute permissions on the starting path component */
		struct fd f = fdget_raw(nd->dfd);
		struct dentry *dentry;

		if (!f.file)
			return ERR_PTR(-EBADF);

		dentry = f.file->f_path.dentry;

		if (*s) {
			if (!d_can_lookup(dentry)) {
				fdput(f);
				return ERR_PTR(-ENOTDIR);
			}
		}

		nd->path = f.file->f_path;
		if (flags & LOOKUP_RCU) {
			rcu_read_lock();
			nd->inode = nd->path.dentry->d_inode;
			nd->seq = read_seqcount_begin(&nd->path.dentry->d_seq);
		} else {
			path_get(&nd->path);
			nd->inode = nd->path.dentry->d_inode;
		}
		fdput(f);
		return s;
	}

	nd->inode = nd->path.dentry->d_inode;
	if (!(flags & LOOKUP_RCU))
		return s;
	/* validate the starting point before walking in RCU mode */
	if (likely(!read_seqcount_retry(&nd->path.dentry->d_seq, nd->seq)))
		return s;
	if (!(nd->flags & LOOKUP_ROOT))
		nd->root.mnt = NULL;
	rcu_read_unlock();
	return ERR_PTR(-ECHILD);
}

/*
 * Begin walking the symlink found as the last component: permission-check
 * it and return its body ("" if the link was fully consumed).
 */
static const char *trailing_symlink(struct nameidata *nd)
{
	const char *s;
	int error = may_follow_link(nd);
	if (unlikely(error))
		return ERR_PTR(error);
	nd->flags |= LOOKUP_PARENT;
	nd->stack[0].name = NULL;
	s = get_link(nd);
	return s ? s : "";
}

/* Walk the final component; a trailing '/' forces directory semantics */
static inline int lookup_last(struct nameidata *nd)
{
	if (nd->last_type == LAST_NORM && nd->last.name[nd->last.len])
		nd->flags |= LOOKUP_FOLLOW | LOOKUP_DIRECTORY;

	nd->flags &= ~LOOKUP_PARENT;
	return walk_component(nd,
			nd->flags & LOOKUP_FOLLOW
				? nd->depth
					? WALK_PUT | WALK_GET
					: WALK_GET
				: 0);
}

/* Returns 0 and nd will be valid on success; Retuns error, otherwise.
 */
static int path_lookupat(struct nameidata *nd, unsigned flags, struct path *path)
{
	const char *s = path_init(nd, flags);
	int err;
	if (IS_ERR(s))
		return PTR_ERR(s);
	/* walk the body, then the last component; a positive return from
	 * lookup_last() means a trailing symlink to continue with */
	while (!(err = link_path_walk(s, nd))
		&& ((err = lookup_last(nd)) > 0)) {
		s = trailing_symlink(nd);
		if (IS_ERR(s)) {
			err = PTR_ERR(s);
			break;
		}
	}
	if (!err)
		err = complete_walk(nd);

	if (!err && nd->flags & LOOKUP_DIRECTORY)
		if (!d_can_lookup(nd->path.dentry))
			err = -ENOTDIR;
	if (!err) {
		/* transfer ownership of the result to *path */
		*path = nd->path;
		nd->path.mnt = NULL;
		nd->path.dentry = NULL;
	}
	terminate_walk(nd);
	return err;
}

/*
 * Common driver for path lookup: tries RCU-walk first, falls back to
 * ref-walk on -ECHILD and to forced revalidation on -ESTALE.  Consumes
 * @name.
 */
static int filename_lookup(int dfd, struct filename *name, unsigned flags,
			   struct path *path, struct path *root)
{
	int retval;
	struct nameidata nd;
	if (IS_ERR(name))
		return PTR_ERR(name);
	if (unlikely(root)) {
		nd.root = *root;
		flags |= LOOKUP_ROOT;
	}
	set_nameidata(&nd, dfd, name);
	retval = path_lookupat(&nd, flags | LOOKUP_RCU, path);
	if (unlikely(retval == -ECHILD))
		retval = path_lookupat(&nd, flags, path);
	if (unlikely(retval == -ESTALE))
		retval = path_lookupat(&nd, flags | LOOKUP_REVAL, path);

	if (likely(!retval))
		audit_inode(name, path->dentry, flags & LOOKUP_PARENT);
	restore_nameidata();
	putname(name);
	return retval;
}

/* Returns 0 and nd will be valid on success; Retuns error, otherwise.
 */
static int path_parentat(struct nameidata *nd, unsigned flags,
				struct path *parent)
{
	const char *s = path_init(nd, flags);
	int err;
	if (IS_ERR(s))
		return PTR_ERR(s);
	/* walk everything but the last component */
	err = link_path_walk(s, nd);
	if (!err)
		err = complete_walk(nd);
	if (!err) {
		*parent = nd->path;
		nd->path.mnt = NULL;
		nd->path.dentry = NULL;
	}
	terminate_walk(nd);
	return err;
}

/*
 * Resolve the parent directory of @name, returning the last component in
 * *last/*type.  Tries RCU-walk, then ref-walk, then LOOKUP_REVAL.  On
 * success the returned filename must be released by the caller (nd.last
 * points into it); on failure it is consumed and an ERR_PTR is returned.
 */
static struct filename *filename_parentat(int dfd, struct filename *name,
				unsigned int flags, struct path *parent,
				struct qstr *last, int *type)
{
	int retval;
	struct nameidata nd;

	if (IS_ERR(name))
		return name;
	set_nameidata(&nd, dfd, name);
	retval = path_parentat(&nd, flags | LOOKUP_RCU, parent);
	if (unlikely(retval == -ECHILD))
		retval = path_parentat(&nd, flags, parent);
	if (unlikely(retval == -ESTALE))
		retval = path_parentat(&nd, flags | LOOKUP_REVAL, parent);
	if (likely(!retval)) {
		*last = nd.last;
		*type = nd.last_type;
		audit_inode(name, parent->dentry, LOOKUP_PARENT);
	} else {
		putname(name);
		name = ERR_PTR(retval);
	}
	restore_nameidata();
	return name;
}

/* does lookup, returns the object with parent locked */
struct dentry *kern_path_locked(const char *name, struct path *path)
{
	struct filename *filename;
	struct dentry *d;
	struct qstr last;
	int type;

	filename = filename_parentat(AT_FDCWD, getname_kernel(name), 0, path,
				    &last, &type);
	if (IS_ERR(filename))
		return ERR_CAST(filename);
	if (unlikely(type != LAST_NORM)) {
		/* last component was "." or ".." - can't lock a child */
		path_put(path);
		putname(filename);
		return ERR_PTR(-EINVAL);
	}
	mutex_lock_nested(&path->dentry->d_inode->i_mutex, I_MUTEX_PARENT);
	d = __lookup_hash(&last, path->dentry, 0);
	if (IS_ERR(d)) {
		mutex_unlock(&path->dentry->d_inode->i_mutex);
		path_put(path);
	}
	putname(filename);
	return d;
}

/* kernel-space pathname lookup relative to cwd */
int kern_path(const char *name, unsigned int flags, struct path *path)
{
	return filename_lookup(AT_FDCWD, getname_kernel(name),
			       flags, path, NULL);
}
EXPORT_SYMBOL(kern_path);

/**
 * vfs_path_lookup - lookup a file path relative to a dentry-vfsmount pair
 * @dentry:  pointer to dentry of the base directory
 * @mnt: pointer to vfs
 *	mount of the base directory
 * @name: pointer to file name
 * @flags: lookup flags
 * @path: pointer to struct path to fill
 */
int vfs_path_lookup(struct dentry *dentry, struct vfsmount *mnt,
		    const char *name, unsigned int flags,
		    struct path *path)
{
	struct path root = {.mnt = mnt, .dentry = dentry};
	/* the first argument of filename_lookup() is ignored with root */
	return filename_lookup(AT_FDCWD, getname_kernel(name),
			       flags , path, &root);
}
EXPORT_SYMBOL(vfs_path_lookup);

/**
 * lookup_one_len - filesystem helper to lookup single pathname component
 * @name:	pathname component to lookup
 * @base:	base directory to lookup from
 * @len:	maximum length @len should be interpreted to
 *
 * Note that this routine is purely a helper for filesystem usage and should
 * not be called by generic code.
 */
struct dentry *lookup_one_len(const char *name, struct dentry *base, int len)
{
	struct qstr this;
	unsigned int c;
	int err;

	WARN_ON_ONCE(!mutex_is_locked(&base->d_inode->i_mutex));

	this.name = name;
	this.len = len;
	this.hash = full_name_hash(name, len);
	if (!len)
		return ERR_PTR(-EACCES);

	/* reject "." and ".." - this helper takes a single real name */
	if (unlikely(name[0] == '.')) {
		if (len < 2 || (len == 2 && name[1] == '.'))
			return ERR_PTR(-EACCES);
	}

	/* the component must not contain separators or embedded NULs */
	while (len--) {
		c = *(const unsigned char *)name++;
		if (c == '/' || c == '\0')
			return ERR_PTR(-EACCES);
	}
	/*
	 * See if the low-level filesystem might want
	 * to use its own hash..
	 */
	if (base->d_flags & DCACHE_OP_HASH) {
		int err = base->d_op->d_hash(base, &this);
		if (err < 0)
			return ERR_PTR(err);
	}

	err = inode_permission(base->d_inode, MAY_EXEC);
	if (err)
		return ERR_PTR(err);

	return __lookup_hash(&this, base, 0);
}
EXPORT_SYMBOL(lookup_one_len);

/* userland pathname lookup; *empty reports an empty-string name */
int user_path_at_empty(int dfd, const char __user *name, unsigned flags,
		 struct path *path, int *empty)
{
	return filename_lookup(dfd, getname_flags(name, flags, empty),
			       flags, path, NULL);
}
EXPORT_SYMBOL(user_path_at_empty);

/*
 * NB: most callers don't do anything directly with the reference to the
 *     to struct filename, but the nd->last pointer points into the name string
 *     allocated by getname. So we must hold the reference to it until all
 *     path-walking is complete.
 */
static inline struct filename *
user_path_parent(int dfd, const char __user *path,
		 struct path *parent,
		 struct qstr *last,
		 int *type,
		 unsigned int flags)
{
	/* only LOOKUP_REVAL is allowed in extra flags */
	return filename_parentat(dfd, getname(path), flags & LOOKUP_REVAL,
				 parent, last, type);
}

/**
 * mountpoint_last - look up last component for umount
 * @nd:   pathwalk nameidata - currently pointing at parent directory of "last"
 * @path: pointer to container for result
 *
 * This is a special lookup_last function just for umount. In this case, we
 * need to resolve the path without doing any revalidation.
 *
 * The nameidata should be the result of doing a LOOKUP_PARENT pathwalk. Since
 * mountpoints are always pinned in the dcache, their ancestors are too. Thus,
 * in almost all cases, this lookup will be served out of the dcache. The only
 * cases where it won't are if nd->last refers to a symlink or the path is
 * bogus and it doesn't exist.
 *
 * Returns:
 * -error: if there was an error during lookup. This includes -ENOENT if the
 *         lookup found a negative dentry. The nd->path reference will also be
 *         put in this case.
 *
 * 0:      if we successfully resolved nd->path and found it to not to be a
 *         symlink that needs to be followed.
"path" will also be populated. * The nd->path reference will also be put. * * 1: if we successfully resolved nd->last and found it to be a symlink * that needs to be followed. "path" will be populated with the path * to the link, and nd->path will *not* be put. */ static int mountpoint_last(struct nameidata *nd, struct path *path) { int error = 0; struct dentry *dentry; struct dentry *dir = nd->path.dentry; /* If we're in rcuwalk, drop out of it to handle last component */ if (nd->flags & LOOKUP_RCU) { if (unlazy_walk(nd, NULL, 0)) return -ECHILD; } nd->flags &= ~LOOKUP_PARENT; if (unlikely(nd->last_type != LAST_NORM)) { error = handle_dots(nd, nd->last_type); if (error) return error; dentry = dget(nd->path.dentry); goto done; } mutex_lock(&dir->d_inode->i_mutex); dentry = d_lookup(dir, &nd->last); if (!dentry) { /* * No cached dentry. Mounted dentries are pinned in the cache, * so that means that this dentry is probably a symlink or the * path doesn't actually point to a mounted dentry. */ dentry = d_alloc(dir, &nd->last); if (!dentry) { mutex_unlock(&dir->d_inode->i_mutex); return -ENOMEM; } dentry = lookup_real(dir->d_inode, dentry, nd->flags); if (IS_ERR(dentry)) { mutex_unlock(&dir->d_inode->i_mutex); return PTR_ERR(dentry); } } mutex_unlock(&dir->d_inode->i_mutex); done: if (d_is_negative(dentry)) { dput(dentry); return -ENOENT; } if (nd->depth) put_link(nd); path->dentry = dentry; path->mnt = nd->path.mnt; error = should_follow_link(nd, path, nd->flags & LOOKUP_FOLLOW, d_backing_inode(dentry), 0); if (unlikely(error)) return error; mntget(path->mnt); follow_mount(path); return 0; } /** * path_mountpoint - look up a path to be umounted * @nd: lookup context * @flags: lookup flags * @path: pointer to container for result * * Look up the given name, but don't attempt to revalidate the last component. * Returns 0 and "path" will be valid on success; Returns error otherwise. 
 */
static int
path_mountpoint(struct nameidata *nd, unsigned flags, struct path *path)
{
	const char *s = path_init(nd, flags);
	int err;
	if (IS_ERR(s))
		return PTR_ERR(s);
	/* positive return from mountpoint_last() means trailing symlink */
	while (!(err = link_path_walk(s, nd)) &&
		(err = mountpoint_last(nd, path)) > 0) {
		s = trailing_symlink(nd);
		if (IS_ERR(s)) {
			err = PTR_ERR(s);
			break;
		}
	}
	terminate_walk(nd);
	return err;
}

/*
 * Driver for umount-style lookup: RCU-walk first, then ref-walk, then
 * LOOKUP_REVAL.  Consumes @name.
 */
static int
filename_mountpoint(int dfd, struct filename *name, struct path *path,
			unsigned int flags)
{
	struct nameidata nd;
	int error;
	if (IS_ERR(name))
		return PTR_ERR(name);
	set_nameidata(&nd, dfd, name);
	error = path_mountpoint(&nd, flags | LOOKUP_RCU, path);
	if (unlikely(error == -ECHILD))
		error = path_mountpoint(&nd, flags, path);
	if (unlikely(error == -ESTALE))
		error = path_mountpoint(&nd, flags | LOOKUP_REVAL, path);
	if (likely(!error))
		audit_inode(name, path->dentry, 0);
	restore_nameidata();
	putname(name);
	return error;
}

/**
 * user_path_mountpoint_at - lookup a path from userland in order to umount it
 * @dfd:	directory file descriptor
 * @name:	pathname from userland
 * @flags:	lookup flags
 * @path:	pointer to container to hold result
 *
 * A umount is a special case for path walking. We're not actually interested
 * in the inode in this situation, and ESTALE errors can be a problem. We
 * simply want track down the dentry and vfsmount attached at the mountpoint
 * and avoid revalidating the last component.
 *
 * Returns 0 and populates "path" on success.
 */
int
user_path_mountpoint_at(int dfd, const char __user *name, unsigned int flags,
			struct path *path)
{
	return filename_mountpoint(dfd, getname(name), path, flags);
}

/* kernel-space variant of the above */
int
kern_path_mountpoint(int dfd, const char *name, struct path *path,
			unsigned int flags)
{
	return filename_mountpoint(dfd, getname_kernel(name), path, flags);
}
EXPORT_SYMBOL(kern_path_mountpoint);

/*
 * Sticky-directory check: the caller may operate on @inode if it owns
 * either the inode or the directory, or has CAP_FOWNER.  Returns non-zero
 * when the operation must be denied.
 */
int __check_sticky(struct inode *dir, struct inode *inode)
{
	kuid_t fsuid = current_fsuid();

	if (uid_eq(inode->i_uid, fsuid))
		return 0;
	if (uid_eq(dir->i_uid, fsuid))
		return 0;
	return !capable_wrt_inode_uidgid(inode, CAP_FOWNER);
}
EXPORT_SYMBOL(__check_sticky);

/*
 *	Check whether we can remove a link victim from directory dir, check
 *  whether the type of victim is right.
 *  1. We can't do it if dir is read-only (done in permission())
 *  2. We should have write and exec permissions on dir
 *  3. We can't remove anything from append-only dir
 *  4. We can't do anything with immutable dir (done in permission())
 *  5. If the sticky bit on dir is set we should either
 *	a. be owner of dir, or
 *	b. be owner of victim, or
 *	c. have CAP_FOWNER capability
 *  6. If the victim is append-only or immutable we can't do antyhing with
 *     links pointing to it.
 *  7. If we were asked to remove a directory and victim isn't one - ENOTDIR.
 *  8. If we were asked to remove a non-directory and victim isn't one - EISDIR.
 *  9. We can't remove a root or mountpoint.
 * 10. We don't allow removal of NFS sillyrenamed files; it's handled by
 *     nfs_async_unlink().
 */
static int may_delete(struct inode *dir, struct dentry *victim, bool isdir)
{
	struct inode *inode = d_backing_inode(victim);
	int error;

	if (d_is_negative(victim))
		return -ENOENT;
	BUG_ON(!inode);

	BUG_ON(victim->d_parent->d_inode != dir);
	audit_inode_child(dir, victim, AUDIT_TYPE_CHILD_DELETE);

	error = inode_permission(dir, MAY_WRITE | MAY_EXEC);
	if (error)
		return error;
	if (IS_APPEND(dir))
		return -EPERM;

	/* sticky bit, append-only, immutable or swapfile victims are off-limits */
	if (check_sticky(dir, inode) || IS_APPEND(inode) ||
	    IS_IMMUTABLE(inode) || IS_SWAPFILE(inode))
		return -EPERM;
	if (isdir) {
		if (!d_is_dir(victim))
			return -ENOTDIR;
		if (IS_ROOT(victim))
			return -EBUSY;
	} else if (d_is_dir(victim))
		return -EISDIR;
	if (IS_DEADDIR(dir))
		return -ENOENT;
	if (victim->d_flags & DCACHE_NFSFS_RENAMED)
		return -EBUSY;
	return 0;
}

/*	Check whether we can create an object with dentry child in directory
 *  dir.
 *  1. We can't do it if child already exists (open has special treatment for
 *     this case, but since we are inlined it's OK)
 *  2. We can't do it if dir is read-only (done in permission())
 *  3. We should have write and exec permissions on dir
 *  4. We can't do it if dir is immutable (done in permission())
 */
static inline int may_create(struct inode *dir, struct dentry *child)
{
	audit_inode_child(dir, child, AUDIT_TYPE_CHILD_CREATE);
	if (child->d_inode)
		return -EEXIST;
	if (IS_DEADDIR(dir))
		return -ENOENT;
	return inode_permission(dir, MAY_WRITE | MAY_EXEC);
}

/*
 * p1 and p2 should be directories on the same fs.
 */
/*
 * Lock two directories for a rename.  Takes s_vfs_rename_mutex when the two
 * differ, then locks ancestor before descendant (or both as "parents" when
 * neither contains the other).  Returns the ancestor dentry if one parent
 * contains the other, otherwise NULL.  Paired with unlock_rename().
 */
struct dentry *lock_rename(struct dentry *p1, struct dentry *p2)
{
	struct dentry *p;

	if (p1 == p2) {
		mutex_lock_nested(&p1->d_inode->i_mutex, I_MUTEX_PARENT);
		return NULL;
	}

	mutex_lock(&p1->d_inode->i_sb->s_vfs_rename_mutex);

	p = d_ancestor(p2, p1);
	if (p) {
		/* p2 is an ancestor of p1: lock p2 first */
		mutex_lock_nested(&p2->d_inode->i_mutex, I_MUTEX_PARENT);
		mutex_lock_nested(&p1->d_inode->i_mutex, I_MUTEX_CHILD);
		return p;
	}

	p = d_ancestor(p1, p2);
	if (p) {
		/* p1 is an ancestor of p2: lock p1 first */
		mutex_lock_nested(&p1->d_inode->i_mutex, I_MUTEX_PARENT);
		mutex_lock_nested(&p2->d_inode->i_mutex, I_MUTEX_CHILD);
		return p;
	}

	mutex_lock_nested(&p1->d_inode->i_mutex, I_MUTEX_PARENT);
	mutex_lock_nested(&p2->d_inode->i_mutex, I_MUTEX_PARENT2);
	return NULL;
}
EXPORT_SYMBOL(lock_rename);

/*
 * Undo lock_rename(): drop both i_mutexes and, when the parents differed,
 * the filesystem-wide rename mutex.
 */
void unlock_rename(struct dentry *p1, struct dentry *p2)
{
	mutex_unlock(&p1->d_inode->i_mutex);
	if (p1 != p2) {
		mutex_unlock(&p2->d_inode->i_mutex);
		mutex_unlock(&p1->d_inode->i_sb->s_vfs_rename_mutex);
	}
}
EXPORT_SYMBOL(unlock_rename);

/*
 * Create a regular file: permission/security checks, then the filesystem's
 * ->create() method, then an fsnotify event on success.  Caller must hold
 * dir->i_mutex.
 */
int vfs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
		bool want_excl)
{
	int error = may_create(dir, dentry);
	if (error)
		return error;

	if (!dir->i_op->create)
		return -EACCES;	/* shouldn't it be ENOSYS? */
	mode &= S_IALLUGO;
	mode |= S_IFREG;
	error = security_inode_create(dir, dentry, mode);
	if (error)
		return error;
	error = dir->i_op->create(dir, dentry, mode, want_excl);
	if (!error)
		fsnotify_create(dir, dentry);
	return error;
}
EXPORT_SYMBOL(vfs_create);

/*
 * Per-open checks applied just before a file is actually opened: file-type
 * restrictions, inode permission, append-only and O_NOATIME rules.
 */
static int may_open(struct path *path, int acc_mode, int flag)
{
	struct dentry *dentry = path->dentry;
	struct inode *inode = dentry->d_inode;
	int error;

	/* O_PATH? */
	if (!acc_mode)
		return 0;

	if (!inode)
		return -ENOENT;

	switch (inode->i_mode & S_IFMT) {
	case S_IFLNK:
		return -ELOOP;
	case S_IFDIR:
		if (acc_mode & MAY_WRITE)
			return -EISDIR;
		break;
	case S_IFBLK:
	case S_IFCHR:
		if (path->mnt->mnt_flags & MNT_NODEV)
			return -EACCES;
		/*FALLTHRU*/
	case S_IFIFO:
	case S_IFSOCK:
		/* O_TRUNC is meaningless for FIFOs, sockets and device nodes */
		flag &= ~O_TRUNC;
		break;
	}

	error = inode_permission(inode, acc_mode);
	if (error)
		return error;

	/*
	 * An append-only file must be opened in append mode for writing.
	 */
	if (IS_APPEND(inode)) {
		if ((flag & O_ACCMODE) != O_RDONLY && !(flag & O_APPEND))
			return -EPERM;
		if (flag & O_TRUNC)
			return -EPERM;
	}

	/* O_NOATIME can only be set by the owner or superuser */
	if (flag & O_NOATIME && !inode_owner_or_capable(inode))
		return -EPERM;

	return 0;
}

/*
 * Truncate the opened file to zero length, holding a write access reference
 * for the duration.  Called with the parent's i_mutex already dropped.
 */
static int handle_truncate(struct file *filp)
{
	struct path *path = &filp->f_path;
	struct inode *inode = path->dentry->d_inode;
	int error = get_write_access(inode);
	if (error)
		return error;
	/*
	 * Refuse to truncate files with mandatory locks held on them.
	 */
	error = locks_verify_locked(filp);
	if (!error)
		error = security_path_truncate(path);
	if (!error) {
		error = do_truncate(path->dentry, 0,
				    ATTR_MTIME|ATTR_CTIME|ATTR_OPEN,
				    filp);
	}
	put_write_access(inode);
	return error;
}

/*
 * Map the userspace O_ACCMODE encoding (3 == invalid read+write combo) onto
 * the kernel-internal FMODE-style encoding expected by ->atomic_open().
 */
static inline int open_to_namei_flags(int flag)
{
	if ((flag & O_ACCMODE) == 3)
		flag--;
	return flag;
}

/*
 * Security and permission checks that must pass before a file may be
 * created via ->atomic_open().
 */
static int may_o_create(struct path *dir, struct dentry *dentry, umode_t mode)
{
	int error = security_path_mknod(dir, dentry, mode, 0);
	if (error)
		return error;

	error = inode_permission(dir->dentry->d_inode, MAY_WRITE | MAY_EXEC);
	if (error)
		return error;

	return security_inode_create(dir->dentry->d_inode, dentry, mode);
}

/*
 * Attempt to atomically look up, create and open a file from a negative
 * dentry.
 *
 * Returns 0 if successful.  The file will have been created and attached to
 * @file by the filesystem calling finish_open().
 *
 * Returns 1 if the file was looked up only or didn't need creating.  The
 * caller will need to perform the open themselves.
 * @path will have been
 * updated to point to the new dentry.  This may be negative.
 *
 * Returns an error code otherwise.
 */
static int atomic_open(struct nameidata *nd, struct dentry *dentry,
			struct path *path, struct file *file,
			const struct open_flags *op,
			bool got_write, bool need_lookup,
			int *opened)
{
	struct inode *dir = nd->path.dentry->d_inode;
	unsigned open_flag = open_to_namei_flags(op->open_flag);
	umode_t mode;
	int error;
	int acc_mode;
	int create_error = 0;
	/* sentinel so we can detect an fs that forgot to set f_path.dentry */
	struct dentry *const DENTRY_NOT_SET = (void *) -1UL;
	bool excl;

	BUG_ON(dentry->d_inode);

	/* Don't create child dentry for a dead directory. */
	if (unlikely(IS_DEADDIR(dir))) {
		error = -ENOENT;
		goto out;
	}

	mode = op->mode;
	if ((open_flag & O_CREAT) && !IS_POSIXACL(dir))
		mode &= ~current_umask();

	excl = (open_flag & (O_EXCL | O_CREAT)) == (O_EXCL | O_CREAT);
	if (excl)
		open_flag &= ~O_TRUNC;

	/*
	 * Checking write permission is tricky, because we don't know if we are
	 * going to actually need it: O_CREAT opens should work as long as the
	 * file exists.  But checking existence breaks atomicity.  The trick is
	 * to check access and if not granted clear O_CREAT from the flags.
	 *
	 * Another problem is returning the "right" error value (e.g. for an
	 * O_EXCL open we want to return EEXIST not EROFS).
	 */
	if (((open_flag & (O_CREAT | O_TRUNC)) ||
	    (open_flag & O_ACCMODE) != O_RDONLY) && unlikely(!got_write)) {
		if (!(open_flag & O_CREAT)) {
			/*
			 * No O_CREAT -> atomicity not a requirement -> fall
			 * back to lookup + open
			 */
			goto no_open;
		} else if (open_flag & (O_EXCL | O_TRUNC)) {
			/* Fall back and fail with the right error */
			create_error = -EROFS;
			goto no_open;
		} else {
			/* No side effects, safe to clear O_CREAT */
			create_error = -EROFS;
			open_flag &= ~O_CREAT;
		}
	}

	if (open_flag & O_CREAT) {
		error = may_o_create(&nd->path, dentry, mode);
		if (error) {
			create_error = error;
			if (open_flag & O_EXCL)
				goto no_open;
			open_flag &= ~O_CREAT;
		}
	}

	if (nd->flags & LOOKUP_DIRECTORY)
		open_flag |= O_DIRECTORY;

	file->f_path.dentry = DENTRY_NOT_SET;
	file->f_path.mnt = nd->path.mnt;
	error = dir->i_op->atomic_open(dir, dentry, file, open_flag, mode,
				       opened);
	if (error < 0) {
		/* prefer the saved creation error over a plain -ENOENT */
		if (create_error && error == -ENOENT)
			error = create_error;
		goto out;
	}

	if (error) {	/* returned 1, that is */
		if (WARN_ON(file->f_path.dentry == DENTRY_NOT_SET)) {
			error = -EIO;
			goto out;
		}
		if (file->f_path.dentry) {
			dput(dentry);
			dentry = file->f_path.dentry;
		}
		if (*opened & FILE_CREATED)
			fsnotify_create(dir, dentry);
		if (!dentry->d_inode) {
			WARN_ON(*opened & FILE_CREATED);
			if (create_error) {
				error = create_error;
				goto out;
			}
		} else {
			if (excl && !(*opened & FILE_CREATED)) {
				error = -EEXIST;
				goto out;
			}
		}
		goto looked_up;
	}

	/*
	 * We didn't have the inode before the open, so check open permission
	 * here.
	 */
	acc_mode = op->acc_mode;
	if (*opened & FILE_CREATED) {
		WARN_ON(!(open_flag & O_CREAT));
		fsnotify_create(dir, dentry);
		acc_mode = MAY_OPEN;
	}
	error = may_open(&file->f_path, acc_mode, open_flag);
	if (error)
		fput(file);

out:
	dput(dentry);
	return error;

no_open:
	if (need_lookup) {
		dentry = lookup_real(dir, dentry, nd->flags);
		if (IS_ERR(dentry))
			return PTR_ERR(dentry);

		if (create_error) {
			int open_flag = op->open_flag;

			error = create_error;
			if ((open_flag & O_EXCL)) {
				if (!dentry->d_inode)
					goto out;
			} else if (!dentry->d_inode) {
				goto out;
			} else if ((open_flag & O_TRUNC) &&
				   d_is_reg(dentry)) {
				goto out;
			}
			/* will fail later, go on to get the right error */
		}
	}
looked_up:
	path->dentry = dentry;
	path->mnt = nd->path.mnt;
	return 1;
}

/*
 * Look up and maybe create and open the last component.
 *
 * Must be called with i_mutex held on parent.
 *
 * Returns 0 if the file was successfully atomically created (if necessary) and
 * opened.  In this case the file will be returned attached to @file.
 *
 * Returns 1 if the file was not completely opened at this time, though lookups
 * and creations will have been performed and the dentry returned in @path will
 * be positive upon return if O_CREAT was specified.  If O_CREAT wasn't
 * specified then a negative dentry may be returned.
 *
 * An error code is returned otherwise.
 *
 * FILE_CREATED will be set in @*opened if the dentry was created and will be
 * cleared otherwise prior to returning.
 */
static int lookup_open(struct nameidata *nd, struct path *path,
			struct file *file,
			const struct open_flags *op,
			bool got_write, int *opened)
{
	struct dentry *dir = nd->path.dentry;
	struct inode *dir_inode = dir->d_inode;
	struct dentry *dentry;
	int error;
	bool need_lookup;

	*opened &= ~FILE_CREATED;
	dentry = lookup_dcache(&nd->last, dir, nd->flags, &need_lookup);
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	/* Cached positive dentry: will open in f_op->open */
	if (!need_lookup && dentry->d_inode)
		goto out_no_open;

	if ((nd->flags & LOOKUP_OPEN) && dir_inode->i_op->atomic_open) {
		return atomic_open(nd, dentry, path, file, op, got_write,
				   need_lookup, opened);
	}

	if (need_lookup) {
		BUG_ON(dentry->d_inode);

		dentry = lookup_real(dir_inode, dentry, nd->flags);
		if (IS_ERR(dentry))
			return PTR_ERR(dentry);
	}

	/* Negative dentry, just create the file */
	if (!dentry->d_inode && (op->open_flag & O_CREAT)) {
		umode_t mode = op->mode;
		if (!IS_POSIXACL(dir->d_inode))
			mode &= ~current_umask();
		/*
		 * This write is needed to ensure that a
		 * rw->ro transition does not occur between
		 * the time when the file is created and when
		 * a permanent write count is taken through
		 * the 'struct file' in finish_open().
		 */
		if (!got_write) {
			error = -EROFS;
			goto out_dput;
		}
		*opened |= FILE_CREATED;
		error = security_path_mknod(&nd->path, dentry, mode, 0);
		if (error)
			goto out_dput;
		error = vfs_create(dir->d_inode, dentry, mode,
				   nd->flags & LOOKUP_EXCL);
		if (error)
			goto out_dput;
	}
out_no_open:
	path->dentry = dentry;
	path->mnt = nd->path.mnt;
	return 1;

out_dput:
	dput(dentry);
	return error;
}

/*
 * Handle the last step of open()
 */
static int do_last(struct nameidata *nd,
		   struct file *file, const struct open_flags *op,
		   int *opened)
{
	struct dentry *dir = nd->path.dentry;
	int open_flag = op->open_flag;
	bool will_truncate = (open_flag & O_TRUNC) != 0;
	bool got_write = false;
	int acc_mode = op->acc_mode;
	unsigned seq;
	struct inode *inode;
	struct path save_parent = { .dentry = NULL, .mnt = NULL };
	struct path path;
	bool retried = false;
	int error;

	nd->flags &= ~LOOKUP_PARENT;
	nd->flags |= op->intent;

	if (nd->last_type != LAST_NORM) {
		error = handle_dots(nd, nd->last_type);
		if (unlikely(error))
			return error;
		goto finish_open;
	}

	if (!(open_flag & O_CREAT)) {
		if (nd->last.name[nd->last.len])
			nd->flags |= LOOKUP_FOLLOW | LOOKUP_DIRECTORY;
		/* we _can_ be in RCU mode here */
		error = lookup_fast(nd, &path, &inode, &seq);
		if (likely(!error))
			goto finish_lookup;

		if (error < 0)
			return error;

		BUG_ON(nd->inode != dir->d_inode);
	} else {
		/* create side of things */
		/*
		 * This will *only* deal with leaving RCU mode - LOOKUP_JUMPED
		 * has been cleared when we got to the last component we are
		 * about to look up
		 */
		error = complete_walk(nd);
		if (error)
			return error;

		audit_inode(nd->name, dir, LOOKUP_PARENT);
		/* trailing slashes? */
		if (unlikely(nd->last.name[nd->last.len]))
			return -EISDIR;
	}

retry_lookup:
	if (op->open_flag & (O_CREAT | O_TRUNC | O_WRONLY | O_RDWR)) {
		error = mnt_want_write(nd->path.mnt);
		if (!error)
			got_write = true;
		/*
		 * do _not_ fail yet - we might not need that or fail with
		 * a different error; let lookup_open() decide; we'll be
		 * dropping this one anyway.
		 */
	}
	mutex_lock(&dir->d_inode->i_mutex);
	error = lookup_open(nd, &path, file, op, got_write, opened);
	mutex_unlock(&dir->d_inode->i_mutex);

	if (error <= 0) {
		/* 0 == opened via ->atomic_open(); < 0 == hard failure */
		if (error)
			goto out;

		if ((*opened & FILE_CREATED) ||
		    !S_ISREG(file_inode(file)->i_mode))
			will_truncate = false;

		audit_inode(nd->name, file->f_path.dentry, 0);
		goto opened;
	}

	if (*opened & FILE_CREATED) {
		/* Don't check for write permission, don't truncate */
		open_flag &= ~O_TRUNC;
		will_truncate = false;
		acc_mode = MAY_OPEN;
		path_to_nameidata(&path, nd);
		goto finish_open_created;
	}

	/*
	 * create/update audit record if it already exists.
	 */
	if (d_is_positive(path.dentry))
		audit_inode(nd->name, path.dentry, 0);

	/*
	 * If atomic_open() acquired write access it is dropped now due to
	 * possible mount and symlink following (this might be optimized away if
	 * necessary...)
	 */
	if (got_write) {
		mnt_drop_write(nd->path.mnt);
		got_write = false;
	}

	if (unlikely((open_flag & (O_EXCL | O_CREAT)) ==
		     (O_EXCL | O_CREAT))) {
		path_to_nameidata(&path, nd);
		return -EEXIST;
	}

	error = follow_managed(&path, nd);
	if (unlikely(error < 0))
		return error;

	BUG_ON(nd->flags & LOOKUP_RCU);
	inode = d_backing_inode(path.dentry);
	seq = 0; /* out of RCU mode, so the value doesn't matter */
	if (unlikely(d_is_negative(path.dentry))) {
		path_to_nameidata(&path, nd);
		return -ENOENT;
	}
finish_lookup:
	if (nd->depth)
		put_link(nd);
	error = should_follow_link(nd, &path, nd->flags & LOOKUP_FOLLOW,
				   inode, seq);
	if (unlikely(error))
		return error;

	if (unlikely(d_is_symlink(path.dentry)) && !(open_flag & O_PATH)) {
		path_to_nameidata(&path, nd);
		return -ELOOP;
	}

	if ((nd->flags & LOOKUP_RCU) || nd->path.mnt != path.mnt) {
		path_to_nameidata(&path, nd);
	} else {
		/* keep a reference to the parent for a possible stale retry */
		save_parent.dentry = nd->path.dentry;
		save_parent.mnt = mntget(path.mnt);
		nd->path.dentry = path.dentry;

	}
	nd->inode = inode;
	nd->seq = seq;
	/* Why this, you ask?  _Now_ we might have grown LOOKUP_JUMPED... */
finish_open:
	error = complete_walk(nd);
	if (error) {
		path_put(&save_parent);
		return error;
	}
	audit_inode(nd->name, nd->path.dentry, 0);
	error = -EISDIR;
	if ((open_flag & O_CREAT) && d_is_dir(nd->path.dentry))
		goto out;
	error = -ENOTDIR;
	if ((nd->flags & LOOKUP_DIRECTORY) && !d_can_lookup(nd->path.dentry))
		goto out;
	if (!d_is_reg(nd->path.dentry))
		will_truncate = false;

	if (will_truncate) {
		error = mnt_want_write(nd->path.mnt);
		if (error)
			goto out;
		got_write = true;
	}
finish_open_created:
	error = may_open(&nd->path, acc_mode, open_flag);
	if (error)
		goto out;

	BUG_ON(*opened & FILE_OPENED); /* once it's opened, it's opened */
	error = vfs_open(&nd->path, file, current_cred());
	if (!error) {
		*opened |= FILE_OPENED;
	} else {
		if (error == -EOPENSTALE)
			goto stale_open;
		goto out;
	}
opened:
	error = open_check_o_direct(file);
	if (error)
		goto exit_fput;
	error = ima_file_check(file, op->acc_mode, *opened);
	if (error)
		goto exit_fput;

	if (will_truncate) {
		error = handle_truncate(file);
		if (error)
			goto exit_fput;
	}
out:
	if (got_write)
		mnt_drop_write(nd->path.mnt);
	path_put(&save_parent);
	return error;

exit_fput:
	fput(file);
	goto out;

stale_open:
	/* If no saved parent or already retried then can't retry */
	if (!save_parent.dentry || retried)
		goto out;

	BUG_ON(save_parent.dentry != dir);
	path_put(&nd->path);
	nd->path = save_parent;
	nd->inode = dir->d_inode;
	save_parent.mnt = NULL;
	save_parent.dentry = NULL;
	if (got_write) {
		mnt_drop_write(nd->path.mnt);
		got_write = false;
	}
	retried = true;
	goto retry_lookup;
}

/*
 * Open an O_TMPFILE: look up the directory, allocate an anonymous child
 * dentry, let the filesystem's ->tmpfile() create the unnamed inode, then
 * open it.  Unless O_EXCL was given, mark the inode I_LINKABLE so it can
 * later be given a name via linkat().
 */
static int do_tmpfile(struct nameidata *nd, unsigned flags,
		const struct open_flags *op,
		struct file *file, int *opened)
{
	static const struct qstr name = QSTR_INIT("/", 1);
	struct dentry *child;
	struct inode *dir;
	struct path path;
	int error = path_lookupat(nd, flags | LOOKUP_DIRECTORY, &path);
	if (unlikely(error))
		return error;
	error = mnt_want_write(path.mnt);
	if (unlikely(error))
		goto out;
	dir = path.dentry->d_inode;
	/* we want directory to be writable */
	error = inode_permission(dir, MAY_WRITE | MAY_EXEC);
	if (error)
		goto out2;
	if (!dir->i_op->tmpfile) {
		error = -EOPNOTSUPP;
		goto out2;
	}
	child = d_alloc(path.dentry, &name);
	if (unlikely(!child)) {
		error = -ENOMEM;
		goto out2;
	}
	dput(path.dentry);
	path.dentry = child;
	error = dir->i_op->tmpfile(dir, child, op->mode);
	if (error)
		goto out2;
	audit_inode(nd->name, child, 0);
	/* Don't check for other permissions, the inode was just created */
	error = may_open(&path, MAY_OPEN, op->open_flag);
	if (error)
		goto out2;
	file->f_path.mnt = path.mnt;
	error = finish_open(file, child, NULL, opened);
	if (error)
		goto out2;
	error = open_check_o_direct(file);
	if (error) {
		fput(file);
	} else if (!(op->open_flag & O_EXCL)) {
		struct inode *inode = file_inode(file);
		spin_lock(&inode->i_lock);
		inode->i_state |= I_LINKABLE;
		spin_unlock(&inode->i_lock);
	}
out2:
	mnt_drop_write(path.mnt);
out:
	path_put(&path);
	return error;
}

/*
 * Core of open(): allocate a struct file, walk the path and open the final
 * component via do_last(), retrying through trailing symlinks.  Translates
 * -EOPENSTALE into -ECHILD (RCU walk) or -ESTALE so the caller can retry.
 */
static struct file *path_openat(struct nameidata *nd,
			const struct open_flags *op, unsigned flags)
{
	const char *s;
	struct file *file;
	int opened = 0;
	int error;

	file = get_empty_filp();
	if (IS_ERR(file))
		return file;

	file->f_flags = op->open_flag;

	if (unlikely(file->f_flags & __O_TMPFILE)) {
		error = do_tmpfile(nd, flags, op, file, &opened);
		goto out2;
	}

	s = path_init(nd, flags);
	if (IS_ERR(s)) {
		put_filp(file);
		return ERR_CAST(s);
	}
	while (!(error = link_path_walk(s, nd)) &&
		(error = do_last(nd, file, op, &opened)) > 0) {
		nd->flags &= ~(LOOKUP_OPEN|LOOKUP_CREATE|LOOKUP_EXCL);
		s = trailing_symlink(nd);
		if (IS_ERR(s)) {
			error = PTR_ERR(s);
			break;
		}
	}
	terminate_walk(nd);
out2:
	if (!(opened & FILE_OPENED)) {
		BUG_ON(!error);
		put_filp(file);
	}
	if (unlikely(error)) {
		if (error == -EOPENSTALE) {
			if (flags & LOOKUP_RCU)
				error = -ECHILD;
			else
				error = -ESTALE;
		}
		file = ERR_PTR(error);
	}
	return file;
}

/*
 * Open a file by pathname relative to @dfd: RCU walk first, then ref-walk
 * on -ECHILD, then forced revalidation on -ESTALE.
 */
struct file *do_filp_open(int dfd, struct filename *pathname,
		const struct open_flags *op)
{
	struct nameidata nd;
	int flags = op->lookup_flags;
	struct file *filp;

	set_nameidata(&nd, dfd, pathname);
	filp = path_openat(&nd, op, flags | LOOKUP_RCU);
	if (unlikely(filp == ERR_PTR(-ECHILD)))
		filp = path_openat(&nd, op, flags);
	if (unlikely(filp == ERR_PTR(-ESTALE)))
		filp = path_openat(&nd, op, flags | LOOKUP_REVAL);
	restore_nameidata();
	return filp;
}

/*
 * Like do_filp_open() but resolves @name relative to an explicit root
 * (@dentry, @mnt) using LOOKUP_ROOT; rejects opening a symlink root.
 */
struct file *do_file_open_root(struct dentry *dentry, struct vfsmount *mnt,
		const char *name, const struct open_flags *op)
{
	struct nameidata nd;
	struct file *file;
	struct filename *filename;
	int flags = op->lookup_flags | LOOKUP_ROOT;

	nd.root.mnt = mnt;
	nd.root.dentry = dentry;

	if (d_is_symlink(dentry) && op->intent & LOOKUP_OPEN)
		return ERR_PTR(-ELOOP);

	filename = getname_kernel(name);
	if (unlikely(IS_ERR(filename)))
		return ERR_CAST(filename);

	set_nameidata(&nd, -1, filename);
	file = path_openat(&nd, op, flags | LOOKUP_RCU);
	if (unlikely(file == ERR_PTR(-ECHILD)))
		file = path_openat(&nd, op, flags);
	if (unlikely(file == ERR_PTR(-ESTALE)))
		file = path_openat(&nd, op, flags | LOOKUP_REVAL);
	restore_nameidata();
	putname(filename);
	return file;
}

/*
 * Look up the parent of @name and return a locked, negative dentry for the
 * last component, ready for a create-type operation (mknod/mkdir/symlink).
 * On success the parent's i_mutex is held and a write reference on the
 * mount has been taken; undone by done_path_create().  Consumes @name.
 */
static struct dentry *filename_create(int dfd, struct filename *name,
				struct path *path, unsigned int lookup_flags)
{
	struct dentry *dentry = ERR_PTR(-EEXIST);
	struct qstr last;
	int type;
	int err2;
	int error;
	bool is_dir = (lookup_flags & LOOKUP_DIRECTORY);

	/*
	 * Note that only LOOKUP_REVAL and LOOKUP_DIRECTORY matter here. Any
	 * other flags passed in are ignored!
	 */
	lookup_flags &= LOOKUP_REVAL;

	name = filename_parentat(dfd, name, lookup_flags, path, &last, &type);
	if (IS_ERR(name))
		return ERR_CAST(name);

	/*
	 * Yucky last component or no last component at all?
	 * (foo/., foo/.., /////)
	 */
	if (unlikely(type != LAST_NORM))
		goto out;

	/* don't fail immediately if it's r/o, at least try to report other errors */
	err2 = mnt_want_write(path->mnt);
	/*
	 * Do the final lookup.
	 */
	lookup_flags |= LOOKUP_CREATE | LOOKUP_EXCL;
	mutex_lock_nested(&path->dentry->d_inode->i_mutex, I_MUTEX_PARENT);
	dentry = __lookup_hash(&last, path->dentry, lookup_flags);
	if (IS_ERR(dentry))
		goto unlock;

	error = -EEXIST;
	if (d_is_positive(dentry))
		goto fail;

	/*
	 * Special case - lookup gave negative, but... we had foo/bar/
	 * From the vfs_mknod() POV we just have a negative dentry -
	 * all is fine. Let's be bastards - you had / on the end, you've
	 * been asking for (non-existent) directory. -ENOENT for you.
	 */
	if (unlikely(!is_dir && last.name[last.len])) {
		error = -ENOENT;
		goto fail;
	}
	if (unlikely(err2)) {
		error = err2;
		goto fail;
	}
	putname(name);
	return dentry;
fail:
	dput(dentry);
	dentry = ERR_PTR(error);
unlock:
	mutex_unlock(&path->dentry->d_inode->i_mutex);
	if (!err2)
		mnt_drop_write(path->mnt);
out:
	path_put(path);
	putname(name);
	return dentry;
}

/* Kernel-space pathname wrapper around filename_create(). */
struct dentry *kern_path_create(int dfd, const char *pathname,
				struct path *path, unsigned int lookup_flags)
{
	return filename_create(dfd, getname_kernel(pathname),
				path, lookup_flags);
}
EXPORT_SYMBOL(kern_path_create);

/*
 * Release everything filename_create()/user_path_create() acquired:
 * the dentry reference, the parent's i_mutex, the mount write reference
 * and the path reference.
 */
void done_path_create(struct path *path, struct dentry *dentry)
{
	dput(dentry);
	mutex_unlock(&path->dentry->d_inode->i_mutex);
	mnt_drop_write(path->mnt);
	path_put(path);
}
EXPORT_SYMBOL(done_path_create);

/* Userland pathname wrapper around filename_create(). */
inline struct dentry *user_path_create(int dfd, const char __user *pathname,
				struct path *path, unsigned int lookup_flags)
{
	return filename_create(dfd, getname(pathname), path, lookup_flags);
}
EXPORT_SYMBOL(user_path_create);

/*
 * Create a filesystem node (device node, FIFO, socket or regular file):
 * capability, cgroup and security checks, then the fs ->mknod() method.
 * Caller must hold dir->i_mutex.
 */
int vfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
{
	int error = may_create(dir, dentry);

	if (error)
		return error;

	/* device nodes require CAP_MKNOD */
	if ((S_ISCHR(mode) || S_ISBLK(mode)) && !capable(CAP_MKNOD))
		return -EPERM;

	if (!dir->i_op->mknod)
		return -EPERM;

	error = devcgroup_inode_mknod(mode, dev);
	if (error)
		return error;

	error = security_inode_mknod(dir, dentry, mode, dev);
	if (error)
		return error;

	error = dir->i_op->mknod(dir, dentry, mode, dev);
	if (!error)
		fsnotify_create(dir, dentry);
	return error;
}
EXPORT_SYMBOL(vfs_mknod);

/*
 * Validate the file type requested by mknodat(): directories are rejected
 * with -EPERM, unknown types with -EINVAL.
 */
static int may_mknod(umode_t mode)
{
	switch (mode & S_IFMT) {
	case S_IFREG:
	case S_IFCHR:
	case S_IFBLK:
	case S_IFIFO:
	case S_IFSOCK:
	case 0: /* zero mode translates to S_IFREG */
		return 0;
	case S_IFDIR:
		return -EPERM;
	default:
		return -EINVAL;
	}
}

SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, umode_t, mode,
		unsigned, dev)
{
	struct dentry *dentry;
	struct path path;
	int error;
	unsigned int lookup_flags = 0;

	error = may_mknod(mode);
	if (error)
		return error;
retry:
	dentry = user_path_create(dfd, filename, &path, lookup_flags);
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	if (!IS_POSIXACL(path.dentry->d_inode))
		mode &= ~current_umask();
	error = security_path_mknod(&path, dentry, mode, dev);
	if (error)
		goto out;
	switch (mode & S_IFMT) {
		case 0: case S_IFREG:
			error = vfs_create(path.dentry->d_inode,dentry,mode,true);
			break;
		case S_IFCHR: case S_IFBLK:
			error = vfs_mknod(path.dentry->d_inode,dentry,mode,
					new_decode_dev(dev));
			break;
		case S_IFIFO: case S_IFSOCK:
			error = vfs_mknod(path.dentry->d_inode,dentry,mode,0);
			break;
	}
out:
	done_path_create(&path, dentry);
	if (retry_estale(error, lookup_flags)) {
		lookup_flags |= LOOKUP_REVAL;
		goto retry;
	}
	return error;
}

SYSCALL_DEFINE3(mknod, const char __user *, filename, umode_t, mode,
		unsigned, dev)
{
	return sys_mknodat(AT_FDCWD, filename, mode, dev);
}

/*
 * Create a directory: permission/security checks, link-count limit, then
 * the fs ->mkdir() method and an fsnotify event.  Caller must hold
 * dir->i_mutex.
 */
int vfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
	int error = may_create(dir, dentry);
	unsigned max_links = dir->i_sb->s_max_links;

	if (error)
		return error;

	if (!dir->i_op->mkdir)
		return -EPERM;

	mode &= (S_IRWXUGO|S_ISVTX);
	error = security_inode_mkdir(dir, dentry, mode);
	if (error)
		return error;

	if (max_links && dir->i_nlink >= max_links)
		return -EMLINK;

	error = dir->i_op->mkdir(dir, dentry, mode);
	if (!error)
		fsnotify_mkdir(dir, dentry);
	return error;
}
EXPORT_SYMBOL(vfs_mkdir);

SYSCALL_DEFINE3(mkdirat, int, dfd, const char __user *, pathname, umode_t, mode)
{
	struct dentry *dentry;
	struct path path;
	int error;
	unsigned int lookup_flags = LOOKUP_DIRECTORY;

retry:
	dentry = user_path_create(dfd, pathname, &path, lookup_flags);
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	if (!IS_POSIXACL(path.dentry->d_inode))
		mode &= ~current_umask();
	error = security_path_mkdir(&path, dentry, mode);
	if (!error)
		error = vfs_mkdir(path.dentry->d_inode, dentry, mode);
	done_path_create(&path, dentry);
	if (retry_estale(error, lookup_flags)) {
		lookup_flags |= LOOKUP_REVAL;
		goto retry;
	}
	return error;
}

SYSCALL_DEFINE2(mkdir, const char __user *, pathname, umode_t, mode)
{
	return sys_mkdirat(AT_FDCWD, pathname, mode);
}

/*
 * The dentry_unhash() helper will try to drop the dentry early: we
 * should have a usage count of 1 if we're the only user of this
 * dentry, and if that is true (possibly after pruning the dcache),
 * then we drop the dentry now.
 *
 * A low-level filesystem can, if it chooses, legally
 * do a
 *
 *	if (!d_unhashed(dentry))
 *		return -EBUSY;
 *
 * if it cannot handle the case of removing a directory
 * that is still in use by something else..
 */
void dentry_unhash(struct dentry *dentry)
{
	shrink_dcache_parent(dentry);
	spin_lock(&dentry->d_lock);
	if (dentry->d_lockref.count == 1)
		__d_drop(dentry);
	spin_unlock(&dentry->d_lock);
}
EXPORT_SYMBOL(dentry_unhash);

/*
 * Remove a directory: may_delete() checks, then under the victim's i_mutex
 * refuse local mountpoints, run security checks, prune children and call
 * the fs ->rmdir().  On success the inode is marked S_DEAD and the dentry
 * is unmounted/deleted.  Caller must hold dir->i_mutex.
 */
int vfs_rmdir(struct inode *dir, struct dentry *dentry)
{
	int error = may_delete(dir, dentry, 1);

	if (error)
		return error;

	if (!dir->i_op->rmdir)
		return -EPERM;

	dget(dentry);
	mutex_lock(&dentry->d_inode->i_mutex);

	error = -EBUSY;
	if (is_local_mountpoint(dentry))
		goto out;

	error = security_inode_rmdir(dir, dentry);
	if (error)
		goto out;

	shrink_dcache_parent(dentry);
	error = dir->i_op->rmdir(dir, dentry);
	if (error)
		goto out;

	dentry->d_inode->i_flags |= S_DEAD;
	dont_mount(dentry);
	detach_mounts(dentry);

out:
	mutex_unlock(&dentry->d_inode->i_mutex);
	dput(dentry);
	if (!error)
		d_delete(dentry);
	return error;
}
EXPORT_SYMBOL(vfs_rmdir);

/*
 * Implementation of rmdir(2)/unlinkat(AT_REMOVEDIR): resolve the parent,
 * reject ".", ".." and the root, look up the victim under the parent's
 * i_mutex and hand it to vfs_rmdir(); retried with LOOKUP_REVAL on ESTALE.
 */
static long do_rmdir(int dfd, const char __user *pathname)
{
	int error = 0;
	struct filename *name;
	struct dentry *dentry;
	struct path path;
	struct qstr last;
	int type;
	unsigned int lookup_flags = 0;
retry:
	name = user_path_parent(dfd, pathname,
				&path, &last, &type, lookup_flags);
	if (IS_ERR(name))
		return PTR_ERR(name);

	switch (type) {
	case LAST_DOTDOT:
		error = -ENOTEMPTY;
		goto exit1;
	case LAST_DOT:
		error = -EINVAL;
		goto exit1;
	case LAST_ROOT:
		error = -EBUSY;
		goto exit1;
	}

	error = mnt_want_write(path.mnt);
	if (error)
		goto exit1;

	mutex_lock_nested(&path.dentry->d_inode->i_mutex, I_MUTEX_PARENT);
	dentry = __lookup_hash(&last, path.dentry, lookup_flags);
	error = PTR_ERR(dentry);
	if (IS_ERR(dentry))
		goto exit2;
	if (!dentry->d_inode) {
		error = -ENOENT;
		goto exit3;
	}
	error = security_path_rmdir(&path, dentry);
	if (error)
		goto exit3;
	error = vfs_rmdir(path.dentry->d_inode, dentry);
exit3:
	dput(dentry);
exit2:
	mutex_unlock(&path.dentry->d_inode->i_mutex);
	mnt_drop_write(path.mnt);
exit1:
	path_put(&path);
	putname(name);
	if (retry_estale(error, lookup_flags)) {
		lookup_flags |= LOOKUP_REVAL;
		goto retry;
	}
	return error;
}
SYSCALL_DEFINE1(rmdir, const char __user *, pathname)
{
	return do_rmdir(AT_FDCWD, pathname);
}

/**
 * vfs_unlink - unlink a filesystem object
 * @dir:	parent directory
 * @dentry:	victim
 * @delegated_inode: returns victim inode, if the inode is delegated.
 *
 * The caller must hold dir->i_mutex.
 *
 * If vfs_unlink discovers a delegation, it will return -EWOULDBLOCK and
 * return a reference to the inode in delegated_inode.  The caller
 * should then break the delegation on that inode and retry.  Because
 * breaking a delegation may take a long time, the caller should drop
 * dir->i_mutex before doing so.
 *
 * Alternatively, a caller may pass NULL for delegated_inode.  This may
 * be appropriate for callers that expect the underlying filesystem not
 * to be NFS exported.
 */
int vfs_unlink(struct inode *dir, struct dentry *dentry, struct inode **delegated_inode)
{
	struct inode *target = dentry->d_inode;
	int error = may_delete(dir, dentry, 0);

	if (error)
		return error;

	if (!dir->i_op->unlink)
		return -EPERM;

	mutex_lock(&target->i_mutex);
	if (is_local_mountpoint(dentry))
		error = -EBUSY;
	else {
		error = security_inode_unlink(dir, dentry);
		if (!error) {
			error = try_break_deleg(target, delegated_inode);
			if (error)
				goto out;
			error = dir->i_op->unlink(dir, dentry);
			if (!error) {
				dont_mount(dentry);
				detach_mounts(dentry);
			}
		}
	}
out:
	mutex_unlock(&target->i_mutex);

	/* We don't d_delete() NFS sillyrenamed files--they still exist. */
	if (!error && !(dentry->d_flags & DCACHE_NFSFS_RENAMED)) {
		fsnotify_link_count(target);
		d_delete(dentry);
	}

	return error;
}
EXPORT_SYMBOL(vfs_unlink);

/*
 * Make sure that the actual truncation of the file will occur outside its
 * directory's i_mutex.  Truncate can take a long time if there is a lot of
 * writeout happening, and we don't want to prevent access to the directory
 * while waiting on the I/O.
 */
static long do_unlinkat(int dfd, const char __user *pathname)
{
	int error;
	struct filename *name;
	struct dentry *dentry;
	struct path path;
	struct qstr last;
	int type;
	struct inode *inode = NULL;
	struct inode *delegated_inode = NULL;
	unsigned int lookup_flags = 0;
retry:
	name = user_path_parent(dfd, pathname,
				&path, &last, &type, lookup_flags);
	if (IS_ERR(name))
		return PTR_ERR(name);

	error = -EISDIR;
	if (type != LAST_NORM)
		goto exit1;

	error = mnt_want_write(path.mnt);
	if (error)
		goto exit1;
retry_deleg:
	mutex_lock_nested(&path.dentry->d_inode->i_mutex, I_MUTEX_PARENT);
	dentry = __lookup_hash(&last, path.dentry, lookup_flags);
	error = PTR_ERR(dentry);
	if (!IS_ERR(dentry)) {
		/* Why not before? Because we want correct error value */
		if (last.name[last.len])
			goto slashes;
		inode = dentry->d_inode;
		if (d_is_negative(dentry))
			goto slashes;
		/* hold the inode so truncation happens after dropping i_mutex */
		ihold(inode);
		error = security_path_unlink(&path, dentry);
		if (error)
			goto exit2;
		error = vfs_unlink(path.dentry->d_inode, dentry,
				   &delegated_inode);
exit2:
		dput(dentry);
	}
	mutex_unlock(&path.dentry->d_inode->i_mutex);
	if (inode)
		iput(inode);	/* truncate the inode here */
	inode = NULL;
	if (delegated_inode) {
		error = break_deleg_wait(&delegated_inode);
		if (!error)
			goto retry_deleg;
	}
	mnt_drop_write(path.mnt);
exit1:
	path_put(&path);
	putname(name);
	if (retry_estale(error, lookup_flags)) {
		lookup_flags |= LOOKUP_REVAL;
		inode = NULL;
		goto retry;
	}
	return error;

slashes:
	/* trailing slash on a non-directory (or missing) last component */
	if (d_is_negative(dentry))
		error = -ENOENT;
	else if (d_is_dir(dentry))
		error = -EISDIR;
	else
		error = -ENOTDIR;
	goto exit2;
}

SYSCALL_DEFINE3(unlinkat, int, dfd, const char __user *, pathname, int, flag)
{
	if ((flag & ~AT_REMOVEDIR) != 0)
		return -EINVAL;

	if (flag & AT_REMOVEDIR)
		return do_rmdir(dfd, pathname);

	return do_unlinkat(dfd, pathname);
}

SYSCALL_DEFINE1(unlink, const char __user *, pathname)
{
	return do_unlinkat(AT_FDCWD, pathname);
}

/*
 * Create a symlink: permission/security checks, then the fs ->symlink()
 * method and an fsnotify event.  Caller must hold dir->i_mutex.
 */
int vfs_symlink(struct inode *dir, struct dentry *dentry, const char *oldname)
{
	int error = may_create(dir, dentry);

	if (error)
		return error;

	if (!dir->i_op->symlink)
		return -EPERM;

	error = security_inode_symlink(dir, dentry, oldname);
	if (error)
		return error;

	error = dir->i_op->symlink(dir, dentry, oldname);
	if (!error)
		fsnotify_create(dir, dentry);
	return error;
}
EXPORT_SYMBOL(vfs_symlink);

SYSCALL_DEFINE3(symlinkat, const char __user *, oldname,
		int, newdfd, const char __user *, newname)
{
	int error;
	struct filename *from;
	struct dentry *dentry;
	struct path path;
	unsigned int lookup_flags = 0;

	from = getname(oldname);
	if (IS_ERR(from))
		return PTR_ERR(from);
retry:
	dentry = user_path_create(newdfd, newname, &path, lookup_flags);
	error = PTR_ERR(dentry);
	if (IS_ERR(dentry))
		goto out_putname;

	error = security_path_symlink(&path, dentry, from->name);
	if (!error)
		error = vfs_symlink(path.dentry->d_inode, dentry, from->name);
	done_path_create(&path, dentry);
	if (retry_estale(error, lookup_flags)) {
		lookup_flags |= LOOKUP_REVAL;
		goto retry;
	}
out_putname:
	putname(from);
	return error;
}

SYSCALL_DEFINE2(symlink, const char __user *, oldname, const char __user *, newname)
{
	return sys_symlinkat(oldname, AT_FDCWD, newname);
}

/**
 * vfs_link - create a new link
 * @old_dentry:	object to be linked
 * @dir:	new parent
 * @new_dentry:	where to create the new link
 * @delegated_inode: returns inode needing a delegation break
 *
 * The caller must hold dir->i_mutex
 *
 * If vfs_link discovers a delegation on the to-be-linked file in need
 * of breaking, it will return -EWOULDBLOCK and return a reference to the
 * inode in delegated_inode.  The caller should then break the delegation
 * and retry.  Because breaking a delegation may take a long time, the
 * caller should drop the i_mutex before doing so.
 *
 * Alternatively, a caller may pass NULL for delegated_inode.  This may
 * be appropriate for callers that expect the underlying filesystem not
 * to be NFS exported.
*/ int vfs_link(struct dentry *old_dentry, struct inode *dir, struct dentry *new_dentry, struct inode **delegated_inode) { struct inode *inode = old_dentry->d_inode; unsigned max_links = dir->i_sb->s_max_links; int error; if (!inode) return -ENOENT; error = may_create(dir, new_dentry); if (error) return error; if (dir->i_sb != inode->i_sb) return -EXDEV; /* * A link to an append-only or immutable file cannot be created. */ if (IS_APPEND(inode) || IS_IMMUTABLE(inode)) return -EPERM; if (!dir->i_op->link) return -EPERM; if (S_ISDIR(inode->i_mode)) return -EPERM; error = security_inode_link(old_dentry, dir, new_dentry); if (error) return error; mutex_lock(&inode->i_mutex); /* Make sure we don't allow creating hardlink to an unlinked file */ if (inode->i_nlink == 0 && !(inode->i_state & I_LINKABLE)) error = -ENOENT; else if (max_links && inode->i_nlink >= max_links) error = -EMLINK; else { error = try_break_deleg(inode, delegated_inode); if (!error) error = dir->i_op->link(old_dentry, dir, new_dentry); } if (!error && (inode->i_state & I_LINKABLE)) { spin_lock(&inode->i_lock); inode->i_state &= ~I_LINKABLE; spin_unlock(&inode->i_lock); } mutex_unlock(&inode->i_mutex); if (!error) fsnotify_link(dir, inode, new_dentry); return error; } EXPORT_SYMBOL(vfs_link); /* * Hardlinks are often used in delicate situations. We avoid * security-related surprises by not following symlinks on the * newname. --KAB * * We don't follow them on the oldname either to be compatible * with linux 2.0, and to avoid hard-linking to directories * and other special files. 
--ADM */ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname, int, newdfd, const char __user *, newname, int, flags) { struct dentry *new_dentry; struct path old_path, new_path; struct inode *delegated_inode = NULL; int how = 0; int error; if ((flags & ~(AT_SYMLINK_FOLLOW | AT_EMPTY_PATH)) != 0) return -EINVAL; /* * To use null names we require CAP_DAC_READ_SEARCH * This ensures that not everyone will be able to create * handlink using the passed filedescriptor. */ if (flags & AT_EMPTY_PATH) { if (!capable(CAP_DAC_READ_SEARCH)) return -ENOENT; how = LOOKUP_EMPTY; } if (flags & AT_SYMLINK_FOLLOW) how |= LOOKUP_FOLLOW; retry: error = user_path_at(olddfd, oldname, how, &old_path); if (error) return error; new_dentry = user_path_create(newdfd, newname, &new_path, (how & LOOKUP_REVAL)); error = PTR_ERR(new_dentry); if (IS_ERR(new_dentry)) goto out; error = -EXDEV; if (old_path.mnt != new_path.mnt) goto out_dput; error = may_linkat(&old_path); if (unlikely(error)) goto out_dput; error = security_path_link(old_path.dentry, &new_path, new_dentry); if (error) goto out_dput; error = vfs_link(old_path.dentry, new_path.dentry->d_inode, new_dentry, &delegated_inode); out_dput: done_path_create(&new_path, new_dentry); if (delegated_inode) { error = break_deleg_wait(&delegated_inode); if (!error) { path_put(&old_path); goto retry; } } if (retry_estale(error, how)) { path_put(&old_path); how |= LOOKUP_REVAL; goto retry; } out: path_put(&old_path); return error; } SYSCALL_DEFINE2(link, const char __user *, oldname, const char __user *, newname) { return sys_linkat(AT_FDCWD, oldname, AT_FDCWD, newname, 0); } /** * vfs_rename - rename a filesystem object * @old_dir: parent of source * @old_dentry: source * @new_dir: parent of destination * @new_dentry: destination * @delegated_inode: returns an inode needing a delegation break * @flags: rename flags * * The caller must hold multiple mutexes--see lock_rename()). 
* * If vfs_rename discovers a delegation in need of breaking at either * the source or destination, it will return -EWOULDBLOCK and return a * reference to the inode in delegated_inode. The caller should then * break the delegation and retry. Because breaking a delegation may * take a long time, the caller should drop all locks before doing * so. * * Alternatively, a caller may pass NULL for delegated_inode. This may * be appropriate for callers that expect the underlying filesystem not * to be NFS exported. * * The worst of all namespace operations - renaming directory. "Perverted" * doesn't even start to describe it. Somebody in UCB had a heck of a trip... * Problems: * a) we can get into loop creation. * b) race potential - two innocent renames can create a loop together. * That's where 4.4 screws up. Current fix: serialization on * sb->s_vfs_rename_mutex. We might be more accurate, but that's another * story. * c) we have to lock _four_ objects - parents and victim (if it exists), * and source (if it is not a directory). * And that - after we got ->i_mutex on parents (until then we don't know * whether the target exists). Solution: try to be smart with locking * order for inodes. We rely on the fact that tree topology may change * only under ->s_vfs_rename_mutex _and_ that parent of the object we * move will be locked. Thus we can rank directories by the tree * (ancestors first) and rank all non-directories after them. * That works since everybody except rename does "lock parent, lookup, * lock child" and rename is under ->s_vfs_rename_mutex. * HOWEVER, it relies on the assumption that any object with ->lookup() * has no more than 1 dentry. If "hybrid" objects will ever appear, * we'd better make sure that there's no link(2) for them. * d) conversion from fhandle to dentry may come in the wrong moment - when * we are removing the target. Solution: we will have to grab ->i_mutex * in the fhandle_to_dentry code. 
[FIXME - current nfsfh.c relies on * ->i_mutex on parents, which works but leads to some truly excessive * locking]. */ int vfs_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry, struct inode **delegated_inode, unsigned int flags) { int error; bool is_dir = d_is_dir(old_dentry); const unsigned char *old_name; struct inode *source = old_dentry->d_inode; struct inode *target = new_dentry->d_inode; bool new_is_dir = false; unsigned max_links = new_dir->i_sb->s_max_links; if (source == target) return 0; error = may_delete(old_dir, old_dentry, is_dir); if (error) return error; if (!target) { error = may_create(new_dir, new_dentry); } else { new_is_dir = d_is_dir(new_dentry); if (!(flags & RENAME_EXCHANGE)) error = may_delete(new_dir, new_dentry, is_dir); else error = may_delete(new_dir, new_dentry, new_is_dir); } if (error) return error; if (!old_dir->i_op->rename && !old_dir->i_op->rename2) return -EPERM; if (flags && !old_dir->i_op->rename2) return -EINVAL; /* * If we are going to change the parent - check write permissions, * we'll need to flip '..'. 
*/ if (new_dir != old_dir) { if (is_dir) { error = inode_permission(source, MAY_WRITE); if (error) return error; } if ((flags & RENAME_EXCHANGE) && new_is_dir) { error = inode_permission(target, MAY_WRITE); if (error) return error; } } error = security_inode_rename(old_dir, old_dentry, new_dir, new_dentry, flags); if (error) return error; old_name = fsnotify_oldname_init(old_dentry->d_name.name); dget(new_dentry); if (!is_dir || (flags & RENAME_EXCHANGE)) lock_two_nondirectories(source, target); else if (target) mutex_lock(&target->i_mutex); error = -EBUSY; if (is_local_mountpoint(old_dentry) || is_local_mountpoint(new_dentry)) goto out; if (max_links && new_dir != old_dir) { error = -EMLINK; if (is_dir && !new_is_dir && new_dir->i_nlink >= max_links) goto out; if ((flags & RENAME_EXCHANGE) && !is_dir && new_is_dir && old_dir->i_nlink >= max_links) goto out; } if (is_dir && !(flags & RENAME_EXCHANGE) && target) shrink_dcache_parent(new_dentry); if (!is_dir) { error = try_break_deleg(source, delegated_inode); if (error) goto out; } if (target && !new_is_dir) { error = try_break_deleg(target, delegated_inode); if (error) goto out; } if (!old_dir->i_op->rename2) { error = old_dir->i_op->rename(old_dir, old_dentry, new_dir, new_dentry); } else { WARN_ON(old_dir->i_op->rename != NULL); error = old_dir->i_op->rename2(old_dir, old_dentry, new_dir, new_dentry, flags); } if (error) goto out; if (!(flags & RENAME_EXCHANGE) && target) { if (is_dir) target->i_flags |= S_DEAD; dont_mount(new_dentry); detach_mounts(new_dentry); } if (!(old_dir->i_sb->s_type->fs_flags & FS_RENAME_DOES_D_MOVE)) { if (!(flags & RENAME_EXCHANGE)) d_move(old_dentry, new_dentry); else d_exchange(old_dentry, new_dentry); } out: if (!is_dir || (flags & RENAME_EXCHANGE)) unlock_two_nondirectories(source, target); else if (target) mutex_unlock(&target->i_mutex); dput(new_dentry); if (!error) { fsnotify_move(old_dir, new_dir, old_name, is_dir, !(flags & RENAME_EXCHANGE) ? 
target : NULL, old_dentry); if (flags & RENAME_EXCHANGE) { fsnotify_move(new_dir, old_dir, old_dentry->d_name.name, new_is_dir, NULL, new_dentry); } } fsnotify_oldname_free(old_name); return error; } EXPORT_SYMBOL(vfs_rename); SYSCALL_DEFINE5(renameat2, int, olddfd, const char __user *, oldname, int, newdfd, const char __user *, newname, unsigned int, flags) { struct dentry *old_dentry, *new_dentry; struct dentry *trap; struct path old_path, new_path; struct qstr old_last, new_last; int old_type, new_type; struct inode *delegated_inode = NULL; struct filename *from; struct filename *to; unsigned int lookup_flags = 0, target_flags = LOOKUP_RENAME_TARGET; bool should_retry = false; int error; if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT)) return -EINVAL; if ((flags & (RENAME_NOREPLACE | RENAME_WHITEOUT)) && (flags & RENAME_EXCHANGE)) return -EINVAL; if ((flags & RENAME_WHITEOUT) && !capable(CAP_MKNOD)) return -EPERM; if (flags & RENAME_EXCHANGE) target_flags = 0; retry: from = user_path_parent(olddfd, oldname, &old_path, &old_last, &old_type, lookup_flags); if (IS_ERR(from)) { error = PTR_ERR(from); goto exit; } to = user_path_parent(newdfd, newname, &new_path, &new_last, &new_type, lookup_flags); if (IS_ERR(to)) { error = PTR_ERR(to); goto exit1; } error = -EXDEV; if (old_path.mnt != new_path.mnt) goto exit2; error = -EBUSY; if (old_type != LAST_NORM) goto exit2; if (flags & RENAME_NOREPLACE) error = -EEXIST; if (new_type != LAST_NORM) goto exit2; error = mnt_want_write(old_path.mnt); if (error) goto exit2; retry_deleg: trap = lock_rename(new_path.dentry, old_path.dentry); old_dentry = __lookup_hash(&old_last, old_path.dentry, lookup_flags); error = PTR_ERR(old_dentry); if (IS_ERR(old_dentry)) goto exit3; /* source must exist */ error = -ENOENT; if (d_is_negative(old_dentry)) goto exit4; new_dentry = __lookup_hash(&new_last, new_path.dentry, lookup_flags | target_flags); error = PTR_ERR(new_dentry); if (IS_ERR(new_dentry)) goto exit4; error = 
-EEXIST; if ((flags & RENAME_NOREPLACE) && d_is_positive(new_dentry)) goto exit5; if (flags & RENAME_EXCHANGE) { error = -ENOENT; if (d_is_negative(new_dentry)) goto exit5; if (!d_is_dir(new_dentry)) { error = -ENOTDIR; if (new_last.name[new_last.len]) goto exit5; } } /* unless the source is a directory trailing slashes give -ENOTDIR */ if (!d_is_dir(old_dentry)) { error = -ENOTDIR; if (old_last.name[old_last.len]) goto exit5; if (!(flags & RENAME_EXCHANGE) && new_last.name[new_last.len]) goto exit5; } /* source should not be ancestor of target */ error = -EINVAL; if (old_dentry == trap) goto exit5; /* target should not be an ancestor of source */ if (!(flags & RENAME_EXCHANGE)) error = -ENOTEMPTY; if (new_dentry == trap) goto exit5; error = security_path_rename(&old_path, old_dentry, &new_path, new_dentry, flags); if (error) goto exit5; error = vfs_rename(old_path.dentry->d_inode, old_dentry, new_path.dentry->d_inode, new_dentry, &delegated_inode, flags); exit5: dput(new_dentry); exit4: dput(old_dentry); exit3: unlock_rename(new_path.dentry, old_path.dentry); if (delegated_inode) { error = break_deleg_wait(&delegated_inode); if (!error) goto retry_deleg; } mnt_drop_write(old_path.mnt); exit2: if (retry_estale(error, lookup_flags)) should_retry = true; path_put(&new_path); putname(to); exit1: path_put(&old_path); putname(from); if (should_retry) { should_retry = false; lookup_flags |= LOOKUP_REVAL; goto retry; } exit: return error; } SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname, int, newdfd, const char __user *, newname) { return sys_renameat2(olddfd, oldname, newdfd, newname, 0); } SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newname) { return sys_renameat2(AT_FDCWD, oldname, AT_FDCWD, newname, 0); } int vfs_whiteout(struct inode *dir, struct dentry *dentry) { int error = may_create(dir, dentry); if (error) return error; if (!dir->i_op->mknod) return -EPERM; return dir->i_op->mknod(dir, dentry, S_IFCHR | 
WHITEOUT_MODE, WHITEOUT_DEV); } EXPORT_SYMBOL(vfs_whiteout); int readlink_copy(char __user *buffer, int buflen, const char *link) { int len = PTR_ERR(link); if (IS_ERR(link)) goto out; len = strlen(link); if (len > (unsigned) buflen) len = buflen; if (copy_to_user(buffer, link, len)) len = -EFAULT; out: return len; } EXPORT_SYMBOL(readlink_copy); /* * A helper for ->readlink(). This should be used *ONLY* for symlinks that * have ->follow_link() touching nd only in nd_set_link(). Using (or not * using) it for any given inode is up to filesystem. */ int generic_readlink(struct dentry *dentry, char __user *buffer, int buflen) { void *cookie; struct inode *inode = d_inode(dentry); const char *link = inode->i_link; int res; if (!link) { link = inode->i_op->follow_link(dentry, &cookie); if (IS_ERR(link)) return PTR_ERR(link); } res = readlink_copy(buffer, buflen, link); if (inode->i_op->put_link) inode->i_op->put_link(inode, cookie); return res; } EXPORT_SYMBOL(generic_readlink); /* get the link contents into pagecache */ static char *page_getlink(struct dentry * dentry, struct page **ppage) { char *kaddr; struct page *page; struct address_space *mapping = dentry->d_inode->i_mapping; page = read_mapping_page(mapping, 0, NULL); if (IS_ERR(page)) return (char*)page; *ppage = page; kaddr = kmap(page); nd_terminate_link(kaddr, dentry->d_inode->i_size, PAGE_SIZE - 1); return kaddr; } int page_readlink(struct dentry *dentry, char __user *buffer, int buflen) { struct page *page = NULL; int res = readlink_copy(buffer, buflen, page_getlink(dentry, &page)); if (page) { kunmap(page); page_cache_release(page); } return res; } EXPORT_SYMBOL(page_readlink); const char *page_follow_link_light(struct dentry *dentry, void **cookie) { struct page *page = NULL; char *res = page_getlink(dentry, &page); if (!IS_ERR(res)) *cookie = page; return res; } EXPORT_SYMBOL(page_follow_link_light); void page_put_link(struct inode *unused, void *cookie) { struct page *page = cookie; kunmap(page); 
page_cache_release(page); } EXPORT_SYMBOL(page_put_link); /* * The nofs argument instructs pagecache_write_begin to pass AOP_FLAG_NOFS */ int __page_symlink(struct inode *inode, const char *symname, int len, int nofs) { struct address_space *mapping = inode->i_mapping; struct page *page; void *fsdata; int err; char *kaddr; unsigned int flags = AOP_FLAG_UNINTERRUPTIBLE; if (nofs) flags |= AOP_FLAG_NOFS; retry: err = pagecache_write_begin(NULL, mapping, 0, len-1, flags, &page, &fsdata); if (err) goto fail; kaddr = kmap_atomic(page); memcpy(kaddr, symname, len-1); kunmap_atomic(kaddr); err = pagecache_write_end(NULL, mapping, 0, len-1, len-1, page, fsdata); if (err < 0) goto fail; if (err < len-1) goto retry; mark_inode_dirty(inode); return 0; fail: return err; } EXPORT_SYMBOL(__page_symlink); int page_symlink(struct inode *inode, const char *symname, int len) { return __page_symlink(inode, symname, len, !(mapping_gfp_mask(inode->i_mapping) & __GFP_FS)); } EXPORT_SYMBOL(page_symlink); const struct inode_operations page_symlink_inode_operations = { .readlink = generic_readlink, .follow_link = page_follow_link_light, .put_link = page_put_link, }; EXPORT_SYMBOL(page_symlink_inode_operations);
gpl-2.0
alban/linux
drivers/gpu/drm/omapdrm/dss/output.c
131
5495
/* * Copyright (C) 2012 Texas Instruments Ltd * Author: Archit Taneja <archit@ti.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published by * the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program. If not, see <http://www.gnu.org/licenses/>. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/of.h> #include "omapdss.h" #include "dss.h" static LIST_HEAD(output_list); static DEFINE_MUTEX(output_lock); int omapdss_output_set_device(struct omap_dss_device *out, struct omap_dss_device *dssdev) { int r; mutex_lock(&output_lock); if (out->dst) { DSSERR("output already has device %s connected to it\n", out->dst->name); r = -EINVAL; goto err; } if (out->output_type != dssdev->type) { DSSERR("output type and display type don't match\n"); r = -EINVAL; goto err; } out->dst = dssdev; dssdev->src = out; mutex_unlock(&output_lock); return 0; err: mutex_unlock(&output_lock); return r; } EXPORT_SYMBOL(omapdss_output_set_device); int omapdss_output_unset_device(struct omap_dss_device *out) { int r; mutex_lock(&output_lock); if (!out->dst) { DSSERR("output doesn't have a device connected to it\n"); r = -EINVAL; goto err; } if (out->dst->state != OMAP_DSS_DISPLAY_DISABLED) { DSSERR("device %s is not disabled, cannot unset device\n", out->dst->name); r = -EINVAL; goto err; } out->dst->src = NULL; out->dst = NULL; mutex_unlock(&output_lock); return 0; err: mutex_unlock(&output_lock); return r; } EXPORT_SYMBOL(omapdss_output_unset_device); int omapdss_register_output(struct omap_dss_device *out) { 
list_add_tail(&out->list, &output_list); return 0; } EXPORT_SYMBOL(omapdss_register_output); void omapdss_unregister_output(struct omap_dss_device *out) { list_del(&out->list); } EXPORT_SYMBOL(omapdss_unregister_output); struct omap_dss_device *omap_dss_get_output(enum omap_dss_output_id id) { struct omap_dss_device *out; list_for_each_entry(out, &output_list, list) { if (out->id == id) return out; } return NULL; } EXPORT_SYMBOL(omap_dss_get_output); struct omap_dss_device *omap_dss_find_output(const char *name) { struct omap_dss_device *out; list_for_each_entry(out, &output_list, list) { if (strcmp(out->name, name) == 0) return omap_dss_get_device(out); } return NULL; } EXPORT_SYMBOL(omap_dss_find_output); struct omap_dss_device *omap_dss_find_output_by_port_node(struct device_node *port) { struct device_node *src_node; struct omap_dss_device *out; u32 reg; src_node = dss_of_port_get_parent_device(port); if (!src_node) return NULL; reg = dss_of_port_get_port_number(port); list_for_each_entry(out, &output_list, list) { if (out->dev->of_node == src_node && out->port_num == reg) { of_node_put(src_node); return omap_dss_get_device(out); } } of_node_put(src_node); return NULL; } EXPORT_SYMBOL(omap_dss_find_output_by_port_node); struct omap_dss_device *omapdss_find_output_from_display(struct omap_dss_device *dssdev) { while (dssdev->src) dssdev = dssdev->src; if (dssdev->id != 0) return omap_dss_get_device(dssdev); return NULL; } EXPORT_SYMBOL(omapdss_find_output_from_display); static const struct dss_mgr_ops *dss_mgr_ops; int dss_install_mgr_ops(const struct dss_mgr_ops *mgr_ops) { if (dss_mgr_ops) return -EBUSY; dss_mgr_ops = mgr_ops; return 0; } EXPORT_SYMBOL(dss_install_mgr_ops); void dss_uninstall_mgr_ops(void) { dss_mgr_ops = NULL; } EXPORT_SYMBOL(dss_uninstall_mgr_ops); int dss_mgr_connect(enum omap_channel channel, struct omap_dss_device *dst) { return dss_mgr_ops->connect(channel, dst); } EXPORT_SYMBOL(dss_mgr_connect); void dss_mgr_disconnect(enum omap_channel 
channel, struct omap_dss_device *dst) { dss_mgr_ops->disconnect(channel, dst); } EXPORT_SYMBOL(dss_mgr_disconnect); void dss_mgr_set_timings(enum omap_channel channel, const struct omap_video_timings *timings) { dss_mgr_ops->set_timings(channel, timings); } EXPORT_SYMBOL(dss_mgr_set_timings); void dss_mgr_set_lcd_config(enum omap_channel channel, const struct dss_lcd_mgr_config *config) { dss_mgr_ops->set_lcd_config(channel, config); } EXPORT_SYMBOL(dss_mgr_set_lcd_config); int dss_mgr_enable(enum omap_channel channel) { return dss_mgr_ops->enable(channel); } EXPORT_SYMBOL(dss_mgr_enable); void dss_mgr_disable(enum omap_channel channel) { dss_mgr_ops->disable(channel); } EXPORT_SYMBOL(dss_mgr_disable); void dss_mgr_start_update(enum omap_channel channel) { dss_mgr_ops->start_update(channel); } EXPORT_SYMBOL(dss_mgr_start_update); int dss_mgr_register_framedone_handler(enum omap_channel channel, void (*handler)(void *), void *data) { return dss_mgr_ops->register_framedone_handler(channel, handler, data); } EXPORT_SYMBOL(dss_mgr_register_framedone_handler); void dss_mgr_unregister_framedone_handler(enum omap_channel channel, void (*handler)(void *), void *data) { dss_mgr_ops->unregister_framedone_handler(channel, handler, data); } EXPORT_SYMBOL(dss_mgr_unregister_framedone_handler);
gpl-2.0
El-Nath/shamu
drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.c
131
20780
/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/io.h> #include <linux/atomic.h> #include <media/v4l2-subdev.h> #include <media/msmb_isp.h> #include "msm_isp_util.h" #include "msm_isp_stats_util.h" static int msm_isp_stats_cfg_ping_pong_address(struct vfe_device *vfe_dev, struct msm_vfe_stats_stream *stream_info, uint32_t pingpong_status, struct msm_isp_buffer **done_buf) { int rc = -1; struct msm_isp_buffer *buf; uint32_t pingpong_bit = 0; uint32_t bufq_handle = stream_info->bufq_handle; uint32_t stats_pingpong_offset; uint32_t stats_idx = STATS_IDX(stream_info->stream_handle); if (stats_idx >= vfe_dev->hw_info->stats_hw_info->num_stats_type || stats_idx > MSM_ISP_STATS_MAX) { pr_err("%s Invalid stats index %d", __func__, stats_idx); return -EINVAL; } stats_pingpong_offset = vfe_dev->hw_info->stats_hw_info->stats_ping_pong_offset[ stats_idx]; pingpong_bit = (~(pingpong_status >> stats_pingpong_offset) & 0x1); rc = vfe_dev->buf_mgr->ops->get_buf(vfe_dev->buf_mgr, vfe_dev->pdev->id, bufq_handle, &buf); if (rc < 0) { vfe_dev->error_info.stats_framedrop_count[stats_idx]++; return rc; } if (buf->num_planes != 1) { pr_err("%s: Invalid buffer\n", __func__); rc = -EINVAL; goto buf_error; } vfe_dev->hw_info->vfe_ops.stats_ops.update_ping_pong_addr( vfe_dev, stream_info, pingpong_status, buf->mapped_info[0].paddr + stream_info->buffer_offset); if (stream_info->buf[pingpong_bit] && done_buf) *done_buf = stream_info->buf[pingpong_bit]; stream_info->buf[pingpong_bit] = buf; return 0; buf_error: 
vfe_dev->buf_mgr->ops->put_buf(vfe_dev->buf_mgr, buf->bufq_handle, buf->buf_idx); return rc; } void msm_isp_process_stats_irq(struct vfe_device *vfe_dev, uint32_t irq_status0, uint32_t irq_status1, struct msm_isp_timestamp *ts) { int i, j, rc; struct msm_isp_event_data buf_event; struct msm_isp_stats_event *stats_event = &buf_event.u.stats; struct msm_isp_buffer *done_buf; struct msm_vfe_stats_stream *stream_info = NULL; uint32_t pingpong_status; uint32_t comp_stats_type_mask = 0, atomic_stats_mask = 0; uint32_t stats_comp_mask = 0, stats_irq_mask = 0; uint32_t num_stats_comp_mask = vfe_dev->hw_info->stats_hw_info->num_stats_comp_mask; stats_comp_mask = vfe_dev->hw_info->vfe_ops.stats_ops. get_comp_mask(irq_status0, irq_status1); stats_irq_mask = vfe_dev->hw_info->vfe_ops.stats_ops. get_wm_mask(irq_status0, irq_status1); if (!(stats_comp_mask || stats_irq_mask)) return; ISP_DBG("%s: status: 0x%x\n", __func__, irq_status0); /* * If any of composite mask is set, clear irq bits from mask, * they will be restored by comp mask */ if (stats_comp_mask) { for (j = 0; j < num_stats_comp_mask; j++) { stats_irq_mask &= ~atomic_read( &vfe_dev->stats_data.stats_comp_mask[j]); } } for (j = 0; j < num_stats_comp_mask; j++) { atomic_stats_mask = atomic_read( &vfe_dev->stats_data.stats_comp_mask[j]); if (!stats_comp_mask) { stats_irq_mask &= ~atomic_stats_mask; } else { /* restore irq bits from composite mask */ if (stats_comp_mask & (1 << j)) stats_irq_mask |= atomic_stats_mask; } /* if no irq bits set from this composite mask continue*/ if (!stats_irq_mask) continue; memset(&buf_event, 0, sizeof(struct msm_isp_event_data)); buf_event.timestamp = ts->event_time; buf_event.frame_id = vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id; buf_event.input_intf = VFE_PIX_0; pingpong_status = vfe_dev->hw_info-> vfe_ops.stats_ops.get_pingpong_status(vfe_dev); for (i = 0; i < vfe_dev->hw_info->stats_hw_info->num_stats_type; i++) { if (!(stats_irq_mask & (1 << i))) continue; stats_irq_mask &= 
~(1 << i); stream_info = &vfe_dev->stats_data.stream_info[i]; done_buf = NULL; msm_isp_stats_cfg_ping_pong_address(vfe_dev, stream_info, pingpong_status, &done_buf); if (done_buf) { rc = vfe_dev->buf_mgr->ops->buf_divert( vfe_dev->buf_mgr, done_buf->bufq_handle, done_buf->buf_idx, &ts->buf_time, vfe_dev->axi_data. src_info[VFE_PIX_0].frame_id); if (rc != 0) continue; stats_event->stats_buf_idxs [stream_info->stats_type] = done_buf->buf_idx; if (!stream_info->composite_flag) { stats_event->stats_mask = 1 << stream_info->stats_type; ISP_DBG("%s: stats frameid: 0x%x %d\n", __func__, buf_event.frame_id, stream_info->stats_type); msm_isp_send_event(vfe_dev, ISP_EVENT_STATS_NOTIFY + stream_info->stats_type, &buf_event); } else { comp_stats_type_mask |= 1 << stream_info->stats_type; } } } if (comp_stats_type_mask) { ISP_DBG("%s: comp_stats frameid: 0x%x, 0x%x\n", __func__, buf_event.frame_id, comp_stats_type_mask); stats_event->stats_mask = comp_stats_type_mask; msm_isp_send_event(vfe_dev, ISP_EVENT_COMP_STATS_NOTIFY, &buf_event); comp_stats_type_mask = 0; } } } int msm_isp_stats_create_stream(struct vfe_device *vfe_dev, struct msm_vfe_stats_stream_request_cmd *stream_req_cmd) { int rc = -1; struct msm_vfe_stats_stream *stream_info = NULL; struct msm_vfe_stats_shared_data *stats_data = &vfe_dev->stats_data; uint32_t stats_idx; if (!(vfe_dev->hw_info->stats_hw_info->stats_capability_mask & (1 << stream_req_cmd->stats_type))) { pr_err("%s: Stats type not supported\n", __func__); return rc; } stats_idx = vfe_dev->hw_info->vfe_ops.stats_ops. 
get_stats_idx(stream_req_cmd->stats_type); if (stats_idx >= vfe_dev->hw_info->stats_hw_info->num_stats_type) { pr_err("%s Invalid stats index %d", __func__, stats_idx); return -EINVAL; } stream_info = &stats_data->stream_info[stats_idx]; if (stream_info->state != STATS_AVALIABLE) { pr_err("%s: Stats already requested\n", __func__); return rc; } if (stream_req_cmd->framedrop_pattern >= MAX_SKIP) { pr_err("%s: Invalid framedrop pattern\n", __func__); return rc; } if (stream_req_cmd->irq_subsample_pattern >= MAX_SKIP) { pr_err("%s: Invalid irq subsample pattern\n", __func__); return rc; } stream_info->session_id = stream_req_cmd->session_id; stream_info->stream_id = stream_req_cmd->stream_id; stream_info->composite_flag = stream_req_cmd->composite_flag; stream_info->stats_type = stream_req_cmd->stats_type; stream_info->buffer_offset = stream_req_cmd->buffer_offset; stream_info->framedrop_pattern = stream_req_cmd->framedrop_pattern; stream_info->init_stats_frame_drop = stream_req_cmd->init_frame_drop; stream_info->irq_subsample_pattern = stream_req_cmd->irq_subsample_pattern; stream_info->state = STATS_INACTIVE; if ((vfe_dev->stats_data.stream_handle_cnt << 8) == 0) vfe_dev->stats_data.stream_handle_cnt++; stream_req_cmd->stream_handle = (++vfe_dev->stats_data.stream_handle_cnt) << 8 | stats_idx; stream_info->stream_handle = stream_req_cmd->stream_handle; return 0; } int msm_isp_request_stats_stream(struct vfe_device *vfe_dev, void *arg) { int rc = -1; struct msm_vfe_stats_stream_request_cmd *stream_req_cmd = arg; struct msm_vfe_stats_stream *stream_info = NULL; struct msm_vfe_stats_shared_data *stats_data = &vfe_dev->stats_data; uint32_t framedrop_period; uint32_t stats_idx; rc = msm_isp_stats_create_stream(vfe_dev, stream_req_cmd); if (rc < 0) { pr_err("%s: create stream failed\n", __func__); return rc; } stats_idx = STATS_IDX(stream_req_cmd->stream_handle); if (stats_idx >= vfe_dev->hw_info->stats_hw_info->num_stats_type) { pr_err("%s Invalid stats index %d", 
__func__, stats_idx); return -EINVAL; } stream_info = &stats_data->stream_info[stats_idx]; framedrop_period = msm_isp_get_framedrop_period( stream_req_cmd->framedrop_pattern); if (stream_req_cmd->framedrop_pattern == SKIP_ALL) stream_info->framedrop_pattern = 0x0; else stream_info->framedrop_pattern = 0x1; stream_info->framedrop_period = framedrop_period - 1; if (!stream_info->composite_flag) vfe_dev->hw_info->vfe_ops.stats_ops. cfg_wm_irq_mask(vfe_dev, stream_info); if (stream_info->init_stats_frame_drop == 0) vfe_dev->hw_info->vfe_ops.stats_ops.cfg_wm_reg(vfe_dev, stream_info); return rc; } int msm_isp_release_stats_stream(struct vfe_device *vfe_dev, void *arg) { int rc = -1; struct msm_vfe_stats_stream_cfg_cmd stream_cfg_cmd; struct msm_vfe_stats_stream_release_cmd *stream_release_cmd = arg; struct msm_vfe_stats_shared_data *stats_data = &vfe_dev->stats_data; int stats_idx = STATS_IDX(stream_release_cmd->stream_handle); struct msm_vfe_stats_stream *stream_info = NULL; if (stats_idx >= vfe_dev->hw_info->stats_hw_info->num_stats_type) { pr_err("%s Invalid stats index %d", __func__, stats_idx); return -EINVAL; } stream_info = &stats_data->stream_info[stats_idx]; if (stream_info->state == STATS_AVALIABLE) { pr_err("%s: stream already release\n", __func__); return rc; } else if (stream_info->state != STATS_INACTIVE) { stream_cfg_cmd.enable = 0; stream_cfg_cmd.num_streams = 1; stream_cfg_cmd.stream_handle[0] = stream_release_cmd->stream_handle; rc = msm_isp_cfg_stats_stream(vfe_dev, &stream_cfg_cmd); } if (!stream_info->composite_flag) vfe_dev->hw_info->vfe_ops.stats_ops. 
clear_wm_irq_mask(vfe_dev, stream_info); vfe_dev->hw_info->vfe_ops.stats_ops.clear_wm_reg(vfe_dev, stream_info); memset(stream_info, 0, sizeof(struct msm_vfe_stats_stream)); return 0; } static int msm_isp_init_stats_ping_pong_reg( struct vfe_device *vfe_dev, struct msm_vfe_stats_stream *stream_info) { int rc = 0; stream_info->bufq_handle = vfe_dev->buf_mgr->ops->get_bufq_handle( vfe_dev->buf_mgr, stream_info->session_id, stream_info->stream_id); if (stream_info->bufq_handle == 0) { pr_err("%s: no buf configured for stream: 0x%x\n", __func__, stream_info->stream_handle); return -EINVAL; } rc = msm_isp_stats_cfg_ping_pong_address(vfe_dev, stream_info, VFE_PING_FLAG, NULL); if (rc < 0) { pr_err("%s: No free buffer for ping\n", __func__); return rc; } rc = msm_isp_stats_cfg_ping_pong_address(vfe_dev, stream_info, VFE_PONG_FLAG, NULL); if (rc < 0) { pr_err("%s: No free buffer for pong\n", __func__); return rc; } return rc; } static void msm_isp_deinit_stats_ping_pong_reg( struct vfe_device *vfe_dev, struct msm_vfe_stats_stream *stream_info) { int i; struct msm_isp_buffer *buf; for (i = 0; i < 2; i++) { buf = stream_info->buf[i]; if (buf) vfe_dev->buf_mgr->ops->put_buf(vfe_dev->buf_mgr, buf->bufq_handle, buf->buf_idx); } } void msm_isp_update_stats_framedrop_reg(struct vfe_device *vfe_dev) { int i; struct msm_vfe_stats_shared_data *stats_data = &vfe_dev->stats_data; struct msm_vfe_stats_stream *stream_info = NULL; for (i = 0; i < vfe_dev->hw_info->stats_hw_info->num_stats_type; i++) { stream_info = &stats_data->stream_info[i]; if (stream_info->state != STATS_ACTIVE) continue; if (stream_info->init_stats_frame_drop) { stream_info->init_stats_frame_drop--; if (stream_info->init_stats_frame_drop == 0) { vfe_dev->hw_info->vfe_ops.stats_ops.cfg_wm_reg( vfe_dev, stream_info); } } } } void msm_isp_stats_stream_update(struct vfe_device *vfe_dev) { int i; uint32_t stats_mask = 0, comp_stats_mask = 0; uint32_t enable = 0; struct msm_vfe_stats_shared_data *stats_data = 
&vfe_dev->stats_data; for (i = 0; i < vfe_dev->hw_info->stats_hw_info->num_stats_type; i++) { if (stats_data->stream_info[i].state == STATS_START_PENDING || stats_data->stream_info[i].state == STATS_STOP_PENDING) { stats_mask |= i; enable = stats_data->stream_info[i].state == STATS_START_PENDING ? 1 : 0; stats_data->stream_info[i].state = stats_data->stream_info[i].state == STATS_START_PENDING ? STATS_STARTING : STATS_STOPPING; vfe_dev->hw_info->vfe_ops.stats_ops.enable_module( vfe_dev, BIT(i), enable); vfe_dev->hw_info->vfe_ops.stats_ops.cfg_comp_mask( vfe_dev, BIT(i), enable); } else if (stats_data->stream_info[i].state == STATS_STARTING || stats_data->stream_info[i].state == STATS_STOPPING) { if (stats_data->stream_info[i].composite_flag) comp_stats_mask |= i; stats_data->stream_info[i].state = stats_data->stream_info[i].state == STATS_STARTING ? STATS_ACTIVE : STATS_INACTIVE; } } atomic_sub(1, &stats_data->stats_update); if (!atomic_read(&stats_data->stats_update)) complete(&vfe_dev->stats_config_complete); } static int msm_isp_stats_wait_for_cfg_done(struct vfe_device *vfe_dev) { int rc; init_completion(&vfe_dev->stats_config_complete); atomic_set(&vfe_dev->stats_data.stats_update, 2); rc = wait_for_completion_timeout( &vfe_dev->stats_config_complete, msecs_to_jiffies(VFE_MAX_CFG_TIMEOUT)); if (rc == 0) { pr_err("%s: wait timeout\n", __func__); rc = -1; } else { rc = 0; } return rc; } static int msm_isp_stats_update_cgc_override(struct vfe_device *vfe_dev, struct msm_vfe_stats_stream_cfg_cmd *stream_cfg_cmd) { int i; uint32_t stats_mask = 0, idx; for (i = 0; i < stream_cfg_cmd->num_streams; i++) { idx = STATS_IDX(stream_cfg_cmd->stream_handle[i]); if (idx >= vfe_dev->hw_info->stats_hw_info->num_stats_type) { pr_err("%s Invalid stats index %d", __func__, idx); return -EINVAL; } stats_mask |= 1 << idx; } if (vfe_dev->hw_info->vfe_ops.stats_ops.update_cgc_override) { vfe_dev->hw_info->vfe_ops.stats_ops.update_cgc_override( vfe_dev, stats_mask, 
stream_cfg_cmd->enable); } return 0; } static int msm_isp_start_stats_stream(struct vfe_device *vfe_dev, struct msm_vfe_stats_stream_cfg_cmd *stream_cfg_cmd) { int i, rc = 0; uint32_t stats_mask = 0, idx; uint32_t comp_stats_mask[MAX_NUM_STATS_COMP_MASK] = {0}; uint32_t num_stats_comp_mask = 0; struct msm_vfe_stats_stream *stream_info; struct msm_vfe_stats_shared_data *stats_data = &vfe_dev->stats_data; num_stats_comp_mask = vfe_dev->hw_info->stats_hw_info->num_stats_comp_mask; rc = vfe_dev->hw_info->vfe_ops.stats_ops.check_streams( stats_data->stream_info); if (rc < 0) return rc; for (i = 0; i < stream_cfg_cmd->num_streams; i++) { idx = STATS_IDX(stream_cfg_cmd->stream_handle[i]); if (idx >= vfe_dev->hw_info->stats_hw_info->num_stats_type) { pr_err("%s Invalid stats index %d", __func__, idx); return -EINVAL; } stream_info = &stats_data->stream_info[idx]; if (stream_info->stream_handle != stream_cfg_cmd->stream_handle[i]) { pr_err("%s: Invalid stream handle: 0x%x received\n", __func__, stream_cfg_cmd->stream_handle[i]); continue; } if (stream_info->composite_flag > num_stats_comp_mask) { pr_err("%s: comp grp %d exceed max %d\n", __func__, stream_info->composite_flag, num_stats_comp_mask); return -EINVAL; } rc = msm_isp_init_stats_ping_pong_reg(vfe_dev, stream_info); if (rc < 0) { pr_err("%s: No buffer for stream%d\n", __func__, idx); return rc; } if (vfe_dev->axi_data.src_info[VFE_PIX_0].active) stream_info->state = STATS_START_PENDING; else stream_info->state = STATS_ACTIVE; stats_data->num_active_stream++; stats_mask |= 1 << idx; if (stream_info->composite_flag > 0) comp_stats_mask[stream_info->composite_flag-1] |= 1 << idx; ISP_DBG("%s: stats_mask %x %x active streams %d\n", __func__, comp_stats_mask[0], comp_stats_mask[1], stats_data->num_active_stream); } if (vfe_dev->axi_data.src_info[VFE_PIX_0].active) { rc = msm_isp_stats_wait_for_cfg_done(vfe_dev); } else { vfe_dev->hw_info->vfe_ops.stats_ops.enable_module( vfe_dev, stats_mask, stream_cfg_cmd->enable); for 
(i = 0; i < num_stats_comp_mask; i++) { vfe_dev->hw_info->vfe_ops.stats_ops.cfg_comp_mask( vfe_dev, comp_stats_mask[i], 1); } } return rc; } static int msm_isp_stop_stats_stream(struct vfe_device *vfe_dev, struct msm_vfe_stats_stream_cfg_cmd *stream_cfg_cmd) { int i, rc = 0; uint32_t stats_mask = 0, idx; uint32_t comp_stats_mask[MAX_NUM_STATS_COMP_MASK] = {0}; uint32_t num_stats_comp_mask = 0; struct msm_vfe_stats_stream *stream_info; struct msm_vfe_stats_shared_data *stats_data = &vfe_dev->stats_data; num_stats_comp_mask = vfe_dev->hw_info->stats_hw_info->num_stats_comp_mask; for (i = 0; i < stream_cfg_cmd->num_streams; i++) { idx = STATS_IDX(stream_cfg_cmd->stream_handle[i]); if (idx >= vfe_dev->hw_info->stats_hw_info->num_stats_type) { pr_err("%s Invalid stats index %d", __func__, idx); return -EINVAL; } stream_info = &stats_data->stream_info[idx]; if (stream_info->stream_handle != stream_cfg_cmd->stream_handle[i]) { pr_err("%s: Invalid stream handle: 0x%x received\n", __func__, stream_cfg_cmd->stream_handle[i]); continue; } if (stream_info->composite_flag > num_stats_comp_mask) { pr_err("%s: comp grp %d exceed max %d\n", __func__, stream_info->composite_flag, num_stats_comp_mask); return -EINVAL; } if (vfe_dev->axi_data.src_info[VFE_PIX_0].active) stream_info->state = STATS_STOP_PENDING; else stream_info->state = STATS_INACTIVE; stats_data->num_active_stream--; stats_mask |= 1 << idx; if (stream_info->composite_flag > 0) comp_stats_mask[stream_info->composite_flag-1] |= 1 << idx; ISP_DBG("%s: stats_mask %x %x active streams %d\n", __func__, comp_stats_mask[0], comp_stats_mask[1], stats_data->num_active_stream); } if (vfe_dev->axi_data.src_info[VFE_PIX_0].active) { rc = msm_isp_stats_wait_for_cfg_done(vfe_dev); } else { vfe_dev->hw_info->vfe_ops.stats_ops.enable_module( vfe_dev, stats_mask, stream_cfg_cmd->enable); for (i = 0; i < num_stats_comp_mask; i++) { vfe_dev->hw_info->vfe_ops.stats_ops.cfg_comp_mask( vfe_dev, comp_stats_mask[i], 0); } } for (i = 0; i < 
stream_cfg_cmd->num_streams; i++) { idx = STATS_IDX(stream_cfg_cmd->stream_handle[i]); if (idx >= vfe_dev->hw_info->stats_hw_info->num_stats_type) { pr_err("%s Invalid stats index %d", __func__, idx); return -EINVAL; } stream_info = &stats_data->stream_info[idx]; msm_isp_deinit_stats_ping_pong_reg(vfe_dev, stream_info); } return rc; } int msm_isp_cfg_stats_stream(struct vfe_device *vfe_dev, void *arg) { int rc = 0; struct msm_vfe_stats_stream_cfg_cmd *stream_cfg_cmd = arg; if (vfe_dev->stats_data.num_active_stream == 0) vfe_dev->hw_info->vfe_ops.stats_ops.cfg_ub(vfe_dev); if (stream_cfg_cmd->enable) { msm_isp_stats_update_cgc_override(vfe_dev, stream_cfg_cmd); rc = msm_isp_start_stats_stream(vfe_dev, stream_cfg_cmd); } else { rc = msm_isp_stop_stats_stream(vfe_dev, stream_cfg_cmd); msm_isp_stats_update_cgc_override(vfe_dev, stream_cfg_cmd); } return rc; } int msm_isp_update_stats_stream(struct vfe_device *vfe_dev, void *arg) { int rc = 0, i; struct msm_vfe_stats_stream *stream_info; struct msm_vfe_stats_shared_data *stats_data = &vfe_dev->stats_data; struct msm_vfe_axi_stream_update_cmd *update_cmd = arg; struct msm_vfe_axi_stream_cfg_update_info *update_info = NULL; /*validate request*/ for (i = 0; i < update_cmd->num_streams; i++) { update_info = &update_cmd->update_info[i]; /*check array reference bounds*/ if (STATS_IDX(update_info->stream_handle) > vfe_dev->hw_info->stats_hw_info->num_stats_type) { pr_err("%s: stats idx %d out of bound!", __func__, STATS_IDX(update_info->stream_handle)); return -EINVAL; } } for (i = 0; i < update_cmd->num_streams; i++) { update_info = &update_cmd->update_info[i]; stream_info = &stats_data->stream_info[ STATS_IDX(update_info->stream_handle)]; if (stream_info->stream_handle != update_info->stream_handle) { pr_err("%s: stats stream handle %x %x mismatch!\n", __func__, stream_info->stream_handle, update_info->stream_handle); continue; } switch (update_cmd->update_type) { case UPDATE_STREAM_STATS_FRAMEDROP_PATTERN: { uint32_t 
framedrop_period = msm_isp_get_framedrop_period( update_info->skip_pattern); if (update_info->skip_pattern == SKIP_ALL) stream_info->framedrop_pattern = 0x0; else stream_info->framedrop_pattern = 0x1; stream_info->framedrop_period = framedrop_period - 1; if (stream_info->init_stats_frame_drop == 0) vfe_dev->hw_info->vfe_ops.stats_ops.cfg_wm_reg( vfe_dev, stream_info); break; } default: pr_err("%s: Invalid update type\n", __func__); return -EINVAL; } } return rc; }
gpl-2.0
Hachamacha/tf101-kernel-tegra-dev
drivers/net/sunqe.c
387
25983
/* sunqe.c: Sparc QuadEthernet 10baseT SBUS card driver. * Once again I am out to prove that every ethernet * controller out there can be most efficiently programmed * if you make it look like a LANCE. * * Copyright (C) 1996, 1999, 2003, 2006, 2008 David S. Miller (davem@davemloft.net) */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/errno.h> #include <linux/fcntl.h> #include <linux/interrupt.h> #include <linux/ioport.h> #include <linux/in.h> #include <linux/slab.h> #include <linux/string.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/crc32.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/skbuff.h> #include <linux/ethtool.h> #include <linux/bitops.h> #include <linux/dma-mapping.h> #include <linux/of.h> #include <linux/of_device.h> #include <asm/system.h> #include <asm/io.h> #include <asm/dma.h> #include <asm/byteorder.h> #include <asm/idprom.h> #include <asm/openprom.h> #include <asm/oplib.h> #include <asm/auxio.h> #include <asm/pgtable.h> #include <asm/irq.h> #include "sunqe.h" #define DRV_NAME "sunqe" #define DRV_VERSION "4.1" #define DRV_RELDATE "August 27, 2008" #define DRV_AUTHOR "David S. 
Miller (davem@davemloft.net)" static char version[] = DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " " DRV_AUTHOR "\n"; MODULE_VERSION(DRV_VERSION); MODULE_AUTHOR(DRV_AUTHOR); MODULE_DESCRIPTION("Sun QuadEthernet 10baseT SBUS card driver"); MODULE_LICENSE("GPL"); static struct sunqec *root_qec_dev; static void qe_set_multicast(struct net_device *dev); #define QEC_RESET_TRIES 200 static inline int qec_global_reset(void __iomem *gregs) { int tries = QEC_RESET_TRIES; sbus_writel(GLOB_CTRL_RESET, gregs + GLOB_CTRL); while (--tries) { u32 tmp = sbus_readl(gregs + GLOB_CTRL); if (tmp & GLOB_CTRL_RESET) { udelay(20); continue; } break; } if (tries) return 0; printk(KERN_ERR "QuadEther: AIEEE cannot reset the QEC!\n"); return -1; } #define MACE_RESET_RETRIES 200 #define QE_RESET_RETRIES 200 static inline int qe_stop(struct sunqe *qep) { void __iomem *cregs = qep->qcregs; void __iomem *mregs = qep->mregs; int tries; /* Reset the MACE, then the QEC channel. */ sbus_writeb(MREGS_BCONFIG_RESET, mregs + MREGS_BCONFIG); tries = MACE_RESET_RETRIES; while (--tries) { u8 tmp = sbus_readb(mregs + MREGS_BCONFIG); if (tmp & MREGS_BCONFIG_RESET) { udelay(20); continue; } break; } if (!tries) { printk(KERN_ERR "QuadEther: AIEEE cannot reset the MACE!\n"); return -1; } sbus_writel(CREG_CTRL_RESET, cregs + CREG_CTRL); tries = QE_RESET_RETRIES; while (--tries) { u32 tmp = sbus_readl(cregs + CREG_CTRL); if (tmp & CREG_CTRL_RESET) { udelay(20); continue; } break; } if (!tries) { printk(KERN_ERR "QuadEther: Cannot reset QE channel!\n"); return -1; } return 0; } static void qe_init_rings(struct sunqe *qep) { struct qe_init_block *qb = qep->qe_block; struct sunqe_buffers *qbufs = qep->buffers; __u32 qbufs_dvma = qep->buffers_dvma; int i; qep->rx_new = qep->rx_old = qep->tx_new = qep->tx_old = 0; memset(qb, 0, sizeof(struct qe_init_block)); memset(qbufs, 0, sizeof(struct sunqe_buffers)); for (i = 0; i < RX_RING_SIZE; i++) { qb->qe_rxd[i].rx_addr = qbufs_dvma + qebuf_offset(rx_buf, i); 
qb->qe_rxd[i].rx_flags = (RXD_OWN | ((RXD_PKT_SZ) & RXD_LENGTH)); } } static int qe_init(struct sunqe *qep, int from_irq) { struct sunqec *qecp = qep->parent; void __iomem *cregs = qep->qcregs; void __iomem *mregs = qep->mregs; void __iomem *gregs = qecp->gregs; unsigned char *e = &qep->dev->dev_addr[0]; u32 tmp; int i; /* Shut it up. */ if (qe_stop(qep)) return -EAGAIN; /* Setup initial rx/tx init block pointers. */ sbus_writel(qep->qblock_dvma + qib_offset(qe_rxd, 0), cregs + CREG_RXDS); sbus_writel(qep->qblock_dvma + qib_offset(qe_txd, 0), cregs + CREG_TXDS); /* Enable/mask the various irq's. */ sbus_writel(0, cregs + CREG_RIMASK); sbus_writel(1, cregs + CREG_TIMASK); sbus_writel(0, cregs + CREG_QMASK); sbus_writel(CREG_MMASK_RXCOLL, cregs + CREG_MMASK); /* Setup the FIFO pointers into QEC local memory. */ tmp = qep->channel * sbus_readl(gregs + GLOB_MSIZE); sbus_writel(tmp, cregs + CREG_RXRBUFPTR); sbus_writel(tmp, cregs + CREG_RXWBUFPTR); tmp = sbus_readl(cregs + CREG_RXRBUFPTR) + sbus_readl(gregs + GLOB_RSIZE); sbus_writel(tmp, cregs + CREG_TXRBUFPTR); sbus_writel(tmp, cregs + CREG_TXWBUFPTR); /* Clear the channel collision counter. */ sbus_writel(0, cregs + CREG_CCNT); /* For 10baseT, inter frame space nor throttle seems to be necessary. */ sbus_writel(0, cregs + CREG_PIPG); /* Now dork with the AMD MACE. */ sbus_writeb(MREGS_PHYCONFIG_AUTO, mregs + MREGS_PHYCONFIG); sbus_writeb(MREGS_TXFCNTL_AUTOPAD, mregs + MREGS_TXFCNTL); sbus_writeb(0, mregs + MREGS_RXFCNTL); /* The QEC dma's the rx'd packets from local memory out to main memory, * and therefore it interrupts when the packet reception is "complete". * So don't listen for the MACE talking about it. 
*/ sbus_writeb(MREGS_IMASK_COLL | MREGS_IMASK_RXIRQ, mregs + MREGS_IMASK); sbus_writeb(MREGS_BCONFIG_BSWAP | MREGS_BCONFIG_64TS, mregs + MREGS_BCONFIG); sbus_writeb((MREGS_FCONFIG_TXF16 | MREGS_FCONFIG_RXF32 | MREGS_FCONFIG_RFWU | MREGS_FCONFIG_TFWU), mregs + MREGS_FCONFIG); /* Only usable interface on QuadEther is twisted pair. */ sbus_writeb(MREGS_PLSCONFIG_TP, mregs + MREGS_PLSCONFIG); /* Tell MACE we are changing the ether address. */ sbus_writeb(MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_PARESET, mregs + MREGS_IACONFIG); while ((sbus_readb(mregs + MREGS_IACONFIG) & MREGS_IACONFIG_ACHNGE) != 0) barrier(); sbus_writeb(e[0], mregs + MREGS_ETHADDR); sbus_writeb(e[1], mregs + MREGS_ETHADDR); sbus_writeb(e[2], mregs + MREGS_ETHADDR); sbus_writeb(e[3], mregs + MREGS_ETHADDR); sbus_writeb(e[4], mregs + MREGS_ETHADDR); sbus_writeb(e[5], mregs + MREGS_ETHADDR); /* Clear out the address filter. */ sbus_writeb(MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_LARESET, mregs + MREGS_IACONFIG); while ((sbus_readb(mregs + MREGS_IACONFIG) & MREGS_IACONFIG_ACHNGE) != 0) barrier(); for (i = 0; i < 8; i++) sbus_writeb(0, mregs + MREGS_FILTER); /* Address changes are now complete. */ sbus_writeb(0, mregs + MREGS_IACONFIG); qe_init_rings(qep); /* Wait a little bit for the link to come up... */ mdelay(5); if (!(sbus_readb(mregs + MREGS_PHYCONFIG) & MREGS_PHYCONFIG_LTESTDIS)) { int tries = 50; while (--tries) { u8 tmp; mdelay(5); barrier(); tmp = sbus_readb(mregs + MREGS_PHYCONFIG); if ((tmp & MREGS_PHYCONFIG_LSTAT) != 0) break; } if (tries == 0) printk(KERN_NOTICE "%s: Warning, link state is down.\n", qep->dev->name); } /* Missed packet counter is cleared on a read. */ sbus_readb(mregs + MREGS_MPCNT); /* Reload multicast information, this will enable the receiver * and transmitter. */ qe_set_multicast(qep->dev); /* QEC should now start to show interrupts. */ return 0; } /* Grrr, certain error conditions completely lock up the AMD MACE, * so when we get these we _must_ reset the chip. 
*/ static int qe_is_bolixed(struct sunqe *qep, u32 qe_status) { struct net_device *dev = qep->dev; int mace_hwbug_workaround = 0; if (qe_status & CREG_STAT_EDEFER) { printk(KERN_ERR "%s: Excessive transmit defers.\n", dev->name); dev->stats.tx_errors++; } if (qe_status & CREG_STAT_CLOSS) { printk(KERN_ERR "%s: Carrier lost, link down?\n", dev->name); dev->stats.tx_errors++; dev->stats.tx_carrier_errors++; } if (qe_status & CREG_STAT_ERETRIES) { printk(KERN_ERR "%s: Excessive transmit retries (more than 16).\n", dev->name); dev->stats.tx_errors++; mace_hwbug_workaround = 1; } if (qe_status & CREG_STAT_LCOLL) { printk(KERN_ERR "%s: Late transmit collision.\n", dev->name); dev->stats.tx_errors++; dev->stats.collisions++; mace_hwbug_workaround = 1; } if (qe_status & CREG_STAT_FUFLOW) { printk(KERN_ERR "%s: Transmit fifo underflow, driver bug.\n", dev->name); dev->stats.tx_errors++; mace_hwbug_workaround = 1; } if (qe_status & CREG_STAT_JERROR) { printk(KERN_ERR "%s: Jabber error.\n", dev->name); } if (qe_status & CREG_STAT_BERROR) { printk(KERN_ERR "%s: Babble error.\n", dev->name); } if (qe_status & CREG_STAT_CCOFLOW) { dev->stats.tx_errors += 256; dev->stats.collisions += 256; } if (qe_status & CREG_STAT_TXDERROR) { printk(KERN_ERR "%s: Transmit descriptor is bogus, driver bug.\n", dev->name); dev->stats.tx_errors++; dev->stats.tx_aborted_errors++; mace_hwbug_workaround = 1; } if (qe_status & CREG_STAT_TXLERR) { printk(KERN_ERR "%s: Transmit late error.\n", dev->name); dev->stats.tx_errors++; mace_hwbug_workaround = 1; } if (qe_status & CREG_STAT_TXPERR) { printk(KERN_ERR "%s: Transmit DMA parity error.\n", dev->name); dev->stats.tx_errors++; dev->stats.tx_aborted_errors++; mace_hwbug_workaround = 1; } if (qe_status & CREG_STAT_TXSERR) { printk(KERN_ERR "%s: Transmit DMA sbus error ack.\n", dev->name); dev->stats.tx_errors++; dev->stats.tx_aborted_errors++; mace_hwbug_workaround = 1; } if (qe_status & CREG_STAT_RCCOFLOW) { dev->stats.rx_errors += 256; 
dev->stats.collisions += 256; } if (qe_status & CREG_STAT_RUOFLOW) { dev->stats.rx_errors += 256; dev->stats.rx_over_errors += 256; } if (qe_status & CREG_STAT_MCOFLOW) { dev->stats.rx_errors += 256; dev->stats.rx_missed_errors += 256; } if (qe_status & CREG_STAT_RXFOFLOW) { printk(KERN_ERR "%s: Receive fifo overflow.\n", dev->name); dev->stats.rx_errors++; dev->stats.rx_over_errors++; } if (qe_status & CREG_STAT_RLCOLL) { printk(KERN_ERR "%s: Late receive collision.\n", dev->name); dev->stats.rx_errors++; dev->stats.collisions++; } if (qe_status & CREG_STAT_FCOFLOW) { dev->stats.rx_errors += 256; dev->stats.rx_frame_errors += 256; } if (qe_status & CREG_STAT_CECOFLOW) { dev->stats.rx_errors += 256; dev->stats.rx_crc_errors += 256; } if (qe_status & CREG_STAT_RXDROP) { printk(KERN_ERR "%s: Receive packet dropped.\n", dev->name); dev->stats.rx_errors++; dev->stats.rx_dropped++; dev->stats.rx_missed_errors++; } if (qe_status & CREG_STAT_RXSMALL) { printk(KERN_ERR "%s: Receive buffer too small, driver bug.\n", dev->name); dev->stats.rx_errors++; dev->stats.rx_length_errors++; } if (qe_status & CREG_STAT_RXLERR) { printk(KERN_ERR "%s: Receive late error.\n", dev->name); dev->stats.rx_errors++; mace_hwbug_workaround = 1; } if (qe_status & CREG_STAT_RXPERR) { printk(KERN_ERR "%s: Receive DMA parity error.\n", dev->name); dev->stats.rx_errors++; dev->stats.rx_missed_errors++; mace_hwbug_workaround = 1; } if (qe_status & CREG_STAT_RXSERR) { printk(KERN_ERR "%s: Receive DMA sbus error ack.\n", dev->name); dev->stats.rx_errors++; dev->stats.rx_missed_errors++; mace_hwbug_workaround = 1; } if (mace_hwbug_workaround) qe_init(qep, 1); return mace_hwbug_workaround; } /* Per-QE receive interrupt service routine. Just like on the happy meal * we receive directly into skb's with a small packet copy water mark. 
*/ static void qe_rx(struct sunqe *qep) { struct qe_rxd *rxbase = &qep->qe_block->qe_rxd[0]; struct net_device *dev = qep->dev; struct qe_rxd *this; struct sunqe_buffers *qbufs = qep->buffers; __u32 qbufs_dvma = qep->buffers_dvma; int elem = qep->rx_new, drops = 0; u32 flags; this = &rxbase[elem]; while (!((flags = this->rx_flags) & RXD_OWN)) { struct sk_buff *skb; unsigned char *this_qbuf = &qbufs->rx_buf[elem & (RX_RING_SIZE - 1)][0]; __u32 this_qbuf_dvma = qbufs_dvma + qebuf_offset(rx_buf, (elem & (RX_RING_SIZE - 1))); struct qe_rxd *end_rxd = &rxbase[(elem+RX_RING_SIZE)&(RX_RING_MAXSIZE-1)]; int len = (flags & RXD_LENGTH) - 4; /* QE adds ether FCS size to len */ /* Check for errors. */ if (len < ETH_ZLEN) { dev->stats.rx_errors++; dev->stats.rx_length_errors++; dev->stats.rx_dropped++; } else { skb = dev_alloc_skb(len + 2); if (skb == NULL) { drops++; dev->stats.rx_dropped++; } else { skb_reserve(skb, 2); skb_put(skb, len); skb_copy_to_linear_data(skb, (unsigned char *) this_qbuf, len); skb->protocol = eth_type_trans(skb, qep->dev); netif_rx(skb); dev->stats.rx_packets++; dev->stats.rx_bytes += len; } } end_rxd->rx_addr = this_qbuf_dvma; end_rxd->rx_flags = (RXD_OWN | ((RXD_PKT_SZ) & RXD_LENGTH)); elem = NEXT_RX(elem); this = &rxbase[elem]; } qep->rx_new = elem; if (drops) printk(KERN_NOTICE "%s: Memory squeeze, deferring packet.\n", qep->dev->name); } static void qe_tx_reclaim(struct sunqe *qep); /* Interrupts for all QE's get filtered out via the QEC master controller, * so we just run through each qe and check to see who is signaling * and thus needs to be serviced. */ static irqreturn_t qec_interrupt(int irq, void *dev_id) { struct sunqec *qecp = dev_id; u32 qec_status; int channel = 0; /* Latch the status now. 
*/ qec_status = sbus_readl(qecp->gregs + GLOB_STAT); while (channel < 4) { if (qec_status & 0xf) { struct sunqe *qep = qecp->qes[channel]; u32 qe_status; qe_status = sbus_readl(qep->qcregs + CREG_STAT); if (qe_status & CREG_STAT_ERRORS) { if (qe_is_bolixed(qep, qe_status)) goto next; } if (qe_status & CREG_STAT_RXIRQ) qe_rx(qep); if (netif_queue_stopped(qep->dev) && (qe_status & CREG_STAT_TXIRQ)) { spin_lock(&qep->lock); qe_tx_reclaim(qep); if (TX_BUFFS_AVAIL(qep) > 0) { /* Wake net queue and return to * lazy tx reclaim. */ netif_wake_queue(qep->dev); sbus_writel(1, qep->qcregs + CREG_TIMASK); } spin_unlock(&qep->lock); } next: ; } qec_status >>= 4; channel++; } return IRQ_HANDLED; } static int qe_open(struct net_device *dev) { struct sunqe *qep = netdev_priv(dev); qep->mconfig = (MREGS_MCONFIG_TXENAB | MREGS_MCONFIG_RXENAB | MREGS_MCONFIG_MBAENAB); return qe_init(qep, 0); } static int qe_close(struct net_device *dev) { struct sunqe *qep = netdev_priv(dev); qe_stop(qep); return 0; } /* Reclaim TX'd frames from the ring. This must always run under * the IRQ protected qep->lock. */ static void qe_tx_reclaim(struct sunqe *qep) { struct qe_txd *txbase = &qep->qe_block->qe_txd[0]; int elem = qep->tx_old; while (elem != qep->tx_new) { u32 flags = txbase[elem].tx_flags; if (flags & TXD_OWN) break; elem = NEXT_TX(elem); } qep->tx_old = elem; } static void qe_tx_timeout(struct net_device *dev) { struct sunqe *qep = netdev_priv(dev); int tx_full; spin_lock_irq(&qep->lock); /* Try to reclaim, if that frees up some tx * entries, we're fine. */ qe_tx_reclaim(qep); tx_full = TX_BUFFS_AVAIL(qep) <= 0; spin_unlock_irq(&qep->lock); if (! tx_full) goto out; printk(KERN_ERR "%s: transmit timed out, resetting\n", dev->name); qe_init(qep, 1); out: netif_wake_queue(dev); } /* Get a packet queued to go onto the wire. 
*/ static int qe_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct sunqe *qep = netdev_priv(dev); struct sunqe_buffers *qbufs = qep->buffers; __u32 txbuf_dvma, qbufs_dvma = qep->buffers_dvma; unsigned char *txbuf; int len, entry; spin_lock_irq(&qep->lock); qe_tx_reclaim(qep); len = skb->len; entry = qep->tx_new; txbuf = &qbufs->tx_buf[entry & (TX_RING_SIZE - 1)][0]; txbuf_dvma = qbufs_dvma + qebuf_offset(tx_buf, (entry & (TX_RING_SIZE - 1))); /* Avoid a race... */ qep->qe_block->qe_txd[entry].tx_flags = TXD_UPDATE; skb_copy_from_linear_data(skb, txbuf, len); qep->qe_block->qe_txd[entry].tx_addr = txbuf_dvma; qep->qe_block->qe_txd[entry].tx_flags = (TXD_OWN | TXD_SOP | TXD_EOP | (len & TXD_LENGTH)); qep->tx_new = NEXT_TX(entry); /* Get it going. */ sbus_writel(CREG_CTRL_TWAKEUP, qep->qcregs + CREG_CTRL); dev->stats.tx_packets++; dev->stats.tx_bytes += len; if (TX_BUFFS_AVAIL(qep) <= 0) { /* Halt the net queue and enable tx interrupts. * When the tx queue empties the tx irq handler * will wake up the queue and return us back to * the lazy tx reclaim scheme. */ netif_stop_queue(dev); sbus_writel(0, qep->qcregs + CREG_TIMASK); } spin_unlock_irq(&qep->lock); dev_kfree_skb(skb); return NETDEV_TX_OK; } static void qe_set_multicast(struct net_device *dev) { struct sunqe *qep = netdev_priv(dev); struct netdev_hw_addr *ha; u8 new_mconfig = qep->mconfig; int i; u32 crc; /* Lock out others. 
*/ netif_stop_queue(dev); if ((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 64)) { sbus_writeb(MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_LARESET, qep->mregs + MREGS_IACONFIG); while ((sbus_readb(qep->mregs + MREGS_IACONFIG) & MREGS_IACONFIG_ACHNGE) != 0) barrier(); for (i = 0; i < 8; i++) sbus_writeb(0xff, qep->mregs + MREGS_FILTER); sbus_writeb(0, qep->mregs + MREGS_IACONFIG); } else if (dev->flags & IFF_PROMISC) { new_mconfig |= MREGS_MCONFIG_PROMISC; } else { u16 hash_table[4]; u8 *hbytes = (unsigned char *) &hash_table[0]; memset(hash_table, 0, sizeof(hash_table)); netdev_for_each_mc_addr(ha, dev) { crc = ether_crc_le(6, ha->addr); crc >>= 26; hash_table[crc >> 4] |= 1 << (crc & 0xf); } /* Program the qe with the new filter value. */ sbus_writeb(MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_LARESET, qep->mregs + MREGS_IACONFIG); while ((sbus_readb(qep->mregs + MREGS_IACONFIG) & MREGS_IACONFIG_ACHNGE) != 0) barrier(); for (i = 0; i < 8; i++) { u8 tmp = *hbytes++; sbus_writeb(tmp, qep->mregs + MREGS_FILTER); } sbus_writeb(0, qep->mregs + MREGS_IACONFIG); } /* Any change of the logical address filter, the physical address, * or enabling/disabling promiscuous mode causes the MACE to disable * the receiver. So we must re-enable them here or else the MACE * refuses to listen to anything on the network. Sheesh, took * me a day or two to find this bug. */ qep->mconfig = new_mconfig; sbus_writeb(qep->mconfig, qep->mregs + MREGS_MCONFIG); /* Let us get going again. */ netif_wake_queue(dev); } /* Ethtool support... 
*/ static void qe_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { const struct linux_prom_registers *regs; struct sunqe *qep = netdev_priv(dev); struct platform_device *op; strcpy(info->driver, "sunqe"); strcpy(info->version, "3.0"); op = qep->op; regs = of_get_property(op->dev.of_node, "reg", NULL); if (regs) sprintf(info->bus_info, "SBUS:%d", regs->which_io); } static u32 qe_get_link(struct net_device *dev) { struct sunqe *qep = netdev_priv(dev); void __iomem *mregs = qep->mregs; u8 phyconfig; spin_lock_irq(&qep->lock); phyconfig = sbus_readb(mregs + MREGS_PHYCONFIG); spin_unlock_irq(&qep->lock); return phyconfig & MREGS_PHYCONFIG_LSTAT; } static const struct ethtool_ops qe_ethtool_ops = { .get_drvinfo = qe_get_drvinfo, .get_link = qe_get_link, }; /* This is only called once at boot time for each card probed. */ static void qec_init_once(struct sunqec *qecp, struct platform_device *op) { u8 bsizes = qecp->qec_bursts; if (sbus_can_burst64() && (bsizes & DMA_BURST64)) { sbus_writel(GLOB_CTRL_B64, qecp->gregs + GLOB_CTRL); } else if (bsizes & DMA_BURST32) { sbus_writel(GLOB_CTRL_B32, qecp->gregs + GLOB_CTRL); } else { sbus_writel(GLOB_CTRL_B16, qecp->gregs + GLOB_CTRL); } /* Packetsize only used in 100baseT BigMAC configurations, * set it to zero just to be on the safe side. */ sbus_writel(GLOB_PSIZE_2048, qecp->gregs + GLOB_PSIZE); /* Set the local memsize register, divided up to one piece per QE channel. */ sbus_writel((resource_size(&op->resource[1]) >> 2), qecp->gregs + GLOB_MSIZE); /* Divide up the local QEC memory amongst the 4 QE receiver and * transmitter FIFOs. Basically it is (total / 2 / num_channels). 
*/ sbus_writel((resource_size(&op->resource[1]) >> 2) >> 1, qecp->gregs + GLOB_TSIZE); sbus_writel((resource_size(&op->resource[1]) >> 2) >> 1, qecp->gregs + GLOB_RSIZE); } static u8 __devinit qec_get_burst(struct device_node *dp) { u8 bsizes, bsizes_more; /* Find and set the burst sizes for the QEC, since it * does the actual dma for all 4 channels. */ bsizes = of_getintprop_default(dp, "burst-sizes", 0xff); bsizes &= 0xff; bsizes_more = of_getintprop_default(dp->parent, "burst-sizes", 0xff); if (bsizes_more != 0xff) bsizes &= bsizes_more; if (bsizes == 0xff || (bsizes & DMA_BURST16) == 0 || (bsizes & DMA_BURST32)==0) bsizes = (DMA_BURST32 - 1); return bsizes; } static struct sunqec * __devinit get_qec(struct platform_device *child) { struct platform_device *op = to_platform_device(child->dev.parent); struct sunqec *qecp; qecp = dev_get_drvdata(&op->dev); if (!qecp) { qecp = kzalloc(sizeof(struct sunqec), GFP_KERNEL); if (qecp) { u32 ctrl; qecp->op = op; qecp->gregs = of_ioremap(&op->resource[0], 0, GLOB_REG_SIZE, "QEC Global Registers"); if (!qecp->gregs) goto fail; /* Make sure the QEC is in MACE mode. 
*/ ctrl = sbus_readl(qecp->gregs + GLOB_CTRL); ctrl &= 0xf0000000; if (ctrl != GLOB_CTRL_MMODE) { printk(KERN_ERR "qec: Not in MACE mode!\n"); goto fail; } if (qec_global_reset(qecp->gregs)) goto fail; qecp->qec_bursts = qec_get_burst(op->dev.of_node); qec_init_once(qecp, op); if (request_irq(op->archdata.irqs[0], qec_interrupt, IRQF_SHARED, "qec", (void *) qecp)) { printk(KERN_ERR "qec: Can't register irq.\n"); goto fail; } dev_set_drvdata(&op->dev, qecp); qecp->next_module = root_qec_dev; root_qec_dev = qecp; } } return qecp; fail: if (qecp->gregs) of_iounmap(&op->resource[0], qecp->gregs, GLOB_REG_SIZE); kfree(qecp); return NULL; } static const struct net_device_ops qec_ops = { .ndo_open = qe_open, .ndo_stop = qe_close, .ndo_start_xmit = qe_start_xmit, .ndo_set_multicast_list = qe_set_multicast, .ndo_tx_timeout = qe_tx_timeout, .ndo_change_mtu = eth_change_mtu, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, }; static int __devinit qec_ether_init(struct platform_device *op) { static unsigned version_printed; struct net_device *dev; struct sunqec *qecp; struct sunqe *qe; int i, res; if (version_printed++ == 0) printk(KERN_INFO "%s", version); dev = alloc_etherdev(sizeof(struct sunqe)); if (!dev) return -ENOMEM; memcpy(dev->dev_addr, idprom->id_ethaddr, 6); qe = netdev_priv(dev); res = -ENODEV; i = of_getintprop_default(op->dev.of_node, "channel#", -1); if (i == -1) goto fail; qe->channel = i; spin_lock_init(&qe->lock); qecp = get_qec(op); if (!qecp) goto fail; qecp->qes[qe->channel] = qe; qe->dev = dev; qe->parent = qecp; qe->op = op; res = -ENOMEM; qe->qcregs = of_ioremap(&op->resource[0], 0, CREG_REG_SIZE, "QEC Channel Registers"); if (!qe->qcregs) { printk(KERN_ERR "qe: Cannot map channel registers.\n"); goto fail; } qe->mregs = of_ioremap(&op->resource[1], 0, MREGS_REG_SIZE, "QE MACE Registers"); if (!qe->mregs) { printk(KERN_ERR "qe: Cannot map MACE registers.\n"); goto fail; } qe->qe_block = dma_alloc_coherent(&op->dev, 
PAGE_SIZE, &qe->qblock_dvma, GFP_ATOMIC); qe->buffers = dma_alloc_coherent(&op->dev, sizeof(struct sunqe_buffers), &qe->buffers_dvma, GFP_ATOMIC); if (qe->qe_block == NULL || qe->qblock_dvma == 0 || qe->buffers == NULL || qe->buffers_dvma == 0) goto fail; /* Stop this QE. */ qe_stop(qe); SET_NETDEV_DEV(dev, &op->dev); dev->watchdog_timeo = 5*HZ; dev->irq = op->archdata.irqs[0]; dev->dma = 0; dev->ethtool_ops = &qe_ethtool_ops; dev->netdev_ops = &qec_ops; res = register_netdev(dev); if (res) goto fail; dev_set_drvdata(&op->dev, qe); printk(KERN_INFO "%s: qe channel[%d] ", dev->name, qe->channel); for (i = 0; i < 6; i++) printk ("%2.2x%c", dev->dev_addr[i], i == 5 ? ' ': ':'); printk("\n"); return 0; fail: if (qe->qcregs) of_iounmap(&op->resource[0], qe->qcregs, CREG_REG_SIZE); if (qe->mregs) of_iounmap(&op->resource[1], qe->mregs, MREGS_REG_SIZE); if (qe->qe_block) dma_free_coherent(&op->dev, PAGE_SIZE, qe->qe_block, qe->qblock_dvma); if (qe->buffers) dma_free_coherent(&op->dev, sizeof(struct sunqe_buffers), qe->buffers, qe->buffers_dvma); free_netdev(dev); return res; } static int __devinit qec_sbus_probe(struct platform_device *op) { return qec_ether_init(op); } static int __devexit qec_sbus_remove(struct platform_device *op) { struct sunqe *qp = dev_get_drvdata(&op->dev); struct net_device *net_dev = qp->dev; unregister_netdev(net_dev); of_iounmap(&op->resource[0], qp->qcregs, CREG_REG_SIZE); of_iounmap(&op->resource[1], qp->mregs, MREGS_REG_SIZE); dma_free_coherent(&op->dev, PAGE_SIZE, qp->qe_block, qp->qblock_dvma); dma_free_coherent(&op->dev, sizeof(struct sunqe_buffers), qp->buffers, qp->buffers_dvma); free_netdev(net_dev); dev_set_drvdata(&op->dev, NULL); return 0; } static const struct of_device_id qec_sbus_match[] = { { .name = "qe", }, {}, }; MODULE_DEVICE_TABLE(of, qec_sbus_match); static struct platform_driver qec_sbus_driver = { .driver = { .name = "qec", .owner = THIS_MODULE, .of_match_table = qec_sbus_match, }, .probe = qec_sbus_probe, .remove = 
__devexit_p(qec_sbus_remove), }; static int __init qec_init(void) { return platform_driver_register(&qec_sbus_driver); } static void __exit qec_exit(void) { platform_driver_unregister(&qec_sbus_driver); while (root_qec_dev) { struct sunqec *next = root_qec_dev->next_module; struct platform_device *op = root_qec_dev->op; free_irq(op->archdata.irqs[0], (void *) root_qec_dev); of_iounmap(&op->resource[0], root_qec_dev->gregs, GLOB_REG_SIZE); kfree(root_qec_dev); root_qec_dev = next; } } module_init(qec_init); module_exit(qec_exit);
gpl-2.0
Split-Screen/android_kernel_samsung_msm7x30-common
drivers/staging/prima/CORE/MAC/src/pe/sch/schBeaconGen.c
387
35880
/* * Copyright (c) 2012, The Linux Foundation. All rights reserved. * * Previously licensed under the ISC license by Qualcomm Atheros, Inc. * * * Permission to use, copy, modify, and/or distribute this software for * any purpose with or without fee is hereby granted, provided that the * above copyright notice and this permission notice appear in all * copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR * PERFORMANCE OF THIS SOFTWARE. */ /* * Airgo Networks, Inc proprietary. All rights reserved. * This file schBeaconGen.cc contains beacon generation related * functions * * Author: Sandesh Goel * Date: 02/25/02 * History:- * Date Modified by Modification Information * -------------------------------------------------------------------- * */ #include "palTypes.h" #include "wniCfgAp.h" #include "aniGlobal.h" #include "sirMacProtDef.h" #include "limUtils.h" #include "limApi.h" #ifdef FEATURE_WLAN_NON_INTEGRATED_SOC #include "halCommonApi.h" #include "halDataStruct.h" #endif #include "halMsgApi.h" #include "cfgApi.h" #include "pmmApi.h" #include "schApi.h" #include "parserApi.h" #include "schDebug.h" // // March 15, 2006 // Temporarily (maybe for all of Alpha-1), assuming TIM = 0 // const tANI_U8 P2pOui[] = {0x50, 0x6F, 0x9A, 0x9}; #ifdef ANI_PRODUCT_TYPE_AP static void specialBeaconProcessing(tpAniSirGlobal pMac, tANI_U32 beaconSize); #endif #if defined(WLAN_SOFTAP_FEATURE) && defined(WLAN_FEATURE_P2P) tSirRetStatus schGetP2pIeOffset(tANI_U8 *pExtraIe, tANI_U32 extraIeLen, tANI_U16 *pP2pIeOffset) { tSirRetStatus status = eSIR_FAILURE; *pP2pIeOffset = 
0; // Extra IE is not present if(0 == extraIeLen) { return status; } // Calculate the P2P IE Offset do { if(*pExtraIe == 0xDD) { if(palEqualMemory(NULL, (void *)(pExtraIe+2), &P2pOui, sizeof(P2pOui))) { (*pP2pIeOffset)++; status = eSIR_SUCCESS; break; } } (*pP2pIeOffset)++; pExtraIe++; }while(--extraIeLen > 0); return status; } #endif tSirRetStatus schAppendAddnIE(tpAniSirGlobal pMac, tpPESession psessionEntry, tANI_U8 *pFrame, tANI_U32 maxBeaconSize, tANI_U32 *nBytes) { tSirRetStatus status = eSIR_FAILURE; tANI_U32 present, len; tANI_U8 addIE[WNI_CFG_PROBE_RSP_BCN_ADDNIE_DATA_LEN]; if((status = wlan_cfgGetInt(pMac, WNI_CFG_PROBE_RSP_BCN_ADDNIE_FLAG, &present)) != eSIR_SUCCESS) { limLog(pMac, LOGP, FL("Unable to get WNI_CFG_PROBE_RSP_BCN_ADDNIE_FLAG")); return status; } if(present) { if((status = wlan_cfgGetStrLen(pMac, WNI_CFG_PROBE_RSP_BCN_ADDNIE_DATA, &len)) != eSIR_SUCCESS) { limLog(pMac, LOGP, FL("Unable to get WNI_CFG_PROBE_RSP_BCN_ADDNIE_DATA length")); return status; } if(len <= WNI_CFG_PROBE_RSP_BCN_ADDNIE_DATA_LEN && len && ((len + *nBytes) <= maxBeaconSize)) { if((status = wlan_cfgGetStr(pMac, WNI_CFG_PROBE_RSP_BCN_ADDNIE_DATA, &addIE[0], &len)) == eSIR_SUCCESS) { #ifdef WLAN_FEATURE_P2P tANI_U8* pP2pIe = limGetP2pIEPtr(pMac, &addIE[0], len); if(pP2pIe != NULL) { tANI_U8 noaLen = 0; tANI_U8 noaStream[SIR_MAX_NOA_ATTR_LEN + SIR_P2P_IE_HEADER_LEN]; //get NoA attribute stream P2P IE noaLen = limGetNoaAttrStream(pMac, noaStream, psessionEntry); if(noaLen) { if(noaLen + len <= WNI_CFG_PROBE_RSP_BCN_ADDNIE_DATA_LEN) { vos_mem_copy(&addIE[len], noaStream, noaLen); len += noaLen; /* Update IE Len */ pP2pIe[1] += noaLen; } else { limLog(pMac, LOGE, FL("Not able to insert NoA because of length constraint")); } } } #endif vos_mem_copy(pFrame, &addIE[0], len); *nBytes = *nBytes + len; } } } return status; } // -------------------------------------------------------------------- /** * schSetFixedBeaconFields * * FUNCTION: * * LOGIC: * * ASSUMPTIONS: * * NOTE: * * 
@param None * @return None */ tSirRetStatus schSetFixedBeaconFields(tpAniSirGlobal pMac,tpPESession psessionEntry) { tpAniBeaconStruct pBeacon = (tpAniBeaconStruct) pMac->sch.schObject.gSchBeaconFrameBegin; tpSirMacMgmtHdr mac; tANI_U16 offset; tANI_U8 *ptr; tDot11fBeacon1 *pBcn1; tDot11fBeacon2 *pBcn2; tANI_U32 i, nStatus, nBytes; tANI_U32 wpsApEnable=0, tmp; #ifdef WLAN_SOFTAP_FEATURE tDot11fIEWscProbeRes *pWscProbeRes; #ifdef WLAN_FEATURE_P2P tANI_U8 *pExtraIe = NULL; tANI_U32 extraIeLen =0; tANI_U16 extraIeOffset = 0; tANI_U16 p2pIeOffset = 0; tSirRetStatus status = eSIR_SUCCESS; #endif #endif status = palAllocateMemory(pMac->hHdd, (void **)&pBcn1, sizeof(tDot11fBeacon1)); if(status != eSIR_SUCCESS) { schLog(pMac, LOGE, FL("Failed to allocate memory\n") ); return eSIR_FAILURE; } status = palAllocateMemory(pMac->hHdd, (void **)&pBcn2, sizeof(tDot11fBeacon2)); if(status != eSIR_SUCCESS) { schLog(pMac, LOGE, FL("Failed to allocate memory\n") ); palFreeMemory(pMac->hHdd, pBcn1); return eSIR_FAILURE; } #ifdef WLAN_SOFTAP_FEATURE status = palAllocateMemory(pMac->hHdd, (void **)&pWscProbeRes, sizeof(tDot11fIEWscProbeRes)); if(status != eSIR_SUCCESS) { schLog(pMac, LOGE, FL("Failed to allocate memory\n") ); palFreeMemory(pMac->hHdd, pBcn1); palFreeMemory(pMac->hHdd, pBcn2); return eSIR_FAILURE; } #endif PELOG1(schLog(pMac, LOG1, FL("Setting fixed beacon fields\n"));) /* * First set the fixed fields */ // set the TFP headers // set the mac header palZeroMemory( pMac->hHdd, ( tANI_U8*) &pBeacon->macHdr, sizeof( tSirMacMgmtHdr ) ); mac = (tpSirMacMgmtHdr) &pBeacon->macHdr; mac->fc.type = SIR_MAC_MGMT_FRAME; mac->fc.subType = SIR_MAC_MGMT_BEACON; for (i=0; i<6; i++) mac->da[i] = 0xff; /* Knocking out Global pMac update */ /* limGetMyMacAddr(pMac, mac->sa); */ /* limGetBssid(pMac, mac->bssId); */ palCopyMemory(pMac->hHdd, mac->sa, psessionEntry->selfMacAddr, sizeof(psessionEntry->selfMacAddr)); palCopyMemory(pMac->hHdd, mac->bssId, psessionEntry->bssId, sizeof 
(psessionEntry->bssId)); mac->fc.fromDS = 0; mac->fc.toDS = 0; /* * Now set the beacon body */ palZeroMemory( pMac->hHdd, ( tANI_U8*) pBcn1, sizeof( tDot11fBeacon1 ) ); // Skip over the timestamp (it'll be updated later). pBcn1->BeaconInterval.interval = pMac->sch.schObject.gSchBeaconInterval; PopulateDot11fCapabilities( pMac, &pBcn1->Capabilities, psessionEntry ); if (psessionEntry->ssidHidden) { pBcn1->SSID.present = 1; //rest of the fileds are 0 for hidden ssid } else { PopulateDot11fSSID( pMac, &psessionEntry->ssId, &pBcn1->SSID ); } PopulateDot11fSuppRates( pMac, POPULATE_DOT11F_RATES_OPERATIONAL, &pBcn1->SuppRates,psessionEntry); PopulateDot11fDSParams( pMac, &pBcn1->DSParams, psessionEntry->currentOperChannel, psessionEntry); PopulateDot11fIBSSParams( pMac, &pBcn1->IBSSParams,psessionEntry); offset = sizeof( tAniBeaconStruct ); ptr = pMac->sch.schObject.gSchBeaconFrameBegin + offset; #ifdef WLAN_SOFTAP_FEATURE if((psessionEntry->limSystemRole == eLIM_AP_ROLE) && (psessionEntry->proxyProbeRspEn)) { /* Initialize the default IE bitmap to zero */ palZeroMemory( pMac->hHdd, ( tANI_U8* )&(psessionEntry->DefProbeRspIeBitmap), (sizeof( tANI_U32 ) * 8)); /* Initialize the default IE bitmap to zero */ palZeroMemory( pMac->hHdd, ( tANI_U8* )&(psessionEntry->probeRespFrame), sizeof(psessionEntry->probeRespFrame)); /* Can be efficiently updated whenever new IE added in Probe response in future */ limUpdateProbeRspTemplateIeBitmapBeacon1(pMac,pBcn1,&psessionEntry->DefProbeRspIeBitmap[0], &psessionEntry->probeRespFrame); } #endif nStatus = dot11fPackBeacon1( pMac, pBcn1, ptr, SCH_MAX_BEACON_SIZE - offset, &nBytes ); if ( DOT11F_FAILED( nStatus ) ) { schLog( pMac, LOGE, FL("Failed to packed a tDot11fBeacon1 (0x%0" "8x.).\n"), nStatus ); palFreeMemory(pMac->hHdd, pBcn1); palFreeMemory(pMac->hHdd, pBcn2); #ifdef WLAN_SOFTAP_FEATURE palFreeMemory(pMac->hHdd, pWscProbeRes); #endif return eSIR_FAILURE; } else if ( DOT11F_WARNED( nStatus ) ) { schLog( pMac, LOGE, FL("There were 
warnings while packing a tDo" "t11fBeacon1 (0x%08x.).\n"), nStatus ); } /*changed to correct beacon corruption */ palZeroMemory( pMac->hHdd, ( tANI_U8*) pBcn2, sizeof( tDot11fBeacon2 ) ); pMac->sch.schObject.gSchBeaconOffsetBegin = offset + ( tANI_U16 )nBytes; schLog( pMac, LOG1, FL("Initialized beacon begin, offset %d\n"), offset ); /* * Initialize the 'new' fields at the end of the beacon */ PopulateDot11fCountry( pMac, &pBcn2->Country, psessionEntry); if(pBcn1->Capabilities.qos) { PopulateDot11fEDCAParamSet( pMac, &pBcn2->EDCAParamSet, psessionEntry); } if(psessionEntry->lim11hEnable) { PopulateDot11fPowerConstraints( pMac, &pBcn2->PowerConstraints ); PopulateDot11fTPCReport( pMac, &pBcn2->TPCReport, psessionEntry); } #ifdef ANI_PRODUCT_TYPE_AP if( psessionEntry->lim11hEnable && (eLIM_QUIET_RUNNING == psessionEntry->gLimSpecMgmt.quietState)) { PopulateDot11fQuiet( pMac, &pBcn2->Quiet ); } /* If 11h is enabled, and AP is in the state of changing either the * primary channel, or both primary & secondary channel, and the * channel switch count is still being decremented, then AP shall * populate the 802.11h channel switch IE in its Beacons and Probe * Responses. 
*/ if ( (psessionEntry->lim11hEnable) && (psessionEntry->gLimChannelSwitch.switchCount != 0) && (psessionEntry->gLimSpecMgmt.dot11hChanSwState == eLIM_11H_CHANSW_RUNNING)) { PopulateDot11fChanSwitchAnn( pMac, &pBcn2->ChanSwitchAnn,psessionEntry ); PopulateDot11fExtChanSwitchAnn(pMac, &pBcn2->ExtChanSwitchAnn,psessionEntry); } #endif if (psessionEntry->dot11mode != WNI_CFG_DOT11_MODE_11B) PopulateDot11fERPInfo( pMac, &pBcn2->ERPInfo, psessionEntry ); if(psessionEntry->htCapability) { PopulateDot11fHTCaps( pMac,psessionEntry, &pBcn2->HTCaps ); #ifdef WLAN_SOFTAP_FEATURE PopulateDot11fHTInfo( pMac, &pBcn2->HTInfo, psessionEntry ); #else PopulateDot11fHTInfo( pMac, &pBcn2->HTInfo ); #endif } #ifdef WLAN_FEATURE_11AC if(psessionEntry->vhtCapability) { limLog( pMac, LOGW, FL("Populate VHT IEs in Beacon\n")); PopulateDot11fVHTCaps( pMac, &pBcn2->VHTCaps ); PopulateDot11fVHTOperation( pMac, &pBcn2->VHTOperation); // we do not support multi users yet //PopulateDot11fVHTExtBssLoad( pMac, &bcn2.VHTExtBssLoad); } #endif PopulateDot11fExtSuppRates( pMac, POPULATE_DOT11F_RATES_OPERATIONAL, &pBcn2->ExtSuppRates, psessionEntry ); if( psessionEntry->pLimStartBssReq != NULL ) { PopulateDot11fWPA( pMac, &psessionEntry->pLimStartBssReq->rsnIE, &pBcn2->WPA ); PopulateDot11fRSN( pMac, &psessionEntry->pLimStartBssReq->rsnIE, &pBcn2->RSN ); } if(psessionEntry->limWmeEnabled) { PopulateDot11fWMM( pMac, &pBcn2->WMMInfoAp, &pBcn2->WMMParams, &pBcn2->WMMCaps, psessionEntry); } #ifdef WLAN_SOFTAP_FEATURE if(psessionEntry->limSystemRole == eLIM_AP_ROLE) { if(psessionEntry->wps_state != SAP_WPS_DISABLED) { PopulateDot11fBeaconWPSIEs( pMac, &pBcn2->WscBeacon, psessionEntry); } } else { #endif if (wlan_cfgGetInt(pMac, (tANI_U16) WNI_CFG_WPS_ENABLE, &tmp) != eSIR_SUCCESS) limLog(pMac, LOGP,"Failed to cfg get id %d\n", WNI_CFG_WPS_ENABLE ); wpsApEnable = tmp & WNI_CFG_WPS_ENABLE_AP; if (wpsApEnable) { PopulateDot11fWsc(pMac, &pBcn2->WscBeacon); } if (pMac->lim.wscIeInfo.wscEnrollmentState == 
eLIM_WSC_ENROLL_BEGIN) { PopulateDot11fWscRegistrarInfo(pMac, &pBcn2->WscBeacon); pMac->lim.wscIeInfo.wscEnrollmentState = eLIM_WSC_ENROLL_IN_PROGRESS; } if (pMac->lim.wscIeInfo.wscEnrollmentState == eLIM_WSC_ENROLL_END) { DePopulateDot11fWscRegistrarInfo(pMac, &pBcn2->WscBeacon); pMac->lim.wscIeInfo.wscEnrollmentState = eLIM_WSC_ENROLL_NOOP; } #ifdef WLAN_SOFTAP_FEATURE } #endif #ifdef WLAN_SOFTAP_FEATURE if((psessionEntry->limSystemRole == eLIM_AP_ROLE) && (psessionEntry->proxyProbeRspEn)) { /* Can be efficiently updated whenever new IE added in Probe response in future */ limUpdateProbeRspTemplateIeBitmapBeacon2(pMac,pBcn2,&psessionEntry->DefProbeRspIeBitmap[0], &psessionEntry->probeRespFrame); /* update probe response WPS IE instead of beacon WPS IE * */ if(psessionEntry->wps_state != SAP_WPS_DISABLED) { if(psessionEntry->APWPSIEs.SirWPSProbeRspIE.FieldPresent) { PopulateDot11fProbeResWPSIEs(pMac, pWscProbeRes, psessionEntry); } else { pWscProbeRes->present = 0; } if(pWscProbeRes->present) { SetProbeRspIeBitmap(&psessionEntry->DefProbeRspIeBitmap[0],SIR_MAC_WPA_EID); palCopyMemory(pMac->hHdd, (void *)&psessionEntry->probeRespFrame.WscProbeRes, (void *)pWscProbeRes, sizeof(tDot11fIEWscProbeRes)); } } } #endif nStatus = dot11fPackBeacon2( pMac, pBcn2, pMac->sch.schObject.gSchBeaconFrameEnd, SCH_MAX_BEACON_SIZE, &nBytes ); if ( DOT11F_FAILED( nStatus ) ) { schLog( pMac, LOGE, FL("Failed to packed a tDot11fBeacon2 (0x%0" "8x.).\n"), nStatus ); palFreeMemory(pMac->hHdd, pBcn1); palFreeMemory(pMac->hHdd, pBcn2); #ifdef WLAN_SOFTAP_FEATURE palFreeMemory(pMac->hHdd, pWscProbeRes); #endif return eSIR_FAILURE; } else if ( DOT11F_WARNED( nStatus ) ) { schLog( pMac, LOGE, FL("There were warnings while packing a tDo" "t11fBeacon2 (0x%08x.).\n"), nStatus ); } #if defined(WLAN_SOFTAP_FEATURE) && defined(WLAN_FEATURE_P2P) pExtraIe = pMac->sch.schObject.gSchBeaconFrameEnd + nBytes; extraIeOffset = nBytes; #endif //TODO: Append additional IE here. 
schAppendAddnIE(pMac, psessionEntry, pMac->sch.schObject.gSchBeaconFrameEnd + nBytes, SCH_MAX_BEACON_SIZE, &nBytes); pMac->sch.schObject.gSchBeaconOffsetEnd = ( tANI_U16 )nBytes; #if defined(WLAN_SOFTAP_FEATURE) && defined(WLAN_FEATURE_P2P) extraIeLen = nBytes - extraIeOffset; //Get the p2p Ie Offset status = schGetP2pIeOffset(pExtraIe, extraIeLen, &p2pIeOffset); if(eSIR_SUCCESS == status) { //Update the P2P Ie Offset pMac->sch.schObject.p2pIeOffset = pMac->sch.schObject.gSchBeaconOffsetBegin + TIM_IE_SIZE + extraIeOffset + p2pIeOffset; } else { pMac->sch.schObject.p2pIeOffset = 0; } #endif schLog( pMac, LOG1, FL("Initialized beacon end, offset %d\n"), pMac->sch.schObject.gSchBeaconOffsetEnd ); pMac->sch.schObject.fBeaconChanged = 1; palFreeMemory(pMac->hHdd, pBcn1); palFreeMemory(pMac->hHdd, pBcn2); #ifdef WLAN_SOFTAP_FEATURE palFreeMemory(pMac->hHdd, pWscProbeRes); #endif return eSIR_SUCCESS; } #ifdef WLAN_SOFTAP_FEATURE void limUpdateProbeRspTemplateIeBitmapBeacon1(tpAniSirGlobal pMac, tDot11fBeacon1* beacon1, tANI_U32* DefProbeRspIeBitmap, tDot11fProbeResponse* prb_rsp) { prb_rsp->BeaconInterval = beacon1->BeaconInterval; palCopyMemory(pMac->hHdd,(void *)&prb_rsp->Capabilities, (void *)&beacon1->Capabilities, sizeof(beacon1->Capabilities)); /* SSID */ if(beacon1->SSID.present) { SetProbeRspIeBitmap(DefProbeRspIeBitmap,SIR_MAC_SSID_EID); /* populating it , because probe response has to go with SSID even in hidden case */ PopulateDot11fSSID2( pMac, &prb_rsp->SSID ); } /* supported rates */ if(beacon1->SuppRates.present) { SetProbeRspIeBitmap(DefProbeRspIeBitmap,SIR_MAC_RATESET_EID); palCopyMemory(pMac->hHdd,(void *)&prb_rsp->SuppRates, (void *)&beacon1->SuppRates, sizeof(beacon1->SuppRates)); } /* DS Parameter set */ if(beacon1->DSParams.present) { SetProbeRspIeBitmap(DefProbeRspIeBitmap,SIR_MAC_DS_PARAM_SET_EID); palCopyMemory(pMac->hHdd,(void *)&prb_rsp->DSParams, (void *)&beacon1->DSParams, sizeof(beacon1->DSParams)); } /* IBSS params will not be present in 
the Beacons transmitted by AP */ } void limUpdateProbeRspTemplateIeBitmapBeacon2(tpAniSirGlobal pMac, tDot11fBeacon2* beacon2, tANI_U32* DefProbeRspIeBitmap, tDot11fProbeResponse* prb_rsp) { /* IBSS parameter set - will not be present in probe response tx by AP */ /* country */ if(beacon2->Country.present) { SetProbeRspIeBitmap(DefProbeRspIeBitmap,SIR_MAC_COUNTRY_EID); palCopyMemory(pMac->hHdd,(void *)&prb_rsp->Country, (void *)&beacon2->Country, sizeof(beacon2->Country)); } /* Power constraint */ if(beacon2->PowerConstraints.present) { SetProbeRspIeBitmap(DefProbeRspIeBitmap,SIR_MAC_PWR_CONSTRAINT_EID); palCopyMemory(pMac->hHdd,(void *)&prb_rsp->PowerConstraints, (void *)&beacon2->PowerConstraints, sizeof(beacon2->PowerConstraints)); } /* Channel Switch Annoouncement SIR_MAC_CHNL_SWITCH_ANN_EID */ if(beacon2->ChanSwitchAnn.present) { SetProbeRspIeBitmap(DefProbeRspIeBitmap,SIR_MAC_CHNL_SWITCH_ANN_EID); palCopyMemory(pMac->hHdd,(void *)&prb_rsp->ChanSwitchAnn, (void *)&beacon2->ChanSwitchAnn, sizeof(beacon2->ChanSwitchAnn)); } /* ERP information */ if(beacon2->ERPInfo.present) { SetProbeRspIeBitmap(DefProbeRspIeBitmap,SIR_MAC_ERP_INFO_EID); palCopyMemory(pMac->hHdd,(void *)&prb_rsp->ERPInfo, (void *)&beacon2->ERPInfo, sizeof(beacon2->ERPInfo)); } /* Extended supported rates */ if(beacon2->ExtSuppRates.present) { SetProbeRspIeBitmap(DefProbeRspIeBitmap,SIR_MAC_EXTENDED_RATE_EID); palCopyMemory(pMac->hHdd,(void *)&prb_rsp->ExtSuppRates, (void *)&beacon2->ExtSuppRates, sizeof(beacon2->ExtSuppRates)); } /* WPA */ if(beacon2->WPA.present) { SetProbeRspIeBitmap(DefProbeRspIeBitmap,SIR_MAC_WPA_EID); palCopyMemory(pMac->hHdd,(void *)&prb_rsp->WPA, (void *)&beacon2->WPA, sizeof(beacon2->WPA)); } /* RSN */ if(beacon2->RSN.present) { SetProbeRspIeBitmap(DefProbeRspIeBitmap,SIR_MAC_RSN_EID); palCopyMemory(pMac->hHdd,(void *)&prb_rsp->RSN, (void *)&beacon2->RSN, sizeof(beacon2->RSN)); } /* // BSS load if(beacon2->QBSSLoad.present) { 
SetProbeRspIeBitmap(DefProbeRspIeBitmap,SIR_MAC_QBSS_LOAD_EID); } */ /* EDCA Parameter set */ if(beacon2->EDCAParamSet.present) { SetProbeRspIeBitmap(DefProbeRspIeBitmap,SIR_MAC_EDCA_PARAM_SET_EID); palCopyMemory(pMac->hHdd,(void *)&prb_rsp->EDCAParamSet, (void *)&beacon2->EDCAParamSet, sizeof(beacon2->EDCAParamSet)); } /* Vendor specific - currently no vendor specific IEs added */ /* Requested IEs - currently we are not processing this will be added later */ //HT capability IE if(beacon2->HTCaps.present) { SetProbeRspIeBitmap(DefProbeRspIeBitmap,SIR_MAC_HT_CAPABILITIES_EID); palCopyMemory(pMac->hHdd,(void *)&prb_rsp->HTCaps, (void *)&beacon2->HTCaps, sizeof(beacon2->HTCaps)); } // HT Info IE if(beacon2->HTInfo.present) { SetProbeRspIeBitmap(DefProbeRspIeBitmap,SIR_MAC_HT_INFO_EID); palCopyMemory(pMac->hHdd,(void *)&prb_rsp->HTInfo, (void *)&beacon2->HTInfo, sizeof(beacon2->HTInfo)); } #ifdef WLAN_FEATURE_11AC if(beacon2->VHTCaps.present) { SetProbeRspIeBitmap(DefProbeRspIeBitmap,SIR_MAC_VHT_CAPABILITIES_EID); palCopyMemory(pMac->hHdd,(void *)&prb_rsp->VHTCaps, (void *)&beacon2->VHTCaps, sizeof(beacon2->VHTCaps)); } if(beacon2->VHTOperation.present) { SetProbeRspIeBitmap(DefProbeRspIeBitmap,SIR_MAC_VHT_OPERATION_EID); palCopyMemory(pMac->hHdd,(void *)&prb_rsp->VHTOperation, (void *)&beacon2->VHTOperation, sizeof(beacon2->VHTOperation)); } if(beacon2->VHTExtBssLoad.present) { SetProbeRspIeBitmap(DefProbeRspIeBitmap,SIR_MAC_VHT_EXT_BSS_LOAD_EID); palCopyMemory(pMac->hHdd,(void *)&prb_rsp->VHTExtBssLoad, (void *)&beacon2->VHTExtBssLoad, sizeof(beacon2->VHTExtBssLoad)); } #endif //WMM IE if(beacon2->WMMParams.present) { SetProbeRspIeBitmap(DefProbeRspIeBitmap,SIR_MAC_WPA_EID); palCopyMemory(pMac->hHdd,(void *)&prb_rsp->WMMParams, (void *)&beacon2->WMMParams, sizeof(beacon2->WMMParams)); } //WMM capability - most of the case won't be present if(beacon2->WMMCaps.present) { SetProbeRspIeBitmap(DefProbeRspIeBitmap,SIR_MAC_WPA_EID); palCopyMemory(pMac->hHdd,(void 
*)&prb_rsp->WMMCaps, (void *)&beacon2->WMMCaps, sizeof(beacon2->WMMCaps)); } } void SetProbeRspIeBitmap(tANI_U32* IeBitmap,tANI_U32 pos) { tANI_U32 index,temp; index = pos >> 5; if(index >= 8 ) { return; } temp = IeBitmap[index]; temp |= 1 << (pos & 0x1F); IeBitmap[index] = temp; } #endif #ifdef ANI_PRODUCT_TYPE_AP //---------------------------- /** * @function : schUpdateCfpParam * @brief : Generate the CFP Parameter Set * * @param : pMac - tpAniSirGlobal * ptr - Pointer to the BeaconFrame * pbeaconSize - The beaconSize * * @return : Return the Updated Ptrlocation */ static tANI_U8 * __schUpdateCfpParam(tpAniSirGlobal pMac, tANI_U8 *ptr, tANI_U32 *pbeaconSize) { tANI_U32 val; *ptr++ = SIR_MAC_CF_PARAM_SET_EID; *ptr++ = SIR_MAC_CF_PARAM_SET_EID_MIN; wlan_cfgGetInt(pMac, WNI_CFG_CFP_PERIOD, &val); if (++pMac->sch.schObject.gSchCFPCount == val) pMac->sch.schObject.gSchCFPCount = 0; *ptr++ = pMac->sch.schObject.gSchCFPCount; *ptr++ = (tANI_U8)val; wlan_cfgGetInt(pMac, WNI_CFG_CFP_MAX_DURATION, &val); pMac->sch.schObject.gSchCFPMaxDuration = (tANI_U8)val; sirStoreU16(ptr, (tANI_U16)val); ptr += 2; if (pMac->sch.schObject.gSchCFPCount == 0) pMac->sch.schObject.gSchCFPDurRemaining = pMac->sch.schObject.gSchCFPMaxDuration; else if (pMac->sch.schObject.gSchCFPDurRemaining > pMac->sch.schObject.gSchBeaconInterval) pMac->sch.schObject.gSchCFPDurRemaining -= pMac->sch.schObject.gSchBeaconInterval; else pMac->sch.schObject.gSchCFPDurRemaining = 0; sirStoreU16(ptr, pMac->sch.schObject.gSchCFPDurRemaining); ptr += 2; (*pbeaconSize) += 2 + SIR_MAC_CF_PARAM_SET_EID_MIN; return ptr; } #endif // -------------------------------------------------------------------- /** * writeBeaconToMemory * * FUNCTION: * * LOGIC: * * ASSUMPTIONS: * * NOTE: * * @param None * @param size Size of the beacon to write to memory * @param length Length field of the beacon to write to memory * @return None */ void writeBeaconToMemory(tpAniSirGlobal pMac, tANI_U16 size, tANI_U16 length, tpPESession 
psessionEntry) { tANI_U16 i; tpAniBeaconStruct pBeacon; // copy end of beacon only if length > 0 if (length > 0) { for (i=0; i < pMac->sch.schObject.gSchBeaconOffsetEnd; i++) pMac->sch.schObject.gSchBeaconFrameBegin[size++] = pMac->sch.schObject.gSchBeaconFrameEnd[i]; } // Update the beacon length pBeacon = (tpAniBeaconStruct) pMac->sch.schObject.gSchBeaconFrameBegin; // Do not include the beaconLength indicator itself if (length == 0) { pBeacon->beaconLength = 0; // Dont copy entire beacon, Copy length field alone size = 4; } else pBeacon->beaconLength = (tANI_U32) size - sizeof( tANI_U32 ); // write size bytes from gSchBeaconFrameBegin PELOG2(schLog(pMac, LOG2, FL("Beacon size - %d bytes\n"), size);) PELOG2(sirDumpBuf(pMac, SIR_SCH_MODULE_ID, LOG2, pMac->sch.schObject.gSchBeaconFrameBegin, size);) if (! pMac->sch.schObject.fBeaconChanged) return; pMac->sch.gSchGenBeacon = 1; if (pMac->sch.gSchGenBeacon) { pMac->sch.gSchBeaconsSent++; // // Copy beacon data to SoftMAC shared memory... // Do this by sending a message to HAL // size = (size + 3) & (~3); if( eSIR_SUCCESS != schSendBeaconReq( pMac, pMac->sch.schObject.gSchBeaconFrameBegin, size , psessionEntry)) PELOGE(schLog(pMac, LOGE, FL("schSendBeaconReq() returned an error (zsize %d)\n"), size);) else { pMac->sch.gSchBeaconsWritten++; } } pMac->sch.schObject.fBeaconChanged = 0; } // -------------------------------------------------------------------- /** * @function: SchProcessPreBeaconInd * * @brief : Process the PreBeacon Indication from the Lim * * ASSUMPTIONS: * * NOTE: * * @param : pMac - tpAniSirGlobal * * @return None */ void schProcessPreBeaconInd(tpAniSirGlobal pMac, tpSirMsgQ limMsg) { tpBeaconGenParams pMsg = (tpBeaconGenParams)limMsg->bodyptr; tANI_U32 beaconSize = pMac->sch.schObject.gSchBeaconOffsetBegin; tpPESession psessionEntry; tANI_U8 sessionId; if((psessionEntry = peFindSessionByBssid(pMac,pMsg->bssId, &sessionId))== NULL) { PELOGE(schLog(pMac, LOGE, FL("session lookup fails\n"));) goto end; } 
// If SME is not in normal mode, no need to generate beacon if (psessionEntry->limSmeState != eLIM_SME_NORMAL_STATE) { PELOGE(schLog(pMac, LOG1, FL("PreBeaconInd received in invalid state: %d\n"), psessionEntry->limSmeState);) goto end; } switch(psessionEntry->limSystemRole){ case eLIM_STA_IN_IBSS_ROLE: case eLIM_BT_AMP_AP_ROLE: case eLIM_BT_AMP_STA_ROLE: // generate IBSS parameter set if(psessionEntry->statypeForBss == STA_ENTRY_SELF) writeBeaconToMemory(pMac, (tANI_U16) beaconSize, (tANI_U16)beaconSize, psessionEntry); else PELOGE(schLog(pMac, LOGE, FL("can not send beacon for PEER session entry\n"));) break; #ifdef WLAN_SOFTAP_FEATURE case eLIM_AP_ROLE:{ tANI_U8 *ptr = &pMac->sch.schObject.gSchBeaconFrameBegin[pMac->sch.schObject.gSchBeaconOffsetBegin]; tANI_U16 timLength = 0; if(psessionEntry->statypeForBss == STA_ENTRY_SELF){ pmmGenerateTIM(pMac, &ptr, &timLength, psessionEntry->dtimPeriod); beaconSize += 2 + timLength; writeBeaconToMemory(pMac, (tANI_U16) beaconSize, (tANI_U16)beaconSize, psessionEntry); } else PELOGE(schLog(pMac, LOGE, FL("can not send beacon for PEER session entry\n"));) } break; #endif #ifdef ANI_PRODUCT_TYPE_AP case eLIM_AP_ROLE: { tANI_U8 *ptr = &pMac->sch.schObject.gSchBeaconFrameBegin[pMac->sch.schObject.gSchBeaconOffsetBegin]; tANI_U16 timLength = 0; if (pMac->sch.schObject.gSchCFPEnabled) ptr = __schUpdateCfpParam( pMac, ptr, &beaconSize); // generate TIM pmmGenerateTIM(pMac, &ptr, &timLength); beaconSize += 2 + timLength; /** * Safe to call this each time. 
* Based on the requirement for updating the * fixed beacon fields, this routine will * appropriately update the fixed fields */ specialBeaconProcessing(pMac, beaconSize); writeBeaconToMemory(pMac, beaconSize, beaconSize, psessionEntry); pmmHandleTimBasedDisassociation( pMac, psessionEntry ); } break; #endif default: PELOGE(schLog(pMac, LOGE, FL("Error-PE has Receive PreBeconGenIndication when System is in %d role"), psessionEntry->limSystemRole);) } end: palFreeMemory(pMac->hHdd, (void*)pMsg); } /**------------------------------------------------------------- \fn specialBeaconProcessing \brief To add/update channel switch IE/ Quiet IE in beacons. And also to resume transmission and measurement after switching the channel. \param pMac \param beaconSize Size of the beacon \return NONE --------------------------------------------------------------*/ #ifdef ANI_PRODUCT_TYPE_AP static void specialBeaconProcessing( tpAniSirGlobal pMac, tANI_U32 beaconSize) { tpPESession psessionEntry = &pMac->lim.gpSession[0]; //TBD-RAJESH HOW TO GET sessionEntry????? tANI_BOOLEAN fBeaconChanged = eANI_BOOLEAN_FALSE; fBeaconChanged = limUpdateQuietIEInBeacons( pMac ); if((pMac->lim.wscIeInfo.wscEnrollmentState == eLIM_WSC_ENROLL_BEGIN) || (pMac->lim.wscIeInfo.wscEnrollmentState == eLIM_WSC_ENROLL_END)) { fBeaconChanged = eANI_BOOLEAN_TRUE; } /******************************* * Processing Channel Switch IE *******************************/ if (pMac->lim.gLimSpecMgmt.dot11hChanSwState == eLIM_11H_CHANSW_RUNNING) { fBeaconChanged = eANI_BOOLEAN_TRUE; #if 0 // If the station doesn't support 11h or have link monitoring enabled, // AP has to send disassoc frame to indicate station before going // to new channel. Otherwise station wont connect to AP in new channel. 
if (pMac->lim.gLimChannelSwitch.switchCount == 1) { if((pMac->lim.gLimChannelSwitch.state == eLIM_CHANNEL_SWITCH_PRIMARY_ONLY) || (pMac->lim.gLimChannelSwitch.state == eLIM_CHANNEL_SWITCH_PRIMARY_AND_SECONDARY)) { tSirMacAddr bcAddr = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}; limSendDisassocMgmtFrame(pMac, eSIR_MAC_DISASSOC_LEAVING_BSS_REASON, bcAddr); } } #endif if (pMac->lim.gLimChannelSwitch.switchCount == 0) { /* length is set to 0, so that no beacon is transmitted without channel switch IE * before switching to new channel */ pMac->sch.schObject.fBeaconChanged = 1; writeBeaconToMemory(pMac, beaconSize, 0, psessionEntry); schSetFixedBeaconFields(pMac,psessionEntry); PELOG3(limLog(pMac, LOG3, FL("Channel switch state = %d\n"), pMac->lim.gLimChannelSwitch.state);) switch(pMac->lim.gLimChannelSwitch.state) { case eLIM_CHANNEL_SWITCH_PRIMARY_ONLY: limSwitchPrimaryChannel(pMac, pMac->lim.gLimChannelSwitch.primaryChannel); break; case eLIM_CHANNEL_SWITCH_SECONDARY_ONLY: limSwitchPrimarySecondaryChannel(pMac, psessionEntry, psessionEntry->currentOperChannel, pMac->lim.gLimChannelSwitch.secondarySubBand); break; case eLIM_CHANNEL_SWITCH_PRIMARY_AND_SECONDARY: limSwitchPrimarySecondaryChannel(pMac, psessionEntry, pMac->lim.gLimChannelSwitch.primaryChannel, pMac->lim.gLimChannelSwitch.secondarySubBand); break; case eLIM_CHANNEL_SWITCH_IDLE: PELOGE(schLog(pMac, LOGE, FL("incorrect state - CHANNEL_SWITCH_IDLE\n"));) break; default: break; } pMac->lim.gLimChannelSwitch.state = eLIM_CHANNEL_SWITCH_IDLE; limSendSmeRsp(pMac, eWNI_SME_SWITCH_CHL_RSP, eSIR_SME_SUCCESS); limFrameTransmissionControl(pMac, eLIM_TX_BSS_BUT_BEACON, eLIM_RESUME_TX); /* Flag to indicate 11h channel switch is done. 
*/ pMac->lim.gLimSpecMgmt.dot11hChanSwState = eLIM_11H_CHANSW_INIT; pMac->lim.gLimSpecMgmt.quietState = eLIM_QUIET_INIT; LIM_SET_RADAR_DETECTED(pMac, eANI_BOOLEAN_FALSE); if (pMac->lim.gpLimMeasReq) limReEnableLearnMode(pMac); return; } } if (fBeaconChanged) { schSetFixedBeaconFields(pMac,psessionEntry); if (pMac->lim.gLimChannelSwitch.switchCount > 0) pMac->lim.gLimChannelSwitch.switchCount--; } } #endif
gpl-2.0
binkybear/AK-OnePone
arch/arm/mach-msm/qdsp6v2/audio_utils.c
643
18224
/* Copyright (c) 2010-2014, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/module.h> #include <linux/fs.h> #include <linux/miscdevice.h> #include <linux/uaccess.h> #include <linux/sched.h> #include <linux/wait.h> #include <linux/dma-mapping.h> #include <linux/slab.h> #include <linux/atomic.h> #include <asm/ioctls.h> #include "audio_utils.h" #define MIN_FRAME_SIZE 1536 #define NUM_FRAMES 5 #define META_SIZE (sizeof(struct meta_out_dsp)) #define FRAME_SIZE (1 + ((MIN_FRAME_SIZE + META_SIZE) * NUM_FRAMES)) static int audio_in_pause(struct q6audio_in *audio) { int rc; rc = q6asm_cmd(audio->ac, CMD_PAUSE); if (rc < 0) pr_err("%s:session id %d: pause cmd failed rc=%d\n", __func__, audio->ac->session, rc); return rc; } static int audio_in_flush(struct q6audio_in *audio) { int rc; pr_debug("%s:session id %d: flush\n", __func__, audio->ac->session); /* Flush if session running */ if (audio->enabled) { /* Implicitly issue a pause to the encoder before flushing */ rc = audio_in_pause(audio); if (rc < 0) { pr_err("%s:session id %d: pause cmd failed rc=%d\n", __func__, audio->ac->session, rc); return rc; } rc = q6asm_cmd(audio->ac, CMD_FLUSH); if (rc < 0) { pr_err("%s:session id %d: flush cmd failed rc=%d\n", __func__, audio->ac->session, rc); return rc; } /* 2nd arg: 0 -> run immediately 3rd arg: 0 -> msw_ts, 4th arg: 0 ->lsw_ts */ q6asm_run(audio->ac, 0x00, 0x00, 0x00); pr_debug("Rerun the session\n"); } audio->rflush = 1; audio->wflush = 1; memset(audio->out_frame_info, 0, sizeof(audio->out_frame_info)); 
wake_up(&audio->read_wait); /* get read_lock to ensure no more waiting read thread */ mutex_lock(&audio->read_lock); audio->rflush = 0; mutex_unlock(&audio->read_lock); wake_up(&audio->write_wait); /* get write_lock to ensure no more waiting write thread */ mutex_lock(&audio->write_lock); audio->wflush = 0; mutex_unlock(&audio->write_lock); pr_debug("%s:session id %d: in_bytes %d\n", __func__, audio->ac->session, atomic_read(&audio->in_bytes)); pr_debug("%s:session id %d: in_samples %d\n", __func__, audio->ac->session, atomic_read(&audio->in_samples)); atomic_set(&audio->in_bytes, 0); atomic_set(&audio->in_samples, 0); atomic_set(&audio->out_count, 0); return 0; } /* must be called with audio->lock held */ int audio_in_enable(struct q6audio_in *audio) { if (audio->enabled) return 0; /* 2nd arg: 0 -> run immediately 3rd arg: 0 -> msw_ts, 4th arg: 0 ->lsw_ts */ return q6asm_run(audio->ac, 0x00, 0x00, 0x00); } /* must be called with audio->lock held */ int audio_in_disable(struct q6audio_in *audio) { int rc = 0; if (!audio->stopped) { audio->enabled = 0; audio->opened = 0; pr_debug("%s:session id %d: inbytes[%d] insamples[%d]\n", __func__, audio->ac->session, atomic_read(&audio->in_bytes), atomic_read(&audio->in_samples)); rc = q6asm_cmd(audio->ac, CMD_CLOSE); if (rc < 0) pr_err("%s:session id %d: Failed to close the session rc=%d\n", __func__, audio->ac->session, rc); audio->stopped = 1; memset(audio->out_frame_info, 0, sizeof(audio->out_frame_info)); wake_up(&audio->read_wait); wake_up(&audio->write_wait); } pr_debug("%s:session id %d: enabled[%d]\n", __func__, audio->ac->session, audio->enabled); return rc; } int audio_in_buf_alloc(struct q6audio_in *audio) { int rc = 0; switch (audio->buf_alloc) { case NO_BUF_ALLOC: if (audio->feedback == NON_TUNNEL_MODE) { rc = q6asm_audio_client_buf_alloc(IN, audio->ac, ALIGN_BUF_SIZE(audio->pcm_cfg.buffer_size), audio->pcm_cfg.buffer_count); if (rc < 0) { pr_err("%s:session id %d: Buffer Alloc failed\n", __func__, 
audio->ac->session); rc = -ENOMEM; break; } audio->buf_alloc |= BUF_ALLOC_IN; } rc = q6asm_audio_client_buf_alloc(OUT, audio->ac, ALIGN_BUF_SIZE(audio->str_cfg.buffer_size), audio->str_cfg.buffer_count); if (rc < 0) { pr_err("%s:session id %d: Buffer Alloc failed rc=%d\n", __func__, audio->ac->session, rc); rc = -ENOMEM; break; } audio->buf_alloc |= BUF_ALLOC_OUT; break; case BUF_ALLOC_IN: rc = q6asm_audio_client_buf_alloc(OUT, audio->ac, ALIGN_BUF_SIZE(audio->str_cfg.buffer_size), audio->str_cfg.buffer_count); if (rc < 0) { pr_err("%s:session id %d: Buffer Alloc failed rc=%d\n", __func__, audio->ac->session, rc); rc = -ENOMEM; break; } audio->buf_alloc |= BUF_ALLOC_OUT; break; case BUF_ALLOC_OUT: if (audio->feedback == NON_TUNNEL_MODE) { rc = q6asm_audio_client_buf_alloc(IN, audio->ac, ALIGN_BUF_SIZE(audio->pcm_cfg.buffer_size), audio->pcm_cfg.buffer_count); if (rc < 0) { pr_err("%s:session id %d: Buffer Alloc failed\n", __func__, audio->ac->session); rc = -ENOMEM; break; } audio->buf_alloc |= BUF_ALLOC_IN; } break; default: pr_debug("%s:session id %d: buf[%d]\n", __func__, audio->ac->session, audio->buf_alloc); } return rc; } /* ------------------- device --------------------- */ long audio_in_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct q6audio_in *audio = file->private_data; int rc = 0; if (cmd == AUDIO_GET_STATS) { struct msm_audio_stats stats; memset(&stats, 0, sizeof(stats)); stats.byte_count = atomic_read(&audio->in_bytes); stats.sample_count = atomic_read(&audio->in_samples); if (copy_to_user((void *) arg, &stats, sizeof(stats))) return -EFAULT; return rc; } mutex_lock(&audio->lock); switch (cmd) { case AUDIO_FLUSH: { /* Make sure we're stopped and we wake any threads * that might be blocked holding the read_lock. * While audio->stopped read threads will always * exit immediately. 
*/ rc = audio_in_flush(audio); if (rc < 0) pr_err("%s:session id %d: Flush Fail rc=%d\n", __func__, audio->ac->session, rc); else { /* Register back the flushed read buffer with DSP */ int cnt = 0; while (cnt++ < audio->str_cfg.buffer_count) q6asm_read(audio->ac); /* Push buffer to DSP */ pr_debug("register the read buffer\n"); } break; } case AUDIO_PAUSE: { pr_debug("%s:session id %d: AUDIO_PAUSE\n", __func__, audio->ac->session); if (audio->enabled) audio_in_pause(audio); break; } case AUDIO_GET_STREAM_CONFIG: { struct msm_audio_stream_config cfg; memset(&cfg, 0, sizeof(cfg)); cfg.buffer_size = audio->str_cfg.buffer_size; cfg.buffer_count = audio->str_cfg.buffer_count; if (copy_to_user((void *)arg, &cfg, sizeof(cfg))) rc = -EFAULT; pr_debug("%s:session id %d: AUDIO_GET_STREAM_CONFIG %d %d\n", __func__, audio->ac->session, cfg.buffer_size, cfg.buffer_count); break; } case AUDIO_SET_STREAM_CONFIG: { struct msm_audio_stream_config cfg; if (copy_from_user(&cfg, (void *)arg, sizeof(cfg))) { rc = -EFAULT; break; } /* Minimum single frame size, but with in maximum frames number */ if ((cfg.buffer_size < (audio->min_frame_size+ \ sizeof(struct meta_out_dsp))) || (cfg.buffer_count < FRAME_NUM)) { rc = -EINVAL; break; } if ((cfg.buffer_size > FRAME_SIZE) || (cfg.buffer_count != FRAME_NUM)) { rc = -EINVAL; break; } audio->str_cfg.buffer_size = cfg.buffer_size; audio->str_cfg.buffer_count = cfg.buffer_count; if(audio->opened){ rc = q6asm_audio_client_buf_alloc(OUT,audio->ac, ALIGN_BUF_SIZE(audio->str_cfg.buffer_size), audio->str_cfg.buffer_count); if (rc < 0) { pr_err("%s: session id %d: Buffer Alloc failed rc=%d\n", __func__, audio->ac->session, rc); rc = -ENOMEM; break; } } audio->buf_alloc |= BUF_ALLOC_OUT; rc = 0; pr_debug("%s:session id %d: AUDIO_SET_STREAM_CONFIG %d %d\n", __func__, audio->ac->session, audio->str_cfg.buffer_size, audio->str_cfg.buffer_count); break; } case AUDIO_GET_SESSION_ID: { if (copy_to_user((void *) arg, &audio->ac->session, sizeof(unsigned 
short))) { rc = -EFAULT; } break; } case AUDIO_SET_BUF_CFG: { struct msm_audio_buf_cfg cfg; if (copy_from_user(&cfg, (void *)arg, sizeof(cfg))) { rc = -EFAULT; break; } if ((audio->feedback == NON_TUNNEL_MODE) && !cfg.meta_info_enable) { rc = -EFAULT; break; } /* Restrict the num of frames per buf to coincide with * default buf size */ if (cfg.frames_per_buf > audio->max_frames_per_buf) { rc = -EFAULT; break; } audio->buf_cfg.meta_info_enable = cfg.meta_info_enable; audio->buf_cfg.frames_per_buf = cfg.frames_per_buf; pr_debug("%s:session id %d: Set-buf-cfg: meta[%d] framesperbuf[%d]\n", __func__, audio->ac->session, cfg.meta_info_enable, cfg.frames_per_buf); break; } case AUDIO_GET_BUF_CFG: { pr_debug("%s:session id %d: Get-buf-cfg: meta[%d] framesperbuf[%d]\n", __func__, audio->ac->session, audio->buf_cfg.meta_info_enable, audio->buf_cfg.frames_per_buf); if (copy_to_user((void *)arg, &audio->buf_cfg, sizeof(struct msm_audio_buf_cfg))) rc = -EFAULT; break; } case AUDIO_GET_CONFIG: { if (copy_to_user((void *)arg, &audio->pcm_cfg, sizeof(struct msm_audio_config))) rc = -EFAULT; break; } case AUDIO_SET_CONFIG: { struct msm_audio_config cfg; if (copy_from_user(&cfg, (void *)arg, sizeof(cfg))) { rc = -EFAULT; break; } if (audio->feedback != NON_TUNNEL_MODE) { pr_err("%s:session id %d: Not sufficient permission to change the record mode\n", __func__, audio->ac->session); rc = -EACCES; break; } if ((cfg.buffer_count > PCM_BUF_COUNT) || (cfg.buffer_count == 1)) cfg.buffer_count = PCM_BUF_COUNT; audio->pcm_cfg.buffer_count = cfg.buffer_count; audio->pcm_cfg.buffer_size = cfg.buffer_size; audio->pcm_cfg.channel_count = cfg.channel_count; audio->pcm_cfg.sample_rate = cfg.sample_rate; if(audio->opened && audio->feedback == NON_TUNNEL_MODE){ rc = q6asm_audio_client_buf_alloc(IN, audio->ac, ALIGN_BUF_SIZE(audio->pcm_cfg.buffer_size), audio->pcm_cfg.buffer_count); if(rc < 0){ pr_err("%s:session id %d: Buffer Alloc failed\n", __func__,audio->ac->session); rc = -ENOMEM; break; } } 
audio->buf_alloc |= BUF_ALLOC_IN; rc = 0; pr_debug("%s:session id %d: AUDIO_SET_CONFIG %d %d\n", __func__, audio->ac->session, audio->pcm_cfg.buffer_count, audio->pcm_cfg.buffer_size); break; } default: /* call codec specific ioctl */ rc = audio->enc_ioctl(file, cmd, arg); } mutex_unlock(&audio->lock); return rc; } ssize_t audio_in_read(struct file *file, char __user *buf, size_t count, loff_t *pos) { struct q6audio_in *audio = file->private_data; const char __user *start = buf; unsigned char *data; uint32_t offset = 0; uint32_t size = 0; int rc = 0; uint32_t idx; struct meta_out_dsp meta; uint32_t bytes_to_copy = 0; uint32_t mfield_size = (audio->buf_cfg.meta_info_enable == 0) ? 0 : (sizeof(unsigned char) + (sizeof(struct meta_out_dsp)*(audio->buf_cfg.frames_per_buf))); memset(&meta, 0, sizeof(meta)); pr_debug("%s:session id %d: read - %d\n", __func__, audio->ac->session, count); if (!audio->enabled) return -EFAULT; mutex_lock(&audio->read_lock); while (count > 0) { rc = wait_event_interruptible( audio->read_wait, ((atomic_read(&audio->out_count) > 0) || (audio->stopped) || audio->rflush || audio->eos_rsp || audio->event_abort)); if (audio->event_abort) { rc = -EIO; break; } if (rc < 0) break; if ((audio->stopped && !(atomic_read(&audio->out_count))) || audio->rflush) { pr_debug("%s:session id %d: driver in stop state or flush,No more buf to read", __func__, audio->ac->session); rc = 0;/* End of File */ break; } if (!(atomic_read(&audio->out_count)) && (audio->eos_rsp == 1) && (count >= (sizeof(unsigned char) + sizeof(struct meta_out_dsp)))) { unsigned char num_of_frames; pr_info("%s:session id %d: eos %d at output\n", __func__, audio->ac->session, audio->eos_rsp); if (buf != start) break; num_of_frames = 0xFF; if (copy_to_user(buf, &num_of_frames, sizeof(unsigned char))) { rc = -EFAULT; break; } buf += sizeof(unsigned char); meta.frame_size = 0xFFFF; meta.encoded_pcm_samples = 0xFFFF; meta.msw_ts = 0x00; meta.lsw_ts = 0x00; meta.nflags = AUD_EOS_SET; 
audio->eos_rsp = 0; if (copy_to_user(buf, &meta, sizeof(meta))) { rc = -EFAULT; break; } buf += sizeof(meta); break; } data = (unsigned char *)q6asm_is_cpu_buf_avail(OUT, audio->ac, &size, &idx); if ((count >= (size + mfield_size)) && data) { if (audio->buf_cfg.meta_info_enable) { if (copy_to_user(buf, &audio->out_frame_info[idx][0], sizeof(unsigned char))) { rc = -EFAULT; break; } bytes_to_copy = (size + audio->out_frame_info[idx][1]); /* Number of frames information copied */ buf += sizeof(unsigned char); count -= sizeof(unsigned char); } else { offset = audio->out_frame_info[idx][1]; bytes_to_copy = size; } pr_debug("%s:session id %d: offset=%d nr of frames= %d\n", __func__, audio->ac->session, audio->out_frame_info[idx][1], audio->out_frame_info[idx][0]); if (copy_to_user(buf, &data[offset], bytes_to_copy)) { rc = -EFAULT; break; } count -= bytes_to_copy; buf += bytes_to_copy; } else { pr_err("%s:session id %d: short read data[%p] bytesavail[%d]bytesrequest[%d]\n", __func__, audio->ac->session, data, size, count); } atomic_dec(&audio->out_count); q6asm_read(audio->ac); break; } mutex_unlock(&audio->read_lock); pr_debug("%s:session id %d: read: %d bytes\n", __func__, audio->ac->session, (buf-start)); if (buf > start) return buf - start; return rc; } static int extract_meta_info(char *buf, unsigned long *msw_ts, unsigned long *lsw_ts, unsigned int *flags) { struct meta_in *meta = (struct meta_in *)buf; *msw_ts = meta->ntimestamp.highpart; *lsw_ts = meta->ntimestamp.lowpart; *flags = meta->nflags; return 0; } ssize_t audio_in_write(struct file *file, const char __user *buf, size_t count, loff_t *pos) { struct q6audio_in *audio = file->private_data; const char __user *start = buf; size_t xfer = 0; char *cpy_ptr; int rc = 0; unsigned char *data; uint32_t size = 0; uint32_t idx = 0; uint32_t nflags = 0; unsigned long msw_ts = 0; unsigned long lsw_ts = 0; uint32_t mfield_size = (audio->buf_cfg.meta_info_enable == 0) ? 
0 : sizeof(struct meta_in); pr_debug("%s:session id %d: to write[%d]\n", __func__, audio->ac->session, count); if (!audio->enabled) return -EFAULT; mutex_lock(&audio->write_lock); while (count > 0) { rc = wait_event_interruptible(audio->write_wait, ((atomic_read(&audio->in_count) > 0) || (audio->stopped) || (audio->wflush) || (audio->event_abort))); if (audio->event_abort) { rc = -EIO; break; } if (rc < 0) break; if (audio->stopped || audio->wflush) { pr_debug("%s: session id %d: stop or flush\n", __func__, audio->ac->session); rc = -EBUSY; break; } /* if no PCM data, might have only eos buffer such case do not hold cpu buffer */ if ((buf == start) && (count == mfield_size)) { char eos_buf[sizeof(struct meta_in)]; /* Processing begining of user buffer */ if (copy_from_user(eos_buf, buf, mfield_size)) { rc = -EFAULT; break; } /* Check if EOS flag is set and buffer has * contains just meta field */ extract_meta_info(eos_buf, &msw_ts, &lsw_ts, &nflags); buf += mfield_size; /* send the EOS and return */ pr_debug("%s:session id %d: send EOS 0x%8x\n", __func__, audio->ac->session, nflags); break; } data = (unsigned char *)q6asm_is_cpu_buf_avail(IN, audio->ac, &size, &idx); if (!data) { pr_debug("%s:session id %d: No buf available\n", __func__, audio->ac->session); continue; } cpy_ptr = data; if (audio->buf_cfg.meta_info_enable) { if (buf == start) { /* Processing beginning of user buffer */ if (copy_from_user(cpy_ptr, buf, mfield_size)) { rc = -EFAULT; break; } /* Check if EOS flag is set and buffer has * contains just meta field */ extract_meta_info(cpy_ptr, &msw_ts, &lsw_ts, &nflags); buf += mfield_size; count -= mfield_size; } else { pr_debug("%s:session id %d: continuous buffer\n", __func__, audio->ac->session); } } xfer = (count > (audio->pcm_cfg.buffer_size)) ? 
(audio->pcm_cfg.buffer_size) : count; if (copy_from_user(cpy_ptr, buf, xfer)) { rc = -EFAULT; break; } rc = q6asm_write(audio->ac, xfer, msw_ts, lsw_ts, 0x00); if (rc < 0) { rc = -EFAULT; break; } atomic_dec(&audio->in_count); count -= xfer; buf += xfer; } mutex_unlock(&audio->write_lock); pr_debug("%s:session id %d: eos_condition 0x%8x buf[0x%x] start[0x%x]\n", __func__, audio->ac->session, nflags, (int) buf, (int) start); if (nflags & AUD_EOS_SET) { rc = q6asm_cmd(audio->ac, CMD_EOS); pr_info("%s:session id %d: eos %d at input\n", __func__, audio->ac->session, audio->eos_rsp); } pr_debug("%s:session id %d: Written %d Avail Buf[%d]", __func__, audio->ac->session, (buf - start - mfield_size), atomic_read(&audio->in_count)); if (!rc) { if (buf > start) return buf - start; } return rc; } int audio_in_release(struct inode *inode, struct file *file) { struct q6audio_in *audio = file->private_data; pr_info("%s: session id %d\n", __func__, audio->ac->session); mutex_lock(&audio->lock); audio_in_disable(audio); q6asm_audio_client_free(audio->ac); mutex_unlock(&audio->lock); kfree(audio->enc_cfg); kfree(audio->codec_cfg); kfree(audio); return 0; }
gpl-2.0
rodrigues-daniel/linux
drivers/ata/pata_octeon_cf.c
899
28011
/* * Driver for the Octeon bootbus compact flash. * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 2005 - 2012 Cavium Inc. * Copyright (C) 2008 Wind River Systems */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/libata.h> #include <linux/hrtimer.h> #include <linux/slab.h> #include <linux/irq.h> #include <linux/of.h> #include <linux/of_platform.h> #include <linux/platform_device.h> #include <scsi/scsi_host.h> #include <asm/byteorder.h> #include <asm/octeon/octeon.h> /* * The Octeon bootbus compact flash interface is connected in at least * 3 different configurations on various evaluation boards: * * -- 8 bits no irq, no DMA * -- 16 bits no irq, no DMA * -- 16 bits True IDE mode with DMA, but no irq. * * In the last case the DMA engine can generate an interrupt when the * transfer is complete. For the first two cases only PIO is supported. * */ #define DRV_NAME "pata_octeon_cf" #define DRV_VERSION "2.2" /* Poll interval in nS. */ #define OCTEON_CF_BUSY_POLL_INTERVAL 500000 #define DMA_CFG 0 #define DMA_TIM 0x20 #define DMA_INT 0x38 #define DMA_INT_EN 0x50 struct octeon_cf_port { struct hrtimer delayed_finish; struct ata_port *ap; int dma_finished; void *c0; unsigned int cs0; unsigned int cs1; bool is_true_ide; u64 dma_base; }; static struct scsi_host_template octeon_cf_sht = { ATA_PIO_SHT(DRV_NAME), }; static int enable_dma; module_param(enable_dma, int, 0444); MODULE_PARM_DESC(enable_dma, "Enable use of DMA on interfaces that support it (0=no dma [default], 1=use dma)"); /** * Convert nanosecond based time to setting used in the * boot bus timing register, based on timing multiple */ static unsigned int ns_to_tim_reg(unsigned int tim_mult, unsigned int nsecs) { unsigned int val; /* * Compute # of eclock periods to get desired duration in * nanoseconds. 
*/ val = DIV_ROUND_UP(nsecs * (octeon_get_io_clock_rate() / 1000000), 1000 * tim_mult); return val; } static void octeon_cf_set_boot_reg_cfg(int cs, unsigned int multiplier) { union cvmx_mio_boot_reg_cfgx reg_cfg; unsigned int tim_mult; switch (multiplier) { case 8: tim_mult = 3; break; case 4: tim_mult = 0; break; case 2: tim_mult = 2; break; default: tim_mult = 1; break; } reg_cfg.u64 = cvmx_read_csr(CVMX_MIO_BOOT_REG_CFGX(cs)); reg_cfg.s.dmack = 0; /* Don't assert DMACK on access */ reg_cfg.s.tim_mult = tim_mult; /* Timing mutiplier */ reg_cfg.s.rd_dly = 0; /* Sample on falling edge of BOOT_OE */ reg_cfg.s.sam = 0; /* Don't combine write and output enable */ reg_cfg.s.we_ext = 0; /* No write enable extension */ reg_cfg.s.oe_ext = 0; /* No read enable extension */ reg_cfg.s.en = 1; /* Enable this region */ reg_cfg.s.orbit = 0; /* Don't combine with previous region */ reg_cfg.s.ale = 0; /* Don't do address multiplexing */ cvmx_write_csr(CVMX_MIO_BOOT_REG_CFGX(cs), reg_cfg.u64); } /** * Called after libata determines the needed PIO mode. This * function programs the Octeon bootbus regions to support the * timing requirements of the PIO mode. 
* * @ap: ATA port information * @dev: ATA device */ static void octeon_cf_set_piomode(struct ata_port *ap, struct ata_device *dev) { struct octeon_cf_port *cf_port = ap->private_data; union cvmx_mio_boot_reg_timx reg_tim; int T; struct ata_timing timing; unsigned int div; int use_iordy; int trh; int pause; /* These names are timing parameters from the ATA spec */ int t1; int t2; int t2i; /* * A divisor value of four will overflow the timing fields at * clock rates greater than 800MHz */ if (octeon_get_io_clock_rate() <= 800000000) div = 4; else div = 8; T = (int)((1000000000000LL * div) / octeon_get_io_clock_rate()); if (ata_timing_compute(dev, dev->pio_mode, &timing, T, T)) BUG(); t1 = timing.setup; if (t1) t1--; t2 = timing.active; if (t2) t2--; t2i = timing.act8b; if (t2i) t2i--; trh = ns_to_tim_reg(div, 20); if (trh) trh--; pause = (int)timing.cycle - (int)timing.active - (int)timing.setup - trh; if (pause < 0) pause = 0; if (pause) pause--; octeon_cf_set_boot_reg_cfg(cf_port->cs0, div); if (cf_port->is_true_ide) /* True IDE mode, program both chip selects. */ octeon_cf_set_boot_reg_cfg(cf_port->cs1, div); use_iordy = ata_pio_need_iordy(dev); reg_tim.u64 = cvmx_read_csr(CVMX_MIO_BOOT_REG_TIMX(cf_port->cs0)); /* Disable page mode */ reg_tim.s.pagem = 0; /* Enable dynamic timing */ reg_tim.s.waitm = use_iordy; /* Pages are disabled */ reg_tim.s.pages = 0; /* We don't use multiplexed address mode */ reg_tim.s.ale = 0; /* Not used */ reg_tim.s.page = 0; /* Time after IORDY to coninue to assert the data */ reg_tim.s.wait = 0; /* Time to wait to complete the cycle. */ reg_tim.s.pause = pause; /* How long to hold after a write to de-assert CE. */ reg_tim.s.wr_hld = trh; /* How long to wait after a read to de-assert CE. 
*/ reg_tim.s.rd_hld = trh; /* How long write enable is asserted */ reg_tim.s.we = t2; /* How long read enable is asserted */ reg_tim.s.oe = t2; /* Time after CE that read/write starts */ reg_tim.s.ce = ns_to_tim_reg(div, 5); /* Time before CE that address is valid */ reg_tim.s.adr = 0; /* Program the bootbus region timing for the data port chip select. */ cvmx_write_csr(CVMX_MIO_BOOT_REG_TIMX(cf_port->cs0), reg_tim.u64); if (cf_port->is_true_ide) /* True IDE mode, program both chip selects. */ cvmx_write_csr(CVMX_MIO_BOOT_REG_TIMX(cf_port->cs1), reg_tim.u64); } static void octeon_cf_set_dmamode(struct ata_port *ap, struct ata_device *dev) { struct octeon_cf_port *cf_port = ap->private_data; union cvmx_mio_boot_pin_defs pin_defs; union cvmx_mio_boot_dma_timx dma_tim; unsigned int oe_a; unsigned int oe_n; unsigned int dma_ackh; unsigned int dma_arq; unsigned int pause; unsigned int T0, Tkr, Td; unsigned int tim_mult; int c; const struct ata_timing *timing; timing = ata_timing_find_mode(dev->dma_mode); T0 = timing->cycle; Td = timing->active; Tkr = timing->recover; dma_ackh = timing->dmack_hold; dma_tim.u64 = 0; /* dma_tim.s.tim_mult = 0 --> 4x */ tim_mult = 4; /* not spec'ed, value in eclocks, not affected by tim_mult */ dma_arq = 8; pause = 25 - dma_arq * 1000 / (octeon_get_io_clock_rate() / 1000000); /* Tz */ oe_a = Td; /* Tkr from cf spec, lengthened to meet T0 */ oe_n = max(T0 - oe_a, Tkr); pin_defs.u64 = cvmx_read_csr(CVMX_MIO_BOOT_PIN_DEFS); /* DMA channel number. */ c = (cf_port->dma_base & 8) >> 3; /* Invert the polarity if the default is 0*/ dma_tim.s.dmack_pi = (pin_defs.u64 & (1ull << (11 + c))) ? 0 : 1; dma_tim.s.oe_n = ns_to_tim_reg(tim_mult, oe_n); dma_tim.s.oe_a = ns_to_tim_reg(tim_mult, oe_a); /* * This is tI, C.F. spec. says 0, but Sony CF card requires * more, we use 20 nS. 
*/ dma_tim.s.dmack_s = ns_to_tim_reg(tim_mult, 20); dma_tim.s.dmack_h = ns_to_tim_reg(tim_mult, dma_ackh); dma_tim.s.dmarq = dma_arq; dma_tim.s.pause = ns_to_tim_reg(tim_mult, pause); dma_tim.s.rd_dly = 0; /* Sample right on edge */ /* writes only */ dma_tim.s.we_n = ns_to_tim_reg(tim_mult, oe_n); dma_tim.s.we_a = ns_to_tim_reg(tim_mult, oe_a); pr_debug("ns to ticks (mult %d) of %d is: %d\n", tim_mult, 60, ns_to_tim_reg(tim_mult, 60)); pr_debug("oe_n: %d, oe_a: %d, dmack_s: %d, dmack_h: %d, dmarq: %d, pause: %d\n", dma_tim.s.oe_n, dma_tim.s.oe_a, dma_tim.s.dmack_s, dma_tim.s.dmack_h, dma_tim.s.dmarq, dma_tim.s.pause); cvmx_write_csr(cf_port->dma_base + DMA_TIM, dma_tim.u64); } /** * Handle an 8 bit I/O request. * * @dev: Device to access * @buffer: Data buffer * @buflen: Length of the buffer. * @rw: True to write. */ static unsigned int octeon_cf_data_xfer8(struct ata_device *dev, unsigned char *buffer, unsigned int buflen, int rw) { struct ata_port *ap = dev->link->ap; void __iomem *data_addr = ap->ioaddr.data_addr; unsigned long words; int count; words = buflen; if (rw) { count = 16; while (words--) { iowrite8(*buffer, data_addr); buffer++; /* * Every 16 writes do a read so the bootbus * FIFO doesn't fill up. */ if (--count == 0) { ioread8(ap->ioaddr.altstatus_addr); count = 16; } } } else { ioread8_rep(data_addr, buffer, words); } return buflen; } /** * Handle a 16 bit I/O request. * * @dev: Device to access * @buffer: Data buffer * @buflen: Length of the buffer. * @rw: True to write. */ static unsigned int octeon_cf_data_xfer16(struct ata_device *dev, unsigned char *buffer, unsigned int buflen, int rw) { struct ata_port *ap = dev->link->ap; void __iomem *data_addr = ap->ioaddr.data_addr; unsigned long words; int count; words = buflen / 2; if (rw) { count = 16; while (words--) { iowrite16(*(uint16_t *)buffer, data_addr); buffer += sizeof(uint16_t); /* * Every 16 writes do a read so the bootbus * FIFO doesn't fill up. 
*/ if (--count == 0) { ioread8(ap->ioaddr.altstatus_addr); count = 16; } } } else { while (words--) { *(uint16_t *)buffer = ioread16(data_addr); buffer += sizeof(uint16_t); } } /* Transfer trailing 1 byte, if any. */ if (unlikely(buflen & 0x01)) { __le16 align_buf[1] = { 0 }; if (rw == READ) { align_buf[0] = cpu_to_le16(ioread16(data_addr)); memcpy(buffer, align_buf, 1); } else { memcpy(align_buf, buffer, 1); iowrite16(le16_to_cpu(align_buf[0]), data_addr); } words++; } return buflen; } /** * Read the taskfile for 16bit non-True IDE only. */ static void octeon_cf_tf_read16(struct ata_port *ap, struct ata_taskfile *tf) { u16 blob; /* The base of the registers is at ioaddr.data_addr. */ void __iomem *base = ap->ioaddr.data_addr; blob = __raw_readw(base + 0xc); tf->feature = blob >> 8; blob = __raw_readw(base + 2); tf->nsect = blob & 0xff; tf->lbal = blob >> 8; blob = __raw_readw(base + 4); tf->lbam = blob & 0xff; tf->lbah = blob >> 8; blob = __raw_readw(base + 6); tf->device = blob & 0xff; tf->command = blob >> 8; if (tf->flags & ATA_TFLAG_LBA48) { if (likely(ap->ioaddr.ctl_addr)) { iowrite8(tf->ctl | ATA_HOB, ap->ioaddr.ctl_addr); blob = __raw_readw(base + 0xc); tf->hob_feature = blob >> 8; blob = __raw_readw(base + 2); tf->hob_nsect = blob & 0xff; tf->hob_lbal = blob >> 8; blob = __raw_readw(base + 4); tf->hob_lbam = blob & 0xff; tf->hob_lbah = blob >> 8; iowrite8(tf->ctl, ap->ioaddr.ctl_addr); ap->last_ctl = tf->ctl; } else { WARN_ON(1); } } } static u8 octeon_cf_check_status16(struct ata_port *ap) { u16 blob; void __iomem *base = ap->ioaddr.data_addr; blob = __raw_readw(base + 6); return blob >> 8; } static int octeon_cf_softreset16(struct ata_link *link, unsigned int *classes, unsigned long deadline) { struct ata_port *ap = link->ap; void __iomem *base = ap->ioaddr.data_addr; int rc; u8 err; DPRINTK("about to softreset\n"); __raw_writew(ap->ctl, base + 0xe); udelay(20); __raw_writew(ap->ctl | ATA_SRST, base + 0xe); udelay(20); __raw_writew(ap->ctl, base + 0xe); 
rc = ata_sff_wait_after_reset(link, 1, deadline); if (rc) { ata_link_err(link, "SRST failed (errno=%d)\n", rc); return rc; } /* determine by signature whether we have ATA or ATAPI devices */ classes[0] = ata_sff_dev_classify(&link->device[0], 1, &err); DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]); return 0; } /** * Load the taskfile for 16bit non-True IDE only. The device_addr is * not loaded, we do this as part of octeon_cf_exec_command16. */ static void octeon_cf_tf_load16(struct ata_port *ap, const struct ata_taskfile *tf) { unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR; /* The base of the registers is at ioaddr.data_addr. */ void __iomem *base = ap->ioaddr.data_addr; if (tf->ctl != ap->last_ctl) { iowrite8(tf->ctl, ap->ioaddr.ctl_addr); ap->last_ctl = tf->ctl; ata_wait_idle(ap); } if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) { __raw_writew(tf->hob_feature << 8, base + 0xc); __raw_writew(tf->hob_nsect | tf->hob_lbal << 8, base + 2); __raw_writew(tf->hob_lbam | tf->hob_lbah << 8, base + 4); VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n", tf->hob_feature, tf->hob_nsect, tf->hob_lbal, tf->hob_lbam, tf->hob_lbah); } if (is_addr) { __raw_writew(tf->feature << 8, base + 0xc); __raw_writew(tf->nsect | tf->lbal << 8, base + 2); __raw_writew(tf->lbam | tf->lbah << 8, base + 4); VPRINTK("feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n", tf->feature, tf->nsect, tf->lbal, tf->lbam, tf->lbah); } ata_wait_idle(ap); } static void octeon_cf_dev_select(struct ata_port *ap, unsigned int device) { /* There is only one device, do nothing. */ return; } /* * Issue ATA command to host controller. The device_addr is also sent * as it must be written in a combined write with the command. */ static void octeon_cf_exec_command16(struct ata_port *ap, const struct ata_taskfile *tf) { /* The base of the registers is at ioaddr.data_addr. 
*/ void __iomem *base = ap->ioaddr.data_addr; u16 blob; if (tf->flags & ATA_TFLAG_DEVICE) { VPRINTK("device 0x%X\n", tf->device); blob = tf->device; } else { blob = 0; } DPRINTK("ata%u: cmd 0x%X\n", ap->print_id, tf->command); blob |= (tf->command << 8); __raw_writew(blob, base + 6); ata_wait_idle(ap); } static void octeon_cf_ata_port_noaction(struct ata_port *ap) { } static void octeon_cf_dma_setup(struct ata_queued_cmd *qc) { struct ata_port *ap = qc->ap; struct octeon_cf_port *cf_port; cf_port = ap->private_data; DPRINTK("ENTER\n"); /* issue r/w command */ qc->cursg = qc->sg; cf_port->dma_finished = 0; ap->ops->sff_exec_command(ap, &qc->tf); DPRINTK("EXIT\n"); } /** * Start a DMA transfer that was already setup * * @qc: Information about the DMA */ static void octeon_cf_dma_start(struct ata_queued_cmd *qc) { struct octeon_cf_port *cf_port = qc->ap->private_data; union cvmx_mio_boot_dma_cfgx mio_boot_dma_cfg; union cvmx_mio_boot_dma_intx mio_boot_dma_int; struct scatterlist *sg; VPRINTK("%d scatterlists\n", qc->n_elem); /* Get the scatter list entry we need to DMA into */ sg = qc->cursg; BUG_ON(!sg); /* * Clear the DMA complete status. */ mio_boot_dma_int.u64 = 0; mio_boot_dma_int.s.done = 1; cvmx_write_csr(cf_port->dma_base + DMA_INT, mio_boot_dma_int.u64); /* Enable the interrupt. */ cvmx_write_csr(cf_port->dma_base + DMA_INT_EN, mio_boot_dma_int.u64); /* Set the direction of the DMA */ mio_boot_dma_cfg.u64 = 0; #ifdef __LITTLE_ENDIAN mio_boot_dma_cfg.s.endian = 1; #endif mio_boot_dma_cfg.s.en = 1; mio_boot_dma_cfg.s.rw = ((qc->tf.flags & ATA_TFLAG_WRITE) != 0); /* * Don't stop the DMA if the device deasserts DMARQ. Many * compact flashes deassert DMARQ for a short time between * sectors. Instead of stopping and restarting the DMA, we'll * let the hardware do it. If the DMA is really stopped early * due to an error condition, a later timeout will force us to * stop. 
*/ mio_boot_dma_cfg.s.clr = 0; /* Size is specified in 16bit words and minus one notation */ mio_boot_dma_cfg.s.size = sg_dma_len(sg) / 2 - 1; /* We need to swap the high and low bytes of every 16 bits */ mio_boot_dma_cfg.s.swap8 = 1; mio_boot_dma_cfg.s.adr = sg_dma_address(sg); VPRINTK("%s %d bytes address=%p\n", (mio_boot_dma_cfg.s.rw) ? "write" : "read", sg->length, (void *)(unsigned long)mio_boot_dma_cfg.s.adr); cvmx_write_csr(cf_port->dma_base + DMA_CFG, mio_boot_dma_cfg.u64); } /** * * LOCKING: * spin_lock_irqsave(host lock) * */ static unsigned int octeon_cf_dma_finished(struct ata_port *ap, struct ata_queued_cmd *qc) { struct ata_eh_info *ehi = &ap->link.eh_info; struct octeon_cf_port *cf_port = ap->private_data; union cvmx_mio_boot_dma_cfgx dma_cfg; union cvmx_mio_boot_dma_intx dma_int; u8 status; VPRINTK("ata%u: protocol %d task_state %d\n", ap->print_id, qc->tf.protocol, ap->hsm_task_state); if (ap->hsm_task_state != HSM_ST_LAST) return 0; dma_cfg.u64 = cvmx_read_csr(cf_port->dma_base + DMA_CFG); if (dma_cfg.s.size != 0xfffff) { /* Error, the transfer was not complete. */ qc->err_mask |= AC_ERR_HOST_BUS; ap->hsm_task_state = HSM_ST_ERR; } /* Stop and clear the dma engine. */ dma_cfg.u64 = 0; dma_cfg.s.size = -1; cvmx_write_csr(cf_port->dma_base + DMA_CFG, dma_cfg.u64); /* Disable the interrupt. */ dma_int.u64 = 0; cvmx_write_csr(cf_port->dma_base + DMA_INT_EN, dma_int.u64); /* Clear the DMA complete status */ dma_int.s.done = 1; cvmx_write_csr(cf_port->dma_base + DMA_INT, dma_int.u64); status = ap->ops->sff_check_status(ap); ata_sff_hsm_move(ap, qc, status, 0); if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA)) ata_ehi_push_desc(ehi, "DMA stat 0x%x", status); return 1; } /* * Check if any queued commands have more DMAs, if so start the next * transfer, else do end of transfer handling. 
*/ static irqreturn_t octeon_cf_interrupt(int irq, void *dev_instance) { struct ata_host *host = dev_instance; struct octeon_cf_port *cf_port; int i; unsigned int handled = 0; unsigned long flags; spin_lock_irqsave(&host->lock, flags); DPRINTK("ENTER\n"); for (i = 0; i < host->n_ports; i++) { u8 status; struct ata_port *ap; struct ata_queued_cmd *qc; union cvmx_mio_boot_dma_intx dma_int; union cvmx_mio_boot_dma_cfgx dma_cfg; ap = host->ports[i]; cf_port = ap->private_data; dma_int.u64 = cvmx_read_csr(cf_port->dma_base + DMA_INT); dma_cfg.u64 = cvmx_read_csr(cf_port->dma_base + DMA_CFG); qc = ata_qc_from_tag(ap, ap->link.active_tag); if (!qc || (qc->tf.flags & ATA_TFLAG_POLLING)) continue; if (dma_int.s.done && !dma_cfg.s.en) { if (!sg_is_last(qc->cursg)) { qc->cursg = sg_next(qc->cursg); handled = 1; octeon_cf_dma_start(qc); continue; } else { cf_port->dma_finished = 1; } } if (!cf_port->dma_finished) continue; status = ioread8(ap->ioaddr.altstatus_addr); if (status & (ATA_BUSY | ATA_DRQ)) { /* * We are busy, try to handle it later. This * is the DMA finished interrupt, and it could * take a little while for the card to be * ready for more commands. */ /* Clear DMA irq. 
*/ dma_int.u64 = 0; dma_int.s.done = 1; cvmx_write_csr(cf_port->dma_base + DMA_INT, dma_int.u64); hrtimer_start_range_ns(&cf_port->delayed_finish, ns_to_ktime(OCTEON_CF_BUSY_POLL_INTERVAL), OCTEON_CF_BUSY_POLL_INTERVAL / 5, HRTIMER_MODE_REL); handled = 1; } else { handled |= octeon_cf_dma_finished(ap, qc); } } spin_unlock_irqrestore(&host->lock, flags); DPRINTK("EXIT\n"); return IRQ_RETVAL(handled); } static enum hrtimer_restart octeon_cf_delayed_finish(struct hrtimer *hrt) { struct octeon_cf_port *cf_port = container_of(hrt, struct octeon_cf_port, delayed_finish); struct ata_port *ap = cf_port->ap; struct ata_host *host = ap->host; struct ata_queued_cmd *qc; unsigned long flags; u8 status; enum hrtimer_restart rv = HRTIMER_NORESTART; spin_lock_irqsave(&host->lock, flags); /* * If the port is not waiting for completion, it must have * handled it previously. The hsm_task_state is * protected by host->lock. */ if (ap->hsm_task_state != HSM_ST_LAST || !cf_port->dma_finished) goto out; status = ioread8(ap->ioaddr.altstatus_addr); if (status & (ATA_BUSY | ATA_DRQ)) { /* Still busy, try again. */ hrtimer_forward_now(hrt, ns_to_ktime(OCTEON_CF_BUSY_POLL_INTERVAL)); rv = HRTIMER_RESTART; goto out; } qc = ata_qc_from_tag(ap, ap->link.active_tag); if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) octeon_cf_dma_finished(ap, qc); out: spin_unlock_irqrestore(&host->lock, flags); return rv; } static void octeon_cf_dev_config(struct ata_device *dev) { /* * A maximum of 2^20 - 1 16 bit transfers are possible with * the bootbus DMA. So we need to throttle max_sectors to * (2^12 - 1 == 4095) to assure that this can never happen. */ dev->max_sectors = min(dev->max_sectors, 4095U); } /* * We don't do ATAPI DMA so return 0. 
*/ static int octeon_cf_check_atapi_dma(struct ata_queued_cmd *qc) { return 0; } static unsigned int octeon_cf_qc_issue(struct ata_queued_cmd *qc) { struct ata_port *ap = qc->ap; switch (qc->tf.protocol) { case ATA_PROT_DMA: WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING); ap->ops->sff_tf_load(ap, &qc->tf); /* load tf registers */ octeon_cf_dma_setup(qc); /* set up dma */ octeon_cf_dma_start(qc); /* initiate dma */ ap->hsm_task_state = HSM_ST_LAST; break; case ATAPI_PROT_DMA: dev_err(ap->dev, "Error, ATAPI not supported\n"); BUG(); default: return ata_sff_qc_issue(qc); } return 0; } static struct ata_port_operations octeon_cf_ops = { .inherits = &ata_sff_port_ops, .check_atapi_dma = octeon_cf_check_atapi_dma, .qc_prep = ata_noop_qc_prep, .qc_issue = octeon_cf_qc_issue, .sff_dev_select = octeon_cf_dev_select, .sff_irq_on = octeon_cf_ata_port_noaction, .sff_irq_clear = octeon_cf_ata_port_noaction, .cable_detect = ata_cable_40wire, .set_piomode = octeon_cf_set_piomode, .set_dmamode = octeon_cf_set_dmamode, .dev_config = octeon_cf_dev_config, }; static int octeon_cf_probe(struct platform_device *pdev) { struct resource *res_cs0, *res_cs1; bool is_16bit; const __be32 *cs_num; struct property *reg_prop; int n_addr, n_size, reg_len; struct device_node *node; const void *prop; void __iomem *cs0; void __iomem *cs1 = NULL; struct ata_host *host; struct ata_port *ap; int irq = 0; irq_handler_t irq_handler = NULL; void __iomem *base; struct octeon_cf_port *cf_port; int rv = -ENOMEM; node = pdev->dev.of_node; if (node == NULL) return -EINVAL; cf_port = devm_kzalloc(&pdev->dev, sizeof(*cf_port), GFP_KERNEL); if (!cf_port) return -ENOMEM; cf_port->is_true_ide = (of_find_property(node, "cavium,true-ide", NULL) != NULL); prop = of_get_property(node, "cavium,bus-width", NULL); if (prop) is_16bit = (be32_to_cpup(prop) == 16); else is_16bit = false; n_addr = of_n_addr_cells(node); n_size = of_n_size_cells(node); reg_prop = of_find_property(node, "reg", &reg_len); if (!reg_prop || reg_len < 
sizeof(__be32)) return -EINVAL; cs_num = reg_prop->value; cf_port->cs0 = be32_to_cpup(cs_num); if (cf_port->is_true_ide) { struct device_node *dma_node; dma_node = of_parse_phandle(node, "cavium,dma-engine-handle", 0); if (dma_node) { struct platform_device *dma_dev; dma_dev = of_find_device_by_node(dma_node); if (dma_dev) { struct resource *res_dma; int i; res_dma = platform_get_resource(dma_dev, IORESOURCE_MEM, 0); if (!res_dma) { of_node_put(dma_node); return -EINVAL; } cf_port->dma_base = (u64)devm_ioremap_nocache(&pdev->dev, res_dma->start, resource_size(res_dma)); if (!cf_port->dma_base) { of_node_put(dma_node); return -EINVAL; } irq_handler = octeon_cf_interrupt; i = platform_get_irq(dma_dev, 0); if (i > 0) irq = i; } of_node_put(dma_node); } res_cs1 = platform_get_resource(pdev, IORESOURCE_MEM, 1); if (!res_cs1) return -EINVAL; cs1 = devm_ioremap_nocache(&pdev->dev, res_cs1->start, resource_size(res_cs1)); if (!cs1) return rv; if (reg_len < (n_addr + n_size + 1) * sizeof(__be32)) return -EINVAL; cs_num += n_addr + n_size; cf_port->cs1 = be32_to_cpup(cs_num); } res_cs0 = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res_cs0) return -EINVAL; cs0 = devm_ioremap_nocache(&pdev->dev, res_cs0->start, resource_size(res_cs0)); if (!cs0) return rv; /* allocate host */ host = ata_host_alloc(&pdev->dev, 1); if (!host) return rv; ap = host->ports[0]; ap->private_data = cf_port; pdev->dev.platform_data = cf_port; cf_port->ap = ap; ap->ops = &octeon_cf_ops; ap->pio_mask = ATA_PIO6; ap->flags |= ATA_FLAG_NO_ATAPI | ATA_FLAG_PIO_POLLING; if (!is_16bit) { base = cs0 + 0x800; ap->ioaddr.cmd_addr = base; ata_sff_std_ports(&ap->ioaddr); ap->ioaddr.altstatus_addr = base + 0xe; ap->ioaddr.ctl_addr = base + 0xe; octeon_cf_ops.sff_data_xfer = octeon_cf_data_xfer8; } else if (cf_port->is_true_ide) { base = cs0; ap->ioaddr.cmd_addr = base + (ATA_REG_CMD << 1) + 1; ap->ioaddr.data_addr = base + (ATA_REG_DATA << 1); ap->ioaddr.error_addr = base + (ATA_REG_ERR << 1) + 1; 
ap->ioaddr.feature_addr = base + (ATA_REG_FEATURE << 1) + 1; ap->ioaddr.nsect_addr = base + (ATA_REG_NSECT << 1) + 1; ap->ioaddr.lbal_addr = base + (ATA_REG_LBAL << 1) + 1; ap->ioaddr.lbam_addr = base + (ATA_REG_LBAM << 1) + 1; ap->ioaddr.lbah_addr = base + (ATA_REG_LBAH << 1) + 1; ap->ioaddr.device_addr = base + (ATA_REG_DEVICE << 1) + 1; ap->ioaddr.status_addr = base + (ATA_REG_STATUS << 1) + 1; ap->ioaddr.command_addr = base + (ATA_REG_CMD << 1) + 1; ap->ioaddr.altstatus_addr = cs1 + (6 << 1) + 1; ap->ioaddr.ctl_addr = cs1 + (6 << 1) + 1; octeon_cf_ops.sff_data_xfer = octeon_cf_data_xfer16; ap->mwdma_mask = enable_dma ? ATA_MWDMA4 : 0; /* True IDE mode needs a timer to poll for not-busy. */ hrtimer_init(&cf_port->delayed_finish, CLOCK_MONOTONIC, HRTIMER_MODE_REL); cf_port->delayed_finish.function = octeon_cf_delayed_finish; } else { /* 16 bit but not True IDE */ base = cs0 + 0x800; octeon_cf_ops.sff_data_xfer = octeon_cf_data_xfer16; octeon_cf_ops.softreset = octeon_cf_softreset16; octeon_cf_ops.sff_check_status = octeon_cf_check_status16; octeon_cf_ops.sff_tf_read = octeon_cf_tf_read16; octeon_cf_ops.sff_tf_load = octeon_cf_tf_load16; octeon_cf_ops.sff_exec_command = octeon_cf_exec_command16; ap->ioaddr.data_addr = base + ATA_REG_DATA; ap->ioaddr.nsect_addr = base + ATA_REG_NSECT; ap->ioaddr.lbal_addr = base + ATA_REG_LBAL; ap->ioaddr.ctl_addr = base + 0xe; ap->ioaddr.altstatus_addr = base + 0xe; } cf_port->c0 = ap->ioaddr.ctl_addr; rv = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); if (rv) return rv; ata_port_desc(ap, "cmd %p ctl %p", base, ap->ioaddr.ctl_addr); dev_info(&pdev->dev, "version " DRV_VERSION" %d bit%s.\n", is_16bit ? 16 : 8, cf_port->is_true_ide ? 
", True IDE" : ""); return ata_host_activate(host, irq, irq_handler, IRQF_SHARED, &octeon_cf_sht); } static void octeon_cf_shutdown(struct device *dev) { union cvmx_mio_boot_dma_cfgx dma_cfg; union cvmx_mio_boot_dma_intx dma_int; struct octeon_cf_port *cf_port = dev_get_platdata(dev); if (cf_port->dma_base) { /* Stop and clear the dma engine. */ dma_cfg.u64 = 0; dma_cfg.s.size = -1; cvmx_write_csr(cf_port->dma_base + DMA_CFG, dma_cfg.u64); /* Disable the interrupt. */ dma_int.u64 = 0; cvmx_write_csr(cf_port->dma_base + DMA_INT_EN, dma_int.u64); /* Clear the DMA complete status */ dma_int.s.done = 1; cvmx_write_csr(cf_port->dma_base + DMA_INT, dma_int.u64); __raw_writeb(0, cf_port->c0); udelay(20); __raw_writeb(ATA_SRST, cf_port->c0); udelay(20); __raw_writeb(0, cf_port->c0); mdelay(100); } } static struct of_device_id octeon_cf_match[] = { { .compatible = "cavium,ebt3000-compact-flash", }, {}, }; MODULE_DEVICE_TABLE(of, octeon_cf_match); static struct platform_driver octeon_cf_driver = { .probe = octeon_cf_probe, .driver = { .name = DRV_NAME, .of_match_table = octeon_cf_match, .shutdown = octeon_cf_shutdown }, }; static int __init octeon_cf_init(void) { return platform_driver_register(&octeon_cf_driver); } MODULE_AUTHOR("David Daney <ddaney@caviumnetworks.com>"); MODULE_DESCRIPTION("low-level driver for Cavium OCTEON Compact Flash PATA"); MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_VERSION); MODULE_ALIAS("platform:" DRV_NAME); module_init(octeon_cf_init);
gpl-2.0
jonjonarnearne/smi2021
tools/perf/tests/rdpmc.c
1155
3242
#include <unistd.h> #include <stdlib.h> #include <signal.h> #include <sys/mman.h> #include <linux/types.h> #include "perf.h" #include "debug.h" #include "tests.h" #include "cloexec.h" #if defined(__x86_64__) || defined(__i386__) static u64 rdpmc(unsigned int counter) { unsigned int low, high; asm volatile("rdpmc" : "=a" (low), "=d" (high) : "c" (counter)); return low | ((u64)high) << 32; } static u64 rdtsc(void) { unsigned int low, high; asm volatile("rdtsc" : "=a" (low), "=d" (high)); return low | ((u64)high) << 32; } static u64 mmap_read_self(void *addr) { struct perf_event_mmap_page *pc = addr; u32 seq, idx, time_mult = 0, time_shift = 0; u64 count, cyc = 0, time_offset = 0, enabled, running, delta; do { seq = pc->lock; barrier(); enabled = pc->time_enabled; running = pc->time_running; if (enabled != running) { cyc = rdtsc(); time_mult = pc->time_mult; time_shift = pc->time_shift; time_offset = pc->time_offset; } idx = pc->index; count = pc->offset; if (idx) count += rdpmc(idx - 1); barrier(); } while (pc->lock != seq); if (enabled != running) { u64 quot, rem; quot = (cyc >> time_shift); rem = cyc & ((1 << time_shift) - 1); delta = time_offset + quot * time_mult + ((rem * time_mult) >> time_shift); enabled += delta; if (idx) running += delta; quot = count / running; rem = count % running; count = quot * enabled + (rem * enabled) / running; } return count; } /* * If the RDPMC instruction faults then signal this back to the test parent task: */ static void segfault_handler(int sig __maybe_unused, siginfo_t *info __maybe_unused, void *uc __maybe_unused) { exit(-1); } static int __test__rdpmc(void) { volatile int tmp = 0; u64 i, loops = 1000; int n; int fd; void *addr; struct perf_event_attr attr = { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_INSTRUCTIONS, .exclude_kernel = 1, }; u64 delta_sum = 0; struct sigaction sa; char sbuf[STRERR_BUFSIZE]; sigfillset(&sa.sa_mask); sa.sa_sigaction = segfault_handler; sigaction(SIGSEGV, &sa, NULL); fd = 
sys_perf_event_open(&attr, 0, -1, -1, perf_event_open_cloexec_flag()); if (fd < 0) { pr_err("Error: sys_perf_event_open() syscall returned " "with %d (%s)\n", fd, strerror_r(errno, sbuf, sizeof(sbuf))); return -1; } addr = mmap(NULL, page_size, PROT_READ, MAP_SHARED, fd, 0); if (addr == (void *)(-1)) { pr_err("Error: mmap() syscall returned with (%s)\n", strerror_r(errno, sbuf, sizeof(sbuf))); goto out_close; } for (n = 0; n < 6; n++) { u64 stamp, now, delta; stamp = mmap_read_self(addr); for (i = 0; i < loops; i++) tmp++; now = mmap_read_self(addr); loops *= 10; delta = now - stamp; pr_debug("%14d: %14Lu\n", n, (long long)delta); delta_sum += delta; } munmap(addr, page_size); pr_debug(" "); out_close: close(fd); if (!delta_sum) return -1; return 0; } int test__rdpmc(void) { int status = 0; int wret = 0; int ret; int pid; pid = fork(); if (pid < 0) return -1; if (!pid) { ret = __test__rdpmc(); exit(ret); } wret = waitpid(pid, &status, 0); if (wret < 0 || status) return -1; return 0; } #endif
gpl-2.0
troth/linux-kernel
drivers/edac/i5100_edac.c
2179
31071
/* * Intel 5100 Memory Controllers kernel module * * This file may be distributed under the terms of the * GNU General Public License. * * This module is based on the following document: * * Intel 5100X Chipset Memory Controller Hub (MCH) - Datasheet * http://download.intel.com/design/chipsets/datashts/318378.pdf * * The intel 5100 has two independent channels. EDAC core currently * can not reflect this configuration so instead the chip-select * rows for each respective channel are laid out one after another, * the first half belonging to channel 0, the second half belonging * to channel 1. * * This driver is for DDR2 DIMMs, and it uses chip select to select among the * several ranks. However, instead of showing memories as ranks, it outputs * them as DIMM's. An internal table creates the association between ranks * and DIMM's. */ #include <linux/module.h> #include <linux/init.h> #include <linux/pci.h> #include <linux/pci_ids.h> #include <linux/edac.h> #include <linux/delay.h> #include <linux/mmzone.h> #include <linux/debugfs.h> #include "edac_core.h" /* register addresses */ /* device 16, func 1 */ #define I5100_MC 0x40 /* Memory Control Register */ #define I5100_MC_SCRBEN_MASK (1 << 7) #define I5100_MC_SCRBDONE_MASK (1 << 4) #define I5100_MS 0x44 /* Memory Status Register */ #define I5100_SPDDATA 0x48 /* Serial Presence Detect Status Reg */ #define I5100_SPDCMD 0x4c /* Serial Presence Detect Command Reg */ #define I5100_TOLM 0x6c /* Top of Low Memory */ #define I5100_MIR0 0x80 /* Memory Interleave Range 0 */ #define I5100_MIR1 0x84 /* Memory Interleave Range 1 */ #define I5100_AMIR_0 0x8c /* Adjusted Memory Interleave Range 0 */ #define I5100_AMIR_1 0x90 /* Adjusted Memory Interleave Range 1 */ #define I5100_FERR_NF_MEM 0xa0 /* MC First Non Fatal Errors */ #define I5100_FERR_NF_MEM_M16ERR_MASK (1 << 16) #define I5100_FERR_NF_MEM_M15ERR_MASK (1 << 15) #define I5100_FERR_NF_MEM_M14ERR_MASK (1 << 14) #define I5100_FERR_NF_MEM_M12ERR_MASK (1 << 12) #define 
I5100_FERR_NF_MEM_M11ERR_MASK (1 << 11) #define I5100_FERR_NF_MEM_M10ERR_MASK (1 << 10) #define I5100_FERR_NF_MEM_M6ERR_MASK (1 << 6) #define I5100_FERR_NF_MEM_M5ERR_MASK (1 << 5) #define I5100_FERR_NF_MEM_M4ERR_MASK (1 << 4) #define I5100_FERR_NF_MEM_M1ERR_MASK (1 << 1) #define I5100_FERR_NF_MEM_ANY_MASK \ (I5100_FERR_NF_MEM_M16ERR_MASK | \ I5100_FERR_NF_MEM_M15ERR_MASK | \ I5100_FERR_NF_MEM_M14ERR_MASK | \ I5100_FERR_NF_MEM_M12ERR_MASK | \ I5100_FERR_NF_MEM_M11ERR_MASK | \ I5100_FERR_NF_MEM_M10ERR_MASK | \ I5100_FERR_NF_MEM_M6ERR_MASK | \ I5100_FERR_NF_MEM_M5ERR_MASK | \ I5100_FERR_NF_MEM_M4ERR_MASK | \ I5100_FERR_NF_MEM_M1ERR_MASK) #define I5100_NERR_NF_MEM 0xa4 /* MC Next Non-Fatal Errors */ #define I5100_EMASK_MEM 0xa8 /* MC Error Mask Register */ #define I5100_MEM0EINJMSK0 0x200 /* Injection Mask0 Register Channel 0 */ #define I5100_MEM1EINJMSK0 0x208 /* Injection Mask0 Register Channel 1 */ #define I5100_MEMXEINJMSK0_EINJEN (1 << 27) #define I5100_MEM0EINJMSK1 0x204 /* Injection Mask1 Register Channel 0 */ #define I5100_MEM1EINJMSK1 0x206 /* Injection Mask1 Register Channel 1 */ /* Device 19, Function 0 */ #define I5100_DINJ0 0x9a /* device 21 and 22, func 0 */ #define I5100_MTR_0 0x154 /* Memory Technology Registers 0-3 */ #define I5100_DMIR 0x15c /* DIMM Interleave Range */ #define I5100_VALIDLOG 0x18c /* Valid Log Markers */ #define I5100_NRECMEMA 0x190 /* Non-Recoverable Memory Error Log Reg A */ #define I5100_NRECMEMB 0x194 /* Non-Recoverable Memory Error Log Reg B */ #define I5100_REDMEMA 0x198 /* Recoverable Memory Data Error Log Reg A */ #define I5100_REDMEMB 0x19c /* Recoverable Memory Data Error Log Reg B */ #define I5100_RECMEMA 0x1a0 /* Recoverable Memory Error Log Reg A */ #define I5100_RECMEMB 0x1a4 /* Recoverable Memory Error Log Reg B */ #define I5100_MTR_4 0x1b0 /* Memory Technology Registers 4,5 */ /* bit field accessors */ static inline u32 i5100_mc_scrben(u32 mc) { return mc >> 7 & 1; } static inline u32 i5100_mc_errdeten(u32 mc) { return 
mc >> 5 & 1; } static inline u32 i5100_mc_scrbdone(u32 mc) { return mc >> 4 & 1; } static inline u16 i5100_spddata_rdo(u16 a) { return a >> 15 & 1; } static inline u16 i5100_spddata_sbe(u16 a) { return a >> 13 & 1; } static inline u16 i5100_spddata_busy(u16 a) { return a >> 12 & 1; } static inline u16 i5100_spddata_data(u16 a) { return a & ((1 << 8) - 1); } static inline u32 i5100_spdcmd_create(u32 dti, u32 ckovrd, u32 sa, u32 ba, u32 data, u32 cmd) { return ((dti & ((1 << 4) - 1)) << 28) | ((ckovrd & 1) << 27) | ((sa & ((1 << 3) - 1)) << 24) | ((ba & ((1 << 8) - 1)) << 16) | ((data & ((1 << 8) - 1)) << 8) | (cmd & 1); } static inline u16 i5100_tolm_tolm(u16 a) { return a >> 12 & ((1 << 4) - 1); } static inline u16 i5100_mir_limit(u16 a) { return a >> 4 & ((1 << 12) - 1); } static inline u16 i5100_mir_way1(u16 a) { return a >> 1 & 1; } static inline u16 i5100_mir_way0(u16 a) { return a & 1; } static inline u32 i5100_ferr_nf_mem_chan_indx(u32 a) { return a >> 28 & 1; } static inline u32 i5100_ferr_nf_mem_any(u32 a) { return a & I5100_FERR_NF_MEM_ANY_MASK; } static inline u32 i5100_nerr_nf_mem_any(u32 a) { return i5100_ferr_nf_mem_any(a); } static inline u32 i5100_dmir_limit(u32 a) { return a >> 16 & ((1 << 11) - 1); } static inline u32 i5100_dmir_rank(u32 a, u32 i) { return a >> (4 * i) & ((1 << 2) - 1); } static inline u16 i5100_mtr_present(u16 a) { return a >> 10 & 1; } static inline u16 i5100_mtr_ethrottle(u16 a) { return a >> 9 & 1; } static inline u16 i5100_mtr_width(u16 a) { return a >> 8 & 1; } static inline u16 i5100_mtr_numbank(u16 a) { return a >> 6 & 1; } static inline u16 i5100_mtr_numrow(u16 a) { return a >> 2 & ((1 << 2) - 1); } static inline u16 i5100_mtr_numcol(u16 a) { return a & ((1 << 2) - 1); } static inline u32 i5100_validlog_redmemvalid(u32 a) { return a >> 2 & 1; } static inline u32 i5100_validlog_recmemvalid(u32 a) { return a >> 1 & 1; } static inline u32 i5100_validlog_nrecmemvalid(u32 a) { return a & 1; } static inline u32 
i5100_nrecmema_merr(u32 a) { return a >> 15 & ((1 << 5) - 1); } static inline u32 i5100_nrecmema_bank(u32 a) { return a >> 12 & ((1 << 3) - 1); } static inline u32 i5100_nrecmema_rank(u32 a) { return a >> 8 & ((1 << 3) - 1); } static inline u32 i5100_nrecmema_dm_buf_id(u32 a) { return a & ((1 << 8) - 1); } static inline u32 i5100_nrecmemb_cas(u32 a) { return a >> 16 & ((1 << 13) - 1); } static inline u32 i5100_nrecmemb_ras(u32 a) { return a & ((1 << 16) - 1); } static inline u32 i5100_redmemb_ecc_locator(u32 a) { return a & ((1 << 18) - 1); } static inline u32 i5100_recmema_merr(u32 a) { return i5100_nrecmema_merr(a); } static inline u32 i5100_recmema_bank(u32 a) { return i5100_nrecmema_bank(a); } static inline u32 i5100_recmema_rank(u32 a) { return i5100_nrecmema_rank(a); } static inline u32 i5100_recmema_dm_buf_id(u32 a) { return i5100_nrecmema_dm_buf_id(a); } static inline u32 i5100_recmemb_cas(u32 a) { return i5100_nrecmemb_cas(a); } static inline u32 i5100_recmemb_ras(u32 a) { return i5100_nrecmemb_ras(a); } /* some generic limits */ #define I5100_MAX_RANKS_PER_CHAN 6 #define I5100_CHANNELS 2 #define I5100_MAX_RANKS_PER_DIMM 4 #define I5100_DIMM_ADDR_LINES (6 - 3) /* 64 bits / 8 bits per byte */ #define I5100_MAX_DIMM_SLOTS_PER_CHAN 4 #define I5100_MAX_RANK_INTERLEAVE 4 #define I5100_MAX_DMIRS 5 #define I5100_SCRUB_REFRESH_RATE (5 * 60 * HZ) struct i5100_priv { /* ranks on each dimm -- 0 maps to not present -- obtained via SPD */ int dimm_numrank[I5100_CHANNELS][I5100_MAX_DIMM_SLOTS_PER_CHAN]; /* * mainboard chip select map -- maps i5100 chip selects to * DIMM slot chip selects. In the case of only 4 ranks per * channel, the mapping is fairly obvious but not unique. * we map -1 -> NC and assume both channels use the same * map... 
* */ int dimm_csmap[I5100_MAX_DIMM_SLOTS_PER_CHAN][I5100_MAX_RANKS_PER_DIMM]; /* memory interleave range */ struct { u64 limit; unsigned way[2]; } mir[I5100_CHANNELS]; /* adjusted memory interleave range register */ unsigned amir[I5100_CHANNELS]; /* dimm interleave range */ struct { unsigned rank[I5100_MAX_RANK_INTERLEAVE]; u64 limit; } dmir[I5100_CHANNELS][I5100_MAX_DMIRS]; /* memory technology registers... */ struct { unsigned present; /* 0 or 1 */ unsigned ethrottle; /* 0 or 1 */ unsigned width; /* 4 or 8 bits */ unsigned numbank; /* 2 or 3 lines */ unsigned numrow; /* 13 .. 16 lines */ unsigned numcol; /* 11 .. 12 lines */ } mtr[I5100_CHANNELS][I5100_MAX_RANKS_PER_CHAN]; u64 tolm; /* top of low memory in bytes */ unsigned ranksperchan; /* number of ranks per channel */ struct pci_dev *mc; /* device 16 func 1 */ struct pci_dev *einj; /* device 19 func 0 */ struct pci_dev *ch0mm; /* device 21 func 0 */ struct pci_dev *ch1mm; /* device 22 func 0 */ struct delayed_work i5100_scrubbing; int scrub_enable; /* Error injection */ u8 inject_channel; u8 inject_hlinesel; u8 inject_deviceptr1; u8 inject_deviceptr2; u16 inject_eccmask1; u16 inject_eccmask2; struct dentry *debugfs; }; static struct dentry *i5100_debugfs; /* map a rank/chan to a slot number on the mainboard */ static int i5100_rank_to_slot(const struct mem_ctl_info *mci, int chan, int rank) { const struct i5100_priv *priv = mci->pvt_info; int i; for (i = 0; i < I5100_MAX_DIMM_SLOTS_PER_CHAN; i++) { int j; const int numrank = priv->dimm_numrank[chan][i]; for (j = 0; j < numrank; j++) if (priv->dimm_csmap[i][j] == rank) return i * 2 + chan; } return -1; } static const char *i5100_err_msg(unsigned err) { static const char *merrs[] = { "unknown", /* 0 */ "uncorrectable data ECC on replay", /* 1 */ "unknown", /* 2 */ "unknown", /* 3 */ "aliased uncorrectable demand data ECC", /* 4 */ "aliased uncorrectable spare-copy data ECC", /* 5 */ "aliased uncorrectable patrol data ECC", /* 6 */ "unknown", /* 7 */ "unknown", 
/* 8 */ "unknown", /* 9 */ "non-aliased uncorrectable demand data ECC", /* 10 */ "non-aliased uncorrectable spare-copy data ECC", /* 11 */ "non-aliased uncorrectable patrol data ECC", /* 12 */ "unknown", /* 13 */ "correctable demand data ECC", /* 14 */ "correctable spare-copy data ECC", /* 15 */ "correctable patrol data ECC", /* 16 */ "unknown", /* 17 */ "SPD protocol error", /* 18 */ "unknown", /* 19 */ "spare copy initiated", /* 20 */ "spare copy completed", /* 21 */ }; unsigned i; for (i = 0; i < ARRAY_SIZE(merrs); i++) if (1 << i & err) return merrs[i]; return "none"; } /* convert csrow index into a rank (per channel -- 0..5) */ static int i5100_csrow_to_rank(const struct mem_ctl_info *mci, int csrow) { const struct i5100_priv *priv = mci->pvt_info; return csrow % priv->ranksperchan; } /* convert csrow index into a channel (0..1) */ static int i5100_csrow_to_chan(const struct mem_ctl_info *mci, int csrow) { const struct i5100_priv *priv = mci->pvt_info; return csrow / priv->ranksperchan; } static void i5100_handle_ce(struct mem_ctl_info *mci, int chan, unsigned bank, unsigned rank, unsigned long syndrome, unsigned cas, unsigned ras, const char *msg) { char detail[80]; /* Form out message */ snprintf(detail, sizeof(detail), "bank %u, cas %u, ras %u\n", bank, cas, ras); edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1, 0, 0, syndrome, chan, rank, -1, msg, detail); } static void i5100_handle_ue(struct mem_ctl_info *mci, int chan, unsigned bank, unsigned rank, unsigned long syndrome, unsigned cas, unsigned ras, const char *msg) { char detail[80]; /* Form out message */ snprintf(detail, sizeof(detail), "bank %u, cas %u, ras %u\n", bank, cas, ras); edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, 0, 0, syndrome, chan, rank, -1, msg, detail); } static void i5100_read_log(struct mem_ctl_info *mci, int chan, u32 ferr, u32 nerr) { struct i5100_priv *priv = mci->pvt_info; struct pci_dev *pdev = (chan) ? 
priv->ch1mm : priv->ch0mm; u32 dw; u32 dw2; unsigned syndrome = 0; unsigned ecc_loc = 0; unsigned merr; unsigned bank; unsigned rank; unsigned cas; unsigned ras; pci_read_config_dword(pdev, I5100_VALIDLOG, &dw); if (i5100_validlog_redmemvalid(dw)) { pci_read_config_dword(pdev, I5100_REDMEMA, &dw2); syndrome = dw2; pci_read_config_dword(pdev, I5100_REDMEMB, &dw2); ecc_loc = i5100_redmemb_ecc_locator(dw2); } if (i5100_validlog_recmemvalid(dw)) { const char *msg; pci_read_config_dword(pdev, I5100_RECMEMA, &dw2); merr = i5100_recmema_merr(dw2); bank = i5100_recmema_bank(dw2); rank = i5100_recmema_rank(dw2); pci_read_config_dword(pdev, I5100_RECMEMB, &dw2); cas = i5100_recmemb_cas(dw2); ras = i5100_recmemb_ras(dw2); /* FIXME: not really sure if this is what merr is... */ if (!merr) msg = i5100_err_msg(ferr); else msg = i5100_err_msg(nerr); i5100_handle_ce(mci, chan, bank, rank, syndrome, cas, ras, msg); } if (i5100_validlog_nrecmemvalid(dw)) { const char *msg; pci_read_config_dword(pdev, I5100_NRECMEMA, &dw2); merr = i5100_nrecmema_merr(dw2); bank = i5100_nrecmema_bank(dw2); rank = i5100_nrecmema_rank(dw2); pci_read_config_dword(pdev, I5100_NRECMEMB, &dw2); cas = i5100_nrecmemb_cas(dw2); ras = i5100_nrecmemb_ras(dw2); /* FIXME: not really sure if this is what merr is... 
*/ if (!merr) msg = i5100_err_msg(ferr); else msg = i5100_err_msg(nerr); i5100_handle_ue(mci, chan, bank, rank, syndrome, cas, ras, msg); } pci_write_config_dword(pdev, I5100_VALIDLOG, dw); } static void i5100_check_error(struct mem_ctl_info *mci) { struct i5100_priv *priv = mci->pvt_info; u32 dw, dw2; pci_read_config_dword(priv->mc, I5100_FERR_NF_MEM, &dw); if (i5100_ferr_nf_mem_any(dw)) { pci_read_config_dword(priv->mc, I5100_NERR_NF_MEM, &dw2); i5100_read_log(mci, i5100_ferr_nf_mem_chan_indx(dw), i5100_ferr_nf_mem_any(dw), i5100_nerr_nf_mem_any(dw2)); pci_write_config_dword(priv->mc, I5100_NERR_NF_MEM, dw2); } pci_write_config_dword(priv->mc, I5100_FERR_NF_MEM, dw); } /* The i5100 chipset will scrub the entire memory once, then * set a done bit. Continuous scrubbing is achieved by enqueing * delayed work to a workqueue, checking every few minutes if * the scrubbing has completed and if so reinitiating it. */ static void i5100_refresh_scrubbing(struct work_struct *work) { struct delayed_work *i5100_scrubbing = container_of(work, struct delayed_work, work); struct i5100_priv *priv = container_of(i5100_scrubbing, struct i5100_priv, i5100_scrubbing); u32 dw; pci_read_config_dword(priv->mc, I5100_MC, &dw); if (priv->scrub_enable) { pci_read_config_dword(priv->mc, I5100_MC, &dw); if (i5100_mc_scrbdone(dw)) { dw |= I5100_MC_SCRBEN_MASK; pci_write_config_dword(priv->mc, I5100_MC, dw); pci_read_config_dword(priv->mc, I5100_MC, &dw); } schedule_delayed_work(&(priv->i5100_scrubbing), I5100_SCRUB_REFRESH_RATE); } } /* * The bandwidth is based on experimentation, feel free to refine it. 
*/ static int i5100_set_scrub_rate(struct mem_ctl_info *mci, u32 bandwidth) { struct i5100_priv *priv = mci->pvt_info; u32 dw; pci_read_config_dword(priv->mc, I5100_MC, &dw); if (bandwidth) { priv->scrub_enable = 1; dw |= I5100_MC_SCRBEN_MASK; schedule_delayed_work(&(priv->i5100_scrubbing), I5100_SCRUB_REFRESH_RATE); } else { priv->scrub_enable = 0; dw &= ~I5100_MC_SCRBEN_MASK; cancel_delayed_work(&(priv->i5100_scrubbing)); } pci_write_config_dword(priv->mc, I5100_MC, dw); pci_read_config_dword(priv->mc, I5100_MC, &dw); bandwidth = 5900000 * i5100_mc_scrben(dw); return bandwidth; } static int i5100_get_scrub_rate(struct mem_ctl_info *mci) { struct i5100_priv *priv = mci->pvt_info; u32 dw; pci_read_config_dword(priv->mc, I5100_MC, &dw); return 5900000 * i5100_mc_scrben(dw); } static struct pci_dev *pci_get_device_func(unsigned vendor, unsigned device, unsigned func) { struct pci_dev *ret = NULL; while (1) { ret = pci_get_device(vendor, device, ret); if (!ret) break; if (PCI_FUNC(ret->devfn) == func) break; } return ret; } static unsigned long i5100_npages(struct mem_ctl_info *mci, int csrow) { struct i5100_priv *priv = mci->pvt_info; const unsigned chan_rank = i5100_csrow_to_rank(mci, csrow); const unsigned chan = i5100_csrow_to_chan(mci, csrow); unsigned addr_lines; /* dimm present? */ if (!priv->mtr[chan][chan_rank].present) return 0ULL; addr_lines = I5100_DIMM_ADDR_LINES + priv->mtr[chan][chan_rank].numcol + priv->mtr[chan][chan_rank].numrow + priv->mtr[chan][chan_rank].numbank; return (unsigned long) ((unsigned long long) (1ULL << addr_lines) / PAGE_SIZE); } static void i5100_init_mtr(struct mem_ctl_info *mci) { struct i5100_priv *priv = mci->pvt_info; struct pci_dev *mms[2] = { priv->ch0mm, priv->ch1mm }; int i; for (i = 0; i < I5100_CHANNELS; i++) { int j; struct pci_dev *pdev = mms[i]; for (j = 0; j < I5100_MAX_RANKS_PER_CHAN; j++) { const unsigned addr = (j < 4) ? 
I5100_MTR_0 + j * 2 : I5100_MTR_4 + (j - 4) * 2; u16 w; pci_read_config_word(pdev, addr, &w); priv->mtr[i][j].present = i5100_mtr_present(w); priv->mtr[i][j].ethrottle = i5100_mtr_ethrottle(w); priv->mtr[i][j].width = 4 + 4 * i5100_mtr_width(w); priv->mtr[i][j].numbank = 2 + i5100_mtr_numbank(w); priv->mtr[i][j].numrow = 13 + i5100_mtr_numrow(w); priv->mtr[i][j].numcol = 10 + i5100_mtr_numcol(w); } } } /* * FIXME: make this into a real i2c adapter (so that dimm-decode * will work)? */ static int i5100_read_spd_byte(const struct mem_ctl_info *mci, u8 ch, u8 slot, u8 addr, u8 *byte) { struct i5100_priv *priv = mci->pvt_info; u16 w; unsigned long et; pci_read_config_word(priv->mc, I5100_SPDDATA, &w); if (i5100_spddata_busy(w)) return -1; pci_write_config_dword(priv->mc, I5100_SPDCMD, i5100_spdcmd_create(0xa, 1, ch * 4 + slot, addr, 0, 0)); /* wait up to 100ms */ et = jiffies + HZ / 10; udelay(100); while (1) { pci_read_config_word(priv->mc, I5100_SPDDATA, &w); if (!i5100_spddata_busy(w)) break; udelay(100); } if (!i5100_spddata_rdo(w) || i5100_spddata_sbe(w)) return -1; *byte = i5100_spddata_data(w); return 0; } /* * fill dimm chip select map * * FIXME: * o not the only way to may chip selects to dimm slots * o investigate if there is some way to obtain this map from the bios */ static void i5100_init_dimm_csmap(struct mem_ctl_info *mci) { struct i5100_priv *priv = mci->pvt_info; int i; for (i = 0; i < I5100_MAX_DIMM_SLOTS_PER_CHAN; i++) { int j; for (j = 0; j < I5100_MAX_RANKS_PER_DIMM; j++) priv->dimm_csmap[i][j] = -1; /* default NC */ } /* only 2 chip selects per slot... 
*/ if (priv->ranksperchan == 4) { priv->dimm_csmap[0][0] = 0; priv->dimm_csmap[0][1] = 3; priv->dimm_csmap[1][0] = 1; priv->dimm_csmap[1][1] = 2; priv->dimm_csmap[2][0] = 2; priv->dimm_csmap[3][0] = 3; } else { priv->dimm_csmap[0][0] = 0; priv->dimm_csmap[0][1] = 1; priv->dimm_csmap[1][0] = 2; priv->dimm_csmap[1][1] = 3; priv->dimm_csmap[2][0] = 4; priv->dimm_csmap[2][1] = 5; } } static void i5100_init_dimm_layout(struct pci_dev *pdev, struct mem_ctl_info *mci) { struct i5100_priv *priv = mci->pvt_info; int i; for (i = 0; i < I5100_CHANNELS; i++) { int j; for (j = 0; j < I5100_MAX_DIMM_SLOTS_PER_CHAN; j++) { u8 rank; if (i5100_read_spd_byte(mci, i, j, 5, &rank) < 0) priv->dimm_numrank[i][j] = 0; else priv->dimm_numrank[i][j] = (rank & 3) + 1; } } i5100_init_dimm_csmap(mci); } static void i5100_init_interleaving(struct pci_dev *pdev, struct mem_ctl_info *mci) { u16 w; u32 dw; struct i5100_priv *priv = mci->pvt_info; struct pci_dev *mms[2] = { priv->ch0mm, priv->ch1mm }; int i; pci_read_config_word(pdev, I5100_TOLM, &w); priv->tolm = (u64) i5100_tolm_tolm(w) * 256 * 1024 * 1024; pci_read_config_word(pdev, I5100_MIR0, &w); priv->mir[0].limit = (u64) i5100_mir_limit(w) << 28; priv->mir[0].way[1] = i5100_mir_way1(w); priv->mir[0].way[0] = i5100_mir_way0(w); pci_read_config_word(pdev, I5100_MIR1, &w); priv->mir[1].limit = (u64) i5100_mir_limit(w) << 28; priv->mir[1].way[1] = i5100_mir_way1(w); priv->mir[1].way[0] = i5100_mir_way0(w); pci_read_config_word(pdev, I5100_AMIR_0, &w); priv->amir[0] = w; pci_read_config_word(pdev, I5100_AMIR_1, &w); priv->amir[1] = w; for (i = 0; i < I5100_CHANNELS; i++) { int j; for (j = 0; j < 5; j++) { int k; pci_read_config_dword(mms[i], I5100_DMIR + j * 4, &dw); priv->dmir[i][j].limit = (u64) i5100_dmir_limit(dw) << 28; for (k = 0; k < I5100_MAX_RANKS_PER_DIMM; k++) priv->dmir[i][j].rank[k] = i5100_dmir_rank(dw, k); } } i5100_init_mtr(mci); } static void i5100_init_csrows(struct mem_ctl_info *mci) { int i; struct i5100_priv *priv = 
mci->pvt_info; for (i = 0; i < mci->tot_dimms; i++) { struct dimm_info *dimm; const unsigned long npages = i5100_npages(mci, i); const unsigned chan = i5100_csrow_to_chan(mci, i); const unsigned rank = i5100_csrow_to_rank(mci, i); if (!npages) continue; dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers, chan, rank, 0); dimm->nr_pages = npages; if (npages) { dimm->grain = 32; dimm->dtype = (priv->mtr[chan][rank].width == 4) ? DEV_X4 : DEV_X8; dimm->mtype = MEM_RDDR2; dimm->edac_mode = EDAC_SECDED; snprintf(dimm->label, sizeof(dimm->label), "DIMM%u", i5100_rank_to_slot(mci, chan, rank)); } edac_dbg(2, "dimm channel %d, rank %d, size %ld\n", chan, rank, (long)PAGES_TO_MiB(npages)); } } /**************************************************************************** * Error injection routines ****************************************************************************/ static void i5100_do_inject(struct mem_ctl_info *mci) { struct i5100_priv *priv = mci->pvt_info; u32 mask0; u16 mask1; /* MEM[1:0]EINJMSK0 * 31 - ADDRMATCHEN * 29:28 - HLINESEL * 00 Reserved * 01 Lower half of cache line * 10 Upper half of cache line * 11 Both upper and lower parts of cache line * 27 - EINJEN * 25:19 - XORMASK1 for deviceptr1 * 9:5 - SEC2RAM for deviceptr2 * 4:0 - FIR2RAM for deviceptr1 */ mask0 = ((priv->inject_hlinesel & 0x3) << 28) | I5100_MEMXEINJMSK0_EINJEN | ((priv->inject_eccmask1 & 0xffff) << 10) | ((priv->inject_deviceptr2 & 0x1f) << 5) | (priv->inject_deviceptr1 & 0x1f); /* MEM[1:0]EINJMSK1 * 15:0 - XORMASK2 for deviceptr2 */ mask1 = priv->inject_eccmask2; if (priv->inject_channel == 0) { pci_write_config_dword(priv->mc, I5100_MEM0EINJMSK0, mask0); pci_write_config_word(priv->mc, I5100_MEM0EINJMSK1, mask1); } else { pci_write_config_dword(priv->mc, I5100_MEM1EINJMSK0, mask0); pci_write_config_word(priv->mc, I5100_MEM1EINJMSK1, mask1); } /* Error Injection Response Function * Intel 5100 Memory Controller Hub Chipset (318378) datasheet * hints about this register but carry 
no data about them. All * data regarding device 19 is based on experimentation and the * Intel 7300 Chipset Memory Controller Hub (318082) datasheet * which appears to be accurate for the i5100 in this area. * * The injection code don't work without setting this register. * The register needs to be flipped off then on else the hardware * will only preform the first injection. * * Stop condition bits 7:4 * 1010 - Stop after one injection * 1011 - Never stop injecting faults * * Start condition bits 3:0 * 1010 - Never start * 1011 - Start immediately */ pci_write_config_byte(priv->einj, I5100_DINJ0, 0xaa); pci_write_config_byte(priv->einj, I5100_DINJ0, 0xab); } #define to_mci(k) container_of(k, struct mem_ctl_info, dev) static ssize_t inject_enable_write(struct file *file, const char __user *data, size_t count, loff_t *ppos) { struct device *dev = file->private_data; struct mem_ctl_info *mci = to_mci(dev); i5100_do_inject(mci); return count; } static const struct file_operations i5100_inject_enable_fops = { .open = simple_open, .write = inject_enable_write, .llseek = generic_file_llseek, }; static int i5100_setup_debugfs(struct mem_ctl_info *mci) { struct i5100_priv *priv = mci->pvt_info; if (!i5100_debugfs) return -ENODEV; priv->debugfs = debugfs_create_dir(mci->bus->name, i5100_debugfs); if (!priv->debugfs) return -ENOMEM; debugfs_create_x8("inject_channel", S_IRUGO | S_IWUSR, priv->debugfs, &priv->inject_channel); debugfs_create_x8("inject_hlinesel", S_IRUGO | S_IWUSR, priv->debugfs, &priv->inject_hlinesel); debugfs_create_x8("inject_deviceptr1", S_IRUGO | S_IWUSR, priv->debugfs, &priv->inject_deviceptr1); debugfs_create_x8("inject_deviceptr2", S_IRUGO | S_IWUSR, priv->debugfs, &priv->inject_deviceptr2); debugfs_create_x16("inject_eccmask1", S_IRUGO | S_IWUSR, priv->debugfs, &priv->inject_eccmask1); debugfs_create_x16("inject_eccmask2", S_IRUGO | S_IWUSR, priv->debugfs, &priv->inject_eccmask2); debugfs_create_file("inject_enable", S_IWUSR, priv->debugfs, 
&mci->dev, &i5100_inject_enable_fops); return 0; } static int i5100_init_one(struct pci_dev *pdev, const struct pci_device_id *id) { int rc; struct mem_ctl_info *mci; struct edac_mc_layer layers[2]; struct i5100_priv *priv; struct pci_dev *ch0mm, *ch1mm, *einj; int ret = 0; u32 dw; int ranksperch; if (PCI_FUNC(pdev->devfn) != 1) return -ENODEV; rc = pci_enable_device(pdev); if (rc < 0) { ret = rc; goto bail; } /* ECC enabled? */ pci_read_config_dword(pdev, I5100_MC, &dw); if (!i5100_mc_errdeten(dw)) { printk(KERN_INFO "i5100_edac: ECC not enabled.\n"); ret = -ENODEV; goto bail_pdev; } /* figure out how many ranks, from strapped state of 48GB_Mode input */ pci_read_config_dword(pdev, I5100_MS, &dw); ranksperch = !!(dw & (1 << 8)) * 2 + 4; /* enable error reporting... */ pci_read_config_dword(pdev, I5100_EMASK_MEM, &dw); dw &= ~I5100_FERR_NF_MEM_ANY_MASK; pci_write_config_dword(pdev, I5100_EMASK_MEM, dw); /* device 21, func 0, Channel 0 Memory Map, Error Flag/Mask, etc... */ ch0mm = pci_get_device_func(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5100_21, 0); if (!ch0mm) { ret = -ENODEV; goto bail_pdev; } rc = pci_enable_device(ch0mm); if (rc < 0) { ret = rc; goto bail_ch0; } /* device 22, func 0, Channel 1 Memory Map, Error Flag/Mask, etc... 
*/ ch1mm = pci_get_device_func(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5100_22, 0); if (!ch1mm) { ret = -ENODEV; goto bail_disable_ch0; } rc = pci_enable_device(ch1mm); if (rc < 0) { ret = rc; goto bail_ch1; } layers[0].type = EDAC_MC_LAYER_CHANNEL; layers[0].size = 2; layers[0].is_virt_csrow = false; layers[1].type = EDAC_MC_LAYER_SLOT; layers[1].size = ranksperch; layers[1].is_virt_csrow = true; mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*priv)); if (!mci) { ret = -ENOMEM; goto bail_disable_ch1; } /* device 19, func 0, Error injection */ einj = pci_get_device_func(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5100_19, 0); if (!einj) { ret = -ENODEV; goto bail_einj; } rc = pci_enable_device(einj); if (rc < 0) { ret = rc; goto bail_disable_einj; } mci->pdev = &pdev->dev; priv = mci->pvt_info; priv->ranksperchan = ranksperch; priv->mc = pdev; priv->ch0mm = ch0mm; priv->ch1mm = ch1mm; priv->einj = einj; INIT_DELAYED_WORK(&(priv->i5100_scrubbing), i5100_refresh_scrubbing); /* If scrubbing was already enabled by the bios, start maintaining it */ pci_read_config_dword(pdev, I5100_MC, &dw); if (i5100_mc_scrben(dw)) { priv->scrub_enable = 1; schedule_delayed_work(&(priv->i5100_scrubbing), I5100_SCRUB_REFRESH_RATE); } i5100_init_dimm_layout(pdev, mci); i5100_init_interleaving(pdev, mci); mci->mtype_cap = MEM_FLAG_FB_DDR2; mci->edac_ctl_cap = EDAC_FLAG_SECDED; mci->edac_cap = EDAC_FLAG_SECDED; mci->mod_name = "i5100_edac.c"; mci->mod_ver = "not versioned"; mci->ctl_name = "i5100"; mci->dev_name = pci_name(pdev); mci->ctl_page_to_phys = NULL; mci->edac_check = i5100_check_error; mci->set_sdram_scrub_rate = i5100_set_scrub_rate; mci->get_sdram_scrub_rate = i5100_get_scrub_rate; priv->inject_channel = 0; priv->inject_hlinesel = 0; priv->inject_deviceptr1 = 0; priv->inject_deviceptr2 = 0; priv->inject_eccmask1 = 0; priv->inject_eccmask2 = 0; i5100_init_csrows(mci); /* this strange construction seems to be in every driver, dunno why */ switch (edac_op_state) { case 
EDAC_OPSTATE_POLL: case EDAC_OPSTATE_NMI: break; default: edac_op_state = EDAC_OPSTATE_POLL; break; } if (edac_mc_add_mc(mci)) { ret = -ENODEV; goto bail_scrub; } i5100_setup_debugfs(mci); return ret; bail_scrub: priv->scrub_enable = 0; cancel_delayed_work_sync(&(priv->i5100_scrubbing)); edac_mc_free(mci); bail_disable_einj: pci_disable_device(einj); bail_einj: pci_dev_put(einj); bail_disable_ch1: pci_disable_device(ch1mm); bail_ch1: pci_dev_put(ch1mm); bail_disable_ch0: pci_disable_device(ch0mm); bail_ch0: pci_dev_put(ch0mm); bail_pdev: pci_disable_device(pdev); bail: return ret; } static void i5100_remove_one(struct pci_dev *pdev) { struct mem_ctl_info *mci; struct i5100_priv *priv; mci = edac_mc_del_mc(&pdev->dev); if (!mci) return; priv = mci->pvt_info; debugfs_remove_recursive(priv->debugfs); priv->scrub_enable = 0; cancel_delayed_work_sync(&(priv->i5100_scrubbing)); pci_disable_device(pdev); pci_disable_device(priv->ch0mm); pci_disable_device(priv->ch1mm); pci_disable_device(priv->einj); pci_dev_put(priv->ch0mm); pci_dev_put(priv->ch1mm); pci_dev_put(priv->einj); edac_mc_free(mci); } static DEFINE_PCI_DEVICE_TABLE(i5100_pci_tbl) = { /* Device 16, Function 0, Channel 0 Memory Map, Error Flag/Mask, ... */ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5100_16) }, { 0, } }; MODULE_DEVICE_TABLE(pci, i5100_pci_tbl); static struct pci_driver i5100_driver = { .name = KBUILD_BASENAME, .probe = i5100_init_one, .remove = i5100_remove_one, .id_table = i5100_pci_tbl, }; static int __init i5100_init(void) { int pci_rc; i5100_debugfs = debugfs_create_dir("i5100_edac", NULL); pci_rc = pci_register_driver(&i5100_driver); return (pci_rc < 0) ? pci_rc : 0; } static void __exit i5100_exit(void) { debugfs_remove(i5100_debugfs); pci_unregister_driver(&i5100_driver); } module_init(i5100_init); module_exit(i5100_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR ("Arthur Jones <ajones@riverbed.com>"); MODULE_DESCRIPTION("MC Driver for Intel I5100 memory controllers");
gpl-2.0
SM-G920P/SM-N920
drivers/media/pci/cx23885/netup-init.c
2947
2852
/*
 * netup-init.c
 *
 * NetUP Dual DVB-S2 CI driver
 *
 * Copyright (C) 2009 NetUP Inc.
 * Copyright (C) 2009 Igor M. Liplianin <liplianin@netup.ru>
 * Copyright (C) 2009 Abylay Ospan <aospan@netup.ru>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include "cx23885.h"
#include "netup-init.h"

/*
 * Write one byte to a 16-bit register of the A/V core behind I2C
 * address 0x88 (8-bit form). Errors are only logged; the caller gets
 * no status.
 */
static void i2c_av_write(struct i2c_adapter *i2c, u16 reg, u8 val)
{
	u8 cmd[3];
	struct i2c_msg msg = {
		.addr	= 0x88 >> 1,
		.flags	= 0,
		.buf	= cmd,
		.len	= 3
	};

	cmd[0] = reg >> 8;
	cmd[1] = reg & 0xff;
	cmd[2] = val;

	if (i2c_transfer(i2c, &msg, 1) != 1)
		printk(KERN_ERR "%s: i2c write error!\n", __func__);
}

/*
 * Write a 32-bit little-endian value to a 16-bit register of the
 * A/V core. Errors are only logged.
 */
static void i2c_av_write4(struct i2c_adapter *i2c, u16 reg, u32 val)
{
	u8 cmd[6];
	struct i2c_msg msg = {
		.addr	= 0x88 >> 1,
		.flags	= 0,
		.buf	= cmd,
		.len	= 6
	};

	cmd[0] = reg >> 8;
	cmd[1] = reg & 0xff;
	cmd[2] = val & 0xff;
	cmd[3] = (val >> 8) & 0xff;
	cmd[4] = (val >> 16) & 0xff;
	cmd[5] = val >> 24;

	if (i2c_transfer(i2c, &msg, 1) != 1)
		printk(KERN_ERR "%s: i2c write error!\n", __func__);
}

/*
 * Read one byte from a 16-bit register of the A/V core: first a write
 * transfer carrying the register address, then a one-byte read.
 * On error the (possibly stale) buffer contents are returned.
 */
static u8 i2c_av_read(struct i2c_adapter *i2c, u16 reg)
{
	u8 cmd[2];
	struct i2c_msg msg = {
		.addr	= 0x88 >> 1,
		.flags	= 0,
		.buf	= cmd,
		.len	= 2
	};

	cmd[0] = reg >> 8;
	cmd[1] = reg & 0xff;

	if (i2c_transfer(i2c, &msg, 1) != 1)
		printk(KERN_ERR "%s: i2c write error!\n", __func__);

	msg.flags = I2C_M_RD;
	msg.len = 1;

	if (i2c_transfer(i2c, &msg, 1) != 1)
		printk(KERN_ERR "%s: i2c read error!\n", __func__);

	return cmd[0];
}

/* Read-modify-write helper: reg = (reg & and_mask) | or_value. */
static void i2c_av_and_or(struct i2c_adapter *i2c, u16 reg,
			  unsigned and_mask, u8 or_value)
{
	i2c_av_write(i2c, reg,
		     (i2c_av_read(i2c, reg) & and_mask) | or_value);
}

/* set 27MHz on AUX_CLK */
void netup_initialize(struct cx23885_dev *dev)
{
	struct cx23885_i2c *i2c_bus = &dev->i2c_bus[2];
	struct i2c_adapter *i2c = &i2c_bus->i2c_adap;

	/* Stop microcontroller */
	i2c_av_and_or(i2c, 0x803, ~0x10, 0x00);

	/* Aux PLL frac for 27 MHz */
	i2c_av_write4(i2c, 0x114, 0xea0eb3);

	/* Aux PLL int for 27 MHz */
	i2c_av_write4(i2c, 0x110, 0x090319);

	/* start microcontroller */
	i2c_av_and_or(i2c, 0x803, ~0x10, 0x10);
}
gpl-2.0
tusharjain95/android_kernel_xiaomi_cancro
drivers/rtc/interface.c
3203
23784
/*
 * RTC subsystem, interface functions
 *
 * Copyright (C) 2005 Tower Technologies
 * Author: Alessandro Zummo <a.zummo@towertech.it>
 *
 * based on arch/arm/common/rtctime.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/rtc.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/log2.h>
#include <linux/workqueue.h>

static int rtc_timer_enqueue(struct rtc_device *rtc, struct rtc_timer *timer);
static void rtc_timer_remove(struct rtc_device *rtc, struct rtc_timer *timer);

/*
 * Read the current time from the hardware via the driver's read_time
 * op. Caller must hold ops_lock. Returns 0 or a negative errno.
 */
static int __rtc_read_time(struct rtc_device *rtc, struct rtc_time *tm)
{
	int err;
	if (!rtc->ops)
		err = -ENODEV;
	else if (!rtc->ops->read_time)
		err = -EINVAL;
	else {
		/* zero first: drivers may not fill every field */
		memset(tm, 0, sizeof(struct rtc_time));
		err = rtc->ops->read_time(rtc->dev.parent, tm);
	}
	return err;
}

/* Locked wrapper around __rtc_read_time(). */
int rtc_read_time(struct rtc_device *rtc, struct rtc_time *tm)
{
	int err;

	err = mutex_lock_interruptible(&rtc->ops_lock);
	if (err)
		return err;

	err = __rtc_read_time(rtc, tm);
	mutex_unlock(&rtc->ops_lock);
	return err;
}
EXPORT_SYMBOL_GPL(rtc_read_time);

/*
 * Set the hardware clock. Prefers the driver's set_time op and falls
 * back to set_mmss (seconds-since-epoch) when only that is provided.
 */
int rtc_set_time(struct rtc_device *rtc, struct rtc_time *tm)
{
	int err;

	err = rtc_valid_tm(tm);
	if (err != 0)
		return err;

	err = mutex_lock_interruptible(&rtc->ops_lock);
	if (err)
		return err;

	if (!rtc->ops)
		err = -ENODEV;
	else if (rtc->ops->set_time)
		err = rtc->ops->set_time(rtc->dev.parent, tm);
	else if (rtc->ops->set_mmss) {
		unsigned long secs;
		err = rtc_tm_to_time(tm, &secs);
		if (err == 0)
			err = rtc->ops->set_mmss(rtc->dev.parent, secs);
	} else
		err = -EINVAL;

	mutex_unlock(&rtc->ops_lock);
	/* A timer might have just expired */
	schedule_work(&rtc->irqwork);
	return err;
}
EXPORT_SYMBOL_GPL(rtc_set_time);

/*
 * Set the clock from a seconds-since-epoch value. Prefers the driver's
 * set_mmss op; otherwise emulates it with read_time + set_time, taking
 * care not to write across a day boundary.
 */
int rtc_set_mmss(struct rtc_device *rtc, unsigned long secs)
{
	int err;

	err = mutex_lock_interruptible(&rtc->ops_lock);
	if (err)
		return err;

	if (!rtc->ops)
		err = -ENODEV;
	else if (rtc->ops->set_mmss)
		err = rtc->ops->set_mmss(rtc->dev.parent, secs);
	else if (rtc->ops->read_time && rtc->ops->set_time) {
		struct rtc_time new, old;

		err = rtc->ops->read_time(rtc->dev.parent, &old);
		if (err == 0) {
			rtc_time_to_tm(secs, &new);

			/*
			 * avoid writing when we're going to change the day
			 * of the month. We will retry in the next minute.
			 * This basically means that the RTC must not drift
			 * by more than 1 minute in 11 minutes.
			 */
			if (!((old.tm_hour == 23 && old.tm_min == 59) ||
			      (new.tm_hour == 23 && new.tm_min == 59)))
				err = rtc->ops->set_time(rtc->dev.parent,
							 &new);
		}
	} else
		err = -EINVAL;

	mutex_unlock(&rtc->ops_lock);
	/* A timer might have just expired */
	schedule_work(&rtc->irqwork);

	return err;
}
EXPORT_SYMBOL_GPL(rtc_set_mmss);

/*
 * Read the raw alarm value from the driver; fields the hardware cannot
 * express may come back as -1 (see __rtc_read_alarm below).
 */
static int rtc_read_alarm_internal(struct rtc_device *rtc,
				   struct rtc_wkalrm *alarm)
{
	int err;

	err = mutex_lock_interruptible(&rtc->ops_lock);
	if (err)
		return err;

	if (rtc->ops == NULL)
		err = -ENODEV;
	else if (!rtc->ops->read_alarm)
		err = -EINVAL;
	else {
		memset(alarm, 0, sizeof(struct rtc_wkalrm));
		err = rtc->ops->read_alarm(rtc->dev.parent, alarm);
	}

	mutex_unlock(&rtc->ops_lock);
	return err;
}

/*
 * Read the alarm and fill in any fields the hardware left as -1, using
 * a consistently-sampled current timestamp, then apply day/month/year
 * rollover so the resulting alarm time is in the future.
 */
int __rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
{
	int err;
	struct rtc_time before, now;
	int first_time = 1;
	unsigned long t_now, t_alm;
	enum { none, day, month, year } missing = none;
	unsigned days;

	/* The lower level RTC driver may return -1 in some fields,
	 * creating invalid alarm->time values, for reasons like:
	 *
	 *   - The hardware may not be capable of filling them in;
	 *     many alarms match only on time-of-day fields, not
	 *     day/month/year calendar data.
	 *
	 *   - Some hardware uses illegal values as "wildcard" match
	 *     values, which non-Linux firmware (like a BIOS) may try
	 *     to set up as e.g. "alarm 15 minutes after each hour".
	 *     Linux uses only oneshot alarms.
	 *
	 * When we see that here, we deal with it by using values from
	 * a current RTC timestamp for any missing (-1) values.  The
	 * RTC driver prevents "periodic alarm" modes.
	 *
	 * But this can be racy, because some fields of the RTC timestamp
	 * may have wrapped in the interval since we read the RTC alarm,
	 * which would lead to us inserting inconsistent values in place
	 * of the -1 fields.
	 *
	 * Reading the alarm and timestamp in the reverse sequence
	 * would have the same race condition, and not solve the issue.
	 *
	 * So, we must first read the RTC timestamp,
	 * then read the RTC alarm value,
	 * and then read a second RTC timestamp.
	 *
	 * If any fields of the second timestamp have changed
	 * when compared with the first timestamp, then we know
	 * our timestamp may be inconsistent with that used by
	 * the low-level rtc_read_alarm_internal() function.
	 *
	 * So, when the two timestamps disagree, we just loop and do
	 * the process again to get a fully consistent set of values.
	 *
	 * This could all instead be done in the lower level driver,
	 * but since more than one lower level RTC implementation needs it,
	 * then it's probably best to do it here instead of there..
	 */

	/* Get the "before" timestamp */
	err = rtc_read_time(rtc, &before);
	if (err < 0)
		return err;
	do {
		if (!first_time)
			memcpy(&before, &now, sizeof(struct rtc_time));
		first_time = 0;

		/* get the RTC alarm values, which may be incomplete */
		err = rtc_read_alarm_internal(rtc, alarm);
		if (err)
			return err;

		/* full-function RTCs won't have such missing fields */
		if (rtc_valid_tm(&alarm->time) == 0)
			return 0;

		/* get the "after" timestamp, to detect wrapped fields */
		err = rtc_read_time(rtc, &now);
		if (err < 0)
			return err;

		/* note that tm_sec is a "don't care" value here: */
	} while (before.tm_min  != now.tm_min ||
		 before.tm_hour != now.tm_hour ||
		 before.tm_mon  != now.tm_mon ||
		 before.tm_year != now.tm_year);

	/* Fill in the missing alarm fields using the timestamp; we
	 * know there's at least one since alarm->time is invalid.
	 */
	if (alarm->time.tm_sec == -1)
		alarm->time.tm_sec = now.tm_sec;
	if (alarm->time.tm_min == -1)
		alarm->time.tm_min = now.tm_min;
	if (alarm->time.tm_hour == -1)
		alarm->time.tm_hour = now.tm_hour;

	/* For simplicity, only support date rollover for now */
	if (alarm->time.tm_mday < 1 || alarm->time.tm_mday > 31) {
		alarm->time.tm_mday = now.tm_mday;
		missing = day;
	}
	if ((unsigned)alarm->time.tm_mon >= 12) {
		alarm->time.tm_mon = now.tm_mon;
		if (missing == none)
			missing = month;
	}
	if (alarm->time.tm_year == -1) {
		alarm->time.tm_year = now.tm_year;
		if (missing == none)
			missing = year;
	}

	/* with luck, no rollover is needed */
	rtc_tm_to_time(&now, &t_now);
	rtc_tm_to_time(&alarm->time, &t_alm);
	if (t_now < t_alm)
		goto done;

	switch (missing) {

	/* 24 hour rollover ... if it's now 10am Monday, an alarm that
	 * will trigger at 5am will do so at 5am Tuesday, which
	 * could also be in the next month or year.  This is a common
	 * case, especially for PCs.
	 */
	case day:
		dev_dbg(&rtc->dev, "alarm rollover: %s\n", "day");
		t_alm += 24 * 60 * 60;
		rtc_time_to_tm(t_alm, &alarm->time);
		break;

	/* Month rollover ... if it's the 31st, an alarm on the 3rd will
	 * be next month.  An alarm matching on the 30th, 29th, or 28th
	 * may end up in the month after that!  Many newer PCs support
	 * this type of alarm.
	 */
	case month:
		dev_dbg(&rtc->dev, "alarm rollover: %s\n", "month");
		do {
			if (alarm->time.tm_mon < 11)
				alarm->time.tm_mon++;
			else {
				alarm->time.tm_mon = 0;
				alarm->time.tm_year++;
			}
			days = rtc_month_days(alarm->time.tm_mon,
					      alarm->time.tm_year);
		} while (days < alarm->time.tm_mday);
		break;

	/* Year rollover ... easy except for leap years! */
	case year:
		dev_dbg(&rtc->dev, "alarm rollover: %s\n", "year");
		do {
			alarm->time.tm_year++;
		} while (rtc_valid_tm(&alarm->time) != 0);
		break;

	default:
		dev_warn(&rtc->dev, "alarm rollover not handled\n");
	}

done:
	return 0;
}

/*
 * Report the currently programmed alarm, taken from the software
 * aie_timer rather than the hardware.
 */
int rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
{
	int err;

	err = mutex_lock_interruptible(&rtc->ops_lock);
	if (err)
		return err;
	if (rtc->ops == NULL)
		err = -ENODEV;
	else if (!rtc->ops->read_alarm)
		err = -EINVAL;
	else {
		memset(alarm, 0, sizeof(struct rtc_wkalrm));
		alarm->enabled = rtc->aie_timer.enabled;
		alarm->time = rtc_ktime_to_tm(rtc->aie_timer.node.expires);
	}
	mutex_unlock(&rtc->ops_lock);

	return err;
}
EXPORT_SYMBOL_GPL(rtc_read_alarm);

/*
 * Program the hardware alarm. Caller must hold ops_lock.
 * Returns -ETIME if the requested time is already in the past.
 */
static int __rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
{
	struct rtc_time tm;
	long now, scheduled;
	int err;

	err = rtc_valid_tm(&alarm->time);
	if (err)
		return err;
	rtc_tm_to_time(&alarm->time, &scheduled);

	/* Make sure we're not setting alarms in the past */
	err = __rtc_read_time(rtc, &tm);
	rtc_tm_to_time(&tm, &now);
	if (scheduled <= now)
		return -ETIME;
	/*
	 * XXX - We just checked to make sure the alarm time is not
	 * in the past, but there is still a race window where if
	 * the alarm is set for the next second and the second ticks
	 * over right here, before we set the alarm.
	 */

	if (!rtc->ops)
		err = -ENODEV;
	else if (!rtc->ops->set_alarm)
		err = -EINVAL;
	else
		err = rtc->ops->set_alarm(rtc->dev.parent, alarm);

	return err;
}

/*
 * Replace the alarm: remove any queued aie_timer, reprogram its expiry
 * from the requested wall-clock time and re-enqueue it when enabled.
 */
int rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
{
	int err;

	err = rtc_valid_tm(&alarm->time);
	if (err != 0)
		return err;

	err = mutex_lock_interruptible(&rtc->ops_lock);
	if (err)
		return err;
	if (rtc->aie_timer.enabled) {
		rtc_timer_remove(rtc, &rtc->aie_timer);
	}
	rtc->aie_timer.node.expires = rtc_tm_to_ktime(alarm->time);
	rtc->aie_timer.period = ktime_set(0, 0);
	if (alarm->enabled) {
		err = rtc_timer_enqueue(rtc, &rtc->aie_timer);
	}
	mutex_unlock(&rtc->ops_lock);
	return err;
}
EXPORT_SYMBOL_GPL(rtc_set_alarm);

/* Called once per device from rtc_device_register */
int rtc_initialize_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
{
	int err;
	struct rtc_time now;

	err = rtc_valid_tm(&alarm->time);
	if (err != 0)
		return err;

	err = rtc_read_time(rtc, &now);
	if (err)
		return err;

	err = mutex_lock_interruptible(&rtc->ops_lock);
	if (err)
		return err;

	rtc->aie_timer.node.expires = rtc_tm_to_ktime(alarm->time);
	rtc->aie_timer.period = ktime_set(0, 0);

	/* Alarm has to be enabled & in the future for us to enqueue it */
	if (alarm->enabled && (rtc_tm_to_ktime(now).tv64 <
			       rtc->aie_timer.node.expires.tv64)) {

		rtc->aie_timer.enabled = 1;
		timerqueue_add(&rtc->timerqueue, &rtc->aie_timer.node);
	}
	mutex_unlock(&rtc->ops_lock);
	return err;
}
EXPORT_SYMBOL_GPL(rtc_initialize_alarm);

/*
 * Enable or disable the alarm interrupt: (de)queue the aie_timer and
 * forward the state to the driver's alarm_irq_enable op when present.
 */
int rtc_alarm_irq_enable(struct rtc_device *rtc, unsigned int enabled)
{
	int err = mutex_lock_interruptible(&rtc->ops_lock);
	if (err)
		return err;

	if (rtc->aie_timer.enabled != enabled) {
		if (enabled)
			err = rtc_timer_enqueue(rtc, &rtc->aie_timer);
		else
			rtc_timer_remove(rtc, &rtc->aie_timer);
	}

	if (err)
		/* nothing */;
	else if (!rtc->ops)
		err = -ENODEV;
	else if (!rtc->ops->alarm_irq_enable)
		err = -EINVAL;
	else
		err = rtc->ops->alarm_irq_enable(rtc->dev.parent, enabled);

	mutex_unlock(&rtc->ops_lock);
	return err;
}
EXPORT_SYMBOL_GPL(rtc_alarm_irq_enable);

/*
 * Enable or disable the once-per-second update interrupt, emulated via
 * the uie_rtctimer (or via the dev-interface emulation when configured).
 */
int rtc_update_irq_enable(struct rtc_device *rtc, unsigned int enabled)
{
	int err = mutex_lock_interruptible(&rtc->ops_lock);
	if (err)
		return err;

#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL
	if (enabled == 0 && rtc->uie_irq_active) {
		mutex_unlock(&rtc->ops_lock);
		return rtc_dev_update_irq_enable_emul(rtc, 0);
	}
#endif
	/* make sure we're changing state */
	if (rtc->uie_rtctimer.enabled == enabled)
		goto out;

	if (rtc->uie_unsupported) {
		err = -EINVAL;
		goto out;
	}

	if (enabled) {
		struct rtc_time tm;
		ktime_t now, onesec;

		/* first tick is one second from now, then every second */
		__rtc_read_time(rtc, &tm);
		onesec = ktime_set(1, 0);
		now = rtc_tm_to_ktime(tm);
		rtc->uie_rtctimer.node.expires = ktime_add(now, onesec);
		rtc->uie_rtctimer.period = ktime_set(1, 0);
		err = rtc_timer_enqueue(rtc, &rtc->uie_rtctimer);
	} else
		rtc_timer_remove(rtc, &rtc->uie_rtctimer);

out:
	mutex_unlock(&rtc->ops_lock);
#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL
	/*
	 * Enable emulation if the driver did not provide
	 * the update_irq_enable function pointer or if returned
	 * -EINVAL to signal that it has been configured without
	 * interrupts or that they are not available at the moment.
	 */
	if (err == -EINVAL)
		err = rtc_dev_update_irq_enable_emul(rtc, enabled);
#endif
	return err;
}
EXPORT_SYMBOL_GPL(rtc_update_irq_enable);

/**
 * rtc_handle_legacy_irq - AIE, UIE and PIE event hook
 * @rtc: pointer to the rtc device
 *
 * This function is called when an AIE, UIE or PIE mode interrupt
 * has occurred (or been emulated).
 *
 * Triggers the registered irq_task function callback.
 */
void rtc_handle_legacy_irq(struct rtc_device *rtc, int num, int mode)
{
	unsigned long flags;

	/* mark one irq of the appropriate mode */
	spin_lock_irqsave(&rtc->irq_lock, flags);
	rtc->irq_data = (rtc->irq_data + (num << 8)) | (RTC_IRQF|mode);
	spin_unlock_irqrestore(&rtc->irq_lock, flags);

	/* call the task func */
	spin_lock_irqsave(&rtc->irq_task_lock, flags);
	if (rtc->irq_task)
		rtc->irq_task->func(rtc->irq_task->private_data);
	spin_unlock_irqrestore(&rtc->irq_task_lock, flags);

	wake_up_interruptible(&rtc->irq_queue);
	kill_fasync(&rtc->async_queue, SIGIO, POLL_IN);
}

/**
 * rtc_aie_update_irq - AIE mode rtctimer hook
 * @private: pointer to the rtc_device
 *
 * This function is called when the aie_timer expires.
 */
void rtc_aie_update_irq(void *private)
{
	struct rtc_device *rtc = (struct rtc_device *)private;
	rtc_handle_legacy_irq(rtc, 1, RTC_AF);
}

/**
 * rtc_uie_update_irq - UIE mode rtctimer hook
 * @private: pointer to the rtc_device
 *
 * This function is called when the uie_timer expires.
 */
void rtc_uie_update_irq(void *private)
{
	struct rtc_device *rtc = (struct rtc_device *)private;
	rtc_handle_legacy_irq(rtc, 1, RTC_UF);
}

/**
 * rtc_pie_update_irq - PIE mode hrtimer hook
 * @timer: pointer to the pie mode hrtimer
 *
 * This function is used to emulate PIE mode interrupts
 * using an hrtimer. This function is called when the periodic
 * hrtimer expires.
 */
enum hrtimer_restart rtc_pie_update_irq(struct hrtimer *timer)
{
	struct rtc_device *rtc;
	ktime_t period;
	int count;
	rtc = container_of(timer, struct rtc_device, pie_timer);

	period = ktime_set(0, NSEC_PER_SEC/rtc->irq_freq);
	count = hrtimer_forward_now(timer, period);

	rtc_handle_legacy_irq(rtc, count, RTC_PF);

	return HRTIMER_RESTART;
}

/**
 * rtc_update_irq - Triggered when a RTC interrupt occurs.
 * @rtc: the rtc device
 * @num: how many irqs are being reported (usually one)
 * @events: mask of RTC_IRQF with one or more of RTC_PF, RTC_AF, RTC_UF
 * Context: any
 */
void rtc_update_irq(struct rtc_device *rtc,
		unsigned long num, unsigned long events)
{
	/* defer all processing to the irq worker */
	schedule_work(&rtc->irqwork);
}
EXPORT_SYMBOL_GPL(rtc_update_irq);

/* class_find_device() match callback: compare by device name. */
static int __rtc_match(struct device *dev, void *data)
{
	char *name = (char *)data;

	if (strcmp(dev_name(dev), name) == 0)
		return 1;
	return 0;
}

/*
 * Look up an RTC by name and take a module + device reference on it.
 * Returns NULL when not found or the owning module is unloading.
 */
struct rtc_device *rtc_class_open(char *name)
{
	struct device *dev;
	struct rtc_device *rtc = NULL;

	dev = class_find_device(rtc_class, NULL, name, __rtc_match);
	if (dev)
		rtc = to_rtc_device(dev);

	if (rtc) {
		if (!try_module_get(rtc->owner)) {
			put_device(dev);
			rtc = NULL;
		}
	}

	return rtc;
}
EXPORT_SYMBOL_GPL(rtc_class_open);

/* Drop the references taken by rtc_class_open(). */
void rtc_class_close(struct rtc_device *rtc)
{
	module_put(rtc->owner);
	put_device(&rtc->dev);
}
EXPORT_SYMBOL_GPL(rtc_class_close);

/*
 * Register the single periodic-IRQ task for this device.
 * Fails with -EBUSY while the char dev is open or a task is installed.
 */
int rtc_irq_register(struct rtc_device *rtc, struct rtc_task *task)
{
	int retval = -EBUSY;

	if (task == NULL || task->func == NULL)
		return -EINVAL;

	/* Cannot register while the char dev is in use */
	if (test_and_set_bit_lock(RTC_DEV_BUSY, &rtc->flags))
		return -EBUSY;

	spin_lock_irq(&rtc->irq_task_lock);
	if (rtc->irq_task == NULL) {
		rtc->irq_task = task;
		retval = 0;
	}
	spin_unlock_irq(&rtc->irq_task_lock);

	clear_bit_unlock(RTC_DEV_BUSY, &rtc->flags);

	return retval;
}
EXPORT_SYMBOL_GPL(rtc_irq_register);

/* Unregister @task if it is the currently installed irq task. */
void rtc_irq_unregister(struct rtc_device *rtc, struct rtc_task *task)
{
	spin_lock_irq(&rtc->irq_task_lock);
	if (rtc->irq_task == task)
		rtc->irq_task = NULL;
	spin_unlock_irq(&rtc->irq_task_lock);
}
EXPORT_SYMBOL_GPL(rtc_irq_unregister);

/*
 * Restart (or stop) the PIE emulation hrtimer at the current irq_freq.
 * Returns -1 when the timer callback is running and cancel must be
 * retried by the caller (see comment below).
 */
static int rtc_update_hrtimer(struct rtc_device *rtc, int enabled)
{
	/*
	 * We always cancel the timer here first, because otherwise
	 * we could run into BUG_ON(timer->state != HRTIMER_STATE_CALLBACK);
	 * when we manage to start the timer before the callback
	 * returns HRTIMER_RESTART.
	 *
	 * We cannot use hrtimer_cancel() here as a running callback
	 * could be blocked on rtc->irq_task_lock and hrtimer_cancel()
	 * would spin forever.
	 */
	if (hrtimer_try_to_cancel(&rtc->pie_timer) < 0)
		return -1;

	if (enabled) {
		ktime_t period = ktime_set(0, NSEC_PER_SEC / rtc->irq_freq);

		hrtimer_start(&rtc->pie_timer, period, HRTIMER_MODE_REL);
	}
	return 0;
}

/**
 * rtc_irq_set_state - enable/disable 2^N Hz periodic IRQs
 * @rtc: the rtc device
 * @task: currently registered with rtc_irq_register()
 * @enabled: true to enable periodic IRQs
 * Context: any
 *
 * Note that rtc_irq_set_freq() should previously have been used to
 * specify the desired frequency of periodic IRQ task->func() callbacks.
 */
int rtc_irq_set_state(struct rtc_device *rtc, struct rtc_task *task, int enabled)
{
	int err = 0;
	unsigned long flags;

retry:
	spin_lock_irqsave(&rtc->irq_task_lock, flags);
	if (rtc->irq_task != NULL && task == NULL)
		err = -EBUSY;
	if (rtc->irq_task != task)
		err = -EACCES;
	if (!err) {
		if (rtc_update_hrtimer(rtc, enabled) < 0) {
			spin_unlock_irqrestore(&rtc->irq_task_lock, flags);
			cpu_relax();
			goto retry;
		}
		rtc->pie_enabled = enabled;
	}
	spin_unlock_irqrestore(&rtc->irq_task_lock, flags);
	return err;
}
EXPORT_SYMBOL_GPL(rtc_irq_set_state);

/**
 * rtc_irq_set_freq - set 2^N Hz periodic IRQ frequency for IRQ
 * @rtc: the rtc device
 * @task: currently registered with rtc_irq_register()
 * @freq: positive frequency with which task->func() will be called
 * Context: any
 *
 * Note that rtc_irq_set_state() is used to enable or disable the
 * periodic IRQs.
 */
int rtc_irq_set_freq(struct rtc_device *rtc, struct rtc_task *task, int freq)
{
	int err = 0;
	unsigned long flags;

	if (freq <= 0 || freq > RTC_MAX_FREQ)
		return -EINVAL;
retry:
	spin_lock_irqsave(&rtc->irq_task_lock, flags);
	if (rtc->irq_task != NULL && task == NULL)
		err = -EBUSY;
	if (rtc->irq_task != task)
		err = -EACCES;
	if (!err) {
		rtc->irq_freq = freq;
		if (rtc->pie_enabled && rtc_update_hrtimer(rtc, 1) < 0) {
			spin_unlock_irqrestore(&rtc->irq_task_lock, flags);
			cpu_relax();
			goto retry;
		}
	}
	spin_unlock_irqrestore(&rtc->irq_task_lock, flags);
	return err;
}
EXPORT_SYMBOL_GPL(rtc_irq_set_freq);

/**
 * rtc_timer_enqueue - Adds a rtc_timer to the rtc_device timerqueue
 * @rtc: rtc device
 * @timer: timer being added.
 *
 * Enqueues a timer onto the rtc devices timerqueue and sets
 * the next alarm event appropriately.
 *
 * Sets the enabled bit on the added timer.
 *
 * Must hold ops_lock for proper serialization of timerqueue
 */
static int rtc_timer_enqueue(struct rtc_device *rtc, struct rtc_timer *timer)
{
	timer->enabled = 1;
	timerqueue_add(&rtc->timerqueue, &timer->node);
	/* only reprogram the hardware if this timer is the new head */
	if (&timer->node == timerqueue_getnext(&rtc->timerqueue)) {
		struct rtc_wkalrm alarm;
		int err;
		alarm.time = rtc_ktime_to_tm(timer->node.expires);
		alarm.enabled = 1;
		err = __rtc_set_alarm(rtc, &alarm);
		if (err == -ETIME)
			/* already expired: let the worker fire it */
			schedule_work(&rtc->irqwork);
		else if (err) {
			timerqueue_del(&rtc->timerqueue, &timer->node);
			timer->enabled = 0;
			return err;
		}
	}
	return 0;
}

/* Turn off the hardware alarm interrupt, if the driver supports it. */
static void rtc_alarm_disable(struct rtc_device *rtc)
{
	if (!rtc->ops || !rtc->ops->alarm_irq_enable)
		return;

	rtc->ops->alarm_irq_enable(rtc->dev.parent, false);
}

/**
 * rtc_timer_remove - Removes a rtc_timer from the rtc_device timerqueue
 * @rtc: rtc device
 * @timer: timer being removed.
 *
 * Removes a timer from the rtc devices timerqueue and sets
 * the next alarm event appropriately.
 *
 * Clears the enabled bit on the removed timer.
 *
 * Must hold ops_lock for proper serialization of timerqueue
 */
static void rtc_timer_remove(struct rtc_device *rtc, struct rtc_timer *timer)
{
	struct timerqueue_node *next = timerqueue_getnext(&rtc->timerqueue);
	timerqueue_del(&rtc->timerqueue, &timer->node);
	timer->enabled = 0;
	/* reprogram only if we removed the current head */
	if (next == &timer->node) {
		struct rtc_wkalrm alarm;
		int err;
		next = timerqueue_getnext(&rtc->timerqueue);
		if (!next) {
			rtc_alarm_disable(rtc);
			return;
		}
		alarm.time = rtc_ktime_to_tm(next->expires);
		alarm.enabled = 1;
		err = __rtc_set_alarm(rtc, &alarm);
		if (err == -ETIME)
			schedule_work(&rtc->irqwork);
	}
}

/**
 * rtc_timer_do_work - Expires rtc timers
 * @work: work item for the rtc device irqwork
 *
 * Expires rtc timers. Reprograms next alarm event if needed.
 * Called via worktask.
 *
 * Serializes access to timerqueue via ops_lock mutex
 */
void rtc_timer_do_work(struct work_struct *work)
{
	struct rtc_timer *timer;
	struct timerqueue_node *next;
	ktime_t now;
	struct rtc_time tm;

	struct rtc_device *rtc =
		container_of(work, struct rtc_device, irqwork);

	mutex_lock(&rtc->ops_lock);
again:
	__rtc_read_time(rtc, &tm);
	now = rtc_tm_to_ktime(tm);
	while ((next = timerqueue_getnext(&rtc->timerqueue))) {
		if (next->expires.tv64 > now.tv64)
			break;

		/* expire timer */
		timer = container_of(next, struct rtc_timer, node);
		timerqueue_del(&rtc->timerqueue, &timer->node);
		timer->enabled = 0;
		if (timer->task.func)
			timer->task.func(timer->task.private_data);

		/* Re-add/fwd periodic timers */
		if (ktime_to_ns(timer->period)) {
			timer->node.expires = ktime_add(timer->node.expires,
							timer->period);
			timer->enabled = 1;
			timerqueue_add(&rtc->timerqueue, &timer->node);
		}
	}

	/* Set next alarm */
	if (next) {
		struct rtc_wkalrm alarm;
		int err;
		alarm.time = rtc_ktime_to_tm(next->expires);
		alarm.enabled = 1;
		err = __rtc_set_alarm(rtc, &alarm);
		if (err == -ETIME)
			/* head already expired while programming: rescan */
			goto again;
	} else
		rtc_alarm_disable(rtc);

	mutex_unlock(&rtc->ops_lock);
}

/* rtc_timer_init - Initializes an rtc_timer
 * @timer: timer to be initialized
 * @f: function pointer to be called when timer fires
 * @data: private data passed to function pointer
 *
 * Kernel interface to initializing an rtc_timer.
 */
void rtc_timer_init(struct rtc_timer *timer, void (*f)(void* p), void* data)
{
	timerqueue_init(&timer->node);
	timer->enabled = 0;
	timer->task.func = f;
	timer->task.private_data = data;
}

/* rtc_timer_start - Sets an rtc_timer to fire in the future
 * @rtc: rtc device to be used
 * @timer: timer being set
 * @expires: time at which to expire the timer
 * @period: period that the timer will recur
 *
 * Kernel interface to set an rtc_timer
 */
int rtc_timer_start(struct rtc_device *rtc, struct rtc_timer* timer,
			ktime_t expires, ktime_t period)
{
	int ret = 0;
	mutex_lock(&rtc->ops_lock);
	if (timer->enabled)
		rtc_timer_remove(rtc, timer);

	timer->node.expires = expires;
	timer->period = period;

	ret = rtc_timer_enqueue(rtc, timer);

	mutex_unlock(&rtc->ops_lock);
	return ret;
}

/* rtc_timer_cancel - Stops an rtc_timer
 * @rtc: rtc device to be used
 * @timer: timer being set
 *
 * Kernel interface to cancel an rtc_timer
 */
int rtc_timer_cancel(struct rtc_device *rtc, struct rtc_timer* timer)
{
	int ret = 0;
	mutex_lock(&rtc->ops_lock);
	if (timer->enabled)
		rtc_timer_remove(rtc, timer);
	mutex_unlock(&rtc->ops_lock);
	return ret;
}
gpl-2.0
wangxingchao/s3c6410
sound/soc/nuc900/nuc900-audio.c
3203
1699
/* * Copyright (c) 2010 Nuvoton technology corporation. * * Wan ZongShun <mcuos.com@gmail.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation;version 2 of the License. * */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/timer.h> #include <linux/interrupt.h> #include <linux/platform_device.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/soc.h> #include "nuc900-audio.h" static struct snd_soc_dai_link nuc900evb_ac97_dai = { .name = "AC97", .stream_name = "AC97 HiFi", .cpu_dai_name = "nuc900-ac97", .codec_dai_name = "ac97-hifi", .codec_name = "ac97-codec", .platform_name = "nuc900-pcm-audio", }; static struct snd_soc_card nuc900evb_audio_machine = { .name = "NUC900EVB_AC97", .dai_link = &nuc900evb_ac97_dai, .num_links = 1, }; static struct platform_device *nuc900evb_asoc_dev; static int __init nuc900evb_audio_init(void) { int ret; ret = -ENOMEM; nuc900evb_asoc_dev = platform_device_alloc("soc-audio", -1); if (!nuc900evb_asoc_dev) goto out; /* nuc900 board audio device */ platform_set_drvdata(nuc900evb_asoc_dev, &nuc900evb_audio_machine); ret = platform_device_add(nuc900evb_asoc_dev); if (ret) { platform_device_put(nuc900evb_asoc_dev); nuc900evb_asoc_dev = NULL; } out: return ret; } static void __exit nuc900evb_audio_exit(void) { platform_device_unregister(nuc900evb_asoc_dev); } module_init(nuc900evb_audio_init); module_exit(nuc900evb_audio_exit); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("NUC900 Series ASoC audio support"); MODULE_AUTHOR("Wan ZongShun");
gpl-2.0
stargo/android_kernel_amazon_ford
drivers/media/dvb-frontends/dvb-pll.c
3971
21209
/* * descriptions + helper functions for simple dvb plls. * * (c) 2004 Gerd Knorr <kraxel@bytesex.org> [SuSE Labs] * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/slab.h> #include <linux/module.h> #include <linux/dvb/frontend.h> #include <asm/types.h> #include "dvb-pll.h" struct dvb_pll_priv { /* pll number */ int nr; /* i2c details */ int pll_i2c_address; struct i2c_adapter *i2c; /* the PLL descriptor */ struct dvb_pll_desc *pll_desc; /* cached frequency/bandwidth */ u32 frequency; u32 bandwidth; }; #define DVB_PLL_MAX 64 static unsigned int dvb_pll_devcount; static int debug; module_param(debug, int, 0644); MODULE_PARM_DESC(debug, "enable verbose debug messages"); static unsigned int id[DVB_PLL_MAX] = { [ 0 ... 
(DVB_PLL_MAX-1) ] = DVB_PLL_UNDEFINED }; module_param_array(id, int, NULL, 0644); MODULE_PARM_DESC(id, "force pll id to use (DEBUG ONLY)"); /* ----------------------------------------------------------- */ struct dvb_pll_desc { char *name; u32 min; u32 max; u32 iffreq; void (*set)(struct dvb_frontend *fe, u8 *buf); u8 *initdata; u8 *initdata2; u8 *sleepdata; int count; struct { u32 limit; u32 stepsize; u8 config; u8 cb; } entries[12]; }; /* ----------------------------------------------------------- */ /* descriptions */ static struct dvb_pll_desc dvb_pll_thomson_dtt7579 = { .name = "Thomson dtt7579", .min = 177000000, .max = 858000000, .iffreq= 36166667, .sleepdata = (u8[]){ 2, 0xb4, 0x03 }, .count = 4, .entries = { { 443250000, 166667, 0xb4, 0x02 }, { 542000000, 166667, 0xb4, 0x08 }, { 771000000, 166667, 0xbc, 0x08 }, { 999999999, 166667, 0xf4, 0x08 }, }, }; static void thomson_dtt759x_bw(struct dvb_frontend *fe, u8 *buf) { u32 bw = fe->dtv_property_cache.bandwidth_hz; if (bw == 7000000) buf[3] |= 0x10; } static struct dvb_pll_desc dvb_pll_thomson_dtt759x = { .name = "Thomson dtt759x", .min = 177000000, .max = 896000000, .set = thomson_dtt759x_bw, .iffreq= 36166667, .sleepdata = (u8[]){ 2, 0x84, 0x03 }, .count = 5, .entries = { { 264000000, 166667, 0xb4, 0x02 }, { 470000000, 166667, 0xbc, 0x02 }, { 735000000, 166667, 0xbc, 0x08 }, { 835000000, 166667, 0xf4, 0x08 }, { 999999999, 166667, 0xfc, 0x08 }, }, }; static void thomson_dtt7520x_bw(struct dvb_frontend *fe, u8 *buf) { u32 bw = fe->dtv_property_cache.bandwidth_hz; if (bw == 8000000) buf[3] ^= 0x10; } static struct dvb_pll_desc dvb_pll_thomson_dtt7520x = { .name = "Thomson dtt7520x", .min = 185000000, .max = 900000000, .set = thomson_dtt7520x_bw, .iffreq = 36166667, .count = 7, .entries = { { 305000000, 166667, 0xb4, 0x12 }, { 405000000, 166667, 0xbc, 0x12 }, { 445000000, 166667, 0xbc, 0x12 }, { 465000000, 166667, 0xf4, 0x18 }, { 735000000, 166667, 0xfc, 0x18 }, { 835000000, 166667, 0xbc, 0x18 }, { 999999999, 
166667, 0xfc, 0x18 }, }, }; static struct dvb_pll_desc dvb_pll_lg_z201 = { .name = "LG z201", .min = 174000000, .max = 862000000, .iffreq= 36166667, .sleepdata = (u8[]){ 2, 0xbc, 0x03 }, .count = 5, .entries = { { 157500000, 166667, 0xbc, 0x01 }, { 443250000, 166667, 0xbc, 0x02 }, { 542000000, 166667, 0xbc, 0x04 }, { 830000000, 166667, 0xf4, 0x04 }, { 999999999, 166667, 0xfc, 0x04 }, }, }; static struct dvb_pll_desc dvb_pll_unknown_1 = { .name = "unknown 1", /* used by dntv live dvb-t */ .min = 174000000, .max = 862000000, .iffreq= 36166667, .count = 9, .entries = { { 150000000, 166667, 0xb4, 0x01 }, { 173000000, 166667, 0xbc, 0x01 }, { 250000000, 166667, 0xb4, 0x02 }, { 400000000, 166667, 0xbc, 0x02 }, { 420000000, 166667, 0xf4, 0x02 }, { 470000000, 166667, 0xfc, 0x02 }, { 600000000, 166667, 0xbc, 0x08 }, { 730000000, 166667, 0xf4, 0x08 }, { 999999999, 166667, 0xfc, 0x08 }, }, }; /* Infineon TUA6010XS * used in Thomson Cable Tuner */ static struct dvb_pll_desc dvb_pll_tua6010xs = { .name = "Infineon TUA6010XS", .min = 44250000, .max = 858000000, .iffreq= 36125000, .count = 3, .entries = { { 115750000, 62500, 0x8e, 0x03 }, { 403250000, 62500, 0x8e, 0x06 }, { 999999999, 62500, 0x8e, 0x85 }, }, }; /* Panasonic env57h1xd5 (some Philips PLL ?) 
*/ static struct dvb_pll_desc dvb_pll_env57h1xd5 = { .name = "Panasonic ENV57H1XD5", .min = 44250000, .max = 858000000, .iffreq= 36125000, .count = 4, .entries = { { 153000000, 166667, 0xc2, 0x41 }, { 470000000, 166667, 0xc2, 0x42 }, { 526000000, 166667, 0xc2, 0x84 }, { 999999999, 166667, 0xc2, 0xa4 }, }, }; /* Philips TDA6650/TDA6651 * used in Panasonic ENV77H11D5 */ static void tda665x_bw(struct dvb_frontend *fe, u8 *buf) { u32 bw = fe->dtv_property_cache.bandwidth_hz; if (bw == 8000000) buf[3] |= 0x08; } static struct dvb_pll_desc dvb_pll_tda665x = { .name = "Philips TDA6650/TDA6651", .min = 44250000, .max = 858000000, .set = tda665x_bw, .iffreq= 36166667, .initdata = (u8[]){ 4, 0x0b, 0xf5, 0x85, 0xab }, .count = 12, .entries = { { 93834000, 166667, 0xca, 0x61 /* 011 0 0 0 01 */ }, { 123834000, 166667, 0xca, 0xa1 /* 101 0 0 0 01 */ }, { 161000000, 166667, 0xca, 0xa1 /* 101 0 0 0 01 */ }, { 163834000, 166667, 0xca, 0xc2 /* 110 0 0 0 10 */ }, { 253834000, 166667, 0xca, 0x62 /* 011 0 0 0 10 */ }, { 383834000, 166667, 0xca, 0xa2 /* 101 0 0 0 10 */ }, { 443834000, 166667, 0xca, 0xc2 /* 110 0 0 0 10 */ }, { 444000000, 166667, 0xca, 0xc4 /* 110 0 0 1 00 */ }, { 583834000, 166667, 0xca, 0x64 /* 011 0 0 1 00 */ }, { 793834000, 166667, 0xca, 0xa4 /* 101 0 0 1 00 */ }, { 444834000, 166667, 0xca, 0xc4 /* 110 0 0 1 00 */ }, { 861000000, 166667, 0xca, 0xe4 /* 111 0 0 1 00 */ }, } }; /* Infineon TUA6034 * used in LG TDTP E102P */ static void tua6034_bw(struct dvb_frontend *fe, u8 *buf) { u32 bw = fe->dtv_property_cache.bandwidth_hz; if (bw == 7000000) buf[3] |= 0x08; } static struct dvb_pll_desc dvb_pll_tua6034 = { .name = "Infineon TUA6034", .min = 44250000, .max = 858000000, .iffreq= 36166667, .count = 3, .set = tua6034_bw, .entries = { { 174500000, 62500, 0xce, 0x01 }, { 230000000, 62500, 0xce, 0x02 }, { 999999999, 62500, 0xce, 0x04 }, }, }; /* ALPS TDED4 * used in Nebula-Cards and USB boxes */ static void tded4_bw(struct dvb_frontend *fe, u8 *buf) { u32 bw = 
fe->dtv_property_cache.bandwidth_hz; if (bw == 8000000) buf[3] |= 0x04; } static struct dvb_pll_desc dvb_pll_tded4 = { .name = "ALPS TDED4", .min = 47000000, .max = 863000000, .iffreq= 36166667, .set = tded4_bw, .count = 4, .entries = { { 153000000, 166667, 0x85, 0x01 }, { 470000000, 166667, 0x85, 0x02 }, { 823000000, 166667, 0x85, 0x08 }, { 999999999, 166667, 0x85, 0x88 }, } }; /* ALPS TDHU2 * used in AverTVHD MCE A180 */ static struct dvb_pll_desc dvb_pll_tdhu2 = { .name = "ALPS TDHU2", .min = 54000000, .max = 864000000, .iffreq= 44000000, .count = 4, .entries = { { 162000000, 62500, 0x85, 0x01 }, { 426000000, 62500, 0x85, 0x02 }, { 782000000, 62500, 0x85, 0x08 }, { 999999999, 62500, 0x85, 0x88 }, } }; /* Samsung TBMV30111IN / TBMV30712IN1 * used in Air2PC ATSC - 2nd generation (nxt2002) */ static struct dvb_pll_desc dvb_pll_samsung_tbmv = { .name = "Samsung TBMV30111IN / TBMV30712IN1", .min = 54000000, .max = 860000000, .iffreq= 44000000, .count = 6, .entries = { { 172000000, 166667, 0xb4, 0x01 }, { 214000000, 166667, 0xb4, 0x02 }, { 467000000, 166667, 0xbc, 0x02 }, { 721000000, 166667, 0xbc, 0x08 }, { 841000000, 166667, 0xf4, 0x08 }, { 999999999, 166667, 0xfc, 0x02 }, } }; /* * Philips SD1878 Tuner. 
*/ static struct dvb_pll_desc dvb_pll_philips_sd1878_tda8261 = { .name = "Philips SD1878", .min = 950000, .max = 2150000, .iffreq= 249, /* zero-IF, offset 249 is to round up */ .count = 4, .entries = { { 1250000, 500, 0xc4, 0x00}, { 1450000, 500, 0xc4, 0x40}, { 2050000, 500, 0xc4, 0x80}, { 2150000, 500, 0xc4, 0xc0}, }, }; static void opera1_bw(struct dvb_frontend *fe, u8 *buf) { struct dtv_frontend_properties *c = &fe->dtv_property_cache; struct dvb_pll_priv *priv = fe->tuner_priv; u32 b_w = (c->symbol_rate * 27) / 32000; struct i2c_msg msg = { .addr = priv->pll_i2c_address, .flags = 0, .buf = buf, .len = 4 }; int result; u8 lpf; if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 1); result = i2c_transfer(priv->i2c, &msg, 1); if (result != 1) printk(KERN_ERR "%s: i2c_transfer failed:%d", __func__, result); if (b_w <= 10000) lpf = 0xc; else if (b_w <= 12000) lpf = 0x2; else if (b_w <= 14000) lpf = 0xa; else if (b_w <= 16000) lpf = 0x6; else if (b_w <= 18000) lpf = 0xe; else if (b_w <= 20000) lpf = 0x1; else if (b_w <= 22000) lpf = 0x9; else if (b_w <= 24000) lpf = 0x5; else if (b_w <= 26000) lpf = 0xd; else if (b_w <= 28000) lpf = 0x3; else lpf = 0xb; buf[2] ^= 0x1c; /* Flip bits 3-5 */ /* Set lpf */ buf[2] |= ((lpf >> 2) & 0x3) << 3; buf[3] |= (lpf & 0x3) << 2; return; } static struct dvb_pll_desc dvb_pll_opera1 = { .name = "Opera Tuner", .min = 900000, .max = 2250000, .initdata = (u8[]){ 4, 0x08, 0xe5, 0xe1, 0x00 }, .initdata2 = (u8[]){ 4, 0x08, 0xe5, 0xe5, 0x00 }, .iffreq= 0, .set = opera1_bw, .count = 8, .entries = { { 1064000, 500, 0xf9, 0xc2 }, { 1169000, 500, 0xf9, 0xe2 }, { 1299000, 500, 0xf9, 0x20 }, { 1444000, 500, 0xf9, 0x40 }, { 1606000, 500, 0xf9, 0x60 }, { 1777000, 500, 0xf9, 0x80 }, { 1941000, 500, 0xf9, 0xa0 }, { 2250000, 500, 0xf9, 0xc0 }, } }; static void samsung_dtos403ih102a_set(struct dvb_frontend *fe, u8 *buf) { struct dvb_pll_priv *priv = fe->tuner_priv; struct i2c_msg msg = { .addr = priv->pll_i2c_address, .flags = 0, .buf = buf, .len = 4 
}; int result; if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 1); result = i2c_transfer(priv->i2c, &msg, 1); if (result != 1) printk(KERN_ERR "%s: i2c_transfer failed:%d", __func__, result); buf[2] = 0x9e; buf[3] = 0x90; return; } /* unknown pll used in Samsung DTOS403IH102A DVB-C tuner */ static struct dvb_pll_desc dvb_pll_samsung_dtos403ih102a = { .name = "Samsung DTOS403IH102A", .min = 44250000, .max = 858000000, .iffreq = 36125000, .count = 8, .set = samsung_dtos403ih102a_set, .entries = { { 135000000, 62500, 0xbe, 0x01 }, { 177000000, 62500, 0xf6, 0x01 }, { 370000000, 62500, 0xbe, 0x02 }, { 450000000, 62500, 0xf6, 0x02 }, { 466000000, 62500, 0xfe, 0x02 }, { 538000000, 62500, 0xbe, 0x08 }, { 826000000, 62500, 0xf6, 0x08 }, { 999999999, 62500, 0xfe, 0x08 }, } }; /* Samsung TDTC9251DH0 DVB-T NIM, as used on AirStar 2 */ static struct dvb_pll_desc dvb_pll_samsung_tdtc9251dh0 = { .name = "Samsung TDTC9251DH0", .min = 48000000, .max = 863000000, .iffreq = 36166667, .count = 3, .entries = { { 157500000, 166667, 0xcc, 0x09 }, { 443000000, 166667, 0xcc, 0x0a }, { 863000000, 166667, 0xcc, 0x08 }, } }; /* Samsung TBDU18132 DVB-S NIM with TSA5059 PLL, used in SkyStar2 DVB-S 2.3 */ static struct dvb_pll_desc dvb_pll_samsung_tbdu18132 = { .name = "Samsung TBDU18132", .min = 950000, .max = 2150000, /* guesses */ .iffreq = 0, .count = 2, .entries = { { 1550000, 125, 0x84, 0x82 }, { 4095937, 125, 0x84, 0x80 }, } /* TSA5059 PLL has a 17 bit divisor rather than the 15 bits supported * by this driver. The two extra bits are 0x60 in the third byte. 15 * bits is enough for over 4 GHz, which is enough to cover the range * of this tuner. We could use the additional divisor bits by adding * more entries, e.g. 
{ 0x0ffff * 125 + 125/2, 125, 0x84 | 0x20, }, { 0x17fff * 125 + 125/2, 125, 0x84 | 0x40, }, { 0x1ffff * 125 + 125/2, 125, 0x84 | 0x60, }, */ }; /* Samsung TBMU24112 DVB-S NIM with SL1935 zero-IF tuner */ static struct dvb_pll_desc dvb_pll_samsung_tbmu24112 = { .name = "Samsung TBMU24112", .min = 950000, .max = 2150000, /* guesses */ .iffreq = 0, .count = 2, .entries = { { 1500000, 125, 0x84, 0x18 }, { 9999999, 125, 0x84, 0x08 }, } }; /* Alps TDEE4 DVB-C NIM, used on Cablestar 2 */ /* byte 4 : 1 * * AGD R3 R2 R1 R0 * byte 5 : C1 * RE RTS BS4 BS3 BS2 BS1 * AGD = 1, R3 R2 R1 R0 = 0 1 0 1 => byte 4 = 1**10101 = 0x95 * Range(MHz) C1 * RE RTS BS4 BS3 BS2 BS1 Byte 5 * 47 - 153 0 * 0 0 0 0 0 1 0x01 * 153 - 430 0 * 0 0 0 0 1 0 0x02 * 430 - 822 0 * 0 0 1 0 0 0 0x08 * 822 - 862 1 * 0 0 1 0 0 0 0x88 */ static struct dvb_pll_desc dvb_pll_alps_tdee4 = { .name = "ALPS TDEE4", .min = 47000000, .max = 862000000, .iffreq = 36125000, .count = 4, .entries = { { 153000000, 62500, 0x95, 0x01 }, { 430000000, 62500, 0x95, 0x02 }, { 822000000, 62500, 0x95, 0x08 }, { 999999999, 62500, 0x95, 0x88 }, } }; /* ----------------------------------------------------------- */ static struct dvb_pll_desc *pll_list[] = { [DVB_PLL_UNDEFINED] = NULL, [DVB_PLL_THOMSON_DTT7579] = &dvb_pll_thomson_dtt7579, [DVB_PLL_THOMSON_DTT759X] = &dvb_pll_thomson_dtt759x, [DVB_PLL_THOMSON_DTT7520X] = &dvb_pll_thomson_dtt7520x, [DVB_PLL_LG_Z201] = &dvb_pll_lg_z201, [DVB_PLL_UNKNOWN_1] = &dvb_pll_unknown_1, [DVB_PLL_TUA6010XS] = &dvb_pll_tua6010xs, [DVB_PLL_ENV57H1XD5] = &dvb_pll_env57h1xd5, [DVB_PLL_TUA6034] = &dvb_pll_tua6034, [DVB_PLL_TDA665X] = &dvb_pll_tda665x, [DVB_PLL_TDED4] = &dvb_pll_tded4, [DVB_PLL_TDEE4] = &dvb_pll_alps_tdee4, [DVB_PLL_TDHU2] = &dvb_pll_tdhu2, [DVB_PLL_SAMSUNG_TBMV] = &dvb_pll_samsung_tbmv, [DVB_PLL_PHILIPS_SD1878_TDA8261] = &dvb_pll_philips_sd1878_tda8261, [DVB_PLL_OPERA1] = &dvb_pll_opera1, [DVB_PLL_SAMSUNG_DTOS403IH102A] = &dvb_pll_samsung_dtos403ih102a, [DVB_PLL_SAMSUNG_TDTC9251DH0] = 
&dvb_pll_samsung_tdtc9251dh0, [DVB_PLL_SAMSUNG_TBDU18132] = &dvb_pll_samsung_tbdu18132, [DVB_PLL_SAMSUNG_TBMU24112] = &dvb_pll_samsung_tbmu24112, }; /* ----------------------------------------------------------- */ /* code */ static int dvb_pll_configure(struct dvb_frontend *fe, u8 *buf, const u32 frequency) { struct dvb_pll_priv *priv = fe->tuner_priv; struct dvb_pll_desc *desc = priv->pll_desc; u32 div; int i; if (frequency && (frequency < desc->min || frequency > desc->max)) return -EINVAL; for (i = 0; i < desc->count; i++) { if (frequency > desc->entries[i].limit) continue; break; } if (debug) printk("pll: %s: freq=%d | i=%d/%d\n", desc->name, frequency, i, desc->count); if (i == desc->count) return -EINVAL; div = (frequency + desc->iffreq + desc->entries[i].stepsize/2) / desc->entries[i].stepsize; buf[0] = div >> 8; buf[1] = div & 0xff; buf[2] = desc->entries[i].config; buf[3] = desc->entries[i].cb; if (desc->set) desc->set(fe, buf); if (debug) printk("pll: %s: div=%d | buf=0x%02x,0x%02x,0x%02x,0x%02x\n", desc->name, div, buf[0], buf[1], buf[2], buf[3]); // calculate the frequency we set it to return (div * desc->entries[i].stepsize) - desc->iffreq; } static int dvb_pll_release(struct dvb_frontend *fe) { kfree(fe->tuner_priv); fe->tuner_priv = NULL; return 0; } static int dvb_pll_sleep(struct dvb_frontend *fe) { struct dvb_pll_priv *priv = fe->tuner_priv; if (priv->i2c == NULL) return -EINVAL; if (priv->pll_desc->sleepdata) { struct i2c_msg msg = { .flags = 0, .addr = priv->pll_i2c_address, .buf = priv->pll_desc->sleepdata + 1, .len = priv->pll_desc->sleepdata[0] }; int result; if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 1); if ((result = i2c_transfer(priv->i2c, &msg, 1)) != 1) { return result; } return 0; } /* Shouldn't be called when initdata is NULL, maybe BUG()? 
*/ return -EINVAL; } static int dvb_pll_set_params(struct dvb_frontend *fe) { struct dtv_frontend_properties *c = &fe->dtv_property_cache; struct dvb_pll_priv *priv = fe->tuner_priv; u8 buf[4]; struct i2c_msg msg = { .addr = priv->pll_i2c_address, .flags = 0, .buf = buf, .len = sizeof(buf) }; int result; u32 frequency = 0; if (priv->i2c == NULL) return -EINVAL; result = dvb_pll_configure(fe, buf, c->frequency); if (result < 0) return result; else frequency = result; if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 1); if ((result = i2c_transfer(priv->i2c, &msg, 1)) != 1) { return result; } priv->frequency = frequency; priv->bandwidth = c->bandwidth_hz; return 0; } static int dvb_pll_calc_regs(struct dvb_frontend *fe, u8 *buf, int buf_len) { struct dtv_frontend_properties *c = &fe->dtv_property_cache; struct dvb_pll_priv *priv = fe->tuner_priv; int result; u32 frequency = 0; if (buf_len < 5) return -EINVAL; result = dvb_pll_configure(fe, buf + 1, c->frequency); if (result < 0) return result; else frequency = result; buf[0] = priv->pll_i2c_address; priv->frequency = frequency; priv->bandwidth = c->bandwidth_hz; return 5; } static int dvb_pll_get_frequency(struct dvb_frontend *fe, u32 *frequency) { struct dvb_pll_priv *priv = fe->tuner_priv; *frequency = priv->frequency; return 0; } static int dvb_pll_get_bandwidth(struct dvb_frontend *fe, u32 *bandwidth) { struct dvb_pll_priv *priv = fe->tuner_priv; *bandwidth = priv->bandwidth; return 0; } static int dvb_pll_init(struct dvb_frontend *fe) { struct dvb_pll_priv *priv = fe->tuner_priv; if (priv->i2c == NULL) return -EINVAL; if (priv->pll_desc->initdata) { struct i2c_msg msg = { .flags = 0, .addr = priv->pll_i2c_address, .buf = priv->pll_desc->initdata + 1, .len = priv->pll_desc->initdata[0] }; int result; if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 1); result = i2c_transfer(priv->i2c, &msg, 1); if (result != 1) return result; if (priv->pll_desc->initdata2) { msg.buf = priv->pll_desc->initdata2 + 1; msg.len 
= priv->pll_desc->initdata2[0]; if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 1); result = i2c_transfer(priv->i2c, &msg, 1); if (result != 1) return result; } return 0; } /* Shouldn't be called when initdata is NULL, maybe BUG()? */ return -EINVAL; } static struct dvb_tuner_ops dvb_pll_tuner_ops = { .release = dvb_pll_release, .sleep = dvb_pll_sleep, .init = dvb_pll_init, .set_params = dvb_pll_set_params, .calc_regs = dvb_pll_calc_regs, .get_frequency = dvb_pll_get_frequency, .get_bandwidth = dvb_pll_get_bandwidth, }; struct dvb_frontend *dvb_pll_attach(struct dvb_frontend *fe, int pll_addr, struct i2c_adapter *i2c, unsigned int pll_desc_id) { u8 b1 [] = { 0 }; struct i2c_msg msg = { .addr = pll_addr, .flags = I2C_M_RD, .buf = b1, .len = 1 }; struct dvb_pll_priv *priv = NULL; int ret; struct dvb_pll_desc *desc; if ((id[dvb_pll_devcount] > DVB_PLL_UNDEFINED) && (id[dvb_pll_devcount] < ARRAY_SIZE(pll_list))) pll_desc_id = id[dvb_pll_devcount]; BUG_ON(pll_desc_id < 1 || pll_desc_id >= ARRAY_SIZE(pll_list)); desc = pll_list[pll_desc_id]; if (i2c != NULL) { if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 1); ret = i2c_transfer (i2c, &msg, 1); if (ret != 1) return NULL; if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 0); } priv = kzalloc(sizeof(struct dvb_pll_priv), GFP_KERNEL); if (priv == NULL) return NULL; priv->pll_i2c_address = pll_addr; priv->i2c = i2c; priv->pll_desc = desc; priv->nr = dvb_pll_devcount++; memcpy(&fe->ops.tuner_ops, &dvb_pll_tuner_ops, sizeof(struct dvb_tuner_ops)); strncpy(fe->ops.tuner_ops.info.name, desc->name, sizeof(fe->ops.tuner_ops.info.name)); fe->ops.tuner_ops.info.frequency_min = desc->min; fe->ops.tuner_ops.info.frequency_max = desc->max; if (!desc->initdata) fe->ops.tuner_ops.init = NULL; if (!desc->sleepdata) fe->ops.tuner_ops.sleep = NULL; fe->tuner_priv = priv; if ((debug) || (id[priv->nr] == pll_desc_id)) { printk("dvb-pll[%d]", priv->nr); if (i2c != NULL) printk(" %d-%04x", i2c_adapter_id(i2c), pll_addr); printk(": 
id# %d (%s) attached, %s\n", pll_desc_id, desc->name, id[priv->nr] == pll_desc_id ? "insmod option" : "autodetected"); } return fe; } EXPORT_SYMBOL(dvb_pll_attach); MODULE_DESCRIPTION("dvb pll library"); MODULE_AUTHOR("Gerd Knorr"); MODULE_LICENSE("GPL");
gpl-2.0
rezvorck/android_kernel_s450m_4g_mm
arch/arm/mach-davinci/common.c
4227
2972
/* * Code commons to all DaVinci SoCs. * * Author: Mark A. Greer <mgreer@mvista.com> * * 2009 (c) MontaVista Software, Inc. This file is licensed under * the terms of the GNU General Public License version 2. This program * is licensed "as is" without any warranty of any kind, whether express * or implied. */ #include <linux/module.h> #include <linux/io.h> #include <linux/etherdevice.h> #include <linux/davinci_emac.h> #include <linux/dma-mapping.h> #include <asm/tlb.h> #include <asm/mach/map.h> #include <mach/common.h> #include <mach/cputype.h> #include "clock.h" struct davinci_soc_info davinci_soc_info; EXPORT_SYMBOL(davinci_soc_info); void __iomem *davinci_intc_base; int davinci_intc_type; void davinci_get_mac_addr(struct memory_accessor *mem_acc, void *context) { char *mac_addr = davinci_soc_info.emac_pdata->mac_addr; off_t offset = (off_t)context; /* Read MAC addr from EEPROM */ if (mem_acc->read(mem_acc, mac_addr, offset, ETH_ALEN) == ETH_ALEN) pr_info("Read MAC addr from EEPROM: %pM\n", mac_addr); } static int __init davinci_init_id(struct davinci_soc_info *soc_info) { int i; struct davinci_id *dip; u8 variant; u16 part_no; void __iomem *base; base = ioremap(soc_info->jtag_id_reg, SZ_4K); if (!base) { pr_err("Unable to map JTAG ID register\n"); return -ENOMEM; } soc_info->jtag_id = __raw_readl(base); iounmap(base); variant = (soc_info->jtag_id & 0xf0000000) >> 28; part_no = (soc_info->jtag_id & 0x0ffff000) >> 12; for (i = 0, dip = soc_info->ids; i < soc_info->ids_num; i++, dip++) /* Don't care about the manufacturer right now */ if ((dip->part_no == part_no) && (dip->variant == variant)) { soc_info->cpu_id = dip->cpu_id; pr_info("DaVinci %s variant 0x%x\n", dip->name, dip->variant); return 0; } pr_err("Unknown DaVinci JTAG ID 0x%x\n", soc_info->jtag_id); return -EINVAL; } void __init davinci_common_init(struct davinci_soc_info *soc_info) { int ret; if (!soc_info) { ret = -EINVAL; goto err; } memcpy(&davinci_soc_info, soc_info, sizeof(struct 
davinci_soc_info)); if (davinci_soc_info.io_desc && (davinci_soc_info.io_desc_num > 0)) iotable_init(davinci_soc_info.io_desc, davinci_soc_info.io_desc_num); /* * Normally devicemaps_init() would flush caches and tlb after * mdesc->map_io(), but we must also do it here because of the CPU * revision check below. */ local_flush_tlb_all(); flush_cache_all(); /* * We want to check CPU revision early for cpu_is_xxxx() macros. * IO space mapping must be initialized before we can do that. */ ret = davinci_init_id(&davinci_soc_info); if (ret < 0) goto err; if (davinci_soc_info.cpu_clks) { ret = davinci_clk_init(davinci_soc_info.cpu_clks); if (ret != 0) goto err; } return; err: panic("davinci_common_init: SoC Initialization failed\n"); } void __init davinci_init_late(void) { davinci_cpufreq_init(); davinci_pm_init(); davinci_clk_disable_unused(); }
gpl-2.0
Callie-Cacophony/CAF-test-kernel
fs/ramfs/inode.c
4483
6887
/* * Resizable simple ram filesystem for Linux. * * Copyright (C) 2000 Linus Torvalds. * 2000 Transmeta Corp. * * Usage limits added by David Gibson, Linuxcare Australia. * This file is released under the GPL. */ /* * NOTE! This filesystem is probably most useful * not as a real filesystem, but as an example of * how virtual filesystems can be written. * * It doesn't get much simpler than this. Consider * that this file implements the full semantics of * a POSIX-compliant read-write filesystem. * * Note in particular how the filesystem does not * need to implement any data structures of its own * to keep track of the virtual data: using the VFS * caches is sufficient. */ #include <linux/fs.h> #include <linux/pagemap.h> #include <linux/highmem.h> #include <linux/time.h> #include <linux/init.h> #include <linux/string.h> #include <linux/backing-dev.h> #include <linux/ramfs.h> #include <linux/sched.h> #include <linux/parser.h> #include <linux/magic.h> #include <linux/slab.h> #include <asm/uaccess.h> #include "internal.h" #define RAMFS_DEFAULT_MODE 0755 static const struct super_operations ramfs_ops; static const struct inode_operations ramfs_dir_inode_operations; static struct backing_dev_info ramfs_backing_dev_info = { .name = "ramfs", .ra_pages = 0, /* No readahead */ .capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK | BDI_CAP_MAP_DIRECT | BDI_CAP_MAP_COPY | BDI_CAP_READ_MAP | BDI_CAP_WRITE_MAP | BDI_CAP_EXEC_MAP, }; struct inode *ramfs_get_inode(struct super_block *sb, const struct inode *dir, umode_t mode, dev_t dev) { struct inode * inode = new_inode(sb); if (inode) { inode->i_ino = get_next_ino(); inode_init_owner(inode, dir, mode); inode->i_mapping->a_ops = &ramfs_aops; inode->i_mapping->backing_dev_info = &ramfs_backing_dev_info; mapping_set_gfp_mask(inode->i_mapping, GFP_HIGHUSER); mapping_set_unevictable(inode->i_mapping); inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; switch (mode & S_IFMT) { default: init_special_inode(inode, mode, dev); 
break; case S_IFREG: inode->i_op = &ramfs_file_inode_operations; inode->i_fop = &ramfs_file_operations; break; case S_IFDIR: inode->i_op = &ramfs_dir_inode_operations; inode->i_fop = &simple_dir_operations; /* directory inodes start off with i_nlink == 2 (for "." entry) */ inc_nlink(inode); break; case S_IFLNK: inode->i_op = &page_symlink_inode_operations; break; } } return inode; } /* * File creation. Allocate an inode, and we're done.. */ /* SMP-safe */ static int ramfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev) { struct inode * inode = ramfs_get_inode(dir->i_sb, dir, mode, dev); int error = -ENOSPC; if (inode) { d_instantiate(dentry, inode); dget(dentry); /* Extra count - pin the dentry in core */ error = 0; dir->i_mtime = dir->i_ctime = CURRENT_TIME; } return error; } static int ramfs_mkdir(struct inode * dir, struct dentry * dentry, umode_t mode) { int retval = ramfs_mknod(dir, dentry, mode | S_IFDIR, 0); if (!retval) inc_nlink(dir); return retval; } static int ramfs_create(struct inode *dir, struct dentry *dentry, umode_t mode, struct nameidata *nd) { return ramfs_mknod(dir, dentry, mode | S_IFREG, 0); } static int ramfs_symlink(struct inode * dir, struct dentry *dentry, const char * symname) { struct inode *inode; int error = -ENOSPC; inode = ramfs_get_inode(dir->i_sb, dir, S_IFLNK|S_IRWXUGO, 0); if (inode) { int l = strlen(symname)+1; error = page_symlink(inode, symname, l); if (!error) { d_instantiate(dentry, inode); dget(dentry); dir->i_mtime = dir->i_ctime = CURRENT_TIME; } else iput(inode); } return error; } static const struct inode_operations ramfs_dir_inode_operations = { .create = ramfs_create, .lookup = simple_lookup, .link = simple_link, .unlink = simple_unlink, .symlink = ramfs_symlink, .mkdir = ramfs_mkdir, .rmdir = simple_rmdir, .mknod = ramfs_mknod, .rename = simple_rename, }; static const struct super_operations ramfs_ops = { .statfs = simple_statfs, .drop_inode = generic_delete_inode, .show_options = 
generic_show_options, }; struct ramfs_mount_opts { umode_t mode; }; enum { Opt_mode, Opt_err }; static const match_table_t tokens = { {Opt_mode, "mode=%o"}, {Opt_err, NULL} }; struct ramfs_fs_info { struct ramfs_mount_opts mount_opts; }; static int ramfs_parse_options(char *data, struct ramfs_mount_opts *opts) { substring_t args[MAX_OPT_ARGS]; int option; int token; char *p; opts->mode = RAMFS_DEFAULT_MODE; while ((p = strsep(&data, ",")) != NULL) { if (!*p) continue; token = match_token(p, tokens, args); switch (token) { case Opt_mode: if (match_octal(&args[0], &option)) return -EINVAL; opts->mode = option & S_IALLUGO; break; /* * We might like to report bad mount options here; * but traditionally ramfs has ignored all mount options, * and as it is used as a !CONFIG_SHMEM simple substitute * for tmpfs, better continue to ignore other mount options. */ } } return 0; } int ramfs_fill_super(struct super_block *sb, void *data, int silent) { struct ramfs_fs_info *fsi; struct inode *inode; int err; save_mount_options(sb, data); fsi = kzalloc(sizeof(struct ramfs_fs_info), GFP_KERNEL); sb->s_fs_info = fsi; if (!fsi) return -ENOMEM; err = ramfs_parse_options(data, &fsi->mount_opts); if (err) return err; sb->s_maxbytes = MAX_LFS_FILESIZE; sb->s_blocksize = PAGE_CACHE_SIZE; sb->s_blocksize_bits = PAGE_CACHE_SHIFT; sb->s_magic = RAMFS_MAGIC; sb->s_op = &ramfs_ops; sb->s_time_gran = 1; inode = ramfs_get_inode(sb, NULL, S_IFDIR | fsi->mount_opts.mode, 0); sb->s_root = d_make_root(inode); if (!sb->s_root) return -ENOMEM; return 0; } struct dentry *ramfs_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *data) { return mount_nodev(fs_type, flags, data, ramfs_fill_super); } static struct dentry *rootfs_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *data) { return mount_nodev(fs_type, flags|MS_NOUSER, data, ramfs_fill_super); } static void ramfs_kill_sb(struct super_block *sb) { kfree(sb->s_fs_info); kill_litter_super(sb); 
} static struct file_system_type ramfs_fs_type = { .name = "ramfs", .mount = ramfs_mount, .kill_sb = ramfs_kill_sb, }; static struct file_system_type rootfs_fs_type = { .name = "rootfs", .mount = rootfs_mount, .kill_sb = kill_litter_super, }; static int __init init_ramfs_fs(void) { return register_filesystem(&ramfs_fs_type); } module_init(init_ramfs_fs) int __init init_rootfs(void) { int err; err = bdi_init(&ramfs_backing_dev_info); if (err) return err; err = register_filesystem(&rootfs_fs_type); if (err) bdi_destroy(&ramfs_backing_dev_info); return err; }
gpl-2.0
MiCode/mi2_kernel
drivers/char/agp/intel-gtt.c
4483
40126
/* * Intel GTT (Graphics Translation Table) routines * * Caveat: This driver implements the linux agp interface, but this is far from * a agp driver! GTT support ended up here for purely historical reasons: The * old userspace intel graphics drivers needed an interface to map memory into * the GTT. And the drm provides a default interface for graphic devices sitting * on an agp port. So it made sense to fake the GTT support as an agp port to * avoid having to create a new api. * * With gem this does not make much sense anymore, just needlessly complicates * the code. But as long as the old graphics stack is still support, it's stuck * here. * * /fairy-tale-mode off */ #include <linux/module.h> #include <linux/pci.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/pagemap.h> #include <linux/agp_backend.h> #include <linux/delay.h> #include <asm/smp.h> #include "agp.h" #include "intel-agp.h" #include <drm/intel-gtt.h> /* * If we have Intel graphics, we're not going to have anything other than * an Intel IOMMU. So make the correct use of the PCI DMA API contingent * on the Intel IOMMU support (CONFIG_INTEL_IOMMU). * Only newer chipsets need to bother with this, of course. */ #ifdef CONFIG_INTEL_IOMMU #define USE_PCI_DMA_API 1 #else #define USE_PCI_DMA_API 0 #endif struct intel_gtt_driver { unsigned int gen : 8; unsigned int is_g33 : 1; unsigned int is_pineview : 1; unsigned int is_ironlake : 1; unsigned int has_pgtbl_enable : 1; unsigned int dma_mask_size : 8; /* Chipset specific GTT setup */ int (*setup)(void); /* This should undo anything done in ->setup() save the unmapping * of the mmio register file, that's done in the generic code. */ void (*cleanup)(void); void (*write_entry)(dma_addr_t addr, unsigned int entry, unsigned int flags); /* Flags is a more or less chipset specific opaque value. * For chipsets that need to support old ums (non-gem) code, this * needs to be identical to the various supported agp memory types! 
*/ bool (*check_flags)(unsigned int flags); void (*chipset_flush)(void); }; static struct _intel_private { struct intel_gtt base; const struct intel_gtt_driver *driver; struct pci_dev *pcidev; /* device one */ struct pci_dev *bridge_dev; u8 __iomem *registers; phys_addr_t gtt_bus_addr; phys_addr_t gma_bus_addr; u32 PGETBL_save; u32 __iomem *gtt; /* I915G */ bool clear_fake_agp; /* on first access via agp, fill with scratch */ int num_dcache_entries; void __iomem *i9xx_flush_page; char *i81x_gtt_table; struct resource ifp_resource; int resource_valid; struct page *scratch_page; } intel_private; #define INTEL_GTT_GEN intel_private.driver->gen #define IS_G33 intel_private.driver->is_g33 #define IS_PINEVIEW intel_private.driver->is_pineview #define IS_IRONLAKE intel_private.driver->is_ironlake #define HAS_PGTBL_EN intel_private.driver->has_pgtbl_enable int intel_gtt_map_memory(struct page **pages, unsigned int num_entries, struct scatterlist **sg_list, int *num_sg) { struct sg_table st; struct scatterlist *sg; int i; if (*sg_list) return 0; /* already mapped (for e.g. 
resume */ DBG("try mapping %lu pages\n", (unsigned long)num_entries); if (sg_alloc_table(&st, num_entries, GFP_KERNEL)) goto err; *sg_list = sg = st.sgl; for (i = 0 ; i < num_entries; i++, sg = sg_next(sg)) sg_set_page(sg, pages[i], PAGE_SIZE, 0); *num_sg = pci_map_sg(intel_private.pcidev, *sg_list, num_entries, PCI_DMA_BIDIRECTIONAL); if (unlikely(!*num_sg)) goto err; return 0; err: sg_free_table(&st); return -ENOMEM; } EXPORT_SYMBOL(intel_gtt_map_memory); void intel_gtt_unmap_memory(struct scatterlist *sg_list, int num_sg) { struct sg_table st; DBG("try unmapping %lu pages\n", (unsigned long)mem->page_count); pci_unmap_sg(intel_private.pcidev, sg_list, num_sg, PCI_DMA_BIDIRECTIONAL); st.sgl = sg_list; st.orig_nents = st.nents = num_sg; sg_free_table(&st); } EXPORT_SYMBOL(intel_gtt_unmap_memory); static void intel_fake_agp_enable(struct agp_bridge_data *bridge, u32 mode) { return; } /* Exists to support ARGB cursors */ static struct page *i8xx_alloc_pages(void) { struct page *page; page = alloc_pages(GFP_KERNEL | GFP_DMA32, 2); if (page == NULL) return NULL; if (set_pages_uc(page, 4) < 0) { set_pages_wb(page, 4); __free_pages(page, 2); return NULL; } get_page(page); atomic_inc(&agp_bridge->current_memory_agp); return page; } static void i8xx_destroy_pages(struct page *page) { if (page == NULL) return; set_pages_wb(page, 4); put_page(page); __free_pages(page, 2); atomic_dec(&agp_bridge->current_memory_agp); } #define I810_GTT_ORDER 4 static int i810_setup(void) { u32 reg_addr; char *gtt_table; /* i81x does not preallocate the gtt. It's always 64kb in size. 
*/ gtt_table = alloc_gatt_pages(I810_GTT_ORDER); if (gtt_table == NULL) return -ENOMEM; intel_private.i81x_gtt_table = gtt_table; pci_read_config_dword(intel_private.pcidev, I810_MMADDR, &reg_addr); reg_addr &= 0xfff80000; intel_private.registers = ioremap(reg_addr, KB(64)); if (!intel_private.registers) return -ENOMEM; writel(virt_to_phys(gtt_table) | I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL); intel_private.gtt_bus_addr = reg_addr + I810_PTE_BASE; if ((readl(intel_private.registers+I810_DRAM_CTL) & I810_DRAM_ROW_0) == I810_DRAM_ROW_0_SDRAM) { dev_info(&intel_private.pcidev->dev, "detected 4MB dedicated video ram\n"); intel_private.num_dcache_entries = 1024; } return 0; } static void i810_cleanup(void) { writel(0, intel_private.registers+I810_PGETBL_CTL); free_gatt_pages(intel_private.i81x_gtt_table, I810_GTT_ORDER); } static int i810_insert_dcache_entries(struct agp_memory *mem, off_t pg_start, int type) { int i; if ((pg_start + mem->page_count) > intel_private.num_dcache_entries) return -EINVAL; if (!mem->is_flushed) global_cache_flush(); for (i = pg_start; i < (pg_start + mem->page_count); i++) { dma_addr_t addr = i << PAGE_SHIFT; intel_private.driver->write_entry(addr, i, type); } readl(intel_private.gtt+i-1); return 0; } /* * The i810/i830 requires a physical address to program its mouse * pointer into hardware. * However the Xserver still writes to it through the agp aperture. 
*/ static struct agp_memory *alloc_agpphysmem_i8xx(size_t pg_count, int type) { struct agp_memory *new; struct page *page; switch (pg_count) { case 1: page = agp_bridge->driver->agp_alloc_page(agp_bridge); break; case 4: /* kludge to get 4 physical pages for ARGB cursor */ page = i8xx_alloc_pages(); break; default: return NULL; } if (page == NULL) return NULL; new = agp_create_memory(pg_count); if (new == NULL) return NULL; new->pages[0] = page; if (pg_count == 4) { /* kludge to get 4 physical pages for ARGB cursor */ new->pages[1] = new->pages[0] + 1; new->pages[2] = new->pages[1] + 1; new->pages[3] = new->pages[2] + 1; } new->page_count = pg_count; new->num_scratch_pages = pg_count; new->type = AGP_PHYS_MEMORY; new->physical = page_to_phys(new->pages[0]); return new; } static void intel_i810_free_by_type(struct agp_memory *curr) { agp_free_key(curr->key); if (curr->type == AGP_PHYS_MEMORY) { if (curr->page_count == 4) i8xx_destroy_pages(curr->pages[0]); else { agp_bridge->driver->agp_destroy_page(curr->pages[0], AGP_PAGE_DESTROY_UNMAP); agp_bridge->driver->agp_destroy_page(curr->pages[0], AGP_PAGE_DESTROY_FREE); } agp_free_page_array(curr); } kfree(curr); } static int intel_gtt_setup_scratch_page(void) { struct page *page; dma_addr_t dma_addr; page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO); if (page == NULL) return -ENOMEM; get_page(page); set_pages_uc(page, 1); if (intel_private.base.needs_dmar) { dma_addr = pci_map_page(intel_private.pcidev, page, 0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); if (pci_dma_mapping_error(intel_private.pcidev, dma_addr)) return -EINVAL; intel_private.base.scratch_page_dma = dma_addr; } else intel_private.base.scratch_page_dma = page_to_phys(page); intel_private.scratch_page = page; return 0; } static void i810_write_entry(dma_addr_t addr, unsigned int entry, unsigned int flags) { u32 pte_flags = I810_PTE_VALID; switch (flags) { case AGP_DCACHE_MEMORY: pte_flags |= I810_PTE_LOCAL; break; case AGP_USER_CACHED_MEMORY: pte_flags |= 
I830_PTE_SYSTEM_CACHED; break; } writel(addr | pte_flags, intel_private.gtt + entry); } static const struct aper_size_info_fixed intel_fake_agp_sizes[] = { {32, 8192, 3}, {64, 16384, 4}, {128, 32768, 5}, {256, 65536, 6}, {512, 131072, 7}, }; static unsigned int intel_gtt_stolen_size(void) { u16 gmch_ctrl; u8 rdct; int local = 0; static const int ddt[4] = { 0, 16, 32, 64 }; unsigned int stolen_size = 0; if (INTEL_GTT_GEN == 1) return 0; /* no stolen mem on i81x */ pci_read_config_word(intel_private.bridge_dev, I830_GMCH_CTRL, &gmch_ctrl); if (intel_private.bridge_dev->device == PCI_DEVICE_ID_INTEL_82830_HB || intel_private.bridge_dev->device == PCI_DEVICE_ID_INTEL_82845G_HB) { switch (gmch_ctrl & I830_GMCH_GMS_MASK) { case I830_GMCH_GMS_STOLEN_512: stolen_size = KB(512); break; case I830_GMCH_GMS_STOLEN_1024: stolen_size = MB(1); break; case I830_GMCH_GMS_STOLEN_8192: stolen_size = MB(8); break; case I830_GMCH_GMS_LOCAL: rdct = readb(intel_private.registers+I830_RDRAM_CHANNEL_TYPE); stolen_size = (I830_RDRAM_ND(rdct) + 1) * MB(ddt[I830_RDRAM_DDT(rdct)]); local = 1; break; default: stolen_size = 0; break; } } else if (INTEL_GTT_GEN == 6) { /* * SandyBridge has new memory control reg at 0x50.w */ u16 snb_gmch_ctl; pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl); switch (snb_gmch_ctl & SNB_GMCH_GMS_STOLEN_MASK) { case SNB_GMCH_GMS_STOLEN_32M: stolen_size = MB(32); break; case SNB_GMCH_GMS_STOLEN_64M: stolen_size = MB(64); break; case SNB_GMCH_GMS_STOLEN_96M: stolen_size = MB(96); break; case SNB_GMCH_GMS_STOLEN_128M: stolen_size = MB(128); break; case SNB_GMCH_GMS_STOLEN_160M: stolen_size = MB(160); break; case SNB_GMCH_GMS_STOLEN_192M: stolen_size = MB(192); break; case SNB_GMCH_GMS_STOLEN_224M: stolen_size = MB(224); break; case SNB_GMCH_GMS_STOLEN_256M: stolen_size = MB(256); break; case SNB_GMCH_GMS_STOLEN_288M: stolen_size = MB(288); break; case SNB_GMCH_GMS_STOLEN_320M: stolen_size = MB(320); break; case SNB_GMCH_GMS_STOLEN_352M: 
stolen_size = MB(352); break; case SNB_GMCH_GMS_STOLEN_384M: stolen_size = MB(384); break; case SNB_GMCH_GMS_STOLEN_416M: stolen_size = MB(416); break; case SNB_GMCH_GMS_STOLEN_448M: stolen_size = MB(448); break; case SNB_GMCH_GMS_STOLEN_480M: stolen_size = MB(480); break; case SNB_GMCH_GMS_STOLEN_512M: stolen_size = MB(512); break; } } else { switch (gmch_ctrl & I855_GMCH_GMS_MASK) { case I855_GMCH_GMS_STOLEN_1M: stolen_size = MB(1); break; case I855_GMCH_GMS_STOLEN_4M: stolen_size = MB(4); break; case I855_GMCH_GMS_STOLEN_8M: stolen_size = MB(8); break; case I855_GMCH_GMS_STOLEN_16M: stolen_size = MB(16); break; case I855_GMCH_GMS_STOLEN_32M: stolen_size = MB(32); break; case I915_GMCH_GMS_STOLEN_48M: stolen_size = MB(48); break; case I915_GMCH_GMS_STOLEN_64M: stolen_size = MB(64); break; case G33_GMCH_GMS_STOLEN_128M: stolen_size = MB(128); break; case G33_GMCH_GMS_STOLEN_256M: stolen_size = MB(256); break; case INTEL_GMCH_GMS_STOLEN_96M: stolen_size = MB(96); break; case INTEL_GMCH_GMS_STOLEN_160M: stolen_size = MB(160); break; case INTEL_GMCH_GMS_STOLEN_224M: stolen_size = MB(224); break; case INTEL_GMCH_GMS_STOLEN_352M: stolen_size = MB(352); break; default: stolen_size = 0; break; } } if (stolen_size > 0) { dev_info(&intel_private.bridge_dev->dev, "detected %dK %s memory\n", stolen_size / KB(1), local ? 
"local" : "stolen"); } else { dev_info(&intel_private.bridge_dev->dev, "no pre-allocated video memory detected\n"); stolen_size = 0; } return stolen_size; } static void i965_adjust_pgetbl_size(unsigned int size_flag) { u32 pgetbl_ctl, pgetbl_ctl2; /* ensure that ppgtt is disabled */ pgetbl_ctl2 = readl(intel_private.registers+I965_PGETBL_CTL2); pgetbl_ctl2 &= ~I810_PGETBL_ENABLED; writel(pgetbl_ctl2, intel_private.registers+I965_PGETBL_CTL2); /* write the new ggtt size */ pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL); pgetbl_ctl &= ~I965_PGETBL_SIZE_MASK; pgetbl_ctl |= size_flag; writel(pgetbl_ctl, intel_private.registers+I810_PGETBL_CTL); } static unsigned int i965_gtt_total_entries(void) { int size; u32 pgetbl_ctl; u16 gmch_ctl; pci_read_config_word(intel_private.bridge_dev, I830_GMCH_CTRL, &gmch_ctl); if (INTEL_GTT_GEN == 5) { switch (gmch_ctl & G4x_GMCH_SIZE_MASK) { case G4x_GMCH_SIZE_1M: case G4x_GMCH_SIZE_VT_1M: i965_adjust_pgetbl_size(I965_PGETBL_SIZE_1MB); break; case G4x_GMCH_SIZE_VT_1_5M: i965_adjust_pgetbl_size(I965_PGETBL_SIZE_1_5MB); break; case G4x_GMCH_SIZE_2M: case G4x_GMCH_SIZE_VT_2M: i965_adjust_pgetbl_size(I965_PGETBL_SIZE_2MB); break; } } pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL); switch (pgetbl_ctl & I965_PGETBL_SIZE_MASK) { case I965_PGETBL_SIZE_128KB: size = KB(128); break; case I965_PGETBL_SIZE_256KB: size = KB(256); break; case I965_PGETBL_SIZE_512KB: size = KB(512); break; /* GTT pagetable sizes bigger than 512KB are not possible on G33! 
*/ case I965_PGETBL_SIZE_1MB: size = KB(1024); break; case I965_PGETBL_SIZE_2MB: size = KB(2048); break; case I965_PGETBL_SIZE_1_5MB: size = KB(1024 + 512); break; default: dev_info(&intel_private.pcidev->dev, "unknown page table size, assuming 512KB\n"); size = KB(512); } return size/4; } static unsigned int intel_gtt_total_entries(void) { int size; if (IS_G33 || INTEL_GTT_GEN == 4 || INTEL_GTT_GEN == 5) return i965_gtt_total_entries(); else if (INTEL_GTT_GEN == 6) { u16 snb_gmch_ctl; pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl); switch (snb_gmch_ctl & SNB_GTT_SIZE_MASK) { default: case SNB_GTT_SIZE_0M: printk(KERN_ERR "Bad GTT size mask: 0x%04x.\n", snb_gmch_ctl); size = MB(0); break; case SNB_GTT_SIZE_1M: size = MB(1); break; case SNB_GTT_SIZE_2M: size = MB(2); break; } return size/4; } else { /* On previous hardware, the GTT size was just what was * required to map the aperture. */ return intel_private.base.gtt_mappable_entries; } } static unsigned int intel_gtt_mappable_entries(void) { unsigned int aperture_size; if (INTEL_GTT_GEN == 1) { u32 smram_miscc; pci_read_config_dword(intel_private.bridge_dev, I810_SMRAM_MISCC, &smram_miscc); if ((smram_miscc & I810_GFX_MEM_WIN_SIZE) == I810_GFX_MEM_WIN_32M) aperture_size = MB(32); else aperture_size = MB(64); } else if (INTEL_GTT_GEN == 2) { u16 gmch_ctrl; pci_read_config_word(intel_private.bridge_dev, I830_GMCH_CTRL, &gmch_ctrl); if ((gmch_ctrl & I830_GMCH_MEM_MASK) == I830_GMCH_MEM_64M) aperture_size = MB(64); else aperture_size = MB(128); } else { /* 9xx supports large sizes, just look at the length */ aperture_size = pci_resource_len(intel_private.pcidev, 2); } return aperture_size >> PAGE_SHIFT; } static void intel_gtt_teardown_scratch_page(void) { set_pages_wb(intel_private.scratch_page, 1); pci_unmap_page(intel_private.pcidev, intel_private.base.scratch_page_dma, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); put_page(intel_private.scratch_page); __free_page(intel_private.scratch_page); } 
static void intel_gtt_cleanup(void) { intel_private.driver->cleanup(); iounmap(intel_private.gtt); iounmap(intel_private.registers); intel_gtt_teardown_scratch_page(); } static int intel_gtt_init(void) { u32 gtt_map_size; int ret; ret = intel_private.driver->setup(); if (ret != 0) return ret; intel_private.base.gtt_mappable_entries = intel_gtt_mappable_entries(); intel_private.base.gtt_total_entries = intel_gtt_total_entries(); /* save the PGETBL reg for resume */ intel_private.PGETBL_save = readl(intel_private.registers+I810_PGETBL_CTL) & ~I810_PGETBL_ENABLED; /* we only ever restore the register when enabling the PGTBL... */ if (HAS_PGTBL_EN) intel_private.PGETBL_save |= I810_PGETBL_ENABLED; dev_info(&intel_private.bridge_dev->dev, "detected gtt size: %dK total, %dK mappable\n", intel_private.base.gtt_total_entries * 4, intel_private.base.gtt_mappable_entries * 4); gtt_map_size = intel_private.base.gtt_total_entries * 4; intel_private.gtt = ioremap(intel_private.gtt_bus_addr, gtt_map_size); if (!intel_private.gtt) { intel_private.driver->cleanup(); iounmap(intel_private.registers); return -ENOMEM; } intel_private.base.gtt = intel_private.gtt; global_cache_flush(); /* FIXME: ? 
*/ intel_private.base.stolen_size = intel_gtt_stolen_size(); intel_private.base.needs_dmar = USE_PCI_DMA_API && INTEL_GTT_GEN > 2; ret = intel_gtt_setup_scratch_page(); if (ret != 0) { intel_gtt_cleanup(); return ret; } return 0; } static int intel_fake_agp_fetch_size(void) { int num_sizes = ARRAY_SIZE(intel_fake_agp_sizes); unsigned int aper_size; int i; aper_size = (intel_private.base.gtt_mappable_entries << PAGE_SHIFT) / MB(1); for (i = 0; i < num_sizes; i++) { if (aper_size == intel_fake_agp_sizes[i].size) { agp_bridge->current_size = (void *) (intel_fake_agp_sizes + i); return aper_size; } } return 0; } static void i830_cleanup(void) { } /* The chipset_flush interface needs to get data that has already been * flushed out of the CPU all the way out to main memory, because the GPU * doesn't snoop those buffers. * * The 8xx series doesn't have the same lovely interface for flushing the * chipset write buffers that the later chips do. According to the 865 * specs, it's 64 octwords, or 1KB. So, to get those previous things in * that buffer out, we just fill 1KB and clflush it out, on the assumption * that it'll push whatever was in there out. It appears to work. */ static void i830_chipset_flush(void) { unsigned long timeout = jiffies + msecs_to_jiffies(1000); /* Forcibly evict everything from the CPU write buffers. * clflush appears to be insufficient. */ wbinvd_on_all_cpus(); /* Now we've only seen documents for this magic bit on 855GM, * we hope it exists for the other gen2 chipsets... * * Also works as advertised on my 845G. 
*/ writel(readl(intel_private.registers+I830_HIC) | (1<<31), intel_private.registers+I830_HIC); while (readl(intel_private.registers+I830_HIC) & (1<<31)) { if (time_after(jiffies, timeout)) break; udelay(50); } } static void i830_write_entry(dma_addr_t addr, unsigned int entry, unsigned int flags) { u32 pte_flags = I810_PTE_VALID; if (flags == AGP_USER_CACHED_MEMORY) pte_flags |= I830_PTE_SYSTEM_CACHED; writel(addr | pte_flags, intel_private.gtt + entry); } static bool intel_enable_gtt(void) { u32 gma_addr; u8 __iomem *reg; if (INTEL_GTT_GEN <= 2) pci_read_config_dword(intel_private.pcidev, I810_GMADDR, &gma_addr); else pci_read_config_dword(intel_private.pcidev, I915_GMADDR, &gma_addr); intel_private.gma_bus_addr = (gma_addr & PCI_BASE_ADDRESS_MEM_MASK); if (INTEL_GTT_GEN >= 6) return true; if (INTEL_GTT_GEN == 2) { u16 gmch_ctrl; pci_read_config_word(intel_private.bridge_dev, I830_GMCH_CTRL, &gmch_ctrl); gmch_ctrl |= I830_GMCH_ENABLED; pci_write_config_word(intel_private.bridge_dev, I830_GMCH_CTRL, gmch_ctrl); pci_read_config_word(intel_private.bridge_dev, I830_GMCH_CTRL, &gmch_ctrl); if ((gmch_ctrl & I830_GMCH_ENABLED) == 0) { dev_err(&intel_private.pcidev->dev, "failed to enable the GTT: GMCH_CTRL=%x\n", gmch_ctrl); return false; } } /* On the resume path we may be adjusting the PGTBL value, so * be paranoid and flush all chipset write buffers... 
*/ if (INTEL_GTT_GEN >= 3) writel(0, intel_private.registers+GFX_FLSH_CNTL); reg = intel_private.registers+I810_PGETBL_CTL; writel(intel_private.PGETBL_save, reg); if (HAS_PGTBL_EN && (readl(reg) & I810_PGETBL_ENABLED) == 0) { dev_err(&intel_private.pcidev->dev, "failed to enable the GTT: PGETBL=%x [expected %x]\n", readl(reg), intel_private.PGETBL_save); return false; } if (INTEL_GTT_GEN >= 3) writel(0, intel_private.registers+GFX_FLSH_CNTL); return true; } static int i830_setup(void) { u32 reg_addr; pci_read_config_dword(intel_private.pcidev, I810_MMADDR, &reg_addr); reg_addr &= 0xfff80000; intel_private.registers = ioremap(reg_addr, KB(64)); if (!intel_private.registers) return -ENOMEM; intel_private.gtt_bus_addr = reg_addr + I810_PTE_BASE; return 0; } static int intel_fake_agp_create_gatt_table(struct agp_bridge_data *bridge) { agp_bridge->gatt_table_real = NULL; agp_bridge->gatt_table = NULL; agp_bridge->gatt_bus_addr = 0; return 0; } static int intel_fake_agp_free_gatt_table(struct agp_bridge_data *bridge) { return 0; } static int intel_fake_agp_configure(void) { if (!intel_enable_gtt()) return -EIO; intel_private.clear_fake_agp = true; agp_bridge->gart_bus_addr = intel_private.gma_bus_addr; return 0; } static bool i830_check_flags(unsigned int flags) { switch (flags) { case 0: case AGP_PHYS_MEMORY: case AGP_USER_CACHED_MEMORY: case AGP_USER_MEMORY: return true; } return false; } void intel_gtt_insert_sg_entries(struct scatterlist *sg_list, unsigned int sg_len, unsigned int pg_start, unsigned int flags) { struct scatterlist *sg; unsigned int len, m; int i, j; j = pg_start; /* sg may merge pages, but we have to separate * per-page addr for GTT */ for_each_sg(sg_list, sg, sg_len, i) { len = sg_dma_len(sg) >> PAGE_SHIFT; for (m = 0; m < len; m++) { dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT); intel_private.driver->write_entry(addr, j, flags); j++; } } readl(intel_private.gtt+j-1); } EXPORT_SYMBOL(intel_gtt_insert_sg_entries); void 
intel_gtt_insert_pages(unsigned int first_entry, unsigned int num_entries, struct page **pages, unsigned int flags) { int i, j; for (i = 0, j = first_entry; i < num_entries; i++, j++) { dma_addr_t addr = page_to_phys(pages[i]); intel_private.driver->write_entry(addr, j, flags); } readl(intel_private.gtt+j-1); } EXPORT_SYMBOL(intel_gtt_insert_pages); static int intel_fake_agp_insert_entries(struct agp_memory *mem, off_t pg_start, int type) { int ret = -EINVAL; if (intel_private.base.do_idle_maps) return -ENODEV; if (intel_private.clear_fake_agp) { int start = intel_private.base.stolen_size / PAGE_SIZE; int end = intel_private.base.gtt_mappable_entries; intel_gtt_clear_range(start, end - start); intel_private.clear_fake_agp = false; } if (INTEL_GTT_GEN == 1 && type == AGP_DCACHE_MEMORY) return i810_insert_dcache_entries(mem, pg_start, type); if (mem->page_count == 0) goto out; if (pg_start + mem->page_count > intel_private.base.gtt_total_entries) goto out_err; if (type != mem->type) goto out_err; if (!intel_private.driver->check_flags(type)) goto out_err; if (!mem->is_flushed) global_cache_flush(); if (intel_private.base.needs_dmar) { ret = intel_gtt_map_memory(mem->pages, mem->page_count, &mem->sg_list, &mem->num_sg); if (ret != 0) return ret; intel_gtt_insert_sg_entries(mem->sg_list, mem->num_sg, pg_start, type); } else intel_gtt_insert_pages(pg_start, mem->page_count, mem->pages, type); out: ret = 0; out_err: mem->is_flushed = true; return ret; } void intel_gtt_clear_range(unsigned int first_entry, unsigned int num_entries) { unsigned int i; for (i = first_entry; i < (first_entry + num_entries); i++) { intel_private.driver->write_entry(intel_private.base.scratch_page_dma, i, 0); } readl(intel_private.gtt+i-1); } EXPORT_SYMBOL(intel_gtt_clear_range); static int intel_fake_agp_remove_entries(struct agp_memory *mem, off_t pg_start, int type) { if (mem->page_count == 0) return 0; if (intel_private.base.do_idle_maps) return -ENODEV; intel_gtt_clear_range(pg_start, 
mem->page_count); if (intel_private.base.needs_dmar) { intel_gtt_unmap_memory(mem->sg_list, mem->num_sg); mem->sg_list = NULL; mem->num_sg = 0; } return 0; } static struct agp_memory *intel_fake_agp_alloc_by_type(size_t pg_count, int type) { struct agp_memory *new; if (type == AGP_DCACHE_MEMORY && INTEL_GTT_GEN == 1) { if (pg_count != intel_private.num_dcache_entries) return NULL; new = agp_create_memory(1); if (new == NULL) return NULL; new->type = AGP_DCACHE_MEMORY; new->page_count = pg_count; new->num_scratch_pages = 0; agp_free_page_array(new); return new; } if (type == AGP_PHYS_MEMORY) return alloc_agpphysmem_i8xx(pg_count, type); /* always return NULL for other allocation types for now */ return NULL; } static int intel_alloc_chipset_flush_resource(void) { int ret; ret = pci_bus_alloc_resource(intel_private.bridge_dev->bus, &intel_private.ifp_resource, PAGE_SIZE, PAGE_SIZE, PCIBIOS_MIN_MEM, 0, pcibios_align_resource, intel_private.bridge_dev); return ret; } static void intel_i915_setup_chipset_flush(void) { int ret; u32 temp; pci_read_config_dword(intel_private.bridge_dev, I915_IFPADDR, &temp); if (!(temp & 0x1)) { intel_alloc_chipset_flush_resource(); intel_private.resource_valid = 1; pci_write_config_dword(intel_private.bridge_dev, I915_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1); } else { temp &= ~1; intel_private.resource_valid = 1; intel_private.ifp_resource.start = temp; intel_private.ifp_resource.end = temp + PAGE_SIZE; ret = request_resource(&iomem_resource, &intel_private.ifp_resource); /* some BIOSes reserve this area in a pnp some don't */ if (ret) intel_private.resource_valid = 0; } } static void intel_i965_g33_setup_chipset_flush(void) { u32 temp_hi, temp_lo; int ret; pci_read_config_dword(intel_private.bridge_dev, I965_IFPADDR + 4, &temp_hi); pci_read_config_dword(intel_private.bridge_dev, I965_IFPADDR, &temp_lo); if (!(temp_lo & 0x1)) { intel_alloc_chipset_flush_resource(); intel_private.resource_valid = 1; 
pci_write_config_dword(intel_private.bridge_dev, I965_IFPADDR + 4, upper_32_bits(intel_private.ifp_resource.start)); pci_write_config_dword(intel_private.bridge_dev, I965_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1); } else { u64 l64; temp_lo &= ~0x1; l64 = ((u64)temp_hi << 32) | temp_lo; intel_private.resource_valid = 1; intel_private.ifp_resource.start = l64; intel_private.ifp_resource.end = l64 + PAGE_SIZE; ret = request_resource(&iomem_resource, &intel_private.ifp_resource); /* some BIOSes reserve this area in a pnp some don't */ if (ret) intel_private.resource_valid = 0; } } static void intel_i9xx_setup_flush(void) { /* return if already configured */ if (intel_private.ifp_resource.start) return; if (INTEL_GTT_GEN == 6) return; /* setup a resource for this object */ intel_private.ifp_resource.name = "Intel Flush Page"; intel_private.ifp_resource.flags = IORESOURCE_MEM; /* Setup chipset flush for 915 */ if (IS_G33 || INTEL_GTT_GEN >= 4) { intel_i965_g33_setup_chipset_flush(); } else { intel_i915_setup_chipset_flush(); } if (intel_private.ifp_resource.start) intel_private.i9xx_flush_page = ioremap_nocache(intel_private.ifp_resource.start, PAGE_SIZE); if (!intel_private.i9xx_flush_page) dev_err(&intel_private.pcidev->dev, "can't ioremap flush page - no chipset flushing\n"); } static void i9xx_cleanup(void) { if (intel_private.i9xx_flush_page) iounmap(intel_private.i9xx_flush_page); if (intel_private.resource_valid) release_resource(&intel_private.ifp_resource); intel_private.ifp_resource.start = 0; intel_private.resource_valid = 0; } static void i9xx_chipset_flush(void) { if (intel_private.i9xx_flush_page) writel(1, intel_private.i9xx_flush_page); } static void i965_write_entry(dma_addr_t addr, unsigned int entry, unsigned int flags) { u32 pte_flags; pte_flags = I810_PTE_VALID; if (flags == AGP_USER_CACHED_MEMORY) pte_flags |= I830_PTE_SYSTEM_CACHED; /* Shift high bits down */ addr |= (addr >> 28) & 0xf0; writel(addr | pte_flags, 
intel_private.gtt + entry); } static bool gen6_check_flags(unsigned int flags) { return true; } static void gen6_write_entry(dma_addr_t addr, unsigned int entry, unsigned int flags) { unsigned int type_mask = flags & ~AGP_USER_CACHED_MEMORY_GFDT; unsigned int gfdt = flags & AGP_USER_CACHED_MEMORY_GFDT; u32 pte_flags; if (type_mask == AGP_USER_MEMORY) pte_flags = GEN6_PTE_UNCACHED | I810_PTE_VALID; else if (type_mask == AGP_USER_CACHED_MEMORY_LLC_MLC) { pte_flags = GEN6_PTE_LLC_MLC | I810_PTE_VALID; if (gfdt) pte_flags |= GEN6_PTE_GFDT; } else { /* set 'normal'/'cached' to LLC by default */ pte_flags = GEN6_PTE_LLC | I810_PTE_VALID; if (gfdt) pte_flags |= GEN6_PTE_GFDT; } /* gen6 has bit11-4 for physical addr bit39-32 */ addr |= (addr >> 28) & 0xff0; writel(addr | pte_flags, intel_private.gtt + entry); } static void gen6_cleanup(void) { } /* Certain Gen5 chipsets require require idling the GPU before * unmapping anything from the GTT when VT-d is enabled. */ static inline int needs_idle_maps(void) { #ifdef CONFIG_INTEL_IOMMU const unsigned short gpu_devid = intel_private.pcidev->device; /* Query intel_iommu to see if we need the workaround. Presumably that * was loaded first. 
*/ if ((gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB || gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG) && intel_iommu_gfx_mapped) return 1; #endif return 0; } static int i9xx_setup(void) { u32 reg_addr; pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &reg_addr); reg_addr &= 0xfff80000; intel_private.registers = ioremap(reg_addr, 128 * 4096); if (!intel_private.registers) return -ENOMEM; if (INTEL_GTT_GEN == 3) { u32 gtt_addr; pci_read_config_dword(intel_private.pcidev, I915_PTEADDR, &gtt_addr); intel_private.gtt_bus_addr = gtt_addr; } else { u32 gtt_offset; switch (INTEL_GTT_GEN) { case 5: case 6: gtt_offset = MB(2); break; case 4: default: gtt_offset = KB(512); break; } intel_private.gtt_bus_addr = reg_addr + gtt_offset; } if (needs_idle_maps()) intel_private.base.do_idle_maps = 1; intel_i9xx_setup_flush(); return 0; } static const struct agp_bridge_driver intel_fake_agp_driver = { .owner = THIS_MODULE, .size_type = FIXED_APER_SIZE, .aperture_sizes = intel_fake_agp_sizes, .num_aperture_sizes = ARRAY_SIZE(intel_fake_agp_sizes), .configure = intel_fake_agp_configure, .fetch_size = intel_fake_agp_fetch_size, .cleanup = intel_gtt_cleanup, .agp_enable = intel_fake_agp_enable, .cache_flush = global_cache_flush, .create_gatt_table = intel_fake_agp_create_gatt_table, .free_gatt_table = intel_fake_agp_free_gatt_table, .insert_memory = intel_fake_agp_insert_entries, .remove_memory = intel_fake_agp_remove_entries, .alloc_by_type = intel_fake_agp_alloc_by_type, .free_by_type = intel_i810_free_by_type, .agp_alloc_page = agp_generic_alloc_page, .agp_alloc_pages = agp_generic_alloc_pages, .agp_destroy_page = agp_generic_destroy_page, .agp_destroy_pages = agp_generic_destroy_pages, }; static const struct intel_gtt_driver i81x_gtt_driver = { .gen = 1, .has_pgtbl_enable = 1, .dma_mask_size = 32, .setup = i810_setup, .cleanup = i810_cleanup, .check_flags = i830_check_flags, .write_entry = i810_write_entry, }; static const struct intel_gtt_driver i8xx_gtt_driver = { .gen 
= 2, .has_pgtbl_enable = 1, .setup = i830_setup, .cleanup = i830_cleanup, .write_entry = i830_write_entry, .dma_mask_size = 32, .check_flags = i830_check_flags, .chipset_flush = i830_chipset_flush, }; static const struct intel_gtt_driver i915_gtt_driver = { .gen = 3, .has_pgtbl_enable = 1, .setup = i9xx_setup, .cleanup = i9xx_cleanup, /* i945 is the last gpu to need phys mem (for overlay and cursors). */ .write_entry = i830_write_entry, .dma_mask_size = 32, .check_flags = i830_check_flags, .chipset_flush = i9xx_chipset_flush, }; static const struct intel_gtt_driver g33_gtt_driver = { .gen = 3, .is_g33 = 1, .setup = i9xx_setup, .cleanup = i9xx_cleanup, .write_entry = i965_write_entry, .dma_mask_size = 36, .check_flags = i830_check_flags, .chipset_flush = i9xx_chipset_flush, }; static const struct intel_gtt_driver pineview_gtt_driver = { .gen = 3, .is_pineview = 1, .is_g33 = 1, .setup = i9xx_setup, .cleanup = i9xx_cleanup, .write_entry = i965_write_entry, .dma_mask_size = 36, .check_flags = i830_check_flags, .chipset_flush = i9xx_chipset_flush, }; static const struct intel_gtt_driver i965_gtt_driver = { .gen = 4, .has_pgtbl_enable = 1, .setup = i9xx_setup, .cleanup = i9xx_cleanup, .write_entry = i965_write_entry, .dma_mask_size = 36, .check_flags = i830_check_flags, .chipset_flush = i9xx_chipset_flush, }; static const struct intel_gtt_driver g4x_gtt_driver = { .gen = 5, .setup = i9xx_setup, .cleanup = i9xx_cleanup, .write_entry = i965_write_entry, .dma_mask_size = 36, .check_flags = i830_check_flags, .chipset_flush = i9xx_chipset_flush, }; static const struct intel_gtt_driver ironlake_gtt_driver = { .gen = 5, .is_ironlake = 1, .setup = i9xx_setup, .cleanup = i9xx_cleanup, .write_entry = i965_write_entry, .dma_mask_size = 36, .check_flags = i830_check_flags, .chipset_flush = i9xx_chipset_flush, }; static const struct intel_gtt_driver sandybridge_gtt_driver = { .gen = 6, .setup = i9xx_setup, .cleanup = gen6_cleanup, .write_entry = gen6_write_entry, .dma_mask_size = 40, 
.check_flags = gen6_check_flags,
.chipset_flush = i9xx_chipset_flush,
};

/* Table to describe Intel GMCH and AGP/PCIE GART drivers.  At least one of
 * driver and gmch_driver must be non-null, and find_gmch will determine
 * which one should be used if a gmch_chip_id is present.
 */
static const struct intel_gtt_driver_description {
	unsigned int gmch_chip_id;		/* PCI device id of the IGD function */
	char *name;				/* human-readable chipset name */
	const struct intel_gtt_driver *gtt_driver;	/* per-generation GTT ops */
} intel_gtt_chipsets[] = {
	{ PCI_DEVICE_ID_INTEL_82810_IG1, "i810", &i81x_gtt_driver},
	{ PCI_DEVICE_ID_INTEL_82810_IG3, "i810", &i81x_gtt_driver},
	{ PCI_DEVICE_ID_INTEL_82810E_IG, "i810", &i81x_gtt_driver},
	{ PCI_DEVICE_ID_INTEL_82815_CGC, "i815", &i81x_gtt_driver},
	{ PCI_DEVICE_ID_INTEL_82830_CGC, "830M", &i8xx_gtt_driver},
	{ PCI_DEVICE_ID_INTEL_82845G_IG, "845G", &i8xx_gtt_driver},
	{ PCI_DEVICE_ID_INTEL_82854_IG, "854", &i8xx_gtt_driver},
	{ PCI_DEVICE_ID_INTEL_82855GM_IG, "855GM", &i8xx_gtt_driver},
	{ PCI_DEVICE_ID_INTEL_82865_IG, "865", &i8xx_gtt_driver},
	{ PCI_DEVICE_ID_INTEL_E7221_IG, "E7221 (i915)", &i915_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82915G_IG, "915G", &i915_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82915GM_IG, "915GM", &i915_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82945G_IG, "945G", &i915_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82945GM_IG, "945GM", &i915_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82945GME_IG, "945GME", &i915_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82946GZ_IG, "946GZ", &i965_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82G35_IG, "G35", &i965_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82965Q_IG, "965Q", &i965_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82965G_IG, "965G", &i965_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82965GM_IG, "965GM", &i965_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82965GME_IG, "965GME/GLE", &i965_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_G33_IG, "G33", &g33_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_Q35_IG, "Q35", &g33_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_Q33_IG, "Q33", &g33_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG, "GMA3150", &pineview_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_PINEVIEW_IG, "GMA3150", &pineview_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_GM45_IG, "GM45", &g4x_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_EAGLELAKE_IG, "Eaglelake", &g4x_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_Q45_IG, "Q45/Q43", &g4x_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_G45_IG, "G45/G43", &g4x_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_B43_IG, "B43", &g4x_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_B43_1_IG, "B43", &g4x_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_G41_IG, "G41", &g4x_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG, "HD Graphics", &ironlake_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG, "HD Graphics", &ironlake_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT1_IG, "Sandybridge", &sandybridge_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_IG, "Sandybridge", &sandybridge_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_PLUS_IG, "Sandybridge", &sandybridge_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT1_IG, "Sandybridge", &sandybridge_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_IG, "Sandybridge", &sandybridge_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_PLUS_IG, "Sandybridge", &sandybridge_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_IG, "Sandybridge", &sandybridge_gtt_driver },
	/* Ivybridge reuses the Sandybridge GTT driver. */
	{ PCI_DEVICE_ID_INTEL_IVYBRIDGE_GT1_IG, "Ivybridge", &sandybridge_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_IVYBRIDGE_GT2_IG, "Ivybridge", &sandybridge_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_GT1_IG, "Ivybridge", &sandybridge_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_GT2_IG, "Ivybridge", &sandybridge_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT1_IG, "Ivybridge", &sandybridge_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT2_IG, "Ivybridge", &sandybridge_gtt_driver },
	{ 0, NULL, NULL }	/* sentinel terminating the lookup loop */
};

/* Look up the IGD PCI function for the given device id.
 * On success, caches the (referenced) pci_dev in intel_private.pcidev
 * and returns 1; returns 0 when no matching device is present.
 * NOTE(review): pci_get_device returns a referenced device; the reference
 * is dropped later in intel_gmch_remove().
 */
static int find_gmch(u16 device)
{
	struct pci_dev *gmch_device;

	gmch_device = pci_get_device(PCI_VENDOR_ID_INTEL, device, NULL);
	/* If function 0 matched something else, keep scanning for the IGD. */
	if (gmch_device && PCI_FUNC(gmch_device->devfn) != 0) {
		gmch_device = pci_get_device(PCI_VENDOR_ID_INTEL,
					     device, gmch_device);
	}

	if (!gmch_device)
		return 0;

	intel_private.pcidev = gmch_device;
	return 1;
}

/* Probe entry point called by the AGP layer: identify the chipset from
 * intel_gtt_chipsets[], hook up the fake-AGP bridge driver, set the DMA
 * mask advertised by the per-generation driver, and initialize the GTT.
 * Returns 1 on success, 0 when no supported chipset is found or GTT init
 * fails.
 */
int intel_gmch_probe(struct pci_dev *pdev,
				      struct agp_bridge_data *bridge)
{
	int i, mask;
	intel_private.driver = NULL;

	/* Scan the table until the sentinel (name == NULL). */
	for (i = 0; intel_gtt_chipsets[i].name != NULL; i++) {
		if (find_gmch(intel_gtt_chipsets[i].gmch_chip_id)) {
			intel_private.driver =
				intel_gtt_chipsets[i].gtt_driver;
			break;
		}
	}

	if (!intel_private.driver)
		return 0;

	bridge->driver = &intel_fake_agp_driver;
	bridge->dev_private_data = &intel_private;
	bridge->dev = pdev;

	intel_private.bridge_dev = pci_dev_get(pdev);

	dev_info(&pdev->dev, "Intel %s Chipset\n", intel_gtt_chipsets[i].name);

	/* DMA mask width comes from the per-generation driver description. */
	mask = intel_private.driver->dma_mask_size;
	if (pci_set_dma_mask(intel_private.pcidev, DMA_BIT_MASK(mask)))
		dev_err(&intel_private.pcidev->dev,
			"set gfx device dma mask %d-bit failed!\n", mask);
	else
		pci_set_consistent_dma_mask(intel_private.pcidev,
					    DMA_BIT_MASK(mask));

	if (intel_gtt_init() != 0)
		return 0;

	return 1;
}
EXPORT_SYMBOL(intel_gmch_probe);

/* Accessor for the shared GTT description (aperture size, scratch page,
 * etc.) filled in during probe.
 */
const struct intel_gtt *intel_gtt_get(void)
{
	return &intel_private.base;
}
EXPORT_SYMBOL(intel_gtt_get);

/* Flush the chipset write buffers, if this generation needs/supports it. */
void intel_gtt_chipset_flush(void)
{
	if (intel_private.driver->chipset_flush)
		intel_private.driver->chipset_flush();
}
EXPORT_SYMBOL(intel_gtt_chipset_flush);

/* Drop the device references taken in find_gmch()/intel_gmch_probe().
 * NOTE(review): the pointers are not cleared afterwards; callers must not
 * invoke this twice.
 */
void intel_gmch_remove(struct pci_dev *pdev)
{
	if (intel_private.pcidev)
		pci_dev_put(intel_private.pcidev);
	if (intel_private.bridge_dev)
		pci_dev_put(intel_private.bridge_dev);
}
EXPORT_SYMBOL(intel_gmch_remove);

MODULE_AUTHOR("Dave Jones <davej@redhat.com>");
MODULE_LICENSE("GPL and additional rights");
gpl-2.0
thewisenerd/android_kernel_xiaomi_armani
drivers/staging/wlan-ng/prism2sta.c
5507
58887
/* src/prism2/driver/prism2sta.c * * Implements the station functionality for prism2 * * Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved. * -------------------------------------------------------------------- * * linux-wlan * * The contents of this file are subject to the Mozilla Public * License Version 1.1 (the "License"); you may not use this file * except in compliance with the License. You may obtain a copy of * the License at http://www.mozilla.org/MPL/ * * Software distributed under the License is distributed on an "AS * IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or * implied. See the License for the specific language governing * rights and limitations under the License. * * Alternatively, the contents of this file may be used under the * terms of the GNU Public License version 2 (the "GPL"), in which * case the provisions of the GPL are applicable instead of the * above. If you wish to allow the use of your version of this file * only under the terms of the GPL and not to allow others to use * your version of this file under the MPL, indicate your decision * by deleting the provisions above and replace them with the notice * and other provisions required by the GPL. If you do not delete * the provisions above, a recipient may use your version of this * file under either the MPL or the GPL. * * -------------------------------------------------------------------- * * Inquiries regarding the linux-wlan Open Source project can be * made directly to: * * AbsoluteValue Systems Inc. * info@linux-wlan.com * http://www.linux-wlan.com * * -------------------------------------------------------------------- * * Portions of the development of this software were funded by * Intersil Corporation as part of PRISM(R) chipset product development. * * -------------------------------------------------------------------- * * This file implements the module and linux pcmcia routines for the * prism2 driver. 
* * -------------------------------------------------------------------- */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/types.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/wireless.h> #include <linux/netdevice.h> #include <linux/workqueue.h> #include <linux/byteorder/generic.h> #include <linux/ctype.h> #include <linux/io.h> #include <linux/delay.h> #include <asm/byteorder.h> #include <linux/if_arp.h> #include <linux/if_ether.h> #include <linux/bitops.h> #include "p80211types.h" #include "p80211hdr.h" #include "p80211mgmt.h" #include "p80211conv.h" #include "p80211msg.h" #include "p80211netdev.h" #include "p80211req.h" #include "p80211metadef.h" #include "p80211metastruct.h" #include "hfa384x.h" #include "prism2mgmt.h" /* Create a string of printable chars from something that might not be */ /* It's recommended that the str be 4*len + 1 bytes long */ #define wlan_mkprintstr(buf, buflen, str, strlen) \ { \ int i = 0; \ int j = 0; \ memset(str, 0, (strlen)); \ for (i = 0; i < (buflen); i++) { \ if (isprint((buf)[i])) { \ (str)[j] = (buf)[i]; \ j++; \ } else { \ (str)[j] = '\\'; \ (str)[j+1] = 'x'; \ (str)[j+2] = hex_asc_hi((buf)[i]); \ (str)[j+3] = hex_asc_lo((buf)[i]); \ j += 4; \ } \ } \ } static char *dev_info = "prism2_usb"; static wlandevice_t *create_wlan(void); int prism2_reset_holdtime = 30; /* Reset hold time in ms */ int prism2_reset_settletime = 100; /* Reset settle time in ms */ static int prism2_doreset; /* Do a reset at init? 
*/ module_param(prism2_doreset, int, 0644); MODULE_PARM_DESC(prism2_doreset, "Issue a reset on initialization"); module_param(prism2_reset_holdtime, int, 0644); MODULE_PARM_DESC(prism2_reset_holdtime, "reset hold time in ms"); module_param(prism2_reset_settletime, int, 0644); MODULE_PARM_DESC(prism2_reset_settletime, "reset settle time in ms"); MODULE_LICENSE("Dual MPL/GPL"); void prism2_connect_result(wlandevice_t *wlandev, u8 failed); void prism2_disconnected(wlandevice_t *wlandev); void prism2_roamed(wlandevice_t *wlandev); static int prism2sta_open(wlandevice_t *wlandev); static int prism2sta_close(wlandevice_t *wlandev); static void prism2sta_reset(wlandevice_t *wlandev); static int prism2sta_txframe(wlandevice_t *wlandev, struct sk_buff *skb, union p80211_hdr *p80211_hdr, struct p80211_metawep *p80211_wep); static int prism2sta_mlmerequest(wlandevice_t *wlandev, struct p80211msg *msg); static int prism2sta_getcardinfo(wlandevice_t *wlandev); static int prism2sta_globalsetup(wlandevice_t *wlandev); static int prism2sta_setmulticast(wlandevice_t *wlandev, netdevice_t *dev); static void prism2sta_inf_handover(wlandevice_t *wlandev, hfa384x_InfFrame_t *inf); static void prism2sta_inf_tallies(wlandevice_t *wlandev, hfa384x_InfFrame_t *inf); static void prism2sta_inf_hostscanresults(wlandevice_t *wlandev, hfa384x_InfFrame_t *inf); static void prism2sta_inf_scanresults(wlandevice_t *wlandev, hfa384x_InfFrame_t *inf); static void prism2sta_inf_chinforesults(wlandevice_t *wlandev, hfa384x_InfFrame_t *inf); static void prism2sta_inf_linkstatus(wlandevice_t *wlandev, hfa384x_InfFrame_t *inf); static void prism2sta_inf_assocstatus(wlandevice_t *wlandev, hfa384x_InfFrame_t *inf); static void prism2sta_inf_authreq(wlandevice_t *wlandev, hfa384x_InfFrame_t *inf); static void prism2sta_inf_authreq_defer(wlandevice_t *wlandev, hfa384x_InfFrame_t *inf); static void prism2sta_inf_psusercnt(wlandevice_t *wlandev, hfa384x_InfFrame_t *inf); 
/*----------------------------------------------------------------
 * prism2sta_open
 *
 * WLAN device open method.  Called from p80211netdev when kernel
 * device open (start) method is called in response to the
 * SIOCSIFFLAGS ioctl changing the flags bit IFF_UP
 * from clear to set.
 *
 * Arguments:
 *	wlandev		wlan device structure
 *
 * Returns:
 *	0	success
 *	>0	f/w reported error
 *	<0	driver reported error
 *
 * Side effects:
 *
 * Call context:
 *	process thread
 *----------------------------------------------------------------*/
static int prism2sta_open(wlandevice_t *wlandev)
{
	/* We don't currently have to do anything else.
	 * The setup of the MAC should be subsequently completed via
	 * the mlme commands.
	 * Higher layers know we're ready from dev->start==1 and
	 * dev->tbusy==0.  Our rx path knows to pass up received
	 * frames because dev->flags&IFF_UP is true.
	 */

	return 0;
}

/*----------------------------------------------------------------
 * prism2sta_close
 *
 * WLAN device close method.  Called from p80211netdev when kernel
 * device close method is called in response to the
 * SIOCSIFFLAGS ioctl changing the flags bit IFF_UP
 * from set to clear.
 *
 * Arguments:
 *	wlandev		wlan device structure
 *
 * Returns:
 *	0	success
 *	>0	f/w reported error
 *	<0	driver reported error
 *
 * Side effects:
 *
 * Call context:
 *	process thread
 *----------------------------------------------------------------*/
static int prism2sta_close(wlandevice_t *wlandev)
{
	/* We don't currently have to do anything else.
	 * Higher layers know we're not ready from dev->start==0 and
	 * dev->tbusy==1.  Our rx path knows to not pass up received
	 * frames because dev->flags&IFF_UP is false.
	 */

	return 0;
}

/*----------------------------------------------------------------
 * prism2sta_reset
 *
 * Not currently implemented.
 *
 * Arguments:
 *	wlandev		wlan device structure
 *	none
 *
 * Returns:
 *	nothing
 *
 * Side effects:
 *
 * Call context:
 *	process thread
 *----------------------------------------------------------------*/
static void prism2sta_reset(wlandevice_t *wlandev)
{
	return;
}

/*----------------------------------------------------------------
 * prism2sta_txframe
 *
 * Takes a frame from p80211 and queues it for transmission.
 *
 * Arguments:
 *	wlandev		wlan device structure
 *	pb		packet buffer struct.  Contains an 802.11
 *			data frame.
 *	p80211_hdr	points to the 802.11 header for the packet.
 * Returns:
 *	0		Success and more buffs available
 *	1		Success but no more buffs
 *	2		Allocation failure
 *	4		Buffer full or queue busy
 *
 * Side effects:
 *
 * Call context:
 *	process thread
 *----------------------------------------------------------------*/
static int prism2sta_txframe(wlandevice_t *wlandev, struct sk_buff *skb,
			     union p80211_hdr *p80211_hdr,
			     struct p80211_metawep *p80211_wep)
{
	hfa384x_t *hw = (hfa384x_t *) wlandev->priv;
	int result;

	/* If necessary, set the 802.11 WEP bit.  Only done when privacy is
	 * invoked but host-side encryption is not (firmware encrypts). */
	if ((wlandev->hostwep & (HOSTWEP_PRIVACYINVOKED | HOSTWEP_ENCRYPT)) ==
	    HOSTWEP_PRIVACYINVOKED) {
		p80211_hdr->a3.fc |= cpu_to_le16(WLAN_SET_FC_ISWEP(1));
	}

	result = hfa384x_drvr_txframe(hw, skb, p80211_hdr, p80211_wep);

	return result;
}

/*----------------------------------------------------------------
 * prism2sta_mlmerequest
 *
 * wlan command message handler.  All we do here is pass the message
 * over to the prism2sta_mgmt_handler.
 *
 * Arguments:
 *	wlandev		wlan device structure
 *	msg		wlan command message
 * Returns:
 *	0	success
 *	<0	successful acceptance of message, but we're
 *		waiting for an async process to finish before
 *		we're done with the msg.  When the asynch
 *		process is done, we'll call the p80211
 *		function p80211req_confirm() .
 *	>0	An error occurred while we were handling
 *		the message.
*
 * Side effects:
 *
 * Call context:
 *	process thread
 *----------------------------------------------------------------*/
static int prism2sta_mlmerequest(wlandevice_t *wlandev, struct p80211msg *msg)
{
	hfa384x_t *hw = (hfa384x_t *) wlandev->priv;

	int result = 0;

	/* Dispatch on the p80211 message code; unhandled codes only warn. */
	switch (msg->msgcode) {
	case DIDmsg_dot11req_mibget:
		pr_debug("Received mibget request\n");
		result = prism2mgmt_mibset_mibget(wlandev, msg);
		break;
	case DIDmsg_dot11req_mibset:
		pr_debug("Received mibset request\n");
		result = prism2mgmt_mibset_mibget(wlandev, msg);
		break;
	case DIDmsg_dot11req_scan:
		pr_debug("Received scan request\n");
		result = prism2mgmt_scan(wlandev, msg);
		break;
	case DIDmsg_dot11req_scan_results:
		pr_debug("Received scan_results request\n");
		result = prism2mgmt_scan_results(wlandev, msg);
		break;
	case DIDmsg_dot11req_start:
		pr_debug("Received mlme start request\n");
		result = prism2mgmt_start(wlandev, msg);
		break;
		/*
		 * Prism2 specific messages
		 */
	case DIDmsg_p2req_readpda:
		pr_debug("Received mlme readpda request\n");
		result = prism2mgmt_readpda(wlandev, msg);
		break;
	case DIDmsg_p2req_ramdl_state:
		pr_debug("Received mlme ramdl_state request\n");
		result = prism2mgmt_ramdl_state(wlandev, msg);
		break;
	case DIDmsg_p2req_ramdl_write:
		pr_debug("Received mlme ramdl_write request\n");
		result = prism2mgmt_ramdl_write(wlandev, msg);
		break;
	case DIDmsg_p2req_flashdl_state:
		pr_debug("Received mlme flashdl_state request\n");
		result = prism2mgmt_flashdl_state(wlandev, msg);
		break;
	case DIDmsg_p2req_flashdl_write:
		pr_debug("Received mlme flashdl_write request\n");
		result = prism2mgmt_flashdl_write(wlandev, msg);
		break;
		/*
		 * Linux specific messages
		 */
	case DIDmsg_lnxreq_hostwep:
		break;		/* ignore me. */
	case DIDmsg_lnxreq_ifstate:
		{
			struct p80211msg_lnxreq_ifstate *ifstatemsg;
			pr_debug("Received mlme ifstate request\n");
			ifstatemsg = (struct p80211msg_lnxreq_ifstate *) msg;
			result =
			    prism2sta_ifstate(wlandev,
					      ifstatemsg->ifstate.data);
			ifstatemsg->resultcode.status =
			    P80211ENUM_msgitem_status_data_ok;
			ifstatemsg->resultcode.data = result;
			/* result is reported inside the message; the call
			 * itself is considered handled. */
			result = 0;
		}
		break;
	case DIDmsg_lnxreq_wlansniff:
		pr_debug("Received mlme wlansniff request\n");
		result = prism2mgmt_wlansniff(wlandev, msg);
		break;
	case DIDmsg_lnxreq_autojoin:
		pr_debug("Received mlme autojoin request\n");
		result = prism2mgmt_autojoin(wlandev, msg);
		break;
	case DIDmsg_lnxreq_commsquality:{
			struct p80211msg_lnxreq_commsquality *qualmsg;

			pr_debug("Received commsquality request\n");

			qualmsg = (struct p80211msg_lnxreq_commsquality *) msg;

			qualmsg->link.status =
			    P80211ENUM_msgitem_status_data_ok;
			qualmsg->level.status =
			    P80211ENUM_msgitem_status_data_ok;
			qualmsg->noise.status =
			    P80211ENUM_msgitem_status_data_ok;

			/* Quality values are cached in hw->qual (LE order). */
			qualmsg->link.data = le16_to_cpu(hw->qual.CQ_currBSS);
			qualmsg->level.data = le16_to_cpu(hw->qual.ASL_currBSS);
			qualmsg->noise.data = le16_to_cpu(hw->qual.ANL_currFC);
			qualmsg->txrate.data = hw->txrate;

			break;
		}
	default:
		printk(KERN_WARNING "Unknown mgmt request message 0x%08x",
		       msg->msgcode);
		break;
	}

	return result;
}

/*----------------------------------------------------------------
 * prism2sta_ifstate
 *
 * Interface state.  This is the primary WLAN interface enable/disable
 * handler.  Following the driver/load/deviceprobe sequence, this
 * function must be called with a state of "enable" before any other
 * commands will be accepted.
 *
 * Arguments:
 *	wlandev		wlan device structure
 *	msgp		ptr to msg buffer
 *
 * Returns:
 *	A p80211 message resultcode value.
*
 * Side effects:
 *
 * Call context:
 *	process thread (usually)
 *	interrupt
 *----------------------------------------------------------------*/
u32 prism2sta_ifstate(wlandevice_t *wlandev, u32 ifstate)
{
	hfa384x_t *hw = (hfa384x_t *) wlandev->priv;
	u32 result;

	/* Default to failure; each state transition sets its own result. */
	result = P80211ENUM_resultcode_implementation_failure;

	pr_debug("Current MSD state(%d), requesting(%d)\n",
		 wlandev->msdstate, ifstate);

	switch (ifstate) {
	case P80211ENUM_ifstate_fwload:
		switch (wlandev->msdstate) {
		case WLAN_MSD_HWPRESENT:
			wlandev->msdstate = WLAN_MSD_FWLOAD_PENDING;
			/*
			 * Initialize the device+driver sufficiently
			 * for firmware loading.
			 */
			result = hfa384x_drvr_start(hw);
			if (result) {
				printk(KERN_ERR
				       "hfa384x_drvr_start() failed,"
				       "result=%d\n", (int)result);
				result =
				    P80211ENUM_resultcode_implementation_failure;
				wlandev->msdstate = WLAN_MSD_HWPRESENT;
				break;
			}
			wlandev->msdstate = WLAN_MSD_FWLOAD;
			result = P80211ENUM_resultcode_success;
			break;
		case WLAN_MSD_FWLOAD:
			/* Already in fwload state: just re-init the MAC. */
			hfa384x_cmd_initialize(hw);
			result = P80211ENUM_resultcode_success;
			break;
		case WLAN_MSD_RUNNING:
			printk(KERN_WARNING
			       "Cannot enter fwload state from enable state,"
			       "you must disable first.\n");
			result = P80211ENUM_resultcode_invalid_parameters;
			break;
		case WLAN_MSD_HWFAIL:
		default:
			/* probe() had a problem or the msdstate contains
			 * an unrecognized value, there's nothing we can do.
			 */
			result = P80211ENUM_resultcode_implementation_failure;
			break;
		}
		break;
	case P80211ENUM_ifstate_enable:
		switch (wlandev->msdstate) {
		case WLAN_MSD_HWPRESENT:
		case WLAN_MSD_FWLOAD:
			wlandev->msdstate = WLAN_MSD_RUNNING_PENDING;
			/* Initialize the device+driver for full
			 * operation.  Note that this might be an FWLOAD
			 * to RUNNING transition so we must not do a chip
			 * or board level reset.  Note that on failure,
			 * the MSD state is set to HWPRESENT because we
			 * can't make any assumptions about the state
			 * of the hardware or a previous firmware load.
			 */
			result = hfa384x_drvr_start(hw);
			if (result) {
				printk(KERN_ERR
				       "hfa384x_drvr_start() failed,"
				       "result=%d\n", (int)result);
				result =
				    P80211ENUM_resultcode_implementation_failure;
				wlandev->msdstate = WLAN_MSD_HWPRESENT;
				break;
			}

			result = prism2sta_getcardinfo(wlandev);
			if (result) {
				printk(KERN_ERR
				       "prism2sta_getcardinfo() failed,"
				       "result=%d\n", (int)result);
				result =
				    P80211ENUM_resultcode_implementation_failure;
				hfa384x_drvr_stop(hw);
				wlandev->msdstate = WLAN_MSD_HWPRESENT;
				break;
			}
			result = prism2sta_globalsetup(wlandev);
			if (result) {
				printk(KERN_ERR
				       "prism2sta_globalsetup() failed,"
				       "result=%d\n", (int)result);
				result =
				    P80211ENUM_resultcode_implementation_failure;
				hfa384x_drvr_stop(hw);
				wlandev->msdstate = WLAN_MSD_HWPRESENT;
				break;
			}
			wlandev->msdstate = WLAN_MSD_RUNNING;
			hw->join_ap = 0;
			hw->join_retries = 60;
			result = P80211ENUM_resultcode_success;
			break;
		case WLAN_MSD_RUNNING:
			/* Do nothing, we're already in this state. */
			result = P80211ENUM_resultcode_success;
			break;
		case WLAN_MSD_HWFAIL:
		default:
			/* probe() had a problem or the msdstate contains
			 * an unrecognized value, there's nothing we can do.
			 */
			result = P80211ENUM_resultcode_implementation_failure;
			break;
		}
		break;
	case P80211ENUM_ifstate_disable:
		switch (wlandev->msdstate) {
		case WLAN_MSD_HWPRESENT:
			/* Do nothing, we're already in this state. */
			result = P80211ENUM_resultcode_success;
			break;
		case WLAN_MSD_FWLOAD:
		case WLAN_MSD_RUNNING:
			wlandev->msdstate = WLAN_MSD_HWPRESENT_PENDING;
			/*
			 * TODO: Shut down the MAC completely.  Here a chip
			 * or board level reset is probably called for.
			 * After a "disable" _all_ results are lost, even
			 * those from a fwload.
			 */
			if (!wlandev->hwremoved)
				netif_carrier_off(wlandev->netdev);

			hfa384x_drvr_stop(hw);

			wlandev->macmode = WLAN_MACMODE_NONE;
			wlandev->msdstate = WLAN_MSD_HWPRESENT;
			result = P80211ENUM_resultcode_success;
			break;
		case WLAN_MSD_HWFAIL:
		default:
			/* probe() had a problem or the msdstate contains
			 * an unrecognized value, there's nothing we can do.
			 */
			result = P80211ENUM_resultcode_implementation_failure;
			break;
		}
		break;
	default:
		result = P80211ENUM_resultcode_invalid_parameters;
		break;
	}

	return result;
}

/*----------------------------------------------------------------
 * prism2sta_getcardinfo
 *
 * Collect the NICID, firmware version and any other identifiers
 * we'd like to have in host-side data structures.
 *
 * Arguments:
 *	wlandev		wlan device structure
 *
 * Returns:
 *	0	success
 *	>0	f/w reported error
 *	<0	driver reported error
 *
 * Side effects:
 *
 * Call context:
 *	Either.
 *----------------------------------------------------------------*/
static int prism2sta_getcardinfo(wlandevice_t *wlandev)
{
	int result = 0;
	hfa384x_t *hw = (hfa384x_t *) wlandev->priv;
	u16 temp;
	u8 snum[HFA384x_RID_NICSERIALNUMBER_LEN];
	/* printable form of snum: worst case 4 chars per byte + NUL */
	char pstr[(HFA384x_RID_NICSERIALNUMBER_LEN * 4) + 1];

	/* Collect version and compatibility info */
	/*  Some are critical, some are not */

	/* NIC identity */
	result = hfa384x_drvr_getconfig(hw, HFA384x_RID_NICIDENTITY,
					&hw->ident_nic,
					sizeof(hfa384x_compident_t));
	if (result) {
		printk(KERN_ERR "Failed to retrieve NICIDENTITY\n");
		goto failed;
	}

	/* get all the nic id fields in host byte order */
	hw->ident_nic.id = le16_to_cpu(hw->ident_nic.id);
	hw->ident_nic.variant = le16_to_cpu(hw->ident_nic.variant);
	hw->ident_nic.major = le16_to_cpu(hw->ident_nic.major);
	hw->ident_nic.minor = le16_to_cpu(hw->ident_nic.minor);

	printk(KERN_INFO "ident: nic h/w: id=0x%02x %d.%d.%d\n",
	       hw->ident_nic.id, hw->ident_nic.major,
	       hw->ident_nic.minor, hw->ident_nic.variant);

	/* Primary f/w identity */
	result = hfa384x_drvr_getconfig(hw, HFA384x_RID_PRIIDENTITY,
					&hw->ident_pri_fw,
					sizeof(hfa384x_compident_t));
	if (result) {
		printk(KERN_ERR "Failed to retrieve PRIIDENTITY\n");
		goto failed;
	}

	/* get all the private fw id fields in host byte order */
	hw->ident_pri_fw.id = le16_to_cpu(hw->ident_pri_fw.id);
	hw->ident_pri_fw.variant = le16_to_cpu(hw->ident_pri_fw.variant);
	hw->ident_pri_fw.major = le16_to_cpu(hw->ident_pri_fw.major);
hw->ident_pri_fw.minor = le16_to_cpu(hw->ident_pri_fw.minor); printk(KERN_INFO "ident: pri f/w: id=0x%02x %d.%d.%d\n", hw->ident_pri_fw.id, hw->ident_pri_fw.major, hw->ident_pri_fw.minor, hw->ident_pri_fw.variant); /* Station (Secondary?) f/w identity */ result = hfa384x_drvr_getconfig(hw, HFA384x_RID_STAIDENTITY, &hw->ident_sta_fw, sizeof(hfa384x_compident_t)); if (result) { printk(KERN_ERR "Failed to retrieve STAIDENTITY\n"); goto failed; } if (hw->ident_nic.id < 0x8000) { printk(KERN_ERR "FATAL: Card is not an Intersil Prism2/2.5/3\n"); result = -1; goto failed; } /* get all the station fw id fields in host byte order */ hw->ident_sta_fw.id = le16_to_cpu(hw->ident_sta_fw.id); hw->ident_sta_fw.variant = le16_to_cpu(hw->ident_sta_fw.variant); hw->ident_sta_fw.major = le16_to_cpu(hw->ident_sta_fw.major); hw->ident_sta_fw.minor = le16_to_cpu(hw->ident_sta_fw.minor); /* strip out the 'special' variant bits */ hw->mm_mods = hw->ident_sta_fw.variant & (BIT(14) | BIT(15)); hw->ident_sta_fw.variant &= ~((u16) (BIT(14) | BIT(15))); if (hw->ident_sta_fw.id == 0x1f) { printk(KERN_INFO "ident: sta f/w: id=0x%02x %d.%d.%d\n", hw->ident_sta_fw.id, hw->ident_sta_fw.major, hw->ident_sta_fw.minor, hw->ident_sta_fw.variant); } else { printk(KERN_INFO "ident: ap f/w: id=0x%02x %d.%d.%d\n", hw->ident_sta_fw.id, hw->ident_sta_fw.major, hw->ident_sta_fw.minor, hw->ident_sta_fw.variant); printk(KERN_ERR "Unsupported Tertiary AP firmeare loaded!\n"); goto failed; } /* Compatibility range, Modem supplier */ result = hfa384x_drvr_getconfig(hw, HFA384x_RID_MFISUPRANGE, &hw->cap_sup_mfi, sizeof(hfa384x_caplevel_t)); if (result) { printk(KERN_ERR "Failed to retrieve MFISUPRANGE\n"); goto failed; } /* get all the Compatibility range, modem interface supplier fields in byte order */ hw->cap_sup_mfi.role = le16_to_cpu(hw->cap_sup_mfi.role); hw->cap_sup_mfi.id = le16_to_cpu(hw->cap_sup_mfi.id); hw->cap_sup_mfi.variant = le16_to_cpu(hw->cap_sup_mfi.variant); hw->cap_sup_mfi.bottom = 
le16_to_cpu(hw->cap_sup_mfi.bottom);
	hw->cap_sup_mfi.top = le16_to_cpu(hw->cap_sup_mfi.top);

	printk(KERN_INFO
	       "MFI:SUP:role=0x%02x:id=0x%02x:var=0x%02x:b/t=%d/%d\n",
	       hw->cap_sup_mfi.role, hw->cap_sup_mfi.id,
	       hw->cap_sup_mfi.variant, hw->cap_sup_mfi.bottom,
	       hw->cap_sup_mfi.top);

	/* Compatibility range, Controller supplier */
	result = hfa384x_drvr_getconfig(hw, HFA384x_RID_CFISUPRANGE,
					&hw->cap_sup_cfi,
					sizeof(hfa384x_caplevel_t));
	if (result) {
		printk(KERN_ERR "Failed to retrieve CFISUPRANGE\n");
		goto failed;
	}

	/* get all the Compatibility range, controller interface supplier
	   fields in byte order */
	hw->cap_sup_cfi.role = le16_to_cpu(hw->cap_sup_cfi.role);
	hw->cap_sup_cfi.id = le16_to_cpu(hw->cap_sup_cfi.id);
	hw->cap_sup_cfi.variant = le16_to_cpu(hw->cap_sup_cfi.variant);
	hw->cap_sup_cfi.bottom = le16_to_cpu(hw->cap_sup_cfi.bottom);
	hw->cap_sup_cfi.top = le16_to_cpu(hw->cap_sup_cfi.top);

	printk(KERN_INFO
	       "CFI:SUP:role=0x%02x:id=0x%02x:var=0x%02x:b/t=%d/%d\n",
	       hw->cap_sup_cfi.role, hw->cap_sup_cfi.id,
	       hw->cap_sup_cfi.variant, hw->cap_sup_cfi.bottom,
	       hw->cap_sup_cfi.top);

	/* Compatibility range, Primary f/w supplier */
	result = hfa384x_drvr_getconfig(hw, HFA384x_RID_PRISUPRANGE,
					&hw->cap_sup_pri,
					sizeof(hfa384x_caplevel_t));
	if (result) {
		printk(KERN_ERR "Failed to retrieve PRISUPRANGE\n");
		goto failed;
	}

	/* get all the Compatibility range, primary firmware supplier
	   fields in byte order */
	hw->cap_sup_pri.role = le16_to_cpu(hw->cap_sup_pri.role);
	hw->cap_sup_pri.id = le16_to_cpu(hw->cap_sup_pri.id);
	hw->cap_sup_pri.variant = le16_to_cpu(hw->cap_sup_pri.variant);
	hw->cap_sup_pri.bottom = le16_to_cpu(hw->cap_sup_pri.bottom);
	hw->cap_sup_pri.top = le16_to_cpu(hw->cap_sup_pri.top);

	printk(KERN_INFO
	       "PRI:SUP:role=0x%02x:id=0x%02x:var=0x%02x:b/t=%d/%d\n",
	       hw->cap_sup_pri.role, hw->cap_sup_pri.id,
	       hw->cap_sup_pri.variant, hw->cap_sup_pri.bottom,
	       hw->cap_sup_pri.top);

	/* Compatibility range, Station f/w supplier */
	result = hfa384x_drvr_getconfig(hw, HFA384x_RID_STASUPRANGE,
					&hw->cap_sup_sta,
					sizeof(hfa384x_caplevel_t));
	if (result) {
		printk(KERN_ERR "Failed to retrieve STASUPRANGE\n");
		goto failed;
	}

	/* get all the Compatibility range, station firmware supplier
	   fields in byte order */
	hw->cap_sup_sta.role = le16_to_cpu(hw->cap_sup_sta.role);
	hw->cap_sup_sta.id = le16_to_cpu(hw->cap_sup_sta.id);
	hw->cap_sup_sta.variant = le16_to_cpu(hw->cap_sup_sta.variant);
	hw->cap_sup_sta.bottom = le16_to_cpu(hw->cap_sup_sta.bottom);
	hw->cap_sup_sta.top = le16_to_cpu(hw->cap_sup_sta.top);

	/* supplier id 0x04 marks station firmware; otherwise AP firmware */
	if (hw->cap_sup_sta.id == 0x04) {
		printk(KERN_INFO
		       "STA:SUP:role=0x%02x:id=0x%02x:var=0x%02x:b/t=%d/%d\n",
		       hw->cap_sup_sta.role, hw->cap_sup_sta.id,
		       hw->cap_sup_sta.variant, hw->cap_sup_sta.bottom,
		       hw->cap_sup_sta.top);
	} else {
		printk(KERN_INFO
		       "AP:SUP:role=0x%02x:id=0x%02x:var=0x%02x:b/t=%d/%d\n",
		       hw->cap_sup_sta.role, hw->cap_sup_sta.id,
		       hw->cap_sup_sta.variant, hw->cap_sup_sta.bottom,
		       hw->cap_sup_sta.top);
	}

	/* Compatibility range, primary f/w actor, CFI supplier */
	result = hfa384x_drvr_getconfig(hw, HFA384x_RID_PRI_CFIACTRANGES,
					&hw->cap_act_pri_cfi,
					sizeof(hfa384x_caplevel_t));
	if (result) {
		printk(KERN_ERR "Failed to retrieve PRI_CFIACTRANGES\n");
		goto failed;
	}

	/* get all the Compatibility range, primary f/w actor, CFI supplier
	   fields in byte order */
	hw->cap_act_pri_cfi.role = le16_to_cpu(hw->cap_act_pri_cfi.role);
	hw->cap_act_pri_cfi.id = le16_to_cpu(hw->cap_act_pri_cfi.id);
	hw->cap_act_pri_cfi.variant = le16_to_cpu(hw->cap_act_pri_cfi.variant);
	hw->cap_act_pri_cfi.bottom = le16_to_cpu(hw->cap_act_pri_cfi.bottom);
	hw->cap_act_pri_cfi.top = le16_to_cpu(hw->cap_act_pri_cfi.top);

	printk(KERN_INFO
	       "PRI-CFI:ACT:role=0x%02x:id=0x%02x:var=0x%02x:b/t=%d/%d\n",
	       hw->cap_act_pri_cfi.role, hw->cap_act_pri_cfi.id,
	       hw->cap_act_pri_cfi.variant, hw->cap_act_pri_cfi.bottom,
	       hw->cap_act_pri_cfi.top);

	/* Compatibility range, sta f/w actor, CFI supplier */
	result = hfa384x_drvr_getconfig(hw, HFA384x_RID_STA_CFIACTRANGES,
					&hw->cap_act_sta_cfi,
					sizeof(hfa384x_caplevel_t));
	if (result) {
		printk(KERN_ERR "Failed to retrieve STA_CFIACTRANGES\n");
		goto failed;
	}

	/* get all the Compatibility range, station f/w actor, CFI supplier
	   fields in byte order */
	hw->cap_act_sta_cfi.role = le16_to_cpu(hw->cap_act_sta_cfi.role);
	hw->cap_act_sta_cfi.id = le16_to_cpu(hw->cap_act_sta_cfi.id);
	hw->cap_act_sta_cfi.variant = le16_to_cpu(hw->cap_act_sta_cfi.variant);
	hw->cap_act_sta_cfi.bottom = le16_to_cpu(hw->cap_act_sta_cfi.bottom);
	hw->cap_act_sta_cfi.top = le16_to_cpu(hw->cap_act_sta_cfi.top);

	printk(KERN_INFO
	       "STA-CFI:ACT:role=0x%02x:id=0x%02x:var=0x%02x:b/t=%d/%d\n",
	       hw->cap_act_sta_cfi.role, hw->cap_act_sta_cfi.id,
	       hw->cap_act_sta_cfi.variant, hw->cap_act_sta_cfi.bottom,
	       hw->cap_act_sta_cfi.top);

	/* Compatibility range, sta f/w actor, MFI supplier */
	result = hfa384x_drvr_getconfig(hw, HFA384x_RID_STA_MFIACTRANGES,
					&hw->cap_act_sta_mfi,
					sizeof(hfa384x_caplevel_t));
	if (result) {
		printk(KERN_ERR "Failed to retrieve STA_MFIACTRANGES\n");
		goto failed;
	}

	/* get all the Compatibility range, station f/w actor, MFI supplier
	   fields in byte order */
	hw->cap_act_sta_mfi.role = le16_to_cpu(hw->cap_act_sta_mfi.role);
	hw->cap_act_sta_mfi.id = le16_to_cpu(hw->cap_act_sta_mfi.id);
	hw->cap_act_sta_mfi.variant = le16_to_cpu(hw->cap_act_sta_mfi.variant);
	hw->cap_act_sta_mfi.bottom = le16_to_cpu(hw->cap_act_sta_mfi.bottom);
	hw->cap_act_sta_mfi.top = le16_to_cpu(hw->cap_act_sta_mfi.top);

	printk(KERN_INFO
	       "STA-MFI:ACT:role=0x%02x:id=0x%02x:var=0x%02x:b/t=%d/%d\n",
	       hw->cap_act_sta_mfi.role, hw->cap_act_sta_mfi.id,
	       hw->cap_act_sta_mfi.variant, hw->cap_act_sta_mfi.bottom,
	       hw->cap_act_sta_mfi.top);

	/* Serial Number */
	result = hfa384x_drvr_getconfig(hw, HFA384x_RID_NICSERIALNUMBER,
					snum, HFA384x_RID_NICSERIALNUMBER_LEN);
	if (!result) {
		wlan_mkprintstr(snum, HFA384x_RID_NICSERIALNUMBER_LEN,
				pstr, sizeof(pstr));
		printk(KERN_INFO "Prism2 card SN: %s\n", pstr);
	} else {
		printk(KERN_ERR "Failed to retrieve Prism2 Card SN\n");
		goto failed;
	}

	/* Collect the MAC address */
	result = hfa384x_drvr_getconfig(hw, HFA384x_RID_CNFOWNMACADDR,
					wlandev->netdev->dev_addr, ETH_ALEN);
	if (result != 0) {
		printk(KERN_ERR "Failed to retrieve mac address\n");
		goto failed;
	}

	/* short preamble is always implemented */
	wlandev->nsdcaps |= P80211_NSDCAP_SHORT_PREAMBLE;

	/* find out if hardware wep is implemented */
	hfa384x_drvr_getconfig16(hw, HFA384x_RID_PRIVACYOPTIMP, &temp);
	if (temp)
		wlandev->nsdcaps |= P80211_NSDCAP_HARDWAREWEP;

	/* get the dBm Scaling constant */
	hfa384x_drvr_getconfig16(hw, HFA384x_RID_CNFDBMADJUST, &temp);
	hw->dbmadjust = temp;

	/* Only enable scan by default on newer firmware (>= 1.5.5). */
	if (HFA384x_FIRMWARE_VERSION(hw->ident_sta_fw.major,
				     hw->ident_sta_fw.minor,
				     hw->ident_sta_fw.variant) <
	    HFA384x_FIRMWARE_VERSION(1, 5, 5)) {
		wlandev->nsdcaps |= P80211_NSDCAP_NOSCAN;
	}

	/* TODO: Set any internally managed config items */

	goto done;
failed:
	/* NOTE(review): on some paths result is still a f/w status or 0
	 * here; the message prints whatever was last assigned. */
	printk(KERN_ERR "Failed, result=%d\n", result);
done:
	return result;
}

/*----------------------------------------------------------------
 * prism2sta_globalsetup
 *
 * Set any global RIDs that we want to set at device activation.
 *
 * Arguments:
 *	wlandev		wlan device structure
 *
 * Returns:
 *	0	success
 *	>0	f/w reported error
 *	<0	driver reported error
 *
 * Side effects:
 *
 * Call context:
 *	process thread
 *----------------------------------------------------------------*/
static int prism2sta_globalsetup(wlandevice_t *wlandev)
{
	hfa384x_t *hw = (hfa384x_t *) wlandev->priv;

	/* Set the maximum frame size */
	return hfa384x_drvr_setconfig16(hw, HFA384x_RID_CNFMAXDATALEN,
					WLAN_DATA_MAXLEN);
}

/* Mirror the netdev promiscuous/allmulti flags into the firmware's
 * PROMISCMODE RID (asynchronously; may run in atomic context). */
static int prism2sta_setmulticast(wlandevice_t *wlandev, netdevice_t *dev)
{
	int result = 0;
	hfa384x_t *hw = (hfa384x_t *) wlandev->priv;

	u16 promisc;

	/* If we're not ready, what's the point?
*/
	if (hw->state != HFA384x_STATE_RUNNING)
		goto exit;

	if ((dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
		promisc = P80211ENUM_truth_true;
	else
		promisc = P80211ENUM_truth_false;

	/* async variant: safe to call from contexts that cannot sleep */
	result =
	    hfa384x_drvr_setconfig16_async(hw, HFA384x_RID_PROMISCMODE,
					   promisc);

exit:
	return result;
}

/*----------------------------------------------------------------
 * prism2sta_inf_handover
 *
 * Handles the receipt of a Handover info frame.  Should only be present
 * in APs only.
 *
 * Arguments:
 *	wlandev		wlan device structure
 *	inf		ptr to info frame (contents in hfa384x order)
 *
 * Returns:
 *	nothing
 *
 * Side effects:
 *
 * Call context:
 *	interrupt
 *----------------------------------------------------------------*/
static void prism2sta_inf_handover(wlandevice_t *wlandev,
				   hfa384x_InfFrame_t *inf)
{
	pr_debug("received infoframe:HANDOVER (unhandled)\n");
	return;
}

/*----------------------------------------------------------------
 * prism2sta_inf_tallies
 *
 * Handles the receipt of a CommTallies info frame.  Accumulates the
 * per-counter deltas reported by the firmware into hw->tallies.
 *
 * Arguments:
 *	wlandev		wlan device structure
 *	inf		ptr to info frame (contents in hfa384x order)
 *
 * Returns:
 *	nothing
 *
 * Side effects:
 *
 * Call context:
 *	interrupt
 *----------------------------------------------------------------*/
static void prism2sta_inf_tallies(wlandevice_t *wlandev,
				  hfa384x_InfFrame_t *inf)
{
	hfa384x_t *hw = (hfa384x_t *) wlandev->priv;
	u16 *src16;
	u32 *dst;
	u32 *src32;
	int i;
	int cnt;

	/*
	 ** Determine if these are 16-bit or 32-bit tallies, based on the
	 ** record length of the info record.
	 */

	cnt = sizeof(hfa384x_CommTallies32_t) / sizeof(u32);
	if (inf->framelen > 22) {
		/* 32-bit tallies */
		dst = (u32 *) &hw->tallies;
		src32 = (u32 *) &inf->info.commtallies32;
		for (i = 0; i < cnt; i++, dst++, src32++)
			*dst += le32_to_cpu(*src32);
	} else {
		/* 16-bit tallies, still accumulated into 32-bit counters */
		dst = (u32 *) &hw->tallies;
		src16 = (u16 *) &inf->info.commtallies16;
		for (i = 0; i < cnt; i++, dst++, src16++)
			*dst += le16_to_cpu(*src16);
	}

	return;
}

/*----------------------------------------------------------------
 * prism2sta_inf_scanresults
 *
 * Handles the receipt of a Scan Results info frame, then issues a
 * join request for the first (best) BSS in the list.
 *
 * Arguments:
 *	wlandev		wlan device structure
 *	inf		ptr to info frame (contents in hfa384x order)
 *
 * Returns:
 *	nothing
 *
 * Side effects:
 *
 * Call context:
 *	interrupt
 *----------------------------------------------------------------*/
static void prism2sta_inf_scanresults(wlandevice_t *wlandev,
				      hfa384x_InfFrame_t *inf)
{
	hfa384x_t *hw = (hfa384x_t *) wlandev->priv;
	int nbss;
	hfa384x_ScanResult_t *sr = &(inf->info.scanresult);
	int i;
	hfa384x_JoinRequest_data_t joinreq;
	int result;

	/* Get the number of results, first in bytes, then in results */
	nbss = (inf->framelen * sizeof(u16)) -
	    sizeof(inf->infotype) - sizeof(inf->info.scanresult.scanreason);
	nbss /= sizeof(hfa384x_ScanResultSub_t);

	/* Print em */
	pr_debug("rx scanresults, reason=%d, nbss=%d:\n",
		 inf->info.scanresult.scanreason, nbss);
	for (i = 0; i < nbss; i++) {
		pr_debug("chid=%d anl=%d sl=%d bcnint=%d\n",
			 sr->result[i].chid,
			 sr->result[i].anl,
			 sr->result[i].sl, sr->result[i].bcnint);
		pr_debug(" capinfo=0x%04x proberesp_rate=%d\n",
			 sr->result[i].capinfo, sr->result[i].proberesp_rate);
	}

	/* issue a join request */
	joinreq.channel = sr->result[0].chid;
	memcpy(joinreq.bssid, sr->result[0].bssid, WLAN_BSSID_LEN);
	result = hfa384x_drvr_setconfig(hw,
					HFA384x_RID_JOINREQUEST,
					&joinreq, HFA384x_RID_JOINREQUEST_LEN);
	if (result) {
		printk(KERN_ERR "setconfig(joinreq) failed, result=%d\n",
		       result);
	}

	return;
}

/*----------------------------------------------------------------
 *
prism2sta_inf_hostscanresults * * Handles the receipt of a Scan Results info frame. * * Arguments: * wlandev wlan device structure * inf ptr to info frame (contents in hfa384x order) * * Returns: * nothing * * Side effects: * * Call context: * interrupt ----------------------------------------------------------------*/ static void prism2sta_inf_hostscanresults(wlandevice_t *wlandev, hfa384x_InfFrame_t *inf) { hfa384x_t *hw = (hfa384x_t *) wlandev->priv; int nbss; nbss = (inf->framelen - 3) / 32; pr_debug("Received %d hostscan results\n", nbss); if (nbss > 32) nbss = 32; kfree(hw->scanresults); hw->scanresults = kmalloc(sizeof(hfa384x_InfFrame_t), GFP_ATOMIC); memcpy(hw->scanresults, inf, sizeof(hfa384x_InfFrame_t)); if (nbss == 0) nbss = -1; /* Notify/wake the sleeping caller. */ hw->scanflag = nbss; wake_up_interruptible(&hw->cmdq); }; /*---------------------------------------------------------------- * prism2sta_inf_chinforesults * * Handles the receipt of a Channel Info Results info frame. * * Arguments: * wlandev wlan device structure * inf ptr to info frame (contents in hfa384x order) * * Returns: * nothing * * Side effects: * * Call context: * interrupt ----------------------------------------------------------------*/ static void prism2sta_inf_chinforesults(wlandevice_t *wlandev, hfa384x_InfFrame_t *inf) { hfa384x_t *hw = (hfa384x_t *) wlandev->priv; unsigned int i, n; hw->channel_info.results.scanchannels = le16_to_cpu(inf->info.chinforesult.scanchannels); for (i = 0, n = 0; i < HFA384x_CHINFORESULT_MAX; i++) { if (hw->channel_info.results.scanchannels & (1 << i)) { int channel = le16_to_cpu(inf->info.chinforesult.result[n].chid) - 1; hfa384x_ChInfoResultSub_t *chinforesult = &hw->channel_info.results.result[channel]; chinforesult->chid = channel; chinforesult->anl = le16_to_cpu(inf->info.chinforesult.result[n].anl); chinforesult->pnl = le16_to_cpu(inf->info.chinforesult.result[n].pnl); chinforesult->active = le16_to_cpu(inf->info.chinforesult.result[n]. 
active); pr_debug ("chinfo: channel %d, %s level (avg/peak)=%d/%d dB, pcf %d\n", channel + 1, chinforesult-> active & HFA384x_CHINFORESULT_BSSACTIVE ? "signal" : "noise", chinforesult->anl, chinforesult->pnl, chinforesult-> active & HFA384x_CHINFORESULT_PCFACTIVE ? 1 : 0); n++; } } atomic_set(&hw->channel_info.done, 2); hw->channel_info.count = n; return; } void prism2sta_processing_defer(struct work_struct *data) { hfa384x_t *hw = container_of(data, struct hfa384x, link_bh); wlandevice_t *wlandev = hw->wlandev; hfa384x_bytestr32_t ssid; int result; /* First let's process the auth frames */ { struct sk_buff *skb; hfa384x_InfFrame_t *inf; while ((skb = skb_dequeue(&hw->authq))) { inf = (hfa384x_InfFrame_t *) skb->data; prism2sta_inf_authreq_defer(wlandev, inf); } } /* Now let's handle the linkstatus stuff */ if (hw->link_status == hw->link_status_new) goto failed; hw->link_status = hw->link_status_new; switch (hw->link_status) { case HFA384x_LINK_NOTCONNECTED: /* I'm currently assuming that this is the initial link * state. It should only be possible immediately * following an Enable command. * Response: * Block Transmits, Ignore receives of data frames */ netif_carrier_off(wlandev->netdev); printk(KERN_INFO "linkstatus=NOTCONNECTED (unhandled)\n"); break; case HFA384x_LINK_CONNECTED: /* This one indicates a successful scan/join/auth/assoc. * When we have the full MLME complement, this event will * signify successful completion of both mlme_authenticate * and mlme_associate. State management will get a little * ugly here. 
* Response: * Indicate authentication and/or association * Enable Transmits, Receives and pass up data frames */ netif_carrier_on(wlandev->netdev); /* If we are joining a specific AP, set our * state and reset retries */ if (hw->join_ap == 1) hw->join_ap = 2; hw->join_retries = 60; /* Don't call this in monitor mode */ if (wlandev->netdev->type == ARPHRD_ETHER) { u16 portstatus; printk(KERN_INFO "linkstatus=CONNECTED\n"); /* For non-usb devices, we can use the sync versions */ /* Collect the BSSID, and set state to allow tx */ result = hfa384x_drvr_getconfig(hw, HFA384x_RID_CURRENTBSSID, wlandev->bssid, WLAN_BSSID_LEN); if (result) { pr_debug ("getconfig(0x%02x) failed, result = %d\n", HFA384x_RID_CURRENTBSSID, result); goto failed; } result = hfa384x_drvr_getconfig(hw, HFA384x_RID_CURRENTSSID, &ssid, sizeof(ssid)); if (result) { pr_debug ("getconfig(0x%02x) failed, result = %d\n", HFA384x_RID_CURRENTSSID, result); goto failed; } prism2mgmt_bytestr2pstr((hfa384x_bytestr_t *) &ssid, (p80211pstrd_t *) & wlandev->ssid); /* Collect the port status */ result = hfa384x_drvr_getconfig16(hw, HFA384x_RID_PORTSTATUS, &portstatus); if (result) { pr_debug ("getconfig(0x%02x) failed, result = %d\n", HFA384x_RID_PORTSTATUS, result); goto failed; } wlandev->macmode = (portstatus == HFA384x_PSTATUS_CONN_IBSS) ? WLAN_MACMODE_IBSS_STA : WLAN_MACMODE_ESS_STA; /* signal back up to cfg80211 layer */ prism2_connect_result(wlandev, P80211ENUM_truth_false); /* Get the ball rolling on the comms quality stuff */ prism2sta_commsqual_defer(&hw->commsqual_bh); } break; case HFA384x_LINK_DISCONNECTED: /* This one indicates that our association is gone. We've * lost connection with the AP and/or been disassociated. * This indicates that the MAC has completely cleared it's * associated state. We * should send a deauth indication * (implying disassoc) up * to the MLME. 
* Response: * Indicate Deauthentication * Block Transmits, Ignore receives of data frames */ if (wlandev->netdev->type == ARPHRD_ETHER) printk(KERN_INFO "linkstatus=DISCONNECTED (unhandled)\n"); wlandev->macmode = WLAN_MACMODE_NONE; netif_carrier_off(wlandev->netdev); /* signal back up to cfg80211 layer */ prism2_disconnected(wlandev); break; case HFA384x_LINK_AP_CHANGE: /* This one indicates that the MAC has decided to and * successfully completed a change to another AP. We * should probably implement a reassociation indication * in response to this one. I'm thinking that the the * p80211 layer needs to be notified in case of * buffering/queueing issues. User mode also needs to be * notified so that any BSS dependent elements can be * updated. * associated state. We * should send a deauth indication * (implying disassoc) up * to the MLME. * Response: * Indicate Reassociation * Enable Transmits, Receives and pass up data frames */ printk(KERN_INFO "linkstatus=AP_CHANGE\n"); result = hfa384x_drvr_getconfig(hw, HFA384x_RID_CURRENTBSSID, wlandev->bssid, WLAN_BSSID_LEN); if (result) { pr_debug("getconfig(0x%02x) failed, result = %d\n", HFA384x_RID_CURRENTBSSID, result); goto failed; } result = hfa384x_drvr_getconfig(hw, HFA384x_RID_CURRENTSSID, &ssid, sizeof(ssid)); if (result) { pr_debug("getconfig(0x%02x) failed, result = %d\n", HFA384x_RID_CURRENTSSID, result); goto failed; } prism2mgmt_bytestr2pstr((hfa384x_bytestr_t *) &ssid, (p80211pstrd_t *) &wlandev->ssid); hw->link_status = HFA384x_LINK_CONNECTED; netif_carrier_on(wlandev->netdev); /* signal back up to cfg80211 layer */ prism2_roamed(wlandev); break; case HFA384x_LINK_AP_OUTOFRANGE: /* This one indicates that the MAC has decided that the * AP is out of range, but hasn't found a better candidate * so the MAC maintains its "associated" state in case * we get back in range. We should block transmits and * receives in this state. Do we need an indication here? 
* Probably not since a polling user-mode element would * get this status from from p2PortStatus(FD40). What about * p80211? * Response: * Block Transmits, Ignore receives of data frames */ printk(KERN_INFO "linkstatus=AP_OUTOFRANGE (unhandled)\n"); netif_carrier_off(wlandev->netdev); break; case HFA384x_LINK_AP_INRANGE: /* This one indicates that the MAC has decided that the * AP is back in range. We continue working with our * existing association. * Response: * Enable Transmits, Receives and pass up data frames */ printk(KERN_INFO "linkstatus=AP_INRANGE\n"); hw->link_status = HFA384x_LINK_CONNECTED; netif_carrier_on(wlandev->netdev); break; case HFA384x_LINK_ASSOCFAIL: /* This one is actually a peer to CONNECTED. We've * requested a join for a given SSID and optionally BSSID. * We can use this one to indicate authentication and * association failures. The trick is going to be * 1) identifying the failure, and 2) state management. * Response: * Disable Transmits, Ignore receives of data frames */ if (hw->join_ap && --hw->join_retries > 0) { hfa384x_JoinRequest_data_t joinreq; joinreq = hw->joinreq; /* Send the join request */ hfa384x_drvr_setconfig(hw, HFA384x_RID_JOINREQUEST, &joinreq, HFA384x_RID_JOINREQUEST_LEN); printk(KERN_INFO "linkstatus=ASSOCFAIL (re-submitting join)\n"); } else { printk(KERN_INFO "linkstatus=ASSOCFAIL (unhandled)\n"); } netif_carrier_off(wlandev->netdev); /* signal back up to cfg80211 layer */ prism2_connect_result(wlandev, P80211ENUM_truth_true); break; default: /* This is bad, IO port problems? */ printk(KERN_WARNING "unknown linkstatus=0x%02x\n", hw->link_status); goto failed; break; } wlandev->linkstatus = (hw->link_status == HFA384x_LINK_CONNECTED); failed: return; } /*---------------------------------------------------------------- * prism2sta_inf_linkstatus * * Handles the receipt of a Link Status info frame. 
 *
 * Arguments:
 *	wlandev		wlan device structure
 *	inf		ptr to info frame (contents in hfa384x order)
 *
 * Returns:
 *	nothing
 *
 * Side effects:
 *
 * Call context:
 *	interrupt
----------------------------------------------------------------*/
static void prism2sta_inf_linkstatus(wlandevice_t *wlandev,
				     hfa384x_InfFrame_t *inf)
{
	hfa384x_t *hw = (hfa384x_t *) wlandev->priv;

	/* Only record the new state here; the actual handling (netif
	 * carrier changes, RID reads) is deferred to the link_bh
	 * worker because this runs in interrupt context. */
	hw->link_status_new = le16_to_cpu(inf->info.linkstatus.linkstatus);

	schedule_work(&hw->link_bh);

	return;
}

/*----------------------------------------------------------------
 * prism2sta_inf_assocstatus
 *
 * Handles the receipt of an Association Status info frame. Should
 * be present in APs only.
 *
 * Arguments:
 *	wlandev		wlan device structure
 *	inf		ptr to info frame (contents in hfa384x order)
 *
 * Returns:
 *	nothing
 *
 * Side effects:
 *
 * Call context:
 *	interrupt
----------------------------------------------------------------*/
static void prism2sta_inf_assocstatus(wlandevice_t *wlandev,
				      hfa384x_InfFrame_t *inf)
{
	hfa384x_t *hw = (hfa384x_t *) wlandev->priv;
	hfa384x_AssocStatus_t rec;
	int i;

	/* Copy the record out of the frame and convert the
	 * little-endian fields to host order before inspecting them. */
	memcpy(&rec, &inf->info.assocstatus, sizeof(rec));
	rec.assocstatus = le16_to_cpu(rec.assocstatus);
	rec.reason = le16_to_cpu(rec.reason);

	/*
	** Find the address in the list of authenticated stations.
	** If it wasn't found, then this address has not been previously
	** authenticated and something weird has happened if this is
	** anything other than an "authentication failed" message.
	** If the address was found, then set the "associated" flag for
	** that station, based on whether the station is associating or
	** losing its association.  Something weird has also happened
	** if we find the address in the list of authenticated stations
	** but we are getting an "authentication failed" message.
*/ for (i = 0; i < hw->authlist.cnt; i++) if (memcmp(rec.sta_addr, hw->authlist.addr[i], ETH_ALEN) == 0) break; if (i >= hw->authlist.cnt) { if (rec.assocstatus != HFA384x_ASSOCSTATUS_AUTHFAIL) printk(KERN_WARNING "assocstatus info frame received for non-authenticated station.\n"); } else { hw->authlist.assoc[i] = (rec.assocstatus == HFA384x_ASSOCSTATUS_STAASSOC || rec.assocstatus == HFA384x_ASSOCSTATUS_REASSOC); if (rec.assocstatus == HFA384x_ASSOCSTATUS_AUTHFAIL) printk(KERN_WARNING "authfail assocstatus info frame received for authenticated station.\n"); } return; } /*---------------------------------------------------------------- * prism2sta_inf_authreq * * Handles the receipt of an Authentication Request info frame. Should * be present in APs only. * * Arguments: * wlandev wlan device structure * inf ptr to info frame (contents in hfa384x order) * * Returns: * nothing * * Side effects: * * Call context: * interrupt * ----------------------------------------------------------------*/ static void prism2sta_inf_authreq(wlandevice_t *wlandev, hfa384x_InfFrame_t *inf) { hfa384x_t *hw = (hfa384x_t *) wlandev->priv; struct sk_buff *skb; skb = dev_alloc_skb(sizeof(*inf)); if (skb) { skb_put(skb, sizeof(*inf)); memcpy(skb->data, inf, sizeof(*inf)); skb_queue_tail(&hw->authq, skb); schedule_work(&hw->link_bh); } } static void prism2sta_inf_authreq_defer(wlandevice_t *wlandev, hfa384x_InfFrame_t *inf) { hfa384x_t *hw = (hfa384x_t *) wlandev->priv; hfa384x_authenticateStation_data_t rec; int i, added, result, cnt; u8 *addr; /* ** Build the AuthenticateStation record. Initialize it for denying ** authentication. */ memcpy(rec.address, inf->info.authreq.sta_addr, ETH_ALEN); rec.status = P80211ENUM_status_unspec_failure; /* ** Authenticate based on the access mode. */ switch (hw->accessmode) { case WLAN_ACCESS_NONE: /* ** Deny all new authentications. However, if a station ** is ALREADY authenticated, then accept it. 
*/ for (i = 0; i < hw->authlist.cnt; i++) if (memcmp(rec.address, hw->authlist.addr[i], ETH_ALEN) == 0) { rec.status = P80211ENUM_status_successful; break; } break; case WLAN_ACCESS_ALL: /* ** Allow all authentications. */ rec.status = P80211ENUM_status_successful; break; case WLAN_ACCESS_ALLOW: /* ** Only allow the authentication if the MAC address ** is in the list of allowed addresses. ** ** Since this is the interrupt handler, we may be here ** while the access list is in the middle of being ** updated. Choose the list which is currently okay. ** See "prism2mib_priv_accessallow()" for details. */ if (hw->allow.modify == 0) { cnt = hw->allow.cnt; addr = hw->allow.addr[0]; } else { cnt = hw->allow.cnt1; addr = hw->allow.addr1[0]; } for (i = 0; i < cnt; i++, addr += ETH_ALEN) if (memcmp(rec.address, addr, ETH_ALEN) == 0) { rec.status = P80211ENUM_status_successful; break; } break; case WLAN_ACCESS_DENY: /* ** Allow the authentication UNLESS the MAC address is ** in the list of denied addresses. ** ** Since this is the interrupt handler, we may be here ** while the access list is in the middle of being ** updated. Choose the list which is currently okay. ** See "prism2mib_priv_accessdeny()" for details. */ if (hw->deny.modify == 0) { cnt = hw->deny.cnt; addr = hw->deny.addr[0]; } else { cnt = hw->deny.cnt1; addr = hw->deny.addr1[0]; } rec.status = P80211ENUM_status_successful; for (i = 0; i < cnt; i++, addr += ETH_ALEN) if (memcmp(rec.address, addr, ETH_ALEN) == 0) { rec.status = P80211ENUM_status_unspec_failure; break; } break; } /* ** If the authentication is okay, then add the MAC address to the ** list of authenticated stations. Don't add the address if it ** is already in the list. (802.11b does not seem to disallow ** a station from issuing an authentication request when the ** station is already authenticated. Does this sort of thing ** ever happen? We might as well do the check just in case.) 
*/ added = 0; if (rec.status == P80211ENUM_status_successful) { for (i = 0; i < hw->authlist.cnt; i++) if (memcmp(rec.address, hw->authlist.addr[i], ETH_ALEN) == 0) break; if (i >= hw->authlist.cnt) { if (hw->authlist.cnt >= WLAN_AUTH_MAX) { rec.status = P80211ENUM_status_ap_full; } else { memcpy(hw->authlist.addr[hw->authlist.cnt], rec.address, ETH_ALEN); hw->authlist.cnt++; added = 1; } } } /* ** Send back the results of the authentication. If this doesn't work, ** then make sure to remove the address from the authenticated list if ** it was added. */ rec.status = cpu_to_le16(rec.status); rec.algorithm = inf->info.authreq.algorithm; result = hfa384x_drvr_setconfig(hw, HFA384x_RID_AUTHENTICATESTA, &rec, sizeof(rec)); if (result) { if (added) hw->authlist.cnt--; printk(KERN_ERR "setconfig(authenticatestation) failed, result=%d\n", result); } return; } /*---------------------------------------------------------------- * prism2sta_inf_psusercnt * * Handles the receipt of a PowerSaveUserCount info frame. Should * be present in APs only. * * Arguments: * wlandev wlan device structure * inf ptr to info frame (contents in hfa384x order) * * Returns: * nothing * * Side effects: * * Call context: * interrupt ----------------------------------------------------------------*/ static void prism2sta_inf_psusercnt(wlandevice_t *wlandev, hfa384x_InfFrame_t *inf) { hfa384x_t *hw = (hfa384x_t *) wlandev->priv; hw->psusercount = le16_to_cpu(inf->info.psusercnt.usercnt); return; } /*---------------------------------------------------------------- * prism2sta_ev_info * * Handles the Info event. 
 *
 * Arguments:
 *	wlandev		wlan device structure
 *	inf		ptr to a generic info frame
 *
 * Returns:
 *	nothing
 *
 * Side effects:
 *
 * Call context:
 *	interrupt
----------------------------------------------------------------*/
void prism2sta_ev_info(wlandevice_t *wlandev, hfa384x_InfFrame_t *inf)
{
	/* infotype arrives little-endian from the MAC; convert in place
	 * so the per-type handlers see it in host order. */
	inf->infotype = le16_to_cpu(inf->infotype);
	/* Dispatch */
	switch (inf->infotype) {
	case HFA384x_IT_HANDOVERADDR:
		prism2sta_inf_handover(wlandev, inf);
		break;
	case HFA384x_IT_COMMTALLIES:
		prism2sta_inf_tallies(wlandev, inf);
		break;
	case HFA384x_IT_HOSTSCANRESULTS:
		prism2sta_inf_hostscanresults(wlandev, inf);
		break;
	case HFA384x_IT_SCANRESULTS:
		prism2sta_inf_scanresults(wlandev, inf);
		break;
	case HFA384x_IT_CHINFORESULTS:
		prism2sta_inf_chinforesults(wlandev, inf);
		break;
	case HFA384x_IT_LINKSTATUS:
		prism2sta_inf_linkstatus(wlandev, inf);
		break;
	case HFA384x_IT_ASSOCSTATUS:
		prism2sta_inf_assocstatus(wlandev, inf);
		break;
	case HFA384x_IT_AUTHREQ:
		prism2sta_inf_authreq(wlandev, inf);
		break;
	case HFA384x_IT_PSUSERCNT:
		prism2sta_inf_psusercnt(wlandev, inf);
		break;
	case HFA384x_IT_KEYIDCHANGED:
		/* Known frame types with no handler yet: just log them. */
		printk(KERN_WARNING "Unhandled IT_KEYIDCHANGED\n");
		break;
	case HFA384x_IT_ASSOCREQ:
		printk(KERN_WARNING "Unhandled IT_ASSOCREQ\n");
		break;
	case HFA384x_IT_MICFAILURE:
		printk(KERN_WARNING "Unhandled IT_MICFAILURE\n");
		break;
	default:
		printk(KERN_WARNING
		       "Unknown info type=0x%02x\n", inf->infotype);
		break;
	}
	return;
}

/*----------------------------------------------------------------
 * prism2sta_ev_txexc
 *
 * Handles the TxExc event.  A Transmit Exception event indicates
 * that the MAC's TX process was unsuccessful - so the packet did
 * not get transmitted.
 *
 * Arguments:
 *	wlandev		wlan device structure
 *	status		tx frame status word
 *
 * Returns:
 *	nothing
 *
 * Side effects:
 *
 * Call context:
 *	interrupt
----------------------------------------------------------------*/
void prism2sta_ev_txexc(wlandevice_t *wlandev, u16 status)
{
	/* Transmit failed; currently only logged, no retry logic here. */
	pr_debug("TxExc status=0x%x.\n", status);

	return;
}

/*----------------------------------------------------------------
 * prism2sta_ev_tx
 *
 * Handles the Tx event.
 *
 * Arguments:
 *	wlandev		wlan device structure
 *	status		tx frame status word
 * Returns:
 *	nothing
 *
 * Side effects:
 *
 * Call context:
 *	interrupt
----------------------------------------------------------------*/
void prism2sta_ev_tx(wlandevice_t *wlandev, u16 status)
{
	pr_debug("Tx Complete, status=0x%04x\n", status);
	/* update linux network stats */
	wlandev->linux_stats.tx_packets++;
	return;
}

/*----------------------------------------------------------------
 * prism2sta_ev_rx
 *
 * Handles the Rx event.
 *
 * Arguments:
 *	wlandev		wlan device structure
 *
 * Returns:
 *	nothing
 *
 * Side effects:
 *
 * Call context:
 *	interrupt
----------------------------------------------------------------*/
void prism2sta_ev_rx(wlandevice_t *wlandev, struct sk_buff *skb)
{
	/* Hand the received frame straight up to the p80211 layer;
	 * ownership of the skb passes with the call. */
	p80211netdev_rx(wlandev, skb);
	return;
}

/*----------------------------------------------------------------
 * prism2sta_ev_alloc
 *
 * Handles the Alloc event.
 *
 * Arguments:
 *	wlandev		wlan device structure
 *
 * Returns:
 *	nothing
 *
 * Side effects:
 *
 * Call context:
 *	interrupt
----------------------------------------------------------------*/
void prism2sta_ev_alloc(wlandevice_t *wlandev)
{
	/* The MAC freed a transmit buffer; allow the stack to queue
	 * more frames. */
	netif_wake_queue(wlandev->netdev);
	return;
}

/*----------------------------------------------------------------
 * create_wlan
 *
 * Called at module init time.  This creates the wlandevice_t structure
 * and initializes it with relevant bits.
 *
 * Arguments:
 *	none
 *
 * Returns:
 *	the created wlandevice_t structure.
 *
 * Side effects:
 *	also allocates the priv/hw structures.
* * Call context: * process thread * ----------------------------------------------------------------*/ static wlandevice_t *create_wlan(void) { wlandevice_t *wlandev = NULL; hfa384x_t *hw = NULL; /* Alloc our structures */ wlandev = kmalloc(sizeof(wlandevice_t), GFP_KERNEL); hw = kmalloc(sizeof(hfa384x_t), GFP_KERNEL); if (!wlandev || !hw) { printk(KERN_ERR "%s: Memory allocation failure.\n", dev_info); kfree(wlandev); kfree(hw); return NULL; } /* Clear all the structs */ memset(wlandev, 0, sizeof(wlandevice_t)); memset(hw, 0, sizeof(hfa384x_t)); /* Initialize the network device object. */ wlandev->nsdname = dev_info; wlandev->msdstate = WLAN_MSD_HWPRESENT_PENDING; wlandev->priv = hw; wlandev->open = prism2sta_open; wlandev->close = prism2sta_close; wlandev->reset = prism2sta_reset; wlandev->txframe = prism2sta_txframe; wlandev->mlmerequest = prism2sta_mlmerequest; wlandev->set_multicast_list = prism2sta_setmulticast; wlandev->tx_timeout = hfa384x_tx_timeout; wlandev->nsdcaps = P80211_NSDCAP_HWFRAGMENT | P80211_NSDCAP_AUTOJOIN; /* Initialize the device private data structure. 
*/ hw->dot11_desired_bss_type = 1; return wlandev; } void prism2sta_commsqual_defer(struct work_struct *data) { hfa384x_t *hw = container_of(data, struct hfa384x, commsqual_bh); wlandevice_t *wlandev = hw->wlandev; hfa384x_bytestr32_t ssid; struct p80211msg_dot11req_mibget msg; p80211item_uint32_t *mibitem = (p80211item_uint32_t *) &msg.mibattribute.data; int result = 0; if (hw->wlandev->hwremoved) goto done; /* we don't care if we're in AP mode */ if ((wlandev->macmode == WLAN_MACMODE_NONE) || (wlandev->macmode == WLAN_MACMODE_ESS_AP)) { goto done; } /* It only makes sense to poll these in non-IBSS */ if (wlandev->macmode != WLAN_MACMODE_IBSS_STA) { result = hfa384x_drvr_getconfig( hw, HFA384x_RID_DBMCOMMSQUALITY, &hw->qual, HFA384x_RID_DBMCOMMSQUALITY_LEN); if (result) { printk(KERN_ERR "error fetching commsqual\n"); goto done; } pr_debug("commsqual %d %d %d\n", le16_to_cpu(hw->qual.CQ_currBSS), le16_to_cpu(hw->qual.ASL_currBSS), le16_to_cpu(hw->qual.ANL_currFC)); } /* Get the signal rate */ msg.msgcode = DIDmsg_dot11req_mibget; mibitem->did = DIDmib_p2_p2MAC_p2CurrentTxRate; result = p80211req_dorequest(wlandev, (u8 *) &msg); if (result) { pr_debug("get signal rate failed, result = %d\n", result); goto done; } switch (mibitem->data) { case HFA384x_RATEBIT_1: hw->txrate = 10; break; case HFA384x_RATEBIT_2: hw->txrate = 20; break; case HFA384x_RATEBIT_5dot5: hw->txrate = 55; break; case HFA384x_RATEBIT_11: hw->txrate = 110; break; default: pr_debug("Bad ratebit (%d)\n", mibitem->data); } /* Lastly, we need to make sure the BSSID didn't change on us */ result = hfa384x_drvr_getconfig(hw, HFA384x_RID_CURRENTBSSID, wlandev->bssid, WLAN_BSSID_LEN); if (result) { pr_debug("getconfig(0x%02x) failed, result = %d\n", HFA384x_RID_CURRENTBSSID, result); goto done; } result = hfa384x_drvr_getconfig(hw, HFA384x_RID_CURRENTSSID, &ssid, sizeof(ssid)); if (result) { pr_debug("getconfig(0x%02x) failed, result = %d\n", HFA384x_RID_CURRENTSSID, result); goto done; } 
prism2mgmt_bytestr2pstr((hfa384x_bytestr_t *) &ssid, (p80211pstrd_t *) &wlandev->ssid); /* Reschedule timer */ mod_timer(&hw->commsqual_timer, jiffies + HZ); done: ; } void prism2sta_commsqual_timer(unsigned long data) { hfa384x_t *hw = (hfa384x_t *) data; schedule_work(&hw->commsqual_bh); }
gpl-2.0
MassStash/htc_jewel_kernel_sense
drivers/media/radio/radio-aztech.c
7299
5060
/*
 * radio-aztech.c - Aztech radio card driver
 *
 * Converted to the radio-isa framework by Hans Verkuil <hans.verkuil@xs4all.nl>
 * Converted to V4L2 API by Mauro Carvalho Chehab <mchehab@infradead.org>
 * Adapted to support the Video for Linux API by
 * Russell Kroll <rkroll@exploits.org>. Based on original tuner code by:
 *
 *    Quay Ly
 *    Donald Song
 *    Jason Lewis      (jlewis@twilight.vtc.vsc.edu)
 *    Scott McGrath    (smcgrath@twilight.vtc.vsc.edu)
 *    William McGrath  (wmcgrath@twilight.vtc.vsc.edu)
 *
 * Fully tested with the Keene USB FM Transmitter and the v4l2-compliance tool.
 */

#include <linux/module.h>	/* Modules */
#include <linux/init.h>		/* Initdata */
#include <linux/ioport.h>	/* request_region */
#include <linux/delay.h>	/* udelay */
#include <linux/videodev2.h>	/* kernel radio structs */
#include <linux/io.h>		/* outb, outb_p */
#include <linux/slab.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-ctrls.h>
#include "radio-isa.h"

MODULE_AUTHOR("Russell Kroll, Quay Lu, Donald Song, Jason Lewis, Scott McGrath, William McGrath");
MODULE_DESCRIPTION("A driver for the Aztech radio card.");
MODULE_LICENSE("GPL");
MODULE_VERSION("1.0.0");

/* acceptable ports: 0x350 (JP3 shorted), 0x358 (JP3 open) */

#ifndef CONFIG_RADIO_AZTECH_PORT
#define CONFIG_RADIO_AZTECH_PORT -1
#endif

/* Up to two cards can be driven (one per jumper-selectable port). */
#define AZTECH_MAX 2

/* I/O port per card instance; unset slots default to -1 (not present). */
static int io[AZTECH_MAX] = { [0] = CONFIG_RADIO_AZTECH_PORT,
			      [1 ... (AZTECH_MAX - 1)] = -1 };
/* Requested /dev/radioN minor per card; -1 means "first free". */
static int radio_nr[AZTECH_MAX] = { [0 ... (AZTECH_MAX - 1)] = -1 };
/* Delay in microseconds between serial-bus transitions to the tuner. */
static const int radio_wait_time = 1000;

module_param_array(io, int, NULL, 0444);
MODULE_PARM_DESC(io, "I/O addresses of the Aztech card (0x350 or 0x358)");
module_param_array(radio_nr, int, NULL, 0444);
MODULE_PARM_DESC(radio_nr, "Radio device numbers");

/* Per-card state: the generic radio-isa card plus the cached volume
 * bits, which must be re-sent with every register write. */
struct aztech {
	struct radio_isa_card isa;
	int curvol;
};

/*
 * Bit-bang a '0' data bit to the tuner: present the bit with the
 * clock low (+2 keeps the volume bits intact), then raise the clock
 * (+64).  NOTE(review): bit/clock mask meanings inferred from usage —
 * confirm against the card's tuner datasheet.
 */
static void send_0_byte(struct aztech *az)
{
	udelay(radio_wait_time);
	outb_p(2 + az->curvol, az->isa.io);
	outb_p(64 + 2 + az->curvol, az->isa.io);
}

/* Bit-bang a '1' data bit (data line = +128), same clocking as above. */
static void send_1_byte(struct aztech *az)
{
	udelay(radio_wait_time);
	outb_p(128 + 2 + az->curvol, az->isa.io);
	outb_p(128 + 64 + 2 + az->curvol, az->isa.io);
}

/*
 * Allocate a zeroed per-card structure for the radio-isa framework.
 * Returns a pointer to the embedded radio_isa_card, or NULL on OOM.
 */
static struct radio_isa_card *aztech_alloc(void)
{
	struct aztech *az = kzalloc(sizeof(*az), GFP_KERNEL);

	return az ? &az->isa : NULL;
}

/*
 * Program the tuner PLL with a new frequency.
 *
 * @isa:  the radio-isa card (embedded in struct aztech)
 * @freq: frequency in the units radio-isa hands us; the IF offset is
 *        added and the result divided down to 50 kHz steps before
 *        being shifted out serially, LSB first.
 *
 * Always returns 0 (the hardware gives no error feedback).
 */
static int aztech_s_frequency(struct radio_isa_card *isa, u32 freq)
{
	struct aztech *az = container_of(isa, struct aztech, isa);
	int i;

	freq += 171200;			/* Add 10.7 MHz IF */
	freq /= 800;			/* Convert to 50 kHz units */

	send_0_byte(az);		/*  0: LSB of frequency */

	for (i = 0; i < 13; i++)	/*   : frequency bits (1-13) */
		if (freq & (1 << i))
			send_1_byte(az);
		else
			send_0_byte(az);

	send_0_byte(az);	/* 14: test bit - always 0 */
	send_0_byte(az);	/* 15: test bit - always 0 */
	send_0_byte(az);	/* 16: band data 0 - always 0 */
	if (isa->stereo)	/* 17: stereo (1 to enable) */
		send_1_byte(az);
	else
		send_0_byte(az);

	send_1_byte(az);	/* 18: band data 1 - unknown */
	send_0_byte(az);	/* 19: time base - always 0 */
	send_0_byte(az);	/* 20: spacing (0 = 25 kHz) */
	send_1_byte(az);	/* 21: spacing (1 = 25 kHz) */
	send_0_byte(az);	/* 22: spacing (0 = 25 kHz) */
	send_1_byte(az);	/* 23: AM/FM (FM = 1, always) */

	/* latch frequency */
	udelay(radio_wait_time);
	outb_p(128 + 64 + az->curvol, az->isa.io);

	return 0;
}

/* thanks to Michael Dwyer for giving me a dose of clues in
 * the signal strength department..
 *
 * This card has a stereo bit - bit 0 set = mono, not set = stereo
 */
static u32 aztech_g_rxsubchans(struct radio_isa_card *isa)
{
	if (inb(isa->io) & 1)
		return V4L2_TUNER_SUB_MONO;
	return V4L2_TUNER_SUB_STEREO;
}

/*
 * Switching mono/stereo requires reprogramming the PLL: the stereo
 * flag is bit 17 of the serial word, so just re-send the current
 * frequency (isa->stereo was updated by the framework).
 */
static int aztech_s_stereo(struct radio_isa_card *isa, bool stereo)
{
	return aztech_s_frequency(isa, isa->freq);
}

/*
 * Set mute/volume.  vol is 0..3 (max_volume below); mute is
 * implemented as volume 0.  The two volume bits are spread over
 * hardware bits 0 and 2, and the result is cached in curvol so the
 * bit-bang helpers can preserve it on every write.
 */
static int aztech_s_mute_volume(struct radio_isa_card *isa, bool mute, int vol)
{
	struct aztech *az = container_of(isa, struct aztech, isa);

	if (mute)
		vol = 0;
	az->curvol = (vol & 1) + ((vol & 2) << 1);
	outb(az->curvol, isa->io);
	return 0;
}

/* Callbacks consumed by the shared radio-isa framework. */
static const struct radio_isa_ops aztech_ops = {
	.alloc = aztech_alloc,
	.s_mute_volume = aztech_s_mute_volume,
	.s_frequency = aztech_s_frequency,
	.s_stereo = aztech_s_stereo,
	.g_rxsubchans = aztech_g_rxsubchans,
};

/* The only two addresses the card can be jumpered to (see JP3 note). */
static const int aztech_ioports[] = { 0x350, 0x358 };

static struct radio_isa_driver aztech_driver = {
	.driver = {
		.match = radio_isa_match,
		.probe = radio_isa_probe,
		.remove = radio_isa_remove,
		.driver = {
			.name = "radio-aztech",
		},
	},
	.io_params = io,
	.radio_nr_params = radio_nr,
	.io_ports = aztech_ioports,
	.num_of_io_ports = ARRAY_SIZE(aztech_ioports),
	.region_size = 2,
	.card = "Aztech Radio",
	.ops = &aztech_ops,
	.has_stereo = true,
	.max_volume = 3,
};

/* Register with the ISA bus driver model; probing happens per card. */
static int __init aztech_init(void)
{
	return isa_register_driver(&aztech_driver.driver, AZTECH_MAX);
}

static void __exit aztech_exit(void)
{
	isa_unregister_driver(&aztech_driver.driver);
}

module_init(aztech_init);
module_exit(aztech_exit);
gpl-2.0
micropi/a20-b2g-kernel
drivers/scsi/mesh.c
7811
53907
/* * SCSI low-level driver for the MESH (Macintosh Enhanced SCSI Hardware) * bus adaptor found on Power Macintosh computers. * We assume the MESH is connected to a DBDMA (descriptor-based DMA) * controller. * * Paul Mackerras, August 1996. * Copyright (C) 1996 Paul Mackerras. * * Apr. 21 2002 - BenH Rework bus reset code for new error handler * Add delay after initial bus reset * Add module parameters * * Sep. 27 2003 - BenH Move to new driver model, fix some write posting * issues * To do: * - handle aborts correctly * - retry arbitration if lost (unless higher levels do this for us) * - power down the chip when no device is detected */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/delay.h> #include <linux/types.h> #include <linux/string.h> #include <linux/blkdev.h> #include <linux/proc_fs.h> #include <linux/stat.h> #include <linux/interrupt.h> #include <linux/reboot.h> #include <linux/spinlock.h> #include <asm/dbdma.h> #include <asm/io.h> #include <asm/pgtable.h> #include <asm/prom.h> #include <asm/irq.h> #include <asm/hydra.h> #include <asm/processor.h> #include <asm/machdep.h> #include <asm/pmac_feature.h> #include <asm/pci-bridge.h> #include <asm/macio.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_device.h> #include <scsi/scsi_host.h> #include "mesh.h" #if 1 #undef KERN_DEBUG #define KERN_DEBUG KERN_WARNING #endif MODULE_AUTHOR("Paul Mackerras (paulus@samba.org)"); MODULE_DESCRIPTION("PowerMac MESH SCSI driver"); MODULE_LICENSE("GPL"); static int sync_rate = CONFIG_SCSI_MESH_SYNC_RATE; static int sync_targets = 0xff; static int resel_targets = 0xff; static int debug_targets = 0; /* print debug for these targets */ static int init_reset_delay = CONFIG_SCSI_MESH_RESET_DELAY_MS; module_param(sync_rate, int, 0); MODULE_PARM_DESC(sync_rate, "Synchronous rate (0..10, 0=async)"); module_param(sync_targets, int, 0); MODULE_PARM_DESC(sync_targets, "Bitmask of targets allowed to set synchronous"); 
module_param(resel_targets, int, 0);
MODULE_PARM_DESC(resel_targets, "Bitmask of targets allowed to set disconnect");
module_param(debug_targets, int, 0644);
MODULE_PARM_DESC(debug_targets, "Bitmask of debugged targets");
module_param(init_reset_delay, int, 0);
MODULE_PARM_DESC(init_reset_delay, "Initial bus reset delay (0=no reset)");

static int mesh_sync_period = 100;
static int mesh_sync_offset = 0;
static unsigned char use_active_neg = 0;  /* bit mask for SEQ_ACTIVE_NEG if used */

/* Per-target permission checks against the module-parameter bitmasks */
#define ALLOW_SYNC(tgt)		((sync_targets >> (tgt)) & 1)
#define ALLOW_RESEL(tgt)	((resel_targets >> (tgt)) & 1)
#define ALLOW_DEBUG(tgt)	((debug_targets >> (tgt)) & 1)
#define DEBUG_TARGET(cmd)	((cmd) && ALLOW_DEBUG((cmd)->device->id))

#undef MESH_DBG
#define N_DBG_LOG	50	/* per-target debug log depth */
#define N_DBG_SLOG	20	/* global (state-wide) debug log depth */
#define NUM_DBG_EVENTS 13
#undef	DBG_USE_TB	/* bombs on 601 */

/* One entry in the MESH_DBG event log: a printf format plus a snapshot
 * of phase, bus status and target at the time of the event. */
struct dbglog {
	char	*fmt;
	u32	tb;
	u8	phase;
	u8	bs0;
	u8	bs1;
	u8	tgt;
	int	d;
};

/* Overall bus/command state machine phases */
enum mesh_phase {
	idle,
	arbitrating,
	selecting,
	commanding,
	dataing,
	statusing,
	busfreeing,
	disconnecting,
	reselecting,
	sleeping
};

/* Sub-state for message-in/message-out handling */
enum msg_phase {
	msg_none,
	msg_out,
	msg_out_xxx,
	msg_out_last,
	msg_in,
	msg_in_bad,
};

/* Progress of synchronous data transfer (SDTR) negotiation per target */
enum sdtr_phase {
	do_sdtr,
	sdtr_sent,
	sdtr_done
};

/* Per-target state kept across disconnect/reselect */
struct mesh_target {
	enum sdtr_phase sdtr_state;
	int	sync_params;
	int	data_goes_out;		/* guess as to data direction */
	struct scsi_cmnd *current_req;
	u32	saved_ptr;
#ifdef MESH_DBG
	int	log_ix;
	int	n_log;
	struct dbglog log[N_DBG_LOG];
#endif
};

/* Driver state for one MESH host adapter */
struct mesh_state {
	volatile struct	mesh_regs __iomem *mesh;
	int	meshintr;
	volatile struct	dbdma_regs __iomem *dma;
	int	dmaintr;
	struct	Scsi_Host *host;
	struct	mesh_state *next;
	struct scsi_cmnd *request_q;	/* queue of not-yet-started commands, */
	struct scsi_cmnd *request_qtail; /* linked through host_scribble */
	enum mesh_phase phase;		/* what we're currently trying to do */
	enum msg_phase msgphase;
	int	conn_tgt;	/* target we're connected to */
	struct scsi_cmnd *current_req;	/* req we're currently working on */
	int	data_ptr;
	int	dma_started;
	int	dma_count;
	int	stat;
	int	aborting;
	int	expect_reply;
	int	n_msgin;
	u8	msgin[16];
	int	n_msgout;
	int	last_n_msgout;
	u8	msgout[16];
	struct dbdma_cmd *dma_cmds;	/* space for dbdma commands, aligned */
	dma_addr_t dma_cmd_bus;
	void	*dma_cmd_space;
	int	dma_cmd_size;
	int	clk_freq;
	struct mesh_target tgts[8];
	struct macio_dev *mdev;
	struct pci_dev* pdev;
#ifdef MESH_DBG
	int	log_ix;
	int	n_log;
	struct dbglog log[N_DBG_SLOG];
#endif
};

/*
 * Driver is too messy, we need a few prototypes...
 */
static void mesh_done(struct mesh_state *ms, int start_next);
static void mesh_interrupt(struct mesh_state *ms);
static void cmd_complete(struct mesh_state *ms);
static void set_dma_cmds(struct mesh_state *ms, struct scsi_cmnd *cmd);
static void halt_dma(struct mesh_state *ms);
static void phase_mismatch(struct mesh_state *ms);

/*
 * Some debugging & logging routines
 */
#ifdef MESH_DBG

/* Read the PowerPC timebase for event timestamps (0 unless DBG_USE_TB). */
static inline u32 readtb(void)
{
	u32 tb;

#ifdef DBG_USE_TB
	/* Beware: if you enable this, it will crash on 601s. */
	asm ("mftb %0" : "=r" (tb) : );
#else
	tb = 0;
#endif
	return tb;
}

/* Record one event, with a register snapshot, in both the per-target
 * log (for ms->conn_tgt) and the global log.  Both are ring buffers. */
static void dlog(struct mesh_state *ms, char *fmt, int a)
{
	struct mesh_target *tp = &ms->tgts[ms->conn_tgt];
	struct dbglog *tlp, *slp;

	tlp = &tp->log[tp->log_ix];
	slp = &ms->log[ms->log_ix];
	tlp->fmt = fmt;
	tlp->tb = readtb();
	tlp->phase = (ms->msgphase << 4) + ms->phase;
	tlp->bs0 = ms->mesh->bus_status0;
	tlp->bs1 = ms->mesh->bus_status1;
	tlp->tgt = ms->conn_tgt;
	tlp->d = a;
	*slp = *tlp;
	if (++tp->log_ix >= N_DBG_LOG)
		tp->log_ix = 0;
	if (tp->n_log < N_DBG_LOG)
		++tp->n_log;
	if (++ms->log_ix >= N_DBG_SLOG)
		ms->log_ix = 0;
	if (ms->n_log < N_DBG_SLOG)
		++ms->n_log;
}

/* Print and empty the per-target debug log for target t. */
static void dumplog(struct mesh_state *ms, int t)
{
	struct mesh_target *tp = &ms->tgts[t];
	struct dbglog *lp;
	int i;

	if (tp->n_log == 0)
		return;
	i = tp->log_ix - tp->n_log;
	if (i < 0)
		i += N_DBG_LOG;
	tp->n_log = 0;
	do {
		lp = &tp->log[i];
		printk(KERN_DEBUG "mesh log %d: bs=%.2x%.2x ph=%.2x ",
		       t, lp->bs1, lp->bs0, lp->phase);
#ifdef DBG_USE_TB
		printk("tb=%10u ", lp->tb);
#endif
		printk(lp->fmt, lp->d);
		printk("\n");
		if (++i >= N_DBG_LOG)
			i = 0;
	} while (i != tp->log_ix);
}

/* Print and empty the global debug log. */
static void dumpslog(struct mesh_state *ms)
{
	struct dbglog *lp;
	int i;

	if (ms->n_log == 0)
		return;
	i = ms->log_ix - ms->n_log;
	if (i < 0)
		i += N_DBG_SLOG;
	ms->n_log = 0;
	do {
		lp = &ms->log[i];
		printk(KERN_DEBUG "mesh log: bs=%.2x%.2x ph=%.2x t%d ",
		       lp->bs1, lp->bs0, lp->phase, lp->tgt);
#ifdef DBG_USE_TB
		printk("tb=%10u ", lp->tb);
#endif
		printk(lp->fmt, lp->d);
		printk("\n");
		if (++i >= N_DBG_SLOG)
			i = 0;
	} while (i != ms->log_ix);
}

#else

/* MESH_DBG disabled: logging compiles away to nothing. */
static inline void dlog(struct mesh_state *ms, char *fmt, int a)
{}
static inline void dumplog(struct mesh_state *ms, int tgt)
{}
static inline void dumpslog(struct mesh_state *ms)
{}

#endif /* MESH_DBG */

/* Pack four bytes into one word, mostly for dlog register snapshots. */
#define MKWORD(a, b, c, d)	(((a) << 24) + ((b) << 16) + ((c) << 8) + (d))

/* Dump chip registers, DBDMA registers and driver state for debugging. */
static void
mesh_dump_regs(struct mesh_state *ms)
{
	volatile struct mesh_regs __iomem *mr = ms->mesh;
	volatile struct dbdma_regs __iomem *md = ms->dma;
	int t;
	struct mesh_target *tp;

	printk(KERN_DEBUG "mesh: state at %p, regs at %p, dma at %p\n",
	       ms, mr, md);
	printk(KERN_DEBUG " ct=%4x seq=%2x bs=%4x fc=%2x "
	       "exc=%2x err=%2x im=%2x int=%2x sp=%2x\n",
	       (mr->count_hi << 8) + mr->count_lo, mr->sequence,
	       (mr->bus_status1 << 8) + mr->bus_status0, mr->fifo_count,
	       mr->exception, mr->error, mr->intr_mask, mr->interrupt,
	       mr->sync_params);
	/* note: draining the fifo here is destructive */
	while(in_8(&mr->fifo_count))
		printk(KERN_DEBUG " fifo data=%.2x\n",in_8(&mr->fifo));
	printk(KERN_DEBUG " dma stat=%x cmdptr=%x\n",
	       in_le32(&md->status), in_le32(&md->cmdptr));
	printk(KERN_DEBUG " phase=%d msgphase=%d conn_tgt=%d data_ptr=%d\n",
	       ms->phase, ms->msgphase, ms->conn_tgt, ms->data_ptr);
	printk(KERN_DEBUG " dma_st=%d dma_ct=%d n_msgout=%d\n",
	       ms->dma_started, ms->dma_count, ms->n_msgout);
	for (t = 0; t < 8; ++t) {
		tp = &ms->tgts[t];
		if (tp->current_req == NULL)
			continue;
		printk(KERN_DEBUG " target %d: req=%p goes_out=%d saved_ptr=%d\n",
		       t, tp->current_req, tp->data_goes_out, tp->saved_ptr);
	}
}

/*
 * Flush write buffers on the bus path to the mesh
 * (a dummy read forces posted writes to complete)
 */
static inline void mesh_flush_io(volatile struct mesh_regs __iomem *mr)
{
	(void)in_8(&mr->mesh_id);
}

/*
 * Complete a SCSI command
 */
static void mesh_completed(struct mesh_state *ms, struct scsi_cmnd *cmd)
{
	(*cmd->scsi_done)(cmd);
}

/* Called with meshinterrupt disabled, initialize the chipset
 * and eventually do the initial bus reset. The lock must not be
 * held since we can schedule.
 */
static void mesh_init(struct mesh_state *ms)
{
	volatile struct mesh_regs __iomem *mr = ms->mesh;
	volatile struct dbdma_regs __iomem *md = ms->dma;

	mesh_flush_io(mr);
	udelay(100);

	/* Reset controller */
	out_le32(&md->control, (RUN|PAUSE|FLUSH|WAKE) << 16);	/* stop dma */
	out_8(&mr->exception, 0xff);	/* clear all exception bits */
	out_8(&mr->error, 0xff);	/* clear all error bits */
	out_8(&mr->sequence, SEQ_RESETMESH);
	mesh_flush_io(mr);
	udelay(10);
	out_8(&mr->intr_mask, INT_ERROR | INT_EXCEPTION | INT_CMDDONE);
	out_8(&mr->source_id, ms->host->this_id);
	out_8(&mr->sel_timeout, 25);	/* 250ms */
	out_8(&mr->sync_params, ASYNC_PARAMS);

	if (init_reset_delay) {
		printk(KERN_INFO "mesh: performing initial bus reset...\n");

		/* Reset bus */
		out_8(&mr->bus_status1, BS1_RST);	/* assert RST */
		mesh_flush_io(mr);
		udelay(30);			/* leave it on for >= 25us */
		out_8(&mr->bus_status1, 0);	/* negate RST */
		mesh_flush_io(mr);

		/* Wait for bus to come back */
		msleep(init_reset_delay);
	}

	/* Reconfigure controller */
	out_8(&mr->interrupt, 0xff);	/* clear all interrupt bits */
	out_8(&mr->sequence, SEQ_FLUSHFIFO);
	mesh_flush_io(mr);
	udelay(1);
	out_8(&mr->sync_params, ASYNC_PARAMS);
	out_8(&mr->sequence, SEQ_ENBRESEL);

	ms->phase = idle;
	ms->msgphase = msg_none;
}

/* Make cmd the current request and start arbitrating for the bus,
 * working around several chip quirks around reselection races. */
static void mesh_start_cmd(struct mesh_state *ms, struct scsi_cmnd *cmd)
{
	volatile struct mesh_regs __iomem *mr = ms->mesh;
	int t, id;

	id = cmd->device->id;
	ms->current_req = cmd;
	ms->tgts[id].data_goes_out = cmd->sc_data_direction == DMA_TO_DEVICE;
	ms->tgts[id].current_req = cmd;

#if 1
	if (DEBUG_TARGET(cmd)) {
		int i;
		printk(KERN_DEBUG "mesh_start: %p tgt=%d cmd=", cmd, id);
		for (i = 0; i < cmd->cmd_len; ++i)
			printk(" %x", cmd->cmnd[i]);
		printk(" use_sg=%d buffer=%p bufflen=%u\n",
		       scsi_sg_count(cmd), scsi_sglist(cmd), scsi_bufflen(cmd));
	}
#endif
	if (ms->dma_started)
		panic("mesh: double DMA start !\n");

	/* reset per-command state */
	ms->phase = arbitrating;
	ms->msgphase = msg_none;
	ms->data_ptr = 0;
	ms->dma_started = 0;
	ms->n_msgout = 0;
	ms->last_n_msgout = 0;
	ms->expect_reply = 0;
	ms->conn_tgt = id;
	ms->tgts[id].saved_ptr = 0;
	ms->stat = DID_OK;
	ms->aborting = 0;
#ifdef MESH_DBG
	ms->tgts[id].n_log = 0;
	dlog(ms, "start cmd=%x", (int) cmd);
#endif

	/* Off we go */
	dlog(ms, "about to arb, intr/exc/err/fc=%.8x",
	     MKWORD(mr->interrupt, mr->exception, mr->error, mr->fifo_count));
	out_8(&mr->interrupt, INT_CMDDONE);
	out_8(&mr->sequence, SEQ_ENBRESEL);
	mesh_flush_io(mr);
	udelay(1);

	if (in_8(&mr->bus_status1) & (BS1_BSY | BS1_SEL)) {
		/*
		 * Some other device has the bus or is arbitrating for it -
		 * probably a target which is about to reselect us.
		 */
		dlog(ms, "busy b4 arb, intr/exc/err/fc=%.8x",
		     MKWORD(mr->interrupt, mr->exception, mr->error,
			    mr->fifo_count));
		for (t = 100; t > 0; --t) {
			if ((in_8(&mr->bus_status1) & (BS1_BSY | BS1_SEL)) == 0)
				break;
			if (in_8(&mr->interrupt) != 0) {
				dlog(ms, "intr b4 arb, intr/exc/err/fc=%.8x",
				     MKWORD(mr->interrupt, mr->exception,
					    mr->error, mr->fifo_count));
				mesh_interrupt(ms);
				if (ms->phase != arbitrating)
					return;
			}
			udelay(1);
		}
		if (in_8(&mr->bus_status1) & (BS1_BSY | BS1_SEL)) {
			/* XXX should try again in a little while */
			ms->stat = DID_BUS_BUSY;
			ms->phase = idle;
			mesh_done(ms, 0);
			return;
		}
	}

	/*
	 * Apparently the mesh has a bug where it will assert both its
	 * own bit and the target's bit on the bus during arbitration.
	 */
	out_8(&mr->dest_id, mr->source_id);

	/*
	 * There appears to be a race with reselection sometimes,
	 * where a target reselects us just as we issue the
	 * arbitrate command.  It seems that then the arbitrate
	 * command just hangs waiting for the bus to be free
	 * without giving us a reselection exception.
	 * The only way I have found to get it to respond correctly
	 * is this: disable reselection before issuing the arbitrate
	 * command, then after issuing it, if it looks like a target
	 * is trying to reselect us, reset the mesh and then enable
	 * reselection.
	 */
	out_8(&mr->sequence, SEQ_DISRESEL);
	if (in_8(&mr->interrupt) != 0) {
		dlog(ms, "intr after disresel, intr/exc/err/fc=%.8x",
		     MKWORD(mr->interrupt, mr->exception, mr->error,
			    mr->fifo_count));
		mesh_interrupt(ms);
		if (ms->phase != arbitrating)
			return;
		dlog(ms, "after intr after disresel, intr/exc/err/fc=%.8x",
		     MKWORD(mr->interrupt, mr->exception, mr->error,
			    mr->fifo_count));
	}

	out_8(&mr->sequence, SEQ_ARBITRATE);

	for (t = 230; t > 0; --t) {
		if (in_8(&mr->interrupt) != 0)
			break;
		udelay(1);
	}
	dlog(ms, "after arb, intr/exc/err/fc=%.8x",
	     MKWORD(mr->interrupt, mr->exception, mr->error, mr->fifo_count));
	if (in_8(&mr->interrupt) == 0 && (in_8(&mr->bus_status1) & BS1_SEL)
	    && (in_8(&mr->bus_status0) & BS0_IO)) {
		/* looks like a reselection - try resetting the mesh */
		dlog(ms, "resel? after arb, intr/exc/err/fc=%.8x",
		     MKWORD(mr->interrupt, mr->exception, mr->error,
			    mr->fifo_count));
		out_8(&mr->sequence, SEQ_RESETMESH);
		mesh_flush_io(mr);
		udelay(10);
		out_8(&mr->interrupt, INT_ERROR | INT_EXCEPTION | INT_CMDDONE);
		out_8(&mr->intr_mask, INT_ERROR | INT_EXCEPTION | INT_CMDDONE);
		out_8(&mr->sequence, SEQ_ENBRESEL);
		mesh_flush_io(mr);
		for (t = 10; t > 0 && in_8(&mr->interrupt) == 0; --t)
			udelay(1);
		dlog(ms, "tried reset after arb, intr/exc/err/fc=%.8x",
		     MKWORD(mr->interrupt, mr->exception, mr->error,
			    mr->fifo_count));
#ifndef MESH_MULTIPLE_HOSTS
		if (in_8(&mr->interrupt) == 0 && (in_8(&mr->bus_status1) & BS1_SEL)
		    && (in_8(&mr->bus_status0) & BS0_IO)) {
			printk(KERN_ERR "mesh: controller not responding"
			       " to reselection!\n");
			/*
			 * If this is a target reselecting us, and the
			 * mesh isn't responding, the higher levels of
			 * the scsi code will eventually time out and
			 * reset the bus.
			 */
		}
#endif
	}
}

/*
 * Start the next command for a MESH.
* Should be called with interrupts disabled. */ static void mesh_start(struct mesh_state *ms) { struct scsi_cmnd *cmd, *prev, *next; if (ms->phase != idle || ms->current_req != NULL) { printk(KERN_ERR "inappropriate mesh_start (phase=%d, ms=%p)", ms->phase, ms); return; } while (ms->phase == idle) { prev = NULL; for (cmd = ms->request_q; ; cmd = (struct scsi_cmnd *) cmd->host_scribble) { if (cmd == NULL) return; if (ms->tgts[cmd->device->id].current_req == NULL) break; prev = cmd; } next = (struct scsi_cmnd *) cmd->host_scribble; if (prev == NULL) ms->request_q = next; else prev->host_scribble = (void *) next; if (next == NULL) ms->request_qtail = prev; mesh_start_cmd(ms, cmd); } } static void mesh_done(struct mesh_state *ms, int start_next) { struct scsi_cmnd *cmd; struct mesh_target *tp = &ms->tgts[ms->conn_tgt]; cmd = ms->current_req; ms->current_req = NULL; tp->current_req = NULL; if (cmd) { cmd->result = (ms->stat << 16) + cmd->SCp.Status; if (ms->stat == DID_OK) cmd->result += (cmd->SCp.Message << 8); if (DEBUG_TARGET(cmd)) { printk(KERN_DEBUG "mesh_done: result = %x, data_ptr=%d, buflen=%d\n", cmd->result, ms->data_ptr, scsi_bufflen(cmd)); #if 0 /* needs to use sg? */ if ((cmd->cmnd[0] == 0 || cmd->cmnd[0] == 0x12 || cmd->cmnd[0] == 3) && cmd->request_buffer != 0) { unsigned char *b = cmd->request_buffer; printk(KERN_DEBUG "buffer = %x %x %x %x %x %x %x %x\n", b[0], b[1], b[2], b[3], b[4], b[5], b[6], b[7]); } #endif } cmd->SCp.this_residual -= ms->data_ptr; mesh_completed(ms, cmd); } if (start_next) { out_8(&ms->mesh->sequence, SEQ_ENBRESEL); mesh_flush_io(ms->mesh); udelay(1); ms->phase = idle; mesh_start(ms); } } static inline void add_sdtr_msg(struct mesh_state *ms) { int i = ms->n_msgout; ms->msgout[i] = EXTENDED_MESSAGE; ms->msgout[i+1] = 3; ms->msgout[i+2] = EXTENDED_SDTR; ms->msgout[i+3] = mesh_sync_period/4; ms->msgout[i+4] = (ALLOW_SYNC(ms->conn_tgt)? 
mesh_sync_offset: 0); ms->n_msgout = i + 5; } static void set_sdtr(struct mesh_state *ms, int period, int offset) { struct mesh_target *tp = &ms->tgts[ms->conn_tgt]; volatile struct mesh_regs __iomem *mr = ms->mesh; int v, tr; tp->sdtr_state = sdtr_done; if (offset == 0) { /* asynchronous */ if (SYNC_OFF(tp->sync_params)) printk(KERN_INFO "mesh: target %d now asynchronous\n", ms->conn_tgt); tp->sync_params = ASYNC_PARAMS; out_8(&mr->sync_params, ASYNC_PARAMS); return; } /* * We need to compute ceil(clk_freq * period / 500e6) - 2 * without incurring overflow. */ v = (ms->clk_freq / 5000) * period; if (v <= 250000) { /* special case: sync_period == 5 * clk_period */ v = 0; /* units of tr are 100kB/s */ tr = (ms->clk_freq + 250000) / 500000; } else { /* sync_period == (v + 2) * 2 * clk_period */ v = (v + 99999) / 100000 - 2; if (v > 15) v = 15; /* oops */ tr = ((ms->clk_freq / (v + 2)) + 199999) / 200000; } if (offset > 15) offset = 15; /* can't happen */ tp->sync_params = SYNC_PARAMS(offset, v); out_8(&mr->sync_params, tp->sync_params); printk(KERN_INFO "mesh: target %d synchronous at %d.%d MB/s\n", ms->conn_tgt, tr/10, tr%10); } static void start_phase(struct mesh_state *ms) { int i, seq, nb; volatile struct mesh_regs __iomem *mr = ms->mesh; volatile struct dbdma_regs __iomem *md = ms->dma; struct scsi_cmnd *cmd = ms->current_req; struct mesh_target *tp = &ms->tgts[ms->conn_tgt]; dlog(ms, "start_phase nmo/exc/fc/seq = %.8x", MKWORD(ms->n_msgout, mr->exception, mr->fifo_count, mr->sequence)); out_8(&mr->interrupt, INT_ERROR | INT_EXCEPTION | INT_CMDDONE); seq = use_active_neg + (ms->n_msgout? SEQ_ATN: 0); switch (ms->msgphase) { case msg_none: break; case msg_in: out_8(&mr->count_hi, 0); out_8(&mr->count_lo, 1); out_8(&mr->sequence, SEQ_MSGIN + seq); ms->n_msgin = 0; return; case msg_out: /* * To make sure ATN drops before we assert ACK for * the last byte of the message, we have to do the * last byte specially. 
*/ if (ms->n_msgout <= 0) { printk(KERN_ERR "mesh: msg_out but n_msgout=%d\n", ms->n_msgout); mesh_dump_regs(ms); ms->msgphase = msg_none; break; } if (ALLOW_DEBUG(ms->conn_tgt)) { printk(KERN_DEBUG "mesh: sending %d msg bytes:", ms->n_msgout); for (i = 0; i < ms->n_msgout; ++i) printk(" %x", ms->msgout[i]); printk("\n"); } dlog(ms, "msgout msg=%.8x", MKWORD(ms->n_msgout, ms->msgout[0], ms->msgout[1], ms->msgout[2])); out_8(&mr->count_hi, 0); out_8(&mr->sequence, SEQ_FLUSHFIFO); mesh_flush_io(mr); udelay(1); /* * If ATN is not already asserted, we assert it, then * issue a SEQ_MSGOUT to get the mesh to drop ACK. */ if ((in_8(&mr->bus_status0) & BS0_ATN) == 0) { dlog(ms, "bus0 was %.2x explicitly asserting ATN", mr->bus_status0); out_8(&mr->bus_status0, BS0_ATN); /* explicit ATN */ mesh_flush_io(mr); udelay(1); out_8(&mr->count_lo, 1); out_8(&mr->sequence, SEQ_MSGOUT + seq); out_8(&mr->bus_status0, 0); /* release explicit ATN */ dlog(ms,"hace: after explicit ATN bus0=%.2x",mr->bus_status0); } if (ms->n_msgout == 1) { /* * We can't issue the SEQ_MSGOUT without ATN * until the target has asserted REQ. The logic * in cmd_complete handles both situations: * REQ already asserted or not. 
*/ cmd_complete(ms); } else { out_8(&mr->count_lo, ms->n_msgout - 1); out_8(&mr->sequence, SEQ_MSGOUT + seq); for (i = 0; i < ms->n_msgout - 1; ++i) out_8(&mr->fifo, ms->msgout[i]); } return; default: printk(KERN_ERR "mesh bug: start_phase msgphase=%d\n", ms->msgphase); } switch (ms->phase) { case selecting: out_8(&mr->dest_id, ms->conn_tgt); out_8(&mr->sequence, SEQ_SELECT + SEQ_ATN); break; case commanding: out_8(&mr->sync_params, tp->sync_params); out_8(&mr->count_hi, 0); if (cmd) { out_8(&mr->count_lo, cmd->cmd_len); out_8(&mr->sequence, SEQ_COMMAND + seq); for (i = 0; i < cmd->cmd_len; ++i) out_8(&mr->fifo, cmd->cmnd[i]); } else { out_8(&mr->count_lo, 6); out_8(&mr->sequence, SEQ_COMMAND + seq); for (i = 0; i < 6; ++i) out_8(&mr->fifo, 0); } break; case dataing: /* transfer data, if any */ if (!ms->dma_started) { set_dma_cmds(ms, cmd); out_le32(&md->cmdptr, virt_to_phys(ms->dma_cmds)); out_le32(&md->control, (RUN << 16) | RUN); ms->dma_started = 1; } nb = ms->dma_count; if (nb > 0xfff0) nb = 0xfff0; ms->dma_count -= nb; ms->data_ptr += nb; out_8(&mr->count_lo, nb); out_8(&mr->count_hi, nb >> 8); out_8(&mr->sequence, (tp->data_goes_out? 
SEQ_DATAOUT: SEQ_DATAIN) + SEQ_DMA_MODE + seq); break; case statusing: out_8(&mr->count_hi, 0); out_8(&mr->count_lo, 1); out_8(&mr->sequence, SEQ_STATUS + seq); break; case busfreeing: case disconnecting: out_8(&mr->sequence, SEQ_ENBRESEL); mesh_flush_io(mr); udelay(1); dlog(ms, "enbresel intr/exc/err/fc=%.8x", MKWORD(mr->interrupt, mr->exception, mr->error, mr->fifo_count)); out_8(&mr->sequence, SEQ_BUSFREE); break; default: printk(KERN_ERR "mesh: start_phase called with phase=%d\n", ms->phase); dumpslog(ms); } } static inline void get_msgin(struct mesh_state *ms) { volatile struct mesh_regs __iomem *mr = ms->mesh; int i, n; n = mr->fifo_count; if (n != 0) { i = ms->n_msgin; ms->n_msgin = i + n; for (; n > 0; --n) ms->msgin[i++] = in_8(&mr->fifo); } } static inline int msgin_length(struct mesh_state *ms) { int b, n; n = 1; if (ms->n_msgin > 0) { b = ms->msgin[0]; if (b == 1) { /* extended message */ n = ms->n_msgin < 2? 2: ms->msgin[1] + 2; } else if (0x20 <= b && b <= 0x2f) { /* 2-byte message */ n = 2; } } return n; } static void reselected(struct mesh_state *ms) { volatile struct mesh_regs __iomem *mr = ms->mesh; struct scsi_cmnd *cmd; struct mesh_target *tp; int b, t, prev; switch (ms->phase) { case idle: break; case arbitrating: if ((cmd = ms->current_req) != NULL) { /* put the command back on the queue */ cmd->host_scribble = (void *) ms->request_q; if (ms->request_q == NULL) ms->request_qtail = cmd; ms->request_q = cmd; tp = &ms->tgts[cmd->device->id]; tp->current_req = NULL; } break; case busfreeing: ms->phase = reselecting; mesh_done(ms, 0); break; case disconnecting: break; default: printk(KERN_ERR "mesh: reselected in phase %d/%d tgt %d\n", ms->msgphase, ms->phase, ms->conn_tgt); dumplog(ms, ms->conn_tgt); dumpslog(ms); } if (ms->dma_started) { printk(KERN_ERR "mesh: reselected with DMA started !\n"); halt_dma(ms); } ms->current_req = NULL; ms->phase = dataing; ms->msgphase = msg_in; ms->n_msgout = 0; ms->last_n_msgout = 0; prev = ms->conn_tgt; /* * We 
seem to get abortive reselections sometimes. */ while ((in_8(&mr->bus_status1) & BS1_BSY) == 0) { static int mesh_aborted_resels; mesh_aborted_resels++; out_8(&mr->interrupt, INT_ERROR | INT_EXCEPTION | INT_CMDDONE); mesh_flush_io(mr); udelay(1); out_8(&mr->sequence, SEQ_ENBRESEL); mesh_flush_io(mr); udelay(5); dlog(ms, "extra resel err/exc/fc = %.6x", MKWORD(0, mr->error, mr->exception, mr->fifo_count)); } out_8(&mr->interrupt, INT_ERROR | INT_EXCEPTION | INT_CMDDONE); mesh_flush_io(mr); udelay(1); out_8(&mr->sequence, SEQ_ENBRESEL); mesh_flush_io(mr); udelay(1); out_8(&mr->sync_params, ASYNC_PARAMS); /* * Find out who reselected us. */ if (in_8(&mr->fifo_count) == 0) { printk(KERN_ERR "mesh: reselection but nothing in fifo?\n"); ms->conn_tgt = ms->host->this_id; goto bogus; } /* get the last byte in the fifo */ do { b = in_8(&mr->fifo); dlog(ms, "reseldata %x", b); } while (in_8(&mr->fifo_count)); for (t = 0; t < 8; ++t) if ((b & (1 << t)) != 0 && t != ms->host->this_id) break; if (b != (1 << t) + (1 << ms->host->this_id)) { printk(KERN_ERR "mesh: bad reselection data %x\n", b); ms->conn_tgt = ms->host->this_id; goto bogus; } /* * Set up to continue with that target's transfer. 
*/ ms->conn_tgt = t; tp = &ms->tgts[t]; out_8(&mr->sync_params, tp->sync_params); if (ALLOW_DEBUG(t)) { printk(KERN_DEBUG "mesh: reselected by target %d\n", t); printk(KERN_DEBUG "mesh: saved_ptr=%x goes_out=%d cmd=%p\n", tp->saved_ptr, tp->data_goes_out, tp->current_req); } ms->current_req = tp->current_req; if (tp->current_req == NULL) { printk(KERN_ERR "mesh: reselected by tgt %d but no cmd!\n", t); goto bogus; } ms->data_ptr = tp->saved_ptr; dlog(ms, "resel prev tgt=%d", prev); dlog(ms, "resel err/exc=%.4x", MKWORD(0, 0, mr->error, mr->exception)); start_phase(ms); return; bogus: dumplog(ms, ms->conn_tgt); dumpslog(ms); ms->data_ptr = 0; ms->aborting = 1; start_phase(ms); } static void do_abort(struct mesh_state *ms) { ms->msgout[0] = ABORT; ms->n_msgout = 1; ms->aborting = 1; ms->stat = DID_ABORT; dlog(ms, "abort", 0); } static void handle_reset(struct mesh_state *ms) { int tgt; struct mesh_target *tp; struct scsi_cmnd *cmd; volatile struct mesh_regs __iomem *mr = ms->mesh; for (tgt = 0; tgt < 8; ++tgt) { tp = &ms->tgts[tgt]; if ((cmd = tp->current_req) != NULL) { cmd->result = DID_RESET << 16; tp->current_req = NULL; mesh_completed(ms, cmd); } ms->tgts[tgt].sdtr_state = do_sdtr; ms->tgts[tgt].sync_params = ASYNC_PARAMS; } ms->current_req = NULL; while ((cmd = ms->request_q) != NULL) { ms->request_q = (struct scsi_cmnd *) cmd->host_scribble; cmd->result = DID_RESET << 16; mesh_completed(ms, cmd); } ms->phase = idle; ms->msgphase = msg_none; out_8(&mr->interrupt, INT_ERROR | INT_EXCEPTION | INT_CMDDONE); out_8(&mr->sequence, SEQ_FLUSHFIFO); mesh_flush_io(mr); udelay(1); out_8(&mr->sync_params, ASYNC_PARAMS); out_8(&mr->sequence, SEQ_ENBRESEL); } static irqreturn_t do_mesh_interrupt(int irq, void *dev_id) { unsigned long flags; struct mesh_state *ms = dev_id; struct Scsi_Host *dev = ms->host; spin_lock_irqsave(dev->host_lock, flags); mesh_interrupt(ms); spin_unlock_irqrestore(dev->host_lock, flags); return IRQ_HANDLED; } static void handle_error(struct 
mesh_state *ms) { int err, exc, count; volatile struct mesh_regs __iomem *mr = ms->mesh; err = in_8(&mr->error); exc = in_8(&mr->exception); out_8(&mr->interrupt, INT_ERROR | INT_EXCEPTION | INT_CMDDONE); dlog(ms, "error err/exc/fc/cl=%.8x", MKWORD(err, exc, mr->fifo_count, mr->count_lo)); if (err & ERR_SCSIRESET) { /* SCSI bus was reset */ printk(KERN_INFO "mesh: SCSI bus reset detected: " "waiting for end..."); while ((in_8(&mr->bus_status1) & BS1_RST) != 0) udelay(1); printk("done\n"); handle_reset(ms); /* request_q is empty, no point in mesh_start() */ return; } if (err & ERR_UNEXPDISC) { /* Unexpected disconnect */ if (exc & EXC_RESELECTED) { reselected(ms); return; } if (!ms->aborting) { printk(KERN_WARNING "mesh: target %d aborted\n", ms->conn_tgt); dumplog(ms, ms->conn_tgt); dumpslog(ms); } out_8(&mr->interrupt, INT_CMDDONE); ms->stat = DID_ABORT; mesh_done(ms, 1); return; } if (err & ERR_PARITY) { if (ms->msgphase == msg_in) { printk(KERN_ERR "mesh: msg parity error, target %d\n", ms->conn_tgt); ms->msgout[0] = MSG_PARITY_ERROR; ms->n_msgout = 1; ms->msgphase = msg_in_bad; cmd_complete(ms); return; } if (ms->stat == DID_OK) { printk(KERN_ERR "mesh: parity error, target %d\n", ms->conn_tgt); ms->stat = DID_PARITY; } count = (mr->count_hi << 8) + mr->count_lo; if (count == 0) { cmd_complete(ms); } else { /* reissue the data transfer command */ out_8(&mr->sequence, mr->sequence); } return; } if (err & ERR_SEQERR) { if (exc & EXC_RESELECTED) { /* This can happen if we issue a command to get the bus just after the target reselects us. 
*/ static int mesh_resel_seqerr; mesh_resel_seqerr++; reselected(ms); return; } if (exc == EXC_PHASEMM) { static int mesh_phasemm_seqerr; mesh_phasemm_seqerr++; phase_mismatch(ms); return; } printk(KERN_ERR "mesh: sequence error (err=%x exc=%x)\n", err, exc); } else { printk(KERN_ERR "mesh: unknown error %x (exc=%x)\n", err, exc); } mesh_dump_regs(ms); dumplog(ms, ms->conn_tgt); if (ms->phase > selecting && (in_8(&mr->bus_status1) & BS1_BSY)) { /* try to do what the target wants */ do_abort(ms); phase_mismatch(ms); return; } ms->stat = DID_ERROR; mesh_done(ms, 1); } static void handle_exception(struct mesh_state *ms) { int exc; volatile struct mesh_regs __iomem *mr = ms->mesh; exc = in_8(&mr->exception); out_8(&mr->interrupt, INT_EXCEPTION | INT_CMDDONE); if (exc & EXC_RESELECTED) { static int mesh_resel_exc; mesh_resel_exc++; reselected(ms); } else if (exc == EXC_ARBLOST) { printk(KERN_DEBUG "mesh: lost arbitration\n"); ms->stat = DID_BUS_BUSY; mesh_done(ms, 1); } else if (exc == EXC_SELTO) { /* selection timed out */ ms->stat = DID_BAD_TARGET; mesh_done(ms, 1); } else if (exc == EXC_PHASEMM) { /* target wants to do something different: find out what it wants and do it. 
*/ phase_mismatch(ms); } else { printk(KERN_ERR "mesh: can't cope with exception %x\n", exc); mesh_dump_regs(ms); dumplog(ms, ms->conn_tgt); do_abort(ms); phase_mismatch(ms); } } static void handle_msgin(struct mesh_state *ms) { int i, code; struct scsi_cmnd *cmd = ms->current_req; struct mesh_target *tp = &ms->tgts[ms->conn_tgt]; if (ms->n_msgin == 0) return; code = ms->msgin[0]; if (ALLOW_DEBUG(ms->conn_tgt)) { printk(KERN_DEBUG "got %d message bytes:", ms->n_msgin); for (i = 0; i < ms->n_msgin; ++i) printk(" %x", ms->msgin[i]); printk("\n"); } dlog(ms, "msgin msg=%.8x", MKWORD(ms->n_msgin, code, ms->msgin[1], ms->msgin[2])); ms->expect_reply = 0; ms->n_msgout = 0; if (ms->n_msgin < msgin_length(ms)) goto reject; if (cmd) cmd->SCp.Message = code; switch (code) { case COMMAND_COMPLETE: break; case EXTENDED_MESSAGE: switch (ms->msgin[2]) { case EXTENDED_MODIFY_DATA_POINTER: ms->data_ptr += (ms->msgin[3] << 24) + ms->msgin[6] + (ms->msgin[4] << 16) + (ms->msgin[5] << 8); break; case EXTENDED_SDTR: if (tp->sdtr_state != sdtr_sent) { /* reply with an SDTR */ add_sdtr_msg(ms); /* limit period to at least his value, offset to no more than his */ if (ms->msgout[3] < ms->msgin[3]) ms->msgout[3] = ms->msgin[3]; if (ms->msgout[4] > ms->msgin[4]) ms->msgout[4] = ms->msgin[4]; set_sdtr(ms, ms->msgout[3], ms->msgout[4]); ms->msgphase = msg_out; } else { set_sdtr(ms, ms->msgin[3], ms->msgin[4]); } break; default: goto reject; } break; case SAVE_POINTERS: tp->saved_ptr = ms->data_ptr; break; case RESTORE_POINTERS: ms->data_ptr = tp->saved_ptr; break; case DISCONNECT: ms->phase = disconnecting; break; case ABORT: break; case MESSAGE_REJECT: if (tp->sdtr_state == sdtr_sent) set_sdtr(ms, 0, 0); break; case NOP: break; default: if (IDENTIFY_BASE <= code && code <= IDENTIFY_BASE + 7) { if (cmd == NULL) { do_abort(ms); ms->msgphase = msg_out; } else if (code != cmd->device->lun + IDENTIFY_BASE) { printk(KERN_WARNING "mesh: lun mismatch " "(%d != %d) on reselection from " "target 
%d\n", code - IDENTIFY_BASE, cmd->device->lun, ms->conn_tgt); } break; } goto reject; } return; reject: printk(KERN_WARNING "mesh: rejecting message from target %d:", ms->conn_tgt); for (i = 0; i < ms->n_msgin; ++i) printk(" %x", ms->msgin[i]); printk("\n"); ms->msgout[0] = MESSAGE_REJECT; ms->n_msgout = 1; ms->msgphase = msg_out; } /* * Set up DMA commands for transferring data. */ static void set_dma_cmds(struct mesh_state *ms, struct scsi_cmnd *cmd) { int i, dma_cmd, total, off, dtot; struct scatterlist *scl; struct dbdma_cmd *dcmds; dma_cmd = ms->tgts[ms->conn_tgt].data_goes_out? OUTPUT_MORE: INPUT_MORE; dcmds = ms->dma_cmds; dtot = 0; if (cmd) { int nseg; cmd->SCp.this_residual = scsi_bufflen(cmd); nseg = scsi_dma_map(cmd); BUG_ON(nseg < 0); if (nseg) { total = 0; off = ms->data_ptr; scsi_for_each_sg(cmd, scl, nseg, i) { u32 dma_addr = sg_dma_address(scl); u32 dma_len = sg_dma_len(scl); total += scl->length; if (off >= dma_len) { off -= dma_len; continue; } if (dma_len > 0xffff) panic("mesh: scatterlist element >= 64k"); st_le16(&dcmds->req_count, dma_len - off); st_le16(&dcmds->command, dma_cmd); st_le32(&dcmds->phy_addr, dma_addr + off); dcmds->xfer_status = 0; ++dcmds; dtot += dma_len - off; off = 0; } } } if (dtot == 0) { /* Either the target has overrun our buffer, or the caller didn't provide a buffer. 
*/ static char mesh_extra_buf[64]; dtot = sizeof(mesh_extra_buf); st_le16(&dcmds->req_count, dtot); st_le32(&dcmds->phy_addr, virt_to_phys(mesh_extra_buf)); dcmds->xfer_status = 0; ++dcmds; } dma_cmd += OUTPUT_LAST - OUTPUT_MORE; st_le16(&dcmds[-1].command, dma_cmd); memset(dcmds, 0, sizeof(*dcmds)); st_le16(&dcmds->command, DBDMA_STOP); ms->dma_count = dtot; } static void halt_dma(struct mesh_state *ms) { volatile struct dbdma_regs __iomem *md = ms->dma; volatile struct mesh_regs __iomem *mr = ms->mesh; struct scsi_cmnd *cmd = ms->current_req; int t, nb; if (!ms->tgts[ms->conn_tgt].data_goes_out) { /* wait a little while until the fifo drains */ t = 50; while (t > 0 && in_8(&mr->fifo_count) != 0 && (in_le32(&md->status) & ACTIVE) != 0) { --t; udelay(1); } } out_le32(&md->control, RUN << 16); /* turn off RUN bit */ nb = (mr->count_hi << 8) + mr->count_lo; dlog(ms, "halt_dma fc/count=%.6x", MKWORD(0, mr->fifo_count, 0, nb)); if (ms->tgts[ms->conn_tgt].data_goes_out) nb += mr->fifo_count; /* nb is the number of bytes not yet transferred to/from the target. 
*/ ms->data_ptr -= nb; dlog(ms, "data_ptr %x", ms->data_ptr); if (ms->data_ptr < 0) { printk(KERN_ERR "mesh: halt_dma: data_ptr=%d (nb=%d, ms=%p)\n", ms->data_ptr, nb, ms); ms->data_ptr = 0; #ifdef MESH_DBG dumplog(ms, ms->conn_tgt); dumpslog(ms); #endif /* MESH_DBG */ } else if (cmd && scsi_bufflen(cmd) && ms->data_ptr > scsi_bufflen(cmd)) { printk(KERN_DEBUG "mesh: target %d overrun, " "data_ptr=%x total=%x goes_out=%d\n", ms->conn_tgt, ms->data_ptr, scsi_bufflen(cmd), ms->tgts[ms->conn_tgt].data_goes_out); } scsi_dma_unmap(cmd); ms->dma_started = 0; } static void phase_mismatch(struct mesh_state *ms) { volatile struct mesh_regs __iomem *mr = ms->mesh; int phase; dlog(ms, "phasemm ch/cl/seq/fc=%.8x", MKWORD(mr->count_hi, mr->count_lo, mr->sequence, mr->fifo_count)); phase = in_8(&mr->bus_status0) & BS0_PHASE; if (ms->msgphase == msg_out_xxx && phase == BP_MSGOUT) { /* output the last byte of the message, without ATN */ out_8(&mr->count_lo, 1); out_8(&mr->sequence, SEQ_MSGOUT + use_active_neg); mesh_flush_io(mr); udelay(1); out_8(&mr->fifo, ms->msgout[ms->n_msgout-1]); ms->msgphase = msg_out_last; return; } if (ms->msgphase == msg_in) { get_msgin(ms); if (ms->n_msgin) handle_msgin(ms); } if (ms->dma_started) halt_dma(ms); if (mr->fifo_count) { out_8(&mr->sequence, SEQ_FLUSHFIFO); mesh_flush_io(mr); udelay(1); } ms->msgphase = msg_none; switch (phase) { case BP_DATAIN: ms->tgts[ms->conn_tgt].data_goes_out = 0; ms->phase = dataing; break; case BP_DATAOUT: ms->tgts[ms->conn_tgt].data_goes_out = 1; ms->phase = dataing; break; case BP_COMMAND: ms->phase = commanding; break; case BP_STATUS: ms->phase = statusing; break; case BP_MSGIN: ms->msgphase = msg_in; ms->n_msgin = 0; break; case BP_MSGOUT: ms->msgphase = msg_out; if (ms->n_msgout == 0) { if (ms->aborting) { do_abort(ms); } else { if (ms->last_n_msgout == 0) { printk(KERN_DEBUG "mesh: no msg to repeat\n"); ms->msgout[0] = NOP; ms->last_n_msgout = 1; } ms->n_msgout = ms->last_n_msgout; } } break; default: 
		printk(KERN_DEBUG "mesh: unknown scsi phase %x\n", phase);
		ms->stat = DID_ERROR;
		mesh_done(ms, 1);
		return;
	}
	start_phase(ms);
}

/*
 * Advance the message-phase state machine after the MESH signals
 * command-done (called from mesh_interrupt() on INT_CMDDONE).
 * Handles the msg_out/msg_in handshakes and, once message traffic is
 * finished (msg_none), steps ms->phase forward and starts the next
 * bus phase.
 */
static void cmd_complete(struct mesh_state *ms)
{
	volatile struct mesh_regs __iomem *mr = ms->mesh;
	struct scsi_cmnd *cmd = ms->current_req;
	struct mesh_target *tp = &ms->tgts[ms->conn_tgt];
	int seq, n, t;

	dlog(ms, "cmd_complete fc=%x", mr->fifo_count);
	seq = use_active_neg + (ms->n_msgout? SEQ_ATN: 0);
	switch (ms->msgphase) {
	case msg_out_xxx:
		/* huh? we expected a phase mismatch */
		ms->n_msgin = 0;
		ms->msgphase = msg_in;
		/* fall through */

	case msg_in:
		/* should have some message bytes in fifo */
		get_msgin(ms);
		n = msgin_length(ms);
		if (ms->n_msgin < n) {
			/* message not complete yet: ask for the rest */
			out_8(&mr->count_lo, n - ms->n_msgin);
			out_8(&mr->sequence, SEQ_MSGIN + seq);
		} else {
			ms->msgphase = msg_none;
			handle_msgin(ms);
			start_phase(ms);
		}
		break;

	case msg_in_bad:
		/* discard the bad byte and re-request one message byte
		 * with ATN asserted */
		out_8(&mr->sequence, SEQ_FLUSHFIFO);
		mesh_flush_io(mr);
		udelay(1);
		out_8(&mr->count_lo, 1);
		out_8(&mr->sequence, SEQ_MSGIN + SEQ_ATN + use_active_neg);
		break;

	case msg_out:
		/*
		 * To get the right timing on ATN wrt ACK, we have
		 * to get the MESH to drop ACK, wait until REQ gets
		 * asserted, then drop ATN.  To do this we first
		 * issue a SEQ_MSGOUT with ATN and wait for REQ,
		 * then change the command to a SEQ_MSGOUT w/o ATN.
		 * If we don't see REQ in a reasonable time, we
		 * change the command to SEQ_MSGIN with ATN,
		 * wait for the phase mismatch interrupt, then
		 * issue the SEQ_MSGOUT without ATN.
		 */
		out_8(&mr->count_lo, 1);
		out_8(&mr->sequence, SEQ_MSGOUT + use_active_neg + SEQ_ATN);
		t = 30;		/* wait up to 30us */
		while ((in_8(&mr->bus_status0) & BS0_REQ) == 0 && --t >= 0)
			udelay(1);
		dlog(ms, "last_mbyte err/exc/fc/cl=%.8x",
		     MKWORD(mr->error, mr->exception,
			    mr->fifo_count, mr->count_lo));
		if (in_8(&mr->interrupt) & (INT_ERROR | INT_EXCEPTION)) {
			/* whoops, target didn't do what we expected */
			ms->last_n_msgout = ms->n_msgout;
			ms->n_msgout = 0;
			if (in_8(&mr->interrupt) & INT_ERROR) {
				printk(KERN_ERR "mesh: error %x in msg_out\n",
				       in_8(&mr->error));
				handle_error(ms);
				return;
			}
			if (in_8(&mr->exception) != EXC_PHASEMM)
				printk(KERN_ERR "mesh: exc %x in msg_out\n",
				       in_8(&mr->exception));
			else
				printk(KERN_DEBUG "mesh: bs0=%x in msg_out\n",
				       in_8(&mr->bus_status0));
			handle_exception(ms);
			return;
		}
		if (in_8(&mr->bus_status0) & BS0_REQ) {
			/* REQ arrived: re-issue without ATN and push the
			 * last message byte */
			out_8(&mr->sequence, SEQ_MSGOUT + use_active_neg);
			mesh_flush_io(mr);
			udelay(1);
			out_8(&mr->fifo, ms->msgout[ms->n_msgout-1]);
			ms->msgphase = msg_out_last;
		} else {
			/* no REQ yet: fall back to the phase-mismatch
			 * scheme described above */
			out_8(&mr->sequence, SEQ_MSGIN + use_active_neg + SEQ_ATN);
			ms->msgphase = msg_out_xxx;
		}
		break;

	case msg_out_last:
		ms->last_n_msgout = ms->n_msgout;
		ms->n_msgout = 0;
		ms->msgphase = ms->expect_reply? msg_in: msg_none;
		start_phase(ms);
		break;

	case msg_none:
		switch (ms->phase) {
		case idle:
			printk(KERN_ERR "mesh: interrupt in idle phase?\n");
			dumpslog(ms);
			return;
		case selecting:
			dlog(ms, "Selecting phase at command completion",0);
			ms->msgout[0] = IDENTIFY(ALLOW_RESEL(ms->conn_tgt),
						 (cmd? cmd->device->lun: 0));
			ms->n_msgout = 1;
			ms->expect_reply = 0;
			if (ms->aborting) {
				ms->msgout[0] = ABORT;
				ms->n_msgout++;
			} else if (tp->sdtr_state == do_sdtr) {
				/* add SDTR message */
				add_sdtr_msg(ms);
				ms->expect_reply = 1;
				tp->sdtr_state = sdtr_sent;
			}
			ms->msgphase = msg_out;
			/*
			 * We need to wait for REQ before dropping ATN.
			 * We wait for at most 30us, then fall back to
			 * a scheme where we issue a SEQ_COMMAND with ATN,
			 * which will give us a phase mismatch interrupt
			 * when REQ does come, and then we send the message.
			 */
			t = 230;		/* wait up to 230us */
			while ((in_8(&mr->bus_status0) & BS0_REQ) == 0) {
				if (--t < 0) {
					dlog(ms, "impatient for req", ms->n_msgout);
					ms->msgphase = msg_none;
					break;
				}
				udelay(1);
			}
			break;
		case dataing:
			if (ms->dma_count != 0) {
				start_phase(ms);
				return;
			}
			/*
			 * We can get a phase mismatch here if the target
			 * changes to the status phase, even though we have
			 * had a command complete interrupt.  Then, if we
			 * issue the SEQ_STATUS command, we'll get a sequence
			 * error interrupt.  Which isn't so bad except that
			 * occasionally the mesh actually executes the
			 * SEQ_STATUS *as well as* giving us the sequence
			 * error and phase mismatch exception.
			 */
			out_8(&mr->sequence, 0);
			out_8(&mr->interrupt,
			      INT_ERROR | INT_EXCEPTION | INT_CMDDONE);
			halt_dma(ms);
			break;
		case statusing:
			if (cmd) {
				cmd->SCp.Status = mr->fifo;
				if (DEBUG_TARGET(cmd))
					printk(KERN_DEBUG "mesh: status is %x\n",
					       cmd->SCp.Status);
			}
			ms->msgphase = msg_in;
			break;
		case busfreeing:
			mesh_done(ms, 1);
			return;
		case disconnecting:
			ms->current_req = NULL;
			ms->phase = idle;
			mesh_start(ms);
			return;
		default:
			break;
		}
		/* move on to the next bus phase in enum order */
		++ms->phase;
		start_phase(ms);
		break;
	}
}


/*
 * Called by midlayer with host locked to queue a new
 * request
 */
static int mesh_queue_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
{
	struct mesh_state *ms;

	cmd->scsi_done = done;
	cmd->host_scribble = NULL;

	ms = (struct mesh_state *) cmd->device->host->hostdata;

	/* append to the singly-linked request queue, chained through
	 * host_scribble */
	if (ms->request_q == NULL)
		ms->request_q = cmd;
	else
		ms->request_qtail->host_scribble = (void *) cmd;
	ms->request_qtail = cmd;

	if (ms->phase == idle)
		mesh_start(ms);

	return 0;
}

static DEF_SCSI_QCMD(mesh_queue)

/*
 * Called to handle interrupts, either call by the interrupt
 * handler (do_mesh_interrupt) or by other functions in
 * exceptional circumstances
 */
static void mesh_interrupt(struct
			   mesh_state *ms)
{
	volatile struct mesh_regs __iomem *mr = ms->mesh;
	int intr;

#if 0
	if (ALLOW_DEBUG(ms->conn_tgt))
		printk(KERN_DEBUG "mesh_intr, bs0=%x int=%x exc=%x err=%x "
		       "phase=%d msgphase=%d\n", mr->bus_status0,
		       mr->interrupt, mr->exception, mr->error,
		       ms->phase, ms->msgphase);
#endif
	/* drain all pending interrupt conditions before returning */
	while ((intr = in_8(&mr->interrupt)) != 0) {
		dlog(ms, "interrupt intr/err/exc/seq=%.8x",
		     MKWORD(intr, mr->error, mr->exception, mr->sequence));
		if (intr & INT_ERROR) {
			handle_error(ms);
		} else if (intr & INT_EXCEPTION) {
			handle_exception(ms);
		} else if (intr & INT_CMDDONE) {
			/* ack the interrupt, then advance the state machine */
			out_8(&mr->interrupt, INT_CMDDONE);
			cmd_complete(ms);
		}
	}
}

/* Todo: here we can at least try to remove the command from the
 * queue if it isn't connected yet, and for pending command, assert
 * ATN until the bus gets freed.
 */
static int mesh_abort(struct scsi_cmnd *cmd)
{
	struct mesh_state *ms = (struct mesh_state *) cmd->device->host->hostdata;

	/* diagnostics only; actual abort is not implemented */
	printk(KERN_DEBUG "mesh_abort(%p)\n", cmd);
	mesh_dump_regs(ms);
	dumplog(ms, cmd->device->id);
	dumpslog(ms);
	return FAILED;
}

/*
 * Called by the midlayer with the lock held to reset the
 * SCSI host and bus.
 * The midlayer will wait for devices to come back, we don't need
 * to do that ourselves
 */
static int mesh_host_reset(struct scsi_cmnd *cmd)
{
	struct mesh_state *ms = (struct mesh_state *) cmd->device->host->hostdata;
	volatile struct mesh_regs __iomem *mr = ms->mesh;
	volatile struct dbdma_regs __iomem *md = ms->dma;
	unsigned long flags;

	printk(KERN_DEBUG "mesh_host_reset\n");

	spin_lock_irqsave(ms->host->host_lock, flags);

	/* Reset the controller & dbdma channel */
	out_le32(&md->control, (RUN|PAUSE|FLUSH|WAKE) << 16);	/* stop dma */
	out_8(&mr->exception, 0xff);	/* clear all exception bits */
	out_8(&mr->error, 0xff);	/* clear all error bits */
	out_8(&mr->sequence, SEQ_RESETMESH);
	mesh_flush_io(mr);
	udelay(1);
	out_8(&mr->intr_mask, INT_ERROR | INT_EXCEPTION | INT_CMDDONE);
	out_8(&mr->source_id, ms->host->this_id);
	out_8(&mr->sel_timeout, 25);	/* 250ms */
	out_8(&mr->sync_params, ASYNC_PARAMS);

	/* Reset the bus */
	out_8(&mr->bus_status1, BS1_RST);	/* assert RST */
	mesh_flush_io(mr);
	udelay(30);			/* leave it on for >= 25us */
	out_8(&mr->bus_status1, 0);	/* negate RST */

	/* Complete pending commands */
	handle_reset(ms);

	spin_unlock_irqrestore(ms->host->host_lock, flags);
	return SUCCESS;
}

/*
 * Power the MESH cell up or down via the platform feature call.
 * The sleeps give the chip time to settle after the transition.
 */
static void set_mesh_power(struct mesh_state *ms, int state)
{
	if (!machine_is(powermac))
		return;
	if (state) {
		pmac_call_feature(PMAC_FTR_MESH_ENABLE,
				  macio_get_of_node(ms->mdev), 0, 1);
		msleep(200);
	} else {
		pmac_call_feature(PMAC_FTR_MESH_ENABLE,
				  macio_get_of_node(ms->mdev), 0, 0);
		msleep(10);
	}
}


#ifdef CONFIG_PM
/*
 * Suspend: block new requests, wait (polling) for the bus to go
 * idle, then disable the interrupt and power the cell down.
 */
static int mesh_suspend(struct macio_dev *mdev, pm_message_t mesg)
{
	struct mesh_state *ms = (struct mesh_state *)macio_get_drvdata(mdev);
	unsigned long flags;

	switch (mesg.event) {
	case PM_EVENT_SUSPEND:
	case PM_EVENT_HIBERNATE:
	case PM_EVENT_FREEZE:
		break;
	default:
		return 0;
	}
	if (ms->phase == sleeping)
		return 0;

	scsi_block_requests(ms->host);
	spin_lock_irqsave(ms->host->host_lock, flags);
	/* drop the lock while sleeping so the state machine can run */
	while (ms->phase != idle) {
		spin_unlock_irqrestore(ms->host->host_lock, flags);
		msleep(10);
		spin_lock_irqsave(ms->host->host_lock, flags);
	}
	ms->phase = sleeping;
	spin_unlock_irqrestore(ms->host->host_lock, flags);
	disable_irq(ms->meshintr);
	set_mesh_power(ms, 0);

	return 0;
}

/* Resume: power the cell back up, re-init the chip and restart the queue. */
static int mesh_resume(struct macio_dev *mdev)
{
	struct mesh_state *ms = (struct mesh_state *)macio_get_drvdata(mdev);
	unsigned long flags;

	if (ms->phase != sleeping)
		return 0;

	set_mesh_power(ms, 1);
	mesh_init(ms);
	spin_lock_irqsave(ms->host->host_lock, flags);
	mesh_start(ms);
	spin_unlock_irqrestore(ms->host->host_lock, flags);
	enable_irq(ms->meshintr);
	scsi_unblock_requests(ms->host);

	return 0;
}

#endif /* CONFIG_PM */

/*
 * If we leave drives set for synchronous transfers (especially
 * CDROMs), and reboot to MacOS, it gets confused, poor thing.
 * So, on reboot we reset the SCSI bus.
 */
static int mesh_shutdown(struct macio_dev *mdev)
{
	struct mesh_state *ms = (struct mesh_state *)macio_get_drvdata(mdev);
	volatile struct mesh_regs __iomem *mr;
	unsigned long flags;

	printk(KERN_INFO "resetting MESH scsi bus(es)\n");
	spin_lock_irqsave(ms->host->host_lock, flags);
	mr = ms->mesh;
	out_8(&mr->intr_mask, 0);
	out_8(&mr->interrupt, INT_ERROR | INT_EXCEPTION | INT_CMDDONE);
	out_8(&mr->bus_status1, BS1_RST);
	mesh_flush_io(mr);
	udelay(30);
	out_8(&mr->bus_status1, 0);
	spin_unlock_irqrestore(ms->host->host_lock, flags);

	return 0;
}

static struct scsi_host_template mesh_template = {
	.proc_name			= "mesh",
	.name				= "MESH",
	.queuecommand			= mesh_queue,
	.eh_abort_handler		= mesh_abort,
	.eh_host_reset_handler		= mesh_host_reset,
	.can_queue			= 20,
	.this_id			= 7,
	.sg_tablesize			= SG_ALL,
	.cmd_per_lun			= 2,
	.use_clustering			= DISABLE_CLUSTERING,
};

/*
 * Probe: map the MESH and DBDMA register windows, allocate the DMA
 * command list, read the clock frequency from the device tree and
 * register the SCSI host.  Unwinds fully on every error path.
 */
static int mesh_probe(struct macio_dev *mdev, const struct of_device_id *match)
{
	struct device_node *mesh = macio_get_of_node(mdev);
	struct pci_dev* pdev = macio_get_pci_dev(mdev);
	int tgt, minper;
	const int *cfp;
	struct mesh_state *ms;
	struct Scsi_Host *mesh_host;
	void *dma_cmd_space;
	dma_addr_t dma_cmd_bus;

	switch (mdev->bus->chip->type) {
	case
	     macio_heathrow:
	case macio_gatwick:
	case macio_paddington:
		/* older chips can't use active negation */
		use_active_neg = 0;
		break;
	default:
		use_active_neg = SEQ_ACTIVE_NEG;
	}

	if (macio_resource_count(mdev) != 2 || macio_irq_count(mdev) != 2) {
		printk(KERN_ERR "mesh: expected 2 addrs and 2 intrs"
		       " (got %d,%d)\n", macio_resource_count(mdev),
		       macio_irq_count(mdev));
		return -ENODEV;
	}

	if (macio_request_resources(mdev, "mesh") != 0) {
		printk(KERN_ERR "mesh: unable to request memory resources");
		return -EBUSY;
	}

	mesh_host = scsi_host_alloc(&mesh_template, sizeof(struct mesh_state));
	if (mesh_host == NULL) {
		printk(KERN_ERR "mesh: couldn't register host");
		goto out_release;
	}

	/* Old junk for root discovery, that will die ultimately */
#if !defined(MODULE)
	note_scsi_host(mesh, mesh_host);
#endif

	mesh_host->base = macio_resource_start(mdev, 0);
	mesh_host->irq = macio_irq(mdev, 0);
	ms = (struct mesh_state *) mesh_host->hostdata;
	macio_set_drvdata(mdev, ms);
	ms->host = mesh_host;
	ms->mdev = mdev;
	ms->pdev = pdev;

	ms->mesh = ioremap(macio_resource_start(mdev, 0), 0x1000);
	if (ms->mesh == NULL) {
		printk(KERN_ERR "mesh: can't map registers\n");
		goto out_free;
	}
	ms->dma = ioremap(macio_resource_start(mdev, 1), 0x1000);
	if (ms->dma == NULL) {
		printk(KERN_ERR "mesh: can't map registers\n");
		iounmap(ms->mesh);
		goto out_free;
	}

	ms->meshintr = macio_irq(mdev, 0);
	ms->dmaintr = macio_irq(mdev, 1);

	/* Space for dma command list: +1 for stop command,
	 * +1 to allow for aligning.
	 */
	ms->dma_cmd_size = (mesh_host->sg_tablesize + 2) * sizeof(struct dbdma_cmd);

	/* We use the PCI APIs for now until the generic one gets fixed
	 * enough or until we get some macio-specific versions
	 */
	dma_cmd_space = pci_alloc_consistent(macio_get_pci_dev(mdev),
					     ms->dma_cmd_size,
					     &dma_cmd_bus);
	if (dma_cmd_space == NULL) {
		printk(KERN_ERR "mesh: can't allocate DMA table\n");
		goto out_unmap;
	}
	memset(dma_cmd_space, 0, ms->dma_cmd_size);

	/* align the command list; remember both the raw allocation and
	 * the aligned bus address so it can be freed later */
	ms->dma_cmds = (struct dbdma_cmd *) DBDMA_ALIGN(dma_cmd_space);
	ms->dma_cmd_space = dma_cmd_space;
	ms->dma_cmd_bus = dma_cmd_bus + ((unsigned long)ms->dma_cmds)
		- (unsigned long)dma_cmd_space;
	ms->current_req = NULL;
	for (tgt = 0; tgt < 8; ++tgt) {
		ms->tgts[tgt].sdtr_state = do_sdtr;
		ms->tgts[tgt].sync_params = ASYNC_PARAMS;
		ms->tgts[tgt].current_req = NULL;
	}

	if ((cfp = of_get_property(mesh, "clock-frequency", NULL)))
		ms->clk_freq = *cfp;
	else {
		printk(KERN_INFO "mesh: assuming 50MHz clock frequency\n");
		ms->clk_freq = 50000000;
	}

	/* The maximum sync rate is clock / 5; increase
	 * mesh_sync_period if necessary.
	 */
	minper = 1000000000 / (ms->clk_freq / 5); /* ns */
	if (mesh_sync_period < minper)
		mesh_sync_period = minper;

	/* Power up the chip */
	set_mesh_power(ms, 1);

	/* Set it up */
	mesh_init(ms);

	/* Request interrupt */
	if (request_irq(ms->meshintr, do_mesh_interrupt, 0, "MESH", ms)) {
		printk(KERN_ERR "MESH: can't get irq %d\n", ms->meshintr);
		goto out_shutdown;
	}

	/* Add scsi host & scan */
	if (scsi_add_host(mesh_host, &mdev->ofdev.dev))
		goto out_release_irq;
	scsi_scan_host(mesh_host);

	return 0;

 out_release_irq:
	free_irq(ms->meshintr, ms);
 out_shutdown:
	/* shutdown & reset bus in case of error or macos can be confused
	 * at reboot if the bus was set to synchronous mode already
	 */
	mesh_shutdown(mdev);
	set_mesh_power(ms, 0);
	pci_free_consistent(macio_get_pci_dev(mdev), ms->dma_cmd_size,
			    ms->dma_cmd_space, ms->dma_cmd_bus);
 out_unmap:
	iounmap(ms->dma);
	iounmap(ms->mesh);
 out_free:
	scsi_host_put(mesh_host);
 out_release:
	macio_release_resources(mdev);

	return -ENODEV;
}

/* Remove: tear down in the reverse order of mesh_probe(). */
static int mesh_remove(struct macio_dev *mdev)
{
	struct mesh_state *ms = (struct mesh_state *)macio_get_drvdata(mdev);
	struct Scsi_Host *mesh_host = ms->host;

	scsi_remove_host(mesh_host);

	free_irq(ms->meshintr, ms);

	/* Reset scsi bus */
	mesh_shutdown(mdev);

	/* Shut down chip & termination */
	set_mesh_power(ms, 0);

	/* Unmap registers & dma controller */
	iounmap(ms->mesh);
	iounmap(ms->dma);

	/* Free DMA commands memory */
	pci_free_consistent(macio_get_pci_dev(mdev), ms->dma_cmd_size,
			    ms->dma_cmd_space, ms->dma_cmd_bus);

	/* Release memory resources */
	macio_release_resources(mdev);

	scsi_host_put(mesh_host);

	return 0;
}


static struct of_device_id mesh_match[] =
{
	{
	.name		= "mesh",
	},
	{
	.type		= "scsi",
	.compatible	= "chrp,mesh0"
	},
	{},
};
MODULE_DEVICE_TABLE (of, mesh_match);

static struct macio_driver mesh_driver =
{
	.driver = {
		.name		= "mesh",
		.owner		= THIS_MODULE,
		.of_match_table	= mesh_match,
	},
	.probe		= mesh_probe,
	.remove		= mesh_remove,
	.shutdown	= mesh_shutdown,
#ifdef CONFIG_PM
	.suspend	= mesh_suspend,
	.resume
= mesh_resume, #endif }; static int __init init_mesh(void) { /* Calculate sync rate from module parameters */ if (sync_rate > 10) sync_rate = 10; if (sync_rate > 0) { printk(KERN_INFO "mesh: configured for synchronous %d MB/s\n", sync_rate); mesh_sync_period = 1000 / sync_rate; /* ns */ mesh_sync_offset = 15; } else printk(KERN_INFO "mesh: configured for asynchronous\n"); return macio_register_driver(&mesh_driver); } static void __exit exit_mesh(void) { return macio_unregister_driver(&mesh_driver); } module_init(init_mesh); module_exit(exit_mesh);
gpl-2.0
milaq/android_kernel_lenovo_a107
drivers/pci/search.c
8323
11393
/*
 * PCI searching functions.
 *
 * Copyright (C) 1993 -- 1997 Drew Eckhardt, Frederic Potter,
 *				David Mosberger-Tang
 * Copyright (C) 1997 -- 2000 Martin Mares <mj@ucw.cz>
 * Copyright (C) 2003 -- 2004 Greg Kroah-Hartman <greg@kroah.com>
 */

#include <linux/init.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include "pci.h"

DECLARE_RWSEM(pci_bus_sem);

/*
 * Walk up from @pdev looking for the PCIe-to-PCI bridge above it.
 *
 * Returns %NULL when @pdev itself is PCIe.  When no PCIe bridge is
 * found on the way to the root (i.e. the topmost parent is a legacy
 * bridge hanging directly off bus 0), the highest legacy bridge seen
 * is returned instead.
 */
struct pci_dev *
pci_find_upstream_pcie_bridge(struct pci_dev *pdev)
{
	struct pci_dev *legacy_bridge = NULL;

	if (pci_is_pcie(pdev))
		return NULL;

	while (!pci_is_root_bus(pdev->bus)) {
		pdev = pdev->bus->self;	/* step to the p2p bridge above */
		if (!pci_is_pcie(pdev)) {
			legacy_bridge = pdev;
			continue;
		}
		/* a PCI device must hang off a PCIe *downstream* bridge */
		if (pdev->pcie_type != PCI_EXP_TYPE_PCI_BRIDGE) {
			/* Busted hardware? */
			WARN_ON_ONCE(1);
			return NULL;
		}
		return pdev;
	}
	return legacy_bridge;
}

/* Depth-first search of @bus and its children for bus number @busnr. */
static struct pci_bus *pci_do_find_bus(struct pci_bus *bus, unsigned char busnr)
{
	struct list_head *ln;

	if (bus->number == busnr)
		return bus;

	list_for_each(ln, &bus->children) {
		struct pci_bus *found = pci_do_find_bus(pci_bus_b(ln), busnr);

		if (found)
			return found;
	}
	return NULL;
}

/**
 * pci_find_bus - locate PCI bus from a given domain and bus number
 * @domain: number of PCI domain to search
 * @busnr: number of desired PCI bus
 *
 * Given a PCI bus number and domain number, the desired PCI bus is located
 * in the global list of PCI buses.  If the bus is found, a pointer to its
 * data structure is returned.  If no bus is found, %NULL is returned.
*/ struct pci_bus * pci_find_bus(int domain, int busnr) { struct pci_bus *bus = NULL; struct pci_bus *tmp_bus; while ((bus = pci_find_next_bus(bus)) != NULL) { if (pci_domain_nr(bus) != domain) continue; tmp_bus = pci_do_find_bus(bus, busnr); if (tmp_bus) return tmp_bus; } return NULL; } /** * pci_find_next_bus - begin or continue searching for a PCI bus * @from: Previous PCI bus found, or %NULL for new search. * * Iterates through the list of known PCI busses. A new search is * initiated by passing %NULL as the @from argument. Otherwise if * @from is not %NULL, searches continue from next device on the * global list. */ struct pci_bus * pci_find_next_bus(const struct pci_bus *from) { struct list_head *n; struct pci_bus *b = NULL; WARN_ON(in_interrupt()); down_read(&pci_bus_sem); n = from ? from->node.next : pci_root_buses.next; if (n != &pci_root_buses) b = pci_bus_b(n); up_read(&pci_bus_sem); return b; } /** * pci_get_slot - locate PCI device for a given PCI slot * @bus: PCI bus on which desired PCI device resides * @devfn: encodes number of PCI slot in which the desired PCI * device resides and the logical device number within that slot * in case of multi-function devices. * * Given a PCI bus and slot/function number, the desired PCI device * is located in the list of PCI devices. * If the device is found, its reference count is increased and this * function returns a pointer to its data structure. The caller must * decrement the reference count by calling pci_dev_put(). * If no device is found, %NULL is returned. 
*/ struct pci_dev * pci_get_slot(struct pci_bus *bus, unsigned int devfn) { struct list_head *tmp; struct pci_dev *dev; WARN_ON(in_interrupt()); down_read(&pci_bus_sem); list_for_each(tmp, &bus->devices) { dev = pci_dev_b(tmp); if (dev->devfn == devfn) goto out; } dev = NULL; out: pci_dev_get(dev); up_read(&pci_bus_sem); return dev; } /** * pci_get_domain_bus_and_slot - locate PCI device for a given PCI domain (segment), bus, and slot * @domain: PCI domain/segment on which the PCI device resides. * @bus: PCI bus on which desired PCI device resides * @devfn: encodes number of PCI slot in which the desired PCI device * resides and the logical device number within that slot in case of * multi-function devices. * * Given a PCI domain, bus, and slot/function number, the desired PCI * device is located in the list of PCI devices. If the device is * found, its reference count is increased and this function returns a * pointer to its data structure. The caller must decrement the * reference count by calling pci_dev_put(). If no device is found, * %NULL is returned. */ struct pci_dev *pci_get_domain_bus_and_slot(int domain, unsigned int bus, unsigned int devfn) { struct pci_dev *dev = NULL; for_each_pci_dev(dev) { if (pci_domain_nr(dev->bus) == domain && (dev->bus->number == bus && dev->devfn == devfn)) return dev; } return NULL; } EXPORT_SYMBOL(pci_get_domain_bus_and_slot); static int match_pci_dev_by_id(struct device *dev, void *data) { struct pci_dev *pdev = to_pci_dev(dev); struct pci_device_id *id = data; if (pci_match_one_device(id, pdev)) return 1; return 0; } /* * pci_get_dev_by_id - begin or continue searching for a PCI device by id * @id: pointer to struct pci_device_id to match for the device * @from: Previous PCI device found in search, or %NULL for new search. * * Iterates through the list of known PCI devices. If a PCI device is found * with a matching id a pointer to its device structure is returned, and the * reference count to the device is incremented. 
Otherwise, %NULL is returned. * A new search is initiated by passing %NULL as the @from argument. Otherwise * if @from is not %NULL, searches continue from next device on the global * list. The reference count for @from is always decremented if it is not * %NULL. * * This is an internal function for use by the other search functions in * this file. */ static struct pci_dev *pci_get_dev_by_id(const struct pci_device_id *id, struct pci_dev *from) { struct device *dev; struct device *dev_start = NULL; struct pci_dev *pdev = NULL; WARN_ON(in_interrupt()); if (from) dev_start = &from->dev; dev = bus_find_device(&pci_bus_type, dev_start, (void *)id, match_pci_dev_by_id); if (dev) pdev = to_pci_dev(dev); if (from) pci_dev_put(from); return pdev; } /** * pci_get_subsys - begin or continue searching for a PCI device by vendor/subvendor/device/subdevice id * @vendor: PCI vendor id to match, or %PCI_ANY_ID to match all vendor ids * @device: PCI device id to match, or %PCI_ANY_ID to match all device ids * @ss_vendor: PCI subsystem vendor id to match, or %PCI_ANY_ID to match all vendor ids * @ss_device: PCI subsystem device id to match, or %PCI_ANY_ID to match all device ids * @from: Previous PCI device found in search, or %NULL for new search. * * Iterates through the list of known PCI devices. If a PCI device is found * with a matching @vendor, @device, @ss_vendor and @ss_device, a pointer to its * device structure is returned, and the reference count to the device is * incremented. Otherwise, %NULL is returned. A new search is initiated by * passing %NULL as the @from argument. Otherwise if @from is not %NULL, * searches continue from next device on the global list. * The reference count for @from is always decremented if it is not %NULL. 
*/ struct pci_dev *pci_get_subsys(unsigned int vendor, unsigned int device, unsigned int ss_vendor, unsigned int ss_device, struct pci_dev *from) { struct pci_dev *pdev; struct pci_device_id *id; /* * pci_find_subsys() can be called on the ide_setup() path, * super-early in boot. But the down_read() will enable local * interrupts, which can cause some machines to crash. So here we * detect and flag that situation and bail out early. */ if (unlikely(no_pci_devices())) return NULL; id = kzalloc(sizeof(*id), GFP_KERNEL); if (!id) return NULL; id->vendor = vendor; id->device = device; id->subvendor = ss_vendor; id->subdevice = ss_device; pdev = pci_get_dev_by_id(id, from); kfree(id); return pdev; } /** * pci_get_device - begin or continue searching for a PCI device by vendor/device id * @vendor: PCI vendor id to match, or %PCI_ANY_ID to match all vendor ids * @device: PCI device id to match, or %PCI_ANY_ID to match all device ids * @from: Previous PCI device found in search, or %NULL for new search. * * Iterates through the list of known PCI devices. If a PCI device is * found with a matching @vendor and @device, the reference count to the * device is incremented and a pointer to its device structure is returned. * Otherwise, %NULL is returned. A new search is initiated by passing %NULL * as the @from argument. Otherwise if @from is not %NULL, searches continue * from next device on the global list. The reference count for @from is * always decremented if it is not %NULL. */ struct pci_dev * pci_get_device(unsigned int vendor, unsigned int device, struct pci_dev *from) { return pci_get_subsys(vendor, device, PCI_ANY_ID, PCI_ANY_ID, from); } /** * pci_get_class - begin or continue searching for a PCI device by class * @class: search for a PCI device with this class designation * @from: Previous PCI device found in search, or %NULL for new search. * * Iterates through the list of known PCI devices. 
If a PCI device is * found with a matching @class, the reference count to the device is * incremented and a pointer to its device structure is returned. * Otherwise, %NULL is returned. * A new search is initiated by passing %NULL as the @from argument. * Otherwise if @from is not %NULL, searches continue from next device * on the global list. The reference count for @from is always decremented * if it is not %NULL. */ struct pci_dev *pci_get_class(unsigned int class, struct pci_dev *from) { struct pci_dev *dev; struct pci_device_id *id; id = kzalloc(sizeof(*id), GFP_KERNEL); if (!id) return NULL; id->vendor = id->device = id->subvendor = id->subdevice = PCI_ANY_ID; id->class_mask = PCI_ANY_ID; id->class = class; dev = pci_get_dev_by_id(id, from); kfree(id); return dev; } /** * pci_dev_present - Returns 1 if device matching the device list is present, 0 if not. * @ids: A pointer to a null terminated list of struct pci_device_id structures * that describe the type of PCI device the caller is trying to find. * * Obvious fact: You do not have a reference to any device that might be found * by this function, so if that device is removed from the system right after * this function is finished, the value will be stale. Use this function to * find devices that are usually built into a system, or for a general hint as * to if another device happens to be present at this specific moment in time. */ int pci_dev_present(const struct pci_device_id *ids) { struct pci_dev *found = NULL; WARN_ON(in_interrupt()); while (ids->vendor || ids->subvendor || ids->class_mask) { found = pci_get_dev_by_id(ids, NULL); if (found) goto exit; ids++; } exit: if (found) return 1; return 0; } EXPORT_SYMBOL(pci_dev_present); /* For boot time work */ EXPORT_SYMBOL(pci_find_bus); EXPORT_SYMBOL(pci_find_next_bus); /* For everyone */ EXPORT_SYMBOL(pci_get_device); EXPORT_SYMBOL(pci_get_subsys); EXPORT_SYMBOL(pci_get_slot); EXPORT_SYMBOL(pci_get_class);
gpl-2.0
jfdsmabalot/kernel_hammerhead
drivers/pci/search.c
8323
11393
/*
 * PCI searching functions.
 *
 * Copyright (C) 1993 -- 1997 Drew Eckhardt, Frederic Potter,
 *				David Mosberger-Tang
 * Copyright (C) 1997 -- 2000 Martin Mares <mj@ucw.cz>
 * Copyright (C) 2003 -- 2004 Greg Kroah-Hartman <greg@kroah.com>
 */

#include <linux/init.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include "pci.h"

DECLARE_RWSEM(pci_bus_sem);

/*
 * find the upstream PCIe-to-PCI bridge of a PCI device
 * if the device is PCIE, return NULL
 * if the device isn't connected to a PCIe bridge (that is its parent is a
 * legacy PCI bridge and the bridge is directly connected to bus 0), return its
 * parent
 */
struct pci_dev *
pci_find_upstream_pcie_bridge(struct pci_dev *pdev)
{
	struct pci_dev *tmp = NULL;

	if (pci_is_pcie(pdev))
		return NULL;
	while (1) {
		if (pci_is_root_bus(pdev->bus))
			break;
		pdev = pdev->bus->self;
		/* a p2p bridge */
		if (!pci_is_pcie(pdev)) {
			tmp = pdev;
			continue;
		}
		/* PCI device should connect to a PCIe bridge */
		if (pdev->pcie_type != PCI_EXP_TYPE_PCI_BRIDGE) {
			/* Busted hardware? */
			WARN_ON_ONCE(1);
			return NULL;
		}
		return pdev;
	}

	return tmp;
}

/* Depth-first search of @bus and its children for bus number @busnr. */
static struct pci_bus *pci_do_find_bus(struct pci_bus *bus, unsigned char busnr)
{
	struct pci_bus *child;
	struct list_head *tmp;

	if (bus->number == busnr)
		return bus;

	list_for_each(tmp, &bus->children) {
		child = pci_do_find_bus(pci_bus_b(tmp), busnr);
		if (child)
			return child;
	}
	return NULL;
}

/**
 * pci_find_bus - locate PCI bus from a given domain and bus number
 * @domain: number of PCI domain to search
 * @busnr: number of desired PCI bus
 *
 * Given a PCI bus number and domain number, the desired PCI bus is located
 * in the global list of PCI buses.  If the bus is found, a pointer to its
 * data structure is returned.  If no bus is found, %NULL is returned.
 */
struct pci_bus * pci_find_bus(int domain, int busnr)
{
	struct pci_bus *bus = NULL;
	struct pci_bus *tmp_bus;

	while ((bus = pci_find_next_bus(bus)) != NULL)  {
		if (pci_domain_nr(bus) != domain)
			continue;
		tmp_bus = pci_do_find_bus(bus, busnr);
		if (tmp_bus)
			return tmp_bus;
	}
	return NULL;
}

/**
 * pci_find_next_bus - begin or continue searching for a PCI bus
 * @from: Previous PCI bus found, or %NULL for new search.
 *
 * Iterates through the list of known PCI busses.  A new search is
 * initiated by passing %NULL as the @from argument.  Otherwise if
 * @from is not %NULL, searches continue from next device on the
 * global list.
 */
struct pci_bus *
pci_find_next_bus(const struct pci_bus *from)
{
	struct list_head *n;
	struct pci_bus *b = NULL;

	WARN_ON(in_interrupt());
	down_read(&pci_bus_sem);
	n = from ? from->node.next : pci_root_buses.next;
	if (n != &pci_root_buses)
		b = pci_bus_b(n);
	up_read(&pci_bus_sem);
	return b;
}

/**
 * pci_get_slot - locate PCI device for a given PCI slot
 * @bus: PCI bus on which desired PCI device resides
 * @devfn: encodes number of PCI slot in which the desired PCI
 * device resides and the logical device number within that slot
 * in case of multi-function devices.
 *
 * Given a PCI bus and slot/function number, the desired PCI device
 * is located in the list of PCI devices.
 * If the device is found, its reference count is increased and this
 * function returns a pointer to its data structure.  The caller must
 * decrement the reference count by calling pci_dev_put().
 * If no device is found, %NULL is returned.
 */
struct pci_dev * pci_get_slot(struct pci_bus *bus, unsigned int devfn)
{
	struct list_head *tmp;
	struct pci_dev *dev;

	WARN_ON(in_interrupt());
	down_read(&pci_bus_sem);

	list_for_each(tmp, &bus->devices) {
		dev = pci_dev_b(tmp);
		if (dev->devfn == devfn)
			goto out;
	}

	dev = NULL;
 out:
	/* grab the reference while still holding the lock */
	pci_dev_get(dev);
	up_read(&pci_bus_sem);
	return dev;
}

/**
 * pci_get_domain_bus_and_slot - locate PCI device for a given PCI domain (segment), bus, and slot
 * @domain: PCI domain/segment on which the PCI device resides.
 * @bus: PCI bus on which desired PCI device resides
 * @devfn: encodes number of PCI slot in which the desired PCI device
 * resides and the logical device number within that slot in case of
 * multi-function devices.
 *
 * Given a PCI domain, bus, and slot/function number, the desired PCI
 * device is located in the list of PCI devices.  If the device is
 * found, its reference count is increased and this function returns a
 * pointer to its data structure.  The caller must decrement the
 * reference count by calling pci_dev_put().  If no device is found,
 * %NULL is returned.
 */
struct pci_dev *pci_get_domain_bus_and_slot(int domain, unsigned int bus,
					    unsigned int devfn)
{
	struct pci_dev *dev = NULL;

	for_each_pci_dev(dev) {
		if (pci_domain_nr(dev->bus) == domain &&
		    (dev->bus->number == bus && dev->devfn == devfn))
			return dev;
	}
	return NULL;
}
EXPORT_SYMBOL(pci_get_domain_bus_and_slot);

/* bus_find_device() callback: non-zero when @dev matches the id in @data */
static int match_pci_dev_by_id(struct device *dev, void *data)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct pci_device_id *id = data;

	if (pci_match_one_device(id, pdev))
		return 1;
	return 0;
}

/*
 * pci_get_dev_by_id - begin or continue searching for a PCI device by id
 * @id: pointer to struct pci_device_id to match for the device
 * @from: Previous PCI device found in search, or %NULL for new search.
 *
 * Iterates through the list of known PCI devices.  If a PCI device is found
 * with a matching id a pointer to its device structure is returned, and the
 * reference count to the device is incremented.  Otherwise, %NULL is
 * returned.  A new search is initiated by passing %NULL as the @from
 * argument.  Otherwise if @from is not %NULL, searches continue from next
 * device on the global list.  The reference count for @from is always
 * decremented if it is not %NULL.
 *
 * This is an internal function for use by the other search functions in
 * this file.
 */
static struct pci_dev *pci_get_dev_by_id(const struct pci_device_id *id,
					 struct pci_dev *from)
{
	struct device *dev;
	struct device *dev_start = NULL;
	struct pci_dev *pdev = NULL;

	WARN_ON(in_interrupt());
	if (from)
		dev_start = &from->dev;
	dev = bus_find_device(&pci_bus_type, dev_start, (void *)id,
			      match_pci_dev_by_id);
	if (dev)
		pdev = to_pci_dev(dev);
	if (from)
		pci_dev_put(from);
	return pdev;
}

/**
 * pci_get_subsys - begin or continue searching for a PCI device by vendor/subvendor/device/subdevice id
 * @vendor: PCI vendor id to match, or %PCI_ANY_ID to match all vendor ids
 * @device: PCI device id to match, or %PCI_ANY_ID to match all device ids
 * @ss_vendor: PCI subsystem vendor id to match, or %PCI_ANY_ID to match all vendor ids
 * @ss_device: PCI subsystem device id to match, or %PCI_ANY_ID to match all device ids
 * @from: Previous PCI device found in search, or %NULL for new search.
 *
 * Iterates through the list of known PCI devices.  If a PCI device is found
 * with a matching @vendor, @device, @ss_vendor and @ss_device, a pointer to its
 * device structure is returned, and the reference count to the device is
 * incremented.  Otherwise, %NULL is returned.  A new search is initiated by
 * passing %NULL as the @from argument.  Otherwise if @from is not %NULL,
 * searches continue from next device on the global list.
 * The reference count for @from is always decremented if it is not %NULL.
 */
struct pci_dev *pci_get_subsys(unsigned int vendor, unsigned int device,
			       unsigned int ss_vendor, unsigned int ss_device,
			       struct pci_dev *from)
{
	/*
	 * Build the match descriptor on the stack rather than with
	 * kzalloc(): the heap allocation served no purpose and added a
	 * spurious GFP_KERNEL failure path that callers could not tell
	 * apart from "no matching device".
	 */
	struct pci_device_id id = {
		.vendor = vendor,
		.device = device,
		.subvendor = ss_vendor,
		.subdevice = ss_device,
	};

	/*
	 * pci_find_subsys() can be called on the ide_setup() path,
	 * super-early in boot.  But the down_read() will enable local
	 * interrupts, which can cause some machines to crash.  So here we
	 * detect and flag that situation and bail out early.
	 */
	if (unlikely(no_pci_devices()))
		return NULL;

	return pci_get_dev_by_id(&id, from);
}

/**
 * pci_get_device - begin or continue searching for a PCI device by vendor/device id
 * @vendor: PCI vendor id to match, or %PCI_ANY_ID to match all vendor ids
 * @device: PCI device id to match, or %PCI_ANY_ID to match all device ids
 * @from: Previous PCI device found in search, or %NULL for new search.
 *
 * Iterates through the list of known PCI devices.  If a PCI device is
 * found with a matching @vendor and @device, the reference count to the
 * device is incremented and a pointer to its device structure is returned.
 * Otherwise, %NULL is returned.  A new search is initiated by passing %NULL
 * as the @from argument.  Otherwise if @from is not %NULL, searches continue
 * from next device on the global list.  The reference count for @from is
 * always decremented if it is not %NULL.
 */
struct pci_dev *
pci_get_device(unsigned int vendor, unsigned int device, struct pci_dev *from)
{
	return pci_get_subsys(vendor, device, PCI_ANY_ID, PCI_ANY_ID, from);
}

/**
 * pci_get_class - begin or continue searching for a PCI device by class
 * @class: search for a PCI device with this class designation
 * @from: Previous PCI device found in search, or %NULL for new search.
 *
 * Iterates through the list of known PCI devices.  If a PCI device is
 * found with a matching @class, the reference count to the device is
 * incremented and a pointer to its device structure is returned.
 * Otherwise, %NULL is returned.
 * A new search is initiated by passing %NULL as the @from argument.
 * Otherwise if @from is not %NULL, searches continue from next device
 * on the global list.  The reference count for @from is always decremented
 * if it is not %NULL.
 */
struct pci_dev *pci_get_class(unsigned int class, struct pci_dev *from)
{
	/*
	 * On-stack id instead of kzalloc(): removes a pointless
	 * GFP_KERNEL allocation whose failure was indistinguishable
	 * from "no matching device".
	 */
	struct pci_device_id id = {
		.vendor = PCI_ANY_ID,
		.device = PCI_ANY_ID,
		.subvendor = PCI_ANY_ID,
		.subdevice = PCI_ANY_ID,
		.class_mask = PCI_ANY_ID,
		.class = class,
	};

	return pci_get_dev_by_id(&id, from);
}

/**
 * pci_dev_present - Returns 1 if device matching the device list is present, 0 if not.
 * @ids: A pointer to a null terminated list of struct pci_device_id structures
 * that describe the type of PCI device the caller is trying to find.
 *
 * Obvious fact: You do not have a reference to any device that might be found
 * by this function, so if that device is removed from the system right after
 * this function is finished, the value will be stale.  Use this function to
 * find devices that are usually built into a system, or for a general hint as
 * to if another device happens to be present at this specific moment in time.
 */
int pci_dev_present(const struct pci_device_id *ids)
{
	struct pci_dev *found;

	WARN_ON(in_interrupt());
	while (ids->vendor || ids->subvendor || ids->class_mask) {
		found = pci_get_dev_by_id(ids, NULL);
		if (found) {
			/*
			 * pci_get_dev_by_id() took a reference; drop it,
			 * since this function's contract (see comment above)
			 * is that the caller holds none.  The old code
			 * leaked one reference per successful call.
			 */
			pci_dev_put(found);
			return 1;
		}
		ids++;
	}
	return 0;
}
EXPORT_SYMBOL(pci_dev_present);

/* For boot time work */
EXPORT_SYMBOL(pci_find_bus);
EXPORT_SYMBOL(pci_find_next_bus);
/* For everyone */
EXPORT_SYMBOL(pci_get_device);
EXPORT_SYMBOL(pci_get_subsys);
EXPORT_SYMBOL(pci_get_slot);
EXPORT_SYMBOL(pci_get_class);
gpl-2.0
bilalliberty/kernel_golfu
fs/minix/itree_v2.c
9091
1925
#include <linux/buffer_head.h>
#include "minix.h"

/* Minix V2/V3 on-disk inodes: 7 direct block pointers, then single,
 * double and triple indirect pointers — hence indirection DEPTH of 4. */
enum {DIRECT = 7, DEPTH = 4};	/* Have triple indirect */

/* V2 block pointers are 32-bit, stored in host byte order on disk. */
typedef u32 block_t;	/* 32 bit, host order */

/* Identity conversions: V2 pointers are host order, so no byte swapping
 * is needed (these exist to satisfy the itree_common.c interface). */
static inline unsigned long block_to_cpu(block_t n)
{
	return n;
}

static inline block_t cpu_to_block(unsigned long n)
{
	return n;
}

/* Return the inode's block-pointer array (direct + indirect slots). */
static inline block_t *i_data(struct inode *inode)
{
	return (block_t *)minix_i(inode)->u.i2_data;
}

#define DIRCOUNT 7
/* Number of block_t pointers that fit in one block: blocksize / 4. */
#define INDIRCOUNT(sb) (1 << ((sb)->s_blocksize_bits - 2))

/*
 * Translate a logical file block number into a path of array offsets
 * through the inode's direct/indirect pointer tree.  Fills offsets[]
 * and returns the path depth (1..DEPTH), or 0 if @block is out of range
 * (negative or beyond the filesystem's maximum file size).
 */
static int block_to_path(struct inode * inode, long block, int offsets[DEPTH])
{
	int n = 0;
	char b[BDEVNAME_SIZE];
	struct super_block *sb = inode->i_sb;

	if (block < 0) {
		printk("MINIX-fs: block_to_path: block %ld < 0 on dev %s\n",
			block, bdevname(sb->s_bdev, b));
	} else if (block >= (minix_sb(inode->i_sb)->s_max_size/sb->s_blocksize)) {
		/* Rate-limited: a corrupt fs could otherwise flood the log. */
		if (printk_ratelimit())
			printk("MINIX-fs: block_to_path: "
			       "block %ld too big on dev %s\n",
				block, bdevname(sb->s_bdev, b));
	} else if (block < DIRCOUNT) {
		/* Direct block: one-step path. */
		offsets[n++] = block;
	} else if ((block -= DIRCOUNT) < INDIRCOUNT(sb)) {
		/* Single indirect (slot 7).  Note: block has been rebased
		 * to be relative to the start of this indirect region. */
		offsets[n++] = DIRCOUNT;
		offsets[n++] = block;
	} else if ((block -= INDIRCOUNT(sb)) < INDIRCOUNT(sb) * INDIRCOUNT(sb)) {
		/* Double indirect (slot 8). */
		offsets[n++] = DIRCOUNT + 1;
		offsets[n++] = block / INDIRCOUNT(sb);
		offsets[n++] = block % INDIRCOUNT(sb);
	} else {
		/* Triple indirect (slot 9). */
		block -= INDIRCOUNT(sb) * INDIRCOUNT(sb);
		offsets[n++] = DIRCOUNT + 2;
		offsets[n++] = (block / INDIRCOUNT(sb)) / INDIRCOUNT(sb);
		offsets[n++] = (block / INDIRCOUNT(sb)) % INDIRCOUNT(sb);
		offsets[n++] = block % INDIRCOUNT(sb);
	}
	return n;
}

/* Textually include the version-independent tree code; it is specialized
 * by the block_t/DEPTH/block_to_path definitions above, providing
 * get_block(), truncate() and nblocks() used by the wrappers below. */
#include "itree_common.c"

/* Map a logical file block to a device block for V2/V3 filesystems;
 * allocates on demand when @create is nonzero. */
int V2_minix_get_block(struct inode * inode, long block,
			struct buffer_head *bh_result, int create)
{
	return get_block(inode, block, bh_result, create);
}

/* Free all blocks beyond the inode's current i_size. */
void V2_minix_truncate(struct inode * inode)
{
	truncate(inode);
}

/* Number of blocks (including indirect blocks) needed for @size bytes. */
unsigned V2_minix_blocks(loff_t size, struct super_block *sb)
{
	return nblocks(size, sb);
}
gpl-2.0
dwander/linaro-base
drivers/net/wireless/b43/tables.c
10627
14620
/* Broadcom B43 wireless driver Copyright (c) 2005 Martin Langer <martin-langer@gmx.de>, Copyright (c) 2005-2007 Stefano Brivio <stefano.brivio@polimi.it> Copyright (c) 2006, 2006 Michael Buesch <m@bues.ch> Copyright (c) 2005 Danny van Dyk <kugelfang@gentoo.org> Copyright (c) 2005 Andreas Jaggi <andreas.jaggi@waterwave.ch> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; see the file COPYING. If not, write to the Free Software Foundation, Inc., 51 Franklin Steet, Fifth Floor, Boston, MA 02110-1301, USA. 
*/ #include "b43.h" #include "tables.h" #include "phy_g.h" const u32 b43_tab_rotor[] = { 0xFEB93FFD, 0xFEC63FFD, /* 0 */ 0xFED23FFD, 0xFEDF3FFD, 0xFEEC3FFE, 0xFEF83FFE, 0xFF053FFE, 0xFF113FFE, 0xFF1E3FFE, 0xFF2A3FFF, /* 8 */ 0xFF373FFF, 0xFF443FFF, 0xFF503FFF, 0xFF5D3FFF, 0xFF693FFF, 0xFF763FFF, 0xFF824000, 0xFF8F4000, /* 16 */ 0xFF9B4000, 0xFFA84000, 0xFFB54000, 0xFFC14000, 0xFFCE4000, 0xFFDA4000, 0xFFE74000, 0xFFF34000, /* 24 */ 0x00004000, 0x000D4000, 0x00194000, 0x00264000, 0x00324000, 0x003F4000, 0x004B4000, 0x00584000, /* 32 */ 0x00654000, 0x00714000, 0x007E4000, 0x008A3FFF, 0x00973FFF, 0x00A33FFF, 0x00B03FFF, 0x00BC3FFF, /* 40 */ 0x00C93FFF, 0x00D63FFF, 0x00E23FFE, 0x00EF3FFE, 0x00FB3FFE, 0x01083FFE, 0x01143FFE, 0x01213FFD, /* 48 */ 0x012E3FFD, 0x013A3FFD, 0x01473FFD, }; const u32 b43_tab_retard[] = { 0xDB93CB87, 0xD666CF64, /* 0 */ 0xD1FDD358, 0xCDA6D826, 0xCA38DD9F, 0xC729E2B4, 0xC469E88E, 0xC26AEE2B, 0xC0DEF46C, 0xC073FA62, /* 8 */ 0xC01D00D5, 0xC0760743, 0xC1560D1E, 0xC2E51369, 0xC4ED18FF, 0xC7AC1ED7, 0xCB2823B2, 0xCEFA28D9, /* 16 */ 0xD2F62D3F, 0xD7BB3197, 0xDCE53568, 0xE1FE3875, 0xE7D13B35, 0xED663D35, 0xF39B3EC4, 0xF98E3FA7, /* 24 */ 0x00004000, 0x06723FA7, 0x0C653EC4, 0x129A3D35, 0x182F3B35, 0x1E023875, 0x231B3568, 0x28453197, /* 32 */ 0x2D0A2D3F, 0x310628D9, 0x34D823B2, 0x38541ED7, 0x3B1318FF, 0x3D1B1369, 0x3EAA0D1E, 0x3F8A0743, /* 40 */ 0x3FE300D5, 0x3F8DFA62, 0x3F22F46C, 0x3D96EE2B, 0x3B97E88E, 0x38D7E2B4, 0x35C8DD9F, 0x325AD826, /* 48 */ 0x2E03D358, 0x299ACF64, 0x246DCB87, }; const u16 b43_tab_finefreqa[] = { 0x0082, 0x0082, 0x0102, 0x0182, /* 0 */ 0x0202, 0x0282, 0x0302, 0x0382, 0x0402, 0x0482, 0x0502, 0x0582, 0x05E2, 0x0662, 0x06E2, 0x0762, 0x07E2, 0x0842, 0x08C2, 0x0942, /* 16 */ 0x09C2, 0x0A22, 0x0AA2, 0x0B02, 0x0B82, 0x0BE2, 0x0C62, 0x0CC2, 0x0D42, 0x0DA2, 0x0E02, 0x0E62, 0x0EE2, 0x0F42, 0x0FA2, 0x1002, /* 32 */ 0x1062, 0x10C2, 0x1122, 0x1182, 0x11E2, 0x1242, 0x12A2, 0x12E2, 0x1342, 0x13A2, 0x1402, 0x1442, 0x14A2, 0x14E2, 0x1542, 0x1582, /* 
48 */ 0x15E2, 0x1622, 0x1662, 0x16C1, 0x1701, 0x1741, 0x1781, 0x17E1, 0x1821, 0x1861, 0x18A1, 0x18E1, 0x1921, 0x1961, 0x19A1, 0x19E1, /* 64 */ 0x1A21, 0x1A61, 0x1AA1, 0x1AC1, 0x1B01, 0x1B41, 0x1B81, 0x1BA1, 0x1BE1, 0x1C21, 0x1C41, 0x1C81, 0x1CA1, 0x1CE1, 0x1D01, 0x1D41, /* 80 */ 0x1D61, 0x1DA1, 0x1DC1, 0x1E01, 0x1E21, 0x1E61, 0x1E81, 0x1EA1, 0x1EE1, 0x1F01, 0x1F21, 0x1F41, 0x1F81, 0x1FA1, 0x1FC1, 0x1FE1, /* 96 */ 0x2001, 0x2041, 0x2061, 0x2081, 0x20A1, 0x20C1, 0x20E1, 0x2101, 0x2121, 0x2141, 0x2161, 0x2181, 0x21A1, 0x21C1, 0x21E1, 0x2201, /* 112 */ 0x2221, 0x2241, 0x2261, 0x2281, 0x22A1, 0x22C1, 0x22C1, 0x22E1, 0x2301, 0x2321, 0x2341, 0x2361, 0x2361, 0x2381, 0x23A1, 0x23C1, /* 128 */ 0x23E1, 0x23E1, 0x2401, 0x2421, 0x2441, 0x2441, 0x2461, 0x2481, 0x2481, 0x24A1, 0x24C1, 0x24C1, 0x24E1, 0x2501, 0x2501, 0x2521, /* 144 */ 0x2541, 0x2541, 0x2561, 0x2561, 0x2581, 0x25A1, 0x25A1, 0x25C1, 0x25C1, 0x25E1, 0x2601, 0x2601, 0x2621, 0x2621, 0x2641, 0x2641, /* 160 */ 0x2661, 0x2661, 0x2681, 0x2681, 0x26A1, 0x26A1, 0x26C1, 0x26C1, 0x26E1, 0x26E1, 0x2701, 0x2701, 0x2721, 0x2721, 0x2740, 0x2740, /* 176 */ 0x2760, 0x2760, 0x2780, 0x2780, 0x2780, 0x27A0, 0x27A0, 0x27C0, 0x27C0, 0x27E0, 0x27E0, 0x27E0, 0x2800, 0x2800, 0x2820, 0x2820, /* 192 */ 0x2820, 0x2840, 0x2840, 0x2840, 0x2860, 0x2860, 0x2880, 0x2880, 0x2880, 0x28A0, 0x28A0, 0x28A0, 0x28C0, 0x28C0, 0x28C0, 0x28E0, /* 208 */ 0x28E0, 0x28E0, 0x2900, 0x2900, 0x2900, 0x2920, 0x2920, 0x2920, 0x2940, 0x2940, 0x2940, 0x2960, 0x2960, 0x2960, 0x2960, 0x2980, /* 224 */ 0x2980, 0x2980, 0x29A0, 0x29A0, 0x29A0, 0x29A0, 0x29C0, 0x29C0, 0x29C0, 0x29E0, 0x29E0, 0x29E0, 0x29E0, 0x2A00, 0x2A00, 0x2A00, /* 240 */ 0x2A00, 0x2A20, 0x2A20, 0x2A20, 0x2A20, 0x2A40, 0x2A40, 0x2A40, 0x2A40, 0x2A60, 0x2A60, 0x2A60, }; const u16 b43_tab_finefreqg[] = { 0x0089, 0x02E9, 0x0409, 0x04E9, /* 0 */ 0x05A9, 0x0669, 0x0709, 0x0789, 0x0829, 0x08A9, 0x0929, 0x0989, 0x0A09, 0x0A69, 0x0AC9, 0x0B29, 0x0BA9, 0x0BE9, 0x0C49, 0x0CA9, /* 16 */ 0x0D09, 0x0D69, 0x0DA9, 
0x0E09, 0x0E69, 0x0EA9, 0x0F09, 0x0F49, 0x0FA9, 0x0FE9, 0x1029, 0x1089, 0x10C9, 0x1109, 0x1169, 0x11A9, /* 32 */ 0x11E9, 0x1229, 0x1289, 0x12C9, 0x1309, 0x1349, 0x1389, 0x13C9, 0x1409, 0x1449, 0x14A9, 0x14E9, 0x1529, 0x1569, 0x15A9, 0x15E9, /* 48 */ 0x1629, 0x1669, 0x16A9, 0x16E8, 0x1728, 0x1768, 0x17A8, 0x17E8, 0x1828, 0x1868, 0x18A8, 0x18E8, 0x1928, 0x1968, 0x19A8, 0x19E8, /* 64 */ 0x1A28, 0x1A68, 0x1AA8, 0x1AE8, 0x1B28, 0x1B68, 0x1BA8, 0x1BE8, 0x1C28, 0x1C68, 0x1CA8, 0x1CE8, 0x1D28, 0x1D68, 0x1DC8, 0x1E08, /* 80 */ 0x1E48, 0x1E88, 0x1EC8, 0x1F08, 0x1F48, 0x1F88, 0x1FE8, 0x2028, 0x2068, 0x20A8, 0x2108, 0x2148, 0x2188, 0x21C8, 0x2228, 0x2268, /* 96 */ 0x22C8, 0x2308, 0x2348, 0x23A8, 0x23E8, 0x2448, 0x24A8, 0x24E8, 0x2548, 0x25A8, 0x2608, 0x2668, 0x26C8, 0x2728, 0x2787, 0x27E7, /* 112 */ 0x2847, 0x28C7, 0x2947, 0x29A7, 0x2A27, 0x2AC7, 0x2B47, 0x2BE7, 0x2CA7, 0x2D67, 0x2E47, 0x2F67, 0x3247, 0x3526, 0x3646, 0x3726, /* 128 */ 0x3806, 0x38A6, 0x3946, 0x39E6, 0x3A66, 0x3AE6, 0x3B66, 0x3BC6, 0x3C45, 0x3CA5, 0x3D05, 0x3D85, 0x3DE5, 0x3E45, 0x3EA5, 0x3EE5, /* 144 */ 0x3F45, 0x3FA5, 0x4005, 0x4045, 0x40A5, 0x40E5, 0x4145, 0x4185, 0x41E5, 0x4225, 0x4265, 0x42C5, 0x4305, 0x4345, 0x43A5, 0x43E5, /* 160 */ 0x4424, 0x4464, 0x44C4, 0x4504, 0x4544, 0x4584, 0x45C4, 0x4604, 0x4644, 0x46A4, 0x46E4, 0x4724, 0x4764, 0x47A4, 0x47E4, 0x4824, /* 176 */ 0x4864, 0x48A4, 0x48E4, 0x4924, 0x4964, 0x49A4, 0x49E4, 0x4A24, 0x4A64, 0x4AA4, 0x4AE4, 0x4B23, 0x4B63, 0x4BA3, 0x4BE3, 0x4C23, /* 192 */ 0x4C63, 0x4CA3, 0x4CE3, 0x4D23, 0x4D63, 0x4DA3, 0x4DE3, 0x4E23, 0x4E63, 0x4EA3, 0x4EE3, 0x4F23, 0x4F63, 0x4FC3, 0x5003, 0x5043, /* 208 */ 0x5083, 0x50C3, 0x5103, 0x5143, 0x5183, 0x51E2, 0x5222, 0x5262, 0x52A2, 0x52E2, 0x5342, 0x5382, 0x53C2, 0x5402, 0x5462, 0x54A2, /* 224 */ 0x5502, 0x5542, 0x55A2, 0x55E2, 0x5642, 0x5682, 0x56E2, 0x5722, 0x5782, 0x57E1, 0x5841, 0x58A1, 0x5901, 0x5961, 0x59C1, 0x5A21, /* 240 */ 0x5AA1, 0x5B01, 0x5B81, 0x5BE1, 0x5C61, 0x5D01, 0x5D80, 0x5E20, 0x5EE0, 0x5FA0, 0x6080, 0x61C0, 
}; const u16 b43_tab_noisea2[] = { 0x0001, 0x0001, 0x0001, 0xFFFE, 0xFFFE, 0x3FFF, 0x1000, 0x0393, }; const u16 b43_tab_noisea3[] = { 0x5E5E, 0x5E5E, 0x5E5E, 0x3F48, 0x4C4C, 0x4C4C, 0x4C4C, 0x2D36, }; const u16 b43_tab_noiseg1[] = { 0x013C, 0x01F5, 0x031A, 0x0631, 0x0001, 0x0001, 0x0001, 0x0001, }; const u16 b43_tab_noiseg2[] = { 0x5484, 0x3C40, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, }; const u16 b43_tab_noisescalea2[] = { 0x6767, 0x6767, 0x6767, 0x6767, /* 0 */ 0x6767, 0x6767, 0x6767, 0x6767, 0x6767, 0x6767, 0x6767, 0x6767, 0x6767, 0x6700, 0x6767, 0x6767, 0x6767, 0x6767, 0x6767, 0x6767, /* 16 */ 0x6767, 0x6767, 0x6767, 0x6767, 0x6767, 0x6767, 0x0067, }; const u16 b43_tab_noisescalea3[] = { 0x2323, 0x2323, 0x2323, 0x2323, /* 0 */ 0x2323, 0x2323, 0x2323, 0x2323, 0x2323, 0x2323, 0x2323, 0x2323, 0x2323, 0x2300, 0x2323, 0x2323, 0x2323, 0x2323, 0x2323, 0x2323, /* 16 */ 0x2323, 0x2323, 0x2323, 0x2323, 0x2323, 0x2323, 0x0023, }; const u16 b43_tab_noisescaleg1[] = { 0x6C77, 0x5162, 0x3B40, 0x3335, /* 0 */ 0x2F2D, 0x2A2A, 0x2527, 0x1F21, 0x1A1D, 0x1719, 0x1616, 0x1414, 0x1414, 0x1400, 0x1414, 0x1614, 0x1716, 0x1A19, 0x1F1D, 0x2521, /* 16 */ 0x2A27, 0x2F2A, 0x332D, 0x3B35, 0x5140, 0x6C62, 0x0077, }; const u16 b43_tab_noisescaleg2[] = { 0xD8DD, 0xCBD4, 0xBCC0, 0xB6B7, /* 0 */ 0xB2B0, 0xADAD, 0xA7A9, 0x9FA1, 0x969B, 0x9195, 0x8F8F, 0x8A8A, 0x8A8A, 0x8A00, 0x8A8A, 0x8F8A, 0x918F, 0x9695, 0x9F9B, 0xA7A1, /* 16 */ 0xADA9, 0xB2AD, 0xB6B0, 0xBCB7, 0xCBC0, 0xD8D4, 0x00DD, }; const u16 b43_tab_noisescaleg3[] = { 0xA4A4, 0xA4A4, 0xA4A4, 0xA4A4, /* 0 */ 0xA4A4, 0xA4A4, 0xA4A4, 0xA4A4, 0xA4A4, 0xA4A4, 0xA4A4, 0xA4A4, 0xA4A4, 0xA400, 0xA4A4, 0xA4A4, 0xA4A4, 0xA4A4, 0xA4A4, 0xA4A4, /* 16 */ 0xA4A4, 0xA4A4, 0xA4A4, 0xA4A4, 0xA4A4, 0xA4A4, 0x00A4, }; const u16 b43_tab_sigmasqr1[] = { 0x007A, 0x0075, 0x0071, 0x006C, /* 0 */ 0x0067, 0x0063, 0x005E, 0x0059, 0x0054, 0x0050, 0x004B, 0x0046, 0x0042, 0x003D, 0x003D, 0x003D, 0x003D, 0x003D, 0x003D, 0x003D, /* 16 */ 0x003D, 0x003D, 0x003D, 
0x003D, 0x003D, 0x003D, 0x0000, 0x003D, 0x003D, 0x003D, 0x003D, 0x003D, 0x003D, 0x003D, 0x003D, 0x003D, /* 32 */ 0x003D, 0x003D, 0x003D, 0x003D, 0x0042, 0x0046, 0x004B, 0x0050, 0x0054, 0x0059, 0x005E, 0x0063, 0x0067, 0x006C, 0x0071, 0x0075, /* 48 */ 0x007A, }; const u16 b43_tab_sigmasqr2[] = { 0x00DE, 0x00DC, 0x00DA, 0x00D8, /* 0 */ 0x00D6, 0x00D4, 0x00D2, 0x00CF, 0x00CD, 0x00CA, 0x00C7, 0x00C4, 0x00C1, 0x00BE, 0x00BE, 0x00BE, 0x00BE, 0x00BE, 0x00BE, 0x00BE, /* 16 */ 0x00BE, 0x00BE, 0x00BE, 0x00BE, 0x00BE, 0x00BE, 0x0000, 0x00BE, 0x00BE, 0x00BE, 0x00BE, 0x00BE, 0x00BE, 0x00BE, 0x00BE, 0x00BE, /* 32 */ 0x00BE, 0x00BE, 0x00BE, 0x00BE, 0x00C1, 0x00C4, 0x00C7, 0x00CA, 0x00CD, 0x00CF, 0x00D2, 0x00D4, 0x00D6, 0x00D8, 0x00DA, 0x00DC, /* 48 */ 0x00DE, }; const u16 b43_tab_rssiagc1[] = { 0xFFF8, 0xFFF8, 0xFFF8, 0xFFF8, /* 0 */ 0xFFF8, 0xFFF9, 0xFFFC, 0xFFFE, 0xFFF8, 0xFFF8, 0xFFF8, 0xFFF8, 0xFFF8, 0xFFF8, 0xFFF8, 0xFFF8, }; const u16 b43_tab_rssiagc2[] = { 0x0820, 0x0820, 0x0920, 0x0C38, /* 0 */ 0x0820, 0x0820, 0x0820, 0x0820, 0x0820, 0x0820, 0x0920, 0x0A38, 0x0820, 0x0820, 0x0820, 0x0820, 0x0820, 0x0820, 0x0920, 0x0A38, /* 16 */ 0x0820, 0x0820, 0x0820, 0x0820, 0x0820, 0x0820, 0x0920, 0x0A38, 0x0820, 0x0820, 0x0820, 0x0820, 0x0820, 0x0820, 0x0920, 0x0A38, /* 32 */ 0x0820, 0x0820, 0x0820, 0x0820, 0x0820, 0x0820, 0x0920, 0x0A38, 0x0820, 0x0820, 0x0820, 0x0820, }; static inline void assert_sizes(void) { BUILD_BUG_ON(B43_TAB_ROTOR_SIZE != ARRAY_SIZE(b43_tab_rotor)); BUILD_BUG_ON(B43_TAB_RETARD_SIZE != ARRAY_SIZE(b43_tab_retard)); BUILD_BUG_ON(B43_TAB_FINEFREQA_SIZE != ARRAY_SIZE(b43_tab_finefreqa)); BUILD_BUG_ON(B43_TAB_FINEFREQG_SIZE != ARRAY_SIZE(b43_tab_finefreqg)); BUILD_BUG_ON(B43_TAB_NOISEA2_SIZE != ARRAY_SIZE(b43_tab_noisea2)); BUILD_BUG_ON(B43_TAB_NOISEA3_SIZE != ARRAY_SIZE(b43_tab_noisea3)); BUILD_BUG_ON(B43_TAB_NOISEG1_SIZE != ARRAY_SIZE(b43_tab_noiseg1)); BUILD_BUG_ON(B43_TAB_NOISEG2_SIZE != ARRAY_SIZE(b43_tab_noiseg2)); BUILD_BUG_ON(B43_TAB_NOISESCALE_SIZE != 
ARRAY_SIZE(b43_tab_noisescalea2)); BUILD_BUG_ON(B43_TAB_NOISESCALE_SIZE != ARRAY_SIZE(b43_tab_noisescalea3)); BUILD_BUG_ON(B43_TAB_NOISESCALE_SIZE != ARRAY_SIZE(b43_tab_noisescaleg1)); BUILD_BUG_ON(B43_TAB_NOISESCALE_SIZE != ARRAY_SIZE(b43_tab_noisescaleg2)); BUILD_BUG_ON(B43_TAB_NOISESCALE_SIZE != ARRAY_SIZE(b43_tab_noisescaleg3)); BUILD_BUG_ON(B43_TAB_SIGMASQR_SIZE != ARRAY_SIZE(b43_tab_sigmasqr1)); BUILD_BUG_ON(B43_TAB_SIGMASQR_SIZE != ARRAY_SIZE(b43_tab_sigmasqr2)); BUILD_BUG_ON(B43_TAB_RSSIAGC1_SIZE != ARRAY_SIZE(b43_tab_rssiagc1)); BUILD_BUG_ON(B43_TAB_RSSIAGC2_SIZE != ARRAY_SIZE(b43_tab_rssiagc2)); } u16 b43_ofdmtab_read16(struct b43_wldev *dev, u16 table, u16 offset) { struct b43_phy_g *gphy = dev->phy.g; u16 addr; addr = table + offset; if ((gphy->ofdmtab_addr_direction != B43_OFDMTAB_DIRECTION_READ) || (addr - 1 != gphy->ofdmtab_addr)) { /* The hardware has a different address in memory. Update it. */ b43_phy_write(dev, B43_PHY_OTABLECTL, addr); gphy->ofdmtab_addr_direction = B43_OFDMTAB_DIRECTION_READ; } gphy->ofdmtab_addr = addr; return b43_phy_read(dev, B43_PHY_OTABLEI); /* Some compiletime assertions... */ assert_sizes(); } void b43_ofdmtab_write16(struct b43_wldev *dev, u16 table, u16 offset, u16 value) { struct b43_phy_g *gphy = dev->phy.g; u16 addr; addr = table + offset; if ((gphy->ofdmtab_addr_direction != B43_OFDMTAB_DIRECTION_WRITE) || (addr - 1 != gphy->ofdmtab_addr)) { /* The hardware has a different address in memory. Update it. */ b43_phy_write(dev, B43_PHY_OTABLECTL, addr); gphy->ofdmtab_addr_direction = B43_OFDMTAB_DIRECTION_WRITE; } gphy->ofdmtab_addr = addr; b43_phy_write(dev, B43_PHY_OTABLEI, value); } u32 b43_ofdmtab_read32(struct b43_wldev *dev, u16 table, u16 offset) { struct b43_phy_g *gphy = dev->phy.g; u32 ret; u16 addr; addr = table + offset; if ((gphy->ofdmtab_addr_direction != B43_OFDMTAB_DIRECTION_READ) || (addr - 1 != gphy->ofdmtab_addr)) { /* The hardware has a different address in memory. Update it. 
*/ b43_phy_write(dev, B43_PHY_OTABLECTL, addr); gphy->ofdmtab_addr_direction = B43_OFDMTAB_DIRECTION_READ; } gphy->ofdmtab_addr = addr; ret = b43_phy_read(dev, B43_PHY_OTABLEQ); ret <<= 16; ret |= b43_phy_read(dev, B43_PHY_OTABLEI); return ret; } void b43_ofdmtab_write32(struct b43_wldev *dev, u16 table, u16 offset, u32 value) { struct b43_phy_g *gphy = dev->phy.g; u16 addr; addr = table + offset; if ((gphy->ofdmtab_addr_direction != B43_OFDMTAB_DIRECTION_WRITE) || (addr - 1 != gphy->ofdmtab_addr)) { /* The hardware has a different address in memory. Update it. */ b43_phy_write(dev, B43_PHY_OTABLECTL, addr); gphy->ofdmtab_addr_direction = B43_OFDMTAB_DIRECTION_WRITE; } gphy->ofdmtab_addr = addr; b43_phy_write(dev, B43_PHY_OTABLEI, value); b43_phy_write(dev, B43_PHY_OTABLEQ, (value >> 16)); } u16 b43_gtab_read(struct b43_wldev *dev, u16 table, u16 offset) { b43_phy_write(dev, B43_PHY_GTABCTL, table + offset); return b43_phy_read(dev, B43_PHY_GTABDATA); } void b43_gtab_write(struct b43_wldev *dev, u16 table, u16 offset, u16 value) { b43_phy_write(dev, B43_PHY_GTABCTL, table + offset); b43_phy_write(dev, B43_PHY_GTABDATA, value); }
gpl-2.0
columbia/linux-2.6-racepro
drivers/usb/gadget/f_rndis.c
132
25176
/* * f_rndis.c -- RNDIS link function driver * * Copyright (C) 2003-2005,2008 David Brownell * Copyright (C) 2003-2004 Robert Schwebel, Benedikt Spranger * Copyright (C) 2008 Nokia Corporation * Copyright (C) 2009 Samsung Electronics * Author: Michal Nazarewicz (m.nazarewicz@samsung.com) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /* #define VERBOSE_DEBUG */ #include <linux/slab.h> #include <linux/kernel.h> #include <linux/device.h> #include <linux/etherdevice.h> #include <asm/atomic.h> #include "u_ether.h" #include "rndis.h" /* * This function is an RNDIS Ethernet port -- a Microsoft protocol that's * been promoted instead of the standard CDC Ethernet. The published RNDIS * spec is ambiguous, incomplete, and needlessly complex. Variants such as * ActiveSync have even worse status in terms of specification. * * In short: it's a protocol controlled by (and for) Microsoft, not for an * Open ecosystem or markets. Linux supports it *only* because Microsoft * doesn't support the CDC Ethernet standard. * * The RNDIS data transfer model is complex, with multiple Ethernet packets * per USB message, and out of band data. The control model is built around * what's essentially an "RNDIS RPC" protocol. 
It's all wrapped in a CDC ACM * (modem, not Ethernet) veneer, with those ACM descriptors being entirely * useless (they're ignored). RNDIS expects to be the only function in its * configuration, so it's no real help if you need composite devices; and * it expects to be the first configuration too. * * There is a single technical advantage of RNDIS over CDC Ethernet, if you * discount the fluff that its RPC can be made to deliver: it doesn't need * a NOP altsetting for the data interface. That lets it work on some of the * "so smart it's stupid" hardware which takes over configuration changes * from the software, and adds restrictions like "no altsettings". * * Unfortunately MSFT's RNDIS drivers are buggy. They hang or oops, and * have all sorts of contrary-to-specification oddities that can prevent * them from working sanely. Since bugfixes (or accurate specs, letting * Linux work around those bugs) are unlikely to ever come from MSFT, you * may want to avoid using RNDIS on purely operational grounds. * * Omissions from the RNDIS 1.0 specification include: * * - Power management ... references data that's scattered around lots * of other documentation, which is incorrect/incomplete there too. * * - There are various undocumented protocol requirements, like the need * to send garbage in some control-OUT messages. * * - MS-Windows drivers sometimes emit undocumented requests. 
*/ struct rndis_ep_descs { struct usb_endpoint_descriptor *in; struct usb_endpoint_descriptor *out; struct usb_endpoint_descriptor *notify; }; struct f_rndis { struct gether port; u8 ctrl_id, data_id; u8 ethaddr[ETH_ALEN]; int config; struct rndis_ep_descs fs; struct rndis_ep_descs hs; struct usb_ep *notify; struct usb_endpoint_descriptor *notify_desc; struct usb_request *notify_req; atomic_t notify_count; }; static inline struct f_rndis *func_to_rndis(struct usb_function *f) { return container_of(f, struct f_rndis, port.func); } /* peak (theoretical) bulk transfer rate in bits-per-second */ static unsigned int bitrate(struct usb_gadget *g) { if (gadget_is_dualspeed(g) && g->speed == USB_SPEED_HIGH) return 13 * 512 * 8 * 1000 * 8; else return 19 * 64 * 1 * 1000 * 8; } /*-------------------------------------------------------------------------*/ /* */ #define LOG2_STATUS_INTERVAL_MSEC 5 /* 1 << 5 == 32 msec */ #define STATUS_BYTECOUNT 8 /* 8 bytes data */ /* interface descriptor: */ static struct usb_interface_descriptor rndis_control_intf = { .bLength = sizeof rndis_control_intf, .bDescriptorType = USB_DT_INTERFACE, /* .bInterfaceNumber = DYNAMIC */ /* status endpoint is optional; this could be patched later */ .bNumEndpoints = 1, .bInterfaceClass = USB_CLASS_COMM, .bInterfaceSubClass = USB_CDC_SUBCLASS_ACM, .bInterfaceProtocol = USB_CDC_ACM_PROTO_VENDOR, /* .iInterface = DYNAMIC */ }; static struct usb_cdc_header_desc header_desc = { .bLength = sizeof header_desc, .bDescriptorType = USB_DT_CS_INTERFACE, .bDescriptorSubType = USB_CDC_HEADER_TYPE, .bcdCDC = cpu_to_le16(0x0110), }; static struct usb_cdc_call_mgmt_descriptor call_mgmt_descriptor = { .bLength = sizeof call_mgmt_descriptor, .bDescriptorType = USB_DT_CS_INTERFACE, .bDescriptorSubType = USB_CDC_CALL_MANAGEMENT_TYPE, .bmCapabilities = 0x00, .bDataInterface = 0x01, }; static struct usb_cdc_acm_descriptor rndis_acm_descriptor = { .bLength = sizeof rndis_acm_descriptor, .bDescriptorType = USB_DT_CS_INTERFACE, 
.bDescriptorSubType = USB_CDC_ACM_TYPE, .bmCapabilities = 0x00, }; static struct usb_cdc_union_desc rndis_union_desc = { .bLength = sizeof(rndis_union_desc), .bDescriptorType = USB_DT_CS_INTERFACE, .bDescriptorSubType = USB_CDC_UNION_TYPE, /* .bMasterInterface0 = DYNAMIC */ /* .bSlaveInterface0 = DYNAMIC */ }; /* the data interface has two bulk endpoints */ static struct usb_interface_descriptor rndis_data_intf = { .bLength = sizeof rndis_data_intf, .bDescriptorType = USB_DT_INTERFACE, /* .bInterfaceNumber = DYNAMIC */ .bNumEndpoints = 2, .bInterfaceClass = USB_CLASS_CDC_DATA, .bInterfaceSubClass = 0, .bInterfaceProtocol = 0, /* .iInterface = DYNAMIC */ }; static struct usb_interface_assoc_descriptor rndis_iad_descriptor = { .bLength = sizeof rndis_iad_descriptor, .bDescriptorType = USB_DT_INTERFACE_ASSOCIATION, .bFirstInterface = 0, /* XXX, hardcoded */ .bInterfaceCount = 2, // control + data .bFunctionClass = USB_CLASS_COMM, .bFunctionSubClass = USB_CDC_SUBCLASS_ETHERNET, .bFunctionProtocol = USB_CDC_PROTO_NONE, /* .iFunction = DYNAMIC */ }; /* full speed support: */ static struct usb_endpoint_descriptor fs_notify_desc = { .bLength = USB_DT_ENDPOINT_SIZE, .bDescriptorType = USB_DT_ENDPOINT, .bEndpointAddress = USB_DIR_IN, .bmAttributes = USB_ENDPOINT_XFER_INT, .wMaxPacketSize = cpu_to_le16(STATUS_BYTECOUNT), .bInterval = 1 << LOG2_STATUS_INTERVAL_MSEC, }; static struct usb_endpoint_descriptor fs_in_desc = { .bLength = USB_DT_ENDPOINT_SIZE, .bDescriptorType = USB_DT_ENDPOINT, .bEndpointAddress = USB_DIR_IN, .bmAttributes = USB_ENDPOINT_XFER_BULK, }; static struct usb_endpoint_descriptor fs_out_desc = { .bLength = USB_DT_ENDPOINT_SIZE, .bDescriptorType = USB_DT_ENDPOINT, .bEndpointAddress = USB_DIR_OUT, .bmAttributes = USB_ENDPOINT_XFER_BULK, }; static struct usb_descriptor_header *eth_fs_function[] = { (struct usb_descriptor_header *) &rndis_iad_descriptor, /* control interface matches ACM, not Ethernet */ (struct usb_descriptor_header *) &rndis_control_intf, 
(struct usb_descriptor_header *) &header_desc, (struct usb_descriptor_header *) &call_mgmt_descriptor, (struct usb_descriptor_header *) &rndis_acm_descriptor, (struct usb_descriptor_header *) &rndis_union_desc, (struct usb_descriptor_header *) &fs_notify_desc, /* data interface has no altsetting */ (struct usb_descriptor_header *) &rndis_data_intf, (struct usb_descriptor_header *) &fs_in_desc, (struct usb_descriptor_header *) &fs_out_desc, NULL, }; /* high speed support: */ static struct usb_endpoint_descriptor hs_notify_desc = { .bLength = USB_DT_ENDPOINT_SIZE, .bDescriptorType = USB_DT_ENDPOINT, .bEndpointAddress = USB_DIR_IN, .bmAttributes = USB_ENDPOINT_XFER_INT, .wMaxPacketSize = cpu_to_le16(STATUS_BYTECOUNT), .bInterval = LOG2_STATUS_INTERVAL_MSEC + 4, }; static struct usb_endpoint_descriptor hs_in_desc = { .bLength = USB_DT_ENDPOINT_SIZE, .bDescriptorType = USB_DT_ENDPOINT, .bEndpointAddress = USB_DIR_IN, .bmAttributes = USB_ENDPOINT_XFER_BULK, .wMaxPacketSize = cpu_to_le16(512), }; static struct usb_endpoint_descriptor hs_out_desc = { .bLength = USB_DT_ENDPOINT_SIZE, .bDescriptorType = USB_DT_ENDPOINT, .bEndpointAddress = USB_DIR_OUT, .bmAttributes = USB_ENDPOINT_XFER_BULK, .wMaxPacketSize = cpu_to_le16(512), }; static struct usb_descriptor_header *eth_hs_function[] = { (struct usb_descriptor_header *) &rndis_iad_descriptor, /* control interface matches ACM, not Ethernet */ (struct usb_descriptor_header *) &rndis_control_intf, (struct usb_descriptor_header *) &header_desc, (struct usb_descriptor_header *) &call_mgmt_descriptor, (struct usb_descriptor_header *) &rndis_acm_descriptor, (struct usb_descriptor_header *) &rndis_union_desc, (struct usb_descriptor_header *) &hs_notify_desc, /* data interface has no altsetting */ (struct usb_descriptor_header *) &rndis_data_intf, (struct usb_descriptor_header *) &hs_in_desc, (struct usb_descriptor_header *) &hs_out_desc, NULL, }; /* string descriptors: */ static struct usb_string rndis_string_defs[] = { [0].s = 
"RNDIS Communications Control", [1].s = "RNDIS Ethernet Data", [2].s = "RNDIS", { } /* end of list */ }; static struct usb_gadget_strings rndis_string_table = { .language = 0x0409, /* en-us */ .strings = rndis_string_defs, }; static struct usb_gadget_strings *rndis_strings[] = { &rndis_string_table, NULL, }; /*-------------------------------------------------------------------------*/ static struct sk_buff *rndis_add_header(struct gether *port, struct sk_buff *skb) { struct sk_buff *skb2; skb2 = skb_realloc_headroom(skb, sizeof(struct rndis_packet_msg_type)); if (skb2) rndis_add_hdr(skb2); dev_kfree_skb_any(skb); return skb2; } static void rndis_response_available(void *_rndis) { struct f_rndis *rndis = _rndis; struct usb_request *req = rndis->notify_req; struct usb_composite_dev *cdev = rndis->port.func.config->cdev; __le32 *data = req->buf; int status; if (atomic_inc_return(&rndis->notify_count) != 1) return; /* Send RNDIS RESPONSE_AVAILABLE notification; a * USB_CDC_NOTIFY_RESPONSE_AVAILABLE "should" work too * * This is the only notification defined by RNDIS. 
*/ data[0] = cpu_to_le32(1); data[1] = cpu_to_le32(0); status = usb_ep_queue(rndis->notify, req, GFP_ATOMIC); if (status) { atomic_dec(&rndis->notify_count); DBG(cdev, "notify/0 --> %d\n", status); } } static void rndis_response_complete(struct usb_ep *ep, struct usb_request *req) { struct f_rndis *rndis = req->context; struct usb_composite_dev *cdev = rndis->port.func.config->cdev; int status = req->status; /* after TX: * - USB_CDC_GET_ENCAPSULATED_RESPONSE (ep0/control) * - RNDIS_RESPONSE_AVAILABLE (status/irq) */ switch (status) { case -ECONNRESET: case -ESHUTDOWN: /* connection gone */ atomic_set(&rndis->notify_count, 0); break; default: DBG(cdev, "RNDIS %s response error %d, %d/%d\n", ep->name, status, req->actual, req->length); /* FALLTHROUGH */ case 0: if (ep != rndis->notify) break; /* handle multiple pending RNDIS_RESPONSE_AVAILABLE * notifications by resending until we're done */ if (atomic_dec_and_test(&rndis->notify_count)) break; status = usb_ep_queue(rndis->notify, req, GFP_ATOMIC); if (status) { atomic_dec(&rndis->notify_count); DBG(cdev, "notify/1 --> %d\n", status); } break; } } static void rndis_command_complete(struct usb_ep *ep, struct usb_request *req) { struct f_rndis *rndis = req->context; struct usb_composite_dev *cdev = rndis->port.func.config->cdev; int status; /* received RNDIS command from USB_CDC_SEND_ENCAPSULATED_COMMAND */ // spin_lock(&dev->lock); status = rndis_msg_parser(rndis->config, (u8 *) req->buf); if (status < 0) ERROR(cdev, "RNDIS command error %d, %d/%d\n", status, req->actual, req->length); // spin_unlock(&dev->lock); } static int rndis_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl) { struct f_rndis *rndis = func_to_rndis(f); struct usb_composite_dev *cdev = f->config->cdev; struct usb_request *req = cdev->req; int value = -EOPNOTSUPP; u16 w_index = le16_to_cpu(ctrl->wIndex); u16 w_value = le16_to_cpu(ctrl->wValue); u16 w_length = le16_to_cpu(ctrl->wLength); /* composite driver infrastructure handles 
everything except * CDC class messages; interface activation uses set_alt(). */ switch ((ctrl->bRequestType << 8) | ctrl->bRequest) { /* RNDIS uses the CDC command encapsulation mechanism to implement * an RPC scheme, with much getting/setting of attributes by OID. */ case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8) | USB_CDC_SEND_ENCAPSULATED_COMMAND: if (w_length > req->length || w_value || w_index != rndis->ctrl_id) goto invalid; /* read the request; process it later */ value = w_length; req->complete = rndis_command_complete; req->context = rndis; /* later, rndis_response_available() sends a notification */ break; case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8) | USB_CDC_GET_ENCAPSULATED_RESPONSE: if (w_value || w_index != rndis->ctrl_id) goto invalid; else { u8 *buf; u32 n; /* return the result */ buf = rndis_get_next_response(rndis->config, &n); if (buf) { memcpy(req->buf, buf, n); req->complete = rndis_response_complete; rndis_free_response(rndis->config, buf); value = n; } /* else stalls ... spec says to avoid that */ } break; default: invalid: VDBG(cdev, "invalid control req%02x.%02x v%04x i%04x l%d\n", ctrl->bRequestType, ctrl->bRequest, w_value, w_index, w_length); } /* respond with data transfer or status phase? 
*/ if (value >= 0) { DBG(cdev, "rndis req%02x.%02x v%04x i%04x l%d\n", ctrl->bRequestType, ctrl->bRequest, w_value, w_index, w_length); req->zero = (value < w_length); req->length = value; value = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC); if (value < 0) ERROR(cdev, "rndis response on err %d\n", value); } /* device either stalls (value < 0) or reports success */ return value; } static int rndis_set_alt(struct usb_function *f, unsigned intf, unsigned alt) { struct f_rndis *rndis = func_to_rndis(f); struct usb_composite_dev *cdev = f->config->cdev; /* we know alt == 0 */ if (intf == rndis->ctrl_id) { if (rndis->notify->driver_data) { VDBG(cdev, "reset rndis control %d\n", intf); usb_ep_disable(rndis->notify); } else { VDBG(cdev, "init rndis ctrl %d\n", intf); rndis->notify_desc = ep_choose(cdev->gadget, rndis->hs.notify, rndis->fs.notify); } usb_ep_enable(rndis->notify, rndis->notify_desc); rndis->notify->driver_data = rndis; } else if (intf == rndis->data_id) { struct net_device *net; if (rndis->port.in_ep->driver_data) { DBG(cdev, "reset rndis\n"); gether_disconnect(&rndis->port); } if (!rndis->port.in) { DBG(cdev, "init rndis\n"); rndis->port.in = ep_choose(cdev->gadget, rndis->hs.in, rndis->fs.in); rndis->port.out = ep_choose(cdev->gadget, rndis->hs.out, rndis->fs.out); } /* Avoid ZLPs; they can be troublesome. */ rndis->port.is_zlp_ok = false; /* RNDIS should be in the "RNDIS uninitialized" state, * either never activated or after rndis_uninit(). * * We don't want data to flow here until a nonzero packet * filter is set, at which point it enters "RNDIS data * initialized" state ... but we do want the endpoints * to be activated. It's a strange little state. * * REVISIT the RNDIS gadget code has done this wrong for a * very long time. We need another call to the link layer * code -- gether_updown(...bool) maybe -- to do it right. */ rndis->port.cdc_filter = 0; DBG(cdev, "RNDIS RX/TX early activation ... 
\n"); net = gether_connect(&rndis->port); if (IS_ERR(net)) return PTR_ERR(net); rndis_set_param_dev(rndis->config, net, &rndis->port.cdc_filter); } else goto fail; return 0; fail: return -EINVAL; } static void rndis_disable(struct usb_function *f) { struct f_rndis *rndis = func_to_rndis(f); struct usb_composite_dev *cdev = f->config->cdev; if (!rndis->notify->driver_data) return; DBG(cdev, "rndis deactivated\n"); rndis_uninit(rndis->config); gether_disconnect(&rndis->port); usb_ep_disable(rndis->notify); rndis->notify->driver_data = NULL; } /*-------------------------------------------------------------------------*/ /* * This isn't quite the same mechanism as CDC Ethernet, since the * notification scheme passes less data, but the same set of link * states must be tested. A key difference is that altsettings are * not used to tell whether the link should send packets or not. */ static void rndis_open(struct gether *geth) { struct f_rndis *rndis = func_to_rndis(&geth->func); struct usb_composite_dev *cdev = geth->func.config->cdev; DBG(cdev, "%s\n", __func__); rndis_set_param_medium(rndis->config, NDIS_MEDIUM_802_3, bitrate(cdev->gadget) / 100); rndis_signal_connect(rndis->config); } static void rndis_close(struct gether *geth) { struct f_rndis *rndis = func_to_rndis(&geth->func); DBG(geth->func.config->cdev, "%s\n", __func__); rndis_set_param_medium(rndis->config, NDIS_MEDIUM_802_3, 0); rndis_signal_disconnect(rndis->config); } /*-------------------------------------------------------------------------*/ /* ethernet function driver setup/binding */ static int rndis_bind(struct usb_configuration *c, struct usb_function *f) { struct usb_composite_dev *cdev = c->cdev; struct f_rndis *rndis = func_to_rndis(f); int status; struct usb_ep *ep; /* allocate instance-specific interface IDs */ status = usb_interface_id(c, f); if (status < 0) goto fail; rndis->ctrl_id = status; rndis_iad_descriptor.bFirstInterface = status; rndis_control_intf.bInterfaceNumber = status; 
rndis_union_desc.bMasterInterface0 = status; status = usb_interface_id(c, f); if (status < 0) goto fail; rndis->data_id = status; rndis_data_intf.bInterfaceNumber = status; rndis_union_desc.bSlaveInterface0 = status; status = -ENODEV; /* allocate instance-specific endpoints */ ep = usb_ep_autoconfig(cdev->gadget, &fs_in_desc); if (!ep) goto fail; rndis->port.in_ep = ep; ep->driver_data = cdev; /* claim */ ep = usb_ep_autoconfig(cdev->gadget, &fs_out_desc); if (!ep) goto fail; rndis->port.out_ep = ep; ep->driver_data = cdev; /* claim */ /* NOTE: a status/notification endpoint is, strictly speaking, * optional. We don't treat it that way though! It's simpler, * and some newer profiles don't treat it as optional. */ ep = usb_ep_autoconfig(cdev->gadget, &fs_notify_desc); if (!ep) goto fail; rndis->notify = ep; ep->driver_data = cdev; /* claim */ status = -ENOMEM; /* allocate notification request and buffer */ rndis->notify_req = usb_ep_alloc_request(ep, GFP_KERNEL); if (!rndis->notify_req) goto fail; rndis->notify_req->buf = kmalloc(STATUS_BYTECOUNT, GFP_KERNEL); if (!rndis->notify_req->buf) goto fail; rndis->notify_req->length = STATUS_BYTECOUNT; rndis->notify_req->context = rndis; rndis->notify_req->complete = rndis_response_complete; /* copy descriptors, and track endpoint copies */ f->descriptors = usb_copy_descriptors(eth_fs_function); if (!f->descriptors) goto fail; rndis->fs.in = usb_find_endpoint(eth_fs_function, f->descriptors, &fs_in_desc); rndis->fs.out = usb_find_endpoint(eth_fs_function, f->descriptors, &fs_out_desc); rndis->fs.notify = usb_find_endpoint(eth_fs_function, f->descriptors, &fs_notify_desc); /* support all relevant hardware speeds... 
we expect that when * hardware is dual speed, all bulk-capable endpoints work at * both speeds */ if (gadget_is_dualspeed(c->cdev->gadget)) { hs_in_desc.bEndpointAddress = fs_in_desc.bEndpointAddress; hs_out_desc.bEndpointAddress = fs_out_desc.bEndpointAddress; hs_notify_desc.bEndpointAddress = fs_notify_desc.bEndpointAddress; /* copy descriptors, and track endpoint copies */ f->hs_descriptors = usb_copy_descriptors(eth_hs_function); if (!f->hs_descriptors) goto fail; rndis->hs.in = usb_find_endpoint(eth_hs_function, f->hs_descriptors, &hs_in_desc); rndis->hs.out = usb_find_endpoint(eth_hs_function, f->hs_descriptors, &hs_out_desc); rndis->hs.notify = usb_find_endpoint(eth_hs_function, f->hs_descriptors, &hs_notify_desc); } rndis->port.open = rndis_open; rndis->port.close = rndis_close; status = rndis_register(rndis_response_available, rndis); if (status < 0) goto fail; rndis->config = status; rndis_set_param_medium(rndis->config, NDIS_MEDIUM_802_3, 0); rndis_set_host_mac(rndis->config, rndis->ethaddr); #if 0 // FIXME if (rndis_set_param_vendor(rndis->config, vendorID, manufacturer)) goto fail0; #endif /* NOTE: all that is done without knowing or caring about * the network link ... which is unavailable to this code * until we're activated via set_alt(). */ DBG(cdev, "RNDIS: %s speed IN/%s OUT/%s NOTIFY/%s\n", gadget_is_dualspeed(c->cdev->gadget) ? 
"dual" : "full", rndis->port.in_ep->name, rndis->port.out_ep->name, rndis->notify->name); return 0; fail: if (gadget_is_dualspeed(c->cdev->gadget) && f->hs_descriptors) usb_free_descriptors(f->hs_descriptors); if (f->descriptors) usb_free_descriptors(f->descriptors); if (rndis->notify_req) { kfree(rndis->notify_req->buf); usb_ep_free_request(rndis->notify, rndis->notify_req); } /* we might as well release our claims on endpoints */ if (rndis->notify) rndis->notify->driver_data = NULL; if (rndis->port.out) rndis->port.out_ep->driver_data = NULL; if (rndis->port.in) rndis->port.in_ep->driver_data = NULL; ERROR(cdev, "%s: can't bind, err %d\n", f->name, status); return status; } static void rndis_unbind(struct usb_configuration *c, struct usb_function *f) { struct f_rndis *rndis = func_to_rndis(f); rndis_deregister(rndis->config); rndis_exit(); if (gadget_is_dualspeed(c->cdev->gadget)) usb_free_descriptors(f->hs_descriptors); usb_free_descriptors(f->descriptors); kfree(rndis->notify_req->buf); usb_ep_free_request(rndis->notify, rndis->notify_req); kfree(rndis); } /* Some controllers can't support RNDIS ... */ static inline bool can_support_rndis(struct usb_configuration *c) { /* everything else is *presumably* fine */ return true; } /** * rndis_bind_config - add RNDIS network link to a configuration * @c: the configuration to support the network link * @ethaddr: a buffer in which the ethernet address of the host side * side of the link was recorded * Context: single threaded during gadget setup * * Returns zero on success, else negative errno. * * Caller must have called @gether_setup(). Caller is also responsible * for calling @gether_cleanup() before module unload. */ int rndis_bind_config(struct usb_configuration *c, u8 ethaddr[ETH_ALEN]) { struct f_rndis *rndis; int status; if (!can_support_rndis(c) || !ethaddr) return -EINVAL; /* maybe allocate device-global string IDs */ if (rndis_string_defs[0].id == 0) { /* ... 
and setup RNDIS itself */ status = rndis_init(); if (status < 0) return status; /* control interface label */ status = usb_string_id(c->cdev); if (status < 0) return status; rndis_string_defs[0].id = status; rndis_control_intf.iInterface = status; /* data interface label */ status = usb_string_id(c->cdev); if (status < 0) return status; rndis_string_defs[1].id = status; rndis_data_intf.iInterface = status; /* IAD iFunction label */ status = usb_string_id(c->cdev); if (status < 0) return status; rndis_string_defs[2].id = status; rndis_iad_descriptor.iFunction = status; } /* allocate and initialize one new instance */ status = -ENOMEM; rndis = kzalloc(sizeof *rndis, GFP_KERNEL); if (!rndis) goto fail; memcpy(rndis->ethaddr, ethaddr, ETH_ALEN); /* RNDIS activates when the host changes this filter */ rndis->port.cdc_filter = 0; /* RNDIS has special (and complex) framing */ rndis->port.header_len = sizeof(struct rndis_packet_msg_type); rndis->port.wrap = rndis_add_header; rndis->port.unwrap = rndis_rm_hdr; rndis->port.func.name = "rndis"; rndis->port.func.strings = rndis_strings; /* descriptors are per-instance copies */ rndis->port.func.bind = rndis_bind; rndis->port.func.unbind = rndis_unbind; rndis->port.func.set_alt = rndis_set_alt; rndis->port.func.setup = rndis_setup; rndis->port.func.disable = rndis_disable; status = usb_add_function(c, &rndis->port.func); if (status) { kfree(rndis); fail: rndis_exit(); } return status; }
gpl-2.0
Jaykay-x/Mini2440_BH1750fvi
net/netfilter/nf_log.c
388
7161
#include <linux/kernel.h> #include <linux/init.h> #include <linux/module.h> #include <linux/proc_fs.h> #include <linux/skbuff.h> #include <linux/netfilter.h> #include <linux/seq_file.h> #include <net/protocol.h> #include <net/netfilter/nf_log.h> #include "nf_internals.h" /* Internal logging interface, which relies on the real LOG target modules */ #define NF_LOG_PREFIXLEN 128 #define NFLOGGER_NAME_LEN 64 static const struct nf_logger *nf_loggers[NFPROTO_NUMPROTO] __read_mostly; static struct list_head nf_loggers_l[NFPROTO_NUMPROTO] __read_mostly; static DEFINE_MUTEX(nf_log_mutex); static struct nf_logger *__find_logger(int pf, const char *str_logger) { struct nf_logger *t; list_for_each_entry(t, &nf_loggers_l[pf], list[pf]) { if (!strnicmp(str_logger, t->name, strlen(t->name))) return t; } return NULL; } /* return EEXIST if the same logger is registred, 0 on success. */ int nf_log_register(u_int8_t pf, struct nf_logger *logger) { const struct nf_logger *llog; int i; if (pf >= ARRAY_SIZE(nf_loggers)) return -EINVAL; for (i = 0; i < ARRAY_SIZE(logger->list); i++) INIT_LIST_HEAD(&logger->list[i]); mutex_lock(&nf_log_mutex); if (pf == NFPROTO_UNSPEC) { for (i = NFPROTO_UNSPEC; i < NFPROTO_NUMPROTO; i++) list_add_tail(&(logger->list[i]), &(nf_loggers_l[i])); } else { /* register at end of list to honor first register win */ list_add_tail(&logger->list[pf], &nf_loggers_l[pf]); llog = rcu_dereference(nf_loggers[pf]); if (llog == NULL) rcu_assign_pointer(nf_loggers[pf], logger); } mutex_unlock(&nf_log_mutex); return 0; } EXPORT_SYMBOL(nf_log_register); void nf_log_unregister(struct nf_logger *logger) { const struct nf_logger *c_logger; int i; mutex_lock(&nf_log_mutex); for (i = 0; i < ARRAY_SIZE(nf_loggers); i++) { c_logger = rcu_dereference(nf_loggers[i]); if (c_logger == logger) rcu_assign_pointer(nf_loggers[i], NULL); list_del(&logger->list[i]); } mutex_unlock(&nf_log_mutex); synchronize_rcu(); } EXPORT_SYMBOL(nf_log_unregister); int nf_log_bind_pf(u_int8_t pf, const 
struct nf_logger *logger) { mutex_lock(&nf_log_mutex); if (__find_logger(pf, logger->name) == NULL) { mutex_unlock(&nf_log_mutex); return -ENOENT; } rcu_assign_pointer(nf_loggers[pf], logger); mutex_unlock(&nf_log_mutex); return 0; } EXPORT_SYMBOL(nf_log_bind_pf); void nf_log_unbind_pf(u_int8_t pf) { mutex_lock(&nf_log_mutex); rcu_assign_pointer(nf_loggers[pf], NULL); mutex_unlock(&nf_log_mutex); } EXPORT_SYMBOL(nf_log_unbind_pf); void nf_log_packet(u_int8_t pf, unsigned int hooknum, const struct sk_buff *skb, const struct net_device *in, const struct net_device *out, const struct nf_loginfo *loginfo, const char *fmt, ...) { va_list args; char prefix[NF_LOG_PREFIXLEN]; const struct nf_logger *logger; rcu_read_lock(); logger = rcu_dereference(nf_loggers[pf]); if (logger) { va_start(args, fmt); vsnprintf(prefix, sizeof(prefix), fmt, args); va_end(args); logger->logfn(pf, hooknum, skb, in, out, loginfo, prefix); } rcu_read_unlock(); } EXPORT_SYMBOL(nf_log_packet); #ifdef CONFIG_PROC_FS static void *seq_start(struct seq_file *seq, loff_t *pos) { mutex_lock(&nf_log_mutex); if (*pos >= ARRAY_SIZE(nf_loggers)) return NULL; return pos; } static void *seq_next(struct seq_file *s, void *v, loff_t *pos) { (*pos)++; if (*pos >= ARRAY_SIZE(nf_loggers)) return NULL; return pos; } static void seq_stop(struct seq_file *s, void *v) { mutex_unlock(&nf_log_mutex); } static int seq_show(struct seq_file *s, void *v) { loff_t *pos = v; const struct nf_logger *logger; struct nf_logger *t; int ret; logger = nf_loggers[*pos]; if (!logger) ret = seq_printf(s, "%2lld NONE (", *pos); else ret = seq_printf(s, "%2lld %s (", *pos, logger->name); if (ret < 0) return ret; list_for_each_entry(t, &nf_loggers_l[*pos], list[*pos]) { ret = seq_printf(s, "%s", t->name); if (ret < 0) return ret; if (&t->list[*pos] != nf_loggers_l[*pos].prev) { ret = seq_printf(s, ","); if (ret < 0) return ret; } } return seq_printf(s, ")\n"); } static const struct seq_operations nflog_seq_ops = { .start = seq_start, 
.next = seq_next, .stop = seq_stop, .show = seq_show, }; static int nflog_open(struct inode *inode, struct file *file) { return seq_open(file, &nflog_seq_ops); } static const struct file_operations nflog_file_ops = { .owner = THIS_MODULE, .open = nflog_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release, }; #endif /* PROC_FS */ #ifdef CONFIG_SYSCTL static struct ctl_path nf_log_sysctl_path[] = { { .procname = "net", .ctl_name = CTL_NET, }, { .procname = "netfilter", .ctl_name = NET_NETFILTER, }, { .procname = "nf_log", .ctl_name = CTL_UNNUMBERED, }, { } }; static char nf_log_sysctl_fnames[NFPROTO_NUMPROTO-NFPROTO_UNSPEC][3]; static struct ctl_table nf_log_sysctl_table[NFPROTO_NUMPROTO+1]; static struct ctl_table_header *nf_log_dir_header; static int nf_log_proc_dostring(ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { const struct nf_logger *logger; char buf[NFLOGGER_NAME_LEN]; size_t size = *lenp; int r = 0; int tindex = (unsigned long)table->extra1; if (write) { if (size > sizeof(buf)) size = sizeof(buf); if (copy_from_user(buf, buffer, size)) return -EFAULT; if (!strcmp(buf, "NONE")) { nf_log_unbind_pf(tindex); return 0; } mutex_lock(&nf_log_mutex); logger = __find_logger(tindex, buf); if (logger == NULL) { mutex_unlock(&nf_log_mutex); return -ENOENT; } rcu_assign_pointer(nf_loggers[tindex], logger); mutex_unlock(&nf_log_mutex); } else { mutex_lock(&nf_log_mutex); logger = nf_loggers[tindex]; if (!logger) table->data = "NONE"; else table->data = logger->name; r = proc_dostring(table, write, buffer, lenp, ppos); mutex_unlock(&nf_log_mutex); } return r; } static __init int netfilter_log_sysctl_init(void) { int i; for (i = NFPROTO_UNSPEC; i < NFPROTO_NUMPROTO; i++) { snprintf(nf_log_sysctl_fnames[i-NFPROTO_UNSPEC], 3, "%d", i); nf_log_sysctl_table[i].ctl_name = CTL_UNNUMBERED; nf_log_sysctl_table[i].procname = nf_log_sysctl_fnames[i-NFPROTO_UNSPEC]; nf_log_sysctl_table[i].data = NULL; nf_log_sysctl_table[i].maxlen = 
NFLOGGER_NAME_LEN * sizeof(char); nf_log_sysctl_table[i].mode = 0644; nf_log_sysctl_table[i].proc_handler = nf_log_proc_dostring; nf_log_sysctl_table[i].extra1 = (void *)(unsigned long) i; } nf_log_dir_header = register_sysctl_paths(nf_log_sysctl_path, nf_log_sysctl_table); if (!nf_log_dir_header) return -ENOMEM; return 0; } #else static __init int netfilter_log_sysctl_init(void) { return 0; } #endif /* CONFIG_SYSCTL */ int __init netfilter_log_init(void) { int i, r; #ifdef CONFIG_PROC_FS if (!proc_create("nf_log", S_IRUGO, proc_net_netfilter, &nflog_file_ops)) return -1; #endif /* Errors will trigger panic, unroll on error is unnecessary. */ r = netfilter_log_sysctl_init(); if (r < 0) return r; for (i = NFPROTO_UNSPEC; i < NFPROTO_NUMPROTO; i++) INIT_LIST_HEAD(&(nf_loggers_l[i])); return 0; }
gpl-2.0
AndroidSymmetry/Old_Sparky
drivers/net/ethernet/msm/emac/emac_hw.c
388
39901
/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ /* MSM EMAC Ethernet Controller Hardware support */ #include <linux/crc32.h> #include <linux/if_vlan.h> #include <linux/jiffies.h> #include <linux/phy.h> #include "emac_hw.h" #include "emac_ptp.h" #define RFD_PREF_LOW_TH 0x10 #define RFD_PREF_UP_TH 0x10 #define JUMBO_1KAH 0x4 #define RXF_DOF_TH 0x0be #define RXF_UOF_TH 0x1a0 #define RXD_TH 0x100 static int emac_hw_sgmii_setup_link(struct emac_hw *hw, u32 speed, bool autoneg, bool fc); /* RGMII specific macros */ #define EMAC_RGMII_PLL_LOCK_TIMEOUT (HZ / 1000) /* 1ms */ #define EMAC_RGMII_CORE_IE_C 0x2001 #define EMAC_RGMII_PLL_L_VAL 0x14 #define EMAC_RGMII_PHY_MODE 0 /* REG */ u32 emac_reg_r32(struct emac_hw *hw, u8 base, u32 reg) { return readl_relaxed(hw->reg_addr[base] + reg); } void emac_reg_w32(struct emac_hw *hw, u8 base, u32 reg, u32 val) { writel_relaxed(val, hw->reg_addr[base] + reg); } void emac_reg_update32(struct emac_hw *hw, u8 base, u32 reg, u32 mask, u32 val) { u32 data; data = emac_reg_r32(hw, base, reg); emac_reg_w32(hw, base, reg, ((data & ~mask) | val)); } u32 emac_reg_field_r32(struct emac_hw *hw, u8 base, u32 reg, u32 mask, u32 shift) { u32 data; data = emac_reg_r32(hw, base, reg); return (data & mask) >> shift; } /* PHY */ static int emac_disable_mdio_autopoll(struct emac_hw *hw) { u32 i, val; emac_reg_update32(hw, EMAC, EMAC_MDIO_CTRL, MDIO_AP_EN, 0); wmb(); /* ensure mdio autopoll disable is requested */ /* wait for any mdio polling to complete */ for (i = 0; i < MDIO_WAIT_TIMES; 
i++) { val = emac_reg_r32(hw, EMAC, EMAC_MDIO_CTRL); if (!(val & MDIO_BUSY)) return 0; udelay(100); } /* failed to disable; ensure it is enabled before returning */ emac_reg_update32(hw, EMAC, EMAC_MDIO_CTRL, 0, MDIO_AP_EN); wmb(); /* ensure mdio autopoll is enabled */ return -EBUSY; } static void emac_enable_mdio_autopoll(struct emac_hw *hw) { emac_reg_update32(hw, EMAC, EMAC_MDIO_CTRL, 0, MDIO_AP_EN); wmb(); /* ensure mdio autopoll is enabled */ } int emac_hw_read_phy_reg(struct emac_hw *hw, bool ext, u8 dev, bool fast, u16 reg_addr, u16 *phy_data) { u32 i, clk_sel, val = 0; int retval = 0; *phy_data = 0; clk_sel = fast ? MDIO_CLK_25_4 : MDIO_CLK_25_28; if (hw->adpt->no_ephy == false) { retval = emac_disable_mdio_autopoll(hw); if (retval) return retval; } emac_reg_update32(hw, EMAC, EMAC_PHY_STS, PHY_ADDR_BMSK, (dev << PHY_ADDR_SHFT)); wmb(); /* ensure PHY address is set before we proceed */ if (ext) { val = ((dev << DEVAD_SHFT) & DEVAD_BMSK) | ((reg_addr << EX_REG_ADDR_SHFT) & EX_REG_ADDR_BMSK); emac_reg_w32(hw, EMAC, EMAC_MDIO_EX_CTRL, val); wmb(); /* ensure proper address is set before proceeding */ val = SUP_PREAMBLE | ((clk_sel << MDIO_CLK_SEL_SHFT) & MDIO_CLK_SEL_BMSK) | MDIO_START | MDIO_MODE | MDIO_RD_NWR; } else { val = val & ~(MDIO_REG_ADDR_BMSK | MDIO_CLK_SEL_BMSK | MDIO_MODE | MDIO_PR); val = SUP_PREAMBLE | ((clk_sel << MDIO_CLK_SEL_SHFT) & MDIO_CLK_SEL_BMSK) | ((reg_addr << MDIO_REG_ADDR_SHFT) & MDIO_REG_ADDR_BMSK) | MDIO_START | MDIO_RD_NWR; } emac_reg_w32(hw, EMAC, EMAC_MDIO_CTRL, val); mb(); /* ensure hw starts the operation before we check for result */ for (i = 0; i < MDIO_WAIT_TIMES; i++) { val = emac_reg_r32(hw, EMAC, EMAC_MDIO_CTRL); if (!(val & (MDIO_START | MDIO_BUSY))) { *phy_data = (u16)((val >> MDIO_DATA_SHFT) & MDIO_DATA_BMSK); break; } udelay(100); } if (i == MDIO_WAIT_TIMES) retval = -EIO; if (hw->adpt->no_ephy == false) emac_enable_mdio_autopoll(hw); return retval; } int emac_hw_write_phy_reg(struct emac_hw *hw, bool ext, u8 dev, 
bool fast, u16 reg_addr, u16 phy_data) { u32 i, clk_sel, val = 0; int retval = 0; clk_sel = fast ? MDIO_CLK_25_4 : MDIO_CLK_25_28; if (hw->adpt->no_ephy == false) { retval = emac_disable_mdio_autopoll(hw); if (retval) return retval; } emac_reg_update32(hw, EMAC, EMAC_PHY_STS, PHY_ADDR_BMSK, (dev << PHY_ADDR_SHFT)); wmb(); /* ensure PHY address is set before we proceed */ if (ext) { val = ((dev << DEVAD_SHFT) & DEVAD_BMSK) | ((reg_addr << EX_REG_ADDR_SHFT) & EX_REG_ADDR_BMSK); emac_reg_w32(hw, EMAC, EMAC_MDIO_EX_CTRL, val); wmb(); /* ensure proper address is set before proceeding */ val = SUP_PREAMBLE | ((clk_sel << MDIO_CLK_SEL_SHFT) & MDIO_CLK_SEL_BMSK) | ((phy_data << MDIO_DATA_SHFT) & MDIO_DATA_BMSK) | MDIO_START | MDIO_MODE; } else { val = val & ~(MDIO_REG_ADDR_BMSK | MDIO_CLK_SEL_BMSK | MDIO_DATA_BMSK | MDIO_MODE | MDIO_PR); val = SUP_PREAMBLE | ((clk_sel << MDIO_CLK_SEL_SHFT) & MDIO_CLK_SEL_BMSK) | ((reg_addr << MDIO_REG_ADDR_SHFT) & MDIO_REG_ADDR_BMSK) | ((phy_data << MDIO_DATA_SHFT) & MDIO_DATA_BMSK) | MDIO_START; } emac_reg_w32(hw, EMAC, EMAC_MDIO_CTRL, val); mb(); /* ensure hw starts the operation before we check for result */ for (i = 0; i < MDIO_WAIT_TIMES; i++) { val = emac_reg_r32(hw, EMAC, EMAC_MDIO_CTRL); if (!(val & (MDIO_START | MDIO_BUSY))) break; udelay(100); } if (i == MDIO_WAIT_TIMES) retval = -EIO; if (hw->adpt->no_ephy == false) emac_enable_mdio_autopoll(hw); return retval; } int emac_read_phy_reg(struct emac_hw *hw, u16 phy_addr, u16 reg_addr, u16 *phy_data) { unsigned long flags; int retval; spin_lock_irqsave(&hw->mdio_lock, flags); retval = emac_hw_read_phy_reg(hw, false, phy_addr, true, reg_addr, phy_data); spin_unlock_irqrestore(&hw->mdio_lock, flags); if (retval) emac_err(hw->adpt, "error reading phy reg 0x%02x\n", reg_addr); else emac_dbg(hw->adpt, hw, "EMAC PHY RD: 0x%02x -> 0x%04x\n", reg_addr, *phy_data); return retval; } int emac_write_phy_reg(struct emac_hw *hw, u16 phy_addr, u16 reg_addr, u16 phy_data) { unsigned long flags; int 
retval; spin_lock_irqsave(&hw->mdio_lock, flags); retval = emac_hw_write_phy_reg(hw, false, phy_addr, true, reg_addr, phy_data); spin_unlock_irqrestore(&hw->mdio_lock, flags); if (retval) emac_err(hw->adpt, "error writing phy reg 0x%02x\n", reg_addr); else emac_dbg(hw->adpt, hw, "EMAC PHY WR: 0x%02x <- 0x%04x\n", reg_addr, phy_data); return retval; } int emac_hw_ack_phy_intr(struct emac_hw *hw) { /* ack phy interrupt */ return 0; } int emac_hw_init_sgmii(struct emac_hw *hw) { int i; emac_hw_sgmii_setup_link(hw, hw->autoneg_advertised, hw->autoneg, !hw->disable_fc_autoneg); /* PCS programming */ emac_reg_w32(hw, EMAC_SGMII_PHY, EMAC_SGMII_PHY_CDR_CTRL0, SGMII_CDR_MAX_CNT); emac_reg_w32(hw, EMAC_SGMII_PHY, EMAC_SGMII_PHY_POW_DWN_CTRL0, PWRDN_B); emac_reg_w32(hw, EMAC_SGMII_PHY, EMAC_SGMII_PHY_CMN_PWR_CTRL, BIAS_EN | SYSCLK_EN | CLKBUF_L_EN | PLL_TXCLK_EN | PLL_RXCLK_EN); emac_reg_w32(hw, EMAC_SGMII_PHY, EMAC_SGMII_PHY_TX_PWR_CTRL, L0_TX_EN | L0_CLKBUF_EN | L0_TRAN_BIAS_EN); emac_reg_w32(hw, EMAC_SGMII_PHY, EMAC_SGMII_PHY_RX_PWR_CTRL, L0_RX_SIGDET_EN | (1 << L0_RX_TERM_MODE_SHFT) | L0_RX_I_EN); emac_reg_w32(hw, EMAC_SGMII_PHY, EMAC_SGMII_PHY_CMN_PWR_CTRL, BIAS_EN | PLL_EN | SYSCLK_EN | CLKBUF_L_EN | PLL_TXCLK_EN | PLL_RXCLK_EN); emac_reg_w32(hw, EMAC_SGMII_PHY, EMAC_SGMII_PHY_LANE_CTRL1, L0_RX_EQ_EN | L0_RESET_TSYNC_EN | L0_DRV_LVL_BMSK); wmb(); /* sysclk/refclk setting */ emac_reg_w32(hw, EMAC_QSERDES, EMAC_QSERDES_COM_SYSCLK_EN_SEL, SYSCLK_SEL_CMOS); emac_reg_w32(hw, EMAC_QSERDES, EMAC_QSERDES_COM_SYS_CLK_CTRL, SYSCLK_CM | SYSCLK_AC_COUPLE); /* PLL setting */ emac_reg_w32(hw, EMAC_QSERDES, EMAC_QSERDES_COM_PLL_IP_SETI, QSERDES_PLL_IPSETI); emac_reg_w32(hw, EMAC_QSERDES, EMAC_QSERDES_COM_PLL_CP_SETI, QSERDES_PLL_CP_SETI); emac_reg_w32(hw, EMAC_QSERDES, EMAC_QSERDES_COM_PLL_IP_SETP, QSERDES_PLL_IP_SETP); emac_reg_w32(hw, EMAC_QSERDES, EMAC_QSERDES_COM_PLL_CP_SETP, QSERDES_PLL_CP_SETP); emac_reg_w32(hw, EMAC_QSERDES, EMAC_QSERDES_COM_PLL_CRCTRL, QSERDES_PLL_CRCTRL); 
emac_reg_w32(hw, EMAC_QSERDES, EMAC_QSERDES_COM_PLL_CNTRL, OCP_EN | PLL_DIV_FFEN | PLL_DIV_ORD); emac_reg_w32(hw, EMAC_QSERDES, EMAC_QSERDES_COM_DEC_START1, DEC_START1_MUX | QSERDES_PLL_DEC); emac_reg_w32(hw, EMAC_QSERDES, EMAC_QSERDES_COM_DEC_START2, DEC_START2_MUX | DEC_START2); emac_reg_w32(hw, EMAC_QSERDES, EMAC_QSERDES_COM_DIV_FRAC_START1, DIV_FRAC_START1_MUX | QSERDES_PLL_DIV_FRAC_START1); emac_reg_w32(hw, EMAC_QSERDES, EMAC_QSERDES_COM_DIV_FRAC_START2, DIV_FRAC_START2_MUX | QSERDES_PLL_DIV_FRAC_START2); emac_reg_w32(hw, EMAC_QSERDES, EMAC_QSERDES_COM_DIV_FRAC_START3, DIV_FRAC_START3_MUX | QSERDES_PLL_DIV_FRAC_START3); emac_reg_w32(hw, EMAC_QSERDES, EMAC_QSERDES_COM_PLLLOCK_CMP1, QSERDES_PLL_LOCK_CMP1); emac_reg_w32(hw, EMAC_QSERDES, EMAC_QSERDES_COM_PLLLOCK_CMP2, QSERDES_PLL_LOCK_CMP2); emac_reg_w32(hw, EMAC_QSERDES, EMAC_QSERDES_COM_PLLLOCK_CMP3, QSERDES_PLL_LOCK_CMP3); emac_reg_w32(hw, EMAC_QSERDES, EMAC_QSERDES_COM_PLLLOCK_CMP_EN, PLLLOCK_CMP_EN); emac_reg_w32(hw, EMAC_QSERDES, EMAC_QSERDES_COM_RESETSM_CNTRL, FRQ_TUNE_MODE); /* CDR setting */ emac_reg_w32(hw, EMAC_QSERDES, EMAC_QSERDES_RX_CDR_CONTROL, SECONDORDERENABLE | (QSERDES_RX_CDR_CTRL1_THRESH << FIRSTORDER_THRESH_SHFT) | (QSERDES_RX_CDR_CTRL1_GAIN << SECONDORDERGAIN_SHFT)); emac_reg_w32(hw, EMAC_QSERDES, EMAC_QSERDES_RX_CDR_CONTROL2, SECONDORDERENABLE | (QSERDES_RX_CDR_CTRL2_THRESH << FIRSTORDER_THRESH_SHFT) | (QSERDES_RX_CDR_CTRL2_GAIN << SECONDORDERGAIN_SHFT)); /* TX/RX setting */ emac_reg_w32(hw, EMAC_QSERDES, EMAC_QSERDES_TX_BIST_MODE_LANENO, QSERDES_TX_BIST_MODE_LANENO); emac_reg_w32(hw, EMAC_QSERDES, EMAC_QSERDES_TX_TX_DRV_LVL, TX_DRV_LVL_MUX | (QSERDES_TX_DRV_LVL << TX_DRV_LVL_SHFT)); emac_reg_w32(hw, EMAC_QSERDES, EMAC_QSERDES_TX_TRAN_DRVR_EMP_EN, EMP_EN_MUX | EMP_EN); emac_reg_w32(hw, EMAC_QSERDES, EMAC_QSERDES_TX_TX_EMP_POST1_LVL, TX_EMP_POST1_LVL_MUX | (QSERDES_TX_EMP_POST1_LVL << TX_EMP_POST1_LVL_SHFT)); emac_reg_w32(hw, EMAC_QSERDES, EMAC_QSERDES_RX_RX_EQ_GAIN12, (QSERDES_RX_EQ_GAIN2 
<< RX_EQ_GAIN2_SHFT) | (QSERDES_RX_EQ_GAIN1 << RX_EQ_GAIN1_SHFT)); emac_reg_w32(hw, EMAC_QSERDES, EMAC_QSERDES_TX_LANE_MODE, QSERDES_TX_LANE_MODE); wmb(); emac_reg_w32(hw, EMAC_SGMII_PHY, EMAC_SGMII_PHY_SERDES_START, SERDES_START); wmb(); for (i = 0; i < SERDES_START_WAIT_TIMES; i++) { if (emac_reg_r32(hw, EMAC_QSERDES, EMAC_QSERDES_COM_RESET_SM) & QSERDES_READY) break; usleep_range(100, 200); } if (i == SERDES_START_WAIT_TIMES) { emac_err(hw->adpt, "serdes failed to start\n"); return -EIO; } /* Mask out all the SGMII Interrupt */ emac_reg_w32(hw, EMAC_SGMII_PHY, EMAC_SGMII_PHY_INTERRUPT_MASK, 0); wmb(); emac_hw_clear_sgmii_intr_status(hw, SGMII_PHY_INTERRUPT_ERR); return 0; } int emac_hw_reset_sgmii(struct emac_hw *hw) { /* It may take about 100ms to reset the SGMII PHY*/ emac_reg_update32(hw, EMAC_CSR, EMAC_EMAC_WRAPPER_CSR2, PHY_RESET, PHY_RESET); wmb(); msleep(50); emac_reg_update32(hw, EMAC_CSR, EMAC_EMAC_WRAPPER_CSR2, PHY_RESET, 0); wmb(); msleep(50); return emac_hw_init_sgmii(hw); } /* initialize RGMII PHY */ static int emac_hw_init_rgmii(struct emac_hw *hw) { u32 val; unsigned long timeout; emac_reg_update32(hw, EMAC_CSR, EMAC_EMAC_WRAPPER_CSR1, 0, FREQ_MODE); emac_reg_w32(hw, EMAC_CSR, EMAC_EMAC_WRAPPER_CSR18, EMAC_RGMII_CORE_IE_C); emac_reg_update32(hw, EMAC_CSR, EMAC_EMAC_WRAPPER_CSR2, RGMII_PHY_MODE_BMSK, (EMAC_RGMII_PHY_MODE << RGMII_PHY_MODE_SHFT)); emac_reg_update32(hw, EMAC_CSR, EMAC_EMAC_WRAPPER_CSR2, PHY_RESET, 0); emac_reg_update32(hw, EMAC_CSR, EMAC_EMAC_WRAPPER_CSR3, PLL_L_VAL_5_0_BMSK, (EMAC_RGMII_PLL_L_VAL << PLL_L_VAL_5_0_SHFT)); /* reset PHY PLL and ensure PLL is reset */ emac_reg_update32(hw, EMAC_CSR, EMAC_EMAC_WRAPPER_CSR3, 0, PLL_RESET); wmb(); udelay(10); /* power down analog sections of PLL and ensure the same */ emac_reg_update32(hw, EMAC_CSR, EMAC_EMAC_WRAPPER_CSR3, 0, BYPASSNL); wmb(); udelay(10); emac_reg_update32(hw, EMAC_CSR, EMAC_EMAC_WRAPPER_CSR2, 0, CKEDGE_SEL); emac_reg_update32(hw, EMAC_CSR, EMAC_EMAC_WRAPPER_CSR2, 
TX_ID_EN_L, RX_ID_EN_L); emac_reg_update32(hw, EMAC_CSR, EMAC_EMAC_WRAPPER_CSR2, HDRIVE_BMSK, (0x0 << HDRIVE_SHFT)); emac_reg_update32(hw, EMAC_CSR, EMAC_EMAC_WRAPPER_CSR2, WOL_EN, 0); /* reset PHY and ensure reset is complete */ emac_reg_update32(hw, EMAC_CSR, EMAC_EMAC_WRAPPER_CSR2, 0, PHY_RESET); wmb(); udelay(10); /* pull PHY out of reset and ensure PHY is normal */ emac_reg_update32(hw, EMAC_CSR, EMAC_EMAC_WRAPPER_CSR2, PHY_RESET, 0); wmb(); udelay(1000); /* pull PHY PLL out of reset and ensure PLL is working */ emac_reg_update32(hw, EMAC_CSR, EMAC_EMAC_WRAPPER_CSR3, PLL_RESET, 0); wmb(); udelay(10); emac_reg_update32(hw, EMAC_CSR, EMAC_EMAC_WRAPPER_CSR5, 0, RMII_125_CLK_EN); wmb(); /* wait for PLL to lock */ timeout = jiffies + EMAC_RGMII_PLL_LOCK_TIMEOUT; do { val = emac_reg_r32(hw, EMAC_CSR, EMAC_EMAC_WRAPPER_STATUS); if (val & PLL_LOCK_DET) break; udelay(100); } while (time_after_eq(timeout, jiffies)); if (time_after(jiffies, timeout)) { emac_err(hw->adpt, "PHY PLL lock failed\n"); return -EIO; } return 0; } /* initialize phy */ int emac_hw_init_phy(struct emac_hw *hw) { int retval = 0; spin_lock_init(&hw->mdio_lock); hw->autoneg = true; hw->autoneg_advertised = EMAC_LINK_SPEED_DEFAULT; if (hw->adpt->phy_mode == PHY_INTERFACE_MODE_SGMII) retval = emac_hw_init_sgmii(hw); else if (hw->adpt->phy_mode == PHY_INTERFACE_MODE_RGMII) retval = emac_hw_init_rgmii(hw); return retval; } /* initialize external phy */ int emac_hw_init_ephy(struct emac_hw *hw) { u16 val, phy_id[2]; int retval = 0; if (hw->adpt->no_ephy == false) { retval = emac_read_phy_reg(hw, hw->phy_addr, MII_PHYSID1, &phy_id[0]); if (retval) return retval; retval = emac_read_phy_reg(hw, hw->phy_addr, MII_PHYSID2, &phy_id[1]); if (retval) return retval; hw->phy_id[0] = phy_id[0]; hw->phy_id[1] = phy_id[1]; } else { emac_disable_mdio_autopoll(hw); } /* disable hibernation in case of rgmii phy */ if (hw->adpt->phy_mode == PHY_INTERFACE_MODE_RGMII) { retval = emac_write_phy_reg(hw, hw->phy_addr, 
MII_DBG_ADDR, HIBERNATE_CTRL_REG); if (retval) return retval; retval = emac_read_phy_reg(hw, hw->phy_addr, MII_DBG_DATA, &val); if (retval) return retval; val &= ~HIBERNATE_EN; retval = emac_write_phy_reg(hw, hw->phy_addr, MII_DBG_DATA, val); } return retval; } /* LINK */ static int emac_hw_sgmii_setup_link(struct emac_hw *hw, u32 speed, bool autoneg, bool fc) { u32 val; u32 speed_cfg = 0; val = emac_reg_r32(hw, EMAC_SGMII_PHY, EMAC_SGMII_PHY_AUTONEG_CFG2); if (autoneg) { val &= ~(FORCE_AN_RX_CFG | FORCE_AN_TX_CFG); val |= AN_ENABLE; emac_reg_w32(hw, EMAC_SGMII_PHY, EMAC_SGMII_PHY_AUTONEG_CFG2, val); wmb(); } else { switch (speed) { case EMAC_LINK_SPEED_10_HALF: speed_cfg = SPDMODE_10; break; case EMAC_LINK_SPEED_10_FULL: speed_cfg = SPDMODE_10 | DUPLEX_MODE; break; case EMAC_LINK_SPEED_100_HALF: speed_cfg = SPDMODE_100; break; case EMAC_LINK_SPEED_100_FULL: speed_cfg = SPDMODE_100 | DUPLEX_MODE; break; case EMAC_LINK_SPEED_1GB_FULL: speed_cfg = SPDMODE_1000 | DUPLEX_MODE; break; default: return -EINVAL; } val &= ~AN_ENABLE; emac_reg_w32(hw, EMAC_SGMII_PHY, EMAC_SGMII_PHY_SPEED_CFG1, speed_cfg); emac_reg_w32(hw, EMAC_SGMII_PHY, EMAC_SGMII_PHY_AUTONEG_CFG2, val); wmb(); } return 0; } static int emac_hw_setup_phy_link(struct emac_hw *hw, u32 speed, bool autoneg, bool fc) { u16 adv, bmcr, ctrl1000 = 0; int retval = 0; if (autoneg) { adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; if (!fc) adv &= ~(ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM); if (speed & EMAC_LINK_SPEED_10_HALF) adv |= ADVERTISE_10HALF; if (speed & EMAC_LINK_SPEED_10_FULL) adv |= ADVERTISE_10HALF | ADVERTISE_10FULL; if (speed & EMAC_LINK_SPEED_100_HALF) adv |= ADVERTISE_100HALF; if (speed & EMAC_LINK_SPEED_100_FULL) adv |= ADVERTISE_100HALF | ADVERTISE_100FULL; if (speed & EMAC_LINK_SPEED_1GB_FULL) ctrl1000 |= ADVERTISE_1000FULL; retval |= emac_write_phy_reg(hw, hw->phy_addr, MII_ADVERTISE, adv); retval |= emac_write_phy_reg(hw, hw->phy_addr, MII_CTRL1000, ctrl1000); bmcr = BMCR_RESET | BMCR_ANENABLE 
| BMCR_ANRESTART; retval |= emac_write_phy_reg(hw, hw->phy_addr, MII_BMCR, bmcr); } else { bmcr = BMCR_RESET; switch (speed) { case EMAC_LINK_SPEED_10_HALF: bmcr |= BMCR_SPEED10; break; case EMAC_LINK_SPEED_10_FULL: bmcr |= BMCR_SPEED10 | BMCR_FULLDPLX; break; case EMAC_LINK_SPEED_100_HALF: bmcr |= BMCR_SPEED100; break; case EMAC_LINK_SPEED_100_FULL: bmcr |= BMCR_SPEED100 | BMCR_FULLDPLX; break; default: return -EINVAL; } retval |= emac_write_phy_reg(hw, hw->phy_addr, MII_BMCR, bmcr); } return retval; } int emac_setup_phy_link(struct emac_hw *hw, u32 speed, bool autoneg, bool fc) { int retval = 0; if (hw->adpt->no_ephy == true) { if (hw->adpt->phy_mode == PHY_INTERFACE_MODE_SGMII) { hw->autoneg = autoneg; hw->autoneg_advertised = speed; /* The AN_ENABLE and SPEED_CFG can't change on fly. The SGMII_PHY has to be re-initialized. */ return emac_hw_reset_sgmii(hw); } else { emac_err(hw->adpt, "can't setup phy link without ephy\n"); return -ENOTSUPP; } } if (emac_hw_setup_phy_link(hw, speed, autoneg, fc)) { emac_err(hw->adpt, "error when init phy speed and fc\n"); retval = -EINVAL; } else { hw->autoneg = autoneg; } return retval; } int emac_setup_phy_link_speed(struct emac_hw *hw, u32 speed, bool autoneg, bool fc) { /* update speed based on input link speed */ hw->autoneg_advertised = speed & EMAC_LINK_SPEED_DEFAULT; return emac_setup_phy_link(hw, hw->autoneg_advertised, autoneg, fc); } int emac_check_phy_link(struct emac_hw *hw, u32 *speed, bool *link_up) { u16 bmsr, pssr; int retval; if (hw->adpt->no_ephy == true) { if (hw->adpt->phy_mode == PHY_INTERFACE_MODE_SGMII) { return emac_check_sgmii_link(hw, speed, link_up); } else { emac_err(hw->adpt, "can't check phy link without ephy\n"); return -ENOTSUPP; } } retval = emac_read_phy_reg(hw, hw->phy_addr, MII_BMSR, &bmsr); if (retval) return retval; if (!(bmsr & BMSR_LSTATUS)) { *link_up = false; *speed = EMAC_LINK_SPEED_UNKNOWN; return 0; } *link_up = true; retval = emac_read_phy_reg(hw, hw->phy_addr, MII_PSSR, &pssr); if 
(retval) return retval; if (!(pssr & PSSR_SPD_DPLX_RESOLVED)) { emac_err(hw->adpt, "error for speed duplex resolved\n"); return -EINVAL; } switch (pssr & PSSR_SPEED) { case PSSR_1000MBS: if (pssr & PSSR_DPLX) *speed = EMAC_LINK_SPEED_1GB_FULL; else emac_err(hw->adpt, "1000M half duplex is invalid"); break; case PSSR_100MBS: if (pssr & PSSR_DPLX) *speed = EMAC_LINK_SPEED_100_FULL; else *speed = EMAC_LINK_SPEED_100_HALF; break; case PSSR_10MBS: if (pssr & PSSR_DPLX) *speed = EMAC_LINK_SPEED_10_FULL; else *speed = EMAC_LINK_SPEED_10_HALF; break; default: *speed = EMAC_LINK_SPEED_UNKNOWN; retval = -EINVAL; break; } return retval; } int emac_hw_get_lpa_speed(struct emac_hw *hw, u32 *speed) { int retval; u16 lpa, stat1000; bool link; if (hw->adpt->no_ephy == true) { if (hw->adpt->phy_mode == PHY_INTERFACE_MODE_SGMII) { return emac_check_sgmii_link(hw, speed, &link); } else { emac_err(hw->adpt, "can't get lpa speed without ephy\n"); return -ENOTSUPP; } } retval = emac_read_phy_reg(hw, hw->phy_addr, MII_LPA, &lpa); retval |= emac_read_phy_reg(hw, hw->phy_addr, MII_STAT1000, &stat1000); if (retval) return retval; *speed = EMAC_LINK_SPEED_10_HALF; if (lpa & LPA_10FULL) *speed = EMAC_LINK_SPEED_10_FULL; else if (lpa & LPA_10HALF) *speed = EMAC_LINK_SPEED_10_HALF; else if (lpa & LPA_100FULL) *speed = EMAC_LINK_SPEED_100_FULL; else if (lpa & LPA_100HALF) *speed = EMAC_LINK_SPEED_100_HALF; else if (stat1000 & LPA_1000FULL) *speed = EMAC_LINK_SPEED_1GB_FULL; return 0; } int emac_hw_clear_sgmii_intr_status(struct emac_hw *hw, u32 irq_bits) { u32 status; int i; emac_reg_w32(hw, EMAC_SGMII_PHY, EMAC_SGMII_PHY_INTERRUPT_CLEAR, irq_bits); emac_reg_w32(hw, EMAC_SGMII_PHY, EMAC_SGMII_PHY_IRQ_CMD, IRQ_GLOBAL_CLEAR); wmb(); /* After set the IRQ_GLOBAL_CLEAR bit, the status clearing must * be confirmed before clear the bits in other registers. * It takes a few cycles for hw to clear the interrupt status. 
*/ for (i = 0; i < SGMII_PHY_IRQ_CLR_WAIT_TIME; i++) { udelay(1); status = emac_reg_r32(hw, EMAC_SGMII_PHY, EMAC_SGMII_PHY_INTERRUPT_STATUS); if (!(status & irq_bits)) break; } if (status & irq_bits) { emac_err(hw->adpt, "failed to clear SGMII irq: status 0x%x bits 0x%x\n", status, irq_bits); return -EIO; } emac_reg_w32(hw, EMAC_SGMII_PHY, EMAC_SGMII_PHY_IRQ_CMD, 0); emac_reg_w32(hw, EMAC_SGMII_PHY, EMAC_SGMII_PHY_INTERRUPT_CLEAR, 0); wmb(); return 0; } int emac_check_sgmii_link(struct emac_hw *hw, u32 *speed, bool *link_up) { u32 val; val = emac_reg_r32(hw, EMAC_SGMII_PHY, EMAC_SGMII_PHY_AUTONEG_CFG2); if (val & AN_ENABLE) return emac_check_sgmii_autoneg(hw, speed, link_up); val = emac_reg_r32(hw, EMAC_SGMII_PHY, EMAC_SGMII_PHY_SPEED_CFG1); val &= DUPLEX_MODE | SPDMODE_BMSK; switch (val) { case DUPLEX_MODE | SPDMODE_1000: *speed = EMAC_LINK_SPEED_1GB_FULL; break; case DUPLEX_MODE | SPDMODE_100: *speed = EMAC_LINK_SPEED_100_FULL; break; case SPDMODE_100: *speed = EMAC_LINK_SPEED_100_HALF; break; case DUPLEX_MODE | SPDMODE_10: *speed = EMAC_LINK_SPEED_10_FULL; break; case SPDMODE_10: *speed = EMAC_LINK_SPEED_10_HALF; break; default: *speed = EMAC_LINK_SPEED_UNKNOWN; break; } *link_up = true; return 0; } int emac_check_sgmii_autoneg(struct emac_hw *hw, u32 *speed, bool *link_up) { u32 status; status = emac_reg_r32(hw, EMAC_SGMII_PHY, EMAC_SGMII_PHY_AUTONEG1_STATUS) & 0xff; status <<= 8; status |= emac_reg_r32(hw, EMAC_SGMII_PHY, EMAC_SGMII_PHY_AUTONEG0_STATUS) & 0xff; if (!(status & TXCFG_LINK)) { *link_up = false; *speed = EMAC_LINK_SPEED_UNKNOWN; return 0; } *link_up = true; switch (status & TXCFG_MODE_BMSK) { case TXCFG_1000_FULL: *speed = EMAC_LINK_SPEED_1GB_FULL; break; case TXCFG_100_FULL: *speed = EMAC_LINK_SPEED_100_FULL; break; case TXCFG_100_HALF: *speed = EMAC_LINK_SPEED_100_HALF; break; case TXCFG_10_FULL: *speed = EMAC_LINK_SPEED_10_FULL; break; case TXCFG_10_HALF: *speed = EMAC_LINK_SPEED_10_HALF; break; default: *speed = EMAC_LINK_SPEED_UNKNOWN; break; 
} return 0; } /* INTR */ void emac_hw_enable_intr(struct emac_hw *hw) { struct emac_adapter *adpt = hw->adpt; struct emac_irq_info *irq_info; int i; for (i = 0; i < EMAC_NUM_CORE_IRQ; i++) { irq_info = &adpt->irq_info[i]; emac_reg_w32(hw, EMAC, irq_info->status_reg, ~DIS_INT); emac_reg_w32(hw, EMAC, irq_info->mask_reg, irq_info->mask); } if (adpt->phy_mode == PHY_INTERFACE_MODE_SGMII) { irq_info = &adpt->irq_info[EMAC_SGMII_PHY_IRQ]; emac_reg_w32(hw, EMAC_SGMII_PHY, irq_info->mask_reg, irq_info->mask); } wmb(); } void emac_hw_disable_intr(struct emac_hw *hw) { struct emac_adapter *adpt = hw->adpt; struct emac_irq_info *irq_info; int i; for (i = 0; i < EMAC_NUM_CORE_IRQ; i++) { irq_info = &adpt->irq_info[i]; emac_reg_w32(hw, EMAC, irq_info->status_reg, DIS_INT); emac_reg_w32(hw, EMAC, irq_info->mask_reg, 0); } if (adpt->tstamp_en) emac_reg_w32(hw, EMAC_1588, EMAC_P1588_PTP_EXPANDED_INT_MASK, 0); if (adpt->phy_mode == PHY_INTERFACE_MODE_SGMII) { irq_info = &adpt->irq_info[EMAC_SGMII_PHY_IRQ]; emac_reg_w32(hw, EMAC_SGMII_PHY, irq_info->mask_reg, 0); } wmb(); } /* MC */ void emac_hw_set_mc_addr(struct emac_hw *hw, u8 *addr) { u32 crc32, bit, reg, mta; /* Calculate the CRC of the MAC address */ crc32 = ether_crc(ETH_ALEN, addr); /* The HASH Table is an array of 2 32-bit registers. It is * treated like an array of 64 bits (BitArray[hash_value]). * Use the upper 6 bits of the above CRC as the hash value. 
*/ reg = (crc32 >> 31) & 0x1; bit = (crc32 >> 26) & 0x1F; mta = emac_reg_r32(hw, EMAC, EMAC_HASH_TAB_REG0 + (reg << 2)); mta |= (0x1 << bit); emac_reg_w32(hw, EMAC, EMAC_HASH_TAB_REG0 + (reg << 2), mta); wmb(); } void emac_hw_clear_mc_addr(struct emac_hw *hw) { emac_reg_w32(hw, EMAC, EMAC_HASH_TAB_REG0, 0); emac_reg_w32(hw, EMAC, EMAC_HASH_TAB_REG1, 0); wmb(); } /* definitions for RSS */ #define EMAC_RSS_KEY(_i, _type) \ (EMAC_RSS_KEY0 + ((_i) * sizeof(_type))) #define EMAC_RSS_TBL(_i, _type) \ (EMAC_IDT_TABLE0 + ((_i) * sizeof(_type))) /* RSS */ void emac_hw_config_rss(struct emac_hw *hw) { int key_len_by_u32 = sizeof(hw->rss_key) / sizeof(u32); int idt_len_by_u32 = sizeof(hw->rss_idt) / sizeof(u32); u32 rxq0; int i; /* Fill out hash function keys */ for (i = 0; i < key_len_by_u32; i++) { u32 key, idx_base; idx_base = (key_len_by_u32 - i) * 4; key = ((hw->rss_key[idx_base - 1]) | (hw->rss_key[idx_base - 2] << 8) | (hw->rss_key[idx_base - 3] << 16) | (hw->rss_key[idx_base - 4] << 24)); emac_reg_w32(hw, EMAC, EMAC_RSS_KEY(i, u32), key); } /* Fill out redirection table */ for (i = 0; i < idt_len_by_u32; i++) emac_reg_w32(hw, EMAC, EMAC_RSS_TBL(i, u32), hw->rss_idt[i]); emac_reg_w32(hw, EMAC, EMAC_BASE_CPU_NUMBER, hw->rss_base_cpu); rxq0 = emac_reg_r32(hw, EMAC, EMAC_RXQ_CTRL_0); if (hw->rss_hstype & EMAC_RSS_HSTYP_IPV4_EN) rxq0 |= RXQ0_RSS_HSTYP_IPV4_EN; else rxq0 &= ~RXQ0_RSS_HSTYP_IPV4_EN; if (hw->rss_hstype & EMAC_RSS_HSTYP_TCP4_EN) rxq0 |= RXQ0_RSS_HSTYP_IPV4_TCP_EN; else rxq0 &= ~RXQ0_RSS_HSTYP_IPV4_TCP_EN; if (hw->rss_hstype & EMAC_RSS_HSTYP_IPV6_EN) rxq0 |= RXQ0_RSS_HSTYP_IPV6_EN; else rxq0 &= ~RXQ0_RSS_HSTYP_IPV6_EN; if (hw->rss_hstype & EMAC_RSS_HSTYP_TCP6_EN) rxq0 |= RXQ0_RSS_HSTYP_IPV6_TCP_EN; else rxq0 &= ~RXQ0_RSS_HSTYP_IPV6_TCP_EN; rxq0 |= ((hw->rss_idt_size << IDT_TABLE_SIZE_SHFT) & IDT_TABLE_SIZE_BMSK); rxq0 |= RSS_HASH_EN; wmb(); /* ensure all parameters are written before we enable RSS */ emac_reg_w32(hw, EMAC, EMAC_RXQ_CTRL_0, rxq0); wmb(); } /* 
Config MAC modes */ void emac_hw_config_mac_ctrl(struct emac_hw *hw) { u32 mac; mac = emac_reg_r32(hw, EMAC, EMAC_MAC_CTRL); if (CHK_HW_FLAG(VLANSTRIP_EN)) mac |= VLAN_STRIP; else mac &= ~VLAN_STRIP; if (CHK_HW_FLAG(PROMISC_EN)) mac |= PROM_MODE; else mac &= ~PROM_MODE; if (CHK_HW_FLAG(MULTIALL_EN)) mac |= MULTI_ALL; else mac &= ~MULTI_ALL; if (CHK_HW_FLAG(LOOPBACK_EN)) mac |= MAC_LP_EN; else mac &= ~MAC_LP_EN; emac_reg_w32(hw, EMAC, EMAC_MAC_CTRL, mac); wmb(); } /* Wake On LAN (WOL) */ void emac_hw_config_wol(struct emac_hw *hw, u32 wufc) { u32 wol = 0; /* turn on magic packet event */ if (wufc & EMAC_WOL_MAGIC) wol |= MG_FRAME_EN | MG_FRAME_PME | WK_FRAME_EN; /* turn on link up event */ if (wufc & EMAC_WOL_PHY) wol |= LK_CHG_EN | LK_CHG_PME; emac_reg_w32(hw, EMAC, EMAC_WOL_CTRL0, wol); wmb(); } /* Power Management */ void emac_hw_config_pow_save(struct emac_hw *hw, u32 speed, bool wol_en, bool rx_en) { u32 dma_mas, mac; dma_mas = emac_reg_r32(hw, EMAC, EMAC_DMA_MAS_CTRL); dma_mas &= ~LPW_CLK_SEL; dma_mas |= LPW_STATE; mac = emac_reg_r32(hw, EMAC, EMAC_MAC_CTRL); mac &= ~(FULLD | RXEN | TXEN); mac = (mac & ~SPEED_BMSK) | (((u32)emac_mac_speed_10_100 << SPEED_SHFT) & SPEED_BMSK); if (wol_en) { if (rx_en) mac |= (RXEN | BROAD_EN); /* If WOL is enabled, set link speed/duplex for mac */ if (EMAC_LINK_SPEED_1GB_FULL == speed) mac = (mac & ~SPEED_BMSK) | (((u32)emac_mac_speed_1000 << SPEED_SHFT) & SPEED_BMSK); if (EMAC_LINK_SPEED_10_FULL == speed || EMAC_LINK_SPEED_100_FULL == speed || EMAC_LINK_SPEED_1GB_FULL == speed) mac |= FULLD; } else { /* select lower clock speed if WOL is disabled */ dma_mas |= LPW_CLK_SEL; } emac_reg_w32(hw, EMAC, EMAC_DMA_MAS_CTRL, dma_mas); emac_reg_w32(hw, EMAC, EMAC_MAC_CTRL, mac); wmb(); } /* Config descriptor rings */ static void emac_hw_config_ring_ctrl(struct emac_hw *hw) { struct emac_adapter *adpt = hw->adpt; if (adpt->tstamp_en) { emac_reg_update32(hw, EMAC_CSR, EMAC_EMAC_WRAPPER_CSR1, 0, ENABLE_RRD_TIMESTAMP); } /* TPD */ 
emac_reg_w32(hw, EMAC, EMAC_DESC_CTRL_1, EMAC_DMA_ADDR_HI(adpt->tx_queue[0].tpd.tpdma)); switch (adpt->num_txques) { case 4: emac_reg_w32(hw, EMAC, EMAC_H3TPD_BASE_ADDR_LO, EMAC_DMA_ADDR_LO(adpt->tx_queue[3].tpd.tpdma)); case 3: emac_reg_w32(hw, EMAC, EMAC_H2TPD_BASE_ADDR_LO, EMAC_DMA_ADDR_LO(adpt->tx_queue[2].tpd.tpdma)); case 2: emac_reg_w32(hw, EMAC, EMAC_H1TPD_BASE_ADDR_LO, EMAC_DMA_ADDR_LO(adpt->tx_queue[1].tpd.tpdma)); case 1: emac_reg_w32(hw, EMAC, EMAC_DESC_CTRL_8, EMAC_DMA_ADDR_LO(adpt->tx_queue[0].tpd.tpdma)); break; default: emac_err(hw->adpt, "Invalid number of TX queues (%d)\n", adpt->num_txques); return; } emac_reg_w32(hw, EMAC, EMAC_DESC_CTRL_9, adpt->tx_queue[0].tpd.count & TPD_RING_SIZE_BMSK); /* RFD & RRD */ emac_reg_w32(hw, EMAC, EMAC_DESC_CTRL_0, EMAC_DMA_ADDR_HI(adpt->rx_queue[0].rfd.rfdma)); switch (adpt->num_rxques) { case 4: emac_reg_w32(hw, EMAC, EMAC_DESC_CTRL_13, EMAC_DMA_ADDR_LO(adpt->rx_queue[3].rfd.rfdma)); emac_reg_w32(hw, EMAC, EMAC_DESC_CTRL_16, EMAC_DMA_ADDR_LO(adpt->rx_queue[3].rrd.rrdma)); case 3: emac_reg_w32(hw, EMAC, EMAC_DESC_CTRL_12, EMAC_DMA_ADDR_LO(adpt->rx_queue[2].rfd.rfdma)); emac_reg_w32(hw, EMAC, EMAC_DESC_CTRL_15, EMAC_DMA_ADDR_LO(adpt->rx_queue[2].rrd.rrdma)); case 2: emac_reg_w32(hw, EMAC, EMAC_DESC_CTRL_10, EMAC_DMA_ADDR_LO(adpt->rx_queue[1].rfd.rfdma)); emac_reg_w32(hw, EMAC, EMAC_DESC_CTRL_14, EMAC_DMA_ADDR_LO(adpt->rx_queue[1].rrd.rrdma)); case 1: emac_reg_w32(hw, EMAC, EMAC_DESC_CTRL_2, EMAC_DMA_ADDR_LO(adpt->rx_queue[0].rfd.rfdma)); emac_reg_w32(hw, EMAC, EMAC_DESC_CTRL_5, EMAC_DMA_ADDR_LO(adpt->rx_queue[0].rrd.rrdma)); break; default: emac_err(hw->adpt, "Invalid number of RX queues (%d)\n", adpt->num_rxques); return; } emac_reg_w32(hw, EMAC, EMAC_DESC_CTRL_3, adpt->rx_queue[0].rfd.count & RFD_RING_SIZE_BMSK); emac_reg_w32(hw, EMAC, EMAC_DESC_CTRL_6, adpt->rx_queue[0].rrd.count & RRD_RING_SIZE_BMSK); emac_reg_w32(hw, EMAC, EMAC_DESC_CTRL_4, adpt->rxbuf_size & RX_BUFFER_SIZE_BMSK); emac_reg_w32(hw, EMAC, 
EMAC_DESC_CTRL_11, 0); wmb(); /* ensure all parameters are written before we enable them */ /* Load all of base address above */ emac_reg_w32(hw, EMAC, EMAC_INTER_SRAM_PART9, 1); wmb(); } /* Config transmit parameters */ static void emac_hw_config_tx_ctrl(struct emac_hw *hw) { u16 tx_offload_thresh = EMAC_MAX_TX_OFFLOAD_THRESH; u32 val; emac_reg_w32(hw, EMAC, EMAC_TXQ_CTRL_1, (tx_offload_thresh >> 3) & JUMBO_TASK_OFFLOAD_THRESHOLD_BMSK); val = (hw->tpd_burst << NUM_TPD_BURST_PREF_SHFT) & NUM_TPD_BURST_PREF_BMSK; val |= (TXQ_MODE | LS_8023_SP); val |= (0x0100 << NUM_TXF_BURST_PREF_SHFT) & NUM_TXF_BURST_PREF_BMSK; emac_reg_w32(hw, EMAC, EMAC_TXQ_CTRL_0, val); emac_reg_update32(hw, EMAC, EMAC_TXQ_CTRL_2, (TXF_HWM_BMSK | TXF_LWM_BMSK), 0); wmb(); } /* Config receive parameters */ static void emac_hw_config_rx_ctrl(struct emac_hw *hw) { u32 val; val = ((hw->rfd_burst << NUM_RFD_BURST_PREF_SHFT) & NUM_RFD_BURST_PREF_BMSK); val |= (SP_IPV6 | CUT_THRU_EN); emac_reg_w32(hw, EMAC, EMAC_RXQ_CTRL_0, val); val = emac_reg_r32(hw, EMAC, EMAC_RXQ_CTRL_1); val &= ~(JUMBO_1KAH_BMSK | RFD_PREF_LOW_THRESHOLD_BMSK | RFD_PREF_UP_THRESHOLD_BMSK); val |= (JUMBO_1KAH << JUMBO_1KAH_SHFT) | (RFD_PREF_LOW_TH << RFD_PREF_LOW_THRESHOLD_SHFT) | (RFD_PREF_UP_TH << RFD_PREF_UP_THRESHOLD_SHFT); emac_reg_w32(hw, EMAC, EMAC_RXQ_CTRL_1, val); val = emac_reg_r32(hw, EMAC, EMAC_RXQ_CTRL_2); val &= ~(RXF_DOF_THRESHOLD_BMSK | RXF_UOF_THRESHOLD_BMSK); val |= (RXF_DOF_TH << RXF_DOF_THRESHOLD_SHFT) | (RXF_UOF_TH << RXF_UOF_THRESHOLD_SHFT); emac_reg_w32(hw, EMAC, EMAC_RXQ_CTRL_2, val); val = emac_reg_r32(hw, EMAC, EMAC_RXQ_CTRL_3); val &= ~(RXD_TIMER_BMSK | RXD_THRESHOLD_BMSK); val |= RXD_TH << RXD_THRESHOLD_SHFT; emac_reg_w32(hw, EMAC, EMAC_RXQ_CTRL_3, val); wmb(); } /* Config dma */ static void emac_hw_config_dma_ctrl(struct emac_hw *hw) { u32 dma_ctrl; dma_ctrl = DMAR_REQ_PRI; switch (hw->dma_order) { case emac_dma_ord_in: dma_ctrl |= IN_ORDER_MODE; break; case emac_dma_ord_enh: dma_ctrl |= ENH_ORDER_MODE; 
break; case emac_dma_ord_out: dma_ctrl |= OUT_ORDER_MODE; break; default: break; } dma_ctrl |= (((u32)hw->dmar_block) << REGRDBLEN_SHFT) & REGRDBLEN_BMSK; dma_ctrl |= (((u32)hw->dmaw_block) << REGWRBLEN_SHFT) & REGWRBLEN_BMSK; dma_ctrl |= (((u32)hw->dmar_dly_cnt) << DMAR_DLY_CNT_SHFT) & DMAR_DLY_CNT_BMSK; dma_ctrl |= (((u32) hw->dmaw_dly_cnt) << DMAW_DLY_CNT_SHFT) & DMAW_DLY_CNT_BMSK; emac_reg_w32(hw, EMAC, EMAC_DMA_CTRL, dma_ctrl); wmb(); } /* Flow Control (fc) */ static int emac_get_fc_mode(struct emac_hw *hw, enum emac_fc_mode *mode) { u16 i, bmsr = 0, pssr = 0; int retval = 0; for (i = 0; i < EMAC_MAX_SETUP_LNK_CYCLE; i++) { retval = emac_read_phy_reg(hw, hw->phy_addr, MII_BMSR, &bmsr); if (retval) return retval; if (bmsr & BMSR_LSTATUS) { retval = emac_read_phy_reg(hw, hw->phy_addr, MII_PSSR, &pssr); if (retval) return retval; if (!(pssr & PSSR_SPD_DPLX_RESOLVED)) { emac_err(hw->adpt, "error for speed duplex resolved\n"); return -EINVAL; } if ((pssr & PSSR_FC_TXEN) && (pssr & PSSR_FC_RXEN)) { *mode = emac_fc_full; } else if (pssr & PSSR_FC_TXEN) { *mode = emac_fc_tx_pause; } else if (pssr & PSSR_FC_RXEN) { *mode = emac_fc_rx_pause; } else { *mode = emac_fc_none; } break; } msleep(100); /* link can take upto few seconds to come up */ } if (i == EMAC_MAX_SETUP_LNK_CYCLE) { emac_err(hw->adpt, "error when get flow control mode\n"); retval = -EINVAL; } return retval; } int emac_hw_config_fc(struct emac_hw *hw) { u32 mac; int retval; if (hw->disable_fc_autoneg) { hw->cur_fc_mode = hw->req_fc_mode; } else { retval = emac_get_fc_mode(hw, &hw->cur_fc_mode); if (retval) return retval; } mac = emac_reg_r32(hw, EMAC, EMAC_MAC_CTRL); switch (hw->cur_fc_mode) { case emac_fc_none: mac &= ~(RXFC | TXFC); break; case emac_fc_rx_pause: mac &= ~TXFC; mac |= RXFC; break; case emac_fc_tx_pause: mac |= TXFC; mac &= ~RXFC; break; case emac_fc_full: case emac_fc_default: mac |= (TXFC | RXFC); break; default: emac_err(hw->adpt, "flow control param set incorrectly\n"); return -EINVAL; 
} emac_reg_w32(hw, EMAC, EMAC_MAC_CTRL, mac); wmb(); return 0; } /* Configure MAC */ void emac_hw_config_mac(struct emac_hw *hw) { u32 val; emac_hw_set_mac_addr(hw, hw->mac_addr); emac_hw_config_ring_ctrl(hw); emac_reg_w32(hw, EMAC, EMAC_MAX_FRAM_LEN_CTRL, hw->mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN); emac_hw_config_tx_ctrl(hw); emac_hw_config_rx_ctrl(hw); emac_hw_config_dma_ctrl(hw); if (CHK_HW_FLAG(PTP_CAP)) emac_ptp_config(hw); val = emac_reg_r32(hw, EMAC, EMAC_AXI_MAST_CTRL); val &= ~(DATA_BYTE_SWAP | MAX_BOUND); val |= MAX_BTYPE; emac_reg_w32(hw, EMAC, EMAC_AXI_MAST_CTRL, val); emac_reg_w32(hw, EMAC, EMAC_CLK_GATE_CTRL, 0); emac_reg_w32(hw, EMAC, EMAC_MISC_CTRL, RX_UNCPL_INT_EN); wmb(); } /* Reset MAC */ void emac_hw_reset_mac(struct emac_hw *hw) { emac_reg_w32(hw, EMAC, EMAC_INT_MASK, 0); emac_reg_w32(hw, EMAC, EMAC_INT_STATUS, DIS_INT); emac_hw_stop_mac(hw); emac_reg_update32(hw, EMAC, EMAC_DMA_MAS_CTRL, 0, SOFT_RST); wmb(); /* ensure mac is fully reset */ udelay(100); emac_reg_update32(hw, EMAC, EMAC_DMA_MAS_CTRL, 0, INT_RD_CLR_EN); wmb(); } /* Start MAC */ void emac_hw_start_mac(struct emac_hw *hw) { u32 mac, csr1; /* enable tx queue */ if (hw->adpt->num_txques && (hw->adpt->num_txques <= EMAC_MAX_TX_QUEUES)) { emac_reg_update32(hw, EMAC, EMAC_TXQ_CTRL_0, 0, TXQ_EN); } /* enable rx queue */ if (hw->adpt->num_rxques && (hw->adpt->num_rxques <= EMAC_MAX_RX_QUEUES)) { emac_reg_update32(hw, EMAC, EMAC_RXQ_CTRL_0, 0, RXQ_EN); } /* enable mac control */ mac = emac_reg_r32(hw, EMAC, EMAC_MAC_CTRL); csr1 = emac_reg_r32(hw, EMAC_CSR, EMAC_EMAC_WRAPPER_CSR1); mac |= TXEN | RXEN; /* enable RX/TX */ mac |= (TXFC | RXFC); /* enable RX/TX Flow Control */ /* setup link speed */ mac &= ~SPEED_BMSK; switch (hw->link_speed) { case EMAC_LINK_SPEED_1GB_FULL: mac |= ((emac_mac_speed_1000 << SPEED_SHFT) & SPEED_BMSK); csr1 |= FREQ_MODE; break; default: mac |= ((emac_mac_speed_10_100 << SPEED_SHFT) & SPEED_BMSK); csr1 &= ~FREQ_MODE; break; } switch (hw->link_speed) { case 
EMAC_LINK_SPEED_1GB_FULL: case EMAC_LINK_SPEED_100_FULL: case EMAC_LINK_SPEED_10_FULL: mac |= FULLD; break; default: mac &= ~FULLD; } /* other parameters */ mac |= (CRCE | PCRCE); mac |= ((hw->preamble << PRLEN_SHFT) & PRLEN_BMSK); mac |= BROAD_EN; mac |= (FLCHK | RX_CHKSUM_EN); mac &= ~(HUGEN | VLAN_STRIP | TPAUSE | SIMR | HUGE | MULTI_ALL | DEBUG_MODE | SINGLE_PAUSE_MODE); emac_reg_w32(hw, EMAC_CSR, EMAC_EMAC_WRAPPER_CSR1, csr1); emac_reg_w32(hw, EMAC, EMAC_MAC_CTRL, mac); /* enable interrupt read clear, low power sleep mode and the irq moderators */ emac_reg_w32(hw, EMAC, EMAC_IRQ_MOD_TIM_INIT, hw->irq_mod); emac_reg_w32(hw, EMAC, EMAC_DMA_MAS_CTRL, (INT_RD_CLR_EN | LPW_MODE | IRQ_MODERATOR_EN | IRQ_MODERATOR2_EN)); if (CHK_HW_FLAG(PTP_CAP)) { if (hw->link_speed == EMAC_LINK_SPEED_1GB_FULL) emac_ptp_set_linkspeed(hw, emac_mac_speed_1000); else emac_ptp_set_linkspeed(hw, emac_mac_speed_10_100); } emac_hw_config_mac_ctrl(hw); emac_reg_update32(hw, EMAC, EMAC_ATHR_HEADER_CTRL, (HEADER_ENABLE | HEADER_CNT_EN), 0); emac_reg_update32(hw, EMAC_CSR, EMAC_EMAC_WRAPPER_CSR2, 0, WOL_EN); wmb(); } /* Stop MAC */ void emac_hw_stop_mac(struct emac_hw *hw) { emac_reg_update32(hw, EMAC, EMAC_RXQ_CTRL_0, RXQ_EN, 0); emac_reg_update32(hw, EMAC, EMAC_TXQ_CTRL_0, TXQ_EN, 0); emac_reg_update32(hw, EMAC, EMAC_MAC_CTRL, (TXEN | RXEN), 0); wmb(); /* make sure mac is stopped before we proceede */ udelay(1000); } /* set MAC address */ void emac_hw_set_mac_addr(struct emac_hw *hw, u8 *addr) { u32 sta; /* for example: 00-A0-C6-11-22-33 * 0<-->C6112233, 1<-->00A0. */ /* low dword */ sta = (((u32)addr[2]) << 24) | (((u32)addr[3]) << 16) | (((u32)addr[4]) << 8) | (((u32)addr[5])); emac_reg_w32(hw, EMAC, EMAC_MAC_STA_ADDR0, sta); /* hight dword */ sta = (((u32)addr[0]) << 8) | (((u32)addr[1])); emac_reg_w32(hw, EMAC, EMAC_MAC_STA_ADDR1, sta); wmb(); }
gpl-2.0
liusen09003110-163-com/linux
drivers/ata/libata-eh.c
388
111266
/* * libata-eh.c - libata error handling * * Maintained by: Tejun Heo <tj@kernel.org> * Please ALWAYS copy linux-ide@vger.kernel.org * on emails. * * Copyright 2006 Tejun Heo <htejun@gmail.com> * * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; see the file COPYING. If not, write to * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, * USA. * * * libata documentation is available via 'make {ps|pdf}docs', * as Documentation/DocBook/libata.* * * Hardware documentation available from http://www.t13.org/ and * http://www.sata-io.org/ * */ #include <linux/kernel.h> #include <linux/blkdev.h> #include <linux/export.h> #include <linux/pci.h> #include <scsi/scsi.h> #include <scsi/scsi_host.h> #include <scsi/scsi_eh.h> #include <scsi/scsi_device.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_dbg.h> #include "../scsi/scsi_transport_api.h" #include <linux/libata.h> #include <trace/events/libata.h> #include "libata.h" enum { /* speed down verdicts */ ATA_EH_SPDN_NCQ_OFF = (1 << 0), ATA_EH_SPDN_SPEED_DOWN = (1 << 1), ATA_EH_SPDN_FALLBACK_TO_PIO = (1 << 2), ATA_EH_SPDN_KEEP_ERRORS = (1 << 3), /* error flags */ ATA_EFLAG_IS_IO = (1 << 0), ATA_EFLAG_DUBIOUS_XFER = (1 << 1), ATA_EFLAG_OLD_ER = (1 << 31), /* error categories */ ATA_ECAT_NONE = 0, ATA_ECAT_ATA_BUS = 1, ATA_ECAT_TOUT_HSM = 2, ATA_ECAT_UNK_DEV = 3, ATA_ECAT_DUBIOUS_NONE = 4, ATA_ECAT_DUBIOUS_ATA_BUS = 5, ATA_ECAT_DUBIOUS_TOUT_HSM = 6, ATA_ECAT_DUBIOUS_UNK_DEV = 7, ATA_ECAT_NR = 8, 
	/* default timeout for EH internal commands with no table entry */
	ATA_EH_CMD_DFL_TIMEOUT		= 5000,

	/* always put at least this amount of time between resets */
	ATA_EH_RESET_COOL_DOWN		= 5000,

	/* Waiting in ->prereset can never be reliable.  It's
	 * sometimes nice to wait there but it can't be depended upon;
	 * otherwise, we wouldn't be resetting.  Just give it enough
	 * time for most drives to spin up.
	 */
	ATA_EH_PRERESET_TIMEOUT		= 10000,
	ATA_EH_FASTDRAIN_INTERVAL	= 3000,

	ATA_EH_UA_TRIES			= 5,

	/* probe speed down parameters, see ata_eh_schedule_probe() */
	ATA_EH_PROBE_TRIAL_INTERVAL	= 60000,	/* 1 min */
	ATA_EH_PROBE_TRIALS		= 2,
};

/* The following table determines how we sequence resets.  Each entry
 * represents timeout for that try.  The first try can be soft or
 * hardreset.  All others are hardreset if available.  In most cases
 * the first reset w/ 10sec timeout should succeed.  Following entries
 * are mostly for error handling, hotplug and those outlier devices that
 * take an exceptionally long time to recover from reset.
 */
static const unsigned long ata_eh_reset_timeouts[] = {
	10000,	/* most drives spin up by 10sec */
	10000,	/* > 99% working drives spin up before 20sec */
	35000,	/* give > 30 secs of idleness for outlier devices */
	 5000,	/* and sweet one last chance */
	ULONG_MAX, /* > 1 min has elapsed, give up */
};

static const unsigned long ata_eh_identify_timeouts[] = {
	 5000,	/* covers > 99% of successes and not too boring on failures */
	10000,	/* combined time till here is enough even for media access */
	30000,	/* for true idiots */
	ULONG_MAX,
};

static const unsigned long ata_eh_flush_timeouts[] = {
	15000,	/* be generous with flush */
	15000,	/* ditto */
	30000,	/* and even more generous */
	ULONG_MAX,
};

static const unsigned long ata_eh_other_timeouts[] = {
	 5000,	/* same rationale as identify timeout */
	10000,	/* ditto */
	/* but no merciful 30sec for other commands, it just isn't worth it */
	ULONG_MAX,
};

/* maps a zero-terminated list of ATA commands to its retry-timeout table */
struct ata_eh_cmd_timeout_ent {
	const u8		*commands;
	const unsigned long	*timeouts;
};

/* The following table determines timeouts to use for EH internal
 * commands.  Each table entry is a command class and matches the
 * commands the entry applies to and the timeout table to use.
 *
 * On the retry after a command timed out, the next timeout value from
 * the table is used.  If the table doesn't contain further entries,
 * the last value is used.
 *
 * ehc->cmd_timeout_idx keeps track of which timeout to use per
 * command class, so if SET_FEATURES times out on the first try, the
 * next try will use the second timeout value only for that class.
 */
#define CMDS(cmds...)	(const u8 []){ cmds, 0 }
static const struct ata_eh_cmd_timeout_ent
ata_eh_cmd_timeout_table[ATA_EH_CMD_TIMEOUT_TABLE_SIZE] = {
	{ .commands = CMDS(ATA_CMD_ID_ATA, ATA_CMD_ID_ATAPI),
	  .timeouts = ata_eh_identify_timeouts, },
	{ .commands = CMDS(ATA_CMD_READ_NATIVE_MAX, ATA_CMD_READ_NATIVE_MAX_EXT),
	  .timeouts = ata_eh_other_timeouts, },
	{ .commands = CMDS(ATA_CMD_SET_MAX, ATA_CMD_SET_MAX_EXT),
	  .timeouts = ata_eh_other_timeouts, },
	{ .commands = CMDS(ATA_CMD_SET_FEATURES),
	  .timeouts = ata_eh_other_timeouts, },
	{ .commands = CMDS(ATA_CMD_INIT_DEV_PARAMS),
	  .timeouts = ata_eh_other_timeouts, },
	{ .commands = CMDS(ATA_CMD_FLUSH, ATA_CMD_FLUSH_EXT),
	  .timeouts = ata_eh_flush_timeouts },
};
#undef CMDS

static void __ata_port_freeze(struct ata_port *ap);
#ifdef CONFIG_PM
static void ata_eh_handle_port_suspend(struct ata_port *ap);
static void ata_eh_handle_port_resume(struct ata_port *ap);
#else /* CONFIG_PM */
static void ata_eh_handle_port_suspend(struct ata_port *ap)
{ }

static void ata_eh_handle_port_resume(struct ata_port *ap)
{ }
#endif /* CONFIG_PM */

/* Append a formatted message to @ehi->desc; vscnprintf() bounds the
 * write to ATA_EH_DESC_LEN so repeated pushes can never overflow. */
static void __ata_ehi_pushv_desc(struct ata_eh_info *ehi, const char *fmt,
				 va_list args)
{
	ehi->desc_len += vscnprintf(ehi->desc + ehi->desc_len,
				    ATA_EH_DESC_LEN - ehi->desc_len,
				    fmt, args);
}

/**
 *	__ata_ehi_push_desc - push error description without adding separator
 *	@ehi: target EHI
 *	@fmt: printf format string
 *
 *	Format string according to @fmt and append it to @ehi->desc.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void __ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	__ata_ehi_pushv_desc(ehi, fmt, args);
	va_end(args);
}

/**
 *	ata_ehi_push_desc - push error description with separator
 *	@ehi: target EHI
 *	@fmt: printf format string
 *
 *	Format string according to @fmt and append it to @ehi->desc.
 *	If @ehi->desc is not empty, ", " is added in-between.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...)
{
	va_list args;

	if (ehi->desc_len)
		__ata_ehi_push_desc(ehi, ", ");

	va_start(args, fmt);
	__ata_ehi_pushv_desc(ehi, fmt, args);
	va_end(args);
}

/**
 *	ata_ehi_clear_desc - clean error description
 *	@ehi: target EHI
 *
 *	Clear @ehi->desc.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_ehi_clear_desc(struct ata_eh_info *ehi)
{
	ehi->desc[0] = '\0';
	ehi->desc_len = 0;
}

/**
 *	ata_port_desc - append port description
 *	@ap: target ATA port
 *	@fmt: printf format string
 *
 *	Format string according to @fmt and append it to port
 *	description.  If port description is not empty, " " is added
 *	in-between.  This function is to be used while initializing
 *	ata_host.  The description is printed on host registration.
 *
 *	LOCKING:
 *	None.
 */
void ata_port_desc(struct ata_port *ap, const char *fmt, ...)
{
	va_list args;

	WARN_ON(!(ap->pflags & ATA_PFLAG_INITIALIZING));

	if (ap->link.eh_info.desc_len)
		__ata_ehi_push_desc(&ap->link.eh_info, " ");

	va_start(args, fmt);
	__ata_ehi_pushv_desc(&ap->link.eh_info, fmt, args);
	va_end(args);
}

#ifdef CONFIG_PCI
/**
 *	ata_port_pbar_desc - append PCI BAR description
 *	@ap: target ATA port
 *	@bar: target PCI BAR
 *	@offset: offset into PCI BAR
 *	@name: name of the area
 *
 *	If @offset is negative, this function formats a string which
 *	contains the name, address, size and type of the BAR and
 *	appends it to the port description.  If @offset is zero or
 *	positive, only name and offsetted address is appended.
 *
 *	LOCKING:
 *	None.
 */
void ata_port_pbar_desc(struct ata_port *ap, int bar, ssize_t offset,
			const char *name)
{
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	char *type = "";
	unsigned long long start, len;

	if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
		type = "m";
	else if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
		type = "i";

	start = (unsigned long long)pci_resource_start(pdev, bar);
	len = (unsigned long long)pci_resource_len(pdev, bar);

	if (offset < 0)
		ata_port_desc(ap, "%s %s%llu@0x%llx", name, type, len, start);
	else
		ata_port_desc(ap, "%s 0x%llx", name,
				start + (unsigned long long)offset);
}
#endif /* CONFIG_PCI */

/* Return the ata_eh_cmd_timeout_table index whose command list contains
 * @cmd, or -1 if no class matches. */
static int ata_lookup_timeout_table(u8 cmd)
{
	int i;

	for (i = 0; i < ATA_EH_CMD_TIMEOUT_TABLE_SIZE; i++) {
		const u8 *cur;

		for (cur = ata_eh_cmd_timeout_table[i].commands; *cur; cur++)
			if (*cur == cmd)
				return i;
	}

	return -1;
}

/**
 *	ata_internal_cmd_timeout - determine timeout for an internal command
 *	@dev: target device
 *	@cmd: internal command to be issued
 *
 *	Determine timeout for internal command @cmd for @dev.
 *
 *	LOCKING:
 *	EH context.
 *
 *	RETURNS:
 *	Determined timeout.
 */
unsigned long ata_internal_cmd_timeout(struct ata_device *dev, u8 cmd)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;
	int ent = ata_lookup_timeout_table(cmd);
	int idx;

	if (ent < 0)
		return ATA_EH_CMD_DFL_TIMEOUT;

	idx = ehc->cmd_timeout_idx[dev->devno][ent];
	return ata_eh_cmd_timeout_table[ent].timeouts[idx];
}

/**
 *	ata_internal_cmd_timed_out - notification for internal command timeout
 *	@dev: target device
 *	@cmd: internal command which timed out
 *
 *	Notify EH that internal command @cmd for @dev timed out.  This
 *	function should be called only for commands whose timeouts are
 *	determined using ata_internal_cmd_timeout().
 *
 *	LOCKING:
 *	EH context.
 */
void ata_internal_cmd_timed_out(struct ata_device *dev, u8 cmd)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;
	int ent = ata_lookup_timeout_table(cmd);
	int idx;

	if (ent < 0)
		return;

	/* advance to the next (longer) timeout unless already at the
	 * terminating ULONG_MAX entry */
	idx = ehc->cmd_timeout_idx[dev->devno][ent];
	if (ata_eh_cmd_timeout_table[ent].timeouts[idx + 1] != ULONG_MAX)
		ehc->cmd_timeout_idx[dev->devno][ent]++;
}

/* Record an error event in the circular error ring of a device. */
static void ata_ering_record(struct ata_ering *ering, unsigned int eflags,
			     unsigned int err_mask)
{
	struct ata_ering_entry *ent;

	WARN_ON(!err_mask);

	ering->cursor++;
	ering->cursor %= ATA_ERING_SIZE;

	ent = &ering->ring[ering->cursor];
	ent->eflags = eflags;
	ent->err_mask = err_mask;
	ent->timestamp = get_jiffies_64();
}

/* Return the most recent error ring entry, or NULL if the ring is empty
 * (an unused slot has err_mask == 0). */
static struct ata_ering_entry *ata_ering_top(struct ata_ering *ering)
{
	struct ata_ering_entry *ent = &ering->ring[ering->cursor];

	if (ent->err_mask)
		return ent;
	return NULL;
}

/* Walk the error ring from newest to oldest, calling @map_fn on each
 * populated entry.  Stops early when @map_fn returns non-zero and
 * propagates that value. */
int ata_ering_map(struct ata_ering *ering,
		  int (*map_fn)(struct ata_ering_entry *, void *),
		  void *arg)
{
	int idx, rc = 0;
	struct ata_ering_entry *ent;

	idx = ering->cursor;
	do {
		ent = &ering->ring[idx];
		if (!ent->err_mask)
			break;
		rc = map_fn(ent, arg);
		if (rc)
			break;
		idx = (idx - 1 + ATA_ERING_SIZE) % ATA_ERING_SIZE;
	} while (idx != ering->cursor);

	return rc;
}

static int ata_ering_clear_cb(struct ata_ering_entry *ent, void *void_arg)
{
	ent->eflags |= ATA_EFLAG_OLD_ER;
	return 0;
}

/* Mark all entries stale (ATA_EFLAG_OLD_ER) rather than erasing them. */
static void ata_ering_clear(struct ata_ering *ering)
{
	ata_ering_map(ering, ata_ering_clear_cb, NULL);
}

/* Combined link-wide and per-device pending EH actions for @dev. */
static unsigned int ata_eh_dev_action(struct ata_device *dev)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;

	return ehc->i.action | ehc->i.dev_action[dev->devno];
}

/* Clear @action bits from @ehi.  With @dev == NULL the bits are cleared
 * link-wide and for every device; otherwise link-wide per-dev bits are
 * first pushed down to all devices, then cleared only for @dev. */
static void ata_eh_clear_action(struct ata_link *link, struct ata_device *dev,
				struct ata_eh_info *ehi, unsigned int action)
{
	struct ata_device *tdev;

	if (!dev) {
		ehi->action &= ~action;
		ata_for_each_dev(tdev, link, ALL)
			ehi->dev_action[tdev->devno] &= ~action;
	} else {
		/* doesn't make sense for port-wide EH actions */
		WARN_ON(!(action & ATA_EH_PERDEV_MASK));

		/* break ehi->action into ehi->dev_action */
		if (ehi->action & action) {
			ata_for_each_dev(tdev, link, ALL)
				ehi->dev_action[tdev->devno] |=
					ehi->action & action;
			ehi->action &= ~action;
		}

		/* turn off the specified per-dev action */
		ehi->dev_action[dev->devno] &= ~action;
	}
}

/**
 *	ata_eh_acquire - acquire EH ownership
 *	@ap: ATA port to acquire EH ownership for
 *
 *	Acquire EH ownership for @ap.  This is the basic exclusion
 *	mechanism for ports sharing a host.  Only one port hanging off
 *	the same host can claim the ownership of EH.
 *
 *	LOCKING:
 *	EH context.
 */
void ata_eh_acquire(struct ata_port *ap)
{
	mutex_lock(&ap->host->eh_mutex);
	WARN_ON_ONCE(ap->host->eh_owner);
	ap->host->eh_owner = current;
}

/**
 *	ata_eh_release - release EH ownership
 *	@ap: ATA port to release EH ownership for
 *
 *	Release EH ownership for @ap if the caller.  The caller must
 *	have acquired EH ownership using ata_eh_acquire() previously.
 *
 *	LOCKING:
 *	EH context.
 */
void ata_eh_release(struct ata_port *ap)
{
	WARN_ON_ONCE(ap->host->eh_owner != current);
	ap->host->eh_owner = NULL;
	mutex_unlock(&ap->host->eh_mutex);
}

/**
 *	ata_scsi_timed_out - SCSI layer time out callback
 *	@cmd: timed out SCSI command
 *
 *	Handles SCSI layer timeout.  We race with normal completion of
 *	the qc for @cmd.  If the qc is already gone, we lose and let
 *	the scsi command finish (EH_HANDLED).  Otherwise, the qc has
 *	timed out and EH should be invoked.  Prevent ata_qc_complete()
 *	from finishing it by setting EH_SCHEDULED and return
 *	EH_NOT_HANDLED.
 *
 *	TODO: kill this function once old EH is gone.
 *
 *	LOCKING:
 *	Called from timer context
 *
 *	RETURNS:
 *	EH_HANDLED or EH_NOT_HANDLED
 */
enum blk_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd)
{
	struct Scsi_Host *host = cmd->device->host;
	struct ata_port *ap = ata_shost_to_port(host);
	unsigned long flags;
	struct ata_queued_cmd *qc;
	enum blk_eh_timer_return ret;

	DPRINTK("ENTER\n");

	/* new EH: let the regular EH path handle the timeout */
	if (ap->ops->error_handler) {
		ret = BLK_EH_NOT_HANDLED;
		goto out;
	}

	ret = BLK_EH_HANDLED;
	spin_lock_irqsave(ap->lock, flags);
	qc = ata_qc_from_tag(ap, ap->link.active_tag);
	if (qc) {
		WARN_ON(qc->scsicmd != cmd);
		qc->flags |= ATA_QCFLAG_EH_SCHEDULED;
		qc->err_mask |= AC_ERR_TIMEOUT;
		ret = BLK_EH_NOT_HANDLED;
	}
	spin_unlock_irqrestore(ap->lock, flags);

 out:
	DPRINTK("EXIT, ret=%d\n", ret);
	return ret;
}

/* Shut the port down for driver unload: restore SControl, disable all
 * devices and leave the port frozen with ATA_PFLAG_UNLOADED set. */
static void ata_eh_unload(struct ata_port *ap)
{
	struct ata_link *link;
	struct ata_device *dev;
	unsigned long flags;

	/* Restore SControl IPM and SPD for the next driver and
	 * disable attached devices.
	 */
	ata_for_each_link(link, ap, PMP_FIRST) {
		sata_scr_write(link, SCR_CONTROL, link->saved_scontrol & 0xff0);
		ata_for_each_dev(dev, link, ALL)
			ata_dev_disable(dev);
	}

	/* freeze and set UNLOADED */
	spin_lock_irqsave(ap->lock, flags);

	ata_port_freeze(ap);			/* won't be thawed */
	ap->pflags &= ~ATA_PFLAG_EH_PENDING;	/* clear pending from freeze */
	ap->pflags |= ATA_PFLAG_UNLOADED;

	spin_unlock_irqrestore(ap->lock, flags);
}

/**
 *	ata_scsi_error - SCSI layer error handler callback
 *	@host: SCSI host on which error occurred
 *
 *	Handles SCSI-layer-thrown error events.
 *
 *	LOCKING:
 *	Inherited from SCSI layer (none, can sleep)
 *
 *	RETURNS:
 *	Zero.
 */
void ata_scsi_error(struct Scsi_Host *host)
{
	struct ata_port *ap = ata_shost_to_port(host);
	unsigned long flags;
	LIST_HEAD(eh_work_q);

	DPRINTK("ENTER\n");

	spin_lock_irqsave(host->host_lock, flags);
	list_splice_init(&host->eh_cmd_q, &eh_work_q);
	spin_unlock_irqrestore(host->host_lock, flags);

	ata_scsi_cmd_error_handler(host, ap, &eh_work_q);

	/* If we timed raced normal completion and there is nothing to
	   recover nr_timedout == 0 why exactly are we doing error recovery ? */
	ata_scsi_port_error_handler(host, ap);

	/* finish or retry handled scmd's and clean up */
	WARN_ON(host->host_failed || !list_empty(&eh_work_q));

	DPRINTK("EXIT\n");
}

/**
 * ata_scsi_cmd_error_handler - error callback for a list of commands
 * @host:	scsi host containing the port
 * @ap:		ATA port within the host
 * @eh_work_q:	list of commands to process
 *
 * process the given list of commands and return those finished to the
 * ap->eh_done_q.  This function is the first part of the libata error
 * handler which processes a given list of failed commands.
 */
void ata_scsi_cmd_error_handler(struct Scsi_Host *host, struct ata_port *ap,
				struct list_head *eh_work_q)
{
	int i;
	unsigned long flags;

	/* make sure sff pio task is not running */
	ata_sff_flush_pio_task(ap);

	/* synchronize with host lock and sort out timeouts */

	/* For new EH, all qcs are finished in one of three ways -
	 * normal completion, error completion, and SCSI timeout.
	 * Both completions can race against SCSI timeout.  When normal
	 * completion wins, the qc never reaches EH.  When error
	 * completion wins, the qc has ATA_QCFLAG_FAILED set.
	 *
	 * When SCSI timeout wins, things are a bit more complex.
	 * Normal or error completion can occur after the timeout but
	 * before this point.  In such cases, both types of
	 * completions are honored.  A scmd is determined to have
	 * timed out iff its associated qc is active and not failed.
	 */
	if (ap->ops->error_handler) {
		struct scsi_cmnd *scmd, *tmp;
		int nr_timedout = 0;

		spin_lock_irqsave(ap->lock, flags);

		/* This must occur under the ap->lock as we don't want
		   a polled recovery to race the real interrupt handler

		   The lost_interrupt handler checks for any completed but
		   non-notified command and completes much like an IRQ handler.

		   We then fall into the error recovery code which will treat
		   this as if normal completion won the race */

		if (ap->ops->lost_interrupt)
			ap->ops->lost_interrupt(ap);

		list_for_each_entry_safe(scmd, tmp, eh_work_q, eh_entry) {
			struct ata_queued_cmd *qc;

			for (i = 0; i < ATA_MAX_QUEUE; i++) {
				qc = __ata_qc_from_tag(ap, i);
				if (qc->flags & ATA_QCFLAG_ACTIVE &&
				    qc->scsicmd == scmd)
					break;
			}

			if (i < ATA_MAX_QUEUE) {
				/* the scmd has an associated qc */
				if (!(qc->flags & ATA_QCFLAG_FAILED)) {
					/* which hasn't failed yet, timeout */
					qc->err_mask |= AC_ERR_TIMEOUT;
					qc->flags |= ATA_QCFLAG_FAILED;
					nr_timedout++;
				}
			} else {
				/* Normal completion occurred after
				 * SCSI timeout but before this point.
				 * Successfully complete it.
				 */
				scmd->retries = scmd->allowed;
				scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
			}
		}

		/* If we have timed out qcs.  They belong to EH from
		 * this point but the state of the controller is
		 * unknown.  Freeze the port to make sure the IRQ
		 * handler doesn't diddle with those qcs.  This must
		 * be done atomically w.r.t. setting QCFLAG_FAILED.
		 */
		if (nr_timedout)
			__ata_port_freeze(ap);

		spin_unlock_irqrestore(ap->lock, flags);

		/* initialize eh_tries */
		ap->eh_tries = ATA_EH_MAX_TRIES;
	} else
		spin_unlock_wait(ap->lock);

}
EXPORT_SYMBOL(ata_scsi_cmd_error_handler);

/**
 * ata_scsi_port_error_handler - recover the port after the commands
 * @host:	SCSI host containing the port
 * @ap:		the ATA port
 *
 * Handle the recovery of the port @ap after all the commands
 * have been recovered.
 */
void ata_scsi_port_error_handler(struct Scsi_Host *host, struct ata_port *ap)
{
	unsigned long flags;

	/* invoke error handler */
	if (ap->ops->error_handler) {
		struct ata_link *link;

		/* acquire EH ownership */
		ata_eh_acquire(ap);
 repeat:
		/* kill fast drain timer */
		del_timer_sync(&ap->fastdrain_timer);

		/* process port resume request */
		ata_eh_handle_port_resume(ap);

		/* fetch & clear EH info */
		spin_lock_irqsave(ap->lock, flags);

		ata_for_each_link(link, ap, HOST_FIRST) {
			struct ata_eh_context *ehc = &link->eh_context;
			struct ata_device *dev;

			/* move eh_info into eh_context and reset eh_info
			 * so new events accumulate for the next EH round */
			memset(&link->eh_context, 0, sizeof(link->eh_context));
			link->eh_context.i = link->eh_info;
			memset(&link->eh_info, 0, sizeof(link->eh_info));

			ata_for_each_dev(dev, link, ENABLED) {
				int devno = dev->devno;

				ehc->saved_xfer_mode[devno] = dev->xfer_mode;
				if (ata_ncq_enabled(dev))
					ehc->saved_ncq_enabled |= 1 << devno;
			}
		}

		ap->pflags |= ATA_PFLAG_EH_IN_PROGRESS;
		ap->pflags &= ~ATA_PFLAG_EH_PENDING;
		ap->excl_link = NULL;	/* don't maintain exclusion over EH */

		spin_unlock_irqrestore(ap->lock, flags);

		/* invoke EH, skip if unloading or suspended */
		if (!(ap->pflags & (ATA_PFLAG_UNLOADING | ATA_PFLAG_SUSPENDED)))
			ap->ops->error_handler(ap);
		else {
			/* if unloading, commence suicide */
			if ((ap->pflags & ATA_PFLAG_UNLOADING) &&
			    !(ap->pflags & ATA_PFLAG_UNLOADED))
				ata_eh_unload(ap);
			ata_eh_finish(ap);
		}

		/* process port suspend request */
		ata_eh_handle_port_suspend(ap);

		/* Exception might have happened after ->error_handler
		 * recovered the port but before this point.  Repeat
		 * EH in such case.
		 */
		spin_lock_irqsave(ap->lock, flags);

		if (ap->pflags & ATA_PFLAG_EH_PENDING) {
			if (--ap->eh_tries) {
				spin_unlock_irqrestore(ap->lock, flags);
				goto repeat;
			}
			ata_port_err(ap,
				     "EH pending after %d tries, giving up\n",
				     ATA_EH_MAX_TRIES);
			ap->pflags &= ~ATA_PFLAG_EH_PENDING;
		}

		/* this run is complete, make sure EH info is clear */
		ata_for_each_link(link, ap, HOST_FIRST)
			memset(&link->eh_info, 0, sizeof(link->eh_info));

		/* end eh (clear host_eh_scheduled) while holding
		 * ap->lock such that if exception occurs after this
		 * point but before EH completion, SCSI midlayer will
		 * re-initiate EH.
		 */
		ap->ops->end_eh(ap);

		spin_unlock_irqrestore(ap->lock, flags);
		ata_eh_release(ap);
	} else {
		WARN_ON(ata_qc_from_tag(ap, ap->link.active_tag) == NULL);
		ap->ops->eng_timeout(ap);
	}

	scsi_eh_flush_done_q(&ap->eh_done_q);

	/* clean up */
	spin_lock_irqsave(ap->lock, flags);

	if (ap->pflags & ATA_PFLAG_LOADING)
		ap->pflags &= ~ATA_PFLAG_LOADING;
	else if (ap->pflags & ATA_PFLAG_SCSI_HOTPLUG)
		schedule_delayed_work(&ap->hotplug_task, 0);

	if (ap->pflags & ATA_PFLAG_RECOVERED)
		ata_port_info(ap, "EH complete\n");

	ap->pflags &= ~(ATA_PFLAG_SCSI_HOTPLUG | ATA_PFLAG_RECOVERED);

	/* tell wait_eh that we're done */
	ap->pflags &= ~ATA_PFLAG_EH_IN_PROGRESS;
	wake_up_all(&ap->eh_wait_q);

	spin_unlock_irqrestore(ap->lock, flags);
}
EXPORT_SYMBOL_GPL(ata_scsi_port_error_handler);

/**
 *	ata_port_wait_eh - Wait for the currently pending EH to complete
 *	@ap: Port to wait EH for
 *
 *	Wait until the currently pending EH is complete.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_port_wait_eh(struct ata_port *ap)
{
	unsigned long flags;
	DEFINE_WAIT(wait);

 retry:
	spin_lock_irqsave(ap->lock, flags);

	while (ap->pflags & (ATA_PFLAG_EH_PENDING | ATA_PFLAG_EH_IN_PROGRESS)) {
		prepare_to_wait(&ap->eh_wait_q, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock_irqrestore(ap->lock, flags);
		schedule();
		spin_lock_irqsave(ap->lock, flags);
	}
	finish_wait(&ap->eh_wait_q, &wait);

	spin_unlock_irqrestore(ap->lock, flags);

	/* make sure SCSI EH is complete */
	if (scsi_host_in_recovery(ap->scsi_host)) {
		ata_msleep(ap, 10);
		goto retry;
	}
}
EXPORT_SYMBOL_GPL(ata_port_wait_eh);

/* Number of in-flight non-internal qcs on @ap; caller holds ap->lock. */
static int ata_eh_nr_in_flight(struct ata_port *ap)
{
	unsigned int tag;
	int nr = 0;

	/* count only non-internal commands */
	for (tag = 0; tag < ATA_MAX_QUEUE - 1; tag++)
		if (ata_qc_from_tag(ap, tag))
			nr++;

	return nr;
}

/* Fast-drain timer: if no qc completed during the last interval, time
 * out everything still in flight and freeze the port; otherwise re-arm. */
void ata_eh_fastdrain_timerfn(unsigned long arg)
{
	struct ata_port *ap = (void *)arg;
	unsigned long flags;
	int cnt;

	spin_lock_irqsave(ap->lock, flags);

	cnt = ata_eh_nr_in_flight(ap);

	/* are we done? */
	if (!cnt)
		goto out_unlock;

	if (cnt == ap->fastdrain_cnt) {
		unsigned int tag;

		/* No progress during the last interval, tag all
		 * in-flight qcs as timed out and freeze the port.
		 */
		for (tag = 0; tag < ATA_MAX_QUEUE - 1; tag++) {
			struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);
			if (qc)
				qc->err_mask |= AC_ERR_TIMEOUT;
		}

		ata_port_freeze(ap);
	} else {
		/* some qcs have finished, give it another chance */
		ap->fastdrain_cnt = cnt;
		ap->fastdrain_timer.expires =
			ata_deadline(jiffies, ATA_EH_FASTDRAIN_INTERVAL);
		add_timer(&ap->fastdrain_timer);
	}

 out_unlock:
	spin_unlock_irqrestore(ap->lock, flags);
}

/**
 *	ata_eh_set_pending - set ATA_PFLAG_EH_PENDING and activate fast drain
 *	@ap: target ATA port
 *	@fastdrain: activate fast drain
 *
 *	Set ATA_PFLAG_EH_PENDING and activate fast drain if @fastdrain
 *	is non-zero and EH wasn't pending before.  Fast drain ensures
 *	that EH kicks in in timely manner.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
static void ata_eh_set_pending(struct ata_port *ap, int fastdrain)
{
	int cnt;

	/* already scheduled? */
	if (ap->pflags & ATA_PFLAG_EH_PENDING)
		return;

	ap->pflags |= ATA_PFLAG_EH_PENDING;

	if (!fastdrain)
		return;

	/* do we have in-flight qcs? */
	cnt = ata_eh_nr_in_flight(ap);
	if (!cnt)
		return;

	/* activate fast drain */
	ap->fastdrain_cnt = cnt;
	ap->fastdrain_timer.expires =
		ata_deadline(jiffies, ATA_EH_FASTDRAIN_INTERVAL);
	add_timer(&ap->fastdrain_timer);
}

/**
 *	ata_qc_schedule_eh - schedule qc for error handling
 *	@qc: command to schedule error handling for
 *
 *	Schedule error handling for @qc.  EH will kick in as soon as
 *	other commands are drained.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_qc_schedule_eh(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct request_queue *q = qc->scsicmd->device->request_queue;
	unsigned long flags;

	WARN_ON(!ap->ops->error_handler);

	qc->flags |= ATA_QCFLAG_FAILED;
	ata_eh_set_pending(ap, 1);

	/* The following will fail if timeout has already expired.
	 * ata_scsi_error() takes care of such scmds on EH entry.
	 * Note that ATA_QCFLAG_FAILED is unconditionally set after
	 * this function completes.
	 */
	spin_lock_irqsave(q->queue_lock, flags);
	blk_abort_request(qc->scsicmd->request);
	spin_unlock_irqrestore(q->queue_lock, flags);
}

/**
 * ata_std_sched_eh - non-libsas ata_ports issue eh with this common routine
 * @ap: ATA port to schedule EH for
 *
 *	LOCKING: inherited from ata_port_schedule_eh
 *	spin_lock_irqsave(host lock)
 */
void ata_std_sched_eh(struct ata_port *ap)
{
	WARN_ON(!ap->ops->error_handler);

	if (ap->pflags & ATA_PFLAG_INITIALIZING)
		return;

	ata_eh_set_pending(ap, 1);
	scsi_schedule_eh(ap->scsi_host);

	DPRINTK("port EH scheduled\n");
}
EXPORT_SYMBOL_GPL(ata_std_sched_eh);

/**
 * ata_std_end_eh - non-libsas ata_ports complete eh with this common routine
 * @ap: ATA port to end EH for
 *
 * In the libata object model there is a 1:1 mapping of ata_port to
 * shost, so host fields can be directly manipulated under ap->lock, in
 * the libsas case we need to hold a lock at the ha->level to coordinate
 * these events.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_std_end_eh(struct ata_port *ap)
{
	struct Scsi_Host *host = ap->scsi_host;

	host->host_eh_scheduled = 0;
}
EXPORT_SYMBOL(ata_std_end_eh);

/**
 *	ata_port_schedule_eh - schedule error handling without a qc
 *	@ap: ATA port to schedule EH for
 *
 *	Schedule error handling for @ap.  EH will kick in as soon as
 *	all commands are drained.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_port_schedule_eh(struct ata_port *ap)
{
	/* see: ata_std_sched_eh, unless you know better */
	ap->ops->sched_eh(ap);
}

/* Fail all active qcs on @ap (restricted to @link when non-NULL) and
 * complete them into EH; schedules EH itself when nothing was aborted. */
static int ata_do_link_abort(struct ata_port *ap, struct ata_link *link)
{
	int tag, nr_aborted = 0;

	WARN_ON(!ap->ops->error_handler);

	/* we're gonna abort all commands, no need for fast drain */
	ata_eh_set_pending(ap, 0);

	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
		struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);

		if (qc && (!link || qc->dev->link == link)) {
			qc->flags |= ATA_QCFLAG_FAILED;
			ata_qc_complete(qc);
			nr_aborted++;
		}
	}

	if (!nr_aborted)
		ata_port_schedule_eh(ap);

	return nr_aborted;
}

/**
 *	ata_link_abort - abort all qc's on the link
 *	@link: ATA link to abort qc's for
 *
 *	Abort all active qc's active on @link and schedule EH.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Number of aborted qc's.
 */
int ata_link_abort(struct ata_link *link)
{
	return ata_do_link_abort(link->ap, link);
}

/**
 *	ata_port_abort - abort all qc's on the port
 *	@ap: ATA port to abort qc's for
 *
 *	Abort all active qc's of @ap and schedule EH.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host_set lock)
 *
 *	RETURNS:
 *	Number of aborted qc's.
 */
int ata_port_abort(struct ata_port *ap)
{
	return ata_do_link_abort(ap, NULL);
}

/**
 *	__ata_port_freeze - freeze port
 *	@ap: ATA port to freeze
 *
 *	This function is called when HSM violation or some other
 *	condition disrupts normal operation of the port.  Frozen port
 *	is not allowed to perform any operation until the port is
 *	thawed, which usually follows a successful reset.
 *
 *	ap->ops->freeze() callback can be used for freezing the port
 *	hardware-wise (e.g. mask interrupt and stop DMA engine).  If a
 *	port cannot be frozen hardware-wise, the interrupt handler
 *	must ack and clear interrupts unconditionally while the port
 *	is frozen.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
static void __ata_port_freeze(struct ata_port *ap)
{
	WARN_ON(!ap->ops->error_handler);

	if (ap->ops->freeze)
		ap->ops->freeze(ap);

	ap->pflags |= ATA_PFLAG_FROZEN;

	DPRINTK("ata%u port frozen\n", ap->print_id);
}

/**
 *	ata_port_freeze - abort & freeze port
 *	@ap: ATA port to freeze
 *
 *	Abort and freeze @ap.  The freeze operation must be called
 *	first, because some hardware requires special operations
 *	before the taskfile registers are accessible.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Number of aborted commands.
 */
int ata_port_freeze(struct ata_port *ap)
{
	int nr_aborted;

	WARN_ON(!ap->ops->error_handler);

	__ata_port_freeze(ap);
	nr_aborted = ata_port_abort(ap);

	return nr_aborted;
}

/**
 *	sata_async_notification - SATA async notification handler
 *	@ap: ATA port where async notification is received
 *
 *	Handler to be called when async notification via SDB FIS is
 *	received.  This function schedules EH if necessary.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	1 if EH is scheduled, 0 otherwise.
 */
int sata_async_notification(struct ata_port *ap)
{
	u32 sntf;
	int rc;

	if (!(ap->flags & ATA_FLAG_AN))
		return 0;

	rc = sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf);
	if (rc == 0)
		sata_scr_write(&ap->link, SCR_NOTIFICATION, sntf);

	if (!sata_pmp_attached(ap) || rc) {
		/* PMP is not attached or SNTF is not available */
		if (!sata_pmp_attached(ap)) {
			/* PMP is not attached.  Check whether ATAPI
			 * AN is configured.  If so, notify media
			 * change.
			 */
			struct ata_device *dev = ap->link.device;

			if ((dev->class == ATA_DEV_ATAPI) &&
			    (dev->flags & ATA_DFLAG_AN))
				ata_scsi_media_change_notify(dev);
			return 0;
		} else {
			/* PMP is attached but SNTF is not available.
			 * ATAPI async media change notification is
			 * not used.  The PMP must be reporting PHY
			 * status change, schedule EH.
			 */
			ata_port_schedule_eh(ap);
			return 1;
		}
	} else {
		/* PMP is attached and SNTF is available */
		struct ata_link *link;

		/* check and notify ATAPI AN */
		ata_for_each_link(link, ap, EDGE) {
			if (!(sntf & (1 << link->pmp)))
				continue;

			if ((link->device->class == ATA_DEV_ATAPI) &&
			    (link->device->flags & ATA_DFLAG_AN))
				ata_scsi_media_change_notify(link->device);
		}

		/* If PMP is reporting that PHY status of some
		 * downstream ports has changed, schedule EH.
		 */
		if (sntf & (1 << SATA_PMP_CTRL_PORT)) {
			ata_port_schedule_eh(ap);
			return 1;
		}

		return 0;
	}
}

/**
 *	ata_eh_freeze_port - EH helper to freeze port
 *	@ap: ATA port to freeze
 *
 *	Freeze @ap.
 *
 *	LOCKING:
 *	None.
 */
void ata_eh_freeze_port(struct ata_port *ap)
{
	unsigned long flags;

	if (!ap->ops->error_handler)
		return;

	spin_lock_irqsave(ap->lock, flags);
	__ata_port_freeze(ap);
	spin_unlock_irqrestore(ap->lock, flags);
}

/**
 *	ata_port_thaw_port - EH helper to thaw port
 *	@ap: ATA port to thaw
 *
 *	Thaw frozen port @ap.
 *
 *	LOCKING:
 *	None.
 */
void ata_eh_thaw_port(struct ata_port *ap)
{
	unsigned long flags;

	if (!ap->ops->error_handler)
		return;

	spin_lock_irqsave(ap->lock, flags);

	ap->pflags &= ~ATA_PFLAG_FROZEN;

	if (ap->ops->thaw)
		ap->ops->thaw(ap);

	spin_unlock_irqrestore(ap->lock, flags);

	DPRINTK("ata%u port thawed\n", ap->print_id);
}

/* Completion stub for qcs finished from EH; the real completion goes
 * through scsi_eh_finish_cmd() in __ata_eh_qc_complete(). */
static void ata_eh_scsidone(struct scsi_cmnd *scmd)
{
	/* nada */
}

/* Complete @qc from EH context and hand its scmd back to SCSI EH. */
static void __ata_eh_qc_complete(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scsi_cmnd *scmd = qc->scsicmd;
	unsigned long flags;

	spin_lock_irqsave(ap->lock, flags);
	qc->scsidone = ata_eh_scsidone;
	__ata_qc_complete(qc);
	WARN_ON(ata_tag_valid(qc->tag));
	spin_unlock_irqrestore(ap->lock, flags);

	scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
}

/**
 *	ata_eh_qc_complete - Complete an active ATA command from EH
 *	@qc: Command to complete
 *
 *	Indicate to the mid and upper layers that an ATA command has
 *	completed.  To be used from EH.
 */
void ata_eh_qc_complete(struct ata_queued_cmd *qc)
{
	struct scsi_cmnd *scmd = qc->scsicmd;
	scmd->retries = scmd->allowed;
	__ata_eh_qc_complete(qc);
}

/**
 *	ata_eh_qc_retry - Tell midlayer to retry an ATA command after EH
 *	@qc: Command to retry
 *
 *	Indicate to the mid and upper layers that an ATA command
 *	should be retried.  To be used from EH.
 *
 *	SCSI midlayer limits the number of retries to scmd->allowed.
 *	scmd->allowed is incremented for commands which get retried
 *	due to unrelated failures (qc->err_mask is zero).
 */
void ata_eh_qc_retry(struct ata_queued_cmd *qc)
{
	struct scsi_cmnd *scmd = qc->scsicmd;
	if (!qc->err_mask)
		scmd->allowed++;
	__ata_eh_qc_complete(qc);
}

/**
 *	ata_dev_disable - disable ATA device
 *	@dev: ATA device to disable
 *
 *	Disable @dev.
 *
 *	Locking:
 *	EH context.
 */
void ata_dev_disable(struct ata_device *dev)
{
	if (!ata_dev_enabled(dev))
		return;

	if (ata_msg_drv(dev->link->ap))
		ata_dev_warn(dev, "disabled\n");
	ata_acpi_on_disable(dev);
	ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 | ATA_DNXFER_QUIET);
	dev->class++;	/* ATA_DEV_* -> matching ATA_DEV_*_UNSUP class */

	/* From now till the next successful probe, ering is used to
	 * track probe failures.  Clear accumulated device error info.
	 */
	ata_ering_clear(&dev->ering);
}

/**
 *	ata_eh_detach_dev - detach ATA device
 *	@dev: ATA device to detach
 *
 *	Detach @dev.
 *
 *	LOCKING:
 *	None.
 */
void ata_eh_detach_dev(struct ata_device *dev)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	struct ata_eh_context *ehc = &link->eh_context;
	unsigned long flags;

	ata_dev_disable(dev);

	spin_lock_irqsave(ap->lock, flags);

	dev->flags &= ~ATA_DFLAG_DETACH;

	if (ata_scsi_offline_dev(dev)) {
		dev->flags |= ATA_DFLAG_DETACHED;
		ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG;
	}

	/* clear per-dev EH info */
	ata_eh_clear_action(link, dev, &link->eh_info, ATA_EH_PERDEV_MASK);
	ata_eh_clear_action(link, dev, &link->eh_context.i, ATA_EH_PERDEV_MASK);
	ehc->saved_xfer_mode[dev->devno] = 0;
	ehc->saved_ncq_enabled &= ~(1 << dev->devno);

	spin_unlock_irqrestore(ap->lock, flags);
}

/**
 *	ata_eh_about_to_do - about to perform eh_action
 *	@link: target ATA link
 *	@dev: target ATA dev for per-dev action (can be NULL)
 *	@action: action about to be performed
 *
 *	Called just before performing EH actions to clear related bits
 *	in @link->eh_info such that eh actions are not unnecessarily
 *	repeated.
 *
 *	LOCKING:
 *	None.
 */
void ata_eh_about_to_do(struct ata_link *link, struct ata_device *dev,
			unsigned int action)
{
	struct ata_port *ap = link->ap;
	struct ata_eh_info *ehi = &link->eh_info;
	struct ata_eh_context *ehc = &link->eh_context;
	unsigned long flags;

	spin_lock_irqsave(ap->lock, flags);

	ata_eh_clear_action(link, dev, ehi, action);

	/* About to take EH action, set RECOVERED.  Ignore actions on
	 * slave links as master will do them again.
	 */
	if (!(ehc->i.flags & ATA_EHI_QUIET) && link != ap->slave_link)
		ap->pflags |= ATA_PFLAG_RECOVERED;

	spin_unlock_irqrestore(ap->lock, flags);
}

/**
 *	ata_eh_done - EH action complete
 *	@ap: target ATA port
 *	@dev: target ATA dev for per-dev action (can be NULL)
 *	@action: action just completed
 *
 *	Called right after performing EH actions to clear related bits
 *	in @link->eh_context.
 *
 *	LOCKING:
 *	None.
*/ void ata_eh_done(struct ata_link *link, struct ata_device *dev, unsigned int action) { struct ata_eh_context *ehc = &link->eh_context; ata_eh_clear_action(link, dev, &ehc->i, action); } /** * ata_err_string - convert err_mask to descriptive string * @err_mask: error mask to convert to string * * Convert @err_mask to descriptive string. Errors are * prioritized according to severity and only the most severe * error is reported. * * LOCKING: * None. * * RETURNS: * Descriptive string for @err_mask */ static const char *ata_err_string(unsigned int err_mask) { if (err_mask & AC_ERR_HOST_BUS) return "host bus error"; if (err_mask & AC_ERR_ATA_BUS) return "ATA bus error"; if (err_mask & AC_ERR_TIMEOUT) return "timeout"; if (err_mask & AC_ERR_HSM) return "HSM violation"; if (err_mask & AC_ERR_SYSTEM) return "internal error"; if (err_mask & AC_ERR_MEDIA) return "media error"; if (err_mask & AC_ERR_INVALID) return "invalid argument"; if (err_mask & AC_ERR_DEV) return "device error"; return "unknown error"; } /** * ata_read_log_page - read a specific log page * @dev: target device * @log: log to read * @page: page to read * @buf: buffer to store read page * @sectors: number of sectors to read * * Read log page using READ_LOG_EXT command. * * LOCKING: * Kernel thread context (may sleep). * * RETURNS: * 0 on success, AC_ERR_* mask otherwise. 
*/
unsigned int ata_read_log_page(struct ata_device *dev, u8 log,
			       u8 page, void *buf, unsigned int sectors)
{
	struct ata_taskfile tf;
	unsigned int err_mask;
	bool dma = false;

	DPRINTK("read log page - log 0x%x, page 0x%x\n", log, page);

retry:
	ata_tf_init(dev, &tf);
	/* Prefer READ LOG DMA EXT when DMA is configured, the device
	 * advertises it, and it hasn't been blacklisted for this device.
	 */
	if (dev->dma_mode && ata_id_has_read_log_dma_ext(dev->id) &&
	    !(dev->horkage & ATA_HORKAGE_NO_NCQ_LOG)) {
		tf.command = ATA_CMD_READ_LOG_DMA_EXT;
		tf.protocol = ATA_PROT_DMA;
		dma = true;
	} else {
		tf.command = ATA_CMD_READ_LOG_EXT;
		tf.protocol = ATA_PROT_PIO;
		dma = false;
	}
	tf.lbal = log;
	tf.lbam = page;
	tf.nsect = sectors;
	tf.hob_nsect = sectors >> 8;
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_LBA48 | ATA_TFLAG_DEVICE;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
				     buf, sectors * ATA_SECT_SIZE, 0);

	/* DMA variant failed: blacklist it for this device and retry
	 * once with the PIO variant.
	 */
	if (err_mask && dma) {
		dev->horkage |= ATA_HORKAGE_NO_NCQ_LOG;
		ata_dev_warn(dev, "READ LOG DMA EXT failed, trying unqueued\n");
		goto retry;
	}

	DPRINTK("EXIT, err_mask=%x\n", err_mask);
	return err_mask;
}

/**
 * ata_eh_read_log_10h - Read log page 10h for NCQ error details
 * @dev: Device to read log page 10h from
 * @tag: Resulting tag of the failed command
 * @tf: Resulting taskfile registers of the failed command
 *
 * Read log page 10h to obtain NCQ error details and clear error
 * condition.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 *
 * RETURNS:
 * 0 on success, -errno otherwise.
*/
static int ata_eh_read_log_10h(struct ata_device *dev,
			       int *tag, struct ata_taskfile *tf)
{
	u8 *buf = dev->link->ap->sector_buf;
	unsigned int err_mask;
	u8 csum;
	int i;

	err_mask = ata_read_log_page(dev, ATA_LOG_SATA_NCQ, 0, buf, 1);
	if (err_mask)
		return -EIO;

	/* The 512-byte log page should sum to zero; warn but proceed
	 * if it doesn't.
	 */
	csum = 0;
	for (i = 0; i < ATA_SECT_SIZE; i++)
		csum += buf[i];
	if (csum)
		ata_dev_warn(dev, "invalid checksum 0x%x on log page 10h\n",
			     csum);

	/* bit 7 of byte 0 set: no queued-command error to report
	 * (presumably the NQ bit per ACS -- confirm against spec)
	 */
	if (buf[0] & 0x80)
		return -ENOENT;

	/* low 5 bits of byte 0 carry the failed command's tag */
	*tag = buf[0] & 0x1f;

	/* reconstruct the failed command's result taskfile from the page */
	tf->command = buf[2];
	tf->feature = buf[3];
	tf->lbal = buf[4];
	tf->lbam = buf[5];
	tf->lbah = buf[6];
	tf->device = buf[7];
	tf->hob_lbal = buf[8];
	tf->hob_lbam = buf[9];
	tf->hob_lbah = buf[10];
	tf->nsect = buf[12];
	tf->hob_nsect = buf[13];

	return 0;
}

/**
 * atapi_eh_tur - perform ATAPI TEST_UNIT_READY
 * @dev: target ATAPI device
 * @r_sense_key: out parameter for sense_key
 *
 * Perform ATAPI TEST_UNIT_READY.
 *
 * LOCKING:
 * EH context (may sleep).
 *
 * RETURNS:
 * 0 on success, AC_ERR_* mask on failure.
 */
unsigned int atapi_eh_tur(struct ata_device *dev, u8 *r_sense_key)
{
	u8 cdb[ATAPI_CDB_LEN] =
		{ TEST_UNIT_READY, 0, 0, 0, 0, 0 };
	struct ata_taskfile tf;
	unsigned int err_mask;

	ata_tf_init(dev, &tf);

	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf.command = ATA_CMD_PACKET;
	tf.protocol = ATAPI_PROT_NODATA;

	err_mask = ata_exec_internal(dev, &tf, cdb, DMA_NONE, NULL, 0, 0);
	/* on a device error, the sense key is in the high nibble of
	 * the returned feature register
	 */
	if (err_mask == AC_ERR_DEV)
		*r_sense_key = tf.feature >> 4;
	return err_mask;
}

/**
 * atapi_eh_request_sense - perform ATAPI REQUEST_SENSE
 * @dev: device to perform REQUEST_SENSE to
 * @sense_buf: result sense data buffer (SCSI_SENSE_BUFFERSIZE bytes long)
 * @dfl_sense_key: default sense key to use
 *
 * Perform ATAPI REQUEST_SENSE after the device reported CHECK
 * SENSE.  This function is EH helper.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
*
 * RETURNS:
 * 0 on success, AC_ERR_* mask on failure
 */
unsigned int atapi_eh_request_sense(struct ata_device *dev,
				    u8 *sense_buf, u8 dfl_sense_key)
{
	u8 cdb[ATAPI_CDB_LEN] =
		{ REQUEST_SENSE, 0, 0, 0, SCSI_SENSE_BUFFERSIZE, 0 };
	struct ata_port *ap = dev->link->ap;
	struct ata_taskfile tf;

	DPRINTK("ATAPI request sense\n");

	memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);

	/* initialize sense_buf with the error register,
	 * for the case where they are -not- overwritten
	 */
	sense_buf[0] = 0x70;
	sense_buf[2] = dfl_sense_key;

	/* some devices time out if garbage left in tf */
	ata_tf_init(dev, &tf);

	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf.command = ATA_CMD_PACKET;

	/* is it pointless to prefer PIO for "safety reasons"? */
	if (ap->flags & ATA_FLAG_PIO_DMA) {
		tf.protocol = ATAPI_PROT_DMA;
		tf.feature |= ATAPI_PKT_DMA;
	} else {
		tf.protocol = ATAPI_PROT_PIO;
		/* byte count limit for PIO transfer of the sense data */
		tf.lbam = SCSI_SENSE_BUFFERSIZE;
		tf.lbah = 0;
	}

	return ata_exec_internal(dev, &tf, cdb, DMA_FROM_DEVICE,
				 sense_buf, SCSI_SENSE_BUFFERSIZE, 0);
}

/**
 * ata_eh_analyze_serror - analyze SError for a failed port
 * @link: ATA link to analyze SError for
 *
 * Analyze SError if available and further determine cause of
 * failure.
 *
 * LOCKING:
 * None.
 */
static void ata_eh_analyze_serror(struct ata_link *link)
{
	struct ata_eh_context *ehc = &link->eh_context;
	u32 serror = ehc->i.serror;
	unsigned int err_mask = 0, action = 0;
	u32 hotplug_mask;

	/* map SError bits to err_mask / recovery action */
	if (serror & (SERR_PERSISTENT | SERR_DATA)) {
		err_mask |= AC_ERR_ATA_BUS;
		action |= ATA_EH_RESET;
	}
	if (serror & SERR_PROTOCOL) {
		err_mask |= AC_ERR_HSM;
		action |= ATA_EH_RESET;
	}
	if (serror & SERR_INTERNAL) {
		err_mask |= AC_ERR_SYSTEM;
		action |= ATA_EH_RESET;
	}

	/* Determine whether a hotplug event has occurred.  Both
	 * SError.N/X are considered hotplug events for enabled or
	 * host links.  For disabled PMP links, only N bit is
	 * considered as X bit is left at 1 for link plugging.
	 */
	if (link->lpm_policy > ATA_LPM_MAX_POWER)
		hotplug_mask = 0;	/* hotplug doesn't work w/ LPM */
	else if (!(link->flags & ATA_LFLAG_DISABLED) || ata_is_host_link(link))
		hotplug_mask = SERR_PHYRDY_CHG | SERR_DEV_XCHG;
	else
		hotplug_mask = SERR_PHYRDY_CHG;

	if (serror & hotplug_mask)
		ata_ehi_hotplugged(&ehc->i);

	ehc->i.err_mask |= err_mask;
	ehc->i.action |= action;
}

/**
 * ata_eh_analyze_ncq_error - analyze NCQ error
 * @link: ATA link to analyze NCQ error for
 *
 * Read log page 10h, determine the offending qc and acquire
 * error status TF.  For NCQ device errors, all LLDDs have to do
 * is setting AC_ERR_DEV in ehi->err_mask.  This function takes
 * care of the rest.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 */
void ata_eh_analyze_ncq_error(struct ata_link *link)
{
	struct ata_port *ap = link->ap;
	struct ata_eh_context *ehc = &link->eh_context;
	struct ata_device *dev = link->device;
	struct ata_queued_cmd *qc;
	struct ata_taskfile tf;
	int tag, rc;

	/* if frozen, we can't do much */
	if (ap->pflags & ATA_PFLAG_FROZEN)
		return;

	/* is it NCQ device error? */
	if (!link->sactive || !(ehc->i.err_mask & AC_ERR_DEV))
		return;

	/* has LLDD analyzed already? */
	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
		qc = __ata_qc_from_tag(ap, tag);

		if (!(qc->flags & ATA_QCFLAG_FAILED))
			continue;

		if (qc->err_mask)
			return;
	}

	/* okay, this error is ours */
	memset(&tf, 0, sizeof(tf));
	rc = ata_eh_read_log_10h(dev, &tag, &tf);
	if (rc) {
		ata_link_err(link, "failed to read log page 10h (errno=%d)\n",
			     rc);
		return;
	}

	/* the reported tag must still be outstanding in SActive */
	if (!(link->sactive & (1 << tag))) {
		ata_link_err(link, "log page 10h reported inactive tag %d\n",
			     tag);
		return;
	}

	/* we've got the perpetrator, condemn it */
	qc = __ata_qc_from_tag(ap, tag);
	memcpy(&qc->result_tf, &tf, sizeof(tf));
	qc->result_tf.flags = ATA_TFLAG_ISADDR | ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
	qc->err_mask |= AC_ERR_DEV | AC_ERR_NCQ;
	/* the device error now belongs to the offending qc, not the link */
	ehc->i.err_mask &= ~AC_ERR_DEV;
}

/**
 * ata_eh_analyze_tf - analyze taskfile of a failed qc
 * @qc: qc to analyze
 * @tf: Taskfile registers to analyze
 *
 * Analyze taskfile of @qc and further determine cause of
 * failure.  This function also requests ATAPI sense data if
 * available.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 *
 * RETURNS:
 * Determined recovery action
 */
static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc,
				      const struct ata_taskfile *tf)
{
	unsigned int tmp, action = 0;
	u8 stat = tf->command, err = tf->feature;

	/* anything other than a clean DRDY status is an HSM violation */
	if ((stat & (ATA_BUSY | ATA_DRQ | ATA_DRDY)) != ATA_DRDY) {
		qc->err_mask |= AC_ERR_HSM;
		return ATA_EH_RESET;
	}

	if (stat & (ATA_ERR | ATA_DF))
		qc->err_mask |= AC_ERR_DEV;
	else
		return 0;

	switch (qc->dev->class) {
	case ATA_DEV_ATA:
	case ATA_DEV_ZAC:
		/* refine AC_ERR_DEV using the error register bits */
		if (err & ATA_ICRC)
			qc->err_mask |= AC_ERR_ATA_BUS;
		if (err & (ATA_UNC | ATA_AMNF))
			qc->err_mask |= AC_ERR_MEDIA;
		if (err & ATA_IDNF)
			qc->err_mask |= AC_ERR_INVALID;
		break;

	case ATA_DEV_ATAPI:
		if (!(qc->ap->pflags & ATA_PFLAG_FROZEN)) {
			tmp = atapi_eh_request_sense(qc->dev,
						qc->scsicmd->sense_buffer,
						qc->result_tf.feature >> 4);
			if (!tmp) {
				/* ATA_QCFLAG_SENSE_VALID is used to
				 * tell atapi_qc_complete() that sense
				 * data is already valid.
				 *
				 * TODO: interpret sense data and set
				 * appropriate err_mask.
				 */
				qc->flags |= ATA_QCFLAG_SENSE_VALID;
			} else
				qc->err_mask |= tmp;
		}
	}

	if (qc->err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT | AC_ERR_ATA_BUS))
		action |= ATA_EH_RESET;

	return action;
}

/* Classify an error occurrence into an ATA_ECAT_* category for the
 * speed-down heuristics.  *xfer_ok tracks whether a verified data
 * transfer has happened since; DUBIOUS_* categories are used until
 * the first verified transfer.
 */
static int ata_eh_categorize_error(unsigned int eflags, unsigned int err_mask,
				   int *xfer_ok)
{
	int base = 0;

	if (!(eflags & ATA_EFLAG_DUBIOUS_XFER))
		*xfer_ok = 1;

	if (!*xfer_ok)
		base = ATA_ECAT_DUBIOUS_NONE;

	if (err_mask & AC_ERR_ATA_BUS)
		return base + ATA_ECAT_ATA_BUS;

	if (err_mask & AC_ERR_TIMEOUT)
		return base + ATA_ECAT_TOUT_HSM;

	if (eflags & ATA_EFLAG_IS_IO) {
		if (err_mask & AC_ERR_HSM)
			return base + ATA_ECAT_TOUT_HSM;
		if ((err_mask &
		     (AC_ERR_DEV|AC_ERR_MEDIA|AC_ERR_INVALID)) == AC_ERR_DEV)
			return base + ATA_ECAT_UNK_DEV;
	}

	return 0;
}

/* accumulator passed through ata_ering_map() while scanning history */
struct speed_down_verdict_arg {
	u64 since;		/* ignore entries older than this timestamp */
	int xfer_ok;		/* verified-transfer tracker for categorize */
	int nr_errors[ATA_ECAT_NR];	/* per-category error counts */
};

/* ering iterator callback: count errors per category, stop at entries
 * that are too old or predate the last clearing (returning -1 aborts
 * the walk).
 */
static int speed_down_verdict_cb(struct ata_ering_entry *ent, void *void_arg)
{
	struct speed_down_verdict_arg *arg = void_arg;
	int cat;

	if ((ent->eflags & ATA_EFLAG_OLD_ER) || (ent->timestamp < arg->since))
		return -1;

	cat = ata_eh_categorize_error(ent->eflags, ent->err_mask,
				      &arg->xfer_ok);
	arg->nr_errors[cat]++;

	return 0;
}

/**
 * ata_eh_speed_down_verdict - Determine speed down verdict
 * @dev: Device of interest
 *
 * This function examines error ring of @dev and determines
 * whether NCQ needs to be turned off, transfer speed should be
 * stepped down, or falling back to PIO is necessary.
 *
 * ECAT_ATA_BUS	: ATA_BUS error for any command
 *
 * ECAT_TOUT_HSM : TIMEOUT for any command or HSM violation for
 *		  IO commands
 *
 * ECAT_UNK_DEV	: Unknown DEV error for IO commands
 *
 * ECAT_DUBIOUS_* : Identical to above three but occurred while
 *		  data transfer hasn't been verified.
 *
 * Verdicts are
 *
 * NCQ_OFF	: Turn off NCQ.
 *
 * SPEED_DOWN	: Speed down transfer speed but don't fall back
 *		  to PIO.
 *
 * FALLBACK_TO_PIO : Fall back to PIO.
 *
 * Even if multiple verdicts are returned, only one action is
* taken per error.  An action triggered by non-DUBIOUS errors
 * clears ering, while one triggered by DUBIOUS_* errors doesn't.
 * This is to expedite speed down decisions right after device is
 * initially configured.
 *
 * The followings are speed down rules.  #1 and #2 deal with
 * DUBIOUS errors.
 *
 * 1. If more than one DUBIOUS_ATA_BUS or DUBIOUS_TOUT_HSM errors
 *    occurred during last 5 mins, SPEED_DOWN and FALLBACK_TO_PIO.
 *
 * 2. If more than one DUBIOUS_TOUT_HSM or DUBIOUS_UNK_DEV errors
 *    occurred during last 5 mins, NCQ_OFF.
 *
 * 3. If more than 8 ATA_BUS, TOUT_HSM or UNK_DEV errors
 *    occurred during last 5 mins, FALLBACK_TO_PIO
 *
 *    NOTE(review): the code below tests "> 6" for this rule while
 *    the text says "more than 8" -- one of the two looks stale;
 *    confirm the intended threshold.
 *
 * 4. If more than 3 TOUT_HSM or UNK_DEV errors occurred
 *    during last 10 mins, NCQ_OFF.
 *
 * 5. If more than 3 ATA_BUS or TOUT_HSM errors, or more than 6
 *    UNK_DEV errors occurred during last 10 mins, SPEED_DOWN.
 *
 * LOCKING:
 * Inherited from caller.
 *
 * RETURNS:
 * OR of ATA_EH_SPDN_* flags.
 */
static unsigned int ata_eh_speed_down_verdict(struct ata_device *dev)
{
	const u64 j5mins = 5LLU * 60 * HZ, j10mins = 10LLU * 60 * HZ;
	u64 j64 = get_jiffies_64();
	struct speed_down_verdict_arg arg;
	unsigned int verdict = 0;

	/* scan past 5 mins of error history */
	memset(&arg, 0, sizeof(arg));
	arg.since = j64 - min(j64, j5mins);
	ata_ering_map(&dev->ering, speed_down_verdict_cb, &arg);

	if (arg.nr_errors[ATA_ECAT_DUBIOUS_ATA_BUS] +
	    arg.nr_errors[ATA_ECAT_DUBIOUS_TOUT_HSM] > 1)
		verdict |= ATA_EH_SPDN_SPEED_DOWN |
			ATA_EH_SPDN_FALLBACK_TO_PIO | ATA_EH_SPDN_KEEP_ERRORS;

	if (arg.nr_errors[ATA_ECAT_DUBIOUS_TOUT_HSM] +
	    arg.nr_errors[ATA_ECAT_DUBIOUS_UNK_DEV] > 1)
		verdict |= ATA_EH_SPDN_NCQ_OFF | ATA_EH_SPDN_KEEP_ERRORS;

	if (arg.nr_errors[ATA_ECAT_ATA_BUS] +
	    arg.nr_errors[ATA_ECAT_TOUT_HSM] +
	    arg.nr_errors[ATA_ECAT_UNK_DEV] > 6)
		verdict |= ATA_EH_SPDN_FALLBACK_TO_PIO;

	/* scan past 10 mins of error history */
	memset(&arg, 0, sizeof(arg));
	arg.since = j64 - min(j64, j10mins);
	ata_ering_map(&dev->ering, speed_down_verdict_cb, &arg);

	if (arg.nr_errors[ATA_ECAT_TOUT_HSM] +
	    arg.nr_errors[ATA_ECAT_UNK_DEV] > 3)
		verdict |= ATA_EH_SPDN_NCQ_OFF;

	if (arg.nr_errors[ATA_ECAT_ATA_BUS] +
	    arg.nr_errors[ATA_ECAT_TOUT_HSM] > 3 ||
	    arg.nr_errors[ATA_ECAT_UNK_DEV] > 6)
		verdict |= ATA_EH_SPDN_SPEED_DOWN;

	return verdict;
}

/**
 * ata_eh_speed_down - record error and speed down if necessary
 * @dev: Failed device
 * @eflags: mask of ATA_EFLAG_* flags
 * @err_mask: err_mask of the error
 *
 * Record error and examine error history to determine whether
 * adjusting transmission speed is necessary.  It also sets
 * transmission limits appropriately if such adjustment is
 * necessary.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 *
 * RETURNS:
 * Determined recovery action.
 */
static unsigned int ata_eh_speed_down(struct ata_device *dev,
				unsigned int eflags, unsigned int err_mask)
{
	struct ata_link *link = ata_dev_phys_link(dev);
	int xfer_ok = 0;
	unsigned int verdict;
	unsigned int action = 0;

	/* don't bother if Cat-0 error */
	if (ata_eh_categorize_error(eflags, err_mask, &xfer_ok) == 0)
		return 0;

	/* record error and determine whether speed down is necessary */
	ata_ering_record(&dev->ering, eflags, err_mask);
	verdict = ata_eh_speed_down_verdict(dev);

	/* turn off NCQ? */
	if ((verdict & ATA_EH_SPDN_NCQ_OFF) &&
	    (dev->flags & (ATA_DFLAG_PIO | ATA_DFLAG_NCQ |
			   ATA_DFLAG_NCQ_OFF)) == ATA_DFLAG_NCQ) {
		dev->flags |= ATA_DFLAG_NCQ_OFF;
		ata_dev_warn(dev, "NCQ disabled due to excessive errors\n");
		goto done;
	}

	/* speed down? */
	if (verdict & ATA_EH_SPDN_SPEED_DOWN) {
		/* speed down SATA link speed if possible */
		if (sata_down_spd_limit(link, 0) == 0) {
			action |= ATA_EH_RESET;
			goto done;
		}

		/* lower transfer mode */
		if (dev->spdn_cnt < 2) {
			/* two-step down-shift tables: first a gentle step,
			 * then the forceful one
			 */
			static const int dma_dnxfer_sel[] =
				{ ATA_DNXFER_DMA, ATA_DNXFER_40C };
			static const int pio_dnxfer_sel[] =
				{ ATA_DNXFER_PIO, ATA_DNXFER_FORCE_PIO0 };
			int sel;

			if (dev->xfer_shift != ATA_SHIFT_PIO)
				sel = dma_dnxfer_sel[dev->spdn_cnt];
			else
				sel = pio_dnxfer_sel[dev->spdn_cnt];

			dev->spdn_cnt++;

			if (ata_down_xfermask_limit(dev, sel) == 0) {
				action |= ATA_EH_RESET;
				goto done;
			}
		}
	}

	/* Fall back to PIO?  Slowing down to PIO is meaningless for
	 * SATA ATA devices.  Consider it only for PATA and SATAPI.
	 */
	if ((verdict & ATA_EH_SPDN_FALLBACK_TO_PIO) && (dev->spdn_cnt >= 2) &&
	    (link->ap->cbl != ATA_CBL_SATA || dev->class == ATA_DEV_ATAPI) &&
	    (dev->xfer_shift != ATA_SHIFT_PIO)) {
		if (ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO) == 0) {
			dev->spdn_cnt = 0;
			action |= ATA_EH_RESET;
			goto done;
		}
	}

	return 0;
 done:
	/* device has been slowed down, blow error history */
	if (!(verdict & ATA_EH_SPDN_KEEP_ERRORS))
		ata_ering_clear(&dev->ering);
	return action;
}

/**
 * ata_eh_worth_retry - analyze error and decide whether to retry
 * @qc: qc to possibly retry
 *
 * Look at the cause of the error and decide if a retry
 * might be useful or not.  We don't want to retry media errors
 * because the drive itself has probably already taken 10-30 seconds
 * doing its own internal retries before reporting the failure.
*/
static inline int ata_eh_worth_retry(struct ata_queued_cmd *qc)
{
	if (qc->err_mask & AC_ERR_MEDIA)
		return 0;	/* don't retry media errors */
	if (qc->flags & ATA_QCFLAG_IO)
		return 1;	/* otherwise retry anything from fs stack */
	if (qc->err_mask & AC_ERR_INVALID)
		return 0;	/* don't retry these */
	return qc->err_mask != AC_ERR_DEV;  /* retry if not dev error */
}

/**
 * ata_eh_link_autopsy - analyze error and determine recovery action
 * @link: host link to perform autopsy on
 *
 * Analyze why @link failed and determine which recovery actions
 * are needed.  This function also sets more detailed AC_ERR_*
 * values and fills sense data for ATAPI CHECK SENSE.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 */
static void ata_eh_link_autopsy(struct ata_link *link)
{
	struct ata_port *ap = link->ap;
	struct ata_eh_context *ehc = &link->eh_context;
	struct ata_device *dev;
	unsigned int all_err_mask = 0, eflags = 0;
	int tag;
	u32 serror;
	int rc;

	DPRINTK("ENTER\n");

	if (ehc->i.flags & ATA_EHI_NO_AUTOPSY)
		return;

	/* obtain and analyze SError */
	rc = sata_scr_read(link, SCR_ERROR, &serror);
	if (rc == 0) {
		ehc->i.serror |= serror;
		ata_eh_analyze_serror(link);
	} else if (rc != -EOPNOTSUPP) {
		/* SError read failed, force reset and probing */
		ehc->i.probe_mask |= ATA_ALL_DEVICES;
		ehc->i.action |= ATA_EH_RESET;
		ehc->i.err_mask |= AC_ERR_OTHER;
	}

	/* analyze NCQ failure */
	ata_eh_analyze_ncq_error(link);

	/* any real error trumps AC_ERR_OTHER */
	if (ehc->i.err_mask & ~AC_ERR_OTHER)
		ehc->i.err_mask &= ~AC_ERR_OTHER;

	all_err_mask |= ehc->i.err_mask;

	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
		struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);

		if (!(qc->flags & ATA_QCFLAG_FAILED) ||
		    ata_dev_phys_link(qc->dev) != link)
			continue;

		/* inherit upper level err_mask */
		qc->err_mask |= ehc->i.err_mask;

		/* analyze TF */
		ehc->i.action |= ata_eh_analyze_tf(qc, &qc->result_tf);

		/* DEV errors are probably spurious in case of ATA_BUS error */
		if (qc->err_mask & AC_ERR_ATA_BUS)
			qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_MEDIA |
					  AC_ERR_INVALID);

		/* any real error trumps unknown error */
		if (qc->err_mask & ~AC_ERR_OTHER)
			qc->err_mask &= ~AC_ERR_OTHER;

		/* SENSE_VALID trumps dev/unknown error and revalidation */
		if (qc->flags & ATA_QCFLAG_SENSE_VALID)
			qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_OTHER);

		/* determine whether the command is worth retrying */
		if (ata_eh_worth_retry(qc))
			qc->flags |= ATA_QCFLAG_RETRY;

		/* accumulate error info */
		ehc->i.dev = qc->dev;
		all_err_mask |= qc->err_mask;
		if (qc->flags & ATA_QCFLAG_IO)
			eflags |= ATA_EFLAG_IS_IO;
		trace_ata_eh_link_autopsy_qc(qc);
	}

	/* enforce default EH actions */
	if (ap->pflags & ATA_PFLAG_FROZEN ||
	    all_err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT))
		ehc->i.action |= ATA_EH_RESET;
	else if (((eflags & ATA_EFLAG_IS_IO) && all_err_mask) ||
		 (!(eflags & ATA_EFLAG_IS_IO) && (all_err_mask & ~AC_ERR_DEV)))
		ehc->i.action |= ATA_EH_REVALIDATE;

	/* If we have offending qcs and the associated failed device,
	 * perform per-dev EH action only on the offending device.
	 */
	if (ehc->i.dev) {
		ehc->i.dev_action[ehc->i.dev->devno] |=
			ehc->i.action & ATA_EH_PERDEV_MASK;
		ehc->i.action &= ~ATA_EH_PERDEV_MASK;
	}

	/* propagate timeout to host link */
	if ((all_err_mask & AC_ERR_TIMEOUT) && !ata_is_host_link(link))
		ap->link.eh_context.i.err_mask |= AC_ERR_TIMEOUT;

	/* record error and consider speeding down */
	dev = ehc->i.dev;
	if (!dev && ((ata_link_max_devices(link) == 1 &&
		      ata_dev_enabled(link->device))))
		dev = link->device;

	if (dev) {
		if (dev->flags & ATA_DFLAG_DUBIOUS_XFER)
			eflags |= ATA_EFLAG_DUBIOUS_XFER;
		ehc->i.action |= ata_eh_speed_down(dev, eflags, all_err_mask);
	}
	trace_ata_eh_link_autopsy(dev, ehc->i.action, all_err_mask);
	DPRINTK("EXIT\n");
}

/**
 * ata_eh_autopsy - analyze error and determine recovery action
 * @ap: host port to perform autopsy on
 *
 * Analyze all links of @ap and determine why they failed and
 * which recovery actions are needed.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 */
void ata_eh_autopsy(struct ata_port *ap)
{
	struct ata_link *link;

	ata_for_each_link(link, ap, EDGE)
		ata_eh_link_autopsy(link);

	/* Handle the frigging slave link.  Autopsy is done similarly
	 * but actions and flags are transferred over to the master
	 * link and handled from there.
	 */
	if (ap->slave_link) {
		struct ata_eh_context *mehc = &ap->link.eh_context;
		struct ata_eh_context *sehc = &ap->slave_link->eh_context;

		/* transfer control flags from master to slave */
		sehc->i.flags |= mehc->i.flags & ATA_EHI_TO_SLAVE_MASK;

		/* perform autopsy on the slave link */
		ata_eh_link_autopsy(ap->slave_link);

		/* transfer actions from slave to master and clear slave */
		ata_eh_about_to_do(ap->slave_link, NULL, ATA_EH_ALL_ACTIONS);
		mehc->i.action |= sehc->i.action;
		mehc->i.dev_action[1] |= sehc->i.dev_action[1];
		mehc->i.flags |= sehc->i.flags;
		ata_eh_done(ap->slave_link, NULL, ATA_EH_ALL_ACTIONS);
	}

	/* Autopsy of fanout ports can affect host link autopsy.
	 * Perform host link autopsy last.
	 */
	if (sata_pmp_attached(ap))
		ata_eh_link_autopsy(&ap->link);
}

/**
 * ata_get_cmd_descript - get description for ATA command
 * @command: ATA command code to get description for
 *
 * Return a textual description of the given command, or NULL if the
* command is not known.
 *
 * LOCKING:
 * None
 */
const char *ata_get_cmd_descript(u8 command)
{
#ifdef CONFIG_ATA_VERBOSE_ERROR
	/* opcode -> human-readable name table, terminated by { 0, NULL } */
	static const struct
	{
		u8 command;
		const char *text;
	} cmd_descr[] = {
		{ ATA_CMD_DEV_RESET,		"DEVICE RESET" },
		{ ATA_CMD_CHK_POWER,		"CHECK POWER MODE" },
		{ ATA_CMD_STANDBY,		"STANDBY" },
		{ ATA_CMD_IDLE,			"IDLE" },
		{ ATA_CMD_EDD,			"EXECUTE DEVICE DIAGNOSTIC" },
		{ ATA_CMD_DOWNLOAD_MICRO,	"DOWNLOAD MICROCODE" },
		{ ATA_CMD_DOWNLOAD_MICRO_DMA,	"DOWNLOAD MICROCODE DMA" },
		{ ATA_CMD_NOP,			"NOP" },
		{ ATA_CMD_FLUSH,		"FLUSH CACHE" },
		{ ATA_CMD_FLUSH_EXT,		"FLUSH CACHE EXT" },
		{ ATA_CMD_ID_ATA,		"IDENTIFY DEVICE" },
		{ ATA_CMD_ID_ATAPI,		"IDENTIFY PACKET DEVICE" },
		{ ATA_CMD_SERVICE,		"SERVICE" },
		{ ATA_CMD_READ,			"READ DMA" },
		{ ATA_CMD_READ_EXT,		"READ DMA EXT" },
		{ ATA_CMD_READ_QUEUED,		"READ DMA QUEUED" },
		{ ATA_CMD_READ_STREAM_EXT,	"READ STREAM EXT" },
		{ ATA_CMD_READ_STREAM_DMA_EXT,	"READ STREAM DMA EXT" },
		{ ATA_CMD_WRITE,		"WRITE DMA" },
		{ ATA_CMD_WRITE_EXT,		"WRITE DMA EXT" },
		{ ATA_CMD_WRITE_QUEUED,		"WRITE DMA QUEUED EXT" },
		{ ATA_CMD_WRITE_STREAM_EXT,	"WRITE STREAM EXT" },
		{ ATA_CMD_WRITE_STREAM_DMA_EXT,	"WRITE STREAM DMA EXT" },
		{ ATA_CMD_WRITE_FUA_EXT,	"WRITE DMA FUA EXT" },
		{ ATA_CMD_WRITE_QUEUED_FUA_EXT,	"WRITE DMA QUEUED FUA EXT" },
		{ ATA_CMD_FPDMA_READ,		"READ FPDMA QUEUED" },
		{ ATA_CMD_FPDMA_WRITE,		"WRITE FPDMA QUEUED" },
		{ ATA_CMD_FPDMA_SEND,		"SEND FPDMA QUEUED" },
		{ ATA_CMD_FPDMA_RECV,		"RECEIVE FPDMA QUEUED" },
		{ ATA_CMD_PIO_READ,		"READ SECTOR(S)" },
		{ ATA_CMD_PIO_READ_EXT,		"READ SECTOR(S) EXT" },
		{ ATA_CMD_PIO_WRITE,		"WRITE SECTOR(S)" },
		{ ATA_CMD_PIO_WRITE_EXT,	"WRITE SECTOR(S) EXT" },
		{ ATA_CMD_READ_MULTI,		"READ MULTIPLE" },
		{ ATA_CMD_READ_MULTI_EXT,	"READ MULTIPLE EXT" },
		{ ATA_CMD_WRITE_MULTI,		"WRITE MULTIPLE" },
		{ ATA_CMD_WRITE_MULTI_EXT,	"WRITE MULTIPLE EXT" },
		{ ATA_CMD_WRITE_MULTI_FUA_EXT,	"WRITE MULTIPLE FUA EXT" },
		{ ATA_CMD_SET_FEATURES,		"SET FEATURES" },
		{ ATA_CMD_SET_MULTI,		"SET MULTIPLE MODE" },
		{ ATA_CMD_VERIFY,		"READ VERIFY SECTOR(S)" },
		{ ATA_CMD_VERIFY_EXT,		"READ VERIFY SECTOR(S) EXT" },
		{ ATA_CMD_WRITE_UNCORR_EXT,	"WRITE UNCORRECTABLE EXT" },
		{ ATA_CMD_STANDBYNOW1,		"STANDBY IMMEDIATE" },
		{ ATA_CMD_IDLEIMMEDIATE,	"IDLE IMMEDIATE" },
		{ ATA_CMD_SLEEP,		"SLEEP" },
		{ ATA_CMD_INIT_DEV_PARAMS,	"INITIALIZE DEVICE PARAMETERS" },
		{ ATA_CMD_READ_NATIVE_MAX,	"READ NATIVE MAX ADDRESS" },
		{ ATA_CMD_READ_NATIVE_MAX_EXT,	"READ NATIVE MAX ADDRESS EXT" },
		{ ATA_CMD_SET_MAX,		"SET MAX ADDRESS" },
		{ ATA_CMD_SET_MAX_EXT,		"SET MAX ADDRESS EXT" },
		{ ATA_CMD_READ_LOG_EXT,		"READ LOG EXT" },
		{ ATA_CMD_WRITE_LOG_EXT,	"WRITE LOG EXT" },
		{ ATA_CMD_READ_LOG_DMA_EXT,	"READ LOG DMA EXT" },
		{ ATA_CMD_WRITE_LOG_DMA_EXT,	"WRITE LOG DMA EXT" },
		{ ATA_CMD_TRUSTED_NONDATA,	"TRUSTED NON-DATA" },
		{ ATA_CMD_TRUSTED_RCV,		"TRUSTED RECEIVE" },
		{ ATA_CMD_TRUSTED_RCV_DMA,	"TRUSTED RECEIVE DMA" },
		{ ATA_CMD_TRUSTED_SND,		"TRUSTED SEND" },
		{ ATA_CMD_TRUSTED_SND_DMA,	"TRUSTED SEND DMA" },
		{ ATA_CMD_PMP_READ,		"READ BUFFER" },
		{ ATA_CMD_PMP_READ_DMA,		"READ BUFFER DMA" },
		{ ATA_CMD_PMP_WRITE,		"WRITE BUFFER" },
		{ ATA_CMD_PMP_WRITE_DMA,	"WRITE BUFFER DMA" },
		{ ATA_CMD_CONF_OVERLAY,		"DEVICE CONFIGURATION OVERLAY" },
		{ ATA_CMD_SEC_SET_PASS,		"SECURITY SET PASSWORD" },
		{ ATA_CMD_SEC_UNLOCK,		"SECURITY UNLOCK" },
		{ ATA_CMD_SEC_ERASE_PREP,	"SECURITY ERASE PREPARE" },
		{ ATA_CMD_SEC_ERASE_UNIT,	"SECURITY ERASE UNIT" },
		{ ATA_CMD_SEC_FREEZE_LOCK,	"SECURITY FREEZE LOCK" },
		{ ATA_CMD_SEC_DISABLE_PASS,	"SECURITY DISABLE PASSWORD" },
		{ ATA_CMD_CONFIG_STREAM,	"CONFIGURE STREAM" },
		{ ATA_CMD_SMART,		"SMART" },
		{ ATA_CMD_MEDIA_LOCK,		"DOOR LOCK" },
		{ ATA_CMD_MEDIA_UNLOCK,		"DOOR UNLOCK" },
		{ ATA_CMD_DSM,			"DATA SET MANAGEMENT" },
		{ ATA_CMD_CHK_MED_CRD_TYP,	"CHECK MEDIA CARD TYPE" },
		{ ATA_CMD_CFA_REQ_EXT_ERR,	"CFA REQUEST EXTENDED ERROR" },
		{ ATA_CMD_CFA_WRITE_NE,		"CFA WRITE SECTORS WITHOUT ERASE" },
		{ ATA_CMD_CFA_TRANS_SECT,	"CFA TRANSLATE SECTOR" },
		{ ATA_CMD_CFA_ERASE,		"CFA ERASE SECTORS" },
		{ ATA_CMD_CFA_WRITE_MULT_NE,	"CFA WRITE MULTIPLE WITHOUT ERASE" },
		{ ATA_CMD_REQ_SENSE_DATA,	"REQUEST SENSE DATA EXT" },
		{ ATA_CMD_SANITIZE_DEVICE,	"SANITIZE DEVICE" },
		{ ATA_CMD_READ_LONG,		"READ LONG (with retries)" },
		{ ATA_CMD_READ_LONG_ONCE,	"READ LONG (without retries)" },
		{ ATA_CMD_WRITE_LONG,		"WRITE LONG (with retries)" },
		{ ATA_CMD_WRITE_LONG_ONCE,	"WRITE LONG (without retries)" },
		{ ATA_CMD_RESTORE,		"RECALIBRATE" },
		{ 0,				NULL } /* terminate list */
	};

	unsigned int i;
	for (i = 0; cmd_descr[i].text; i++)
		if (cmd_descr[i].command == command)
			return cmd_descr[i].text;
#endif

	return NULL;
}
EXPORT_SYMBOL_GPL(ata_get_cmd_descript);

/**
 * ata_eh_link_report - report error handling to user
 * @link: ATA link EH is going on
 *
 * Report EH to user.
 *
 * LOCKING:
 * None.
 */
static void ata_eh_link_report(struct ata_link *link)
{
	struct ata_port *ap = link->ap;
	struct ata_eh_context *ehc = &link->eh_context;
	const char *frozen, *desc;
	char tries_buf[6] = "";
	int tag, nr_failed = 0;

	if (ehc->i.flags & ATA_EHI_QUIET)
		return;

	desc = NULL;
	if (ehc->i.desc[0] != '\0')
		desc = ehc->i.desc;

	/* count the failed commands that are worth reporting */
	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
		struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);

		if (!(qc->flags & ATA_QCFLAG_FAILED) ||
		    ata_dev_phys_link(qc->dev) != link ||
		    ((qc->flags & ATA_QCFLAG_QUIET) &&
		     qc->err_mask == AC_ERR_DEV))
			continue;
		if (qc->flags & ATA_QCFLAG_SENSE_VALID && !qc->err_mask)
			continue;

		nr_failed++;
	}

	if (!nr_failed && !ehc->i.err_mask)
		return;

	frozen = "";
	if (ap->pflags & ATA_PFLAG_FROZEN)
		frozen = " frozen";

	if (ap->eh_tries < ATA_EH_MAX_TRIES)
		snprintf(tries_buf, sizeof(tries_buf), " t%d",
			 ap->eh_tries);

	if (ehc->i.dev) {
		ata_dev_err(ehc->i.dev, "exception Emask 0x%x "
			    "SAct 0x%x SErr 0x%x action 0x%x%s%s\n",
			    ehc->i.err_mask, link->sactive, ehc->i.serror,
			    ehc->i.action, frozen, tries_buf);
		if (desc)
			ata_dev_err(ehc->i.dev, "%s\n", desc);
	} else {
		ata_link_err(link, "exception Emask 0x%x "
			     "SAct 0x%x SErr 0x%x action 0x%x%s%s\n",
			     ehc->i.err_mask, link->sactive, ehc->i.serror,
			     ehc->i.action, frozen, tries_buf);
		if (desc)
			ata_link_err(link, "%s\n", desc);
	}

#ifdef CONFIG_ATA_VERBOSE_ERROR
	/* decode each SError bit into its short mnemonic */
	if (ehc->i.serror)
		ata_link_err(link,
		  "SError: { %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s}\n",
		  ehc->i.serror & SERR_DATA_RECOVERED ? "RecovData " : "",
		  ehc->i.serror & SERR_COMM_RECOVERED ? "RecovComm " : "",
		  ehc->i.serror & SERR_DATA ? "UnrecovData " : "",
		  ehc->i.serror & SERR_PERSISTENT ? "Persist " : "",
		  ehc->i.serror & SERR_PROTOCOL ? "Proto " : "",
		  ehc->i.serror & SERR_INTERNAL ? "HostInt " : "",
		  ehc->i.serror & SERR_PHYRDY_CHG ? "PHYRdyChg " : "",
		  ehc->i.serror & SERR_PHY_INT_ERR ? "PHYInt " : "",
		  ehc->i.serror & SERR_COMM_WAKE ? "CommWake " : "",
		  ehc->i.serror & SERR_10B_8B_ERR ? "10B8B " : "",
		  ehc->i.serror & SERR_DISPARITY ? "Dispar " : "",
		  ehc->i.serror & SERR_CRC ? "BadCRC " : "",
		  ehc->i.serror & SERR_HANDSHAKE ? "Handshk " : "",
		  ehc->i.serror & SERR_LINK_SEQ_ERR ? "LinkSeq " : "",
		  ehc->i.serror & SERR_TRANS_ST_ERROR ? "TrStaTrns " : "",
		  ehc->i.serror & SERR_UNRECOG_FIS ? "UnrecFIS " : "",
		  ehc->i.serror & SERR_DEV_XCHG ? "DevExch " : "");
#endif

	/* per-command detailed report */
	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
		struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
		struct ata_taskfile *cmd = &qc->tf, *res = &qc->result_tf;
		char data_buf[20] = "";
		char cdb_buf[70] = "";

		if (!(qc->flags & ATA_QCFLAG_FAILED) ||
		    ata_dev_phys_link(qc->dev) != link || !qc->err_mask)
			continue;

		if (qc->dma_dir != DMA_NONE) {
			static const char *dma_str[] = {
				[DMA_BIDIRECTIONAL]	= "bidi",
				[DMA_TO_DEVICE]		= "out",
				[DMA_FROM_DEVICE]	= "in",
			};
			static const char *prot_str[] = {
				[ATA_PROT_PIO]		= "pio",
				[ATA_PROT_DMA]		= "dma",
				[ATA_PROT_NCQ]		= "ncq",
				[ATAPI_PROT_PIO]	= "pio",
				[ATAPI_PROT_DMA]	= "dma",
			};

			snprintf(data_buf, sizeof(data_buf), " %s %u %s",
				 prot_str[qc->tf.protocol], qc->nbytes,
				 dma_str[qc->dma_dir]);
		}

		if (ata_is_atapi(qc->tf.protocol)) {
			const u8 *cdb = qc->cdb;
			size_t cdb_len = qc->dev->cdb_len;

			/* prefer the SCSI command's CDB if one is attached */
			if (qc->scsicmd) {
				cdb = qc->scsicmd->cmnd;
				cdb_len = qc->scsicmd->cmd_len;
			}
			__scsi_format_command(cdb_buf, sizeof(cdb_buf),
					      cdb, cdb_len);
		} else {
			const char *descr = ata_get_cmd_descript(cmd->command);
			if (descr)
				ata_dev_err(qc->dev, "failed command: %s\n",
					    descr);
		}

		ata_dev_err(qc->dev,
			"cmd %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x "
			"tag %d%s\n %s"
			"res %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x "
			"Emask 0x%x (%s)%s\n",
			cmd->command, cmd->feature, cmd->nsect,
			cmd->lbal, cmd->lbam, cmd->lbah,
			cmd->hob_feature, cmd->hob_nsect,
			cmd->hob_lbal, cmd->hob_lbam, cmd->hob_lbah,
			cmd->device, qc->tag, data_buf, cdb_buf,
			res->command, res->feature, res->nsect,
			res->lbal, res->lbam, res->lbah,
			res->hob_feature, res->hob_nsect,
			res->hob_lbal, res->hob_lbam, res->hob_lbah,
			res->device, qc->err_mask, ata_err_string(qc->err_mask),
			qc->err_mask & AC_ERR_NCQ ? " <F>" : "");

#ifdef CONFIG_ATA_VERBOSE_ERROR
		if (res->command & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ |
				    ATA_ERR)) {
			if (res->command & ATA_BUSY)
				ata_dev_err(qc->dev, "status: { Busy }\n");
			else
				ata_dev_err(qc->dev, "status: { %s%s%s%s}\n",
				  res->command & ATA_DRDY ? "DRDY " : "",
				  res->command & ATA_DF ? "DF " : "",
				  res->command & ATA_DRQ ? "DRQ " : "",
				  res->command & ATA_ERR ? "ERR " : "");
		}

		if (cmd->command != ATA_CMD_PACKET &&
		    (res->feature & (ATA_ICRC | ATA_UNC | ATA_AMNF |
				     ATA_IDNF | ATA_ABORTED)))
			ata_dev_err(qc->dev, "error: { %s%s%s%s%s}\n",
			  res->feature & ATA_ICRC ? "ICRC " : "",
			  res->feature & ATA_UNC ? "UNC " : "",
			  res->feature & ATA_AMNF ? "AMNF " : "",
			  res->feature & ATA_IDNF ? "IDNF " : "",
			  res->feature & ATA_ABORTED ? "ABRT " : "");
#endif
	}
}

/**
 * ata_eh_report - report error handling to user
 * @ap: ATA port to report EH about
 *
 * Report EH to user.
 *
 * LOCKING:
 * None.
 */
void ata_eh_report(struct ata_port *ap)
{
	struct ata_link *link;

	ata_for_each_link(link, ap, HOST_FIRST)
		ata_eh_link_report(link);
}

/* Invoke @reset on @link, optionally clearing the device classes to
 * ATA_DEV_UNKNOWN first so the reset method classifies from scratch.
 */
static int ata_do_reset(struct ata_link *link, ata_reset_fn_t reset,
			unsigned int *classes, unsigned long deadline,
			bool clear_classes)
{
	struct ata_device *dev;

	if (clear_classes)
		ata_for_each_dev(dev, link, ALL)
			classes[dev->devno] = ATA_DEV_UNKNOWN;

	return reset(link, classes, deadline);
}

/* Decide whether a softreset must follow the just-performed reset
 * (e.g. hardreset couldn't classify, or a PMP may be attached).
 */
static int ata_eh_followup_srst_needed(struct ata_link *link, int rc)
{
	if ((link->flags & ATA_LFLAG_NO_SRST) || ata_link_offline(link))
		return 0;
	if (rc == -EAGAIN)
		return 1;
	if (sata_pmp_supported(link->ap) && ata_is_host_link(link))
		return 1;
	return 0;
}

int ata_eh_reset(struct ata_link *link, int classify,
		 ata_prereset_fn_t prereset, ata_reset_fn_t softreset,
		 ata_reset_fn_t hardreset, ata_postreset_fn_t postreset)
{
	struct ata_port *ap = link->ap;
	struct ata_link *slave = ap->slave_link;
	struct ata_eh_context *ehc = &link->eh_context;
	struct ata_eh_context *sehc = slave ?
	 &slave->eh_context : NULL;
	unsigned int *classes = ehc->classes;
	unsigned int lflags = link->flags;
	int verbose = !(ehc->i.flags & ATA_EHI_QUIET);
	int max_tries = 0, try = 0;
	struct ata_link *failed_link;
	struct ata_device *dev;
	unsigned long deadline, now;
	ata_reset_fn_t reset;
	unsigned long flags;
	u32 sstatus;
	int nr_unknown, rc;

	/*
	 * Prepare to reset
	 */
	/* count available retry timeouts; the table is ULONG_MAX terminated */
	while (ata_eh_reset_timeouts[max_tries] != ULONG_MAX)
		max_tries++;
	if (link->flags & ATA_LFLAG_RST_ONCE)
		max_tries = 1;
	if (link->flags & ATA_LFLAG_NO_HRST)
		hardreset = NULL;
	if (link->flags & ATA_LFLAG_NO_SRST)
		softreset = NULL;

	/* make sure each reset attempt is at least COOL_DOWN apart */
	if (ehc->i.flags & ATA_EHI_DID_RESET) {
		now = jiffies;
		WARN_ON(time_after(ehc->last_reset, now));
		deadline = ata_deadline(ehc->last_reset,
					ATA_EH_RESET_COOL_DOWN);
		if (time_before(now, deadline))
			schedule_timeout_uninterruptible(deadline - now);
	}

	spin_lock_irqsave(ap->lock, flags);
	ap->pflags |= ATA_PFLAG_RESETTING;
	spin_unlock_irqrestore(ap->lock, flags);

	ata_eh_about_to_do(link, NULL, ATA_EH_RESET);

	ata_for_each_dev(dev, link, ALL) {
		/* If we issue an SRST then an ATA drive (not ATAPI)
		 * may change configuration and be in PIO0 timing. If
		 * we do a hard reset (or are coming from power on)
		 * this is true for ATA or ATAPI. Until we've set a
		 * suitable controller mode we should not touch the
		 * bus as we may be talking too fast.
		 */
		dev->pio_mode = XFER_PIO_0;
		dev->dma_mode = 0xff;

		/* If the controller has a pio mode setup function
		 * then use it to set the chipset to rights. Don't
		 * touch the DMA setup as that will be dealt with when
		 * configuring devices.
		 */
		if (ap->ops->set_piomode)
			ap->ops->set_piomode(ap, dev);
	}

	/* prefer hardreset */
	reset = NULL;
	ehc->i.action &= ~ATA_EH_RESET;
	if (hardreset) {
		reset = hardreset;
		ehc->i.action |= ATA_EH_HARDRESET;
	} else if (softreset) {
		reset = softreset;
		ehc->i.action |= ATA_EH_SOFTRESET;
	}

	if (prereset) {
		unsigned long deadline = ata_deadline(jiffies,
						      ATA_EH_PRERESET_TIMEOUT);

		if (slave) {
			sehc->i.action &= ~ATA_EH_RESET;
			sehc->i.action |= ehc->i.action;
		}

		rc = prereset(link, deadline);

		/* If present, do prereset on slave link too. Reset
		 * is skipped iff both master and slave links report
		 * -ENOENT or clear ATA_EH_RESET.
		 */
		if (slave && (rc == 0 || rc == -ENOENT)) {
			int tmp;

			tmp = prereset(slave, deadline);
			if (tmp != -ENOENT)
				rc = tmp;

			ehc->i.action |= sehc->i.action;
		}

		if (rc) {
			if (rc == -ENOENT) {
				ata_link_dbg(link, "port disabled--ignoring\n");
				ehc->i.action &= ~ATA_EH_RESET;

				ata_for_each_dev(dev, link, ALL)
					classes[dev->devno] = ATA_DEV_NONE;

				rc = 0;
			} else
				ata_link_err(link,
					     "prereset failed (errno=%d)\n",
					     rc);
			goto out;
		}

		/* prereset() might have cleared ATA_EH_RESET. If so,
		 * bang classes, thaw and return.
		 */
		if (reset && !(ehc->i.action & ATA_EH_RESET)) {
			ata_for_each_dev(dev, link, ALL)
				classes[dev->devno] = ATA_DEV_NONE;
			if ((ap->pflags & ATA_PFLAG_FROZEN) &&
			    ata_is_host_link(link))
				ata_eh_thaw_port(ap);
			rc = 0;
			goto out;
		}
	}

 retry:
	/*
	 * Perform reset
	 */
	if (ata_is_host_link(link))
		ata_eh_freeze_port(ap);

	deadline = ata_deadline(jiffies, ata_eh_reset_timeouts[try++]);

	if (reset) {
		if (verbose)
			ata_link_info(link, "%s resetting link\n",
				      reset == softreset ? "soft" : "hard");

		/* mark that this EH session started with reset */
		ehc->last_reset = jiffies;
		if (reset == hardreset)
			ehc->i.flags |= ATA_EHI_DID_HARDRESET;
		else
			ehc->i.flags |= ATA_EHI_DID_SOFTRESET;

		rc = ata_do_reset(link, reset, classes, deadline, true);
		if (rc && rc != -EAGAIN) {
			failed_link = link;
			goto fail;
		}

		/* hardreset slave link if existent */
		if (slave && reset == hardreset) {
			int tmp;

			if (verbose)
				ata_link_info(slave, "hard resetting link\n");

			ata_eh_about_to_do(slave, NULL, ATA_EH_RESET);
			tmp = ata_do_reset(slave, reset, classes, deadline,
					   false);
			switch (tmp) {
			case -EAGAIN:
				rc = -EAGAIN;
				/* fall through - -EAGAIN on the slave still
				 * counts as success for this attempt */
			case 0:
				break;
			default:
				failed_link = slave;
				rc = tmp;
				goto fail;
			}
		}

		/* perform follow-up SRST if necessary */
		if (reset == hardreset &&
		    ata_eh_followup_srst_needed(link, rc)) {
			reset = softreset;

			if (!reset) {
				ata_link_err(link,
					     "follow-up softreset required but no softreset available\n");
				failed_link = link;
				rc = -EINVAL;
				goto fail;
			}

			ata_eh_about_to_do(link, NULL, ATA_EH_RESET);
			rc = ata_do_reset(link, reset, classes, deadline, true);
			if (rc) {
				failed_link = link;
				goto fail;
			}
		}
	} else {
		if (verbose)
			ata_link_info(link,
				      "no reset method available, skipping reset\n");
		if (!(lflags & ATA_LFLAG_ASSUME_CLASS))
			lflags |= ATA_LFLAG_ASSUME_ATA;
	}

	/*
	 * Post-reset processing
	 */
	ata_for_each_dev(dev, link, ALL) {
		/* After the reset, the device state is PIO 0 and the
		 * controller state is undefined. Reset also wakes up
		 * drives from sleeping mode.
		 */
		dev->pio_mode = XFER_PIO_0;
		dev->flags &= ~ATA_DFLAG_SLEEPING;

		if (ata_phys_link_offline(ata_dev_phys_link(dev)))
			continue;

		/* apply class override */
		if (lflags & ATA_LFLAG_ASSUME_ATA)
			classes[dev->devno] = ATA_DEV_ATA;
		else if (lflags & ATA_LFLAG_ASSUME_SEMB)
			classes[dev->devno] = ATA_DEV_SEMB_UNSUP;
	}

	/* record current link speed */
	if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0)
		link->sata_spd = (sstatus >> 4) & 0xf;
	if (slave && sata_scr_read(slave, SCR_STATUS, &sstatus) == 0)
		slave->sata_spd = (sstatus >> 4) & 0xf;

	/* thaw the port */
	if (ata_is_host_link(link))
		ata_eh_thaw_port(ap);

	/* postreset() should clear hardware SError. Although SError
	 * is cleared during link resume, clearing SError here is
	 * necessary as some PHYs raise hotplug events after SRST.
	 * This introduces race condition where hotplug occurs between
	 * reset and here. This race is mediated by cross checking
	 * link onlineness and classification result later.
	 */
	if (postreset) {
		postreset(link, classes);
		if (slave)
			postreset(slave, classes);
	}

	/*
	 * Some controllers can't be frozen very well and may set spurious
	 * error conditions during reset. Clear accumulated error
	 * information and re-thaw the port if frozen. As reset is the
	 * final recovery action and we cross check link onlineness against
	 * device classification later, no hotplug event is lost by this.
	 */
	spin_lock_irqsave(link->ap->lock, flags);
	memset(&link->eh_info, 0, sizeof(link->eh_info));
	if (slave)
		/* NOTE(review): sizeof(link->eh_info) is used to size the
		 * slave memset; the fields have the same type so the size
		 * is correct, but sizeof(slave->eh_info) would be clearer. */
		memset(&slave->eh_info, 0, sizeof(link->eh_info));
	ap->pflags &= ~ATA_PFLAG_EH_PENDING;
	spin_unlock_irqrestore(link->ap->lock, flags);

	if (ap->pflags & ATA_PFLAG_FROZEN)
		ata_eh_thaw_port(ap);

	/*
	 * Make sure onlineness and classification result correspond.
	 * Hotplug could have happened during reset and some
	 * controllers fail to wait while a drive is spinning up after
	 * being hotplugged causing misdetection. By cross checking
	 * link on/offlineness and classification result, those
	 * conditions can be reliably detected and retried.
	 */
	nr_unknown = 0;
	ata_for_each_dev(dev, link, ALL) {
		if (ata_phys_link_online(ata_dev_phys_link(dev))) {
			if (classes[dev->devno] == ATA_DEV_UNKNOWN) {
				ata_dev_dbg(dev, "link online but device misclassified\n");
				classes[dev->devno] = ATA_DEV_NONE;
				nr_unknown++;
			}
		} else if (ata_phys_link_offline(ata_dev_phys_link(dev))) {
			if (ata_class_enabled(classes[dev->devno]))
				ata_dev_dbg(dev,
					    "link offline, clearing class %d to NONE\n",
					    classes[dev->devno]);
			classes[dev->devno] = ATA_DEV_NONE;
		} else if (classes[dev->devno] == ATA_DEV_UNKNOWN) {
			ata_dev_dbg(dev,
				    "link status unknown, clearing UNKNOWN to NONE\n");
			classes[dev->devno] = ATA_DEV_NONE;
		}
	}

	if (classify && nr_unknown) {
		if (try < max_tries) {
			ata_link_warn(link,
				      "link online but %d devices misclassified, retrying\n",
				      nr_unknown);
			failed_link = link;
			rc = -EAGAIN;
			goto fail;
		}
		ata_link_warn(link,
			      "link online but %d devices misclassified, "
			      "device detection might fail\n", nr_unknown);
	}

	/* reset successful, schedule revalidation */
	ata_eh_done(link, NULL, ATA_EH_RESET);
	if (slave)
		ata_eh_done(slave, NULL, ATA_EH_RESET);
	ehc->last_reset = jiffies;	/* update to completion time */
	ehc->i.action |= ATA_EH_REVALIDATE;
	link->lpm_policy = ATA_LPM_UNKNOWN;	/* reset LPM state */

	rc = 0;
 out:
	/* clear hotplug flag */
	ehc->i.flags &= ~ATA_EHI_HOTPLUGGED;
	if (slave)
		sehc->i.flags &= ~ATA_EHI_HOTPLUGGED;

	spin_lock_irqsave(ap->lock, flags);
	ap->pflags &= ~ATA_PFLAG_RESETTING;
	spin_unlock_irqrestore(ap->lock, flags);

	return rc;

 fail:
	/* if SCR isn't accessible on a fan-out port, PMP needs to be reset */
	if (!ata_is_host_link(link) &&
	    sata_scr_read(link, SCR_STATUS, &sstatus))
		rc = -ERESTART;

	if (try >= max_tries) {
		/*
		 * Thaw host port even if reset failed, so that the port
		 * can be retried on the next phy event. This risks
		 * repeated EH runs but seems to be a better tradeoff than
		 * shutting down a port after a botched hotplug attempt.
		 */
		if (ata_is_host_link(link))
			ata_eh_thaw_port(ap);
		goto out;
	}

	now = jiffies;
	if (time_before(now, deadline)) {
		unsigned long delta = deadline - now;

		ata_link_warn(failed_link,
			"reset failed (errno=%d), retrying in %u secs\n",
			rc, DIV_ROUND_UP(jiffies_to_msecs(delta), 1000));

		ata_eh_release(ap);
		while (delta)
			delta = schedule_timeout_uninterruptible(delta);
		ata_eh_acquire(ap);
	}

	/*
	 * While disks spinup behind PMP, some controllers fail sending SRST.
	 * They need to be reset - as well as the PMP - before retrying.
	 */
	if (rc == -ERESTART) {
		if (ata_is_host_link(link))
			ata_eh_thaw_port(ap);
		goto out;
	}

	if (try == max_tries - 1) {
		/* last chance coming up - lower the link speed first */
		sata_down_spd_limit(link, 0);
		if (slave)
			sata_down_spd_limit(slave, 0);
	} else if (rc == -EPIPE)
		sata_down_spd_limit(failed_link, 0);

	if (hardreset)
		reset = hardreset;
	goto retry;
}

static inline void ata_eh_pull_park_action(struct ata_port *ap)
{
	struct ata_link *link;
	struct ata_device *dev;
	unsigned long flags;

	/*
	 * This function can be thought of as an extended version of
	 * ata_eh_about_to_do() specially crafted to accommodate the
	 * requirements of ATA_EH_PARK handling. Since the EH thread
	 * does not leave the do {} while () loop in ata_eh_recover as
	 * long as the timeout for a park request to *one* device on
	 * the port has not expired, and since we still want to pick
	 * up park requests to other devices on the same port or
	 * timeout updates for the same device, we have to pull
	 * ATA_EH_PARK actions from eh_info into eh_context.i
	 * ourselves at the beginning of each pass over the loop.
	 *
	 * Additionally, all write accesses to &ap->park_req_pending
	 * through reinit_completion() (see below) or complete_all()
	 * (see ata_scsi_park_store()) are protected by the host lock.
	 * As a result we have that park_req_pending.done is zero on
	 * exit from this function, i.e. when ATA_EH_PARK actions for
	 * *all* devices on port ap have been pulled into the
	 * respective eh_context structs.
	 * If, and only if,
	 * park_req_pending.done is non-zero by the time we reach
	 * wait_for_completion_timeout(), another ATA_EH_PARK action
	 * has been scheduled for at least one of the devices on port
	 * ap and we have to cycle over the do {} while () loop in
	 * ata_eh_recover() again.
	 */
	spin_lock_irqsave(ap->lock, flags);
	reinit_completion(&ap->park_req_pending);
	ata_for_each_link(link, ap, EDGE) {
		ata_for_each_dev(dev, link, ALL) {
			struct ata_eh_info *ehi = &link->eh_info;

			link->eh_context.i.dev_action[dev->devno] |=
				ehi->dev_action[dev->devno] & ATA_EH_PARK;
			ata_eh_clear_action(link, dev, ehi, ATA_EH_PARK);
		}
	}
	spin_unlock_irqrestore(ap->lock, flags);
}

/* Issue or revoke a head-unload (park) command.  @park non-zero unloads
 * the heads via IDLE IMMEDIATE with the unload taskfile signature;
 * @park zero wakes the device back up with CHECK POWER MODE. */
static void ata_eh_park_issue_cmd(struct ata_device *dev, int park)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;
	struct ata_taskfile tf;
	unsigned int err_mask;

	ata_tf_init(dev, &tf);
	if (park) {
		ehc->unloaded_mask |= 1 << dev->devno;
		tf.command = ATA_CMD_IDLEIMMEDIATE;
		tf.feature = 0x44;
		tf.lbal = 0x4c;
		tf.lbam = 0x4e;
		tf.lbah = 0x55;
	} else {
		ehc->unloaded_mask &= ~(1 << dev->devno);
		tf.command = ATA_CMD_CHK_POWER;
	}

	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
	tf.protocol |= ATA_PROT_NODATA;
	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
	if (park && (err_mask || tf.lbal != 0xc4)) {
		/* lbal == 0xc4 is the "unload accepted" signature the
		 * device returns on a successful head unload */
		ata_dev_err(dev, "head unload failed!\n");
		ehc->unloaded_mask &= ~(1 << dev->devno);
	}
}

/* Revalidate enabled devices and attach newly found ones on @link.
 * On failure the offending device is returned through @r_failed_dev. */
static int ata_eh_revalidate_and_attach(struct ata_link *link,
					struct ata_device **r_failed_dev)
{
	struct ata_port *ap = link->ap;
	struct ata_eh_context *ehc = &link->eh_context;
	struct ata_device *dev;
	unsigned int new_mask = 0;
	unsigned long flags;
	int rc = 0;

	DPRINTK("ENTER\n");

	/* For PATA drive side cable detection to work, IDENTIFY must
	 * be done backwards such that PDIAG- is released by the slave
	 * device before the master device is identified.
	 */
	ata_for_each_dev(dev, link, ALL_REVERSE) {
		unsigned int action = ata_eh_dev_action(dev);
		unsigned int readid_flags = 0;

		if (ehc->i.flags & ATA_EHI_DID_RESET)
			readid_flags |= ATA_READID_POSTRESET;

		if ((action & ATA_EH_REVALIDATE) && ata_dev_enabled(dev)) {
			WARN_ON(dev->class == ATA_DEV_PMP);

			if (ata_phys_link_offline(ata_dev_phys_link(dev))) {
				rc = -EIO;
				goto err;
			}

			ata_eh_about_to_do(link, dev, ATA_EH_REVALIDATE);
			rc = ata_dev_revalidate(dev, ehc->classes[dev->devno],
						readid_flags);
			if (rc)
				goto err;

			ata_eh_done(link, dev, ATA_EH_REVALIDATE);

			/* Configuration may have changed, reconfigure
			 * transfer mode.
			 */
			ehc->i.flags |= ATA_EHI_SETMODE;

			/* schedule the scsi_rescan_device() here */
			schedule_work(&(ap->scsi_rescan_task));
		} else if (dev->class == ATA_DEV_UNKNOWN &&
			   ehc->tries[dev->devno] &&
			   ata_class_enabled(ehc->classes[dev->devno])) {
			/* Temporarily set dev->class, it will be
			 * permanently set once all configurations are
			 * complete. This is necessary because new
			 * device configuration is done in two
			 * separate loops.
			 */
			dev->class = ehc->classes[dev->devno];

			if (dev->class == ATA_DEV_PMP)
				rc = sata_pmp_attach(dev);
			else
				rc = ata_dev_read_id(dev, &dev->class,
						     readid_flags, dev->id);

			/* read_id might have changed class, store and reset */
			ehc->classes[dev->devno] = dev->class;
			dev->class = ATA_DEV_UNKNOWN;

			switch (rc) {
			case 0:
				/* clear error info accumulated during probe */
				ata_ering_clear(&dev->ering);
				new_mask |= 1 << dev->devno;
				break;
			case -ENOENT:
				/* IDENTIFY was issued to non-existent
				 * device. No need to reset. Just
				 * thaw and ignore the device.
				 */
				ata_eh_thaw_port(ap);
				break;
			default:
				goto err;
			}
		}
	}

	/* PDIAG- should have been released, ask cable type if post-reset */
	if ((ehc->i.flags & ATA_EHI_DID_RESET) && ata_is_host_link(link)) {
		if (ap->ops->cable_detect)
			ap->cbl = ap->ops->cable_detect(ap);
		ata_force_cbl(ap);
	}

	/* Configure new devices forward such that user doesn't see
	 * device detection messages backwards.
	 */
	ata_for_each_dev(dev, link, ALL) {
		if (!(new_mask & (1 << dev->devno)))
			continue;

		dev->class = ehc->classes[dev->devno];

		if (dev->class == ATA_DEV_PMP)
			continue;

		ehc->i.flags |= ATA_EHI_PRINTINFO;
		rc = ata_dev_configure(dev);
		ehc->i.flags &= ~ATA_EHI_PRINTINFO;
		if (rc) {
			dev->class = ATA_DEV_UNKNOWN;
			goto err;
		}

		spin_lock_irqsave(ap->lock, flags);
		ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG;
		spin_unlock_irqrestore(ap->lock, flags);

		/* new device discovered, configure xfermode */
		ehc->i.flags |= ATA_EHI_SETMODE;
	}

	return 0;

 err:
	*r_failed_dev = dev;
	DPRINTK("EXIT rc=%d\n", rc);
	return rc;
}

/**
 *	ata_set_mode - Program timings and issue SET FEATURES - XFER
 *	@link: link on which timings will be programmed
 *	@r_failed_dev: out parameter for failed device
 *
 *	Set ATA device disk transfer mode (PIO3, UDMA6, etc.). If
 *	ata_set_mode() fails, pointer to the failing device is
 *	returned in @r_failed_dev.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	0 on success, negative errno otherwise
 */
int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
{
	struct ata_port *ap = link->ap;
	struct ata_device *dev;
	int rc;

	/* if data transfer is verified, clear DUBIOUS_XFER on ering top */
	ata_for_each_dev(dev, link, ENABLED) {
		if (!(dev->flags & ATA_DFLAG_DUBIOUS_XFER)) {
			struct ata_ering_entry *ent;

			ent = ata_ering_top(&dev->ering);
			if (ent)
				ent->eflags &= ~ATA_EFLAG_DUBIOUS_XFER;
		}
	}

	/* has private set_mode?
	 */
	if (ap->ops->set_mode)
		rc = ap->ops->set_mode(link, r_failed_dev);
	else
		rc = ata_do_set_mode(link, r_failed_dev);

	/* if transfer mode has changed, set DUBIOUS_XFER on device */
	ata_for_each_dev(dev, link, ENABLED) {
		struct ata_eh_context *ehc = &link->eh_context;
		u8 saved_xfer_mode = ehc->saved_xfer_mode[dev->devno];
		u8 saved_ncq = !!(ehc->saved_ncq_enabled & (1 << dev->devno));

		if (dev->xfer_mode != saved_xfer_mode ||
		    ata_ncq_enabled(dev) != saved_ncq)
			dev->flags |= ATA_DFLAG_DUBIOUS_XFER;
	}

	return rc;
}

/**
 *	atapi_eh_clear_ua - Clear ATAPI UNIT ATTENTION after reset
 *	@dev: ATAPI device to clear UA for
 *
 *	Resets and other operations can make an ATAPI device raise
 *	UNIT ATTENTION which causes the next operation to fail. This
 *	function clears UA.
 *
 *	LOCKING:
 *	EH context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
static int atapi_eh_clear_ua(struct ata_device *dev)
{
	int i;

	for (i = 0; i < ATA_EH_UA_TRIES; i++) {
		u8 *sense_buffer = dev->link->ap->sector_buf;
		u8 sense_key = 0;
		unsigned int err_mask;

		err_mask = atapi_eh_tur(dev, &sense_key);
		if (err_mask != 0 && err_mask != AC_ERR_DEV) {
			ata_dev_warn(dev,
				     "TEST_UNIT_READY failed (err_mask=0x%x)\n",
				     err_mask);
			return -EIO;
		}

		/* device is ready, or the error wasn't UNIT ATTENTION */
		if (!err_mask || sense_key != UNIT_ATTENTION)
			return 0;

		err_mask = atapi_eh_request_sense(dev, sense_buffer, sense_key);
		if (err_mask) {
			ata_dev_warn(dev, "failed to clear "
				     "UNIT ATTENTION (err_mask=0x%x)\n",
				     err_mask);
			return -EIO;
		}
	}

	ata_dev_warn(dev, "UNIT ATTENTION persists after %d tries\n",
		     ATA_EH_UA_TRIES);

	return 0;
}

/**
 *	ata_eh_maybe_retry_flush - Retry FLUSH if necessary
 *	@dev: ATA device which may need FLUSH retry
 *
 *	If @dev failed FLUSH, it needs to be reported upper layer
 *	immediately as it means that @dev failed to remap and already
 *	lost at least a sector and further FLUSH retrials won't make
 *	any difference to the lost sector. However, if FLUSH failed
 *	for other reasons, for example transmission error, FLUSH needs
 *	to be retried.
 *
 *	This function determines whether FLUSH failure retry is
 *	necessary and performs it if so.
 *
 *	RETURNS:
 *	0 if EH can continue, -errno if EH needs to be repeated.
 */
static int ata_eh_maybe_retry_flush(struct ata_device *dev)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	struct ata_queued_cmd *qc;
	struct ata_taskfile tf;
	unsigned int err_mask;
	int rc = 0;

	/* did flush fail for this device? */
	if (!ata_tag_valid(link->active_tag))
		return 0;

	qc = __ata_qc_from_tag(ap, link->active_tag);
	if (qc->dev != dev || (qc->tf.command != ATA_CMD_FLUSH_EXT &&
			       qc->tf.command != ATA_CMD_FLUSH))
		return 0;

	/* if the device failed it, it should be reported to upper layers */
	if (qc->err_mask & AC_ERR_DEV)
		return 0;

	/* flush failed for some other reason, give it another shot */
	ata_tf_init(dev, &tf);

	tf.command = qc->tf.command;
	tf.flags |= ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_NODATA;

	ata_dev_warn(dev, "retrying FLUSH 0x%x Emask 0x%x\n",
		     tf.command, qc->err_mask);

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
	if (!err_mask) {
		/*
		 * FLUSH is complete but there's no way to
		 * successfully complete a failed command from EH.
		 * Making sure retry is allowed at least once and
		 * retrying it should do the trick - whatever was in
		 * the cache is already on the platter and this won't
		 * cause infinite loop.
		 */
		qc->scsicmd->allowed = max(qc->scsicmd->allowed, 1);
	} else {
		ata_dev_warn(dev, "FLUSH failed Emask 0x%x\n",
			     err_mask);
		rc = -EIO;

		/* if device failed it, report it to upper layers */
		if (err_mask & AC_ERR_DEV) {
			qc->err_mask |= AC_ERR_DEV;
			qc->result_tf = tf;
			if (!(ap->pflags & ATA_PFLAG_FROZEN))
				rc = 0;
		}
	}
	return rc;
}

/**
 *	ata_eh_set_lpm - configure SATA interface power management
 *	@link: link to configure power management
 *	@policy: the link power management policy
 *	@r_failed_dev: out parameter for failed device
 *
 *	Enable SATA Interface power management.
 *	This will enable
 *	Device Interface Power Management (DIPM) for min_power
 *	policy, and then call driver specific callbacks for
 *	enabling Host Initiated Power management.
 *
 *	LOCKING:
 *	EH context.
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
static int ata_eh_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
			  struct ata_device **r_failed_dev)
{
	struct ata_port *ap = ata_is_host_link(link) ? link->ap : NULL;
	struct ata_eh_context *ehc = &link->eh_context;
	struct ata_device *dev, *link_dev = NULL, *lpm_dev = NULL;
	enum ata_lpm_policy old_policy = link->lpm_policy;
	bool no_dipm = link->ap->flags & ATA_FLAG_NO_DIPM;
	unsigned int hints = ATA_LPM_EMPTY | ATA_LPM_HIPM;
	unsigned int err_mask;
	int rc;

	/* if the link or host doesn't do LPM, noop */
	if ((link->flags & ATA_LFLAG_NO_LPM) || (ap && !ap->ops->set_lpm))
		return 0;

	/*
	 * DIPM is enabled only for MIN_POWER as some devices
	 * misbehave when the host NACKs transition to SLUMBER. Order
	 * device and link configurations such that the host always
	 * allows DIPM requests.
	 */
	ata_for_each_dev(dev, link, ENABLED) {
		bool hipm = ata_id_has_hipm(dev->id);
		bool dipm = ata_id_has_dipm(dev->id) && !no_dipm;

		/* find the first enabled and LPM enabled devices */
		if (!link_dev)
			link_dev = dev;

		if (!lpm_dev && (hipm || dipm))
			lpm_dev = dev;

		hints &= ~ATA_LPM_EMPTY;
		if (!hipm)
			hints &= ~ATA_LPM_HIPM;

		/* disable DIPM before changing link config */
		if (policy != ATA_LPM_MIN_POWER && dipm) {
			err_mask = ata_dev_set_feature(dev,
					SETFEATURES_SATA_DISABLE, SATA_DIPM);
			if (err_mask && err_mask != AC_ERR_DEV) {
				ata_dev_warn(dev,
					     "failed to disable DIPM, Emask 0x%x\n",
					     err_mask);
				rc = -EIO;
				goto fail;
			}
		}
	}

	if (ap) {
		rc = ap->ops->set_lpm(link, policy, hints);
		if (!rc && ap->slave_link)
			rc = ap->ops->set_lpm(ap->slave_link, policy, hints);
	} else
		rc = sata_pmp_set_lpm(link, policy, hints);

	/*
	 * Attribute link config failure to the first (LPM) enabled
	 * device on the link.
	 */
	if (rc) {
		if (rc == -EOPNOTSUPP) {
			link->flags |= ATA_LFLAG_NO_LPM;
			return 0;
		}
		dev = lpm_dev ? lpm_dev : link_dev;
		goto fail;
	}

	/*
	 * Low level driver acked the transition. Issue DIPM command
	 * with the new policy set.
	 */
	link->lpm_policy = policy;
	if (ap && ap->slave_link)
		ap->slave_link->lpm_policy = policy;

	/* host config updated, enable DIPM if transitioning to MIN_POWER */
	ata_for_each_dev(dev, link, ENABLED) {
		if (policy == ATA_LPM_MIN_POWER && !no_dipm &&
		    ata_id_has_dipm(dev->id)) {
			err_mask = ata_dev_set_feature(dev,
					SETFEATURES_SATA_ENABLE, SATA_DIPM);
			if (err_mask && err_mask != AC_ERR_DEV) {
				ata_dev_warn(dev,
					     "failed to enable DIPM, Emask 0x%x\n",
					     err_mask);
				rc = -EIO;
				goto fail;
			}
		}
	}

	link->last_lpm_change = jiffies;
	link->flags |= ATA_LFLAG_CHANGED;

	return 0;

fail:
	/* restore the old policy */
	link->lpm_policy = old_policy;
	if (ap && ap->slave_link)
		ap->slave_link->lpm_policy = old_policy;

	/* if no device or only one more chance is left, disable LPM */
	if (!dev || ehc->tries[dev->devno] <= 2) {
		ata_link_warn(link, "disabling LPM on the link\n");
		link->flags |= ATA_LFLAG_NO_LPM;
	}
	if (r_failed_dev)
		*r_failed_dev = dev;
	return rc;
}

/* count the devices on @link that are currently enabled */
int ata_link_nr_enabled(struct ata_link *link)
{
	struct ata_device *dev;
	int cnt = 0;

	ata_for_each_dev(dev, link, ENABLED)
		cnt++;
	return cnt;
}

/* count the device slots on @link whose class is still unknown */
static int ata_link_nr_vacant(struct ata_link *link)
{
	struct ata_device *dev;
	int cnt = 0;

	ata_for_each_dev(dev, link, ALL)
		if (dev->class == ATA_DEV_UNKNOWN)
			cnt++;
	return cnt;
}

/* Decide whether EH recovery can be skipped for @link entirely.
 * Returns non-zero when there is nothing to recover. */
static int ata_eh_skip_recovery(struct ata_link *link)
{
	struct ata_port *ap = link->ap;
	struct ata_eh_context *ehc = &link->eh_context;
	struct ata_device *dev;

	/* skip disabled links */
	if (link->flags & ATA_LFLAG_DISABLED)
		return 1;

	/* skip if explicitly requested */
	if (ehc->i.flags & ATA_EHI_NO_RECOVERY)
		return 1;

	/* thaw frozen port and recover failed devices */
	if ((ap->pflags & ATA_PFLAG_FROZEN) || ata_link_nr_enabled(link))
		return 0;

	/* reset at least once if reset is requested */
	if ((ehc->i.action & ATA_EH_RESET) &&
	    !(ehc->i.flags & ATA_EHI_DID_RESET))
		return 0;

	/* skip if class codes for all vacant slots are ATA_DEV_NONE */
	ata_for_each_dev(dev, link, ALL) {
		if (dev->class == ATA_DEV_UNKNOWN &&
		    ehc->classes[dev->devno] != ATA_DEV_NONE)
			return 0;
	}

	return 1;
}

/* ering map callback: count probe trials younger than the trial interval */
static int ata_count_probe_trials_cb(struct ata_ering_entry *ent, void *void_arg)
{
	u64 interval = msecs_to_jiffies(ATA_EH_PROBE_TRIAL_INTERVAL);
	u64 now = get_jiffies_64();
	int *trials = void_arg;

	if ((ent->eflags & ATA_EFLAG_OLD_ER) ||
	    (ent->timestamp < now - min(now, interval)))
		return -1;

	(*trials)++;
	return 0;
}

/* Schedule a probe for @dev if one is pending and not yet attempted
 * this EH session.  Returns 1 when a probe was scheduled, 0 otherwise. */
static int ata_eh_schedule_probe(struct ata_device *dev)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;
	struct ata_link *link = ata_dev_phys_link(dev);
	int trials = 0;

	if (!(ehc->i.probe_mask & (1 << dev->devno)) ||
	    (ehc->did_probe_mask & (1 << dev->devno)))
		return 0;

	ata_eh_detach_dev(dev);
	ata_dev_init(dev);
	ehc->did_probe_mask |= (1 << dev->devno);
	ehc->i.action |= ATA_EH_RESET;
	ehc->saved_xfer_mode[dev->devno] = 0;
	ehc->saved_ncq_enabled &= ~(1 << dev->devno);

	/* the link may be in a deep sleep, wake it up */
	if (link->lpm_policy > ATA_LPM_MAX_POWER) {
		if (ata_is_host_link(link))
			link->ap->ops->set_lpm(link, ATA_LPM_MAX_POWER,
					       ATA_LPM_EMPTY);
		else
			sata_pmp_set_lpm(link, ATA_LPM_MAX_POWER,
					 ATA_LPM_EMPTY);
	}

	/* Record and count probe trials on the ering. The specific
	 * error mask used is irrelevant. Because a successful device
	 * detection clears the ering, this count accumulates only if
	 * there are consecutive failed probes.
	 *
	 * If the count is equal to or higher than ATA_EH_PROBE_TRIALS
	 * in the last ATA_EH_PROBE_TRIAL_INTERVAL, link speed is
	 * forced to 1.5Gbps.
	 *
	 * This is to work around cases where failed link speed
	 * negotiation results in device misdetection leading to
	 * infinite DEVXCHG or PHRDY CHG events.
	 */
	ata_ering_record(&dev->ering, 0, AC_ERR_OTHER);
	ata_ering_map(&dev->ering, ata_count_probe_trials_cb, &trials);

	if (trials > ATA_EH_PROBE_TRIALS)
		sata_down_spd_limit(link, 1);

	return 1;
}

/* Account a recovery failure @err for @dev: decrement its try budget,
 * slow it down on its last chance, and disable/re-probe it when all
 * chances are used up.  Returns 1 when a fresh probe was scheduled. */
static int ata_eh_handle_dev_fail(struct ata_device *dev, int err)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;

	/* -EAGAIN from EH routine indicates retry without prejudice.
	 * The requester is responsible for ensuring forward progress.
	 */
	if (err != -EAGAIN)
		ehc->tries[dev->devno]--;

	switch (err) {
	case -ENODEV:
		/* device missing or wrong IDENTIFY data, schedule probing */
		ehc->i.probe_mask |= (1 << dev->devno);
		/* fall through */
	case -EINVAL:
		/* give it just one more chance */
		ehc->tries[dev->devno] = min(ehc->tries[dev->devno], 1);
		/* fall through */
	case -EIO:
		if (ehc->tries[dev->devno] == 1) {
			/* This is the last chance, better to slow
			 * down than lose it.
			 */
			sata_down_spd_limit(ata_dev_phys_link(dev), 0);
			if (dev->pio_mode > XFER_PIO_0)
				ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
		}
	}

	if (ata_dev_enabled(dev) && !ehc->tries[dev->devno]) {
		/* disable device if it has used up all its chances */
		ata_dev_disable(dev);

		/* detach if offline */
		if (ata_phys_link_offline(ata_dev_phys_link(dev)))
			ata_eh_detach_dev(dev);

		/* schedule probe if necessary */
		if (ata_eh_schedule_probe(dev)) {
			ehc->tries[dev->devno] = ATA_EH_DEV_TRIES;
			memset(ehc->cmd_timeout_idx[dev->devno], 0,
			       sizeof(ehc->cmd_timeout_idx[dev->devno]));
		}

		return 1;
	} else {
		ehc->i.action |= ATA_EH_RESET;
		return 0;
	}
}

/**
 *	ata_eh_recover - recover host port after error
 *	@ap: host port to recover
 *	@prereset: prereset method (can be NULL)
 *	@softreset: softreset method (can be NULL)
 *	@hardreset: hardreset method (can be NULL)
 *	@postreset: postreset method (can be NULL)
 *	@r_failed_link: out parameter for failed link
 *
 *	This is the alpha and omega, eum and yang, heart and soul of
 *	libata exception handling. On entry, actions required to
 *	recover each link and hotplug requests are recorded in the
 *	link's eh_context. This function executes all the operations
 *	with appropriate retrials and fallbacks to resurrect failed
 *	devices, detach goners and greet newcomers.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
		   ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
		   ata_postreset_fn_t postreset,
		   struct ata_link **r_failed_link)
{
	struct ata_link *link;
	struct ata_device *dev;
	int rc, nr_fails;
	unsigned long flags, deadline;

	DPRINTK("ENTER\n");

	/* prep for recovery */
	ata_for_each_link(link, ap, EDGE) {
		struct ata_eh_context *ehc = &link->eh_context;

		/* re-enable link? */
		if (ehc->i.action & ATA_EH_ENABLE_LINK) {
			ata_eh_about_to_do(link, NULL, ATA_EH_ENABLE_LINK);
			spin_lock_irqsave(ap->lock, flags);
			link->flags &= ~ATA_LFLAG_DISABLED;
			spin_unlock_irqrestore(ap->lock, flags);
			ata_eh_done(link, NULL, ATA_EH_ENABLE_LINK);
		}

		ata_for_each_dev(dev, link, ALL) {
			if (link->flags & ATA_LFLAG_NO_RETRY)
				ehc->tries[dev->devno] = 1;
			else
				ehc->tries[dev->devno] = ATA_EH_DEV_TRIES;

			/* collect port action mask recorded in dev actions */
			ehc->i.action |= ehc->i.dev_action[dev->devno] &
					 ~ATA_EH_PERDEV_MASK;
			ehc->i.dev_action[dev->devno] &= ATA_EH_PERDEV_MASK;

			/* process hotplug request */
			if (dev->flags & ATA_DFLAG_DETACH)
				ata_eh_detach_dev(dev);

			/* schedule probe if necessary */
			if (!ata_dev_enabled(dev))
				ata_eh_schedule_probe(dev);
		}
	}

 retry:
	rc = 0;

	/* if UNLOADING, finish immediately */
	if (ap->pflags & ATA_PFLAG_UNLOADING)
		goto out;

	/* prep for EH */
	ata_for_each_link(link, ap, EDGE) {
		struct ata_eh_context *ehc = &link->eh_context;

		/* skip EH if possible. */
		if (ata_eh_skip_recovery(link))
			ehc->i.action = 0;

		ata_for_each_dev(dev, link, ALL)
			ehc->classes[dev->devno] = ATA_DEV_UNKNOWN;
	}

	/* reset */
	ata_for_each_link(link, ap, EDGE) {
		struct ata_eh_context *ehc = &link->eh_context;

		if (!(ehc->i.action & ATA_EH_RESET))
			continue;

		rc = ata_eh_reset(link, ata_link_nr_vacant(link),
				  prereset, softreset, hardreset, postreset);
		if (rc) {
			ata_link_err(link, "reset failed, giving up\n");
			goto out;
		}
	}

	do {
		unsigned long now;

		/*
		 * clears ATA_EH_PARK in eh_info and resets
		 * ap->park_req_pending
		 */
		ata_eh_pull_park_action(ap);

		/* issue park commands and compute the latest unpark deadline */
		deadline = jiffies;
		ata_for_each_link(link, ap, EDGE) {
			ata_for_each_dev(dev, link, ALL) {
				struct ata_eh_context *ehc = &link->eh_context;
				unsigned long tmp;

				if (dev->class != ATA_DEV_ATA &&
				    dev->class != ATA_DEV_ZAC)
					continue;
				if (!(ehc->i.dev_action[dev->devno] &
				      ATA_EH_PARK))
					continue;
				tmp = dev->unpark_deadline;
				if (time_before(deadline, tmp))
					deadline = tmp;
				else if (time_before_eq(tmp, jiffies))
					continue;
				if (ehc->unloaded_mask & (1 << dev->devno))
					continue;

				ata_eh_park_issue_cmd(dev, 1);
			}
		}

		now = jiffies;
		if (time_before_eq(deadline, now))
			break;

		ata_eh_release(ap);
		deadline = wait_for_completion_timeout(&ap->park_req_pending,
						       deadline - now);
		ata_eh_acquire(ap);
	} while (deadline);
	ata_for_each_link(link, ap, EDGE) {
		ata_for_each_dev(dev, link, ALL) {
			if (!(link->eh_context.unloaded_mask &
			      (1 << dev->devno)))
				continue;

			ata_eh_park_issue_cmd(dev, 0);
			ata_eh_done(link, dev, ATA_EH_PARK);
		}
	}

	/* the rest */
	nr_fails = 0;
	ata_for_each_link(link, ap, PMP_FIRST) {
		struct ata_eh_context *ehc = &link->eh_context;

		if (sata_pmp_attached(ap) && ata_is_host_link(link))
			goto config_lpm;

		/* revalidate existing devices and attach new ones */
		rc = ata_eh_revalidate_and_attach(link, &dev);
		if (rc)
			goto rest_fail;

		/* if PMP got attached, return, pmp EH will take care of it */
		if (link->device->class == ATA_DEV_PMP) {
			ehc->i.action = 0;
			return 0;
		}

		/* configure transfer mode if necessary */
		if (ehc->i.flags & ATA_EHI_SETMODE) {
			rc = ata_set_mode(link, &dev);
			if (rc)
				goto rest_fail;
			ehc->i.flags &= ~ATA_EHI_SETMODE;
		}

		/* If reset has been issued, clear UA to avoid
		 * disrupting the current users of the device.
		 */
		if (ehc->i.flags & ATA_EHI_DID_RESET) {
			ata_for_each_dev(dev, link, ALL) {
				if (dev->class != ATA_DEV_ATAPI)
					continue;
				rc = atapi_eh_clear_ua(dev);
				if (rc)
					goto rest_fail;
				if (zpodd_dev_enabled(dev))
					zpodd_post_poweron(dev);
			}
		}

		/* retry flush if necessary */
		ata_for_each_dev(dev, link, ALL) {
			if (dev->class != ATA_DEV_ATA &&
			    dev->class != ATA_DEV_ZAC)
				continue;
			rc = ata_eh_maybe_retry_flush(dev);
			if (rc)
				goto rest_fail;
		}

	config_lpm:
		/* configure link power saving */
		if (link->lpm_policy != ap->target_lpm_policy) {
			rc = ata_eh_set_lpm(link, ap->target_lpm_policy, &dev);
			if (rc)
				goto rest_fail;
		}

		/* this link is okay now */
		ehc->i.flags = 0;
		continue;

	rest_fail:
		nr_fails++;
		if (dev)
			ata_eh_handle_dev_fail(dev, rc);

		if (ap->pflags & ATA_PFLAG_FROZEN) {
			/* PMP reset requires working host port.
			 * Can't retry if it's frozen.
			 */
			if (sata_pmp_attached(ap))
				goto out;
			break;
		}
	}

	if (nr_fails)
		goto retry;

 out:
	if (rc && r_failed_link)
		*r_failed_link = link;

	DPRINTK("EXIT, rc=%d\n", rc);
	return rc;
}

/**
 *	ata_eh_finish - finish up EH
 *	@ap: host port to finish EH for
 *
 *	Recovery is complete. Clean up EH states and retry or finish
 *	failed qcs.
 *
 *	LOCKING:
 *	None.
 */
void ata_eh_finish(struct ata_port *ap)
{
	int tag;

	/* retry or finish qcs */
	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
		struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);

		if (!(qc->flags & ATA_QCFLAG_FAILED))
			continue;

		if (qc->err_mask) {
			/* FIXME: Once EH migration is complete,
			 * generate sense data in this function,
			 * considering both err_mask and tf.
			 */
			if (qc->flags & ATA_QCFLAG_RETRY)
				ata_eh_qc_retry(qc);
			else
				ata_eh_qc_complete(qc);
		} else {
			if (qc->flags & ATA_QCFLAG_SENSE_VALID) {
				ata_eh_qc_complete(qc);
			} else {
				/* feed zero TF to sense generation */
				memset(&qc->result_tf, 0,
				       sizeof(qc->result_tf));
				ata_eh_qc_retry(qc);
			}
		}
	}

	/* make sure nr_active_links is zero after EH */
	WARN_ON(ap->nr_active_links);
	ap->nr_active_links = 0;
}

/**
 *	ata_do_eh - do standard error handling
 *	@ap: host port to handle error for
 *
 *	@prereset: prereset method (can be NULL)
 *	@softreset: softreset method (can be NULL)
 *	@hardreset: hardreset method (can be NULL)
 *	@postreset: postreset method (can be NULL)
 *
 *	Perform standard error handling sequence.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
	       ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
	       ata_postreset_fn_t postreset)
{
	struct ata_device *dev;
	int rc;

	ata_eh_autopsy(ap);
	ata_eh_report(ap);

	rc = ata_eh_recover(ap, prereset, softreset, hardreset, postreset,
			    NULL);
	if (rc) {
		/* recovery failed, disable every device on the port */
		ata_for_each_dev(dev, &ap->link, ALL)
			ata_dev_disable(dev);
	}

	ata_eh_finish(ap);
}

/**
 *	ata_std_error_handler - standard error handler
 *	@ap: host port to handle error for
 *
 *	Standard error handler
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_std_error_handler(struct ata_port *ap)
{
	struct ata_port_operations *ops = ap->ops;
	ata_reset_fn_t hardreset = ops->hardreset;

	/* ignore built-in hardreset if SCR access is not available */
	if (hardreset == sata_std_hardreset && !sata_scr_valid(&ap->link))
		hardreset = NULL;

	ata_do_eh(ap, ops->prereset, ops->softreset, hardreset, ops->postreset);
}

#ifdef CONFIG_PM
/**
 *	ata_eh_handle_port_suspend - perform port suspend operation
 *	@ap: port to suspend
 *
 *	Suspend @ap.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
static void ata_eh_handle_port_suspend(struct ata_port *ap)
{
	unsigned long flags;
	int rc = 0;
	struct ata_device *dev;

	/* are we suspending? */
	spin_lock_irqsave(ap->lock, flags);
	if (!(ap->pflags & ATA_PFLAG_PM_PENDING) ||
	    ap->pm_mesg.event & PM_EVENT_RESUME) {
		spin_unlock_irqrestore(ap->lock, flags);
		return;
	}
	spin_unlock_irqrestore(ap->lock, flags);

	WARN_ON(ap->pflags & ATA_PFLAG_SUSPENDED);

	/*
	 * If we have a ZPODD attached, check its zero
	 * power ready status before the port is frozen.
	 * Only needed for runtime suspend.
	 */
	if (PMSG_IS_AUTO(ap->pm_mesg)) {
		ata_for_each_dev(dev, &ap->link, ENABLED) {
			if (zpodd_dev_enabled(dev))
				zpodd_on_suspend(dev);
		}
	}

	/* tell ACPI we're suspending */
	rc = ata_acpi_on_suspend(ap);
	if (rc)
		goto out;

	/* suspend */
	ata_eh_freeze_port(ap);

	if (ap->ops->port_suspend)
		rc = ap->ops->port_suspend(ap, ap->pm_mesg);

	ata_acpi_set_state(ap, ap->pm_mesg);
 out:
	/* update the flags */
	spin_lock_irqsave(ap->lock, flags);
	ap->pflags &= ~ATA_PFLAG_PM_PENDING;
	if (rc == 0)
		ap->pflags |= ATA_PFLAG_SUSPENDED;
	else if (ap->pflags & ATA_PFLAG_FROZEN)
		/* suspend failed with the port frozen, let EH retry */
		ata_port_schedule_eh(ap);
	spin_unlock_irqrestore(ap->lock, flags);

	return;
}

/**
 *	ata_eh_handle_port_resume - perform port resume operation
 *	@ap: port to resume
 *
 *	Resume @ap.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
static void ata_eh_handle_port_resume(struct ata_port *ap)
{
	struct ata_link *link;
	struct ata_device *dev;
	unsigned long flags;
	int rc = 0;

	/* are we resuming? */
	spin_lock_irqsave(ap->lock, flags);
	if (!(ap->pflags & ATA_PFLAG_PM_PENDING) ||
	    !(ap->pm_mesg.event & PM_EVENT_RESUME)) {
		spin_unlock_irqrestore(ap->lock, flags);
		return;
	}
	spin_unlock_irqrestore(ap->lock, flags);

	WARN_ON(!(ap->pflags & ATA_PFLAG_SUSPENDED));

	/*
	 * Error timestamps are in jiffies which doesn't run while
	 * suspended and PHY events during resume isn't too uncommon.
	 * When the two are combined, it can lead to unnecessary speed
	 * downs if the machine is suspended and resumed repeatedly.
	 * Clear error history.
	 */
	ata_for_each_link(link, ap, HOST_FIRST)
		ata_for_each_dev(dev, link, ALL)
			ata_ering_clear(&dev->ering);

	ata_acpi_set_state(ap, ap->pm_mesg);

	if (ap->ops->port_resume)
		rc = ap->ops->port_resume(ap);

	/* tell ACPI that we're resuming */
	ata_acpi_on_resume(ap);

	/* update the flags */
	spin_lock_irqsave(ap->lock, flags);
	ap->pflags &= ~(ATA_PFLAG_PM_PENDING | ATA_PFLAG_SUSPENDED);
	spin_unlock_irqrestore(ap->lock, flags);
}
#endif /* CONFIG_PM */
gpl-2.0
allanm84/linux-fslc
drivers/block/drbd/drbd_bitmap.c
900
47620
/* drbd_bitmap.c This file is part of DRBD by Philipp Reisner and Lars Ellenberg. Copyright (C) 2004-2008, LINBIT Information Technologies GmbH. Copyright (C) 2004-2008, Philipp Reisner <philipp.reisner@linbit.com>. Copyright (C) 2004-2008, Lars Ellenberg <lars.ellenberg@linbit.com>. drbd is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. drbd is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with drbd; see the file COPYING. If not, write to the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/bitops.h> #include <linux/vmalloc.h> #include <linux/string.h> #include <linux/drbd.h> #include <linux/slab.h> #include <asm/kmap_types.h> #include "drbd_int.h" /* OPAQUE outside this file! * interface defined in drbd_int.h * convention: * function name drbd_bm_... => used elsewhere, "public". * function name bm_... => internal to implementation, "private". */ /* * LIMITATIONS: * We want to support >= peta byte of backend storage, while for now still using * a granularity of one bit per 4KiB of storage. * 1 << 50 bytes backend storage (1 PiB) * 1 << (50 - 12) bits needed * 38 --> we need u64 to index and count bits * 1 << (38 - 3) bitmap bytes needed * 35 --> we still need u64 to index and count bytes * (that's 32 GiB of bitmap for 1 PiB storage) * 1 << (35 - 2) 32bit longs needed * 33 --> we'd even need u64 to index and count 32bit long words. 
* 1 << (35 - 3) 64bit longs needed * 32 --> we could get away with a 32bit unsigned int to index and count * 64bit long words, but I rather stay with unsigned long for now. * We probably should neither count nor point to bytes or long words * directly, but either by bitnumber, or by page index and offset. * 1 << (35 - 12) * 22 --> we need that much 4KiB pages of bitmap. * 1 << (22 + 3) --> on a 64bit arch, * we need 32 MiB to store the array of page pointers. * * Because I'm lazy, and because the resulting patch was too large, too ugly * and still incomplete, on 32bit we still "only" support 16 TiB (minus some), * (1 << 32) bits * 4k storage. * * bitmap storage and IO: * Bitmap is stored little endian on disk, and is kept little endian in * core memory. Currently we still hold the full bitmap in core as long * as we are "attached" to a local disk, which at 32 GiB for 1PiB storage * seems excessive. * * We plan to reduce the amount of in-core bitmap pages by paging them in * and out against their on-disk location as necessary, but need to make * sure we don't cause too much meta data IO, and must not deadlock in * tight memory situations. This needs some more work. */ /* * NOTE * Access to the *bm_pages is protected by bm_lock. * It is safe to read the other members within the lock. * * drbd_bm_set_bits is called from bio_endio callbacks, * We may be called with irq already disabled, * so we need spin_lock_irqsave(). * And we need the kmap_atomic. */ struct drbd_bitmap { struct page **bm_pages; spinlock_t bm_lock; /* see LIMITATIONS: above */ unsigned long bm_set; /* nr of set bits; THINK maybe atomic_t? 
*/ unsigned long bm_bits; size_t bm_words; size_t bm_number_of_pages; sector_t bm_dev_capacity; struct mutex bm_change; /* serializes resize operations */ wait_queue_head_t bm_io_wait; /* used to serialize IO of single pages */ enum bm_flag bm_flags; /* debugging aid, in case we are still racy somewhere */ char *bm_why; struct task_struct *bm_task; }; #define bm_print_lock_info(m) __bm_print_lock_info(m, __func__) static void __bm_print_lock_info(struct drbd_device *device, const char *func) { struct drbd_bitmap *b = device->bitmap; if (!__ratelimit(&drbd_ratelimit_state)) return; drbd_err(device, "FIXME %s[%d] in %s, bitmap locked for '%s' by %s[%d]\n", current->comm, task_pid_nr(current), func, b->bm_why ?: "?", b->bm_task->comm, task_pid_nr(b->bm_task)); } void drbd_bm_lock(struct drbd_device *device, char *why, enum bm_flag flags) { struct drbd_bitmap *b = device->bitmap; int trylock_failed; if (!b) { drbd_err(device, "FIXME no bitmap in drbd_bm_lock!?\n"); return; } trylock_failed = !mutex_trylock(&b->bm_change); if (trylock_failed) { drbd_warn(device, "%s[%d] going to '%s' but bitmap already locked for '%s' by %s[%d]\n", current->comm, task_pid_nr(current), why, b->bm_why ?: "?", b->bm_task->comm, task_pid_nr(b->bm_task)); mutex_lock(&b->bm_change); } if (BM_LOCKED_MASK & b->bm_flags) drbd_err(device, "FIXME bitmap already locked in bm_lock\n"); b->bm_flags |= flags & BM_LOCKED_MASK; b->bm_why = why; b->bm_task = current; } void drbd_bm_unlock(struct drbd_device *device) { struct drbd_bitmap *b = device->bitmap; if (!b) { drbd_err(device, "FIXME no bitmap in drbd_bm_unlock!?\n"); return; } if (!(BM_LOCKED_MASK & device->bitmap->bm_flags)) drbd_err(device, "FIXME bitmap not locked in bm_unlock\n"); b->bm_flags &= ~BM_LOCKED_MASK; b->bm_why = NULL; b->bm_task = NULL; mutex_unlock(&b->bm_change); } /* we store some "meta" info about our pages in page->private */ /* at a granularity of 4k storage per bitmap bit: * one peta byte storage: 1<<50 byte, 1<<38 * 4k 
storage blocks * 1<<38 bits, * 1<<23 4k bitmap pages. * Use 24 bits as page index, covers 2 peta byte storage * at a granularity of 4k per bit. * Used to report the failed page idx on io error from the endio handlers. */ #define BM_PAGE_IDX_MASK ((1UL<<24)-1) /* this page is currently read in, or written back */ #define BM_PAGE_IO_LOCK 31 /* if there has been an IO error for this page */ #define BM_PAGE_IO_ERROR 30 /* this is to be able to intelligently skip disk IO, * set if bits have been set since last IO. */ #define BM_PAGE_NEED_WRITEOUT 29 /* to mark for lazy writeout once syncer cleared all clearable bits, * we if bits have been cleared since last IO. */ #define BM_PAGE_LAZY_WRITEOUT 28 /* pages marked with this "HINT" will be considered for writeout * on activity log transactions */ #define BM_PAGE_HINT_WRITEOUT 27 /* store_page_idx uses non-atomic assignment. It is only used directly after * allocating the page. All other bm_set_page_* and bm_clear_page_* need to * use atomic bit manipulation, as set_out_of_sync (and therefore bitmap * changes) may happen from various contexts, and wait_on_bit/wake_up_bit * requires it all to be atomic as well. */ static void bm_store_page_idx(struct page *page, unsigned long idx) { BUG_ON(0 != (idx & ~BM_PAGE_IDX_MASK)); set_page_private(page, idx); } static unsigned long bm_page_to_idx(struct page *page) { return page_private(page) & BM_PAGE_IDX_MASK; } /* As is very unlikely that the same page is under IO from more than one * context, we can get away with a bit per page and one wait queue per bitmap. 
*/ static void bm_page_lock_io(struct drbd_device *device, int page_nr) { struct drbd_bitmap *b = device->bitmap; void *addr = &page_private(b->bm_pages[page_nr]); wait_event(b->bm_io_wait, !test_and_set_bit(BM_PAGE_IO_LOCK, addr)); } static void bm_page_unlock_io(struct drbd_device *device, int page_nr) { struct drbd_bitmap *b = device->bitmap; void *addr = &page_private(b->bm_pages[page_nr]); clear_bit_unlock(BM_PAGE_IO_LOCK, addr); wake_up(&device->bitmap->bm_io_wait); } /* set _before_ submit_io, so it may be reset due to being changed * while this page is in flight... will get submitted later again */ static void bm_set_page_unchanged(struct page *page) { /* use cmpxchg? */ clear_bit(BM_PAGE_NEED_WRITEOUT, &page_private(page)); clear_bit(BM_PAGE_LAZY_WRITEOUT, &page_private(page)); } static void bm_set_page_need_writeout(struct page *page) { set_bit(BM_PAGE_NEED_WRITEOUT, &page_private(page)); } /** * drbd_bm_mark_for_writeout() - mark a page with a "hint" to be considered for writeout * @device: DRBD device. * @page_nr: the bitmap page to mark with the "hint" flag * * From within an activity log transaction, we mark a few pages with these * hints, then call drbd_bm_write_hinted(), which will only write out changed * pages which are flagged with this mark. 
*/ void drbd_bm_mark_for_writeout(struct drbd_device *device, int page_nr) { struct page *page; if (page_nr >= device->bitmap->bm_number_of_pages) { drbd_warn(device, "BAD: page_nr: %u, number_of_pages: %u\n", page_nr, (int)device->bitmap->bm_number_of_pages); return; } page = device->bitmap->bm_pages[page_nr]; set_bit(BM_PAGE_HINT_WRITEOUT, &page_private(page)); } static int bm_test_page_unchanged(struct page *page) { volatile const unsigned long *addr = &page_private(page); return (*addr & ((1UL<<BM_PAGE_NEED_WRITEOUT)|(1UL<<BM_PAGE_LAZY_WRITEOUT))) == 0; } static void bm_set_page_io_err(struct page *page) { set_bit(BM_PAGE_IO_ERROR, &page_private(page)); } static void bm_clear_page_io_err(struct page *page) { clear_bit(BM_PAGE_IO_ERROR, &page_private(page)); } static void bm_set_page_lazy_writeout(struct page *page) { set_bit(BM_PAGE_LAZY_WRITEOUT, &page_private(page)); } static int bm_test_page_lazy_writeout(struct page *page) { return test_bit(BM_PAGE_LAZY_WRITEOUT, &page_private(page)); } /* on a 32bit box, this would allow for exactly (2<<38) bits. 
*/ static unsigned int bm_word_to_page_idx(struct drbd_bitmap *b, unsigned long long_nr) { /* page_nr = (word*sizeof(long)) >> PAGE_SHIFT; */ unsigned int page_nr = long_nr >> (PAGE_SHIFT - LN2_BPL + 3); BUG_ON(page_nr >= b->bm_number_of_pages); return page_nr; } static unsigned int bm_bit_to_page_idx(struct drbd_bitmap *b, u64 bitnr) { /* page_nr = (bitnr/8) >> PAGE_SHIFT; */ unsigned int page_nr = bitnr >> (PAGE_SHIFT + 3); BUG_ON(page_nr >= b->bm_number_of_pages); return page_nr; } static unsigned long *__bm_map_pidx(struct drbd_bitmap *b, unsigned int idx) { struct page *page = b->bm_pages[idx]; return (unsigned long *) kmap_atomic(page); } static unsigned long *bm_map_pidx(struct drbd_bitmap *b, unsigned int idx) { return __bm_map_pidx(b, idx); } static void __bm_unmap(unsigned long *p_addr) { kunmap_atomic(p_addr); }; static void bm_unmap(unsigned long *p_addr) { return __bm_unmap(p_addr); } /* long word offset of _bitmap_ sector */ #define S2W(s) ((s)<<(BM_EXT_SHIFT-BM_BLOCK_SHIFT-LN2_BPL)) /* word offset from start of bitmap to word number _in_page_ * modulo longs per page #define MLPP(X) ((X) % (PAGE_SIZE/sizeof(long)) hm, well, Philipp thinks gcc might not optimize the % into & (... - 1) so do it explicitly: */ #define MLPP(X) ((X) & ((PAGE_SIZE/sizeof(long))-1)) /* Long words per page */ #define LWPP (PAGE_SIZE/sizeof(long)) /* * actually most functions herein should take a struct drbd_bitmap*, not a * struct drbd_device*, but for the debug macros I like to have the device around * to be able to report device specific. */ static void bm_free_pages(struct page **pages, unsigned long number) { unsigned long i; if (!pages) return; for (i = 0; i < number; i++) { if (!pages[i]) { pr_alert("bm_free_pages tried to free a NULL pointer; i=%lu n=%lu\n", i, number); continue; } __free_page(pages[i]); pages[i] = NULL; } } static void bm_vk_free(void *ptr, int v) { if (v) vfree(ptr); else kfree(ptr); } /* * "have" and "want" are NUMBER OF PAGES. 
*/ static struct page **bm_realloc_pages(struct drbd_bitmap *b, unsigned long want) { struct page **old_pages = b->bm_pages; struct page **new_pages, *page; unsigned int i, bytes, vmalloced = 0; unsigned long have = b->bm_number_of_pages; BUG_ON(have == 0 && old_pages != NULL); BUG_ON(have != 0 && old_pages == NULL); if (have == want) return old_pages; /* Trying kmalloc first, falling back to vmalloc. * GFP_NOIO, as this is called while drbd IO is "suspended", * and during resize or attach on diskless Primary, * we must not block on IO to ourselves. * Context is receiver thread or dmsetup. */ bytes = sizeof(struct page *)*want; new_pages = kzalloc(bytes, GFP_NOIO | __GFP_NOWARN); if (!new_pages) { new_pages = __vmalloc(bytes, GFP_NOIO | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL); if (!new_pages) return NULL; vmalloced = 1; } if (want >= have) { for (i = 0; i < have; i++) new_pages[i] = old_pages[i]; for (; i < want; i++) { page = alloc_page(GFP_NOIO | __GFP_HIGHMEM); if (!page) { bm_free_pages(new_pages + have, i - have); bm_vk_free(new_pages, vmalloced); return NULL; } /* we want to know which page it is * from the endio handlers */ bm_store_page_idx(page, i); new_pages[i] = page; } } else { for (i = 0; i < want; i++) new_pages[i] = old_pages[i]; /* NOT HERE, we are outside the spinlock! bm_free_pages(old_pages + want, have - want); */ } if (vmalloced) b->bm_flags |= BM_P_VMALLOCED; else b->bm_flags &= ~BM_P_VMALLOCED; return new_pages; } /* * called on driver init only. TODO call when a device is created. * allocates the drbd_bitmap, and stores it in device->bitmap. 
*/ int drbd_bm_init(struct drbd_device *device) { struct drbd_bitmap *b = device->bitmap; WARN_ON(b != NULL); b = kzalloc(sizeof(struct drbd_bitmap), GFP_KERNEL); if (!b) return -ENOMEM; spin_lock_init(&b->bm_lock); mutex_init(&b->bm_change); init_waitqueue_head(&b->bm_io_wait); device->bitmap = b; return 0; } sector_t drbd_bm_capacity(struct drbd_device *device) { if (!expect(device->bitmap)) return 0; return device->bitmap->bm_dev_capacity; } /* called on driver unload. TODO: call when a device is destroyed. */ void drbd_bm_cleanup(struct drbd_device *device) { if (!expect(device->bitmap)) return; bm_free_pages(device->bitmap->bm_pages, device->bitmap->bm_number_of_pages); bm_vk_free(device->bitmap->bm_pages, (BM_P_VMALLOCED & device->bitmap->bm_flags)); kfree(device->bitmap); device->bitmap = NULL; } /* * since (b->bm_bits % BITS_PER_LONG) != 0, * this masks out the remaining bits. * Returns the number of bits cleared. */ #define BITS_PER_PAGE (1UL << (PAGE_SHIFT + 3)) #define BITS_PER_PAGE_MASK (BITS_PER_PAGE - 1) #define BITS_PER_LONG_MASK (BITS_PER_LONG - 1) static int bm_clear_surplus(struct drbd_bitmap *b) { unsigned long mask; unsigned long *p_addr, *bm; int tmp; int cleared = 0; /* number of bits modulo bits per page */ tmp = (b->bm_bits & BITS_PER_PAGE_MASK); /* mask the used bits of the word containing the last bit */ mask = (1UL << (tmp & BITS_PER_LONG_MASK)) -1; /* bitmap is always stored little endian, * on disk and in core memory alike */ mask = cpu_to_lel(mask); p_addr = bm_map_pidx(b, b->bm_number_of_pages - 1); bm = p_addr + (tmp/BITS_PER_LONG); if (mask) { /* If mask != 0, we are not exactly aligned, so bm now points * to the long containing the last bit. * If mask == 0, bm already points to the word immediately * after the last (long word aligned) bit. 
*/ cleared = hweight_long(*bm & ~mask); *bm &= mask; bm++; } if (BITS_PER_LONG == 32 && ((bm - p_addr) & 1) == 1) { /* on a 32bit arch, we may need to zero out * a padding long to align with a 64bit remote */ cleared += hweight_long(*bm); *bm = 0; } bm_unmap(p_addr); return cleared; } static void bm_set_surplus(struct drbd_bitmap *b) { unsigned long mask; unsigned long *p_addr, *bm; int tmp; /* number of bits modulo bits per page */ tmp = (b->bm_bits & BITS_PER_PAGE_MASK); /* mask the used bits of the word containing the last bit */ mask = (1UL << (tmp & BITS_PER_LONG_MASK)) -1; /* bitmap is always stored little endian, * on disk and in core memory alike */ mask = cpu_to_lel(mask); p_addr = bm_map_pidx(b, b->bm_number_of_pages - 1); bm = p_addr + (tmp/BITS_PER_LONG); if (mask) { /* If mask != 0, we are not exactly aligned, so bm now points * to the long containing the last bit. * If mask == 0, bm already points to the word immediately * after the last (long word aligned) bit. */ *bm |= ~mask; bm++; } if (BITS_PER_LONG == 32 && ((bm - p_addr) & 1) == 1) { /* on a 32bit arch, we may need to zero out * a padding long to align with a 64bit remote */ *bm = ~0UL; } bm_unmap(p_addr); } /* you better not modify the bitmap while this is running, * or its results will be stale */ static unsigned long bm_count_bits(struct drbd_bitmap *b) { unsigned long *p_addr; unsigned long bits = 0; unsigned long mask = (1UL << (b->bm_bits & BITS_PER_LONG_MASK)) -1; int idx, i, last_word; /* all but last page */ for (idx = 0; idx < b->bm_number_of_pages - 1; idx++) { p_addr = __bm_map_pidx(b, idx); for (i = 0; i < LWPP; i++) bits += hweight_long(p_addr[i]); __bm_unmap(p_addr); cond_resched(); } /* last (or only) page */ last_word = ((b->bm_bits - 1) & BITS_PER_PAGE_MASK) >> LN2_BPL; p_addr = __bm_map_pidx(b, idx); for (i = 0; i < last_word; i++) bits += hweight_long(p_addr[i]); p_addr[last_word] &= cpu_to_lel(mask); bits += hweight_long(p_addr[last_word]); /* 32bit arch, may have an unused 
padding long */ if (BITS_PER_LONG == 32 && (last_word & 1) == 0) p_addr[last_word+1] = 0; __bm_unmap(p_addr); return bits; } /* offset and len in long words.*/ static void bm_memset(struct drbd_bitmap *b, size_t offset, int c, size_t len) { unsigned long *p_addr, *bm; unsigned int idx; size_t do_now, end; end = offset + len; if (end > b->bm_words) { pr_alert("bm_memset end > bm_words\n"); return; } while (offset < end) { do_now = min_t(size_t, ALIGN(offset + 1, LWPP), end) - offset; idx = bm_word_to_page_idx(b, offset); p_addr = bm_map_pidx(b, idx); bm = p_addr + MLPP(offset); if (bm+do_now > p_addr + LWPP) { pr_alert("BUG BUG BUG! p_addr:%p bm:%p do_now:%d\n", p_addr, bm, (int)do_now); } else memset(bm, c, do_now * sizeof(long)); bm_unmap(p_addr); bm_set_page_need_writeout(b->bm_pages[idx]); offset += do_now; } } /* For the layout, see comment above drbd_md_set_sector_offsets(). */ static u64 drbd_md_on_disk_bits(struct drbd_backing_dev *ldev) { u64 bitmap_sectors; if (ldev->md.al_offset == 8) bitmap_sectors = ldev->md.md_size_sect - ldev->md.bm_offset; else bitmap_sectors = ldev->md.al_offset - ldev->md.bm_offset; return bitmap_sectors << (9 + 3); } /* * make sure the bitmap has enough room for the attached storage, * if necessary, resize. * called whenever we may have changed the device size. * returns -ENOMEM if we could not allocate enough memory, 0 on success. * In case this is actually a resize, we copy the old bitmap into the new one. * Otherwise, the bitmap is initialized to all bits set. 
*/ int drbd_bm_resize(struct drbd_device *device, sector_t capacity, int set_new_bits) { struct drbd_bitmap *b = device->bitmap; unsigned long bits, words, owords, obits; unsigned long want, have, onpages; /* number of pages */ struct page **npages, **opages = NULL; int err = 0, growing; int opages_vmalloced; if (!expect(b)) return -ENOMEM; drbd_bm_lock(device, "resize", BM_LOCKED_MASK); drbd_info(device, "drbd_bm_resize called with capacity == %llu\n", (unsigned long long)capacity); if (capacity == b->bm_dev_capacity) goto out; opages_vmalloced = (BM_P_VMALLOCED & b->bm_flags); if (capacity == 0) { spin_lock_irq(&b->bm_lock); opages = b->bm_pages; onpages = b->bm_number_of_pages; owords = b->bm_words; b->bm_pages = NULL; b->bm_number_of_pages = b->bm_set = b->bm_bits = b->bm_words = b->bm_dev_capacity = 0; spin_unlock_irq(&b->bm_lock); bm_free_pages(opages, onpages); bm_vk_free(opages, opages_vmalloced); goto out; } bits = BM_SECT_TO_BIT(ALIGN(capacity, BM_SECT_PER_BIT)); /* if we would use words = ALIGN(bits,BITS_PER_LONG) >> LN2_BPL; a 32bit host could present the wrong number of words to a 64bit host. 
*/ words = ALIGN(bits, 64) >> LN2_BPL; if (get_ldev(device)) { u64 bits_on_disk = drbd_md_on_disk_bits(device->ldev); put_ldev(device); if (bits > bits_on_disk) { drbd_info(device, "bits = %lu\n", bits); drbd_info(device, "bits_on_disk = %llu\n", bits_on_disk); err = -ENOSPC; goto out; } } want = ALIGN(words*sizeof(long), PAGE_SIZE) >> PAGE_SHIFT; have = b->bm_number_of_pages; if (want == have) { D_ASSERT(device, b->bm_pages != NULL); npages = b->bm_pages; } else { if (drbd_insert_fault(device, DRBD_FAULT_BM_ALLOC)) npages = NULL; else npages = bm_realloc_pages(b, want); } if (!npages) { err = -ENOMEM; goto out; } spin_lock_irq(&b->bm_lock); opages = b->bm_pages; owords = b->bm_words; obits = b->bm_bits; growing = bits > obits; if (opages && growing && set_new_bits) bm_set_surplus(b); b->bm_pages = npages; b->bm_number_of_pages = want; b->bm_bits = bits; b->bm_words = words; b->bm_dev_capacity = capacity; if (growing) { if (set_new_bits) { bm_memset(b, owords, 0xff, words-owords); b->bm_set += bits - obits; } else bm_memset(b, owords, 0x00, words-owords); } if (want < have) { /* implicit: (opages != NULL) && (opages != npages) */ bm_free_pages(opages + want, have - want); } (void)bm_clear_surplus(b); spin_unlock_irq(&b->bm_lock); if (opages != npages) bm_vk_free(opages, opages_vmalloced); if (!growing) b->bm_set = bm_count_bits(b); drbd_info(device, "resync bitmap: bits=%lu words=%lu pages=%lu\n", bits, words, want); out: drbd_bm_unlock(device); return err; } /* inherently racy: * if not protected by other means, return value may be out of date when * leaving this function... * we still need to lock it, since it is important that this returns * bm_set == 0 precisely. * * maybe bm_set should be atomic_t ? 
*/ unsigned long _drbd_bm_total_weight(struct drbd_device *device) { struct drbd_bitmap *b = device->bitmap; unsigned long s; unsigned long flags; if (!expect(b)) return 0; if (!expect(b->bm_pages)) return 0; spin_lock_irqsave(&b->bm_lock, flags); s = b->bm_set; spin_unlock_irqrestore(&b->bm_lock, flags); return s; } unsigned long drbd_bm_total_weight(struct drbd_device *device) { unsigned long s; /* if I don't have a disk, I don't know about out-of-sync status */ if (!get_ldev_if_state(device, D_NEGOTIATING)) return 0; s = _drbd_bm_total_weight(device); put_ldev(device); return s; } size_t drbd_bm_words(struct drbd_device *device) { struct drbd_bitmap *b = device->bitmap; if (!expect(b)) return 0; if (!expect(b->bm_pages)) return 0; return b->bm_words; } unsigned long drbd_bm_bits(struct drbd_device *device) { struct drbd_bitmap *b = device->bitmap; if (!expect(b)) return 0; return b->bm_bits; } /* merge number words from buffer into the bitmap starting at offset. * buffer[i] is expected to be little endian unsigned long. * bitmap must be locked by drbd_bm_lock. * currently only used from receive_bitmap. 
*/ void drbd_bm_merge_lel(struct drbd_device *device, size_t offset, size_t number, unsigned long *buffer) { struct drbd_bitmap *b = device->bitmap; unsigned long *p_addr, *bm; unsigned long word, bits; unsigned int idx; size_t end, do_now; end = offset + number; if (!expect(b)) return; if (!expect(b->bm_pages)) return; if (number == 0) return; WARN_ON(offset >= b->bm_words); WARN_ON(end > b->bm_words); spin_lock_irq(&b->bm_lock); while (offset < end) { do_now = min_t(size_t, ALIGN(offset+1, LWPP), end) - offset; idx = bm_word_to_page_idx(b, offset); p_addr = bm_map_pidx(b, idx); bm = p_addr + MLPP(offset); offset += do_now; while (do_now--) { bits = hweight_long(*bm); word = *bm | *buffer++; *bm++ = word; b->bm_set += hweight_long(word) - bits; } bm_unmap(p_addr); bm_set_page_need_writeout(b->bm_pages[idx]); } /* with 32bit <-> 64bit cross-platform connect * this is only correct for current usage, * where we _know_ that we are 64 bit aligned, * and know that this function is used in this way, too... */ if (end == b->bm_words) b->bm_set -= bm_clear_surplus(b); spin_unlock_irq(&b->bm_lock); } /* copy number words from the bitmap starting at offset into the buffer. * buffer[i] will be little endian unsigned long. 
*/ void drbd_bm_get_lel(struct drbd_device *device, size_t offset, size_t number, unsigned long *buffer) { struct drbd_bitmap *b = device->bitmap; unsigned long *p_addr, *bm; size_t end, do_now; end = offset + number; if (!expect(b)) return; if (!expect(b->bm_pages)) return; spin_lock_irq(&b->bm_lock); if ((offset >= b->bm_words) || (end > b->bm_words) || (number <= 0)) drbd_err(device, "offset=%lu number=%lu bm_words=%lu\n", (unsigned long) offset, (unsigned long) number, (unsigned long) b->bm_words); else { while (offset < end) { do_now = min_t(size_t, ALIGN(offset+1, LWPP), end) - offset; p_addr = bm_map_pidx(b, bm_word_to_page_idx(b, offset)); bm = p_addr + MLPP(offset); offset += do_now; while (do_now--) *buffer++ = *bm++; bm_unmap(p_addr); } } spin_unlock_irq(&b->bm_lock); } /* set all bits in the bitmap */ void drbd_bm_set_all(struct drbd_device *device) { struct drbd_bitmap *b = device->bitmap; if (!expect(b)) return; if (!expect(b->bm_pages)) return; spin_lock_irq(&b->bm_lock); bm_memset(b, 0, 0xff, b->bm_words); (void)bm_clear_surplus(b); b->bm_set = b->bm_bits; spin_unlock_irq(&b->bm_lock); } /* clear all bits in the bitmap */ void drbd_bm_clear_all(struct drbd_device *device) { struct drbd_bitmap *b = device->bitmap; if (!expect(b)) return; if (!expect(b->bm_pages)) return; spin_lock_irq(&b->bm_lock); bm_memset(b, 0, 0, b->bm_words); b->bm_set = 0; spin_unlock_irq(&b->bm_lock); } static void drbd_bm_aio_ctx_destroy(struct kref *kref) { struct drbd_bm_aio_ctx *ctx = container_of(kref, struct drbd_bm_aio_ctx, kref); unsigned long flags; spin_lock_irqsave(&ctx->device->resource->req_lock, flags); list_del(&ctx->list); spin_unlock_irqrestore(&ctx->device->resource->req_lock, flags); put_ldev(ctx->device); kfree(ctx); } /* bv_page may be a copy, or may be the original */ static void drbd_bm_endio(struct bio *bio, int error) { struct drbd_bm_aio_ctx *ctx = bio->bi_private; struct drbd_device *device = ctx->device; struct drbd_bitmap *b = device->bitmap; 
unsigned int idx = bm_page_to_idx(bio->bi_io_vec[0].bv_page); int uptodate = bio_flagged(bio, BIO_UPTODATE); /* strange behavior of some lower level drivers... * fail the request by clearing the uptodate flag, * but do not return any error?! * do we want to WARN() on this? */ if (!error && !uptodate) error = -EIO; if ((ctx->flags & BM_AIO_COPY_PAGES) == 0 && !bm_test_page_unchanged(b->bm_pages[idx])) drbd_warn(device, "bitmap page idx %u changed during IO!\n", idx); if (error) { /* ctx error will hold the completed-last non-zero error code, * in case error codes differ. */ ctx->error = error; bm_set_page_io_err(b->bm_pages[idx]); /* Not identical to on disk version of it. * Is BM_PAGE_IO_ERROR enough? */ if (__ratelimit(&drbd_ratelimit_state)) drbd_err(device, "IO ERROR %d on bitmap page idx %u\n", error, idx); } else { bm_clear_page_io_err(b->bm_pages[idx]); dynamic_drbd_dbg(device, "bitmap page idx %u completed\n", idx); } bm_page_unlock_io(device, idx); if (ctx->flags & BM_AIO_COPY_PAGES) mempool_free(bio->bi_io_vec[0].bv_page, drbd_md_io_page_pool); bio_put(bio); if (atomic_dec_and_test(&ctx->in_flight)) { ctx->done = 1; wake_up(&device->misc_wait); kref_put(&ctx->kref, &drbd_bm_aio_ctx_destroy); } } static void bm_page_io_async(struct drbd_bm_aio_ctx *ctx, int page_nr) __must_hold(local) { struct bio *bio = bio_alloc_drbd(GFP_NOIO); struct drbd_device *device = ctx->device; struct drbd_bitmap *b = device->bitmap; struct page *page; unsigned int len; unsigned int rw = (ctx->flags & BM_AIO_READ) ? 
READ : WRITE; sector_t on_disk_sector = device->ldev->md.md_offset + device->ldev->md.bm_offset; on_disk_sector += ((sector_t)page_nr) << (PAGE_SHIFT-9); /* this might happen with very small * flexible external meta data device, * or with PAGE_SIZE > 4k */ len = min_t(unsigned int, PAGE_SIZE, (drbd_md_last_sector(device->ldev) - on_disk_sector + 1)<<9); /* serialize IO on this page */ bm_page_lock_io(device, page_nr); /* before memcpy and submit, * so it can be redirtied any time */ bm_set_page_unchanged(b->bm_pages[page_nr]); if (ctx->flags & BM_AIO_COPY_PAGES) { page = mempool_alloc(drbd_md_io_page_pool, __GFP_HIGHMEM|__GFP_WAIT); copy_highpage(page, b->bm_pages[page_nr]); bm_store_page_idx(page, page_nr); } else page = b->bm_pages[page_nr]; bio->bi_bdev = device->ldev->md_bdev; bio->bi_iter.bi_sector = on_disk_sector; /* bio_add_page of a single page to an empty bio will always succeed, * according to api. Do we want to assert that? */ bio_add_page(bio, page, len, 0); bio->bi_private = ctx; bio->bi_end_io = drbd_bm_endio; if (drbd_insert_fault(device, (rw & WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD)) { bio->bi_rw |= rw; bio_endio(bio, -EIO); } else { submit_bio(rw, bio); /* this should not count as user activity and cause the * resync to throttle -- see drbd_rs_should_slow_down(). */ atomic_add(len >> 9, &device->rs_sect_ev); } } /* * bm_rw: read/write the whole bitmap from/to its on disk location. */ static int bm_rw(struct drbd_device *device, const unsigned int flags, unsigned lazy_writeout_upper_idx) __must_hold(local) { struct drbd_bm_aio_ctx *ctx; struct drbd_bitmap *b = device->bitmap; int num_pages, i, count = 0; unsigned long now; char ppb[10]; int err = 0; /* * We are protected against bitmap disappearing/resizing by holding an * ldev reference (caller must have called get_ldev()). * For read/write, we are protected against changes to the bitmap by * the bitmap lock (see drbd_bitmap_io). 
* For lazy writeout, we don't care for ongoing changes to the bitmap, * as we submit copies of pages anyways. */ ctx = kmalloc(sizeof(struct drbd_bm_aio_ctx), GFP_NOIO); if (!ctx) return -ENOMEM; *ctx = (struct drbd_bm_aio_ctx) { .device = device, .start_jif = jiffies, .in_flight = ATOMIC_INIT(1), .done = 0, .flags = flags, .error = 0, .kref = { ATOMIC_INIT(2) }, }; if (!get_ldev_if_state(device, D_ATTACHING)) { /* put is in drbd_bm_aio_ctx_destroy() */ drbd_err(device, "ASSERT FAILED: get_ldev_if_state() == 1 in bm_rw()\n"); kfree(ctx); return -ENODEV; } /* Here D_ATTACHING is sufficient since drbd_bm_read() is called only from drbd_adm_attach(), after device->ldev was assigned. */ if (0 == (ctx->flags & ~BM_AIO_READ)) WARN_ON(!(BM_LOCKED_MASK & b->bm_flags)); spin_lock_irq(&device->resource->req_lock); list_add_tail(&ctx->list, &device->pending_bitmap_io); spin_unlock_irq(&device->resource->req_lock); num_pages = b->bm_number_of_pages; now = jiffies; /* let the layers below us try to merge these bios... */ for (i = 0; i < num_pages; i++) { /* ignore completely unchanged pages */ if (lazy_writeout_upper_idx && i == lazy_writeout_upper_idx) break; if (!(flags & BM_AIO_READ)) { if ((flags & BM_AIO_WRITE_HINTED) && !test_and_clear_bit(BM_PAGE_HINT_WRITEOUT, &page_private(b->bm_pages[i]))) continue; if (!(flags & BM_AIO_WRITE_ALL_PAGES) && bm_test_page_unchanged(b->bm_pages[i])) { dynamic_drbd_dbg(device, "skipped bm write for idx %u\n", i); continue; } /* during lazy writeout, * ignore those pages not marked for lazy writeout. */ if (lazy_writeout_upper_idx && !bm_test_page_lazy_writeout(b->bm_pages[i])) { dynamic_drbd_dbg(device, "skipped bm lazy write for idx %u\n", i); continue; } } atomic_inc(&ctx->in_flight); bm_page_io_async(ctx, i); ++count; cond_resched(); } /* * We initialize ctx->in_flight to one to make sure drbd_bm_endio * will not set ctx->done early, and decrement / test it here. If there * are still some bios in flight, we need to wait for them here. 
* If all IO is done already (or nothing had been submitted), there is * no need to wait. Still, we need to put the kref associated with the * "in_flight reached zero, all done" event. */ if (!atomic_dec_and_test(&ctx->in_flight)) wait_until_done_or_force_detached(device, device->ldev, &ctx->done); else kref_put(&ctx->kref, &drbd_bm_aio_ctx_destroy); /* summary for global bitmap IO */ if (flags == 0) drbd_info(device, "bitmap %s of %u pages took %lu jiffies\n", (flags & BM_AIO_READ) ? "READ" : "WRITE", count, jiffies - now); if (ctx->error) { drbd_alert(device, "we had at least one MD IO ERROR during bitmap IO\n"); drbd_chk_io_error(device, 1, DRBD_META_IO_ERROR); err = -EIO; /* ctx->error ? */ } if (atomic_read(&ctx->in_flight)) err = -EIO; /* Disk timeout/force-detach during IO... */ now = jiffies; if (flags & BM_AIO_READ) { b->bm_set = bm_count_bits(b); drbd_info(device, "recounting of set bits took additional %lu jiffies\n", jiffies - now); } now = b->bm_set; if ((flags & ~BM_AIO_READ) == 0) drbd_info(device, "%s (%lu bits) marked out-of-sync by on disk bit-map.\n", ppsize(ppb, now << (BM_BLOCK_SHIFT-10)), now); kref_put(&ctx->kref, &drbd_bm_aio_ctx_destroy); return err; } /** * drbd_bm_read() - Read the whole bitmap from its on disk location. * @device: DRBD device. */ int drbd_bm_read(struct drbd_device *device) __must_hold(local) { return bm_rw(device, BM_AIO_READ, 0); } /** * drbd_bm_write() - Write the whole bitmap to its on disk location. * @device: DRBD device. * * Will only write pages that have changed since last IO. */ int drbd_bm_write(struct drbd_device *device) __must_hold(local) { return bm_rw(device, 0, 0); } /** * drbd_bm_write_all() - Write the whole bitmap to its on disk location. * @device: DRBD device. * * Will write all pages. */ int drbd_bm_write_all(struct drbd_device *device) __must_hold(local) { return bm_rw(device, BM_AIO_WRITE_ALL_PAGES, 0); } /** * drbd_bm_write_lazy() - Write bitmap pages 0 to @upper_idx-1, if they have changed. 
* @device: DRBD device. * @upper_idx: 0: write all changed pages; +ve: page index to stop scanning for changed pages */ int drbd_bm_write_lazy(struct drbd_device *device, unsigned upper_idx) __must_hold(local) { return bm_rw(device, BM_AIO_COPY_PAGES, upper_idx); } /** * drbd_bm_write_copy_pages() - Write the whole bitmap to its on disk location. * @device: DRBD device. * * Will only write pages that have changed since last IO. * In contrast to drbd_bm_write(), this will copy the bitmap pages * to temporary writeout pages. It is intended to trigger a full write-out * while still allowing the bitmap to change, for example if a resync or online * verify is aborted due to a failed peer disk, while local IO continues, or * pending resync acks are still being processed. */ int drbd_bm_write_copy_pages(struct drbd_device *device) __must_hold(local) { return bm_rw(device, BM_AIO_COPY_PAGES, 0); } /** * drbd_bm_write_hinted() - Write bitmap pages with "hint" marks, if they have changed. * @device: DRBD device. */ int drbd_bm_write_hinted(struct drbd_device *device) __must_hold(local) { return bm_rw(device, BM_AIO_WRITE_HINTED | BM_AIO_COPY_PAGES, 0); } /* NOTE * find_first_bit returns int, we return unsigned long. * For this to work on 32bit arch with bitnumbers > (1<<32), * we'd need to return u64, and get a whole lot of other places * fixed where we still use unsigned long. * * this returns a bit number, NOT a sector! 
*/ static unsigned long __bm_find_next(struct drbd_device *device, unsigned long bm_fo, const int find_zero_bit) { struct drbd_bitmap *b = device->bitmap; unsigned long *p_addr; unsigned long bit_offset; unsigned i; if (bm_fo > b->bm_bits) { drbd_err(device, "bm_fo=%lu bm_bits=%lu\n", bm_fo, b->bm_bits); bm_fo = DRBD_END_OF_BITMAP; } else { while (bm_fo < b->bm_bits) { /* bit offset of the first bit in the page */ bit_offset = bm_fo & ~BITS_PER_PAGE_MASK; p_addr = __bm_map_pidx(b, bm_bit_to_page_idx(b, bm_fo)); if (find_zero_bit) i = find_next_zero_bit_le(p_addr, PAGE_SIZE*8, bm_fo & BITS_PER_PAGE_MASK); else i = find_next_bit_le(p_addr, PAGE_SIZE*8, bm_fo & BITS_PER_PAGE_MASK); __bm_unmap(p_addr); if (i < PAGE_SIZE*8) { bm_fo = bit_offset + i; if (bm_fo >= b->bm_bits) break; goto found; } bm_fo = bit_offset + PAGE_SIZE*8; } bm_fo = DRBD_END_OF_BITMAP; } found: return bm_fo; } static unsigned long bm_find_next(struct drbd_device *device, unsigned long bm_fo, const int find_zero_bit) { struct drbd_bitmap *b = device->bitmap; unsigned long i = DRBD_END_OF_BITMAP; if (!expect(b)) return i; if (!expect(b->bm_pages)) return i; spin_lock_irq(&b->bm_lock); if (BM_DONT_TEST & b->bm_flags) bm_print_lock_info(device); i = __bm_find_next(device, bm_fo, find_zero_bit); spin_unlock_irq(&b->bm_lock); return i; } unsigned long drbd_bm_find_next(struct drbd_device *device, unsigned long bm_fo) { return bm_find_next(device, bm_fo, 0); } #if 0 /* not yet needed for anything. */ unsigned long drbd_bm_find_next_zero(struct drbd_device *device, unsigned long bm_fo) { return bm_find_next(device, bm_fo, 1); } #endif /* does not spin_lock_irqsave. 
* you must take drbd_bm_lock() first */ unsigned long _drbd_bm_find_next(struct drbd_device *device, unsigned long bm_fo) { /* WARN_ON(!(BM_DONT_SET & device->b->bm_flags)); */ return __bm_find_next(device, bm_fo, 0); } unsigned long _drbd_bm_find_next_zero(struct drbd_device *device, unsigned long bm_fo) { /* WARN_ON(!(BM_DONT_SET & device->b->bm_flags)); */ return __bm_find_next(device, bm_fo, 1); } /* returns number of bits actually changed. * for val != 0, we change 0 -> 1, return code positive * for val == 0, we change 1 -> 0, return code negative * wants bitnr, not sector. * expected to be called for only a few bits (e - s about BITS_PER_LONG). * Must hold bitmap lock already. */ static int __bm_change_bits_to(struct drbd_device *device, const unsigned long s, unsigned long e, int val) { struct drbd_bitmap *b = device->bitmap; unsigned long *p_addr = NULL; unsigned long bitnr; unsigned int last_page_nr = -1U; int c = 0; int changed_total = 0; if (e >= b->bm_bits) { drbd_err(device, "ASSERT FAILED: bit_s=%lu bit_e=%lu bm_bits=%lu\n", s, e, b->bm_bits); e = b->bm_bits ? b->bm_bits -1 : 0; } for (bitnr = s; bitnr <= e; bitnr++) { unsigned int page_nr = bm_bit_to_page_idx(b, bitnr); if (page_nr != last_page_nr) { if (p_addr) __bm_unmap(p_addr); if (c < 0) bm_set_page_lazy_writeout(b->bm_pages[last_page_nr]); else if (c > 0) bm_set_page_need_writeout(b->bm_pages[last_page_nr]); changed_total += c; c = 0; p_addr = __bm_map_pidx(b, page_nr); last_page_nr = page_nr; } if (val) c += (0 == __test_and_set_bit_le(bitnr & BITS_PER_PAGE_MASK, p_addr)); else c -= (0 != __test_and_clear_bit_le(bitnr & BITS_PER_PAGE_MASK, p_addr)); } if (p_addr) __bm_unmap(p_addr); if (c < 0) bm_set_page_lazy_writeout(b->bm_pages[last_page_nr]); else if (c > 0) bm_set_page_need_writeout(b->bm_pages[last_page_nr]); changed_total += c; b->bm_set += changed_total; return changed_total; } /* returns number of bits actually changed. 
* for val != 0, we change 0 -> 1, return code positive * for val == 0, we change 1 -> 0, return code negative * wants bitnr, not sector */ static int bm_change_bits_to(struct drbd_device *device, const unsigned long s, const unsigned long e, int val) { unsigned long flags; struct drbd_bitmap *b = device->bitmap; int c = 0; if (!expect(b)) return 1; if (!expect(b->bm_pages)) return 0; spin_lock_irqsave(&b->bm_lock, flags); if ((val ? BM_DONT_SET : BM_DONT_CLEAR) & b->bm_flags) bm_print_lock_info(device); c = __bm_change_bits_to(device, s, e, val); spin_unlock_irqrestore(&b->bm_lock, flags); return c; } /* returns number of bits changed 0 -> 1 */ int drbd_bm_set_bits(struct drbd_device *device, const unsigned long s, const unsigned long e) { return bm_change_bits_to(device, s, e, 1); } /* returns number of bits changed 1 -> 0 */ int drbd_bm_clear_bits(struct drbd_device *device, const unsigned long s, const unsigned long e) { return -bm_change_bits_to(device, s, e, 0); } /* sets all bits in full words, * from first_word up to, but not including, last_word */ static inline void bm_set_full_words_within_one_page(struct drbd_bitmap *b, int page_nr, int first_word, int last_word) { int i; int bits; int changed = 0; unsigned long *paddr = kmap_atomic(b->bm_pages[page_nr]); for (i = first_word; i < last_word; i++) { bits = hweight_long(paddr[i]); paddr[i] = ~0UL; changed += BITS_PER_LONG - bits; } kunmap_atomic(paddr); if (changed) { /* We only need lazy writeout, the information is still in the * remote bitmap as well, and is reconstructed during the next * bitmap exchange, if lost locally due to a crash. */ bm_set_page_lazy_writeout(b->bm_pages[page_nr]); b->bm_set += changed; } } /* Same thing as drbd_bm_set_bits, * but more efficient for a large bit range. * You must first drbd_bm_lock(). * Can be called to set the whole bitmap in one go. * Sets bits from s to e _inclusive_. 
*/ void _drbd_bm_set_bits(struct drbd_device *device, const unsigned long s, const unsigned long e) { /* First set_bit from the first bit (s) * up to the next long boundary (sl), * then assign full words up to the last long boundary (el), * then set_bit up to and including the last bit (e). * * Do not use memset, because we must account for changes, * so we need to loop over the words with hweight() anyways. */ struct drbd_bitmap *b = device->bitmap; unsigned long sl = ALIGN(s,BITS_PER_LONG); unsigned long el = (e+1) & ~((unsigned long)BITS_PER_LONG-1); int first_page; int last_page; int page_nr; int first_word; int last_word; if (e - s <= 3*BITS_PER_LONG) { /* don't bother; el and sl may even be wrong. */ spin_lock_irq(&b->bm_lock); __bm_change_bits_to(device, s, e, 1); spin_unlock_irq(&b->bm_lock); return; } /* difference is large enough that we can trust sl and el */ spin_lock_irq(&b->bm_lock); /* bits filling the current long */ if (sl) __bm_change_bits_to(device, s, sl-1, 1); first_page = sl >> (3 + PAGE_SHIFT); last_page = el >> (3 + PAGE_SHIFT); /* MLPP: modulo longs per page */ /* LWPP: long words per page */ first_word = MLPP(sl >> LN2_BPL); last_word = LWPP; /* first and full pages, unless first page == last page */ for (page_nr = first_page; page_nr < last_page; page_nr++) { bm_set_full_words_within_one_page(device->bitmap, page_nr, first_word, last_word); spin_unlock_irq(&b->bm_lock); cond_resched(); first_word = 0; spin_lock_irq(&b->bm_lock); } /* last page (respectively only page, for first page == last page) */ last_word = MLPP(el >> LN2_BPL); /* consider bitmap->bm_bits = 32768, bitmap->bm_number_of_pages = 1. (or multiples). * ==> e = 32767, el = 32768, last_page = 2, * and now last_word = 0. * We do not want to touch last_page in this case, * as we did not allocate it, it is not present in bitmap->bm_pages. */ if (last_word) bm_set_full_words_within_one_page(device->bitmap, last_page, first_word, last_word); /* possibly trailing bits. 
* example: (e & 63) == 63, el will be e+1. * if that even was the very last bit, * it would trigger an assert in __bm_change_bits_to() */ if (el <= e) __bm_change_bits_to(device, el, e, 1); spin_unlock_irq(&b->bm_lock); } /* returns bit state * wants bitnr, NOT sector. * inherently racy... area needs to be locked by means of {al,rs}_lru * 1 ... bit set * 0 ... bit not set * -1 ... first out of bounds access, stop testing for bits! */ int drbd_bm_test_bit(struct drbd_device *device, const unsigned long bitnr) { unsigned long flags; struct drbd_bitmap *b = device->bitmap; unsigned long *p_addr; int i; if (!expect(b)) return 0; if (!expect(b->bm_pages)) return 0; spin_lock_irqsave(&b->bm_lock, flags); if (BM_DONT_TEST & b->bm_flags) bm_print_lock_info(device); if (bitnr < b->bm_bits) { p_addr = bm_map_pidx(b, bm_bit_to_page_idx(b, bitnr)); i = test_bit_le(bitnr & BITS_PER_PAGE_MASK, p_addr) ? 1 : 0; bm_unmap(p_addr); } else if (bitnr == b->bm_bits) { i = -1; } else { /* (bitnr > b->bm_bits) */ drbd_err(device, "bitnr=%lu > bm_bits=%lu\n", bitnr, b->bm_bits); i = 0; } spin_unlock_irqrestore(&b->bm_lock, flags); return i; } /* returns number of bits set in the range [s, e] */ int drbd_bm_count_bits(struct drbd_device *device, const unsigned long s, const unsigned long e) { unsigned long flags; struct drbd_bitmap *b = device->bitmap; unsigned long *p_addr = NULL; unsigned long bitnr; unsigned int page_nr = -1U; int c = 0; /* If this is called without a bitmap, that is a bug. 
But just to be * robust in case we screwed up elsewhere, in that case pretend there * was one dirty bit in the requested area, so we won't try to do a * local read there (no bitmap probably implies no disk) */ if (!expect(b)) return 1; if (!expect(b->bm_pages)) return 1; spin_lock_irqsave(&b->bm_lock, flags); if (BM_DONT_TEST & b->bm_flags) bm_print_lock_info(device); for (bitnr = s; bitnr <= e; bitnr++) { unsigned int idx = bm_bit_to_page_idx(b, bitnr); if (page_nr != idx) { page_nr = idx; if (p_addr) bm_unmap(p_addr); p_addr = bm_map_pidx(b, idx); } if (expect(bitnr < b->bm_bits)) c += (0 != test_bit_le(bitnr - (page_nr << (PAGE_SHIFT+3)), p_addr)); else drbd_err(device, "bitnr=%lu bm_bits=%lu\n", bitnr, b->bm_bits); } if (p_addr) bm_unmap(p_addr); spin_unlock_irqrestore(&b->bm_lock, flags); return c; } /* inherently racy... * return value may be already out-of-date when this function returns. * but the general usage is that this is only use during a cstate when bits are * only cleared, not set, and typically only care for the case when the return * value is zero, or we already "locked" this "bitmap extent" by other means. * * enr is bm-extent number, since we chose to name one sector (512 bytes) * worth of the bitmap a "bitmap extent". * * TODO * I think since we use it like a reference count, we should use the real * reference count of some bitmap extent element from some lru instead... 
* */ int drbd_bm_e_weight(struct drbd_device *device, unsigned long enr) { struct drbd_bitmap *b = device->bitmap; int count, s, e; unsigned long flags; unsigned long *p_addr, *bm; if (!expect(b)) return 0; if (!expect(b->bm_pages)) return 0; spin_lock_irqsave(&b->bm_lock, flags); if (BM_DONT_TEST & b->bm_flags) bm_print_lock_info(device); s = S2W(enr); e = min((size_t)S2W(enr+1), b->bm_words); count = 0; if (s < b->bm_words) { int n = e-s; p_addr = bm_map_pidx(b, bm_word_to_page_idx(b, s)); bm = p_addr + MLPP(s); while (n--) count += hweight_long(*bm++); bm_unmap(p_addr); } else { drbd_err(device, "start offset (%d) too large in drbd_bm_e_weight\n", s); } spin_unlock_irqrestore(&b->bm_lock, flags); return count; }
gpl-2.0
Alzyoud/android_kernel_samsung_smdk4412
arch/sh/kernel/cpu/sh4a/perf_event.c
2436
7492
/* * Performance events support for SH-4A performance counters * * Copyright (C) 2009, 2010 Paul Mundt * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/io.h> #include <linux/irq.h> #include <linux/perf_event.h> #include <asm/processor.h> #define PPC_CCBR(idx) (0xff200800 + (sizeof(u32) * idx)) #define PPC_PMCTR(idx) (0xfc100000 + (sizeof(u32) * idx)) #define CCBR_CIT_MASK (0x7ff << 6) #define CCBR_DUC (1 << 3) #define CCBR_CMDS (1 << 1) #define CCBR_PPCE (1 << 0) #ifdef CONFIG_CPU_SHX3 /* * The PMCAT location for SH-X3 CPUs was quietly moved, while the CCBR * and PMCTR locations remains tentatively constant. This change remains * wholly undocumented, and was simply found through trial and error. * * Early cuts of SH-X3 still appear to use the SH-X/SH-X2 locations, and * it's unclear when this ceased to be the case. For now we always use * the new location (if future parts keep up with this trend then * scanning for them at runtime also remains a viable option.) * * The gap in the register space also suggests that there are other * undocumented counters, so this will need to be revisited at a later * point in time. 
*/ #define PPC_PMCAT 0xfc100240 #else #define PPC_PMCAT 0xfc100080 #endif #define PMCAT_OVF3 (1 << 27) #define PMCAT_CNN3 (1 << 26) #define PMCAT_CLR3 (1 << 25) #define PMCAT_OVF2 (1 << 19) #define PMCAT_CLR2 (1 << 17) #define PMCAT_OVF1 (1 << 11) #define PMCAT_CNN1 (1 << 10) #define PMCAT_CLR1 (1 << 9) #define PMCAT_OVF0 (1 << 3) #define PMCAT_CLR0 (1 << 1) static struct sh_pmu sh4a_pmu; /* * Supported raw event codes: * * Event Code Description * ---------- ----------- * * 0x0000 number of elapsed cycles * 0x0200 number of elapsed cycles in privileged mode * 0x0280 number of elapsed cycles while SR.BL is asserted * 0x0202 instruction execution * 0x0203 instruction execution in parallel * 0x0204 number of unconditional branches * 0x0208 number of exceptions * 0x0209 number of interrupts * 0x0220 UTLB miss caused by instruction fetch * 0x0222 UTLB miss caused by operand access * 0x02a0 number of ITLB misses * 0x0028 number of accesses to instruction memories * 0x0029 number of accesses to instruction cache * 0x002a instruction cache miss * 0x022e number of access to instruction X/Y memory * 0x0030 number of reads to operand memories * 0x0038 number of writes to operand memories * 0x0031 number of operand cache read accesses * 0x0039 number of operand cache write accesses * 0x0032 operand cache read miss * 0x003a operand cache write miss * 0x0236 number of reads to operand X/Y memory * 0x023e number of writes to operand X/Y memory * 0x0237 number of reads to operand U memory * 0x023f number of writes to operand U memory * 0x0337 number of U memory read buffer misses * 0x02b4 number of wait cycles due to operand read access * 0x02bc number of wait cycles due to operand write access * 0x0033 number of wait cycles due to operand cache read miss * 0x003b number of wait cycles due to operand cache write miss */ /* * Special reserved bits used by hardware emulators, read values will * vary, but writes must always be 0. 
*/ #define PMCAT_EMU_CLR_MASK ((1 << 24) | (1 << 16) | (1 << 8) | (1 << 0)) static const int sh4a_general_events[] = { [PERF_COUNT_HW_CPU_CYCLES] = 0x0000, [PERF_COUNT_HW_INSTRUCTIONS] = 0x0202, [PERF_COUNT_HW_CACHE_REFERENCES] = 0x0029, /* I-cache */ [PERF_COUNT_HW_CACHE_MISSES] = 0x002a, /* I-cache */ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x0204, [PERF_COUNT_HW_BRANCH_MISSES] = -1, [PERF_COUNT_HW_BUS_CYCLES] = -1, }; #define C(x) PERF_COUNT_HW_CACHE_##x static const int sh4a_cache_events [PERF_COUNT_HW_CACHE_MAX] [PERF_COUNT_HW_CACHE_OP_MAX] [PERF_COUNT_HW_CACHE_RESULT_MAX] = { [ C(L1D) ] = { [ C(OP_READ) ] = { [ C(RESULT_ACCESS) ] = 0x0031, [ C(RESULT_MISS) ] = 0x0032, }, [ C(OP_WRITE) ] = { [ C(RESULT_ACCESS) ] = 0x0039, [ C(RESULT_MISS) ] = 0x003a, }, [ C(OP_PREFETCH) ] = { [ C(RESULT_ACCESS) ] = 0, [ C(RESULT_MISS) ] = 0, }, }, [ C(L1I) ] = { [ C(OP_READ) ] = { [ C(RESULT_ACCESS) ] = 0x0029, [ C(RESULT_MISS) ] = 0x002a, }, [ C(OP_WRITE) ] = { [ C(RESULT_ACCESS) ] = -1, [ C(RESULT_MISS) ] = -1, }, [ C(OP_PREFETCH) ] = { [ C(RESULT_ACCESS) ] = 0, [ C(RESULT_MISS) ] = 0, }, }, [ C(LL) ] = { [ C(OP_READ) ] = { [ C(RESULT_ACCESS) ] = 0x0030, [ C(RESULT_MISS) ] = 0, }, [ C(OP_WRITE) ] = { [ C(RESULT_ACCESS) ] = 0x0038, [ C(RESULT_MISS) ] = 0, }, [ C(OP_PREFETCH) ] = { [ C(RESULT_ACCESS) ] = 0, [ C(RESULT_MISS) ] = 0, }, }, [ C(DTLB) ] = { [ C(OP_READ) ] = { [ C(RESULT_ACCESS) ] = 0x0222, [ C(RESULT_MISS) ] = 0x0220, }, [ C(OP_WRITE) ] = { [ C(RESULT_ACCESS) ] = 0, [ C(RESULT_MISS) ] = 0, }, [ C(OP_PREFETCH) ] = { [ C(RESULT_ACCESS) ] = 0, [ C(RESULT_MISS) ] = 0, }, }, [ C(ITLB) ] = { [ C(OP_READ) ] = { [ C(RESULT_ACCESS) ] = 0, [ C(RESULT_MISS) ] = 0x02a0, }, [ C(OP_WRITE) ] = { [ C(RESULT_ACCESS) ] = -1, [ C(RESULT_MISS) ] = -1, }, [ C(OP_PREFETCH) ] = { [ C(RESULT_ACCESS) ] = -1, [ C(RESULT_MISS) ] = -1, }, }, [ C(BPU) ] = { [ C(OP_READ) ] = { [ C(RESULT_ACCESS) ] = -1, [ C(RESULT_MISS) ] = -1, }, [ C(OP_WRITE) ] = { [ C(RESULT_ACCESS) ] = -1, [ C(RESULT_MISS) ] 
= -1, }, [ C(OP_PREFETCH) ] = { [ C(RESULT_ACCESS) ] = -1, [ C(RESULT_MISS) ] = -1, }, }, }; static int sh4a_event_map(int event) { return sh4a_general_events[event]; } static u64 sh4a_pmu_read(int idx) { return __raw_readl(PPC_PMCTR(idx)); } static void sh4a_pmu_disable(struct hw_perf_event *hwc, int idx) { unsigned int tmp; tmp = __raw_readl(PPC_CCBR(idx)); tmp &= ~(CCBR_CIT_MASK | CCBR_DUC); __raw_writel(tmp, PPC_CCBR(idx)); } static void sh4a_pmu_enable(struct hw_perf_event *hwc, int idx) { unsigned int tmp; tmp = __raw_readl(PPC_PMCAT); tmp &= ~PMCAT_EMU_CLR_MASK; tmp |= idx ? PMCAT_CLR1 : PMCAT_CLR0; __raw_writel(tmp, PPC_PMCAT); tmp = __raw_readl(PPC_CCBR(idx)); tmp |= (hwc->config << 6) | CCBR_CMDS | CCBR_PPCE; __raw_writel(tmp, PPC_CCBR(idx)); __raw_writel(__raw_readl(PPC_CCBR(idx)) | CCBR_DUC, PPC_CCBR(idx)); } static void sh4a_pmu_disable_all(void) { int i; for (i = 0; i < sh4a_pmu.num_events; i++) __raw_writel(__raw_readl(PPC_CCBR(i)) & ~CCBR_DUC, PPC_CCBR(i)); } static void sh4a_pmu_enable_all(void) { int i; for (i = 0; i < sh4a_pmu.num_events; i++) __raw_writel(__raw_readl(PPC_CCBR(i)) | CCBR_DUC, PPC_CCBR(i)); } static struct sh_pmu sh4a_pmu = { .name = "sh4a", .num_events = 2, .event_map = sh4a_event_map, .max_events = ARRAY_SIZE(sh4a_general_events), .raw_event_mask = 0x3ff, .cache_events = &sh4a_cache_events, .read = sh4a_pmu_read, .disable = sh4a_pmu_disable, .enable = sh4a_pmu_enable, .disable_all = sh4a_pmu_disable_all, .enable_all = sh4a_pmu_enable_all, }; static int __init sh4a_pmu_init(void) { /* * Make sure this CPU actually has perf counters. */ if (!(boot_cpu_data.flags & CPU_HAS_PERF_COUNTER)) { pr_notice("HW perf events unsupported, software events only.\n"); return -ENODEV; } return register_sh_pmu(&sh4a_pmu); } early_initcall(sh4a_pmu_init);
gpl-2.0
tchaari/android_kernel_samsung_crespo
drivers/pcmcia/rsrc_iodyn.c
2692
3842
/* * rsrc_iodyn.c -- Resource management routines for MEM-static sockets. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * The initial developer of the original code is David A. Hinds * <dahinds@users.sourceforge.net>. Portions created by David A. Hinds * are Copyright (C) 1999 David A. Hinds. All Rights Reserved. * * (C) 1999 David A. Hinds */ #include <linux/slab.h> #include <linux/module.h> #include <linux/kernel.h> #include <pcmcia/ss.h> #include <pcmcia/cistpl.h> #include "cs_internal.h" struct pcmcia_align_data { unsigned long mask; unsigned long offset; }; static resource_size_t pcmcia_align(void *align_data, const struct resource *res, resource_size_t size, resource_size_t align) { struct pcmcia_align_data *data = align_data; resource_size_t start; start = (res->start & ~data->mask) + data->offset; if (start < res->start) start += data->mask + 1; #ifdef CONFIG_X86 if (res->flags & IORESOURCE_IO) { if (start & 0x300) start = (start + 0x3ff) & ~0x3ff; } #endif #ifdef CONFIG_M68K if (res->flags & IORESOURCE_IO) { if ((res->start + size - 1) >= 1024) start = res->end; } #endif return start; } static struct resource *__iodyn_find_io_region(struct pcmcia_socket *s, unsigned long base, int num, unsigned long align) { struct resource *res = pcmcia_make_resource(0, num, IORESOURCE_IO, dev_name(&s->dev)); struct pcmcia_align_data data; unsigned long min = base; int ret; data.mask = align - 1; data.offset = base & data.mask; #ifdef CONFIG_PCI if (s->cb_dev) { ret = pci_bus_alloc_resource(s->cb_dev->bus, res, num, 1, min, 0, pcmcia_align, &data); } else #endif ret = allocate_resource(&ioport_resource, res, num, min, ~0UL, 1, pcmcia_align, &data); if (ret != 0) { kfree(res); res = NULL; } return res; } static int iodyn_find_io(struct pcmcia_socket *s, unsigned int attr, unsigned int *base, unsigned int num, unsigned int align, 
struct resource **parent) { int i, ret = 0; /* Check for an already-allocated window that must conflict with * what was asked for. It is a hack because it does not catch all * potential conflicts, just the most obvious ones. */ for (i = 0; i < MAX_IO_WIN; i++) { if (!s->io[i].res) continue; if (!*base) continue; if ((s->io[i].res->start & (align-1)) == *base) return -EBUSY; } for (i = 0; i < MAX_IO_WIN; i++) { struct resource *res = s->io[i].res; unsigned int try; if (res && (res->flags & IORESOURCE_BITS) != (attr & IORESOURCE_BITS)) continue; if (!res) { if (align == 0) align = 0x10000; res = s->io[i].res = __iodyn_find_io_region(s, *base, num, align); if (!res) return -EINVAL; *base = res->start; s->io[i].res->flags = ((res->flags & ~IORESOURCE_BITS) | (attr & IORESOURCE_BITS)); s->io[i].InUse = num; *parent = res; return 0; } /* Try to extend top of window */ try = res->end + 1; if ((*base == 0) || (*base == try)) { if (adjust_resource(s->io[i].res, res->start, res->end - res->start + num + 1)) continue; *base = try; s->io[i].InUse += num; *parent = res; return 0; } /* Try to extend bottom of window */ try = res->start - num; if ((*base == 0) || (*base == try)) { if (adjust_resource(s->io[i].res, res->start - num, res->end - res->start + num + 1)) continue; *base = try; s->io[i].InUse += num; *parent = res; return 0; } } return -EINVAL; } struct pccard_resource_ops pccard_iodyn_ops = { .validate_mem = NULL, .find_io = iodyn_find_io, .find_mem = NULL, .init = static_init, .exit = NULL, }; EXPORT_SYMBOL(pccard_iodyn_ops);
gpl-2.0
boyan3010/Villec2_ShooterU_Kernel_3.0.X
kernel/sched_cpupri.c
3204
5517
/*
 * kernel/sched_cpupri.c
 *
 * CPU priority management
 *
 * Copyright (C) 2007-2008 Novell
 *
 * Author: Gregory Haskins <ghaskins@novell.com>
 *
 * This code tracks the priority of each CPU so that global migration
 * decisions are easy to calculate.  Each CPU can be in one of the states
 *
 *                 (INVALID), IDLE, NORMAL, RT1, ... RT99
 *
 * ordered from lowest to highest priority.  CPUs in the INVALID state
 * are not eligible for routing.  The state is kept in a two-dimensional
 * bitmap (first dimension: priority class, second: CPUs in that class),
 * so a typical task without affinity restrictions finds a suitable CPU
 * with O(1) complexity (e.g. two bit searches).  With affinity
 * restrictions the worst case is O(min(102, nr_domcpus)), though the
 * scenario that yields the worst case search is fairly contrived.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */

#include <linux/gfp.h>
#include "sched_cpupri.h"

/* Convert a 140-based task->prio into our 102-based cpupri scale. */
static int convert_prio(int prio)
{
	if (prio == CPUPRI_INVALID)
		return CPUPRI_INVALID;
	if (prio == MAX_PRIO)
		return CPUPRI_IDLE;
	if (prio >= MAX_RT_PRIO)
		return CPUPRI_NORMAL;

	return MAX_RT_PRIO - prio + 1;
}

#define for_each_cpupri_active(array, idx)			\
	for_each_set_bit(idx, array, CPUPRI_NR_PRIORITIES)

/**
 * cpupri_find - find the best (lowest-pri) CPU in the system
 * @cp: The cpupri context
 * @p: The task
 * @lowest_mask: A mask to fill in with selected CPUs (or NULL)
 *
 * Note: This function returns the recommended CPUs as calculated during the
 * current invocation.  By the time the call returns, the CPUs may have in
 * fact changed priorities any number of times.  While not ideal, it is not
 * an issue of correctness since the normal rebalancer logic will correct
 * any discrepancies created by racing against the uncertainty of the current
 * priority configuration.
 *
 * Returns: (int)bool - CPUs were found
 */
int cpupri_find(struct cpupri *cp, struct task_struct *p,
		struct cpumask *lowest_mask)
{
	int pri = 0;
	int task_pri = convert_prio(p->prio);

	for_each_cpupri_active(cp->pri_active, pri) {
		struct cpupri_vec *vec = &cp->pri_to_cpu[pri];

		/* Only levels strictly below the task's own are useful. */
		if (pri >= task_pri)
			break;

		/* Skip levels where the task's affinity allows no CPU. */
		if (cpumask_any_and(&p->cpus_allowed, vec->mask) >= nr_cpu_ids)
			continue;

		if (lowest_mask) {
			cpumask_and(lowest_mask, &p->cpus_allowed, vec->mask);

			/*
			 * The map could have been emptied concurrently
			 * between the first and second reads of vec->mask.
			 * If we end up with no bits set, simply act as
			 * though we never hit this priority level and
			 * continue on.
			 */
			if (cpumask_any(lowest_mask) >= nr_cpu_ids)
				continue;
		}

		return 1;
	}

	return 0;
}

/**
 * cpupri_set - update the cpu priority setting
 * @cp: The cpupri context
 * @cpu: The target cpu
 * @newpri: The priority (INVALID-RT99) to assign to this CPU
 *
 * Note: Assumes cpu_rq(cpu)->lock is locked
 *
 * Returns: (void)
 */
void cpupri_set(struct cpupri *cp, int cpu, int newpri)
{
	int *currpri = &cp->cpu_to_pri[cpu];
	int oldpri = *currpri;
	unsigned long flags;

	newpri = convert_prio(newpri);

	BUG_ON(newpri >= CPUPRI_NR_PRIORITIES);

	if (newpri == oldpri)
		return;

	/*
	 * Insert the cpu into the new priority vector before removing it
	 * from the old one.  Doing it the other way around risks the cpu
	 * momentarily disappearing from pri_active and being missed by a
	 * push or pull.
	 */
	if (likely(newpri != CPUPRI_INVALID)) {
		struct cpupri_vec *vec = &cp->pri_to_cpu[newpri];

		raw_spin_lock_irqsave(&vec->lock, flags);

		cpumask_set_cpu(cpu, vec->mask);
		vec->count++;
		if (vec->count == 1)
			set_bit(newpri, cp->pri_active);

		raw_spin_unlock_irqrestore(&vec->lock, flags);
	}

	if (likely(oldpri != CPUPRI_INVALID)) {
		struct cpupri_vec *vec = &cp->pri_to_cpu[oldpri];

		raw_spin_lock_irqsave(&vec->lock, flags);

		vec->count--;
		if (!vec->count)
			clear_bit(oldpri, cp->pri_active);
		cpumask_clear_cpu(cpu, vec->mask);

		raw_spin_unlock_irqrestore(&vec->lock, flags);
	}

	*currpri = newpri;
}

/**
 * cpupri_init - initialize the cpupri structure
 * @cp: The cpupri context
 *
 * Returns: -ENOMEM if memory fails.
 */
int cpupri_init(struct cpupri *cp)
{
	int i;

	memset(cp, 0, sizeof(*cp));

	for (i = 0; i < CPUPRI_NR_PRIORITIES; i++) {
		struct cpupri_vec *vec = &cp->pri_to_cpu[i];

		raw_spin_lock_init(&vec->lock);
		vec->count = 0;
		if (!zalloc_cpumask_var(&vec->mask, GFP_KERNEL))
			goto cleanup;
	}

	for_each_possible_cpu(i)
		cp->cpu_to_pri[i] = CPUPRI_INVALID;

	return 0;

cleanup:
	/* Unwind only the vectors that were successfully allocated. */
	for (i--; i >= 0; i--)
		free_cpumask_var(cp->pri_to_cpu[i].mask);

	return -ENOMEM;
}

/**
 * cpupri_cleanup - clean up the cpupri structure
 * @cp: The cpupri context
 */
void cpupri_cleanup(struct cpupri *cp)
{
	int i;

	for (i = 0; i < CPUPRI_NR_PRIORITIES; i++)
		free_cpumask_var(cp->pri_to_cpu[i].mask);
}
gpl-2.0
dininek/usbtmc-gadget
drivers/mfd/da9052-irq.c
3460
6138
/*
 * DA9052 interrupt support
 *
 * Author: Fabio Estevam <fabio.estevam@freescale.com>
 * Based on arizona-irq.c, which is:
 *
 * Copyright 2012 Wolfson Microelectronics plc
 *
 * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/device.h>
#include <linux/delay.h>
#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/slab.h>
#include <linux/module.h>

#include <linux/mfd/da9052/da9052.h>
#include <linux/mfd/da9052/reg.h>

#define DA9052_NUM_IRQ_REGS		4

/* One bit per interrupt source inside each event/mask register. */
#define DA9052_IRQ_MASK_POS_1		0x01
#define DA9052_IRQ_MASK_POS_2		0x02
#define DA9052_IRQ_MASK_POS_3		0x04
#define DA9052_IRQ_MASK_POS_4		0x08
#define DA9052_IRQ_MASK_POS_5		0x10
#define DA9052_IRQ_MASK_POS_6		0x20
#define DA9052_IRQ_MASK_POS_7		0x40
#define DA9052_IRQ_MASK_POS_8		0x80

static struct regmap_irq da9052_irqs[] = {
	/* Event register A (reg_offset 0) */
	[DA9052_IRQ_DCIN]     = { .reg_offset = 0, .mask = DA9052_IRQ_MASK_POS_1, },
	[DA9052_IRQ_VBUS]     = { .reg_offset = 0, .mask = DA9052_IRQ_MASK_POS_2, },
	[DA9052_IRQ_DCINREM]  = { .reg_offset = 0, .mask = DA9052_IRQ_MASK_POS_3, },
	[DA9052_IRQ_VBUSREM]  = { .reg_offset = 0, .mask = DA9052_IRQ_MASK_POS_4, },
	[DA9052_IRQ_VDDLOW]   = { .reg_offset = 0, .mask = DA9052_IRQ_MASK_POS_5, },
	[DA9052_IRQ_ALARM]    = { .reg_offset = 0, .mask = DA9052_IRQ_MASK_POS_6, },
	[DA9052_IRQ_SEQRDY]   = { .reg_offset = 0, .mask = DA9052_IRQ_MASK_POS_7, },
	[DA9052_IRQ_COMP1V2]  = { .reg_offset = 0, .mask = DA9052_IRQ_MASK_POS_8, },
	/* Event register B (reg_offset 1) */
	[DA9052_IRQ_NONKEY]   = { .reg_offset = 1, .mask = DA9052_IRQ_MASK_POS_1, },
	[DA9052_IRQ_IDFLOAT]  = { .reg_offset = 1, .mask = DA9052_IRQ_MASK_POS_2, },
	[DA9052_IRQ_IDGND]    = { .reg_offset = 1, .mask = DA9052_IRQ_MASK_POS_3, },
	[DA9052_IRQ_CHGEND]   = { .reg_offset = 1, .mask = DA9052_IRQ_MASK_POS_4, },
	[DA9052_IRQ_TBAT]     = { .reg_offset = 1, .mask = DA9052_IRQ_MASK_POS_5, },
	[DA9052_IRQ_ADC_EOM]  = { .reg_offset = 1, .mask = DA9052_IRQ_MASK_POS_6, },
	[DA9052_IRQ_PENDOWN]  = { .reg_offset = 1, .mask = DA9052_IRQ_MASK_POS_7, },
	[DA9052_IRQ_TSIREADY] = { .reg_offset = 1, .mask = DA9052_IRQ_MASK_POS_8, },
	/* Event register C (reg_offset 2) */
	[DA9052_IRQ_GPI0]     = { .reg_offset = 2, .mask = DA9052_IRQ_MASK_POS_1, },
	[DA9052_IRQ_GPI1]     = { .reg_offset = 2, .mask = DA9052_IRQ_MASK_POS_2, },
	[DA9052_IRQ_GPI2]     = { .reg_offset = 2, .mask = DA9052_IRQ_MASK_POS_3, },
	[DA9052_IRQ_GPI3]     = { .reg_offset = 2, .mask = DA9052_IRQ_MASK_POS_4, },
	[DA9052_IRQ_GPI4]     = { .reg_offset = 2, .mask = DA9052_IRQ_MASK_POS_5, },
	[DA9052_IRQ_GPI5]     = { .reg_offset = 2, .mask = DA9052_IRQ_MASK_POS_6, },
	[DA9052_IRQ_GPI6]     = { .reg_offset = 2, .mask = DA9052_IRQ_MASK_POS_7, },
	[DA9052_IRQ_GPI7]     = { .reg_offset = 2, .mask = DA9052_IRQ_MASK_POS_8, },
	/* Event register D (reg_offset 3) */
	[DA9052_IRQ_GPI8]     = { .reg_offset = 3, .mask = DA9052_IRQ_MASK_POS_1, },
	[DA9052_IRQ_GPI9]     = { .reg_offset = 3, .mask = DA9052_IRQ_MASK_POS_2, },
	[DA9052_IRQ_GPI10]    = { .reg_offset = 3, .mask = DA9052_IRQ_MASK_POS_3, },
	[DA9052_IRQ_GPI11]    = { .reg_offset = 3, .mask = DA9052_IRQ_MASK_POS_4, },
	[DA9052_IRQ_GPI12]    = { .reg_offset = 3, .mask = DA9052_IRQ_MASK_POS_5, },
	[DA9052_IRQ_GPI13]    = { .reg_offset = 3, .mask = DA9052_IRQ_MASK_POS_6, },
	[DA9052_IRQ_GPI14]    = { .reg_offset = 3, .mask = DA9052_IRQ_MASK_POS_7, },
	[DA9052_IRQ_GPI15]    = { .reg_offset = 3, .mask = DA9052_IRQ_MASK_POS_8, },
};

/* Events are acked by writing back to the same registers they are read from. */
static struct regmap_irq_chip da9052_regmap_irq_chip = {
	.name = "da9052_irq",
	.status_base = DA9052_EVENT_A_REG,
	.mask_base = DA9052_IRQ_MASK_A_REG,
	.ack_base = DA9052_EVENT_A_REG,
	.num_regs = DA9052_NUM_IRQ_REGS,
	.irqs = da9052_irqs,
	.num_irqs = ARRAY_SIZE(da9052_irqs),
};

/* Translate a DA9052 interrupt index into its virtual irq number. */
static int da9052_map_irq(struct da9052 *da9052, int irq)
{
	return regmap_irq_get_virq(da9052->irq_data, irq);
}

int da9052_enable_irq(struct da9052 *da9052, int irq)
{
	irq = da9052_map_irq(da9052, irq);
	if (irq < 0)
		return irq;

	enable_irq(irq);

	return 0;
}
EXPORT_SYMBOL_GPL(da9052_enable_irq);

int da9052_disable_irq(struct da9052 *da9052, int irq)
{
	irq = da9052_map_irq(da9052, irq);
	if (irq < 0)
		return irq;

	disable_irq(irq);

	return 0;
}
EXPORT_SYMBOL_GPL(da9052_disable_irq);

int da9052_disable_irq_nosync(struct da9052 *da9052, int irq)
{
	irq = da9052_map_irq(da9052, irq);
	if (irq < 0)
		return irq;

	disable_irq_nosync(irq);

	return 0;
}
EXPORT_SYMBOL_GPL(da9052_disable_irq_nosync);

int da9052_request_irq(struct da9052 *da9052, int irq, char *name,
			   irq_handler_t handler, void *data)
{
	irq = da9052_map_irq(da9052, irq);
	if (irq < 0)
		return irq;

	return request_threaded_irq(irq, NULL, handler,
				     IRQF_TRIGGER_LOW | IRQF_ONESHOT,
				     name, data);
}
EXPORT_SYMBOL_GPL(da9052_request_irq);

void da9052_free_irq(struct da9052 *da9052, int irq, void *data)
{
	irq = da9052_map_irq(da9052, irq);
	if (irq < 0)
		return;

	free_irq(irq, data);
}
EXPORT_SYMBOL_GPL(da9052_free_irq);

/* Signal ADC end-of-measurement to whoever is waiting on da9052->done. */
static irqreturn_t da9052_auxadc_irq(int irq, void *irq_data)
{
	struct da9052 *da9052 = irq_data;

	complete(&da9052->done);

	return IRQ_HANDLED;
}

int da9052_irq_init(struct da9052 *da9052)
{
	int ret;

	ret = regmap_add_irq_chip(da9052->regmap, da9052->chip_irq,
				  IRQF_TRIGGER_LOW | IRQF_ONESHOT,
				  -1, &da9052_regmap_irq_chip,
				  &da9052->irq_data);
	if (ret < 0) {
		dev_err(da9052->dev, "regmap_add_irq_chip failed: %d\n", ret);
		goto regmap_err;
	}

	ret = da9052_request_irq(da9052, DA9052_IRQ_ADC_EOM, "adc-irq",
			    da9052_auxadc_irq, da9052);
	if (ret != 0) {
		dev_err(da9052->dev, "DA9052_IRQ_ADC_EOM failed: %d\n", ret);
		goto request_irq_err;
	}

	return 0;

request_irq_err:
	regmap_del_irq_chip(da9052->chip_irq, da9052->irq_data);
regmap_err:
	return ret;

}

int da9052_irq_exit(struct da9052 *da9052)
{
	da9052_free_irq(da9052, DA9052_IRQ_ADC_EOM, da9052);
	regmap_del_irq_chip(da9052->chip_irq, da9052->irq_data);

	return 0;
}
gpl-2.0
kogone/AK-Angler
drivers/mfd/da9052-irq.c
3460
6138
/*
 * DA9052 interrupt support
 *
 * Author: Fabio Estevam <fabio.estevam@freescale.com>
 * Based on arizona-irq.c, which is:
 *
 * Copyright 2012 Wolfson Microelectronics plc
 *
 * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/device.h>
#include <linux/delay.h>
#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/slab.h>
#include <linux/module.h>

#include <linux/mfd/da9052/da9052.h>
#include <linux/mfd/da9052/reg.h>

#define DA9052_NUM_IRQ_REGS		4

/* One bit per interrupt source inside each event/mask register. */
#define DA9052_IRQ_MASK_POS_1		0x01
#define DA9052_IRQ_MASK_POS_2		0x02
#define DA9052_IRQ_MASK_POS_3		0x04
#define DA9052_IRQ_MASK_POS_4		0x08
#define DA9052_IRQ_MASK_POS_5		0x10
#define DA9052_IRQ_MASK_POS_6		0x20
#define DA9052_IRQ_MASK_POS_7		0x40
#define DA9052_IRQ_MASK_POS_8		0x80

static struct regmap_irq da9052_irqs[] = {
	/* Event register A (reg_offset 0) */
	[DA9052_IRQ_DCIN]     = { .reg_offset = 0, .mask = DA9052_IRQ_MASK_POS_1, },
	[DA9052_IRQ_VBUS]     = { .reg_offset = 0, .mask = DA9052_IRQ_MASK_POS_2, },
	[DA9052_IRQ_DCINREM]  = { .reg_offset = 0, .mask = DA9052_IRQ_MASK_POS_3, },
	[DA9052_IRQ_VBUSREM]  = { .reg_offset = 0, .mask = DA9052_IRQ_MASK_POS_4, },
	[DA9052_IRQ_VDDLOW]   = { .reg_offset = 0, .mask = DA9052_IRQ_MASK_POS_5, },
	[DA9052_IRQ_ALARM]    = { .reg_offset = 0, .mask = DA9052_IRQ_MASK_POS_6, },
	[DA9052_IRQ_SEQRDY]   = { .reg_offset = 0, .mask = DA9052_IRQ_MASK_POS_7, },
	[DA9052_IRQ_COMP1V2]  = { .reg_offset = 0, .mask = DA9052_IRQ_MASK_POS_8, },
	/* Event register B (reg_offset 1) */
	[DA9052_IRQ_NONKEY]   = { .reg_offset = 1, .mask = DA9052_IRQ_MASK_POS_1, },
	[DA9052_IRQ_IDFLOAT]  = { .reg_offset = 1, .mask = DA9052_IRQ_MASK_POS_2, },
	[DA9052_IRQ_IDGND]    = { .reg_offset = 1, .mask = DA9052_IRQ_MASK_POS_3, },
	[DA9052_IRQ_CHGEND]   = { .reg_offset = 1, .mask = DA9052_IRQ_MASK_POS_4, },
	[DA9052_IRQ_TBAT]     = { .reg_offset = 1, .mask = DA9052_IRQ_MASK_POS_5, },
	[DA9052_IRQ_ADC_EOM]  = { .reg_offset = 1, .mask = DA9052_IRQ_MASK_POS_6, },
	[DA9052_IRQ_PENDOWN]  = { .reg_offset = 1, .mask = DA9052_IRQ_MASK_POS_7, },
	[DA9052_IRQ_TSIREADY] = { .reg_offset = 1, .mask = DA9052_IRQ_MASK_POS_8, },
	/* Event register C (reg_offset 2) */
	[DA9052_IRQ_GPI0]     = { .reg_offset = 2, .mask = DA9052_IRQ_MASK_POS_1, },
	[DA9052_IRQ_GPI1]     = { .reg_offset = 2, .mask = DA9052_IRQ_MASK_POS_2, },
	[DA9052_IRQ_GPI2]     = { .reg_offset = 2, .mask = DA9052_IRQ_MASK_POS_3, },
	[DA9052_IRQ_GPI3]     = { .reg_offset = 2, .mask = DA9052_IRQ_MASK_POS_4, },
	[DA9052_IRQ_GPI4]     = { .reg_offset = 2, .mask = DA9052_IRQ_MASK_POS_5, },
	[DA9052_IRQ_GPI5]     = { .reg_offset = 2, .mask = DA9052_IRQ_MASK_POS_6, },
	[DA9052_IRQ_GPI6]     = { .reg_offset = 2, .mask = DA9052_IRQ_MASK_POS_7, },
	[DA9052_IRQ_GPI7]     = { .reg_offset = 2, .mask = DA9052_IRQ_MASK_POS_8, },
	/* Event register D (reg_offset 3) */
	[DA9052_IRQ_GPI8]     = { .reg_offset = 3, .mask = DA9052_IRQ_MASK_POS_1, },
	[DA9052_IRQ_GPI9]     = { .reg_offset = 3, .mask = DA9052_IRQ_MASK_POS_2, },
	[DA9052_IRQ_GPI10]    = { .reg_offset = 3, .mask = DA9052_IRQ_MASK_POS_3, },
	[DA9052_IRQ_GPI11]    = { .reg_offset = 3, .mask = DA9052_IRQ_MASK_POS_4, },
	[DA9052_IRQ_GPI12]    = { .reg_offset = 3, .mask = DA9052_IRQ_MASK_POS_5, },
	[DA9052_IRQ_GPI13]    = { .reg_offset = 3, .mask = DA9052_IRQ_MASK_POS_6, },
	[DA9052_IRQ_GPI14]    = { .reg_offset = 3, .mask = DA9052_IRQ_MASK_POS_7, },
	[DA9052_IRQ_GPI15]    = { .reg_offset = 3, .mask = DA9052_IRQ_MASK_POS_8, },
};

/* Events are acked by writing back to the same registers they are read from. */
static struct regmap_irq_chip da9052_regmap_irq_chip = {
	.name = "da9052_irq",
	.status_base = DA9052_EVENT_A_REG,
	.mask_base = DA9052_IRQ_MASK_A_REG,
	.ack_base = DA9052_EVENT_A_REG,
	.num_regs = DA9052_NUM_IRQ_REGS,
	.irqs = da9052_irqs,
	.num_irqs = ARRAY_SIZE(da9052_irqs),
};

/* Translate a DA9052 interrupt index into its virtual irq number. */
static int da9052_map_irq(struct da9052 *da9052, int irq)
{
	return regmap_irq_get_virq(da9052->irq_data, irq);
}

int da9052_enable_irq(struct da9052 *da9052, int irq)
{
	irq = da9052_map_irq(da9052, irq);
	if (irq < 0)
		return irq;

	enable_irq(irq);

	return 0;
}
EXPORT_SYMBOL_GPL(da9052_enable_irq);

int da9052_disable_irq(struct da9052 *da9052, int irq)
{
	irq = da9052_map_irq(da9052, irq);
	if (irq < 0)
		return irq;

	disable_irq(irq);

	return 0;
}
EXPORT_SYMBOL_GPL(da9052_disable_irq);

int da9052_disable_irq_nosync(struct da9052 *da9052, int irq)
{
	irq = da9052_map_irq(da9052, irq);
	if (irq < 0)
		return irq;

	disable_irq_nosync(irq);

	return 0;
}
EXPORT_SYMBOL_GPL(da9052_disable_irq_nosync);

int da9052_request_irq(struct da9052 *da9052, int irq, char *name,
			   irq_handler_t handler, void *data)
{
	irq = da9052_map_irq(da9052, irq);
	if (irq < 0)
		return irq;

	return request_threaded_irq(irq, NULL, handler,
				     IRQF_TRIGGER_LOW | IRQF_ONESHOT,
				     name, data);
}
EXPORT_SYMBOL_GPL(da9052_request_irq);

void da9052_free_irq(struct da9052 *da9052, int irq, void *data)
{
	irq = da9052_map_irq(da9052, irq);
	if (irq < 0)
		return;

	free_irq(irq, data);
}
EXPORT_SYMBOL_GPL(da9052_free_irq);

/* Signal ADC end-of-measurement to whoever is waiting on da9052->done. */
static irqreturn_t da9052_auxadc_irq(int irq, void *irq_data)
{
	struct da9052 *da9052 = irq_data;

	complete(&da9052->done);

	return IRQ_HANDLED;
}

int da9052_irq_init(struct da9052 *da9052)
{
	int ret;

	ret = regmap_add_irq_chip(da9052->regmap, da9052->chip_irq,
				  IRQF_TRIGGER_LOW | IRQF_ONESHOT,
				  -1, &da9052_regmap_irq_chip,
				  &da9052->irq_data);
	if (ret < 0) {
		dev_err(da9052->dev, "regmap_add_irq_chip failed: %d\n", ret);
		goto regmap_err;
	}

	ret = da9052_request_irq(da9052, DA9052_IRQ_ADC_EOM, "adc-irq",
			    da9052_auxadc_irq, da9052);
	if (ret != 0) {
		dev_err(da9052->dev, "DA9052_IRQ_ADC_EOM failed: %d\n", ret);
		goto request_irq_err;
	}

	return 0;

request_irq_err:
	regmap_del_irq_chip(da9052->chip_irq, da9052->irq_data);
regmap_err:
	return ret;

}

int da9052_irq_exit(struct da9052 *da9052)
{
	da9052_free_irq(da9052, DA9052_IRQ_ADC_EOM, da9052);
	regmap_del_irq_chip(da9052->chip_irq, da9052->irq_data);

	return 0;
}
gpl-2.0
CM11MOD/kernel_pantech_im900s
sound/isa/opti9xx/opti92x-ad1848.c
3716
29944
/* card-opti92x-ad1848.c - driver for OPTi 82c92x based soundcards. Copyright (C) 1998-2000 by Massimo Piccioni <dafastidio@libero.it> Part of this code was developed at the Italian Ministry of Air Defence, Sixth Division (oh, che pace ...), Rome. Thanks to Maria Grazia Pollarini, Salvatore Vassallo. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/init.h> #include <linux/err.h> #include <linux/isa.h> #include <linux/delay.h> #include <linux/pnp.h> #include <linux/module.h> #include <asm/io.h> #include <asm/dma.h> #include <sound/core.h> #include <sound/tlv.h> #include <sound/wss.h> #include <sound/mpu401.h> #include <sound/opl3.h> #ifndef OPTi93X #include <sound/opl4.h> #endif #define SNDRV_LEGACY_FIND_FREE_IRQ #define SNDRV_LEGACY_FIND_FREE_DMA #include <sound/initval.h> MODULE_AUTHOR("Massimo Piccioni <dafastidio@libero.it>"); MODULE_LICENSE("GPL"); #ifdef OPTi93X MODULE_DESCRIPTION("OPTi93X"); MODULE_SUPPORTED_DEVICE("{{OPTi,82C931/3}}"); #else /* OPTi93X */ #ifdef CS4231 MODULE_DESCRIPTION("OPTi92X - CS4231"); MODULE_SUPPORTED_DEVICE("{{OPTi,82C924 (CS4231)}," "{OPTi,82C925 (CS4231)}}"); #else /* CS4231 */ MODULE_DESCRIPTION("OPTi92X - AD1848"); MODULE_SUPPORTED_DEVICE("{{OPTi,82C924 (AD1848)}," "{OPTi,82C925 (AD1848)}," "{OAK,Mozart}}"); #endif /* CS4231 */ #endif /* OPTi93X */ static int index = SNDRV_DEFAULT_IDX1; /* Index 0-MAX 
*/ static char *id = SNDRV_DEFAULT_STR1; /* ID for this card */ //static bool enable = SNDRV_DEFAULT_ENABLE1; /* Enable this card */ #ifdef CONFIG_PNP static bool isapnp = true; /* Enable ISA PnP detection */ #endif static long port = SNDRV_DEFAULT_PORT1; /* 0x530,0xe80,0xf40,0x604 */ static long mpu_port = SNDRV_DEFAULT_PORT1; /* 0x300,0x310,0x320,0x330 */ static long fm_port = SNDRV_DEFAULT_PORT1; /* 0x388 */ static int irq = SNDRV_DEFAULT_IRQ1; /* 5,7,9,10,11 */ static int mpu_irq = SNDRV_DEFAULT_IRQ1; /* 5,7,9,10 */ static int dma1 = SNDRV_DEFAULT_DMA1; /* 0,1,3 */ #if defined(CS4231) || defined(OPTi93X) static int dma2 = SNDRV_DEFAULT_DMA1; /* 0,1,3 */ #endif /* CS4231 || OPTi93X */ module_param(index, int, 0444); MODULE_PARM_DESC(index, "Index value for opti9xx based soundcard."); module_param(id, charp, 0444); MODULE_PARM_DESC(id, "ID string for opti9xx based soundcard."); //module_param(enable, bool, 0444); //MODULE_PARM_DESC(enable, "Enable opti9xx soundcard."); #ifdef CONFIG_PNP module_param(isapnp, bool, 0444); MODULE_PARM_DESC(isapnp, "Enable ISA PnP detection for specified soundcard."); #endif module_param(port, long, 0444); MODULE_PARM_DESC(port, "WSS port # for opti9xx driver."); module_param(mpu_port, long, 0444); MODULE_PARM_DESC(mpu_port, "MPU-401 port # for opti9xx driver."); module_param(fm_port, long, 0444); MODULE_PARM_DESC(fm_port, "FM port # for opti9xx driver."); module_param(irq, int, 0444); MODULE_PARM_DESC(irq, "WSS irq # for opti9xx driver."); module_param(mpu_irq, int, 0444); MODULE_PARM_DESC(mpu_irq, "MPU-401 irq # for opti9xx driver."); module_param(dma1, int, 0444); MODULE_PARM_DESC(dma1, "1st dma # for opti9xx driver."); #if defined(CS4231) || defined(OPTi93X) module_param(dma2, int, 0444); MODULE_PARM_DESC(dma2, "2nd dma # for opti9xx driver."); #endif /* CS4231 || OPTi93X */ #define OPTi9XX_HW_82C928 1 #define OPTi9XX_HW_82C929 2 #define OPTi9XX_HW_82C924 3 #define OPTi9XX_HW_82C925 4 #define OPTi9XX_HW_82C930 5 #define 
OPTi9XX_HW_82C931 6 #define OPTi9XX_HW_82C933 7 #define OPTi9XX_HW_LAST OPTi9XX_HW_82C933 #define OPTi9XX_MC_REG(n) n #ifdef OPTi93X #define OPTi93X_STATUS 0x02 #define OPTi93X_PORT(chip, r) ((chip)->port + OPTi93X_##r) #define OPTi93X_IRQ_PLAYBACK 0x04 #define OPTi93X_IRQ_CAPTURE 0x08 #endif /* OPTi93X */ struct snd_opti9xx { unsigned short hardware; unsigned char password; char name[7]; unsigned long mc_base; struct resource *res_mc_base; unsigned long mc_base_size; #ifdef OPTi93X unsigned long mc_indir_index; unsigned long mc_indir_size; struct resource *res_mc_indir; struct snd_wss *codec; #endif /* OPTi93X */ unsigned long pwd_reg; spinlock_t lock; long wss_base; int irq; }; static int snd_opti9xx_pnp_is_probed; #ifdef CONFIG_PNP static struct pnp_card_device_id snd_opti9xx_pnpids[] = { #ifndef OPTi93X /* OPTi 82C924 */ { .id = "OPT0924", .devs = { { "OPT0000" }, { "OPT0002" }, { "OPT0005" } }, .driver_data = 0x0924 }, /* OPTi 82C925 */ { .id = "OPT0925", .devs = { { "OPT9250" }, { "OPT0002" }, { "OPT0005" } }, .driver_data = 0x0925 }, #else /* OPTi 82C931/3 */ { .id = "OPT0931", .devs = { { "OPT9310" }, { "OPT0002" } }, .driver_data = 0x0931 }, #endif /* OPTi93X */ { .id = "" } }; MODULE_DEVICE_TABLE(pnp_card, snd_opti9xx_pnpids); #endif /* CONFIG_PNP */ #ifdef OPTi93X #define DEV_NAME "opti93x" #else #define DEV_NAME "opti92x" #endif static char * snd_opti9xx_names[] = { "unknown", "82C928", "82C929", "82C924", "82C925", "82C930", "82C931", "82C933" }; static long __devinit snd_legacy_find_free_ioport(long *port_table, long size) { while (*port_table != -1) { if (request_region(*port_table, size, "ALSA test")) { release_region(*port_table, size); return *port_table; } port_table++; } return -1; } static int __devinit snd_opti9xx_init(struct snd_opti9xx *chip, unsigned short hardware) { static int opti9xx_mc_size[] = {7, 7, 10, 10, 2, 2, 2}; chip->hardware = hardware; strcpy(chip->name, snd_opti9xx_names[hardware]); spin_lock_init(&chip->lock); chip->irq = 
-1; #ifndef OPTi93X #ifdef CONFIG_PNP if (isapnp && chip->mc_base) /* PnP resource gives the least 10 bits */ chip->mc_base |= 0xc00; else #endif /* CONFIG_PNP */ { chip->mc_base = 0xf8c; chip->mc_base_size = opti9xx_mc_size[hardware]; } #else chip->mc_base_size = opti9xx_mc_size[hardware]; #endif switch (hardware) { #ifndef OPTi93X case OPTi9XX_HW_82C928: case OPTi9XX_HW_82C929: chip->password = (hardware == OPTi9XX_HW_82C928) ? 0xe2 : 0xe3; chip->pwd_reg = 3; break; case OPTi9XX_HW_82C924: case OPTi9XX_HW_82C925: chip->password = 0xe5; chip->pwd_reg = 3; break; #else /* OPTi93X */ case OPTi9XX_HW_82C930: case OPTi9XX_HW_82C931: case OPTi9XX_HW_82C933: chip->mc_base = (hardware == OPTi9XX_HW_82C930) ? 0xf8f : 0xf8d; if (!chip->mc_indir_index) { chip->mc_indir_index = 0xe0e; chip->mc_indir_size = 2; } chip->password = 0xe4; chip->pwd_reg = 0; break; #endif /* OPTi93X */ default: snd_printk(KERN_ERR "chip %d not supported\n", hardware); return -ENODEV; } return 0; } static unsigned char snd_opti9xx_read(struct snd_opti9xx *chip, unsigned char reg) { unsigned long flags; unsigned char retval = 0xff; spin_lock_irqsave(&chip->lock, flags); outb(chip->password, chip->mc_base + chip->pwd_reg); switch (chip->hardware) { #ifndef OPTi93X case OPTi9XX_HW_82C924: case OPTi9XX_HW_82C925: if (reg > 7) { outb(reg, chip->mc_base + 8); outb(chip->password, chip->mc_base + chip->pwd_reg); retval = inb(chip->mc_base + 9); break; } case OPTi9XX_HW_82C928: case OPTi9XX_HW_82C929: retval = inb(chip->mc_base + reg); break; #else /* OPTi93X */ case OPTi9XX_HW_82C930: case OPTi9XX_HW_82C931: case OPTi9XX_HW_82C933: outb(reg, chip->mc_indir_index); outb(chip->password, chip->mc_base + chip->pwd_reg); retval = inb(chip->mc_indir_index + 1); break; #endif /* OPTi93X */ default: snd_printk(KERN_ERR "chip %d not supported\n", chip->hardware); } spin_unlock_irqrestore(&chip->lock, flags); return retval; } static void snd_opti9xx_write(struct snd_opti9xx *chip, unsigned char reg, unsigned char 
value) { unsigned long flags; spin_lock_irqsave(&chip->lock, flags); outb(chip->password, chip->mc_base + chip->pwd_reg); switch (chip->hardware) { #ifndef OPTi93X case OPTi9XX_HW_82C924: case OPTi9XX_HW_82C925: if (reg > 7) { outb(reg, chip->mc_base + 8); outb(chip->password, chip->mc_base + chip->pwd_reg); outb(value, chip->mc_base + 9); break; } case OPTi9XX_HW_82C928: case OPTi9XX_HW_82C929: outb(value, chip->mc_base + reg); break; #else /* OPTi93X */ case OPTi9XX_HW_82C930: case OPTi9XX_HW_82C931: case OPTi9XX_HW_82C933: outb(reg, chip->mc_indir_index); outb(chip->password, chip->mc_base + chip->pwd_reg); outb(value, chip->mc_indir_index + 1); break; #endif /* OPTi93X */ default: snd_printk(KERN_ERR "chip %d not supported\n", chip->hardware); } spin_unlock_irqrestore(&chip->lock, flags); } #define snd_opti9xx_write_mask(chip, reg, value, mask) \ snd_opti9xx_write(chip, reg, \ (snd_opti9xx_read(chip, reg) & ~(mask)) | ((value) & (mask))) static int __devinit snd_opti9xx_configure(struct snd_opti9xx *chip, long port, int irq, int dma1, int dma2, long mpu_port, int mpu_irq) { unsigned char wss_base_bits; unsigned char irq_bits; unsigned char dma_bits; unsigned char mpu_port_bits = 0; unsigned char mpu_irq_bits; switch (chip->hardware) { #ifndef OPTi93X case OPTi9XX_HW_82C924: /* opti 929 mode (?), OPL3 clock output, audio enable */ snd_opti9xx_write_mask(chip, OPTi9XX_MC_REG(4), 0xf0, 0xfc); /* enable wave audio */ snd_opti9xx_write_mask(chip, OPTi9XX_MC_REG(6), 0x02, 0x02); case OPTi9XX_HW_82C925: /* enable WSS mode */ snd_opti9xx_write_mask(chip, OPTi9XX_MC_REG(1), 0x80, 0x80); /* OPL3 FM synthesis */ snd_opti9xx_write_mask(chip, OPTi9XX_MC_REG(2), 0x00, 0x20); /* disable Sound Blaster IRQ and DMA */ snd_opti9xx_write_mask(chip, OPTi9XX_MC_REG(3), 0xf0, 0xff); #ifdef CS4231 /* cs4231/4248 fix enabled */ snd_opti9xx_write_mask(chip, OPTi9XX_MC_REG(5), 0x02, 0x02); #else /* cs4231/4248 fix disabled */ snd_opti9xx_write_mask(chip, OPTi9XX_MC_REG(5), 0x00, 0x02); 
#endif /* CS4231 */ break; case OPTi9XX_HW_82C928: case OPTi9XX_HW_82C929: snd_opti9xx_write_mask(chip, OPTi9XX_MC_REG(1), 0x80, 0x80); snd_opti9xx_write_mask(chip, OPTi9XX_MC_REG(2), 0x00, 0x20); /* snd_opti9xx_write_mask(chip, OPTi9XX_MC_REG(3), 0xa2, 0xae); */ snd_opti9xx_write_mask(chip, OPTi9XX_MC_REG(4), 0x00, 0x0c); #ifdef CS4231 snd_opti9xx_write_mask(chip, OPTi9XX_MC_REG(5), 0x02, 0x02); #else snd_opti9xx_write_mask(chip, OPTi9XX_MC_REG(5), 0x00, 0x02); #endif /* CS4231 */ break; #else /* OPTi93X */ case OPTi9XX_HW_82C931: case OPTi9XX_HW_82C933: /* * The BTC 1817DW has QS1000 wavetable which is connected * to the serial digital input of the OPTI931. */ snd_opti9xx_write_mask(chip, OPTi9XX_MC_REG(21), 0x82, 0xff); /* * This bit sets OPTI931 to automaticaly select FM * or digital input signal. */ snd_opti9xx_write_mask(chip, OPTi9XX_MC_REG(26), 0x01, 0x01); case OPTi9XX_HW_82C930: /* FALL THROUGH */ snd_opti9xx_write_mask(chip, OPTi9XX_MC_REG(6), 0x02, 0x03); snd_opti9xx_write_mask(chip, OPTi9XX_MC_REG(3), 0x00, 0xff); snd_opti9xx_write_mask(chip, OPTi9XX_MC_REG(4), 0x10 | (chip->hardware == OPTi9XX_HW_82C930 ? 
0x00 : 0x04), 0x34); snd_opti9xx_write_mask(chip, OPTi9XX_MC_REG(5), 0x20, 0xbf); break; #endif /* OPTi93X */ default: snd_printk(KERN_ERR "chip %d not supported\n", chip->hardware); return -EINVAL; } /* PnP resource says it decodes only 10 bits of address */ switch (port & 0x3ff) { case 0x130: chip->wss_base = 0x530; wss_base_bits = 0x00; break; case 0x204: chip->wss_base = 0x604; wss_base_bits = 0x03; break; case 0x280: chip->wss_base = 0xe80; wss_base_bits = 0x01; break; case 0x340: chip->wss_base = 0xf40; wss_base_bits = 0x02; break; default: snd_printk(KERN_WARNING "WSS port 0x%lx not valid\n", port); goto __skip_base; } snd_opti9xx_write_mask(chip, OPTi9XX_MC_REG(1), wss_base_bits << 4, 0x30); __skip_base: switch (irq) { //#ifdef OPTi93X case 5: irq_bits = 0x05; break; //#endif /* OPTi93X */ case 7: irq_bits = 0x01; break; case 9: irq_bits = 0x02; break; case 10: irq_bits = 0x03; break; case 11: irq_bits = 0x04; break; default: snd_printk(KERN_WARNING "WSS irq # %d not valid\n", irq); goto __skip_resources; } switch (dma1) { case 0: dma_bits = 0x01; break; case 1: dma_bits = 0x02; break; case 3: dma_bits = 0x03; break; default: snd_printk(KERN_WARNING "WSS dma1 # %d not valid\n", dma1); goto __skip_resources; } #if defined(CS4231) || defined(OPTi93X) if (dma1 == dma2) { snd_printk(KERN_ERR "don't want to share dmas\n"); return -EBUSY; } switch (dma2) { case 0: case 1: break; default: snd_printk(KERN_WARNING "WSS dma2 # %d not valid\n", dma2); goto __skip_resources; } dma_bits |= 0x04; #endif /* CS4231 || OPTi93X */ #ifndef OPTi93X outb(irq_bits << 3 | dma_bits, chip->wss_base); #else /* OPTi93X */ snd_opti9xx_write(chip, OPTi9XX_MC_REG(3), (irq_bits << 3 | dma_bits)); #endif /* OPTi93X */ __skip_resources: if (chip->hardware > OPTi9XX_HW_82C928) { switch (mpu_port) { case 0: case -1: break; case 0x300: mpu_port_bits = 0x03; break; case 0x310: mpu_port_bits = 0x02; break; case 0x320: mpu_port_bits = 0x01; break; case 0x330: mpu_port_bits = 0x00; break; 
default: snd_printk(KERN_WARNING "MPU-401 port 0x%lx not valid\n", mpu_port); goto __skip_mpu; } switch (mpu_irq) { case 5: mpu_irq_bits = 0x02; break; case 7: mpu_irq_bits = 0x03; break; case 9: mpu_irq_bits = 0x00; break; case 10: mpu_irq_bits = 0x01; break; default: snd_printk(KERN_WARNING "MPU-401 irq # %d not valid\n", mpu_irq); goto __skip_mpu; } snd_opti9xx_write_mask(chip, OPTi9XX_MC_REG(6), (mpu_port <= 0) ? 0x00 : 0x80 | mpu_port_bits << 5 | mpu_irq_bits << 3, 0xf8); } __skip_mpu: return 0; } #ifdef OPTi93X static const DECLARE_TLV_DB_SCALE(db_scale_5bit_3db_step, -9300, 300, 0); static const DECLARE_TLV_DB_SCALE(db_scale_5bit, -4650, 150, 0); static const DECLARE_TLV_DB_SCALE(db_scale_4bit_12db_max, -3300, 300, 0); static struct snd_kcontrol_new snd_opti93x_controls[] = { WSS_DOUBLE("Master Playback Switch", 0, OPTi93X_OUT_LEFT, OPTi93X_OUT_RIGHT, 7, 7, 1, 1), WSS_DOUBLE_TLV("Master Playback Volume", 0, OPTi93X_OUT_LEFT, OPTi93X_OUT_RIGHT, 1, 1, 31, 1, db_scale_5bit_3db_step), WSS_DOUBLE_TLV("PCM Playback Volume", 0, CS4231_LEFT_OUTPUT, CS4231_RIGHT_OUTPUT, 0, 0, 31, 1, db_scale_5bit), WSS_DOUBLE_TLV("FM Playback Volume", 0, CS4231_AUX2_LEFT_INPUT, CS4231_AUX2_RIGHT_INPUT, 1, 1, 15, 1, db_scale_4bit_12db_max), WSS_DOUBLE("Line Playback Switch", 0, CS4231_LEFT_LINE_IN, CS4231_RIGHT_LINE_IN, 7, 7, 1, 1), WSS_DOUBLE_TLV("Line Playback Volume", 0, CS4231_LEFT_LINE_IN, CS4231_RIGHT_LINE_IN, 0, 0, 15, 1, db_scale_4bit_12db_max), WSS_DOUBLE("Mic Playback Switch", 0, OPTi93X_MIC_LEFT_INPUT, OPTi93X_MIC_RIGHT_INPUT, 7, 7, 1, 1), WSS_DOUBLE_TLV("Mic Playback Volume", 0, OPTi93X_MIC_LEFT_INPUT, OPTi93X_MIC_RIGHT_INPUT, 1, 1, 15, 1, db_scale_4bit_12db_max), WSS_DOUBLE_TLV("CD Playback Volume", 0, CS4231_AUX1_LEFT_INPUT, CS4231_AUX1_RIGHT_INPUT, 1, 1, 15, 1, db_scale_4bit_12db_max), WSS_DOUBLE("Aux Playback Switch", 0, OPTi931_AUX_LEFT_INPUT, OPTi931_AUX_RIGHT_INPUT, 7, 7, 1, 1), WSS_DOUBLE_TLV("Aux Playback Volume", 0, OPTi931_AUX_LEFT_INPUT, 
OPTi931_AUX_RIGHT_INPUT, 1, 1, 15, 1, db_scale_4bit_12db_max), }; static int __devinit snd_opti93x_mixer(struct snd_wss *chip) { struct snd_card *card; unsigned int idx; struct snd_ctl_elem_id id1, id2; int err; if (snd_BUG_ON(!chip || !chip->pcm)) return -EINVAL; card = chip->card; strcpy(card->mixername, chip->pcm->name); memset(&id1, 0, sizeof(id1)); memset(&id2, 0, sizeof(id2)); id1.iface = id2.iface = SNDRV_CTL_ELEM_IFACE_MIXER; /* reassign AUX0 switch to CD */ strcpy(id1.name, "Aux Playback Switch"); strcpy(id2.name, "CD Playback Switch"); err = snd_ctl_rename_id(card, &id1, &id2); if (err < 0) { snd_printk(KERN_ERR "Cannot rename opti93x control\n"); return err; } /* reassign AUX1 switch to FM */ strcpy(id1.name, "Aux Playback Switch"); id1.index = 1; strcpy(id2.name, "FM Playback Switch"); err = snd_ctl_rename_id(card, &id1, &id2); if (err < 0) { snd_printk(KERN_ERR "Cannot rename opti93x control\n"); return err; } /* remove AUX1 volume */ strcpy(id1.name, "Aux Playback Volume"); id1.index = 1; snd_ctl_remove_id(card, &id1); /* Replace WSS volume controls with OPTi93x volume controls */ id1.index = 0; for (idx = 0; idx < ARRAY_SIZE(snd_opti93x_controls); idx++) { strcpy(id1.name, snd_opti93x_controls[idx].name); snd_ctl_remove_id(card, &id1); err = snd_ctl_add(card, snd_ctl_new1(&snd_opti93x_controls[idx], chip)); if (err < 0) return err; } return 0; } static irqreturn_t snd_opti93x_interrupt(int irq, void *dev_id) { struct snd_opti9xx *chip = dev_id; struct snd_wss *codec = chip->codec; unsigned char status; if (!codec) return IRQ_HANDLED; status = snd_opti9xx_read(chip, OPTi9XX_MC_REG(11)); if ((status & OPTi93X_IRQ_PLAYBACK) && codec->playback_substream) snd_pcm_period_elapsed(codec->playback_substream); if ((status & OPTi93X_IRQ_CAPTURE) && codec->capture_substream) { snd_wss_overrange(codec); snd_pcm_period_elapsed(codec->capture_substream); } outb(0x00, OPTi93X_PORT(codec, STATUS)); return IRQ_HANDLED; } #endif /* OPTi93X */ static int __devinit 
snd_opti9xx_read_check(struct snd_opti9xx *chip) { unsigned char value; #ifdef OPTi93X unsigned long flags; #endif chip->res_mc_base = request_region(chip->mc_base, chip->mc_base_size, "OPTi9xx MC"); if (chip->res_mc_base == NULL) return -EBUSY; #ifndef OPTi93X value = snd_opti9xx_read(chip, OPTi9XX_MC_REG(1)); if (value != 0xff && value != inb(chip->mc_base + OPTi9XX_MC_REG(1))) if (value == snd_opti9xx_read(chip, OPTi9XX_MC_REG(1))) return 0; #else /* OPTi93X */ chip->res_mc_indir = request_region(chip->mc_indir_index, chip->mc_indir_size, "OPTi93x MC"); if (chip->res_mc_indir == NULL) return -EBUSY; spin_lock_irqsave(&chip->lock, flags); outb(chip->password, chip->mc_base + chip->pwd_reg); outb(((chip->mc_indir_index & 0x1f0) >> 4), chip->mc_base); spin_unlock_irqrestore(&chip->lock, flags); value = snd_opti9xx_read(chip, OPTi9XX_MC_REG(7)); snd_opti9xx_write(chip, OPTi9XX_MC_REG(7), 0xff - value); if (snd_opti9xx_read(chip, OPTi9XX_MC_REG(7)) == 0xff - value) return 0; release_and_free_resource(chip->res_mc_indir); chip->res_mc_indir = NULL; #endif /* OPTi93X */ release_and_free_resource(chip->res_mc_base); chip->res_mc_base = NULL; return -ENODEV; } static int __devinit snd_card_opti9xx_detect(struct snd_card *card, struct snd_opti9xx *chip) { int i, err; #ifndef OPTi93X for (i = OPTi9XX_HW_82C928; i < OPTi9XX_HW_82C930; i++) { #else for (i = OPTi9XX_HW_82C931; i >= OPTi9XX_HW_82C930; i--) { #endif err = snd_opti9xx_init(chip, i); if (err < 0) return err; err = snd_opti9xx_read_check(chip); if (err == 0) return 1; #ifdef OPTi93X chip->mc_indir_index = 0; #endif } return -ENODEV; } #ifdef CONFIG_PNP static int __devinit snd_card_opti9xx_pnp(struct snd_opti9xx *chip, struct pnp_card_link *card, const struct pnp_card_device_id *pid) { struct pnp_dev *pdev; int err; struct pnp_dev *devmpu; #ifndef OPTi93X struct pnp_dev *devmc; #endif pdev = pnp_request_card_device(card, pid->devs[0].id, NULL); if (pdev == NULL) return -EBUSY; err = pnp_activate_dev(pdev); if (err 
< 0) { snd_printk(KERN_ERR "AUDIO pnp configure failure: %d\n", err); return err; } #ifdef OPTi93X port = pnp_port_start(pdev, 0) - 4; fm_port = pnp_port_start(pdev, 1) + 8; chip->mc_indir_index = pnp_port_start(pdev, 3) + 2; chip->mc_indir_size = pnp_port_len(pdev, 3) - 2; #else devmc = pnp_request_card_device(card, pid->devs[2].id, NULL); if (devmc == NULL) return -EBUSY; err = pnp_activate_dev(devmc); if (err < 0) { snd_printk(KERN_ERR "MC pnp configure failure: %d\n", err); return err; } port = pnp_port_start(pdev, 1); fm_port = pnp_port_start(pdev, 2) + 8; /* * The MC(0) is never accessed and card does not * include it in the PnP resource range. OPTI93x include it. */ chip->mc_base = pnp_port_start(devmc, 0) - 1; chip->mc_base_size = pnp_port_len(devmc, 0) + 1; #endif /* OPTi93X */ irq = pnp_irq(pdev, 0); dma1 = pnp_dma(pdev, 0); #if defined(CS4231) || defined(OPTi93X) dma2 = pnp_dma(pdev, 1); #endif /* CS4231 || OPTi93X */ devmpu = pnp_request_card_device(card, pid->devs[1].id, NULL); if (devmpu && mpu_port > 0) { err = pnp_activate_dev(devmpu); if (err < 0) { snd_printk(KERN_ERR "MPU401 pnp configure failure\n"); mpu_port = -1; } else { mpu_port = pnp_port_start(devmpu, 0); mpu_irq = pnp_irq(devmpu, 0); } } return pid->driver_data; } #endif /* CONFIG_PNP */ static void snd_card_opti9xx_free(struct snd_card *card) { struct snd_opti9xx *chip = card->private_data; if (chip) { #ifdef OPTi93X if (chip->irq > 0) { disable_irq(chip->irq); free_irq(chip->irq, chip); } release_and_free_resource(chip->res_mc_indir); #endif release_and_free_resource(chip->res_mc_base); } } static int __devinit snd_opti9xx_probe(struct snd_card *card) { static long possible_ports[] = {0x530, 0xe80, 0xf40, 0x604, -1}; int error; int xdma2; struct snd_opti9xx *chip = card->private_data; struct snd_wss *codec; #ifdef CS4231 struct snd_timer *timer; #endif struct snd_pcm *pcm; struct snd_rawmidi *rmidi; struct snd_hwdep *synth; #if defined(CS4231) || defined(OPTi93X) xdma2 = dma2; #else 
xdma2 = -1; #endif if (port == SNDRV_AUTO_PORT) { port = snd_legacy_find_free_ioport(possible_ports, 4); if (port < 0) { snd_printk(KERN_ERR "unable to find a free WSS port\n"); return -EBUSY; } } error = snd_opti9xx_configure(chip, port, irq, dma1, xdma2, mpu_port, mpu_irq); if (error) return error; error = snd_wss_create(card, chip->wss_base + 4, -1, irq, dma1, xdma2, #ifdef OPTi93X WSS_HW_OPTI93X, WSS_HWSHARE_IRQ, #else WSS_HW_DETECT, 0, #endif &codec); if (error < 0) return error; #ifdef OPTi93X chip->codec = codec; #endif error = snd_wss_pcm(codec, 0, &pcm); if (error < 0) return error; error = snd_wss_mixer(codec); if (error < 0) return error; #ifdef OPTi93X error = snd_opti93x_mixer(codec); if (error < 0) return error; #endif #ifdef CS4231 error = snd_wss_timer(codec, 0, &timer); if (error < 0) return error; #endif #ifdef OPTi93X error = request_irq(irq, snd_opti93x_interrupt, 0, DEV_NAME" - WSS", chip); if (error < 0) { snd_printk(KERN_ERR "opti9xx: can't grab IRQ %d\n", irq); return error; } #endif chip->irq = irq; strcpy(card->driver, chip->name); sprintf(card->shortname, "OPTi %s", card->driver); #if defined(CS4231) || defined(OPTi93X) sprintf(card->longname, "%s, %s at 0x%lx, irq %d, dma %d&%d", card->shortname, pcm->name, chip->wss_base + 4, irq, dma1, xdma2); #else sprintf(card->longname, "%s, %s at 0x%lx, irq %d, dma %d", card->shortname, pcm->name, chip->wss_base + 4, irq, dma1); #endif /* CS4231 || OPTi93X */ if (mpu_port <= 0 || mpu_port == SNDRV_AUTO_PORT) rmidi = NULL; else { error = snd_mpu401_uart_new(card, 0, MPU401_HW_MPU401, mpu_port, 0, mpu_irq, &rmidi); if (error) snd_printk(KERN_WARNING "no MPU-401 device at 0x%lx?\n", mpu_port); } if (fm_port > 0 && fm_port != SNDRV_AUTO_PORT) { struct snd_opl3 *opl3 = NULL; #ifndef OPTi93X if (chip->hardware == OPTi9XX_HW_82C928 || chip->hardware == OPTi9XX_HW_82C929 || chip->hardware == OPTi9XX_HW_82C924) { struct snd_opl4 *opl4; /* assume we have an OPL4 */ snd_opti9xx_write_mask(chip, 
OPTi9XX_MC_REG(2), 0x20, 0x20); if (snd_opl4_create(card, fm_port, fm_port - 8, 2, &opl3, &opl4) < 0) { /* no luck, use OPL3 instead */ snd_opti9xx_write_mask(chip, OPTi9XX_MC_REG(2), 0x00, 0x20); } } #endif /* !OPTi93X */ if (!opl3 && snd_opl3_create(card, fm_port, fm_port + 2, OPL3_HW_AUTO, 0, &opl3) < 0) { snd_printk(KERN_WARNING "no OPL device at 0x%lx-0x%lx\n", fm_port, fm_port + 4 - 1); } if (opl3) { error = snd_opl3_hwdep_new(opl3, 0, 1, &synth); if (error < 0) return error; } } return snd_card_register(card); } static int snd_opti9xx_card_new(struct snd_card **cardp) { struct snd_card *card; int err; err = snd_card_create(index, id, THIS_MODULE, sizeof(struct snd_opti9xx), &card); if (err < 0) return err; card->private_free = snd_card_opti9xx_free; *cardp = card; return 0; } static int __devinit snd_opti9xx_isa_match(struct device *devptr, unsigned int dev) { #ifdef CONFIG_PNP if (snd_opti9xx_pnp_is_probed) return 0; if (isapnp) return 0; #endif return 1; } static int __devinit snd_opti9xx_isa_probe(struct device *devptr, unsigned int dev) { struct snd_card *card; int error; static long possible_mpu_ports[] = {0x300, 0x310, 0x320, 0x330, -1}; #ifdef OPTi93X static int possible_irqs[] = {5, 9, 10, 11, 7, -1}; #else static int possible_irqs[] = {9, 10, 11, 7, -1}; #endif /* OPTi93X */ static int possible_mpu_irqs[] = {5, 9, 10, 7, -1}; static int possible_dma1s[] = {3, 1, 0, -1}; #if defined(CS4231) || defined(OPTi93X) static int possible_dma2s[][2] = {{1,-1}, {0,-1}, {-1,-1}, {0,-1}}; #endif /* CS4231 || OPTi93X */ if (mpu_port == SNDRV_AUTO_PORT) { if ((mpu_port = snd_legacy_find_free_ioport(possible_mpu_ports, 2)) < 0) { snd_printk(KERN_ERR "unable to find a free MPU401 port\n"); return -EBUSY; } } if (irq == SNDRV_AUTO_IRQ) { if ((irq = snd_legacy_find_free_irq(possible_irqs)) < 0) { snd_printk(KERN_ERR "unable to find a free IRQ\n"); return -EBUSY; } } if (mpu_irq == SNDRV_AUTO_IRQ) { if ((mpu_irq = snd_legacy_find_free_irq(possible_mpu_irqs)) < 0) { 
snd_printk(KERN_ERR "unable to find a free MPU401 IRQ\n"); return -EBUSY; } } if (dma1 == SNDRV_AUTO_DMA) { if ((dma1 = snd_legacy_find_free_dma(possible_dma1s)) < 0) { snd_printk(KERN_ERR "unable to find a free DMA1\n"); return -EBUSY; } } #if defined(CS4231) || defined(OPTi93X) if (dma2 == SNDRV_AUTO_DMA) { if ((dma2 = snd_legacy_find_free_dma(possible_dma2s[dma1 % 4])) < 0) { snd_printk(KERN_ERR "unable to find a free DMA2\n"); return -EBUSY; } } #endif error = snd_opti9xx_card_new(&card); if (error < 0) return error; if ((error = snd_card_opti9xx_detect(card, card->private_data)) < 0) { snd_card_free(card); return error; } snd_card_set_dev(card, devptr); if ((error = snd_opti9xx_probe(card)) < 0) { snd_card_free(card); return error; } dev_set_drvdata(devptr, card); return 0; } static int __devexit snd_opti9xx_isa_remove(struct device *devptr, unsigned int dev) { snd_card_free(dev_get_drvdata(devptr)); dev_set_drvdata(devptr, NULL); return 0; } static struct isa_driver snd_opti9xx_driver = { .match = snd_opti9xx_isa_match, .probe = snd_opti9xx_isa_probe, .remove = __devexit_p(snd_opti9xx_isa_remove), /* FIXME: suspend/resume */ .driver = { .name = DEV_NAME }, }; #ifdef CONFIG_PNP static int __devinit snd_opti9xx_pnp_probe(struct pnp_card_link *pcard, const struct pnp_card_device_id *pid) { struct snd_card *card; int error, hw; struct snd_opti9xx *chip; if (snd_opti9xx_pnp_is_probed) return -EBUSY; if (! 
isapnp) return -ENODEV; error = snd_opti9xx_card_new(&card); if (error < 0) return error; chip = card->private_data; hw = snd_card_opti9xx_pnp(chip, pcard, pid); switch (hw) { case 0x0924: hw = OPTi9XX_HW_82C924; break; case 0x0925: hw = OPTi9XX_HW_82C925; break; case 0x0931: hw = OPTi9XX_HW_82C931; break; default: snd_card_free(card); return -ENODEV; } if ((error = snd_opti9xx_init(chip, hw))) { snd_card_free(card); return error; } error = snd_opti9xx_read_check(chip); if (error) { snd_printk(KERN_ERR "OPTI chip not found\n"); snd_card_free(card); return error; } snd_card_set_dev(card, &pcard->card->dev); if ((error = snd_opti9xx_probe(card)) < 0) { snd_card_free(card); return error; } pnp_set_card_drvdata(pcard, card); snd_opti9xx_pnp_is_probed = 1; return 0; } static void __devexit snd_opti9xx_pnp_remove(struct pnp_card_link * pcard) { snd_card_free(pnp_get_card_drvdata(pcard)); pnp_set_card_drvdata(pcard, NULL); snd_opti9xx_pnp_is_probed = 0; } static struct pnp_card_driver opti9xx_pnpc_driver = { .flags = PNP_DRIVER_RES_DISABLE, .name = "opti9xx", .id_table = snd_opti9xx_pnpids, .probe = snd_opti9xx_pnp_probe, .remove = __devexit_p(snd_opti9xx_pnp_remove), }; #endif #ifdef OPTi93X #define CHIP_NAME "82C93x" #else #define CHIP_NAME "82C92x" #endif static int __init alsa_card_opti9xx_init(void) { #ifdef CONFIG_PNP pnp_register_card_driver(&opti9xx_pnpc_driver); if (snd_opti9xx_pnp_is_probed) return 0; pnp_unregister_card_driver(&opti9xx_pnpc_driver); #endif return isa_register_driver(&snd_opti9xx_driver, 1); } static void __exit alsa_card_opti9xx_exit(void) { if (!snd_opti9xx_pnp_is_probed) { isa_unregister_driver(&snd_opti9xx_driver); return; } #ifdef CONFIG_PNP pnp_unregister_card_driver(&opti9xx_pnpc_driver); #endif } module_init(alsa_card_opti9xx_init) module_exit(alsa_card_opti9xx_exit)
gpl-2.0
dev-life/GT-I9300_Kernel
net/netlabel/netlabel_addrlist.c
4228
10674
/* * NetLabel Network Address Lists * * This file contains network address list functions used to manage ordered * lists of network addresses for use by the NetLabel subsystem. The NetLabel * system manages static and dynamic label mappings for network protocols such * as CIPSO and RIPSO. * * Author: Paul Moore <paul.moore@hp.com> * */ /* * (c) Copyright Hewlett-Packard Development Company, L.P., 2008 * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See * the GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/types.h> #include <linux/rcupdate.h> #include <linux/list.h> #include <linux/spinlock.h> #include <linux/in.h> #include <linux/in6.h> #include <linux/ip.h> #include <linux/ipv6.h> #include <net/ip.h> #include <net/ipv6.h> #include <linux/audit.h> #include "netlabel_addrlist.h" /* * Address List Functions */ /** * netlbl_af4list_search - Search for a matching IPv4 address entry * @addr: IPv4 address * @head: the list head * * Description: * Searches the IPv4 address list given by @head. If a matching address entry * is found it is returned, otherwise NULL is returned. The caller is * responsible for calling the rcu_read_[un]lock() functions. 
* */ struct netlbl_af4list *netlbl_af4list_search(__be32 addr, struct list_head *head) { struct netlbl_af4list *iter; list_for_each_entry_rcu(iter, head, list) if (iter->valid && (addr & iter->mask) == iter->addr) return iter; return NULL; } /** * netlbl_af4list_search_exact - Search for an exact IPv4 address entry * @addr: IPv4 address * @mask: IPv4 address mask * @head: the list head * * Description: * Searches the IPv4 address list given by @head. If an exact match if found * it is returned, otherwise NULL is returned. The caller is responsible for * calling the rcu_read_[un]lock() functions. * */ struct netlbl_af4list *netlbl_af4list_search_exact(__be32 addr, __be32 mask, struct list_head *head) { struct netlbl_af4list *iter; list_for_each_entry_rcu(iter, head, list) if (iter->valid && iter->addr == addr && iter->mask == mask) return iter; return NULL; } #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) /** * netlbl_af6list_search - Search for a matching IPv6 address entry * @addr: IPv6 address * @head: the list head * * Description: * Searches the IPv6 address list given by @head. If a matching address entry * is found it is returned, otherwise NULL is returned. The caller is * responsible for calling the rcu_read_[un]lock() functions. * */ struct netlbl_af6list *netlbl_af6list_search(const struct in6_addr *addr, struct list_head *head) { struct netlbl_af6list *iter; list_for_each_entry_rcu(iter, head, list) if (iter->valid && ipv6_masked_addr_cmp(&iter->addr, &iter->mask, addr) == 0) return iter; return NULL; } /** * netlbl_af6list_search_exact - Search for an exact IPv6 address entry * @addr: IPv6 address * @mask: IPv6 address mask * @head: the list head * * Description: * Searches the IPv6 address list given by @head. If an exact match if found * it is returned, otherwise NULL is returned. The caller is responsible for * calling the rcu_read_[un]lock() functions. 
* */ struct netlbl_af6list *netlbl_af6list_search_exact(const struct in6_addr *addr, const struct in6_addr *mask, struct list_head *head) { struct netlbl_af6list *iter; list_for_each_entry_rcu(iter, head, list) if (iter->valid && ipv6_addr_equal(&iter->addr, addr) && ipv6_addr_equal(&iter->mask, mask)) return iter; return NULL; } #endif /* IPv6 */ /** * netlbl_af4list_add - Add a new IPv4 address entry to a list * @entry: address entry * @head: the list head * * Description: * Add a new address entry to the list pointed to by @head. On success zero is * returned, otherwise a negative value is returned. The caller is responsible * for calling the necessary locking functions. * */ int netlbl_af4list_add(struct netlbl_af4list *entry, struct list_head *head) { struct netlbl_af4list *iter; iter = netlbl_af4list_search(entry->addr, head); if (iter != NULL && iter->addr == entry->addr && iter->mask == entry->mask) return -EEXIST; /* in order to speed up address searches through the list (the common * case) we need to keep the list in order based on the size of the * address mask such that the entry with the widest mask (smallest * numerical value) appears first in the list */ list_for_each_entry_rcu(iter, head, list) if (iter->valid && ntohl(entry->mask) > ntohl(iter->mask)) { __list_add_rcu(&entry->list, iter->list.prev, &iter->list); return 0; } list_add_tail_rcu(&entry->list, head); return 0; } #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) /** * netlbl_af6list_add - Add a new IPv6 address entry to a list * @entry: address entry * @head: the list head * * Description: * Add a new address entry to the list pointed to by @head. On success zero is * returned, otherwise a negative value is returned. The caller is responsible * for calling the necessary locking functions. 
* */ int netlbl_af6list_add(struct netlbl_af6list *entry, struct list_head *head) { struct netlbl_af6list *iter; iter = netlbl_af6list_search(&entry->addr, head); if (iter != NULL && ipv6_addr_equal(&iter->addr, &entry->addr) && ipv6_addr_equal(&iter->mask, &entry->mask)) return -EEXIST; /* in order to speed up address searches through the list (the common * case) we need to keep the list in order based on the size of the * address mask such that the entry with the widest mask (smallest * numerical value) appears first in the list */ list_for_each_entry_rcu(iter, head, list) if (iter->valid && ipv6_addr_cmp(&entry->mask, &iter->mask) > 0) { __list_add_rcu(&entry->list, iter->list.prev, &iter->list); return 0; } list_add_tail_rcu(&entry->list, head); return 0; } #endif /* IPv6 */ /** * netlbl_af4list_remove_entry - Remove an IPv4 address entry * @entry: address entry * * Description: * Remove the specified IP address entry. The caller is responsible for * calling the necessary locking functions. * */ void netlbl_af4list_remove_entry(struct netlbl_af4list *entry) { entry->valid = 0; list_del_rcu(&entry->list); } /** * netlbl_af4list_remove - Remove an IPv4 address entry * @addr: IP address * @mask: IP address mask * @head: the list head * * Description: * Remove an IP address entry from the list pointed to by @head. Returns the * entry on success, NULL on failure. The caller is responsible for calling * the necessary locking functions. * */ struct netlbl_af4list *netlbl_af4list_remove(__be32 addr, __be32 mask, struct list_head *head) { struct netlbl_af4list *entry; entry = netlbl_af4list_search_exact(addr, mask, head); if (entry == NULL) return NULL; netlbl_af4list_remove_entry(entry); return entry; } #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) /** * netlbl_af6list_remove_entry - Remove an IPv6 address entry * @entry: address entry * * Description: * Remove the specified IP address entry. 
The caller is responsible for * calling the necessary locking functions. * */ void netlbl_af6list_remove_entry(struct netlbl_af6list *entry) { entry->valid = 0; list_del_rcu(&entry->list); } /** * netlbl_af6list_remove - Remove an IPv6 address entry * @addr: IP address * @mask: IP address mask * @head: the list head * * Description: * Remove an IP address entry from the list pointed to by @head. Returns the * entry on success, NULL on failure. The caller is responsible for calling * the necessary locking functions. * */ struct netlbl_af6list *netlbl_af6list_remove(const struct in6_addr *addr, const struct in6_addr *mask, struct list_head *head) { struct netlbl_af6list *entry; entry = netlbl_af6list_search_exact(addr, mask, head); if (entry == NULL) return NULL; netlbl_af6list_remove_entry(entry); return entry; } #endif /* IPv6 */ /* * Audit Helper Functions */ #ifdef CONFIG_AUDIT /** * netlbl_af4list_audit_addr - Audit an IPv4 address * @audit_buf: audit buffer * @src: true if source address, false if destination * @dev: network interface * @addr: IP address * @mask: IP address mask * * Description: * Write the IPv4 address and address mask, if necessary, to @audit_buf. * */ void netlbl_af4list_audit_addr(struct audit_buffer *audit_buf, int src, const char *dev, __be32 addr, __be32 mask) { u32 mask_val = ntohl(mask); char *dir = (src ? 
"src" : "dst"); if (dev != NULL) audit_log_format(audit_buf, " netif=%s", dev); audit_log_format(audit_buf, " %s=%pI4", dir, &addr); if (mask_val != 0xffffffff) { u32 mask_len = 0; while (mask_val > 0) { mask_val <<= 1; mask_len++; } audit_log_format(audit_buf, " %s_prefixlen=%d", dir, mask_len); } } #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) /** * netlbl_af6list_audit_addr - Audit an IPv6 address * @audit_buf: audit buffer * @src: true if source address, false if destination * @dev: network interface * @addr: IP address * @mask: IP address mask * * Description: * Write the IPv6 address and address mask, if necessary, to @audit_buf. * */ void netlbl_af6list_audit_addr(struct audit_buffer *audit_buf, int src, const char *dev, const struct in6_addr *addr, const struct in6_addr *mask) { char *dir = (src ? "src" : "dst"); if (dev != NULL) audit_log_format(audit_buf, " netif=%s", dev); audit_log_format(audit_buf, " %s=%pI6", dir, addr); if (ntohl(mask->s6_addr32[3]) != 0xffffffff) { u32 mask_len = 0; u32 mask_val; int iter = -1; while (ntohl(mask->s6_addr32[++iter]) == 0xffffffff) mask_len += 32; mask_val = ntohl(mask->s6_addr32[iter]); while (mask_val > 0) { mask_val <<= 1; mask_len++; } audit_log_format(audit_buf, " %s_prefixlen=%d", dir, mask_len); } } #endif /* IPv6 */ #endif /* CONFIG_AUDIT */
gpl-2.0
Hadramos/android_sony_xperiaz_kernel_sources
arch/x86/kernel/irqinit.c
4740
7807
#include <linux/linkage.h>
#include <linux/errno.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/random.h>
#include <linux/kprobes.h>
#include <linux/init.h>
#include <linux/kernel_stat.h>
#include <linux/device.h>
#include <linux/bitops.h>
#include <linux/acpi.h>
#include <linux/io.h>
#include <linux/delay.h>

#include <linux/atomic.h>
#include <asm/timer.h>
#include <asm/hw_irq.h>
#include <asm/pgtable.h>
#include <asm/desc.h>
#include <asm/apic.h>
#include <asm/setup.h>
#include <asm/i8259.h>
#include <asm/traps.h>
#include <asm/prom.h>

/*
 * ISA PIC or low IO-APIC triggered (INTA-cycle or APIC) interrupts:
 * (these are usually mapped to vectors 0x30-0x3f)
 */

/*
 * The IO-APIC gives us many more interrupt sources. Most of these
 * are unused but an SMP system is supposed to have enough memory ...
 * sometimes (mostly wrt. hw bugs) we get corrupted vectors all
 * across the spectrum, so we really want to be prepared to get all
 * of these. Plus, more powerful systems might have more than 64
 * IO-APIC registers.
 *
 * (these are usually mapped into the 0x30-0xff vector range)
 */

#ifdef CONFIG_X86_32
/*
 * Note that on a 486, we don't want to do a SIGFPE on an irq13
 * as the irq is unreliable, and exception 16 works correctly
 * (ie as explained in the intel literature). On a 386, you
 * can't use exception 16 due to bad IBM design, so we have to
 * rely on the less exact irq13.
 *
 * Careful.. Not only is IRQ13 unreliable, but it is also
 * leads to races. IBM designers who came up with it should
 * be shot.
 */

/*
 * IRQ13 handler for legacy FERR-style FPU error reporting: clear the
 * FPU busy latch (port 0xF0) and hand the error to the trap code.
 */
static irqreturn_t math_error_irq(int cpl, void *dev_id)
{
	/* Clear the FPU BUSY# latch so the line can fire again. */
	outb(0, 0xF0);
	if (ignore_fpu_irq || !boot_cpu_data.hard_math)
		return IRQ_NONE;
	math_error(get_irq_regs(), 0, X86_TRAP_MF);
	return IRQ_HANDLED;
}

/*
 * New motherboards sometimes make IRQ 13 be a PCI interrupt,
 * so allow interrupt sharing.
 */
static struct irqaction fpu_irq = {
	.handler = math_error_irq,
	.name = "fpu",
	.flags = IRQF_NO_THREAD,
};
#endif

/*
 * IRQ2 is cascade interrupt to second interrupt controller
 */
static struct irqaction irq2 = {
	.handler = no_action,
	.name = "cascade",
	.flags = IRQF_NO_THREAD,
};

/* Per-CPU vector -> irq translation table; -1 marks an unused vector. */
DEFINE_PER_CPU(vector_irq_t, vector_irq) = {
	[0 ... NR_VECTORS - 1] = -1,
};

/*
 * Return 1 if @vector is claimed by any online CPU's vector table,
 * 0 otherwise.
 */
int vector_used_by_percpu_irq(unsigned int vector)
{
	int cpu;

	for_each_online_cpu(cpu) {
		if (per_cpu(vector_irq, cpu)[vector] != -1)
			return 1;
	}

	return 0;
}

/*
 * Initialise the legacy (ISA/i8259-style) interrupt lines: set up the
 * boot-CPU APIC when present, init the legacy PIC, and install the
 * level-triggered flow handler on each legacy irq.
 */
void __init init_ISA_irqs(void)
{
	struct irq_chip *chip = legacy_pic->chip;
	const char *name = chip->name;
	int i;

#if defined(CONFIG_X86_64) || defined(CONFIG_X86_LOCAL_APIC)
	init_bsp_APIC();
#endif
	legacy_pic->init(0);

	for (i = 0; i < legacy_pic->nr_legacy_irqs; i++)
		irq_set_chip_and_handler_name(i, chip, handle_level_irq, name);
}

/* Top-level irq bring-up, called from start_kernel(). */
void __init init_IRQ(void)
{
	int i;

	/*
	 * We probably need a better place for this, but it works for
	 * now ...
	 */
	x86_add_irq_domains();

	/*
	 * On cpu 0, Assign IRQ0_VECTOR..IRQ15_VECTOR's to IRQ 0..15.
	 * If these IRQ's are handled by legacy interrupt-controllers like PIC,
	 * then this configuration will likely be static after the boot. If
	 * these IRQ's are handled by more mordern controllers like IO-APIC,
	 * then this vector space can be freed and re-used dynamically as the
	 * irq's migrate etc.
	 */
	for (i = 0; i < legacy_pic->nr_legacy_irqs; i++)
		per_cpu(vector_irq, 0)[IRQ0_VECTOR + i] = i;

	/* Platform-selected interrupt init (defaults to native_init_IRQ). */
	x86_init.irqs.intr_init();
}

/*
 * Setup the vector to irq mappings.
 */
void setup_vector_irq(int cpu)
{
#ifndef CONFIG_X86_IO_APIC
	int irq;

	/*
	 * On most of the platforms, legacy PIC delivers the interrupts on the
	 * boot cpu. But there are certain platforms where PIC interrupts are
	 * delivered to multiple cpu's. If the legacy IRQ is handled by the
	 * legacy PIC, for the new cpu that is coming online, setup the static
	 * legacy vector to irq mapping:
	 */
	for (irq = 0; irq < legacy_pic->nr_legacy_irqs; irq++)
		per_cpu(vector_irq, cpu)[IRQ0_VECTOR + irq] = irq;
#endif

	__setup_vector_irq(cpu);
}

/*
 * Install the SMP cross-call (IPI) gates.  Whole body is compiled out
 * on !SMP / !LOCAL_APIC configurations.
 */
static void __init smp_intr_init(void)
{
#ifdef CONFIG_SMP
#if defined(CONFIG_X86_64) || defined(CONFIG_X86_LOCAL_APIC)
	/*
	 * The reschedule interrupt is a CPU-to-CPU reschedule-helper
	 * IPI, driven by wakeup.
	 */
	alloc_intr_gate(RESCHEDULE_VECTOR, reschedule_interrupt);

	/* IPIs for invalidation */
#define ALLOC_INVTLB_VEC(NR) \
	alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+NR, \
		invalidate_interrupt##NR)

	/*
	 * Deliberate fallthrough: entering at case N allocates vectors
	 * N-1 down to 0, i.e. exactly NUM_INVALIDATE_TLB_VECTORS gates.
	 */
	switch (NUM_INVALIDATE_TLB_VECTORS) {
	default:
		ALLOC_INVTLB_VEC(31);
	case 31:
		ALLOC_INVTLB_VEC(30);
	case 30:
		ALLOC_INVTLB_VEC(29);
	case 29:
		ALLOC_INVTLB_VEC(28);
	case 28:
		ALLOC_INVTLB_VEC(27);
	case 27:
		ALLOC_INVTLB_VEC(26);
	case 26:
		ALLOC_INVTLB_VEC(25);
	case 25:
		ALLOC_INVTLB_VEC(24);
	case 24:
		ALLOC_INVTLB_VEC(23);
	case 23:
		ALLOC_INVTLB_VEC(22);
	case 22:
		ALLOC_INVTLB_VEC(21);
	case 21:
		ALLOC_INVTLB_VEC(20);
	case 20:
		ALLOC_INVTLB_VEC(19);
	case 19:
		ALLOC_INVTLB_VEC(18);
	case 18:
		ALLOC_INVTLB_VEC(17);
	case 17:
		ALLOC_INVTLB_VEC(16);
	case 16:
		ALLOC_INVTLB_VEC(15);
	case 15:
		ALLOC_INVTLB_VEC(14);
	case 14:
		ALLOC_INVTLB_VEC(13);
	case 13:
		ALLOC_INVTLB_VEC(12);
	case 12:
		ALLOC_INVTLB_VEC(11);
	case 11:
		ALLOC_INVTLB_VEC(10);
	case 10:
		ALLOC_INVTLB_VEC(9);
	case 9:
		ALLOC_INVTLB_VEC(8);
	case 8:
		ALLOC_INVTLB_VEC(7);
	case 7:
		ALLOC_INVTLB_VEC(6);
	case 6:
		ALLOC_INVTLB_VEC(5);
	case 5:
		ALLOC_INVTLB_VEC(4);
	case 4:
		ALLOC_INVTLB_VEC(3);
	case 3:
		ALLOC_INVTLB_VEC(2);
	case 2:
		ALLOC_INVTLB_VEC(1);
	case 1:
		ALLOC_INVTLB_VEC(0);
		break;
	}

	/* IPI for generic function call */
	alloc_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt);

	/* IPI for generic single function call */
	alloc_intr_gate(CALL_FUNCTION_SINGLE_VECTOR,
			call_function_single_interrupt);

	/* Low priority IPI to cleanup after moving an irq */
	set_intr_gate(IRQ_MOVE_CLEANUP_VECTOR, irq_move_cleanup_interrupt);
	set_bit(IRQ_MOVE_CLEANUP_VECTOR, used_vectors);

	/* IPI used for rebooting/stopping */
	alloc_intr_gate(REBOOT_VECTOR, reboot_interrupt);
#endif
#endif /* CONFIG_SMP */
}

/* Install gates for APIC-local interrupt sources (timer, errors, IPIs). */
static void __init apic_intr_init(void)
{
	smp_intr_init();

#ifdef CONFIG_X86_THERMAL_VECTOR
	alloc_intr_gate(THERMAL_APIC_VECTOR, thermal_interrupt);
#endif
#ifdef CONFIG_X86_MCE_THRESHOLD
	alloc_intr_gate(THRESHOLD_APIC_VECTOR, threshold_interrupt);
#endif

#if defined(CONFIG_X86_64) || defined(CONFIG_X86_LOCAL_APIC)
	/* self generated IPI for local APIC timer */
	alloc_intr_gate(LOCAL_TIMER_VECTOR, apic_timer_interrupt);

	/* IPI for X86 platform specific use */
	alloc_intr_gate(X86_PLATFORM_IPI_VECTOR, x86_platform_ipi);

	/* IPI vectors for APIC spurious and error interrupts */
	alloc_intr_gate(SPURIOUS_APIC_VECTOR, spurious_interrupt);
	alloc_intr_gate(ERROR_APIC_VECTOR, error_interrupt);

	/* IRQ work interrupts: */
# ifdef CONFIG_IRQ_WORK
	alloc_intr_gate(IRQ_WORK_VECTOR, irq_work_interrupt);
# endif

#endif
}

/*
 * Native (non-paravirt) interrupt initialisation: run quirks, install
 * APIC gates, then point every remaining external vector at its stub.
 */
void __init native_init_IRQ(void)
{
	int i;

	/* Execute any quirks before the call gates are initialised: */
	x86_init.irqs.pre_vector_init();

	apic_intr_init();

	/*
	 * Cover the whole vector space, no vector can escape
	 * us. (some of these will be overridden and become
	 * 'special' SMP interrupts)
	 */
	i = FIRST_EXTERNAL_VECTOR;
	for_each_clear_bit_from(i, used_vectors, NR_VECTORS) {
		/* IA32_SYSCALL_VECTOR could be used in trap_init already. */
		set_intr_gate(i, interrupt[i - FIRST_EXTERNAL_VECTOR]);
	}

	/* Claim the cascade line unless an IO-APIC/OF irq setup owns it. */
	if (!acpi_ioapic && !of_ioapic)
		setup_irq(2, &irq2);

#ifdef CONFIG_X86_32
	/*
	 * External FPU? Set up irq13 if so, for
	 * original braindamaged IBM FERR coupling.
	 */
	if (boot_cpu_data.hard_math && !cpu_has_fpu)
		setup_irq(FPU_IRQ, &fpu_irq);

	irq_ctx_init(smp_processor_id());
#endif
}
gpl-2.0
slukk/mako_msm
drivers/staging/iio/iio_dummy_evgen.c
4996
6223
/**
 * Copyright (c) 2011 Jonathan Cameron
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * Companion module to the iio simple dummy example driver.
 * The purpose of this is to generate 'fake' event interrupts thus
 * allowing that driver's code to be as close as possible to that of
 * a normal driver talking to hardware.  The approach used here
 * is not intended to be general and just happens to work for this
 * particular use case.
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/mutex.h>
#include <linux/module.h>
#include <linux/sysfs.h>

#include "iio_dummy_evgen.h"
#include "iio.h"
#include "sysfs.h"

/* Fiddly bit of faking and irq without hardware */
#define IIO_EVENTGEN_NO 10

/**
 * struct iio_dummy_eventgen - evgen state
 * @chip: irq chip we are faking
 * @base: base of irq range
 * @enabled: mask of which irqs are enabled
 * @inuse: mask of which irqs are connected
 * @lock: protect the evgen state
 */
struct iio_dummy_eventgen {
	struct irq_chip chip;
	int base;
	bool enabled[IIO_EVENTGEN_NO];
	bool inuse[IIO_EVENTGEN_NO];
	struct mutex lock;
};

/* We can only ever have one instance of this 'device' */
static struct iio_dummy_eventgen *iio_evgen;
static const char *iio_evgen_name = "iio_dummy_evgen";

/*
 * irq_chip mask callback: there is no hardware, so "masking" is just
 * recording that this fake irq should not fire.
 */
static void iio_dummy_event_irqmask(struct irq_data *d)
{
	struct irq_chip *chip = irq_data_get_irq_chip(d);
	struct iio_dummy_eventgen *evgen =
		container_of(chip, struct iio_dummy_eventgen, chip);

	evgen->enabled[d->irq - evgen->base] = false;
}

/* irq_chip unmask callback: mark the fake irq as allowed to fire. */
static void iio_dummy_event_irqunmask(struct irq_data *d)
{
	struct irq_chip *chip = irq_data_get_irq_chip(d);
	struct iio_dummy_eventgen *evgen =
		container_of(chip, struct iio_dummy_eventgen, chip);

	evgen->enabled[d->irq - evgen->base] = true;
}

/*
 * Allocate the singleton event generator: a block of IIO_EVENTGEN_NO
 * irq descriptors wired to our fake irq_chip.  Returns 0 on success or
 * a negative errno (allocation failure frees everything taken so far).
 */
static int iio_dummy_evgen_create(void)
{
	int ret, i;

	iio_evgen = kzalloc(sizeof(*iio_evgen), GFP_KERNEL);
	if (iio_evgen == NULL)
		return -ENOMEM;

	iio_evgen->base = irq_alloc_descs(-1, 0, IIO_EVENTGEN_NO, 0);
	if (iio_evgen->base < 0) {
		ret = iio_evgen->base;
		kfree(iio_evgen);
		return ret;
	}
	iio_evgen->chip.name = iio_evgen_name;
	iio_evgen->chip.irq_mask = &iio_dummy_event_irqmask;
	iio_evgen->chip.irq_unmask = &iio_dummy_event_irqunmask;
	for (i = 0; i < IIO_EVENTGEN_NO; i++) {
		irq_set_chip(iio_evgen->base + i, &iio_evgen->chip);
		irq_set_handler(iio_evgen->base + i, &handle_simple_irq);
		irq_modify_status(iio_evgen->base + i,
				  IRQ_NOREQUEST | IRQ_NOAUTOEN,
				  IRQ_NOPROBE);
	}
	mutex_init(&iio_evgen->lock);
	return 0;
}

/**
 * iio_dummy_evgen_get_irq() - get an evgen provided irq for a device
 *
 * This function will give a free allocated irq to a client device.
 * That irq can then be caused to 'fire' by using the associated sysfs file.
 * Returns the irq number, -ENODEV if the generator was never created, or
 * -ENOMEM when the whole pool is in use.
 */
int iio_dummy_evgen_get_irq(void)
{
	int i, ret = 0;

	if (iio_evgen == NULL)
		return -ENODEV;

	mutex_lock(&iio_evgen->lock);
	for (i = 0; i < IIO_EVENTGEN_NO; i++)
		if (iio_evgen->inuse[i] == false) {
			ret = iio_evgen->base + i;
			iio_evgen->inuse[i] = true;
			break;
		}
	mutex_unlock(&iio_evgen->lock);
	/* i == IIO_EVENTGEN_NO means the scan found no free slot. */
	if (i == IIO_EVENTGEN_NO)
		return -ENOMEM;
	return ret;
}
EXPORT_SYMBOL_GPL(iio_dummy_evgen_get_irq);

/**
 * iio_dummy_evgen_release_irq() - give the irq back.
 * @irq: irq being returned to the pool
 *
 * Used by client driver instances to give the irqs back when they disconnect
 */
int iio_dummy_evgen_release_irq(int irq)
{
	mutex_lock(&iio_evgen->lock);
	iio_evgen->inuse[irq - iio_evgen->base] = false;
	mutex_unlock(&iio_evgen->lock);

	return 0;
}
EXPORT_SYMBOL_GPL(iio_dummy_evgen_release_irq);

/* Tear down the irq descriptors and free the singleton state. */
static void iio_dummy_evgen_free(void)
{
	irq_free_descs(iio_evgen->base, IIO_EVENTGEN_NO);
	kfree(iio_evgen);
}

/* struct device release callback - last reference dropped. */
static void iio_evgen_release(struct device *dev)
{
	iio_dummy_evgen_free();
}

/*
 * sysfs store: "poke" one fake irq.  The attribute's address selects
 * which irq; it only fires if currently unmasked (enabled).
 */
static ssize_t iio_evgen_poke(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf,
			      size_t len)
{
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);

	if (iio_evgen->enabled[this_attr->address])
		handle_nested_irq(iio_evgen->base + this_attr->address);

	return len;
}

static IIO_DEVICE_ATTR(poke_ev0, S_IWUSR, NULL, &iio_evgen_poke, 0);
static IIO_DEVICE_ATTR(poke_ev1, S_IWUSR, NULL, &iio_evgen_poke, 1);
static IIO_DEVICE_ATTR(poke_ev2, S_IWUSR, NULL, &iio_evgen_poke, 2);
static IIO_DEVICE_ATTR(poke_ev3, S_IWUSR, NULL, &iio_evgen_poke, 3);
static IIO_DEVICE_ATTR(poke_ev4, S_IWUSR, NULL, &iio_evgen_poke, 4);
static IIO_DEVICE_ATTR(poke_ev5, S_IWUSR, NULL, &iio_evgen_poke, 5);
static IIO_DEVICE_ATTR(poke_ev6, S_IWUSR, NULL, &iio_evgen_poke, 6);
static IIO_DEVICE_ATTR(poke_ev7, S_IWUSR, NULL, &iio_evgen_poke, 7);
static IIO_DEVICE_ATTR(poke_ev8, S_IWUSR, NULL, &iio_evgen_poke, 8);
static IIO_DEVICE_ATTR(poke_ev9, S_IWUSR, NULL, &iio_evgen_poke, 9);

static struct attribute *iio_evgen_attrs[] = {
	&iio_dev_attr_poke_ev0.dev_attr.attr,
	&iio_dev_attr_poke_ev1.dev_attr.attr,
	&iio_dev_attr_poke_ev2.dev_attr.attr,
	&iio_dev_attr_poke_ev3.dev_attr.attr,
	&iio_dev_attr_poke_ev4.dev_attr.attr,
	&iio_dev_attr_poke_ev5.dev_attr.attr,
	&iio_dev_attr_poke_ev6.dev_attr.attr,
	&iio_dev_attr_poke_ev7.dev_attr.attr,
	&iio_dev_attr_poke_ev8.dev_attr.attr,
	&iio_dev_attr_poke_ev9.dev_attr.attr,
	NULL,
};

static const struct attribute_group iio_evgen_group = {
	.attrs = iio_evgen_attrs,
};

static const struct attribute_group *iio_evgen_groups[] = {
	&iio_evgen_group,
	NULL
};

static struct device iio_evgen_dev = {
	.bus = &iio_bus_type,
	.groups = iio_evgen_groups,
	.release = &iio_evgen_release,
};

/* Module init: create the generator, then register the sysfs device. */
static __init int iio_dummy_evgen_init(void)
{
	int ret = iio_dummy_evgen_create();

	if (ret < 0)
		return ret;

	device_initialize(&iio_evgen_dev);
	dev_set_name(&iio_evgen_dev, "iio_evgen");
	return device_add(&iio_evgen_dev);
}
module_init(iio_dummy_evgen_init);

/* Module exit: unregister; freeing happens via iio_evgen_release. */
static __exit void iio_dummy_evgen_exit(void)
{
	device_unregister(&iio_evgen_dev);
}
module_exit(iio_dummy_evgen_exit);

MODULE_AUTHOR("Jonathan Cameron <jic23@cam.ac.uk>");
MODULE_DESCRIPTION("IIO dummy driver");
MODULE_LICENSE("GPL v2");
gpl-2.0
Renzo-Olivares/android_kernel_htc_m7wlv
security/integrity/ima/ima_api.c
4996
5603
/*
 * Copyright (C) 2008 IBM Corporation
 *
 * Author: Mimi Zohar <zohar@us.ibm.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation, version 2 of the
 * License.
 *
 * File: ima_api.c
 *	Implements must_measure, collect_measurement, store_measurement,
 *	and store_template.
 */
#include <linux/module.h>
#include <linux/slab.h>

#include "ima.h"

static const char *IMA_TEMPLATE_NAME = "ima";

/*
 * ima_store_template - store ima template measurements
 *
 * Calculate the hash of a template entry, add the template entry
 * to an ordered list of measurement entries maintained inside the kernel,
 * and also update the aggregate integrity value (maintained inside the
 * configured TPM PCR) over the hashes of the current list of measurement
 * entries.
 *
 * Applications retrieve the current kernel-held measurement list through
 * the securityfs entries in /sys/kernel/security/ima. The signed aggregate
 * TPM PCR (called quote) can be retrieved using a TPM user space library
 * and is used to validate the measurement list.
 *
 * Returns 0 on success, error code otherwise
 */
int ima_store_template(struct ima_template_entry *entry,
		       int violation, struct inode *inode)
{
	const char *op = "add_template_measure";
	const char *audit_cause = "hashing_error";
	int result;

	/* Violations keep a zeroed digest; only real entries are hashed. */
	memset(entry->digest, 0, sizeof(entry->digest));
	entry->template_name = IMA_TEMPLATE_NAME;
	entry->template_len = sizeof(entry->template);

	if (!violation) {
		result = ima_calc_template_hash(entry->template_len,
						&entry->template,
						entry->digest);
		if (result < 0) {
			integrity_audit_msg(AUDIT_INTEGRITY_PCR, inode,
					    entry->template_name, op,
					    audit_cause, result, 0);
			return result;
		}
	}
	result = ima_add_template_entry(entry, violation, op, inode);
	return result;
}

/*
 * ima_add_violation - add violation to measurement list.
 *
 * Violations are flagged in the measurement list with zero hash values.
 * By extending the PCR with 0xFF's instead of with zeroes, the PCR
 * value is invalidated.
 */
void ima_add_violation(struct inode *inode, const unsigned char *filename,
		       const char *op, const char *cause)
{
	struct ima_template_entry *entry;
	int violation = 1;
	int result;

	/* can overflow, only indicator */
	atomic_long_inc(&ima_htable.violations);

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry) {
		result = -ENOMEM;
		goto err_out;
	}
	memset(&entry->template, 0, sizeof(entry->template));
	strncpy(entry->template.file_name, filename, IMA_EVENT_NAME_LEN_MAX);
	result = ima_store_template(entry, violation, inode);
	/* entry ownership passes to the measurement list on success. */
	if (result < 0)
		kfree(entry);
err_out:
	integrity_audit_msg(AUDIT_INTEGRITY_PCR, inode, filename,
			    op, cause, result, 0);
}

/**
 * ima_must_measure - measure decision based on policy.
 * @inode: pointer to inode to measure
 * @mask: contains the permission mask (MAY_READ, MAY_WRITE, MAY_EXECUTE)
 * @function: calling function (FILE_CHECK, BPRM_CHECK, FILE_MMAP)
 *
 * The policy is defined in terms of keypairs:
 *		subj=, obj=, type=, func=, mask=, fsmagic=
 *	subj,obj, and type: are LSM specific.
 *	func: FILE_CHECK | BPRM_CHECK | FILE_MMAP
 *	mask: contains the permission mask
 *	fsmagic: hex value
 *
 * Return 0 to measure. For matching a DONT_MEASURE policy, no policy,
 * or other error, return an error code.
 */
int ima_must_measure(struct inode *inode, int mask, int function)
{
	int must_measure;

	must_measure = ima_match_policy(inode, function, mask);
	return must_measure ? 0 : -EACCES;
}

/*
 * ima_collect_measurement - collect file measurement
 *
 * Calculate the file hash, if it doesn't already exist,
 * storing the measurement and i_version in the iint.
 *
 * Must be called with iint->mutex held.
 *
 * Return 0 on success, error code otherwise
 */
int ima_collect_measurement(struct integrity_iint_cache *iint,
			    struct file *file)
{
	/* -EEXIST signals "already measured, nothing collected". */
	int result = -EEXIST;

	if (!(iint->flags & IMA_MEASURED)) {
		u64 i_version = file->f_dentry->d_inode->i_version;

		memset(iint->digest, 0, IMA_DIGEST_SIZE);
		result = ima_calc_hash(file, iint->digest);
		if (!result)
			iint->version = i_version;
	}
	return result;
}

/*
 * ima_store_measurement - store file measurement
 *
 * Create an "ima" template and then store the template by calling
 * ima_store_template.
 *
 * We only get here if the inode has not already been measured,
 * but the measurement could already exist:
 *	- multiple copies of the same file on either the same or
 *	  different filesystems.
 *	- the inode was previously flushed as well as the iint info,
 *	  containing the hashing info.
 *
 * Must be called with iint->mutex held.
 */
void ima_store_measurement(struct integrity_iint_cache *iint,
			   struct file *file, const unsigned char *filename)
{
	const char *op = "add_template_measure";
	const char *audit_cause = "ENOMEM";
	int result = -ENOMEM;
	struct inode *inode = file->f_dentry->d_inode;
	struct ima_template_entry *entry;
	int violation = 0;

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry) {
		integrity_audit_msg(AUDIT_INTEGRITY_PCR, inode, filename,
				    op, audit_cause, result, 0);
		return;
	}
	memset(&entry->template, 0, sizeof(entry->template));
	memcpy(entry->template.digest, iint->digest, IMA_DIGEST_SIZE);
	strncpy(entry->template.file_name, filename, IMA_EVENT_NAME_LEN_MAX);

	result = ima_store_template(entry, violation, inode);
	/* -EEXIST means an identical measurement is already on the list;
	 * either way the inode now counts as measured. */
	if (!result || result == -EEXIST)
		iint->flags |= IMA_MEASURED;
	if (result < 0)
		kfree(entry);
}
gpl-2.0
g7755725/Fitsugly
drivers/ata/sata_sx4.c
5508
40430
/* * sata_sx4.c - Promise SATA * * Maintained by: Jeff Garzik <jgarzik@pobox.com> * Please ALWAYS copy linux-ide@vger.kernel.org * on emails. * * Copyright 2003-2004 Red Hat, Inc. * * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; see the file COPYING. If not, write to * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. * * * libata documentation is available via 'make {ps|pdf}docs', * as Documentation/DocBook/libata.* * * Hardware documentation available under NDA. * */ /* Theory of operation ------------------- The SX4 (PDC20621) chip features a single Host DMA (HDMA) copy engine, DIMM memory, and four ATA engines (one per SATA port). Data is copied to/from DIMM memory by the HDMA engine, before handing off to one (or more) of the ATA engines. The ATA engines operate solely on DIMM memory. The SX4 behaves like a PATA chip, with no SATA controls or knowledge whatsoever, leading to the presumption that PATA<->SATA bridges exist on SX4 boards, external to the PDC20621 chip itself. The chip is quite capable, supporting an XOR engine and linked hardware commands (permits a string to transactions to be submitted and waited-on as a single unit), and an optional microprocessor. The limiting factor is largely software. This Linux driver was written to multiplex the single HDMA engine to copy disk transactions into a fixed DIMM memory space, from where an ATA engine takes over. 
As a result, each WRITE looks like this: submit HDMA packet to hardware hardware copies data from system memory to DIMM hardware raises interrupt submit ATA packet to hardware hardware executes ATA WRITE command, w/ data in DIMM hardware raises interrupt and each READ looks like this: submit ATA packet to hardware hardware executes ATA READ command, w/ data in DIMM hardware raises interrupt submit HDMA packet to hardware hardware copies data from DIMM to system memory hardware raises interrupt This is a very slow, lock-step way of doing things that can certainly be improved by motivated kernel hackers. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/blkdev.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/device.h> #include <scsi/scsi_host.h> #include <scsi/scsi_cmnd.h> #include <linux/libata.h> #include "sata_promise.h" #define DRV_NAME "sata_sx4" #define DRV_VERSION "0.12" enum { PDC_MMIO_BAR = 3, PDC_DIMM_BAR = 4, PDC_PRD_TBL = 0x44, /* Direct command DMA table addr */ PDC_PKT_SUBMIT = 0x40, /* Command packet pointer addr */ PDC_HDMA_PKT_SUBMIT = 0x100, /* Host DMA packet pointer addr */ PDC_INT_SEQMASK = 0x40, /* Mask of asserted SEQ INTs */ PDC_HDMA_CTLSTAT = 0x12C, /* Host DMA control / status */ PDC_CTLSTAT = 0x60, /* IDEn control / status */ PDC_20621_SEQCTL = 0x400, PDC_20621_SEQMASK = 0x480, PDC_20621_GENERAL_CTL = 0x484, PDC_20621_PAGE_SIZE = (32 * 1024), /* chosen, not constant, values; we design our own DIMM mem map */ PDC_20621_DIMM_WINDOW = 0x0C, /* page# for 32K DIMM window */ PDC_20621_DIMM_BASE = 0x00200000, PDC_20621_DIMM_DATA = (64 * 1024), PDC_DIMM_DATA_STEP = (256 * 1024), PDC_DIMM_WINDOW_STEP = (8 * 1024), PDC_DIMM_HOST_PRD = (6 * 1024), PDC_DIMM_HOST_PKT = (128 * 0), PDC_DIMM_HPKT_PRD = (128 * 1), PDC_DIMM_ATA_PKT = (128 * 2), PDC_DIMM_APKT_PRD = (128 * 3), PDC_DIMM_HEADER_SZ = PDC_DIMM_APKT_PRD + 128, PDC_PAGE_WINDOW = 0x40, 
PDC_PAGE_DATA = PDC_PAGE_WINDOW + (PDC_20621_DIMM_DATA / PDC_20621_PAGE_SIZE), PDC_PAGE_SET = PDC_DIMM_DATA_STEP / PDC_20621_PAGE_SIZE, PDC_CHIP0_OFS = 0xC0000, /* offset of chip #0 */ PDC_20621_ERR_MASK = (1<<19) | (1<<20) | (1<<21) | (1<<22) | (1<<23), board_20621 = 0, /* FastTrak S150 SX4 */ PDC_MASK_INT = (1 << 10), /* HDMA/ATA mask int */ PDC_RESET = (1 << 11), /* HDMA/ATA reset */ PDC_DMA_ENABLE = (1 << 7), /* DMA start/stop */ PDC_MAX_HDMA = 32, PDC_HDMA_Q_MASK = (PDC_MAX_HDMA - 1), PDC_DIMM0_SPD_DEV_ADDRESS = 0x50, PDC_DIMM1_SPD_DEV_ADDRESS = 0x51, PDC_I2C_CONTROL = 0x48, PDC_I2C_ADDR_DATA = 0x4C, PDC_DIMM0_CONTROL = 0x80, PDC_DIMM1_CONTROL = 0x84, PDC_SDRAM_CONTROL = 0x88, PDC_I2C_WRITE = 0, /* master -> slave */ PDC_I2C_READ = (1 << 6), /* master <- slave */ PDC_I2C_START = (1 << 7), /* start I2C proto */ PDC_I2C_MASK_INT = (1 << 5), /* mask I2C interrupt */ PDC_I2C_COMPLETE = (1 << 16), /* I2C normal compl. */ PDC_I2C_NO_ACK = (1 << 20), /* slave no-ack addr */ PDC_DIMM_SPD_SUBADDRESS_START = 0x00, PDC_DIMM_SPD_SUBADDRESS_END = 0x7F, PDC_DIMM_SPD_ROW_NUM = 3, PDC_DIMM_SPD_COLUMN_NUM = 4, PDC_DIMM_SPD_MODULE_ROW = 5, PDC_DIMM_SPD_TYPE = 11, PDC_DIMM_SPD_FRESH_RATE = 12, PDC_DIMM_SPD_BANK_NUM = 17, PDC_DIMM_SPD_CAS_LATENCY = 18, PDC_DIMM_SPD_ATTRIBUTE = 21, PDC_DIMM_SPD_ROW_PRE_CHARGE = 27, PDC_DIMM_SPD_ROW_ACTIVE_DELAY = 28, PDC_DIMM_SPD_RAS_CAS_DELAY = 29, PDC_DIMM_SPD_ACTIVE_PRECHARGE = 30, PDC_DIMM_SPD_SYSTEM_FREQ = 126, PDC_CTL_STATUS = 0x08, PDC_DIMM_WINDOW_CTLR = 0x0C, PDC_TIME_CONTROL = 0x3C, PDC_TIME_PERIOD = 0x40, PDC_TIME_COUNTER = 0x44, PDC_GENERAL_CTLR = 0x484, PCI_PLL_INIT = 0x8A531824, PCI_X_TCOUNT = 0xEE1E5CFF, /* PDC_TIME_CONTROL bits */ PDC_TIMER_BUZZER = (1 << 10), PDC_TIMER_MODE_PERIODIC = 0, /* bits 9:8 == 00 */ PDC_TIMER_MODE_ONCE = (1 << 8), /* bits 9:8 == 01 */ PDC_TIMER_ENABLE = (1 << 7), PDC_TIMER_MASK_INT = (1 << 5), PDC_TIMER_SEQ_MASK = 0x1f, /* SEQ ID for timer */ PDC_TIMER_DEFAULT = PDC_TIMER_MODE_ONCE | PDC_TIMER_ENABLE | 
PDC_TIMER_MASK_INT, }; #define ECC_ERASE_BUF_SZ (128 * 1024) struct pdc_port_priv { u8 dimm_buf[(ATA_PRD_SZ * ATA_MAX_PRD) + 512]; u8 *pkt; dma_addr_t pkt_dma; }; struct pdc_host_priv { unsigned int doing_hdma; unsigned int hdma_prod; unsigned int hdma_cons; struct { struct ata_queued_cmd *qc; unsigned int seq; unsigned long pkt_ofs; } hdma[32]; }; static int pdc_sata_init_one(struct pci_dev *pdev, const struct pci_device_id *ent); static void pdc_error_handler(struct ata_port *ap); static void pdc_freeze(struct ata_port *ap); static void pdc_thaw(struct ata_port *ap); static int pdc_port_start(struct ata_port *ap); static void pdc20621_qc_prep(struct ata_queued_cmd *qc); static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf); static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf); static unsigned int pdc20621_dimm_init(struct ata_host *host); static int pdc20621_detect_dimm(struct ata_host *host); static unsigned int pdc20621_i2c_read(struct ata_host *host, u32 device, u32 subaddr, u32 *pdata); static int pdc20621_prog_dimm0(struct ata_host *host); static unsigned int pdc20621_prog_dimm_global(struct ata_host *host); #ifdef ATA_VERBOSE_DEBUG static void pdc20621_get_from_dimm(struct ata_host *host, void *psource, u32 offset, u32 size); #endif static void pdc20621_put_to_dimm(struct ata_host *host, void *psource, u32 offset, u32 size); static void pdc20621_irq_clear(struct ata_port *ap); static unsigned int pdc20621_qc_issue(struct ata_queued_cmd *qc); static int pdc_softreset(struct ata_link *link, unsigned int *class, unsigned long deadline); static void pdc_post_internal_cmd(struct ata_queued_cmd *qc); static int pdc_check_atapi_dma(struct ata_queued_cmd *qc); static struct scsi_host_template pdc_sata_sht = { ATA_BASE_SHT(DRV_NAME), .sg_tablesize = LIBATA_MAX_PRD, .dma_boundary = ATA_DMA_BOUNDARY, }; /* TODO: inherit from base port_ops after converting to new EH */ static struct ata_port_operations 
pdc_20621_ops = { .inherits = &ata_sff_port_ops, .check_atapi_dma = pdc_check_atapi_dma, .qc_prep = pdc20621_qc_prep, .qc_issue = pdc20621_qc_issue, .freeze = pdc_freeze, .thaw = pdc_thaw, .softreset = pdc_softreset, .error_handler = pdc_error_handler, .lost_interrupt = ATA_OP_NULL, .post_internal_cmd = pdc_post_internal_cmd, .port_start = pdc_port_start, .sff_tf_load = pdc_tf_load_mmio, .sff_exec_command = pdc_exec_command_mmio, .sff_irq_clear = pdc20621_irq_clear, }; static const struct ata_port_info pdc_port_info[] = { /* board_20621 */ { .flags = ATA_FLAG_SATA | ATA_FLAG_NO_ATAPI | ATA_FLAG_PIO_POLLING, .pio_mask = ATA_PIO4, .mwdma_mask = ATA_MWDMA2, .udma_mask = ATA_UDMA6, .port_ops = &pdc_20621_ops, }, }; static const struct pci_device_id pdc_sata_pci_tbl[] = { { PCI_VDEVICE(PROMISE, 0x6622), board_20621 }, { } /* terminate list */ }; static struct pci_driver pdc_sata_pci_driver = { .name = DRV_NAME, .id_table = pdc_sata_pci_tbl, .probe = pdc_sata_init_one, .remove = ata_pci_remove_one, }; static int pdc_port_start(struct ata_port *ap) { struct device *dev = ap->host->dev; struct pdc_port_priv *pp; pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL); if (!pp) return -ENOMEM; pp->pkt = dmam_alloc_coherent(dev, 128, &pp->pkt_dma, GFP_KERNEL); if (!pp->pkt) return -ENOMEM; ap->private_data = pp; return 0; } static inline void pdc20621_ata_sg(struct ata_taskfile *tf, u8 *buf, unsigned int portno, unsigned int total_len) { u32 addr; unsigned int dw = PDC_DIMM_APKT_PRD >> 2; __le32 *buf32 = (__le32 *) buf; /* output ATA packet S/G table */ addr = PDC_20621_DIMM_BASE + PDC_20621_DIMM_DATA + (PDC_DIMM_DATA_STEP * portno); VPRINTK("ATA sg addr 0x%x, %d\n", addr, addr); buf32[dw] = cpu_to_le32(addr); buf32[dw + 1] = cpu_to_le32(total_len | ATA_PRD_EOT); VPRINTK("ATA PSG @ %x == (0x%x, 0x%x)\n", PDC_20621_DIMM_BASE + (PDC_DIMM_WINDOW_STEP * portno) + PDC_DIMM_APKT_PRD, buf32[dw], buf32[dw + 1]); } static inline void pdc20621_host_sg(struct ata_taskfile *tf, u8 *buf, 
unsigned int portno, unsigned int total_len) { u32 addr; unsigned int dw = PDC_DIMM_HPKT_PRD >> 2; __le32 *buf32 = (__le32 *) buf; /* output Host DMA packet S/G table */ addr = PDC_20621_DIMM_BASE + PDC_20621_DIMM_DATA + (PDC_DIMM_DATA_STEP * portno); buf32[dw] = cpu_to_le32(addr); buf32[dw + 1] = cpu_to_le32(total_len | ATA_PRD_EOT); VPRINTK("HOST PSG @ %x == (0x%x, 0x%x)\n", PDC_20621_DIMM_BASE + (PDC_DIMM_WINDOW_STEP * portno) + PDC_DIMM_HPKT_PRD, buf32[dw], buf32[dw + 1]); } static inline unsigned int pdc20621_ata_pkt(struct ata_taskfile *tf, unsigned int devno, u8 *buf, unsigned int portno) { unsigned int i, dw; __le32 *buf32 = (__le32 *) buf; u8 dev_reg; unsigned int dimm_sg = PDC_20621_DIMM_BASE + (PDC_DIMM_WINDOW_STEP * portno) + PDC_DIMM_APKT_PRD; VPRINTK("ENTER, dimm_sg == 0x%x, %d\n", dimm_sg, dimm_sg); i = PDC_DIMM_ATA_PKT; /* * Set up ATA packet */ if ((tf->protocol == ATA_PROT_DMA) && (!(tf->flags & ATA_TFLAG_WRITE))) buf[i++] = PDC_PKT_READ; else if (tf->protocol == ATA_PROT_NODATA) buf[i++] = PDC_PKT_NODATA; else buf[i++] = 0; buf[i++] = 0; /* reserved */ buf[i++] = portno + 1; /* seq. id */ buf[i++] = 0xff; /* delay seq. 
id */ /* dimm dma S/G, and next-pkt */ dw = i >> 2; if (tf->protocol == ATA_PROT_NODATA) buf32[dw] = 0; else buf32[dw] = cpu_to_le32(dimm_sg); buf32[dw + 1] = 0; i += 8; if (devno == 0) dev_reg = ATA_DEVICE_OBS; else dev_reg = ATA_DEVICE_OBS | ATA_DEV1; /* select device */ buf[i++] = (1 << 5) | PDC_PKT_CLEAR_BSY | ATA_REG_DEVICE; buf[i++] = dev_reg; /* device control register */ buf[i++] = (1 << 5) | PDC_REG_DEVCTL; buf[i++] = tf->ctl; return i; } static inline void pdc20621_host_pkt(struct ata_taskfile *tf, u8 *buf, unsigned int portno) { unsigned int dw; u32 tmp; __le32 *buf32 = (__le32 *) buf; unsigned int host_sg = PDC_20621_DIMM_BASE + (PDC_DIMM_WINDOW_STEP * portno) + PDC_DIMM_HOST_PRD; unsigned int dimm_sg = PDC_20621_DIMM_BASE + (PDC_DIMM_WINDOW_STEP * portno) + PDC_DIMM_HPKT_PRD; VPRINTK("ENTER, dimm_sg == 0x%x, %d\n", dimm_sg, dimm_sg); VPRINTK("host_sg == 0x%x, %d\n", host_sg, host_sg); dw = PDC_DIMM_HOST_PKT >> 2; /* * Set up Host DMA packet */ if ((tf->protocol == ATA_PROT_DMA) && (!(tf->flags & ATA_TFLAG_WRITE))) tmp = PDC_PKT_READ; else tmp = 0; tmp |= ((portno + 1 + 4) << 16); /* seq. id */ tmp |= (0xff << 24); /* delay seq. 
id */ buf32[dw + 0] = cpu_to_le32(tmp); buf32[dw + 1] = cpu_to_le32(host_sg); buf32[dw + 2] = cpu_to_le32(dimm_sg); buf32[dw + 3] = 0; VPRINTK("HOST PKT @ %x == (0x%x 0x%x 0x%x 0x%x)\n", PDC_20621_DIMM_BASE + (PDC_DIMM_WINDOW_STEP * portno) + PDC_DIMM_HOST_PKT, buf32[dw + 0], buf32[dw + 1], buf32[dw + 2], buf32[dw + 3]); } static void pdc20621_dma_prep(struct ata_queued_cmd *qc) { struct scatterlist *sg; struct ata_port *ap = qc->ap; struct pdc_port_priv *pp = ap->private_data; void __iomem *mmio = ap->host->iomap[PDC_MMIO_BAR]; void __iomem *dimm_mmio = ap->host->iomap[PDC_DIMM_BAR]; unsigned int portno = ap->port_no; unsigned int i, si, idx, total_len = 0, sgt_len; __le32 *buf = (__le32 *) &pp->dimm_buf[PDC_DIMM_HEADER_SZ]; WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP)); VPRINTK("ata%u: ENTER\n", ap->print_id); /* hard-code chip #0 */ mmio += PDC_CHIP0_OFS; /* * Build S/G table */ idx = 0; for_each_sg(qc->sg, sg, qc->n_elem, si) { buf[idx++] = cpu_to_le32(sg_dma_address(sg)); buf[idx++] = cpu_to_le32(sg_dma_len(sg)); total_len += sg_dma_len(sg); } buf[idx - 1] |= cpu_to_le32(ATA_PRD_EOT); sgt_len = idx * 4; /* * Build ATA, host DMA packets */ pdc20621_host_sg(&qc->tf, &pp->dimm_buf[0], portno, total_len); pdc20621_host_pkt(&qc->tf, &pp->dimm_buf[0], portno); pdc20621_ata_sg(&qc->tf, &pp->dimm_buf[0], portno, total_len); i = pdc20621_ata_pkt(&qc->tf, qc->dev->devno, &pp->dimm_buf[0], portno); if (qc->tf.flags & ATA_TFLAG_LBA48) i = pdc_prep_lba48(&qc->tf, &pp->dimm_buf[0], i); else i = pdc_prep_lba28(&qc->tf, &pp->dimm_buf[0], i); pdc_pkt_footer(&qc->tf, &pp->dimm_buf[0], i); /* copy three S/G tables and two packets to DIMM MMIO window */ memcpy_toio(dimm_mmio + (portno * PDC_DIMM_WINDOW_STEP), &pp->dimm_buf, PDC_DIMM_HEADER_SZ); memcpy_toio(dimm_mmio + (portno * PDC_DIMM_WINDOW_STEP) + PDC_DIMM_HOST_PRD, &pp->dimm_buf[PDC_DIMM_HEADER_SZ], sgt_len); /* force host FIFO dump */ writel(0x00000001, mmio + PDC_20621_GENERAL_CTL); readl(dimm_mmio); /* MMIO PCI posting flush 
*/
	VPRINTK("ata pkt buf ofs %u, prd size %u, mmio copied\n", i, sgt_len);
}

/**
 *	pdc20621_nodata_prep - build the board packet for a non-data command
 *	@qc: command to prepare
 *
 *	Builds only the ATA packet (no S/G tables and no host DMA packet)
 *	in the per-port staging buffer pp->dimm_buf, appends the LBA28/48
 *	taskfile registers and the packet footer, then copies the header
 *	area into this port's window of the board-local DIMM via MMIO.
 */
static void pdc20621_nodata_prep(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct pdc_port_priv *pp = ap->private_data;
	void __iomem *mmio = ap->host->iomap[PDC_MMIO_BAR];
	void __iomem *dimm_mmio = ap->host->iomap[PDC_DIMM_BAR];
	unsigned int portno = ap->port_no;
	unsigned int i;

	VPRINTK("ata%u: ENTER\n", ap->print_id);

	/* hard-code chip #0 */
	mmio += PDC_CHIP0_OFS;

	i = pdc20621_ata_pkt(&qc->tf, qc->dev->devno, &pp->dimm_buf[0], portno);

	if (qc->tf.flags & ATA_TFLAG_LBA48)
		i = pdc_prep_lba48(&qc->tf, &pp->dimm_buf[0], i);
	else
		i = pdc_prep_lba28(&qc->tf, &pp->dimm_buf[0], i);

	pdc_pkt_footer(&qc->tf, &pp->dimm_buf[0], i);

	/* copy three S/G tables and two packets to DIMM MMIO window */
	memcpy_toio(dimm_mmio + (portno * PDC_DIMM_WINDOW_STEP),
		    &pp->dimm_buf, PDC_DIMM_HEADER_SZ);

	/* force host FIFO dump */
	writel(0x00000001, mmio + PDC_20621_GENERAL_CTL);

	readl(dimm_mmio);	/* MMIO PCI posting flush */

	VPRINTK("ata pkt buf ofs %u, mmio copied\n", i);
}

/**
 *	pdc20621_qc_prep - dispatch packet construction by taskfile protocol
 *	@qc: command to prepare
 *
 *	Only DMA and non-data protocols get board packets here; any other
 *	protocol is left untouched and handled at issue time.
 */
static void pdc20621_qc_prep(struct ata_queued_cmd *qc)
{
	switch (qc->tf.protocol) {
	case ATA_PROT_DMA:
		pdc20621_dma_prep(qc);
		break;
	case ATA_PROT_NODATA:
		pdc20621_nodata_prep(qc);
		break;
	default:
		break;
	}
}

/*
 * __pdc20621_push_hdma - start one host-DMA transaction immediately.
 * Arms sequencer slot @seq, then writes the DIMM offset of the host DMA
 * packet to the submit register.  Each write is followed by a read-back
 * to flush PCI posting before the next step.
 */
static void __pdc20621_push_hdma(struct ata_queued_cmd *qc, unsigned int seq,
				 u32 pkt_ofs)
{
	struct ata_port *ap = qc->ap;
	struct ata_host *host = ap->host;
	void __iomem *mmio = host->iomap[PDC_MMIO_BAR];

	/* hard-code chip #0 */
	mmio += PDC_CHIP0_OFS;

	writel(0x00000001, mmio + PDC_20621_SEQCTL + (seq * 4));
	readl(mmio + PDC_20621_SEQCTL + (seq * 4));	/* flush */

	writel(pkt_ofs, mmio + PDC_HDMA_PKT_SUBMIT);
	readl(mmio + PDC_HDMA_PKT_SUBMIT);	/* flush */
}

/*
 * pdc20621_push_hdma - submit a host-DMA transaction, or queue it in the
 * host-private ring if the single HDMA engine is already busy; queued
 * entries are drained later by pdc20621_pop_hdma().
 * NOTE(review): callers visible here run under host->lock (interrupt
 * handler) -- confirm the locking contract against the full driver.
 */
static void pdc20621_push_hdma(struct ata_queued_cmd *qc, unsigned int seq,
			       u32 pkt_ofs)
{
	struct ata_port *ap = qc->ap;
	struct pdc_host_priv *pp = ap->host->private_data;
	unsigned int idx = pp->hdma_prod & PDC_HDMA_Q_MASK;

	if (!pp->doing_hdma) {
		__pdc20621_push_hdma(qc, seq,
pkt_ofs);
		pp->doing_hdma = 1;
		return;
	}

	/* engine busy: park the request in the ring buffer */
	pp->hdma[idx].qc = qc;
	pp->hdma[idx].seq = seq;
	pp->hdma[idx].pkt_ofs = pkt_ofs;
	pp->hdma_prod++;
}

/*
 * pdc20621_pop_hdma - retire the current HDMA transaction and start the
 * next queued one, if any.  Clears doing_hdma when the ring is empty so
 * the next push goes straight to the hardware.
 */
static void pdc20621_pop_hdma(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct pdc_host_priv *pp = ap->host->private_data;
	unsigned int idx = pp->hdma_cons & PDC_HDMA_Q_MASK;

	/* if nothing on queue, we're done */
	if (pp->hdma_prod == pp->hdma_cons) {
		pp->doing_hdma = 0;
		return;
	}

	__pdc20621_push_hdma(pp->hdma[idx].qc, pp->hdma[idx].seq,
			     pp->hdma[idx].pkt_ofs);
	pp->hdma_cons++;
}

#ifdef ATA_VERBOSE_DEBUG
/* Dump the first four words of this port's host DMA packet (debug only). */
static void pdc20621_dump_hdma(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	unsigned int port_no = ap->port_no;
	void __iomem *dimm_mmio = ap->host->iomap[PDC_DIMM_BAR];

	dimm_mmio += (port_no * PDC_DIMM_WINDOW_STEP);
	dimm_mmio += PDC_DIMM_HOST_PKT;

	printk(KERN_ERR "HDMA[0] == 0x%08X\n", readl(dimm_mmio));
	printk(KERN_ERR "HDMA[1] == 0x%08X\n", readl(dimm_mmio + 4));
	printk(KERN_ERR "HDMA[2] == 0x%08X\n", readl(dimm_mmio + 8));
	printk(KERN_ERR "HDMA[3] == 0x%08X\n", readl(dimm_mmio + 12));
}
#else
static inline void pdc20621_dump_hdma(struct ata_queued_cmd *qc) { }
#endif /* ATA_VERBOSE_DEBUG */

/*
 * pdc20621_packet_start - kick off a prepared command.
 * DMA writes must first be copied host->DIMM via HDMA (seq += 4 selects
 * the write-side sequencer slot); the ATA packet is then issued from the
 * interrupt handler.  Everything else issues the ATA packet directly.
 */
static void pdc20621_packet_start(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_host *host = ap->host;
	unsigned int port_no = ap->port_no;
	void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
	unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
	u8 seq = (u8) (port_no + 1);
	unsigned int port_ofs;

	/* hard-code chip #0 */
	mmio += PDC_CHIP0_OFS;

	VPRINTK("ata%u: ENTER\n", ap->print_id);

	wmb();			/* flush PRD, pkt writes */

	port_ofs = PDC_20621_DIMM_BASE + (PDC_DIMM_WINDOW_STEP * port_no);

	/* if writing, we (1) DMA to DIMM, then (2) do ATA command */
	if (rw && qc->tf.protocol == ATA_PROT_DMA) {
		seq += 4;

		pdc20621_dump_hdma(qc);
		pdc20621_push_hdma(qc, seq, port_ofs + PDC_DIMM_HOST_PKT);
		VPRINTK("queued ofs 0x%x (%u), seq %u\n",
			port_ofs + PDC_DIMM_HOST_PKT,
			port_ofs + PDC_DIMM_HOST_PKT, seq);
	} else
{ writel(0x00000001, mmio + PDC_20621_SEQCTL + (seq * 4)); readl(mmio + PDC_20621_SEQCTL + (seq * 4)); /* flush */ writel(port_ofs + PDC_DIMM_ATA_PKT, ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT); readl(ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT); VPRINTK("submitted ofs 0x%x (%u), seq %u\n", port_ofs + PDC_DIMM_ATA_PKT, port_ofs + PDC_DIMM_ATA_PKT, seq); } } static unsigned int pdc20621_qc_issue(struct ata_queued_cmd *qc) { switch (qc->tf.protocol) { case ATA_PROT_NODATA: if (qc->tf.flags & ATA_TFLAG_POLLING) break; /*FALLTHROUGH*/ case ATA_PROT_DMA: pdc20621_packet_start(qc); return 0; case ATAPI_PROT_DMA: BUG(); break; default: break; } return ata_sff_qc_issue(qc); } static inline unsigned int pdc20621_host_intr(struct ata_port *ap, struct ata_queued_cmd *qc, unsigned int doing_hdma, void __iomem *mmio) { unsigned int port_no = ap->port_no; unsigned int port_ofs = PDC_20621_DIMM_BASE + (PDC_DIMM_WINDOW_STEP * port_no); u8 status; unsigned int handled = 0; VPRINTK("ENTER\n"); if ((qc->tf.protocol == ATA_PROT_DMA) && /* read */ (!(qc->tf.flags & ATA_TFLAG_WRITE))) { /* step two - DMA from DIMM to host */ if (doing_hdma) { VPRINTK("ata%u: read hdma, 0x%x 0x%x\n", ap->print_id, readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT)); /* get drive status; clear intr; complete txn */ qc->err_mask |= ac_err_mask(ata_wait_idle(ap)); ata_qc_complete(qc); pdc20621_pop_hdma(qc); } /* step one - exec ATA command */ else { u8 seq = (u8) (port_no + 1 + 4); VPRINTK("ata%u: read ata, 0x%x 0x%x\n", ap->print_id, readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT)); /* submit hdma pkt */ pdc20621_dump_hdma(qc); pdc20621_push_hdma(qc, seq, port_ofs + PDC_DIMM_HOST_PKT); } handled = 1; } else if (qc->tf.protocol == ATA_PROT_DMA) { /* write */ /* step one - DMA from host to DIMM */ if (doing_hdma) { u8 seq = (u8) (port_no + 1); VPRINTK("ata%u: write hdma, 0x%x 0x%x\n", ap->print_id, readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT)); /* submit ata pkt */ writel(0x00000001, mmio + PDC_20621_SEQCTL 
+ (seq * 4)); readl(mmio + PDC_20621_SEQCTL + (seq * 4)); writel(port_ofs + PDC_DIMM_ATA_PKT, ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT); readl(ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT); } /* step two - execute ATA command */ else { VPRINTK("ata%u: write ata, 0x%x 0x%x\n", ap->print_id, readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT)); /* get drive status; clear intr; complete txn */ qc->err_mask |= ac_err_mask(ata_wait_idle(ap)); ata_qc_complete(qc); pdc20621_pop_hdma(qc); } handled = 1; /* command completion, but no data xfer */ } else if (qc->tf.protocol == ATA_PROT_NODATA) { status = ata_sff_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000); DPRINTK("BUS_NODATA (drv_stat 0x%X)\n", status); qc->err_mask |= ac_err_mask(status); ata_qc_complete(qc); handled = 1; } else { ap->stats.idle_irq++; } return handled; } static void pdc20621_irq_clear(struct ata_port *ap) { ioread8(ap->ioaddr.status_addr); } static irqreturn_t pdc20621_interrupt(int irq, void *dev_instance) { struct ata_host *host = dev_instance; struct ata_port *ap; u32 mask = 0; unsigned int i, tmp, port_no; unsigned int handled = 0; void __iomem *mmio_base; VPRINTK("ENTER\n"); if (!host || !host->iomap[PDC_MMIO_BAR]) { VPRINTK("QUICK EXIT\n"); return IRQ_NONE; } mmio_base = host->iomap[PDC_MMIO_BAR]; /* reading should also clear interrupts */ mmio_base += PDC_CHIP0_OFS; mask = readl(mmio_base + PDC_20621_SEQMASK); VPRINTK("mask == 0x%x\n", mask); if (mask == 0xffffffff) { VPRINTK("QUICK EXIT 2\n"); return IRQ_NONE; } mask &= 0xffff; /* only 16 tags possible */ if (!mask) { VPRINTK("QUICK EXIT 3\n"); return IRQ_NONE; } spin_lock(&host->lock); for (i = 1; i < 9; i++) { port_no = i - 1; if (port_no > 3) port_no -= 4; if (port_no >= host->n_ports) ap = NULL; else ap = host->ports[port_no]; tmp = mask & (1 << i); VPRINTK("seq %u, port_no %u, ap %p, tmp %x\n", i, port_no, ap, tmp); if (tmp && ap) { struct ata_queued_cmd *qc; qc = ata_qc_from_tag(ap, ap->link.active_tag); if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) 
handled += pdc20621_host_intr(ap, qc, (i > 4), mmio_base);
		}
	}

	spin_unlock(&host->lock);

	VPRINTK("mask == 0x%x\n", mask);

	VPRINTK("EXIT\n");

	return IRQ_RETVAL(handled);
}

/* pdc_freeze - mask port interrupts and disable DMA in PDC_CTLSTAT. */
static void pdc_freeze(struct ata_port *ap)
{
	void __iomem *mmio = ap->ioaddr.cmd_addr;
	u32 tmp;

	/* FIXME: if all 4 ATA engines are stopped, also stop HDMA engine */

	tmp = readl(mmio + PDC_CTLSTAT);
	tmp |= PDC_MASK_INT;
	tmp &= ~PDC_DMA_ENABLE;
	writel(tmp, mmio + PDC_CTLSTAT);
	readl(mmio + PDC_CTLSTAT); /* flush */
}

/* pdc_thaw - clear any pending interrupt, then unmask port interrupts. */
static void pdc_thaw(struct ata_port *ap)
{
	void __iomem *mmio = ap->ioaddr.cmd_addr;
	u32 tmp;

	/* FIXME: start HDMA engine, if zero ATA engines running */

	/* clear IRQ */
	ioread8(ap->ioaddr.status_addr);

	/* turn IRQ back on */
	tmp = readl(mmio + PDC_CTLSTAT);
	tmp &= ~PDC_MASK_INT;
	writel(tmp, mmio + PDC_CTLSTAT);
	readl(mmio + PDC_CTLSTAT); /* flush */
}

/*
 * pdc_reset_port - pulse PDC_RESET for this port.
 * Polls up to 11 iterations for the reset bit to read back set
 * (setting it each pass), then clears it again.
 */
static void pdc_reset_port(struct ata_port *ap)
{
	void __iomem *mmio = ap->ioaddr.cmd_addr + PDC_CTLSTAT;
	unsigned int i;
	u32 tmp;

	/* FIXME: handle HDMA copy engine */

	for (i = 11; i > 0; i--) {
		tmp = readl(mmio);
		if (tmp & PDC_RESET)
			break;

		udelay(100);

		tmp |= PDC_RESET;
		writel(tmp, mmio);
	}

	tmp &= ~PDC_RESET;
	writel(tmp, mmio);
	readl(mmio);	/* flush */
}

/*
 * pdc_softreset - reset the port engine, then run the standard SFF
 * software reset sequence.
 */
static int pdc_softreset(struct ata_link *link, unsigned int *class,
			 unsigned long deadline)
{
	pdc_reset_port(link->ap);
	return ata_sff_softreset(link, class, deadline);
}

/*
 * pdc_error_handler - EH entry point.  Resets the port engine unless
 * the port is frozen, then defers to the generic SFF error handler.
 */
static void pdc_error_handler(struct ata_port *ap)
{
	if (!(ap->pflags & ATA_PFLAG_FROZEN))
		pdc_reset_port(ap);

	ata_sff_error_handler(ap);
}

static void pdc_post_internal_cmd(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	/* make DMA engine forget about the failed command */
	if (qc->flags & ATA_QCFLAG_FAILED)
		pdc_reset_port(ap);
}

/*
 * pdc_check_atapi_dma - decide PIO vs DMA for an ATAPI command.
 * Returns nonzero to force PIO for this command.
 */
static int pdc_check_atapi_dma(struct ata_queued_cmd *qc)
{
	u8 *scsicmd = qc->scsicmd->cmnd;
	int pio = 1; /* atapi dma off by default */

	/* Whitelist commands that may use DMA.
*/ switch (scsicmd[0]) { case WRITE_12: case WRITE_10: case WRITE_6: case READ_12: case READ_10: case READ_6: case 0xad: /* READ_DVD_STRUCTURE */ case 0xbe: /* READ_CD */ pio = 0; } /* -45150 (FFFF4FA2) to -1 (FFFFFFFF) shall use PIO mode */ if (scsicmd[0] == WRITE_10) { unsigned int lba = (scsicmd[2] << 24) | (scsicmd[3] << 16) | (scsicmd[4] << 8) | scsicmd[5]; if (lba >= 0xFFFF4FA2) pio = 1; } return pio; } static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf) { WARN_ON(tf->protocol == ATA_PROT_DMA || tf->protocol == ATAPI_PROT_DMA); ata_sff_tf_load(ap, tf); } static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf) { WARN_ON(tf->protocol == ATA_PROT_DMA || tf->protocol == ATAPI_PROT_DMA); ata_sff_exec_command(ap, tf); } static void pdc_sata_setup_port(struct ata_ioports *port, void __iomem *base) { port->cmd_addr = base; port->data_addr = base; port->feature_addr = port->error_addr = base + 0x4; port->nsect_addr = base + 0x8; port->lbal_addr = base + 0xc; port->lbam_addr = base + 0x10; port->lbah_addr = base + 0x14; port->device_addr = base + 0x18; port->command_addr = port->status_addr = base + 0x1c; port->altstatus_addr = port->ctl_addr = base + 0x38; } #ifdef ATA_VERBOSE_DEBUG static void pdc20621_get_from_dimm(struct ata_host *host, void *psource, u32 offset, u32 size) { u32 window_size; u16 idx; u8 page_mask; long dist; void __iomem *mmio = host->iomap[PDC_MMIO_BAR]; void __iomem *dimm_mmio = host->iomap[PDC_DIMM_BAR]; /* hard-code chip #0 */ mmio += PDC_CHIP0_OFS; page_mask = 0x00; window_size = 0x2000 * 4; /* 32K byte uchar size */ idx = (u16) (offset / window_size); writel(0x01, mmio + PDC_GENERAL_CTLR); readl(mmio + PDC_GENERAL_CTLR); writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR); readl(mmio + PDC_DIMM_WINDOW_CTLR); offset -= (idx * window_size); idx++; dist = ((long) (window_size - (offset + size))) >= 0 ? 
size : (long) (window_size - offset); memcpy_fromio((char *) psource, (char *) (dimm_mmio + offset / 4), dist); psource += dist; size -= dist; for (; (long) size >= (long) window_size ;) { writel(0x01, mmio + PDC_GENERAL_CTLR); readl(mmio + PDC_GENERAL_CTLR); writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR); readl(mmio + PDC_DIMM_WINDOW_CTLR); memcpy_fromio((char *) psource, (char *) (dimm_mmio), window_size / 4); psource += window_size; size -= window_size; idx++; } if (size) { writel(0x01, mmio + PDC_GENERAL_CTLR); readl(mmio + PDC_GENERAL_CTLR); writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR); readl(mmio + PDC_DIMM_WINDOW_CTLR); memcpy_fromio((char *) psource, (char *) (dimm_mmio), size / 4); } } #endif static void pdc20621_put_to_dimm(struct ata_host *host, void *psource, u32 offset, u32 size) { u32 window_size; u16 idx; u8 page_mask; long dist; void __iomem *mmio = host->iomap[PDC_MMIO_BAR]; void __iomem *dimm_mmio = host->iomap[PDC_DIMM_BAR]; /* hard-code chip #0 */ mmio += PDC_CHIP0_OFS; page_mask = 0x00; window_size = 0x2000 * 4; /* 32K byte uchar size */ idx = (u16) (offset / window_size); writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR); readl(mmio + PDC_DIMM_WINDOW_CTLR); offset -= (idx * window_size); idx++; dist = ((long)(s32)(window_size - (offset + size))) >= 0 ? 
size : (long) (window_size - offset); memcpy_toio(dimm_mmio + offset / 4, psource, dist); writel(0x01, mmio + PDC_GENERAL_CTLR); readl(mmio + PDC_GENERAL_CTLR); psource += dist; size -= dist; for (; (long) size >= (long) window_size ;) { writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR); readl(mmio + PDC_DIMM_WINDOW_CTLR); memcpy_toio(dimm_mmio, psource, window_size / 4); writel(0x01, mmio + PDC_GENERAL_CTLR); readl(mmio + PDC_GENERAL_CTLR); psource += window_size; size -= window_size; idx++; } if (size) { writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR); readl(mmio + PDC_DIMM_WINDOW_CTLR); memcpy_toio(dimm_mmio, psource, size / 4); writel(0x01, mmio + PDC_GENERAL_CTLR); readl(mmio + PDC_GENERAL_CTLR); } } static unsigned int pdc20621_i2c_read(struct ata_host *host, u32 device, u32 subaddr, u32 *pdata) { void __iomem *mmio = host->iomap[PDC_MMIO_BAR]; u32 i2creg = 0; u32 status; u32 count = 0; /* hard-code chip #0 */ mmio += PDC_CHIP0_OFS; i2creg |= device << 24; i2creg |= subaddr << 16; /* Set the device and subaddress */ writel(i2creg, mmio + PDC_I2C_ADDR_DATA); readl(mmio + PDC_I2C_ADDR_DATA); /* Write Control to perform read operation, mask int */ writel(PDC_I2C_READ | PDC_I2C_START | PDC_I2C_MASK_INT, mmio + PDC_I2C_CONTROL); for (count = 0; count <= 1000; count ++) { status = readl(mmio + PDC_I2C_CONTROL); if (status & PDC_I2C_COMPLETE) { status = readl(mmio + PDC_I2C_ADDR_DATA); break; } else if (count == 1000) return 0; } *pdata = (status >> 8) & 0x000000ff; return 1; } static int pdc20621_detect_dimm(struct ata_host *host) { u32 data = 0; if (pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS, PDC_DIMM_SPD_SYSTEM_FREQ, &data)) { if (data == 100) return 100; } else return 0; if (pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS, 9, &data)) { if (data <= 0x75) return 133; } else return 0; return 0; } static int pdc20621_prog_dimm0(struct ata_host *host) { u32 spd0[50]; u32 data = 0; int size, i; u8 bdimmsize; void __iomem *mmio = 
host->iomap[PDC_MMIO_BAR]; static const struct { unsigned int reg; unsigned int ofs; } pdc_i2c_read_data [] = { { PDC_DIMM_SPD_TYPE, 11 }, { PDC_DIMM_SPD_FRESH_RATE, 12 }, { PDC_DIMM_SPD_COLUMN_NUM, 4 }, { PDC_DIMM_SPD_ATTRIBUTE, 21 }, { PDC_DIMM_SPD_ROW_NUM, 3 }, { PDC_DIMM_SPD_BANK_NUM, 17 }, { PDC_DIMM_SPD_MODULE_ROW, 5 }, { PDC_DIMM_SPD_ROW_PRE_CHARGE, 27 }, { PDC_DIMM_SPD_ROW_ACTIVE_DELAY, 28 }, { PDC_DIMM_SPD_RAS_CAS_DELAY, 29 }, { PDC_DIMM_SPD_ACTIVE_PRECHARGE, 30 }, { PDC_DIMM_SPD_CAS_LATENCY, 18 }, }; /* hard-code chip #0 */ mmio += PDC_CHIP0_OFS; for (i = 0; i < ARRAY_SIZE(pdc_i2c_read_data); i++) pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS, pdc_i2c_read_data[i].reg, &spd0[pdc_i2c_read_data[i].ofs]); data |= (spd0[4] - 8) | ((spd0[21] != 0) << 3) | ((spd0[3]-11) << 4); data |= ((spd0[17] / 4) << 6) | ((spd0[5] / 2) << 7) | ((((spd0[27] + 9) / 10) - 1) << 8) ; data |= (((((spd0[29] > spd0[28]) ? spd0[29] : spd0[28]) + 9) / 10) - 1) << 10; data |= ((spd0[30] - spd0[29] + 9) / 10 - 2) << 12; if (spd0[18] & 0x08) data |= ((0x03) << 14); else if (spd0[18] & 0x04) data |= ((0x02) << 14); else if (spd0[18] & 0x01) data |= ((0x01) << 14); else data |= (0 << 14); /* Calculate the size of bDIMMSize (power of 2) and merge the DIMM size by program start/end address. 
*/ bdimmsize = spd0[4] + (spd0[5] / 2) + spd0[3] + (spd0[17] / 2) + 3; size = (1 << bdimmsize) >> 20; /* size = xxx(MB) */ data |= (((size / 16) - 1) << 16); data |= (0 << 23); data |= 8; writel(data, mmio + PDC_DIMM0_CONTROL); readl(mmio + PDC_DIMM0_CONTROL); return size; } static unsigned int pdc20621_prog_dimm_global(struct ata_host *host) { u32 data, spd0; int error, i; void __iomem *mmio = host->iomap[PDC_MMIO_BAR]; /* hard-code chip #0 */ mmio += PDC_CHIP0_OFS; /* Set To Default : DIMM Module Global Control Register (0x022259F1) DIMM Arbitration Disable (bit 20) DIMM Data/Control Output Driving Selection (bit12 - bit15) Refresh Enable (bit 17) */ data = 0x022259F1; writel(data, mmio + PDC_SDRAM_CONTROL); readl(mmio + PDC_SDRAM_CONTROL); /* Turn on for ECC */ pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS, PDC_DIMM_SPD_TYPE, &spd0); if (spd0 == 0x02) { data |= (0x01 << 16); writel(data, mmio + PDC_SDRAM_CONTROL); readl(mmio + PDC_SDRAM_CONTROL); printk(KERN_ERR "Local DIMM ECC Enabled\n"); } /* DIMM Initialization Select/Enable (bit 18/19) */ data &= (~(1<<18)); data |= (1<<19); writel(data, mmio + PDC_SDRAM_CONTROL); error = 1; for (i = 1; i <= 10; i++) { /* polling ~5 secs */ data = readl(mmio + PDC_SDRAM_CONTROL); if (!(data & (1<<19))) { error = 0; break; } msleep(i*100); } return error; } static unsigned int pdc20621_dimm_init(struct ata_host *host) { int speed, size, length; u32 addr, spd0, pci_status; u32 time_period = 0; u32 tcount = 0; u32 ticks = 0; u32 clock = 0; u32 fparam = 0; void __iomem *mmio = host->iomap[PDC_MMIO_BAR]; /* hard-code chip #0 */ mmio += PDC_CHIP0_OFS; /* Initialize PLL based upon PCI Bus Frequency */ /* Initialize Time Period Register */ writel(0xffffffff, mmio + PDC_TIME_PERIOD); time_period = readl(mmio + PDC_TIME_PERIOD); VPRINTK("Time Period Register (0x40): 0x%x\n", time_period); /* Enable timer */ writel(PDC_TIMER_DEFAULT, mmio + PDC_TIME_CONTROL); readl(mmio + PDC_TIME_CONTROL); /* Wait 3 seconds */ msleep(3000); /* 
When timer is enabled, counter is decreased every internal clock cycle. */ tcount = readl(mmio + PDC_TIME_COUNTER); VPRINTK("Time Counter Register (0x44): 0x%x\n", tcount); /* If SX4 is on PCI-X bus, after 3 seconds, the timer counter register should be >= (0xffffffff - 3x10^8). */ if (tcount >= PCI_X_TCOUNT) { ticks = (time_period - tcount); VPRINTK("Num counters 0x%x (%d)\n", ticks, ticks); clock = (ticks / 300000); VPRINTK("10 * Internal clk = 0x%x (%d)\n", clock, clock); clock = (clock * 33); VPRINTK("10 * Internal clk * 33 = 0x%x (%d)\n", clock, clock); /* PLL F Param (bit 22:16) */ fparam = (1400000 / clock) - 2; VPRINTK("PLL F Param: 0x%x (%d)\n", fparam, fparam); /* OD param = 0x2 (bit 31:30), R param = 0x5 (bit 29:25) */ pci_status = (0x8a001824 | (fparam << 16)); } else pci_status = PCI_PLL_INIT; /* Initialize PLL. */ VPRINTK("pci_status: 0x%x\n", pci_status); writel(pci_status, mmio + PDC_CTL_STATUS); readl(mmio + PDC_CTL_STATUS); /* Read SPD of DIMM by I2C interface, and program the DIMM Module Controller. 
*/ if (!(speed = pdc20621_detect_dimm(host))) { printk(KERN_ERR "Detect Local DIMM Fail\n"); return 1; /* DIMM error */ } VPRINTK("Local DIMM Speed = %d\n", speed); /* Programming DIMM0 Module Control Register (index_CID0:80h) */ size = pdc20621_prog_dimm0(host); VPRINTK("Local DIMM Size = %dMB\n", size); /* Programming DIMM Module Global Control Register (index_CID0:88h) */ if (pdc20621_prog_dimm_global(host)) { printk(KERN_ERR "Programming DIMM Module Global Control Register Fail\n"); return 1; } #ifdef ATA_VERBOSE_DEBUG { u8 test_parttern1[40] = {0x55,0xAA,'P','r','o','m','i','s','e',' ', 'N','o','t',' ','Y','e','t',' ', 'D','e','f','i','n','e','d',' ', '1','.','1','0', '9','8','0','3','1','6','1','2',0,0}; u8 test_parttern2[40] = {0}; pdc20621_put_to_dimm(host, test_parttern2, 0x10040, 40); pdc20621_put_to_dimm(host, test_parttern2, 0x40, 40); pdc20621_put_to_dimm(host, test_parttern1, 0x10040, 40); pdc20621_get_from_dimm(host, test_parttern2, 0x40, 40); printk(KERN_ERR "%x, %x, %s\n", test_parttern2[0], test_parttern2[1], &(test_parttern2[2])); pdc20621_get_from_dimm(host, test_parttern2, 0x10040, 40); printk(KERN_ERR "%x, %x, %s\n", test_parttern2[0], test_parttern2[1], &(test_parttern2[2])); pdc20621_put_to_dimm(host, test_parttern1, 0x40, 40); pdc20621_get_from_dimm(host, test_parttern2, 0x40, 40); printk(KERN_ERR "%x, %x, %s\n", test_parttern2[0], test_parttern2[1], &(test_parttern2[2])); } #endif /* ECC initiliazation. 
*/ pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS, PDC_DIMM_SPD_TYPE, &spd0); if (spd0 == 0x02) { void *buf; VPRINTK("Start ECC initialization\n"); addr = 0; length = size * 1024 * 1024; buf = kzalloc(ECC_ERASE_BUF_SZ, GFP_KERNEL); while (addr < length) { pdc20621_put_to_dimm(host, buf, addr, ECC_ERASE_BUF_SZ); addr += ECC_ERASE_BUF_SZ; } kfree(buf); VPRINTK("Finish ECC initialization\n"); } return 0; } static void pdc_20621_init(struct ata_host *host) { u32 tmp; void __iomem *mmio = host->iomap[PDC_MMIO_BAR]; /* hard-code chip #0 */ mmio += PDC_CHIP0_OFS; /* * Select page 0x40 for our 32k DIMM window */ tmp = readl(mmio + PDC_20621_DIMM_WINDOW) & 0xffff0000; tmp |= PDC_PAGE_WINDOW; /* page 40h; arbitrarily selected */ writel(tmp, mmio + PDC_20621_DIMM_WINDOW); /* * Reset Host DMA */ tmp = readl(mmio + PDC_HDMA_CTLSTAT); tmp |= PDC_RESET; writel(tmp, mmio + PDC_HDMA_CTLSTAT); readl(mmio + PDC_HDMA_CTLSTAT); /* flush */ udelay(10); tmp = readl(mmio + PDC_HDMA_CTLSTAT); tmp &= ~PDC_RESET; writel(tmp, mmio + PDC_HDMA_CTLSTAT); readl(mmio + PDC_HDMA_CTLSTAT); /* flush */ } static int pdc_sata_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { const struct ata_port_info *ppi[] = { &pdc_port_info[ent->driver_data], NULL }; struct ata_host *host; struct pdc_host_priv *hpriv; int i, rc; ata_print_version_once(&pdev->dev, DRV_VERSION); /* allocate host */ host = ata_host_alloc_pinfo(&pdev->dev, ppi, 4); hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL); if (!host || !hpriv) return -ENOMEM; host->private_data = hpriv; /* acquire resources and fill host */ rc = pcim_enable_device(pdev); if (rc) return rc; rc = pcim_iomap_regions(pdev, (1 << PDC_MMIO_BAR) | (1 << PDC_DIMM_BAR), DRV_NAME); if (rc == -EBUSY) pcim_pin_device(pdev); if (rc) return rc; host->iomap = pcim_iomap_table(pdev); for (i = 0; i < 4; i++) { struct ata_port *ap = host->ports[i]; void __iomem *base = host->iomap[PDC_MMIO_BAR] + PDC_CHIP0_OFS; unsigned int offset = 0x200 + i * 
0x80; pdc_sata_setup_port(&ap->ioaddr, base + offset); ata_port_pbar_desc(ap, PDC_MMIO_BAR, -1, "mmio"); ata_port_pbar_desc(ap, PDC_DIMM_BAR, -1, "dimm"); ata_port_pbar_desc(ap, PDC_MMIO_BAR, offset, "port"); } /* configure and activate */ rc = pci_set_dma_mask(pdev, ATA_DMA_MASK); if (rc) return rc; rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK); if (rc) return rc; if (pdc20621_dimm_init(host)) return -ENOMEM; pdc_20621_init(host); pci_set_master(pdev); return ata_host_activate(host, pdev->irq, pdc20621_interrupt, IRQF_SHARED, &pdc_sata_sht); } static int __init pdc_sata_init(void) { return pci_register_driver(&pdc_sata_pci_driver); } static void __exit pdc_sata_exit(void) { pci_unregister_driver(&pdc_sata_pci_driver); } MODULE_AUTHOR("Jeff Garzik"); MODULE_DESCRIPTION("Promise SATA low-level driver"); MODULE_LICENSE("GPL"); MODULE_DEVICE_TABLE(pci, pdc_sata_pci_tbl); MODULE_VERSION(DRV_VERSION); module_init(pdc_sata_init); module_exit(pdc_sata_exit);
gpl-2.0
GladeRom/android_kernel_lge_g3
drivers/ata/pata_amd.c
5508
17471
/* * pata_amd.c - AMD PATA for new ATA layer * (C) 2005-2006 Red Hat Inc * * Based on pata-sil680. Errata information is taken from data sheets * and the amd74xx.c driver by Vojtech Pavlik. Nvidia SATA devices are * claimed by sata-nv.c. * * TODO: * Variable system clock when/if it makes sense * Power management on ports * * * Documentation publicly available. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/init.h> #include <linux/blkdev.h> #include <linux/delay.h> #include <scsi/scsi_host.h> #include <linux/libata.h> #define DRV_NAME "pata_amd" #define DRV_VERSION "0.4.1" /** * timing_setup - shared timing computation and load * @ap: ATA port being set up * @adev: drive being configured * @offset: port offset * @speed: target speed * @clock: clock multiplier (number of times 33MHz for this part) * * Perform the actual timing set up for Nvidia or AMD PATA devices. * The actual devices vary so they all call into this helper function * providing the clock multipler and offset (because AMD and Nvidia put * the ports at different locations). */ static void timing_setup(struct ata_port *ap, struct ata_device *adev, int offset, int speed, int clock) { static const unsigned char amd_cyc2udma[] = { 6, 6, 5, 4, 0, 1, 1, 2, 2, 3, 3, 3, 3, 3, 3, 7 }; struct pci_dev *pdev = to_pci_dev(ap->host->dev); struct ata_device *peer = ata_dev_pair(adev); int dn = ap->port_no * 2 + adev->devno; struct ata_timing at, apeer; int T, UT; const int amd_clock = 33333; /* KHz. 
*/ u8 t; T = 1000000000 / amd_clock; UT = T; if (clock >= 2) UT = T / 2; if (ata_timing_compute(adev, speed, &at, T, UT) < 0) { dev_err(&pdev->dev, "unknown mode %d\n", speed); return; } if (peer) { /* This may be over conservative */ if (peer->dma_mode) { ata_timing_compute(peer, peer->dma_mode, &apeer, T, UT); ata_timing_merge(&apeer, &at, &at, ATA_TIMING_8BIT); } ata_timing_compute(peer, peer->pio_mode, &apeer, T, UT); ata_timing_merge(&apeer, &at, &at, ATA_TIMING_8BIT); } if (speed == XFER_UDMA_5 && amd_clock <= 33333) at.udma = 1; if (speed == XFER_UDMA_6 && amd_clock <= 33333) at.udma = 15; /* * Now do the setup work */ /* Configure the address set up timing */ pci_read_config_byte(pdev, offset + 0x0C, &t); t = (t & ~(3 << ((3 - dn) << 1))) | ((clamp_val(at.setup, 1, 4) - 1) << ((3 - dn) << 1)); pci_write_config_byte(pdev, offset + 0x0C , t); /* Configure the 8bit I/O timing */ pci_write_config_byte(pdev, offset + 0x0E + (1 - (dn >> 1)), ((clamp_val(at.act8b, 1, 16) - 1) << 4) | (clamp_val(at.rec8b, 1, 16) - 1)); /* Drive timing */ pci_write_config_byte(pdev, offset + 0x08 + (3 - dn), ((clamp_val(at.active, 1, 16) - 1) << 4) | (clamp_val(at.recover, 1, 16) - 1)); switch (clock) { case 1: t = at.udma ? (0xc0 | (clamp_val(at.udma, 2, 5) - 2)) : 0x03; break; case 2: t = at.udma ? (0xc0 | amd_cyc2udma[clamp_val(at.udma, 2, 10)]) : 0x03; break; case 3: t = at.udma ? (0xc0 | amd_cyc2udma[clamp_val(at.udma, 1, 10)]) : 0x03; break; case 4: t = at.udma ? (0xc0 | amd_cyc2udma[clamp_val(at.udma, 1, 15)]) : 0x03; break; default: return; } /* UDMA timing */ if (at.udma) pci_write_config_byte(pdev, offset + 0x10 + (3 - dn), t); } /** * amd_pre_reset - perform reset handling * @link: ATA link * @deadline: deadline jiffies for the operation * * Reset sequence checking enable bits to see which ports are * active. 
*/ static int amd_pre_reset(struct ata_link *link, unsigned long deadline) { static const struct pci_bits amd_enable_bits[] = { { 0x40, 1, 0x02, 0x02 }, { 0x40, 1, 0x01, 0x01 } }; struct ata_port *ap = link->ap; struct pci_dev *pdev = to_pci_dev(ap->host->dev); if (!pci_test_config_bits(pdev, &amd_enable_bits[ap->port_no])) return -ENOENT; return ata_sff_prereset(link, deadline); } /** * amd_cable_detect - report cable type * @ap: port * * AMD controller/BIOS setups record the cable type in word 0x42 */ static int amd_cable_detect(struct ata_port *ap) { static const u32 bitmask[2] = {0x03, 0x0C}; struct pci_dev *pdev = to_pci_dev(ap->host->dev); u8 ata66; pci_read_config_byte(pdev, 0x42, &ata66); if (ata66 & bitmask[ap->port_no]) return ATA_CBL_PATA80; return ATA_CBL_PATA40; } /** * amd_fifo_setup - set the PIO FIFO for ATA/ATAPI * @ap: ATA interface * @adev: ATA device * * Set the PCI fifo for this device according to the devices present * on the bus at this point in time. We need to turn the post write buffer * off for ATAPI devices as we may need to issue a word sized write to the * device as the final I/O */ static void amd_fifo_setup(struct ata_port *ap) { struct ata_device *adev; struct pci_dev *pdev = to_pci_dev(ap->host->dev); static const u8 fifobit[2] = { 0xC0, 0x30}; u8 fifo = fifobit[ap->port_no]; u8 r; ata_for_each_dev(adev, &ap->link, ENABLED) { if (adev->class == ATA_DEV_ATAPI) fifo = 0; } if (pdev->device == PCI_DEVICE_ID_AMD_VIPER_7411) /* FIFO is broken */ fifo = 0; /* On the later chips the read prefetch bits become no-op bits */ pci_read_config_byte(pdev, 0x41, &r); r &= ~fifobit[ap->port_no]; r |= fifo; pci_write_config_byte(pdev, 0x41, r); } /** * amd33_set_piomode - set initial PIO mode data * @ap: ATA interface * @adev: ATA device * * Program the AMD registers for PIO mode. 
 */

/* PIO setup wrappers: refresh the FIFO config, then program the timing
 * registers at offset 0x40.  The trailing argument selects the clock
 * generation (1 = 33MHz, 2 = 66MHz, 3 = 100MHz, 4 = 133MHz class chip).
 */

static void amd33_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
	amd_fifo_setup(ap);
	timing_setup(ap, adev, 0x40, adev->pio_mode, 1);
}

static void amd66_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
	amd_fifo_setup(ap);
	timing_setup(ap, adev, 0x40, adev->pio_mode, 2);
}

static void amd100_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
	amd_fifo_setup(ap);
	timing_setup(ap, adev, 0x40, adev->pio_mode, 3);
}

static void amd133_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
	amd_fifo_setup(ap);
	timing_setup(ap, adev, 0x40, adev->pio_mode, 4);
}

/**
 *	amd33_set_dmamode	-	set initial DMA mode data
 *	@ap: ATA interface
 *	@adev: ATA device
 *
 *	Program the MWDMA/UDMA modes for the AMD and Nvidia
 *	chipset.
 */

static void amd33_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
	timing_setup(ap, adev, 0x40, adev->dma_mode, 1);
}

static void amd66_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
	timing_setup(ap, adev, 0x40, adev->dma_mode, 2);
}

static void amd100_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
	timing_setup(ap, adev, 0x40, adev->dma_mode, 3);
}

static void amd133_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
	timing_setup(ap, adev, 0x40, adev->dma_mode, 4);
}

/* Both host-side and drive-side detection results are worthless on NV
 * PATAs.  Ignore them and just follow what BIOS configured.  Both the
 * current configuration in PCI config reg and ACPI GTM result are
 * cached during driver attach and are consulted to select transfer
 * mode.
 */

static unsigned long nv_mode_filter(struct ata_device *dev,
				    unsigned long xfer_mask)
{
	/* Maps the 3-bit BIOS UDMA field to the libata UDMA mask; index 3
	 * has no corresponding mode (0). */
	static const unsigned int udma_mask_map[] =
		{ ATA_UDMA2, ATA_UDMA1, ATA_UDMA0, 0,
		  ATA_UDMA3, ATA_UDMA4, ATA_UDMA5, ATA_UDMA6 };
	struct ata_port *ap = dev->link->ap;
	char acpi_str[32] = "";
	u32 saved_udma, udma;
	const struct ata_acpi_gtm *gtm;
	unsigned long bios_limit = 0, acpi_limit = 0, limit;

	/* find out what BIOS configured: host->private_data caches the
	 * whole 32-bit register 0x60, one byte per (port, device) pair */
	udma = saved_udma = (unsigned long)ap->host->private_data;

	/* Shift the byte for this port/device down into bits 7:0.
	 * NOTE: the shift order (port first, then device) must match the
	 * register layout cached at probe time. */
	if (ap->port_no == 0)
		udma >>= 16;
	if (dev->devno == 0)
		udma >>= 8;

	/* 0xc0 marks "UDMA enabled"; low 3 bits select the mode */
	if ((udma & 0xc0) == 0xc0)
		bios_limit = ata_pack_xfermask(0, 0, udma_mask_map[udma & 0x7]);

	/* consult ACPI GTM too */
	gtm = ata_acpi_init_gtm(ap);
	if (gtm) {
		acpi_limit = ata_acpi_gtm_xfermask(dev, gtm);

		snprintf(acpi_str, sizeof(acpi_str), " (%u:%u:0x%x)",
			 gtm->drive[0].dma, gtm->drive[1].dma, gtm->flags);
	}

	/* be optimistic, EH can take care of things if something goes wrong */
	limit = bios_limit | acpi_limit;

	/* If PIO or DMA isn't configured at all, don't limit.  Let EH
	 * handle it.
	 */
	if (!(limit & ATA_MASK_PIO))
		limit |= ATA_MASK_PIO;
	if (!(limit & (ATA_MASK_MWDMA | ATA_MASK_UDMA)))
		limit |= ATA_MASK_MWDMA | ATA_MASK_UDMA;

	/* PIO4, MWDMA2, UDMA2 should always be supported regardless of
	   cable detection result */
	limit |= ata_pack_xfermask(ATA_PIO4, ATA_MWDMA2, ATA_UDMA2);

	ata_port_dbg(ap, "nv_mode_filter: 0x%lx&0x%lx->0x%lx, "
			"BIOS=0x%lx (0x%x) ACPI=0x%lx%s\n",
			xfer_mask, limit, xfer_mask & limit,
			bios_limit, saved_udma, acpi_limit, acpi_str);

	return xfer_mask & limit;
}

/**
 *	nv_pre_reset	-	cable detection
 *	@link: ATA link
 *
 *	Perform cable detection. The BIOS stores this in PCI config
 *	space for us.
 */

static int nv_pre_reset(struct ata_link *link, unsigned long deadline)
{
	/* Per-port enable bits for Nvidia live in PCI config byte 0x50 */
	static const struct pci_bits nv_enable_bits[] = {
		{ 0x50, 1, 0x02, 0x02 },
		{ 0x50, 1, 0x01, 0x01 }
	};

	struct ata_port *ap = link->ap;
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);

	/* Port disabled in config space: nothing to probe */
	if (!pci_test_config_bits(pdev, &nv_enable_bits[ap->port_no]))
		return -ENOENT;

	return ata_sff_prereset(link, deadline);
}

/**
 *	nv100_set_piomode	-	set initial PIO mode data
 *	@ap: ATA interface
 *	@adev: ATA device
 *
 *	Program the AMD registers for PIO mode.
 */

static void nv100_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
	/* Nvidia timing registers start at offset 0x50 (vs 0x40 on AMD) */
	timing_setup(ap, adev, 0x50, adev->pio_mode, 3);
}

static void nv133_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
	timing_setup(ap, adev, 0x50, adev->pio_mode, 4);
}

/**
 *	nv100_set_dmamode	-	set initial DMA mode data
 *	@ap: ATA interface
 *	@adev: ATA device
 *
 *	Program the MWDMA/UDMA modes for the AMD and Nvidia
 *	chipset.
 */

static void nv100_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
	timing_setup(ap, adev, 0x50, adev->dma_mode, 3);
}

static void nv133_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
	timing_setup(ap, adev, 0x50, adev->dma_mode, 4);
}

static void nv_host_stop(struct ata_host *host)
{
	/* private_data holds the register value cached at probe time */
	u32 udma = (unsigned long)host->private_data;

	/* restore PCI config register 0x60 */
	pci_write_config_dword(to_pci_dev(host->dev), 0x60, udma);
}

static struct scsi_host_template amd_sht = {
	ATA_BMDMA_SHT(DRV_NAME),
};

/* Common base for all AMD flavours; per-speed variants layer on top */
static const struct ata_port_operations amd_base_port_ops = {
	.inherits	= &ata_bmdma32_port_ops,
	.prereset	= amd_pre_reset,
};

static struct ata_port_operations amd33_port_ops = {
	.inherits	= &amd_base_port_ops,
	.cable_detect	= ata_cable_40wire,
	.set_piomode	= amd33_set_piomode,
	.set_dmamode	= amd33_set_dmamode,
};

static struct ata_port_operations amd66_port_ops = {
	.inherits	= &amd_base_port_ops,
	.cable_detect	= ata_cable_unknown,
	.set_piomode	= amd66_set_piomode,
	.set_dmamode	= amd66_set_dmamode,
};

static struct
ata_port_operations amd100_port_ops = {
	.inherits	= &amd_base_port_ops,
	.cable_detect	= ata_cable_unknown,
	.set_piomode	= amd100_set_piomode,
	.set_dmamode	= amd100_set_dmamode,
};

static struct ata_port_operations amd133_port_ops = {
	.inherits	= &amd_base_port_ops,
	.cable_detect	= amd_cable_detect,
	.set_piomode	= amd133_set_piomode,
	.set_dmamode	= amd133_set_dmamode,
};

/* Nvidia base: cable detection is unreliable, so it is ignored and the
 * BIOS-programmed modes are enforced through nv_mode_filter instead */
static const struct ata_port_operations nv_base_port_ops = {
	.inherits	= &ata_bmdma_port_ops,
	.cable_detect	= ata_cable_ignore,
	.mode_filter	= nv_mode_filter,
	.prereset	= nv_pre_reset,
	.host_stop	= nv_host_stop,
};

static struct ata_port_operations nv100_port_ops = {
	.inherits	= &nv_base_port_ops,
	.set_piomode	= nv100_set_piomode,
	.set_dmamode	= nv100_set_dmamode,
};

static struct ata_port_operations nv133_port_ops = {
	.inherits	= &nv_base_port_ops,
	.set_piomode	= nv133_set_piomode,
	.set_dmamode	= nv133_set_dmamode,
};

static void amd_clear_fifo(struct pci_dev *pdev)
{
	u8 fifo;
	/* Disable the FIFO, the FIFO logic will re-enable it as appropriate */
	pci_read_config_byte(pdev, 0x41, &fifo);
	fifo &= 0x0F;
	pci_write_config_byte(pdev, 0x41, fifo);
}

static int amd_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
	/* Per-chip capabilities, indexed by id->driver_data (the "type") */
	static const struct ata_port_info info[10] = {
		{	/* 0: AMD 7401 - no swdma */
			.flags = ATA_FLAG_SLAVE_POSS,
			.pio_mask = ATA_PIO4,
			.mwdma_mask = ATA_MWDMA2,
			.udma_mask = ATA_UDMA2,
			.port_ops = &amd33_port_ops
		},
		{	/* 1: Early AMD7409 - no swdma */
			.flags = ATA_FLAG_SLAVE_POSS,
			.pio_mask = ATA_PIO4,
			.mwdma_mask = ATA_MWDMA2,
			.udma_mask = ATA_UDMA4,
			.port_ops = &amd66_port_ops
		},
		{	/* 2: AMD 7409 */
			.flags = ATA_FLAG_SLAVE_POSS,
			.pio_mask = ATA_PIO4,
			.mwdma_mask = ATA_MWDMA2,
			.udma_mask = ATA_UDMA4,
			.port_ops = &amd66_port_ops
		},
		{	/* 3: AMD 7411 */
			.flags = ATA_FLAG_SLAVE_POSS,
			.pio_mask = ATA_PIO4,
			.mwdma_mask = ATA_MWDMA2,
			.udma_mask = ATA_UDMA5,
			.port_ops = &amd100_port_ops
		},
		{	/* 4: AMD 7441 */
			.flags = ATA_FLAG_SLAVE_POSS,
			.pio_mask = ATA_PIO4,
			.mwdma_mask = ATA_MWDMA2,
			.udma_mask = ATA_UDMA5,
			.port_ops = &amd100_port_ops
		},
		{	/* 5: AMD 8111 - no swdma */
			.flags = ATA_FLAG_SLAVE_POSS,
			.pio_mask = ATA_PIO4,
			.mwdma_mask = ATA_MWDMA2,
			.udma_mask = ATA_UDMA6,
			.port_ops = &amd133_port_ops
		},
		{	/* 6: AMD 8111 UDMA 100 (Serenade) - no swdma */
			.flags = ATA_FLAG_SLAVE_POSS,
			.pio_mask = ATA_PIO4,
			.mwdma_mask = ATA_MWDMA2,
			.udma_mask = ATA_UDMA5,
			.port_ops = &amd133_port_ops
		},
		{	/* 7: Nvidia Nforce */
			.flags = ATA_FLAG_SLAVE_POSS,
			.pio_mask = ATA_PIO4,
			.mwdma_mask = ATA_MWDMA2,
			.udma_mask = ATA_UDMA5,
			.port_ops = &nv100_port_ops
		},
		{	/* 8: Nvidia Nforce2 and later - no swdma */
			.flags = ATA_FLAG_SLAVE_POSS,
			.pio_mask = ATA_PIO4,
			.mwdma_mask = ATA_MWDMA2,
			.udma_mask = ATA_UDMA6,
			.port_ops = &nv133_port_ops
		},
		{	/* 9: AMD CS5536 (Geode companion) */
			.flags = ATA_FLAG_SLAVE_POSS,
			.pio_mask = ATA_PIO4,
			.mwdma_mask = ATA_MWDMA2,
			.udma_mask = ATA_UDMA5,
			.port_ops = &amd100_port_ops
		}
	};
	const struct ata_port_info *ppi[] = { NULL, NULL };
	int type = id->driver_data;
	void *hpriv = NULL;
	u8 fifo;
	int rc;

	ata_print_version_once(&pdev->dev, DRV_VERSION);

	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	pci_read_config_byte(pdev, 0x41, &fifo);

	/* Check for AMD7409 without swdma errata and if found adjust type */
	if (type == 1 && pdev->revision > 0x7)
		type = 2;

	/* Serenade ? */
	if (type == 5 && pdev->subsystem_vendor == PCI_VENDOR_ID_AMD &&
	    pdev->subsystem_device == PCI_DEVICE_ID_AMD_SERENADE)
		type = 6;	/* UDMA 100 only */

	/*
	 * Okay, type is determined now.  Apply type-specific workarounds.
	 */
	ppi[0] = &info[type];

	if (type < 3)
		ata_pci_bmdma_clear_simplex(pdev);
	if (pdev->vendor == PCI_VENDOR_ID_AMD)
		amd_clear_fifo(pdev);
	/* Cable detection on Nvidia chips doesn't work too well,
	 * cache BIOS programmed UDMA mode.
	 */
	if (type == 7 || type == 8) {
		u32 udma;

		pci_read_config_dword(pdev, 0x60, &udma);
		hpriv = (void *)(unsigned long)udma;
	}

	/* And fire it up */
	return ata_pci_bmdma_init_one(pdev, ppi, &amd_sht, hpriv, 0);
}

#ifdef CONFIG_PM
static int amd_reinit_one(struct pci_dev *pdev)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	int rc;

	rc = ata_pci_device_do_resume(pdev);
	if (rc)
		return rc;

	/* Redo the probe-time chip workarounds after a resume */
	if (pdev->vendor == PCI_VENDOR_ID_AMD) {
		amd_clear_fifo(pdev);
		if (pdev->device == PCI_DEVICE_ID_AMD_VIPER_7409 ||
		    pdev->device == PCI_DEVICE_ID_AMD_COBRA_7401)
			ata_pci_bmdma_clear_simplex(pdev);
	}
	ata_host_resume(host);
	return 0;
}
#endif

/* PCI match table; the last column is the "type" index into info[] */
static const struct pci_device_id amd[] = {
	{ PCI_VDEVICE(AMD,	PCI_DEVICE_ID_AMD_COBRA_7401),		0 },
	{ PCI_VDEVICE(AMD,	PCI_DEVICE_ID_AMD_VIPER_7409),		1 },
	{ PCI_VDEVICE(AMD,	PCI_DEVICE_ID_AMD_VIPER_7411),		3 },
	{ PCI_VDEVICE(AMD,	PCI_DEVICE_ID_AMD_OPUS_7441),		4 },
	{ PCI_VDEVICE(AMD,	PCI_DEVICE_ID_AMD_8111_IDE),		5 },
	{ PCI_VDEVICE(NVIDIA,	PCI_DEVICE_ID_NVIDIA_NFORCE_IDE),	7 },
	{ PCI_VDEVICE(NVIDIA,	PCI_DEVICE_ID_NVIDIA_NFORCE2_IDE),	8 },
	{ PCI_VDEVICE(NVIDIA,	PCI_DEVICE_ID_NVIDIA_NFORCE2S_IDE),	8 },
	{ PCI_VDEVICE(NVIDIA,	PCI_DEVICE_ID_NVIDIA_NFORCE3_IDE),	8 },
	{ PCI_VDEVICE(NVIDIA,	PCI_DEVICE_ID_NVIDIA_NFORCE3S_IDE),	8 },
	{ PCI_VDEVICE(NVIDIA,	PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_IDE),	8 },
	{ PCI_VDEVICE(NVIDIA,	PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_IDE),	8 },
	{ PCI_VDEVICE(NVIDIA,	PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_IDE),	8 },
	{ PCI_VDEVICE(NVIDIA,	PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_IDE),	8 },
	{ PCI_VDEVICE(NVIDIA,	PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_IDE),	8 },
	{ PCI_VDEVICE(NVIDIA,	PCI_DEVICE_ID_NVIDIA_NFORCE_MCP65_IDE),	8 },
	{ PCI_VDEVICE(NVIDIA,	PCI_DEVICE_ID_NVIDIA_NFORCE_MCP67_IDE),	8 },
	{ PCI_VDEVICE(NVIDIA,	PCI_DEVICE_ID_NVIDIA_NFORCE_MCP73_IDE),	8 },
	{ PCI_VDEVICE(NVIDIA,	PCI_DEVICE_ID_NVIDIA_NFORCE_MCP77_IDE),	8 },
	{ PCI_VDEVICE(AMD,	PCI_DEVICE_ID_AMD_CS5536_IDE),		9 },

	{ },
};

static struct pci_driver amd_pci_driver = {
	.name		= DRV_NAME,
	.id_table	= amd,
	.probe		= amd_init_one,
	.remove		= ata_pci_remove_one,
#ifdef CONFIG_PM
	.suspend	= ata_pci_device_suspend,
	.resume		= amd_reinit_one,
#endif
};

static int __init amd_init(void)
{
	return pci_register_driver(&amd_pci_driver);
}

static void __exit amd_exit(void)
{
	pci_unregister_driver(&amd_pci_driver);
}

MODULE_AUTHOR("Alan Cox");
MODULE_DESCRIPTION("low-level driver for AMD and Nvidia PATA IDE");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, amd);
MODULE_VERSION(DRV_VERSION);

module_init(amd_init);
module_exit(amd_exit);
gpl-2.0
u-ra/android_kernel_htc_villec2
drivers/staging/comedi/drivers/addi-data/APCI1710_Pwm.c
8324
110860
/** @verbatim Copyright (C) 2004,2005 ADDI-DATA GmbH for the source code of this module. ADDI-DATA GmbH Dieselstrasse 3 D-77833 Ottersweier Tel: +49(0)7223/9493-0 Fax: +49(0)7223/9493-92 http://www.addi-data.com info@addi-data.com This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA You should also find the complete GPL in the COPYING file accompanying this source code. @endverbatim */ /* +-----------------------------------------------------------------------+ | (C) ADDI-DATA GmbH Dieselstraße 3 D-77833 Ottersweier | +-----------------------------------------------------------------------+ | Tel : +49 (0) 7223/9493-0 | email : info@addi-data.com | | Fax : +49 (0) 7223/9493-92 | Internet : http://www.addi-data.com | +-----------------------------------------------------------------------+ | Project : API APCI1710 | Compiler : gcc | | Module name : PWM.C | Version : 2.96 | +-------------------------------+---------------------------------------+ | Project manager: Eric Stolz | Date : 02/12/2002 | +-----------------------------------------------------------------------+ | Description : APCI-1710 Pulse width modulation module | | | | | +-----------------------------------------------------------------------+ | UPDATES | +-----------------------------------------------------------------------+ | Date | Author | Description of updates | 
  +-----------------------------------------------------------------------+
  | 08/05/00 | Guinot C  | - 0400/0228 All Function in RING 0             |
  |          |           |   available                                   |
  +-----------------------------------------------------------------------+
 */

/*
+----------------------------------------------------------------------------+
|                               Included files                               |
+----------------------------------------------------------------------------+
*/

#include "APCI1710_Pwm.h"

/*
+----------------------------------------------------------------------------+
| Function Name :INT i_APCI1710_InsnConfigPWM(struct comedi_device *dev,
|                struct comedi_subdevice *s, struct comedi_insn *insn,
|                unsigned int *data)
+----------------------------------------------------------------------------+
| Task          : Dispatches a comedi config instruction to the PWM init or  |
|                 get-initialisation helper, selected by CR_CHAN(chanspec).  |
+----------------------------------------------------------------------------+
| Input Parameters  : dev/s/insn/data - standard comedi instruction arguments|
+----------------------------------------------------------------------------+
| Output Parameters : data[] - filled in by the selected helper              |
+----------------------------------------------------------------------------+
| Return Value      : insn->n on success, the helper's negative error        |
|                     code otherwise                                         |
+----------------------------------------------------------------------------+
*/
int i_APCI1710_InsnConfigPWM(struct comedi_device *dev, struct comedi_subdevice *s,
	struct comedi_insn *insn, unsigned int *data)
{
	unsigned char b_ConfigType;
	int i_ReturnValue = 0;
	/* The channel field of the chanspec selects the config operation */
	b_ConfigType = CR_CHAN(insn->chanspec);

	switch (b_ConfigType) {
	case APCI1710_PWM_INIT:
		i_ReturnValue = i_APCI1710_InitPWM(dev,
			(unsigned char) CR_AREF(insn->chanspec),	/* b_ModulNbr */
			(unsigned char) data[0],	/* b_PWM */
			(unsigned char) data[1],	/* b_ClockSelection */
			(unsigned char) data[2],	/* b_TimingUnit */
			(unsigned int) data[3],	/* ul_LowTiming */
			(unsigned int) data[4],	/* ul_HighTiming */
			(unsigned int *) &data[0],	/* pul_RealLowTiming */
			(unsigned int *) &data[1]	/* pul_RealHighTiming */
			);
		break;

	case APCI1710_PWM_GETINITDATA:
		i_ReturnValue = i_APCI1710_GetPWMInitialisation(dev,
			(unsigned char) CR_AREF(insn->chanspec),	/* b_ModulNbr */
			(unsigned char) data[0],	/* b_PWM */
			(unsigned char *) &data[0],	/* pb_TimingUnit */
			(unsigned int *) &data[1],	/* pul_LowTiming */
			(unsigned int *) &data[2],	/* pul_HighTiming */
			(unsigned char *) &data[3],	/* pb_StartLevel */
			(unsigned char *) &data[4],	/* pb_StopMode */
			(unsigned char *) &data[5],	/* pb_StopLevel */
			(unsigned char *) &data[6],	/* pb_ExternGate */
			(unsigned char *) &data[7],	/* pb_InterruptEnable */
			(unsigned char *) &data[8]	/* pb_Enable */
			);
		break;

	default:
		printk(" Config Parameter Wrong\n");
	}

	/* comedi convention: a successful insn returns the sample count */
	if (i_ReturnValue >= 0)
		i_ReturnValue = insn->n;
	return i_ReturnValue;
}

/*
+----------------------------------------------------------------------------+
| Function Name     : _INT_ i_APCI1710_InitPWM                               |
|                       (unsigned char_    b_BoardHandle,                    |
|                        unsigned char_    b_ModulNbr,                       |
|                        unsigned char_    b_PWM,                            |
|                        unsigned char_    b_ClockSelection,                 |
|                        unsigned char_    b_TimingUnit,                     |
|                        ULONG_  ul_LowTiming,                               |
|                        ULONG_  ul_HighTiming,                              |
|                        PULONG_ pul_RealLowTiming,                          |
|                        PULONG_ pul_RealHighTiming)                         |
+----------------------------------------------------------------------------+
| Task              : Configure the selected PWM (b_PWM) from selected module|
|                     (b_ModulNbr). The ul_LowTiming, ul_HighTiming and      |
|                     ul_TimingUnit determine the low/high timing base for   |
|                     the period. pul_RealLowTiming, pul_RealHighTiming      |
|                     return the real timing value.                          |
|                     You must call this function before you call any        |
|                     other function which accesses the PWM.                 |
+----------------------------------------------------------------------------+
| Input Parameters  : unsigned char_  b_BoardHandle    : Handle of board APCI-1710 |
|                     unsigned char_  b_ModulNbr       : Module number to configure|
|                                                        (0 to 3)            |
|                     unsigned char_  b_PWM            : Selected PWM (0 or 1).
| | unsigned char_ b_ClockSelection : Selection from PCI bus | | clock | | - APCI1710_30MHZ : | | The PC have a 30 MHz | | PCI bus clock | | - APCI1710_33MHZ : | | The PC have a 33 MHz | | PCI bus clock | | - APCI1710_40MHZ | | The APCI-1710 have a | | integrated 40Mhz | | quartz. | | unsigned char_ b_TimingUnit : Base timing Unit (0 to 4) | | 0 : ns | | 1 : æs | | 2 : ms | | 3 : s | | 4 : mn | | ULONG_ ul_LowTiming : Low base timing value. | | ULONG_ ul_HighTiming : High base timing value. | +----------------------------------------------------------------------------+ | Output Parameters : PULONG_ pul_RealLowTiming : Real low base timing | | value. | | PULONG_ pul_RealHighTiming : Real high base timing | | value. | +----------------------------------------------------------------------------+ | Return Value : 0: No error | | -1: The handle parameter of the board is wrong | | -2: Module selection wrong | | -3: The module is not a PWM module | | -4: PWM selection is wrong | | -5: The selected input clock is wrong | | -6: Timing Unit selection is wrong | | -7: Low base timing selection is wrong | | -8: High base timing selection is wrong | | -9: You can not used the 40MHz clock selection with | | this board | +----------------------------------------------------------------------------+ */ int i_APCI1710_InitPWM(struct comedi_device *dev, unsigned char b_ModulNbr, unsigned char b_PWM, unsigned char b_ClockSelection, unsigned char b_TimingUnit, unsigned int ul_LowTiming, unsigned int ul_HighTiming, unsigned int *pul_RealLowTiming, unsigned int *pul_RealHighTiming) { int i_ReturnValue = 0; unsigned int ul_LowTimerValue = 0; unsigned int ul_HighTimerValue = 0; unsigned int dw_Command; double d_RealLowTiming = 0; double d_RealHighTiming = 0; /**************************/ /* Test the module number */ /**************************/ if (b_ModulNbr < 4) { /***************/ /* Test if PWM */ /***************/ if ((devpriv->s_BoardInfos. 
dw_MolduleConfiguration[b_ModulNbr] & 0xFFFF0000UL) == APCI1710_PWM) { /**************************/ /* Test the PWM selection */ /**************************/ if (b_PWM <= 1) { /******************/ /* Test the clock */ /******************/ if ((b_ClockSelection == APCI1710_30MHZ) || (b_ClockSelection == APCI1710_33MHZ) || (b_ClockSelection == APCI1710_40MHZ)) { /************************/ /* Test the timing unit */ /************************/ if (b_TimingUnit <= 4) { /*********************************/ /* Test the low timing selection */ /*********************************/ if (((b_ClockSelection == APCI1710_30MHZ) && (b_TimingUnit == 0) && (ul_LowTiming >= 266) && (ul_LowTiming <= 0xFFFFFFFFUL)) || ((b_ClockSelection == APCI1710_30MHZ) && (b_TimingUnit == 1) && (ul_LowTiming >= 1) && (ul_LowTiming <= 571230650UL)) || ((b_ClockSelection == APCI1710_30MHZ) && (b_TimingUnit == 2) && (ul_LowTiming >= 1) && (ul_LowTiming <= 571230UL)) || ((b_ClockSelection == APCI1710_30MHZ) && (b_TimingUnit == 3) && (ul_LowTiming >= 1) && (ul_LowTiming <= 571UL)) || ((b_ClockSelection == APCI1710_30MHZ) && (b_TimingUnit == 4) && (ul_LowTiming >= 1) && (ul_LowTiming <= 9UL)) || ((b_ClockSelection == APCI1710_33MHZ) && (b_TimingUnit == 0) && (ul_LowTiming >= 242) && (ul_LowTiming <= 0xFFFFFFFFUL)) || ((b_ClockSelection == APCI1710_33MHZ) && (b_TimingUnit == 1) && (ul_LowTiming >= 1) && (ul_LowTiming <= 519691043UL)) || ((b_ClockSelection == APCI1710_33MHZ) && (b_TimingUnit == 2) && (ul_LowTiming >= 1) && (ul_LowTiming <= 519691UL)) || ((b_ClockSelection == APCI1710_33MHZ) && (b_TimingUnit == 3) && (ul_LowTiming >= 1) && (ul_LowTiming <= 520UL)) || ((b_ClockSelection == APCI1710_33MHZ) && (b_TimingUnit == 4) && (ul_LowTiming >= 1) && (ul_LowTiming <= 8UL)) || ((b_ClockSelection == APCI1710_40MHZ) && (b_TimingUnit == 0) && (ul_LowTiming >= 200) && (ul_LowTiming <= 0xFFFFFFFFUL)) || ((b_ClockSelection == APCI1710_40MHZ) && (b_TimingUnit == 1) && (ul_LowTiming >= 1) && (ul_LowTiming <= 
429496729UL)) || ((b_ClockSelection == APCI1710_40MHZ) && (b_TimingUnit == 2) && (ul_LowTiming >= 1) && (ul_LowTiming <= 429496UL)) || ((b_ClockSelection == APCI1710_40MHZ) && (b_TimingUnit == 3) && (ul_LowTiming >= 1) && (ul_LowTiming <= 429UL)) || ((b_ClockSelection == APCI1710_40MHZ) && (b_TimingUnit == 4) && (ul_LowTiming >= 1) && (ul_LowTiming <= 7UL))) { /**********************************/ /* Test the High timing selection */ /**********************************/ if (((b_ClockSelection == APCI1710_30MHZ) && (b_TimingUnit == 0) && (ul_HighTiming >= 266) && (ul_HighTiming <= 0xFFFFFFFFUL)) || ((b_ClockSelection == APCI1710_30MHZ) && (b_TimingUnit == 1) && (ul_HighTiming >= 1) && (ul_HighTiming <= 571230650UL)) || ((b_ClockSelection == APCI1710_30MHZ) && (b_TimingUnit == 2) && (ul_HighTiming >= 1) && (ul_HighTiming <= 571230UL)) || ((b_ClockSelection == APCI1710_30MHZ) && (b_TimingUnit == 3) && (ul_HighTiming >= 1) && (ul_HighTiming <= 571UL)) || ((b_ClockSelection == APCI1710_30MHZ) && (b_TimingUnit == 4) && (ul_HighTiming >= 1) && (ul_HighTiming <= 9UL)) || ((b_ClockSelection == APCI1710_33MHZ) && (b_TimingUnit == 0) && (ul_HighTiming >= 242) && (ul_HighTiming <= 0xFFFFFFFFUL)) || ((b_ClockSelection == APCI1710_33MHZ) && (b_TimingUnit == 1) && (ul_HighTiming >= 1) && (ul_HighTiming <= 519691043UL)) || ((b_ClockSelection == APCI1710_33MHZ) && (b_TimingUnit == 2) && (ul_HighTiming >= 1) && (ul_HighTiming <= 519691UL)) || ((b_ClockSelection == APCI1710_33MHZ) && (b_TimingUnit == 3) && (ul_HighTiming >= 1) && (ul_HighTiming <= 520UL)) || ((b_ClockSelection == APCI1710_33MHZ) && (b_TimingUnit == 4) && (ul_HighTiming >= 1) && (ul_HighTiming <= 8UL)) || ((b_ClockSelection == APCI1710_40MHZ) && (b_TimingUnit == 0) && (ul_HighTiming >= 200) && (ul_HighTiming <= 0xFFFFFFFFUL)) || ((b_ClockSelection == APCI1710_40MHZ) && (b_TimingUnit == 1) && (ul_HighTiming >= 1) && (ul_HighTiming <= 429496729UL)) || ((b_ClockSelection == APCI1710_40MHZ) && (b_TimingUnit == 2) && 
(ul_HighTiming >= 1) && (ul_HighTiming <= 429496UL)) || ((b_ClockSelection == APCI1710_40MHZ) && (b_TimingUnit == 3) && (ul_HighTiming >= 1) && (ul_HighTiming <= 429UL)) || ((b_ClockSelection == APCI1710_40MHZ) && (b_TimingUnit == 4) && (ul_HighTiming >= 1) && (ul_HighTiming <= 7UL))) { /**************************/ /* Test the board version */ /**************************/ if (((b_ClockSelection == APCI1710_40MHZ) && (devpriv->s_BoardInfos.b_BoardVersion > 0)) || (b_ClockSelection != APCI1710_40MHZ)) { /************************************/ /* Calculate the low division fator */ /************************************/ fpu_begin (); switch (b_TimingUnit) { /******/ /* ns */ /******/ case 0: /******************/ /* Timer 0 factor */ /******************/ ul_LowTimerValue = (unsigned int) (ul_LowTiming * (0.00025 * b_ClockSelection)); /*******************/ /* Round the value */ /*******************/ if ((double)((double)ul_LowTiming * (0.00025 * (double)b_ClockSelection)) >= ((double)((double)ul_LowTimerValue + 0.5))) { ul_LowTimerValue = ul_LowTimerValue + 1; } /*****************************/ /* Calculate the real timing */ /*****************************/ *pul_RealLowTiming = (unsigned int) (ul_LowTimerValue / (0.00025 * (double)b_ClockSelection)); d_RealLowTiming = (double) ul_LowTimerValue / (0.00025 * (double) b_ClockSelection); if ((double)((double)ul_LowTimerValue / (0.00025 * (double)b_ClockSelection)) >= (double)((double)*pul_RealLowTiming + 0.5)) { *pul_RealLowTiming = *pul_RealLowTiming + 1; } ul_LowTiming = ul_LowTiming - 1; ul_LowTimerValue = ul_LowTimerValue - 2; if (b_ClockSelection != APCI1710_40MHZ) { ul_LowTimerValue = (unsigned int) ( (double) (ul_LowTimerValue) * 1.007752288); } break; /******/ /* æs */ /******/ case 1: /******************/ /* Timer 0 factor */ /******************/ ul_LowTimerValue = (unsigned int) (ul_LowTiming * (0.25 * b_ClockSelection)); /*******************/ /* Round the value */ /*******************/ if 
((double)((double)ul_LowTiming * (0.25 * (double)b_ClockSelection)) >= ((double)((double)ul_LowTimerValue + 0.5))) { ul_LowTimerValue = ul_LowTimerValue + 1; } /*****************************/ /* Calculate the real timing */ /*****************************/ *pul_RealLowTiming = (unsigned int) (ul_LowTimerValue / (0.25 * (double)b_ClockSelection)); d_RealLowTiming = (double) ul_LowTimerValue / ( (double) 0.25 * (double) b_ClockSelection); if ((double)((double)ul_LowTimerValue / (0.25 * (double)b_ClockSelection)) >= (double)((double)*pul_RealLowTiming + 0.5)) { *pul_RealLowTiming = *pul_RealLowTiming + 1; } ul_LowTiming = ul_LowTiming - 1; ul_LowTimerValue = ul_LowTimerValue - 2; if (b_ClockSelection != APCI1710_40MHZ) { ul_LowTimerValue = (unsigned int) ( (double) (ul_LowTimerValue) * 1.007752288); } break; /******/ /* ms */ /******/ case 2: /******************/ /* Timer 0 factor */ /******************/ ul_LowTimerValue = ul_LowTiming * (250.0 * b_ClockSelection); /*******************/ /* Round the value */ /*******************/ if ((double)((double)ul_LowTiming * (250.0 * (double)b_ClockSelection)) >= ((double)((double)ul_LowTimerValue + 0.5))) { ul_LowTimerValue = ul_LowTimerValue + 1; } /*****************************/ /* Calculate the real timing */ /*****************************/ *pul_RealLowTiming = (unsigned int) (ul_LowTimerValue / (250.0 * (double)b_ClockSelection)); d_RealLowTiming = (double) ul_LowTimerValue / (250.0 * (double) b_ClockSelection); if ((double)((double)ul_LowTimerValue / (250.0 * (double)b_ClockSelection)) >= (double)((double)*pul_RealLowTiming + 0.5)) { *pul_RealLowTiming = *pul_RealLowTiming + 1; } ul_LowTiming = ul_LowTiming - 1; ul_LowTimerValue = ul_LowTimerValue - 2; if (b_ClockSelection != APCI1710_40MHZ) { ul_LowTimerValue = (unsigned int) ( (double) (ul_LowTimerValue) * 1.007752288); } break; /*****/ /* s */ /*****/ case 3: /******************/ /* Timer 0 factor */ /******************/ ul_LowTimerValue = (unsigned int) (ul_LowTiming * 
(250000.0 * b_ClockSelection)); /*******************/ /* Round the value */ /*******************/ if ((double)((double)ul_LowTiming * (250000.0 * (double)b_ClockSelection)) >= ((double)((double)ul_LowTimerValue + 0.5))) { ul_LowTimerValue = ul_LowTimerValue + 1; } /*****************************/ /* Calculate the real timing */ /*****************************/ *pul_RealLowTiming = (unsigned int) (ul_LowTimerValue / (250000.0 * (double) b_ClockSelection)); d_RealLowTiming = (double) ul_LowTimerValue / (250000.0 * (double) b_ClockSelection); if ((double)((double)ul_LowTimerValue / (250000.0 * (double)b_ClockSelection)) >= (double)((double)*pul_RealLowTiming + 0.5)) { *pul_RealLowTiming = *pul_RealLowTiming + 1; } ul_LowTiming = ul_LowTiming - 1; ul_LowTimerValue = ul_LowTimerValue - 2; if (b_ClockSelection != APCI1710_40MHZ) { ul_LowTimerValue = (unsigned int) ( (double) (ul_LowTimerValue) * 1.007752288); } break; /******/ /* mn */ /******/ case 4: /******************/ /* Timer 0 factor */ /******************/ ul_LowTimerValue = (unsigned int) ( (ul_LowTiming * 60) * (250000.0 * b_ClockSelection)); /*******************/ /* Round the value */ /*******************/ if ((double)((double)(ul_LowTiming * 60.0) * (250000.0 * (double)b_ClockSelection)) >= ((double)((double)ul_LowTimerValue + 0.5))) { ul_LowTimerValue = ul_LowTimerValue + 1; } /*****************************/ /* Calculate the real timing */ /*****************************/ *pul_RealLowTiming = (unsigned int) (ul_LowTimerValue / (250000.0 * (double) b_ClockSelection)) / 60; d_RealLowTiming = ( (double) ul_LowTimerValue / (250000.0 * (double) b_ClockSelection)) / 60.0; if ((double)(((double)ul_LowTimerValue / (250000.0 * (double)b_ClockSelection)) / 60.0) >= (double)((double)*pul_RealLowTiming + 0.5)) { *pul_RealLowTiming = *pul_RealLowTiming + 1; } ul_LowTiming = ul_LowTiming - 1; ul_LowTimerValue = ul_LowTimerValue - 2; if (b_ClockSelection != APCI1710_40MHZ) { ul_LowTimerValue = (unsigned int) ( (double) 
(ul_LowTimerValue) * 1.007752288); } break; } /*************************************/ /* Calculate the high division fator */ /*************************************/ switch (b_TimingUnit) { /******/ /* ns */ /******/ case 0: /******************/ /* Timer 0 factor */ /******************/ ul_HighTimerValue = (unsigned int) (ul_HighTiming * (0.00025 * b_ClockSelection)); /*******************/ /* Round the value */ /*******************/ if ((double)((double)ul_HighTiming * (0.00025 * (double)b_ClockSelection)) >= ((double)((double)ul_HighTimerValue + 0.5))) { ul_HighTimerValue = ul_HighTimerValue + 1; } /*****************************/ /* Calculate the real timing */ /*****************************/ *pul_RealHighTiming = (unsigned int) (ul_HighTimerValue / (0.00025 * (double)b_ClockSelection)); d_RealHighTiming = (double) ul_HighTimerValue / (0.00025 * (double) b_ClockSelection); if ((double)((double)ul_HighTimerValue / (0.00025 * (double)b_ClockSelection)) >= (double)((double)*pul_RealHighTiming + 0.5)) { *pul_RealHighTiming = *pul_RealHighTiming + 1; } ul_HighTiming = ul_HighTiming - 1; ul_HighTimerValue = ul_HighTimerValue - 2; if (b_ClockSelection != APCI1710_40MHZ) { ul_HighTimerValue = (unsigned int) ( (double) (ul_HighTimerValue) * 1.007752288); } break; /******/ /* æs */ /******/ case 1: /******************/ /* Timer 0 factor */ /******************/ ul_HighTimerValue = (unsigned int) (ul_HighTiming * (0.25 * b_ClockSelection)); /*******************/ /* Round the value */ /*******************/ if ((double)((double)ul_HighTiming * (0.25 * (double)b_ClockSelection)) >= ((double)((double)ul_HighTimerValue + 0.5))) { ul_HighTimerValue = ul_HighTimerValue + 1; } /*****************************/ /* Calculate the real timing */ /*****************************/ *pul_RealHighTiming = (unsigned int) (ul_HighTimerValue / (0.25 * (double)b_ClockSelection)); d_RealHighTiming = (double) ul_HighTimerValue / ( (double) 0.25 * (double) b_ClockSelection); if 
((double)((double)ul_HighTimerValue / (0.25 * (double)b_ClockSelection)) >= (double)((double)*pul_RealHighTiming + 0.5)) { *pul_RealHighTiming = *pul_RealHighTiming + 1; } ul_HighTiming = ul_HighTiming - 1; ul_HighTimerValue = ul_HighTimerValue - 2; if (b_ClockSelection != APCI1710_40MHZ) { ul_HighTimerValue = (unsigned int) ( (double) (ul_HighTimerValue) * 1.007752288); } break; /******/ /* ms */ /******/ case 2: /******************/ /* Timer 0 factor */ /******************/ ul_HighTimerValue = ul_HighTiming * (250.0 * b_ClockSelection); /*******************/ /* Round the value */ /*******************/ if ((double)((double)ul_HighTiming * (250.0 * (double)b_ClockSelection)) >= ((double)((double)ul_HighTimerValue + 0.5))) { ul_HighTimerValue = ul_HighTimerValue + 1; } /*****************************/ /* Calculate the real timing */ /*****************************/ *pul_RealHighTiming = (unsigned int) (ul_HighTimerValue / (250.0 * (double)b_ClockSelection)); d_RealHighTiming = (double) ul_HighTimerValue / (250.0 * (double) b_ClockSelection); if ((double)((double)ul_HighTimerValue / (250.0 * (double)b_ClockSelection)) >= (double)((double)*pul_RealHighTiming + 0.5)) { *pul_RealHighTiming = *pul_RealHighTiming + 1; } ul_HighTiming = ul_HighTiming - 1; ul_HighTimerValue = ul_HighTimerValue - 2; if (b_ClockSelection != APCI1710_40MHZ) { ul_HighTimerValue = (unsigned int) ( (double) (ul_HighTimerValue) * 1.007752288); } break; /*****/ /* s */ /*****/ case 3: /******************/ /* Timer 0 factor */ /******************/ ul_HighTimerValue = (unsigned int) (ul_HighTiming * (250000.0 * b_ClockSelection)); /*******************/ /* Round the value */ /*******************/ if ((double)((double)ul_HighTiming * (250000.0 * (double)b_ClockSelection)) >= ((double)((double)ul_HighTimerValue + 0.5))) { ul_HighTimerValue = ul_HighTimerValue + 1; } /*****************************/ /* Calculate the real timing */ /*****************************/ *pul_RealHighTiming = (unsigned int) 
(ul_HighTimerValue / (250000.0 * (double) b_ClockSelection)); d_RealHighTiming = (double) ul_HighTimerValue / (250000.0 * (double) b_ClockSelection); if ((double)((double)ul_HighTimerValue / (250000.0 * (double)b_ClockSelection)) >= (double)((double)*pul_RealHighTiming + 0.5)) { *pul_RealHighTiming = *pul_RealHighTiming + 1; } ul_HighTiming = ul_HighTiming - 1; ul_HighTimerValue = ul_HighTimerValue - 2; if (b_ClockSelection != APCI1710_40MHZ) { ul_HighTimerValue = (unsigned int) ( (double) (ul_HighTimerValue) * 1.007752288); } break; /******/ /* mn */ /******/ case 4: /******************/ /* Timer 0 factor */ /******************/ ul_HighTimerValue = (unsigned int) ( (ul_HighTiming * 60) * (250000.0 * b_ClockSelection)); /*******************/ /* Round the value */ /*******************/ if ((double)((double)(ul_HighTiming * 60.0) * (250000.0 * (double)b_ClockSelection)) >= ((double)((double)ul_HighTimerValue + 0.5))) { ul_HighTimerValue = ul_HighTimerValue + 1; } /*****************************/ /* Calculate the real timing */ /*****************************/ *pul_RealHighTiming = (unsigned int) (ul_HighTimerValue / (250000.0 * (double) b_ClockSelection)) / 60; d_RealHighTiming = ( (double) ul_HighTimerValue / (250000.0 * (double) b_ClockSelection)) / 60.0; if ((double)(((double)ul_HighTimerValue / (250000.0 * (double)b_ClockSelection)) / 60.0) >= (double)((double)*pul_RealHighTiming + 0.5)) { *pul_RealHighTiming = *pul_RealHighTiming + 1; } ul_HighTiming = ul_HighTiming - 1; ul_HighTimerValue = ul_HighTimerValue - 2; if (b_ClockSelection != APCI1710_40MHZ) { ul_HighTimerValue = (unsigned int) ( (double) (ul_HighTimerValue) * 1.007752288); } break; } fpu_end(); /****************************/ /* Save the clock selection */ /****************************/ devpriv-> s_ModuleInfo [b_ModulNbr]. s_PWMModuleInfo. b_ClockSelection = b_ClockSelection; /************************/ /* Save the timing unit */ /************************/ devpriv-> s_ModuleInfo [b_ModulNbr]. 
s_PWMModuleInfo. s_PWMInfo [b_PWM]. b_TimingUnit = b_TimingUnit; /****************************/ /* Save the low base timing */ /****************************/ devpriv-> s_ModuleInfo [b_ModulNbr]. s_PWMModuleInfo. s_PWMInfo [b_PWM]. d_LowTiming = d_RealLowTiming; devpriv-> s_ModuleInfo [b_ModulNbr]. s_PWMModuleInfo. s_PWMInfo [b_PWM]. ul_RealLowTiming = *pul_RealLowTiming; /****************************/ /* Save the high base timing */ /****************************/ devpriv-> s_ModuleInfo [b_ModulNbr]. s_PWMModuleInfo. s_PWMInfo [b_PWM]. d_HighTiming = d_RealHighTiming; devpriv-> s_ModuleInfo [b_ModulNbr]. s_PWMModuleInfo. s_PWMInfo [b_PWM]. ul_RealHighTiming = *pul_RealHighTiming; /************************/ /* Write the low timing */ /************************/ outl(ul_LowTimerValue, devpriv->s_BoardInfos.ui_Address + 0 + (20 * b_PWM) + (64 * b_ModulNbr)); /*************************/ /* Write the high timing */ /*************************/ outl(ul_HighTimerValue, devpriv->s_BoardInfos.ui_Address + 4 + (20 * b_PWM) + (64 * b_ModulNbr)); /***************************/ /* Set the clock selection */ /***************************/ dw_Command = inl (devpriv-> s_BoardInfos. ui_Address + 8 + (20 * b_PWM) + (64 * b_ModulNbr)); dw_Command = dw_Command & 0x7F; if (b_ClockSelection == APCI1710_40MHZ) { dw_Command = dw_Command | 0x80; } /***************************/ /* Set the clock selection */ /***************************/ outl(dw_Command, devpriv->s_BoardInfos.ui_Address + 8 + (20 * b_PWM) + (64 * b_ModulNbr)); /*************/ /* PWM init. */ /*************/ devpriv-> s_ModuleInfo [b_ModulNbr]. s_PWMModuleInfo. s_PWMInfo [b_PWM]. 
b_PWMInit = 1; } else { /***************************************************/ /* You can not used the 40MHz clock selection with */ /* this board */ /***************************************************/ DPRINTK("You can not used the 40MHz clock selection with this board\n"); i_ReturnValue = -9; } } else { /***************************************/ /* High base timing selection is wrong */ /***************************************/ DPRINTK("High base timing selection is wrong\n"); i_ReturnValue = -8; } } else { /**************************************/ /* Low base timing selection is wrong */ /**************************************/ DPRINTK("Low base timing selection is wrong\n"); i_ReturnValue = -7; } } /* if ((b_TimingUnit >= 0) && (b_TimingUnit <= 4)) */ else { /**********************************/ /* Timing unit selection is wrong */ /**********************************/ DPRINTK("Timing unit selection is wrong\n"); i_ReturnValue = -6; } /* if ((b_TimingUnit >= 0) && (b_TimingUnit <= 4)) */ } /* if ((b_ClockSelection == APCI1710_30MHZ) || (b_ClockSelection == APCI1710_33MHZ) || (b_ClockSelection == APCI1710_40MHZ)) */ else { /*******************************/ /* The selected clock is wrong */ /*******************************/ DPRINTK("The selected clock is wrong\n"); i_ReturnValue = -5; } /* if ((b_ClockSelection == APCI1710_30MHZ) || (b_ClockSelection == APCI1710_33MHZ) || (b_ClockSelection == APCI1710_40MHZ)) */ } /* if (b_PWM >= 0 && b_PWM <= 1) */ else { /******************************/ /* Tor PWM selection is wrong */ /******************************/ DPRINTK("Tor PWM selection is wrong\n"); i_ReturnValue = -4; } /* if (b_PWM >= 0 && b_PWM <= 1) */ } else { /**********************************/ /* The module is not a PWM module */ /**********************************/ DPRINTK("The module is not a PWM module\n"); i_ReturnValue = -3; } } else { /***********************/ /* Module number error */ /***********************/ DPRINTK("Module number error\n"); i_ReturnValue 
= -2; } return i_ReturnValue; } /* +----------------------------------------------------------------------------+ | Function Name : _INT_ i_APCI1710_GetPWMInitialisation | | (unsigned char_ b_BoardHandle, | | unsigned char_ b_ModulNbr, | | unsigned char_ b_PWM, | | unsigned char *_ pb_TimingUnit, | | PULONG_ pul_LowTiming, | | PULONG_ pul_HighTiming, | | unsigned char *_ pb_StartLevel, | | unsigned char *_ pb_StopMode, | | unsigned char *_ pb_StopLevel, | | unsigned char *_ pb_ExternGate, | | unsigned char *_ pb_InterruptEnable, | | unsigned char *_ pb_Enable) | +----------------------------------------------------------------------------+ | Task : Return the PWM (b_PWM) initialisation from selected | | module (b_ModulNbr). You must calling the | | "i_APCI1710_InitPWM" function be for you call this | | function. | +----------------------------------------------------------------------------+ | Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 | | unsigned char_ b_ModulNbr : Selected module number (0 to 3) | | unsigned char_ b_PWM : Selected PWM (0 or 1) | +----------------------------------------------------------------------------+ | Output Parameters : unsigned char *_ pb_TimingUnit : Base timing Unit (0 to 4) | | 0 : ns | | 1 : æs | | 2 : ms | | 3 : s | | 4 : mn | | PULONG_ pul_LowTiming : Low base timing value. | | PULONG_ pul_HighTiming : High base timing value. 
| | unsigned char *_ pb_StartLevel : Start period level | | selection | | 0 : The period start | | with a low level | | 1 : The period start | | with a high level| | unsigned char *_ pb_StopMode : Stop mode selection | | 0 : The PWM is stopped | | directly after the | | "i_APCI1710_DisablePWM"| | function and break the| | last period | | 1 : After the | | "i_APCI1710_DisablePWM"| | function the PWM is | | stopped at the end | | from last period cycle| | unsigned char *_ pb_StopLevel : Stop PWM level selection | | 0 : The output signal | | keep the level after| | the | | "i_APCI1710_DisablePWM"| | function | | 1 : The output signal is| | set to low after the| | "i_APCI1710_DisablePWM"| | function | | 2 : The output signal is| | set to high after | | the | | "i_APCI1710_DisablePWM"| | function | | unsigned char *_ pb_ExternGate : Extern gate action | | selection | | 0 : Extern gate signal | | not used. | | 1 : Extern gate signal | | used. | | unsigned char *_ pb_InterruptEnable : Enable or disable the PWM | | interrupt. 
| | - APCI1710_ENABLE : | | Enable the PWM interrupt| | A interrupt occur after | | each period | | - APCI1710_DISABLE : | | Disable the PWM | | interrupt | | unsigned char *_ pb_Enable : Indicate if the PWM is | | enabled or no | | 0 : PWM not enabled | | 1 : PWM enabled | +----------------------------------------------------------------------------+ | Return Value : 0: No error | | -1: The handle parameter of the board is wrong | | -2: Module selection wrong | | -3: The module is not a PWM module | | -4: PWM selection is wrong | | -5: PWM not initialised see function | | "i_APCI1710_InitPWM" | +----------------------------------------------------------------------------+ */ int i_APCI1710_GetPWMInitialisation(struct comedi_device *dev, unsigned char b_ModulNbr, unsigned char b_PWM, unsigned char *pb_TimingUnit, unsigned int *pul_LowTiming, unsigned int *pul_HighTiming, unsigned char *pb_StartLevel, unsigned char *pb_StopMode, unsigned char *pb_StopLevel, unsigned char *pb_ExternGate, unsigned char *pb_InterruptEnable, unsigned char *pb_Enable) { int i_ReturnValue = 0; unsigned int dw_Status; unsigned int dw_Command; /**************************/ /* Test the module number */ /**************************/ if (b_ModulNbr < 4) { /***************/ /* Test if PWM */ /***************/ if ((devpriv->s_BoardInfos. dw_MolduleConfiguration[b_ModulNbr] & 0xFFFF0000UL) == APCI1710_PWM) { /**************************/ /* Test the PWM selection */ /**************************/ if (b_PWM <= 1) { /***************************/ /* Test if PWM initialised */ /***************************/ dw_Status = inl(devpriv->s_BoardInfos. ui_Address + 12 + (20 * b_PWM) + (64 * b_ModulNbr)); if (dw_Status & 0x10) { /***********************/ /* Read the low timing */ /***********************/ *pul_LowTiming = inl(devpriv->s_BoardInfos. 
ui_Address + 0 + (20 * b_PWM) + (64 * b_ModulNbr)); /************************/ /* Read the high timing */ /************************/ *pul_HighTiming = inl(devpriv->s_BoardInfos. ui_Address + 4 + (20 * b_PWM) + (64 * b_ModulNbr)); /********************/ /* Read the command */ /********************/ dw_Command = inl(devpriv->s_BoardInfos. ui_Address + 8 + (20 * b_PWM) + (64 * b_ModulNbr)); *pb_StartLevel = (unsigned char) ((dw_Command >> 5) & 1); *pb_StopMode = (unsigned char) ((dw_Command >> 0) & 1); *pb_StopLevel = (unsigned char) ((dw_Command >> 1) & 1); *pb_ExternGate = (unsigned char) ((dw_Command >> 4) & 1); *pb_InterruptEnable = (unsigned char) ((dw_Command >> 3) & 1); if (*pb_StopLevel) { *pb_StopLevel = *pb_StopLevel + (unsigned char) ((dw_Command >> 2) & 1); } /********************/ /* Read the command */ /********************/ dw_Command = inl(devpriv->s_BoardInfos. ui_Address + 8 + (20 * b_PWM) + (64 * b_ModulNbr)); *pb_Enable = (unsigned char) ((dw_Command >> 0) & 1); *pb_TimingUnit = devpriv-> s_ModuleInfo[b_ModulNbr]. s_PWMModuleInfo. 
s_PWMInfo[b_PWM].b_TimingUnit; } /* if (dw_Status & 0x10) */ else { /***********************/ /* PWM not initialised */ /***********************/ DPRINTK("PWM not initialised\n"); i_ReturnValue = -5; } /* if (dw_Status & 0x10) */ } /* if (b_PWM >= 0 && b_PWM <= 1) */ else { /******************************/ /* Tor PWM selection is wrong */ /******************************/ DPRINTK("Tor PWM selection is wrong\n"); i_ReturnValue = -4; } /* if (b_PWM >= 0 && b_PWM <= 1) */ } else { /**********************************/ /* The module is not a PWM module */ /**********************************/ DPRINTK("The module is not a PWM module\n"); i_ReturnValue = -3; } } else { /***********************/ /* Module number error */ /***********************/ DPRINTK("Module number error\n"); i_ReturnValue = -2; } return i_ReturnValue; } /* +----------------------------------------------------------------------------+ | Function Name :INT i_APCI1710_InsnWritePWM(struct comedi_device *dev, struct comedi_subdevice *s,struct comedi_insn *insn,unsigned int *data) | +----------------------------------------------------------------------------+ | Task : Pwm Enable Disable and Set New Timing | +----------------------------------------------------------------------------+ | Input Parameters : +----------------------------------------------------------------------------+ | Output Parameters : - | +----------------------------------------------------------------------------+ | Return Value : +----------------------------------------------------------------------------+ */ int i_APCI1710_InsnWritePWM(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { unsigned char b_WriteType; int i_ReturnValue = 0; b_WriteType = CR_CHAN(insn->chanspec); switch (b_WriteType) { case APCI1710_PWM_ENABLE: i_ReturnValue = i_APCI1710_EnablePWM(dev, (unsigned char) CR_AREF(insn->chanspec), (unsigned char) data[0], (unsigned char) data[1], (unsigned char) data[2], 
(unsigned char) data[3], (unsigned char) data[4], (unsigned char) data[5]); break; case APCI1710_PWM_DISABLE: i_ReturnValue = i_APCI1710_DisablePWM(dev, (unsigned char) CR_AREF(insn->chanspec), (unsigned char) data[0]); break; case APCI1710_PWM_NEWTIMING: i_ReturnValue = i_APCI1710_SetNewPWMTiming(dev, (unsigned char) CR_AREF(insn->chanspec), (unsigned char) data[0], (unsigned char) data[1], (unsigned int) data[2], (unsigned int) data[3]); break; default: printk("Write Config Parameter Wrong\n"); } if (i_ReturnValue >= 0) i_ReturnValue = insn->n; return i_ReturnValue; } /* +----------------------------------------------------------------------------+ | Function Name : _INT_ i_APCI1710_EnablePWM | | (unsigned char_ b_BoardHandle, | | unsigned char_ b_ModulNbr, | | unsigned char_ b_PWM, | | unsigned char_ b_StartLevel, | | unsigned char_ b_StopMode, | | unsigned char_ b_StopLevel, | | unsigned char_ b_ExternGate, | | unsigned char_ b_InterruptEnable) | +----------------------------------------------------------------------------+ | Task : Enable the selected PWM (b_PWM) from selected module | | (b_ModulNbr). You must calling the "i_APCI1710_InitPWM"| | function be for you call this function. | | If you enable the PWM interrupt, the PWM generate a | | interrupt after each period. | | See function "i_APCI1710_SetBoardIntRoutineX" and the | | Interrupt mask description chapter. 
| +----------------------------------------------------------------------------+ | Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 | | unsigned char_ b_ModulNbr : Selected module number | | (0 to 3) | | unsigned char_ b_PWM : Selected PWM (0 or 1) | | unsigned char_ b_StartLevel : Start period level selection | | 0 : The period start with a | | low level | | 1 : The period start with a | | high level | | unsigned char_ b_StopMode : Stop mode selection | | 0 : The PWM is stopped | | directly after the | | "i_APCI1710_DisablePWM" | | function and break the | | last period | | 1 : After the | | "i_APCI1710_DisablePWM" | | function the PWM is | | stopped at the end from| | last period cycle. | | unsigned char_ b_StopLevel : Stop PWM level selection | | 0 : The output signal keep | | the level after the | | "i_APCI1710_DisablePWM" | | function | | 1 : The output signal is set| | to low after the | | "i_APCI1710_DisablePWM" | | function | | 2 : The output signal is set| | to high after the | | "i_APCI1710_DisablePWM" | | function | | unsigned char_ b_ExternGate : Extern gate action selection | | 0 : Extern gate signal not | | used. | | 1 : Extern gate signal used.| | unsigned char_ b_InterruptEnable : Enable or disable the PWM | | interrupt. 
| | - APCI1710_ENABLE : | | Enable the PWM interrupt | | A interrupt occur after | | each period | | - APCI1710_DISABLE : | | Disable the PWM interrupt | +----------------------------------------------------------------------------+ | Output Parameters : - | +----------------------------------------------------------------------------+ | Return Value : 0: No error | | -1: The handle parameter of the board is wrong | | -2: Module selection wrong | | -3: The module is not a PWM module | | -4: PWM selection is wrong | | -5: PWM not initialised see function | | "i_APCI1710_InitPWM" | | -6: PWM start level selection is wrong | | -7: PWM stop mode selection is wrong | | -8: PWM stop level selection is wrong | | -9: Extern gate signal selection is wrong | | -10: Interrupt parameter is wrong | | -11: Interrupt function not initialised. | | See function "i_APCI1710_SetBoardIntRoutineX" | +----------------------------------------------------------------------------+ */ int i_APCI1710_EnablePWM(struct comedi_device *dev, unsigned char b_ModulNbr, unsigned char b_PWM, unsigned char b_StartLevel, unsigned char b_StopMode, unsigned char b_StopLevel, unsigned char b_ExternGate, unsigned char b_InterruptEnable) { int i_ReturnValue = 0; unsigned int dw_Status; unsigned int dw_Command; devpriv->tsk_Current = current; /* Save the current process task structure */ /**************************/ /* Test the module number */ /**************************/ if (b_ModulNbr < 4) { /***************/ /* Test if PWM */ /***************/ if ((devpriv->s_BoardInfos. dw_MolduleConfiguration[b_ModulNbr] & 0xFFFF0000UL) == APCI1710_PWM) { /**************************/ /* Test the PWM selection */ /**************************/ if (b_PWM <= 1) { /***************************/ /* Test if PWM initialised */ /***************************/ dw_Status = inl(devpriv->s_BoardInfos. 
ui_Address + 12 + (20 * b_PWM) + (64 * b_ModulNbr)); if (dw_Status & 0x10) { /**********************************/ /* Test the start level selection */ /**********************************/ if (b_StartLevel <= 1) { /**********************/ /* Test the stop mode */ /**********************/ if (b_StopMode <= 1) { /***********************/ /* Test the stop level */ /***********************/ if (b_StopLevel <= 2) { /*****************************/ /* Test the extern gate mode */ /*****************************/ if (b_ExternGate <= 1) { /*****************************/ /* Test the interrupt action */ /*****************************/ if (b_InterruptEnable == APCI1710_ENABLE || b_InterruptEnable == APCI1710_DISABLE) { /******************************************/ /* Test if interrupt function initialised */ /******************************************/ /********************/ /* Read the command */ /********************/ dw_Command = inl (devpriv-> s_BoardInfos. ui_Address + 8 + (20 * b_PWM) + (64 * b_ModulNbr)); dw_Command = dw_Command & 0x80; /********************/ /* Make the command */ /********************/ dw_Command = dw_Command | b_StopMode | (b_InterruptEnable << 3) | (b_ExternGate << 4) | (b_StartLevel << 5); if (b_StopLevel & 3) { dw_Command = dw_Command | 2; if (b_StopLevel & 2) { dw_Command = dw_Command | 4; } } devpriv-> s_ModuleInfo [b_ModulNbr]. s_PWMModuleInfo. s_PWMInfo [b_PWM]. 
b_InterruptEnable = b_InterruptEnable; /*******************/ /* Set the command */ /*******************/ outl(dw_Command, devpriv->s_BoardInfos.ui_Address + 8 + (20 * b_PWM) + (64 * b_ModulNbr)); /******************/ /* Enable the PWM */ /******************/ outl(1, devpriv->s_BoardInfos.ui_Address + 12 + (20 * b_PWM) + (64 * b_ModulNbr)); } /* if (b_InterruptEnable == APCI1710_ENABLE || b_InterruptEnable == APCI1710_DISABLE) */ else { /********************************/ /* Interrupt parameter is wrong */ /********************************/ DPRINTK("Interrupt parameter is wrong\n"); i_ReturnValue = -10; } /* if (b_InterruptEnable == APCI1710_ENABLE || b_InterruptEnable == APCI1710_DISABLE) */ } /* if (b_ExternGate >= 0 && b_ExternGate <= 1) */ else { /*****************************************/ /* Extern gate signal selection is wrong */ /*****************************************/ DPRINTK("Extern gate signal selection is wrong\n"); i_ReturnValue = -9; } /* if (b_ExternGate >= 0 && b_ExternGate <= 1) */ } /* if (b_StopLevel >= 0 && b_StopLevel <= 2) */ else { /*************************************/ /* PWM stop level selection is wrong */ /*************************************/ DPRINTK("PWM stop level selection is wrong\n"); i_ReturnValue = -8; } /* if (b_StopLevel >= 0 && b_StopLevel <= 2) */ } /* if (b_StopMode >= 0 && b_StopMode <= 1) */ else { /************************************/ /* PWM stop mode selection is wrong */ /************************************/ DPRINTK("PWM stop mode selection is wrong\n"); i_ReturnValue = -7; } /* if (b_StopMode >= 0 && b_StopMode <= 1) */ } /* if (b_StartLevel >= 0 && b_StartLevel <= 1) */ else { /**************************************/ /* PWM start level selection is wrong */ /**************************************/ DPRINTK("PWM start level selection is wrong\n"); i_ReturnValue = -6; } /* if (b_StartLevel >= 0 && b_StartLevel <= 1) */ } /* if (dw_Status & 0x10) */ else { /***********************/ /* PWM not initialised */ 
/***********************/ DPRINTK("PWM not initialised\n"); i_ReturnValue = -5; } /* if (dw_Status & 0x10) */ } /* if (b_PWM >= 0 && b_PWM <= 1) */ else { /******************************/ /* Tor PWM selection is wrong */ /******************************/ DPRINTK("Tor PWM selection is wrong\n"); i_ReturnValue = -4; } /* if (b_PWM >= 0 && b_PWM <= 1) */ } else { /**********************************/ /* The module is not a PWM module */ /**********************************/ DPRINTK("The module is not a PWM module\n"); i_ReturnValue = -3; } } else { /***********************/ /* Module number error */ /***********************/ DPRINTK("Module number error\n"); i_ReturnValue = -2; } return i_ReturnValue; } /* +----------------------------------------------------------------------------+ | Function Name : _INT_ i_APCI1710_DisablePWM (unsigned char_ b_BoardHandle, | | unsigned char_ b_ModulNbr, | | unsigned char_ b_PWM) | +----------------------------------------------------------------------------+ | Task : Disable the selected PWM (b_PWM) from selected module | | (b_ModulNbr). The output signal level depend of the | | initialisation by the "i_APCI1710_EnablePWM". | | See the b_StartLevel, b_StopMode and b_StopLevel | | parameters from this function. 
| +----------------------------------------------------------------------------+ | Input Parameters :BYTE_ b_BoardHandle : Handle of board APCI-1710 | | unsigned char_ b_ModulNbr : Selected module number (0 to 3) | | unsigned char_ b_PWM : Selected PWM (0 or 1) | +----------------------------------------------------------------------------+ | Output Parameters : - | +----------------------------------------------------------------------------+ | Return Value : 0: No error | | -1: The handle parameter of the board is wrong | | -2: Module selection wrong | | -3: The module is not a PWM module | | -4: PWM selection is wrong | | -5: PWM not initialised see function | | "i_APCI1710_InitPWM" | | -6: PWM not enabled see function | | "i_APCI1710_EnablePWM" | +----------------------------------------------------------------------------+ */ int i_APCI1710_DisablePWM(struct comedi_device *dev, unsigned char b_ModulNbr, unsigned char b_PWM) { int i_ReturnValue = 0; unsigned int dw_Status; /**************************/ /* Test the module number */ /**************************/ if (b_ModulNbr < 4) { /***************/ /* Test if PWM */ /***************/ if ((devpriv->s_BoardInfos. dw_MolduleConfiguration[b_ModulNbr] & 0xFFFF0000UL) == APCI1710_PWM) { /**************************/ /* Test the PWM selection */ /**************************/ if (b_PWM <= 1) { /***************************/ /* Test if PWM initialised */ /***************************/ dw_Status = inl(devpriv->s_BoardInfos. ui_Address + 12 + (20 * b_PWM) + (64 * b_ModulNbr)); if (dw_Status & 0x10) { /***********************/ /* Test if PWM enabled */ /***********************/ if (dw_Status & 0x1) { /*******************/ /* Disable the PWM */ /*******************/ outl(0, devpriv->s_BoardInfos. 
ui_Address + 12 + (20 * b_PWM) + (64 * b_ModulNbr)); } /* if (dw_Status & 0x1) */ else { /*******************/ /* PWM not enabled */ /*******************/ DPRINTK("PWM not enabled\n"); i_ReturnValue = -6; } /* if (dw_Status & 0x1) */ } /* if (dw_Status & 0x10) */ else { /***********************/ /* PWM not initialised */ /***********************/ DPRINTK(" PWM not initialised\n"); i_ReturnValue = -5; } /* if (dw_Status & 0x10) */ } /* if (b_PWM >= 0 && b_PWM <= 1) */ else { /******************************/ /* Tor PWM selection is wrong */ /******************************/ DPRINTK("Tor PWM selection is wrong\n"); i_ReturnValue = -4; } /* if (b_PWM >= 0 && b_PWM <= 1) */ } else { /**********************************/ /* The module is not a PWM module */ /**********************************/ DPRINTK("The module is not a PWM module\n"); i_ReturnValue = -3; } } else { /***********************/ /* Module number error */ /***********************/ DPRINTK("Module number error\n"); i_ReturnValue = -2; } return i_ReturnValue; } /* +----------------------------------------------------------------------------+ | Function Name : _INT_ i_APCI1710_SetNewPWMTiming | | (unsigned char_ b_BoardHandle, | | unsigned char_ b_ModulNbr, | | unsigned char_ b_PWM, | | unsigned char_ b_ClockSelection, | | unsigned char_ b_TimingUnit, | | ULONG_ ul_LowTiming, | | ULONG_ ul_HighTiming) | +----------------------------------------------------------------------------+ | Task : Set a new timing. The ul_LowTiming, ul_HighTiming and | | ul_TimingUnit determine the low/high timing base for | | the period. | +----------------------------------------------------------------------------+ | Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 | | unsigned char_ b_ModulNbr : Module number to configure| | (0 to 3) | | unsigned char_ b_PWM : Selected PWM (0 or 1). 
| | unsigned char_ b_TimingUnit : Base timing Unit (0 to 4) | | 0 : ns | | 1 : æs | | 2 : ms | | 3 : s | | 4 : mn | | ULONG_ ul_LowTiming : Low base timing value. | | ULONG_ ul_HighTiming : High base timing value. | +----------------------------------------------------------------------------+ | Output Parameters : - | +----------------------------------------------------------------------------+ | Return Value : 0: No error | | -1: The handle parameter of the board is wrong | | -2: Module selection wrong | | -3: The module is not a PWM module | | -4: PWM selection is wrong | | -5: PWM not initialised | | -6: Timing Unit selection is wrong | | -7: Low base timing selection is wrong | | -8: High base timing selection is wrong | +----------------------------------------------------------------------------+ */ int i_APCI1710_SetNewPWMTiming(struct comedi_device *dev, unsigned char b_ModulNbr, unsigned char b_PWM, unsigned char b_TimingUnit, unsigned int ul_LowTiming, unsigned int ul_HighTiming) { unsigned char b_ClockSelection; int i_ReturnValue = 0; unsigned int ul_LowTimerValue = 0; unsigned int ul_HighTimerValue = 0; unsigned int ul_RealLowTiming = 0; unsigned int ul_RealHighTiming = 0; unsigned int dw_Status; unsigned int dw_Command; double d_RealLowTiming = 0; double d_RealHighTiming = 0; /**************************/ /* Test the module number */ /**************************/ if (b_ModulNbr < 4) { /***************/ /* Test if PWM */ /***************/ if ((devpriv->s_BoardInfos. dw_MolduleConfiguration[b_ModulNbr] & 0xFFFF0000UL) == APCI1710_PWM) { /**************************/ /* Test the PWM selection */ /**************************/ if (b_PWM <= 1) { /***************************/ /* Test if PWM initialised */ /***************************/ dw_Status = inl(devpriv->s_BoardInfos. ui_Address + 12 + (20 * b_PWM) + (64 * b_ModulNbr)); if (dw_Status & 0x10) { b_ClockSelection = devpriv-> s_ModuleInfo[b_ModulNbr]. s_PWMModuleInfo. 
b_ClockSelection; /************************/ /* Test the timing unit */ /************************/ if (b_TimingUnit <= 4) { /*********************************/ /* Test the low timing selection */ /*********************************/ if (((b_ClockSelection == APCI1710_30MHZ) && (b_TimingUnit == 0) && (ul_LowTiming >= 266) && (ul_LowTiming <= 0xFFFFFFFFUL)) || ((b_ClockSelection == APCI1710_30MHZ) && (b_TimingUnit == 1) && (ul_LowTiming >= 1) && (ul_LowTiming <= 571230650UL)) || ((b_ClockSelection == APCI1710_30MHZ) && (b_TimingUnit == 2) && (ul_LowTiming >= 1) && (ul_LowTiming <= 571230UL)) || ((b_ClockSelection == APCI1710_30MHZ) && (b_TimingUnit == 3) && (ul_LowTiming >= 1) && (ul_LowTiming <= 571UL)) || ((b_ClockSelection == APCI1710_30MHZ) && (b_TimingUnit == 4) && (ul_LowTiming >= 1) && (ul_LowTiming <= 9UL)) || ((b_ClockSelection == APCI1710_33MHZ) && (b_TimingUnit == 0) && (ul_LowTiming >= 242) && (ul_LowTiming <= 0xFFFFFFFFUL)) || ((b_ClockSelection == APCI1710_33MHZ) && (b_TimingUnit == 1) && (ul_LowTiming >= 1) && (ul_LowTiming <= 519691043UL)) || ((b_ClockSelection == APCI1710_33MHZ) && (b_TimingUnit == 2) && (ul_LowTiming >= 1) && (ul_LowTiming <= 519691UL)) || ((b_ClockSelection == APCI1710_33MHZ) && (b_TimingUnit == 3) && (ul_LowTiming >= 1) && (ul_LowTiming <= 520UL)) || ((b_ClockSelection == APCI1710_33MHZ) && (b_TimingUnit == 4) && (ul_LowTiming >= 1) && (ul_LowTiming <= 8UL)) || ((b_ClockSelection == APCI1710_40MHZ) && (b_TimingUnit == 0) && (ul_LowTiming >= 200) && (ul_LowTiming <= 0xFFFFFFFFUL)) || ((b_ClockSelection == APCI1710_40MHZ) && (b_TimingUnit == 1) && (ul_LowTiming >= 1) && (ul_LowTiming <= 429496729UL)) || ((b_ClockSelection == APCI1710_40MHZ) && (b_TimingUnit == 2) && (ul_LowTiming >= 1) && (ul_LowTiming <= 429496UL)) || ((b_ClockSelection == APCI1710_40MHZ) && (b_TimingUnit == 3) && (ul_LowTiming >= 1) && (ul_LowTiming <= 429UL)) || ((b_ClockSelection == APCI1710_40MHZ) && (b_TimingUnit == 4) && (ul_LowTiming >= 1) && (ul_LowTiming <= 
7UL))) { /**********************************/ /* Test the High timing selection */ /**********************************/ if (((b_ClockSelection == APCI1710_30MHZ) && (b_TimingUnit == 0) && (ul_HighTiming >= 266) && (ul_HighTiming <= 0xFFFFFFFFUL)) || ((b_ClockSelection == APCI1710_30MHZ) && (b_TimingUnit == 1) && (ul_HighTiming >= 1) && (ul_HighTiming <= 571230650UL)) || ((b_ClockSelection == APCI1710_30MHZ) && (b_TimingUnit == 2) && (ul_HighTiming >= 1) && (ul_HighTiming <= 571230UL)) || ((b_ClockSelection == APCI1710_30MHZ) && (b_TimingUnit == 3) && (ul_HighTiming >= 1) && (ul_HighTiming <= 571UL)) || ((b_ClockSelection == APCI1710_30MHZ) && (b_TimingUnit == 4) && (ul_HighTiming >= 1) && (ul_HighTiming <= 9UL)) || ((b_ClockSelection == APCI1710_33MHZ) && (b_TimingUnit == 0) && (ul_HighTiming >= 242) && (ul_HighTiming <= 0xFFFFFFFFUL)) || ((b_ClockSelection == APCI1710_33MHZ) && (b_TimingUnit == 1) && (ul_HighTiming >= 1) && (ul_HighTiming <= 519691043UL)) || ((b_ClockSelection == APCI1710_33MHZ) && (b_TimingUnit == 2) && (ul_HighTiming >= 1) && (ul_HighTiming <= 519691UL)) || ((b_ClockSelection == APCI1710_33MHZ) && (b_TimingUnit == 3) && (ul_HighTiming >= 1) && (ul_HighTiming <= 520UL)) || ((b_ClockSelection == APCI1710_33MHZ) && (b_TimingUnit == 4) && (ul_HighTiming >= 1) && (ul_HighTiming <= 8UL)) || ((b_ClockSelection == APCI1710_40MHZ) && (b_TimingUnit == 0) && (ul_HighTiming >= 200) && (ul_HighTiming <= 0xFFFFFFFFUL)) || ((b_ClockSelection == APCI1710_40MHZ) && (b_TimingUnit == 1) && (ul_HighTiming >= 1) && (ul_HighTiming <= 429496729UL)) || ((b_ClockSelection == APCI1710_40MHZ) && (b_TimingUnit == 2) && (ul_HighTiming >= 1) && (ul_HighTiming <= 429496UL)) || ((b_ClockSelection == APCI1710_40MHZ) && (b_TimingUnit == 3) && (ul_HighTiming >= 1) && (ul_HighTiming <= 429UL)) || ((b_ClockSelection == APCI1710_40MHZ) && (b_TimingUnit == 4) && (ul_HighTiming >= 1) && (ul_HighTiming <= 7UL))) { /************************************/ /* Calculate the low division 
fator */ /************************************/ fpu_begin(); switch (b_TimingUnit) { /******/ /* ns */ /******/ case 0: /******************/ /* Timer 0 factor */ /******************/ ul_LowTimerValue = (unsigned int) (ul_LowTiming * (0.00025 * b_ClockSelection)); /*******************/ /* Round the value */ /*******************/ if ((double)((double)ul_LowTiming * (0.00025 * (double)b_ClockSelection)) >= ((double)((double)ul_LowTimerValue + 0.5))) { ul_LowTimerValue = ul_LowTimerValue + 1; } /*****************************/ /* Calculate the real timing */ /*****************************/ ul_RealLowTiming = (unsigned int) (ul_LowTimerValue / (0.00025 * (double)b_ClockSelection)); d_RealLowTiming = (double) ul_LowTimerValue / (0.00025 * (double) b_ClockSelection); if ((double)((double)ul_LowTimerValue / (0.00025 * (double)b_ClockSelection)) >= (double)((double)ul_RealLowTiming + 0.5)) { ul_RealLowTiming = ul_RealLowTiming + 1; } ul_LowTiming = ul_LowTiming - 1; ul_LowTimerValue = ul_LowTimerValue - 2; if (b_ClockSelection != APCI1710_40MHZ) { ul_LowTimerValue = (unsigned int) ( (double) (ul_LowTimerValue) * 1.007752288); } break; /******/ /* æs */ /******/ case 1: /******************/ /* Timer 0 factor */ /******************/ ul_LowTimerValue = (unsigned int) (ul_LowTiming * (0.25 * b_ClockSelection)); /*******************/ /* Round the value */ /*******************/ if ((double)((double)ul_LowTiming * (0.25 * (double)b_ClockSelection)) >= ((double)((double)ul_LowTimerValue + 0.5))) { ul_LowTimerValue = ul_LowTimerValue + 1; } /*****************************/ /* Calculate the real timing */ /*****************************/ ul_RealLowTiming = (unsigned int) (ul_LowTimerValue / (0.25 * (double)b_ClockSelection)); d_RealLowTiming = (double) ul_LowTimerValue / ( (double) 0.25 * (double) b_ClockSelection); if ((double)((double)ul_LowTimerValue / (0.25 * (double)b_ClockSelection)) >= (double)((double)ul_RealLowTiming + 0.5)) { ul_RealLowTiming = ul_RealLowTiming + 1; } 
ul_LowTiming = ul_LowTiming - 1; ul_LowTimerValue = ul_LowTimerValue - 2; if (b_ClockSelection != APCI1710_40MHZ) { ul_LowTimerValue = (unsigned int) ( (double) (ul_LowTimerValue) * 1.007752288); } break; /******/ /* ms */ /******/ case 2: /******************/ /* Timer 0 factor */ /******************/ ul_LowTimerValue = ul_LowTiming * (250.0 * b_ClockSelection); /*******************/ /* Round the value */ /*******************/ if ((double)((double)ul_LowTiming * (250.0 * (double)b_ClockSelection)) >= ((double)((double)ul_LowTimerValue + 0.5))) { ul_LowTimerValue = ul_LowTimerValue + 1; } /*****************************/ /* Calculate the real timing */ /*****************************/ ul_RealLowTiming = (unsigned int) (ul_LowTimerValue / (250.0 * (double)b_ClockSelection)); d_RealLowTiming = (double) ul_LowTimerValue / (250.0 * (double) b_ClockSelection); if ((double)((double)ul_LowTimerValue / (250.0 * (double)b_ClockSelection)) >= (double)((double)ul_RealLowTiming + 0.5)) { ul_RealLowTiming = ul_RealLowTiming + 1; } ul_LowTiming = ul_LowTiming - 1; ul_LowTimerValue = ul_LowTimerValue - 2; if (b_ClockSelection != APCI1710_40MHZ) { ul_LowTimerValue = (unsigned int) ( (double) (ul_LowTimerValue) * 1.007752288); } break; /*****/ /* s */ /*****/ case 3: /******************/ /* Timer 0 factor */ /******************/ ul_LowTimerValue = (unsigned int) (ul_LowTiming * (250000.0 * b_ClockSelection)); /*******************/ /* Round the value */ /*******************/ if ((double)((double)ul_LowTiming * (250000.0 * (double)b_ClockSelection)) >= ((double)((double)ul_LowTimerValue + 0.5))) { ul_LowTimerValue = ul_LowTimerValue + 1; } /*****************************/ /* Calculate the real timing */ /*****************************/ ul_RealLowTiming = (unsigned int) (ul_LowTimerValue / (250000.0 * (double) b_ClockSelection)); d_RealLowTiming = (double) ul_LowTimerValue / (250000.0 * (double) b_ClockSelection); if ((double)((double)ul_LowTimerValue / (250000.0 * 
(double)b_ClockSelection)) >= (double)((double)ul_RealLowTiming + 0.5)) { ul_RealLowTiming = ul_RealLowTiming + 1; } ul_LowTiming = ul_LowTiming - 1; ul_LowTimerValue = ul_LowTimerValue - 2; if (b_ClockSelection != APCI1710_40MHZ) { ul_LowTimerValue = (unsigned int) ( (double) (ul_LowTimerValue) * 1.007752288); } break; /******/ /* mn */ /******/ case 4: /******************/ /* Timer 0 factor */ /******************/ ul_LowTimerValue = (unsigned int) ( (ul_LowTiming * 60) * (250000.0 * b_ClockSelection)); /*******************/ /* Round the value */ /*******************/ if ((double)((double)(ul_LowTiming * 60.0) * (250000.0 * (double)b_ClockSelection)) >= ((double)((double)ul_LowTimerValue + 0.5))) { ul_LowTimerValue = ul_LowTimerValue + 1; } /*****************************/ /* Calculate the real timing */ /*****************************/ ul_RealLowTiming = (unsigned int) (ul_LowTimerValue / (250000.0 * (double) b_ClockSelection)) / 60; d_RealLowTiming = ( (double) ul_LowTimerValue / (250000.0 * (double) b_ClockSelection)) / 60.0; if ((double)(((double)ul_LowTimerValue / (250000.0 * (double)b_ClockSelection)) / 60.0) >= (double)((double)ul_RealLowTiming + 0.5)) { ul_RealLowTiming = ul_RealLowTiming + 1; } ul_LowTiming = ul_LowTiming - 1; ul_LowTimerValue = ul_LowTimerValue - 2; if (b_ClockSelection != APCI1710_40MHZ) { ul_LowTimerValue = (unsigned int) ( (double) (ul_LowTimerValue) * 1.007752288); } break; } /*************************************/ /* Calculate the high division fator */ /*************************************/ switch (b_TimingUnit) { /******/ /* ns */ /******/ case 0: /******************/ /* Timer 0 factor */ /******************/ ul_HighTimerValue = (unsigned int) (ul_HighTiming * (0.00025 * b_ClockSelection)); /*******************/ /* Round the value */ /*******************/ if ((double)((double)ul_HighTiming * (0.00025 * (double)b_ClockSelection)) >= ((double)((double)ul_HighTimerValue + 0.5))) { ul_HighTimerValue = ul_HighTimerValue + 1; } 
/*****************************/ /* Calculate the real timing */ /*****************************/ ul_RealHighTiming = (unsigned int) (ul_HighTimerValue / (0.00025 * (double)b_ClockSelection)); d_RealHighTiming = (double) ul_HighTimerValue / (0.00025 * (double) b_ClockSelection); if ((double)((double)ul_HighTimerValue / (0.00025 * (double)b_ClockSelection)) >= (double)((double)ul_RealHighTiming + 0.5)) { ul_RealHighTiming = ul_RealHighTiming + 1; } ul_HighTiming = ul_HighTiming - 1; ul_HighTimerValue = ul_HighTimerValue - 2; if (b_ClockSelection != APCI1710_40MHZ) { ul_HighTimerValue = (unsigned int) ( (double) (ul_HighTimerValue) * 1.007752288); } break; /******/ /* æs */ /******/ case 1: /******************/ /* Timer 0 factor */ /******************/ ul_HighTimerValue = (unsigned int) (ul_HighTiming * (0.25 * b_ClockSelection)); /*******************/ /* Round the value */ /*******************/ if ((double)((double)ul_HighTiming * (0.25 * (double)b_ClockSelection)) >= ((double)((double)ul_HighTimerValue + 0.5))) { ul_HighTimerValue = ul_HighTimerValue + 1; } /*****************************/ /* Calculate the real timing */ /*****************************/ ul_RealHighTiming = (unsigned int) (ul_HighTimerValue / (0.25 * (double)b_ClockSelection)); d_RealHighTiming = (double) ul_HighTimerValue / ( (double) 0.25 * (double) b_ClockSelection); if ((double)((double)ul_HighTimerValue / (0.25 * (double)b_ClockSelection)) >= (double)((double)ul_RealHighTiming + 0.5)) { ul_RealHighTiming = ul_RealHighTiming + 1; } ul_HighTiming = ul_HighTiming - 1; ul_HighTimerValue = ul_HighTimerValue - 2; if (b_ClockSelection != APCI1710_40MHZ) { ul_HighTimerValue = (unsigned int) ( (double) (ul_HighTimerValue) * 1.007752288); } break; /******/ /* ms */ /******/ case 2: /******************/ /* Timer 0 factor */ /******************/ ul_HighTimerValue = ul_HighTiming * (250.0 * b_ClockSelection); /*******************/ /* Round the value */ /*******************/ if ((double)((double)ul_HighTiming * 
(250.0 * (double)b_ClockSelection)) >= ((double)((double)ul_HighTimerValue + 0.5))) { ul_HighTimerValue = ul_HighTimerValue + 1; } /*****************************/ /* Calculate the real timing */ /*****************************/ ul_RealHighTiming = (unsigned int) (ul_HighTimerValue / (250.0 * (double)b_ClockSelection)); d_RealHighTiming = (double) ul_HighTimerValue / (250.0 * (double) b_ClockSelection); if ((double)((double)ul_HighTimerValue / (250.0 * (double)b_ClockSelection)) >= (double)((double)ul_RealHighTiming + 0.5)) { ul_RealHighTiming = ul_RealHighTiming + 1; } ul_HighTiming = ul_HighTiming - 1; ul_HighTimerValue = ul_HighTimerValue - 2; if (b_ClockSelection != APCI1710_40MHZ) { ul_HighTimerValue = (unsigned int) ( (double) (ul_HighTimerValue) * 1.007752288); } break; /*****/ /* s */ /*****/ case 3: /******************/ /* Timer 0 factor */ /******************/ ul_HighTimerValue = (unsigned int) (ul_HighTiming * (250000.0 * b_ClockSelection)); /*******************/ /* Round the value */ /*******************/ if ((double)((double)ul_HighTiming * (250000.0 * (double)b_ClockSelection)) >= ((double)((double)ul_HighTimerValue + 0.5))) { ul_HighTimerValue = ul_HighTimerValue + 1; } /*****************************/ /* Calculate the real timing */ /*****************************/ ul_RealHighTiming = (unsigned int) (ul_HighTimerValue / (250000.0 * (double) b_ClockSelection)); d_RealHighTiming = (double) ul_HighTimerValue / (250000.0 * (double) b_ClockSelection); if ((double)((double)ul_HighTimerValue / (250000.0 * (double)b_ClockSelection)) >= (double)((double)ul_RealHighTiming + 0.5)) { ul_RealHighTiming = ul_RealHighTiming + 1; } ul_HighTiming = ul_HighTiming - 1; ul_HighTimerValue = ul_HighTimerValue - 2; if (b_ClockSelection != APCI1710_40MHZ) { ul_HighTimerValue = (unsigned int) ( (double) (ul_HighTimerValue) * 1.007752288); } break; /******/ /* mn */ /******/ case 4: /******************/ /* Timer 0 factor */ /******************/ ul_HighTimerValue = (unsigned int) 
( (ul_HighTiming * 60) * (250000.0 * b_ClockSelection)); /*******************/ /* Round the value */ /*******************/ if ((double)((double)(ul_HighTiming * 60.0) * (250000.0 * (double)b_ClockSelection)) >= ((double)((double)ul_HighTimerValue + 0.5))) { ul_HighTimerValue = ul_HighTimerValue + 1; } /*****************************/ /* Calculate the real timing */ /*****************************/ ul_RealHighTiming = (unsigned int) (ul_HighTimerValue / (250000.0 * (double) b_ClockSelection)) / 60; d_RealHighTiming = ( (double) ul_HighTimerValue / (250000.0 * (double) b_ClockSelection)) / 60.0; if ((double)(((double)ul_HighTimerValue / (250000.0 * (double)b_ClockSelection)) / 60.0) >= (double)((double)ul_RealHighTiming + 0.5)) { ul_RealHighTiming = ul_RealHighTiming + 1; } ul_HighTiming = ul_HighTiming - 1; ul_HighTimerValue = ul_HighTimerValue - 2; if (b_ClockSelection != APCI1710_40MHZ) { ul_HighTimerValue = (unsigned int) ( (double) (ul_HighTimerValue) * 1.007752288); } break; } fpu_end(); /************************/ /* Save the timing unit */ /************************/ devpriv-> s_ModuleInfo [b_ModulNbr]. s_PWMModuleInfo. s_PWMInfo [b_PWM]. b_TimingUnit = b_TimingUnit; /****************************/ /* Save the low base timing */ /****************************/ devpriv-> s_ModuleInfo [b_ModulNbr]. s_PWMModuleInfo. s_PWMInfo [b_PWM]. d_LowTiming = d_RealLowTiming; devpriv-> s_ModuleInfo [b_ModulNbr]. s_PWMModuleInfo. s_PWMInfo [b_PWM]. ul_RealLowTiming = ul_RealLowTiming; /****************************/ /* Save the high base timing */ /****************************/ devpriv-> s_ModuleInfo [b_ModulNbr]. s_PWMModuleInfo. s_PWMInfo [b_PWM]. d_HighTiming = d_RealHighTiming; devpriv-> s_ModuleInfo [b_ModulNbr]. s_PWMModuleInfo. s_PWMInfo [b_PWM]. 
ul_RealHighTiming = ul_RealHighTiming; /************************/ /* Write the low timing */ /************************/ outl(ul_LowTimerValue, devpriv->s_BoardInfos.ui_Address + 0 + (20 * b_PWM) + (64 * b_ModulNbr)); /*************************/ /* Write the high timing */ /*************************/ outl(ul_HighTimerValue, devpriv->s_BoardInfos.ui_Address + 4 + (20 * b_PWM) + (64 * b_ModulNbr)); /***************************/ /* Set the clock selection */ /***************************/ dw_Command = inl (devpriv-> s_BoardInfos. ui_Address + 8 + (20 * b_PWM) + (64 * b_ModulNbr)); dw_Command = dw_Command & 0x7F; if (b_ClockSelection == APCI1710_40MHZ) { dw_Command = dw_Command | 0x80; } /***************************/ /* Set the clock selection */ /***************************/ outl(dw_Command, devpriv-> s_BoardInfos. ui_Address + 8 + (20 * b_PWM) + (64 * b_ModulNbr)); } else { /***************************************/ /* High base timing selection is wrong */ /***************************************/ DPRINTK("High base timing selection is wrong\n"); i_ReturnValue = -8; } } else { /**************************************/ /* Low base timing selection is wrong */ /**************************************/ DPRINTK("Low base timing selection is wrong\n"); i_ReturnValue = -7; } } /* if ((b_TimingUnit >= 0) && (b_TimingUnit <= 4)) */ else { /**********************************/ /* Timing unit selection is wrong */ /**********************************/ DPRINTK("Timing unit selection is wrong\n"); i_ReturnValue = -6; } /* if ((b_TimingUnit >= 0) && (b_TimingUnit <= 4)) */ } /* if (dw_Status & 0x10) */ else { /***********************/ /* PWM not initialised */ /***********************/ DPRINTK("PWM not initialised\n"); i_ReturnValue = -5; } /* if (dw_Status & 0x10) */ } /* if (b_PWM >= 0 && b_PWM <= 1) */ else { /******************************/ /* Tor PWM selection is wrong */ /******************************/ DPRINTK("Tor PWM selection is wrong\n"); i_ReturnValue = -4; } /* if (b_PWM 
>= 0 && b_PWM <= 1) */ } else { /**********************************/ /* The module is not a PWM module */ /**********************************/ DPRINTK("The module is not a PWM module\n"); i_ReturnValue = -3; } } else { /***********************/ /* Module number error */ /***********************/ DPRINTK("Module number error\n"); i_ReturnValue = -2; } return i_ReturnValue; } /* +----------------------------------------------------------------------------+ | Function Name : _INT_ i_APCI1710_GetPWMStatus | | (unsigned char_ b_BoardHandle, | | unsigned char_ b_ModulNbr, | | unsigned char_ b_PWM, | | unsigned char *_ pb_PWMOutputStatus, | | unsigned char *_ pb_ExternGateStatus) | +----------------------------------------------------------------------------+ | Task : Return the status from selected PWM (b_PWM) from | | selected module (b_ModulNbr). | +----------------------------------------------------------------------------+ | Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 | | unsigned char_ b_PWM : Selected PWM (0 or 1) | | unsigned char_ b_ModulNbr : Selected module number (0 to 3) b_ModulNbr =(unsigned char) CR_AREF(insn->chanspec); b_PWM =(unsigned char) data[0]; | +----------------------------------------------------------------------------+ | Output Parameters : unsigned char *_ pb_PWMOutputStatus : Return the PWM output | | level status. | | 0 : The PWM output level| | is low. | | 1 : The PWM output level| | is high. | | unsigned char *_ pb_ExternGateStatus : Return the extern gate | | level status. | | 0 : The extern gate is | | low. | | 1 : The extern gate is | | high. 
pb_PWMOutputStatus =(unsigned char *) data[0]; pb_ExternGateStatus =(unsigned char *) data[1]; | +----------------------------------------------------------------------------+ | Return Value : 0: No error | | -1: The handle parameter of the board is wrong | | -2: Module selection wrong | | -3: The module is not a PWM module | | -4: PWM selection is wrong | | -5: PWM not initialised see function | | "i_APCI1710_InitPWM" | | -6: PWM not enabled see function "i_APCI1710_EnablePWM"| +----------------------------------------------------------------------------+ */ int i_APCI1710_InsnReadGetPWMStatus(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { int i_ReturnValue = 0; unsigned int dw_Status; unsigned char b_ModulNbr; unsigned char b_PWM; unsigned char *pb_PWMOutputStatus; unsigned char *pb_ExternGateStatus; i_ReturnValue = insn->n; b_ModulNbr = (unsigned char) CR_AREF(insn->chanspec); b_PWM = (unsigned char) CR_CHAN(insn->chanspec); pb_PWMOutputStatus = (unsigned char *) &data[0]; pb_ExternGateStatus = (unsigned char *) &data[1]; /**************************/ /* Test the module number */ /**************************/ if (b_ModulNbr < 4) { /***************/ /* Test if PWM */ /***************/ if ((devpriv->s_BoardInfos. dw_MolduleConfiguration[b_ModulNbr] & 0xFFFF0000UL) == APCI1710_PWM) { /**************************/ /* Test the PWM selection */ /**************************/ if (b_PWM <= 1) { /***************************/ /* Test if PWM initialised */ /***************************/ dw_Status = inl(devpriv->s_BoardInfos. 
ui_Address + 12 + (20 * b_PWM) + (64 * b_ModulNbr)); if (dw_Status & 0x10) { /***********************/ /* Test if PWM enabled */ /***********************/ if (dw_Status & 0x1) { *pb_PWMOutputStatus = (unsigned char) ((dw_Status >> 7) & 1); *pb_ExternGateStatus = (unsigned char) ((dw_Status >> 6) & 1); } /* if (dw_Status & 0x1) */ else { /*******************/ /* PWM not enabled */ /*******************/ DPRINTK("PWM not enabled \n"); i_ReturnValue = -6; } /* if (dw_Status & 0x1) */ } /* if (dw_Status & 0x10) */ else { /***********************/ /* PWM not initialised */ /***********************/ DPRINTK("PWM not initialised\n"); i_ReturnValue = -5; } /* if (dw_Status & 0x10) */ } /* if (b_PWM >= 0 && b_PWM <= 1) */ else { /******************************/ /* Tor PWM selection is wrong */ /******************************/ DPRINTK("Tor PWM selection is wrong\n"); i_ReturnValue = -4; } /* if (b_PWM >= 0 && b_PWM <= 1) */ } else { /**********************************/ /* The module is not a PWM module */ /**********************************/ DPRINTK("The module is not a PWM module\n"); i_ReturnValue = -3; } } else { /***********************/ /* Module number error */ /***********************/ DPRINTK("Module number error\n"); i_ReturnValue = -2; } return i_ReturnValue; } int i_APCI1710_InsnBitsReadPWMInterrupt(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { data[0] = devpriv->s_InterruptParameters. s_FIFOInterruptParameters[devpriv-> s_InterruptParameters.ui_Read].b_OldModuleMask; data[1] = devpriv->s_InterruptParameters. s_FIFOInterruptParameters[devpriv-> s_InterruptParameters.ui_Read].ul_OldInterruptMask; data[2] = devpriv->s_InterruptParameters. s_FIFOInterruptParameters[devpriv-> s_InterruptParameters.ui_Read].ul_OldCounterLatchValue; /**************************/ /* Increment the read FIFO */ /***************************/ devpriv-> s_InterruptParameters. 
ui_Read = (devpriv-> s_InterruptParameters.ui_Read + 1) % APCI1710_SAVE_INTERRUPT; return insn->n; }
gpl-2.0
nixholas/msm-nexus5
arch/powerpc/platforms/powermac/backlight.c
9348
5538
/* * Miscellaneous procedures for dealing with the PowerMac hardware. * Contains support for the backlight. * * Copyright (C) 2000 Benjamin Herrenschmidt * Copyright (C) 2006 Michael Hanselmann <linux-kernel@hansmi.ch> * */ #include <linux/kernel.h> #include <linux/fb.h> #include <linux/backlight.h> #include <linux/adb.h> #include <linux/pmu.h> #include <linux/atomic.h> #include <linux/export.h> #include <asm/prom.h> #include <asm/backlight.h> #define OLD_BACKLIGHT_MAX 15 static void pmac_backlight_key_worker(struct work_struct *work); static void pmac_backlight_set_legacy_worker(struct work_struct *work); static DECLARE_WORK(pmac_backlight_key_work, pmac_backlight_key_worker); static DECLARE_WORK(pmac_backlight_set_legacy_work, pmac_backlight_set_legacy_worker); /* Although these variables are used in interrupt context, it makes no sense to * protect them. No user is able to produce enough key events per second and * notice the errors that might happen. */ static int pmac_backlight_key_queued; static int pmac_backlight_set_legacy_queued; /* The via-pmu code allows the backlight to be grabbed, in which case the * in-kernel control of the brightness needs to be disabled. This should * only be used by really old PowerBooks. */ static atomic_t kernel_backlight_disabled = ATOMIC_INIT(0); /* Protect the pmac_backlight variable below. You should hold this lock when using the pmac_backlight pointer to prevent its potential removal. */ DEFINE_MUTEX(pmac_backlight_mutex); /* Main backlight storage * * Backlight drivers in this variable are required to have the "ops" * attribute set and to have an update_status function. * * We can only store one backlight here, but since Apple laptops have only one * internal display, it doesn't matter. Other backlight drivers can be used * independently. 
* */ struct backlight_device *pmac_backlight; int pmac_has_backlight_type(const char *type) { struct device_node* bk_node = of_find_node_by_name(NULL, "backlight"); if (bk_node) { const char *prop = of_get_property(bk_node, "backlight-control", NULL); if (prop && strncmp(prop, type, strlen(type)) == 0) { of_node_put(bk_node); return 1; } of_node_put(bk_node); } return 0; } int pmac_backlight_curve_lookup(struct fb_info *info, int value) { int level = (FB_BACKLIGHT_LEVELS - 1); if (info && info->bl_dev) { int i, max = 0; /* Look for biggest value */ for (i = 0; i < FB_BACKLIGHT_LEVELS; i++) max = max((int)info->bl_curve[i], max); /* Look for nearest value */ for (i = 0; i < FB_BACKLIGHT_LEVELS; i++) { int diff = abs(info->bl_curve[i] - value); if (diff < max) { max = diff; level = i; } } } return level; } static void pmac_backlight_key_worker(struct work_struct *work) { if (atomic_read(&kernel_backlight_disabled)) return; mutex_lock(&pmac_backlight_mutex); if (pmac_backlight) { struct backlight_properties *props; int brightness; props = &pmac_backlight->props; brightness = props->brightness + ((pmac_backlight_key_queued?-1:1) * (props->max_brightness / 15)); if (brightness < 0) brightness = 0; else if (brightness > props->max_brightness) brightness = props->max_brightness; props->brightness = brightness; backlight_update_status(pmac_backlight); } mutex_unlock(&pmac_backlight_mutex); } /* This function is called in interrupt context */ void pmac_backlight_key(int direction) { if (atomic_read(&kernel_backlight_disabled)) return; /* we can receive multiple interrupts here, but the scheduled work * will run only once, with the last value */ pmac_backlight_key_queued = direction; schedule_work(&pmac_backlight_key_work); } static int __pmac_backlight_set_legacy_brightness(int brightness) { int error = -ENXIO; mutex_lock(&pmac_backlight_mutex); if (pmac_backlight) { struct backlight_properties *props; props = &pmac_backlight->props; props->brightness = brightness * 
(props->max_brightness + 1) / (OLD_BACKLIGHT_MAX + 1); if (props->brightness > props->max_brightness) props->brightness = props->max_brightness; else if (props->brightness < 0) props->brightness = 0; backlight_update_status(pmac_backlight); error = 0; } mutex_unlock(&pmac_backlight_mutex); return error; } static void pmac_backlight_set_legacy_worker(struct work_struct *work) { if (atomic_read(&kernel_backlight_disabled)) return; __pmac_backlight_set_legacy_brightness(pmac_backlight_set_legacy_queued); } /* This function is called in interrupt context */ void pmac_backlight_set_legacy_brightness_pmu(int brightness) { if (atomic_read(&kernel_backlight_disabled)) return; pmac_backlight_set_legacy_queued = brightness; schedule_work(&pmac_backlight_set_legacy_work); } int pmac_backlight_set_legacy_brightness(int brightness) { return __pmac_backlight_set_legacy_brightness(brightness); } int pmac_backlight_get_legacy_brightness() { int result = -ENXIO; mutex_lock(&pmac_backlight_mutex); if (pmac_backlight) { struct backlight_properties *props; props = &pmac_backlight->props; result = props->brightness * (OLD_BACKLIGHT_MAX + 1) / (props->max_brightness + 1); } mutex_unlock(&pmac_backlight_mutex); return result; } void pmac_backlight_disable() { atomic_inc(&kernel_backlight_disabled); } void pmac_backlight_enable() { atomic_dec(&kernel_backlight_disabled); } EXPORT_SYMBOL_GPL(pmac_backlight); EXPORT_SYMBOL_GPL(pmac_backlight_mutex); EXPORT_SYMBOL_GPL(pmac_has_backlight_type);
gpl-2.0
SlimRoms/kernel_lge_geefhd
drivers/pci/hotplug/ibmphp_res.c
11652
59622
/* * IBM Hot Plug Controller Driver * * Written By: Irene Zubarev, IBM Corporation * * Copyright (C) 2001 Greg Kroah-Hartman (greg@kroah.com) * Copyright (C) 2001,2002 IBM Corp. * * All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or (at * your option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or * NON INFRINGEMENT. See the GNU General Public License for more * details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * * Send feedback to <gregkh@us.ibm.com> * */ #include <linux/module.h> #include <linux/slab.h> #include <linux/pci.h> #include <linux/list.h> #include <linux/init.h> #include "ibmphp.h" static int flags = 0; /* for testing */ static void update_resources (struct bus_node *bus_cur, int type, int rangeno); static int once_over (void); static int remove_ranges (struct bus_node *, struct bus_node *); static int update_bridge_ranges (struct bus_node **); static int add_bus_range (int type, struct range_node *, struct bus_node *); static void fix_resources (struct bus_node *); static struct bus_node *find_bus_wprev (u8, struct bus_node **, u8); static LIST_HEAD(gbuses); static struct bus_node * __init alloc_error_bus (struct ebda_pci_rsrc * curr, u8 busno, int flag) { struct bus_node * newbus; if (!(curr) && !(flag)) { err ("NULL pointer passed\n"); return NULL; } newbus = kzalloc(sizeof(struct bus_node), GFP_KERNEL); if (!newbus) { err ("out of system memory\n"); return NULL; } if (flag) newbus->busno = busno; else newbus->busno = curr->bus_num; list_add_tail 
(&newbus->bus_list, &gbuses); return newbus; } static struct resource_node * __init alloc_resources (struct ebda_pci_rsrc * curr) { struct resource_node *rs; if (!curr) { err ("NULL passed to allocate\n"); return NULL; } rs = kzalloc(sizeof(struct resource_node), GFP_KERNEL); if (!rs) { err ("out of system memory\n"); return NULL; } rs->busno = curr->bus_num; rs->devfunc = curr->dev_fun; rs->start = curr->start_addr; rs->end = curr->end_addr; rs->len = curr->end_addr - curr->start_addr + 1; return rs; } static int __init alloc_bus_range (struct bus_node **new_bus, struct range_node **new_range, struct ebda_pci_rsrc *curr, int flag, u8 first_bus) { struct bus_node * newbus; struct range_node *newrange; u8 num_ranges = 0; if (first_bus) { newbus = kzalloc(sizeof(struct bus_node), GFP_KERNEL); if (!newbus) { err ("out of system memory.\n"); return -ENOMEM; } newbus->busno = curr->bus_num; } else { newbus = *new_bus; switch (flag) { case MEM: num_ranges = newbus->noMemRanges; break; case PFMEM: num_ranges = newbus->noPFMemRanges; break; case IO: num_ranges = newbus->noIORanges; break; } } newrange = kzalloc(sizeof(struct range_node), GFP_KERNEL); if (!newrange) { if (first_bus) kfree (newbus); err ("out of system memory\n"); return -ENOMEM; } newrange->start = curr->start_addr; newrange->end = curr->end_addr; if (first_bus || (!num_ranges)) newrange->rangeno = 1; else { /* need to insert our range */ add_bus_range (flag, newrange, newbus); debug ("%d resource Primary Bus inserted on bus %x [%x - %x]\n", flag, newbus->busno, newrange->start, newrange->end); } switch (flag) { case MEM: newbus->rangeMem = newrange; if (first_bus) newbus->noMemRanges = 1; else { debug ("First Memory Primary on bus %x, [%x - %x]\n", newbus->busno, newrange->start, newrange->end); ++newbus->noMemRanges; fix_resources (newbus); } break; case IO: newbus->rangeIO = newrange; if (first_bus) newbus->noIORanges = 1; else { debug ("First IO Primary on bus %x, [%x - %x]\n", newbus->busno, 
newrange->start, newrange->end); ++newbus->noIORanges; fix_resources (newbus); } break; case PFMEM: newbus->rangePFMem = newrange; if (first_bus) newbus->noPFMemRanges = 1; else { debug ("1st PFMemory Primary on Bus %x [%x - %x]\n", newbus->busno, newrange->start, newrange->end); ++newbus->noPFMemRanges; fix_resources (newbus); } break; } *new_bus = newbus; *new_range = newrange; return 0; } /* Notes: * 1. The ranges are ordered. The buses are not ordered. (First come) * * 2. If cannot allocate out of PFMem range, allocate from Mem ranges. PFmemFromMem * are not sorted. (no need since use mem node). To not change the entire code, we * also add mem node whenever this case happens so as not to change * ibmphp_check_mem_resource etc (and since it really is taking Mem resource) */ /***************************************************************************** * This is the Resource Management initialization function. It will go through * the Resource list taken from EBDA and fill in this module's data structures * * THIS IS NOT TAKING INTO CONSIDERATION IO RESTRICTIONS OF PRIMARY BUSES, * SINCE WE'RE GOING TO ASSUME FOR NOW WE DON'T HAVE THOSE ON OUR BUSES FOR NOW * * Input: ptr to the head of the resource list from EBDA * Output: 0, -1 or error codes ***************************************************************************/ int __init ibmphp_rsrc_init (void) { struct ebda_pci_rsrc *curr; struct range_node *newrange = NULL; struct bus_node *newbus = NULL; struct bus_node *bus_cur; struct bus_node *bus_prev; struct list_head *tmp; struct resource_node *new_io = NULL; struct resource_node *new_mem = NULL; struct resource_node *new_pfmem = NULL; int rc; struct list_head *tmp_ebda; list_for_each (tmp_ebda, &ibmphp_ebda_pci_rsrc_head) { curr = list_entry (tmp_ebda, struct ebda_pci_rsrc, ebda_pci_rsrc_list); if (!(curr->rsrc_type & PCIDEVMASK)) { /* EBDA still lists non PCI devices, so ignore... 
*/ debug ("this is not a PCI DEVICE in rsrc_init, please take care\n"); // continue; } /* this is a primary bus resource */ if (curr->rsrc_type & PRIMARYBUSMASK) { /* memory */ if ((curr->rsrc_type & RESTYPE) == MMASK) { /* no bus structure exists in place yet */ if (list_empty (&gbuses)) { if ((rc = alloc_bus_range (&newbus, &newrange, curr, MEM, 1))) return rc; list_add_tail (&newbus->bus_list, &gbuses); debug ("gbuses = NULL, Memory Primary Bus %x [%x - %x]\n", newbus->busno, newrange->start, newrange->end); } else { bus_cur = find_bus_wprev (curr->bus_num, &bus_prev, 1); /* found our bus */ if (bus_cur) { rc = alloc_bus_range (&bus_cur, &newrange, curr, MEM, 0); if (rc) return rc; } else { /* went through all the buses and didn't find ours, need to create a new bus node */ if ((rc = alloc_bus_range (&newbus, &newrange, curr, MEM, 1))) return rc; list_add_tail (&newbus->bus_list, &gbuses); debug ("New Bus, Memory Primary Bus %x [%x - %x]\n", newbus->busno, newrange->start, newrange->end); } } } else if ((curr->rsrc_type & RESTYPE) == PFMASK) { /* prefetchable memory */ if (list_empty (&gbuses)) { /* no bus structure exists in place yet */ if ((rc = alloc_bus_range (&newbus, &newrange, curr, PFMEM, 1))) return rc; list_add_tail (&newbus->bus_list, &gbuses); debug ("gbuses = NULL, PFMemory Primary Bus %x [%x - %x]\n", newbus->busno, newrange->start, newrange->end); } else { bus_cur = find_bus_wprev (curr->bus_num, &bus_prev, 1); if (bus_cur) { /* found our bus */ rc = alloc_bus_range (&bus_cur, &newrange, curr, PFMEM, 0); if (rc) return rc; } else { /* went through all the buses and didn't find ours, need to create a new bus node */ if ((rc = alloc_bus_range (&newbus, &newrange, curr, PFMEM, 1))) return rc; list_add_tail (&newbus->bus_list, &gbuses); debug ("1st Bus, PFMemory Primary Bus %x [%x - %x]\n", newbus->busno, newrange->start, newrange->end); } } } else if ((curr->rsrc_type & RESTYPE) == IOMASK) { /* IO */ if (list_empty (&gbuses)) { /* no bus structure 
exists in place yet */ if ((rc = alloc_bus_range (&newbus, &newrange, curr, IO, 1))) return rc; list_add_tail (&newbus->bus_list, &gbuses); debug ("gbuses = NULL, IO Primary Bus %x [%x - %x]\n", newbus->busno, newrange->start, newrange->end); } else { bus_cur = find_bus_wprev (curr->bus_num, &bus_prev, 1); if (bus_cur) { rc = alloc_bus_range (&bus_cur, &newrange, curr, IO, 0); if (rc) return rc; } else { /* went through all the buses and didn't find ours, need to create a new bus node */ if ((rc = alloc_bus_range (&newbus, &newrange, curr, IO, 1))) return rc; list_add_tail (&newbus->bus_list, &gbuses); debug ("1st Bus, IO Primary Bus %x [%x - %x]\n", newbus->busno, newrange->start, newrange->end); } } } else { ; /* type is reserved WHAT TO DO IN THIS CASE??? NOTHING TO DO??? */ } } else { /* regular pci device resource */ if ((curr->rsrc_type & RESTYPE) == MMASK) { /* Memory resource */ new_mem = alloc_resources (curr); if (!new_mem) return -ENOMEM; new_mem->type = MEM; /* * if it didn't find the bus, means PCI dev * came b4 the Primary Bus info, so need to * create a bus rangeno becomes a problem... * assign a -1 and then update once the range * actually appears... 
*/ if (ibmphp_add_resource (new_mem) < 0) { newbus = alloc_error_bus (curr, 0, 0); if (!newbus) return -ENOMEM; newbus->firstMem = new_mem; ++newbus->needMemUpdate; new_mem->rangeno = -1; } debug ("Memory resource for device %x, bus %x, [%x - %x]\n", new_mem->devfunc, new_mem->busno, new_mem->start, new_mem->end); } else if ((curr->rsrc_type & RESTYPE) == PFMASK) { /* PFMemory resource */ new_pfmem = alloc_resources (curr); if (!new_pfmem) return -ENOMEM; new_pfmem->type = PFMEM; new_pfmem->fromMem = 0; if (ibmphp_add_resource (new_pfmem) < 0) { newbus = alloc_error_bus (curr, 0, 0); if (!newbus) return -ENOMEM; newbus->firstPFMem = new_pfmem; ++newbus->needPFMemUpdate; new_pfmem->rangeno = -1; } debug ("PFMemory resource for device %x, bus %x, [%x - %x]\n", new_pfmem->devfunc, new_pfmem->busno, new_pfmem->start, new_pfmem->end); } else if ((curr->rsrc_type & RESTYPE) == IOMASK) { /* IO resource */ new_io = alloc_resources (curr); if (!new_io) return -ENOMEM; new_io->type = IO; /* * if it didn't find the bus, means PCI dev * came b4 the Primary Bus info, so need to * create a bus rangeno becomes a problem... * Can assign a -1 and then update once the * range actually appears... 
 */
			if (ibmphp_add_resource (new_io) < 0) {
				/* No range known for this IO resource yet (EBDA listed the
				 * device before its bus): park it on an error bus and flag
				 * the bus so fix_resources() can assign the rangeno later. */
				newbus = alloc_error_bus (curr, 0, 0);
				if (!newbus)
					return -ENOMEM;
				newbus->firstIO = new_io;
				++newbus->needIOUpdate;
				new_io->rangeno = -1;
			}
			debug ("IO resource for device %x, bus %x, [%x - %x]\n", new_io->devfunc, new_io->busno, new_io->start, new_io->end);
		}
	}
}

	list_for_each (tmp, &gbuses) {
		bus_cur = list_entry (tmp, struct bus_node, bus_list);
		/* This is to get info about PPB resources, since EBDA doesn't put this info into the primary bus info */
		rc = update_bridge_ranges (&bus_cur);
		if (rc)
			return rc;
	}
	rc = once_over ();	/* This is to align ranges (so no -1) */
	if (rc)
		return rc;
	return 0;
}

/********************************************************************************
 * This function adds a range into a sorted list of ranges per bus for a particular
 * range type, it then calls another routine to update the range numbers on the
 * pci devices' resources for the appropriate resource
 *
 * Input: type of the resource, range to add, current bus
 * Output: 0 or -1, bus and range ptrs
 ********************************************************************************/
static int add_bus_range (int type, struct range_node *range, struct bus_node *bus_cur)
{
	struct range_node *range_cur = NULL;
	struct range_node *range_prev;
	int count = 0, i_init;
	int noRanges = 0;

	/* pick the per-type singly linked range list head and its length */
	switch (type) {
		case MEM:
			range_cur = bus_cur->rangeMem;
			noRanges = bus_cur->noMemRanges;
			break;
		case PFMEM:
			range_cur = bus_cur->rangePFMem;
			noRanges = bus_cur->noPFMemRanges;
			break;
		case IO:
			range_cur = bus_cur->rangeIO;
			noRanges = bus_cur->noIORanges;
			break;
	}

	/* walk until the first existing range whose start is past ours;
	 * list is kept sorted by start address */
	range_prev = NULL;
	while (range_cur) {
		if (range->start < range_cur->start)
			break;
		range_prev = range_cur;
		range_cur = range_cur->next;
		count = count + 1;
	}
	if (!count) {	/* our range will go at the beginning of the list */
		switch (type) {
			case MEM:
				bus_cur->rangeMem = range;
				break;
			case PFMEM:
				bus_cur->rangePFMem = range;
				break;
			case IO:
				bus_cur->rangeIO = range;
				break;
		}
		range->next = range_cur;
		range->rangeno = 1;
		i_init = 0;
	} else if (!range_cur) {	/* our range will go at the end of the list */
		range->next = NULL;
		range_prev->next = range;
		range->rangeno = range_prev->rangeno + 1;
		/* appended past the tail: no following ranges need renumbering */
		return 0;
	} else {	/* the range is in the middle */
		range_prev->next = range;
		range->next = range_cur;
		range->rangeno = range_cur->rangeno;
		i_init = range_prev->rangeno;
	}

	/* bump the range numbers of every range that now follows the new one */
	for (count = i_init; count < noRanges; ++count) {
		++range_cur->rangeno;
		range_cur = range_cur->next;
	}

	/* resources pointing at the shifted ranges must be renumbered too */
	update_resources (bus_cur, type, i_init + 1);
	return 0;
}

/*******************************************************************************
 * This routine goes through the list of resources of type 'type' and updates
 * the range numbers that they correspond to.  It was called from add_bus_range fnc
 *
 * Input: bus, type of the resource, the rangeno starting from which to update
 ******************************************************************************/
static void update_resources (struct bus_node *bus_cur, int type, int rangeno)
{
	struct resource_node *res = NULL;
	u8 eol = 0;	/* end of list indicator */

	switch (type) {
		case MEM:
			if (bus_cur->firstMem)
				res = bus_cur->firstMem;
			break;
		case PFMEM:
			if (bus_cur->firstPFMem)
				res = bus_cur->firstPFMem;
			break;
		case IO:
			if (bus_cur->firstIO)
				res = bus_cur->firstIO;
			break;
	}

	if (res) {
		/* scan forward (within-range via ->next, across ranges via
		 * ->nextRange) to the first resource at the shifted rangeno */
		while (res) {
			if (res->rangeno == rangeno)
				break;
			if (res->next)
				res = res->next;
			else if (res->nextRange)
				res = res->nextRange;
			else {
				eol = 1;
				break;
			}
		}

		if (!eol) {
			/* found the range */
			while (res) {
				++res->rangeno;
				res = res->next;
			}
		}
	}
}

/* Walk resources with unknown range (rangeno == -1) and attach each to the
 * range that contains it, decrementing the bus's pending-update counter.
 * Called from fix_resources() once the ranges for the bus are known. */
static void fix_me (struct resource_node *res, struct bus_node *bus_cur, struct range_node *range)
{
	char * str = "";
	switch (res->type) {
		case IO:
			str = "io";
			break;
		case MEM:
			str = "mem";
			break;
		case PFMEM:
			str = "pfmem";
			break;
	}

	while (res) {
		if (res->rangeno == -1) {
			while (range) {
				if ((res->start >= range->start) && (res->end <= range->end)) {
					res->rangeno = range->rangeno;
					debug ("%s->rangeno in fix_resources is %d\n", str,
res->rangeno); switch (res->type) { case IO: --bus_cur->needIOUpdate; break; case MEM: --bus_cur->needMemUpdate; break; case PFMEM: --bus_cur->needPFMemUpdate; break; } break; } range = range->next; } } if (res->next) res = res->next; else res = res->nextRange; } } /***************************************************************************** * This routine reassigns the range numbers to the resources that had a -1 * This case can happen only if upon initialization, resources taken by pci dev * appear in EBDA before the resources allocated for that bus, since we don't * know the range, we assign -1, and this routine is called after a new range * is assigned to see the resources with unknown range belong to the added range * * Input: current bus * Output: none, list of resources for that bus are fixed if can be *******************************************************************************/ static void fix_resources (struct bus_node *bus_cur) { struct range_node *range; struct resource_node *res; debug ("%s - bus_cur->busno = %d\n", __func__, bus_cur->busno); if (bus_cur->needIOUpdate) { res = bus_cur->firstIO; range = bus_cur->rangeIO; fix_me (res, bus_cur, range); } if (bus_cur->needMemUpdate) { res = bus_cur->firstMem; range = bus_cur->rangeMem; fix_me (res, bus_cur, range); } if (bus_cur->needPFMemUpdate) { res = bus_cur->firstPFMem; range = bus_cur->rangePFMem; fix_me (res, bus_cur, range); } } /******************************************************************************* * This routine adds a resource to the list of resources to the appropriate bus * based on their resource type and sorted by their starting addresses. It assigns * the ptrs to next and nextRange if needed. 
* * Input: resource ptr * Output: ptrs assigned (to the node) * 0 or -1 *******************************************************************************/ int ibmphp_add_resource (struct resource_node *res) { struct resource_node *res_cur; struct resource_node *res_prev; struct bus_node *bus_cur; struct range_node *range_cur = NULL; struct resource_node *res_start = NULL; debug ("%s - enter\n", __func__); if (!res) { err ("NULL passed to add\n"); return -ENODEV; } bus_cur = find_bus_wprev (res->busno, NULL, 0); if (!bus_cur) { /* didn't find a bus, smth's wrong!!! */ debug ("no bus in the system, either pci_dev's wrong or allocation failed\n"); return -ENODEV; } /* Normal case */ switch (res->type) { case IO: range_cur = bus_cur->rangeIO; res_start = bus_cur->firstIO; break; case MEM: range_cur = bus_cur->rangeMem; res_start = bus_cur->firstMem; break; case PFMEM: range_cur = bus_cur->rangePFMem; res_start = bus_cur->firstPFMem; break; default: err ("cannot read the type of the resource to add... problem\n"); return -EINVAL; } while (range_cur) { if ((res->start >= range_cur->start) && (res->end <= range_cur->end)) { res->rangeno = range_cur->rangeno; break; } range_cur = range_cur->next; } /* !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! * this is again the case of rangeno = -1 * !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! 
*/ if (!range_cur) { switch (res->type) { case IO: ++bus_cur->needIOUpdate; break; case MEM: ++bus_cur->needMemUpdate; break; case PFMEM: ++bus_cur->needPFMemUpdate; break; } res->rangeno = -1; } debug ("The range is %d\n", res->rangeno); if (!res_start) { /* no first{IO,Mem,Pfmem} on the bus, 1st IO/Mem/Pfmem resource ever */ switch (res->type) { case IO: bus_cur->firstIO = res; break; case MEM: bus_cur->firstMem = res; break; case PFMEM: bus_cur->firstPFMem = res; break; } res->next = NULL; res->nextRange = NULL; } else { res_cur = res_start; res_prev = NULL; debug ("res_cur->rangeno is %d\n", res_cur->rangeno); while (res_cur) { if (res_cur->rangeno >= res->rangeno) break; res_prev = res_cur; if (res_cur->next) res_cur = res_cur->next; else res_cur = res_cur->nextRange; } if (!res_cur) { /* at the end of the resource list */ debug ("i should be here, [%x - %x]\n", res->start, res->end); res_prev->nextRange = res; res->next = NULL; res->nextRange = NULL; } else if (res_cur->rangeno == res->rangeno) { /* in the same range */ while (res_cur) { if (res->start < res_cur->start) break; res_prev = res_cur; res_cur = res_cur->next; } if (!res_cur) { /* the last resource in this range */ res_prev->next = res; res->next = NULL; res->nextRange = res_prev->nextRange; res_prev->nextRange = NULL; } else if (res->start < res_cur->start) { /* at the beginning or middle of the range */ if (!res_prev) { switch (res->type) { case IO: bus_cur->firstIO = res; break; case MEM: bus_cur->firstMem = res; break; case PFMEM: bus_cur->firstPFMem = res; break; } } else if (res_prev->rangeno == res_cur->rangeno) res_prev->next = res; else res_prev->nextRange = res; res->next = res_cur; res->nextRange = NULL; } } else { /* this is the case where it is 1st occurrence of the range */ if (!res_prev) { /* at the beginning of the resource list */ res->next = NULL; switch (res->type) { case IO: res->nextRange = bus_cur->firstIO; bus_cur->firstIO = res; break; case MEM: res->nextRange = 
bus_cur->firstMem; bus_cur->firstMem = res; break; case PFMEM: res->nextRange = bus_cur->firstPFMem; bus_cur->firstPFMem = res; break; } } else if (res_cur->rangeno > res->rangeno) { /* in the middle of the resource list */ res_prev->nextRange = res; res->next = NULL; res->nextRange = res_cur; } } } debug ("%s - exit\n", __func__); return 0; } /**************************************************************************** * This routine will remove the resource from the list of resources * * Input: io, mem, and/or pfmem resource to be deleted * Ouput: modified resource list * 0 or error code ****************************************************************************/ int ibmphp_remove_resource (struct resource_node *res) { struct bus_node *bus_cur; struct resource_node *res_cur = NULL; struct resource_node *res_prev; struct resource_node *mem_cur; char * type = ""; if (!res) { err ("resource to remove is NULL\n"); return -ENODEV; } bus_cur = find_bus_wprev (res->busno, NULL, 0); if (!bus_cur) { err ("cannot find corresponding bus of the io resource to remove " "bailing out...\n"); return -ENODEV; } switch (res->type) { case IO: res_cur = bus_cur->firstIO; type = "io"; break; case MEM: res_cur = bus_cur->firstMem; type = "mem"; break; case PFMEM: res_cur = bus_cur->firstPFMem; type = "pfmem"; break; default: err ("unknown type for resource to remove\n"); return -EINVAL; } res_prev = NULL; while (res_cur) { if ((res_cur->start == res->start) && (res_cur->end == res->end)) break; res_prev = res_cur; if (res_cur->next) res_cur = res_cur->next; else res_cur = res_cur->nextRange; } if (!res_cur) { if (res->type == PFMEM) { /* * case where pfmem might be in the PFMemFromMem list * so will also need to remove the corresponding mem * entry */ res_cur = bus_cur->firstPFMemFromMem; res_prev = NULL; while (res_cur) { if ((res_cur->start == res->start) && (res_cur->end == res->end)) { mem_cur = bus_cur->firstMem; while (mem_cur) { if ((mem_cur->start == res_cur->start) && 
(mem_cur->end == res_cur->end)) break; if (mem_cur->next) mem_cur = mem_cur->next; else mem_cur = mem_cur->nextRange; } if (!mem_cur) { err ("cannot find corresponding mem node for pfmem...\n"); return -EINVAL; } ibmphp_remove_resource (mem_cur); if (!res_prev) bus_cur->firstPFMemFromMem = res_cur->next; else res_prev->next = res_cur->next; kfree (res_cur); return 0; } res_prev = res_cur; if (res_cur->next) res_cur = res_cur->next; else res_cur = res_cur->nextRange; } if (!res_cur) { err ("cannot find pfmem to delete...\n"); return -EINVAL; } } else { err ("the %s resource is not in the list to be deleted...\n", type); return -EINVAL; } } if (!res_prev) { /* first device to be deleted */ if (res_cur->next) { switch (res->type) { case IO: bus_cur->firstIO = res_cur->next; break; case MEM: bus_cur->firstMem = res_cur->next; break; case PFMEM: bus_cur->firstPFMem = res_cur->next; break; } } else if (res_cur->nextRange) { switch (res->type) { case IO: bus_cur->firstIO = res_cur->nextRange; break; case MEM: bus_cur->firstMem = res_cur->nextRange; break; case PFMEM: bus_cur->firstPFMem = res_cur->nextRange; break; } } else { switch (res->type) { case IO: bus_cur->firstIO = NULL; break; case MEM: bus_cur->firstMem = NULL; break; case PFMEM: bus_cur->firstPFMem = NULL; break; } } kfree (res_cur); return 0; } else { if (res_cur->next) { if (res_prev->rangeno == res_cur->rangeno) res_prev->next = res_cur->next; else res_prev->nextRange = res_cur->next; } else if (res_cur->nextRange) { res_prev->next = NULL; res_prev->nextRange = res_cur->nextRange; } else { res_prev->next = NULL; res_prev->nextRange = NULL; } kfree (res_cur); return 0; } return 0; } static struct range_node * find_range (struct bus_node *bus_cur, struct resource_node * res) { struct range_node * range = NULL; switch (res->type) { case IO: range = bus_cur->rangeIO; break; case MEM: range = bus_cur->rangeMem; break; case PFMEM: range = bus_cur->rangePFMem; break; default: err ("cannot read resource type in 
find_range\n"); } while (range) { if (res->rangeno == range->rangeno) break; range = range->next; } return range; } /***************************************************************************** * This routine will check to make sure the io/mem/pfmem->len that the device asked for * can fit w/i our list of available IO/MEM/PFMEM resources. If cannot, returns -EINVAL, * otherwise, returns 0 * * Input: resource * Ouput: the correct start and end address are inputted into the resource node, * 0 or -EINVAL *****************************************************************************/ int ibmphp_check_resource (struct resource_node *res, u8 bridge) { struct bus_node *bus_cur; struct range_node *range = NULL; struct resource_node *res_prev; struct resource_node *res_cur = NULL; u32 len_cur = 0, start_cur = 0, len_tmp = 0; int noranges = 0; u32 tmp_start; /* this is to make sure start address is divisible by the length needed */ u32 tmp_divide; u8 flag = 0; if (!res) return -EINVAL; if (bridge) { /* The rules for bridges are different, 4K divisible for IO, 1M for (pf)mem*/ if (res->type == IO) tmp_divide = IOBRIDGE; else tmp_divide = MEMBRIDGE; } else tmp_divide = res->len; bus_cur = find_bus_wprev (res->busno, NULL, 0); if (!bus_cur) { /* didn't find a bus, smth's wrong!!! */ debug ("no bus in the system, either pci_dev's wrong or allocation failed\n"); return -EINVAL; } debug ("%s - enter\n", __func__); debug ("bus_cur->busno is %d\n", bus_cur->busno); /* This is a quick fix to not mess up with the code very much. 
i.e., * 2000-2fff, len = 1000, but when we compare, we need it to be fff */ res->len -= 1; switch (res->type) { case IO: res_cur = bus_cur->firstIO; noranges = bus_cur->noIORanges; break; case MEM: res_cur = bus_cur->firstMem; noranges = bus_cur->noMemRanges; break; case PFMEM: res_cur = bus_cur->firstPFMem; noranges = bus_cur->noPFMemRanges; break; default: err ("wrong type of resource to check\n"); return -EINVAL; } res_prev = NULL; while (res_cur) { range = find_range (bus_cur, res_cur); debug ("%s - rangeno = %d\n", __func__, res_cur->rangeno); if (!range) { err ("no range for the device exists... bailing out...\n"); return -EINVAL; } /* found our range */ if (!res_prev) { /* first time in the loop */ if ((res_cur->start != range->start) && ((len_tmp = res_cur->start - 1 - range->start) >= res->len)) { debug ("len_tmp = %x\n", len_tmp); if ((len_tmp < len_cur) || (len_cur == 0)) { if ((range->start % tmp_divide) == 0) { /* just perfect, starting address is divisible by length */ flag = 1; len_cur = len_tmp; start_cur = range->start; } else { /* Needs adjusting */ tmp_start = range->start; flag = 0; while ((len_tmp = res_cur->start - 1 - tmp_start) >= res->len) { if ((tmp_start % tmp_divide) == 0) { flag = 1; len_cur = len_tmp; start_cur = tmp_start; break; } tmp_start += tmp_divide - tmp_start % tmp_divide; if (tmp_start >= res_cur->start - 1) break; } } if (flag && len_cur == res->len) { debug ("but we are not here, right?\n"); res->start = start_cur; res->len += 1; /* To restore the balance */ res->end = res->start + res->len - 1; return 0; } } } } if (!res_cur->next) { /* last device on the range */ if ((range->end != res_cur->end) && ((len_tmp = range->end - (res_cur->end + 1)) >= res->len)) { debug ("len_tmp = %x\n", len_tmp); if ((len_tmp < len_cur) || (len_cur == 0)) { if (((res_cur->end + 1) % tmp_divide) == 0) { /* just perfect, starting address is divisible by length */ flag = 1; len_cur = len_tmp; start_cur = res_cur->end + 1; } else { /* Needs 
adjusting */ tmp_start = res_cur->end + 1; flag = 0; while ((len_tmp = range->end - tmp_start) >= res->len) { if ((tmp_start % tmp_divide) == 0) { flag = 1; len_cur = len_tmp; start_cur = tmp_start; break; } tmp_start += tmp_divide - tmp_start % tmp_divide; if (tmp_start >= range->end) break; } } if (flag && len_cur == res->len) { res->start = start_cur; res->len += 1; /* To restore the balance */ res->end = res->start + res->len - 1; return 0; } } } } if (res_prev) { if (res_prev->rangeno != res_cur->rangeno) { /* 1st device on this range */ if ((res_cur->start != range->start) && ((len_tmp = res_cur->start - 1 - range->start) >= res->len)) { if ((len_tmp < len_cur) || (len_cur == 0)) { if ((range->start % tmp_divide) == 0) { /* just perfect, starting address is divisible by length */ flag = 1; len_cur = len_tmp; start_cur = range->start; } else { /* Needs adjusting */ tmp_start = range->start; flag = 0; while ((len_tmp = res_cur->start - 1 - tmp_start) >= res->len) { if ((tmp_start % tmp_divide) == 0) { flag = 1; len_cur = len_tmp; start_cur = tmp_start; break; } tmp_start += tmp_divide - tmp_start % tmp_divide; if (tmp_start >= res_cur->start - 1) break; } } if (flag && len_cur == res->len) { res->start = start_cur; res->len += 1; /* To restore the balance */ res->end = res->start + res->len - 1; return 0; } } } } else { /* in the same range */ if ((len_tmp = res_cur->start - 1 - res_prev->end - 1) >= res->len) { if ((len_tmp < len_cur) || (len_cur == 0)) { if (((res_prev->end + 1) % tmp_divide) == 0) { /* just perfect, starting address's divisible by length */ flag = 1; len_cur = len_tmp; start_cur = res_prev->end + 1; } else { /* Needs adjusting */ tmp_start = res_prev->end + 1; flag = 0; while ((len_tmp = res_cur->start - 1 - tmp_start) >= res->len) { if ((tmp_start % tmp_divide) == 0) { flag = 1; len_cur = len_tmp; start_cur = tmp_start; break; } tmp_start += tmp_divide - tmp_start % tmp_divide; if (tmp_start >= res_cur->start - 1) break; } } if (flag && 
len_cur == res->len) { res->start = start_cur; res->len += 1; /* To restore the balance */ res->end = res->start + res->len - 1; return 0; } } } } } /* end if (res_prev) */ res_prev = res_cur; if (res_cur->next) res_cur = res_cur->next; else res_cur = res_cur->nextRange; } /* end of while */ if (!res_prev) { /* 1st device ever */ /* need to find appropriate range */ switch (res->type) { case IO: range = bus_cur->rangeIO; break; case MEM: range = bus_cur->rangeMem; break; case PFMEM: range = bus_cur->rangePFMem; break; } while (range) { if ((len_tmp = range->end - range->start) >= res->len) { if ((len_tmp < len_cur) || (len_cur == 0)) { if ((range->start % tmp_divide) == 0) { /* just perfect, starting address's divisible by length */ flag = 1; len_cur = len_tmp; start_cur = range->start; } else { /* Needs adjusting */ tmp_start = range->start; flag = 0; while ((len_tmp = range->end - tmp_start) >= res->len) { if ((tmp_start % tmp_divide) == 0) { flag = 1; len_cur = len_tmp; start_cur = tmp_start; break; } tmp_start += tmp_divide - tmp_start % tmp_divide; if (tmp_start >= range->end) break; } } if (flag && len_cur == res->len) { res->start = start_cur; res->len += 1; /* To restore the balance */ res->end = res->start + res->len - 1; return 0; } } } range = range->next; } /* end of while */ if ((!range) && (len_cur == 0)) { /* have gone through the list of devices and ranges and haven't found n.e.thing */ err ("no appropriate range.. 
bailing out...\n"); return -EINVAL; } else if (len_cur) { res->start = start_cur; res->len += 1; /* To restore the balance */ res->end = res->start + res->len - 1; return 0; } } if (!res_cur) { debug ("prev->rangeno = %d, noranges = %d\n", res_prev->rangeno, noranges); if (res_prev->rangeno < noranges) { /* if there're more ranges out there to check */ switch (res->type) { case IO: range = bus_cur->rangeIO; break; case MEM: range = bus_cur->rangeMem; break; case PFMEM: range = bus_cur->rangePFMem; break; } while (range) { if ((len_tmp = range->end - range->start) >= res->len) { if ((len_tmp < len_cur) || (len_cur == 0)) { if ((range->start % tmp_divide) == 0) { /* just perfect, starting address's divisible by length */ flag = 1; len_cur = len_tmp; start_cur = range->start; } else { /* Needs adjusting */ tmp_start = range->start; flag = 0; while ((len_tmp = range->end - tmp_start) >= res->len) { if ((tmp_start % tmp_divide) == 0) { flag = 1; len_cur = len_tmp; start_cur = tmp_start; break; } tmp_start += tmp_divide - tmp_start % tmp_divide; if (tmp_start >= range->end) break; } } if (flag && len_cur == res->len) { res->start = start_cur; res->len += 1; /* To restore the balance */ res->end = res->start + res->len - 1; return 0; } } } range = range->next; } /* end of while */ if ((!range) && (len_cur == 0)) { /* have gone through the list of devices and ranges and haven't found n.e.thing */ err ("no appropriate range.. bailing out...\n"); return -EINVAL; } else if (len_cur) { res->start = start_cur; res->len += 1; /* To restore the balance */ res->end = res->start + res->len - 1; return 0; } } else { /* no more ranges to check on */ if (len_cur) { res->start = start_cur; res->len += 1; /* To restore the balance */ res->end = res->start + res->len - 1; return 0; } else { /* have gone through the list of devices and haven't found n.e.thing */ err ("no appropriate range.. 
bailing out...\n"); return -EINVAL; } } } /* end if(!res_cur) */ return -EINVAL; } /******************************************************************************** * This routine is called from remove_card if the card contained PPB. * It will remove all the resources on the bus as well as the bus itself * Input: Bus * Ouput: 0, -ENODEV ********************************************************************************/ int ibmphp_remove_bus (struct bus_node *bus, u8 parent_busno) { struct resource_node *res_cur; struct resource_node *res_tmp; struct bus_node *prev_bus; int rc; prev_bus = find_bus_wprev (parent_busno, NULL, 0); if (!prev_bus) { debug ("something terribly wrong. Cannot find parent bus to the one to remove\n"); return -ENODEV; } debug ("In ibmphp_remove_bus... prev_bus->busno is %x\n", prev_bus->busno); rc = remove_ranges (bus, prev_bus); if (rc) return rc; if (bus->firstIO) { res_cur = bus->firstIO; while (res_cur) { res_tmp = res_cur; if (res_cur->next) res_cur = res_cur->next; else res_cur = res_cur->nextRange; kfree (res_tmp); res_tmp = NULL; } bus->firstIO = NULL; } if (bus->firstMem) { res_cur = bus->firstMem; while (res_cur) { res_tmp = res_cur; if (res_cur->next) res_cur = res_cur->next; else res_cur = res_cur->nextRange; kfree (res_tmp); res_tmp = NULL; } bus->firstMem = NULL; } if (bus->firstPFMem) { res_cur = bus->firstPFMem; while (res_cur) { res_tmp = res_cur; if (res_cur->next) res_cur = res_cur->next; else res_cur = res_cur->nextRange; kfree (res_tmp); res_tmp = NULL; } bus->firstPFMem = NULL; } if (bus->firstPFMemFromMem) { res_cur = bus->firstPFMemFromMem; while (res_cur) { res_tmp = res_cur; res_cur = res_cur->next; kfree (res_tmp); res_tmp = NULL; } bus->firstPFMemFromMem = NULL; } list_del (&bus->bus_list); kfree (bus); return 0; } /****************************************************************************** * This routine deletes the ranges from a given bus, and the entries from the * parent's bus in the resources * Input: current 
bus, previous bus * Output: 0, -EINVAL ******************************************************************************/ static int remove_ranges (struct bus_node *bus_cur, struct bus_node *bus_prev) { struct range_node *range_cur; struct range_node *range_tmp; int i; struct resource_node *res = NULL; if (bus_cur->noIORanges) { range_cur = bus_cur->rangeIO; for (i = 0; i < bus_cur->noIORanges; i++) { if (ibmphp_find_resource (bus_prev, range_cur->start, &res, IO) < 0) return -EINVAL; ibmphp_remove_resource (res); range_tmp = range_cur; range_cur = range_cur->next; kfree (range_tmp); range_tmp = NULL; } bus_cur->rangeIO = NULL; } if (bus_cur->noMemRanges) { range_cur = bus_cur->rangeMem; for (i = 0; i < bus_cur->noMemRanges; i++) { if (ibmphp_find_resource (bus_prev, range_cur->start, &res, MEM) < 0) return -EINVAL; ibmphp_remove_resource (res); range_tmp = range_cur; range_cur = range_cur->next; kfree (range_tmp); range_tmp = NULL; } bus_cur->rangeMem = NULL; } if (bus_cur->noPFMemRanges) { range_cur = bus_cur->rangePFMem; for (i = 0; i < bus_cur->noPFMemRanges; i++) { if (ibmphp_find_resource (bus_prev, range_cur->start, &res, PFMEM) < 0) return -EINVAL; ibmphp_remove_resource (res); range_tmp = range_cur; range_cur = range_cur->next; kfree (range_tmp); range_tmp = NULL; } bus_cur->rangePFMem = NULL; } return 0; } /* * find the resource node in the bus * Input: Resource needed, start address of the resource, type of resource */ int ibmphp_find_resource (struct bus_node *bus, u32 start_address, struct resource_node **res, int flag) { struct resource_node *res_cur = NULL; char * type = ""; if (!bus) { err ("The bus passed in NULL to find resource\n"); return -ENODEV; } switch (flag) { case IO: res_cur = bus->firstIO; type = "io"; break; case MEM: res_cur = bus->firstMem; type = "mem"; break; case PFMEM: res_cur = bus->firstPFMem; type = "pfmem"; break; default: err ("wrong type of flag\n"); return -EINVAL; } while (res_cur) { if (res_cur->start == start_address) { 
*res = res_cur; break; } if (res_cur->next) res_cur = res_cur->next; else res_cur = res_cur->nextRange; } if (!res_cur) { if (flag == PFMEM) { res_cur = bus->firstPFMemFromMem; while (res_cur) { if (res_cur->start == start_address) { *res = res_cur; break; } res_cur = res_cur->next; } if (!res_cur) { debug ("SOS...cannot find %s resource in the bus.\n", type); return -EINVAL; } } else { debug ("SOS... cannot find %s resource in the bus.\n", type); return -EINVAL; } } if (*res) debug ("*res->start = %x\n", (*res)->start); return 0; } /*********************************************************************** * This routine will free the resource structures used by the * system. It is called from cleanup routine for the module * Parameters: none * Returns: none ***********************************************************************/ void ibmphp_free_resources (void) { struct bus_node *bus_cur = NULL; struct bus_node *bus_tmp; struct range_node *range_cur; struct range_node *range_tmp; struct resource_node *res_cur; struct resource_node *res_tmp; struct list_head *tmp; struct list_head *next; int i = 0; flags = 1; list_for_each_safe (tmp, next, &gbuses) { bus_cur = list_entry (tmp, struct bus_node, bus_list); if (bus_cur->noIORanges) { range_cur = bus_cur->rangeIO; for (i = 0; i < bus_cur->noIORanges; i++) { if (!range_cur) break; range_tmp = range_cur; range_cur = range_cur->next; kfree (range_tmp); range_tmp = NULL; } } if (bus_cur->noMemRanges) { range_cur = bus_cur->rangeMem; for (i = 0; i < bus_cur->noMemRanges; i++) { if (!range_cur) break; range_tmp = range_cur; range_cur = range_cur->next; kfree (range_tmp); range_tmp = NULL; } } if (bus_cur->noPFMemRanges) { range_cur = bus_cur->rangePFMem; for (i = 0; i < bus_cur->noPFMemRanges; i++) { if (!range_cur) break; range_tmp = range_cur; range_cur = range_cur->next; kfree (range_tmp); range_tmp = NULL; } } if (bus_cur->firstIO) { res_cur = bus_cur->firstIO; while (res_cur) { res_tmp = res_cur; if (res_cur->next) 
res_cur = res_cur->next; else res_cur = res_cur->nextRange; kfree (res_tmp); res_tmp = NULL; } bus_cur->firstIO = NULL; } if (bus_cur->firstMem) { res_cur = bus_cur->firstMem; while (res_cur) { res_tmp = res_cur; if (res_cur->next) res_cur = res_cur->next; else res_cur = res_cur->nextRange; kfree (res_tmp); res_tmp = NULL; } bus_cur->firstMem = NULL; } if (bus_cur->firstPFMem) { res_cur = bus_cur->firstPFMem; while (res_cur) { res_tmp = res_cur; if (res_cur->next) res_cur = res_cur->next; else res_cur = res_cur->nextRange; kfree (res_tmp); res_tmp = NULL; } bus_cur->firstPFMem = NULL; } if (bus_cur->firstPFMemFromMem) { res_cur = bus_cur->firstPFMemFromMem; while (res_cur) { res_tmp = res_cur; res_cur = res_cur->next; kfree (res_tmp); res_tmp = NULL; } bus_cur->firstPFMemFromMem = NULL; } bus_tmp = bus_cur; list_del (&bus_cur->bus_list); kfree (bus_tmp); bus_tmp = NULL; } } /********************************************************************************* * This function will go over the PFmem resources to check if the EBDA allocated * pfmem out of memory buckets of the bus. If so, it will change the range numbers * and a flag to indicate that this resource is out of memory. 
It will also move the * Pfmem out of the pfmem resource list to the PFMemFromMem list, and will create * a new Mem node * This routine is called right after initialization *******************************************************************************/ static int __init once_over (void) { struct resource_node *pfmem_cur; struct resource_node *pfmem_prev; struct resource_node *mem; struct bus_node *bus_cur; struct list_head *tmp; list_for_each (tmp, &gbuses) { bus_cur = list_entry (tmp, struct bus_node, bus_list); if ((!bus_cur->rangePFMem) && (bus_cur->firstPFMem)) { for (pfmem_cur = bus_cur->firstPFMem, pfmem_prev = NULL; pfmem_cur; pfmem_prev = pfmem_cur, pfmem_cur = pfmem_cur->next) { pfmem_cur->fromMem = 1; if (pfmem_prev) pfmem_prev->next = pfmem_cur->next; else bus_cur->firstPFMem = pfmem_cur->next; if (!bus_cur->firstPFMemFromMem) pfmem_cur->next = NULL; else /* we don't need to sort PFMemFromMem since we're using mem node for all the real work anyways, so just insert at the beginning of the list */ pfmem_cur->next = bus_cur->firstPFMemFromMem; bus_cur->firstPFMemFromMem = pfmem_cur; mem = kzalloc(sizeof(struct resource_node), GFP_KERNEL); if (!mem) { err ("out of system memory\n"); return -ENOMEM; } mem->type = MEM; mem->busno = pfmem_cur->busno; mem->devfunc = pfmem_cur->devfunc; mem->start = pfmem_cur->start; mem->end = pfmem_cur->end; mem->len = pfmem_cur->len; if (ibmphp_add_resource (mem) < 0) err ("Trouble...trouble... EBDA allocated pfmem from mem, but system doesn't display it has this space... 
unless not PCI device...\n"); pfmem_cur->rangeno = mem->rangeno; } /* end for pfmem */ } /* end if */ } /* end list_for_each bus */ return 0; } int ibmphp_add_pfmem_from_mem (struct resource_node *pfmem) { struct bus_node *bus_cur = find_bus_wprev (pfmem->busno, NULL, 0); if (!bus_cur) { err ("cannot find bus of pfmem to add...\n"); return -ENODEV; } if (bus_cur->firstPFMemFromMem) pfmem->next = bus_cur->firstPFMemFromMem; else pfmem->next = NULL; bus_cur->firstPFMemFromMem = pfmem; return 0; } /* This routine just goes through the buses to see if the bus already exists. * It is called from ibmphp_find_sec_number, to find out a secondary bus number for * bridged cards * Parameters: bus_number * Returns: Bus pointer or NULL */ struct bus_node *ibmphp_find_res_bus (u8 bus_number) { return find_bus_wprev (bus_number, NULL, 0); } static struct bus_node *find_bus_wprev (u8 bus_number, struct bus_node **prev, u8 flag) { struct bus_node *bus_cur; struct list_head *tmp; struct list_head *tmp_prev; list_for_each (tmp, &gbuses) { tmp_prev = tmp->prev; bus_cur = list_entry (tmp, struct bus_node, bus_list); if (flag) *prev = list_entry (tmp_prev, struct bus_node, bus_list); if (bus_cur->busno == bus_number) return bus_cur; } return NULL; } void ibmphp_print_test (void) { int i = 0; struct bus_node *bus_cur = NULL; struct range_node *range; struct resource_node *res; struct list_head *tmp; debug_pci ("*****************START**********************\n"); if ((!list_empty(&gbuses)) && flags) { err ("The GBUSES is not NULL?!?!?!?!?\n"); return; } list_for_each (tmp, &gbuses) { bus_cur = list_entry (tmp, struct bus_node, bus_list); debug_pci ("This is bus # %d. 
There are\n", bus_cur->busno); debug_pci ("IORanges = %d\t", bus_cur->noIORanges); debug_pci ("MemRanges = %d\t", bus_cur->noMemRanges); debug_pci ("PFMemRanges = %d\n", bus_cur->noPFMemRanges); debug_pci ("The IO Ranges are as follows:\n"); if (bus_cur->rangeIO) { range = bus_cur->rangeIO; for (i = 0; i < bus_cur->noIORanges; i++) { debug_pci ("rangeno is %d\n", range->rangeno); debug_pci ("[%x - %x]\n", range->start, range->end); range = range->next; } } debug_pci ("The Mem Ranges are as follows:\n"); if (bus_cur->rangeMem) { range = bus_cur->rangeMem; for (i = 0; i < bus_cur->noMemRanges; i++) { debug_pci ("rangeno is %d\n", range->rangeno); debug_pci ("[%x - %x]\n", range->start, range->end); range = range->next; } } debug_pci ("The PFMem Ranges are as follows:\n"); if (bus_cur->rangePFMem) { range = bus_cur->rangePFMem; for (i = 0; i < bus_cur->noPFMemRanges; i++) { debug_pci ("rangeno is %d\n", range->rangeno); debug_pci ("[%x - %x]\n", range->start, range->end); range = range->next; } } debug_pci ("The resources on this bus are as follows\n"); debug_pci ("IO...\n"); if (bus_cur->firstIO) { res = bus_cur->firstIO; while (res) { debug_pci ("The range # is %d\n", res->rangeno); debug_pci ("The bus, devfnc is %d, %x\n", res->busno, res->devfunc); debug_pci ("[%x - %x], len=%x\n", res->start, res->end, res->len); if (res->next) res = res->next; else if (res->nextRange) res = res->nextRange; else break; } } debug_pci ("Mem...\n"); if (bus_cur->firstMem) { res = bus_cur->firstMem; while (res) { debug_pci ("The range # is %d\n", res->rangeno); debug_pci ("The bus, devfnc is %d, %x\n", res->busno, res->devfunc); debug_pci ("[%x - %x], len=%x\n", res->start, res->end, res->len); if (res->next) res = res->next; else if (res->nextRange) res = res->nextRange; else break; } } debug_pci ("PFMem...\n"); if (bus_cur->firstPFMem) { res = bus_cur->firstPFMem; while (res) { debug_pci ("The range # is %d\n", res->rangeno); debug_pci ("The bus, devfnc is %d, %x\n", res->busno, 
res->devfunc); debug_pci ("[%x - %x], len=%x\n", res->start, res->end, res->len); if (res->next) res = res->next; else if (res->nextRange) res = res->nextRange; else break; } } debug_pci ("PFMemFromMem...\n"); if (bus_cur->firstPFMemFromMem) { res = bus_cur->firstPFMemFromMem; while (res) { debug_pci ("The range # is %d\n", res->rangeno); debug_pci ("The bus, devfnc is %d, %x\n", res->busno, res->devfunc); debug_pci ("[%x - %x], len=%x\n", res->start, res->end, res->len); res = res->next; } } } debug_pci ("***********************END***********************\n"); } static int range_exists_already (struct range_node * range, struct bus_node * bus_cur, u8 type) { struct range_node * range_cur = NULL; switch (type) { case IO: range_cur = bus_cur->rangeIO; break; case MEM: range_cur = bus_cur->rangeMem; break; case PFMEM: range_cur = bus_cur->rangePFMem; break; default: err ("wrong type passed to find out if range already exists\n"); return -ENODEV; } while (range_cur) { if ((range_cur->start == range->start) && (range_cur->end == range->end)) return 1; range_cur = range_cur->next; } return 0; } /* This routine will read the windows for any PPB we have and update the * range info for the secondary bus, and will also input this info into * primary bus, since BIOS doesn't. This is for PPB that are in the system * on bootup. For bridged cards that were added during previous load of the * driver, only the ranges and the bus structure are added, the devices are * added from NVRAM * Input: primary busno * Returns: none * Note: this function doesn't take into account IO restrictions etc, * so will only work for bridges with no video/ISA devices behind them It * also will not work for onboard PPB's that can have more than 1 *bus * behind them All these are TO DO. * Also need to add more error checkings... 
(from fnc returns etc) */ static int __init update_bridge_ranges (struct bus_node **bus) { u8 sec_busno, device, function, hdr_type, start_io_address, end_io_address; u16 vendor_id, upper_io_start, upper_io_end, start_mem_address, end_mem_address; u32 start_address, end_address, upper_start, upper_end; struct bus_node *bus_sec; struct bus_node *bus_cur; struct resource_node *io; struct resource_node *mem; struct resource_node *pfmem; struct range_node *range; unsigned int devfn; bus_cur = *bus; if (!bus_cur) return -ENODEV; ibmphp_pci_bus->number = bus_cur->busno; debug ("inside %s\n", __func__); debug ("bus_cur->busno = %x\n", bus_cur->busno); for (device = 0; device < 32; device++) { for (function = 0x00; function < 0x08; function++) { devfn = PCI_DEVFN(device, function); pci_bus_read_config_word (ibmphp_pci_bus, devfn, PCI_VENDOR_ID, &vendor_id); if (vendor_id != PCI_VENDOR_ID_NOTVALID) { /* found correct device!!! */ pci_bus_read_config_byte (ibmphp_pci_bus, devfn, PCI_HEADER_TYPE, &hdr_type); switch (hdr_type) { case PCI_HEADER_TYPE_NORMAL: function = 0x8; break; case PCI_HEADER_TYPE_MULTIDEVICE: break; case PCI_HEADER_TYPE_BRIDGE: function = 0x8; case PCI_HEADER_TYPE_MULTIBRIDGE: /* We assume here that only 1 bus behind the bridge TO DO: add functionality for several: temp = secondary; while (temp < subordinate) { ... 
temp++; } */ pci_bus_read_config_byte (ibmphp_pci_bus, devfn, PCI_SECONDARY_BUS, &sec_busno); bus_sec = find_bus_wprev (sec_busno, NULL, 0); /* this bus structure doesn't exist yet, PPB was configured during previous loading of ibmphp */ if (!bus_sec) { bus_sec = alloc_error_bus (NULL, sec_busno, 1); /* the rest will be populated during NVRAM call */ return 0; } pci_bus_read_config_byte (ibmphp_pci_bus, devfn, PCI_IO_BASE, &start_io_address); pci_bus_read_config_byte (ibmphp_pci_bus, devfn, PCI_IO_LIMIT, &end_io_address); pci_bus_read_config_word (ibmphp_pci_bus, devfn, PCI_IO_BASE_UPPER16, &upper_io_start); pci_bus_read_config_word (ibmphp_pci_bus, devfn, PCI_IO_LIMIT_UPPER16, &upper_io_end); start_address = (start_io_address & PCI_IO_RANGE_MASK) << 8; start_address |= (upper_io_start << 16); end_address = (end_io_address & PCI_IO_RANGE_MASK) << 8; end_address |= (upper_io_end << 16); if ((start_address) && (start_address <= end_address)) { range = kzalloc(sizeof(struct range_node), GFP_KERNEL); if (!range) { err ("out of system memory\n"); return -ENOMEM; } range->start = start_address; range->end = end_address + 0xfff; if (bus_sec->noIORanges > 0) { if (!range_exists_already (range, bus_sec, IO)) { add_bus_range (IO, range, bus_sec); ++bus_sec->noIORanges; } else { kfree (range); range = NULL; } } else { /* 1st IO Range on the bus */ range->rangeno = 1; bus_sec->rangeIO = range; ++bus_sec->noIORanges; } fix_resources (bus_sec); if (ibmphp_find_resource (bus_cur, start_address, &io, IO)) { io = kzalloc(sizeof(struct resource_node), GFP_KERNEL); if (!io) { kfree (range); err ("out of system memory\n"); return -ENOMEM; } io->type = IO; io->busno = bus_cur->busno; io->devfunc = ((device << 3) | (function & 0x7)); io->start = start_address; io->end = end_address + 0xfff; io->len = io->end - io->start + 1; ibmphp_add_resource (io); } } pci_bus_read_config_word (ibmphp_pci_bus, devfn, PCI_MEMORY_BASE, &start_mem_address); pci_bus_read_config_word (ibmphp_pci_bus, 
devfn, PCI_MEMORY_LIMIT, &end_mem_address); start_address = 0x00000000 | (start_mem_address & PCI_MEMORY_RANGE_MASK) << 16; end_address = 0x00000000 | (end_mem_address & PCI_MEMORY_RANGE_MASK) << 16; if ((start_address) && (start_address <= end_address)) { range = kzalloc(sizeof(struct range_node), GFP_KERNEL); if (!range) { err ("out of system memory\n"); return -ENOMEM; } range->start = start_address; range->end = end_address + 0xfffff; if (bus_sec->noMemRanges > 0) { if (!range_exists_already (range, bus_sec, MEM)) { add_bus_range (MEM, range, bus_sec); ++bus_sec->noMemRanges; } else { kfree (range); range = NULL; } } else { /* 1st Mem Range on the bus */ range->rangeno = 1; bus_sec->rangeMem = range; ++bus_sec->noMemRanges; } fix_resources (bus_sec); if (ibmphp_find_resource (bus_cur, start_address, &mem, MEM)) { mem = kzalloc(sizeof(struct resource_node), GFP_KERNEL); if (!mem) { kfree (range); err ("out of system memory\n"); return -ENOMEM; } mem->type = MEM; mem->busno = bus_cur->busno; mem->devfunc = ((device << 3) | (function & 0x7)); mem->start = start_address; mem->end = end_address + 0xfffff; mem->len = mem->end - mem->start + 1; ibmphp_add_resource (mem); } } pci_bus_read_config_word (ibmphp_pci_bus, devfn, PCI_PREF_MEMORY_BASE, &start_mem_address); pci_bus_read_config_word (ibmphp_pci_bus, devfn, PCI_PREF_MEMORY_LIMIT, &end_mem_address); pci_bus_read_config_dword (ibmphp_pci_bus, devfn, PCI_PREF_BASE_UPPER32, &upper_start); pci_bus_read_config_dword (ibmphp_pci_bus, devfn, PCI_PREF_LIMIT_UPPER32, &upper_end); start_address = 0x00000000 | (start_mem_address & PCI_MEMORY_RANGE_MASK) << 16; end_address = 0x00000000 | (end_mem_address & PCI_MEMORY_RANGE_MASK) << 16; #if BITS_PER_LONG == 64 start_address |= ((long) upper_start) << 32; end_address |= ((long) upper_end) << 32; #endif if ((start_address) && (start_address <= end_address)) { range = kzalloc(sizeof(struct range_node), GFP_KERNEL); if (!range) { err ("out of system memory\n"); return -ENOMEM; } 
range->start = start_address; range->end = end_address + 0xfffff; if (bus_sec->noPFMemRanges > 0) { if (!range_exists_already (range, bus_sec, PFMEM)) { add_bus_range (PFMEM, range, bus_sec); ++bus_sec->noPFMemRanges; } else { kfree (range); range = NULL; } } else { /* 1st PFMem Range on the bus */ range->rangeno = 1; bus_sec->rangePFMem = range; ++bus_sec->noPFMemRanges; } fix_resources (bus_sec); if (ibmphp_find_resource (bus_cur, start_address, &pfmem, PFMEM)) { pfmem = kzalloc(sizeof(struct resource_node), GFP_KERNEL); if (!pfmem) { kfree (range); err ("out of system memory\n"); return -ENOMEM; } pfmem->type = PFMEM; pfmem->busno = bus_cur->busno; pfmem->devfunc = ((device << 3) | (function & 0x7)); pfmem->start = start_address; pfmem->end = end_address + 0xfffff; pfmem->len = pfmem->end - pfmem->start + 1; pfmem->fromMem = 0; ibmphp_add_resource (pfmem); } } break; } /* end of switch */ } /* end if vendor */ } /* end for function */ } /* end for device */ bus = &bus_cur; return 0; }
gpl-2.0
sandeshghimire/xlnx-3.17
arch/m68k/mac/macboing.c
12164
8395
/* * Mac bong noise generator. Note - we ought to put a boingy noise * here 8) * * ---------------------------------------------------------------------- * 16.11.98: * rewrote some functions, added support for Enhanced ASC (Quadras) * after the NetBSD asc.c console bell patch by Colin Wood/Frederick Bruck * Juergen Mellinger (juergen.mellinger@t-online.de) */ #include <linux/sched.h> #include <linux/timer.h> #include <asm/macintosh.h> #include <asm/mac_asc.h> static int mac_asc_inited; /* * dumb triangular wave table */ static __u8 mac_asc_wave_tab[ 0x800 ]; /* * Alan's original sine table; needs interpolating to 0x800 * (hint: interpolate or hardwire [0 -> Pi/2[, it's symmetric) */ static const signed char sine_data[] = { 0, 39, 75, 103, 121, 127, 121, 103, 75, 39, 0, -39, -75, -103, -121, -127, -121, -103, -75, -39 }; /* * where the ASC hides ... */ static volatile __u8* mac_asc_regs = ( void* )0x50F14000; /* * sample rate; is this a good default value? */ static unsigned long mac_asc_samplespersec = 11050; static int mac_bell_duration; static unsigned long mac_bell_phase; /* 0..2*Pi -> 0..0x800 (wavetable size) */ static unsigned long mac_bell_phasepersample; /* * some function protos */ static void mac_init_asc( void ); static void mac_nosound( unsigned long ); static void mac_quadra_start_bell( unsigned int, unsigned int, unsigned int ); static void mac_quadra_ring_bell( unsigned long ); static void mac_av_start_bell( unsigned int, unsigned int, unsigned int ); static void ( *mac_special_bell )( unsigned int, unsigned int, unsigned int ); /* * our timer to start/continue/stop the bell */ static DEFINE_TIMER(mac_sound_timer, mac_nosound, 0, 0); /* * Sort of initialize the sound chip (called from mac_mksound on the first * beep). 
*/ static void mac_init_asc( void ) { int i; /* * do some machine specific initialization * BTW: * the NetBSD Quadra patch identifies the Enhanced Apple Sound Chip via * mac_asc_regs[ 0x800 ] & 0xF0 != 0 * this makes no sense here, because we have to set the default sample * rate anyway if we want correct frequencies */ switch ( macintosh_config->ident ) { case MAC_MODEL_IIFX: /* * The IIfx is always special ... */ mac_asc_regs = ( void* )0x50010000; break; /* * not sure about how correct this list is * machines with the EASC enhanced apple sound chip */ case MAC_MODEL_Q630: case MAC_MODEL_P475: mac_special_bell = mac_quadra_start_bell; mac_asc_samplespersec = 22150; break; case MAC_MODEL_C660: case MAC_MODEL_Q840: /* * The Quadra 660AV and 840AV use the "Singer" custom ASIC for sound I/O. * It appears to be similar to the "AWACS" custom ASIC in the Power Mac * [678]100. Because Singer and AWACS may have a similar hardware * interface, this would imply that the code in drivers/sound/dmasound.c * for AWACS could be used as a basis for Singer support. All we have to * do is figure out how to do DMA on the 660AV/840AV through the PSC and * figure out where the Singer hardware sits in memory. (I'd look in the * vicinity of the AWACS location in a Power Mac [678]100 first, or the * current location of the Apple Sound Chip--ASC--in other Macs.) The * Power Mac [678]100 info can be found in MkLinux Mach kernel sources. * * Quoted from Apple's Tech Info Library, article number 16405: * "Among desktop Macintosh computers, only the 660AV, 840AV, and Power * Macintosh models have 16-bit audio input and output capability * because of the AT&T DSP3210 hardware circuitry and the 16-bit Singer * codec circuitry in the AVs. The Audio Waveform Amplifier and * Converter (AWAC) chip in the Power Macintosh performs the same * 16-bit I/O functionality. The PowerBook 500 series computers * support 16-bit stereo output, but only mono input." 
* * Technical Information Library (TIL) article number 16405. * http://support.apple.com/kb/TA32601 * * --David Kilzer */ mac_special_bell = mac_av_start_bell; break; case MAC_MODEL_Q650: case MAC_MODEL_Q700: case MAC_MODEL_Q800: case MAC_MODEL_Q900: case MAC_MODEL_Q950: /* * Currently not implemented! */ mac_special_bell = NULL; break; default: /* * Every switch needs a default */ mac_special_bell = NULL; break; } /* * init the wave table with a simple triangular wave * A sine wave would sure be nicer here ... */ for ( i = 0; i < 0x400; i++ ) { mac_asc_wave_tab[ i ] = i / 4; mac_asc_wave_tab[ i + 0x400 ] = 0xFF - i / 4; } mac_asc_inited = 1; } /* * Called to make noise; current single entry to the boing driver. * Does the job for simple ASC, calls other routines else. * XXX Fixme: * Should be split into asc_mksound, easc_mksound, av_mksound and * function pointer set in mac_init_asc which would be called at * init time. * _This_ is rather ugly ... */ void mac_mksound( unsigned int freq, unsigned int length ) { __u32 cfreq = ( freq << 5 ) / 468; unsigned long flags; int i; if ( mac_special_bell == NULL ) { /* Do nothing */ return; } if ( !mac_asc_inited ) mac_init_asc(); if ( mac_special_bell ) { mac_special_bell( freq, length, 128 ); return; } if ( freq < 20 || freq > 20000 || length == 0 ) { mac_nosound( 0 ); return; } local_irq_save(flags); del_timer( &mac_sound_timer ); for ( i = 0; i < 0x800; i++ ) mac_asc_regs[ i ] = 0; for ( i = 0; i < 0x800; i++ ) mac_asc_regs[ i ] = mac_asc_wave_tab[ i ]; for ( i = 0; i < 8; i++ ) *( __u32* )( ( __u32 )mac_asc_regs + ASC_CONTROL + 0x814 + 8 * i ) = cfreq; mac_asc_regs[ 0x807 ] = 0; mac_asc_regs[ ASC_VOLUME ] = 128; mac_asc_regs[ 0x805 ] = 0; mac_asc_regs[ 0x80F ] = 0; mac_asc_regs[ ASC_MODE ] = ASC_MODE_SAMPLE; mac_asc_regs[ ASC_ENABLE ] = ASC_ENABLE_SAMPLE; mac_sound_timer.expires = jiffies + length; add_timer( &mac_sound_timer ); local_irq_restore(flags); } /* * regular ASC: stop whining .. 
*/ static void mac_nosound( unsigned long ignored ) { mac_asc_regs[ ASC_ENABLE ] = 0; } /* * EASC entry; init EASC, don't load wavetable, schedule 'start whining'. */ static void mac_quadra_start_bell( unsigned int freq, unsigned int length, unsigned int volume ) { unsigned long flags; /* if the bell is already ringing, ring longer */ if ( mac_bell_duration > 0 ) { mac_bell_duration += length; return; } mac_bell_duration = length; mac_bell_phase = 0; mac_bell_phasepersample = ( freq * sizeof( mac_asc_wave_tab ) ) / mac_asc_samplespersec; /* this is reasonably big for small frequencies */ local_irq_save(flags); /* set the volume */ mac_asc_regs[ 0x806 ] = volume; /* set up the ASC registers */ if ( mac_asc_regs[ 0x801 ] != 1 ) { /* select mono mode */ mac_asc_regs[ 0x807 ] = 0; /* select sampled sound mode */ mac_asc_regs[ 0x802 ] = 0; /* ??? */ mac_asc_regs[ 0x801 ] = 1; mac_asc_regs[ 0x803 ] |= 0x80; mac_asc_regs[ 0x803 ] &= 0x7F; } mac_sound_timer.function = mac_quadra_ring_bell; mac_sound_timer.expires = jiffies + 1; add_timer( &mac_sound_timer ); local_irq_restore(flags); } /* * EASC 'start/continue whining'; I'm not sure why the above function didn't * already load the wave table, or at least call this one... * This piece keeps reloading the wave table until done. */ static void mac_quadra_ring_bell( unsigned long ignored ) { int i, count = mac_asc_samplespersec / HZ; unsigned long flags; /* * we neither want a sound buffer overflow nor underflow, so we need to match * the number of samples per timer interrupt as exactly as possible. * using the asc interrupt will give better results in the future * ...and the possibility to use a real sample (a boingy noise, maybe...) 
*/ local_irq_save(flags); del_timer( &mac_sound_timer ); if ( mac_bell_duration-- > 0 ) { for ( i = 0; i < count; i++ ) { mac_bell_phase += mac_bell_phasepersample; mac_asc_regs[ 0 ] = mac_asc_wave_tab[ mac_bell_phase & ( sizeof( mac_asc_wave_tab ) - 1 ) ]; } mac_sound_timer.expires = jiffies + 1; add_timer( &mac_sound_timer ); } else mac_asc_regs[ 0x801 ] = 0; local_irq_restore(flags); } /* * AV code - please fill in. */ static void mac_av_start_bell( unsigned int freq, unsigned int length, unsigned int volume ) { }
gpl-2.0
EPDCenterSpain/bq-QC
sound/soc/codecs/tlv320aic3111.c
133
59300
/* * linux/sound/soc/codecs/tlv320aic3111.c * * * Copyright (C) 2010 Texas Instruments, Inc. * * This package is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. * * History: * * Rev 0.1 ASoC driver support Mistral 14-04-2010 * * Rev 0.2 Updated based Review Comments Mistral 29-06-2010 * * Rev 0.3 Updated for Codec Family Compatibility 12-07-2010 */ /* ***************************************************************************** * Include Files ***************************************************************************** */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/pm.h> #include <linux/i2c.h> #include <linux/platform_device.h> #include <linux/cdev.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include <sound/soc.h> #include <sound/soc-dapm.h> #include <sound/initval.h> #include <mach/gpio.h> #include "tlv320aic3111.h" #if 0 #define AIC3111_DEBUG #define AIC_DBG(x...) printk(KERN_INFO x) #else #define AIC_DBG(x...) 
do { } while (0) #endif #define HP_DET_PIN RK29_PIN6_PA0 //#define AIC3111_DEBUG /* codec status */ #define AIC3110_IS_SHUTDOWN 0 #define AIC3110_IS_CAPTURE_ON 1 #define AIC3110_IS_PLAYBACK_ON 2 #define AIC3110_IS_INITPOWER_ON 4 /* work type */ #define AIC3110_POWERDOWN_NULL 0 #define AIC3110_POWERDOWN_PLAYBACK 1 #define AIC3110_POWERDOWN_CAPTURE 2 #define AIC3110_POWERDOWN_PLAYBACK_CAPTURE 3 #define JACK_DET_ADLOOP msecs_to_jiffies(200) //#define AIC3111_DEBUG #define SPK 1 #define HP 0 static int aic3111_power_speaker (bool on); struct speaker_data { struct timer_list timer; struct semaphore sem; }; enum { POWER_STATE_OFF = 0, POWER_STATE_ON, POWER_STATE_SW_HP = 0, POWER_STATE_SW_SPK, }; static void aic3111_work(struct work_struct *work); static struct workqueue_struct *aic3111_workq; static DECLARE_DELAYED_WORK(delayed_work, aic3111_work); static int aic3111_current_status = AIC3110_IS_SHUTDOWN, aic3111_work_type = AIC3110_POWERDOWN_NULL; static bool isHSin = true, isSetHW = false; int old_status = SPK; /* ***************************************************************************** * Global Variables ***************************************************************************** */ /* Used to maintain the Register Access control*/ static u8 aic3111_reg_ctl; static struct snd_soc_codec *aic3111_codec; struct aic3111_priv *aic3111_privdata; struct i2c_client *aic3111_i2c; /* add a timer for checkout HP or SPK*/ static struct timer_list aic3111_timer; /*Used to delay work hpdet switch irq handle*/ struct delayed_work aic3111_hpdet_work; #ifdef CONFIG_MINI_DSP extern int aic3111_minidsp_program (struct snd_soc_codec *codec); extern void aic3111_add_minidsp_controls (struct snd_soc_codec *codec); #endif /* * AIC3111 register cache * We are caching the registers here. * NOTE: In AIC3111, there are 61 pages of 128 registers supported. * The following table contains the page0, page1 and page2 registers values. 
*/ #ifdef AIC3111_CODEC_SUPPORT static const u8 aic31xx_reg[AIC31xx_CACHEREGNUM] = { /* Page 0 Registers */ /* 0 */ 0x00, 0x00, 0x12, 0x00, 0x00, 0x11, 0x04, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x00, 0x80, 0x80, /* 10 */ 0x08, 0x00, 0x01, 0x01, 0x80, 0x80, 0x04, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, /* 20 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 30 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x15, 0x55, 0x55, 0x00, 0x00, 0x00, 0x01, 0x01, 0x00, 0x14, /* 40 */ 0x0c, 0x00, 0x00, 0x00, 0x6f, 0x38, 0x00, 0x00, 0x00, 0x00, 0x00, 0xee, 0x10, 0xd8, 0x7e, 0xe3, /* 50 */ 0x00, 0x00, 0x88, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 60 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 70 */ 0x00, 0x00, 0x10, 0x32, 0x54, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x12, 0x02, /* Page 1 Registers */ /* 0 */ 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 10 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 20 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 30 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x80, 0x00, 0x00, 0x00, /* 40 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 50 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 60 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 70 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, }; #elif defined(AIC3110_CODEC_SUPPORT) /**************** AIC3110 REG CACHE ******************/ static const u8 aic31xx_reg[AIC31xx_CACHEREGNUM] = { /* Page 0 Registers */ 0x00, 0x00, 0x01, 
0x56, 0x00, 0x11, 0x04, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x00, 0x80, 0x80, 0x08, 0x00, 0x01, 0x01, 0x80, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x32, 0x12, 0x03, 0x02, 0x02, 0x11, 0x10, 0x00, 0x01, 0x04, 0x00, 0x14, 0x0c, 0x00, 0x00, 0x00, 0x0f, 0x38, 0x00, 0x00, 0x00, 0x00, 0x00, 0xee, 0x10, 0xd8, 0x7e, 0xe3, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x12, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* Page 1 Registers */ 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x06, 0x3e, 0x00, 0x00, 0x7f, 0x7f, 0x7f, 0x7f, 0x02, 0x02, 0x00, 0x00, 0x20, 0x86, 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, }; /**************************** End of AIC3110 REG CAHE ******************/ #elif defined(AIC3100_CODEC_SUPPORT) /******************************* AIC3100 REG CACHE ***********************/ static const u8 aic31xx_reg[AIC31xx_CACHEREGNUM] = { /* Page 0 Registers */ /* 0 */ 0x00, 0x00, 0x12, 0x00, 0x00, 0x11, 0x04, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x00, 0x80, 0x00, /* 10 */ 0x00, 0x00, 0x01, 0x01, 0x80, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, 
0x00, 0x00, 0x00, /* 20 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 30 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x15, 0x55, 0x55, 0x00, 0x00, 0x00, 0x01, 0x01, 0x00, 0x14, /* 40 */ 0x0c, 0x00, 0x00, 0x00, 0x6f, 0x38, 0x00, 0x00, 0x00, 0x00, 0x00, 0xee, 0x10, 0xd8, 0x7e, 0xe3, /* 50 */ 0x00, 0x00, 0x88, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 60 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 70 */ 0x00, 0x00, 0x10, 0x32, 0x54, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x12, 0x02, /* Page 1 Registers */ /* 0 */ 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 10 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 20 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 30 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x80, 0x00, 0x00, 0x00, /* 40 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 50 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 60 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 70 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, }; /**************************** End of AIC3100 REG CACHE ******************/ #else /*#ifdef AIC3120_CODEC_SUPPORT */ static const u8 aic31xx_reg[AIC31xx_CACHEREGNUM] = { /* Page 0 Registers */ /* 0 */ 0x00, 0x00, 0x12, 0x00, 0x00, 0x11, 0x04, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x00, 0x80, 0x80, /* 10 */ 0x08, 0x00, 0x01, 0x01, 0x80, 0x80, 0x04, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, /* 20 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, /* 30 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x15, 0x55, 0x55, 0x00, 0x00, 0x00, 0x01, 0x01, 0x00, 0x14, /* 40 */ 0x0c, 0x00, 0x00, 0x00, 0x6f, 0x38, 0x00, 0x00, 0x00, 0x00, 0x00, 0xee, 0x10, 0xd8, 0x7e, 0xe3, /* 50 */ 0x00, 0x00, 0x88, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 60 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 70 */ 0x00, 0x00, 0x10, 0x32, 0x54, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x12, 0x02, /* Page 1 Registers */ /* 0 */ 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 10 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 20 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 30 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x80, 0x00, 0x00, 0x00, /* 40 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 50 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 60 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 70 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, }; #endif /* *---------------------------------------------------------------------------- * Function : aic3111_change_page * Purpose : This function is to switch between page 0 and page 1. 
* *---------------------------------------------------------------------------- */ static int aic3111_change_page (struct snd_soc_codec *codec, u8 new_page) { struct aic3111_priv *aic3111 = aic3111_privdata; u8 data[2]; if (new_page == 2 || new_page > 8) { printk("ERROR::codec do not have page %d !!!!!!\n", new_page); return -1; } data[0] = 0; data[1] = new_page; aic3111->page_no = new_page; if (codec->hw_write (codec->control_data, data, 2) != 2) { printk ("Error in changing page to %d \n", new_page); return -1; } return 0; } /* *---------------------------------------------------------------------------- * Function : aic3111_write_reg_cache * Purpose : This function is to write aic3111 register cache * *---------------------------------------------------------------------------- */ static inline void aic3111_write_reg_cache (struct snd_soc_codec *codec, u16 reg, u8 value) { u8 *cache = codec->reg_cache; if (reg >= AIC31xx_CACHEREGNUM) { return; } cache[reg] = value; } /* *---------------------------------------------------------------------------- * Function : aic3111_read * Purpose : This function is to read the aic3111 register space. * *---------------------------------------------------------------------------- */ static unsigned int aic3111_read (struct snd_soc_codec *codec, unsigned int reg) { struct aic3111_priv *aic3111 = aic3111_privdata; u8 value; u8 page = reg / 128; if (page == 2 || page > 8) { printk("aic3111_read::Error page, there's not page %d in codec tlv320aic3111 !!!\n", page); return -1; } reg = reg % 128; if (aic3111->page_no != page) { aic3111_change_page (codec, page); } i2c_master_send (codec->control_data, (char *) &reg, 1); i2c_master_recv (codec->control_data, &value, 1); return value; } /* *---------------------------------------------------------------------------- * Function : aic3111_write * Purpose : This function is to write to the aic3111 register space. 
 *
 *----------------------------------------------------------------------------
 */

/*
 *----------------------------------------------------------------------------
 * Function : aic3111_write
 * Purpose  : Write one 8-bit value to a codec register over I2C.  The flat
 *            register number encodes the page (reg / 128) and the in-page
 *            offset (reg % 128); the page register is switched first when it
 *            differs from the driver's cached current page.
 * Returns  : 0 on success, -1 for an invalid page (NOTE(review): a plain -1,
 *            not a -Exxx code like the -EIO below — inconsistent but kept),
 *            -EIO on an I2C transfer failure.
 *----------------------------------------------------------------------------
 */
static int aic3111_write (struct snd_soc_codec *codec, unsigned int reg,
			  unsigned int value)
{
	struct aic3111_priv *aic3111 = aic3111_privdata;
	u8 data[2];
	u8 page;

	//printk("enter %s!!!!!!\n",__FUNCTION__);
	//printk("RK29_PIN6_PB6 =%d!!!!!!\n",gpio_get_value(RK29_PIN6_PB6));

	/* Split the flat register number into page and in-page offset. */
	page = reg / 128;
	data[AIC3111_REG_OFFSET_INDEX] = reg % 128;

	/* Reject pages the tlv320aic3111 does not implement (2 and >9). */
	if (page == 2 || page > 9) {
		printk("aic3111_write::Error page, there's not page %d in codec tlv320aic3111 !!!\n", page);
		return -1;
	}

	/* Switch pages only when necessary; the current page is cached. */
	if (aic3111->page_no != page) {
		aic3111_change_page (codec, page);
	}

	/*
	 * I2C payload layout:
	 *   data[AIC3111_REG_OFFSET_INDEX] - register offset within the page
	 *   data[AIC3111_REG_DATA_INDEX]   - 8-bit register value
	 */
	data[AIC3111_REG_DATA_INDEX] = value & AIC3111_8BITS_MASK;

#if defined(EN_REG_CACHE)
	/* Only pages 0 and 1 are shadowed in the register cache. */
	if ((page == 0) || (page == 1)) {
		aic3111_write_reg_cache (codec, reg, value);
	}
#endif
	if (codec->hw_write (codec->control_data, data, 2) != 2) {
		printk ("Error in i2c write\n");
		return -EIO;
	}
	return 0;
}

/*
 * Debug helper: dump every register of pages 0 and 1 as read back from the
 * hardware, then the driver's register cache, 16 values per printed row.
 * Always returns 0.
 */
static int aic3111_print_register_cache (struct platform_device *pdev)
{
	struct snd_soc_codec *codec = aic3111_codec;
	u8 *cache = codec->reg_cache;
	int reg;

	printk ("\n========3110 reg========\n");
	for (reg = 0; reg < codec->reg_size; reg++) {
		if (reg == 0)
			printk ("Page 0\n");
		if (reg == 128)
			printk ("\nPage 1\n");
		if (reg%16 == 0 && reg != 0 && reg != 128)
			printk ("\n");
		printk("0x%02x, ",aic3111_read(codec,reg));
	}
	printk ("\n========3110 cache========\n");
	for (reg = 0; reg < codec->reg_size; reg++) {
		if (reg == 0)
			printk ("Page 0\n");
		if (reg == 128)
			printk ("\nPage 1\n");
		if (reg%16 == 0 && reg != 0 && reg != 128)
			printk ("\n");
		printk ("0x%02x, ",cache[reg]);
	}
	printk ("\n==========================\n");
	return 0;
}

/*
 * Quiesce the codec: power down the DACs (reg 63), drop the external
 * power/reset GPIO, restore headphone-path defaults, power off the speaker
 * path, and reset the register cache to the chip's power-on defaults.
 * NOTE(review): despite the name, the actual software-reset write
 * (reg 1 = 0x01) is commented out; only reg 1 = 0x00 is written at the end.
 */
static void aic3111_soft_reset (void)
{
	struct snd_soc_codec *codec = aic3111_codec;

	AIC_DBG("CODEC::%s\n",__FUNCTION__);

	//aic3111_write (codec, 1, 0x01);
	aic3111_write (codec, 63, 0x00);
	gpio_set_value(RK29_PIN6_PB5, GPIO_LOW);
	msleep(10);
	aic3111_write (aic3111_codec, (68), 0x01);	//disable DRC
	aic3111_write (aic3111_codec, (128 + 31), 0xc4);
	aic3111_write (aic3111_codec, (128 + 36), 0x28);	//Left Analog Vol to HPL
	aic3111_write (aic3111_codec, (128 + 37), 0x28);	//Right Analog Vol to HPL
	aic3111_write (aic3111_codec, (128 + 40), 0x4f);	//HPL driver PGA
	aic3111_write (aic3111_codec, (128 + 41), 0x4f);	//HPR driver PGA

	aic3111_power_speaker(POWER_STATE_OFF);
	mdelay (20);
	aic3111_write (codec, 1, 0x00);

	/* Re-sync the shadow cache with the chip's power-on defaults. */
	memcpy(codec->reg_cache, aic31xx_reg, sizeof(aic31xx_reg));

	isSetHW = false;

	return;
}

/*
 *----------------------------------------------------------------------------
 * Function : aic3111_set_bias_level
 * Purpose  : This function is to get triggered when dapm events occurs.
 *            BIAS_ON gates the PLL and all clock dividers on; BIAS_STANDBY
 *            gates them off (master mode only).  A no-op once isSetHW is set.
 * NOTE(review): the master and slave arms of BIAS_ON are byte-for-byte
 *            identical — looks like copy-paste that was never specialised.
 * NOTE(review): NADC and MADC are gated with ENABLE_MDAC (on) and the NADC
 *            with ~ENABLE_NDAC (off) — suspected copy-paste of the mask
 *            names; harmless only if all ENABLE_* masks share the same bit
 *            (typically 0x80) — confirm against the register header.
 * NOTE(review): BIAS_STANDBY only gates clocks off in master mode; in slave
 *            mode the dividers stay running — confirm this is intended.
 *----------------------------------------------------------------------------
 */
static int aic3111_set_bias_level(struct snd_soc_codec *codec,
				  enum snd_soc_bias_level level)
{
	struct aic3111_priv *aic3111 = aic3111_privdata;
	u8 value;

	if (isSetHW)
		return 0;

	AIC_DBG ("CODEC::%s>>>>>>level:%d>>>>master:%d\n", __FUNCTION__, level, aic3111->master);

	switch (level) {
	/* full On */
	case SND_SOC_BIAS_ON:
		/* all power is driven by DAPM system */
		if (aic3111->master) {
			/* Switch on PLL */
			value = aic3111_read(codec, CLK_REG_2);
			aic3111_write(codec, CLK_REG_2, (value | ENABLE_PLL));

			/* Switch on NDAC Divider */
			value = aic3111_read(codec, NDAC_CLK_REG);
			aic3111_write(codec, NDAC_CLK_REG, value | ENABLE_NDAC);

			/* Switch on MDAC Divider */
			value = aic3111_read(codec, MDAC_CLK_REG);
			aic3111_write(codec, MDAC_CLK_REG, value | ENABLE_MDAC);

			/* Switch on NADC Divider */
			value = aic3111_read(codec, NADC_CLK_REG);
			aic3111_write(codec, NADC_CLK_REG, value | ENABLE_MDAC);

			/* Switch on MADC Divider */
			value = aic3111_read(codec, MADC_CLK_REG);
			aic3111_write(codec, MADC_CLK_REG, value | ENABLE_MDAC);

			/* Switch on BCLK_N Divider */
			value = aic3111_read(codec, BCLK_N_VAL);
			aic3111_write(codec, BCLK_N_VAL, value | ENABLE_BCLK);
		} else {
			/* Switch on PLL */
			value = aic3111_read(codec, CLK_REG_2);
			aic3111_write(codec, CLK_REG_2, (value | ENABLE_PLL));

			/* Switch on NDAC Divider */
			value = aic3111_read(codec, NDAC_CLK_REG);
			aic3111_write(codec, NDAC_CLK_REG, value | ENABLE_NDAC);

			/* Switch on MDAC Divider */
			value = aic3111_read(codec, MDAC_CLK_REG);
			aic3111_write(codec, MDAC_CLK_REG, value | ENABLE_MDAC);

			/* Switch on NADC Divider */
			value = aic3111_read(codec, NADC_CLK_REG);
			aic3111_write(codec, NADC_CLK_REG, value | ENABLE_MDAC);

			/* Switch on MADC Divider */
			value = aic3111_read(codec, MADC_CLK_REG);
			aic3111_write(codec, MADC_CLK_REG, value | ENABLE_MDAC);

			/* Switch on BCLK_N Divider */
			value = aic3111_read(codec, BCLK_N_VAL);
			aic3111_write(codec, BCLK_N_VAL, value | ENABLE_BCLK);
		}
		break;

	/* partial On */
	case SND_SOC_BIAS_PREPARE:
		break;

	/* Off, with power */
	case SND_SOC_BIAS_STANDBY:
		/*
		 * all power is driven by DAPM system,
		 * so output power is safe if bypass was set
		 */
		if (aic3111->master) {
			/* Switch off PLL */
			value = aic3111_read(codec, CLK_REG_2);
			aic3111_write(codec, CLK_REG_2, (value & ~ENABLE_PLL));

			/* Switch off NDAC Divider */
			value = aic3111_read(codec, NDAC_CLK_REG);
			aic3111_write(codec, NDAC_CLK_REG, value & ~ENABLE_NDAC);

			/* Switch off MDAC Divider */
			value = aic3111_read(codec, MDAC_CLK_REG);
			aic3111_write(codec, MDAC_CLK_REG, value & ~ENABLE_MDAC);

			/* Switch off NADC Divider */
			value = aic3111_read(codec, NADC_CLK_REG);
			aic3111_write(codec, NADC_CLK_REG, value & ~ENABLE_NDAC);

			/* Switch off MADC Divider */
			value = aic3111_read(codec, MADC_CLK_REG);
			aic3111_write(codec, MADC_CLK_REG, value & ~ENABLE_MDAC);

			value = aic3111_read(codec, BCLK_N_VAL);
			/* Switch off BCLK_N Divider */
			aic3111_write(codec, BCLK_N_VAL, value & ~ENABLE_BCLK);
		}
		break;

	/* Off, without power */
	case SND_SOC_BIAS_OFF:
		/* force all power off */
		break;
	}
	codec->dapm.bias_level = level;

	return 0;
}

/*
 * Clock-divider table: one row per supported (mclk, sample-rate) pair.
 * aic3111_get_divs() selects a row; aic3111_hw_params() programs it.
 */
/* the structure contains the different values for mclk */
static const struct aic3111_rate_divs aic3111_divs[] = {
/*
 * mclk, rate, p_val, pll_j, pll_d, dosr, ndac, mdac, aosr, nadc, madc, blck_N,
 * codec_speficic_initializations
 */
	/* 8k rate */
	{12000000, 8000, 1, 7, 6800, 768, 5, 3, 128, 5, 18, 24},
	//{12288000, 8000, 1, 7, 8643, 768, 5, 3, 128, 5, 18, 24},
	{24000000, 8000, 2, 7, 6800, 768, 15, 1, 64, 45, 4, 24},
	/* 11.025k rate */
	{12000000, 11025, 1, 7, 5264, 512, 8, 2, 128, 8, 8, 16},
	{24000000, 11025, 2, 7, 5264, 512, 16, 1, 64, 32, 4, 16},
	/* 16k rate */
	{12000000, 16000, 1, 7, 6800, 384, 5, 3, 128, 5, 9, 12},
	{24000000, 16000, 2, 7, 6800, 384, 15, 1, 64, 18, 5, 12},
	/* 22.05k rate */
	{12000000, 22050, 1, 7, 5264, 256, 4, 4, 128, 4, 8, 8},
	{24000000, 22050, 2, 7, 5264, 256, 16, 1, 64, 16, 4, 8},
	/* 32k rate */
	{12000000, 32000, 1, 7, 1680, 192, 2, 7, 64, 2, 21, 6},
	{24000000, 32000, 2, 7, 1680, 192, 7, 2, 64, 7, 6, 6},
	/* 44.1k rate */
	{12000000, 44100, 1, 7, 5264, 128, 2, 8, 128, 2, 8, 4},
	{11289600, 44100, 1, 8, 0, 128, 4, 4, 128, 4, 4, 4},
	{24000000, 44100, 2, 7, 5264, 128, 8, 2, 64, 8, 4, 4},
	/* 48k rate */
	{12000000, 48000, 1, 8, 1920, 128, 2, 8, 128, 2, 8, 4},
	{24000000, 48000, 2, 8, 1920, 128, 8, 2, 64, 8, 4, 4},
	/*96k rate */
	{12000000, 96000, 1, 8, 1920, 64, 2, 8, 64, 2, 8, 2},
	{24000000, 96000, 2, 8, 1920, 64, 4, 4, 64, 8, 2, 2},
	/*192k */
	{12000000, 192000, 1, 8, 1920, 32, 2, 8, 32, 2, 8, 1},
	{24000000, 192000, 2, 8, 1920, 32, 4, 4, 32, 4, 4, 1},
};

/*
 *----------------------------------------------------------------------------
 * Function : aic3111_get_divs
 * Purpose : This function is to get required divisor from the "aic3111_divs"
 *           table.
 *
 *----------------------------------------------------------------------------
 */
/*
 * Linear search of aic3111_divs[] for an exact (mclk, rate) match.
 * Returns the row index, or -EINVAL if the pair is unsupported.
 */
static inline int aic3111_get_divs (int mclk, int rate)
{
	int i;

	AIC_DBG("Enter::%s\n",__FUNCTION__);

	for (i = 0; i < ARRAY_SIZE (aic3111_divs); i++) {
		if ((aic3111_divs[i].rate == rate) && (aic3111_divs[i].mclk == mclk)) {
			return i;
		}
	}

	printk ("Master clock and sample rate is not supported\n");
	return -EINVAL;
}

/*
 *----------------------------------------------------------------------------
 * Function : aic3111_hw_params
 * Purpose : This function is to set the hardware parameters for AIC3111.
 *           The functions set the sample rate and audio serial data word
 *           length.  Programs PLL P/R/J/D, the DAC/ADC dividers (NDAC, MDAC,
 *           DOSR, NADC, MADC, AOSR) and BCLK N from the aic3111_divs[] row
 *           matching the configured sysclk and requested rate, then sets the
 *           I2S word length.  A no-op once isSetHW is set.
 *----------------------------------------------------------------------------
 */
static int aic3111_hw_params(struct snd_pcm_substream *substream,
			     struct snd_pcm_hw_params *params,
			     struct snd_soc_dai *codec_dai)
{
	struct snd_soc_codec *codec = aic3111_codec;
	struct aic3111_priv *aic3111 = aic3111_privdata;
	int i;
	u8 data;

	if (isSetHW)
		return 0;

	AIC_DBG("CODEC::%s\n", __FUNCTION__);

	/* Gate clocks off while the dividers are reprogrammed. */
	aic3111_set_bias_level(codec, SND_SOC_BIAS_STANDBY);

	i = aic3111_get_divs(aic3111->sysclk, params_rate (params));
	if (i < 0) {
		printk ("sampling rate not supported\n");
		return i;
	}

	/* We will fix R value to 1 and will make P & J=K.D as varialble */
	/* Setting P & R values */
	aic3111_write(codec, CLK_REG_2, ((aic3111_divs[i].p_val << 4) | 0x01));
	/* J value */
	aic3111_write(codec, CLK_REG_3, aic3111_divs[i].pll_j);
	/* MSB & LSB for D value */
	aic3111_write(codec, CLK_REG_4, (aic3111_divs[i].pll_d >> 8));
	aic3111_write(codec, CLK_REG_5, (aic3111_divs[i].pll_d & AIC3111_8BITS_MASK));
	/* NDAC divider value */
	aic3111_write(codec, NDAC_CLK_REG, aic3111_divs[i].ndac);
	/* MDAC divider value */
	aic3111_write(codec, MDAC_CLK_REG, aic3111_divs[i].mdac);
	/* DOSR MSB & LSB values */
	aic3111_write(codec, DAC_OSR_MSB, aic3111_divs[i].dosr >> 8);
	aic3111_write(codec, DAC_OSR_LSB, aic3111_divs[i].dosr & AIC3111_8BITS_MASK);
	/* NADC divider value */
	aic3111_write(codec, NADC_CLK_REG, aic3111_divs[i].nadc);
	/* MADC divider value */
	aic3111_write(codec, MADC_CLK_REG, aic3111_divs[i].madc);
	/* AOSR value */
	aic3111_write(codec, ADC_OSR_REG, aic3111_divs[i].aosr);
	/* BCLK N divider */
	aic3111_write(codec, BCLK_N_VAL, aic3111_divs[i].blck_N);

	/* Re-enable PLL and dividers with the new values. */
	aic3111_set_bias_level(codec, SND_SOC_BIAS_ON);

	/* Program the serial-interface word length (bits 5:4). */
	data = aic3111_read(codec, INTERFACE_SET_REG_1);
	data = data & ~(3 << 4);
	switch (params_format (params)) {
	case SNDRV_PCM_FORMAT_S16_LE:
		/* 16-bit is the register's 00 encoding — nothing to set. */
		break;
	case SNDRV_PCM_FORMAT_S20_3LE:
		data |= (AIC3111_WORD_LEN_20BITS << DATA_LEN_SHIFT);
		break;
	case SNDRV_PCM_FORMAT_S24_LE:
		data |= (AIC3111_WORD_LEN_24BITS << DATA_LEN_SHIFT);
		break;
	case SNDRV_PCM_FORMAT_S32_LE:
		data |= (AIC3111_WORD_LEN_32BITS << DATA_LEN_SHIFT);
		break;
	}
	aic3111_write(codec, INTERFACE_SET_REG_1, data);

	return 0;
}

/*
 *----------------------------------------------------------------------------
 * Function : aic3111_mute
 * Purpose : This function is to mute or unmute the left and right DAC.
 * NOTE(review): the actual mute/unmute register writes are commented out, so
 *           this is effectively a stub; its only live side effect is setting
 *           isSetHW = true on unmute, which freezes the hardware setup
 *           (hw_params / set_fmt / bias_level become no-ops afterwards).
 *----------------------------------------------------------------------------
 */
static int aic3111_mute (struct snd_soc_dai *codec_dai, int mute)
{
	struct snd_soc_codec *codec = codec_dai->codec;
	u8 dac_reg;

	AIC_DBG ("CODEC::%s>>>>mute:%d\n", __FUNCTION__, mute);

	dac_reg = aic3111_read (codec, DAC_MUTE_CTRL_REG) & ~MUTE_ON;
	if (mute)
		;//aic3111_write (codec, DAC_MUTE_CTRL_REG, dac_reg | MUTE_ON);
	else {
		//aic3111_write (codec, DAC_MUTE_CTRL_REG, dac_reg);
		isSetHW = true;
	}
	return 0;
}

/*
 *----------------------------------------------------------------------------
 * Function : aic3111_set_dai_sysclk
 * Purpose : This function is to set the DAI system clock.  Only the three
 *           frequencies listed below are accepted; the value is cached in
 *           aic3111->sysclk for later use by hw_params().
 *----------------------------------------------------------------------------
 */
static int aic3111_set_dai_sysclk (struct snd_soc_dai *codec_dai,
				   int clk_id, unsigned int freq, int dir)
{
	struct aic3111_priv *aic3111 = aic3111_privdata;

	if (isSetHW)
		return 0;

	AIC_DBG("Enter %s and line %d\n",__FUNCTION__,__LINE__);

	switch (freq)
	{
	case AIC3111_FREQ_11289600:
	case AIC3111_FREQ_12000000:
	case AIC3111_FREQ_24000000:
		aic3111->sysclk = freq;
		return 0;
	}
	printk ("Invalid frequency to set DAI system clock\n");

	return -EINVAL;
}

/*
 *----------------------------------------------------------------------------
 * Function : aic3111_set_dai_fmt
 * Purpose : This function is to set the DAI format: master/slave clocking
 *           (also cached in aic3111->master for bias-level handling) and the
 *           serial audio format (I2S / DSP-A / right- / left-justified).
 *----------------------------------------------------------------------------
 */
static int aic3111_set_dai_fmt (struct snd_soc_dai *codec_dai, unsigned int fmt)
{
	struct snd_soc_codec *codec = codec_dai->codec;
	struct aic3111_priv *aic3111 = aic3111_privdata;
	u8 iface_reg;

	if (isSetHW)
		return 0;

	AIC_DBG("Enter %s and line %d\n",__FUNCTION__,__LINE__);

	iface_reg = aic3111_read (codec, INTERFACE_SET_REG_1);
	iface_reg = iface_reg & ~(3 << 6 | 3 << 2);	//set I2S mode BCLK and WCLK is input

	/* set master/slave audio interface */
	switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
	case SND_SOC_DAIFMT_CBM_CFM:
		/* Codec drives both BCLK and WCLK. */
		aic3111->master = 1;
		iface_reg |= BIT_CLK_MASTER | WORD_CLK_MASTER;
		break;
	case SND_SOC_DAIFMT_CBS_CFS:
		/* Codec is a full slave. */
		aic3111->master = 0;
		iface_reg &= ~(BIT_CLK_MASTER | WORD_CLK_MASTER);
		break;
	case SND_SOC_DAIFMT_CBS_CFM:
		/* BCLK master, WCLK slave. */
		aic3111->master = 0;
		iface_reg |= BIT_CLK_MASTER;
		iface_reg &= ~(WORD_CLK_MASTER);
		break;
	default:
		printk ("Invalid DAI master/slave interface\n");
		return -EINVAL;
	}

	/* interface format */
	switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
	case SND_SOC_DAIFMT_I2S:
		/* I2S is the register's 00 encoding — nothing to set. */
		break;
	case SND_SOC_DAIFMT_DSP_A:
		iface_reg |= (AIC3111_DSP_MODE << AUDIO_MODE_SHIFT);
		break;
	case SND_SOC_DAIFMT_RIGHT_J:
		iface_reg |= (AIC3111_RIGHT_JUSTIFIED_MODE << AUDIO_MODE_SHIFT);
		break;
	case SND_SOC_DAIFMT_LEFT_J:
		iface_reg |= (AIC3111_LEFT_JUSTIFIED_MODE << AUDIO_MODE_SHIFT);
		break;
	default:
		printk ("Invalid DAI interface format\n");
		return -EINVAL;
	}

	aic3111_write (codec, INTERFACE_SET_REG_1, iface_reg);

	return 0;
}

/*
 *----------------------------------------------------------------------------
 * Function : aic3111_power_headphone
 * Purpose :
 *
 parameter: on = 1: power up;
 *            on = 0: power dn;
 * xjq@rock-chips.com
 *----------------------------------------------------------------------------
 */
/*
 * Power the headphone path up or down via direct page-0/page-1 register
 * writes (magic values are board tuning).  Returns 0 in all cases; values
 * of `on` other than POWER_STATE_ON/OFF are silently ignored.
 */
static int aic3111_power_headphone (bool on)
{
	struct snd_soc_codec *codec = aic3111_codec;

	AIC_DBG("Enter %s and line %d\n",__FUNCTION__,__LINE__);

	if (on == POWER_STATE_ON) {
		aic3111_write (codec, (63), 0xd4);
//		aic3111_write(codec, (128 + 35), 0x88);
		aic3111_write (codec, (68), 0x01);	//disable DRC
		aic3111_write (codec, (128 + 31), 0xc4);
		aic3111_write (codec, (128 + 44), 0x00);
		aic3111_write (codec, (128 + 36), 0x28);	//Left Analog Vol to HPL
		aic3111_write (codec, (128 + 37), 0x28);	//Right Analog Vol to HPL
//		aic3111_write (codec, (128 + 40), 0x06);	//HPL driver PGA
//		aic3111_write (codec, (128 + 41), 0x06);	//HPR driver PGA
		aic3111_write (codec, (128 + 40), 0x4f);	//HPL driver PGA
		aic3111_write (codec, (128 + 41), 0x4f);	//HPR driver PGA
	} else if (on == POWER_STATE_OFF) {
		/* Power down the HP drivers and mute the analog volumes. */
		aic3111_write (codec, (128 + 31), 0x00);
		aic3111_write (codec, (128 + 44), 0x00);
		aic3111_write (codec, (128 + 36), 0xff);
		aic3111_write (codec, (128 + 37), 0xff);
		aic3111_write (codec, (128 + 40), 0x02);
		aic3111_write (codec, (128 + 41), 0x02);
	}
	return 0;
}

/*
 *----------------------------------------------------------------------------
 * Function : aic3111_power_speaker
 * Purpose :
 * parameter: on = 1: power up;
 *            on = 0: power dn;
 * xjq@rock-chips.com
 * NOTE(review): the first tuning variant is kept under "#if 0" for
 *           reference; only the "#if 1" sequence below it is live.
 *----------------------------------------------------------------------------
 */
static int aic3111_power_speaker (bool on)
{
	struct snd_soc_codec *codec = aic3111_codec;

	AIC_DBG("Enter %s and line %d\n",__FUNCTION__,__LINE__);

	if (on == POWER_STATE_ON) {
#if 0
//		aic3111_write(codec, (128 + 32), 0x86);
		aic3111_write(codec, (128 + 32), 0xc6);
		aic3111_write(codec, (128 + 30), 0x00);
//		aic3111_write(codec, (128 + 38), 0x08);	//set left speaker analog gain to -4.88db
		aic3111_write(codec, (128 + 38), 0x16);	//set left speaker analog gain to -4.88db
		aic3111_write(codec, (128 + 39), 0x16);	//Right Analog Vol to SPR
//		aic3111_write(codec, (128 + 38), 0x7f);	//set left speaker analog gain to -4.88db
//		aic3111_write(codec, (128 + 39), 0x7f);	//Right Analog Vol to SPR
		aic3111_write(codec, (128 + 42), 0x1d);	//set left speaker driver gain to 12db
		aic3111_write(codec, (128 + 43), 0x1d);	//bit3-4 output stage gain
//		aic3111_write(codec, (128 + 43), 0x00);	//bit3-4 output stage gain
		aic3111_write(codec, (37), 0x98);
#if 1	/* DRC */
		aic3111_write(codec, (60), 0x02);	//select PRB_P2
		aic3111_write(codec, (68), 0x61);	//enable left and right DRC, set DRC threshold to -3db, set DRC hystersis to 1db
		aic3111_write(codec, (69), 0x00);	//set hold time disable
		aic3111_write(codec, (70), 0x5D);	//set attack time to 0.125db per sample period and decay time to 0.000488db per sample
#endif
#endif
#if 1
		aic3111_write(codec, (63), 0xfc);
		aic3111_write(codec, (128 + 32), 0xc6);
		aic3111_write(codec, (128 + 30), 0x00);
		aic3111_write(codec, (128 + 39), 0x08);	//set left speaker analog gain to -4.88db
		aic3111_write(codec, (128 + 38), 0x08);	//Right Analog Vol to SPR
		aic3111_write(codec, (128 + 43), 0x0D);	//set left speaker driver gain to 12db
		aic3111_write(codec, (128 + 42), 0x0D);	//bit3-4 output stage gain
		aic3111_write(codec, (37), 0x99);
#if 1	/* DRC */
		aic3111_write(codec, (60), 0x02);	//select PRB_P2
		aic3111_write(codec, (68), 0x61);	//enable left and right DRC, set DRC threshold to -3db, set DRC hystersis to 1db
		aic3111_write(codec, (69), 0x00);	//set hold time disable
		aic3111_write(codec, (70), 0x5D);	//set attack time to 0.125db per sample period and decay time to 0.000488db per sample
#endif
#endif
	} else if (on == POWER_STATE_OFF) {
		aic3111_write(codec, (68), 0x01);	//disable DRC
		aic3111_write(codec, (128 + 32), 0x06);
		aic3111_write(codec, (128 + 30), 0x00);
		aic3111_write(codec, (128 + 38), 0xff);
		aic3111_write(codec, (128 + 39), 0xff);
		aic3111_write(codec, (128 + 42), 0x00);
		aic3111_write(codec, (128 + 43), 0x00);
		aic3111_write(codec, (37), 0x00);
	}
	return 0;
}

/*
 *----------------------------------------------------------------------------
 * Function : aic3111_HS_switch
 * Purpose : This function is to initialise the AIC3111 driver
 *           In PLAYBACK, switch between HP and SPK app.
 * parameter: on = 1: SPK power up & HP power dn;
 *            on = 0: HP power up & SPK power dn;
 * xjq@rock-chips.com
 * NOTE(review): all aic3111_power_headphone() calls are commented out, so
 *           "HP" selection currently only powers the speaker path off.
 *----------------------------------------------------------------------------
 */
static int aic3111_HS_switch (bool on)
{
	AIC_DBG("enter %s and line %d\n",__FUNCTION__,__LINE__);

	if (POWER_STATE_SW_SPK == on) {
		//aic3111_power_headphone (POWER_STATE_OFF);
		aic3111_power_speaker (POWER_STATE_ON);
	} else if (POWER_STATE_SW_HP == on) {
		aic3111_power_speaker (POWER_STATE_OFF);
		//aic3111_power_headphone (POWER_STATE_ON);

		//aic3111_power_speaker (POWER_STATE_ON);
		//aic3111_power_headphone (POWER_STATE_OFF);
	}

	return 0;
}

/*
 *----------------------------------------------------------------------------
 * Function : aic3111_SPK_HS_powerdown
 * Purpose : This function is to power down HP and SPK.
 * xjq@rock-chips.com
 *----------------------------------------------------------------------------
 */
/*
 * Power down both output paths.  NOTE(review): the headphone power-down
 * call is commented out, so only the speaker path is actually switched off.
 */
static int aic3111_SPK_HS_powerdown (void)
{
	AIC_DBG("enter %s and line %d\n",__FUNCTION__,__LINE__);

	//aic3111_power_headphone (POWER_STATE_OFF);
	aic3111_power_speaker (POWER_STATE_OFF);
//	aic3111_power_speaker (POWER_STATE_ON);

	return 0;
}

/*
 *----------------------------------------------------------------------------
 * Function : aic3111_power_init
 * Purpose : pll clock setting.  One-time base register setup, guarded by
 *           the AIC3110_IS_INITPOWER_ON flag in aic3111_current_status so
 *           repeated calls are no-ops until the flag is cleared elsewhere.
 * xjq@rock-chips.com
 *----------------------------------------------------------------------------
 */
static void aic3111_power_init (void)
{
	struct snd_soc_codec *codec = aic3111_codec;

	AIC_DBG("enter %s and line %d\n",__FUNCTION__,__LINE__);

	if (!(aic3111_current_status & AIC3110_IS_INITPOWER_ON)) {
		AIC_DBG ("CODEC::%s\n", __FUNCTION__);

		aic3111_write(codec, (128 + 46), 0x0b);
		aic3111_write(codec, (128 + 35), 0x44);
		aic3111_write(codec, (4), 0x03);
		aic3111_write(codec, (29), 0x01);
		aic3111_write(codec, (48), 0xC0);
		aic3111_write(codec, (51), 0x14);
		aic3111_write(codec, (67), 0x82);

		aic3111_current_status |= AIC3110_IS_INITPOWER_ON;
	}
	return;
}

/*
 *----------------------------------------------------------------------------
 * Function : aic3111_power_playback
 * Purpose :
 * parameter: on = 1: power up;
 *            on = 0: power dn;
 * xjq@rock-chips.com
 * Brings the DAC/headphone path up (or down), routes to speaker or
 * headphone according to the jack-detect state (isHSin), and tracks state
 * in aic3111_current_status (AIC3110_IS_PLAYBACK_ON) so repeated calls in
 * the same state are no-ops.  The RK29_PIN6_PB5 GPIO is held LOW for the
 * duration of the register sequence and raised HIGH on exit.
 *----------------------------------------------------------------------------
 */
static int aic3111_power_playback (bool on)
{
	struct snd_soc_codec *codec = aic3111_codec;

	AIC_DBG ("CODEC::%s>>>>>>%d\n", __FUNCTION__, on);

	gpio_set_value(RK29_PIN6_PB5, GPIO_LOW);

	aic3111_power_init();

	if ((on == POWER_STATE_ON) && !(aic3111_current_status & AIC3110_IS_PLAYBACK_ON)) {
//	if(1){
		//gpio_set_value(RK29_PIN6_PB5, GPIO_HIGH);
		/****open HPL and HPR*******/
		//aic3111_write(codec, (63), 0xfc);
		msleep(10);
		aic3111_write(codec, (65), 0x00);	//LDAC VOL
		aic3111_write(codec, (66), 0x00);	//RDAC VOL

		aic3111_write (aic3111_codec, (63), 0xd4);
//		aic3111_write(codec, (128 + 35), 0x88);
		//aic3111_write (aic3111_codec, (68), 0x01);	//disable DRC
		//aic3111_write (aic3111_codec, (128 + 31), 0xc4);
		aic3111_write (aic3111_codec, (128 + 44), 0x00);
		//aic3111_write (aic3111_codec, (128 + 36), 0x28);	//Left Analog Vol to HPL
		//aic3111_write (aic3111_codec, (128 + 37), 0x28);	//Right Analog Vol to HPL
		aic3111_write (codec, (128 + 40), 0x06);	//HPL driver PGA
		aic3111_write (codec, (128 + 41), 0x06);	//HPR driver PGA
		//aic3111_write (aic3111_codec, (128 + 40), 0x4f);	//HPL driver PGA
		//aic3111_write (aic3111_codec, (128 + 41), 0x4f);	//HPR driver PGA
		//printk("HP INIT~~~~~~~~~~~~~~~~~~~~~~~~~`\n");
		/***************************/

		/* Route to speaker or headphone per jack-detect state. */
		aic3111_HS_switch(isHSin);

		aic3111_write(codec, (65), 0x10);	//LDAC VOL to +8db
		aic3111_write(codec, (66), 0x10);	//RDAC VOL to +8db
		msleep(10);
		aic3111_write(codec, (64), 0x00);

		aic3111_current_status |= AIC3110_IS_PLAYBACK_ON;
	} else if ((on == POWER_STATE_OFF) && (aic3111_current_status & AIC3110_IS_PLAYBACK_ON)) {
		aic3111_write(codec, (68), 0x01);	//disable DRC
		aic3111_write(codec, (64), 0x0c);
		aic3111_write(codec, (63), 0x00);
		aic3111_write(codec, (65), 0x00);	//LDAC VOL
		aic3111_write(codec, (66), 0x00);	//RDAC VOL
		aic3111_SPK_HS_powerdown();

		aic3111_current_status &= ~AIC3110_IS_PLAYBACK_ON;
	}
	//mdelay(800);
	gpio_set_value(RK29_PIN6_PB5, GPIO_HIGH);
	return 0;
}

/*
 *----------------------------------------------------------------------------
 * Function : aic3111_power_capture
 * Purpose :
 * parameter: on = 1: power up;
 *            on = 0: power dn;
 * xjq@rock-chips.com
 * Powers the ADC/MIC path up (or down) and programs a page-4 coefficient
 * block (per the original comment, a 20 Hz..3.5 kHz capture filter).  State
 * is tracked via AIC3110_IS_CAPTURE_ON in aic3111_current_status.
 *----------------------------------------------------------------------------
 */
static int aic3111_power_capture (bool on)
{
	struct snd_soc_codec *codec = aic3111_codec;

	AIC_DBG ("CODEC::%s>>>>>>%d\n", __FUNCTION__, on);

	aic3111_power_init();

	if ((on == POWER_STATE_ON) && !(aic3111_current_status & AIC3110_IS_CAPTURE_ON)) {
		aic3111_write(codec, (64), 0x0c);
		msleep(10);
		aic3111_write(codec, (61), 0x0b);
		aic3111_write(codec, (128 + 47), 0x00);	//MIC PGA 0x80:0dB 0x14:10dB 0x28:20dB 0x3c:30dB 0x77:59dB
		aic3111_write(codec, (128 + 48), 0x80);	//MIC1LP\MIC1LM RIN = 10.
		aic3111_write(codec, (128 + 49), 0x20);
		aic3111_write(codec, (82), 0x00);	//D7=0:0: ADC channel not muted
		aic3111_write(codec, (83), 0x1A);	//ADC Digital Volume 0 dB
		aic3111_write(codec, (81), 0x80);	//D7=1:ADC channel is powered up.
#if 1
		/*configure register to creat a filter 20~3.5kHz*/
		aic3111_write(codec, (128*4 + 14), 0x7f);
		aic3111_write(codec, (128*4 + 15), 0x00);
		aic3111_write(codec, (128*4 + 16), 0xc0);
		aic3111_write(codec, (128*4 + 17), 0x18);
		aic3111_write(codec, (128*4 + 18), 0x00);
		aic3111_write(codec, (128*4 + 19), 0x00);
		aic3111_write(codec, (128*4 + 20), 0x3f);
		aic3111_write(codec, (128*4 + 21), 0x00);
		aic3111_write(codec, (128*4 + 22), 0x00);
		aic3111_write(codec, (128*4 + 23), 0x00);
		aic3111_write(codec, (128*4 + 24), 0x05);
		aic3111_write(codec, (128*4 + 25), 0xd2);
		aic3111_write(codec, (128*4 + 26), 0x05);
		aic3111_write(codec, (128*4 + 27), 0xd2);
		aic3111_write(codec, (128*4 + 28), 0x05);
		aic3111_write(codec, (128*4 + 29), 0xd2);
		aic3111_write(codec, (128*4 + 30), 0x53);
		aic3111_write(codec, (128*4 + 31), 0xff);
		aic3111_write(codec, (128*4 + 32), 0xc0);
		aic3111_write(codec, (128*4 + 33), 0xb5);
#endif
		msleep(10);
		aic3111_write(codec, (64), 0x00);

		aic3111_current_status |= AIC3110_IS_CAPTURE_ON;
	} else if ((on == POWER_STATE_OFF) && (aic3111_current_status & AIC3110_IS_CAPTURE_ON)) {
		aic3111_write(codec, (61), 0x00);
		aic3111_write(codec, (128 + 47), 0x00);	//MIC PGA AOL
		aic3111_write(codec, (128 + 48), 0x00);
		aic3111_write(codec, (128 + 50), 0x00);
		aic3111_write(codec, (81), 0x00);
		aic3111_write(codec, (82), 0x80);
		aic3111_write(codec, (83), 0x00);	//ADC VOL
		aic3111_write(codec, (86), 0x00);

		aic3111_current_status &= ~AIC3110_IS_CAPTURE_ON;
	}
	return 0;
}

/*
 *----------------------------------------------------------------------------
 * Function : aic3111_powerdown
 * Purpose : This
function is to power down codec. * *---------------------------------------------------------------------------- */ static void aic3111_powerdown (void) { AIC_DBG ("CODEC::%s\n", __FUNCTION__); if (aic3111_current_status != AIC3110_IS_SHUTDOWN) { aic3111_soft_reset();//sai aic3111_current_status = AIC3110_IS_SHUTDOWN; } } /* *---------------------------------------------------------------------------- * Function : aic3111_work * Purpose : This function is to respond to HPDET handle. * *---------------------------------------------------------------------------- */ static void aic3111_work (struct work_struct *work) { AIC_DBG("Enter %s and line %d\n",__FUNCTION__,__LINE__); switch (aic3111_work_type) { case AIC3110_POWERDOWN_NULL: break; case AIC3110_POWERDOWN_PLAYBACK: aic3111_power_playback(POWER_STATE_OFF); break; case AIC3110_POWERDOWN_CAPTURE: aic3111_power_capture(POWER_STATE_OFF); break; case AIC3110_POWERDOWN_PLAYBACK_CAPTURE: aic3111_powerdown();//sai break; default: break; } aic3111_work_type = AIC3110_POWERDOWN_NULL; } /* *---------------------------------------------------------------------------- * Function : aic3111_startup * Purpose : This function is to start up codec. * *---------------------------------------------------------------------------- */ static int aic3111_startup (struct snd_pcm_substream *substream, struct snd_soc_dai *dai) { /* struct snd_soc_pcm_runtime *rtd = substream->private_data; struct snd_soc_codec *codec = aic3111_codec; */ AIC_DBG ("CODEC::%s----substream->stream:%s \n", __FUNCTION__, substream->stream == SNDRV_PCM_STREAM_PLAYBACK ? 
"PLAYBACK":"CAPTURE"); cancel_delayed_work_sync(&delayed_work); if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { aic3111_power_playback(POWER_STATE_ON); } else if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) { aic3111_power_capture(POWER_STATE_ON); } return 0; } /* *---------------------------------------------------------------------------- * Function : aic3111_startup * Purpose : This function is to shut down codec. * *---------------------------------------------------------------------------- */ static void aic3111_shutdown (struct snd_pcm_substream *substream, struct snd_soc_dai *dai) { struct snd_soc_dai *codec_dai = dai; AIC_DBG ("CODEC::%s----substream->stream:%s \n", __FUNCTION__, substream->stream == SNDRV_PCM_STREAM_PLAYBACK ? "PLAYBACK":"CAPTURE"); if (!codec_dai->capture_active && !codec_dai->playback_active) { cancel_delayed_work_sync(&delayed_work); /* If codec is already shutdown, return */ if (aic3111_current_status == AIC3110_IS_SHUTDOWN) return; AIC_DBG ("CODEC::Is going to power down aic3111\n"); aic3111_work_type = AIC3110_POWERDOWN_PLAYBACK_CAPTURE; /* If codec is useless, queue work to close it */ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { queue_delayed_work(aic3111_workq, &delayed_work, msecs_to_jiffies(1000)); } else { queue_delayed_work(aic3111_workq, &delayed_work, msecs_to_jiffies(3000)); } } else if (codec_dai->capture_active && !codec_dai->playback_active) { cancel_delayed_work_sync(&delayed_work); aic3111_work_type = AIC3110_POWERDOWN_PLAYBACK; /* Turn off playback and keep record on */ queue_delayed_work(aic3111_workq, &delayed_work, msecs_to_jiffies(1000)); } else if (!codec_dai->capture_active && codec_dai->playback_active) { cancel_delayed_work_sync(&delayed_work); aic3111_work_type = AIC3110_POWERDOWN_CAPTURE; /* Turn off record and keep playback on */ queue_delayed_work(aic3111_workq, &delayed_work, msecs_to_jiffies(3000)); } } /* *---------------------------------------------------------------------------- * 
Function : aic3111_trigger * Purpose : This function is to respond to playback trigger. * *---------------------------------------------------------------------------- */ static int aic3111_trigger(struct snd_pcm_substream *substream, int status, struct snd_soc_dai *dai) { struct snd_soc_dai *codec_dai = dai; if(status == 0) { gpio_set_value(RK29_PIN6_PB5, GPIO_LOW); mdelay(10); } AIC_DBG ("CODEC::%s----status = %d substream->stream:%s \n", __FUNCTION__, status, substream->stream == SNDRV_PCM_STREAM_PLAYBACK ? "PLAYBACK":"CAPTURE"); if (status == 1 || status == 0) { if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { codec_dai->playback_active = status; } else { codec_dai->capture_active = status; } } return 0; } static struct snd_soc_dai_ops aic3111_dai_ops = { .hw_params = aic3111_hw_params, .digital_mute = aic3111_mute, .set_sysclk = aic3111_set_dai_sysclk, .set_fmt = aic3111_set_dai_fmt, .startup = aic3111_startup, .shutdown = aic3111_shutdown, .trigger = aic3111_trigger, }; static struct snd_soc_dai_driver aic3111_dai[] = { { .name = "AIC3111 HiFi", .playback = { .stream_name = "HiFi Playback", .channels_min = 1, .channels_max = 2, .rates = AIC3111_RATES, .formats = AIC3111_FORMATS, }, .capture = { .stream_name = "HiFi Capture", .channels_min = 1, .channels_max = 2, .rates = AIC3111_RATES, .formats = AIC3111_FORMATS, }, .ops = &aic3111_dai_ops, }, }; #ifdef AIC3111_DEBUG static struct class *aic3111_debug_class = NULL; static int reg_128_39 = 12, reg_128_43 = 0; static int i=52;j=1,k=0; static int CheckCommand(const char __user *buffer) { switch(*buffer) { case '1': if (*(buffer + 1) == '+') { if (reg_128_39 < 12) { if (reg_128_39 % 2 == 0) printk("write reg 128 + 39 vol + : %ddB -> -%d.5dB\n", (reg_128_39 - 12) / 2, (11 - reg_128_39) / 2); else printk("write reg 128 + 39 vol + : -%d.5dB -> %ddB\n", (12 - reg_128_39) / 2, (reg_128_39 - 11) / 2); reg_128_39++; aic3111_write(aic3111_codec, (128 + 39), 0x04 + (12 - reg_128_39)); } else { printk("128 + 39 max 
vol 0dB\n"); } } else if (*(buffer + 1) == '-') { if (reg_128_39 > 0) { if (reg_128_39 % 2 == 0) printk("write reg 128 + 39 vol - : %ddB -> -%d.5dB\n", (reg_128_39 - 12) / 2, (13 - reg_128_39) / 2); else printk("write reg 128 + 39 vol - : -%d.5dB -> %ddB\n", (12 - reg_128_39) / 2, (reg_128_39 - 13) / 2); reg_128_39--; aic3111_write(aic3111_codec, (128 + 39), 0x08 + (12 - reg_128_39)); } else { printk("128 + 39 min vol -6dB\n"); } } break; case '2': if (*(buffer + 1) == '+') { if (reg_128_43 < 2) { printk("write reg 128 + 43 vol + : %ddB -> %ddB\n", (reg_128_43) * 6, (reg_128_43 + 1) * 6); reg_128_43++; aic3111_write(aic3111_codec, (128 + 43), 0x04 + ((reg_128_43 + 1) << 3)); } else { printk("128 + 43 max vol 12dB\n"); } } else if (*(buffer + 1) == '-') { if (reg_128_43 > 0) { printk("write reg 128 + 43 vol - : %ddB -> %ddB\n", (reg_128_43) * 6, (reg_128_43 - 1) * 6); reg_128_43--; aic3111_write(aic3111_codec, (128 + 43), 0x04 + ((reg_128_43 + 1) << 3)); } else { printk("128 + 43 min vol 0dB\n"); } } break; case 'o': aic3111_write(aic3111_codec, (128 + 39), 0x08 + (12 - reg_128_39)); aic3111_write(aic3111_codec, (128 + 43), 0x04 + ((reg_128_43 + 1) << 3)); case 'l': if (reg_128_39 % 2 == 0) printk("reg 128 + 43 vol : %ddB reg 128 + 39 vol : %ddB\n", (reg_128_43) * 6, (reg_128_39 - 12) / 2); else printk("reg 128 + 43 vol : %ddB reg 128 + 39 vol : -%d.5dB\n", (reg_128_43) * 6, (12 - reg_128_39) / 2); break; case 's': aic3111_power_speaker (POWER_STATE_ON); break; case 'h': aic3111_power_headphone (POWER_STATE_ON); break; case 'q': aic3111_power_speaker (POWER_STATE_OFF); break; case 'w': aic3111_power_headphone (POWER_STATE_OFF); break; case 'a': i--; gpio_set_value(RK29_PIN6_PB5, GPIO_HIGH); //printk("reg[128+39]=0x%x\n",aic3111_read(aic3111_codec,(128 + 39))); printk("-db add\n"); break; case 'r': i++; gpio_set_value(RK29_PIN6_PB5, GPIO_LOW); //printk("reg[128+39]=0x%x\n",aic3111_read(aic3111_codec,(128 + 39))); printk("-db down\n"); break; case 't': printk("PB5 = 
%d\n",gpio_get_value(RK29_PIN6_PB5)); break; case 'z': j++; aic3111_write(aic3111_codec, (66), j); printk("DAC db add\n"); printk("reg[66]=0x%x\n",aic3111_read(aic3111_codec,66)); break; case 'x': j--; aic3111_write(aic3111_codec, (66), j); printk("DAC db down\n"); printk("reg[66]=0x%x\n",aic3111_read(aic3111_codec,66)); break; case 'c': j--; aic3111_write(aic3111_codec, (63), 0xfc); printk("reg[63]=0x%x\n",aic3111_read(aic3111_codec,66)); break; case 'n': k++; if(k==1) { aic3111_write(aic3111_codec, (128 + 40), 0x0e); aic3111_write(aic3111_codec, (128 + 41), 0x0e); printk("HPR and HPL 1 DB\n",k); } if(k==2) { aic3111_write(aic3111_codec, (128 + 40), 0x1e); aic3111_write(aic3111_codec, (128 + 40), 0x1e); printk("HPR and HPL 3 DB\n",k); } if(k==3) { aic3111_write(aic3111_codec, (128 + 40), 0x2e); aic3111_write(aic3111_codec, (128 + 40), 0x2e); printk("HPR and HPL 5 DB\n",k); } break; case 'm': k--; if(k==1) { aic3111_write(aic3111_codec, (128 + 40), 0x0e); aic3111_write(aic3111_codec, (128 + 41), 0x0e); printk("HPR and HPL 1 DB\n",k); } if(k==2) { aic3111_write(aic3111_codec, (128 + 40), 0x1e); aic3111_write(aic3111_codec, (128 + 40), 0x1e); printk("HPR and HPL 3 DB\n",k); } if(k==3) { aic3111_write(aic3111_codec, (128 + 40), 0x2e); aic3111_write(aic3111_codec, (128 + 40), 0x2e); printk("HPR and HPL 5 DB\n",k); } break; default: printk("Please press '1' '2' 'o' 'l' !\n"); break; } return 0; } static int aic3111_proc_write(struct file *file, const char __user *buffer, unsigned long count, void *data) { if (CheckCommand(buffer) != 0) { printk("Write proc error !\n"); return -1; } return sizeof(buffer); } static const struct file_operations aic3111_proc_fops = { .owner = THIS_MODULE, .write = aic3111_proc_write, }; static int aic3111_proc_init(void) { struct proc_dir_entry *aic3111_proc_entry; //printk("!!!!!!!!!!!!!!!!!!!!\n"); aic3111_proc_entry = create_proc_entry("driver/aic3111_ts", 0777, NULL); if (aic3111_proc_entry != NULL) { aic3111_proc_entry->write_proc = 
aic3111_proc_write; return -1; }else { printk("create proc error !\n"); } return 0; } #endif struct delayed_work aic3111_speaker_delayed_work; int speakeronoff; static void aic3111_speaker_delayed_work_func(struct work_struct *work) { struct snd_soc_codec *codec = aic3111_codec; if (aic3111_current_status & AIC3110_IS_PLAYBACK_ON){ if(speakeronoff) { //aic3111_write(codec, (128 + 32), 0xc6); //printk("reg 128+32 = %x\n"aic3111_read(codec, (128 + 32))); isHSin = 0; //gpio_set_value(RK29_PIN6_PB5, GPIO_LOW); aic3111_power_speaker(POWER_STATE_OFF); gpio_set_value(RK29_PIN6_PB5, GPIO_HIGH); //aic3111_power_headphone(POWER_STATE_ON); //aic3111_write(codec, (128 + 35), 0x88); printk("now hp sound\n"); } else { //aic3111_power_speaker(POWER_STATE_ON); isHSin = 1; //aic3111_power_headphone(POWER_STATE_OFF); gpio_set_value(RK29_PIN6_PB5, GPIO_LOW); aic3111_power_speaker(POWER_STATE_ON); aic3111_write(codec, (128 + 35), 0x44); aic3111_write(codec, (63), 0xfc); printk("now spk sound\n"); } } //printk("----------------------------mma7660_work_func------------------------\n"); } /**for check hp or spk****/ static int speaker_timer(unsigned long _data) { struct speaker_data *spk = (struct speaker_data *)_data; int new_status; if (gpio_get_value(RK29_PIN6_PB6) == 0) { new_status = HP; isHSin = 0; //printk("hp now\n"); if(old_status != new_status) { old_status = new_status; // printk("new_status = %d,old_status = %d\n",new_status,old_status); old_status = new_status; schedule_delayed_work(&aic3111_speaker_delayed_work,msecs_to_jiffies(30)); speakeronoff=1; //printk("HS RUN!!!!!!!!!!\n"); } } if (gpio_get_value(RK29_PIN6_PB6) == 1) { new_status = SPK; isHSin = 1; //printk("speak now\n"); if(old_status != new_status) { old_status = new_status; printk("new_status = %d,old_status = %d\n",new_status,old_status); old_status = new_status; schedule_delayed_work(&aic3111_speaker_delayed_work,msecs_to_jiffies(30)); speakeronoff=0; //printk("HS RUN!!!!!!!!!!\n"); } } mod_timer(&spk->timer, 
jiffies + msecs_to_jiffies(200)); return 0; } /* *---------------------------------------------------------------------------- * Function : aic3111_probe * Purpose : This is first driver function called by the SoC core driver. * *---------------------------------------------------------------------------- */ static int aic3111_probe (struct snd_soc_codec *codec) { int ret = 0;//, flags, hp_det_irq; codec->hw_write = (hw_write_t) i2c_master_send; codec->control_data = aic3111_i2c; aic3111_codec = codec; #if 1 gpio_set_value(RK29_PIN6_PB6,1); struct speaker_data *spk; spk = kzalloc(sizeof(struct speaker_data), GFP_KERNEL); if (spk == NULL) { printk("Allocate Memory Failed!\n"); ret = -ENOMEM; //goto exit_gpio_free; } setup_timer(&spk->timer, speaker_timer, (unsigned long)spk); mod_timer(&spk->timer, jiffies + JACK_DET_ADLOOP); INIT_DELAYED_WORK(&aic3111_speaker_delayed_work, aic3111_speaker_delayed_work_func); /*********************/ //pio_set_value(RK29_PIN6_PB5, GPIO_LOW); //aic3111_power_speaker(POWER_STATE_OFF); //aic3111_power_headphone(POWER_STATE_ON); #endif aic3111_workq = create_freezable_workqueue("aic3111"); if (aic3111_workq == NULL) { return -ENOMEM; } /* INIT_DELAYED_WORK (&aic3111_hpdet_work, aic3111_hpdet_work_handle); if (gpio_request (HP_DET_PIN, "hp_det")) { gpio_free (HP_DET_PIN); printk ("CODEC::tlv3110 hp det pin request error\n"); } else { gpio_direction_input (HP_DET_PIN); gpio_pull_updown (HP_DET_PIN, PullDisable); hp_det_irq = gpio_to_irq (HP_DET_PIN); isHSin = gpio_get_value (HP_DET_PIN); flags = isHSin ? 
IRQF_TRIGGER_FALLING : IRQF_TRIGGER_RISING; ret = request_irq (hp_det_irq, aic3111_hpdet_isr, flags, "hpdet", codec); if (ret < 0) { printk ("CODEC::request hp_det_irq error\n"); } } */ /* Just Reset codec */ aic3111_soft_reset(); gpio_set_value(RK29_PIN6_PB5, GPIO_LOW); msleep(10); aic3111_write (aic3111_codec, (68), 0x01); //disable DRC aic3111_write (aic3111_codec, (128 + 31), 0xc4); aic3111_write (aic3111_codec, (128 + 36), 0x28); //Left Analog Vol to HPL aic3111_write (aic3111_codec, (128 + 37), 0x28); //Right Analog Vol to HPL aic3111_write (aic3111_codec, (128 + 40), 0x4f); //HPL driver PGA aic3111_write (aic3111_codec, (128 + 41), 0x4f); //HPR driver PGA #ifdef AIC3111_DEBUG aic3111_proc_init(); #endif aic3111_set_bias_level (codec, SND_SOC_BIAS_STANDBY); return 0; } /* *---------------------------------------------------------------------------- * Function : aic3111_remove * Purpose : to remove aic3111 soc device * *---------------------------------------------------------------------------- */ static int aic3111_remove (struct snd_soc_codec *codec) { AIC_DBG ("CODEC::%s\n", __FUNCTION__); /* Disable HPDET irq */ //disable_irq_nosync (HP_DET_PIN); /* power down chip */ aic3111_set_bias_level (codec, SND_SOC_BIAS_OFF); return 0; } /* *---------------------------------------------------------------------------- * Function : aic3111_suspend * Purpose : This function is to suspend the AIC3111 driver. 
* *---------------------------------------------------------------------------- */ static int aic3111_suspend (struct snd_soc_codec *codec, pm_message_t state) { AIC_DBG ("CODEC::%s\n", __FUNCTION__); aic3111_set_bias_level (codec, SND_SOC_BIAS_STANDBY); aic3111_soft_reset();//sai return 0; } /* *---------------------------------------------------------------------------- * Function : aic3111_resume * Purpose : This function is to resume the AIC3111 driver * *---------------------------------------------------------------------------- */ static int aic3111_resume (struct snd_soc_codec *codec) { //isHSin = gpio_get_value(HP_DET_PIN); aic3111_set_bias_level (codec, SND_SOC_BIAS_STANDBY); //aic3111_set_bias_level(codec, codec->suspend_bias_level); return 0; } /* *---------------------------------------------------------------------------- * @struct snd_soc_codec_device | * This structure is soc audio codec device sturecute which pointer * to basic functions aic3111_probe(), aic3111_remove(), * aic3111_suspend() and aic3111_resume() *---------------------------------------------------------------------------- */ static struct snd_soc_codec_driver soc_codec_dev_aic3111 = { .probe = aic3111_probe, .remove = aic3111_remove, .suspend = aic3111_suspend, .resume = aic3111_resume, .set_bias_level = aic3111_set_bias_level, .reg_cache_size = ARRAY_SIZE(aic31xx_reg), .reg_word_size = sizeof(u16), .reg_cache_default = aic31xx_reg, .reg_cache_step = 1, }; static const struct i2c_device_id tlv320aic3111_i2c_id[] = { { "aic3111", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, tlv320aic3111_i2c_id); static int tlv320aic3111_i2c_probe(struct i2c_client *i2c, const struct i2c_device_id *id) { struct aic3111_priv *aic3111; int ret; aic3111 = kzalloc(sizeof(struct aic3111_priv), GFP_KERNEL); if (NULL == aic3111) return -ENOMEM; aic3111_i2c = i2c; i2c_set_clientdata(i2c, aic3111); aic3111_privdata = aic3111; ret = snd_soc_register_codec(&i2c->dev, &soc_codec_dev_aic3111, aic3111_dai, 
ARRAY_SIZE(aic3111_dai)); if (ret < 0) kfree(aic3111); return ret; } static __devexit int tlv320aic3111_i2c_remove(struct i2c_client *client) { snd_soc_unregister_codec(&client->dev); kfree(i2c_get_clientdata(client)); return 0; } struct i2c_driver tlv320aic3111_i2c_driver = { .driver = { .name = "AIC3111", .owner = THIS_MODULE, }, .probe = tlv320aic3111_i2c_probe, .remove = __devexit_p(tlv320aic3111_i2c_remove), .id_table = tlv320aic3111_i2c_id, }; static int __init tlv320aic3111_init (void) { return i2c_add_driver(&tlv320aic3111_i2c_driver); } static void __exit tlv320aic3111_exit (void) { i2c_del_driver(&tlv320aic3111_i2c_driver); } module_init (tlv320aic3111_init); module_exit (tlv320aic3111_exit); #ifdef CONFIG_PROC_FS #include <linux/proc_fs.h> #include <linux/seq_file.h> static int proc_3110_reg_show (struct seq_file *s, void *v) { struct snd_soc_codec *codec = aic3111_codec; int reg; u8 *cache = codec->reg_cache; seq_printf (s, "========3110register========\n"); for (reg = 0; reg < 256; reg++) { if (reg == 0) seq_printf (s, "Page 0\n"); if (reg == 128) seq_printf (s, "\nPage 1\n"); if (reg%8 == 0 && reg != 0 && reg != 128) seq_printf (s, "\n"); seq_printf (s, "[%d]0x%02x, ",reg,aic3111_read (codec, reg)); } seq_printf (s, "\n========3110cache========\n"); for (reg = 0; reg < codec->reg_size; reg++) { if (reg == 0) seq_printf (s, "Page 0\n"); if (reg == 128) seq_printf (s, "\nPage 1\n"); if (reg%16 == 0 && reg != 0 && reg != 128) seq_printf (s, "\n"); seq_printf (s, "0x%02x, ",cache[reg]); } printk ("\n==========================\n"); return 0; } static int proc_3110_reg_open (struct inode *inode, struct file *file) { return single_open (file, proc_3110_reg_show, NULL); } static const struct file_operations proc_3110_reg_fops = { .open = proc_3110_reg_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static int __init codec_proc_init (void) { proc_create ("aic3110_register", 0, NULL, &proc_3110_reg_fops); return 0; } late_initcall 
(codec_proc_init); #endif /* CONFIG_PROC_FS */ MODULE_DESCRIPTION (" ASoC TLV320AIC3111 codec driver "); MODULE_AUTHOR (" Jaz B John <jazbjohn@mistralsolutions.com> "); MODULE_LICENSE ("GPL");
gpl-2.0
nuxeh/u-boot
board/ti/beagle/led.c
133
2194
/*
 * Copyright (c) 2010 Texas Instruments, Inc.
 * Jason Kridner <jkridner@beagleboard.org>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of
 * the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
 * MA 02111-1307 USA
 */

#include <common.h>
#include <status_led.h>
#include <asm/arch/cpu.h>
#include <asm/io.h>
#include <asm/arch/sys_proto.h>
#include <asm/gpio.h>

/* GPIO numbers wired to the two BeagleBoard user LEDs */
#define BEAGLE_LED_USR0	150
#define BEAGLE_LED_USR1	149

#ifdef STATUS_LED_GREEN
/* Convenience wrappers used by the generic status-LED core. */
void green_led_off(void)
{
	__led_set(STATUS_LED_GREEN, 0);
}

void green_led_on(void)
{
	__led_set(STATUS_LED_GREEN, 1);
}
#endif

/*
 * Initialise the LED(s) selected by @mask to @state.
 * No board-specific setup is needed beyond driving the GPIO,
 * so simply delegate to __led_set().
 */
void __led_init(led_id_t mask, int state)
{
	__led_set(mask, state);
}

/*
 * Invert the current state of the LED(s) selected by @mask.
 *
 * Fix: the previous code forced the pin low via
 * gpio_direction_output(gpio, 0) *before* sampling it, so the
 * subsequent gpio_get_value() always read back 0 and the LED was
 * switched on every call instead of toggling.  Sample the pin first,
 * then drive the inverted level.
 */
void __led_toggle(led_id_t mask)
{
	int toggle_gpio = 0;

#ifdef STATUS_LED_BIT
	if (!toggle_gpio && (STATUS_LED_BIT & mask))
		toggle_gpio = BEAGLE_LED_USR0;
#endif
#ifdef STATUS_LED_BIT1
	if (!toggle_gpio && (STATUS_LED_BIT1 & mask))
		toggle_gpio = BEAGLE_LED_USR1;
#endif
	if (toggle_gpio) {
		if (!gpio_request(toggle_gpio, "")) {
			/*
			 * NOTE(review): assumes gpio_get_value() returns the
			 * currently driven level for a pin configured as an
			 * output -- confirm against the OMAP3 GPIO driver.
			 */
			int state = gpio_get_value(toggle_gpio);

			gpio_direction_output(toggle_gpio, !state);
		}
	}
}

/*
 * Drive the LED(s) selected by @mask to @state (0 = off, non-zero = on).
 *
 * Fix: configure the pin directly to the requested level instead of
 * first forcing it low and then writing @state, which produced a brief
 * off-glitch whenever a lit LED was set on again.
 */
void __led_set(led_id_t mask, int state)
{
#ifdef STATUS_LED_BIT
	if (STATUS_LED_BIT & mask) {
		if (!gpio_request(BEAGLE_LED_USR0, ""))
			gpio_direction_output(BEAGLE_LED_USR0, state);
	}
#endif
#ifdef STATUS_LED_BIT1
	if (STATUS_LED_BIT1 & mask) {
		if (!gpio_request(BEAGLE_LED_USR1, ""))
			gpio_direction_output(BEAGLE_LED_USR1, state);
	}
#endif
}
gpl-2.0
wilmarcardonac/fisher-mcmc
lapack-3.5.0/BLAS/TESTING/zblat3.f
133
131995
*> \brief \b ZBLAT3 * * =========== DOCUMENTATION =========== * * Online html documentation available at * http://www.netlib.org/lapack/explore-html/ * * Definition: * =========== * * PROGRAM ZBLAT3 * * *> \par Purpose: * ============= *> *> \verbatim *> *> Test program for the COMPLEX*16 Level 3 Blas. *> *> The program must be driven by a short data file. The first 14 records *> of the file are read using list-directed input, the last 9 records *> are read using the format ( A6, L2 ). An annotated example of a data *> file can be obtained by deleting the first 3 characters from the *> following 23 lines: *> 'zblat3.out' NAME OF SUMMARY OUTPUT FILE *> 6 UNIT NUMBER OF SUMMARY FILE *> 'ZBLAT3.SNAP' NAME OF SNAPSHOT OUTPUT FILE *> -1 UNIT NUMBER OF SNAPSHOT FILE (NOT USED IF .LT. 0) *> F LOGICAL FLAG, T TO REWIND SNAPSHOT FILE AFTER EACH RECORD. *> F LOGICAL FLAG, T TO STOP ON FAILURES. *> T LOGICAL FLAG, T TO TEST ERROR EXITS. *> 16.0 THRESHOLD VALUE OF TEST RATIO *> 6 NUMBER OF VALUES OF N *> 0 1 2 3 5 9 VALUES OF N *> 3 NUMBER OF VALUES OF ALPHA *> (0.0,0.0) (1.0,0.0) (0.7,-0.9) VALUES OF ALPHA *> 3 NUMBER OF VALUES OF BETA *> (0.0,0.0) (1.0,0.0) (1.3,-1.1) VALUES OF BETA *> ZGEMM T PUT F FOR NO TEST. SAME COLUMNS. *> ZHEMM T PUT F FOR NO TEST. SAME COLUMNS. *> ZSYMM T PUT F FOR NO TEST. SAME COLUMNS. *> ZTRMM T PUT F FOR NO TEST. SAME COLUMNS. *> ZTRSM T PUT F FOR NO TEST. SAME COLUMNS. *> ZHERK T PUT F FOR NO TEST. SAME COLUMNS. *> ZSYRK T PUT F FOR NO TEST. SAME COLUMNS. *> ZHER2K T PUT F FOR NO TEST. SAME COLUMNS. *> ZSYR2K T PUT F FOR NO TEST. SAME COLUMNS. *> *> *> Further Details *> =============== *> *> See: *> *> Dongarra J. J., Du Croz J. J., Duff I. S. and Hammarling S. *> A Set of Level 3 Basic Linear Algebra Subprograms. *> *> Technical Memorandum No.88 (Revision 1), Mathematics and *> Computer Science Division, Argonne National Laboratory, 9700 *> South Cass Avenue, Argonne, Illinois 60439, US. *> *> -- Written on 8-February-1989. 
*> Jack Dongarra, Argonne National Laboratory. *> Iain Duff, AERE Harwell. *> Jeremy Du Croz, Numerical Algorithms Group Ltd. *> Sven Hammarling, Numerical Algorithms Group Ltd. *> *> 10-9-00: Change STATUS='NEW' to 'UNKNOWN' so that the testers *> can be run multiple times without deleting generated *> output files (susan) *> \endverbatim * * Authors: * ======== * *> \author Univ. of Tennessee *> \author Univ. of California Berkeley *> \author Univ. of Colorado Denver *> \author NAG Ltd. * *> \date April 2012 * *> \ingroup complex16_blas_testing * * ===================================================================== PROGRAM ZBLAT3 * * -- Reference BLAS test routine (version 3.4.1) -- * -- Reference BLAS is a software package provided by Univ. of Tennessee, -- * -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- * April 2012 * * ===================================================================== * * .. Parameters .. INTEGER NIN PARAMETER ( NIN = 5 ) INTEGER NSUBS PARAMETER ( NSUBS = 9 ) COMPLEX*16 ZERO, ONE PARAMETER ( ZERO = ( 0.0D0, 0.0D0 ), $ ONE = ( 1.0D0, 0.0D0 ) ) DOUBLE PRECISION RZERO PARAMETER ( RZERO = 0.0D0 ) INTEGER NMAX PARAMETER ( NMAX = 65 ) INTEGER NIDMAX, NALMAX, NBEMAX PARAMETER ( NIDMAX = 9, NALMAX = 7, NBEMAX = 7 ) * .. Local Scalars .. DOUBLE PRECISION EPS, ERR, THRESH INTEGER I, ISNUM, J, N, NALF, NBET, NIDIM, NOUT, NTRA LOGICAL FATAL, LTESTT, REWI, SAME, SFATAL, TRACE, $ TSTERR CHARACTER*1 TRANSA, TRANSB CHARACTER*6 SNAMET CHARACTER*32 SNAPS, SUMMRY * .. Local Arrays .. COMPLEX*16 AA( NMAX*NMAX ), AB( NMAX, 2*NMAX ), $ ALF( NALMAX ), AS( NMAX*NMAX ), $ BB( NMAX*NMAX ), BET( NBEMAX ), $ BS( NMAX*NMAX ), C( NMAX, NMAX ), $ CC( NMAX*NMAX ), CS( NMAX*NMAX ), CT( NMAX ), $ W( 2*NMAX ) DOUBLE PRECISION G( NMAX ) INTEGER IDIM( NIDMAX ) LOGICAL LTEST( NSUBS ) CHARACTER*6 SNAMES( NSUBS ) * .. External Functions .. DOUBLE PRECISION DDIFF LOGICAL LZE EXTERNAL DDIFF, LZE * .. External Subroutines .. 
EXTERNAL ZCHK1, ZCHK2, ZCHK3, ZCHK4, ZCHK5, ZCHKE, ZMMCH * .. Intrinsic Functions .. INTRINSIC MAX, MIN * .. Scalars in Common .. INTEGER INFOT, NOUTC LOGICAL LERR, OK CHARACTER*6 SRNAMT * .. Common blocks .. COMMON /INFOC/INFOT, NOUTC, OK, LERR COMMON /SRNAMC/SRNAMT * .. Data statements .. DATA SNAMES/'ZGEMM ', 'ZHEMM ', 'ZSYMM ', 'ZTRMM ', $ 'ZTRSM ', 'ZHERK ', 'ZSYRK ', 'ZHER2K', $ 'ZSYR2K'/ * .. Executable Statements .. * * Read name and unit number for summary output file and open file. * READ( NIN, FMT = * )SUMMRY READ( NIN, FMT = * )NOUT OPEN( NOUT, FILE = SUMMRY, STATUS = 'UNKNOWN' ) NOUTC = NOUT * * Read name and unit number for snapshot output file and open file. * READ( NIN, FMT = * )SNAPS READ( NIN, FMT = * )NTRA TRACE = NTRA.GE.0 IF( TRACE )THEN OPEN( NTRA, FILE = SNAPS, STATUS = 'UNKNOWN' ) END IF * Read the flag that directs rewinding of the snapshot file. READ( NIN, FMT = * )REWI REWI = REWI.AND.TRACE * Read the flag that directs stopping on any failure. READ( NIN, FMT = * )SFATAL * Read the flag that indicates whether error exits are to be tested. READ( NIN, FMT = * )TSTERR * Read the threshold value of the test ratio READ( NIN, FMT = * )THRESH * * Read and check the parameter values for the tests. * * Values of N READ( NIN, FMT = * )NIDIM IF( NIDIM.LT.1.OR.NIDIM.GT.NIDMAX )THEN WRITE( NOUT, FMT = 9997 )'N', NIDMAX GO TO 220 END IF READ( NIN, FMT = * )( IDIM( I ), I = 1, NIDIM ) DO 10 I = 1, NIDIM IF( IDIM( I ).LT.0.OR.IDIM( I ).GT.NMAX )THEN WRITE( NOUT, FMT = 9996 )NMAX GO TO 220 END IF 10 CONTINUE * Values of ALPHA READ( NIN, FMT = * )NALF IF( NALF.LT.1.OR.NALF.GT.NALMAX )THEN WRITE( NOUT, FMT = 9997 )'ALPHA', NALMAX GO TO 220 END IF READ( NIN, FMT = * )( ALF( I ), I = 1, NALF ) * Values of BETA READ( NIN, FMT = * )NBET IF( NBET.LT.1.OR.NBET.GT.NBEMAX )THEN WRITE( NOUT, FMT = 9997 )'BETA', NBEMAX GO TO 220 END IF READ( NIN, FMT = * )( BET( I ), I = 1, NBET ) * * Report values of parameters. 
* WRITE( NOUT, FMT = 9995 ) WRITE( NOUT, FMT = 9994 )( IDIM( I ), I = 1, NIDIM ) WRITE( NOUT, FMT = 9993 )( ALF( I ), I = 1, NALF ) WRITE( NOUT, FMT = 9992 )( BET( I ), I = 1, NBET ) IF( .NOT.TSTERR )THEN WRITE( NOUT, FMT = * ) WRITE( NOUT, FMT = 9984 ) END IF WRITE( NOUT, FMT = * ) WRITE( NOUT, FMT = 9999 )THRESH WRITE( NOUT, FMT = * ) * * Read names of subroutines and flags which indicate * whether they are to be tested. * DO 20 I = 1, NSUBS LTEST( I ) = .FALSE. 20 CONTINUE 30 READ( NIN, FMT = 9988, END = 60 )SNAMET, LTESTT DO 40 I = 1, NSUBS IF( SNAMET.EQ.SNAMES( I ) ) $ GO TO 50 40 CONTINUE WRITE( NOUT, FMT = 9990 )SNAMET STOP 50 LTEST( I ) = LTESTT GO TO 30 * 60 CONTINUE CLOSE ( NIN ) * * Compute EPS (the machine precision). * EPS = EPSILON(RZERO) WRITE( NOUT, FMT = 9998 )EPS * * Check the reliability of ZMMCH using exact data. * N = MIN( 32, NMAX ) DO 100 J = 1, N DO 90 I = 1, N AB( I, J ) = MAX( I - J + 1, 0 ) 90 CONTINUE AB( J, NMAX + 1 ) = J AB( 1, NMAX + J ) = J C( J, 1 ) = ZERO 100 CONTINUE DO 110 J = 1, N CC( J ) = J*( ( J + 1 )*J )/2 - ( ( J + 1 )*J*( J - 1 ) )/3 110 CONTINUE * CC holds the exact result. On exit from ZMMCH CT holds * the result computed by ZMMCH. TRANSA = 'N' TRANSB = 'N' CALL ZMMCH( TRANSA, TRANSB, N, 1, N, ONE, AB, NMAX, $ AB( 1, NMAX + 1 ), NMAX, ZERO, C, NMAX, CT, G, CC, $ NMAX, EPS, ERR, FATAL, NOUT, .TRUE. ) SAME = LZE( CC, CT, N ) IF( .NOT.SAME.OR.ERR.NE.RZERO )THEN WRITE( NOUT, FMT = 9989 )TRANSA, TRANSB, SAME, ERR STOP END IF TRANSB = 'C' CALL ZMMCH( TRANSA, TRANSB, N, 1, N, ONE, AB, NMAX, $ AB( 1, NMAX + 1 ), NMAX, ZERO, C, NMAX, CT, G, CC, $ NMAX, EPS, ERR, FATAL, NOUT, .TRUE. 
) SAME = LZE( CC, CT, N ) IF( .NOT.SAME.OR.ERR.NE.RZERO )THEN WRITE( NOUT, FMT = 9989 )TRANSA, TRANSB, SAME, ERR STOP END IF DO 120 J = 1, N AB( J, NMAX + 1 ) = N - J + 1 AB( 1, NMAX + J ) = N - J + 1 120 CONTINUE DO 130 J = 1, N CC( N - J + 1 ) = J*( ( J + 1 )*J )/2 - $ ( ( J + 1 )*J*( J - 1 ) )/3 130 CONTINUE TRANSA = 'C' TRANSB = 'N' CALL ZMMCH( TRANSA, TRANSB, N, 1, N, ONE, AB, NMAX, $ AB( 1, NMAX + 1 ), NMAX, ZERO, C, NMAX, CT, G, CC, $ NMAX, EPS, ERR, FATAL, NOUT, .TRUE. ) SAME = LZE( CC, CT, N ) IF( .NOT.SAME.OR.ERR.NE.RZERO )THEN WRITE( NOUT, FMT = 9989 )TRANSA, TRANSB, SAME, ERR STOP END IF TRANSB = 'C' CALL ZMMCH( TRANSA, TRANSB, N, 1, N, ONE, AB, NMAX, $ AB( 1, NMAX + 1 ), NMAX, ZERO, C, NMAX, CT, G, CC, $ NMAX, EPS, ERR, FATAL, NOUT, .TRUE. ) SAME = LZE( CC, CT, N ) IF( .NOT.SAME.OR.ERR.NE.RZERO )THEN WRITE( NOUT, FMT = 9989 )TRANSA, TRANSB, SAME, ERR STOP END IF * * Test each subroutine in turn. * DO 200 ISNUM = 1, NSUBS WRITE( NOUT, FMT = * ) IF( .NOT.LTEST( ISNUM ) )THEN * Subprogram is not to be tested. WRITE( NOUT, FMT = 9987 )SNAMES( ISNUM ) ELSE SRNAMT = SNAMES( ISNUM ) * Test error exits. IF( TSTERR )THEN CALL ZCHKE( ISNUM, SNAMES( ISNUM ), NOUT ) WRITE( NOUT, FMT = * ) END IF * Test computations. INFOT = 0 OK = .TRUE. FATAL = .FALSE. GO TO ( 140, 150, 150, 160, 160, 170, 170, $ 180, 180 )ISNUM * Test ZGEMM, 01. 140 CALL ZCHK1( SNAMES( ISNUM ), EPS, THRESH, NOUT, NTRA, TRACE, $ REWI, FATAL, NIDIM, IDIM, NALF, ALF, NBET, BET, $ NMAX, AB, AA, AS, AB( 1, NMAX + 1 ), BB, BS, C, $ CC, CS, CT, G ) GO TO 190 * Test ZHEMM, 02, ZSYMM, 03. 150 CALL ZCHK2( SNAMES( ISNUM ), EPS, THRESH, NOUT, NTRA, TRACE, $ REWI, FATAL, NIDIM, IDIM, NALF, ALF, NBET, BET, $ NMAX, AB, AA, AS, AB( 1, NMAX + 1 ), BB, BS, C, $ CC, CS, CT, G ) GO TO 190 * Test ZTRMM, 04, ZTRSM, 05. 160 CALL ZCHK3( SNAMES( ISNUM ), EPS, THRESH, NOUT, NTRA, TRACE, $ REWI, FATAL, NIDIM, IDIM, NALF, ALF, NMAX, AB, $ AA, AS, AB( 1, NMAX + 1 ), BB, BS, CT, G, C ) GO TO 190 * Test ZHERK, 06, ZSYRK, 07. 
170 CALL ZCHK4( SNAMES( ISNUM ), EPS, THRESH, NOUT, NTRA, TRACE, $ REWI, FATAL, NIDIM, IDIM, NALF, ALF, NBET, BET, $ NMAX, AB, AA, AS, AB( 1, NMAX + 1 ), BB, BS, C, $ CC, CS, CT, G ) GO TO 190 * Test ZHER2K, 08, ZSYR2K, 09. 180 CALL ZCHK5( SNAMES( ISNUM ), EPS, THRESH, NOUT, NTRA, TRACE, $ REWI, FATAL, NIDIM, IDIM, NALF, ALF, NBET, BET, $ NMAX, AB, AA, AS, BB, BS, C, CC, CS, CT, G, W ) GO TO 190 * 190 IF( FATAL.AND.SFATAL ) $ GO TO 210 END IF 200 CONTINUE WRITE( NOUT, FMT = 9986 ) GO TO 230 * 210 CONTINUE WRITE( NOUT, FMT = 9985 ) GO TO 230 * 220 CONTINUE WRITE( NOUT, FMT = 9991 ) * 230 CONTINUE IF( TRACE ) $ CLOSE ( NTRA ) CLOSE ( NOUT ) STOP * 9999 FORMAT( ' ROUTINES PASS COMPUTATIONAL TESTS IF TEST RATIO IS LES', $ 'S THAN', F8.2 ) 9998 FORMAT( ' RELATIVE MACHINE PRECISION IS TAKEN TO BE', 1P, D9.1 ) 9997 FORMAT( ' NUMBER OF VALUES OF ', A, ' IS LESS THAN 1 OR GREATER ', $ 'THAN ', I2 ) 9996 FORMAT( ' VALUE OF N IS LESS THAN 0 OR GREATER THAN ', I2 ) 9995 FORMAT( ' TESTS OF THE COMPLEX*16 LEVEL 3 BLAS', //' THE F', $ 'OLLOWING PARAMETER VALUES WILL BE USED:' ) 9994 FORMAT( ' FOR N ', 9I6 ) 9993 FORMAT( ' FOR ALPHA ', $ 7( '(', F4.1, ',', F4.1, ') ', : ) ) 9992 FORMAT( ' FOR BETA ', $ 7( '(', F4.1, ',', F4.1, ') ', : ) ) 9991 FORMAT( ' AMEND DATA FILE OR INCREASE ARRAY SIZES IN PROGRAM', $ /' ******* TESTS ABANDONED *******' ) 9990 FORMAT( ' SUBPROGRAM NAME ', A6, ' NOT RECOGNIZED', /' ******* T', $ 'ESTS ABANDONED *******' ) 9989 FORMAT( ' ERROR IN ZMMCH - IN-LINE DOT PRODUCTS ARE BEING EVALU', $ 'ATED WRONGLY.', /' ZMMCH WAS CALLED WITH TRANSA = ', A1, $ ' AND TRANSB = ', A1, /' AND RETURNED SAME = ', L1, ' AND ', $ 'ERR = ', F12.3, '.', /' THIS MAY BE DUE TO FAULTS IN THE ', $ 'ARITHMETIC OR THE COMPILER.', /' ******* TESTS ABANDONED ', $ '*******' ) 9988 FORMAT( A6, L2 ) 9987 FORMAT( 1X, A6, ' WAS NOT TESTED' ) 9986 FORMAT( /' END OF TESTS' ) 9985 FORMAT( /' ******* FATAL ERROR - TESTS ABANDONED *******' ) 9984 FORMAT( ' ERROR-EXITS WILL NOT BE TESTED' ) * * 
End of ZBLAT3. * END SUBROUTINE ZCHK1( SNAME, EPS, THRESH, NOUT, NTRA, TRACE, REWI, $ FATAL, NIDIM, IDIM, NALF, ALF, NBET, BET, NMAX, $ A, AA, AS, B, BB, BS, C, CC, CS, CT, G ) * * Tests ZGEMM. * * Auxiliary routine for test program for Level 3 Blas. * * -- Written on 8-February-1989. * Jack Dongarra, Argonne National Laboratory. * Iain Duff, AERE Harwell. * Jeremy Du Croz, Numerical Algorithms Group Ltd. * Sven Hammarling, Numerical Algorithms Group Ltd. * * .. Parameters .. COMPLEX*16 ZERO PARAMETER ( ZERO = ( 0.0D0, 0.0D0 ) ) DOUBLE PRECISION RZERO PARAMETER ( RZERO = 0.0D0 ) * .. Scalar Arguments .. DOUBLE PRECISION EPS, THRESH INTEGER NALF, NBET, NIDIM, NMAX, NOUT, NTRA LOGICAL FATAL, REWI, TRACE CHARACTER*6 SNAME * .. Array Arguments .. COMPLEX*16 A( NMAX, NMAX ), AA( NMAX*NMAX ), ALF( NALF ), $ AS( NMAX*NMAX ), B( NMAX, NMAX ), $ BB( NMAX*NMAX ), BET( NBET ), BS( NMAX*NMAX ), $ C( NMAX, NMAX ), CC( NMAX*NMAX ), $ CS( NMAX*NMAX ), CT( NMAX ) DOUBLE PRECISION G( NMAX ) INTEGER IDIM( NIDIM ) * .. Local Scalars .. COMPLEX*16 ALPHA, ALS, BETA, BLS DOUBLE PRECISION ERR, ERRMAX INTEGER I, IA, IB, ICA, ICB, IK, IM, IN, K, KS, LAA, $ LBB, LCC, LDA, LDAS, LDB, LDBS, LDC, LDCS, M, $ MA, MB, MS, N, NA, NARGS, NB, NC, NS LOGICAL NULL, RESET, SAME, TRANA, TRANB CHARACTER*1 TRANAS, TRANBS, TRANSA, TRANSB CHARACTER*3 ICH * .. Local Arrays .. LOGICAL ISAME( 13 ) * .. External Functions .. LOGICAL LZE, LZERES EXTERNAL LZE, LZERES * .. External Subroutines .. EXTERNAL ZGEMM, ZMAKE, ZMMCH * .. Intrinsic Functions .. INTRINSIC MAX * .. Scalars in Common .. INTEGER INFOT, NOUTC LOGICAL LERR, OK * .. Common blocks .. COMMON /INFOC/INFOT, NOUTC, OK, LERR * .. Data statements .. DATA ICH/'NTC'/ * .. Executable Statements .. * NARGS = 13 NC = 0 RESET = .TRUE. ERRMAX = RZERO * DO 110 IM = 1, NIDIM M = IDIM( IM ) * DO 100 IN = 1, NIDIM N = IDIM( IN ) * Set LDC to 1 more than minimum value if room. LDC = M IF( LDC.LT.NMAX ) $ LDC = LDC + 1 * Skip tests if not enough room. 
IF( LDC.GT.NMAX ) $ GO TO 100 LCC = LDC*N NULL = N.LE.0.OR.M.LE.0 * DO 90 IK = 1, NIDIM K = IDIM( IK ) * DO 80 ICA = 1, 3 TRANSA = ICH( ICA: ICA ) TRANA = TRANSA.EQ.'T'.OR.TRANSA.EQ.'C' * IF( TRANA )THEN MA = K NA = M ELSE MA = M NA = K END IF * Set LDA to 1 more than minimum value if room. LDA = MA IF( LDA.LT.NMAX ) $ LDA = LDA + 1 * Skip tests if not enough room. IF( LDA.GT.NMAX ) $ GO TO 80 LAA = LDA*NA * * Generate the matrix A. * CALL ZMAKE( 'GE', ' ', ' ', MA, NA, A, NMAX, AA, LDA, $ RESET, ZERO ) * DO 70 ICB = 1, 3 TRANSB = ICH( ICB: ICB ) TRANB = TRANSB.EQ.'T'.OR.TRANSB.EQ.'C' * IF( TRANB )THEN MB = N NB = K ELSE MB = K NB = N END IF * Set LDB to 1 more than minimum value if room. LDB = MB IF( LDB.LT.NMAX ) $ LDB = LDB + 1 * Skip tests if not enough room. IF( LDB.GT.NMAX ) $ GO TO 70 LBB = LDB*NB * * Generate the matrix B. * CALL ZMAKE( 'GE', ' ', ' ', MB, NB, B, NMAX, BB, $ LDB, RESET, ZERO ) * DO 60 IA = 1, NALF ALPHA = ALF( IA ) * DO 50 IB = 1, NBET BETA = BET( IB ) * * Generate the matrix C. * CALL ZMAKE( 'GE', ' ', ' ', M, N, C, NMAX, $ CC, LDC, RESET, ZERO ) * NC = NC + 1 * * Save every datum before calling the * subroutine. * TRANAS = TRANSA TRANBS = TRANSB MS = M NS = N KS = K ALS = ALPHA DO 10 I = 1, LAA AS( I ) = AA( I ) 10 CONTINUE LDAS = LDA DO 20 I = 1, LBB BS( I ) = BB( I ) 20 CONTINUE LDBS = LDB BLS = BETA DO 30 I = 1, LCC CS( I ) = CC( I ) 30 CONTINUE LDCS = LDC * * Call the subroutine. * IF( TRACE ) $ WRITE( NTRA, FMT = 9995 )NC, SNAME, $ TRANSA, TRANSB, M, N, K, ALPHA, LDA, LDB, $ BETA, LDC IF( REWI ) $ REWIND NTRA CALL ZGEMM( TRANSA, TRANSB, M, N, K, ALPHA, $ AA, LDA, BB, LDB, BETA, CC, LDC ) * * Check if error-exit was taken incorrectly. * IF( .NOT.OK )THEN WRITE( NOUT, FMT = 9994 ) FATAL = .TRUE. GO TO 120 END IF * * See what data changed inside subroutines. 
* ISAME( 1 ) = TRANSA.EQ.TRANAS ISAME( 2 ) = TRANSB.EQ.TRANBS ISAME( 3 ) = MS.EQ.M ISAME( 4 ) = NS.EQ.N ISAME( 5 ) = KS.EQ.K ISAME( 6 ) = ALS.EQ.ALPHA ISAME( 7 ) = LZE( AS, AA, LAA ) ISAME( 8 ) = LDAS.EQ.LDA ISAME( 9 ) = LZE( BS, BB, LBB ) ISAME( 10 ) = LDBS.EQ.LDB ISAME( 11 ) = BLS.EQ.BETA IF( NULL )THEN ISAME( 12 ) = LZE( CS, CC, LCC ) ELSE ISAME( 12 ) = LZERES( 'GE', ' ', M, N, CS, $ CC, LDC ) END IF ISAME( 13 ) = LDCS.EQ.LDC * * If data was incorrectly changed, report * and return. * SAME = .TRUE. DO 40 I = 1, NARGS SAME = SAME.AND.ISAME( I ) IF( .NOT.ISAME( I ) ) $ WRITE( NOUT, FMT = 9998 )I 40 CONTINUE IF( .NOT.SAME )THEN FATAL = .TRUE. GO TO 120 END IF * IF( .NOT.NULL )THEN * * Check the result. * CALL ZMMCH( TRANSA, TRANSB, M, N, K, $ ALPHA, A, NMAX, B, NMAX, BETA, $ C, NMAX, CT, G, CC, LDC, EPS, $ ERR, FATAL, NOUT, .TRUE. ) ERRMAX = MAX( ERRMAX, ERR ) * If got really bad answer, report and * return. IF( FATAL ) $ GO TO 120 END IF * 50 CONTINUE * 60 CONTINUE * 70 CONTINUE * 80 CONTINUE * 90 CONTINUE * 100 CONTINUE * 110 CONTINUE * * Report result. * IF( ERRMAX.LT.THRESH )THEN WRITE( NOUT, FMT = 9999 )SNAME, NC ELSE WRITE( NOUT, FMT = 9997 )SNAME, NC, ERRMAX END IF GO TO 130 * 120 CONTINUE WRITE( NOUT, FMT = 9996 )SNAME WRITE( NOUT, FMT = 9995 )NC, SNAME, TRANSA, TRANSB, M, N, K, $ ALPHA, LDA, LDB, BETA, LDC * 130 CONTINUE RETURN * 9999 FORMAT( ' ', A6, ' PASSED THE COMPUTATIONAL TESTS (', I6, ' CALL', $ 'S)' ) 9998 FORMAT( ' ******* FATAL ERROR - PARAMETER NUMBER ', I2, ' WAS CH', $ 'ANGED INCORRECTLY *******' ) 9997 FORMAT( ' ', A6, ' COMPLETED THE COMPUTATIONAL TESTS (', I6, ' C', $ 'ALLS)', /' ******* BUT WITH MAXIMUM TEST RATIO', F8.2, $ ' - SUSPECT *******' ) 9996 FORMAT( ' ******* ', A6, ' FAILED ON CALL NUMBER:' ) 9995 FORMAT( 1X, I6, ': ', A6, '(''', A1, ''',''', A1, ''',', $ 3( I3, ',' ), '(', F4.1, ',', F4.1, '), A,', I3, ', B,', I3, $ ',(', F4.1, ',', F4.1, '), C,', I3, ').' 
) 9994 FORMAT( ' ******* FATAL ERROR - ERROR-EXIT TAKEN ON VALID CALL *', $ '******' ) * * End of ZCHK1. * END SUBROUTINE ZCHK2( SNAME, EPS, THRESH, NOUT, NTRA, TRACE, REWI, $ FATAL, NIDIM, IDIM, NALF, ALF, NBET, BET, NMAX, $ A, AA, AS, B, BB, BS, C, CC, CS, CT, G ) * * Tests ZHEMM and ZSYMM. * * Auxiliary routine for test program for Level 3 Blas. * * -- Written on 8-February-1989. * Jack Dongarra, Argonne National Laboratory. * Iain Duff, AERE Harwell. * Jeremy Du Croz, Numerical Algorithms Group Ltd. * Sven Hammarling, Numerical Algorithms Group Ltd. * * .. Parameters .. COMPLEX*16 ZERO PARAMETER ( ZERO = ( 0.0D0, 0.0D0 ) ) DOUBLE PRECISION RZERO PARAMETER ( RZERO = 0.0D0 ) * .. Scalar Arguments .. DOUBLE PRECISION EPS, THRESH INTEGER NALF, NBET, NIDIM, NMAX, NOUT, NTRA LOGICAL FATAL, REWI, TRACE CHARACTER*6 SNAME * .. Array Arguments .. COMPLEX*16 A( NMAX, NMAX ), AA( NMAX*NMAX ), ALF( NALF ), $ AS( NMAX*NMAX ), B( NMAX, NMAX ), $ BB( NMAX*NMAX ), BET( NBET ), BS( NMAX*NMAX ), $ C( NMAX, NMAX ), CC( NMAX*NMAX ), $ CS( NMAX*NMAX ), CT( NMAX ) DOUBLE PRECISION G( NMAX ) INTEGER IDIM( NIDIM ) * .. Local Scalars .. COMPLEX*16 ALPHA, ALS, BETA, BLS DOUBLE PRECISION ERR, ERRMAX INTEGER I, IA, IB, ICS, ICU, IM, IN, LAA, LBB, LCC, $ LDA, LDAS, LDB, LDBS, LDC, LDCS, M, MS, N, NA, $ NARGS, NC, NS LOGICAL CONJ, LEFT, NULL, RESET, SAME CHARACTER*1 SIDE, SIDES, UPLO, UPLOS CHARACTER*2 ICHS, ICHU * .. Local Arrays .. LOGICAL ISAME( 13 ) * .. External Functions .. LOGICAL LZE, LZERES EXTERNAL LZE, LZERES * .. External Subroutines .. EXTERNAL ZHEMM, ZMAKE, ZMMCH, ZSYMM * .. Intrinsic Functions .. INTRINSIC MAX * .. Scalars in Common .. INTEGER INFOT, NOUTC LOGICAL LERR, OK * .. Common blocks .. COMMON /INFOC/INFOT, NOUTC, OK, LERR * .. Data statements .. DATA ICHS/'LR'/, ICHU/'UL'/ * .. Executable Statements .. CONJ = SNAME( 2: 3 ).EQ.'HE' * NARGS = 12 NC = 0 RESET = .TRUE. 
ERRMAX = RZERO * DO 100 IM = 1, NIDIM M = IDIM( IM ) * DO 90 IN = 1, NIDIM N = IDIM( IN ) * Set LDC to 1 more than minimum value if room. LDC = M IF( LDC.LT.NMAX ) $ LDC = LDC + 1 * Skip tests if not enough room. IF( LDC.GT.NMAX ) $ GO TO 90 LCC = LDC*N NULL = N.LE.0.OR.M.LE.0 * Set LDB to 1 more than minimum value if room. LDB = M IF( LDB.LT.NMAX ) $ LDB = LDB + 1 * Skip tests if not enough room. IF( LDB.GT.NMAX ) $ GO TO 90 LBB = LDB*N * * Generate the matrix B. * CALL ZMAKE( 'GE', ' ', ' ', M, N, B, NMAX, BB, LDB, RESET, $ ZERO ) * DO 80 ICS = 1, 2 SIDE = ICHS( ICS: ICS ) LEFT = SIDE.EQ.'L' * IF( LEFT )THEN NA = M ELSE NA = N END IF * Set LDA to 1 more than minimum value if room. LDA = NA IF( LDA.LT.NMAX ) $ LDA = LDA + 1 * Skip tests if not enough room. IF( LDA.GT.NMAX ) $ GO TO 80 LAA = LDA*NA * DO 70 ICU = 1, 2 UPLO = ICHU( ICU: ICU ) * * Generate the hermitian or symmetric matrix A. * CALL ZMAKE( SNAME( 2: 3 ), UPLO, ' ', NA, NA, A, NMAX, $ AA, LDA, RESET, ZERO ) * DO 60 IA = 1, NALF ALPHA = ALF( IA ) * DO 50 IB = 1, NBET BETA = BET( IB ) * * Generate the matrix C. * CALL ZMAKE( 'GE', ' ', ' ', M, N, C, NMAX, CC, $ LDC, RESET, ZERO ) * NC = NC + 1 * * Save every datum before calling the * subroutine. * SIDES = SIDE UPLOS = UPLO MS = M NS = N ALS = ALPHA DO 10 I = 1, LAA AS( I ) = AA( I ) 10 CONTINUE LDAS = LDA DO 20 I = 1, LBB BS( I ) = BB( I ) 20 CONTINUE LDBS = LDB BLS = BETA DO 30 I = 1, LCC CS( I ) = CC( I ) 30 CONTINUE LDCS = LDC * * Call the subroutine. * IF( TRACE ) $ WRITE( NTRA, FMT = 9995 )NC, SNAME, SIDE, $ UPLO, M, N, ALPHA, LDA, LDB, BETA, LDC IF( REWI ) $ REWIND NTRA IF( CONJ )THEN CALL ZHEMM( SIDE, UPLO, M, N, ALPHA, AA, LDA, $ BB, LDB, BETA, CC, LDC ) ELSE CALL ZSYMM( SIDE, UPLO, M, N, ALPHA, AA, LDA, $ BB, LDB, BETA, CC, LDC ) END IF * * Check if error-exit was taken incorrectly. * IF( .NOT.OK )THEN WRITE( NOUT, FMT = 9994 ) FATAL = .TRUE. GO TO 110 END IF * * See what data changed inside subroutines. 
* ISAME( 1 ) = SIDES.EQ.SIDE ISAME( 2 ) = UPLOS.EQ.UPLO ISAME( 3 ) = MS.EQ.M ISAME( 4 ) = NS.EQ.N ISAME( 5 ) = ALS.EQ.ALPHA ISAME( 6 ) = LZE( AS, AA, LAA ) ISAME( 7 ) = LDAS.EQ.LDA ISAME( 8 ) = LZE( BS, BB, LBB ) ISAME( 9 ) = LDBS.EQ.LDB ISAME( 10 ) = BLS.EQ.BETA IF( NULL )THEN ISAME( 11 ) = LZE( CS, CC, LCC ) ELSE ISAME( 11 ) = LZERES( 'GE', ' ', M, N, CS, $ CC, LDC ) END IF ISAME( 12 ) = LDCS.EQ.LDC * * If data was incorrectly changed, report and * return. * SAME = .TRUE. DO 40 I = 1, NARGS SAME = SAME.AND.ISAME( I ) IF( .NOT.ISAME( I ) ) $ WRITE( NOUT, FMT = 9998 )I 40 CONTINUE IF( .NOT.SAME )THEN FATAL = .TRUE. GO TO 110 END IF * IF( .NOT.NULL )THEN * * Check the result. * IF( LEFT )THEN CALL ZMMCH( 'N', 'N', M, N, M, ALPHA, A, $ NMAX, B, NMAX, BETA, C, NMAX, $ CT, G, CC, LDC, EPS, ERR, $ FATAL, NOUT, .TRUE. ) ELSE CALL ZMMCH( 'N', 'N', M, N, N, ALPHA, B, $ NMAX, A, NMAX, BETA, C, NMAX, $ CT, G, CC, LDC, EPS, ERR, $ FATAL, NOUT, .TRUE. ) END IF ERRMAX = MAX( ERRMAX, ERR ) * If got really bad answer, report and * return. IF( FATAL ) $ GO TO 110 END IF * 50 CONTINUE * 60 CONTINUE * 70 CONTINUE * 80 CONTINUE * 90 CONTINUE * 100 CONTINUE * * Report result. 
* IF( ERRMAX.LT.THRESH )THEN WRITE( NOUT, FMT = 9999 )SNAME, NC ELSE WRITE( NOUT, FMT = 9997 )SNAME, NC, ERRMAX END IF GO TO 120 * 110 CONTINUE WRITE( NOUT, FMT = 9996 )SNAME WRITE( NOUT, FMT = 9995 )NC, SNAME, SIDE, UPLO, M, N, ALPHA, LDA, $ LDB, BETA, LDC * 120 CONTINUE RETURN * 9999 FORMAT( ' ', A6, ' PASSED THE COMPUTATIONAL TESTS (', I6, ' CALL', $ 'S)' ) 9998 FORMAT( ' ******* FATAL ERROR - PARAMETER NUMBER ', I2, ' WAS CH', $ 'ANGED INCORRECTLY *******' ) 9997 FORMAT( ' ', A6, ' COMPLETED THE COMPUTATIONAL TESTS (', I6, ' C', $ 'ALLS)', /' ******* BUT WITH MAXIMUM TEST RATIO', F8.2, $ ' - SUSPECT *******' ) 9996 FORMAT( ' ******* ', A6, ' FAILED ON CALL NUMBER:' ) 9995 FORMAT( 1X, I6, ': ', A6, '(', 2( '''', A1, ''',' ), 2( I3, ',' ), $ '(', F4.1, ',', F4.1, '), A,', I3, ', B,', I3, ',(', F4.1, $ ',', F4.1, '), C,', I3, ') .' ) 9994 FORMAT( ' ******* FATAL ERROR - ERROR-EXIT TAKEN ON VALID CALL *', $ '******' ) * * End of ZCHK2. * END SUBROUTINE ZCHK3( SNAME, EPS, THRESH, NOUT, NTRA, TRACE, REWI, $ FATAL, NIDIM, IDIM, NALF, ALF, NMAX, A, AA, AS, $ B, BB, BS, CT, G, C ) * * Tests ZTRMM and ZTRSM. * * Auxiliary routine for test program for Level 3 Blas. * * -- Written on 8-February-1989. * Jack Dongarra, Argonne National Laboratory. * Iain Duff, AERE Harwell. * Jeremy Du Croz, Numerical Algorithms Group Ltd. * Sven Hammarling, Numerical Algorithms Group Ltd. * * .. Parameters .. COMPLEX*16 ZERO, ONE PARAMETER ( ZERO = ( 0.0D0, 0.0D0 ), $ ONE = ( 1.0D0, 0.0D0 ) ) DOUBLE PRECISION RZERO PARAMETER ( RZERO = 0.0D0 ) * .. Scalar Arguments .. DOUBLE PRECISION EPS, THRESH INTEGER NALF, NIDIM, NMAX, NOUT, NTRA LOGICAL FATAL, REWI, TRACE CHARACTER*6 SNAME * .. Array Arguments .. COMPLEX*16 A( NMAX, NMAX ), AA( NMAX*NMAX ), ALF( NALF ), $ AS( NMAX*NMAX ), B( NMAX, NMAX ), $ BB( NMAX*NMAX ), BS( NMAX*NMAX ), $ C( NMAX, NMAX ), CT( NMAX ) DOUBLE PRECISION G( NMAX ) INTEGER IDIM( NIDIM ) * .. Local Scalars .. 
COMPLEX*16 ALPHA, ALS DOUBLE PRECISION ERR, ERRMAX INTEGER I, IA, ICD, ICS, ICT, ICU, IM, IN, J, LAA, LBB, $ LDA, LDAS, LDB, LDBS, M, MS, N, NA, NARGS, NC, $ NS LOGICAL LEFT, NULL, RESET, SAME CHARACTER*1 DIAG, DIAGS, SIDE, SIDES, TRANAS, TRANSA, UPLO, $ UPLOS CHARACTER*2 ICHD, ICHS, ICHU CHARACTER*3 ICHT * .. Local Arrays .. LOGICAL ISAME( 13 ) * .. External Functions .. LOGICAL LZE, LZERES EXTERNAL LZE, LZERES * .. External Subroutines .. EXTERNAL ZMAKE, ZMMCH, ZTRMM, ZTRSM * .. Intrinsic Functions .. INTRINSIC MAX * .. Scalars in Common .. INTEGER INFOT, NOUTC LOGICAL LERR, OK * .. Common blocks .. COMMON /INFOC/INFOT, NOUTC, OK, LERR * .. Data statements .. DATA ICHU/'UL'/, ICHT/'NTC'/, ICHD/'UN'/, ICHS/'LR'/ * .. Executable Statements .. * NARGS = 11 NC = 0 RESET = .TRUE. ERRMAX = RZERO * Set up zero matrix for ZMMCH. DO 20 J = 1, NMAX DO 10 I = 1, NMAX C( I, J ) = ZERO 10 CONTINUE 20 CONTINUE * DO 140 IM = 1, NIDIM M = IDIM( IM ) * DO 130 IN = 1, NIDIM N = IDIM( IN ) * Set LDB to 1 more than minimum value if room. LDB = M IF( LDB.LT.NMAX ) $ LDB = LDB + 1 * Skip tests if not enough room. IF( LDB.GT.NMAX ) $ GO TO 130 LBB = LDB*N NULL = M.LE.0.OR.N.LE.0 * DO 120 ICS = 1, 2 SIDE = ICHS( ICS: ICS ) LEFT = SIDE.EQ.'L' IF( LEFT )THEN NA = M ELSE NA = N END IF * Set LDA to 1 more than minimum value if room. LDA = NA IF( LDA.LT.NMAX ) $ LDA = LDA + 1 * Skip tests if not enough room. IF( LDA.GT.NMAX ) $ GO TO 130 LAA = LDA*NA * DO 110 ICU = 1, 2 UPLO = ICHU( ICU: ICU ) * DO 100 ICT = 1, 3 TRANSA = ICHT( ICT: ICT ) * DO 90 ICD = 1, 2 DIAG = ICHD( ICD: ICD ) * DO 80 IA = 1, NALF ALPHA = ALF( IA ) * * Generate the matrix A. * CALL ZMAKE( 'TR', UPLO, DIAG, NA, NA, A, $ NMAX, AA, LDA, RESET, ZERO ) * * Generate the matrix B. * CALL ZMAKE( 'GE', ' ', ' ', M, N, B, NMAX, $ BB, LDB, RESET, ZERO ) * NC = NC + 1 * * Save every datum before calling the * subroutine. 
* SIDES = SIDE UPLOS = UPLO TRANAS = TRANSA DIAGS = DIAG MS = M NS = N ALS = ALPHA DO 30 I = 1, LAA AS( I ) = AA( I ) 30 CONTINUE LDAS = LDA DO 40 I = 1, LBB BS( I ) = BB( I ) 40 CONTINUE LDBS = LDB * * Call the subroutine. * IF( SNAME( 4: 5 ).EQ.'MM' )THEN IF( TRACE ) $ WRITE( NTRA, FMT = 9995 )NC, SNAME, $ SIDE, UPLO, TRANSA, DIAG, M, N, ALPHA, $ LDA, LDB IF( REWI ) $ REWIND NTRA CALL ZTRMM( SIDE, UPLO, TRANSA, DIAG, M, $ N, ALPHA, AA, LDA, BB, LDB ) ELSE IF( SNAME( 4: 5 ).EQ.'SM' )THEN IF( TRACE ) $ WRITE( NTRA, FMT = 9995 )NC, SNAME, $ SIDE, UPLO, TRANSA, DIAG, M, N, ALPHA, $ LDA, LDB IF( REWI ) $ REWIND NTRA CALL ZTRSM( SIDE, UPLO, TRANSA, DIAG, M, $ N, ALPHA, AA, LDA, BB, LDB ) END IF * * Check if error-exit was taken incorrectly. * IF( .NOT.OK )THEN WRITE( NOUT, FMT = 9994 ) FATAL = .TRUE. GO TO 150 END IF * * See what data changed inside subroutines. * ISAME( 1 ) = SIDES.EQ.SIDE ISAME( 2 ) = UPLOS.EQ.UPLO ISAME( 3 ) = TRANAS.EQ.TRANSA ISAME( 4 ) = DIAGS.EQ.DIAG ISAME( 5 ) = MS.EQ.M ISAME( 6 ) = NS.EQ.N ISAME( 7 ) = ALS.EQ.ALPHA ISAME( 8 ) = LZE( AS, AA, LAA ) ISAME( 9 ) = LDAS.EQ.LDA IF( NULL )THEN ISAME( 10 ) = LZE( BS, BB, LBB ) ELSE ISAME( 10 ) = LZERES( 'GE', ' ', M, N, BS, $ BB, LDB ) END IF ISAME( 11 ) = LDBS.EQ.LDB * * If data was incorrectly changed, report and * return. * SAME = .TRUE. DO 50 I = 1, NARGS SAME = SAME.AND.ISAME( I ) IF( .NOT.ISAME( I ) ) $ WRITE( NOUT, FMT = 9998 )I 50 CONTINUE IF( .NOT.SAME )THEN FATAL = .TRUE. GO TO 150 END IF * IF( .NOT.NULL )THEN IF( SNAME( 4: 5 ).EQ.'MM' )THEN * * Check the result. * IF( LEFT )THEN CALL ZMMCH( TRANSA, 'N', M, N, M, $ ALPHA, A, NMAX, B, NMAX, $ ZERO, C, NMAX, CT, G, $ BB, LDB, EPS, ERR, $ FATAL, NOUT, .TRUE. ) ELSE CALL ZMMCH( 'N', TRANSA, M, N, N, $ ALPHA, B, NMAX, A, NMAX, $ ZERO, C, NMAX, CT, G, $ BB, LDB, EPS, ERR, $ FATAL, NOUT, .TRUE. ) END IF ELSE IF( SNAME( 4: 5 ).EQ.'SM' )THEN * * Compute approximation to original * matrix. 
* DO 70 J = 1, N DO 60 I = 1, M C( I, J ) = BB( I + ( J - 1 )* $ LDB ) BB( I + ( J - 1 )*LDB ) = ALPHA* $ B( I, J ) 60 CONTINUE 70 CONTINUE * IF( LEFT )THEN CALL ZMMCH( TRANSA, 'N', M, N, M, $ ONE, A, NMAX, C, NMAX, $ ZERO, B, NMAX, CT, G, $ BB, LDB, EPS, ERR, $ FATAL, NOUT, .FALSE. ) ELSE CALL ZMMCH( 'N', TRANSA, M, N, N, $ ONE, C, NMAX, A, NMAX, $ ZERO, B, NMAX, CT, G, $ BB, LDB, EPS, ERR, $ FATAL, NOUT, .FALSE. ) END IF END IF ERRMAX = MAX( ERRMAX, ERR ) * If got really bad answer, report and * return. IF( FATAL ) $ GO TO 150 END IF * 80 CONTINUE * 90 CONTINUE * 100 CONTINUE * 110 CONTINUE * 120 CONTINUE * 130 CONTINUE * 140 CONTINUE * * Report result. * IF( ERRMAX.LT.THRESH )THEN WRITE( NOUT, FMT = 9999 )SNAME, NC ELSE WRITE( NOUT, FMT = 9997 )SNAME, NC, ERRMAX END IF GO TO 160 * 150 CONTINUE WRITE( NOUT, FMT = 9996 )SNAME WRITE( NOUT, FMT = 9995 )NC, SNAME, SIDE, UPLO, TRANSA, DIAG, M, $ N, ALPHA, LDA, LDB * 160 CONTINUE RETURN * 9999 FORMAT( ' ', A6, ' PASSED THE COMPUTATIONAL TESTS (', I6, ' CALL', $ 'S)' ) 9998 FORMAT( ' ******* FATAL ERROR - PARAMETER NUMBER ', I2, ' WAS CH', $ 'ANGED INCORRECTLY *******' ) 9997 FORMAT( ' ', A6, ' COMPLETED THE COMPUTATIONAL TESTS (', I6, ' C', $ 'ALLS)', /' ******* BUT WITH MAXIMUM TEST RATIO', F8.2, $ ' - SUSPECT *******' ) 9996 FORMAT( ' ******* ', A6, ' FAILED ON CALL NUMBER:' ) 9995 FORMAT( 1X, I6, ': ', A6, '(', 4( '''', A1, ''',' ), 2( I3, ',' ), $ '(', F4.1, ',', F4.1, '), A,', I3, ', B,', I3, ') ', $ ' .' ) 9994 FORMAT( ' ******* FATAL ERROR - ERROR-EXIT TAKEN ON VALID CALL *', $ '******' ) * * End of ZCHK3. * END SUBROUTINE ZCHK4( SNAME, EPS, THRESH, NOUT, NTRA, TRACE, REWI, $ FATAL, NIDIM, IDIM, NALF, ALF, NBET, BET, NMAX, $ A, AA, AS, B, BB, BS, C, CC, CS, CT, G ) * * Tests ZHERK and ZSYRK. * * Auxiliary routine for test program for Level 3 Blas. * * -- Written on 8-February-1989. * Jack Dongarra, Argonne National Laboratory. * Iain Duff, AERE Harwell. * Jeremy Du Croz, Numerical Algorithms Group Ltd. 
* Sven Hammarling, Numerical Algorithms Group Ltd. * * .. Parameters .. COMPLEX*16 ZERO PARAMETER ( ZERO = ( 0.0D0, 0.0D0 ) ) DOUBLE PRECISION RONE, RZERO PARAMETER ( RONE = 1.0D0, RZERO = 0.0D0 ) * .. Scalar Arguments .. DOUBLE PRECISION EPS, THRESH INTEGER NALF, NBET, NIDIM, NMAX, NOUT, NTRA LOGICAL FATAL, REWI, TRACE CHARACTER*6 SNAME * .. Array Arguments .. COMPLEX*16 A( NMAX, NMAX ), AA( NMAX*NMAX ), ALF( NALF ), $ AS( NMAX*NMAX ), B( NMAX, NMAX ), $ BB( NMAX*NMAX ), BET( NBET ), BS( NMAX*NMAX ), $ C( NMAX, NMAX ), CC( NMAX*NMAX ), $ CS( NMAX*NMAX ), CT( NMAX ) DOUBLE PRECISION G( NMAX ) INTEGER IDIM( NIDIM ) * .. Local Scalars .. COMPLEX*16 ALPHA, ALS, BETA, BETS DOUBLE PRECISION ERR, ERRMAX, RALPHA, RALS, RBETA, RBETS INTEGER I, IA, IB, ICT, ICU, IK, IN, J, JC, JJ, K, KS, $ LAA, LCC, LDA, LDAS, LDC, LDCS, LJ, MA, N, NA, $ NARGS, NC, NS LOGICAL CONJ, NULL, RESET, SAME, TRAN, UPPER CHARACTER*1 TRANS, TRANSS, TRANST, UPLO, UPLOS CHARACTER*2 ICHT, ICHU * .. Local Arrays .. LOGICAL ISAME( 13 ) * .. External Functions .. LOGICAL LZE, LZERES EXTERNAL LZE, LZERES * .. External Subroutines .. EXTERNAL ZHERK, ZMAKE, ZMMCH, ZSYRK * .. Intrinsic Functions .. INTRINSIC DCMPLX, MAX, DBLE * .. Scalars in Common .. INTEGER INFOT, NOUTC LOGICAL LERR, OK * .. Common blocks .. COMMON /INFOC/INFOT, NOUTC, OK, LERR * .. Data statements .. DATA ICHT/'NC'/, ICHU/'UL'/ * .. Executable Statements .. CONJ = SNAME( 2: 3 ).EQ.'HE' * NARGS = 10 NC = 0 RESET = .TRUE. ERRMAX = RZERO * DO 100 IN = 1, NIDIM N = IDIM( IN ) * Set LDC to 1 more than minimum value if room. LDC = N IF( LDC.LT.NMAX ) $ LDC = LDC + 1 * Skip tests if not enough room. IF( LDC.GT.NMAX ) $ GO TO 100 LCC = LDC*N * DO 90 IK = 1, NIDIM K = IDIM( IK ) * DO 80 ICT = 1, 2 TRANS = ICHT( ICT: ICT ) TRAN = TRANS.EQ.'C' IF( TRAN.AND..NOT.CONJ ) $ TRANS = 'T' IF( TRAN )THEN MA = K NA = N ELSE MA = N NA = K END IF * Set LDA to 1 more than minimum value if room. 
LDA = MA IF( LDA.LT.NMAX ) $ LDA = LDA + 1 * Skip tests if not enough room. IF( LDA.GT.NMAX ) $ GO TO 80 LAA = LDA*NA * * Generate the matrix A. * CALL ZMAKE( 'GE', ' ', ' ', MA, NA, A, NMAX, AA, LDA, $ RESET, ZERO ) * DO 70 ICU = 1, 2 UPLO = ICHU( ICU: ICU ) UPPER = UPLO.EQ.'U' * DO 60 IA = 1, NALF ALPHA = ALF( IA ) IF( CONJ )THEN RALPHA = DBLE( ALPHA ) ALPHA = DCMPLX( RALPHA, RZERO ) END IF * DO 50 IB = 1, NBET BETA = BET( IB ) IF( CONJ )THEN RBETA = DBLE( BETA ) BETA = DCMPLX( RBETA, RZERO ) END IF NULL = N.LE.0 IF( CONJ ) $ NULL = NULL.OR.( ( K.LE.0.OR.RALPHA.EQ. $ RZERO ).AND.RBETA.EQ.RONE ) * * Generate the matrix C. * CALL ZMAKE( SNAME( 2: 3 ), UPLO, ' ', N, N, C, $ NMAX, CC, LDC, RESET, ZERO ) * NC = NC + 1 * * Save every datum before calling the subroutine. * UPLOS = UPLO TRANSS = TRANS NS = N KS = K IF( CONJ )THEN RALS = RALPHA ELSE ALS = ALPHA END IF DO 10 I = 1, LAA AS( I ) = AA( I ) 10 CONTINUE LDAS = LDA IF( CONJ )THEN RBETS = RBETA ELSE BETS = BETA END IF DO 20 I = 1, LCC CS( I ) = CC( I ) 20 CONTINUE LDCS = LDC * * Call the subroutine. * IF( CONJ )THEN IF( TRACE ) $ WRITE( NTRA, FMT = 9994 )NC, SNAME, UPLO, $ TRANS, N, K, RALPHA, LDA, RBETA, LDC IF( REWI ) $ REWIND NTRA CALL ZHERK( UPLO, TRANS, N, K, RALPHA, AA, $ LDA, RBETA, CC, LDC ) ELSE IF( TRACE ) $ WRITE( NTRA, FMT = 9993 )NC, SNAME, UPLO, $ TRANS, N, K, ALPHA, LDA, BETA, LDC IF( REWI ) $ REWIND NTRA CALL ZSYRK( UPLO, TRANS, N, K, ALPHA, AA, $ LDA, BETA, CC, LDC ) END IF * * Check if error-exit was taken incorrectly. * IF( .NOT.OK )THEN WRITE( NOUT, FMT = 9992 ) FATAL = .TRUE. GO TO 120 END IF * * See what data changed inside subroutines. 
* ISAME( 1 ) = UPLOS.EQ.UPLO ISAME( 2 ) = TRANSS.EQ.TRANS ISAME( 3 ) = NS.EQ.N ISAME( 4 ) = KS.EQ.K IF( CONJ )THEN ISAME( 5 ) = RALS.EQ.RALPHA ELSE ISAME( 5 ) = ALS.EQ.ALPHA END IF ISAME( 6 ) = LZE( AS, AA, LAA ) ISAME( 7 ) = LDAS.EQ.LDA IF( CONJ )THEN ISAME( 8 ) = RBETS.EQ.RBETA ELSE ISAME( 8 ) = BETS.EQ.BETA END IF IF( NULL )THEN ISAME( 9 ) = LZE( CS, CC, LCC ) ELSE ISAME( 9 ) = LZERES( SNAME( 2: 3 ), UPLO, N, $ N, CS, CC, LDC ) END IF ISAME( 10 ) = LDCS.EQ.LDC * * If data was incorrectly changed, report and * return. * SAME = .TRUE. DO 30 I = 1, NARGS SAME = SAME.AND.ISAME( I ) IF( .NOT.ISAME( I ) ) $ WRITE( NOUT, FMT = 9998 )I 30 CONTINUE IF( .NOT.SAME )THEN FATAL = .TRUE. GO TO 120 END IF * IF( .NOT.NULL )THEN * * Check the result column by column. * IF( CONJ )THEN TRANST = 'C' ELSE TRANST = 'T' END IF JC = 1 DO 40 J = 1, N IF( UPPER )THEN JJ = 1 LJ = J ELSE JJ = J LJ = N - J + 1 END IF IF( TRAN )THEN CALL ZMMCH( TRANST, 'N', LJ, 1, K, $ ALPHA, A( 1, JJ ), NMAX, $ A( 1, J ), NMAX, BETA, $ C( JJ, J ), NMAX, CT, G, $ CC( JC ), LDC, EPS, ERR, $ FATAL, NOUT, .TRUE. ) ELSE CALL ZMMCH( 'N', TRANST, LJ, 1, K, $ ALPHA, A( JJ, 1 ), NMAX, $ A( J, 1 ), NMAX, BETA, $ C( JJ, J ), NMAX, CT, G, $ CC( JC ), LDC, EPS, ERR, $ FATAL, NOUT, .TRUE. ) END IF IF( UPPER )THEN JC = JC + LDC ELSE JC = JC + LDC + 1 END IF ERRMAX = MAX( ERRMAX, ERR ) * If got really bad answer, report and * return. IF( FATAL ) $ GO TO 110 40 CONTINUE END IF * 50 CONTINUE * 60 CONTINUE * 70 CONTINUE * 80 CONTINUE * 90 CONTINUE * 100 CONTINUE * * Report result. 
* IF( ERRMAX.LT.THRESH )THEN WRITE( NOUT, FMT = 9999 )SNAME, NC ELSE WRITE( NOUT, FMT = 9997 )SNAME, NC, ERRMAX END IF GO TO 130 * 110 CONTINUE IF( N.GT.1 ) $ WRITE( NOUT, FMT = 9995 )J * 120 CONTINUE WRITE( NOUT, FMT = 9996 )SNAME IF( CONJ )THEN WRITE( NOUT, FMT = 9994 )NC, SNAME, UPLO, TRANS, N, K, RALPHA, $ LDA, RBETA, LDC ELSE WRITE( NOUT, FMT = 9993 )NC, SNAME, UPLO, TRANS, N, K, ALPHA, $ LDA, BETA, LDC END IF * 130 CONTINUE RETURN * 9999 FORMAT( ' ', A6, ' PASSED THE COMPUTATIONAL TESTS (', I6, ' CALL', $ 'S)' ) 9998 FORMAT( ' ******* FATAL ERROR - PARAMETER NUMBER ', I2, ' WAS CH', $ 'ANGED INCORRECTLY *******' ) 9997 FORMAT( ' ', A6, ' COMPLETED THE COMPUTATIONAL TESTS (', I6, ' C', $ 'ALLS)', /' ******* BUT WITH MAXIMUM TEST RATIO', F8.2, $ ' - SUSPECT *******' ) 9996 FORMAT( ' ******* ', A6, ' FAILED ON CALL NUMBER:' ) 9995 FORMAT( ' THESE ARE THE RESULTS FOR COLUMN ', I3 ) 9994 FORMAT( 1X, I6, ': ', A6, '(', 2( '''', A1, ''',' ), 2( I3, ',' ), $ F4.1, ', A,', I3, ',', F4.1, ', C,', I3, ') ', $ ' .' ) 9993 FORMAT( 1X, I6, ': ', A6, '(', 2( '''', A1, ''',' ), 2( I3, ',' ), $ '(', F4.1, ',', F4.1, ') , A,', I3, ',(', F4.1, ',', F4.1, $ '), C,', I3, ') .' ) 9992 FORMAT( ' ******* FATAL ERROR - ERROR-EXIT TAKEN ON VALID CALL *', $ '******' ) * * End of ZCHK4. * END SUBROUTINE ZCHK5( SNAME, EPS, THRESH, NOUT, NTRA, TRACE, REWI, $ FATAL, NIDIM, IDIM, NALF, ALF, NBET, BET, NMAX, $ AB, AA, AS, BB, BS, C, CC, CS, CT, G, W ) * * Tests ZHER2K and ZSYR2K. * * Auxiliary routine for test program for Level 3 Blas. * * -- Written on 8-February-1989. * Jack Dongarra, Argonne National Laboratory. * Iain Duff, AERE Harwell. * Jeremy Du Croz, Numerical Algorithms Group Ltd. * Sven Hammarling, Numerical Algorithms Group Ltd. * * .. Parameters .. COMPLEX*16 ZERO, ONE PARAMETER ( ZERO = ( 0.0D0, 0.0D0 ), $ ONE = ( 1.0D0, 0.0D0 ) ) DOUBLE PRECISION RONE, RZERO PARAMETER ( RONE = 1.0D0, RZERO = 0.0D0 ) * .. Scalar Arguments .. 
DOUBLE PRECISION EPS, THRESH INTEGER NALF, NBET, NIDIM, NMAX, NOUT, NTRA LOGICAL FATAL, REWI, TRACE CHARACTER*6 SNAME * .. Array Arguments .. COMPLEX*16 AA( NMAX*NMAX ), AB( 2*NMAX*NMAX ), $ ALF( NALF ), AS( NMAX*NMAX ), BB( NMAX*NMAX ), $ BET( NBET ), BS( NMAX*NMAX ), C( NMAX, NMAX ), $ CC( NMAX*NMAX ), CS( NMAX*NMAX ), CT( NMAX ), $ W( 2*NMAX ) DOUBLE PRECISION G( NMAX ) INTEGER IDIM( NIDIM ) * .. Local Scalars .. COMPLEX*16 ALPHA, ALS, BETA, BETS DOUBLE PRECISION ERR, ERRMAX, RBETA, RBETS INTEGER I, IA, IB, ICT, ICU, IK, IN, J, JC, JJ, JJAB, $ K, KS, LAA, LBB, LCC, LDA, LDAS, LDB, LDBS, $ LDC, LDCS, LJ, MA, N, NA, NARGS, NC, NS LOGICAL CONJ, NULL, RESET, SAME, TRAN, UPPER CHARACTER*1 TRANS, TRANSS, TRANST, UPLO, UPLOS CHARACTER*2 ICHT, ICHU * .. Local Arrays .. LOGICAL ISAME( 13 ) * .. External Functions .. LOGICAL LZE, LZERES EXTERNAL LZE, LZERES * .. External Subroutines .. EXTERNAL ZHER2K, ZMAKE, ZMMCH, ZSYR2K * .. Intrinsic Functions .. INTRINSIC DCMPLX, DCONJG, MAX, DBLE * .. Scalars in Common .. INTEGER INFOT, NOUTC LOGICAL LERR, OK * .. Common blocks .. COMMON /INFOC/INFOT, NOUTC, OK, LERR * .. Data statements .. DATA ICHT/'NC'/, ICHU/'UL'/ * .. Executable Statements .. CONJ = SNAME( 2: 3 ).EQ.'HE' * NARGS = 12 NC = 0 RESET = .TRUE. ERRMAX = RZERO * DO 130 IN = 1, NIDIM N = IDIM( IN ) * Set LDC to 1 more than minimum value if room. LDC = N IF( LDC.LT.NMAX ) $ LDC = LDC + 1 * Skip tests if not enough room. IF( LDC.GT.NMAX ) $ GO TO 130 LCC = LDC*N * DO 120 IK = 1, NIDIM K = IDIM( IK ) * DO 110 ICT = 1, 2 TRANS = ICHT( ICT: ICT ) TRAN = TRANS.EQ.'C' IF( TRAN.AND..NOT.CONJ ) $ TRANS = 'T' IF( TRAN )THEN MA = K NA = N ELSE MA = N NA = K END IF * Set LDA to 1 more than minimum value if room. LDA = MA IF( LDA.LT.NMAX ) $ LDA = LDA + 1 * Skip tests if not enough room. IF( LDA.GT.NMAX ) $ GO TO 110 LAA = LDA*NA * * Generate the matrix A. 
* IF( TRAN )THEN CALL ZMAKE( 'GE', ' ', ' ', MA, NA, AB, 2*NMAX, AA, $ LDA, RESET, ZERO ) ELSE CALL ZMAKE( 'GE', ' ', ' ', MA, NA, AB, NMAX, AA, LDA, $ RESET, ZERO ) END IF * * Generate the matrix B. * LDB = LDA LBB = LAA IF( TRAN )THEN CALL ZMAKE( 'GE', ' ', ' ', MA, NA, AB( K + 1 ), $ 2*NMAX, BB, LDB, RESET, ZERO ) ELSE CALL ZMAKE( 'GE', ' ', ' ', MA, NA, AB( K*NMAX + 1 ), $ NMAX, BB, LDB, RESET, ZERO ) END IF * DO 100 ICU = 1, 2 UPLO = ICHU( ICU: ICU ) UPPER = UPLO.EQ.'U' * DO 90 IA = 1, NALF ALPHA = ALF( IA ) * DO 80 IB = 1, NBET BETA = BET( IB ) IF( CONJ )THEN RBETA = DBLE( BETA ) BETA = DCMPLX( RBETA, RZERO ) END IF NULL = N.LE.0 IF( CONJ ) $ NULL = NULL.OR.( ( K.LE.0.OR.ALPHA.EQ. $ ZERO ).AND.RBETA.EQ.RONE ) * * Generate the matrix C. * CALL ZMAKE( SNAME( 2: 3 ), UPLO, ' ', N, N, C, $ NMAX, CC, LDC, RESET, ZERO ) * NC = NC + 1 * * Save every datum before calling the subroutine. * UPLOS = UPLO TRANSS = TRANS NS = N KS = K ALS = ALPHA DO 10 I = 1, LAA AS( I ) = AA( I ) 10 CONTINUE LDAS = LDA DO 20 I = 1, LBB BS( I ) = BB( I ) 20 CONTINUE LDBS = LDB IF( CONJ )THEN RBETS = RBETA ELSE BETS = BETA END IF DO 30 I = 1, LCC CS( I ) = CC( I ) 30 CONTINUE LDCS = LDC * * Call the subroutine. * IF( CONJ )THEN IF( TRACE ) $ WRITE( NTRA, FMT = 9994 )NC, SNAME, UPLO, $ TRANS, N, K, ALPHA, LDA, LDB, RBETA, LDC IF( REWI ) $ REWIND NTRA CALL ZHER2K( UPLO, TRANS, N, K, ALPHA, AA, $ LDA, BB, LDB, RBETA, CC, LDC ) ELSE IF( TRACE ) $ WRITE( NTRA, FMT = 9993 )NC, SNAME, UPLO, $ TRANS, N, K, ALPHA, LDA, LDB, BETA, LDC IF( REWI ) $ REWIND NTRA CALL ZSYR2K( UPLO, TRANS, N, K, ALPHA, AA, $ LDA, BB, LDB, BETA, CC, LDC ) END IF * * Check if error-exit was taken incorrectly. * IF( .NOT.OK )THEN WRITE( NOUT, FMT = 9992 ) FATAL = .TRUE. GO TO 150 END IF * * See what data changed inside subroutines. 
* ISAME( 1 ) = UPLOS.EQ.UPLO ISAME( 2 ) = TRANSS.EQ.TRANS ISAME( 3 ) = NS.EQ.N ISAME( 4 ) = KS.EQ.K ISAME( 5 ) = ALS.EQ.ALPHA ISAME( 6 ) = LZE( AS, AA, LAA ) ISAME( 7 ) = LDAS.EQ.LDA ISAME( 8 ) = LZE( BS, BB, LBB ) ISAME( 9 ) = LDBS.EQ.LDB IF( CONJ )THEN ISAME( 10 ) = RBETS.EQ.RBETA ELSE ISAME( 10 ) = BETS.EQ.BETA END IF IF( NULL )THEN ISAME( 11 ) = LZE( CS, CC, LCC ) ELSE ISAME( 11 ) = LZERES( 'HE', UPLO, N, N, CS, $ CC, LDC ) END IF ISAME( 12 ) = LDCS.EQ.LDC * * If data was incorrectly changed, report and * return. * SAME = .TRUE. DO 40 I = 1, NARGS SAME = SAME.AND.ISAME( I ) IF( .NOT.ISAME( I ) ) $ WRITE( NOUT, FMT = 9998 )I 40 CONTINUE IF( .NOT.SAME )THEN FATAL = .TRUE. GO TO 150 END IF * IF( .NOT.NULL )THEN * * Check the result column by column. * IF( CONJ )THEN TRANST = 'C' ELSE TRANST = 'T' END IF JJAB = 1 JC = 1 DO 70 J = 1, N IF( UPPER )THEN JJ = 1 LJ = J ELSE JJ = J LJ = N - J + 1 END IF IF( TRAN )THEN DO 50 I = 1, K W( I ) = ALPHA*AB( ( J - 1 )*2* $ NMAX + K + I ) IF( CONJ )THEN W( K + I ) = DCONJG( ALPHA )* $ AB( ( J - 1 )*2* $ NMAX + I ) ELSE W( K + I ) = ALPHA* $ AB( ( J - 1 )*2* $ NMAX + I ) END IF 50 CONTINUE CALL ZMMCH( TRANST, 'N', LJ, 1, 2*K, $ ONE, AB( JJAB ), 2*NMAX, W, $ 2*NMAX, BETA, C( JJ, J ), $ NMAX, CT, G, CC( JC ), LDC, $ EPS, ERR, FATAL, NOUT, $ .TRUE. ) ELSE DO 60 I = 1, K IF( CONJ )THEN W( I ) = ALPHA*DCONJG( AB( ( K + $ I - 1 )*NMAX + J ) ) W( K + I ) = DCONJG( ALPHA* $ AB( ( I - 1 )*NMAX + $ J ) ) ELSE W( I ) = ALPHA*AB( ( K + I - 1 )* $ NMAX + J ) W( K + I ) = ALPHA* $ AB( ( I - 1 )*NMAX + $ J ) END IF 60 CONTINUE CALL ZMMCH( 'N', 'N', LJ, 1, 2*K, ONE, $ AB( JJ ), NMAX, W, 2*NMAX, $ BETA, C( JJ, J ), NMAX, CT, $ G, CC( JC ), LDC, EPS, ERR, $ FATAL, NOUT, .TRUE. ) END IF IF( UPPER )THEN JC = JC + LDC ELSE JC = JC + LDC + 1 IF( TRAN ) $ JJAB = JJAB + 2*NMAX END IF ERRMAX = MAX( ERRMAX, ERR ) * If got really bad answer, report and * return. 
IF( FATAL ) $ GO TO 140 70 CONTINUE END IF * 80 CONTINUE * 90 CONTINUE * 100 CONTINUE * 110 CONTINUE * 120 CONTINUE * 130 CONTINUE * * Report result. * IF( ERRMAX.LT.THRESH )THEN WRITE( NOUT, FMT = 9999 )SNAME, NC ELSE WRITE( NOUT, FMT = 9997 )SNAME, NC, ERRMAX END IF GO TO 160 * 140 CONTINUE IF( N.GT.1 ) $ WRITE( NOUT, FMT = 9995 )J * 150 CONTINUE WRITE( NOUT, FMT = 9996 )SNAME IF( CONJ )THEN WRITE( NOUT, FMT = 9994 )NC, SNAME, UPLO, TRANS, N, K, ALPHA, $ LDA, LDB, RBETA, LDC ELSE WRITE( NOUT, FMT = 9993 )NC, SNAME, UPLO, TRANS, N, K, ALPHA, $ LDA, LDB, BETA, LDC END IF * 160 CONTINUE RETURN * 9999 FORMAT( ' ', A6, ' PASSED THE COMPUTATIONAL TESTS (', I6, ' CALL', $ 'S)' ) 9998 FORMAT( ' ******* FATAL ERROR - PARAMETER NUMBER ', I2, ' WAS CH', $ 'ANGED INCORRECTLY *******' ) 9997 FORMAT( ' ', A6, ' COMPLETED THE COMPUTATIONAL TESTS (', I6, ' C', $ 'ALLS)', /' ******* BUT WITH MAXIMUM TEST RATIO', F8.2, $ ' - SUSPECT *******' ) 9996 FORMAT( ' ******* ', A6, ' FAILED ON CALL NUMBER:' ) 9995 FORMAT( ' THESE ARE THE RESULTS FOR COLUMN ', I3 ) 9994 FORMAT( 1X, I6, ': ', A6, '(', 2( '''', A1, ''',' ), 2( I3, ',' ), $ '(', F4.1, ',', F4.1, '), A,', I3, ', B,', I3, ',', F4.1, $ ', C,', I3, ') .' ) 9993 FORMAT( 1X, I6, ': ', A6, '(', 2( '''', A1, ''',' ), 2( I3, ',' ), $ '(', F4.1, ',', F4.1, '), A,', I3, ', B,', I3, ',(', F4.1, $ ',', F4.1, '), C,', I3, ') .' ) 9992 FORMAT( ' ******* FATAL ERROR - ERROR-EXIT TAKEN ON VALID CALL *', $ '******' ) * * End of ZCHK5. * END SUBROUTINE ZCHKE( ISNUM, SRNAMT, NOUT ) * * Tests the error exits from the Level 3 Blas. * Requires a special version of the error-handling routine XERBLA. * A, B and C should not need to be defined. * * Auxiliary routine for test program for Level 3 Blas. * * -- Written on 8-February-1989. * Jack Dongarra, Argonne National Laboratory. * Iain Duff, AERE Harwell. * Jeremy Du Croz, Numerical Algorithms Group Ltd. * Sven Hammarling, Numerical Algorithms Group Ltd. 
*
*  3-19-92:  Initialize ALPHA, BETA, RALPHA, and RBETA  (eca)
*  3-19-92:  Fix argument 12 in calls to ZSYMM and ZHEMM
*            with INFOT = 9  (eca)
*  10-9-00:  Declared INTRINSIC DCMPLX (susan)
*
*     .. Scalar Arguments ..
      INTEGER            ISNUM, NOUT
      CHARACTER*6        SRNAMT
*     .. Scalars in Common ..
      INTEGER            INFOT, NOUTC
      LOGICAL            LERR, OK
*     .. Parameters ..
*     NOTE(review): ONE and TWO are declared REAL but initialized with
*     D0 constants; this matches the reference zblat3.f and is kept
*     unchanged here.
      REAL               ONE, TWO
      PARAMETER          ( ONE = 1.0D0, TWO = 2.0D0 )
*     .. Local Scalars ..
      COMPLEX*16         ALPHA, BETA
      DOUBLE PRECISION   RALPHA, RBETA
*     .. Local Arrays ..
*     A, B and C are never read by the routines under test on these
*     error paths; they only need to exist as valid actual arguments.
      COMPLEX*16         A( 2, 1 ), B( 2, 1 ), C( 2, 1 )
*     .. External Subroutines ..
      EXTERNAL           ZGEMM, ZHEMM, ZHER2K, ZHERK, CHKXER, ZSYMM,
     $                   ZSYR2K, ZSYRK, ZTRMM, ZTRSM
*     .. Intrinsic Functions ..
      INTRINSIC          DCMPLX
*     .. Common blocks ..
      COMMON             /INFOC/INFOT, NOUTC, OK, LERR
*     .. Executable Statements ..
*     OK is set to .FALSE. by the special version of XERBLA or by CHKXER
*     if anything is wrong.
      OK = .TRUE.
*     LERR is set to .TRUE. by the special version of XERBLA each time
*     it is called, and is then tested and re-set by CHKXER.
      LERR = .FALSE.
*
*     Initialize ALPHA, BETA, RALPHA, and RBETA.
*
      ALPHA = DCMPLX( ONE, -ONE )
      BETA = DCMPLX( TWO, -TWO )
      RALPHA = ONE
      RBETA = TWO
*
*     Dispatch on ISNUM: one branch per Level 3 BLAS routine.  Each
*     branch sets INFOT to the argument number expected to be flagged,
*     makes one deliberately invalid call, and lets CHKXER verify that
*     the stub XERBLA was entered with exactly that argument number.
      GO TO ( 10, 20, 30, 40, 50, 60, 70, 80,
     $        90 )ISNUM
*
*     Error exits of ZGEMM.
   10 INFOT = 1
      CALL ZGEMM( '/', 'N', 0, 0, 0, ALPHA, A, 1, B, 1, BETA, C, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 1
      CALL ZGEMM( '/', 'C', 0, 0, 0, ALPHA, A, 1, B, 1, BETA, C, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 1
      CALL ZGEMM( '/', 'T', 0, 0, 0, ALPHA, A, 1, B, 1, BETA, C, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 2
      CALL ZGEMM( 'N', '/', 0, 0, 0, ALPHA, A, 1, B, 1, BETA, C, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 2
      CALL ZGEMM( 'C', '/', 0, 0, 0, ALPHA, A, 1, B, 1, BETA, C, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 2
      CALL ZGEMM( 'T', '/', 0, 0, 0, ALPHA, A, 1, B, 1, BETA, C, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
*     Argument 3 (M) negative, for all nine TRANSA/TRANSB combinations.
      INFOT = 3
      CALL ZGEMM( 'N', 'N', -1, 0, 0, ALPHA, A, 1, B, 1, BETA, C, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 3
      CALL ZGEMM( 'N', 'C', -1, 0, 0, ALPHA, A, 1, B, 1, BETA, C, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 3
      CALL ZGEMM( 'N', 'T', -1, 0, 0, ALPHA, A, 1, B, 1, BETA, C, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 3
      CALL ZGEMM( 'C', 'N', -1, 0, 0, ALPHA, A, 1, B, 1, BETA, C, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 3
      CALL ZGEMM( 'C', 'C', -1, 0, 0, ALPHA, A, 1, B, 1, BETA, C, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 3
      CALL ZGEMM( 'C', 'T', -1, 0, 0, ALPHA, A, 1, B, 1, BETA, C, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 3
      CALL ZGEMM( 'T', 'N', -1, 0, 0, ALPHA, A, 1, B, 1, BETA, C, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 3
      CALL ZGEMM( 'T', 'C', -1, 0, 0, ALPHA, A, 1, B, 1, BETA, C, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 3
      CALL ZGEMM( 'T', 'T', -1, 0, 0, ALPHA, A, 1, B, 1, BETA, C, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
*     Argument 4 (N) negative.
      INFOT = 4
      CALL ZGEMM( 'N', 'N', 0, -1, 0, ALPHA, A, 1, B, 1, BETA, C, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 4
      CALL ZGEMM( 'N', 'C', 0, -1, 0, ALPHA, A, 1, B, 1, BETA, C, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 4
      CALL ZGEMM( 'N', 'T', 0, -1, 0, ALPHA, A, 1, B, 1, BETA, C, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 4
      CALL ZGEMM( 'C', 'N', 0, -1, 0, ALPHA, A, 1, B, 1, BETA, C, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 4
      CALL ZGEMM( 'C', 'C', 0, -1, 0, ALPHA, A, 1, B, 1, BETA, C, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 4
      CALL ZGEMM( 'C', 'T', 0, -1, 0, ALPHA, A, 1, B, 1, BETA, C, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 4
      CALL ZGEMM( 'T', 'N', 0, -1, 0, ALPHA, A, 1, B, 1, BETA, C, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 4
      CALL ZGEMM( 'T', 'C', 0, -1, 0, ALPHA, A, 1, B, 1, BETA, C, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 4
      CALL ZGEMM( 'T', 'T', 0, -1, 0, ALPHA, A, 1, B, 1, BETA, C, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
*     Argument 5 (K) negative.
      INFOT = 5
      CALL ZGEMM( 'N', 'N', 0, 0, -1, ALPHA, A, 1, B, 1, BETA, C, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 5
      CALL ZGEMM( 'N', 'C', 0, 0, -1, ALPHA, A, 1, B, 1, BETA, C, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 5
      CALL ZGEMM( 'N', 'T', 0, 0, -1, ALPHA, A, 1, B, 1, BETA, C, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 5
      CALL ZGEMM( 'C', 'N', 0, 0, -1, ALPHA, A, 1, B, 1, BETA, C, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 5
      CALL ZGEMM( 'C', 'C', 0, 0, -1, ALPHA, A, 1, B, 1, BETA, C, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 5
      CALL ZGEMM( 'C', 'T', 0, 0, -1, ALPHA, A, 1, B, 1, BETA, C, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 5
      CALL ZGEMM( 'T', 'N', 0, 0, -1, ALPHA, A, 1, B, 1, BETA, C, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 5
      CALL ZGEMM( 'T', 'C', 0, 0, -1, ALPHA, A, 1, B, 1, BETA, C, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 5
      CALL ZGEMM( 'T', 'T', 0, 0, -1, ALPHA, A, 1,
     $            B, 1, BETA, C, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
*     Argument 8 (LDA) too small for the chosen dimensions.
      INFOT = 8
      CALL ZGEMM( 'N', 'N', 2, 0, 0, ALPHA, A, 1, B, 1, BETA, C, 2 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 8
      CALL ZGEMM( 'N', 'C', 2, 0, 0, ALPHA, A, 1, B, 1, BETA, C, 2 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 8
      CALL ZGEMM( 'N', 'T', 2, 0, 0, ALPHA, A, 1, B, 1, BETA, C, 2 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 8
      CALL ZGEMM( 'C', 'N', 0, 0, 2, ALPHA, A, 1, B, 2, BETA, C, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 8
      CALL ZGEMM( 'C', 'C', 0, 0, 2, ALPHA, A, 1, B, 1, BETA, C, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 8
      CALL ZGEMM( 'C', 'T', 0, 0, 2, ALPHA, A, 1, B, 1, BETA, C, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 8
      CALL ZGEMM( 'T', 'N', 0, 0, 2, ALPHA, A, 1, B, 2, BETA, C, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 8
      CALL ZGEMM( 'T', 'C', 0, 0, 2, ALPHA, A, 1, B, 1, BETA, C, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 8
      CALL ZGEMM( 'T', 'T', 0, 0, 2, ALPHA, A, 1, B, 1, BETA, C, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
*     Argument 10 (LDB) too small.
      INFOT = 10
      CALL ZGEMM( 'N', 'N', 0, 0, 2, ALPHA, A, 1, B, 1, BETA, C, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 10
      CALL ZGEMM( 'C', 'N', 0, 0, 2, ALPHA, A, 2, B, 1, BETA, C, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 10
      CALL ZGEMM( 'T', 'N', 0, 0, 2, ALPHA, A, 2, B, 1, BETA, C, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 10
      CALL ZGEMM( 'N', 'C', 0, 2, 0, ALPHA, A, 1, B, 1, BETA, C, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 10
      CALL ZGEMM( 'C', 'C', 0, 2, 0, ALPHA, A, 1, B, 1, BETA, C, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 10
      CALL ZGEMM( 'T', 'C', 0, 2, 0, ALPHA, A, 1, B, 1, BETA, C, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 10
      CALL ZGEMM( 'N', 'T', 0, 2, 0, ALPHA, A, 1, B, 1, BETA, C, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 10
      CALL ZGEMM( 'C', 'T', 0, 2,
     $            0, ALPHA, A, 1, B, 1, BETA, C, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 10
      CALL ZGEMM( 'T', 'T', 0, 2, 0, ALPHA, A, 1, B, 1, BETA, C, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
*     Argument 13 (LDC) too small.
      INFOT = 13
      CALL ZGEMM( 'N', 'N', 2, 0, 0, ALPHA, A, 2, B, 1, BETA, C, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 13
      CALL ZGEMM( 'N', 'C', 2, 0, 0, ALPHA, A, 2, B, 1, BETA, C, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 13
      CALL ZGEMM( 'N', 'T', 2, 0, 0, ALPHA, A, 2, B, 1, BETA, C, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 13
      CALL ZGEMM( 'C', 'N', 2, 0, 0, ALPHA, A, 1, B, 1, BETA, C, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 13
      CALL ZGEMM( 'C', 'C', 2, 0, 0, ALPHA, A, 1, B, 1, BETA, C, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 13
      CALL ZGEMM( 'C', 'T', 2, 0, 0, ALPHA, A, 1, B, 1, BETA, C, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 13
      CALL ZGEMM( 'T', 'N', 2, 0, 0, ALPHA, A, 1, B, 1, BETA, C, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 13
      CALL ZGEMM( 'T', 'C', 2, 0, 0, ALPHA, A, 1, B, 1, BETA, C, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 13
      CALL ZGEMM( 'T', 'T', 2, 0, 0, ALPHA, A, 1, B, 1, BETA, C, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      GO TO 100
*
*     Error exits of ZHEMM.
   20 INFOT = 1
      CALL ZHEMM( '/', 'U', 0, 0, ALPHA, A, 1, B, 1, BETA, C, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 2
      CALL ZHEMM( 'L', '/', 0, 0, ALPHA, A, 1, B, 1, BETA, C, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 3
      CALL ZHEMM( 'L', 'U', -1, 0, ALPHA, A, 1, B, 1, BETA, C, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 3
      CALL ZHEMM( 'R', 'U', -1, 0, ALPHA, A, 1, B, 1, BETA, C, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 3
      CALL ZHEMM( 'L', 'L', -1, 0, ALPHA, A, 1, B, 1, BETA, C, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 3
      CALL ZHEMM( 'R', 'L', -1, 0, ALPHA, A, 1, B, 1, BETA, C, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 4
      CALL ZHEMM( 'L', 'U', 0, -1, ALPHA, A, 1, B, 1, BETA, C, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 4
      CALL ZHEMM( 'R', 'U', 0, -1, ALPHA, A, 1, B, 1, BETA, C, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 4
      CALL ZHEMM( 'L', 'L', 0, -1, ALPHA, A, 1, B, 1, BETA, C, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 4
      CALL ZHEMM( 'R', 'L', 0, -1, ALPHA, A, 1, B, 1, BETA, C, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 7
      CALL ZHEMM( 'L', 'U', 2, 0, ALPHA, A, 1, B, 2, BETA, C, 2 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 7
      CALL ZHEMM( 'R', 'U', 0, 2, ALPHA, A, 1, B, 1, BETA, C, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 7
      CALL ZHEMM( 'L', 'L', 2, 0, ALPHA, A, 1, B, 2, BETA, C, 2 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 7
      CALL ZHEMM( 'R', 'L', 0, 2, ALPHA, A, 1, B, 1, BETA, C, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 9
      CALL ZHEMM( 'L', 'U', 2, 0, ALPHA, A, 2, B, 1, BETA, C, 2 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 9
      CALL ZHEMM( 'R', 'U', 2, 0, ALPHA, A, 1, B, 1, BETA, C, 2 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 9
      CALL ZHEMM( 'L', 'L', 2, 0, ALPHA, A, 2, B, 1, BETA, C, 2 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 9
      CALL ZHEMM( 'R', 'L', 2, 0, ALPHA, A, 1, B, 1, BETA, C, 2 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 12
      CALL ZHEMM( 'L', 'U', 2, 0, ALPHA, A, 2, B, 2, BETA, C, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 12
      CALL ZHEMM( 'R', 'U', 2, 0, ALPHA, A, 1, B, 2, BETA, C, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 12
      CALL ZHEMM( 'L', 'L', 2, 0, ALPHA, A, 2, B, 2, BETA, C, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 12
      CALL ZHEMM( 'R', 'L', 2, 0, ALPHA, A, 1, B, 2, BETA, C, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      GO TO 100
*
*     Error exits of ZSYMM (same probe pattern as ZHEMM).
   30 INFOT = 1
      CALL ZSYMM( '/', 'U', 0, 0, ALPHA, A, 1, B, 1, BETA, C, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 2
      CALL ZSYMM( 'L', '/', 0, 0, ALPHA,
     $            A, 1, B, 1, BETA, C, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 3
      CALL ZSYMM( 'L', 'U', -1, 0, ALPHA, A, 1, B, 1, BETA, C, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 3
      CALL ZSYMM( 'R', 'U', -1, 0, ALPHA, A, 1, B, 1, BETA, C, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 3
      CALL ZSYMM( 'L', 'L', -1, 0, ALPHA, A, 1, B, 1, BETA, C, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 3
      CALL ZSYMM( 'R', 'L', -1, 0, ALPHA, A, 1, B, 1, BETA, C, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 4
      CALL ZSYMM( 'L', 'U', 0, -1, ALPHA, A, 1, B, 1, BETA, C, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 4
      CALL ZSYMM( 'R', 'U', 0, -1, ALPHA, A, 1, B, 1, BETA, C, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 4
      CALL ZSYMM( 'L', 'L', 0, -1, ALPHA, A, 1, B, 1, BETA, C, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 4
      CALL ZSYMM( 'R', 'L', 0, -1, ALPHA, A, 1, B, 1, BETA, C, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 7
      CALL ZSYMM( 'L', 'U', 2, 0, ALPHA, A, 1, B, 2, BETA, C, 2 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 7
      CALL ZSYMM( 'R', 'U', 0, 2, ALPHA, A, 1, B, 1, BETA, C, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 7
      CALL ZSYMM( 'L', 'L', 2, 0, ALPHA, A, 1, B, 2, BETA, C, 2 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 7
      CALL ZSYMM( 'R', 'L', 0, 2, ALPHA, A, 1, B, 1, BETA, C, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 9
      CALL ZSYMM( 'L', 'U', 2, 0, ALPHA, A, 2, B, 1, BETA, C, 2 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 9
      CALL ZSYMM( 'R', 'U', 2, 0, ALPHA, A, 1, B, 1, BETA, C, 2 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 9
      CALL ZSYMM( 'L', 'L', 2, 0, ALPHA, A, 2, B, 1, BETA, C, 2 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 9
      CALL ZSYMM( 'R', 'L', 2, 0, ALPHA, A, 1, B, 1, BETA, C, 2 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 12
      CALL ZSYMM( 'L', 'U', 2, 0, ALPHA, A, 2, B, 2, BETA, C, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 12
      CALL ZSYMM( 'R', 'U', 2, 0, ALPHA, A, 1, B, 2, BETA, C, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 12
      CALL ZSYMM( 'L', 'L', 2, 0, ALPHA, A, 2, B, 2, BETA, C, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 12
      CALL ZSYMM( 'R', 'L', 2, 0, ALPHA, A, 1, B, 2, BETA, C, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      GO TO 100
*
*     Error exits of ZTRMM.
   40 INFOT = 1
      CALL ZTRMM( '/', 'U', 'N', 'N', 0, 0, ALPHA, A, 1, B, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 2
      CALL ZTRMM( 'L', '/', 'N', 'N', 0, 0, ALPHA, A, 1, B, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 3
      CALL ZTRMM( 'L', 'U', '/', 'N', 0, 0, ALPHA, A, 1, B, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 4
      CALL ZTRMM( 'L', 'U', 'N', '/', 0, 0, ALPHA, A, 1, B, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 5
      CALL ZTRMM( 'L', 'U', 'N', 'N', -1, 0, ALPHA, A, 1, B, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 5
      CALL ZTRMM( 'L', 'U', 'C', 'N', -1, 0, ALPHA, A, 1, B, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 5
      CALL ZTRMM( 'L', 'U', 'T', 'N', -1, 0, ALPHA, A, 1, B, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 5
      CALL ZTRMM( 'R', 'U', 'N', 'N', -1, 0, ALPHA, A, 1, B, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 5
      CALL ZTRMM( 'R', 'U', 'C', 'N', -1, 0, ALPHA, A, 1, B, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 5
      CALL ZTRMM( 'R', 'U', 'T', 'N', -1, 0, ALPHA, A, 1, B, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 5
      CALL ZTRMM( 'L', 'L', 'N', 'N', -1, 0, ALPHA, A, 1, B, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 5
      CALL ZTRMM( 'L', 'L', 'C', 'N', -1, 0, ALPHA, A, 1, B, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 5
      CALL ZTRMM( 'L', 'L', 'T', 'N', -1, 0, ALPHA, A, 1, B, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 5
      CALL ZTRMM( 'R', 'L', 'N', 'N', -1, 0, ALPHA, A, 1, B, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 5
      CALL ZTRMM( 'R', 'L', 'C', 'N', -1, 0, ALPHA, A, 1, B, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 5
      CALL ZTRMM( 'R', 'L', 'T', 'N', -1, 0, ALPHA, A, 1, B, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 6
      CALL ZTRMM( 'L', 'U', 'N', 'N', 0, -1, ALPHA, A, 1, B, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 6
      CALL ZTRMM( 'L', 'U', 'C', 'N', 0, -1, ALPHA, A, 1, B, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 6
      CALL ZTRMM( 'L', 'U', 'T', 'N', 0, -1, ALPHA, A, 1, B, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 6
      CALL ZTRMM( 'R', 'U', 'N', 'N', 0, -1, ALPHA, A, 1, B, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 6
      CALL ZTRMM( 'R', 'U', 'C', 'N', 0, -1, ALPHA, A, 1, B, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 6
      CALL ZTRMM( 'R', 'U', 'T', 'N', 0, -1, ALPHA, A, 1, B, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 6
      CALL ZTRMM( 'L', 'L', 'N', 'N', 0, -1, ALPHA, A, 1, B, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 6
      CALL ZTRMM( 'L', 'L', 'C', 'N', 0, -1, ALPHA, A, 1, B, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 6
      CALL ZTRMM( 'L', 'L', 'T', 'N', 0, -1, ALPHA, A, 1, B, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 6
      CALL ZTRMM( 'R', 'L', 'N', 'N', 0, -1, ALPHA, A, 1, B, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 6
      CALL ZTRMM( 'R', 'L', 'C', 'N', 0, -1, ALPHA, A, 1, B, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 6
      CALL ZTRMM( 'R', 'L', 'T', 'N', 0, -1, ALPHA, A, 1, B, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 9
      CALL ZTRMM( 'L', 'U', 'N', 'N', 2, 0, ALPHA, A, 1, B, 2 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 9
      CALL ZTRMM( 'L', 'U', 'C', 'N', 2, 0, ALPHA, A, 1, B, 2 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 9
      CALL ZTRMM( 'L', 'U', 'T', 'N', 2, 0, ALPHA, A, 1, B, 2 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 9
      CALL ZTRMM( 'R', 'U', 'N', 'N', 0, 2, ALPHA, A, 1, B, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 9
      CALL ZTRMM( 'R', 'U', 'C', 'N', 0, 2, ALPHA, A, 1, B, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 9
      CALL ZTRMM( 'R', 'U', 'T', 'N', 0, 2, ALPHA, A, 1, B, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 9
      CALL ZTRMM( 'L', 'L', 'N', 'N', 2, 0, ALPHA, A, 1, B, 2 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 9
      CALL ZTRMM( 'L', 'L', 'C', 'N', 2, 0, ALPHA, A, 1, B, 2 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 9
      CALL ZTRMM( 'L', 'L', 'T', 'N', 2, 0, ALPHA, A, 1, B, 2 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 9
      CALL ZTRMM( 'R', 'L', 'N', 'N', 0, 2, ALPHA, A, 1, B, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 9
      CALL ZTRMM( 'R', 'L', 'C', 'N', 0, 2, ALPHA, A, 1, B, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 9
      CALL ZTRMM( 'R', 'L', 'T', 'N', 0, 2, ALPHA, A, 1, B, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 11
      CALL ZTRMM( 'L', 'U', 'N', 'N', 2, 0, ALPHA, A, 2, B, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 11
      CALL ZTRMM( 'L', 'U', 'C', 'N', 2, 0, ALPHA, A, 2, B, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 11
      CALL ZTRMM( 'L', 'U', 'T', 'N', 2, 0, ALPHA, A, 2, B, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 11
      CALL ZTRMM( 'R', 'U', 'N', 'N', 2, 0, ALPHA, A, 1, B, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 11
      CALL ZTRMM( 'R', 'U', 'C', 'N', 2, 0, ALPHA, A, 1, B, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 11
      CALL ZTRMM( 'R', 'U', 'T', 'N', 2, 0, ALPHA, A, 1, B, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 11
      CALL ZTRMM( 'L', 'L', 'N', 'N', 2, 0, ALPHA, A, 2, B, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 11
      CALL ZTRMM( 'L', 'L', 'C', 'N', 2, 0, ALPHA, A, 2, B, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 11
      CALL ZTRMM( 'L', 'L', 'T', 'N', 2, 0, ALPHA, A, 2, B, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 11
      CALL ZTRMM( 'R', 'L', 'N', 'N', 2, 0, ALPHA, A, 1, B, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 11
      CALL ZTRMM( 'R', 'L', 'C', 'N', 2, 0, ALPHA, A, 1, B, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 11
      CALL ZTRMM( 'R', 'L', 'T', 'N', 2, 0, ALPHA, A, 1, B, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      GO TO 100
*
*     Error exits of ZTRSM (same probe pattern as ZTRMM).
   50 INFOT = 1
      CALL ZTRSM( '/', 'U', 'N', 'N', 0, 0, ALPHA, A, 1, B, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 2
      CALL ZTRSM( 'L', '/', 'N', 'N', 0, 0, ALPHA, A, 1, B, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 3
      CALL ZTRSM( 'L', 'U', '/', 'N', 0, 0, ALPHA, A, 1, B, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 4
      CALL ZTRSM( 'L', 'U', 'N', '/', 0, 0, ALPHA, A, 1, B, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 5
      CALL ZTRSM( 'L', 'U', 'N', 'N', -1, 0, ALPHA, A, 1, B, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 5
      CALL ZTRSM( 'L', 'U', 'C', 'N', -1, 0, ALPHA, A, 1, B, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 5
      CALL ZTRSM( 'L', 'U', 'T', 'N', -1, 0, ALPHA, A, 1, B, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 5
      CALL ZTRSM( 'R', 'U', 'N', 'N', -1, 0, ALPHA, A, 1, B, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 5
      CALL ZTRSM( 'R', 'U', 'C', 'N', -1, 0, ALPHA, A, 1, B, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 5
      CALL ZTRSM( 'R', 'U', 'T', 'N', -1, 0, ALPHA, A, 1, B, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 5
      CALL ZTRSM( 'L', 'L', 'N', 'N', -1, 0, ALPHA, A, 1, B, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 5
      CALL ZTRSM( 'L', 'L', 'C', 'N', -1, 0, ALPHA, A, 1, B, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 5
      CALL ZTRSM( 'L', 'L', 'T', 'N', -1, 0, ALPHA, A, 1, B, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 5
      CALL ZTRSM( 'R', 'L', 'N', 'N', -1, 0, ALPHA, A, 1, B, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 5
      CALL ZTRSM( 'R', 'L', 'C', 'N', -1, 0, ALPHA, A, 1, B, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 5
      CALL ZTRSM( 'R', 'L', 'T', 'N', -1, 0, ALPHA, A, 1, B, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 6
      CALL ZTRSM( 'L', 'U', 'N', 'N', 0, -1, ALPHA, A, 1, B, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 6
      CALL ZTRSM( 'L', 'U', 'C', 'N', 0, -1, ALPHA, A, 1, B, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 6
      CALL ZTRSM( 'L', 'U', 'T', 'N', 0, -1, ALPHA, A, 1, B, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 6
      CALL ZTRSM( 'R', 'U', 'N', 'N', 0, -1, ALPHA, A, 1, B, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 6
      CALL ZTRSM( 'R', 'U', 'C', 'N', 0, -1, ALPHA, A, 1, B, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 6
      CALL ZTRSM( 'R', 'U', 'T', 'N', 0, -1, ALPHA, A, 1, B, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 6
      CALL ZTRSM( 'L', 'L', 'N', 'N', 0, -1, ALPHA, A, 1, B, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 6
      CALL ZTRSM( 'L', 'L', 'C', 'N', 0, -1, ALPHA, A, 1, B, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 6
      CALL ZTRSM( 'L', 'L', 'T', 'N', 0, -1, ALPHA, A, 1, B, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 6
      CALL ZTRSM( 'R', 'L', 'N', 'N', 0, -1, ALPHA, A, 1, B, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 6
      CALL ZTRSM( 'R', 'L', 'C', 'N', 0, -1, ALPHA, A, 1, B, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 6
      CALL ZTRSM( 'R', 'L', 'T', 'N', 0, -1, ALPHA, A, 1, B, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 9
      CALL ZTRSM( 'L', 'U', 'N', 'N', 2, 0, ALPHA, A, 1, B, 2 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 9
      CALL ZTRSM( 'L', 'U', 'C', 'N', 2, 0, ALPHA, A, 1, B, 2 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 9
      CALL ZTRSM( 'L', 'U', 'T', 'N', 2, 0, ALPHA, A, 1, B, 2 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 9
      CALL ZTRSM( 'R', 'U', 'N', 'N', 0, 2, ALPHA, A, 1, B, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 9
      CALL ZTRSM(
     $            'R', 'U', 'C', 'N', 0, 2, ALPHA, A, 1, B, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 9
      CALL ZTRSM( 'R', 'U', 'T', 'N', 0, 2, ALPHA, A, 1, B, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 9
      CALL ZTRSM( 'L', 'L', 'N', 'N', 2, 0, ALPHA, A, 1, B, 2 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 9
      CALL ZTRSM( 'L', 'L', 'C', 'N', 2, 0, ALPHA, A, 1, B, 2 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 9
      CALL ZTRSM( 'L', 'L', 'T', 'N', 2, 0, ALPHA, A, 1, B, 2 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 9
      CALL ZTRSM( 'R', 'L', 'N', 'N', 0, 2, ALPHA, A, 1, B, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 9
      CALL ZTRSM( 'R', 'L', 'C', 'N', 0, 2, ALPHA, A, 1, B, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 9
      CALL ZTRSM( 'R', 'L', 'T', 'N', 0, 2, ALPHA, A, 1, B, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 11
      CALL ZTRSM( 'L', 'U', 'N', 'N', 2, 0, ALPHA, A, 2, B, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 11
      CALL ZTRSM( 'L', 'U', 'C', 'N', 2, 0, ALPHA, A, 2, B, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 11
      CALL ZTRSM( 'L', 'U', 'T', 'N', 2, 0, ALPHA, A, 2, B, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 11
      CALL ZTRSM( 'R', 'U', 'N', 'N', 2, 0, ALPHA, A, 1, B, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 11
      CALL ZTRSM( 'R', 'U', 'C', 'N', 2, 0, ALPHA, A, 1, B, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 11
      CALL ZTRSM( 'R', 'U', 'T', 'N', 2, 0, ALPHA, A, 1, B, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 11
      CALL ZTRSM( 'L', 'L', 'N', 'N', 2, 0, ALPHA, A, 2, B, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 11
      CALL ZTRSM( 'L', 'L', 'C', 'N', 2, 0, ALPHA, A, 2, B, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 11
      CALL ZTRSM( 'L', 'L', 'T', 'N', 2, 0, ALPHA, A, 2, B, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 11
      CALL ZTRSM( 'R', 'L', 'N', 'N', 2, 0, ALPHA, A, 1, B, 1 )
      CALL CHKXER( SRNAMT,
     $            INFOT, NOUT, LERR, OK )
      INFOT = 11
      CALL ZTRSM( 'R', 'L', 'C', 'N', 2, 0, ALPHA, A, 1, B, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 11
      CALL ZTRSM( 'R', 'L', 'T', 'N', 2, 0, ALPHA, A, 1, B, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      GO TO 100
*
*     Error exits of ZHERK (real ALPHA/BETA; 'T' is the invalid TRANS).
   60 INFOT = 1
      CALL ZHERK( '/', 'N', 0, 0, RALPHA, A, 1, RBETA, C, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 2
      CALL ZHERK( 'U', 'T', 0, 0, RALPHA, A, 1, RBETA, C, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 3
      CALL ZHERK( 'U', 'N', -1, 0, RALPHA, A, 1, RBETA, C, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 3
      CALL ZHERK( 'U', 'C', -1, 0, RALPHA, A, 1, RBETA, C, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 3
      CALL ZHERK( 'L', 'N', -1, 0, RALPHA, A, 1, RBETA, C, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 3
      CALL ZHERK( 'L', 'C', -1, 0, RALPHA, A, 1, RBETA, C, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 4
      CALL ZHERK( 'U', 'N', 0, -1, RALPHA, A, 1, RBETA, C, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 4
      CALL ZHERK( 'U', 'C', 0, -1, RALPHA, A, 1, RBETA, C, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 4
      CALL ZHERK( 'L', 'N', 0, -1, RALPHA, A, 1, RBETA, C, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 4
      CALL ZHERK( 'L', 'C', 0, -1, RALPHA, A, 1, RBETA, C, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 7
      CALL ZHERK( 'U', 'N', 2, 0, RALPHA, A, 1, RBETA, C, 2 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 7
      CALL ZHERK( 'U', 'C', 0, 2, RALPHA, A, 1, RBETA, C, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 7
      CALL ZHERK( 'L', 'N', 2, 0, RALPHA, A, 1, RBETA, C, 2 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 7
      CALL ZHERK( 'L', 'C', 0, 2, RALPHA, A, 1, RBETA, C, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 10
      CALL ZHERK( 'U', 'N', 2, 0, RALPHA, A, 2, RBETA, C, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 10
      CALL ZHERK( 'U', 'C', 2, 0, RALPHA, A, 1, RBETA,
     $            C, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 10
      CALL ZHERK( 'L', 'N', 2, 0, RALPHA, A, 2, RBETA, C, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 10
      CALL ZHERK( 'L', 'C', 2, 0, RALPHA, A, 1, RBETA, C, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      GO TO 100
*
*     Error exits of ZSYRK (complex ALPHA/BETA; 'C' is invalid TRANS).
   70 INFOT = 1
      CALL ZSYRK( '/', 'N', 0, 0, ALPHA, A, 1, BETA, C, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 2
      CALL ZSYRK( 'U', 'C', 0, 0, ALPHA, A, 1, BETA, C, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 3
      CALL ZSYRK( 'U', 'N', -1, 0, ALPHA, A, 1, BETA, C, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 3
      CALL ZSYRK( 'U', 'T', -1, 0, ALPHA, A, 1, BETA, C, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 3
      CALL ZSYRK( 'L', 'N', -1, 0, ALPHA, A, 1, BETA, C, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 3
      CALL ZSYRK( 'L', 'T', -1, 0, ALPHA, A, 1, BETA, C, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 4
      CALL ZSYRK( 'U', 'N', 0, -1, ALPHA, A, 1, BETA, C, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 4
      CALL ZSYRK( 'U', 'T', 0, -1, ALPHA, A, 1, BETA, C, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 4
      CALL ZSYRK( 'L', 'N', 0, -1, ALPHA, A, 1, BETA, C, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 4
      CALL ZSYRK( 'L', 'T', 0, -1, ALPHA, A, 1, BETA, C, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 7
      CALL ZSYRK( 'U', 'N', 2, 0, ALPHA, A, 1, BETA, C, 2 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 7
      CALL ZSYRK( 'U', 'T', 0, 2, ALPHA, A, 1, BETA, C, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 7
      CALL ZSYRK( 'L', 'N', 2, 0, ALPHA, A, 1, BETA, C, 2 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 7
      CALL ZSYRK( 'L', 'T', 0, 2, ALPHA, A, 1, BETA, C, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 10
      CALL ZSYRK( 'U', 'N', 2, 0, ALPHA, A, 2, BETA, C, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 10
      CALL ZSYRK( 'U', 'T', 2, 0, ALPHA, A, 1, BETA, C, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 10
      CALL ZSYRK( 'L', 'N', 2, 0, ALPHA, A, 2, BETA, C, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 10
      CALL ZSYRK( 'L', 'T', 2, 0, ALPHA, A, 1, BETA, C, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      GO TO 100
*
*     Error exits of ZHER2K.
   80 INFOT = 1
      CALL ZHER2K( '/', 'N', 0, 0, ALPHA, A, 1, B, 1, RBETA, C, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 2
      CALL ZHER2K( 'U', 'T', 0, 0, ALPHA, A, 1, B, 1, RBETA, C, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 3
      CALL ZHER2K( 'U', 'N', -1, 0, ALPHA, A, 1, B, 1, RBETA, C, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 3
      CALL ZHER2K( 'U', 'C', -1, 0, ALPHA, A, 1, B, 1, RBETA, C, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 3
      CALL ZHER2K( 'L', 'N', -1, 0, ALPHA, A, 1, B, 1, RBETA, C, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 3
      CALL ZHER2K( 'L', 'C', -1, 0, ALPHA, A, 1, B, 1, RBETA, C, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 4
      CALL ZHER2K( 'U', 'N', 0, -1, ALPHA, A, 1, B, 1, RBETA, C, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 4
      CALL ZHER2K( 'U', 'C', 0, -1, ALPHA, A, 1, B, 1, RBETA, C, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 4
      CALL ZHER2K( 'L', 'N', 0, -1, ALPHA, A, 1, B, 1, RBETA, C, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 4
      CALL ZHER2K( 'L', 'C', 0, -1, ALPHA, A, 1, B, 1, RBETA, C, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 7
      CALL ZHER2K( 'U', 'N', 2, 0, ALPHA, A, 1, B, 1, RBETA, C, 2 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 7
      CALL ZHER2K( 'U', 'C', 0, 2, ALPHA, A, 1, B, 1, RBETA, C, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 7
      CALL ZHER2K( 'L', 'N', 2, 0, ALPHA, A, 1, B, 1, RBETA, C, 2 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 7
      CALL ZHER2K( 'L', 'C', 0, 2, ALPHA, A, 1, B, 1, RBETA, C, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 9
      CALL ZHER2K( 'U', 'N', 2, 0, ALPHA, A, 2, B, 1, RBETA, C, 2 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 9
      CALL ZHER2K( 'U', 'C', 0, 2, ALPHA, A, 2, B, 1, RBETA, C, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 9
      CALL ZHER2K( 'L', 'N', 2, 0, ALPHA, A, 2, B, 1, RBETA, C, 2 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 9
      CALL ZHER2K( 'L', 'C', 0, 2, ALPHA, A, 2, B, 1, RBETA, C, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 12
      CALL ZHER2K( 'U', 'N', 2, 0, ALPHA, A, 2, B, 2, RBETA, C, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 12
      CALL ZHER2K( 'U', 'C', 2, 0, ALPHA, A, 1, B, 1, RBETA, C, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 12
      CALL ZHER2K( 'L', 'N', 2, 0, ALPHA, A, 2, B, 2, RBETA, C, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 12
      CALL ZHER2K( 'L', 'C', 2, 0, ALPHA, A, 1, B, 1, RBETA, C, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      GO TO 100
*
*     Error exits of ZSYR2K.
   90 INFOT = 1
      CALL ZSYR2K( '/', 'N', 0, 0, ALPHA, A, 1, B, 1, BETA, C, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 2
      CALL ZSYR2K( 'U', 'C', 0, 0, ALPHA, A, 1, B, 1, BETA, C, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 3
      CALL ZSYR2K( 'U', 'N', -1, 0, ALPHA, A, 1, B, 1, BETA, C, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 3
      CALL ZSYR2K( 'U', 'T', -1, 0, ALPHA, A, 1, B, 1, BETA, C, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 3
      CALL ZSYR2K( 'L', 'N', -1, 0, ALPHA, A, 1, B, 1, BETA, C, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 3
      CALL ZSYR2K( 'L', 'T', -1, 0, ALPHA, A, 1, B, 1, BETA, C, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 4
      CALL ZSYR2K( 'U', 'N', 0, -1, ALPHA, A, 1, B, 1, BETA, C, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 4
      CALL ZSYR2K( 'U', 'T', 0, -1, ALPHA, A, 1, B, 1, BETA, C, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 4
      CALL ZSYR2K( 'L', 'N', 0, -1, ALPHA, A, 1, B, 1, BETA, C, 1 )
      CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK )
      INFOT = 4
      CALL ZSYR2K( 'L', 'T', 0, -1, ALPHA, A, 1, B, 1, BETA, C,
1 ) CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) INFOT = 7 CALL ZSYR2K( 'U', 'N', 2, 0, ALPHA, A, 1, B, 1, BETA, C, 2 ) CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) INFOT = 7 CALL ZSYR2K( 'U', 'T', 0, 2, ALPHA, A, 1, B, 1, BETA, C, 1 ) CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) INFOT = 7 CALL ZSYR2K( 'L', 'N', 2, 0, ALPHA, A, 1, B, 1, BETA, C, 2 ) CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) INFOT = 7 CALL ZSYR2K( 'L', 'T', 0, 2, ALPHA, A, 1, B, 1, BETA, C, 1 ) CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) INFOT = 9 CALL ZSYR2K( 'U', 'N', 2, 0, ALPHA, A, 2, B, 1, BETA, C, 2 ) CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) INFOT = 9 CALL ZSYR2K( 'U', 'T', 0, 2, ALPHA, A, 2, B, 1, BETA, C, 1 ) CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) INFOT = 9 CALL ZSYR2K( 'L', 'N', 2, 0, ALPHA, A, 2, B, 1, BETA, C, 2 ) CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) INFOT = 9 CALL ZSYR2K( 'L', 'T', 0, 2, ALPHA, A, 2, B, 1, BETA, C, 1 ) CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) INFOT = 12 CALL ZSYR2K( 'U', 'N', 2, 0, ALPHA, A, 2, B, 2, BETA, C, 1 ) CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) INFOT = 12 CALL ZSYR2K( 'U', 'T', 2, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) INFOT = 12 CALL ZSYR2K( 'L', 'N', 2, 0, ALPHA, A, 2, B, 2, BETA, C, 1 ) CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) INFOT = 12 CALL ZSYR2K( 'L', 'T', 2, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) * 100 IF( OK )THEN WRITE( NOUT, FMT = 9999 )SRNAMT ELSE WRITE( NOUT, FMT = 9998 )SRNAMT END IF RETURN * 9999 FORMAT( ' ', A6, ' PASSED THE TESTS OF ERROR-EXITS' ) 9998 FORMAT( ' ******* ', A6, ' FAILED THE TESTS OF ERROR-EXITS *****', $ '**' ) * * End of ZCHKE. * END SUBROUTINE ZMAKE( TYPE, UPLO, DIAG, M, N, A, NMAX, AA, LDA, RESET, $ TRANSL ) * * Generates values for an M by N matrix A. * Stores the values in the array AA in the data structure required * by the routine, with unwanted elements set to rogue value. 
* * TYPE is 'GE', 'HE', 'SY' or 'TR'. * * Auxiliary routine for test program for Level 3 Blas. * * -- Written on 8-February-1989. * Jack Dongarra, Argonne National Laboratory. * Iain Duff, AERE Harwell. * Jeremy Du Croz, Numerical Algorithms Group Ltd. * Sven Hammarling, Numerical Algorithms Group Ltd. * * .. Parameters .. COMPLEX*16 ZERO, ONE PARAMETER ( ZERO = ( 0.0D0, 0.0D0 ), $ ONE = ( 1.0D0, 0.0D0 ) ) COMPLEX*16 ROGUE PARAMETER ( ROGUE = ( -1.0D10, 1.0D10 ) ) DOUBLE PRECISION RZERO PARAMETER ( RZERO = 0.0D0 ) DOUBLE PRECISION RROGUE PARAMETER ( RROGUE = -1.0D10 ) * .. Scalar Arguments .. COMPLEX*16 TRANSL INTEGER LDA, M, N, NMAX LOGICAL RESET CHARACTER*1 DIAG, UPLO CHARACTER*2 TYPE * .. Array Arguments .. COMPLEX*16 A( NMAX, * ), AA( * ) * .. Local Scalars .. INTEGER I, IBEG, IEND, J, JJ LOGICAL GEN, HER, LOWER, SYM, TRI, UNIT, UPPER * .. External Functions .. COMPLEX*16 ZBEG EXTERNAL ZBEG * .. Intrinsic Functions .. INTRINSIC DCMPLX, DCONJG, DBLE * .. Executable Statements .. GEN = TYPE.EQ.'GE' HER = TYPE.EQ.'HE' SYM = TYPE.EQ.'SY' TRI = TYPE.EQ.'TR' UPPER = ( HER.OR.SYM.OR.TRI ).AND.UPLO.EQ.'U' LOWER = ( HER.OR.SYM.OR.TRI ).AND.UPLO.EQ.'L' UNIT = TRI.AND.DIAG.EQ.'U' * * Generate data in array A. * DO 20 J = 1, N DO 10 I = 1, M IF( GEN.OR.( UPPER.AND.I.LE.J ).OR.( LOWER.AND.I.GE.J ) ) $ THEN A( I, J ) = ZBEG( RESET ) + TRANSL IF( I.NE.J )THEN * Set some elements to zero IF( N.GT.3.AND.J.EQ.N/2 ) $ A( I, J ) = ZERO IF( HER )THEN A( J, I ) = DCONJG( A( I, J ) ) ELSE IF( SYM )THEN A( J, I ) = A( I, J ) ELSE IF( TRI )THEN A( J, I ) = ZERO END IF END IF END IF 10 CONTINUE IF( HER ) $ A( J, J ) = DCMPLX( DBLE( A( J, J ) ), RZERO ) IF( TRI ) $ A( J, J ) = A( J, J ) + ONE IF( UNIT ) $ A( J, J ) = ONE 20 CONTINUE * * Store elements in array AS in data structure required by routine. 
* IF( TYPE.EQ.'GE' )THEN DO 50 J = 1, N DO 30 I = 1, M AA( I + ( J - 1 )*LDA ) = A( I, J ) 30 CONTINUE DO 40 I = M + 1, LDA AA( I + ( J - 1 )*LDA ) = ROGUE 40 CONTINUE 50 CONTINUE ELSE IF( TYPE.EQ.'HE'.OR.TYPE.EQ.'SY'.OR.TYPE.EQ.'TR' )THEN DO 90 J = 1, N IF( UPPER )THEN IBEG = 1 IF( UNIT )THEN IEND = J - 1 ELSE IEND = J END IF ELSE IF( UNIT )THEN IBEG = J + 1 ELSE IBEG = J END IF IEND = N END IF DO 60 I = 1, IBEG - 1 AA( I + ( J - 1 )*LDA ) = ROGUE 60 CONTINUE DO 70 I = IBEG, IEND AA( I + ( J - 1 )*LDA ) = A( I, J ) 70 CONTINUE DO 80 I = IEND + 1, LDA AA( I + ( J - 1 )*LDA ) = ROGUE 80 CONTINUE IF( HER )THEN JJ = J + ( J - 1 )*LDA AA( JJ ) = DCMPLX( DBLE( AA( JJ ) ), RROGUE ) END IF 90 CONTINUE END IF RETURN * * End of ZMAKE. * END SUBROUTINE ZMMCH( TRANSA, TRANSB, M, N, KK, ALPHA, A, LDA, B, LDB, $ BETA, C, LDC, CT, G, CC, LDCC, EPS, ERR, FATAL, $ NOUT, MV ) * * Checks the results of the computational tests. * * Auxiliary routine for test program for Level 3 Blas. * * -- Written on 8-February-1989. * Jack Dongarra, Argonne National Laboratory. * Iain Duff, AERE Harwell. * Jeremy Du Croz, Numerical Algorithms Group Ltd. * Sven Hammarling, Numerical Algorithms Group Ltd. * * .. Parameters .. COMPLEX*16 ZERO PARAMETER ( ZERO = ( 0.0D0, 0.0D0 ) ) DOUBLE PRECISION RZERO, RONE PARAMETER ( RZERO = 0.0D0, RONE = 1.0D0 ) * .. Scalar Arguments .. COMPLEX*16 ALPHA, BETA DOUBLE PRECISION EPS, ERR INTEGER KK, LDA, LDB, LDC, LDCC, M, N, NOUT LOGICAL FATAL, MV CHARACTER*1 TRANSA, TRANSB * .. Array Arguments .. COMPLEX*16 A( LDA, * ), B( LDB, * ), C( LDC, * ), $ CC( LDCC, * ), CT( * ) DOUBLE PRECISION G( * ) * .. Local Scalars .. COMPLEX*16 CL DOUBLE PRECISION ERRI INTEGER I, J, K LOGICAL CTRANA, CTRANB, TRANA, TRANB * .. Intrinsic Functions .. INTRINSIC ABS, DIMAG, DCONJG, MAX, DBLE, SQRT * .. Statement Functions .. DOUBLE PRECISION ABS1 * .. Statement Function definitions .. ABS1( CL ) = ABS( DBLE( CL ) ) + ABS( DIMAG( CL ) ) * .. Executable Statements .. 
TRANA = TRANSA.EQ.'T'.OR.TRANSA.EQ.'C' TRANB = TRANSB.EQ.'T'.OR.TRANSB.EQ.'C' CTRANA = TRANSA.EQ.'C' CTRANB = TRANSB.EQ.'C' * * Compute expected result, one column at a time, in CT using data * in A, B and C. * Compute gauges in G. * DO 220 J = 1, N * DO 10 I = 1, M CT( I ) = ZERO G( I ) = RZERO 10 CONTINUE IF( .NOT.TRANA.AND..NOT.TRANB )THEN DO 30 K = 1, KK DO 20 I = 1, M CT( I ) = CT( I ) + A( I, K )*B( K, J ) G( I ) = G( I ) + ABS1( A( I, K ) )*ABS1( B( K, J ) ) 20 CONTINUE 30 CONTINUE ELSE IF( TRANA.AND..NOT.TRANB )THEN IF( CTRANA )THEN DO 50 K = 1, KK DO 40 I = 1, M CT( I ) = CT( I ) + DCONJG( A( K, I ) )*B( K, J ) G( I ) = G( I ) + ABS1( A( K, I ) )* $ ABS1( B( K, J ) ) 40 CONTINUE 50 CONTINUE ELSE DO 70 K = 1, KK DO 60 I = 1, M CT( I ) = CT( I ) + A( K, I )*B( K, J ) G( I ) = G( I ) + ABS1( A( K, I ) )* $ ABS1( B( K, J ) ) 60 CONTINUE 70 CONTINUE END IF ELSE IF( .NOT.TRANA.AND.TRANB )THEN IF( CTRANB )THEN DO 90 K = 1, KK DO 80 I = 1, M CT( I ) = CT( I ) + A( I, K )*DCONJG( B( J, K ) ) G( I ) = G( I ) + ABS1( A( I, K ) )* $ ABS1( B( J, K ) ) 80 CONTINUE 90 CONTINUE ELSE DO 110 K = 1, KK DO 100 I = 1, M CT( I ) = CT( I ) + A( I, K )*B( J, K ) G( I ) = G( I ) + ABS1( A( I, K ) )* $ ABS1( B( J, K ) ) 100 CONTINUE 110 CONTINUE END IF ELSE IF( TRANA.AND.TRANB )THEN IF( CTRANA )THEN IF( CTRANB )THEN DO 130 K = 1, KK DO 120 I = 1, M CT( I ) = CT( I ) + DCONJG( A( K, I ) )* $ DCONJG( B( J, K ) ) G( I ) = G( I ) + ABS1( A( K, I ) )* $ ABS1( B( J, K ) ) 120 CONTINUE 130 CONTINUE ELSE DO 150 K = 1, KK DO 140 I = 1, M CT( I ) = CT( I ) + DCONJG( A( K, I ) )* $ B( J, K ) G( I ) = G( I ) + ABS1( A( K, I ) )* $ ABS1( B( J, K ) ) 140 CONTINUE 150 CONTINUE END IF ELSE IF( CTRANB )THEN DO 170 K = 1, KK DO 160 I = 1, M CT( I ) = CT( I ) + A( K, I )* $ DCONJG( B( J, K ) ) G( I ) = G( I ) + ABS1( A( K, I ) )* $ ABS1( B( J, K ) ) 160 CONTINUE 170 CONTINUE ELSE DO 190 K = 1, KK DO 180 I = 1, M CT( I ) = CT( I ) + A( K, I )*B( J, K ) G( I ) = G( I ) + ABS1( A( K, I ) )* $ ABS1( B( 
J, K ) ) 180 CONTINUE 190 CONTINUE END IF END IF END IF DO 200 I = 1, M CT( I ) = ALPHA*CT( I ) + BETA*C( I, J ) G( I ) = ABS1( ALPHA )*G( I ) + $ ABS1( BETA )*ABS1( C( I, J ) ) 200 CONTINUE * * Compute the error ratio for this result. * ERR = ZERO DO 210 I = 1, M ERRI = ABS1( CT( I ) - CC( I, J ) )/EPS IF( G( I ).NE.RZERO ) $ ERRI = ERRI/G( I ) ERR = MAX( ERR, ERRI ) IF( ERR*SQRT( EPS ).GE.RONE ) $ GO TO 230 210 CONTINUE * 220 CONTINUE * * If the loop completes, all results are at least half accurate. GO TO 250 * * Report fatal error. * 230 FATAL = .TRUE. WRITE( NOUT, FMT = 9999 ) DO 240 I = 1, M IF( MV )THEN WRITE( NOUT, FMT = 9998 )I, CT( I ), CC( I, J ) ELSE WRITE( NOUT, FMT = 9998 )I, CC( I, J ), CT( I ) END IF 240 CONTINUE IF( N.GT.1 ) $ WRITE( NOUT, FMT = 9997 )J * 250 CONTINUE RETURN * 9999 FORMAT( ' ******* FATAL ERROR - COMPUTED RESULT IS LESS THAN HAL', $ 'F ACCURATE *******', /' EXPECTED RE', $ 'SULT COMPUTED RESULT' ) 9998 FORMAT( 1X, I7, 2( ' (', G15.6, ',', G15.6, ')' ) ) 9997 FORMAT( ' THESE ARE THE RESULTS FOR COLUMN ', I3 ) * * End of ZMMCH. * END LOGICAL FUNCTION LZE( RI, RJ, LR ) * * Tests if two arrays are identical. * * Auxiliary routine for test program for Level 3 Blas. * * -- Written on 8-February-1989. * Jack Dongarra, Argonne National Laboratory. * Iain Duff, AERE Harwell. * Jeremy Du Croz, Numerical Algorithms Group Ltd. * Sven Hammarling, Numerical Algorithms Group Ltd. * * .. Scalar Arguments .. INTEGER LR * .. Array Arguments .. COMPLEX*16 RI( * ), RJ( * ) * .. Local Scalars .. INTEGER I * .. Executable Statements .. DO 10 I = 1, LR IF( RI( I ).NE.RJ( I ) ) $ GO TO 20 10 CONTINUE LZE = .TRUE. GO TO 30 20 CONTINUE LZE = .FALSE. 30 RETURN * * End of LZE. * END LOGICAL FUNCTION LZERES( TYPE, UPLO, M, N, AA, AS, LDA ) * * Tests if selected elements in two arrays are equal. * * TYPE is 'GE' or 'HE' or 'SY'. * * Auxiliary routine for test program for Level 3 Blas. * * -- Written on 8-February-1989. 
* Jack Dongarra, Argonne National Laboratory. * Iain Duff, AERE Harwell. * Jeremy Du Croz, Numerical Algorithms Group Ltd. * Sven Hammarling, Numerical Algorithms Group Ltd. * * .. Scalar Arguments .. INTEGER LDA, M, N CHARACTER*1 UPLO CHARACTER*2 TYPE * .. Array Arguments .. COMPLEX*16 AA( LDA, * ), AS( LDA, * ) * .. Local Scalars .. INTEGER I, IBEG, IEND, J LOGICAL UPPER * .. Executable Statements .. UPPER = UPLO.EQ.'U' IF( TYPE.EQ.'GE' )THEN DO 20 J = 1, N DO 10 I = M + 1, LDA IF( AA( I, J ).NE.AS( I, J ) ) $ GO TO 70 10 CONTINUE 20 CONTINUE ELSE IF( TYPE.EQ.'HE'.OR.TYPE.EQ.'SY' )THEN DO 50 J = 1, N IF( UPPER )THEN IBEG = 1 IEND = J ELSE IBEG = J IEND = N END IF DO 30 I = 1, IBEG - 1 IF( AA( I, J ).NE.AS( I, J ) ) $ GO TO 70 30 CONTINUE DO 40 I = IEND + 1, LDA IF( AA( I, J ).NE.AS( I, J ) ) $ GO TO 70 40 CONTINUE 50 CONTINUE END IF * LZERES = .TRUE. GO TO 80 70 CONTINUE LZERES = .FALSE. 80 RETURN * * End of LZERES. * END COMPLEX*16 FUNCTION ZBEG( RESET ) * * Generates complex numbers as pairs of random numbers uniformly * distributed between -0.5 and 0.5. * * Auxiliary routine for test program for Level 3 Blas. * * -- Written on 8-February-1989. * Jack Dongarra, Argonne National Laboratory. * Iain Duff, AERE Harwell. * Jeremy Du Croz, Numerical Algorithms Group Ltd. * Sven Hammarling, Numerical Algorithms Group Ltd. * * .. Scalar Arguments .. LOGICAL RESET * .. Local Scalars .. INTEGER I, IC, J, MI, MJ * .. Save statement .. SAVE I, IC, J, MI, MJ * .. Intrinsic Functions .. INTRINSIC DCMPLX * .. Executable Statements .. IF( RESET )THEN * Initialize local variables. MI = 891 MJ = 457 I = 7 J = 7 IC = 0 RESET = .FALSE. END IF * * The sequence of values of I or J is bounded between 1 and 999. * If initial I or J = 1,2,3,6,7 or 9, the period will be 50. * If initial I or J = 4 or 8, the period will be 25. * If initial I or J = 5, the period will be 10. * IC is used to break up the period by skipping 1 value of I or J * in 6. 
* IC = IC + 1 10 I = I*MI J = J*MJ I = I - 1000*( I/1000 ) J = J - 1000*( J/1000 ) IF( IC.GE.5 )THEN IC = 0 GO TO 10 END IF ZBEG = DCMPLX( ( I - 500 )/1001.0D0, ( J - 500 )/1001.0D0 ) RETURN * * End of ZBEG. * END DOUBLE PRECISION FUNCTION DDIFF( X, Y ) * * Auxiliary routine for test program for Level 3 Blas. * * -- Written on 8-February-1989. * Jack Dongarra, Argonne National Laboratory. * Iain Duff, AERE Harwell. * Jeremy Du Croz, Numerical Algorithms Group Ltd. * Sven Hammarling, Numerical Algorithms Group Ltd. * * .. Scalar Arguments .. DOUBLE PRECISION X, Y * .. Executable Statements .. DDIFF = X - Y RETURN * * End of DDIFF. * END SUBROUTINE CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) * * Tests whether XERBLA has detected an error when it should. * * Auxiliary routine for test program for Level 3 Blas. * * -- Written on 8-February-1989. * Jack Dongarra, Argonne National Laboratory. * Iain Duff, AERE Harwell. * Jeremy Du Croz, Numerical Algorithms Group Ltd. * Sven Hammarling, Numerical Algorithms Group Ltd. * * .. Scalar Arguments .. INTEGER INFOT, NOUT LOGICAL LERR, OK CHARACTER*6 SRNAMT * .. Executable Statements .. IF( .NOT.LERR )THEN WRITE( NOUT, FMT = 9999 )INFOT, SRNAMT OK = .FALSE. END IF LERR = .FALSE. RETURN * 9999 FORMAT( ' ***** ILLEGAL VALUE OF PARAMETER NUMBER ', I2, ' NOT D', $ 'ETECTED BY ', A6, ' *****' ) * * End of CHKXER. * END SUBROUTINE XERBLA( SRNAME, INFO ) * * This is a special version of XERBLA to be used only as part of * the test program for testing error exits from the Level 3 BLAS * routines. * * XERBLA is an error handler for the Level 3 BLAS routines. * * It is called by the Level 3 BLAS routines if an input parameter is * invalid. * * Auxiliary routine for test program for Level 3 Blas. * * -- Written on 8-February-1989. * Jack Dongarra, Argonne National Laboratory. * Iain Duff, AERE Harwell. * Jeremy Du Croz, Numerical Algorithms Group Ltd. * Sven Hammarling, Numerical Algorithms Group Ltd. * * .. Scalar Arguments .. 
INTEGER INFO CHARACTER*6 SRNAME * .. Scalars in Common .. INTEGER INFOT, NOUT LOGICAL LERR, OK CHARACTER*6 SRNAMT * .. Common blocks .. COMMON /INFOC/INFOT, NOUT, OK, LERR COMMON /SRNAMC/SRNAMT * .. Executable Statements .. LERR = .TRUE. IF( INFO.NE.INFOT )THEN IF( INFOT.NE.0 )THEN WRITE( NOUT, FMT = 9999 )INFO, INFOT ELSE WRITE( NOUT, FMT = 9997 )INFO END IF OK = .FALSE. END IF IF( SRNAME.NE.SRNAMT )THEN WRITE( NOUT, FMT = 9998 )SRNAME, SRNAMT OK = .FALSE. END IF RETURN * 9999 FORMAT( ' ******* XERBLA WAS CALLED WITH INFO = ', I6, ' INSTEAD', $ ' OF ', I2, ' *******' ) 9998 FORMAT( ' ******* XERBLA WAS CALLED WITH SRNAME = ', A6, ' INSTE', $ 'AD OF ', A6, ' *******' ) 9997 FORMAT( ' ******* XERBLA WAS CALLED WITH INFO = ', I6, $ ' *******' ) * * End of XERBLA * END
gpl-2.0
mrlambchop/imx23-kernel
drivers/mtd/ubi/gluebi.c
133
14667
/* * Copyright (c) International Business Machines Corp., 2006 * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See * the GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * Author: Artem Bityutskiy (Битюцкий Артём), Joern Engel */ /* * This is a small driver which implements fake MTD devices on top of UBI * volumes. This sounds strange, but it is in fact quite useful to make * MTD-oriented software (including all the legacy software) work on top of * UBI. * * Gluebi emulates MTD devices of "MTD_UBIVOLUME" type. Their minimal I/O unit * size (@mtd->writesize) is equivalent to the UBI minimal I/O unit. The * eraseblock size is equivalent to the logical eraseblock size of the volume. */ #include <linux/err.h> #include <linux/list.h> #include <linux/slab.h> #include <linux/sched.h> #include <linux/math64.h> #include <linux/module.h> #include <linux/mutex.h> #include <linux/mtd/ubi.h> #include <linux/mtd/mtd.h> #include "ubi-media.h" #define err_msg(fmt, ...) \ printk(KERN_DEBUG "gluebi (pid %d): %s: " fmt "\n", \ current->pid, __func__, ##__VA_ARGS__) /** * struct gluebi_device - a gluebi device description data structure. 
* @mtd: emulated MTD device description object * @refcnt: gluebi device reference count * @desc: UBI volume descriptor * @ubi_num: UBI device number this gluebi device works on * @vol_id: ID of UBI volume this gluebi device works on * @list: link in a list of gluebi devices */ struct gluebi_device { struct mtd_info mtd; int refcnt; struct ubi_volume_desc *desc; int ubi_num; int vol_id; struct list_head list; }; /* List of all gluebi devices */ static LIST_HEAD(gluebi_devices); static DEFINE_MUTEX(devices_mutex); /** * find_gluebi_nolock - find a gluebi device. * @ubi_num: UBI device number * @vol_id: volume ID * * This function seraches for gluebi device corresponding to UBI device * @ubi_num and UBI volume @vol_id. Returns the gluebi device description * object in case of success and %NULL in case of failure. The caller has to * have the &devices_mutex locked. */ static struct gluebi_device *find_gluebi_nolock(int ubi_num, int vol_id) { struct gluebi_device *gluebi; list_for_each_entry(gluebi, &gluebi_devices, list) if (gluebi->ubi_num == ubi_num && gluebi->vol_id == vol_id) return gluebi; return NULL; } /** * gluebi_get_device - get MTD device reference. * @mtd: the MTD device description object * * This function is called every time the MTD device is being opened and * implements the MTD get_device() operation. Returns zero in case of success * and a negative error code in case of failure. */ static int gluebi_get_device(struct mtd_info *mtd) { struct gluebi_device *gluebi; int ubi_mode = UBI_READONLY; if (!try_module_get(THIS_MODULE)) return -ENODEV; if (mtd->flags & MTD_WRITEABLE) ubi_mode = UBI_READWRITE; gluebi = container_of(mtd, struct gluebi_device, mtd); mutex_lock(&devices_mutex); if (gluebi->refcnt > 0) { /* * The MTD device is already referenced and this is just one * more reference. MTD allows many users to open the same * volume simultaneously and do not distinguish between * readers/writers/exclusive openers as UBI does. 
So we do not * open the UBI volume again - just increase the reference * counter and return. */ gluebi->refcnt += 1; mutex_unlock(&devices_mutex); return 0; } /* * This is the first reference to this UBI volume via the MTD device * interface. Open the corresponding volume in read-write mode. */ gluebi->desc = ubi_open_volume(gluebi->ubi_num, gluebi->vol_id, ubi_mode); if (IS_ERR(gluebi->desc)) { mutex_unlock(&devices_mutex); module_put(THIS_MODULE); return PTR_ERR(gluebi->desc); } gluebi->refcnt += 1; mutex_unlock(&devices_mutex); return 0; } /** * gluebi_put_device - put MTD device reference. * @mtd: the MTD device description object * * This function is called every time the MTD device is being put. Returns * zero in case of success and a negative error code in case of failure. */ static void gluebi_put_device(struct mtd_info *mtd) { struct gluebi_device *gluebi; gluebi = container_of(mtd, struct gluebi_device, mtd); mutex_lock(&devices_mutex); gluebi->refcnt -= 1; if (gluebi->refcnt == 0) ubi_close_volume(gluebi->desc); module_put(THIS_MODULE); mutex_unlock(&devices_mutex); } /** * gluebi_read - read operation of emulated MTD devices. * @mtd: MTD device description object * @from: absolute offset from where to read * @len: how many bytes to read * @retlen: count of read bytes is returned here * @buf: buffer to store the read data * * This function returns zero in case of success and a negative error code in * case of failure. 
*/ static int gluebi_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, unsigned char *buf) { int err = 0, lnum, offs, total_read; struct gluebi_device *gluebi; gluebi = container_of(mtd, struct gluebi_device, mtd); lnum = div_u64_rem(from, mtd->erasesize, &offs); total_read = len; while (total_read) { size_t to_read = mtd->erasesize - offs; if (to_read > total_read) to_read = total_read; err = ubi_read(gluebi->desc, lnum, buf, offs, to_read); if (err) break; lnum += 1; offs = 0; total_read -= to_read; buf += to_read; } *retlen = len - total_read; return err; } /** * gluebi_write - write operation of emulated MTD devices. * @mtd: MTD device description object * @to: absolute offset where to write * @len: how many bytes to write * @retlen: count of written bytes is returned here * @buf: buffer with data to write * * This function returns zero in case of success and a negative error code in * case of failure. */ static int gluebi_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf) { int err = 0, lnum, offs, total_written; struct gluebi_device *gluebi; gluebi = container_of(mtd, struct gluebi_device, mtd); lnum = div_u64_rem(to, mtd->erasesize, &offs); if (len % mtd->writesize || offs % mtd->writesize) return -EINVAL; total_written = len; while (total_written) { size_t to_write = mtd->erasesize - offs; if (to_write > total_written) to_write = total_written; err = ubi_leb_write(gluebi->desc, lnum, buf, offs, to_write); if (err) break; lnum += 1; offs = 0; total_written -= to_write; buf += to_write; } *retlen = len - total_written; return err; } /** * gluebi_erase - erase operation of emulated MTD devices. * @mtd: the MTD device description object * @instr: the erase operation description * * This function calls the erase callback when finishes. Returns zero in case * of success and a negative error code in case of failure. 
*/ static int gluebi_erase(struct mtd_info *mtd, struct erase_info *instr) { int err, i, lnum, count; struct gluebi_device *gluebi; if (mtd_mod_by_ws(instr->addr, mtd) || mtd_mod_by_ws(instr->len, mtd)) return -EINVAL; lnum = mtd_div_by_eb(instr->addr, mtd); count = mtd_div_by_eb(instr->len, mtd); gluebi = container_of(mtd, struct gluebi_device, mtd); for (i = 0; i < count - 1; i++) { err = ubi_leb_unmap(gluebi->desc, lnum + i); if (err) goto out_err; } /* * MTD erase operations are synchronous, so we have to make sure the * physical eraseblock is wiped out. * * Thus, perform leb_erase instead of leb_unmap operation - leb_erase * will wait for the end of operations */ err = ubi_leb_erase(gluebi->desc, lnum + i); if (err) goto out_err; instr->state = MTD_ERASE_DONE; mtd_erase_callback(instr); return 0; out_err: instr->state = MTD_ERASE_FAILED; instr->fail_addr = (long long)lnum * mtd->erasesize; return err; } /** * gluebi_create - create a gluebi device for an UBI volume. * @di: UBI device description object * @vi: UBI volume description object * * This function is called when a new UBI volume is created in order to create * corresponding fake MTD device. Returns zero in case of success and a * negative error code in case of failure. 
*/ static int gluebi_create(struct ubi_device_info *di, struct ubi_volume_info *vi) { struct gluebi_device *gluebi, *g; struct mtd_info *mtd; gluebi = kzalloc(sizeof(struct gluebi_device), GFP_KERNEL); if (!gluebi) return -ENOMEM; mtd = &gluebi->mtd; mtd->name = kmemdup(vi->name, vi->name_len + 1, GFP_KERNEL); if (!mtd->name) { kfree(gluebi); return -ENOMEM; } gluebi->vol_id = vi->vol_id; gluebi->ubi_num = vi->ubi_num; mtd->type = MTD_UBIVOLUME; if (!di->ro_mode) mtd->flags = MTD_WRITEABLE; mtd->owner = THIS_MODULE; mtd->writesize = di->min_io_size; mtd->erasesize = vi->usable_leb_size; mtd->_read = gluebi_read; mtd->_write = gluebi_write; mtd->_erase = gluebi_erase; mtd->_get_device = gluebi_get_device; mtd->_put_device = gluebi_put_device; /* * In case of dynamic a volume, MTD device size is just volume size. In * case of a static volume the size is equivalent to the amount of data * bytes. */ if (vi->vol_type == UBI_DYNAMIC_VOLUME) mtd->size = (unsigned long long)vi->usable_leb_size * vi->size; else mtd->size = vi->used_bytes; /* Just a sanity check - make sure this gluebi device does not exist */ mutex_lock(&devices_mutex); g = find_gluebi_nolock(vi->ubi_num, vi->vol_id); if (g) err_msg("gluebi MTD device %d form UBI device %d volume %d " "already exists", g->mtd.index, vi->ubi_num, vi->vol_id); mutex_unlock(&devices_mutex); if (mtd_device_register(mtd, NULL, 0)) { err_msg("cannot add MTD device"); kfree(mtd->name); kfree(gluebi); return -ENFILE; } mutex_lock(&devices_mutex); list_add_tail(&gluebi->list, &gluebi_devices); mutex_unlock(&devices_mutex); return 0; } /** * gluebi_remove - remove a gluebi device. * @vi: UBI volume description object * * This function is called when an UBI volume is removed and it removes * corresponding fake MTD device. Returns zero in case of success and a * negative error code in case of failure. 
*/ static int gluebi_remove(struct ubi_volume_info *vi) { int err = 0; struct mtd_info *mtd; struct gluebi_device *gluebi; mutex_lock(&devices_mutex); gluebi = find_gluebi_nolock(vi->ubi_num, vi->vol_id); if (!gluebi) { err_msg("got remove notification for unknown UBI device %d " "volume %d", vi->ubi_num, vi->vol_id); err = -ENOENT; } else if (gluebi->refcnt) err = -EBUSY; else list_del(&gluebi->list); mutex_unlock(&devices_mutex); if (err) return err; mtd = &gluebi->mtd; err = mtd_device_unregister(mtd); if (err) { err_msg("cannot remove fake MTD device %d, UBI device %d, " "volume %d, error %d", mtd->index, gluebi->ubi_num, gluebi->vol_id, err); mutex_lock(&devices_mutex); list_add_tail(&gluebi->list, &gluebi_devices); mutex_unlock(&devices_mutex); return err; } kfree(mtd->name); kfree(gluebi); return 0; } /** * gluebi_updated - UBI volume was updated notifier. * @vi: volume info structure * * This function is called every time an UBI volume is updated. It does nothing * if te volume @vol is dynamic, and changes MTD device size if the * volume is static. This is needed because static volumes cannot be read past * data they contain. This function returns zero in case of success and a * negative error code in case of error. */ static int gluebi_updated(struct ubi_volume_info *vi) { struct gluebi_device *gluebi; mutex_lock(&devices_mutex); gluebi = find_gluebi_nolock(vi->ubi_num, vi->vol_id); if (!gluebi) { mutex_unlock(&devices_mutex); err_msg("got update notification for unknown UBI device %d " "volume %d", vi->ubi_num, vi->vol_id); return -ENOENT; } if (vi->vol_type == UBI_STATIC_VOLUME) gluebi->mtd.size = vi->used_bytes; mutex_unlock(&devices_mutex); return 0; } /** * gluebi_resized - UBI volume was re-sized notifier. * @vi: volume info structure * * This function is called every time an UBI volume is re-size. It changes the * corresponding fake MTD device size. This function returns zero in case of * success and a negative error code in case of error. 
*/ static int gluebi_resized(struct ubi_volume_info *vi) { struct gluebi_device *gluebi; mutex_lock(&devices_mutex); gluebi = find_gluebi_nolock(vi->ubi_num, vi->vol_id); if (!gluebi) { mutex_unlock(&devices_mutex); err_msg("got update notification for unknown UBI device %d " "volume %d", vi->ubi_num, vi->vol_id); return -ENOENT; } gluebi->mtd.size = vi->used_bytes; mutex_unlock(&devices_mutex); return 0; } /** * gluebi_notify - UBI notification handler. * @nb: registered notifier block * @l: notification type * @ptr: pointer to the &struct ubi_notification object */ static int gluebi_notify(struct notifier_block *nb, unsigned long l, void *ns_ptr) { struct ubi_notification *nt = ns_ptr; switch (l) { case UBI_VOLUME_ADDED: gluebi_create(&nt->di, &nt->vi); break; case UBI_VOLUME_REMOVED: gluebi_remove(&nt->vi); break; case UBI_VOLUME_RESIZED: gluebi_resized(&nt->vi); break; case UBI_VOLUME_UPDATED: gluebi_updated(&nt->vi); break; default: break; } return NOTIFY_OK; } static struct notifier_block gluebi_notifier = { .notifier_call = gluebi_notify, }; static int __init ubi_gluebi_init(void) { return ubi_register_volume_notifier(&gluebi_notifier, 0); } static void __exit ubi_gluebi_exit(void) { struct gluebi_device *gluebi, *g; list_for_each_entry_safe(gluebi, g, &gluebi_devices, list) { int err; struct mtd_info *mtd = &gluebi->mtd; err = mtd_device_unregister(mtd); if (err) err_msg("error %d while removing gluebi MTD device %d, " "UBI device %d, volume %d - ignoring", err, mtd->index, gluebi->ubi_num, gluebi->vol_id); kfree(mtd->name); kfree(gluebi); } ubi_unregister_volume_notifier(&gluebi_notifier); } module_init(ubi_gluebi_init); module_exit(ubi_gluebi_exit); MODULE_DESCRIPTION("MTD emulation layer over UBI volumes"); MODULE_AUTHOR("Artem Bityutskiy, Joern Engel"); MODULE_LICENSE("GPL");
gpl-2.0
flyfire/linux
drivers/net/ethernet/ibm/emac/phy.c
645
12814
/* * drivers/net/ibm_newemac/phy.c * * Driver for PowerPC 4xx on-chip ethernet controller, PHY support. * Borrowed from sungem_phy.c, though I only kept the generic MII * driver for now. * * This file should be shared with other drivers or eventually * merged as the "low level" part of miilib * * Copyright 2007 Benjamin Herrenschmidt, IBM Corp. * <benh@kernel.crashing.org> * * Based on the arch/ppc version of the driver: * * (c) 2003, Benjamin Herrenscmidt (benh@kernel.crashing.org) * (c) 2004-2005, Eugene Surovegin <ebs@ebshome.net> * */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/netdevice.h> #include <linux/mii.h> #include <linux/ethtool.h> #include <linux/delay.h> #include "emac.h" #include "phy.h" #define phy_read _phy_read #define phy_write _phy_write static inline int _phy_read(struct mii_phy *phy, int reg) { return phy->mdio_read(phy->dev, phy->address, reg); } static inline void _phy_write(struct mii_phy *phy, int reg, int val) { phy->mdio_write(phy->dev, phy->address, reg, val); } static inline int gpcs_phy_read(struct mii_phy *phy, int reg) { return phy->mdio_read(phy->dev, phy->gpcs_address, reg); } static inline void gpcs_phy_write(struct mii_phy *phy, int reg, int val) { phy->mdio_write(phy->dev, phy->gpcs_address, reg, val); } int emac_mii_reset_phy(struct mii_phy *phy) { int val; int limit = 10000; val = phy_read(phy, MII_BMCR); val &= ~(BMCR_ISOLATE | BMCR_ANENABLE); val |= BMCR_RESET; phy_write(phy, MII_BMCR, val); udelay(300); while (--limit) { val = phy_read(phy, MII_BMCR); if (val >= 0 && (val & BMCR_RESET) == 0) break; udelay(10); } if ((val & BMCR_ISOLATE) && limit > 0) phy_write(phy, MII_BMCR, val & ~BMCR_ISOLATE); return limit <= 0; } int emac_mii_reset_gpcs(struct mii_phy *phy) { int val; int limit = 10000; val = gpcs_phy_read(phy, MII_BMCR); val &= ~(BMCR_ISOLATE | BMCR_ANENABLE); val |= BMCR_RESET; gpcs_phy_write(phy, MII_BMCR, val); udelay(300); while (--limit) { val = gpcs_phy_read(phy, 
MII_BMCR); if (val >= 0 && (val & BMCR_RESET) == 0) break; udelay(10); } if ((val & BMCR_ISOLATE) && limit > 0) gpcs_phy_write(phy, MII_BMCR, val & ~BMCR_ISOLATE); if (limit > 0 && phy->mode == PHY_MODE_SGMII) { /* Configure GPCS interface to recommended setting for SGMII */ gpcs_phy_write(phy, 0x04, 0x8120); /* AsymPause, FDX */ gpcs_phy_write(phy, 0x07, 0x2801); /* msg_pg, toggle */ gpcs_phy_write(phy, 0x00, 0x0140); /* 1Gbps, FDX */ } return limit <= 0; } static int genmii_setup_aneg(struct mii_phy *phy, u32 advertise) { int ctl, adv; phy->autoneg = AUTONEG_ENABLE; phy->speed = SPEED_10; phy->duplex = DUPLEX_HALF; phy->pause = phy->asym_pause = 0; phy->advertising = advertise; ctl = phy_read(phy, MII_BMCR); if (ctl < 0) return ctl; ctl &= ~(BMCR_FULLDPLX | BMCR_SPEED100 | BMCR_SPEED1000 | BMCR_ANENABLE); /* First clear the PHY */ phy_write(phy, MII_BMCR, ctl); /* Setup standard advertise */ adv = phy_read(phy, MII_ADVERTISE); if (adv < 0) return adv; adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM); if (advertise & ADVERTISED_10baseT_Half) adv |= ADVERTISE_10HALF; if (advertise & ADVERTISED_10baseT_Full) adv |= ADVERTISE_10FULL; if (advertise & ADVERTISED_100baseT_Half) adv |= ADVERTISE_100HALF; if (advertise & ADVERTISED_100baseT_Full) adv |= ADVERTISE_100FULL; if (advertise & ADVERTISED_Pause) adv |= ADVERTISE_PAUSE_CAP; if (advertise & ADVERTISED_Asym_Pause) adv |= ADVERTISE_PAUSE_ASYM; phy_write(phy, MII_ADVERTISE, adv); if (phy->features & (SUPPORTED_1000baseT_Full | SUPPORTED_1000baseT_Half)) { adv = phy_read(phy, MII_CTRL1000); if (adv < 0) return adv; adv &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF); if (advertise & ADVERTISED_1000baseT_Full) adv |= ADVERTISE_1000FULL; if (advertise & ADVERTISED_1000baseT_Half) adv |= ADVERTISE_1000HALF; phy_write(phy, MII_CTRL1000, adv); } /* Start/Restart aneg */ ctl = phy_read(phy, MII_BMCR); ctl |= (BMCR_ANENABLE | BMCR_ANRESTART); phy_write(phy, MII_BMCR, ctl); return 0; } 
static int genmii_setup_forced(struct mii_phy *phy, int speed, int fd) { int ctl; phy->autoneg = AUTONEG_DISABLE; phy->speed = speed; phy->duplex = fd; phy->pause = phy->asym_pause = 0; ctl = phy_read(phy, MII_BMCR); if (ctl < 0) return ctl; ctl &= ~(BMCR_FULLDPLX | BMCR_SPEED100 | BMCR_SPEED1000 | BMCR_ANENABLE); /* First clear the PHY */ phy_write(phy, MII_BMCR, ctl | BMCR_RESET); /* Select speed & duplex */ switch (speed) { case SPEED_10: break; case SPEED_100: ctl |= BMCR_SPEED100; break; case SPEED_1000: ctl |= BMCR_SPEED1000; break; default: return -EINVAL; } if (fd == DUPLEX_FULL) ctl |= BMCR_FULLDPLX; phy_write(phy, MII_BMCR, ctl); return 0; } static int genmii_poll_link(struct mii_phy *phy) { int status; /* Clear latched value with dummy read */ phy_read(phy, MII_BMSR); status = phy_read(phy, MII_BMSR); if (status < 0 || (status & BMSR_LSTATUS) == 0) return 0; if (phy->autoneg == AUTONEG_ENABLE && !(status & BMSR_ANEGCOMPLETE)) return 0; return 1; } static int genmii_read_link(struct mii_phy *phy) { if (phy->autoneg == AUTONEG_ENABLE) { int glpa = 0; int lpa = phy_read(phy, MII_LPA) & phy_read(phy, MII_ADVERTISE); if (lpa < 0) return lpa; if (phy->features & (SUPPORTED_1000baseT_Full | SUPPORTED_1000baseT_Half)) { int adv = phy_read(phy, MII_CTRL1000); glpa = phy_read(phy, MII_STAT1000); if (glpa < 0 || adv < 0) return adv; glpa &= adv << 2; } phy->speed = SPEED_10; phy->duplex = DUPLEX_HALF; phy->pause = phy->asym_pause = 0; if (glpa & (LPA_1000FULL | LPA_1000HALF)) { phy->speed = SPEED_1000; if (glpa & LPA_1000FULL) phy->duplex = DUPLEX_FULL; } else if (lpa & (LPA_100FULL | LPA_100HALF)) { phy->speed = SPEED_100; if (lpa & LPA_100FULL) phy->duplex = DUPLEX_FULL; } else if (lpa & LPA_10FULL) phy->duplex = DUPLEX_FULL; if (phy->duplex == DUPLEX_FULL) { phy->pause = lpa & LPA_PAUSE_CAP ? 1 : 0; phy->asym_pause = lpa & LPA_PAUSE_ASYM ? 
1 : 0; } } else { int bmcr = phy_read(phy, MII_BMCR); if (bmcr < 0) return bmcr; if (bmcr & BMCR_FULLDPLX) phy->duplex = DUPLEX_FULL; else phy->duplex = DUPLEX_HALF; if (bmcr & BMCR_SPEED1000) phy->speed = SPEED_1000; else if (bmcr & BMCR_SPEED100) phy->speed = SPEED_100; else phy->speed = SPEED_10; phy->pause = phy->asym_pause = 0; } return 0; } /* Generic implementation for most 10/100/1000 PHYs */ static struct mii_phy_ops generic_phy_ops = { .setup_aneg = genmii_setup_aneg, .setup_forced = genmii_setup_forced, .poll_link = genmii_poll_link, .read_link = genmii_read_link }; static struct mii_phy_def genmii_phy_def = { .phy_id = 0x00000000, .phy_id_mask = 0x00000000, .name = "Generic MII", .ops = &generic_phy_ops }; /* CIS8201 */ #define MII_CIS8201_10BTCSR 0x16 #define TENBTCSR_ECHO_DISABLE 0x2000 #define MII_CIS8201_EPCR 0x17 #define EPCR_MODE_MASK 0x3000 #define EPCR_GMII_MODE 0x0000 #define EPCR_RGMII_MODE 0x1000 #define EPCR_TBI_MODE 0x2000 #define EPCR_RTBI_MODE 0x3000 #define MII_CIS8201_ACSR 0x1c #define ACSR_PIN_PRIO_SELECT 0x0004 static int cis8201_init(struct mii_phy *phy) { int epcr; epcr = phy_read(phy, MII_CIS8201_EPCR); if (epcr < 0) return epcr; epcr &= ~EPCR_MODE_MASK; switch (phy->mode) { case PHY_MODE_TBI: epcr |= EPCR_TBI_MODE; break; case PHY_MODE_RTBI: epcr |= EPCR_RTBI_MODE; break; case PHY_MODE_GMII: epcr |= EPCR_GMII_MODE; break; case PHY_MODE_RGMII: default: epcr |= EPCR_RGMII_MODE; } phy_write(phy, MII_CIS8201_EPCR, epcr); /* MII regs override strap pins */ phy_write(phy, MII_CIS8201_ACSR, phy_read(phy, MII_CIS8201_ACSR) | ACSR_PIN_PRIO_SELECT); /* Disable TX_EN -> CRS echo mode, otherwise 10/HDX doesn't work */ phy_write(phy, MII_CIS8201_10BTCSR, phy_read(phy, MII_CIS8201_10BTCSR) | TENBTCSR_ECHO_DISABLE); return 0; } static struct mii_phy_ops cis8201_phy_ops = { .init = cis8201_init, .setup_aneg = genmii_setup_aneg, .setup_forced = genmii_setup_forced, .poll_link = genmii_poll_link, .read_link = genmii_read_link }; static struct 
mii_phy_def cis8201_phy_def = { .phy_id = 0x000fc410, .phy_id_mask = 0x000ffff0, .name = "CIS8201 Gigabit Ethernet", .ops = &cis8201_phy_ops }; static struct mii_phy_def bcm5248_phy_def = { .phy_id = 0x0143bc00, .phy_id_mask = 0x0ffffff0, .name = "BCM5248 10/100 SMII Ethernet", .ops = &generic_phy_ops }; static int m88e1111_init(struct mii_phy *phy) { pr_debug("%s: Marvell 88E1111 Ethernet\n", __func__); phy_write(phy, 0x14, 0x0ce3); phy_write(phy, 0x18, 0x4101); phy_write(phy, 0x09, 0x0e00); phy_write(phy, 0x04, 0x01e1); phy_write(phy, 0x00, 0x9140); phy_write(phy, 0x00, 0x1140); return 0; } static int m88e1112_init(struct mii_phy *phy) { /* * Marvell 88E1112 PHY needs to have the SGMII MAC * interace (page 2) properly configured to * communicate with the 460EX/GT GPCS interface. */ u16 reg_short; pr_debug("%s: Marvell 88E1112 Ethernet\n", __func__); /* Set access to Page 2 */ phy_write(phy, 0x16, 0x0002); phy_write(phy, 0x00, 0x0040); /* 1Gbps */ reg_short = (u16)(phy_read(phy, 0x1a)); reg_short |= 0x8000; /* bypass Auto-Negotiation */ phy_write(phy, 0x1a, reg_short); emac_mii_reset_phy(phy); /* reset MAC interface */ /* Reset access to Page 0 */ phy_write(phy, 0x16, 0x0000); return 0; } static int et1011c_init(struct mii_phy *phy) { u16 reg_short; reg_short = (u16)(phy_read(phy, 0x16)); reg_short &= ~(0x7); reg_short |= 0x6; /* RGMII Trace Delay*/ phy_write(phy, 0x16, reg_short); reg_short = (u16)(phy_read(phy, 0x17)); reg_short &= ~(0x40); phy_write(phy, 0x17, reg_short); phy_write(phy, 0x1c, 0x74f0); return 0; } static struct mii_phy_ops et1011c_phy_ops = { .init = et1011c_init, .setup_aneg = genmii_setup_aneg, .setup_forced = genmii_setup_forced, .poll_link = genmii_poll_link, .read_link = genmii_read_link }; static struct mii_phy_def et1011c_phy_def = { .phy_id = 0x0282f000, .phy_id_mask = 0x0fffff00, .name = "ET1011C Gigabit Ethernet", .ops = &et1011c_phy_ops }; static struct mii_phy_ops m88e1111_phy_ops = { .init = m88e1111_init, .setup_aneg = 
genmii_setup_aneg, .setup_forced = genmii_setup_forced, .poll_link = genmii_poll_link, .read_link = genmii_read_link }; static struct mii_phy_def m88e1111_phy_def = { .phy_id = 0x01410CC0, .phy_id_mask = 0x0ffffff0, .name = "Marvell 88E1111 Ethernet", .ops = &m88e1111_phy_ops, }; static struct mii_phy_ops m88e1112_phy_ops = { .init = m88e1112_init, .setup_aneg = genmii_setup_aneg, .setup_forced = genmii_setup_forced, .poll_link = genmii_poll_link, .read_link = genmii_read_link }; static struct mii_phy_def m88e1112_phy_def = { .phy_id = 0x01410C90, .phy_id_mask = 0x0ffffff0, .name = "Marvell 88E1112 Ethernet", .ops = &m88e1112_phy_ops, }; static struct mii_phy_def *mii_phy_table[] = { &et1011c_phy_def, &cis8201_phy_def, &bcm5248_phy_def, &m88e1111_phy_def, &m88e1112_phy_def, &genmii_phy_def, NULL }; int emac_mii_phy_probe(struct mii_phy *phy, int address) { struct mii_phy_def *def; int i; u32 id; phy->autoneg = AUTONEG_DISABLE; phy->advertising = 0; phy->address = address; phy->speed = SPEED_10; phy->duplex = DUPLEX_HALF; phy->pause = phy->asym_pause = 0; /* Take PHY out of isolate mode and reset it. */ if (emac_mii_reset_phy(phy)) return -ENODEV; /* Read ID and find matching entry */ id = (phy_read(phy, MII_PHYSID1) << 16) | phy_read(phy, MII_PHYSID2); for (i = 0; (def = mii_phy_table[i]) != NULL; i++) if ((id & def->phy_id_mask) == def->phy_id) break; /* Should never be NULL (we have a generic entry), but... 
*/ if (!def) return -ENODEV; phy->def = def; /* Determine PHY features if needed */ phy->features = def->features; if (!phy->features) { u16 bmsr = phy_read(phy, MII_BMSR); if (bmsr & BMSR_ANEGCAPABLE) phy->features |= SUPPORTED_Autoneg; if (bmsr & BMSR_10HALF) phy->features |= SUPPORTED_10baseT_Half; if (bmsr & BMSR_10FULL) phy->features |= SUPPORTED_10baseT_Full; if (bmsr & BMSR_100HALF) phy->features |= SUPPORTED_100baseT_Half; if (bmsr & BMSR_100FULL) phy->features |= SUPPORTED_100baseT_Full; if (bmsr & BMSR_ESTATEN) { u16 esr = phy_read(phy, MII_ESTATUS); if (esr & ESTATUS_1000_TFULL) phy->features |= SUPPORTED_1000baseT_Full; if (esr & ESTATUS_1000_THALF) phy->features |= SUPPORTED_1000baseT_Half; } phy->features |= SUPPORTED_MII; } /* Setup default advertising */ phy->advertising = phy->features; return 0; } MODULE_LICENSE("GPL");
gpl-2.0
onejay09/runnymede-kitkat_3.0.101-wip
net/xfrm/xfrm_replay.c
645
13433
/*
 * xfrm_replay.c - xfrm replay detection, derived from xfrm_state.c.
 *
 * Copyright (C) 2010 secunet Security Networks AG
 * Copyright (C) 2010 Steffen Klassert <steffen.klassert@secunet.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <net/xfrm.h>

/*
 * Reconstruct the high 32 bits of an ESN sequence number from the
 * on-wire low 32 bits and the current replay window position.
 */
u32 xfrm_replay_seqhi(struct xfrm_state *x, __be32 net_seq)
{
	u32 seq, seq_hi, bottom;
	struct xfrm_replay_state_esn *replay_esn = x->replay_esn;

	if (!(x->props.flags & XFRM_STATE_ESN))
		return 0;

	seq = ntohl(net_seq);
	seq_hi = replay_esn->seq_hi;
	bottom = replay_esn->seq - replay_esn->replay_window + 1;

	if (likely(replay_esn->seq >= replay_esn->replay_window - 1)) {
		/* A. same subspace */
		if (unlikely(seq < bottom))
			seq_hi++;
	} else {
		/* B. window spans two subspaces */
		if (unlikely(seq >= bottom))
			seq_hi--;
	}

	return seq_hi;
}

static void xfrm_replay_notify(struct xfrm_state *x, int event)
{
	struct km_event c;
	/* we send notify messages in case
	 *  1. we updated on of the sequence numbers, and the seqno difference
	 *     is at least x->replay_maxdiff, in this case we also update the
	 *     timeout of our timer function
	 *  2. if x->replay_maxage has elapsed since last update,
	 *     and there were changes
	 *
	 *  The state structure must be locked!
	 */

	switch (event) {
	case XFRM_REPLAY_UPDATE:
		/* Suppress the event if neither seqno moved far enough. */
		if (x->replay_maxdiff &&
		    (x->replay.seq - x->preplay.seq < x->replay_maxdiff) &&
		    (x->replay.oseq - x->preplay.oseq < x->replay_maxdiff)) {
			if (x->xflags & XFRM_TIME_DEFER)
				event = XFRM_REPLAY_TIMEOUT;
			else
				return;
		}

		break;

	case XFRM_REPLAY_TIMEOUT:
		/* Nothing changed since the last notification: defer. */
		if (memcmp(&x->replay, &x->preplay,
			   sizeof(struct xfrm_replay_state)) == 0) {
			x->xflags |= XFRM_TIME_DEFER;
			return;
		}

		break;
	}

	memcpy(&x->preplay, &x->replay, sizeof(struct xfrm_replay_state));
	c.event = XFRM_MSG_NEWAE;
	c.data.aevent = event;
	km_state_notify(x, &c);

	if (x->replay_maxage &&
	    !mod_timer(&x->rtimer, jiffies + x->replay_maxage))
		x->xflags &= ~XFRM_TIME_DEFER;
}

/* Legacy (32 bit seq, 32 bit bitmap) output sequence number handling. */
static int xfrm_replay_overflow(struct xfrm_state *x, struct sk_buff *skb)
{
	int err = 0;
	struct net *net = xs_net(x);

	if (x->type->flags & XFRM_TYPE_REPLAY_PROT) {
		XFRM_SKB_CB(skb)->seq.output.low = ++x->replay.oseq;
		if (unlikely(x->replay.oseq == 0)) {
			/* Sequence space exhausted: undo and fail. */
			x->replay.oseq--;
			xfrm_audit_state_replay_overflow(x, skb);
			err = -EOVERFLOW;

			return err;
		}
		if (xfrm_aevent_is_on(net))
			x->repl->notify(x, XFRM_REPLAY_UPDATE);
	}

	return err;
}

/* Legacy anti-replay check against the 32 bit bitmap window. */
static int xfrm_replay_check(struct xfrm_state *x,
		      struct sk_buff *skb, __be32 net_seq)
{
	u32 diff;
	u32 seq = ntohl(net_seq);

	if (!x->props.replay_window)
		return 0;

	if (unlikely(seq == 0))
		goto err;

	/* Ahead of the window top: always fresh. */
	if (likely(seq > x->replay.seq))
		return 0;

	diff = x->replay.seq - seq;
	if (diff >= min_t(unsigned int, x->props.replay_window,
			  sizeof(x->replay.bitmap) * 8)) {
		x->stats.replay_window++;
		goto err;
	}

	if (x->replay.bitmap & (1U << diff)) {
		x->stats.replay++;
		goto err;
	}
	return 0;

err:
	xfrm_audit_state_replay(x, skb, net_seq);
	return -EINVAL;
}

/* Record a verified sequence number in the legacy bitmap window. */
static void xfrm_replay_advance(struct xfrm_state *x, __be32 net_seq)
{
	u32 diff;
	u32 seq = ntohl(net_seq);

	if (!x->props.replay_window)
		return;

	if (seq > x->replay.seq) {
		/* Window slides forward; shift the bitmap accordingly. */
		diff = seq - x->replay.seq;
		if (diff < x->props.replay_window)
			x->replay.bitmap = ((x->replay.bitmap) << diff) | 1;
		else
			x->replay.bitmap = 1;
		x->replay.seq = seq;
	} else {
		diff = x->replay.seq - seq;
		x->replay.bitmap |= (1U << diff);
	}

	if (xfrm_aevent_is_on(xs_net(x)))
		x->repl->notify(x, XFRM_REPLAY_UPDATE);
}

/* Variable-size bitmap (bmp) variant: same scheme, larger window. */
static int xfrm_replay_overflow_bmp(struct xfrm_state *x, struct sk_buff *skb)
{
	int err = 0;
	struct xfrm_replay_state_esn *replay_esn = x->replay_esn;
	struct net *net = xs_net(x);

	if (x->type->flags & XFRM_TYPE_REPLAY_PROT) {
		XFRM_SKB_CB(skb)->seq.output.low = ++replay_esn->oseq;
		if (unlikely(replay_esn->oseq == 0)) {
			replay_esn->oseq--;
			xfrm_audit_state_replay_overflow(x, skb);
			err = -EOVERFLOW;

			return err;
		}
		if (xfrm_aevent_is_on(net))
			x->repl->notify(x, XFRM_REPLAY_UPDATE);
	}

	return err;
}

static int xfrm_replay_check_bmp(struct xfrm_state *x,
				 struct sk_buff *skb, __be32 net_seq)
{
	unsigned int bitnr, nr;
	struct xfrm_replay_state_esn *replay_esn = x->replay_esn;
	u32 pos;
	u32 seq = ntohl(net_seq);
	u32 diff =  replay_esn->seq - seq;

	if (!replay_esn->replay_window)
		return 0;

	/* Bit index of the current window top within the circular bitmap. */
	pos = (replay_esn->seq - 1) % replay_esn->replay_window;

	if (unlikely(seq == 0))
		goto err;

	if (likely(seq > replay_esn->seq))
		return 0;

	if (diff >= replay_esn->replay_window) {
		x->stats.replay_window++;
		goto err;
	}

	if (pos >= diff) {
		bitnr = (pos - diff) % replay_esn->replay_window;
		nr = bitnr >> 5;
		bitnr = bitnr & 0x1F;
		if (replay_esn->bmp[nr] & (1U << bitnr))
			goto err_replay;
	} else {
		/* Wrap around the circular bitmap. */
		bitnr = replay_esn->replay_window - (diff - pos);
		nr = bitnr >> 5;
		bitnr = bitnr & 0x1F;
		if (replay_esn->bmp[nr] & (1U << bitnr))
			goto err_replay;
	}
	return 0;

err_replay:
	x->stats.replay++;
err:
	xfrm_audit_state_replay(x, skb, net_seq);
	return -EINVAL;
}

static void xfrm_replay_advance_bmp(struct xfrm_state *x, __be32 net_seq)
{
	unsigned int bitnr, nr, i;
	u32 diff;
	struct xfrm_replay_state_esn *replay_esn = x->replay_esn;
	u32 seq = ntohl(net_seq);
	u32 pos = (replay_esn->seq - 1) % replay_esn->replay_window;

	if (!replay_esn->replay_window)
		return;

	if (seq > replay_esn->seq) {
		diff = seq - replay_esn->seq;

		if (diff < replay_esn->replay_window) {
			/* Clear the bits for the skipped sequence numbers. */
			for (i = 1; i < diff; i++) {
				bitnr = (pos + i) % replay_esn->replay_window;
				nr = bitnr >> 5;
				bitnr = bitnr & 0x1F;
				replay_esn->bmp[nr] &=  ~(1U << bitnr);
			}

			bitnr = (pos + diff) % replay_esn->replay_window;
			nr = bitnr >> 5;
			bitnr = bitnr & 0x1F;
			replay_esn->bmp[nr] |= (1U << bitnr);
		} else {
			/* Jumped past the whole window: start fresh. */
			nr = (replay_esn->replay_window - 1) >> 5;
			for (i = 0; i <= nr; i++)
				replay_esn->bmp[i] = 0;

			bitnr = (pos + diff) % replay_esn->replay_window;
			nr = bitnr >> 5;
			bitnr = bitnr & 0x1F;
			replay_esn->bmp[nr] |= (1U << bitnr);
		}

		replay_esn->seq = seq;
	} else {
		diff = replay_esn->seq - seq;

		if (pos >= diff) {
			bitnr = (pos - diff) % replay_esn->replay_window;
			nr = bitnr >> 5;
			bitnr = bitnr & 0x1F;
			replay_esn->bmp[nr] |= (1U << bitnr);
		} else {
			bitnr = replay_esn->replay_window - (diff - pos);
			nr = bitnr >> 5;
			bitnr = bitnr & 0x1F;
			replay_esn->bmp[nr] |= (1U << bitnr);
		}
	}

	if (xfrm_aevent_is_on(xs_net(x)))
		x->repl->notify(x, XFRM_REPLAY_UPDATE);
}

static void xfrm_replay_notify_bmp(struct xfrm_state *x, int event)
{
	struct km_event c;
	struct xfrm_replay_state_esn *replay_esn = x->replay_esn;
	struct xfrm_replay_state_esn *preplay_esn = x->preplay_esn;

	/* we send notify messages in case
	 *  1. we updated on of the sequence numbers, and the seqno difference
	 *     is at least x->replay_maxdiff, in this case we also update the
	 *     timeout of our timer function
	 *  2. if x->replay_maxage has elapsed since last update,
	 *     and there were changes
	 *
	 *  The state structure must be locked!
	 */

	switch (event) {
	case XFRM_REPLAY_UPDATE:
		if (x->replay_maxdiff &&
		    (replay_esn->seq - preplay_esn->seq < x->replay_maxdiff) &&
		    (replay_esn->oseq - preplay_esn->oseq
		     < x->replay_maxdiff)) {
			if (x->xflags & XFRM_TIME_DEFER)
				event = XFRM_REPLAY_TIMEOUT;
			else
				return;
		}

		break;

	case XFRM_REPLAY_TIMEOUT:
		if (memcmp(x->replay_esn, x->preplay_esn,
			   xfrm_replay_state_esn_len(replay_esn)) == 0) {
			x->xflags |= XFRM_TIME_DEFER;
			return;
		}

		break;
	}

	memcpy(x->preplay_esn, x->replay_esn,
	       xfrm_replay_state_esn_len(replay_esn));
	c.event = XFRM_MSG_NEWAE;
	c.data.aevent = event;
	km_state_notify(x, &c);

	if (x->replay_maxage &&
	    !mod_timer(&x->rtimer, jiffies + x->replay_maxage))
		x->xflags &= ~XFRM_TIME_DEFER;
}

/* ESN variant: 64 bit output sequence number split across low/hi. */
static int xfrm_replay_overflow_esn(struct xfrm_state *x, struct sk_buff *skb)
{
	int err = 0;
	struct xfrm_replay_state_esn *replay_esn = x->replay_esn;
	struct net *net = xs_net(x);

	if (x->type->flags & XFRM_TYPE_REPLAY_PROT) {
		XFRM_SKB_CB(skb)->seq.output.low = ++replay_esn->oseq;
		XFRM_SKB_CB(skb)->seq.output.hi = replay_esn->oseq_hi;

		if (unlikely(replay_esn->oseq == 0)) {
			/* Low word wrapped: carry into the high word. */
			XFRM_SKB_CB(skb)->seq.output.hi = ++replay_esn->oseq_hi;

			if (replay_esn->oseq_hi == 0) {
				/* Full 64 bit space exhausted. */
				replay_esn->oseq--;
				replay_esn->oseq_hi--;
				xfrm_audit_state_replay_overflow(x, skb);
				err = -EOVERFLOW;

				return err;
			}
		}
		if (xfrm_aevent_is_on(net))
			x->repl->notify(x, XFRM_REPLAY_UPDATE);
	}

	return err;
}

static int xfrm_replay_check_esn(struct xfrm_state *x,
				 struct sk_buff *skb, __be32 net_seq)
{
	unsigned int bitnr, nr;
	u32 diff;
	struct xfrm_replay_state_esn *replay_esn = x->replay_esn;
	u32 pos;
	u32 seq = ntohl(net_seq);
	u32 wsize = replay_esn->replay_window;
	u32 top = replay_esn->seq;
	u32 bottom = top - wsize + 1;

	if (!wsize)
		return 0;

	pos = (replay_esn->seq - 1) % replay_esn->replay_window;

	if (unlikely(seq == 0 && replay_esn->seq_hi == 0 &&
		     (replay_esn->seq < replay_esn->replay_window - 1)))
		goto err;

	diff = top - seq;

	if (likely(top >= wsize - 1)) {
		/* A. same subspace */
		if (likely(seq > top) || seq < bottom)
			return 0;
	} else {
		/* B. window spans two subspaces */
		if (likely(seq > top && seq < bottom))
			return 0;
		if (seq >= bottom)
			diff = ~seq + top + 1;
	}

	if (diff >= replay_esn->replay_window) {
		x->stats.replay_window++;
		goto err;
	}

	if (pos >= diff) {
		bitnr = (pos - diff) % replay_esn->replay_window;
		nr = bitnr >> 5;
		bitnr = bitnr & 0x1F;
		if (replay_esn->bmp[nr] & (1U << bitnr))
			goto err_replay;
	} else {
		bitnr = replay_esn->replay_window - (diff - pos);
		nr = bitnr >> 5;
		bitnr = bitnr & 0x1F;
		if (replay_esn->bmp[nr] & (1U << bitnr))
			goto err_replay;
	}
	return 0;

err_replay:
	x->stats.replay++;
err:
	xfrm_audit_state_replay(x, skb, net_seq);
	return -EINVAL;
}

static void xfrm_replay_advance_esn(struct xfrm_state *x, __be32 net_seq)
{
	unsigned int bitnr, nr, i;
	int wrap;
	u32 diff, pos, seq, seq_hi;
	struct xfrm_replay_state_esn *replay_esn = x->replay_esn;

	if (!replay_esn->replay_window)
		return;

	seq = ntohl(net_seq);
	pos = (replay_esn->seq - 1) % replay_esn->replay_window;
	seq_hi = xfrm_replay_seqhi(x, net_seq);
	wrap = seq_hi - replay_esn->seq_hi;

	if ((!wrap && seq > replay_esn->seq) || wrap > 0) {
		if (likely(!wrap))
			diff = seq - replay_esn->seq;
		else
			diff = ~replay_esn->seq + seq + 1;

		if (diff < replay_esn->replay_window) {
			for (i = 1; i < diff; i++) {
				bitnr = (pos + i) % replay_esn->replay_window;
				nr = bitnr >> 5;
				bitnr = bitnr & 0x1F;
				replay_esn->bmp[nr] &=  ~(1U << bitnr);
			}

			bitnr = (pos + diff) % replay_esn->replay_window;
			nr = bitnr >> 5;
			bitnr = bitnr & 0x1F;
			replay_esn->bmp[nr] |= (1U << bitnr);
		} else {
			nr = (replay_esn->replay_window - 1) >> 5;
			for (i = 0; i <= nr; i++)
				replay_esn->bmp[i] = 0;

			bitnr = (pos + diff) % replay_esn->replay_window;
			nr = bitnr >> 5;
			bitnr = bitnr & 0x1F;
			replay_esn->bmp[nr] |= (1U << bitnr);
		}

		replay_esn->seq = seq;

		if (unlikely(wrap > 0))
			replay_esn->seq_hi++;
	} else {
		diff = replay_esn->seq - seq;

		if (pos >= diff) {
			bitnr = (pos - diff) % replay_esn->replay_window;
			nr = bitnr >> 5;
			bitnr = bitnr & 0x1F;
			replay_esn->bmp[nr] |= (1U << bitnr);
		} else {
			bitnr = replay_esn->replay_window - (diff - pos);
			nr = bitnr >> 5;
			bitnr = bitnr & 0x1F;
			replay_esn->bmp[nr] |= (1U << bitnr);
		}
	}

	if (xfrm_aevent_is_on(xs_net(x)))
		x->repl->notify(x, XFRM_REPLAY_UPDATE);
}

static struct xfrm_replay xfrm_replay_legacy = {
	.advance	= xfrm_replay_advance,
	.check		= xfrm_replay_check,
	.notify		= xfrm_replay_notify,
	.overflow	= xfrm_replay_overflow,
};

static struct xfrm_replay xfrm_replay_bmp = {
	.advance	= xfrm_replay_advance_bmp,
	.check		= xfrm_replay_check_bmp,
	.notify		= xfrm_replay_notify_bmp,
	.overflow	= xfrm_replay_overflow_bmp,
};

static struct xfrm_replay xfrm_replay_esn = {
	.advance	= xfrm_replay_advance_esn,
	.check		= xfrm_replay_check_esn,
	.notify		= xfrm_replay_notify_bmp,
	.overflow	= xfrm_replay_overflow_esn,
};

/*
 * Validate the configured replay window and select the matching
 * replay-handling implementation (legacy / bitmap / ESN).
 * Returns 0 on success, -EINVAL on an inconsistent configuration.
 */
int xfrm_init_replay(struct xfrm_state *x)
{
	struct xfrm_replay_state_esn *replay_esn = x->replay_esn;

	if (replay_esn) {
		/* The window must fit inside the allocated bitmap. */
		if (replay_esn->replay_window >
		    replay_esn->bmp_len * sizeof(__u32) * 8)
			return -EINVAL;

		/* ESN requires a non-zero anti-replay window. */
		if ((x->props.flags & XFRM_STATE_ESN) &&
		    replay_esn->replay_window == 0)
			return -EINVAL;

		if ((x->props.flags & XFRM_STATE_ESN) && x->replay_esn)
			x->repl = &xfrm_replay_esn;
		else
			x->repl = &xfrm_replay_bmp;
	} else
		x->repl = &xfrm_replay_legacy;

	return 0;
}
EXPORT_SYMBOL(xfrm_init_replay);
gpl-2.0
Shaaan/android_kernel_samsung_u8500-common
drivers/video/da8xx-fb.c
901
29292
/* * Copyright (C) 2008-2009 MontaVista Software Inc. * Copyright (C) 2008-2009 Texas Instruments Inc * * Based on the LCD driver for TI Avalanche processors written by * Ajay Singh and Shalom Hai. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option)any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/fb.h> #include <linux/dma-mapping.h> #include <linux/device.h> #include <linux/platform_device.h> #include <linux/uaccess.h> #include <linux/interrupt.h> #include <linux/clk.h> #include <linux/cpufreq.h> #include <linux/console.h> #include <linux/slab.h> #include <video/da8xx-fb.h> #define DRIVER_NAME "da8xx_lcdc" /* LCD Status Register */ #define LCD_END_OF_FRAME1 BIT(9) #define LCD_END_OF_FRAME0 BIT(8) #define LCD_PL_LOAD_DONE BIT(6) #define LCD_FIFO_UNDERFLOW BIT(5) #define LCD_SYNC_LOST BIT(2) /* LCD DMA Control Register */ #define LCD_DMA_BURST_SIZE(x) ((x) << 4) #define LCD_DMA_BURST_1 0x0 #define LCD_DMA_BURST_2 0x1 #define LCD_DMA_BURST_4 0x2 #define LCD_DMA_BURST_8 0x3 #define LCD_DMA_BURST_16 0x4 #define LCD_END_OF_FRAME_INT_ENA BIT(2) #define LCD_DUAL_FRAME_BUFFER_ENABLE BIT(0) /* LCD Control Register */ #define LCD_CLK_DIVISOR(x) ((x) << 8) #define LCD_RASTER_MODE 0x01 /* LCD Raster Control Register */ #define LCD_PALETTE_LOAD_MODE(x) ((x) << 20) #define PALETTE_AND_DATA 0x00 #define PALETTE_ONLY 0x01 
#define DATA_ONLY 0x02 #define LCD_MONO_8BIT_MODE BIT(9) #define LCD_RASTER_ORDER BIT(8) #define LCD_TFT_MODE BIT(7) #define LCD_UNDERFLOW_INT_ENA BIT(6) #define LCD_PL_ENABLE BIT(4) #define LCD_MONOCHROME_MODE BIT(1) #define LCD_RASTER_ENABLE BIT(0) #define LCD_TFT_ALT_ENABLE BIT(23) #define LCD_STN_565_ENABLE BIT(24) /* LCD Raster Timing 2 Register */ #define LCD_AC_BIAS_TRANSITIONS_PER_INT(x) ((x) << 16) #define LCD_AC_BIAS_FREQUENCY(x) ((x) << 8) #define LCD_SYNC_CTRL BIT(25) #define LCD_SYNC_EDGE BIT(24) #define LCD_INVERT_PIXEL_CLOCK BIT(22) #define LCD_INVERT_LINE_CLOCK BIT(21) #define LCD_INVERT_FRAME_CLOCK BIT(20) /* LCD Block */ #define LCD_CTRL_REG 0x4 #define LCD_STAT_REG 0x8 #define LCD_RASTER_CTRL_REG 0x28 #define LCD_RASTER_TIMING_0_REG 0x2C #define LCD_RASTER_TIMING_1_REG 0x30 #define LCD_RASTER_TIMING_2_REG 0x34 #define LCD_DMA_CTRL_REG 0x40 #define LCD_DMA_FRM_BUF_BASE_ADDR_0_REG 0x44 #define LCD_DMA_FRM_BUF_CEILING_ADDR_0_REG 0x48 #define LCD_DMA_FRM_BUF_BASE_ADDR_1_REG 0x4C #define LCD_DMA_FRM_BUF_CEILING_ADDR_1_REG 0x50 #define LCD_NUM_BUFFERS 2 #define WSI_TIMEOUT 50 #define PALETTE_SIZE 256 #define LEFT_MARGIN 64 #define RIGHT_MARGIN 64 #define UPPER_MARGIN 32 #define LOWER_MARGIN 32 static resource_size_t da8xx_fb_reg_base; static struct resource *lcdc_regs; static inline unsigned int lcdc_read(unsigned int addr) { return (unsigned int)__raw_readl(da8xx_fb_reg_base + (addr)); } static inline void lcdc_write(unsigned int val, unsigned int addr) { __raw_writel(val, da8xx_fb_reg_base + (addr)); } struct da8xx_fb_par { resource_size_t p_palette_base; unsigned char *v_palette_base; dma_addr_t vram_phys; unsigned long vram_size; void *vram_virt; unsigned int dma_start; unsigned int dma_end; struct clk *lcdc_clk; int irq; unsigned short pseudo_palette[16]; unsigned int palette_sz; unsigned int pxl_clk; int blank; wait_queue_head_t vsync_wait; int vsync_flag; int vsync_timeout; #ifdef CONFIG_CPU_FREQ struct notifier_block freq_transition; #endif 
void (*panel_power_ctrl)(int); }; /* Variable Screen Information */ static struct fb_var_screeninfo da8xx_fb_var __devinitdata = { .xoffset = 0, .yoffset = 0, .transp = {0, 0, 0}, .nonstd = 0, .activate = 0, .height = -1, .width = -1, .pixclock = 46666, /* 46us - AUO display */ .accel_flags = 0, .left_margin = LEFT_MARGIN, .right_margin = RIGHT_MARGIN, .upper_margin = UPPER_MARGIN, .lower_margin = LOWER_MARGIN, .sync = 0, .vmode = FB_VMODE_NONINTERLACED }; static struct fb_fix_screeninfo da8xx_fb_fix __devinitdata = { .id = "DA8xx FB Drv", .type = FB_TYPE_PACKED_PIXELS, .type_aux = 0, .visual = FB_VISUAL_PSEUDOCOLOR, .xpanstep = 0, .ypanstep = 1, .ywrapstep = 0, .accel = FB_ACCEL_NONE }; struct da8xx_panel { const char name[25]; /* Full name <vendor>_<model> */ unsigned short width; unsigned short height; int hfp; /* Horizontal front porch */ int hbp; /* Horizontal back porch */ int hsw; /* Horizontal Sync Pulse Width */ int vfp; /* Vertical front porch */ int vbp; /* Vertical back porch */ int vsw; /* Vertical Sync Pulse Width */ unsigned int pxl_clk; /* Pixel clock */ unsigned char invert_pxl_clk; /* Invert Pixel clock */ }; static struct da8xx_panel known_lcd_panels[] = { /* Sharp LCD035Q3DG01 */ [0] = { .name = "Sharp_LCD035Q3DG01", .width = 320, .height = 240, .hfp = 8, .hbp = 6, .hsw = 0, .vfp = 2, .vbp = 2, .vsw = 0, .pxl_clk = 4608000, .invert_pxl_clk = 1, }, /* Sharp LK043T1DG01 */ [1] = { .name = "Sharp_LK043T1DG01", .width = 480, .height = 272, .hfp = 2, .hbp = 2, .hsw = 41, .vfp = 2, .vbp = 2, .vsw = 10, .pxl_clk = 7833600, .invert_pxl_clk = 0, }, }; /* Enable the Raster Engine of the LCD Controller */ static inline void lcd_enable_raster(void) { u32 reg; reg = lcdc_read(LCD_RASTER_CTRL_REG); if (!(reg & LCD_RASTER_ENABLE)) lcdc_write(reg | LCD_RASTER_ENABLE, LCD_RASTER_CTRL_REG); } /* Disable the Raster Engine of the LCD Controller */ static inline void lcd_disable_raster(void) { u32 reg; reg = lcdc_read(LCD_RASTER_CTRL_REG); if (reg & 
LCD_RASTER_ENABLE) lcdc_write(reg & ~LCD_RASTER_ENABLE, LCD_RASTER_CTRL_REG); } static void lcd_blit(int load_mode, struct da8xx_fb_par *par) { u32 start; u32 end; u32 reg_ras; u32 reg_dma; /* init reg to clear PLM (loading mode) fields */ reg_ras = lcdc_read(LCD_RASTER_CTRL_REG); reg_ras &= ~(3 << 20); reg_dma = lcdc_read(LCD_DMA_CTRL_REG); if (load_mode == LOAD_DATA) { start = par->dma_start; end = par->dma_end; reg_ras |= LCD_PALETTE_LOAD_MODE(DATA_ONLY); reg_dma |= LCD_END_OF_FRAME_INT_ENA; reg_dma |= LCD_DUAL_FRAME_BUFFER_ENABLE; lcdc_write(start, LCD_DMA_FRM_BUF_BASE_ADDR_0_REG); lcdc_write(end, LCD_DMA_FRM_BUF_CEILING_ADDR_0_REG); lcdc_write(start, LCD_DMA_FRM_BUF_BASE_ADDR_1_REG); lcdc_write(end, LCD_DMA_FRM_BUF_CEILING_ADDR_1_REG); } else if (load_mode == LOAD_PALETTE) { start = par->p_palette_base; end = start + par->palette_sz - 1; reg_ras |= LCD_PALETTE_LOAD_MODE(PALETTE_ONLY); reg_ras |= LCD_PL_ENABLE; lcdc_write(start, LCD_DMA_FRM_BUF_BASE_ADDR_0_REG); lcdc_write(end, LCD_DMA_FRM_BUF_CEILING_ADDR_0_REG); } lcdc_write(reg_dma, LCD_DMA_CTRL_REG); lcdc_write(reg_ras, LCD_RASTER_CTRL_REG); /* * The Raster enable bit must be set after all other control fields are * set. 
*/ lcd_enable_raster(); } /* Configure the Burst Size of DMA */ static int lcd_cfg_dma(int burst_size) { u32 reg; reg = lcdc_read(LCD_DMA_CTRL_REG) & 0x00000001; switch (burst_size) { case 1: reg |= LCD_DMA_BURST_SIZE(LCD_DMA_BURST_1); break; case 2: reg |= LCD_DMA_BURST_SIZE(LCD_DMA_BURST_2); break; case 4: reg |= LCD_DMA_BURST_SIZE(LCD_DMA_BURST_4); break; case 8: reg |= LCD_DMA_BURST_SIZE(LCD_DMA_BURST_8); break; case 16: reg |= LCD_DMA_BURST_SIZE(LCD_DMA_BURST_16); break; default: return -EINVAL; } lcdc_write(reg, LCD_DMA_CTRL_REG); return 0; } static void lcd_cfg_ac_bias(int period, int transitions_per_int) { u32 reg; /* Set the AC Bias Period and Number of Transisitons per Interrupt */ reg = lcdc_read(LCD_RASTER_TIMING_2_REG) & 0xFFF00000; reg |= LCD_AC_BIAS_FREQUENCY(period) | LCD_AC_BIAS_TRANSITIONS_PER_INT(transitions_per_int); lcdc_write(reg, LCD_RASTER_TIMING_2_REG); } static void lcd_cfg_horizontal_sync(int back_porch, int pulse_width, int front_porch) { u32 reg; reg = lcdc_read(LCD_RASTER_TIMING_0_REG) & 0xf; reg |= ((back_porch & 0xff) << 24) | ((front_porch & 0xff) << 16) | ((pulse_width & 0x3f) << 10); lcdc_write(reg, LCD_RASTER_TIMING_0_REG); } static void lcd_cfg_vertical_sync(int back_porch, int pulse_width, int front_porch) { u32 reg; reg = lcdc_read(LCD_RASTER_TIMING_1_REG) & 0x3ff; reg |= ((back_porch & 0xff) << 24) | ((front_porch & 0xff) << 16) | ((pulse_width & 0x3f) << 10); lcdc_write(reg, LCD_RASTER_TIMING_1_REG); } static int lcd_cfg_display(const struct lcd_ctrl_config *cfg) { u32 reg; reg = lcdc_read(LCD_RASTER_CTRL_REG) & ~(LCD_TFT_MODE | LCD_MONO_8BIT_MODE | LCD_MONOCHROME_MODE); switch (cfg->p_disp_panel->panel_shade) { case MONOCHROME: reg |= LCD_MONOCHROME_MODE; if (cfg->mono_8bit_mode) reg |= LCD_MONO_8BIT_MODE; break; case COLOR_ACTIVE: reg |= LCD_TFT_MODE; if (cfg->tft_alt_mode) reg |= LCD_TFT_ALT_ENABLE; break; case COLOR_PASSIVE: if (cfg->stn_565_mode) reg |= LCD_STN_565_ENABLE; break; default: return -EINVAL; } /* enable 
additional interrupts here */ reg |= LCD_UNDERFLOW_INT_ENA; lcdc_write(reg, LCD_RASTER_CTRL_REG); reg = lcdc_read(LCD_RASTER_TIMING_2_REG); if (cfg->sync_ctrl) reg |= LCD_SYNC_CTRL; else reg &= ~LCD_SYNC_CTRL; if (cfg->sync_edge) reg |= LCD_SYNC_EDGE; else reg &= ~LCD_SYNC_EDGE; if (cfg->invert_line_clock) reg |= LCD_INVERT_LINE_CLOCK; else reg &= ~LCD_INVERT_LINE_CLOCK; if (cfg->invert_frm_clock) reg |= LCD_INVERT_FRAME_CLOCK; else reg &= ~LCD_INVERT_FRAME_CLOCK; lcdc_write(reg, LCD_RASTER_TIMING_2_REG); return 0; } static int lcd_cfg_frame_buffer(struct da8xx_fb_par *par, u32 width, u32 height, u32 bpp, u32 raster_order) { u32 reg; /* Set the Panel Width */ /* Pixels per line = (PPL + 1)*16 */ /*0x3F in bits 4..9 gives max horisontal resolution = 1024 pixels*/ width &= 0x3f0; reg = lcdc_read(LCD_RASTER_TIMING_0_REG); reg &= 0xfffffc00; reg |= ((width >> 4) - 1) << 4; lcdc_write(reg, LCD_RASTER_TIMING_0_REG); /* Set the Panel Height */ reg = lcdc_read(LCD_RASTER_TIMING_1_REG); reg = ((height - 1) & 0x3ff) | (reg & 0xfffffc00); lcdc_write(reg, LCD_RASTER_TIMING_1_REG); /* Set the Raster Order of the Frame Buffer */ reg = lcdc_read(LCD_RASTER_CTRL_REG) & ~(1 << 8); if (raster_order) reg |= LCD_RASTER_ORDER; lcdc_write(reg, LCD_RASTER_CTRL_REG); switch (bpp) { case 1: case 2: case 4: case 16: par->palette_sz = 16 * 2; break; case 8: par->palette_sz = 256 * 2; break; default: return -EINVAL; } return 0; } static int fb_setcolreg(unsigned regno, unsigned red, unsigned green, unsigned blue, unsigned transp, struct fb_info *info) { struct da8xx_fb_par *par = info->par; unsigned short *palette = (unsigned short *) par->v_palette_base; u_short pal; int update_hw = 0; if (regno > 255) return 1; if (info->fix.visual == FB_VISUAL_DIRECTCOLOR) return 1; if (info->var.bits_per_pixel == 8) { red >>= 4; green >>= 8; blue >>= 12; pal = (red & 0x0f00); pal |= (green & 0x00f0); pal |= (blue & 0x000f); if (palette[regno] != pal) { update_hw = 1; palette[regno] = pal; } } else if 
((info->var.bits_per_pixel == 16) && regno < 16) { red >>= (16 - info->var.red.length); red <<= info->var.red.offset; green >>= (16 - info->var.green.length); green <<= info->var.green.offset; blue >>= (16 - info->var.blue.length); blue <<= info->var.blue.offset; par->pseudo_palette[regno] = red | green | blue; if (palette[0] != 0x4000) { update_hw = 1; palette[0] = 0x4000; } } /* Update the palette in the h/w as needed. */ if (update_hw) lcd_blit(LOAD_PALETTE, par); return 0; } static void lcd_reset(struct da8xx_fb_par *par) { /* Disable the Raster if previously Enabled */ lcd_disable_raster(); /* DMA has to be disabled */ lcdc_write(0, LCD_DMA_CTRL_REG); lcdc_write(0, LCD_RASTER_CTRL_REG); } static void lcd_calc_clk_divider(struct da8xx_fb_par *par) { unsigned int lcd_clk, div; lcd_clk = clk_get_rate(par->lcdc_clk); div = lcd_clk / par->pxl_clk; /* Configure the LCD clock divisor. */ lcdc_write(LCD_CLK_DIVISOR(div) | (LCD_RASTER_MODE & 0x1), LCD_CTRL_REG); } static int lcd_init(struct da8xx_fb_par *par, const struct lcd_ctrl_config *cfg, struct da8xx_panel *panel) { u32 bpp; int ret = 0; lcd_reset(par); /* Calculate the divider */ lcd_calc_clk_divider(par); if (panel->invert_pxl_clk) lcdc_write((lcdc_read(LCD_RASTER_TIMING_2_REG) | LCD_INVERT_PIXEL_CLOCK), LCD_RASTER_TIMING_2_REG); else lcdc_write((lcdc_read(LCD_RASTER_TIMING_2_REG) & ~LCD_INVERT_PIXEL_CLOCK), LCD_RASTER_TIMING_2_REG); /* Configure the DMA burst size. */ ret = lcd_cfg_dma(cfg->dma_burst_sz); if (ret < 0) return ret; /* Configure the AC bias properties. */ lcd_cfg_ac_bias(cfg->ac_bias, cfg->ac_bias_intrpt); /* Configure the vertical and horizontal sync properties. 
*/ lcd_cfg_vertical_sync(panel->vbp, panel->vsw, panel->vfp); lcd_cfg_horizontal_sync(panel->hbp, panel->hsw, panel->hfp); /* Configure for disply */ ret = lcd_cfg_display(cfg); if (ret < 0) return ret; if (QVGA != cfg->p_disp_panel->panel_type) return -EINVAL; if (cfg->bpp <= cfg->p_disp_panel->max_bpp && cfg->bpp >= cfg->p_disp_panel->min_bpp) bpp = cfg->bpp; else bpp = cfg->p_disp_panel->max_bpp; if (bpp == 12) bpp = 16; ret = lcd_cfg_frame_buffer(par, (unsigned int)panel->width, (unsigned int)panel->height, bpp, cfg->raster_order); if (ret < 0) return ret; /* Configure FDD */ lcdc_write((lcdc_read(LCD_RASTER_CTRL_REG) & 0xfff00fff) | (cfg->fdd << 12), LCD_RASTER_CTRL_REG); return 0; } static irqreturn_t lcdc_irq_handler(int irq, void *arg) { struct da8xx_fb_par *par = arg; u32 stat = lcdc_read(LCD_STAT_REG); u32 reg_ras; if ((stat & LCD_SYNC_LOST) && (stat & LCD_FIFO_UNDERFLOW)) { lcd_disable_raster(); lcdc_write(stat, LCD_STAT_REG); lcd_enable_raster(); } else if (stat & LCD_PL_LOAD_DONE) { /* * Must disable raster before changing state of any control bit. * And also must be disabled before clearing the PL loading * interrupt via the following write to the status register. If * this is done after then one gets multiple PL done interrupts. 
*/ lcd_disable_raster(); lcdc_write(stat, LCD_STAT_REG); /* Disable PL completion inerrupt */ reg_ras = lcdc_read(LCD_RASTER_CTRL_REG); reg_ras &= ~LCD_PL_ENABLE; lcdc_write(reg_ras, LCD_RASTER_CTRL_REG); /* Setup and start data loading mode */ lcd_blit(LOAD_DATA, par); } else { lcdc_write(stat, LCD_STAT_REG); if (stat & LCD_END_OF_FRAME0) { lcdc_write(par->dma_start, LCD_DMA_FRM_BUF_BASE_ADDR_0_REG); lcdc_write(par->dma_end, LCD_DMA_FRM_BUF_CEILING_ADDR_0_REG); par->vsync_flag = 1; wake_up_interruptible(&par->vsync_wait); } if (stat & LCD_END_OF_FRAME1) { lcdc_write(par->dma_start, LCD_DMA_FRM_BUF_BASE_ADDR_1_REG); lcdc_write(par->dma_end, LCD_DMA_FRM_BUF_CEILING_ADDR_1_REG); par->vsync_flag = 1; wake_up_interruptible(&par->vsync_wait); } } return IRQ_HANDLED; } static int fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info) { int err = 0; switch (var->bits_per_pixel) { case 1: case 8: var->red.offset = 0; var->red.length = 8; var->green.offset = 0; var->green.length = 8; var->blue.offset = 0; var->blue.length = 8; var->transp.offset = 0; var->transp.length = 0; break; case 4: var->red.offset = 0; var->red.length = 4; var->green.offset = 0; var->green.length = 4; var->blue.offset = 0; var->blue.length = 4; var->transp.offset = 0; var->transp.length = 0; break; case 16: /* RGB 565 */ var->red.offset = 11; var->red.length = 5; var->green.offset = 5; var->green.length = 6; var->blue.offset = 0; var->blue.length = 5; var->transp.offset = 0; var->transp.length = 0; break; default: err = -EINVAL; } var->red.msb_right = 0; var->green.msb_right = 0; var->blue.msb_right = 0; var->transp.msb_right = 0; return err; } #ifdef CONFIG_CPU_FREQ static int lcd_da8xx_cpufreq_transition(struct notifier_block *nb, unsigned long val, void *data) { struct da8xx_fb_par *par; par = container_of(nb, struct da8xx_fb_par, freq_transition); if (val == CPUFREQ_PRECHANGE) { lcd_disable_raster(); } else if (val == CPUFREQ_POSTCHANGE) { lcd_calc_clk_divider(par); 
lcd_enable_raster(); } return 0; } static inline int lcd_da8xx_cpufreq_register(struct da8xx_fb_par *par) { par->freq_transition.notifier_call = lcd_da8xx_cpufreq_transition; return cpufreq_register_notifier(&par->freq_transition, CPUFREQ_TRANSITION_NOTIFIER); } static inline void lcd_da8xx_cpufreq_deregister(struct da8xx_fb_par *par) { cpufreq_unregister_notifier(&par->freq_transition, CPUFREQ_TRANSITION_NOTIFIER); } #endif static int __devexit fb_remove(struct platform_device *dev) { struct fb_info *info = dev_get_drvdata(&dev->dev); if (info) { struct da8xx_fb_par *par = info->par; #ifdef CONFIG_CPU_FREQ lcd_da8xx_cpufreq_deregister(par); #endif if (par->panel_power_ctrl) par->panel_power_ctrl(0); lcd_disable_raster(); lcdc_write(0, LCD_RASTER_CTRL_REG); /* disable DMA */ lcdc_write(0, LCD_DMA_CTRL_REG); unregister_framebuffer(info); fb_dealloc_cmap(&info->cmap); dma_free_coherent(NULL, PALETTE_SIZE, par->v_palette_base, par->p_palette_base); dma_free_coherent(NULL, par->vram_size, par->vram_virt, par->vram_phys); free_irq(par->irq, par); clk_disable(par->lcdc_clk); clk_put(par->lcdc_clk); framebuffer_release(info); iounmap((void __iomem *)da8xx_fb_reg_base); release_mem_region(lcdc_regs->start, resource_size(lcdc_regs)); } return 0; } /* * Function to wait for vertical sync which for this LCD peripheral * translates into waiting for the current raster frame to complete. */ static int fb_wait_for_vsync(struct fb_info *info) { struct da8xx_fb_par *par = info->par; int ret; /* * Set flag to 0 and wait for isr to set to 1. It would seem there is a * race condition here where the ISR could have occured just before or * just after this set. But since we are just coarsely waiting for * a frame to complete then that's OK. i.e. if the frame completed * just before this code executed then we have to wait another full * frame time but there is no way to avoid such a situation. On the * other hand if the frame completed just after then we don't need * to wait long at all. 
Either way we are guaranteed to return to the * user immediately after a frame completion which is all that is * required. */ par->vsync_flag = 0; ret = wait_event_interruptible_timeout(par->vsync_wait, par->vsync_flag != 0, par->vsync_timeout); if (ret < 0) return ret; if (ret == 0) return -ETIMEDOUT; return 0; } static int fb_ioctl(struct fb_info *info, unsigned int cmd, unsigned long arg) { struct lcd_sync_arg sync_arg; switch (cmd) { case FBIOGET_CONTRAST: case FBIOPUT_CONTRAST: case FBIGET_BRIGHTNESS: case FBIPUT_BRIGHTNESS: case FBIGET_COLOR: case FBIPUT_COLOR: return -ENOTTY; case FBIPUT_HSYNC: if (copy_from_user(&sync_arg, (char *)arg, sizeof(struct lcd_sync_arg))) return -EFAULT; lcd_cfg_horizontal_sync(sync_arg.back_porch, sync_arg.pulse_width, sync_arg.front_porch); break; case FBIPUT_VSYNC: if (copy_from_user(&sync_arg, (char *)arg, sizeof(struct lcd_sync_arg))) return -EFAULT; lcd_cfg_vertical_sync(sync_arg.back_porch, sync_arg.pulse_width, sync_arg.front_porch); break; case FBIO_WAITFORVSYNC: return fb_wait_for_vsync(info); default: return -EINVAL; } return 0; } static int cfb_blank(int blank, struct fb_info *info) { struct da8xx_fb_par *par = info->par; int ret = 0; if (par->blank == blank) return 0; par->blank = blank; switch (blank) { case FB_BLANK_UNBLANK: if (par->panel_power_ctrl) par->panel_power_ctrl(1); lcd_enable_raster(); break; case FB_BLANK_POWERDOWN: if (par->panel_power_ctrl) par->panel_power_ctrl(0); lcd_disable_raster(); break; default: ret = -EINVAL; } return ret; } /* * Set new x,y offsets in the virtual display for the visible area and switch * to the new mode. 
*/ static int da8xx_pan_display(struct fb_var_screeninfo *var, struct fb_info *fbi) { int ret = 0; struct fb_var_screeninfo new_var; struct da8xx_fb_par *par = fbi->par; struct fb_fix_screeninfo *fix = &fbi->fix; unsigned int end; unsigned int start; if (var->xoffset != fbi->var.xoffset || var->yoffset != fbi->var.yoffset) { memcpy(&new_var, &fbi->var, sizeof(new_var)); new_var.xoffset = var->xoffset; new_var.yoffset = var->yoffset; if (fb_check_var(&new_var, fbi)) ret = -EINVAL; else { memcpy(&fbi->var, &new_var, sizeof(new_var)); start = fix->smem_start + new_var.yoffset * fix->line_length + new_var.xoffset * var->bits_per_pixel / 8; end = start + var->yres * fix->line_length - 1; par->dma_start = start; par->dma_end = end; } } return ret; } static struct fb_ops da8xx_fb_ops = { .owner = THIS_MODULE, .fb_check_var = fb_check_var, .fb_setcolreg = fb_setcolreg, .fb_pan_display = da8xx_pan_display, .fb_ioctl = fb_ioctl, .fb_fillrect = cfb_fillrect, .fb_copyarea = cfb_copyarea, .fb_imageblit = cfb_imageblit, .fb_blank = cfb_blank, }; static int __init fb_probe(struct platform_device *device) { struct da8xx_lcdc_platform_data *fb_pdata = device->dev.platform_data; struct lcd_ctrl_config *lcd_cfg; struct da8xx_panel *lcdc_info; struct fb_info *da8xx_fb_info; struct clk *fb_clk = NULL; struct da8xx_fb_par *par; resource_size_t len; int ret, i; if (fb_pdata == NULL) { dev_err(&device->dev, "Can not get platform data\n"); return -ENOENT; } lcdc_regs = platform_get_resource(device, IORESOURCE_MEM, 0); if (!lcdc_regs) { dev_err(&device->dev, "Can not get memory resource for LCD controller\n"); return -ENOENT; } len = resource_size(lcdc_regs); lcdc_regs = request_mem_region(lcdc_regs->start, len, lcdc_regs->name); if (!lcdc_regs) return -EBUSY; da8xx_fb_reg_base = (resource_size_t)ioremap(lcdc_regs->start, len); if (!da8xx_fb_reg_base) { ret = -EBUSY; goto err_request_mem; } fb_clk = clk_get(&device->dev, NULL); if (IS_ERR(fb_clk)) { dev_err(&device->dev, "Can not get device 
clock\n"); ret = -ENODEV; goto err_ioremap; } ret = clk_enable(fb_clk); if (ret) goto err_clk_put; for (i = 0, lcdc_info = known_lcd_panels; i < ARRAY_SIZE(known_lcd_panels); i++, lcdc_info++) { if (strcmp(fb_pdata->type, lcdc_info->name) == 0) break; } if (i == ARRAY_SIZE(known_lcd_panels)) { dev_err(&device->dev, "GLCD: No valid panel found\n"); ret = -ENODEV; goto err_clk_disable; } else dev_info(&device->dev, "GLCD: Found %s panel\n", fb_pdata->type); lcd_cfg = (struct lcd_ctrl_config *)fb_pdata->controller_data; da8xx_fb_info = framebuffer_alloc(sizeof(struct da8xx_fb_par), &device->dev); if (!da8xx_fb_info) { dev_dbg(&device->dev, "Memory allocation failed for fb_info\n"); ret = -ENOMEM; goto err_clk_disable; } par = da8xx_fb_info->par; par->lcdc_clk = fb_clk; par->pxl_clk = lcdc_info->pxl_clk; if (fb_pdata->panel_power_ctrl) { par->panel_power_ctrl = fb_pdata->panel_power_ctrl; par->panel_power_ctrl(1); } if (lcd_init(par, lcd_cfg, lcdc_info) < 0) { dev_err(&device->dev, "lcd_init failed\n"); ret = -EFAULT; goto err_release_fb; } /* allocate frame buffer */ par->vram_size = lcdc_info->width * lcdc_info->height * lcd_cfg->bpp; par->vram_size = PAGE_ALIGN(par->vram_size/8); par->vram_size = par->vram_size * LCD_NUM_BUFFERS; par->vram_virt = dma_alloc_coherent(NULL, par->vram_size, (resource_size_t *) &par->vram_phys, GFP_KERNEL | GFP_DMA); if (!par->vram_virt) { dev_err(&device->dev, "GLCD: kmalloc for frame buffer failed\n"); ret = -EINVAL; goto err_release_fb; } da8xx_fb_info->screen_base = (char __iomem *) par->vram_virt; da8xx_fb_fix.smem_start = par->vram_phys; da8xx_fb_fix.smem_len = par->vram_size; da8xx_fb_fix.line_length = (lcdc_info->width * lcd_cfg->bpp) / 8; par->dma_start = par->vram_phys; par->dma_end = par->dma_start + lcdc_info->height * da8xx_fb_fix.line_length - 1; /* allocate palette buffer */ par->v_palette_base = dma_alloc_coherent(NULL, PALETTE_SIZE, (resource_size_t *) &par->p_palette_base, GFP_KERNEL | GFP_DMA); if 
(!par->v_palette_base) { dev_err(&device->dev, "GLCD: kmalloc for palette buffer failed\n"); ret = -EINVAL; goto err_release_fb_mem; } memset(par->v_palette_base, 0, PALETTE_SIZE); par->irq = platform_get_irq(device, 0); if (par->irq < 0) { ret = -ENOENT; goto err_release_pl_mem; } ret = request_irq(par->irq, lcdc_irq_handler, 0, DRIVER_NAME, par); if (ret) goto err_release_pl_mem; /* Initialize par */ da8xx_fb_info->var.bits_per_pixel = lcd_cfg->bpp; da8xx_fb_var.xres = lcdc_info->width; da8xx_fb_var.xres_virtual = lcdc_info->width; da8xx_fb_var.yres = lcdc_info->height; da8xx_fb_var.yres_virtual = lcdc_info->height * LCD_NUM_BUFFERS; da8xx_fb_var.grayscale = lcd_cfg->p_disp_panel->panel_shade == MONOCHROME ? 1 : 0; da8xx_fb_var.bits_per_pixel = lcd_cfg->bpp; da8xx_fb_var.hsync_len = lcdc_info->hsw; da8xx_fb_var.vsync_len = lcdc_info->vsw; /* Initialize fbinfo */ da8xx_fb_info->flags = FBINFO_FLAG_DEFAULT; da8xx_fb_info->fix = da8xx_fb_fix; da8xx_fb_info->var = da8xx_fb_var; da8xx_fb_info->fbops = &da8xx_fb_ops; da8xx_fb_info->pseudo_palette = par->pseudo_palette; da8xx_fb_info->fix.visual = (da8xx_fb_info->var.bits_per_pixel <= 8) ? 
FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_TRUECOLOR; ret = fb_alloc_cmap(&da8xx_fb_info->cmap, PALETTE_SIZE, 0); if (ret) goto err_free_irq; da8xx_fb_info->cmap.len = par->palette_sz; /* initialize var_screeninfo */ da8xx_fb_var.activate = FB_ACTIVATE_FORCE; fb_set_var(da8xx_fb_info, &da8xx_fb_var); dev_set_drvdata(&device->dev, da8xx_fb_info); /* initialize the vsync wait queue */ init_waitqueue_head(&par->vsync_wait); par->vsync_timeout = HZ / 5; /* Register the Frame Buffer */ if (register_framebuffer(da8xx_fb_info) < 0) { dev_err(&device->dev, "GLCD: Frame Buffer Registration Failed!\n"); ret = -EINVAL; goto err_dealloc_cmap; } #ifdef CONFIG_CPU_FREQ ret = lcd_da8xx_cpufreq_register(par); if (ret) { dev_err(&device->dev, "failed to register cpufreq\n"); goto err_cpu_freq; } #endif return 0; #ifdef CONFIG_CPU_FREQ err_cpu_freq: unregister_framebuffer(da8xx_fb_info); #endif err_dealloc_cmap: fb_dealloc_cmap(&da8xx_fb_info->cmap); err_free_irq: free_irq(par->irq, par); err_release_pl_mem: dma_free_coherent(NULL, PALETTE_SIZE, par->v_palette_base, par->p_palette_base); err_release_fb_mem: dma_free_coherent(NULL, par->vram_size, par->vram_virt, par->vram_phys); err_release_fb: framebuffer_release(da8xx_fb_info); err_clk_disable: clk_disable(fb_clk); err_clk_put: clk_put(fb_clk); err_ioremap: iounmap((void __iomem *)da8xx_fb_reg_base); err_request_mem: release_mem_region(lcdc_regs->start, len); return ret; } #ifdef CONFIG_PM static int fb_suspend(struct platform_device *dev, pm_message_t state) { struct fb_info *info = platform_get_drvdata(dev); struct da8xx_fb_par *par = info->par; acquire_console_sem(); if (par->panel_power_ctrl) par->panel_power_ctrl(0); fb_set_suspend(info, 1); lcd_disable_raster(); clk_disable(par->lcdc_clk); release_console_sem(); return 0; } static int fb_resume(struct platform_device *dev) { struct fb_info *info = platform_get_drvdata(dev); struct da8xx_fb_par *par = info->par; acquire_console_sem(); if (par->panel_power_ctrl) 
par->panel_power_ctrl(1); clk_enable(par->lcdc_clk); lcd_enable_raster(); fb_set_suspend(info, 0); release_console_sem(); return 0; } #else #define fb_suspend NULL #define fb_resume NULL #endif static struct platform_driver da8xx_fb_driver = { .probe = fb_probe, .remove = fb_remove, .suspend = fb_suspend, .resume = fb_resume, .driver = { .name = DRIVER_NAME, .owner = THIS_MODULE, }, }; static int __init da8xx_fb_init(void) { return platform_driver_register(&da8xx_fb_driver); } static void __exit da8xx_fb_cleanup(void) { platform_driver_unregister(&da8xx_fb_driver); } module_init(da8xx_fb_init); module_exit(da8xx_fb_cleanup); MODULE_DESCRIPTION("Framebuffer driver for TI da8xx/omap-l1xx"); MODULE_AUTHOR("Texas Instruments"); MODULE_LICENSE("GPL");
gpl-2.0
ryanli/kernel_huawei_c8650
arch/sh/kernel/cpu/sh4a/setup-sh7763.c
901
14921
/* * SH7763 Setup * * Copyright (C) 2006 Paul Mundt * Copyright (C) 2007 Yoshihiro Shimoda * Copyright (C) 2008, 2009 Nobuhiro Iwamatsu * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ #include <linux/platform_device.h> #include <linux/init.h> #include <linux/serial.h> #include <linux/sh_timer.h> #include <linux/io.h> #include <linux/serial_sci.h> static struct plat_sci_port scif0_platform_data = { .mapbase = 0xffe00000, .flags = UPF_BOOT_AUTOCONF, .type = PORT_SCIF, .irqs = { 40, 40, 40, 40 }, }; static struct platform_device scif0_device = { .name = "sh-sci", .id = 0, .dev = { .platform_data = &scif0_platform_data, }, }; static struct plat_sci_port scif1_platform_data = { .mapbase = 0xffe08000, .flags = UPF_BOOT_AUTOCONF, .type = PORT_SCIF, .irqs = { 76, 76, 76, 76 }, }; static struct platform_device scif1_device = { .name = "sh-sci", .id = 1, .dev = { .platform_data = &scif1_platform_data, }, }; static struct plat_sci_port scif2_platform_data = { .mapbase = 0xffe10000, .flags = UPF_BOOT_AUTOCONF, .type = PORT_SCIF, .irqs = { 104, 104, 104, 104 }, }; static struct platform_device scif2_device = { .name = "sh-sci", .id = 2, .dev = { .platform_data = &scif2_platform_data, }, }; static struct resource rtc_resources[] = { [0] = { .start = 0xffe80000, .end = 0xffe80000 + 0x58 - 1, .flags = IORESOURCE_IO, }, [1] = { /* Shared Period/Carry/Alarm IRQ */ .start = 20, .flags = IORESOURCE_IRQ, }, }; static struct platform_device rtc_device = { .name = "sh-rtc", .id = -1, .num_resources = ARRAY_SIZE(rtc_resources), .resource = rtc_resources, }; static struct resource usb_ohci_resources[] = { [0] = { .start = 0xffec8000, .end = 0xffec80ff, .flags = IORESOURCE_MEM, }, [1] = { .start = 83, .end = 83, .flags = IORESOURCE_IRQ, }, }; static u64 usb_ohci_dma_mask = 0xffffffffUL; static struct platform_device usb_ohci_device = { .name = "sh_ohci", .id = -1, 
.dev = { .dma_mask = &usb_ohci_dma_mask, .coherent_dma_mask = 0xffffffff, }, .num_resources = ARRAY_SIZE(usb_ohci_resources), .resource = usb_ohci_resources, }; static struct resource usbf_resources[] = { [0] = { .start = 0xffec0000, .end = 0xffec00ff, .flags = IORESOURCE_MEM, }, [1] = { .start = 84, .end = 84, .flags = IORESOURCE_IRQ, }, }; static struct platform_device usbf_device = { .name = "sh_udc", .id = -1, .dev = { .dma_mask = NULL, .coherent_dma_mask = 0xffffffff, }, .num_resources = ARRAY_SIZE(usbf_resources), .resource = usbf_resources, }; static struct sh_timer_config tmu0_platform_data = { .channel_offset = 0x04, .timer_bit = 0, .clockevent_rating = 200, }; static struct resource tmu0_resources[] = { [0] = { .start = 0xffd80008, .end = 0xffd80013, .flags = IORESOURCE_MEM, }, [1] = { .start = 28, .flags = IORESOURCE_IRQ, }, }; static struct platform_device tmu0_device = { .name = "sh_tmu", .id = 0, .dev = { .platform_data = &tmu0_platform_data, }, .resource = tmu0_resources, .num_resources = ARRAY_SIZE(tmu0_resources), }; static struct sh_timer_config tmu1_platform_data = { .channel_offset = 0x10, .timer_bit = 1, .clocksource_rating = 200, }; static struct resource tmu1_resources[] = { [0] = { .start = 0xffd80014, .end = 0xffd8001f, .flags = IORESOURCE_MEM, }, [1] = { .start = 29, .flags = IORESOURCE_IRQ, }, }; static struct platform_device tmu1_device = { .name = "sh_tmu", .id = 1, .dev = { .platform_data = &tmu1_platform_data, }, .resource = tmu1_resources, .num_resources = ARRAY_SIZE(tmu1_resources), }; static struct sh_timer_config tmu2_platform_data = { .channel_offset = 0x1c, .timer_bit = 2, }; static struct resource tmu2_resources[] = { [0] = { .start = 0xffd80020, .end = 0xffd8002f, .flags = IORESOURCE_MEM, }, [1] = { .start = 30, .flags = IORESOURCE_IRQ, }, }; static struct platform_device tmu2_device = { .name = "sh_tmu", .id = 2, .dev = { .platform_data = &tmu2_platform_data, }, .resource = tmu2_resources, .num_resources = 
ARRAY_SIZE(tmu2_resources), }; static struct sh_timer_config tmu3_platform_data = { .channel_offset = 0x04, .timer_bit = 0, }; static struct resource tmu3_resources[] = { [0] = { .start = 0xffd88008, .end = 0xffd88013, .flags = IORESOURCE_MEM, }, [1] = { .start = 96, .flags = IORESOURCE_IRQ, }, }; static struct platform_device tmu3_device = { .name = "sh_tmu", .id = 3, .dev = { .platform_data = &tmu3_platform_data, }, .resource = tmu3_resources, .num_resources = ARRAY_SIZE(tmu3_resources), }; static struct sh_timer_config tmu4_platform_data = { .channel_offset = 0x10, .timer_bit = 1, }; static struct resource tmu4_resources[] = { [0] = { .start = 0xffd88014, .end = 0xffd8801f, .flags = IORESOURCE_MEM, }, [1] = { .start = 97, .flags = IORESOURCE_IRQ, }, }; static struct platform_device tmu4_device = { .name = "sh_tmu", .id = 4, .dev = { .platform_data = &tmu4_platform_data, }, .resource = tmu4_resources, .num_resources = ARRAY_SIZE(tmu4_resources), }; static struct sh_timer_config tmu5_platform_data = { .channel_offset = 0x1c, .timer_bit = 2, }; static struct resource tmu5_resources[] = { [0] = { .start = 0xffd88020, .end = 0xffd8802b, .flags = IORESOURCE_MEM, }, [1] = { .start = 98, .flags = IORESOURCE_IRQ, }, }; static struct platform_device tmu5_device = { .name = "sh_tmu", .id = 5, .dev = { .platform_data = &tmu5_platform_data, }, .resource = tmu5_resources, .num_resources = ARRAY_SIZE(tmu5_resources), }; static struct platform_device *sh7763_devices[] __initdata = { &scif0_device, &scif1_device, &scif2_device, &tmu0_device, &tmu1_device, &tmu2_device, &tmu3_device, &tmu4_device, &tmu5_device, &rtc_device, &usb_ohci_device, &usbf_device, }; static int __init sh7763_devices_setup(void) { return platform_add_devices(sh7763_devices, ARRAY_SIZE(sh7763_devices)); } arch_initcall(sh7763_devices_setup); static struct platform_device *sh7763_early_devices[] __initdata = { &scif0_device, &scif1_device, &scif2_device, &tmu0_device, &tmu1_device, &tmu2_device, 
&tmu3_device, &tmu4_device, &tmu5_device, }; void __init plat_early_device_setup(void) { early_platform_add_devices(sh7763_early_devices, ARRAY_SIZE(sh7763_early_devices)); } enum { UNUSED = 0, /* interrupt sources */ IRL_LLLL, IRL_LLLH, IRL_LLHL, IRL_LLHH, IRL_LHLL, IRL_LHLH, IRL_LHHL, IRL_LHHH, IRL_HLLL, IRL_HLLH, IRL_HLHL, IRL_HLHH, IRL_HHLL, IRL_HHLH, IRL_HHHL, IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7, RTC, WDT, TMU0, TMU1, TMU2, TMU2_TICPI, HUDI, LCDC, DMAC, SCIF0, IIC0, IIC1, CMT, GETHER, HAC, PCISERR, PCIINTA, PCIINTB, PCIINTC, PCIINTD, PCIC5, STIF0, STIF1, SCIF1, SIOF0, SIOF1, SIOF2, USBH, USBF, TPU, PCC, MMCIF, SIM, TMU3, TMU4, TMU5, ADC, SSI0, SSI1, SSI2, SSI3, SCIF2, GPIO, /* interrupt groups */ TMU012, TMU345, }; static struct intc_vect vectors[] __initdata = { INTC_VECT(RTC, 0x480), INTC_VECT(RTC, 0x4a0), INTC_VECT(RTC, 0x4c0), INTC_VECT(WDT, 0x560), INTC_VECT(TMU0, 0x580), INTC_VECT(TMU1, 0x5a0), INTC_VECT(TMU2, 0x5c0), INTC_VECT(TMU2_TICPI, 0x5e0), INTC_VECT(HUDI, 0x600), INTC_VECT(LCDC, 0x620), INTC_VECT(DMAC, 0x640), INTC_VECT(DMAC, 0x660), INTC_VECT(DMAC, 0x680), INTC_VECT(DMAC, 0x6a0), INTC_VECT(DMAC, 0x6c0), INTC_VECT(SCIF0, 0x700), INTC_VECT(SCIF0, 0x720), INTC_VECT(SCIF0, 0x740), INTC_VECT(SCIF0, 0x760), INTC_VECT(DMAC, 0x780), INTC_VECT(DMAC, 0x7a0), INTC_VECT(IIC0, 0x8A0), INTC_VECT(IIC1, 0x8C0), INTC_VECT(CMT, 0x900), INTC_VECT(GETHER, 0x920), INTC_VECT(GETHER, 0x940), INTC_VECT(GETHER, 0x960), INTC_VECT(HAC, 0x980), INTC_VECT(PCISERR, 0xa00), INTC_VECT(PCIINTA, 0xa20), INTC_VECT(PCIINTB, 0xa40), INTC_VECT(PCIINTC, 0xa60), INTC_VECT(PCIINTD, 0xa80), INTC_VECT(PCIC5, 0xaa0), INTC_VECT(PCIC5, 0xac0), INTC_VECT(PCIC5, 0xae0), INTC_VECT(PCIC5, 0xb00), INTC_VECT(PCIC5, 0xb20), INTC_VECT(STIF0, 0xb40), INTC_VECT(STIF1, 0xb60), INTC_VECT(SCIF1, 0xb80), INTC_VECT(SCIF1, 0xba0), INTC_VECT(SCIF1, 0xbc0), INTC_VECT(SCIF1, 0xbe0), INTC_VECT(SIOF0, 0xc00), INTC_VECT(SIOF1, 0xc20), INTC_VECT(USBH, 0xc60), INTC_VECT(USBF, 0xc80), INTC_VECT(USBF, 
0xca0), INTC_VECT(TPU, 0xcc0), INTC_VECT(PCC, 0xce0), INTC_VECT(MMCIF, 0xd00), INTC_VECT(MMCIF, 0xd20), INTC_VECT(MMCIF, 0xd40), INTC_VECT(MMCIF, 0xd60), INTC_VECT(SIM, 0xd80), INTC_VECT(SIM, 0xda0), INTC_VECT(SIM, 0xdc0), INTC_VECT(SIM, 0xde0), INTC_VECT(TMU3, 0xe00), INTC_VECT(TMU4, 0xe20), INTC_VECT(TMU5, 0xe40), INTC_VECT(ADC, 0xe60), INTC_VECT(SSI0, 0xe80), INTC_VECT(SSI1, 0xea0), INTC_VECT(SSI2, 0xec0), INTC_VECT(SSI3, 0xee0), INTC_VECT(SCIF2, 0xf00), INTC_VECT(SCIF2, 0xf20), INTC_VECT(SCIF2, 0xf40), INTC_VECT(SCIF2, 0xf60), INTC_VECT(GPIO, 0xf80), INTC_VECT(GPIO, 0xfa0), INTC_VECT(GPIO, 0xfc0), INTC_VECT(GPIO, 0xfe0), }; static struct intc_group groups[] __initdata = { INTC_GROUP(TMU012, TMU0, TMU1, TMU2, TMU2_TICPI), INTC_GROUP(TMU345, TMU3, TMU4, TMU5), }; static struct intc_mask_reg mask_registers[] __initdata = { { 0xffd40038, 0xffd4003c, 32, /* INT2MSKR / INT2MSKCR */ { 0, 0, 0, 0, 0, 0, GPIO, 0, SSI0, MMCIF, 0, SIOF0, PCIC5, PCIINTD, PCIINTC, PCIINTB, PCIINTA, PCISERR, HAC, CMT, 0, 0, 0, DMAC, HUDI, 0, WDT, SCIF1, SCIF0, RTC, TMU345, TMU012 } }, { 0xffd400d0, 0xffd400d4, 32, /* INT2MSKR1 / INT2MSKCR1 */ { 0, 0, 0, 0, 0, 0, SCIF2, USBF, 0, 0, STIF1, STIF0, 0, 0, USBH, GETHER, PCC, 0, 0, ADC, TPU, SIM, SIOF2, SIOF1, LCDC, 0, IIC1, IIC0, SSI3, SSI2, SSI1, 0 } }, }; static struct intc_prio_reg prio_registers[] __initdata = { { 0xffd40000, 0, 32, 8, /* INT2PRI0 */ { TMU0, TMU1, TMU2, TMU2_TICPI } }, { 0xffd40004, 0, 32, 8, /* INT2PRI1 */ { TMU3, TMU4, TMU5, RTC } }, { 0xffd40008, 0, 32, 8, /* INT2PRI2 */ { SCIF0, SCIF1, WDT } }, { 0xffd4000c, 0, 32, 8, /* INT2PRI3 */ { HUDI, DMAC, ADC } }, { 0xffd40010, 0, 32, 8, /* INT2PRI4 */ { CMT, HAC, PCISERR, PCIINTA } }, { 0xffd40014, 0, 32, 8, /* INT2PRI5 */ { PCIINTB, PCIINTC, PCIINTD, PCIC5 } }, { 0xffd40018, 0, 32, 8, /* INT2PRI6 */ { SIOF0, USBF, MMCIF, SSI0 } }, { 0xffd4001c, 0, 32, 8, /* INT2PRI7 */ { SCIF2, GPIO } }, { 0xffd400a0, 0, 32, 8, /* INT2PRI8 */ { SSI3, SSI2, SSI1, 0 } }, { 0xffd400a4, 0, 32, 8, /* 
INT2PRI9 */ { LCDC, 0, IIC1, IIC0 } }, { 0xffd400a8, 0, 32, 8, /* INT2PRI10 */ { TPU, SIM, SIOF2, SIOF1 } }, { 0xffd400ac, 0, 32, 8, /* INT2PRI11 */ { PCC } }, { 0xffd400b0, 0, 32, 8, /* INT2PRI12 */ { 0, 0, USBH, GETHER } }, { 0xffd400b4, 0, 32, 8, /* INT2PRI13 */ { 0, 0, STIF1, STIF0 } }, }; static DECLARE_INTC_DESC(intc_desc, "sh7763", vectors, groups, mask_registers, prio_registers, NULL); /* Support for external interrupt pins in IRQ mode */ static struct intc_vect irq_vectors[] __initdata = { INTC_VECT(IRQ0, 0x240), INTC_VECT(IRQ1, 0x280), INTC_VECT(IRQ2, 0x2c0), INTC_VECT(IRQ3, 0x300), INTC_VECT(IRQ4, 0x340), INTC_VECT(IRQ5, 0x380), INTC_VECT(IRQ6, 0x3c0), INTC_VECT(IRQ7, 0x200), }; static struct intc_mask_reg irq_mask_registers[] __initdata = { { 0xffd00044, 0xffd00064, 32, /* INTMSK0 / INTMSKCLR0 */ { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } }, }; static struct intc_prio_reg irq_prio_registers[] __initdata = { { 0xffd00010, 0, 32, 4, /* INTPRI */ { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } }, }; static struct intc_sense_reg irq_sense_registers[] __initdata = { { 0xffd0001c, 32, 2, /* ICR1 */ { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } }, }; static struct intc_mask_reg irq_ack_registers[] __initdata = { { 0xffd00024, 0, 32, /* INTREQ */ { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } }, }; static DECLARE_INTC_DESC_ACK(intc_irq_desc, "sh7763-irq", irq_vectors, NULL, irq_mask_registers, irq_prio_registers, irq_sense_registers, irq_ack_registers); /* External interrupt pins in IRL mode */ static struct intc_vect irl_vectors[] __initdata = { INTC_VECT(IRL_LLLL, 0x200), INTC_VECT(IRL_LLLH, 0x220), INTC_VECT(IRL_LLHL, 0x240), INTC_VECT(IRL_LLHH, 0x260), INTC_VECT(IRL_LHLL, 0x280), INTC_VECT(IRL_LHLH, 0x2a0), INTC_VECT(IRL_LHHL, 0x2c0), INTC_VECT(IRL_LHHH, 0x2e0), INTC_VECT(IRL_HLLL, 0x300), INTC_VECT(IRL_HLLH, 0x320), INTC_VECT(IRL_HLHL, 0x340), INTC_VECT(IRL_HLHH, 0x360), INTC_VECT(IRL_HHLL, 0x380), INTC_VECT(IRL_HHLH, 0x3a0), 
INTC_VECT(IRL_HHHL, 0x3c0), }; static struct intc_mask_reg irl3210_mask_registers[] __initdata = { { 0xffd40080, 0xffd40084, 32, /* INTMSK2 / INTMSKCLR2 */ { IRL_LLLL, IRL_LLLH, IRL_LLHL, IRL_LLHH, IRL_LHLL, IRL_LHLH, IRL_LHHL, IRL_LHHH, IRL_HLLL, IRL_HLLH, IRL_HLHL, IRL_HLHH, IRL_HHLL, IRL_HHLH, IRL_HHHL, } }, }; static struct intc_mask_reg irl7654_mask_registers[] __initdata = { { 0xffd40080, 0xffd40084, 32, /* INTMSK2 / INTMSKCLR2 */ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, IRL_LLLL, IRL_LLLH, IRL_LLHL, IRL_LLHH, IRL_LHLL, IRL_LHLH, IRL_LHHL, IRL_LHHH, IRL_HLLL, IRL_HLLH, IRL_HLHL, IRL_HLHH, IRL_HHLL, IRL_HHLH, IRL_HHHL, } }, }; static DECLARE_INTC_DESC(intc_irl7654_desc, "sh7763-irl7654", irl_vectors, NULL, irl7654_mask_registers, NULL, NULL); static DECLARE_INTC_DESC(intc_irl3210_desc, "sh7763-irl3210", irl_vectors, NULL, irl3210_mask_registers, NULL, NULL); #define INTC_ICR0 0xffd00000 #define INTC_INTMSK0 0xffd00044 #define INTC_INTMSK1 0xffd00048 #define INTC_INTMSK2 0xffd40080 #define INTC_INTMSKCLR1 0xffd00068 #define INTC_INTMSKCLR2 0xffd40084 void __init plat_irq_setup(void) { /* disable IRQ7-0 */ __raw_writel(0xff000000, INTC_INTMSK0); /* disable IRL3-0 + IRL7-4 */ __raw_writel(0xc0000000, INTC_INTMSK1); __raw_writel(0xfffefffe, INTC_INTMSK2); register_intc_controller(&intc_desc); } void __init plat_irq_setup_pins(int mode) { switch (mode) { case IRQ_MODE_IRQ: /* select IRQ mode for IRL3-0 + IRL7-4 */ __raw_writel(__raw_readl(INTC_ICR0) | 0x00c00000, INTC_ICR0); register_intc_controller(&intc_irq_desc); break; case IRQ_MODE_IRL7654: /* enable IRL7-4 but don't provide any masking */ __raw_writel(0x40000000, INTC_INTMSKCLR1); __raw_writel(0x0000fffe, INTC_INTMSKCLR2); break; case IRQ_MODE_IRL3210: /* enable IRL0-3 but don't provide any masking */ __raw_writel(0x80000000, INTC_INTMSKCLR1); __raw_writel(0xfffe0000, INTC_INTMSKCLR2); break; case IRQ_MODE_IRL7654_MASK: /* enable IRL7-4 and mask using cpu intc controller */ __raw_writel(0x40000000, 
INTC_INTMSKCLR1); register_intc_controller(&intc_irl7654_desc); break; case IRQ_MODE_IRL3210_MASK: /* enable IRL0-3 and mask using cpu intc controller */ __raw_writel(0x80000000, INTC_INTMSKCLR1); register_intc_controller(&intc_irl3210_desc); break; default: BUG(); } }
gpl-2.0
peacetank200/android_kernel_samsung_m180l
drivers/serial/jsm/jsm_neo.c
1157
37207
/************************************************************************ * Copyright 2003 Digi International (www.digi.com) * * Copyright (C) 2004 IBM Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR * PURPOSE. See the GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 * Temple Place - Suite 330, Boston, * MA 02111-1307, USA. * * Contact Information: * Scott H Kilau <Scott_Kilau@digi.com> * Wendy Xiong <wendyx@us.ibm.com> * ***********************************************************************/ #include <linux/delay.h> /* For udelay */ #include <linux/serial_reg.h> /* For the various UART offsets */ #include <linux/tty.h> #include <linux/pci.h> #include <asm/io.h> #include "jsm.h" /* Driver main header file */ static u32 jsm_offset_table[8] = { 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80 }; /* * This function allows calls to ensure that all outstanding * PCI writes have been completed, by doing a PCI read against * a non-destructive, read-only location on the Neo card. * * In this case, we are reading the DVID (Read-only Device Identification) * value of the Neo card. 
*/ static inline void neo_pci_posting_flush(struct jsm_board *bd) { readb(bd->re_map_membase + 0x8D); } static void neo_set_cts_flow_control(struct jsm_channel *ch) { u8 ier, efr; ier = readb(&ch->ch_neo_uart->ier); efr = readb(&ch->ch_neo_uart->efr); jsm_printk(PARAM, INFO, &ch->ch_bd->pci_dev, "Setting CTSFLOW\n"); /* Turn on auto CTS flow control */ ier |= (UART_17158_IER_CTSDSR); efr |= (UART_17158_EFR_ECB | UART_17158_EFR_CTSDSR); /* Turn off auto Xon flow control */ efr &= ~(UART_17158_EFR_IXON); /* Why? Becuz Exar's spec says we have to zero it out before setting it */ writeb(0, &ch->ch_neo_uart->efr); /* Turn on UART enhanced bits */ writeb(efr, &ch->ch_neo_uart->efr); /* Turn on table D, with 8 char hi/low watermarks */ writeb((UART_17158_FCTR_TRGD | UART_17158_FCTR_RTS_4DELAY), &ch->ch_neo_uart->fctr); /* Feed the UART our trigger levels */ writeb(8, &ch->ch_neo_uart->tfifo); ch->ch_t_tlevel = 8; writeb(ier, &ch->ch_neo_uart->ier); } static void neo_set_rts_flow_control(struct jsm_channel *ch) { u8 ier, efr; ier = readb(&ch->ch_neo_uart->ier); efr = readb(&ch->ch_neo_uart->efr); jsm_printk(PARAM, INFO, &ch->ch_bd->pci_dev, "Setting RTSFLOW\n"); /* Turn on auto RTS flow control */ ier |= (UART_17158_IER_RTSDTR); efr |= (UART_17158_EFR_ECB | UART_17158_EFR_RTSDTR); /* Turn off auto Xoff flow control */ ier &= ~(UART_17158_IER_XOFF); efr &= ~(UART_17158_EFR_IXOFF); /* Why? Becuz Exar's spec says we have to zero it out before setting it */ writeb(0, &ch->ch_neo_uart->efr); /* Turn on UART enhanced bits */ writeb(efr, &ch->ch_neo_uart->efr); writeb((UART_17158_FCTR_TRGD | UART_17158_FCTR_RTS_4DELAY), &ch->ch_neo_uart->fctr); ch->ch_r_watermark = 4; writeb(56, &ch->ch_neo_uart->rfifo); ch->ch_r_tlevel = 56; writeb(ier, &ch->ch_neo_uart->ier); /* * From the Neo UART spec sheet: * The auto RTS/DTR function must be started by asserting * RTS/DTR# output pin (MCR bit-0 or 1 to logic 1 after * it is enabled. 
*/ ch->ch_mostat |= (UART_MCR_RTS); } static void neo_set_ixon_flow_control(struct jsm_channel *ch) { u8 ier, efr; ier = readb(&ch->ch_neo_uart->ier); efr = readb(&ch->ch_neo_uart->efr); jsm_printk(PARAM, INFO, &ch->ch_bd->pci_dev, "Setting IXON FLOW\n"); /* Turn off auto CTS flow control */ ier &= ~(UART_17158_IER_CTSDSR); efr &= ~(UART_17158_EFR_CTSDSR); /* Turn on auto Xon flow control */ efr |= (UART_17158_EFR_ECB | UART_17158_EFR_IXON); /* Why? Becuz Exar's spec says we have to zero it out before setting it */ writeb(0, &ch->ch_neo_uart->efr); /* Turn on UART enhanced bits */ writeb(efr, &ch->ch_neo_uart->efr); writeb((UART_17158_FCTR_TRGD | UART_17158_FCTR_RTS_8DELAY), &ch->ch_neo_uart->fctr); ch->ch_r_watermark = 4; writeb(32, &ch->ch_neo_uart->rfifo); ch->ch_r_tlevel = 32; /* Tell UART what start/stop chars it should be looking for */ writeb(ch->ch_startc, &ch->ch_neo_uart->xonchar1); writeb(0, &ch->ch_neo_uart->xonchar2); writeb(ch->ch_stopc, &ch->ch_neo_uart->xoffchar1); writeb(0, &ch->ch_neo_uart->xoffchar2); writeb(ier, &ch->ch_neo_uart->ier); } static void neo_set_ixoff_flow_control(struct jsm_channel *ch) { u8 ier, efr; ier = readb(&ch->ch_neo_uart->ier); efr = readb(&ch->ch_neo_uart->efr); jsm_printk(PARAM, INFO, &ch->ch_bd->pci_dev, "Setting IXOFF FLOW\n"); /* Turn off auto RTS flow control */ ier &= ~(UART_17158_IER_RTSDTR); efr &= ~(UART_17158_EFR_RTSDTR); /* Turn on auto Xoff flow control */ ier |= (UART_17158_IER_XOFF); efr |= (UART_17158_EFR_ECB | UART_17158_EFR_IXOFF); /* Why? 
Becuz Exar's spec says we have to zero it out before setting it */ writeb(0, &ch->ch_neo_uart->efr); /* Turn on UART enhanced bits */ writeb(efr, &ch->ch_neo_uart->efr); /* Turn on table D, with 8 char hi/low watermarks */ writeb((UART_17158_FCTR_TRGD | UART_17158_FCTR_RTS_8DELAY), &ch->ch_neo_uart->fctr); writeb(8, &ch->ch_neo_uart->tfifo); ch->ch_t_tlevel = 8; /* Tell UART what start/stop chars it should be looking for */ writeb(ch->ch_startc, &ch->ch_neo_uart->xonchar1); writeb(0, &ch->ch_neo_uart->xonchar2); writeb(ch->ch_stopc, &ch->ch_neo_uart->xoffchar1); writeb(0, &ch->ch_neo_uart->xoffchar2); writeb(ier, &ch->ch_neo_uart->ier); } static void neo_set_no_input_flow_control(struct jsm_channel *ch) { u8 ier, efr; ier = readb(&ch->ch_neo_uart->ier); efr = readb(&ch->ch_neo_uart->efr); jsm_printk(PARAM, INFO, &ch->ch_bd->pci_dev, "Unsetting Input FLOW\n"); /* Turn off auto RTS flow control */ ier &= ~(UART_17158_IER_RTSDTR); efr &= ~(UART_17158_EFR_RTSDTR); /* Turn off auto Xoff flow control */ ier &= ~(UART_17158_IER_XOFF); if (ch->ch_c_iflag & IXON) efr &= ~(UART_17158_EFR_IXOFF); else efr &= ~(UART_17158_EFR_ECB | UART_17158_EFR_IXOFF); /* Why? 
Becuz Exar's spec says we have to zero it out before setting it */ writeb(0, &ch->ch_neo_uart->efr); /* Turn on UART enhanced bits */ writeb(efr, &ch->ch_neo_uart->efr); /* Turn on table D, with 8 char hi/low watermarks */ writeb((UART_17158_FCTR_TRGD | UART_17158_FCTR_RTS_8DELAY), &ch->ch_neo_uart->fctr); ch->ch_r_watermark = 0; writeb(16, &ch->ch_neo_uart->tfifo); ch->ch_t_tlevel = 16; writeb(16, &ch->ch_neo_uart->rfifo); ch->ch_r_tlevel = 16; writeb(ier, &ch->ch_neo_uart->ier); } static void neo_set_no_output_flow_control(struct jsm_channel *ch) { u8 ier, efr; ier = readb(&ch->ch_neo_uart->ier); efr = readb(&ch->ch_neo_uart->efr); jsm_printk(PARAM, INFO, &ch->ch_bd->pci_dev, "Unsetting Output FLOW\n"); /* Turn off auto CTS flow control */ ier &= ~(UART_17158_IER_CTSDSR); efr &= ~(UART_17158_EFR_CTSDSR); /* Turn off auto Xon flow control */ if (ch->ch_c_iflag & IXOFF) efr &= ~(UART_17158_EFR_IXON); else efr &= ~(UART_17158_EFR_ECB | UART_17158_EFR_IXON); /* Why? Becuz Exar's spec says we have to zero it out before setting it */ writeb(0, &ch->ch_neo_uart->efr); /* Turn on UART enhanced bits */ writeb(efr, &ch->ch_neo_uart->efr); /* Turn on table D, with 8 char hi/low watermarks */ writeb((UART_17158_FCTR_TRGD | UART_17158_FCTR_RTS_8DELAY), &ch->ch_neo_uart->fctr); ch->ch_r_watermark = 0; writeb(16, &ch->ch_neo_uart->tfifo); ch->ch_t_tlevel = 16; writeb(16, &ch->ch_neo_uart->rfifo); ch->ch_r_tlevel = 16; writeb(ier, &ch->ch_neo_uart->ier); } static inline void neo_set_new_start_stop_chars(struct jsm_channel *ch) { /* if hardware flow control is set, then skip this whole thing */ if (ch->ch_c_cflag & CRTSCTS) return; jsm_printk(PARAM, INFO, &ch->ch_bd->pci_dev, "start\n"); /* Tell UART what start/stop chars it should be looking for */ writeb(ch->ch_startc, &ch->ch_neo_uart->xonchar1); writeb(0, &ch->ch_neo_uart->xonchar2); writeb(ch->ch_stopc, &ch->ch_neo_uart->xoffchar1); writeb(0, &ch->ch_neo_uart->xoffchar2); } static void neo_copy_data_from_uart_to_queue(struct 
jsm_channel *ch) { int qleft = 0; u8 linestatus = 0; u8 error_mask = 0; int n = 0; int total = 0; u16 head; u16 tail; if (!ch) return; /* cache head and tail of queue */ head = ch->ch_r_head & RQUEUEMASK; tail = ch->ch_r_tail & RQUEUEMASK; /* Get our cached LSR */ linestatus = ch->ch_cached_lsr; ch->ch_cached_lsr = 0; /* Store how much space we have left in the queue */ if ((qleft = tail - head - 1) < 0) qleft += RQUEUEMASK + 1; /* * If the UART is not in FIFO mode, force the FIFO copy to * NOT be run, by setting total to 0. * * On the other hand, if the UART IS in FIFO mode, then ask * the UART to give us an approximation of data it has RX'ed. */ if (!(ch->ch_flags & CH_FIFO_ENABLED)) total = 0; else { total = readb(&ch->ch_neo_uart->rfifo); /* * EXAR chip bug - RX FIFO COUNT - Fudge factor. * * This resolves a problem/bug with the Exar chip that sometimes * returns a bogus value in the rfifo register. * The count can be any where from 0-3 bytes "off". * Bizarre, but true. */ total -= 3; } /* * Finally, bound the copy to make sure we don't overflow * our own queue... * The byte by byte copy loop below this loop this will * deal with the queue overflow possibility. */ total = min(total, qleft); while (total > 0) { /* * Grab the linestatus register, we need to check * to see if there are any errors in the FIFO. */ linestatus = readb(&ch->ch_neo_uart->lsr); /* * Break out if there is a FIFO error somewhere. * This will allow us to go byte by byte down below, * finding the exact location of the error. */ if (linestatus & UART_17158_RX_FIFO_DATA_ERROR) break; /* Make sure we don't go over the end of our queue */ n = min(((u32) total), (RQUEUESIZE - (u32) head)); /* * Cut down n even further if needed, this is to fix * a problem with memcpy_fromio() with the Neo on the * IBM pSeries platform. * 15 bytes max appears to be the magic number. 
*/ n = min((u32) n, (u32) 12); /* * Since we are grabbing the linestatus register, which * will reset some bits after our read, we need to ensure * we don't miss our TX FIFO emptys. */ if (linestatus & (UART_LSR_THRE | UART_17158_TX_AND_FIFO_CLR)) ch->ch_flags |= (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM); linestatus = 0; /* Copy data from uart to the queue */ memcpy_fromio(ch->ch_rqueue + head, &ch->ch_neo_uart->txrxburst, n); /* * Since RX_FIFO_DATA_ERROR was 0, we are guarenteed * that all the data currently in the FIFO is free of * breaks and parity/frame/orun errors. */ memset(ch->ch_equeue + head, 0, n); /* Add to and flip head if needed */ head = (head + n) & RQUEUEMASK; total -= n; qleft -= n; ch->ch_rxcount += n; } /* * Create a mask to determine whether we should * insert the character (if any) into our queue. */ if (ch->ch_c_iflag & IGNBRK) error_mask |= UART_LSR_BI; /* * Now cleanup any leftover bytes still in the UART. * Also deal with any possible queue overflow here as well. */ while (1) { /* * Its possible we have a linestatus from the loop above * this, so we "OR" on any extra bits. */ linestatus |= readb(&ch->ch_neo_uart->lsr); /* * If the chip tells us there is no more data pending to * be read, we can then leave. * But before we do, cache the linestatus, just in case. */ if (!(linestatus & UART_LSR_DR)) { ch->ch_cached_lsr = linestatus; break; } /* No need to store this bit */ linestatus &= ~UART_LSR_DR; /* * Since we are grabbing the linestatus register, which * will reset some bits after our read, we need to ensure * we don't miss our TX FIFO emptys. */ if (linestatus & (UART_LSR_THRE | UART_17158_TX_AND_FIFO_CLR)) { linestatus &= ~(UART_LSR_THRE | UART_17158_TX_AND_FIFO_CLR); ch->ch_flags |= (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM); } /* * Discard character if we are ignoring the error mask. 
*/ if (linestatus & error_mask) { u8 discard; linestatus = 0; memcpy_fromio(&discard, &ch->ch_neo_uart->txrxburst, 1); continue; } /* * If our queue is full, we have no choice but to drop some data. * The assumption is that HWFLOW or SWFLOW should have stopped * things way way before we got to this point. * * I decided that I wanted to ditch the oldest data first, * I hope thats okay with everyone? Yes? Good. */ while (qleft < 1) { jsm_printk(READ, INFO, &ch->ch_bd->pci_dev, "Queue full, dropping DATA:%x LSR:%x\n", ch->ch_rqueue[tail], ch->ch_equeue[tail]); ch->ch_r_tail = tail = (tail + 1) & RQUEUEMASK; ch->ch_err_overrun++; qleft++; } memcpy_fromio(ch->ch_rqueue + head, &ch->ch_neo_uart->txrxburst, 1); ch->ch_equeue[head] = (u8) linestatus; jsm_printk(READ, INFO, &ch->ch_bd->pci_dev, "DATA/LSR pair: %x %x\n", ch->ch_rqueue[head], ch->ch_equeue[head]); /* Ditch any remaining linestatus value. */ linestatus = 0; /* Add to and flip head if needed */ head = (head + 1) & RQUEUEMASK; qleft--; ch->ch_rxcount++; } /* * Write new final heads to channel structure. */ ch->ch_r_head = head & RQUEUEMASK; ch->ch_e_head = head & EQUEUEMASK; jsm_input(ch); } static void neo_copy_data_from_queue_to_uart(struct jsm_channel *ch) { u16 head; u16 tail; int n; int s; int qlen; u32 len_written = 0; if (!ch) return; /* No data to write to the UART */ if (ch->ch_w_tail == ch->ch_w_head) return; /* If port is "stopped", don't send any data to the UART */ if ((ch->ch_flags & CH_STOP) || (ch->ch_flags & CH_BREAK_SENDING)) return; /* * If FIFOs are disabled. 
Send data directly to txrx register */ if (!(ch->ch_flags & CH_FIFO_ENABLED)) { u8 lsrbits = readb(&ch->ch_neo_uart->lsr); ch->ch_cached_lsr |= lsrbits; if (ch->ch_cached_lsr & UART_LSR_THRE) { ch->ch_cached_lsr &= ~(UART_LSR_THRE); writeb(ch->ch_wqueue[ch->ch_w_tail], &ch->ch_neo_uart->txrx); jsm_printk(WRITE, INFO, &ch->ch_bd->pci_dev, "Tx data: %x\n", ch->ch_wqueue[ch->ch_w_head]); ch->ch_w_tail++; ch->ch_w_tail &= WQUEUEMASK; ch->ch_txcount++; } return; } /* * We have to do it this way, because of the EXAR TXFIFO count bug. */ if (!(ch->ch_flags & (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM))) return; n = UART_17158_TX_FIFOSIZE - ch->ch_t_tlevel; /* cache head and tail of queue */ head = ch->ch_w_head & WQUEUEMASK; tail = ch->ch_w_tail & WQUEUEMASK; qlen = (head - tail) & WQUEUEMASK; /* Find minimum of the FIFO space, versus queue length */ n = min(n, qlen); while (n > 0) { s = ((head >= tail) ? head : WQUEUESIZE) - tail; s = min(s, n); if (s <= 0) break; memcpy_toio(&ch->ch_neo_uart->txrxburst, ch->ch_wqueue + tail, s); /* Add and flip queue if needed */ tail = (tail + s) & WQUEUEMASK; n -= s; ch->ch_txcount += s; len_written += s; } /* Update the final tail */ ch->ch_w_tail = tail & WQUEUEMASK; if (len_written >= ch->ch_t_tlevel) ch->ch_flags &= ~(CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM); if (!jsm_tty_write(&ch->uart_port)) uart_write_wakeup(&ch->uart_port); } static void neo_parse_modem(struct jsm_channel *ch, u8 signals) { u8 msignals = signals; jsm_printk(MSIGS, INFO, &ch->ch_bd->pci_dev, "neo_parse_modem: port: %d msignals: %x\n", ch->ch_portnum, msignals); /* Scrub off lower bits. 
They signify delta's, which I don't care about */ /* Keep DDCD and DDSR though */ msignals &= 0xf8; if (msignals & UART_MSR_DDCD) uart_handle_dcd_change(&ch->uart_port, msignals & UART_MSR_DCD); if (msignals & UART_MSR_DDSR) uart_handle_cts_change(&ch->uart_port, msignals & UART_MSR_CTS); if (msignals & UART_MSR_DCD) ch->ch_mistat |= UART_MSR_DCD; else ch->ch_mistat &= ~UART_MSR_DCD; if (msignals & UART_MSR_DSR) ch->ch_mistat |= UART_MSR_DSR; else ch->ch_mistat &= ~UART_MSR_DSR; if (msignals & UART_MSR_RI) ch->ch_mistat |= UART_MSR_RI; else ch->ch_mistat &= ~UART_MSR_RI; if (msignals & UART_MSR_CTS) ch->ch_mistat |= UART_MSR_CTS; else ch->ch_mistat &= ~UART_MSR_CTS; jsm_printk(MSIGS, INFO, &ch->ch_bd->pci_dev, "Port: %d DTR: %d RTS: %d CTS: %d DSR: %d " "RI: %d CD: %d\n", ch->ch_portnum, !!((ch->ch_mistat | ch->ch_mostat) & UART_MCR_DTR), !!((ch->ch_mistat | ch->ch_mostat) & UART_MCR_RTS), !!((ch->ch_mistat | ch->ch_mostat) & UART_MSR_CTS), !!((ch->ch_mistat | ch->ch_mostat) & UART_MSR_DSR), !!((ch->ch_mistat | ch->ch_mostat) & UART_MSR_RI), !!((ch->ch_mistat | ch->ch_mostat) & UART_MSR_DCD)); } /* Make the UART raise any of the output signals we want up */ static void neo_assert_modem_signals(struct jsm_channel *ch) { if (!ch) return; writeb(ch->ch_mostat, &ch->ch_neo_uart->mcr); /* flush write operation */ neo_pci_posting_flush(ch->ch_bd); } /* * Flush the WRITE FIFO on the Neo. * * NOTE: Channel lock MUST be held before calling this function! */ static void neo_flush_uart_write(struct jsm_channel *ch) { u8 tmp = 0; int i = 0; if (!ch) return; writeb((UART_FCR_ENABLE_FIFO | UART_FCR_CLEAR_XMIT), &ch->ch_neo_uart->isr_fcr); for (i = 0; i < 10; i++) { /* Check to see if the UART feels it completely flushed the FIFO. */ tmp = readb(&ch->ch_neo_uart->isr_fcr); if (tmp & 4) { jsm_printk(IOCTL, INFO, &ch->ch_bd->pci_dev, "Still flushing TX UART... 
i: %d\n", i); udelay(10); } else break; } ch->ch_flags |= (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM); } /* * Flush the READ FIFO on the Neo. * * NOTE: Channel lock MUST be held before calling this function! */ static void neo_flush_uart_read(struct jsm_channel *ch) { u8 tmp = 0; int i = 0; if (!ch) return; writeb((UART_FCR_ENABLE_FIFO | UART_FCR_CLEAR_RCVR), &ch->ch_neo_uart->isr_fcr); for (i = 0; i < 10; i++) { /* Check to see if the UART feels it completely flushed the FIFO. */ tmp = readb(&ch->ch_neo_uart->isr_fcr); if (tmp & 2) { jsm_printk(IOCTL, INFO, &ch->ch_bd->pci_dev, "Still flushing RX UART... i: %d\n", i); udelay(10); } else break; } } /* * No locks are assumed to be held when calling this function. */ static void neo_clear_break(struct jsm_channel *ch, int force) { unsigned long lock_flags; spin_lock_irqsave(&ch->ch_lock, lock_flags); /* Turn break off, and unset some variables */ if (ch->ch_flags & CH_BREAK_SENDING) { u8 temp = readb(&ch->ch_neo_uart->lcr); writeb((temp & ~UART_LCR_SBC), &ch->ch_neo_uart->lcr); ch->ch_flags &= ~(CH_BREAK_SENDING); jsm_printk(IOCTL, INFO, &ch->ch_bd->pci_dev, "clear break Finishing UART_LCR_SBC! finished: %lx\n", jiffies); /* flush write operation */ neo_pci_posting_flush(ch->ch_bd); } spin_unlock_irqrestore(&ch->ch_lock, lock_flags); } /* * Parse the ISR register. */ static inline void neo_parse_isr(struct jsm_board *brd, u32 port) { struct jsm_channel *ch; u8 isr; u8 cause; unsigned long lock_flags; if (!brd) return; if (port > brd->maxports) return; ch = brd->channels[port]; if (!ch) return; /* Here we try to figure out what caused the interrupt to happen */ while (1) { isr = readb(&ch->ch_neo_uart->isr_fcr); /* Bail if no pending interrupt */ if (isr & UART_IIR_NO_INT) break; /* * Yank off the upper 2 bits, which just show that the FIFO's are enabled. 
*/ isr &= ~(UART_17158_IIR_FIFO_ENABLED); jsm_printk(INTR, INFO, &ch->ch_bd->pci_dev, "%s:%d isr: %x\n", __FILE__, __LINE__, isr); if (isr & (UART_17158_IIR_RDI_TIMEOUT | UART_IIR_RDI)) { /* Read data from uart -> queue */ neo_copy_data_from_uart_to_queue(ch); /* Call our tty layer to enforce queue flow control if needed. */ spin_lock_irqsave(&ch->ch_lock, lock_flags); jsm_check_queue_flow_control(ch); spin_unlock_irqrestore(&ch->ch_lock, lock_flags); } if (isr & UART_IIR_THRI) { /* Transfer data (if any) from Write Queue -> UART. */ spin_lock_irqsave(&ch->ch_lock, lock_flags); ch->ch_flags |= (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM); spin_unlock_irqrestore(&ch->ch_lock, lock_flags); neo_copy_data_from_queue_to_uart(ch); } if (isr & UART_17158_IIR_XONXOFF) { cause = readb(&ch->ch_neo_uart->xoffchar1); jsm_printk(INTR, INFO, &ch->ch_bd->pci_dev, "Port %d. Got ISR_XONXOFF: cause:%x\n", port, cause); /* * Since the UART detected either an XON or * XOFF match, we need to figure out which * one it was, so we can suspend or resume data flow. */ spin_lock_irqsave(&ch->ch_lock, lock_flags); if (cause == UART_17158_XON_DETECT) { /* Is output stopped right now, if so, resume it */ if (brd->channels[port]->ch_flags & CH_STOP) { ch->ch_flags &= ~(CH_STOP); } jsm_printk(INTR, INFO, &ch->ch_bd->pci_dev, "Port %d. XON detected in incoming data\n", port); } else if (cause == UART_17158_XOFF_DETECT) { if (!(brd->channels[port]->ch_flags & CH_STOP)) { ch->ch_flags |= CH_STOP; jsm_printk(INTR, INFO, &ch->ch_bd->pci_dev, "Setting CH_STOP\n"); } jsm_printk(INTR, INFO, &ch->ch_bd->pci_dev, "Port: %d. XOFF detected in incoming data\n", port); } spin_unlock_irqrestore(&ch->ch_lock, lock_flags); } if (isr & UART_17158_IIR_HWFLOW_STATE_CHANGE) { /* * If we get here, this means the hardware is doing auto flow control. * Check to see whether RTS/DTR or CTS/DSR caused this interrupt. */ cause = readb(&ch->ch_neo_uart->mcr); /* Which pin is doing auto flow? RTS or DTR? 
*/ spin_lock_irqsave(&ch->ch_lock, lock_flags); if ((cause & 0x4) == 0) { if (cause & UART_MCR_RTS) ch->ch_mostat |= UART_MCR_RTS; else ch->ch_mostat &= ~(UART_MCR_RTS); } else { if (cause & UART_MCR_DTR) ch->ch_mostat |= UART_MCR_DTR; else ch->ch_mostat &= ~(UART_MCR_DTR); } spin_unlock_irqrestore(&ch->ch_lock, lock_flags); } /* Parse any modem signal changes */ jsm_printk(INTR, INFO, &ch->ch_bd->pci_dev, "MOD_STAT: sending to parse_modem_sigs\n"); neo_parse_modem(ch, readb(&ch->ch_neo_uart->msr)); } } static inline void neo_parse_lsr(struct jsm_board *brd, u32 port) { struct jsm_channel *ch; int linestatus; unsigned long lock_flags; if (!brd) return; if (port > brd->maxports) return; ch = brd->channels[port]; if (!ch) return; linestatus = readb(&ch->ch_neo_uart->lsr); jsm_printk(INTR, INFO, &ch->ch_bd->pci_dev, "%s:%d port: %d linestatus: %x\n", __FILE__, __LINE__, port, linestatus); ch->ch_cached_lsr |= linestatus; if (ch->ch_cached_lsr & UART_LSR_DR) { /* Read data from uart -> queue */ neo_copy_data_from_uart_to_queue(ch); spin_lock_irqsave(&ch->ch_lock, lock_flags); jsm_check_queue_flow_control(ch); spin_unlock_irqrestore(&ch->ch_lock, lock_flags); } /* * This is a special flag. It indicates that at least 1 * RX error (parity, framing, or break) has happened. * Mark this in our struct, which will tell me that I have *to do the special RX+LSR read for this FIFO load. */ if (linestatus & UART_17158_RX_FIFO_DATA_ERROR) jsm_printk(INTR, DEBUG, &ch->ch_bd->pci_dev, "%s:%d Port: %d Got an RX error, need to parse LSR\n", __FILE__, __LINE__, port); /* * The next 3 tests should *NOT* happen, as the above test * should encapsulate all 3... At least, thats what Exar says. */ if (linestatus & UART_LSR_PE) { ch->ch_err_parity++; jsm_printk(INTR, DEBUG, &ch->ch_bd->pci_dev, "%s:%d Port: %d. PAR ERR!\n", __FILE__, __LINE__, port); } if (linestatus & UART_LSR_FE) { ch->ch_err_frame++; jsm_printk(INTR, DEBUG, &ch->ch_bd->pci_dev, "%s:%d Port: %d. 
FRM ERR!\n", __FILE__, __LINE__, port); } if (linestatus & UART_LSR_BI) { ch->ch_err_break++; jsm_printk(INTR, DEBUG, &ch->ch_bd->pci_dev, "%s:%d Port: %d. BRK INTR!\n", __FILE__, __LINE__, port); } if (linestatus & UART_LSR_OE) { /* * Rx Oruns. Exar says that an orun will NOT corrupt * the FIFO. It will just replace the holding register * with this new data byte. So basically just ignore this. * Probably we should eventually have an orun stat in our driver... */ ch->ch_err_overrun++; jsm_printk(INTR, DEBUG, &ch->ch_bd->pci_dev, "%s:%d Port: %d. Rx Overrun!\n", __FILE__, __LINE__, port); } if (linestatus & UART_LSR_THRE) { spin_lock_irqsave(&ch->ch_lock, lock_flags); ch->ch_flags |= (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM); spin_unlock_irqrestore(&ch->ch_lock, lock_flags); /* Transfer data (if any) from Write Queue -> UART. */ neo_copy_data_from_queue_to_uart(ch); } else if (linestatus & UART_17158_TX_AND_FIFO_CLR) { spin_lock_irqsave(&ch->ch_lock, lock_flags); ch->ch_flags |= (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM); spin_unlock_irqrestore(&ch->ch_lock, lock_flags); /* Transfer data (if any) from Write Queue -> UART. */ neo_copy_data_from_queue_to_uart(ch); } } /* * neo_param() * Send any/all changes to the line to the UART. */ static void neo_param(struct jsm_channel *ch) { u8 lcr = 0; u8 uart_lcr, ier; u32 baud; int quot; struct jsm_board *bd; bd = ch->ch_bd; if (!bd) return; /* * If baud rate is zero, flush queues, and set mval to drop DTR. 
*/ if ((ch->ch_c_cflag & (CBAUD)) == 0) { ch->ch_r_head = ch->ch_r_tail = 0; ch->ch_e_head = ch->ch_e_tail = 0; ch->ch_w_head = ch->ch_w_tail = 0; neo_flush_uart_write(ch); neo_flush_uart_read(ch); ch->ch_flags |= (CH_BAUD0); ch->ch_mostat &= ~(UART_MCR_RTS | UART_MCR_DTR); neo_assert_modem_signals(ch); return; } else { int i; unsigned int cflag; static struct { unsigned int rate; unsigned int cflag; } baud_rates[] = { { 921600, B921600 }, { 460800, B460800 }, { 230400, B230400 }, { 115200, B115200 }, { 57600, B57600 }, { 38400, B38400 }, { 19200, B19200 }, { 9600, B9600 }, { 4800, B4800 }, { 2400, B2400 }, { 1200, B1200 }, { 600, B600 }, { 300, B300 }, { 200, B200 }, { 150, B150 }, { 134, B134 }, { 110, B110 }, { 75, B75 }, { 50, B50 }, }; cflag = C_BAUD(ch->uart_port.state->port.tty); baud = 9600; for (i = 0; i < ARRAY_SIZE(baud_rates); i++) { if (baud_rates[i].cflag == cflag) { baud = baud_rates[i].rate; break; } } if (ch->ch_flags & CH_BAUD0) ch->ch_flags &= ~(CH_BAUD0); } if (ch->ch_c_cflag & PARENB) lcr |= UART_LCR_PARITY; if (!(ch->ch_c_cflag & PARODD)) lcr |= UART_LCR_EPAR; /* * Not all platforms support mark/space parity, * so this will hide behind an ifdef. 
*/ #ifdef CMSPAR if (ch->ch_c_cflag & CMSPAR) lcr |= UART_LCR_SPAR; #endif if (ch->ch_c_cflag & CSTOPB) lcr |= UART_LCR_STOP; switch (ch->ch_c_cflag & CSIZE) { case CS5: lcr |= UART_LCR_WLEN5; break; case CS6: lcr |= UART_LCR_WLEN6; break; case CS7: lcr |= UART_LCR_WLEN7; break; case CS8: default: lcr |= UART_LCR_WLEN8; break; } ier = readb(&ch->ch_neo_uart->ier); uart_lcr = readb(&ch->ch_neo_uart->lcr); if (baud == 0) baud = 9600; quot = ch->ch_bd->bd_dividend / baud; if (quot != 0) { writeb(UART_LCR_DLAB, &ch->ch_neo_uart->lcr); writeb((quot & 0xff), &ch->ch_neo_uart->txrx); writeb((quot >> 8), &ch->ch_neo_uart->ier); writeb(lcr, &ch->ch_neo_uart->lcr); } if (uart_lcr != lcr) writeb(lcr, &ch->ch_neo_uart->lcr); if (ch->ch_c_cflag & CREAD) ier |= (UART_IER_RDI | UART_IER_RLSI); ier |= (UART_IER_THRI | UART_IER_MSI); writeb(ier, &ch->ch_neo_uart->ier); /* Set new start/stop chars */ neo_set_new_start_stop_chars(ch); if (ch->ch_c_cflag & CRTSCTS) neo_set_cts_flow_control(ch); else if (ch->ch_c_iflag & IXON) { /* If start/stop is set to disable, then we should disable flow control */ if ((ch->ch_startc == __DISABLED_CHAR) || (ch->ch_stopc == __DISABLED_CHAR)) neo_set_no_output_flow_control(ch); else neo_set_ixon_flow_control(ch); } else neo_set_no_output_flow_control(ch); if (ch->ch_c_cflag & CRTSCTS) neo_set_rts_flow_control(ch); else if (ch->ch_c_iflag & IXOFF) { /* If start/stop is set to disable, then we should disable flow control */ if ((ch->ch_startc == __DISABLED_CHAR) || (ch->ch_stopc == __DISABLED_CHAR)) neo_set_no_input_flow_control(ch); else neo_set_ixoff_flow_control(ch); } else neo_set_no_input_flow_control(ch); /* * Adjust the RX FIFO Trigger level if baud is less than 9600. * Not exactly elegant, but this is needed because of the Exar chip's * delay on firing off the RX FIFO interrupt on slower baud rates. 
*/ if (baud < 9600) { writeb(1, &ch->ch_neo_uart->rfifo); ch->ch_r_tlevel = 1; } neo_assert_modem_signals(ch); /* Get current status of the modem signals now */ neo_parse_modem(ch, readb(&ch->ch_neo_uart->msr)); return; } /* * jsm_neo_intr() * * Neo specific interrupt handler. */ static irqreturn_t neo_intr(int irq, void *voidbrd) { struct jsm_board *brd = voidbrd; struct jsm_channel *ch; int port = 0; int type = 0; int current_port; u32 tmp; u32 uart_poll; unsigned long lock_flags; unsigned long lock_flags2; int outofloop_count = 0; /* Lock out the slow poller from running on this board. */ spin_lock_irqsave(&brd->bd_intr_lock, lock_flags); /* * Read in "extended" IRQ information from the 32bit Neo register. * Bits 0-7: What port triggered the interrupt. * Bits 8-31: Each 3bits indicate what type of interrupt occurred. */ uart_poll = readl(brd->re_map_membase + UART_17158_POLL_ADDR_OFFSET); jsm_printk(INTR, INFO, &brd->pci_dev, "%s:%d uart_poll: %x\n", __FILE__, __LINE__, uart_poll); if (!uart_poll) { jsm_printk(INTR, INFO, &brd->pci_dev, "Kernel interrupted to me, but no pending interrupts...\n"); spin_unlock_irqrestore(&brd->bd_intr_lock, lock_flags); return IRQ_NONE; } /* At this point, we have at least SOMETHING to service, dig further... */ current_port = 0; /* Loop on each port */ while (((uart_poll & 0xff) != 0) && (outofloop_count < 0xff)){ tmp = uart_poll; outofloop_count++; /* Check current port to see if it has interrupt pending */ if ((tmp & jsm_offset_table[current_port]) != 0) { port = current_port; type = tmp >> (8 + (port * 3)); type &= 0x7; } else { current_port++; continue; } jsm_printk(INTR, INFO, &brd->pci_dev, "%s:%d port: %x type: %x\n", __FILE__, __LINE__, port, type); /* Remove this port + type from uart_poll */ uart_poll &= ~(jsm_offset_table[port]); if (!type) { /* If no type, just ignore it, and move onto next port */ jsm_printk(INTR, ERR, &brd->pci_dev, "Interrupt with no type! 
port: %d\n", port); continue; } /* Switch on type of interrupt we have */ switch (type) { case UART_17158_RXRDY_TIMEOUT: /* * RXRDY Time-out is cleared by reading data in the * RX FIFO until it falls below the trigger level. */ /* Verify the port is in range. */ if (port > brd->nasync) continue; ch = brd->channels[port]; neo_copy_data_from_uart_to_queue(ch); /* Call our tty layer to enforce queue flow control if needed. */ spin_lock_irqsave(&ch->ch_lock, lock_flags2); jsm_check_queue_flow_control(ch); spin_unlock_irqrestore(&ch->ch_lock, lock_flags2); continue; case UART_17158_RX_LINE_STATUS: /* * RXRDY and RX LINE Status (logic OR of LSR[4:1]) */ neo_parse_lsr(brd, port); continue; case UART_17158_TXRDY: /* * TXRDY interrupt clears after reading ISR register for the UART channel. */ /* * Yes, this is odd... * Why would I check EVERY possibility of type of * interrupt, when we know its TXRDY??? * Becuz for some reason, even tho we got triggered for TXRDY, * it seems to be occassionally wrong. Instead of TX, which * it should be, I was getting things like RXDY too. Weird. */ neo_parse_isr(brd, port); continue; case UART_17158_MSR: /* * MSR or flow control was seen. */ neo_parse_isr(brd, port); continue; default: /* * The UART triggered us with a bogus interrupt type. * It appears the Exar chip, when REALLY bogged down, will throw * these once and awhile. * Its harmless, just ignore it and move on. */ jsm_printk(INTR, ERR, &brd->pci_dev, "%s:%d Unknown Interrupt type: %x\n", __FILE__, __LINE__, type); continue; } } spin_unlock_irqrestore(&brd->bd_intr_lock, lock_flags); jsm_printk(INTR, INFO, &brd->pci_dev, "finish.\n"); return IRQ_HANDLED; } /* * Neo specific way of turning off the receiver. * Used as a way to enforce queue flow control when in * hardware flow control mode. 
*/ static void neo_disable_receiver(struct jsm_channel *ch) { u8 tmp = readb(&ch->ch_neo_uart->ier); tmp &= ~(UART_IER_RDI); writeb(tmp, &ch->ch_neo_uart->ier); /* flush write operation */ neo_pci_posting_flush(ch->ch_bd); } /* * Neo specific way of turning on the receiver. * Used as a way to un-enforce queue flow control when in * hardware flow control mode. */ static void neo_enable_receiver(struct jsm_channel *ch) { u8 tmp = readb(&ch->ch_neo_uart->ier); tmp |= (UART_IER_RDI); writeb(tmp, &ch->ch_neo_uart->ier); /* flush write operation */ neo_pci_posting_flush(ch->ch_bd); } static void neo_send_start_character(struct jsm_channel *ch) { if (!ch) return; if (ch->ch_startc != __DISABLED_CHAR) { ch->ch_xon_sends++; writeb(ch->ch_startc, &ch->ch_neo_uart->txrx); /* flush write operation */ neo_pci_posting_flush(ch->ch_bd); } } static void neo_send_stop_character(struct jsm_channel *ch) { if (!ch) return; if (ch->ch_stopc != __DISABLED_CHAR) { ch->ch_xoff_sends++; writeb(ch->ch_stopc, &ch->ch_neo_uart->txrx); /* flush write operation */ neo_pci_posting_flush(ch->ch_bd); } } /* * neo_uart_init */ static void neo_uart_init(struct jsm_channel *ch) { writeb(0, &ch->ch_neo_uart->ier); writeb(0, &ch->ch_neo_uart->efr); writeb(UART_EFR_ECB, &ch->ch_neo_uart->efr); /* Clear out UART and FIFO */ readb(&ch->ch_neo_uart->txrx); writeb((UART_FCR_ENABLE_FIFO|UART_FCR_CLEAR_RCVR|UART_FCR_CLEAR_XMIT), &ch->ch_neo_uart->isr_fcr); readb(&ch->ch_neo_uart->lsr); readb(&ch->ch_neo_uart->msr); ch->ch_flags |= CH_FIFO_ENABLED; /* Assert any signals we want up */ writeb(ch->ch_mostat, &ch->ch_neo_uart->mcr); } /* * Make the UART completely turn off. */ static void neo_uart_off(struct jsm_channel *ch) { /* Turn off UART enhanced bits */ writeb(0, &ch->ch_neo_uart->efr); /* Stop all interrupts from occurring. 
*/ writeb(0, &ch->ch_neo_uart->ier); } static u32 neo_get_uart_bytes_left(struct jsm_channel *ch) { u8 left = 0; u8 lsr = readb(&ch->ch_neo_uart->lsr); /* We must cache the LSR as some of the bits get reset once read... */ ch->ch_cached_lsr |= lsr; /* Determine whether the Transmitter is empty or not */ if (!(lsr & UART_LSR_TEMT)) left = 1; else { ch->ch_flags |= (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM); left = 0; } return left; } /* Channel lock MUST be held by the calling function! */ static void neo_send_break(struct jsm_channel *ch) { /* * Set the time we should stop sending the break. * If we are already sending a break, toss away the existing * time to stop, and use this new value instead. */ /* Tell the UART to start sending the break */ if (!(ch->ch_flags & CH_BREAK_SENDING)) { u8 temp = readb(&ch->ch_neo_uart->lcr); writeb((temp | UART_LCR_SBC), &ch->ch_neo_uart->lcr); ch->ch_flags |= (CH_BREAK_SENDING); /* flush write operation */ neo_pci_posting_flush(ch->ch_bd); } } /* * neo_send_immediate_char. * * Sends a specific character as soon as possible to the UART, * jumping over any bytes that might be in the write queue. * * The channel lock MUST be held by the calling function. 
*/ static void neo_send_immediate_char(struct jsm_channel *ch, unsigned char c) { if (!ch) return; writeb(c, &ch->ch_neo_uart->txrx); /* flush write operation */ neo_pci_posting_flush(ch->ch_bd); } struct board_ops jsm_neo_ops = { .intr = neo_intr, .uart_init = neo_uart_init, .uart_off = neo_uart_off, .param = neo_param, .assert_modem_signals = neo_assert_modem_signals, .flush_uart_write = neo_flush_uart_write, .flush_uart_read = neo_flush_uart_read, .disable_receiver = neo_disable_receiver, .enable_receiver = neo_enable_receiver, .send_break = neo_send_break, .clear_break = neo_clear_break, .send_start_character = neo_send_start_character, .send_stop_character = neo_send_stop_character, .copy_data_from_queue_to_uart = neo_copy_data_from_queue_to_uart, .get_uart_bytes_left = neo_get_uart_bytes_left, .send_immediate_char = neo_send_immediate_char };
gpl-2.0
fenggangwu/sffs
arch/arm/mach-ep93xx/snappercl15.c
3717
4731
/* * arch/arm/mach-ep93xx/snappercl15.c * Bluewater Systems Snapper CL15 system module * * Copyright (C) 2009 Bluewater Systems Ltd * Author: Ryan Mallon * * NAND code adapted from driver by: * Andre Renaud <andre@bluewatersys.com> * James R. McKaskill * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or (at * your option) any later version. * */ #include <linux/platform_device.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/io.h> #include <linux/i2c.h> #include <linux/i2c-gpio.h> #include <linux/fb.h> #include <linux/mtd/partitions.h> #include <linux/mtd/nand.h> #include <mach/hardware.h> #include <linux/platform_data/video-ep93xx.h> #include <mach/gpio-ep93xx.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> #include "soc.h" #define SNAPPERCL15_NAND_BASE (EP93XX_CS7_PHYS_BASE + SZ_16M) #define SNAPPERCL15_NAND_WPN (1 << 8) /* Write protect (active low) */ #define SNAPPERCL15_NAND_ALE (1 << 9) /* Address latch */ #define SNAPPERCL15_NAND_CLE (1 << 10) /* Command latch */ #define SNAPPERCL15_NAND_CEN (1 << 11) /* Chip enable (active low) */ #define SNAPPERCL15_NAND_RDY (1 << 14) /* Device ready */ #define NAND_CTRL_ADDR(chip) (chip->IO_ADDR_W + 0x40) static void snappercl15_nand_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl) { struct nand_chip *chip = mtd->priv; static u16 nand_state = SNAPPERCL15_NAND_WPN; u16 set; if (ctrl & NAND_CTRL_CHANGE) { set = SNAPPERCL15_NAND_CEN | SNAPPERCL15_NAND_WPN; if (ctrl & NAND_NCE) set &= ~SNAPPERCL15_NAND_CEN; if (ctrl & NAND_CLE) set |= SNAPPERCL15_NAND_CLE; if (ctrl & NAND_ALE) set |= SNAPPERCL15_NAND_ALE; nand_state &= ~(SNAPPERCL15_NAND_CEN | SNAPPERCL15_NAND_CLE | SNAPPERCL15_NAND_ALE); nand_state |= set; __raw_writew(nand_state, NAND_CTRL_ADDR(chip)); } if (cmd != NAND_CMD_NONE) __raw_writew((cmd & 0xff) | nand_state, 
chip->IO_ADDR_W); } static int snappercl15_nand_dev_ready(struct mtd_info *mtd) { struct nand_chip *chip = mtd->priv; return !!(__raw_readw(NAND_CTRL_ADDR(chip)) & SNAPPERCL15_NAND_RDY); } static struct mtd_partition snappercl15_nand_parts[] = { { .name = "Kernel", .offset = 0, .size = SZ_2M, }, { .name = "Filesystem", .offset = MTDPART_OFS_APPEND, .size = MTDPART_SIZ_FULL, }, }; static struct platform_nand_data snappercl15_nand_data = { .chip = { .nr_chips = 1, .partitions = snappercl15_nand_parts, .nr_partitions = ARRAY_SIZE(snappercl15_nand_parts), .chip_delay = 25, }, .ctrl = { .dev_ready = snappercl15_nand_dev_ready, .cmd_ctrl = snappercl15_nand_cmd_ctrl, }, }; static struct resource snappercl15_nand_resource[] = { { .start = SNAPPERCL15_NAND_BASE, .end = SNAPPERCL15_NAND_BASE + SZ_4K - 1, .flags = IORESOURCE_MEM, }, }; static struct platform_device snappercl15_nand_device = { .name = "gen_nand", .id = -1, .dev.platform_data = &snappercl15_nand_data, .resource = snappercl15_nand_resource, .num_resources = ARRAY_SIZE(snappercl15_nand_resource), }; static struct ep93xx_eth_data __initdata snappercl15_eth_data = { .phy_id = 1, }; static struct i2c_gpio_platform_data __initdata snappercl15_i2c_gpio_data = { .sda_pin = EP93XX_GPIO_LINE_EEDAT, .sda_is_open_drain = 0, .scl_pin = EP93XX_GPIO_LINE_EECLK, .scl_is_open_drain = 0, .udelay = 0, .timeout = 0, }; static struct i2c_board_info __initdata snappercl15_i2c_data[] = { { /* Audio codec */ I2C_BOARD_INFO("tlv320aic23", 0x1a), }, }; static struct ep93xxfb_mach_info __initdata snappercl15_fb_info = { .num_modes = EP93XXFB_USE_MODEDB, .bpp = 16, }; static struct platform_device snappercl15_audio_device = { .name = "snappercl15-audio", .id = -1, }; static void __init snappercl15_register_audio(void) { ep93xx_register_i2s(); platform_device_register(&snappercl15_audio_device); } static void __init snappercl15_init_machine(void) { ep93xx_init_devices(); ep93xx_register_eth(&snappercl15_eth_data, 1); 
ep93xx_register_i2c(&snappercl15_i2c_gpio_data, snappercl15_i2c_data, ARRAY_SIZE(snappercl15_i2c_data)); ep93xx_register_fb(&snappercl15_fb_info); snappercl15_register_audio(); platform_device_register(&snappercl15_nand_device); } MACHINE_START(SNAPPER_CL15, "Bluewater Systems Snapper CL15") /* Maintainer: Ryan Mallon */ .atag_offset = 0x100, .map_io = ep93xx_map_io, .init_irq = ep93xx_init_irq, .init_time = ep93xx_timer_init, .init_machine = snappercl15_init_machine, .init_late = ep93xx_init_late, .restart = ep93xx_restart, MACHINE_END
gpl-2.0
parheliamm/android_kernel_nubia_nx403a
drivers/video/omap2/displays/panel-taal.c
4741
39135
/* * Taal DSI command mode panel * * Copyright (C) 2009 Nokia Corporation * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published by * the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program. If not, see <http://www.gnu.org/licenses/>. */ /*#define DEBUG*/ #include <linux/module.h> #include <linux/delay.h> #include <linux/err.h> #include <linux/jiffies.h> #include <linux/sched.h> #include <linux/backlight.h> #include <linux/fb.h> #include <linux/interrupt.h> #include <linux/gpio.h> #include <linux/workqueue.h> #include <linux/slab.h> #include <linux/regulator/consumer.h> #include <linux/mutex.h> #include <video/omapdss.h> #include <video/omap-panel-nokia-dsi.h> #include <video/mipi_display.h> /* DSI Virtual channel. Hardcoded for now. 
*/ #define TCH 0 #define DCS_READ_NUM_ERRORS 0x05 #define DCS_BRIGHTNESS 0x51 #define DCS_CTRL_DISPLAY 0x53 #define DCS_WRITE_CABC 0x55 #define DCS_READ_CABC 0x56 #define DCS_GET_ID1 0xda #define DCS_GET_ID2 0xdb #define DCS_GET_ID3 0xdc static irqreturn_t taal_te_isr(int irq, void *data); static void taal_te_timeout_work_callback(struct work_struct *work); static int _taal_enable_te(struct omap_dss_device *dssdev, bool enable); static int taal_panel_reset(struct omap_dss_device *dssdev); struct panel_regulator { struct regulator *regulator; const char *name; int min_uV; int max_uV; }; static void free_regulators(struct panel_regulator *regulators, int n) { int i; for (i = 0; i < n; i++) { /* disable/put in reverse order */ regulator_disable(regulators[n - i - 1].regulator); regulator_put(regulators[n - i - 1].regulator); } } static int init_regulators(struct omap_dss_device *dssdev, struct panel_regulator *regulators, int n) { int r, i, v; for (i = 0; i < n; i++) { struct regulator *reg; reg = regulator_get(&dssdev->dev, regulators[i].name); if (IS_ERR(reg)) { dev_err(&dssdev->dev, "failed to get regulator %s\n", regulators[i].name); r = PTR_ERR(reg); goto err; } /* FIXME: better handling of fixed vs. 
variable regulators */ v = regulator_get_voltage(reg); if (v < regulators[i].min_uV || v > regulators[i].max_uV) { r = regulator_set_voltage(reg, regulators[i].min_uV, regulators[i].max_uV); if (r) { dev_err(&dssdev->dev, "failed to set regulator %s voltage\n", regulators[i].name); regulator_put(reg); goto err; } } r = regulator_enable(reg); if (r) { dev_err(&dssdev->dev, "failed to enable regulator %s\n", regulators[i].name); regulator_put(reg); goto err; } regulators[i].regulator = reg; } return 0; err: free_regulators(regulators, i); return r; } /** * struct panel_config - panel configuration * @name: panel name * @type: panel type * @timings: panel resolution * @sleep: various panel specific delays, passed to msleep() if non-zero * @reset_sequence: reset sequence timings, passed to udelay() if non-zero * @regulators: array of panel regulators * @num_regulators: number of regulators in the array */ struct panel_config { const char *name; int type; struct omap_video_timings timings; struct { unsigned int sleep_in; unsigned int sleep_out; unsigned int hw_reset; unsigned int enable_te; } sleep; struct { unsigned int high; unsigned int low; } reset_sequence; struct panel_regulator *regulators; int num_regulators; }; enum { PANEL_TAAL, }; static struct panel_config panel_configs[] = { { .name = "taal", .type = PANEL_TAAL, .timings = { .x_res = 864, .y_res = 480, }, .sleep = { .sleep_in = 5, .sleep_out = 5, .hw_reset = 5, .enable_te = 100, /* possible panel bug */ }, .reset_sequence = { .high = 10, .low = 10, }, }, }; struct taal_data { struct mutex lock; struct backlight_device *bldev; unsigned long hw_guard_end; /* next value of jiffies when we can * issue the next sleep in/out command */ unsigned long hw_guard_wait; /* max guard time in jiffies */ struct omap_dss_device *dssdev; bool enabled; u8 rotate; bool mirror; bool te_enabled; atomic_t do_update; int channel; struct delayed_work te_timeout_work; bool cabc_broken; unsigned cabc_mode; bool intro_printed; struct 
workqueue_struct *workqueue; struct delayed_work esd_work; unsigned esd_interval; bool ulps_enabled; unsigned ulps_timeout; struct delayed_work ulps_work; struct panel_config *panel_config; }; static inline struct nokia_dsi_panel_data *get_panel_data(const struct omap_dss_device *dssdev) { return (struct nokia_dsi_panel_data *) dssdev->data; } static void taal_esd_work(struct work_struct *work); static void taal_ulps_work(struct work_struct *work); static void hw_guard_start(struct taal_data *td, int guard_msec) { td->hw_guard_wait = msecs_to_jiffies(guard_msec); td->hw_guard_end = jiffies + td->hw_guard_wait; } static void hw_guard_wait(struct taal_data *td) { unsigned long wait = td->hw_guard_end - jiffies; if ((long)wait > 0 && wait <= td->hw_guard_wait) { set_current_state(TASK_UNINTERRUPTIBLE); schedule_timeout(wait); } } static int taal_dcs_read_1(struct taal_data *td, u8 dcs_cmd, u8 *data) { int r; u8 buf[1]; r = dsi_vc_dcs_read(td->dssdev, td->channel, dcs_cmd, buf, 1); if (r < 0) return r; *data = buf[0]; return 0; } static int taal_dcs_write_0(struct taal_data *td, u8 dcs_cmd) { return dsi_vc_dcs_write(td->dssdev, td->channel, &dcs_cmd, 1); } static int taal_dcs_write_1(struct taal_data *td, u8 dcs_cmd, u8 param) { u8 buf[2]; buf[0] = dcs_cmd; buf[1] = param; return dsi_vc_dcs_write(td->dssdev, td->channel, buf, 2); } static int taal_sleep_in(struct taal_data *td) { u8 cmd; int r; hw_guard_wait(td); cmd = MIPI_DCS_ENTER_SLEEP_MODE; r = dsi_vc_dcs_write_nosync(td->dssdev, td->channel, &cmd, 1); if (r) return r; hw_guard_start(td, 120); if (td->panel_config->sleep.sleep_in) msleep(td->panel_config->sleep.sleep_in); return 0; } static int taal_sleep_out(struct taal_data *td) { int r; hw_guard_wait(td); r = taal_dcs_write_0(td, MIPI_DCS_EXIT_SLEEP_MODE); if (r) return r; hw_guard_start(td, 120); if (td->panel_config->sleep.sleep_out) msleep(td->panel_config->sleep.sleep_out); return 0; } static int taal_get_id(struct taal_data *td, u8 *id1, u8 *id2, u8 *id3) 
{ int r; r = taal_dcs_read_1(td, DCS_GET_ID1, id1); if (r) return r; r = taal_dcs_read_1(td, DCS_GET_ID2, id2); if (r) return r; r = taal_dcs_read_1(td, DCS_GET_ID3, id3); if (r) return r; return 0; } static int taal_set_addr_mode(struct taal_data *td, u8 rotate, bool mirror) { int r; u8 mode; int b5, b6, b7; r = taal_dcs_read_1(td, MIPI_DCS_GET_ADDRESS_MODE, &mode); if (r) return r; switch (rotate) { default: case 0: b7 = 0; b6 = 0; b5 = 0; break; case 1: b7 = 0; b6 = 1; b5 = 1; break; case 2: b7 = 1; b6 = 1; b5 = 0; break; case 3: b7 = 1; b6 = 0; b5 = 1; break; } if (mirror) b6 = !b6; mode &= ~((1<<7) | (1<<6) | (1<<5)); mode |= (b7 << 7) | (b6 << 6) | (b5 << 5); return taal_dcs_write_1(td, MIPI_DCS_SET_ADDRESS_MODE, mode); } static int taal_set_update_window(struct taal_data *td, u16 x, u16 y, u16 w, u16 h) { int r; u16 x1 = x; u16 x2 = x + w - 1; u16 y1 = y; u16 y2 = y + h - 1; u8 buf[5]; buf[0] = MIPI_DCS_SET_COLUMN_ADDRESS; buf[1] = (x1 >> 8) & 0xff; buf[2] = (x1 >> 0) & 0xff; buf[3] = (x2 >> 8) & 0xff; buf[4] = (x2 >> 0) & 0xff; r = dsi_vc_dcs_write_nosync(td->dssdev, td->channel, buf, sizeof(buf)); if (r) return r; buf[0] = MIPI_DCS_SET_PAGE_ADDRESS; buf[1] = (y1 >> 8) & 0xff; buf[2] = (y1 >> 0) & 0xff; buf[3] = (y2 >> 8) & 0xff; buf[4] = (y2 >> 0) & 0xff; r = dsi_vc_dcs_write_nosync(td->dssdev, td->channel, buf, sizeof(buf)); if (r) return r; dsi_vc_send_bta_sync(td->dssdev, td->channel); return r; } static void taal_queue_esd_work(struct omap_dss_device *dssdev) { struct taal_data *td = dev_get_drvdata(&dssdev->dev); if (td->esd_interval > 0) queue_delayed_work(td->workqueue, &td->esd_work, msecs_to_jiffies(td->esd_interval)); } static void taal_cancel_esd_work(struct omap_dss_device *dssdev) { struct taal_data *td = dev_get_drvdata(&dssdev->dev); cancel_delayed_work(&td->esd_work); } static void taal_queue_ulps_work(struct omap_dss_device *dssdev) { struct taal_data *td = dev_get_drvdata(&dssdev->dev); if (td->ulps_timeout > 0) 
queue_delayed_work(td->workqueue, &td->ulps_work, msecs_to_jiffies(td->ulps_timeout)); } static void taal_cancel_ulps_work(struct omap_dss_device *dssdev) { struct taal_data *td = dev_get_drvdata(&dssdev->dev); cancel_delayed_work(&td->ulps_work); } static int taal_enter_ulps(struct omap_dss_device *dssdev) { struct taal_data *td = dev_get_drvdata(&dssdev->dev); struct nokia_dsi_panel_data *panel_data = get_panel_data(dssdev); int r; if (td->ulps_enabled) return 0; taal_cancel_ulps_work(dssdev); r = _taal_enable_te(dssdev, false); if (r) goto err; disable_irq(gpio_to_irq(panel_data->ext_te_gpio)); omapdss_dsi_display_disable(dssdev, false, true); td->ulps_enabled = true; return 0; err: dev_err(&dssdev->dev, "enter ULPS failed"); taal_panel_reset(dssdev); td->ulps_enabled = false; taal_queue_ulps_work(dssdev); return r; } static int taal_exit_ulps(struct omap_dss_device *dssdev) { struct taal_data *td = dev_get_drvdata(&dssdev->dev); struct nokia_dsi_panel_data *panel_data = get_panel_data(dssdev); int r; if (!td->ulps_enabled) return 0; r = omapdss_dsi_display_enable(dssdev); if (r) { dev_err(&dssdev->dev, "failed to enable DSI\n"); goto err1; } omapdss_dsi_vc_enable_hs(dssdev, td->channel, true); r = _taal_enable_te(dssdev, true); if (r) { dev_err(&dssdev->dev, "failed to re-enable TE"); goto err2; } enable_irq(gpio_to_irq(panel_data->ext_te_gpio)); taal_queue_ulps_work(dssdev); td->ulps_enabled = false; return 0; err2: dev_err(&dssdev->dev, "failed to exit ULPS"); r = taal_panel_reset(dssdev); if (!r) { enable_irq(gpio_to_irq(panel_data->ext_te_gpio)); td->ulps_enabled = false; } err1: taal_queue_ulps_work(dssdev); return r; } static int taal_wake_up(struct omap_dss_device *dssdev) { struct taal_data *td = dev_get_drvdata(&dssdev->dev); if (td->ulps_enabled) return taal_exit_ulps(dssdev); taal_cancel_ulps_work(dssdev); taal_queue_ulps_work(dssdev); return 0; } static int taal_bl_update_status(struct backlight_device *dev) { struct omap_dss_device *dssdev = 
dev_get_drvdata(&dev->dev); struct taal_data *td = dev_get_drvdata(&dssdev->dev); int r; int level; if (dev->props.fb_blank == FB_BLANK_UNBLANK && dev->props.power == FB_BLANK_UNBLANK) level = dev->props.brightness; else level = 0; dev_dbg(&dssdev->dev, "update brightness to %d\n", level); mutex_lock(&td->lock); if (td->enabled) { dsi_bus_lock(dssdev); r = taal_wake_up(dssdev); if (!r) r = taal_dcs_write_1(td, DCS_BRIGHTNESS, level); dsi_bus_unlock(dssdev); } else { r = 0; } mutex_unlock(&td->lock); return r; } static int taal_bl_get_intensity(struct backlight_device *dev) { if (dev->props.fb_blank == FB_BLANK_UNBLANK && dev->props.power == FB_BLANK_UNBLANK) return dev->props.brightness; return 0; } static const struct backlight_ops taal_bl_ops = { .get_brightness = taal_bl_get_intensity, .update_status = taal_bl_update_status, }; static void taal_get_timings(struct omap_dss_device *dssdev, struct omap_video_timings *timings) { *timings = dssdev->panel.timings; } static void taal_get_resolution(struct omap_dss_device *dssdev, u16 *xres, u16 *yres) { struct taal_data *td = dev_get_drvdata(&dssdev->dev); if (td->rotate == 0 || td->rotate == 2) { *xres = dssdev->panel.timings.x_res; *yres = dssdev->panel.timings.y_res; } else { *yres = dssdev->panel.timings.x_res; *xres = dssdev->panel.timings.y_res; } } static ssize_t taal_num_errors_show(struct device *dev, struct device_attribute *attr, char *buf) { struct omap_dss_device *dssdev = to_dss_device(dev); struct taal_data *td = dev_get_drvdata(&dssdev->dev); u8 errors; int r; mutex_lock(&td->lock); if (td->enabled) { dsi_bus_lock(dssdev); r = taal_wake_up(dssdev); if (!r) r = taal_dcs_read_1(td, DCS_READ_NUM_ERRORS, &errors); dsi_bus_unlock(dssdev); } else { r = -ENODEV; } mutex_unlock(&td->lock); if (r) return r; return snprintf(buf, PAGE_SIZE, "%d\n", errors); } static ssize_t taal_hw_revision_show(struct device *dev, struct device_attribute *attr, char *buf) { struct omap_dss_device *dssdev = to_dss_device(dev); 
struct taal_data *td = dev_get_drvdata(&dssdev->dev); u8 id1, id2, id3; int r; mutex_lock(&td->lock); if (td->enabled) { dsi_bus_lock(dssdev); r = taal_wake_up(dssdev); if (!r) r = taal_get_id(td, &id1, &id2, &id3); dsi_bus_unlock(dssdev); } else { r = -ENODEV; } mutex_unlock(&td->lock); if (r) return r; return snprintf(buf, PAGE_SIZE, "%02x.%02x.%02x\n", id1, id2, id3); } static const char *cabc_modes[] = { "off", /* used also always when CABC is not supported */ "ui", "still-image", "moving-image", }; static ssize_t show_cabc_mode(struct device *dev, struct device_attribute *attr, char *buf) { struct omap_dss_device *dssdev = to_dss_device(dev); struct taal_data *td = dev_get_drvdata(&dssdev->dev); const char *mode_str; int mode; int len; mode = td->cabc_mode; mode_str = "unknown"; if (mode >= 0 && mode < ARRAY_SIZE(cabc_modes)) mode_str = cabc_modes[mode]; len = snprintf(buf, PAGE_SIZE, "%s\n", mode_str); return len < PAGE_SIZE - 1 ? len : PAGE_SIZE - 1; } static ssize_t store_cabc_mode(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct omap_dss_device *dssdev = to_dss_device(dev); struct taal_data *td = dev_get_drvdata(&dssdev->dev); int i; int r; for (i = 0; i < ARRAY_SIZE(cabc_modes); i++) { if (sysfs_streq(cabc_modes[i], buf)) break; } if (i == ARRAY_SIZE(cabc_modes)) return -EINVAL; mutex_lock(&td->lock); if (td->enabled) { dsi_bus_lock(dssdev); if (!td->cabc_broken) { r = taal_wake_up(dssdev); if (r) goto err; r = taal_dcs_write_1(td, DCS_WRITE_CABC, i); if (r) goto err; } dsi_bus_unlock(dssdev); } td->cabc_mode = i; mutex_unlock(&td->lock); return count; err: dsi_bus_unlock(dssdev); mutex_unlock(&td->lock); return r; } static ssize_t show_cabc_available_modes(struct device *dev, struct device_attribute *attr, char *buf) { int len; int i; for (i = 0, len = 0; len < PAGE_SIZE && i < ARRAY_SIZE(cabc_modes); i++) len += snprintf(&buf[len], PAGE_SIZE - len, "%s%s%s", i ? 
" " : "", cabc_modes[i], i == ARRAY_SIZE(cabc_modes) - 1 ? "\n" : ""); return len < PAGE_SIZE ? len : PAGE_SIZE - 1; } static ssize_t taal_store_esd_interval(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct omap_dss_device *dssdev = to_dss_device(dev); struct taal_data *td = dev_get_drvdata(&dssdev->dev); unsigned long t; int r; r = strict_strtoul(buf, 10, &t); if (r) return r; mutex_lock(&td->lock); taal_cancel_esd_work(dssdev); td->esd_interval = t; if (td->enabled) taal_queue_esd_work(dssdev); mutex_unlock(&td->lock); return count; } static ssize_t taal_show_esd_interval(struct device *dev, struct device_attribute *attr, char *buf) { struct omap_dss_device *dssdev = to_dss_device(dev); struct taal_data *td = dev_get_drvdata(&dssdev->dev); unsigned t; mutex_lock(&td->lock); t = td->esd_interval; mutex_unlock(&td->lock); return snprintf(buf, PAGE_SIZE, "%u\n", t); } static ssize_t taal_store_ulps(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct omap_dss_device *dssdev = to_dss_device(dev); struct taal_data *td = dev_get_drvdata(&dssdev->dev); unsigned long t; int r; r = strict_strtoul(buf, 10, &t); if (r) return r; mutex_lock(&td->lock); if (td->enabled) { dsi_bus_lock(dssdev); if (t) r = taal_enter_ulps(dssdev); else r = taal_wake_up(dssdev); dsi_bus_unlock(dssdev); } mutex_unlock(&td->lock); if (r) return r; return count; } static ssize_t taal_show_ulps(struct device *dev, struct device_attribute *attr, char *buf) { struct omap_dss_device *dssdev = to_dss_device(dev); struct taal_data *td = dev_get_drvdata(&dssdev->dev); unsigned t; mutex_lock(&td->lock); t = td->ulps_enabled; mutex_unlock(&td->lock); return snprintf(buf, PAGE_SIZE, "%u\n", t); } static ssize_t taal_store_ulps_timeout(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct omap_dss_device *dssdev = to_dss_device(dev); struct taal_data *td = dev_get_drvdata(&dssdev->dev); 
unsigned long t; int r; r = strict_strtoul(buf, 10, &t); if (r) return r; mutex_lock(&td->lock); td->ulps_timeout = t; if (td->enabled) { /* taal_wake_up will restart the timer */ dsi_bus_lock(dssdev); r = taal_wake_up(dssdev); dsi_bus_unlock(dssdev); } mutex_unlock(&td->lock); if (r) return r; return count; } static ssize_t taal_show_ulps_timeout(struct device *dev, struct device_attribute *attr, char *buf) { struct omap_dss_device *dssdev = to_dss_device(dev); struct taal_data *td = dev_get_drvdata(&dssdev->dev); unsigned t; mutex_lock(&td->lock); t = td->ulps_timeout; mutex_unlock(&td->lock); return snprintf(buf, PAGE_SIZE, "%u\n", t); } static DEVICE_ATTR(num_dsi_errors, S_IRUGO, taal_num_errors_show, NULL); static DEVICE_ATTR(hw_revision, S_IRUGO, taal_hw_revision_show, NULL); static DEVICE_ATTR(cabc_mode, S_IRUGO | S_IWUSR, show_cabc_mode, store_cabc_mode); static DEVICE_ATTR(cabc_available_modes, S_IRUGO, show_cabc_available_modes, NULL); static DEVICE_ATTR(esd_interval, S_IRUGO | S_IWUSR, taal_show_esd_interval, taal_store_esd_interval); static DEVICE_ATTR(ulps, S_IRUGO | S_IWUSR, taal_show_ulps, taal_store_ulps); static DEVICE_ATTR(ulps_timeout, S_IRUGO | S_IWUSR, taal_show_ulps_timeout, taal_store_ulps_timeout); static struct attribute *taal_attrs[] = { &dev_attr_num_dsi_errors.attr, &dev_attr_hw_revision.attr, &dev_attr_cabc_mode.attr, &dev_attr_cabc_available_modes.attr, &dev_attr_esd_interval.attr, &dev_attr_ulps.attr, &dev_attr_ulps_timeout.attr, NULL, }; static struct attribute_group taal_attr_group = { .attrs = taal_attrs, }; static void taal_hw_reset(struct omap_dss_device *dssdev) { struct taal_data *td = dev_get_drvdata(&dssdev->dev); struct nokia_dsi_panel_data *panel_data = get_panel_data(dssdev); if (panel_data->reset_gpio == -1) return; gpio_set_value(panel_data->reset_gpio, 1); if (td->panel_config->reset_sequence.high) udelay(td->panel_config->reset_sequence.high); /* reset the panel */ gpio_set_value(panel_data->reset_gpio, 0); /* assert 
reset */ if (td->panel_config->reset_sequence.low) udelay(td->panel_config->reset_sequence.low); gpio_set_value(panel_data->reset_gpio, 1); /* wait after releasing reset */ if (td->panel_config->sleep.hw_reset) msleep(td->panel_config->sleep.hw_reset); } static int taal_probe(struct omap_dss_device *dssdev) { struct backlight_properties props; struct taal_data *td; struct backlight_device *bldev = NULL; struct nokia_dsi_panel_data *panel_data = get_panel_data(dssdev); struct panel_config *panel_config = NULL; int r, i; dev_dbg(&dssdev->dev, "probe\n"); if (!panel_data || !panel_data->name) { r = -EINVAL; goto err; } for (i = 0; i < ARRAY_SIZE(panel_configs); i++) { if (strcmp(panel_data->name, panel_configs[i].name) == 0) { panel_config = &panel_configs[i]; break; } } if (!panel_config) { r = -EINVAL; goto err; } dssdev->panel.config = OMAP_DSS_LCD_TFT; dssdev->panel.timings = panel_config->timings; dssdev->panel.dsi_pix_fmt = OMAP_DSS_DSI_FMT_RGB888; td = kzalloc(sizeof(*td), GFP_KERNEL); if (!td) { r = -ENOMEM; goto err; } td->dssdev = dssdev; td->panel_config = panel_config; td->esd_interval = panel_data->esd_interval; td->ulps_enabled = false; td->ulps_timeout = panel_data->ulps_timeout; mutex_init(&td->lock); atomic_set(&td->do_update, 0); r = init_regulators(dssdev, panel_config->regulators, panel_config->num_regulators); if (r) goto err_reg; td->workqueue = create_singlethread_workqueue("taal_esd"); if (td->workqueue == NULL) { dev_err(&dssdev->dev, "can't create ESD workqueue\n"); r = -ENOMEM; goto err_wq; } INIT_DELAYED_WORK_DEFERRABLE(&td->esd_work, taal_esd_work); INIT_DELAYED_WORK(&td->ulps_work, taal_ulps_work); dev_set_drvdata(&dssdev->dev, td); taal_hw_reset(dssdev); if (panel_data->use_dsi_backlight) { memset(&props, 0, sizeof(struct backlight_properties)); props.max_brightness = 255; props.type = BACKLIGHT_RAW; bldev = backlight_device_register(dev_name(&dssdev->dev), &dssdev->dev, dssdev, &taal_bl_ops, &props); if (IS_ERR(bldev)) { r = 
PTR_ERR(bldev); goto err_bl; } td->bldev = bldev; bldev->props.fb_blank = FB_BLANK_UNBLANK; bldev->props.power = FB_BLANK_UNBLANK; bldev->props.brightness = 255; taal_bl_update_status(bldev); } if (panel_data->use_ext_te) { int gpio = panel_data->ext_te_gpio; r = gpio_request_one(gpio, GPIOF_IN, "taal irq"); if (r) { dev_err(&dssdev->dev, "GPIO request failed\n"); goto err_gpio; } r = request_irq(gpio_to_irq(gpio), taal_te_isr, IRQF_TRIGGER_RISING, "taal vsync", dssdev); if (r) { dev_err(&dssdev->dev, "IRQ request failed\n"); gpio_free(gpio); goto err_irq; } INIT_DELAYED_WORK_DEFERRABLE(&td->te_timeout_work, taal_te_timeout_work_callback); dev_dbg(&dssdev->dev, "Using GPIO TE\n"); } r = omap_dsi_request_vc(dssdev, &td->channel); if (r) { dev_err(&dssdev->dev, "failed to get virtual channel\n"); goto err_req_vc; } r = omap_dsi_set_vc_id(dssdev, td->channel, TCH); if (r) { dev_err(&dssdev->dev, "failed to set VC_ID\n"); goto err_vc_id; } r = sysfs_create_group(&dssdev->dev.kobj, &taal_attr_group); if (r) { dev_err(&dssdev->dev, "failed to create sysfs files\n"); goto err_vc_id; } return 0; err_vc_id: omap_dsi_release_vc(dssdev, td->channel); err_req_vc: if (panel_data->use_ext_te) free_irq(gpio_to_irq(panel_data->ext_te_gpio), dssdev); err_irq: if (panel_data->use_ext_te) gpio_free(panel_data->ext_te_gpio); err_gpio: if (bldev != NULL) backlight_device_unregister(bldev); err_bl: destroy_workqueue(td->workqueue); err_wq: free_regulators(panel_config->regulators, panel_config->num_regulators); err_reg: kfree(td); err: return r; } static void __exit taal_remove(struct omap_dss_device *dssdev) { struct taal_data *td = dev_get_drvdata(&dssdev->dev); struct nokia_dsi_panel_data *panel_data = get_panel_data(dssdev); struct backlight_device *bldev; dev_dbg(&dssdev->dev, "remove\n"); sysfs_remove_group(&dssdev->dev.kobj, &taal_attr_group); omap_dsi_release_vc(dssdev, td->channel); if (panel_data->use_ext_te) { int gpio = panel_data->ext_te_gpio; free_irq(gpio_to_irq(gpio), 
dssdev); gpio_free(gpio); } bldev = td->bldev; if (bldev != NULL) { bldev->props.power = FB_BLANK_POWERDOWN; taal_bl_update_status(bldev); backlight_device_unregister(bldev); } taal_cancel_ulps_work(dssdev); taal_cancel_esd_work(dssdev); destroy_workqueue(td->workqueue); /* reset, to be sure that the panel is in a valid state */ taal_hw_reset(dssdev); free_regulators(td->panel_config->regulators, td->panel_config->num_regulators); kfree(td); } static int taal_power_on(struct omap_dss_device *dssdev) { struct taal_data *td = dev_get_drvdata(&dssdev->dev); u8 id1, id2, id3; int r; r = omapdss_dsi_display_enable(dssdev); if (r) { dev_err(&dssdev->dev, "failed to enable DSI\n"); goto err0; } taal_hw_reset(dssdev); omapdss_dsi_vc_enable_hs(dssdev, td->channel, false); r = taal_sleep_out(td); if (r) goto err; r = taal_get_id(td, &id1, &id2, &id3); if (r) goto err; /* on early Taal revisions CABC is broken */ if (td->panel_config->type == PANEL_TAAL && (id2 == 0x00 || id2 == 0xff || id2 == 0x81)) td->cabc_broken = true; r = taal_dcs_write_1(td, DCS_BRIGHTNESS, 0xff); if (r) goto err; r = taal_dcs_write_1(td, DCS_CTRL_DISPLAY, (1<<2) | (1<<5)); /* BL | BCTRL */ if (r) goto err; r = taal_dcs_write_1(td, MIPI_DCS_SET_PIXEL_FORMAT, MIPI_DCS_PIXEL_FMT_24BIT); if (r) goto err; r = taal_set_addr_mode(td, td->rotate, td->mirror); if (r) goto err; if (!td->cabc_broken) { r = taal_dcs_write_1(td, DCS_WRITE_CABC, td->cabc_mode); if (r) goto err; } r = taal_dcs_write_0(td, MIPI_DCS_SET_DISPLAY_ON); if (r) goto err; r = _taal_enable_te(dssdev, td->te_enabled); if (r) goto err; r = dsi_enable_video_output(dssdev, td->channel); if (r) goto err; td->enabled = 1; if (!td->intro_printed) { dev_info(&dssdev->dev, "%s panel revision %02x.%02x.%02x\n", td->panel_config->name, id1, id2, id3); if (td->cabc_broken) dev_info(&dssdev->dev, "old Taal version, CABC disabled\n"); td->intro_printed = true; } omapdss_dsi_vc_enable_hs(dssdev, td->channel, true); return 0; err: dev_err(&dssdev->dev, 
"error while enabling panel, issuing HW reset\n"); taal_hw_reset(dssdev); omapdss_dsi_display_disable(dssdev, true, false); err0: return r; } static void taal_power_off(struct omap_dss_device *dssdev) { struct taal_data *td = dev_get_drvdata(&dssdev->dev); int r; dsi_disable_video_output(dssdev, td->channel); r = taal_dcs_write_0(td, MIPI_DCS_SET_DISPLAY_OFF); if (!r) r = taal_sleep_in(td); if (r) { dev_err(&dssdev->dev, "error disabling panel, issuing HW reset\n"); taal_hw_reset(dssdev); } omapdss_dsi_display_disable(dssdev, true, false); td->enabled = 0; } static int taal_panel_reset(struct omap_dss_device *dssdev) { dev_err(&dssdev->dev, "performing LCD reset\n"); taal_power_off(dssdev); taal_hw_reset(dssdev); return taal_power_on(dssdev); } static int taal_enable(struct omap_dss_device *dssdev) { struct taal_data *td = dev_get_drvdata(&dssdev->dev); int r; dev_dbg(&dssdev->dev, "enable\n"); mutex_lock(&td->lock); if (dssdev->state != OMAP_DSS_DISPLAY_DISABLED) { r = -EINVAL; goto err; } dsi_bus_lock(dssdev); r = taal_power_on(dssdev); dsi_bus_unlock(dssdev); if (r) goto err; taal_queue_esd_work(dssdev); dssdev->state = OMAP_DSS_DISPLAY_ACTIVE; mutex_unlock(&td->lock); return 0; err: dev_dbg(&dssdev->dev, "enable failed\n"); mutex_unlock(&td->lock); return r; } static void taal_disable(struct omap_dss_device *dssdev) { struct taal_data *td = dev_get_drvdata(&dssdev->dev); dev_dbg(&dssdev->dev, "disable\n"); mutex_lock(&td->lock); taal_cancel_ulps_work(dssdev); taal_cancel_esd_work(dssdev); dsi_bus_lock(dssdev); if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE) { int r; r = taal_wake_up(dssdev); if (!r) taal_power_off(dssdev); } dsi_bus_unlock(dssdev); dssdev->state = OMAP_DSS_DISPLAY_DISABLED; mutex_unlock(&td->lock); } static int taal_suspend(struct omap_dss_device *dssdev) { struct taal_data *td = dev_get_drvdata(&dssdev->dev); int r; dev_dbg(&dssdev->dev, "suspend\n"); mutex_lock(&td->lock); if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE) { r = -EINVAL; goto err; } 
taal_cancel_ulps_work(dssdev); taal_cancel_esd_work(dssdev); dsi_bus_lock(dssdev); r = taal_wake_up(dssdev); if (!r) taal_power_off(dssdev); dsi_bus_unlock(dssdev); dssdev->state = OMAP_DSS_DISPLAY_SUSPENDED; mutex_unlock(&td->lock); return 0; err: mutex_unlock(&td->lock); return r; } static int taal_resume(struct omap_dss_device *dssdev) { struct taal_data *td = dev_get_drvdata(&dssdev->dev); int r; dev_dbg(&dssdev->dev, "resume\n"); mutex_lock(&td->lock); if (dssdev->state != OMAP_DSS_DISPLAY_SUSPENDED) { r = -EINVAL; goto err; } dsi_bus_lock(dssdev); r = taal_power_on(dssdev); dsi_bus_unlock(dssdev); if (r) { dssdev->state = OMAP_DSS_DISPLAY_DISABLED; } else { dssdev->state = OMAP_DSS_DISPLAY_ACTIVE; taal_queue_esd_work(dssdev); } mutex_unlock(&td->lock); return r; err: mutex_unlock(&td->lock); return r; } static void taal_framedone_cb(int err, void *data) { struct omap_dss_device *dssdev = data; dev_dbg(&dssdev->dev, "framedone, err %d\n", err); dsi_bus_unlock(dssdev); } static irqreturn_t taal_te_isr(int irq, void *data) { struct omap_dss_device *dssdev = data; struct taal_data *td = dev_get_drvdata(&dssdev->dev); int old; int r; old = atomic_cmpxchg(&td->do_update, 1, 0); if (old) { cancel_delayed_work(&td->te_timeout_work); r = omap_dsi_update(dssdev, td->channel, taal_framedone_cb, dssdev); if (r) goto err; } return IRQ_HANDLED; err: dev_err(&dssdev->dev, "start update failed\n"); dsi_bus_unlock(dssdev); return IRQ_HANDLED; } static void taal_te_timeout_work_callback(struct work_struct *work) { struct taal_data *td = container_of(work, struct taal_data, te_timeout_work.work); struct omap_dss_device *dssdev = td->dssdev; dev_err(&dssdev->dev, "TE not received for 250ms!\n"); atomic_set(&td->do_update, 0); dsi_bus_unlock(dssdev); } static int taal_update(struct omap_dss_device *dssdev, u16 x, u16 y, u16 w, u16 h) { struct taal_data *td = dev_get_drvdata(&dssdev->dev); struct nokia_dsi_panel_data *panel_data = get_panel_data(dssdev); int r; 
dev_dbg(&dssdev->dev, "update %d, %d, %d x %d\n", x, y, w, h); mutex_lock(&td->lock); dsi_bus_lock(dssdev); r = taal_wake_up(dssdev); if (r) goto err; if (!td->enabled) { r = 0; goto err; } /* XXX no need to send this every frame, but dsi break if not done */ r = taal_set_update_window(td, 0, 0, td->panel_config->timings.x_res, td->panel_config->timings.y_res); if (r) goto err; if (td->te_enabled && panel_data->use_ext_te) { schedule_delayed_work(&td->te_timeout_work, msecs_to_jiffies(250)); atomic_set(&td->do_update, 1); } else { r = omap_dsi_update(dssdev, td->channel, taal_framedone_cb, dssdev); if (r) goto err; } /* note: no bus_unlock here. unlock is in framedone_cb */ mutex_unlock(&td->lock); return 0; err: dsi_bus_unlock(dssdev); mutex_unlock(&td->lock); return r; } static int taal_sync(struct omap_dss_device *dssdev) { struct taal_data *td = dev_get_drvdata(&dssdev->dev); dev_dbg(&dssdev->dev, "sync\n"); mutex_lock(&td->lock); dsi_bus_lock(dssdev); dsi_bus_unlock(dssdev); mutex_unlock(&td->lock); dev_dbg(&dssdev->dev, "sync done\n"); return 0; } static int _taal_enable_te(struct omap_dss_device *dssdev, bool enable) { struct taal_data *td = dev_get_drvdata(&dssdev->dev); struct nokia_dsi_panel_data *panel_data = get_panel_data(dssdev); int r; if (enable) r = taal_dcs_write_1(td, MIPI_DCS_SET_TEAR_ON, 0); else r = taal_dcs_write_0(td, MIPI_DCS_SET_TEAR_OFF); if (!panel_data->use_ext_te) omapdss_dsi_enable_te(dssdev, enable); if (td->panel_config->sleep.enable_te) msleep(td->panel_config->sleep.enable_te); return r; } static int taal_enable_te(struct omap_dss_device *dssdev, bool enable) { struct taal_data *td = dev_get_drvdata(&dssdev->dev); int r; mutex_lock(&td->lock); if (td->te_enabled == enable) goto end; dsi_bus_lock(dssdev); if (td->enabled) { r = taal_wake_up(dssdev); if (r) goto err; r = _taal_enable_te(dssdev, enable); if (r) goto err; } td->te_enabled = enable; dsi_bus_unlock(dssdev); end: mutex_unlock(&td->lock); return 0; err: 
dsi_bus_unlock(dssdev); mutex_unlock(&td->lock); return r; } static int taal_get_te(struct omap_dss_device *dssdev) { struct taal_data *td = dev_get_drvdata(&dssdev->dev); int r; mutex_lock(&td->lock); r = td->te_enabled; mutex_unlock(&td->lock); return r; } static int taal_rotate(struct omap_dss_device *dssdev, u8 rotate) { struct taal_data *td = dev_get_drvdata(&dssdev->dev); int r; dev_dbg(&dssdev->dev, "rotate %d\n", rotate); mutex_lock(&td->lock); if (td->rotate == rotate) goto end; dsi_bus_lock(dssdev); if (td->enabled) { r = taal_wake_up(dssdev); if (r) goto err; r = taal_set_addr_mode(td, rotate, td->mirror); if (r) goto err; } td->rotate = rotate; dsi_bus_unlock(dssdev); end: mutex_unlock(&td->lock); return 0; err: dsi_bus_unlock(dssdev); mutex_unlock(&td->lock); return r; } static u8 taal_get_rotate(struct omap_dss_device *dssdev) { struct taal_data *td = dev_get_drvdata(&dssdev->dev); int r; mutex_lock(&td->lock); r = td->rotate; mutex_unlock(&td->lock); return r; } static int taal_mirror(struct omap_dss_device *dssdev, bool enable) { struct taal_data *td = dev_get_drvdata(&dssdev->dev); int r; dev_dbg(&dssdev->dev, "mirror %d\n", enable); mutex_lock(&td->lock); if (td->mirror == enable) goto end; dsi_bus_lock(dssdev); if (td->enabled) { r = taal_wake_up(dssdev); if (r) goto err; r = taal_set_addr_mode(td, td->rotate, enable); if (r) goto err; } td->mirror = enable; dsi_bus_unlock(dssdev); end: mutex_unlock(&td->lock); return 0; err: dsi_bus_unlock(dssdev); mutex_unlock(&td->lock); return r; } static bool taal_get_mirror(struct omap_dss_device *dssdev) { struct taal_data *td = dev_get_drvdata(&dssdev->dev); int r; mutex_lock(&td->lock); r = td->mirror; mutex_unlock(&td->lock); return r; } static int taal_run_test(struct omap_dss_device *dssdev, int test_num) { struct taal_data *td = dev_get_drvdata(&dssdev->dev); u8 id1, id2, id3; int r; mutex_lock(&td->lock); if (!td->enabled) { r = -ENODEV; goto err1; } dsi_bus_lock(dssdev); r = taal_wake_up(dssdev); 
if (r) goto err2; r = taal_dcs_read_1(td, DCS_GET_ID1, &id1); if (r) goto err2; r = taal_dcs_read_1(td, DCS_GET_ID2, &id2); if (r) goto err2; r = taal_dcs_read_1(td, DCS_GET_ID3, &id3); if (r) goto err2; dsi_bus_unlock(dssdev); mutex_unlock(&td->lock); return 0; err2: dsi_bus_unlock(dssdev); err1: mutex_unlock(&td->lock); return r; } static int taal_memory_read(struct omap_dss_device *dssdev, void *buf, size_t size, u16 x, u16 y, u16 w, u16 h) { int r; int first = 1; int plen; unsigned buf_used = 0; struct taal_data *td = dev_get_drvdata(&dssdev->dev); if (size < w * h * 3) return -ENOMEM; mutex_lock(&td->lock); if (!td->enabled) { r = -ENODEV; goto err1; } size = min(w * h * 3, dssdev->panel.timings.x_res * dssdev->panel.timings.y_res * 3); dsi_bus_lock(dssdev); r = taal_wake_up(dssdev); if (r) goto err2; /* plen 1 or 2 goes into short packet. until checksum error is fixed, * use short packets. plen 32 works, but bigger packets seem to cause * an error. */ if (size % 2) plen = 1; else plen = 2; taal_set_update_window(td, x, y, w, h); r = dsi_vc_set_max_rx_packet_size(dssdev, td->channel, plen); if (r) goto err2; while (buf_used < size) { u8 dcs_cmd = first ? 
0x2e : 0x3e; first = 0; r = dsi_vc_dcs_read(dssdev, td->channel, dcs_cmd, buf + buf_used, size - buf_used); if (r < 0) { dev_err(&dssdev->dev, "read error\n"); goto err3; } buf_used += r; if (r < plen) { dev_err(&dssdev->dev, "short read\n"); break; } if (signal_pending(current)) { dev_err(&dssdev->dev, "signal pending, " "aborting memory read\n"); r = -ERESTARTSYS; goto err3; } } r = buf_used; err3: dsi_vc_set_max_rx_packet_size(dssdev, td->channel, 1); err2: dsi_bus_unlock(dssdev); err1: mutex_unlock(&td->lock); return r; } static void taal_ulps_work(struct work_struct *work) { struct taal_data *td = container_of(work, struct taal_data, ulps_work.work); struct omap_dss_device *dssdev = td->dssdev; mutex_lock(&td->lock); if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE || !td->enabled) { mutex_unlock(&td->lock); return; } dsi_bus_lock(dssdev); taal_enter_ulps(dssdev); dsi_bus_unlock(dssdev); mutex_unlock(&td->lock); } static void taal_esd_work(struct work_struct *work) { struct taal_data *td = container_of(work, struct taal_data, esd_work.work); struct omap_dss_device *dssdev = td->dssdev; struct nokia_dsi_panel_data *panel_data = get_panel_data(dssdev); u8 state1, state2; int r; mutex_lock(&td->lock); if (!td->enabled) { mutex_unlock(&td->lock); return; } dsi_bus_lock(dssdev); r = taal_wake_up(dssdev); if (r) { dev_err(&dssdev->dev, "failed to exit ULPS\n"); goto err; } r = taal_dcs_read_1(td, MIPI_DCS_GET_DIAGNOSTIC_RESULT, &state1); if (r) { dev_err(&dssdev->dev, "failed to read Taal status\n"); goto err; } /* Run self diagnostics */ r = taal_sleep_out(td); if (r) { dev_err(&dssdev->dev, "failed to run Taal self-diagnostics\n"); goto err; } r = taal_dcs_read_1(td, MIPI_DCS_GET_DIAGNOSTIC_RESULT, &state2); if (r) { dev_err(&dssdev->dev, "failed to read Taal status\n"); goto err; } /* Each sleep out command will trigger a self diagnostic and flip * Bit6 if the test passes. 
*/ if (!((state1 ^ state2) & (1 << 6))) { dev_err(&dssdev->dev, "LCD self diagnostics failed\n"); goto err; } /* Self-diagnostics result is also shown on TE GPIO line. We need * to re-enable TE after self diagnostics */ if (td->te_enabled && panel_data->use_ext_te) { r = taal_dcs_write_1(td, MIPI_DCS_SET_TEAR_ON, 0); if (r) goto err; } dsi_bus_unlock(dssdev); taal_queue_esd_work(dssdev); mutex_unlock(&td->lock); return; err: dev_err(&dssdev->dev, "performing LCD reset\n"); taal_panel_reset(dssdev); dsi_bus_unlock(dssdev); taal_queue_esd_work(dssdev); mutex_unlock(&td->lock); } static struct omap_dss_driver taal_driver = { .probe = taal_probe, .remove = __exit_p(taal_remove), .enable = taal_enable, .disable = taal_disable, .suspend = taal_suspend, .resume = taal_resume, .update = taal_update, .sync = taal_sync, .get_resolution = taal_get_resolution, .get_recommended_bpp = omapdss_default_get_recommended_bpp, .enable_te = taal_enable_te, .get_te = taal_get_te, .set_rotate = taal_rotate, .get_rotate = taal_get_rotate, .set_mirror = taal_mirror, .get_mirror = taal_get_mirror, .run_test = taal_run_test, .memory_read = taal_memory_read, .get_timings = taal_get_timings, .driver = { .name = "taal", .owner = THIS_MODULE, }, }; static int __init taal_init(void) { omap_dss_register_driver(&taal_driver); return 0; } static void __exit taal_exit(void) { omap_dss_unregister_driver(&taal_driver); } module_init(taal_init); module_exit(taal_exit); MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@nokia.com>"); MODULE_DESCRIPTION("Taal Driver"); MODULE_LICENSE("GPL");
gpl-2.0
mer-hybris/android_kernel_motorola_titan-OLD
drivers/video/omap2/displays/panel-taal.c
4741
39135
/* * Taal DSI command mode panel * * Copyright (C) 2009 Nokia Corporation * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published by * the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program. If not, see <http://www.gnu.org/licenses/>. */ /*#define DEBUG*/ #include <linux/module.h> #include <linux/delay.h> #include <linux/err.h> #include <linux/jiffies.h> #include <linux/sched.h> #include <linux/backlight.h> #include <linux/fb.h> #include <linux/interrupt.h> #include <linux/gpio.h> #include <linux/workqueue.h> #include <linux/slab.h> #include <linux/regulator/consumer.h> #include <linux/mutex.h> #include <video/omapdss.h> #include <video/omap-panel-nokia-dsi.h> #include <video/mipi_display.h> /* DSI Virtual channel. Hardcoded for now. 
*/ #define TCH 0 #define DCS_READ_NUM_ERRORS 0x05 #define DCS_BRIGHTNESS 0x51 #define DCS_CTRL_DISPLAY 0x53 #define DCS_WRITE_CABC 0x55 #define DCS_READ_CABC 0x56 #define DCS_GET_ID1 0xda #define DCS_GET_ID2 0xdb #define DCS_GET_ID3 0xdc static irqreturn_t taal_te_isr(int irq, void *data); static void taal_te_timeout_work_callback(struct work_struct *work); static int _taal_enable_te(struct omap_dss_device *dssdev, bool enable); static int taal_panel_reset(struct omap_dss_device *dssdev); struct panel_regulator { struct regulator *regulator; const char *name; int min_uV; int max_uV; }; static void free_regulators(struct panel_regulator *regulators, int n) { int i; for (i = 0; i < n; i++) { /* disable/put in reverse order */ regulator_disable(regulators[n - i - 1].regulator); regulator_put(regulators[n - i - 1].regulator); } } static int init_regulators(struct omap_dss_device *dssdev, struct panel_regulator *regulators, int n) { int r, i, v; for (i = 0; i < n; i++) { struct regulator *reg; reg = regulator_get(&dssdev->dev, regulators[i].name); if (IS_ERR(reg)) { dev_err(&dssdev->dev, "failed to get regulator %s\n", regulators[i].name); r = PTR_ERR(reg); goto err; } /* FIXME: better handling of fixed vs. 
variable regulators */ v = regulator_get_voltage(reg); if (v < regulators[i].min_uV || v > regulators[i].max_uV) { r = regulator_set_voltage(reg, regulators[i].min_uV, regulators[i].max_uV); if (r) { dev_err(&dssdev->dev, "failed to set regulator %s voltage\n", regulators[i].name); regulator_put(reg); goto err; } } r = regulator_enable(reg); if (r) { dev_err(&dssdev->dev, "failed to enable regulator %s\n", regulators[i].name); regulator_put(reg); goto err; } regulators[i].regulator = reg; } return 0; err: free_regulators(regulators, i); return r; } /** * struct panel_config - panel configuration * @name: panel name * @type: panel type * @timings: panel resolution * @sleep: various panel specific delays, passed to msleep() if non-zero * @reset_sequence: reset sequence timings, passed to udelay() if non-zero * @regulators: array of panel regulators * @num_regulators: number of regulators in the array */ struct panel_config { const char *name; int type; struct omap_video_timings timings; struct { unsigned int sleep_in; unsigned int sleep_out; unsigned int hw_reset; unsigned int enable_te; } sleep; struct { unsigned int high; unsigned int low; } reset_sequence; struct panel_regulator *regulators; int num_regulators; }; enum { PANEL_TAAL, }; static struct panel_config panel_configs[] = { { .name = "taal", .type = PANEL_TAAL, .timings = { .x_res = 864, .y_res = 480, }, .sleep = { .sleep_in = 5, .sleep_out = 5, .hw_reset = 5, .enable_te = 100, /* possible panel bug */ }, .reset_sequence = { .high = 10, .low = 10, }, }, }; struct taal_data { struct mutex lock; struct backlight_device *bldev; unsigned long hw_guard_end; /* next value of jiffies when we can * issue the next sleep in/out command */ unsigned long hw_guard_wait; /* max guard time in jiffies */ struct omap_dss_device *dssdev; bool enabled; u8 rotate; bool mirror; bool te_enabled; atomic_t do_update; int channel; struct delayed_work te_timeout_work; bool cabc_broken; unsigned cabc_mode; bool intro_printed; struct 
workqueue_struct *workqueue; struct delayed_work esd_work; unsigned esd_interval; bool ulps_enabled; unsigned ulps_timeout; struct delayed_work ulps_work; struct panel_config *panel_config; }; static inline struct nokia_dsi_panel_data *get_panel_data(const struct omap_dss_device *dssdev) { return (struct nokia_dsi_panel_data *) dssdev->data; } static void taal_esd_work(struct work_struct *work); static void taal_ulps_work(struct work_struct *work); static void hw_guard_start(struct taal_data *td, int guard_msec) { td->hw_guard_wait = msecs_to_jiffies(guard_msec); td->hw_guard_end = jiffies + td->hw_guard_wait; } static void hw_guard_wait(struct taal_data *td) { unsigned long wait = td->hw_guard_end - jiffies; if ((long)wait > 0 && wait <= td->hw_guard_wait) { set_current_state(TASK_UNINTERRUPTIBLE); schedule_timeout(wait); } } static int taal_dcs_read_1(struct taal_data *td, u8 dcs_cmd, u8 *data) { int r; u8 buf[1]; r = dsi_vc_dcs_read(td->dssdev, td->channel, dcs_cmd, buf, 1); if (r < 0) return r; *data = buf[0]; return 0; } static int taal_dcs_write_0(struct taal_data *td, u8 dcs_cmd) { return dsi_vc_dcs_write(td->dssdev, td->channel, &dcs_cmd, 1); } static int taal_dcs_write_1(struct taal_data *td, u8 dcs_cmd, u8 param) { u8 buf[2]; buf[0] = dcs_cmd; buf[1] = param; return dsi_vc_dcs_write(td->dssdev, td->channel, buf, 2); } static int taal_sleep_in(struct taal_data *td) { u8 cmd; int r; hw_guard_wait(td); cmd = MIPI_DCS_ENTER_SLEEP_MODE; r = dsi_vc_dcs_write_nosync(td->dssdev, td->channel, &cmd, 1); if (r) return r; hw_guard_start(td, 120); if (td->panel_config->sleep.sleep_in) msleep(td->panel_config->sleep.sleep_in); return 0; } static int taal_sleep_out(struct taal_data *td) { int r; hw_guard_wait(td); r = taal_dcs_write_0(td, MIPI_DCS_EXIT_SLEEP_MODE); if (r) return r; hw_guard_start(td, 120); if (td->panel_config->sleep.sleep_out) msleep(td->panel_config->sleep.sleep_out); return 0; } static int taal_get_id(struct taal_data *td, u8 *id1, u8 *id2, u8 *id3) 
{ int r; r = taal_dcs_read_1(td, DCS_GET_ID1, id1); if (r) return r; r = taal_dcs_read_1(td, DCS_GET_ID2, id2); if (r) return r; r = taal_dcs_read_1(td, DCS_GET_ID3, id3); if (r) return r; return 0; } static int taal_set_addr_mode(struct taal_data *td, u8 rotate, bool mirror) { int r; u8 mode; int b5, b6, b7; r = taal_dcs_read_1(td, MIPI_DCS_GET_ADDRESS_MODE, &mode); if (r) return r; switch (rotate) { default: case 0: b7 = 0; b6 = 0; b5 = 0; break; case 1: b7 = 0; b6 = 1; b5 = 1; break; case 2: b7 = 1; b6 = 1; b5 = 0; break; case 3: b7 = 1; b6 = 0; b5 = 1; break; } if (mirror) b6 = !b6; mode &= ~((1<<7) | (1<<6) | (1<<5)); mode |= (b7 << 7) | (b6 << 6) | (b5 << 5); return taal_dcs_write_1(td, MIPI_DCS_SET_ADDRESS_MODE, mode); } static int taal_set_update_window(struct taal_data *td, u16 x, u16 y, u16 w, u16 h) { int r; u16 x1 = x; u16 x2 = x + w - 1; u16 y1 = y; u16 y2 = y + h - 1; u8 buf[5]; buf[0] = MIPI_DCS_SET_COLUMN_ADDRESS; buf[1] = (x1 >> 8) & 0xff; buf[2] = (x1 >> 0) & 0xff; buf[3] = (x2 >> 8) & 0xff; buf[4] = (x2 >> 0) & 0xff; r = dsi_vc_dcs_write_nosync(td->dssdev, td->channel, buf, sizeof(buf)); if (r) return r; buf[0] = MIPI_DCS_SET_PAGE_ADDRESS; buf[1] = (y1 >> 8) & 0xff; buf[2] = (y1 >> 0) & 0xff; buf[3] = (y2 >> 8) & 0xff; buf[4] = (y2 >> 0) & 0xff; r = dsi_vc_dcs_write_nosync(td->dssdev, td->channel, buf, sizeof(buf)); if (r) return r; dsi_vc_send_bta_sync(td->dssdev, td->channel); return r; } static void taal_queue_esd_work(struct omap_dss_device *dssdev) { struct taal_data *td = dev_get_drvdata(&dssdev->dev); if (td->esd_interval > 0) queue_delayed_work(td->workqueue, &td->esd_work, msecs_to_jiffies(td->esd_interval)); } static void taal_cancel_esd_work(struct omap_dss_device *dssdev) { struct taal_data *td = dev_get_drvdata(&dssdev->dev); cancel_delayed_work(&td->esd_work); } static void taal_queue_ulps_work(struct omap_dss_device *dssdev) { struct taal_data *td = dev_get_drvdata(&dssdev->dev); if (td->ulps_timeout > 0) 
queue_delayed_work(td->workqueue, &td->ulps_work, msecs_to_jiffies(td->ulps_timeout)); } static void taal_cancel_ulps_work(struct omap_dss_device *dssdev) { struct taal_data *td = dev_get_drvdata(&dssdev->dev); cancel_delayed_work(&td->ulps_work); } static int taal_enter_ulps(struct omap_dss_device *dssdev) { struct taal_data *td = dev_get_drvdata(&dssdev->dev); struct nokia_dsi_panel_data *panel_data = get_panel_data(dssdev); int r; if (td->ulps_enabled) return 0; taal_cancel_ulps_work(dssdev); r = _taal_enable_te(dssdev, false); if (r) goto err; disable_irq(gpio_to_irq(panel_data->ext_te_gpio)); omapdss_dsi_display_disable(dssdev, false, true); td->ulps_enabled = true; return 0; err: dev_err(&dssdev->dev, "enter ULPS failed"); taal_panel_reset(dssdev); td->ulps_enabled = false; taal_queue_ulps_work(dssdev); return r; } static int taal_exit_ulps(struct omap_dss_device *dssdev) { struct taal_data *td = dev_get_drvdata(&dssdev->dev); struct nokia_dsi_panel_data *panel_data = get_panel_data(dssdev); int r; if (!td->ulps_enabled) return 0; r = omapdss_dsi_display_enable(dssdev); if (r) { dev_err(&dssdev->dev, "failed to enable DSI\n"); goto err1; } omapdss_dsi_vc_enable_hs(dssdev, td->channel, true); r = _taal_enable_te(dssdev, true); if (r) { dev_err(&dssdev->dev, "failed to re-enable TE"); goto err2; } enable_irq(gpio_to_irq(panel_data->ext_te_gpio)); taal_queue_ulps_work(dssdev); td->ulps_enabled = false; return 0; err2: dev_err(&dssdev->dev, "failed to exit ULPS"); r = taal_panel_reset(dssdev); if (!r) { enable_irq(gpio_to_irq(panel_data->ext_te_gpio)); td->ulps_enabled = false; } err1: taal_queue_ulps_work(dssdev); return r; } static int taal_wake_up(struct omap_dss_device *dssdev) { struct taal_data *td = dev_get_drvdata(&dssdev->dev); if (td->ulps_enabled) return taal_exit_ulps(dssdev); taal_cancel_ulps_work(dssdev); taal_queue_ulps_work(dssdev); return 0; } static int taal_bl_update_status(struct backlight_device *dev) { struct omap_dss_device *dssdev = 
dev_get_drvdata(&dev->dev); struct taal_data *td = dev_get_drvdata(&dssdev->dev); int r; int level; if (dev->props.fb_blank == FB_BLANK_UNBLANK && dev->props.power == FB_BLANK_UNBLANK) level = dev->props.brightness; else level = 0; dev_dbg(&dssdev->dev, "update brightness to %d\n", level); mutex_lock(&td->lock); if (td->enabled) { dsi_bus_lock(dssdev); r = taal_wake_up(dssdev); if (!r) r = taal_dcs_write_1(td, DCS_BRIGHTNESS, level); dsi_bus_unlock(dssdev); } else { r = 0; } mutex_unlock(&td->lock); return r; } static int taal_bl_get_intensity(struct backlight_device *dev) { if (dev->props.fb_blank == FB_BLANK_UNBLANK && dev->props.power == FB_BLANK_UNBLANK) return dev->props.brightness; return 0; } static const struct backlight_ops taal_bl_ops = { .get_brightness = taal_bl_get_intensity, .update_status = taal_bl_update_status, }; static void taal_get_timings(struct omap_dss_device *dssdev, struct omap_video_timings *timings) { *timings = dssdev->panel.timings; } static void taal_get_resolution(struct omap_dss_device *dssdev, u16 *xres, u16 *yres) { struct taal_data *td = dev_get_drvdata(&dssdev->dev); if (td->rotate == 0 || td->rotate == 2) { *xres = dssdev->panel.timings.x_res; *yres = dssdev->panel.timings.y_res; } else { *yres = dssdev->panel.timings.x_res; *xres = dssdev->panel.timings.y_res; } } static ssize_t taal_num_errors_show(struct device *dev, struct device_attribute *attr, char *buf) { struct omap_dss_device *dssdev = to_dss_device(dev); struct taal_data *td = dev_get_drvdata(&dssdev->dev); u8 errors; int r; mutex_lock(&td->lock); if (td->enabled) { dsi_bus_lock(dssdev); r = taal_wake_up(dssdev); if (!r) r = taal_dcs_read_1(td, DCS_READ_NUM_ERRORS, &errors); dsi_bus_unlock(dssdev); } else { r = -ENODEV; } mutex_unlock(&td->lock); if (r) return r; return snprintf(buf, PAGE_SIZE, "%d\n", errors); } static ssize_t taal_hw_revision_show(struct device *dev, struct device_attribute *attr, char *buf) { struct omap_dss_device *dssdev = to_dss_device(dev); 
struct taal_data *td = dev_get_drvdata(&dssdev->dev); u8 id1, id2, id3; int r; mutex_lock(&td->lock); if (td->enabled) { dsi_bus_lock(dssdev); r = taal_wake_up(dssdev); if (!r) r = taal_get_id(td, &id1, &id2, &id3); dsi_bus_unlock(dssdev); } else { r = -ENODEV; } mutex_unlock(&td->lock); if (r) return r; return snprintf(buf, PAGE_SIZE, "%02x.%02x.%02x\n", id1, id2, id3); } static const char *cabc_modes[] = { "off", /* used also always when CABC is not supported */ "ui", "still-image", "moving-image", }; static ssize_t show_cabc_mode(struct device *dev, struct device_attribute *attr, char *buf) { struct omap_dss_device *dssdev = to_dss_device(dev); struct taal_data *td = dev_get_drvdata(&dssdev->dev); const char *mode_str; int mode; int len; mode = td->cabc_mode; mode_str = "unknown"; if (mode >= 0 && mode < ARRAY_SIZE(cabc_modes)) mode_str = cabc_modes[mode]; len = snprintf(buf, PAGE_SIZE, "%s\n", mode_str); return len < PAGE_SIZE - 1 ? len : PAGE_SIZE - 1; } static ssize_t store_cabc_mode(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct omap_dss_device *dssdev = to_dss_device(dev); struct taal_data *td = dev_get_drvdata(&dssdev->dev); int i; int r; for (i = 0; i < ARRAY_SIZE(cabc_modes); i++) { if (sysfs_streq(cabc_modes[i], buf)) break; } if (i == ARRAY_SIZE(cabc_modes)) return -EINVAL; mutex_lock(&td->lock); if (td->enabled) { dsi_bus_lock(dssdev); if (!td->cabc_broken) { r = taal_wake_up(dssdev); if (r) goto err; r = taal_dcs_write_1(td, DCS_WRITE_CABC, i); if (r) goto err; } dsi_bus_unlock(dssdev); } td->cabc_mode = i; mutex_unlock(&td->lock); return count; err: dsi_bus_unlock(dssdev); mutex_unlock(&td->lock); return r; } static ssize_t show_cabc_available_modes(struct device *dev, struct device_attribute *attr, char *buf) { int len; int i; for (i = 0, len = 0; len < PAGE_SIZE && i < ARRAY_SIZE(cabc_modes); i++) len += snprintf(&buf[len], PAGE_SIZE - len, "%s%s%s", i ? 
" " : "", cabc_modes[i], i == ARRAY_SIZE(cabc_modes) - 1 ? "\n" : ""); return len < PAGE_SIZE ? len : PAGE_SIZE - 1; } static ssize_t taal_store_esd_interval(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct omap_dss_device *dssdev = to_dss_device(dev); struct taal_data *td = dev_get_drvdata(&dssdev->dev); unsigned long t; int r; r = strict_strtoul(buf, 10, &t); if (r) return r; mutex_lock(&td->lock); taal_cancel_esd_work(dssdev); td->esd_interval = t; if (td->enabled) taal_queue_esd_work(dssdev); mutex_unlock(&td->lock); return count; } static ssize_t taal_show_esd_interval(struct device *dev, struct device_attribute *attr, char *buf) { struct omap_dss_device *dssdev = to_dss_device(dev); struct taal_data *td = dev_get_drvdata(&dssdev->dev); unsigned t; mutex_lock(&td->lock); t = td->esd_interval; mutex_unlock(&td->lock); return snprintf(buf, PAGE_SIZE, "%u\n", t); } static ssize_t taal_store_ulps(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct omap_dss_device *dssdev = to_dss_device(dev); struct taal_data *td = dev_get_drvdata(&dssdev->dev); unsigned long t; int r; r = strict_strtoul(buf, 10, &t); if (r) return r; mutex_lock(&td->lock); if (td->enabled) { dsi_bus_lock(dssdev); if (t) r = taal_enter_ulps(dssdev); else r = taal_wake_up(dssdev); dsi_bus_unlock(dssdev); } mutex_unlock(&td->lock); if (r) return r; return count; } static ssize_t taal_show_ulps(struct device *dev, struct device_attribute *attr, char *buf) { struct omap_dss_device *dssdev = to_dss_device(dev); struct taal_data *td = dev_get_drvdata(&dssdev->dev); unsigned t; mutex_lock(&td->lock); t = td->ulps_enabled; mutex_unlock(&td->lock); return snprintf(buf, PAGE_SIZE, "%u\n", t); } static ssize_t taal_store_ulps_timeout(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct omap_dss_device *dssdev = to_dss_device(dev); struct taal_data *td = dev_get_drvdata(&dssdev->dev); 
unsigned long t; int r; r = strict_strtoul(buf, 10, &t); if (r) return r; mutex_lock(&td->lock); td->ulps_timeout = t; if (td->enabled) { /* taal_wake_up will restart the timer */ dsi_bus_lock(dssdev); r = taal_wake_up(dssdev); dsi_bus_unlock(dssdev); } mutex_unlock(&td->lock); if (r) return r; return count; } static ssize_t taal_show_ulps_timeout(struct device *dev, struct device_attribute *attr, char *buf) { struct omap_dss_device *dssdev = to_dss_device(dev); struct taal_data *td = dev_get_drvdata(&dssdev->dev); unsigned t; mutex_lock(&td->lock); t = td->ulps_timeout; mutex_unlock(&td->lock); return snprintf(buf, PAGE_SIZE, "%u\n", t); } static DEVICE_ATTR(num_dsi_errors, S_IRUGO, taal_num_errors_show, NULL); static DEVICE_ATTR(hw_revision, S_IRUGO, taal_hw_revision_show, NULL); static DEVICE_ATTR(cabc_mode, S_IRUGO | S_IWUSR, show_cabc_mode, store_cabc_mode); static DEVICE_ATTR(cabc_available_modes, S_IRUGO, show_cabc_available_modes, NULL); static DEVICE_ATTR(esd_interval, S_IRUGO | S_IWUSR, taal_show_esd_interval, taal_store_esd_interval); static DEVICE_ATTR(ulps, S_IRUGO | S_IWUSR, taal_show_ulps, taal_store_ulps); static DEVICE_ATTR(ulps_timeout, S_IRUGO | S_IWUSR, taal_show_ulps_timeout, taal_store_ulps_timeout); static struct attribute *taal_attrs[] = { &dev_attr_num_dsi_errors.attr, &dev_attr_hw_revision.attr, &dev_attr_cabc_mode.attr, &dev_attr_cabc_available_modes.attr, &dev_attr_esd_interval.attr, &dev_attr_ulps.attr, &dev_attr_ulps_timeout.attr, NULL, }; static struct attribute_group taal_attr_group = { .attrs = taal_attrs, }; static void taal_hw_reset(struct omap_dss_device *dssdev) { struct taal_data *td = dev_get_drvdata(&dssdev->dev); struct nokia_dsi_panel_data *panel_data = get_panel_data(dssdev); if (panel_data->reset_gpio == -1) return; gpio_set_value(panel_data->reset_gpio, 1); if (td->panel_config->reset_sequence.high) udelay(td->panel_config->reset_sequence.high); /* reset the panel */ gpio_set_value(panel_data->reset_gpio, 0); /* assert 
reset */ if (td->panel_config->reset_sequence.low) udelay(td->panel_config->reset_sequence.low); gpio_set_value(panel_data->reset_gpio, 1); /* wait after releasing reset */ if (td->panel_config->sleep.hw_reset) msleep(td->panel_config->sleep.hw_reset); } static int taal_probe(struct omap_dss_device *dssdev) { struct backlight_properties props; struct taal_data *td; struct backlight_device *bldev = NULL; struct nokia_dsi_panel_data *panel_data = get_panel_data(dssdev); struct panel_config *panel_config = NULL; int r, i; dev_dbg(&dssdev->dev, "probe\n"); if (!panel_data || !panel_data->name) { r = -EINVAL; goto err; } for (i = 0; i < ARRAY_SIZE(panel_configs); i++) { if (strcmp(panel_data->name, panel_configs[i].name) == 0) { panel_config = &panel_configs[i]; break; } } if (!panel_config) { r = -EINVAL; goto err; } dssdev->panel.config = OMAP_DSS_LCD_TFT; dssdev->panel.timings = panel_config->timings; dssdev->panel.dsi_pix_fmt = OMAP_DSS_DSI_FMT_RGB888; td = kzalloc(sizeof(*td), GFP_KERNEL); if (!td) { r = -ENOMEM; goto err; } td->dssdev = dssdev; td->panel_config = panel_config; td->esd_interval = panel_data->esd_interval; td->ulps_enabled = false; td->ulps_timeout = panel_data->ulps_timeout; mutex_init(&td->lock); atomic_set(&td->do_update, 0); r = init_regulators(dssdev, panel_config->regulators, panel_config->num_regulators); if (r) goto err_reg; td->workqueue = create_singlethread_workqueue("taal_esd"); if (td->workqueue == NULL) { dev_err(&dssdev->dev, "can't create ESD workqueue\n"); r = -ENOMEM; goto err_wq; } INIT_DELAYED_WORK_DEFERRABLE(&td->esd_work, taal_esd_work); INIT_DELAYED_WORK(&td->ulps_work, taal_ulps_work); dev_set_drvdata(&dssdev->dev, td); taal_hw_reset(dssdev); if (panel_data->use_dsi_backlight) { memset(&props, 0, sizeof(struct backlight_properties)); props.max_brightness = 255; props.type = BACKLIGHT_RAW; bldev = backlight_device_register(dev_name(&dssdev->dev), &dssdev->dev, dssdev, &taal_bl_ops, &props); if (IS_ERR(bldev)) { r = 
PTR_ERR(bldev); goto err_bl; } td->bldev = bldev; bldev->props.fb_blank = FB_BLANK_UNBLANK; bldev->props.power = FB_BLANK_UNBLANK; bldev->props.brightness = 255; taal_bl_update_status(bldev); } if (panel_data->use_ext_te) { int gpio = panel_data->ext_te_gpio; r = gpio_request_one(gpio, GPIOF_IN, "taal irq"); if (r) { dev_err(&dssdev->dev, "GPIO request failed\n"); goto err_gpio; } r = request_irq(gpio_to_irq(gpio), taal_te_isr, IRQF_TRIGGER_RISING, "taal vsync", dssdev); if (r) { dev_err(&dssdev->dev, "IRQ request failed\n"); gpio_free(gpio); goto err_irq; } INIT_DELAYED_WORK_DEFERRABLE(&td->te_timeout_work, taal_te_timeout_work_callback); dev_dbg(&dssdev->dev, "Using GPIO TE\n"); } r = omap_dsi_request_vc(dssdev, &td->channel); if (r) { dev_err(&dssdev->dev, "failed to get virtual channel\n"); goto err_req_vc; } r = omap_dsi_set_vc_id(dssdev, td->channel, TCH); if (r) { dev_err(&dssdev->dev, "failed to set VC_ID\n"); goto err_vc_id; } r = sysfs_create_group(&dssdev->dev.kobj, &taal_attr_group); if (r) { dev_err(&dssdev->dev, "failed to create sysfs files\n"); goto err_vc_id; } return 0; err_vc_id: omap_dsi_release_vc(dssdev, td->channel); err_req_vc: if (panel_data->use_ext_te) free_irq(gpio_to_irq(panel_data->ext_te_gpio), dssdev); err_irq: if (panel_data->use_ext_te) gpio_free(panel_data->ext_te_gpio); err_gpio: if (bldev != NULL) backlight_device_unregister(bldev); err_bl: destroy_workqueue(td->workqueue); err_wq: free_regulators(panel_config->regulators, panel_config->num_regulators); err_reg: kfree(td); err: return r; } static void __exit taal_remove(struct omap_dss_device *dssdev) { struct taal_data *td = dev_get_drvdata(&dssdev->dev); struct nokia_dsi_panel_data *panel_data = get_panel_data(dssdev); struct backlight_device *bldev; dev_dbg(&dssdev->dev, "remove\n"); sysfs_remove_group(&dssdev->dev.kobj, &taal_attr_group); omap_dsi_release_vc(dssdev, td->channel); if (panel_data->use_ext_te) { int gpio = panel_data->ext_te_gpio; free_irq(gpio_to_irq(gpio), 
dssdev); gpio_free(gpio); } bldev = td->bldev; if (bldev != NULL) { bldev->props.power = FB_BLANK_POWERDOWN; taal_bl_update_status(bldev); backlight_device_unregister(bldev); } taal_cancel_ulps_work(dssdev); taal_cancel_esd_work(dssdev); destroy_workqueue(td->workqueue); /* reset, to be sure that the panel is in a valid state */ taal_hw_reset(dssdev); free_regulators(td->panel_config->regulators, td->panel_config->num_regulators); kfree(td); } static int taal_power_on(struct omap_dss_device *dssdev) { struct taal_data *td = dev_get_drvdata(&dssdev->dev); u8 id1, id2, id3; int r; r = omapdss_dsi_display_enable(dssdev); if (r) { dev_err(&dssdev->dev, "failed to enable DSI\n"); goto err0; } taal_hw_reset(dssdev); omapdss_dsi_vc_enable_hs(dssdev, td->channel, false); r = taal_sleep_out(td); if (r) goto err; r = taal_get_id(td, &id1, &id2, &id3); if (r) goto err; /* on early Taal revisions CABC is broken */ if (td->panel_config->type == PANEL_TAAL && (id2 == 0x00 || id2 == 0xff || id2 == 0x81)) td->cabc_broken = true; r = taal_dcs_write_1(td, DCS_BRIGHTNESS, 0xff); if (r) goto err; r = taal_dcs_write_1(td, DCS_CTRL_DISPLAY, (1<<2) | (1<<5)); /* BL | BCTRL */ if (r) goto err; r = taal_dcs_write_1(td, MIPI_DCS_SET_PIXEL_FORMAT, MIPI_DCS_PIXEL_FMT_24BIT); if (r) goto err; r = taal_set_addr_mode(td, td->rotate, td->mirror); if (r) goto err; if (!td->cabc_broken) { r = taal_dcs_write_1(td, DCS_WRITE_CABC, td->cabc_mode); if (r) goto err; } r = taal_dcs_write_0(td, MIPI_DCS_SET_DISPLAY_ON); if (r) goto err; r = _taal_enable_te(dssdev, td->te_enabled); if (r) goto err; r = dsi_enable_video_output(dssdev, td->channel); if (r) goto err; td->enabled = 1; if (!td->intro_printed) { dev_info(&dssdev->dev, "%s panel revision %02x.%02x.%02x\n", td->panel_config->name, id1, id2, id3); if (td->cabc_broken) dev_info(&dssdev->dev, "old Taal version, CABC disabled\n"); td->intro_printed = true; } omapdss_dsi_vc_enable_hs(dssdev, td->channel, true); return 0; err: dev_err(&dssdev->dev, 
"error while enabling panel, issuing HW reset\n"); taal_hw_reset(dssdev); omapdss_dsi_display_disable(dssdev, true, false); err0: return r; } static void taal_power_off(struct omap_dss_device *dssdev) { struct taal_data *td = dev_get_drvdata(&dssdev->dev); int r; dsi_disable_video_output(dssdev, td->channel); r = taal_dcs_write_0(td, MIPI_DCS_SET_DISPLAY_OFF); if (!r) r = taal_sleep_in(td); if (r) { dev_err(&dssdev->dev, "error disabling panel, issuing HW reset\n"); taal_hw_reset(dssdev); } omapdss_dsi_display_disable(dssdev, true, false); td->enabled = 0; } static int taal_panel_reset(struct omap_dss_device *dssdev) { dev_err(&dssdev->dev, "performing LCD reset\n"); taal_power_off(dssdev); taal_hw_reset(dssdev); return taal_power_on(dssdev); } static int taal_enable(struct omap_dss_device *dssdev) { struct taal_data *td = dev_get_drvdata(&dssdev->dev); int r; dev_dbg(&dssdev->dev, "enable\n"); mutex_lock(&td->lock); if (dssdev->state != OMAP_DSS_DISPLAY_DISABLED) { r = -EINVAL; goto err; } dsi_bus_lock(dssdev); r = taal_power_on(dssdev); dsi_bus_unlock(dssdev); if (r) goto err; taal_queue_esd_work(dssdev); dssdev->state = OMAP_DSS_DISPLAY_ACTIVE; mutex_unlock(&td->lock); return 0; err: dev_dbg(&dssdev->dev, "enable failed\n"); mutex_unlock(&td->lock); return r; } static void taal_disable(struct omap_dss_device *dssdev) { struct taal_data *td = dev_get_drvdata(&dssdev->dev); dev_dbg(&dssdev->dev, "disable\n"); mutex_lock(&td->lock); taal_cancel_ulps_work(dssdev); taal_cancel_esd_work(dssdev); dsi_bus_lock(dssdev); if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE) { int r; r = taal_wake_up(dssdev); if (!r) taal_power_off(dssdev); } dsi_bus_unlock(dssdev); dssdev->state = OMAP_DSS_DISPLAY_DISABLED; mutex_unlock(&td->lock); } static int taal_suspend(struct omap_dss_device *dssdev) { struct taal_data *td = dev_get_drvdata(&dssdev->dev); int r; dev_dbg(&dssdev->dev, "suspend\n"); mutex_lock(&td->lock); if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE) { r = -EINVAL; goto err; } 
taal_cancel_ulps_work(dssdev); taal_cancel_esd_work(dssdev); dsi_bus_lock(dssdev); r = taal_wake_up(dssdev); if (!r) taal_power_off(dssdev); dsi_bus_unlock(dssdev); dssdev->state = OMAP_DSS_DISPLAY_SUSPENDED; mutex_unlock(&td->lock); return 0; err: mutex_unlock(&td->lock); return r; } static int taal_resume(struct omap_dss_device *dssdev) { struct taal_data *td = dev_get_drvdata(&dssdev->dev); int r; dev_dbg(&dssdev->dev, "resume\n"); mutex_lock(&td->lock); if (dssdev->state != OMAP_DSS_DISPLAY_SUSPENDED) { r = -EINVAL; goto err; } dsi_bus_lock(dssdev); r = taal_power_on(dssdev); dsi_bus_unlock(dssdev); if (r) { dssdev->state = OMAP_DSS_DISPLAY_DISABLED; } else { dssdev->state = OMAP_DSS_DISPLAY_ACTIVE; taal_queue_esd_work(dssdev); } mutex_unlock(&td->lock); return r; err: mutex_unlock(&td->lock); return r; } static void taal_framedone_cb(int err, void *data) { struct omap_dss_device *dssdev = data; dev_dbg(&dssdev->dev, "framedone, err %d\n", err); dsi_bus_unlock(dssdev); } static irqreturn_t taal_te_isr(int irq, void *data) { struct omap_dss_device *dssdev = data; struct taal_data *td = dev_get_drvdata(&dssdev->dev); int old; int r; old = atomic_cmpxchg(&td->do_update, 1, 0); if (old) { cancel_delayed_work(&td->te_timeout_work); r = omap_dsi_update(dssdev, td->channel, taal_framedone_cb, dssdev); if (r) goto err; } return IRQ_HANDLED; err: dev_err(&dssdev->dev, "start update failed\n"); dsi_bus_unlock(dssdev); return IRQ_HANDLED; } static void taal_te_timeout_work_callback(struct work_struct *work) { struct taal_data *td = container_of(work, struct taal_data, te_timeout_work.work); struct omap_dss_device *dssdev = td->dssdev; dev_err(&dssdev->dev, "TE not received for 250ms!\n"); atomic_set(&td->do_update, 0); dsi_bus_unlock(dssdev); } static int taal_update(struct omap_dss_device *dssdev, u16 x, u16 y, u16 w, u16 h) { struct taal_data *td = dev_get_drvdata(&dssdev->dev); struct nokia_dsi_panel_data *panel_data = get_panel_data(dssdev); int r; 
dev_dbg(&dssdev->dev, "update %d, %d, %d x %d\n", x, y, w, h); mutex_lock(&td->lock); dsi_bus_lock(dssdev); r = taal_wake_up(dssdev); if (r) goto err; if (!td->enabled) { r = 0; goto err; } /* XXX no need to send this every frame, but dsi break if not done */ r = taal_set_update_window(td, 0, 0, td->panel_config->timings.x_res, td->panel_config->timings.y_res); if (r) goto err; if (td->te_enabled && panel_data->use_ext_te) { schedule_delayed_work(&td->te_timeout_work, msecs_to_jiffies(250)); atomic_set(&td->do_update, 1); } else { r = omap_dsi_update(dssdev, td->channel, taal_framedone_cb, dssdev); if (r) goto err; } /* note: no bus_unlock here. unlock is in framedone_cb */ mutex_unlock(&td->lock); return 0; err: dsi_bus_unlock(dssdev); mutex_unlock(&td->lock); return r; } static int taal_sync(struct omap_dss_device *dssdev) { struct taal_data *td = dev_get_drvdata(&dssdev->dev); dev_dbg(&dssdev->dev, "sync\n"); mutex_lock(&td->lock); dsi_bus_lock(dssdev); dsi_bus_unlock(dssdev); mutex_unlock(&td->lock); dev_dbg(&dssdev->dev, "sync done\n"); return 0; } static int _taal_enable_te(struct omap_dss_device *dssdev, bool enable) { struct taal_data *td = dev_get_drvdata(&dssdev->dev); struct nokia_dsi_panel_data *panel_data = get_panel_data(dssdev); int r; if (enable) r = taal_dcs_write_1(td, MIPI_DCS_SET_TEAR_ON, 0); else r = taal_dcs_write_0(td, MIPI_DCS_SET_TEAR_OFF); if (!panel_data->use_ext_te) omapdss_dsi_enable_te(dssdev, enable); if (td->panel_config->sleep.enable_te) msleep(td->panel_config->sleep.enable_te); return r; } static int taal_enable_te(struct omap_dss_device *dssdev, bool enable) { struct taal_data *td = dev_get_drvdata(&dssdev->dev); int r; mutex_lock(&td->lock); if (td->te_enabled == enable) goto end; dsi_bus_lock(dssdev); if (td->enabled) { r = taal_wake_up(dssdev); if (r) goto err; r = _taal_enable_te(dssdev, enable); if (r) goto err; } td->te_enabled = enable; dsi_bus_unlock(dssdev); end: mutex_unlock(&td->lock); return 0; err: 
dsi_bus_unlock(dssdev); mutex_unlock(&td->lock); return r; } static int taal_get_te(struct omap_dss_device *dssdev) { struct taal_data *td = dev_get_drvdata(&dssdev->dev); int r; mutex_lock(&td->lock); r = td->te_enabled; mutex_unlock(&td->lock); return r; } static int taal_rotate(struct omap_dss_device *dssdev, u8 rotate) { struct taal_data *td = dev_get_drvdata(&dssdev->dev); int r; dev_dbg(&dssdev->dev, "rotate %d\n", rotate); mutex_lock(&td->lock); if (td->rotate == rotate) goto end; dsi_bus_lock(dssdev); if (td->enabled) { r = taal_wake_up(dssdev); if (r) goto err; r = taal_set_addr_mode(td, rotate, td->mirror); if (r) goto err; } td->rotate = rotate; dsi_bus_unlock(dssdev); end: mutex_unlock(&td->lock); return 0; err: dsi_bus_unlock(dssdev); mutex_unlock(&td->lock); return r; } static u8 taal_get_rotate(struct omap_dss_device *dssdev) { struct taal_data *td = dev_get_drvdata(&dssdev->dev); int r; mutex_lock(&td->lock); r = td->rotate; mutex_unlock(&td->lock); return r; } static int taal_mirror(struct omap_dss_device *dssdev, bool enable) { struct taal_data *td = dev_get_drvdata(&dssdev->dev); int r; dev_dbg(&dssdev->dev, "mirror %d\n", enable); mutex_lock(&td->lock); if (td->mirror == enable) goto end; dsi_bus_lock(dssdev); if (td->enabled) { r = taal_wake_up(dssdev); if (r) goto err; r = taal_set_addr_mode(td, td->rotate, enable); if (r) goto err; } td->mirror = enable; dsi_bus_unlock(dssdev); end: mutex_unlock(&td->lock); return 0; err: dsi_bus_unlock(dssdev); mutex_unlock(&td->lock); return r; } static bool taal_get_mirror(struct omap_dss_device *dssdev) { struct taal_data *td = dev_get_drvdata(&dssdev->dev); int r; mutex_lock(&td->lock); r = td->mirror; mutex_unlock(&td->lock); return r; } static int taal_run_test(struct omap_dss_device *dssdev, int test_num) { struct taal_data *td = dev_get_drvdata(&dssdev->dev); u8 id1, id2, id3; int r; mutex_lock(&td->lock); if (!td->enabled) { r = -ENODEV; goto err1; } dsi_bus_lock(dssdev); r = taal_wake_up(dssdev); 
if (r) goto err2; r = taal_dcs_read_1(td, DCS_GET_ID1, &id1); if (r) goto err2; r = taal_dcs_read_1(td, DCS_GET_ID2, &id2); if (r) goto err2; r = taal_dcs_read_1(td, DCS_GET_ID3, &id3); if (r) goto err2; dsi_bus_unlock(dssdev); mutex_unlock(&td->lock); return 0; err2: dsi_bus_unlock(dssdev); err1: mutex_unlock(&td->lock); return r; } static int taal_memory_read(struct omap_dss_device *dssdev, void *buf, size_t size, u16 x, u16 y, u16 w, u16 h) { int r; int first = 1; int plen; unsigned buf_used = 0; struct taal_data *td = dev_get_drvdata(&dssdev->dev); if (size < w * h * 3) return -ENOMEM; mutex_lock(&td->lock); if (!td->enabled) { r = -ENODEV; goto err1; } size = min(w * h * 3, dssdev->panel.timings.x_res * dssdev->panel.timings.y_res * 3); dsi_bus_lock(dssdev); r = taal_wake_up(dssdev); if (r) goto err2; /* plen 1 or 2 goes into short packet. until checksum error is fixed, * use short packets. plen 32 works, but bigger packets seem to cause * an error. */ if (size % 2) plen = 1; else plen = 2; taal_set_update_window(td, x, y, w, h); r = dsi_vc_set_max_rx_packet_size(dssdev, td->channel, plen); if (r) goto err2; while (buf_used < size) { u8 dcs_cmd = first ? 
0x2e : 0x3e; first = 0; r = dsi_vc_dcs_read(dssdev, td->channel, dcs_cmd, buf + buf_used, size - buf_used); if (r < 0) { dev_err(&dssdev->dev, "read error\n"); goto err3; } buf_used += r; if (r < plen) { dev_err(&dssdev->dev, "short read\n"); break; } if (signal_pending(current)) { dev_err(&dssdev->dev, "signal pending, " "aborting memory read\n"); r = -ERESTARTSYS; goto err3; } } r = buf_used; err3: dsi_vc_set_max_rx_packet_size(dssdev, td->channel, 1); err2: dsi_bus_unlock(dssdev); err1: mutex_unlock(&td->lock); return r; } static void taal_ulps_work(struct work_struct *work) { struct taal_data *td = container_of(work, struct taal_data, ulps_work.work); struct omap_dss_device *dssdev = td->dssdev; mutex_lock(&td->lock); if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE || !td->enabled) { mutex_unlock(&td->lock); return; } dsi_bus_lock(dssdev); taal_enter_ulps(dssdev); dsi_bus_unlock(dssdev); mutex_unlock(&td->lock); } static void taal_esd_work(struct work_struct *work) { struct taal_data *td = container_of(work, struct taal_data, esd_work.work); struct omap_dss_device *dssdev = td->dssdev; struct nokia_dsi_panel_data *panel_data = get_panel_data(dssdev); u8 state1, state2; int r; mutex_lock(&td->lock); if (!td->enabled) { mutex_unlock(&td->lock); return; } dsi_bus_lock(dssdev); r = taal_wake_up(dssdev); if (r) { dev_err(&dssdev->dev, "failed to exit ULPS\n"); goto err; } r = taal_dcs_read_1(td, MIPI_DCS_GET_DIAGNOSTIC_RESULT, &state1); if (r) { dev_err(&dssdev->dev, "failed to read Taal status\n"); goto err; } /* Run self diagnostics */ r = taal_sleep_out(td); if (r) { dev_err(&dssdev->dev, "failed to run Taal self-diagnostics\n"); goto err; } r = taal_dcs_read_1(td, MIPI_DCS_GET_DIAGNOSTIC_RESULT, &state2); if (r) { dev_err(&dssdev->dev, "failed to read Taal status\n"); goto err; } /* Each sleep out command will trigger a self diagnostic and flip * Bit6 if the test passes. 
*/ if (!((state1 ^ state2) & (1 << 6))) { dev_err(&dssdev->dev, "LCD self diagnostics failed\n"); goto err; } /* Self-diagnostics result is also shown on TE GPIO line. We need * to re-enable TE after self diagnostics */ if (td->te_enabled && panel_data->use_ext_te) { r = taal_dcs_write_1(td, MIPI_DCS_SET_TEAR_ON, 0); if (r) goto err; } dsi_bus_unlock(dssdev); taal_queue_esd_work(dssdev); mutex_unlock(&td->lock); return; err: dev_err(&dssdev->dev, "performing LCD reset\n"); taal_panel_reset(dssdev); dsi_bus_unlock(dssdev); taal_queue_esd_work(dssdev); mutex_unlock(&td->lock); } static struct omap_dss_driver taal_driver = { .probe = taal_probe, .remove = __exit_p(taal_remove), .enable = taal_enable, .disable = taal_disable, .suspend = taal_suspend, .resume = taal_resume, .update = taal_update, .sync = taal_sync, .get_resolution = taal_get_resolution, .get_recommended_bpp = omapdss_default_get_recommended_bpp, .enable_te = taal_enable_te, .get_te = taal_get_te, .set_rotate = taal_rotate, .get_rotate = taal_get_rotate, .set_mirror = taal_mirror, .get_mirror = taal_get_mirror, .run_test = taal_run_test, .memory_read = taal_memory_read, .get_timings = taal_get_timings, .driver = { .name = "taal", .owner = THIS_MODULE, }, }; static int __init taal_init(void) { omap_dss_register_driver(&taal_driver); return 0; } static void __exit taal_exit(void) { omap_dss_unregister_driver(&taal_driver); } module_init(taal_init); module_exit(taal_exit); MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@nokia.com>"); MODULE_DESCRIPTION("Taal Driver"); MODULE_LICENSE("GPL");
gpl-2.0
sktjdgns1189/android_kernel_samsung_SHV-E220S
arch/m68k/lib/muldi3.c
7301
2911
/* muldi3.c extracted from gcc-2.7.2.3/libgcc2.c and gcc-2.7.2.3/longlong.h which is: */ /* Copyright (C) 1989, 1992, 1993, 1994, 1995 Free Software Foundation, Inc. This file is part of GNU CC. GNU CC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GNU CC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GNU CC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #if defined(CONFIG_M68000) || defined(CONFIG_COLDFIRE) #define SI_TYPE_SIZE 32 #define __BITS4 (SI_TYPE_SIZE / 4) #define __ll_B (1L << (SI_TYPE_SIZE / 2)) #define __ll_lowpart(t) ((USItype) (t) % __ll_B) #define __ll_highpart(t) ((USItype) (t) / __ll_B) #define umul_ppmm(w1, w0, u, v) \ do { \ USItype __x0, __x1, __x2, __x3; \ USItype __ul, __vl, __uh, __vh; \ \ __ul = __ll_lowpart (u); \ __uh = __ll_highpart (u); \ __vl = __ll_lowpart (v); \ __vh = __ll_highpart (v); \ \ __x0 = (USItype) __ul * __vl; \ __x1 = (USItype) __ul * __vh; \ __x2 = (USItype) __uh * __vl; \ __x3 = (USItype) __uh * __vh; \ \ __x1 += __ll_highpart (__x0);/* this can't give carry */ \ __x1 += __x2; /* but this indeed can */ \ if (__x1 < __x2) /* did we get it? */ \ __x3 += __ll_B; /* yes, add it in the proper pos. 
*/ \ \ (w1) = __x3 + __ll_highpart (__x1); \ (w0) = __ll_lowpart (__x1) * __ll_B + __ll_lowpart (__x0); \ } while (0) #else #define umul_ppmm(w1, w0, u, v) \ __asm__ ("mulu%.l %3,%1:%0" \ : "=d" ((USItype)(w0)), \ "=d" ((USItype)(w1)) \ : "%0" ((USItype)(u)), \ "dmi" ((USItype)(v))) #endif #define __umulsidi3(u, v) \ ({DIunion __w; \ umul_ppmm (__w.s.high, __w.s.low, u, v); \ __w.ll; }) typedef int SItype __attribute__ ((mode (SI))); typedef unsigned int USItype __attribute__ ((mode (SI))); typedef int DItype __attribute__ ((mode (DI))); typedef int word_type __attribute__ ((mode (__word__))); struct DIstruct {SItype high, low;}; typedef union { struct DIstruct s; DItype ll; } DIunion; DItype __muldi3 (DItype u, DItype v) { DIunion w; DIunion uu, vv; uu.ll = u, vv.ll = v; w.ll = __umulsidi3 (uu.s.low, vv.s.low); w.s.high += ((USItype) uu.s.low * (USItype) vv.s.high + (USItype) uu.s.high * (USItype) vv.s.low); return w.ll; }
gpl-2.0
pan60157/A830L_JB_KERNEL_214
drivers/staging/cxt1e1/sbeproc.c
8325
11466
/* Copyright (C) 2004-2005 SBE, Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/types.h> #include <linux/module.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/proc_fs.h> #include <linux/sched.h> #include <asm/uaccess.h> #include "pmcc4_sysdep.h" #include "sbecom_inline_linux.h" #include "pmcc4_private.h" #include "sbeproc.h" /* forwards */ void sbecom_get_brdinfo (ci_t *, struct sbe_brd_info *, u_int8_t *); extern struct s_hdw_info hdw_info[MAX_BOARDS]; #ifdef CONFIG_PROC_FS /********************************************************************/ /* procfs stuff */ /********************************************************************/ void sbecom_proc_brd_cleanup (ci_t * ci) { if (ci->dir_dev) { char dir[7 + SBE_IFACETMPL_SIZE + 1]; snprintf(dir, sizeof(dir), "driver/%s", ci->devname); remove_proc_entry("info", ci->dir_dev); remove_proc_entry(dir, NULL); ci->dir_dev = NULL; } } static int sbecom_proc_get_sbe_info (char *buffer, char **start, off_t offset, int length, int *eof, void *priv) { ci_t *ci = (ci_t *) priv; int len = 0; char *spd; struct sbe_brd_info *bip; if (!(bip = OS_kmalloc (sizeof (struct sbe_brd_info)))) { return -ENOMEM; } #if 0 /** RLD DEBUG **/ pr_info(">> sbecom_proc_get_sbe_info: entered, offset %d. 
length %d.\n", (int) offset, (int) length); #endif { hdw_info_t *hi = &hdw_info[ci->brdno]; u_int8_t *bsn = 0; switch (hi->promfmt) { case PROM_FORMAT_TYPE1: bsn = (u_int8_t *) hi->mfg_info.pft1.Serial; break; case PROM_FORMAT_TYPE2: bsn = (u_int8_t *) hi->mfg_info.pft2.Serial; break; } sbecom_get_brdinfo (ci, bip, bsn); } #if 0 /** RLD DEBUG **/ pr_info(">> sbecom_get_brdinfo: returned, first_if %p <%s> last_if %p <%s>\n", (char *) &bip->first_iname, (char *) &bip->first_iname, (char *) &bip->last_iname, (char *) &bip->last_iname); #endif len += sprintf (buffer + len, "Board Type: "); switch (bip->brd_id) { case SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPMC_C1T3): len += sprintf (buffer + len, "wanPMC-C1T3"); break; case SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPTMC_256T3_E1): len += sprintf (buffer + len, "wanPTMC-256T3 <E1>"); break; case SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPTMC_256T3_T1): len += sprintf (buffer + len, "wanPTMC-256T3 <T1>"); break; case SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPTMC_C24TE1): len += sprintf (buffer + len, "wanPTMC-C24TE1"); break; case SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPMC_C4T1E1): case SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPMC_C4T1E1_L): len += sprintf (buffer + len, "wanPMC-C4T1E1"); break; case SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPMC_C2T1E1): case SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPMC_C2T1E1_L): len += sprintf (buffer + len, "wanPMC-C2T1E1"); break; case SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPMC_C1T1E1): case SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPMC_C1T1E1_L): len += sprintf (buffer + len, "wanPMC-C1T1E1"); break; case SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPCI_C4T1E1): len += sprintf (buffer + len, "wanPCI-C4T1E1"); break; case SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPCI_C2T1E1): len += sprintf (buffer + len, "wanPCI-C2T1E1"); break; case SBE_BOARD_ID (PCI_VENDOR_ID_SBE, 
PCI_DEVICE_ID_WANPCI_C1T1E1): len += sprintf (buffer + len, "wanPCI-C1T1E1"); break; default: len += sprintf (buffer + len, "unknown"); break; } len += sprintf (buffer + len, " [%08X]\n", bip->brd_id); len += sprintf (buffer + len, "Board Number: %d\n", bip->brdno); len += sprintf (buffer + len, "Hardware ID: 0x%02X\n", ci->hdw_bid); len += sprintf (buffer + len, "Board SN: %06X\n", bip->brd_sn); len += sprintf(buffer + len, "Board MAC: %pMF\n", bip->brd_mac_addr); len += sprintf (buffer + len, "Ports: %d\n", ci->max_port); len += sprintf (buffer + len, "Channels: %d\n", bip->brd_chan_cnt); #if 1 len += sprintf (buffer + len, "Interface: %s -> %s\n", (char *) &bip->first_iname, (char *) &bip->last_iname); #else len += sprintf (buffer + len, "Interface: <not available> 1st %p lst %p\n", (char *) &bip->first_iname, (char *) &bip->last_iname); #endif switch (bip->brd_pci_speed) { case BINFO_PCI_SPEED_33: spd = "33Mhz"; break; case BINFO_PCI_SPEED_66: spd = "66Mhz"; break; default: spd = "<not available>"; break; } len += sprintf (buffer + len, "PCI Bus Speed: %s\n", spd); len += sprintf (buffer + len, "Release: %s\n", ci->release); #ifdef SBE_PMCC4_ENABLE { extern int cxt1e1_max_mru; #if 0 extern int max_chans_used; extern int cxt1e1_max_mtu; #endif extern int max_rxdesc_used, max_txdesc_used; len += sprintf (buffer + len, "\ncxt1e1_max_mru: %d\n", cxt1e1_max_mru); #if 0 len += sprintf (buffer + len, "\nmax_chans_used: %d\n", max_chans_used); len += sprintf (buffer + len, "cxt1e1_max_mtu: %d\n", cxt1e1_max_mtu); #endif len += sprintf (buffer + len, "max_rxdesc_used: %d\n", max_rxdesc_used); len += sprintf (buffer + len, "max_txdesc_used: %d\n", max_txdesc_used); } #endif OS_kfree (bip); /* cleanup */ /*** * How to be a proc read function * ------------------------------ * Prototype: * int f(char *buffer, char **start, off_t offset, * int count, int *peof, void *dat) * * Assume that the buffer is "count" bytes in size. 
* * If you know you have supplied all the data you * have, set *peof. * * You have three ways to return data: * 0) Leave *start = NULL. (This is the default.) * Put the data of the requested offset at that * offset within the buffer. Return the number (n) * of bytes there are from the beginning of the * buffer up to the last byte of data. If the * number of supplied bytes (= n - offset) is * greater than zero and you didn't signal eof * and the reader is prepared to take more data * you will be called again with the requested * offset advanced by the number of bytes * absorbed. This interface is useful for files * no larger than the buffer. * 1) Set *start = an unsigned long value less than * the buffer address but greater than zero. * Put the data of the requested offset at the * beginning of the buffer. Return the number of * bytes of data placed there. If this number is * greater than zero and you didn't signal eof * and the reader is prepared to take more data * you will be called again with the requested * offset advanced by *start. This interface is * useful when you have a large file consisting * of a series of blocks which you want to count * and return as wholes. * (Hack by Paul.Russell@rustcorp.com.au) * 2) Set *start = an address within the buffer. * Put the data of the requested offset at *start. * Return the number of bytes of data placed there. * If this number is greater than zero and you * didn't signal eof and the reader is prepared to * take more data you will be called again with the * requested offset advanced by the number of bytes * absorbed. */ #if 1 /* #4 - interpretation of above = set EOF, return len */ *eof = 1; #endif #if 0 /* * #1 - from net/wireless/atmel.c RLD NOTE -there's something wrong with * this plagarized code which results in this routine being called TWICE. * The second call returns ZERO, resulting in hidden failure, but at * least only a single message set is being displayed. 
*/ if (len <= offset + length) *eof = 1; *start = buffer + offset; len -= offset; if (len > length) len = length; if (len < 0) len = 0; #endif #if 0 /* #2 from net/tokenring/olympic.c + * lanstreamer.c */ { off_t begin = 0; int size = 0; off_t pos = 0; size = len; pos = begin + size; if (pos < offset) { len = 0; begin = pos; } *start = buffer + (offset - begin); /* Start of wanted data */ len -= (offset - begin); /* Start slop */ if (len > length) len = length; /* Ending slop */ } #endif #if 0 /* #3 from * char/ftape/lowlevel/ftape-proc.c */ len = strlen (buffer); *start = NULL; if (offset + length >= len) *eof = 1; else *eof = 0; #endif #if 0 pr_info(">> proc_fs: returned len = %d., start %p\n", len, start); /* RLD DEBUG */ #endif /*** using NONE: returns = 314.314.314. using #1 : returns = 314, 0. using #2 : returns = 314, 0, 0. using #3 : returns = 314, 314. using #4 : returns = 314, 314. ***/ return len; } /* initialize the /proc subsystem for the specific SBE driver */ int __init sbecom_proc_brd_init (ci_t * ci) { struct proc_dir_entry *e; char dir[7 + SBE_IFACETMPL_SIZE + 1]; /* create a directory in the root procfs */ snprintf(dir, sizeof(dir), "driver/%s", ci->devname); ci->dir_dev = proc_mkdir(dir, NULL); if (!ci->dir_dev) { pr_err("Unable to create directory /proc/driver/%s\n", ci->devname); goto fail; } e = create_proc_read_entry ("info", S_IFREG | S_IRUGO, ci->dir_dev, sbecom_proc_get_sbe_info, ci); if (!e) { pr_err("Unable to create entry /proc/driver/%s/info\n", ci->devname); goto fail; } return 0; fail: sbecom_proc_brd_cleanup (ci); return 1; } #else /*** ! CONFIG_PROC_FS ***/ /* stubbed off dummy routines */ void sbecom_proc_brd_cleanup (ci_t * ci) { } int __init sbecom_proc_brd_init (ci_t * ci) { return 0; } #endif /*** CONFIG_PROC_FS ***/ /*** End-of-File ***/
gpl-2.0
eagleeyetom/android_kernel_mtk_mt6572
drivers/staging/comedi/drivers/addi-data/APCI1710_82x54.c
8325
55614
/* * Copyright (C) 2004,2005 ADDI-DATA GmbH for the source code of this module. * * ADDI-DATA GmbH * Dieselstrasse 3 * D-77833 Ottersweier * Tel: +19(0)7223/9493-0 * Fax: +49(0)7223/9493-92 * http://www.addi-data.com * info@addi-data.com * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 2 of the License, or (at your option) * any later version. */ /* | Description : APCI-1710 82X54 timer module | */ #include "APCI1710_82x54.h" /* +----------------------------------------------------------------------------+ | Function Name : _INT_ i_APCI1710_InitTimer | | (unsigned char_ b_BoardHandle, | | unsigned char_ b_ModulNbr, | | unsigned char_ b_TimerNbr, | | unsigned char_ b_TimerMode, | | ULONG_ ul_ReloadValue, | | unsigned char_ b_InputClockSelection, | | unsigned char_ b_InputClockLevel, | | unsigned char_ b_OutputLevel, | | unsigned char_ b_HardwareGateLevel) int i_InsnConfig_InitTimer(struct comedi_device *dev,struct comedi_subdevice *s, struct comedi_insn *insn,unsigned int *data) | +----------------------------------------------------------------------------+ | Task : Configure the Timer (b_TimerNbr) operating mode | | (b_TimerMode) from selected module (b_ModulNbr). | | You must calling this function be for you call any | | other function witch access of the timer. | | | | | | Timer mode description table | | | |+--------+-----------------------------+--------------+--------------------+| ||Selected+ Mode description +u_ReloadValue | Hardware gate input|| || mode | | description | action || |+--------+-----------------------------+--------------+--------------------+| || |Mode 0 is typically used | | || || |for event counting. After | | || || |the initialisation, OUT | | || || |is initially low, and | | || || 0 |will remain low until the |Start counting| Hardware gate || || |counter reaches zero. 
| value | || || |OUT then goes high and | | || || |remains high until a new | | || || |count is written. See | | || || |"i_APCI1710_WriteTimerValue" | | || || |function. | | || |+--------+-----------------------------+--------------+--------------------+| || |Mode 1 is similar to mode 0 | | || || |except for the gate input | | || || 1 |action. The gate input is not|Start counting| Hardware trigger || || |used for enabled or disabled | value | || || |the timer. | | || || |The gate input is used for | | || || |triggered the timer. | | || |+--------+-----------------------------+--------------+--------------------+| || |This mode functions like a | | || || |divide-by-ul_ReloadValue | | || || |counter. It is typically used| | || || |to generate a real time clock| | || || |interrupt. OUT will initially| | || || 2 |be high after the | Division | Hardware gate || || |initialisation. When the | factor | || || |initial count has decremented| | || || |to 1, OUT goes low for one | | || || |CLK pule. OUT then goes high | | || || |again, the counter reloads | | || || |the initial count | | || || |(ul_ReloadValue) and the | | || || |process is repeated. | | || || |This action can generated a | | || || |interrupt. See function | | || || |"i_APCI1710_SetBoardInt- | | || || |RoutineX" | | || || |and "i_APCI1710_EnableTimer" | | || |+--------+-----------------------------+--------------+--------------------+| || |Mode 3 is typically used for | | || || |baud rate generation. This | | || || |mode is similar to mode 2 | | || || |except for the duty cycle of | | || || 3 |OUT. OUT will initially be | Division | Hardware gate || || |high after the initialisation| factor | || || |When half the initial count | | || || |(ul_ReloadValue) has expired,| | || || |OUT goes low for the | | || || |remainder of the count. The | | || || |mode is periodic; the | | || || |sequence above is repeated | | || || |indefinitely. 
| | || |+--------+-----------------------------+--------------+--------------------+| || |OUT will be initially high | | || || |after the initialisation. | | || || |When the initial count | | || || 4 |expires OUT will go low for |Start counting| Hardware gate || || |one CLK pulse and then go | value | || || |high again. | | || || |The counting sequences is | | || || |triggered by writing a new | | || || |value. See | | || || |"i_APCI1710_WriteTimerValue" | | || || |function. If a new count is | | || || |written during counting, | | || || |it will be loaded on the | | || || |next CLK pulse | | || |+--------+-----------------------------+--------------+--------------------+| || |Mode 5 is similar to mode 4 | | || || |except for the gate input | | || || |action. The gate input is not| | || || 5 |used for enabled or disabled |Start counting| Hardware trigger || || |the timer. The gate input is | value | || || |used for triggered the timer.| | || |+--------+-----------------------------+--------------+--------------------+| | | | | | | | Input clock selection table | | | | +--------------------------------+------------------------------------+ | | | b_InputClockSelection | Description | | | | parameter | | | | +--------------------------------+------------------------------------+ | | | APCI1710_PCI_BUS_CLOCK | For the timer input clock, the PCI | | | | | bus clock / 4 is used. This PCI bus| | | | | clock can be 30MHz or 33MHz. For | | | | | Timer 0 only this selection are | | | | | available. | | | +--------------------------------+------------------------------------+ | | | APCI1710_ FRONT_CONNECTOR_INPUT| Of the front connector you have the| | | | | possibility to inject a input clock| | | | | for Timer 1 or Timer 2. The source | | | | | from this clock can eat the output | | | | | clock from Timer 0 or any other | | | | | clock source. 
| | | +--------------------------------+------------------------------------+ | | | +----------------------------------------------------------------------------+ | Input Parameters : unsigned char_ b_BoardHandle : Handle of board | | APCI-1710 | | unsigned char_ b_ModulNbr : Module number to | | configure (0 to 3) | | unsigned char_ b_TimerNbr : Timer number to | | configure (0 to 2) | | unsigned char_ b_TimerMode : Timer mode selection | | (0 to 5) | | 0: Interrupt on terminal| | count | | 1: Hardware | | retriggerable one- | | shot | | 2: Rate generator | | 3: Square wave mode | | 4: Software triggered | | strobe | | 5: Hardware triggered | | strobe | | See timer mode | | description table. | | ULONG_ ul_ReloadValue : Start counting value | | or division factor | | See timer mode | | description table. | | unsigned char_ b_InputClockSelection : Selection from input | | timer clock. | | See input clock | | selection table. | | unsigned char_ b_InputClockLevel : Selection from input | | clock level. | | 0 : Low active | | (Input inverted) | | 1 : High active | | unsigned char_ b_OutputLevel, : Selection from output | | clock level. | | 0 : Low active | | 1 : High active | | (Output inverted) | | unsigned char_ b_HardwareGateLevel : Selection from | | hardware gate level. | | 0 : Low active | | (Input inverted) | | 1 : High active | | If you will not used | | the hardware gate set | | this value to 0. 
|b_ModulNbr = (unsigned char) CR_AREF(insn->chanspec); b_TimerNbr = (unsigned char) CR_CHAN(insn->chanspec); b_TimerMode = (unsigned char) data[0]; ul_ReloadValue = (unsigned int) data[1]; b_InputClockSelection =(unsigned char) data[2]; b_InputClockLevel =(unsigned char) data[3]; b_OutputLevel =(unsigned char) data[4]; b_HardwareGateLevel =(unsigned char) data[5]; +----------------------------------------------------------------------------+ | Output Parameters : - | +----------------------------------------------------------------------------+ | Return Value : 0: No error | | -1: The handle parameter of the board is wrong | | -2: Module selection wrong | | -3: Timer selection wrong | | -4: The module is not a TIMER module | | -5: Timer mode selection is wrong | | -6: Input timer clock selection is wrong | | -7: Selection from input clock level is wrong | | -8: Selection from output clock level is wrong | | -9: Selection from hardware gate level is wrong | +----------------------------------------------------------------------------+ */ int i_APCI1710_InsnConfigInitTimer(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { int i_ReturnValue = 0; unsigned char b_ModulNbr; unsigned char b_TimerNbr; unsigned char b_TimerMode; unsigned int ul_ReloadValue; unsigned char b_InputClockSelection; unsigned char b_InputClockLevel; unsigned char b_OutputLevel; unsigned char b_HardwareGateLevel; /* BEGIN JK 27.10.2003 : Add the possibility to use a 40 Mhz quartz */ unsigned int dw_Test = 0; /* END JK 27.10.2003 : Add the possibility to use a 40 Mhz quartz */ i_ReturnValue = insn->n; b_ModulNbr = (unsigned char) CR_AREF(insn->chanspec); b_TimerNbr = (unsigned char) CR_CHAN(insn->chanspec); b_TimerMode = (unsigned char) data[0]; ul_ReloadValue = (unsigned int) data[1]; b_InputClockSelection = (unsigned char) data[2]; b_InputClockLevel = (unsigned char) data[3]; b_OutputLevel = (unsigned char) data[4]; b_HardwareGateLevel = 
(unsigned char) data[5]; /* Test the module number */ if (b_ModulNbr < 4) { /* Test if 82X54 timer */ if ((devpriv->s_BoardInfos.dw_MolduleConfiguration[b_ModulNbr] & 0xFFFF0000UL) == APCI1710_82X54_TIMER) { /* Test the timer number */ if (b_TimerNbr <= 2) { /* Test the timer mode */ if (b_TimerMode <= 5) { /* BEGIN JK 27.10.2003 : Add the possibility to use a 40 Mhz quartz */ /* Test te imput clock selection */ /* if (((b_TimerNbr == 0) && (b_InputClockSelection == 0)) || ((b_TimerNbr != 0) && ((b_InputClockSelection == 0) || (b_InputClockSelection == 1)))) */ if (((b_TimerNbr == 0) && (b_InputClockSelection == APCI1710_PCI_BUS_CLOCK)) || ((b_TimerNbr == 0) && (b_InputClockSelection == APCI1710_10MHZ)) || ((b_TimerNbr != 0) && ((b_InputClockSelection == APCI1710_PCI_BUS_CLOCK) || (b_InputClockSelection == APCI1710_FRONT_CONNECTOR_INPUT) || (b_InputClockSelection == APCI1710_10MHZ)))) { /* BEGIN JK 27.10.2003 : Add the possibility to use a 40 Mhz quartz */ if (((b_InputClockSelection == APCI1710_10MHZ) && ((devpriv->s_BoardInfos.dw_MolduleConfiguration[b_ModulNbr] & 0x0000FFFFUL) >= 0x3131)) || (b_InputClockSelection != APCI1710_10MHZ)) { /* END JK 27.10.2003 : Add the possibility to use a 40 Mhz quartz */ /* Test the input clock level selection */ if ((b_InputClockLevel == 0) || (b_InputClockLevel == 1)) { /* Test the output clock level selection */ if ((b_OutputLevel == 0) || (b_OutputLevel == 1)) { /* Test the hardware gate level selection */ if ((b_HardwareGateLevel == 0) || (b_HardwareGateLevel == 1)) { /* BEGIN JK 27.10.03 : Add the possibility to use a 40 Mhz quartz */ /* Test if version > 1.1 and clock selection = 10MHz */ if ((b_InputClockSelection == APCI1710_10MHZ) && ((devpriv->s_BoardInfos.dw_MolduleConfiguration[b_ModulNbr] & 0x0000FFFFUL) > 0x3131)) { /* Test if 40MHz quartz on board */ dw_Test = inl(devpriv->s_BoardInfos.ui_Address + (16 + (b_TimerNbr * 4) + (64 * b_ModulNbr))); dw_Test = (dw_Test >> 16) & 1; } else { dw_Test = 1; } /* Test if 
detection OK */ if (dw_Test == 1) { /* END JK 27.10.03 : Add the possibility to use a 40 Mhz quartz */ /* Initialisation OK */ devpriv->s_ModuleInfo[b_ModulNbr].s_82X54ModuleInfo.s_82X54TimerInfo[b_TimerNbr].b_82X54Init = 1; /* Save the input clock selection */ devpriv-> s_ModuleInfo[b_ModulNbr].s_82X54ModuleInfo.s_82X54TimerInfo[b_TimerNbr].b_InputClockSelection = b_InputClockSelection; /* Save the input clock level */ devpriv->s_ModuleInfo[b_ModulNbr].s_82X54ModuleInfo.s_82X54TimerInfo[b_TimerNbr].b_InputClockLevel = ~b_InputClockLevel & 1; /* Save the output level */ devpriv->s_ModuleInfo[b_ModulNbr].s_82X54ModuleInfo.s_82X54TimerInfo[b_TimerNbr].b_OutputLevel = ~b_OutputLevel & 1; /* Save the gate level */ devpriv->s_ModuleInfo[b_ModulNbr].s_82X54ModuleInfo.s_82X54TimerInfo[b_TimerNbr].b_HardwareGateLevel = b_HardwareGateLevel; /* Set the configuration word and disable the timer */ /* BEGIN JK 27.10.03 : Add the possibility to use a 40 Mhz quartz */ /* devpriv->s_ModuleInfo [b_ModulNbr]. s_82X54ModuleInfo. s_82X54TimerInfo [b_TimerNbr]. 
dw_ConfigurationWord = (unsigned int) (((b_HardwareGateLevel << 0) & 0x1) | ((b_InputClockLevel << 1) & 0x2) | (((~b_OutputLevel & 1) << 2) & 0x4) | ((b_InputClockSelection << 4) & 0x10)); */ /* Test if 10MHz selected */ if (b_InputClockSelection == APCI1710_10MHZ) { b_InputClockSelection = 2; } devpriv->s_ModuleInfo[b_ModulNbr].s_82X54ModuleInfo.s_82X54TimerInfo[b_TimerNbr].dw_ConfigurationWord = (unsigned int)(((b_HardwareGateLevel << 0) & 0x1) | ((b_InputClockLevel << 1) & 0x2) | (((~b_OutputLevel & 1) << 2) & 0x4) | ((b_InputClockSelection << 4) & 0x30)); /* END JK 27.10.03 : Add the possibility to use a 40 Mhz quartz */ outl(devpriv->s_ModuleInfo[b_ModulNbr].s_82X54ModuleInfo.s_82X54TimerInfo[b_TimerNbr].dw_ConfigurationWord, devpriv->s_BoardInfos.ui_Address + 32 + (b_TimerNbr * 4) + (64 * b_ModulNbr)); /* Initialise the 82X54 Timer */ outl((unsigned int) b_TimerMode, devpriv->s_BoardInfos.ui_Address + 16 + (b_TimerNbr * 4) + (64 * b_ModulNbr)); /* Write the reload value */ outl(ul_ReloadValue, devpriv->s_BoardInfos.ui_Address + 0 + (b_TimerNbr * 4) + (64 * b_ModulNbr)); /* BEGIN JK 27.10.03 : Add the possibility to use a 40 Mhz quartz */ } /* if (dw_Test == 1) */ else { /* Input timer clock selection is wrong */ i_ReturnValue = -6; } /* if (dw_Test == 1) */ /* END JK 27.10.03 : Add the possibility to use a 40 Mhz quartz */ } /* if ((b_HardwareGateLevel == 0) || (b_HardwareGateLevel == 1)) */ else { /* Selection from hardware gate level is wrong */ DPRINTK("Selection from hardware gate level is wrong\n"); i_ReturnValue = -9; } /* if ((b_HardwareGateLevel == 0) || (b_HardwareGateLevel == 1)) */ } /* if ((b_OutputLevel == 0) || (b_OutputLevel == 1)) */ else { /* Selection from output clock level is wrong */ DPRINTK("Selection from output clock level is wrong\n"); i_ReturnValue = -8; } /* if ((b_OutputLevel == 0) || (b_OutputLevel == 1)) */ } /* if ((b_InputClockLevel == 0) || (b_InputClockLevel == 1)) */ else { /* Selection from input clock level is wrong */ 
DPRINTK("Selection from input clock level is wrong\n"); i_ReturnValue = -7; } /* if ((b_InputClockLevel == 0) || (b_InputClockLevel == 1)) */ } else { /* Input timer clock selection is wrong */ DPRINTK("Input timer clock selection is wrong\n"); i_ReturnValue = -6; } } else { /* Input timer clock selection is wrong */ DPRINTK("Input timer clock selection is wrong\n"); i_ReturnValue = -6; } } /* if ((b_TimerMode >= 0) && (b_TimerMode <= 5)) */ else { /* Timer mode selection is wrong */ DPRINTK("Timer mode selection is wrong\n"); i_ReturnValue = -5; } /* if ((b_TimerMode >= 0) && (b_TimerMode <= 5)) */ } /* if ((b_TimerNbr >= 0) && (b_TimerNbr <= 2)) */ else { /* Timer selection wrong */ DPRINTK("Timer selection wrong\n"); i_ReturnValue = -3; } /* if ((b_TimerNbr >= 0) && (b_TimerNbr <= 2)) */ } else { /* The module is not a TIMER module */ DPRINTK("The module is not a TIMER module\n"); i_ReturnValue = -4; } } else { /* Module number error */ DPRINTK("Module number error\n"); i_ReturnValue = -2; } return i_ReturnValue; } /* +----------------------------------------------------------------------------+ | Function Name : _INT_ i_APCI1710_EnableTimer | | (unsigned char_ b_BoardHandle, | | unsigned char_ b_ModulNbr, | | unsigned char_ b_TimerNbr, | | unsigned char_ b_InterruptEnable) int i_APCI1710_InsnWriteEnableDisableTimer(struct comedi_device *dev,struct comedi_subdevice *s, struct comedi_insn *insn,unsigned int *data) | +----------------------------------------------------------------------------+ | Task : Enable OR Disable the Timer (b_TimerNbr) from selected module | | (b_ModulNbr). You must calling the | | "i_APCI1710_InitTimer" function be for you call this | | function. If you enable the timer interrupt, the timer | | generate a interrupt after the timer value reach | | the zero. 
See function "i_APCI1710_SetBoardIntRoutineX"| +----------------------------------------------------------------------------+ | Input Parameters : unsigned char_ b_BoardHandle : Handle of board | | APCI-1710 | | unsigned char_ b_ModulNbr : Selected module number | | (0 to 3) | | unsigned char_ b_TimerNbr : Timer number to enable | | (0 to 2) | | unsigned char_ b_InterruptEnable : Enable or disable the | | timer interrupt. | | APCI1710_ENABLE : | | Enable the timer interrupt | | APCI1710_DISABLE : | | Disable the timer interrupt| i_ReturnValue=insn->n; b_ModulNbr = (unsigned char) CR_AREF(insn->chanspec); b_TimerNbr = (unsigned char) CR_CHAN(insn->chanspec); b_ActionType = (unsigned char) data[0]; /* enable disable */ +----------------------------------------------------------------------------+ | Output Parameters : - | +----------------------------------------------------------------------------+ | Return Value : 0: No error | | -1: The handle parameter of the board is wrong | | -2: Module selection wrong | | -3: Timer selection wrong | | -4: The module is not a TIMER module | | -5: Timer not initialised see function | | "i_APCI1710_InitTimer" | | -6: Interrupt parameter is wrong | | -7: Interrupt function not initialised. 
| | See function "i_APCI1710_SetBoardIntRoutineX" | +----------------------------------------------------------------------------+ */ int i_APCI1710_InsnWriteEnableDisableTimer(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { int i_ReturnValue = 0; unsigned int dw_DummyRead; unsigned char b_ModulNbr; unsigned char b_TimerNbr; unsigned char b_ActionType; unsigned char b_InterruptEnable; i_ReturnValue = insn->n; b_ModulNbr = (unsigned char) CR_AREF(insn->chanspec); b_TimerNbr = (unsigned char) CR_CHAN(insn->chanspec); b_ActionType = (unsigned char) data[0]; /* enable disable */ /* Test the module number */ if (b_ModulNbr < 4) { /* Test if 82X54 timer */ if ((devpriv->s_BoardInfos.dw_MolduleConfiguration[b_ModulNbr] & 0xFFFF0000UL) == APCI1710_82X54_TIMER) { /* Test the timer number */ if (b_TimerNbr <= 2) { /* Test if timer initialised */ if (devpriv->s_ModuleInfo[b_ModulNbr].s_82X54ModuleInfo.s_82X54TimerInfo[b_TimerNbr].b_82X54Init == 1) { switch (b_ActionType) { case APCI1710_ENABLE: b_InterruptEnable = (unsigned char) data[1]; /* Test the interrupt selection */ if ((b_InterruptEnable == APCI1710_ENABLE) || (b_InterruptEnable == APCI1710_DISABLE)) { if (b_InterruptEnable == APCI1710_ENABLE) { dw_DummyRead = inl(devpriv->s_BoardInfos.ui_Address + 12 + (b_TimerNbr * 4) + (64 * b_ModulNbr)); /* Enable the interrupt */ devpriv->s_ModuleInfo[b_ModulNbr].s_82X54ModuleInfo.s_82X54TimerInfo[b_TimerNbr].dw_ConfigurationWord = devpriv->s_ModuleInfo[b_ModulNbr].s_82X54ModuleInfo.s_82X54TimerInfo[b_TimerNbr].dw_ConfigurationWord | 0x8; outl(devpriv->s_ModuleInfo[b_ModulNbr].s_82X54ModuleInfo.s_82X54TimerInfo[b_TimerNbr].dw_ConfigurationWord, devpriv->s_BoardInfos.ui_Address + 32 + (b_TimerNbr * 4) + (64 * b_ModulNbr)); devpriv->tsk_Current = current; /* Save the current process task structure */ } /* if (b_InterruptEnable == APCI1710_ENABLE) */ else { /* Disable the interrupt */ 
devpriv->s_ModuleInfo[b_ModulNbr].s_82X54ModuleInfo.s_82X54TimerInfo[b_TimerNbr].dw_ConfigurationWord = devpriv->s_ModuleInfo[b_ModulNbr].s_82X54ModuleInfo.s_82X54TimerInfo[b_TimerNbr].dw_ConfigurationWord & 0xF7; outl(devpriv->s_ModuleInfo[b_ModulNbr].s_82X54ModuleInfo.s_82X54TimerInfo[b_TimerNbr].dw_ConfigurationWord, devpriv->s_BoardInfos.ui_Address + 32 + (b_TimerNbr * 4) + (64 * b_ModulNbr)); /* Save the interrupt flag */ devpriv->s_ModuleInfo[b_ModulNbr].s_82X54ModuleInfo.b_InterruptMask = devpriv->s_ModuleInfo[b_ModulNbr].s_82X54ModuleInfo.b_InterruptMask & (0xFF - (1 << b_TimerNbr)); } /* if (b_InterruptEnable == APCI1710_ENABLE) */ /* Test if error occur */ if (i_ReturnValue >= 0) { /* Save the interrupt flag */ devpriv->s_ModuleInfo[b_ModulNbr].s_82X54ModuleInfo.b_InterruptMask = devpriv->s_ModuleInfo[b_ModulNbr].s_82X54ModuleInfo.b_InterruptMask | ((1 & b_InterruptEnable) << b_TimerNbr); /* Enable the timer */ outl(1, devpriv->s_BoardInfos.ui_Address + 44 + (b_TimerNbr * 4) + (64 * b_ModulNbr)); } } else { /* Interrupt parameter is wrong */ DPRINTK("\n"); i_ReturnValue = -6; } break; case APCI1710_DISABLE: /* Test the interrupt flag */ if (((devpriv->s_ModuleInfo[b_ModulNbr].s_82X54ModuleInfo.b_InterruptMask >> b_TimerNbr) & 1) == 1) { /* Disable the interrupt */ devpriv->s_ModuleInfo[b_ModulNbr].s_82X54ModuleInfo.s_82X54TimerInfo[b_TimerNbr]. 
dw_ConfigurationWord = devpriv->s_ModuleInfo[b_ModulNbr].s_82X54ModuleInfo.s_82X54TimerInfo[b_TimerNbr].dw_ConfigurationWord & 0xF7; outl(devpriv->s_ModuleInfo[b_ModulNbr].s_82X54ModuleInfo.s_82X54TimerInfo[b_TimerNbr].dw_ConfigurationWord, devpriv->s_BoardInfos.ui_Address + 32 + (b_TimerNbr * 4) + (64 * b_ModulNbr)); /* Save the interrupt flag */ devpriv->s_ModuleInfo[b_ModulNbr].s_82X54ModuleInfo.b_InterruptMask = devpriv->s_ModuleInfo[b_ModulNbr].s_82X54ModuleInfo.b_InterruptMask & (0xFF - (1 << b_TimerNbr)); } /* Disable the timer */ outl(0, devpriv->s_BoardInfos.ui_Address + 44 + (b_TimerNbr * 4) + (64 * b_ModulNbr)); break; } /* Switch end */ } else { /* Timer not initialised see function */ DPRINTK ("Timer not initialised see function\n"); i_ReturnValue = -5; } } else { /* Timer selection wrong */ DPRINTK("Timer selection wrong\n"); i_ReturnValue = -3; } /* if ((b_TimerNbr >= 0) && (b_TimerNbr <= 2)) */ } else { /* The module is not a TIMER module */ DPRINTK("The module is not a TIMER module\n"); i_ReturnValue = -4; } } else { /* Module number error */ DPRINTK("Module number error\n"); i_ReturnValue = -2; } return i_ReturnValue; } /* +----------------------------------------------------------------------------+ | Function Name : _INT_ i_APCI1710_ReadAllTimerValue | | (unsigned char_ b_BoardHandle, | | unsigned char_ b_ModulNbr, | | PULONG_ pul_TimerValueArray) int i_APCI1710_InsnReadAllTimerValue(struct comedi_device *dev,struct comedi_subdevice *s, struct comedi_insn *insn,unsigned int *data) | +----------------------------------------------------------------------------+ | Task : Return the all timer values from selected timer | | module (b_ModulNbr). 
| +----------------------------------------------------------------------------+ | Input Parameters : unsigned char_ b_BoardHandle : Handle of board | | APCI-1710 | | unsigned char_ b_ModulNbr : Selected module number | | (0 to 3) | +----------------------------------------------------------------------------+ | Output Parameters : PULONG_ pul_TimerValueArray : Timer value array. | | Element 0 contain the timer 0 value. | | Element 1 contain the timer 1 value. | | Element 2 contain the timer 2 value. | +----------------------------------------------------------------------------+ | Return Value : 0: No error | | -1: The handle parameter of the board is wrong | | -2: Module selection wrong | | -3: The module is not a TIMER module | | -4: Timer 0 not initialised see function | | "i_APCI1710_InitTimer" | | -5: Timer 1 not initialised see function | | "i_APCI1710_InitTimer" | | -6: Timer 2 not initialised see function | | "i_APCI1710_InitTimer" | +----------------------------------------------------------------------------+ */ int i_APCI1710_InsnReadAllTimerValue(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { int i_ReturnValue = 0; unsigned char b_ModulNbr, b_ReadType; unsigned int *pul_TimerValueArray; b_ModulNbr = CR_AREF(insn->chanspec); b_ReadType = CR_CHAN(insn->chanspec); pul_TimerValueArray = (unsigned int *) data; i_ReturnValue = insn->n; switch (b_ReadType) { case APCI1710_TIMER_READINTERRUPT: data[0] = devpriv->s_InterruptParameters.s_FIFOInterruptParameters[devpriv->s_InterruptParameters.ui_Read].b_OldModuleMask; data[1] = devpriv->s_InterruptParameters.s_FIFOInterruptParameters[devpriv->s_InterruptParameters.ui_Read].ul_OldInterruptMask; data[2] = devpriv->s_InterruptParameters.s_FIFOInterruptParameters[devpriv->s_InterruptParameters.ui_Read].ul_OldCounterLatchValue; /* Increment the read FIFO */ devpriv->s_InterruptParameters.ui_Read = (devpriv->s_InterruptParameters.ui_Read + 1) % 
APCI1710_SAVE_INTERRUPT; break; case APCI1710_TIMER_READALLTIMER: /* Test the module number */ if (b_ModulNbr < 4) { /* Test if 82X54 timer */ if ((devpriv->s_BoardInfos.dw_MolduleConfiguration[b_ModulNbr] & 0xFFFF0000UL) == APCI1710_82X54_TIMER) { /* Test if timer 0 iniutialised */ if (devpriv->s_ModuleInfo[b_ModulNbr].s_82X54ModuleInfo.s_82X54TimerInfo[0].b_82X54Init == 1) { /* Test if timer 1 iniutialised */ if (devpriv->s_ModuleInfo[b_ModulNbr].s_82X54ModuleInfo.s_82X54TimerInfo[1].b_82X54Init == 1) { /* Test if timer 2 iniutialised */ if (devpriv->s_ModuleInfo[b_ModulNbr].s_82X54ModuleInfo.s_82X54TimerInfo[2].b_82X54Init == 1) { /* Latch all counter */ outl(0x17, devpriv->s_BoardInfos.ui_Address + 12 + (64 * b_ModulNbr)); /* Read the timer 0 value */ pul_TimerValueArray[0] = inl(devpriv->s_BoardInfos.ui_Address + 0 + (64 * b_ModulNbr)); /* Read the timer 1 value */ pul_TimerValueArray[1] = inl(devpriv->s_BoardInfos.ui_Address + 4 + (64 * b_ModulNbr)); /* Read the timer 2 value */ pul_TimerValueArray[2] = inl(devpriv->s_BoardInfos.ui_Address + 8 + (64 * b_ModulNbr)); } else { /* Timer 2 not initialised see function */ DPRINTK("Timer 2 not initialised see function\n"); i_ReturnValue = -6; } } else { /* Timer 1 not initialised see function */ DPRINTK("Timer 1 not initialised see function\n"); i_ReturnValue = -5; } } else { /* Timer 0 not initialised see function */ DPRINTK("Timer 0 not initialised see function\n"); i_ReturnValue = -4; } } else { /* The module is not a TIMER module */ DPRINTK("The module is not a TIMER module\n"); i_ReturnValue = -3; } } else { /* Module number error */ DPRINTK("Module number error\n"); i_ReturnValue = -2; } } /* End of Switch */ return i_ReturnValue; } /* +----------------------------------------------------------------------------+ | Function Name :INT i_APCI1710_InsnBitsTimer(struct comedi_device *dev, struct comedi_subdevice *s,struct comedi_insn *insn,unsigned int *data) | 
 | Task : Read write functions for Timer                                      |
 +----------------------------------------------------------------------------+
 */

/*
 * i_APCI1710_InsnBitsTimer() - comedi INSN_BITS dispatcher for the 82X54
 * timer subdevice.
 *
 * data[0] selects the sub-operation; the module number comes from
 * CR_AREF(insn->chanspec) and the timer number from CR_CHAN(insn->chanspec).
 * On success the helper's result is overwritten with insn->n (comedi
 * convention); a negative helper error code is passed through unchanged.
 */
int i_APCI1710_InsnBitsTimer(struct comedi_device *dev, struct comedi_subdevice *s,
	struct comedi_insn *insn, unsigned int *data)
{
	unsigned char b_BitsType;
	int i_ReturnValue = 0;
	b_BitsType = data[0];
	printk("\n82X54");	/* legacy vendor debug trace (no KERN_ level) */

	switch (b_BitsType) {
	case APCI1710_TIMER_READVALUE:
		/* Read the latched counter value into data[0]. */
		i_ReturnValue = i_APCI1710_ReadTimerValue(dev,
			(unsigned char) CR_AREF(insn->chanspec),
			(unsigned char) CR_CHAN(insn->chanspec),
			(unsigned int *) &data[0]);
		break;

	case APCI1710_TIMER_GETOUTPUTLEVEL:
		/* Read the timer output pin level (0/1) into data[0]. */
		i_ReturnValue = i_APCI1710_GetTimerOutputLevel(dev,
			(unsigned char) CR_AREF(insn->chanspec),
			(unsigned char) CR_CHAN(insn->chanspec),
			(unsigned char *) &data[0]);
		break;

	case APCI1710_TIMER_GETPROGRESSSTATUS:
		/* Read the in-progress flag (0/1) into data[0]. */
		i_ReturnValue = i_APCI1710_GetTimerProgressStatus(dev,
			(unsigned char) CR_AREF(insn->chanspec),
			(unsigned char) CR_CHAN(insn->chanspec),
			(unsigned char *) &data[0]);
		break;

	case APCI1710_TIMER_WRITEVALUE:
		/* Load the timer with the value supplied in data[1]. */
		i_ReturnValue = i_APCI1710_WriteTimerValue(dev,
			(unsigned char) CR_AREF(insn->chanspec),
			(unsigned char) CR_CHAN(insn->chanspec),
			(unsigned int) data[1]);
		break;

	default:
		printk("Bits Config Parameter Wrong\n");
		i_ReturnValue = -1;
	}

	if (i_ReturnValue >= 0)
		i_ReturnValue = insn->n;
	return i_ReturnValue;
}

/*
 * i_APCI1710_ReadTimerValue() - latch and read digital timer @b_TimerNbr
 * (0..2) on module @b_ModulNbr (0..3).
 *
 * Output: *pul_TimerValue receives the latched counter value.
 * Return: 0 on success,
 *         -2 module selection wrong,
 *         -3 timer selection wrong,
 *         -4 the module is not a TIMER module,
 *         -5 timer not initialised (see "i_APCI1710_InitTimer").
 */
int i_APCI1710_ReadTimerValue(struct comedi_device *dev,
	unsigned char b_ModulNbr, unsigned char b_TimerNbr,
	unsigned int *pul_TimerValue)
{
	int i_ReturnValue = 0;

	/* Test the module number */
	if (b_ModulNbr < 4) {
		/* Test if 82X54 timer (module type is in the top 16 bits) */
		if ((devpriv->s_BoardInfos.
				dw_MolduleConfiguration[b_ModulNbr] &
				0xFFFF0000UL) == APCI1710_82X54_TIMER) {
			/* Test the timer number */
			if (b_TimerNbr <= 2) {
				/* Test if timer initialised */
				if (devpriv->
					s_ModuleInfo[b_ModulNbr].
					s_82X54ModuleInfo.
					s_82X54TimerInfo[b_TimerNbr].
					b_82X54Init == 1) {
					/*
					 * Latch the timer value.  Each module
					 * occupies a 64-byte I/O window; the
					 * control register is at offset 12.
					 * (2 << n) | 0xD0 selects counter n —
					 * presumably an 8254-style read-back/
					 * latch command; TODO confirm against
					 * the board documentation.
					 */
					outl((2 << b_TimerNbr) | 0xD0,
						devpriv->s_BoardInfos.
						ui_Address + 12 +
						(64 * b_ModulNbr));

					/* Read the latched counter value */
					*pul_TimerValue =
						inl(devpriv->s_BoardInfos.
						ui_Address + (b_TimerNbr * 4) +
						(64 * b_ModulNbr));
				} else {
					/* Timer not initialised see function */
					DPRINTK("Timer not initialised see function\n");
					i_ReturnValue = -5;
				}
			} else {
				/* Timer selection wrong */
				DPRINTK("Timer selection wrong\n");
				i_ReturnValue = -3;
			}	/* if ((b_TimerNbr >= 0) && (b_TimerNbr <= 2)) */
		} else {
			/* The module is not a TIMER module */
			DPRINTK("The module is not a TIMER module\n");
			i_ReturnValue = -4;
		}
	} else {
		/* Module number error */
		DPRINTK("Module number error\n");
		i_ReturnValue = -2;
	}

	return i_ReturnValue;
}

/*
 * i_APCI1710_GetTimerOutputLevel() - read back the output signal level of
 * timer @b_TimerNbr (0..2) on module @b_ModulNbr (0..3).
 *
 * Output: *pb_OutputLevel is 0 (output low) or 1 (output high).  The raw
 * status bit (bit 7) is XORed with the software polarity recorded at init
 * time in b_OutputLevel.
 * Return: same error codes as i_APCI1710_ReadTimerValue().
 */
int i_APCI1710_GetTimerOutputLevel(struct comedi_device *dev,
	unsigned char b_ModulNbr, unsigned char b_TimerNbr,
	unsigned char *pb_OutputLevel)
{
	int i_ReturnValue = 0;
	unsigned int dw_TimerStatus;

	/* Test the module number */
	if (b_ModulNbr < 4) {
		/* Test if 82X54 timer */
		if ((devpriv->s_BoardInfos.dw_MolduleConfiguration[b_ModulNbr] &
				0xFFFF0000UL) == APCI1710_82X54_TIMER) {
			/* Test the timer number */
			if (b_TimerNbr <= 2) {
				/* Test if timer initialised */
				if (devpriv->s_ModuleInfo[b_ModulNbr].
					s_82X54ModuleInfo.
					s_82X54TimerInfo[b_TimerNbr].
					b_82X54Init == 1) {
					/*
					 * Latch the timer status (command
					 * 0xE0; status registers start at
					 * offset 16 in the module window).
					 */
					outl((2 << b_TimerNbr) | 0xE0,
						devpriv->s_BoardInfos.
						ui_Address + 12 +
						(64 * b_ModulNbr));

					/* Read the timer status */
					dw_TimerStatus =
						inl(devpriv->s_BoardInfos.
						ui_Address + 16 +
						(b_TimerNbr * 4) +
						(64 * b_ModulNbr));

					/* Bit 7 is the raw output level */
					*pb_OutputLevel = (unsigned char)
						(((dw_TimerStatus >> 7) & 1) ^
						devpriv->
						s_ModuleInfo[b_ModulNbr].
						s_82X54ModuleInfo.
						s_82X54TimerInfo[b_TimerNbr].
						b_OutputLevel);
				} else {
					/* Timer not initialised see function */
					DPRINTK("Timer not initialised see function\n");
					i_ReturnValue = -5;
				}
			} else {
				/* Timer selection wrong */
				DPRINTK("Timer selection wrong\n");
				i_ReturnValue = -3;
			}	/* if ((b_TimerNbr >= 0) && (b_TimerNbr <= 2)) */
		} else {
			/* The module is not a TIMER module */
			DPRINTK("The module is not a TIMER module\n");
			i_ReturnValue = -4;
		}
	} else {
		/* Module number error */
		DPRINTK("Module number error\n");
		i_ReturnValue = -2;
	}

	return i_ReturnValue;
}

/*
 * i_APCI1710_GetTimerProgressStatus() - read the progress status of timer
 * @b_TimerNbr (0..2) on module @b_ModulNbr (0..3).
 *
 * Output: *pb_TimerStatus is 0 (timer not in progress) or 1 (timer in
 * progress); taken from bit 8 of the latched status word.
 * Return: same error codes as i_APCI1710_ReadTimerValue().  Note the error
 * paths here intentionally(?) lack the DPRINTK traces the sibling functions
 * have — only comments remain.
 */
int i_APCI1710_GetTimerProgressStatus(struct comedi_device *dev,
	unsigned char b_ModulNbr, unsigned char b_TimerNbr,
	unsigned char *pb_TimerStatus)
{
	int i_ReturnValue = 0;
	unsigned int dw_TimerStatus;

	/* Test the module number */
	if (b_ModulNbr < 4) {
		/* Test if 82X54 timer */
		if ((devpriv->s_BoardInfos.dw_MolduleConfiguration[b_ModulNbr] &
				0xFFFF0000UL) == APCI1710_82X54_TIMER) {
			/* Test the timer number */
			if (b_TimerNbr <= 2) {
				/* Test if timer initialised */
				if (devpriv->s_ModuleInfo[b_ModulNbr].
					s_82X54ModuleInfo.
					s_82X54TimerInfo[b_TimerNbr].
					b_82X54Init == 1) {
					/* Latch the timer status (cmd 0xE0) */
					outl((2 << b_TimerNbr) | 0xE0,
						devpriv->s_BoardInfos.
						ui_Address + 12 +
						(64 * b_ModulNbr));

					/* Read the timer status */
					dw_TimerStatus =
						inl(devpriv->s_BoardInfos.
						ui_Address + 16 +
						(b_TimerNbr * 4) +
						(64 * b_ModulNbr));

					/* Bit 8 is the in-progress flag */
					*pb_TimerStatus = (unsigned char)
						((dw_TimerStatus) >> 8) & 1;
					printk("ProgressStatus : %d",
						*pb_TimerStatus);
				} else {
					/* Timer not initialised see function */
					i_ReturnValue = -5;
				}
			} else {
				/* Timer selection wrong */
				i_ReturnValue = -3;
			}	/* if ((b_TimerNbr >= 0) && (b_TimerNbr <= 2)) */
		} else {
			/* The module is not a TIMER module */
			i_ReturnValue = -4;
		}
	} else {
		/* Module number error */
		i_ReturnValue = -2;
	}

	return i_ReturnValue;
}

/*
 * i_APCI1710_WriteTimerValue() - write @ul_WriteValue into timer
 * @b_TimerNbr (0..2) on module @b_ModulNbr (0..3).
 *
 * The effect of the write depends on the timer mode selected at init time
 * (see the timer mode description table in the board documentation).
 * Return: same error codes as i_APCI1710_ReadTimerValue().
 */
int i_APCI1710_WriteTimerValue(struct comedi_device *dev,
	unsigned char b_ModulNbr, unsigned char b_TimerNbr,
	unsigned int ul_WriteValue)
{
	int i_ReturnValue = 0;

	/* Test the module number */
	if (b_ModulNbr < 4) {
		/* Test if 82X54 timer */
		if ((devpriv->s_BoardInfos.dw_MolduleConfiguration[b_ModulNbr] &
				0xFFFF0000UL) == APCI1710_82X54_TIMER) {
			/* Test the timer number */
			if (b_TimerNbr <= 2) {
				/* Test if timer initialised */
				if (devpriv->s_ModuleInfo[b_ModulNbr].
					s_82X54ModuleInfo.
					s_82X54TimerInfo[b_TimerNbr].
					b_82X54Init == 1) {
					/* Write the value to the counter
					 * register (4 bytes per timer). */
					outl(ul_WriteValue,
						devpriv->s_BoardInfos.
						ui_Address +
						(b_TimerNbr * 4) +
						(64 * b_ModulNbr));
				} else {
					/* Timer not initialised see function */
					DPRINTK("Timer not initialised see function\n");
					i_ReturnValue = -5;
				}
			} else {
				/* Timer selection wrong */
				DPRINTK("Timer selection wrong\n");
				i_ReturnValue = -3;
			}	/* if ((b_TimerNbr >= 0) && (b_TimerNbr <= 2)) */
		} else {
			/* The module is not a TIMER module */
			DPRINTK("The module is not a TIMER module\n");
			i_ReturnValue = -4;
		}
	} else {
		/* Module number error */
		DPRINTK("Module number error\n");
		i_ReturnValue = -2;
	}

	return i_ReturnValue;
}
gpl-2.0
Evervolv/android_kernel_samsung_d2
drivers/w1/slaves/w1_ds2433.c
9093
7508
/*
 * w1_ds2433.c - w1 family 23 (DS2433) driver
 *
 * Copyright (c) 2005 Ben Gardner <bgardner@wabtec.com>
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2.  See the file COPYING for more details.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/device.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/slab.h>
#ifdef CONFIG_W1_SLAVE_DS2433_CRC
#include <linux/crc16.h>

/* crc16() seed and the residue value a block with a valid CRC folds to */
#define CRC16_INIT		0
#define CRC16_VALID		0xb001
#endif

#include "../w1.h"
#include "../w1_int.h"
#include "../w1_family.h"

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Ben Gardner <bgardner@wabtec.com>");
MODULE_DESCRIPTION("w1 family 23 driver for DS2433, 4kb EEPROM");

/* EEPROM geometry: 512 bytes arranged as 16 pages of 32 bytes */
#define W1_EEPROM_SIZE		512
#define W1_PAGE_COUNT		16
#define W1_PAGE_SIZE		32
#define W1_PAGE_BITS		5
#define W1_PAGE_MASK		0x1F

#define W1_F23_TIME		300

/* DS2433 command bytes */
#define W1_F23_READ_EEPROM	0xF0
#define W1_F23_WRITE_SCRATCH	0x0F
#define W1_F23_READ_SCRATCH	0xAA
#define W1_F23_COPY_SCRATCH	0x55

struct w1_f23_data {
	u8	memory[W1_EEPROM_SIZE];	/* shadow copy of the EEPROM */
	u32	validcrc;		/* bitmask: page n cached & CRC-valid */
};

/**
 * Check the file size bounds and adjusts count as needed.
 * This would not be needed if the file size didn't reset to 0 after a write.
 */
static inline size_t w1_f23_fix_count(loff_t off, size_t count, size_t size)
{
	if (off > size)
		return 0;

	if ((off + count) > size)
		return (size - off);

	return count;
}

#ifdef CONFIG_W1_SLAVE_DS2433_CRC
/*
 * Re-read one 32-byte page from the device into the shadow cache unless the
 * cached copy is already CRC-valid.  Returns 0 on success, -EIO on bus error
 * (in which case the whole cache is invalidated).
 */
static int w1_f23_refresh_block(struct w1_slave *sl, struct w1_f23_data *data,
				int block)
{
	u8	wrbuf[3];
	int	off = block * W1_PAGE_SIZE;

	/* Cached copy still valid — nothing to do */
	if (data->validcrc & (1 << block))
		return 0;

	if (w1_reset_select_slave(sl)) {
		data->validcrc = 0;
		return -EIO;
	}

	/* READ_EEPROM takes a little-endian 16-bit start address */
	wrbuf[0] = W1_F23_READ_EEPROM;
	wrbuf[1] = off & 0xff;
	wrbuf[2] = off >> 8;
	w1_write_block(sl->master, wrbuf, 3);
	w1_read_block(sl->master, &data->memory[off], W1_PAGE_SIZE);

	/* cache the block if the CRC is valid */
	if (crc16(CRC16_INIT, &data->memory[off], W1_PAGE_SIZE) == CRC16_VALID)
		data->validcrc |= (1 << block);

	return 0;
}
#endif	/* CONFIG_W1_SLAVE_DS2433_CRC */

/*
 * sysfs "eeprom" read.  In CRC mode the read is served from the page cache
 * (refreshing stale pages); otherwise it is a direct EEPROM read.
 * The master mutex serializes bus access.
 */
static ssize_t w1_f23_read_bin(struct file *filp, struct kobject *kobj,
			       struct bin_attribute *bin_attr,
			       char *buf, loff_t off, size_t count)
{
	struct w1_slave *sl = kobj_to_w1_slave(kobj);
#ifdef CONFIG_W1_SLAVE_DS2433_CRC
	struct w1_f23_data *data = sl->family_data;
	int i, min_page, max_page;
#else
	u8 wrbuf[3];
#endif

	if ((count = w1_f23_fix_count(off, count, W1_EEPROM_SIZE)) == 0)
		return 0;

	mutex_lock(&sl->master->mutex);

#ifdef CONFIG_W1_SLAVE_DS2433_CRC

	/* Refresh every page the requested range touches */
	min_page = (off >> W1_PAGE_BITS);
	max_page = (off + count - 1) >> W1_PAGE_BITS;
	for (i = min_page; i <= max_page; i++) {
		if (w1_f23_refresh_block(sl, data, i)) {
			count = -EIO;
			goto out_up;
		}
	}
	memcpy(buf, &data->memory[off], count);

#else	/* CONFIG_W1_SLAVE_DS2433_CRC */

	/* read directly from the EEPROM */
	if (w1_reset_select_slave(sl)) {
		count = -EIO;
		goto out_up;
	}

	wrbuf[0] = W1_F23_READ_EEPROM;
	wrbuf[1] = off & 0xff;
	wrbuf[2] = off >> 8;
	w1_write_block(sl->master, wrbuf, 3);
	w1_read_block(sl->master, buf, count);

#endif	/* CONFIG_W1_SLAVE_DS2433_CRC */

out_up:
	mutex_unlock(&sl->master->mutex);

	return count;
}

/**
 * Writes to the scratchpad and reads it back for verification.
 * Then copies the scratchpad to EEPROM.
 * The data must be on one page.
 * The master must be locked.
 *
 * @param sl	The slave structure
 * @param addr	Address for the write
 * @param len   length must be <= (W1_PAGE_SIZE - (addr & W1_PAGE_MASK))
 * @param data	The data to write
 * @return	0=Success -1=failure
 */
static int w1_f23_write(struct w1_slave *sl, int addr, int len, const u8 *data)
{
#ifdef CONFIG_W1_SLAVE_DS2433_CRC
	struct w1_f23_data *f23 = sl->family_data;
#endif
	u8 wrbuf[4];
	u8 rdbuf[W1_PAGE_SIZE + 3];
	/* ES byte: offset of the last byte written within the page */
	u8 es = (addr + len - 1) & 0x1f;

	/* Write the data to the scratchpad */
	if (w1_reset_select_slave(sl))
		return -1;

	wrbuf[0] = W1_F23_WRITE_SCRATCH;
	wrbuf[1] = addr & 0xff;
	wrbuf[2] = addr >> 8;

	w1_write_block(sl->master, wrbuf, 3);
	w1_write_block(sl->master, data, len);

	/* Read the scratchpad and verify */
	if (w1_reset_select_slave(sl))
		return -1;

	w1_write_8(sl->master, W1_F23_READ_SCRATCH);
	/* rdbuf: TA1, TA2, E/S, then the data bytes */
	w1_read_block(sl->master, rdbuf, len + 3);

	/* Compare what was read against the data written */
	if ((rdbuf[0] != wrbuf[1]) || (rdbuf[1] != wrbuf[2]) ||
	    (rdbuf[2] != es) || (memcmp(data, &rdbuf[3], len) != 0))
		return -1;

	/* Copy the scratchpad to EEPROM (auth pattern = TA1, TA2, E/S) */
	if (w1_reset_select_slave(sl))
		return -1;

	wrbuf[0] = W1_F23_COPY_SCRATCH;
	wrbuf[3] = es;
	w1_write_block(sl->master, wrbuf, 4);

	/* Sleep for 5 ms to wait for the write to complete */
	msleep(5);

	/* Reset the bus to wake up the EEPROM (this may not be needed) */
	w1_reset_bus(sl->master);

#ifdef CONFIG_W1_SLAVE_DS2433_CRC
	/* The page content changed — drop it from the CRC cache */
	f23->validcrc &= ~(1 << (addr >> W1_PAGE_BITS));
#endif
	return 0;
}

/*
 * sysfs "eeprom" write.  Splits the request into per-page chunks (the device
 * scratchpad covers one page).  In CRC mode only whole, CRC-valid pages may
 * be written.
 */
static ssize_t w1_f23_write_bin(struct file *filp, struct kobject *kobj,
				struct bin_attribute *bin_attr,
				char *buf, loff_t off, size_t count)
{
	struct w1_slave *sl = kobj_to_w1_slave(kobj);
	int addr, len, idx;

	if ((count = w1_f23_fix_count(off, count, W1_EEPROM_SIZE)) == 0)
		return 0;

#ifdef CONFIG_W1_SLAVE_DS2433_CRC
	/* can only write full blocks in cached mode */
	if ((off & W1_PAGE_MASK) || (count & W1_PAGE_MASK)) {
		dev_err(&sl->dev, "invalid offset/count off=%d cnt=%zd\n",
			(int)off, count);
		return -EINVAL;
	}

	/* make sure the block CRCs are valid */
	for (idx = 0; idx < count; idx += W1_PAGE_SIZE) {
		if (crc16(CRC16_INIT, &buf[idx], W1_PAGE_SIZE) != CRC16_VALID) {
			dev_err(&sl->dev, "bad CRC at offset %d\n", (int)off);
			return -EINVAL;
		}
	}
#endif	/* CONFIG_W1_SLAVE_DS2433_CRC */

	mutex_lock(&sl->master->mutex);

	/* Can only write data to one page at a time */
	idx = 0;
	while (idx < count) {
		addr = off + idx;
		len = W1_PAGE_SIZE - (addr & W1_PAGE_MASK);
		if (len > (count - idx))
			len = count - idx;

		if (w1_f23_write(sl, addr, len, &buf[idx]) < 0) {
			count = -EIO;
			goto out_up;
		}
		idx += len;
	}

out_up:
	mutex_unlock(&sl->master->mutex);

	return count;
}

static struct bin_attribute w1_f23_bin_attr = {
	.attr = {
		.name = "eeprom",
		.mode = S_IRUGO | S_IWUSR,
	},
	.size = W1_EEPROM_SIZE,
	.read = w1_f23_read_bin,
	.write = w1_f23_write_bin,
};

/* Allocate the per-slave page cache (CRC mode) and expose the sysfs file */
static int w1_f23_add_slave(struct w1_slave *sl)
{
	int err;
#ifdef CONFIG_W1_SLAVE_DS2433_CRC
	struct w1_f23_data *data;

	data = kzalloc(sizeof(struct w1_f23_data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;
	sl->family_data = data;

#endif	/* CONFIG_W1_SLAVE_DS2433_CRC */

	err = sysfs_create_bin_file(&sl->dev.kobj, &w1_f23_bin_attr);

#ifdef CONFIG_W1_SLAVE_DS2433_CRC
	if (err)
		kfree(data);
#endif	/* CONFIG_W1_SLAVE_DS2433_CRC */

	return err;
}

static void w1_f23_remove_slave(struct w1_slave *sl)
{
#ifdef CONFIG_W1_SLAVE_DS2433_CRC
	kfree(sl->family_data);
	sl->family_data = NULL;
#endif	/* CONFIG_W1_SLAVE_DS2433_CRC */
	sysfs_remove_bin_file(&sl->dev.kobj, &w1_f23_bin_attr);
}

static struct w1_family_ops w1_f23_fops = {
	.add_slave      = w1_f23_add_slave,
	.remove_slave   = w1_f23_remove_slave,
};

static struct w1_family w1_family_23 = {
	.fid = W1_EEPROM_DS2433,
	.fops = &w1_f23_fops,
};

static int __init w1_f23_init(void)
{
	return w1_register_family(&w1_family_23);
}

static void __exit w1_f23_fini(void)
{
	w1_unregister_family(&w1_family_23);
}

module_init(w1_f23_init);
module_exit(w1_f23_fini);
gpl-2.0
UnknownzD/I9103_ZSLE6_Kernel
arch/arm/mach-shmobile/clock-sh7367.c
134
12642
/*
 * SH7367 clock framework support
 *
 * Copyright (C) 2010  Magnus Damm
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/sh_clk.h>
#include <mach/common.h>
#include <asm/clkdev.h>

/* SH7367 registers */
#define RTFRQCR		0xe6150000
#define SYFRQCR		0xe6150004
#define CMFRQCR		0xe61500E0
#define VCLKCR1		0xe6150008
#define VCLKCR2		0xe615000C
#define VCLKCR3		0xe615001C
#define SCLKACR		0xe6150010
#define SCLKBCR		0xe6150014
#define SUBUSBCKCR	0xe6158080
#define SPUCKCR		0xe6150084
#define MSUCKCR		0xe6150088
#define MVI3CKCR	0xe6150090
#define VOUCKCR		0xe6150094
#define MFCK1CR		0xe6150098
#define MFCK2CR		0xe615009C
#define PLLC1CR		0xe6150028
#define PLLC2CR		0xe615002C
#define RTMSTPCR0	0xe6158030
#define RTMSTPCR2	0xe6158038
#define SYMSTPCR0	0xe6158040
#define SYMSTPCR2	0xe6158048
#define CMMSTPCR0	0xe615804c

/* Fixed 32 KHz root clock from EXTALR pin */
static struct clk r_clk = {
	.rate           = 32768,
};

/*
 * 26MHz default rate for the EXTALB1 root input clock.
 * If needed, reset this with clk_set_rate() from the platform code.
 */
struct clk sh7367_extalb1_clk = {
	.rate		= 26666666,
};

/*
 * 48MHz default rate for the EXTAL2 root input clock.
 * If needed, reset this with clk_set_rate() from the platform code.
 */
struct clk sh7367_extal2_clk = {
	.rate		= 48000000,
};

/* A fixed divide-by-2 block */
static unsigned long div2_recalc(struct clk *clk)
{
	return clk->parent->rate / 2;
}

static struct clk_ops div2_clk_ops = {
	.recalc		= div2_recalc,
};

/* Divide extalb1 by two */
static struct clk extalb1_div2_clk = {
	.ops		= &div2_clk_ops,
	.parent		= &sh7367_extalb1_clk,
};

/* Divide extal2 by two */
static struct clk extal2_div2_clk = {
	.ops		= &div2_clk_ops,
	.parent		= &sh7367_extal2_clk,
};

/*
 * PLLC1: rate = parent * 2 * (multiplier field + 1) when enabled
 * (PLLC1CR bit 14), otherwise passes the parent rate through.
 * The multiplier field lives in RTFRQCR[29:24].
 */
static unsigned long pllc1_recalc(struct clk *clk)
{
	unsigned long mult = 1;

	if (__raw_readl(PLLC1CR) & (1 << 14))
		mult = (((__raw_readl(RTFRQCR) >> 24) & 0x3f) + 1) * 2;

	return clk->parent->rate * mult;
}

static struct clk_ops pllc1_clk_ops = {
	.recalc		= pllc1_recalc,
};

static struct clk pllc1_clk = {
	.ops		= &pllc1_clk_ops,
	.flags		= CLK_ENABLE_ON_INIT,
	.parent		= &extalb1_div2_clk,
};

/* Divide PLLC1 by two */
static struct clk pllc1_div2_clk = {
	.ops		= &div2_clk_ops,
	.parent		= &pllc1_clk,
};

/*
 * PLLC2: like PLLC1 but both the enable bit (31) and the multiplier
 * field (29:24) are in PLLC2CR itself.
 */
static unsigned long pllc2_recalc(struct clk *clk)
{
	unsigned long mult = 1;

	if (__raw_readl(PLLC2CR) & (1 << 31))
		mult = (((__raw_readl(PLLC2CR) >> 24) & 0x3f) + 1) * 2;

	return clk->parent->rate * mult;
}

static struct clk_ops pllc2_clk_ops = {
	.recalc		= pllc2_recalc,
};

static struct clk pllc2_clk = {
	.ops		= &pllc2_clk_ops,
	.flags		= CLK_ENABLE_ON_INIT,
	.parent		= &extalb1_div2_clk,
};

/* Root and PLL clocks registered first by sh7367_clock_init() */
static struct clk *main_clks[] = {
	&r_clk,
	&sh7367_extalb1_clk,
	&sh7367_extal2_clk,
	&extalb1_div2_clk,
	&extal2_div2_clk,
	&pllc1_clk,
	&pllc1_div2_clk,
	&pllc2_clk,
};

static void div4_kick(struct clk *clk)
{
	unsigned long value;

	/* set KICK bit in SYFRQCR to update hardware setting */
	value = __raw_readl(SYFRQCR);
	value |= (1 << 31);
	__raw_writel(value, SYFRQCR);
}

/* Divisor table for the DIV4 clocks; 0 marks unusable encodings */
static int divisors[] = { 2, 3, 4, 6, 8, 12, 16, 18,
			  24, 32, 36, 48, 0, 72, 0, 0 };

static struct clk_div_mult_table div4_div_mult_table = {
	.divisors = divisors,
	.nr_divisors = ARRAY_SIZE(divisors),
};

static struct clk_div4_table div4_table = {
	.div_mult_table = &div4_div_mult_table,
	.kick = div4_kick,
};

enum { DIV4_I, DIV4_G, DIV4_S, DIV4_B,
       DIV4_ZX, DIV4_ZT, DIV4_Z, DIV4_ZD, DIV4_HP,
       DIV4_ZS, DIV4_ZB, DIV4_ZB3, DIV4_CP, DIV4_NR };

#define DIV4(_reg, _bit, _mask, _flags) \
	SH_CLK_DIV4(&pllc1_clk, _reg, _bit, _mask, _flags)

static struct clk div4_clks[DIV4_NR] = {
	[DIV4_I] = DIV4(RTFRQCR, 20, 0x6fff, CLK_ENABLE_ON_INIT),
	[DIV4_G] = DIV4(RTFRQCR, 16, 0x6fff, CLK_ENABLE_ON_INIT),
	[DIV4_S] = DIV4(RTFRQCR, 12, 0x6fff, CLK_ENABLE_ON_INIT),
	[DIV4_B] = DIV4(RTFRQCR, 8, 0x6fff, CLK_ENABLE_ON_INIT),
	[DIV4_ZX] = DIV4(SYFRQCR, 20, 0x6fff, 0),
	[DIV4_ZT] = DIV4(SYFRQCR, 16, 0x6fff, 0),
	[DIV4_Z] = DIV4(SYFRQCR, 12, 0x6fff, 0),
	[DIV4_ZD] = DIV4(SYFRQCR, 8, 0x6fff, 0),
	[DIV4_HP] = DIV4(SYFRQCR, 4, 0x6fff, 0),
	[DIV4_ZS] = DIV4(CMFRQCR, 12, 0x6fff, 0),
	[DIV4_ZB] = DIV4(CMFRQCR, 8, 0x6fff, 0),
	[DIV4_ZB3] = DIV4(CMFRQCR, 4, 0x6fff, 0),
	[DIV4_CP] = DIV4(CMFRQCR, 0, 0x6fff, 0),
};

enum { DIV6_SUB, DIV6_SIUA, DIV6_SIUB, DIV6_MSU, DIV6_SPU,
       DIV6_MVI3, DIV6_MF1, DIV6_MF2,
       DIV6_VCK1, DIV6_VCK2, DIV6_VCK3, DIV6_VOU,
       DIV6_NR };

static struct clk div6_clks[DIV6_NR] = {
	[DIV6_SUB] = SH_CLK_DIV6(&sh7367_extal2_clk, SUBUSBCKCR, 0),
	[DIV6_SIUA] = SH_CLK_DIV6(&pllc1_div2_clk, SCLKACR, 0),
	[DIV6_SIUB] = SH_CLK_DIV6(&pllc1_div2_clk, SCLKBCR, 0),
	[DIV6_MSU] = SH_CLK_DIV6(&pllc1_div2_clk, MSUCKCR, 0),
	[DIV6_SPU] = SH_CLK_DIV6(&pllc1_div2_clk, SPUCKCR, 0),
	[DIV6_MVI3] = SH_CLK_DIV6(&pllc1_div2_clk, MVI3CKCR, 0),
	[DIV6_MF1] = SH_CLK_DIV6(&pllc1_div2_clk, MFCK1CR, 0),
	[DIV6_MF2] = SH_CLK_DIV6(&pllc1_div2_clk, MFCK2CR, 0),
	[DIV6_VCK1] = SH_CLK_DIV6(&pllc1_div2_clk, VCLKCR1, 0),
	[DIV6_VCK2] = SH_CLK_DIV6(&pllc1_div2_clk, VCLKCR2, 0),
	[DIV6_VCK3] = SH_CLK_DIV6(&pllc1_div2_clk, VCLKCR3, 0),
	[DIV6_VOU] = SH_CLK_DIV6(&pllc1_div2_clk, VOUCKCR, 0),
};

/* Module-stop (gate) clocks, grouped by MSTP control register */
enum { RTMSTP001,
       RTMSTP231, RTMSTP230, RTMSTP229, RTMSTP228, RTMSTP226,
       RTMSTP216, RTMSTP206, RTMSTP205, RTMSTP201,
       SYMSTP023, SYMSTP007, SYMSTP006, SYMSTP004,
       SYMSTP003, SYMSTP002, SYMSTP001, SYMSTP000,
       SYMSTP231, SYMSTP229, SYMSTP225, SYMSTP223, SYMSTP222,
       SYMSTP215, SYMSTP214, SYMSTP213, SYMSTP211,
       CMMSTP003,
       MSTP_NR };

#define MSTP(_parent, _reg, _bit, _flags) \
	SH_CLK_MSTP32(_parent, _reg, _bit, _flags)

static struct clk mstp_clks[MSTP_NR] = {
	[RTMSTP001] = MSTP(&div6_clks[DIV6_SUB], RTMSTPCR0, 1, 0), /* IIC2 */
	[RTMSTP231] = MSTP(&div4_clks[DIV4_B], RTMSTPCR2, 31, 0), /* VEU3 */
	[RTMSTP230] = MSTP(&div4_clks[DIV4_B], RTMSTPCR2, 30, 0), /* VEU2 */
	[RTMSTP229] = MSTP(&div4_clks[DIV4_B], RTMSTPCR2, 29, 0), /* VEU1 */
	[RTMSTP228] = MSTP(&div4_clks[DIV4_B], RTMSTPCR2, 28, 0), /* VEU0 */
	[RTMSTP226] = MSTP(&div4_clks[DIV4_B], RTMSTPCR2, 26, 0), /* VEU2H */
	[RTMSTP216] = MSTP(&div6_clks[DIV6_SUB], RTMSTPCR2, 16, 0), /* IIC0 */
	[RTMSTP206] = MSTP(&div4_clks[DIV4_B], RTMSTPCR2, 6, 0), /* JPU */
	[RTMSTP205] = MSTP(&div6_clks[DIV6_VOU], RTMSTPCR2, 5, 0), /* VOU */
	[RTMSTP201] = MSTP(&div4_clks[DIV4_B], RTMSTPCR2, 1, 0), /* VPU */
	[SYMSTP023] = MSTP(&div6_clks[DIV6_SPU], SYMSTPCR0, 23, 0), /* SPU1 */
	[SYMSTP007] = MSTP(&div6_clks[DIV6_SUB], SYMSTPCR0, 7, 0), /* SCIFA5 */
	[SYMSTP006] = MSTP(&div6_clks[DIV6_SUB], SYMSTPCR0, 6, 0), /* SCIFB */
	[SYMSTP004] = MSTP(&div6_clks[DIV6_SUB], SYMSTPCR0, 4, 0), /* SCIFA0 */
	[SYMSTP003] = MSTP(&div6_clks[DIV6_SUB], SYMSTPCR0, 3, 0), /* SCIFA1 */
	[SYMSTP002] = MSTP(&div6_clks[DIV6_SUB], SYMSTPCR0, 2, 0), /* SCIFA2 */
	[SYMSTP001] = MSTP(&div6_clks[DIV6_SUB], SYMSTPCR0, 1, 0), /* SCIFA3 */
	[SYMSTP000] = MSTP(&div6_clks[DIV6_SUB], SYMSTPCR0, 0, 0), /* SCIFA4 */
	[SYMSTP231] = MSTP(&div6_clks[DIV6_SUB], SYMSTPCR2, 31, 0), /* SIU */
	[SYMSTP229] = MSTP(&r_clk, SYMSTPCR2, 29, 0), /* CMT10 */
	[SYMSTP225] = MSTP(&div6_clks[DIV6_SUB], SYMSTPCR2, 25, 0), /* IRDA */
	[SYMSTP223] = MSTP(&div6_clks[DIV6_SUB], SYMSTPCR2, 23, 0), /* IIC1 */
	[SYMSTP222] = MSTP(&div6_clks[DIV6_SUB], SYMSTPCR2, 22, 0), /* USBHS */
	[SYMSTP215] = MSTP(&div4_clks[DIV4_HP], SYMSTPCR2, 15, 0), /* FLCTL */
	[SYMSTP214] = MSTP(&div4_clks[DIV4_HP], SYMSTPCR2, 14, 0), /* SDHI0 */
	[SYMSTP213] = MSTP(&div4_clks[DIV4_HP], SYMSTPCR2, 13, 0), /* SDHI1 */
	[SYMSTP211] = MSTP(&div4_clks[DIV4_HP], SYMSTPCR2, 11, 0), /* SDHI2 */
	[CMMSTP003] = MSTP(&r_clk, CMMSTPCR0, 3, 0), /* KEYSC */
};

#define CLKDEV_CON_ID(_id, _clk) { .con_id = _id, .clk = _clk }
#define CLKDEV_DEV_ID(_id, _clk) { .dev_id = _id, .clk = _clk }

static struct clk_lookup lookups[] = {
	/* main clocks */
	CLKDEV_CON_ID("r_clk", &r_clk),
	CLKDEV_CON_ID("extalb1", &sh7367_extalb1_clk),
	CLKDEV_CON_ID("extal2", &sh7367_extal2_clk),
	CLKDEV_CON_ID("extalb1_div2_clk", &extalb1_div2_clk),
	CLKDEV_CON_ID("extal2_div2_clk", &extal2_div2_clk),
	CLKDEV_CON_ID("pllc1_clk", &pllc1_clk),
	CLKDEV_CON_ID("pllc1_div2_clk", &pllc1_div2_clk),
	CLKDEV_CON_ID("pllc2_clk", &pllc2_clk),

	/* DIV4 clocks (note: DIV4_S has no lookup entry here) */
	CLKDEV_CON_ID("i_clk", &div4_clks[DIV4_I]),
	CLKDEV_CON_ID("g_clk", &div4_clks[DIV4_G]),
	CLKDEV_CON_ID("b_clk", &div4_clks[DIV4_B]),
	CLKDEV_CON_ID("zx_clk", &div4_clks[DIV4_ZX]),
	CLKDEV_CON_ID("zt_clk", &div4_clks[DIV4_ZT]),
	CLKDEV_CON_ID("z_clk", &div4_clks[DIV4_Z]),
	CLKDEV_CON_ID("zd_clk", &div4_clks[DIV4_ZD]),
	CLKDEV_CON_ID("hp_clk", &div4_clks[DIV4_HP]),
	CLKDEV_CON_ID("zs_clk", &div4_clks[DIV4_ZS]),
	CLKDEV_CON_ID("zb_clk", &div4_clks[DIV4_ZB]),
	CLKDEV_CON_ID("zb3_clk", &div4_clks[DIV4_ZB3]),
	CLKDEV_CON_ID("cp_clk", &div4_clks[DIV4_CP]),

	/* DIV6 clocks */
	CLKDEV_CON_ID("sub_clk", &div6_clks[DIV6_SUB]),
	CLKDEV_CON_ID("siua_clk", &div6_clks[DIV6_SIUA]),
	CLKDEV_CON_ID("siub_clk", &div6_clks[DIV6_SIUB]),
	CLKDEV_CON_ID("msu_clk", &div6_clks[DIV6_MSU]),
	CLKDEV_CON_ID("spu_clk", &div6_clks[DIV6_SPU]),
	CLKDEV_CON_ID("mvi3_clk", &div6_clks[DIV6_MVI3]),
	CLKDEV_CON_ID("mf1_clk", &div6_clks[DIV6_MF1]),
	CLKDEV_CON_ID("mf2_clk", &div6_clks[DIV6_MF2]),
	CLKDEV_CON_ID("vck1_clk", &div6_clks[DIV6_VCK1]),
	CLKDEV_CON_ID("vck2_clk", &div6_clks[DIV6_VCK2]),
	CLKDEV_CON_ID("vck3_clk", &div6_clks[DIV6_VCK3]),
	CLKDEV_CON_ID("vou_clk", &div6_clks[DIV6_VOU]),

	/* MSTP32 clocks */
	CLKDEV_DEV_ID("i2c-sh_mobile.2", &mstp_clks[RTMSTP001]), /* IIC2 */
	CLKDEV_DEV_ID("uio_pdrv_genirq.4", &mstp_clks[RTMSTP231]), /* VEU3 */
	CLKDEV_DEV_ID("uio_pdrv_genirq.3", &mstp_clks[RTMSTP230]), /* VEU2 */
	CLKDEV_DEV_ID("uio_pdrv_genirq.2", &mstp_clks[RTMSTP229]), /* VEU1 */
	CLKDEV_DEV_ID("uio_pdrv_genirq.1", &mstp_clks[RTMSTP228]), /* VEU0 */
	CLKDEV_DEV_ID("uio_pdrv_genirq.5", &mstp_clks[RTMSTP226]), /* VEU2H */
	CLKDEV_DEV_ID("i2c-sh_mobile.0", &mstp_clks[RTMSTP216]), /* IIC0 */
	CLKDEV_DEV_ID("uio_pdrv_genirq.6", &mstp_clks[RTMSTP206]), /* JPU */
	CLKDEV_DEV_ID("sh-vou", &mstp_clks[RTMSTP205]), /* VOU */
	CLKDEV_DEV_ID("uio_pdrv_genirq.0", &mstp_clks[RTMSTP201]), /* VPU */
	CLKDEV_DEV_ID("uio_pdrv_genirq.7", &mstp_clks[SYMSTP023]), /* SPU1 */
	CLKDEV_DEV_ID("sh-sci.5", &mstp_clks[SYMSTP007]), /* SCIFA5 */
	CLKDEV_DEV_ID("sh-sci.6", &mstp_clks[SYMSTP006]), /* SCIFB */
	CLKDEV_DEV_ID("sh-sci.0", &mstp_clks[SYMSTP004]), /* SCIFA0 */
	CLKDEV_DEV_ID("sh-sci.1", &mstp_clks[SYMSTP003]), /* SCIFA1 */
	CLKDEV_DEV_ID("sh-sci.2", &mstp_clks[SYMSTP002]), /* SCIFA2 */
	CLKDEV_DEV_ID("sh-sci.3", &mstp_clks[SYMSTP001]), /* SCIFA3 */
	CLKDEV_DEV_ID("sh-sci.4", &mstp_clks[SYMSTP000]), /* SCIFA4 */
	CLKDEV_DEV_ID("sh_siu", &mstp_clks[SYMSTP231]), /* SIU */
	CLKDEV_CON_ID("cmt1", &mstp_clks[SYMSTP229]), /* CMT10 */
	CLKDEV_DEV_ID("sh_irda", &mstp_clks[SYMSTP225]), /* IRDA */
	CLKDEV_DEV_ID("i2c-sh_mobile.1", &mstp_clks[SYMSTP223]), /* IIC1 */
	CLKDEV_DEV_ID("r8a66597_hcd.0", &mstp_clks[SYMSTP222]), /* USBHS */
	CLKDEV_DEV_ID("r8a66597_udc.0", &mstp_clks[SYMSTP222]), /* USBHS */
	CLKDEV_DEV_ID("sh_flctl", &mstp_clks[SYMSTP215]), /* FLCTL */
	CLKDEV_DEV_ID("sh_mobile_sdhi.0", &mstp_clks[SYMSTP214]), /* SDHI0 */
	CLKDEV_DEV_ID("sh_mobile_sdhi.1", &mstp_clks[SYMSTP213]), /* SDHI1 */
	CLKDEV_DEV_ID("sh_mobile_sdhi.2", &mstp_clks[SYMSTP211]), /* SDHI2 */
	CLKDEV_DEV_ID("sh_keysc.0", &mstp_clks[CMMSTP003]), /* KEYSC */
};

/*
 * Register all SH7367 clocks with the SH clock framework and publish the
 * clkdev lookup table.  Registration is chained on the first error; any
 * failure is fatal at boot (panic).
 */
void __init sh7367_clock_init(void)
{
	int k, ret = 0;

	for (k = 0; !ret && (k < ARRAY_SIZE(main_clks)); k++)
		ret = clk_register(main_clks[k]);

	if (!ret)
		ret = sh_clk_div4_register(div4_clks, DIV4_NR, &div4_table);

	if (!ret)
		ret = sh_clk_div6_register(div6_clks, DIV6_NR);

	if (!ret)
		ret = sh_clk_mstp32_register(mstp_clks, MSTP_NR);

	clkdev_add_table(lookups, ARRAY_SIZE(lookups));

	if (!ret)
		clk_init();
	else
		panic("failed to setup sh7367 clocks\n");
}
gpl-2.0
ndmsystems/linux-2.6.36
arch/powerpc/sysdev/mpic.c
134
44266
/* * arch/powerpc/kernel/mpic.c * * Driver for interrupt controllers following the OpenPIC standard, the * common implementation beeing IBM's MPIC. This driver also can deal * with various broken implementations of this HW. * * Copyright (C) 2004 Benjamin Herrenschmidt, IBM Corp. * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of this archive * for more details. */ #undef DEBUG #undef DEBUG_IPI #undef DEBUG_IRQ #undef DEBUG_LOW #include <linux/types.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/irq.h> #include <linux/smp.h> #include <linux/interrupt.h> #include <linux/bootmem.h> #include <linux/spinlock.h> #include <linux/pci.h> #include <linux/slab.h> #include <asm/ptrace.h> #include <asm/signal.h> #include <asm/io.h> #include <asm/pgtable.h> #include <asm/irq.h> #include <asm/machdep.h> #include <asm/mpic.h> #include <asm/smp.h> #include "mpic.h" #ifdef DEBUG #define DBG(fmt...) printk(fmt) #else #define DBG(fmt...) 
#endif static struct mpic *mpics; static struct mpic *mpic_primary; static DEFINE_RAW_SPINLOCK(mpic_lock); #ifdef CONFIG_PPC32 /* XXX for now */ #ifdef CONFIG_IRQ_ALL_CPUS #define distribute_irqs (1) #else #define distribute_irqs (0) #endif #endif #ifdef CONFIG_MPIC_WEIRD static u32 mpic_infos[][MPIC_IDX_END] = { [0] = { /* Original OpenPIC compatible MPIC */ MPIC_GREG_BASE, MPIC_GREG_FEATURE_0, MPIC_GREG_GLOBAL_CONF_0, MPIC_GREG_VENDOR_ID, MPIC_GREG_IPI_VECTOR_PRI_0, MPIC_GREG_IPI_STRIDE, MPIC_GREG_SPURIOUS, MPIC_GREG_TIMER_FREQ, MPIC_TIMER_BASE, MPIC_TIMER_STRIDE, MPIC_TIMER_CURRENT_CNT, MPIC_TIMER_BASE_CNT, MPIC_TIMER_VECTOR_PRI, MPIC_TIMER_DESTINATION, MPIC_CPU_BASE, MPIC_CPU_STRIDE, MPIC_CPU_IPI_DISPATCH_0, MPIC_CPU_IPI_DISPATCH_STRIDE, MPIC_CPU_CURRENT_TASK_PRI, MPIC_CPU_WHOAMI, MPIC_CPU_INTACK, MPIC_CPU_EOI, MPIC_CPU_MCACK, MPIC_IRQ_BASE, MPIC_IRQ_STRIDE, MPIC_IRQ_VECTOR_PRI, MPIC_VECPRI_VECTOR_MASK, MPIC_VECPRI_POLARITY_POSITIVE, MPIC_VECPRI_POLARITY_NEGATIVE, MPIC_VECPRI_SENSE_LEVEL, MPIC_VECPRI_SENSE_EDGE, MPIC_VECPRI_POLARITY_MASK, MPIC_VECPRI_SENSE_MASK, MPIC_IRQ_DESTINATION }, [1] = { /* Tsi108/109 PIC */ TSI108_GREG_BASE, TSI108_GREG_FEATURE_0, TSI108_GREG_GLOBAL_CONF_0, TSI108_GREG_VENDOR_ID, TSI108_GREG_IPI_VECTOR_PRI_0, TSI108_GREG_IPI_STRIDE, TSI108_GREG_SPURIOUS, TSI108_GREG_TIMER_FREQ, TSI108_TIMER_BASE, TSI108_TIMER_STRIDE, TSI108_TIMER_CURRENT_CNT, TSI108_TIMER_BASE_CNT, TSI108_TIMER_VECTOR_PRI, TSI108_TIMER_DESTINATION, TSI108_CPU_BASE, TSI108_CPU_STRIDE, TSI108_CPU_IPI_DISPATCH_0, TSI108_CPU_IPI_DISPATCH_STRIDE, TSI108_CPU_CURRENT_TASK_PRI, TSI108_CPU_WHOAMI, TSI108_CPU_INTACK, TSI108_CPU_EOI, TSI108_CPU_MCACK, TSI108_IRQ_BASE, TSI108_IRQ_STRIDE, TSI108_IRQ_VECTOR_PRI, TSI108_VECPRI_VECTOR_MASK, TSI108_VECPRI_POLARITY_POSITIVE, TSI108_VECPRI_POLARITY_NEGATIVE, TSI108_VECPRI_SENSE_LEVEL, TSI108_VECPRI_SENSE_EDGE, TSI108_VECPRI_POLARITY_MASK, TSI108_VECPRI_SENSE_MASK, TSI108_IRQ_DESTINATION }, }; #define MPIC_INFO(name) 
mpic->hw_set[MPIC_IDX_##name] #else /* CONFIG_MPIC_WEIRD */ #define MPIC_INFO(name) MPIC_##name #endif /* CONFIG_MPIC_WEIRD */ /* * Register accessor functions */ static inline u32 _mpic_read(enum mpic_reg_type type, struct mpic_reg_bank *rb, unsigned int reg) { switch(type) { #ifdef CONFIG_PPC_DCR case mpic_access_dcr: return dcr_read(rb->dhost, reg); #endif case mpic_access_mmio_be: return in_be32(rb->base + (reg >> 2)); case mpic_access_mmio_le: default: return in_le32(rb->base + (reg >> 2)); } } static inline void _mpic_write(enum mpic_reg_type type, struct mpic_reg_bank *rb, unsigned int reg, u32 value) { switch(type) { #ifdef CONFIG_PPC_DCR case mpic_access_dcr: dcr_write(rb->dhost, reg, value); break; #endif case mpic_access_mmio_be: out_be32(rb->base + (reg >> 2), value); break; case mpic_access_mmio_le: default: out_le32(rb->base + (reg >> 2), value); break; } } static inline u32 _mpic_ipi_read(struct mpic *mpic, unsigned int ipi) { enum mpic_reg_type type = mpic->reg_type; unsigned int offset = MPIC_INFO(GREG_IPI_VECTOR_PRI_0) + (ipi * MPIC_INFO(GREG_IPI_STRIDE)); if ((mpic->flags & MPIC_BROKEN_IPI) && type == mpic_access_mmio_le) type = mpic_access_mmio_be; return _mpic_read(type, &mpic->gregs, offset); } static inline void _mpic_ipi_write(struct mpic *mpic, unsigned int ipi, u32 value) { unsigned int offset = MPIC_INFO(GREG_IPI_VECTOR_PRI_0) + (ipi * MPIC_INFO(GREG_IPI_STRIDE)); _mpic_write(mpic->reg_type, &mpic->gregs, offset, value); } static inline u32 _mpic_cpu_read(struct mpic *mpic, unsigned int reg) { unsigned int cpu = 0; if (mpic->flags & MPIC_PRIMARY) cpu = hard_smp_processor_id(); return _mpic_read(mpic->reg_type, &mpic->cpuregs[cpu], reg); } static inline void _mpic_cpu_write(struct mpic *mpic, unsigned int reg, u32 value) { unsigned int cpu = 0; if (mpic->flags & MPIC_PRIMARY) cpu = hard_smp_processor_id(); _mpic_write(mpic->reg_type, &mpic->cpuregs[cpu], reg, value); } static inline u32 _mpic_irq_read(struct mpic *mpic, unsigned int 
src_no, unsigned int reg) { unsigned int isu = src_no >> mpic->isu_shift; unsigned int idx = src_no & mpic->isu_mask; unsigned int val; val = _mpic_read(mpic->reg_type, &mpic->isus[isu], reg + (idx * MPIC_INFO(IRQ_STRIDE))); #ifdef CONFIG_MPIC_BROKEN_REGREAD if (reg == 0) val = (val & (MPIC_VECPRI_MASK | MPIC_VECPRI_ACTIVITY)) | mpic->isu_reg0_shadow[src_no]; #endif return val; } static inline void _mpic_irq_write(struct mpic *mpic, unsigned int src_no, unsigned int reg, u32 value) { unsigned int isu = src_no >> mpic->isu_shift; unsigned int idx = src_no & mpic->isu_mask; _mpic_write(mpic->reg_type, &mpic->isus[isu], reg + (idx * MPIC_INFO(IRQ_STRIDE)), value); #ifdef CONFIG_MPIC_BROKEN_REGREAD if (reg == 0) mpic->isu_reg0_shadow[src_no] = value & ~(MPIC_VECPRI_MASK | MPIC_VECPRI_ACTIVITY); #endif } #define mpic_read(b,r) _mpic_read(mpic->reg_type,&(b),(r)) #define mpic_write(b,r,v) _mpic_write(mpic->reg_type,&(b),(r),(v)) #define mpic_ipi_read(i) _mpic_ipi_read(mpic,(i)) #define mpic_ipi_write(i,v) _mpic_ipi_write(mpic,(i),(v)) #define mpic_cpu_read(i) _mpic_cpu_read(mpic,(i)) #define mpic_cpu_write(i,v) _mpic_cpu_write(mpic,(i),(v)) #define mpic_irq_read(s,r) _mpic_irq_read(mpic,(s),(r)) #define mpic_irq_write(s,r,v) _mpic_irq_write(mpic,(s),(r),(v)) /* * Low level utility functions */ static void _mpic_map_mmio(struct mpic *mpic, phys_addr_t phys_addr, struct mpic_reg_bank *rb, unsigned int offset, unsigned int size) { rb->base = ioremap(phys_addr + offset, size); BUG_ON(rb->base == NULL); } #ifdef CONFIG_PPC_DCR static void _mpic_map_dcr(struct mpic *mpic, struct device_node *node, struct mpic_reg_bank *rb, unsigned int offset, unsigned int size) { const u32 *dbasep; dbasep = of_get_property(node, "dcr-reg", NULL); rb->dhost = dcr_map(node, *dbasep + offset, size); BUG_ON(!DCR_MAP_OK(rb->dhost)); } static inline void mpic_map(struct mpic *mpic, struct device_node *node, phys_addr_t phys_addr, struct mpic_reg_bank *rb, unsigned int offset, unsigned int size) { 
if (mpic->flags & MPIC_USES_DCR) _mpic_map_dcr(mpic, node, rb, offset, size); else _mpic_map_mmio(mpic, phys_addr, rb, offset, size); } #else /* CONFIG_PPC_DCR */ #define mpic_map(m,n,p,b,o,s) _mpic_map_mmio(m,p,b,o,s) #endif /* !CONFIG_PPC_DCR */ /* Check if we have one of those nice broken MPICs with a flipped endian on * reads from IPI registers */ static void __init mpic_test_broken_ipi(struct mpic *mpic) { u32 r; mpic_write(mpic->gregs, MPIC_INFO(GREG_IPI_VECTOR_PRI_0), MPIC_VECPRI_MASK); r = mpic_read(mpic->gregs, MPIC_INFO(GREG_IPI_VECTOR_PRI_0)); if (r == le32_to_cpu(MPIC_VECPRI_MASK)) { printk(KERN_INFO "mpic: Detected reversed IPI registers\n"); mpic->flags |= MPIC_BROKEN_IPI; } } #ifdef CONFIG_MPIC_U3_HT_IRQS /* Test if an interrupt is sourced from HyperTransport (used on broken U3s) * to force the edge setting on the MPIC and do the ack workaround. */ static inline int mpic_is_ht_interrupt(struct mpic *mpic, unsigned int source) { if (source >= 128 || !mpic->fixups) return 0; return mpic->fixups[source].base != NULL; } static inline void mpic_ht_end_irq(struct mpic *mpic, unsigned int source) { struct mpic_irq_fixup *fixup = &mpic->fixups[source]; if (fixup->applebase) { unsigned int soff = (fixup->index >> 3) & ~3; unsigned int mask = 1U << (fixup->index & 0x1f); writel(mask, fixup->applebase + soff); } else { raw_spin_lock(&mpic->fixup_lock); writeb(0x11 + 2 * fixup->index, fixup->base + 2); writel(fixup->data, fixup->base + 4); raw_spin_unlock(&mpic->fixup_lock); } } static void mpic_startup_ht_interrupt(struct mpic *mpic, unsigned int source, unsigned int irqflags) { struct mpic_irq_fixup *fixup = &mpic->fixups[source]; unsigned long flags; u32 tmp; if (fixup->base == NULL) return; DBG("startup_ht_interrupt(0x%x, 0x%x) index: %d\n", source, irqflags, fixup->index); raw_spin_lock_irqsave(&mpic->fixup_lock, flags); /* Enable and configure */ writeb(0x10 + 2 * fixup->index, fixup->base + 2); tmp = readl(fixup->base + 4); tmp &= ~(0x23U); if (irqflags & 
IRQ_LEVEL) tmp |= 0x22; writel(tmp, fixup->base + 4); raw_spin_unlock_irqrestore(&mpic->fixup_lock, flags); #ifdef CONFIG_PM /* use the lowest bit inverted to the actual HW, * set if this fixup was enabled, clear otherwise */ mpic->save_data[source].fixup_data = tmp | 1; #endif } static void mpic_shutdown_ht_interrupt(struct mpic *mpic, unsigned int source, unsigned int irqflags) { struct mpic_irq_fixup *fixup = &mpic->fixups[source]; unsigned long flags; u32 tmp; if (fixup->base == NULL) return; DBG("shutdown_ht_interrupt(0x%x, 0x%x)\n", source, irqflags); /* Disable */ raw_spin_lock_irqsave(&mpic->fixup_lock, flags); writeb(0x10 + 2 * fixup->index, fixup->base + 2); tmp = readl(fixup->base + 4); tmp |= 1; writel(tmp, fixup->base + 4); raw_spin_unlock_irqrestore(&mpic->fixup_lock, flags); #ifdef CONFIG_PM /* use the lowest bit inverted to the actual HW, * set if this fixup was enabled, clear otherwise */ mpic->save_data[source].fixup_data = tmp & ~1; #endif } #ifdef CONFIG_PCI_MSI static void __init mpic_scan_ht_msi(struct mpic *mpic, u8 __iomem *devbase, unsigned int devfn) { u8 __iomem *base; u8 pos, flags; u64 addr = 0; for (pos = readb(devbase + PCI_CAPABILITY_LIST); pos != 0; pos = readb(devbase + pos + PCI_CAP_LIST_NEXT)) { u8 id = readb(devbase + pos + PCI_CAP_LIST_ID); if (id == PCI_CAP_ID_HT) { id = readb(devbase + pos + 3); if ((id & HT_5BIT_CAP_MASK) == HT_CAPTYPE_MSI_MAPPING) break; } } if (pos == 0) return; base = devbase + pos; flags = readb(base + HT_MSI_FLAGS); if (!(flags & HT_MSI_FLAGS_FIXED)) { addr = readl(base + HT_MSI_ADDR_LO) & HT_MSI_ADDR_LO_MASK; addr = addr | ((u64)readl(base + HT_MSI_ADDR_HI) << 32); } printk(KERN_DEBUG "mpic: - HT:%02x.%x %s MSI mapping found @ 0x%llx\n", PCI_SLOT(devfn), PCI_FUNC(devfn), flags & HT_MSI_FLAGS_ENABLE ? 
"enabled" : "disabled", addr); if (!(flags & HT_MSI_FLAGS_ENABLE)) writeb(flags | HT_MSI_FLAGS_ENABLE, base + HT_MSI_FLAGS); } #else static void __init mpic_scan_ht_msi(struct mpic *mpic, u8 __iomem *devbase, unsigned int devfn) { return; } #endif static void __init mpic_scan_ht_pic(struct mpic *mpic, u8 __iomem *devbase, unsigned int devfn, u32 vdid) { int i, irq, n; u8 __iomem *base; u32 tmp; u8 pos; for (pos = readb(devbase + PCI_CAPABILITY_LIST); pos != 0; pos = readb(devbase + pos + PCI_CAP_LIST_NEXT)) { u8 id = readb(devbase + pos + PCI_CAP_LIST_ID); if (id == PCI_CAP_ID_HT) { id = readb(devbase + pos + 3); if ((id & HT_5BIT_CAP_MASK) == HT_CAPTYPE_IRQ) break; } } if (pos == 0) return; base = devbase + pos; writeb(0x01, base + 2); n = (readl(base + 4) >> 16) & 0xff; printk(KERN_INFO "mpic: - HT:%02x.%x [0x%02x] vendor %04x device %04x" " has %d irqs\n", devfn >> 3, devfn & 0x7, pos, vdid & 0xffff, vdid >> 16, n + 1); for (i = 0; i <= n; i++) { writeb(0x10 + 2 * i, base + 2); tmp = readl(base + 4); irq = (tmp >> 16) & 0xff; DBG("HT PIC index 0x%x, irq 0x%x, tmp: %08x\n", i, irq, tmp); /* mask it , will be unmasked later */ tmp |= 0x1; writel(tmp, base + 4); mpic->fixups[irq].index = i; mpic->fixups[irq].base = base; /* Apple HT PIC has a non-standard way of doing EOIs */ if ((vdid & 0xffff) == 0x106b) mpic->fixups[irq].applebase = devbase + 0x60; else mpic->fixups[irq].applebase = NULL; writeb(0x11 + 2 * i, base + 2); mpic->fixups[irq].data = readl(base + 4) | 0x80000000; } } static void __init mpic_scan_ht_pics(struct mpic *mpic) { unsigned int devfn; u8 __iomem *cfgspace; printk(KERN_INFO "mpic: Setting up HT PICs workarounds for U3/U4\n"); /* Allocate fixups array */ mpic->fixups = kzalloc(128 * sizeof(*mpic->fixups), GFP_KERNEL); BUG_ON(mpic->fixups == NULL); /* Init spinlock */ raw_spin_lock_init(&mpic->fixup_lock); /* Map U3 config space. We assume all IO-APICs are on the primary bus * so we only need to map 64kB. 
*/ cfgspace = ioremap(0xf2000000, 0x10000); BUG_ON(cfgspace == NULL); /* Now we scan all slots. We do a very quick scan, we read the header * type, vendor ID and device ID only, that's plenty enough */ for (devfn = 0; devfn < 0x100; devfn++) { u8 __iomem *devbase = cfgspace + (devfn << 8); u8 hdr_type = readb(devbase + PCI_HEADER_TYPE); u32 l = readl(devbase + PCI_VENDOR_ID); u16 s; DBG("devfn %x, l: %x\n", devfn, l); /* If no device, skip */ if (l == 0xffffffff || l == 0x00000000 || l == 0x0000ffff || l == 0xffff0000) goto next; /* Check if is supports capability lists */ s = readw(devbase + PCI_STATUS); if (!(s & PCI_STATUS_CAP_LIST)) goto next; mpic_scan_ht_pic(mpic, devbase, devfn, l); mpic_scan_ht_msi(mpic, devbase, devfn); next: /* next device, if function 0 */ if (PCI_FUNC(devfn) == 0 && (hdr_type & 0x80) == 0) devfn += 7; } } #else /* CONFIG_MPIC_U3_HT_IRQS */ static inline int mpic_is_ht_interrupt(struct mpic *mpic, unsigned int source) { return 0; } static void __init mpic_scan_ht_pics(struct mpic *mpic) { } #endif /* CONFIG_MPIC_U3_HT_IRQS */ #ifdef CONFIG_SMP static int irq_choose_cpu(const struct cpumask *mask) { int cpuid; if (cpumask_equal(mask, cpu_all_mask)) { static int irq_rover = 0; static DEFINE_RAW_SPINLOCK(irq_rover_lock); unsigned long flags; /* Round-robin distribution... 
*/ do_round_robin: raw_spin_lock_irqsave(&irq_rover_lock, flags); irq_rover = cpumask_next(irq_rover, cpu_online_mask); if (irq_rover >= nr_cpu_ids) irq_rover = cpumask_first(cpu_online_mask); cpuid = irq_rover; raw_spin_unlock_irqrestore(&irq_rover_lock, flags); } else { cpuid = cpumask_first_and(mask, cpu_online_mask); if (cpuid >= nr_cpu_ids) goto do_round_robin; } return get_hard_smp_processor_id(cpuid); } #else static int irq_choose_cpu(const struct cpumask *mask) { return hard_smp_processor_id(); } #endif #define mpic_irq_to_hw(virq) ((unsigned int)irq_map[virq].hwirq) /* Find an mpic associated with a given linux interrupt */ static struct mpic *mpic_find(unsigned int irq) { if (irq < NUM_ISA_INTERRUPTS) return NULL; return irq_to_desc(irq)->chip_data; } /* Determine if the linux irq is an IPI */ static unsigned int mpic_is_ipi(struct mpic *mpic, unsigned int irq) { unsigned int src = mpic_irq_to_hw(irq); return (src >= mpic->ipi_vecs[0] && src <= mpic->ipi_vecs[3]); } /* Convert a cpu mask from logical to physical cpu numbers. 
*/ static inline u32 mpic_physmask(u32 cpumask) { int i; u32 mask = 0; for (i = 0; i < NR_CPUS; ++i, cpumask >>= 1) mask |= (cpumask & 1) << get_hard_smp_processor_id(i); return mask; } #ifdef CONFIG_SMP /* Get the mpic structure from the IPI number */ static inline struct mpic * mpic_from_ipi(unsigned int ipi) { return irq_to_desc(ipi)->chip_data; } #endif /* Get the mpic structure from the irq number */ static inline struct mpic * mpic_from_irq(unsigned int irq) { return irq_to_desc(irq)->chip_data; } /* Send an EOI */ static inline void mpic_eoi(struct mpic *mpic) { mpic_cpu_write(MPIC_INFO(CPU_EOI), 0); (void)mpic_cpu_read(MPIC_INFO(CPU_WHOAMI)); } /* * Linux descriptor level callbacks */ void mpic_unmask_irq(unsigned int irq) { unsigned int loops = 100000; struct mpic *mpic = mpic_from_irq(irq); unsigned int src = mpic_irq_to_hw(irq); DBG("%p: %s: enable_irq: %d (src %d)\n", mpic, mpic->name, irq, src); mpic_irq_write(src, MPIC_INFO(IRQ_VECTOR_PRI), mpic_irq_read(src, MPIC_INFO(IRQ_VECTOR_PRI)) & ~MPIC_VECPRI_MASK); /* make sure mask gets to controller before we return to user */ do { if (!loops--) { printk(KERN_ERR "mpic_enable_irq timeout\n"); break; } } while(mpic_irq_read(src, MPIC_INFO(IRQ_VECTOR_PRI)) & MPIC_VECPRI_MASK); } void mpic_mask_irq(unsigned int irq) { unsigned int loops = 100000; struct mpic *mpic = mpic_from_irq(irq); unsigned int src = mpic_irq_to_hw(irq); DBG("%s: disable_irq: %d (src %d)\n", mpic->name, irq, src); mpic_irq_write(src, MPIC_INFO(IRQ_VECTOR_PRI), mpic_irq_read(src, MPIC_INFO(IRQ_VECTOR_PRI)) | MPIC_VECPRI_MASK); /* make sure mask gets to controller before we return to user */ do { if (!loops--) { printk(KERN_ERR "mpic_enable_irq timeout\n"); break; } } while(!(mpic_irq_read(src, MPIC_INFO(IRQ_VECTOR_PRI)) & MPIC_VECPRI_MASK)); } void mpic_end_irq(unsigned int irq) { struct mpic *mpic = mpic_from_irq(irq); #ifdef DEBUG_IRQ DBG("%s: end_irq: %d\n", mpic->name, irq); #endif /* We always EOI on end_irq() even for edge interrupts 
since that * should only lower the priority, the MPIC should have properly * latched another edge interrupt coming in anyway */ mpic_eoi(mpic); } #ifdef CONFIG_MPIC_U3_HT_IRQS static void mpic_unmask_ht_irq(unsigned int irq) { struct mpic *mpic = mpic_from_irq(irq); unsigned int src = mpic_irq_to_hw(irq); mpic_unmask_irq(irq); if (irq_to_desc(irq)->status & IRQ_LEVEL) mpic_ht_end_irq(mpic, src); } static unsigned int mpic_startup_ht_irq(unsigned int irq) { struct mpic *mpic = mpic_from_irq(irq); unsigned int src = mpic_irq_to_hw(irq); mpic_unmask_irq(irq); mpic_startup_ht_interrupt(mpic, src, irq_to_desc(irq)->status); return 0; } static void mpic_shutdown_ht_irq(unsigned int irq) { struct mpic *mpic = mpic_from_irq(irq); unsigned int src = mpic_irq_to_hw(irq); mpic_shutdown_ht_interrupt(mpic, src, irq_to_desc(irq)->status); mpic_mask_irq(irq); } static void mpic_end_ht_irq(unsigned int irq) { struct mpic *mpic = mpic_from_irq(irq); unsigned int src = mpic_irq_to_hw(irq); #ifdef DEBUG_IRQ DBG("%s: end_irq: %d\n", mpic->name, irq); #endif /* We always EOI on end_irq() even for edge interrupts since that * should only lower the priority, the MPIC should have properly * latched another edge interrupt coming in anyway */ if (irq_to_desc(irq)->status & IRQ_LEVEL) mpic_ht_end_irq(mpic, src); mpic_eoi(mpic); } #endif /* !CONFIG_MPIC_U3_HT_IRQS */ #ifdef CONFIG_SMP static void mpic_unmask_ipi(unsigned int irq) { struct mpic *mpic = mpic_from_ipi(irq); unsigned int src = mpic_irq_to_hw(irq) - mpic->ipi_vecs[0]; DBG("%s: enable_ipi: %d (ipi %d)\n", mpic->name, irq, src); mpic_ipi_write(src, mpic_ipi_read(src) & ~MPIC_VECPRI_MASK); } static void mpic_mask_ipi(unsigned int irq) { /* NEVER disable an IPI... that's just plain wrong! */ } static void mpic_end_ipi(unsigned int irq) { struct mpic *mpic = mpic_from_ipi(irq); /* * IPIs are marked IRQ_PER_CPU. This has the side effect of * preventing the IRQ_PENDING/IRQ_INPROGRESS logic from * applying to them. 
We EOI them late to avoid re-entering. * We mark IPI's with IRQF_DISABLED as they must run with * irqs disabled. */ mpic_eoi(mpic); } #endif /* CONFIG_SMP */ int mpic_set_affinity(unsigned int irq, const struct cpumask *cpumask) { struct mpic *mpic = mpic_from_irq(irq); unsigned int src = mpic_irq_to_hw(irq); if (mpic->flags & MPIC_SINGLE_DEST_CPU) { int cpuid = irq_choose_cpu(cpumask); mpic_irq_write(src, MPIC_INFO(IRQ_DESTINATION), 1 << cpuid); } else { cpumask_var_t tmp; alloc_cpumask_var(&tmp, GFP_KERNEL); cpumask_and(tmp, cpumask, cpu_online_mask); mpic_irq_write(src, MPIC_INFO(IRQ_DESTINATION), mpic_physmask(cpumask_bits(tmp)[0])); free_cpumask_var(tmp); } return 0; } static unsigned int mpic_type_to_vecpri(struct mpic *mpic, unsigned int type) { /* Now convert sense value */ switch(type & IRQ_TYPE_SENSE_MASK) { case IRQ_TYPE_EDGE_RISING: return MPIC_INFO(VECPRI_SENSE_EDGE) | MPIC_INFO(VECPRI_POLARITY_POSITIVE); case IRQ_TYPE_EDGE_FALLING: case IRQ_TYPE_EDGE_BOTH: return MPIC_INFO(VECPRI_SENSE_EDGE) | MPIC_INFO(VECPRI_POLARITY_NEGATIVE); case IRQ_TYPE_LEVEL_HIGH: return MPIC_INFO(VECPRI_SENSE_LEVEL) | MPIC_INFO(VECPRI_POLARITY_POSITIVE); case IRQ_TYPE_LEVEL_LOW: default: return MPIC_INFO(VECPRI_SENSE_LEVEL) | MPIC_INFO(VECPRI_POLARITY_NEGATIVE); } } int mpic_set_irq_type(unsigned int virq, unsigned int flow_type) { struct mpic *mpic = mpic_from_irq(virq); unsigned int src = mpic_irq_to_hw(virq); struct irq_desc *desc = irq_to_desc(virq); unsigned int vecpri, vold, vnew; DBG("mpic: set_irq_type(mpic:@%p,virq:%d,src:0x%x,type:0x%x)\n", mpic, virq, src, flow_type); if (src >= mpic->irq_count) return -EINVAL; if (flow_type == IRQ_TYPE_NONE) if (mpic->senses && src < mpic->senses_count) flow_type = mpic->senses[src]; if (flow_type == IRQ_TYPE_NONE) flow_type = IRQ_TYPE_LEVEL_LOW; desc->status &= ~(IRQ_TYPE_SENSE_MASK | IRQ_LEVEL); desc->status |= flow_type & IRQ_TYPE_SENSE_MASK; if (flow_type & (IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW)) desc->status |= IRQ_LEVEL; 
if (mpic_is_ht_interrupt(mpic, src)) vecpri = MPIC_VECPRI_POLARITY_POSITIVE | MPIC_VECPRI_SENSE_EDGE; else vecpri = mpic_type_to_vecpri(mpic, flow_type); vold = mpic_irq_read(src, MPIC_INFO(IRQ_VECTOR_PRI)); vnew = vold & ~(MPIC_INFO(VECPRI_POLARITY_MASK) | MPIC_INFO(VECPRI_SENSE_MASK)); vnew |= vecpri; if (vold != vnew) mpic_irq_write(src, MPIC_INFO(IRQ_VECTOR_PRI), vnew); return 0; } void mpic_set_vector(unsigned int virq, unsigned int vector) { struct mpic *mpic = mpic_from_irq(virq); unsigned int src = mpic_irq_to_hw(virq); unsigned int vecpri; DBG("mpic: set_vector(mpic:@%p,virq:%d,src:%d,vector:0x%x)\n", mpic, virq, src, vector); if (src >= mpic->irq_count) return; vecpri = mpic_irq_read(src, MPIC_INFO(IRQ_VECTOR_PRI)); vecpri = vecpri & ~MPIC_INFO(VECPRI_VECTOR_MASK); vecpri |= vector; mpic_irq_write(src, MPIC_INFO(IRQ_VECTOR_PRI), vecpri); } static struct irq_chip mpic_irq_chip = { .mask = mpic_mask_irq, .unmask = mpic_unmask_irq, .eoi = mpic_end_irq, .set_type = mpic_set_irq_type, }; #ifdef CONFIG_SMP static struct irq_chip mpic_ipi_chip = { .mask = mpic_mask_ipi, .unmask = mpic_unmask_ipi, .eoi = mpic_end_ipi, }; #endif /* CONFIG_SMP */ #ifdef CONFIG_MPIC_U3_HT_IRQS static struct irq_chip mpic_irq_ht_chip = { .startup = mpic_startup_ht_irq, .shutdown = mpic_shutdown_ht_irq, .mask = mpic_mask_irq, .unmask = mpic_unmask_ht_irq, .eoi = mpic_end_ht_irq, .set_type = mpic_set_irq_type, }; #endif /* CONFIG_MPIC_U3_HT_IRQS */ static int mpic_host_match(struct irq_host *h, struct device_node *node) { /* Exact match, unless mpic node is NULL */ return h->of_node == NULL || h->of_node == node; } static int mpic_host_map(struct irq_host *h, unsigned int virq, irq_hw_number_t hw) { struct mpic *mpic = h->host_data; struct irq_chip *chip; DBG("mpic: map virq %d, hwirq 0x%lx\n", virq, hw); if (hw == mpic->spurious_vec) return -EINVAL; if (mpic->protected && test_bit(hw, mpic->protected)) return -EINVAL; #ifdef CONFIG_SMP else if (hw >= mpic->ipi_vecs[0]) { 
WARN_ON(!(mpic->flags & MPIC_PRIMARY)); DBG("mpic: mapping as IPI\n"); set_irq_chip_data(virq, mpic); set_irq_chip_and_handler(virq, &mpic->hc_ipi, handle_percpu_irq); return 0; } #endif /* CONFIG_SMP */ if (hw >= mpic->irq_count) return -EINVAL; mpic_msi_reserve_hwirq(mpic, hw); /* Default chip */ chip = &mpic->hc_irq; #ifdef CONFIG_MPIC_U3_HT_IRQS /* Check for HT interrupts, override vecpri */ if (mpic_is_ht_interrupt(mpic, hw)) chip = &mpic->hc_ht_irq; #endif /* CONFIG_MPIC_U3_HT_IRQS */ DBG("mpic: mapping to irq chip @%p\n", chip); set_irq_chip_data(virq, mpic); set_irq_chip_and_handler(virq, chip, handle_fasteoi_irq); /* Set default irq type */ set_irq_type(virq, IRQ_TYPE_NONE); return 0; } static int mpic_host_xlate(struct irq_host *h, struct device_node *ct, const u32 *intspec, unsigned int intsize, irq_hw_number_t *out_hwirq, unsigned int *out_flags) { static unsigned char map_mpic_senses[4] = { IRQ_TYPE_EDGE_RISING, IRQ_TYPE_LEVEL_LOW, IRQ_TYPE_LEVEL_HIGH, IRQ_TYPE_EDGE_FALLING, }; *out_hwirq = intspec[0]; if (intsize > 1) { u32 mask = 0x3; /* Apple invented a new race of encoding on machines with * an HT APIC. They encode, among others, the index within * the HT APIC. We don't care about it here since thankfully, * it appears that they have the APIC already properly * configured, and thus our current fixup code that reads the * APIC config works fine. However, we still need to mask out * bits in the specifier to make sure we only get bit 0 which * is the level/edge bit (the only sense bit exposed by Apple), * as their bit 1 means something else. 
*/ if (machine_is(powermac)) mask = 0x1; *out_flags = map_mpic_senses[intspec[1] & mask]; } else *out_flags = IRQ_TYPE_NONE; DBG("mpic: xlate (%d cells: 0x%08x 0x%08x) to line 0x%lx sense 0x%x\n", intsize, intspec[0], intspec[1], *out_hwirq, *out_flags); return 0; } static struct irq_host_ops mpic_host_ops = { .match = mpic_host_match, .map = mpic_host_map, .xlate = mpic_host_xlate, }; /* * Exported functions */ struct mpic * __init mpic_alloc(struct device_node *node, phys_addr_t phys_addr, unsigned int flags, unsigned int isu_size, unsigned int irq_count, const char *name) { struct mpic *mpic; u32 greg_feature; const char *vers; int i; int intvec_top; u64 paddr = phys_addr; mpic = kzalloc(sizeof(struct mpic), GFP_KERNEL); if (mpic == NULL) return NULL; mpic->name = name; mpic->hc_irq = mpic_irq_chip; mpic->hc_irq.name = name; if (flags & MPIC_PRIMARY) mpic->hc_irq.set_affinity = mpic_set_affinity; #ifdef CONFIG_MPIC_U3_HT_IRQS mpic->hc_ht_irq = mpic_irq_ht_chip; mpic->hc_ht_irq.name = name; if (flags & MPIC_PRIMARY) mpic->hc_ht_irq.set_affinity = mpic_set_affinity; #endif /* CONFIG_MPIC_U3_HT_IRQS */ #ifdef CONFIG_SMP mpic->hc_ipi = mpic_ipi_chip; mpic->hc_ipi.name = name; #endif /* CONFIG_SMP */ mpic->flags = flags; mpic->isu_size = isu_size; mpic->irq_count = irq_count; mpic->num_sources = 0; /* so far */ if (flags & MPIC_LARGE_VECTORS) intvec_top = 2047; else intvec_top = 255; mpic->timer_vecs[0] = intvec_top - 8; mpic->timer_vecs[1] = intvec_top - 7; mpic->timer_vecs[2] = intvec_top - 6; mpic->timer_vecs[3] = intvec_top - 5; mpic->ipi_vecs[0] = intvec_top - 4; mpic->ipi_vecs[1] = intvec_top - 3; mpic->ipi_vecs[2] = intvec_top - 2; mpic->ipi_vecs[3] = intvec_top - 1; mpic->spurious_vec = intvec_top; /* Check for "big-endian" in device-tree */ if (node && of_get_property(node, "big-endian", NULL) != NULL) mpic->flags |= MPIC_BIG_ENDIAN; /* Look for protected sources */ if (node) { int psize; unsigned int bits, mapsize; const u32 *psrc = of_get_property(node, 
"protected-sources", &psize); if (psrc) { psize /= 4; bits = intvec_top + 1; mapsize = BITS_TO_LONGS(bits) * sizeof(unsigned long); mpic->protected = kzalloc(mapsize, GFP_KERNEL); BUG_ON(mpic->protected == NULL); for (i = 0; i < psize; i++) { if (psrc[i] > intvec_top) continue; __set_bit(psrc[i], mpic->protected); } } } #ifdef CONFIG_MPIC_WEIRD mpic->hw_set = mpic_infos[MPIC_GET_REGSET(flags)]; #endif /* default register type */ mpic->reg_type = (flags & MPIC_BIG_ENDIAN) ? mpic_access_mmio_be : mpic_access_mmio_le; /* If no physical address is passed in, a device-node is mandatory */ BUG_ON(paddr == 0 && node == NULL); /* If no physical address passed in, check if it's dcr based */ if (paddr == 0 && of_get_property(node, "dcr-reg", NULL) != NULL) { #ifdef CONFIG_PPC_DCR mpic->flags |= MPIC_USES_DCR; mpic->reg_type = mpic_access_dcr; #else BUG(); #endif /* CONFIG_PPC_DCR */ } /* If the MPIC is not DCR based, and no physical address was passed * in, try to obtain one */ if (paddr == 0 && !(mpic->flags & MPIC_USES_DCR)) { const u32 *reg = of_get_property(node, "reg", NULL); BUG_ON(reg == NULL); paddr = of_translate_address(node, reg); BUG_ON(paddr == OF_BAD_ADDR); } /* Map the global registers */ mpic_map(mpic, node, paddr, &mpic->gregs, MPIC_INFO(GREG_BASE), 0x1000); mpic_map(mpic, node, paddr, &mpic->tmregs, MPIC_INFO(TIMER_BASE), 0x1000); /* Reset */ if (flags & MPIC_WANTS_RESET) { mpic_write(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0), mpic_read(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0)) | MPIC_GREG_GCONF_RESET); while( mpic_read(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0)) & MPIC_GREG_GCONF_RESET) mb(); } /* CoreInt */ if (flags & MPIC_ENABLE_COREINT) mpic_write(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0), mpic_read(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0)) | MPIC_GREG_GCONF_COREINT); if (flags & MPIC_ENABLE_MCK) mpic_write(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0), mpic_read(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0)) | MPIC_GREG_GCONF_MCK); /* Read feature 
register, calculate num CPUs and, for non-ISU * MPICs, num sources as well. On ISU MPICs, sources are counted * as ISUs are added */ greg_feature = mpic_read(mpic->gregs, MPIC_INFO(GREG_FEATURE_0)); mpic->num_cpus = ((greg_feature & MPIC_GREG_FEATURE_LAST_CPU_MASK) >> MPIC_GREG_FEATURE_LAST_CPU_SHIFT) + 1; if (isu_size == 0) { if (flags & MPIC_BROKEN_FRR_NIRQS) mpic->num_sources = mpic->irq_count; else mpic->num_sources = ((greg_feature & MPIC_GREG_FEATURE_LAST_SRC_MASK) >> MPIC_GREG_FEATURE_LAST_SRC_SHIFT) + 1; } /* Map the per-CPU registers */ for (i = 0; i < mpic->num_cpus; i++) { mpic_map(mpic, node, paddr, &mpic->cpuregs[i], MPIC_INFO(CPU_BASE) + i * MPIC_INFO(CPU_STRIDE), 0x1000); } /* Initialize main ISU if none provided */ if (mpic->isu_size == 0) { mpic->isu_size = mpic->num_sources; mpic_map(mpic, node, paddr, &mpic->isus[0], MPIC_INFO(IRQ_BASE), MPIC_INFO(IRQ_STRIDE) * mpic->isu_size); } mpic->isu_shift = 1 + __ilog2(mpic->isu_size - 1); mpic->isu_mask = (1 << mpic->isu_shift) - 1; mpic->irqhost = irq_alloc_host(node, IRQ_HOST_MAP_LINEAR, isu_size ? isu_size : mpic->num_sources, &mpic_host_ops, flags & MPIC_LARGE_VECTORS ? 
2048 : 256); if (mpic->irqhost == NULL) return NULL; mpic->irqhost->host_data = mpic; /* Display version */ switch (greg_feature & MPIC_GREG_FEATURE_VERSION_MASK) { case 1: vers = "1.0"; break; case 2: vers = "1.2"; break; case 3: vers = "1.3"; break; default: vers = "<unknown>"; break; } printk(KERN_INFO "mpic: Setting up MPIC \"%s\" version %s at %llx," " max %d CPUs\n", name, vers, (unsigned long long)paddr, mpic->num_cpus); printk(KERN_INFO "mpic: ISU size: %d, shift: %d, mask: %x\n", mpic->isu_size, mpic->isu_shift, mpic->isu_mask); mpic->next = mpics; mpics = mpic; if (flags & MPIC_PRIMARY) { mpic_primary = mpic; irq_set_default_host(mpic->irqhost); } return mpic; } void __init mpic_assign_isu(struct mpic *mpic, unsigned int isu_num, phys_addr_t paddr) { unsigned int isu_first = isu_num * mpic->isu_size; BUG_ON(isu_num >= MPIC_MAX_ISU); mpic_map(mpic, mpic->irqhost->of_node, paddr, &mpic->isus[isu_num], 0, MPIC_INFO(IRQ_STRIDE) * mpic->isu_size); if ((isu_first + mpic->isu_size) > mpic->num_sources) mpic->num_sources = isu_first + mpic->isu_size; } void __init mpic_set_default_senses(struct mpic *mpic, u8 *senses, int count) { mpic->senses = senses; mpic->senses_count = count; } void __init mpic_init(struct mpic *mpic) { int i; int cpu; BUG_ON(mpic->num_sources == 0); printk(KERN_INFO "mpic: Initializing for %d sources\n", mpic->num_sources); /* Set current processor priority to max */ mpic_cpu_write(MPIC_INFO(CPU_CURRENT_TASK_PRI), 0xf); /* Initialize timers: just disable them all */ for (i = 0; i < 4; i++) { mpic_write(mpic->tmregs, i * MPIC_INFO(TIMER_STRIDE) + MPIC_INFO(TIMER_DESTINATION), 0); mpic_write(mpic->tmregs, i * MPIC_INFO(TIMER_STRIDE) + MPIC_INFO(TIMER_VECTOR_PRI), MPIC_VECPRI_MASK | (mpic->timer_vecs[0] + i)); } /* Initialize IPIs to our reserved vectors and mark them disabled for now */ mpic_test_broken_ipi(mpic); for (i = 0; i < 4; i++) { mpic_ipi_write(i, MPIC_VECPRI_MASK | (10 << MPIC_VECPRI_PRIORITY_SHIFT) | (mpic->ipi_vecs[0] + i)); } /* 
Initialize interrupt sources */ if (mpic->irq_count == 0) mpic->irq_count = mpic->num_sources; /* Do the HT PIC fixups on U3 broken mpic */ DBG("MPIC flags: %x\n", mpic->flags); if ((mpic->flags & MPIC_U3_HT_IRQS) && (mpic->flags & MPIC_PRIMARY)) { mpic_scan_ht_pics(mpic); mpic_u3msi_init(mpic); } mpic_pasemi_msi_init(mpic); if (mpic->flags & MPIC_PRIMARY) cpu = hard_smp_processor_id(); else cpu = 0; for (i = 0; i < mpic->num_sources; i++) { /* start with vector = source number, and masked */ u32 vecpri = MPIC_VECPRI_MASK | i | (8 << MPIC_VECPRI_PRIORITY_SHIFT); /* check if protected */ if (mpic->protected && test_bit(i, mpic->protected)) continue; /* init hw */ mpic_irq_write(i, MPIC_INFO(IRQ_VECTOR_PRI), vecpri); mpic_irq_write(i, MPIC_INFO(IRQ_DESTINATION), 1 << cpu); } /* Init spurious vector */ mpic_write(mpic->gregs, MPIC_INFO(GREG_SPURIOUS), mpic->spurious_vec); /* Disable 8259 passthrough, if supported */ if (!(mpic->flags & MPIC_NO_PTHROU_DIS)) mpic_write(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0), mpic_read(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0)) | MPIC_GREG_GCONF_8259_PTHROU_DIS); if (mpic->flags & MPIC_NO_BIAS) mpic_write(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0), mpic_read(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0)) | MPIC_GREG_GCONF_NO_BIAS); /* Set current processor priority to 0 */ mpic_cpu_write(MPIC_INFO(CPU_CURRENT_TASK_PRI), 0); #ifdef CONFIG_PM /* allocate memory to save mpic state */ mpic->save_data = kmalloc(mpic->num_sources * sizeof(*mpic->save_data), GFP_KERNEL); BUG_ON(mpic->save_data == NULL); #endif } void __init mpic_set_clk_ratio(struct mpic *mpic, u32 clock_ratio) { u32 v; v = mpic_read(mpic->gregs, MPIC_GREG_GLOBAL_CONF_1); v &= ~MPIC_GREG_GLOBAL_CONF_1_CLK_RATIO_MASK; v |= MPIC_GREG_GLOBAL_CONF_1_CLK_RATIO(clock_ratio); mpic_write(mpic->gregs, MPIC_GREG_GLOBAL_CONF_1, v); } void __init mpic_set_serial_int(struct mpic *mpic, int enable) { unsigned long flags; u32 v; raw_spin_lock_irqsave(&mpic_lock, flags); v = 
mpic_read(mpic->gregs, MPIC_GREG_GLOBAL_CONF_1); if (enable) v |= MPIC_GREG_GLOBAL_CONF_1_SIE; else v &= ~MPIC_GREG_GLOBAL_CONF_1_SIE; mpic_write(mpic->gregs, MPIC_GREG_GLOBAL_CONF_1, v); raw_spin_unlock_irqrestore(&mpic_lock, flags); } void mpic_irq_set_priority(unsigned int irq, unsigned int pri) { struct mpic *mpic = mpic_find(irq); unsigned int src = mpic_irq_to_hw(irq); unsigned long flags; u32 reg; if (!mpic) return; raw_spin_lock_irqsave(&mpic_lock, flags); if (mpic_is_ipi(mpic, irq)) { reg = mpic_ipi_read(src - mpic->ipi_vecs[0]) & ~MPIC_VECPRI_PRIORITY_MASK; mpic_ipi_write(src - mpic->ipi_vecs[0], reg | (pri << MPIC_VECPRI_PRIORITY_SHIFT)); } else { reg = mpic_irq_read(src, MPIC_INFO(IRQ_VECTOR_PRI)) & ~MPIC_VECPRI_PRIORITY_MASK; mpic_irq_write(src, MPIC_INFO(IRQ_VECTOR_PRI), reg | (pri << MPIC_VECPRI_PRIORITY_SHIFT)); } raw_spin_unlock_irqrestore(&mpic_lock, flags); } void mpic_setup_this_cpu(void) { #ifdef CONFIG_SMP struct mpic *mpic = mpic_primary; unsigned long flags; u32 msk = 1 << hard_smp_processor_id(); unsigned int i; BUG_ON(mpic == NULL); DBG("%s: setup_this_cpu(%d)\n", mpic->name, hard_smp_processor_id()); raw_spin_lock_irqsave(&mpic_lock, flags); /* let the mpic know we want intrs. default affinity is 0xffffffff * until changed via /proc. That's how it's done on x86. If we want * it differently, then we should make sure we also change the default * values of irq_desc[].affinity in irq.c. 
*/ if (distribute_irqs) { for (i = 0; i < mpic->num_sources ; i++) mpic_irq_write(i, MPIC_INFO(IRQ_DESTINATION), mpic_irq_read(i, MPIC_INFO(IRQ_DESTINATION)) | msk); } /* Set current processor priority to 0 */ mpic_cpu_write(MPIC_INFO(CPU_CURRENT_TASK_PRI), 0); raw_spin_unlock_irqrestore(&mpic_lock, flags); #endif /* CONFIG_SMP */ } int mpic_cpu_get_priority(void) { struct mpic *mpic = mpic_primary; return mpic_cpu_read(MPIC_INFO(CPU_CURRENT_TASK_PRI)); } void mpic_cpu_set_priority(int prio) { struct mpic *mpic = mpic_primary; prio &= MPIC_CPU_TASKPRI_MASK; mpic_cpu_write(MPIC_INFO(CPU_CURRENT_TASK_PRI), prio); } void mpic_teardown_this_cpu(int secondary) { struct mpic *mpic = mpic_primary; unsigned long flags; u32 msk = 1 << hard_smp_processor_id(); unsigned int i; BUG_ON(mpic == NULL); DBG("%s: teardown_this_cpu(%d)\n", mpic->name, hard_smp_processor_id()); raw_spin_lock_irqsave(&mpic_lock, flags); /* let the mpic know we don't want intrs. */ for (i = 0; i < mpic->num_sources ; i++) mpic_irq_write(i, MPIC_INFO(IRQ_DESTINATION), mpic_irq_read(i, MPIC_INFO(IRQ_DESTINATION)) & ~msk); /* Set current processor priority to max */ mpic_cpu_write(MPIC_INFO(CPU_CURRENT_TASK_PRI), 0xf); /* We need to EOI the IPI since not all platforms reset the MPIC * on boot and new interrupts wouldn't get delivered otherwise. 
*/ mpic_eoi(mpic); raw_spin_unlock_irqrestore(&mpic_lock, flags); } static unsigned int _mpic_get_one_irq(struct mpic *mpic, int reg) { u32 src; src = mpic_cpu_read(reg) & MPIC_INFO(VECPRI_VECTOR_MASK); #ifdef DEBUG_LOW DBG("%s: get_one_irq(reg 0x%x): %d\n", mpic->name, reg, src); #endif if (unlikely(src == mpic->spurious_vec)) { if (mpic->flags & MPIC_SPV_EOI) mpic_eoi(mpic); return NO_IRQ; } if (unlikely(mpic->protected && test_bit(src, mpic->protected))) { if (printk_ratelimit()) printk(KERN_WARNING "%s: Got protected source %d !\n", mpic->name, (int)src); mpic_eoi(mpic); return NO_IRQ; } return irq_linear_revmap(mpic->irqhost, src); } unsigned int mpic_get_one_irq(struct mpic *mpic) { return _mpic_get_one_irq(mpic, MPIC_INFO(CPU_INTACK)); } unsigned int mpic_get_irq(void) { struct mpic *mpic = mpic_primary; BUG_ON(mpic == NULL); return mpic_get_one_irq(mpic); } unsigned int mpic_get_coreint_irq(void) { #ifdef CONFIG_BOOKE struct mpic *mpic = mpic_primary; u32 src; BUG_ON(mpic == NULL); src = mfspr(SPRN_EPR); if (unlikely(src == mpic->spurious_vec)) { if (mpic->flags & MPIC_SPV_EOI) mpic_eoi(mpic); return NO_IRQ; } if (unlikely(mpic->protected && test_bit(src, mpic->protected))) { if (printk_ratelimit()) printk(KERN_WARNING "%s: Got protected source %d !\n", mpic->name, (int)src); return NO_IRQ; } return irq_linear_revmap(mpic->irqhost, src); #else return NO_IRQ; #endif } unsigned int mpic_get_mcirq(void) { struct mpic *mpic = mpic_primary; BUG_ON(mpic == NULL); return _mpic_get_one_irq(mpic, MPIC_INFO(CPU_MCACK)); } #ifdef CONFIG_SMP void mpic_request_ipis(void) { struct mpic *mpic = mpic_primary; int i; BUG_ON(mpic == NULL); printk(KERN_INFO "mpic: requesting IPIs...\n"); for (i = 0; i < 4; i++) { unsigned int vipi = irq_create_mapping(mpic->irqhost, mpic->ipi_vecs[0] + i); if (vipi == NO_IRQ) { printk(KERN_ERR "Failed to map %s\n", smp_ipi_name[i]); continue; } smp_request_message_ipi(vipi, i); } } static void mpic_send_ipi(unsigned int ipi_no, const struct 
cpumask *cpu_mask) { struct mpic *mpic = mpic_primary; BUG_ON(mpic == NULL); #ifdef DEBUG_IPI DBG("%s: send_ipi(ipi_no: %d)\n", mpic->name, ipi_no); #endif mpic_cpu_write(MPIC_INFO(CPU_IPI_DISPATCH_0) + ipi_no * MPIC_INFO(CPU_IPI_DISPATCH_STRIDE), mpic_physmask(cpumask_bits(cpu_mask)[0])); } void smp_mpic_message_pass(int target, int msg) { cpumask_var_t tmp; /* make sure we're sending something that translates to an IPI */ if ((unsigned int)msg > 3) { printk("SMP %d: smp_message_pass: unknown msg %d\n", smp_processor_id(), msg); return; } switch (target) { case MSG_ALL: mpic_send_ipi(msg, cpu_online_mask); break; case MSG_ALL_BUT_SELF: alloc_cpumask_var(&tmp, GFP_NOWAIT); cpumask_andnot(tmp, cpu_online_mask, cpumask_of(smp_processor_id())); mpic_send_ipi(msg, tmp); free_cpumask_var(tmp); break; default: mpic_send_ipi(msg, cpumask_of(target)); break; } } int __init smp_mpic_probe(void) { int nr_cpus; DBG("smp_mpic_probe()...\n"); nr_cpus = cpumask_weight(cpu_possible_mask); DBG("nr_cpus: %d\n", nr_cpus); if (nr_cpus > 1) mpic_request_ipis(); return nr_cpus; } void __devinit smp_mpic_setup_cpu(int cpu) { mpic_setup_this_cpu(); } void mpic_reset_core(int cpu) { struct mpic *mpic = mpic_primary; u32 pir; int cpuid = get_hard_smp_processor_id(cpu); /* Set target bit for core reset */ pir = mpic_read(mpic->gregs, MPIC_INFO(GREG_PROCESSOR_INIT)); pir |= (1 << cpuid); mpic_write(mpic->gregs, MPIC_INFO(GREG_PROCESSOR_INIT), pir); mpic_read(mpic->gregs, MPIC_INFO(GREG_PROCESSOR_INIT)); /* Restore target bit after reset complete */ pir &= ~(1 << cpuid); mpic_write(mpic->gregs, MPIC_INFO(GREG_PROCESSOR_INIT), pir); mpic_read(mpic->gregs, MPIC_INFO(GREG_PROCESSOR_INIT)); } #endif /* CONFIG_SMP */ #ifdef CONFIG_PM static int mpic_suspend(struct sys_device *dev, pm_message_t state) { struct mpic *mpic = container_of(dev, struct mpic, sysdev); int i; for (i = 0; i < mpic->num_sources; i++) { mpic->save_data[i].vecprio = mpic_irq_read(i, MPIC_INFO(IRQ_VECTOR_PRI)); 
mpic->save_data[i].dest = mpic_irq_read(i, MPIC_INFO(IRQ_DESTINATION)); } return 0; } static int mpic_resume(struct sys_device *dev) { struct mpic *mpic = container_of(dev, struct mpic, sysdev); int i; for (i = 0; i < mpic->num_sources; i++) { mpic_irq_write(i, MPIC_INFO(IRQ_VECTOR_PRI), mpic->save_data[i].vecprio); mpic_irq_write(i, MPIC_INFO(IRQ_DESTINATION), mpic->save_data[i].dest); #ifdef CONFIG_MPIC_U3_HT_IRQS if (mpic->fixups) { struct mpic_irq_fixup *fixup = &mpic->fixups[i]; if (fixup->base) { /* we use the lowest bit in an inverted meaning */ if ((mpic->save_data[i].fixup_data & 1) == 0) continue; /* Enable and configure */ writeb(0x10 + 2 * fixup->index, fixup->base + 2); writel(mpic->save_data[i].fixup_data & ~1, fixup->base + 4); } } #endif } /* end for loop */ return 0; } #endif static struct sysdev_class mpic_sysclass = { #ifdef CONFIG_PM .resume = mpic_resume, .suspend = mpic_suspend, #endif .name = "mpic", }; static int mpic_init_sys(void) { struct mpic *mpic = mpics; int error, id = 0; error = sysdev_class_register(&mpic_sysclass); while (mpic && !error) { mpic->sysdev.cls = &mpic_sysclass; mpic->sysdev.id = id++; error = sysdev_register(&mpic->sysdev); mpic = mpic->next; } return error; } device_initcall(mpic_init_sys);
gpl-2.0
junmuzi/linux
drivers/video/fbdev/aty/aty128fb.c
134
66845
/* $Id: aty128fb.c,v 1.1.1.1.36.1 1999/12/11 09:03:05 Exp $ * linux/drivers/video/aty128fb.c -- Frame buffer device for ATI Rage128 * * Copyright (C) 1999-2003, Brad Douglas <brad@neruo.com> * Copyright (C) 1999, Anthony Tong <atong@uiuc.edu> * * Ani Joshi / Jeff Garzik * - Code cleanup * * Michel Danzer <michdaen@iiic.ethz.ch> * - 15/16 bit cleanup * - fix panning * * Benjamin Herrenschmidt * - pmac-specific PM stuff * - various fixes & cleanups * * Andreas Hundt <andi@convergence.de> * - FB_ACTIVATE fixes * * Paul Mackerras <paulus@samba.org> * - Convert to new framebuffer API, * fix colormap setting at 16 bits/pixel (565) * * Paul Mundt * - PCI hotplug * * Jon Smirl <jonsmirl@yahoo.com> * - PCI ID update * - replace ROM BIOS search * * Based off of Geert's atyfb.c and vfb.c. * * TODO: * - monitor sensing (DDC) * - virtual display * - other platform support (only ppc/x86 supported) * - hardware cursor support * * Please cc: your patches to brad@neruo.com. */ /* * A special note of gratitude to ATI's devrel for providing documentation, * example code and hardware. Thanks Nitya. -atong and brad */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/mm.h> #include <linux/vmalloc.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/uaccess.h> #include <linux/fb.h> #include <linux/init.h> #include <linux/pci.h> #include <linux/ioport.h> #include <linux/console.h> #include <linux/backlight.h> #include <asm/io.h> #ifdef CONFIG_PPC_PMAC #include <asm/machdep.h> #include <asm/pmac_feature.h> #include <asm/prom.h> #include "../macmodes.h" #endif #ifdef CONFIG_PMAC_BACKLIGHT #include <asm/backlight.h> #endif #ifdef CONFIG_BOOTX_TEXT #include <asm/btext.h> #endif /* CONFIG_BOOTX_TEXT */ #include <video/aty128.h> /* Debug flag */ #undef DEBUG #ifdef DEBUG #define DBG(fmt, args...) 
\ printk(KERN_DEBUG "aty128fb: %s " fmt, __func__, ##args); #else #define DBG(fmt, args...) #endif #ifndef CONFIG_PPC_PMAC /* default mode */ static struct fb_var_screeninfo default_var = { /* 640x480, 60 Hz, Non-Interlaced (25.175 MHz dotclock) */ 640, 480, 640, 480, 0, 0, 8, 0, {0, 8, 0}, {0, 8, 0}, {0, 8, 0}, {0, 0, 0}, 0, 0, -1, -1, 0, 39722, 48, 16, 33, 10, 96, 2, 0, FB_VMODE_NONINTERLACED }; #else /* CONFIG_PPC_PMAC */ /* default to 1024x768 at 75Hz on PPC - this will work * on the iMac, the usual 640x480 @ 60Hz doesn't. */ static struct fb_var_screeninfo default_var = { /* 1024x768, 75 Hz, Non-Interlaced (78.75 MHz dotclock) */ 1024, 768, 1024, 768, 0, 0, 8, 0, {0, 8, 0}, {0, 8, 0}, {0, 8, 0}, {0, 0, 0}, 0, 0, -1, -1, 0, 12699, 160, 32, 28, 1, 96, 3, FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED }; #endif /* CONFIG_PPC_PMAC */ /* default modedb mode */ /* 640x480, 60 Hz, Non-Interlaced (25.172 MHz dotclock) */ static struct fb_videomode defaultmode = { .refresh = 60, .xres = 640, .yres = 480, .pixclock = 39722, .left_margin = 48, .right_margin = 16, .upper_margin = 33, .lower_margin = 10, .hsync_len = 96, .vsync_len = 2, .sync = 0, .vmode = FB_VMODE_NONINTERLACED }; /* Chip generations */ enum { rage_128, rage_128_pci, rage_128_pro, rage_128_pro_pci, rage_M3, rage_M3_pci, rage_M4, rage_128_ultra, }; /* Must match above enum */ static char * const r128_family[] = { "AGP", "PCI", "PRO AGP", "PRO PCI", "M3 AGP", "M3 PCI", "M4 AGP", "Ultra AGP", }; /* * PCI driver prototypes */ static int aty128_probe(struct pci_dev *pdev, const struct pci_device_id *ent); static void aty128_remove(struct pci_dev *pdev); static int aty128_pci_suspend(struct pci_dev *pdev, pm_message_t state); static int aty128_pci_resume(struct pci_dev *pdev); static int aty128_do_resume(struct pci_dev *pdev); /* supported Rage128 chipsets */ static struct pci_device_id aty128_pci_tbl[] = { { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_LE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 
rage_M3_pci }, { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_LF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, rage_M3 }, { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_MF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, rage_M4 }, { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_ML, PCI_ANY_ID, PCI_ANY_ID, 0, 0, rage_M4 }, { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_PA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, rage_128_pro }, { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_PB, PCI_ANY_ID, PCI_ANY_ID, 0, 0, rage_128_pro }, { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_PC, PCI_ANY_ID, PCI_ANY_ID, 0, 0, rage_128_pro }, { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_PD, PCI_ANY_ID, PCI_ANY_ID, 0, 0, rage_128_pro_pci }, { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_PE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, rage_128_pro }, { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_PF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, rage_128_pro }, { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_PG, PCI_ANY_ID, PCI_ANY_ID, 0, 0, rage_128_pro }, { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_PH, PCI_ANY_ID, PCI_ANY_ID, 0, 0, rage_128_pro }, { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_PI, PCI_ANY_ID, PCI_ANY_ID, 0, 0, rage_128_pro }, { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_PJ, PCI_ANY_ID, PCI_ANY_ID, 0, 0, rage_128_pro }, { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_PK, PCI_ANY_ID, PCI_ANY_ID, 0, 0, rage_128_pro }, { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_PL, PCI_ANY_ID, PCI_ANY_ID, 0, 0, rage_128_pro }, { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_PM, PCI_ANY_ID, PCI_ANY_ID, 0, 0, rage_128_pro }, { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_PN, PCI_ANY_ID, PCI_ANY_ID, 0, 0, rage_128_pro }, { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_PO, PCI_ANY_ID, PCI_ANY_ID, 0, 0, rage_128_pro }, { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_PP, PCI_ANY_ID, PCI_ANY_ID, 0, 0, rage_128_pro_pci }, { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_PQ, PCI_ANY_ID, PCI_ANY_ID, 0, 0, rage_128_pro }, { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_PR, 
PCI_ANY_ID, PCI_ANY_ID, 0, 0, rage_128_pro_pci }, { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_PS, PCI_ANY_ID, PCI_ANY_ID, 0, 0, rage_128_pro }, { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_PT, PCI_ANY_ID, PCI_ANY_ID, 0, 0, rage_128_pro }, { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_PU, PCI_ANY_ID, PCI_ANY_ID, 0, 0, rage_128_pro }, { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_PV, PCI_ANY_ID, PCI_ANY_ID, 0, 0, rage_128_pro }, { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_PW, PCI_ANY_ID, PCI_ANY_ID, 0, 0, rage_128_pro }, { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_PX, PCI_ANY_ID, PCI_ANY_ID, 0, 0, rage_128_pro }, { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_RE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, rage_128_pci }, { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_RF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, rage_128 }, { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_RG, PCI_ANY_ID, PCI_ANY_ID, 0, 0, rage_128 }, { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_RK, PCI_ANY_ID, PCI_ANY_ID, 0, 0, rage_128_pci }, { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_RL, PCI_ANY_ID, PCI_ANY_ID, 0, 0, rage_128 }, { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_SE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, rage_128 }, { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_SF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, rage_128_pci }, { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_SG, PCI_ANY_ID, PCI_ANY_ID, 0, 0, rage_128 }, { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_SH, PCI_ANY_ID, PCI_ANY_ID, 0, 0, rage_128 }, { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_SK, PCI_ANY_ID, PCI_ANY_ID, 0, 0, rage_128 }, { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_SL, PCI_ANY_ID, PCI_ANY_ID, 0, 0, rage_128 }, { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_SM, PCI_ANY_ID, PCI_ANY_ID, 0, 0, rage_128 }, { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_SN, PCI_ANY_ID, PCI_ANY_ID, 0, 0, rage_128 }, { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_TF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, rage_128_ultra }, { PCI_VENDOR_ID_ATI, 
PCI_DEVICE_ID_ATI_RAGE128_TL, PCI_ANY_ID, PCI_ANY_ID, 0, 0, rage_128_ultra }, { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_TR, PCI_ANY_ID, PCI_ANY_ID, 0, 0, rage_128_ultra }, { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_TS, PCI_ANY_ID, PCI_ANY_ID, 0, 0, rage_128_ultra }, { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_TT, PCI_ANY_ID, PCI_ANY_ID, 0, 0, rage_128_ultra }, { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_TU, PCI_ANY_ID, PCI_ANY_ID, 0, 0, rage_128_ultra }, { 0, } }; MODULE_DEVICE_TABLE(pci, aty128_pci_tbl); static struct pci_driver aty128fb_driver = { .name = "aty128fb", .id_table = aty128_pci_tbl, .probe = aty128_probe, .remove = aty128_remove, .suspend = aty128_pci_suspend, .resume = aty128_pci_resume, }; /* packed BIOS settings */ #ifndef CONFIG_PPC typedef struct { u8 clock_chip_type; u8 struct_size; u8 accelerator_entry; u8 VGA_entry; u16 VGA_table_offset; u16 POST_table_offset; u16 XCLK; u16 MCLK; u8 num_PLL_blocks; u8 size_PLL_blocks; u16 PCLK_ref_freq; u16 PCLK_ref_divider; u32 PCLK_min_freq; u32 PCLK_max_freq; u16 MCLK_ref_freq; u16 MCLK_ref_divider; u32 MCLK_min_freq; u32 MCLK_max_freq; u16 XCLK_ref_freq; u16 XCLK_ref_divider; u32 XCLK_min_freq; u32 XCLK_max_freq; } __attribute__ ((packed)) PLL_BLOCK; #endif /* !CONFIG_PPC */ /* onboard memory information */ struct aty128_meminfo { u8 ML; u8 MB; u8 Trcd; u8 Trp; u8 Twr; u8 CL; u8 Tr2w; u8 LoopLatency; u8 DspOn; u8 Rloop; const char *name; }; /* various memory configurations */ static const struct aty128_meminfo sdr_128 = { .ML = 4, .MB = 4, .Trcd = 3, .Trp = 3, .Twr = 1, .CL = 3, .Tr2w = 1, .LoopLatency = 16, .DspOn = 30, .Rloop = 16, .name = "128-bit SDR SGRAM (1:1)", }; static const struct aty128_meminfo sdr_64 = { .ML = 4, .MB = 8, .Trcd = 3, .Trp = 3, .Twr = 1, .CL = 3, .Tr2w = 1, .LoopLatency = 17, .DspOn = 46, .Rloop = 17, .name = "64-bit SDR SGRAM (1:1)", }; static const struct aty128_meminfo sdr_sgram = { .ML = 4, .MB = 4, .Trcd = 1, .Trp = 2, .Twr = 1, .CL = 2, .Tr2w = 1, 
.LoopLatency = 16, .DspOn = 24, .Rloop = 16, .name = "64-bit SDR SGRAM (2:1)", }; static const struct aty128_meminfo ddr_sgram = { .ML = 4, .MB = 4, .Trcd = 3, .Trp = 3, .Twr = 2, .CL = 3, .Tr2w = 1, .LoopLatency = 16, .DspOn = 31, .Rloop = 16, .name = "64-bit DDR SGRAM", }; static struct fb_fix_screeninfo aty128fb_fix = { .id = "ATY Rage128", .type = FB_TYPE_PACKED_PIXELS, .visual = FB_VISUAL_PSEUDOCOLOR, .xpanstep = 8, .ypanstep = 1, .mmio_len = 0x2000, .accel = FB_ACCEL_ATI_RAGE128, }; static char *mode_option = NULL; #ifdef CONFIG_PPC_PMAC static int default_vmode = VMODE_1024_768_60; static int default_cmode = CMODE_8; #endif static int default_crt_on = 0; static int default_lcd_on = 1; static bool mtrr = true; #ifdef CONFIG_FB_ATY128_BACKLIGHT #ifdef CONFIG_PMAC_BACKLIGHT static int backlight = 1; #else static int backlight = 0; #endif #endif /* PLL constants */ struct aty128_constants { u32 ref_clk; u32 ppll_min; u32 ppll_max; u32 ref_divider; u32 xclk; u32 fifo_width; u32 fifo_depth; }; struct aty128_crtc { u32 gen_cntl; u32 h_total, h_sync_strt_wid; u32 v_total, v_sync_strt_wid; u32 pitch; u32 offset, offset_cntl; u32 xoffset, yoffset; u32 vxres, vyres; u32 depth, bpp; }; struct aty128_pll { u32 post_divider; u32 feedback_divider; u32 vclk; }; struct aty128_ddafifo { u32 dda_config; u32 dda_on_off; }; /* register values for a specific mode */ struct aty128fb_par { struct aty128_crtc crtc; struct aty128_pll pll; struct aty128_ddafifo fifo_reg; u32 accel_flags; struct aty128_constants constants; /* PLL and others */ void __iomem *regbase; /* remapped mmio */ u32 vram_size; /* onboard video ram */ int chip_gen; const struct aty128_meminfo *mem; /* onboard mem info */ int wc_cookie; int blitter_may_be_busy; int fifo_slots; /* free slots in FIFO (64 max) */ int crt_on, lcd_on; struct pci_dev *pdev; struct fb_info *next; int asleep; int lock_blank; u8 red[32]; /* see aty128fb_setcolreg */ u8 green[64]; u8 blue[32]; u32 pseudo_palette[16]; /* used for TRUECOLOR 
*/ }; #define round_div(n, d) ((n+(d/2))/d) static int aty128fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info); static int aty128fb_set_par(struct fb_info *info); static int aty128fb_setcolreg(u_int regno, u_int red, u_int green, u_int blue, u_int transp, struct fb_info *info); static int aty128fb_pan_display(struct fb_var_screeninfo *var, struct fb_info *fb); static int aty128fb_blank(int blank, struct fb_info *fb); static int aty128fb_ioctl(struct fb_info *info, u_int cmd, unsigned long arg); static int aty128fb_sync(struct fb_info *info); /* * Internal routines */ static int aty128_encode_var(struct fb_var_screeninfo *var, const struct aty128fb_par *par); static int aty128_decode_var(struct fb_var_screeninfo *var, struct aty128fb_par *par); #if 0 static void aty128_get_pllinfo(struct aty128fb_par *par, void __iomem *bios); static void __iomem *aty128_map_ROM(struct pci_dev *pdev, const struct aty128fb_par *par); #endif static void aty128_timings(struct aty128fb_par *par); static void aty128_init_engine(struct aty128fb_par *par); static void aty128_reset_engine(const struct aty128fb_par *par); static void aty128_flush_pixel_cache(const struct aty128fb_par *par); static void do_wait_for_fifo(u16 entries, struct aty128fb_par *par); static void wait_for_fifo(u16 entries, struct aty128fb_par *par); static void wait_for_idle(struct aty128fb_par *par); static u32 depth_to_dst(u32 depth); #ifdef CONFIG_FB_ATY128_BACKLIGHT static void aty128_bl_set_power(struct fb_info *info, int power); #endif #define BIOS_IN8(v) (readb(bios + (v))) #define BIOS_IN16(v) (readb(bios + (v)) | \ (readb(bios + (v) + 1) << 8)) #define BIOS_IN32(v) (readb(bios + (v)) | \ (readb(bios + (v) + 1) << 8) | \ (readb(bios + (v) + 2) << 16) | \ (readb(bios + (v) + 3) << 24)) static struct fb_ops aty128fb_ops = { .owner = THIS_MODULE, .fb_check_var = aty128fb_check_var, .fb_set_par = aty128fb_set_par, .fb_setcolreg = aty128fb_setcolreg, .fb_pan_display = aty128fb_pan_display, .fb_blank 
= aty128fb_blank, .fb_ioctl = aty128fb_ioctl, .fb_sync = aty128fb_sync, .fb_fillrect = cfb_fillrect, .fb_copyarea = cfb_copyarea, .fb_imageblit = cfb_imageblit, }; /* * Functions to read from/write to the mmio registers * - endian conversions may possibly be avoided by * using the other register aperture. TODO. */ static inline u32 _aty_ld_le32(volatile unsigned int regindex, const struct aty128fb_par *par) { return readl (par->regbase + regindex); } static inline void _aty_st_le32(volatile unsigned int regindex, u32 val, const struct aty128fb_par *par) { writel (val, par->regbase + regindex); } static inline u8 _aty_ld_8(unsigned int regindex, const struct aty128fb_par *par) { return readb (par->regbase + regindex); } static inline void _aty_st_8(unsigned int regindex, u8 val, const struct aty128fb_par *par) { writeb (val, par->regbase + regindex); } #define aty_ld_le32(regindex) _aty_ld_le32(regindex, par) #define aty_st_le32(regindex, val) _aty_st_le32(regindex, val, par) #define aty_ld_8(regindex) _aty_ld_8(regindex, par) #define aty_st_8(regindex, val) _aty_st_8(regindex, val, par) /* * Functions to read from/write to the pll registers */ #define aty_ld_pll(pll_index) _aty_ld_pll(pll_index, par) #define aty_st_pll(pll_index, val) _aty_st_pll(pll_index, val, par) static u32 _aty_ld_pll(unsigned int pll_index, const struct aty128fb_par *par) { aty_st_8(CLOCK_CNTL_INDEX, pll_index & 0x3F); return aty_ld_le32(CLOCK_CNTL_DATA); } static void _aty_st_pll(unsigned int pll_index, u32 val, const struct aty128fb_par *par) { aty_st_8(CLOCK_CNTL_INDEX, (pll_index & 0x3F) | PLL_WR_EN); aty_st_le32(CLOCK_CNTL_DATA, val); } /* return true when the PLL has completed an atomic update */ static int aty_pll_readupdate(const struct aty128fb_par *par) { return !(aty_ld_pll(PPLL_REF_DIV) & PPLL_ATOMIC_UPDATE_R); } static void aty_pll_wait_readupdate(const struct aty128fb_par *par) { unsigned long timeout = jiffies + HZ/100; // should be more than enough int reset = 1; while 
(time_before(jiffies, timeout)) if (aty_pll_readupdate(par)) { reset = 0; break; } if (reset) /* reset engine?? */ printk(KERN_DEBUG "aty128fb: PLL write timeout!\n"); } /* tell PLL to update */ static void aty_pll_writeupdate(const struct aty128fb_par *par) { aty_pll_wait_readupdate(par); aty_st_pll(PPLL_REF_DIV, aty_ld_pll(PPLL_REF_DIV) | PPLL_ATOMIC_UPDATE_W); } /* write to the scratch register to test r/w functionality */ static int register_test(const struct aty128fb_par *par) { u32 val; int flag = 0; val = aty_ld_le32(BIOS_0_SCRATCH); aty_st_le32(BIOS_0_SCRATCH, 0x55555555); if (aty_ld_le32(BIOS_0_SCRATCH) == 0x55555555) { aty_st_le32(BIOS_0_SCRATCH, 0xAAAAAAAA); if (aty_ld_le32(BIOS_0_SCRATCH) == 0xAAAAAAAA) flag = 1; } aty_st_le32(BIOS_0_SCRATCH, val); // restore value return flag; } /* * Accelerator engine functions */ static void do_wait_for_fifo(u16 entries, struct aty128fb_par *par) { int i; for (;;) { for (i = 0; i < 2000000; i++) { par->fifo_slots = aty_ld_le32(GUI_STAT) & 0x0fff; if (par->fifo_slots >= entries) return; } aty128_reset_engine(par); } } static void wait_for_idle(struct aty128fb_par *par) { int i; do_wait_for_fifo(64, par); for (;;) { for (i = 0; i < 2000000; i++) { if (!(aty_ld_le32(GUI_STAT) & (1 << 31))) { aty128_flush_pixel_cache(par); par->blitter_may_be_busy = 0; return; } } aty128_reset_engine(par); } } static void wait_for_fifo(u16 entries, struct aty128fb_par *par) { if (par->fifo_slots < entries) do_wait_for_fifo(64, par); par->fifo_slots -= entries; } static void aty128_flush_pixel_cache(const struct aty128fb_par *par) { int i; u32 tmp; tmp = aty_ld_le32(PC_NGUI_CTLSTAT); tmp &= ~(0x00ff); tmp |= 0x00ff; aty_st_le32(PC_NGUI_CTLSTAT, tmp); for (i = 0; i < 2000000; i++) if (!(aty_ld_le32(PC_NGUI_CTLSTAT) & PC_BUSY)) break; } static void aty128_reset_engine(const struct aty128fb_par *par) { u32 gen_reset_cntl, clock_cntl_index, mclk_cntl; aty128_flush_pixel_cache(par); clock_cntl_index = aty_ld_le32(CLOCK_CNTL_INDEX); mclk_cntl = 
aty_ld_pll(MCLK_CNTL); aty_st_pll(MCLK_CNTL, mclk_cntl | 0x00030000); gen_reset_cntl = aty_ld_le32(GEN_RESET_CNTL); aty_st_le32(GEN_RESET_CNTL, gen_reset_cntl | SOFT_RESET_GUI); aty_ld_le32(GEN_RESET_CNTL); aty_st_le32(GEN_RESET_CNTL, gen_reset_cntl & ~(SOFT_RESET_GUI)); aty_ld_le32(GEN_RESET_CNTL); aty_st_pll(MCLK_CNTL, mclk_cntl); aty_st_le32(CLOCK_CNTL_INDEX, clock_cntl_index); aty_st_le32(GEN_RESET_CNTL, gen_reset_cntl); /* use old pio mode */ aty_st_le32(PM4_BUFFER_CNTL, PM4_BUFFER_CNTL_NONPM4); DBG("engine reset"); } static void aty128_init_engine(struct aty128fb_par *par) { u32 pitch_value; wait_for_idle(par); /* 3D scaler not spoken here */ wait_for_fifo(1, par); aty_st_le32(SCALE_3D_CNTL, 0x00000000); aty128_reset_engine(par); pitch_value = par->crtc.pitch; if (par->crtc.bpp == 24) { pitch_value = pitch_value * 3; } wait_for_fifo(4, par); /* setup engine offset registers */ aty_st_le32(DEFAULT_OFFSET, 0x00000000); /* setup engine pitch registers */ aty_st_le32(DEFAULT_PITCH, pitch_value); /* set the default scissor register to max dimensions */ aty_st_le32(DEFAULT_SC_BOTTOM_RIGHT, (0x1FFF << 16) | 0x1FFF); /* set the drawing controls registers */ aty_st_le32(DP_GUI_MASTER_CNTL, GMC_SRC_PITCH_OFFSET_DEFAULT | GMC_DST_PITCH_OFFSET_DEFAULT | GMC_SRC_CLIP_DEFAULT | GMC_DST_CLIP_DEFAULT | GMC_BRUSH_SOLIDCOLOR | (depth_to_dst(par->crtc.depth) << 8) | GMC_SRC_DSTCOLOR | GMC_BYTE_ORDER_MSB_TO_LSB | GMC_DP_CONVERSION_TEMP_6500 | ROP3_PATCOPY | GMC_DP_SRC_RECT | GMC_3D_FCN_EN_CLR | GMC_DST_CLR_CMP_FCN_CLEAR | GMC_AUX_CLIP_CLEAR | GMC_WRITE_MASK_SET); wait_for_fifo(8, par); /* clear the line drawing registers */ aty_st_le32(DST_BRES_ERR, 0); aty_st_le32(DST_BRES_INC, 0); aty_st_le32(DST_BRES_DEC, 0); /* set brush color registers */ aty_st_le32(DP_BRUSH_FRGD_CLR, 0xFFFFFFFF); /* white */ aty_st_le32(DP_BRUSH_BKGD_CLR, 0x00000000); /* black */ /* set source color registers */ aty_st_le32(DP_SRC_FRGD_CLR, 0xFFFFFFFF); /* white */ aty_st_le32(DP_SRC_BKGD_CLR, 
0x00000000); /* black */ /* default write mask */ aty_st_le32(DP_WRITE_MASK, 0xFFFFFFFF); /* Wait for all the writes to be completed before returning */ wait_for_idle(par); } /* convert depth values to their register representation */ static u32 depth_to_dst(u32 depth) { if (depth <= 8) return DST_8BPP; else if (depth <= 15) return DST_15BPP; else if (depth == 16) return DST_16BPP; else if (depth <= 24) return DST_24BPP; else if (depth <= 32) return DST_32BPP; return -EINVAL; } /* * PLL informations retreival */ #ifndef __sparc__ static void __iomem *aty128_map_ROM(const struct aty128fb_par *par, struct pci_dev *dev) { u16 dptr; u8 rom_type; void __iomem *bios; size_t rom_size; /* Fix from ATI for problem with Rage128 hardware not leaving ROM enabled */ unsigned int temp; temp = aty_ld_le32(RAGE128_MPP_TB_CONFIG); temp &= 0x00ffffffu; temp |= 0x04 << 24; aty_st_le32(RAGE128_MPP_TB_CONFIG, temp); temp = aty_ld_le32(RAGE128_MPP_TB_CONFIG); bios = pci_map_rom(dev, &rom_size); if (!bios) { printk(KERN_ERR "aty128fb: ROM failed to map\n"); return NULL; } /* Very simple test to make sure it appeared */ if (BIOS_IN16(0) != 0xaa55) { printk(KERN_DEBUG "aty128fb: Invalid ROM signature %x should " " be 0xaa55\n", BIOS_IN16(0)); goto failed; } /* Look for the PCI data to check the ROM type */ dptr = BIOS_IN16(0x18); /* Check the PCI data signature. If it's wrong, we still assume a normal * x86 ROM for now, until I've verified this works everywhere. * The goal here is more to phase out Open Firmware images. * * Currently, we only look at the first PCI data, we could iteratre and * deal with them all, and we should use fb_bios_start relative to start * of image and not relative start of ROM, but so far, I never found a * dual-image ATI card. 
* * typedef struct { * u32 signature; + 0x00 * u16 vendor; + 0x04 * u16 device; + 0x06 * u16 reserved_1; + 0x08 * u16 dlen; + 0x0a * u8 drevision; + 0x0c * u8 class_hi; + 0x0d * u16 class_lo; + 0x0e * u16 ilen; + 0x10 * u16 irevision; + 0x12 * u8 type; + 0x14 * u8 indicator; + 0x15 * u16 reserved_2; + 0x16 * } pci_data_t; */ if (BIOS_IN32(dptr) != (('R' << 24) | ('I' << 16) | ('C' << 8) | 'P')) { printk(KERN_WARNING "aty128fb: PCI DATA signature in ROM incorrect: %08x\n", BIOS_IN32(dptr)); goto anyway; } rom_type = BIOS_IN8(dptr + 0x14); switch(rom_type) { case 0: printk(KERN_INFO "aty128fb: Found Intel x86 BIOS ROM Image\n"); break; case 1: printk(KERN_INFO "aty128fb: Found Open Firmware ROM Image\n"); goto failed; case 2: printk(KERN_INFO "aty128fb: Found HP PA-RISC ROM Image\n"); goto failed; default: printk(KERN_INFO "aty128fb: Found unknown type %d ROM Image\n", rom_type); goto failed; } anyway: return bios; failed: pci_unmap_rom(dev, bios); return NULL; } static void aty128_get_pllinfo(struct aty128fb_par *par, unsigned char __iomem *bios) { unsigned int bios_hdr; unsigned int bios_pll; bios_hdr = BIOS_IN16(0x48); bios_pll = BIOS_IN16(bios_hdr + 0x30); par->constants.ppll_max = BIOS_IN32(bios_pll + 0x16); par->constants.ppll_min = BIOS_IN32(bios_pll + 0x12); par->constants.xclk = BIOS_IN16(bios_pll + 0x08); par->constants.ref_divider = BIOS_IN16(bios_pll + 0x10); par->constants.ref_clk = BIOS_IN16(bios_pll + 0x0e); DBG("ppll_max %d ppll_min %d xclk %d ref_divider %d ref clock %d\n", par->constants.ppll_max, par->constants.ppll_min, par->constants.xclk, par->constants.ref_divider, par->constants.ref_clk); } #ifdef CONFIG_X86 static void __iomem *aty128_find_mem_vbios(struct aty128fb_par *par) { /* I simplified this code as we used to miss the signatures in * a lot of case. It's now closer to XFree, we just don't check * for signatures at all... 
Something better will have to be done * if we end up having conflicts */ u32 segstart; unsigned char __iomem *rom_base = NULL; for (segstart=0x000c0000; segstart<0x000f0000; segstart+=0x00001000) { rom_base = ioremap(segstart, 0x10000); if (rom_base == NULL) return NULL; if (readb(rom_base) == 0x55 && readb(rom_base + 1) == 0xaa) break; iounmap(rom_base); rom_base = NULL; } return rom_base; } #endif #endif /* ndef(__sparc__) */ /* fill in known card constants if pll_block is not available */ static void aty128_timings(struct aty128fb_par *par) { #ifdef CONFIG_PPC /* instead of a table lookup, assume OF has properly * setup the PLL registers and use their values * to set the XCLK values and reference divider values */ u32 x_mpll_ref_fb_div; u32 xclk_cntl; u32 Nx, M; unsigned PostDivSet[] = { 0, 1, 2, 4, 8, 3, 6, 12 }; #endif if (!par->constants.ref_clk) par->constants.ref_clk = 2950; #ifdef CONFIG_PPC x_mpll_ref_fb_div = aty_ld_pll(X_MPLL_REF_FB_DIV); xclk_cntl = aty_ld_pll(XCLK_CNTL) & 0x7; Nx = (x_mpll_ref_fb_div & 0x00ff00) >> 8; M = x_mpll_ref_fb_div & 0x0000ff; par->constants.xclk = round_div((2 * Nx * par->constants.ref_clk), (M * PostDivSet[xclk_cntl])); par->constants.ref_divider = aty_ld_pll(PPLL_REF_DIV) & PPLL_REF_DIV_MASK; #endif if (!par->constants.ref_divider) { par->constants.ref_divider = 0x3b; aty_st_pll(X_MPLL_REF_FB_DIV, 0x004c4c1e); aty_pll_writeupdate(par); } aty_st_pll(PPLL_REF_DIV, par->constants.ref_divider); aty_pll_writeupdate(par); /* from documentation */ if (!par->constants.ppll_min) par->constants.ppll_min = 12500; if (!par->constants.ppll_max) par->constants.ppll_max = 25000; /* 23000 on some cards? 
							 */
	if (!par->constants.xclk)
		par->constants.xclk = 0x1d4d;	/* same as mclk */

	par->constants.fifo_width = 128;
	par->constants.fifo_depth = 32;

	/* pick the memory timing table matching the controller config */
	switch (aty_ld_le32(MEM_CNTL) & 0x3) {
	case 0:
		par->mem = &sdr_128;
		break;
	case 1:
		par->mem = &sdr_sgram;
		break;
	case 2:
		par->mem = &ddr_sgram;
		break;
	default:
		par->mem = &sdr_sgram;
	}
}


/*
 * CRTC programming
 */

/* Program the CRTC registers */
static void aty128_set_crtc(const struct aty128_crtc *crtc,
			    const struct aty128fb_par *par)
{
	aty_st_le32(CRTC_GEN_CNTL, crtc->gen_cntl);
	aty_st_le32(CRTC_H_TOTAL_DISP, crtc->h_total);
	aty_st_le32(CRTC_H_SYNC_STRT_WID, crtc->h_sync_strt_wid);
	aty_st_le32(CRTC_V_TOTAL_DISP, crtc->v_total);
	aty_st_le32(CRTC_V_SYNC_STRT_WID, crtc->v_sync_strt_wid);
	aty_st_le32(CRTC_PITCH, crtc->pitch);
	aty_st_le32(CRTC_OFFSET, crtc->offset);
	aty_st_le32(CRTC_OFFSET_CNTL, crtc->offset_cntl);
	/* Disable ATOMIC updating.  Is this the right place? */
	aty_st_pll(PPLL_CNTL, aty_ld_pll(PPLL_CNTL) & ~(0x00030000));
}

/* Convert an fb_var_screeninfo mode description into CRTC register
 * values in *crtc.  Returns 0 on success or -EINVAL if the mode cannot
 * be programmed (interlaced, bad depth, too little VRAM, out-of-range
 * timings). */
static int aty128_var_to_crtc(const struct fb_var_screeninfo *var,
			      struct aty128_crtc *crtc,
			      const struct aty128fb_par *par)
{
	u32 xres, yres, vxres, vyres, xoffset, yoffset, bpp, dst;
	u32 left, right, upper, lower, hslen, vslen, sync, vmode;
	u32 h_total, h_disp, h_sync_strt, h_sync_wid, h_sync_pol;
	u32 v_total, v_disp, v_sync_strt, v_sync_wid, v_sync_pol, c_sync;
	u32 depth, bytpp;
	/* bytes-per-pixel indexed by ATI register depth code */
	u8 mode_bytpp[7] = { 0, 0, 1, 2, 2, 3, 4 };

	/* input */
	xres = var->xres;
	yres = var->yres;
	vxres = var->xres_virtual;
	vyres = var->yres_virtual;
	xoffset = var->xoffset;
	yoffset = var->yoffset;
	bpp = var->bits_per_pixel;
	left = var->left_margin;
	right = var->right_margin;
	upper = var->upper_margin;
	lower = var->lower_margin;
	hslen = var->hsync_len;
	vslen = var->vsync_len;
	sync = var->sync;
	vmode = var->vmode;

	/* 16bpp is RGB565 when green has 6 bits, otherwise RGB555 */
	if (bpp != 16)
		depth = bpp;
	else
		depth = (var->green.length == 6) ? 16 : 15;

	/* check for mode eligibility
	 * accept only non interlaced modes */
	if ((vmode & FB_VMODE_MASK) != FB_VMODE_NONINTERLACED)
		return -EINVAL;

	/* convert (and round up) and validate */
	xres = (xres + 7) & ~7;
	xoffset = (xoffset + 7) & ~7;

	if (vxres < xres + xoffset)
		vxres = xres + xoffset;

	if (vyres < yres + yoffset)
		vyres = yres + yoffset;

	/* convert depth into ATI register depth */
	dst = depth_to_dst(depth);

	/* NOTE(review): dst is u32, so this relies on -EINVAL wrapping to
	 * the same bit pattern depth_to_dst() returns on failure */
	if (dst == -EINVAL) {
		printk(KERN_ERR "aty128fb: Invalid depth or RGBA\n");
		return -EINVAL;
	}

	/* convert register depth to bytes per pixel */
	bytpp = mode_bytpp[dst];

	/* make sure there is enough video ram for the mode */
	if ((u32)(vxres * vyres * bytpp) > par->vram_size) {
		printk(KERN_ERR "aty128fb: Not enough memory for mode\n");
		return -EINVAL;
	}

	h_disp = (xres >> 3) - 1;
	h_total = (((xres + right + hslen + left) >> 3) - 1) & 0xFFFFL;

	v_disp = yres - 1;
	v_total = (yres + upper + vslen + lower - 1) & 0xFFFFL;

	/* check to make sure h_total and v_total are in range */
	if (((h_total >> 3) - 1) > 0x1ff || (v_total - 1) > 0x7FF) {
		printk(KERN_ERR "aty128fb: invalid width ranges\n");
		return -EINVAL;
	}

	h_sync_wid = (hslen + 7) >> 3;
	if (h_sync_wid == 0)
		h_sync_wid = 1;
	else if (h_sync_wid > 0x3f)	/* 0x3f = max hwidth */
		h_sync_wid = 0x3f;

	h_sync_strt = (h_disp << 3) + right;

	v_sync_wid = vslen;
	if (v_sync_wid == 0)
		v_sync_wid = 1;
	else if (v_sync_wid > 0x1f)	/* 0x1f = max vwidth */
		v_sync_wid = 0x1f;

	v_sync_strt = v_disp + lower;

	/* register polarity bits are active-low */
	h_sync_pol = sync & FB_SYNC_HOR_HIGH_ACT ? 0 : 1;
	v_sync_pol = sync & FB_SYNC_VERT_HIGH_ACT ? 0 : 1;

	c_sync = sync & FB_SYNC_COMP_HIGH_ACT ? (1 << 4) : 0;

	crtc->gen_cntl = 0x3000000L | c_sync | (dst << 8);

	crtc->h_total = h_total | (h_disp << 16);
	crtc->v_total = v_total | (v_disp << 16);

	crtc->h_sync_strt_wid = h_sync_strt | (h_sync_wid << 16) |
		(h_sync_pol << 23);
	crtc->v_sync_strt_wid = v_sync_strt | (v_sync_wid << 16) |
		(v_sync_pol << 23);

	crtc->pitch = vxres >> 3;

	crtc->offset = 0;

	if ((var->activate & FB_ACTIVATE_MASK) == FB_ACTIVATE_NOW)
		crtc->offset_cntl = 0x00010000;
	else
		crtc->offset_cntl = 0;

	crtc->vxres = vxres;
	crtc->vyres = vyres;
	crtc->xoffset = xoffset;
	crtc->yoffset = yoffset;
	crtc->depth = depth;
	crtc->bpp = bpp;

	return 0;
}

/* Fill in the RGBA bitfield layout of *var for the given CRTC pixel
 * width code.  Returns 0 or -EINVAL for an unknown width. */
static int aty128_pix_width_to_var(int pix_width,
				   struct fb_var_screeninfo *var)
{
	/* fill in pixel info */
	var->red.msb_right = 0;
	var->green.msb_right = 0;
	var->blue.offset = 0;
	var->blue.msb_right = 0;
	var->transp.offset = 0;
	var->transp.length = 0;
	var->transp.msb_right = 0;
	switch (pix_width) {
	case CRTC_PIX_WIDTH_8BPP:
		var->bits_per_pixel = 8;
		var->red.offset = 0;
		var->red.length = 8;
		var->green.offset = 0;
		var->green.length = 8;
		var->blue.length = 8;
		break;
	case CRTC_PIX_WIDTH_15BPP:
		var->bits_per_pixel = 16;
		var->red.offset = 10;
		var->red.length = 5;
		var->green.offset = 5;
		var->green.length = 5;
		var->blue.length = 5;
		break;
	case CRTC_PIX_WIDTH_16BPP:
		var->bits_per_pixel = 16;
		var->red.offset = 11;
		var->red.length = 5;
		var->green.offset = 5;
		var->green.length = 6;
		var->blue.length = 5;
		break;
	case CRTC_PIX_WIDTH_24BPP:
		var->bits_per_pixel = 24;
		var->red.offset = 16;
		var->red.length = 8;
		var->green.offset = 8;
		var->green.length = 8;
		var->blue.length = 8;
		break;
	case CRTC_PIX_WIDTH_32BPP:
		var->bits_per_pixel = 32;
		var->red.offset = 16;
		var->red.length = 8;
		var->green.offset = 8;
		var->green.length = 8;
		var->blue.length = 8;
		var->transp.offset = 24;
		var->transp.length = 8;
		break;
	default:
		printk(KERN_ERR "aty128fb: Invalid pixel width\n");
		return -EINVAL;
	}

	return 0;
}

/* Decode CRTC register values back into an fb_var_screeninfo (the
 * inverse of aty128_var_to_crtc). */
static int aty128_crtc_to_var(const struct aty128_crtc *crtc,
			      struct fb_var_screeninfo *var)
{
	u32 xres, yres, left, right, upper, lower, hslen, vslen, sync;
	u32 h_total, h_disp, h_sync_strt, h_sync_dly, h_sync_wid, h_sync_pol;
	u32 v_total, v_disp, v_sync_strt, v_sync_wid, v_sync_pol, c_sync;
	u32 pix_width;

	/* fun with masking */
	h_total = crtc->h_total & 0x1ff;
	h_disp = (crtc->h_total >> 16) & 0xff;
	h_sync_strt = (crtc->h_sync_strt_wid >> 3) & 0x1ff;
	h_sync_dly = crtc->h_sync_strt_wid & 0x7;
	h_sync_wid = (crtc->h_sync_strt_wid >> 16) & 0x3f;
	h_sync_pol = (crtc->h_sync_strt_wid >> 23) & 0x1;
	v_total = crtc->v_total & 0x7ff;
	v_disp = (crtc->v_total >> 16) & 0x7ff;
	v_sync_strt = crtc->v_sync_strt_wid & 0x7ff;
	v_sync_wid = (crtc->v_sync_strt_wid >> 16) & 0x1f;
	v_sync_pol = (crtc->v_sync_strt_wid >> 23) & 0x1;
	c_sync = crtc->gen_cntl & CRTC_CSYNC_EN ? 1 : 0;
	pix_width = crtc->gen_cntl & CRTC_PIX_WIDTH_MASK;

	/* do conversions */
	xres = (h_disp + 1) << 3;
	yres = v_disp + 1;
	left = ((h_total - h_sync_strt - h_sync_wid) << 3) - h_sync_dly;
	right = ((h_sync_strt - h_disp) << 3) + h_sync_dly;
	hslen = h_sync_wid << 3;
	upper = v_total - v_sync_strt - v_sync_wid;
	lower = v_sync_strt - v_disp;
	vslen = v_sync_wid;
	/* polarity bits are active-low in the registers */
	sync = (h_sync_pol ? 0 : FB_SYNC_HOR_HIGH_ACT) |
		(v_sync_pol ? 0 : FB_SYNC_VERT_HIGH_ACT) |
		(c_sync ? FB_SYNC_COMP_HIGH_ACT : 0);

	aty128_pix_width_to_var(pix_width, var);

	var->xres = xres;
	var->yres = yres;
	var->xres_virtual = crtc->vxres;
	var->yres_virtual = crtc->vyres;
	var->xoffset = crtc->xoffset;
	var->yoffset = crtc->yoffset;
	var->left_margin = left;
	var->right_margin = right;
	var->upper_margin = upper;
	var->lower_margin = lower;
	var->hsync_len = hslen;
	var->vsync_len = vslen;
	var->sync = sync;
	var->vmode = FB_VMODE_NONINTERLACED;

	return 0;
}

/* Turn the CRT output on/off; enabling also turns on palette snooping
 * for the second DAC. */
static void aty128_set_crt_enable(struct aty128fb_par *par, int on)
{
	if (on) {
		aty_st_le32(CRTC_EXT_CNTL, aty_ld_le32(CRTC_EXT_CNTL) |
			    CRT_CRTC_ON);
		aty_st_le32(DAC_CNTL, (aty_ld_le32(DAC_CNTL) |
			    DAC_PALETTE2_SNOOP_EN));
	} else
		aty_st_le32(CRTC_EXT_CNTL, aty_ld_le32(CRTC_EXT_CNTL) &
			    ~CRT_CRTC_ON);
}

/* Turn the LVDS (LCD panel) output on/off, including its backlight when
 * the backlight driver is configured. */
static void aty128_set_lcd_enable(struct aty128fb_par *par, int on)
{
	u32 reg;
#ifdef CONFIG_FB_ATY128_BACKLIGHT
	struct fb_info *info = pci_get_drvdata(par->pdev);
#endif

	if (on) {
		reg = aty_ld_le32(LVDS_GEN_CNTL);
		reg |= LVDS_ON | LVDS_EN | LVDS_BLON | LVDS_DIGION;
		reg &= ~LVDS_DISPLAY_DIS;
		aty_st_le32(LVDS_GEN_CNTL, reg);
#ifdef CONFIG_FB_ATY128_BACKLIGHT
		aty128_bl_set_power(info, FB_BLANK_UNBLANK);
#endif
	} else {
#ifdef CONFIG_FB_ATY128_BACKLIGHT
		aty128_bl_set_power(info, FB_BLANK_POWERDOWN);
#endif
		reg = aty_ld_le32(LVDS_GEN_CNTL);
		reg |= LVDS_DISPLAY_DIS;
		aty_st_le32(LVDS_GEN_CNTL, reg);
		/* give the panel time to blank before dropping LVDS_ON */
		mdelay(100);
		reg &= ~(LVDS_ON /*| LVDS_EN*/);
		aty_st_le32(LVDS_GEN_CNTL, reg);
	}
}

/* Program the pixel PLL (PPLL_DIV_3) with the dividers computed by
 * aty128_var_to_pll().  The write sequence (reset, ref divider,
 * feedback/post dividers, clear reset) is order-sensitive. */
static void aty128_set_pll(struct aty128_pll *pll,
			   const struct aty128fb_par *par)
{
	u32 div3;

	unsigned char post_conv[] =	/* register values for post dividers */
	{ 2, 0, 1, 4, 2, 2, 6, 2, 3, 2, 2, 2, 7 };

	/* select PPLL_DIV_3 */
	aty_st_le32(CLOCK_CNTL_INDEX,
		    aty_ld_le32(CLOCK_CNTL_INDEX) | (3 << 8));

	/* reset PLL */
	aty_st_pll(PPLL_CNTL,
		   aty_ld_pll(PPLL_CNTL) | PPLL_RESET | PPLL_ATOMIC_UPDATE_EN);

	/* write the reference divider */
	aty_pll_wait_readupdate(par);
	aty_st_pll(PPLL_REF_DIV, par->constants.ref_divider & 0x3ff);
	aty_pll_writeupdate(par);

	div3 = aty_ld_pll(PPLL_DIV_3);
	div3 &= ~PPLL_FB3_DIV_MASK;
	div3 |= pll->feedback_divider;

	div3 &= ~PPLL_POST3_DIV_MASK;
	div3 |= post_conv[pll->post_divider] << 16;

	/* write feedback and post dividers */
	aty_pll_wait_readupdate(par);
	aty_st_pll(PPLL_DIV_3, div3);
	aty_pll_writeupdate(par);

	aty_pll_wait_readupdate(par);
	aty_st_pll(HTOTAL_CNTL, 0);	/* no horiz crtc adjustment */
	aty_pll_writeupdate(par);

	/* clear the reset, just in case */
	aty_st_pll(PPLL_CNTL, aty_ld_pll(PPLL_CNTL) & ~PPLL_RESET);
}

/* Pick post and feedback dividers for the requested pixel period
 * (in picoseconds), clamped to the card's PLL range.  Returns 0 or
 * -EINVAL when no post divider brings the VCO into range. */
static int aty128_var_to_pll(u32 period_in_ps, struct aty128_pll *pll,
			     const struct aty128fb_par *par)
{
	const struct aty128_constants c = par->constants;
	unsigned char post_dividers[] = {1,2,4,8,3,6,12};
	u32 output_freq;
	u32 vclk;	/* in .01 MHz */
	int i = 0;
	u32 n, d;

	vclk = 100000000 / period_in_ps;	/* convert units to 10 kHz */

	/* adjust pixel clock if necessary */
	if (vclk > c.ppll_max)
		vclk = c.ppll_max;
	if (vclk * 12 < c.ppll_min)
		vclk = c.ppll_min/12;

	/* now, find an acceptable divider */
	for (i = 0; i < ARRAY_SIZE(post_dividers); i++) {
		output_freq = post_dividers[i] * vclk;
		if (output_freq >= c.ppll_min && output_freq <= c.ppll_max) {
			pll->post_divider = post_dividers[i];
			break;
		}
	}

	if (i == ARRAY_SIZE(post_dividers))
		return -EINVAL;

	/* calculate feedback divider */
	n = c.ref_divider * output_freq;
	d = c.ref_clk;

	pll->feedback_divider = round_div(n, d);
	pll->vclk = vclk;

	DBG("post %d feedback %d vlck %d output %d ref_divider %d "
	    "vclk_per: %d\n", pll->post_divider, pll->feedback_divider,
	    vclk, output_freq, c.ref_divider, period_in_ps);

	return 0;
}

/* Convert a programmed PLL back into the fb pixclock value. */
static int aty128_pll_to_var(const struct aty128_pll *pll,
			     struct fb_var_screeninfo *var)
{
	var->pixclock = 100000000 / pll->vclk;

	return 0;
}

/* Write the precomputed DDA FIFO configuration to the hardware. */
static void aty128_set_fifo(const struct aty128_ddafifo *dsp,
			    const struct aty128fb_par *par)
{
	aty_st_le32(DDA_CONFIG, dsp->dda_config);
	aty_st_le32(DDA_ON_OFF, dsp->dda_on_off);
}

/* Compute the display FIFO (DDA) on/off points for the given mode from
 * the memory timing table, XCLK and pixel clock.  Returns 0 or -EINVAL
 * when the mode is out of the FIFO's range. */
static int aty128_ddafifo(struct aty128_ddafifo *dsp,
			  const struct aty128_pll *pll,
			  u32 depth,
			  const
struct aty128fb_par *par)
{
	const struct aty128_meminfo *m = par->mem;
	u32 xclk = par->constants.xclk;
	u32 fifo_width = par->constants.fifo_width;
	u32 fifo_depth = par->constants.fifo_depth;
	s32 x, b, p, ron, roff;
	u32 n, d, bpp;

	/* round up to multiple of 8 */
	bpp = (depth+7) & ~7;

	n = xclk * fifo_width;
	d = pll->vclk * bpp;
	x = round_div(n, d);

	/* worst-case memory access latency from the timing table */
	ron = 4 * m->MB +
		3 * ((m->Trcd - 2 > 0) ? m->Trcd - 2 : 0) +
		2 * m->Trp +
		m->Twr +
		m->CL +
		m->Tr2w +
		x;

	DBG("x %x\n", x);

	b = 0;
	while (x) {
		x >>= 1;
		b++;
	}
	p = b + 1;

	ron <<= (11 - p);

	n <<= (11 - p);
	x = round_div(n, d);
	roff = x * (fifo_depth - 4);

	if ((ron + m->Rloop) >= roff) {
		printk(KERN_ERR "aty128fb: Mode out of range!\n");
		return -EINVAL;
	}

	DBG("p: %x rloop: %x x: %x ron: %x roff: %x\n",
	    p, m->Rloop, x, ron, roff);

	dsp->dda_config = p << 16 | m->Rloop << 20 | x;
	dsp->dda_on_off = ron << 16 | roff;

	return 0;
}


/*
 * This actually sets the video mode.
 */
static int aty128fb_set_par(struct fb_info *info)
{
	struct aty128fb_par *par = info->par;
	u32 config;
	int err;

	if ((err = aty128_decode_var(&info->var, par)) != 0)
		return err;

	if (par->blitter_may_be_busy)
		wait_for_idle(par);

	/* clear all registers that may interfere with mode setting */
	aty_st_le32(OVR_CLR, 0);
	aty_st_le32(OVR_WID_LEFT_RIGHT, 0);
	aty_st_le32(OVR_WID_TOP_BOTTOM, 0);
	aty_st_le32(OV0_SCALE_CNTL, 0);
	aty_st_le32(MPP_TB_CONFIG, 0);
	aty_st_le32(MPP_GP_CONFIG, 0);
	aty_st_le32(SUBPIC_CNTL, 0);
	aty_st_le32(VIPH_CONTROL, 0);
	aty_st_le32(I2C_CNTL_1, 0);	/* turn off i2c */
	aty_st_le32(GEN_INT_CNTL, 0);	/* turn off interrupts */
	aty_st_le32(CAP0_TRIG_CNTL, 0);
	aty_st_le32(CAP1_TRIG_CNTL, 0);

	aty_st_8(CRTC_EXT_CNTL + 1, 4);	/* turn video off */

	aty128_set_crtc(&par->crtc, par);
	aty128_set_pll(&par->pll, par);
	aty128_set_fifo(&par->fifo_reg, par);

	config = aty_ld_le32(CNFG_CNTL) & ~3;

#if defined(__BIG_ENDIAN)
	if (par->crtc.bpp == 32)
		config |= 2;	/* make aperture do 32 bit swapping */
	else if (par->crtc.bpp == 16)
		config |= 1;	/* make aperture do 16 bit swapping */
#endif

	aty_st_le32(CNFG_CNTL, config);
	aty_st_8(CRTC_EXT_CNTL + 1, 0);	/* turn the video back on */

	info->fix.line_length = (par->crtc.vxres * par->crtc.bpp) >> 3;
	info->fix.visual = par->crtc.bpp == 8 ? FB_VISUAL_PSEUDOCOLOR
		: FB_VISUAL_DIRECTCOLOR;

	if (par->chip_gen == rage_M3) {
		aty128_set_crt_enable(par, par->crt_on);
		aty128_set_lcd_enable(par, par->lcd_on);
	}
	if (par->accel_flags & FB_ACCELF_TEXT)
		aty128_init_engine(par);

#ifdef CONFIG_BOOTX_TEXT
	btext_update_display(info->fix.smem_start,
			     (((par->crtc.h_total>>16) & 0xff)+1)*8,
			     ((par->crtc.v_total>>16) & 0x7ff)+1,
			     par->crtc.bpp,
			     par->crtc.vxres*par->crtc.bpp/8);
#endif /* CONFIG_BOOTX_TEXT */

	return 0;
}

/*
 *  encode/decode the User Defined Part of the Display
 */

/* Validate *var and fill par's crtc/pll/fifo state from it.  par is only
 * modified once all three conversions have succeeded. */
static int aty128_decode_var(struct fb_var_screeninfo *var,
			     struct aty128fb_par *par)
{
	int err;
	struct aty128_crtc crtc;
	struct aty128_pll pll;
	struct aty128_ddafifo fifo_reg;

	if ((err = aty128_var_to_crtc(var, &crtc, par)))
		return err;

	if ((err = aty128_var_to_pll(var->pixclock, &pll, par)))
		return err;

	if ((err = aty128_ddafifo(&fifo_reg, &pll, crtc.depth, par)))
		return err;

	par->crtc = crtc;
	par->pll = pll;
	par->fifo_reg = fifo_reg;
	par->accel_flags = var->accel_flags;

	return 0;
}

/* Reconstruct *var from par's programmed crtc/pll state. */
static int aty128_encode_var(struct fb_var_screeninfo *var,
			     const struct aty128fb_par *par)
{
	int err;

	if ((err = aty128_crtc_to_var(&par->crtc, var)))
		return err;

	if ((err = aty128_pll_to_var(&par->pll, var)))
		return err;

	var->nonstd = 0;
	var->activate = 0;

	var->height = -1;
	var->width = -1;
	var->accel_flags = par->accel_flags;

	return 0;
}

/* fb_ops.fb_check_var: validate a mode against a scratch copy of par
 * without touching the live state. */
static int aty128fb_check_var(struct fb_var_screeninfo *var,
			      struct fb_info *info)
{
	struct aty128fb_par par;
	int err;

	par = *(struct aty128fb_par *)info->par;
	if ((err = aty128_decode_var(var, &par)) != 0)
		return err;
	aty128_encode_var(var, &par);

	return 0;
}


/*
 *  Pan or Wrap the Display
 */
static int aty128fb_pan_display(struct fb_var_screeninfo *var,
				struct fb_info *fb)
{
	struct aty128fb_par *par = fb->par;
	u32 xoffset,
yoffset;
	u32 offset;
	u32 xres, yres;

	xres = (((par->crtc.h_total >> 16) & 0xff) + 1) << 3;
	yres = ((par->crtc.v_total >> 16) & 0x7ff) + 1;

	/* panning offset must be 8-pixel aligned */
	xoffset = (var->xoffset +7) & ~7;
	yoffset = var->yoffset;

	if (xoffset+xres > par->crtc.vxres ||
	    yoffset+yres > par->crtc.vyres)
		return -EINVAL;

	par->crtc.xoffset = xoffset;
	par->crtc.yoffset = yoffset;

	offset = ((yoffset * par->crtc.vxres + xoffset) *
		  (par->crtc.bpp >> 3)) & ~7;

	if (par->crtc.bpp == 24)
		offset += 8 * (offset % 3);	/* Must be multiple of 8 and 3 */

	aty_st_le32(CRTC_OFFSET, offset);

	return 0;
}


/*
 *  Helper function to store a single palette register
 */
static void aty128_st_pal(u_int regno, u_int red, u_int green, u_int blue,
			  struct aty128fb_par *par)
{
	if (par->chip_gen == rage_M3) {
#if 0
		/* Note: For now, on M3, we set palette on both heads, which may
		 * be useless. Can someone with a M3 check this ?
		 *
		 * This code would still be useful if using the second CRTC to
		 * do mirroring
		 */

		aty_st_le32(DAC_CNTL, aty_ld_le32(DAC_CNTL) |
			    DAC_PALETTE_ACCESS_CNTL);
		aty_st_8(PALETTE_INDEX, regno);
		aty_st_le32(PALETTE_DATA, (red<<16)|(green<<8)|blue);
#endif
		/* select the first palette before writing */
		aty_st_le32(DAC_CNTL, aty_ld_le32(DAC_CNTL) &
			    ~DAC_PALETTE_ACCESS_CNTL);
	}

	aty_st_8(PALETTE_INDEX, regno);
	aty_st_le32(PALETTE_DATA, (red<<16)|(green<<8)|blue);
}

/* fb_ops.fb_sync: wait for the 2D engine to go idle before allowing
 * CPU access to the framebuffer. */
static int aty128fb_sync(struct fb_info *info)
{
	struct aty128fb_par *par = info->par;

	if (par->blitter_may_be_busy)
		wait_for_idle(par);

	return 0;
}

#ifndef MODULE
/* Parse the video=aty128fb:... kernel command line options
 * (lcd:, crt:, backlight:, nomtrr, and on pmac vmode:/cmode:). */
static int aty128fb_setup(char *options)
{
	char *this_opt;

	if (!options || !*options)
		return 0;

	while ((this_opt = strsep(&options, ",")) != NULL) {
		if (!strncmp(this_opt, "lcd:", 4)) {
			default_lcd_on = simple_strtoul(this_opt+4, NULL, 0);
			continue;
		} else if (!strncmp(this_opt, "crt:", 4)) {
			default_crt_on = simple_strtoul(this_opt+4, NULL, 0);
			continue;
		} else if (!strncmp(this_opt, "backlight:", 10)) {
#ifdef CONFIG_FB_ATY128_BACKLIGHT
			backlight = simple_strtoul(this_opt+10, NULL, 0);
#endif
			continue;
		}
		if(!strncmp(this_opt, "nomtrr", 6)) {
			mtrr = 0;
			continue;
		}
#ifdef CONFIG_PPC_PMAC
		/* vmode and cmode deprecated */
		if (!strncmp(this_opt, "vmode:", 6)) {
			unsigned int vmode =
				simple_strtoul(this_opt+6, NULL, 0);
			if (vmode > 0 && vmode <= VMODE_MAX)
				default_vmode = vmode;
			continue;
		} else if (!strncmp(this_opt, "cmode:", 6)) {
			unsigned int cmode =
				simple_strtoul(this_opt+6, NULL, 0);
			switch (cmode) {
			case 0:
			case 8:
				default_cmode = CMODE_8;
				break;
			case 15:
			case 16:
				default_cmode = CMODE_16;
				break;
			case 24:
			case 32:
				default_cmode = CMODE_32;
				break;
			}
			continue;
		}
#endif /* CONFIG_PPC_PMAC */
		/* anything unrecognized is taken as the mode string */
		mode_option = this_opt;
	}
	return 0;
}
#endif  /* MODULE */

/* Backlight */
#ifdef CONFIG_FB_ATY128_BACKLIGHT
#define MAX_LEVEL 0xFF

/* Map a backlight level through the fb bl_curve to the inverted
 * hardware duty-cycle value the LVDS register expects, clamped to
 * 0..MAX_LEVEL. */
static int aty128_bl_get_level_brightness(struct aty128fb_par *par,
					  int level)
{
	struct fb_info *info = pci_get_drvdata(par->pdev);
	int atylevel;

	/* Get and convert the value */
	/* No locking of bl_curve since we read a single value */
	atylevel = MAX_LEVEL -
		(info->bl_curve[level] * FB_BACKLIGHT_MAX / MAX_LEVEL);

	if (atylevel < 0)
		atylevel = 0;
	else if (atylevel > MAX_LEVEL)
		atylevel = MAX_LEVEL;

	return atylevel;
}

/* We turn off the LCD completely instead of just dimming the backlight.
 * This provides greater power saving and the display is useless without
 * backlight anyway
 */
#define BACKLIGHT_LVDS_OFF
/* That one prevents proper CRT output with LCD off */
#undef BACKLIGHT_DAC_OFF

/* backlight_ops.update_status: apply the requested brightness/power to
 * LVDS_GEN_CNTL, re-sequencing LVDS power-up when the panel was off. */
static int aty128_bl_update_status(struct backlight_device *bd)
{
	struct aty128fb_par *par = bl_get_data(bd);
	unsigned int reg = aty_ld_le32(LVDS_GEN_CNTL);
	int level;

	if (bd->props.power != FB_BLANK_UNBLANK ||
	    bd->props.fb_blank != FB_BLANK_UNBLANK ||
	    !par->lcd_on)
		level = 0;
	else
		level = bd->props.brightness;

	reg |= LVDS_BL_MOD_EN | LVDS_BLON;
	if (level > 0) {
		reg |= LVDS_DIGION;
		if (!(reg & LVDS_ON)) {
			/* panel was off: settle with backlight off first */
			reg &= ~LVDS_BLON;
			aty_st_le32(LVDS_GEN_CNTL, reg);
			aty_ld_le32(LVDS_GEN_CNTL);	/* posting read */
			mdelay(10);
			reg |= LVDS_BLON;
			aty_st_le32(LVDS_GEN_CNTL, reg);
		}
		reg &= ~LVDS_BL_MOD_LEVEL_MASK;
		reg |= (aty128_bl_get_level_brightness(par, level) <<
			LVDS_BL_MOD_LEVEL_SHIFT);
#ifdef BACKLIGHT_LVDS_OFF
		reg |= LVDS_ON | LVDS_EN;
		reg &= ~LVDS_DISPLAY_DIS;
#endif
		aty_st_le32(LVDS_GEN_CNTL, reg);
#ifdef BACKLIGHT_DAC_OFF
		aty_st_le32(DAC_CNTL, aty_ld_le32(DAC_CNTL) & (~DAC_PDWN));
#endif
	} else {
		reg &= ~LVDS_BL_MOD_LEVEL_MASK;
		reg |= (aty128_bl_get_level_brightness(par, 0) <<
			LVDS_BL_MOD_LEVEL_SHIFT);
#ifdef BACKLIGHT_LVDS_OFF
		reg |= LVDS_DISPLAY_DIS;
		aty_st_le32(LVDS_GEN_CNTL, reg);
		aty_ld_le32(LVDS_GEN_CNTL);	/* posting read */
		udelay(10);
		reg &= ~(LVDS_ON | LVDS_EN | LVDS_BLON | LVDS_DIGION);
#endif
		aty_st_le32(LVDS_GEN_CNTL, reg);
#ifdef BACKLIGHT_DAC_OFF
		aty_st_le32(DAC_CNTL, aty_ld_le32(DAC_CNTL) | DAC_PDWN);
#endif
	}

	return 0;
}

static const struct backlight_ops aty128_bl_data = {
	.update_status	= aty128_bl_update_status,
};

/* Set the backlight device power state and push it to the hardware. */
static void aty128_bl_set_power(struct fb_info *info, int power)
{
	if (info->bl_dev) {
		info->bl_dev->props.power = power;
		backlight_update_status(info->bl_dev);
	}
}

/* Register a backlight device; mobility (rage_M3) chips only.  On pmac
 * we additionally require an "ati" backlight type. */
static void aty128_bl_init(struct aty128fb_par *par)
{
	struct backlight_properties props;
	struct fb_info *info = pci_get_drvdata(par->pdev);
	struct backlight_device *bd;
	char name[12];

	/* Could be extended to Rage128Pro LVDS output too */
	if (par->chip_gen != rage_M3)
		return;

#ifdef CONFIG_PMAC_BACKLIGHT
	if (!pmac_has_backlight_type("ati"))
		return;
#endif

	snprintf(name, sizeof(name), "aty128bl%d", info->node);

	memset(&props, 0, sizeof(struct backlight_properties));
	props.type = BACKLIGHT_RAW;
	props.max_brightness = FB_BACKLIGHT_LEVELS - 1;
	bd = backlight_device_register(name, info->dev, par, &aty128_bl_data,
				       &props);
	if (IS_ERR(bd)) {
		info->bl_dev = NULL;
		printk(KERN_WARNING "aty128: Backlight registration failed\n");
		goto error;
	}

	info->bl_dev = bd;
	fb_bl_default_curve(info, 0,
			    63 * FB_BACKLIGHT_MAX / MAX_LEVEL,
			    219 * FB_BACKLIGHT_MAX / MAX_LEVEL);

	bd->props.brightness = bd->props.max_brightness;
	bd->props.power = FB_BLANK_UNBLANK;
	backlight_update_status(bd);

	printk("aty128: Backlight initialized (%s)\n", name);

	return;

error:
	return;
}

/* Unregister the backlight device registered by aty128_bl_init(). */
static void aty128_bl_exit(struct backlight_device *bd)
{
	backlight_device_unregister(bd);
	printk("aty128: Backlight unloaded\n");
}
#endif /* CONFIG_FB_ATY128_BACKLIGHT */

/*
 *  Initialisation
 */

#ifdef CONFIG_PPC_PMAC__disabled
/* Early-resume hook: restore PCI config space and resume the card under
 * the console lock.  Currently compiled out (see __disabled suffix). */
static void aty128_early_resume(void *data)
{
	struct aty128fb_par *par = data;

	if (!console_trylock())
		return;
	pci_restore_state(par->pdev);
	aty128_do_resume(par->pdev);
	console_unlock();
}
#endif /* CONFIG_PPC_PMAC */

/* Probe-time initialisation: identify the chip, pick a default video
 * mode, program the DAC and register the framebuffer device.
 * Returns 1 on success, 0 on failure (note: NOT the usual 0/-errno). */
static int aty128_init(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct fb_info *info = pci_get_drvdata(pdev);
	struct aty128fb_par *par = info->par;
	struct fb_var_screeninfo var;
	char video_card[50];
	u8 chip_rev;
	u32 dac;

	/* Get the chip revision */
	chip_rev = (aty_ld_le32(CNFG_CNTL) >> 16) & 0x1F;

	/* patch the two 'X' placeholders with the PCI device id bytes */
	strcpy(video_card, "Rage128 XX ");
	video_card[8] = ent->device >> 8;
	video_card[9] = ent->device & 0xFF;

	/* range check to make sure */
	if (ent->driver_data < ARRAY_SIZE(r128_family))
		strlcat(video_card, r128_family[ent->driver_data],
			sizeof(video_card));

	printk(KERN_INFO "aty128fb: %s [chip rev 0x%x] ", video_card, chip_rev);

	if (par->vram_size % (1024 * 1024) == 0)
		printk("%dM %s\n",
par->vram_size / (1024*1024), par->mem->name);
	else
		printk("%dk %s\n", par->vram_size / 1024, par->mem->name);

	par->chip_gen = ent->driver_data;

	/* fill in info */
	info->fbops = &aty128fb_ops;
	info->flags = FBINFO_FLAG_DEFAULT;

	par->lcd_on = default_lcd_on;
	par->crt_on = default_crt_on;

	var = default_var;
#ifdef CONFIG_PPC_PMAC
	if (machine_is(powermac)) {
		/* Indicate sleep capability */
		if (par->chip_gen == rage_M3) {
			pmac_call_feature(PMAC_FTR_DEVICE_CAN_WAKE, NULL, 0, 1);
#if 0 /* Disable the early video resume hack for now as it's causing problems,
       * among others we now rely on the PCI core restoring the config space
       * for us, which isn't the case with that hack, and that code path causes
       * various things to be called with interrupts off while they shouldn't.
       * I'm leaving the code in as it can be useful for debugging purposes
       */
			pmac_set_early_video_resume(aty128_early_resume, par);
#endif
		}

		/* Find default mode */
		if (mode_option) {
			if (!mac_find_mode(&var, info, mode_option, 8))
				var = default_var;
		} else {
			if (default_vmode <= 0 || default_vmode > VMODE_MAX)
				default_vmode = VMODE_1024_768_60;

			/* iMacs need that resolution
			 * PowerMac2,1 first r128 iMacs
			 * PowerMac2,2 summer 2000 iMacs
			 * PowerMac4,1 january 2001 iMacs "flower power"
			 */
			if (of_machine_is_compatible("PowerMac2,1") ||
			    of_machine_is_compatible("PowerMac2,2") ||
			    of_machine_is_compatible("PowerMac4,1"))
				default_vmode = VMODE_1024_768_75;

			/* iBook SE */
			if (of_machine_is_compatible("PowerBook2,2"))
				default_vmode = VMODE_800_600_60;

			/* PowerBook Firewire (Pismo), iBook Dual USB */
			if (of_machine_is_compatible("PowerBook3,1") ||
			    of_machine_is_compatible("PowerBook4,1"))
				default_vmode = VMODE_1024_768_60;

			/* PowerBook Titanium */
			if (of_machine_is_compatible("PowerBook3,2"))
				default_vmode = VMODE_1152_768_60;

			if (default_cmode > 16)
				default_cmode = CMODE_32;
			else if (default_cmode > 8)
				default_cmode = CMODE_16;
			else
				default_cmode = CMODE_8;

			if (mac_vmode_to_var(default_vmode, default_cmode,
					     &var))
				var = default_var;
		}
	} else
#endif /* CONFIG_PPC_PMAC */
	{
		if (mode_option)
			if (fb_find_mode(&var, info, mode_option, NULL,
					 0, &defaultmode, 8) == 0)
				var = default_var;
	}

	var.accel_flags &= ~FB_ACCELF_TEXT;
//	var.accel_flags |= FB_ACCELF_TEXT;/* FIXME Will add accel later */

	if (aty128fb_check_var(&var, info)) {
		printk(KERN_ERR "aty128fb: Cannot set default mode.\n");
		return 0;
	}

	/* setup the DAC the way we like it */
	dac = aty_ld_le32(DAC_CNTL);
	dac |= (DAC_8BIT_EN | DAC_RANGE_CNTL);
	dac |= DAC_MASK;
	if (par->chip_gen == rage_M3)
		dac |= DAC_PALETTE2_SNOOP_EN;
	aty_st_le32(DAC_CNTL, dac);

	/* turn off bus mastering, just in case */
	aty_st_le32(BUS_CNTL, aty_ld_le32(BUS_CNTL) | BUS_MASTER_DIS);

	info->var = var;
	fb_alloc_cmap(&info->cmap, 256, 0);

	var.activate = FB_ACTIVATE_NOW;

	aty128_init_engine(par);

	par->pdev = pdev;
	par->asleep = 0;
	par->lock_blank = 0;

#ifdef CONFIG_FB_ATY128_BACKLIGHT
	if (backlight)
		aty128_bl_init(par);
#endif

	if (register_framebuffer(info) < 0)
		return 0;

	fb_info(info, "%s frame buffer device on %s\n",
		info->fix.id, video_card);

	return 1;	/* success! */
}

#ifdef CONFIG_PCI
/* register a card    ++ajoshi */
static int aty128_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	unsigned long fb_addr, reg_addr;
	struct aty128fb_par *par;
	struct fb_info *info;
	int err;
#ifndef __sparc__
	void __iomem *bios = NULL;
#endif

	/* Enable device in PCI config */
	if ((err = pci_enable_device(pdev))) {
		printk(KERN_ERR "aty128fb: Cannot enable PCI device: %d\n",
				err);
		return -ENODEV;
	}

	fb_addr = pci_resource_start(pdev, 0);
	if (!request_mem_region(fb_addr, pci_resource_len(pdev, 0),
				"aty128fb FB")) {
		printk(KERN_ERR "aty128fb: cannot reserve frame "
				"buffer memory\n");
		return -ENODEV;
	}

	reg_addr = pci_resource_start(pdev, 2);
	if (!request_mem_region(reg_addr, pci_resource_len(pdev, 2),
				"aty128fb MMIO")) {
		printk(KERN_ERR "aty128fb: cannot reserve MMIO region\n");
		goto err_free_fb;
	}

	/* We have the resources.
Now virtualize them */
	info = framebuffer_alloc(sizeof(struct aty128fb_par), &pdev->dev);
	if (info == NULL) {
		printk(KERN_ERR "aty128fb: can't alloc fb_info_aty128\n");
		goto err_free_mmio;
	}
	par = info->par;

	info->pseudo_palette = par->pseudo_palette;

	/* Virtualize mmio region */
	info->fix.mmio_start = reg_addr;
	par->regbase = pci_ioremap_bar(pdev, 2);
	if (!par->regbase)
		goto err_free_info;

	/* Grab memory size from the card */
	// How does this relate to the resource length from the PCI hardware?
	par->vram_size = aty_ld_le32(CNFG_MEMSIZE) & 0x03FFFFFF;

	/* Virtualize the framebuffer */
	info->screen_base = ioremap_wc(fb_addr, par->vram_size);
	if (!info->screen_base)
		goto err_unmap_out;

	/* Set up info->fix */
	info->fix = aty128fb_fix;
	info->fix.smem_start = fb_addr;
	info->fix.smem_len = par->vram_size;
	info->fix.mmio_start = reg_addr;

	/* If we can't test scratch registers, something is seriously wrong */
	if (!register_test(par)) {
		printk(KERN_ERR "aty128fb: Can't write to video register!\n");
		goto err_out;
	}

#ifndef __sparc__
	bios = aty128_map_ROM(par, pdev);
#ifdef CONFIG_X86
	if (bios == NULL)
		bios = aty128_find_mem_vbios(par);
#endif
	if (bios == NULL)
		printk(KERN_INFO "aty128fb: BIOS not located, guessing timings.\n");
	else {
		printk(KERN_INFO "aty128fb: Rage128 BIOS located\n");
		aty128_get_pllinfo(par, bios);
		pci_unmap_rom(pdev, bios);
	}
#endif /* __sparc__ */

	aty128_timings(par);
	pci_set_drvdata(pdev, info);

	if (!aty128_init(pdev, ent))
		goto err_out;

	if (mtrr)
		par->wc_cookie = arch_phys_wc_add(info->fix.smem_start,
						  par->vram_size);
	return 0;

	/* unwind in reverse order of acquisition */
err_out:
	iounmap(info->screen_base);
err_unmap_out:
	iounmap(par->regbase);
err_free_info:
	framebuffer_release(info);
err_free_mmio:
	release_mem_region(pci_resource_start(pdev, 2),
			pci_resource_len(pdev, 2));
err_free_fb:
	release_mem_region(pci_resource_start(pdev, 0),
			pci_resource_len(pdev, 0));
	return -ENODEV;
}

/* Tear down everything aty128_probe() set up, in reverse order. */
static void aty128_remove(struct pci_dev *pdev)
{
	struct fb_info *info = pci_get_drvdata(pdev);
	struct aty128fb_par *par;

	if (!info)
		return;

	par = info->par;

	unregister_framebuffer(info);

#ifdef CONFIG_FB_ATY128_BACKLIGHT
	aty128_bl_exit(info->bl_dev);
#endif

	arch_phys_wc_del(par->wc_cookie);
	iounmap(par->regbase);
	iounmap(info->screen_base);

	release_mem_region(pci_resource_start(pdev, 0),
			pci_resource_len(pdev, 0));
	release_mem_region(pci_resource_start(pdev, 2),
			pci_resource_len(pdev, 2));
	framebuffer_release(info);
}
#endif /* CONFIG_PCI */


/*
 *  Blank the display.
 */
static int aty128fb_blank(int blank, struct fb_info *fb)
{
	struct aty128fb_par *par = fb->par;
	u8 state;

	if (par->lock_blank || par->asleep)
		return 0;

	/* map fbdev blank levels to CRTC_EXT_CNTL display-disable bits */
	switch (blank) {
	case FB_BLANK_NORMAL:
		state = 4;
		break;
	case FB_BLANK_VSYNC_SUSPEND:
		state = 6;
		break;
	case FB_BLANK_HSYNC_SUSPEND:
		state = 5;
		break;
	case FB_BLANK_POWERDOWN:
		state = 7;
		break;
	case FB_BLANK_UNBLANK:
	default:
		state = 0;
		break;
	}
	aty_st_8(CRTC_EXT_CNTL+1, state);

	if (par->chip_gen == rage_M3) {
		aty128_set_crt_enable(par, par->crt_on && !blank);
		aty128_set_lcd_enable(par, par->lcd_on && !blank);
	}

	return 0;
}

/*
 *  Set a single color register. The values supplied are already
 *  rounded down to the hardware's capabilities (according to the
 *  entries in the var structure). Return != 0 for invalid regno.
 */
static int aty128fb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
			      u_int transp, struct fb_info *info)
{
	struct aty128fb_par *par = info->par;

	if (regno > 255 ||
	    (par->crtc.depth == 16 && regno > 63) ||
	    (par->crtc.depth == 15 && regno > 31))
		return 1;

	/* scale 16-bit fb components down to the 8-bit DAC range */
	red >>= 8;
	green >>= 8;
	blue >>= 8;

	if (regno < 16) {
		int i;
		u32 *pal = info->pseudo_palette;

		switch (par->crtc.depth) {
		case 15:
			pal[regno] = (regno << 10) | (regno << 5) | regno;
			break;
		case 16:
			pal[regno] = (regno << 11) | (regno << 6) | regno;
			break;
		case 24:
			pal[regno] = (regno << 16) | (regno << 8) | regno;
			break;
		case 32:
			i = (regno << 8) | regno;
			pal[regno] = (i << 16) | i;
			break;
		}
	}

	if (par->crtc.depth == 16 && regno > 0) {
		/*
		 * With the 5-6-5 split of bits for RGB at 16 bits/pixel, we
		 * have 32 slots for R and B values but 64 slots for G values.
		 * Thus the R and B values go in one slot but the G value
		 * goes in a different slot, and we have to avoid disturbing
		 * the other fields in the slots we touch.
		 */
		par->green[regno] = green;
		if (regno < 32) {
			par->red[regno] = red;
			par->blue[regno] = blue;
			aty128_st_pal(regno * 8, red, par->green[regno*2],
				      blue, par);
		}
		red = par->red[regno/2];
		blue = par->blue[regno/2];
		regno <<= 2;
	} else if (par->crtc.bpp == 16)
		regno <<= 3;
	aty128_st_pal(regno, red, green, blue, par);

	return 0;
}

#define ATY_MIRROR_LCD_ON	0x00000001
#define ATY_MIRROR_CRT_ON	0x00000002

/* out param: u32*	backlight value: 0 to 15 */
#define FBIO_ATY128_GET_MIRROR	_IOR('@', 1, __u32)
/* in param: u32*	backlight value: 0 to 15 */
#define FBIO_ATY128_SET_MIRROR	_IOW('@', 2, __u32)

/* Driver-private ioctls: get/set LCD/CRT mirroring state.  Only valid
 * on mobility (rage_M3) chips; -EINVAL otherwise. */
static int aty128fb_ioctl(struct fb_info *info, u_int cmd, u_long arg)
{
	struct aty128fb_par *par = info->par;
	u32 value;
	int rc;

	switch (cmd) {
	case FBIO_ATY128_SET_MIRROR:
		if (par->chip_gen != rage_M3)
			return -EINVAL;
		rc = get_user(value, (__u32 __user *)arg);
		if (rc)
			return rc;
		par->lcd_on = (value & 0x01) != 0;
		par->crt_on = (value & 0x02) != 0;
		/* never allow both outputs off */
		if (!par->crt_on && !par->lcd_on)
			par->lcd_on = 1;
		aty128_set_crt_enable(par, par->crt_on);
		aty128_set_lcd_enable(par, par->lcd_on);
		return 0;
	case FBIO_ATY128_GET_MIRROR:
		if (par->chip_gen != rage_M3)
			return -EINVAL;
		value = (par->crt_on << 1) | par->lcd_on;
		return put_user(value, (__u32 __user *)arg);
	}
	return -EINVAL;
}

#if 0
/*
 * Accelerated functions
 */

static inline void aty128_rectcopy(int srcx, int srcy, int dstx, int dsty,
				   u_int width, u_int height,
				   struct fb_info_aty128 *par)
{
	u32 save_dp_datatype, save_dp_cntl, dstval;

	if (!width || !height)
		return;

	dstval = depth_to_dst(par->current_par.crtc.depth);
	if (dstval == DST_24BPP) {
		srcx *= 3;
		dstx *= 3;
		width *= 3;
	} else if (dstval == -EINVAL) {
		printk("aty128fb: invalid depth or RGBA\n");
		return;
	}

	wait_for_fifo(2, par);
	save_dp_datatype = aty_ld_le32(DP_DATATYPE);
	save_dp_cntl = aty_ld_le32(DP_CNTL);

	wait_for_fifo(6, par);
	aty_st_le32(SRC_Y_X, (srcy << 16) | srcx);
	aty_st_le32(DP_MIX, ROP3_SRCCOPY | DP_SRC_RECT);
	aty_st_le32(DP_CNTL, DST_X_LEFT_TO_RIGHT | DST_Y_TOP_TO_BOTTOM);
	aty_st_le32(DP_DATATYPE, save_dp_datatype | dstval | SRC_DSTCOLOR);

	aty_st_le32(DST_Y_X, (dsty << 16) | dstx);
	aty_st_le32(DST_HEIGHT_WIDTH, (height << 16) | width);

	par->blitter_may_be_busy = 1;

	wait_for_fifo(2, par);
	aty_st_le32(DP_DATATYPE, save_dp_datatype);
	aty_st_le32(DP_CNTL, save_dp_cntl);
}


/*
 * Text mode accelerated functions
 */

static void fbcon_aty128_bmove(struct display *p, int sy, int sx, int dy,
			       int dx, int height, int width)
{
	sx *= fontwidth(p);
	sy *= fontheight(p);
	dx *= fontwidth(p);
	dy *= fontheight(p);
	width *= fontwidth(p);
	height *= fontheight(p);

	aty128_rectcopy(sx, sy, dx, dy, width, height,
			(struct fb_info_aty128 *)p->fb_info);
}
#endif /* 0 */

/* Put the chip into (or wake it from) D2 suspend.  No-op when the
 * device has no PCI power-management capability. */
static void aty128_set_suspend(struct aty128fb_par *par, int suspend)
{
	u32	pmgt;
	struct pci_dev *pdev = par->pdev;

	if (!par->pdev->pm_cap)
		return;

	/* Set the chip into the appropriate suspend mode (we use D2,
	 * D3 would require a complete re-initialisation of the chip,
	 * including PCI config registers, clocks, AGP
configuration, ...) * * For resume, the core will have already brought us back to D0 */ if (suspend) { /* Make sure CRTC2 is reset. Remove that the day we decide to * actually use CRTC2 and replace it with real code for disabling * the CRTC2 output during sleep */ aty_st_le32(CRTC2_GEN_CNTL, aty_ld_le32(CRTC2_GEN_CNTL) & ~(CRTC2_EN)); /* Set the power management mode to be PCI based */ /* Use this magic value for now */ pmgt = 0x0c005407; aty_st_pll(POWER_MANAGEMENT, pmgt); (void)aty_ld_pll(POWER_MANAGEMENT); aty_st_le32(BUS_CNTL1, 0x00000010); aty_st_le32(MEM_POWER_MISC, 0x0c830000); mdelay(100); /* Switch PCI power management to D2 */ pci_set_power_state(pdev, PCI_D2); } } static int aty128_pci_suspend(struct pci_dev *pdev, pm_message_t state) { struct fb_info *info = pci_get_drvdata(pdev); struct aty128fb_par *par = info->par; /* Because we may change PCI D state ourselves, we need to * first save the config space content so the core can * restore it properly on resume. */ pci_save_state(pdev); /* We don't do anything but D2, for now we return 0, but * we may want to change that. How do we know if the BIOS * can properly take care of D3 ? Also, with swsusp, we * know we'll be rebooted, ... */ #ifndef CONFIG_PPC_PMAC /* HACK ALERT ! Once I find a proper way to say to each driver * individually what will happen with it's PCI slot, I'll change * that. 
On laptops, the AGP slot is just unclocked, so D2 is * expected, while on desktops, the card is powered off */ return 0; #endif /* CONFIG_PPC_PMAC */ if (state.event == pdev->dev.power.power_state.event) return 0; printk(KERN_DEBUG "aty128fb: suspending...\n"); console_lock(); fb_set_suspend(info, 1); /* Make sure engine is reset */ wait_for_idle(par); aty128_reset_engine(par); wait_for_idle(par); /* Blank display and LCD */ aty128fb_blank(FB_BLANK_POWERDOWN, info); /* Sleep */ par->asleep = 1; par->lock_blank = 1; #ifdef CONFIG_PPC_PMAC /* On powermac, we have hooks to properly suspend/resume AGP now, * use them here. We'll ultimately need some generic support here, * but the generic code isn't quite ready for that yet */ pmac_suspend_agp_for_card(pdev); #endif /* CONFIG_PPC_PMAC */ /* We need a way to make sure the fbdev layer will _not_ touch the * framebuffer before we put the chip to suspend state. On 2.4, I * used dummy fb ops, 2.5 need proper support for this at the * fbdev level */ if (state.event != PM_EVENT_ON) aty128_set_suspend(par, 1); console_unlock(); pdev->dev.power.power_state = state; return 0; } static int aty128_do_resume(struct pci_dev *pdev) { struct fb_info *info = pci_get_drvdata(pdev); struct aty128fb_par *par = info->par; if (pdev->dev.power.power_state.event == PM_EVENT_ON) return 0; /* PCI state will have been restored by the core, so * we should be in D0 now with our config space fully * restored */ /* Wakeup chip */ aty128_set_suspend(par, 0); par->asleep = 0; /* Restore display & engine */ aty128_reset_engine(par); wait_for_idle(par); aty128fb_set_par(info); fb_pan_display(info, &info->var); fb_set_cmap(&info->cmap, info); /* Refresh */ fb_set_suspend(info, 0); /* Unblank */ par->lock_blank = 0; aty128fb_blank(0, info); #ifdef CONFIG_PPC_PMAC /* On powermac, we have hooks to properly suspend/resume AGP now, * use them here. 
We'll ultimately need some generic support here, * but the generic code isn't quite ready for that yet */ pmac_resume_agp_for_card(pdev); #endif /* CONFIG_PPC_PMAC */ pdev->dev.power.power_state = PMSG_ON; printk(KERN_DEBUG "aty128fb: resumed !\n"); return 0; } static int aty128_pci_resume(struct pci_dev *pdev) { int rc; console_lock(); rc = aty128_do_resume(pdev); console_unlock(); return rc; } static int aty128fb_init(void) { #ifndef MODULE char *option = NULL; if (fb_get_options("aty128fb", &option)) return -ENODEV; aty128fb_setup(option); #endif return pci_register_driver(&aty128fb_driver); } static void __exit aty128fb_exit(void) { pci_unregister_driver(&aty128fb_driver); } module_init(aty128fb_init); module_exit(aty128fb_exit); MODULE_AUTHOR("(c)1999-2003 Brad Douglas <brad@neruo.com>"); MODULE_DESCRIPTION("FBDev driver for ATI Rage128 / Pro cards"); MODULE_LICENSE("GPL"); module_param(mode_option, charp, 0); MODULE_PARM_DESC(mode_option, "Specify resolution as \"<xres>x<yres>[-<bpp>][@<refresh>]\" "); module_param_named(nomtrr, mtrr, invbool, 0); MODULE_PARM_DESC(nomtrr, "bool: Disable MTRR support (0 or 1=disabled) (default=0)");
gpl-2.0
ChronoMonochrome/Samsung_STE_Kernel
drivers/video/omap2/dss/rfbi.c
390
23072
/* * linux/drivers/video/omap2/dss/rfbi.c * * Copyright (C) 2009 Nokia Corporation * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com> * * Some code and ideas taken from drivers/video/omap/ driver * by Imre Deak. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published by * the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program. If not, see <http://www.gnu.org/licenses/>. */ #define DSS_SUBSYS_NAME "RFBI" #include <linux/kernel.h> #include <linux/dma-mapping.h> #include <linux/vmalloc.h> #include <linux/clk.h> #include <linux/io.h> #include <linux/delay.h> #include <linux/kfifo.h> #include <linux/ktime.h> #include <linux/hrtimer.h> #include <linux/seq_file.h> #include <linux/semaphore.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <video/omapdss.h> #include "dss.h" struct rfbi_reg { u16 idx; }; #define RFBI_REG(idx) ((const struct rfbi_reg) { idx }) #define RFBI_REVISION RFBI_REG(0x0000) #define RFBI_SYSCONFIG RFBI_REG(0x0010) #define RFBI_SYSSTATUS RFBI_REG(0x0014) #define RFBI_CONTROL RFBI_REG(0x0040) #define RFBI_PIXEL_CNT RFBI_REG(0x0044) #define RFBI_LINE_NUMBER RFBI_REG(0x0048) #define RFBI_CMD RFBI_REG(0x004c) #define RFBI_PARAM RFBI_REG(0x0050) #define RFBI_DATA RFBI_REG(0x0054) #define RFBI_READ RFBI_REG(0x0058) #define RFBI_STATUS RFBI_REG(0x005c) #define RFBI_CONFIG(n) RFBI_REG(0x0060 + (n)*0x18) #define RFBI_ONOFF_TIME(n) RFBI_REG(0x0064 + (n)*0x18) #define RFBI_CYCLE_TIME(n) RFBI_REG(0x0068 + (n)*0x18) #define RFBI_DATA_CYCLE1(n) RFBI_REG(0x006c + (n)*0x18) #define RFBI_DATA_CYCLE2(n) RFBI_REG(0x0070 + (n)*0x18) #define 
RFBI_DATA_CYCLE3(n) RFBI_REG(0x0074 + (n)*0x18) #define RFBI_VSYNC_WIDTH RFBI_REG(0x0090) #define RFBI_HSYNC_WIDTH RFBI_REG(0x0094) #define REG_FLD_MOD(idx, val, start, end) \ rfbi_write_reg(idx, FLD_MOD(rfbi_read_reg(idx), val, start, end)) enum omap_rfbi_cycleformat { OMAP_DSS_RFBI_CYCLEFORMAT_1_1 = 0, OMAP_DSS_RFBI_CYCLEFORMAT_2_1 = 1, OMAP_DSS_RFBI_CYCLEFORMAT_3_1 = 2, OMAP_DSS_RFBI_CYCLEFORMAT_3_2 = 3, }; enum omap_rfbi_datatype { OMAP_DSS_RFBI_DATATYPE_12 = 0, OMAP_DSS_RFBI_DATATYPE_16 = 1, OMAP_DSS_RFBI_DATATYPE_18 = 2, OMAP_DSS_RFBI_DATATYPE_24 = 3, }; enum omap_rfbi_parallelmode { OMAP_DSS_RFBI_PARALLELMODE_8 = 0, OMAP_DSS_RFBI_PARALLELMODE_9 = 1, OMAP_DSS_RFBI_PARALLELMODE_12 = 2, OMAP_DSS_RFBI_PARALLELMODE_16 = 3, }; static int rfbi_convert_timings(struct rfbi_timings *t); static void rfbi_get_clk_info(u32 *clk_period, u32 *max_clk_div); static struct { struct platform_device *pdev; void __iomem *base; unsigned long l4_khz; enum omap_rfbi_datatype datatype; enum omap_rfbi_parallelmode parallelmode; enum omap_rfbi_te_mode te_mode; int te_enabled; void (*framedone_callback)(void *data); void *framedone_callback_data; struct omap_dss_device *dssdev[2]; struct semaphore bus_lock; } rfbi; static inline void rfbi_write_reg(const struct rfbi_reg idx, u32 val) { __raw_writel(val, rfbi.base + idx.idx); } static inline u32 rfbi_read_reg(const struct rfbi_reg idx) { return __raw_readl(rfbi.base + idx.idx); } static int rfbi_runtime_get(void) { int r; DSSDBG("rfbi_runtime_get\n"); r = pm_runtime_get_sync(&rfbi.pdev->dev); WARN_ON(r < 0); return r < 0 ? 
r : 0; } static void rfbi_runtime_put(void) { int r; DSSDBG("rfbi_runtime_put\n"); r = pm_runtime_put(&rfbi.pdev->dev); WARN_ON(r < 0); } void rfbi_bus_lock(void) { down(&rfbi.bus_lock); } EXPORT_SYMBOL(rfbi_bus_lock); void rfbi_bus_unlock(void) { up(&rfbi.bus_lock); } EXPORT_SYMBOL(rfbi_bus_unlock); void omap_rfbi_write_command(const void *buf, u32 len) { switch (rfbi.parallelmode) { case OMAP_DSS_RFBI_PARALLELMODE_8: { const u8 *b = buf; for (; len; len--) rfbi_write_reg(RFBI_CMD, *b++); break; } case OMAP_DSS_RFBI_PARALLELMODE_16: { const u16 *w = buf; BUG_ON(len & 1); for (; len; len -= 2) rfbi_write_reg(RFBI_CMD, *w++); break; } case OMAP_DSS_RFBI_PARALLELMODE_9: case OMAP_DSS_RFBI_PARALLELMODE_12: default: BUG(); } } EXPORT_SYMBOL(omap_rfbi_write_command); void omap_rfbi_read_data(void *buf, u32 len) { switch (rfbi.parallelmode) { case OMAP_DSS_RFBI_PARALLELMODE_8: { u8 *b = buf; for (; len; len--) { rfbi_write_reg(RFBI_READ, 0); *b++ = rfbi_read_reg(RFBI_READ); } break; } case OMAP_DSS_RFBI_PARALLELMODE_16: { u16 *w = buf; BUG_ON(len & ~1); for (; len; len -= 2) { rfbi_write_reg(RFBI_READ, 0); *w++ = rfbi_read_reg(RFBI_READ); } break; } case OMAP_DSS_RFBI_PARALLELMODE_9: case OMAP_DSS_RFBI_PARALLELMODE_12: default: BUG(); } } EXPORT_SYMBOL(omap_rfbi_read_data); void omap_rfbi_write_data(const void *buf, u32 len) { switch (rfbi.parallelmode) { case OMAP_DSS_RFBI_PARALLELMODE_8: { const u8 *b = buf; for (; len; len--) rfbi_write_reg(RFBI_PARAM, *b++); break; } case OMAP_DSS_RFBI_PARALLELMODE_16: { const u16 *w = buf; BUG_ON(len & 1); for (; len; len -= 2) rfbi_write_reg(RFBI_PARAM, *w++); break; } case OMAP_DSS_RFBI_PARALLELMODE_9: case OMAP_DSS_RFBI_PARALLELMODE_12: default: BUG(); } } EXPORT_SYMBOL(omap_rfbi_write_data); void omap_rfbi_write_pixels(const void __iomem *buf, int scr_width, u16 x, u16 y, u16 w, u16 h) { int start_offset = scr_width * y + x; int horiz_offset = scr_width - w; int i; if (rfbi.datatype == OMAP_DSS_RFBI_DATATYPE_16 && 
rfbi.parallelmode == OMAP_DSS_RFBI_PARALLELMODE_8) { const u16 __iomem *pd = buf; pd += start_offset; for (; h; --h) { for (i = 0; i < w; ++i) { const u8 __iomem *b = (const u8 __iomem *)pd; rfbi_write_reg(RFBI_PARAM, __raw_readb(b+1)); rfbi_write_reg(RFBI_PARAM, __raw_readb(b+0)); ++pd; } pd += horiz_offset; } } else if (rfbi.datatype == OMAP_DSS_RFBI_DATATYPE_24 && rfbi.parallelmode == OMAP_DSS_RFBI_PARALLELMODE_8) { const u32 __iomem *pd = buf; pd += start_offset; for (; h; --h) { for (i = 0; i < w; ++i) { const u8 __iomem *b = (const u8 __iomem *)pd; rfbi_write_reg(RFBI_PARAM, __raw_readb(b+2)); rfbi_write_reg(RFBI_PARAM, __raw_readb(b+1)); rfbi_write_reg(RFBI_PARAM, __raw_readb(b+0)); ++pd; } pd += horiz_offset; } } else if (rfbi.datatype == OMAP_DSS_RFBI_DATATYPE_16 && rfbi.parallelmode == OMAP_DSS_RFBI_PARALLELMODE_16) { const u16 __iomem *pd = buf; pd += start_offset; for (; h; --h) { for (i = 0; i < w; ++i) { rfbi_write_reg(RFBI_PARAM, __raw_readw(pd)); ++pd; } pd += horiz_offset; } } else { BUG(); } } EXPORT_SYMBOL(omap_rfbi_write_pixels); static void rfbi_transfer_area(struct omap_dss_device *dssdev, u16 width, u16 height, void (*callback)(void *data), void *data) { u32 l; /*BUG_ON(callback == 0);*/ BUG_ON(rfbi.framedone_callback != NULL); DSSDBG("rfbi_transfer_area %dx%d\n", width, height); dispc_set_lcd_size(dssdev->manager->id, width, height); dispc_enable_channel(dssdev->manager->id, true); rfbi.framedone_callback = callback; rfbi.framedone_callback_data = data; rfbi_write_reg(RFBI_PIXEL_CNT, width * height); l = rfbi_read_reg(RFBI_CONTROL); l = FLD_MOD(l, 1, 0, 0); /* enable */ if (!rfbi.te_enabled) l = FLD_MOD(l, 1, 4, 4); /* ITE */ rfbi_write_reg(RFBI_CONTROL, l); } static void framedone_callback(void *data, u32 mask) { void (*callback)(void *data); DSSDBG("FRAMEDONE\n"); REG_FLD_MOD(RFBI_CONTROL, 0, 0, 0); callback = rfbi.framedone_callback; rfbi.framedone_callback = NULL; if (callback != NULL) callback(rfbi.framedone_callback_data); } #if 1 /* 
VERBOSE */ static void rfbi_print_timings(void) { u32 l; u32 time; l = rfbi_read_reg(RFBI_CONFIG(0)); time = 1000000000 / rfbi.l4_khz; if (l & (1 << 4)) time *= 2; DSSDBG("Tick time %u ps\n", time); l = rfbi_read_reg(RFBI_ONOFF_TIME(0)); DSSDBG("CSONTIME %d, CSOFFTIME %d, WEONTIME %d, WEOFFTIME %d, " "REONTIME %d, REOFFTIME %d\n", l & 0x0f, (l >> 4) & 0x3f, (l >> 10) & 0x0f, (l >> 14) & 0x3f, (l >> 20) & 0x0f, (l >> 24) & 0x3f); l = rfbi_read_reg(RFBI_CYCLE_TIME(0)); DSSDBG("WECYCLETIME %d, RECYCLETIME %d, CSPULSEWIDTH %d, " "ACCESSTIME %d\n", (l & 0x3f), (l >> 6) & 0x3f, (l >> 12) & 0x3f, (l >> 22) & 0x3f); } #else static void rfbi_print_timings(void) {} #endif static u32 extif_clk_period; static inline unsigned long round_to_extif_ticks(unsigned long ps, int div) { int bus_tick = extif_clk_period * div; return (ps + bus_tick - 1) / bus_tick * bus_tick; } static int calc_reg_timing(struct rfbi_timings *t, int div) { t->clk_div = div; t->cs_on_time = round_to_extif_ticks(t->cs_on_time, div); t->we_on_time = round_to_extif_ticks(t->we_on_time, div); t->we_off_time = round_to_extif_ticks(t->we_off_time, div); t->we_cycle_time = round_to_extif_ticks(t->we_cycle_time, div); t->re_on_time = round_to_extif_ticks(t->re_on_time, div); t->re_off_time = round_to_extif_ticks(t->re_off_time, div); t->re_cycle_time = round_to_extif_ticks(t->re_cycle_time, div); t->access_time = round_to_extif_ticks(t->access_time, div); t->cs_off_time = round_to_extif_ticks(t->cs_off_time, div); t->cs_pulse_width = round_to_extif_ticks(t->cs_pulse_width, div); DSSDBG("[reg]cson %d csoff %d reon %d reoff %d\n", t->cs_on_time, t->cs_off_time, t->re_on_time, t->re_off_time); DSSDBG("[reg]weon %d weoff %d recyc %d wecyc %d\n", t->we_on_time, t->we_off_time, t->re_cycle_time, t->we_cycle_time); DSSDBG("[reg]rdaccess %d cspulse %d\n", t->access_time, t->cs_pulse_width); return rfbi_convert_timings(t); } static int calc_extif_timings(struct rfbi_timings *t) { u32 max_clk_div; int div; 
rfbi_get_clk_info(&extif_clk_period, &max_clk_div); for (div = 1; div <= max_clk_div; div++) { if (calc_reg_timing(t, div) == 0) break; } if (div <= max_clk_div) return 0; DSSERR("can't setup timings\n"); return -1; } static void rfbi_set_timings(int rfbi_module, struct rfbi_timings *t) { int r; if (!t->converted) { r = calc_extif_timings(t); if (r < 0) DSSERR("Failed to calc timings\n"); } BUG_ON(!t->converted); rfbi_write_reg(RFBI_ONOFF_TIME(rfbi_module), t->tim[0]); rfbi_write_reg(RFBI_CYCLE_TIME(rfbi_module), t->tim[1]); /* TIMEGRANULARITY */ REG_FLD_MOD(RFBI_CONFIG(rfbi_module), (t->tim[2] ? 1 : 0), 4, 4); rfbi_print_timings(); } static int ps_to_rfbi_ticks(int time, int div) { unsigned long tick_ps; int ret; /* Calculate in picosecs to yield more exact results */ tick_ps = 1000000000 / (rfbi.l4_khz) * div; ret = (time + tick_ps - 1) / tick_ps; return ret; } static void rfbi_get_clk_info(u32 *clk_period, u32 *max_clk_div) { *clk_period = 1000000000 / rfbi.l4_khz; *max_clk_div = 2; } static int rfbi_convert_timings(struct rfbi_timings *t) { u32 l; int reon, reoff, weon, weoff, cson, csoff, cs_pulse; int actim, recyc, wecyc; int div = t->clk_div; if (div <= 0 || div > 2) return -1; /* Make sure that after conversion it still holds that: * weoff > weon, reoff > reon, recyc >= reoff, wecyc >= weoff, * csoff > cson, csoff >= max(weoff, reoff), actim > reon */ weon = ps_to_rfbi_ticks(t->we_on_time, div); weoff = ps_to_rfbi_ticks(t->we_off_time, div); if (weoff <= weon) weoff = weon + 1; if (weon > 0x0f) return -1; if (weoff > 0x3f) return -1; reon = ps_to_rfbi_ticks(t->re_on_time, div); reoff = ps_to_rfbi_ticks(t->re_off_time, div); if (reoff <= reon) reoff = reon + 1; if (reon > 0x0f) return -1; if (reoff > 0x3f) return -1; cson = ps_to_rfbi_ticks(t->cs_on_time, div); csoff = ps_to_rfbi_ticks(t->cs_off_time, div); if (csoff <= cson) csoff = cson + 1; if (csoff < max(weoff, reoff)) csoff = max(weoff, reoff); if (cson > 0x0f) return -1; if (csoff > 0x3f) return -1; l 
= cson; l |= csoff << 4; l |= weon << 10; l |= weoff << 14; l |= reon << 20; l |= reoff << 24; t->tim[0] = l; actim = ps_to_rfbi_ticks(t->access_time, div); if (actim <= reon) actim = reon + 1; if (actim > 0x3f) return -1; wecyc = ps_to_rfbi_ticks(t->we_cycle_time, div); if (wecyc < weoff) wecyc = weoff; if (wecyc > 0x3f) return -1; recyc = ps_to_rfbi_ticks(t->re_cycle_time, div); if (recyc < reoff) recyc = reoff; if (recyc > 0x3f) return -1; cs_pulse = ps_to_rfbi_ticks(t->cs_pulse_width, div); if (cs_pulse > 0x3f) return -1; l = wecyc; l |= recyc << 6; l |= cs_pulse << 12; l |= actim << 22; t->tim[1] = l; t->tim[2] = div - 1; t->converted = 1; return 0; } /* xxx FIX module selection missing */ int omap_rfbi_setup_te(enum omap_rfbi_te_mode mode, unsigned hs_pulse_time, unsigned vs_pulse_time, int hs_pol_inv, int vs_pol_inv, int extif_div) { int hs, vs; int min; u32 l; hs = ps_to_rfbi_ticks(hs_pulse_time, 1); vs = ps_to_rfbi_ticks(vs_pulse_time, 1); if (hs < 2) return -EDOM; if (mode == OMAP_DSS_RFBI_TE_MODE_2) min = 2; else /* OMAP_DSS_RFBI_TE_MODE_1 */ min = 4; if (vs < min) return -EDOM; if (vs == hs) return -EINVAL; rfbi.te_mode = mode; DSSDBG("setup_te: mode %d hs %d vs %d hs_inv %d vs_inv %d\n", mode, hs, vs, hs_pol_inv, vs_pol_inv); rfbi_write_reg(RFBI_HSYNC_WIDTH, hs); rfbi_write_reg(RFBI_VSYNC_WIDTH, vs); l = rfbi_read_reg(RFBI_CONFIG(0)); if (hs_pol_inv) l &= ~(1 << 21); else l |= 1 << 21; if (vs_pol_inv) l &= ~(1 << 20); else l |= 1 << 20; return 0; } EXPORT_SYMBOL(omap_rfbi_setup_te); /* xxx FIX module selection missing */ int omap_rfbi_enable_te(bool enable, unsigned line) { u32 l; DSSDBG("te %d line %d mode %d\n", enable, line, rfbi.te_mode); if (line > (1 << 11) - 1) return -EINVAL; l = rfbi_read_reg(RFBI_CONFIG(0)); l &= ~(0x3 << 2); if (enable) { rfbi.te_enabled = 1; l |= rfbi.te_mode << 2; } else rfbi.te_enabled = 0; rfbi_write_reg(RFBI_CONFIG(0), l); rfbi_write_reg(RFBI_LINE_NUMBER, line); return 0; } EXPORT_SYMBOL(omap_rfbi_enable_te); static int 
rfbi_configure(int rfbi_module, int bpp, int lines) { u32 l; int cycle1 = 0, cycle2 = 0, cycle3 = 0; enum omap_rfbi_cycleformat cycleformat; enum omap_rfbi_datatype datatype; enum omap_rfbi_parallelmode parallelmode; switch (bpp) { case 12: datatype = OMAP_DSS_RFBI_DATATYPE_12; break; case 16: datatype = OMAP_DSS_RFBI_DATATYPE_16; break; case 18: datatype = OMAP_DSS_RFBI_DATATYPE_18; break; case 24: datatype = OMAP_DSS_RFBI_DATATYPE_24; break; default: BUG(); return 1; } rfbi.datatype = datatype; switch (lines) { case 8: parallelmode = OMAP_DSS_RFBI_PARALLELMODE_8; break; case 9: parallelmode = OMAP_DSS_RFBI_PARALLELMODE_9; break; case 12: parallelmode = OMAP_DSS_RFBI_PARALLELMODE_12; break; case 16: parallelmode = OMAP_DSS_RFBI_PARALLELMODE_16; break; default: BUG(); return 1; } rfbi.parallelmode = parallelmode; if ((bpp % lines) == 0) { switch (bpp / lines) { case 1: cycleformat = OMAP_DSS_RFBI_CYCLEFORMAT_1_1; break; case 2: cycleformat = OMAP_DSS_RFBI_CYCLEFORMAT_2_1; break; case 3: cycleformat = OMAP_DSS_RFBI_CYCLEFORMAT_3_1; break; default: BUG(); return 1; } } else if ((2 * bpp % lines) == 0) { if ((2 * bpp / lines) == 3) cycleformat = OMAP_DSS_RFBI_CYCLEFORMAT_3_2; else { BUG(); return 1; } } else { BUG(); return 1; } switch (cycleformat) { case OMAP_DSS_RFBI_CYCLEFORMAT_1_1: cycle1 = lines; break; case OMAP_DSS_RFBI_CYCLEFORMAT_2_1: cycle1 = lines; cycle2 = lines; break; case OMAP_DSS_RFBI_CYCLEFORMAT_3_1: cycle1 = lines; cycle2 = lines; cycle3 = lines; break; case OMAP_DSS_RFBI_CYCLEFORMAT_3_2: cycle1 = lines; cycle2 = (lines / 2) | ((lines / 2) << 16); cycle3 = (lines << 16); break; } REG_FLD_MOD(RFBI_CONTROL, 0, 3, 2); /* clear CS */ l = 0; l |= FLD_VAL(parallelmode, 1, 0); l |= FLD_VAL(0, 3, 2); /* TRIGGERMODE: ITE */ l |= FLD_VAL(0, 4, 4); /* TIMEGRANULARITY */ l |= FLD_VAL(datatype, 6, 5); /* l |= FLD_VAL(2, 8, 7); */ /* L4FORMAT, 2pix/L4 */ l |= FLD_VAL(0, 8, 7); /* L4FORMAT, 1pix/L4 */ l |= FLD_VAL(cycleformat, 10, 9); l |= FLD_VAL(0, 12, 11); /* 
UNUSEDBITS */ l |= FLD_VAL(0, 16, 16); /* A0POLARITY */ l |= FLD_VAL(0, 17, 17); /* REPOLARITY */ l |= FLD_VAL(0, 18, 18); /* WEPOLARITY */ l |= FLD_VAL(0, 19, 19); /* CSPOLARITY */ l |= FLD_VAL(1, 20, 20); /* TE_VSYNC_POLARITY */ l |= FLD_VAL(1, 21, 21); /* HSYNCPOLARITY */ rfbi_write_reg(RFBI_CONFIG(rfbi_module), l); rfbi_write_reg(RFBI_DATA_CYCLE1(rfbi_module), cycle1); rfbi_write_reg(RFBI_DATA_CYCLE2(rfbi_module), cycle2); rfbi_write_reg(RFBI_DATA_CYCLE3(rfbi_module), cycle3); l = rfbi_read_reg(RFBI_CONTROL); l = FLD_MOD(l, rfbi_module+1, 3, 2); /* Select CSx */ l = FLD_MOD(l, 0, 1, 1); /* clear bypass */ rfbi_write_reg(RFBI_CONTROL, l); DSSDBG("RFBI config: bpp %d, lines %d, cycles: 0x%x 0x%x 0x%x\n", bpp, lines, cycle1, cycle2, cycle3); return 0; } int omap_rfbi_configure(struct omap_dss_device *dssdev, int pixel_size, int data_lines) { return rfbi_configure(dssdev->phy.rfbi.channel, pixel_size, data_lines); } EXPORT_SYMBOL(omap_rfbi_configure); int omap_rfbi_prepare_update(struct omap_dss_device *dssdev, u16 *x, u16 *y, u16 *w, u16 *h) { u16 dw, dh; dssdev->driver->get_resolution(dssdev, &dw, &dh); if (*x > dw || *y > dh) return -EINVAL; if (*x + *w > dw) return -EINVAL; if (*y + *h > dh) return -EINVAL; if (*w == 1) return -EINVAL; if (*w == 0 || *h == 0) return -EINVAL; if (dssdev->manager->caps & OMAP_DSS_OVL_MGR_CAP_DISPC) { dss_setup_partial_planes(dssdev, x, y, w, h, true); dispc_set_lcd_size(dssdev->manager->id, *w, *h); } return 0; } EXPORT_SYMBOL(omap_rfbi_prepare_update); int omap_rfbi_update(struct omap_dss_device *dssdev, u16 x, u16 y, u16 w, u16 h, void (*callback)(void *), void *data) { if (dssdev->manager->caps & OMAP_DSS_OVL_MGR_CAP_DISPC) { rfbi_transfer_area(dssdev, w, h, callback, data); } else { struct omap_overlay *ovl; void __iomem *addr; int scr_width; ovl = dssdev->manager->overlays[0]; scr_width = ovl->info.screen_width; addr = ovl->info.vaddr; omap_rfbi_write_pixels(addr, scr_width, x, y, w, h); callback(data); } return 0; } 
EXPORT_SYMBOL(omap_rfbi_update); void rfbi_dump_regs(struct seq_file *s) { #define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, rfbi_read_reg(r)) if (rfbi_runtime_get()) return; DUMPREG(RFBI_REVISION); DUMPREG(RFBI_SYSCONFIG); DUMPREG(RFBI_SYSSTATUS); DUMPREG(RFBI_CONTROL); DUMPREG(RFBI_PIXEL_CNT); DUMPREG(RFBI_LINE_NUMBER); DUMPREG(RFBI_CMD); DUMPREG(RFBI_PARAM); DUMPREG(RFBI_DATA); DUMPREG(RFBI_READ); DUMPREG(RFBI_STATUS); DUMPREG(RFBI_CONFIG(0)); DUMPREG(RFBI_ONOFF_TIME(0)); DUMPREG(RFBI_CYCLE_TIME(0)); DUMPREG(RFBI_DATA_CYCLE1(0)); DUMPREG(RFBI_DATA_CYCLE2(0)); DUMPREG(RFBI_DATA_CYCLE3(0)); DUMPREG(RFBI_CONFIG(1)); DUMPREG(RFBI_ONOFF_TIME(1)); DUMPREG(RFBI_CYCLE_TIME(1)); DUMPREG(RFBI_DATA_CYCLE1(1)); DUMPREG(RFBI_DATA_CYCLE2(1)); DUMPREG(RFBI_DATA_CYCLE3(1)); DUMPREG(RFBI_VSYNC_WIDTH); DUMPREG(RFBI_HSYNC_WIDTH); rfbi_runtime_put(); #undef DUMPREG } int omapdss_rfbi_display_enable(struct omap_dss_device *dssdev) { int r; r = rfbi_runtime_get(); if (r) return r; r = omap_dss_start_device(dssdev); if (r) { DSSERR("failed to start device\n"); goto err0; } r = omap_dispc_register_isr(framedone_callback, NULL, DISPC_IRQ_FRAMEDONE); if (r) { DSSERR("can't get FRAMEDONE irq\n"); goto err1; } dispc_set_lcd_display_type(dssdev->manager->id, OMAP_DSS_LCD_DISPLAY_TFT); dispc_set_parallel_interface_mode(dssdev->manager->id, OMAP_DSS_PARALLELMODE_RFBI); dispc_set_tft_data_lines(dssdev->manager->id, dssdev->ctrl.pixel_size); rfbi_configure(dssdev->phy.rfbi.channel, dssdev->ctrl.pixel_size, dssdev->phy.rfbi.data_lines); rfbi_set_timings(dssdev->phy.rfbi.channel, &dssdev->ctrl.rfbi_timings); return 0; err1: omap_dss_stop_device(dssdev); err0: rfbi_runtime_put(); return r; } EXPORT_SYMBOL(omapdss_rfbi_display_enable); void omapdss_rfbi_display_disable(struct omap_dss_device *dssdev) { omap_dispc_unregister_isr(framedone_callback, NULL, DISPC_IRQ_FRAMEDONE); omap_dss_stop_device(dssdev); rfbi_runtime_put(); } EXPORT_SYMBOL(omapdss_rfbi_display_disable); int 
rfbi_init_display(struct omap_dss_device *dssdev) { rfbi.dssdev[dssdev->phy.rfbi.channel] = dssdev; dssdev->caps = OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE; return 0; } /* RFBI HW IP initialisation */ static int omap_rfbihw_probe(struct platform_device *pdev) { u32 rev; struct resource *rfbi_mem; struct clk *clk; int r; rfbi.pdev = pdev; sema_init(&rfbi.bus_lock, 1); rfbi_mem = platform_get_resource(rfbi.pdev, IORESOURCE_MEM, 0); if (!rfbi_mem) { DSSERR("can't get IORESOURCE_MEM RFBI\n"); r = -EINVAL; goto err_ioremap; } rfbi.base = ioremap(rfbi_mem->start, resource_size(rfbi_mem)); if (!rfbi.base) { DSSERR("can't ioremap RFBI\n"); r = -ENOMEM; goto err_ioremap; } pm_runtime_enable(&pdev->dev); r = rfbi_runtime_get(); if (r) goto err_get_rfbi; msleep(10); if (cpu_is_omap24xx() || cpu_is_omap34xx() || cpu_is_omap3630()) clk = dss_get_ick(); else clk = clk_get(&pdev->dev, "ick"); if (IS_ERR(clk)) { DSSERR("can't get ick\n"); r = PTR_ERR(clk); goto err_get_ick; } rfbi.l4_khz = clk_get_rate(clk) / 1000; clk_put(clk); rev = rfbi_read_reg(RFBI_REVISION); dev_dbg(&pdev->dev, "OMAP RFBI rev %d.%d\n", FLD_GET(rev, 7, 4), FLD_GET(rev, 3, 0)); rfbi_runtime_put(); return 0; err_get_ick: rfbi_runtime_put(); err_get_rfbi: pm_runtime_disable(&pdev->dev); iounmap(rfbi.base); err_ioremap: return r; } static int omap_rfbihw_remove(struct platform_device *pdev) { pm_runtime_disable(&pdev->dev); iounmap(rfbi.base); return 0; } static int rfbi_runtime_suspend(struct device *dev) { dispc_runtime_put(); dss_runtime_put(); return 0; } static int rfbi_runtime_resume(struct device *dev) { int r; r = dss_runtime_get(); if (r < 0) goto err_get_dss; r = dispc_runtime_get(); if (r < 0) goto err_get_dispc; return 0; err_get_dispc: dss_runtime_put(); err_get_dss: return r; } static const struct dev_pm_ops rfbi_pm_ops = { .runtime_suspend = rfbi_runtime_suspend, .runtime_resume = rfbi_runtime_resume, }; static struct platform_driver omap_rfbihw_driver = { .probe = omap_rfbihw_probe, .remove = 
omap_rfbihw_remove, .driver = { .name = "omapdss_rfbi", .owner = THIS_MODULE, .pm = &rfbi_pm_ops, }, }; int rfbi_init_platform_driver(void) { return platform_driver_register(&omap_rfbihw_driver); } void rfbi_uninit_platform_driver(void) { return platform_driver_unregister(&omap_rfbihw_driver); }
gpl-2.0
sudipm-mukherjee/stable
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
646
12592
/* * Copyright 2014 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
* */ #include <linux/printk.h> #include <linux/slab.h> #include "kfd_priv.h" #include "kfd_mqd_manager.h" #include "cik_regs.h" #include "cik_structs.h" #include "oss/oss_2_4_sh_mask.h" static inline struct cik_mqd *get_mqd(void *mqd) { return (struct cik_mqd *)mqd; } static int init_mqd(struct mqd_manager *mm, void **mqd, struct kfd_mem_obj **mqd_mem_obj, uint64_t *gart_addr, struct queue_properties *q) { uint64_t addr; struct cik_mqd *m; int retval; BUG_ON(!mm || !q || !mqd); pr_debug("kfd: In func %s\n", __func__); retval = kfd_gtt_sa_allocate(mm->dev, sizeof(struct cik_mqd), mqd_mem_obj); if (retval != 0) return -ENOMEM; m = (struct cik_mqd *) (*mqd_mem_obj)->cpu_ptr; addr = (*mqd_mem_obj)->gpu_addr; memset(m, 0, ALIGN(sizeof(struct cik_mqd), 256)); m->header = 0xC0310800; m->compute_pipelinestat_enable = 1; m->compute_static_thread_mgmt_se0 = 0xFFFFFFFF; m->compute_static_thread_mgmt_se1 = 0xFFFFFFFF; m->compute_static_thread_mgmt_se2 = 0xFFFFFFFF; m->compute_static_thread_mgmt_se3 = 0xFFFFFFFF; /* * Make sure to use the last queue state saved on mqd when the cp * reassigns the queue, so when queue is switched on/off (e.g over * subscription or quantum timeout) the context will be consistent */ m->cp_hqd_persistent_state = DEFAULT_CP_HQD_PERSISTENT_STATE | PRELOAD_REQ; m->cp_mqd_control = MQD_CONTROL_PRIV_STATE_EN; m->cp_mqd_base_addr_lo = lower_32_bits(addr); m->cp_mqd_base_addr_hi = upper_32_bits(addr); m->cp_hqd_ib_control = DEFAULT_MIN_IB_AVAIL_SIZE | IB_ATC_EN; /* Although WinKFD writes this, I suspect it should not be necessary */ m->cp_hqd_ib_control = IB_ATC_EN | DEFAULT_MIN_IB_AVAIL_SIZE; m->cp_hqd_quantum = QUANTUM_EN | QUANTUM_SCALE_1MS | QUANTUM_DURATION(10); /* * Pipe Priority * Identifies the pipe relative priority when this queue is connected * to the pipeline. The pipe priority is against the GFX pipe and HP3D. * In KFD we are using a fixed pipe priority set to CS_MEDIUM. 
* 0 = CS_LOW (typically below GFX) * 1 = CS_MEDIUM (typically between HP3D and GFX * 2 = CS_HIGH (typically above HP3D) */ m->cp_hqd_pipe_priority = 1; m->cp_hqd_queue_priority = 15; if (q->format == KFD_QUEUE_FORMAT_AQL) m->cp_hqd_iq_rptr = AQL_ENABLE; *mqd = m; if (gart_addr != NULL) *gart_addr = addr; retval = mm->update_mqd(mm, m, q); return retval; } static int init_mqd_sdma(struct mqd_manager *mm, void **mqd, struct kfd_mem_obj **mqd_mem_obj, uint64_t *gart_addr, struct queue_properties *q) { int retval; struct cik_sdma_rlc_registers *m; BUG_ON(!mm || !mqd || !mqd_mem_obj); retval = kfd_gtt_sa_allocate(mm->dev, sizeof(struct cik_sdma_rlc_registers), mqd_mem_obj); if (retval != 0) return -ENOMEM; m = (struct cik_sdma_rlc_registers *) (*mqd_mem_obj)->cpu_ptr; memset(m, 0, sizeof(struct cik_sdma_rlc_registers)); *mqd = m; if (gart_addr != NULL) *gart_addr = (*mqd_mem_obj)->gpu_addr; retval = mm->update_mqd(mm, m, q); return retval; } static void uninit_mqd(struct mqd_manager *mm, void *mqd, struct kfd_mem_obj *mqd_mem_obj) { BUG_ON(!mm || !mqd); kfd_gtt_sa_free(mm->dev, mqd_mem_obj); } static void uninit_mqd_sdma(struct mqd_manager *mm, void *mqd, struct kfd_mem_obj *mqd_mem_obj) { BUG_ON(!mm || !mqd); kfd_gtt_sa_free(mm->dev, mqd_mem_obj); } static int load_mqd(struct mqd_manager *mm, void *mqd, uint32_t pipe_id, uint32_t queue_id, uint32_t __user *wptr) { return mm->dev->kfd2kgd->hqd_load (mm->dev->kgd, mqd, pipe_id, queue_id, wptr); } static int load_mqd_sdma(struct mqd_manager *mm, void *mqd, uint32_t pipe_id, uint32_t queue_id, uint32_t __user *wptr) { return mm->dev->kfd2kgd->hqd_sdma_load(mm->dev->kgd, mqd); } static int update_mqd(struct mqd_manager *mm, void *mqd, struct queue_properties *q) { struct cik_mqd *m; BUG_ON(!mm || !q || !mqd); pr_debug("kfd: In func %s\n", __func__); m = get_mqd(mqd); m->cp_hqd_pq_control = DEFAULT_RPTR_BLOCK_SIZE | DEFAULT_MIN_AVAIL_SIZE | PQ_ATC_EN; /* * Calculating queue size which is log base 2 of actual queue size -1 * 
dwords and another -1 for ffs */ m->cp_hqd_pq_control |= ffs(q->queue_size / sizeof(unsigned int)) - 1 - 1; m->cp_hqd_pq_base_lo = lower_32_bits((uint64_t)q->queue_address >> 8); m->cp_hqd_pq_base_hi = upper_32_bits((uint64_t)q->queue_address >> 8); m->cp_hqd_pq_rptr_report_addr_lo = lower_32_bits((uint64_t)q->read_ptr); m->cp_hqd_pq_rptr_report_addr_hi = upper_32_bits((uint64_t)q->read_ptr); m->cp_hqd_pq_doorbell_control = DOORBELL_EN | DOORBELL_OFFSET(q->doorbell_off); m->cp_hqd_vmid = q->vmid; if (q->format == KFD_QUEUE_FORMAT_AQL) { m->cp_hqd_pq_control |= NO_UPDATE_RPTR; } m->cp_hqd_active = 0; q->is_active = false; if (q->queue_size > 0 && q->queue_address != 0 && q->queue_percent > 0) { m->cp_hqd_active = 1; q->is_active = true; } return 0; } static int update_mqd_sdma(struct mqd_manager *mm, void *mqd, struct queue_properties *q) { struct cik_sdma_rlc_registers *m; BUG_ON(!mm || !mqd || !q); m = get_sdma_mqd(mqd); m->sdma_rlc_rb_cntl = ffs(q->queue_size / sizeof(unsigned int)) << SDMA0_RLC0_RB_CNTL__RB_SIZE__SHIFT | q->vmid << SDMA0_RLC0_RB_CNTL__RB_VMID__SHIFT | 1 << SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT | 6 << SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT; m->sdma_rlc_rb_base = lower_32_bits(q->queue_address >> 8); m->sdma_rlc_rb_base_hi = upper_32_bits(q->queue_address >> 8); m->sdma_rlc_rb_rptr_addr_lo = lower_32_bits((uint64_t)q->read_ptr); m->sdma_rlc_rb_rptr_addr_hi = upper_32_bits((uint64_t)q->read_ptr); m->sdma_rlc_doorbell = q->doorbell_off << SDMA0_RLC0_DOORBELL__OFFSET__SHIFT | 1 << SDMA0_RLC0_DOORBELL__ENABLE__SHIFT; m->sdma_rlc_virtual_addr = q->sdma_vm_addr; m->sdma_engine_id = q->sdma_engine_id; m->sdma_queue_id = q->sdma_queue_id; q->is_active = false; if (q->queue_size > 0 && q->queue_address != 0 && q->queue_percent > 0) { m->sdma_rlc_rb_cntl |= 1 << SDMA0_RLC0_RB_CNTL__RB_ENABLE__SHIFT; q->is_active = true; } return 0; } static int destroy_mqd(struct mqd_manager *mm, void *mqd, enum kfd_preempt_type type, unsigned int 
timeout, uint32_t pipe_id, uint32_t queue_id) { return mm->dev->kfd2kgd->hqd_destroy(mm->dev->kgd, type, timeout, pipe_id, queue_id); } /* * preempt type here is ignored because there is only one way * to preempt sdma queue */ static int destroy_mqd_sdma(struct mqd_manager *mm, void *mqd, enum kfd_preempt_type type, unsigned int timeout, uint32_t pipe_id, uint32_t queue_id) { return mm->dev->kfd2kgd->hqd_sdma_destroy(mm->dev->kgd, mqd, timeout); } static bool is_occupied(struct mqd_manager *mm, void *mqd, uint64_t queue_address, uint32_t pipe_id, uint32_t queue_id) { return mm->dev->kfd2kgd->hqd_is_occupied(mm->dev->kgd, queue_address, pipe_id, queue_id); } static bool is_occupied_sdma(struct mqd_manager *mm, void *mqd, uint64_t queue_address, uint32_t pipe_id, uint32_t queue_id) { return mm->dev->kfd2kgd->hqd_sdma_is_occupied(mm->dev->kgd, mqd); } /* * HIQ MQD Implementation, concrete implementation for HIQ MQD implementation. * The HIQ queue in Kaveri is using the same MQD structure as all the user mode * queues but with different initial values. 
*/ static int init_mqd_hiq(struct mqd_manager *mm, void **mqd, struct kfd_mem_obj **mqd_mem_obj, uint64_t *gart_addr, struct queue_properties *q) { uint64_t addr; struct cik_mqd *m; int retval; BUG_ON(!mm || !q || !mqd || !mqd_mem_obj); pr_debug("kfd: In func %s\n", __func__); retval = kfd_gtt_sa_allocate(mm->dev, sizeof(struct cik_mqd), mqd_mem_obj); if (retval != 0) return -ENOMEM; m = (struct cik_mqd *) (*mqd_mem_obj)->cpu_ptr; addr = (*mqd_mem_obj)->gpu_addr; memset(m, 0, ALIGN(sizeof(struct cik_mqd), 256)); m->header = 0xC0310800; m->compute_pipelinestat_enable = 1; m->compute_static_thread_mgmt_se0 = 0xFFFFFFFF; m->compute_static_thread_mgmt_se1 = 0xFFFFFFFF; m->compute_static_thread_mgmt_se2 = 0xFFFFFFFF; m->compute_static_thread_mgmt_se3 = 0xFFFFFFFF; m->cp_hqd_persistent_state = DEFAULT_CP_HQD_PERSISTENT_STATE | PRELOAD_REQ; m->cp_hqd_quantum = QUANTUM_EN | QUANTUM_SCALE_1MS | QUANTUM_DURATION(10); m->cp_mqd_control = MQD_CONTROL_PRIV_STATE_EN; m->cp_mqd_base_addr_lo = lower_32_bits(addr); m->cp_mqd_base_addr_hi = upper_32_bits(addr); m->cp_hqd_ib_control = DEFAULT_MIN_IB_AVAIL_SIZE; /* * Pipe Priority * Identifies the pipe relative priority when this queue is connected * to the pipeline. The pipe priority is against the GFX pipe and HP3D. * In KFD we are using a fixed pipe priority set to CS_MEDIUM. 
* 0 = CS_LOW (typically below GFX) * 1 = CS_MEDIUM (typically between HP3D and GFX * 2 = CS_HIGH (typically above HP3D) */ m->cp_hqd_pipe_priority = 1; m->cp_hqd_queue_priority = 15; *mqd = m; if (gart_addr) *gart_addr = addr; retval = mm->update_mqd(mm, m, q); return retval; } static int update_mqd_hiq(struct mqd_manager *mm, void *mqd, struct queue_properties *q) { struct cik_mqd *m; BUG_ON(!mm || !q || !mqd); pr_debug("kfd: In func %s\n", __func__); m = get_mqd(mqd); m->cp_hqd_pq_control = DEFAULT_RPTR_BLOCK_SIZE | DEFAULT_MIN_AVAIL_SIZE | PRIV_STATE | KMD_QUEUE; /* * Calculating queue size which is log base 2 of actual queue * size -1 dwords */ m->cp_hqd_pq_control |= ffs(q->queue_size / sizeof(unsigned int)) - 1 - 1; m->cp_hqd_pq_base_lo = lower_32_bits((uint64_t)q->queue_address >> 8); m->cp_hqd_pq_base_hi = upper_32_bits((uint64_t)q->queue_address >> 8); m->cp_hqd_pq_rptr_report_addr_lo = lower_32_bits((uint64_t)q->read_ptr); m->cp_hqd_pq_rptr_report_addr_hi = upper_32_bits((uint64_t)q->read_ptr); m->cp_hqd_pq_doorbell_control = DOORBELL_EN | DOORBELL_OFFSET(q->doorbell_off); m->cp_hqd_vmid = q->vmid; m->cp_hqd_active = 0; q->is_active = false; if (q->queue_size > 0 && q->queue_address != 0 && q->queue_percent > 0) { m->cp_hqd_active = 1; q->is_active = true; } return 0; } struct cik_sdma_rlc_registers *get_sdma_mqd(void *mqd) { struct cik_sdma_rlc_registers *m; BUG_ON(!mqd); m = (struct cik_sdma_rlc_registers *)mqd; return m; } struct mqd_manager *mqd_manager_init_cik(enum KFD_MQD_TYPE type, struct kfd_dev *dev) { struct mqd_manager *mqd; BUG_ON(!dev); BUG_ON(type >= KFD_MQD_TYPE_MAX); pr_debug("kfd: In func %s\n", __func__); mqd = kzalloc(sizeof(struct mqd_manager), GFP_KERNEL); if (!mqd) return NULL; mqd->dev = dev; switch (type) { case KFD_MQD_TYPE_CP: case KFD_MQD_TYPE_COMPUTE: mqd->init_mqd = init_mqd; mqd->uninit_mqd = uninit_mqd; mqd->load_mqd = load_mqd; mqd->update_mqd = update_mqd; mqd->destroy_mqd = destroy_mqd; mqd->is_occupied = is_occupied; 
break; case KFD_MQD_TYPE_HIQ: mqd->init_mqd = init_mqd_hiq; mqd->uninit_mqd = uninit_mqd; mqd->load_mqd = load_mqd; mqd->update_mqd = update_mqd_hiq; mqd->destroy_mqd = destroy_mqd; mqd->is_occupied = is_occupied; break; case KFD_MQD_TYPE_SDMA: mqd->init_mqd = init_mqd_sdma; mqd->uninit_mqd = uninit_mqd_sdma; mqd->load_mqd = load_mqd_sdma; mqd->update_mqd = update_mqd_sdma; mqd->destroy_mqd = destroy_mqd_sdma; mqd->is_occupied = is_occupied_sdma; break; default: kfree(mqd); return NULL; } return mqd; }
gpl-2.0
playfulgod/Kernel_AS85-LG-Ignite
drivers/media/video/gspca/benq.c
902
8220
/* * Benq DC E300 subdriver * * Copyright (C) 2009 Jean-Francois Moine (http://moinejf.free.fr) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #define MODULE_NAME "benq" #include "gspca.h" MODULE_AUTHOR("Jean-Francois Moine <http://moinejf.free.fr>"); MODULE_DESCRIPTION("Benq DC E300 USB Camera Driver"); MODULE_LICENSE("GPL"); /* specific webcam descriptor */ struct sd { struct gspca_dev gspca_dev; /* !! 
must be the first item */ }; /* V4L2 controls supported by the driver */ static const struct ctrl sd_ctrls[] = { }; static const struct v4l2_pix_format vga_mode[] = { {320, 240, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE, .bytesperline = 320, .sizeimage = 320 * 240 * 3 / 8 + 590, .colorspace = V4L2_COLORSPACE_JPEG}, }; static void sd_isoc_irq(struct urb *urb); /* -- write a register -- */ static void reg_w(struct gspca_dev *gspca_dev, u16 value, u16 index) { struct usb_device *dev = gspca_dev->dev; int ret; if (gspca_dev->usb_err < 0) return; ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), 0x02, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, value, index, NULL, 0, 500); if (ret < 0) { PDEBUG(D_ERR, "reg_w err %d", ret); gspca_dev->usb_err = ret; } } /* this function is called at probe time */ static int sd_config(struct gspca_dev *gspca_dev, const struct usb_device_id *id) { gspca_dev->cam.cam_mode = vga_mode; gspca_dev->cam.nmodes = ARRAY_SIZE(vga_mode); gspca_dev->cam.no_urb_create = 1; gspca_dev->cam.reverse_alts = 1; return 0; } /* this function is called at probe and resume time */ static int sd_init(struct gspca_dev *gspca_dev) { return 0; } static int sd_isoc_init(struct gspca_dev *gspca_dev) { int ret; ret = usb_set_interface(gspca_dev->dev, gspca_dev->iface, gspca_dev->nbalt - 1); if (ret < 0) { err("usb_set_interface failed"); return ret; } /* reg_w(gspca_dev, 0x0003, 0x0002); */ return 0; } /* -- start the camera -- */ static int sd_start(struct gspca_dev *gspca_dev) { struct urb *urb; int i, n; /* create 4 URBs - 2 on endpoint 0x83 and 2 on 0x082 */ #if MAX_NURBS < 4 #error "Not enough URBs in the gspca table" #endif #define SD_PKT_SZ 64 #define SD_NPKT 32 for (n = 0; n < 4; n++) { urb = usb_alloc_urb(SD_NPKT, GFP_KERNEL); if (!urb) { err("usb_alloc_urb failed"); return -ENOMEM; } gspca_dev->urb[n] = urb; urb->transfer_buffer = usb_alloc_coherent(gspca_dev->dev, SD_PKT_SZ * SD_NPKT, GFP_KERNEL, &urb->transfer_dma); if (urb->transfer_buffer == NULL) { 
err("usb_alloc_coherent failed"); return -ENOMEM; } urb->dev = gspca_dev->dev; urb->context = gspca_dev; urb->transfer_buffer_length = SD_PKT_SZ * SD_NPKT; urb->pipe = usb_rcvisocpipe(gspca_dev->dev, n & 1 ? 0x82 : 0x83); urb->transfer_flags = URB_ISO_ASAP | URB_NO_TRANSFER_DMA_MAP; urb->interval = 1; urb->complete = sd_isoc_irq; urb->number_of_packets = SD_NPKT; for (i = 0; i < SD_NPKT; i++) { urb->iso_frame_desc[i].length = SD_PKT_SZ; urb->iso_frame_desc[i].offset = SD_PKT_SZ * i; } } return gspca_dev->usb_err; } static void sd_stopN(struct gspca_dev *gspca_dev) { reg_w(gspca_dev, 0x003c, 0x0003); reg_w(gspca_dev, 0x003c, 0x0004); reg_w(gspca_dev, 0x003c, 0x0005); reg_w(gspca_dev, 0x003c, 0x0006); reg_w(gspca_dev, 0x003c, 0x0007); usb_set_interface(gspca_dev->dev, gspca_dev->iface, gspca_dev->nbalt - 1); } static void sd_pkt_scan(struct gspca_dev *gspca_dev, u8 *data, /* isoc packet */ int len) /* iso packet length */ { /* unused */ } /* reception of an URB */ static void sd_isoc_irq(struct urb *urb) { struct gspca_dev *gspca_dev = (struct gspca_dev *) urb->context; struct urb *urb0; u8 *data; int i, st; PDEBUG(D_PACK, "sd isoc irq"); if (!gspca_dev->streaming) return; if (urb->status != 0) { if (urb->status == -ESHUTDOWN) return; /* disconnection */ #ifdef CONFIG_PM if (gspca_dev->frozen) return; #endif PDEBUG(D_ERR|D_PACK, "urb status: %d", urb->status); return; } /* if this is a control URN (ep 0x83), wait */ if (urb == gspca_dev->urb[0] || urb == gspca_dev->urb[2]) return; /* scan both received URBs */ if (urb == gspca_dev->urb[1]) urb0 = gspca_dev->urb[0]; else urb0 = gspca_dev->urb[2]; for (i = 0; i < urb->number_of_packets; i++) { /* check the packet status and length */ if (urb0->iso_frame_desc[i].actual_length != SD_PKT_SZ || urb->iso_frame_desc[i].actual_length != SD_PKT_SZ) { PDEBUG(D_ERR, "ISOC bad lengths %d / %d", urb0->iso_frame_desc[i].actual_length, urb->iso_frame_desc[i].actual_length); gspca_dev->last_packet_type = DISCARD_PACKET; continue; } 
st = urb0->iso_frame_desc[i].status; if (st == 0) st = urb->iso_frame_desc[i].status; if (st) { PDEBUG(D_ERR, "ISOC data error: [%d] status=%d", i, st); gspca_dev->last_packet_type = DISCARD_PACKET; continue; } /* * The images are received in URBs of different endpoints * (0x83 and 0x82). * Image pieces in URBs of ep 0x83 are continuated in URBs of * ep 0x82 of the same index. * The packets in the URBs of endpoint 0x83 start with: * - 80 ba/bb 00 00 = start of image followed by 'ff d8' * - 04 ba/bb oo oo = image piece * where 'oo oo' is the image offset (not cheked) * - (other -> bad frame) * The images are JPEG encoded with full header and * normal ff escape. * The end of image ('ff d9') may occur in any URB. * (not cheked) */ data = (u8 *) urb0->transfer_buffer + urb0->iso_frame_desc[i].offset; if (data[0] == 0x80 && (data[1] & 0xfe) == 0xba) { /* new image */ gspca_frame_add(gspca_dev, LAST_PACKET, NULL, 0); gspca_frame_add(gspca_dev, FIRST_PACKET, data + 4, SD_PKT_SZ - 4); } else if (data[0] == 0x04 && (data[1] & 0xfe) == 0xba) { gspca_frame_add(gspca_dev, INTER_PACKET, data + 4, SD_PKT_SZ - 4); } else { gspca_dev->last_packet_type = DISCARD_PACKET; continue; } data = (u8 *) urb->transfer_buffer + urb->iso_frame_desc[i].offset; gspca_frame_add(gspca_dev, INTER_PACKET, data, SD_PKT_SZ); } /* resubmit the URBs */ st = usb_submit_urb(urb0, GFP_ATOMIC); if (st < 0) PDEBUG(D_ERR|D_PACK, "usb_submit_urb(0) ret %d", st); st = usb_submit_urb(urb, GFP_ATOMIC); if (st < 0) PDEBUG(D_ERR|D_PACK, "usb_submit_urb() ret %d", st); } /* sub-driver description */ static const struct sd_desc sd_desc = { .name = MODULE_NAME, .ctrls = sd_ctrls, .nctrls = ARRAY_SIZE(sd_ctrls), .config = sd_config, .init = sd_init, .isoc_init = sd_isoc_init, .start = sd_start, .stopN = sd_stopN, .pkt_scan = sd_pkt_scan, }; /* -- module initialisation -- */ static const __devinitdata struct usb_device_id device_table[] = { {USB_DEVICE(0x04a5, 0x3035)}, {} }; MODULE_DEVICE_TABLE(usb, device_table); /* 
-- device connect -- */ static int sd_probe(struct usb_interface *intf, const struct usb_device_id *id) { return gspca_dev_probe(intf, id, &sd_desc, sizeof(struct sd), THIS_MODULE); } static struct usb_driver sd_driver = { .name = MODULE_NAME, .id_table = device_table, .probe = sd_probe, .disconnect = gspca_disconnect, #ifdef CONFIG_PM .suspend = gspca_suspend, .resume = gspca_resume, #endif }; /* -- module insert / remove -- */ static int __init sd_mod_init(void) { int ret; ret = usb_register(&sd_driver); if (ret < 0) return ret; info("registered"); return 0; } static void __exit sd_mod_exit(void) { usb_deregister(&sd_driver); info("deregistered"); } module_init(sd_mod_init); module_exit(sd_mod_exit);
gpl-2.0
YoungjaeLee/linux-4.3-cxlbdev
arch/x86/kernel/quirks.c
1158
16904
/* * This file contains work-arounds for x86 and x86_64 platform bugs. */ #include <linux/pci.h> #include <linux/irq.h> #include <asm/hpet.h> #if defined(CONFIG_X86_IO_APIC) && defined(CONFIG_SMP) && defined(CONFIG_PCI) static void quirk_intel_irqbalance(struct pci_dev *dev) { u8 config; u16 word; /* BIOS may enable hardware IRQ balancing for * E7520/E7320/E7525(revision ID 0x9 and below) * based platforms. * Disable SW irqbalance/affinity on those platforms. */ if (dev->revision > 0x9) return; /* enable access to config space*/ pci_read_config_byte(dev, 0xf4, &config); pci_write_config_byte(dev, 0xf4, config|0x2); /* * read xTPR register. We may not have a pci_dev for device 8 * because it might be hidden until the above write. */ pci_bus_read_config_word(dev->bus, PCI_DEVFN(8, 0), 0x4c, &word); if (!(word & (1 << 13))) { dev_info(&dev->dev, "Intel E7520/7320/7525 detected; " "disabling irq balancing and affinity\n"); noirqdebug_setup(""); #ifdef CONFIG_PROC_FS no_irq_affinity = 1; #endif } /* put back the original value for config space*/ if (!(config & 0x2)) pci_write_config_byte(dev, 0xf4, config); } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7320_MCH, quirk_intel_irqbalance); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7525_MCH, quirk_intel_irqbalance); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7520_MCH, quirk_intel_irqbalance); #endif #if defined(CONFIG_HPET_TIMER) unsigned long force_hpet_address; static enum { NONE_FORCE_HPET_RESUME, OLD_ICH_FORCE_HPET_RESUME, ICH_FORCE_HPET_RESUME, VT8237_FORCE_HPET_RESUME, NVIDIA_FORCE_HPET_RESUME, ATI_FORCE_HPET_RESUME, } force_hpet_resume_type; static void __iomem *rcba_base; static void ich_force_hpet_resume(void) { u32 val; if (!force_hpet_address) return; BUG_ON(rcba_base == NULL); /* read the Function Disable register, dword mode only */ val = readl(rcba_base + 0x3404); if (!(val & 0x80)) { /* HPET disabled in HPTC. 
Trying to enable */ writel(val | 0x80, rcba_base + 0x3404); } val = readl(rcba_base + 0x3404); if (!(val & 0x80)) BUG(); else printk(KERN_DEBUG "Force enabled HPET at resume\n"); return; } static void ich_force_enable_hpet(struct pci_dev *dev) { u32 val; u32 uninitialized_var(rcba); int err = 0; if (hpet_address || force_hpet_address) return; pci_read_config_dword(dev, 0xF0, &rcba); rcba &= 0xFFFFC000; if (rcba == 0) { dev_printk(KERN_DEBUG, &dev->dev, "RCBA disabled; " "cannot force enable HPET\n"); return; } /* use bits 31:14, 16 kB aligned */ rcba_base = ioremap_nocache(rcba, 0x4000); if (rcba_base == NULL) { dev_printk(KERN_DEBUG, &dev->dev, "ioremap failed; " "cannot force enable HPET\n"); return; } /* read the Function Disable register, dword mode only */ val = readl(rcba_base + 0x3404); if (val & 0x80) { /* HPET is enabled in HPTC. Just not reported by BIOS */ val = val & 0x3; force_hpet_address = 0xFED00000 | (val << 12); dev_printk(KERN_DEBUG, &dev->dev, "Force enabled HPET at " "0x%lx\n", force_hpet_address); iounmap(rcba_base); return; } /* HPET disabled in HPTC. 
Trying to enable */ writel(val | 0x80, rcba_base + 0x3404); val = readl(rcba_base + 0x3404); if (!(val & 0x80)) { err = 1; } else { val = val & 0x3; force_hpet_address = 0xFED00000 | (val << 12); } if (err) { force_hpet_address = 0; iounmap(rcba_base); dev_printk(KERN_DEBUG, &dev->dev, "Failed to force enable HPET\n"); } else { force_hpet_resume_type = ICH_FORCE_HPET_RESUME; dev_printk(KERN_DEBUG, &dev->dev, "Force enabled HPET at " "0x%lx\n", force_hpet_address); } } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB2_0, ich_force_enable_hpet); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_0, ich_force_enable_hpet); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, ich_force_enable_hpet); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_0, ich_force_enable_hpet); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_1, ich_force_enable_hpet); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_31, ich_force_enable_hpet); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_1, ich_force_enable_hpet); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_4, ich_force_enable_hpet); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_7, ich_force_enable_hpet); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x3a16, /* ICH10 */ ich_force_enable_hpet); static struct pci_dev *cached_dev; static void hpet_print_force_info(void) { printk(KERN_INFO "HPET not enabled in BIOS. 
" "You might try hpet=force boot option\n"); } static void old_ich_force_hpet_resume(void) { u32 val; u32 uninitialized_var(gen_cntl); if (!force_hpet_address || !cached_dev) return; pci_read_config_dword(cached_dev, 0xD0, &gen_cntl); gen_cntl &= (~(0x7 << 15)); gen_cntl |= (0x4 << 15); pci_write_config_dword(cached_dev, 0xD0, gen_cntl); pci_read_config_dword(cached_dev, 0xD0, &gen_cntl); val = gen_cntl >> 15; val &= 0x7; if (val == 0x4) printk(KERN_DEBUG "Force enabled HPET at resume\n"); else BUG(); } static void old_ich_force_enable_hpet(struct pci_dev *dev) { u32 val; u32 uninitialized_var(gen_cntl); if (hpet_address || force_hpet_address) return; pci_read_config_dword(dev, 0xD0, &gen_cntl); /* * Bit 17 is HPET enable bit. * Bit 16:15 control the HPET base address. */ val = gen_cntl >> 15; val &= 0x7; if (val & 0x4) { val &= 0x3; force_hpet_address = 0xFED00000 | (val << 12); dev_printk(KERN_DEBUG, &dev->dev, "HPET at 0x%lx\n", force_hpet_address); return; } /* * HPET is disabled. Trying enabling at FED00000 and check * whether it sticks */ gen_cntl &= (~(0x7 << 15)); gen_cntl |= (0x4 << 15); pci_write_config_dword(dev, 0xD0, gen_cntl); pci_read_config_dword(dev, 0xD0, &gen_cntl); val = gen_cntl >> 15; val &= 0x7; if (val & 0x4) { /* HPET is enabled in HPTC. Just not reported by BIOS */ val &= 0x3; force_hpet_address = 0xFED00000 | (val << 12); dev_printk(KERN_DEBUG, &dev->dev, "Force enabled HPET at " "0x%lx\n", force_hpet_address); cached_dev = dev; force_hpet_resume_type = OLD_ICH_FORCE_HPET_RESUME; return; } dev_printk(KERN_DEBUG, &dev->dev, "Failed to force enable HPET\n"); } /* * Undocumented chipset features. Make sure that the user enforced * this. 
*/ static void old_ich_force_enable_hpet_user(struct pci_dev *dev) { if (hpet_force_user) old_ich_force_enable_hpet(dev); } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_1, old_ich_force_enable_hpet_user); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_0, old_ich_force_enable_hpet_user); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_12, old_ich_force_enable_hpet_user); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_0, old_ich_force_enable_hpet_user); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_12, old_ich_force_enable_hpet_user); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0, old_ich_force_enable_hpet); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_12, old_ich_force_enable_hpet); static void vt8237_force_hpet_resume(void) { u32 val; if (!force_hpet_address || !cached_dev) return; val = 0xfed00000 | 0x80; pci_write_config_dword(cached_dev, 0x68, val); pci_read_config_dword(cached_dev, 0x68, &val); if (val & 0x80) printk(KERN_DEBUG "Force enabled HPET at resume\n"); else BUG(); } static void vt8237_force_enable_hpet(struct pci_dev *dev) { u32 uninitialized_var(val); if (hpet_address || force_hpet_address) return; if (!hpet_force_user) { hpet_print_force_info(); return; } pci_read_config_dword(dev, 0x68, &val); /* * Bit 7 is HPET enable bit. * Bit 31:10 is HPET base address (contrary to what datasheet claims) */ if (val & 0x80) { force_hpet_address = (val & ~0x3ff); dev_printk(KERN_DEBUG, &dev->dev, "HPET at 0x%lx\n", force_hpet_address); return; } /* * HPET is disabled. 
Trying enabling at FED00000 and check * whether it sticks */ val = 0xfed00000 | 0x80; pci_write_config_dword(dev, 0x68, val); pci_read_config_dword(dev, 0x68, &val); if (val & 0x80) { force_hpet_address = (val & ~0x3ff); dev_printk(KERN_DEBUG, &dev->dev, "Force enabled HPET at " "0x%lx\n", force_hpet_address); cached_dev = dev; force_hpet_resume_type = VT8237_FORCE_HPET_RESUME; return; } dev_printk(KERN_DEBUG, &dev->dev, "Failed to force enable HPET\n"); } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8235, vt8237_force_enable_hpet); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, vt8237_force_enable_hpet); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_CX700, vt8237_force_enable_hpet); static void ati_force_hpet_resume(void) { pci_write_config_dword(cached_dev, 0x14, 0xfed00000); printk(KERN_DEBUG "Force enabled HPET at resume\n"); } static u32 ati_ixp4x0_rev(struct pci_dev *dev) { int err = 0; u32 d = 0; u8 b = 0; err = pci_read_config_byte(dev, 0xac, &b); b &= ~(1<<5); err |= pci_write_config_byte(dev, 0xac, b); err |= pci_read_config_dword(dev, 0x70, &d); d |= 1<<8; err |= pci_write_config_dword(dev, 0x70, d); err |= pci_read_config_dword(dev, 0x8, &d); d &= 0xff; dev_printk(KERN_DEBUG, &dev->dev, "SB4X0 revision 0x%x\n", d); WARN_ON_ONCE(err); return d; } static void ati_force_enable_hpet(struct pci_dev *dev) { u32 d, val; u8 b; if (hpet_address || force_hpet_address) return; if (!hpet_force_user) { hpet_print_force_info(); return; } d = ati_ixp4x0_rev(dev); if (d < 0x82) return; /* base address */ pci_write_config_dword(dev, 0x14, 0xfed00000); pci_read_config_dword(dev, 0x14, &val); /* enable interrupt */ outb(0x72, 0xcd6); b = inb(0xcd7); b |= 0x1; outb(0x72, 0xcd6); outb(b, 0xcd7); outb(0x72, 0xcd6); b = inb(0xcd7); if (!(b & 0x1)) return; pci_read_config_dword(dev, 0x64, &d); d |= (1<<10); pci_write_config_dword(dev, 0x64, d); pci_read_config_dword(dev, 0x64, &d); if (!(d & (1<<10))) return; 
force_hpet_address = val; force_hpet_resume_type = ATI_FORCE_HPET_RESUME; dev_printk(KERN_DEBUG, &dev->dev, "Force enabled HPET at 0x%lx\n", force_hpet_address); cached_dev = dev; } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP400_SMBUS, ati_force_enable_hpet); /* * Undocumented chipset feature taken from LinuxBIOS. */ static void nvidia_force_hpet_resume(void) { pci_write_config_dword(cached_dev, 0x44, 0xfed00001); printk(KERN_DEBUG "Force enabled HPET at resume\n"); } static void nvidia_force_enable_hpet(struct pci_dev *dev) { u32 uninitialized_var(val); if (hpet_address || force_hpet_address) return; if (!hpet_force_user) { hpet_print_force_info(); return; } pci_write_config_dword(dev, 0x44, 0xfed00001); pci_read_config_dword(dev, 0x44, &val); force_hpet_address = val & 0xfffffffe; force_hpet_resume_type = NVIDIA_FORCE_HPET_RESUME; dev_printk(KERN_DEBUG, &dev->dev, "Force enabled HPET at 0x%lx\n", force_hpet_address); cached_dev = dev; return; } /* ISA Bridges */ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0050, nvidia_force_enable_hpet); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0051, nvidia_force_enable_hpet); /* LPC bridges */ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0260, nvidia_force_enable_hpet); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0360, nvidia_force_enable_hpet); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0361, nvidia_force_enable_hpet); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0362, nvidia_force_enable_hpet); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0363, nvidia_force_enable_hpet); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0364, nvidia_force_enable_hpet); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0365, nvidia_force_enable_hpet); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0366, nvidia_force_enable_hpet); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0367, nvidia_force_enable_hpet); void force_hpet_resume(void) { switch (force_hpet_resume_type) { case 
ICH_FORCE_HPET_RESUME: ich_force_hpet_resume(); return; case OLD_ICH_FORCE_HPET_RESUME: old_ich_force_hpet_resume(); return; case VT8237_FORCE_HPET_RESUME: vt8237_force_hpet_resume(); return; case NVIDIA_FORCE_HPET_RESUME: nvidia_force_hpet_resume(); return; case ATI_FORCE_HPET_RESUME: ati_force_hpet_resume(); return; default: break; } } /* * According to the datasheet e6xx systems have the HPET hardwired to * 0xfed00000 */ static void e6xx_force_enable_hpet(struct pci_dev *dev) { if (hpet_address || force_hpet_address) return; force_hpet_address = 0xFED00000; force_hpet_resume_type = NONE_FORCE_HPET_RESUME; dev_printk(KERN_DEBUG, &dev->dev, "Force enabled HPET at " "0x%lx\n", force_hpet_address); return; } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E6XX_CU, e6xx_force_enable_hpet); /* * HPET MSI on some boards (ATI SB700/SB800) has side effect on * floppy DMA. Disable HPET MSI on such platforms. * See erratum #27 (Misinterpreted MSI Requests May Result in * Corrupted LPC DMA Data) in AMD Publication #46837, * "SB700 Family Product Errata", Rev. 1.0, March 2010. 
*/ static void force_disable_hpet_msi(struct pci_dev *unused) { hpet_msi_disable = 1; } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_SBX00_SMBUS, force_disable_hpet_msi); #endif #if defined(CONFIG_PCI) && defined(CONFIG_NUMA) /* Set correct numa_node information for AMD NB functions */ static void quirk_amd_nb_node(struct pci_dev *dev) { struct pci_dev *nb_ht; unsigned int devfn; u32 node; u32 val; devfn = PCI_DEVFN(PCI_SLOT(dev->devfn), 0); nb_ht = pci_get_slot(dev->bus, devfn); if (!nb_ht) return; pci_read_config_dword(nb_ht, 0x60, &val); node = pcibus_to_node(dev->bus) | (val & 7); /* * Some hardware may return an invalid node ID, * so check it first: */ if (node_online(node)) set_dev_node(&dev->dev, node); pci_dev_put(nb_ht); } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB, quirk_amd_nb_node); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_ADDRMAP, quirk_amd_nb_node); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MEMCTL, quirk_amd_nb_node); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC, quirk_amd_nb_node); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_HT, quirk_amd_nb_node); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MAP, quirk_amd_nb_node); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_DRAM, quirk_amd_nb_node); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC, quirk_amd_nb_node); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_LINK, quirk_amd_nb_node); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F0, quirk_amd_nb_node); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F1, quirk_amd_nb_node); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F2, quirk_amd_nb_node); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F3, quirk_amd_nb_node); 
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F4, quirk_amd_nb_node); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F5, quirk_amd_nb_node); #endif #ifdef CONFIG_PCI /* * Processor does not ensure DRAM scrub read/write sequence * is atomic wrt accesses to CC6 save state area. Therefore * if a concurrent scrub read/write access is to same address * the entry may appear as if it is not written. This quirk * applies to Fam16h models 00h-0Fh * * See "Revision Guide" for AMD F16h models 00h-0fh, * document 51810 rev. 3.04, Nov 2013 */ static void amd_disable_seq_and_redirect_scrub(struct pci_dev *dev) { u32 val; /* * Suggested workaround: * set D18F3x58[4:0] = 00h and set D18F3x5C[0] = 0b */ pci_read_config_dword(dev, 0x58, &val); if (val & 0x1F) { val &= ~(0x1F); pci_write_config_dword(dev, 0x58, val); } pci_read_config_dword(dev, 0x5C, &val); if (val & BIT(0)) { val &= ~BIT(0); pci_write_config_dword(dev, 0x5c, val); } } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F3, amd_disable_seq_and_redirect_scrub); #endif
gpl-2.0
drakaz/gaosp_kernel
arch/sh/boards/mach-se/7751/setup.c
1670
1761
/* * linux/arch/sh/boards/se/7751/setup.c * * Copyright (C) 2000 Kazumoto Kojima * * Hitachi SolutionEngine Support. * * Modified for 7751 Solution Engine by * Ian da Silva and Jeremy Siegel, 2001. */ #include <linux/init.h> #include <linux/platform_device.h> #include <asm/machvec.h> #include <mach-se/mach/se7751.h> #include <asm/io.h> #include <asm/heartbeat.h> static unsigned char heartbeat_bit_pos[] = { 8, 9, 10, 11, 12, 13, 14, 15 }; static struct heartbeat_data heartbeat_data = { .bit_pos = heartbeat_bit_pos, .nr_bits = ARRAY_SIZE(heartbeat_bit_pos), }; static struct resource heartbeat_resources[] = { [0] = { .start = PA_LED, .end = PA_LED, .flags = IORESOURCE_MEM, }, }; static struct platform_device heartbeat_device = { .name = "heartbeat", .id = -1, .dev = { .platform_data = &heartbeat_data, }, .num_resources = ARRAY_SIZE(heartbeat_resources), .resource = heartbeat_resources, }; static struct platform_device *se7751_devices[] __initdata = { &heartbeat_device, }; static int __init se7751_devices_setup(void) { return platform_add_devices(se7751_devices, ARRAY_SIZE(se7751_devices)); } __initcall(se7751_devices_setup); /* * The Machine Vector */ static struct sh_machine_vector mv_7751se __initmv = { .mv_name = "7751 SolutionEngine", .mv_nr_irqs = 72, .mv_inb = sh7751se_inb, .mv_inw = sh7751se_inw, .mv_inl = sh7751se_inl, .mv_outb = sh7751se_outb, .mv_outw = sh7751se_outw, .mv_outl = sh7751se_outl, .mv_inb_p = sh7751se_inb_p, .mv_inw_p = sh7751se_inw, .mv_inl_p = sh7751se_inl, .mv_outb_p = sh7751se_outb_p, .mv_outw_p = sh7751se_outw, .mv_outl_p = sh7751se_outl, .mv_insl = sh7751se_insl, .mv_outsl = sh7751se_outsl, .mv_init_irq = init_7751se_IRQ, };
gpl-2.0
DirtyJerz/omap
drivers/spi/ti-ssp-spi.c
2950
9566
/* * Sequencer Serial Port (SSP) based SPI master driver * * Copyright (C) 2010 Texas Instruments Inc * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/kernel.h> #include <linux/err.h> #include <linux/completion.h> #include <linux/delay.h> #include <linux/platform_device.h> #include <linux/spi/spi.h> #include <linux/mfd/ti_ssp.h> #define MODE_BITS (SPI_CPHA | SPI_CPOL | SPI_CS_HIGH) struct ti_ssp_spi { struct spi_master *master; struct device *dev; spinlock_t lock; struct list_head msg_queue; struct completion complete; bool shutdown; struct workqueue_struct *workqueue; struct work_struct work; u8 mode, bpw; int cs_active; u32 pc_en, pc_dis, pc_wr, pc_rd; void (*select)(int cs); }; static u32 ti_ssp_spi_rx(struct ti_ssp_spi *hw) { u32 ret; ti_ssp_run(hw->dev, hw->pc_rd, 0, &ret); return ret; } static void ti_ssp_spi_tx(struct ti_ssp_spi *hw, u32 data) { ti_ssp_run(hw->dev, hw->pc_wr, data << (32 - hw->bpw), NULL); } static int ti_ssp_spi_txrx(struct ti_ssp_spi *hw, struct spi_message *msg, struct spi_transfer *t) { int count; if (hw->bpw <= 8) { u8 *rx = t->rx_buf; const u8 *tx = t->tx_buf; for (count = 0; count < t->len; count += 1) { if (t->tx_buf) ti_ssp_spi_tx(hw, *tx++); if (t->rx_buf) *rx++ = ti_ssp_spi_rx(hw); } } else if (hw->bpw <= 16) { u16 *rx = t->rx_buf; const u16 *tx = t->tx_buf; for (count = 0; 
count < t->len; count += 2) { if (t->tx_buf) ti_ssp_spi_tx(hw, *tx++); if (t->rx_buf) *rx++ = ti_ssp_spi_rx(hw); } } else { u32 *rx = t->rx_buf; const u32 *tx = t->tx_buf; for (count = 0; count < t->len; count += 4) { if (t->tx_buf) ti_ssp_spi_tx(hw, *tx++); if (t->rx_buf) *rx++ = ti_ssp_spi_rx(hw); } } msg->actual_length += count; /* bytes transferred */ dev_dbg(&msg->spi->dev, "xfer %s%s, %d bytes, %d bpw, count %d%s\n", t->tx_buf ? "tx" : "", t->rx_buf ? "rx" : "", t->len, hw->bpw, count, (count < t->len) ? " (under)" : ""); return (count < t->len) ? -EIO : 0; /* left over data */ } static void ti_ssp_spi_chip_select(struct ti_ssp_spi *hw, int cs_active) { cs_active = !!cs_active; if (cs_active == hw->cs_active) return; ti_ssp_run(hw->dev, cs_active ? hw->pc_en : hw->pc_dis, 0, NULL); hw->cs_active = cs_active; } #define __SHIFT_OUT(bits) (SSP_OPCODE_SHIFT | SSP_OUT_MODE | \ cs_en | clk | SSP_COUNT((bits) * 2 - 1)) #define __SHIFT_IN(bits) (SSP_OPCODE_SHIFT | SSP_IN_MODE | \ cs_en | clk | SSP_COUNT((bits) * 2 - 1)) static int ti_ssp_spi_setup_transfer(struct ti_ssp_spi *hw, u8 bpw, u8 mode) { int error, idx = 0; u32 seqram[16]; u32 cs_en, cs_dis, clk; u32 topbits, botbits; mode &= MODE_BITS; if (mode == hw->mode && bpw == hw->bpw) return 0; cs_en = (mode & SPI_CS_HIGH) ? SSP_CS_HIGH : SSP_CS_LOW; cs_dis = (mode & SPI_CS_HIGH) ? SSP_CS_LOW : SSP_CS_HIGH; clk = (mode & SPI_CPOL) ? SSP_CLK_HIGH : SSP_CLK_LOW; /* Construct instructions */ /* Disable Chip Select */ hw->pc_dis = idx; seqram[idx++] = SSP_OPCODE_DIRECT | SSP_OUT_MODE | cs_dis | clk; seqram[idx++] = SSP_OPCODE_STOP | SSP_OUT_MODE | cs_dis | clk; /* Enable Chip Select */ hw->pc_en = idx; seqram[idx++] = SSP_OPCODE_DIRECT | SSP_OUT_MODE | cs_en | clk; seqram[idx++] = SSP_OPCODE_STOP | SSP_OUT_MODE | cs_en | clk; /* Reads and writes need to be split for bpw > 16 */ topbits = (bpw > 16) ? 
16 : bpw; botbits = bpw - topbits; /* Write */ hw->pc_wr = idx; seqram[idx++] = __SHIFT_OUT(topbits) | SSP_ADDR_REG; if (botbits) seqram[idx++] = __SHIFT_OUT(botbits) | SSP_DATA_REG; seqram[idx++] = SSP_OPCODE_STOP | SSP_OUT_MODE | cs_en | clk; /* Read */ hw->pc_rd = idx; if (botbits) seqram[idx++] = __SHIFT_IN(botbits) | SSP_ADDR_REG; seqram[idx++] = __SHIFT_IN(topbits) | SSP_DATA_REG; seqram[idx++] = SSP_OPCODE_STOP | SSP_OUT_MODE | cs_en | clk; error = ti_ssp_load(hw->dev, 0, seqram, idx); if (error < 0) return error; error = ti_ssp_set_mode(hw->dev, ((mode & SPI_CPHA) ? 0 : SSP_EARLY_DIN)); if (error < 0) return error; hw->bpw = bpw; hw->mode = mode; return error; } static void ti_ssp_spi_work(struct work_struct *work) { struct ti_ssp_spi *hw = container_of(work, struct ti_ssp_spi, work); spin_lock(&hw->lock); while (!list_empty(&hw->msg_queue)) { struct spi_message *m; struct spi_device *spi; struct spi_transfer *t = NULL; int status = 0; m = container_of(hw->msg_queue.next, struct spi_message, queue); list_del_init(&m->queue); spin_unlock(&hw->lock); spi = m->spi; if (hw->select) hw->select(spi->chip_select); list_for_each_entry(t, &m->transfers, transfer_list) { int bpw = spi->bits_per_word; int xfer_status; if (t->bits_per_word) bpw = t->bits_per_word; if (ti_ssp_spi_setup_transfer(hw, bpw, spi->mode) < 0) break; ti_ssp_spi_chip_select(hw, 1); xfer_status = ti_ssp_spi_txrx(hw, m, t); if (xfer_status < 0) status = xfer_status; if (t->delay_usecs) udelay(t->delay_usecs); if (t->cs_change) ti_ssp_spi_chip_select(hw, 0); } ti_ssp_spi_chip_select(hw, 0); m->status = status; m->complete(m->context); spin_lock(&hw->lock); } if (hw->shutdown) complete(&hw->complete); spin_unlock(&hw->lock); } static int ti_ssp_spi_setup(struct spi_device *spi) { if (spi->bits_per_word > 32) return -EINVAL; return 0; } static int ti_ssp_spi_transfer(struct spi_device *spi, struct spi_message *m) { struct ti_ssp_spi *hw; struct spi_transfer *t; int error = 0; m->actual_length = 0; 
m->status = -EINPROGRESS; hw = spi_master_get_devdata(spi->master); if (list_empty(&m->transfers) || !m->complete) return -EINVAL; list_for_each_entry(t, &m->transfers, transfer_list) { if (t->len && !(t->rx_buf || t->tx_buf)) { dev_err(&spi->dev, "invalid xfer, no buffer\n"); return -EINVAL; } if (t->len && t->rx_buf && t->tx_buf) { dev_err(&spi->dev, "invalid xfer, full duplex\n"); return -EINVAL; } if (t->bits_per_word > 32) { dev_err(&spi->dev, "invalid xfer width %d\n", t->bits_per_word); return -EINVAL; } } spin_lock(&hw->lock); if (hw->shutdown) { error = -ESHUTDOWN; goto error_unlock; } list_add_tail(&m->queue, &hw->msg_queue); queue_work(hw->workqueue, &hw->work); error_unlock: spin_unlock(&hw->lock); return error; } static int __devinit ti_ssp_spi_probe(struct platform_device *pdev) { const struct ti_ssp_spi_data *pdata; struct ti_ssp_spi *hw; struct spi_master *master; struct device *dev = &pdev->dev; int error = 0; pdata = dev->platform_data; if (!pdata) { dev_err(dev, "platform data not found\n"); return -EINVAL; } master = spi_alloc_master(dev, sizeof(struct ti_ssp_spi)); if (!master) { dev_err(dev, "cannot allocate SPI master\n"); return -ENOMEM; } hw = spi_master_get_devdata(master); platform_set_drvdata(pdev, hw); hw->master = master; hw->dev = dev; hw->select = pdata->select; spin_lock_init(&hw->lock); init_completion(&hw->complete); INIT_LIST_HEAD(&hw->msg_queue); INIT_WORK(&hw->work, ti_ssp_spi_work); hw->workqueue = create_singlethread_workqueue(dev_name(dev)); if (!hw->workqueue) { error = -ENOMEM; dev_err(dev, "work queue creation failed\n"); goto error_wq; } error = ti_ssp_set_iosel(hw->dev, pdata->iosel); if (error < 0) { dev_err(dev, "io setup failed\n"); goto error_iosel; } master->bus_num = pdev->id; master->num_chipselect = pdata->num_cs; master->mode_bits = MODE_BITS; master->flags = SPI_MASTER_HALF_DUPLEX; master->setup = ti_ssp_spi_setup; master->transfer = ti_ssp_spi_transfer; error = spi_register_master(master); if (error) { 
dev_err(dev, "master registration failed\n"); goto error_reg; } return 0; error_reg: error_iosel: destroy_workqueue(hw->workqueue); error_wq: spi_master_put(master); return error; } static int __devexit ti_ssp_spi_remove(struct platform_device *pdev) { struct ti_ssp_spi *hw = platform_get_drvdata(pdev); int error; hw->shutdown = 1; while (!list_empty(&hw->msg_queue)) { error = wait_for_completion_interruptible(&hw->complete); if (error < 0) { hw->shutdown = 0; return error; } } destroy_workqueue(hw->workqueue); spi_unregister_master(hw->master); return 0; } static struct platform_driver ti_ssp_spi_driver = { .probe = ti_ssp_spi_probe, .remove = __devexit_p(ti_ssp_spi_remove), .driver = { .name = "ti-ssp-spi", .owner = THIS_MODULE, }, }; static int __init ti_ssp_spi_init(void) { return platform_driver_register(&ti_ssp_spi_driver); } module_init(ti_ssp_spi_init); static void __exit ti_ssp_spi_exit(void) { platform_driver_unregister(&ti_ssp_spi_driver); } module_exit(ti_ssp_spi_exit); MODULE_DESCRIPTION("SSP SPI Master"); MODULE_AUTHOR("Cyril Chemparathy"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:ti-ssp-spi");
gpl-2.0
iBluemind/android_kernel_lge_su760
net/netfilter/ipvs/ip_vs_proto_tcp.c
2950
19645
/* * ip_vs_proto_tcp.c: TCP load balancing support for IPVS * * Authors: Wensong Zhang <wensong@linuxvirtualserver.org> * Julian Anastasov <ja@ssi.bg> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * Changes: Hans Schillstrom <hans.schillstrom@ericsson.com> * * Network name space (netns) aware. * Global data moved to netns i.e struct netns_ipvs * tcp_timeouts table has copy per netns in a hash table per * protocol ip_vs_proto_data and is handled by netns */ #define KMSG_COMPONENT "IPVS" #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt #include <linux/kernel.h> #include <linux/ip.h> #include <linux/tcp.h> /* for tcphdr */ #include <net/ip.h> #include <net/tcp.h> /* for csum_tcpudp_magic */ #include <net/ip6_checksum.h> #include <linux/netfilter.h> #include <linux/netfilter_ipv4.h> #include <net/ip_vs.h> static int tcp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd, int *verdict, struct ip_vs_conn **cpp) { struct net *net; struct ip_vs_service *svc; struct tcphdr _tcph, *th; struct ip_vs_iphdr iph; ip_vs_fill_iphdr(af, skb_network_header(skb), &iph); th = skb_header_pointer(skb, iph.len, sizeof(_tcph), &_tcph); if (th == NULL) { *verdict = NF_DROP; return 0; } net = skb_net(skb); /* No !th->ack check to allow scheduling on SYN+ACK for Active FTP */ if (th->syn && (svc = ip_vs_service_get(net, af, skb->mark, iph.protocol, &iph.daddr, th->dest))) { int ignored; if (ip_vs_todrop(net_ipvs(net))) { /* * It seems that we are very loaded. * We have to drop this packet :( */ ip_vs_service_put(svc); *verdict = NF_DROP; return 0; } /* * Let the virtual server select a real server for the * incoming connection, and create a connection entry. 
*/ *cpp = ip_vs_schedule(svc, skb, pd, &ignored); if (!*cpp && ignored <= 0) { if (!ignored) *verdict = ip_vs_leave(svc, skb, pd); else { ip_vs_service_put(svc); *verdict = NF_DROP; } return 0; } ip_vs_service_put(svc); } /* NF_ACCEPT */ return 1; } static inline void tcp_fast_csum_update(int af, struct tcphdr *tcph, const union nf_inet_addr *oldip, const union nf_inet_addr *newip, __be16 oldport, __be16 newport) { #ifdef CONFIG_IP_VS_IPV6 if (af == AF_INET6) tcph->check = csum_fold(ip_vs_check_diff16(oldip->ip6, newip->ip6, ip_vs_check_diff2(oldport, newport, ~csum_unfold(tcph->check)))); else #endif tcph->check = csum_fold(ip_vs_check_diff4(oldip->ip, newip->ip, ip_vs_check_diff2(oldport, newport, ~csum_unfold(tcph->check)))); } static inline void tcp_partial_csum_update(int af, struct tcphdr *tcph, const union nf_inet_addr *oldip, const union nf_inet_addr *newip, __be16 oldlen, __be16 newlen) { #ifdef CONFIG_IP_VS_IPV6 if (af == AF_INET6) tcph->check = ~csum_fold(ip_vs_check_diff16(oldip->ip6, newip->ip6, ip_vs_check_diff2(oldlen, newlen, csum_unfold(tcph->check)))); else #endif tcph->check = ~csum_fold(ip_vs_check_diff4(oldip->ip, newip->ip, ip_vs_check_diff2(oldlen, newlen, csum_unfold(tcph->check)))); } static int tcp_snat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp, struct ip_vs_conn *cp) { struct tcphdr *tcph; unsigned int tcphoff; int oldlen; int payload_csum = 0; #ifdef CONFIG_IP_VS_IPV6 if (cp->af == AF_INET6) tcphoff = sizeof(struct ipv6hdr); else #endif tcphoff = ip_hdrlen(skb); oldlen = skb->len - tcphoff; /* csum_check requires unshared skb */ if (!skb_make_writable(skb, tcphoff+sizeof(*tcph))) return 0; if (unlikely(cp->app != NULL)) { int ret; /* Some checks before mangling */ if (pp->csum_check && !pp->csum_check(cp->af, skb, pp)) return 0; /* Call application helper if needed */ if (!(ret = ip_vs_app_pkt_out(cp, skb))) return 0; /* ret=2: csum update is needed after payload mangling */ if (ret == 1) oldlen = skb->len - tcphoff; else 
payload_csum = 1; } tcph = (void *)skb_network_header(skb) + tcphoff; tcph->source = cp->vport; /* Adjust TCP checksums */ if (skb->ip_summed == CHECKSUM_PARTIAL) { tcp_partial_csum_update(cp->af, tcph, &cp->daddr, &cp->vaddr, htons(oldlen), htons(skb->len - tcphoff)); } else if (!payload_csum) { /* Only port and addr are changed, do fast csum update */ tcp_fast_csum_update(cp->af, tcph, &cp->daddr, &cp->vaddr, cp->dport, cp->vport); if (skb->ip_summed == CHECKSUM_COMPLETE) skb->ip_summed = (cp->app && pp->csum_check) ? CHECKSUM_UNNECESSARY : CHECKSUM_NONE; } else { /* full checksum calculation */ tcph->check = 0; skb->csum = skb_checksum(skb, tcphoff, skb->len - tcphoff, 0); #ifdef CONFIG_IP_VS_IPV6 if (cp->af == AF_INET6) tcph->check = csum_ipv6_magic(&cp->vaddr.in6, &cp->caddr.in6, skb->len - tcphoff, cp->protocol, skb->csum); else #endif tcph->check = csum_tcpudp_magic(cp->vaddr.ip, cp->caddr.ip, skb->len - tcphoff, cp->protocol, skb->csum); skb->ip_summed = CHECKSUM_UNNECESSARY; IP_VS_DBG(11, "O-pkt: %s O-csum=%d (+%zd)\n", pp->name, tcph->check, (char*)&(tcph->check) - (char*)tcph); } return 1; } static int tcp_dnat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp, struct ip_vs_conn *cp) { struct tcphdr *tcph; unsigned int tcphoff; int oldlen; int payload_csum = 0; #ifdef CONFIG_IP_VS_IPV6 if (cp->af == AF_INET6) tcphoff = sizeof(struct ipv6hdr); else #endif tcphoff = ip_hdrlen(skb); oldlen = skb->len - tcphoff; /* csum_check requires unshared skb */ if (!skb_make_writable(skb, tcphoff+sizeof(*tcph))) return 0; if (unlikely(cp->app != NULL)) { int ret; /* Some checks before mangling */ if (pp->csum_check && !pp->csum_check(cp->af, skb, pp)) return 0; /* * Attempt ip_vs_app call. 
* It will fix ip_vs_conn and iph ack_seq stuff */ if (!(ret = ip_vs_app_pkt_in(cp, skb))) return 0; /* ret=2: csum update is needed after payload mangling */ if (ret == 1) oldlen = skb->len - tcphoff; else payload_csum = 1; } tcph = (void *)skb_network_header(skb) + tcphoff; tcph->dest = cp->dport; /* * Adjust TCP checksums */ if (skb->ip_summed == CHECKSUM_PARTIAL) { tcp_partial_csum_update(cp->af, tcph, &cp->vaddr, &cp->daddr, htons(oldlen), htons(skb->len - tcphoff)); } else if (!payload_csum) { /* Only port and addr are changed, do fast csum update */ tcp_fast_csum_update(cp->af, tcph, &cp->vaddr, &cp->daddr, cp->vport, cp->dport); if (skb->ip_summed == CHECKSUM_COMPLETE) skb->ip_summed = (cp->app && pp->csum_check) ? CHECKSUM_UNNECESSARY : CHECKSUM_NONE; } else { /* full checksum calculation */ tcph->check = 0; skb->csum = skb_checksum(skb, tcphoff, skb->len - tcphoff, 0); #ifdef CONFIG_IP_VS_IPV6 if (cp->af == AF_INET6) tcph->check = csum_ipv6_magic(&cp->caddr.in6, &cp->daddr.in6, skb->len - tcphoff, cp->protocol, skb->csum); else #endif tcph->check = csum_tcpudp_magic(cp->caddr.ip, cp->daddr.ip, skb->len - tcphoff, cp->protocol, skb->csum); skb->ip_summed = CHECKSUM_UNNECESSARY; } return 1; } static int tcp_csum_check(int af, struct sk_buff *skb, struct ip_vs_protocol *pp) { unsigned int tcphoff; #ifdef CONFIG_IP_VS_IPV6 if (af == AF_INET6) tcphoff = sizeof(struct ipv6hdr); else #endif tcphoff = ip_hdrlen(skb); switch (skb->ip_summed) { case CHECKSUM_NONE: skb->csum = skb_checksum(skb, tcphoff, skb->len - tcphoff, 0); case CHECKSUM_COMPLETE: #ifdef CONFIG_IP_VS_IPV6 if (af == AF_INET6) { if (csum_ipv6_magic(&ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr, skb->len - tcphoff, ipv6_hdr(skb)->nexthdr, skb->csum)) { IP_VS_DBG_RL_PKT(0, af, pp, skb, 0, "Failed checksum for"); return 0; } } else #endif if (csum_tcpudp_magic(ip_hdr(skb)->saddr, ip_hdr(skb)->daddr, skb->len - tcphoff, ip_hdr(skb)->protocol, skb->csum)) { IP_VS_DBG_RL_PKT(0, af, pp, skb, 0, "Failed 
checksum for"); return 0; } break; default: /* No need to checksum. */ break; } return 1; } #define TCP_DIR_INPUT 0 #define TCP_DIR_OUTPUT 4 #define TCP_DIR_INPUT_ONLY 8 static const int tcp_state_off[IP_VS_DIR_LAST] = { [IP_VS_DIR_INPUT] = TCP_DIR_INPUT, [IP_VS_DIR_OUTPUT] = TCP_DIR_OUTPUT, [IP_VS_DIR_INPUT_ONLY] = TCP_DIR_INPUT_ONLY, }; /* * Timeout table[state] */ static const int tcp_timeouts[IP_VS_TCP_S_LAST+1] = { [IP_VS_TCP_S_NONE] = 2*HZ, [IP_VS_TCP_S_ESTABLISHED] = 15*60*HZ, [IP_VS_TCP_S_SYN_SENT] = 2*60*HZ, [IP_VS_TCP_S_SYN_RECV] = 1*60*HZ, [IP_VS_TCP_S_FIN_WAIT] = 2*60*HZ, [IP_VS_TCP_S_TIME_WAIT] = 2*60*HZ, [IP_VS_TCP_S_CLOSE] = 10*HZ, [IP_VS_TCP_S_CLOSE_WAIT] = 60*HZ, [IP_VS_TCP_S_LAST_ACK] = 30*HZ, [IP_VS_TCP_S_LISTEN] = 2*60*HZ, [IP_VS_TCP_S_SYNACK] = 120*HZ, [IP_VS_TCP_S_LAST] = 2*HZ, }; static const char *const tcp_state_name_table[IP_VS_TCP_S_LAST+1] = { [IP_VS_TCP_S_NONE] = "NONE", [IP_VS_TCP_S_ESTABLISHED] = "ESTABLISHED", [IP_VS_TCP_S_SYN_SENT] = "SYN_SENT", [IP_VS_TCP_S_SYN_RECV] = "SYN_RECV", [IP_VS_TCP_S_FIN_WAIT] = "FIN_WAIT", [IP_VS_TCP_S_TIME_WAIT] = "TIME_WAIT", [IP_VS_TCP_S_CLOSE] = "CLOSE", [IP_VS_TCP_S_CLOSE_WAIT] = "CLOSE_WAIT", [IP_VS_TCP_S_LAST_ACK] = "LAST_ACK", [IP_VS_TCP_S_LISTEN] = "LISTEN", [IP_VS_TCP_S_SYNACK] = "SYNACK", [IP_VS_TCP_S_LAST] = "BUG!", }; #define sNO IP_VS_TCP_S_NONE #define sES IP_VS_TCP_S_ESTABLISHED #define sSS IP_VS_TCP_S_SYN_SENT #define sSR IP_VS_TCP_S_SYN_RECV #define sFW IP_VS_TCP_S_FIN_WAIT #define sTW IP_VS_TCP_S_TIME_WAIT #define sCL IP_VS_TCP_S_CLOSE #define sCW IP_VS_TCP_S_CLOSE_WAIT #define sLA IP_VS_TCP_S_LAST_ACK #define sLI IP_VS_TCP_S_LISTEN #define sSA IP_VS_TCP_S_SYNACK struct tcp_states_t { int next_state[IP_VS_TCP_S_LAST]; }; static const char * tcp_state_name(int state) { if (state >= IP_VS_TCP_S_LAST) return "ERR!"; return tcp_state_name_table[state] ? 
tcp_state_name_table[state] : "?"; } static struct tcp_states_t tcp_states [] = { /* INPUT */ /* sNO, sES, sSS, sSR, sFW, sTW, sCL, sCW, sLA, sLI, sSA */ /*syn*/ {{sSR, sES, sES, sSR, sSR, sSR, sSR, sSR, sSR, sSR, sSR }}, /*fin*/ {{sCL, sCW, sSS, sTW, sTW, sTW, sCL, sCW, sLA, sLI, sTW }}, /*ack*/ {{sCL, sES, sSS, sES, sFW, sTW, sCL, sCW, sCL, sLI, sES }}, /*rst*/ {{sCL, sCL, sCL, sSR, sCL, sCL, sCL, sCL, sLA, sLI, sSR }}, /* OUTPUT */ /* sNO, sES, sSS, sSR, sFW, sTW, sCL, sCW, sLA, sLI, sSA */ /*syn*/ {{sSS, sES, sSS, sSR, sSS, sSS, sSS, sSS, sSS, sLI, sSR }}, /*fin*/ {{sTW, sFW, sSS, sTW, sFW, sTW, sCL, sTW, sLA, sLI, sTW }}, /*ack*/ {{sES, sES, sSS, sES, sFW, sTW, sCL, sCW, sLA, sES, sES }}, /*rst*/ {{sCL, sCL, sSS, sCL, sCL, sTW, sCL, sCL, sCL, sCL, sCL }}, /* INPUT-ONLY */ /* sNO, sES, sSS, sSR, sFW, sTW, sCL, sCW, sLA, sLI, sSA */ /*syn*/ {{sSR, sES, sES, sSR, sSR, sSR, sSR, sSR, sSR, sSR, sSR }}, /*fin*/ {{sCL, sFW, sSS, sTW, sFW, sTW, sCL, sCW, sLA, sLI, sTW }}, /*ack*/ {{sCL, sES, sSS, sES, sFW, sTW, sCL, sCW, sCL, sLI, sES }}, /*rst*/ {{sCL, sCL, sCL, sSR, sCL, sCL, sCL, sCL, sLA, sLI, sCL }}, }; static struct tcp_states_t tcp_states_dos [] = { /* INPUT */ /* sNO, sES, sSS, sSR, sFW, sTW, sCL, sCW, sLA, sLI, sSA */ /*syn*/ {{sSR, sES, sES, sSR, sSR, sSR, sSR, sSR, sSR, sSR, sSA }}, /*fin*/ {{sCL, sCW, sSS, sTW, sTW, sTW, sCL, sCW, sLA, sLI, sSA }}, /*ack*/ {{sCL, sES, sSS, sSR, sFW, sTW, sCL, sCW, sCL, sLI, sSA }}, /*rst*/ {{sCL, sCL, sCL, sSR, sCL, sCL, sCL, sCL, sLA, sLI, sCL }}, /* OUTPUT */ /* sNO, sES, sSS, sSR, sFW, sTW, sCL, sCW, sLA, sLI, sSA */ /*syn*/ {{sSS, sES, sSS, sSA, sSS, sSS, sSS, sSS, sSS, sLI, sSA }}, /*fin*/ {{sTW, sFW, sSS, sTW, sFW, sTW, sCL, sTW, sLA, sLI, sTW }}, /*ack*/ {{sES, sES, sSS, sES, sFW, sTW, sCL, sCW, sLA, sES, sES }}, /*rst*/ {{sCL, sCL, sSS, sCL, sCL, sTW, sCL, sCL, sCL, sCL, sCL }}, /* INPUT-ONLY */ /* sNO, sES, sSS, sSR, sFW, sTW, sCL, sCW, sLA, sLI, sSA */ /*syn*/ {{sSA, sES, sES, sSR, sSA, sSA, sSA, sSA, sSA, sSA, 
sSA }}, /*fin*/ {{sCL, sFW, sSS, sTW, sFW, sTW, sCL, sCW, sLA, sLI, sTW }}, /*ack*/ {{sCL, sES, sSS, sES, sFW, sTW, sCL, sCW, sCL, sLI, sES }}, /*rst*/ {{sCL, sCL, sCL, sSR, sCL, sCL, sCL, sCL, sLA, sLI, sCL }}, }; static void tcp_timeout_change(struct ip_vs_proto_data *pd, int flags) { int on = (flags & 1); /* secure_tcp */ /* ** FIXME: change secure_tcp to independent sysctl var ** or make it per-service or per-app because it is valid ** for most if not for all of the applications. Something ** like "capabilities" (flags) for each object. */ pd->tcp_state_table = (on ? tcp_states_dos : tcp_states); } static inline int tcp_state_idx(struct tcphdr *th) { if (th->rst) return 3; if (th->syn) return 0; if (th->fin) return 1; if (th->ack) return 2; return -1; } static inline void set_tcp_state(struct ip_vs_proto_data *pd, struct ip_vs_conn *cp, int direction, struct tcphdr *th) { int state_idx; int new_state = IP_VS_TCP_S_CLOSE; int state_off = tcp_state_off[direction]; /* * Update state offset to INPUT_ONLY if necessary * or delete NO_OUTPUT flag if output packet detected */ if (cp->flags & IP_VS_CONN_F_NOOUTPUT) { if (state_off == TCP_DIR_OUTPUT) cp->flags &= ~IP_VS_CONN_F_NOOUTPUT; else state_off = TCP_DIR_INPUT_ONLY; } if ((state_idx = tcp_state_idx(th)) < 0) { IP_VS_DBG(8, "tcp_state_idx=%d!!!\n", state_idx); goto tcp_state_out; } new_state = pd->tcp_state_table[state_off+state_idx].next_state[cp->state]; tcp_state_out: if (new_state != cp->state) { struct ip_vs_dest *dest = cp->dest; IP_VS_DBG_BUF(8, "%s %s [%c%c%c%c] %s:%d->" "%s:%d state: %s->%s conn->refcnt:%d\n", pd->pp->name, ((state_off == TCP_DIR_OUTPUT) ? "output " : "input "), th->syn ? 'S' : '.', th->fin ? 'F' : '.', th->ack ? 'A' : '.', th->rst ? 
'R' : '.', IP_VS_DBG_ADDR(cp->af, &cp->daddr), ntohs(cp->dport), IP_VS_DBG_ADDR(cp->af, &cp->caddr), ntohs(cp->cport), tcp_state_name(cp->state), tcp_state_name(new_state), atomic_read(&cp->refcnt)); if (dest) { if (!(cp->flags & IP_VS_CONN_F_INACTIVE) && (new_state != IP_VS_TCP_S_ESTABLISHED)) { atomic_dec(&dest->activeconns); atomic_inc(&dest->inactconns); cp->flags |= IP_VS_CONN_F_INACTIVE; } else if ((cp->flags & IP_VS_CONN_F_INACTIVE) && (new_state == IP_VS_TCP_S_ESTABLISHED)) { atomic_inc(&dest->activeconns); atomic_dec(&dest->inactconns); cp->flags &= ~IP_VS_CONN_F_INACTIVE; } } } if (likely(pd)) cp->timeout = pd->timeout_table[cp->state = new_state]; else /* What to do ? */ cp->timeout = tcp_timeouts[cp->state = new_state]; } /* * Handle state transitions */ static int tcp_state_transition(struct ip_vs_conn *cp, int direction, const struct sk_buff *skb, struct ip_vs_proto_data *pd) { struct tcphdr _tcph, *th; #ifdef CONFIG_IP_VS_IPV6 int ihl = cp->af == AF_INET ? ip_hdrlen(skb) : sizeof(struct ipv6hdr); #else int ihl = ip_hdrlen(skb); #endif th = skb_header_pointer(skb, ihl, sizeof(_tcph), &_tcph); if (th == NULL) return 0; spin_lock(&cp->lock); set_tcp_state(pd, cp, direction, th); spin_unlock(&cp->lock); return 1; } static inline __u16 tcp_app_hashkey(__be16 port) { return (((__force u16)port >> TCP_APP_TAB_BITS) ^ (__force u16)port) & TCP_APP_TAB_MASK; } static int tcp_register_app(struct net *net, struct ip_vs_app *inc) { struct ip_vs_app *i; __u16 hash; __be16 port = inc->port; int ret = 0; struct netns_ipvs *ipvs = net_ipvs(net); struct ip_vs_proto_data *pd = ip_vs_proto_data_get(net, IPPROTO_TCP); hash = tcp_app_hashkey(port); spin_lock_bh(&ipvs->tcp_app_lock); list_for_each_entry(i, &ipvs->tcp_apps[hash], p_list) { if (i->port == port) { ret = -EEXIST; goto out; } } list_add(&inc->p_list, &ipvs->tcp_apps[hash]); atomic_inc(&pd->appcnt); out: spin_unlock_bh(&ipvs->tcp_app_lock); return ret; } static void tcp_unregister_app(struct net *net, struct 
ip_vs_app *inc) { struct netns_ipvs *ipvs = net_ipvs(net); struct ip_vs_proto_data *pd = ip_vs_proto_data_get(net, IPPROTO_TCP); spin_lock_bh(&ipvs->tcp_app_lock); atomic_dec(&pd->appcnt); list_del(&inc->p_list); spin_unlock_bh(&ipvs->tcp_app_lock); } static int tcp_app_conn_bind(struct ip_vs_conn *cp) { struct netns_ipvs *ipvs = net_ipvs(ip_vs_conn_net(cp)); int hash; struct ip_vs_app *inc; int result = 0; /* Default binding: bind app only for NAT */ if (IP_VS_FWD_METHOD(cp) != IP_VS_CONN_F_MASQ) return 0; /* Lookup application incarnations and bind the right one */ hash = tcp_app_hashkey(cp->vport); spin_lock(&ipvs->tcp_app_lock); list_for_each_entry(inc, &ipvs->tcp_apps[hash], p_list) { if (inc->port == cp->vport) { if (unlikely(!ip_vs_app_inc_get(inc))) break; spin_unlock(&ipvs->tcp_app_lock); IP_VS_DBG_BUF(9, "%s(): Binding conn %s:%u->" "%s:%u to app %s on port %u\n", __func__, IP_VS_DBG_ADDR(cp->af, &cp->caddr), ntohs(cp->cport), IP_VS_DBG_ADDR(cp->af, &cp->vaddr), ntohs(cp->vport), inc->name, ntohs(inc->port)); cp->app = inc; if (inc->init_conn) result = inc->init_conn(inc, cp); goto out; } } spin_unlock(&ipvs->tcp_app_lock); out: return result; } /* * Set LISTEN timeout. (ip_vs_conn_put will setup timer) */ void ip_vs_tcp_conn_listen(struct net *net, struct ip_vs_conn *cp) { struct ip_vs_proto_data *pd = ip_vs_proto_data_get(net, IPPROTO_TCP); spin_lock(&cp->lock); cp->state = IP_VS_TCP_S_LISTEN; cp->timeout = (pd ? pd->timeout_table[IP_VS_TCP_S_LISTEN] : tcp_timeouts[IP_VS_TCP_S_LISTEN]); spin_unlock(&cp->lock); } /* --------------------------------------------- * timeouts is netns related now. 
* --------------------------------------------- */ static void __ip_vs_tcp_init(struct net *net, struct ip_vs_proto_data *pd) { struct netns_ipvs *ipvs = net_ipvs(net); ip_vs_init_hash_table(ipvs->tcp_apps, TCP_APP_TAB_SIZE); spin_lock_init(&ipvs->tcp_app_lock); pd->timeout_table = ip_vs_create_timeout_table((int *)tcp_timeouts, sizeof(tcp_timeouts)); pd->tcp_state_table = tcp_states; } static void __ip_vs_tcp_exit(struct net *net, struct ip_vs_proto_data *pd) { kfree(pd->timeout_table); } struct ip_vs_protocol ip_vs_protocol_tcp = { .name = "TCP", .protocol = IPPROTO_TCP, .num_states = IP_VS_TCP_S_LAST, .dont_defrag = 0, .init = NULL, .exit = NULL, .init_netns = __ip_vs_tcp_init, .exit_netns = __ip_vs_tcp_exit, .register_app = tcp_register_app, .unregister_app = tcp_unregister_app, .conn_schedule = tcp_conn_schedule, .conn_in_get = ip_vs_conn_in_get_proto, .conn_out_get = ip_vs_conn_out_get_proto, .snat_handler = tcp_snat_handler, .dnat_handler = tcp_dnat_handler, .csum_check = tcp_csum_check, .state_name = tcp_state_name, .state_transition = tcp_state_transition, .app_conn_bind = tcp_app_conn_bind, .debug_packet = ip_vs_tcpudp_debug_packet, .timeout_change = tcp_timeout_change, };
gpl-2.0
MikeC84/android_kernel_motorola_shamu
arch/arm/mach-pxa/tosa-bt.c
3206
2912
/* * Bluetooth built-in chip control * * Copyright (c) 2008 Dmitry Baryshkov * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/gpio.h> #include <linux/delay.h> #include <linux/rfkill.h> #include <mach/tosa_bt.h> static void tosa_bt_on(struct tosa_bt_data *data) { gpio_set_value(data->gpio_reset, 0); gpio_set_value(data->gpio_pwr, 1); gpio_set_value(data->gpio_reset, 1); mdelay(20); gpio_set_value(data->gpio_reset, 0); } static void tosa_bt_off(struct tosa_bt_data *data) { gpio_set_value(data->gpio_reset, 1); mdelay(10); gpio_set_value(data->gpio_pwr, 0); gpio_set_value(data->gpio_reset, 0); } static int tosa_bt_set_block(void *data, bool blocked) { pr_info("BT_RADIO going: %s\n", blocked ? "off" : "on"); if (!blocked) { pr_info("TOSA_BT: going ON\n"); tosa_bt_on(data); } else { pr_info("TOSA_BT: going OFF\n"); tosa_bt_off(data); } return 0; } static const struct rfkill_ops tosa_bt_rfkill_ops = { .set_block = tosa_bt_set_block, }; static int tosa_bt_probe(struct platform_device *dev) { int rc; struct rfkill *rfk; struct tosa_bt_data *data = dev->dev.platform_data; rc = gpio_request(data->gpio_reset, "Bluetooth reset"); if (rc) goto err_reset; rc = gpio_direction_output(data->gpio_reset, 0); if (rc) goto err_reset_dir; rc = gpio_request(data->gpio_pwr, "Bluetooth power"); if (rc) goto err_pwr; rc = gpio_direction_output(data->gpio_pwr, 0); if (rc) goto err_pwr_dir; rfk = rfkill_alloc("tosa-bt", &dev->dev, RFKILL_TYPE_BLUETOOTH, &tosa_bt_rfkill_ops, data); if (!rfk) { rc = -ENOMEM; goto err_rfk_alloc; } rc = rfkill_register(rfk); if (rc) goto err_rfkill; platform_set_drvdata(dev, rfk); return 0; err_rfkill: rfkill_destroy(rfk); err_rfk_alloc: tosa_bt_off(data); err_pwr_dir: gpio_free(data->gpio_pwr); err_pwr: err_reset_dir: 
gpio_free(data->gpio_reset); err_reset: return rc; } static int tosa_bt_remove(struct platform_device *dev) { struct tosa_bt_data *data = dev->dev.platform_data; struct rfkill *rfk = platform_get_drvdata(dev); platform_set_drvdata(dev, NULL); if (rfk) { rfkill_unregister(rfk); rfkill_destroy(rfk); } rfk = NULL; tosa_bt_off(data); gpio_free(data->gpio_pwr); gpio_free(data->gpio_reset); return 0; } static struct platform_driver tosa_bt_driver = { .probe = tosa_bt_probe, .remove = tosa_bt_remove, .driver = { .name = "tosa-bt", .owner = THIS_MODULE, }, }; static int __init tosa_bt_init(void) { return platform_driver_register(&tosa_bt_driver); } static void __exit tosa_bt_exit(void) { platform_driver_unregister(&tosa_bt_driver); } module_init(tosa_bt_init); module_exit(tosa_bt_exit);
gpl-2.0
BlownFuze/Koding
drivers/gpu/drm/ttm/ttm_lock.c
3206
7395
/************************************************************************** * * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the * "Software"), to deal in the Software without restriction, including * without limitation the rights to use, copy, modify, merge, publish, * distribute, sub license, and/or sell copies of the Software, and to * permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice (including the * next paragraph) shall be included in all copies or substantial portions * of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE * USE OR OTHER DEALINGS IN THE SOFTWARE. 
* **************************************************************************/ /* * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com> */ #include "ttm/ttm_lock.h" #include "ttm/ttm_module.h" #include <asm/atomic.h> #include <linux/errno.h> #include <linux/wait.h> #include <linux/sched.h> #include <linux/module.h> #define TTM_WRITE_LOCK_PENDING (1 << 0) #define TTM_VT_LOCK_PENDING (1 << 1) #define TTM_SUSPEND_LOCK_PENDING (1 << 2) #define TTM_VT_LOCK (1 << 3) #define TTM_SUSPEND_LOCK (1 << 4) void ttm_lock_init(struct ttm_lock *lock) { spin_lock_init(&lock->lock); init_waitqueue_head(&lock->queue); lock->rw = 0; lock->flags = 0; lock->kill_takers = false; lock->signal = SIGKILL; } EXPORT_SYMBOL(ttm_lock_init); void ttm_read_unlock(struct ttm_lock *lock) { spin_lock(&lock->lock); if (--lock->rw == 0) wake_up_all(&lock->queue); spin_unlock(&lock->lock); } EXPORT_SYMBOL(ttm_read_unlock); static bool __ttm_read_lock(struct ttm_lock *lock) { bool locked = false; spin_lock(&lock->lock); if (unlikely(lock->kill_takers)) { send_sig(lock->signal, current, 0); spin_unlock(&lock->lock); return false; } if (lock->rw >= 0 && lock->flags == 0) { ++lock->rw; locked = true; } spin_unlock(&lock->lock); return locked; } int ttm_read_lock(struct ttm_lock *lock, bool interruptible) { int ret = 0; if (interruptible) ret = wait_event_interruptible(lock->queue, __ttm_read_lock(lock)); else wait_event(lock->queue, __ttm_read_lock(lock)); return ret; } EXPORT_SYMBOL(ttm_read_lock); static bool __ttm_read_trylock(struct ttm_lock *lock, bool *locked) { bool block = true; *locked = false; spin_lock(&lock->lock); if (unlikely(lock->kill_takers)) { send_sig(lock->signal, current, 0); spin_unlock(&lock->lock); return false; } if (lock->rw >= 0 && lock->flags == 0) { ++lock->rw; block = false; *locked = true; } else if (lock->flags == 0) { block = false; } spin_unlock(&lock->lock); return !block; } int ttm_read_trylock(struct ttm_lock *lock, bool interruptible) { int ret = 0; bool locked; if 
(interruptible) ret = wait_event_interruptible (lock->queue, __ttm_read_trylock(lock, &locked)); else wait_event(lock->queue, __ttm_read_trylock(lock, &locked)); if (unlikely(ret != 0)) { BUG_ON(locked); return ret; } return (locked) ? 0 : -EBUSY; } void ttm_write_unlock(struct ttm_lock *lock) { spin_lock(&lock->lock); lock->rw = 0; wake_up_all(&lock->queue); spin_unlock(&lock->lock); } EXPORT_SYMBOL(ttm_write_unlock); static bool __ttm_write_lock(struct ttm_lock *lock) { bool locked = false; spin_lock(&lock->lock); if (unlikely(lock->kill_takers)) { send_sig(lock->signal, current, 0); spin_unlock(&lock->lock); return false; } if (lock->rw == 0 && ((lock->flags & ~TTM_WRITE_LOCK_PENDING) == 0)) { lock->rw = -1; lock->flags &= ~TTM_WRITE_LOCK_PENDING; locked = true; } else { lock->flags |= TTM_WRITE_LOCK_PENDING; } spin_unlock(&lock->lock); return locked; } int ttm_write_lock(struct ttm_lock *lock, bool interruptible) { int ret = 0; if (interruptible) { ret = wait_event_interruptible(lock->queue, __ttm_write_lock(lock)); if (unlikely(ret != 0)) { spin_lock(&lock->lock); lock->flags &= ~TTM_WRITE_LOCK_PENDING; wake_up_all(&lock->queue); spin_unlock(&lock->lock); } } else wait_event(lock->queue, __ttm_read_lock(lock)); return ret; } EXPORT_SYMBOL(ttm_write_lock); void ttm_write_lock_downgrade(struct ttm_lock *lock) { spin_lock(&lock->lock); lock->rw = 1; wake_up_all(&lock->queue); spin_unlock(&lock->lock); } static int __ttm_vt_unlock(struct ttm_lock *lock) { int ret = 0; spin_lock(&lock->lock); if (unlikely(!(lock->flags & TTM_VT_LOCK))) ret = -EINVAL; lock->flags &= ~TTM_VT_LOCK; wake_up_all(&lock->queue); spin_unlock(&lock->lock); return ret; } static void ttm_vt_lock_remove(struct ttm_base_object **p_base) { struct ttm_base_object *base = *p_base; struct ttm_lock *lock = container_of(base, struct ttm_lock, base); int ret; *p_base = NULL; ret = __ttm_vt_unlock(lock); BUG_ON(ret != 0); } static bool __ttm_vt_lock(struct ttm_lock *lock) { bool locked = false; 
spin_lock(&lock->lock); if (lock->rw == 0) { lock->flags &= ~TTM_VT_LOCK_PENDING; lock->flags |= TTM_VT_LOCK; locked = true; } else { lock->flags |= TTM_VT_LOCK_PENDING; } spin_unlock(&lock->lock); return locked; } int ttm_vt_lock(struct ttm_lock *lock, bool interruptible, struct ttm_object_file *tfile) { int ret = 0; if (interruptible) { ret = wait_event_interruptible(lock->queue, __ttm_vt_lock(lock)); if (unlikely(ret != 0)) { spin_lock(&lock->lock); lock->flags &= ~TTM_VT_LOCK_PENDING; wake_up_all(&lock->queue); spin_unlock(&lock->lock); return ret; } } else wait_event(lock->queue, __ttm_vt_lock(lock)); /* * Add a base-object, the destructor of which will * make sure the lock is released if the client dies * while holding it. */ ret = ttm_base_object_init(tfile, &lock->base, false, ttm_lock_type, &ttm_vt_lock_remove, NULL); if (ret) (void)__ttm_vt_unlock(lock); else lock->vt_holder = tfile; return ret; } EXPORT_SYMBOL(ttm_vt_lock); int ttm_vt_unlock(struct ttm_lock *lock) { return ttm_ref_object_base_unref(lock->vt_holder, lock->base.hash.key, TTM_REF_USAGE); } EXPORT_SYMBOL(ttm_vt_unlock); void ttm_suspend_unlock(struct ttm_lock *lock) { spin_lock(&lock->lock); lock->flags &= ~TTM_SUSPEND_LOCK; wake_up_all(&lock->queue); spin_unlock(&lock->lock); } EXPORT_SYMBOL(ttm_suspend_unlock); static bool __ttm_suspend_lock(struct ttm_lock *lock) { bool locked = false; spin_lock(&lock->lock); if (lock->rw == 0) { lock->flags &= ~TTM_SUSPEND_LOCK_PENDING; lock->flags |= TTM_SUSPEND_LOCK; locked = true; } else { lock->flags |= TTM_SUSPEND_LOCK_PENDING; } spin_unlock(&lock->lock); return locked; } void ttm_suspend_lock(struct ttm_lock *lock) { wait_event(lock->queue, __ttm_suspend_lock(lock)); } EXPORT_SYMBOL(ttm_suspend_lock);
gpl-2.0
bigzz/shamu_flar2
drivers/pinctrl/spear/pinctrl-spear3xx.c
3718
12603
/* * Driver for the ST Microelectronics SPEAr3xx pinmux * * Copyright (C) 2012 ST Microelectronics * Viresh Kumar <viresh.linux@gmail.com> * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any * warranty of any kind, whether express or implied. */ #include <linux/pinctrl/pinctrl.h> #include "pinctrl-spear3xx.h" /* pins */ static const struct pinctrl_pin_desc spear3xx_pins[] = { SPEAR_PIN_0_TO_101, }; /* firda_pins */ static const unsigned firda_pins[] = { 0, 1 }; static struct spear_muxreg firda_muxreg[] = { { .reg = -1, .mask = PMX_FIRDA_MASK, .val = PMX_FIRDA_MASK, }, }; static struct spear_modemux firda_modemux[] = { { .modes = ~0, .muxregs = firda_muxreg, .nmuxregs = ARRAY_SIZE(firda_muxreg), }, }; struct spear_pingroup spear3xx_firda_pingroup = { .name = "firda_grp", .pins = firda_pins, .npins = ARRAY_SIZE(firda_pins), .modemuxs = firda_modemux, .nmodemuxs = ARRAY_SIZE(firda_modemux), }; static const char *const firda_grps[] = { "firda_grp" }; struct spear_function spear3xx_firda_function = { .name = "firda", .groups = firda_grps, .ngroups = ARRAY_SIZE(firda_grps), }; /* i2c_pins */ static const unsigned i2c_pins[] = { 4, 5 }; static struct spear_muxreg i2c_muxreg[] = { { .reg = -1, .mask = PMX_I2C_MASK, .val = PMX_I2C_MASK, }, }; static struct spear_modemux i2c_modemux[] = { { .modes = ~0, .muxregs = i2c_muxreg, .nmuxregs = ARRAY_SIZE(i2c_muxreg), }, }; struct spear_pingroup spear3xx_i2c_pingroup = { .name = "i2c0_grp", .pins = i2c_pins, .npins = ARRAY_SIZE(i2c_pins), .modemuxs = i2c_modemux, .nmodemuxs = ARRAY_SIZE(i2c_modemux), }; static const char *const i2c_grps[] = { "i2c0_grp" }; struct spear_function spear3xx_i2c_function = { .name = "i2c0", .groups = i2c_grps, .ngroups = ARRAY_SIZE(i2c_grps), }; /* ssp_cs_pins */ static const unsigned ssp_cs_pins[] = { 34, 35, 36 }; static struct spear_muxreg ssp_cs_muxreg[] = { { .reg = -1, .mask = PMX_SSP_CS_MASK, .val = PMX_SSP_CS_MASK, 
}, }; static struct spear_modemux ssp_cs_modemux[] = { { .modes = ~0, .muxregs = ssp_cs_muxreg, .nmuxregs = ARRAY_SIZE(ssp_cs_muxreg), }, }; struct spear_pingroup spear3xx_ssp_cs_pingroup = { .name = "ssp_cs_grp", .pins = ssp_cs_pins, .npins = ARRAY_SIZE(ssp_cs_pins), .modemuxs = ssp_cs_modemux, .nmodemuxs = ARRAY_SIZE(ssp_cs_modemux), }; static const char *const ssp_cs_grps[] = { "ssp_cs_grp" }; struct spear_function spear3xx_ssp_cs_function = { .name = "ssp_cs", .groups = ssp_cs_grps, .ngroups = ARRAY_SIZE(ssp_cs_grps), }; /* ssp_pins */ static const unsigned ssp_pins[] = { 6, 7, 8, 9 }; static struct spear_muxreg ssp_muxreg[] = { { .reg = -1, .mask = PMX_SSP_MASK, .val = PMX_SSP_MASK, }, }; static struct spear_modemux ssp_modemux[] = { { .modes = ~0, .muxregs = ssp_muxreg, .nmuxregs = ARRAY_SIZE(ssp_muxreg), }, }; struct spear_pingroup spear3xx_ssp_pingroup = { .name = "ssp0_grp", .pins = ssp_pins, .npins = ARRAY_SIZE(ssp_pins), .modemuxs = ssp_modemux, .nmodemuxs = ARRAY_SIZE(ssp_modemux), }; static const char *const ssp_grps[] = { "ssp0_grp" }; struct spear_function spear3xx_ssp_function = { .name = "ssp0", .groups = ssp_grps, .ngroups = ARRAY_SIZE(ssp_grps), }; /* mii_pins */ static const unsigned mii_pins[] = { 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27 }; static struct spear_muxreg mii_muxreg[] = { { .reg = -1, .mask = PMX_MII_MASK, .val = PMX_MII_MASK, }, }; static struct spear_modemux mii_modemux[] = { { .modes = ~0, .muxregs = mii_muxreg, .nmuxregs = ARRAY_SIZE(mii_muxreg), }, }; struct spear_pingroup spear3xx_mii_pingroup = { .name = "mii0_grp", .pins = mii_pins, .npins = ARRAY_SIZE(mii_pins), .modemuxs = mii_modemux, .nmodemuxs = ARRAY_SIZE(mii_modemux), }; static const char *const mii_grps[] = { "mii0_grp" }; struct spear_function spear3xx_mii_function = { .name = "mii0", .groups = mii_grps, .ngroups = ARRAY_SIZE(mii_grps), }; /* gpio0_pin0_pins */ static const unsigned gpio0_pin0_pins[] = { 28 }; static struct spear_muxreg 
gpio0_pin0_muxreg[] = { { .reg = -1, .mask = PMX_GPIO_PIN0_MASK, .val = PMX_GPIO_PIN0_MASK, }, }; static struct spear_modemux gpio0_pin0_modemux[] = { { .modes = ~0, .muxregs = gpio0_pin0_muxreg, .nmuxregs = ARRAY_SIZE(gpio0_pin0_muxreg), }, }; struct spear_pingroup spear3xx_gpio0_pin0_pingroup = { .name = "gpio0_pin0_grp", .pins = gpio0_pin0_pins, .npins = ARRAY_SIZE(gpio0_pin0_pins), .modemuxs = gpio0_pin0_modemux, .nmodemuxs = ARRAY_SIZE(gpio0_pin0_modemux), }; /* gpio0_pin1_pins */ static const unsigned gpio0_pin1_pins[] = { 29 }; static struct spear_muxreg gpio0_pin1_muxreg[] = { { .reg = -1, .mask = PMX_GPIO_PIN1_MASK, .val = PMX_GPIO_PIN1_MASK, }, }; static struct spear_modemux gpio0_pin1_modemux[] = { { .modes = ~0, .muxregs = gpio0_pin1_muxreg, .nmuxregs = ARRAY_SIZE(gpio0_pin1_muxreg), }, }; struct spear_pingroup spear3xx_gpio0_pin1_pingroup = { .name = "gpio0_pin1_grp", .pins = gpio0_pin1_pins, .npins = ARRAY_SIZE(gpio0_pin1_pins), .modemuxs = gpio0_pin1_modemux, .nmodemuxs = ARRAY_SIZE(gpio0_pin1_modemux), }; /* gpio0_pin2_pins */ static const unsigned gpio0_pin2_pins[] = { 30 }; static struct spear_muxreg gpio0_pin2_muxreg[] = { { .reg = -1, .mask = PMX_GPIO_PIN2_MASK, .val = PMX_GPIO_PIN2_MASK, }, }; static struct spear_modemux gpio0_pin2_modemux[] = { { .modes = ~0, .muxregs = gpio0_pin2_muxreg, .nmuxregs = ARRAY_SIZE(gpio0_pin2_muxreg), }, }; struct spear_pingroup spear3xx_gpio0_pin2_pingroup = { .name = "gpio0_pin2_grp", .pins = gpio0_pin2_pins, .npins = ARRAY_SIZE(gpio0_pin2_pins), .modemuxs = gpio0_pin2_modemux, .nmodemuxs = ARRAY_SIZE(gpio0_pin2_modemux), }; /* gpio0_pin3_pins */ static const unsigned gpio0_pin3_pins[] = { 31 }; static struct spear_muxreg gpio0_pin3_muxreg[] = { { .reg = -1, .mask = PMX_GPIO_PIN3_MASK, .val = PMX_GPIO_PIN3_MASK, }, }; static struct spear_modemux gpio0_pin3_modemux[] = { { .modes = ~0, .muxregs = gpio0_pin3_muxreg, .nmuxregs = ARRAY_SIZE(gpio0_pin3_muxreg), }, }; struct spear_pingroup spear3xx_gpio0_pin3_pingroup 
= { .name = "gpio0_pin3_grp", .pins = gpio0_pin3_pins, .npins = ARRAY_SIZE(gpio0_pin3_pins), .modemuxs = gpio0_pin3_modemux, .nmodemuxs = ARRAY_SIZE(gpio0_pin3_modemux), }; /* gpio0_pin4_pins */ static const unsigned gpio0_pin4_pins[] = { 32 }; static struct spear_muxreg gpio0_pin4_muxreg[] = { { .reg = -1, .mask = PMX_GPIO_PIN4_MASK, .val = PMX_GPIO_PIN4_MASK, }, }; static struct spear_modemux gpio0_pin4_modemux[] = { { .modes = ~0, .muxregs = gpio0_pin4_muxreg, .nmuxregs = ARRAY_SIZE(gpio0_pin4_muxreg), }, }; struct spear_pingroup spear3xx_gpio0_pin4_pingroup = { .name = "gpio0_pin4_grp", .pins = gpio0_pin4_pins, .npins = ARRAY_SIZE(gpio0_pin4_pins), .modemuxs = gpio0_pin4_modemux, .nmodemuxs = ARRAY_SIZE(gpio0_pin4_modemux), }; /* gpio0_pin5_pins */ static const unsigned gpio0_pin5_pins[] = { 33 }; static struct spear_muxreg gpio0_pin5_muxreg[] = { { .reg = -1, .mask = PMX_GPIO_PIN5_MASK, .val = PMX_GPIO_PIN5_MASK, }, }; static struct spear_modemux gpio0_pin5_modemux[] = { { .modes = ~0, .muxregs = gpio0_pin5_muxreg, .nmuxregs = ARRAY_SIZE(gpio0_pin5_muxreg), }, }; struct spear_pingroup spear3xx_gpio0_pin5_pingroup = { .name = "gpio0_pin5_grp", .pins = gpio0_pin5_pins, .npins = ARRAY_SIZE(gpio0_pin5_pins), .modemuxs = gpio0_pin5_modemux, .nmodemuxs = ARRAY_SIZE(gpio0_pin5_modemux), }; static const char *const gpio0_grps[] = { "gpio0_pin0_grp", "gpio0_pin1_grp", "gpio0_pin2_grp", "gpio0_pin3_grp", "gpio0_pin4_grp", "gpio0_pin5_grp", }; struct spear_function spear3xx_gpio0_function = { .name = "gpio0", .groups = gpio0_grps, .ngroups = ARRAY_SIZE(gpio0_grps), }; /* uart0_ext_pins */ static const unsigned uart0_ext_pins[] = { 37, 38, 39, 40, 41, 42 }; static struct spear_muxreg uart0_ext_muxreg[] = { { .reg = -1, .mask = PMX_UART0_MODEM_MASK, .val = PMX_UART0_MODEM_MASK, }, }; static struct spear_modemux uart0_ext_modemux[] = { { .modes = ~0, .muxregs = uart0_ext_muxreg, .nmuxregs = ARRAY_SIZE(uart0_ext_muxreg), }, }; struct spear_pingroup 
spear3xx_uart0_ext_pingroup = { .name = "uart0_ext_grp", .pins = uart0_ext_pins, .npins = ARRAY_SIZE(uart0_ext_pins), .modemuxs = uart0_ext_modemux, .nmodemuxs = ARRAY_SIZE(uart0_ext_modemux), }; static const char *const uart0_ext_grps[] = { "uart0_ext_grp" }; struct spear_function spear3xx_uart0_ext_function = { .name = "uart0_ext", .groups = uart0_ext_grps, .ngroups = ARRAY_SIZE(uart0_ext_grps), }; /* uart0_pins */ static const unsigned uart0_pins[] = { 2, 3 }; static struct spear_muxreg uart0_muxreg[] = { { .reg = -1, .mask = PMX_UART0_MASK, .val = PMX_UART0_MASK, }, }; static struct spear_modemux uart0_modemux[] = { { .modes = ~0, .muxregs = uart0_muxreg, .nmuxregs = ARRAY_SIZE(uart0_muxreg), }, }; struct spear_pingroup spear3xx_uart0_pingroup = { .name = "uart0_grp", .pins = uart0_pins, .npins = ARRAY_SIZE(uart0_pins), .modemuxs = uart0_modemux, .nmodemuxs = ARRAY_SIZE(uart0_modemux), }; static const char *const uart0_grps[] = { "uart0_grp" }; struct spear_function spear3xx_uart0_function = { .name = "uart0", .groups = uart0_grps, .ngroups = ARRAY_SIZE(uart0_grps), }; /* timer_0_1_pins */ static const unsigned timer_0_1_pins[] = { 43, 44, 47, 48 }; static struct spear_muxreg timer_0_1_muxreg[] = { { .reg = -1, .mask = PMX_TIMER_0_1_MASK, .val = PMX_TIMER_0_1_MASK, }, }; static struct spear_modemux timer_0_1_modemux[] = { { .modes = ~0, .muxregs = timer_0_1_muxreg, .nmuxregs = ARRAY_SIZE(timer_0_1_muxreg), }, }; struct spear_pingroup spear3xx_timer_0_1_pingroup = { .name = "timer_0_1_grp", .pins = timer_0_1_pins, .npins = ARRAY_SIZE(timer_0_1_pins), .modemuxs = timer_0_1_modemux, .nmodemuxs = ARRAY_SIZE(timer_0_1_modemux), }; static const char *const timer_0_1_grps[] = { "timer_0_1_grp" }; struct spear_function spear3xx_timer_0_1_function = { .name = "timer_0_1", .groups = timer_0_1_grps, .ngroups = ARRAY_SIZE(timer_0_1_grps), }; /* timer_2_3_pins */ static const unsigned timer_2_3_pins[] = { 45, 46, 49, 50 }; static struct spear_muxreg timer_2_3_muxreg[] = { { 
.reg = -1, .mask = PMX_TIMER_2_3_MASK, .val = PMX_TIMER_2_3_MASK, }, }; static struct spear_modemux timer_2_3_modemux[] = { { .modes = ~0, .muxregs = timer_2_3_muxreg, .nmuxregs = ARRAY_SIZE(timer_2_3_muxreg), }, }; struct spear_pingroup spear3xx_timer_2_3_pingroup = { .name = "timer_2_3_grp", .pins = timer_2_3_pins, .npins = ARRAY_SIZE(timer_2_3_pins), .modemuxs = timer_2_3_modemux, .nmodemuxs = ARRAY_SIZE(timer_2_3_modemux), }; static const char *const timer_2_3_grps[] = { "timer_2_3_grp" }; struct spear_function spear3xx_timer_2_3_function = { .name = "timer_2_3", .groups = timer_2_3_grps, .ngroups = ARRAY_SIZE(timer_2_3_grps), }; /* Define muxreg arrays */ DEFINE_MUXREG(firda_pins, 0, PMX_FIRDA_MASK, 0); DEFINE_MUXREG(i2c_pins, 0, PMX_I2C_MASK, 0); DEFINE_MUXREG(ssp_cs_pins, 0, PMX_SSP_CS_MASK, 0); DEFINE_MUXREG(ssp_pins, 0, PMX_SSP_MASK, 0); DEFINE_MUXREG(mii_pins, 0, PMX_MII_MASK, 0); DEFINE_MUXREG(gpio0_pin0_pins, 0, PMX_GPIO_PIN0_MASK, 0); DEFINE_MUXREG(gpio0_pin1_pins, 0, PMX_GPIO_PIN1_MASK, 0); DEFINE_MUXREG(gpio0_pin2_pins, 0, PMX_GPIO_PIN2_MASK, 0); DEFINE_MUXREG(gpio0_pin3_pins, 0, PMX_GPIO_PIN3_MASK, 0); DEFINE_MUXREG(gpio0_pin4_pins, 0, PMX_GPIO_PIN4_MASK, 0); DEFINE_MUXREG(gpio0_pin5_pins, 0, PMX_GPIO_PIN5_MASK, 0); DEFINE_MUXREG(uart0_ext_pins, 0, PMX_UART0_MODEM_MASK, 0); DEFINE_MUXREG(uart0_pins, 0, PMX_UART0_MASK, 0); DEFINE_MUXREG(timer_0_1_pins, 0, PMX_TIMER_0_1_MASK, 0); DEFINE_MUXREG(timer_2_3_pins, 0, PMX_TIMER_2_3_MASK, 0); static struct spear_gpio_pingroup spear3xx_gpio_pingroup[] = { GPIO_PINGROUP(firda_pins), GPIO_PINGROUP(i2c_pins), GPIO_PINGROUP(ssp_cs_pins), GPIO_PINGROUP(ssp_pins), GPIO_PINGROUP(mii_pins), GPIO_PINGROUP(gpio0_pin0_pins), GPIO_PINGROUP(gpio0_pin1_pins), GPIO_PINGROUP(gpio0_pin2_pins), GPIO_PINGROUP(gpio0_pin3_pins), GPIO_PINGROUP(gpio0_pin4_pins), GPIO_PINGROUP(gpio0_pin5_pins), GPIO_PINGROUP(uart0_ext_pins), GPIO_PINGROUP(uart0_pins), GPIO_PINGROUP(timer_0_1_pins), GPIO_PINGROUP(timer_2_3_pins), }; struct 
spear_pinctrl_machdata spear3xx_machdata = { .pins = spear3xx_pins, .npins = ARRAY_SIZE(spear3xx_pins), .gpio_pingroups = spear3xx_gpio_pingroup, .ngpio_pingroups = ARRAY_SIZE(spear3xx_gpio_pingroup), };
gpl-2.0
c313742678/qt210_kernel
drivers/usb/wusbcore/wa-hc.c
3974
2592
/* * Wire Adapter Host Controller Driver * Common items to HWA and DWA based HCDs * * Copyright (C) 2005-2006 Intel Corporation * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version * 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA. * * * FIXME: docs */ #include <linux/slab.h> #include "wusbhc.h" #include "wa-hc.h" /** * Assumes * * wa->usb_dev and wa->usb_iface initialized and refcounted, * wa->wa_descr initialized. 
*/ int wa_create(struct wahc *wa, struct usb_interface *iface) { int result; struct device *dev = &iface->dev; result = wa_rpipes_create(wa); if (result < 0) goto error_rpipes_create; /* Fill up Data Transfer EP pointers */ wa->dti_epd = &iface->cur_altsetting->endpoint[1].desc; wa->dto_epd = &iface->cur_altsetting->endpoint[2].desc; wa->xfer_result_size = le16_to_cpu(wa->dti_epd->wMaxPacketSize); wa->xfer_result = kmalloc(wa->xfer_result_size, GFP_KERNEL); if (wa->xfer_result == NULL) goto error_xfer_result_alloc; result = wa_nep_create(wa, iface); if (result < 0) { dev_err(dev, "WA-CDS: can't initialize notif endpoint: %d\n", result); goto error_nep_create; } return 0; error_nep_create: kfree(wa->xfer_result); error_xfer_result_alloc: wa_rpipes_destroy(wa); error_rpipes_create: return result; } EXPORT_SYMBOL_GPL(wa_create); void __wa_destroy(struct wahc *wa) { if (wa->dti_urb) { usb_kill_urb(wa->dti_urb); usb_put_urb(wa->dti_urb); usb_kill_urb(wa->buf_in_urb); usb_put_urb(wa->buf_in_urb); } kfree(wa->xfer_result); wa_nep_destroy(wa); wa_rpipes_destroy(wa); } EXPORT_SYMBOL_GPL(__wa_destroy); /** * wa_reset_all - reset the WA device * @wa: the WA to be reset * * For HWAs the radio controller and all other PALs are also reset. */ void wa_reset_all(struct wahc *wa) { /* FIXME: assuming HWA. */ wusbhc_reset_all(wa->wusb); } MODULE_AUTHOR("Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>"); MODULE_DESCRIPTION("Wireless USB Wire Adapter core"); MODULE_LICENSE("GPL");
gpl-2.0
vamanea/goldfish-trustzone
net/bridge/br_forward.c
4742
6262
/* * Forwarding decision * Linux ethernet bridge * * Authors: * Lennert Buytenhek <buytenh@gnu.org> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/err.h> #include <linux/slab.h> #include <linux/kernel.h> #include <linux/netdevice.h> #include <linux/netpoll.h> #include <linux/skbuff.h> #include <linux/if_vlan.h> #include <linux/netfilter_bridge.h> #include "br_private.h" static int deliver_clone(const struct net_bridge_port *prev, struct sk_buff *skb, void (*__packet_hook)(const struct net_bridge_port *p, struct sk_buff *skb)); /* Don't forward packets to originating port or forwarding diasabled */ static inline int should_deliver(const struct net_bridge_port *p, const struct sk_buff *skb) { return (((p->flags & BR_HAIRPIN_MODE) || skb->dev != p->dev) && p->state == BR_STATE_FORWARDING); } static inline unsigned packet_length(const struct sk_buff *skb) { return skb->len - (skb->protocol == htons(ETH_P_8021Q) ? 
VLAN_HLEN : 0); } int br_dev_queue_push_xmit(struct sk_buff *skb) { /* ip_fragment doesn't copy the MAC header */ if (nf_bridge_maybe_copy_header(skb) || (packet_length(skb) > skb->dev->mtu && !skb_is_gso(skb))) { kfree_skb(skb); } else { skb_push(skb, ETH_HLEN); br_drop_fake_rtable(skb); dev_queue_xmit(skb); } return 0; } int br_forward_finish(struct sk_buff *skb) { return NF_HOOK(NFPROTO_BRIDGE, NF_BR_POST_ROUTING, skb, NULL, skb->dev, br_dev_queue_push_xmit); } static void __br_deliver(const struct net_bridge_port *to, struct sk_buff *skb) { skb->dev = to->dev; if (unlikely(netpoll_tx_running(to->dev))) { if (packet_length(skb) > skb->dev->mtu && !skb_is_gso(skb)) kfree_skb(skb); else { skb_push(skb, ETH_HLEN); br_netpoll_send_skb(to, skb); } return; } NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT, skb, NULL, skb->dev, br_forward_finish); } static void __br_forward(const struct net_bridge_port *to, struct sk_buff *skb) { struct net_device *indev; if (skb_warn_if_lro(skb)) { kfree_skb(skb); return; } indev = skb->dev; skb->dev = to->dev; skb_forward_csum(skb); NF_HOOK(NFPROTO_BRIDGE, NF_BR_FORWARD, skb, indev, skb->dev, br_forward_finish); } /* called with rcu_read_lock */ void br_deliver(const struct net_bridge_port *to, struct sk_buff *skb) { if (to && should_deliver(to, skb)) { __br_deliver(to, skb); return; } kfree_skb(skb); } /* called with rcu_read_lock */ void br_forward(const struct net_bridge_port *to, struct sk_buff *skb, struct sk_buff *skb0) { if (should_deliver(to, skb)) { if (skb0) deliver_clone(to, skb, __br_forward); else __br_forward(to, skb); return; } if (!skb0) kfree_skb(skb); } static int deliver_clone(const struct net_bridge_port *prev, struct sk_buff *skb, void (*__packet_hook)(const struct net_bridge_port *p, struct sk_buff *skb)) { struct net_device *dev = BR_INPUT_SKB_CB(skb)->brdev; skb = skb_clone(skb, GFP_ATOMIC); if (!skb) { dev->stats.tx_dropped++; return -ENOMEM; } __packet_hook(prev, skb); return 0; } static struct net_bridge_port 
*maybe_deliver( struct net_bridge_port *prev, struct net_bridge_port *p, struct sk_buff *skb, void (*__packet_hook)(const struct net_bridge_port *p, struct sk_buff *skb)) { int err; if (!should_deliver(p, skb)) return prev; if (!prev) goto out; err = deliver_clone(prev, skb, __packet_hook); if (err) return ERR_PTR(err); out: return p; } /* called under bridge lock */ static void br_flood(struct net_bridge *br, struct sk_buff *skb, struct sk_buff *skb0, void (*__packet_hook)(const struct net_bridge_port *p, struct sk_buff *skb)) { struct net_bridge_port *p; struct net_bridge_port *prev; prev = NULL; list_for_each_entry_rcu(p, &br->port_list, list) { prev = maybe_deliver(prev, p, skb, __packet_hook); if (IS_ERR(prev)) goto out; } if (!prev) goto out; if (skb0) deliver_clone(prev, skb, __packet_hook); else __packet_hook(prev, skb); return; out: if (!skb0) kfree_skb(skb); } /* called with rcu_read_lock */ void br_flood_deliver(struct net_bridge *br, struct sk_buff *skb) { br_flood(br, skb, NULL, __br_deliver); } /* called under bridge lock */ void br_flood_forward(struct net_bridge *br, struct sk_buff *skb, struct sk_buff *skb2) { br_flood(br, skb, skb2, __br_forward); } #ifdef CONFIG_BRIDGE_IGMP_SNOOPING /* called with rcu_read_lock */ static void br_multicast_flood(struct net_bridge_mdb_entry *mdst, struct sk_buff *skb, struct sk_buff *skb0, void (*__packet_hook)( const struct net_bridge_port *p, struct sk_buff *skb)) { struct net_device *dev = BR_INPUT_SKB_CB(skb)->brdev; struct net_bridge *br = netdev_priv(dev); struct net_bridge_port *prev = NULL; struct net_bridge_port_group *p; struct hlist_node *rp; rp = rcu_dereference(hlist_first_rcu(&br->router_list)); p = mdst ? rcu_dereference(mdst->ports) : NULL; while (p || rp) { struct net_bridge_port *port, *lport, *rport; lport = p ? p->port : NULL; rport = rp ? hlist_entry(rp, struct net_bridge_port, rlist) : NULL; port = (unsigned long)lport > (unsigned long)rport ? 
lport : rport; prev = maybe_deliver(prev, port, skb, __packet_hook); if (IS_ERR(prev)) goto out; if ((unsigned long)lport >= (unsigned long)port) p = rcu_dereference(p->next); if ((unsigned long)rport >= (unsigned long)port) rp = rcu_dereference(hlist_next_rcu(rp)); } if (!prev) goto out; if (skb0) deliver_clone(prev, skb, __packet_hook); else __packet_hook(prev, skb); return; out: if (!skb0) kfree_skb(skb); } /* called with rcu_read_lock */ void br_multicast_deliver(struct net_bridge_mdb_entry *mdst, struct sk_buff *skb) { br_multicast_flood(mdst, skb, NULL, __br_deliver); } /* called with rcu_read_lock */ void br_multicast_forward(struct net_bridge_mdb_entry *mdst, struct sk_buff *skb, struct sk_buff *skb2) { br_multicast_flood(mdst, skb, skb2, __br_forward); } #endif
gpl-2.0
Sparhawk76/android_kernel_samsung_afyonltev1
arch/powerpc/sysdev/rtc_cmos_setup.c
4998
1616
/* * Setup code for PC-style Real-Time Clock. * * Author: Wade Farnsworth <wfarnsworth@mvista.com> * * 2007 (c) MontaVista Software, Inc. This file is licensed under * the terms of the GNU General Public License version 2. This program * is licensed "as is" without any warranty of any kind, whether express * or implied. */ #include <linux/platform_device.h> #include <linux/err.h> #include <linux/init.h> #include <linux/module.h> #include <linux/mc146818rtc.h> #include <asm/prom.h> static int __init add_rtc(void) { struct device_node *np; struct platform_device *pd; struct resource res[2]; unsigned int num_res = 1; int ret; memset(&res, 0, sizeof(res)); np = of_find_compatible_node(NULL, NULL, "pnpPNP,b00"); if (!np) return -ENODEV; ret = of_address_to_resource(np, 0, &res[0]); of_node_put(np); if (ret) return ret; /* * RTC_PORT(x) is hardcoded in asm/mc146818rtc.h. Verify that the * address provided by the device node matches. */ if (res[0].start != RTC_PORT(0)) return -EINVAL; np = of_find_compatible_node(NULL, NULL, "chrp,iic"); if (!np) np = of_find_compatible_node(NULL, NULL, "pnpPNP,000"); if (np) { of_node_put(np); /* * Use a fixed interrupt value of 8 since on PPC if we are * using this its off an i8259 which we ensure has interrupt * numbers 0..15. */ res[1].start = 8; res[1].end = 8; res[1].flags = IORESOURCE_IRQ; num_res++; } pd = platform_device_register_simple("rtc_cmos", -1, &res[0], num_res); if (IS_ERR(pd)) return PTR_ERR(pd); return 0; } fs_initcall(add_rtc); MODULE_LICENSE("GPL");
gpl-2.0
Fusion-Devices/android_kernel_lge_mako
drivers/gpu/drm/nouveau/nv50_dac.c
5254
9006
/* * Copyright (C) 2008 Maarten Maathuis. * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining * a copy of this software and associated documentation files (the * "Software"), to deal in the Software without restriction, including * without limitation the rights to use, copy, modify, merge, publish, * distribute, sublicense, and/or sell copies of the Software, and to * permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice (including the * next paragraph) shall be included in all copies or substantial * portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
* */ #include "drmP.h" #include "drm_crtc_helper.h" #define NOUVEAU_DMA_DEBUG (nouveau_reg_debug & NOUVEAU_REG_DEBUG_EVO) #include "nouveau_reg.h" #include "nouveau_drv.h" #include "nouveau_dma.h" #include "nouveau_encoder.h" #include "nouveau_connector.h" #include "nouveau_crtc.h" #include "nv50_display.h" static void nv50_dac_disconnect(struct drm_encoder *encoder) { struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); struct drm_device *dev = encoder->dev; struct nouveau_channel *evo = nv50_display(dev)->master; int ret; if (!nv_encoder->crtc) return; nv50_crtc_blank(nouveau_crtc(nv_encoder->crtc), true); NV_DEBUG_KMS(dev, "Disconnecting DAC %d\n", nv_encoder->or); ret = RING_SPACE(evo, 4); if (ret) { NV_ERROR(dev, "no space while disconnecting DAC\n"); return; } BEGIN_RING(evo, 0, NV50_EVO_DAC(nv_encoder->or, MODE_CTRL), 1); OUT_RING (evo, 0); BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1); OUT_RING (evo, 0); nv_encoder->crtc = NULL; } static enum drm_connector_status nv50_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector) { struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); struct drm_device *dev = encoder->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; enum drm_connector_status status = connector_status_disconnected; uint32_t dpms_state, load_pattern, load_state; int or = nv_encoder->or; nv_wr32(dev, NV50_PDISPLAY_DAC_CLK_CTRL1(or), 0x00000001); dpms_state = nv_rd32(dev, NV50_PDISPLAY_DAC_DPMS_CTRL(or)); nv_wr32(dev, NV50_PDISPLAY_DAC_DPMS_CTRL(or), 0x00150000 | NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING); if (!nv_wait(dev, NV50_PDISPLAY_DAC_DPMS_CTRL(or), NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING, 0)) { NV_ERROR(dev, "timeout: DAC_DPMS_CTRL_PENDING(%d) == 0\n", or); NV_ERROR(dev, "DAC_DPMS_CTRL(%d) = 0x%08x\n", or, nv_rd32(dev, NV50_PDISPLAY_DAC_DPMS_CTRL(or))); return status; } /* Use bios provided value if possible. 
*/ if (dev_priv->vbios.dactestval) { load_pattern = dev_priv->vbios.dactestval; NV_DEBUG_KMS(dev, "Using bios provided load_pattern of %d\n", load_pattern); } else { load_pattern = 340; NV_DEBUG_KMS(dev, "Using default load_pattern of %d\n", load_pattern); } nv_wr32(dev, NV50_PDISPLAY_DAC_LOAD_CTRL(or), NV50_PDISPLAY_DAC_LOAD_CTRL_ACTIVE | load_pattern); mdelay(45); /* give it some time to process */ load_state = nv_rd32(dev, NV50_PDISPLAY_DAC_LOAD_CTRL(or)); nv_wr32(dev, NV50_PDISPLAY_DAC_LOAD_CTRL(or), 0); nv_wr32(dev, NV50_PDISPLAY_DAC_DPMS_CTRL(or), dpms_state | NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING); if ((load_state & NV50_PDISPLAY_DAC_LOAD_CTRL_PRESENT) == NV50_PDISPLAY_DAC_LOAD_CTRL_PRESENT) status = connector_status_connected; if (status == connector_status_connected) NV_DEBUG_KMS(dev, "Load was detected on output with or %d\n", or); else NV_DEBUG_KMS(dev, "Load was not detected on output with or %d\n", or); return status; } static void nv50_dac_dpms(struct drm_encoder *encoder, int mode) { struct drm_device *dev = encoder->dev; struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); uint32_t val; int or = nv_encoder->or; NV_DEBUG_KMS(dev, "or %d mode %d\n", or, mode); /* wait for it to be done */ if (!nv_wait(dev, NV50_PDISPLAY_DAC_DPMS_CTRL(or), NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING, 0)) { NV_ERROR(dev, "timeout: DAC_DPMS_CTRL_PENDING(%d) == 0\n", or); NV_ERROR(dev, "DAC_DPMS_CTRL(%d) = 0x%08x\n", or, nv_rd32(dev, NV50_PDISPLAY_DAC_DPMS_CTRL(or))); return; } val = nv_rd32(dev, NV50_PDISPLAY_DAC_DPMS_CTRL(or)) & ~0x7F; if (mode != DRM_MODE_DPMS_ON) val |= NV50_PDISPLAY_DAC_DPMS_CTRL_BLANKED; switch (mode) { case DRM_MODE_DPMS_STANDBY: val |= NV50_PDISPLAY_DAC_DPMS_CTRL_HSYNC_OFF; break; case DRM_MODE_DPMS_SUSPEND: val |= NV50_PDISPLAY_DAC_DPMS_CTRL_VSYNC_OFF; break; case DRM_MODE_DPMS_OFF: val |= NV50_PDISPLAY_DAC_DPMS_CTRL_OFF; val |= NV50_PDISPLAY_DAC_DPMS_CTRL_HSYNC_OFF; val |= NV50_PDISPLAY_DAC_DPMS_CTRL_VSYNC_OFF; break; default: break; } 
nv_wr32(dev, NV50_PDISPLAY_DAC_DPMS_CTRL(or), val | NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING); } static void nv50_dac_save(struct drm_encoder *encoder) { NV_ERROR(encoder->dev, "!!\n"); } static void nv50_dac_restore(struct drm_encoder *encoder) { NV_ERROR(encoder->dev, "!!\n"); } static bool nv50_dac_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); struct nouveau_connector *connector; NV_DEBUG_KMS(encoder->dev, "or %d\n", nv_encoder->or); connector = nouveau_encoder_connector_get(nv_encoder); if (!connector) { NV_ERROR(encoder->dev, "Encoder has no connector\n"); return false; } if (connector->scaling_mode != DRM_MODE_SCALE_NONE && connector->native_mode) drm_mode_copy(adjusted_mode, connector->native_mode); return true; } static void nv50_dac_commit(struct drm_encoder *encoder) { } static void nv50_dac_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); struct drm_device *dev = encoder->dev; struct nouveau_channel *evo = nv50_display(dev)->master; struct nouveau_crtc *crtc = nouveau_crtc(encoder->crtc); uint32_t mode_ctl = 0, mode_ctl2 = 0; int ret; NV_DEBUG_KMS(dev, "or %d type %d crtc %d\n", nv_encoder->or, nv_encoder->dcb->type, crtc->index); nv50_dac_dpms(encoder, DRM_MODE_DPMS_ON); if (crtc->index == 1) mode_ctl |= NV50_EVO_DAC_MODE_CTRL_CRTC1; else mode_ctl |= NV50_EVO_DAC_MODE_CTRL_CRTC0; /* Lacking a working tv-out, this is not a 100% sure. 
*/ if (nv_encoder->dcb->type == OUTPUT_ANALOG) mode_ctl |= 0x40; else if (nv_encoder->dcb->type == OUTPUT_TV) mode_ctl |= 0x100; if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC) mode_ctl2 |= NV50_EVO_DAC_MODE_CTRL2_NHSYNC; if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC) mode_ctl2 |= NV50_EVO_DAC_MODE_CTRL2_NVSYNC; ret = RING_SPACE(evo, 3); if (ret) { NV_ERROR(dev, "no space while connecting DAC\n"); return; } BEGIN_RING(evo, 0, NV50_EVO_DAC(nv_encoder->or, MODE_CTRL), 2); OUT_RING(evo, mode_ctl); OUT_RING(evo, mode_ctl2); nv_encoder->crtc = encoder->crtc; } static struct drm_crtc * nv50_dac_crtc_get(struct drm_encoder *encoder) { return nouveau_encoder(encoder)->crtc; } static const struct drm_encoder_helper_funcs nv50_dac_helper_funcs = { .dpms = nv50_dac_dpms, .save = nv50_dac_save, .restore = nv50_dac_restore, .mode_fixup = nv50_dac_mode_fixup, .prepare = nv50_dac_disconnect, .commit = nv50_dac_commit, .mode_set = nv50_dac_mode_set, .get_crtc = nv50_dac_crtc_get, .detect = nv50_dac_detect, .disable = nv50_dac_disconnect }; static void nv50_dac_destroy(struct drm_encoder *encoder) { struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); if (!encoder) return; NV_DEBUG_KMS(encoder->dev, "\n"); drm_encoder_cleanup(encoder); kfree(nv_encoder); } static const struct drm_encoder_funcs nv50_dac_encoder_funcs = { .destroy = nv50_dac_destroy, }; int nv50_dac_create(struct drm_connector *connector, struct dcb_entry *entry) { struct nouveau_encoder *nv_encoder; struct drm_encoder *encoder; nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL); if (!nv_encoder) return -ENOMEM; encoder = to_drm_encoder(nv_encoder); nv_encoder->dcb = entry; nv_encoder->or = ffs(entry->or) - 1; drm_encoder_init(connector->dev, encoder, &nv50_dac_encoder_funcs, DRM_MODE_ENCODER_DAC); drm_encoder_helper_add(encoder, &nv50_dac_helper_funcs); encoder->possible_crtcs = entry->heads; encoder->possible_clones = 0; drm_mode_connector_attach_encoder(connector, encoder); return 0; }
gpl-2.0
jassycliq/lg_g2d801
drivers/firewire/init_ohci1394_dma.c
8326
9836
/* * init_ohci1394_dma.c - Initializes physical DMA on all OHCI 1394 controllers * * Copyright (C) 2006-2007 Bernhard Kaindl <bk@suse.de> * * Derived from drivers/ieee1394/ohci1394.c and arch/x86/kernel/early-quirks.c * this file has functions to: * - scan the PCI very early on boot for all OHCI 1394-compliant controllers * - reset and initialize them and make them join the IEEE1394 bus and * - enable physical DMA on them to allow remote debugging * * All code and data is marked as __init and __initdata, respective as * during boot, all OHCI1394 controllers may be claimed by the firewire * stack and at this point, this code should not touch them anymore. * * To use physical DMA after the initialization of the firewire stack, * be sure that the stack enables it and (re-)attach after the bus reset * which may be caused by the firewire stack initialization. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
*/ #include <linux/delay.h> #include <linux/io.h> #include <linux/kernel.h> #include <linux/pci.h> /* for PCI defines */ #include <linux/string.h> #include <asm/pci-direct.h> /* for direct PCI config space access */ #include <asm/fixmap.h> #include <linux/init_ohci1394_dma.h> #include "ohci.h" int __initdata init_ohci1394_dma_early; struct ohci { void __iomem *registers; }; static inline void reg_write(const struct ohci *ohci, int offset, u32 data) { writel(data, ohci->registers + offset); } static inline u32 reg_read(const struct ohci *ohci, int offset) { return readl(ohci->registers + offset); } #define OHCI_LOOP_COUNT 100 /* Number of loops for reg read waits */ /* Reads a PHY register of an OHCI-1394 controller */ static inline u8 __init get_phy_reg(struct ohci *ohci, u8 addr) { int i; u32 r; reg_write(ohci, OHCI1394_PhyControl, (addr << 8) | 0x00008000); for (i = 0; i < OHCI_LOOP_COUNT; i++) { if (reg_read(ohci, OHCI1394_PhyControl) & 0x80000000) break; mdelay(1); } r = reg_read(ohci, OHCI1394_PhyControl); return (r & 0x00ff0000) >> 16; } /* Writes to a PHY register of an OHCI-1394 controller */ static inline void __init set_phy_reg(struct ohci *ohci, u8 addr, u8 data) { int i; reg_write(ohci, OHCI1394_PhyControl, (addr << 8) | data | 0x00004000); for (i = 0; i < OHCI_LOOP_COUNT; i++) { if (!(reg_read(ohci, OHCI1394_PhyControl) & 0x00004000)) break; mdelay(1); } } /* Resets an OHCI-1394 controller (for sane state before initialization) */ static inline void __init init_ohci1394_soft_reset(struct ohci *ohci) { int i; reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_softReset); for (i = 0; i < OHCI_LOOP_COUNT; i++) { if (!(reg_read(ohci, OHCI1394_HCControlSet) & OHCI1394_HCControl_softReset)) break; mdelay(1); } } #define OHCI1394_MAX_AT_REQ_RETRIES 0xf #define OHCI1394_MAX_AT_RESP_RETRIES 0x2 #define OHCI1394_MAX_PHYS_RESP_RETRIES 0x8 /* Basic OHCI-1394 register and port inititalization */ static inline void __init init_ohci1394_initialize(struct ohci 
*ohci) { u32 bus_options; int num_ports, i; /* Put some defaults to these undefined bus options */ bus_options = reg_read(ohci, OHCI1394_BusOptions); bus_options |= 0x60000000; /* Enable CMC and ISC */ bus_options &= ~0x00ff0000; /* XXX: Set cyc_clk_acc to zero for now */ bus_options &= ~0x18000000; /* Disable PMC and BMC */ reg_write(ohci, OHCI1394_BusOptions, bus_options); /* Set the bus number */ reg_write(ohci, OHCI1394_NodeID, 0x0000ffc0); /* Enable posted writes */ reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_postedWriteEnable); /* Clear link control register */ reg_write(ohci, OHCI1394_LinkControlClear, 0xffffffff); /* enable phys */ reg_write(ohci, OHCI1394_LinkControlSet, OHCI1394_LinkControl_rcvPhyPkt); /* Don't accept phy packets into AR request context */ reg_write(ohci, OHCI1394_LinkControlClear, 0x00000400); /* Clear the Isochonouys interrupt masks */ reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 0xffffffff); reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 0xffffffff); reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 0xffffffff); reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 0xffffffff); /* Accept asyncronous transfer requests from all nodes for now */ reg_write(ohci, OHCI1394_AsReqFilterHiSet, 0x80000000); /* Specify asyncronous transfer retries */ reg_write(ohci, OHCI1394_ATRetries, OHCI1394_MAX_AT_REQ_RETRIES | (OHCI1394_MAX_AT_RESP_RETRIES<<4) | (OHCI1394_MAX_PHYS_RESP_RETRIES<<8)); /* We don't want hardware swapping */ reg_write(ohci, OHCI1394_HCControlClear, OHCI1394_HCControl_noByteSwapData); /* Enable link */ reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_linkEnable); /* If anything is connected to a port, make sure it is enabled */ num_ports = get_phy_reg(ohci, 2) & 0xf; for (i = 0; i < num_ports; i++) { unsigned int status; set_phy_reg(ohci, 7, i); status = get_phy_reg(ohci, 8); if (status & 0x20) set_phy_reg(ohci, 8, status & ~1); } } /** * init_ohci1394_wait_for_busresets - wait until bus resets are completed * * 
OHCI1394 initialization itself and any device going on- or offline * and any cable issue cause a IEEE1394 bus reset. The OHCI1394 spec * specifies that physical DMA is disabled on each bus reset and it * has to be enabled after each bus reset when needed. We resort * to polling here because on early boot, we have no interrupts. */ static inline void __init init_ohci1394_wait_for_busresets(struct ohci *ohci) { int i, events; for (i = 0; i < 9; i++) { mdelay(200); events = reg_read(ohci, OHCI1394_IntEventSet); if (events & OHCI1394_busReset) reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset); } } /** * init_ohci1394_enable_physical_dma - Enable physical DMA for remote debugging * This enables remote DMA access over IEEE1394 from every host for the low * 4GB of address space. DMA accesses above 4GB are not available currently. */ static inline void __init init_ohci1394_enable_physical_dma(struct ohci *ohci) { reg_write(ohci, OHCI1394_PhyReqFilterHiSet, 0xffffffff); reg_write(ohci, OHCI1394_PhyReqFilterLoSet, 0xffffffff); reg_write(ohci, OHCI1394_PhyUpperBound, 0xffff0000); } /** * init_ohci1394_reset_and_init_dma - init controller and enable DMA * This initializes the given controller and enables physical DMA engine in it. */ static inline void __init init_ohci1394_reset_and_init_dma(struct ohci *ohci) { /* Start off with a soft reset, clears everything to a sane state. */ init_ohci1394_soft_reset(ohci); /* Accessing some registers without LPS enabled may cause lock up */ reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_LPS); /* Disable and clear interrupts */ reg_write(ohci, OHCI1394_IntEventClear, 0xffffffff); reg_write(ohci, OHCI1394_IntMaskClear, 0xffffffff); mdelay(50); /* Wait 50msec to make sure we have full link enabled */ init_ohci1394_initialize(ohci); /* * The initialization causes at least one IEEE1394 bus reset. 
Enabling * physical DMA only works *after* *all* bus resets have calmed down: */ init_ohci1394_wait_for_busresets(ohci); /* We had to wait and do this now if we want to debug early problems */ init_ohci1394_enable_physical_dma(ohci); } /** * init_ohci1394_controller - Map the registers of the controller and init DMA * This maps the registers of the specified controller and initializes it */ static inline void __init init_ohci1394_controller(int num, int slot, int func) { unsigned long ohci_base; struct ohci ohci; printk(KERN_INFO "init_ohci1394_dma: initializing OHCI-1394" " at %02x:%02x.%x\n", num, slot, func); ohci_base = read_pci_config(num, slot, func, PCI_BASE_ADDRESS_0+(0<<2)) & PCI_BASE_ADDRESS_MEM_MASK; set_fixmap_nocache(FIX_OHCI1394_BASE, ohci_base); ohci.registers = (void __iomem *)fix_to_virt(FIX_OHCI1394_BASE); init_ohci1394_reset_and_init_dma(&ohci); } /** * debug_init_ohci1394_dma - scan for OHCI1394 controllers and init DMA on them * Scans the whole PCI space for OHCI1394 controllers and inits DMA on them */ void __init init_ohci1394_dma_on_all_controllers(void) { int num, slot, func; u32 class; if (!early_pci_allowed()) return; /* Poor man's PCI discovery, the only thing we can do at early boot */ for (num = 0; num < 32; num++) { for (slot = 0; slot < 32; slot++) { for (func = 0; func < 8; func++) { class = read_pci_config(num, slot, func, PCI_CLASS_REVISION); if (class == 0xffffffff) continue; /* No device at this func */ if (class>>8 != PCI_CLASS_SERIAL_FIREWIRE_OHCI) continue; /* Not an OHCI-1394 device */ init_ohci1394_controller(num, slot, func); break; /* Assume one controller per device */ } } } printk(KERN_INFO "init_ohci1394_dma: finished initializing OHCI DMA\n"); } /** * setup_init_ohci1394_early - enables early OHCI1394 DMA initialization */ static int __init setup_ohci1394_dma(char *opt) { if (!strcmp(opt, "early")) init_ohci1394_dma_early = 1; return 0; } /* passing ohci1394_dma=early on boot causes early OHCI1394 DMA initialization 
*/ early_param("ohci1394_dma", setup_ohci1394_dma);
gpl-2.0
fschaefer/android-samsung-3.0-jb
sound/pci/emu10k1/emuproc.c
8838
21631
/* * Copyright (c) by Jaroslav Kysela <perex@perex.cz> * Creative Labs, Inc. * Routines for control of EMU10K1 chips / proc interface routines * * Copyright (c) by James Courtier-Dutton <James@superbug.co.uk> * Added EMU 1010 support. * * BUGS: * -- * * TODO: * -- * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/slab.h> #include <linux/init.h> #include <sound/core.h> #include <sound/emu10k1.h> #include "p16v.h" #ifdef CONFIG_PROC_FS static void snd_emu10k1_proc_spdif_status(struct snd_emu10k1 * emu, struct snd_info_buffer *buffer, char *title, int status_reg, int rate_reg) { static char *clkaccy[4] = { "1000ppm", "50ppm", "variable", "unknown" }; static int samplerate[16] = { 44100, 1, 48000, 32000, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 }; static char *channel[16] = { "unspec", "left", "right", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12", "13", "14", "15" }; static char *emphasis[8] = { "none", "50/15 usec 2 channel", "2", "3", "4", "5", "6", "7" }; unsigned int status, rate = 0; status = snd_emu10k1_ptr_read(emu, status_reg, 0); snd_iprintf(buffer, "\n%s\n", title); if (status != 0xffffffff) { snd_iprintf(buffer, "Professional Mode : %s\n", (status & SPCS_PROFESSIONAL) ? "yes" : "no"); snd_iprintf(buffer, "Not Audio Data : %s\n", (status & SPCS_NOTAUDIODATA) ? 
"yes" : "no"); snd_iprintf(buffer, "Copyright : %s\n", (status & SPCS_COPYRIGHT) ? "yes" : "no"); snd_iprintf(buffer, "Emphasis : %s\n", emphasis[(status & SPCS_EMPHASISMASK) >> 3]); snd_iprintf(buffer, "Mode : %i\n", (status & SPCS_MODEMASK) >> 6); snd_iprintf(buffer, "Category Code : 0x%x\n", (status & SPCS_CATEGORYCODEMASK) >> 8); snd_iprintf(buffer, "Generation Status : %s\n", status & SPCS_GENERATIONSTATUS ? "original" : "copy"); snd_iprintf(buffer, "Source Mask : %i\n", (status & SPCS_SOURCENUMMASK) >> 16); snd_iprintf(buffer, "Channel Number : %s\n", channel[(status & SPCS_CHANNELNUMMASK) >> 20]); snd_iprintf(buffer, "Sample Rate : %iHz\n", samplerate[(status & SPCS_SAMPLERATEMASK) >> 24]); snd_iprintf(buffer, "Clock Accuracy : %s\n", clkaccy[(status & SPCS_CLKACCYMASK) >> 28]); if (rate_reg > 0) { rate = snd_emu10k1_ptr_read(emu, rate_reg, 0); snd_iprintf(buffer, "S/PDIF Valid : %s\n", rate & SRCS_SPDIFVALID ? "on" : "off"); snd_iprintf(buffer, "S/PDIF Locked : %s\n", rate & SRCS_SPDIFLOCKED ? "on" : "off"); snd_iprintf(buffer, "Rate Locked : %s\n", rate & SRCS_RATELOCKED ? 
"on" : "off"); /* From ((Rate * 48000 ) / 262144); */ snd_iprintf(buffer, "Estimated Sample Rate : %d\n", ((rate & 0xFFFFF ) * 375) >> 11); } } else { snd_iprintf(buffer, "No signal detected.\n"); } } static void snd_emu10k1_proc_read(struct snd_info_entry *entry, struct snd_info_buffer *buffer) { /* FIXME - output names are in emufx.c too */ static char *creative_outs[32] = { /* 00 */ "AC97 Left", /* 01 */ "AC97 Right", /* 02 */ "Optical IEC958 Left", /* 03 */ "Optical IEC958 Right", /* 04 */ "Center", /* 05 */ "LFE", /* 06 */ "Headphone Left", /* 07 */ "Headphone Right", /* 08 */ "Surround Left", /* 09 */ "Surround Right", /* 10 */ "PCM Capture Left", /* 11 */ "PCM Capture Right", /* 12 */ "MIC Capture", /* 13 */ "AC97 Surround Left", /* 14 */ "AC97 Surround Right", /* 15 */ "???", /* 16 */ "???", /* 17 */ "Analog Center", /* 18 */ "Analog LFE", /* 19 */ "???", /* 20 */ "???", /* 21 */ "???", /* 22 */ "???", /* 23 */ "???", /* 24 */ "???", /* 25 */ "???", /* 26 */ "???", /* 27 */ "???", /* 28 */ "???", /* 29 */ "???", /* 30 */ "???", /* 31 */ "???" 
}; static char *audigy_outs[64] = { /* 00 */ "Digital Front Left", /* 01 */ "Digital Front Right", /* 02 */ "Digital Center", /* 03 */ "Digital LEF", /* 04 */ "Headphone Left", /* 05 */ "Headphone Right", /* 06 */ "Digital Rear Left", /* 07 */ "Digital Rear Right", /* 08 */ "Front Left", /* 09 */ "Front Right", /* 10 */ "Center", /* 11 */ "LFE", /* 12 */ "???", /* 13 */ "???", /* 14 */ "Rear Left", /* 15 */ "Rear Right", /* 16 */ "AC97 Front Left", /* 17 */ "AC97 Front Right", /* 18 */ "ADC Caputre Left", /* 19 */ "ADC Capture Right", /* 20 */ "???", /* 21 */ "???", /* 22 */ "???", /* 23 */ "???", /* 24 */ "???", /* 25 */ "???", /* 26 */ "???", /* 27 */ "???", /* 28 */ "???", /* 29 */ "???", /* 30 */ "???", /* 31 */ "???", /* 32 */ "FXBUS2_0", /* 33 */ "FXBUS2_1", /* 34 */ "FXBUS2_2", /* 35 */ "FXBUS2_3", /* 36 */ "FXBUS2_4", /* 37 */ "FXBUS2_5", /* 38 */ "FXBUS2_6", /* 39 */ "FXBUS2_7", /* 40 */ "FXBUS2_8", /* 41 */ "FXBUS2_9", /* 42 */ "FXBUS2_10", /* 43 */ "FXBUS2_11", /* 44 */ "FXBUS2_12", /* 45 */ "FXBUS2_13", /* 46 */ "FXBUS2_14", /* 47 */ "FXBUS2_15", /* 48 */ "FXBUS2_16", /* 49 */ "FXBUS2_17", /* 50 */ "FXBUS2_18", /* 51 */ "FXBUS2_19", /* 52 */ "FXBUS2_20", /* 53 */ "FXBUS2_21", /* 54 */ "FXBUS2_22", /* 55 */ "FXBUS2_23", /* 56 */ "FXBUS2_24", /* 57 */ "FXBUS2_25", /* 58 */ "FXBUS2_26", /* 59 */ "FXBUS2_27", /* 60 */ "FXBUS2_28", /* 61 */ "FXBUS2_29", /* 62 */ "FXBUS2_30", /* 63 */ "FXBUS2_31" }; struct snd_emu10k1 *emu = entry->private_data; unsigned int val, val1; int nefx = emu->audigy ? 64 : 32; char **outputs = emu->audigy ? audigy_outs : creative_outs; int idx; snd_iprintf(buffer, "EMU10K1\n\n"); snd_iprintf(buffer, "Card : %s\n", emu->audigy ? "Audigy" : (emu->card_capabilities->ecard ? 
"EMU APS" : "Creative")); snd_iprintf(buffer, "Internal TRAM (words) : 0x%x\n", emu->fx8010.itram_size); snd_iprintf(buffer, "External TRAM (words) : 0x%x\n", (int)emu->fx8010.etram_pages.bytes / 2); snd_iprintf(buffer, "\n"); snd_iprintf(buffer, "Effect Send Routing :\n"); for (idx = 0; idx < NUM_G; idx++) { val = emu->audigy ? snd_emu10k1_ptr_read(emu, A_FXRT1, idx) : snd_emu10k1_ptr_read(emu, FXRT, idx); val1 = emu->audigy ? snd_emu10k1_ptr_read(emu, A_FXRT2, idx) : 0; if (emu->audigy) { snd_iprintf(buffer, "Ch%i: A=%i, B=%i, C=%i, D=%i, ", idx, val & 0x3f, (val >> 8) & 0x3f, (val >> 16) & 0x3f, (val >> 24) & 0x3f); snd_iprintf(buffer, "E=%i, F=%i, G=%i, H=%i\n", val1 & 0x3f, (val1 >> 8) & 0x3f, (val1 >> 16) & 0x3f, (val1 >> 24) & 0x3f); } else { snd_iprintf(buffer, "Ch%i: A=%i, B=%i, C=%i, D=%i\n", idx, (val >> 16) & 0x0f, (val >> 20) & 0x0f, (val >> 24) & 0x0f, (val >> 28) & 0x0f); } } snd_iprintf(buffer, "\nCaptured FX Outputs :\n"); for (idx = 0; idx < nefx; idx++) { if (emu->efx_voices_mask[idx/32] & (1 << (idx%32))) snd_iprintf(buffer, " Output %02i [%s]\n", idx, outputs[idx]); } snd_iprintf(buffer, "\nAll FX Outputs :\n"); for (idx = 0; idx < (emu->audigy ? 
64 : 32); idx++) snd_iprintf(buffer, " Output %02i [%s]\n", idx, outputs[idx]); } static void snd_emu10k1_proc_spdif_read(struct snd_info_entry *entry, struct snd_info_buffer *buffer) { struct snd_emu10k1 *emu = entry->private_data; u32 value; u32 value2; unsigned long flags; u32 rate; if (emu->card_capabilities->emu_model) { spin_lock_irqsave(&emu->emu_lock, flags); snd_emu1010_fpga_read(emu, 0x38, &value); spin_unlock_irqrestore(&emu->emu_lock, flags); if ((value & 0x1) == 0) { spin_lock_irqsave(&emu->emu_lock, flags); snd_emu1010_fpga_read(emu, 0x2a, &value); snd_emu1010_fpga_read(emu, 0x2b, &value2); spin_unlock_irqrestore(&emu->emu_lock, flags); rate = 0x1770000 / (((value << 5) | value2)+1); snd_iprintf(buffer, "ADAT Locked : %u\n", rate); } else { snd_iprintf(buffer, "ADAT Unlocked\n"); } spin_lock_irqsave(&emu->emu_lock, flags); snd_emu1010_fpga_read(emu, 0x20, &value); spin_unlock_irqrestore(&emu->emu_lock, flags); if ((value & 0x4) == 0) { spin_lock_irqsave(&emu->emu_lock, flags); snd_emu1010_fpga_read(emu, 0x28, &value); snd_emu1010_fpga_read(emu, 0x29, &value2); spin_unlock_irqrestore(&emu->emu_lock, flags); rate = 0x1770000 / (((value << 5) | value2)+1); snd_iprintf(buffer, "SPDIF Locked : %d\n", rate); } else { snd_iprintf(buffer, "SPDIF Unlocked\n"); } } else { snd_emu10k1_proc_spdif_status(emu, buffer, "CD-ROM S/PDIF In", CDCS, CDSRCS); snd_emu10k1_proc_spdif_status(emu, buffer, "Optical or Coax S/PDIF In", GPSCS, GPSRCS); } #if 0 val = snd_emu10k1_ptr_read(emu, ZVSRCS, 0); snd_iprintf(buffer, "\nZoomed Video\n"); snd_iprintf(buffer, "Rate Locked : %s\n", val & SRCS_RATELOCKED ? 
"on" : "off"); snd_iprintf(buffer, "Estimated Sample Rate : 0x%x\n", val & SRCS_ESTSAMPLERATE); #endif } static void snd_emu10k1_proc_rates_read(struct snd_info_entry *entry, struct snd_info_buffer *buffer) { static int samplerate[8] = { 44100, 48000, 96000, 192000, 4, 5, 6, 7 }; struct snd_emu10k1 *emu = entry->private_data; unsigned int val, tmp, n; val = snd_emu10k1_ptr20_read(emu, CAPTURE_RATE_STATUS, 0); tmp = (val >> 16) & 0x8; for (n = 0; n < 4; n++) { tmp = val >> (16 + (n*4)); if (tmp & 0x8) snd_iprintf(buffer, "Channel %d: Rate=%d\n", n, samplerate[tmp & 0x7]); else snd_iprintf(buffer, "Channel %d: No input\n", n); } } static void snd_emu10k1_proc_acode_read(struct snd_info_entry *entry, struct snd_info_buffer *buffer) { u32 pc; struct snd_emu10k1 *emu = entry->private_data; snd_iprintf(buffer, "FX8010 Instruction List '%s'\n", emu->fx8010.name); snd_iprintf(buffer, " Code dump :\n"); for (pc = 0; pc < (emu->audigy ? 1024 : 512); pc++) { u32 low, high; low = snd_emu10k1_efx_read(emu, pc * 2); high = snd_emu10k1_efx_read(emu, pc * 2 + 1); if (emu->audigy) snd_iprintf(buffer, " OP(0x%02x, 0x%03x, 0x%03x, 0x%03x, 0x%03x) /* 0x%04x: 0x%08x%08x */\n", (high >> 24) & 0x0f, (high >> 12) & 0x7ff, (high >> 0) & 0x7ff, (low >> 12) & 0x7ff, (low >> 0) & 0x7ff, pc, high, low); else snd_iprintf(buffer, " OP(0x%02x, 0x%03x, 0x%03x, 0x%03x, 0x%03x) /* 0x%04x: 0x%08x%08x */\n", (high >> 20) & 0x0f, (high >> 10) & 0x3ff, (high >> 0) & 0x3ff, (low >> 10) & 0x3ff, (low >> 0) & 0x3ff, pc, high, low); } } #define TOTAL_SIZE_GPR (0x100*4) #define A_TOTAL_SIZE_GPR (0x200*4) #define TOTAL_SIZE_TANKMEM_DATA (0xa0*4) #define TOTAL_SIZE_TANKMEM_ADDR (0xa0*4) #define A_TOTAL_SIZE_TANKMEM_DATA (0x100*4) #define A_TOTAL_SIZE_TANKMEM_ADDR (0x100*4) #define TOTAL_SIZE_CODE (0x200*8) #define A_TOTAL_SIZE_CODE (0x400*8) static ssize_t snd_emu10k1_fx8010_read(struct snd_info_entry *entry, void *file_private_data, struct file *file, char __user *buf, size_t count, loff_t pos) { struct 
snd_emu10k1 *emu = entry->private_data; unsigned int offset; int tram_addr = 0; unsigned int *tmp; long res; unsigned int idx; if (!strcmp(entry->name, "fx8010_tram_addr")) { offset = TANKMEMADDRREGBASE; tram_addr = 1; } else if (!strcmp(entry->name, "fx8010_tram_data")) { offset = TANKMEMDATAREGBASE; } else if (!strcmp(entry->name, "fx8010_code")) { offset = emu->audigy ? A_MICROCODEBASE : MICROCODEBASE; } else { offset = emu->audigy ? A_FXGPREGBASE : FXGPREGBASE; } tmp = kmalloc(count + 8, GFP_KERNEL); if (!tmp) return -ENOMEM; for (idx = 0; idx < ((pos & 3) + count + 3) >> 2; idx++) { unsigned int val; val = snd_emu10k1_ptr_read(emu, offset + idx + (pos >> 2), 0); if (tram_addr && emu->audigy) { val >>= 11; val |= snd_emu10k1_ptr_read(emu, 0x100 + idx + (pos >> 2), 0) << 20; } tmp[idx] = val; } if (copy_to_user(buf, ((char *)tmp) + (pos & 3), count)) res = -EFAULT; else res = count; kfree(tmp); return res; } static void snd_emu10k1_proc_voices_read(struct snd_info_entry *entry, struct snd_info_buffer *buffer) { struct snd_emu10k1 *emu = entry->private_data; struct snd_emu10k1_voice *voice; int idx; snd_iprintf(buffer, "ch\tuse\tpcm\tefx\tsynth\tmidi\n"); for (idx = 0; idx < NUM_G; idx++) { voice = &emu->voices[idx]; snd_iprintf(buffer, "%i\t%i\t%i\t%i\t%i\t%i\n", idx, voice->use, voice->pcm, voice->efx, voice->synth, voice->midi); } } #ifdef CONFIG_SND_DEBUG static void snd_emu_proc_emu1010_reg_read(struct snd_info_entry *entry, struct snd_info_buffer *buffer) { struct snd_emu10k1 *emu = entry->private_data; u32 value; unsigned long flags; int i; snd_iprintf(buffer, "EMU1010 Registers:\n\n"); for(i = 0; i < 0x40; i+=1) { spin_lock_irqsave(&emu->emu_lock, flags); snd_emu1010_fpga_read(emu, i, &value); spin_unlock_irqrestore(&emu->emu_lock, flags); snd_iprintf(buffer, "%02X: %08X, %02X\n", i, value, (value >> 8) & 0x7f); } } static void snd_emu_proc_io_reg_read(struct snd_info_entry *entry, struct snd_info_buffer *buffer) { struct snd_emu10k1 *emu = 
entry->private_data; unsigned long value; unsigned long flags; int i; snd_iprintf(buffer, "IO Registers:\n\n"); for(i = 0; i < 0x40; i+=4) { spin_lock_irqsave(&emu->emu_lock, flags); value = inl(emu->port + i); spin_unlock_irqrestore(&emu->emu_lock, flags); snd_iprintf(buffer, "%02X: %08lX\n", i, value); } } static void snd_emu_proc_io_reg_write(struct snd_info_entry *entry, struct snd_info_buffer *buffer) { struct snd_emu10k1 *emu = entry->private_data; unsigned long flags; char line[64]; u32 reg, val; while (!snd_info_get_line(buffer, line, sizeof(line))) { if (sscanf(line, "%x %x", &reg, &val) != 2) continue; if (reg < 0x40 && val <= 0xffffffff) { spin_lock_irqsave(&emu->emu_lock, flags); outl(val, emu->port + (reg & 0xfffffffc)); spin_unlock_irqrestore(&emu->emu_lock, flags); } } } static unsigned int snd_ptr_read(struct snd_emu10k1 * emu, unsigned int iobase, unsigned int reg, unsigned int chn) { unsigned long flags; unsigned int regptr, val; regptr = (reg << 16) | chn; spin_lock_irqsave(&emu->emu_lock, flags); outl(regptr, emu->port + iobase + PTR); val = inl(emu->port + iobase + DATA); spin_unlock_irqrestore(&emu->emu_lock, flags); return val; } static void snd_ptr_write(struct snd_emu10k1 *emu, unsigned int iobase, unsigned int reg, unsigned int chn, unsigned int data) { unsigned int regptr; unsigned long flags; regptr = (reg << 16) | chn; spin_lock_irqsave(&emu->emu_lock, flags); outl(regptr, emu->port + iobase + PTR); outl(data, emu->port + iobase + DATA); spin_unlock_irqrestore(&emu->emu_lock, flags); } static void snd_emu_proc_ptr_reg_read(struct snd_info_entry *entry, struct snd_info_buffer *buffer, int iobase, int offset, int length, int voices) { struct snd_emu10k1 *emu = entry->private_data; unsigned long value; int i,j; if (offset+length > 0xa0) { snd_iprintf(buffer, "Input values out of range\n"); return; } snd_iprintf(buffer, "Registers 0x%x\n", iobase); for(i = offset; i < offset+length; i++) { snd_iprintf(buffer, "%02X: ",i); for (j = 0; j < 
voices; j++) { if(iobase == 0) value = snd_ptr_read(emu, 0, i, j); else value = snd_ptr_read(emu, 0x20, i, j); snd_iprintf(buffer, "%08lX ", value); } snd_iprintf(buffer, "\n"); } } static void snd_emu_proc_ptr_reg_write(struct snd_info_entry *entry, struct snd_info_buffer *buffer, int iobase) { struct snd_emu10k1 *emu = entry->private_data; char line[64]; unsigned int reg, channel_id , val; while (!snd_info_get_line(buffer, line, sizeof(line))) { if (sscanf(line, "%x %x %x", &reg, &channel_id, &val) != 3) continue; if (reg < 0xa0 && val <= 0xffffffff && channel_id <= 3) snd_ptr_write(emu, iobase, reg, channel_id, val); } } static void snd_emu_proc_ptr_reg_write00(struct snd_info_entry *entry, struct snd_info_buffer *buffer) { snd_emu_proc_ptr_reg_write(entry, buffer, 0); } static void snd_emu_proc_ptr_reg_write20(struct snd_info_entry *entry, struct snd_info_buffer *buffer) { snd_emu_proc_ptr_reg_write(entry, buffer, 0x20); } static void snd_emu_proc_ptr_reg_read00a(struct snd_info_entry *entry, struct snd_info_buffer *buffer) { snd_emu_proc_ptr_reg_read(entry, buffer, 0, 0, 0x40, 64); } static void snd_emu_proc_ptr_reg_read00b(struct snd_info_entry *entry, struct snd_info_buffer *buffer) { snd_emu_proc_ptr_reg_read(entry, buffer, 0, 0x40, 0x40, 64); } static void snd_emu_proc_ptr_reg_read20a(struct snd_info_entry *entry, struct snd_info_buffer *buffer) { snd_emu_proc_ptr_reg_read(entry, buffer, 0x20, 0, 0x40, 4); } static void snd_emu_proc_ptr_reg_read20b(struct snd_info_entry *entry, struct snd_info_buffer *buffer) { snd_emu_proc_ptr_reg_read(entry, buffer, 0x20, 0x40, 0x40, 4); } static void snd_emu_proc_ptr_reg_read20c(struct snd_info_entry *entry, struct snd_info_buffer * buffer) { snd_emu_proc_ptr_reg_read(entry, buffer, 0x20, 0x80, 0x20, 4); } #endif static struct snd_info_entry_ops snd_emu10k1_proc_ops_fx8010 = { .read = snd_emu10k1_fx8010_read, }; int __devinit snd_emu10k1_proc_init(struct snd_emu10k1 * emu) { struct snd_info_entry *entry; #ifdef 
CONFIG_SND_DEBUG if (emu->card_capabilities->emu_model) { if (! snd_card_proc_new(emu->card, "emu1010_regs", &entry)) snd_info_set_text_ops(entry, emu, snd_emu_proc_emu1010_reg_read); } if (! snd_card_proc_new(emu->card, "io_regs", &entry)) { snd_info_set_text_ops(entry, emu, snd_emu_proc_io_reg_read); entry->c.text.write = snd_emu_proc_io_reg_write; entry->mode |= S_IWUSR; } if (! snd_card_proc_new(emu->card, "ptr_regs00a", &entry)) { snd_info_set_text_ops(entry, emu, snd_emu_proc_ptr_reg_read00a); entry->c.text.write = snd_emu_proc_ptr_reg_write00; entry->mode |= S_IWUSR; } if (! snd_card_proc_new(emu->card, "ptr_regs00b", &entry)) { snd_info_set_text_ops(entry, emu, snd_emu_proc_ptr_reg_read00b); entry->c.text.write = snd_emu_proc_ptr_reg_write00; entry->mode |= S_IWUSR; } if (! snd_card_proc_new(emu->card, "ptr_regs20a", &entry)) { snd_info_set_text_ops(entry, emu, snd_emu_proc_ptr_reg_read20a); entry->c.text.write = snd_emu_proc_ptr_reg_write20; entry->mode |= S_IWUSR; } if (! snd_card_proc_new(emu->card, "ptr_regs20b", &entry)) { snd_info_set_text_ops(entry, emu, snd_emu_proc_ptr_reg_read20b); entry->c.text.write = snd_emu_proc_ptr_reg_write20; entry->mode |= S_IWUSR; } if (! snd_card_proc_new(emu->card, "ptr_regs20c", &entry)) { snd_info_set_text_ops(entry, emu, snd_emu_proc_ptr_reg_read20c); entry->c.text.write = snd_emu_proc_ptr_reg_write20; entry->mode |= S_IWUSR; } #endif if (! snd_card_proc_new(emu->card, "emu10k1", &entry)) snd_info_set_text_ops(entry, emu, snd_emu10k1_proc_read); if (emu->card_capabilities->emu10k2_chip) { if (! snd_card_proc_new(emu->card, "spdif-in", &entry)) snd_info_set_text_ops(entry, emu, snd_emu10k1_proc_spdif_read); } if (emu->card_capabilities->ca0151_chip) { if (! snd_card_proc_new(emu->card, "capture-rates", &entry)) snd_info_set_text_ops(entry, emu, snd_emu10k1_proc_rates_read); } if (! snd_card_proc_new(emu->card, "voices", &entry)) snd_info_set_text_ops(entry, emu, snd_emu10k1_proc_voices_read); if (! 
snd_card_proc_new(emu->card, "fx8010_gpr", &entry)) { entry->content = SNDRV_INFO_CONTENT_DATA; entry->private_data = emu; entry->mode = S_IFREG | S_IRUGO /*| S_IWUSR*/; entry->size = emu->audigy ? A_TOTAL_SIZE_GPR : TOTAL_SIZE_GPR; entry->c.ops = &snd_emu10k1_proc_ops_fx8010; } if (! snd_card_proc_new(emu->card, "fx8010_tram_data", &entry)) { entry->content = SNDRV_INFO_CONTENT_DATA; entry->private_data = emu; entry->mode = S_IFREG | S_IRUGO /*| S_IWUSR*/; entry->size = emu->audigy ? A_TOTAL_SIZE_TANKMEM_DATA : TOTAL_SIZE_TANKMEM_DATA ; entry->c.ops = &snd_emu10k1_proc_ops_fx8010; } if (! snd_card_proc_new(emu->card, "fx8010_tram_addr", &entry)) { entry->content = SNDRV_INFO_CONTENT_DATA; entry->private_data = emu; entry->mode = S_IFREG | S_IRUGO /*| S_IWUSR*/; entry->size = emu->audigy ? A_TOTAL_SIZE_TANKMEM_ADDR : TOTAL_SIZE_TANKMEM_ADDR ; entry->c.ops = &snd_emu10k1_proc_ops_fx8010; } if (! snd_card_proc_new(emu->card, "fx8010_code", &entry)) { entry->content = SNDRV_INFO_CONTENT_DATA; entry->private_data = emu; entry->mode = S_IFREG | S_IRUGO /*| S_IWUSR*/; entry->size = emu->audigy ? A_TOTAL_SIZE_CODE : TOTAL_SIZE_CODE; entry->c.ops = &snd_emu10k1_proc_ops_fx8010; } if (! snd_card_proc_new(emu->card, "fx8010_acode", &entry)) { entry->content = SNDRV_INFO_CONTENT_TEXT; entry->private_data = emu; entry->mode = S_IFREG | S_IRUGO /*| S_IWUSR*/; entry->c.text.read = snd_emu10k1_proc_acode_read; } return 0; } #endif /* CONFIG_PROC_FS */
gpl-2.0
ausdim/TW-jb-Edition-I9505-jfltexx
drivers/net/ethernet/chelsio/cxgb3/mc5.c
11654
13000
/* * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ #include "common.h" #include "regs.h" enum { IDT75P52100 = 4, IDT75N43102 = 5 }; /* DBGI command mode */ enum { DBGI_MODE_MBUS = 0, DBGI_MODE_IDT52100 = 5 }; /* IDT 75P52100 commands */ #define IDT_CMD_READ 0 #define IDT_CMD_WRITE 1 #define IDT_CMD_SEARCH 2 #define IDT_CMD_LEARN 3 /* IDT LAR register address and value for 144-bit mode (low 32 bits) */ #define IDT_LAR_ADR0 0x180006 #define IDT_LAR_MODE144 0xffff0000 /* IDT SCR and SSR addresses (low 32 bits) */ #define IDT_SCR_ADR0 0x180000 #define IDT_SSR0_ADR0 0x180002 #define IDT_SSR1_ADR0 0x180004 /* IDT GMR base address (low 32 bits) */ #define IDT_GMR_BASE_ADR0 0x180020 /* IDT data and mask array base addresses (low 32 bits) */ #define IDT_DATARY_BASE_ADR0 0 #define IDT_MSKARY_BASE_ADR0 0x80000 /* IDT 75N43102 commands */ #define IDT4_CMD_SEARCH144 3 #define IDT4_CMD_WRITE 4 #define IDT4_CMD_READ 5 /* IDT 75N43102 SCR address (low 32 bits) */ #define IDT4_SCR_ADR0 0x3 /* IDT 75N43102 GMR base addresses (low 32 bits) */ #define IDT4_GMR_BASE0 0x10 #define IDT4_GMR_BASE1 0x20 #define IDT4_GMR_BASE2 0x30 /* IDT 75N43102 data and mask array base addresses (low 32 bits) */ #define IDT4_DATARY_BASE_ADR0 0x1000000 #define IDT4_MSKARY_BASE_ADR0 0x2000000 #define MAX_WRITE_ATTEMPTS 5 #define MAX_ROUTES 2048 /* * Issue a command to the TCAM and wait for its completion. The address and * any data required by the command must have been setup by the caller. 
*/ static int mc5_cmd_write(struct adapter *adapter, u32 cmd) { t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_CMD, cmd); return t3_wait_op_done(adapter, A_MC5_DB_DBGI_RSP_STATUS, F_DBGIRSPVALID, 1, MAX_WRITE_ATTEMPTS, 1); } static inline void dbgi_wr_addr3(struct adapter *adapter, u32 v1, u32 v2, u32 v3) { t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_ADDR0, v1); t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_ADDR1, v2); t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_ADDR2, v3); } static inline void dbgi_wr_data3(struct adapter *adapter, u32 v1, u32 v2, u32 v3) { t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_DATA0, v1); t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_DATA1, v2); t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_DATA2, v3); } static inline void dbgi_rd_rsp3(struct adapter *adapter, u32 *v1, u32 *v2, u32 *v3) { *v1 = t3_read_reg(adapter, A_MC5_DB_DBGI_RSP_DATA0); *v2 = t3_read_reg(adapter, A_MC5_DB_DBGI_RSP_DATA1); *v3 = t3_read_reg(adapter, A_MC5_DB_DBGI_RSP_DATA2); } /* * Write data to the TCAM register at address (0, 0, addr_lo) using the TCAM * command cmd. The data to be written must have been set up by the caller. * Returns -1 on failure, 0 on success. */ static int mc5_write(struct adapter *adapter, u32 addr_lo, u32 cmd) { t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_ADDR0, addr_lo); if (mc5_cmd_write(adapter, cmd) == 0) return 0; CH_ERR(adapter, "MC5 timeout writing to TCAM address 0x%x\n", addr_lo); return -1; } static int init_mask_data_array(struct mc5 *mc5, u32 mask_array_base, u32 data_array_base, u32 write_cmd, int addr_shift) { unsigned int i; struct adapter *adap = mc5->adapter; /* * We need the size of the TCAM data and mask arrays in terms of * 72-bit entries. 
*/ unsigned int size72 = mc5->tcam_size; unsigned int server_base = t3_read_reg(adap, A_MC5_DB_SERVER_INDEX); if (mc5->mode == MC5_MODE_144_BIT) { size72 *= 2; /* 1 144-bit entry is 2 72-bit entries */ server_base *= 2; } /* Clear the data array */ dbgi_wr_data3(adap, 0, 0, 0); for (i = 0; i < size72; i++) if (mc5_write(adap, data_array_base + (i << addr_shift), write_cmd)) return -1; /* Initialize the mask array. */ dbgi_wr_data3(adap, 0xffffffff, 0xffffffff, 0xff); for (i = 0; i < size72; i++) { if (i == server_base) /* entering server or routing region */ t3_write_reg(adap, A_MC5_DB_DBGI_REQ_DATA0, mc5->mode == MC5_MODE_144_BIT ? 0xfffffff9 : 0xfffffffd); if (mc5_write(adap, mask_array_base + (i << addr_shift), write_cmd)) return -1; } return 0; } static int init_idt52100(struct mc5 *mc5) { int i; struct adapter *adap = mc5->adapter; t3_write_reg(adap, A_MC5_DB_RSP_LATENCY, V_RDLAT(0x15) | V_LRNLAT(0x15) | V_SRCHLAT(0x15)); t3_write_reg(adap, A_MC5_DB_PART_ID_INDEX, 2); /* * Use GMRs 14-15 for ELOOKUP, GMRs 12-13 for SYN lookups, and * GMRs 8-9 for ACK- and AOPEN searches. */ t3_write_reg(adap, A_MC5_DB_POPEN_DATA_WR_CMD, IDT_CMD_WRITE); t3_write_reg(adap, A_MC5_DB_POPEN_MASK_WR_CMD, IDT_CMD_WRITE); t3_write_reg(adap, A_MC5_DB_AOPEN_SRCH_CMD, IDT_CMD_SEARCH); t3_write_reg(adap, A_MC5_DB_AOPEN_LRN_CMD, IDT_CMD_LEARN); t3_write_reg(adap, A_MC5_DB_SYN_SRCH_CMD, IDT_CMD_SEARCH | 0x6000); t3_write_reg(adap, A_MC5_DB_SYN_LRN_CMD, IDT_CMD_LEARN); t3_write_reg(adap, A_MC5_DB_ACK_SRCH_CMD, IDT_CMD_SEARCH); t3_write_reg(adap, A_MC5_DB_ACK_LRN_CMD, IDT_CMD_LEARN); t3_write_reg(adap, A_MC5_DB_ILOOKUP_CMD, IDT_CMD_SEARCH); t3_write_reg(adap, A_MC5_DB_ELOOKUP_CMD, IDT_CMD_SEARCH | 0x7000); t3_write_reg(adap, A_MC5_DB_DATA_WRITE_CMD, IDT_CMD_WRITE); t3_write_reg(adap, A_MC5_DB_DATA_READ_CMD, IDT_CMD_READ); /* Set DBGI command mode for IDT TCAM. 
*/ t3_write_reg(adap, A_MC5_DB_DBGI_CONFIG, DBGI_MODE_IDT52100); /* Set up LAR */ dbgi_wr_data3(adap, IDT_LAR_MODE144, 0, 0); if (mc5_write(adap, IDT_LAR_ADR0, IDT_CMD_WRITE)) goto err; /* Set up SSRs */ dbgi_wr_data3(adap, 0xffffffff, 0xffffffff, 0); if (mc5_write(adap, IDT_SSR0_ADR0, IDT_CMD_WRITE) || mc5_write(adap, IDT_SSR1_ADR0, IDT_CMD_WRITE)) goto err; /* Set up GMRs */ for (i = 0; i < 32; ++i) { if (i >= 12 && i < 15) dbgi_wr_data3(adap, 0xfffffff9, 0xffffffff, 0xff); else if (i == 15) dbgi_wr_data3(adap, 0xfffffff9, 0xffff8007, 0xff); else dbgi_wr_data3(adap, 0xffffffff, 0xffffffff, 0xff); if (mc5_write(adap, IDT_GMR_BASE_ADR0 + i, IDT_CMD_WRITE)) goto err; } /* Set up SCR */ dbgi_wr_data3(adap, 1, 0, 0); if (mc5_write(adap, IDT_SCR_ADR0, IDT_CMD_WRITE)) goto err; return init_mask_data_array(mc5, IDT_MSKARY_BASE_ADR0, IDT_DATARY_BASE_ADR0, IDT_CMD_WRITE, 0); err: return -EIO; } static int init_idt43102(struct mc5 *mc5) { int i; struct adapter *adap = mc5->adapter; t3_write_reg(adap, A_MC5_DB_RSP_LATENCY, adap->params.rev == 0 ? V_RDLAT(0xd) | V_SRCHLAT(0x11) : V_RDLAT(0xd) | V_SRCHLAT(0x12)); /* * Use GMRs 24-25 for ELOOKUP, GMRs 20-21 for SYN lookups, and no mask * for ACK- and AOPEN searches. */ t3_write_reg(adap, A_MC5_DB_POPEN_DATA_WR_CMD, IDT4_CMD_WRITE); t3_write_reg(adap, A_MC5_DB_POPEN_MASK_WR_CMD, IDT4_CMD_WRITE); t3_write_reg(adap, A_MC5_DB_AOPEN_SRCH_CMD, IDT4_CMD_SEARCH144 | 0x3800); t3_write_reg(adap, A_MC5_DB_SYN_SRCH_CMD, IDT4_CMD_SEARCH144); t3_write_reg(adap, A_MC5_DB_ACK_SRCH_CMD, IDT4_CMD_SEARCH144 | 0x3800); t3_write_reg(adap, A_MC5_DB_ILOOKUP_CMD, IDT4_CMD_SEARCH144 | 0x3800); t3_write_reg(adap, A_MC5_DB_ELOOKUP_CMD, IDT4_CMD_SEARCH144 | 0x800); t3_write_reg(adap, A_MC5_DB_DATA_WRITE_CMD, IDT4_CMD_WRITE); t3_write_reg(adap, A_MC5_DB_DATA_READ_CMD, IDT4_CMD_READ); t3_write_reg(adap, A_MC5_DB_PART_ID_INDEX, 3); /* Set DBGI command mode for IDT TCAM. 
*/ t3_write_reg(adap, A_MC5_DB_DBGI_CONFIG, DBGI_MODE_IDT52100); /* Set up GMRs */ dbgi_wr_data3(adap, 0xffffffff, 0xffffffff, 0xff); for (i = 0; i < 7; ++i) if (mc5_write(adap, IDT4_GMR_BASE0 + i, IDT4_CMD_WRITE)) goto err; for (i = 0; i < 4; ++i) if (mc5_write(adap, IDT4_GMR_BASE2 + i, IDT4_CMD_WRITE)) goto err; dbgi_wr_data3(adap, 0xfffffff9, 0xffffffff, 0xff); if (mc5_write(adap, IDT4_GMR_BASE1, IDT4_CMD_WRITE) || mc5_write(adap, IDT4_GMR_BASE1 + 1, IDT4_CMD_WRITE) || mc5_write(adap, IDT4_GMR_BASE1 + 4, IDT4_CMD_WRITE)) goto err; dbgi_wr_data3(adap, 0xfffffff9, 0xffff8007, 0xff); if (mc5_write(adap, IDT4_GMR_BASE1 + 5, IDT4_CMD_WRITE)) goto err; /* Set up SCR */ dbgi_wr_data3(adap, 0xf0000000, 0, 0); if (mc5_write(adap, IDT4_SCR_ADR0, IDT4_CMD_WRITE)) goto err; return init_mask_data_array(mc5, IDT4_MSKARY_BASE_ADR0, IDT4_DATARY_BASE_ADR0, IDT4_CMD_WRITE, 1); err: return -EIO; } /* Put MC5 in DBGI mode. */ static inline void mc5_dbgi_mode_enable(const struct mc5 *mc5) { t3_write_reg(mc5->adapter, A_MC5_DB_CONFIG, V_TMMODE(mc5->mode == MC5_MODE_72_BIT) | F_DBGIEN); } /* Put MC5 in M-Bus mode. */ static void mc5_dbgi_mode_disable(const struct mc5 *mc5) { t3_write_reg(mc5->adapter, A_MC5_DB_CONFIG, V_TMMODE(mc5->mode == MC5_MODE_72_BIT) | V_COMPEN(mc5->mode == MC5_MODE_72_BIT) | V_PRTYEN(mc5->parity_enabled) | F_MBUSEN); } /* * Initialization that requires the OS and protocol layers to already * be initialized goes here. 
*/ int t3_mc5_init(struct mc5 *mc5, unsigned int nservers, unsigned int nfilters, unsigned int nroutes) { u32 cfg; int err; unsigned int tcam_size = mc5->tcam_size; struct adapter *adap = mc5->adapter; if (!tcam_size) return 0; if (nroutes > MAX_ROUTES || nroutes + nservers + nfilters > tcam_size) return -EINVAL; /* Reset the TCAM */ cfg = t3_read_reg(adap, A_MC5_DB_CONFIG) & ~F_TMMODE; cfg |= V_TMMODE(mc5->mode == MC5_MODE_72_BIT) | F_TMRST; t3_write_reg(adap, A_MC5_DB_CONFIG, cfg); if (t3_wait_op_done(adap, A_MC5_DB_CONFIG, F_TMRDY, 1, 500, 0)) { CH_ERR(adap, "TCAM reset timed out\n"); return -1; } t3_write_reg(adap, A_MC5_DB_ROUTING_TABLE_INDEX, tcam_size - nroutes); t3_write_reg(adap, A_MC5_DB_FILTER_TABLE, tcam_size - nroutes - nfilters); t3_write_reg(adap, A_MC5_DB_SERVER_INDEX, tcam_size - nroutes - nfilters - nservers); mc5->parity_enabled = 1; /* All the TCAM addresses we access have only the low 32 bits non 0 */ t3_write_reg(adap, A_MC5_DB_DBGI_REQ_ADDR1, 0); t3_write_reg(adap, A_MC5_DB_DBGI_REQ_ADDR2, 0); mc5_dbgi_mode_enable(mc5); switch (mc5->part_type) { case IDT75P52100: err = init_idt52100(mc5); break; case IDT75N43102: err = init_idt43102(mc5); break; default: CH_ERR(adap, "Unsupported TCAM type %d\n", mc5->part_type); err = -EINVAL; break; } mc5_dbgi_mode_disable(mc5); return err; } #define MC5_INT_FATAL (F_PARITYERR | F_REQQPARERR | F_DISPQPARERR) /* * MC5 interrupt handler */ void t3_mc5_intr_handler(struct mc5 *mc5) { struct adapter *adap = mc5->adapter; u32 cause = t3_read_reg(adap, A_MC5_DB_INT_CAUSE); if ((cause & F_PARITYERR) && mc5->parity_enabled) { CH_ALERT(adap, "MC5 parity error\n"); mc5->stats.parity_err++; } if (cause & F_REQQPARERR) { CH_ALERT(adap, "MC5 request queue parity error\n"); mc5->stats.reqq_parity_err++; } if (cause & F_DISPQPARERR) { CH_ALERT(adap, "MC5 dispatch queue parity error\n"); mc5->stats.dispq_parity_err++; } if (cause & F_ACTRGNFULL) mc5->stats.active_rgn_full++; if (cause & F_NFASRCHFAIL) 
mc5->stats.nfa_srch_err++; if (cause & F_UNKNOWNCMD) mc5->stats.unknown_cmd++; if (cause & F_DELACTEMPTY) mc5->stats.del_act_empty++; if (cause & MC5_INT_FATAL) t3_fatal_err(adap); t3_write_reg(adap, A_MC5_DB_INT_CAUSE, cause); } void t3_mc5_prep(struct adapter *adapter, struct mc5 *mc5, int mode) { #define K * 1024 static unsigned int tcam_part_size[] = { /* in K 72-bit entries */ 64 K, 128 K, 256 K, 32 K }; #undef K u32 cfg = t3_read_reg(adapter, A_MC5_DB_CONFIG); mc5->adapter = adapter; mc5->mode = (unsigned char)mode; mc5->part_type = (unsigned char)G_TMTYPE(cfg); if (cfg & F_TMTYPEHI) mc5->part_type |= 4; mc5->tcam_size = tcam_part_size[G_TMPARTSIZE(cfg)]; if (mode == MC5_MODE_144_BIT) mc5->tcam_size /= 2; }
gpl-2.0
Metallium-Devices/android_kernel_lge_msm8974
lib/crc-t10dif.c
12422
2965
/* * T10 Data Integrity Field CRC16 calculation * * Copyright (c) 2007 Oracle Corporation. All rights reserved. * Written by Martin K. Petersen <martin.petersen@oracle.com> * * This source code is licensed under the GNU General Public License, * Version 2. See the file COPYING for more details. */ #include <linux/types.h> #include <linux/module.h> #include <linux/crc-t10dif.h> /* Table generated using the following polynomium: * x^16 + x^15 + x^11 + x^9 + x^8 + x^7 + x^5 + x^4 + x^2 + x + 1 * gt: 0x8bb7 */ static const __u16 t10_dif_crc_table[256] = { 0x0000, 0x8BB7, 0x9CD9, 0x176E, 0xB205, 0x39B2, 0x2EDC, 0xA56B, 0xEFBD, 0x640A, 0x7364, 0xF8D3, 0x5DB8, 0xD60F, 0xC161, 0x4AD6, 0x54CD, 0xDF7A, 0xC814, 0x43A3, 0xE6C8, 0x6D7F, 0x7A11, 0xF1A6, 0xBB70, 0x30C7, 0x27A9, 0xAC1E, 0x0975, 0x82C2, 0x95AC, 0x1E1B, 0xA99A, 0x222D, 0x3543, 0xBEF4, 0x1B9F, 0x9028, 0x8746, 0x0CF1, 0x4627, 0xCD90, 0xDAFE, 0x5149, 0xF422, 0x7F95, 0x68FB, 0xE34C, 0xFD57, 0x76E0, 0x618E, 0xEA39, 0x4F52, 0xC4E5, 0xD38B, 0x583C, 0x12EA, 0x995D, 0x8E33, 0x0584, 0xA0EF, 0x2B58, 0x3C36, 0xB781, 0xD883, 0x5334, 0x445A, 0xCFED, 0x6A86, 0xE131, 0xF65F, 0x7DE8, 0x373E, 0xBC89, 0xABE7, 0x2050, 0x853B, 0x0E8C, 0x19E2, 0x9255, 0x8C4E, 0x07F9, 0x1097, 0x9B20, 0x3E4B, 0xB5FC, 0xA292, 0x2925, 0x63F3, 0xE844, 0xFF2A, 0x749D, 0xD1F6, 0x5A41, 0x4D2F, 0xC698, 0x7119, 0xFAAE, 0xEDC0, 0x6677, 0xC31C, 0x48AB, 0x5FC5, 0xD472, 0x9EA4, 0x1513, 0x027D, 0x89CA, 0x2CA1, 0xA716, 0xB078, 0x3BCF, 0x25D4, 0xAE63, 0xB90D, 0x32BA, 0x97D1, 0x1C66, 0x0B08, 0x80BF, 0xCA69, 0x41DE, 0x56B0, 0xDD07, 0x786C, 0xF3DB, 0xE4B5, 0x6F02, 0x3AB1, 0xB106, 0xA668, 0x2DDF, 0x88B4, 0x0303, 0x146D, 0x9FDA, 0xD50C, 0x5EBB, 0x49D5, 0xC262, 0x6709, 0xECBE, 0xFBD0, 0x7067, 0x6E7C, 0xE5CB, 0xF2A5, 0x7912, 0xDC79, 0x57CE, 0x40A0, 0xCB17, 0x81C1, 0x0A76, 0x1D18, 0x96AF, 0x33C4, 0xB873, 0xAF1D, 0x24AA, 0x932B, 0x189C, 0x0FF2, 0x8445, 0x212E, 0xAA99, 0xBDF7, 0x3640, 0x7C96, 0xF721, 0xE04F, 0x6BF8, 0xCE93, 0x4524, 0x524A, 0xD9FD, 0xC7E6, 0x4C51, 0x5B3F, 0xD088, 
0x75E3, 0xFE54, 0xE93A, 0x628D, 0x285B, 0xA3EC, 0xB482, 0x3F35, 0x9A5E, 0x11E9, 0x0687, 0x8D30, 0xE232, 0x6985, 0x7EEB, 0xF55C, 0x5037, 0xDB80, 0xCCEE, 0x4759, 0x0D8F, 0x8638, 0x9156, 0x1AE1, 0xBF8A, 0x343D, 0x2353, 0xA8E4, 0xB6FF, 0x3D48, 0x2A26, 0xA191, 0x04FA, 0x8F4D, 0x9823, 0x1394, 0x5942, 0xD2F5, 0xC59B, 0x4E2C, 0xEB47, 0x60F0, 0x779E, 0xFC29, 0x4BA8, 0xC01F, 0xD771, 0x5CC6, 0xF9AD, 0x721A, 0x6574, 0xEEC3, 0xA415, 0x2FA2, 0x38CC, 0xB37B, 0x1610, 0x9DA7, 0x8AC9, 0x017E, 0x1F65, 0x94D2, 0x83BC, 0x080B, 0xAD60, 0x26D7, 0x31B9, 0xBA0E, 0xF0D8, 0x7B6F, 0x6C01, 0xE7B6, 0x42DD, 0xC96A, 0xDE04, 0x55B3 }; __u16 crc_t10dif(const unsigned char *buffer, size_t len) { __u16 crc = 0; unsigned int i; for (i = 0 ; i < len ; i++) crc = (crc << 8) ^ t10_dif_crc_table[((crc >> 8) ^ buffer[i]) & 0xff]; return crc; } EXPORT_SYMBOL(crc_t10dif); MODULE_DESCRIPTION("T10 DIF CRC calculation"); MODULE_LICENSE("GPL");
gpl-2.0
andi34/kernel_oneplus_msm8974
drivers/base/module.c
12934
1985
/*
 * module.c - module sysfs fun for drivers
 *
 * This file is released under the GPLv2
 *
 */
#include <linux/device.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/string.h>
#include "base.h"

/*
 * Build the "<bus>:<driver>" name used for the symlink placed in the
 * module's drivers/ directory.
 *
 * Returns a kmalloc'd string that the caller must kfree(), or NULL on
 * allocation failure.
 */
static char *make_driver_name(struct device_driver *drv)
{
	char *driver_name;

	driver_name = kasprintf(GFP_KERNEL, "%s:%s", drv->bus->name,
				drv->name);
	if (!driver_name)
		return NULL;

	return driver_name;
}

/*
 * Lazily create the "drivers" directory under the module's kobject.
 * Safe to call repeatedly: a NULL mk or an already-created directory is
 * a no-op.  On kobject_create_and_add() failure drivers_dir stays NULL,
 * which the callers tolerate.
 */
static void module_create_drivers_dir(struct module_kobject *mk)
{
	if (!mk || mk->drivers_dir)
		return;

	mk->drivers_dir = kobject_create_and_add("drivers", &mk->kobj);
}

/*
 * module_add_driver - link a registered driver to its owning module in sysfs.
 * @mod: owning module, or NULL for a built-in driver (looked up by
 *       drv->mod_name in the module kset instead)
 * @drv: the driver being registered
 *
 * Creates the <driver>/module symlink and the
 * /sys/module/<mod>/drivers/<bus>:<drv> symlink.  Link-creation failures
 * are intentionally ignored (the links are cosmetic and the calls are
 * idempotent).
 */
void module_add_driver(struct module *mod, struct device_driver *drv)
{
	char *driver_name;
	int no_warn;
	struct module_kobject *mk = NULL;

	if (!drv)
		return;

	if (mod)
		mk = &mod->mkobj;
	else if (drv->mod_name) {
		struct kobject *mkobj;

		/* Lookup built-in module entry in /sys/modules */
		mkobj = kset_find_obj(module_kset, drv->mod_name);
		if (mkobj) {
			mk = container_of(mkobj, struct module_kobject, kobj);
			/* remember our module structure */
			drv->p->mkobj = mk;
			/* kset_find_obj took a reference */
			kobject_put(mkobj);
		}
	}

	/* Neither an owning module nor a built-in entry: nothing to link. */
	if (!mk)
		return;

	/* Don't check return codes; these calls are idempotent */
	no_warn = sysfs_create_link(&drv->p->kobj, &mk->kobj, "module");
	driver_name = make_driver_name(drv);
	if (driver_name) {
		module_create_drivers_dir(mk);
		no_warn = sysfs_create_link(mk->drivers_dir, &drv->p->kobj,
					    driver_name);
		kfree(driver_name);
	}
}

/*
 * module_remove_driver - undo module_add_driver() for an unregistering driver.
 * @drv: the driver being removed
 *
 * Removes the <driver>/module symlink and, when the owning module (or the
 * built-in entry cached in drv->p->mkobj) is known, the matching
 * drivers/<bus>:<drv> symlink.
 */
void module_remove_driver(struct device_driver *drv)
{
	struct module_kobject *mk = NULL;
	char *driver_name;

	if (!drv)
		return;

	sysfs_remove_link(&drv->p->kobj, "module");

	if (drv->owner)
		mk = &drv->owner->mkobj;
	else if (drv->p->mkobj)
		mk = drv->p->mkobj;
	if (mk && mk->drivers_dir) {
		driver_name = make_driver_name(drv);
		if (driver_name) {
			sysfs_remove_link(mk->drivers_dir, driver_name);
			kfree(driver_name);
		}
	}
}
gpl-2.0
CyanHacker-Lollipop/kernel_google_msm
drivers/input/lid.c
135
5278
/* * ASUS Lid driver. */ #include <linux/module.h> #include <linux/err.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/init.h> #include <linux/input.h> #include <linux/platform_device.h> #include <linux/workqueue.h> #include <linux/gpio_event.h> #include <linux/gpio.h> #define LID_DEBUG 0 #define CONVERSION_TIME_MS 50 #if LID_DEBUG #define LID_INFO(format, arg...) \ pr_info("hall_sensor: [%s] " format , __func__ , ##arg) #else #define LID_INFO(format, arg...) do { } while (0) #endif #define LID_NOTICE(format, arg...) \ pr_notice("hall_sensor: [%s] " format , __func__ , ##arg) #define LID_ERR(format, arg...) \ pr_err("hall_sensor: [%s] " format , __func__ , ##arg) struct delayed_work lid_hall_sensor_work; /* * functions declaration */ static void lid_report_function(struct work_struct *dat); static int lid_input_device_create(void); static ssize_t show_lid_status(struct device *class, struct device_attribute *attr, char *buf); /* * global variable */ static unsigned int hall_sensor_gpio = 36; static int hall_sensor_irq; static struct workqueue_struct *lid_wq; static struct input_dev *lid_indev; static DEVICE_ATTR(lid_status, S_IWUSR | S_IRUGO, show_lid_status, NULL); /* Attribute Descriptor */ static struct attribute *lid_attrs[] = { &dev_attr_lid_status.attr, NULL }; /* Attribute group */ static struct attribute_group lid_attr_group = { .attrs = lid_attrs, }; static ssize_t show_lid_status(struct device *class, struct device_attribute *attr, char *buf) { char *s = buf; s += sprintf(buf, "%u\n", gpio_get_value(hall_sensor_gpio) ? 
1 : 0); return s - buf; } static irqreturn_t lid_interrupt_handler(int irq, void *dev_id) { if (irq == hall_sensor_irq) { LID_NOTICE("LID interrupt handler...gpio: %d..\n", gpio_get_value(hall_sensor_gpio)); queue_delayed_work(lid_wq, &lid_hall_sensor_work, 0); } return IRQ_HANDLED; } static void lid_report_function(struct work_struct *dat) { int value = 0; if (!lid_indev) { LID_ERR("LID input device doesn't exist\n"); return; } msleep(CONVERSION_TIME_MS); value = gpio_get_value(hall_sensor_gpio) ? 1 : 0; input_report_switch(lid_indev, SW_LID, !value); input_sync(lid_indev); LID_NOTICE("SW_LID report value = %d\n", value); } static int lid_input_device_create(void){ int err = 0; lid_indev = input_allocate_device(); if (!lid_indev) { LID_ERR("lid_indev allocation fails\n"); err = -ENOMEM; goto exit; } lid_indev->name = "lid_input"; lid_indev->phys = "/dev/input/lid_indev"; set_bit(EV_SW, lid_indev->evbit); set_bit(SW_LID, lid_indev->swbit); err = input_register_device(lid_indev); if (err) { LID_ERR("lid_indev registration fails\n"); goto exit_input_free; } return 0; exit_input_free: input_free_device(lid_indev); lid_indev = NULL; exit: return err; } static int __init lid_driver_probe(struct platform_device *pdev) { int ret = 0, irq = 0; unsigned long irqflags; if (!pdev) return -EINVAL; pr_info("ASUSTek: %s", __func__); ret = sysfs_create_group(&pdev->dev.kobj, &lid_attr_group); if (ret) { LID_ERR("Unable to create sysfs, error: %d\n", ret); goto fail_sysfs; } ret = lid_input_device_create(); if (ret) { LID_ERR( "Unable to register input device, error: %d\n", ret); goto fail_create; } lid_wq = create_singlethread_workqueue("lid_wq"); if(!lid_wq){ LID_ERR("Unable to create workqueue\n"); goto fail_create; } if (!gpio_is_valid(hall_sensor_gpio)) { LID_ERR("Invalid GPIO %d\n", hall_sensor_gpio); goto fail_create; } ret = gpio_request(hall_sensor_gpio, "LID"); if (ret < 0) { LID_ERR("Failed to request GPIO %d\n", hall_sensor_gpio); goto fail_create; } ret = 
gpio_direction_input(hall_sensor_gpio); if (ret < 0) { LID_ERR( "Failed to configure direction for GPIO %d\n", hall_sensor_gpio); goto fail_free; } irq = gpio_to_irq(hall_sensor_gpio); hall_sensor_irq = irq; if (irq < 0) { LID_ERR("Unable to get irq number for GPIO %d\n", hall_sensor_gpio); goto fail_free; } irqflags = IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING; ret = request_any_context_irq(irq, lid_interrupt_handler, irqflags, "hall_sensor", lid_indev); if (ret < 0) { LID_ERR("Unable to claim irq %d\n", irq); goto fail_free; } device_init_wakeup(&pdev->dev, 1); enable_irq_wake(irq); INIT_DELAYED_WORK_DEFERRABLE(&lid_hall_sensor_work, lid_report_function); return ret; fail_free: gpio_free(hall_sensor_gpio); fail_create: sysfs_remove_group(&pdev->dev.kobj, &lid_attr_group); fail_sysfs: return ret; } static int __devexit lid_driver_remove(struct platform_device *pdev) { sysfs_remove_group(&pdev->dev.kobj, &lid_attr_group); free_irq(hall_sensor_irq, NULL); cancel_delayed_work_sync(&lid_hall_sensor_work); if (gpio_is_valid(hall_sensor_gpio)) gpio_free(hall_sensor_gpio); input_unregister_device(lid_indev); device_init_wakeup(&pdev->dev, 0); return 0; } static struct platform_driver asustek_lid_driver __refdata = { .probe = lid_driver_probe, .remove = __devexit_p(lid_driver_remove), .driver = { .name = "asustek_lid", .owner = THIS_MODULE, }, }; module_platform_driver(asustek_lid_driver); MODULE_DESCRIPTION("Hall Sensor Driver"); MODULE_LICENSE("GPL");
gpl-2.0
MrStaticVoid/hostap
src/crypto/md5.c
135
2834
/*
 * MD5 hash implementation and interface functions
 * Copyright (c) 2003-2005, Jouni Malinen <j@w1.fi>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Alternatively, this software may be distributed under the terms of BSD
 * license.
 *
 * See README and COPYING for more details.
 */

#include "includes.h"

#include "common.h"
#include "md5.h"
#include "crypto.h"


/**
 * hmac_md5_vector - HMAC-MD5 over data vector (RFC 2104)
 * @key: Key for HMAC operations
 * @key_len: Length of the key in bytes
 * @num_elem: Number of elements in the data vector (at most 5)
 * @addr: Pointers to the data areas
 * @len: Lengths of the data blocks
 * @mac: Buffer for the hash (16 bytes)
 * Returns: 0 on success, -1 on failure
 */
int hmac_md5_vector(const u8 *key, size_t key_len, size_t num_elem,
		    const u8 *addr[], const size_t *len, u8 *mac)
{
	u8 k_pad[64]; /* padding - key XORd with ipad/opad */
	u8 tk[16]; /* holds MD5(key) when the key is longer than one block */
	const u8 *_addr[6]; /* k_pad plus up to 5 caller fragments */
	size_t i, _len[6];

	if (num_elem > 5) {
		/*
		 * Fixed limit on the number of fragments to avoid having to
		 * allocate memory (which could fail).
		 */
		return -1;
	}

        /* if key is longer than 64 bytes reset it to key = MD5(key) */
        if (key_len > 64) {
		if (md5_vector(1, &key, &key_len, tk))
			return -1;
		key = tk;
		key_len = 16;
        }

	/* the HMAC_MD5 transform looks like:
	 *
	 * MD5(K XOR opad, MD5(K XOR ipad, text))
	 *
	 * where K is an n byte key
	 * ipad is the byte 0x36 repeated 64 times
	 * opad is the byte 0x5c repeated 64 times
	 * and text is the data being protected */

	/* start out by storing key in ipad */
	os_memset(k_pad, 0, sizeof(k_pad));
	os_memcpy(k_pad, key, key_len);

	/* XOR key with ipad values */
	for (i = 0; i < 64; i++)
		k_pad[i] ^= 0x36;

	/* perform inner MD5 */
	_addr[0] = k_pad;
	_len[0] = 64;
	for (i = 0; i < num_elem; i++) {
		_addr[i + 1] = addr[i];
		_len[i + 1] = len[i];
	}
	/* inner digest is written into mac and reused as outer input below */
	if (md5_vector(1 + num_elem, _addr, _len, mac))
		return -1;

	/* rebuild the padded key block, this time XORed with opad */
	os_memset(k_pad, 0, sizeof(k_pad));
	os_memcpy(k_pad, key, key_len);

	/* XOR key with opad values */
	for (i = 0; i < 64; i++)
		k_pad[i] ^= 0x5c;

	/* perform outer MD5 */
	_addr[0] = k_pad;
	_len[0] = 64;
	_addr[1] = mac;
	_len[1] = MD5_MAC_LEN;
	return md5_vector(2, _addr, _len, mac);
}


/**
 * hmac_md5 - HMAC-MD5 over data buffer (RFC 2104)
 * @key: Key for HMAC operations
 * @key_len: Length of the key in bytes
 * @data: Pointers to the data area
 * @data_len: Length of the data area
 * @mac: Buffer for the hash (16 bytes)
 * Returns: 0 on success, -1 on failure
 */
int hmac_md5(const u8 *key, size_t key_len, const u8 *data, size_t data_len,
	     u8 *mac)
{
	/* single-fragment convenience wrapper around hmac_md5_vector() */
	return hmac_md5_vector(key, key_len, 1, &data, &data_len, mac);
}
gpl-2.0
nikez/android_external_wpa_supplicant_8
src/crypto/md4-internal.c
135
8278
/* * MD4 hash implementation * Copyright (c) 2006, Jouni Malinen <j@w1.fi> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * Alternatively, this software may be distributed under the terms of BSD * license. * * See README and COPYING for more details. */ #include "includes.h" #include "common.h" #include "crypto.h" #define MD4_BLOCK_LENGTH 64 #define MD4_DIGEST_LENGTH 16 typedef struct MD4Context { u32 state[4]; /* state */ u64 count; /* number of bits, mod 2^64 */ u8 buffer[MD4_BLOCK_LENGTH]; /* input buffer */ } MD4_CTX; static void MD4Init(MD4_CTX *ctx); static void MD4Update(MD4_CTX *ctx, const unsigned char *input, size_t len); static void MD4Final(unsigned char digest[MD4_DIGEST_LENGTH], MD4_CTX *ctx); int md4_vector(size_t num_elem, const u8 *addr[], const size_t *len, u8 *mac) { MD4_CTX ctx; size_t i; MD4Init(&ctx); for (i = 0; i < num_elem; i++) MD4Update(&ctx, addr[i], len[i]); MD4Final(mac, &ctx); return 0; } /* ===== start - public domain MD4 implementation ===== */ /* $OpenBSD: md4.c,v 1.7 2005/08/08 08:05:35 espie Exp $ */ /* * This code implements the MD4 message-digest algorithm. * The algorithm is due to Ron Rivest. This code was * written by Colin Plumb in 1993, no copyright is claimed. * This code is in the public domain; do with it what you wish. * Todd C. Miller modified the MD5 code to do MD4 based on RFC 1186. * * Equivalent code is available from RSA Data Security, Inc. * This code has been tested against that, and is equivalent, * except that you don't need to include two pages of legalese * with every copy. * * To compute the message digest of a chunk of bytes, declare an * MD4Context structure, pass it to MD4Init, call MD4Update as * needed on buffers full of bytes, and then call MD4Final, which * will fill a supplied 16-byte array with the digest. 
*/ #define MD4_DIGEST_STRING_LENGTH (MD4_DIGEST_LENGTH * 2 + 1) static void MD4Transform(u32 state[4], const u8 block[MD4_BLOCK_LENGTH]); #define PUT_64BIT_LE(cp, value) do { \ (cp)[7] = (value) >> 56; \ (cp)[6] = (value) >> 48; \ (cp)[5] = (value) >> 40; \ (cp)[4] = (value) >> 32; \ (cp)[3] = (value) >> 24; \ (cp)[2] = (value) >> 16; \ (cp)[1] = (value) >> 8; \ (cp)[0] = (value); } while (0) #define PUT_32BIT_LE(cp, value) do { \ (cp)[3] = (value) >> 24; \ (cp)[2] = (value) >> 16; \ (cp)[1] = (value) >> 8; \ (cp)[0] = (value); } while (0) static u8 PADDING[MD4_BLOCK_LENGTH] = { 0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; /* * Start MD4 accumulation. * Set bit count to 0 and buffer to mysterious initialization constants. */ static void MD4Init(MD4_CTX *ctx) { ctx->count = 0; ctx->state[0] = 0x67452301; ctx->state[1] = 0xefcdab89; ctx->state[2] = 0x98badcfe; ctx->state[3] = 0x10325476; } /* * Update context to reflect the concatenation of another buffer full * of bytes. */ static void MD4Update(MD4_CTX *ctx, const unsigned char *input, size_t len) { size_t have, need; /* Check how many bytes we already have and how many more we need. */ have = (size_t)((ctx->count >> 3) & (MD4_BLOCK_LENGTH - 1)); need = MD4_BLOCK_LENGTH - have; /* Update bitcount */ ctx->count += (u64)len << 3; if (len >= need) { if (have != 0) { os_memcpy(ctx->buffer + have, input, need); MD4Transform(ctx->state, ctx->buffer); input += need; len -= need; have = 0; } /* Process data in MD4_BLOCK_LENGTH-byte chunks. */ while (len >= MD4_BLOCK_LENGTH) { MD4Transform(ctx->state, input); input += MD4_BLOCK_LENGTH; len -= MD4_BLOCK_LENGTH; } } /* Handle any remaining bytes of data. 
*/ if (len != 0) os_memcpy(ctx->buffer + have, input, len); } /* * Pad pad to 64-byte boundary with the bit pattern * 1 0* (64-bit count of bits processed, MSB-first) */ static void MD4Pad(MD4_CTX *ctx) { u8 count[8]; size_t padlen; /* Convert count to 8 bytes in little endian order. */ PUT_64BIT_LE(count, ctx->count); /* Pad out to 56 mod 64. */ padlen = MD4_BLOCK_LENGTH - ((ctx->count >> 3) & (MD4_BLOCK_LENGTH - 1)); if (padlen < 1 + 8) padlen += MD4_BLOCK_LENGTH; MD4Update(ctx, PADDING, padlen - 8); /* padlen - 8 <= 64 */ MD4Update(ctx, count, 8); } /* * Final wrapup--call MD4Pad, fill in digest and zero out ctx. */ static void MD4Final(unsigned char digest[MD4_DIGEST_LENGTH], MD4_CTX *ctx) { int i; MD4Pad(ctx); if (digest != NULL) { for (i = 0; i < 4; i++) PUT_32BIT_LE(digest + i * 4, ctx->state[i]); os_memset(ctx, 0, sizeof(*ctx)); } } /* The three core functions - F1 is optimized somewhat */ /* #define F1(x, y, z) (x & y | ~x & z) */ #define F1(x, y, z) (z ^ (x & (y ^ z))) #define F2(x, y, z) ((x & y) | (x & z) | (y & z)) #define F3(x, y, z) (x ^ y ^ z) /* This is the central step in the MD4 algorithm. */ #define MD4STEP(f, w, x, y, z, data, s) \ ( w += f(x, y, z) + data, w = w<<s | w>>(32-s) ) /* * The core of the MD4 algorithm, this alters an existing MD4 hash to * reflect the addition of 16 longwords of new data. MD4Update blocks * the data and converts bytes into longwords for this routine. 
*/ static void MD4Transform(u32 state[4], const u8 block[MD4_BLOCK_LENGTH]) { u32 a, b, c, d, in[MD4_BLOCK_LENGTH / 4]; #if BYTE_ORDER == LITTLE_ENDIAN os_memcpy(in, block, sizeof(in)); #else for (a = 0; a < MD4_BLOCK_LENGTH / 4; a++) { in[a] = (u32)( (u32)(block[a * 4 + 0]) | (u32)(block[a * 4 + 1]) << 8 | (u32)(block[a * 4 + 2]) << 16 | (u32)(block[a * 4 + 3]) << 24); } #endif a = state[0]; b = state[1]; c = state[2]; d = state[3]; MD4STEP(F1, a, b, c, d, in[ 0], 3); MD4STEP(F1, d, a, b, c, in[ 1], 7); MD4STEP(F1, c, d, a, b, in[ 2], 11); MD4STEP(F1, b, c, d, a, in[ 3], 19); MD4STEP(F1, a, b, c, d, in[ 4], 3); MD4STEP(F1, d, a, b, c, in[ 5], 7); MD4STEP(F1, c, d, a, b, in[ 6], 11); MD4STEP(F1, b, c, d, a, in[ 7], 19); MD4STEP(F1, a, b, c, d, in[ 8], 3); MD4STEP(F1, d, a, b, c, in[ 9], 7); MD4STEP(F1, c, d, a, b, in[10], 11); MD4STEP(F1, b, c, d, a, in[11], 19); MD4STEP(F1, a, b, c, d, in[12], 3); MD4STEP(F1, d, a, b, c, in[13], 7); MD4STEP(F1, c, d, a, b, in[14], 11); MD4STEP(F1, b, c, d, a, in[15], 19); MD4STEP(F2, a, b, c, d, in[ 0] + 0x5a827999, 3); MD4STEP(F2, d, a, b, c, in[ 4] + 0x5a827999, 5); MD4STEP(F2, c, d, a, b, in[ 8] + 0x5a827999, 9); MD4STEP(F2, b, c, d, a, in[12] + 0x5a827999, 13); MD4STEP(F2, a, b, c, d, in[ 1] + 0x5a827999, 3); MD4STEP(F2, d, a, b, c, in[ 5] + 0x5a827999, 5); MD4STEP(F2, c, d, a, b, in[ 9] + 0x5a827999, 9); MD4STEP(F2, b, c, d, a, in[13] + 0x5a827999, 13); MD4STEP(F2, a, b, c, d, in[ 2] + 0x5a827999, 3); MD4STEP(F2, d, a, b, c, in[ 6] + 0x5a827999, 5); MD4STEP(F2, c, d, a, b, in[10] + 0x5a827999, 9); MD4STEP(F2, b, c, d, a, in[14] + 0x5a827999, 13); MD4STEP(F2, a, b, c, d, in[ 3] + 0x5a827999, 3); MD4STEP(F2, d, a, b, c, in[ 7] + 0x5a827999, 5); MD4STEP(F2, c, d, a, b, in[11] + 0x5a827999, 9); MD4STEP(F2, b, c, d, a, in[15] + 0x5a827999, 13); MD4STEP(F3, a, b, c, d, in[ 0] + 0x6ed9eba1, 3); MD4STEP(F3, d, a, b, c, in[ 8] + 0x6ed9eba1, 9); MD4STEP(F3, c, d, a, b, in[ 4] + 0x6ed9eba1, 11); MD4STEP(F3, b, c, d, a, in[12] + 
0x6ed9eba1, 15); MD4STEP(F3, a, b, c, d, in[ 2] + 0x6ed9eba1, 3); MD4STEP(F3, d, a, b, c, in[10] + 0x6ed9eba1, 9); MD4STEP(F3, c, d, a, b, in[ 6] + 0x6ed9eba1, 11); MD4STEP(F3, b, c, d, a, in[14] + 0x6ed9eba1, 15); MD4STEP(F3, a, b, c, d, in[ 1] + 0x6ed9eba1, 3); MD4STEP(F3, d, a, b, c, in[ 9] + 0x6ed9eba1, 9); MD4STEP(F3, c, d, a, b, in[ 5] + 0x6ed9eba1, 11); MD4STEP(F3, b, c, d, a, in[13] + 0x6ed9eba1, 15); MD4STEP(F3, a, b, c, d, in[ 3] + 0x6ed9eba1, 3); MD4STEP(F3, d, a, b, c, in[11] + 0x6ed9eba1, 9); MD4STEP(F3, c, d, a, b, in[ 7] + 0x6ed9eba1, 11); MD4STEP(F3, b, c, d, a, in[15] + 0x6ed9eba1, 15); state[0] += a; state[1] += b; state[2] += c; state[3] += d; } /* ===== end - public domain MD4 implementation ===== */
gpl-2.0
jledet/linux-xlnx
net/bridge/netfilter/ebt_ip6.c
135
4431
/* * ebt_ip6 * * Authors: * Manohar Castelino <manohar.r.castelino@intel.com> * Kuo-Lang Tseng <kuo-lang.tseng@intel.com> * Jan Engelhardt <jengelh@medozas.de> * * Summary: * This is just a modification of the IPv4 code written by * Bart De Schuymer <bdschuym@pandora.be> * with the changes required to support IPv6 * * Jan, 2008 */ #include <linux/ipv6.h> #include <net/ipv6.h> #include <linux/in.h> #include <linux/module.h> #include <net/dsfield.h> #include <linux/netfilter/x_tables.h> #include <linux/netfilter_bridge/ebtables.h> #include <linux/netfilter_bridge/ebt_ip6.h> union pkthdr { struct { __be16 src; __be16 dst; } tcpudphdr; struct { u8 type; u8 code; } icmphdr; }; static bool ebt_ip6_mt(const struct sk_buff *skb, struct xt_action_param *par) { const struct ebt_ip6_info *info = par->matchinfo; const struct ipv6hdr *ih6; struct ipv6hdr _ip6h; const union pkthdr *pptr; union pkthdr _pkthdr; ih6 = skb_header_pointer(skb, 0, sizeof(_ip6h), &_ip6h); if (ih6 == NULL) return false; if (info->bitmask & EBT_IP6_TCLASS && FWINV(info->tclass != ipv6_get_dsfield(ih6), EBT_IP6_TCLASS)) return false; if ((info->bitmask & EBT_IP6_SOURCE && FWINV(ipv6_masked_addr_cmp(&ih6->saddr, &info->smsk, &info->saddr), EBT_IP6_SOURCE)) || (info->bitmask & EBT_IP6_DEST && FWINV(ipv6_masked_addr_cmp(&ih6->daddr, &info->dmsk, &info->daddr), EBT_IP6_DEST))) return false; if (info->bitmask & EBT_IP6_PROTO) { uint8_t nexthdr = ih6->nexthdr; __be16 frag_off; int offset_ph; offset_ph = ipv6_skip_exthdr(skb, sizeof(_ip6h), &nexthdr, &frag_off); if (offset_ph == -1) return false; if (FWINV(info->protocol != nexthdr, EBT_IP6_PROTO)) return false; if (!(info->bitmask & (EBT_IP6_DPORT | EBT_IP6_SPORT | EBT_IP6_ICMP6))) return true; /* min icmpv6 headersize is 4, so sizeof(_pkthdr) is ok. 
*/ pptr = skb_header_pointer(skb, offset_ph, sizeof(_pkthdr), &_pkthdr); if (pptr == NULL) return false; if (info->bitmask & EBT_IP6_DPORT) { u16 dst = ntohs(pptr->tcpudphdr.dst); if (FWINV(dst < info->dport[0] || dst > info->dport[1], EBT_IP6_DPORT)) return false; } if (info->bitmask & EBT_IP6_SPORT) { u16 src = ntohs(pptr->tcpudphdr.src); if (FWINV(src < info->sport[0] || src > info->sport[1], EBT_IP6_SPORT)) return false; } if ((info->bitmask & EBT_IP6_ICMP6) && FWINV(pptr->icmphdr.type < info->icmpv6_type[0] || pptr->icmphdr.type > info->icmpv6_type[1] || pptr->icmphdr.code < info->icmpv6_code[0] || pptr->icmphdr.code > info->icmpv6_code[1], EBT_IP6_ICMP6)) return false; } return true; } static int ebt_ip6_mt_check(const struct xt_mtchk_param *par) { const struct ebt_entry *e = par->entryinfo; struct ebt_ip6_info *info = par->matchinfo; if (e->ethproto != htons(ETH_P_IPV6) || e->invflags & EBT_IPROTO) return -EINVAL; if (info->bitmask & ~EBT_IP6_MASK || info->invflags & ~EBT_IP6_MASK) return -EINVAL; if (info->bitmask & (EBT_IP6_DPORT | EBT_IP6_SPORT)) { if (info->invflags & EBT_IP6_PROTO) return -EINVAL; if (info->protocol != IPPROTO_TCP && info->protocol != IPPROTO_UDP && info->protocol != IPPROTO_UDPLITE && info->protocol != IPPROTO_SCTP && info->protocol != IPPROTO_DCCP) return -EINVAL; } if (info->bitmask & EBT_IP6_DPORT && info->dport[0] > info->dport[1]) return -EINVAL; if (info->bitmask & EBT_IP6_SPORT && info->sport[0] > info->sport[1]) return -EINVAL; if (info->bitmask & EBT_IP6_ICMP6) { if ((info->invflags & EBT_IP6_PROTO) || info->protocol != IPPROTO_ICMPV6) return -EINVAL; if (info->icmpv6_type[0] > info->icmpv6_type[1] || info->icmpv6_code[0] > info->icmpv6_code[1]) return -EINVAL; } return 0; } static struct xt_match ebt_ip6_mt_reg __read_mostly = { .name = "ip6", .revision = 0, .family = NFPROTO_BRIDGE, .match = ebt_ip6_mt, .checkentry = ebt_ip6_mt_check, .matchsize = sizeof(struct ebt_ip6_info), .me = THIS_MODULE, }; static int __init 
ebt_ip6_init(void) { return xt_register_match(&ebt_ip6_mt_reg); } static void __exit ebt_ip6_fini(void) { xt_unregister_match(&ebt_ip6_mt_reg); } module_init(ebt_ip6_init); module_exit(ebt_ip6_fini); MODULE_DESCRIPTION("Ebtables: IPv6 protocol packet match"); MODULE_AUTHOR("Kuo-Lang Tseng <kuo-lang.tseng@intel.com>"); MODULE_LICENSE("GPL");
gpl-2.0
gilou811/Archos_OPENAOS_Kernel_ICS
arch/s390/kernel/setup.c
135
22474
/* * arch/s390/kernel/setup.c * * S390 version * Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation * Author(s): Hartmut Penner (hp@de.ibm.com), * Martin Schwidefsky (schwidefsky@de.ibm.com) * * Derived from "arch/i386/kernel/setup.c" * Copyright (C) 1995, Linus Torvalds */ /* * This file handles the architecture-dependent parts of initialization */ #define KMSG_COMPONENT "setup" #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt #include <linux/errno.h> #include <linux/module.h> #include <linux/sched.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/stddef.h> #include <linux/unistd.h> #include <linux/ptrace.h> #include <linux/slab.h> #include <linux/user.h> #include <linux/tty.h> #include <linux/ioport.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/initrd.h> #include <linux/bootmem.h> #include <linux/root_dev.h> #include <linux/console.h> #include <linux/kernel_stat.h> #include <linux/device.h> #include <linux/notifier.h> #include <linux/pfn.h> #include <linux/ctype.h> #include <linux/reboot.h> #include <linux/topology.h> #include <asm/ipl.h> #include <asm/uaccess.h> #include <asm/system.h> #include <asm/smp.h> #include <asm/mmu_context.h> #include <asm/cpcmd.h> #include <asm/lowcore.h> #include <asm/irq.h> #include <asm/page.h> #include <asm/ptrace.h> #include <asm/sections.h> #include <asm/ebcdic.h> #include <asm/compat.h> #include <asm/kvm_virtio.h> long psw_kernel_bits = (PSW_BASE_BITS | PSW_MASK_DAT | PSW_ASC_PRIMARY | PSW_MASK_MCHECK | PSW_DEFAULT_KEY); long psw_user_bits = (PSW_BASE_BITS | PSW_MASK_DAT | PSW_ASC_HOME | PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK | PSW_MASK_PSTATE | PSW_DEFAULT_KEY); /* * User copy operations. */ struct uaccess_ops uaccess; EXPORT_SYMBOL(uaccess); /* * Machine setup.. 
*/ unsigned int console_mode = 0; unsigned int console_devno = -1; unsigned int console_irq = -1; unsigned long machine_flags; unsigned long elf_hwcap = 0; char elf_platform[ELF_PLATFORM_SIZE]; struct mem_chunk __initdata memory_chunk[MEMORY_CHUNKS]; volatile int __cpu_logical_map[NR_CPUS]; /* logical cpu to cpu address */ int __initdata memory_end_set; unsigned long __initdata memory_end; /* * This is set up by the setup-routine at boot-time * for S390 need to find out, what we have to setup * using address 0x10400 ... */ #include <asm/setup.h> static struct resource code_resource = { .name = "Kernel code", .flags = IORESOURCE_BUSY | IORESOURCE_MEM, }; static struct resource data_resource = { .name = "Kernel data", .flags = IORESOURCE_BUSY | IORESOURCE_MEM, }; /* * cpu_init() initializes state that is per-CPU. */ void __cpuinit cpu_init(void) { int addr = hard_smp_processor_id(); /* * Store processor id in lowcore (used e.g. in timer_interrupt) */ get_cpu_id(&S390_lowcore.cpu_data.cpu_id); S390_lowcore.cpu_data.cpu_addr = addr; /* * Force FPU initialization: */ clear_thread_flag(TIF_USEDFPU); clear_used_math(); atomic_inc(&init_mm.mm_count); current->active_mm = &init_mm; if (current->mm) BUG(); enter_lazy_tlb(&init_mm, current); } /* * condev= and conmode= setup parameter. 
*/ static int __init condev_setup(char *str) { int vdev; vdev = simple_strtoul(str, &str, 0); if (vdev >= 0 && vdev < 65536) { console_devno = vdev; console_irq = -1; } return 1; } __setup("condev=", condev_setup); static int __init conmode_setup(char *str) { #if defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE) if (strncmp(str, "hwc", 4) == 0 || strncmp(str, "sclp", 5) == 0) SET_CONSOLE_SCLP; #endif #if defined(CONFIG_TN3215_CONSOLE) if (strncmp(str, "3215", 5) == 0) SET_CONSOLE_3215; #endif #if defined(CONFIG_TN3270_CONSOLE) if (strncmp(str, "3270", 5) == 0) SET_CONSOLE_3270; #endif return 1; } __setup("conmode=", conmode_setup); static void __init conmode_default(void) { char query_buffer[1024]; char *ptr; if (MACHINE_IS_VM) { cpcmd("QUERY CONSOLE", query_buffer, 1024, NULL); console_devno = simple_strtoul(query_buffer + 5, NULL, 16); ptr = strstr(query_buffer, "SUBCHANNEL ="); console_irq = simple_strtoul(ptr + 13, NULL, 16); cpcmd("QUERY TERM", query_buffer, 1024, NULL); ptr = strstr(query_buffer, "CONMODE"); /* * Set the conmode to 3215 so that the device recognition * will set the cu_type of the console to 3215. If the * conmode is 3270 and we don't set it back then both * 3215 and the 3270 driver will try to access the console * device (3215 as console and 3270 as normal tty). 
*/ cpcmd("TERM CONMODE 3215", NULL, 0, NULL); if (ptr == NULL) { #if defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE) SET_CONSOLE_SCLP; #endif return; } if (strncmp(ptr + 8, "3270", 4) == 0) { #if defined(CONFIG_TN3270_CONSOLE) SET_CONSOLE_3270; #elif defined(CONFIG_TN3215_CONSOLE) SET_CONSOLE_3215; #elif defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE) SET_CONSOLE_SCLP; #endif } else if (strncmp(ptr + 8, "3215", 4) == 0) { #if defined(CONFIG_TN3215_CONSOLE) SET_CONSOLE_3215; #elif defined(CONFIG_TN3270_CONSOLE) SET_CONSOLE_3270; #elif defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE) SET_CONSOLE_SCLP; #endif } } else { #if defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE) SET_CONSOLE_SCLP; #endif } } #if defined(CONFIG_ZFCPDUMP) || defined(CONFIG_ZFCPDUMP_MODULE) static void __init setup_zfcpdump(unsigned int console_devno) { static char str[41]; if (ipl_info.type != IPL_TYPE_FCP_DUMP) return; if (console_devno != -1) sprintf(str, " cio_ignore=all,!0.0.%04x,!0.0.%04x", ipl_info.data.fcp.dev_id.devno, console_devno); else sprintf(str, " cio_ignore=all,!0.0.%04x", ipl_info.data.fcp.dev_id.devno); strcat(boot_command_line, str); console_loglevel = 2; } #else static inline void setup_zfcpdump(unsigned int console_devno) {} #endif /* CONFIG_ZFCPDUMP */ /* * Reboot, halt and power_off stubs. They just call _machine_restart, * _machine_halt or _machine_power_off. */ void machine_restart(char *command) { if ((!in_interrupt() && !in_atomic()) || oops_in_progress) /* * Only unblank the console if we are called in enabled * context or a bust_spinlocks cleared the way for us. */ console_unblank(); _machine_restart(command); } void machine_halt(void) { if (!in_interrupt() || oops_in_progress) /* * Only unblank the console if we are called in enabled * context or a bust_spinlocks cleared the way for us. 
*/ console_unblank(); _machine_halt(); } void machine_power_off(void) { if (!in_interrupt() || oops_in_progress) /* * Only unblank the console if we are called in enabled * context or a bust_spinlocks cleared the way for us. */ console_unblank(); _machine_power_off(); } /* * Dummy power off function. */ void (*pm_power_off)(void) = machine_power_off; static int __init early_parse_mem(char *p) { memory_end = memparse(p, &p); memory_end_set = 1; return 0; } early_param("mem", early_parse_mem); #ifdef CONFIG_S390_SWITCH_AMODE #ifdef CONFIG_PGSTE unsigned int switch_amode = 1; #else unsigned int switch_amode = 0; #endif EXPORT_SYMBOL_GPL(switch_amode); static int set_amode_and_uaccess(unsigned long user_amode, unsigned long user32_amode) { psw_user_bits = PSW_BASE_BITS | PSW_MASK_DAT | user_amode | PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK | PSW_MASK_PSTATE | PSW_DEFAULT_KEY; #ifdef CONFIG_COMPAT psw_user32_bits = PSW_BASE32_BITS | PSW_MASK_DAT | user_amode | PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK | PSW_MASK_PSTATE | PSW_DEFAULT_KEY; psw32_user_bits = PSW32_BASE_BITS | PSW32_MASK_DAT | user32_amode | PSW32_MASK_IO | PSW32_MASK_EXT | PSW32_MASK_MCHECK | PSW32_MASK_PSTATE; #endif psw_kernel_bits = PSW_BASE_BITS | PSW_MASK_DAT | PSW_ASC_HOME | PSW_MASK_MCHECK | PSW_DEFAULT_KEY; if (MACHINE_HAS_MVCOS) { memcpy(&uaccess, &uaccess_mvcos_switch, sizeof(uaccess)); return 1; } else { memcpy(&uaccess, &uaccess_pt, sizeof(uaccess)); return 0; } } /* * Switch kernel/user addressing modes? */ static int __init early_parse_switch_amode(char *p) { switch_amode = 1; return 0; } early_param("switch_amode", early_parse_switch_amode); #else /* CONFIG_S390_SWITCH_AMODE */ static inline int set_amode_and_uaccess(unsigned long user_amode, unsigned long user32_amode) { return 0; } #endif /* CONFIG_S390_SWITCH_AMODE */ #ifdef CONFIG_S390_EXEC_PROTECT unsigned int s390_noexec = 0; EXPORT_SYMBOL_GPL(s390_noexec); /* * Enable execute protection? 
*/ static int __init early_parse_noexec(char *p) { if (!strncmp(p, "off", 3)) return 0; switch_amode = 1; s390_noexec = 1; return 0; } early_param("noexec", early_parse_noexec); #endif /* CONFIG_S390_EXEC_PROTECT */ static void setup_addressing_mode(void) { if (s390_noexec) { if (set_amode_and_uaccess(PSW_ASC_SECONDARY, PSW32_ASC_SECONDARY)) pr_info("Execute protection active, " "mvcos available\n"); else pr_info("Execute protection active, " "mvcos not available\n"); } else if (switch_amode) { if (set_amode_and_uaccess(PSW_ASC_PRIMARY, PSW32_ASC_PRIMARY)) pr_info("Address spaces switched, " "mvcos available\n"); else pr_info("Address spaces switched, " "mvcos not available\n"); } #ifdef CONFIG_TRACE_IRQFLAGS sysc_restore_trace_psw.mask = psw_kernel_bits & ~PSW_MASK_MCHECK; io_restore_trace_psw.mask = psw_kernel_bits & ~PSW_MASK_MCHECK; #endif } static void __init setup_lowcore(void) { struct _lowcore *lc; int lc_pages; /* * Setup lowcore for boot cpu */ lc_pages = sizeof(void *) == 8 ? 
2 : 1; lc = (struct _lowcore *) __alloc_bootmem(lc_pages * PAGE_SIZE, lc_pages * PAGE_SIZE, 0); memset(lc, 0, lc_pages * PAGE_SIZE); lc->restart_psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY; lc->restart_psw.addr = PSW_ADDR_AMODE | (unsigned long) restart_int_handler; if (switch_amode) lc->restart_psw.mask |= PSW_ASC_HOME; lc->external_new_psw.mask = psw_kernel_bits; lc->external_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) ext_int_handler; lc->svc_new_psw.mask = psw_kernel_bits | PSW_MASK_IO | PSW_MASK_EXT; lc->svc_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) system_call; lc->program_new_psw.mask = psw_kernel_bits; lc->program_new_psw.addr = PSW_ADDR_AMODE | (unsigned long)pgm_check_handler; lc->mcck_new_psw.mask = psw_kernel_bits & ~PSW_MASK_MCHECK & ~PSW_MASK_DAT; lc->mcck_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) mcck_int_handler; lc->io_new_psw.mask = psw_kernel_bits; lc->io_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) io_int_handler; lc->ipl_device = S390_lowcore.ipl_device; lc->clock_comparator = -1ULL; lc->kernel_stack = ((unsigned long) &init_thread_union) + THREAD_SIZE; lc->async_stack = (unsigned long) __alloc_bootmem(ASYNC_SIZE, ASYNC_SIZE, 0) + ASYNC_SIZE; lc->panic_stack = (unsigned long) __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, 0) + PAGE_SIZE; lc->current_task = (unsigned long) init_thread_union.thread_info.task; lc->thread_info = (unsigned long) &init_thread_union; #ifndef CONFIG_64BIT if (MACHINE_HAS_IEEE) { lc->extended_save_area_addr = (__u32) __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, 0); /* enable extended save area */ __ctl_set_bit(14, 29); } #else lc->vdso_per_cpu_data = (unsigned long) &lc->paste[0]; #endif set_prefix((u32)(unsigned long) lc); } static void __init setup_resources(void) { struct resource *res, *sub_res; int i; code_resource.start = (unsigned long) &_text; code_resource.end = (unsigned long) &_etext - 1; data_resource.start = (unsigned long) &_etext; data_resource.end = (unsigned long) &_edata - 1; for (i = 0; i < 
MEMORY_CHUNKS; i++) { if (!memory_chunk[i].size) continue; res = alloc_bootmem_low(sizeof(struct resource)); res->flags = IORESOURCE_BUSY | IORESOURCE_MEM; switch (memory_chunk[i].type) { case CHUNK_READ_WRITE: res->name = "System RAM"; break; case CHUNK_READ_ONLY: res->name = "System ROM"; res->flags |= IORESOURCE_READONLY; break; default: res->name = "reserved"; } res->start = memory_chunk[i].addr; res->end = memory_chunk[i].addr + memory_chunk[i].size - 1; request_resource(&iomem_resource, res); if (code_resource.start >= res->start && code_resource.start <= res->end && code_resource.end > res->end) { sub_res = alloc_bootmem_low(sizeof(struct resource)); memcpy(sub_res, &code_resource, sizeof(struct resource)); sub_res->end = res->end; code_resource.start = res->end + 1; request_resource(res, sub_res); } if (code_resource.start >= res->start && code_resource.start <= res->end && code_resource.end <= res->end) request_resource(res, &code_resource); if (data_resource.start >= res->start && data_resource.start <= res->end && data_resource.end > res->end) { sub_res = alloc_bootmem_low(sizeof(struct resource)); memcpy(sub_res, &data_resource, sizeof(struct resource)); sub_res->end = res->end; data_resource.start = res->end + 1; request_resource(res, sub_res); } if (data_resource.start >= res->start && data_resource.start <= res->end && data_resource.end <= res->end) request_resource(res, &data_resource); } } unsigned long real_memory_size; EXPORT_SYMBOL_GPL(real_memory_size); static void __init setup_memory_end(void) { unsigned long memory_size; unsigned long max_mem; int i; #if defined(CONFIG_ZFCPDUMP) || defined(CONFIG_ZFCPDUMP_MODULE) if (ipl_info.type == IPL_TYPE_FCP_DUMP) { memory_end = ZFCPDUMP_HSA_SIZE; memory_end_set = 1; } #endif memory_size = 0; memory_end &= PAGE_MASK; max_mem = memory_end ? 
min(VMEM_MAX_PHYS, memory_end) : VMEM_MAX_PHYS; memory_end = min(max_mem, memory_end); /* * Make sure all chunks are MAX_ORDER aligned so we don't need the * extra checks that HOLES_IN_ZONE would require. */ for (i = 0; i < MEMORY_CHUNKS; i++) { unsigned long start, end; struct mem_chunk *chunk; unsigned long align; chunk = &memory_chunk[i]; align = 1UL << (MAX_ORDER + PAGE_SHIFT - 1); start = (chunk->addr + align - 1) & ~(align - 1); end = (chunk->addr + chunk->size) & ~(align - 1); if (start >= end) memset(chunk, 0, sizeof(*chunk)); else { chunk->addr = start; chunk->size = end - start; } } for (i = 0; i < MEMORY_CHUNKS; i++) { struct mem_chunk *chunk = &memory_chunk[i]; real_memory_size = max(real_memory_size, chunk->addr + chunk->size); if (chunk->addr >= max_mem) { memset(chunk, 0, sizeof(*chunk)); continue; } if (chunk->addr + chunk->size > max_mem) chunk->size = max_mem - chunk->addr; memory_size = max(memory_size, chunk->addr + chunk->size); } if (!memory_end) memory_end = memory_size; } static void __init setup_memory(void) { unsigned long bootmap_size; unsigned long start_pfn, end_pfn; int i; /* * partially used pages are not usable - thus * we are rounding upwards: */ start_pfn = PFN_UP(__pa(&_end)); end_pfn = max_pfn = PFN_DOWN(memory_end); #ifdef CONFIG_BLK_DEV_INITRD /* * Move the initrd in case the bitmap of the bootmem allocater * would overwrite it. 
*/ if (INITRD_START && INITRD_SIZE) { unsigned long bmap_size; unsigned long start; bmap_size = bootmem_bootmap_pages(end_pfn - start_pfn + 1); bmap_size = PFN_PHYS(bmap_size); if (PFN_PHYS(start_pfn) + bmap_size > INITRD_START) { start = PFN_PHYS(start_pfn) + bmap_size + PAGE_SIZE; if (start + INITRD_SIZE > memory_end) { pr_err("initrd extends beyond end of " "memory (0x%08lx > 0x%08lx) " "disabling initrd\n", start + INITRD_SIZE, memory_end); INITRD_START = INITRD_SIZE = 0; } else { pr_info("Moving initrd (0x%08lx -> " "0x%08lx, size: %ld)\n", INITRD_START, start, INITRD_SIZE); memmove((void *) start, (void *) INITRD_START, INITRD_SIZE); INITRD_START = start; } } } #endif /* * Initialize the boot-time allocator */ bootmap_size = init_bootmem(start_pfn, end_pfn); /* * Register RAM areas with the bootmem allocator. */ for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) { unsigned long start_chunk, end_chunk, pfn; if (memory_chunk[i].type != CHUNK_READ_WRITE) continue; start_chunk = PFN_DOWN(memory_chunk[i].addr); end_chunk = start_chunk + PFN_DOWN(memory_chunk[i].size); end_chunk = min(end_chunk, end_pfn); if (start_chunk >= end_chunk) continue; add_active_range(0, start_chunk, end_chunk); pfn = max(start_chunk, start_pfn); for (; pfn < end_chunk; pfn++) page_set_storage_key(PFN_PHYS(pfn), PAGE_DEFAULT_KEY); } psw_set_key(PAGE_DEFAULT_KEY); free_bootmem_with_active_regions(0, max_pfn); /* * Reserve memory used for lowcore/command line/kernel image. */ reserve_bootmem(0, (unsigned long)_ehead, BOOTMEM_DEFAULT); reserve_bootmem((unsigned long)_stext, PFN_PHYS(start_pfn) - (unsigned long)_stext, BOOTMEM_DEFAULT); /* * Reserve the bootmem bitmap itself as well. We do this in two * steps (first step was init_bootmem()) because this catches * the (very unlikely) case of us accidentally initializing the * bootmem allocator with an invalid RAM area. 
*/ reserve_bootmem(start_pfn << PAGE_SHIFT, bootmap_size, BOOTMEM_DEFAULT); #ifdef CONFIG_BLK_DEV_INITRD if (INITRD_START && INITRD_SIZE) { if (INITRD_START + INITRD_SIZE <= memory_end) { reserve_bootmem(INITRD_START, INITRD_SIZE, BOOTMEM_DEFAULT); initrd_start = INITRD_START; initrd_end = initrd_start + INITRD_SIZE; } else { pr_err("initrd extends beyond end of " "memory (0x%08lx > 0x%08lx) " "disabling initrd\n", initrd_start + INITRD_SIZE, memory_end); initrd_start = initrd_end = 0; } } #endif } /* * Setup hardware capabilities. */ static void __init setup_hwcaps(void) { static const int stfl_bits[6] = { 0, 2, 7, 17, 19, 21 }; struct cpuinfo_S390 *cpuinfo = &S390_lowcore.cpu_data; unsigned long long facility_list_extended; unsigned int facility_list; int i; facility_list = stfl(); /* * The store facility list bits numbers as found in the principles * of operation are numbered with bit 1UL<<31 as number 0 to * bit 1UL<<0 as number 31. * Bit 0: instructions named N3, "backported" to esa-mode * Bit 2: z/Architecture mode is active * Bit 7: the store-facility-list-extended facility is installed * Bit 17: the message-security assist is installed * Bit 19: the long-displacement facility is installed * Bit 21: the extended-immediate facility is installed * These get translated to: * HWCAP_S390_ESAN3 bit 0, HWCAP_S390_ZARCH bit 1, * HWCAP_S390_STFLE bit 2, HWCAP_S390_MSA bit 3, * HWCAP_S390_LDISP bit 4, and HWCAP_S390_EIMM bit 5. */ for (i = 0; i < 6; i++) if (facility_list & (1UL << (31 - stfl_bits[i]))) elf_hwcap |= 1UL << i; /* * Check for additional facilities with store-facility-list-extended. * stfle stores doublewords (8 byte) with bit 1ULL<<63 as bit 0 * and 1ULL<<0 as bit 63. Bits 0-31 contain the same information * as stored by stfl, bits 32-xxx contain additional facilities. * How many facility words are stored depends on the number of * doublewords passed to the instruction. 
The additional facilites * are: * Bit 43: decimal floating point facility is installed * translated to: * HWCAP_S390_DFP bit 6. */ if ((elf_hwcap & (1UL << 2)) && __stfle(&facility_list_extended, 1) > 0) { if (facility_list_extended & (1ULL << (64 - 43))) elf_hwcap |= 1UL << 6; } if (MACHINE_HAS_HPAGE) elf_hwcap |= 1UL << 7; switch (cpuinfo->cpu_id.machine) { case 0x9672: #if !defined(CONFIG_64BIT) default: /* Use "g5" as default for 31 bit kernels. */ #endif strcpy(elf_platform, "g5"); break; case 0x2064: case 0x2066: #if defined(CONFIG_64BIT) default: /* Use "z900" as default for 64 bit kernels. */ #endif strcpy(elf_platform, "z900"); break; case 0x2084: case 0x2086: strcpy(elf_platform, "z990"); break; case 0x2094: case 0x2096: strcpy(elf_platform, "z9-109"); break; case 0x2097: case 0x2098: strcpy(elf_platform, "z10"); break; } } /* * Setup function called from init/main.c just after the banner * was printed. */ void __init setup_arch(char **cmdline_p) { /* set up preferred console */ add_preferred_console("ttyS", 0, NULL); /* * print what head.S has found out about the machine */ #ifndef CONFIG_64BIT if (MACHINE_IS_VM) pr_info("Linux is running as a z/VM " "guest operating system in 31-bit mode\n"); else pr_info("Linux is running natively in 31-bit mode\n"); if (MACHINE_HAS_IEEE) pr_info("The hardware system has IEEE compatible " "floating point units\n"); else pr_info("The hardware system has no IEEE compatible " "floating point units\n"); #else /* CONFIG_64BIT */ if (MACHINE_IS_VM) pr_info("Linux is running as a z/VM " "guest operating system in 64-bit mode\n"); else if (MACHINE_IS_KVM) { pr_info("Linux is running under KVM in 64-bit mode\n"); add_preferred_console("hvc", 0, NULL); s390_virtio_console_init(); } else pr_info("Linux is running natively in 64-bit mode\n"); #endif /* CONFIG_64BIT */ /* Have one command line that is parsed and saved in /proc/cmdline */ /* boot_command_line has been already set up in early.c */ *cmdline_p = boot_command_line; 
ROOT_DEV = Root_RAM0; init_mm.start_code = PAGE_OFFSET; init_mm.end_code = (unsigned long) &_etext; init_mm.end_data = (unsigned long) &_edata; init_mm.brk = (unsigned long) &_end; if (MACHINE_HAS_MVCOS) memcpy(&uaccess, &uaccess_mvcos, sizeof(uaccess)); else memcpy(&uaccess, &uaccess_std, sizeof(uaccess)); parse_early_param(); setup_ipl(); setup_memory_end(); setup_addressing_mode(); setup_memory(); setup_resources(); setup_lowcore(); cpu_init(); __cpu_logical_map[0] = S390_lowcore.cpu_data.cpu_addr; s390_init_cpu_topology(); /* * Setup capabilities (ELF_HWCAP & ELF_PLATFORM). */ setup_hwcaps(); /* * Create kernel page tables and switch to virtual addressing. */ paging_init(); /* Setup default console */ conmode_default(); /* Setup zfcpdump support */ setup_zfcpdump(console_devno); }
gpl-2.0
davepmer/test-kernel
drivers/xen/xen-pciback/conf_space_header.c
391
8836
/* * PCI Backend - Handles the virtual fields in the configuration space headers. * * Author: Ryan Wilson <hap9@epoch.ncsc.mil> */ #include <linux/kernel.h> #include <linux/pci.h> #include "pciback.h" #include "conf_space.h" struct pci_bar_info { u32 val; u32 len_val; int which; }; #define DRV_NAME "xen-pciback" #define is_enable_cmd(value) ((value)&(PCI_COMMAND_MEMORY|PCI_COMMAND_IO)) #define is_master_cmd(value) ((value)&PCI_COMMAND_MASTER) static int command_read(struct pci_dev *dev, int offset, u16 *value, void *data) { int i; int ret; ret = xen_pcibk_read_config_word(dev, offset, value, data); if (!atomic_read(&dev->enable_cnt)) return ret; for (i = 0; i < PCI_ROM_RESOURCE; i++) { if (dev->resource[i].flags & IORESOURCE_IO) *value |= PCI_COMMAND_IO; if (dev->resource[i].flags & IORESOURCE_MEM) *value |= PCI_COMMAND_MEMORY; } return ret; } static int command_write(struct pci_dev *dev, int offset, u16 value, void *data) { struct xen_pcibk_dev_data *dev_data; int err; dev_data = pci_get_drvdata(dev); if (!pci_is_enabled(dev) && is_enable_cmd(value)) { if (unlikely(verbose_request)) printk(KERN_DEBUG DRV_NAME ": %s: enable\n", pci_name(dev)); err = pci_enable_device(dev); if (err) return err; if (dev_data) dev_data->enable_intx = 1; } else if (pci_is_enabled(dev) && !is_enable_cmd(value)) { if (unlikely(verbose_request)) printk(KERN_DEBUG DRV_NAME ": %s: disable\n", pci_name(dev)); pci_disable_device(dev); if (dev_data) dev_data->enable_intx = 0; } if (!dev->is_busmaster && is_master_cmd(value)) { if (unlikely(verbose_request)) printk(KERN_DEBUG DRV_NAME ": %s: set bus master\n", pci_name(dev)); pci_set_master(dev); } if (value & PCI_COMMAND_INVALIDATE) { if (unlikely(verbose_request)) printk(KERN_DEBUG DRV_NAME ": %s: enable memory-write-invalidate\n", pci_name(dev)); err = pci_set_mwi(dev); if (err) { printk(KERN_WARNING DRV_NAME ": %s: cannot enable " "memory-write-invalidate (%d)\n", pci_name(dev), err); value &= ~PCI_COMMAND_INVALIDATE; } } return 
pci_write_config_word(dev, offset, value); } static int rom_write(struct pci_dev *dev, int offset, u32 value, void *data) { struct pci_bar_info *bar = data; if (unlikely(!bar)) { printk(KERN_WARNING DRV_NAME ": driver data not found for %s\n", pci_name(dev)); return XEN_PCI_ERR_op_failed; } /* A write to obtain the length must happen as a 32-bit write. * This does not (yet) support writing individual bytes */ if (value == ~PCI_ROM_ADDRESS_ENABLE) bar->which = 1; else { u32 tmpval; pci_read_config_dword(dev, offset, &tmpval); if (tmpval != bar->val && value == bar->val) { /* Allow restoration of bar value. */ pci_write_config_dword(dev, offset, bar->val); } bar->which = 0; } /* Do we need to support enabling/disabling the rom address here? */ return 0; } /* For the BARs, only allow writes which write ~0 or * the correct resource information * (Needed for when the driver probes the resource usage) */ static int bar_write(struct pci_dev *dev, int offset, u32 value, void *data) { struct pci_bar_info *bar = data; if (unlikely(!bar)) { printk(KERN_WARNING DRV_NAME ": driver data not found for %s\n", pci_name(dev)); return XEN_PCI_ERR_op_failed; } /* A write to obtain the length must happen as a 32-bit write. * This does not (yet) support writing individual bytes */ if (value == ~0) bar->which = 1; else { u32 tmpval; pci_read_config_dword(dev, offset, &tmpval); if (tmpval != bar->val && value == bar->val) { /* Allow restoration of bar value. */ pci_write_config_dword(dev, offset, bar->val); } bar->which = 0; } return 0; } static int bar_read(struct pci_dev *dev, int offset, u32 * value, void *data) { struct pci_bar_info *bar = data; if (unlikely(!bar)) { printk(KERN_WARNING DRV_NAME ": driver data not found for %s\n", pci_name(dev)); return XEN_PCI_ERR_op_failed; } *value = bar->which ? 
bar->len_val : bar->val; return 0; } static inline void read_dev_bar(struct pci_dev *dev, struct pci_bar_info *bar_info, int offset, u32 len_mask) { int pos; struct resource *res = dev->resource; if (offset == PCI_ROM_ADDRESS || offset == PCI_ROM_ADDRESS1) pos = PCI_ROM_RESOURCE; else { pos = (offset - PCI_BASE_ADDRESS_0) / 4; if (pos && ((res[pos - 1].flags & (PCI_BASE_ADDRESS_SPACE | PCI_BASE_ADDRESS_MEM_TYPE_MASK)) == (PCI_BASE_ADDRESS_SPACE_MEMORY | PCI_BASE_ADDRESS_MEM_TYPE_64))) { bar_info->val = res[pos - 1].start >> 32; bar_info->len_val = res[pos - 1].end >> 32; return; } } bar_info->val = res[pos].start | (res[pos].flags & PCI_REGION_FLAG_MASK); bar_info->len_val = res[pos].end - res[pos].start + 1; } static void *bar_init(struct pci_dev *dev, int offset) { struct pci_bar_info *bar = kmalloc(sizeof(*bar), GFP_KERNEL); if (!bar) return ERR_PTR(-ENOMEM); read_dev_bar(dev, bar, offset, ~0); bar->which = 0; return bar; } static void *rom_init(struct pci_dev *dev, int offset) { struct pci_bar_info *bar = kmalloc(sizeof(*bar), GFP_KERNEL); if (!bar) return ERR_PTR(-ENOMEM); read_dev_bar(dev, bar, offset, ~PCI_ROM_ADDRESS_ENABLE); bar->which = 0; return bar; } static void bar_reset(struct pci_dev *dev, int offset, void *data) { struct pci_bar_info *bar = data; bar->which = 0; } static void bar_release(struct pci_dev *dev, int offset, void *data) { kfree(data); } static int xen_pcibk_read_vendor(struct pci_dev *dev, int offset, u16 *value, void *data) { *value = dev->vendor; return 0; } static int xen_pcibk_read_device(struct pci_dev *dev, int offset, u16 *value, void *data) { *value = dev->device; return 0; } static int interrupt_read(struct pci_dev *dev, int offset, u8 * value, void *data) { *value = (u8) dev->irq; return 0; } static int bist_write(struct pci_dev *dev, int offset, u8 value, void *data) { u8 cur_value; int err; err = pci_read_config_byte(dev, offset, &cur_value); if (err) goto out; if ((cur_value & ~PCI_BIST_START) == (value & ~PCI_BIST_START) 
|| value == PCI_BIST_START) err = pci_write_config_byte(dev, offset, value); out: return err; } static const struct config_field header_common[] = { { .offset = PCI_VENDOR_ID, .size = 2, .u.w.read = xen_pcibk_read_vendor, }, { .offset = PCI_DEVICE_ID, .size = 2, .u.w.read = xen_pcibk_read_device, }, { .offset = PCI_COMMAND, .size = 2, .u.w.read = command_read, .u.w.write = command_write, }, { .offset = PCI_INTERRUPT_LINE, .size = 1, .u.b.read = interrupt_read, }, { .offset = PCI_INTERRUPT_PIN, .size = 1, .u.b.read = xen_pcibk_read_config_byte, }, { /* Any side effects of letting driver domain control cache line? */ .offset = PCI_CACHE_LINE_SIZE, .size = 1, .u.b.read = xen_pcibk_read_config_byte, .u.b.write = xen_pcibk_write_config_byte, }, { .offset = PCI_LATENCY_TIMER, .size = 1, .u.b.read = xen_pcibk_read_config_byte, }, { .offset = PCI_BIST, .size = 1, .u.b.read = xen_pcibk_read_config_byte, .u.b.write = bist_write, }, {} }; #define CFG_FIELD_BAR(reg_offset) \ { \ .offset = reg_offset, \ .size = 4, \ .init = bar_init, \ .reset = bar_reset, \ .release = bar_release, \ .u.dw.read = bar_read, \ .u.dw.write = bar_write, \ } #define CFG_FIELD_ROM(reg_offset) \ { \ .offset = reg_offset, \ .size = 4, \ .init = rom_init, \ .reset = bar_reset, \ .release = bar_release, \ .u.dw.read = bar_read, \ .u.dw.write = rom_write, \ } static const struct config_field header_0[] = { CFG_FIELD_BAR(PCI_BASE_ADDRESS_0), CFG_FIELD_BAR(PCI_BASE_ADDRESS_1), CFG_FIELD_BAR(PCI_BASE_ADDRESS_2), CFG_FIELD_BAR(PCI_BASE_ADDRESS_3), CFG_FIELD_BAR(PCI_BASE_ADDRESS_4), CFG_FIELD_BAR(PCI_BASE_ADDRESS_5), CFG_FIELD_ROM(PCI_ROM_ADDRESS), {} }; static const struct config_field header_1[] = { CFG_FIELD_BAR(PCI_BASE_ADDRESS_0), CFG_FIELD_BAR(PCI_BASE_ADDRESS_1), CFG_FIELD_ROM(PCI_ROM_ADDRESS1), {} }; int xen_pcibk_config_header_add_fields(struct pci_dev *dev) { int err; err = xen_pcibk_config_add_fields(dev, header_common); if (err) goto out; switch (dev->hdr_type) { case PCI_HEADER_TYPE_NORMAL: err = 
xen_pcibk_config_add_fields(dev, header_0); break; case PCI_HEADER_TYPE_BRIDGE: err = xen_pcibk_config_add_fields(dev, header_1); break; default: err = -EINVAL; printk(KERN_ERR DRV_NAME ": %s: Unsupported header type %d!\n", pci_name(dev), dev->hdr_type); break; } out: return err; }
gpl-2.0
ubuntustudio-kernel/ubuntu-precise-lowlatency
drivers/pcmcia/pxa2xx_mainstone.c
647
4393
/* * linux/drivers/pcmcia/pxa2xx_mainstone.c * * Mainstone PCMCIA specific routines. * * Created: May 12, 2004 * Author: Nicolas Pitre * Copyright: MontaVista Software Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/module.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/interrupt.h> #include <linux/platform_device.h> #include <pcmcia/ss.h> #include <asm/mach-types.h> #include <asm/irq.h> #include <mach/pxa2xx-regs.h> #include <mach/mainstone.h> #include "soc_common.h" static struct pcmcia_irqs irqs[] = { { 0, MAINSTONE_S0_CD_IRQ, "PCMCIA0 CD" }, { 1, MAINSTONE_S1_CD_IRQ, "PCMCIA1 CD" }, { 0, MAINSTONE_S0_STSCHG_IRQ, "PCMCIA0 STSCHG" }, { 1, MAINSTONE_S1_STSCHG_IRQ, "PCMCIA1 STSCHG" }, }; static int mst_pcmcia_hw_init(struct soc_pcmcia_socket *skt) { /* * Setup default state of GPIO outputs * before we enable them as outputs. */ skt->socket.pci_irq = (skt->nr == 0) ? MAINSTONE_S0_IRQ : MAINSTONE_S1_IRQ; return soc_pcmcia_request_irqs(skt, irqs, ARRAY_SIZE(irqs)); } static void mst_pcmcia_hw_shutdown(struct soc_pcmcia_socket *skt) { soc_pcmcia_free_irqs(skt, irqs, ARRAY_SIZE(irqs)); } static unsigned long mst_pcmcia_status[2]; static void mst_pcmcia_socket_state(struct soc_pcmcia_socket *skt, struct pcmcia_state *state) { unsigned long status, flip; status = (skt->nr == 0) ? MST_PCMCIA0 : MST_PCMCIA1; flip = (status ^ mst_pcmcia_status[skt->nr]) & MST_PCMCIA_nSTSCHG_BVD1; /* * Workaround for STSCHG which can't be deasserted: * We therefore disable/enable corresponding IRQs * as needed to avoid IRQ locks. */ if (flip) { mst_pcmcia_status[skt->nr] = status; if (status & MST_PCMCIA_nSTSCHG_BVD1) enable_irq( (skt->nr == 0) ? MAINSTONE_S0_STSCHG_IRQ : MAINSTONE_S1_STSCHG_IRQ ); else disable_irq( (skt->nr == 0) ? 
MAINSTONE_S0_STSCHG_IRQ : MAINSTONE_S1_STSCHG_IRQ ); } state->detect = (status & MST_PCMCIA_nCD) ? 0 : 1; state->ready = (status & MST_PCMCIA_nIRQ) ? 1 : 0; state->bvd1 = (status & MST_PCMCIA_nSTSCHG_BVD1) ? 1 : 0; state->bvd2 = (status & MST_PCMCIA_nSPKR_BVD2) ? 1 : 0; state->vs_3v = (status & MST_PCMCIA_nVS1) ? 0 : 1; state->vs_Xv = (status & MST_PCMCIA_nVS2) ? 0 : 1; state->wrprot = 0; /* not available */ } static int mst_pcmcia_configure_socket(struct soc_pcmcia_socket *skt, const socket_state_t *state) { unsigned long power = 0; int ret = 0; switch (state->Vcc) { case 0: power |= MST_PCMCIA_PWR_VCC_0; break; case 33: power |= MST_PCMCIA_PWR_VCC_33; break; case 50: power |= MST_PCMCIA_PWR_VCC_50; break; default: printk(KERN_ERR "%s(): bad Vcc %u\n", __func__, state->Vcc); ret = -1; } switch (state->Vpp) { case 0: power |= MST_PCMCIA_PWR_VPP_0; break; case 120: power |= MST_PCMCIA_PWR_VPP_120; break; default: if(state->Vpp == state->Vcc) { power |= MST_PCMCIA_PWR_VPP_VCC; } else { printk(KERN_ERR "%s(): bad Vpp %u\n", __func__, state->Vpp); ret = -1; } } if (state->flags & SS_RESET) power |= MST_PCMCIA_RESET; switch (skt->nr) { case 0: MST_PCMCIA0 = power; break; case 1: MST_PCMCIA1 = power; break; default: ret = -1; } return ret; } static struct pcmcia_low_level mst_pcmcia_ops __initdata = { .owner = THIS_MODULE, .hw_init = mst_pcmcia_hw_init, .hw_shutdown = mst_pcmcia_hw_shutdown, .socket_state = mst_pcmcia_socket_state, .configure_socket = mst_pcmcia_configure_socket, .nr = 2, }; static struct platform_device *mst_pcmcia_device; static int __init mst_pcmcia_init(void) { int ret; if (!machine_is_mainstone()) return -ENODEV; mst_pcmcia_device = platform_device_alloc("pxa2xx-pcmcia", -1); if (!mst_pcmcia_device) return -ENOMEM; ret = platform_device_add_data(mst_pcmcia_device, &mst_pcmcia_ops, sizeof(mst_pcmcia_ops)); if (ret == 0) ret = platform_device_add(mst_pcmcia_device); if (ret) platform_device_put(mst_pcmcia_device); return ret; } static void __exit 
mst_pcmcia_exit(void) { platform_device_unregister(mst_pcmcia_device); } fs_initcall(mst_pcmcia_init); module_exit(mst_pcmcia_exit); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:pxa2xx-pcmcia");
gpl-2.0
lgeek/linux-2.6.35.3-imx53
drivers/scsi/bfa/bfa_itnim.c
903
25580
/* * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. * All rights reserved * www.brocade.com * * Linux driver for Brocade Fibre Channel Host Bus Adapter. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License (GPL) Version 2 as * published by the Free Software Foundation * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. */ #include <bfa.h> #include <bfa_fcpim.h> #include "bfa_fcpim_priv.h" BFA_TRC_FILE(HAL, ITNIM); #define BFA_ITNIM_FROM_TAG(_fcpim, _tag) \ ((_fcpim)->itnim_arr + ((_tag) & ((_fcpim)->num_itnims - 1))) #define bfa_fcpim_additn(__itnim) \ list_add_tail(&(__itnim)->qe, &(__itnim)->fcpim->itnim_q) #define bfa_fcpim_delitn(__itnim) do { \ bfa_assert(bfa_q_is_on_q(&(__itnim)->fcpim->itnim_q, __itnim)); \ list_del(&(__itnim)->qe); \ bfa_assert(list_empty(&(__itnim)->io_q)); \ bfa_assert(list_empty(&(__itnim)->io_cleanup_q)); \ bfa_assert(list_empty(&(__itnim)->pending_q)); \ } while (0) #define bfa_itnim_online_cb(__itnim) do { \ if ((__itnim)->bfa->fcs) \ bfa_cb_itnim_online((__itnim)->ditn); \ else { \ bfa_cb_queue((__itnim)->bfa, &(__itnim)->hcb_qe, \ __bfa_cb_itnim_online, (__itnim)); \ } \ } while (0) #define bfa_itnim_offline_cb(__itnim) do { \ if ((__itnim)->bfa->fcs) \ bfa_cb_itnim_offline((__itnim)->ditn); \ else { \ bfa_cb_queue((__itnim)->bfa, &(__itnim)->hcb_qe, \ __bfa_cb_itnim_offline, (__itnim)); \ } \ } while (0) #define bfa_itnim_sler_cb(__itnim) do { \ if ((__itnim)->bfa->fcs) \ bfa_cb_itnim_sler((__itnim)->ditn); \ else { \ bfa_cb_queue((__itnim)->bfa, &(__itnim)->hcb_qe, \ __bfa_cb_itnim_sler, (__itnim)); \ } \ } while (0) /* * forward declarations */ static void bfa_itnim_iocdisable_cleanup(struct bfa_itnim_s *itnim); static bfa_boolean_t 
bfa_itnim_send_fwcreate(struct bfa_itnim_s *itnim); static bfa_boolean_t bfa_itnim_send_fwdelete(struct bfa_itnim_s *itnim); static void bfa_itnim_cleanp_comp(void *itnim_cbarg); static void bfa_itnim_cleanup(struct bfa_itnim_s *itnim); static void __bfa_cb_itnim_online(void *cbarg, bfa_boolean_t complete); static void __bfa_cb_itnim_offline(void *cbarg, bfa_boolean_t complete); static void __bfa_cb_itnim_sler(void *cbarg, bfa_boolean_t complete); static void bfa_itnim_iotov_online(struct bfa_itnim_s *itnim); static void bfa_itnim_iotov_cleanup(struct bfa_itnim_s *itnim); static void bfa_itnim_iotov(void *itnim_arg); static void bfa_itnim_iotov_start(struct bfa_itnim_s *itnim); static void bfa_itnim_iotov_stop(struct bfa_itnim_s *itnim); static void bfa_itnim_iotov_delete(struct bfa_itnim_s *itnim); /** * bfa_itnim_sm BFA itnim state machine */ enum bfa_itnim_event { BFA_ITNIM_SM_CREATE = 1, /* itnim is created */ BFA_ITNIM_SM_ONLINE = 2, /* itnim is online */ BFA_ITNIM_SM_OFFLINE = 3, /* itnim is offline */ BFA_ITNIM_SM_FWRSP = 4, /* firmware response */ BFA_ITNIM_SM_DELETE = 5, /* deleting an existing itnim */ BFA_ITNIM_SM_CLEANUP = 6, /* IO cleanup completion */ BFA_ITNIM_SM_SLER = 7, /* second level error recovery */ BFA_ITNIM_SM_HWFAIL = 8, /* IOC h/w failure event */ BFA_ITNIM_SM_QRESUME = 9, /* queue space available */ }; static void bfa_itnim_sm_uninit(struct bfa_itnim_s *itnim, enum bfa_itnim_event event); static void bfa_itnim_sm_created(struct bfa_itnim_s *itnim, enum bfa_itnim_event event); static void bfa_itnim_sm_fwcreate(struct bfa_itnim_s *itnim, enum bfa_itnim_event event); static void bfa_itnim_sm_delete_pending(struct bfa_itnim_s *itnim, enum bfa_itnim_event event); static void bfa_itnim_sm_online(struct bfa_itnim_s *itnim, enum bfa_itnim_event event); static void bfa_itnim_sm_sler(struct bfa_itnim_s *itnim, enum bfa_itnim_event event); static void bfa_itnim_sm_cleanup_offline(struct bfa_itnim_s *itnim, enum bfa_itnim_event event); static void 
bfa_itnim_sm_cleanup_delete(struct bfa_itnim_s *itnim, enum bfa_itnim_event event); static void bfa_itnim_sm_fwdelete(struct bfa_itnim_s *itnim, enum bfa_itnim_event event); static void bfa_itnim_sm_offline(struct bfa_itnim_s *itnim, enum bfa_itnim_event event); static void bfa_itnim_sm_iocdisable(struct bfa_itnim_s *itnim, enum bfa_itnim_event event); static void bfa_itnim_sm_deleting(struct bfa_itnim_s *itnim, enum bfa_itnim_event event); static void bfa_itnim_sm_fwcreate_qfull(struct bfa_itnim_s *itnim, enum bfa_itnim_event event); static void bfa_itnim_sm_fwdelete_qfull(struct bfa_itnim_s *itnim, enum bfa_itnim_event event); static void bfa_itnim_sm_deleting_qfull(struct bfa_itnim_s *itnim, enum bfa_itnim_event event); /** * Beginning/unallocated state - no events expected. */ static void bfa_itnim_sm_uninit(struct bfa_itnim_s *itnim, enum bfa_itnim_event event) { bfa_trc(itnim->bfa, itnim->rport->rport_tag); bfa_trc(itnim->bfa, event); switch (event) { case BFA_ITNIM_SM_CREATE: bfa_sm_set_state(itnim, bfa_itnim_sm_created); itnim->is_online = BFA_FALSE; bfa_fcpim_additn(itnim); break; default: bfa_sm_fault(itnim->bfa, event); } } /** * Beginning state, only online event expected. */ static void bfa_itnim_sm_created(struct bfa_itnim_s *itnim, enum bfa_itnim_event event) { bfa_trc(itnim->bfa, itnim->rport->rport_tag); bfa_trc(itnim->bfa, event); switch (event) { case BFA_ITNIM_SM_ONLINE: if (bfa_itnim_send_fwcreate(itnim)) bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate); else bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate_qfull); break; case BFA_ITNIM_SM_DELETE: bfa_sm_set_state(itnim, bfa_itnim_sm_uninit); bfa_fcpim_delitn(itnim); break; case BFA_ITNIM_SM_HWFAIL: bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable); break; default: bfa_sm_fault(itnim->bfa, event); } } /** * Waiting for itnim create response from firmware. 
*/ static void bfa_itnim_sm_fwcreate(struct bfa_itnim_s *itnim, enum bfa_itnim_event event) { bfa_trc(itnim->bfa, itnim->rport->rport_tag); bfa_trc(itnim->bfa, event); switch (event) { case BFA_ITNIM_SM_FWRSP: bfa_sm_set_state(itnim, bfa_itnim_sm_online); itnim->is_online = BFA_TRUE; bfa_itnim_iotov_online(itnim); bfa_itnim_online_cb(itnim); break; case BFA_ITNIM_SM_DELETE: bfa_sm_set_state(itnim, bfa_itnim_sm_delete_pending); break; case BFA_ITNIM_SM_OFFLINE: if (bfa_itnim_send_fwdelete(itnim)) bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete); else bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete_qfull); break; case BFA_ITNIM_SM_HWFAIL: bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable); break; default: bfa_sm_fault(itnim->bfa, event); } } static void bfa_itnim_sm_fwcreate_qfull(struct bfa_itnim_s *itnim, enum bfa_itnim_event event) { bfa_trc(itnim->bfa, itnim->rport->rport_tag); bfa_trc(itnim->bfa, event); switch (event) { case BFA_ITNIM_SM_QRESUME: bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate); bfa_itnim_send_fwcreate(itnim); break; case BFA_ITNIM_SM_DELETE: bfa_sm_set_state(itnim, bfa_itnim_sm_uninit); bfa_reqq_wcancel(&itnim->reqq_wait); bfa_fcpim_delitn(itnim); break; case BFA_ITNIM_SM_OFFLINE: bfa_sm_set_state(itnim, bfa_itnim_sm_offline); bfa_reqq_wcancel(&itnim->reqq_wait); bfa_itnim_offline_cb(itnim); break; case BFA_ITNIM_SM_HWFAIL: bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable); bfa_reqq_wcancel(&itnim->reqq_wait); break; default: bfa_sm_fault(itnim->bfa, event); } } /** * Waiting for itnim create response from firmware, a delete is pending. 
*/ static void bfa_itnim_sm_delete_pending(struct bfa_itnim_s *itnim, enum bfa_itnim_event event) { bfa_trc(itnim->bfa, itnim->rport->rport_tag); bfa_trc(itnim->bfa, event); switch (event) { case BFA_ITNIM_SM_FWRSP: if (bfa_itnim_send_fwdelete(itnim)) bfa_sm_set_state(itnim, bfa_itnim_sm_deleting); else bfa_sm_set_state(itnim, bfa_itnim_sm_deleting_qfull); break; case BFA_ITNIM_SM_HWFAIL: bfa_sm_set_state(itnim, bfa_itnim_sm_uninit); bfa_fcpim_delitn(itnim); break; default: bfa_sm_fault(itnim->bfa, event); } } /** * Online state - normal parking state. */ static void bfa_itnim_sm_online(struct bfa_itnim_s *itnim, enum bfa_itnim_event event) { bfa_trc(itnim->bfa, itnim->rport->rport_tag); bfa_trc(itnim->bfa, event); switch (event) { case BFA_ITNIM_SM_OFFLINE: bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_offline); itnim->is_online = BFA_FALSE; bfa_itnim_iotov_start(itnim); bfa_itnim_cleanup(itnim); break; case BFA_ITNIM_SM_DELETE: bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_delete); itnim->is_online = BFA_FALSE; bfa_itnim_cleanup(itnim); break; case BFA_ITNIM_SM_SLER: bfa_sm_set_state(itnim, bfa_itnim_sm_sler); itnim->is_online = BFA_FALSE; bfa_itnim_iotov_start(itnim); bfa_itnim_sler_cb(itnim); break; case BFA_ITNIM_SM_HWFAIL: bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable); itnim->is_online = BFA_FALSE; bfa_itnim_iotov_start(itnim); bfa_itnim_iocdisable_cleanup(itnim); break; default: bfa_sm_fault(itnim->bfa, event); } } /** * Second level error recovery need. 
*/ static void bfa_itnim_sm_sler(struct bfa_itnim_s *itnim, enum bfa_itnim_event event) { bfa_trc(itnim->bfa, itnim->rport->rport_tag); bfa_trc(itnim->bfa, event); switch (event) { case BFA_ITNIM_SM_OFFLINE: bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_offline); bfa_itnim_cleanup(itnim); break; case BFA_ITNIM_SM_DELETE: bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_delete); bfa_itnim_cleanup(itnim); bfa_itnim_iotov_delete(itnim); break; case BFA_ITNIM_SM_HWFAIL: bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable); bfa_itnim_iocdisable_cleanup(itnim); break; default: bfa_sm_fault(itnim->bfa, event); } } /** * Going offline. Waiting for active IO cleanup. */ static void bfa_itnim_sm_cleanup_offline(struct bfa_itnim_s *itnim, enum bfa_itnim_event event) { bfa_trc(itnim->bfa, itnim->rport->rport_tag); bfa_trc(itnim->bfa, event); switch (event) { case BFA_ITNIM_SM_CLEANUP: if (bfa_itnim_send_fwdelete(itnim)) bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete); else bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete_qfull); break; case BFA_ITNIM_SM_DELETE: bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_delete); bfa_itnim_iotov_delete(itnim); break; case BFA_ITNIM_SM_HWFAIL: bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable); bfa_itnim_iocdisable_cleanup(itnim); bfa_itnim_offline_cb(itnim); break; case BFA_ITNIM_SM_SLER: break; default: bfa_sm_fault(itnim->bfa, event); } } /** * Deleting itnim. Waiting for active IO cleanup. */ static void bfa_itnim_sm_cleanup_delete(struct bfa_itnim_s *itnim, enum bfa_itnim_event event) { bfa_trc(itnim->bfa, itnim->rport->rport_tag); bfa_trc(itnim->bfa, event); switch (event) { case BFA_ITNIM_SM_CLEANUP: if (bfa_itnim_send_fwdelete(itnim)) bfa_sm_set_state(itnim, bfa_itnim_sm_deleting); else bfa_sm_set_state(itnim, bfa_itnim_sm_deleting_qfull); break; case BFA_ITNIM_SM_HWFAIL: bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable); bfa_itnim_iocdisable_cleanup(itnim); break; default: bfa_sm_fault(itnim->bfa, event); } } /** * Rport offline. 
Fimrware itnim is being deleted - awaiting f/w response. */ static void bfa_itnim_sm_fwdelete(struct bfa_itnim_s *itnim, enum bfa_itnim_event event) { bfa_trc(itnim->bfa, itnim->rport->rport_tag); bfa_trc(itnim->bfa, event); switch (event) { case BFA_ITNIM_SM_FWRSP: bfa_sm_set_state(itnim, bfa_itnim_sm_offline); bfa_itnim_offline_cb(itnim); break; case BFA_ITNIM_SM_DELETE: bfa_sm_set_state(itnim, bfa_itnim_sm_deleting); break; case BFA_ITNIM_SM_HWFAIL: bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable); bfa_itnim_offline_cb(itnim); break; default: bfa_sm_fault(itnim->bfa, event); } } static void bfa_itnim_sm_fwdelete_qfull(struct bfa_itnim_s *itnim, enum bfa_itnim_event event) { bfa_trc(itnim->bfa, itnim->rport->rport_tag); bfa_trc(itnim->bfa, event); switch (event) { case BFA_ITNIM_SM_QRESUME: bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete); bfa_itnim_send_fwdelete(itnim); break; case BFA_ITNIM_SM_DELETE: bfa_sm_set_state(itnim, bfa_itnim_sm_deleting_qfull); break; case BFA_ITNIM_SM_HWFAIL: bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable); bfa_reqq_wcancel(&itnim->reqq_wait); bfa_itnim_offline_cb(itnim); break; default: bfa_sm_fault(itnim->bfa, event); } } /** * Offline state. */ static void bfa_itnim_sm_offline(struct bfa_itnim_s *itnim, enum bfa_itnim_event event) { bfa_trc(itnim->bfa, itnim->rport->rport_tag); bfa_trc(itnim->bfa, event); switch (event) { case BFA_ITNIM_SM_DELETE: bfa_sm_set_state(itnim, bfa_itnim_sm_uninit); bfa_itnim_iotov_delete(itnim); bfa_fcpim_delitn(itnim); break; case BFA_ITNIM_SM_ONLINE: if (bfa_itnim_send_fwcreate(itnim)) bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate); else bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate_qfull); break; case BFA_ITNIM_SM_HWFAIL: bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable); break; default: bfa_sm_fault(itnim->bfa, event); } } /** * IOC h/w failed state. 
*/ static void bfa_itnim_sm_iocdisable(struct bfa_itnim_s *itnim, enum bfa_itnim_event event) { bfa_trc(itnim->bfa, itnim->rport->rport_tag); bfa_trc(itnim->bfa, event); switch (event) { case BFA_ITNIM_SM_DELETE: bfa_sm_set_state(itnim, bfa_itnim_sm_uninit); bfa_itnim_iotov_delete(itnim); bfa_fcpim_delitn(itnim); break; case BFA_ITNIM_SM_OFFLINE: bfa_itnim_offline_cb(itnim); break; case BFA_ITNIM_SM_ONLINE: if (bfa_itnim_send_fwcreate(itnim)) bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate); else bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate_qfull); break; case BFA_ITNIM_SM_HWFAIL: break; default: bfa_sm_fault(itnim->bfa, event); } } /** * Itnim is deleted, waiting for firmware response to delete. */ static void bfa_itnim_sm_deleting(struct bfa_itnim_s *itnim, enum bfa_itnim_event event) { bfa_trc(itnim->bfa, itnim->rport->rport_tag); bfa_trc(itnim->bfa, event); switch (event) { case BFA_ITNIM_SM_FWRSP: case BFA_ITNIM_SM_HWFAIL: bfa_sm_set_state(itnim, bfa_itnim_sm_uninit); bfa_fcpim_delitn(itnim); break; default: bfa_sm_fault(itnim->bfa, event); } } static void bfa_itnim_sm_deleting_qfull(struct bfa_itnim_s *itnim, enum bfa_itnim_event event) { bfa_trc(itnim->bfa, itnim->rport->rport_tag); bfa_trc(itnim->bfa, event); switch (event) { case BFA_ITNIM_SM_QRESUME: bfa_sm_set_state(itnim, bfa_itnim_sm_deleting); bfa_itnim_send_fwdelete(itnim); break; case BFA_ITNIM_SM_HWFAIL: bfa_sm_set_state(itnim, bfa_itnim_sm_uninit); bfa_reqq_wcancel(&itnim->reqq_wait); bfa_fcpim_delitn(itnim); break; default: bfa_sm_fault(itnim->bfa, event); } } /** * bfa_itnim_private */ /** * Initiate cleanup of all IOs on an IOC failure. 
*/ static void bfa_itnim_iocdisable_cleanup(struct bfa_itnim_s *itnim) { struct bfa_tskim_s *tskim; struct bfa_ioim_s *ioim; struct list_head *qe, *qen; list_for_each_safe(qe, qen, &itnim->tsk_q) { tskim = (struct bfa_tskim_s *) qe; bfa_tskim_iocdisable(tskim); } list_for_each_safe(qe, qen, &itnim->io_q) { ioim = (struct bfa_ioim_s *) qe; bfa_ioim_iocdisable(ioim); } /** * For IO request in pending queue, we pretend an early timeout. */ list_for_each_safe(qe, qen, &itnim->pending_q) { ioim = (struct bfa_ioim_s *) qe; bfa_ioim_tov(ioim); } list_for_each_safe(qe, qen, &itnim->io_cleanup_q) { ioim = (struct bfa_ioim_s *) qe; bfa_ioim_iocdisable(ioim); } } /** * IO cleanup completion */ static void bfa_itnim_cleanp_comp(void *itnim_cbarg) { struct bfa_itnim_s *itnim = itnim_cbarg; bfa_stats(itnim, cleanup_comps); bfa_sm_send_event(itnim, BFA_ITNIM_SM_CLEANUP); } /** * Initiate cleanup of all IOs. */ static void bfa_itnim_cleanup(struct bfa_itnim_s *itnim) { struct bfa_ioim_s *ioim; struct bfa_tskim_s *tskim; struct list_head *qe, *qen; bfa_wc_init(&itnim->wc, bfa_itnim_cleanp_comp, itnim); list_for_each_safe(qe, qen, &itnim->io_q) { ioim = (struct bfa_ioim_s *) qe; /** * Move IO to a cleanup queue from active queue so that a later * TM will not pickup this IO. 
*/ list_del(&ioim->qe); list_add_tail(&ioim->qe, &itnim->io_cleanup_q); bfa_wc_up(&itnim->wc); bfa_ioim_cleanup(ioim); } list_for_each_safe(qe, qen, &itnim->tsk_q) { tskim = (struct bfa_tskim_s *) qe; bfa_wc_up(&itnim->wc); bfa_tskim_cleanup(tskim); } bfa_wc_wait(&itnim->wc); } static void __bfa_cb_itnim_online(void *cbarg, bfa_boolean_t complete) { struct bfa_itnim_s *itnim = cbarg; if (complete) bfa_cb_itnim_online(itnim->ditn); } static void __bfa_cb_itnim_offline(void *cbarg, bfa_boolean_t complete) { struct bfa_itnim_s *itnim = cbarg; if (complete) bfa_cb_itnim_offline(itnim->ditn); } static void __bfa_cb_itnim_sler(void *cbarg, bfa_boolean_t complete) { struct bfa_itnim_s *itnim = cbarg; if (complete) bfa_cb_itnim_sler(itnim->ditn); } /** * Call to resume any I/O requests waiting for room in request queue. */ static void bfa_itnim_qresume(void *cbarg) { struct bfa_itnim_s *itnim = cbarg; bfa_sm_send_event(itnim, BFA_ITNIM_SM_QRESUME); } /** * bfa_itnim_public */ void bfa_itnim_iodone(struct bfa_itnim_s *itnim) { bfa_wc_down(&itnim->wc); } void bfa_itnim_tskdone(struct bfa_itnim_s *itnim) { bfa_wc_down(&itnim->wc); } void bfa_itnim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len, u32 *dm_len) { /** * ITN memory */ *km_len += cfg->fwcfg.num_rports * sizeof(struct bfa_itnim_s); } void bfa_itnim_attach(struct bfa_fcpim_mod_s *fcpim, struct bfa_meminfo_s *minfo) { struct bfa_s *bfa = fcpim->bfa; struct bfa_itnim_s *itnim; int i; INIT_LIST_HEAD(&fcpim->itnim_q); itnim = (struct bfa_itnim_s *) bfa_meminfo_kva(minfo); fcpim->itnim_arr = itnim; for (i = 0; i < fcpim->num_itnims; i++, itnim++) { bfa_os_memset(itnim, 0, sizeof(struct bfa_itnim_s)); itnim->bfa = bfa; itnim->fcpim = fcpim; itnim->reqq = BFA_REQQ_QOS_LO; itnim->rport = BFA_RPORT_FROM_TAG(bfa, i); itnim->iotov_active = BFA_FALSE; bfa_reqq_winit(&itnim->reqq_wait, bfa_itnim_qresume, itnim); INIT_LIST_HEAD(&itnim->io_q); INIT_LIST_HEAD(&itnim->io_cleanup_q); INIT_LIST_HEAD(&itnim->pending_q); 
INIT_LIST_HEAD(&itnim->tsk_q); INIT_LIST_HEAD(&itnim->delay_comp_q); bfa_sm_set_state(itnim, bfa_itnim_sm_uninit); } bfa_meminfo_kva(minfo) = (u8 *) itnim; } void bfa_itnim_iocdisable(struct bfa_itnim_s *itnim) { bfa_stats(itnim, ioc_disabled); bfa_sm_send_event(itnim, BFA_ITNIM_SM_HWFAIL); } static bfa_boolean_t bfa_itnim_send_fwcreate(struct bfa_itnim_s *itnim) { struct bfi_itnim_create_req_s *m; itnim->msg_no++; /** * check for room in queue to send request now */ m = bfa_reqq_next(itnim->bfa, itnim->reqq); if (!m) { bfa_reqq_wait(itnim->bfa, itnim->reqq, &itnim->reqq_wait); return BFA_FALSE; } bfi_h2i_set(m->mh, BFI_MC_ITNIM, BFI_ITNIM_H2I_CREATE_REQ, bfa_lpuid(itnim->bfa)); m->fw_handle = itnim->rport->fw_handle; m->class = FC_CLASS_3; m->seq_rec = itnim->seq_rec; m->msg_no = itnim->msg_no; /** * queue I/O message to firmware */ bfa_reqq_produce(itnim->bfa, itnim->reqq); return BFA_TRUE; } static bfa_boolean_t bfa_itnim_send_fwdelete(struct bfa_itnim_s *itnim) { struct bfi_itnim_delete_req_s *m; /** * check for room in queue to send request now */ m = bfa_reqq_next(itnim->bfa, itnim->reqq); if (!m) { bfa_reqq_wait(itnim->bfa, itnim->reqq, &itnim->reqq_wait); return BFA_FALSE; } bfi_h2i_set(m->mh, BFI_MC_ITNIM, BFI_ITNIM_H2I_DELETE_REQ, bfa_lpuid(itnim->bfa)); m->fw_handle = itnim->rport->fw_handle; /** * queue I/O message to firmware */ bfa_reqq_produce(itnim->bfa, itnim->reqq); return BFA_TRUE; } /** * Cleanup all pending failed inflight requests. */ static void bfa_itnim_delayed_comp(struct bfa_itnim_s *itnim, bfa_boolean_t iotov) { struct bfa_ioim_s *ioim; struct list_head *qe, *qen; list_for_each_safe(qe, qen, &itnim->delay_comp_q) { ioim = (struct bfa_ioim_s *)qe; bfa_ioim_delayed_comp(ioim, iotov); } } /** * Start all pending IO requests. 
*/ static void bfa_itnim_iotov_online(struct bfa_itnim_s *itnim) { struct bfa_ioim_s *ioim; bfa_itnim_iotov_stop(itnim); /** * Abort all inflight IO requests in the queue */ bfa_itnim_delayed_comp(itnim, BFA_FALSE); /** * Start all pending IO requests. */ while (!list_empty(&itnim->pending_q)) { bfa_q_deq(&itnim->pending_q, &ioim); list_add_tail(&ioim->qe, &itnim->io_q); bfa_ioim_start(ioim); } } /** * Fail all pending IO requests */ static void bfa_itnim_iotov_cleanup(struct bfa_itnim_s *itnim) { struct bfa_ioim_s *ioim; /** * Fail all inflight IO requests in the queue */ bfa_itnim_delayed_comp(itnim, BFA_TRUE); /** * Fail any pending IO requests. */ while (!list_empty(&itnim->pending_q)) { bfa_q_deq(&itnim->pending_q, &ioim); list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q); bfa_ioim_tov(ioim); } } /** * IO TOV timer callback. Fail any pending IO requests. */ static void bfa_itnim_iotov(void *itnim_arg) { struct bfa_itnim_s *itnim = itnim_arg; itnim->iotov_active = BFA_FALSE; bfa_cb_itnim_tov_begin(itnim->ditn); bfa_itnim_iotov_cleanup(itnim); bfa_cb_itnim_tov(itnim->ditn); } /** * Start IO TOV timer for failing back pending IO requests in offline state. */ static void bfa_itnim_iotov_start(struct bfa_itnim_s *itnim) { if (itnim->fcpim->path_tov > 0) { itnim->iotov_active = BFA_TRUE; bfa_assert(bfa_itnim_hold_io(itnim)); bfa_timer_start(itnim->bfa, &itnim->timer, bfa_itnim_iotov, itnim, itnim->fcpim->path_tov); } } /** * Stop IO TOV timer. */ static void bfa_itnim_iotov_stop(struct bfa_itnim_s *itnim) { if (itnim->iotov_active) { itnim->iotov_active = BFA_FALSE; bfa_timer_stop(&itnim->timer); } } /** * Stop IO TOV timer. 
*/ static void bfa_itnim_iotov_delete(struct bfa_itnim_s *itnim) { bfa_boolean_t pathtov_active = BFA_FALSE; if (itnim->iotov_active) pathtov_active = BFA_TRUE; bfa_itnim_iotov_stop(itnim); if (pathtov_active) bfa_cb_itnim_tov_begin(itnim->ditn); bfa_itnim_iotov_cleanup(itnim); if (pathtov_active) bfa_cb_itnim_tov(itnim->ditn); } /** * bfa_itnim_public */ /** * Itnim interrupt processing. */ void bfa_itnim_isr(struct bfa_s *bfa, struct bfi_msg_s *m) { struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa); union bfi_itnim_i2h_msg_u msg; struct bfa_itnim_s *itnim; bfa_trc(bfa, m->mhdr.msg_id); msg.msg = m; switch (m->mhdr.msg_id) { case BFI_ITNIM_I2H_CREATE_RSP: itnim = BFA_ITNIM_FROM_TAG(fcpim, msg.create_rsp->bfa_handle); bfa_assert(msg.create_rsp->status == BFA_STATUS_OK); bfa_stats(itnim, create_comps); bfa_sm_send_event(itnim, BFA_ITNIM_SM_FWRSP); break; case BFI_ITNIM_I2H_DELETE_RSP: itnim = BFA_ITNIM_FROM_TAG(fcpim, msg.delete_rsp->bfa_handle); bfa_assert(msg.delete_rsp->status == BFA_STATUS_OK); bfa_stats(itnim, delete_comps); bfa_sm_send_event(itnim, BFA_ITNIM_SM_FWRSP); break; case BFI_ITNIM_I2H_SLER_EVENT: itnim = BFA_ITNIM_FROM_TAG(fcpim, msg.sler_event->bfa_handle); bfa_stats(itnim, sler_events); bfa_sm_send_event(itnim, BFA_ITNIM_SM_SLER); break; default: bfa_trc(bfa, m->mhdr.msg_id); bfa_assert(0); } } /** * bfa_itnim_api */ struct bfa_itnim_s * bfa_itnim_create(struct bfa_s *bfa, struct bfa_rport_s *rport, void *ditn) { struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa); struct bfa_itnim_s *itnim; itnim = BFA_ITNIM_FROM_TAG(fcpim, rport->rport_tag); bfa_assert(itnim->rport == rport); itnim->ditn = ditn; bfa_stats(itnim, creates); bfa_sm_send_event(itnim, BFA_ITNIM_SM_CREATE); return itnim; } void bfa_itnim_delete(struct bfa_itnim_s *itnim) { bfa_stats(itnim, deletes); bfa_sm_send_event(itnim, BFA_ITNIM_SM_DELETE); } void bfa_itnim_online(struct bfa_itnim_s *itnim, bfa_boolean_t seq_rec) { itnim->seq_rec = seq_rec; bfa_stats(itnim, onlines); 
bfa_sm_send_event(itnim, BFA_ITNIM_SM_ONLINE); } void bfa_itnim_offline(struct bfa_itnim_s *itnim) { bfa_stats(itnim, offlines); bfa_sm_send_event(itnim, BFA_ITNIM_SM_OFFLINE); } /** * Return true if itnim is considered offline for holding off IO request. * IO is not held if itnim is being deleted. */ bfa_boolean_t bfa_itnim_hold_io(struct bfa_itnim_s *itnim) { return itnim->fcpim->path_tov && itnim->iotov_active && (bfa_sm_cmp_state(itnim, bfa_itnim_sm_fwcreate) || bfa_sm_cmp_state(itnim, bfa_itnim_sm_sler) || bfa_sm_cmp_state(itnim, bfa_itnim_sm_cleanup_offline) || bfa_sm_cmp_state(itnim, bfa_itnim_sm_fwdelete) || bfa_sm_cmp_state(itnim, bfa_itnim_sm_offline) || bfa_sm_cmp_state(itnim, bfa_itnim_sm_iocdisable)) ; } void bfa_itnim_get_stats(struct bfa_itnim_s *itnim, struct bfa_itnim_hal_stats_s *stats) { *stats = itnim->stats; } void bfa_itnim_clear_stats(struct bfa_itnim_s *itnim) { bfa_os_memset(&itnim->stats, 0, sizeof(itnim->stats)); }
gpl-2.0
Motorola-CyanogenMod/android_kernel_motorola_msm8916
arch/arm/mach-msm/keypad-surf-ffa.c
1159
8476
/* * Copyright (C) 2007 Google, Inc. * Copyright (c) 2008-2009, 2013, The Linux Foundation. All rights reserved. * Author: Brian Swetland <swetland@google.com> * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/platform_device.h> #include <linux/gpio_event.h> /* don't turn this on without updating the ffa support */ #define SCAN_FUNCTION_KEYS 0 /* FFA: 36: KEYSENSE_N(0) 37: KEYSENSE_N(1) 38: KEYSENSE_N(2) 39: KEYSENSE_N(3) 40: KEYSENSE_N(4) 31: KYPD_17 32: KYPD_15 33: KYPD_13 34: KYPD_11 35: KYPD_9 41: KYPD_MEMO */ static unsigned int keypad_row_gpios[] = { 31, 32, 33, 34, 35, 41 #if SCAN_FUNCTION_KEYS , 42 #endif }; static unsigned int keypad_col_gpios[] = { 36, 37, 38, 39, 40 }; static unsigned int keypad_row_gpios_8k_ffa[] = {31, 32, 33, 34, 35, 36}; static unsigned int keypad_col_gpios_8k_ffa[] = {38, 39, 40, 41, 42}; #define KEYMAP_INDEX(row, col) ((row)*ARRAY_SIZE(keypad_col_gpios) + (col)) #define FFA_8K_KEYMAP_INDEX(row, col) ((row)* \ ARRAY_SIZE(keypad_col_gpios_8k_ffa) + (col)) static const unsigned short keypad_keymap_surf[ARRAY_SIZE(keypad_col_gpios) * ARRAY_SIZE(keypad_row_gpios)] = { [KEYMAP_INDEX(0, 0)] = KEY_5, [KEYMAP_INDEX(0, 1)] = KEY_9, [KEYMAP_INDEX(0, 2)] = 229, /* SOFT1 */ [KEYMAP_INDEX(0, 3)] = KEY_6, [KEYMAP_INDEX(0, 4)] = KEY_LEFT, [KEYMAP_INDEX(1, 0)] = KEY_0, [KEYMAP_INDEX(1, 1)] = KEY_RIGHT, [KEYMAP_INDEX(1, 2)] = KEY_1, [KEYMAP_INDEX(1, 3)] = 228, /* KEY_SHARP */ [KEYMAP_INDEX(1, 4)] = KEY_SEND, [KEYMAP_INDEX(2, 0)] = KEY_VOLUMEUP, [KEYMAP_INDEX(2, 1)] = KEY_HOME, /* FA */ [KEYMAP_INDEX(2, 2)] = KEY_F8, /* QCHT */ 
[KEYMAP_INDEX(2, 3)] = KEY_F6, /* R+ */ [KEYMAP_INDEX(2, 4)] = KEY_F7, /* R- */ [KEYMAP_INDEX(3, 0)] = KEY_UP, [KEYMAP_INDEX(3, 1)] = KEY_CLEAR, [KEYMAP_INDEX(3, 2)] = KEY_4, [KEYMAP_INDEX(3, 3)] = KEY_MUTE, /* SPKR */ [KEYMAP_INDEX(3, 4)] = KEY_2, [KEYMAP_INDEX(4, 0)] = 230, /* SOFT2 */ [KEYMAP_INDEX(4, 1)] = 232, /* KEY_CENTER */ [KEYMAP_INDEX(4, 2)] = KEY_DOWN, [KEYMAP_INDEX(4, 3)] = KEY_BACK, /* FB */ [KEYMAP_INDEX(4, 4)] = KEY_8, [KEYMAP_INDEX(5, 0)] = KEY_VOLUMEDOWN, [KEYMAP_INDEX(5, 1)] = 227, /* KEY_STAR */ [KEYMAP_INDEX(5, 2)] = KEY_MAIL, /* MESG */ [KEYMAP_INDEX(5, 3)] = KEY_3, [KEYMAP_INDEX(5, 4)] = KEY_7, #if SCAN_FUNCTION_KEYS [KEYMAP_INDEX(6, 0)] = KEY_F5, [KEYMAP_INDEX(6, 1)] = KEY_F4, [KEYMAP_INDEX(6, 2)] = KEY_F3, [KEYMAP_INDEX(6, 3)] = KEY_F2, [KEYMAP_INDEX(6, 4)] = KEY_F1 #endif }; static const unsigned short keypad_keymap_ffa[ARRAY_SIZE(keypad_col_gpios) * ARRAY_SIZE(keypad_row_gpios)] = { /*[KEYMAP_INDEX(0, 0)] = ,*/ /*[KEYMAP_INDEX(0, 1)] = ,*/ [KEYMAP_INDEX(0, 2)] = KEY_1, [KEYMAP_INDEX(0, 3)] = KEY_SEND, [KEYMAP_INDEX(0, 4)] = KEY_LEFT, [KEYMAP_INDEX(1, 0)] = KEY_3, [KEYMAP_INDEX(1, 1)] = KEY_RIGHT, [KEYMAP_INDEX(1, 2)] = KEY_VOLUMEUP, /*[KEYMAP_INDEX(1, 3)] = ,*/ [KEYMAP_INDEX(1, 4)] = KEY_6, [KEYMAP_INDEX(2, 0)] = KEY_HOME, /* A */ [KEYMAP_INDEX(2, 1)] = KEY_BACK, /* B */ [KEYMAP_INDEX(2, 2)] = KEY_0, [KEYMAP_INDEX(2, 3)] = 228, /* KEY_SHARP */ [KEYMAP_INDEX(2, 4)] = KEY_9, [KEYMAP_INDEX(3, 0)] = KEY_UP, [KEYMAP_INDEX(3, 1)] = 232, /* KEY_CENTER */ /* i */ [KEYMAP_INDEX(3, 2)] = KEY_4, /*[KEYMAP_INDEX(3, 3)] = ,*/ [KEYMAP_INDEX(3, 4)] = KEY_2, [KEYMAP_INDEX(4, 0)] = KEY_VOLUMEDOWN, [KEYMAP_INDEX(4, 1)] = KEY_SOUND, [KEYMAP_INDEX(4, 2)] = KEY_DOWN, [KEYMAP_INDEX(4, 3)] = KEY_8, [KEYMAP_INDEX(4, 4)] = KEY_5, /*[KEYMAP_INDEX(5, 0)] = ,*/ [KEYMAP_INDEX(5, 1)] = 227, /* KEY_STAR */ [KEYMAP_INDEX(5, 2)] = 230, /*SOFT2*/ /* 2 */ [KEYMAP_INDEX(5, 3)] = KEY_MENU, /* 1 */ [KEYMAP_INDEX(5, 4)] = KEY_7, }; #define QSD8x50_FFA_KEYMAP_SIZE 
(ARRAY_SIZE(keypad_col_gpios_8k_ffa) * \ ARRAY_SIZE(keypad_row_gpios_8k_ffa)) static const unsigned short keypad_keymap_8k_ffa[QSD8x50_FFA_KEYMAP_SIZE] = { [FFA_8K_KEYMAP_INDEX(0, 0)] = KEY_VOLUMEDOWN, /*[KEYMAP_INDEX(0, 1)] = ,*/ [FFA_8K_KEYMAP_INDEX(0, 2)] = KEY_DOWN, [FFA_8K_KEYMAP_INDEX(0, 3)] = KEY_8, [FFA_8K_KEYMAP_INDEX(0, 4)] = KEY_5, [FFA_8K_KEYMAP_INDEX(1, 0)] = KEY_UP, [FFA_8K_KEYMAP_INDEX(1, 1)] = KEY_CLEAR, [FFA_8K_KEYMAP_INDEX(1, 2)] = KEY_4, /*[KEYMAP_INDEX(1, 3)] = ,*/ [FFA_8K_KEYMAP_INDEX(1, 4)] = KEY_2, [FFA_8K_KEYMAP_INDEX(2, 0)] = KEY_HOME, /* A */ [FFA_8K_KEYMAP_INDEX(2, 1)] = KEY_BACK, /* B */ [FFA_8K_KEYMAP_INDEX(2, 2)] = KEY_0, [FFA_8K_KEYMAP_INDEX(2, 3)] = 228, /* KEY_SHARP */ [FFA_8K_KEYMAP_INDEX(2, 4)] = KEY_9, [FFA_8K_KEYMAP_INDEX(3, 0)] = KEY_3, [FFA_8K_KEYMAP_INDEX(3, 1)] = KEY_RIGHT, [FFA_8K_KEYMAP_INDEX(3, 2)] = KEY_VOLUMEUP, /*[KEYMAP_INDEX(3, 3)] = ,*/ [FFA_8K_KEYMAP_INDEX(3, 4)] = KEY_6, [FFA_8K_KEYMAP_INDEX(4, 0)] = 232, /* OK */ [FFA_8K_KEYMAP_INDEX(4, 1)] = KEY_SOUND, [FFA_8K_KEYMAP_INDEX(4, 2)] = KEY_1, [FFA_8K_KEYMAP_INDEX(4, 3)] = KEY_SEND, [FFA_8K_KEYMAP_INDEX(4, 4)] = KEY_LEFT, /*[KEYMAP_INDEX(5, 0)] = ,*/ [FFA_8K_KEYMAP_INDEX(5, 1)] = 227, /* KEY_STAR */ [FFA_8K_KEYMAP_INDEX(5, 2)] = 230, /*SOFT2*/ /* 2 */ [FFA_8K_KEYMAP_INDEX(5, 3)] = 229, /* 1 */ [FFA_8K_KEYMAP_INDEX(5, 4)] = KEY_7, }; /* SURF keypad platform device information */ static struct gpio_event_matrix_info surf_keypad_matrix_info = { .info.func = gpio_event_matrix_func, .keymap = keypad_keymap_surf, .output_gpios = keypad_row_gpios, .input_gpios = keypad_col_gpios, .noutputs = ARRAY_SIZE(keypad_row_gpios), .ninputs = ARRAY_SIZE(keypad_col_gpios), .settle_time.tv64 = 40 * NSEC_PER_USEC, .poll_time.tv64 = 20 * NSEC_PER_MSEC, .flags = GPIOKPF_LEVEL_TRIGGERED_IRQ | GPIOKPF_DRIVE_INACTIVE | GPIOKPF_PRINT_UNMAPPED_KEYS }; static struct gpio_event_info *surf_keypad_info[] = { &surf_keypad_matrix_info.info }; static struct gpio_event_platform_data surf_keypad_data = 
{ .name = "surf_keypad", .info = surf_keypad_info, .info_count = ARRAY_SIZE(surf_keypad_info) }; struct platform_device keypad_device_surf = { .name = GPIO_EVENT_DEV_NAME, .id = -1, .dev = { .platform_data = &surf_keypad_data, }, }; /* 8k FFA keypad platform device information */ static struct gpio_event_matrix_info keypad_matrix_info_8k_ffa = { .info.func = gpio_event_matrix_func, .keymap = keypad_keymap_8k_ffa, .output_gpios = keypad_row_gpios_8k_ffa, .input_gpios = keypad_col_gpios_8k_ffa, .noutputs = ARRAY_SIZE(keypad_row_gpios_8k_ffa), .ninputs = ARRAY_SIZE(keypad_col_gpios_8k_ffa), .settle_time.tv64 = 40 * NSEC_PER_USEC, .poll_time.tv64 = 20 * NSEC_PER_MSEC, .flags = GPIOKPF_LEVEL_TRIGGERED_IRQ | GPIOKPF_DRIVE_INACTIVE | GPIOKPF_PRINT_UNMAPPED_KEYS }; static struct gpio_event_info *keypad_info_8k_ffa[] = { &keypad_matrix_info_8k_ffa.info }; static struct gpio_event_platform_data keypad_data_8k_ffa = { .name = "8k_ffa_keypad", .info = keypad_info_8k_ffa, .info_count = ARRAY_SIZE(keypad_info_8k_ffa) }; struct platform_device keypad_device_8k_ffa = { .name = GPIO_EVENT_DEV_NAME, .id = -1, .dev = { .platform_data = &keypad_data_8k_ffa, }, }; /* 7k FFA keypad platform device information */ static struct gpio_event_matrix_info keypad_matrix_info_7k_ffa = { .info.func = gpio_event_matrix_func, .keymap = keypad_keymap_ffa, .output_gpios = keypad_row_gpios, .input_gpios = keypad_col_gpios, .noutputs = ARRAY_SIZE(keypad_row_gpios), .ninputs = ARRAY_SIZE(keypad_col_gpios), .settle_time.tv64 = 40 * NSEC_PER_USEC, .poll_time.tv64 = 20 * NSEC_PER_MSEC, .flags = GPIOKPF_LEVEL_TRIGGERED_IRQ | GPIOKPF_DRIVE_INACTIVE | GPIOKPF_PRINT_UNMAPPED_KEYS }; static struct gpio_event_info *keypad_info_7k_ffa[] = { &keypad_matrix_info_7k_ffa.info }; static struct gpio_event_platform_data keypad_data_7k_ffa = { .name = "7k_ffa_keypad", .info = keypad_info_7k_ffa, .info_count = ARRAY_SIZE(keypad_info_7k_ffa) }; struct platform_device keypad_device_7k_ffa = { .name = GPIO_EVENT_DEV_NAME, 
.id = -1, .dev = { .platform_data = &keypad_data_7k_ffa, }, };
gpl-2.0
fredvj/kernel_huawei_u8860
net/sunrpc/svcsock.c
1415
43620
/* * linux/net/sunrpc/svcsock.c * * These are the RPC server socket internals. * * The server scheduling algorithm does not always distribute the load * evenly when servicing a single client. May need to modify the * svc_xprt_enqueue procedure... * * TCP support is largely untested and may be a little slow. The problem * is that we currently do two separate recvfrom's, one for the 4-byte * record length, and the second for the actual record. This could possibly * be improved by always reading a minimum size of around 100 bytes and * tucking any superfluous bytes away in a temporary store. Still, that * leaves write requests out in the rain. An alternative may be to peek at * the first skb in the queue, and if it matches the next TCP sequence * number, to extract the record marker. Yuck. * * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de> */ #include <linux/kernel.h> #include <linux/sched.h> #include <linux/errno.h> #include <linux/fcntl.h> #include <linux/net.h> #include <linux/in.h> #include <linux/inet.h> #include <linux/udp.h> #include <linux/tcp.h> #include <linux/unistd.h> #include <linux/slab.h> #include <linux/netdevice.h> #include <linux/skbuff.h> #include <linux/file.h> #include <linux/freezer.h> #include <net/sock.h> #include <net/checksum.h> #include <net/ip.h> #include <net/ipv6.h> #include <net/tcp.h> #include <net/tcp_states.h> #include <asm/uaccess.h> #include <asm/ioctls.h> #include <linux/sunrpc/types.h> #include <linux/sunrpc/clnt.h> #include <linux/sunrpc/xdr.h> #include <linux/sunrpc/msg_prot.h> #include <linux/sunrpc/svcsock.h> #include <linux/sunrpc/stats.h> #include <linux/sunrpc/xprt.h> #define RPCDBG_FACILITY RPCDBG_SVCXPRT static struct svc_sock *svc_setup_socket(struct svc_serv *, struct socket *, int *errp, int flags); static void svc_udp_data_ready(struct sock *, int); static int svc_udp_recvfrom(struct svc_rqst *); static int svc_udp_sendto(struct svc_rqst *); static void svc_sock_detach(struct svc_xprt *); static void 
svc_tcp_sock_detach(struct svc_xprt *); static void svc_sock_free(struct svc_xprt *); static struct svc_xprt *svc_create_socket(struct svc_serv *, int, struct net *, struct sockaddr *, int, int); #if defined(CONFIG_NFS_V4_1) static struct svc_xprt *svc_bc_create_socket(struct svc_serv *, int, struct net *, struct sockaddr *, int, int); static void svc_bc_sock_free(struct svc_xprt *xprt); #endif /* CONFIG_NFS_V4_1 */ #ifdef CONFIG_DEBUG_LOCK_ALLOC static struct lock_class_key svc_key[2]; static struct lock_class_key svc_slock_key[2]; static void svc_reclassify_socket(struct socket *sock) { struct sock *sk = sock->sk; BUG_ON(sock_owned_by_user(sk)); switch (sk->sk_family) { case AF_INET: sock_lock_init_class_and_name(sk, "slock-AF_INET-NFSD", &svc_slock_key[0], "sk_xprt.xpt_lock-AF_INET-NFSD", &svc_key[0]); break; case AF_INET6: sock_lock_init_class_and_name(sk, "slock-AF_INET6-NFSD", &svc_slock_key[1], "sk_xprt.xpt_lock-AF_INET6-NFSD", &svc_key[1]); break; default: BUG(); } } #else static void svc_reclassify_socket(struct socket *sock) { } #endif /* * Release an skbuff after use */ static void svc_release_skb(struct svc_rqst *rqstp) { struct sk_buff *skb = rqstp->rq_xprt_ctxt; if (skb) { struct svc_sock *svsk = container_of(rqstp->rq_xprt, struct svc_sock, sk_xprt); rqstp->rq_xprt_ctxt = NULL; dprintk("svc: service %p, releasing skb %p\n", rqstp, skb); skb_free_datagram_locked(svsk->sk_sk, skb); } } union svc_pktinfo_u { struct in_pktinfo pkti; struct in6_pktinfo pkti6; }; #define SVC_PKTINFO_SPACE \ CMSG_SPACE(sizeof(union svc_pktinfo_u)) static void svc_set_cmsg_data(struct svc_rqst *rqstp, struct cmsghdr *cmh) { struct svc_sock *svsk = container_of(rqstp->rq_xprt, struct svc_sock, sk_xprt); switch (svsk->sk_sk->sk_family) { case AF_INET: { struct in_pktinfo *pki = CMSG_DATA(cmh); cmh->cmsg_level = SOL_IP; cmh->cmsg_type = IP_PKTINFO; pki->ipi_ifindex = 0; pki->ipi_spec_dst.s_addr = rqstp->rq_daddr.addr.s_addr; cmh->cmsg_len = CMSG_LEN(sizeof(*pki)); } break; case 
AF_INET6: { struct in6_pktinfo *pki = CMSG_DATA(cmh); cmh->cmsg_level = SOL_IPV6; cmh->cmsg_type = IPV6_PKTINFO; pki->ipi6_ifindex = 0; ipv6_addr_copy(&pki->ipi6_addr, &rqstp->rq_daddr.addr6); cmh->cmsg_len = CMSG_LEN(sizeof(*pki)); } break; } } /* * send routine intended to be shared by the fore- and back-channel */ int svc_send_common(struct socket *sock, struct xdr_buf *xdr, struct page *headpage, unsigned long headoffset, struct page *tailpage, unsigned long tailoffset) { int result; int size; struct page **ppage = xdr->pages; size_t base = xdr->page_base; unsigned int pglen = xdr->page_len; unsigned int flags = MSG_MORE; int slen; int len = 0; slen = xdr->len; /* send head */ if (slen == xdr->head[0].iov_len) flags = 0; len = kernel_sendpage(sock, headpage, headoffset, xdr->head[0].iov_len, flags); if (len != xdr->head[0].iov_len) goto out; slen -= xdr->head[0].iov_len; if (slen == 0) goto out; /* send page data */ size = PAGE_SIZE - base < pglen ? PAGE_SIZE - base : pglen; while (pglen > 0) { if (slen == size) flags = 0; result = kernel_sendpage(sock, *ppage, base, size, flags); if (result > 0) len += result; if (result != size) goto out; slen -= size; pglen -= size; size = PAGE_SIZE < pglen ? 
PAGE_SIZE : pglen; base = 0; ppage++; } /* send tail */ if (xdr->tail[0].iov_len) { result = kernel_sendpage(sock, tailpage, tailoffset, xdr->tail[0].iov_len, 0); if (result > 0) len += result; } out: return len; } /* * Generic sendto routine */ static int svc_sendto(struct svc_rqst *rqstp, struct xdr_buf *xdr) { struct svc_sock *svsk = container_of(rqstp->rq_xprt, struct svc_sock, sk_xprt); struct socket *sock = svsk->sk_sock; union { struct cmsghdr hdr; long all[SVC_PKTINFO_SPACE / sizeof(long)]; } buffer; struct cmsghdr *cmh = &buffer.hdr; int len = 0; unsigned long tailoff; unsigned long headoff; RPC_IFDEBUG(char buf[RPC_MAX_ADDRBUFLEN]); if (rqstp->rq_prot == IPPROTO_UDP) { struct msghdr msg = { .msg_name = &rqstp->rq_addr, .msg_namelen = rqstp->rq_addrlen, .msg_control = cmh, .msg_controllen = sizeof(buffer), .msg_flags = MSG_MORE, }; svc_set_cmsg_data(rqstp, cmh); if (sock_sendmsg(sock, &msg, 0) < 0) goto out; } tailoff = ((unsigned long)xdr->tail[0].iov_base) & (PAGE_SIZE-1); headoff = 0; len = svc_send_common(sock, xdr, rqstp->rq_respages[0], headoff, rqstp->rq_respages[0], tailoff); out: dprintk("svc: socket %p sendto([%p %Zu... ], %d) = %d (addr %s)\n", svsk, xdr->head[0].iov_base, xdr->head[0].iov_len, xdr->len, len, svc_print_addr(rqstp, buf, sizeof(buf))); return len; } /* * Report socket names for nfsdfs */ static int svc_one_sock_name(struct svc_sock *svsk, char *buf, int remaining) { const struct sock *sk = svsk->sk_sk; const char *proto_name = sk->sk_protocol == IPPROTO_UDP ? 
"udp" : "tcp"; int len; switch (sk->sk_family) { case PF_INET: len = snprintf(buf, remaining, "ipv4 %s %pI4 %d\n", proto_name, &inet_sk(sk)->inet_rcv_saddr, inet_sk(sk)->inet_num); break; case PF_INET6: len = snprintf(buf, remaining, "ipv6 %s %pI6 %d\n", proto_name, &inet6_sk(sk)->rcv_saddr, inet_sk(sk)->inet_num); break; default: len = snprintf(buf, remaining, "*unknown-%d*\n", sk->sk_family); } if (len >= remaining) { *buf = '\0'; return -ENAMETOOLONG; } return len; } /** * svc_sock_names - construct a list of listener names in a string * @serv: pointer to RPC service * @buf: pointer to a buffer to fill in with socket names * @buflen: size of the buffer to be filled * @toclose: pointer to '\0'-terminated C string containing the name * of a listener to be closed * * Fills in @buf with a '\n'-separated list of names of listener * sockets. If @toclose is not NULL, the socket named by @toclose * is closed, and is not included in the output list. * * Returns positive length of the socket name string, or a negative * errno value on error. */ int svc_sock_names(struct svc_serv *serv, char *buf, const size_t buflen, const char *toclose) { struct svc_sock *svsk, *closesk = NULL; int len = 0; if (!serv) return 0; spin_lock_bh(&serv->sv_lock); list_for_each_entry(svsk, &serv->sv_permsocks, sk_xprt.xpt_list) { int onelen = svc_one_sock_name(svsk, buf + len, buflen - len); if (onelen < 0) { len = onelen; break; } if (toclose && strcmp(toclose, buf + len) == 0) { closesk = svsk; svc_xprt_get(&closesk->sk_xprt); } else len += onelen; } spin_unlock_bh(&serv->sv_lock); if (closesk) { /* Should unregister with portmap, but you cannot * unregister just one protocol... 
*/ svc_close_xprt(&closesk->sk_xprt); svc_xprt_put(&closesk->sk_xprt); } else if (toclose) return -ENOENT; return len; } EXPORT_SYMBOL_GPL(svc_sock_names); /* * Check input queue length */ static int svc_recv_available(struct svc_sock *svsk) { struct socket *sock = svsk->sk_sock; int avail, err; err = kernel_sock_ioctl(sock, TIOCINQ, (unsigned long) &avail); return (err >= 0)? avail : err; } /* * Generic recvfrom routine. */ static int svc_recvfrom(struct svc_rqst *rqstp, struct kvec *iov, int nr, int buflen) { struct svc_sock *svsk = container_of(rqstp->rq_xprt, struct svc_sock, sk_xprt); struct msghdr msg = { .msg_flags = MSG_DONTWAIT, }; int len; rqstp->rq_xprt_hlen = 0; len = kernel_recvmsg(svsk->sk_sock, &msg, iov, nr, buflen, msg.msg_flags); dprintk("svc: socket %p recvfrom(%p, %Zu) = %d\n", svsk, iov[0].iov_base, iov[0].iov_len, len); return len; } static int svc_partial_recvfrom(struct svc_rqst *rqstp, struct kvec *iov, int nr, int buflen, unsigned int base) { size_t save_iovlen; void __user *save_iovbase; unsigned int i; int ret; if (base == 0) return svc_recvfrom(rqstp, iov, nr, buflen); for (i = 0; i < nr; i++) { if (iov[i].iov_len > base) break; base -= iov[i].iov_len; } save_iovlen = iov[i].iov_len; save_iovbase = iov[i].iov_base; iov[i].iov_len -= base; iov[i].iov_base += base; ret = svc_recvfrom(rqstp, &iov[i], nr - i, buflen); iov[i].iov_len = save_iovlen; iov[i].iov_base = save_iovbase; return ret; } /* * Set socket snd and rcv buffer lengths */ static void svc_sock_setbufsize(struct socket *sock, unsigned int snd, unsigned int rcv) { #if 0 mm_segment_t oldfs; oldfs = get_fs(); set_fs(KERNEL_DS); sock_setsockopt(sock, SOL_SOCKET, SO_SNDBUF, (char*)&snd, sizeof(snd)); sock_setsockopt(sock, SOL_SOCKET, SO_RCVBUF, (char*)&rcv, sizeof(rcv)); #else /* sock_setsockopt limits use to sysctl_?mem_max, * which isn't acceptable. Until that is made conditional * on not having CAP_SYS_RESOURCE or similar, we go direct... * DaveM said I could! 
*/ lock_sock(sock->sk); sock->sk->sk_sndbuf = snd * 2; sock->sk->sk_rcvbuf = rcv * 2; sock->sk->sk_write_space(sock->sk); release_sock(sock->sk); #endif } /* * INET callback when data has been received on the socket. */ static void svc_udp_data_ready(struct sock *sk, int count) { struct svc_sock *svsk = (struct svc_sock *)sk->sk_user_data; wait_queue_head_t *wq = sk_sleep(sk); if (svsk) { dprintk("svc: socket %p(inet %p), count=%d, busy=%d\n", svsk, sk, count, test_bit(XPT_BUSY, &svsk->sk_xprt.xpt_flags)); set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags); svc_xprt_enqueue(&svsk->sk_xprt); } if (wq && waitqueue_active(wq)) wake_up_interruptible(wq); } /* * INET callback when space is newly available on the socket. */ static void svc_write_space(struct sock *sk) { struct svc_sock *svsk = (struct svc_sock *)(sk->sk_user_data); wait_queue_head_t *wq = sk_sleep(sk); if (svsk) { dprintk("svc: socket %p(inet %p), write_space busy=%d\n", svsk, sk, test_bit(XPT_BUSY, &svsk->sk_xprt.xpt_flags)); svc_xprt_enqueue(&svsk->sk_xprt); } if (wq && waitqueue_active(wq)) { dprintk("RPC svc_write_space: someone sleeping on %p\n", svsk); wake_up_interruptible(wq); } } static void svc_tcp_write_space(struct sock *sk) { struct socket *sock = sk->sk_socket; if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk) && sock) clear_bit(SOCK_NOSPACE, &sock->flags); svc_write_space(sk); } /* * See net/ipv6/ip_sockglue.c : ip_cmsg_recv_pktinfo */ static int svc_udp_get_dest_address4(struct svc_rqst *rqstp, struct cmsghdr *cmh) { struct in_pktinfo *pki = CMSG_DATA(cmh); if (cmh->cmsg_type != IP_PKTINFO) return 0; rqstp->rq_daddr.addr.s_addr = pki->ipi_spec_dst.s_addr; return 1; } /* * See net/ipv6/datagram.c : datagram_recv_ctl */ static int svc_udp_get_dest_address6(struct svc_rqst *rqstp, struct cmsghdr *cmh) { struct in6_pktinfo *pki = CMSG_DATA(cmh); if (cmh->cmsg_type != IPV6_PKTINFO) return 0; ipv6_addr_copy(&rqstp->rq_daddr.addr6, &pki->ipi6_addr); return 1; } /* * Copy the UDP datagram's 
destination address to the rqstp structure. * The 'destination' address in this case is the address to which the * peer sent the datagram, i.e. our local address. For multihomed * hosts, this can change from msg to msg. Note that only the IP * address changes, the port number should remain the same. */ static int svc_udp_get_dest_address(struct svc_rqst *rqstp, struct cmsghdr *cmh) { switch (cmh->cmsg_level) { case SOL_IP: return svc_udp_get_dest_address4(rqstp, cmh); case SOL_IPV6: return svc_udp_get_dest_address6(rqstp, cmh); } return 0; } /* * Receive a datagram from a UDP socket. */ static int svc_udp_recvfrom(struct svc_rqst *rqstp) { struct svc_sock *svsk = container_of(rqstp->rq_xprt, struct svc_sock, sk_xprt); struct svc_serv *serv = svsk->sk_xprt.xpt_server; struct sk_buff *skb; union { struct cmsghdr hdr; long all[SVC_PKTINFO_SPACE / sizeof(long)]; } buffer; struct cmsghdr *cmh = &buffer.hdr; struct msghdr msg = { .msg_name = svc_addr(rqstp), .msg_control = cmh, .msg_controllen = sizeof(buffer), .msg_flags = MSG_DONTWAIT, }; size_t len; int err; if (test_and_clear_bit(XPT_CHNGBUF, &svsk->sk_xprt.xpt_flags)) /* udp sockets need large rcvbuf as all pending * requests are still in that buffer. sndbuf must * also be large enough that there is enough space * for one reply per thread. We count all threads * rather than threads in a particular pool, which * provides an upper bound on the number of threads * which will access the socket. 
*/ svc_sock_setbufsize(svsk->sk_sock, (serv->sv_nrthreads+3) * serv->sv_max_mesg, (serv->sv_nrthreads+3) * serv->sv_max_mesg); clear_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags); skb = NULL; err = kernel_recvmsg(svsk->sk_sock, &msg, NULL, 0, 0, MSG_PEEK | MSG_DONTWAIT); if (err >= 0) skb = skb_recv_datagram(svsk->sk_sk, 0, 1, &err); if (skb == NULL) { if (err != -EAGAIN) { /* possibly an icmp error */ dprintk("svc: recvfrom returned error %d\n", -err); set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags); } return -EAGAIN; } len = svc_addr_len(svc_addr(rqstp)); if (len == 0) return -EAFNOSUPPORT; rqstp->rq_addrlen = len; if (skb->tstamp.tv64 == 0) { skb->tstamp = ktime_get_real(); /* Don't enable netstamp, sunrpc doesn't need that much accuracy */ } svsk->sk_sk->sk_stamp = skb->tstamp; set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags); /* there may be more data... */ len = skb->len - sizeof(struct udphdr); rqstp->rq_arg.len = len; rqstp->rq_prot = IPPROTO_UDP; if (!svc_udp_get_dest_address(rqstp, cmh)) { if (net_ratelimit()) printk(KERN_WARNING "svc: received unknown control message %d/%d; " "dropping RPC reply datagram\n", cmh->cmsg_level, cmh->cmsg_type); skb_free_datagram_locked(svsk->sk_sk, skb); return 0; } if (skb_is_nonlinear(skb)) { /* we have to copy */ local_bh_disable(); if (csum_partial_copy_to_xdr(&rqstp->rq_arg, skb)) { local_bh_enable(); /* checksum error */ skb_free_datagram_locked(svsk->sk_sk, skb); return 0; } local_bh_enable(); skb_free_datagram_locked(svsk->sk_sk, skb); } else { /* we can use it in-place */ rqstp->rq_arg.head[0].iov_base = skb->data + sizeof(struct udphdr); rqstp->rq_arg.head[0].iov_len = len; if (skb_checksum_complete(skb)) { skb_free_datagram_locked(svsk->sk_sk, skb); return 0; } rqstp->rq_xprt_ctxt = skb; } rqstp->rq_arg.page_base = 0; if (len <= rqstp->rq_arg.head[0].iov_len) { rqstp->rq_arg.head[0].iov_len = len; rqstp->rq_arg.page_len = 0; rqstp->rq_respages = rqstp->rq_pages+1; } else { rqstp->rq_arg.page_len = len - 
rqstp->rq_arg.head[0].iov_len; rqstp->rq_respages = rqstp->rq_pages + 1 + DIV_ROUND_UP(rqstp->rq_arg.page_len, PAGE_SIZE); } if (serv->sv_stats) serv->sv_stats->netudpcnt++; return len; } static int svc_udp_sendto(struct svc_rqst *rqstp) { int error; error = svc_sendto(rqstp, &rqstp->rq_res); if (error == -ECONNREFUSED) /* ICMP error on earlier request. */ error = svc_sendto(rqstp, &rqstp->rq_res); return error; } static void svc_udp_prep_reply_hdr(struct svc_rqst *rqstp) { } static int svc_udp_has_wspace(struct svc_xprt *xprt) { struct svc_sock *svsk = container_of(xprt, struct svc_sock, sk_xprt); struct svc_serv *serv = xprt->xpt_server; unsigned long required; /* * Set the SOCK_NOSPACE flag before checking the available * sock space. */ set_bit(SOCK_NOSPACE, &svsk->sk_sock->flags); required = atomic_read(&svsk->sk_xprt.xpt_reserved) + serv->sv_max_mesg; if (required*2 > sock_wspace(svsk->sk_sk)) return 0; clear_bit(SOCK_NOSPACE, &svsk->sk_sock->flags); return 1; } static struct svc_xprt *svc_udp_accept(struct svc_xprt *xprt) { BUG(); return NULL; } static struct svc_xprt *svc_udp_create(struct svc_serv *serv, struct net *net, struct sockaddr *sa, int salen, int flags) { return svc_create_socket(serv, IPPROTO_UDP, net, sa, salen, flags); } static struct svc_xprt_ops svc_udp_ops = { .xpo_create = svc_udp_create, .xpo_recvfrom = svc_udp_recvfrom, .xpo_sendto = svc_udp_sendto, .xpo_release_rqst = svc_release_skb, .xpo_detach = svc_sock_detach, .xpo_free = svc_sock_free, .xpo_prep_reply_hdr = svc_udp_prep_reply_hdr, .xpo_has_wspace = svc_udp_has_wspace, .xpo_accept = svc_udp_accept, }; static struct svc_xprt_class svc_udp_class = { .xcl_name = "udp", .xcl_owner = THIS_MODULE, .xcl_ops = &svc_udp_ops, .xcl_max_payload = RPCSVC_MAXPAYLOAD_UDP, }; static void svc_udp_init(struct svc_sock *svsk, struct svc_serv *serv) { int err, level, optname, one = 1; svc_xprt_init(&svc_udp_class, &svsk->sk_xprt, serv); clear_bit(XPT_CACHE_AUTH, &svsk->sk_xprt.xpt_flags); 
svsk->sk_sk->sk_data_ready = svc_udp_data_ready; svsk->sk_sk->sk_write_space = svc_write_space; /* initialise setting must have enough space to * receive and respond to one request. * svc_udp_recvfrom will re-adjust if necessary */ svc_sock_setbufsize(svsk->sk_sock, 3 * svsk->sk_xprt.xpt_server->sv_max_mesg, 3 * svsk->sk_xprt.xpt_server->sv_max_mesg); /* data might have come in before data_ready set up */ set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags); set_bit(XPT_CHNGBUF, &svsk->sk_xprt.xpt_flags); /* make sure we get destination address info */ switch (svsk->sk_sk->sk_family) { case AF_INET: level = SOL_IP; optname = IP_PKTINFO; break; case AF_INET6: level = SOL_IPV6; optname = IPV6_RECVPKTINFO; break; default: BUG(); } err = kernel_setsockopt(svsk->sk_sock, level, optname, (char *)&one, sizeof(one)); dprintk("svc: kernel_setsockopt returned %d\n", err); } /* * A data_ready event on a listening socket means there's a connection * pending. Do not use state_change as a substitute for it. */ static void svc_tcp_listen_data_ready(struct sock *sk, int count_unused) { struct svc_sock *svsk = (struct svc_sock *)sk->sk_user_data; wait_queue_head_t *wq; dprintk("svc: socket %p TCP (listen) state change %d\n", sk, sk->sk_state); /* * This callback may called twice when a new connection * is established as a child socket inherits everything * from a parent LISTEN socket. * 1) data_ready method of the parent socket will be called * when one of child sockets become ESTABLISHED. * 2) data_ready method of the child socket may be called * when it receives data before the socket is accepted. * In case of 2, we should ignore it silently. */ if (sk->sk_state == TCP_LISTEN) { if (svsk) { set_bit(XPT_CONN, &svsk->sk_xprt.xpt_flags); svc_xprt_enqueue(&svsk->sk_xprt); } else printk("svc: socket %p: no user data\n", sk); } wq = sk_sleep(sk); if (wq && waitqueue_active(wq)) wake_up_interruptible_all(wq); } /* * A state change on a connected socket means it's dying or dead. 
*/ static void svc_tcp_state_change(struct sock *sk) { struct svc_sock *svsk = (struct svc_sock *)sk->sk_user_data; wait_queue_head_t *wq = sk_sleep(sk); dprintk("svc: socket %p TCP (connected) state change %d (svsk %p)\n", sk, sk->sk_state, sk->sk_user_data); if (!svsk) printk("svc: socket %p: no user data\n", sk); else { set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags); svc_xprt_enqueue(&svsk->sk_xprt); } if (wq && waitqueue_active(wq)) wake_up_interruptible_all(wq); } static void svc_tcp_data_ready(struct sock *sk, int count) { struct svc_sock *svsk = (struct svc_sock *)sk->sk_user_data; wait_queue_head_t *wq = sk_sleep(sk); dprintk("svc: socket %p TCP data ready (svsk %p)\n", sk, sk->sk_user_data); if (svsk) { set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags); svc_xprt_enqueue(&svsk->sk_xprt); } if (wq && waitqueue_active(wq)) wake_up_interruptible(wq); } /* * Accept a TCP connection */ static struct svc_xprt *svc_tcp_accept(struct svc_xprt *xprt) { struct svc_sock *svsk = container_of(xprt, struct svc_sock, sk_xprt); struct sockaddr_storage addr; struct sockaddr *sin = (struct sockaddr *) &addr; struct svc_serv *serv = svsk->sk_xprt.xpt_server; struct socket *sock = svsk->sk_sock; struct socket *newsock; struct svc_sock *newsvsk; int err, slen; RPC_IFDEBUG(char buf[RPC_MAX_ADDRBUFLEN]); dprintk("svc: tcp_accept %p sock %p\n", svsk, sock); if (!sock) return NULL; clear_bit(XPT_CONN, &svsk->sk_xprt.xpt_flags); err = kernel_accept(sock, &newsock, O_NONBLOCK); if (err < 0) { if (err == -ENOMEM) printk(KERN_WARNING "%s: no more sockets!\n", serv->sv_name); else if (err != -EAGAIN && net_ratelimit()) printk(KERN_WARNING "%s: accept failed (err %d)!\n", serv->sv_name, -err); return NULL; } set_bit(XPT_CONN, &svsk->sk_xprt.xpt_flags); err = kernel_getpeername(newsock, sin, &slen); if (err < 0) { if (net_ratelimit()) printk(KERN_WARNING "%s: peername failed (err %d)!\n", serv->sv_name, -err); goto failed; /* aborted connection or whatever */ } /* Ideally, we would want to reject 
connections from unauthorized * hosts here, but when we get encryption, the IP of the host won't * tell us anything. For now just warn about unpriv connections. */ if (!svc_port_is_privileged(sin)) { dprintk(KERN_WARNING "%s: connect from unprivileged port: %s\n", serv->sv_name, __svc_print_addr(sin, buf, sizeof(buf))); } dprintk("%s: connect from %s\n", serv->sv_name, __svc_print_addr(sin, buf, sizeof(buf))); /* make sure that a write doesn't block forever when * low on memory */ newsock->sk->sk_sndtimeo = HZ*30; if (!(newsvsk = svc_setup_socket(serv, newsock, &err, (SVC_SOCK_ANONYMOUS | SVC_SOCK_TEMPORARY)))) goto failed; svc_xprt_set_remote(&newsvsk->sk_xprt, sin, slen); err = kernel_getsockname(newsock, sin, &slen); if (unlikely(err < 0)) { dprintk("svc_tcp_accept: kernel_getsockname error %d\n", -err); slen = offsetof(struct sockaddr, sa_data); } svc_xprt_set_local(&newsvsk->sk_xprt, sin, slen); if (serv->sv_stats) serv->sv_stats->nettcpconn++; return &newsvsk->sk_xprt; failed: sock_release(newsock); return NULL; } static unsigned int svc_tcp_restore_pages(struct svc_sock *svsk, struct svc_rqst *rqstp) { unsigned int i, len, npages; if (svsk->sk_tcplen <= sizeof(rpc_fraghdr)) return 0; len = svsk->sk_tcplen - sizeof(rpc_fraghdr); npages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT; for (i = 0; i < npages; i++) { if (rqstp->rq_pages[i] != NULL) put_page(rqstp->rq_pages[i]); BUG_ON(svsk->sk_pages[i] == NULL); rqstp->rq_pages[i] = svsk->sk_pages[i]; svsk->sk_pages[i] = NULL; } rqstp->rq_arg.head[0].iov_base = page_address(rqstp->rq_pages[0]); return len; } static void svc_tcp_save_pages(struct svc_sock *svsk, struct svc_rqst *rqstp) { unsigned int i, len, npages; if (svsk->sk_tcplen <= sizeof(rpc_fraghdr)) return; len = svsk->sk_tcplen - sizeof(rpc_fraghdr); npages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT; for (i = 0; i < npages; i++) { svsk->sk_pages[i] = rqstp->rq_pages[i]; rqstp->rq_pages[i] = NULL; } } static void svc_tcp_clear_pages(struct svc_sock *svsk) { unsigned 
int i, len, npages; if (svsk->sk_tcplen <= sizeof(rpc_fraghdr)) goto out; len = svsk->sk_tcplen - sizeof(rpc_fraghdr); npages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT; for (i = 0; i < npages; i++) { BUG_ON(svsk->sk_pages[i] == NULL); put_page(svsk->sk_pages[i]); svsk->sk_pages[i] = NULL; } out: svsk->sk_tcplen = 0; } /* * Receive data. * If we haven't gotten the record length yet, get the next four bytes. * Otherwise try to gobble up as much as possible up to the complete * record length. */ static int svc_tcp_recv_record(struct svc_sock *svsk, struct svc_rqst *rqstp) { struct svc_serv *serv = svsk->sk_xprt.xpt_server; unsigned int want; int len; clear_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags); if (svsk->sk_tcplen < sizeof(rpc_fraghdr)) { struct kvec iov; want = sizeof(rpc_fraghdr) - svsk->sk_tcplen; iov.iov_base = ((char *) &svsk->sk_reclen) + svsk->sk_tcplen; iov.iov_len = want; if ((len = svc_recvfrom(rqstp, &iov, 1, want)) < 0) goto error; svsk->sk_tcplen += len; if (len < want) { dprintk("svc: short recvfrom while reading record " "length (%d of %d)\n", len, want); return -EAGAIN; } svsk->sk_reclen = ntohl(svsk->sk_reclen); if (!(svsk->sk_reclen & RPC_LAST_STREAM_FRAGMENT)) { /* FIXME: technically, a record can be fragmented, * and non-terminal fragments will not have the top * bit set in the fragment length header. * But apparently no known nfs clients send fragmented * records. */ if (net_ratelimit()) printk(KERN_NOTICE "RPC: multiple fragments " "per record not supported\n"); goto err_delete; } svsk->sk_reclen &= RPC_FRAGMENT_SIZE_MASK; dprintk("svc: TCP record, %d bytes\n", svsk->sk_reclen); if (svsk->sk_reclen > serv->sv_max_mesg) { if (net_ratelimit()) printk(KERN_NOTICE "RPC: " "fragment too large: 0x%08lx\n", (unsigned long)svsk->sk_reclen); goto err_delete; } } if (svsk->sk_reclen < 8) goto err_delete; /* client is nuts. 
*/ len = svsk->sk_reclen; return len; error: dprintk("RPC: TCP recv_record got %d\n", len); return len; err_delete: set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags); return -EAGAIN; } static int receive_cb_reply(struct svc_sock *svsk, struct svc_rqst *rqstp) { struct rpc_xprt *bc_xprt = svsk->sk_xprt.xpt_bc_xprt; struct rpc_rqst *req = NULL; struct kvec *src, *dst; __be32 *p = (__be32 *)rqstp->rq_arg.head[0].iov_base; __be32 xid; __be32 calldir; xid = *p++; calldir = *p; if (bc_xprt) req = xprt_lookup_rqst(bc_xprt, xid); if (!req) { printk(KERN_NOTICE "%s: Got unrecognized reply: " "calldir 0x%x xpt_bc_xprt %p xid %08x\n", __func__, ntohl(calldir), bc_xprt, xid); return -EAGAIN; } memcpy(&req->rq_private_buf, &req->rq_rcv_buf, sizeof(struct xdr_buf)); /* * XXX!: cheating for now! Only copying HEAD. * But we know this is good enough for now (in fact, for any * callback reply in the forseeable future). */ dst = &req->rq_private_buf.head[0]; src = &rqstp->rq_arg.head[0]; if (dst->iov_len < src->iov_len) return -EAGAIN; /* whatever; just giving up. */ memcpy(dst->iov_base, src->iov_base, src->iov_len); xprt_complete_rqst(req->rq_task, svsk->sk_reclen); rqstp->rq_arg.len = 0; return 0; } static int copy_pages_to_kvecs(struct kvec *vec, struct page **pages, int len) { int i = 0; int t = 0; while (t < len) { vec[i].iov_base = page_address(pages[i]); vec[i].iov_len = PAGE_SIZE; i++; t += PAGE_SIZE; } return i; } /* * Receive data from a TCP socket. 
*/ static int svc_tcp_recvfrom(struct svc_rqst *rqstp) { struct svc_sock *svsk = container_of(rqstp->rq_xprt, struct svc_sock, sk_xprt); struct svc_serv *serv = svsk->sk_xprt.xpt_server; int len; struct kvec *vec; unsigned int want, base; __be32 *p; __be32 calldir; int pnum; dprintk("svc: tcp_recv %p data %d conn %d close %d\n", svsk, test_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags), test_bit(XPT_CONN, &svsk->sk_xprt.xpt_flags), test_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags)); len = svc_tcp_recv_record(svsk, rqstp); if (len < 0) goto error; base = svc_tcp_restore_pages(svsk, rqstp); want = svsk->sk_reclen - base; vec = rqstp->rq_vec; pnum = copy_pages_to_kvecs(&vec[0], &rqstp->rq_pages[0], svsk->sk_reclen); rqstp->rq_respages = &rqstp->rq_pages[pnum]; /* Now receive data */ len = svc_partial_recvfrom(rqstp, vec, pnum, want, base); if (len >= 0) svsk->sk_tcplen += len; if (len != want) { if (len < 0 && len != -EAGAIN) goto err_other; svc_tcp_save_pages(svsk, rqstp); dprintk("svc: incomplete TCP record (%d of %d)\n", svsk->sk_tcplen, svsk->sk_reclen); goto err_noclose; } rqstp->rq_arg.len = svsk->sk_reclen; rqstp->rq_arg.page_base = 0; if (rqstp->rq_arg.len <= rqstp->rq_arg.head[0].iov_len) { rqstp->rq_arg.head[0].iov_len = rqstp->rq_arg.len; rqstp->rq_arg.page_len = 0; } else rqstp->rq_arg.page_len = rqstp->rq_arg.len - rqstp->rq_arg.head[0].iov_len; rqstp->rq_xprt_ctxt = NULL; rqstp->rq_prot = IPPROTO_TCP; p = (__be32 *)rqstp->rq_arg.head[0].iov_base; calldir = p[1]; if (calldir) len = receive_cb_reply(svsk, rqstp); /* Reset TCP read info */ svsk->sk_reclen = 0; svsk->sk_tcplen = 0; /* If we have more data, signal svc_xprt_enqueue() to try again */ if (svc_recv_available(svsk) > sizeof(rpc_fraghdr)) set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags); if (len < 0) goto error; svc_xprt_copy_addrs(rqstp, &svsk->sk_xprt); if (serv->sv_stats) serv->sv_stats->nettcpcnt++; dprintk("svc: TCP complete record (%d bytes)\n", rqstp->rq_arg.len); return rqstp->rq_arg.len; error: if (len != 
-EAGAIN) goto err_other; dprintk("RPC: TCP recvfrom got EAGAIN\n"); return -EAGAIN; err_other: printk(KERN_NOTICE "%s: recvfrom returned errno %d\n", svsk->sk_xprt.xpt_server->sv_name, -len); set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags); err_noclose: return -EAGAIN; /* record not complete */ } /* * Send out data on TCP socket. */ static int svc_tcp_sendto(struct svc_rqst *rqstp) { struct xdr_buf *xbufp = &rqstp->rq_res; int sent; __be32 reclen; /* Set up the first element of the reply kvec. * Any other kvecs that may be in use have been taken * care of by the server implementation itself. */ reclen = htonl(0x80000000|((xbufp->len ) - 4)); memcpy(xbufp->head[0].iov_base, &reclen, 4); sent = svc_sendto(rqstp, &rqstp->rq_res); if (sent != xbufp->len) { printk(KERN_NOTICE "rpc-srv/tcp: %s: %s %d when sending %d bytes " "- shutting down socket\n", rqstp->rq_xprt->xpt_server->sv_name, (sent<0)?"got error":"sent only", sent, xbufp->len); set_bit(XPT_CLOSE, &rqstp->rq_xprt->xpt_flags); svc_xprt_enqueue(rqstp->rq_xprt); sent = -EAGAIN; } return sent; } /* * Setup response header. TCP has a 4B record length field. */ static void svc_tcp_prep_reply_hdr(struct svc_rqst *rqstp) { struct kvec *resv = &rqstp->rq_res.head[0]; /* tcp needs a space for the record length... 
*/ svc_putnl(resv, 0); } static int svc_tcp_has_wspace(struct svc_xprt *xprt) { struct svc_sock *svsk = container_of(xprt, struct svc_sock, sk_xprt); struct svc_serv *serv = svsk->sk_xprt.xpt_server; int required; if (test_bit(XPT_LISTENER, &xprt->xpt_flags)) return 1; required = atomic_read(&xprt->xpt_reserved) + serv->sv_max_mesg; if (sk_stream_wspace(svsk->sk_sk) >= required) return 1; set_bit(SOCK_NOSPACE, &svsk->sk_sock->flags); return 0; } static struct svc_xprt *svc_tcp_create(struct svc_serv *serv, struct net *net, struct sockaddr *sa, int salen, int flags) { return svc_create_socket(serv, IPPROTO_TCP, net, sa, salen, flags); } #if defined(CONFIG_NFS_V4_1) static struct svc_xprt *svc_bc_create_socket(struct svc_serv *, int, struct net *, struct sockaddr *, int, int); static void svc_bc_sock_free(struct svc_xprt *xprt); static struct svc_xprt *svc_bc_tcp_create(struct svc_serv *serv, struct net *net, struct sockaddr *sa, int salen, int flags) { return svc_bc_create_socket(serv, IPPROTO_TCP, net, sa, salen, flags); } static void svc_bc_tcp_sock_detach(struct svc_xprt *xprt) { } static struct svc_xprt_ops svc_tcp_bc_ops = { .xpo_create = svc_bc_tcp_create, .xpo_detach = svc_bc_tcp_sock_detach, .xpo_free = svc_bc_sock_free, .xpo_prep_reply_hdr = svc_tcp_prep_reply_hdr, }; static struct svc_xprt_class svc_tcp_bc_class = { .xcl_name = "tcp-bc", .xcl_owner = THIS_MODULE, .xcl_ops = &svc_tcp_bc_ops, .xcl_max_payload = RPCSVC_MAXPAYLOAD_TCP, }; static void svc_init_bc_xprt_sock(void) { svc_reg_xprt_class(&svc_tcp_bc_class); } static void svc_cleanup_bc_xprt_sock(void) { svc_unreg_xprt_class(&svc_tcp_bc_class); } #else /* CONFIG_NFS_V4_1 */ static void svc_init_bc_xprt_sock(void) { } static void svc_cleanup_bc_xprt_sock(void) { } #endif /* CONFIG_NFS_V4_1 */ static struct svc_xprt_ops svc_tcp_ops = { .xpo_create = svc_tcp_create, .xpo_recvfrom = svc_tcp_recvfrom, .xpo_sendto = svc_tcp_sendto, .xpo_release_rqst = svc_release_skb, .xpo_detach = svc_tcp_sock_detach, 
.xpo_free = svc_sock_free, .xpo_prep_reply_hdr = svc_tcp_prep_reply_hdr, .xpo_has_wspace = svc_tcp_has_wspace, .xpo_accept = svc_tcp_accept, }; static struct svc_xprt_class svc_tcp_class = { .xcl_name = "tcp", .xcl_owner = THIS_MODULE, .xcl_ops = &svc_tcp_ops, .xcl_max_payload = RPCSVC_MAXPAYLOAD_TCP, }; void svc_init_xprt_sock(void) { svc_reg_xprt_class(&svc_tcp_class); svc_reg_xprt_class(&svc_udp_class); svc_init_bc_xprt_sock(); } void svc_cleanup_xprt_sock(void) { svc_unreg_xprt_class(&svc_tcp_class); svc_unreg_xprt_class(&svc_udp_class); svc_cleanup_bc_xprt_sock(); } static void svc_tcp_init(struct svc_sock *svsk, struct svc_serv *serv) { struct sock *sk = svsk->sk_sk; svc_xprt_init(&svc_tcp_class, &svsk->sk_xprt, serv); set_bit(XPT_CACHE_AUTH, &svsk->sk_xprt.xpt_flags); if (sk->sk_state == TCP_LISTEN) { dprintk("setting up TCP socket for listening\n"); set_bit(XPT_LISTENER, &svsk->sk_xprt.xpt_flags); sk->sk_data_ready = svc_tcp_listen_data_ready; set_bit(XPT_CONN, &svsk->sk_xprt.xpt_flags); } else { dprintk("setting up TCP socket for reading\n"); sk->sk_state_change = svc_tcp_state_change; sk->sk_data_ready = svc_tcp_data_ready; sk->sk_write_space = svc_tcp_write_space; svsk->sk_reclen = 0; svsk->sk_tcplen = 0; memset(&svsk->sk_pages[0], 0, sizeof(svsk->sk_pages)); tcp_sk(sk)->nonagle |= TCP_NAGLE_OFF; set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags); if (sk->sk_state != TCP_ESTABLISHED) set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags); } } void svc_sock_update_bufs(struct svc_serv *serv) { /* * The number of server threads has changed. 
Update * rcvbuf and sndbuf accordingly on all sockets */ struct svc_sock *svsk; spin_lock_bh(&serv->sv_lock); list_for_each_entry(svsk, &serv->sv_permsocks, sk_xprt.xpt_list) set_bit(XPT_CHNGBUF, &svsk->sk_xprt.xpt_flags); list_for_each_entry(svsk, &serv->sv_tempsocks, sk_xprt.xpt_list) set_bit(XPT_CHNGBUF, &svsk->sk_xprt.xpt_flags); spin_unlock_bh(&serv->sv_lock); } EXPORT_SYMBOL_GPL(svc_sock_update_bufs); /* * Initialize socket for RPC use and create svc_sock struct * XXX: May want to setsockopt SO_SNDBUF and SO_RCVBUF. */ static struct svc_sock *svc_setup_socket(struct svc_serv *serv, struct socket *sock, int *errp, int flags) { struct svc_sock *svsk; struct sock *inet; int pmap_register = !(flags & SVC_SOCK_ANONYMOUS); dprintk("svc: svc_setup_socket %p\n", sock); if (!(svsk = kzalloc(sizeof(*svsk), GFP_KERNEL))) { *errp = -ENOMEM; return NULL; } inet = sock->sk; /* Register socket with portmapper */ if (*errp >= 0 && pmap_register) *errp = svc_register(serv, inet->sk_family, inet->sk_protocol, ntohs(inet_sk(inet)->inet_sport)); if (*errp < 0) { kfree(svsk); return NULL; } inet->sk_user_data = svsk; svsk->sk_sock = sock; svsk->sk_sk = inet; svsk->sk_ostate = inet->sk_state_change; svsk->sk_odata = inet->sk_data_ready; svsk->sk_owspace = inet->sk_write_space; /* Initialize the socket */ if (sock->type == SOCK_DGRAM) svc_udp_init(svsk, serv); else { /* initialise setting must have enough space to * receive and respond to one request. */ svc_sock_setbufsize(svsk->sk_sock, 4 * serv->sv_max_mesg, 4 * serv->sv_max_mesg); svc_tcp_init(svsk, serv); } dprintk("svc: svc_setup_socket created %p (inet %p)\n", svsk, svsk->sk_sk); return svsk; } /** * svc_addsock - add a listener socket to an RPC service * @serv: pointer to RPC service to which to add a new listener * @fd: file descriptor of the new listener * @name_return: pointer to buffer to fill in with name of listener * @len: size of the buffer * * Fills in socket name and returns positive length of name if successful. 
* Name is terminated with '\n'. On error, returns a negative errno * value. */ int svc_addsock(struct svc_serv *serv, const int fd, char *name_return, const size_t len) { int err = 0; struct socket *so = sockfd_lookup(fd, &err); struct svc_sock *svsk = NULL; if (!so) return err; if ((so->sk->sk_family != PF_INET) && (so->sk->sk_family != PF_INET6)) err = -EAFNOSUPPORT; else if (so->sk->sk_protocol != IPPROTO_TCP && so->sk->sk_protocol != IPPROTO_UDP) err = -EPROTONOSUPPORT; else if (so->state > SS_UNCONNECTED) err = -EISCONN; else { if (!try_module_get(THIS_MODULE)) err = -ENOENT; else svsk = svc_setup_socket(serv, so, &err, SVC_SOCK_DEFAULTS); if (svsk) { struct sockaddr_storage addr; struct sockaddr *sin = (struct sockaddr *)&addr; int salen; if (kernel_getsockname(svsk->sk_sock, sin, &salen) == 0) svc_xprt_set_local(&svsk->sk_xprt, sin, salen); clear_bit(XPT_TEMP, &svsk->sk_xprt.xpt_flags); spin_lock_bh(&serv->sv_lock); list_add(&svsk->sk_xprt.xpt_list, &serv->sv_permsocks); spin_unlock_bh(&serv->sv_lock); svc_xprt_received(&svsk->sk_xprt); err = 0; } else module_put(THIS_MODULE); } if (err) { sockfd_put(so); return err; } return svc_one_sock_name(svsk, name_return, len); } EXPORT_SYMBOL_GPL(svc_addsock); /* * Create socket for RPC service. */ static struct svc_xprt *svc_create_socket(struct svc_serv *serv, int protocol, struct net *net, struct sockaddr *sin, int len, int flags) { struct svc_sock *svsk; struct socket *sock; int error; int type; struct sockaddr_storage addr; struct sockaddr *newsin = (struct sockaddr *)&addr; int newlen; int family; int val; RPC_IFDEBUG(char buf[RPC_MAX_ADDRBUFLEN]); dprintk("svc: svc_create_socket(%s, %d, %s)\n", serv->sv_program->pg_name, protocol, __svc_print_addr(sin, buf, sizeof(buf))); if (protocol != IPPROTO_UDP && protocol != IPPROTO_TCP) { printk(KERN_WARNING "svc: only UDP and TCP " "sockets supported\n"); return ERR_PTR(-EINVAL); } type = (protocol == IPPROTO_UDP)? 
SOCK_DGRAM : SOCK_STREAM; switch (sin->sa_family) { case AF_INET6: family = PF_INET6; break; case AF_INET: family = PF_INET; break; default: return ERR_PTR(-EINVAL); } error = __sock_create(net, family, type, protocol, &sock, 1); if (error < 0) return ERR_PTR(error); svc_reclassify_socket(sock); /* * If this is an PF_INET6 listener, we want to avoid * getting requests from IPv4 remotes. Those should * be shunted to a PF_INET listener via rpcbind. */ val = 1; if (family == PF_INET6) kernel_setsockopt(sock, SOL_IPV6, IPV6_V6ONLY, (char *)&val, sizeof(val)); if (type == SOCK_STREAM) sock->sk->sk_reuse = 1; /* allow address reuse */ error = kernel_bind(sock, sin, len); if (error < 0) goto bummer; newlen = len; error = kernel_getsockname(sock, newsin, &newlen); if (error < 0) goto bummer; if (protocol == IPPROTO_TCP) { if ((error = kernel_listen(sock, 64)) < 0) goto bummer; } if ((svsk = svc_setup_socket(serv, sock, &error, flags)) != NULL) { svc_xprt_set_local(&svsk->sk_xprt, newsin, newlen); return (struct svc_xprt *)svsk; } bummer: dprintk("svc: svc_create_socket error = %d\n", -error); sock_release(sock); return ERR_PTR(error); } /* * Detach the svc_sock from the socket so that no * more callbacks occur. 
*/ static void svc_sock_detach(struct svc_xprt *xprt) { struct svc_sock *svsk = container_of(xprt, struct svc_sock, sk_xprt); struct sock *sk = svsk->sk_sk; wait_queue_head_t *wq; dprintk("svc: svc_sock_detach(%p)\n", svsk); /* put back the old socket callbacks */ sk->sk_state_change = svsk->sk_ostate; sk->sk_data_ready = svsk->sk_odata; sk->sk_write_space = svsk->sk_owspace; wq = sk_sleep(sk); if (wq && waitqueue_active(wq)) wake_up_interruptible(wq); } /* * Disconnect the socket, and reset the callbacks */ static void svc_tcp_sock_detach(struct svc_xprt *xprt) { struct svc_sock *svsk = container_of(xprt, struct svc_sock, sk_xprt); dprintk("svc: svc_tcp_sock_detach(%p)\n", svsk); svc_sock_detach(xprt); if (!test_bit(XPT_LISTENER, &xprt->xpt_flags)) { svc_tcp_clear_pages(svsk); kernel_sock_shutdown(svsk->sk_sock, SHUT_RDWR); } } /* * Free the svc_sock's socket resources and the svc_sock itself. */ static void svc_sock_free(struct svc_xprt *xprt) { struct svc_sock *svsk = container_of(xprt, struct svc_sock, sk_xprt); dprintk("svc: svc_sock_free(%p)\n", svsk); if (svsk->sk_sock->file) sockfd_put(svsk->sk_sock); else sock_release(svsk->sk_sock); kfree(svsk); } #if defined(CONFIG_NFS_V4_1) /* * Create a back channel svc_xprt which shares the fore channel socket. */ static struct svc_xprt *svc_bc_create_socket(struct svc_serv *serv, int protocol, struct net *net, struct sockaddr *sin, int len, int flags) { struct svc_sock *svsk; struct svc_xprt *xprt; if (protocol != IPPROTO_TCP) { printk(KERN_WARNING "svc: only TCP sockets" " supported on shared back channel\n"); return ERR_PTR(-EINVAL); } svsk = kzalloc(sizeof(*svsk), GFP_KERNEL); if (!svsk) return ERR_PTR(-ENOMEM); xprt = &svsk->sk_xprt; svc_xprt_init(&svc_tcp_bc_class, xprt, serv); serv->sv_bc_xprt = xprt; return xprt; } /* * Free a back channel svc_sock. */ static void svc_bc_sock_free(struct svc_xprt *xprt) { if (xprt) kfree(container_of(xprt, struct svc_sock, sk_xprt)); } #endif /* CONFIG_NFS_V4_1 */
gpl-2.0
DingSoung/linux-3.0.1
arch/powerpc/kvm/e500_tlb.c
2183
19997
/* * Copyright (C) 2008-2011 Freescale Semiconductor, Inc. All rights reserved. * * Author: Yu Liu, yu.liu@freescale.com * * Description: * This file is based on arch/powerpc/kvm/44x_tlb.c, * by Hollis Blanchard <hollisb@us.ibm.com>. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License, version 2, as * published by the Free Software Foundation. */ #include <linux/types.h> #include <linux/slab.h> #include <linux/string.h> #include <linux/kvm.h> #include <linux/kvm_host.h> #include <linux/highmem.h> #include <asm/kvm_ppc.h> #include <asm/kvm_e500.h> #include "../mm/mmu_decl.h" #include "e500_tlb.h" #include "trace.h" #include "timing.h" #define to_htlb1_esel(esel) (tlb1_entry_num - (esel) - 1) static unsigned int tlb1_entry_num; void kvmppc_dump_tlbs(struct kvm_vcpu *vcpu) { struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); struct tlbe *tlbe; int i, tlbsel; printk("| %8s | %8s | %8s | %8s | %8s |\n", "nr", "mas1", "mas2", "mas3", "mas7"); for (tlbsel = 0; tlbsel < 2; tlbsel++) { printk("Guest TLB%d:\n", tlbsel); for (i = 0; i < vcpu_e500->guest_tlb_size[tlbsel]; i++) { tlbe = &vcpu_e500->guest_tlb[tlbsel][i]; if (tlbe->mas1 & MAS1_VALID) printk(" G[%d][%3d] | %08X | %08X | %08X | %08X |\n", tlbsel, i, tlbe->mas1, tlbe->mas2, tlbe->mas3, tlbe->mas7); } } for (tlbsel = 0; tlbsel < 2; tlbsel++) { printk("Shadow TLB%d:\n", tlbsel); for (i = 0; i < vcpu_e500->shadow_tlb_size[tlbsel]; i++) { tlbe = &vcpu_e500->shadow_tlb[tlbsel][i]; if (tlbe->mas1 & MAS1_VALID) printk(" S[%d][%3d] | %08X | %08X | %08X | %08X |\n", tlbsel, i, tlbe->mas1, tlbe->mas2, tlbe->mas3, tlbe->mas7); } } } static inline unsigned int tlb0_get_next_victim( struct kvmppc_vcpu_e500 *vcpu_e500) { unsigned int victim; victim = vcpu_e500->guest_tlb_nv[0]++; if (unlikely(vcpu_e500->guest_tlb_nv[0] >= KVM_E500_TLB0_WAY_NUM)) vcpu_e500->guest_tlb_nv[0] = 0; return victim; } static inline unsigned int tlb1_max_shadow_size(void) { 
return tlb1_entry_num - tlbcam_index; } static inline int tlbe_is_writable(struct tlbe *tlbe) { return tlbe->mas3 & (MAS3_SW|MAS3_UW); } static inline u32 e500_shadow_mas3_attrib(u32 mas3, int usermode) { /* Mask off reserved bits. */ mas3 &= MAS3_ATTRIB_MASK; if (!usermode) { /* Guest is in supervisor mode, * so we need to translate guest * supervisor permissions into user permissions. */ mas3 &= ~E500_TLB_USER_PERM_MASK; mas3 |= (mas3 & E500_TLB_SUPER_PERM_MASK) << 1; } return mas3 | E500_TLB_SUPER_PERM_MASK; } static inline u32 e500_shadow_mas2_attrib(u32 mas2, int usermode) { #ifdef CONFIG_SMP return (mas2 & MAS2_ATTRIB_MASK) | MAS2_M; #else return mas2 & MAS2_ATTRIB_MASK; #endif } /* * writing shadow tlb entry to host TLB */ static inline void __write_host_tlbe(struct tlbe *stlbe) { mtspr(SPRN_MAS1, stlbe->mas1); mtspr(SPRN_MAS2, stlbe->mas2); mtspr(SPRN_MAS3, stlbe->mas3); mtspr(SPRN_MAS7, stlbe->mas7); __asm__ __volatile__ ("tlbwe\n" : : ); } static inline void write_host_tlbe(struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel, int esel) { struct tlbe *stlbe = &vcpu_e500->shadow_tlb[tlbsel][esel]; local_irq_disable(); if (tlbsel == 0) { __write_host_tlbe(stlbe); } else { unsigned register mas0; mas0 = mfspr(SPRN_MAS0); mtspr(SPRN_MAS0, MAS0_TLBSEL(1) | MAS0_ESEL(to_htlb1_esel(esel))); __write_host_tlbe(stlbe); mtspr(SPRN_MAS0, mas0); } local_irq_enable(); } void kvmppc_e500_tlb_load(struct kvm_vcpu *vcpu, int cpu) { struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); int i; unsigned register mas0; /* Load all valid TLB1 entries to reduce guest tlb miss fault */ local_irq_disable(); mas0 = mfspr(SPRN_MAS0); for (i = 0; i < tlb1_max_shadow_size(); i++) { struct tlbe *stlbe = &vcpu_e500->shadow_tlb[1][i]; if (get_tlb_v(stlbe)) { mtspr(SPRN_MAS0, MAS0_TLBSEL(1) | MAS0_ESEL(to_htlb1_esel(i))); __write_host_tlbe(stlbe); } } mtspr(SPRN_MAS0, mas0); local_irq_enable(); } void kvmppc_e500_tlb_put(struct kvm_vcpu *vcpu) { _tlbil_all(); } /* Search the guest TLB for a 
matching entry. */ static int kvmppc_e500_tlb_index(struct kvmppc_vcpu_e500 *vcpu_e500, gva_t eaddr, int tlbsel, unsigned int pid, int as) { int i; /* XXX Replace loop with fancy data structures. */ for (i = 0; i < vcpu_e500->guest_tlb_size[tlbsel]; i++) { struct tlbe *tlbe = &vcpu_e500->guest_tlb[tlbsel][i]; unsigned int tid; if (eaddr < get_tlb_eaddr(tlbe)) continue; if (eaddr > get_tlb_end(tlbe)) continue; tid = get_tlb_tid(tlbe); if (tid && (tid != pid)) continue; if (!get_tlb_v(tlbe)) continue; if (get_tlb_ts(tlbe) != as && as != -1) continue; return i; } return -1; } static void kvmppc_e500_shadow_release(struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel, int esel) { struct tlbe *stlbe = &vcpu_e500->shadow_tlb[tlbsel][esel]; struct page *page = vcpu_e500->shadow_pages[tlbsel][esel]; if (page) { vcpu_e500->shadow_pages[tlbsel][esel] = NULL; if (get_tlb_v(stlbe)) { if (tlbe_is_writable(stlbe)) kvm_release_page_dirty(page); else kvm_release_page_clean(page); } } } static void kvmppc_e500_stlbe_invalidate(struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel, int esel) { struct tlbe *stlbe = &vcpu_e500->shadow_tlb[tlbsel][esel]; kvmppc_e500_shadow_release(vcpu_e500, tlbsel, esel); stlbe->mas1 = 0; trace_kvm_stlb_inval(index_of(tlbsel, esel)); } static void kvmppc_e500_tlb1_invalidate(struct kvmppc_vcpu_e500 *vcpu_e500, gva_t eaddr, gva_t eend, u32 tid) { unsigned int pid = tid & 0xff; unsigned int i; /* XXX Replace loop with fancy data structures. 
*/ for (i = 0; i < vcpu_e500->guest_tlb_size[1]; i++) { struct tlbe *stlbe = &vcpu_e500->shadow_tlb[1][i]; unsigned int tid; if (!get_tlb_v(stlbe)) continue; if (eend < get_tlb_eaddr(stlbe)) continue; if (eaddr > get_tlb_end(stlbe)) continue; tid = get_tlb_tid(stlbe); if (tid && (tid != pid)) continue; kvmppc_e500_stlbe_invalidate(vcpu_e500, 1, i); write_host_tlbe(vcpu_e500, 1, i); } } static inline void kvmppc_e500_deliver_tlb_miss(struct kvm_vcpu *vcpu, unsigned int eaddr, int as) { struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); unsigned int victim, pidsel, tsized; int tlbsel; /* since we only have two TLBs, only lower bit is used. */ tlbsel = (vcpu_e500->mas4 >> 28) & 0x1; victim = (tlbsel == 0) ? tlb0_get_next_victim(vcpu_e500) : 0; pidsel = (vcpu_e500->mas4 >> 16) & 0xf; tsized = (vcpu_e500->mas4 >> 7) & 0x1f; vcpu_e500->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(victim) | MAS0_NV(vcpu_e500->guest_tlb_nv[tlbsel]); vcpu_e500->mas1 = MAS1_VALID | (as ? MAS1_TS : 0) | MAS1_TID(vcpu_e500->pid[pidsel]) | MAS1_TSIZE(tsized); vcpu_e500->mas2 = (eaddr & MAS2_EPN) | (vcpu_e500->mas4 & MAS2_ATTRIB_MASK); vcpu_e500->mas3 &= MAS3_U0 | MAS3_U1 | MAS3_U2 | MAS3_U3; vcpu_e500->mas6 = (vcpu_e500->mas6 & MAS6_SPID1) | (get_cur_pid(vcpu) << 16) | (as ? MAS6_SAS : 0); vcpu_e500->mas7 = 0; } static inline void kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500, u64 gvaddr, gfn_t gfn, struct tlbe *gtlbe, int tlbsel, int esel) { struct page *new_page; struct tlbe *stlbe; hpa_t hpaddr; stlbe = &vcpu_e500->shadow_tlb[tlbsel][esel]; /* Get reference to new page. */ new_page = gfn_to_page(vcpu_e500->vcpu.kvm, gfn); if (is_error_page(new_page)) { printk(KERN_ERR "Couldn't get guest page for gfn %lx!\n", (long)gfn); kvm_release_page_clean(new_page); return; } hpaddr = page_to_phys(new_page); /* Drop reference to old page. 
*/ kvmppc_e500_shadow_release(vcpu_e500, tlbsel, esel); vcpu_e500->shadow_pages[tlbsel][esel] = new_page; /* Force TS=1 IPROT=0 TSIZE=4KB for all guest mappings. */ stlbe->mas1 = MAS1_TSIZE(BOOK3E_PAGESZ_4K) | MAS1_TID(get_tlb_tid(gtlbe)) | MAS1_TS | MAS1_VALID; stlbe->mas2 = (gvaddr & MAS2_EPN) | e500_shadow_mas2_attrib(gtlbe->mas2, vcpu_e500->vcpu.arch.shared->msr & MSR_PR); stlbe->mas3 = (hpaddr & MAS3_RPN) | e500_shadow_mas3_attrib(gtlbe->mas3, vcpu_e500->vcpu.arch.shared->msr & MSR_PR); stlbe->mas7 = (hpaddr >> 32) & MAS7_RPN; trace_kvm_stlb_write(index_of(tlbsel, esel), stlbe->mas1, stlbe->mas2, stlbe->mas3, stlbe->mas7); } /* XXX only map the one-one case, for now use TLB0 */ static int kvmppc_e500_stlbe_map(struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel, int esel) { struct tlbe *gtlbe; gtlbe = &vcpu_e500->guest_tlb[tlbsel][esel]; kvmppc_e500_shadow_map(vcpu_e500, get_tlb_eaddr(gtlbe), get_tlb_raddr(gtlbe) >> PAGE_SHIFT, gtlbe, tlbsel, esel); return esel; } /* Caller must ensure that the specified guest TLB entry is safe to insert into * the shadow TLB. */ /* XXX for both one-one and one-to-many , for now use TLB1 */ static int kvmppc_e500_tlb1_map(struct kvmppc_vcpu_e500 *vcpu_e500, u64 gvaddr, gfn_t gfn, struct tlbe *gtlbe) { unsigned int victim; victim = vcpu_e500->guest_tlb_nv[1]++; if (unlikely(vcpu_e500->guest_tlb_nv[1] >= tlb1_max_shadow_size())) vcpu_e500->guest_tlb_nv[1] = 0; kvmppc_e500_shadow_map(vcpu_e500, gvaddr, gfn, gtlbe, 1, victim); return victim; } /* Invalidate all guest kernel mappings when enter usermode, * so that when they fault back in they will get the * proper permission bits. */ void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode) { if (usermode) { struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); int i; /* XXX Replace loop with fancy data structures. 
*/ for (i = 0; i < tlb1_max_shadow_size(); i++) kvmppc_e500_stlbe_invalidate(vcpu_e500, 1, i); _tlbil_all(); } } static int kvmppc_e500_gtlbe_invalidate(struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel, int esel) { struct tlbe *gtlbe = &vcpu_e500->guest_tlb[tlbsel][esel]; if (unlikely(get_tlb_iprot(gtlbe))) return -1; if (tlbsel == 1) { kvmppc_e500_tlb1_invalidate(vcpu_e500, get_tlb_eaddr(gtlbe), get_tlb_end(gtlbe), get_tlb_tid(gtlbe)); } else { kvmppc_e500_stlbe_invalidate(vcpu_e500, tlbsel, esel); } gtlbe->mas1 = 0; return 0; } int kvmppc_e500_emul_mt_mmucsr0(struct kvmppc_vcpu_e500 *vcpu_e500, ulong value) { int esel; if (value & MMUCSR0_TLB0FI) for (esel = 0; esel < vcpu_e500->guest_tlb_size[0]; esel++) kvmppc_e500_gtlbe_invalidate(vcpu_e500, 0, esel); if (value & MMUCSR0_TLB1FI) for (esel = 0; esel < vcpu_e500->guest_tlb_size[1]; esel++) kvmppc_e500_gtlbe_invalidate(vcpu_e500, 1, esel); _tlbil_all(); return EMULATE_DONE; } int kvmppc_e500_emul_tlbivax(struct kvm_vcpu *vcpu, int ra, int rb) { struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); unsigned int ia; int esel, tlbsel; gva_t ea; ea = ((ra) ? kvmppc_get_gpr(vcpu, ra) : 0) + kvmppc_get_gpr(vcpu, rb); ia = (ea >> 2) & 0x1; /* since we only have two TLBs, only lower bit is used. 
*/ tlbsel = (ea >> 3) & 0x1; if (ia) { /* invalidate all entries */ for (esel = 0; esel < vcpu_e500->guest_tlb_size[tlbsel]; esel++) kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel); } else { ea &= 0xfffff000; esel = kvmppc_e500_tlb_index(vcpu_e500, ea, tlbsel, get_cur_pid(vcpu), -1); if (esel >= 0) kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel); } _tlbil_all(); return EMULATE_DONE; } int kvmppc_e500_emul_tlbre(struct kvm_vcpu *vcpu) { struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); int tlbsel, esel; struct tlbe *gtlbe; tlbsel = get_tlb_tlbsel(vcpu_e500); esel = get_tlb_esel(vcpu_e500, tlbsel); gtlbe = &vcpu_e500->guest_tlb[tlbsel][esel]; vcpu_e500->mas0 &= ~MAS0_NV(~0); vcpu_e500->mas0 |= MAS0_NV(vcpu_e500->guest_tlb_nv[tlbsel]); vcpu_e500->mas1 = gtlbe->mas1; vcpu_e500->mas2 = gtlbe->mas2; vcpu_e500->mas3 = gtlbe->mas3; vcpu_e500->mas7 = gtlbe->mas7; return EMULATE_DONE; } int kvmppc_e500_emul_tlbsx(struct kvm_vcpu *vcpu, int rb) { struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); int as = !!get_cur_sas(vcpu_e500); unsigned int pid = get_cur_spid(vcpu_e500); int esel, tlbsel; struct tlbe *gtlbe = NULL; gva_t ea; ea = kvmppc_get_gpr(vcpu, rb); for (tlbsel = 0; tlbsel < 2; tlbsel++) { esel = kvmppc_e500_tlb_index(vcpu_e500, ea, tlbsel, pid, as); if (esel >= 0) { gtlbe = &vcpu_e500->guest_tlb[tlbsel][esel]; break; } } if (gtlbe) { vcpu_e500->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(esel) | MAS0_NV(vcpu_e500->guest_tlb_nv[tlbsel]); vcpu_e500->mas1 = gtlbe->mas1; vcpu_e500->mas2 = gtlbe->mas2; vcpu_e500->mas3 = gtlbe->mas3; vcpu_e500->mas7 = gtlbe->mas7; } else { int victim; /* since we only have two TLBs, only lower bit is used. */ tlbsel = vcpu_e500->mas4 >> 28 & 0x1; victim = (tlbsel == 0) ? tlb0_get_next_victim(vcpu_e500) : 0; vcpu_e500->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(victim) | MAS0_NV(vcpu_e500->guest_tlb_nv[tlbsel]); vcpu_e500->mas1 = (vcpu_e500->mas6 & MAS6_SPID0) | (vcpu_e500->mas6 & (MAS6_SAS ? 
MAS1_TS : 0)) | (vcpu_e500->mas4 & MAS4_TSIZED(~0)); vcpu_e500->mas2 &= MAS2_EPN; vcpu_e500->mas2 |= vcpu_e500->mas4 & MAS2_ATTRIB_MASK; vcpu_e500->mas3 &= MAS3_U0 | MAS3_U1 | MAS3_U2 | MAS3_U3; vcpu_e500->mas7 = 0; } kvmppc_set_exit_type(vcpu, EMULATED_TLBSX_EXITS); return EMULATE_DONE; } int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu) { struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); u64 eaddr; u64 raddr; u32 tid; struct tlbe *gtlbe; int tlbsel, esel, stlbsel, sesel; tlbsel = get_tlb_tlbsel(vcpu_e500); esel = get_tlb_esel(vcpu_e500, tlbsel); gtlbe = &vcpu_e500->guest_tlb[tlbsel][esel]; if (get_tlb_v(gtlbe) && tlbsel == 1) { eaddr = get_tlb_eaddr(gtlbe); tid = get_tlb_tid(gtlbe); kvmppc_e500_tlb1_invalidate(vcpu_e500, eaddr, get_tlb_end(gtlbe), tid); } gtlbe->mas1 = vcpu_e500->mas1; gtlbe->mas2 = vcpu_e500->mas2; gtlbe->mas3 = vcpu_e500->mas3; gtlbe->mas7 = vcpu_e500->mas7; trace_kvm_gtlb_write(vcpu_e500->mas0, gtlbe->mas1, gtlbe->mas2, gtlbe->mas3, gtlbe->mas7); /* Invalidate shadow mappings for the about-to-be-clobbered TLBE. */ if (tlbe_is_host_safe(vcpu, gtlbe)) { switch (tlbsel) { case 0: /* TLB0 */ gtlbe->mas1 &= ~MAS1_TSIZE(~0); gtlbe->mas1 |= MAS1_TSIZE(BOOK3E_PAGESZ_4K); stlbsel = 0; sesel = kvmppc_e500_stlbe_map(vcpu_e500, 0, esel); break; case 1: /* TLB1 */ eaddr = get_tlb_eaddr(gtlbe); raddr = get_tlb_raddr(gtlbe); /* Create a 4KB mapping on the host. * If the guest wanted a large page, * only the first 4KB is mapped here and the rest * are mapped on the fly. 
*/ stlbsel = 1; sesel = kvmppc_e500_tlb1_map(vcpu_e500, eaddr, raddr >> PAGE_SHIFT, gtlbe); break; default: BUG(); } write_host_tlbe(vcpu_e500, stlbsel, sesel); } kvmppc_set_exit_type(vcpu, EMULATED_TLBWE_EXITS); return EMULATE_DONE; } int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr) { unsigned int as = !!(vcpu->arch.shared->msr & MSR_IS); return kvmppc_e500_tlb_search(vcpu, eaddr, get_cur_pid(vcpu), as); } int kvmppc_mmu_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr) { unsigned int as = !!(vcpu->arch.shared->msr & MSR_DS); return kvmppc_e500_tlb_search(vcpu, eaddr, get_cur_pid(vcpu), as); } void kvmppc_mmu_itlb_miss(struct kvm_vcpu *vcpu) { unsigned int as = !!(vcpu->arch.shared->msr & MSR_IS); kvmppc_e500_deliver_tlb_miss(vcpu, vcpu->arch.pc, as); } void kvmppc_mmu_dtlb_miss(struct kvm_vcpu *vcpu) { unsigned int as = !!(vcpu->arch.shared->msr & MSR_DS); kvmppc_e500_deliver_tlb_miss(vcpu, vcpu->arch.fault_dear, as); } gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int index, gva_t eaddr) { struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); struct tlbe *gtlbe = &vcpu_e500->guest_tlb[tlbsel_of(index)][esel_of(index)]; u64 pgmask = get_tlb_bytes(gtlbe) - 1; return get_tlb_raddr(gtlbe) | (eaddr & pgmask); } void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu) { struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); int tlbsel, i; for (tlbsel = 0; tlbsel < 2; tlbsel++) for (i = 0; i < vcpu_e500->guest_tlb_size[tlbsel]; i++) kvmppc_e500_shadow_release(vcpu_e500, tlbsel, i); /* discard all guest mapping */ _tlbil_all(); } void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr, unsigned int index) { struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); int tlbsel = tlbsel_of(index); int esel = esel_of(index); int stlbsel, sesel; switch (tlbsel) { case 0: stlbsel = 0; sesel = esel; break; case 1: { gfn_t gfn = gpaddr >> PAGE_SHIFT; struct tlbe *gtlbe = &vcpu_e500->guest_tlb[tlbsel][esel]; stlbsel = 1; sesel = kvmppc_e500_tlb1_map(vcpu_e500, eaddr, 
				/* Tail of kvmppc_mmu_map(): finish the
				 * TLB1 shadow-map call begun on the
				 * previous chunk line. */
				gfn, gtlbe);
		break;
	}

	default:
		BUG();
		break;
	}

	write_host_tlbe(vcpu_e500, stlbsel, sesel);
}

/*
 * Search both guest TLB arrays for an entry matching @eaddr under
 * (@pid, @as).  Returns a combined tlbsel/esel index on a hit, or -1
 * when neither array contains a match.
 */
int kvmppc_e500_tlb_search(struct kvm_vcpu *vcpu,
		gva_t eaddr, unsigned int pid, int as)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	int esel, tlbsel;

	for (tlbsel = 0; tlbsel < 2; tlbsel++) {
		esel = kvmppc_e500_tlb_index(vcpu_e500, eaddr, tlbsel, pid, as);
		if (esel >= 0)
			return index_of(tlbsel, esel);
	}

	return -1;
}

/* Set the guest PID0, mirroring it into the shadow PID and arch PID. */
void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 pid)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);

	vcpu_e500->pid[0] = vcpu->arch.shadow_pid =
		vcpu->arch.pid = pid;
}

/*
 * Seed the guest TLB1 with the two entries a freshly booted guest
 * expects: a large identity-style mapping for memory and a small
 * uncached mapping for early serial output.
 */
void kvmppc_e500_tlb_setup(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	struct tlbe *tlbe;

	/* Insert large initial mapping for guest. */
	tlbe = &vcpu_e500->guest_tlb[1][0];
	tlbe->mas1 = MAS1_VALID | MAS1_TSIZE(BOOK3E_PAGESZ_256M);
	tlbe->mas2 = 0;
	tlbe->mas3 = E500_TLB_SUPER_PERM_MASK;
	tlbe->mas7 = 0;

	/* 4K map for serial output. Used by kernel wrapper. */
	tlbe = &vcpu_e500->guest_tlb[1][1];
	tlbe->mas1 = MAS1_VALID | MAS1_TSIZE(BOOK3E_PAGESZ_4K);
	/* Cache-inhibited, guarded mapping of the serial page; the
	 * guest-physical address equals the effective address here. */
	tlbe->mas2 = (0xe0004500 & 0xFFFFF000) | MAS2_I | MAS2_G;
	tlbe->mas3 = (0xe0004500 & 0xFFFFF000) | E500_TLB_SUPER_PERM_MASK;
	tlbe->mas7 = 0;
}

/*
 * Allocate the per-vcpu guest and shadow TLB arrays plus the page
 * reference arrays, and derive the guest-visible TLB0CFG/TLB1CFG
 * values from the host's, with the entry count replaced by ours.
 *
 * Returns 0 on success, -1 on allocation failure (all earlier
 * allocations are unwound via the goto chain below).
 */
int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	/* Host TLB1 entry count bounds the TLB1 shadow array size. */
	tlb1_entry_num = mfspr(SPRN_TLB1CFG) & 0xFFF;

	vcpu_e500->guest_tlb_size[0] = KVM_E500_TLB0_SIZE;
	vcpu_e500->guest_tlb[0] =
		kzalloc(sizeof(struct tlbe) * KVM_E500_TLB0_SIZE, GFP_KERNEL);
	if (vcpu_e500->guest_tlb[0] == NULL)
		goto err_out;

	vcpu_e500->shadow_tlb_size[0] = KVM_E500_TLB0_SIZE;
	vcpu_e500->shadow_tlb[0] =
		kzalloc(sizeof(struct tlbe) * KVM_E500_TLB0_SIZE, GFP_KERNEL);
	if (vcpu_e500->shadow_tlb[0] == NULL)
		goto err_out_guest0;

	vcpu_e500->guest_tlb_size[1] = KVM_E500_TLB1_SIZE;
	vcpu_e500->guest_tlb[1] =
		kzalloc(sizeof(struct tlbe) * KVM_E500_TLB1_SIZE, GFP_KERNEL);
	if (vcpu_e500->guest_tlb[1] == NULL)
		goto err_out_shadow0;

	vcpu_e500->shadow_tlb_size[1] = tlb1_entry_num;
	vcpu_e500->shadow_tlb[1] =
		kzalloc(sizeof(struct tlbe) * tlb1_entry_num, GFP_KERNEL);
	if (vcpu_e500->shadow_tlb[1] == NULL)
		goto err_out_guest1;

	vcpu_e500->shadow_pages[0] = (struct page **)
		kzalloc(sizeof(struct page *) * KVM_E500_TLB0_SIZE, GFP_KERNEL);
	if (vcpu_e500->shadow_pages[0] == NULL)
		goto err_out_shadow1;

	vcpu_e500->shadow_pages[1] = (struct page **)
		kzalloc(sizeof(struct page *) * tlb1_entry_num, GFP_KERNEL);
	if (vcpu_e500->shadow_pages[1] == NULL)
		goto err_out_page0;

	/* Init TLB configuration register */
	vcpu_e500->tlb0cfg = mfspr(SPRN_TLB0CFG) & ~0xfffUL;
	vcpu_e500->tlb0cfg |= vcpu_e500->guest_tlb_size[0];
	vcpu_e500->tlb1cfg = mfspr(SPRN_TLB1CFG) & ~0xfffUL;
	vcpu_e500->tlb1cfg |= vcpu_e500->guest_tlb_size[1];

	return 0;

	/* Unwind in reverse allocation order; each label frees the
	 * allocation that succeeded just before the failing one. */
err_out_page0:
	kfree(vcpu_e500->shadow_pages[0]);
err_out_shadow1:
	kfree(vcpu_e500->shadow_tlb[1]);
err_out_guest1:
	kfree(vcpu_e500->guest_tlb[1]);
err_out_shadow0:
	kfree(vcpu_e500->shadow_tlb[0]);
err_out_guest0:
	kfree(vcpu_e500->guest_tlb[0]);
err_out:
	return -1;
}

/* Free everything kvmppc_e500_tlb_init() allocated (kfree(NULL) is
 * a no-op, so partial initialisation is also safe to tear down). */
void kvmppc_e500_tlb_uninit(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	kfree(vcpu_e500->shadow_pages[1]);
	kfree(vcpu_e500->shadow_pages[0]);
	kfree(vcpu_e500->shadow_tlb[1]);
	kfree(vcpu_e500->guest_tlb[1]);
	kfree(vcpu_e500->shadow_tlb[0]);
	kfree(vcpu_e500->guest_tlb[0]);
}
/* SPDX-License-Identifier: GPL-2.0 */