repo_name
string
path
string
copies
string
size
string
content
string
license
string
hiikezoe/android_kernel_nec_n06e
drivers/scsi/libfc/fc_frame.c
9143
2437
/* * Copyright(c) 2007 Intel Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. * * Maintained at www.Open-FCoE.org */ /* * Frame allocation. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/skbuff.h> #include <linux/crc32.h> #include <linux/gfp.h> #include <scsi/fc_frame.h> /* * Check the CRC in a frame. */ u32 fc_frame_crc_check(struct fc_frame *fp) { u32 crc; u32 error; const u8 *bp; unsigned int len; WARN_ON(!fc_frame_is_linear(fp)); fr_flags(fp) &= ~FCPHF_CRC_UNCHECKED; len = (fr_len(fp) + 3) & ~3; /* round up length to include fill */ bp = (const u8 *) fr_hdr(fp); crc = ~crc32(~0, bp, len); error = crc ^ fr_crc(fp); return error; } EXPORT_SYMBOL(fc_frame_crc_check); /* * Allocate a frame intended to be sent via fcoe_xmit. * Get an sk_buff for the frame and set the length. 
*/ struct fc_frame *_fc_frame_alloc(size_t len) { struct fc_frame *fp; struct sk_buff *skb; WARN_ON((len % sizeof(u32)) != 0); len += sizeof(struct fc_frame_header); skb = alloc_skb_fclone(len + FC_FRAME_HEADROOM + FC_FRAME_TAILROOM + NET_SKB_PAD, GFP_ATOMIC); if (!skb) return NULL; skb_reserve(skb, NET_SKB_PAD + FC_FRAME_HEADROOM); fp = (struct fc_frame *) skb; fc_frame_init(fp); skb_put(skb, len); return fp; } EXPORT_SYMBOL(_fc_frame_alloc); struct fc_frame *fc_frame_alloc_fill(struct fc_lport *lp, size_t payload_len) { struct fc_frame *fp; size_t fill; fill = payload_len % 4; if (fill != 0) fill = 4 - fill; fp = _fc_frame_alloc(payload_len + fill); if (fp) { memset((char *) fr_hdr(fp) + payload_len, 0, fill); /* trim is OK, we just allocated it so there are no fragments */ skb_trim(fp_skb(fp), payload_len + sizeof(struct fc_frame_header)); } return fp; } EXPORT_SYMBOL(fc_frame_alloc_fill);
gpl-2.0
s-class/Kona_01
drivers/scsi/libfc/fc_frame.c
9143
2437
/* * Copyright(c) 2007 Intel Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. * * Maintained at www.Open-FCoE.org */ /* * Frame allocation. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/skbuff.h> #include <linux/crc32.h> #include <linux/gfp.h> #include <scsi/fc_frame.h> /* * Check the CRC in a frame. */ u32 fc_frame_crc_check(struct fc_frame *fp) { u32 crc; u32 error; const u8 *bp; unsigned int len; WARN_ON(!fc_frame_is_linear(fp)); fr_flags(fp) &= ~FCPHF_CRC_UNCHECKED; len = (fr_len(fp) + 3) & ~3; /* round up length to include fill */ bp = (const u8 *) fr_hdr(fp); crc = ~crc32(~0, bp, len); error = crc ^ fr_crc(fp); return error; } EXPORT_SYMBOL(fc_frame_crc_check); /* * Allocate a frame intended to be sent via fcoe_xmit. * Get an sk_buff for the frame and set the length. 
*/ struct fc_frame *_fc_frame_alloc(size_t len) { struct fc_frame *fp; struct sk_buff *skb; WARN_ON((len % sizeof(u32)) != 0); len += sizeof(struct fc_frame_header); skb = alloc_skb_fclone(len + FC_FRAME_HEADROOM + FC_FRAME_TAILROOM + NET_SKB_PAD, GFP_ATOMIC); if (!skb) return NULL; skb_reserve(skb, NET_SKB_PAD + FC_FRAME_HEADROOM); fp = (struct fc_frame *) skb; fc_frame_init(fp); skb_put(skb, len); return fp; } EXPORT_SYMBOL(_fc_frame_alloc); struct fc_frame *fc_frame_alloc_fill(struct fc_lport *lp, size_t payload_len) { struct fc_frame *fp; size_t fill; fill = payload_len % 4; if (fill != 0) fill = 4 - fill; fp = _fc_frame_alloc(payload_len + fill); if (fp) { memset((char *) fr_hdr(fp) + payload_len, 0, fill); /* trim is OK, we just allocated it so there are no fragments */ skb_trim(fp_skb(fp), payload_len + sizeof(struct fc_frame_header)); } return fp; } EXPORT_SYMBOL(fc_frame_alloc_fill);
gpl-2.0
elephone-dev/P8000-Kernel
arch/arm/mm/copypage-xscale.c
9655
3650
/* * linux/arch/arm/lib/copypage-xscale.S * * Copyright (C) 1995-2005 Russell King * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This handles the mini data cache, as found on SA11x0 and XScale * processors. When we copy a user page page, we map it in such a way * that accesses to this page will not touch the main data cache, but * will be cached in the mini data cache. This prevents us thrashing * the main data cache on page faults. */ #include <linux/init.h> #include <linux/mm.h> #include <linux/highmem.h> #include <asm/pgtable.h> #include <asm/tlbflush.h> #include <asm/cacheflush.h> #include "mm.h" #define minicache_pgprot __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | \ L_PTE_MT_MINICACHE) static DEFINE_RAW_SPINLOCK(minicache_lock); /* * XScale mini-dcache optimised copy_user_highpage * * We flush the destination cache lines just before we write the data into the * corresponding address. Since the Dcache is read-allocate, this removes the * Dcache aliasing issue. The writes will be forwarded to the write buffer, * and merged as appropriate. */ static void __naked mc_copy_user_page(void *from, void *to) { /* * Strangely enough, best performance is achieved * when prefetching destination as well. 
(NP) */ asm volatile( "stmfd sp!, {r4, r5, lr} \n\ mov lr, %2 \n\ pld [r0, #0] \n\ pld [r0, #32] \n\ pld [r1, #0] \n\ pld [r1, #32] \n\ 1: pld [r0, #64] \n\ pld [r0, #96] \n\ pld [r1, #64] \n\ pld [r1, #96] \n\ 2: ldrd r2, [r0], #8 \n\ ldrd r4, [r0], #8 \n\ mov ip, r1 \n\ strd r2, [r1], #8 \n\ ldrd r2, [r0], #8 \n\ strd r4, [r1], #8 \n\ ldrd r4, [r0], #8 \n\ strd r2, [r1], #8 \n\ strd r4, [r1], #8 \n\ mcr p15, 0, ip, c7, c10, 1 @ clean D line\n\ ldrd r2, [r0], #8 \n\ mcr p15, 0, ip, c7, c6, 1 @ invalidate D line\n\ ldrd r4, [r0], #8 \n\ mov ip, r1 \n\ strd r2, [r1], #8 \n\ ldrd r2, [r0], #8 \n\ strd r4, [r1], #8 \n\ ldrd r4, [r0], #8 \n\ strd r2, [r1], #8 \n\ strd r4, [r1], #8 \n\ mcr p15, 0, ip, c7, c10, 1 @ clean D line\n\ subs lr, lr, #1 \n\ mcr p15, 0, ip, c7, c6, 1 @ invalidate D line\n\ bgt 1b \n\ beq 2b \n\ ldmfd sp!, {r4, r5, pc} " : : "r" (from), "r" (to), "I" (PAGE_SIZE / 64 - 1)); } void xscale_mc_copy_user_highpage(struct page *to, struct page *from, unsigned long vaddr, struct vm_area_struct *vma) { void *kto = kmap_atomic(to); if (!test_and_set_bit(PG_dcache_clean, &from->flags)) __flush_dcache_page(page_mapping(from), from); raw_spin_lock(&minicache_lock); set_top_pte(COPYPAGE_MINICACHE, mk_pte(from, minicache_pgprot)); mc_copy_user_page((void *)COPYPAGE_MINICACHE, kto); raw_spin_unlock(&minicache_lock); kunmap_atomic(kto); } /* * XScale optimised clear_user_page */ void xscale_mc_clear_user_highpage(struct page *page, unsigned long vaddr) { void *ptr, *kaddr = kmap_atomic(page); asm volatile( "mov r1, %2 \n\ mov r2, #0 \n\ mov r3, #0 \n\ 1: mov ip, %0 \n\ strd r2, [%0], #8 \n\ strd r2, [%0], #8 \n\ strd r2, [%0], #8 \n\ strd r2, [%0], #8 \n\ mcr p15, 0, ip, c7, c10, 1 @ clean D line\n\ subs r1, r1, #1 \n\ mcr p15, 0, ip, c7, c6, 1 @ invalidate D line\n\ bne 1b" : "=r" (ptr) : "0" (kaddr), "I" (PAGE_SIZE / 32) : "r1", "r2", "r3", "ip"); kunmap_atomic(kaddr); } struct cpu_user_fns xscale_mc_user_fns __initdata = { .cpu_clear_user_highpage = 
xscale_mc_clear_user_highpage, .cpu_copy_user_highpage = xscale_mc_copy_user_highpage, };
gpl-2.0
neomanu/NeoKernel-MT6589-A116
arch/unicore32/kernel/gpio.c
11703
3070
/* * linux/arch/unicore32/kernel/gpio.c * * Code specific to PKUnity SoC and UniCore ISA * * Maintained by GUAN Xue-tao <gxt@mprc.pku.edu.cn> * Copyright (C) 2001-2010 Guan Xuetao * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ /* in FPGA, no GPIO support */ #include <linux/init.h> #include <linux/module.h> #include <linux/gpio.h> #include <mach/hardware.h> #ifdef CONFIG_LEDS #include <linux/leds.h> #include <linux/platform_device.h> static const struct gpio_led puv3_gpio_leds[] = { { .name = "cpuhealth", .gpio = GPO_CPU_HEALTH, .active_low = 0, .default_trigger = "heartbeat", }, { .name = "hdd_led", .gpio = GPO_HDD_LED, .active_low = 1, .default_trigger = "ide-disk", }, }; static const struct gpio_led_platform_data puv3_gpio_led_data = { .num_leds = ARRAY_SIZE(puv3_gpio_leds), .leds = (void *) puv3_gpio_leds, }; static struct platform_device puv3_gpio_gpio_leds = { .name = "leds-gpio", .id = -1, .dev = { .platform_data = (void *) &puv3_gpio_led_data, } }; static int __init puv3_gpio_leds_init(void) { platform_device_register(&puv3_gpio_gpio_leds); return 0; } device_initcall(puv3_gpio_leds_init); #endif static int puv3_gpio_get(struct gpio_chip *chip, unsigned offset) { return readl(GPIO_GPLR) & GPIO_GPIO(offset); } static void puv3_gpio_set(struct gpio_chip *chip, unsigned offset, int value) { if (value) writel(GPIO_GPIO(offset), GPIO_GPSR); else writel(GPIO_GPIO(offset), GPIO_GPCR); } static int puv3_direction_input(struct gpio_chip *chip, unsigned offset) { unsigned long flags; local_irq_save(flags); writel(readl(GPIO_GPDR) & ~GPIO_GPIO(offset), GPIO_GPDR); local_irq_restore(flags); return 0; } static int puv3_direction_output(struct gpio_chip *chip, unsigned offset, int value) { unsigned long flags; local_irq_save(flags); puv3_gpio_set(chip, offset, value); writel(readl(GPIO_GPDR) | GPIO_GPIO(offset), GPIO_GPDR); 
local_irq_restore(flags); return 0; } static struct gpio_chip puv3_gpio_chip = { .label = "gpio", .direction_input = puv3_direction_input, .direction_output = puv3_direction_output, .set = puv3_gpio_set, .get = puv3_gpio_get, .base = 0, .ngpio = GPIO_MAX + 1, }; void __init puv3_init_gpio(void) { writel(GPIO_DIR, GPIO_GPDR); #if defined(CONFIG_PUV3_NB0916) || defined(CONFIG_PUV3_SMW0919) \ || defined(CONFIG_PUV3_DB0913) gpio_set_value(GPO_WIFI_EN, 1); gpio_set_value(GPO_HDD_LED, 1); gpio_set_value(GPO_VGA_EN, 1); gpio_set_value(GPO_LCD_EN, 1); gpio_set_value(GPO_CAM_PWR_EN, 0); gpio_set_value(GPO_LCD_VCC_EN, 1); gpio_set_value(GPO_SOFT_OFF, 1); gpio_set_value(GPO_BT_EN, 1); gpio_set_value(GPO_FAN_ON, 0); gpio_set_value(GPO_SPKR, 0); gpio_set_value(GPO_CPU_HEALTH, 1); gpio_set_value(GPO_LAN_SEL, 1); /* * DO NOT modify the GPO_SET_V1 and GPO_SET_V2 in kernel * gpio_set_value(GPO_SET_V1, 1); * gpio_set_value(GPO_SET_V2, 1); */ #endif gpiochip_add(&puv3_gpio_chip); }
gpl-2.0
francescosganga/remixos-kernel
drivers/net/ethernet/qlogic/qed/qed_cxt.c
184
21874
/* QLogic qed NIC Driver * Copyright (c) 2015 QLogic Corporation * * This software is available under the terms of the GNU General Public License * (GPL) Version 2, available from the file COPYING in the main directory of * this source tree. */ #include <linux/types.h> #include <linux/bitops.h> #include <linux/dma-mapping.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/list.h> #include <linux/log2.h> #include <linux/pci.h> #include <linux/slab.h> #include <linux/string.h> #include <linux/bitops.h> #include "qed.h" #include "qed_cxt.h" #include "qed_dev_api.h" #include "qed_hsi.h" #include "qed_hw.h" #include "qed_init_ops.h" #include "qed_reg_addr.h" /* Max number of connection types in HW (DQ/CDU etc.) */ #define MAX_CONN_TYPES PROTOCOLID_COMMON #define NUM_TASK_TYPES 2 #define NUM_TASK_PF_SEGMENTS 4 /* QM constants */ #define QM_PQ_ELEMENT_SIZE 4 /* in bytes */ /* Doorbell-Queue constants */ #define DQ_RANGE_SHIFT 4 #define DQ_RANGE_ALIGN BIT(DQ_RANGE_SHIFT) /* ILT constants */ #define ILT_DEFAULT_HW_P_SIZE 3 #define ILT_PAGE_IN_BYTES(hw_p_size) (1U << ((hw_p_size) + 12)) #define ILT_CFG_REG(cli, reg) PSWRQ2_REG_ ## cli ## _ ## reg ## _RT_OFFSET /* ILT entry structure */ #define ILT_ENTRY_PHY_ADDR_MASK 0x000FFFFFFFFFFFULL #define ILT_ENTRY_PHY_ADDR_SHIFT 0 #define ILT_ENTRY_VALID_MASK 0x1ULL #define ILT_ENTRY_VALID_SHIFT 52 #define ILT_ENTRY_IN_REGS 2 #define ILT_REG_SIZE_IN_BYTES 4 /* connection context union */ union conn_context { struct core_conn_context core_ctx; struct eth_conn_context eth_ctx; }; #define CONN_CXT_SIZE(p_hwfn) \ ALIGNED_TYPE_SIZE(union conn_context, p_hwfn) /* PF per protocl configuration object */ struct qed_conn_type_cfg { u32 cid_count; u32 cid_start; }; /* ILT Client configuration, Per connection type (protocol) resources. 
*/ #define ILT_CLI_PF_BLOCKS (1 + NUM_TASK_PF_SEGMENTS * 2) #define CDUC_BLK (0) enum ilt_clients { ILT_CLI_CDUC, ILT_CLI_QM, ILT_CLI_MAX }; struct ilt_cfg_pair { u32 reg; u32 val; }; struct qed_ilt_cli_blk { u32 total_size; /* 0 means not active */ u32 real_size_in_page; u32 start_line; }; struct qed_ilt_client_cfg { bool active; /* ILT boundaries */ struct ilt_cfg_pair first; struct ilt_cfg_pair last; struct ilt_cfg_pair p_size; /* ILT client blocks for PF */ struct qed_ilt_cli_blk pf_blks[ILT_CLI_PF_BLOCKS]; u32 pf_total_lines; }; /* Per Path - * ILT shadow table * Protocol acquired CID lists * PF start line in ILT */ struct qed_dma_mem { dma_addr_t p_phys; void *p_virt; size_t size; }; struct qed_cid_acquired_map { u32 start_cid; u32 max_count; unsigned long *cid_map; }; struct qed_cxt_mngr { /* Per protocl configuration */ struct qed_conn_type_cfg conn_cfg[MAX_CONN_TYPES]; /* computed ILT structure */ struct qed_ilt_client_cfg clients[ILT_CLI_MAX]; /* Acquired CIDs */ struct qed_cid_acquired_map acquired[MAX_CONN_TYPES]; /* ILT shadow table */ struct qed_dma_mem *ilt_shadow; u32 pf_start_line; }; static u32 qed_cxt_cdu_iids(struct qed_cxt_mngr *p_mngr) { u32 type, pf_cids = 0; for (type = 0; type < MAX_CONN_TYPES; type++) pf_cids += p_mngr->conn_cfg[type].cid_count; return pf_cids; } static void qed_cxt_qm_iids(struct qed_hwfn *p_hwfn, struct qed_qm_iids *iids) { struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; int type; for (type = 0; type < MAX_CONN_TYPES; type++) iids->cids += p_mngr->conn_cfg[type].cid_count; DP_VERBOSE(p_hwfn, QED_MSG_ILT, "iids: CIDS %08x\n", iids->cids); } /* set the iids count per protocol */ static void qed_cxt_set_proto_cid_count(struct qed_hwfn *p_hwfn, enum protocol_type type, u32 cid_count) { struct qed_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr; struct qed_conn_type_cfg *p_conn = &p_mgr->conn_cfg[type]; p_conn->cid_count = roundup(cid_count, DQ_RANGE_ALIGN); } static void qed_ilt_cli_blk_fill(struct qed_ilt_client_cfg *p_cli, struct 
qed_ilt_cli_blk *p_blk, u32 start_line, u32 total_size, u32 elem_size) { u32 ilt_size = ILT_PAGE_IN_BYTES(p_cli->p_size.val); /* verify thatits called only once for each block */ if (p_blk->total_size) return; p_blk->total_size = total_size; p_blk->real_size_in_page = 0; if (elem_size) p_blk->real_size_in_page = (ilt_size / elem_size) * elem_size; p_blk->start_line = start_line; } static void qed_ilt_cli_adv_line(struct qed_hwfn *p_hwfn, struct qed_ilt_client_cfg *p_cli, struct qed_ilt_cli_blk *p_blk, u32 *p_line, enum ilt_clients client_id) { if (!p_blk->total_size) return; if (!p_cli->active) p_cli->first.val = *p_line; p_cli->active = true; *p_line += DIV_ROUND_UP(p_blk->total_size, p_blk->real_size_in_page); p_cli->last.val = *p_line - 1; DP_VERBOSE(p_hwfn, QED_MSG_ILT, "ILT[Client %d] - Lines: [%08x - %08x]. Block - Size %08x [Real %08x] Start line %d\n", client_id, p_cli->first.val, p_cli->last.val, p_blk->total_size, p_blk->real_size_in_page, p_blk->start_line); } int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn) { struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; struct qed_ilt_client_cfg *p_cli; struct qed_ilt_cli_blk *p_blk; u32 curr_line, total, pf_cids; struct qed_qm_iids qm_iids; memset(&qm_iids, 0, sizeof(qm_iids)); p_mngr->pf_start_line = RESC_START(p_hwfn, QED_ILT); DP_VERBOSE(p_hwfn, QED_MSG_ILT, "hwfn [%d] - Set context manager starting line to be 0x%08x\n", p_hwfn->my_id, p_hwfn->p_cxt_mngr->pf_start_line); /* CDUC */ p_cli = &p_mngr->clients[ILT_CLI_CDUC]; curr_line = p_mngr->pf_start_line; p_cli->pf_total_lines = 0; /* get the counters for the CDUC and QM clients */ pf_cids = qed_cxt_cdu_iids(p_mngr); p_blk = &p_cli->pf_blks[CDUC_BLK]; total = pf_cids * CONN_CXT_SIZE(p_hwfn); qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line, total, CONN_CXT_SIZE(p_hwfn)); qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_CDUC); p_cli->pf_total_lines = curr_line - p_blk->start_line; /* QM */ p_cli = &p_mngr->clients[ILT_CLI_QM]; p_blk = 
&p_cli->pf_blks[0]; qed_cxt_qm_iids(p_hwfn, &qm_iids); total = qed_qm_pf_mem_size(p_hwfn->rel_pf_id, qm_iids.cids, 0, 0, p_hwfn->qm_info.num_pqs, 0); DP_VERBOSE(p_hwfn, QED_MSG_ILT, "QM ILT Info, (cids=%d, num_pqs=%d, memory_size=%d)\n", qm_iids.cids, p_hwfn->qm_info.num_pqs, total); qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line, total * 0x1000, QM_PQ_ELEMENT_SIZE); qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_QM); p_cli->pf_total_lines = curr_line - p_blk->start_line; if (curr_line - p_hwfn->p_cxt_mngr->pf_start_line > RESC_NUM(p_hwfn, QED_ILT)) { DP_ERR(p_hwfn, "too many ilt lines...#lines=%d\n", curr_line - p_hwfn->p_cxt_mngr->pf_start_line); return -EINVAL; } return 0; } #define for_each_ilt_valid_client(pos, clients) \ for (pos = 0; pos < ILT_CLI_MAX; pos++) /* Total number of ILT lines used by this PF */ static u32 qed_cxt_ilt_shadow_size(struct qed_ilt_client_cfg *ilt_clients) { u32 size = 0; u32 i; for_each_ilt_valid_client(i, ilt_clients) { if (!ilt_clients[i].active) continue; size += (ilt_clients[i].last.val - ilt_clients[i].first.val + 1); } return size; } static void qed_ilt_shadow_free(struct qed_hwfn *p_hwfn) { struct qed_ilt_client_cfg *p_cli = p_hwfn->p_cxt_mngr->clients; struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; u32 ilt_size, i; ilt_size = qed_cxt_ilt_shadow_size(p_cli); for (i = 0; p_mngr->ilt_shadow && i < ilt_size; i++) { struct qed_dma_mem *p_dma = &p_mngr->ilt_shadow[i]; if (p_dma->p_virt) dma_free_coherent(&p_hwfn->cdev->pdev->dev, p_dma->size, p_dma->p_virt, p_dma->p_phys); p_dma->p_virt = NULL; } kfree(p_mngr->ilt_shadow); } static int qed_ilt_blk_alloc(struct qed_hwfn *p_hwfn, struct qed_ilt_cli_blk *p_blk, enum ilt_clients ilt_client, u32 start_line_offset) { struct qed_dma_mem *ilt_shadow = p_hwfn->p_cxt_mngr->ilt_shadow; u32 lines, line, sz_left; if (!p_blk->total_size) return 0; sz_left = p_blk->total_size; lines = DIV_ROUND_UP(sz_left, p_blk->real_size_in_page); line = p_blk->start_line + start_line_offset - 
p_hwfn->p_cxt_mngr->pf_start_line; for (; lines; lines--) { dma_addr_t p_phys; void *p_virt; u32 size; size = min_t(u32, sz_left, p_blk->real_size_in_page); p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, size, &p_phys, GFP_KERNEL); if (!p_virt) return -ENOMEM; memset(p_virt, 0, size); ilt_shadow[line].p_phys = p_phys; ilt_shadow[line].p_virt = p_virt; ilt_shadow[line].size = size; DP_VERBOSE(p_hwfn, QED_MSG_ILT, "ILT shadow: Line [%d] Physical 0x%llx Virtual %p Size %d\n", line, (u64)p_phys, p_virt, size); sz_left -= size; line++; } return 0; } static int qed_ilt_shadow_alloc(struct qed_hwfn *p_hwfn) { struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; struct qed_ilt_client_cfg *clients = p_mngr->clients; struct qed_ilt_cli_blk *p_blk; u32 size, i, j; int rc; size = qed_cxt_ilt_shadow_size(clients); p_mngr->ilt_shadow = kcalloc(size, sizeof(struct qed_dma_mem), GFP_KERNEL); if (!p_mngr->ilt_shadow) { DP_NOTICE(p_hwfn, "Failed to allocate ilt shadow table\n"); rc = -ENOMEM; goto ilt_shadow_fail; } DP_VERBOSE(p_hwfn, QED_MSG_ILT, "Allocated 0x%x bytes for ilt shadow\n", (u32)(size * sizeof(struct qed_dma_mem))); for_each_ilt_valid_client(i, clients) { if (!clients[i].active) continue; for (j = 0; j < ILT_CLI_PF_BLOCKS; j++) { p_blk = &clients[i].pf_blks[j]; rc = qed_ilt_blk_alloc(p_hwfn, p_blk, i, 0); if (rc != 0) goto ilt_shadow_fail; } } return 0; ilt_shadow_fail: qed_ilt_shadow_free(p_hwfn); return rc; } static void qed_cid_map_free(struct qed_hwfn *p_hwfn) { struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; u32 type; for (type = 0; type < MAX_CONN_TYPES; type++) { kfree(p_mngr->acquired[type].cid_map); p_mngr->acquired[type].max_count = 0; p_mngr->acquired[type].start_cid = 0; } } static int qed_cid_map_alloc(struct qed_hwfn *p_hwfn) { struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; u32 start_cid = 0; u32 type; for (type = 0; type < MAX_CONN_TYPES; type++) { u32 cid_cnt = p_hwfn->p_cxt_mngr->conn_cfg[type].cid_count; u32 size; if (cid_cnt == 0) continue; 
size = DIV_ROUND_UP(cid_cnt, sizeof(unsigned long) * BITS_PER_BYTE) * sizeof(unsigned long); p_mngr->acquired[type].cid_map = kzalloc(size, GFP_KERNEL); if (!p_mngr->acquired[type].cid_map) goto cid_map_fail; p_mngr->acquired[type].max_count = cid_cnt; p_mngr->acquired[type].start_cid = start_cid; p_hwfn->p_cxt_mngr->conn_cfg[type].cid_start = start_cid; DP_VERBOSE(p_hwfn, QED_MSG_CXT, "Type %08x start: %08x count %08x\n", type, p_mngr->acquired[type].start_cid, p_mngr->acquired[type].max_count); start_cid += cid_cnt; } return 0; cid_map_fail: qed_cid_map_free(p_hwfn); return -ENOMEM; } int qed_cxt_mngr_alloc(struct qed_hwfn *p_hwfn) { struct qed_cxt_mngr *p_mngr; u32 i; p_mngr = kzalloc(sizeof(*p_mngr), GFP_ATOMIC); if (!p_mngr) { DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_cxt_mngr'\n"); return -ENOMEM; } /* Initialize ILT client registers */ p_mngr->clients[ILT_CLI_CDUC].first.reg = ILT_CFG_REG(CDUC, FIRST_ILT); p_mngr->clients[ILT_CLI_CDUC].last.reg = ILT_CFG_REG(CDUC, LAST_ILT); p_mngr->clients[ILT_CLI_CDUC].p_size.reg = ILT_CFG_REG(CDUC, P_SIZE); p_mngr->clients[ILT_CLI_QM].first.reg = ILT_CFG_REG(QM, FIRST_ILT); p_mngr->clients[ILT_CLI_QM].last.reg = ILT_CFG_REG(QM, LAST_ILT); p_mngr->clients[ILT_CLI_QM].p_size.reg = ILT_CFG_REG(QM, P_SIZE); /* default ILT page size for all clients is 32K */ for (i = 0; i < ILT_CLI_MAX; i++) p_mngr->clients[i].p_size.val = ILT_DEFAULT_HW_P_SIZE; /* Set the cxt mangr pointer priori to further allocations */ p_hwfn->p_cxt_mngr = p_mngr; return 0; } int qed_cxt_tables_alloc(struct qed_hwfn *p_hwfn) { int rc; /* Allocate the ILT shadow table */ rc = qed_ilt_shadow_alloc(p_hwfn); if (rc) { DP_NOTICE(p_hwfn, "Failed to allocate ilt memory\n"); goto tables_alloc_fail; } /* Allocate and initialize the acquired cids bitmaps */ rc = qed_cid_map_alloc(p_hwfn); if (rc) { DP_NOTICE(p_hwfn, "Failed to allocate cid maps\n"); goto tables_alloc_fail; } return 0; tables_alloc_fail: qed_cxt_mngr_free(p_hwfn); return rc; } void 
qed_cxt_mngr_free(struct qed_hwfn *p_hwfn) { if (!p_hwfn->p_cxt_mngr) return; qed_cid_map_free(p_hwfn); qed_ilt_shadow_free(p_hwfn); kfree(p_hwfn->p_cxt_mngr); p_hwfn->p_cxt_mngr = NULL; } void qed_cxt_mngr_setup(struct qed_hwfn *p_hwfn) { struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; int type; /* Reset acquired cids */ for (type = 0; type < MAX_CONN_TYPES; type++) { u32 cid_cnt = p_hwfn->p_cxt_mngr->conn_cfg[type].cid_count; if (cid_cnt == 0) continue; memset(p_mngr->acquired[type].cid_map, 0, DIV_ROUND_UP(cid_cnt, sizeof(unsigned long) * BITS_PER_BYTE) * sizeof(unsigned long)); } } /* CDU Common */ #define CDUC_CXT_SIZE_SHIFT \ CDU_REG_CID_ADDR_PARAMS_CONTEXT_SIZE_SHIFT #define CDUC_CXT_SIZE_MASK \ (CDU_REG_CID_ADDR_PARAMS_CONTEXT_SIZE >> CDUC_CXT_SIZE_SHIFT) #define CDUC_BLOCK_WASTE_SHIFT \ CDU_REG_CID_ADDR_PARAMS_BLOCK_WASTE_SHIFT #define CDUC_BLOCK_WASTE_MASK \ (CDU_REG_CID_ADDR_PARAMS_BLOCK_WASTE >> CDUC_BLOCK_WASTE_SHIFT) #define CDUC_NCIB_SHIFT \ CDU_REG_CID_ADDR_PARAMS_NCIB_SHIFT #define CDUC_NCIB_MASK \ (CDU_REG_CID_ADDR_PARAMS_NCIB >> CDUC_NCIB_SHIFT) static void qed_cdu_init_common(struct qed_hwfn *p_hwfn) { u32 page_sz, elems_per_page, block_waste, cxt_size, cdu_params = 0; /* CDUC - connection configuration */ page_sz = p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC].p_size.val; cxt_size = CONN_CXT_SIZE(p_hwfn); elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size; block_waste = ILT_PAGE_IN_BYTES(page_sz) - elems_per_page * cxt_size; SET_FIELD(cdu_params, CDUC_CXT_SIZE, cxt_size); SET_FIELD(cdu_params, CDUC_BLOCK_WASTE, block_waste); SET_FIELD(cdu_params, CDUC_NCIB, elems_per_page); STORE_RT_REG(p_hwfn, CDU_REG_CID_ADDR_PARAMS_RT_OFFSET, cdu_params); } void qed_qm_init_pf(struct qed_hwfn *p_hwfn) { struct qed_qm_pf_rt_init_params params; struct qed_qm_info *qm_info = &p_hwfn->qm_info; struct qed_qm_iids iids; memset(&iids, 0, sizeof(iids)); qed_cxt_qm_iids(p_hwfn, &iids); memset(&params, 0, sizeof(params)); params.port_id = p_hwfn->port_id; 
params.pf_id = p_hwfn->rel_pf_id; params.max_phys_tcs_per_port = qm_info->max_phys_tcs_per_port; params.is_first_pf = p_hwfn->first_on_engine; params.num_pf_cids = iids.cids; params.start_pq = qm_info->start_pq; params.num_pf_pqs = qm_info->num_pqs; params.start_vport = qm_info->num_vports; params.pf_wfq = qm_info->pf_wfq; params.pf_rl = qm_info->pf_rl; params.pq_params = qm_info->qm_pq_params; params.vport_params = qm_info->qm_vport_params; qed_qm_pf_rt_init(p_hwfn, p_hwfn->p_main_ptt, &params); } /* CM PF */ static int qed_cm_init_pf(struct qed_hwfn *p_hwfn) { union qed_qm_pq_params pq_params; u16 pq; /* XCM pure-LB queue */ memset(&pq_params, 0, sizeof(pq_params)); pq_params.core.tc = LB_TC; pq = qed_get_qm_pq(p_hwfn, PROTOCOLID_CORE, &pq_params); STORE_RT_REG(p_hwfn, XCM_REG_CON_PHY_Q3_RT_OFFSET, pq); return 0; } /* DQ PF */ static void qed_dq_init_pf(struct qed_hwfn *p_hwfn) { struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; u32 dq_pf_max_cid = 0; dq_pf_max_cid += (p_mngr->conn_cfg[0].cid_count >> DQ_RANGE_SHIFT); STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_0_RT_OFFSET, dq_pf_max_cid); dq_pf_max_cid += (p_mngr->conn_cfg[1].cid_count >> DQ_RANGE_SHIFT); STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_1_RT_OFFSET, dq_pf_max_cid); dq_pf_max_cid += (p_mngr->conn_cfg[2].cid_count >> DQ_RANGE_SHIFT); STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_2_RT_OFFSET, dq_pf_max_cid); dq_pf_max_cid += (p_mngr->conn_cfg[3].cid_count >> DQ_RANGE_SHIFT); STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_3_RT_OFFSET, dq_pf_max_cid); dq_pf_max_cid += (p_mngr->conn_cfg[4].cid_count >> DQ_RANGE_SHIFT); STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_4_RT_OFFSET, dq_pf_max_cid); /* 5 - PF */ dq_pf_max_cid += (p_mngr->conn_cfg[5].cid_count >> DQ_RANGE_SHIFT); STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_5_RT_OFFSET, dq_pf_max_cid); } static void qed_ilt_bounds_init(struct qed_hwfn *p_hwfn) { struct qed_ilt_client_cfg *ilt_clients; int i; ilt_clients = p_hwfn->p_cxt_mngr->clients; for_each_ilt_valid_client(i, 
ilt_clients) { if (!ilt_clients[i].active) continue; STORE_RT_REG(p_hwfn, ilt_clients[i].first.reg, ilt_clients[i].first.val); STORE_RT_REG(p_hwfn, ilt_clients[i].last.reg, ilt_clients[i].last.val); STORE_RT_REG(p_hwfn, ilt_clients[i].p_size.reg, ilt_clients[i].p_size.val); } } /* ILT (PSWRQ2) PF */ static void qed_ilt_init_pf(struct qed_hwfn *p_hwfn) { struct qed_ilt_client_cfg *clients; struct qed_cxt_mngr *p_mngr; struct qed_dma_mem *p_shdw; u32 line, rt_offst, i; qed_ilt_bounds_init(p_hwfn); p_mngr = p_hwfn->p_cxt_mngr; p_shdw = p_mngr->ilt_shadow; clients = p_hwfn->p_cxt_mngr->clients; for_each_ilt_valid_client(i, clients) { if (!clients[i].active) continue; /** Client's 1st val and RT array are absolute, ILT shadows' * lines are relative. */ line = clients[i].first.val - p_mngr->pf_start_line; rt_offst = PSWRQ2_REG_ILT_MEMORY_RT_OFFSET + clients[i].first.val * ILT_ENTRY_IN_REGS; for (; line <= clients[i].last.val - p_mngr->pf_start_line; line++, rt_offst += ILT_ENTRY_IN_REGS) { u64 ilt_hw_entry = 0; /** p_virt could be NULL incase of dynamic * allocation */ if (p_shdw[line].p_virt) { SET_FIELD(ilt_hw_entry, ILT_ENTRY_VALID, 1ULL); SET_FIELD(ilt_hw_entry, ILT_ENTRY_PHY_ADDR, (p_shdw[line].p_phys >> 12)); DP_VERBOSE(p_hwfn, QED_MSG_ILT, "Setting RT[0x%08x] from ILT[0x%08x] [Client is %d] to Physical addr: 0x%llx\n", rt_offst, line, i, (u64)(p_shdw[line].p_phys >> 12)); } STORE_RT_REG_AGG(p_hwfn, rt_offst, ilt_hw_entry); } } } void qed_cxt_hw_init_common(struct qed_hwfn *p_hwfn) { qed_cdu_init_common(p_hwfn); } void qed_cxt_hw_init_pf(struct qed_hwfn *p_hwfn) { qed_qm_init_pf(p_hwfn); qed_cm_init_pf(p_hwfn); qed_dq_init_pf(p_hwfn); qed_ilt_init_pf(p_hwfn); } int qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn, enum protocol_type type, u32 *p_cid) { struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; u32 rel_cid; if (type >= MAX_CONN_TYPES || !p_mngr->acquired[type].cid_map) { DP_NOTICE(p_hwfn, "Invalid protocol type %d", type); return -EINVAL; } rel_cid = 
find_first_zero_bit(p_mngr->acquired[type].cid_map, p_mngr->acquired[type].max_count); if (rel_cid >= p_mngr->acquired[type].max_count) { DP_NOTICE(p_hwfn, "no CID available for protocol %d\n", type); return -EINVAL; } __set_bit(rel_cid, p_mngr->acquired[type].cid_map); *p_cid = rel_cid + p_mngr->acquired[type].start_cid; return 0; } static bool qed_cxt_test_cid_acquired(struct qed_hwfn *p_hwfn, u32 cid, enum protocol_type *p_type) { struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; struct qed_cid_acquired_map *p_map; enum protocol_type p; u32 rel_cid; /* Iterate over protocols and find matching cid range */ for (p = 0; p < MAX_CONN_TYPES; p++) { p_map = &p_mngr->acquired[p]; if (!p_map->cid_map) continue; if (cid >= p_map->start_cid && cid < p_map->start_cid + p_map->max_count) break; } *p_type = p; if (p == MAX_CONN_TYPES) { DP_NOTICE(p_hwfn, "Invalid CID %d", cid); return false; } rel_cid = cid - p_map->start_cid; if (!test_bit(rel_cid, p_map->cid_map)) { DP_NOTICE(p_hwfn, "CID %d not acquired", cid); return false; } return true; } void qed_cxt_release_cid(struct qed_hwfn *p_hwfn, u32 cid) { struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; enum protocol_type type; bool b_acquired; u32 rel_cid; /* Test acquired and find matching per-protocol map */ b_acquired = qed_cxt_test_cid_acquired(p_hwfn, cid, &type); if (!b_acquired) return; rel_cid = cid - p_mngr->acquired[type].start_cid; __clear_bit(rel_cid, p_mngr->acquired[type].cid_map); } int qed_cxt_get_cid_info(struct qed_hwfn *p_hwfn, struct qed_cxt_info *p_info) { struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; u32 conn_cxt_size, hw_p_size, cxts_per_p, line; enum protocol_type type; bool b_acquired; /* Test acquired and find matching per-protocol map */ b_acquired = qed_cxt_test_cid_acquired(p_hwfn, p_info->iid, &type); if (!b_acquired) return -EINVAL; /* set the protocl type */ p_info->type = type; /* compute context virtual pointer */ hw_p_size = p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC].p_size.val; 
conn_cxt_size = CONN_CXT_SIZE(p_hwfn); cxts_per_p = ILT_PAGE_IN_BYTES(hw_p_size) / conn_cxt_size; line = p_info->iid / cxts_per_p; /* Make sure context is allocated (dynamic allocation) */ if (!p_mngr->ilt_shadow[line].p_virt) return -EINVAL; p_info->p_cxt = p_mngr->ilt_shadow[line].p_virt + p_info->iid % cxts_per_p * conn_cxt_size; DP_VERBOSE(p_hwfn, (QED_MSG_ILT | QED_MSG_CXT), "Accessing ILT shadow[%d]: CXT pointer is at %p (for iid %d)\n", p_info->iid / cxts_per_p, p_info->p_cxt, p_info->iid); return 0; } int qed_cxt_set_pf_params(struct qed_hwfn *p_hwfn) { struct qed_eth_pf_params *p_params = &p_hwfn->pf_params.eth_pf_params; /* Set the number of required CORE connections */ u32 core_cids = 1; /* SPQ */ qed_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_CORE, core_cids); qed_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_ETH, p_params->num_cons); return 0; }
gpl-2.0
TomGiordano/kernel_huawei_u8220
arch/sparc/kernel/ioport.c
184
19592
/* * ioport.c: Simple io mapping allocator. * * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu) * Copyright (C) 1995 Miguel de Icaza (miguel@nuclecu.unam.mx) * * 1996: sparc_free_io, 1999: ioremap()/iounmap() by Pete Zaitcev. * * 2000/01/29 * <rth> zait: as long as pci_alloc_consistent produces something addressable, * things are ok. * <zaitcev> rth: no, it is relevant, because get_free_pages returns you a * pointer into the big page mapping * <rth> zait: so what? * <rth> zait: remap_it_my_way(virt_to_phys(get_free_page())) * <zaitcev> Hmm * <zaitcev> Suppose I did this remap_it_my_way(virt_to_phys(get_free_page())). * So far so good. * <zaitcev> Now, driver calls pci_free_consistent(with result of * remap_it_my_way()). * <zaitcev> How do you find the address to pass to free_pages()? * <rth> zait: walk the page tables? It's only two or three level after all. * <rth> zait: you have to walk them anyway to remove the mapping. * <zaitcev> Hmm * <zaitcev> Sounds reasonable */ #include <linux/module.h> #include <linux/sched.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/types.h> #include <linux/ioport.h> #include <linux/mm.h> #include <linux/slab.h> #include <linux/pci.h> /* struct pci_dev */ #include <linux/proc_fs.h> #include <linux/scatterlist.h> #include <linux/of_device.h> #include <asm/io.h> #include <asm/vaddrs.h> #include <asm/oplib.h> #include <asm/prom.h> #include <asm/page.h> #include <asm/pgalloc.h> #include <asm/dma.h> #include <asm/iommu.h> #include <asm/io-unit.h> #include "dma.h" #define mmu_inval_dma_area(p, l) /* Anton pulled it out for 2.4.0-xx */ static struct resource *_sparc_find_resource(struct resource *r, unsigned long); static void __iomem *_sparc_ioremap(struct resource *res, u32 bus, u32 pa, int sz); static void __iomem *_sparc_alloc_io(unsigned int busno, unsigned long phys, unsigned long size, char *name); static void _sparc_free_io(struct resource *res); static void register_proc_sparc_ioport(void); /* 
This points to the next to use virtual memory for DVMA mappings */ static struct resource _sparc_dvma = { .name = "sparc_dvma", .start = DVMA_VADDR, .end = DVMA_END - 1 }; /* This points to the start of I/O mappings, cluable from outside. */ /*ext*/ struct resource sparc_iomap = { .name = "sparc_iomap", .start = IOBASE_VADDR, .end = IOBASE_END - 1 }; /* * Our mini-allocator... * Boy this is gross! We need it because we must map I/O for * timers and interrupt controller before the kmalloc is available. */ #define XNMLN 15 #define XNRES 10 /* SS-10 uses 8 */ struct xresource { struct resource xres; /* Must be first */ int xflag; /* 1 == used */ char xname[XNMLN+1]; }; static struct xresource xresv[XNRES]; static struct xresource *xres_alloc(void) { struct xresource *xrp; int n; xrp = xresv; for (n = 0; n < XNRES; n++) { if (xrp->xflag == 0) { xrp->xflag = 1; return xrp; } xrp++; } return NULL; } static void xres_free(struct xresource *xrp) { xrp->xflag = 0; } /* * These are typically used in PCI drivers * which are trying to be cross-platform. * * Bus type is always zero on IIep. */ void __iomem *ioremap(unsigned long offset, unsigned long size) { char name[14]; sprintf(name, "phys_%08x", (u32)offset); return _sparc_alloc_io(0, offset, size, name); } EXPORT_SYMBOL(ioremap); /* * Comlimentary to ioremap(). 
*/ void iounmap(volatile void __iomem *virtual) { unsigned long vaddr = (unsigned long) virtual & PAGE_MASK; struct resource *res; if ((res = _sparc_find_resource(&sparc_iomap, vaddr)) == NULL) { printk("free_io/iounmap: cannot free %lx\n", vaddr); return; } _sparc_free_io(res); if ((char *)res >= (char*)xresv && (char *)res < (char *)&xresv[XNRES]) { xres_free((struct xresource *)res); } else { kfree(res); } } EXPORT_SYMBOL(iounmap); void __iomem *of_ioremap(struct resource *res, unsigned long offset, unsigned long size, char *name) { return _sparc_alloc_io(res->flags & 0xF, res->start + offset, size, name); } EXPORT_SYMBOL(of_ioremap); void of_iounmap(struct resource *res, void __iomem *base, unsigned long size) { iounmap(base); } EXPORT_SYMBOL(of_iounmap); /* * Meat of mapping */ static void __iomem *_sparc_alloc_io(unsigned int busno, unsigned long phys, unsigned long size, char *name) { static int printed_full; struct xresource *xres; struct resource *res; char *tack; int tlen; void __iomem *va; /* P3 diag */ if (name == NULL) name = "???"; if ((xres = xres_alloc()) != 0) { tack = xres->xname; res = &xres->xres; } else { if (!printed_full) { printk("ioremap: done with statics, switching to malloc\n"); printed_full = 1; } tlen = strlen(name); tack = kmalloc(sizeof (struct resource) + tlen + 1, GFP_KERNEL); if (tack == NULL) return NULL; memset(tack, 0, sizeof(struct resource)); res = (struct resource *) tack; tack += sizeof (struct resource); } strlcpy(tack, name, XNMLN+1); res->name = tack; va = _sparc_ioremap(res, busno, phys, size); /* printk("ioremap(0x%x:%08lx[0x%lx])=%p\n", busno, phys, size, va); */ /* P3 diag */ return va; } /* */ static void __iomem * _sparc_ioremap(struct resource *res, u32 bus, u32 pa, int sz) { unsigned long offset = ((unsigned long) pa) & (~PAGE_MASK); if (allocate_resource(&sparc_iomap, res, (offset + sz + PAGE_SIZE-1) & PAGE_MASK, sparc_iomap.start, sparc_iomap.end, PAGE_SIZE, NULL, NULL) != 0) { /* Usually we cannot see printks 
in this case. */ prom_printf("alloc_io_res(%s): cannot occupy\n", (res->name != NULL)? res->name: "???"); prom_halt(); } pa &= PAGE_MASK; sparc_mapiorange(bus, pa, res->start, res->end - res->start + 1); return (void __iomem *)(unsigned long)(res->start + offset); } /* * Comlimentary to _sparc_ioremap(). */ static void _sparc_free_io(struct resource *res) { unsigned long plen; plen = res->end - res->start + 1; BUG_ON((plen & (PAGE_SIZE-1)) != 0); sparc_unmapiorange(res->start, plen); release_resource(res); } #ifdef CONFIG_SBUS void sbus_set_sbus64(struct device *dev, int x) { printk("sbus_set_sbus64: unsupported\n"); } EXPORT_SYMBOL(sbus_set_sbus64); /* * Allocate a chunk of memory suitable for DMA. * Typically devices use them for control blocks. * CPU may access them without any explicit flushing. */ void *sbus_alloc_consistent(struct device *dev, long len, u32 *dma_addrp) { struct of_device *op = to_of_device(dev); unsigned long len_total = (len + PAGE_SIZE-1) & PAGE_MASK; unsigned long va; struct resource *res; int order; /* XXX why are some lengths signed, others unsigned? */ if (len <= 0) { return NULL; } /* XXX So what is maxphys for us and how do drivers know it? */ if (len > 256*1024) { /* __get_free_pages() limit */ return NULL; } order = get_order(len_total); if ((va = __get_free_pages(GFP_KERNEL|__GFP_COMP, order)) == 0) goto err_nopages; if ((res = kzalloc(sizeof(struct resource), GFP_KERNEL)) == NULL) goto err_nomem; if (allocate_resource(&_sparc_dvma, res, len_total, _sparc_dvma.start, _sparc_dvma.end, PAGE_SIZE, NULL, NULL) != 0) { printk("sbus_alloc_consistent: cannot occupy 0x%lx", len_total); goto err_nova; } mmu_inval_dma_area(va, len_total); // XXX The mmu_map_dma_area does this for us below, see comments. // sparc_mapiorange(0, virt_to_phys(va), res->start, len_total); /* * XXX That's where sdev would be used. Currently we load * all iommu tables with the same translations. 
*/ if (mmu_map_dma_area(dev, dma_addrp, va, res->start, len_total) != 0) goto err_noiommu; res->name = op->node->name; return (void *)(unsigned long)res->start; err_noiommu: release_resource(res); err_nova: free_pages(va, order); err_nomem: kfree(res); err_nopages: return NULL; } void sbus_free_consistent(struct device *dev, long n, void *p, u32 ba) { struct resource *res; struct page *pgv; if ((res = _sparc_find_resource(&_sparc_dvma, (unsigned long)p)) == NULL) { printk("sbus_free_consistent: cannot free %p\n", p); return; } if (((unsigned long)p & (PAGE_SIZE-1)) != 0) { printk("sbus_free_consistent: unaligned va %p\n", p); return; } n = (n + PAGE_SIZE-1) & PAGE_MASK; if ((res->end-res->start)+1 != n) { printk("sbus_free_consistent: region 0x%lx asked 0x%lx\n", (long)((res->end-res->start)+1), n); return; } release_resource(res); kfree(res); /* mmu_inval_dma_area(va, n); */ /* it's consistent, isn't it */ pgv = virt_to_page(p); mmu_unmap_dma_area(dev, ba, n); __free_pages(pgv, get_order(n)); } /* * Map a chunk of memory so that devices can see it. * CPU view of this memory may be inconsistent with * a device view and explicit flushing is necessary. */ dma_addr_t sbus_map_single(struct device *dev, void *va, size_t len, int direction) { /* XXX why are some lengths signed, others unsigned? */ if (len <= 0) { return 0; } /* XXX So what is maxphys for us and how do drivers know it? */ if (len > 256*1024) { /* __get_free_pages() limit */ return 0; } return mmu_get_scsi_one(dev, va, len); } void sbus_unmap_single(struct device *dev, dma_addr_t ba, size_t n, int direction) { mmu_release_scsi_one(dev, ba, n); } int sbus_map_sg(struct device *dev, struct scatterlist *sg, int n, int direction) { mmu_get_scsi_sgl(dev, sg, n); /* * XXX sparc64 can return a partial length here. 
sun4c should do this * but it currently panics if it can't fulfill the request - Anton */ return n; } void sbus_unmap_sg(struct device *dev, struct scatterlist *sg, int n, int direction) { mmu_release_scsi_sgl(dev, sg, n); } void sbus_dma_sync_single_for_cpu(struct device *dev, dma_addr_t ba, size_t size, int direction) { } void sbus_dma_sync_single_for_device(struct device *dev, dma_addr_t ba, size_t size, int direction) { } static int __init sparc_register_ioport(void) { register_proc_sparc_ioport(); return 0; } arch_initcall(sparc_register_ioport); #endif /* CONFIG_SBUS */ #ifdef CONFIG_PCI /* Allocate and map kernel buffer using consistent mode DMA for a device. * hwdev should be valid struct pci_dev pointer for PCI devices. */ void *pci_alloc_consistent(struct pci_dev *pdev, size_t len, dma_addr_t *pba) { unsigned long len_total = (len + PAGE_SIZE-1) & PAGE_MASK; unsigned long va; struct resource *res; int order; if (len == 0) { return NULL; } if (len > 256*1024) { /* __get_free_pages() limit */ return NULL; } order = get_order(len_total); va = __get_free_pages(GFP_KERNEL, order); if (va == 0) { printk("pci_alloc_consistent: no %ld pages\n", len_total>>PAGE_SHIFT); return NULL; } if ((res = kzalloc(sizeof(struct resource), GFP_KERNEL)) == NULL) { free_pages(va, order); printk("pci_alloc_consistent: no core\n"); return NULL; } if (allocate_resource(&_sparc_dvma, res, len_total, _sparc_dvma.start, _sparc_dvma.end, PAGE_SIZE, NULL, NULL) != 0) { printk("pci_alloc_consistent: cannot occupy 0x%lx", len_total); free_pages(va, order); kfree(res); return NULL; } mmu_inval_dma_area(va, len_total); #if 0 /* P3 */ printk("pci_alloc_consistent: kva %lx uncva %lx phys %lx size %lx\n", (long)va, (long)res->start, (long)virt_to_phys(va), len_total); #endif sparc_mapiorange(0, virt_to_phys(va), res->start, len_total); *pba = virt_to_phys(va); /* equals virt_to_bus (R.I.P.) for us. 
*/ return (void *) res->start; } EXPORT_SYMBOL(pci_alloc_consistent); /* Free and unmap a consistent DMA buffer. * cpu_addr is what was returned from pci_alloc_consistent, * size must be the same as what as passed into pci_alloc_consistent, * and likewise dma_addr must be the same as what *dma_addrp was set to. * * References to the memory and mappings associated with cpu_addr/dma_addr * past this call are illegal. */ void pci_free_consistent(struct pci_dev *pdev, size_t n, void *p, dma_addr_t ba) { struct resource *res; unsigned long pgp; if ((res = _sparc_find_resource(&_sparc_dvma, (unsigned long)p)) == NULL) { printk("pci_free_consistent: cannot free %p\n", p); return; } if (((unsigned long)p & (PAGE_SIZE-1)) != 0) { printk("pci_free_consistent: unaligned va %p\n", p); return; } n = (n + PAGE_SIZE-1) & PAGE_MASK; if ((res->end-res->start)+1 != n) { printk("pci_free_consistent: region 0x%lx asked 0x%lx\n", (long)((res->end-res->start)+1), (long)n); return; } pgp = (unsigned long) phys_to_virt(ba); /* bus_to_virt actually */ mmu_inval_dma_area(pgp, n); sparc_unmapiorange((unsigned long)p, n); release_resource(res); kfree(res); free_pages(pgp, get_order(n)); } EXPORT_SYMBOL(pci_free_consistent); /* Map a single buffer of the indicated size for DMA in streaming mode. * The 32-bit bus address to use is returned. * * Once the device is given the dma address, the device owns this memory * until either pci_unmap_single or pci_dma_sync_single_* is performed. */ dma_addr_t pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size, int direction) { BUG_ON(direction == PCI_DMA_NONE); /* IIep is write-through, not flushing. */ return virt_to_phys(ptr); } EXPORT_SYMBOL(pci_map_single); /* Unmap a single streaming mode DMA translation. The dma_addr and size * must match what was provided for in a previous pci_map_single call. All * other usages are undefined. * * After this call, reads by the cpu to the buffer are guaranteed to see * whatever the device wrote there. 
*/ void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t ba, size_t size, int direction) { BUG_ON(direction == PCI_DMA_NONE); if (direction != PCI_DMA_TODEVICE) { mmu_inval_dma_area((unsigned long)phys_to_virt(ba), (size + PAGE_SIZE-1) & PAGE_MASK); } } EXPORT_SYMBOL(pci_unmap_single); /* * Same as pci_map_single, but with pages. */ dma_addr_t pci_map_page(struct pci_dev *hwdev, struct page *page, unsigned long offset, size_t size, int direction) { BUG_ON(direction == PCI_DMA_NONE); /* IIep is write-through, not flushing. */ return page_to_phys(page) + offset; } EXPORT_SYMBOL(pci_map_page); void pci_unmap_page(struct pci_dev *hwdev, dma_addr_t dma_address, size_t size, int direction) { BUG_ON(direction == PCI_DMA_NONE); /* mmu_inval_dma_area XXX */ } EXPORT_SYMBOL(pci_unmap_page); /* Map a set of buffers described by scatterlist in streaming * mode for DMA. This is the scather-gather version of the * above pci_map_single interface. Here the scatter gather list * elements are each tagged with the appropriate dma address * and length. They are obtained via sg_dma_{address,length}(SG). * * NOTE: An implementation may be able to use a smaller number of * DMA address/length pairs than there are SG table elements. * (for example via virtual mapping capabilities) * The routine returns the number of addr/length pairs actually * used, at most nents. * * Device ownership issues as mentioned above for pci_map_single are * the same here. */ int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sgl, int nents, int direction) { struct scatterlist *sg; int n; BUG_ON(direction == PCI_DMA_NONE); /* IIep is write-through, not flushing. */ for_each_sg(sgl, sg, nents, n) { BUG_ON(page_address(sg_page(sg)) == NULL); sg->dma_address = virt_to_phys(sg_virt(sg)); sg->dma_length = sg->length; } return nents; } EXPORT_SYMBOL(pci_map_sg); /* Unmap a set of streaming mode DMA translations. * Again, cpu read rules concerning calls here are the same as for * pci_unmap_single() above. 
*/ void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sgl, int nents, int direction) { struct scatterlist *sg; int n; BUG_ON(direction == PCI_DMA_NONE); if (direction != PCI_DMA_TODEVICE) { for_each_sg(sgl, sg, nents, n) { BUG_ON(page_address(sg_page(sg)) == NULL); mmu_inval_dma_area( (unsigned long) page_address(sg_page(sg)), (sg->length + PAGE_SIZE-1) & PAGE_MASK); } } } EXPORT_SYMBOL(pci_unmap_sg); /* Make physical memory consistent for a single * streaming mode DMA translation before or after a transfer. * * If you perform a pci_map_single() but wish to interrogate the * buffer using the cpu, yet do not wish to teardown the PCI dma * mapping, you must call this function before doing so. At the * next point you give the PCI dma address back to the card, you * must first perform a pci_dma_sync_for_device, and then the * device again owns the buffer. */ void pci_dma_sync_single_for_cpu(struct pci_dev *hwdev, dma_addr_t ba, size_t size, int direction) { BUG_ON(direction == PCI_DMA_NONE); if (direction != PCI_DMA_TODEVICE) { mmu_inval_dma_area((unsigned long)phys_to_virt(ba), (size + PAGE_SIZE-1) & PAGE_MASK); } } EXPORT_SYMBOL(pci_dma_sync_single_for_cpu); void pci_dma_sync_single_for_device(struct pci_dev *hwdev, dma_addr_t ba, size_t size, int direction) { BUG_ON(direction == PCI_DMA_NONE); if (direction != PCI_DMA_TODEVICE) { mmu_inval_dma_area((unsigned long)phys_to_virt(ba), (size + PAGE_SIZE-1) & PAGE_MASK); } } EXPORT_SYMBOL(pci_dma_sync_single_for_device); /* Make physical memory consistent for a set of streaming * mode DMA translations after a transfer. * * The same as pci_dma_sync_single_* but for a scatter-gather list, * same rules and usage. 
*/ void pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sgl, int nents, int direction) { struct scatterlist *sg; int n; BUG_ON(direction == PCI_DMA_NONE); if (direction != PCI_DMA_TODEVICE) { for_each_sg(sgl, sg, nents, n) { BUG_ON(page_address(sg_page(sg)) == NULL); mmu_inval_dma_area( (unsigned long) page_address(sg_page(sg)), (sg->length + PAGE_SIZE-1) & PAGE_MASK); } } } EXPORT_SYMBOL(pci_dma_sync_sg_for_cpu); void pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sgl, int nents, int direction) { struct scatterlist *sg; int n; BUG_ON(direction == PCI_DMA_NONE); if (direction != PCI_DMA_TODEVICE) { for_each_sg(sgl, sg, nents, n) { BUG_ON(page_address(sg_page(sg)) == NULL); mmu_inval_dma_area( (unsigned long) page_address(sg_page(sg)), (sg->length + PAGE_SIZE-1) & PAGE_MASK); } } } EXPORT_SYMBOL(pci_dma_sync_sg_for_device); #endif /* CONFIG_PCI */ #ifdef CONFIG_PROC_FS static int _sparc_io_get_info(char *buf, char **start, off_t fpos, int length, int *eof, void *data) { char *p = buf, *e = buf + length; struct resource *r; const char *nm; for (r = ((struct resource *)data)->child; r != NULL; r = r->sibling) { if (p + 32 >= e) /* Better than nothing */ break; if ((nm = r->name) == 0) nm = "???"; p += sprintf(p, "%016llx-%016llx: %s\n", (unsigned long long)r->start, (unsigned long long)r->end, nm); } return p-buf; } #endif /* CONFIG_PROC_FS */ /* * This is a version of find_resource and it belongs to kernel/resource.c. * Until we have agreement with Linus and Martin, it lingers here. * * XXX Too slow. Can have 8192 DVMA pages on sun4m in the worst case. * This probably warrants some sort of hashing. 
*/ static struct resource *_sparc_find_resource(struct resource *root, unsigned long hit) { struct resource *tmp; for (tmp = root->child; tmp != 0; tmp = tmp->sibling) { if (tmp->start <= hit && tmp->end >= hit) return tmp; } return NULL; } static void register_proc_sparc_ioport(void) { #ifdef CONFIG_PROC_FS create_proc_read_entry("io_map",0,NULL,_sparc_io_get_info,&sparc_iomap); create_proc_read_entry("dvma_map",0,NULL,_sparc_io_get_info,&_sparc_dvma); #endif }
gpl-2.0
github-easyway/linux-1
mm/list_lru.c
184
12490
/* * Copyright (c) 2013 Red Hat, Inc. and Parallels Inc. All rights reserved. * Authors: David Chinner and Glauber Costa * * Generic LRU infrastructure */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/mm.h> #include <linux/list_lru.h> #include <linux/slab.h> #include <linux/mutex.h> #include <linux/memcontrol.h> #ifdef CONFIG_MEMCG_KMEM static LIST_HEAD(list_lrus); static DEFINE_MUTEX(list_lrus_mutex); static void list_lru_register(struct list_lru *lru) { mutex_lock(&list_lrus_mutex); list_add(&lru->list, &list_lrus); mutex_unlock(&list_lrus_mutex); } static void list_lru_unregister(struct list_lru *lru) { mutex_lock(&list_lrus_mutex); list_del(&lru->list); mutex_unlock(&list_lrus_mutex); } #else static void list_lru_register(struct list_lru *lru) { } static void list_lru_unregister(struct list_lru *lru) { } #endif /* CONFIG_MEMCG_KMEM */ #ifdef CONFIG_MEMCG_KMEM static inline bool list_lru_memcg_aware(struct list_lru *lru) { return !!lru->node[0].memcg_lrus; } static inline struct list_lru_one * list_lru_from_memcg_idx(struct list_lru_node *nlru, int idx) { /* * The lock protects the array of per cgroup lists from relocation * (see memcg_update_list_lru_node). 
*/ lockdep_assert_held(&nlru->lock); if (nlru->memcg_lrus && idx >= 0) return nlru->memcg_lrus->lru[idx]; return &nlru->lru; } static inline struct list_lru_one * list_lru_from_kmem(struct list_lru_node *nlru, void *ptr) { struct mem_cgroup *memcg; if (!nlru->memcg_lrus) return &nlru->lru; memcg = mem_cgroup_from_kmem(ptr); if (!memcg) return &nlru->lru; return list_lru_from_memcg_idx(nlru, memcg_cache_id(memcg)); } #else static inline bool list_lru_memcg_aware(struct list_lru *lru) { return false; } static inline struct list_lru_one * list_lru_from_memcg_idx(struct list_lru_node *nlru, int idx) { return &nlru->lru; } static inline struct list_lru_one * list_lru_from_kmem(struct list_lru_node *nlru, void *ptr) { return &nlru->lru; } #endif /* CONFIG_MEMCG_KMEM */ bool list_lru_add(struct list_lru *lru, struct list_head *item) { int nid = page_to_nid(virt_to_page(item)); struct list_lru_node *nlru = &lru->node[nid]; struct list_lru_one *l; spin_lock(&nlru->lock); if (list_empty(item)) { l = list_lru_from_kmem(nlru, item); list_add_tail(item, &l->list); l->nr_items++; spin_unlock(&nlru->lock); return true; } spin_unlock(&nlru->lock); return false; } EXPORT_SYMBOL_GPL(list_lru_add); bool list_lru_del(struct list_lru *lru, struct list_head *item) { int nid = page_to_nid(virt_to_page(item)); struct list_lru_node *nlru = &lru->node[nid]; struct list_lru_one *l; spin_lock(&nlru->lock); if (!list_empty(item)) { l = list_lru_from_kmem(nlru, item); list_del_init(item); l->nr_items--; spin_unlock(&nlru->lock); return true; } spin_unlock(&nlru->lock); return false; } EXPORT_SYMBOL_GPL(list_lru_del); void list_lru_isolate(struct list_lru_one *list, struct list_head *item) { list_del_init(item); list->nr_items--; } EXPORT_SYMBOL_GPL(list_lru_isolate); void list_lru_isolate_move(struct list_lru_one *list, struct list_head *item, struct list_head *head) { list_move(item, head); list->nr_items--; } EXPORT_SYMBOL_GPL(list_lru_isolate_move); static unsigned long 
__list_lru_count_one(struct list_lru *lru, int nid, int memcg_idx) { struct list_lru_node *nlru = &lru->node[nid]; struct list_lru_one *l; unsigned long count; spin_lock(&nlru->lock); l = list_lru_from_memcg_idx(nlru, memcg_idx); count = l->nr_items; spin_unlock(&nlru->lock); return count; } unsigned long list_lru_count_one(struct list_lru *lru, int nid, struct mem_cgroup *memcg) { return __list_lru_count_one(lru, nid, memcg_cache_id(memcg)); } EXPORT_SYMBOL_GPL(list_lru_count_one); unsigned long list_lru_count_node(struct list_lru *lru, int nid) { long count = 0; int memcg_idx; count += __list_lru_count_one(lru, nid, -1); if (list_lru_memcg_aware(lru)) { for_each_memcg_cache_index(memcg_idx) count += __list_lru_count_one(lru, nid, memcg_idx); } return count; } EXPORT_SYMBOL_GPL(list_lru_count_node); static unsigned long __list_lru_walk_one(struct list_lru *lru, int nid, int memcg_idx, list_lru_walk_cb isolate, void *cb_arg, unsigned long *nr_to_walk) { struct list_lru_node *nlru = &lru->node[nid]; struct list_lru_one *l; struct list_head *item, *n; unsigned long isolated = 0; spin_lock(&nlru->lock); l = list_lru_from_memcg_idx(nlru, memcg_idx); restart: list_for_each_safe(item, n, &l->list) { enum lru_status ret; /* * decrement nr_to_walk first so that we don't livelock if we * get stuck on large numbesr of LRU_RETRY items */ if (!*nr_to_walk) break; --*nr_to_walk; ret = isolate(item, l, &nlru->lock, cb_arg); switch (ret) { case LRU_REMOVED_RETRY: assert_spin_locked(&nlru->lock); case LRU_REMOVED: isolated++; /* * If the lru lock has been dropped, our list * traversal is now invalid and so we have to * restart from scratch. */ if (ret == LRU_REMOVED_RETRY) goto restart; break; case LRU_ROTATE: list_move_tail(item, &l->list); break; case LRU_SKIP: break; case LRU_RETRY: /* * The lru lock has been dropped, our list traversal is * now invalid and so we have to restart from scratch. 
*/ assert_spin_locked(&nlru->lock); goto restart; default: BUG(); } } spin_unlock(&nlru->lock); return isolated; } unsigned long list_lru_walk_one(struct list_lru *lru, int nid, struct mem_cgroup *memcg, list_lru_walk_cb isolate, void *cb_arg, unsigned long *nr_to_walk) { return __list_lru_walk_one(lru, nid, memcg_cache_id(memcg), isolate, cb_arg, nr_to_walk); } EXPORT_SYMBOL_GPL(list_lru_walk_one); unsigned long list_lru_walk_node(struct list_lru *lru, int nid, list_lru_walk_cb isolate, void *cb_arg, unsigned long *nr_to_walk) { long isolated = 0; int memcg_idx; isolated += __list_lru_walk_one(lru, nid, -1, isolate, cb_arg, nr_to_walk); if (*nr_to_walk > 0 && list_lru_memcg_aware(lru)) { for_each_memcg_cache_index(memcg_idx) { isolated += __list_lru_walk_one(lru, nid, memcg_idx, isolate, cb_arg, nr_to_walk); if (*nr_to_walk <= 0) break; } } return isolated; } EXPORT_SYMBOL_GPL(list_lru_walk_node); static void init_one_lru(struct list_lru_one *l) { INIT_LIST_HEAD(&l->list); l->nr_items = 0; } #ifdef CONFIG_MEMCG_KMEM static void __memcg_destroy_list_lru_node(struct list_lru_memcg *memcg_lrus, int begin, int end) { int i; for (i = begin; i < end; i++) kfree(memcg_lrus->lru[i]); } static int __memcg_init_list_lru_node(struct list_lru_memcg *memcg_lrus, int begin, int end) { int i; for (i = begin; i < end; i++) { struct list_lru_one *l; l = kmalloc(sizeof(struct list_lru_one), GFP_KERNEL); if (!l) goto fail; init_one_lru(l); memcg_lrus->lru[i] = l; } return 0; fail: __memcg_destroy_list_lru_node(memcg_lrus, begin, i - 1); return -ENOMEM; } static int memcg_init_list_lru_node(struct list_lru_node *nlru) { int size = memcg_nr_cache_ids; nlru->memcg_lrus = kmalloc(size * sizeof(void *), GFP_KERNEL); if (!nlru->memcg_lrus) return -ENOMEM; if (__memcg_init_list_lru_node(nlru->memcg_lrus, 0, size)) { kfree(nlru->memcg_lrus); return -ENOMEM; } return 0; } static void memcg_destroy_list_lru_node(struct list_lru_node *nlru) { __memcg_destroy_list_lru_node(nlru->memcg_lrus, 0, 
memcg_nr_cache_ids); kfree(nlru->memcg_lrus); } static int memcg_update_list_lru_node(struct list_lru_node *nlru, int old_size, int new_size) { struct list_lru_memcg *old, *new; BUG_ON(old_size > new_size); old = nlru->memcg_lrus; new = kmalloc(new_size * sizeof(void *), GFP_KERNEL); if (!new) return -ENOMEM; if (__memcg_init_list_lru_node(new, old_size, new_size)) { kfree(new); return -ENOMEM; } memcpy(new, old, old_size * sizeof(void *)); /* * The lock guarantees that we won't race with a reader * (see list_lru_from_memcg_idx). * * Since list_lru_{add,del} may be called under an IRQ-safe lock, * we have to use IRQ-safe primitives here to avoid deadlock. */ spin_lock_irq(&nlru->lock); nlru->memcg_lrus = new; spin_unlock_irq(&nlru->lock); kfree(old); return 0; } static void memcg_cancel_update_list_lru_node(struct list_lru_node *nlru, int old_size, int new_size) { /* do not bother shrinking the array back to the old size, because we * cannot handle allocation failures here */ __memcg_destroy_list_lru_node(nlru->memcg_lrus, old_size, new_size); } static int memcg_init_list_lru(struct list_lru *lru, bool memcg_aware) { int i; for (i = 0; i < nr_node_ids; i++) { if (!memcg_aware) lru->node[i].memcg_lrus = NULL; else if (memcg_init_list_lru_node(&lru->node[i])) goto fail; } return 0; fail: for (i = i - 1; i >= 0; i--) memcg_destroy_list_lru_node(&lru->node[i]); return -ENOMEM; } static void memcg_destroy_list_lru(struct list_lru *lru) { int i; if (!list_lru_memcg_aware(lru)) return; for (i = 0; i < nr_node_ids; i++) memcg_destroy_list_lru_node(&lru->node[i]); } static int memcg_update_list_lru(struct list_lru *lru, int old_size, int new_size) { int i; if (!list_lru_memcg_aware(lru)) return 0; for (i = 0; i < nr_node_ids; i++) { if (memcg_update_list_lru_node(&lru->node[i], old_size, new_size)) goto fail; } return 0; fail: for (i = i - 1; i >= 0; i--) memcg_cancel_update_list_lru_node(&lru->node[i], old_size, new_size); return -ENOMEM; } static void 
memcg_cancel_update_list_lru(struct list_lru *lru, int old_size, int new_size) { int i; if (!list_lru_memcg_aware(lru)) return; for (i = 0; i < nr_node_ids; i++) memcg_cancel_update_list_lru_node(&lru->node[i], old_size, new_size); } int memcg_update_all_list_lrus(int new_size) { int ret = 0; struct list_lru *lru; int old_size = memcg_nr_cache_ids; mutex_lock(&list_lrus_mutex); list_for_each_entry(lru, &list_lrus, list) { ret = memcg_update_list_lru(lru, old_size, new_size); if (ret) goto fail; } out: mutex_unlock(&list_lrus_mutex); return ret; fail: list_for_each_entry_continue_reverse(lru, &list_lrus, list) memcg_cancel_update_list_lru(lru, old_size, new_size); goto out; } static void memcg_drain_list_lru_node(struct list_lru_node *nlru, int src_idx, int dst_idx) { struct list_lru_one *src, *dst; /* * Since list_lru_{add,del} may be called under an IRQ-safe lock, * we have to use IRQ-safe primitives here to avoid deadlock. */ spin_lock_irq(&nlru->lock); src = list_lru_from_memcg_idx(nlru, src_idx); dst = list_lru_from_memcg_idx(nlru, dst_idx); list_splice_init(&src->list, &dst->list); dst->nr_items += src->nr_items; src->nr_items = 0; spin_unlock_irq(&nlru->lock); } static void memcg_drain_list_lru(struct list_lru *lru, int src_idx, int dst_idx) { int i; if (!list_lru_memcg_aware(lru)) return; for (i = 0; i < nr_node_ids; i++) memcg_drain_list_lru_node(&lru->node[i], src_idx, dst_idx); } void memcg_drain_all_list_lrus(int src_idx, int dst_idx) { struct list_lru *lru; mutex_lock(&list_lrus_mutex); list_for_each_entry(lru, &list_lrus, list) memcg_drain_list_lru(lru, src_idx, dst_idx); mutex_unlock(&list_lrus_mutex); } #else static int memcg_init_list_lru(struct list_lru *lru, bool memcg_aware) { return 0; } static void memcg_destroy_list_lru(struct list_lru *lru) { } #endif /* CONFIG_MEMCG_KMEM */ int __list_lru_init(struct list_lru *lru, bool memcg_aware, struct lock_class_key *key) { int i; size_t size = sizeof(*lru->node) * nr_node_ids; int err = -ENOMEM; 
memcg_get_cache_ids(); lru->node = kzalloc(size, GFP_KERNEL); if (!lru->node) goto out; for (i = 0; i < nr_node_ids; i++) { spin_lock_init(&lru->node[i].lock); if (key) lockdep_set_class(&lru->node[i].lock, key); init_one_lru(&lru->node[i].lru); } err = memcg_init_list_lru(lru, memcg_aware); if (err) { kfree(lru->node); goto out; } list_lru_register(lru); out: memcg_put_cache_ids(); return err; } EXPORT_SYMBOL_GPL(__list_lru_init); void list_lru_destroy(struct list_lru *lru) { /* Already destroyed or not yet initialized? */ if (!lru->node) return; memcg_get_cache_ids(); list_lru_unregister(lru); memcg_destroy_list_lru(lru); kfree(lru->node); lru->node = NULL; memcg_put_cache_ids(); } EXPORT_SYMBOL_GPL(list_lru_destroy);
gpl-2.0
jimbojr/linux
drivers/misc/ti-st/st_kim.c
440
25035
/* * Shared Transport Line discipline driver Core * Init Manager module responsible for GPIO control * and firmware download * Copyright (C) 2009-2010 Texas Instruments * Author: Pavan Savoy <pavan_savoy@ti.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #define pr_fmt(fmt) "(stk) :" fmt #include <linux/platform_device.h> #include <linux/jiffies.h> #include <linux/firmware.h> #include <linux/delay.h> #include <linux/wait.h> #include <linux/gpio.h> #include <linux/debugfs.h> #include <linux/seq_file.h> #include <linux/sched.h> #include <linux/sysfs.h> #include <linux/tty.h> #include <linux/skbuff.h> #include <linux/ti_wilink_st.h> #include <linux/module.h> #define MAX_ST_DEVICES 3 /* Imagine 1 on each UART for now */ static struct platform_device *st_kim_devices[MAX_ST_DEVICES]; /**********************************************************************/ /* internal functions */ /** * st_get_plat_device - * function which returns the reference to the platform device * requested by id. As of now only 1 such device exists (id=0) * the context requesting for reference can get the id to be * requested by a. The protocol driver which is registering or * b. the tty device which is opened. 
*/ static struct platform_device *st_get_plat_device(int id) { return st_kim_devices[id]; } /** * validate_firmware_response - * function to return whether the firmware response was proper * in case of error don't complete so that waiting for proper * response times out */ static void validate_firmware_response(struct kim_data_s *kim_gdata) { struct sk_buff *skb = kim_gdata->rx_skb; if (!skb) return; /* these magic numbers are the position in the response buffer which * allows us to distinguish whether the response is for the read * version info. command */ if (skb->data[2] == 0x01 && skb->data[3] == 0x01 && skb->data[4] == 0x10 && skb->data[5] == 0x00) { /* fw version response */ memcpy(kim_gdata->resp_buffer, kim_gdata->rx_skb->data, kim_gdata->rx_skb->len); complete_all(&kim_gdata->kim_rcvd); kim_gdata->rx_state = ST_W4_PACKET_TYPE; kim_gdata->rx_skb = NULL; kim_gdata->rx_count = 0; } else if (unlikely(skb->data[5] != 0)) { pr_err("no proper response during fw download"); pr_err("data6 %x", skb->data[5]); kfree_skb(skb); return; /* keep waiting for the proper response */ } /* becos of all the script being downloaded */ complete_all(&kim_gdata->kim_rcvd); kfree_skb(skb); } /* check for data len received inside kim_int_recv * most often hit the last case to update state to waiting for data */ static inline int kim_check_data_len(struct kim_data_s *kim_gdata, int len) { register int room = skb_tailroom(kim_gdata->rx_skb); pr_debug("len %d room %d", len, room); if (!len) { validate_firmware_response(kim_gdata); } else if (len > room) { /* Received packet's payload length is larger. * We can't accommodate it in created skb. */ pr_err("Data length is too large len %d room %d", len, room); kfree_skb(kim_gdata->rx_skb); } else { /* Packet header has non-zero payload length and * we have enough space in created skb. 
Lets read * payload data */ kim_gdata->rx_state = ST_W4_DATA; kim_gdata->rx_count = len; return len; } /* Change ST LL state to continue to process next * packet */ kim_gdata->rx_state = ST_W4_PACKET_TYPE; kim_gdata->rx_skb = NULL; kim_gdata->rx_count = 0; return 0; } /** * kim_int_recv - receive function called during firmware download * firmware download responses on different UART drivers * have been observed to come in bursts of different * tty_receive and hence the logic */ static void kim_int_recv(struct kim_data_s *kim_gdata, const unsigned char *data, long count) { const unsigned char *ptr; int len = 0, type = 0; unsigned char *plen; pr_debug("%s", __func__); /* Decode received bytes here */ ptr = data; if (unlikely(ptr == NULL)) { pr_err(" received null from TTY "); return; } while (count) { if (kim_gdata->rx_count) { len = min_t(unsigned int, kim_gdata->rx_count, count); memcpy(skb_put(kim_gdata->rx_skb, len), ptr, len); kim_gdata->rx_count -= len; count -= len; ptr += len; if (kim_gdata->rx_count) continue; /* Check ST RX state machine , where are we? */ switch (kim_gdata->rx_state) { /* Waiting for complete packet ? */ case ST_W4_DATA: pr_debug("Complete pkt received"); validate_firmware_response(kim_gdata); kim_gdata->rx_state = ST_W4_PACKET_TYPE; kim_gdata->rx_skb = NULL; continue; /* Waiting for Bluetooth event header ? */ case ST_W4_HEADER: plen = (unsigned char *)&kim_gdata->rx_skb->data[1]; pr_debug("event hdr: plen 0x%02x\n", *plen); kim_check_data_len(kim_gdata, *plen); continue; } /* end of switch */ } /* end of if rx_state */ switch (*ptr) { /* Bluetooth event packet? 
*/ case 0x04: kim_gdata->rx_state = ST_W4_HEADER; kim_gdata->rx_count = 2; type = *ptr; break; default: pr_info("unknown packet"); ptr++; count--; continue; } ptr++; count--; kim_gdata->rx_skb = alloc_skb(1024+8, GFP_ATOMIC); if (!kim_gdata->rx_skb) { pr_err("can't allocate mem for new packet"); kim_gdata->rx_state = ST_W4_PACKET_TYPE; kim_gdata->rx_count = 0; return; } skb_reserve(kim_gdata->rx_skb, 8); kim_gdata->rx_skb->cb[0] = 4; kim_gdata->rx_skb->cb[1] = 0; } return; } static long read_local_version(struct kim_data_s *kim_gdata, char *bts_scr_name) { unsigned short version = 0, chip = 0, min_ver = 0, maj_ver = 0; const char read_ver_cmd[] = { 0x01, 0x01, 0x10, 0x00 }; long timeout; pr_debug("%s", __func__); reinit_completion(&kim_gdata->kim_rcvd); if (4 != st_int_write(kim_gdata->core_data, read_ver_cmd, 4)) { pr_err("kim: couldn't write 4 bytes"); return -EIO; } timeout = wait_for_completion_interruptible_timeout( &kim_gdata->kim_rcvd, msecs_to_jiffies(CMD_RESP_TIME)); if (timeout <= 0) { pr_err(" waiting for ver info- timed out or received signal"); return timeout ? 
-ERESTARTSYS : -ETIMEDOUT; } reinit_completion(&kim_gdata->kim_rcvd); /* the positions 12 & 13 in the response buffer provide with the * chip, major & minor numbers */ version = MAKEWORD(kim_gdata->resp_buffer[12], kim_gdata->resp_buffer[13]); chip = (version & 0x7C00) >> 10; min_ver = (version & 0x007F); maj_ver = (version & 0x0380) >> 7; if (version & 0x8000) maj_ver |= 0x0008; sprintf(bts_scr_name, "ti-connectivity/TIInit_%d.%d.%d.bts", chip, maj_ver, min_ver); /* to be accessed later via sysfs entry */ kim_gdata->version.full = version; kim_gdata->version.chip = chip; kim_gdata->version.maj_ver = maj_ver; kim_gdata->version.min_ver = min_ver; pr_info("%s", bts_scr_name); return 0; } static void skip_change_remote_baud(unsigned char **ptr, long *len) { unsigned char *nxt_action, *cur_action; cur_action = *ptr; nxt_action = cur_action + sizeof(struct bts_action) + ((struct bts_action *) cur_action)->size; if (((struct bts_action *) nxt_action)->type != ACTION_WAIT_EVENT) { pr_err("invalid action after change remote baud command"); } else { *ptr = *ptr + sizeof(struct bts_action) + ((struct bts_action *)cur_action)->size; *len = *len - (sizeof(struct bts_action) + ((struct bts_action *)cur_action)->size); /* warn user on not commenting these in firmware */ pr_warn("skipping the wait event of change remote baud"); } } /** * download_firmware - * internal function which parses through the .bts firmware * script file intreprets SEND, DELAY actions only as of now */ static long download_firmware(struct kim_data_s *kim_gdata) { long err = 0; long len = 0; unsigned char *ptr = NULL; unsigned char *action_ptr = NULL; unsigned char bts_scr_name[40] = { 0 }; /* 40 char long bts scr name? 
*/ int wr_room_space; int cmd_size; unsigned long timeout; err = read_local_version(kim_gdata, bts_scr_name); if (err != 0) { pr_err("kim: failed to read local ver"); return err; } err = request_firmware(&kim_gdata->fw_entry, bts_scr_name, &kim_gdata->kim_pdev->dev); if (unlikely((err != 0) || (kim_gdata->fw_entry->data == NULL) || (kim_gdata->fw_entry->size == 0))) { pr_err(" request_firmware failed(errno %ld) for %s", err, bts_scr_name); return -EINVAL; } ptr = (void *)kim_gdata->fw_entry->data; len = kim_gdata->fw_entry->size; /* bts_header to remove out magic number and * version */ ptr += sizeof(struct bts_header); len -= sizeof(struct bts_header); while (len > 0 && ptr) { pr_debug(" action size %d, type %d ", ((struct bts_action *)ptr)->size, ((struct bts_action *)ptr)->type); switch (((struct bts_action *)ptr)->type) { case ACTION_SEND_COMMAND: /* action send */ pr_debug("S"); action_ptr = &(((struct bts_action *)ptr)->data[0]); if (unlikely (((struct hci_command *)action_ptr)->opcode == 0xFF36)) { /* ignore remote change * baud rate HCI VS command */ pr_warn("change remote baud" " rate command in firmware"); skip_change_remote_baud(&ptr, &len); break; } /* * Make sure we have enough free space in uart * tx buffer to write current firmware command */ cmd_size = ((struct bts_action *)ptr)->size; timeout = jiffies + msecs_to_jiffies(CMD_WR_TIME); do { wr_room_space = st_get_uart_wr_room(kim_gdata->core_data); if (wr_room_space < 0) { pr_err("Unable to get free " "space info from uart tx buffer"); release_firmware(kim_gdata->fw_entry); return wr_room_space; } mdelay(1); /* wait 1ms before checking room */ } while ((wr_room_space < cmd_size) && time_before(jiffies, timeout)); /* Timeout happened ? 
*/ if (time_after_eq(jiffies, timeout)) { pr_err("Timeout while waiting for free " "free space in uart tx buffer"); release_firmware(kim_gdata->fw_entry); return -ETIMEDOUT; } /* reinit completion before sending for the * relevant wait */ reinit_completion(&kim_gdata->kim_rcvd); /* * Free space found in uart buffer, call st_int_write * to send current firmware command to the uart tx * buffer. */ err = st_int_write(kim_gdata->core_data, ((struct bts_action_send *)action_ptr)->data, ((struct bts_action *)ptr)->size); if (unlikely(err < 0)) { release_firmware(kim_gdata->fw_entry); return err; } /* * Check number of bytes written to the uart tx buffer * and requested command write size */ if (err != cmd_size) { pr_err("Number of bytes written to uart " "tx buffer are not matching with " "requested cmd write size"); release_firmware(kim_gdata->fw_entry); return -EIO; } break; case ACTION_WAIT_EVENT: /* wait */ pr_debug("W"); err = wait_for_completion_interruptible_timeout( &kim_gdata->kim_rcvd, msecs_to_jiffies(CMD_RESP_TIME)); if (err <= 0) { pr_err("response timeout/signaled during fw download "); /* timed out */ release_firmware(kim_gdata->fw_entry); return err ? -ERESTARTSYS : -ETIMEDOUT; } reinit_completion(&kim_gdata->kim_rcvd); break; case ACTION_DELAY: /* sleep */ pr_info("sleep command in scr"); action_ptr = &(((struct bts_action *)ptr)->data[0]); mdelay(((struct bts_action_delay *)action_ptr)->msec); break; } len = len - (sizeof(struct bts_action) + ((struct bts_action *)ptr)->size); ptr = ptr + sizeof(struct bts_action) + ((struct bts_action *)ptr)->size; } /* fw download complete */ release_firmware(kim_gdata->fw_entry); return 0; } /**********************************************************************/ /* functions called from ST core */ /* called from ST Core, when REG_IN_PROGRESS (registration in progress) * can be because of * 1. response to read local version * 2. 
during send/recv's of firmware download */ void st_kim_recv(void *disc_data, const unsigned char *data, long count) { struct st_data_s *st_gdata = (struct st_data_s *)disc_data; struct kim_data_s *kim_gdata = st_gdata->kim_data; /* proceed to gather all data and distinguish read fw version response * from other fw responses when data gathering is complete */ kim_int_recv(kim_gdata, data, count); return; } /* to signal completion of line discipline installation * called from ST Core, upon tty_open */ void st_kim_complete(void *kim_data) { struct kim_data_s *kim_gdata = (struct kim_data_s *)kim_data; complete(&kim_gdata->ldisc_installed); } /** * st_kim_start - called from ST Core upon 1st registration * This involves toggling the chip enable gpio, reading * the firmware version from chip, forming the fw file name * based on the chip version, requesting the fw, parsing it * and perform download(send/recv). */ long st_kim_start(void *kim_data) { long err = 0; long retry = POR_RETRY_COUNT; struct ti_st_plat_data *pdata; struct kim_data_s *kim_gdata = (struct kim_data_s *)kim_data; pr_info(" %s", __func__); pdata = kim_gdata->kim_pdev->dev.platform_data; do { /* platform specific enabling code here */ if (pdata->chip_enable) pdata->chip_enable(kim_gdata); /* Configure BT nShutdown to HIGH state */ gpio_set_value_cansleep(kim_gdata->nshutdown, GPIO_LOW); mdelay(5); /* FIXME: a proper toggle */ gpio_set_value_cansleep(kim_gdata->nshutdown, GPIO_HIGH); mdelay(100); /* re-initialize the completion */ reinit_completion(&kim_gdata->ldisc_installed); /* send notification to UIM */ kim_gdata->ldisc_install = 1; pr_info("ldisc_install = 1"); sysfs_notify(&kim_gdata->kim_pdev->dev.kobj, NULL, "install"); /* wait for ldisc to be installed */ err = wait_for_completion_interruptible_timeout( &kim_gdata->ldisc_installed, msecs_to_jiffies(LDISC_TIME)); if (!err) { /* ldisc installation timeout, * flush uart, power cycle BT_EN */ pr_err("ldisc installation timeout"); err = 
st_kim_stop(kim_gdata); continue; } else { /* ldisc installed now */ pr_info("line discipline installed"); err = download_firmware(kim_gdata); if (err != 0) { /* ldisc installed but fw download failed, * flush uart & power cycle BT_EN */ pr_err("download firmware failed"); err = st_kim_stop(kim_gdata); continue; } else { /* on success don't retry */ break; } } } while (retry--); return err; } /** * st_kim_stop - stop communication with chip. * This can be called from ST Core/KIM, on the- * (a) last un-register when chip need not be powered there-after, * (b) upon failure to either install ldisc or download firmware. * The function is responsible to (a) notify UIM about un-installation, * (b) flush UART if the ldisc was installed. * (c) reset BT_EN - pull down nshutdown at the end. * (d) invoke platform's chip disabling routine. */ long st_kim_stop(void *kim_data) { long err = 0; struct kim_data_s *kim_gdata = (struct kim_data_s *)kim_data; struct ti_st_plat_data *pdata = kim_gdata->kim_pdev->dev.platform_data; struct tty_struct *tty = kim_gdata->core_data->tty; reinit_completion(&kim_gdata->ldisc_installed); if (tty) { /* can be called before ldisc is installed */ /* Flush any pending characters in the driver and discipline. 
*/ tty_ldisc_flush(tty); tty_driver_flush_buffer(tty); } /* send uninstall notification to UIM */ pr_info("ldisc_install = 0"); kim_gdata->ldisc_install = 0; sysfs_notify(&kim_gdata->kim_pdev->dev.kobj, NULL, "install"); /* wait for ldisc to be un-installed */ err = wait_for_completion_interruptible_timeout( &kim_gdata->ldisc_installed, msecs_to_jiffies(LDISC_TIME)); if (!err) { /* timeout */ pr_err(" timed out waiting for ldisc to be un-installed"); err = -ETIMEDOUT; } /* By default configure BT nShutdown to LOW state */ gpio_set_value_cansleep(kim_gdata->nshutdown, GPIO_LOW); mdelay(1); gpio_set_value_cansleep(kim_gdata->nshutdown, GPIO_HIGH); mdelay(1); gpio_set_value_cansleep(kim_gdata->nshutdown, GPIO_LOW); /* platform specific disable */ if (pdata->chip_disable) pdata->chip_disable(kim_gdata); return err; } /**********************************************************************/ /* functions called from subsystems */ /* called when debugfs entry is read from */ static int show_version(struct seq_file *s, void *unused) { struct kim_data_s *kim_gdata = (struct kim_data_s *)s->private; seq_printf(s, "%04X %d.%d.%d\n", kim_gdata->version.full, kim_gdata->version.chip, kim_gdata->version.maj_ver, kim_gdata->version.min_ver); return 0; } static int show_list(struct seq_file *s, void *unused) { struct kim_data_s *kim_gdata = (struct kim_data_s *)s->private; kim_st_list_protocols(kim_gdata->core_data, s); return 0; } static ssize_t show_install(struct device *dev, struct device_attribute *attr, char *buf) { struct kim_data_s *kim_data = dev_get_drvdata(dev); return sprintf(buf, "%d\n", kim_data->ldisc_install); } #ifdef DEBUG static ssize_t store_dev_name(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct kim_data_s *kim_data = dev_get_drvdata(dev); pr_debug("storing dev name >%s<", buf); strncpy(kim_data->dev_name, buf, count); pr_debug("stored dev name >%s<", kim_data->dev_name); return count; } static ssize_t 
store_baud_rate(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct kim_data_s *kim_data = dev_get_drvdata(dev); pr_debug("storing baud rate >%s<", buf); sscanf(buf, "%ld", &kim_data->baud_rate); pr_debug("stored baud rate >%ld<", kim_data->baud_rate); return count; } #endif /* if DEBUG */ static ssize_t show_dev_name(struct device *dev, struct device_attribute *attr, char *buf) { struct kim_data_s *kim_data = dev_get_drvdata(dev); return sprintf(buf, "%s\n", kim_data->dev_name); } static ssize_t show_baud_rate(struct device *dev, struct device_attribute *attr, char *buf) { struct kim_data_s *kim_data = dev_get_drvdata(dev); return sprintf(buf, "%d\n", kim_data->baud_rate); } static ssize_t show_flow_cntrl(struct device *dev, struct device_attribute *attr, char *buf) { struct kim_data_s *kim_data = dev_get_drvdata(dev); return sprintf(buf, "%d\n", kim_data->flow_cntrl); } /* structures specific for sysfs entries */ static struct kobj_attribute ldisc_install = __ATTR(install, 0444, (void *)show_install, NULL); static struct kobj_attribute uart_dev_name = #ifdef DEBUG /* TODO: move this to debug-fs if possible */ __ATTR(dev_name, 0644, (void *)show_dev_name, (void *)store_dev_name); #else __ATTR(dev_name, 0444, (void *)show_dev_name, NULL); #endif static struct kobj_attribute uart_baud_rate = #ifdef DEBUG /* TODO: move to debugfs */ __ATTR(baud_rate, 0644, (void *)show_baud_rate, (void *)store_baud_rate); #else __ATTR(baud_rate, 0444, (void *)show_baud_rate, NULL); #endif static struct kobj_attribute uart_flow_cntrl = __ATTR(flow_cntrl, 0444, (void *)show_flow_cntrl, NULL); static struct attribute *uim_attrs[] = { &ldisc_install.attr, &uart_dev_name.attr, &uart_baud_rate.attr, &uart_flow_cntrl.attr, NULL, }; static struct attribute_group uim_attr_grp = { .attrs = uim_attrs, }; /** * st_kim_ref - reference the core's data * This references the per-ST platform device in the arch/xx/ * board-xx.c file. 
* This would enable multiple such platform devices to exist * on a given platform */ void st_kim_ref(struct st_data_s **core_data, int id) { struct platform_device *pdev; struct kim_data_s *kim_gdata; /* get kim_gdata reference from platform device */ pdev = st_get_plat_device(id); if (!pdev) goto err; kim_gdata = platform_get_drvdata(pdev); if (!kim_gdata) goto err; *core_data = kim_gdata->core_data; return; err: *core_data = NULL; } static int kim_version_open(struct inode *i, struct file *f) { return single_open(f, show_version, i->i_private); } static int kim_list_open(struct inode *i, struct file *f) { return single_open(f, show_list, i->i_private); } static const struct file_operations version_debugfs_fops = { /* version info */ .open = kim_version_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static const struct file_operations list_debugfs_fops = { /* protocols info */ .open = kim_list_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; /**********************************************************************/ /* functions called from platform device driver subsystem * need to have a relevant platform device entry in the platform's * board-*.c file */ static struct dentry *kim_debugfs_dir; static int kim_probe(struct platform_device *pdev) { struct kim_data_s *kim_gdata; struct ti_st_plat_data *pdata = pdev->dev.platform_data; int err; if ((pdev->id != -1) && (pdev->id < MAX_ST_DEVICES)) { /* multiple devices could exist */ st_kim_devices[pdev->id] = pdev; } else { /* platform's sure about existence of 1 device */ st_kim_devices[0] = pdev; } kim_gdata = kzalloc(sizeof(struct kim_data_s), GFP_ATOMIC); if (!kim_gdata) { pr_err("no mem to allocate"); return -ENOMEM; } platform_set_drvdata(pdev, kim_gdata); err = st_core_init(&kim_gdata->core_data); if (err != 0) { pr_err(" ST core init failed"); err = -EIO; goto err_core_init; } /* refer to itself */ kim_gdata->core_data->kim_data = kim_gdata; /* Claim the chip 
enable nShutdown gpio from the system */ kim_gdata->nshutdown = pdata->nshutdown_gpio; err = gpio_request(kim_gdata->nshutdown, "kim"); if (unlikely(err)) { pr_err(" gpio %d request failed ", kim_gdata->nshutdown); return err; } /* Configure nShutdown GPIO as output=0 */ err = gpio_direction_output(kim_gdata->nshutdown, 0); if (unlikely(err)) { pr_err(" unable to configure gpio %d", kim_gdata->nshutdown); return err; } /* get reference of pdev for request_firmware */ kim_gdata->kim_pdev = pdev; init_completion(&kim_gdata->kim_rcvd); init_completion(&kim_gdata->ldisc_installed); err = sysfs_create_group(&pdev->dev.kobj, &uim_attr_grp); if (err) { pr_err("failed to create sysfs entries"); goto err_sysfs_group; } /* copying platform data */ strncpy(kim_gdata->dev_name, pdata->dev_name, UART_DEV_NAME_LEN); kim_gdata->flow_cntrl = pdata->flow_cntrl; kim_gdata->baud_rate = pdata->baud_rate; pr_info("sysfs entries created\n"); kim_debugfs_dir = debugfs_create_dir("ti-st", NULL); if (!kim_debugfs_dir) { pr_err(" debugfs entries creation failed "); return 0; } debugfs_create_file("version", S_IRUGO, kim_debugfs_dir, kim_gdata, &version_debugfs_fops); debugfs_create_file("protocols", S_IRUGO, kim_debugfs_dir, kim_gdata, &list_debugfs_fops); return 0; err_sysfs_group: st_core_exit(kim_gdata->core_data); err_core_init: kfree(kim_gdata); return err; } static int kim_remove(struct platform_device *pdev) { /* free the GPIOs requested */ struct ti_st_plat_data *pdata = pdev->dev.platform_data; struct kim_data_s *kim_gdata; kim_gdata = platform_get_drvdata(pdev); /* Free the Bluetooth/FM/GPIO * nShutdown gpio from the system */ gpio_free(pdata->nshutdown_gpio); pr_info("nshutdown GPIO Freed"); debugfs_remove_recursive(kim_debugfs_dir); sysfs_remove_group(&pdev->dev.kobj, &uim_attr_grp); pr_info("sysfs entries removed"); kim_gdata->kim_pdev = NULL; st_core_exit(kim_gdata->core_data); kfree(kim_gdata); kim_gdata = NULL; return 0; } static int kim_suspend(struct platform_device *pdev, 
pm_message_t state) { struct ti_st_plat_data *pdata = pdev->dev.platform_data; if (pdata->suspend) return pdata->suspend(pdev, state); return 0; } static int kim_resume(struct platform_device *pdev) { struct ti_st_plat_data *pdata = pdev->dev.platform_data; if (pdata->resume) return pdata->resume(pdev); return 0; } /**********************************************************************/ /* entry point for ST KIM module, called in from ST Core */ static struct platform_driver kim_platform_driver = { .probe = kim_probe, .remove = kim_remove, .suspend = kim_suspend, .resume = kim_resume, .driver = { .name = "kim", }, }; module_platform_driver(kim_platform_driver); MODULE_AUTHOR("Pavan Savoy <pavan_savoy@ti.com>"); MODULE_DESCRIPTION("Shared Transport Driver for TI BT/FM/GPS combo chips "); MODULE_LICENSE("GPL");
gpl-2.0
Lmaths/linux-stable-rcn-ee
drivers/media/platform/davinci/vpbe_display.c
440
42643
/* * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com/ * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation version 2. * * This program is distributed WITHOUT ANY WARRANTY of any * kind, whether express or implied; without even the implied warranty * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/module.h> #include <linux/errno.h> #include <linux/interrupt.h> #include <linux/string.h> #include <linux/wait.h> #include <linux/time.h> #include <linux/platform_device.h> #include <linux/irq.h> #include <linux/mm.h> #include <linux/mutex.h> #include <linux/videodev2.h> #include <linux/slab.h> #include <asm/pgtable.h> #include <mach/cputype.h> #include <media/v4l2-dev.h> #include <media/v4l2-common.h> #include <media/v4l2-ioctl.h> #include <media/v4l2-device.h> #include <media/davinci/vpbe_display.h> #include <media/davinci/vpbe_types.h> #include <media/davinci/vpbe.h> #include <media/davinci/vpbe_venc.h> #include <media/davinci/vpbe_osd.h> #include "vpbe_venc_regs.h" #define VPBE_DISPLAY_DRIVER "vpbe-v4l2" static int debug; #define VPBE_DEFAULT_NUM_BUFS 3 module_param(debug, int, 0644); static int vpbe_set_osd_display_params(struct vpbe_display *disp_dev, struct vpbe_layer *layer); static int venc_is_second_field(struct vpbe_display *disp_dev) { struct vpbe_device *vpbe_dev = disp_dev->vpbe_dev; int ret; int val; ret = v4l2_subdev_call(vpbe_dev->venc, core, ioctl, VENC_GET_FLD, &val); if (ret < 0) { v4l2_err(&vpbe_dev->v4l2_dev, "Error in getting Field ID 0\n"); } return val; } static void vpbe_isr_even_field(struct vpbe_display *disp_obj, struct vpbe_layer *layer) { struct timespec timevalue; if (layer->cur_frm == layer->next_frm) return; ktime_get_ts(&timevalue); 
layer->cur_frm->vb.v4l2_buf.timestamp.tv_sec = timevalue.tv_sec; layer->cur_frm->vb.v4l2_buf.timestamp.tv_usec = timevalue.tv_nsec / NSEC_PER_USEC; vb2_buffer_done(&layer->cur_frm->vb, VB2_BUF_STATE_DONE); /* Make cur_frm pointing to next_frm */ layer->cur_frm = layer->next_frm; } static void vpbe_isr_odd_field(struct vpbe_display *disp_obj, struct vpbe_layer *layer) { struct osd_state *osd_device = disp_obj->osd_device; unsigned long addr; spin_lock(&disp_obj->dma_queue_lock); if (list_empty(&layer->dma_queue) || (layer->cur_frm != layer->next_frm)) { spin_unlock(&disp_obj->dma_queue_lock); return; } /* * one field is displayed configure * the next frame if it is available * otherwise hold on current frame * Get next from the buffer queue */ layer->next_frm = list_entry(layer->dma_queue.next, struct vpbe_disp_buffer, list); /* Remove that from the buffer queue */ list_del(&layer->next_frm->list); spin_unlock(&disp_obj->dma_queue_lock); /* Mark state of the frame to active */ layer->next_frm->vb.state = VB2_BUF_STATE_ACTIVE; addr = vb2_dma_contig_plane_dma_addr(&layer->next_frm->vb, 0); osd_device->ops.start_layer(osd_device, layer->layer_info.id, addr, disp_obj->cbcr_ofst); } /* interrupt service routine */ static irqreturn_t venc_isr(int irq, void *arg) { struct vpbe_display *disp_dev = (struct vpbe_display *)arg; struct vpbe_layer *layer; static unsigned last_event; unsigned event = 0; int fid; int i; if ((NULL == arg) || (NULL == disp_dev->dev[0])) return IRQ_HANDLED; if (venc_is_second_field(disp_dev)) event |= VENC_SECOND_FIELD; else event |= VENC_FIRST_FIELD; if (event == (last_event & ~VENC_END_OF_FRAME)) { /* * If the display is non-interlaced, then we need to flag the * end-of-frame event at every interrupt regardless of the * value of the FIDST bit. We can conclude that the display is * non-interlaced if the value of the FIDST bit is unchanged * from the previous interrupt. 
*/ event |= VENC_END_OF_FRAME; } else if (event == VENC_SECOND_FIELD) { /* end-of-frame for interlaced display */ event |= VENC_END_OF_FRAME; } last_event = event; for (i = 0; i < VPBE_DISPLAY_MAX_DEVICES; i++) { layer = disp_dev->dev[i]; if (!vb2_start_streaming_called(&layer->buffer_queue)) continue; if (layer->layer_first_int) { layer->layer_first_int = 0; continue; } /* Check the field format */ if ((V4L2_FIELD_NONE == layer->pix_fmt.field) && (event & VENC_END_OF_FRAME)) { /* Progressive mode */ vpbe_isr_even_field(disp_dev, layer); vpbe_isr_odd_field(disp_dev, layer); } else { /* Interlaced mode */ layer->field_id ^= 1; if (event & VENC_FIRST_FIELD) fid = 0; else fid = 1; /* * If field id does not match with store * field id */ if (fid != layer->field_id) { /* Make them in sync */ layer->field_id = fid; continue; } /* * device field id and local field id are * in sync. If this is even field */ if (0 == fid) vpbe_isr_even_field(disp_dev, layer); else /* odd field */ vpbe_isr_odd_field(disp_dev, layer); } } return IRQ_HANDLED; } /* * vpbe_buffer_prepare() * This is the callback function called from vb2_qbuf() function * the buffer is prepared and user space virtual address is converted into * physical address */ static int vpbe_buffer_prepare(struct vb2_buffer *vb) { struct vb2_queue *q = vb->vb2_queue; struct vpbe_layer *layer = vb2_get_drv_priv(q); struct vpbe_device *vpbe_dev = layer->disp_dev->vpbe_dev; unsigned long addr; v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "vpbe_buffer_prepare\n"); vb2_set_plane_payload(vb, 0, layer->pix_fmt.sizeimage); if (vb2_get_plane_payload(vb, 0) > vb2_plane_size(vb, 0)) return -EINVAL; addr = vb2_dma_contig_plane_dma_addr(vb, 0); if (!IS_ALIGNED(addr, 8)) { v4l2_err(&vpbe_dev->v4l2_dev, "buffer_prepare:offset is not aligned to 32 bytes\n"); return -EINVAL; } return 0; } /* * vpbe_buffer_setup() * This function allocates memory for the buffers */ static int vpbe_buffer_queue_setup(struct vb2_queue *vq, const struct v4l2_format 
*fmt, unsigned int *nbuffers, unsigned int *nplanes, unsigned int sizes[], void *alloc_ctxs[]) { /* Get the file handle object and layer object */ struct vpbe_layer *layer = vb2_get_drv_priv(vq); struct vpbe_device *vpbe_dev = layer->disp_dev->vpbe_dev; v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "vpbe_buffer_setup\n"); if (fmt && fmt->fmt.pix.sizeimage < layer->pix_fmt.sizeimage) return -EINVAL; /* Store number of buffers allocated in numbuffer member */ if (vq->num_buffers + *nbuffers < VPBE_DEFAULT_NUM_BUFS) *nbuffers = VPBE_DEFAULT_NUM_BUFS - vq->num_buffers; *nplanes = 1; sizes[0] = fmt ? fmt->fmt.pix.sizeimage : layer->pix_fmt.sizeimage; alloc_ctxs[0] = layer->alloc_ctx; return 0; } /* * vpbe_buffer_queue() * This function adds the buffer to DMA queue */ static void vpbe_buffer_queue(struct vb2_buffer *vb) { /* Get the file handle object and layer object */ struct vpbe_disp_buffer *buf = container_of(vb, struct vpbe_disp_buffer, vb); struct vpbe_layer *layer = vb2_get_drv_priv(vb->vb2_queue); struct vpbe_display *disp = layer->disp_dev; struct vpbe_device *vpbe_dev = layer->disp_dev->vpbe_dev; unsigned long flags; v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "vpbe_buffer_queue\n"); /* add the buffer to the DMA queue */ spin_lock_irqsave(&disp->dma_queue_lock, flags); list_add_tail(&buf->list, &layer->dma_queue); spin_unlock_irqrestore(&disp->dma_queue_lock, flags); } static int vpbe_start_streaming(struct vb2_queue *vq, unsigned int count) { struct vpbe_layer *layer = vb2_get_drv_priv(vq); struct osd_state *osd_device = layer->disp_dev->osd_device; int ret; osd_device->ops.disable_layer(osd_device, layer->layer_info.id); /* Get the next frame from the buffer queue */ layer->next_frm = layer->cur_frm = list_entry(layer->dma_queue.next, struct vpbe_disp_buffer, list); /* Remove buffer from the buffer queue */ list_del(&layer->cur_frm->list); /* Mark state of the current frame to active */ layer->cur_frm->vb.state = VB2_BUF_STATE_ACTIVE; /* Initialize field_id and 
started member */ layer->field_id = 0; /* Set parameters in OSD and VENC */ ret = vpbe_set_osd_display_params(layer->disp_dev, layer); if (ret < 0) { struct vpbe_disp_buffer *buf, *tmp; vb2_buffer_done(&layer->cur_frm->vb, VB2_BUF_STATE_QUEUED); list_for_each_entry_safe(buf, tmp, &layer->dma_queue, list) { list_del(&buf->list); vb2_buffer_done(&buf->vb, VB2_BUF_STATE_QUEUED); } return ret; } /* * if request format is yuv420 semiplanar, need to * enable both video windows */ layer->layer_first_int = 1; return ret; } static void vpbe_stop_streaming(struct vb2_queue *vq) { struct vpbe_layer *layer = vb2_get_drv_priv(vq); struct osd_state *osd_device = layer->disp_dev->osd_device; struct vpbe_display *disp = layer->disp_dev; unsigned long flags; if (!vb2_is_streaming(vq)) return; osd_device->ops.disable_layer(osd_device, layer->layer_info.id); /* release all active buffers */ spin_lock_irqsave(&disp->dma_queue_lock, flags); if (layer->cur_frm == layer->next_frm) { vb2_buffer_done(&layer->cur_frm->vb, VB2_BUF_STATE_ERROR); } else { if (layer->cur_frm != NULL) vb2_buffer_done(&layer->cur_frm->vb, VB2_BUF_STATE_ERROR); if (layer->next_frm != NULL) vb2_buffer_done(&layer->next_frm->vb, VB2_BUF_STATE_ERROR); } while (!list_empty(&layer->dma_queue)) { layer->next_frm = list_entry(layer->dma_queue.next, struct vpbe_disp_buffer, list); list_del(&layer->next_frm->list); vb2_buffer_done(&layer->next_frm->vb, VB2_BUF_STATE_ERROR); } spin_unlock_irqrestore(&disp->dma_queue_lock, flags); } static struct vb2_ops video_qops = { .queue_setup = vpbe_buffer_queue_setup, .wait_prepare = vb2_ops_wait_prepare, .wait_finish = vb2_ops_wait_finish, .buf_prepare = vpbe_buffer_prepare, .start_streaming = vpbe_start_streaming, .stop_streaming = vpbe_stop_streaming, .buf_queue = vpbe_buffer_queue, }; static struct vpbe_layer* _vpbe_display_get_other_win_layer(struct vpbe_display *disp_dev, struct vpbe_layer *layer) { enum vpbe_display_device_id thiswin, otherwin; thiswin = layer->device_id; 
otherwin = (thiswin == VPBE_DISPLAY_DEVICE_0) ? VPBE_DISPLAY_DEVICE_1 : VPBE_DISPLAY_DEVICE_0; return disp_dev->dev[otherwin]; } static int vpbe_set_osd_display_params(struct vpbe_display *disp_dev, struct vpbe_layer *layer) { struct osd_layer_config *cfg = &layer->layer_info.config; struct osd_state *osd_device = disp_dev->osd_device; struct vpbe_device *vpbe_dev = disp_dev->vpbe_dev; unsigned long addr; int ret; addr = vb2_dma_contig_plane_dma_addr(&layer->cur_frm->vb, 0); /* Set address in the display registers */ osd_device->ops.start_layer(osd_device, layer->layer_info.id, addr, disp_dev->cbcr_ofst); ret = osd_device->ops.enable_layer(osd_device, layer->layer_info.id, 0); if (ret < 0) { v4l2_err(&vpbe_dev->v4l2_dev, "Error in enabling osd window layer 0\n"); return -1; } /* Enable the window */ layer->layer_info.enable = 1; if (cfg->pixfmt == PIXFMT_NV12) { struct vpbe_layer *otherlayer = _vpbe_display_get_other_win_layer(disp_dev, layer); ret = osd_device->ops.enable_layer(osd_device, otherlayer->layer_info.id, 1); if (ret < 0) { v4l2_err(&vpbe_dev->v4l2_dev, "Error in enabling osd window layer 1\n"); return -1; } otherlayer->layer_info.enable = 1; } return 0; } static void vpbe_disp_calculate_scale_factor(struct vpbe_display *disp_dev, struct vpbe_layer *layer, int expected_xsize, int expected_ysize) { struct display_layer_info *layer_info = &layer->layer_info; struct v4l2_pix_format *pixfmt = &layer->pix_fmt; struct osd_layer_config *cfg = &layer->layer_info.config; struct vpbe_device *vpbe_dev = disp_dev->vpbe_dev; int calculated_xsize; int h_exp = 0; int v_exp = 0; int h_scale; int v_scale; v4l2_std_id standard_id = vpbe_dev->current_timings.std_id; /* * Application initially set the image format. Current display * size is obtained from the vpbe display controller. expected_xsize * and expected_ysize are set through S_CROP ioctl. 
Based on this, * driver will calculate the scale factors for vertical and * horizontal direction so that the image is displayed scaled * and expanded. Application uses expansion to display the image * in a square pixel. Otherwise it is displayed using displays * pixel aspect ratio.It is expected that application chooses * the crop coordinates for cropped or scaled display. if crop * size is less than the image size, it is displayed cropped or * it is displayed scaled and/or expanded. * * to begin with, set the crop window same as expected. Later we * will override with scaled window size */ cfg->xsize = pixfmt->width; cfg->ysize = pixfmt->height; layer_info->h_zoom = ZOOM_X1; /* no horizontal zoom */ layer_info->v_zoom = ZOOM_X1; /* no horizontal zoom */ layer_info->h_exp = H_EXP_OFF; /* no horizontal zoom */ layer_info->v_exp = V_EXP_OFF; /* no horizontal zoom */ if (pixfmt->width < expected_xsize) { h_scale = vpbe_dev->current_timings.xres / pixfmt->width; if (h_scale < 2) h_scale = 1; else if (h_scale >= 4) h_scale = 4; else h_scale = 2; cfg->xsize *= h_scale; if (cfg->xsize < expected_xsize) { if ((standard_id & V4L2_STD_525_60) || (standard_id & V4L2_STD_625_50)) { calculated_xsize = (cfg->xsize * VPBE_DISPLAY_H_EXP_RATIO_N) / VPBE_DISPLAY_H_EXP_RATIO_D; if (calculated_xsize <= expected_xsize) { h_exp = 1; cfg->xsize = calculated_xsize; } } } if (h_scale == 2) layer_info->h_zoom = ZOOM_X2; else if (h_scale == 4) layer_info->h_zoom = ZOOM_X4; if (h_exp) layer_info->h_exp = H_EXP_9_OVER_8; } else { /* no scaling, only cropping. 
Set display area to crop area */ cfg->xsize = expected_xsize; } if (pixfmt->height < expected_ysize) { v_scale = expected_ysize / pixfmt->height; if (v_scale < 2) v_scale = 1; else if (v_scale >= 4) v_scale = 4; else v_scale = 2; cfg->ysize *= v_scale; if (cfg->ysize < expected_ysize) { if ((standard_id & V4L2_STD_625_50)) { calculated_xsize = (cfg->ysize * VPBE_DISPLAY_V_EXP_RATIO_N) / VPBE_DISPLAY_V_EXP_RATIO_D; if (calculated_xsize <= expected_ysize) { v_exp = 1; cfg->ysize = calculated_xsize; } } } if (v_scale == 2) layer_info->v_zoom = ZOOM_X2; else if (v_scale == 4) layer_info->v_zoom = ZOOM_X4; if (v_exp) layer_info->h_exp = V_EXP_6_OVER_5; } else { /* no scaling, only cropping. Set display area to crop area */ cfg->ysize = expected_ysize; } v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "crop display xsize = %d, ysize = %d\n", cfg->xsize, cfg->ysize); } static void vpbe_disp_adj_position(struct vpbe_display *disp_dev, struct vpbe_layer *layer, int top, int left) { struct osd_layer_config *cfg = &layer->layer_info.config; struct vpbe_device *vpbe_dev = disp_dev->vpbe_dev; cfg->xpos = min((unsigned int)left, vpbe_dev->current_timings.xres - cfg->xsize); cfg->ypos = min((unsigned int)top, vpbe_dev->current_timings.yres - cfg->ysize); v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "new xpos = %d, ypos = %d\n", cfg->xpos, cfg->ypos); } static void vpbe_disp_check_window_params(struct vpbe_display *disp_dev, struct v4l2_rect *c) { struct vpbe_device *vpbe_dev = disp_dev->vpbe_dev; if ((c->width == 0) || ((c->width + c->left) > vpbe_dev->current_timings.xres)) c->width = vpbe_dev->current_timings.xres - c->left; if ((c->height == 0) || ((c->height + c->top) > vpbe_dev->current_timings.yres)) c->height = vpbe_dev->current_timings.yres - c->top; /* window height must be even for interlaced display */ if (vpbe_dev->current_timings.interlaced) c->height &= (~0x01); } /** * vpbe_try_format() * If user application provides width and height, and have bytesperline set * to zero, driver 
calculates bytesperline and sizeimage based on hardware * limits. */ static int vpbe_try_format(struct vpbe_display *disp_dev, struct v4l2_pix_format *pixfmt, int check) { struct vpbe_device *vpbe_dev = disp_dev->vpbe_dev; int min_height = 1; int min_width = 32; int max_height; int max_width; int bpp; if ((pixfmt->pixelformat != V4L2_PIX_FMT_UYVY) && (pixfmt->pixelformat != V4L2_PIX_FMT_NV12)) /* choose default as V4L2_PIX_FMT_UYVY */ pixfmt->pixelformat = V4L2_PIX_FMT_UYVY; /* Check the field format */ if ((pixfmt->field != V4L2_FIELD_INTERLACED) && (pixfmt->field != V4L2_FIELD_NONE)) { if (vpbe_dev->current_timings.interlaced) pixfmt->field = V4L2_FIELD_INTERLACED; else pixfmt->field = V4L2_FIELD_NONE; } if (pixfmt->field == V4L2_FIELD_INTERLACED) min_height = 2; if (pixfmt->pixelformat == V4L2_PIX_FMT_NV12) bpp = 1; else bpp = 2; max_width = vpbe_dev->current_timings.xres; max_height = vpbe_dev->current_timings.yres; min_width /= bpp; if (!pixfmt->width || (pixfmt->width < min_width) || (pixfmt->width > max_width)) { pixfmt->width = vpbe_dev->current_timings.xres; } if (!pixfmt->height || (pixfmt->height < min_height) || (pixfmt->height > max_height)) { pixfmt->height = vpbe_dev->current_timings.yres; } if (pixfmt->bytesperline < (pixfmt->width * bpp)) pixfmt->bytesperline = pixfmt->width * bpp; /* Make the bytesperline 32 byte aligned */ pixfmt->bytesperline = ((pixfmt->width * bpp + 31) & ~31); if (pixfmt->pixelformat == V4L2_PIX_FMT_NV12) pixfmt->sizeimage = pixfmt->bytesperline * pixfmt->height + (pixfmt->bytesperline * pixfmt->height >> 1); else pixfmt->sizeimage = pixfmt->bytesperline * pixfmt->height; return 0; } static int vpbe_display_querycap(struct file *file, void *priv, struct v4l2_capability *cap) { struct vpbe_layer *layer = video_drvdata(file); struct vpbe_device *vpbe_dev = layer->disp_dev->vpbe_dev; cap->device_caps = V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_STREAMING; cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS; snprintf(cap->driver, 
sizeof(cap->driver), "%s", dev_name(vpbe_dev->pdev)); snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s", dev_name(vpbe_dev->pdev)); strlcpy(cap->card, vpbe_dev->cfg->module_name, sizeof(cap->card)); return 0; } static int vpbe_display_s_crop(struct file *file, void *priv, const struct v4l2_crop *crop) { struct vpbe_layer *layer = video_drvdata(file); struct vpbe_display *disp_dev = layer->disp_dev; struct vpbe_device *vpbe_dev = disp_dev->vpbe_dev; struct osd_layer_config *cfg = &layer->layer_info.config; struct osd_state *osd_device = disp_dev->osd_device; struct v4l2_rect rect = crop->c; int ret; v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "VIDIOC_S_CROP, layer id = %d\n", layer->device_id); if (crop->type != V4L2_BUF_TYPE_VIDEO_OUTPUT) { v4l2_err(&vpbe_dev->v4l2_dev, "Invalid buf type\n"); return -EINVAL; } if (rect.top < 0) rect.top = 0; if (rect.left < 0) rect.left = 0; vpbe_disp_check_window_params(disp_dev, &rect); osd_device->ops.get_layer_config(osd_device, layer->layer_info.id, cfg); vpbe_disp_calculate_scale_factor(disp_dev, layer, rect.width, rect.height); vpbe_disp_adj_position(disp_dev, layer, rect.top, rect.left); ret = osd_device->ops.set_layer_config(osd_device, layer->layer_info.id, cfg); if (ret < 0) { v4l2_err(&vpbe_dev->v4l2_dev, "Error in set layer config:\n"); return -EINVAL; } /* apply zooming and h or v expansion */ osd_device->ops.set_zoom(osd_device, layer->layer_info.id, layer->layer_info.h_zoom, layer->layer_info.v_zoom); ret = osd_device->ops.set_vid_expansion(osd_device, layer->layer_info.h_exp, layer->layer_info.v_exp); if (ret < 0) { v4l2_err(&vpbe_dev->v4l2_dev, "Error in set vid expansion:\n"); return -EINVAL; } if ((layer->layer_info.h_zoom != ZOOM_X1) || (layer->layer_info.v_zoom != ZOOM_X1) || (layer->layer_info.h_exp != H_EXP_OFF) || (layer->layer_info.v_exp != V_EXP_OFF)) /* Enable expansion filter */ osd_device->ops.set_interpolation_filter(osd_device, 1); else osd_device->ops.set_interpolation_filter(osd_device, 0); 
return 0; } static int vpbe_display_g_crop(struct file *file, void *priv, struct v4l2_crop *crop) { struct vpbe_layer *layer = video_drvdata(file); struct osd_layer_config *cfg = &layer->layer_info.config; struct vpbe_device *vpbe_dev = layer->disp_dev->vpbe_dev; struct osd_state *osd_device = layer->disp_dev->osd_device; struct v4l2_rect *rect = &crop->c; v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "VIDIOC_G_CROP, layer id = %d\n", layer->device_id); if (crop->type != V4L2_BUF_TYPE_VIDEO_OUTPUT) { v4l2_err(&vpbe_dev->v4l2_dev, "Invalid buf type\n"); return -EINVAL; } osd_device->ops.get_layer_config(osd_device, layer->layer_info.id, cfg); rect->top = cfg->ypos; rect->left = cfg->xpos; rect->width = cfg->xsize; rect->height = cfg->ysize; return 0; } static int vpbe_display_cropcap(struct file *file, void *priv, struct v4l2_cropcap *cropcap) { struct vpbe_layer *layer = video_drvdata(file); struct vpbe_device *vpbe_dev = layer->disp_dev->vpbe_dev; v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "VIDIOC_CROPCAP ioctl\n"); cropcap->type = V4L2_BUF_TYPE_VIDEO_OUTPUT; cropcap->bounds.left = 0; cropcap->bounds.top = 0; cropcap->bounds.width = vpbe_dev->current_timings.xres; cropcap->bounds.height = vpbe_dev->current_timings.yres; cropcap->pixelaspect = vpbe_dev->current_timings.aspect; cropcap->defrect = cropcap->bounds; return 0; } static int vpbe_display_g_fmt(struct file *file, void *priv, struct v4l2_format *fmt) { struct vpbe_layer *layer = video_drvdata(file); struct vpbe_device *vpbe_dev = layer->disp_dev->vpbe_dev; v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "VIDIOC_G_FMT, layer id = %d\n", layer->device_id); /* If buffer type is video output */ if (V4L2_BUF_TYPE_VIDEO_OUTPUT != fmt->type) { v4l2_err(&vpbe_dev->v4l2_dev, "invalid type\n"); return -EINVAL; } /* Fill in the information about format */ fmt->fmt.pix = layer->pix_fmt; return 0; } static int vpbe_display_enum_fmt(struct file *file, void *priv, struct v4l2_fmtdesc *fmt) { struct vpbe_layer *layer = video_drvdata(file); 
struct vpbe_device *vpbe_dev = layer->disp_dev->vpbe_dev; unsigned int index = 0; v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "VIDIOC_ENUM_FMT, layer id = %d\n", layer->device_id); if (fmt->index > 1) { v4l2_err(&vpbe_dev->v4l2_dev, "Invalid format index\n"); return -EINVAL; } /* Fill in the information about format */ index = fmt->index; memset(fmt, 0, sizeof(*fmt)); fmt->index = index; fmt->type = V4L2_BUF_TYPE_VIDEO_OUTPUT; if (index == 0) { strcpy(fmt->description, "YUV 4:2:2 - UYVY"); fmt->pixelformat = V4L2_PIX_FMT_UYVY; } else { strcpy(fmt->description, "Y/CbCr 4:2:0"); fmt->pixelformat = V4L2_PIX_FMT_NV12; } return 0; } static int vpbe_display_s_fmt(struct file *file, void *priv, struct v4l2_format *fmt) { struct vpbe_layer *layer = video_drvdata(file); struct vpbe_display *disp_dev = layer->disp_dev; struct vpbe_device *vpbe_dev = disp_dev->vpbe_dev; struct osd_layer_config *cfg = &layer->layer_info.config; struct v4l2_pix_format *pixfmt = &fmt->fmt.pix; struct osd_state *osd_device = disp_dev->osd_device; int ret; v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "VIDIOC_S_FMT, layer id = %d\n", layer->device_id); if (vb2_is_busy(&layer->buffer_queue)) return -EBUSY; if (V4L2_BUF_TYPE_VIDEO_OUTPUT != fmt->type) { v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "invalid type\n"); return -EINVAL; } /* Check for valid pixel format */ ret = vpbe_try_format(disp_dev, pixfmt, 1); if (ret) return ret; /* YUV420 is requested, check availability of the other video window */ layer->pix_fmt = *pixfmt; if (pixfmt->pixelformat == V4L2_PIX_FMT_NV12) { struct vpbe_layer *otherlayer; otherlayer = _vpbe_display_get_other_win_layer(disp_dev, layer); /* if other layer is available, only * claim it, do not configure it */ ret = osd_device->ops.request_layer(osd_device, otherlayer->layer_info.id); if (ret < 0) { v4l2_err(&vpbe_dev->v4l2_dev, "Display Manager failed to allocate layer\n"); return -EBUSY; } } /* Get osd layer config */ osd_device->ops.get_layer_config(osd_device, 
layer->layer_info.id, cfg); /* Store the pixel format in the layer object */ cfg->xsize = pixfmt->width; cfg->ysize = pixfmt->height; cfg->line_length = pixfmt->bytesperline; cfg->ypos = 0; cfg->xpos = 0; cfg->interlaced = vpbe_dev->current_timings.interlaced; if (V4L2_PIX_FMT_UYVY == pixfmt->pixelformat) cfg->pixfmt = PIXFMT_YCBCRI; /* Change of the default pixel format for both video windows */ if (V4L2_PIX_FMT_NV12 == pixfmt->pixelformat) { struct vpbe_layer *otherlayer; cfg->pixfmt = PIXFMT_NV12; otherlayer = _vpbe_display_get_other_win_layer(disp_dev, layer); otherlayer->layer_info.config.pixfmt = PIXFMT_NV12; } /* Set the layer config in the osd window */ ret = osd_device->ops.set_layer_config(osd_device, layer->layer_info.id, cfg); if (ret < 0) { v4l2_err(&vpbe_dev->v4l2_dev, "Error in S_FMT params:\n"); return -EINVAL; } /* Readback and fill the local copy of current pix format */ osd_device->ops.get_layer_config(osd_device, layer->layer_info.id, cfg); return 0; } static int vpbe_display_try_fmt(struct file *file, void *priv, struct v4l2_format *fmt) { struct vpbe_layer *layer = video_drvdata(file); struct vpbe_display *disp_dev = layer->disp_dev; struct vpbe_device *vpbe_dev = layer->disp_dev->vpbe_dev; struct v4l2_pix_format *pixfmt = &fmt->fmt.pix; v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "VIDIOC_TRY_FMT\n"); if (V4L2_BUF_TYPE_VIDEO_OUTPUT != fmt->type) { v4l2_err(&vpbe_dev->v4l2_dev, "invalid type\n"); return -EINVAL; } /* Check for valid field format */ return vpbe_try_format(disp_dev, pixfmt, 0); } /** * vpbe_display_s_std - Set the given standard in the encoder * * Sets the standard if supported by the current encoder. Return the status. 
* 0 - success & -EINVAL on error */ static int vpbe_display_s_std(struct file *file, void *priv, v4l2_std_id std_id) { struct vpbe_layer *layer = video_drvdata(file); struct vpbe_device *vpbe_dev = layer->disp_dev->vpbe_dev; int ret; v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "VIDIOC_S_STD\n"); if (vb2_is_busy(&layer->buffer_queue)) return -EBUSY; if (NULL != vpbe_dev->ops.s_std) { ret = vpbe_dev->ops.s_std(vpbe_dev, std_id); if (ret) { v4l2_err(&vpbe_dev->v4l2_dev, "Failed to set standard for sub devices\n"); return -EINVAL; } } else { return -EINVAL; } return 0; } /** * vpbe_display_g_std - Get the standard in the current encoder * * Get the standard in the current encoder. Return the status. 0 - success * -EINVAL on error */ static int vpbe_display_g_std(struct file *file, void *priv, v4l2_std_id *std_id) { struct vpbe_layer *layer = video_drvdata(file); struct vpbe_device *vpbe_dev = layer->disp_dev->vpbe_dev; v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "VIDIOC_G_STD\n"); /* Get the standard from the current encoder */ if (vpbe_dev->current_timings.timings_type & VPBE_ENC_STD) { *std_id = vpbe_dev->current_timings.std_id; return 0; } return -EINVAL; } /** * vpbe_display_enum_output - enumerate outputs * * Enumerates the outputs available at the vpbe display * returns the status, -EINVAL if end of output list */ static int vpbe_display_enum_output(struct file *file, void *priv, struct v4l2_output *output) { struct vpbe_layer *layer = video_drvdata(file); struct vpbe_device *vpbe_dev = layer->disp_dev->vpbe_dev; int ret; v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "VIDIOC_ENUM_OUTPUT\n"); /* Enumerate outputs */ if (NULL == vpbe_dev->ops.enum_outputs) return -EINVAL; ret = vpbe_dev->ops.enum_outputs(vpbe_dev, output); if (ret) { v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "Failed to enumerate outputs\n"); return -EINVAL; } return 0; } /** * vpbe_display_s_output - Set output to * the output specified by the index */ static int vpbe_display_s_output(struct file *file, void 
*priv, unsigned int i) { struct vpbe_layer *layer = video_drvdata(file); struct vpbe_device *vpbe_dev = layer->disp_dev->vpbe_dev; int ret; v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "VIDIOC_S_OUTPUT\n"); if (vb2_is_busy(&layer->buffer_queue)) return -EBUSY; if (NULL == vpbe_dev->ops.set_output) return -EINVAL; ret = vpbe_dev->ops.set_output(vpbe_dev, i); if (ret) { v4l2_err(&vpbe_dev->v4l2_dev, "Failed to set output for sub devices\n"); return -EINVAL; } return 0; } /** * vpbe_display_g_output - Get output from subdevice * for a given by the index */ static int vpbe_display_g_output(struct file *file, void *priv, unsigned int *i) { struct vpbe_layer *layer = video_drvdata(file); struct vpbe_device *vpbe_dev = layer->disp_dev->vpbe_dev; v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "VIDIOC_G_OUTPUT\n"); /* Get the standard from the current encoder */ *i = vpbe_dev->current_out_index; return 0; } /** * vpbe_display_enum_dv_timings - Enumerate the dv timings * * enum the timings in the current encoder. Return the status. 0 - success * -EINVAL on error */ static int vpbe_display_enum_dv_timings(struct file *file, void *priv, struct v4l2_enum_dv_timings *timings) { struct vpbe_layer *layer = video_drvdata(file); struct vpbe_device *vpbe_dev = layer->disp_dev->vpbe_dev; int ret; v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "VIDIOC_ENUM_DV_TIMINGS\n"); /* Enumerate outputs */ if (NULL == vpbe_dev->ops.enum_dv_timings) return -EINVAL; ret = vpbe_dev->ops.enum_dv_timings(vpbe_dev, timings); if (ret) { v4l2_err(&vpbe_dev->v4l2_dev, "Failed to enumerate dv timings info\n"); return -EINVAL; } return 0; } /** * vpbe_display_s_dv_timings - Set the dv timings * * Set the timings in the current encoder. Return the status. 
0 - success * -EINVAL on error */ static int vpbe_display_s_dv_timings(struct file *file, void *priv, struct v4l2_dv_timings *timings) { struct vpbe_layer *layer = video_drvdata(file); struct vpbe_device *vpbe_dev = layer->disp_dev->vpbe_dev; int ret; v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "VIDIOC_S_DV_TIMINGS\n"); if (vb2_is_busy(&layer->buffer_queue)) return -EBUSY; /* Set the given standard in the encoder */ if (!vpbe_dev->ops.s_dv_timings) return -EINVAL; ret = vpbe_dev->ops.s_dv_timings(vpbe_dev, timings); if (ret) { v4l2_err(&vpbe_dev->v4l2_dev, "Failed to set the dv timings info\n"); return -EINVAL; } return 0; } /** * vpbe_display_g_dv_timings - Set the dv timings * * Get the timings in the current encoder. Return the status. 0 - success * -EINVAL on error */ static int vpbe_display_g_dv_timings(struct file *file, void *priv, struct v4l2_dv_timings *dv_timings) { struct vpbe_layer *layer = video_drvdata(file); struct vpbe_device *vpbe_dev = layer->disp_dev->vpbe_dev; v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "VIDIOC_G_DV_TIMINGS\n"); /* Get the given standard in the encoder */ if (vpbe_dev->current_timings.timings_type & VPBE_ENC_DV_TIMINGS) { *dv_timings = vpbe_dev->current_timings.dv_timings; } else { return -EINVAL; } return 0; } /* * vpbe_display_open() * It creates object of file handle structure and stores it in private_data * member of filepointer */ static int vpbe_display_open(struct file *file) { struct vpbe_layer *layer = video_drvdata(file); struct vpbe_display *disp_dev = layer->disp_dev; struct vpbe_device *vpbe_dev = disp_dev->vpbe_dev; struct osd_state *osd_device = disp_dev->osd_device; int err; /* creating context for file descriptor */ err = v4l2_fh_open(file); if (err) { v4l2_err(&vpbe_dev->v4l2_dev, "v4l2_fh_open failed\n"); return err; } /* leaving if layer is already initialized */ if (!v4l2_fh_is_singular_file(file)) return err; if (!layer->usrs) { if (mutex_lock_interruptible(&layer->opslock)) return -ERESTARTSYS; /* First claim the 
layer for this device */ err = osd_device->ops.request_layer(osd_device, layer->layer_info.id); mutex_unlock(&layer->opslock); if (err < 0) { /* Couldn't get layer */ v4l2_err(&vpbe_dev->v4l2_dev, "Display Manager failed to allocate layer\n"); v4l2_fh_release(file); return -EINVAL; } } /* Increment layer usrs counter */ layer->usrs++; v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "vpbe display device opened successfully\n"); return 0; } /* * vpbe_display_release() * This function deletes buffer queue, frees the buffers and the davinci * display file * handle */ static int vpbe_display_release(struct file *file) { struct vpbe_layer *layer = video_drvdata(file); struct osd_layer_config *cfg = &layer->layer_info.config; struct vpbe_display *disp_dev = layer->disp_dev; struct vpbe_device *vpbe_dev = disp_dev->vpbe_dev; struct osd_state *osd_device = disp_dev->osd_device; v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "vpbe_display_release\n"); mutex_lock(&layer->opslock); osd_device->ops.disable_layer(osd_device, layer->layer_info.id); /* Decrement layer usrs counter */ layer->usrs--; /* If this file handle has initialize encoder device, reset it */ if (!layer->usrs) { if (cfg->pixfmt == PIXFMT_NV12) { struct vpbe_layer *otherlayer; otherlayer = _vpbe_display_get_other_win_layer(disp_dev, layer); osd_device->ops.disable_layer(osd_device, otherlayer->layer_info.id); osd_device->ops.release_layer(osd_device, otherlayer->layer_info.id); } osd_device->ops.disable_layer(osd_device, layer->layer_info.id); osd_device->ops.release_layer(osd_device, layer->layer_info.id); } _vb2_fop_release(file, NULL); mutex_unlock(&layer->opslock); disp_dev->cbcr_ofst = 0; return 0; } /* vpbe capture ioctl operations */ static const struct v4l2_ioctl_ops vpbe_ioctl_ops = { .vidioc_querycap = vpbe_display_querycap, .vidioc_g_fmt_vid_out = vpbe_display_g_fmt, .vidioc_enum_fmt_vid_out = vpbe_display_enum_fmt, .vidioc_s_fmt_vid_out = vpbe_display_s_fmt, .vidioc_try_fmt_vid_out = vpbe_display_try_fmt, 
.vidioc_reqbufs = vb2_ioctl_reqbufs, .vidioc_create_bufs = vb2_ioctl_create_bufs, .vidioc_querybuf = vb2_ioctl_querybuf, .vidioc_qbuf = vb2_ioctl_qbuf, .vidioc_dqbuf = vb2_ioctl_dqbuf, .vidioc_streamon = vb2_ioctl_streamon, .vidioc_streamoff = vb2_ioctl_streamoff, .vidioc_expbuf = vb2_ioctl_expbuf, .vidioc_cropcap = vpbe_display_cropcap, .vidioc_g_crop = vpbe_display_g_crop, .vidioc_s_crop = vpbe_display_s_crop, .vidioc_s_std = vpbe_display_s_std, .vidioc_g_std = vpbe_display_g_std, .vidioc_enum_output = vpbe_display_enum_output, .vidioc_s_output = vpbe_display_s_output, .vidioc_g_output = vpbe_display_g_output, .vidioc_s_dv_timings = vpbe_display_s_dv_timings, .vidioc_g_dv_timings = vpbe_display_g_dv_timings, .vidioc_enum_dv_timings = vpbe_display_enum_dv_timings, }; static struct v4l2_file_operations vpbe_fops = { .owner = THIS_MODULE, .open = vpbe_display_open, .release = vpbe_display_release, .unlocked_ioctl = video_ioctl2, .mmap = vb2_fop_mmap, .poll = vb2_fop_poll, }; static int vpbe_device_get(struct device *dev, void *data) { struct platform_device *pdev = to_platform_device(dev); struct vpbe_display *vpbe_disp = data; if (strcmp("vpbe_controller", pdev->name) == 0) vpbe_disp->vpbe_dev = platform_get_drvdata(pdev); if (strstr(pdev->name, "vpbe-osd") != NULL) vpbe_disp->osd_device = platform_get_drvdata(pdev); return 0; } static int init_vpbe_layer(int i, struct vpbe_display *disp_dev, struct platform_device *pdev) { struct vpbe_layer *vpbe_display_layer = NULL; struct video_device *vbd = NULL; /* Allocate memory for four plane display objects */ disp_dev->dev[i] = kzalloc(sizeof(struct vpbe_layer), GFP_KERNEL); /* If memory allocation fails, return error */ if (!disp_dev->dev[i]) { printk(KERN_ERR "ran out of memory\n"); return -ENOMEM; } spin_lock_init(&disp_dev->dev[i]->irqlock); mutex_init(&disp_dev->dev[i]->opslock); /* Get the pointer to the layer object */ vpbe_display_layer = disp_dev->dev[i]; vbd = &vpbe_display_layer->video_dev; /* Initialize field 
of video device */ vbd->release = video_device_release_empty; vbd->fops = &vpbe_fops; vbd->ioctl_ops = &vpbe_ioctl_ops; vbd->minor = -1; vbd->v4l2_dev = &disp_dev->vpbe_dev->v4l2_dev; vbd->lock = &vpbe_display_layer->opslock; vbd->vfl_dir = VFL_DIR_TX; if (disp_dev->vpbe_dev->current_timings.timings_type & VPBE_ENC_STD) vbd->tvnorms = (V4L2_STD_525_60 | V4L2_STD_625_50); snprintf(vbd->name, sizeof(vbd->name), "DaVinci_VPBE Display_DRIVER_V%d.%d.%d", (VPBE_DISPLAY_VERSION_CODE >> 16) & 0xff, (VPBE_DISPLAY_VERSION_CODE >> 8) & 0xff, (VPBE_DISPLAY_VERSION_CODE) & 0xff); vpbe_display_layer->device_id = i; vpbe_display_layer->layer_info.id = ((i == VPBE_DISPLAY_DEVICE_0) ? WIN_VID0 : WIN_VID1); return 0; } static int register_device(struct vpbe_layer *vpbe_display_layer, struct vpbe_display *disp_dev, struct platform_device *pdev) { int err; v4l2_info(&disp_dev->vpbe_dev->v4l2_dev, "Trying to register VPBE display device.\n"); v4l2_info(&disp_dev->vpbe_dev->v4l2_dev, "layer=%x,layer->video_dev=%x\n", (int)vpbe_display_layer, (int)&vpbe_display_layer->video_dev); vpbe_display_layer->video_dev.queue = &vpbe_display_layer->buffer_queue; err = video_register_device(&vpbe_display_layer->video_dev, VFL_TYPE_GRABBER, -1); if (err) return -ENODEV; vpbe_display_layer->disp_dev = disp_dev; /* set the driver data in platform device */ platform_set_drvdata(pdev, disp_dev); video_set_drvdata(&vpbe_display_layer->video_dev, vpbe_display_layer); return 0; } /* * vpbe_display_probe() * This function creates device entries by register itself to the V4L2 driver * and initializes fields of each layer objects */ static int vpbe_display_probe(struct platform_device *pdev) { struct vpbe_display *disp_dev; struct v4l2_device *v4l2_dev; struct resource *res = NULL; struct vb2_queue *q; int k; int i; int err; int irq; printk(KERN_DEBUG "vpbe_display_probe\n"); /* Allocate memory for vpbe_display */ disp_dev = devm_kzalloc(&pdev->dev, sizeof(struct vpbe_display), GFP_KERNEL); if (!disp_dev) 
return -ENOMEM; spin_lock_init(&disp_dev->dma_queue_lock); /* * Scan all the platform devices to find the vpbe * controller device and get the vpbe_dev object */ err = bus_for_each_dev(&platform_bus_type, NULL, disp_dev, vpbe_device_get); if (err < 0) return err; v4l2_dev = &disp_dev->vpbe_dev->v4l2_dev; /* Initialize the vpbe display controller */ if (NULL != disp_dev->vpbe_dev->ops.initialize) { err = disp_dev->vpbe_dev->ops.initialize(&pdev->dev, disp_dev->vpbe_dev); if (err) { v4l2_err(v4l2_dev, "Error initing vpbe\n"); err = -ENOMEM; goto probe_out; } } for (i = 0; i < VPBE_DISPLAY_MAX_DEVICES; i++) { if (init_vpbe_layer(i, disp_dev, pdev)) { err = -ENODEV; goto probe_out; } } res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); if (!res) { v4l2_err(v4l2_dev, "Unable to get VENC interrupt resource\n"); err = -ENODEV; goto probe_out; } irq = res->start; err = devm_request_irq(&pdev->dev, irq, venc_isr, 0, VPBE_DISPLAY_DRIVER, disp_dev); if (err) { v4l2_err(v4l2_dev, "VPBE IRQ request failed\n"); goto probe_out; } for (i = 0; i < VPBE_DISPLAY_MAX_DEVICES; i++) { /* initialize vb2 queue */ q = &disp_dev->dev[i]->buffer_queue; memset(q, 0, sizeof(*q)); q->type = V4L2_BUF_TYPE_VIDEO_OUTPUT; q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF; q->drv_priv = disp_dev->dev[i]; q->ops = &video_qops; q->mem_ops = &vb2_dma_contig_memops; q->buf_struct_size = sizeof(struct vpbe_disp_buffer); q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC; q->min_buffers_needed = 1; q->lock = &disp_dev->dev[i]->opslock; err = vb2_queue_init(q); if (err) { v4l2_err(v4l2_dev, "vb2_queue_init() failed\n"); goto probe_out; } disp_dev->dev[i]->alloc_ctx = vb2_dma_contig_init_ctx(disp_dev->vpbe_dev->pdev); if (IS_ERR(disp_dev->dev[i]->alloc_ctx)) { v4l2_err(v4l2_dev, "Failed to get the context\n"); err = PTR_ERR(disp_dev->dev[i]->alloc_ctx); goto probe_out; } INIT_LIST_HEAD(&disp_dev->dev[i]->dma_queue); if (register_device(disp_dev->dev[i], disp_dev, pdev)) { err = -ENODEV; goto 
probe_out; } } v4l2_dbg(1, debug, v4l2_dev, "Successfully completed the probing of vpbe v4l2 device\n"); return 0; probe_out: for (k = 0; k < VPBE_DISPLAY_MAX_DEVICES; k++) { /* Unregister video device */ if (disp_dev->dev[k] != NULL) { vb2_dma_contig_cleanup_ctx(disp_dev->dev[k]->alloc_ctx); video_unregister_device(&disp_dev->dev[k]->video_dev); kfree(disp_dev->dev[k]); } } return err; } /* * vpbe_display_remove() * It un-register hardware layer from V4L2 driver */ static int vpbe_display_remove(struct platform_device *pdev) { struct vpbe_layer *vpbe_display_layer; struct vpbe_display *disp_dev = platform_get_drvdata(pdev); struct vpbe_device *vpbe_dev = disp_dev->vpbe_dev; int i; v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "vpbe_display_remove\n"); /* deinitialize the vpbe display controller */ if (NULL != vpbe_dev->ops.deinitialize) vpbe_dev->ops.deinitialize(&pdev->dev, vpbe_dev); /* un-register device */ for (i = 0; i < VPBE_DISPLAY_MAX_DEVICES; i++) { /* Get the pointer to the layer object */ vpbe_display_layer = disp_dev->dev[i]; vb2_dma_contig_cleanup_ctx(vpbe_display_layer->alloc_ctx); /* Unregister video device */ video_unregister_device(&vpbe_display_layer->video_dev); } for (i = 0; i < VPBE_DISPLAY_MAX_DEVICES; i++) { kfree(disp_dev->dev[i]); disp_dev->dev[i] = NULL; } return 0; } static struct platform_driver vpbe_display_driver = { .driver = { .name = VPBE_DISPLAY_DRIVER, .bus = &platform_bus_type, }, .probe = vpbe_display_probe, .remove = vpbe_display_remove, }; module_platform_driver(vpbe_display_driver); MODULE_DESCRIPTION("TI DM644x/DM355/DM365 VPBE Display controller"); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Texas Instruments");
gpl-2.0
martinbrook/amlogic-meson3
arch/arm/mach-omap1/timer32k.c
952
5769
/* * linux/arch/arm/mach-omap1/timer32k.c * * OMAP 32K Timer * * Copyright (C) 2004 - 2005 Nokia Corporation * Partial timer rewrite and additional dynamic tick timer support by * Tony Lindgen <tony@atomide.com> and * Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com> * OMAP Dual-mode timer framework support by Timo Teras * * MPU timer code based on the older MPU timer code for OMAP * Copyright (C) 2000 RidgeRun, Inc. * Author: Greg Lonnon <glonnon@ridgerun.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 675 Mass Ave, Cambridge, MA 02139, USA. 
*/

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/io.h>

#include <asm/system.h>
#include <mach/hardware.h>
#include <asm/leds.h>
#include <asm/irq.h>
#include <asm/mach/irq.h>
#include <asm/mach/time.h>
#include <plat/dmtimer.h>

/* Forward declaration; the struct is defined at the bottom of this file. */
struct sys_timer omap_timer;

/*
 * ---------------------------------------------------------------------------
 * 32KHz OS timer
 *
 * This currently works only on 16xx, as 1510 does not have the continuous
 * 32KHz synchronous timer. The 32KHz synchronous timer is used to keep track
 * of time in addition to the 32KHz OS timer. Using only the 32KHz OS timer
 * on 1510 would be possible, but the timer would not be as accurate as
 * with the 32KHz synchronized timer.
 * ---------------------------------------------------------------------------
 */

/* 16xx specific defines */
#define OMAP1_32K_TIMER_BASE		0xfffb9000
#define OMAP1_32K_TIMER_CR		0x08
#define OMAP1_32K_TIMER_TVR		0x00
#define OMAP1_32K_TIMER_TCR		0x04

#define OMAP_32K_TICKS_PER_SEC		(32768)

/*
 * TRM says 1 / HZ = ( TVR + 1) / 32768, so TRV = (32768 / HZ) - 1
 * so with HZ = 128, TVR = 255.
 */
#define OMAP_32K_TIMER_TICK_PERIOD	((OMAP_32K_TICKS_PER_SEC / HZ) - 1)

#define JIFFIES_TO_HW_TICKS(nr_jiffies, clock_rate)			\
				(((nr_jiffies) * (clock_rate)) / HZ)

/*
 * Write a 16-bit value to a 32KHz timer register.
 * NOTE(review): writes use omap_writew (16-bit) while reads below use
 * omap_readl masked to 24 bits -- confirm register widths against the TRM.
 */
static inline void omap_32k_timer_write(int val, int reg)
{
	omap_writew(val, OMAP1_32K_TIMER_BASE + reg);
}

/* Read a 32KHz timer register; the mask keeps the low 24 bits only. */
static inline unsigned long omap_32k_timer_read(int reg)
{
	return omap_readl(OMAP1_32K_TIMER_BASE + reg) & 0xffffff;
}

/*
 * Load the timer value register and enable the timer.  A zero load value
 * is bumped to 1 so the timer is never started with an empty count.
 */
static inline void omap_32k_timer_start(unsigned long load_val)
{
	if (!load_val)
		load_val = 1;
	omap_32k_timer_write(load_val, OMAP1_32K_TIMER_TVR);
	omap_32k_timer_write(0x0f, OMAP1_32K_TIMER_CR);
}

/* Clear the control register, stopping the timer. */
static inline void omap_32k_timer_stop(void)
{
	omap_32k_timer_write(0x0, OMAP1_32K_TIMER_CR);
}

/* No explicit interrupt acknowledge is required on this timer. */
#define omap_32k_timer_ack_irq()

/* clockevents callback: program a one-shot event 'delta' ticks from now. */
static int omap_32k_timer_set_next_event(unsigned long delta,
					 struct clock_event_device *dev)
{
	omap_32k_timer_start(delta);

	return 0;
}

/*
 * clockevents callback: stop the timer, then restart it with the fixed
 * tick period only when entering periodic mode.
 */
static void omap_32k_timer_set_mode(enum clock_event_mode mode,
				    struct clock_event_device *evt)
{
	omap_32k_timer_stop();

	switch (mode) {
	case CLOCK_EVT_MODE_PERIODIC:
		omap_32k_timer_start(OMAP_32K_TIMER_TICK_PERIOD);
		break;
	case CLOCK_EVT_MODE_ONESHOT:
	case CLOCK_EVT_MODE_UNUSED:
	case CLOCK_EVT_MODE_SHUTDOWN:
		break;
	case CLOCK_EVT_MODE_RESUME:
		break;
	}
}

static struct clock_event_device clockevent_32k_timer = {
	.name		= "32k-timer",
	.features	= CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
	.shift		= 32,
	.set_next_event	= omap_32k_timer_set_next_event,
	.set_mode	= omap_32k_timer_set_mode,
};

/* Timer IRQ handler: ack (no-op here) and forward to the clockevent core. */
static irqreturn_t omap_32k_timer_interrupt(int irq, void *dev_id)
{
	struct clock_event_device *evt = &clockevent_32k_timer;
	omap_32k_timer_ack_irq();

	evt->event_handler(evt);
	return IRQ_HANDLED;
}

static struct irqaction omap_32k_timer_irq = {
	.name		= "32KHz timer",
	.flags		= IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
	.handler	= omap_32k_timer_interrupt,
};

/*
 * Hook up the timer IRQ, compute the clockevent scaling factors from the
 * 32768 Hz input clock, and register the device for CPU 0.
 */
static __init void omap_init_32k_timer(void)
{
	setup_irq(INT_OS_TIMER, &omap_32k_timer_irq);

	clockevent_32k_timer.mult = div_sc(OMAP_32K_TICKS_PER_SEC,
					   NSEC_PER_SEC,
					   clockevent_32k_timer.shift);
	clockevent_32k_timer.max_delta_ns =
		clockevent_delta2ns(0xfffffffe, &clockevent_32k_timer);
	clockevent_32k_timer.min_delta_ns =
		clockevent_delta2ns(1, &clockevent_32k_timer);

	clockevent_32k_timer.cpumask = cpumask_of(0);
	clockevents_register_device(&clockevent_32k_timer);
}

/*
 * ---------------------------------------------------------------------------
 * Timer initialization
 * ---------------------------------------------------------------------------
 */
static void __init omap_timer_init(void)
{
#ifdef CONFIG_OMAP_DM_TIMER
	omap_dm_timer_init();
#endif
	omap_init_32k_timer();
}

struct sys_timer omap_timer = {
	.init		= omap_timer_init,
};
gpl-2.0
srfarias/srfarias_kernel_msm8916
arch/x86/pci/mmconfig-shared.c
2232
18887
/*
 * mmconfig-shared.c - Low-level direct PCI config space access via
 *                     MMCONFIG - common code between i386 and x86-64.
 *
 * This code does:
 * - known chipset handling
 * - ACPI decoding and validation
 *
 * Per-architecture code takes care of the mappings and accesses
 * themselves.
 */

#include <linux/pci.h>
#include <linux/init.h>
#include <linux/acpi.h>
#include <linux/sfi_acpi.h>
#include <linux/bitmap.h>
#include <linux/dmi.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/rculist.h>
#include <asm/e820.h>
#include <asm/pci_x86.h>
#include <asm/acpi.h>

#define PREFIX "PCI: "

/* Indicate if the mmcfg resources have been placed into the resource table. */
static bool pci_mmcfg_running_state;
static bool pci_mmcfg_arch_init_failed;
static DEFINE_MUTEX(pci_mmcfg_lock);

LIST_HEAD(pci_mmcfg_list);

/* Release a region's resource, unlink it from the list and free it. */
static __init void pci_mmconfig_remove(struct pci_mmcfg_region *cfg)
{
	if (cfg->res.parent)
		release_resource(&cfg->res);
	list_del(&cfg->list);
	kfree(cfg);
}

/* Tear down the per-arch mappings and free every known MMCONFIG region. */
static __init void free_all_mmcfg(void)
{
	struct pci_mmcfg_region *cfg, *tmp;

	pci_mmcfg_arch_free();
	list_for_each_entry_safe(cfg, tmp, &pci_mmcfg_list, list)
		pci_mmconfig_remove(cfg);
}

static void list_add_sorted(struct pci_mmcfg_region *new)
{
	struct pci_mmcfg_region *cfg;

	/* keep list sorted by segment and starting bus number */
	list_for_each_entry_rcu(cfg, &pci_mmcfg_list, list) {
		if (cfg->segment > new->segment ||
		    (cfg->segment == new->segment &&
		     cfg->start_bus >= new->start_bus)) {
			list_add_tail_rcu(&new->list, &cfg->list);
			return;
		}
	}
	list_add_tail_rcu(&new->list, &pci_mmcfg_list);
}

/*
 * Allocate and fill a pci_mmcfg_region covering buses [start, end] of
 * 'segment' at physical base 'addr'.  Returns NULL on addr == 0 or OOM.
 * The region is NOT linked into pci_mmcfg_list here.
 */
static struct pci_mmcfg_region *pci_mmconfig_alloc(int segment, int start,
						   int end, u64 addr)
{
	struct pci_mmcfg_region *new;
	struct resource *res;

	if (addr == 0)
		return NULL;

	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (!new)
		return NULL;

	new->address = addr;
	new->segment = segment;
	new->start_bus = start;
	new->end_bus = end;

	res = &new->res;
	res->start = addr + PCI_MMCFG_BUS_OFFSET(start);
	res->end = addr + PCI_MMCFG_BUS_OFFSET(end + 1) - 1;
	res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
	snprintf(new->name, PCI_MMCFG_RESOURCE_NAME_LEN,
		 "PCI MMCONFIG %04x [bus %02x-%02x]", segment, start, end);
	res->name = new->name;

	return new;
}

/* Allocate a region and, on success, link it into the sorted list. */
static __init struct pci_mmcfg_region *pci_mmconfig_add(int segment, int start,
							int end, u64 addr)
{
	struct pci_mmcfg_region *new;

	new = pci_mmconfig_alloc(segment, start, end, addr);
	if (new) {
		mutex_lock(&pci_mmcfg_lock);
		list_add_sorted(new);
		mutex_unlock(&pci_mmcfg_lock);

		pr_info(PREFIX
		       "MMCONFIG for domain %04x [bus %02x-%02x] at %pR "
		       "(base %#lx)\n",
		       segment, start, end, &new->res, (unsigned long)addr);
	}

	return new;
}

/* Find the region whose bus range contains 'bus' in 'segment', or NULL. */
struct pci_mmcfg_region *pci_mmconfig_lookup(int segment, int bus)
{
	struct pci_mmcfg_region *cfg;

	list_for_each_entry_rcu(cfg, &pci_mmcfg_list, list)
		if (cfg->segment == segment &&
		    cfg->start_bus <= bus && bus <= cfg->end_bus)
			return cfg;

	return NULL;
}

/* Chipset probe: read the E7520 MCH window register at config offset 0xce. */
static const char __init *pci_mmcfg_e7520(void)
{
	u32 win;
	raw_pci_ops->read(0, 0, PCI_DEVFN(0, 0), 0xce, 2, &win);

	win = win & 0xf000;
	if (win == 0x0000 || win == 0xf000)
		return NULL;

	if (pci_mmconfig_add(0, 0, 255, win << 16) == NULL)
		return NULL;

	return "Intel Corporation E7520 Memory Controller Hub";
}

/* Chipset probe: decode the 945-series PCIEXBAR at config offset 0x48. */
static const char __init *pci_mmcfg_intel_945(void)
{
	u32 pciexbar, mask = 0, len = 0;

	raw_pci_ops->read(0, 0, PCI_DEVFN(0, 0), 0x48, 4, &pciexbar);

	/* Enable bit */
	if (!(pciexbar & 1))
		return NULL;

	/* Size bits */
	switch ((pciexbar >> 1) & 3) {
	case 0:
		mask = 0xf0000000U;
		len  = 0x10000000U;
		break;
	case 1:
		mask = 0xf8000000U;
		len  = 0x08000000U;
		break;
	case 2:
		mask = 0xfc000000U;
		len  = 0x04000000U;
		break;
	default:
		return NULL;
	}

	/* Errata #2, things break when not aligned on a 256Mb boundary */
	/* Can only happen in 64M/128M mode */

	if ((pciexbar & mask) & 0x0fffffffU)
		return NULL;

	/* Don't hit the APIC registers and their friends */
	if ((pciexbar & mask) >= 0xf0000000U)
		return NULL;

	if (pci_mmconfig_add(0, 0, (len >> 20) - 1, pciexbar & mask) == NULL)
		return NULL;

	return "Intel Corporation 945G/GZ/P/PL Express Memory Controller Hub";
}

/*
 * Chipset probe: read the AMD Family 10h MMIO config base MSR and register
 * one region per segment (more than 8 bus-range bits spill into segments).
 */
static const char __init *pci_mmcfg_amd_fam10h(void)
{
	u32 low, high, address;
	u64 base, msr;
	int i;
	unsigned segnbits = 0, busnbits, end_bus;

	if (!(pci_probe & PCI_CHECK_ENABLE_AMD_MMCONF))
		return NULL;

	address = MSR_FAM10H_MMIO_CONF_BASE;
	if (rdmsr_safe(address, &low, &high))
		return NULL;

	msr = high;
	msr <<= 32;
	msr |= low;

	/* mmconfig is not enable */
	if (!(msr & FAM10H_MMIO_CONF_ENABLE))
		return NULL;

	base = msr & (FAM10H_MMIO_CONF_BASE_MASK<<FAM10H_MMIO_CONF_BASE_SHIFT);

	busnbits = (msr >> FAM10H_MMIO_CONF_BUSRANGE_SHIFT) &
			 FAM10H_MMIO_CONF_BUSRANGE_MASK;

	/*
	 * only handle bus 0 ?
	 * need to skip it
	 */
	if (!busnbits)
		return NULL;

	if (busnbits > 8) {
		segnbits = busnbits - 8;
		busnbits = 8;
	}

	end_bus = (1 << busnbits) - 1;
	for (i = 0; i < (1 << segnbits); i++)
		if (pci_mmconfig_add(i, 0, end_bus,
				     base + (1<<28) * i) == NULL) {
			free_all_mmcfg();
			return NULL;
		}

	return "AMD Family 10h NB";
}

static bool __initdata mcp55_checked;

/*
 * Chipset probe: scan all 256 buses for nVidia MCP55 bridges (device
 * 0x0369) and register a region per enabled extended-config window.
 * Runs at most once (mcp55_checked) and only when ACPI is disabled and
 * no other probe has populated the list yet.
 */
static const char __init *pci_mmcfg_nvidia_mcp55(void)
{
	int bus;
	int mcp55_mmconf_found = 0;

	static const u32 extcfg_regnum		= 0x90;
	static const u32 extcfg_regsize		= 4;
	static const u32 extcfg_enable_mask	= 1<<31;
	static const u32 extcfg_start_mask	= 0xff<<16;
	static const int extcfg_start_shift	= 16;
	static const u32 extcfg_size_mask	= 0x3<<28;
	static const int extcfg_size_shift	= 28;
	static const int extcfg_sizebus[]	= {0x100, 0x80, 0x40, 0x20};
	static const u32 extcfg_base_mask[]	= {0x7ff8, 0x7ffc, 0x7ffe,
						   0x7fff};
	static const int extcfg_base_lshift	= 25;

	/*
	 * do check if amd fam10h already took over
	 */
	if (!acpi_disabled || !list_empty(&pci_mmcfg_list) || mcp55_checked)
		return NULL;

	mcp55_checked = true;
	for (bus = 0; bus < 256; bus++) {
		u64 base;
		u32 l, extcfg;
		u16 vendor, device;
		int start, size_index, end;

		raw_pci_ops->read(0, bus, PCI_DEVFN(0, 0), 0, 4, &l);
		vendor = l & 0xffff;
		device = (l >> 16) & 0xffff;

		if (PCI_VENDOR_ID_NVIDIA != vendor || 0x0369 != device)
			continue;

		raw_pci_ops->read(0, bus, PCI_DEVFN(0, 0), extcfg_regnum,
				  extcfg_regsize, &extcfg);

		if (!(extcfg & extcfg_enable_mask))
			continue;

		size_index = (extcfg & extcfg_size_mask) >> extcfg_size_shift;
		base = extcfg & extcfg_base_mask[size_index];
		/* base could > 4G */
		base <<= extcfg_base_lshift;
		start = (extcfg & extcfg_start_mask) >> extcfg_start_shift;
		end = start + extcfg_sizebus[size_index] - 1;
		if (pci_mmconfig_add(0, start, end, base) == NULL)
			continue;
		mcp55_mmconf_found++;
	}

	if (!mcp55_mmconf_found)
		return NULL;

	return "nVidia MCP55";
}

struct pci_mmcfg_hostbridge_probe {
	u32 bus;
	u32 devfn;
	u32 vendor;
	u32 device;
	const char *(*probe)(void);
};

static struct pci_mmcfg_hostbridge_probe pci_mmcfg_probes[] __initdata = {
	{ 0, PCI_DEVFN(0, 0), PCI_VENDOR_ID_INTEL,
	  PCI_DEVICE_ID_INTEL_E7520_MCH, pci_mmcfg_e7520 },
	{ 0, PCI_DEVFN(0, 0), PCI_VENDOR_ID_INTEL,
	  PCI_DEVICE_ID_INTEL_82945G_HB, pci_mmcfg_intel_945 },
	{ 0, PCI_DEVFN(0x18, 0), PCI_VENDOR_ID_AMD,
	  0x1200, pci_mmcfg_amd_fam10h },
	{ 0xff, PCI_DEVFN(0, 0), PCI_VENDOR_ID_AMD,
	  0x1200, pci_mmcfg_amd_fam10h },
	{ 0, PCI_DEVFN(0, 0), PCI_VENDOR_ID_NVIDIA,
	  0x0369, pci_mmcfg_nvidia_mcp55 },
};

/*
 * Sanity-fix end_bus values: clamp inverted ranges to 255 and truncate any
 * region that overlaps the start of the next (list is sorted).
 */
static void __init pci_mmcfg_check_end_bus_number(void)
{
	struct pci_mmcfg_region *cfg, *cfgx;

	/* Fixup overlaps */
	list_for_each_entry(cfg, &pci_mmcfg_list, list) {
		if (cfg->end_bus < cfg->start_bus)
			cfg->end_bus = 255;

		/* Don't access the list head ! */
		if (cfg->list.next == &pci_mmcfg_list)
			break;

		cfgx = list_entry(cfg->list.next, typeof(*cfg), list);
		if (cfg->end_bus >= cfgx->start_bus)
			cfg->end_bus = cfgx->start_bus - 1;
	}
}

/*
 * Run every known-chipset probe against the live host bridge IDs.
 * Returns nonzero when at least one probe registered an MMCONFIG region.
 */
static int __init pci_mmcfg_check_hostbridge(void)
{
	u32 l;
	u32 bus, devfn;
	u16 vendor, device;
	int i;
	const char *name;

	if (!raw_pci_ops)
		return 0;

	free_all_mmcfg();

	for (i = 0; i < ARRAY_SIZE(pci_mmcfg_probes); i++) {
		bus = pci_mmcfg_probes[i].bus;
		devfn = pci_mmcfg_probes[i].devfn;
		raw_pci_ops->read(0, bus, devfn, 0, 4, &l);
		vendor = l & 0xffff;
		device = (l >> 16) & 0xffff;

		name = NULL;
		if (pci_mmcfg_probes[i].vendor == vendor &&
		    pci_mmcfg_probes[i].device == device)
			name = pci_mmcfg_probes[i].probe();

		if (name)
			pr_info(PREFIX "%s with MMCONFIG support\n", name);
	}

	/* some end_bus_number is crazy, fix it */
	pci_mmcfg_check_end_bus_number();

	return !list_empty(&pci_mmcfg_list);
}

/*
 * ACPI resource walker callback: set mcfg_res->flags = 1 and stop the walk
 * when this resource fully contains the MMCONFIG range being checked.
 */
static acpi_status check_mcfg_resource(struct acpi_resource *res, void *data)
{
	struct resource *mcfg_res = data;
	struct acpi_resource_address64 address;
	acpi_status status;

	if (res->type == ACPI_RESOURCE_TYPE_FIXED_MEMORY32) {
		struct acpi_resource_fixed_memory32 *fixmem32 =
			&res->data.fixed_memory32;
		if (!fixmem32)
			return AE_OK;
		if ((mcfg_res->start >= fixmem32->address) &&
		    (mcfg_res->end < (fixmem32->address +
				      fixmem32->address_length))) {
			mcfg_res->flags = 1;
			return AE_CTRL_TERMINATE;
		}
	}
	if ((res->type != ACPI_RESOURCE_TYPE_ADDRESS32) &&
	    (res->type != ACPI_RESOURCE_TYPE_ADDRESS64))
		return AE_OK;

	status = acpi_resource_to_address64(res, &address);
	if (ACPI_FAILURE(status) ||
	   (address.address_length <= 0) ||
	   (address.resource_type != ACPI_MEMORY_RANGE))
		return AE_OK;

	if ((mcfg_res->start >= address.minimum) &&
	    (mcfg_res->end < (address.minimum + address.address_length))) {
		mcfg_res->flags = 1;
		return AE_CTRL_TERMINATE;
	}
	return AE_OK;
}

/* acpi_get_devices() callback: walk a motherboard device's _CRS resources. */
static acpi_status find_mboard_resource(acpi_handle handle, u32 lvl,
					void *context, void **rv)
{
	struct resource *mcfg_res = context;

	acpi_walk_resources(handle, METHOD_NAME__CRS,
			    check_mcfg_resource, context);

	if (mcfg_res->flags)
		return AE_CTRL_TERMINATE;

	return AE_OK;
}

/*
 * Check whether [start, end) is covered by a PNP0C01/PNP0C02 motherboard
 * resource.  Returns nonzero when reserved; 'not_used' exists only to match
 * the check_reserved_t signature shared with e820_all_mapped().
 */
static int is_acpi_reserved(u64 start, u64 end, unsigned not_used)
{
	struct resource mcfg_res;

	mcfg_res.start = start;
	mcfg_res.end = end - 1;
	mcfg_res.flags = 0;

	acpi_get_devices("PNP0C01", find_mboard_resource, &mcfg_res, NULL);

	if (!mcfg_res.flags)
		acpi_get_devices("PNP0C02", find_mboard_resource, &mcfg_res,
				 NULL);

	return mcfg_res.flags;
}

typedef int (*check_reserved_t)(u64 start, u64 end, unsigned type);

/*
 * Verify the region is reserved via 'is_reserved'.  If only a prefix of it
 * is reserved, halve the size (down to a 16MB minimum) until a reserved
 * prefix is found, then shrink the region's bus range to match.
 * Returns 1 if (some prefix of) the region is reserved, 0 otherwise.
 */
static int __ref is_mmconf_reserved(check_reserved_t is_reserved,
				    struct pci_mmcfg_region *cfg,
				    struct device *dev, int with_e820)
{
	u64 addr = cfg->res.start;
	u64 size = resource_size(&cfg->res);
	u64 old_size = size;
	int num_buses;
	char *method = with_e820 ? "E820" : "ACPI motherboard resources";

	while (!is_reserved(addr, addr + size, E820_RESERVED)) {
		size >>= 1;
		if (size < (16UL<<20))
			break;
	}

	if (size < (16UL<<20) && size != old_size)
		return 0;

	if (dev)
		dev_info(dev, "MMCONFIG at %pR reserved in %s\n",
			 &cfg->res, method);
	else
		pr_info(PREFIX "MMCONFIG at %pR reserved in %s\n",
		       &cfg->res, method);

	if (old_size != size) {
		/* update end_bus */
		cfg->end_bus = cfg->start_bus + ((size>>20) - 1);
		num_buses = cfg->end_bus - cfg->start_bus + 1;
		cfg->res.end = cfg->res.start +
		    PCI_MMCFG_BUS_OFFSET(num_buses) - 1;
		snprintf(cfg->name, PCI_MMCFG_RESOURCE_NAME_LEN,
			 "PCI MMCONFIG %04x [bus %02x-%02x]",
			 cfg->segment, cfg->start_bus, cfg->end_bus);

		if (dev)
			dev_info(dev, "MMCONFIG "
				"at %pR (base %#lx) (size reduced!)\n",
				&cfg->res, (unsigned long) cfg->address);
		else
			pr_info(PREFIX
			       "MMCONFIG for %04x [bus%02x-%02x] "
			       "at %pR (base %#lx) (size reduced!)\n",
			       cfg->segment, cfg->start_bus, cfg->end_bus,
			       &cfg->res, (unsigned long) cfg->address);
	}

	return 1;
}

/*
 * Decide whether an MMCONFIG region may be trusted: first via ACPI
 * motherboard resources (unless 'early'), then via the E820 map.
 */
static int __ref pci_mmcfg_check_reserved(struct device *dev,
		  struct pci_mmcfg_region *cfg, int early)
{
	if (!early && !acpi_disabled) {
		if (is_mmconf_reserved(is_acpi_reserved, cfg, dev, 0))
			return 1;

		if (dev)
			dev_info(dev, FW_INFO
				 "MMCONFIG at %pR not reserved in "
				 "ACPI motherboard resources\n",
				 &cfg->res);
		else
			pr_info(FW_INFO PREFIX
			       "MMCONFIG at %pR not reserved in "
			       "ACPI motherboard resources\n",
			       &cfg->res);
	}

	/*
	 * e820_all_mapped() is marked as __init.
	 * All entries from ACPI MCFG table have been checked at boot time.
	 * For MCFG information constructed from hotpluggable host bridge's
	 * _CBA method, just assume it's reserved.
	 */
	if (pci_mmcfg_running_state)
		return 1;

	/* Don't try to do this check unless configuration
	   type 1 is available. how about type 2 ?*/
	if (raw_pci_ops)
		return is_mmconf_reserved(e820_all_mapped, cfg, dev, 1);

	return 0;
}

/* Drop ALL regions as soon as one fails the reservation check. */
static void __init pci_mmcfg_reject_broken(int early)
{
	struct pci_mmcfg_region *cfg;

	list_for_each_entry(cfg, &pci_mmcfg_list, list) {
		if (pci_mmcfg_check_reserved(NULL, cfg, early) == 0) {
			pr_info(PREFIX "not using MMCONFIG\n");
			free_all_mmcfg();
			return;
		}
	}
}

/*
 * Reject MCFG entries above 4GB except on SGI platforms or recent
 * (2010+) BIOSes with a revision >= 1 table.
 */
static int __init acpi_mcfg_check_entry(struct acpi_table_mcfg *mcfg,
					struct acpi_mcfg_allocation *cfg)
{
	int year;

	if (cfg->address < 0xFFFFFFFF)
		return 0;

	if (!strncmp(mcfg->header.oem_id, "SGI", 3))
		return 0;

	if (mcfg->header.revision >= 1) {
		if (dmi_get_date(DMI_BIOS_DATE, &year, NULL, NULL) &&
		    year >= 2010)
			return 0;
	}

	pr_err(PREFIX "MCFG region for %04x [bus %02x-%02x] at %#llx "
	       "is above 4GB, ignored\n", cfg->pci_segment,
	       cfg->start_bus_number, cfg->end_bus_number, cfg->address);
	return -EINVAL;
}

/* Parse the ACPI MCFG table and register one region per valid entry. */
static int __init pci_parse_mcfg(struct acpi_table_header *header)
{
	struct acpi_table_mcfg *mcfg;
	struct acpi_mcfg_allocation *cfg_table, *cfg;
	unsigned long i;
	int entries;

	if (!header)
		return -EINVAL;

	mcfg = (struct acpi_table_mcfg *)header;

	/* how many config structures do we have */
	free_all_mmcfg();
	entries = 0;
	i = header->length - sizeof(struct acpi_table_mcfg);
	while (i >= sizeof(struct acpi_mcfg_allocation)) {
		entries++;
		i -= sizeof(struct acpi_mcfg_allocation);
	}
	if (entries == 0) {
		pr_err(PREFIX "MMCONFIG has no entries\n");
		return -ENODEV;
	}

	cfg_table = (struct acpi_mcfg_allocation *) &mcfg[1];
	for (i = 0; i < entries; i++) {
		cfg = &cfg_table[i];
		if (acpi_mcfg_check_entry(mcfg, cfg)) {
			free_all_mmcfg();
			return -ENODEV;
		}

		if (pci_mmconfig_add(cfg->pci_segment, cfg->start_bus_number,
				   cfg->end_bus_number, cfg->address) == NULL) {
			pr_warn(PREFIX "no memory for MCFG entries\n");
			free_all_mmcfg();
			return -ENOMEM;
		}
	}

	return 0;
}

/*
 * Validate the collected regions, update pcibios_last_bus from segment 0,
 * and flip PCI_PROBE_MMCONF on (or tear everything down on arch failure).
 */
static void __init __pci_mmcfg_init(int early)
{
	pci_mmcfg_reject_broken(early);
	if (list_empty(&pci_mmcfg_list))
		return;

	if (pcibios_last_bus < 0) {
		const struct pci_mmcfg_region *cfg;

		list_for_each_entry(cfg, &pci_mmcfg_list, list) {
			if (cfg->segment)
				break;
			pcibios_last_bus = cfg->end_bus;
		}
	}

	if (pci_mmcfg_arch_init())
		pci_probe = (pci_probe & ~PCI_PROBE_MASK) | PCI_PROBE_MMCONF;
	else {
		free_all_mmcfg();
		pci_mmcfg_arch_init_failed = true;
	}
}

static int __initdata known_bridge;

void __init pci_mmcfg_early_init(void)
{
	if (pci_probe & PCI_PROBE_MMCONF) {
		if (pci_mmcfg_check_hostbridge())
			known_bridge = 1;
		else
			acpi_sfi_table_parse(ACPI_SIG_MCFG, pci_parse_mcfg);
		__pci_mmcfg_init(1);
	}
}

void __init pci_mmcfg_late_init(void)
{
	/* MMCONFIG disabled */
	if ((pci_probe & PCI_PROBE_MMCONF) == 0)
		return;

	if (known_bridge)
		return;

	/* MMCONFIG hasn't been enabled yet, try again */
	if (pci_probe & PCI_PROBE_MASK & ~PCI_PROBE_MMCONF) {
		acpi_sfi_table_parse(ACPI_SIG_MCFG, pci_parse_mcfg);
		__pci_mmcfg_init(0);
	}
}

static int __init pci_mmcfg_late_insert_resources(void)
{
	struct pci_mmcfg_region *cfg;

	pci_mmcfg_running_state = true;

	/* If we are not using MMCONFIG, don't insert the resources. */
	if ((pci_probe & PCI_PROBE_MMCONF) == 0)
		return 1;

	/*
	 * Attempt to insert the mmcfg resources but not with the busy flag
	 * marked so it won't cause request errors when __request_region is
	 * called.
	 */
	list_for_each_entry(cfg, &pci_mmcfg_list, list)
		if (!cfg->res.parent)
			insert_resource(&iomem_resource, &cfg->res);

	return 0;
}

/*
 * Perform MMCONFIG resource insertion after PCI initialization to allow for
 * misprogrammed MCFG tables that state larger sizes but actually conflict
 * with other system resources.
 */
late_initcall(pci_mmcfg_late_insert_resources);

/* Add MMCFG information for host bridges */
int pci_mmconfig_insert(struct device *dev, u16 seg, u8 start, u8 end,
			phys_addr_t addr)
{
	int rc;
	struct resource *tmp = NULL;
	struct pci_mmcfg_region *cfg;

	if (!(pci_probe & PCI_PROBE_MMCONF) || pci_mmcfg_arch_init_failed)
		return -ENODEV;

	if (start > end)
		return -EINVAL;

	mutex_lock(&pci_mmcfg_lock);
	cfg = pci_mmconfig_lookup(seg, start);
	if (cfg) {
		if (cfg->end_bus < end)
			dev_info(dev, FW_INFO
				 "MMCONFIG for "
				 "domain %04x [bus %02x-%02x] "
				 "only partially covers this bridge\n",
				  cfg->segment, cfg->start_bus, cfg->end_bus);
		mutex_unlock(&pci_mmcfg_lock);
		return -EEXIST;
	}

	if (!addr) {
		mutex_unlock(&pci_mmcfg_lock);
		return -EINVAL;
	}

	rc = -EBUSY;
	cfg = pci_mmconfig_alloc(seg, start, end, addr);
	if (cfg == NULL) {
		dev_warn(dev, "fail to add MMCONFIG (out of memory)\n");
		rc = -ENOMEM;
	} else if (!pci_mmcfg_check_reserved(dev, cfg, 0)) {
		dev_warn(dev, FW_BUG "MMCONFIG %pR isn't reserved\n",
			 &cfg->res);
	} else {
		/* Insert resource if it's not in boot stage */
		if (pci_mmcfg_running_state)
			tmp = insert_resource_conflict(&iomem_resource,
						       &cfg->res);

		if (tmp) {
			dev_warn(dev, "MMCONFIG %pR conflicts with "
				 "%s %pR\n",
				 &cfg->res, tmp->name, tmp);
		} else if (pci_mmcfg_arch_map(cfg)) {
			dev_warn(dev, "fail to map MMCONFIG %pR.\n",
				 &cfg->res);
		} else {
			list_add_sorted(cfg);
			dev_info(dev, "MMCONFIG at %pR (base %#lx)\n",
				 &cfg->res, (unsigned long)addr);
			cfg = NULL;
			rc = 0;
		}
	}

	/* cfg is non-NULL only on a failure path; undo the allocation. */
	if (cfg) {
		if (cfg->res.parent)
			release_resource(&cfg->res);

		kfree(cfg);
	}

	mutex_unlock(&pci_mmcfg_lock);

	return rc;
}

/* Delete MMCFG information for host bridges */
int pci_mmconfig_delete(u16 seg, u8 start, u8 end)
{
	struct pci_mmcfg_region *cfg;

	mutex_lock(&pci_mmcfg_lock);
	list_for_each_entry_rcu(cfg, &pci_mmcfg_list, list)
		if (cfg->segment == seg && cfg->start_bus == start &&
		    cfg->end_bus == end) {
			list_del_rcu(&cfg->list);
			synchronize_rcu();
			pci_mmcfg_arch_unmap(cfg);
			if (cfg->res.parent)
				release_resource(&cfg->res);
			mutex_unlock(&pci_mmcfg_lock);
			kfree(cfg);
			return 0;
		}
	mutex_unlock(&pci_mmcfg_lock);

	return -ENOENT;
}
gpl-2.0
gerard87/kernel_shamu_n_preview
drivers/misc/bmp085-i2c.c
2488
2236
/* * Copyright (c) 2012 Bosch Sensortec GmbH * Copyright (c) 2012 Unixphere AB * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/module.h> #include <linux/i2c.h> #include <linux/err.h> #include "bmp085.h" #define BMP085_I2C_ADDRESS 0x77 static const unsigned short normal_i2c[] = { BMP085_I2C_ADDRESS, I2C_CLIENT_END }; static int bmp085_i2c_detect(struct i2c_client *client, struct i2c_board_info *info) { if (client->addr != BMP085_I2C_ADDRESS) return -ENODEV; return bmp085_detect(&client->dev); } static int bmp085_i2c_probe(struct i2c_client *client, const struct i2c_device_id *id) { int err; struct regmap *regmap = devm_regmap_init_i2c(client, &bmp085_regmap_config); if (IS_ERR(regmap)) { err = PTR_ERR(regmap); dev_err(&client->dev, "Failed to init regmap: %d\n", err); return err; } return bmp085_probe(&client->dev, regmap); } static int bmp085_i2c_remove(struct i2c_client *client) { return bmp085_remove(&client->dev); } static const struct i2c_device_id bmp085_id[] = { { BMP085_NAME, 0 }, { "bmp180", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, bmp085_id); static struct i2c_driver bmp085_i2c_driver = { .driver = { .owner = THIS_MODULE, .name = BMP085_NAME, }, .id_table = bmp085_id, .probe = bmp085_i2c_probe, .remove = bmp085_i2c_remove, .detect = bmp085_i2c_detect, .address_list = normal_i2c }; 
module_i2c_driver(bmp085_i2c_driver); MODULE_AUTHOR("Eric Andersson <eric.andersson@unixphere.com>"); MODULE_DESCRIPTION("BMP085 I2C bus driver"); MODULE_LICENSE("GPL");
gpl-2.0
elixirflash/dm-keepfast
arch/mips/ath79/setup.c
3000
4352
/* * Atheros AR71XX/AR724X/AR913X specific setup * * Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org> * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org> * * Parts of this file are based on Atheros' 2.6.15 BSP * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published * by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/bootmem.h> #include <linux/err.h> #include <linux/clk.h> #include <asm/bootinfo.h> #include <asm/time.h> /* for mips_hpt_frequency */ #include <asm/reboot.h> /* for _machine_{restart,halt} */ #include <asm/mips_machine.h> #include <asm/mach-ath79/ath79.h> #include <asm/mach-ath79/ar71xx_regs.h> #include "common.h" #include "dev-common.h" #include "machtypes.h" #define ATH79_SYS_TYPE_LEN 64 #define AR71XX_BASE_FREQ 40000000 #define AR724X_BASE_FREQ 5000000 #define AR913X_BASE_FREQ 5000000 static char ath79_sys_type[ATH79_SYS_TYPE_LEN]; static void ath79_restart(char *command) { ath79_device_reset_set(AR71XX_RESET_FULL_CHIP); for (;;) if (cpu_wait) cpu_wait(); } static void ath79_halt(void) { while (1) cpu_wait(); } static void __init ath79_detect_mem_size(void) { unsigned long size; for (size = ATH79_MEM_SIZE_MIN; size < ATH79_MEM_SIZE_MAX; size <<= 1) { if (!memcmp(ath79_detect_mem_size, ath79_detect_mem_size + size, 1024)) break; } add_memory_region(0, size, BOOT_MEM_RAM); } static void __init ath79_detect_sys_type(void) { char *chip = "????"; u32 id; u32 major; u32 minor; u32 rev = 0; id = ath79_reset_rr(AR71XX_RESET_REG_REV_ID); major = id & REV_ID_MAJOR_MASK; switch (major) { case REV_ID_MAJOR_AR71XX: minor = id & AR71XX_REV_ID_MINOR_MASK; rev = id >> AR71XX_REV_ID_REVISION_SHIFT; rev &= AR71XX_REV_ID_REVISION_MASK; switch (minor) { case AR71XX_REV_ID_MINOR_AR7130: ath79_soc = ATH79_SOC_AR7130; chip = "7130"; break; case AR71XX_REV_ID_MINOR_AR7141: ath79_soc = ATH79_SOC_AR7141; chip = "7141"; 
break; case AR71XX_REV_ID_MINOR_AR7161: ath79_soc = ATH79_SOC_AR7161; chip = "7161"; break; } break; case REV_ID_MAJOR_AR7240: ath79_soc = ATH79_SOC_AR7240; chip = "7240"; rev = (id & AR724X_REV_ID_REVISION_MASK); break; case REV_ID_MAJOR_AR7241: ath79_soc = ATH79_SOC_AR7241; chip = "7241"; rev = (id & AR724X_REV_ID_REVISION_MASK); break; case REV_ID_MAJOR_AR7242: ath79_soc = ATH79_SOC_AR7242; chip = "7242"; rev = (id & AR724X_REV_ID_REVISION_MASK); break; case REV_ID_MAJOR_AR913X: minor = id & AR913X_REV_ID_MINOR_MASK; rev = id >> AR913X_REV_ID_REVISION_SHIFT; rev &= AR913X_REV_ID_REVISION_MASK; switch (minor) { case AR913X_REV_ID_MINOR_AR9130: ath79_soc = ATH79_SOC_AR9130; chip = "9130"; break; case AR913X_REV_ID_MINOR_AR9132: ath79_soc = ATH79_SOC_AR9132; chip = "9132"; break; } break; default: panic("ath79: unknown SoC, id:0x%08x\n", id); } sprintf(ath79_sys_type, "Atheros AR%s rev %u", chip, rev); pr_info("SoC: %s\n", ath79_sys_type); } const char *get_system_type(void) { return ath79_sys_type; } unsigned int __cpuinit get_c0_compare_int(void) { return CP0_LEGACY_COMPARE_IRQ; } void __init plat_mem_setup(void) { set_io_port_base(KSEG1); ath79_reset_base = ioremap_nocache(AR71XX_RESET_BASE, AR71XX_RESET_SIZE); ath79_pll_base = ioremap_nocache(AR71XX_PLL_BASE, AR71XX_PLL_SIZE); ath79_ddr_base = ioremap_nocache(AR71XX_DDR_CTRL_BASE, AR71XX_DDR_CTRL_SIZE); ath79_detect_sys_type(); ath79_detect_mem_size(); ath79_clocks_init(); _machine_restart = ath79_restart; _machine_halt = ath79_halt; pm_power_off = ath79_halt; } void __init plat_time_init(void) { struct clk *clk; clk = clk_get(NULL, "cpu"); if (IS_ERR(clk)) panic("unable to get CPU clock, err=%ld", PTR_ERR(clk)); mips_hpt_frequency = clk_get_rate(clk) / 2; } static int __init ath79_setup(void) { ath79_gpio_init(); ath79_register_uart(); ath79_register_wdt(); mips_machine_setup(); return 0; } arch_initcall(ath79_setup); static void __init ath79_generic_init(void) { /* Nothing to do */ } 
MIPS_MACHINE(ATH79_MACH_GENERIC, "Generic", "Generic AR71XX/AR724X/AR913X based board", ath79_generic_init);
gpl-2.0
kuba160/tf300t-kernel
sound/mips/hal2.c
4024
25898
/* * Driver for A2 audio system used in SGI machines * Copyright (c) 2008 Thomas Bogendoerfer <tsbogend@alpha.fanken.de> * * Based on OSS code from Ladislav Michl <ladis@linux-mips.org>, which * was based on code from Ulf Carlsson * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/dma-mapping.h> #include <linux/platform_device.h> #include <linux/io.h> #include <linux/slab.h> #include <asm/sgi/hpc3.h> #include <asm/sgi/ip22.h> #include <sound/core.h> #include <sound/control.h> #include <sound/pcm.h> #include <sound/pcm-indirect.h> #include <sound/initval.h> #include "hal2.h" static int index = SNDRV_DEFAULT_IDX1; /* Index 0-MAX */ static char *id = SNDRV_DEFAULT_STR1; /* ID for this card */ module_param(index, int, 0444); MODULE_PARM_DESC(index, "Index value for SGI HAL2 soundcard."); module_param(id, charp, 0444); MODULE_PARM_DESC(id, "ID string for SGI HAL2 soundcard."); MODULE_DESCRIPTION("ALSA driver for SGI HAL2 audio"); MODULE_AUTHOR("Thomas Bogendoerfer"); MODULE_LICENSE("GPL"); #define H2_BLOCK_SIZE 1024 #define H2_BUF_SIZE 16384 struct hal2_pbus { struct hpc3_pbus_dmacregs *pbus; int pbusnr; unsigned int ctrl; /* Current state of pbus->pbdma_ctrl */ }; struct hal2_desc { struct hpc_dma_desc desc; u32 pad; /* padding */ }; struct hal2_codec { struct snd_pcm_indirect pcm_indirect; struct 
snd_pcm_substream *substream; unsigned char *buffer; dma_addr_t buffer_dma; struct hal2_desc *desc; dma_addr_t desc_dma; int desc_count; struct hal2_pbus pbus; int voices; /* mono/stereo */ unsigned int sample_rate; unsigned int master; /* Master frequency */ unsigned short mod; /* MOD value */ unsigned short inc; /* INC value */ }; #define H2_MIX_OUTPUT_ATT 0 #define H2_MIX_INPUT_GAIN 1 struct snd_hal2 { struct snd_card *card; struct hal2_ctl_regs *ctl_regs; /* HAL2 ctl registers */ struct hal2_aes_regs *aes_regs; /* HAL2 aes registers */ struct hal2_vol_regs *vol_regs; /* HAL2 vol registers */ struct hal2_syn_regs *syn_regs; /* HAL2 syn registers */ struct hal2_codec dac; struct hal2_codec adc; }; #define H2_INDIRECT_WAIT(regs) while (hal2_read(&regs->isr) & H2_ISR_TSTATUS); #define H2_READ_ADDR(addr) (addr | (1<<7)) #define H2_WRITE_ADDR(addr) (addr) static inline u32 hal2_read(u32 *reg) { return __raw_readl(reg); } static inline void hal2_write(u32 val, u32 *reg) { __raw_writel(val, reg); } static u32 hal2_i_read32(struct snd_hal2 *hal2, u16 addr) { u32 ret; struct hal2_ctl_regs *regs = hal2->ctl_regs; hal2_write(H2_READ_ADDR(addr), &regs->iar); H2_INDIRECT_WAIT(regs); ret = hal2_read(&regs->idr0) & 0xffff; hal2_write(H2_READ_ADDR(addr) | 0x1, &regs->iar); H2_INDIRECT_WAIT(regs); ret |= (hal2_read(&regs->idr0) & 0xffff) << 16; return ret; } static void hal2_i_write16(struct snd_hal2 *hal2, u16 addr, u16 val) { struct hal2_ctl_regs *regs = hal2->ctl_regs; hal2_write(val, &regs->idr0); hal2_write(0, &regs->idr1); hal2_write(0, &regs->idr2); hal2_write(0, &regs->idr3); hal2_write(H2_WRITE_ADDR(addr), &regs->iar); H2_INDIRECT_WAIT(regs); } static void hal2_i_write32(struct snd_hal2 *hal2, u16 addr, u32 val) { struct hal2_ctl_regs *regs = hal2->ctl_regs; hal2_write(val & 0xffff, &regs->idr0); hal2_write(val >> 16, &regs->idr1); hal2_write(0, &regs->idr2); hal2_write(0, &regs->idr3); hal2_write(H2_WRITE_ADDR(addr), &regs->iar); H2_INDIRECT_WAIT(regs); } static void 
hal2_i_setbit16(struct snd_hal2 *hal2, u16 addr, u16 bit) { struct hal2_ctl_regs *regs = hal2->ctl_regs; hal2_write(H2_READ_ADDR(addr), &regs->iar); H2_INDIRECT_WAIT(regs); hal2_write((hal2_read(&regs->idr0) & 0xffff) | bit, &regs->idr0); hal2_write(0, &regs->idr1); hal2_write(0, &regs->idr2); hal2_write(0, &regs->idr3); hal2_write(H2_WRITE_ADDR(addr), &regs->iar); H2_INDIRECT_WAIT(regs); } static void hal2_i_clearbit16(struct snd_hal2 *hal2, u16 addr, u16 bit) { struct hal2_ctl_regs *regs = hal2->ctl_regs; hal2_write(H2_READ_ADDR(addr), &regs->iar); H2_INDIRECT_WAIT(regs); hal2_write((hal2_read(&regs->idr0) & 0xffff) & ~bit, &regs->idr0); hal2_write(0, &regs->idr1); hal2_write(0, &regs->idr2); hal2_write(0, &regs->idr3); hal2_write(H2_WRITE_ADDR(addr), &regs->iar); H2_INDIRECT_WAIT(regs); } static int hal2_gain_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER; uinfo->count = 2; uinfo->value.integer.min = 0; switch ((int)kcontrol->private_value) { case H2_MIX_OUTPUT_ATT: uinfo->value.integer.max = 31; break; case H2_MIX_INPUT_GAIN: uinfo->value.integer.max = 15; break; } return 0; } static int hal2_gain_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_hal2 *hal2 = snd_kcontrol_chip(kcontrol); u32 tmp; int l, r; switch ((int)kcontrol->private_value) { case H2_MIX_OUTPUT_ATT: tmp = hal2_i_read32(hal2, H2I_DAC_C2); if (tmp & H2I_C2_MUTE) { l = 0; r = 0; } else { l = 31 - ((tmp >> H2I_C2_L_ATT_SHIFT) & 31); r = 31 - ((tmp >> H2I_C2_R_ATT_SHIFT) & 31); } break; case H2_MIX_INPUT_GAIN: tmp = hal2_i_read32(hal2, H2I_ADC_C2); l = (tmp >> H2I_C2_L_GAIN_SHIFT) & 15; r = (tmp >> H2I_C2_R_GAIN_SHIFT) & 15; break; } ucontrol->value.integer.value[0] = l; ucontrol->value.integer.value[1] = r; return 0; } static int hal2_gain_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_hal2 *hal2 = snd_kcontrol_chip(kcontrol); u32 old, new; int l, r; l = 
ucontrol->value.integer.value[0]; r = ucontrol->value.integer.value[1]; switch ((int)kcontrol->private_value) { case H2_MIX_OUTPUT_ATT: old = hal2_i_read32(hal2, H2I_DAC_C2); new = old & ~(H2I_C2_L_ATT_M | H2I_C2_R_ATT_M | H2I_C2_MUTE); if (l | r) { l = 31 - l; r = 31 - r; new |= (l << H2I_C2_L_ATT_SHIFT); new |= (r << H2I_C2_R_ATT_SHIFT); } else new |= H2I_C2_L_ATT_M | H2I_C2_R_ATT_M | H2I_C2_MUTE; hal2_i_write32(hal2, H2I_DAC_C2, new); break; case H2_MIX_INPUT_GAIN: old = hal2_i_read32(hal2, H2I_ADC_C2); new = old & ~(H2I_C2_L_GAIN_M | H2I_C2_R_GAIN_M); new |= (l << H2I_C2_L_GAIN_SHIFT); new |= (r << H2I_C2_R_GAIN_SHIFT); hal2_i_write32(hal2, H2I_ADC_C2, new); break; } return old != new; } static struct snd_kcontrol_new hal2_ctrl_headphone __devinitdata = { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "Headphone Playback Volume", .access = SNDRV_CTL_ELEM_ACCESS_READWRITE, .private_value = H2_MIX_OUTPUT_ATT, .info = hal2_gain_info, .get = hal2_gain_get, .put = hal2_gain_put, }; static struct snd_kcontrol_new hal2_ctrl_mic __devinitdata = { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "Mic Capture Volume", .access = SNDRV_CTL_ELEM_ACCESS_READWRITE, .private_value = H2_MIX_INPUT_GAIN, .info = hal2_gain_info, .get = hal2_gain_get, .put = hal2_gain_put, }; static int __devinit hal2_mixer_create(struct snd_hal2 *hal2) { int err; /* mute DAC */ hal2_i_write32(hal2, H2I_DAC_C2, H2I_C2_L_ATT_M | H2I_C2_R_ATT_M | H2I_C2_MUTE); /* mute ADC */ hal2_i_write32(hal2, H2I_ADC_C2, 0); err = snd_ctl_add(hal2->card, snd_ctl_new1(&hal2_ctrl_headphone, hal2)); if (err < 0) return err; err = snd_ctl_add(hal2->card, snd_ctl_new1(&hal2_ctrl_mic, hal2)); if (err < 0) return err; return 0; } static irqreturn_t hal2_interrupt(int irq, void *dev_id) { struct snd_hal2 *hal2 = dev_id; irqreturn_t ret = IRQ_NONE; /* decide what caused this interrupt */ if (hal2->dac.pbus.pbus->pbdma_ctrl & HPC3_PDMACTRL_INT) { snd_pcm_period_elapsed(hal2->dac.substream); ret = IRQ_HANDLED; } if 
(hal2->adc.pbus.pbus->pbdma_ctrl & HPC3_PDMACTRL_INT) { snd_pcm_period_elapsed(hal2->adc.substream); ret = IRQ_HANDLED; } return ret; } static int hal2_compute_rate(struct hal2_codec *codec, unsigned int rate) { unsigned short mod; if (44100 % rate < 48000 % rate) { mod = 4 * 44100 / rate; codec->master = 44100; } else { mod = 4 * 48000 / rate; codec->master = 48000; } codec->inc = 4; codec->mod = mod; rate = 4 * codec->master / mod; return rate; } static void hal2_set_dac_rate(struct snd_hal2 *hal2) { unsigned int master = hal2->dac.master; int inc = hal2->dac.inc; int mod = hal2->dac.mod; hal2_i_write16(hal2, H2I_BRES1_C1, (master == 44100) ? 1 : 0); hal2_i_write32(hal2, H2I_BRES1_C2, ((0xffff & (inc - mod - 1)) << 16) | inc); } static void hal2_set_adc_rate(struct snd_hal2 *hal2) { unsigned int master = hal2->adc.master; int inc = hal2->adc.inc; int mod = hal2->adc.mod; hal2_i_write16(hal2, H2I_BRES2_C1, (master == 44100) ? 1 : 0); hal2_i_write32(hal2, H2I_BRES2_C2, ((0xffff & (inc - mod - 1)) << 16) | inc); } static void hal2_setup_dac(struct snd_hal2 *hal2) { unsigned int fifobeg, fifoend, highwater, sample_size; struct hal2_pbus *pbus = &hal2->dac.pbus; /* Now we set up some PBUS information. The PBUS needs information about * what portion of the fifo it will use. If it's receiving or * transmitting, and finally whether the stream is little endian or big * endian. The information is written later, on the start call. */ sample_size = 2 * hal2->dac.voices; /* Fifo should be set to hold exactly four samples. Highwater mark * should be set to two samples. 
*/ highwater = (sample_size * 2) >> 1; /* halfwords */ fifobeg = 0; /* playback is first */ fifoend = (sample_size * 4) >> 3; /* doublewords */ pbus->ctrl = HPC3_PDMACTRL_RT | HPC3_PDMACTRL_LD | (highwater << 8) | (fifobeg << 16) | (fifoend << 24); /* We disable everything before we do anything at all */ pbus->pbus->pbdma_ctrl = HPC3_PDMACTRL_LD; hal2_i_clearbit16(hal2, H2I_DMA_PORT_EN, H2I_DMA_PORT_EN_CODECTX); /* Setup the HAL2 for playback */ hal2_set_dac_rate(hal2); /* Set endianess */ hal2_i_clearbit16(hal2, H2I_DMA_END, H2I_DMA_END_CODECTX); /* Set DMA bus */ hal2_i_setbit16(hal2, H2I_DMA_DRV, (1 << pbus->pbusnr)); /* We are using 1st Bresenham clock generator for playback */ hal2_i_write16(hal2, H2I_DAC_C1, (pbus->pbusnr << H2I_C1_DMA_SHIFT) | (1 << H2I_C1_CLKID_SHIFT) | (hal2->dac.voices << H2I_C1_DATAT_SHIFT)); } static void hal2_setup_adc(struct snd_hal2 *hal2) { unsigned int fifobeg, fifoend, highwater, sample_size; struct hal2_pbus *pbus = &hal2->adc.pbus; sample_size = 2 * hal2->adc.voices; highwater = (sample_size * 2) >> 1; /* halfwords */ fifobeg = (4 * 4) >> 3; /* record is second */ fifoend = (4 * 4 + sample_size * 4) >> 3; /* doublewords */ pbus->ctrl = HPC3_PDMACTRL_RT | HPC3_PDMACTRL_RCV | HPC3_PDMACTRL_LD | (highwater << 8) | (fifobeg << 16) | (fifoend << 24); pbus->pbus->pbdma_ctrl = HPC3_PDMACTRL_LD; hal2_i_clearbit16(hal2, H2I_DMA_PORT_EN, H2I_DMA_PORT_EN_CODECR); /* Setup the HAL2 for record */ hal2_set_adc_rate(hal2); /* Set endianess */ hal2_i_clearbit16(hal2, H2I_DMA_END, H2I_DMA_END_CODECR); /* Set DMA bus */ hal2_i_setbit16(hal2, H2I_DMA_DRV, (1 << pbus->pbusnr)); /* We are using 2nd Bresenham clock generator for record */ hal2_i_write16(hal2, H2I_ADC_C1, (pbus->pbusnr << H2I_C1_DMA_SHIFT) | (2 << H2I_C1_CLKID_SHIFT) | (hal2->adc.voices << H2I_C1_DATAT_SHIFT)); } static void hal2_start_dac(struct snd_hal2 *hal2) { struct hal2_pbus *pbus = &hal2->dac.pbus; pbus->pbus->pbdma_dptr = hal2->dac.desc_dma; pbus->pbus->pbdma_ctrl = pbus->ctrl 
| HPC3_PDMACTRL_ACT; /* enable DAC */ hal2_i_setbit16(hal2, H2I_DMA_PORT_EN, H2I_DMA_PORT_EN_CODECTX); } static void hal2_start_adc(struct snd_hal2 *hal2) { struct hal2_pbus *pbus = &hal2->adc.pbus; pbus->pbus->pbdma_dptr = hal2->adc.desc_dma; pbus->pbus->pbdma_ctrl = pbus->ctrl | HPC3_PDMACTRL_ACT; /* enable ADC */ hal2_i_setbit16(hal2, H2I_DMA_PORT_EN, H2I_DMA_PORT_EN_CODECR); } static inline void hal2_stop_dac(struct snd_hal2 *hal2) { hal2->dac.pbus.pbus->pbdma_ctrl = HPC3_PDMACTRL_LD; /* The HAL2 itself may remain enabled safely */ } static inline void hal2_stop_adc(struct snd_hal2 *hal2) { hal2->adc.pbus.pbus->pbdma_ctrl = HPC3_PDMACTRL_LD; } static int hal2_alloc_dmabuf(struct hal2_codec *codec) { struct hal2_desc *desc; dma_addr_t desc_dma, buffer_dma; int count = H2_BUF_SIZE / H2_BLOCK_SIZE; int i; codec->buffer = dma_alloc_noncoherent(NULL, H2_BUF_SIZE, &buffer_dma, GFP_KERNEL); if (!codec->buffer) return -ENOMEM; desc = dma_alloc_noncoherent(NULL, count * sizeof(struct hal2_desc), &desc_dma, GFP_KERNEL); if (!desc) { dma_free_noncoherent(NULL, H2_BUF_SIZE, codec->buffer, buffer_dma); return -ENOMEM; } codec->buffer_dma = buffer_dma; codec->desc_dma = desc_dma; codec->desc = desc; for (i = 0; i < count; i++) { desc->desc.pbuf = buffer_dma + i * H2_BLOCK_SIZE; desc->desc.cntinfo = HPCDMA_XIE | H2_BLOCK_SIZE; desc->desc.pnext = (i == count - 1) ? 
desc_dma : desc_dma + (i + 1) * sizeof(struct hal2_desc); desc++; } dma_cache_sync(NULL, codec->desc, count * sizeof(struct hal2_desc), DMA_TO_DEVICE); codec->desc_count = count; return 0; } static void hal2_free_dmabuf(struct hal2_codec *codec) { dma_free_noncoherent(NULL, codec->desc_count * sizeof(struct hal2_desc), codec->desc, codec->desc_dma); dma_free_noncoherent(NULL, H2_BUF_SIZE, codec->buffer, codec->buffer_dma); } static struct snd_pcm_hardware hal2_pcm_hw = { .info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_MMAP_VALID | SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER), .formats = SNDRV_PCM_FMTBIT_S16_BE, .rates = SNDRV_PCM_RATE_8000_48000, .rate_min = 8000, .rate_max = 48000, .channels_min = 2, .channels_max = 2, .buffer_bytes_max = 65536, .period_bytes_min = 1024, .period_bytes_max = 65536, .periods_min = 2, .periods_max = 1024, }; static int hal2_pcm_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params) { int err; err = snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(params)); if (err < 0) return err; return 0; } static int hal2_pcm_hw_free(struct snd_pcm_substream *substream) { return snd_pcm_lib_free_pages(substream); } static int hal2_playback_open(struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime = substream->runtime; struct snd_hal2 *hal2 = snd_pcm_substream_chip(substream); int err; runtime->hw = hal2_pcm_hw; err = hal2_alloc_dmabuf(&hal2->dac); if (err) return err; return 0; } static int hal2_playback_close(struct snd_pcm_substream *substream) { struct snd_hal2 *hal2 = snd_pcm_substream_chip(substream); hal2_free_dmabuf(&hal2->dac); return 0; } static int hal2_playback_prepare(struct snd_pcm_substream *substream) { struct snd_hal2 *hal2 = snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = substream->runtime; struct hal2_codec *dac = &hal2->dac; dac->voices = runtime->channels; dac->sample_rate = hal2_compute_rate(dac, runtime->rate); memset(&dac->pcm_indirect, 0, 
sizeof(dac->pcm_indirect)); dac->pcm_indirect.hw_buffer_size = H2_BUF_SIZE; dac->pcm_indirect.sw_buffer_size = snd_pcm_lib_buffer_bytes(substream); dac->substream = substream; hal2_setup_dac(hal2); return 0; } static int hal2_playback_trigger(struct snd_pcm_substream *substream, int cmd) { struct snd_hal2 *hal2 = snd_pcm_substream_chip(substream); switch (cmd) { case SNDRV_PCM_TRIGGER_START: hal2->dac.pcm_indirect.hw_io = hal2->dac.buffer_dma; hal2->dac.pcm_indirect.hw_data = 0; substream->ops->ack(substream); hal2_start_dac(hal2); break; case SNDRV_PCM_TRIGGER_STOP: hal2_stop_dac(hal2); break; default: return -EINVAL; } return 0; } static snd_pcm_uframes_t hal2_playback_pointer(struct snd_pcm_substream *substream) { struct snd_hal2 *hal2 = snd_pcm_substream_chip(substream); struct hal2_codec *dac = &hal2->dac; return snd_pcm_indirect_playback_pointer(substream, &dac->pcm_indirect, dac->pbus.pbus->pbdma_bptr); } static void hal2_playback_transfer(struct snd_pcm_substream *substream, struct snd_pcm_indirect *rec, size_t bytes) { struct snd_hal2 *hal2 = snd_pcm_substream_chip(substream); unsigned char *buf = hal2->dac.buffer + rec->hw_data; memcpy(buf, substream->runtime->dma_area + rec->sw_data, bytes); dma_cache_sync(NULL, buf, bytes, DMA_TO_DEVICE); } static int hal2_playback_ack(struct snd_pcm_substream *substream) { struct snd_hal2 *hal2 = snd_pcm_substream_chip(substream); struct hal2_codec *dac = &hal2->dac; dac->pcm_indirect.hw_queue_size = H2_BUF_SIZE / 2; snd_pcm_indirect_playback_transfer(substream, &dac->pcm_indirect, hal2_playback_transfer); return 0; } static int hal2_capture_open(struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime = substream->runtime; struct snd_hal2 *hal2 = snd_pcm_substream_chip(substream); struct hal2_codec *adc = &hal2->adc; int err; runtime->hw = hal2_pcm_hw; err = hal2_alloc_dmabuf(adc); if (err) return err; return 0; } static int hal2_capture_close(struct snd_pcm_substream *substream) { struct snd_hal2 *hal2 
= snd_pcm_substream_chip(substream); hal2_free_dmabuf(&hal2->adc); return 0; } static int hal2_capture_prepare(struct snd_pcm_substream *substream) { struct snd_hal2 *hal2 = snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = substream->runtime; struct hal2_codec *adc = &hal2->adc; adc->voices = runtime->channels; adc->sample_rate = hal2_compute_rate(adc, runtime->rate); memset(&adc->pcm_indirect, 0, sizeof(adc->pcm_indirect)); adc->pcm_indirect.hw_buffer_size = H2_BUF_SIZE; adc->pcm_indirect.hw_queue_size = H2_BUF_SIZE / 2; adc->pcm_indirect.sw_buffer_size = snd_pcm_lib_buffer_bytes(substream); adc->substream = substream; hal2_setup_adc(hal2); return 0; } static int hal2_capture_trigger(struct snd_pcm_substream *substream, int cmd) { struct snd_hal2 *hal2 = snd_pcm_substream_chip(substream); switch (cmd) { case SNDRV_PCM_TRIGGER_START: hal2->adc.pcm_indirect.hw_io = hal2->adc.buffer_dma; hal2->adc.pcm_indirect.hw_data = 0; printk(KERN_DEBUG "buffer_dma %x\n", hal2->adc.buffer_dma); hal2_start_adc(hal2); break; case SNDRV_PCM_TRIGGER_STOP: hal2_stop_adc(hal2); break; default: return -EINVAL; } return 0; } static snd_pcm_uframes_t hal2_capture_pointer(struct snd_pcm_substream *substream) { struct snd_hal2 *hal2 = snd_pcm_substream_chip(substream); struct hal2_codec *adc = &hal2->adc; return snd_pcm_indirect_capture_pointer(substream, &adc->pcm_indirect, adc->pbus.pbus->pbdma_bptr); } static void hal2_capture_transfer(struct snd_pcm_substream *substream, struct snd_pcm_indirect *rec, size_t bytes) { struct snd_hal2 *hal2 = snd_pcm_substream_chip(substream); unsigned char *buf = hal2->adc.buffer + rec->hw_data; dma_cache_sync(NULL, buf, bytes, DMA_FROM_DEVICE); memcpy(substream->runtime->dma_area + rec->sw_data, buf, bytes); } static int hal2_capture_ack(struct snd_pcm_substream *substream) { struct snd_hal2 *hal2 = snd_pcm_substream_chip(substream); struct hal2_codec *adc = &hal2->adc; snd_pcm_indirect_capture_transfer(substream, &adc->pcm_indirect, 
hal2_capture_transfer); return 0; } static struct snd_pcm_ops hal2_playback_ops = { .open = hal2_playback_open, .close = hal2_playback_close, .ioctl = snd_pcm_lib_ioctl, .hw_params = hal2_pcm_hw_params, .hw_free = hal2_pcm_hw_free, .prepare = hal2_playback_prepare, .trigger = hal2_playback_trigger, .pointer = hal2_playback_pointer, .ack = hal2_playback_ack, }; static struct snd_pcm_ops hal2_capture_ops = { .open = hal2_capture_open, .close = hal2_capture_close, .ioctl = snd_pcm_lib_ioctl, .hw_params = hal2_pcm_hw_params, .hw_free = hal2_pcm_hw_free, .prepare = hal2_capture_prepare, .trigger = hal2_capture_trigger, .pointer = hal2_capture_pointer, .ack = hal2_capture_ack, }; static int __devinit hal2_pcm_create(struct snd_hal2 *hal2) { struct snd_pcm *pcm; int err; /* create first pcm device with one outputs and one input */ err = snd_pcm_new(hal2->card, "SGI HAL2 Audio", 0, 1, 1, &pcm); if (err < 0) return err; pcm->private_data = hal2; strcpy(pcm->name, "SGI HAL2"); /* set operators */ snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &hal2_playback_ops); snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &hal2_capture_ops); snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_CONTINUOUS, snd_dma_continuous_data(GFP_KERNEL), 0, 1024 * 1024); return 0; } static int hal2_dev_free(struct snd_device *device) { struct snd_hal2 *hal2 = device->device_data; free_irq(SGI_HPCDMA_IRQ, hal2); kfree(hal2); return 0; } static struct snd_device_ops hal2_ops = { .dev_free = hal2_dev_free, }; static void hal2_init_codec(struct hal2_codec *codec, struct hpc3_regs *hpc3, int index) { codec->pbus.pbusnr = index; codec->pbus.pbus = &hpc3->pbdma[index]; } static int hal2_detect(struct snd_hal2 *hal2) { unsigned short board, major, minor; unsigned short rev; /* reset HAL2 */ hal2_write(0, &hal2->ctl_regs->isr); /* release reset */ hal2_write(H2_ISR_GLOBAL_RESET_N | H2_ISR_CODEC_RESET_N, &hal2->ctl_regs->isr); hal2_i_write16(hal2, H2I_RELAY_C, H2I_RELAY_C_STATE); rev = 
hal2_read(&hal2->ctl_regs->rev); if (rev & H2_REV_AUDIO_PRESENT) return -ENODEV; board = (rev & H2_REV_BOARD_M) >> 12; major = (rev & H2_REV_MAJOR_CHIP_M) >> 4; minor = (rev & H2_REV_MINOR_CHIP_M); printk(KERN_INFO "SGI HAL2 revision %i.%i.%i\n", board, major, minor); return 0; } static int hal2_create(struct snd_card *card, struct snd_hal2 **rchip) { struct snd_hal2 *hal2; struct hpc3_regs *hpc3 = hpc3c0; int err; hal2 = kzalloc(sizeof(struct snd_hal2), GFP_KERNEL); if (!hal2) return -ENOMEM; hal2->card = card; if (request_irq(SGI_HPCDMA_IRQ, hal2_interrupt, IRQF_SHARED, "SGI HAL2", hal2)) { printk(KERN_ERR "HAL2: Can't get irq %d\n", SGI_HPCDMA_IRQ); kfree(hal2); return -EAGAIN; } hal2->ctl_regs = (struct hal2_ctl_regs *)hpc3->pbus_extregs[0]; hal2->aes_regs = (struct hal2_aes_regs *)hpc3->pbus_extregs[1]; hal2->vol_regs = (struct hal2_vol_regs *)hpc3->pbus_extregs[2]; hal2->syn_regs = (struct hal2_syn_regs *)hpc3->pbus_extregs[3]; if (hal2_detect(hal2) < 0) { kfree(hal2); return -ENODEV; } hal2_init_codec(&hal2->dac, hpc3, 0); hal2_init_codec(&hal2->adc, hpc3, 1); /* * All DMA channel interfaces in HAL2 are designed to operate with * PBUS programmed for 2 cycles in D3, 2 cycles in D4 and 2 cycles * in D5. HAL2 is a 16-bit device which can accept both big and little * endian format. It assumes that even address bytes are on high * portion of PBUS (15:8) and assumes that HPC3 is programmed to * accept a live (unsynchronized) version of P_DREQ_N from HAL2. 
*/ #define HAL2_PBUS_DMACFG ((0 << HPC3_DMACFG_D3R_SHIFT) | \ (2 << HPC3_DMACFG_D4R_SHIFT) | \ (2 << HPC3_DMACFG_D5R_SHIFT) | \ (0 << HPC3_DMACFG_D3W_SHIFT) | \ (2 << HPC3_DMACFG_D4W_SHIFT) | \ (2 << HPC3_DMACFG_D5W_SHIFT) | \ HPC3_DMACFG_DS16 | \ HPC3_DMACFG_EVENHI | \ HPC3_DMACFG_RTIME | \ (8 << HPC3_DMACFG_BURST_SHIFT) | \ HPC3_DMACFG_DRQLIVE) /* * Ignore what's mentioned in the specification and write value which * works in The Real World (TM) */ hpc3->pbus_dmacfg[hal2->dac.pbus.pbusnr][0] = 0x8208844; hpc3->pbus_dmacfg[hal2->adc.pbus.pbusnr][0] = 0x8208844; err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, hal2, &hal2_ops); if (err < 0) { free_irq(SGI_HPCDMA_IRQ, hal2); kfree(hal2); return err; } *rchip = hal2; return 0; } static int __devinit hal2_probe(struct platform_device *pdev) { struct snd_card *card; struct snd_hal2 *chip; int err; err = snd_card_create(index, id, THIS_MODULE, 0, &card); if (err < 0) return err; err = hal2_create(card, &chip); if (err < 0) { snd_card_free(card); return err; } snd_card_set_dev(card, &pdev->dev); err = hal2_pcm_create(chip); if (err < 0) { snd_card_free(card); return err; } err = hal2_mixer_create(chip); if (err < 0) { snd_card_free(card); return err; } strcpy(card->driver, "SGI HAL2 Audio"); strcpy(card->shortname, "SGI HAL2 Audio"); sprintf(card->longname, "%s irq %i", card->shortname, SGI_HPCDMA_IRQ); err = snd_card_register(card); if (err < 0) { snd_card_free(card); return err; } platform_set_drvdata(pdev, card); return 0; } static int __devexit hal2_remove(struct platform_device *pdev) { struct snd_card *card = platform_get_drvdata(pdev); snd_card_free(card); platform_set_drvdata(pdev, NULL); return 0; } static struct platform_driver hal2_driver = { .probe = hal2_probe, .remove = __devexit_p(hal2_remove), .driver = { .name = "sgihal2", .owner = THIS_MODULE, } }; static int __init alsa_card_hal2_init(void) { return platform_driver_register(&hal2_driver); } static void __exit alsa_card_hal2_exit(void) { 
platform_driver_unregister(&hal2_driver); } module_init(alsa_card_hal2_init); module_exit(alsa_card_hal2_exit);
gpl-2.0
xdatravelbug/N909D_Kernel_JB_4.1.2
drivers/net/wireless/wl12xx/main.c
4792
145804
/* * This file is part of wl1271 * * Copyright (C) 2008-2010 Nokia Corporation * * Contact: Luciano Coelho <luciano.coelho@nokia.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA * 02110-1301 USA * */ #include <linux/module.h> #include <linux/firmware.h> #include <linux/delay.h> #include <linux/spi/spi.h> #include <linux/crc32.h> #include <linux/etherdevice.h> #include <linux/vmalloc.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/wl12xx.h> #include <linux/sched.h> #include <linux/interrupt.h> #include "wl12xx.h" #include "debug.h" #include "wl12xx_80211.h" #include "reg.h" #include "io.h" #include "event.h" #include "tx.h" #include "rx.h" #include "ps.h" #include "init.h" #include "debugfs.h" #include "cmd.h" #include "boot.h" #include "testmode.h" #include "scan.h" #define WL1271_BOOT_RETRIES 3 static struct conf_drv_settings default_conf = { .sg = { .params = { [CONF_SG_ACL_BT_MASTER_MIN_BR] = 10, [CONF_SG_ACL_BT_MASTER_MAX_BR] = 180, [CONF_SG_ACL_BT_SLAVE_MIN_BR] = 10, [CONF_SG_ACL_BT_SLAVE_MAX_BR] = 180, [CONF_SG_ACL_BT_MASTER_MIN_EDR] = 10, [CONF_SG_ACL_BT_MASTER_MAX_EDR] = 80, [CONF_SG_ACL_BT_SLAVE_MIN_EDR] = 10, [CONF_SG_ACL_BT_SLAVE_MAX_EDR] = 80, [CONF_SG_ACL_WLAN_PS_MASTER_BR] = 8, [CONF_SG_ACL_WLAN_PS_SLAVE_BR] = 8, [CONF_SG_ACL_WLAN_PS_MASTER_EDR] = 20, [CONF_SG_ACL_WLAN_PS_SLAVE_EDR] = 20, [CONF_SG_ACL_WLAN_ACTIVE_MASTER_MIN_BR] = 20, 
[CONF_SG_ACL_WLAN_ACTIVE_MASTER_MAX_BR] = 35, [CONF_SG_ACL_WLAN_ACTIVE_SLAVE_MIN_BR] = 16, [CONF_SG_ACL_WLAN_ACTIVE_SLAVE_MAX_BR] = 35, [CONF_SG_ACL_WLAN_ACTIVE_MASTER_MIN_EDR] = 32, [CONF_SG_ACL_WLAN_ACTIVE_MASTER_MAX_EDR] = 50, [CONF_SG_ACL_WLAN_ACTIVE_SLAVE_MIN_EDR] = 28, [CONF_SG_ACL_WLAN_ACTIVE_SLAVE_MAX_EDR] = 50, [CONF_SG_ACL_ACTIVE_SCAN_WLAN_BR] = 10, [CONF_SG_ACL_ACTIVE_SCAN_WLAN_EDR] = 20, [CONF_SG_ACL_PASSIVE_SCAN_BT_BR] = 75, [CONF_SG_ACL_PASSIVE_SCAN_WLAN_BR] = 15, [CONF_SG_ACL_PASSIVE_SCAN_BT_EDR] = 27, [CONF_SG_ACL_PASSIVE_SCAN_WLAN_EDR] = 17, /* active scan params */ [CONF_SG_AUTO_SCAN_PROBE_REQ] = 170, [CONF_SG_ACTIVE_SCAN_DURATION_FACTOR_HV3] = 50, [CONF_SG_ACTIVE_SCAN_DURATION_FACTOR_A2DP] = 100, /* passive scan params */ [CONF_SG_PASSIVE_SCAN_DURATION_FACTOR_A2DP_BR] = 800, [CONF_SG_PASSIVE_SCAN_DURATION_FACTOR_A2DP_EDR] = 200, [CONF_SG_PASSIVE_SCAN_DURATION_FACTOR_HV3] = 200, /* passive scan in dual antenna params */ [CONF_SG_CONSECUTIVE_HV3_IN_PASSIVE_SCAN] = 0, [CONF_SG_BCN_HV3_COLLISION_THRESH_IN_PASSIVE_SCAN] = 0, [CONF_SG_TX_RX_PROTECTION_BWIDTH_IN_PASSIVE_SCAN] = 0, /* general params */ [CONF_SG_STA_FORCE_PS_IN_BT_SCO] = 1, [CONF_SG_ANTENNA_CONFIGURATION] = 0, [CONF_SG_BEACON_MISS_PERCENT] = 60, [CONF_SG_DHCP_TIME] = 5000, [CONF_SG_RXT] = 1200, [CONF_SG_TXT] = 1000, [CONF_SG_ADAPTIVE_RXT_TXT] = 1, [CONF_SG_GENERAL_USAGE_BIT_MAP] = 3, [CONF_SG_HV3_MAX_SERVED] = 6, [CONF_SG_PS_POLL_TIMEOUT] = 10, [CONF_SG_UPSD_TIMEOUT] = 10, [CONF_SG_CONSECUTIVE_CTS_THRESHOLD] = 2, [CONF_SG_STA_RX_WINDOW_AFTER_DTIM] = 5, [CONF_SG_STA_CONNECTION_PROTECTION_TIME] = 30, /* AP params */ [CONF_AP_BEACON_MISS_TX] = 3, [CONF_AP_RX_WINDOW_AFTER_BEACON] = 10, [CONF_AP_BEACON_WINDOW_INTERVAL] = 2, [CONF_AP_CONNECTION_PROTECTION_TIME] = 0, [CONF_AP_BT_ACL_VAL_BT_SERVE_TIME] = 25, [CONF_AP_BT_ACL_VAL_WL_SERVE_TIME] = 25, /* CTS Diluting params */ [CONF_SG_CTS_DILUTED_BAD_RX_PACKETS_TH] = 0, [CONF_SG_CTS_CHOP_IN_DUAL_ANT_SCO_MASTER] = 0, }, .state = CONF_SG_PROTECTIVE, 
}, .rx = { .rx_msdu_life_time = 512000, .packet_detection_threshold = 0, .ps_poll_timeout = 15, .upsd_timeout = 15, .rts_threshold = IEEE80211_MAX_RTS_THRESHOLD, .rx_cca_threshold = 0, .irq_blk_threshold = 0xFFFF, .irq_pkt_threshold = 0, .irq_timeout = 600, .queue_type = CONF_RX_QUEUE_TYPE_LOW_PRIORITY, }, .tx = { .tx_energy_detection = 0, .sta_rc_conf = { .enabled_rates = 0, .short_retry_limit = 10, .long_retry_limit = 10, .aflags = 0, }, .ac_conf_count = 4, .ac_conf = { [CONF_TX_AC_BE] = { .ac = CONF_TX_AC_BE, .cw_min = 15, .cw_max = 63, .aifsn = 3, .tx_op_limit = 0, }, [CONF_TX_AC_BK] = { .ac = CONF_TX_AC_BK, .cw_min = 15, .cw_max = 63, .aifsn = 7, .tx_op_limit = 0, }, [CONF_TX_AC_VI] = { .ac = CONF_TX_AC_VI, .cw_min = 15, .cw_max = 63, .aifsn = CONF_TX_AIFS_PIFS, .tx_op_limit = 3008, }, [CONF_TX_AC_VO] = { .ac = CONF_TX_AC_VO, .cw_min = 15, .cw_max = 63, .aifsn = CONF_TX_AIFS_PIFS, .tx_op_limit = 1504, }, }, .max_tx_retries = 100, .ap_aging_period = 300, .tid_conf_count = 4, .tid_conf = { [CONF_TX_AC_BE] = { .queue_id = CONF_TX_AC_BE, .channel_type = CONF_CHANNEL_TYPE_EDCF, .tsid = CONF_TX_AC_BE, .ps_scheme = CONF_PS_SCHEME_LEGACY, .ack_policy = CONF_ACK_POLICY_LEGACY, .apsd_conf = {0, 0}, }, [CONF_TX_AC_BK] = { .queue_id = CONF_TX_AC_BK, .channel_type = CONF_CHANNEL_TYPE_EDCF, .tsid = CONF_TX_AC_BK, .ps_scheme = CONF_PS_SCHEME_LEGACY, .ack_policy = CONF_ACK_POLICY_LEGACY, .apsd_conf = {0, 0}, }, [CONF_TX_AC_VI] = { .queue_id = CONF_TX_AC_VI, .channel_type = CONF_CHANNEL_TYPE_EDCF, .tsid = CONF_TX_AC_VI, .ps_scheme = CONF_PS_SCHEME_LEGACY, .ack_policy = CONF_ACK_POLICY_LEGACY, .apsd_conf = {0, 0}, }, [CONF_TX_AC_VO] = { .queue_id = CONF_TX_AC_VO, .channel_type = CONF_CHANNEL_TYPE_EDCF, .tsid = CONF_TX_AC_VO, .ps_scheme = CONF_PS_SCHEME_LEGACY, .ack_policy = CONF_ACK_POLICY_LEGACY, .apsd_conf = {0, 0}, }, }, .frag_threshold = IEEE80211_MAX_FRAG_THRESHOLD, .tx_compl_timeout = 700, .tx_compl_threshold = 4, .basic_rate = CONF_HW_BIT_RATE_1MBPS, .basic_rate_5 = 
CONF_HW_BIT_RATE_6MBPS, .tmpl_short_retry_limit = 10, .tmpl_long_retry_limit = 10, .tx_watchdog_timeout = 5000, }, .conn = { .wake_up_event = CONF_WAKE_UP_EVENT_DTIM, .listen_interval = 1, .suspend_wake_up_event = CONF_WAKE_UP_EVENT_N_DTIM, .suspend_listen_interval = 3, .bcn_filt_mode = CONF_BCN_FILT_MODE_ENABLED, .bcn_filt_ie_count = 2, .bcn_filt_ie = { [0] = { .ie = WLAN_EID_CHANNEL_SWITCH, .rule = CONF_BCN_RULE_PASS_ON_APPEARANCE, }, [1] = { .ie = WLAN_EID_HT_INFORMATION, .rule = CONF_BCN_RULE_PASS_ON_CHANGE, }, }, .synch_fail_thold = 10, .bss_lose_timeout = 100, .beacon_rx_timeout = 10000, .broadcast_timeout = 20000, .rx_broadcast_in_ps = 1, .ps_poll_threshold = 10, .bet_enable = CONF_BET_MODE_ENABLE, .bet_max_consecutive = 50, .psm_entry_retries = 8, .psm_exit_retries = 16, .psm_entry_nullfunc_retries = 3, .dynamic_ps_timeout = 200, .forced_ps = false, .keep_alive_interval = 55000, .max_listen_interval = 20, }, .itrim = { .enable = false, .timeout = 50000, }, .pm_config = { .host_clk_settling_time = 5000, .host_fast_wakeup_support = false }, .roam_trigger = { .trigger_pacing = 1, .avg_weight_rssi_beacon = 20, .avg_weight_rssi_data = 10, .avg_weight_snr_beacon = 20, .avg_weight_snr_data = 10, }, .scan = { .min_dwell_time_active = 7500, .max_dwell_time_active = 30000, .min_dwell_time_passive = 100000, .max_dwell_time_passive = 100000, .num_probe_reqs = 2, .split_scan_timeout = 50000, }, .sched_scan = { /* sched_scan requires dwell times in TU instead of TU/1000 */ .min_dwell_time_active = 30, .max_dwell_time_active = 60, .dwell_time_passive = 100, .dwell_time_dfs = 150, .num_probe_reqs = 2, .rssi_threshold = -90, .snr_threshold = 0, }, .rf = { .tx_per_channel_power_compensation_2 = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, }, .tx_per_channel_power_compensation_5 = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, }, }, .ht = { .rx_ba_win_size = 8, .tx_ba_win_size = 64, .inactivity_timeout = 10000, 
.tx_ba_tid_bitmap = CONF_TX_BA_ENABLED_TID_BITMAP, }, .mem_wl127x = { .num_stations = 1, .ssid_profiles = 1, .rx_block_num = 70, .tx_min_block_num = 40, .dynamic_memory = 1, .min_req_tx_blocks = 100, .min_req_rx_blocks = 22, .tx_min = 27, }, .mem_wl128x = { .num_stations = 1, .ssid_profiles = 1, .rx_block_num = 40, .tx_min_block_num = 40, .dynamic_memory = 1, .min_req_tx_blocks = 45, .min_req_rx_blocks = 22, .tx_min = 27, }, .fm_coex = { .enable = true, .swallow_period = 5, .n_divider_fref_set_1 = 0xff, /* default */ .n_divider_fref_set_2 = 12, .m_divider_fref_set_1 = 148, .m_divider_fref_set_2 = 0xffff, /* default */ .coex_pll_stabilization_time = 0xffffffff, /* default */ .ldo_stabilization_time = 0xffff, /* default */ .fm_disturbed_band_margin = 0xff, /* default */ .swallow_clk_diff = 0xff, /* default */ }, .rx_streaming = { .duration = 150, .queues = 0x1, .interval = 20, .always = 0, }, .fwlog = { .mode = WL12XX_FWLOG_ON_DEMAND, .mem_blocks = 2, .severity = 0, .timestamp = WL12XX_FWLOG_TIMESTAMP_DISABLED, .output = WL12XX_FWLOG_OUTPUT_HOST, .threshold = 0, }, .hci_io_ds = HCI_IO_DS_6MA, .rate = { .rate_retry_score = 32000, .per_add = 8192, .per_th1 = 2048, .per_th2 = 4096, .max_per = 8100, .inverse_curiosity_factor = 5, .tx_fail_low_th = 4, .tx_fail_high_th = 10, .per_alpha_shift = 4, .per_add_shift = 13, .per_beta1_shift = 10, .per_beta2_shift = 8, .rate_check_up = 2, .rate_check_down = 12, .rate_retry_policy = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, }, }, .hangover = { .recover_time = 0, .hangover_period = 20, .dynamic_mode = 1, .early_termination_mode = 1, .max_period = 20, .min_period = 1, .increase_delta = 1, .decrease_delta = 2, .quiet_time = 4, .increase_time = 1, .window_size = 16, }, }; static char *fwlog_param; static bool bug_on_recovery; static void __wl1271_op_remove_interface(struct wl1271 *wl, struct ieee80211_vif *vif, bool reset_tx_queues); static void wl1271_op_stop(struct ieee80211_hw *hw); static void 
wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif); static int wl12xx_set_authorized(struct wl1271 *wl, struct wl12xx_vif *wlvif) { int ret; if (WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS)) return -EINVAL; if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) return 0; if (test_and_set_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags)) return 0; ret = wl12xx_cmd_set_peer_state(wl, wlvif->sta.hlid); if (ret < 0) return ret; wl12xx_croc(wl, wlvif->role_id); wl1271_info("Association completed."); return 0; } static int wl1271_reg_notify(struct wiphy *wiphy, struct regulatory_request *request) { struct ieee80211_supported_band *band; struct ieee80211_channel *ch; int i; band = wiphy->bands[IEEE80211_BAND_5GHZ]; for (i = 0; i < band->n_channels; i++) { ch = &band->channels[i]; if (ch->flags & IEEE80211_CHAN_DISABLED) continue; if (ch->flags & IEEE80211_CHAN_RADAR) ch->flags |= IEEE80211_CHAN_NO_IBSS | IEEE80211_CHAN_PASSIVE_SCAN; } return 0; } static int wl1271_set_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif, bool enable) { int ret = 0; /* we should hold wl->mutex */ ret = wl1271_acx_ps_rx_streaming(wl, wlvif, enable); if (ret < 0) goto out; if (enable) set_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags); else clear_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags); out: return ret; } /* * this function is being called when the rx_streaming interval * has beed changed or rx_streaming should be disabled */ int wl1271_recalc_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif) { int ret = 0; int period = wl->conf.rx_streaming.interval; /* don't reconfigure if rx_streaming is disabled */ if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags)) goto out; /* reconfigure/disable according to new streaming_period */ if (period && test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) && (wl->conf.rx_streaming.always || test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags))) ret = wl1271_set_rx_streaming(wl, wlvif, true); else { ret = 
wl1271_set_rx_streaming(wl, wlvif, false); /* don't cancel_work_sync since we might deadlock */ del_timer_sync(&wlvif->rx_streaming_timer); } out: return ret; } static void wl1271_rx_streaming_enable_work(struct work_struct *work) { int ret; struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif, rx_streaming_enable_work); struct wl1271 *wl = wlvif->wl; mutex_lock(&wl->mutex); if (test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags) || !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) || (!wl->conf.rx_streaming.always && !test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags))) goto out; if (!wl->conf.rx_streaming.interval) goto out; ret = wl1271_ps_elp_wakeup(wl); if (ret < 0) goto out; ret = wl1271_set_rx_streaming(wl, wlvif, true); if (ret < 0) goto out_sleep; /* stop it after some time of inactivity */ mod_timer(&wlvif->rx_streaming_timer, jiffies + msecs_to_jiffies(wl->conf.rx_streaming.duration)); out_sleep: wl1271_ps_elp_sleep(wl); out: mutex_unlock(&wl->mutex); } static void wl1271_rx_streaming_disable_work(struct work_struct *work) { int ret; struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif, rx_streaming_disable_work); struct wl1271 *wl = wlvif->wl; mutex_lock(&wl->mutex); if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags)) goto out; ret = wl1271_ps_elp_wakeup(wl); if (ret < 0) goto out; ret = wl1271_set_rx_streaming(wl, wlvif, false); if (ret) goto out_sleep; out_sleep: wl1271_ps_elp_sleep(wl); out: mutex_unlock(&wl->mutex); } static void wl1271_rx_streaming_timer(unsigned long data) { struct wl12xx_vif *wlvif = (struct wl12xx_vif *)data; struct wl1271 *wl = wlvif->wl; ieee80211_queue_work(wl->hw, &wlvif->rx_streaming_disable_work); } /* wl->mutex must be taken */ void wl12xx_rearm_tx_watchdog_locked(struct wl1271 *wl) { /* if the watchdog is not armed, don't do anything */ if (wl->tx_allocated_blocks == 0) return; cancel_delayed_work(&wl->tx_watchdog_work); ieee80211_queue_delayed_work(wl->hw, &wl->tx_watchdog_work, 
msecs_to_jiffies(wl->conf.tx.tx_watchdog_timeout)); } static void wl12xx_tx_watchdog_work(struct work_struct *work) { struct delayed_work *dwork; struct wl1271 *wl; dwork = container_of(work, struct delayed_work, work); wl = container_of(dwork, struct wl1271, tx_watchdog_work); mutex_lock(&wl->mutex); if (unlikely(wl->state == WL1271_STATE_OFF)) goto out; /* Tx went out in the meantime - everything is ok */ if (unlikely(wl->tx_allocated_blocks == 0)) goto out; /* * if a ROC is in progress, we might not have any Tx for a long * time (e.g. pending Tx on the non-ROC channels) */ if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) { wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to ROC", wl->conf.tx.tx_watchdog_timeout); wl12xx_rearm_tx_watchdog_locked(wl); goto out; } /* * if a scan is in progress, we might not have any Tx for a long * time */ if (wl->scan.state != WL1271_SCAN_STATE_IDLE) { wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to scan", wl->conf.tx.tx_watchdog_timeout); wl12xx_rearm_tx_watchdog_locked(wl); goto out; } /* * AP might cache a frame for a long time for a sleeping station, * so rearm the timer if there's an AP interface with stations. If * Tx is genuinely stuck we will most hopefully discover it when all * stations are removed due to inactivity. */ if (wl->active_sta_count) { wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms. AP has " " %d stations", wl->conf.tx.tx_watchdog_timeout, wl->active_sta_count); wl12xx_rearm_tx_watchdog_locked(wl); goto out; } wl1271_error("Tx stuck (in FW) for %d ms. Starting recovery", wl->conf.tx.tx_watchdog_timeout); wl12xx_queue_recovery_work(wl); out: mutex_unlock(&wl->mutex); } static void wl1271_conf_init(struct wl1271 *wl) { /* * This function applies the default configuration to the driver. This * function is invoked upon driver load (spi probe.) * * The configuration is stored in a run-time structure in order to * facilitate for run-time adjustment of any of the parameters. 
Making * changes to the configuration structure will apply the new values on * the next interface up (wl1271_op_start.) */ /* apply driver default configuration */ memcpy(&wl->conf, &default_conf, sizeof(default_conf)); /* Adjust settings according to optional module parameters */ if (fwlog_param) { if (!strcmp(fwlog_param, "continuous")) { wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS; } else if (!strcmp(fwlog_param, "ondemand")) { wl->conf.fwlog.mode = WL12XX_FWLOG_ON_DEMAND; } else if (!strcmp(fwlog_param, "dbgpins")) { wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS; wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_DBG_PINS; } else if (!strcmp(fwlog_param, "disable")) { wl->conf.fwlog.mem_blocks = 0; wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_NONE; } else { wl1271_error("Unknown fwlog parameter %s", fwlog_param); } } } static int wl1271_plt_init(struct wl1271 *wl) { int ret; if (wl->chip.id == CHIP_ID_1283_PG20) ret = wl128x_cmd_general_parms(wl); else ret = wl1271_cmd_general_parms(wl); if (ret < 0) return ret; if (wl->chip.id == CHIP_ID_1283_PG20) ret = wl128x_cmd_radio_parms(wl); else ret = wl1271_cmd_radio_parms(wl); if (ret < 0) return ret; if (wl->chip.id != CHIP_ID_1283_PG20) { ret = wl1271_cmd_ext_radio_parms(wl); if (ret < 0) return ret; } /* Chip-specific initializations */ ret = wl1271_chip_specific_init(wl); if (ret < 0) return ret; ret = wl1271_acx_init_mem_config(wl); if (ret < 0) return ret; ret = wl12xx_acx_mem_cfg(wl); if (ret < 0) goto out_free_memmap; /* Enable data path */ ret = wl1271_cmd_data_path(wl, 1); if (ret < 0) goto out_free_memmap; /* Configure for CAM power saving (ie. 
always active) */ ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_CAM); if (ret < 0) goto out_free_memmap; /* configure PM */ ret = wl1271_acx_pm_config(wl); if (ret < 0) goto out_free_memmap; return 0; out_free_memmap: kfree(wl->target_mem_map); wl->target_mem_map = NULL; return ret; } static void wl12xx_irq_ps_regulate_link(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid, u8 tx_pkts) { bool fw_ps, single_sta; fw_ps = test_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map); single_sta = (wl->active_sta_count == 1); /* * Wake up from high level PS if the STA is asleep with too little * packets in FW or if the STA is awake. */ if (!fw_ps || tx_pkts < WL1271_PS_STA_MAX_PACKETS) wl12xx_ps_link_end(wl, wlvif, hlid); /* * Start high-level PS if the STA is asleep with enough blocks in FW. * Make an exception if this is the only connected station. In this * case FW-memory congestion is not a problem. */ else if (!single_sta && fw_ps && tx_pkts >= WL1271_PS_STA_MAX_PACKETS) wl12xx_ps_link_start(wl, wlvif, hlid, true); } static void wl12xx_irq_update_links_status(struct wl1271 *wl, struct wl12xx_vif *wlvif, struct wl12xx_fw_status *status) { struct wl1271_link *lnk; u32 cur_fw_ps_map; u8 hlid, cnt; /* TODO: also use link_fast_bitmap here */ cur_fw_ps_map = le32_to_cpu(status->link_ps_bitmap); if (wl->ap_fw_ps_map != cur_fw_ps_map) { wl1271_debug(DEBUG_PSM, "link ps prev 0x%x cur 0x%x changed 0x%x", wl->ap_fw_ps_map, cur_fw_ps_map, wl->ap_fw_ps_map ^ cur_fw_ps_map); wl->ap_fw_ps_map = cur_fw_ps_map; } for_each_set_bit(hlid, wlvif->ap.sta_hlid_map, WL12XX_MAX_LINKS) { lnk = &wl->links[hlid]; cnt = status->tx_lnk_free_pkts[hlid] - lnk->prev_freed_pkts; lnk->prev_freed_pkts = status->tx_lnk_free_pkts[hlid]; lnk->allocated_pkts -= cnt; wl12xx_irq_ps_regulate_link(wl, wlvif, hlid, lnk->allocated_pkts); } } static void wl12xx_fw_status(struct wl1271 *wl, struct wl12xx_fw_status *status) { struct wl12xx_vif *wlvif; struct timespec ts; u32 old_tx_blk_count = wl->tx_blocks_available; int 
avail, freed_blocks; int i; wl1271_raw_read(wl, FW_STATUS_ADDR, status, sizeof(*status), false); wl1271_debug(DEBUG_IRQ, "intr: 0x%x (fw_rx_counter = %d, " "drv_rx_counter = %d, tx_results_counter = %d)", status->intr, status->fw_rx_counter, status->drv_rx_counter, status->tx_results_counter); for (i = 0; i < NUM_TX_QUEUES; i++) { /* prevent wrap-around in freed-packets counter */ wl->tx_allocated_pkts[i] -= (status->tx_released_pkts[i] - wl->tx_pkts_freed[i]) & 0xff; wl->tx_pkts_freed[i] = status->tx_released_pkts[i]; } /* prevent wrap-around in total blocks counter */ if (likely(wl->tx_blocks_freed <= le32_to_cpu(status->total_released_blks))) freed_blocks = le32_to_cpu(status->total_released_blks) - wl->tx_blocks_freed; else freed_blocks = 0x100000000LL - wl->tx_blocks_freed + le32_to_cpu(status->total_released_blks); wl->tx_blocks_freed = le32_to_cpu(status->total_released_blks); wl->tx_allocated_blocks -= freed_blocks; /* * If the FW freed some blocks: * If we still have allocated blocks - re-arm the timer, Tx is * not stuck. Otherwise, cancel the timer (no Tx currently). */ if (freed_blocks) { if (wl->tx_allocated_blocks) wl12xx_rearm_tx_watchdog_locked(wl); else cancel_delayed_work(&wl->tx_watchdog_work); } avail = le32_to_cpu(status->tx_total) - wl->tx_allocated_blocks; /* * The FW might change the total number of TX memblocks before * we get a notification about blocks being released. Thus, the * available blocks calculation might yield a temporary result * which is lower than the actual available blocks. Keeping in * mind that only blocks that were allocated can be moved from * TX to RX, tx_blocks_available should never decrease here. 
*/ wl->tx_blocks_available = max((int)wl->tx_blocks_available, avail); /* if more blocks are available now, tx work can be scheduled */ if (wl->tx_blocks_available > old_tx_blk_count) clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags); /* for AP update num of allocated TX blocks per link and ps status */ wl12xx_for_each_wlvif_ap(wl, wlvif) { wl12xx_irq_update_links_status(wl, wlvif, status); } /* update the host-chipset time offset */ getnstimeofday(&ts); wl->time_offset = (timespec_to_ns(&ts) >> 10) - (s64)le32_to_cpu(status->fw_localtime); } static void wl1271_flush_deferred_work(struct wl1271 *wl) { struct sk_buff *skb; /* Pass all received frames to the network stack */ while ((skb = skb_dequeue(&wl->deferred_rx_queue))) ieee80211_rx_ni(wl->hw, skb); /* Return sent skbs to the network stack */ while ((skb = skb_dequeue(&wl->deferred_tx_queue))) ieee80211_tx_status_ni(wl->hw, skb); } static void wl1271_netstack_work(struct work_struct *work) { struct wl1271 *wl = container_of(work, struct wl1271, netstack_work); do { wl1271_flush_deferred_work(wl); } while (skb_queue_len(&wl->deferred_rx_queue)); } #define WL1271_IRQ_MAX_LOOPS 256 static irqreturn_t wl1271_irq(int irq, void *cookie) { int ret; u32 intr; int loopcount = WL1271_IRQ_MAX_LOOPS; struct wl1271 *wl = (struct wl1271 *)cookie; bool done = false; unsigned int defer_count; unsigned long flags; /* TX might be handled here, avoid redundant work */ set_bit(WL1271_FLAG_TX_PENDING, &wl->flags); cancel_work_sync(&wl->tx_work); /* * In case edge triggered interrupt must be used, we cannot iterate * more than once without introducing race conditions with the hardirq. 
*/ if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ) loopcount = 1; mutex_lock(&wl->mutex); wl1271_debug(DEBUG_IRQ, "IRQ work"); if (unlikely(wl->state == WL1271_STATE_OFF)) goto out; ret = wl1271_ps_elp_wakeup(wl); if (ret < 0) goto out; while (!done && loopcount--) { /* * In order to avoid a race with the hardirq, clear the flag * before acknowledging the chip. Since the mutex is held, * wl1271_ps_elp_wakeup cannot be called concurrently. */ clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags); smp_mb__after_clear_bit(); wl12xx_fw_status(wl, wl->fw_status); intr = le32_to_cpu(wl->fw_status->intr); intr &= WL1271_INTR_MASK; if (!intr) { done = true; continue; } if (unlikely(intr & WL1271_ACX_INTR_WATCHDOG)) { wl1271_error("watchdog interrupt received! " "starting recovery."); wl12xx_queue_recovery_work(wl); /* restarting the chip. ignore any other interrupt. */ goto out; } if (likely(intr & WL1271_ACX_INTR_DATA)) { wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_DATA"); wl12xx_rx(wl, wl->fw_status); /* Check if any tx blocks were freed */ spin_lock_irqsave(&wl->wl_lock, flags); if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) && wl1271_tx_total_queue_count(wl) > 0) { spin_unlock_irqrestore(&wl->wl_lock, flags); /* * In order to avoid starvation of the TX path, * call the work function directly. 
*/ wl1271_tx_work_locked(wl); } else { spin_unlock_irqrestore(&wl->wl_lock, flags); } /* check for tx results */ if (wl->fw_status->tx_results_counter != (wl->tx_results_count & 0xff)) wl1271_tx_complete(wl); /* Make sure the deferred queues don't get too long */ defer_count = skb_queue_len(&wl->deferred_tx_queue) + skb_queue_len(&wl->deferred_rx_queue); if (defer_count > WL1271_DEFERRED_QUEUE_LIMIT) wl1271_flush_deferred_work(wl); } if (intr & WL1271_ACX_INTR_EVENT_A) { wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_A"); wl1271_event_handle(wl, 0); } if (intr & WL1271_ACX_INTR_EVENT_B) { wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_B"); wl1271_event_handle(wl, 1); } if (intr & WL1271_ACX_INTR_INIT_COMPLETE) wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_INIT_COMPLETE"); if (intr & WL1271_ACX_INTR_HW_AVAILABLE) wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_HW_AVAILABLE"); } wl1271_ps_elp_sleep(wl); out: spin_lock_irqsave(&wl->wl_lock, flags); /* In case TX was not handled here, queue TX work */ clear_bit(WL1271_FLAG_TX_PENDING, &wl->flags); if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) && wl1271_tx_total_queue_count(wl) > 0) ieee80211_queue_work(wl->hw, &wl->tx_work); spin_unlock_irqrestore(&wl->wl_lock, flags); mutex_unlock(&wl->mutex); return IRQ_HANDLED; } struct vif_counter_data { u8 counter; struct ieee80211_vif *cur_vif; bool cur_vif_running; }; static void wl12xx_vif_count_iter(void *data, u8 *mac, struct ieee80211_vif *vif) { struct vif_counter_data *counter = data; counter->counter++; if (counter->cur_vif == vif) counter->cur_vif_running = true; } /* caller must not hold wl->mutex, as it might deadlock */ static void wl12xx_get_vif_count(struct ieee80211_hw *hw, struct ieee80211_vif *cur_vif, struct vif_counter_data *data) { memset(data, 0, sizeof(*data)); data->cur_vif = cur_vif; ieee80211_iterate_active_interfaces(hw, wl12xx_vif_count_iter, data); } static int wl12xx_fetch_firmware(struct wl1271 *wl, bool plt) { const struct firmware *fw; const char *fw_name; 
enum wl12xx_fw_type fw_type; int ret; if (plt) { fw_type = WL12XX_FW_TYPE_PLT; if (wl->chip.id == CHIP_ID_1283_PG20) fw_name = WL128X_PLT_FW_NAME; else fw_name = WL127X_PLT_FW_NAME; } else { /* * we can't call wl12xx_get_vif_count() here because * wl->mutex is taken, so use the cached last_vif_count value */ if (wl->last_vif_count > 1) { fw_type = WL12XX_FW_TYPE_MULTI; if (wl->chip.id == CHIP_ID_1283_PG20) fw_name = WL128X_FW_NAME_MULTI; else fw_name = WL127X_FW_NAME_MULTI; } else { fw_type = WL12XX_FW_TYPE_NORMAL; if (wl->chip.id == CHIP_ID_1283_PG20) fw_name = WL128X_FW_NAME_SINGLE; else fw_name = WL127X_FW_NAME_SINGLE; } } if (wl->fw_type == fw_type) return 0; wl1271_debug(DEBUG_BOOT, "booting firmware %s", fw_name); ret = request_firmware(&fw, fw_name, wl->dev); if (ret < 0) { wl1271_error("could not get firmware %s: %d", fw_name, ret); return ret; } if (fw->size % 4) { wl1271_error("firmware size is not multiple of 32 bits: %zu", fw->size); ret = -EILSEQ; goto out; } vfree(wl->fw); wl->fw_type = WL12XX_FW_TYPE_NONE; wl->fw_len = fw->size; wl->fw = vmalloc(wl->fw_len); if (!wl->fw) { wl1271_error("could not allocate memory for the firmware"); ret = -ENOMEM; goto out; } memcpy(wl->fw, fw->data, wl->fw_len); ret = 0; wl->fw_type = fw_type; out: release_firmware(fw); return ret; } static int wl1271_fetch_nvs(struct wl1271 *wl) { const struct firmware *fw; int ret; ret = request_firmware(&fw, WL12XX_NVS_NAME, wl->dev); if (ret < 0) { wl1271_error("could not get nvs file %s: %d", WL12XX_NVS_NAME, ret); return ret; } wl->nvs = kmemdup(fw->data, fw->size, GFP_KERNEL); if (!wl->nvs) { wl1271_error("could not allocate memory for the nvs file"); ret = -ENOMEM; goto out; } wl->nvs_len = fw->size; out: release_firmware(fw); return ret; } void wl12xx_queue_recovery_work(struct wl1271 *wl) { if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) ieee80211_queue_work(wl->hw, &wl->recovery_work); } size_t wl12xx_copy_fwlog(struct wl1271 *wl, u8 *memblock, size_t maxlen) 
{ size_t len = 0; /* The FW log is a length-value list, find where the log end */ while (len < maxlen) { if (memblock[len] == 0) break; if (len + memblock[len] + 1 > maxlen) break; len += memblock[len] + 1; } /* Make sure we have enough room */ len = min(len, (size_t)(PAGE_SIZE - wl->fwlog_size)); /* Fill the FW log file, consumed by the sysfs fwlog entry */ memcpy(wl->fwlog + wl->fwlog_size, memblock, len); wl->fwlog_size += len; return len; } static void wl12xx_read_fwlog_panic(struct wl1271 *wl) { u32 addr; u32 first_addr; u8 *block; if ((wl->quirks & WL12XX_QUIRK_FWLOG_NOT_IMPLEMENTED) || (wl->conf.fwlog.mode != WL12XX_FWLOG_ON_DEMAND) || (wl->conf.fwlog.mem_blocks == 0)) return; wl1271_info("Reading FW panic log"); block = kmalloc(WL12XX_HW_BLOCK_SIZE, GFP_KERNEL); if (!block) return; /* * Make sure the chip is awake and the logger isn't active. * This might fail if the firmware hanged. */ if (!wl1271_ps_elp_wakeup(wl)) wl12xx_cmd_stop_fwlog(wl); /* Read the first memory block address */ wl12xx_fw_status(wl, wl->fw_status); first_addr = le32_to_cpu(wl->fw_status->log_start_addr); if (!first_addr) goto out; /* Traverse the memory blocks linked list */ addr = first_addr; do { memset(block, 0, WL12XX_HW_BLOCK_SIZE); wl1271_read_hwaddr(wl, addr, block, WL12XX_HW_BLOCK_SIZE, false); /* * Memory blocks are linked to one another. The first 4 bytes * of each memory block hold the hardware address of the next * one. The last memory block points to the first one. 
*/ addr = le32_to_cpup((__le32 *)block); if (!wl12xx_copy_fwlog(wl, block + sizeof(addr), WL12XX_HW_BLOCK_SIZE - sizeof(addr))) break; } while (addr && (addr != first_addr)); wake_up_interruptible(&wl->fwlog_waitq); out: kfree(block); } static void wl1271_recovery_work(struct work_struct *work) { struct wl1271 *wl = container_of(work, struct wl1271, recovery_work); struct wl12xx_vif *wlvif; struct ieee80211_vif *vif; mutex_lock(&wl->mutex); if (wl->state != WL1271_STATE_ON || wl->plt) goto out_unlock; /* Avoid a recursive recovery */ set_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags); wl12xx_read_fwlog_panic(wl); wl1271_info("Hardware recovery in progress. FW ver: %s pc: 0x%x", wl->chip.fw_ver_str, wl1271_read32(wl, SCR_PAD4)); BUG_ON(bug_on_recovery && !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags)); /* * Advance security sequence number to overcome potential progress * in the firmware during recovery. This doens't hurt if the network is * not encrypted. */ wl12xx_for_each_wlvif(wl, wlvif) { if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) || test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) wlvif->tx_security_seq += WL1271_TX_SQN_POST_RECOVERY_PADDING; } /* Prevent spurious TX during FW restart */ ieee80211_stop_queues(wl->hw); if (wl->sched_scanning) { ieee80211_sched_scan_stopped(wl->hw); wl->sched_scanning = false; } /* reboot the chipset */ while (!list_empty(&wl->wlvif_list)) { wlvif = list_first_entry(&wl->wlvif_list, struct wl12xx_vif, list); vif = wl12xx_wlvif_to_vif(wlvif); __wl1271_op_remove_interface(wl, vif, false); } mutex_unlock(&wl->mutex); wl1271_op_stop(wl->hw); clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags); ieee80211_restart_hw(wl->hw); /* * Its safe to enable TX now - the queues are stopped after a request * to restart the HW. 
*/ ieee80211_wake_queues(wl->hw); return; out_unlock: mutex_unlock(&wl->mutex); } static void wl1271_fw_wakeup(struct wl1271 *wl) { u32 elp_reg; elp_reg = ELPCTRL_WAKE_UP; wl1271_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, elp_reg); } static int wl1271_setup(struct wl1271 *wl) { wl->fw_status = kmalloc(sizeof(*wl->fw_status), GFP_KERNEL); if (!wl->fw_status) return -ENOMEM; wl->tx_res_if = kmalloc(sizeof(*wl->tx_res_if), GFP_KERNEL); if (!wl->tx_res_if) { kfree(wl->fw_status); return -ENOMEM; } return 0; } static int wl12xx_set_power_on(struct wl1271 *wl) { int ret; msleep(WL1271_PRE_POWER_ON_SLEEP); ret = wl1271_power_on(wl); if (ret < 0) goto out; msleep(WL1271_POWER_ON_SLEEP); wl1271_io_reset(wl); wl1271_io_init(wl); wl1271_set_partition(wl, &wl12xx_part_table[PART_DOWN]); /* ELP module wake up */ wl1271_fw_wakeup(wl); out: return ret; } static int wl12xx_chip_wakeup(struct wl1271 *wl, bool plt) { int ret = 0; ret = wl12xx_set_power_on(wl); if (ret < 0) goto out; /* * For wl127x based devices we could use the default block * size (512 bytes), but due to a bug in the sdio driver, we * need to set it explicitly after the chip is powered on. To * simplify the code and since the performance impact is * negligible, we use the same block size for all different * chip types. 
*/ if (!wl1271_set_block_size(wl)) wl->quirks |= WL12XX_QUIRK_NO_BLOCKSIZE_ALIGNMENT; switch (wl->chip.id) { case CHIP_ID_1271_PG10: wl1271_warning("chip id 0x%x (1271 PG10) support is obsolete", wl->chip.id); ret = wl1271_setup(wl); if (ret < 0) goto out; wl->quirks |= WL12XX_QUIRK_NO_BLOCKSIZE_ALIGNMENT; break; case CHIP_ID_1271_PG20: wl1271_debug(DEBUG_BOOT, "chip id 0x%x (1271 PG20)", wl->chip.id); ret = wl1271_setup(wl); if (ret < 0) goto out; wl->quirks |= WL12XX_QUIRK_NO_BLOCKSIZE_ALIGNMENT; break; case CHIP_ID_1283_PG20: wl1271_debug(DEBUG_BOOT, "chip id 0x%x (1283 PG20)", wl->chip.id); ret = wl1271_setup(wl); if (ret < 0) goto out; break; case CHIP_ID_1283_PG10: default: wl1271_warning("unsupported chip id: 0x%x", wl->chip.id); ret = -ENODEV; goto out; } ret = wl12xx_fetch_firmware(wl, plt); if (ret < 0) goto out; /* No NVS from netlink, try to get it from the filesystem */ if (wl->nvs == NULL) { ret = wl1271_fetch_nvs(wl); if (ret < 0) goto out; } out: return ret; } int wl1271_plt_start(struct wl1271 *wl) { int retries = WL1271_BOOT_RETRIES; struct wiphy *wiphy = wl->hw->wiphy; int ret; mutex_lock(&wl->mutex); wl1271_notice("power up"); if (wl->state != WL1271_STATE_OFF) { wl1271_error("cannot go into PLT state because not " "in off state: %d", wl->state); ret = -EBUSY; goto out; } while (retries) { retries--; ret = wl12xx_chip_wakeup(wl, true); if (ret < 0) goto power_off; ret = wl1271_boot(wl); if (ret < 0) goto power_off; ret = wl1271_plt_init(wl); if (ret < 0) goto irq_disable; wl->plt = true; wl->state = WL1271_STATE_ON; wl1271_notice("firmware booted in PLT mode (%s)", wl->chip.fw_ver_str); /* update hw/fw version info in wiphy struct */ wiphy->hw_version = wl->chip.id; strncpy(wiphy->fw_version, wl->chip.fw_ver_str, sizeof(wiphy->fw_version)); goto out; irq_disable: mutex_unlock(&wl->mutex); /* Unlocking the mutex in the middle of handling is inherently unsafe. 
In this case we deem it safe to do, because we need to let any possibly pending IRQ out of the system (and while we are WL1271_STATE_OFF the IRQ work function will not do anything.) Also, any other possible concurrent operations will fail due to the current state, hence the wl1271 struct should be safe. */ wl1271_disable_interrupts(wl); wl1271_flush_deferred_work(wl); cancel_work_sync(&wl->netstack_work); mutex_lock(&wl->mutex); power_off: wl1271_power_off(wl); } wl1271_error("firmware boot in PLT mode failed despite %d retries", WL1271_BOOT_RETRIES); out: mutex_unlock(&wl->mutex); return ret; } int wl1271_plt_stop(struct wl1271 *wl) { int ret = 0; wl1271_notice("power down"); /* * Interrupts must be disabled before setting the state to OFF. * Otherwise, the interrupt handler might be called and exit without * reading the interrupt status. */ wl1271_disable_interrupts(wl); mutex_lock(&wl->mutex); if (!wl->plt) { mutex_unlock(&wl->mutex); /* * This will not necessarily enable interrupts as interrupts * may have been disabled when op_stop was called. It will, * however, balance the above call to disable_interrupts(). 
*/ wl1271_enable_interrupts(wl); wl1271_error("cannot power down because not in PLT " "state: %d", wl->state); ret = -EBUSY; goto out; } mutex_unlock(&wl->mutex); wl1271_flush_deferred_work(wl); cancel_work_sync(&wl->netstack_work); cancel_work_sync(&wl->recovery_work); cancel_delayed_work_sync(&wl->elp_work); cancel_delayed_work_sync(&wl->tx_watchdog_work); mutex_lock(&wl->mutex); wl1271_power_off(wl); wl->flags = 0; wl->state = WL1271_STATE_OFF; wl->plt = false; wl->rx_counter = 0; mutex_unlock(&wl->mutex); out: return ret; } static void wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb) { struct wl1271 *wl = hw->priv; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct ieee80211_vif *vif = info->control.vif; struct wl12xx_vif *wlvif = NULL; unsigned long flags; int q, mapping; u8 hlid; if (vif) wlvif = wl12xx_vif_to_data(vif); mapping = skb_get_queue_mapping(skb); q = wl1271_tx_get_queue(mapping); hlid = wl12xx_tx_get_hlid(wl, wlvif, skb); spin_lock_irqsave(&wl->wl_lock, flags); /* queue the packet */ if (hlid == WL12XX_INVALID_LINK_ID || (wlvif && !test_bit(hlid, wlvif->links_map))) { wl1271_debug(DEBUG_TX, "DROP skb hlid %d q %d", hlid, q); ieee80211_free_txskb(hw, skb); goto out; } wl1271_debug(DEBUG_TX, "queue skb hlid %d q %d len %d", hlid, q, skb->len); skb_queue_tail(&wl->links[hlid].tx_queue[q], skb); wl->tx_queue_count[q]++; /* * The workqueue is slow to process the tx_queue and we need stop * the queue here, otherwise the queue will get too long. */ if (wl->tx_queue_count[q] >= WL1271_TX_QUEUE_HIGH_WATERMARK) { wl1271_debug(DEBUG_TX, "op_tx: stopping queues for q %d", q); ieee80211_stop_queue(wl->hw, mapping); set_bit(q, &wl->stopped_queues_map); } /* * The chip specific setup must run before the first TX packet - * before that, the tx_work will not be initialized! 
*/ if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) && !test_bit(WL1271_FLAG_TX_PENDING, &wl->flags)) ieee80211_queue_work(wl->hw, &wl->tx_work); out: spin_unlock_irqrestore(&wl->wl_lock, flags); } int wl1271_tx_dummy_packet(struct wl1271 *wl) { unsigned long flags; int q; /* no need to queue a new dummy packet if one is already pending */ if (test_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags)) return 0; q = wl1271_tx_get_queue(skb_get_queue_mapping(wl->dummy_packet)); spin_lock_irqsave(&wl->wl_lock, flags); set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags); wl->tx_queue_count[q]++; spin_unlock_irqrestore(&wl->wl_lock, flags); /* The FW is low on RX memory blocks, so send the dummy packet asap */ if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags)) wl1271_tx_work_locked(wl); /* * If the FW TX is busy, TX work will be scheduled by the threaded * interrupt handler function */ return 0; } /* * The size of the dummy packet should be at least 1400 bytes. However, in * order to minimize the number of bus transactions, aligning it to 512 bytes * boundaries could be beneficial, performance wise */ #define TOTAL_TX_DUMMY_PACKET_SIZE (ALIGN(1400, 512)) static struct sk_buff *wl12xx_alloc_dummy_packet(struct wl1271 *wl) { struct sk_buff *skb; struct ieee80211_hdr_3addr *hdr; unsigned int dummy_packet_size; dummy_packet_size = TOTAL_TX_DUMMY_PACKET_SIZE - sizeof(struct wl1271_tx_hw_descr) - sizeof(*hdr); skb = dev_alloc_skb(TOTAL_TX_DUMMY_PACKET_SIZE); if (!skb) { wl1271_warning("Failed to allocate a dummy packet skb"); return NULL; } skb_reserve(skb, sizeof(struct wl1271_tx_hw_descr)); hdr = (struct ieee80211_hdr_3addr *) skb_put(skb, sizeof(*hdr)); memset(hdr, 0, sizeof(*hdr)); hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_NULLFUNC | IEEE80211_FCTL_TODS); memset(skb_put(skb, dummy_packet_size), 0, dummy_packet_size); /* Dummy packets require the TID to be management */ skb->priority = WL1271_TID_MGMT; /* Initialize all fields that might be 
used */ skb_set_queue_mapping(skb, 0); memset(IEEE80211_SKB_CB(skb), 0, sizeof(struct ieee80211_tx_info)); return skb; } #ifdef CONFIG_PM static int wl1271_configure_suspend_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif) { int ret = 0; mutex_lock(&wl->mutex); if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) goto out_unlock; ret = wl1271_ps_elp_wakeup(wl); if (ret < 0) goto out_unlock; ret = wl1271_acx_wake_up_conditions(wl, wlvif, wl->conf.conn.suspend_wake_up_event, wl->conf.conn.suspend_listen_interval); if (ret < 0) wl1271_error("suspend: set wake up conditions failed: %d", ret); wl1271_ps_elp_sleep(wl); out_unlock: mutex_unlock(&wl->mutex); return ret; } static int wl1271_configure_suspend_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif) { int ret = 0; mutex_lock(&wl->mutex); if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) goto out_unlock; ret = wl1271_ps_elp_wakeup(wl); if (ret < 0) goto out_unlock; ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true); wl1271_ps_elp_sleep(wl); out_unlock: mutex_unlock(&wl->mutex); return ret; } static int wl1271_configure_suspend(struct wl1271 *wl, struct wl12xx_vif *wlvif) { if (wlvif->bss_type == BSS_TYPE_STA_BSS) return wl1271_configure_suspend_sta(wl, wlvif); if (wlvif->bss_type == BSS_TYPE_AP_BSS) return wl1271_configure_suspend_ap(wl, wlvif); return 0; } static void wl1271_configure_resume(struct wl1271 *wl, struct wl12xx_vif *wlvif) { int ret = 0; bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS; bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS; if ((!is_ap) && (!is_sta)) return; mutex_lock(&wl->mutex); ret = wl1271_ps_elp_wakeup(wl); if (ret < 0) goto out; if (is_sta) { ret = wl1271_acx_wake_up_conditions(wl, wlvif, wl->conf.conn.wake_up_event, wl->conf.conn.listen_interval); if (ret < 0) wl1271_error("resume: wake up conditions failed: %d", ret); } else if (is_ap) { ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false); } wl1271_ps_elp_sleep(wl); out: mutex_unlock(&wl->mutex); } static int 
wl1271_op_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wow) { struct wl1271 *wl = hw->priv; struct wl12xx_vif *wlvif; int ret; wl1271_debug(DEBUG_MAC80211, "mac80211 suspend wow=%d", !!wow); WARN_ON(!wow || !wow->any); wl1271_tx_flush(wl); wl->wow_enabled = true; wl12xx_for_each_wlvif(wl, wlvif) { ret = wl1271_configure_suspend(wl, wlvif); if (ret < 0) { wl1271_warning("couldn't prepare device to suspend"); return ret; } } /* flush any remaining work */ wl1271_debug(DEBUG_MAC80211, "flushing remaining works"); /* * disable and re-enable interrupts in order to flush * the threaded_irq */ wl1271_disable_interrupts(wl); /* * set suspended flag to avoid triggering a new threaded_irq * work. no need for spinlock as interrupts are disabled. */ set_bit(WL1271_FLAG_SUSPENDED, &wl->flags); wl1271_enable_interrupts(wl); flush_work(&wl->tx_work); flush_delayed_work(&wl->elp_work); return 0; } static int wl1271_op_resume(struct ieee80211_hw *hw) { struct wl1271 *wl = hw->priv; struct wl12xx_vif *wlvif; unsigned long flags; bool run_irq_work = false; wl1271_debug(DEBUG_MAC80211, "mac80211 resume wow=%d", wl->wow_enabled); WARN_ON(!wl->wow_enabled); /* * re-enable irq_work enqueuing, and call irq_work directly if * there is a pending work. */ spin_lock_irqsave(&wl->wl_lock, flags); clear_bit(WL1271_FLAG_SUSPENDED, &wl->flags); if (test_and_clear_bit(WL1271_FLAG_PENDING_WORK, &wl->flags)) run_irq_work = true; spin_unlock_irqrestore(&wl->wl_lock, flags); if (run_irq_work) { wl1271_debug(DEBUG_MAC80211, "run postponed irq_work directly"); wl1271_irq(0, wl); wl1271_enable_interrupts(wl); } wl12xx_for_each_wlvif(wl, wlvif) { wl1271_configure_resume(wl, wlvif); } wl->wow_enabled = false; return 0; } #endif static int wl1271_op_start(struct ieee80211_hw *hw) { wl1271_debug(DEBUG_MAC80211, "mac80211 start"); /* * We have to delay the booting of the hardware because * we need to know the local MAC address before downloading and * initializing the firmware. 
The MAC address cannot be changed * after boot, and without the proper MAC address, the firmware * will not function properly. * * The MAC address is first known when the corresponding interface * is added. That is where we will initialize the hardware. */ return 0; } static void wl1271_op_stop(struct ieee80211_hw *hw) { struct wl1271 *wl = hw->priv; int i; wl1271_debug(DEBUG_MAC80211, "mac80211 stop"); /* * Interrupts must be disabled before setting the state to OFF. * Otherwise, the interrupt handler might be called and exit without * reading the interrupt status. */ wl1271_disable_interrupts(wl); mutex_lock(&wl->mutex); if (wl->state == WL1271_STATE_OFF) { mutex_unlock(&wl->mutex); /* * This will not necessarily enable interrupts as interrupts * may have been disabled when op_stop was called. It will, * however, balance the above call to disable_interrupts(). */ wl1271_enable_interrupts(wl); return; } /* * this must be before the cancel_work calls below, so that the work * functions don't perform further work. 
*/ wl->state = WL1271_STATE_OFF; mutex_unlock(&wl->mutex); wl1271_flush_deferred_work(wl); cancel_delayed_work_sync(&wl->scan_complete_work); cancel_work_sync(&wl->netstack_work); cancel_work_sync(&wl->tx_work); cancel_delayed_work_sync(&wl->elp_work); cancel_delayed_work_sync(&wl->tx_watchdog_work); /* let's notify MAC80211 about the remaining pending TX frames */ wl12xx_tx_reset(wl, true); mutex_lock(&wl->mutex); wl1271_power_off(wl); wl->band = IEEE80211_BAND_2GHZ; wl->rx_counter = 0; wl->power_level = WL1271_DEFAULT_POWER_LEVEL; wl->tx_blocks_available = 0; wl->tx_allocated_blocks = 0; wl->tx_results_count = 0; wl->tx_packets_count = 0; wl->time_offset = 0; wl->tx_spare_blocks = TX_HW_BLOCK_SPARE_DEFAULT; wl->ap_fw_ps_map = 0; wl->ap_ps_map = 0; wl->sched_scanning = false; memset(wl->roles_map, 0, sizeof(wl->roles_map)); memset(wl->links_map, 0, sizeof(wl->links_map)); memset(wl->roc_map, 0, sizeof(wl->roc_map)); wl->active_sta_count = 0; /* The system link is always allocated */ __set_bit(WL12XX_SYSTEM_HLID, wl->links_map); /* * this is performed after the cancel_work calls and the associated * mutex_lock, so that wl1271_op_add_interface does not accidentally * get executed before all these vars have been reset. 
 */
	wl->flags = 0;

	wl->tx_blocks_freed = 0;

	for (i = 0; i < NUM_TX_QUEUES; i++) {
		wl->tx_pkts_freed[i] = 0;
		wl->tx_allocated_pkts[i] = 0;
	}

	wl1271_debugfs_reset(wl);

	kfree(wl->fw_status);
	wl->fw_status = NULL;
	kfree(wl->tx_res_if);
	wl->tx_res_if = NULL;
	kfree(wl->target_mem_map);
	wl->target_mem_map = NULL;

	mutex_unlock(&wl->mutex);
}

/*
 * Reserve a free slot in the device's rate-policy bitmap.
 *
 * On success the slot index is returned through @idx and 0 is returned;
 * -EBUSY means all WL12XX_MAX_RATE_POLICIES slots are taken.
 */
static int wl12xx_allocate_rate_policy(struct wl1271 *wl, u8 *idx)
{
	u8 policy = find_first_zero_bit(wl->rate_policies_map,
					WL12XX_MAX_RATE_POLICIES);
	if (policy >= WL12XX_MAX_RATE_POLICIES)
		return -EBUSY;

	__set_bit(policy, wl->rate_policies_map);
	*idx = policy;
	return 0;
}

/*
 * Release a rate-policy slot and poison @idx with the out-of-range
 * value WL12XX_MAX_RATE_POLICIES so a stale reuse trips the WARN_ON.
 */
static void wl12xx_free_rate_policy(struct wl1271 *wl, u8 *idx)
{
	if (WARN_ON(*idx >= WL12XX_MAX_RATE_POLICIES))
		return;

	__clear_bit(*idx, wl->rate_policies_map);
	*idx = WL12XX_MAX_RATE_POLICIES;
}

/*
 * Map the vif's bss_type (plus its p2p flag) to the firmware role type.
 * Returns WL12XX_INVALID_ROLE_TYPE for an unknown bss_type.
 */
static u8 wl12xx_get_role_type(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
	switch (wlvif->bss_type) {
	case BSS_TYPE_AP_BSS:
		if (wlvif->p2p)
			return WL1271_ROLE_P2P_GO;
		else
			return WL1271_ROLE_AP;

	case BSS_TYPE_STA_BSS:
		if (wlvif->p2p)
			return WL1271_ROLE_P2P_CL;
		else
			return WL1271_ROLE_STA;

	case BSS_TYPE_IBSS:
		return WL1271_ROLE_IBSS;

	default:
		wl1271_error("invalid bss_type: %d", wlvif->bss_type);
	}
	return WL12XX_INVALID_ROLE_TYPE;
}

/*
 * Initialize the driver-private vif data: derive bss_type from the
 * mac80211 interface type, allocate rate policies and install defaults.
 * Returns -EOPNOTSUPP for interface types the driver cannot serve.
 */
static int wl12xx_init_vif_data(struct wl1271 *wl, struct ieee80211_vif *vif)
{
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	int i;

	/* clear everything but the persistent data */
	memset(wlvif, 0, offsetof(struct wl12xx_vif, persistent));

	switch (ieee80211_vif_type_p2p(vif)) {
	case NL80211_IFTYPE_P2P_CLIENT:
		wlvif->p2p = 1;
		/* fall-through */
	case NL80211_IFTYPE_STATION:
		wlvif->bss_type = BSS_TYPE_STA_BSS;
		break;
	case NL80211_IFTYPE_ADHOC:
		wlvif->bss_type = BSS_TYPE_IBSS;
		break;
	case NL80211_IFTYPE_P2P_GO:
		wlvif->p2p = 1;
		/* fall-through */
	case NL80211_IFTYPE_AP:
		wlvif->bss_type = BSS_TYPE_AP_BSS;
		break;
	default:
		wlvif->bss_type = MAX_BSS_TYPE;
		return -EOPNOTSUPP;
	}

	wlvif->role_id = WL12XX_INVALID_ROLE_ID;
	wlvif->dev_role_id =
WL12XX_INVALID_ROLE_ID; wlvif->dev_hlid = WL12XX_INVALID_LINK_ID; if (wlvif->bss_type == BSS_TYPE_STA_BSS || wlvif->bss_type == BSS_TYPE_IBSS) { /* init sta/ibss data */ wlvif->sta.hlid = WL12XX_INVALID_LINK_ID; wl12xx_allocate_rate_policy(wl, &wlvif->sta.basic_rate_idx); wl12xx_allocate_rate_policy(wl, &wlvif->sta.ap_rate_idx); wl12xx_allocate_rate_policy(wl, &wlvif->sta.p2p_rate_idx); } else { /* init ap data */ wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID; wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID; wl12xx_allocate_rate_policy(wl, &wlvif->ap.mgmt_rate_idx); wl12xx_allocate_rate_policy(wl, &wlvif->ap.bcast_rate_idx); for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++) wl12xx_allocate_rate_policy(wl, &wlvif->ap.ucast_rate_idx[i]); } wlvif->bitrate_masks[IEEE80211_BAND_2GHZ] = wl->conf.tx.basic_rate; wlvif->bitrate_masks[IEEE80211_BAND_5GHZ] = wl->conf.tx.basic_rate_5; wlvif->basic_rate_set = CONF_TX_RATE_MASK_BASIC; wlvif->basic_rate = CONF_TX_RATE_MASK_BASIC; wlvif->rate_set = CONF_TX_RATE_MASK_BASIC; wlvif->beacon_int = WL1271_DEFAULT_BEACON_INT; /* * mac80211 configures some values globally, while we treat them * per-interface. 
thus, on init, we have to copy them from wl */ wlvif->band = wl->band; wlvif->channel = wl->channel; wlvif->power_level = wl->power_level; INIT_WORK(&wlvif->rx_streaming_enable_work, wl1271_rx_streaming_enable_work); INIT_WORK(&wlvif->rx_streaming_disable_work, wl1271_rx_streaming_disable_work); INIT_LIST_HEAD(&wlvif->list); setup_timer(&wlvif->rx_streaming_timer, wl1271_rx_streaming_timer, (unsigned long) wlvif); return 0; } static bool wl12xx_init_fw(struct wl1271 *wl) { int retries = WL1271_BOOT_RETRIES; bool booted = false; struct wiphy *wiphy = wl->hw->wiphy; int ret; while (retries) { retries--; ret = wl12xx_chip_wakeup(wl, false); if (ret < 0) goto power_off; ret = wl1271_boot(wl); if (ret < 0) goto power_off; ret = wl1271_hw_init(wl); if (ret < 0) goto irq_disable; booted = true; break; irq_disable: mutex_unlock(&wl->mutex); /* Unlocking the mutex in the middle of handling is inherently unsafe. In this case we deem it safe to do, because we need to let any possibly pending IRQ out of the system (and while we are WL1271_STATE_OFF the IRQ work function will not do anything.) Also, any other possible concurrent operations will fail due to the current state, hence the wl1271 struct should be safe. */ wl1271_disable_interrupts(wl); wl1271_flush_deferred_work(wl); cancel_work_sync(&wl->netstack_work); mutex_lock(&wl->mutex); power_off: wl1271_power_off(wl); } if (!booted) { wl1271_error("firmware boot failed despite %d retries", WL1271_BOOT_RETRIES); goto out; } wl1271_info("firmware booted (%s)", wl->chip.fw_ver_str); /* update hw/fw version info in wiphy struct */ wiphy->hw_version = wl->chip.id; strncpy(wiphy->fw_version, wl->chip.fw_ver_str, sizeof(wiphy->fw_version)); /* * Now we know if 11a is supported (info from the NVS), so disable * 11a channels if not supported */ if (!wl->enable_11a) wiphy->bands[IEEE80211_BAND_5GHZ]->n_channels = 0; wl1271_debug(DEBUG_MAC80211, "11a is %ssupported", wl->enable_11a ? 
"" : "not "); wl->state = WL1271_STATE_ON; out: return booted; } static bool wl12xx_dev_role_started(struct wl12xx_vif *wlvif) { return wlvif->dev_hlid != WL12XX_INVALID_LINK_ID; } /* * Check whether a fw switch (i.e. moving from one loaded * fw to another) is needed. This function is also responsible * for updating wl->last_vif_count, so it must be called before * loading a non-plt fw (so the correct fw (single-role/multi-role) * will be used). */ static bool wl12xx_need_fw_change(struct wl1271 *wl, struct vif_counter_data vif_counter_data, bool add) { enum wl12xx_fw_type current_fw = wl->fw_type; u8 vif_count = vif_counter_data.counter; if (test_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags)) return false; /* increase the vif count if this is a new vif */ if (add && !vif_counter_data.cur_vif_running) vif_count++; wl->last_vif_count = vif_count; /* no need for fw change if the device is OFF */ if (wl->state == WL1271_STATE_OFF) return false; if (vif_count > 1 && current_fw == WL12XX_FW_TYPE_NORMAL) return true; if (vif_count <= 1 && current_fw == WL12XX_FW_TYPE_MULTI) return true; return false; } /* * Enter "forced psm". Make sure the sta is in psm against the ap, * to make the fw switch a bit more disconnection-persistent. 
 */
/*
 * Force every associated STA vif into power-save just before an
 * intended fw switch/recovery, so the AP is less likely to drop the
 * connection while the firmware is swapped (see wl12xx_need_fw_change()).
 */
static void wl12xx_force_active_psm(struct wl1271 *wl) { struct wl12xx_vif *wlvif; wl12xx_for_each_wlvif_sta(wl, wlvif) { wl1271_ps_set_mode(wl, wlvif, STATION_POWER_SAVE_MODE); } }
/*
 * mac80211 add_interface op.  Initializes per-vif data, boots the fw on
 * first use, enables the fw role(s) for the vif and links it into
 * wl->wlvif_list.  May instead trigger a synchronous recovery (fw
 * switch) and return 0 when a different fw flavor is needed.
 * Returns 0 or a negative errno.
 */
static int wl1271_op_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct wl1271 *wl = hw->priv; struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); struct vif_counter_data vif_count; int ret = 0; u8 role_type; bool booted = false; vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER | IEEE80211_VIF_SUPPORTS_CQM_RSSI; wl1271_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM", ieee80211_vif_type_p2p(vif), vif->addr); wl12xx_get_vif_count(hw, vif, &vif_count); mutex_lock(&wl->mutex); ret = wl1271_ps_elp_wakeup(wl); if (ret < 0) goto out_unlock; /* * in some very corner case HW recovery scenarios its possible to * get here before __wl1271_op_remove_interface is complete, so * opt out if that is the case. */ if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) || test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)) { ret = -EBUSY; goto out; } ret = wl12xx_init_vif_data(wl, vif); if (ret < 0) goto out; wlvif->wl = wl; role_type = wl12xx_get_role_type(wl, wlvif); if (role_type == WL12XX_INVALID_ROLE_TYPE) { ret = -EINVAL; goto out; } if (wl12xx_need_fw_change(wl, vif_count, true)) { wl12xx_force_active_psm(wl); set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags); mutex_unlock(&wl->mutex); wl1271_recovery_work(&wl->recovery_work); return 0; } /* * TODO: after the nvs issue will be solved, move this block * to start(), and make sure here the driver is ON. */ if (wl->state == WL1271_STATE_OFF) { /* * we still need this in order to configure the fw * while uploading the nvs */ memcpy(wl->addresses[0].addr, vif->addr, ETH_ALEN); booted = wl12xx_init_fw(wl); if (!booted) { ret = -EINVAL; goto out; } } if (wlvif->bss_type == BSS_TYPE_STA_BSS || wlvif->bss_type == BSS_TYPE_IBSS) { /* * The device role is a special role used for * rx and tx frames prior to association (as * the STA role can get packets only from * its associated bssid) */ ret = wl12xx_cmd_role_enable(wl, vif->addr, WL1271_ROLE_DEVICE, &wlvif->dev_role_id); if (ret < 0) goto out; } ret = wl12xx_cmd_role_enable(wl, vif->addr, role_type, &wlvif->role_id); if (ret < 0) goto out; ret = wl1271_init_vif_specific(wl, vif); if (ret < 0) goto out; list_add(&wlvif->list, &wl->wlvif_list); set_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags); if (wlvif->bss_type == BSS_TYPE_AP_BSS) wl->ap_count++; else wl->sta_count++; out: wl1271_ps_elp_sleep(wl); out_unlock: mutex_unlock(&wl->mutex); return ret; }
/*
 * Tear down one vif: idle any in-flight scan on it, disable its fw
 * roles (unless a recovery is in progress), release rate policies and
 * hlids, and unlink it from wl->wlvif_list.  Called with wl->mutex
 * held; the mutex is temporarily dropped at the end to sync the
 * rx-streaming timer/works.  NOTE(review): reset_tx_queues is accepted
 * but not referenced in this body — presumably kept for API symmetry;
 * confirm against callers.
 */
static void __wl1271_op_remove_interface(struct wl1271 *wl, struct ieee80211_vif *vif, bool reset_tx_queues) { struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); int i, ret; wl1271_debug(DEBUG_MAC80211, "mac80211 remove interface"); if (!test_and_clear_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)) return; /* because of hardware recovery, we may get here twice */ if (wl->state != WL1271_STATE_ON) return; wl1271_info("down"); if (wl->scan.state != WL1271_SCAN_STATE_IDLE && wl->scan_vif == vif) { /* * Rearm the tx watchdog just before idling scan. This * prevents just-finished scans from triggering the watchdog */ wl12xx_rearm_tx_watchdog_locked(wl); wl->scan.state = WL1271_SCAN_STATE_IDLE; memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch)); wl->scan_vif = NULL; wl->scan.req = NULL; ieee80211_scan_completed(wl->hw, true); } if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) { /* disable active roles */ ret = wl1271_ps_elp_wakeup(wl); if (ret < 0) goto deinit; if (wlvif->bss_type == BSS_TYPE_STA_BSS || wlvif->bss_type == BSS_TYPE_IBSS) { if (wl12xx_dev_role_started(wlvif)) wl12xx_stop_dev(wl, wlvif); ret = wl12xx_cmd_role_disable(wl, &wlvif->dev_role_id); if (ret < 0) goto deinit; } ret = wl12xx_cmd_role_disable(wl, &wlvif->role_id); if (ret < 0) goto deinit; wl1271_ps_elp_sleep(wl); } deinit: /* clear all hlids (except system_hlid) */ wlvif->dev_hlid = WL12XX_INVALID_LINK_ID; if (wlvif->bss_type == BSS_TYPE_STA_BSS || wlvif->bss_type == BSS_TYPE_IBSS) { wlvif->sta.hlid = WL12XX_INVALID_LINK_ID; wl12xx_free_rate_policy(wl, &wlvif->sta.basic_rate_idx); wl12xx_free_rate_policy(wl, &wlvif->sta.ap_rate_idx); wl12xx_free_rate_policy(wl, &wlvif->sta.p2p_rate_idx); } else { wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID; wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID; wl12xx_free_rate_policy(wl, &wlvif->ap.mgmt_rate_idx); wl12xx_free_rate_policy(wl, &wlvif->ap.bcast_rate_idx); for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++) wl12xx_free_rate_policy(wl, &wlvif->ap.ucast_rate_idx[i]); } wl12xx_tx_reset_wlvif(wl, wlvif); wl1271_free_ap_keys(wl, wlvif); if (wl->last_wlvif == wlvif) wl->last_wlvif = NULL; list_del(&wlvif->list); memset(wlvif->ap.sta_hlid_map, 0, sizeof(wlvif->ap.sta_hlid_map)); wlvif->role_id = WL12XX_INVALID_ROLE_ID; wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID; if (wlvif->bss_type == BSS_TYPE_AP_BSS) wl->ap_count--; else wl->sta_count--; /* drop the lock while syncing timer/works that take it themselves */ mutex_unlock(&wl->mutex); del_timer_sync(&wlvif->rx_streaming_timer); cancel_work_sync(&wlvif->rx_streaming_enable_work); cancel_work_sync(&wlvif->rx_streaming_disable_work); mutex_lock(&wl->mutex); }
/*
 * mac80211 remove_interface op: locked wrapper around
 * __wl1271_op_remove_interface().  Walks wlvif_list to make sure the
 * vif is still registered (it may vanish during hw recovery), then
 * optionally queues a fw switch when the remaining vif count calls
 * for a different fw flavor.
 */
static void wl1271_op_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct wl1271 *wl = hw->priv; struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); struct wl12xx_vif *iter; struct vif_counter_data vif_count; bool cancel_recovery = true; wl12xx_get_vif_count(hw, vif, &vif_count); mutex_lock(&wl->mutex); if (wl->state == WL1271_STATE_OFF || !test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)) goto out; /* * wl->vif can be null here if someone shuts down the interface * just when hardware recovery has been started. */ wl12xx_for_each_wlvif(wl, iter) { if (iter != wlvif) continue; __wl1271_op_remove_interface(wl, vif, true); break; } WARN_ON(iter != wlvif); if (wl12xx_need_fw_change(wl, vif_count, false)) { wl12xx_force_active_psm(wl); set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags); wl12xx_queue_recovery_work(wl); cancel_recovery = false; } out: mutex_unlock(&wl->mutex); if (cancel_recovery) cancel_work_sync(&wl->recovery_work); }
/*
 * mac80211 change_interface op: implemented as remove + add with
 * WL1271_FLAG_VIF_CHANGE_IN_PROGRESS set so wl12xx_need_fw_change()
 * does not trigger a fw switch mid-change.
 */
static int wl12xx_op_change_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif, enum nl80211_iftype new_type, bool p2p) { struct wl1271 *wl = hw->priv; int ret; set_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags); wl1271_op_remove_interface(hw, vif); vif->type = new_type; vif->p2p = p2p; ret = wl1271_op_add_interface(hw, vif); clear_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags); return ret; }
/*
 * Issue a fw JOIN (role_start) for a STA/IBSS vif and, when already
 * associated, re-arm keep-alive state which JOIN clobbers.
 */
static int wl1271_join(struct wl1271 *wl, struct wl12xx_vif *wlvif, bool set_assoc) { int ret; bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS); /* * One of the side effects of the JOIN command is that is clears * WPA/WPA2 keys from the chipset. Performing a JOIN while associated * to a WPA/WPA2 access point will therefore kill the data-path. * Currently the only valid scenario for JOIN during association * is on roaming, in which case we will also be given new keys.
 * Keep the below message for now, unless it starts bothering * users who really like to roam a lot :) */ if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) wl1271_info("JOIN while associated."); /* clear encryption type */ wlvif->encryption_type = KEY_NONE; if (set_assoc) set_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags); if (is_ibss) ret = wl12xx_cmd_role_start_ibss(wl, wlvif); else ret = wl12xx_cmd_role_start_sta(wl, wlvif); if (ret < 0) goto out; if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) goto out; /* * The join command disable the keep-alive mode, shut down its process, * and also clear the template config, so we need to reset it all after * the join. The acx_aid starts the keep-alive process, and the order * of the commands below is relevant. */ ret = wl1271_acx_keep_alive_mode(wl, wlvif, true); if (ret < 0) goto out; ret = wl1271_acx_aid(wl, wlvif, wlvif->aid); if (ret < 0) goto out; ret = wl12xx_cmd_build_klv_null_data(wl, wlvif); if (ret < 0) goto out; ret = wl1271_acx_keep_alive_config(wl, wlvif, CMD_TEMPL_KLV_IDX_NULL_DATA, ACX_KEEP_ALIVE_TPL_VALID); if (ret < 0) goto out; out: return ret; }
/*
 * Counterpart of wl1271_join(): abort a pending channel switch, stop
 * the STA role (which disconnects us) and reset the TX security
 * counters for the next association.
 */
static int wl1271_unjoin(struct wl1271 *wl, struct wl12xx_vif *wlvif) { int ret; if (test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags)) { struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif); wl12xx_cmd_stop_channel_switch(wl); ieee80211_chswitch_done(vif, false); } /* to stop listening to a channel, we disconnect */ ret = wl12xx_cmd_role_stop_sta(wl, wlvif); if (ret < 0) goto out; /* reset TX security counters on a clean disconnect */ wlvif->tx_security_last_seq_lsb = 0; wlvif->tx_security_seq = 0; out: return ret; }
/* Reset the vif's basic/active rate sets to the mask of its current band. */
static void wl1271_set_band_rate(struct wl1271 *wl, struct wl12xx_vif *wlvif) { wlvif->basic_rate_set = wlvif->bitrate_masks[wlvif->band]; wlvif->rate_set = wlvif->basic_rate_set; }
/*
 * Transition a STA vif between idle and in-use.  Going idle: stop the
 * device role, drop to the minimum rate and invalidate keep-alive.
 * Leaving idle: stop any sched-scan (fw limitation: sched_scan only in
 * idle) and start the device role.  Returns 0 or a negative errno.
 */
static int wl1271_sta_handle_idle(struct wl1271 *wl, struct wl12xx_vif *wlvif, bool idle) { int ret; bool cur_idle = !test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags); if (idle == cur_idle) return 0; if (idle) { /* no need to croc if we weren't busy (e.g. during boot) */ if (wl12xx_dev_role_started(wlvif)) { ret = wl12xx_stop_dev(wl, wlvif); if (ret < 0) goto out; } wlvif->rate_set = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set); ret = wl1271_acx_sta_rate_policies(wl, wlvif); if (ret < 0) goto out; ret = wl1271_acx_keep_alive_config( wl, wlvif, CMD_TEMPL_KLV_IDX_NULL_DATA, ACX_KEEP_ALIVE_TPL_INVALID); if (ret < 0) goto out; clear_bit(WLVIF_FLAG_IN_USE, &wlvif->flags); } else { /* The current firmware only supports sched_scan in idle */ if (wl->sched_scanning) { wl1271_scan_sched_scan_stop(wl); ieee80211_sched_scan_stopped(wl->hw); } ret = wl12xx_start_dev(wl, wlvif); if (ret < 0) goto out; set_bit(WLVIF_FLAG_IN_USE, &wlvif->flags); } out: return ret; }
/*
 * Apply the relevant parts of a mac80211 config change (channel/band,
 * power-save mode, tx power) to a single vif.  Called per-vif from
 * wl1271_op_config() with wl->mutex held and the chip awake.
 */
static int wl12xx_config_vif(struct wl1271 *wl, struct wl12xx_vif *wlvif, struct ieee80211_conf *conf, u32 changed) { bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS); int channel, ret; channel = ieee80211_frequency_to_channel(conf->channel->center_freq); /* if the channel changes while joined, join again */ if (changed & IEEE80211_CONF_CHANGE_CHANNEL && ((wlvif->band != conf->channel->band) || (wlvif->channel != channel))) { /* send all pending packets */ wl1271_tx_work_locked(wl); wlvif->band = conf->channel->band; wlvif->channel = channel; if (!is_ap) { /* * FIXME: the mac80211 should really provide a fixed * rate to use here. for now, just use the smallest * possible rate for the band as a fixed rate for * association frames and other control messages. */ if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) wl1271_set_band_rate(wl, wlvif); wlvif->basic_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set); ret = wl1271_acx_sta_rate_policies(wl, wlvif); if (ret < 0) wl1271_warning("rate policy for channel " "failed %d", ret); /* * change the ROC channel. do it only if we are * not idle. otherwise, CROC will be called * anyway. */ if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) && wl12xx_dev_role_started(wlvif) && !(conf->flags & IEEE80211_CONF_IDLE)) { ret = wl12xx_stop_dev(wl, wlvif); if (ret < 0) return ret; ret = wl12xx_start_dev(wl, wlvif); if (ret < 0) return ret; } } } if ((changed & IEEE80211_CONF_CHANGE_PS) && !is_ap) { if ((conf->flags & IEEE80211_CONF_PS) && test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) && !test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) { int ps_mode; char *ps_mode_str; if (wl->conf.conn.forced_ps) { ps_mode = STATION_POWER_SAVE_MODE; ps_mode_str = "forced"; } else { ps_mode = STATION_AUTO_PS_MODE; ps_mode_str = "auto"; } wl1271_debug(DEBUG_PSM, "%s ps enabled", ps_mode_str); ret = wl1271_ps_set_mode(wl, wlvif, ps_mode); if (ret < 0) wl1271_warning("enter %s ps failed %d", ps_mode_str, ret); } else if (!(conf->flags & IEEE80211_CONF_PS) && test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) { wl1271_debug(DEBUG_PSM, "auto ps disabled"); ret = wl1271_ps_set_mode(wl, wlvif, STATION_ACTIVE_MODE); if (ret < 0) wl1271_warning("exit auto ps failed %d", ret); } } if (conf->power_level != wlvif->power_level) { ret = wl1271_acx_tx_power(wl, wlvif, conf->power_level); if (ret < 0) return ret; wlvif->power_level = conf->power_level; } return 0; }
/*
 * mac80211 config op: record global band/channel/power even while the
 * device is off, then fan the change out to every vif via
 * wl12xx_config_vif().
 */
static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed) { struct wl1271 *wl = hw->priv; struct wl12xx_vif *wlvif; struct ieee80211_conf *conf = &hw->conf; int channel, ret = 0; channel = ieee80211_frequency_to_channel(conf->channel->center_freq); wl1271_debug(DEBUG_MAC80211, "mac80211 config ch %d psm %s power %d %s" " changed 0x%x", channel, conf->flags & IEEE80211_CONF_PS ? "on" : "off", conf->power_level, conf->flags & IEEE80211_CONF_IDLE ? "idle" : "in use", changed); /* * mac80211 will go to idle nearly immediately after transmitting some * frames, such as the deauth. To make sure those frames reach the air, * wait here until the TX queue is fully flushed.
 */ if ((changed & IEEE80211_CONF_CHANGE_IDLE) && (conf->flags & IEEE80211_CONF_IDLE)) wl1271_tx_flush(wl); mutex_lock(&wl->mutex); /* we support configuring the channel and band even while off */ if (changed & IEEE80211_CONF_CHANGE_CHANNEL) { wl->band = conf->channel->band; wl->channel = channel; } if (changed & IEEE80211_CONF_CHANGE_POWER) wl->power_level = conf->power_level; if (unlikely(wl->state == WL1271_STATE_OFF)) goto out; ret = wl1271_ps_elp_wakeup(wl); if (ret < 0) goto out; /* configure each interface */ wl12xx_for_each_wlvif(wl, wlvif) { ret = wl12xx_config_vif(wl, wlvif, conf, changed); if (ret < 0) goto out_sleep; } out_sleep: wl1271_ps_elp_sleep(wl); out: mutex_unlock(&wl->mutex); return ret; }
/*
 * Multicast filter snapshot built in prepare_multicast() and consumed
 * (then freed) in configure_filter().
 */
struct wl1271_filter_params { bool enabled; int mc_list_length; u8 mc_list[ACX_MC_ADDRESS_GROUP_MAX][ETH_ALEN]; };
/*
 * mac80211 prepare_multicast op: snapshot the mc address list into a
 * heap-allocated wl1271_filter_params and smuggle the pointer to
 * configure_filter() through the u64 return value.  Returns 0 (no
 * filter data) when the device is off, allocation fails, or the list
 * exceeds ACX_MC_ADDRESS_GROUP_MAX (then enabled=false).
 */
static u64 wl1271_op_prepare_multicast(struct ieee80211_hw *hw, struct netdev_hw_addr_list *mc_list) { struct wl1271_filter_params *fp; struct netdev_hw_addr *ha; struct wl1271 *wl = hw->priv; if (unlikely(wl->state == WL1271_STATE_OFF)) return 0; fp = kzalloc(sizeof(*fp), GFP_ATOMIC); if (!fp) { wl1271_error("Out of memory setting filters."); return 0; } /* update multicast filtering parameters */ fp->mc_list_length = 0; if (netdev_hw_addr_list_count(mc_list) > ACX_MC_ADDRESS_GROUP_MAX) { fp->enabled = false; } else { fp->enabled = true; netdev_hw_addr_list_for_each(ha, mc_list) { memcpy(fp->mc_list[fp->mc_list_length], ha->addr, ETH_ALEN); fp->mc_list_length++; } } return (u64)(unsigned long)fp; }
/* rx filter flags this driver can honor */
#define WL1271_SUPPORTED_FILTERS (FIF_PROMISC_IN_BSS | \
				  FIF_ALLMULTI | \
				  FIF_FCSFAIL | \
				  FIF_BCN_PRBRESP_PROMISC | \
				  FIF_CONTROL | \
				  FIF_OTHER_BSS)
/*
 * mac80211 configure_filter op: program the fw group-address table on
 * every non-AP vif from the snapshot built by prepare_multicast(), and
 * free that snapshot.  Other filter bits are not programmable (the fw
 * derives filtering from role/ROC state, see comment below).
 */
static void wl1271_op_configure_filter(struct ieee80211_hw *hw, unsigned int changed, unsigned int *total, u64 multicast) { struct wl1271_filter_params *fp = (void *)(unsigned long)multicast; struct wl1271 *wl = hw->priv; struct wl12xx_vif *wlvif; int ret; wl1271_debug(DEBUG_MAC80211, "mac80211 configure filter changed %x" " total %x", changed, *total); mutex_lock(&wl->mutex); *total &= WL1271_SUPPORTED_FILTERS; changed &= WL1271_SUPPORTED_FILTERS; if (unlikely(wl->state == WL1271_STATE_OFF)) goto out; ret = wl1271_ps_elp_wakeup(wl); if (ret < 0) goto out; wl12xx_for_each_wlvif(wl, wlvif) { if (wlvif->bss_type != BSS_TYPE_AP_BSS) { if (*total & FIF_ALLMULTI) ret = wl1271_acx_group_address_tbl(wl, wlvif, false, NULL, 0); else if (fp) ret = wl1271_acx_group_address_tbl(wl, wlvif, fp->enabled, fp->mc_list, fp->mc_list_length); if (ret < 0) goto out_sleep; } } /* * the fw doesn't provide an api to configure the filters. instead, * the filters configuration is based on the active roles / ROC * state. */ out_sleep: wl1271_ps_elp_sleep(wl); out: mutex_unlock(&wl->mutex); kfree(fp); }
/*
 * Remember an AP group/pairwise key installed before the AP role has
 * started; it is replayed to the fw later by wl1271_ap_init_hwenc().
 * Returns 0, -EINVAL (oversized key or replacement attempt), -EBUSY
 * (table full) or -ENOMEM.
 */
static int wl1271_record_ap_key(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 id, u8 key_type, u8 key_size, const u8 *key, u8 hlid, u32 tx_seq_32, u16 tx_seq_16) { struct wl1271_ap_key *ap_key; int i; wl1271_debug(DEBUG_CRYPT, "record ap key id %d", (int)id); if (key_size > MAX_KEY_SIZE) return -EINVAL; /* * Find next free entry in ap_keys. Also check we are not replacing * an existing key.
 */ for (i = 0; i < MAX_NUM_KEYS; i++) { if (wlvif->ap.recorded_keys[i] == NULL) break; if (wlvif->ap.recorded_keys[i]->id == id) { wl1271_warning("trying to record key replacement"); return -EINVAL; } } if (i == MAX_NUM_KEYS) return -EBUSY; ap_key = kzalloc(sizeof(*ap_key), GFP_KERNEL); if (!ap_key) return -ENOMEM; ap_key->id = id; ap_key->key_type = key_type; ap_key->key_size = key_size; memcpy(ap_key->key, key, key_size); ap_key->hlid = hlid; ap_key->tx_seq_32 = tx_seq_32; ap_key->tx_seq_16 = tx_seq_16; wlvif->ap.recorded_keys[i] = ap_key; return 0; }
/* Free every key recorded by wl1271_record_ap_key() and NULL the slots. */
static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif) { int i; for (i = 0; i < MAX_NUM_KEYS; i++) { kfree(wlvif->ap.recorded_keys[i]); wlvif->ap.recorded_keys[i] = NULL; } }
/*
 * Replay the keys recorded before AP start into the fw, pushing the
 * default WEP key if any WEP key was set.  Recorded keys are always
 * freed on exit, even on error.  Keys with an invalid hlid are applied
 * to the broadcast link.
 */
static int wl1271_ap_init_hwenc(struct wl1271 *wl, struct wl12xx_vif *wlvif) { int i, ret = 0; struct wl1271_ap_key *key; bool wep_key_added = false; for (i = 0; i < MAX_NUM_KEYS; i++) { u8 hlid; if (wlvif->ap.recorded_keys[i] == NULL) break; key = wlvif->ap.recorded_keys[i]; hlid = key->hlid; if (hlid == WL12XX_INVALID_LINK_ID) hlid = wlvif->ap.bcast_hlid; ret = wl1271_cmd_set_ap_key(wl, wlvif, KEY_ADD_OR_REPLACE, key->id, key->key_type, key->key_size, key->key, hlid, key->tx_seq_32, key->tx_seq_16); if (ret < 0) goto out; if (key->key_type == KEY_WEP) wep_key_added = true; } if (wep_key_added) { ret = wl12xx_cmd_set_default_wep_key(wl, wlvif->default_key, wlvif->ap.bcast_hlid); if (ret < 0) goto out; } out: wl1271_free_ap_keys(wl, wlvif); return ret; }
/*
 * Program (or record, pre-AP-start) a key into the fw for either an AP
 * or a STA vif.  @action is KEY_ADD_OR_REPLACE/KEY_REMOVE; several
 * remove requests are deliberately ignored (returned 0) where the fw
 * clears keys on its own — see inline comments.  Returns 0 or a
 * negative errno.
 */
static int wl1271_set_key(struct wl1271 *wl, struct wl12xx_vif *wlvif, u16 action, u8 id, u8 key_type, u8 key_size, const u8 *key, u32 tx_seq_32, u16 tx_seq_16, struct ieee80211_sta *sta) { int ret; bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS); if (is_ap) { struct wl1271_station *wl_sta; u8 hlid; if (sta) { wl_sta = (struct wl1271_station *)sta->drv_priv; hlid = wl_sta->hlid; } else { hlid = wlvif->ap.bcast_hlid; } if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) { /* * We do not support removing keys after AP shutdown. * Pretend we do to make mac80211 happy. */ if (action != KEY_ADD_OR_REPLACE) return 0; ret = wl1271_record_ap_key(wl, wlvif, id, key_type, key_size, key, hlid, tx_seq_32, tx_seq_16); } else { ret = wl1271_cmd_set_ap_key(wl, wlvif, action, id, key_type, key_size, key, hlid, tx_seq_32, tx_seq_16); } if (ret < 0) return ret; } else { const u8 *addr; static const u8 bcast_addr[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; /* * A STA set to GEM cipher requires 2 tx spare blocks. * Return to default value when GEM cipher key is removed */ if (key_type == KEY_GEM) { if (action == KEY_ADD_OR_REPLACE) wl->tx_spare_blocks = 2; else if (action == KEY_REMOVE) wl->tx_spare_blocks = TX_HW_BLOCK_SPARE_DEFAULT; } addr = sta ? sta->addr : bcast_addr; if (is_zero_ether_addr(addr)) { /* We dont support TX only encryption */ return -EOPNOTSUPP; } /* The wl1271 does not allow to remove unicast keys - they will be cleared automatically on next CMD_JOIN. Ignore the request silently, as we dont want the mac80211 to emit an error message. */ if (action == KEY_REMOVE && !is_broadcast_ether_addr(addr)) return 0; /* don't remove key if hlid was already deleted */ if (action == KEY_REMOVE && wlvif->sta.hlid == WL12XX_INVALID_LINK_ID) return 0; ret = wl1271_cmd_set_sta_key(wl, wlvif, action, id, key_type, key_size, key, addr, tx_seq_32, tx_seq_16); if (ret < 0) return ret; /* the default WEP key needs to be configured at least once */ if (key_type == KEY_WEP) { ret = wl12xx_cmd_set_default_wep_key(wl, wlvif->default_key, wlvif->sta.hlid); if (ret < 0) return ret; } } return 0; }
/*
 * mac80211 set_key op: translate the cfg80211 cipher suite to a fw key
 * type (deriving TKIP/CCMP/GEM tx sequence counters from the vif's
 * tx_security_seq) and dispatch SET_KEY/DISABLE_KEY to wl1271_set_key().
 * Returns -EAGAIN while the device is off.
 */
static int wl1271_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct ieee80211_key_conf *key_conf) { struct wl1271 *wl = hw->priv; struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); int ret; u32 tx_seq_32 = 0; u16 tx_seq_16 = 0; u8 key_type; wl1271_debug(DEBUG_MAC80211, "mac80211 set key"); wl1271_debug(DEBUG_CRYPT, "CMD: 0x%x sta: %p", cmd, sta); wl1271_debug(DEBUG_CRYPT, "Key: algo:0x%x, id:%d, len:%d flags 0x%x", key_conf->cipher, key_conf->keyidx, key_conf->keylen, key_conf->flags); wl1271_dump(DEBUG_CRYPT, "KEY: ", key_conf->key, key_conf->keylen); mutex_lock(&wl->mutex); if (unlikely(wl->state == WL1271_STATE_OFF)) { ret = -EAGAIN; goto out_unlock; } ret = wl1271_ps_elp_wakeup(wl); if (ret < 0) goto out_unlock; switch (key_conf->cipher) { case WLAN_CIPHER_SUITE_WEP40: case WLAN_CIPHER_SUITE_WEP104: key_type = KEY_WEP; key_conf->hw_key_idx = key_conf->keyidx; break; case WLAN_CIPHER_SUITE_TKIP: key_type = KEY_TKIP; key_conf->hw_key_idx = key_conf->keyidx; tx_seq_32 = WL1271_TX_SECURITY_HI32(wlvif->tx_security_seq); tx_seq_16 = WL1271_TX_SECURITY_LO16(wlvif->tx_security_seq); break; case WLAN_CIPHER_SUITE_CCMP: key_type = KEY_AES; key_conf->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE; tx_seq_32 = WL1271_TX_SECURITY_HI32(wlvif->tx_security_seq); tx_seq_16 = WL1271_TX_SECURITY_LO16(wlvif->tx_security_seq); break; case WL1271_CIPHER_SUITE_GEM: key_type = KEY_GEM;
		tx_seq_32 = WL1271_TX_SECURITY_HI32(wlvif->tx_security_seq); tx_seq_16 = WL1271_TX_SECURITY_LO16(wlvif->tx_security_seq); break; default: wl1271_error("Unknown key algo 0x%x", key_conf->cipher); ret = -EOPNOTSUPP; goto out_sleep; } switch (cmd) { case SET_KEY: ret = wl1271_set_key(wl, wlvif, KEY_ADD_OR_REPLACE, key_conf->keyidx, key_type, key_conf->keylen, key_conf->key, tx_seq_32, tx_seq_16, sta); if (ret < 0) { wl1271_error("Could not add or replace key"); goto out_sleep; } /* * reconfiguring arp response if the unicast (or common) * encryption key type was changed */ if (wlvif->bss_type == BSS_TYPE_STA_BSS && (sta || key_type == KEY_WEP) && wlvif->encryption_type != key_type) { wlvif->encryption_type = key_type; ret = wl1271_cmd_build_arp_rsp(wl, wlvif); if (ret < 0) { wl1271_warning("build arp rsp failed: %d", ret); goto out_sleep; } } break; case DISABLE_KEY: ret = wl1271_set_key(wl, wlvif, KEY_REMOVE, key_conf->keyidx, key_type, key_conf->keylen, key_conf->key, 0, 0, sta); if (ret < 0) { wl1271_error("Could not remove key"); goto out_sleep; } break; default: wl1271_error("Unsupported key cmd 0x%x", cmd); ret = -EOPNOTSUPP; break; } out_sleep: wl1271_ps_elp_sleep(wl); out_unlock: mutex_unlock(&wl->mutex); return ret; }
/*
 * mac80211 hw_scan op: kick off a one-shot scan (first requested SSID
 * only).  Returns -EAGAIN while off (cfg80211 would otherwise wait for
 * a scan_completed that will never come) and -EBUSY while any role is
 * on a remain-on-channel.
 */
static int wl1271_op_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct cfg80211_scan_request *req) { struct wl1271 *wl = hw->priv; int ret; u8 *ssid = NULL; size_t len = 0; wl1271_debug(DEBUG_MAC80211, "mac80211 hw scan"); if (req->n_ssids) { ssid = req->ssids[0].ssid; len = req->ssids[0].ssid_len; } mutex_lock(&wl->mutex); if (wl->state == WL1271_STATE_OFF) { /* * We cannot return -EBUSY here because cfg80211 will expect * a call to ieee80211_scan_completed if we do - in this case * there won't be any call. */ ret = -EAGAIN; goto out; } ret = wl1271_ps_elp_wakeup(wl); if (ret < 0) goto out; /* fail if there is any role in ROC */ if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) { /* don't allow scanning right now */ ret = -EBUSY; goto out_sleep; } ret = wl1271_scan(hw->priv, vif, ssid, len, req); out_sleep: wl1271_ps_elp_sleep(wl); out: mutex_unlock(&wl->mutex); return ret; }
/*
 * mac80211 cancel_hw_scan op: stop an in-progress scan, reset scan
 * state and report completion (aborted=true) to mac80211, then sync
 * the deferred scan-complete work outside the mutex.
 */
static void wl1271_op_cancel_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct wl1271 *wl = hw->priv; int ret; wl1271_debug(DEBUG_MAC80211, "mac80211 cancel hw scan"); mutex_lock(&wl->mutex); if (wl->state == WL1271_STATE_OFF) goto out; if (wl->scan.state == WL1271_SCAN_STATE_IDLE) goto out; ret = wl1271_ps_elp_wakeup(wl); if (ret < 0) goto out; if (wl->scan.state != WL1271_SCAN_STATE_DONE) { ret = wl1271_scan_stop(wl); if (ret < 0) goto out_sleep; } /* * Rearm the tx watchdog just before idling scan. This * prevents just-finished scans from triggering the watchdog */ wl12xx_rearm_tx_watchdog_locked(wl); wl->scan.state = WL1271_SCAN_STATE_IDLE; memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch)); wl->scan_vif = NULL; wl->scan.req = NULL; ieee80211_scan_completed(wl->hw, true); out_sleep: wl1271_ps_elp_sleep(wl); out: mutex_unlock(&wl->mutex); cancel_delayed_work_sync(&wl->scan_complete_work); }
/*
 * mac80211 sched_scan_start op: configure and start a firmware
 * scheduled scan for the vif; sets wl->sched_scanning on success.
 */
static int wl1271_op_sched_scan_start(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct cfg80211_sched_scan_request *req, struct ieee80211_sched_scan_ies *ies) { struct wl1271 *wl = hw->priv; struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); int ret; wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_start"); mutex_lock(&wl->mutex); if (wl->state == WL1271_STATE_OFF) { ret = -EAGAIN; goto out; } ret = wl1271_ps_elp_wakeup(wl); if (ret < 0) goto out; ret = wl1271_scan_sched_scan_config(wl, wlvif, req, ies); if (ret < 0) goto out_sleep; ret = wl1271_scan_sched_scan_start(wl, wlvif); if (ret < 0) goto out_sleep; wl->sched_scanning = true; out_sleep: wl1271_ps_elp_sleep(wl); out: mutex_unlock(&wl->mutex); return ret; }
/* mac80211 sched_scan_stop op: stop the firmware scheduled scan. */
static void wl1271_op_sched_scan_stop(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct wl1271 *wl = hw->priv; int ret; wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_stop"); mutex_lock(&wl->mutex); if (wl->state == WL1271_STATE_OFF) goto out; ret = wl1271_ps_elp_wakeup(wl); if (ret < 0) goto out; wl1271_scan_sched_scan_stop(wl); wl1271_ps_elp_sleep(wl); out: mutex_unlock(&wl->mutex); }
/*
 * mac80211 set_frag_threshold op: push the fragmentation threshold to
 * the fw (global, not per-vif).  Returns -EAGAIN while off.
 */
static int wl1271_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value) { struct wl1271 *wl = hw->priv; int ret = 0; mutex_lock(&wl->mutex); if (unlikely(wl->state == WL1271_STATE_OFF)) { ret = -EAGAIN; goto out; } ret = wl1271_ps_elp_wakeup(wl); if (ret < 0) goto out; ret = wl1271_acx_frag_threshold(wl, value); if (ret < 0) wl1271_warning("wl1271_op_set_frag_threshold failed: %d", ret); wl1271_ps_elp_sleep(wl); out: mutex_unlock(&wl->mutex); return ret; }
/*
 * mac80211 set_rts_threshold op: push the RTS threshold to the fw for
 * every vif; per-vif failures are only warned about.
 */
static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value) { struct wl1271 *wl = hw->priv; struct wl12xx_vif *wlvif; int ret = 0; mutex_lock(&wl->mutex); if (unlikely(wl->state == WL1271_STATE_OFF)) { ret = -EAGAIN; goto out; } ret = wl1271_ps_elp_wakeup(wl); if (ret < 0) goto out; wl12xx_for_each_wlvif(wl, wlvif) { ret = wl1271_acx_rts_threshold(wl, wlvif, value); if (ret < 0) wl1271_warning("set rts threshold failed: %d", ret); } wl1271_ps_elp_sleep(wl); out: mutex_unlock(&wl->mutex); return ret; }
/*
 * Extract the SSID IE from a management frame in @skb (IEs start at
 * @offset) and cache it in wlvif->ssid/ssid_len.  Returns 0, -ENOENT
 * if no SSID IE is present, or -EINVAL if it is oversized.
 */
static int wl1271_ssid_set(struct ieee80211_vif *vif, struct sk_buff *skb, int offset) { struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); u8 ssid_len; const u8 *ptr = cfg80211_find_ie(WLAN_EID_SSID, skb->data + offset, skb->len - offset); if (!ptr) { wl1271_error("No SSID in IEs!"); return -ENOENT; } ssid_len = ptr[1]; if (ssid_len > IEEE80211_MAX_SSID_LEN) { wl1271_error("SSID is too long!"); return -EINVAL; } wlvif->ssid_len = ssid_len; memcpy(wlvif->ssid, ptr+2, ssid_len); return 0; }
static void wl12xx_remove_ie(struct 
sk_buff *skb, u8 eid, int ieoffset) { int len; const u8 *next, *end = skb->data + skb->len; u8 *ie = (u8 *)cfg80211_find_ie(eid, skb->data + ieoffset, skb->len - ieoffset); if (!ie) return; len = ie[1] + 2; next = ie + len; memmove(ie, next, end - next); skb_trim(skb, skb->len - len); } static void wl12xx_remove_vendor_ie(struct sk_buff *skb, unsigned int oui, u8 oui_type, int ieoffset) { int len; const u8 *next, *end = skb->data + skb->len; u8 *ie = (u8 *)cfg80211_find_vendor_ie(oui, oui_type, skb->data + ieoffset, skb->len - ieoffset); if (!ie) return; len = ie[1] + 2; next = ie + len; memmove(ie, next, end - next); skb_trim(skb, skb->len - len); } static int wl1271_ap_set_probe_resp_tmpl(struct wl1271 *wl, u32 rates, struct ieee80211_vif *vif) { struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); struct sk_buff *skb; int ret; skb = ieee80211_proberesp_get(wl->hw, vif); if (!skb) return -EOPNOTSUPP; ret = wl1271_cmd_template_set(wl, wlvif->role_id, CMD_TEMPL_AP_PROBE_RESPONSE, skb->data, skb->len, 0, rates); dev_kfree_skb(skb); return ret; } static int wl1271_ap_set_probe_resp_tmpl_legacy(struct wl1271 *wl, struct ieee80211_vif *vif, u8 *probe_rsp_data, size_t probe_rsp_len, u32 rates) { struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); struct ieee80211_bss_conf *bss_conf = &vif->bss_conf; u8 probe_rsp_templ[WL1271_CMD_TEMPL_MAX_SIZE]; int ssid_ie_offset, ie_offset, templ_len; const u8 *ptr; /* no need to change probe response if the SSID is set correctly */ if (wlvif->ssid_len > 0) return wl1271_cmd_template_set(wl, wlvif->role_id, CMD_TEMPL_AP_PROBE_RESPONSE, probe_rsp_data, probe_rsp_len, 0, rates); if (probe_rsp_len + bss_conf->ssid_len > WL1271_CMD_TEMPL_MAX_SIZE) { wl1271_error("probe_rsp template too big"); return -EINVAL; } /* start searching from IE offset */ ie_offset = offsetof(struct ieee80211_mgmt, u.probe_resp.variable); ptr = cfg80211_find_ie(WLAN_EID_SSID, probe_rsp_data + ie_offset, probe_rsp_len - ie_offset); if (!ptr) { wl1271_error("No SSID 
in beacon!");
		return -EINVAL;
	}

	ssid_ie_offset = ptr - probe_rsp_data;
	ptr += (ptr[1] + 2);	/* skip past the (empty) SSID IE */

	/* everything up to the SSID IE is copied unchanged */
	memcpy(probe_rsp_templ, probe_rsp_data, ssid_ie_offset);

	/* insert SSID from bss_conf */
	probe_rsp_templ[ssid_ie_offset] = WLAN_EID_SSID;
	probe_rsp_templ[ssid_ie_offset + 1] = bss_conf->ssid_len;
	memcpy(probe_rsp_templ + ssid_ie_offset + 2,
	       bss_conf->ssid, bss_conf->ssid_len);
	templ_len = ssid_ie_offset + 2 + bss_conf->ssid_len;

	/* append the IEs that followed the original SSID IE */
	memcpy(probe_rsp_templ + ssid_ie_offset + 2 + bss_conf->ssid_len,
	       ptr, probe_rsp_len - (ptr - probe_rsp_data));
	templ_len += probe_rsp_len - (ptr - probe_rsp_data);

	return wl1271_cmd_template_set(wl, wlvif->role_id,
				       CMD_TEMPL_AP_PROBE_RESPONSE,
				       probe_rsp_templ,
				       templ_len, 0,
				       rates);
}

/*
 * Apply ERP-related changes (slot time, preamble, CTS protection) to the
 * firmware. Returns 0 or the negative error of the first failing command.
 */
static int wl1271_bss_erp_info_changed(struct wl1271 *wl,
				       struct ieee80211_vif *vif,
				       struct ieee80211_bss_conf *bss_conf,
				       u32 changed)
{
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	int ret = 0;

	if (changed & BSS_CHANGED_ERP_SLOT) {
		if (bss_conf->use_short_slot)
			ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_SHORT);
		else
			ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_LONG);
		if (ret < 0) {
			wl1271_warning("Set slot time failed %d", ret);
			goto out;
		}
	}

	if (changed & BSS_CHANGED_ERP_PREAMBLE) {
		/*
		 * NOTE(review): unlike the other branches, the return value
		 * is ignored here - confirm this is intentional.
		 */
		if (bss_conf->use_short_preamble)
			wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_SHORT);
		else
			wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_LONG);
	}

	if (changed & BSS_CHANGED_ERP_CTS_PROT) {
		if (bss_conf->use_cts_prot)
			ret = wl1271_acx_cts_protect(wl, wlvif,
						     CTSPROTECT_ENABLE);
		else
			ret = wl1271_acx_cts_protect(wl, wlvif,
						     CTSPROTECT_DISABLE);
		if (ret < 0) {
			wl1271_warning("Set ctsprotect failed %d", ret);
			goto out;
		}
	}

out:
	return ret;
}

/*
 * Handle beacon-related changes: beacon interval, the usermode-provided
 * probe-response template (AP), and the beacon template itself. The beacon
 * is also recycled into a probe-response template unless usermode already
 * provided one.
 */
static int wl1271_bss_beacon_info_changed(struct wl1271 *wl,
					  struct ieee80211_vif *vif,
					  struct ieee80211_bss_conf *bss_conf,
					  u32 changed)
{
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
	int ret = 0;

	if ((changed & BSS_CHANGED_BEACON_INT)) {
		wl1271_debug(DEBUG_MASTER, "beacon interval updated: 
%d",
			bss_conf->beacon_int);

		wlvif->beacon_int = bss_conf->beacon_int;
	}

	if ((changed & BSS_CHANGED_AP_PROBE_RESP) && is_ap) {
		u32 rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);

		if (!wl1271_ap_set_probe_resp_tmpl(wl, rate, vif)) {
			wl1271_debug(DEBUG_AP, "probe response updated");
			set_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags);
		}
	}

	if ((changed & BSS_CHANGED_BEACON)) {
		struct ieee80211_hdr *hdr;
		u32 min_rate;
		int ieoffset = offsetof(struct ieee80211_mgmt,
					u.beacon.variable);
		struct sk_buff *beacon = ieee80211_beacon_get(wl->hw, vif);
		u16 tmpl_id;

		if (!beacon) {
			ret = -EINVAL;
			goto out;
		}

		wl1271_debug(DEBUG_MASTER, "beacon updated");

		ret = wl1271_ssid_set(vif, beacon, ieoffset);
		if (ret < 0) {
			dev_kfree_skb(beacon);
			goto out;
		}
		min_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
		tmpl_id = is_ap ? CMD_TEMPL_AP_BEACON :
				  CMD_TEMPL_BEACON;
		ret = wl1271_cmd_template_set(wl, wlvif->role_id, tmpl_id,
					      beacon->data,
					      beacon->len, 0,
					      min_rate);
		if (ret < 0) {
			dev_kfree_skb(beacon);
			goto out;
		}

		/*
		 * In case we already have a probe-resp beacon set explicitly
		 * by usermode, don't use the beacon data.
		 */
		if (test_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags))
			goto end_bcn;

		/* remove TIM ie from probe response */
		wl12xx_remove_ie(beacon, WLAN_EID_TIM, ieoffset);

		/*
		 * remove p2p ie from probe response.
		 * the fw responds to probe requests that don't include
		 * the p2p ie. probe requests with p2p ie will be passed,
		 * and will be responded by the supplicant (the spec
		 * forbids including the p2p ie when responding to probe
		 * requests that didn't include it).
		 */
		wl12xx_remove_vendor_ie(beacon, WLAN_OUI_WFA,
					WLAN_OUI_TYPE_WFA_P2P, ieoffset);

		/* rewrite the frame type: beacon -> probe response */
		hdr = (struct ieee80211_hdr *) beacon->data;
		hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
						 IEEE80211_STYPE_PROBE_RESP);
		if (is_ap)
			ret = wl1271_ap_set_probe_resp_tmpl_legacy(wl, vif,
						beacon->data,
						beacon->len,
						min_rate);
		else
			ret = wl1271_cmd_template_set(wl, wlvif->role_id,
						CMD_TEMPL_PROBE_RESPONSE,
						beacon->data,
						beacon->len, 0,
						min_rate);
end_bcn:
		dev_kfree_skb(beacon);
		if (ret < 0)
			goto out;
	}

out:
	if (ret != 0)
		wl1271_error("beacon info change failed: %d", ret);
	return ret;
}

/* AP mode changes */
static void wl1271_bss_info_changed_ap(struct wl1271 *wl,
				       struct ieee80211_vif *vif,
				       struct ieee80211_bss_conf *bss_conf,
				       u32 changed)
{
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	int ret = 0;

	if ((changed & BSS_CHANGED_BASIC_RATES)) {
		u32 rates = bss_conf->basic_rates;

		/* recompute the enabled/minimum rates for the new basics */
		wlvif->basic_rate_set = wl1271_tx_enabled_rates_get(wl, rates,
								 wlvif->band);
		wlvif->basic_rate = wl1271_tx_min_rate_get(wl,
						       wlvif->basic_rate_set);

		ret = wl1271_init_ap_rates(wl, wlvif);
		if (ret < 0) {
			wl1271_error("AP rate policy change failed %d", ret);
			goto out;
		}

		ret = wl1271_ap_init_templates(wl, vif);
		if (ret < 0)
			goto out;
	}

	ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf, changed);
	if (ret < 0)
		goto out;

	if ((changed & BSS_CHANGED_BEACON_ENABLED)) {
		if (bss_conf->enable_beacon) {
			/* start the AP role once, on first beacon enable */
			if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
				ret = wl12xx_cmd_role_start_ap(wl, wlvif);
				if (ret < 0)
					goto out;

				ret = wl1271_ap_init_hwenc(wl, wlvif);
				if (ret < 0)
					goto out;

				set_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
				wl1271_debug(DEBUG_AP, "started AP");
			}
		} else {
			if (test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
				ret = wl12xx_cmd_role_stop_ap(wl, wlvif);
				if (ret < 0)
					goto out;

				clear_bit(WLVIF_FLAG_AP_STARTED,
					  &wlvif->flags);
				clear_bit(WLVIF_FLAG_AP_PROBE_RESP_SET,
					  &wlvif->flags);
				wl1271_debug(DEBUG_AP, "stopped AP");
			}
		}
	}

	ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
	if (ret < 
0)
		goto out;

	/* Handle HT information change */
	if ((changed & BSS_CHANGED_HT) &&
	    (bss_conf->channel_type != NL80211_CHAN_NO_HT)) {
		ret = wl1271_acx_set_ht_information(wl, wlvif,
					bss_conf->ht_operation_mode);
		if (ret < 0) {
			wl1271_warning("Set ht information failed %d", ret);
			goto out;
		}
	}

out:
	return;
}

/* STA/IBSS mode changes */
static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
					struct ieee80211_vif *vif,
					struct ieee80211_bss_conf *bss_conf,
					u32 changed)
{
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	bool do_join = false, set_assoc = false;
	bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
	bool ibss_joined = false;
	u32 sta_rate_set = 0;
	int ret;
	struct ieee80211_sta *sta;
	bool sta_exists = false;
	struct ieee80211_sta_ht_cap sta_ht_cap;

	/* IBSS carries its own beacon; AP-style beacon handling applies */
	if (is_ibss) {
		ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf,
						     changed);
		if (ret < 0)
			goto out;
	}

	if (changed & BSS_CHANGED_IBSS) {
		if (bss_conf->ibss_joined) {
			set_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags);
			ibss_joined = true;
		} else {
			if (test_and_clear_bit(WLVIF_FLAG_IBSS_JOINED,
					       &wlvif->flags))
				wl1271_unjoin(wl, wlvif);
		}
	}

	if ((changed & BSS_CHANGED_BEACON_INT) && ibss_joined)
		do_join = true;

	/* Need to update the SSID (for filtering etc) */
	if ((changed & BSS_CHANGED_BEACON) && ibss_joined)
		do_join = true;

	if ((changed & BSS_CHANGED_BEACON_ENABLED) && ibss_joined) {
		wl1271_debug(DEBUG_ADHOC, "ad-hoc beaconing: %s",
			     bss_conf->enable_beacon ? 
"enabled" : "disabled");
		do_join = true;
	}

	if (changed & BSS_CHANGED_IDLE && !is_ibss) {
		ret = wl1271_sta_handle_idle(wl, wlvif, bss_conf->idle);
		if (ret < 0)
			wl1271_warning("idle mode change failed %d", ret);
	}

	/* connection-quality monitoring (RSSI trigger) */
	if ((changed & BSS_CHANGED_CQM)) {
		bool enable = false;
		if (bss_conf->cqm_rssi_thold)
			enable = true;
		ret = wl1271_acx_rssi_snr_trigger(wl, wlvif, enable,
						  bss_conf->cqm_rssi_thold,
						  bss_conf->cqm_rssi_hyst);
		if (ret < 0)
			goto out;
		wlvif->rssi_thold = bss_conf->cqm_rssi_thold;
	}

	if (changed & BSS_CHANGED_BSSID &&
	    (is_ibss || bss_conf->assoc))
		if (!is_zero_ether_addr(bss_conf->bssid)) {
			ret = wl12xx_cmd_build_null_data(wl, wlvif);
			if (ret < 0)
				goto out;

			ret = wl1271_build_qos_null_data(wl, vif);
			if (ret < 0)
				goto out;

			/* Need to update the BSSID (for filtering etc) */
			do_join = true;
		}

	if (changed & (BSS_CHANGED_ASSOC | BSS_CHANGED_HT)) {
		rcu_read_lock();
		sta = ieee80211_find_sta(vif, bss_conf->bssid);
		if (!sta)
			goto sta_not_found;

		/* save the supp_rates of the ap */
		sta_rate_set = sta->supp_rates[wl->hw->conf.channel->band];
		if (sta->ht_cap.ht_supported)
			sta_rate_set |=
			    (sta->ht_cap.mcs.rx_mask[0] << HW_HT_RATES_OFFSET);
		/* copy under RCU so it can be used after the unlock */
		sta_ht_cap = sta->ht_cap;
		sta_exists = true;

	sta_not_found:
		rcu_read_unlock();
	}

	if ((changed & BSS_CHANGED_ASSOC)) {
		if (bss_conf->assoc) {
			u32 rates;
			int ieoffset;
			wlvif->aid = bss_conf->aid;
			wlvif->beacon_int = bss_conf->beacon_int;
			set_assoc = true;

			/*
			 * use basic rates from AP, and determine lowest rate
			 * to use with control frames.
			 */
			rates = bss_conf->basic_rates;
			wlvif->basic_rate_set =
				wl1271_tx_enabled_rates_get(wl, rates,
							    wlvif->band);
			wlvif->basic_rate =
				wl1271_tx_min_rate_get(wl,
						       wlvif->basic_rate_set);
			if (sta_rate_set)
				wlvif->rate_set =
					wl1271_tx_enabled_rates_get(wl,
								sta_rate_set,
								wlvif->band);
			ret = wl1271_acx_sta_rate_policies(wl, wlvif);
			if (ret < 0)
				goto out;

			/*
			 * with wl1271, we don't need to update the
			 * beacon_int and dtim_period, because the firmware
			 * updates it by itself when the first beacon is
			 * received after a join.
			 */
			ret = wl1271_cmd_build_ps_poll(wl, wlvif, wlvif->aid);
			if (ret < 0)
				goto out;

			/*
			 * Get a template for hardware connection maintenance
			 */
			dev_kfree_skb(wlvif->probereq);
			wlvif->probereq = wl1271_cmd_build_ap_probe_req(wl,
									wlvif,
									NULL);
			ieoffset = offsetof(struct ieee80211_mgmt,
					    u.probe_req.variable);
			wl1271_ssid_set(vif, wlvif->probereq, ieoffset);

			/* enable the connection monitoring feature */
			ret = wl1271_acx_conn_monit_params(wl, wlvif, true);
			if (ret < 0)
				goto out;
		} else {
			/* use defaults when not associated */
			bool was_assoc =
			    !!test_and_clear_bit(WLVIF_FLAG_STA_ASSOCIATED,
						 &wlvif->flags);
			bool was_ifup =
			    !!test_and_clear_bit(WLVIF_FLAG_STA_STATE_SENT,
						 &wlvif->flags);
			wlvif->aid = 0;

			/* free probe-request template */
			dev_kfree_skb(wlvif->probereq);
			wlvif->probereq = NULL;

			/* revert back to minimum rates for the current band */
			wl1271_set_band_rate(wl, wlvif);
			wlvif->basic_rate =
				wl1271_tx_min_rate_get(wl,
						       wlvif->basic_rate_set);
			ret = wl1271_acx_sta_rate_policies(wl, wlvif);
			if (ret < 0)
				goto out;

			/*
			 * disable connection monitor features
			 * NOTE(review): this ret is overwritten below
			 * without being checked - confirm intended.
			 */
			ret = wl1271_acx_conn_monit_params(wl, wlvif, false);

			/* Disable the keep-alive feature */
			ret = wl1271_acx_keep_alive_mode(wl, wlvif, false);
			if (ret < 0)
				goto out;

			/* restore the bssid filter and go to dummy bssid */
			if (was_assoc) {
				/*
				 * we might have to disable roc, if there was
				 * no IF_OPER_UP notification.
				 */
				if (!was_ifup) {
					ret = wl12xx_croc(wl, wlvif->role_id);
					if (ret < 0)
						goto out;
				}
				/*
				 * (we also need to disable roc in case of
				 * roaming on the same channel. until we will
				 * have a better flow...)
				 */
				if (test_bit(wlvif->dev_role_id,
					     wl->roc_map)) {
					ret = wl12xx_croc(wl,
							  wlvif->dev_role_id);
					if (ret < 0)
						goto out;
				}

				wl1271_unjoin(wl, wlvif);
				if (!bss_conf->idle)
					wl12xx_start_dev(wl, wlvif);
			}
		}
	}

	if (changed & BSS_CHANGED_IBSS) {
		wl1271_debug(DEBUG_ADHOC, "ibss_joined: %d",
			     bss_conf->ibss_joined);

		if (bss_conf->ibss_joined) {
			u32 rates = bss_conf->basic_rates;
			wlvif->basic_rate_set =
				wl1271_tx_enabled_rates_get(wl, rates,
							    wlvif->band);
			wlvif->basic_rate =
				wl1271_tx_min_rate_get(wl,
						       wlvif->basic_rate_set);

			/* by default, use 11b + OFDM rates */
			wlvif->rate_set = CONF_TX_IBSS_DEFAULT_RATES;
			ret = wl1271_acx_sta_rate_policies(wl, wlvif);
			if (ret < 0)
				goto out;
		}
	}

	ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
	if (ret < 0)
		goto out;

	if (do_join) {
		ret = wl1271_join(wl, wlvif, set_assoc);
		if (ret < 0) {
			wl1271_warning("cmd join failed %d", ret);
			goto out;
		}

		/* ROC until connected (after EAPOL exchange) */
		if (!is_ibss) {
			ret = wl12xx_roc(wl, wlvif, wlvif->role_id);
			if (ret < 0)
				goto out;

			if (test_bit(WLVIF_FLAG_STA_AUTHORIZED,
				     &wlvif->flags))
				wl12xx_set_authorized(wl, wlvif);
		}
		/*
		 * stop device role if started (we might already be in
		 * STA/IBSS role).
		 */
		if (wl12xx_dev_role_started(wlvif)) {
			ret = wl12xx_stop_dev(wl, wlvif);
			if (ret < 0)
				goto out;
		}
	}

	/* Handle new association with HT. Do this after join.
	 */
	if (sta_exists) {
		if ((changed & BSS_CHANGED_HT) &&
		    (bss_conf->channel_type != NL80211_CHAN_NO_HT)) {
			ret = wl1271_acx_set_ht_capabilities(wl,
							     &sta_ht_cap,
							     true,
							     wlvif->sta.hlid);
			if (ret < 0) {
				wl1271_warning("Set ht cap true failed %d",
					       ret);
				goto out;
			}
		}
		/* handle new association without HT and disassociation */
		else if (changed & BSS_CHANGED_ASSOC) {
			ret = wl1271_acx_set_ht_capabilities(wl,
							     &sta_ht_cap,
							     false,
							     wlvif->sta.hlid);
			if (ret < 0) {
				wl1271_warning("Set ht cap false failed %d",
					       ret);
				goto out;
			}
		}
	}

	/* Handle HT information change. Done after join. */
	if ((changed & BSS_CHANGED_HT) &&
	    (bss_conf->channel_type != NL80211_CHAN_NO_HT)) {
		ret = wl1271_acx_set_ht_information(wl, wlvif,
					bss_conf->ht_operation_mode);
		if (ret < 0) {
			wl1271_warning("Set ht information failed %d", ret);
			goto out;
		}
	}

	/* Handle arp filtering. Done after join. */
	if ((changed & BSS_CHANGED_ARP_FILTER) ||
	    (!is_ibss && (changed & BSS_CHANGED_QOS))) {
		__be32 addr = bss_conf->arp_addr_list[0];
		wlvif->sta.qos = bss_conf->qos;
		WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS);

		/* hardware ARP offload only supports a single address */
		if (bss_conf->arp_addr_cnt == 1 &&
		    bss_conf->arp_filter_enabled) {
			wlvif->ip_addr = addr;
			/*
			 * The template should have been configured only upon
			 * association. however, it seems that the correct ip
			 * isn't being set (when sending), so we have to
			 * reconfigure the template upon every ip change.
			 */
			ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
			if (ret < 0) {
				wl1271_warning("build arp rsp failed: %d",
					       ret);
				goto out;
			}

			ret = wl1271_acx_arp_ip_filter(wl, wlvif,
				(ACX_ARP_FILTER_ARP_FILTERING |
				 ACX_ARP_FILTER_AUTO_ARP),
				addr);
		} else {
			wlvif->ip_addr = 0;
			ret = wl1271_acx_arp_ip_filter(wl, wlvif, 0, addr);
		}

		if (ret < 0)
			goto out;
	}

out:
	return;
}

/*
 * mac80211 bss_info_changed handler: wake the chip and dispatch to the
 * AP or STA/IBSS variant, all under wl->mutex.
 */
static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
				       struct ieee80211_vif *vif,
				       struct ieee80211_bss_conf *bss_conf,
				       u32 changed)
{
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
	int ret;

	wl1271_debug(DEBUG_MAC80211, "mac80211 bss info changed 0x%x",
		     (int)changed);

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state == WL1271_STATE_OFF))
		goto out;

	if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
		goto out;

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	if (is_ap)
		wl1271_bss_info_changed_ap(wl, vif, bss_conf, changed);
	else
		wl1271_bss_info_changed_sta(wl, vif, bss_conf, changed);

	wl1271_ps_elp_sleep(wl);
out:
	mutex_unlock(&wl->mutex);
}

/*
 * mac80211 conf_tx handler: program EDCA (AC) parameters and the TID
 * configuration for one hardware queue.
 */
static int wl1271_op_conf_tx(struct ieee80211_hw *hw,
			     struct ieee80211_vif *vif, u16 queue,
			     const struct ieee80211_tx_queue_params *params)
{
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	u8 ps_scheme;
	int ret = 0;

	mutex_lock(&wl->mutex);

	wl1271_debug(DEBUG_MAC80211, "mac80211 conf tx %d", queue);

	if (params->uapsd)
		ps_scheme = CONF_PS_SCHEME_UPSD_TRIGGER;
	else
		ps_scheme = CONF_PS_SCHEME_LEGACY;

	if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
		goto out;

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	/*
	 * the txop is confed in units of 32us by the mac80211,
	 * we need us
	 */
	ret = wl1271_acx_ac_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
				params->cw_min, params->cw_max,
				params->aifs, params->txop << 5);
	if (ret < 0)
		goto out_sleep;

	ret = wl1271_acx_tid_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
				 CONF_CHANNEL_TYPE_EDCF, 
				 wl1271_tx_get_queue(queue),
				 ps_scheme, CONF_ACK_POLICY_LEGACY,
				 0, 0);

out_sleep:
	wl1271_ps_elp_sleep(wl);

out:
	mutex_unlock(&wl->mutex);

	return ret;
}

/* mac80211 get_tsf handler: read the TSF counter from the firmware. */
static u64 wl1271_op_get_tsf(struct ieee80211_hw *hw,
			     struct ieee80211_vif *vif)
{
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	u64 mactime = ULLONG_MAX;	/* reported on any failure */
	int ret;

	wl1271_debug(DEBUG_MAC80211, "mac80211 get tsf");

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state == WL1271_STATE_OFF))
		goto out;

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	ret = wl12xx_acx_tsf_info(wl, wlvif, &mactime);
	if (ret < 0)
		goto out_sleep;

out_sleep:
	wl1271_ps_elp_sleep(wl);

out:
	mutex_unlock(&wl->mutex);
	return mactime;
}

/* mac80211 get_survey handler: reports noise for the current channel only. */
static int wl1271_op_get_survey(struct ieee80211_hw *hw, int idx,
				struct survey_info *survey)
{
	struct wl1271 *wl = hw->priv;
	struct ieee80211_conf *conf = &hw->conf;

	if (idx != 0)
		return -ENOENT;

	survey->channel = conf->channel;
	survey->filled = SURVEY_INFO_NOISE_DBM;
	survey->noise = wl->noise;

	return 0;
}

/*
 * Reserve an HLID (hardware link id) for a new AP-mode station and record
 * its MAC address. Returns -EBUSY when the station/link limit is reached.
 */
static int wl1271_allocate_sta(struct wl1271 *wl,
			     struct wl12xx_vif *wlvif,
			     struct ieee80211_sta *sta)
{
	struct wl1271_station *wl_sta;
	int ret;


	if (wl->active_sta_count >= AP_MAX_STATIONS) {
		wl1271_warning("could not allocate HLID - too much stations");
		return -EBUSY;
	}

	wl_sta = (struct wl1271_station *)sta->drv_priv;
	ret = wl12xx_allocate_link(wl, wlvif, &wl_sta->hlid);
	if (ret < 0) {
		wl1271_warning("could not allocate HLID - too many links");
		return -EBUSY;
	}

	set_bit(wl_sta->hlid, wlvif->ap.sta_hlid_map);
	memcpy(wl->links[wl_sta->hlid].addr, sta->addr, ETH_ALEN);
	wl->active_sta_count++;
	return 0;
}

/* Release an AP-mode station's HLID and clear its per-link state. */
void wl1271_free_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid)
{
	if (!test_bit(hlid, wlvif->ap.sta_hlid_map))
		return;

	clear_bit(hlid, wlvif->ap.sta_hlid_map);
	memset(wl->links[hlid].addr, 0, ETH_ALEN);
	wl->links[hlid].ba_bitmap = 0;
	__clear_bit(hlid, &wl->ap_ps_map);
	__clear_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);
	wl12xx_free_link(wl, wlvif, &hlid); 
	wl->active_sta_count--;

	/*
	 * rearm the tx watchdog when the last STA is freed - give the FW a
	 * chance to return STA-buffered packets before complaining.
	 */
	if (wl->active_sta_count == 0)
		wl12xx_rearm_tx_watchdog_locked(wl);
}

/* Add a peer (AP mode): allocate an HLID, then tell the firmware. */
static int wl12xx_sta_add(struct wl1271 *wl,
			  struct wl12xx_vif *wlvif,
			  struct ieee80211_sta *sta)
{
	struct wl1271_station *wl_sta;
	int ret = 0;
	u8 hlid;

	wl1271_debug(DEBUG_MAC80211, "mac80211 add sta %d", (int)sta->aid);

	ret = wl1271_allocate_sta(wl, wlvif, sta);
	if (ret < 0)
		return ret;

	wl_sta = (struct wl1271_station *)sta->drv_priv;
	hlid = wl_sta->hlid;

	ret = wl12xx_cmd_add_peer(wl, wlvif, sta, hlid);
	if (ret < 0)
		wl1271_free_sta(wl, wlvif, hlid);	/* roll back on failure */

	return ret;
}

/* Remove a peer (AP mode): notify the firmware, then free the HLID. */
static int wl12xx_sta_remove(struct wl1271 *wl,
			     struct wl12xx_vif *wlvif,
			     struct ieee80211_sta *sta)
{
	struct wl1271_station *wl_sta;
	int ret = 0, id;

	wl1271_debug(DEBUG_MAC80211, "mac80211 remove sta %d", (int)sta->aid);

	wl_sta = (struct wl1271_station *)sta->drv_priv;
	id = wl_sta->hlid;
	if (WARN_ON(!test_bit(id, wlvif->ap.sta_hlid_map)))
		return -EINVAL;

	ret = wl12xx_cmd_remove_peer(wl, wl_sta->hlid);
	if (ret < 0)
		return ret;

	wl1271_free_sta(wl, wlvif, wl_sta->hlid);
	return ret;
}

/*
 * Map mac80211 station state transitions onto firmware commands, for
 * both AP mode (add/remove/authorize peer) and STA mode (authorize).
 */
static int wl12xx_update_sta_state(struct wl1271 *wl,
				   struct wl12xx_vif *wlvif,
				   struct ieee80211_sta *sta,
				   enum ieee80211_sta_state old_state,
				   enum ieee80211_sta_state new_state)
{
	struct wl1271_station *wl_sta;
	u8 hlid;
	bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
	bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
	int ret;

	wl_sta = (struct wl1271_station *)sta->drv_priv;
	hlid = wl_sta->hlid;

	/* Add station (AP mode) */
	if (is_ap &&
	    old_state == IEEE80211_STA_NOTEXIST &&
	    new_state == IEEE80211_STA_NONE)
		return wl12xx_sta_add(wl, wlvif, sta);

	/* Remove station (AP mode) */
	if (is_ap &&
	    old_state == IEEE80211_STA_NONE &&
	    new_state == IEEE80211_STA_NOTEXIST) {
		/* must not fail */
		wl12xx_sta_remove(wl, wlvif, sta);
		return 0;
	}

	/* Authorize station (AP mode) */
	if (is_ap &&
	    new_state == 
	    IEEE80211_STA_AUTHORIZED) {
		ret = wl12xx_cmd_set_peer_state(wl, hlid);
		if (ret < 0)
			return ret;

		ret = wl1271_acx_set_ht_capabilities(wl, &sta->ht_cap, true,
						     hlid);
		return ret;
	}

	/* Authorize station */
	if (is_sta &&
	    new_state == IEEE80211_STA_AUTHORIZED) {
		set_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
		return wl12xx_set_authorized(wl, wlvif);
	}

	/* de-authorize on the way back down to ASSOC (STA mode) */
	if (is_sta &&
	    old_state == IEEE80211_STA_AUTHORIZED &&
	    new_state == IEEE80211_STA_ASSOC) {
		clear_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
		return 0;
	}

	return 0;
}

/*
 * mac80211 sta_state handler: wraps wl12xx_update_sta_state() with the
 * mutex and ELP wakeup. Failures on a downward transition are masked,
 * because mac80211 cannot roll those back.
 */
static int wl12xx_op_sta_state(struct ieee80211_hw *hw,
			       struct ieee80211_vif *vif,
			       struct ieee80211_sta *sta,
			       enum ieee80211_sta_state old_state,
			       enum ieee80211_sta_state new_state)
{
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	int ret;

	wl1271_debug(DEBUG_MAC80211, "mac80211 sta %d state=%d->%d",
		     sta->aid, old_state, new_state);

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state == WL1271_STATE_OFF)) {
		ret = -EBUSY;
		goto out;
	}

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	ret = wl12xx_update_sta_state(wl, wlvif, sta, old_state, new_state);

	wl1271_ps_elp_sleep(wl);
out:
	mutex_unlock(&wl->mutex);
	if (new_state < old_state)
		return 0;
	return ret;
}

/*
 * mac80211 ampdu_action handler: only RX BA sessions are managed by the
 * host here; TX BA is owned by the firmware.
 */
static int wl1271_op_ampdu_action(struct ieee80211_hw *hw,
				  struct ieee80211_vif *vif,
				  enum ieee80211_ampdu_mlme_action action,
				  struct ieee80211_sta *sta, u16 tid, u16 *ssn,
				  u8 buf_size)
{
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	int ret;
	u8 hlid, *ba_bitmap;

	wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu action %d tid %d", action,
		     tid);

	/* sanity check - the fields in FW are only 8bits wide */
	if (WARN_ON(tid > 0xFF))
		return -ENOTSUPP;

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state == WL1271_STATE_OFF)) {
		ret = -EAGAIN;
		goto out;
	}

	/* pick the link and BA bitmap matching this vif's role */
	if (wlvif->bss_type == BSS_TYPE_STA_BSS) {
		hlid = wlvif->sta.hlid;
		ba_bitmap = &wlvif->sta.ba_rx_bitmap;
	} else if (wlvif->bss_type == BSS_TYPE_AP_BSS) {
		struct wl1271_station *wl_sta;

		wl_sta = (struct 
			  wl1271_station *)sta->drv_priv;
		hlid = wl_sta->hlid;
		ba_bitmap = &wl->links[hlid].ba_bitmap;
	} else {
		ret = -EINVAL;
		goto out;
	}

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu: Rx tid %d action %d",
		     tid, action);

	switch (action) {
	case IEEE80211_AMPDU_RX_START:
		if (!wlvif->ba_support || !wlvif->ba_allowed) {
			ret = -ENOTSUPP;
			break;
		}

		if (wl->ba_rx_session_count >= RX_BA_MAX_SESSIONS) {
			ret = -EBUSY;
			wl1271_error("exceeded max RX BA sessions");
			break;
		}

		if (*ba_bitmap & BIT(tid)) {
			ret = -EINVAL;
			wl1271_error("cannot enable RX BA session on active "
				     "tid: %d", tid);
			break;
		}

		ret = wl12xx_acx_set_ba_receiver_session(wl, tid, *ssn, true,
							 hlid);
		if (!ret) {
			*ba_bitmap |= BIT(tid);
			wl->ba_rx_session_count++;
		}
		break;

	case IEEE80211_AMPDU_RX_STOP:
		if (!(*ba_bitmap & BIT(tid))) {
			ret = -EINVAL;
			wl1271_error("no active RX BA session on tid: %d",
				     tid);
			break;
		}

		ret = wl12xx_acx_set_ba_receiver_session(wl, tid, 0, false,
							 hlid);
		if (!ret) {
			*ba_bitmap &= ~BIT(tid);
			wl->ba_rx_session_count--;
		}
		break;

	/*
	 * BA initiator (TX) session management is handled by the FW
	 * independently; all TX AMPDU commands are rejected on purpose.
	 */
	case IEEE80211_AMPDU_TX_START:
	case IEEE80211_AMPDU_TX_STOP:
	case IEEE80211_AMPDU_TX_OPERATIONAL:
		ret = -EINVAL;
		break;

	default:
		wl1271_error("Incorrect ampdu action id=%x\n", action);
		ret = -EINVAL;
	}

	wl1271_ps_elp_sleep(wl);

out:
	mutex_unlock(&wl->mutex);

	return ret;
}

/*
 * mac80211 set_bitrate_mask handler: remember the per-band legacy rate
 * masks and, for an unassociated STA, reprogram the rate policies now.
 */
static int wl12xx_set_bitrate_mask(struct ieee80211_hw *hw,
				   struct ieee80211_vif *vif,
				   const struct cfg80211_bitrate_mask *mask)
{
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	struct wl1271 *wl = hw->priv;
	int i, ret = 0;

	wl1271_debug(DEBUG_MAC80211, "mac80211 set_bitrate_mask 0x%x 0x%x",
		mask->control[NL80211_BAND_2GHZ].legacy,
		mask->control[NL80211_BAND_5GHZ].legacy);

	mutex_lock(&wl->mutex);

	/* cache the masks even if the chip is currently off */
	for (i = 0; i < IEEE80211_NUM_BANDS; i++)
		wlvif->bitrate_masks[i] =
			wl1271_tx_enabled_rates_get(wl,
						    mask->control[i].legacy,
						    i);

	if (unlikely(wl->state == WL1271_STATE_OFF))
		goto out;

	if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
	    !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {

		ret = wl1271_ps_elp_wakeup(wl);
		if (ret < 0)
			goto out;

		wl1271_set_band_rate(wl, wlvif);
		wlvif->basic_rate =
			wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
		ret = wl1271_acx_sta_rate_policies(wl, wlvif);

		wl1271_ps_elp_sleep(wl);
	}
out:
	mutex_unlock(&wl->mutex);

	return ret;
}

/*
 * mac80211 channel_switch handler: forward the CSA to the firmware for
 * each STA vif; if the chip is off, complete the switch as failed.
 */
static void wl12xx_op_channel_switch(struct ieee80211_hw *hw,
				     struct ieee80211_channel_switch *ch_switch)
{
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif;
	int ret;

	wl1271_debug(DEBUG_MAC80211, "mac80211 channel switch");

	wl1271_tx_flush(wl);

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state == WL1271_STATE_OFF)) {
		wl12xx_for_each_wlvif_sta(wl, wlvif) {
			struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
			ieee80211_chswitch_done(vif, false);
		}
		goto out;
	}

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	/* TODO: change mac80211 to pass vif as param */
	wl12xx_for_each_wlvif_sta(wl, wlvif) {
		ret = wl12xx_cmd_channel_switch(wl, wlvif, ch_switch);

		if (!ret)
			set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags);
	}

	wl1271_ps_elp_sleep(wl);

out:
	mutex_unlock(&wl->mutex);
}

/* mac80211 tx_frames_pending handler: frames queued in host or firmware? */
static bool wl1271_tx_frames_pending(struct ieee80211_hw *hw)
{
	struct wl1271 *wl = hw->priv;
	bool ret = false;

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state == WL1271_STATE_OFF))
		goto out;

	/* packets are considered pending if in the TX queue or the FW */
	ret = (wl1271_tx_total_queue_count(wl) > 0) || (wl->tx_frames_cnt > 0);
out:
	mutex_unlock(&wl->mutex);

	return ret;
}

/* can't be const, mac80211 writes to this */
/* legacy (11b/g) rate table; .bitrate is in units of 100 kbps */
static struct ieee80211_rate wl1271_rates[] = {
	{ .bitrate = 10,
	  .hw_value = CONF_HW_BIT_RATE_1MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_1MBPS, },
	{ .bitrate = 20,
	  .hw_value = CONF_HW_BIT_RATE_2MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_2MBPS,
	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
	{ .bitrate = 55,
	  .hw_value = CONF_HW_BIT_RATE_5_5MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_5_5MBPS,
	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
	{ .bitrate = 110,
	  .hw_value = CONF_HW_BIT_RATE_11MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_11MBPS,
	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
	{ .bitrate = 60,
	  .hw_value = CONF_HW_BIT_RATE_6MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
	{ .bitrate = 90,
	  .hw_value = CONF_HW_BIT_RATE_9MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
	{ .bitrate = 120,
	  .hw_value = CONF_HW_BIT_RATE_12MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
	{ .bitrate = 180,
	  .hw_value = CONF_HW_BIT_RATE_18MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
	{ .bitrate = 240,
	  .hw_value = CONF_HW_BIT_RATE_24MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
	{ .bitrate = 360,
	  .hw_value = CONF_HW_BIT_RATE_36MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
	{ .bitrate = 480,
	  .hw_value = CONF_HW_BIT_RATE_48MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
	{ .bitrate = 540,
	  .hw_value = CONF_HW_BIT_RATE_54MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
};

/* can't be const, mac80211 writes to this */
/* 2.4 GHz channel list; .center_freq is in MHz */
static struct ieee80211_channel wl1271_channels[] = {
	{ .hw_value = 1, .center_freq = 2412, .max_power = 25 },
	{ .hw_value = 2, 
	  .center_freq = 2417, .max_power = 25 },
	{ .hw_value = 3, .center_freq = 2422, .max_power = 25 },
	{ .hw_value = 4, .center_freq = 2427, .max_power = 25 },
	{ .hw_value = 5, .center_freq = 2432, .max_power = 25 },
	{ .hw_value = 6, .center_freq = 2437, .max_power = 25 },
	{ .hw_value = 7, .center_freq = 2442, .max_power = 25 },
	{ .hw_value = 8, .center_freq = 2447, .max_power = 25 },
	{ .hw_value = 9, .center_freq = 2452, .max_power = 25 },
	{ .hw_value = 10, .center_freq = 2457, .max_power = 25 },
	{ .hw_value = 11, .center_freq = 2462, .max_power = 25 },
	{ .hw_value = 12, .center_freq = 2467, .max_power = 25 },
	{ .hw_value = 13, .center_freq = 2472, .max_power = 25 },
	{ .hw_value = 14, .center_freq = 2484, .max_power = 25 },
};

/* mapping to indexes for wl1271_rates */
static const u8 wl1271_rate_to_idx_2ghz[] = {
	/* MCS rates are used only with 11n */
	7,			       /* CONF_HW_RXTX_RATE_MCS7_SGI */
	7,			       /* CONF_HW_RXTX_RATE_MCS7 */
	6,			       /* CONF_HW_RXTX_RATE_MCS6 */
	5,			       /* CONF_HW_RXTX_RATE_MCS5 */
	4,			       /* CONF_HW_RXTX_RATE_MCS4 */
	3,			       /* CONF_HW_RXTX_RATE_MCS3 */
	2,			       /* CONF_HW_RXTX_RATE_MCS2 */
	1,			       /* CONF_HW_RXTX_RATE_MCS1 */
	0,			       /* CONF_HW_RXTX_RATE_MCS0 */

	11,			       /* CONF_HW_RXTX_RATE_54 */
	10,			       /* CONF_HW_RXTX_RATE_48 */
	9,			       /* CONF_HW_RXTX_RATE_36 */
	8,			       /* CONF_HW_RXTX_RATE_24 */

	/* TI-specific rate */
	CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_22 */

	7,			       /* CONF_HW_RXTX_RATE_18 */
	6,			       /* CONF_HW_RXTX_RATE_12 */
	3,			       /* CONF_HW_RXTX_RATE_11 */
	5,			       /* CONF_HW_RXTX_RATE_9 */
	4,			       /* CONF_HW_RXTX_RATE_6 */
	2,			       /* CONF_HW_RXTX_RATE_5_5 */
	1,			       /* CONF_HW_RXTX_RATE_2 */
	0			       /* CONF_HW_RXTX_RATE_1 */
};

/* 11n STA capabilities */
#define HW_RX_HIGHEST_RATE 72

#define WL12XX_HT_CAP { \
	.cap = IEEE80211_HT_CAP_GRN_FLD | IEEE80211_HT_CAP_SGI_20 | \
	       (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT), \
	.ht_supported = true, \
	.ampdu_factor = IEEE80211_HT_MAX_AMPDU_8K, \
	.ampdu_density = IEEE80211_HT_MPDU_DENSITY_8, \
	.mcs = { \
		.rx_mask = { 0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0, }, \
		.rx_highest = 
		cpu_to_le16(HW_RX_HIGHEST_RATE), \
		.tx_params = IEEE80211_HT_MCS_TX_DEFINED, \
		}, \
}

/* can't be const, mac80211 writes to this */
static struct ieee80211_supported_band wl1271_band_2ghz = {
	.channels = wl1271_channels,
	.n_channels = ARRAY_SIZE(wl1271_channels),
	.bitrates = wl1271_rates,
	.n_bitrates = ARRAY_SIZE(wl1271_rates),
	.ht_cap	= WL12XX_HT_CAP,
};

/* 5 GHz data rates for WL1273 */
static struct ieee80211_rate wl1271_rates_5ghz[] = {
	{ .bitrate = 60,
	  .hw_value = CONF_HW_BIT_RATE_6MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
	{ .bitrate = 90,
	  .hw_value = CONF_HW_BIT_RATE_9MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
	{ .bitrate = 120,
	  .hw_value = CONF_HW_BIT_RATE_12MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
	{ .bitrate = 180,
	  .hw_value = CONF_HW_BIT_RATE_18MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
	{ .bitrate = 240,
	  .hw_value = CONF_HW_BIT_RATE_24MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
	{ .bitrate = 360,
	  .hw_value = CONF_HW_BIT_RATE_36MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
	{ .bitrate = 480,
	  .hw_value = CONF_HW_BIT_RATE_48MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
	{ .bitrate = 540,
	  .hw_value = CONF_HW_BIT_RATE_54MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
};

/* 5 GHz band channels for WL1273 */
static struct ieee80211_channel wl1271_channels_5ghz[] = {
	{ .hw_value = 7, .center_freq = 5035, .max_power = 25 },
	{ .hw_value = 8, .center_freq = 5040, .max_power = 25 },
	{ .hw_value = 9, .center_freq = 5045, .max_power = 25 },
	{ .hw_value = 11, .center_freq = 5055, .max_power = 25 },
	{ .hw_value = 12, .center_freq = 5060, .max_power = 25 },
	{ .hw_value = 16, .center_freq = 5080, .max_power = 25 },
	{ .hw_value = 34, .center_freq = 5170, .max_power = 25 },
	{ .hw_value = 36, .center_freq = 5180, .max_power = 25 },
	{ .hw_value = 38, .center_freq = 5190, .max_power = 25 },
	{ .hw_value = 40, .center_freq = 5200, .max_power = 25 },
	{ .hw_value = 42, .center_freq = 5210, .max_power = 25 },
	{ .hw_value = 44, 
	  .center_freq = 5220, .max_power = 25 },
	{ .hw_value = 46, .center_freq = 5230, .max_power = 25 },
	{ .hw_value = 48, .center_freq = 5240, .max_power = 25 },
	{ .hw_value = 52, .center_freq = 5260, .max_power = 25 },
	{ .hw_value = 56, .center_freq = 5280, .max_power = 25 },
	{ .hw_value = 60, .center_freq = 5300, .max_power = 25 },
	{ .hw_value = 64, .center_freq = 5320, .max_power = 25 },
	{ .hw_value = 100, .center_freq = 5500, .max_power = 25 },
	{ .hw_value = 104, .center_freq = 5520, .max_power = 25 },
	{ .hw_value = 108, .center_freq = 5540, .max_power = 25 },
	{ .hw_value = 112, .center_freq = 5560, .max_power = 25 },
	{ .hw_value = 116, .center_freq = 5580, .max_power = 25 },
	{ .hw_value = 120, .center_freq = 5600, .max_power = 25 },
	{ .hw_value = 124, .center_freq = 5620, .max_power = 25 },
	{ .hw_value = 128, .center_freq = 5640, .max_power = 25 },
	{ .hw_value = 132, .center_freq = 5660, .max_power = 25 },
	{ .hw_value = 136, .center_freq = 5680, .max_power = 25 },
	{ .hw_value = 140, .center_freq = 5700, .max_power = 25 },
	{ .hw_value = 149, .center_freq = 5745, .max_power = 25 },
	{ .hw_value = 153, .center_freq = 5765, .max_power = 25 },
	{ .hw_value = 157, .center_freq = 5785, .max_power = 25 },
	{ .hw_value = 161, .center_freq = 5805, .max_power = 25 },
	{ .hw_value = 165, .center_freq = 5825, .max_power = 25 },
};

/* mapping to indexes for wl1271_rates_5ghz */
static const u8 wl1271_rate_to_idx_5ghz[] = {
	/* MCS rates are used only with 11n */
	7,			       /* CONF_HW_RXTX_RATE_MCS7_SGI */
	7,			       /* CONF_HW_RXTX_RATE_MCS7 */
	6,			       /* CONF_HW_RXTX_RATE_MCS6 */
	5,			       /* CONF_HW_RXTX_RATE_MCS5 */
	4,			       /* CONF_HW_RXTX_RATE_MCS4 */
	3,			       /* CONF_HW_RXTX_RATE_MCS3 */
	2,			       /* CONF_HW_RXTX_RATE_MCS2 */
	1,			       /* CONF_HW_RXTX_RATE_MCS1 */
	0,			       /* CONF_HW_RXTX_RATE_MCS0 */

	7,			       /* CONF_HW_RXTX_RATE_54 */
	6,			       /* CONF_HW_RXTX_RATE_48 */
	5,			       /* CONF_HW_RXTX_RATE_36 */
	4,			       /* CONF_HW_RXTX_RATE_24 */

	/* TI-specific rate */
	CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_22 */

	3,			       /* CONF_HW_RXTX_RATE_18 */
	2,			       /* 
CONF_HW_RXTX_RATE_12 */ CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_11 */ 1, /* CONF_HW_RXTX_RATE_9 */ 0, /* CONF_HW_RXTX_RATE_6 */ CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_5_5 */ CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_2 */ CONF_HW_RXTX_RATE_UNSUPPORTED /* CONF_HW_RXTX_RATE_1 */ }; static struct ieee80211_supported_band wl1271_band_5ghz = { .channels = wl1271_channels_5ghz, .n_channels = ARRAY_SIZE(wl1271_channels_5ghz), .bitrates = wl1271_rates_5ghz, .n_bitrates = ARRAY_SIZE(wl1271_rates_5ghz), .ht_cap = WL12XX_HT_CAP, }; static const u8 *wl1271_band_rate_to_idx[] = { [IEEE80211_BAND_2GHZ] = wl1271_rate_to_idx_2ghz, [IEEE80211_BAND_5GHZ] = wl1271_rate_to_idx_5ghz }; static const struct ieee80211_ops wl1271_ops = { .start = wl1271_op_start, .stop = wl1271_op_stop, .add_interface = wl1271_op_add_interface, .remove_interface = wl1271_op_remove_interface, .change_interface = wl12xx_op_change_interface, #ifdef CONFIG_PM .suspend = wl1271_op_suspend, .resume = wl1271_op_resume, #endif .config = wl1271_op_config, .prepare_multicast = wl1271_op_prepare_multicast, .configure_filter = wl1271_op_configure_filter, .tx = wl1271_op_tx, .set_key = wl1271_op_set_key, .hw_scan = wl1271_op_hw_scan, .cancel_hw_scan = wl1271_op_cancel_hw_scan, .sched_scan_start = wl1271_op_sched_scan_start, .sched_scan_stop = wl1271_op_sched_scan_stop, .bss_info_changed = wl1271_op_bss_info_changed, .set_frag_threshold = wl1271_op_set_frag_threshold, .set_rts_threshold = wl1271_op_set_rts_threshold, .conf_tx = wl1271_op_conf_tx, .get_tsf = wl1271_op_get_tsf, .get_survey = wl1271_op_get_survey, .sta_state = wl12xx_op_sta_state, .ampdu_action = wl1271_op_ampdu_action, .tx_frames_pending = wl1271_tx_frames_pending, .set_bitrate_mask = wl12xx_set_bitrate_mask, .channel_switch = wl12xx_op_channel_switch, CFG80211_TESTMODE_CMD(wl1271_tm_cmd) }; u8 wl1271_rate_to_idx(int rate, enum ieee80211_band band) { u8 idx; BUG_ON(band >= sizeof(wl1271_band_rate_to_idx)/sizeof(u8 *)); if 
(unlikely(rate >= CONF_HW_RXTX_RATE_MAX)) { wl1271_error("Illegal RX rate from HW: %d", rate); return 0; } idx = wl1271_band_rate_to_idx[band][rate]; if (unlikely(idx == CONF_HW_RXTX_RATE_UNSUPPORTED)) { wl1271_error("Unsupported RX rate from HW: %d", rate); return 0; } return idx; } static ssize_t wl1271_sysfs_show_bt_coex_state(struct device *dev, struct device_attribute *attr, char *buf) { struct wl1271 *wl = dev_get_drvdata(dev); ssize_t len; len = PAGE_SIZE; mutex_lock(&wl->mutex); len = snprintf(buf, len, "%d\n\n0 - off\n1 - on\n", wl->sg_enabled); mutex_unlock(&wl->mutex); return len; } static ssize_t wl1271_sysfs_store_bt_coex_state(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct wl1271 *wl = dev_get_drvdata(dev); unsigned long res; int ret; ret = kstrtoul(buf, 10, &res); if (ret < 0) { wl1271_warning("incorrect value written to bt_coex_mode"); return count; } mutex_lock(&wl->mutex); res = !!res; if (res == wl->sg_enabled) goto out; wl->sg_enabled = res; if (wl->state == WL1271_STATE_OFF) goto out; ret = wl1271_ps_elp_wakeup(wl); if (ret < 0) goto out; wl1271_acx_sg_enable(wl, wl->sg_enabled); wl1271_ps_elp_sleep(wl); out: mutex_unlock(&wl->mutex); return count; } static DEVICE_ATTR(bt_coex_state, S_IRUGO | S_IWUSR, wl1271_sysfs_show_bt_coex_state, wl1271_sysfs_store_bt_coex_state); static ssize_t wl1271_sysfs_show_hw_pg_ver(struct device *dev, struct device_attribute *attr, char *buf) { struct wl1271 *wl = dev_get_drvdata(dev); ssize_t len; len = PAGE_SIZE; mutex_lock(&wl->mutex); if (wl->hw_pg_ver >= 0) len = snprintf(buf, len, "%d\n", wl->hw_pg_ver); else len = snprintf(buf, len, "n/a\n"); mutex_unlock(&wl->mutex); return len; } static DEVICE_ATTR(hw_pg_ver, S_IRUGO, wl1271_sysfs_show_hw_pg_ver, NULL); static ssize_t wl1271_sysfs_read_fwlog(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buffer, loff_t pos, size_t count) { struct device *dev = container_of(kobj, struct device, 
kobj); struct wl1271 *wl = dev_get_drvdata(dev); ssize_t len; int ret; ret = mutex_lock_interruptible(&wl->mutex); if (ret < 0) return -ERESTARTSYS; /* Let only one thread read the log at a time, blocking others */ while (wl->fwlog_size == 0) { DEFINE_WAIT(wait); prepare_to_wait_exclusive(&wl->fwlog_waitq, &wait, TASK_INTERRUPTIBLE); if (wl->fwlog_size != 0) { finish_wait(&wl->fwlog_waitq, &wait); break; } mutex_unlock(&wl->mutex); schedule(); finish_wait(&wl->fwlog_waitq, &wait); if (signal_pending(current)) return -ERESTARTSYS; ret = mutex_lock_interruptible(&wl->mutex); if (ret < 0) return -ERESTARTSYS; } /* Check if the fwlog is still valid */ if (wl->fwlog_size < 0) { mutex_unlock(&wl->mutex); return 0; } /* Seeking is not supported - old logs are not kept. Disregard pos. */ len = min(count, (size_t)wl->fwlog_size); wl->fwlog_size -= len; memcpy(buffer, wl->fwlog, len); /* Make room for new messages */ memmove(wl->fwlog, wl->fwlog + len, wl->fwlog_size); mutex_unlock(&wl->mutex); return len; } static struct bin_attribute fwlog_attr = { .attr = {.name = "fwlog", .mode = S_IRUSR}, .read = wl1271_sysfs_read_fwlog, }; static bool wl12xx_mac_in_fuse(struct wl1271 *wl) { bool supported = false; u8 major, minor; if (wl->chip.id == CHIP_ID_1283_PG20) { major = WL128X_PG_GET_MAJOR(wl->hw_pg_ver); minor = WL128X_PG_GET_MINOR(wl->hw_pg_ver); /* in wl128x we have the MAC address if the PG is >= (2, 1) */ if (major > 2 || (major == 2 && minor >= 1)) supported = true; } else { major = WL127X_PG_GET_MAJOR(wl->hw_pg_ver); minor = WL127X_PG_GET_MINOR(wl->hw_pg_ver); /* in wl127x we have the MAC address if the PG is >= (3, 1) */ if (major == 3 && minor >= 1) supported = true; } wl1271_debug(DEBUG_PROBE, "PG Ver major = %d minor = %d, MAC %s present", major, minor, supported ? 
"is" : "is not"); return supported; } static void wl12xx_derive_mac_addresses(struct wl1271 *wl, u32 oui, u32 nic, int n) { int i; wl1271_debug(DEBUG_PROBE, "base address: oui %06x nic %06x, n %d", oui, nic, n); if (nic + n - 1 > 0xffffff) wl1271_warning("NIC part of the MAC address wraps around!"); for (i = 0; i < n; i++) { wl->addresses[i].addr[0] = (u8)(oui >> 16); wl->addresses[i].addr[1] = (u8)(oui >> 8); wl->addresses[i].addr[2] = (u8) oui; wl->addresses[i].addr[3] = (u8)(nic >> 16); wl->addresses[i].addr[4] = (u8)(nic >> 8); wl->addresses[i].addr[5] = (u8) nic; nic++; } wl->hw->wiphy->n_addresses = n; wl->hw->wiphy->addresses = wl->addresses; } static void wl12xx_get_fuse_mac(struct wl1271 *wl) { u32 mac1, mac2; wl1271_set_partition(wl, &wl12xx_part_table[PART_DRPW]); mac1 = wl1271_read32(wl, WL12XX_REG_FUSE_BD_ADDR_1); mac2 = wl1271_read32(wl, WL12XX_REG_FUSE_BD_ADDR_2); /* these are the two parts of the BD_ADDR */ wl->fuse_oui_addr = ((mac2 & 0xffff) << 8) + ((mac1 & 0xff000000) >> 24); wl->fuse_nic_addr = mac1 & 0xffffff; wl1271_set_partition(wl, &wl12xx_part_table[PART_DOWN]); } static int wl12xx_get_hw_info(struct wl1271 *wl) { int ret; u32 die_info; ret = wl12xx_set_power_on(wl); if (ret < 0) goto out; wl->chip.id = wl1271_read32(wl, CHIP_ID_B); if (wl->chip.id == CHIP_ID_1283_PG20) die_info = wl1271_top_reg_read(wl, WL128X_REG_FUSE_DATA_2_1); else die_info = wl1271_top_reg_read(wl, WL127X_REG_FUSE_DATA_2_1); wl->hw_pg_ver = (s8) (die_info & PG_VER_MASK) >> PG_VER_OFFSET; if (!wl12xx_mac_in_fuse(wl)) { wl->fuse_oui_addr = 0; wl->fuse_nic_addr = 0; } else { wl12xx_get_fuse_mac(wl); } wl1271_power_off(wl); out: return ret; } static int wl1271_register_hw(struct wl1271 *wl) { int ret; u32 oui_addr = 0, nic_addr = 0; if (wl->mac80211_registered) return 0; ret = wl12xx_get_hw_info(wl); if (ret < 0) { wl1271_error("couldn't get hw info"); goto out; } ret = wl1271_fetch_nvs(wl); if (ret == 0) { /* NOTE: The wl->nvs->nvs element must be first, in * order to 
simplify the casting, we assume it is at * the beginning of the wl->nvs structure. */ u8 *nvs_ptr = (u8 *)wl->nvs; oui_addr = (nvs_ptr[11] << 16) + (nvs_ptr[10] << 8) + nvs_ptr[6]; nic_addr = (nvs_ptr[5] << 16) + (nvs_ptr[4] << 8) + nvs_ptr[3]; } /* if the MAC address is zeroed in the NVS derive from fuse */ if (oui_addr == 0 && nic_addr == 0) { oui_addr = wl->fuse_oui_addr; /* fuse has the BD_ADDR, the WLAN addresses are the next two */ nic_addr = wl->fuse_nic_addr + 1; } wl12xx_derive_mac_addresses(wl, oui_addr, nic_addr, 2); ret = ieee80211_register_hw(wl->hw); if (ret < 0) { wl1271_error("unable to register mac80211 hw: %d", ret); goto out; } wl->mac80211_registered = true; wl1271_debugfs_init(wl); wl1271_notice("loaded"); out: return ret; } static void wl1271_unregister_hw(struct wl1271 *wl) { if (wl->plt) wl1271_plt_stop(wl); ieee80211_unregister_hw(wl->hw); wl->mac80211_registered = false; } static int wl1271_init_ieee80211(struct wl1271 *wl) { static const u32 cipher_suites[] = { WLAN_CIPHER_SUITE_WEP40, WLAN_CIPHER_SUITE_WEP104, WLAN_CIPHER_SUITE_TKIP, WLAN_CIPHER_SUITE_CCMP, WL1271_CIPHER_SUITE_GEM, }; /* The tx descriptor buffer and the TKIP space. 
*/ wl->hw->extra_tx_headroom = WL1271_EXTRA_SPACE_TKIP + sizeof(struct wl1271_tx_hw_descr); /* unit us */ /* FIXME: find a proper value */ wl->hw->channel_change_time = 10000; wl->hw->max_listen_interval = wl->conf.conn.max_listen_interval; wl->hw->flags = IEEE80211_HW_SIGNAL_DBM | IEEE80211_HW_SUPPORTS_PS | IEEE80211_HW_SUPPORTS_DYNAMIC_PS | IEEE80211_HW_SUPPORTS_UAPSD | IEEE80211_HW_HAS_RATE_CONTROL | IEEE80211_HW_CONNECTION_MONITOR | IEEE80211_HW_REPORTS_TX_ACK_STATUS | IEEE80211_HW_SPECTRUM_MGMT | IEEE80211_HW_AP_LINK_PS | IEEE80211_HW_AMPDU_AGGREGATION | IEEE80211_HW_TX_AMPDU_SETUP_IN_HW | IEEE80211_HW_SCAN_WHILE_IDLE; wl->hw->wiphy->cipher_suites = cipher_suites; wl->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites); wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_ADHOC) | BIT(NL80211_IFTYPE_AP) | BIT(NL80211_IFTYPE_P2P_CLIENT) | BIT(NL80211_IFTYPE_P2P_GO); wl->hw->wiphy->max_scan_ssids = 1; wl->hw->wiphy->max_sched_scan_ssids = 16; wl->hw->wiphy->max_match_sets = 16; /* * Maximum length of elements in scanning probe request templates * should be the maximum length possible for a template, without * the IEEE80211 header of the template */ wl->hw->wiphy->max_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE - sizeof(struct ieee80211_header); wl->hw->wiphy->max_sched_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE - sizeof(struct ieee80211_header); wl->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD; /* make sure all our channels fit in the scanned_ch bitmask */ BUILD_BUG_ON(ARRAY_SIZE(wl1271_channels) + ARRAY_SIZE(wl1271_channels_5ghz) > WL1271_MAX_CHANNELS); /* * We keep local copies of the band structs because we need to * modify them on a per-device basis. 
*/ memcpy(&wl->bands[IEEE80211_BAND_2GHZ], &wl1271_band_2ghz, sizeof(wl1271_band_2ghz)); memcpy(&wl->bands[IEEE80211_BAND_5GHZ], &wl1271_band_5ghz, sizeof(wl1271_band_5ghz)); wl->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &wl->bands[IEEE80211_BAND_2GHZ]; wl->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = &wl->bands[IEEE80211_BAND_5GHZ]; wl->hw->queues = 4; wl->hw->max_rates = 1; wl->hw->wiphy->reg_notifier = wl1271_reg_notify; /* the FW answers probe-requests in AP-mode */ wl->hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD; wl->hw->wiphy->probe_resp_offload = NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS | NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 | NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P; SET_IEEE80211_DEV(wl->hw, wl->dev); wl->hw->sta_data_size = sizeof(struct wl1271_station); wl->hw->vif_data_size = sizeof(struct wl12xx_vif); wl->hw->max_rx_aggregation_subframes = 8; return 0; } #define WL1271_DEFAULT_CHANNEL 0 static struct ieee80211_hw *wl1271_alloc_hw(void) { struct ieee80211_hw *hw; struct wl1271 *wl; int i, j, ret; unsigned int order; BUILD_BUG_ON(AP_MAX_STATIONS > WL12XX_MAX_LINKS); hw = ieee80211_alloc_hw(sizeof(*wl), &wl1271_ops); if (!hw) { wl1271_error("could not alloc ieee80211_hw"); ret = -ENOMEM; goto err_hw_alloc; } wl = hw->priv; memset(wl, 0, sizeof(*wl)); INIT_LIST_HEAD(&wl->wlvif_list); wl->hw = hw; for (i = 0; i < NUM_TX_QUEUES; i++) for (j = 0; j < WL12XX_MAX_LINKS; j++) skb_queue_head_init(&wl->links[j].tx_queue[i]); skb_queue_head_init(&wl->deferred_rx_queue); skb_queue_head_init(&wl->deferred_tx_queue); INIT_DELAYED_WORK(&wl->elp_work, wl1271_elp_work); INIT_WORK(&wl->netstack_work, wl1271_netstack_work); INIT_WORK(&wl->tx_work, wl1271_tx_work); INIT_WORK(&wl->recovery_work, wl1271_recovery_work); INIT_DELAYED_WORK(&wl->scan_complete_work, wl1271_scan_complete_work); INIT_DELAYED_WORK(&wl->tx_watchdog_work, wl12xx_tx_watchdog_work); wl->freezable_wq = create_freezable_workqueue("wl12xx_wq"); if (!wl->freezable_wq) { ret = -ENOMEM; goto err_hw; } 
wl->channel = WL1271_DEFAULT_CHANNEL; wl->rx_counter = 0; wl->power_level = WL1271_DEFAULT_POWER_LEVEL; wl->band = IEEE80211_BAND_2GHZ; wl->flags = 0; wl->sg_enabled = true; wl->hw_pg_ver = -1; wl->ap_ps_map = 0; wl->ap_fw_ps_map = 0; wl->quirks = 0; wl->platform_quirks = 0; wl->sched_scanning = false; wl->tx_spare_blocks = TX_HW_BLOCK_SPARE_DEFAULT; wl->system_hlid = WL12XX_SYSTEM_HLID; wl->active_sta_count = 0; wl->fwlog_size = 0; init_waitqueue_head(&wl->fwlog_waitq); /* The system link is always allocated */ __set_bit(WL12XX_SYSTEM_HLID, wl->links_map); memset(wl->tx_frames_map, 0, sizeof(wl->tx_frames_map)); for (i = 0; i < ACX_TX_DESCRIPTORS; i++) wl->tx_frames[i] = NULL; spin_lock_init(&wl->wl_lock); wl->state = WL1271_STATE_OFF; wl->fw_type = WL12XX_FW_TYPE_NONE; mutex_init(&wl->mutex); /* Apply default driver configuration. */ wl1271_conf_init(wl); order = get_order(WL1271_AGGR_BUFFER_SIZE); wl->aggr_buf = (u8 *)__get_free_pages(GFP_KERNEL, order); if (!wl->aggr_buf) { ret = -ENOMEM; goto err_wq; } wl->dummy_packet = wl12xx_alloc_dummy_packet(wl); if (!wl->dummy_packet) { ret = -ENOMEM; goto err_aggr; } /* Allocate one page for the FW log */ wl->fwlog = (u8 *)get_zeroed_page(GFP_KERNEL); if (!wl->fwlog) { ret = -ENOMEM; goto err_dummy_packet; } return hw; err_dummy_packet: dev_kfree_skb(wl->dummy_packet); err_aggr: free_pages((unsigned long)wl->aggr_buf, order); err_wq: destroy_workqueue(wl->freezable_wq); err_hw: wl1271_debugfs_exit(wl); ieee80211_free_hw(hw); err_hw_alloc: return ERR_PTR(ret); } static int wl1271_free_hw(struct wl1271 *wl) { /* Unblock any fwlog readers */ mutex_lock(&wl->mutex); wl->fwlog_size = -1; wake_up_interruptible_all(&wl->fwlog_waitq); mutex_unlock(&wl->mutex); device_remove_bin_file(wl->dev, &fwlog_attr); device_remove_file(wl->dev, &dev_attr_hw_pg_ver); device_remove_file(wl->dev, &dev_attr_bt_coex_state); free_page((unsigned long)wl->fwlog); dev_kfree_skb(wl->dummy_packet); free_pages((unsigned long)wl->aggr_buf, 
get_order(WL1271_AGGR_BUFFER_SIZE)); wl1271_debugfs_exit(wl); vfree(wl->fw); wl->fw = NULL; wl->fw_type = WL12XX_FW_TYPE_NONE; kfree(wl->nvs); wl->nvs = NULL; kfree(wl->fw_status); kfree(wl->tx_res_if); destroy_workqueue(wl->freezable_wq); ieee80211_free_hw(wl->hw); return 0; } static irqreturn_t wl12xx_hardirq(int irq, void *cookie) { struct wl1271 *wl = cookie; unsigned long flags; wl1271_debug(DEBUG_IRQ, "IRQ"); /* complete the ELP completion */ spin_lock_irqsave(&wl->wl_lock, flags); set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags); if (wl->elp_compl) { complete(wl->elp_compl); wl->elp_compl = NULL; } if (test_bit(WL1271_FLAG_SUSPENDED, &wl->flags)) { /* don't enqueue a work right now. mark it as pending */ set_bit(WL1271_FLAG_PENDING_WORK, &wl->flags); wl1271_debug(DEBUG_IRQ, "should not enqueue work"); disable_irq_nosync(wl->irq); pm_wakeup_event(wl->dev, 0); spin_unlock_irqrestore(&wl->wl_lock, flags); return IRQ_HANDLED; } spin_unlock_irqrestore(&wl->wl_lock, flags); return IRQ_WAKE_THREAD; } static int __devinit wl12xx_probe(struct platform_device *pdev) { struct wl12xx_platform_data *pdata = pdev->dev.platform_data; struct ieee80211_hw *hw; struct wl1271 *wl; unsigned long irqflags; int ret = -ENODEV; hw = wl1271_alloc_hw(); if (IS_ERR(hw)) { wl1271_error("can't allocate hw"); ret = PTR_ERR(hw); goto out; } wl = hw->priv; wl->irq = platform_get_irq(pdev, 0); wl->ref_clock = pdata->board_ref_clock; wl->tcxo_clock = pdata->board_tcxo_clock; wl->platform_quirks = pdata->platform_quirks; wl->set_power = pdata->set_power; wl->dev = &pdev->dev; wl->if_ops = pdata->ops; platform_set_drvdata(pdev, wl); if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ) irqflags = IRQF_TRIGGER_RISING; else irqflags = IRQF_TRIGGER_HIGH | IRQF_ONESHOT; ret = request_threaded_irq(wl->irq, wl12xx_hardirq, wl1271_irq, irqflags, pdev->name, wl); if (ret < 0) { wl1271_error("request_irq() failed: %d", ret); goto out_free_hw; } ret = enable_irq_wake(wl->irq); if (!ret) { 
wl->irq_wake_enabled = true; device_init_wakeup(wl->dev, 1); if (pdata->pwr_in_suspend) hw->wiphy->wowlan.flags = WIPHY_WOWLAN_ANY; } disable_irq(wl->irq); ret = wl1271_init_ieee80211(wl); if (ret) goto out_irq; ret = wl1271_register_hw(wl); if (ret) goto out_irq; /* Create sysfs file to control bt coex state */ ret = device_create_file(wl->dev, &dev_attr_bt_coex_state); if (ret < 0) { wl1271_error("failed to create sysfs file bt_coex_state"); goto out_irq; } /* Create sysfs file to get HW PG version */ ret = device_create_file(wl->dev, &dev_attr_hw_pg_ver); if (ret < 0) { wl1271_error("failed to create sysfs file hw_pg_ver"); goto out_bt_coex_state; } /* Create sysfs file for the FW log */ ret = device_create_bin_file(wl->dev, &fwlog_attr); if (ret < 0) { wl1271_error("failed to create sysfs file fwlog"); goto out_hw_pg_ver; } return 0; out_hw_pg_ver: device_remove_file(wl->dev, &dev_attr_hw_pg_ver); out_bt_coex_state: device_remove_file(wl->dev, &dev_attr_bt_coex_state); out_irq: free_irq(wl->irq, wl); out_free_hw: wl1271_free_hw(wl); out: return ret; } static int __devexit wl12xx_remove(struct platform_device *pdev) { struct wl1271 *wl = platform_get_drvdata(pdev); if (wl->irq_wake_enabled) { device_init_wakeup(wl->dev, 0); disable_irq_wake(wl->irq); } wl1271_unregister_hw(wl); free_irq(wl->irq, wl); wl1271_free_hw(wl); return 0; } static const struct platform_device_id wl12xx_id_table[] __devinitconst = { { "wl12xx", 0 }, { } /* Terminating Entry */ }; MODULE_DEVICE_TABLE(platform, wl12xx_id_table); static struct platform_driver wl12xx_driver = { .probe = wl12xx_probe, .remove = __devexit_p(wl12xx_remove), .id_table = wl12xx_id_table, .driver = { .name = "wl12xx_driver", .owner = THIS_MODULE, } }; static int __init wl12xx_init(void) { return platform_driver_register(&wl12xx_driver); } module_init(wl12xx_init); static void __exit wl12xx_exit(void) { platform_driver_unregister(&wl12xx_driver); } module_exit(wl12xx_exit); u32 wl12xx_debug_level = DEBUG_NONE; 
EXPORT_SYMBOL_GPL(wl12xx_debug_level);

/*
 * Debug verbosity bitmask, runtime-adjustable through
 * /sys/module/<module>/parameters/debug_level (readable and writable
 * by root only, per S_IRUSR | S_IWUSR).
 */
module_param_named(debug_level, wl12xx_debug_level, uint, S_IRUSR | S_IWUSR);
MODULE_PARM_DESC(debug_level, "wl12xx debugging level");

/*
 * Firmware-logger mode selector; fwlog_param is a charp declared
 * earlier in this file (not visible in this chunk). Permission 0:
 * set only at module load time, not via sysfs.
 */
module_param_named(fwlog, fwlog_param, charp, 0);
MODULE_PARM_DESC(fwlog, "FW logger options: continuous, ondemand, dbgpins or disable");

/* When set, firmware recovery triggers BUG() instead of a silent restart. */
module_param(bug_on_recovery, bool, S_IRUSR | S_IWUSR);
MODULE_PARM_DESC(bug_on_recovery, "BUG() on fw recovery");

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
gpl-2.0
nimengyu2/dm3730-android-gingerbread-2.3-dk2.1-kernel
sound/i2c/tea6330t.c
4792
11555
/*
 *  Routines for control of the TEA6330T circuit via i2c bus
 *  Sound fader control circuit for car radios by Philips Semiconductors
 *  Copyright (c) by Jaroslav Kysela <perex@perex.cz>
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

#include <linux/init.h>
#include <linux/slab.h>
#include <sound/core.h>
#include <sound/control.h>
#include <sound/tea6330t.h>

MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>");
MODULE_DESCRIPTION("Routines for control of the TEA6330T circuit via i2c bus");
MODULE_LICENSE("GPL");

#define TEA6330T_ADDR			(0x80>>1)	/* fixed address */

/* Chip subaddresses (register indices), used to index tea->regs[] too. */
#define TEA6330T_SADDR_VOLUME_LEFT	0x00	/* volume left */
#define TEA6330T_SADDR_VOLUME_RIGHT	0x01	/* volume right */
#define TEA6330T_SADDR_BASS		0x02	/* bass control */
#define TEA6330T_SADDR_TREBLE		0x03	/* treble control */
#define TEA6330T_SADDR_FADER		0x04	/* fader control */
#define   TEA6330T_MFN			0x20	/* mute control for selected channels */
#define   TEA6330T_FCH			0x10	/* select fader channels - front or rear */
#define TEA6330T_SADDR_AUDIO_SWITCH	0x05	/* audio switch */
#define   TEA6330T_GMU			0x80	/* mute control, general mute */
#define   TEA6330T_EQN			0x40	/* equalizer switchover (0=equalizer-on) */

/*
 * Per-chip state.  regs[] shadows the write-only hardware registers;
 * a volume shadow of 0 means the channel is muted while mleft/mright
 * remember the unmuted level.  All access is serialized with the
 * i2c bus lock.
 */
struct tea6330t {
	struct snd_i2c_device *device;	/* i2c device handle */
	struct snd_i2c_bus *bus;	/* owning i2c bus */
	int equalizer;			/* nonzero: external equalizer mode */
	int fader;			/* requested fader support (stored only) */
	unsigned char regs[8];		/* shadow of chip registers */
	unsigned char mleft, mright;	/* unmuted volume levels (0x14..0x3f) */
	unsigned char bass, treble;	/* mixer-scale tone values */
	unsigned char max_bass, max_treble; /* mixer-scale maxima (mode dependent) */
};

/*
 * Probe the fixed TEA6330T i2c address on @bus.
 * The @equalizer argument is accepted for API symmetry with
 * snd_tea6330t_update_mixer() but does not affect detection.
 * Returns the snd_i2c_probeaddr() result.
 */
int snd_tea6330t_detect(struct snd_i2c_bus *bus, int equalizer)
{
	int res;

	snd_i2c_lock(bus);
	res = snd_i2c_probeaddr(bus, TEA6330T_ADDR);
	snd_i2c_unlock(bus);
	return res;
}

#if 0
/* Debug helper: write a single register; caller must hold the bus lock. */
static void snd_tea6330t_set(struct tea6330t *tea,
			     unsigned char addr, unsigned char value)
{
#if 0
	printk(KERN_DEBUG "set - 0x%x/0x%x\n", addr, value);
#endif
	snd_i2c_write(tea->bus, TEA6330T_ADDR, addr, value, 1);
}
#endif

#define TEA6330T_MASTER_VOLUME(xname, xindex) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, .index = xindex, \
  .info = snd_tea6330t_info_master_volume, \
  .get = snd_tea6330t_get_master_volume, .put = snd_tea6330t_put_master_volume }

/* Master volume: stereo, mixer range 0..43 (hardware 0x14..0x3f). */
static int snd_tea6330t_info_master_volume(struct snd_kcontrol *kcontrol,
					   struct snd_ctl_elem_info *uinfo)
{
	uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
	uinfo->count = 2;
	uinfo->value.integer.min = 0;
	uinfo->value.integer.max = 43;
	return 0;
}

static int snd_tea6330t_get_master_volume(struct snd_kcontrol *kcontrol,
					  struct snd_ctl_elem_value *ucontrol)
{
	struct tea6330t *tea = snd_kcontrol_chip(kcontrol);

	snd_i2c_lock(tea->bus);
	/* report the unmuted levels, translated back to mixer scale */
	ucontrol->value.integer.value[0] = tea->mleft - 0x14;
	ucontrol->value.integer.value[1] = tea->mright - 0x14;
	snd_i2c_unlock(tea->bus);
	return 0;
}

static int snd_tea6330t_put_master_volume(struct snd_kcontrol *kcontrol,
					  struct snd_ctl_elem_value *ucontrol)
{
	struct tea6330t *tea = snd_kcontrol_chip(kcontrol);
	int change, count, err;
	unsigned char bytes[3];
	unsigned char val1, val2;

	/*
	 * Reject out-of-range values instead of the old "% 44" wrap-around,
	 * which silently mapped bogus userspace input (including negative
	 * values) onto arbitrary hardware levels.
	 */
	if (ucontrol->value.integer.value[0] < 0 ||
	    ucontrol->value.integer.value[0] > 43 ||
	    ucontrol->value.integer.value[1] < 0 ||
	    ucontrol->value.integer.value[1] > 43)
		return -EINVAL;
	val1 = ucontrol->value.integer.value[0] + 0x14;
	val2 = ucontrol->value.integer.value[1] + 0x14;
	snd_i2c_lock(tea->bus);
	change = val1 != tea->mleft || val2 != tea->mright;
	tea->mleft = val1;
	tea->mright = val2;
	count = 0;
	/* only push levels to channels that are not muted (shadow reg != 0) */
	if (tea->regs[TEA6330T_SADDR_VOLUME_LEFT] != 0) {
		bytes[count++] = TEA6330T_SADDR_VOLUME_LEFT;
		bytes[count++] = tea->regs[TEA6330T_SADDR_VOLUME_LEFT] = tea->mleft;
	}
	if (tea->regs[TEA6330T_SADDR_VOLUME_RIGHT] != 0) {
		/*
		 * If left was skipped, address the right register directly;
		 * otherwise the chip auto-increments from the left register.
		 */
		if (count == 0)
			bytes[count++] = TEA6330T_SADDR_VOLUME_RIGHT;
		bytes[count++] = tea->regs[TEA6330T_SADDR_VOLUME_RIGHT] = tea->mright;
	}
	if (count > 0) {
		if ((err = snd_i2c_sendbytes(tea->device, bytes, count)) < 0)
			change = err;
	}
	snd_i2c_unlock(tea->bus);
	return change;
}

#define TEA6330T_MASTER_SWITCH(xname, xindex) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, .index = xindex, \
  .info = snd_tea6330t_info_master_switch, \
  .get = snd_tea6330t_get_master_switch, .put = snd_tea6330t_put_master_switch }

#define snd_tea6330t_info_master_switch		snd_ctl_boolean_stereo_info

static int snd_tea6330t_get_master_switch(struct snd_kcontrol *kcontrol,
					  struct snd_ctl_elem_value *ucontrol)
{
	struct tea6330t *tea = snd_kcontrol_chip(kcontrol);

	snd_i2c_lock(tea->bus);
	/* a zero volume shadow register means the channel is muted */
	ucontrol->value.integer.value[0] =
		tea->regs[TEA6330T_SADDR_VOLUME_LEFT] == 0 ? 0 : 1;
	ucontrol->value.integer.value[1] =
		tea->regs[TEA6330T_SADDR_VOLUME_RIGHT] == 0 ? 0 : 1;
	snd_i2c_unlock(tea->bus);
	return 0;
}

static int snd_tea6330t_put_master_switch(struct snd_kcontrol *kcontrol,
					  struct snd_ctl_elem_value *ucontrol)
{
	struct tea6330t *tea = snd_kcontrol_chip(kcontrol);
	int change, err;
	unsigned char bytes[3];
	unsigned char oval1, oval2, val1, val2;

	val1 = ucontrol->value.integer.value[0] & 1;
	val2 = ucontrol->value.integer.value[1] & 1;
	snd_i2c_lock(tea->bus);
	oval1 = tea->regs[TEA6330T_SADDR_VOLUME_LEFT] == 0 ? 0 : 1;
	oval2 = tea->regs[TEA6330T_SADDR_VOLUME_RIGHT] == 0 ? 0 : 1;
	change = val1 != oval1 || val2 != oval2;
	/* mute writes 0; unmute restores the remembered level */
	tea->regs[TEA6330T_SADDR_VOLUME_LEFT] = val1 ? tea->mleft : 0;
	tea->regs[TEA6330T_SADDR_VOLUME_RIGHT] = val2 ? tea->mright : 0;
	/* both channels in one transfer via register auto-increment */
	bytes[0] = TEA6330T_SADDR_VOLUME_LEFT;
	bytes[1] = tea->regs[TEA6330T_SADDR_VOLUME_LEFT];
	bytes[2] = tea->regs[TEA6330T_SADDR_VOLUME_RIGHT];
	if ((err = snd_i2c_sendbytes(tea->device, bytes, 3)) < 0)
		change = err;
	snd_i2c_unlock(tea->bus);
	return change;
}

#define TEA6330T_BASS(xname, xindex) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, .index = xindex, \
  .info = snd_tea6330t_info_bass, \
  .get = snd_tea6330t_get_bass, .put = snd_tea6330t_put_bass }

/* Bass: mono, mixer range 0..max_bass (depends on equalizer mode). */
static int snd_tea6330t_info_bass(struct snd_kcontrol *kcontrol,
				  struct snd_ctl_elem_info *uinfo)
{
	struct tea6330t *tea = snd_kcontrol_chip(kcontrol);

	uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
	uinfo->count = 1;
	uinfo->value.integer.min = 0;
	uinfo->value.integer.max = tea->max_bass;
	return 0;
}

static int snd_tea6330t_get_bass(struct snd_kcontrol *kcontrol,
				 struct snd_ctl_elem_value *ucontrol)
{
	struct tea6330t *tea = snd_kcontrol_chip(kcontrol);

	ucontrol->value.integer.value[0] = tea->bass;
	return 0;
}

static int snd_tea6330t_put_bass(struct snd_kcontrol *kcontrol,
				 struct snd_ctl_elem_value *ucontrol)
{
	struct tea6330t *tea = snd_kcontrol_chip(kcontrol);
	int change, err;
	unsigned char bytes[2];
	unsigned char val1;

	/* validate instead of the old modulo wrap (see master volume) */
	if (ucontrol->value.integer.value[0] < 0 ||
	    ucontrol->value.integer.value[0] > tea->max_bass)
		return -EINVAL;
	val1 = ucontrol->value.integer.value[0];
	snd_i2c_lock(tea->bus);
	tea->bass = val1;
	/* register offset differs between equalizer and tone-control mode */
	val1 += tea->equalizer ? 7 : 3;
	change = tea->regs[TEA6330T_SADDR_BASS] != val1;
	bytes[0] = TEA6330T_SADDR_BASS;
	bytes[1] = tea->regs[TEA6330T_SADDR_BASS] = val1;
	if ((err = snd_i2c_sendbytes(tea->device, bytes, 2)) < 0)
		change = err;
	snd_i2c_unlock(tea->bus);
	return change;
}

#define TEA6330T_TREBLE(xname, xindex) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, .index = xindex, \
  .info = snd_tea6330t_info_treble, \
  .get = snd_tea6330t_get_treble, .put = snd_tea6330t_put_treble }

/* Treble: mono, mixer range 0..max_treble (0 in equalizer mode). */
static int snd_tea6330t_info_treble(struct snd_kcontrol *kcontrol,
				    struct snd_ctl_elem_info *uinfo)
{
	struct tea6330t *tea = snd_kcontrol_chip(kcontrol);

	uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
	uinfo->count = 1;
	uinfo->value.integer.min = 0;
	uinfo->value.integer.max = tea->max_treble;
	return 0;
}

static int snd_tea6330t_get_treble(struct snd_kcontrol *kcontrol,
				   struct snd_ctl_elem_value *ucontrol)
{
	struct tea6330t *tea = snd_kcontrol_chip(kcontrol);

	ucontrol->value.integer.value[0] = tea->treble;
	return 0;
}

static int snd_tea6330t_put_treble(struct snd_kcontrol *kcontrol,
				   struct snd_ctl_elem_value *ucontrol)
{
	struct tea6330t *tea = snd_kcontrol_chip(kcontrol);
	int change, err;
	unsigned char bytes[2];
	unsigned char val1;

	/* validate instead of the old modulo wrap (see master volume) */
	if (ucontrol->value.integer.value[0] < 0 ||
	    ucontrol->value.integer.value[0] > tea->max_treble)
		return -EINVAL;
	val1 = ucontrol->value.integer.value[0];
	snd_i2c_lock(tea->bus);
	tea->treble = val1;
	val1 += 3;			/* fixed hardware register offset */
	change = tea->regs[TEA6330T_SADDR_TREBLE] != val1;
	bytes[0] = TEA6330T_SADDR_TREBLE;
	bytes[1] = tea->regs[TEA6330T_SADDR_TREBLE] = val1;
	if ((err = snd_i2c_sendbytes(tea->device, bytes, 2)) < 0)
		change = err;
	snd_i2c_unlock(tea->bus);
	return change;
}

/* Control templates; read-only data, so const (placed in rodata). */
static const struct snd_kcontrol_new snd_tea6330t_controls[] = {
	TEA6330T_MASTER_SWITCH("Master Playback Switch", 0),
	TEA6330T_MASTER_VOLUME("Master Playback Volume", 0),
	TEA6330T_BASS("Tone Control - Bass", 0),
	TEA6330T_TREBLE("Tone Control - Treble", 0)
};

/* private_free callback: releases the state allocated in update_mixer */
static void snd_tea6330_free(struct snd_i2c_device *device)
{
	kfree(device->private_data);
}

/*
 * Create the TEA6330T mixer on @card / @bus: allocates the chip state,
 * puts the hardware into a defined initial state (fader off, equalizer
 * mode per @equalizer) and registers the kcontrols.  The state is freed
 * through the i2c device's private_free.  Returns 0 or a negative error.
 */
int snd_tea6330t_update_mixer(struct snd_card *card,
			      struct snd_i2c_bus *bus,
			      int equalizer, int fader)
{
	struct snd_i2c_device *device;
	struct tea6330t *tea;
	const struct snd_kcontrol_new *knew;
	unsigned int idx;
	int err;
	u8 default_treble, default_bass;
	unsigned char bytes[7];

	tea = kzalloc(sizeof(*tea), GFP_KERNEL);
	if (tea == NULL)
		return -ENOMEM;
	if ((err = snd_i2c_device_create(bus, "TEA6330T",
					 TEA6330T_ADDR, &device)) < 0) {
		kfree(tea);
		return err;
	}
	tea->device = device;
	tea->bus = bus;
	tea->equalizer = equalizer;
	tea->fader = fader;
	device->private_data = tea;
	device->private_free = snd_tea6330_free;

	snd_i2c_lock(bus);

	/* turn fader off and handle equalizer */
	tea->regs[TEA6330T_SADDR_FADER] = 0x3f;
	tea->regs[TEA6330T_SADDR_AUDIO_SWITCH] = equalizer ? 0 : TEA6330T_EQN;
	/* initialize mixer ranges and defaults (mode dependent) */
	if (!tea->equalizer) {
		tea->max_bass = 9;
		tea->max_treble = 8;
		default_bass = 3 + 4;
		tea->bass = 4;
		default_treble = 3 + 4;
		tea->treble = 4;
	} else {
		tea->max_bass = 5;
		tea->max_treble = 0;
		default_bass = 7 + 4;
		tea->bass = 4;
		default_treble = 3;
		tea->treble = 0;
	}
	tea->mleft = tea->mright = 0x14;	/* lowest level, unmuted */
	tea->regs[TEA6330T_SADDR_BASS] = default_bass;
	tea->regs[TEA6330T_SADDR_TREBLE] = default_treble;

	/* compose I2C message and put the hardware to initial state */
	bytes[0] = TEA6330T_SADDR_VOLUME_LEFT;
	for (idx = 0; idx < 6; idx++)
		bytes[idx + 1] = tea->regs[idx];
	if ((err = snd_i2c_sendbytes(device, bytes, 7)) < 0)
		goto __error;

	strcat(card->mixername, ",TEA6330T");
	if ((err = snd_component_add(card, "TEA6330T")) < 0)
		goto __error;

	for (idx = 0; idx < ARRAY_SIZE(snd_tea6330t_controls); idx++) {
		knew = &snd_tea6330t_controls[idx];
		/* no treble control in equalizer mode (max_treble == 0) */
		if (tea->treble == 0 &&
		    !strcmp(knew->name, "Tone Control - Treble"))
			continue;
		if ((err = snd_ctl_add(card, snd_ctl_new1(knew, tea))) < 0)
			goto __error;
	}

	snd_i2c_unlock(bus);
	return 0;

      __error:
	snd_i2c_unlock(bus);
	snd_i2c_device_free(device);	/* also frees tea via private_free */
	return err;
}

EXPORT_SYMBOL(snd_tea6330t_detect);
EXPORT_SYMBOL(snd_tea6330t_update_mixer);

/*
 *  INIT part
 */

static int __init alsa_tea6330t_init(void)
{
	return 0;
}

static void __exit alsa_tea6330t_exit(void)
{
}

module_init(alsa_tea6330t_init)
module_exit(alsa_tea6330t_exit)
gpl-2.0
aatjitra/hammerhead
drivers/staging/vme/devices/vme_user.c
5048
22635
/* * VMEbus User access driver * * Author: Martyn Welch <martyn.welch@ge.com> * Copyright 2008 GE Intelligent Platforms Embedded Systems, Inc. * * Based on work by: * Tom Armistead and Ajit Prem * Copyright 2004 Motorola Inc. * * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ #include <linux/cdev.h> #include <linux/delay.h> #include <linux/device.h> #include <linux/dma-mapping.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/ioctl.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/module.h> #include <linux/pagemap.h> #include <linux/pci.h> #include <linux/semaphore.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/syscalls.h> #include <linux/mutex.h> #include <linux/types.h> #include <linux/io.h> #include <linux/uaccess.h> #include "../vme.h" #include "vme_user.h" static DEFINE_MUTEX(vme_user_mutex); static const char driver_name[] = "vme_user"; static int bus[VME_USER_BUS_MAX]; static unsigned int bus_num; /* Currently Documentation/devices.txt defines the following for VME: * * 221 char VME bus * 0 = /dev/bus/vme/m0 First master image * 1 = /dev/bus/vme/m1 Second master image * 2 = /dev/bus/vme/m2 Third master image * 3 = /dev/bus/vme/m3 Fourth master image * 4 = /dev/bus/vme/s0 First slave image * 5 = /dev/bus/vme/s1 Second slave image * 6 = /dev/bus/vme/s2 Third slave image * 7 = /dev/bus/vme/s3 Fourth slave image * 8 = /dev/bus/vme/ctl Control * * It is expected that all VME bus drivers will use the * same interface. For interface documentation see * http://www.vmelinux.org/. * * However the VME driver at http://www.vmelinux.org/ is rather old and doesn't * even support the tsi148 chipset (which has 8 master and 8 slave windows). 
* We'll run with this or now as far as possible, however it probably makes * sense to get rid of the old mappings and just do everything dynamically. * * So for now, we'll restrict the driver to providing 4 masters and 4 slaves as * defined above and try to support at least some of the interface from * http://www.vmelinux.org/ as an alternative drive can be written providing a * saner interface later. * * The vmelinux.org driver never supported slave images, the devices reserved * for slaves were repurposed to support all 8 master images on the UniverseII! * We shall support 4 masters and 4 slaves with this driver. */ #define VME_MAJOR 221 /* VME Major Device Number */ #define VME_DEVS 9 /* Number of dev entries */ #define MASTER_MINOR 0 #define MASTER_MAX 3 #define SLAVE_MINOR 4 #define SLAVE_MAX 7 #define CONTROL_MINOR 8 #define PCI_BUF_SIZE 0x20000 /* Size of one slave image buffer */ /* * Structure to handle image related parameters. */ struct image_desc { void *kern_buf; /* Buffer address in kernel space */ dma_addr_t pci_buf; /* Buffer address in PCI address space */ unsigned long long size_buf; /* Buffer size */ struct semaphore sem; /* Semaphore for locking image */ struct device *device; /* Sysfs device */ struct vme_resource *resource; /* VME resource */ int users; /* Number of current users */ }; static struct image_desc image[VME_DEVS]; struct driver_stats { unsigned long reads; unsigned long writes; unsigned long ioctls; unsigned long irqs; unsigned long berrs; unsigned long dmaErrors; unsigned long timeouts; unsigned long external; }; static struct driver_stats statistics; static struct cdev *vme_user_cdev; /* Character device */ static struct class *vme_user_sysfs_class; /* Sysfs class */ static struct vme_dev *vme_user_bridge; /* Pointer to user device */ static const int type[VME_DEVS] = { MASTER_MINOR, MASTER_MINOR, MASTER_MINOR, MASTER_MINOR, SLAVE_MINOR, SLAVE_MINOR, SLAVE_MINOR, SLAVE_MINOR, CONTROL_MINOR }; static int vme_user_open(struct 
inode *, struct file *); static int vme_user_release(struct inode *, struct file *); static ssize_t vme_user_read(struct file *, char __user *, size_t, loff_t *); static ssize_t vme_user_write(struct file *, const char __user *, size_t, loff_t *); static loff_t vme_user_llseek(struct file *, loff_t, int); static long vme_user_unlocked_ioctl(struct file *, unsigned int, unsigned long); static int vme_user_match(struct vme_dev *); static int __devinit vme_user_probe(struct vme_dev *); static int __devexit vme_user_remove(struct vme_dev *); static const struct file_operations vme_user_fops = { .open = vme_user_open, .release = vme_user_release, .read = vme_user_read, .write = vme_user_write, .llseek = vme_user_llseek, .unlocked_ioctl = vme_user_unlocked_ioctl, }; /* * Reset all the statistic counters */ static void reset_counters(void) { statistics.reads = 0; statistics.writes = 0; statistics.ioctls = 0; statistics.irqs = 0; statistics.berrs = 0; statistics.dmaErrors = 0; statistics.timeouts = 0; } static int vme_user_open(struct inode *inode, struct file *file) { int err; unsigned int minor = MINOR(inode->i_rdev); down(&image[minor].sem); /* Allow device to be opened if a resource is needed and allocated. */ if (minor < CONTROL_MINOR && image[minor].resource == NULL) { printk(KERN_ERR "No resources allocated for device\n"); err = -EINVAL; goto err_res; } /* Increment user count */ image[minor].users++; up(&image[minor].sem); return 0; err_res: up(&image[minor].sem); return err; } static int vme_user_release(struct inode *inode, struct file *file) { unsigned int minor = MINOR(inode->i_rdev); down(&image[minor].sem); /* Decrement user count */ image[minor].users--; up(&image[minor].sem); return 0; } /* * We are going ot alloc a page during init per window for small transfers. * Small transfers will go VME -> buffer -> user space. 
Larger (more than a * page) transfers will lock the user space buffer into memory and then * transfer the data directly into the user space buffers. */ static ssize_t resource_to_user(int minor, char __user *buf, size_t count, loff_t *ppos) { ssize_t retval; ssize_t copied = 0; if (count <= image[minor].size_buf) { /* We copy to kernel buffer */ copied = vme_master_read(image[minor].resource, image[minor].kern_buf, count, *ppos); if (copied < 0) return (int)copied; retval = __copy_to_user(buf, image[minor].kern_buf, (unsigned long)copied); if (retval != 0) { copied = (copied - retval); printk(KERN_INFO "User copy failed\n"); return -EINVAL; } } else { /* XXX Need to write this */ printk(KERN_INFO "Currently don't support large transfers\n"); /* Map in pages from userspace */ /* Call vme_master_read to do the transfer */ return -EINVAL; } return copied; } /* * We are going ot alloc a page during init per window for small transfers. * Small transfers will go user space -> buffer -> VME. Larger (more than a * page) transfers will lock the user space buffer into memory and then * transfer the data directly from the user space buffers out to VME. 
*/ static ssize_t resource_from_user(unsigned int minor, const char __user *buf, size_t count, loff_t *ppos) { ssize_t retval; ssize_t copied = 0; if (count <= image[minor].size_buf) { retval = __copy_from_user(image[minor].kern_buf, buf, (unsigned long)count); if (retval != 0) copied = (copied - retval); else copied = count; copied = vme_master_write(image[minor].resource, image[minor].kern_buf, copied, *ppos); } else { /* XXX Need to write this */ printk(KERN_INFO "Currently don't support large transfers\n"); /* Map in pages from userspace */ /* Call vme_master_write to do the transfer */ return -EINVAL; } return copied; } static ssize_t buffer_to_user(unsigned int minor, char __user *buf, size_t count, loff_t *ppos) { void *image_ptr; ssize_t retval; image_ptr = image[minor].kern_buf + *ppos; retval = __copy_to_user(buf, image_ptr, (unsigned long)count); if (retval != 0) { retval = (count - retval); printk(KERN_WARNING "Partial copy to userspace\n"); } else retval = count; /* Return number of bytes successfully read */ return retval; } static ssize_t buffer_from_user(unsigned int minor, const char __user *buf, size_t count, loff_t *ppos) { void *image_ptr; size_t retval; image_ptr = image[minor].kern_buf + *ppos; retval = __copy_from_user(image_ptr, buf, (unsigned long)count); if (retval != 0) { retval = (count - retval); printk(KERN_WARNING "Partial copy to userspace\n"); } else retval = count; /* Return number of bytes successfully read */ return retval; } static ssize_t vme_user_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { unsigned int minor = MINOR(file->f_dentry->d_inode->i_rdev); ssize_t retval; size_t image_size; size_t okcount; if (minor == CONTROL_MINOR) return 0; down(&image[minor].sem); /* XXX Do we *really* want this helper - we can use vme_*_get ? 
*/ image_size = vme_get_size(image[minor].resource); /* Ensure we are starting at a valid location */ if ((*ppos < 0) || (*ppos > (image_size - 1))) { up(&image[minor].sem); return 0; } /* Ensure not reading past end of the image */ if (*ppos + count > image_size) okcount = image_size - *ppos; else okcount = count; switch (type[minor]) { case MASTER_MINOR: retval = resource_to_user(minor, buf, okcount, ppos); break; case SLAVE_MINOR: retval = buffer_to_user(minor, buf, okcount, ppos); break; default: retval = -EINVAL; } up(&image[minor].sem); if (retval > 0) *ppos += retval; return retval; } static ssize_t vme_user_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { unsigned int minor = MINOR(file->f_dentry->d_inode->i_rdev); ssize_t retval; size_t image_size; size_t okcount; if (minor == CONTROL_MINOR) return 0; down(&image[minor].sem); image_size = vme_get_size(image[minor].resource); /* Ensure we are starting at a valid location */ if ((*ppos < 0) || (*ppos > (image_size - 1))) { up(&image[minor].sem); return 0; } /* Ensure not reading past end of the image */ if (*ppos + count > image_size) okcount = image_size - *ppos; else okcount = count; switch (type[minor]) { case MASTER_MINOR: retval = resource_from_user(minor, buf, okcount, ppos); break; case SLAVE_MINOR: retval = buffer_from_user(minor, buf, okcount, ppos); break; default: retval = -EINVAL; } up(&image[minor].sem); if (retval > 0) *ppos += retval; return retval; } static loff_t vme_user_llseek(struct file *file, loff_t off, int whence) { loff_t absolute = -1; unsigned int minor = MINOR(file->f_dentry->d_inode->i_rdev); size_t image_size; if (minor == CONTROL_MINOR) return -EINVAL; down(&image[minor].sem); image_size = vme_get_size(image[minor].resource); switch (whence) { case SEEK_SET: absolute = off; break; case SEEK_CUR: absolute = file->f_pos + off; break; case SEEK_END: absolute = image_size + off; break; default: up(&image[minor].sem); return -EINVAL; break; } if 
((absolute < 0) || (absolute >= image_size)) { up(&image[minor].sem); return -EINVAL; } file->f_pos = absolute; up(&image[minor].sem); return absolute; } /* * The ioctls provided by the old VME access method (the one at vmelinux.org) * are most certainly wrong as the effectively push the registers layout * through to user space. Given that the VME core can handle multiple bridges, * with different register layouts this is most certainly not the way to go. * * We aren't using the structures defined in the Motorola driver either - these * are also quite low level, however we should use the definitions that have * already been defined. */ static int vme_user_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg) { struct vme_master master; struct vme_slave slave; struct vme_irq_id irq_req; unsigned long copied; unsigned int minor = MINOR(inode->i_rdev); int retval; dma_addr_t pci_addr; void __user *argp = (void __user *)arg; statistics.ioctls++; switch (type[minor]) { case CONTROL_MINOR: switch (cmd) { case VME_IRQ_GEN: copied = copy_from_user(&irq_req, (char *)arg, sizeof(struct vme_irq_id)); if (copied != 0) { printk(KERN_WARNING "Partial copy from userspace\n"); return -EFAULT; } retval = vme_irq_generate(vme_user_bridge, irq_req.level, irq_req.statid); return retval; } break; case MASTER_MINOR: switch (cmd) { case VME_GET_MASTER: memset(&master, 0, sizeof(struct vme_master)); /* XXX We do not want to push aspace, cycle and width * to userspace as they are */ retval = vme_master_get(image[minor].resource, &master.enable, &master.vme_addr, &master.size, &master.aspace, &master.cycle, &master.dwidth); copied = copy_to_user(argp, &master, sizeof(struct vme_master)); if (copied != 0) { printk(KERN_WARNING "Partial copy to " "userspace\n"); return -EFAULT; } return retval; break; case VME_SET_MASTER: copied = copy_from_user(&master, argp, sizeof(master)); if (copied != 0) { printk(KERN_WARNING "Partial copy from " "userspace\n"); return 
-EFAULT; } /* XXX We do not want to push aspace, cycle and width * to userspace as they are */ return vme_master_set(image[minor].resource, master.enable, master.vme_addr, master.size, master.aspace, master.cycle, master.dwidth); break; } break; case SLAVE_MINOR: switch (cmd) { case VME_GET_SLAVE: memset(&slave, 0, sizeof(struct vme_slave)); /* XXX We do not want to push aspace, cycle and width * to userspace as they are */ retval = vme_slave_get(image[minor].resource, &slave.enable, &slave.vme_addr, &slave.size, &pci_addr, &slave.aspace, &slave.cycle); copied = copy_to_user(argp, &slave, sizeof(struct vme_slave)); if (copied != 0) { printk(KERN_WARNING "Partial copy to " "userspace\n"); return -EFAULT; } return retval; break; case VME_SET_SLAVE: copied = copy_from_user(&slave, argp, sizeof(slave)); if (copied != 0) { printk(KERN_WARNING "Partial copy from " "userspace\n"); return -EFAULT; } /* XXX We do not want to push aspace, cycle and width * to userspace as they are */ return vme_slave_set(image[minor].resource, slave.enable, slave.vme_addr, slave.size, image[minor].pci_buf, slave.aspace, slave.cycle); break; } break; } return -EINVAL; } static long vme_user_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { int ret; mutex_lock(&vme_user_mutex); ret = vme_user_ioctl(file->f_path.dentry->d_inode, file, cmd, arg); mutex_unlock(&vme_user_mutex); return ret; } /* * Unallocate a previously allocated buffer */ static void buf_unalloc(int num) { if (image[num].kern_buf) { #ifdef VME_DEBUG printk(KERN_DEBUG "UniverseII:Releasing buffer at %p\n", image[num].pci_buf); #endif vme_free_consistent(image[num].resource, image[num].size_buf, image[num].kern_buf, image[num].pci_buf); image[num].kern_buf = NULL; image[num].pci_buf = 0; image[num].size_buf = 0; #ifdef VME_DEBUG } else { printk(KERN_DEBUG "UniverseII: Buffer not allocated\n"); #endif } } static struct vme_driver vme_user_driver = { .name = driver_name, .match = vme_user_match, .probe = 
vme_user_probe, .remove = __devexit_p(vme_user_remove), }; static int __init vme_user_init(void) { int retval = 0; printk(KERN_INFO "VME User Space Access Driver\n"); if (bus_num == 0) { printk(KERN_ERR "%s: No cards, skipping registration\n", driver_name); retval = -ENODEV; goto err_nocard; } /* Let's start by supporting one bus, we can support more than one * in future revisions if that ever becomes necessary. */ if (bus_num > VME_USER_BUS_MAX) { printk(KERN_ERR "%s: Driver only able to handle %d buses\n", driver_name, VME_USER_BUS_MAX); bus_num = VME_USER_BUS_MAX; } /* * Here we just register the maximum number of devices we can and * leave vme_user_match() to allow only 1 to go through to probe(). * This way, if we later want to allow multiple user access devices, * we just change the code in vme_user_match(). */ retval = vme_register_driver(&vme_user_driver, VME_MAX_SLOTS); if (retval != 0) goto err_reg; return retval; err_reg: err_nocard: return retval; } static int vme_user_match(struct vme_dev *vdev) { if (vdev->num >= VME_USER_BUS_MAX) return 0; return 1; } /* * In this simple access driver, the old behaviour is being preserved as much * as practical. We will therefore reserve the buffers and request the images * here so that we don't have to do it later. 
*/ static int __devinit vme_user_probe(struct vme_dev *vdev) { int i, err; char name[12]; /* Save pointer to the bridge device */ if (vme_user_bridge != NULL) { printk(KERN_ERR "%s: Driver can only be loaded for 1 device\n", driver_name); err = -EINVAL; goto err_dev; } vme_user_bridge = vdev; /* Initialise descriptors */ for (i = 0; i < VME_DEVS; i++) { image[i].kern_buf = NULL; image[i].pci_buf = 0; sema_init(&image[i].sem, 1); image[i].device = NULL; image[i].resource = NULL; image[i].users = 0; } /* Initialise statistics counters */ reset_counters(); /* Assign major and minor numbers for the driver */ err = register_chrdev_region(MKDEV(VME_MAJOR, 0), VME_DEVS, driver_name); if (err) { printk(KERN_WARNING "%s: Error getting Major Number %d for " "driver.\n", driver_name, VME_MAJOR); goto err_region; } /* Register the driver as a char device */ vme_user_cdev = cdev_alloc(); vme_user_cdev->ops = &vme_user_fops; vme_user_cdev->owner = THIS_MODULE; err = cdev_add(vme_user_cdev, MKDEV(VME_MAJOR, 0), VME_DEVS); if (err) { printk(KERN_WARNING "%s: cdev_all failed\n", driver_name); goto err_char; } /* Request slave resources and allocate buffers (128kB wide) */ for (i = SLAVE_MINOR; i < (SLAVE_MAX + 1); i++) { /* XXX Need to properly request attributes */ /* For ca91cx42 bridge there are only two slave windows * supporting A16 addressing, so we request A24 supported * by all windows. 
*/ image[i].resource = vme_slave_request(vme_user_bridge, VME_A24, VME_SCT); if (image[i].resource == NULL) { printk(KERN_WARNING "Unable to allocate slave " "resource\n"); goto err_slave; } image[i].size_buf = PCI_BUF_SIZE; image[i].kern_buf = vme_alloc_consistent(image[i].resource, image[i].size_buf, &image[i].pci_buf); if (image[i].kern_buf == NULL) { printk(KERN_WARNING "Unable to allocate memory for " "buffer\n"); image[i].pci_buf = 0; vme_slave_free(image[i].resource); err = -ENOMEM; goto err_slave; } } /* * Request master resources allocate page sized buffers for small * reads and writes */ for (i = MASTER_MINOR; i < (MASTER_MAX + 1); i++) { /* XXX Need to properly request attributes */ image[i].resource = vme_master_request(vme_user_bridge, VME_A32, VME_SCT, VME_D32); if (image[i].resource == NULL) { printk(KERN_WARNING "Unable to allocate master " "resource\n"); goto err_master; } image[i].size_buf = PCI_BUF_SIZE; image[i].kern_buf = kmalloc(image[i].size_buf, GFP_KERNEL); if (image[i].kern_buf == NULL) { printk(KERN_WARNING "Unable to allocate memory for " "master window buffers\n"); err = -ENOMEM; goto err_master_buf; } } /* Create sysfs entries - on udev systems this creates the dev files */ vme_user_sysfs_class = class_create(THIS_MODULE, driver_name); if (IS_ERR(vme_user_sysfs_class)) { printk(KERN_ERR "Error creating vme_user class.\n"); err = PTR_ERR(vme_user_sysfs_class); goto err_class; } /* Add sysfs Entries */ for (i = 0; i < VME_DEVS; i++) { int num; switch (type[i]) { case MASTER_MINOR: sprintf(name, "bus/vme/m%%d"); break; case CONTROL_MINOR: sprintf(name, "bus/vme/ctl"); break; case SLAVE_MINOR: sprintf(name, "bus/vme/s%%d"); break; default: err = -EINVAL; goto err_sysfs; break; } num = (type[i] == SLAVE_MINOR) ? 
i - (MASTER_MAX + 1) : i; image[i].device = device_create(vme_user_sysfs_class, NULL, MKDEV(VME_MAJOR, i), NULL, name, num); if (IS_ERR(image[i].device)) { printk(KERN_INFO "%s: Error creating sysfs device\n", driver_name); err = PTR_ERR(image[i].device); goto err_sysfs; } } return 0; /* Ensure counter set correcty to destroy all sysfs devices */ i = VME_DEVS; err_sysfs: while (i > 0) { i--; device_destroy(vme_user_sysfs_class, MKDEV(VME_MAJOR, i)); } class_destroy(vme_user_sysfs_class); /* Ensure counter set correcty to unalloc all master windows */ i = MASTER_MAX + 1; err_master_buf: for (i = MASTER_MINOR; i < (MASTER_MAX + 1); i++) kfree(image[i].kern_buf); err_master: while (i > MASTER_MINOR) { i--; vme_master_free(image[i].resource); } /* * Ensure counter set correcty to unalloc all slave windows and buffers */ i = SLAVE_MAX + 1; err_slave: while (i > SLAVE_MINOR) { i--; buf_unalloc(i); vme_slave_free(image[i].resource); } err_class: cdev_del(vme_user_cdev); err_char: unregister_chrdev_region(MKDEV(VME_MAJOR, 0), VME_DEVS); err_region: err_dev: return err; } static int __devexit vme_user_remove(struct vme_dev *dev) { int i; /* Remove sysfs Entries */ for (i = 0; i < VME_DEVS; i++) device_destroy(vme_user_sysfs_class, MKDEV(VME_MAJOR, i)); class_destroy(vme_user_sysfs_class); for (i = MASTER_MINOR; i < (MASTER_MAX + 1); i++) { kfree(image[i].kern_buf); vme_master_free(image[i].resource); } for (i = SLAVE_MINOR; i < (SLAVE_MAX + 1); i++) { vme_slave_set(image[i].resource, 0, 0, 0, 0, VME_A32, 0); buf_unalloc(i); vme_slave_free(image[i].resource); } /* Unregister device driver */ cdev_del(vme_user_cdev); /* Unregiser the major and minor device numbers */ unregister_chrdev_region(MKDEV(VME_MAJOR, 0), VME_DEVS); return 0; } static void __exit vme_user_exit(void) { vme_unregister_driver(&vme_user_driver); } MODULE_PARM_DESC(bus, "Enumeration of VMEbus to which the driver is connected"); module_param_array(bus, int, &bus_num, 0); MODULE_DESCRIPTION("VME User Space 
Access Driver"); MODULE_AUTHOR("Martyn Welch <martyn.welch@ge.com"); MODULE_LICENSE("GPL"); module_init(vme_user_init); module_exit(vme_user_exit);
gpl-2.0
NebulaOy/linux
fs/reiserfs/xattr_user.c
7864
1290
#include "reiserfs.h" #include <linux/errno.h> #include <linux/fs.h> #include <linux/pagemap.h> #include <linux/xattr.h> #include "xattr.h" #include <asm/uaccess.h> static int user_get(struct dentry *dentry, const char *name, void *buffer, size_t size, int handler_flags) { if (strlen(name) < sizeof(XATTR_USER_PREFIX)) return -EINVAL; if (!reiserfs_xattrs_user(dentry->d_sb)) return -EOPNOTSUPP; return reiserfs_xattr_get(dentry->d_inode, name, buffer, size); } static int user_set(struct dentry *dentry, const char *name, const void *buffer, size_t size, int flags, int handler_flags) { if (strlen(name) < sizeof(XATTR_USER_PREFIX)) return -EINVAL; if (!reiserfs_xattrs_user(dentry->d_sb)) return -EOPNOTSUPP; return reiserfs_xattr_set(dentry->d_inode, name, buffer, size, flags); } static size_t user_list(struct dentry *dentry, char *list, size_t list_size, const char *name, size_t name_len, int handler_flags) { const size_t len = name_len + 1; if (!reiserfs_xattrs_user(dentry->d_sb)) return 0; if (list && len <= list_size) { memcpy(list, name, name_len); list[name_len] = '\0'; } return len; } const struct xattr_handler reiserfs_xattr_user_handler = { .prefix = XATTR_USER_PREFIX, .get = user_get, .set = user_set, .list = user_list, };
gpl-2.0
mastero9017/hammerhead-5.0
net/sctp/probe.c
8376
5288
/* * sctp_probe - Observe the SCTP flow with kprobes. * * The idea for this came from Werner Almesberger's umlsim * Copyright (C) 2004, Stephen Hemminger <shemminger@osdl.org> * * Modified for SCTP from Stephen Hemminger's code * Copyright (C) 2010, Wei Yongjun <yjwei@cn.fujitsu.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kernel.h> #include <linux/kprobes.h> #include <linux/socket.h> #include <linux/sctp.h> #include <linux/proc_fs.h> #include <linux/vmalloc.h> #include <linux/module.h> #include <linux/kfifo.h> #include <linux/time.h> #include <net/net_namespace.h> #include <net/sctp/sctp.h> #include <net/sctp/sm.h> MODULE_AUTHOR("Wei Yongjun <yjwei@cn.fujitsu.com>"); MODULE_DESCRIPTION("SCTP snooper"); MODULE_LICENSE("GPL"); static int port __read_mostly = 0; MODULE_PARM_DESC(port, "Port to match (0=all)"); module_param(port, int, 0); static int bufsize __read_mostly = 64 * 1024; MODULE_PARM_DESC(bufsize, "Log buffer size (default 64k)"); module_param(bufsize, int, 0); static int full __read_mostly = 1; MODULE_PARM_DESC(full, "Full log (1=every ack packet received, 0=only cwnd changes)"); module_param(full, int, 0); static const char procname[] = "sctpprobe"; static struct { struct kfifo fifo; spinlock_t lock; wait_queue_head_t wait; struct timespec tstart; } sctpw; 
static void printl(const char *fmt, ...) { va_list args; int len; char tbuf[256]; va_start(args, fmt); len = vscnprintf(tbuf, sizeof(tbuf), fmt, args); va_end(args); kfifo_in_locked(&sctpw.fifo, tbuf, len, &sctpw.lock); wake_up(&sctpw.wait); } static int sctpprobe_open(struct inode *inode, struct file *file) { kfifo_reset(&sctpw.fifo); getnstimeofday(&sctpw.tstart); return 0; } static ssize_t sctpprobe_read(struct file *file, char __user *buf, size_t len, loff_t *ppos) { int error = 0, cnt = 0; unsigned char *tbuf; if (!buf) return -EINVAL; if (len == 0) return 0; tbuf = vmalloc(len); if (!tbuf) return -ENOMEM; error = wait_event_interruptible(sctpw.wait, kfifo_len(&sctpw.fifo) != 0); if (error) goto out_free; cnt = kfifo_out_locked(&sctpw.fifo, tbuf, len, &sctpw.lock); error = copy_to_user(buf, tbuf, cnt) ? -EFAULT : 0; out_free: vfree(tbuf); return error ? error : cnt; } static const struct file_operations sctpprobe_fops = { .owner = THIS_MODULE, .open = sctpprobe_open, .read = sctpprobe_read, .llseek = noop_llseek, }; sctp_disposition_t jsctp_sf_eat_sack(const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) { struct sctp_transport *sp; static __u32 lcwnd = 0; struct timespec now; sp = asoc->peer.primary_path; if ((full || sp->cwnd != lcwnd) && (!port || asoc->peer.port == port || ep->base.bind_addr.port == port)) { lcwnd = sp->cwnd; getnstimeofday(&now); now = timespec_sub(now, sctpw.tstart); printl("%lu.%06lu ", (unsigned long) now.tv_sec, (unsigned long) now.tv_nsec / NSEC_PER_USEC); printl("%p %5d %5d %5d %8d %5d ", asoc, ep->base.bind_addr.port, asoc->peer.port, asoc->pathmtu, asoc->peer.rwnd, asoc->unack_data); list_for_each_entry(sp, &asoc->peer.transport_addr_list, transports) { if (sp == asoc->peer.primary_path) printl("*"); if (sp->ipaddr.sa.sa_family == AF_INET) printl("%pI4 ", &sp->ipaddr.v4.sin_addr); else printl("%pI6 ", &sp->ipaddr.v6.sin6_addr); printl("%2u %8u %8u %8u 
%8u %8u ", sp->state, sp->cwnd, sp->ssthresh, sp->flight_size, sp->partial_bytes_acked, sp->pathmtu); } printl("\n"); } jprobe_return(); return 0; } static struct jprobe sctp_recv_probe = { .kp = { .symbol_name = "sctp_sf_eat_sack_6_2", }, .entry = jsctp_sf_eat_sack, }; static __init int sctpprobe_init(void) { int ret = -ENOMEM; init_waitqueue_head(&sctpw.wait); spin_lock_init(&sctpw.lock); if (kfifo_alloc(&sctpw.fifo, bufsize, GFP_KERNEL)) return ret; if (!proc_net_fops_create(&init_net, procname, S_IRUSR, &sctpprobe_fops)) goto free_kfifo; ret = register_jprobe(&sctp_recv_probe); if (ret) goto remove_proc; pr_info("probe registered (port=%d)\n", port); return 0; remove_proc: proc_net_remove(&init_net, procname); free_kfifo: kfifo_free(&sctpw.fifo); return ret; } static __exit void sctpprobe_exit(void) { kfifo_free(&sctpw.fifo); proc_net_remove(&init_net, procname); unregister_jprobe(&sctp_recv_probe); } module_init(sctpprobe_init); module_exit(sctpprobe_exit);
gpl-2.0
IGGYVIP/lge-kernel-e430
drivers/misc/ep93xx_pwm.c
8632
9721
/* * Simple PWM driver for EP93XX * * (c) Copyright 2009 Matthieu Crapet <mcrapet@gmail.com> * (c) Copyright 2009 H Hartley Sweeten <hsweeten@visionengravers.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * EP9307 has only one channel: * - PWMOUT * * EP9301/02/12/15 have two channels: * - PWMOUT * - PWMOUT1 (alternate function for EGPIO14) */ #include <linux/module.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/clk.h> #include <linux/err.h> #include <linux/io.h> #include <mach/platform.h> #define EP93XX_PWMx_TERM_COUNT 0x00 #define EP93XX_PWMx_DUTY_CYCLE 0x04 #define EP93XX_PWMx_ENABLE 0x08 #define EP93XX_PWMx_INVERT 0x0C #define EP93XX_PWM_MAX_COUNT 0xFFFF struct ep93xx_pwm { void __iomem *mmio_base; struct clk *clk; u32 duty_percent; }; static inline void ep93xx_pwm_writel(struct ep93xx_pwm *pwm, unsigned int val, unsigned int off) { __raw_writel(val, pwm->mmio_base + off); } static inline unsigned int ep93xx_pwm_readl(struct ep93xx_pwm *pwm, unsigned int off) { return __raw_readl(pwm->mmio_base + off); } static inline void ep93xx_pwm_write_tc(struct ep93xx_pwm *pwm, u16 value) { ep93xx_pwm_writel(pwm, value, EP93XX_PWMx_TERM_COUNT); } static inline u16 ep93xx_pwm_read_tc(struct ep93xx_pwm *pwm) { return ep93xx_pwm_readl(pwm, EP93XX_PWMx_TERM_COUNT); } static inline void ep93xx_pwm_write_dc(struct ep93xx_pwm *pwm, u16 value) { ep93xx_pwm_writel(pwm, value, EP93XX_PWMx_DUTY_CYCLE); } static inline void ep93xx_pwm_enable(struct ep93xx_pwm *pwm) { ep93xx_pwm_writel(pwm, 0x1, EP93XX_PWMx_ENABLE); } static inline void ep93xx_pwm_disable(struct ep93xx_pwm *pwm) { ep93xx_pwm_writel(pwm, 0x0, EP93XX_PWMx_ENABLE); } static inline int ep93xx_pwm_is_enabled(struct ep93xx_pwm *pwm) { return ep93xx_pwm_readl(pwm, EP93XX_PWMx_ENABLE) & 0x1; 
} static inline void ep93xx_pwm_invert(struct ep93xx_pwm *pwm) { ep93xx_pwm_writel(pwm, 0x1, EP93XX_PWMx_INVERT); } static inline void ep93xx_pwm_normal(struct ep93xx_pwm *pwm) { ep93xx_pwm_writel(pwm, 0x0, EP93XX_PWMx_INVERT); } static inline int ep93xx_pwm_is_inverted(struct ep93xx_pwm *pwm) { return ep93xx_pwm_readl(pwm, EP93XX_PWMx_INVERT) & 0x1; } /* * /sys/devices/platform/ep93xx-pwm.N * /min_freq read-only minimum pwm output frequency * /max_req read-only maximum pwm output frequency * /freq read-write pwm output frequency (0 = disable output) * /duty_percent read-write pwm duty cycle percent (1..99) * /invert read-write invert pwm output */ static ssize_t ep93xx_pwm_get_min_freq(struct device *dev, struct device_attribute *attr, char *buf) { struct platform_device *pdev = to_platform_device(dev); struct ep93xx_pwm *pwm = platform_get_drvdata(pdev); unsigned long rate = clk_get_rate(pwm->clk); return sprintf(buf, "%ld\n", rate / (EP93XX_PWM_MAX_COUNT + 1)); } static ssize_t ep93xx_pwm_get_max_freq(struct device *dev, struct device_attribute *attr, char *buf) { struct platform_device *pdev = to_platform_device(dev); struct ep93xx_pwm *pwm = platform_get_drvdata(pdev); unsigned long rate = clk_get_rate(pwm->clk); return sprintf(buf, "%ld\n", rate / 2); } static ssize_t ep93xx_pwm_get_freq(struct device *dev, struct device_attribute *attr, char *buf) { struct platform_device *pdev = to_platform_device(dev); struct ep93xx_pwm *pwm = platform_get_drvdata(pdev); if (ep93xx_pwm_is_enabled(pwm)) { unsigned long rate = clk_get_rate(pwm->clk); u16 term = ep93xx_pwm_read_tc(pwm); return sprintf(buf, "%ld\n", rate / (term + 1)); } else { return sprintf(buf, "disabled\n"); } } static ssize_t ep93xx_pwm_set_freq(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct platform_device *pdev = to_platform_device(dev); struct ep93xx_pwm *pwm = platform_get_drvdata(pdev); long val; int err; err = strict_strtol(buf, 10, &val); if (err) return 
-EINVAL; if (val == 0) { ep93xx_pwm_disable(pwm); } else if (val <= (clk_get_rate(pwm->clk) / 2)) { u32 term, duty; val = (clk_get_rate(pwm->clk) / val) - 1; if (val > EP93XX_PWM_MAX_COUNT) val = EP93XX_PWM_MAX_COUNT; if (val < 1) val = 1; term = ep93xx_pwm_read_tc(pwm); duty = ((val + 1) * pwm->duty_percent / 100) - 1; /* If pwm is running, order is important */ if (val > term) { ep93xx_pwm_write_tc(pwm, val); ep93xx_pwm_write_dc(pwm, duty); } else { ep93xx_pwm_write_dc(pwm, duty); ep93xx_pwm_write_tc(pwm, val); } if (!ep93xx_pwm_is_enabled(pwm)) ep93xx_pwm_enable(pwm); } else { return -EINVAL; } return count; } static ssize_t ep93xx_pwm_get_duty_percent(struct device *dev, struct device_attribute *attr, char *buf) { struct platform_device *pdev = to_platform_device(dev); struct ep93xx_pwm *pwm = platform_get_drvdata(pdev); return sprintf(buf, "%d\n", pwm->duty_percent); } static ssize_t ep93xx_pwm_set_duty_percent(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct platform_device *pdev = to_platform_device(dev); struct ep93xx_pwm *pwm = platform_get_drvdata(pdev); long val; int err; err = strict_strtol(buf, 10, &val); if (err) return -EINVAL; if (val > 0 && val < 100) { u32 term = ep93xx_pwm_read_tc(pwm); ep93xx_pwm_write_dc(pwm, ((term + 1) * val / 100) - 1); pwm->duty_percent = val; return count; } return -EINVAL; } static ssize_t ep93xx_pwm_get_invert(struct device *dev, struct device_attribute *attr, char *buf) { struct platform_device *pdev = to_platform_device(dev); struct ep93xx_pwm *pwm = platform_get_drvdata(pdev); return sprintf(buf, "%d\n", ep93xx_pwm_is_inverted(pwm)); } static ssize_t ep93xx_pwm_set_invert(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct platform_device *pdev = to_platform_device(dev); struct ep93xx_pwm *pwm = platform_get_drvdata(pdev); long val; int err; err = strict_strtol(buf, 10, &val); if (err) return -EINVAL; if (val == 0) ep93xx_pwm_normal(pwm); 
else if (val == 1) ep93xx_pwm_invert(pwm); else return -EINVAL; return count; } static DEVICE_ATTR(min_freq, S_IRUGO, ep93xx_pwm_get_min_freq, NULL); static DEVICE_ATTR(max_freq, S_IRUGO, ep93xx_pwm_get_max_freq, NULL); static DEVICE_ATTR(freq, S_IWUSR | S_IRUGO, ep93xx_pwm_get_freq, ep93xx_pwm_set_freq); static DEVICE_ATTR(duty_percent, S_IWUSR | S_IRUGO, ep93xx_pwm_get_duty_percent, ep93xx_pwm_set_duty_percent); static DEVICE_ATTR(invert, S_IWUSR | S_IRUGO, ep93xx_pwm_get_invert, ep93xx_pwm_set_invert); static struct attribute *ep93xx_pwm_attrs[] = { &dev_attr_min_freq.attr, &dev_attr_max_freq.attr, &dev_attr_freq.attr, &dev_attr_duty_percent.attr, &dev_attr_invert.attr, NULL }; static const struct attribute_group ep93xx_pwm_sysfs_files = { .attrs = ep93xx_pwm_attrs, }; static int __init ep93xx_pwm_probe(struct platform_device *pdev) { struct ep93xx_pwm *pwm; struct resource *res; int err; err = ep93xx_pwm_acquire_gpio(pdev); if (err) return err; pwm = kzalloc(sizeof(struct ep93xx_pwm), GFP_KERNEL); if (!pwm) { err = -ENOMEM; goto fail_no_mem; } res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (res == NULL) { err = -ENXIO; goto fail_no_mem_resource; } res = request_mem_region(res->start, resource_size(res), pdev->name); if (res == NULL) { err = -EBUSY; goto fail_no_mem_resource; } pwm->mmio_base = ioremap(res->start, resource_size(res)); if (pwm->mmio_base == NULL) { err = -ENXIO; goto fail_no_ioremap; } err = sysfs_create_group(&pdev->dev.kobj, &ep93xx_pwm_sysfs_files); if (err) goto fail_no_sysfs; pwm->clk = clk_get(&pdev->dev, "pwm_clk"); if (IS_ERR(pwm->clk)) { err = PTR_ERR(pwm->clk); goto fail_no_clk; } pwm->duty_percent = 50; platform_set_drvdata(pdev, pwm); /* disable pwm at startup. Avoids zero value. 
*/ ep93xx_pwm_disable(pwm); ep93xx_pwm_write_tc(pwm, EP93XX_PWM_MAX_COUNT); ep93xx_pwm_write_dc(pwm, EP93XX_PWM_MAX_COUNT / 2); clk_enable(pwm->clk); return 0; fail_no_clk: sysfs_remove_group(&pdev->dev.kobj, &ep93xx_pwm_sysfs_files); fail_no_sysfs: iounmap(pwm->mmio_base); fail_no_ioremap: release_mem_region(res->start, resource_size(res)); fail_no_mem_resource: kfree(pwm); fail_no_mem: ep93xx_pwm_release_gpio(pdev); return err; } static int __exit ep93xx_pwm_remove(struct platform_device *pdev) { struct ep93xx_pwm *pwm = platform_get_drvdata(pdev); struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); ep93xx_pwm_disable(pwm); clk_disable(pwm->clk); clk_put(pwm->clk); platform_set_drvdata(pdev, NULL); sysfs_remove_group(&pdev->dev.kobj, &ep93xx_pwm_sysfs_files); iounmap(pwm->mmio_base); release_mem_region(res->start, resource_size(res)); kfree(pwm); ep93xx_pwm_release_gpio(pdev); return 0; } static struct platform_driver ep93xx_pwm_driver = { .driver = { .name = "ep93xx-pwm", .owner = THIS_MODULE, }, .remove = __exit_p(ep93xx_pwm_remove), }; static int __init ep93xx_pwm_init(void) { return platform_driver_probe(&ep93xx_pwm_driver, ep93xx_pwm_probe); } static void __exit ep93xx_pwm_exit(void) { platform_driver_unregister(&ep93xx_pwm_driver); } module_init(ep93xx_pwm_init); module_exit(ep93xx_pwm_exit); MODULE_AUTHOR("Matthieu Crapet <mcrapet@gmail.com>, " "H Hartley Sweeten <hsweeten@visionengravers.com>"); MODULE_DESCRIPTION("EP93xx PWM driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:ep93xx-pwm");
gpl-2.0
verygreen/green_kernel_omap
arch/cris/arch-v10/mm/tlb.c
12984
4706
/* * linux/arch/cris/arch-v10/mm/tlb.c * * Low level TLB handling * * * Copyright (C) 2000-2007 Axis Communications AB * * Authors: Bjorn Wesen (bjornw@axis.com) * */ #include <asm/tlb.h> #include <asm/mmu_context.h> #include <arch/svinto.h> #define D(x) /* The TLB can host up to 64 different mm contexts at the same time. * The running context is R_MMU_CONTEXT, and each TLB entry contains a * page_id that has to match to give a hit. In page_id_map, we keep track * of which mm's we have assigned which page_id's, so that we know when * to invalidate TLB entries. * * The last page_id is never running - it is used as an invalid page_id * so we can make TLB entries that will never match. * * Notice that we need to make the flushes atomic, otherwise an interrupt * handler that uses vmalloced memory might cause a TLB load in the middle * of a flush causing. */ /* invalidate all TLB entries */ void flush_tlb_all(void) { int i; unsigned long flags; /* the vpn of i & 0xf is so we dont write similar TLB entries * in the same 4-way entry group. details... */ local_irq_save(flags); for(i = 0; i < NUM_TLB_ENTRIES; i++) { *R_TLB_SELECT = ( IO_FIELD(R_TLB_SELECT, index, i) ); *R_TLB_HI = ( IO_FIELD(R_TLB_HI, page_id, INVALID_PAGEID ) | IO_FIELD(R_TLB_HI, vpn, i & 0xf ) ); *R_TLB_LO = ( IO_STATE(R_TLB_LO, global,no ) | IO_STATE(R_TLB_LO, valid, no ) | IO_STATE(R_TLB_LO, kernel,no ) | IO_STATE(R_TLB_LO, we, no ) | IO_FIELD(R_TLB_LO, pfn, 0 ) ); } local_irq_restore(flags); D(printk("tlb: flushed all\n")); } /* invalidate the selected mm context only */ void flush_tlb_mm(struct mm_struct *mm) { int i; int page_id = mm->context.page_id; unsigned long flags; D(printk("tlb: flush mm context %d (%p)\n", page_id, mm)); if(page_id == NO_CONTEXT) return; /* mark the TLB entries that match the page_id as invalid. * here we could also check the _PAGE_GLOBAL bit and NOT flush * global pages. is it worth the extra I/O ? 
*/ local_irq_save(flags); for(i = 0; i < NUM_TLB_ENTRIES; i++) { *R_TLB_SELECT = IO_FIELD(R_TLB_SELECT, index, i); if (IO_EXTRACT(R_TLB_HI, page_id, *R_TLB_HI) == page_id) { *R_TLB_HI = ( IO_FIELD(R_TLB_HI, page_id, INVALID_PAGEID ) | IO_FIELD(R_TLB_HI, vpn, i & 0xf ) ); *R_TLB_LO = ( IO_STATE(R_TLB_LO, global,no ) | IO_STATE(R_TLB_LO, valid, no ) | IO_STATE(R_TLB_LO, kernel,no ) | IO_STATE(R_TLB_LO, we, no ) | IO_FIELD(R_TLB_LO, pfn, 0 ) ); } } local_irq_restore(flags); } /* invalidate a single page */ void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr) { struct mm_struct *mm = vma->vm_mm; int page_id = mm->context.page_id; int i; unsigned long flags; D(printk("tlb: flush page %p in context %d (%p)\n", addr, page_id, mm)); if(page_id == NO_CONTEXT) return; addr &= PAGE_MASK; /* perhaps not necessary */ /* invalidate those TLB entries that match both the mm context * and the virtual address requested */ local_irq_save(flags); for(i = 0; i < NUM_TLB_ENTRIES; i++) { unsigned long tlb_hi; *R_TLB_SELECT = IO_FIELD(R_TLB_SELECT, index, i); tlb_hi = *R_TLB_HI; if (IO_EXTRACT(R_TLB_HI, page_id, tlb_hi) == page_id && (tlb_hi & PAGE_MASK) == addr) { *R_TLB_HI = IO_FIELD(R_TLB_HI, page_id, INVALID_PAGEID ) | addr; /* same addr as before works. */ *R_TLB_LO = ( IO_STATE(R_TLB_LO, global,no ) | IO_STATE(R_TLB_LO, valid, no ) | IO_STATE(R_TLB_LO, kernel,no ) | IO_STATE(R_TLB_LO, we, no ) | IO_FIELD(R_TLB_LO, pfn, 0 ) ); } } local_irq_restore(flags); } /* * Initialize the context related info for a new mm_struct * instance. 
*/ int init_new_context(struct task_struct *tsk, struct mm_struct *mm) { mm->context.page_id = NO_CONTEXT; return 0; } /* called in schedule() just before actually doing the switch_to */ void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk) { if (prev != next) { /* make sure we have a context */ get_mmu_context(next); /* remember the pgd for the fault handlers * this is similar to the pgd register in some other CPU's. * we need our own copy of it because current and active_mm * might be invalid at points where we still need to derefer * the pgd. */ per_cpu(current_pgd, smp_processor_id()) = next->pgd; /* switch context in the MMU */ D(printk(KERN_DEBUG "switching mmu_context to %d (%p)\n", next->context, next)); *R_MMU_CONTEXT = IO_FIELD(R_MMU_CONTEXT, page_id, next->context.page_id); } }
gpl-2.0
ccompiler4pic32/pic32-gcc
gcc/testsuite/gcc.dg/cpp/arith-3.c
185
12489
/* Preprocessor arithmetic semantic tests. */ /* Copyright (C) 2002 Free Software Foundation, Inc. */ /* Source: Neil Booth, 26 May 2002. */ /* The file tests overflow warnings for, and values of, preprocessor arithmetic that are dependent on target precision. Please keep changes to arith-2.c and arith-3.c in sync. */ /* { dg-do preprocess } */ /* { dg-options "-std=c99 -fshow-column" } */ #include <limits.h> #define APPEND2(NUM, SUFF) NUM ## SUFF #define APPEND(NUM, SUFF) APPEND2(NUM, SUFF) #define TARGET_UTYPE_MAX ULLONG_MAX /* The tests in this file depend only on the macros defined in this #if block. Note that it is no good calculating these values, as the intent is to test both the preprocessor's number parser and arithmetic. */ #if TARGET_UTYPE_MAX == 65535ULL # define TARG_PRECISION 16 # define MAX_INT 32767 # define MAX_UINT 65535 # define TARG_MAX_HEX 0x7fff # define TARG_MAX_OCT 077777 # define TARG_MAX_PLUS_1 32768L # define TARG_MAX_PLUS_1_U 32768UL # define TARG_MAX_PLUS_1_HEX 0x8000 # define TARG_MAX_PLUS_1_OCT 0100000 # define UTARG_MAX_HEX 0xffff # define UTARG_MAX_OCT 0177777 # define UTARG_MAX_PLUS_1 65536L # define UTARG_MAX_PLUS_1_HEX 0x10000 # define UTARG_MAX_PLUS_1_OCT 0200000 # define TARG_LOWPART_PLUS_1 256L # define TARG_LOWPART_PLUS_1_U 256UL /* Division and modulo; anything that uses the high half in both dividend and divisor. 
*/ # define LONG_UDIVISION 61234UL / 260L # define LONG_UDIVISION_ANSWER 235 # define LONG_SDIVISION -15000L / 299L # define LONG_SDIVISION_ANSWER -50 # define LONG_UMODULO 61234UL % 260L # define LONG_UMODULO_ANSWER 134 # define LONG_SMODULO -15000L % 299L # define LONG_SMODULO_ANSWER -50 #elif TARGET_UTYPE_MAX == 4294967295ULL # define TARG_PRECISION 32 # define MAX_INT 2147483647 # define MAX_UINT 4294967295 # define TARG_MAX_HEX 0x7fffffff # define TARG_MAX_OCT 017777777777 # define TARG_MAX_PLUS_1 2147483648L # define TARG_MAX_PLUS_1_U 2147483648UL # define TARG_MAX_PLUS_1_HEX 0x80000000 # define TARG_MAX_PLUS_1_OCT 020000000000 # define UTARG_MAX_HEX 0xffffffff # define UTARG_MAX_OCT 037777777777 # define UTARG_MAX_PLUS_1 4294967296L # define UTARG_MAX_PLUS_1_HEX 0x100000000 # define UTARG_MAX_PLUS_1_OCT 040000000000 # define TARG_LOWPART_PLUS_1 65536 # define TARG_LOWPART_PLUS_1_U 65536UL /* Division and modulo; anything that uses the high half in both dividend and divisor. */ # define LONG_UDIVISION 268335456UL / 70000L # define LONG_UDIVISION_ANSWER 3833 # define LONG_SDIVISION -368335456L / 123456L # define LONG_SDIVISION_ANSWER -2983 # define LONG_UMODULO 268335456UL % 70000L # define LONG_UMODULO_ANSWER 25456 # define LONG_SMODULO -368335456L % 123456L # define LONG_SMODULO_ANSWER -66208 #elif TARGET_UTYPE_MAX == 18446744073709551615ULL # define TARG_PRECISION 64 # define MAX_INT 9223372036854775807 # define MAX_UINT 18446744073709551615 # define TARG_MAX_HEX 0x7fffffffffffffff # define TARG_MAX_OCT 0777777777777777777777 # define TARG_MAX_PLUS_1 9223372036854775808L # define TARG_MAX_PLUS_1_U 9223372036854775808UL # define TARG_MAX_PLUS_1_HEX 0x8000000000000000 # define TARG_MAX_PLUS_1_OCT 01000000000000000000000 # define UTARG_MAX_HEX 0xffffffffffffffff # define UTARG_MAX_OCT 01777777777777777777777 # define UTARG_MAX_PLUS_1 18446744073709551616L # define UTARG_MAX_PLUS_1_HEX 0x10000000000000000 # define UTARG_MAX_PLUS_1_OCT 02000000000000000000000 # 
define TARG_LOWPART_PLUS_1 4294967296 # define TARG_LOWPART_PLUS_1_U 4294967296U /* Division and modulo; anything that uses the high half in both dividend and divisor. */ # define LONG_UDIVISION 235184372088832UL / 17279869184L # define LONG_UDIVISION_ANSWER 13610 # define LONG_SDIVISION -234582345927345L / 12345678901L # define LONG_SDIVISION_ANSWER -19001 # define LONG_UMODULO 235184372088832UL % 17279869184L # define LONG_UMODULO_ANSWER 5352494592L # define LONG_SMODULO -234582345927345L % 12345678901L # define LONG_SMODULO_ANSWER -2101129444L #else # error Please extend the macros here so that this file tests your target #endif /* Create more macros based on the above. */ #define TARG_PART_BITS (TARG_PRECISION / 2) #define TARG_MIN (-TARG_MAX - 1) #define TARG_MAX APPEND (MAX_INT, L) #define TARG_MAX_U APPEND (MAX_INT, UL) #define UTARG_MAX APPEND (MAX_UINT, L) #define UTARG_MAX_U APPEND (MAX_UINT, UL) /* And now the tests. */ #if TARG_MAX /* { dg-bogus "so large" } */ #endif #if TARG_MAX_PLUS_1_HEX /* { dg-bogus "so large" } */ #endif #if TARG_MAX_PLUS_1_OCT /* { dg-bogus "so large" } */ #endif #if UTARG_MAX /* { dg-warning "so large" } */ #endif #if UTARG_MAX_PLUS_1 /* { dg-warning "too large" } */ #endif #if UTARG_MAX_PLUS_1_HEX /* { dg-warning "too large" } */ #endif #if UTARG_MAX_HEX /* { dg-bogus "too large" } */ #endif #if UTARG_MAX_PLUS_1_OCT /* { dg-warning "too large" } */ #endif #if UTARG_MAX_OCT /* { dg-bogus "too large" } */ #endif #if TARG_MAX < 0 || TARG_MAX_PLUS_1 < 0 /* { dg-warning "so large" } */ # error /* { dg-bogus "error" } */ #endif #if UTARG_MAX_HEX < 0 || TARG_MAX_HEX < 0 # error /* { dg-bogus "error" } */ #endif #if UTARG_MAX_OCT < 0 || TARG_MAX_OCT < 0 # error /* { dg-bogus "error" } */ #endif #if -1 != UTARG_MAX_U # error /* { dg-bogus "error" } */ #endif /* Test each operator correctly warns of overflow conditions, and gives the right answer. */ /* Binary +. 
*/ #if TARG_MAX + 1 != TARG_MIN /* { dg-warning "overflow" } */ # error /* { dg-bogus "error" } */ #endif #if -TARG_MAX + -2 != TARG_MAX /* { dg-warning "overflow" } */ # error /* { dg-bogus "error" } */ #endif #if -TARG_MAX + -1 != TARG_MIN /* { dg-bogus "overflow" } */ # error /* { dg-bogus "error" } */ #endif #if TARG_MAX_U + 1 != TARG_MIN /* { dg-bogus "overflow" } */ # error /* { dg-bogus "error" } */ #endif #if -TARG_MAX_U + -2 != TARG_MAX /* { dg-bogus "overflow" } */ # error /* { dg-bogus "error" } */ #endif /* Binary -. */ #if TARG_MAX - -1 != TARG_MIN /* { dg-warning "overflow" } */ # error /* { dg-bogus "error" } */ #endif #if -TARG_MAX - 2 != TARG_MAX /* { dg-warning "overflow" } */ # error /* { dg-bogus "error" } */ #endif #if -TARG_MAX - 1 != TARG_MIN /* { dg-bogus "overflow" } */ # error /* { dg-bogus "error" } */ #endif #if TARG_MAX_U - -1 != TARG_MIN /* { dg-bogus "overflow" } */ # error /* { dg-bogus "error" } */ #endif #if -TARG_MAX_U - 2 != TARG_MAX /* { dg-bogus "overflow" } */ # error /* { dg-bogus "error" } */ #endif /* Binary *. 
*/ #if TARG_LOWPART_PLUS_1 * (TARG_LOWPART_PLUS_1 >> 1) != TARG_MIN /* { dg-warning "overflow" } */ # error /* { dg-bogus "error" } */ #endif #if (TARG_LOWPART_PLUS_1 >> 1) * TARG_LOWPART_PLUS_1 != TARG_MIN /* { dg-warning "overflow" } */ # error /* { dg-bogus "error" } */ #endif #if (TARG_LOWPART_PLUS_1 << 1) * (TARG_LOWPART_PLUS_1 + 1) != (TARG_LOWPART_PLUS_1 << 1) /* { dg-warning "overflow" } */ # error /* { dg-bogus "error" } */ #endif #if TARG_MAX * 1 != TARG_MAX /* { dg-bogus "overflow" } */ # error /* { dg-bogus "error" } */ #endif #if (TARG_MAX >> 1) * 2 != TARG_MAX - 1 /* { dg-bogus "overflow" } */ # error /* { dg-bogus "error" } */ #endif #if (TARG_LOWPART_PLUS_1_U + 61) * (TARG_LOWPART_PLUS_1 << 1) != 61 * (TARG_LOWPART_PLUS_1 << 1) /* { dg-bogus "overflow" } */ # error /* { dg-bogus "error" } */ #endif #if (TARG_LOWPART_PLUS_1 >> 1) * TARG_LOWPART_PLUS_1_U != TARG_MIN /* { dg-bogus "overflow" } */ # error /* { dg-bogus "error" } */ #endif #if 1 * TARG_MIN != TARG_MIN /* { dg-bogus "overflow" } */ # error /* { dg-bogus "error" } */ #endif /* Binary /. */ #if TARG_MIN / -1 != TARG_MIN /* { dg-warning "overflow" } */ # error /* { dg-bogus "error" } */ #endif #if TARG_MIN / 1 != TARG_MIN /* { dg-bogus "overflow" } */ # error /* { dg-bogus "error" } */ #endif #if -TARG_MAX_PLUS_1_U / -1 != 0 /* { dg-bogus "overflow" } */ # error /* { dg-bogus "error" } */ #endif #if -5 / (2 - 2) /* { dg-error "13:division by zero" } */ #endif #if LONG_UDIVISION != LONG_UDIVISION_ANSWER # error /* { dg-bogus "error" } */ #endif #if LONG_SDIVISION != LONG_SDIVISION_ANSWER # error /* { dg-bogus "error" } */ #endif /* Binary %. Cannot overflow. 
*/ #if -5 % (2 - 2) /* { dg-error "13:division by zero" } */ #endif #if TARG_MIN % 1 /* { dg-bogus "overflow" } */ # error /* { dg-bogus "error" } */ #endif #if LONG_UMODULO != LONG_UMODULO_ANSWER # error /* { dg-bogus "error" } */ #endif #if LONG_SMODULO != LONG_SMODULO_ANSWER # error /* { dg-bogus "error" } */ #endif #if 234 % -1U != 234 # error /* { dg-bogus "error" } */ #endif #if TARG_MIN % -1U != TARG_MIN # error /* { dg-bogus "error" } */ #endif /* Binary << and Binary >>, the latter cannot overflow. */ #if -1 >> 3 != -1 /* { dg-bogus "overflow" } */ # error /* { dg-bogus "error" } */ #endif #if TARG_MAX >> 3 != TARG_MAX / 8 /* { dg-bogus "overflow" } */ # error /* { dg-bogus "error" } */ #endif #if 0 << 256 != 0 /* { dg-bogus "overflow" } */ # error /* { dg-bogus "error" } */ #endif #if 1 << 256 != 0 /* { dg-warning "overflow" } */ # error /* { dg-bogus "error" } */ #endif #if 1U << 256 != 0 /* { dg-bogus "overflow" } */ # error /* { dg-bogus "error" } */ #endif #if TARG_MAX << 1 != -2 /* { dg-warning "overflow" } */ # error /* { dg-bogus "error" } */ #endif #if TARG_MAX_U << 1 != -2 /* { dg-bogus "overflow" } */ # error /* { dg-bogus "error" } */ #endif #if TARG_LOWPART_PLUS_1 << TARG_PART_BITS != 0 /* { dg-warning "overflow" } */ # error /* { dg-bogus "error" } */ #endif #if TARG_LOWPART_PLUS_1 << (TARG_PART_BITS - 1) != TARG_MIN /* { dg-warning "overflow" } */ # error /* { dg-bogus "error" } */ #endif #if TARG_LOWPART_PLUS_1_U << (TARG_PART_BITS - 1) != TARG_MIN /* { dg-bogus "overflow" } */ # error /* { dg-bogus "error" } */ #endif #if TARG_LOWPART_PLUS_1 << (TARG_PART_BITS - 2) != (TARG_MAX_PLUS_1_U >> 1) /* { dg-bogus "overflow" } */ # error /* { dg-bogus "error" } */ #endif /* Test how the sign bit is handled. 
*/ #if (TARG_MIN << 1) != 0 /* { dg-warning "overflow" } */ # error /* { dg-bogus "error" } */ #endif #if (TARG_MAX_PLUS_1_U << 1) != 0 /* { dg-bogus "overflow" } */ # error /* { dg-bogus "error" } */ #endif #if (TARG_MIN >> 1) != 3U << (TARG_PRECISION - 2) /* { dg-bogus "overflow" } */ # error /* { dg-bogus "error" } */ #endif #if (TARG_MAX_PLUS_1_U >> 1) != 1 << (TARG_PRECISION - 2) /* { dg-bogus "overflow" } */ # error /* { dg-bogus "error" } */ #endif /* Unary -. It can overflow in just one case. */ #if -TARG_MIN != TARG_MIN /* { dg-warning "overflow" } */ # error /* { dg-bogus "error" } */ #endif #if - -TARG_MAX != TARG_MAX /* { dg-bogus "overflow" } */ # error /* { dg-bogus "error" } */ #endif /* Unary +, ~, and !. They cannot overflow. */ #if +TARG_MAX != TARG_MAX /* { dg-bogus "overflow" } */ # error /* { dg-bogus "error" } */ #endif #if !TARG_MAX + !TARG_MIN != 0 /* { dg-bogus "overflow" } */ # error /* { dg-bogus "error" } */ #endif #if ~TARG_MAX , ~TARG_MIN != TARG_MAX /* { dg-bogus "overflow" } */ # error /* { dg-bogus "error" } */ #endif /* Bitwise &, ^, |. They cannot overflow. */ #if (TARG_MAX & -1), (TARG_MIN & -1) != TARG_MIN /* { dg-bogus "overflow" } */ # error /* { dg-bogus "error" } */ #endif #if TARG_MAX | -1, (TARG_MIN | -1) != -1 /* { dg-bogus "overflow" } */ # error /* { dg-bogus "error" } */ #endif #if TARG_MAX ^ -1, (TARG_MIN ^ -1) != TARG_MAX /* { dg-bogus "overflow" } */ # error /* { dg-bogus "error" } */ #endif /* Comparison operators. They cannot overflow. */ #if -1 <= TARG_MAX, (TARG_MIN <= 1) != 1 /* { dg-bogus "overflow" } */ # error /* { dg-bogus "error" } */ #endif #if -1 >= TARG_MAX, (TARG_MIN >= 1) != 0 /* { dg-bogus "overflow" } */ # error /* { dg-bogus "error" } */ #endif #if -1 < TARG_MAX, (TARG_MIN < 1) != 1 /* { dg-bogus "overflow" } */ # error /* { dg-bogus "error" } */ #endif #if -1 > TARG_MAX, (TARG_MIN > 1) != 0 /* { dg-bogus "overflow" } */ # error /* { dg-bogus "error" } */ #endif /* Comma and ? : operators. 
They cannot overflow. */ #if -1, TARG_MAX, TARG_MIN != TARG_MIN /* { dg-bogus "overflow" } */ # error /* { dg-bogus "error" } */ #endif #if -1 ? TARG_MAX: TARG_MAX, 0 ? 1: TARG_MIN != TARG_MIN /* { dg-bogus "overflow" } */ # error /* { dg-bogus "error" } */ #endif
gpl-2.0
fanfank/maybeatunnel
third/openssl/crypto/des/xcbc_enc.c
185
8516
/* crypto/des/xcbc_enc.c */ /* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com) * All rights reserved. * * This package is an SSL implementation written * by Eric Young (eay@cryptsoft.com). * The implementation was written so as to conform with Netscapes SSL. * * This library is free for commercial and non-commercial use as long as * the following conditions are aheared to. The following conditions * apply to all code found in this distribution, be it the RC4, RSA, * lhash, DES, etc., code; not just the SSL code. The SSL documentation * included with this distribution is covered by the same copyright terms * except that the holder is Tim Hudson (tjh@cryptsoft.com). * * Copyright remains Eric Young's, and as such any Copyright notices in * the code are not to be removed. * If this package is used in a product, Eric Young should be given attribution * as the author of the parts of the library used. * This can be in the form of a textual message at program startup or * in documentation (online or textual) provided with the package. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * "This product includes cryptographic software written by * Eric Young (eay@cryptsoft.com)" * The word 'cryptographic' can be left out if the rouines from the library * being used are not cryptographic related :-). * 4. 
If you include any Windows specific code (or a derivative thereof) from * the apps directory (application code) you must include an acknowledgement: * "This product includes software written by Tim Hudson (tjh@cryptsoft.com)" * * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * The licence and distribution terms for any publically available version or * derivative of this code cannot be changed. i.e. this code cannot simply be * copied and put under another distribution licence * [including the GNU Public Licence.] 
*/ #include "des_locl.h" /* RSA's DESX */ #if 0 /* broken code, preserved just in case anyone * specifically looks for this */ static const unsigned char desx_white_in2out[256] = { 0xBD, 0x56, 0xEA, 0xF2, 0xA2, 0xF1, 0xAC, 0x2A, 0xB0, 0x93, 0xD1, 0x9C, 0x1B, 0x33, 0xFD, 0xD0, 0x30, 0x04, 0xB6, 0xDC, 0x7D, 0xDF, 0x32, 0x4B, 0xF7, 0xCB, 0x45, 0x9B, 0x31, 0xBB, 0x21, 0x5A, 0x41, 0x9F, 0xE1, 0xD9, 0x4A, 0x4D, 0x9E, 0xDA, 0xA0, 0x68, 0x2C, 0xC3, 0x27, 0x5F, 0x80, 0x36, 0x3E, 0xEE, 0xFB, 0x95, 0x1A, 0xFE, 0xCE, 0xA8, 0x34, 0xA9, 0x13, 0xF0, 0xA6, 0x3F, 0xD8, 0x0C, 0x78, 0x24, 0xAF, 0x23, 0x52, 0xC1, 0x67, 0x17, 0xF5, 0x66, 0x90, 0xE7, 0xE8, 0x07, 0xB8, 0x60, 0x48, 0xE6, 0x1E, 0x53, 0xF3, 0x92, 0xA4, 0x72, 0x8C, 0x08, 0x15, 0x6E, 0x86, 0x00, 0x84, 0xFA, 0xF4, 0x7F, 0x8A, 0x42, 0x19, 0xF6, 0xDB, 0xCD, 0x14, 0x8D, 0x50, 0x12, 0xBA, 0x3C, 0x06, 0x4E, 0xEC, 0xB3, 0x35, 0x11, 0xA1, 0x88, 0x8E, 0x2B, 0x94, 0x99, 0xB7, 0x71, 0x74, 0xD3, 0xE4, 0xBF, 0x3A, 0xDE, 0x96, 0x0E, 0xBC, 0x0A, 0xED, 0x77, 0xFC, 0x37, 0x6B, 0x03, 0x79, 0x89, 0x62, 0xC6, 0xD7, 0xC0, 0xD2, 0x7C, 0x6A, 0x8B, 0x22, 0xA3, 0x5B, 0x05, 0x5D, 0x02, 0x75, 0xD5, 0x61, 0xE3, 0x18, 0x8F, 0x55, 0x51, 0xAD, 0x1F, 0x0B, 0x5E, 0x85, 0xE5, 0xC2, 0x57, 0x63, 0xCA, 0x3D, 0x6C, 0xB4, 0xC5, 0xCC, 0x70, 0xB2, 0x91, 0x59, 0x0D, 0x47, 0x20, 0xC8, 0x4F, 0x58, 0xE0, 0x01, 0xE2, 0x16, 0x38, 0xC4, 0x6F, 0x3B, 0x0F, 0x65, 0x46, 0xBE, 0x7E, 0x2D, 0x7B, 0x82, 0xF9, 0x40, 0xB5, 0x1D, 0x73, 0xF8, 0xEB, 0x26, 0xC7, 0x87, 0x97, 0x25, 0x54, 0xB1, 0x28, 0xAA, 0x98, 0x9D, 0xA5, 0x64, 0x6D, 0x7A, 0xD4, 0x10, 0x81, 0x44, 0xEF, 0x49, 0xD6, 0xAE, 0x2E, 0xDD, 0x76, 0x5C, 0x2F, 0xA7, 0x1C, 0xC9, 0x09, 0x69, 0x9A, 0x83, 0xCF, 0x29, 0x39, 0xB9, 0xE9, 0x4C, 0xFF, 0x43, 0xAB, }; void DES_xwhite_in2out(const_DES_cblock *des_key, const_DES_cblock *in_white, DES_cblock *out_white) { int out0, out1; int i; const unsigned char *key = &(*des_key)[0]; const unsigned char *in = &(*in_white)[0]; unsigned char *out = &(*out_white)[0]; out[0] = out[1] = out[2] = 
out[3] = out[4] = out[5] = out[6] = out[7] = 0; out0 = out1 = 0; for (i = 0; i < 8; i++) { out[i] = key[i] ^ desx_white_in2out[out0 ^ out1]; out0 = out1; out1 = (int)out[i & 0x07]; } out0 = out[0]; out1 = out[i]; /* BUG: out-of-bounds read */ for (i = 0; i < 8; i++) { out[i] = in[i] ^ desx_white_in2out[out0 ^ out1]; out0 = out1; out1 = (int)out[i & 0x07]; } } #endif void DES_xcbc_encrypt(const unsigned char *in, unsigned char *out, long length, DES_key_schedule *schedule, DES_cblock *ivec, const_DES_cblock *inw, const_DES_cblock *outw, int enc) { register DES_LONG tin0, tin1; register DES_LONG tout0, tout1, xor0, xor1; register DES_LONG inW0, inW1, outW0, outW1; register const unsigned char *in2; register long l = length; DES_LONG tin[2]; unsigned char *iv; in2 = &(*inw)[0]; c2l(in2, inW0); c2l(in2, inW1); in2 = &(*outw)[0]; c2l(in2, outW0); c2l(in2, outW1); iv = &(*ivec)[0]; if (enc) { c2l(iv, tout0); c2l(iv, tout1); for (l -= 8; l >= 0; l -= 8) { c2l(in, tin0); c2l(in, tin1); tin0 ^= tout0 ^ inW0; tin[0] = tin0; tin1 ^= tout1 ^ inW1; tin[1] = tin1; DES_encrypt1(tin, schedule, DES_ENCRYPT); tout0 = tin[0] ^ outW0; l2c(tout0, out); tout1 = tin[1] ^ outW1; l2c(tout1, out); } if (l != -8) { c2ln(in, tin0, tin1, l + 8); tin0 ^= tout0 ^ inW0; tin[0] = tin0; tin1 ^= tout1 ^ inW1; tin[1] = tin1; DES_encrypt1(tin, schedule, DES_ENCRYPT); tout0 = tin[0] ^ outW0; l2c(tout0, out); tout1 = tin[1] ^ outW1; l2c(tout1, out); } iv = &(*ivec)[0]; l2c(tout0, iv); l2c(tout1, iv); } else { c2l(iv, xor0); c2l(iv, xor1); for (l -= 8; l > 0; l -= 8) { c2l(in, tin0); tin[0] = tin0 ^ outW0; c2l(in, tin1); tin[1] = tin1 ^ outW1; DES_encrypt1(tin, schedule, DES_DECRYPT); tout0 = tin[0] ^ xor0 ^ inW0; tout1 = tin[1] ^ xor1 ^ inW1; l2c(tout0, out); l2c(tout1, out); xor0 = tin0; xor1 = tin1; } if (l != -8) { c2l(in, tin0); tin[0] = tin0 ^ outW0; c2l(in, tin1); tin[1] = tin1 ^ outW1; DES_encrypt1(tin, schedule, DES_DECRYPT); tout0 = tin[0] ^ xor0 ^ inW0; tout1 = tin[1] ^ xor1 ^ inW1; 
l2cn(tout0, tout1, out, l + 8); xor0 = tin0; xor1 = tin1; } iv = &(*ivec)[0]; l2c(xor0, iv); l2c(xor1, iv); } tin0 = tin1 = tout0 = tout1 = xor0 = xor1 = 0; inW0 = inW1 = outW0 = outW1 = 0; tin[0] = tin[1] = 0; }
gpl-2.0
wan-qy/linux
net/core/net-sysfs.c
185
37235
/* * net-sysfs.c - network device class and attributes * * Copyright (c) 2003 Stephen Hemminger <shemminger@osdl.org> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/capability.h> #include <linux/kernel.h> #include <linux/netdevice.h> #include <net/switchdev.h> #include <linux/if_arp.h> #include <linux/slab.h> #include <linux/nsproxy.h> #include <net/sock.h> #include <net/net_namespace.h> #include <linux/rtnetlink.h> #include <linux/vmalloc.h> #include <linux/export.h> #include <linux/jiffies.h> #include <linux/pm_runtime.h> #include <linux/of.h> #include "net-sysfs.h" #ifdef CONFIG_SYSFS static const char fmt_hex[] = "%#x\n"; static const char fmt_long_hex[] = "%#lx\n"; static const char fmt_dec[] = "%d\n"; static const char fmt_udec[] = "%u\n"; static const char fmt_ulong[] = "%lu\n"; static const char fmt_u64[] = "%llu\n"; static inline int dev_isalive(const struct net_device *dev) { return dev->reg_state <= NETREG_REGISTERED; } /* use same locking rules as GIF* ioctl's */ static ssize_t netdev_show(const struct device *dev, struct device_attribute *attr, char *buf, ssize_t (*format)(const struct net_device *, char *)) { struct net_device *ndev = to_net_dev(dev); ssize_t ret = -EINVAL; read_lock(&dev_base_lock); if (dev_isalive(ndev)) ret = (*format)(ndev, buf); read_unlock(&dev_base_lock); return ret; } /* generate a show function for simple field */ #define NETDEVICE_SHOW(field, format_string) \ static ssize_t format_##field(const struct net_device *dev, char *buf) \ { \ return sprintf(buf, format_string, dev->field); \ } \ static ssize_t field##_show(struct device *dev, \ struct device_attribute *attr, char *buf) \ { \ return netdev_show(dev, attr, buf, format_##field); \ } \ #define NETDEVICE_SHOW_RO(field, format_string) \ 
NETDEVICE_SHOW(field, format_string); \ static DEVICE_ATTR_RO(field) #define NETDEVICE_SHOW_RW(field, format_string) \ NETDEVICE_SHOW(field, format_string); \ static DEVICE_ATTR_RW(field) /* use same locking and permission rules as SIF* ioctl's */ static ssize_t netdev_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t len, int (*set)(struct net_device *, unsigned long)) { struct net_device *netdev = to_net_dev(dev); struct net *net = dev_net(netdev); unsigned long new; int ret = -EINVAL; if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) return -EPERM; ret = kstrtoul(buf, 0, &new); if (ret) goto err; if (!rtnl_trylock()) return restart_syscall(); if (dev_isalive(netdev)) { if ((ret = (*set)(netdev, new)) == 0) ret = len; } rtnl_unlock(); err: return ret; } NETDEVICE_SHOW_RO(dev_id, fmt_hex); NETDEVICE_SHOW_RO(dev_port, fmt_dec); NETDEVICE_SHOW_RO(addr_assign_type, fmt_dec); NETDEVICE_SHOW_RO(addr_len, fmt_dec); NETDEVICE_SHOW_RO(ifindex, fmt_dec); NETDEVICE_SHOW_RO(type, fmt_dec); NETDEVICE_SHOW_RO(link_mode, fmt_dec); static ssize_t iflink_show(struct device *dev, struct device_attribute *attr, char *buf) { struct net_device *ndev = to_net_dev(dev); return sprintf(buf, fmt_dec, dev_get_iflink(ndev)); } static DEVICE_ATTR_RO(iflink); static ssize_t format_name_assign_type(const struct net_device *dev, char *buf) { return sprintf(buf, fmt_dec, dev->name_assign_type); } static ssize_t name_assign_type_show(struct device *dev, struct device_attribute *attr, char *buf) { struct net_device *ndev = to_net_dev(dev); ssize_t ret = -EINVAL; if (ndev->name_assign_type != NET_NAME_UNKNOWN) ret = netdev_show(dev, attr, buf, format_name_assign_type); return ret; } static DEVICE_ATTR_RO(name_assign_type); /* use same locking rules as GIFHWADDR ioctl's */ static ssize_t address_show(struct device *dev, struct device_attribute *attr, char *buf) { struct net_device *ndev = to_net_dev(dev); ssize_t ret = -EINVAL; read_lock(&dev_base_lock); if 
(dev_isalive(ndev)) ret = sysfs_format_mac(buf, ndev->dev_addr, ndev->addr_len); read_unlock(&dev_base_lock); return ret; } static DEVICE_ATTR_RO(address); static ssize_t broadcast_show(struct device *dev, struct device_attribute *attr, char *buf) { struct net_device *ndev = to_net_dev(dev); if (dev_isalive(ndev)) return sysfs_format_mac(buf, ndev->broadcast, ndev->addr_len); return -EINVAL; } static DEVICE_ATTR_RO(broadcast); static int change_carrier(struct net_device *dev, unsigned long new_carrier) { if (!netif_running(dev)) return -EINVAL; return dev_change_carrier(dev, (bool) new_carrier); } static ssize_t carrier_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { return netdev_store(dev, attr, buf, len, change_carrier); } static ssize_t carrier_show(struct device *dev, struct device_attribute *attr, char *buf) { struct net_device *netdev = to_net_dev(dev); if (netif_running(netdev)) { return sprintf(buf, fmt_dec, !!netif_carrier_ok(netdev)); } return -EINVAL; } static DEVICE_ATTR_RW(carrier); static ssize_t speed_show(struct device *dev, struct device_attribute *attr, char *buf) { struct net_device *netdev = to_net_dev(dev); int ret = -EINVAL; if (!rtnl_trylock()) return restart_syscall(); if (netif_running(netdev)) { struct ethtool_cmd cmd; if (!__ethtool_get_settings(netdev, &cmd)) ret = sprintf(buf, fmt_udec, ethtool_cmd_speed(&cmd)); } rtnl_unlock(); return ret; } static DEVICE_ATTR_RO(speed); static ssize_t duplex_show(struct device *dev, struct device_attribute *attr, char *buf) { struct net_device *netdev = to_net_dev(dev); int ret = -EINVAL; if (!rtnl_trylock()) return restart_syscall(); if (netif_running(netdev)) { struct ethtool_cmd cmd; if (!__ethtool_get_settings(netdev, &cmd)) { const char *duplex; switch (cmd.duplex) { case DUPLEX_HALF: duplex = "half"; break; case DUPLEX_FULL: duplex = "full"; break; default: duplex = "unknown"; break; } ret = sprintf(buf, "%s\n", duplex); } } rtnl_unlock(); return ret; } 
static DEVICE_ATTR_RO(duplex); static ssize_t dormant_show(struct device *dev, struct device_attribute *attr, char *buf) { struct net_device *netdev = to_net_dev(dev); if (netif_running(netdev)) return sprintf(buf, fmt_dec, !!netif_dormant(netdev)); return -EINVAL; } static DEVICE_ATTR_RO(dormant); static const char *const operstates[] = { "unknown", "notpresent", /* currently unused */ "down", "lowerlayerdown", "testing", /* currently unused */ "dormant", "up" }; static ssize_t operstate_show(struct device *dev, struct device_attribute *attr, char *buf) { const struct net_device *netdev = to_net_dev(dev); unsigned char operstate; read_lock(&dev_base_lock); operstate = netdev->operstate; if (!netif_running(netdev)) operstate = IF_OPER_DOWN; read_unlock(&dev_base_lock); if (operstate >= ARRAY_SIZE(operstates)) return -EINVAL; /* should not happen */ return sprintf(buf, "%s\n", operstates[operstate]); } static DEVICE_ATTR_RO(operstate); static ssize_t carrier_changes_show(struct device *dev, struct device_attribute *attr, char *buf) { struct net_device *netdev = to_net_dev(dev); return sprintf(buf, fmt_dec, atomic_read(&netdev->carrier_changes)); } static DEVICE_ATTR_RO(carrier_changes); /* read-write attributes */ static int change_mtu(struct net_device *dev, unsigned long new_mtu) { return dev_set_mtu(dev, (int) new_mtu); } static ssize_t mtu_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { return netdev_store(dev, attr, buf, len, change_mtu); } NETDEVICE_SHOW_RW(mtu, fmt_dec); static int change_flags(struct net_device *dev, unsigned long new_flags) { return dev_change_flags(dev, (unsigned int) new_flags); } static ssize_t flags_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { return netdev_store(dev, attr, buf, len, change_flags); } NETDEVICE_SHOW_RW(flags, fmt_hex); static int change_tx_queue_len(struct net_device *dev, unsigned long new_len) { dev->tx_queue_len = new_len; return 0; } 
static ssize_t tx_queue_len_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { if (!capable(CAP_NET_ADMIN)) return -EPERM; return netdev_store(dev, attr, buf, len, change_tx_queue_len); } NETDEVICE_SHOW_RW(tx_queue_len, fmt_ulong); static int change_gro_flush_timeout(struct net_device *dev, unsigned long val) { dev->gro_flush_timeout = val; return 0; } static ssize_t gro_flush_timeout_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { if (!capable(CAP_NET_ADMIN)) return -EPERM; return netdev_store(dev, attr, buf, len, change_gro_flush_timeout); } NETDEVICE_SHOW_RW(gro_flush_timeout, fmt_ulong); static ssize_t ifalias_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct net_device *netdev = to_net_dev(dev); struct net *net = dev_net(netdev); size_t count = len; ssize_t ret; if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) return -EPERM; /* ignore trailing newline */ if (len > 0 && buf[len - 1] == '\n') --count; if (!rtnl_trylock()) return restart_syscall(); ret = dev_set_alias(netdev, buf, count); rtnl_unlock(); return ret < 0 ? 
ret : len; } static ssize_t ifalias_show(struct device *dev, struct device_attribute *attr, char *buf) { const struct net_device *netdev = to_net_dev(dev); ssize_t ret = 0; if (!rtnl_trylock()) return restart_syscall(); if (netdev->ifalias) ret = sprintf(buf, "%s\n", netdev->ifalias); rtnl_unlock(); return ret; } static DEVICE_ATTR_RW(ifalias); static int change_group(struct net_device *dev, unsigned long new_group) { dev_set_group(dev, (int) new_group); return 0; } static ssize_t group_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { return netdev_store(dev, attr, buf, len, change_group); } NETDEVICE_SHOW(group, fmt_dec); static DEVICE_ATTR(netdev_group, S_IRUGO | S_IWUSR, group_show, group_store); static ssize_t phys_port_id_show(struct device *dev, struct device_attribute *attr, char *buf) { struct net_device *netdev = to_net_dev(dev); ssize_t ret = -EINVAL; if (!rtnl_trylock()) return restart_syscall(); if (dev_isalive(netdev)) { struct netdev_phys_item_id ppid; ret = dev_get_phys_port_id(netdev, &ppid); if (!ret) ret = sprintf(buf, "%*phN\n", ppid.id_len, ppid.id); } rtnl_unlock(); return ret; } static DEVICE_ATTR_RO(phys_port_id); static ssize_t phys_port_name_show(struct device *dev, struct device_attribute *attr, char *buf) { struct net_device *netdev = to_net_dev(dev); ssize_t ret = -EINVAL; if (!rtnl_trylock()) return restart_syscall(); if (dev_isalive(netdev)) { char name[IFNAMSIZ]; ret = dev_get_phys_port_name(netdev, name, sizeof(name)); if (!ret) ret = sprintf(buf, "%s\n", name); } rtnl_unlock(); return ret; } static DEVICE_ATTR_RO(phys_port_name); static ssize_t phys_switch_id_show(struct device *dev, struct device_attribute *attr, char *buf) { struct net_device *netdev = to_net_dev(dev); ssize_t ret = -EINVAL; if (!rtnl_trylock()) return restart_syscall(); if (dev_isalive(netdev)) { struct switchdev_attr attr = { .id = SWITCHDEV_ATTR_PORT_PARENT_ID, .flags = SWITCHDEV_F_NO_RECURSE, }; ret = 
switchdev_port_attr_get(netdev, &attr); if (!ret) ret = sprintf(buf, "%*phN\n", attr.u.ppid.id_len, attr.u.ppid.id); } rtnl_unlock(); return ret; } static DEVICE_ATTR_RO(phys_switch_id); static struct attribute *net_class_attrs[] = { &dev_attr_netdev_group.attr, &dev_attr_type.attr, &dev_attr_dev_id.attr, &dev_attr_dev_port.attr, &dev_attr_iflink.attr, &dev_attr_ifindex.attr, &dev_attr_name_assign_type.attr, &dev_attr_addr_assign_type.attr, &dev_attr_addr_len.attr, &dev_attr_link_mode.attr, &dev_attr_address.attr, &dev_attr_broadcast.attr, &dev_attr_speed.attr, &dev_attr_duplex.attr, &dev_attr_dormant.attr, &dev_attr_operstate.attr, &dev_attr_carrier_changes.attr, &dev_attr_ifalias.attr, &dev_attr_carrier.attr, &dev_attr_mtu.attr, &dev_attr_flags.attr, &dev_attr_tx_queue_len.attr, &dev_attr_gro_flush_timeout.attr, &dev_attr_phys_port_id.attr, &dev_attr_phys_port_name.attr, &dev_attr_phys_switch_id.attr, NULL, }; ATTRIBUTE_GROUPS(net_class); /* Show a given an attribute in the statistics group */ static ssize_t netstat_show(const struct device *d, struct device_attribute *attr, char *buf, unsigned long offset) { struct net_device *dev = to_net_dev(d); ssize_t ret = -EINVAL; WARN_ON(offset > sizeof(struct rtnl_link_stats64) || offset % sizeof(u64) != 0); read_lock(&dev_base_lock); if (dev_isalive(dev)) { struct rtnl_link_stats64 temp; const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp); ret = sprintf(buf, fmt_u64, *(u64 *)(((u8 *) stats) + offset)); } read_unlock(&dev_base_lock); return ret; } /* generate a read-only statistics attribute */ #define NETSTAT_ENTRY(name) \ static ssize_t name##_show(struct device *d, \ struct device_attribute *attr, char *buf) \ { \ return netstat_show(d, attr, buf, \ offsetof(struct rtnl_link_stats64, name)); \ } \ static DEVICE_ATTR_RO(name) NETSTAT_ENTRY(rx_packets); NETSTAT_ENTRY(tx_packets); NETSTAT_ENTRY(rx_bytes); NETSTAT_ENTRY(tx_bytes); NETSTAT_ENTRY(rx_errors); NETSTAT_ENTRY(tx_errors); NETSTAT_ENTRY(rx_dropped); 
NETSTAT_ENTRY(tx_dropped); NETSTAT_ENTRY(multicast); NETSTAT_ENTRY(collisions); NETSTAT_ENTRY(rx_length_errors); NETSTAT_ENTRY(rx_over_errors); NETSTAT_ENTRY(rx_crc_errors); NETSTAT_ENTRY(rx_frame_errors); NETSTAT_ENTRY(rx_fifo_errors); NETSTAT_ENTRY(rx_missed_errors); NETSTAT_ENTRY(tx_aborted_errors); NETSTAT_ENTRY(tx_carrier_errors); NETSTAT_ENTRY(tx_fifo_errors); NETSTAT_ENTRY(tx_heartbeat_errors); NETSTAT_ENTRY(tx_window_errors); NETSTAT_ENTRY(rx_compressed); NETSTAT_ENTRY(tx_compressed); static struct attribute *netstat_attrs[] = { &dev_attr_rx_packets.attr, &dev_attr_tx_packets.attr, &dev_attr_rx_bytes.attr, &dev_attr_tx_bytes.attr, &dev_attr_rx_errors.attr, &dev_attr_tx_errors.attr, &dev_attr_rx_dropped.attr, &dev_attr_tx_dropped.attr, &dev_attr_multicast.attr, &dev_attr_collisions.attr, &dev_attr_rx_length_errors.attr, &dev_attr_rx_over_errors.attr, &dev_attr_rx_crc_errors.attr, &dev_attr_rx_frame_errors.attr, &dev_attr_rx_fifo_errors.attr, &dev_attr_rx_missed_errors.attr, &dev_attr_tx_aborted_errors.attr, &dev_attr_tx_carrier_errors.attr, &dev_attr_tx_fifo_errors.attr, &dev_attr_tx_heartbeat_errors.attr, &dev_attr_tx_window_errors.attr, &dev_attr_rx_compressed.attr, &dev_attr_tx_compressed.attr, NULL }; static struct attribute_group netstat_group = { .name = "statistics", .attrs = netstat_attrs, }; #if IS_ENABLED(CONFIG_WIRELESS_EXT) || IS_ENABLED(CONFIG_CFG80211) static struct attribute *wireless_attrs[] = { NULL }; static struct attribute_group wireless_group = { .name = "wireless", .attrs = wireless_attrs, }; #endif #else /* CONFIG_SYSFS */ #define net_class_groups NULL #endif /* CONFIG_SYSFS */ #ifdef CONFIG_SYSFS #define to_rx_queue_attr(_attr) container_of(_attr, \ struct rx_queue_attribute, attr) #define to_rx_queue(obj) container_of(obj, struct netdev_rx_queue, kobj) static ssize_t rx_queue_attr_show(struct kobject *kobj, struct attribute *attr, char *buf) { struct rx_queue_attribute *attribute = to_rx_queue_attr(attr); struct netdev_rx_queue 
*queue = to_rx_queue(kobj); if (!attribute->show) return -EIO; return attribute->show(queue, attribute, buf); } static ssize_t rx_queue_attr_store(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) { struct rx_queue_attribute *attribute = to_rx_queue_attr(attr); struct netdev_rx_queue *queue = to_rx_queue(kobj); if (!attribute->store) return -EIO; return attribute->store(queue, attribute, buf, count); } static const struct sysfs_ops rx_queue_sysfs_ops = { .show = rx_queue_attr_show, .store = rx_queue_attr_store, }; #ifdef CONFIG_RPS static ssize_t show_rps_map(struct netdev_rx_queue *queue, struct rx_queue_attribute *attribute, char *buf) { struct rps_map *map; cpumask_var_t mask; int i, len; if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) return -ENOMEM; rcu_read_lock(); map = rcu_dereference(queue->rps_map); if (map) for (i = 0; i < map->len; i++) cpumask_set_cpu(map->cpus[i], mask); len = snprintf(buf, PAGE_SIZE, "%*pb\n", cpumask_pr_args(mask)); rcu_read_unlock(); free_cpumask_var(mask); return len < PAGE_SIZE ? 
len : -EINVAL; } static ssize_t store_rps_map(struct netdev_rx_queue *queue, struct rx_queue_attribute *attribute, const char *buf, size_t len) { struct rps_map *old_map, *map; cpumask_var_t mask; int err, cpu, i; static DEFINE_SPINLOCK(rps_map_lock); if (!capable(CAP_NET_ADMIN)) return -EPERM; if (!alloc_cpumask_var(&mask, GFP_KERNEL)) return -ENOMEM; err = bitmap_parse(buf, len, cpumask_bits(mask), nr_cpumask_bits); if (err) { free_cpumask_var(mask); return err; } map = kzalloc(max_t(unsigned int, RPS_MAP_SIZE(cpumask_weight(mask)), L1_CACHE_BYTES), GFP_KERNEL); if (!map) { free_cpumask_var(mask); return -ENOMEM; } i = 0; for_each_cpu_and(cpu, mask, cpu_online_mask) map->cpus[i++] = cpu; if (i) map->len = i; else { kfree(map); map = NULL; } spin_lock(&rps_map_lock); old_map = rcu_dereference_protected(queue->rps_map, lockdep_is_held(&rps_map_lock)); rcu_assign_pointer(queue->rps_map, map); spin_unlock(&rps_map_lock); if (map) static_key_slow_inc(&rps_needed); if (old_map) { kfree_rcu(old_map, rcu); static_key_slow_dec(&rps_needed); } free_cpumask_var(mask); return len; } static ssize_t show_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue, struct rx_queue_attribute *attr, char *buf) { struct rps_dev_flow_table *flow_table; unsigned long val = 0; rcu_read_lock(); flow_table = rcu_dereference(queue->rps_flow_table); if (flow_table) val = (unsigned long)flow_table->mask + 1; rcu_read_unlock(); return sprintf(buf, "%lu\n", val); } static void rps_dev_flow_table_release(struct rcu_head *rcu) { struct rps_dev_flow_table *table = container_of(rcu, struct rps_dev_flow_table, rcu); vfree(table); } static ssize_t store_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue, struct rx_queue_attribute *attr, const char *buf, size_t len) { unsigned long mask, count; struct rps_dev_flow_table *table, *old_table; static DEFINE_SPINLOCK(rps_dev_flow_lock); int rc; if (!capable(CAP_NET_ADMIN)) return -EPERM; rc = kstrtoul(buf, 0, &count); if (rc < 0) return rc; if (count) { 
mask = count - 1; /* mask = roundup_pow_of_two(count) - 1; * without overflows... */ while ((mask | (mask >> 1)) != mask) mask |= (mask >> 1); /* On 64 bit arches, must check mask fits in table->mask (u32), * and on 32bit arches, must check * RPS_DEV_FLOW_TABLE_SIZE(mask + 1) doesn't overflow. */ #if BITS_PER_LONG > 32 if (mask > (unsigned long)(u32)mask) return -EINVAL; #else if (mask > (ULONG_MAX - RPS_DEV_FLOW_TABLE_SIZE(1)) / sizeof(struct rps_dev_flow)) { /* Enforce a limit to prevent overflow */ return -EINVAL; } #endif table = vmalloc(RPS_DEV_FLOW_TABLE_SIZE(mask + 1)); if (!table) return -ENOMEM; table->mask = mask; for (count = 0; count <= mask; count++) table->flows[count].cpu = RPS_NO_CPU; } else table = NULL; spin_lock(&rps_dev_flow_lock); old_table = rcu_dereference_protected(queue->rps_flow_table, lockdep_is_held(&rps_dev_flow_lock)); rcu_assign_pointer(queue->rps_flow_table, table); spin_unlock(&rps_dev_flow_lock); if (old_table) call_rcu(&old_table->rcu, rps_dev_flow_table_release); return len; } static struct rx_queue_attribute rps_cpus_attribute = __ATTR(rps_cpus, S_IRUGO | S_IWUSR, show_rps_map, store_rps_map); static struct rx_queue_attribute rps_dev_flow_table_cnt_attribute = __ATTR(rps_flow_cnt, S_IRUGO | S_IWUSR, show_rps_dev_flow_table_cnt, store_rps_dev_flow_table_cnt); #endif /* CONFIG_RPS */ static struct attribute *rx_queue_default_attrs[] = { #ifdef CONFIG_RPS &rps_cpus_attribute.attr, &rps_dev_flow_table_cnt_attribute.attr, #endif NULL }; static void rx_queue_release(struct kobject *kobj) { struct netdev_rx_queue *queue = to_rx_queue(kobj); #ifdef CONFIG_RPS struct rps_map *map; struct rps_dev_flow_table *flow_table; map = rcu_dereference_protected(queue->rps_map, 1); if (map) { RCU_INIT_POINTER(queue->rps_map, NULL); kfree_rcu(map, rcu); } flow_table = rcu_dereference_protected(queue->rps_flow_table, 1); if (flow_table) { RCU_INIT_POINTER(queue->rps_flow_table, NULL); call_rcu(&flow_table->rcu, rps_dev_flow_table_release); } #endif 
memset(kobj, 0, sizeof(*kobj)); dev_put(queue->dev); } static const void *rx_queue_namespace(struct kobject *kobj) { struct netdev_rx_queue *queue = to_rx_queue(kobj); struct device *dev = &queue->dev->dev; const void *ns = NULL; if (dev->class && dev->class->ns_type) ns = dev->class->namespace(dev); return ns; } static struct kobj_type rx_queue_ktype = { .sysfs_ops = &rx_queue_sysfs_ops, .release = rx_queue_release, .default_attrs = rx_queue_default_attrs, .namespace = rx_queue_namespace }; static int rx_queue_add_kobject(struct net_device *dev, int index) { struct netdev_rx_queue *queue = dev->_rx + index; struct kobject *kobj = &queue->kobj; int error = 0; kobj->kset = dev->queues_kset; error = kobject_init_and_add(kobj, &rx_queue_ktype, NULL, "rx-%u", index); if (error) goto exit; if (dev->sysfs_rx_queue_group) { error = sysfs_create_group(kobj, dev->sysfs_rx_queue_group); if (error) goto exit; } kobject_uevent(kobj, KOBJ_ADD); dev_hold(queue->dev); return error; exit: kobject_put(kobj); return error; } #endif /* CONFIG_SYSFS */ int net_rx_queue_update_kobjects(struct net_device *dev, int old_num, int new_num) { #ifdef CONFIG_SYSFS int i; int error = 0; #ifndef CONFIG_RPS if (!dev->sysfs_rx_queue_group) return 0; #endif for (i = old_num; i < new_num; i++) { error = rx_queue_add_kobject(dev, i); if (error) { new_num = old_num; break; } } while (--i >= new_num) { if (dev->sysfs_rx_queue_group) sysfs_remove_group(&dev->_rx[i].kobj, dev->sysfs_rx_queue_group); kobject_put(&dev->_rx[i].kobj); } return error; #else return 0; #endif } #ifdef CONFIG_SYSFS /* * netdev_queue sysfs structures and functions. 
*/ struct netdev_queue_attribute { struct attribute attr; ssize_t (*show)(struct netdev_queue *queue, struct netdev_queue_attribute *attr, char *buf); ssize_t (*store)(struct netdev_queue *queue, struct netdev_queue_attribute *attr, const char *buf, size_t len); }; #define to_netdev_queue_attr(_attr) container_of(_attr, \ struct netdev_queue_attribute, attr) #define to_netdev_queue(obj) container_of(obj, struct netdev_queue, kobj) static ssize_t netdev_queue_attr_show(struct kobject *kobj, struct attribute *attr, char *buf) { struct netdev_queue_attribute *attribute = to_netdev_queue_attr(attr); struct netdev_queue *queue = to_netdev_queue(kobj); if (!attribute->show) return -EIO; return attribute->show(queue, attribute, buf); } static ssize_t netdev_queue_attr_store(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) { struct netdev_queue_attribute *attribute = to_netdev_queue_attr(attr); struct netdev_queue *queue = to_netdev_queue(kobj); if (!attribute->store) return -EIO; return attribute->store(queue, attribute, buf, count); } static const struct sysfs_ops netdev_queue_sysfs_ops = { .show = netdev_queue_attr_show, .store = netdev_queue_attr_store, }; static ssize_t show_trans_timeout(struct netdev_queue *queue, struct netdev_queue_attribute *attribute, char *buf) { unsigned long trans_timeout; spin_lock_irq(&queue->_xmit_lock); trans_timeout = queue->trans_timeout; spin_unlock_irq(&queue->_xmit_lock); return sprintf(buf, "%lu", trans_timeout); } #ifdef CONFIG_XPS static inline unsigned int get_netdev_queue_index(struct netdev_queue *queue) { struct net_device *dev = queue->dev; int i; for (i = 0; i < dev->num_tx_queues; i++) if (queue == &dev->_tx[i]) break; BUG_ON(i >= dev->num_tx_queues); return i; } static ssize_t show_tx_maxrate(struct netdev_queue *queue, struct netdev_queue_attribute *attribute, char *buf) { return sprintf(buf, "%lu\n", queue->tx_maxrate); } static ssize_t set_tx_maxrate(struct netdev_queue *queue, struct 
netdev_queue_attribute *attribute, const char *buf, size_t len) { struct net_device *dev = queue->dev; int err, index = get_netdev_queue_index(queue); u32 rate = 0; err = kstrtou32(buf, 10, &rate); if (err < 0) return err; if (!rtnl_trylock()) return restart_syscall(); err = -EOPNOTSUPP; if (dev->netdev_ops->ndo_set_tx_maxrate) err = dev->netdev_ops->ndo_set_tx_maxrate(dev, index, rate); rtnl_unlock(); if (!err) { queue->tx_maxrate = rate; return len; } return err; } static struct netdev_queue_attribute queue_tx_maxrate = __ATTR(tx_maxrate, S_IRUGO | S_IWUSR, show_tx_maxrate, set_tx_maxrate); #endif static struct netdev_queue_attribute queue_trans_timeout = __ATTR(tx_timeout, S_IRUGO, show_trans_timeout, NULL); #ifdef CONFIG_BQL /* * Byte queue limits sysfs structures and functions. */ static ssize_t bql_show(char *buf, unsigned int value) { return sprintf(buf, "%u\n", value); } static ssize_t bql_set(const char *buf, const size_t count, unsigned int *pvalue) { unsigned int value; int err; if (!strcmp(buf, "max") || !strcmp(buf, "max\n")) value = DQL_MAX_LIMIT; else { err = kstrtouint(buf, 10, &value); if (err < 0) return err; if (value > DQL_MAX_LIMIT) return -EINVAL; } *pvalue = value; return count; } static ssize_t bql_show_hold_time(struct netdev_queue *queue, struct netdev_queue_attribute *attr, char *buf) { struct dql *dql = &queue->dql; return sprintf(buf, "%u\n", jiffies_to_msecs(dql->slack_hold_time)); } static ssize_t bql_set_hold_time(struct netdev_queue *queue, struct netdev_queue_attribute *attribute, const char *buf, size_t len) { struct dql *dql = &queue->dql; unsigned int value; int err; err = kstrtouint(buf, 10, &value); if (err < 0) return err; dql->slack_hold_time = msecs_to_jiffies(value); return len; } static struct netdev_queue_attribute bql_hold_time_attribute = __ATTR(hold_time, S_IRUGO | S_IWUSR, bql_show_hold_time, bql_set_hold_time); static ssize_t bql_show_inflight(struct netdev_queue *queue, struct netdev_queue_attribute *attr, char 
*buf) { struct dql *dql = &queue->dql; return sprintf(buf, "%u\n", dql->num_queued - dql->num_completed); } static struct netdev_queue_attribute bql_inflight_attribute = __ATTR(inflight, S_IRUGO, bql_show_inflight, NULL); #define BQL_ATTR(NAME, FIELD) \ static ssize_t bql_show_ ## NAME(struct netdev_queue *queue, \ struct netdev_queue_attribute *attr, \ char *buf) \ { \ return bql_show(buf, queue->dql.FIELD); \ } \ \ static ssize_t bql_set_ ## NAME(struct netdev_queue *queue, \ struct netdev_queue_attribute *attr, \ const char *buf, size_t len) \ { \ return bql_set(buf, len, &queue->dql.FIELD); \ } \ \ static struct netdev_queue_attribute bql_ ## NAME ## _attribute = \ __ATTR(NAME, S_IRUGO | S_IWUSR, bql_show_ ## NAME, \ bql_set_ ## NAME); BQL_ATTR(limit, limit) BQL_ATTR(limit_max, max_limit) BQL_ATTR(limit_min, min_limit) static struct attribute *dql_attrs[] = { &bql_limit_attribute.attr, &bql_limit_max_attribute.attr, &bql_limit_min_attribute.attr, &bql_hold_time_attribute.attr, &bql_inflight_attribute.attr, NULL }; static struct attribute_group dql_group = { .name = "byte_queue_limits", .attrs = dql_attrs, }; #endif /* CONFIG_BQL */ #ifdef CONFIG_XPS static ssize_t show_xps_map(struct netdev_queue *queue, struct netdev_queue_attribute *attribute, char *buf) { struct net_device *dev = queue->dev; struct xps_dev_maps *dev_maps; cpumask_var_t mask; unsigned long index; int i, len; if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) return -ENOMEM; index = get_netdev_queue_index(queue); rcu_read_lock(); dev_maps = rcu_dereference(dev->xps_maps); if (dev_maps) { for_each_possible_cpu(i) { struct xps_map *map = rcu_dereference(dev_maps->cpu_map[i]); if (map) { int j; for (j = 0; j < map->len; j++) { if (map->queues[j] == index) { cpumask_set_cpu(i, mask); break; } } } } } rcu_read_unlock(); len = snprintf(buf, PAGE_SIZE, "%*pb\n", cpumask_pr_args(mask)); free_cpumask_var(mask); return len < PAGE_SIZE ? 
len : -EINVAL; } static ssize_t store_xps_map(struct netdev_queue *queue, struct netdev_queue_attribute *attribute, const char *buf, size_t len) { struct net_device *dev = queue->dev; unsigned long index; cpumask_var_t mask; int err; if (!capable(CAP_NET_ADMIN)) return -EPERM; if (!alloc_cpumask_var(&mask, GFP_KERNEL)) return -ENOMEM; index = get_netdev_queue_index(queue); err = bitmap_parse(buf, len, cpumask_bits(mask), nr_cpumask_bits); if (err) { free_cpumask_var(mask); return err; } err = netif_set_xps_queue(dev, mask, index); free_cpumask_var(mask); return err ? : len; } static struct netdev_queue_attribute xps_cpus_attribute = __ATTR(xps_cpus, S_IRUGO | S_IWUSR, show_xps_map, store_xps_map); #endif /* CONFIG_XPS */ static struct attribute *netdev_queue_default_attrs[] = { &queue_trans_timeout.attr, #ifdef CONFIG_XPS &xps_cpus_attribute.attr, &queue_tx_maxrate.attr, #endif NULL }; static void netdev_queue_release(struct kobject *kobj) { struct netdev_queue *queue = to_netdev_queue(kobj); memset(kobj, 0, sizeof(*kobj)); dev_put(queue->dev); } static const void *netdev_queue_namespace(struct kobject *kobj) { struct netdev_queue *queue = to_netdev_queue(kobj); struct device *dev = &queue->dev->dev; const void *ns = NULL; if (dev->class && dev->class->ns_type) ns = dev->class->namespace(dev); return ns; } static struct kobj_type netdev_queue_ktype = { .sysfs_ops = &netdev_queue_sysfs_ops, .release = netdev_queue_release, .default_attrs = netdev_queue_default_attrs, .namespace = netdev_queue_namespace, }; static int netdev_queue_add_kobject(struct net_device *dev, int index) { struct netdev_queue *queue = dev->_tx + index; struct kobject *kobj = &queue->kobj; int error = 0; kobj->kset = dev->queues_kset; error = kobject_init_and_add(kobj, &netdev_queue_ktype, NULL, "tx-%u", index); if (error) goto exit; #ifdef CONFIG_BQL error = sysfs_create_group(kobj, &dql_group); if (error) goto exit; #endif kobject_uevent(kobj, KOBJ_ADD); dev_hold(queue->dev); return 0; exit: 
kobject_put(kobj); return error; } #endif /* CONFIG_SYSFS */ int netdev_queue_update_kobjects(struct net_device *dev, int old_num, int new_num) { #ifdef CONFIG_SYSFS int i; int error = 0; for (i = old_num; i < new_num; i++) { error = netdev_queue_add_kobject(dev, i); if (error) { new_num = old_num; break; } } while (--i >= new_num) { struct netdev_queue *queue = dev->_tx + i; #ifdef CONFIG_BQL sysfs_remove_group(&queue->kobj, &dql_group); #endif kobject_put(&queue->kobj); } return error; #else return 0; #endif /* CONFIG_SYSFS */ } static int register_queue_kobjects(struct net_device *dev) { int error = 0, txq = 0, rxq = 0, real_rx = 0, real_tx = 0; #ifdef CONFIG_SYSFS dev->queues_kset = kset_create_and_add("queues", NULL, &dev->dev.kobj); if (!dev->queues_kset) return -ENOMEM; real_rx = dev->real_num_rx_queues; #endif real_tx = dev->real_num_tx_queues; error = net_rx_queue_update_kobjects(dev, 0, real_rx); if (error) goto error; rxq = real_rx; error = netdev_queue_update_kobjects(dev, 0, real_tx); if (error) goto error; txq = real_tx; return 0; error: netdev_queue_update_kobjects(dev, txq, 0); net_rx_queue_update_kobjects(dev, rxq, 0); return error; } static void remove_queue_kobjects(struct net_device *dev) { int real_rx = 0, real_tx = 0; #ifdef CONFIG_SYSFS real_rx = dev->real_num_rx_queues; #endif real_tx = dev->real_num_tx_queues; net_rx_queue_update_kobjects(dev, real_rx, 0); netdev_queue_update_kobjects(dev, real_tx, 0); #ifdef CONFIG_SYSFS kset_unregister(dev->queues_kset); #endif } static bool net_current_may_mount(void) { struct net *net = current->nsproxy->net_ns; return ns_capable(net->user_ns, CAP_SYS_ADMIN); } static void *net_grab_current_ns(void) { struct net *ns = current->nsproxy->net_ns; #ifdef CONFIG_NET_NS if (ns) atomic_inc(&ns->passive); #endif return ns; } static const void *net_initial_ns(void) { return &init_net; } static const void *net_netlink_ns(struct sock *sk) { return sock_net(sk); } struct kobj_ns_type_operations 
net_ns_type_operations = { .type = KOBJ_NS_TYPE_NET, .current_may_mount = net_current_may_mount, .grab_current_ns = net_grab_current_ns, .netlink_ns = net_netlink_ns, .initial_ns = net_initial_ns, .drop_ns = net_drop_ns, }; EXPORT_SYMBOL_GPL(net_ns_type_operations); static int netdev_uevent(struct device *d, struct kobj_uevent_env *env) { struct net_device *dev = to_net_dev(d); int retval; /* pass interface to uevent. */ retval = add_uevent_var(env, "INTERFACE=%s", dev->name); if (retval) goto exit; /* pass ifindex to uevent. * ifindex is useful as it won't change (interface name may change) * and is what RtNetlink uses natively. */ retval = add_uevent_var(env, "IFINDEX=%d", dev->ifindex); exit: return retval; } /* * netdev_release -- destroy and free a dead device. * Called when last reference to device kobject is gone. */ static void netdev_release(struct device *d) { struct net_device *dev = to_net_dev(d); BUG_ON(dev->reg_state != NETREG_RELEASED); kfree(dev->ifalias); netdev_freemem(dev); } static const void *net_namespace(struct device *d) { struct net_device *dev; dev = container_of(d, struct net_device, dev); return dev_net(dev); } static struct class net_class = { .name = "net", .dev_release = netdev_release, .dev_groups = net_class_groups, .dev_uevent = netdev_uevent, .ns_type = &net_ns_type_operations, .namespace = net_namespace, }; #ifdef CONFIG_OF_NET static int of_dev_node_match(struct device *dev, const void *data) { int ret = 0; if (dev->parent) ret = dev->parent->of_node == data; return ret == 0 ? dev->of_node == data : ret; } struct net_device *of_find_net_device_by_node(struct device_node *np) { struct device *dev; dev = class_find_device(&net_class, NULL, np, of_dev_node_match); if (!dev) return NULL; return to_net_dev(dev); } EXPORT_SYMBOL(of_find_net_device_by_node); #endif /* Delete sysfs entries but hold kobject reference until after all * netdev references are gone. 
*/ void netdev_unregister_kobject(struct net_device *ndev) { struct device *dev = &(ndev->dev); kobject_get(&dev->kobj); remove_queue_kobjects(ndev); pm_runtime_set_memalloc_noio(dev, false); device_del(dev); } /* Create sysfs entries for network device. */ int netdev_register_kobject(struct net_device *ndev) { struct device *dev = &(ndev->dev); const struct attribute_group **groups = ndev->sysfs_groups; int error = 0; device_initialize(dev); dev->class = &net_class; dev->platform_data = ndev; dev->groups = groups; dev_set_name(dev, "%s", ndev->name); #ifdef CONFIG_SYSFS /* Allow for a device specific group */ if (*groups) groups++; *groups++ = &netstat_group; #if IS_ENABLED(CONFIG_WIRELESS_EXT) || IS_ENABLED(CONFIG_CFG80211) if (ndev->ieee80211_ptr) *groups++ = &wireless_group; #if IS_ENABLED(CONFIG_WIRELESS_EXT) else if (ndev->wireless_handlers) *groups++ = &wireless_group; #endif #endif #endif /* CONFIG_SYSFS */ error = device_add(dev); if (error) return error; error = register_queue_kobjects(ndev); if (error) { device_del(dev); return error; } pm_runtime_set_memalloc_noio(dev, true); return error; } int netdev_class_create_file_ns(struct class_attribute *class_attr, const void *ns) { return class_create_file_ns(&net_class, class_attr, ns); } EXPORT_SYMBOL(netdev_class_create_file_ns); void netdev_class_remove_file_ns(struct class_attribute *class_attr, const void *ns) { class_remove_file_ns(&net_class, class_attr, ns); } EXPORT_SYMBOL(netdev_class_remove_file_ns); int __init netdev_kobject_init(void) { kobj_ns_type_register(&net_ns_type_operations); return class_register(&net_class); }
gpl-2.0
imoseyon/leanKernel-tbolt-gingerbread
drivers/media/IR/keymaps/rc-budget-ci-old.c
953
2518
/* budget-ci-old.h - Keytable for budget_ci_old Remote Controller
 *
 * keymap imported from ir-keymaps.c
 *
 * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <media/rc-map.h>

/* From reading the following remotes:
 * Zenith Universal 7 / TV Mode 807 / VCR Mode 837
 * Hauppauge (from NOVA-CI-s box product)
 * This is a "middle of the road" approach, differences are noted
 */

/* Scancode -> keycode table; entries whose meaning differs between the
 * Zenith and Hauppauge remotes carry an inline note.
 */
static struct ir_scancode budget_ci_old[] = {
	{ 0x00, KEY_0 },
	{ 0x01, KEY_1 },
	{ 0x02, KEY_2 },
	{ 0x03, KEY_3 },
	{ 0x04, KEY_4 },
	{ 0x05, KEY_5 },
	{ 0x06, KEY_6 },
	{ 0x07, KEY_7 },
	{ 0x08, KEY_8 },
	{ 0x09, KEY_9 },
	{ 0x0a, KEY_ENTER },
	{ 0x0b, KEY_RED },
	{ 0x0c, KEY_POWER },		/* RADIO on Hauppauge */
	{ 0x0d, KEY_MUTE },
	{ 0x0f, KEY_A },		/* TV on Hauppauge */
	{ 0x10, KEY_VOLUMEUP },
	{ 0x11, KEY_VOLUMEDOWN },
	{ 0x14, KEY_B },
	{ 0x1c, KEY_UP },
	{ 0x1d, KEY_DOWN },
	{ 0x1e, KEY_OPTION },		/* RESERVED on Hauppauge */
	{ 0x1f, KEY_BREAK },
	{ 0x20, KEY_CHANNELUP },
	{ 0x21, KEY_CHANNELDOWN },
	{ 0x22, KEY_PREVIOUS },		/* Prev Ch on Zenith, SOURCE on Hauppauge */
	{ 0x24, KEY_RESTART },
	{ 0x25, KEY_OK },
	{ 0x26, KEY_CYCLEWINDOWS },	/* MINIMIZE on Hauppauge */
	{ 0x28, KEY_ENTER },		/* VCR mode on Zenith */
	{ 0x29, KEY_PAUSE },
	{ 0x2b, KEY_RIGHT },
	{ 0x2c, KEY_LEFT },
	{ 0x2e, KEY_MENU },		/* FULL SCREEN on Hauppauge */
	{ 0x30, KEY_SLOW },
	{ 0x31, KEY_PREVIOUS },		/* VCR mode on Zenith */
	{ 0x32, KEY_REWIND },
	{ 0x34, KEY_FASTFORWARD },
	{ 0x35, KEY_PLAY },
	{ 0x36, KEY_STOP },
	{ 0x37, KEY_RECORD },
	{ 0x38, KEY_TUNER },		/* TV/VCR on Zenith */
	{ 0x3a, KEY_C },
	{ 0x3c, KEY_EXIT },
	{ 0x3d, KEY_POWER2 },
	{ 0x3e, KEY_TUNER },
};

/* rc-core map descriptor: wraps the table, its size and map name so it
 * can be registered with the IR keymap core.
 */
static struct rc_keymap budget_ci_old_map = {
	.map = {
		.scan    = budget_ci_old,
		.size    = ARRAY_SIZE(budget_ci_old),
		.ir_type = IR_TYPE_UNKNOWN,	/* Legacy IR type */
		.name    = RC_MAP_BUDGET_CI_OLD,
	}
};

/* Register the keymap with rc-core at module load. */
static int __init init_rc_map_budget_ci_old(void)
{
	return ir_register_map(&budget_ci_old_map);
}

/* Unregister the keymap at module unload. */
static void __exit exit_rc_map_budget_ci_old(void)
{
	ir_unregister_map(&budget_ci_old_map);
}

module_init(init_rc_map_budget_ci_old)
module_exit(exit_rc_map_budget_ci_old)

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
gpl-2.0
TheNameIsNigel/kernel_common
drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c
2233
8331
/*
 * Copyright © 2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *	Li Peng <peng.li@intel.com>
 */

#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include "psb_drv.h"

#define HDMI_READ(reg)		readl(hdmi_dev->regs + (reg))
#define HDMI_WRITE(reg, val)	writel(val, hdmi_dev->regs + (reg))

#define HDMI_HCR	0x1000
#define HCR_DETECT_HDP		(1 << 6)
#define HCR_ENABLE_HDCP		(1 << 5)
#define HCR_ENABLE_AUDIO	(1 << 2)
#define HCR_ENABLE_PIXEL	(1 << 1)
#define HCR_ENABLE_TMDS		(1 << 0)

#define HDMI_HICR	0x1004
#define HDMI_INTR_I2C_ERROR	(1 << 4)
#define HDMI_INTR_I2C_FULL	(1 << 3)
#define HDMI_INTR_I2C_DONE	(1 << 2)
#define HDMI_INTR_HPD		(1 << 0)

#define HDMI_HSR	0x1008
#define HDMI_HISR	0x100C

#define HDMI_HI2CRDB0	0x1200
#define HDMI_HI2CHCR	0x1240
#define HI2C_HDCP_WRITE		(0 << 2)
#define HI2C_HDCP_RI_READ	(1 << 2)
#define HI2C_HDCP_READ		(2 << 2)
#define HI2C_EDID_READ		(3 << 2)
#define HI2C_READ_CONTINUE	(1 << 1)
#define HI2C_ENABLE_TRANSACTION	(1 << 0)

#define HDMI_ICRH	0x1100
#define HDMI_HI2CTDR0	0x1244
#define HDMI_HI2CTDR1	0x1248

/* Driver-internal progress states for an EDID read transaction */
#define I2C_STAT_INIT		0
#define I2C_READ_DONE		1
#define I2C_TRANSACTION_DONE	2

/*
 * Per-device state for the HDMI unit's built-in I2C (DDC) controller.
 * @adap:	the registered i2c adapter (a static singleton below)
 * @i2c_lock:	serializes whole transfers
 * @complete:	signalled from the IRQ handler on FIFO-full/done interrupts
 * @status:	one of the I2C_* progress states above, driven by the IRQ path
 * @msg:	the in-flight read message (buffer filled from the IRQ path)
 * @buf_offset:	how many bytes of @msg->buf have been filled so far
 */
struct hdmi_i2c_dev {
	struct i2c_adapter *adap;
	struct mutex i2c_lock;
	struct completion complete;
	int status;
	struct i2c_msg *msg;
	int buf_offset;
};

/* Unmask the I2C error/FIFO-full/done interrupts (read back to post). */
static void hdmi_i2c_irq_enable(struct oaktrail_hdmi_dev *hdmi_dev)
{
	u32 temp;

	temp = HDMI_READ(HDMI_HICR);
	temp |= (HDMI_INTR_I2C_ERROR | HDMI_INTR_I2C_FULL | HDMI_INTR_I2C_DONE);
	HDMI_WRITE(HDMI_HICR, temp);
	HDMI_READ(HDMI_HICR);
}

/* Mask all HDMI unit interrupts. */
static void hdmi_i2c_irq_disable(struct oaktrail_hdmi_dev *hdmi_dev)
{
	HDMI_WRITE(HDMI_HICR, 0x0);
	HDMI_READ(HDMI_HICR);
}

/*
 * Kick off a hardware EDID read and wait for the IRQ handler to drive it
 * to completion.  Returns 0 on success, -EIO if the transaction timed out
 * or the wait was interrupted (previously this looped forever in either
 * case, leaving the caller unkillable).
 */
static int xfer_read(struct i2c_adapter *adap, struct i2c_msg *pmsg)
{
	struct oaktrail_hdmi_dev *hdmi_dev = i2c_get_adapdata(adap);
	struct hdmi_i2c_dev *i2c_dev = hdmi_dev->i2c_dev;
	u32 temp;
	long time_left;

	i2c_dev->status = I2C_STAT_INIT;
	i2c_dev->msg = pmsg;
	i2c_dev->buf_offset = 0;
	INIT_COMPLETION(i2c_dev->complete);

	/* Enable I2C transaction: length in bits 20+, EDID read mode */
	temp = ((pmsg->len) << 20) | HI2C_EDID_READ | HI2C_ENABLE_TRANSACTION;
	HDMI_WRITE(HDMI_HI2CHCR, temp);
	HDMI_READ(HDMI_HI2CHCR);

	while (i2c_dev->status != I2C_TRANSACTION_DONE) {
		time_left = wait_for_completion_interruptible_timeout(
					&i2c_dev->complete, 10 * HZ);
		/* Bail out on timeout (0) or pending signal (<0) */
		if (time_left <= 0)
			return -EIO;
	}

	return 0;
}

static int xfer_write(struct i2c_adapter *adap, struct i2c_msg *pmsg)
{
	/*
	 * XXX: i2c write seems isn't useful for EDID probe, don't do anything
	 */
	return 0;
}

/*
 * i2c_algorithm.master_xfer implementation: enables the unit and its
 * interrupts, runs each message (only reads do real work), then masks
 * interrupts again.  Returns the number of messages processed.
 */
static int oaktrail_hdmi_i2c_access(struct i2c_adapter *adap,
				struct i2c_msg *pmsg,
				int num)
{
	struct oaktrail_hdmi_dev *hdmi_dev = i2c_get_adapdata(adap);
	struct hdmi_i2c_dev *i2c_dev = hdmi_dev->i2c_dev;
	int i;

	mutex_lock(&i2c_dev->i2c_lock);

	/* Enable i2c unit */
	HDMI_WRITE(HDMI_ICRH, 0x00008760);

	/* Enable irq */
	hdmi_i2c_irq_enable(hdmi_dev);
	for (i = 0; i < num; i++) {
		if (pmsg->len && pmsg->buf) {
			if (pmsg->flags & I2C_M_RD)
				xfer_read(adap, pmsg);
			else
				xfer_write(adap, pmsg);
		}
		pmsg++;		/* next message */
	}

	/* Disable irq */
	hdmi_i2c_irq_disable(hdmi_dev);

	mutex_unlock(&i2c_dev->i2c_lock);

	return i;
}

static u32 oaktrail_hdmi_i2c_func(struct i2c_adapter *adapter)
{
	return I2C_FUNC_I2C | I2C_FUNC_10BIT_ADDR;
}

static const struct i2c_algorithm oaktrail_hdmi_i2c_algorithm = {
	.master_xfer	= oaktrail_hdmi_i2c_access,
	.functionality  = oaktrail_hdmi_i2c_func,
};

static struct i2c_adapter oaktrail_hdmi_i2c_adapter = {
	.name		= "oaktrail_hdmi_i2c",
	.nr		= 3,
	.owner		= THIS_MODULE,
	.class		= I2C_CLASS_DDC,
	.algo		= &oaktrail_hdmi_i2c_algorithm,
};

/*
 * IRQ-context: drain the 16-word (64-byte) read FIFO into the current
 * message buffer, ack the FIFO-full interrupt, and tell the hardware to
 * continue the read.
 */
static void hdmi_i2c_read(struct oaktrail_hdmi_dev *hdmi_dev)
{
	struct hdmi_i2c_dev *i2c_dev = hdmi_dev->i2c_dev;
	struct i2c_msg *msg = i2c_dev->msg;
	u8 *buf = msg->buf;
	u32 temp;
	int i, offset;

	offset = i2c_dev->buf_offset;
	for (i = 0; i < 0x10; i++) {
		temp = HDMI_READ(HDMI_HI2CRDB0 + (i * 4));
		memcpy(buf + (offset + i * 4), &temp, 4);
	}
	i2c_dev->buf_offset += (0x10 * 4);

	/* clearing read buffer full intr */
	temp = HDMI_READ(HDMI_HISR);
	HDMI_WRITE(HDMI_HISR, temp | HDMI_INTR_I2C_FULL);
	HDMI_READ(HDMI_HISR);

	/* continue read transaction */
	temp = HDMI_READ(HDMI_HI2CHCR);
	HDMI_WRITE(HDMI_HI2CHCR, temp | HI2C_READ_CONTINUE);
	HDMI_READ(HDMI_HI2CHCR);

	i2c_dev->status = I2C_READ_DONE;
	return;
}

/* IRQ-context: ack the transaction-done interrupt and disable the engine. */
static void hdmi_i2c_transaction_done(struct oaktrail_hdmi_dev *hdmi_dev)
{
	struct hdmi_i2c_dev *i2c_dev = hdmi_dev->i2c_dev;
	u32 temp;

	/* clear transaction done intr */
	temp = HDMI_READ(HDMI_HISR);
	HDMI_WRITE(HDMI_HISR, temp | HDMI_INTR_I2C_DONE);
	HDMI_READ(HDMI_HISR);

	temp = HDMI_READ(HDMI_HI2CHCR);
	HDMI_WRITE(HDMI_HI2CHCR, temp & ~HI2C_ENABLE_TRANSACTION);
	HDMI_READ(HDMI_HI2CHCR);

	i2c_dev->status = I2C_TRANSACTION_DONE;
	return;
}

/*
 * Shared IRQ handler: acks hot-plug, drains the FIFO on "full", finishes
 * the transaction on "done", and wakes the waiter in xfer_read().
 */
static irqreturn_t oaktrail_hdmi_i2c_handler(int this_irq, void *dev)
{
	struct oaktrail_hdmi_dev *hdmi_dev = dev;
	struct hdmi_i2c_dev *i2c_dev = hdmi_dev->i2c_dev;
	u32 stat;

	stat = HDMI_READ(HDMI_HISR);

	if (stat & HDMI_INTR_HPD) {
		HDMI_WRITE(HDMI_HISR, stat | HDMI_INTR_HPD);
		HDMI_READ(HDMI_HISR);
	}

	if (stat & HDMI_INTR_I2C_FULL)
		hdmi_i2c_read(hdmi_dev);

	if (stat & HDMI_INTR_I2C_DONE)
		hdmi_i2c_transaction_done(hdmi_dev);

	complete(&i2c_dev->complete);

	return IRQ_HANDLED;
}

/*
 * choose alternate function 2 of GPIO pin 52, 53,
 * which is used by HDMI I2C logic
 */
static void oaktrail_hdmi_i2c_gpio_fix(void)
{
	void __iomem *base;
	unsigned int gpio_base = 0xff12c000;
	int gpio_len = 0x1000;
	u32 temp;

	base = ioremap((resource_size_t)gpio_base, gpio_len);
	if (base == NULL) {
		DRM_ERROR("gpio ioremap fail\n");
		return;
	}

	temp = readl(base + 0x44);
	DRM_DEBUG_DRIVER("old gpio val %x\n", temp);
	writel((temp | 0x00000a00), (base + 0x44));
	temp = readl(base + 0x44);
	DRM_DEBUG_DRIVER("new gpio val %x\n", temp);

	iounmap(base);
}

/*
 * Set up the HDMI DDC i2c adapter: allocate driver state, route the GPIO
 * pins, grab the (shared) IRQ, and register the numbered adapter.
 *
 * Returns 0 on success or a negative errno.  All partially-acquired
 * resources are released on failure (the original code leaked the IRQ and
 * the hdmi_i2c_dev allocation — and left hdmi_dev->i2c_dev dangling — if
 * i2c_add_numbered_adapter() failed).
 */
int oaktrail_hdmi_i2c_init(struct pci_dev *dev)
{
	struct oaktrail_hdmi_dev *hdmi_dev;
	struct hdmi_i2c_dev *i2c_dev;
	int ret;

	hdmi_dev = pci_get_drvdata(dev);

	i2c_dev = kzalloc(sizeof(struct hdmi_i2c_dev), GFP_KERNEL);
	if (i2c_dev == NULL) {
		DRM_ERROR("Can't allocate interface\n");
		ret = -ENOMEM;
		goto exit;
	}

	i2c_dev->adap = &oaktrail_hdmi_i2c_adapter;
	i2c_dev->status = I2C_STAT_INIT;
	init_completion(&i2c_dev->complete);
	mutex_init(&i2c_dev->i2c_lock);
	i2c_set_adapdata(&oaktrail_hdmi_i2c_adapter, hdmi_dev);
	hdmi_dev->i2c_dev = i2c_dev;

	/* Enable HDMI I2C function on gpio */
	oaktrail_hdmi_i2c_gpio_fix();

	/* request irq */
	ret = request_irq(dev->irq, oaktrail_hdmi_i2c_handler, IRQF_SHARED,
			  oaktrail_hdmi_i2c_adapter.name, hdmi_dev);
	if (ret) {
		DRM_ERROR("Failed to request IRQ for I2C controller\n");
		goto err;
	}

	/* Adapter registration */
	ret = i2c_add_numbered_adapter(&oaktrail_hdmi_i2c_adapter);
	if (ret) {
		DRM_ERROR("Failed to add I2C adapter\n");
		goto err_free_irq;
	}

	return 0;

err_free_irq:
	free_irq(dev->irq, hdmi_dev);
err:
	hdmi_dev->i2c_dev = NULL;
	kfree(i2c_dev);
exit:
	return ret;
}

/* Tear down the adapter, IRQ and driver state created by _init(). */
void oaktrail_hdmi_i2c_exit(struct pci_dev *dev)
{
	struct oaktrail_hdmi_dev *hdmi_dev;
	struct hdmi_i2c_dev *i2c_dev;

	hdmi_dev = pci_get_drvdata(dev);
	i2c_del_adapter(&oaktrail_hdmi_i2c_adapter);

	i2c_dev = hdmi_dev->i2c_dev;
	kfree(i2c_dev);
	free_irq(dev->irq, hdmi_dev);
}
gpl-2.0
rkharwar/ubuntu-saucy-powerpc
drivers/gpu/host1x/cdma.c
2233
12744
/*
 * Tegra host1x Command DMA
 *
 * Copyright (c) 2010-2013, NVIDIA Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */


#include <asm/cacheflush.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/kfifo.h>
#include <linux/slab.h>
#include <trace/events/host1x.h>

#include "cdma.h"
#include "channel.h"
#include "dev.h"
#include "debug.h"
#include "host1x_bo.h"
#include "job.h"

/*
 * push_buffer
 *
 * The push buffer is a circular array of words to be fetched by command DMA.
 * Note that it works slightly differently to the sync queue; fence == pos
 * means that the push buffer is full, not empty.
 */

#define HOST1X_PUSHBUFFER_SLOTS	512

/*
 * Clean up push buffer resources
 */
static void host1x_pushbuffer_destroy(struct push_buffer *pb)
{
	struct host1x_cdma *cdma = pb_to_cdma(pb);
	struct host1x *host1x = cdma_to_host1x(cdma);

	if (pb->phys != 0)
		dma_free_writecombine(host1x->dev, pb->size_bytes + 4,
				      pb->mapped, pb->phys);

	pb->mapped = NULL;
	pb->phys = 0;
}

/*
 * Init push buffer resources
 */
static int host1x_pushbuffer_init(struct push_buffer *pb)
{
	struct host1x_cdma *cdma = pb_to_cdma(pb);
	struct host1x *host1x = cdma_to_host1x(cdma);

	pb->mapped = NULL;
	pb->phys = 0;
	pb->size_bytes = HOST1X_PUSHBUFFER_SLOTS * 8;

	/* initialize buffer pointers */
	pb->fence = pb->size_bytes - 8;
	pb->pos = 0;

	/* allocate and map pushbuffer memory */
	pb->mapped = dma_alloc_writecombine(host1x->dev, pb->size_bytes + 4,
					    &pb->phys, GFP_KERNEL);
	if (!pb->mapped)
		goto fail;

	host1x_hw_pushbuffer_init(host1x, pb);

	return 0;

fail:
	host1x_pushbuffer_destroy(pb);
	return -ENOMEM;
}

/*
 * Push two words to the push buffer
 * Caller must ensure push buffer is not full
 *
 * Note: the write pointer is computed with void-pointer arithmetic; the
 * previous (u32)pb->mapped cast truncated the CPU address to 32 bits and
 * corrupted it on 64-bit kernels.
 */
static void host1x_pushbuffer_push(struct push_buffer *pb, u32 op1, u32 op2)
{
	u32 pos = pb->pos;
	u32 *p = (u32 *)((void *)pb->mapped + pos);
	WARN_ON(pos == pb->fence);
	*(p++) = op1;
	*(p++) = op2;
	pb->pos = (pos + 8) & (pb->size_bytes - 1);
}

/*
 * Pop a number of two word slots from the push buffer
 * Caller must ensure push buffer is not empty
 */
static void host1x_pushbuffer_pop(struct push_buffer *pb, unsigned int slots)
{
	/* Advance the next write position */
	pb->fence = (pb->fence + slots * 8) & (pb->size_bytes - 1);
}

/*
 * Return the number of two word slots free in the push buffer
 */
static u32 host1x_pushbuffer_space(struct push_buffer *pb)
{
	return ((pb->fence - pb->pos) & (pb->size_bytes - 1)) / 8;
}

/*
 * Sleep (if necessary) until the requested event happens
 *   - CDMA_EVENT_SYNC_QUEUE_EMPTY : sync queue is completely empty.
 *     - Returns 1
 *   - CDMA_EVENT_PUSH_BUFFER_SPACE : there is space in the push buffer
 *     - Return the amount of space (> 0)
 * Must be called with the cdma lock held.
 */
unsigned int host1x_cdma_wait_locked(struct host1x_cdma *cdma,
				     enum cdma_event event)
{
	for (;;) {
		unsigned int space;

		if (event == CDMA_EVENT_SYNC_QUEUE_EMPTY)
			space = list_empty(&cdma->sync_queue) ? 1 : 0;
		else if (event == CDMA_EVENT_PUSH_BUFFER_SPACE) {
			struct push_buffer *pb = &cdma->push_buffer;
			space = host1x_pushbuffer_space(pb);
		} else {
			/*
			 * Unknown event; can't happen for current callers.
			 * NOTE(review): -EINVAL is returned through an
			 * unsigned type here, so callers would see a huge
			 * value — kept for interface compatibility.
			 */
			WARN_ON(1);
			return -EINVAL;
		}

		if (space)
			return space;

		trace_host1x_wait_cdma(dev_name(cdma_to_channel(cdma)->dev),
				       event);

		/* If somebody has managed to already start waiting, yield */
		if (cdma->event != CDMA_EVENT_NONE) {
			mutex_unlock(&cdma->lock);
			schedule();
			mutex_lock(&cdma->lock);
			continue;
		}
		cdma->event = event;

		mutex_unlock(&cdma->lock);
		down(&cdma->sem);
		mutex_lock(&cdma->lock);
	}
	return 0;
}

/*
 * Start timer that tracks the time spent by the job.
 * Must be called with the cdma lock held.
 */
static void cdma_start_timer_locked(struct host1x_cdma *cdma,
				    struct host1x_job *job)
{
	struct host1x *host = cdma_to_host1x(cdma);

	if (cdma->timeout.client) {
		/* timer already started */
		return;
	}

	cdma->timeout.client = job->client;
	cdma->timeout.syncpt = host1x_syncpt_get(host, job->syncpt_id);
	cdma->timeout.syncpt_val = job->syncpt_end;
	cdma->timeout.start_ktime = ktime_get();

	schedule_delayed_work(&cdma->timeout.wq,
			      msecs_to_jiffies(job->timeout));
}

/*
 * Stop timer when a buffer submission completes.
 * Must be called with the cdma lock held.
 */
static void stop_cdma_timer_locked(struct host1x_cdma *cdma)
{
	cancel_delayed_work(&cdma->timeout.wq);
	cdma->timeout.client = 0;
}

/*
 * For all sync queue entries that have already finished according to the
 * current sync point registers:
 *  - unpin & unref their mems
 *  - pop their push buffer slots
 *  - remove them from the sync queue
 * This is normally called from the host code's worker thread, but can be
 * called manually if necessary.
 * Must be called with the cdma lock held.
 */
static void update_cdma_locked(struct host1x_cdma *cdma)
{
	bool signal = false;
	struct host1x *host1x = cdma_to_host1x(cdma);
	struct host1x_job *job, *n;

	/* If CDMA is stopped, queue is cleared and we can return */
	if (!cdma->running)
		return;

	/*
	 * Walk the sync queue, reading the sync point registers as necessary,
	 * to consume as many sync queue entries as possible without blocking
	 */
	list_for_each_entry_safe(job, n, &cdma->sync_queue, list) {
		struct host1x_syncpt *sp =
			host1x_syncpt_get(host1x, job->syncpt_id);

		/* Check whether this syncpt has completed, and bail if not */
		if (!host1x_syncpt_is_expired(sp, job->syncpt_end)) {
			/* Start timer on next pending syncpt */
			if (job->timeout)
				cdma_start_timer_locked(cdma, job);
			break;
		}

		/* Cancel timeout, when a buffer completes */
		if (cdma->timeout.client)
			stop_cdma_timer_locked(cdma);

		/* Unpin the memory */
		host1x_job_unpin(job);

		/* Pop push buffer slots */
		if (job->num_slots) {
			struct push_buffer *pb = &cdma->push_buffer;
			host1x_pushbuffer_pop(pb, job->num_slots);
			if (cdma->event == CDMA_EVENT_PUSH_BUFFER_SPACE)
				signal = true;
		}

		list_del(&job->list);
		host1x_job_put(job);
	}

	if (cdma->event == CDMA_EVENT_SYNC_QUEUE_EMPTY &&
	    list_empty(&cdma->sync_queue))
		signal = true;

	if (signal) {
		cdma->event = CDMA_EVENT_NONE;
		up(&cdma->sem);
	}
}

void host1x_cdma_update_sync_queue(struct host1x_cdma *cdma,
				   struct device *dev)
{
	u32 restart_addr;
	u32 syncpt_incrs;
	struct host1x_job *job = NULL;
	u32 syncpt_val;
	struct host1x *host1x = cdma_to_host1x(cdma);

	syncpt_val = host1x_syncpt_load(cdma->timeout.syncpt);

	dev_dbg(dev, "%s: starting cleanup (thresh %d)\n",
		__func__, syncpt_val);

	/*
	 * Move the sync_queue read pointer to the first entry that hasn't
	 * completed based on the current HW syncpt value. It's likely there
	 * won't be any (i.e. we're still at the head), but covers the case
	 * where a syncpt incr happens just prior/during the teardown.
	 */

	dev_dbg(dev, "%s: skip completed buffers still in sync_queue\n",
		__func__);

	list_for_each_entry(job, &cdma->sync_queue, list) {
		if (syncpt_val < job->syncpt_end)
			break;

		host1x_job_dump(dev, job);
	}

	/*
	 * Walk the sync_queue, first incrementing with the CPU syncpts that
	 * are partially executed (the first buffer) or fully skipped while
	 * still in the current context (slots are also NOP-ed).
	 *
	 * At the point contexts are interleaved, syncpt increments must be
	 * done inline with the pushbuffer from a GATHER buffer to maintain
	 * the order (slots are modified to be a GATHER of syncpt incrs).
	 *
	 * Note: save in restart_addr the location where the timed out buffer
	 * started in the PB, so we can start the refetch from there (with the
	 * modified NOP-ed PB slots). This lets things appear to have completed
	 * properly for this buffer and resources are freed.
	 */

	dev_dbg(dev, "%s: perform CPU incr on pending same ctx buffers\n",
		__func__);

	if (!list_empty(&cdma->sync_queue))
		restart_addr = job->first_get;
	else
		restart_addr = cdma->last_pos;

	/* do CPU increments as long as this context continues */
	list_for_each_entry_from(job, &cdma->sync_queue, list) {
		/* different context, gets us out of this loop */
		if (job->client != cdma->timeout.client)
			break;

		/* won't need a timeout when replayed */
		job->timeout = 0;

		syncpt_incrs = job->syncpt_end - syncpt_val;
		dev_dbg(dev, "%s: CPU incr (%d)\n", __func__, syncpt_incrs);

		host1x_job_dump(dev, job);

		/* safe to use CPU to incr syncpts */
		host1x_hw_cdma_timeout_cpu_incr(host1x, cdma, job->first_get,
						syncpt_incrs, job->syncpt_end,
						job->num_slots);

		syncpt_val += syncpt_incrs;
	}

	/* The following sumbits from the same client may be dependent on the
	 * failed submit and therefore they may fail. Force a small timeout
	 * to make the queue cleanup faster */

	list_for_each_entry_from(job, &cdma->sync_queue, list)
		if (job->client == cdma->timeout.client)
			job->timeout = min_t(unsigned int, job->timeout, 500);

	dev_dbg(dev, "%s: finished sync_queue modification\n", __func__);

	/* roll back DMAGET and start up channel again */
	host1x_hw_cdma_resume(host1x, cdma, restart_addr);
}

/*
 * Create a cdma
 */
int host1x_cdma_init(struct host1x_cdma *cdma)
{
	int err;

	mutex_init(&cdma->lock);
	sema_init(&cdma->sem, 0);

	INIT_LIST_HEAD(&cdma->sync_queue);

	cdma->event = CDMA_EVENT_NONE;
	cdma->running = false;
	cdma->torndown = false;

	err = host1x_pushbuffer_init(&cdma->push_buffer);
	if (err)
		return err;
	return 0;
}

/*
 * Destroy a cdma
 */
int host1x_cdma_deinit(struct host1x_cdma *cdma)
{
	struct push_buffer *pb = &cdma->push_buffer;
	struct host1x *host1x = cdma_to_host1x(cdma);

	if (cdma->running) {
		pr_warn("%s: CDMA still running\n", __func__);
		return -EBUSY;
	}

	host1x_pushbuffer_destroy(pb);
	host1x_hw_cdma_timeout_destroy(host1x, cdma);

	return 0;
}

/*
 * Begin a cdma submit
 */
int host1x_cdma_begin(struct host1x_cdma *cdma, struct host1x_job *job)
{
	struct host1x *host1x = cdma_to_host1x(cdma);

	mutex_lock(&cdma->lock);

	if (job->timeout) {
		/* init state on first submit with timeout value */
		if (!cdma->timeout.initialized) {
			int err;
			err = host1x_hw_cdma_timeout_init(host1x, cdma,
							  job->syncpt_id);
			if (err) {
				mutex_unlock(&cdma->lock);
				return err;
			}
		}
	}
	if (!cdma->running)
		host1x_hw_cdma_start(host1x, cdma);

	cdma->slots_free = 0;
	cdma->slots_used = 0;
	cdma->first_get = cdma->push_buffer.pos;

	trace_host1x_cdma_begin(dev_name(job->channel->dev));
	return 0;
}

/*
 * Push two words into a push buffer slot
 * Blocks as necessary if the push buffer is full.
 */
void host1x_cdma_push(struct host1x_cdma *cdma, u32 op1, u32 op2)
{
	struct host1x *host1x = cdma_to_host1x(cdma);
	struct push_buffer *pb = &cdma->push_buffer;
	u32 slots_free = cdma->slots_free;

	if (host1x_debug_trace_cmdbuf)
		trace_host1x_cdma_push(dev_name(cdma_to_channel(cdma)->dev),
				       op1, op2);

	if (slots_free == 0) {
		host1x_hw_cdma_flush(host1x, cdma);
		slots_free = host1x_cdma_wait_locked(cdma,
						CDMA_EVENT_PUSH_BUFFER_SPACE);
	}
	cdma->slots_free = slots_free - 1;
	cdma->slots_used++;
	host1x_pushbuffer_push(pb, op1, op2);
}

/*
 * End a cdma submit
 * Kick off DMA, add job to the sync queue, and a number of slots to be freed
 * from the pushbuffer. The handles for a submit must all be pinned at the same
 * time, but they can be unpinned in smaller chunks.
 */
void host1x_cdma_end(struct host1x_cdma *cdma,
		     struct host1x_job *job)
{
	struct host1x *host1x = cdma_to_host1x(cdma);
	bool idle = list_empty(&cdma->sync_queue);

	host1x_hw_cdma_flush(host1x, cdma);

	job->first_get = cdma->first_get;
	job->num_slots = cdma->slots_used;
	host1x_job_get(job);
	list_add_tail(&job->list, &cdma->sync_queue);

	/* start timer on idle -> active transitions */
	if (job->timeout && idle)
		cdma_start_timer_locked(cdma, job);

	trace_host1x_cdma_end(dev_name(job->channel->dev));
	mutex_unlock(&cdma->lock);
}

/*
 * Update cdma state according to current sync point values
 */
void host1x_cdma_update(struct host1x_cdma *cdma)
{
	mutex_lock(&cdma->lock);
	update_cdma_locked(cdma);
	mutex_unlock(&cdma->lock);
}
gpl-2.0
Split-Screen/android_kernel_motorola_msm8939
fs/nfs/nfs3xdr.c
2233
55947
/* * linux/fs/nfs/nfs3xdr.c * * XDR functions to encode/decode NFSv3 RPC arguments and results. * * Copyright (C) 1996, 1997 Olaf Kirch */ #include <linux/param.h> #include <linux/time.h> #include <linux/mm.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/in.h> #include <linux/pagemap.h> #include <linux/proc_fs.h> #include <linux/kdev_t.h> #include <linux/sunrpc/clnt.h> #include <linux/nfs.h> #include <linux/nfs3.h> #include <linux/nfs_fs.h> #include <linux/nfsacl.h> #include "internal.h" #define NFSDBG_FACILITY NFSDBG_XDR /* Mapping from NFS error code to "errno" error code. */ #define errno_NFSERR_IO EIO /* * Declare the space requirements for NFS arguments and replies as * number of 32bit-words */ #define NFS3_fhandle_sz (1+16) #define NFS3_fh_sz (NFS3_fhandle_sz) /* shorthand */ #define NFS3_sattr_sz (15) #define NFS3_filename_sz (1+(NFS3_MAXNAMLEN>>2)) #define NFS3_path_sz (1+(NFS3_MAXPATHLEN>>2)) #define NFS3_fattr_sz (21) #define NFS3_cookieverf_sz (NFS3_COOKIEVERFSIZE>>2) #define NFS3_wcc_attr_sz (6) #define NFS3_pre_op_attr_sz (1+NFS3_wcc_attr_sz) #define NFS3_post_op_attr_sz (1+NFS3_fattr_sz) #define NFS3_wcc_data_sz (NFS3_pre_op_attr_sz+NFS3_post_op_attr_sz) #define NFS3_diropargs_sz (NFS3_fh_sz+NFS3_filename_sz) #define NFS3_getattrargs_sz (NFS3_fh_sz) #define NFS3_setattrargs_sz (NFS3_fh_sz+NFS3_sattr_sz+3) #define NFS3_lookupargs_sz (NFS3_fh_sz+NFS3_filename_sz) #define NFS3_accessargs_sz (NFS3_fh_sz+1) #define NFS3_readlinkargs_sz (NFS3_fh_sz) #define NFS3_readargs_sz (NFS3_fh_sz+3) #define NFS3_writeargs_sz (NFS3_fh_sz+5) #define NFS3_createargs_sz (NFS3_diropargs_sz+NFS3_sattr_sz) #define NFS3_mkdirargs_sz (NFS3_diropargs_sz+NFS3_sattr_sz) #define NFS3_symlinkargs_sz (NFS3_diropargs_sz+1+NFS3_sattr_sz) #define NFS3_mknodargs_sz (NFS3_diropargs_sz+2+NFS3_sattr_sz) #define NFS3_removeargs_sz (NFS3_fh_sz+NFS3_filename_sz) #define NFS3_renameargs_sz (NFS3_diropargs_sz+NFS3_diropargs_sz) #define NFS3_linkargs_sz 
(NFS3_fh_sz+NFS3_diropargs_sz) #define NFS3_readdirargs_sz (NFS3_fh_sz+NFS3_cookieverf_sz+3) #define NFS3_readdirplusargs_sz (NFS3_fh_sz+NFS3_cookieverf_sz+4) #define NFS3_commitargs_sz (NFS3_fh_sz+3) #define NFS3_getattrres_sz (1+NFS3_fattr_sz) #define NFS3_setattrres_sz (1+NFS3_wcc_data_sz) #define NFS3_removeres_sz (NFS3_setattrres_sz) #define NFS3_lookupres_sz (1+NFS3_fh_sz+(2 * NFS3_post_op_attr_sz)) #define NFS3_accessres_sz (1+NFS3_post_op_attr_sz+1) #define NFS3_readlinkres_sz (1+NFS3_post_op_attr_sz+1) #define NFS3_readres_sz (1+NFS3_post_op_attr_sz+3) #define NFS3_writeres_sz (1+NFS3_wcc_data_sz+4) #define NFS3_createres_sz (1+NFS3_fh_sz+NFS3_post_op_attr_sz+NFS3_wcc_data_sz) #define NFS3_renameres_sz (1+(2 * NFS3_wcc_data_sz)) #define NFS3_linkres_sz (1+NFS3_post_op_attr_sz+NFS3_wcc_data_sz) #define NFS3_readdirres_sz (1+NFS3_post_op_attr_sz+2) #define NFS3_fsstatres_sz (1+NFS3_post_op_attr_sz+13) #define NFS3_fsinfores_sz (1+NFS3_post_op_attr_sz+12) #define NFS3_pathconfres_sz (1+NFS3_post_op_attr_sz+6) #define NFS3_commitres_sz (1+NFS3_wcc_data_sz+2) #define ACL3_getaclargs_sz (NFS3_fh_sz+1) #define ACL3_setaclargs_sz (NFS3_fh_sz+1+ \ XDR_QUADLEN(NFS_ACL_INLINE_BUFSIZE)) #define ACL3_getaclres_sz (1+NFS3_post_op_attr_sz+1+ \ XDR_QUADLEN(NFS_ACL_INLINE_BUFSIZE)) #define ACL3_setaclres_sz (1+NFS3_post_op_attr_sz) static int nfs3_stat_to_errno(enum nfs_stat); /* * Map file type to S_IFMT bits */ static const umode_t nfs_type2fmt[] = { [NF3BAD] = 0, [NF3REG] = S_IFREG, [NF3DIR] = S_IFDIR, [NF3BLK] = S_IFBLK, [NF3CHR] = S_IFCHR, [NF3LNK] = S_IFLNK, [NF3SOCK] = S_IFSOCK, [NF3FIFO] = S_IFIFO, }; /* * While encoding arguments, set up the reply buffer in advance to * receive reply data directly into the page cache. 
*/ static void prepare_reply_buffer(struct rpc_rqst *req, struct page **pages, unsigned int base, unsigned int len, unsigned int bufsize) { struct rpc_auth *auth = req->rq_cred->cr_auth; unsigned int replen; replen = RPC_REPHDRSIZE + auth->au_rslack + bufsize; xdr_inline_pages(&req->rq_rcv_buf, replen << 2, pages, base, len); } /* * Handle decode buffer overflows out-of-line. */ static void print_overflow_msg(const char *func, const struct xdr_stream *xdr) { dprintk("NFS: %s prematurely hit the end of our receive buffer. " "Remaining buffer length is %tu words.\n", func, xdr->end - xdr->p); } /* * Encode/decode NFSv3 basic data types * * Basic NFSv3 data types are defined in section 2.5 of RFC 1813: * "NFS Version 3 Protocol Specification". * * Not all basic data types have their own encoding and decoding * functions. For run-time efficiency, some data types are encoded * or decoded inline. */ static void encode_uint32(struct xdr_stream *xdr, u32 value) { __be32 *p = xdr_reserve_space(xdr, 4); *p = cpu_to_be32(value); } static int decode_uint32(struct xdr_stream *xdr, u32 *value) { __be32 *p; p = xdr_inline_decode(xdr, 4); if (unlikely(p == NULL)) goto out_overflow; *value = be32_to_cpup(p); return 0; out_overflow: print_overflow_msg(__func__, xdr); return -EIO; } static int decode_uint64(struct xdr_stream *xdr, u64 *value) { __be32 *p; p = xdr_inline_decode(xdr, 8); if (unlikely(p == NULL)) goto out_overflow; xdr_decode_hyper(p, value); return 0; out_overflow: print_overflow_msg(__func__, xdr); return -EIO; } /* * fileid3 * * typedef uint64 fileid3; */ static __be32 *xdr_decode_fileid3(__be32 *p, u64 *fileid) { return xdr_decode_hyper(p, fileid); } static int decode_fileid3(struct xdr_stream *xdr, u64 *fileid) { return decode_uint64(xdr, fileid); } /* * filename3 * * typedef string filename3<>; */ static void encode_filename3(struct xdr_stream *xdr, const char *name, u32 length) { __be32 *p; WARN_ON_ONCE(length > NFS3_MAXNAMLEN); p = xdr_reserve_space(xdr, 4 + 
length); xdr_encode_opaque(p, name, length); } static int decode_inline_filename3(struct xdr_stream *xdr, const char **name, u32 *length) { __be32 *p; u32 count; p = xdr_inline_decode(xdr, 4); if (unlikely(p == NULL)) goto out_overflow; count = be32_to_cpup(p); if (count > NFS3_MAXNAMLEN) goto out_nametoolong; p = xdr_inline_decode(xdr, count); if (unlikely(p == NULL)) goto out_overflow; *name = (const char *)p; *length = count; return 0; out_nametoolong: dprintk("NFS: returned filename too long: %u\n", count); return -ENAMETOOLONG; out_overflow: print_overflow_msg(__func__, xdr); return -EIO; } /* * nfspath3 * * typedef string nfspath3<>; */ static void encode_nfspath3(struct xdr_stream *xdr, struct page **pages, const u32 length) { encode_uint32(xdr, length); xdr_write_pages(xdr, pages, 0, length); } static int decode_nfspath3(struct xdr_stream *xdr) { u32 recvd, count; __be32 *p; p = xdr_inline_decode(xdr, 4); if (unlikely(p == NULL)) goto out_overflow; count = be32_to_cpup(p); if (unlikely(count >= xdr->buf->page_len || count > NFS3_MAXPATHLEN)) goto out_nametoolong; recvd = xdr_read_pages(xdr, count); if (unlikely(count > recvd)) goto out_cheating; xdr_terminate_string(xdr->buf, count); return 0; out_nametoolong: dprintk("NFS: returned pathname too long: %u\n", count); return -ENAMETOOLONG; out_cheating: dprintk("NFS: server cheating in pathname result: " "count %u > recvd %u\n", count, recvd); return -EIO; out_overflow: print_overflow_msg(__func__, xdr); return -EIO; } /* * cookie3 * * typedef uint64 cookie3 */ static __be32 *xdr_encode_cookie3(__be32 *p, u64 cookie) { return xdr_encode_hyper(p, cookie); } static int decode_cookie3(struct xdr_stream *xdr, u64 *cookie) { return decode_uint64(xdr, cookie); } /* * cookieverf3 * * typedef opaque cookieverf3[NFS3_COOKIEVERFSIZE]; */ static __be32 *xdr_encode_cookieverf3(__be32 *p, const __be32 *verifier) { memcpy(p, verifier, NFS3_COOKIEVERFSIZE); return p + XDR_QUADLEN(NFS3_COOKIEVERFSIZE); } static int 
decode_cookieverf3(struct xdr_stream *xdr, __be32 *verifier) { __be32 *p; p = xdr_inline_decode(xdr, NFS3_COOKIEVERFSIZE); if (unlikely(p == NULL)) goto out_overflow; memcpy(verifier, p, NFS3_COOKIEVERFSIZE); return 0; out_overflow: print_overflow_msg(__func__, xdr); return -EIO; } /* * createverf3 * * typedef opaque createverf3[NFS3_CREATEVERFSIZE]; */ static void encode_createverf3(struct xdr_stream *xdr, const __be32 *verifier) { __be32 *p; p = xdr_reserve_space(xdr, NFS3_CREATEVERFSIZE); memcpy(p, verifier, NFS3_CREATEVERFSIZE); } static int decode_writeverf3(struct xdr_stream *xdr, struct nfs_write_verifier *verifier) { __be32 *p; p = xdr_inline_decode(xdr, NFS3_WRITEVERFSIZE); if (unlikely(p == NULL)) goto out_overflow; memcpy(verifier->data, p, NFS3_WRITEVERFSIZE); return 0; out_overflow: print_overflow_msg(__func__, xdr); return -EIO; } /* * size3 * * typedef uint64 size3; */ static __be32 *xdr_decode_size3(__be32 *p, u64 *size) { return xdr_decode_hyper(p, size); } /* * nfsstat3 * * enum nfsstat3 { * NFS3_OK = 0, * ... 
* } */ #define NFS3_OK NFS_OK static int decode_nfsstat3(struct xdr_stream *xdr, enum nfs_stat *status) { __be32 *p; p = xdr_inline_decode(xdr, 4); if (unlikely(p == NULL)) goto out_overflow; *status = be32_to_cpup(p); return 0; out_overflow: print_overflow_msg(__func__, xdr); return -EIO; } /* * ftype3 * * enum ftype3 { * NF3REG = 1, * NF3DIR = 2, * NF3BLK = 3, * NF3CHR = 4, * NF3LNK = 5, * NF3SOCK = 6, * NF3FIFO = 7 * }; */ static void encode_ftype3(struct xdr_stream *xdr, const u32 type) { encode_uint32(xdr, type); } static __be32 *xdr_decode_ftype3(__be32 *p, umode_t *mode) { u32 type; type = be32_to_cpup(p++); if (type > NF3FIFO) type = NF3NON; *mode = nfs_type2fmt[type]; return p; } /* * specdata3 * * struct specdata3 { * uint32 specdata1; * uint32 specdata2; * }; */ static void encode_specdata3(struct xdr_stream *xdr, const dev_t rdev) { __be32 *p; p = xdr_reserve_space(xdr, 8); *p++ = cpu_to_be32(MAJOR(rdev)); *p = cpu_to_be32(MINOR(rdev)); } static __be32 *xdr_decode_specdata3(__be32 *p, dev_t *rdev) { unsigned int major, minor; major = be32_to_cpup(p++); minor = be32_to_cpup(p++); *rdev = MKDEV(major, minor); if (MAJOR(*rdev) != major || MINOR(*rdev) != minor) *rdev = 0; return p; } /* * nfs_fh3 * * struct nfs_fh3 { * opaque data<NFS3_FHSIZE>; * }; */ static void encode_nfs_fh3(struct xdr_stream *xdr, const struct nfs_fh *fh) { __be32 *p; WARN_ON_ONCE(fh->size > NFS3_FHSIZE); p = xdr_reserve_space(xdr, 4 + fh->size); xdr_encode_opaque(p, fh->data, fh->size); } static int decode_nfs_fh3(struct xdr_stream *xdr, struct nfs_fh *fh) { u32 length; __be32 *p; p = xdr_inline_decode(xdr, 4); if (unlikely(p == NULL)) goto out_overflow; length = be32_to_cpup(p++); if (unlikely(length > NFS3_FHSIZE)) goto out_toobig; p = xdr_inline_decode(xdr, length); if (unlikely(p == NULL)) goto out_overflow; fh->size = length; memcpy(fh->data, p, length); return 0; out_toobig: dprintk("NFS: file handle size (%u) too big\n", length); return -E2BIG; out_overflow: 
print_overflow_msg(__func__, xdr); return -EIO; } static void zero_nfs_fh3(struct nfs_fh *fh) { memset(fh, 0, sizeof(*fh)); } /* * nfstime3 * * struct nfstime3 { * uint32 seconds; * uint32 nseconds; * }; */ static __be32 *xdr_encode_nfstime3(__be32 *p, const struct timespec *timep) { *p++ = cpu_to_be32(timep->tv_sec); *p++ = cpu_to_be32(timep->tv_nsec); return p; } static __be32 *xdr_decode_nfstime3(__be32 *p, struct timespec *timep) { timep->tv_sec = be32_to_cpup(p++); timep->tv_nsec = be32_to_cpup(p++); return p; } /* * sattr3 * * enum time_how { * DONT_CHANGE = 0, * SET_TO_SERVER_TIME = 1, * SET_TO_CLIENT_TIME = 2 * }; * * union set_mode3 switch (bool set_it) { * case TRUE: * mode3 mode; * default: * void; * }; * * union set_uid3 switch (bool set_it) { * case TRUE: * uid3 uid; * default: * void; * }; * * union set_gid3 switch (bool set_it) { * case TRUE: * gid3 gid; * default: * void; * }; * * union set_size3 switch (bool set_it) { * case TRUE: * size3 size; * default: * void; * }; * * union set_atime switch (time_how set_it) { * case SET_TO_CLIENT_TIME: * nfstime3 atime; * default: * void; * }; * * union set_mtime switch (time_how set_it) { * case SET_TO_CLIENT_TIME: * nfstime3 mtime; * default: * void; * }; * * struct sattr3 { * set_mode3 mode; * set_uid3 uid; * set_gid3 gid; * set_size3 size; * set_atime atime; * set_mtime mtime; * }; */ static void encode_sattr3(struct xdr_stream *xdr, const struct iattr *attr) { u32 nbytes; __be32 *p; /* * In order to make only a single xdr_reserve_space() call, * pre-compute the total number of bytes to be reserved. * Six boolean values, one for each set_foo field, are always * present in the encoded result, so start there. 
 */
	/* Worst-case size: six discriminants plus the optional fields */
	nbytes = 6 * 4;
	if (attr->ia_valid & ATTR_MODE)
		nbytes += 4;
	if (attr->ia_valid & ATTR_UID)
		nbytes += 4;
	if (attr->ia_valid & ATTR_GID)
		nbytes += 4;
	if (attr->ia_valid & ATTR_SIZE)
		nbytes += 8;
	if (attr->ia_valid & ATTR_ATIME_SET)
		nbytes += 8;
	if (attr->ia_valid & ATTR_MTIME_SET)
		nbytes += 8;
	p = xdr_reserve_space(xdr, nbytes);

	if (attr->ia_valid & ATTR_MODE) {
		*p++ = xdr_one;
		*p++ = cpu_to_be32(attr->ia_mode & S_IALLUGO);
	} else
		*p++ = xdr_zero;

	if (attr->ia_valid & ATTR_UID) {
		*p++ = xdr_one;
		*p++ = cpu_to_be32(from_kuid(&init_user_ns, attr->ia_uid));
	} else
		*p++ = xdr_zero;

	if (attr->ia_valid & ATTR_GID) {
		*p++ = xdr_one;
		*p++ = cpu_to_be32(from_kgid(&init_user_ns, attr->ia_gid));
	} else
		*p++ = xdr_zero;

	if (attr->ia_valid & ATTR_SIZE) {
		*p++ = xdr_one;
		p = xdr_encode_hyper(p, (u64)attr->ia_size);
	} else
		*p++ = xdr_zero;

	/*
	 * For atime/mtime: discriminant 2 = SET_TO_CLIENT_TIME (time
	 * follows on the wire), 1 = SET_TO_SERVER_TIME (no time sent),
	 * 0 = DONT_CHANGE.
	 */
	if (attr->ia_valid & ATTR_ATIME_SET) {
		*p++ = xdr_two;
		p = xdr_encode_nfstime3(p, &attr->ia_atime);
	} else if (attr->ia_valid & ATTR_ATIME) {
		*p++ = xdr_one;
	} else
		*p++ = xdr_zero;

	if (attr->ia_valid & ATTR_MTIME_SET) {
		*p++ = xdr_two;
		xdr_encode_nfstime3(p, &attr->ia_mtime);
	} else if (attr->ia_valid & ATTR_MTIME) {
		*p = xdr_one;
	} else
		*p = xdr_zero;
}

/*
 * fattr3
 *
 *	struct fattr3 {
 *		ftype3		type;
 *		mode3		mode;
 *		uint32		nlink;
 *		uid3		uid;
 *		gid3		gid;
 *		size3		size;
 *		size3		used;
 *		specdata3	rdev;
 *		uint64		fsid;
 *		fileid3		fileid;
 *		nfstime3	atime;
 *		nfstime3	mtime;
 *		nfstime3	ctime;
 *	};
 */
/*
 * Decode a full fattr3 into @fattr.  The field decode order below
 * must match the on-the-wire layout exactly.  Returns 0 on success,
 * -EINVAL on an unmappable uid/gid, -EIO on a short buffer.
 */
static int decode_fattr3(struct xdr_stream *xdr, struct nfs_fattr *fattr)
{
	umode_t fmode;
	__be32 *p;

	p = xdr_inline_decode(xdr, NFS3_fattr_sz << 2);
	if (unlikely(p == NULL))
		goto out_overflow;

	p = xdr_decode_ftype3(p, &fmode);

	/* File-type bits come from ftype3, permission bits from mode3 */
	fattr->mode = (be32_to_cpup(p++) & ~S_IFMT) | fmode;
	fattr->nlink = be32_to_cpup(p++);
	fattr->uid = make_kuid(&init_user_ns, be32_to_cpup(p++));
	if (!uid_valid(fattr->uid))
		goto out_uid;
	fattr->gid = make_kgid(&init_user_ns, be32_to_cpup(p++));
	if (!gid_valid(fattr->gid))
		goto out_gid;

	p = xdr_decode_size3(p, &fattr->size);
	p = xdr_decode_size3(p, &fattr->du.nfs3.used);
	p = xdr_decode_specdata3(p, &fattr->rdev);

	p = xdr_decode_hyper(p, &fattr->fsid.major);
	fattr->fsid.minor = 0;

	p = xdr_decode_fileid3(p, &fattr->fileid);
	p = xdr_decode_nfstime3(p, &fattr->atime);
	p = xdr_decode_nfstime3(p, &fattr->mtime);
	xdr_decode_nfstime3(p, &fattr->ctime);
	/* NFSv3 has no change attribute; synthesize one from ctime */
	fattr->change_attr = nfs_timespec_to_change_attr(&fattr->ctime);

	fattr->valid |= NFS_ATTR_FATTR_V3;
	return 0;
out_uid:
	dprintk("NFS: returned invalid uid\n");
	return -EINVAL;
out_gid:
	dprintk("NFS: returned invalid gid\n");
	return -EINVAL;
out_overflow:
	print_overflow_msg(__func__, xdr);
	return -EIO;
}

/*
 * post_op_attr
 *
 *	union post_op_attr switch (bool attributes_follow) {
 *	case TRUE:
 *		fattr3	attributes;
 *	case FALSE:
 *		void;
 *	};
 */
/* Decode optional attributes; leaves @fattr untouched when absent. */
static int decode_post_op_attr(struct xdr_stream *xdr, struct nfs_fattr *fattr)
{
	__be32 *p;

	p = xdr_inline_decode(xdr, 4);
	if (unlikely(p == NULL))
		goto out_overflow;
	if (*p != xdr_zero)
		return decode_fattr3(xdr, fattr);
	return 0;
out_overflow:
	print_overflow_msg(__func__, xdr);
	return -EIO;
}

/*
 * wcc_attr
 *	struct wcc_attr {
 *		size3		size;
 *		nfstime3	mtime;
 *		nfstime3	ctime;
 *	};
 */
/* Decode pre-operation weak cache consistency attributes. */
static int decode_wcc_attr(struct xdr_stream *xdr, struct nfs_fattr *fattr)
{
	__be32 *p;

	p = xdr_inline_decode(xdr, NFS3_wcc_attr_sz << 2);
	if (unlikely(p == NULL))
		goto out_overflow;

	fattr->valid |= NFS_ATTR_FATTR_PRESIZE
		| NFS_ATTR_FATTR_PRECHANGE
		| NFS_ATTR_FATTR_PREMTIME
		| NFS_ATTR_FATTR_PRECTIME;

	p = xdr_decode_size3(p, &fattr->pre_size);
	p = xdr_decode_nfstime3(p, &fattr->pre_mtime);
	xdr_decode_nfstime3(p, &fattr->pre_ctime);
	fattr->pre_change_attr = nfs_timespec_to_change_attr(&fattr->pre_ctime);

	return 0;
out_overflow:
	print_overflow_msg(__func__, xdr);
	return -EIO;
}

/*
 * pre_op_attr
 *	union pre_op_attr switch (bool attributes_follow) {
 *	case TRUE:
 *		wcc_attr	attributes;
 *	case FALSE:
 *		void;
 *	};
 *
 * wcc_data
 *
 *	struct wcc_data {
 *		pre_op_attr	before;
 *		post_op_attr	after;
 *	};
 */
static int decode_pre_op_attr(struct xdr_stream *xdr, struct nfs_fattr *fattr)
{
	__be32 *p;

	p = xdr_inline_decode(xdr, 4);
	if (unlikely(p == NULL))
		goto out_overflow;
	if (*p != xdr_zero)
		return decode_wcc_attr(xdr, fattr);
	return 0;
out_overflow:
	print_overflow_msg(__func__, xdr);
	return -EIO;
}

/* Decode a wcc_data pair: pre-op attrs followed by post-op attrs. */
static int decode_wcc_data(struct xdr_stream *xdr, struct nfs_fattr *fattr)
{
	int error;

	error = decode_pre_op_attr(xdr, fattr);
	if (unlikely(error))
		goto out;
	error = decode_post_op_attr(xdr, fattr);
out:
	return error;
}

/*
 * post_op_fh3
 *
 *	union post_op_fh3 switch (bool handle_follows) {
 *	case TRUE:
 *		nfs_fh3	handle;
 *	case FALSE:
 *		void;
 *	};
 */
/* Decode an optional file handle; zeroes @fh when none follows. */
static int decode_post_op_fh3(struct xdr_stream *xdr, struct nfs_fh *fh)
{
	__be32 *p = xdr_inline_decode(xdr, 4);
	if (unlikely(p == NULL))
		goto out_overflow;
	if (*p != xdr_zero)
		return decode_nfs_fh3(xdr, fh);
	zero_nfs_fh3(fh);
	return 0;
out_overflow:
	print_overflow_msg(__func__, xdr);
	return -EIO;
}

/*
 * diropargs3
 *
 *	struct diropargs3 {
 *		nfs_fh3		dir;
 *		filename3	name;
 *	};
 */
static void encode_diropargs3(struct xdr_stream *xdr, const struct nfs_fh *fh,
			      const char *name, u32 length)
{
	encode_nfs_fh3(xdr, fh);
	encode_filename3(xdr, name, length);
}


/*
 * NFSv3 XDR encode functions
 *
 * NFSv3 argument types are defined in section 3.3 of RFC 1813:
 * "NFS Version 3 Protocol Specification".
 */

/*
 * 3.3.1  GETATTR3args
 *
 *	struct GETATTR3args {
 *		nfs_fh3  object;
 *	};
 */
static void nfs3_xdr_enc_getattr3args(struct rpc_rqst *req,
				      struct xdr_stream *xdr,
				      const struct nfs_fh *fh)
{
	encode_nfs_fh3(xdr, fh);
}

/*
 * 3.3.2  SETATTR3args
 *
 *	union sattrguard3 switch (bool check) {
 *	case TRUE:
 *		nfstime3  obj_ctime;
 *	case FALSE:
 *		void;
 *	};
 *
 *	struct SETATTR3args {
 *		nfs_fh3		object;
 *		sattr3		new_attributes;
 *		sattrguard3	guard;
 *	};
 */
/* Encode the optional ctime guard used to detect concurrent setattrs. */
static void encode_sattrguard3(struct xdr_stream *xdr,
			       const struct nfs3_sattrargs *args)
{
	__be32 *p;

	if (args->guard) {
		p = xdr_reserve_space(xdr, 4 + 8);
		*p++ = xdr_one;
		xdr_encode_nfstime3(p, &args->guardtime);
	} else {
		p = xdr_reserve_space(xdr, 4);
		*p = xdr_zero;
	}
}

static void nfs3_xdr_enc_setattr3args(struct rpc_rqst *req,
				      struct xdr_stream *xdr,
				      const struct nfs3_sattrargs *args)
{
	encode_nfs_fh3(xdr, args->fh);
	encode_sattr3(xdr, args->sattr);
	encode_sattrguard3(xdr, args);
}

/*
 * 3.3.3  LOOKUP3args
 *
 *	struct LOOKUP3args {
 *		diropargs3  what;
 *	};
 */
static void nfs3_xdr_enc_lookup3args(struct rpc_rqst *req,
				     struct xdr_stream *xdr,
				     const struct nfs3_diropargs *args)
{
	encode_diropargs3(xdr, args->fh, args->name, args->len);
}

/*
 * 3.3.4  ACCESS3args
 *
 *	struct ACCESS3args {
 *		nfs_fh3		object;
 *		uint32		access;
 *	};
 */
static void encode_access3args(struct xdr_stream *xdr,
			       const struct nfs3_accessargs *args)
{
	encode_nfs_fh3(xdr, args->fh);
	encode_uint32(xdr, args->access);
}

static void nfs3_xdr_enc_access3args(struct rpc_rqst *req,
				     struct xdr_stream *xdr,
				     const struct nfs3_accessargs *args)
{
	encode_access3args(xdr, args);
}

/*
 * 3.3.5  READLINK3args
 *
 *	struct READLINK3args {
 *		nfs_fh3	symlink;
 *	};
 */
static void nfs3_xdr_enc_readlink3args(struct rpc_rqst *req,
				       struct xdr_stream *xdr,
				       const struct nfs3_readlinkargs *args)
{
	encode_nfs_fh3(xdr, args->fh);
	/* Point the reply buffer at the caller's pages for the link data */
	prepare_reply_buffer(req, args->pages, args->pgbase,
					args->pglen, NFS3_readlinkres_sz);
}

/*
 * 3.3.6  READ3args
 *
 *	struct READ3args {
 *		nfs_fh3		file;
 *		offset3		offset;
 *		count3		count;
 *	};
 */
static void encode_read3args(struct xdr_stream *xdr,
			     const struct nfs_readargs *args)
{
	__be32 *p;

	encode_nfs_fh3(xdr, args->fh);

	p = xdr_reserve_space(xdr, 8 + 4);
	p = xdr_encode_hyper(p, args->offset);
	*p = cpu_to_be32(args->count);
}

static void nfs3_xdr_enc_read3args(struct rpc_rqst *req,
				   struct xdr_stream *xdr,
				   const struct nfs_readargs *args)
{
	encode_read3args(xdr, args);
	prepare_reply_buffer(req, args->pages, args->pgbase,
					args->count, NFS3_readres_sz);
	req->rq_rcv_buf.flags |= XDRBUF_READ;
}

/*
 * 3.3.7  WRITE3args
 *
 *	enum stable_how {
 *		UNSTABLE  = 0,
 *		DATA_SYNC = 1,
 *		FILE_SYNC = 2
 *	};
 *
 *	struct WRITE3args {
 *		nfs_fh3		file;
 *		offset3		offset;
 *		count3		count;
 *		stable_how	stable;
 *		opaque		data<>;
 *	};
 */
static void encode_write3args(struct xdr_stream *xdr,
			      const struct nfs_writeargs *args)
{
	__be32 *p;

	encode_nfs_fh3(xdr, args->fh);

	p = xdr_reserve_space(xdr, 8 + 4 + 4 + 4);
	p = xdr_encode_hyper(p, args->offset);
	*p++ = cpu_to_be32(args->count);
	*p++ = cpu_to_be32(args->stable);
	/* count appears twice on the wire: count3 and the opaque length */
	*p = cpu_to_be32(args->count);
	xdr_write_pages(xdr, args->pages, args->pgbase, args->count);
}

static void nfs3_xdr_enc_write3args(struct rpc_rqst *req,
				    struct xdr_stream *xdr,
				    const struct nfs_writeargs *args)
{
	encode_write3args(xdr, args);
	xdr->buf->flags |= XDRBUF_WRITE;
}

/*
 * 3.3.8  CREATE3args
 *
 *	enum createmode3 {
 *		UNCHECKED = 0,
 *		GUARDED   = 1,
 *		EXCLUSIVE = 2
 *	};
 *
 *	union createhow3 switch (createmode3 mode) {
 *	case UNCHECKED:
 *	case GUARDED:
 *		sattr3       obj_attributes;
 *	case EXCLUSIVE:
 *		createverf3  verf;
 *	};
 *
 *	struct CREATE3args {
 *		diropargs3	where;
 *		createhow3	how;
 *	};
 */
static void encode_createhow3(struct xdr_stream *xdr,
			      const struct nfs3_createargs *args)
{
	encode_uint32(xdr, args->createmode);
	switch (args->createmode) {
	case NFS3_CREATE_UNCHECKED:
	case NFS3_CREATE_GUARDED:
		encode_sattr3(xdr, args->sattr);
		break;
	case NFS3_CREATE_EXCLUSIVE:
		encode_createverf3(xdr, args->verifier);
		break;
	default:
		BUG();
	}
}

static void nfs3_xdr_enc_create3args(struct rpc_rqst *req,
				     struct xdr_stream *xdr,
				     const struct nfs3_createargs *args)
{
	encode_diropargs3(xdr, args->fh, args->name, args->len);
	encode_createhow3(xdr, args);
}

/*
 * 3.3.9  MKDIR3args
 *
 *	struct MKDIR3args {
 *		diropargs3	where;
 *		sattr3		attributes;
 *	};
 */
static void nfs3_xdr_enc_mkdir3args(struct rpc_rqst *req,
				    struct xdr_stream *xdr,
				    const struct nfs3_mkdirargs *args)
{
	encode_diropargs3(xdr, args->fh, args->name, args->len);
	encode_sattr3(xdr, args->sattr);
}

/*
 * 3.3.10  SYMLINK3args
 *
 *	struct symlinkdata3 {
 *		sattr3		symlink_attributes;
 *		nfspath3	symlink_data;
 *	};
 *
 *	struct SYMLINK3args {
 *		diropargs3	where;
 *		symlinkdata3	symlink;
 *	};
 */
static void encode_symlinkdata3(struct xdr_stream *xdr,
				const struct nfs3_symlinkargs *args)
{
	encode_sattr3(xdr, args->sattr);
	encode_nfspath3(xdr, args->pages, args->pathlen);
}

static void nfs3_xdr_enc_symlink3args(struct rpc_rqst *req,
				      struct xdr_stream *xdr,
				      const struct nfs3_symlinkargs *args)
{
	encode_diropargs3(xdr, args->fromfh, args->fromname, args->fromlen);
	encode_symlinkdata3(xdr, args);
}

/*
 * 3.3.11  MKNOD3args
 *
 *	struct devicedata3 {
 *		sattr3		dev_attributes;
 *		specdata3	spec;
 *	};
 *
 *	union mknoddata3 switch (ftype3 type) {
 *	case NF3CHR:
 *	case NF3BLK:
 *		devicedata3	device;
 *	case NF3SOCK:
 *	case NF3FIFO:
 *		sattr3		pipe_attributes;
 *	default:
 *		void;
 *	};
 *
 *	struct MKNOD3args {
 *		diropargs3	where;
 *		mknoddata3	what;
 *	};
 */
static void encode_devicedata3(struct xdr_stream *xdr,
			       const struct nfs3_mknodargs *args)
{
	encode_sattr3(xdr, args->sattr);
	encode_specdata3(xdr, args->rdev);
}

static void encode_mknoddata3(struct xdr_stream *xdr,
			      const struct nfs3_mknodargs *args)
{
	encode_ftype3(xdr, args->type);
	switch (args->type) {
	case NF3CHR:
	case NF3BLK:
		encode_devicedata3(xdr, args);
		break;
	case NF3SOCK:
	case NF3FIFO:
		encode_sattr3(xdr, args->sattr);
		break;
	case NF3REG:
	case NF3DIR:
		break;
	default:
		BUG();
	}
}

static void nfs3_xdr_enc_mknod3args(struct rpc_rqst *req,
				    struct xdr_stream *xdr,
				    const struct nfs3_mknodargs *args)
{
	encode_diropargs3(xdr, args->fh, args->name, args->len);
	encode_mknoddata3(xdr, args);
}

/*
 * 3.3.12  REMOVE3args
 *
 *	struct REMOVE3args {
 *		diropargs3  object;
 *	};
 */
static void nfs3_xdr_enc_remove3args(struct rpc_rqst *req,
				     struct xdr_stream *xdr,
				     const struct nfs_removeargs *args)
{
	encode_diropargs3(xdr, args->fh, args->name.name, args->name.len);
}

/*
 * 3.3.14  RENAME3args
 *
 *	struct RENAME3args {
 *		diropargs3	from;
 *		diropargs3	to;
 *	};
 */
static void nfs3_xdr_enc_rename3args(struct rpc_rqst *req,
				     struct xdr_stream *xdr,
				     const struct nfs_renameargs *args)
{
	const struct qstr *old = args->old_name;
	const struct qstr *new = args->new_name;

	encode_diropargs3(xdr, args->old_dir, old->name, old->len);
	encode_diropargs3(xdr, args->new_dir, new->name, new->len);
}

/*
 * 3.3.15  LINK3args
 *
 *	struct LINK3args {
 *		nfs_fh3		file;
 *		diropargs3	link;
 *	};
 */
static void nfs3_xdr_enc_link3args(struct rpc_rqst *req,
				   struct xdr_stream *xdr,
				   const struct nfs3_linkargs *args)
{
	encode_nfs_fh3(xdr, args->fromfh);
	encode_diropargs3(xdr, args->tofh, args->toname, args->tolen);
}

/*
 * 3.3.16  READDIR3args
 *
 *	struct READDIR3args {
 *		nfs_fh3		dir;
 *		cookie3		cookie;
 *		cookieverf3	cookieverf;
 *		count3		count;
 *	};
 */
static void encode_readdir3args(struct xdr_stream *xdr,
				const struct nfs3_readdirargs *args)
{
	__be32 *p;

	encode_nfs_fh3(xdr, args->fh);

	p = xdr_reserve_space(xdr, 8 + NFS3_COOKIEVERFSIZE + 4);
	p = xdr_encode_cookie3(p, args->cookie);
	p = xdr_encode_cookieverf3(p, args->verf);
	*p = cpu_to_be32(args->count);
}

static void nfs3_xdr_enc_readdir3args(struct rpc_rqst *req,
				      struct xdr_stream *xdr,
				      const struct nfs3_readdirargs *args)
{
	encode_readdir3args(xdr, args);
	prepare_reply_buffer(req, args->pages, 0,
				args->count, NFS3_readdirres_sz);
}

/*
 * 3.3.17  READDIRPLUS3args
 *
 *	struct READDIRPLUS3args {
 *		nfs_fh3		dir;
 *		cookie3		cookie;
 *		cookieverf3	cookieverf;
 *		count3		dircount;
 *		count3		maxcount;
 *	};
 */
static void
encode_readdirplus3args(struct xdr_stream *xdr,
			const struct nfs3_readdirargs *args)
{
	__be32 *p;

	encode_nfs_fh3(xdr, args->fh);

	p = xdr_reserve_space(xdr, 8 + NFS3_COOKIEVERFSIZE + 4 + 4);
	p = xdr_encode_cookie3(p, args->cookie);
	p = xdr_encode_cookieverf3(p, args->verf);

	/*
	 * readdirplus: need dircount + buffer size.
	 * We just make sure we make dircount big enough
	 */
	*p++ = cpu_to_be32(args->count >> 3);

	*p = cpu_to_be32(args->count);
}

static void nfs3_xdr_enc_readdirplus3args(struct rpc_rqst *req,
					  struct xdr_stream *xdr,
					  const struct nfs3_readdirargs *args)
{
	encode_readdirplus3args(xdr, args);
	prepare_reply_buffer(req, args->pages, 0,
				args->count, NFS3_readdirres_sz);
}

/*
 * 3.3.21  COMMIT3args
 *
 *	struct COMMIT3args {
 *		nfs_fh3		file;
 *		offset3		offset;
 *		count3		count;
 *	};
 */
static void encode_commit3args(struct xdr_stream *xdr,
			       const struct nfs_commitargs *args)
{
	__be32 *p;

	encode_nfs_fh3(xdr, args->fh);

	p = xdr_reserve_space(xdr, 8 + 4);
	p = xdr_encode_hyper(p, args->offset);
	*p = cpu_to_be32(args->count);
}

static void nfs3_xdr_enc_commit3args(struct rpc_rqst *req,
				     struct xdr_stream *xdr,
				     const struct nfs_commitargs *args)
{
	encode_commit3args(xdr, args);
}

#ifdef CONFIG_NFS_V3_ACL

static void nfs3_xdr_enc_getacl3args(struct rpc_rqst *req,
				     struct xdr_stream *xdr,
				     const struct nfs3_getaclargs *args)
{
	encode_nfs_fh3(xdr, args->fh);
	encode_uint32(xdr, args->mask);
	/* Only reserve reply pages when ACL data was actually requested */
	if (args->mask & (NFS_ACL | NFS_DFACL))
		prepare_reply_buffer(req, args->pages, 0,
					NFSACL_MAXPAGES << PAGE_SHIFT,
					ACL3_getaclres_sz);
}

static void nfs3_xdr_enc_setacl3args(struct rpc_rqst *req,
				     struct xdr_stream *xdr,
				     const struct nfs3_setaclargs *args)
{
	unsigned int base;
	int error;

	encode_nfs_fh3(xdr, NFS_FH(args->inode));
	encode_uint32(xdr, args->mask);

	base = req->rq_slen;
	if (args->npages != 0)
		xdr_write_pages(xdr, args->pages, 0, args->len);
	else
		xdr_reserve_space(xdr, NFS_ACL_INLINE_BUFSIZE);

	error = nfsacl_encode(xdr->buf, base, args->inode,
			    (args->mask & NFS_ACL) ?
			    args->acl_access : NULL, 1, 0);
	/* FIXME: this is just broken */
	BUG_ON(error < 0);
	error = nfsacl_encode(xdr->buf, base + error, args->inode,
			    (args->mask & NFS_DFACL) ?
			    args->acl_default : NULL, 1,
			    NFS_ACL_DEFAULT);
	BUG_ON(error < 0);
}

#endif /* CONFIG_NFS_V3_ACL */

/*
 * NFSv3 XDR decode functions
 *
 * NFSv3 result types are defined in section 3.3 of RFC 1813:
 * "NFS Version 3 Protocol Specification".
 */

/*
 * 3.3.1  GETATTR3res
 *
 *	struct GETATTR3resok {
 *		fattr3		obj_attributes;
 *	};
 *
 *	union GETATTR3res switch (nfsstat3 status) {
 *	case NFS3_OK:
 *		GETATTR3resok  resok;
 *	default:
 *		void;
 *	};
 */
static int nfs3_xdr_dec_getattr3res(struct rpc_rqst *req,
				    struct xdr_stream *xdr,
				    struct nfs_fattr *result)
{
	enum nfs_stat status;
	int error;

	error = decode_nfsstat3(xdr, &status);
	if (unlikely(error))
		goto out;
	if (status != NFS3_OK)
		goto out_default;
	error = decode_fattr3(xdr, result);
out:
	return error;
out_default:
	return nfs3_stat_to_errno(status);
}

/*
 * 3.3.2  SETATTR3res
 *
 *	struct SETATTR3resok {
 *		wcc_data  obj_wcc;
 *	};
 *
 *	struct SETATTR3resfail {
 *		wcc_data  obj_wcc;
 *	};
 *
 *	union SETATTR3res switch (nfsstat3 status) {
 *	case NFS3_OK:
 *		SETATTR3resok   resok;
 *	default:
 *		SETATTR3resfail resfail;
 *	};
 */
static int nfs3_xdr_dec_setattr3res(struct rpc_rqst *req,
				    struct xdr_stream *xdr,
				    struct nfs_fattr *result)
{
	enum nfs_stat status;
	int error;

	error = decode_nfsstat3(xdr, &status);
	if (unlikely(error))
		goto out;
	/* wcc_data is present in both the resok and resfail arms */
	error = decode_wcc_data(xdr, result);
	if (unlikely(error))
		goto out;
	if (status != NFS3_OK)
		goto out_status;
out:
	return error;
out_status:
	return nfs3_stat_to_errno(status);
}

/*
 * 3.3.3  LOOKUP3res
 *
 *	struct LOOKUP3resok {
 *		nfs_fh3		object;
 *		post_op_attr	obj_attributes;
 *		post_op_attr	dir_attributes;
 *	};
 *
 *	struct LOOKUP3resfail {
 *		post_op_attr	dir_attributes;
 *	};
 *
 *	union LOOKUP3res switch (nfsstat3 status) {
 *	case NFS3_OK:
 *		LOOKUP3resok	resok;
 *	default:
 *		LOOKUP3resfail	resfail;
 *	};
 */
static int nfs3_xdr_dec_lookup3res(struct rpc_rqst *req,
				   struct xdr_stream *xdr,
				   struct nfs3_diropres *result)
{
	enum nfs_stat status;
	int error;

	error = decode_nfsstat3(xdr, &status);
	if (unlikely(error))
		goto out;
	if (status != NFS3_OK)
		goto out_default;
	error = decode_nfs_fh3(xdr, result->fh);
	if (unlikely(error))
		goto out;
	error = decode_post_op_attr(xdr, result->fattr);
	if (unlikely(error))
		goto out;
	error = decode_post_op_attr(xdr, result->dir_attr);
out:
	return error;
out_default:
	/* The resfail arm still carries the directory attributes */
	error = decode_post_op_attr(xdr, result->dir_attr);
	if (unlikely(error))
		goto out;
	return nfs3_stat_to_errno(status);
}

/*
 * 3.3.4  ACCESS3res
 *
 *	struct ACCESS3resok {
 *		post_op_attr	obj_attributes;
 *		uint32		access;
 *	};
 *
 *	struct ACCESS3resfail {
 *		post_op_attr	obj_attributes;
 *	};
 *
 *	union ACCESS3res switch (nfsstat3 status) {
 *	case NFS3_OK:
 *		ACCESS3resok	resok;
 *	default:
 *		ACCESS3resfail	resfail;
 *	};
 */
static int nfs3_xdr_dec_access3res(struct rpc_rqst *req,
				   struct xdr_stream *xdr,
				   struct nfs3_accessres *result)
{
	enum nfs_stat status;
	int error;

	error = decode_nfsstat3(xdr, &status);
	if (unlikely(error))
		goto out;
	error = decode_post_op_attr(xdr, result->fattr);
	if (unlikely(error))
		goto out;
	if (status != NFS3_OK)
		goto out_default;
	error = decode_uint32(xdr, &result->access);
out:
	return error;
out_default:
	return nfs3_stat_to_errno(status);
}

/*
 * 3.3.5  READLINK3res
 *
 *	struct READLINK3resok {
 *		post_op_attr	symlink_attributes;
 *		nfspath3	data;
 *	};
 *
 *	struct READLINK3resfail {
 *		post_op_attr	symlink_attributes;
 *	};
 *
 *	union READLINK3res switch (nfsstat3 status) {
 *	case NFS3_OK:
 *		READLINK3resok	resok;
 *	default:
 *		READLINK3resfail resfail;
 *	};
 */
static int nfs3_xdr_dec_readlink3res(struct rpc_rqst *req,
				     struct xdr_stream *xdr,
				     struct nfs_fattr *result)
{
	enum nfs_stat status;
	int error;

	error = decode_nfsstat3(xdr, &status);
	if (unlikely(error))
		goto out;
	error = decode_post_op_attr(xdr, result);
	if (unlikely(error))
		goto out;
	if (status != NFS3_OK)
		goto out_default;
	error = decode_nfspath3(xdr);
out:
	return error;
out_default:
	return nfs3_stat_to_errno(status);
}

/*
 * 3.3.6  READ3res
 *
 *	struct READ3resok {
 *		post_op_attr	file_attributes;
 *		count3		count;
 *		bool		eof;
 *		opaque		data<>;
 *	};
 *
 *	struct READ3resfail {
 *		post_op_attr	file_attributes;
 *	};
 *
 *	union READ3res switch (nfsstat3 status) {
 *	case NFS3_OK:
 *		READ3resok	resok;
 *	default:
 *		READ3resfail	resfail;
 *	};
 */
/*
 * Decode a READ3resok body.  Sanity-checks the duplicated count against
 * the opaque length and against what was actually received, clamping
 * the result when the server returned less data than it claimed.
 * Returns the byte count on success, -EIO on malformed replies.
 */
static int decode_read3resok(struct xdr_stream *xdr,
			     struct nfs_readres *result)
{
	u32 eof, count, ocount, recvd;
	__be32 *p;

	p = xdr_inline_decode(xdr, 4 + 4 + 4);
	if (unlikely(p == NULL))
		goto out_overflow;
	count = be32_to_cpup(p++);
	eof = be32_to_cpup(p++);
	ocount = be32_to_cpup(p++);
	if (unlikely(ocount != count))
		goto out_mismatch;
	recvd = xdr_read_pages(xdr, count);
	if (unlikely(count > recvd))
		goto out_cheating;
out:
	result->eof = eof;
	result->count = count;
	return count;
out_mismatch:
	dprintk("NFS: READ count doesn't match length of opaque: "
		"count %u != ocount %u\n", count, ocount);
	return -EIO;
out_cheating:
	dprintk("NFS: server cheating in read result: "
		"count %u > recvd %u\n", count, recvd);
	count = recvd;
	eof = 0;
	goto out;
out_overflow:
	print_overflow_msg(__func__, xdr);
	return -EIO;
}

static int nfs3_xdr_dec_read3res(struct rpc_rqst *req, struct xdr_stream *xdr,
				 struct nfs_readres *result)
{
	enum nfs_stat status;
	int error;

	error = decode_nfsstat3(xdr, &status);
	if (unlikely(error))
		goto out;
	error = decode_post_op_attr(xdr, result->fattr);
	if (unlikely(error))
		goto out;
	if (status != NFS3_OK)
		goto out_status;
	error = decode_read3resok(xdr, result);
out:
	return error;
out_status:
	return nfs3_stat_to_errno(status);
}

/*
 * 3.3.7  WRITE3res
 *
 *	enum stable_how {
 *		UNSTABLE  = 0,
 *		DATA_SYNC = 1,
 *		FILE_SYNC = 2
 *	};
 *
 *	struct WRITE3resok {
 *		wcc_data	file_wcc;
 *		count3		count;
 *		stable_how	committed;
 *		writeverf3	verf;
 *	};
 *
 *	struct WRITE3resfail {
 *		wcc_data	file_wcc;
 *	};
 *
 *	union WRITE3res switch (nfsstat3 status) {
 *	case NFS3_OK:
 *		WRITE3resok	resok;
 *	default:
 *		WRITE3resfail
resfail;
 *	};
 */
/*
 * Decode a WRITE3resok body.  Rejects out-of-range stable_how values.
 * Returns the committed byte count on success, -EIO on malformed data.
 */
static int decode_write3resok(struct xdr_stream *xdr,
			      struct nfs_writeres *result)
{
	__be32 *p;

	p = xdr_inline_decode(xdr, 4 + 4);
	if (unlikely(p == NULL))
		goto out_overflow;
	result->count = be32_to_cpup(p++);
	result->verf->committed = be32_to_cpup(p++);
	if (unlikely(result->verf->committed > NFS_FILE_SYNC))
		goto out_badvalue;
	if (decode_writeverf3(xdr, &result->verf->verifier))
		goto out_eio;
	return result->count;
out_badvalue:
	dprintk("NFS: bad stable_how value: %u\n", result->verf->committed);
	return -EIO;
out_overflow:
	print_overflow_msg(__func__, xdr);
out_eio:
	return -EIO;
}

static int nfs3_xdr_dec_write3res(struct rpc_rqst *req, struct xdr_stream *xdr,
				  struct nfs_writeres *result)
{
	enum nfs_stat status;
	int error;

	error = decode_nfsstat3(xdr, &status);
	if (unlikely(error))
		goto out;
	/* file_wcc is present in both the resok and resfail arms */
	error = decode_wcc_data(xdr, result->fattr);
	if (unlikely(error))
		goto out;
	if (status != NFS3_OK)
		goto out_status;
	error = decode_write3resok(xdr, result);
out:
	return error;
out_status:
	return nfs3_stat_to_errno(status);
}

/*
 * 3.3.8  CREATE3res
 *
 *	struct CREATE3resok {
 *		post_op_fh3	obj;
 *		post_op_attr	obj_attributes;
 *		wcc_data	dir_wcc;
 *	};
 *
 *	struct CREATE3resfail {
 *		wcc_data	dir_wcc;
 *	};
 *
 *	union CREATE3res switch (nfsstat3 status) {
 *	case NFS3_OK:
 *		CREATE3resok	resok;
 *	default:
 *		CREATE3resfail	resfail;
 *	};
 */
static int decode_create3resok(struct xdr_stream *xdr,
			       struct nfs3_diropres *result)
{
	int error;

	error = decode_post_op_fh3(xdr, result->fh);
	if (unlikely(error))
		goto out;
	error = decode_post_op_attr(xdr, result->fattr);
	if (unlikely(error))
		goto out;
	/* The server isn't required to return a file handle.
	 * If it didn't, force the client to perform a LOOKUP
	 * to determine the correct file handle and attribute
	 * values for the new object. */
	if (result->fh->size == 0)
		result->fattr->valid = 0;
	error = decode_wcc_data(xdr, result->dir_attr);
out:
	return error;
}

static int nfs3_xdr_dec_create3res(struct rpc_rqst *req,
				   struct xdr_stream *xdr,
				   struct nfs3_diropres *result)
{
	enum nfs_stat status;
	int error;

	error = decode_nfsstat3(xdr, &status);
	if (unlikely(error))
		goto out;
	if (status != NFS3_OK)
		goto out_default;
	error = decode_create3resok(xdr, result);
out:
	return error;
out_default:
	/* The resfail arm still carries the directory wcc_data */
	error = decode_wcc_data(xdr, result->dir_attr);
	if (unlikely(error))
		goto out;
	return nfs3_stat_to_errno(status);
}

/*
 * 3.3.12  REMOVE3res
 *
 *	struct REMOVE3resok {
 *		wcc_data    dir_wcc;
 *	};
 *
 *	struct REMOVE3resfail {
 *		wcc_data    dir_wcc;
 *	};
 *
 *	union REMOVE3res switch (nfsstat3 status) {
 *	case NFS3_OK:
 *		REMOVE3resok		resok;
 *	default:
 *		REMOVE3resfail		resfail;
 *	};
 */
static int nfs3_xdr_dec_remove3res(struct rpc_rqst *req,
				   struct xdr_stream *xdr,
				   struct nfs_removeres *result)
{
	enum nfs_stat status;
	int error;

	error = decode_nfsstat3(xdr, &status);
	if (unlikely(error))
		goto out;
	error = decode_wcc_data(xdr, result->dir_attr);
	if (unlikely(error))
		goto out;
	if (status != NFS3_OK)
		goto out_status;
out:
	return error;
out_status:
	return nfs3_stat_to_errno(status);
}

/*
 * 3.3.14  RENAME3res
 *
 *	struct RENAME3resok {
 *		wcc_data	fromdir_wcc;
 *		wcc_data	todir_wcc;
 *	};
 *
 *	struct RENAME3resfail {
 *		wcc_data	fromdir_wcc;
 *		wcc_data	todir_wcc;
 *	};
 *
 *	union RENAME3res switch (nfsstat3 status) {
 *	case NFS3_OK:
 *		RENAME3resok   resok;
 *	default:
 *		RENAME3resfail resfail;
 *	};
 */
static int nfs3_xdr_dec_rename3res(struct rpc_rqst *req,
				   struct xdr_stream *xdr,
				   struct nfs_renameres *result)
{
	enum nfs_stat status;
	int error;

	error = decode_nfsstat3(xdr, &status);
	if (unlikely(error))
		goto out;
	error = decode_wcc_data(xdr, result->old_fattr);
	if (unlikely(error))
		goto out;
	error = decode_wcc_data(xdr, result->new_fattr);
	if (unlikely(error))
		goto out;
	if (status != NFS3_OK)
		goto out_status;
out:
	return error;
out_status:
	return nfs3_stat_to_errno(status);
}

/*
 * 3.3.15  LINK3res
 *
 *	struct LINK3resok {
 *		post_op_attr	file_attributes;
 *		wcc_data	linkdir_wcc;
 *	};
 *
 *	struct LINK3resfail {
 *		post_op_attr	file_attributes;
 *		wcc_data	linkdir_wcc;
 *	};
 *
 *	union LINK3res switch (nfsstat3 status) {
 *	case NFS3_OK:
 *		LINK3resok	resok;
 *	default:
 *		LINK3resfail	resfail;
 *	};
 */
static int nfs3_xdr_dec_link3res(struct rpc_rqst *req, struct xdr_stream *xdr,
				 struct nfs3_linkres *result)
{
	enum nfs_stat status;
	int error;

	error = decode_nfsstat3(xdr, &status);
	if (unlikely(error))
		goto out;
	error = decode_post_op_attr(xdr, result->fattr);
	if (unlikely(error))
		goto out;
	error = decode_wcc_data(xdr, result->dir_attr);
	if (unlikely(error))
		goto out;
	if (status != NFS3_OK)
		goto out_status;
out:
	return error;
out_status:
	return nfs3_stat_to_errno(status);
}

/**
 * nfs3_decode_dirent - Decode a single NFSv3 directory entry stored in
 *			the local page cache
 * @xdr: XDR stream where entry resides
 * @entry: buffer to fill in with entry data
 * @plus: boolean indicating whether this should be a readdirplus entry
 *
 * Returns zero if successful, otherwise a negative errno value is
 * returned.
 *
 * This function is not invoked during READDIR reply decoding, but
 * rather whenever an application invokes the getdents(2) system call
 * on a directory already in our cache.
 *
 * 3.3.16  entry3
 *
 *	struct entry3 {
 *		fileid3		fileid;
 *		filename3	name;
 *		cookie3		cookie;
 *		fhandle3	filehandle;
 *		post_op_attr3	attributes;
 *		entry3		*nextentry;
 *	};
 *
 * 3.3.17  entryplus3
 *	struct entryplus3 {
 *		fileid3		fileid;
 *		filename3	name;
 *		cookie3		cookie;
 *		post_op_attr	name_attributes;
 *		post_op_fh3	name_handle;
 *		entryplus3	*nextentry;
 *	};
 */
int nfs3_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry,
		       int plus)
{
	/* Saved so the entry can be restored if the handle is truncated */
	struct nfs_entry old = *entry;
	__be32 *p;
	int error;

	p = xdr_inline_decode(xdr, 4);
	if (unlikely(p == NULL))
		goto out_overflow;
	if (*p == xdr_zero) {
		/* No more entries: check the dirlist3 eof flag */
		p = xdr_inline_decode(xdr, 4);
		if (unlikely(p == NULL))
			goto out_overflow;
		if (*p == xdr_zero)
			return -EAGAIN;
		entry->eof = 1;
		return -EBADCOOKIE;
	}

	error = decode_fileid3(xdr, &entry->ino);
	if (unlikely(error))
		return error;

	error = decode_inline_filename3(xdr, &entry->name, &entry->len);
	if (unlikely(error))
		return error;

	entry->prev_cookie = entry->cookie;
	error = decode_cookie3(xdr, &entry->cookie);
	if (unlikely(error))
		return error;

	entry->d_type = DT_UNKNOWN;

	if (plus) {
		entry->fattr->valid = 0;
		error = decode_post_op_attr(xdr, entry->fattr);
		if (unlikely(error))
			return error;
		if (entry->fattr->valid & NFS_ATTR_FATTR_V3)
			entry->d_type = nfs_umode_to_dtype(entry->fattr->mode);

		/* In fact, a post_op_fh3: */
		p = xdr_inline_decode(xdr, 4);
		if (unlikely(p == NULL))
			goto out_overflow;
		if (*p != xdr_zero) {
			error = decode_nfs_fh3(xdr, entry->fh);
			if (unlikely(error)) {
				if (error == -E2BIG)
					goto out_truncated;
				return error;
			}
		} else
			zero_nfs_fh3(entry->fh);
	}

	return 0;

out_overflow:
	print_overflow_msg(__func__, xdr);
	return -EAGAIN;
out_truncated:
	dprintk("NFS: directory entry contains invalid file handle\n");
	*entry = old;
	return -EAGAIN;
}

/*
 * 3.3.16  READDIR3res
 *
 *	struct dirlist3 {
 *		entry3		*entries;
 *		bool		eof;
 *	};
 *
 *	struct READDIR3resok {
 *		post_op_attr	dir_attributes;
 *		cookieverf3	cookieverf;
 *		dirlist3	reply;
 *	};
 *
 *	struct READDIR3resfail {
 *		post_op_attr	dir_attributes;
 *	};
 *
 *	union READDIR3res switch (nfsstat3 status) {
 *	case NFS3_OK:
 *		READDIR3resok	resok;
 *	default:
 *		READDIR3resfail	resfail;
 *	};
 *
 * Read the directory contents into the page cache, but otherwise
 * don't touch them.  The actual decoding is done by nfs3_decode_entry()
 * during subsequent nfs_readdir() calls.
 */
static int decode_dirlist3(struct xdr_stream *xdr)
{
	return xdr_read_pages(xdr, xdr->buf->page_len);
}

static int decode_readdir3resok(struct xdr_stream *xdr,
				struct nfs3_readdirres *result)
{
	int error;

	error = decode_post_op_attr(xdr, result->dir_attr);
	if (unlikely(error))
		goto out;
	/* XXX: do we need to check if result->verf != NULL ? */
	error = decode_cookieverf3(xdr, result->verf);
	if (unlikely(error))
		goto out;
	error = decode_dirlist3(xdr);
out:
	return error;
}

static int nfs3_xdr_dec_readdir3res(struct rpc_rqst *req,
				    struct xdr_stream *xdr,
				    struct nfs3_readdirres *result)
{
	enum nfs_stat status;
	int error;

	error = decode_nfsstat3(xdr, &status);
	if (unlikely(error))
		goto out;
	if (status != NFS3_OK)
		goto out_default;
	error = decode_readdir3resok(xdr, result);
out:
	return error;
out_default:
	/* The resfail arm still carries the directory attributes */
	error = decode_post_op_attr(xdr, result->dir_attr);
	if (unlikely(error))
		goto out;
	return nfs3_stat_to_errno(status);
}

/*
 * 3.3.18  FSSTAT3res
 *
 *	struct FSSTAT3resok {
 *		post_op_attr	obj_attributes;
 *		size3		tbytes;
 *		size3		fbytes;
 *		size3		abytes;
 *		size3		tfiles;
 *		size3		ffiles;
 *		size3		afiles;
 *		uint32		invarsec;
 *	};
 *
 *	struct FSSTAT3resfail {
 *		post_op_attr	obj_attributes;
 *	};
 *
 *	union FSSTAT3res switch (nfsstat3 status) {
 *	case NFS3_OK:
 *		FSSTAT3resok	resok;
 *	default:
 *		FSSTAT3resfail	resfail;
 *	};
 */
static int decode_fsstat3resok(struct xdr_stream *xdr,
			       struct nfs_fsstat *result)
{
	__be32 *p;

	p = xdr_inline_decode(xdr, 8 * 6 + 4);
	if (unlikely(p == NULL))
		goto out_overflow;
	p = xdr_decode_size3(p, &result->tbytes);
	p = xdr_decode_size3(p, &result->fbytes);
	p = xdr_decode_size3(p, &result->abytes);
	p = xdr_decode_size3(p, &result->tfiles);
	p = xdr_decode_size3(p, &result->ffiles);
	xdr_decode_size3(p, &result->afiles);
	/* ignore invarsec */
	return 0;
out_overflow:
	print_overflow_msg(__func__, xdr);
	return -EIO;
}

static int nfs3_xdr_dec_fsstat3res(struct rpc_rqst *req,
				   struct xdr_stream *xdr,
				   struct nfs_fsstat *result)
{
	enum nfs_stat status;
	int error;

	error = decode_nfsstat3(xdr, &status);
	if (unlikely(error))
		goto out;
	error = decode_post_op_attr(xdr, result->fattr);
	if (unlikely(error))
		goto out;
	if (status != NFS3_OK)
		goto out_status;
	error = decode_fsstat3resok(xdr, result);
out:
	return error;
out_status:
	return nfs3_stat_to_errno(status);
}

/*
 * 3.3.19  FSINFO3res
 *
 *	struct FSINFO3resok {
 *		post_op_attr	obj_attributes;
 *		uint32		rtmax;
 *		uint32		rtpref;
 *		uint32		rtmult;
 *		uint32		wtmax;
 *		uint32		wtpref;
 *		uint32		wtmult;
 *		uint32		dtpref;
 *		size3		maxfilesize;
 *		nfstime3	time_delta;
 *		uint32		properties;
 *	};
 *
 *	struct FSINFO3resfail {
 *		post_op_attr	obj_attributes;
 *	};
 *
 *	union FSINFO3res switch (nfsstat3 status) {
 *	case NFS3_OK:
 *		FSINFO3resok	resok;
 *	default:
 *		FSINFO3resfail	resfail;
 *	};
 */
static int decode_fsinfo3resok(struct xdr_stream *xdr,
			       struct nfs_fsinfo *result)
{
	__be32 *p;

	p = xdr_inline_decode(xdr, 4 * 7 + 8 + 8 + 4);
	if (unlikely(p == NULL))
		goto out_overflow;
	result->rtmax = be32_to_cpup(p++);
	result->rtpref = be32_to_cpup(p++);
	result->rtmult = be32_to_cpup(p++);
	result->wtmax = be32_to_cpup(p++);
	result->wtpref = be32_to_cpup(p++);
	result->wtmult = be32_to_cpup(p++);
	result->dtpref = be32_to_cpup(p++);
	p = xdr_decode_size3(p, &result->maxfilesize);
	xdr_decode_nfstime3(p, &result->time_delta);

	/* ignore properties */
	result->lease_time = 0;
	return 0;
out_overflow:
	print_overflow_msg(__func__, xdr);
	return -EIO;
}

static int nfs3_xdr_dec_fsinfo3res(struct rpc_rqst *req,
				   struct xdr_stream *xdr,
				   struct nfs_fsinfo *result)
{
	enum nfs_stat status;
	int error;

	error = decode_nfsstat3(xdr, &status);
	if (unlikely(error))
		goto out;
	error = decode_post_op_attr(xdr, result->fattr);
	if (unlikely(error))
		goto out;
	if (status != NFS3_OK)
		goto out_status;
	error = decode_fsinfo3resok(xdr, result);
out:
	return error;
out_status:
	return nfs3_stat_to_errno(status);
}

/*
 * 3.3.20  PATHCONF3res
 *
 *	struct PATHCONF3resok {
 *		post_op_attr	obj_attributes;
 *		uint32		linkmax;
 *		uint32		name_max;
 *		bool		no_trunc;
 *		bool		chown_restricted;
 *		bool		case_insensitive;
 *		bool		case_preserving;
 *	};
 *
 *	struct PATHCONF3resfail {
 *		post_op_attr	obj_attributes;
 *	};
 *
 *	union PATHCONF3res switch (nfsstat3 status) {
 *	case NFS3_OK:
 *		PATHCONF3resok	resok;
 *	default:
 *		PATHCONF3resfail resfail;
 *	};
 */
static int decode_pathconf3resok(struct xdr_stream *xdr,
				 struct nfs_pathconf *result)
{
	__be32 *p;

	p = xdr_inline_decode(xdr, 4 * 6);
	if (unlikely(p == NULL))
		goto out_overflow;
	result->max_link = be32_to_cpup(p++);
	result->max_namelen = be32_to_cpup(p);
	/* ignore remaining fields */
	return 0;
out_overflow:
	print_overflow_msg(__func__, xdr);
	return -EIO;
}

static int nfs3_xdr_dec_pathconf3res(struct rpc_rqst *req,
				     struct xdr_stream *xdr,
				     struct nfs_pathconf *result)
{
	enum nfs_stat status;
	int error;

	error = decode_nfsstat3(xdr, &status);
	if (unlikely(error))
		goto out;
	error = decode_post_op_attr(xdr, result->fattr);
	if (unlikely(error))
		goto out;
	if (status != NFS3_OK)
		goto out_status;
	error = decode_pathconf3resok(xdr, result);
out:
	return error;
out_status:
	return nfs3_stat_to_errno(status);
}

/*
 * 3.3.21  COMMIT3res
 *
 *	struct COMMIT3resok {
 *		wcc_data	file_wcc;
 *		writeverf3	verf;
 *	};
 *
 *	struct COMMIT3resfail {
 *		wcc_data	file_wcc;
 *	};
 *
 *	union COMMIT3res switch (nfsstat3 status) {
 *	case NFS3_OK:
 *		COMMIT3resok	resok;
 *	default:
 *		COMMIT3resfail	resfail;
 *	};
 */
static int nfs3_xdr_dec_commit3res(struct rpc_rqst *req,
				   struct xdr_stream *xdr,
				   struct nfs_commitres *result)
{
	enum nfs_stat status;
	int error;

	error = decode_nfsstat3(xdr, &status);
	if (unlikely(error))
		goto out;
	error = decode_wcc_data(xdr, result->fattr);
	if (unlikely(error))
		goto out;
	if (status != NFS3_OK)
		goto out_status;
	error = decode_writeverf3(xdr, &result->verf->verifier);
out:
	return error;
out_status:
	return nfs3_stat_to_errno(status);
}

#ifdef CONFIG_NFS_V3_ACL

/*
 * Decode a GETACL3resok body: post-op attributes, the requested mask,
 * then up to two nfsacl-encoded lists (access and/or default ACL).
 * Returns 0 on success, -EINVAL on a bad mask, -EIO on decode failure.
 */
static inline int decode_getacl3resok(struct xdr_stream *xdr,
				      struct nfs3_getaclres *result)
{
	struct posix_acl **acl;
	unsigned int *aclcnt;
	size_t hdrlen;
	int error;

	error = decode_post_op_attr(xdr, result->fattr);
	if (unlikely(error))
		goto out;
	error = decode_uint32(xdr, &result->mask);
	if (unlikely(error))
		goto out;
	error = -EINVAL;
	if (result->mask & ~(NFS_ACL|NFS_ACLCNT|NFS_DFACL|NFS_DFACLCNT))
		goto out;
	hdrlen = xdr_stream_pos(xdr);

	acl = NULL;
	if (result->mask & NFS_ACL)
		acl = &result->acl_access;
	aclcnt = NULL;
	if (result->mask & NFS_ACLCNT)
		aclcnt = &result->acl_access_count;
	error = nfsacl_decode(xdr->buf, hdrlen, aclcnt, acl);
	if (unlikely(error <= 0))
		goto out;

	acl = NULL;
	if (result->mask & NFS_DFACL)
		acl = &result->acl_default;
	aclcnt = NULL;
	if (result->mask & NFS_DFACLCNT)
		aclcnt = &result->acl_default_count;
	/* The default ACL starts right after the access ACL's bytes */
	error = nfsacl_decode(xdr->buf, hdrlen + error, aclcnt, acl);
	if (unlikely(error <= 0))
		return error;
	error = 0;
out:
	return error;
}

static int nfs3_xdr_dec_getacl3res(struct rpc_rqst *req,
				   struct xdr_stream *xdr,
				   struct nfs3_getaclres *result)
{
	enum nfs_stat status;
	int error;

	error = decode_nfsstat3(xdr, &status);
	if (unlikely(error))
		goto out;
	if (status != NFS3_OK)
		goto out_default;
	error = decode_getacl3resok(xdr, result);
out:
	return error;
out_default:
	return nfs3_stat_to_errno(status);
}

static int nfs3_xdr_dec_setacl3res(struct rpc_rqst *req,
				   struct xdr_stream *xdr,
				   struct nfs_fattr *result)
{
	enum nfs_stat status;
	int error;

	error = decode_nfsstat3(xdr, &status);
	if (unlikely(error))
		goto out;
	if (status != NFS3_OK)
		goto out_default;
	error = decode_post_op_attr(xdr, result);
out:
	return error;
out_default:
	return nfs3_stat_to_errno(status);
}

#endif  /* CONFIG_NFS_V3_ACL */

/*
 * We need to translate between nfs status return values and
 * the local errno values which may
not be the same. */ static const struct { int stat; int errno; } nfs_errtbl[] = { { NFS_OK, 0 }, { NFSERR_PERM, -EPERM }, { NFSERR_NOENT, -ENOENT }, { NFSERR_IO, -errno_NFSERR_IO}, { NFSERR_NXIO, -ENXIO }, /* { NFSERR_EAGAIN, -EAGAIN }, */ { NFSERR_ACCES, -EACCES }, { NFSERR_EXIST, -EEXIST }, { NFSERR_XDEV, -EXDEV }, { NFSERR_NODEV, -ENODEV }, { NFSERR_NOTDIR, -ENOTDIR }, { NFSERR_ISDIR, -EISDIR }, { NFSERR_INVAL, -EINVAL }, { NFSERR_FBIG, -EFBIG }, { NFSERR_NOSPC, -ENOSPC }, { NFSERR_ROFS, -EROFS }, { NFSERR_MLINK, -EMLINK }, { NFSERR_NAMETOOLONG, -ENAMETOOLONG }, { NFSERR_NOTEMPTY, -ENOTEMPTY }, { NFSERR_DQUOT, -EDQUOT }, { NFSERR_STALE, -ESTALE }, { NFSERR_REMOTE, -EREMOTE }, #ifdef EWFLUSH { NFSERR_WFLUSH, -EWFLUSH }, #endif { NFSERR_BADHANDLE, -EBADHANDLE }, { NFSERR_NOT_SYNC, -ENOTSYNC }, { NFSERR_BAD_COOKIE, -EBADCOOKIE }, { NFSERR_NOTSUPP, -ENOTSUPP }, { NFSERR_TOOSMALL, -ETOOSMALL }, { NFSERR_SERVERFAULT, -EREMOTEIO }, { NFSERR_BADTYPE, -EBADTYPE }, { NFSERR_JUKEBOX, -EJUKEBOX }, { -1, -EIO } }; /** * nfs3_stat_to_errno - convert an NFS status code to a local errno * @status: NFS status code to convert * * Returns a local errno value, or -EIO if the NFS status code is * not recognized. This function is used jointly by NFSv2 and NFSv3. 
*/ static int nfs3_stat_to_errno(enum nfs_stat status) { int i; for (i = 0; nfs_errtbl[i].stat != -1; i++) { if (nfs_errtbl[i].stat == (int)status) return nfs_errtbl[i].errno; } dprintk("NFS: Unrecognized nfs status value: %u\n", status); return nfs_errtbl[i].errno; } #define PROC(proc, argtype, restype, timer) \ [NFS3PROC_##proc] = { \ .p_proc = NFS3PROC_##proc, \ .p_encode = (kxdreproc_t)nfs3_xdr_enc_##argtype##3args, \ .p_decode = (kxdrdproc_t)nfs3_xdr_dec_##restype##3res, \ .p_arglen = NFS3_##argtype##args_sz, \ .p_replen = NFS3_##restype##res_sz, \ .p_timer = timer, \ .p_statidx = NFS3PROC_##proc, \ .p_name = #proc, \ } struct rpc_procinfo nfs3_procedures[] = { PROC(GETATTR, getattr, getattr, 1), PROC(SETATTR, setattr, setattr, 0), PROC(LOOKUP, lookup, lookup, 2), PROC(ACCESS, access, access, 1), PROC(READLINK, readlink, readlink, 3), PROC(READ, read, read, 3), PROC(WRITE, write, write, 4), PROC(CREATE, create, create, 0), PROC(MKDIR, mkdir, create, 0), PROC(SYMLINK, symlink, create, 0), PROC(MKNOD, mknod, create, 0), PROC(REMOVE, remove, remove, 0), PROC(RMDIR, lookup, setattr, 0), PROC(RENAME, rename, rename, 0), PROC(LINK, link, link, 0), PROC(READDIR, readdir, readdir, 3), PROC(READDIRPLUS, readdirplus, readdir, 3), PROC(FSSTAT, getattr, fsstat, 0), PROC(FSINFO, getattr, fsinfo, 0), PROC(PATHCONF, getattr, pathconf, 0), PROC(COMMIT, commit, commit, 5), }; const struct rpc_version nfs_version3 = { .number = 3, .nrprocs = ARRAY_SIZE(nfs3_procedures), .procs = nfs3_procedures }; #ifdef CONFIG_NFS_V3_ACL static struct rpc_procinfo nfs3_acl_procedures[] = { [ACLPROC3_GETACL] = { .p_proc = ACLPROC3_GETACL, .p_encode = (kxdreproc_t)nfs3_xdr_enc_getacl3args, .p_decode = (kxdrdproc_t)nfs3_xdr_dec_getacl3res, .p_arglen = ACL3_getaclargs_sz, .p_replen = ACL3_getaclres_sz, .p_timer = 1, .p_name = "GETACL", }, [ACLPROC3_SETACL] = { .p_proc = ACLPROC3_SETACL, .p_encode = (kxdreproc_t)nfs3_xdr_enc_setacl3args, .p_decode = (kxdrdproc_t)nfs3_xdr_dec_setacl3res, .p_arglen = 
ACL3_setaclargs_sz, .p_replen = ACL3_setaclres_sz, .p_timer = 0, .p_name = "SETACL", }, }; const struct rpc_version nfsacl_version3 = { .number = 3, .nrprocs = sizeof(nfs3_acl_procedures)/ sizeof(nfs3_acl_procedures[0]), .procs = nfs3_acl_procedures, }; #endif /* CONFIG_NFS_V3_ACL */
gpl-2.0
TeamEOS/kernel_htc_flounder
net/mac80211/wep.c
2489
9741
/* * Software WEP encryption implementation * Copyright 2002, Jouni Malinen <jkmaline@cc.hut.fi> * Copyright 2003, Instant802 Networks, Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/netdevice.h> #include <linux/types.h> #include <linux/random.h> #include <linux/compiler.h> #include <linux/crc32.h> #include <linux/crypto.h> #include <linux/err.h> #include <linux/mm.h> #include <linux/scatterlist.h> #include <linux/slab.h> #include <asm/unaligned.h> #include <net/mac80211.h> #include "ieee80211_i.h" #include "wep.h" int ieee80211_wep_init(struct ieee80211_local *local) { /* start WEP IV from a random value */ get_random_bytes(&local->wep_iv, WEP_IV_LEN); local->wep_tx_tfm = crypto_alloc_cipher("arc4", 0, CRYPTO_ALG_ASYNC); if (IS_ERR(local->wep_tx_tfm)) { local->wep_rx_tfm = ERR_PTR(-EINVAL); return PTR_ERR(local->wep_tx_tfm); } local->wep_rx_tfm = crypto_alloc_cipher("arc4", 0, CRYPTO_ALG_ASYNC); if (IS_ERR(local->wep_rx_tfm)) { crypto_free_cipher(local->wep_tx_tfm); local->wep_tx_tfm = ERR_PTR(-EINVAL); return PTR_ERR(local->wep_rx_tfm); } return 0; } void ieee80211_wep_free(struct ieee80211_local *local) { if (!IS_ERR(local->wep_tx_tfm)) crypto_free_cipher(local->wep_tx_tfm); if (!IS_ERR(local->wep_rx_tfm)) crypto_free_cipher(local->wep_rx_tfm); } static inline bool ieee80211_wep_weak_iv(u32 iv, int keylen) { /* * Fluhrer, Mantin, and Shamir have reported weaknesses in the * key scheduling algorithm of RC4. At least IVs (KeyByte + 3, * 0xff, N) can be used to speedup attacks, so avoid using them. 
*/ if ((iv & 0xff00) == 0xff00) { u8 B = (iv >> 16) & 0xff; if (B >= 3 && B < 3 + keylen) return true; } return false; } static void ieee80211_wep_get_iv(struct ieee80211_local *local, int keylen, int keyidx, u8 *iv) { local->wep_iv++; if (ieee80211_wep_weak_iv(local->wep_iv, keylen)) local->wep_iv += 0x0100; if (!iv) return; *iv++ = (local->wep_iv >> 16) & 0xff; *iv++ = (local->wep_iv >> 8) & 0xff; *iv++ = local->wep_iv & 0xff; *iv++ = keyidx << 6; } static u8 *ieee80211_wep_add_iv(struct ieee80211_local *local, struct sk_buff *skb, int keylen, int keyidx) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); unsigned int hdrlen; u8 *newhdr; hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PROTECTED); if (WARN_ON(skb_tailroom(skb) < WEP_ICV_LEN || skb_headroom(skb) < WEP_IV_LEN)) return NULL; hdrlen = ieee80211_hdrlen(hdr->frame_control); newhdr = skb_push(skb, WEP_IV_LEN); memmove(newhdr, newhdr + WEP_IV_LEN, hdrlen); /* the HW only needs room for the IV, but not the actual IV */ if (info->control.hw_key && (info->control.hw_key->flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE)) return newhdr + hdrlen; skb_set_network_header(skb, skb_network_offset(skb) + WEP_IV_LEN); ieee80211_wep_get_iv(local, keylen, keyidx, newhdr + hdrlen); return newhdr + hdrlen; } static void ieee80211_wep_remove_iv(struct ieee80211_local *local, struct sk_buff *skb, struct ieee80211_key *key) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; unsigned int hdrlen; hdrlen = ieee80211_hdrlen(hdr->frame_control); memmove(skb->data + WEP_IV_LEN, skb->data, hdrlen); skb_pull(skb, WEP_IV_LEN); } /* Perform WEP encryption using given key. data buffer must have tailroom * for 4-byte ICV. data_len must not include this ICV. Note: this function * does _not_ add IV. 
data = RC4(data | CRC32(data)) */ int ieee80211_wep_encrypt_data(struct crypto_cipher *tfm, u8 *rc4key, size_t klen, u8 *data, size_t data_len) { __le32 icv; int i; if (IS_ERR(tfm)) return -1; icv = cpu_to_le32(~crc32_le(~0, data, data_len)); put_unaligned(icv, (__le32 *)(data + data_len)); crypto_cipher_setkey(tfm, rc4key, klen); for (i = 0; i < data_len + WEP_ICV_LEN; i++) crypto_cipher_encrypt_one(tfm, data + i, data + i); return 0; } /* Perform WEP encryption on given skb. 4 bytes of extra space (IV) in the * beginning of the buffer 4 bytes of extra space (ICV) in the end of the * buffer will be added. Both IV and ICV will be transmitted, so the * payload length increases with 8 bytes. * * WEP frame payload: IV + TX key idx, RC4(data), ICV = RC4(CRC32(data)) */ int ieee80211_wep_encrypt(struct ieee80211_local *local, struct sk_buff *skb, const u8 *key, int keylen, int keyidx) { u8 *iv; size_t len; u8 rc4key[3 + WLAN_KEY_LEN_WEP104]; iv = ieee80211_wep_add_iv(local, skb, keylen, keyidx); if (!iv) return -1; len = skb->len - (iv + WEP_IV_LEN - skb->data); /* Prepend 24-bit IV to RC4 key */ memcpy(rc4key, iv, 3); /* Copy rest of the WEP key (the secret part) */ memcpy(rc4key + 3, key, keylen); /* Add room for ICV */ skb_put(skb, WEP_ICV_LEN); return ieee80211_wep_encrypt_data(local->wep_tx_tfm, rc4key, keylen + 3, iv + WEP_IV_LEN, len); } /* Perform WEP decryption using given key. data buffer includes encrypted * payload, including 4-byte ICV, but _not_ IV. data_len must not include ICV. * Return 0 on success and -1 on ICV mismatch. 
*/ int ieee80211_wep_decrypt_data(struct crypto_cipher *tfm, u8 *rc4key, size_t klen, u8 *data, size_t data_len) { __le32 crc; int i; if (IS_ERR(tfm)) return -1; crypto_cipher_setkey(tfm, rc4key, klen); for (i = 0; i < data_len + WEP_ICV_LEN; i++) crypto_cipher_decrypt_one(tfm, data + i, data + i); crc = cpu_to_le32(~crc32_le(~0, data, data_len)); if (memcmp(&crc, data + data_len, WEP_ICV_LEN) != 0) /* ICV mismatch */ return -1; return 0; } /* Perform WEP decryption on given skb. Buffer includes whole WEP part of * the frame: IV (4 bytes), encrypted payload (including SNAP header), * ICV (4 bytes). skb->len includes both IV and ICV. * * Returns 0 if frame was decrypted successfully and ICV was correct and -1 on * failure. If frame is OK, IV and ICV will be removed, i.e., decrypted payload * is moved to the beginning of the skb and skb length will be reduced. */ static int ieee80211_wep_decrypt(struct ieee80211_local *local, struct sk_buff *skb, struct ieee80211_key *key) { u32 klen; u8 rc4key[3 + WLAN_KEY_LEN_WEP104]; u8 keyidx; struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; unsigned int hdrlen; size_t len; int ret = 0; if (!ieee80211_has_protected(hdr->frame_control)) return -1; hdrlen = ieee80211_hdrlen(hdr->frame_control); if (skb->len < hdrlen + WEP_IV_LEN + WEP_ICV_LEN) return -1; len = skb->len - hdrlen - WEP_IV_LEN - WEP_ICV_LEN; keyidx = skb->data[hdrlen + 3] >> 6; if (!key || keyidx != key->conf.keyidx) return -1; klen = 3 + key->conf.keylen; /* Prepend 24-bit IV to RC4 key */ memcpy(rc4key, skb->data + hdrlen, 3); /* Copy rest of the WEP key (the secret part) */ memcpy(rc4key + 3, key->conf.key, key->conf.keylen); if (ieee80211_wep_decrypt_data(local->wep_rx_tfm, rc4key, klen, skb->data + hdrlen + WEP_IV_LEN, len)) ret = -1; /* Trim ICV */ skb_trim(skb, skb->len - WEP_ICV_LEN); /* Remove IV */ memmove(skb->data + WEP_IV_LEN, skb->data, hdrlen); skb_pull(skb, WEP_IV_LEN); return ret; } static bool ieee80211_wep_is_weak_iv(struct sk_buff 
*skb, struct ieee80211_key *key) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; unsigned int hdrlen; u8 *ivpos; u32 iv; hdrlen = ieee80211_hdrlen(hdr->frame_control); ivpos = skb->data + hdrlen; iv = (ivpos[0] << 16) | (ivpos[1] << 8) | ivpos[2]; return ieee80211_wep_weak_iv(iv, key->conf.keylen); } ieee80211_rx_result ieee80211_crypto_wep_decrypt(struct ieee80211_rx_data *rx) { struct sk_buff *skb = rx->skb; struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; __le16 fc = hdr->frame_control; if (!ieee80211_is_data(fc) && !ieee80211_is_auth(fc)) return RX_CONTINUE; if (!(status->flag & RX_FLAG_DECRYPTED)) { if (skb_linearize(rx->skb)) return RX_DROP_UNUSABLE; if (rx->sta && ieee80211_wep_is_weak_iv(rx->skb, rx->key)) rx->sta->wep_weak_iv_count++; if (ieee80211_wep_decrypt(rx->local, rx->skb, rx->key)) return RX_DROP_UNUSABLE; } else if (!(status->flag & RX_FLAG_IV_STRIPPED)) { if (!pskb_may_pull(rx->skb, ieee80211_hdrlen(fc) + WEP_IV_LEN)) return RX_DROP_UNUSABLE; if (rx->sta && ieee80211_wep_is_weak_iv(rx->skb, rx->key)) rx->sta->wep_weak_iv_count++; ieee80211_wep_remove_iv(rx->local, rx->skb, rx->key); /* remove ICV */ if (pskb_trim(rx->skb, rx->skb->len - WEP_ICV_LEN)) return RX_DROP_UNUSABLE; } return RX_CONTINUE; } static int wep_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb) { struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct ieee80211_key_conf *hw_key = info->control.hw_key; if (!hw_key) { if (ieee80211_wep_encrypt(tx->local, skb, tx->key->conf.key, tx->key->conf.keylen, tx->key->conf.keyidx)) return -1; } else if ((hw_key->flags & IEEE80211_KEY_FLAG_GENERATE_IV) || (hw_key->flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE)) { if (!ieee80211_wep_add_iv(tx->local, skb, tx->key->conf.keylen, tx->key->conf.keyidx)) return -1; } return 0; } ieee80211_tx_result ieee80211_crypto_wep_encrypt(struct ieee80211_tx_data *tx) { struct sk_buff *skb; 
ieee80211_tx_set_protected(tx); skb_queue_walk(&tx->skbs, skb) { if (wep_encrypt_skb(tx, skb) < 0) { I802_DEBUG_INC(tx->local->tx_handlers_drop_wep); return TX_DROP; } } return TX_CONTINUE; }
gpl-2.0
Kra1o5/android_kernel_bq_rk3066
drivers/misc/ad525x_dpot-spi.c
3257
4024
/* * Driver for the Analog Devices digital potentiometers (SPI bus) * * Copyright (C) 2010 Michael Hennerich, Analog Devices Inc. * * Licensed under the GPL-2 or later. */ #include <linux/spi/spi.h> #include <linux/module.h> #include "ad525x_dpot.h" static const struct ad_dpot_id ad_dpot_spi_devlist[] = { {.name = "ad5160", .devid = AD5160_ID}, {.name = "ad5161", .devid = AD5161_ID}, {.name = "ad5162", .devid = AD5162_ID}, {.name = "ad5165", .devid = AD5165_ID}, {.name = "ad5200", .devid = AD5200_ID}, {.name = "ad5201", .devid = AD5201_ID}, {.name = "ad5203", .devid = AD5203_ID}, {.name = "ad5204", .devid = AD5204_ID}, {.name = "ad5206", .devid = AD5206_ID}, {.name = "ad5207", .devid = AD5207_ID}, {.name = "ad5231", .devid = AD5231_ID}, {.name = "ad5232", .devid = AD5232_ID}, {.name = "ad5233", .devid = AD5233_ID}, {.name = "ad5235", .devid = AD5235_ID}, {.name = "ad5260", .devid = AD5260_ID}, {.name = "ad5262", .devid = AD5262_ID}, {.name = "ad5263", .devid = AD5263_ID}, {.name = "ad5290", .devid = AD5290_ID}, {.name = "ad5291", .devid = AD5291_ID}, {.name = "ad5292", .devid = AD5292_ID}, {.name = "ad5293", .devid = AD5293_ID}, {.name = "ad7376", .devid = AD7376_ID}, {.name = "ad8400", .devid = AD8400_ID}, {.name = "ad8402", .devid = AD8402_ID}, {.name = "ad8403", .devid = AD8403_ID}, {.name = "adn2850", .devid = ADN2850_ID}, {.name = "ad5270", .devid = AD5270_ID}, {.name = "ad5271", .devid = AD5271_ID}, {} }; /* ------------------------------------------------------------------------- */ /* SPI bus functions */ static int write8(void *client, u8 val) { u8 data = val; return spi_write(client, &data, 1); } static int write16(void *client, u8 reg, u8 val) { u8 data[2] = {reg, val}; return spi_write(client, data, 2); } static int write24(void *client, u8 reg, u16 val) { u8 data[3] = {reg, val >> 8, val}; return spi_write(client, data, 3); } static int read8(void *client) { int ret; u8 data; ret = spi_read(client, &data, 1); if (ret < 0) return ret; return data; } 
static int read16(void *client, u8 reg) { int ret; u8 buf_rx[2]; write16(client, reg, 0); ret = spi_read(client, buf_rx, 2); if (ret < 0) return ret; return (buf_rx[0] << 8) | buf_rx[1]; } static int read24(void *client, u8 reg) { int ret; u8 buf_rx[3]; write24(client, reg, 0); ret = spi_read(client, buf_rx, 3); if (ret < 0) return ret; return (buf_rx[1] << 8) | buf_rx[2]; } static const struct ad_dpot_bus_ops bops = { .read_d8 = read8, .read_r8d8 = read16, .read_r8d16 = read24, .write_d8 = write8, .write_r8d8 = write16, .write_r8d16 = write24, }; static const struct ad_dpot_id *dpot_match_id(const struct ad_dpot_id *id, char *name) { while (id->name && id->name[0]) { if (strcmp(name, id->name) == 0) return id; id++; } return NULL; } static int __devinit ad_dpot_spi_probe(struct spi_device *spi) { char *name = spi->dev.platform_data; const struct ad_dpot_id *dpot_id; struct ad_dpot_bus_data bdata = { .client = spi, .bops = &bops, }; dpot_id = dpot_match_id(ad_dpot_spi_devlist, name); if (dpot_id == NULL) { dev_err(&spi->dev, "%s not in supported device list", name); return -ENODEV; } return ad_dpot_probe(&spi->dev, &bdata, dpot_id); } static int __devexit ad_dpot_spi_remove(struct spi_device *spi) { return ad_dpot_remove(&spi->dev); } static struct spi_driver ad_dpot_spi_driver = { .driver = { .name = "ad_dpot", .bus = &spi_bus_type, .owner = THIS_MODULE, }, .probe = ad_dpot_spi_probe, .remove = __devexit_p(ad_dpot_spi_remove), }; static int __init ad_dpot_spi_init(void) { return spi_register_driver(&ad_dpot_spi_driver); } module_init(ad_dpot_spi_init); static void __exit ad_dpot_spi_exit(void) { spi_unregister_driver(&ad_dpot_spi_driver); } module_exit(ad_dpot_spi_exit); MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>"); MODULE_DESCRIPTION("digital potentiometer SPI bus driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS("spi:ad_dpot");
gpl-2.0
AOSP-TEAM/kernel_i9100g
sound/drivers/vx/vx_hwdep.c
4025
6162
/* * Driver for Digigram VX soundcards * * DSP firmware management * * Copyright (c) 2002 by Takashi Iwai <tiwai@suse.de> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/device.h> #include <linux/firmware.h> #include <linux/slab.h> #include <linux/vmalloc.h> #include <sound/core.h> #include <sound/hwdep.h> #include <sound/vx_core.h> #ifdef SND_VX_FW_LOADER MODULE_FIRMWARE("vx/bx_1_vxp.b56"); MODULE_FIRMWARE("vx/bx_1_vp4.b56"); MODULE_FIRMWARE("vx/x1_1_vx2.xlx"); MODULE_FIRMWARE("vx/x1_2_v22.xlx"); MODULE_FIRMWARE("vx/x1_1_vxp.xlx"); MODULE_FIRMWARE("vx/x1_1_vp4.xlx"); MODULE_FIRMWARE("vx/bd56002.boot"); MODULE_FIRMWARE("vx/bd563v2.boot"); MODULE_FIRMWARE("vx/bd563s3.boot"); MODULE_FIRMWARE("vx/l_1_vx2.d56"); MODULE_FIRMWARE("vx/l_1_v22.d56"); MODULE_FIRMWARE("vx/l_1_vxp.d56"); MODULE_FIRMWARE("vx/l_1_vp4.d56"); int snd_vx_setup_firmware(struct vx_core *chip) { static char *fw_files[VX_TYPE_NUMS][4] = { [VX_TYPE_BOARD] = { NULL, "x1_1_vx2.xlx", "bd56002.boot", "l_1_vx2.d56", }, [VX_TYPE_V2] = { NULL, "x1_2_v22.xlx", "bd563v2.boot", "l_1_v22.d56", }, [VX_TYPE_MIC] = { NULL, "x1_2_v22.xlx", "bd563v2.boot", "l_1_v22.d56", }, [VX_TYPE_VXPOCKET] = { "bx_1_vxp.b56", "x1_1_vxp.xlx", "bd563s3.boot", "l_1_vxp.d56" }, [VX_TYPE_VXP440] = { "bx_1_vp4.b56", "x1_1_vp4.xlx", "bd563s3.boot", "l_1_vp4.d56" }, }; int i, err; 
for (i = 0; i < 4; i++) { char path[32]; const struct firmware *fw; if (! fw_files[chip->type][i]) continue; sprintf(path, "vx/%s", fw_files[chip->type][i]); if (request_firmware(&fw, path, chip->dev)) { snd_printk(KERN_ERR "vx: can't load firmware %s\n", path); return -ENOENT; } err = chip->ops->load_dsp(chip, i, fw); if (err < 0) { release_firmware(fw); return err; } if (i == 1) chip->chip_status |= VX_STAT_XILINX_LOADED; #ifdef CONFIG_PM chip->firmware[i] = fw; #else release_firmware(fw); #endif } /* ok, we reached to the last one */ /* create the devices if not built yet */ if ((err = snd_vx_pcm_new(chip)) < 0) return err; if ((err = snd_vx_mixer_new(chip)) < 0) return err; if (chip->ops->add_controls) if ((err = chip->ops->add_controls(chip)) < 0) return err; chip->chip_status |= VX_STAT_DEVICE_INIT; chip->chip_status |= VX_STAT_CHIP_INIT; return snd_card_register(chip->card); } /* exported */ void snd_vx_free_firmware(struct vx_core *chip) { #ifdef CONFIG_PM int i; for (i = 0; i < 4; i++) release_firmware(chip->firmware[i]); #endif } #else /* old style firmware loading */ static int vx_hwdep_dsp_status(struct snd_hwdep *hw, struct snd_hwdep_dsp_status *info) { static char *type_ids[VX_TYPE_NUMS] = { [VX_TYPE_BOARD] = "vxboard", [VX_TYPE_V2] = "vx222", [VX_TYPE_MIC] = "vx222", [VX_TYPE_VXPOCKET] = "vxpocket", [VX_TYPE_VXP440] = "vxp440", }; struct vx_core *vx = hw->private_data; if (snd_BUG_ON(!type_ids[vx->type])) return -EINVAL; strcpy(info->id, type_ids[vx->type]); if (vx_is_pcmcia(vx)) info->num_dsps = 4; else info->num_dsps = 3; if (vx->chip_status & VX_STAT_CHIP_INIT) info->chip_ready = 1; info->version = VX_DRIVER_VERSION; return 0; } static void free_fw(const struct firmware *fw) { if (fw) { vfree(fw->data); kfree(fw); } } static int vx_hwdep_dsp_load(struct snd_hwdep *hw, struct snd_hwdep_dsp_image *dsp) { struct vx_core *vx = hw->private_data; int index, err; struct firmware *fw; if (snd_BUG_ON(!vx->ops->load_dsp)) return -ENXIO; fw = 
kmalloc(sizeof(*fw), GFP_KERNEL); if (! fw) { snd_printk(KERN_ERR "cannot allocate firmware\n"); return -ENOMEM; } fw->size = dsp->length; fw->data = vmalloc(fw->size); if (! fw->data) { snd_printk(KERN_ERR "cannot allocate firmware image (length=%d)\n", (int)fw->size); kfree(fw); return -ENOMEM; } if (copy_from_user((void *)fw->data, dsp->image, dsp->length)) { free_fw(fw); return -EFAULT; } index = dsp->index; if (! vx_is_pcmcia(vx)) index++; err = vx->ops->load_dsp(vx, index, fw); if (err < 0) { free_fw(fw); return err; } #ifdef CONFIG_PM vx->firmware[index] = fw; #else free_fw(fw); #endif if (index == 1) vx->chip_status |= VX_STAT_XILINX_LOADED; if (index < 3) return 0; /* ok, we reached to the last one */ /* create the devices if not built yet */ if (! (vx->chip_status & VX_STAT_DEVICE_INIT)) { if ((err = snd_vx_pcm_new(vx)) < 0) return err; if ((err = snd_vx_mixer_new(vx)) < 0) return err; if (vx->ops->add_controls) if ((err = vx->ops->add_controls(vx)) < 0) return err; if ((err = snd_card_register(vx->card)) < 0) return err; vx->chip_status |= VX_STAT_DEVICE_INIT; } vx->chip_status |= VX_STAT_CHIP_INIT; return 0; } /* exported */ int snd_vx_setup_firmware(struct vx_core *chip) { int err; struct snd_hwdep *hw; if ((err = snd_hwdep_new(chip->card, SND_VX_HWDEP_ID, 0, &hw)) < 0) return err; hw->iface = SNDRV_HWDEP_IFACE_VX; hw->private_data = chip; hw->ops.dsp_status = vx_hwdep_dsp_status; hw->ops.dsp_load = vx_hwdep_dsp_load; hw->exclusive = 1; sprintf(hw->name, "VX Loader (%s)", chip->card->driver); chip->hwdep = hw; return snd_card_register(chip->card); } /* exported */ void snd_vx_free_firmware(struct vx_core *chip) { #ifdef CONFIG_PM int i; for (i = 0; i < 4; i++) free_fw(chip->firmware[i]); #endif } #endif /* SND_VX_FW_LOADER */ EXPORT_SYMBOL(snd_vx_setup_firmware); EXPORT_SYMBOL(snd_vx_free_firmware);
gpl-2.0
akshay-shah/android_kernel_samsung_crater
sound/isa/wavefront/wavefront_fx.c
4025
6293
/* * Copyright (c) 1998-2002 by Paul Davis <pbd@op.net> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <asm/io.h> #include <linux/init.h> #include <linux/time.h> #include <linux/wait.h> #include <linux/slab.h> #include <linux/firmware.h> #include <sound/core.h> #include <sound/snd_wavefront.h> #include <sound/initval.h> /* Control bits for the Load Control Register */ #define FX_LSB_TRANSFER 0x01 /* transfer after DSP LSB byte written */ #define FX_MSB_TRANSFER 0x02 /* transfer after DSP MSB byte written */ #define FX_AUTO_INCR 0x04 /* auto-increment DSP address after transfer */ #define WAIT_IDLE 0xff static int wavefront_fx_idle (snd_wavefront_t *dev) { int i; unsigned int x = 0x80; for (i = 0; i < 1000; i++) { x = inb (dev->fx_status); if ((x & 0x80) == 0) { break; } } if (x & 0x80) { snd_printk ("FX device never idle.\n"); return 0; } return (1); } static void wavefront_fx_mute (snd_wavefront_t *dev, int onoff) { if (!wavefront_fx_idle(dev)) { return; } outb (onoff ? 
0x02 : 0x00, dev->fx_op); } static int wavefront_fx_memset (snd_wavefront_t *dev, int page, int addr, int cnt, unsigned short *data) { if (page < 0 || page > 7) { snd_printk ("FX memset: " "page must be >= 0 and <= 7\n"); return -(EINVAL); } if (addr < 0 || addr > 0x7f) { snd_printk ("FX memset: " "addr must be >= 0 and <= 7f\n"); return -(EINVAL); } if (cnt == 1) { outb (FX_LSB_TRANSFER, dev->fx_lcr); outb (page, dev->fx_dsp_page); outb (addr, dev->fx_dsp_addr); outb ((data[0] >> 8), dev->fx_dsp_msb); outb ((data[0] & 0xff), dev->fx_dsp_lsb); snd_printk ("FX: addr %d:%x set to 0x%x\n", page, addr, data[0]); } else { int i; outb (FX_AUTO_INCR|FX_LSB_TRANSFER, dev->fx_lcr); outb (page, dev->fx_dsp_page); outb (addr, dev->fx_dsp_addr); for (i = 0; i < cnt; i++) { outb ((data[i] >> 8), dev->fx_dsp_msb); outb ((data[i] & 0xff), dev->fx_dsp_lsb); if (!wavefront_fx_idle (dev)) { break; } } if (i != cnt) { snd_printk ("FX memset " "(0x%x, 0x%x, 0x%lx, %d) incomplete\n", page, addr, (unsigned long) data, cnt); return -(EIO); } } return 0; } int snd_wavefront_fx_detect (snd_wavefront_t *dev) { /* This is a crude check, but its the best one I have for now. Certainly on the Maui and the Tropez, wavefront_fx_idle() will report "never idle", which suggests that this test should work OK. 
*/ if (inb (dev->fx_status) & 0x80) { snd_printk ("Hmm, probably a Maui or Tropez.\n"); return -1; } return 0; } int snd_wavefront_fx_open (struct snd_hwdep *hw, struct file *file) { if (!try_module_get(hw->card->module)) return -EFAULT; file->private_data = hw; return 0; } int snd_wavefront_fx_release (struct snd_hwdep *hw, struct file *file) { module_put(hw->card->module); return 0; } int snd_wavefront_fx_ioctl (struct snd_hwdep *sdev, struct file *file, unsigned int cmd, unsigned long arg) { struct snd_card *card; snd_wavefront_card_t *acard; snd_wavefront_t *dev; wavefront_fx_info r; unsigned short *page_data = NULL; unsigned short *pd; int err = 0; card = sdev->card; if (snd_BUG_ON(!card)) return -ENODEV; if (snd_BUG_ON(!card->private_data)) return -ENODEV; acard = card->private_data; dev = &acard->wavefront; if (copy_from_user (&r, (void __user *)arg, sizeof (wavefront_fx_info))) return -EFAULT; switch (r.request) { case WFFX_MUTE: wavefront_fx_mute (dev, r.data[0]); return -EIO; case WFFX_MEMSET: if (r.data[2] <= 0) { snd_printk ("cannot write " "<= 0 bytes to FX\n"); return -EIO; } else if (r.data[2] == 1) { pd = (unsigned short *) &r.data[3]; } else { if (r.data[2] > 256) { snd_printk ("cannot write " "> 512 bytes to FX\n"); return -EIO; } page_data = memdup_user((unsigned char __user *) r.data[3], r.data[2] * sizeof(short)); if (IS_ERR(page_data)) return PTR_ERR(page_data); pd = page_data; } err = wavefront_fx_memset (dev, r.data[0], /* page */ r.data[1], /* addr */ r.data[2], /* cnt */ pd); kfree(page_data); break; default: snd_printk ("FX: ioctl %d not yet supported\n", r.request); return -ENOTTY; } return err; } /* YSS225 initialization. This code was developed using DOSEMU. The Turtle Beach SETUPSND utility was run with I/O tracing in DOSEMU enabled, and a reconstruction of the port I/O done, using the Yamaha faxback document as a guide to add more logic to the code. Its really pretty weird. 
This is the approach of just dumping the whole I/O sequence as a series of port/value pairs and a simple loop that outputs it. */ int __devinit snd_wavefront_fx_start (snd_wavefront_t *dev) { unsigned int i; int err; const struct firmware *firmware = NULL; if (dev->fx_initialized) return 0; err = request_firmware(&firmware, "yamaha/yss225_registers.bin", dev->card->dev); if (err < 0) { err = -1; goto out; } for (i = 0; i + 1 < firmware->size; i += 2) { if (firmware->data[i] >= 8 && firmware->data[i] < 16) { outb(firmware->data[i + 1], dev->base + firmware->data[i]); } else if (firmware->data[i] == WAIT_IDLE) { if (!wavefront_fx_idle(dev)) { err = -1; goto out; } } else { snd_printk(KERN_ERR "invalid address" " in register data\n"); err = -1; goto out; } } dev->fx_initialized = 1; err = 0; out: release_firmware(firmware); return err; } MODULE_FIRMWARE("yamaha/yss225_registers.bin");
gpl-2.0
hackerspace/rpi-linux
arch/arm/mach-mmp/clock-pxa168.c
4025
3011
#include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/list.h> #include <linux/io.h> #include <linux/clk.h> #include <mach/addr-map.h> #include "common.h" #include "clock.h" /* * APB clock register offsets for PXA168 */ #define APBC_UART1 APBC_REG(0x000) #define APBC_UART2 APBC_REG(0x004) #define APBC_GPIO APBC_REG(0x008) #define APBC_PWM1 APBC_REG(0x00c) #define APBC_PWM2 APBC_REG(0x010) #define APBC_PWM3 APBC_REG(0x014) #define APBC_PWM4 APBC_REG(0x018) #define APBC_RTC APBC_REG(0x028) #define APBC_TWSI0 APBC_REG(0x02c) #define APBC_KPC APBC_REG(0x030) #define APBC_TWSI1 APBC_REG(0x06c) #define APBC_UART3 APBC_REG(0x070) #define APBC_SSP1 APBC_REG(0x81c) #define APBC_SSP2 APBC_REG(0x820) #define APBC_SSP3 APBC_REG(0x84c) #define APBC_SSP4 APBC_REG(0x858) #define APBC_SSP5 APBC_REG(0x85c) #define APMU_NAND APMU_REG(0x060) #define APMU_LCD APMU_REG(0x04c) #define APMU_ETH APMU_REG(0x0fc) #define APMU_USB APMU_REG(0x05c) /* APB peripheral clocks */ static APBC_CLK(uart1, UART1, 1, 14745600); static APBC_CLK(uart2, UART2, 1, 14745600); static APBC_CLK(uart3, UART3, 1, 14745600); static APBC_CLK(twsi0, TWSI0, 1, 33000000); static APBC_CLK(twsi1, TWSI1, 1, 33000000); static APBC_CLK(pwm1, PWM1, 1, 13000000); static APBC_CLK(pwm2, PWM2, 1, 13000000); static APBC_CLK(pwm3, PWM3, 1, 13000000); static APBC_CLK(pwm4, PWM4, 1, 13000000); static APBC_CLK(ssp1, SSP1, 4, 0); static APBC_CLK(ssp2, SSP2, 4, 0); static APBC_CLK(ssp3, SSP3, 4, 0); static APBC_CLK(ssp4, SSP4, 4, 0); static APBC_CLK(ssp5, SSP5, 4, 0); static APBC_CLK(gpio, GPIO, 0, 13000000); static APBC_CLK(keypad, KPC, 0, 32000); static APBC_CLK(rtc, RTC, 8, 32768); static APMU_CLK(nand, NAND, 0x19b, 156000000); static APMU_CLK(lcd, LCD, 0x7f, 312000000); static APMU_CLK(eth, ETH, 0x09, 0); static APMU_CLK(usb, USB, 0x12, 0); /* device and clock bindings */ static struct clk_lookup pxa168_clkregs[] = { INIT_CLKREG(&clk_uart1, "pxa2xx-uart.0", NULL), INIT_CLKREG(&clk_uart2, 
"pxa2xx-uart.1", NULL), INIT_CLKREG(&clk_uart3, "pxa2xx-uart.2", NULL), INIT_CLKREG(&clk_twsi0, "pxa2xx-i2c.0", NULL), INIT_CLKREG(&clk_twsi1, "pxa2xx-i2c.1", NULL), INIT_CLKREG(&clk_pwm1, "pxa168-pwm.0", NULL), INIT_CLKREG(&clk_pwm2, "pxa168-pwm.1", NULL), INIT_CLKREG(&clk_pwm3, "pxa168-pwm.2", NULL), INIT_CLKREG(&clk_pwm4, "pxa168-pwm.3", NULL), INIT_CLKREG(&clk_ssp1, "pxa168-ssp.0", NULL), INIT_CLKREG(&clk_ssp2, "pxa168-ssp.1", NULL), INIT_CLKREG(&clk_ssp3, "pxa168-ssp.2", NULL), INIT_CLKREG(&clk_ssp4, "pxa168-ssp.3", NULL), INIT_CLKREG(&clk_ssp5, "pxa168-ssp.4", NULL), INIT_CLKREG(&clk_nand, "pxa3xx-nand", NULL), INIT_CLKREG(&clk_lcd, "pxa168-fb", NULL), INIT_CLKREG(&clk_gpio, "mmp-gpio", NULL), INIT_CLKREG(&clk_keypad, "pxa27x-keypad", NULL), INIT_CLKREG(&clk_eth, "pxa168-eth", "MFUCLK"), INIT_CLKREG(&clk_usb, NULL, "PXA168-USBCLK"), INIT_CLKREG(&clk_rtc, "sa1100-rtc", NULL), }; void __init pxa168_clk_init(void) { clkdev_add_table(ARRAY_AND_SIZE(pxa168_clkregs)); }
gpl-2.0
tiny4579/gingertiny-v2
drivers/base/driver.c
4281
6998
/* * driver.c - centralized device driver management * * Copyright (c) 2002-3 Patrick Mochel * Copyright (c) 2002-3 Open Source Development Labs * Copyright (c) 2007 Greg Kroah-Hartman <gregkh@suse.de> * Copyright (c) 2007 Novell Inc. * * This file is released under the GPLv2 * */ #include <linux/device.h> #include <linux/module.h> #include <linux/errno.h> #include <linux/slab.h> #include <linux/string.h> #include "base.h" static struct device *next_device(struct klist_iter *i) { struct klist_node *n = klist_next(i); struct device *dev = NULL; struct device_private *dev_prv; if (n) { dev_prv = to_device_private_driver(n); dev = dev_prv->device; } return dev; } /** * driver_for_each_device - Iterator for devices bound to a driver. * @drv: Driver we're iterating. * @start: Device to begin with * @data: Data to pass to the callback. * @fn: Function to call for each device. * * Iterate over the @drv's list of devices calling @fn for each one. */ int driver_for_each_device(struct device_driver *drv, struct device *start, void *data, int (*fn)(struct device *, void *)) { struct klist_iter i; struct device *dev; int error = 0; if (!drv) return -EINVAL; klist_iter_init_node(&drv->p->klist_devices, &i, start ? &start->p->knode_driver : NULL); while ((dev = next_device(&i)) && !error) error = fn(dev, data); klist_iter_exit(&i); return error; } EXPORT_SYMBOL_GPL(driver_for_each_device); /** * driver_find_device - device iterator for locating a particular device. * @drv: The device's driver * @start: Device to begin with * @data: Data to pass to match function * @match: Callback function to check device * * This is similar to the driver_for_each_device() function above, but * it returns a reference to a device that is 'found' for later use, as * determined by the @match callback. * * The callback should return 0 if the device doesn't match and non-zero * if it does. 
If the callback returns non-zero, this function will * return to the caller and not iterate over any more devices. */ struct device *driver_find_device(struct device_driver *drv, struct device *start, void *data, int (*match)(struct device *dev, void *data)) { struct klist_iter i; struct device *dev; if (!drv) return NULL; klist_iter_init_node(&drv->p->klist_devices, &i, (start ? &start->p->knode_driver : NULL)); while ((dev = next_device(&i))) if (match(dev, data) && get_device(dev)) break; klist_iter_exit(&i); return dev; } EXPORT_SYMBOL_GPL(driver_find_device); /** * driver_create_file - create sysfs file for driver. * @drv: driver. * @attr: driver attribute descriptor. */ int driver_create_file(struct device_driver *drv, const struct driver_attribute *attr) { int error; if (drv) error = sysfs_create_file(&drv->p->kobj, &attr->attr); else error = -EINVAL; return error; } EXPORT_SYMBOL_GPL(driver_create_file); /** * driver_remove_file - remove sysfs file for driver. * @drv: driver. * @attr: driver attribute descriptor. */ void driver_remove_file(struct device_driver *drv, const struct driver_attribute *attr) { if (drv) sysfs_remove_file(&drv->p->kobj, &attr->attr); } EXPORT_SYMBOL_GPL(driver_remove_file); /** * driver_add_kobj - add a kobject below the specified driver * @drv: requesting device driver * @kobj: kobject to add below this driver * @fmt: format string that names the kobject * * You really don't want to do this, this is only here due to one looney * iseries driver, go poke those developers if you are annoyed about * this... */ int driver_add_kobj(struct device_driver *drv, struct kobject *kobj, const char *fmt, ...) { va_list args; char *name; int ret; va_start(args, fmt); name = kvasprintf(GFP_KERNEL, fmt, args); va_end(args); if (!name) return -ENOMEM; ret = kobject_add(kobj, &drv->p->kobj, "%s", name); kfree(name); return ret; } EXPORT_SYMBOL_GPL(driver_add_kobj); /** * get_driver - increment driver reference count. * @drv: driver. 
*/ struct device_driver *get_driver(struct device_driver *drv) { if (drv) { struct driver_private *priv; struct kobject *kobj; kobj = kobject_get(&drv->p->kobj); priv = to_driver(kobj); return priv->driver; } return NULL; } EXPORT_SYMBOL_GPL(get_driver); /** * put_driver - decrement driver's refcount. * @drv: driver. */ void put_driver(struct device_driver *drv) { kobject_put(&drv->p->kobj); } EXPORT_SYMBOL_GPL(put_driver); static int driver_add_groups(struct device_driver *drv, const struct attribute_group **groups) { int error = 0; int i; if (groups) { for (i = 0; groups[i]; i++) { error = sysfs_create_group(&drv->p->kobj, groups[i]); if (error) { while (--i >= 0) sysfs_remove_group(&drv->p->kobj, groups[i]); break; } } } return error; } static void driver_remove_groups(struct device_driver *drv, const struct attribute_group **groups) { int i; if (groups) for (i = 0; groups[i]; i++) sysfs_remove_group(&drv->p->kobj, groups[i]); } /** * driver_register - register driver with bus * @drv: driver to register * * We pass off most of the work to the bus_add_driver() call, * since most of the things we have to do deal with the bus * structures. */ int driver_register(struct device_driver *drv) { int ret; struct device_driver *other; BUG_ON(!drv->bus->p); if ((drv->bus->probe && drv->probe) || (drv->bus->remove && drv->remove) || (drv->bus->shutdown && drv->shutdown)) printk(KERN_WARNING "Driver '%s' needs updating - please use " "bus_type methods\n", drv->name); other = driver_find(drv->name, drv->bus); if (other) { put_driver(other); printk(KERN_ERR "Error: Driver '%s' is already registered, " "aborting...\n", drv->name); return -EBUSY; } ret = bus_add_driver(drv); if (ret) return ret; ret = driver_add_groups(drv, drv->groups); if (ret) bus_remove_driver(drv); return ret; } EXPORT_SYMBOL_GPL(driver_register); /** * driver_unregister - remove driver from system. * @drv: driver. * * Again, we pass off most of the work to the bus-level call. 
*/ void driver_unregister(struct device_driver *drv) { if (!drv || !drv->p) { WARN(1, "Unexpected driver unregister!\n"); return; } driver_remove_groups(drv, drv->groups); bus_remove_driver(drv); } EXPORT_SYMBOL_GPL(driver_unregister); /** * driver_find - locate driver on a bus by its name. * @name: name of the driver. * @bus: bus to scan for the driver. * * Call kset_find_obj() to iterate over list of drivers on * a bus to find driver by name. Return driver if found. * * Note that kset_find_obj increments driver's reference count. */ struct device_driver *driver_find(const char *name, struct bus_type *bus) { struct kobject *k = kset_find_obj(bus->p->drivers_kset, name); struct driver_private *priv; if (k) { priv = to_driver(k); return priv->driver; } return NULL; } EXPORT_SYMBOL_GPL(driver_find);
gpl-2.0
CM11MOD/kernel_pantech_im900s
kernel/rcutiny.c
4537
10488
/* * Read-Copy Update mechanism for mutual exclusion, the Bloatwatch edition. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * * Copyright IBM Corporation, 2008 * * Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com> * * For detailed explanation of Read-Copy Update mechanism see - * Documentation/RCU */ #include <linux/completion.h> #include <linux/interrupt.h> #include <linux/notifier.h> #include <linux/rcupdate.h> #include <linux/kernel.h> #include <linux/export.h> #include <linux/mutex.h> #include <linux/sched.h> #include <linux/types.h> #include <linux/init.h> #include <linux/time.h> #include <linux/cpu.h> #include <linux/prefetch.h> #ifdef CONFIG_RCU_TRACE #include <trace/events/rcu.h> #endif /* #else #ifdef CONFIG_RCU_TRACE */ #include "rcu.h" /* Forward declarations for rcutiny_plugin.h. */ struct rcu_ctrlblk; static void invoke_rcu_callbacks(void); static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp); static void rcu_process_callbacks(struct softirq_action *unused); static void __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu), struct rcu_ctrlblk *rcp); #include "rcutiny_plugin.h" static long long rcu_dynticks_nesting = DYNTICK_TASK_EXIT_IDLE; /* Common code for rcu_idle_enter() and rcu_irq_exit(), see kernel/rcutree.c. 
*/ static void rcu_idle_enter_common(long long oldval) { if (rcu_dynticks_nesting) { RCU_TRACE(trace_rcu_dyntick("--=", oldval, rcu_dynticks_nesting)); return; } RCU_TRACE(trace_rcu_dyntick("Start", oldval, rcu_dynticks_nesting)); if (!is_idle_task(current)) { struct task_struct *idle = idle_task(smp_processor_id()); RCU_TRACE(trace_rcu_dyntick("Error on entry: not idle task", oldval, rcu_dynticks_nesting)); ftrace_dump(DUMP_ALL); WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s", current->pid, current->comm, idle->pid, idle->comm); /* must be idle task! */ } rcu_sched_qs(0); /* implies rcu_bh_qsctr_inc(0) */ } /* * Enter idle, which is an extended quiescent state if we have fully * entered that mode (i.e., if the new value of dynticks_nesting is zero). */ void rcu_idle_enter(void) { unsigned long flags; long long oldval; local_irq_save(flags); oldval = rcu_dynticks_nesting; WARN_ON_ONCE((rcu_dynticks_nesting & DYNTICK_TASK_NEST_MASK) == 0); if ((rcu_dynticks_nesting & DYNTICK_TASK_NEST_MASK) == DYNTICK_TASK_NEST_VALUE) rcu_dynticks_nesting = 0; else rcu_dynticks_nesting -= DYNTICK_TASK_NEST_VALUE; rcu_idle_enter_common(oldval); local_irq_restore(flags); } EXPORT_SYMBOL_GPL(rcu_idle_enter); /* * Exit an interrupt handler towards idle. */ void rcu_irq_exit(void) { unsigned long flags; long long oldval; local_irq_save(flags); oldval = rcu_dynticks_nesting; rcu_dynticks_nesting--; WARN_ON_ONCE(rcu_dynticks_nesting < 0); rcu_idle_enter_common(oldval); local_irq_restore(flags); } /* Common code for rcu_idle_exit() and rcu_irq_enter(), see kernel/rcutree.c. 
*/ static void rcu_idle_exit_common(long long oldval) { if (oldval) { RCU_TRACE(trace_rcu_dyntick("++=", oldval, rcu_dynticks_nesting)); return; } RCU_TRACE(trace_rcu_dyntick("End", oldval, rcu_dynticks_nesting)); if (!is_idle_task(current)) { struct task_struct *idle = idle_task(smp_processor_id()); RCU_TRACE(trace_rcu_dyntick("Error on exit: not idle task", oldval, rcu_dynticks_nesting)); ftrace_dump(DUMP_ALL); WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s", current->pid, current->comm, idle->pid, idle->comm); /* must be idle task! */ } } /* * Exit idle, so that we are no longer in an extended quiescent state. */ void rcu_idle_exit(void) { unsigned long flags; long long oldval; local_irq_save(flags); oldval = rcu_dynticks_nesting; WARN_ON_ONCE(rcu_dynticks_nesting < 0); if (rcu_dynticks_nesting & DYNTICK_TASK_NEST_MASK) rcu_dynticks_nesting += DYNTICK_TASK_NEST_VALUE; else rcu_dynticks_nesting = DYNTICK_TASK_EXIT_IDLE; rcu_idle_exit_common(oldval); local_irq_restore(flags); } EXPORT_SYMBOL_GPL(rcu_idle_exit); /* * Enter an interrupt handler, moving away from idle. */ void rcu_irq_enter(void) { unsigned long flags; long long oldval; local_irq_save(flags); oldval = rcu_dynticks_nesting; rcu_dynticks_nesting++; WARN_ON_ONCE(rcu_dynticks_nesting == 0); rcu_idle_exit_common(oldval); local_irq_restore(flags); } #ifdef CONFIG_PROVE_RCU /* * Test whether RCU thinks that the current CPU is idle. */ int rcu_is_cpu_idle(void) { return !rcu_dynticks_nesting; } EXPORT_SYMBOL(rcu_is_cpu_idle); #endif /* #ifdef CONFIG_PROVE_RCU */ /* * Test whether the current CPU was interrupted from idle. Nested * interrupts don't count, we must be running at the first interrupt * level. */ int rcu_is_cpu_rrupt_from_idle(void) { return rcu_dynticks_nesting <= 0; } /* * Helper function for rcu_sched_qs() and rcu_bh_qs(). * Also irqs are disabled to avoid confusion due to interrupt handlers * invoking call_rcu(). 
*/ static int rcu_qsctr_help(struct rcu_ctrlblk *rcp) { if (rcp->rcucblist != NULL && rcp->donetail != rcp->curtail) { rcp->donetail = rcp->curtail; return 1; } return 0; } /* * Record an rcu quiescent state. And an rcu_bh quiescent state while we * are at it, given that any rcu quiescent state is also an rcu_bh * quiescent state. Use "+" instead of "||" to defeat short circuiting. */ void rcu_sched_qs(int cpu) { unsigned long flags; local_irq_save(flags); if (rcu_qsctr_help(&rcu_sched_ctrlblk) + rcu_qsctr_help(&rcu_bh_ctrlblk)) invoke_rcu_callbacks(); local_irq_restore(flags); } /* * Record an rcu_bh quiescent state. */ void rcu_bh_qs(int cpu) { unsigned long flags; local_irq_save(flags); if (rcu_qsctr_help(&rcu_bh_ctrlblk)) invoke_rcu_callbacks(); local_irq_restore(flags); } /* * Check to see if the scheduling-clock interrupt came from an extended * quiescent state, and, if so, tell RCU about it. This function must * be called from hardirq context. It is normally called from the * scheduling-clock interrupt. */ void rcu_check_callbacks(int cpu, int user) { if (user || rcu_is_cpu_rrupt_from_idle()) rcu_sched_qs(cpu); else if (!in_softirq()) rcu_bh_qs(cpu); rcu_preempt_check_callbacks(); } /* * Invoke the RCU callbacks on the specified rcu_ctrlkblk structure * whose grace period has elapsed. */ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp) { char *rn = NULL; struct rcu_head *next, *list; unsigned long flags; RCU_TRACE(int cb_count = 0); /* If no RCU callbacks ready to invoke, just return. */ if (&rcp->rcucblist == rcp->donetail) { RCU_TRACE(trace_rcu_batch_start(rcp->name, 0, 0, -1)); RCU_TRACE(trace_rcu_batch_end(rcp->name, 0, ACCESS_ONCE(rcp->rcucblist), need_resched(), is_idle_task(current), rcu_is_callbacks_kthread())); return; } /* Move the ready-to-invoke callbacks to a local list. 
*/ local_irq_save(flags); RCU_TRACE(trace_rcu_batch_start(rcp->name, 0, rcp->qlen, -1)); list = rcp->rcucblist; rcp->rcucblist = *rcp->donetail; *rcp->donetail = NULL; if (rcp->curtail == rcp->donetail) rcp->curtail = &rcp->rcucblist; rcu_preempt_remove_callbacks(rcp); rcp->donetail = &rcp->rcucblist; local_irq_restore(flags); /* Invoke the callbacks on the local list. */ RCU_TRACE(rn = rcp->name); while (list) { next = list->next; prefetch(next); debug_rcu_head_unqueue(list); local_bh_disable(); __rcu_reclaim(rn, list); local_bh_enable(); list = next; RCU_TRACE(cb_count++); } RCU_TRACE(rcu_trace_sub_qlen(rcp, cb_count)); RCU_TRACE(trace_rcu_batch_end(rcp->name, cb_count, 0, need_resched(), is_idle_task(current), rcu_is_callbacks_kthread())); } static void rcu_process_callbacks(struct softirq_action *unused) { __rcu_process_callbacks(&rcu_sched_ctrlblk); __rcu_process_callbacks(&rcu_bh_ctrlblk); rcu_preempt_process_callbacks(); } /* * Wait for a grace period to elapse. But it is illegal to invoke * synchronize_sched() from within an RCU read-side critical section. * Therefore, any legal call to synchronize_sched() is a quiescent * state, and so on a UP system, synchronize_sched() need do nothing. * Ditto for synchronize_rcu_bh(). (But Lai Jiangshan points out the * benefits of doing might_sleep() to reduce latency.) * * Cool, huh? (Due to Josh Triplett.) * * But we want to make this a static inline later. The cond_resched() * currently makes this problematic. */ void synchronize_sched(void) { rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map) && !lock_is_held(&rcu_lock_map) && !lock_is_held(&rcu_sched_lock_map), "Illegal synchronize_sched() in RCU read-side critical section"); cond_resched(); } EXPORT_SYMBOL_GPL(synchronize_sched); /* * Helper function for call_rcu() and call_rcu_bh(). 
*/ static void __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu), struct rcu_ctrlblk *rcp) { unsigned long flags; debug_rcu_head_queue(head); head->func = func; head->next = NULL; local_irq_save(flags); *rcp->curtail = head; rcp->curtail = &head->next; RCU_TRACE(rcp->qlen++); local_irq_restore(flags); } /* * Post an RCU callback to be invoked after the end of an RCU-sched grace * period. But since we have but one CPU, that would be after any * quiescent state. */ void call_rcu_sched(struct rcu_head *head, void (*func)(struct rcu_head *rcu)) { __call_rcu(head, func, &rcu_sched_ctrlblk); } EXPORT_SYMBOL_GPL(call_rcu_sched); /* * Post an RCU bottom-half callback to be invoked after any subsequent * quiescent state. */ void call_rcu_bh(struct rcu_head *head, void (*func)(struct rcu_head *rcu)) { __call_rcu(head, func, &rcu_bh_ctrlblk); } EXPORT_SYMBOL_GPL(call_rcu_bh);
gpl-2.0
rbheromax/android_kernel_htc_a11
security/keys/trusted.c
4537
28643
/* * Copyright (C) 2010 IBM Corporation * * Author: * David Safford <safford@us.ibm.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, version 2 of the License. * * See Documentation/security/keys-trusted-encrypted.txt */ #include <linux/uaccess.h> #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/parser.h> #include <linux/string.h> #include <linux/err.h> #include <keys/user-type.h> #include <keys/trusted-type.h> #include <linux/key-type.h> #include <linux/rcupdate.h> #include <linux/crypto.h> #include <crypto/hash.h> #include <crypto/sha.h> #include <linux/capability.h> #include <linux/tpm.h> #include <linux/tpm_command.h> #include "trusted.h" static const char hmac_alg[] = "hmac(sha1)"; static const char hash_alg[] = "sha1"; struct sdesc { struct shash_desc shash; char ctx[]; }; static struct crypto_shash *hashalg; static struct crypto_shash *hmacalg; static struct sdesc *init_sdesc(struct crypto_shash *alg) { struct sdesc *sdesc; int size; size = sizeof(struct shash_desc) + crypto_shash_descsize(alg); sdesc = kmalloc(size, GFP_KERNEL); if (!sdesc) return ERR_PTR(-ENOMEM); sdesc->shash.tfm = alg; sdesc->shash.flags = 0x0; return sdesc; } static int TSS_sha1(const unsigned char *data, unsigned int datalen, unsigned char *digest) { struct sdesc *sdesc; int ret; sdesc = init_sdesc(hashalg); if (IS_ERR(sdesc)) { pr_info("trusted_key: can't alloc %s\n", hash_alg); return PTR_ERR(sdesc); } ret = crypto_shash_digest(&sdesc->shash, data, datalen, digest); kfree(sdesc); return ret; } static int TSS_rawhmac(unsigned char *digest, const unsigned char *key, unsigned int keylen, ...) 
{ struct sdesc *sdesc; va_list argp; unsigned int dlen; unsigned char *data; int ret; sdesc = init_sdesc(hmacalg); if (IS_ERR(sdesc)) { pr_info("trusted_key: can't alloc %s\n", hmac_alg); return PTR_ERR(sdesc); } ret = crypto_shash_setkey(hmacalg, key, keylen); if (ret < 0) goto out; ret = crypto_shash_init(&sdesc->shash); if (ret < 0) goto out; va_start(argp, keylen); for (;;) { dlen = va_arg(argp, unsigned int); if (dlen == 0) break; data = va_arg(argp, unsigned char *); if (data == NULL) { ret = -EINVAL; break; } ret = crypto_shash_update(&sdesc->shash, data, dlen); if (ret < 0) break; } va_end(argp); if (!ret) ret = crypto_shash_final(&sdesc->shash, digest); out: kfree(sdesc); return ret; } /* * calculate authorization info fields to send to TPM */ static int TSS_authhmac(unsigned char *digest, const unsigned char *key, unsigned int keylen, unsigned char *h1, unsigned char *h2, unsigned char h3, ...) { unsigned char paramdigest[SHA1_DIGEST_SIZE]; struct sdesc *sdesc; unsigned int dlen; unsigned char *data; unsigned char c; int ret; va_list argp; sdesc = init_sdesc(hashalg); if (IS_ERR(sdesc)) { pr_info("trusted_key: can't alloc %s\n", hash_alg); return PTR_ERR(sdesc); } c = h3; ret = crypto_shash_init(&sdesc->shash); if (ret < 0) goto out; va_start(argp, h3); for (;;) { dlen = va_arg(argp, unsigned int); if (dlen == 0) break; data = va_arg(argp, unsigned char *); if (!data) { ret = -EINVAL; break; } ret = crypto_shash_update(&sdesc->shash, data, dlen); if (ret < 0) break; } va_end(argp); if (!ret) ret = crypto_shash_final(&sdesc->shash, paramdigest); if (!ret) ret = TSS_rawhmac(digest, key, keylen, SHA1_DIGEST_SIZE, paramdigest, TPM_NONCE_SIZE, h1, TPM_NONCE_SIZE, h2, 1, &c, 0, 0); out: kfree(sdesc); return ret; } /* * verify the AUTH1_COMMAND (Seal) result from TPM */ static int TSS_checkhmac1(unsigned char *buffer, const uint32_t command, const unsigned char *ononce, const unsigned char *key, unsigned int keylen, ...) 
{ uint32_t bufsize; uint16_t tag; uint32_t ordinal; uint32_t result; unsigned char *enonce; unsigned char *continueflag; unsigned char *authdata; unsigned char testhmac[SHA1_DIGEST_SIZE]; unsigned char paramdigest[SHA1_DIGEST_SIZE]; struct sdesc *sdesc; unsigned int dlen; unsigned int dpos; va_list argp; int ret; bufsize = LOAD32(buffer, TPM_SIZE_OFFSET); tag = LOAD16(buffer, 0); ordinal = command; result = LOAD32N(buffer, TPM_RETURN_OFFSET); if (tag == TPM_TAG_RSP_COMMAND) return 0; if (tag != TPM_TAG_RSP_AUTH1_COMMAND) return -EINVAL; authdata = buffer + bufsize - SHA1_DIGEST_SIZE; continueflag = authdata - 1; enonce = continueflag - TPM_NONCE_SIZE; sdesc = init_sdesc(hashalg); if (IS_ERR(sdesc)) { pr_info("trusted_key: can't alloc %s\n", hash_alg); return PTR_ERR(sdesc); } ret = crypto_shash_init(&sdesc->shash); if (ret < 0) goto out; ret = crypto_shash_update(&sdesc->shash, (const u8 *)&result, sizeof result); if (ret < 0) goto out; ret = crypto_shash_update(&sdesc->shash, (const u8 *)&ordinal, sizeof ordinal); if (ret < 0) goto out; va_start(argp, keylen); for (;;) { dlen = va_arg(argp, unsigned int); if (dlen == 0) break; dpos = va_arg(argp, unsigned int); ret = crypto_shash_update(&sdesc->shash, buffer + dpos, dlen); if (ret < 0) break; } va_end(argp); if (!ret) ret = crypto_shash_final(&sdesc->shash, paramdigest); if (ret < 0) goto out; ret = TSS_rawhmac(testhmac, key, keylen, SHA1_DIGEST_SIZE, paramdigest, TPM_NONCE_SIZE, enonce, TPM_NONCE_SIZE, ononce, 1, continueflag, 0, 0); if (ret < 0) goto out; if (memcmp(testhmac, authdata, SHA1_DIGEST_SIZE)) ret = -EINVAL; out: kfree(sdesc); return ret; } /* * verify the AUTH2_COMMAND (unseal) result from TPM */ static int TSS_checkhmac2(unsigned char *buffer, const uint32_t command, const unsigned char *ononce, const unsigned char *key1, unsigned int keylen1, const unsigned char *key2, unsigned int keylen2, ...) 
{ uint32_t bufsize; uint16_t tag; uint32_t ordinal; uint32_t result; unsigned char *enonce1; unsigned char *continueflag1; unsigned char *authdata1; unsigned char *enonce2; unsigned char *continueflag2; unsigned char *authdata2; unsigned char testhmac1[SHA1_DIGEST_SIZE]; unsigned char testhmac2[SHA1_DIGEST_SIZE]; unsigned char paramdigest[SHA1_DIGEST_SIZE]; struct sdesc *sdesc; unsigned int dlen; unsigned int dpos; va_list argp; int ret; bufsize = LOAD32(buffer, TPM_SIZE_OFFSET); tag = LOAD16(buffer, 0); ordinal = command; result = LOAD32N(buffer, TPM_RETURN_OFFSET); if (tag == TPM_TAG_RSP_COMMAND) return 0; if (tag != TPM_TAG_RSP_AUTH2_COMMAND) return -EINVAL; authdata1 = buffer + bufsize - (SHA1_DIGEST_SIZE + 1 + SHA1_DIGEST_SIZE + SHA1_DIGEST_SIZE); authdata2 = buffer + bufsize - (SHA1_DIGEST_SIZE); continueflag1 = authdata1 - 1; continueflag2 = authdata2 - 1; enonce1 = continueflag1 - TPM_NONCE_SIZE; enonce2 = continueflag2 - TPM_NONCE_SIZE; sdesc = init_sdesc(hashalg); if (IS_ERR(sdesc)) { pr_info("trusted_key: can't alloc %s\n", hash_alg); return PTR_ERR(sdesc); } ret = crypto_shash_init(&sdesc->shash); if (ret < 0) goto out; ret = crypto_shash_update(&sdesc->shash, (const u8 *)&result, sizeof result); if (ret < 0) goto out; ret = crypto_shash_update(&sdesc->shash, (const u8 *)&ordinal, sizeof ordinal); if (ret < 0) goto out; va_start(argp, keylen2); for (;;) { dlen = va_arg(argp, unsigned int); if (dlen == 0) break; dpos = va_arg(argp, unsigned int); ret = crypto_shash_update(&sdesc->shash, buffer + dpos, dlen); if (ret < 0) break; } va_end(argp); if (!ret) ret = crypto_shash_final(&sdesc->shash, paramdigest); if (ret < 0) goto out; ret = TSS_rawhmac(testhmac1, key1, keylen1, SHA1_DIGEST_SIZE, paramdigest, TPM_NONCE_SIZE, enonce1, TPM_NONCE_SIZE, ononce, 1, continueflag1, 0, 0); if (ret < 0) goto out; if (memcmp(testhmac1, authdata1, SHA1_DIGEST_SIZE)) { ret = -EINVAL; goto out; } ret = TSS_rawhmac(testhmac2, key2, keylen2, SHA1_DIGEST_SIZE, paramdigest, 
TPM_NONCE_SIZE, enonce2, TPM_NONCE_SIZE, ononce, 1, continueflag2, 0, 0); if (ret < 0) goto out; if (memcmp(testhmac2, authdata2, SHA1_DIGEST_SIZE)) ret = -EINVAL; out: kfree(sdesc); return ret; } /* * For key specific tpm requests, we will generate and send our * own TPM command packets using the drivers send function. */ static int trusted_tpm_send(const u32 chip_num, unsigned char *cmd, size_t buflen) { int rc; dump_tpm_buf(cmd); rc = tpm_send(chip_num, cmd, buflen); dump_tpm_buf(cmd); if (rc > 0) /* Can't return positive return codes values to keyctl */ rc = -EPERM; return rc; } /* * get a random value from TPM */ static int tpm_get_random(struct tpm_buf *tb, unsigned char *buf, uint32_t len) { int ret; INIT_BUF(tb); store16(tb, TPM_TAG_RQU_COMMAND); store32(tb, TPM_GETRANDOM_SIZE); store32(tb, TPM_ORD_GETRANDOM); store32(tb, len); ret = trusted_tpm_send(TPM_ANY_NUM, tb->data, sizeof tb->data); if (!ret) memcpy(buf, tb->data + TPM_GETRANDOM_SIZE, len); return ret; } static int my_get_random(unsigned char *buf, int len) { struct tpm_buf *tb; int ret; tb = kmalloc(sizeof *tb, GFP_KERNEL); if (!tb) return -ENOMEM; ret = tpm_get_random(tb, buf, len); kfree(tb); return ret; } /* * Lock a trusted key, by extending a selected PCR. * * Prevents a trusted key that is sealed to PCRs from being accessed. * This uses the tpm driver's extend function. */ static int pcrlock(const int pcrnum) { unsigned char hash[SHA1_DIGEST_SIZE]; int ret; if (!capable(CAP_SYS_ADMIN)) return -EPERM; ret = my_get_random(hash, SHA1_DIGEST_SIZE); if (ret < 0) return ret; return tpm_pcr_extend(TPM_ANY_NUM, pcrnum, hash) ? 
-EINVAL : 0; } /* * Create an object specific authorisation protocol (OSAP) session */ static int osap(struct tpm_buf *tb, struct osapsess *s, const unsigned char *key, uint16_t type, uint32_t handle) { unsigned char enonce[TPM_NONCE_SIZE]; unsigned char ononce[TPM_NONCE_SIZE]; int ret; ret = tpm_get_random(tb, ononce, TPM_NONCE_SIZE); if (ret < 0) return ret; INIT_BUF(tb); store16(tb, TPM_TAG_RQU_COMMAND); store32(tb, TPM_OSAP_SIZE); store32(tb, TPM_ORD_OSAP); store16(tb, type); store32(tb, handle); storebytes(tb, ononce, TPM_NONCE_SIZE); ret = trusted_tpm_send(TPM_ANY_NUM, tb->data, MAX_BUF_SIZE); if (ret < 0) return ret; s->handle = LOAD32(tb->data, TPM_DATA_OFFSET); memcpy(s->enonce, &(tb->data[TPM_DATA_OFFSET + sizeof(uint32_t)]), TPM_NONCE_SIZE); memcpy(enonce, &(tb->data[TPM_DATA_OFFSET + sizeof(uint32_t) + TPM_NONCE_SIZE]), TPM_NONCE_SIZE); return TSS_rawhmac(s->secret, key, SHA1_DIGEST_SIZE, TPM_NONCE_SIZE, enonce, TPM_NONCE_SIZE, ononce, 0, 0); } /* * Create an object independent authorisation protocol (oiap) session */ static int oiap(struct tpm_buf *tb, uint32_t *handle, unsigned char *nonce) { int ret; INIT_BUF(tb); store16(tb, TPM_TAG_RQU_COMMAND); store32(tb, TPM_OIAP_SIZE); store32(tb, TPM_ORD_OIAP); ret = trusted_tpm_send(TPM_ANY_NUM, tb->data, MAX_BUF_SIZE); if (ret < 0) return ret; *handle = LOAD32(tb->data, TPM_DATA_OFFSET); memcpy(nonce, &tb->data[TPM_DATA_OFFSET + sizeof(uint32_t)], TPM_NONCE_SIZE); return 0; } struct tpm_digests { unsigned char encauth[SHA1_DIGEST_SIZE]; unsigned char pubauth[SHA1_DIGEST_SIZE]; unsigned char xorwork[SHA1_DIGEST_SIZE * 2]; unsigned char xorhash[SHA1_DIGEST_SIZE]; unsigned char nonceodd[TPM_NONCE_SIZE]; }; /* * Have the TPM seal(encrypt) the trusted key, possibly based on * Platform Configuration Registers (PCRs). AUTH1 for sealing key. 
*/ static int tpm_seal(struct tpm_buf *tb, uint16_t keytype, uint32_t keyhandle, const unsigned char *keyauth, const unsigned char *data, uint32_t datalen, unsigned char *blob, uint32_t *bloblen, const unsigned char *blobauth, const unsigned char *pcrinfo, uint32_t pcrinfosize) { struct osapsess sess; struct tpm_digests *td; unsigned char cont; uint32_t ordinal; uint32_t pcrsize; uint32_t datsize; int sealinfosize; int encdatasize; int storedsize; int ret; int i; /* alloc some work space for all the hashes */ td = kmalloc(sizeof *td, GFP_KERNEL); if (!td) return -ENOMEM; /* get session for sealing key */ ret = osap(tb, &sess, keyauth, keytype, keyhandle); if (ret < 0) goto out; dump_sess(&sess); /* calculate encrypted authorization value */ memcpy(td->xorwork, sess.secret, SHA1_DIGEST_SIZE); memcpy(td->xorwork + SHA1_DIGEST_SIZE, sess.enonce, SHA1_DIGEST_SIZE); ret = TSS_sha1(td->xorwork, SHA1_DIGEST_SIZE * 2, td->xorhash); if (ret < 0) goto out; ret = tpm_get_random(tb, td->nonceodd, TPM_NONCE_SIZE); if (ret < 0) goto out; ordinal = htonl(TPM_ORD_SEAL); datsize = htonl(datalen); pcrsize = htonl(pcrinfosize); cont = 0; /* encrypt data authorization key */ for (i = 0; i < SHA1_DIGEST_SIZE; ++i) td->encauth[i] = td->xorhash[i] ^ blobauth[i]; /* calculate authorization HMAC value */ if (pcrinfosize == 0) { /* no pcr info specified */ ret = TSS_authhmac(td->pubauth, sess.secret, SHA1_DIGEST_SIZE, sess.enonce, td->nonceodd, cont, sizeof(uint32_t), &ordinal, SHA1_DIGEST_SIZE, td->encauth, sizeof(uint32_t), &pcrsize, sizeof(uint32_t), &datsize, datalen, data, 0, 0); } else { /* pcr info specified */ ret = TSS_authhmac(td->pubauth, sess.secret, SHA1_DIGEST_SIZE, sess.enonce, td->nonceodd, cont, sizeof(uint32_t), &ordinal, SHA1_DIGEST_SIZE, td->encauth, sizeof(uint32_t), &pcrsize, pcrinfosize, pcrinfo, sizeof(uint32_t), &datsize, datalen, data, 0, 0); } if (ret < 0) goto out; /* build and send the TPM request packet */ INIT_BUF(tb); store16(tb, TPM_TAG_RQU_AUTH1_COMMAND); 
store32(tb, TPM_SEAL_SIZE + pcrinfosize + datalen); store32(tb, TPM_ORD_SEAL); store32(tb, keyhandle); storebytes(tb, td->encauth, SHA1_DIGEST_SIZE); store32(tb, pcrinfosize); storebytes(tb, pcrinfo, pcrinfosize); store32(tb, datalen); storebytes(tb, data, datalen); store32(tb, sess.handle); storebytes(tb, td->nonceodd, TPM_NONCE_SIZE); store8(tb, cont); storebytes(tb, td->pubauth, SHA1_DIGEST_SIZE); ret = trusted_tpm_send(TPM_ANY_NUM, tb->data, MAX_BUF_SIZE); if (ret < 0) goto out; /* calculate the size of the returned Blob */ sealinfosize = LOAD32(tb->data, TPM_DATA_OFFSET + sizeof(uint32_t)); encdatasize = LOAD32(tb->data, TPM_DATA_OFFSET + sizeof(uint32_t) + sizeof(uint32_t) + sealinfosize); storedsize = sizeof(uint32_t) + sizeof(uint32_t) + sealinfosize + sizeof(uint32_t) + encdatasize; /* check the HMAC in the response */ ret = TSS_checkhmac1(tb->data, ordinal, td->nonceodd, sess.secret, SHA1_DIGEST_SIZE, storedsize, TPM_DATA_OFFSET, 0, 0); /* copy the returned blob to caller */ if (!ret) { memcpy(blob, tb->data + TPM_DATA_OFFSET, storedsize); *bloblen = storedsize; } out: kfree(td); return ret; } /* * use the AUTH2_COMMAND form of unseal, to authorize both key and blob */ static int tpm_unseal(struct tpm_buf *tb, uint32_t keyhandle, const unsigned char *keyauth, const unsigned char *blob, int bloblen, const unsigned char *blobauth, unsigned char *data, unsigned int *datalen) { unsigned char nonceodd[TPM_NONCE_SIZE]; unsigned char enonce1[TPM_NONCE_SIZE]; unsigned char enonce2[TPM_NONCE_SIZE]; unsigned char authdata1[SHA1_DIGEST_SIZE]; unsigned char authdata2[SHA1_DIGEST_SIZE]; uint32_t authhandle1 = 0; uint32_t authhandle2 = 0; unsigned char cont = 0; uint32_t ordinal; uint32_t keyhndl; int ret; /* sessions for unsealing key and data */ ret = oiap(tb, &authhandle1, enonce1); if (ret < 0) { pr_info("trusted_key: oiap failed (%d)\n", ret); return ret; } ret = oiap(tb, &authhandle2, enonce2); if (ret < 0) { pr_info("trusted_key: oiap failed (%d)\n", ret); 
return ret; } ordinal = htonl(TPM_ORD_UNSEAL); keyhndl = htonl(SRKHANDLE); ret = tpm_get_random(tb, nonceodd, TPM_NONCE_SIZE); if (ret < 0) { pr_info("trusted_key: tpm_get_random failed (%d)\n", ret); return ret; } ret = TSS_authhmac(authdata1, keyauth, TPM_NONCE_SIZE, enonce1, nonceodd, cont, sizeof(uint32_t), &ordinal, bloblen, blob, 0, 0); if (ret < 0) return ret; ret = TSS_authhmac(authdata2, blobauth, TPM_NONCE_SIZE, enonce2, nonceodd, cont, sizeof(uint32_t), &ordinal, bloblen, blob, 0, 0); if (ret < 0) return ret; /* build and send TPM request packet */ INIT_BUF(tb); store16(tb, TPM_TAG_RQU_AUTH2_COMMAND); store32(tb, TPM_UNSEAL_SIZE + bloblen); store32(tb, TPM_ORD_UNSEAL); store32(tb, keyhandle); storebytes(tb, blob, bloblen); store32(tb, authhandle1); storebytes(tb, nonceodd, TPM_NONCE_SIZE); store8(tb, cont); storebytes(tb, authdata1, SHA1_DIGEST_SIZE); store32(tb, authhandle2); storebytes(tb, nonceodd, TPM_NONCE_SIZE); store8(tb, cont); storebytes(tb, authdata2, SHA1_DIGEST_SIZE); ret = trusted_tpm_send(TPM_ANY_NUM, tb->data, MAX_BUF_SIZE); if (ret < 0) { pr_info("trusted_key: authhmac failed (%d)\n", ret); return ret; } *datalen = LOAD32(tb->data, TPM_DATA_OFFSET); ret = TSS_checkhmac2(tb->data, ordinal, nonceodd, keyauth, SHA1_DIGEST_SIZE, blobauth, SHA1_DIGEST_SIZE, sizeof(uint32_t), TPM_DATA_OFFSET, *datalen, TPM_DATA_OFFSET + sizeof(uint32_t), 0, 0); if (ret < 0) { pr_info("trusted_key: TSS_checkhmac2 failed (%d)\n", ret); return ret; } memcpy(data, tb->data + TPM_DATA_OFFSET + sizeof(uint32_t), *datalen); return 0; } /* * Have the TPM seal(encrypt) the symmetric key */ static int key_seal(struct trusted_key_payload *p, struct trusted_key_options *o) { struct tpm_buf *tb; int ret; tb = kzalloc(sizeof *tb, GFP_KERNEL); if (!tb) return -ENOMEM; /* include migratable flag at end of sealed key */ p->key[p->key_len] = p->migratable; ret = tpm_seal(tb, o->keytype, o->keyhandle, o->keyauth, p->key, p->key_len + 1, p->blob, &p->blob_len, o->blobauth, 
o->pcrinfo, o->pcrinfo_len); if (ret < 0) pr_info("trusted_key: srkseal failed (%d)\n", ret); kfree(tb); return ret; } /* * Have the TPM unseal(decrypt) the symmetric key */ static int key_unseal(struct trusted_key_payload *p, struct trusted_key_options *o) { struct tpm_buf *tb; int ret; tb = kzalloc(sizeof *tb, GFP_KERNEL); if (!tb) return -ENOMEM; ret = tpm_unseal(tb, o->keyhandle, o->keyauth, p->blob, p->blob_len, o->blobauth, p->key, &p->key_len); if (ret < 0) pr_info("trusted_key: srkunseal failed (%d)\n", ret); else /* pull migratable flag out of sealed key */ p->migratable = p->key[--p->key_len]; kfree(tb); return ret; } enum { Opt_err = -1, Opt_new, Opt_load, Opt_update, Opt_keyhandle, Opt_keyauth, Opt_blobauth, Opt_pcrinfo, Opt_pcrlock, Opt_migratable }; static const match_table_t key_tokens = { {Opt_new, "new"}, {Opt_load, "load"}, {Opt_update, "update"}, {Opt_keyhandle, "keyhandle=%s"}, {Opt_keyauth, "keyauth=%s"}, {Opt_blobauth, "blobauth=%s"}, {Opt_pcrinfo, "pcrinfo=%s"}, {Opt_pcrlock, "pcrlock=%s"}, {Opt_migratable, "migratable=%s"}, {Opt_err, NULL} }; /* can have zero or more token= options */ static int getoptions(char *c, struct trusted_key_payload *pay, struct trusted_key_options *opt) { substring_t args[MAX_OPT_ARGS]; char *p = c; int token; int res; unsigned long handle; unsigned long lock; while ((p = strsep(&c, " \t"))) { if (*p == '\0' || *p == ' ' || *p == '\t') continue; token = match_token(p, key_tokens, args); switch (token) { case Opt_pcrinfo: opt->pcrinfo_len = strlen(args[0].from) / 2; if (opt->pcrinfo_len > MAX_PCRINFO_SIZE) return -EINVAL; res = hex2bin(opt->pcrinfo, args[0].from, opt->pcrinfo_len); if (res < 0) return -EINVAL; break; case Opt_keyhandle: res = strict_strtoul(args[0].from, 16, &handle); if (res < 0) return -EINVAL; opt->keytype = SEAL_keytype; opt->keyhandle = handle; break; case Opt_keyauth: if (strlen(args[0].from) != 2 * SHA1_DIGEST_SIZE) return -EINVAL; res = hex2bin(opt->keyauth, args[0].from, SHA1_DIGEST_SIZE); 
if (res < 0) return -EINVAL; break; case Opt_blobauth: if (strlen(args[0].from) != 2 * SHA1_DIGEST_SIZE) return -EINVAL; res = hex2bin(opt->blobauth, args[0].from, SHA1_DIGEST_SIZE); if (res < 0) return -EINVAL; break; case Opt_migratable: if (*args[0].from == '0') pay->migratable = 0; else return -EINVAL; break; case Opt_pcrlock: res = strict_strtoul(args[0].from, 10, &lock); if (res < 0) return -EINVAL; opt->pcrlock = lock; break; default: return -EINVAL; } } return 0; } /* * datablob_parse - parse the keyctl data and fill in the * payload and options structures * * On success returns 0, otherwise -EINVAL. */ static int datablob_parse(char *datablob, struct trusted_key_payload *p, struct trusted_key_options *o) { substring_t args[MAX_OPT_ARGS]; long keylen; int ret = -EINVAL; int key_cmd; char *c; /* main command */ c = strsep(&datablob, " \t"); if (!c) return -EINVAL; key_cmd = match_token(c, key_tokens, args); switch (key_cmd) { case Opt_new: /* first argument is key size */ c = strsep(&datablob, " \t"); if (!c) return -EINVAL; ret = strict_strtol(c, 10, &keylen); if (ret < 0 || keylen < MIN_KEY_SIZE || keylen > MAX_KEY_SIZE) return -EINVAL; p->key_len = keylen; ret = getoptions(datablob, p, o); if (ret < 0) return ret; ret = Opt_new; break; case Opt_load: /* first argument is sealed blob */ c = strsep(&datablob, " \t"); if (!c) return -EINVAL; p->blob_len = strlen(c) / 2; if (p->blob_len > MAX_BLOB_SIZE) return -EINVAL; ret = hex2bin(p->blob, c, p->blob_len); if (ret < 0) return -EINVAL; ret = getoptions(datablob, p, o); if (ret < 0) return ret; ret = Opt_load; break; case Opt_update: /* all arguments are options */ ret = getoptions(datablob, p, o); if (ret < 0) return ret; ret = Opt_update; break; case Opt_err: return -EINVAL; break; } return ret; } static struct trusted_key_options *trusted_options_alloc(void) { struct trusted_key_options *options; options = kzalloc(sizeof *options, GFP_KERNEL); if (options) { /* set any non-zero defaults */ options->keytype 
= SRK_keytype; options->keyhandle = SRKHANDLE; } return options; } static struct trusted_key_payload *trusted_payload_alloc(struct key *key) { struct trusted_key_payload *p = NULL; int ret; ret = key_payload_reserve(key, sizeof *p); if (ret < 0) return p; p = kzalloc(sizeof *p, GFP_KERNEL); if (p) p->migratable = 1; /* migratable by default */ return p; } /* * trusted_instantiate - create a new trusted key * * Unseal an existing trusted blob or, for a new key, get a * random key, then seal and create a trusted key-type key, * adding it to the specified keyring. * * On success, return 0. Otherwise return errno. */ static int trusted_instantiate(struct key *key, const void *data, size_t datalen) { struct trusted_key_payload *payload = NULL; struct trusted_key_options *options = NULL; char *datablob; int ret = 0; int key_cmd; if (datalen <= 0 || datalen > 32767 || !data) return -EINVAL; datablob = kmalloc(datalen + 1, GFP_KERNEL); if (!datablob) return -ENOMEM; memcpy(datablob, data, datalen); datablob[datalen] = '\0'; options = trusted_options_alloc(); if (!options) { ret = -ENOMEM; goto out; } payload = trusted_payload_alloc(key); if (!payload) { ret = -ENOMEM; goto out; } key_cmd = datablob_parse(datablob, payload, options); if (key_cmd < 0) { ret = key_cmd; goto out; } dump_payload(payload); dump_options(options); switch (key_cmd) { case Opt_load: ret = key_unseal(payload, options); dump_payload(payload); dump_options(options); if (ret < 0) pr_info("trusted_key: key_unseal failed (%d)\n", ret); break; case Opt_new: ret = my_get_random(payload->key, payload->key_len); if (ret < 0) { pr_info("trusted_key: key_create failed (%d)\n", ret); goto out; } ret = key_seal(payload, options); if (ret < 0) pr_info("trusted_key: key_seal failed (%d)\n", ret); break; default: ret = -EINVAL; goto out; } if (!ret && options->pcrlock) ret = pcrlock(options->pcrlock); out: kfree(datablob); kfree(options); if (!ret) rcu_assign_keypointer(key, payload); else kfree(payload); return 
ret; } static void trusted_rcu_free(struct rcu_head *rcu) { struct trusted_key_payload *p; p = container_of(rcu, struct trusted_key_payload, rcu); memset(p->key, 0, p->key_len); kfree(p); } /* * trusted_update - reseal an existing key with new PCR values */ static int trusted_update(struct key *key, const void *data, size_t datalen) { struct trusted_key_payload *p = key->payload.data; struct trusted_key_payload *new_p; struct trusted_key_options *new_o; char *datablob; int ret = 0; if (!p->migratable) return -EPERM; if (datalen <= 0 || datalen > 32767 || !data) return -EINVAL; datablob = kmalloc(datalen + 1, GFP_KERNEL); if (!datablob) return -ENOMEM; new_o = trusted_options_alloc(); if (!new_o) { ret = -ENOMEM; goto out; } new_p = trusted_payload_alloc(key); if (!new_p) { ret = -ENOMEM; goto out; } memcpy(datablob, data, datalen); datablob[datalen] = '\0'; ret = datablob_parse(datablob, new_p, new_o); if (ret != Opt_update) { ret = -EINVAL; kfree(new_p); goto out; } /* copy old key values, and reseal with new pcrs */ new_p->migratable = p->migratable; new_p->key_len = p->key_len; memcpy(new_p->key, p->key, p->key_len); dump_payload(p); dump_payload(new_p); ret = key_seal(new_p, new_o); if (ret < 0) { pr_info("trusted_key: key_seal failed (%d)\n", ret); kfree(new_p); goto out; } if (new_o->pcrlock) { ret = pcrlock(new_o->pcrlock); if (ret < 0) { pr_info("trusted_key: pcrlock failed (%d)\n", ret); kfree(new_p); goto out; } } rcu_assign_keypointer(key, new_p); call_rcu(&p->rcu, trusted_rcu_free); out: kfree(datablob); kfree(new_o); return ret; } /* * trusted_read - copy the sealed blob data to userspace in hex. * On success, return to userspace the trusted key datablob size. 
*/ static long trusted_read(const struct key *key, char __user *buffer, size_t buflen) { struct trusted_key_payload *p; char *ascii_buf; char *bufp; int i; p = rcu_dereference_key(key); if (!p) return -EINVAL; if (!buffer || buflen <= 0) return 2 * p->blob_len; ascii_buf = kmalloc(2 * p->blob_len, GFP_KERNEL); if (!ascii_buf) return -ENOMEM; bufp = ascii_buf; for (i = 0; i < p->blob_len; i++) bufp = hex_byte_pack(bufp, p->blob[i]); if ((copy_to_user(buffer, ascii_buf, 2 * p->blob_len)) != 0) { kfree(ascii_buf); return -EFAULT; } kfree(ascii_buf); return 2 * p->blob_len; } /* * trusted_destroy - before freeing the key, clear the decrypted data */ static void trusted_destroy(struct key *key) { struct trusted_key_payload *p = key->payload.data; if (!p) return; memset(p->key, 0, p->key_len); kfree(key->payload.data); } struct key_type key_type_trusted = { .name = "trusted", .instantiate = trusted_instantiate, .update = trusted_update, .match = user_match, .destroy = trusted_destroy, .describe = user_describe, .read = trusted_read, }; EXPORT_SYMBOL_GPL(key_type_trusted); static void trusted_shash_release(void) { if (hashalg) crypto_free_shash(hashalg); if (hmacalg) crypto_free_shash(hmacalg); } static int __init trusted_shash_alloc(void) { int ret; hmacalg = crypto_alloc_shash(hmac_alg, 0, CRYPTO_ALG_ASYNC); if (IS_ERR(hmacalg)) { pr_info("trusted_key: could not allocate crypto %s\n", hmac_alg); return PTR_ERR(hmacalg); } hashalg = crypto_alloc_shash(hash_alg, 0, CRYPTO_ALG_ASYNC); if (IS_ERR(hashalg)) { pr_info("trusted_key: could not allocate crypto %s\n", hash_alg); ret = PTR_ERR(hashalg); goto hashalg_fail; } return 0; hashalg_fail: crypto_free_shash(hmacalg); return ret; } static int __init init_trusted(void) { int ret; ret = trusted_shash_alloc(); if (ret < 0) return ret; ret = register_key_type(&key_type_trusted); if (ret < 0) trusted_shash_release(); return ret; } static void __exit cleanup_trusted(void) { trusted_shash_release(); 
unregister_key_type(&key_type_trusted); } late_initcall(init_trusted); module_exit(cleanup_trusted); MODULE_LICENSE("GPL");
gpl-2.0
breeze101792/linux-dev
arch/arm/mach-sa1100/nanoengine.c
4537
3007
/*
 * linux/arch/arm/mach-sa1100/nanoengine.c
 *
 * Bright Star Engineering's nanoEngine board init code.
 *
 * Copyright (C) 2010 Marcelo Roberto Jimenez <mroberto@cpti.cetuc.puc-rio.br>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/platform_data/sa11x0-serial.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/root_dev.h>

#include <asm/mach-types.h>
#include <asm/setup.h>
#include <asm/page.h>

#include <asm/mach/arch.h>
#include <asm/mach/flash.h>
#include <asm/mach/map.h>

#include <mach/hardware.h>
#include <mach/nanoengine.h>
#include <mach/irqs.h>

#include "generic.h"

/*
 * Flash bank 0 layout.  All three partitions are marked MTD_WRITEABLE in
 * mask_flags, which *clears* the writeable flag, i.e. forces read-only.
 */
static struct mtd_partition nanoengine_partitions[] = {
	{
		/* 0x000000 - 0x00ffff: 64 KiB (0x10000; the original
		 * comment claimed "32K", which did not match the size). */
		.name = "nanoEngine boot firmware and parameter table",
		.size = 0x00010000,
		.offset = 0,
		.mask_flags = MTD_WRITEABLE,	/* force read-only */
	}, {
		/* 0x010000 - 0x2fffff */
		.name = "kernel/initrd reserved",
		.size = 0x002f0000,
		.offset = 0x00010000,
		.mask_flags = MTD_WRITEABLE,	/* force read-only */
	}, {
		/* 0x300000 - 0x3fffff */
		.name = "experimental filesystem allocation",
		.size = 0x00100000,
		.offset = 0x00300000,
		.mask_flags = MTD_WRITEABLE,	/* force read-only */
	}
};

static struct flash_platform_data nanoengine_flash_data = {
	.map_name = "jedec_probe",	/* identify the chip by JEDEC ID */
	.parts = nanoengine_partitions,
	.nr_parts = ARRAY_SIZE(nanoengine_partitions),
};

/* Two 32 MiB static-memory banks (chip selects 0 and 1) for the flash. */
static struct resource nanoengine_flash_resources[] = {
	DEFINE_RES_MEM(SA1100_CS0_PHYS, SZ_32M),
	DEFINE_RES_MEM(SA1100_CS1_PHYS, SZ_32M),
};

/* Static virtual->physical device mappings installed at early boot. */
static struct map_desc nanoengine_io_desc[] __initdata = {
	{
		/* System Registers */
		.virtual	= 0xf0000000,
		.pfn		= __phys_to_pfn(0x10000000),
		.length		= 0x00100000,
		.type		= MT_DEVICE
	}, {
		/* Internal PCI Memory Read/Write */
		.virtual	= NANO_PCI_MEM_RW_VIRT,
		.pfn		= __phys_to_pfn(NANO_PCI_MEM_RW_PHYS),
		.length		= NANO_PCI_MEM_RW_SIZE,
		.type		= MT_DEVICE
	}, {
		/* Internal PCI Config Space */
		.virtual	= NANO_PCI_CONFIG_SPACE_VIRT,
		.pfn		= __phys_to_pfn(NANO_PCI_CONFIG_SPACE_PHYS),
		.length		= NANO_PCI_CONFIG_SPACE_SIZE,
		.type		= MT_DEVICE
	}
};

/*
 * Map I/O space and register the three SA1100 UARTs.  UART2 (Ser2) is
 * normally wired to the IrDA transceiver; disabling HSSP/IrDA here turns
 * it into a plain serial port.
 */
static void __init nanoengine_map_io(void)
{
	sa1100_map_io();
	iotable_init(nanoengine_io_desc, ARRAY_SIZE(nanoengine_io_desc));

	sa1100_register_uart(0, 1);
	sa1100_register_uart(1, 2);
	sa1100_register_uart(2, 3);
	Ser1SDCR0 |= SDCR0_UART;
	/* disable IRDA -- UART2 is used as a normal serial port */
	Ser2UTCR4 = 0;
	Ser2HSCR0 = 0;
}

/* Machine init: hand the flash description to the SA11x0 MTD core. */
static void __init nanoengine_init(void)
{
	sa11x0_register_mtd(&nanoengine_flash_data, nanoengine_flash_resources,
		ARRAY_SIZE(nanoengine_flash_resources));
}

MACHINE_START(NANOENGINE, "BSE nanoEngine")
	.atag_offset	= 0x100,
	.map_io		= nanoengine_map_io,
	.nr_irqs	= SA1100_NR_IRQS,
	.init_irq	= sa1100_init_irq,
	.init_time	= sa1100_timer_init,
	.init_machine	= nanoengine_init,
	.init_late	= sa11x0_init_late,
	.restart	= sa11x0_restart,
MACHINE_END
gpl-2.0
yajnab/android_kernel_samsung_i8260
arch/arm/mach-nomadik/i2c-8815nhk.c
5049
1674
#include <linux/module.h> #include <linux/init.h> #include <linux/i2c.h> #include <linux/i2c-algo-bit.h> #include <linux/i2c-gpio.h> #include <linux/platform_device.h> #include <plat/gpio-nomadik.h> /* * There are two busses in the 8815NHK. * They could, in theory, be driven by the hardware component, but we * use bit-bang through GPIO by now, to keep things simple */ static struct i2c_gpio_platform_data nhk8815_i2c_data0 = { /* keep defaults for timeouts; pins are push-pull bidirectional */ .scl_pin = 62, .sda_pin = 63, }; static struct i2c_gpio_platform_data nhk8815_i2c_data1 = { /* keep defaults for timeouts; pins are push-pull bidirectional */ .scl_pin = 53, .sda_pin = 54, }; /* first bus: GPIO XX and YY */ static struct platform_device nhk8815_i2c_dev0 = { .name = "i2c-gpio", .id = 0, .dev = { .platform_data = &nhk8815_i2c_data0, }, }; /* second bus: GPIO XX and YY */ static struct platform_device nhk8815_i2c_dev1 = { .name = "i2c-gpio", .id = 1, .dev = { .platform_data = &nhk8815_i2c_data1, }, }; static int __init nhk8815_i2c_init(void) { nmk_gpio_set_mode(nhk8815_i2c_data0.scl_pin, NMK_GPIO_ALT_GPIO); nmk_gpio_set_mode(nhk8815_i2c_data0.sda_pin, NMK_GPIO_ALT_GPIO); platform_device_register(&nhk8815_i2c_dev0); nmk_gpio_set_mode(nhk8815_i2c_data1.scl_pin, NMK_GPIO_ALT_GPIO); nmk_gpio_set_mode(nhk8815_i2c_data1.sda_pin, NMK_GPIO_ALT_GPIO); platform_device_register(&nhk8815_i2c_dev1); return 0; } static void __exit nhk8815_i2c_exit(void) { platform_device_unregister(&nhk8815_i2c_dev0); platform_device_unregister(&nhk8815_i2c_dev1); return; } module_init(nhk8815_i2c_init); module_exit(nhk8815_i2c_exit);
gpl-2.0
ankur850/android_kernel_samsung_msm7x27a
drivers/mtd/nand/bcm_umi_bch.c
9145
6980
/*****************************************************************************
* Copyright 2004 - 2009 Broadcom Corporation.  All rights reserved.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2, available at
* http://www.broadcom.com/licenses/GPLv2.php (the "GPL").
*
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a
* license other than the GPL, without Broadcom's express prior written
* consent.
*****************************************************************************/

/* ---- Include Files ---------------------------------------------------- */
#include "nand_bcm_umi.h"

/* ---- External Variable Declarations ----------------------------------- */
/* ---- External Function Prototypes ------------------------------------- */
/* ---- Public Variables ------------------------------------------------- */
/* ---- Private Constants and Types -------------------------------------- */
/* ---- Private Function Prototypes -------------------------------------- */
static int bcm_umi_bch_read_page_hwecc(struct mtd_info *mtd,
	struct nand_chip *chip, uint8_t *buf, int page);
static void bcm_umi_bch_write_page_hwecc(struct mtd_info *mtd,
	struct nand_chip *chip, const uint8_t *buf);

/* ---- Private Variables ------------------------------------------------ */

/*
** nand_hw_eccoob
** New oob placement block for use with hardware ecc generation.
** The free regions depend on how many ECC bytes the selected BCH
** strength consumes (NAND_ECC_NUM_BYTES comes from nand_bcm_umi.h).
*/
static struct nand_ecclayout nand_hw_eccoob_512 = {
	/* Reserve 5 for BI indicator */
	.oobfree = {
#if (NAND_ECC_NUM_BYTES > 3)
		{.offset = 0, .length = 2}
#else
		{.offset = 0, .length = 5},
		{.offset = 6, .length = 7}
#endif
	}
};

/*
** We treat the OOB for a 2K page as if it were 4 512 byte oobs,
** except the BI is at byte 0.
*/
static struct nand_ecclayout nand_hw_eccoob_2048 = {
	/* Reserve 0 as BI indicator */
	.oobfree = {
#if (NAND_ECC_NUM_BYTES > 10)
		{.offset = 1, .length = 2},
#elif (NAND_ECC_NUM_BYTES > 7)
		{.offset = 1, .length = 5},
		{.offset = 16, .length = 6},
		{.offset = 32, .length = 6},
		{.offset = 48, .length = 6}
#else
		{.offset = 1, .length = 8},
		{.offset = 16, .length = 9},
		{.offset = 32, .length = 9},
		{.offset = 48, .length = 9}
#endif
	}
};

/* We treat the OOB for a 4K page as if it were 8 512 byte oobs,
 * except the BI is at byte 0. */
static struct nand_ecclayout nand_hw_eccoob_4096 = {
	/* Reserve 0 as BI indicator */
	.oobfree = {
#if (NAND_ECC_NUM_BYTES > 10)
		{.offset = 1, .length = 2},
		{.offset = 16, .length = 3},
		{.offset = 32, .length = 3},
		{.offset = 48, .length = 3},
		{.offset = 64, .length = 3},
		{.offset = 80, .length = 3},
		{.offset = 96, .length = 3},
		{.offset = 112, .length = 3}
#else
		{.offset = 1, .length = 5},
		{.offset = 16, .length = 6},
		{.offset = 32, .length = 6},
		{.offset = 48, .length = 6},
		{.offset = 64, .length = 6},
		{.offset = 80, .length = 6},
		{.offset = 96, .length = 6},
		{.offset = 112, .length = 6}
#endif
	}
};

/* ---- Private Functions ------------------------------------------------ */
/* ==== Public Functions ================================================= */

/****************************************************************************
 *
 *  bcm_umi_bch_read_page_hwecc - hardware ecc based page read function
 *  @mtd:	mtd info structure
 *  @chip:	nand chip info structure
 *  @buf:	buffer to store read data
 *  @page:	page number (not used here; the caller has already issued
 *		the read command for this page)
 *
 *  Reads the page one ECC sector at a time, with the hardware BCH engine
 *  enabled across each sector's data, then pulls the stored ECC bytes out
 *  of the OOB and corrects the sector in place.  The enable / pause /
 *  read-OOB ordering follows the controller's expected sequence -- do not
 *  reorder these calls.  Always returns 0; uncorrectable sectors are only
 *  accounted in mtd->ecc_stats.failed.
 *
 ***************************************************************************/
static int bcm_umi_bch_read_page_hwecc(struct mtd_info *mtd,
				       struct nand_chip *chip, uint8_t * buf,
						 int page)
{
	int sectorIdx = 0;
	int eccsize = chip->ecc.size;
	int eccsteps = chip->ecc.steps;
	uint8_t *datap = buf;
	uint8_t eccCalc[NAND_ECC_NUM_BYTES];
	int sectorOobSize = mtd->oobsize / eccsteps;
	int stat;

	for (sectorIdx = 0; sectorIdx < eccsteps;
			sectorIdx++, datap += eccsize) {
		if (sectorIdx > 0) {
			/* Seek to page location within sector */
			chip->cmdfunc(mtd, NAND_CMD_RNDOUT, sectorIdx * eccsize,
				      -1);
		}

		/* Enable hardware ECC before reading the buf */
		nand_bcm_umi_bch_enable_read_hwecc();

		/* Read in data */
		bcm_umi_nand_read_buf(mtd, datap, eccsize);

		/* Pause hardware ECC after reading the buf */
		nand_bcm_umi_bch_pause_read_ecc_calc();

		/* Read the OOB ECC */
		chip->cmdfunc(mtd, NAND_CMD_RNDOUT,
			      mtd->writesize + sectorIdx * sectorOobSize, -1);
		nand_bcm_umi_bch_read_oobEcc(mtd->writesize, eccCalc,
					     NAND_ECC_NUM_BYTES,
					     chip->oob_poi +
					     sectorIdx * sectorOobSize);

		/* Correct any ECC detected errors */
		stat = nand_bcm_umi_bch_correct_page(datap, eccCalc,
						     NAND_ECC_NUM_BYTES);

		/* Update Stats: stat < 0 means uncorrectable, otherwise it
		 * is the number of bitflips that were corrected. */
		if (stat < 0) {
#if defined(NAND_BCM_UMI_DEBUG)
			printk(KERN_WARNING "%s uncorr_err sectorIdx=%d\n",
			       __func__, sectorIdx);
			printk(KERN_WARNING
			       "%s data %02x %02x %02x %02x "
			       "%02x %02x %02x %02x\n",
			       __func__, datap[0], datap[1], datap[2], datap[3],
			       datap[4], datap[5], datap[6], datap[7]);
			printk(KERN_WARNING
			       "%s ecc %02x %02x %02x %02x "
			       "%02x %02x %02x %02x %02x %02x "
			       "%02x %02x %02x\n",
			       __func__, eccCalc[0], eccCalc[1], eccCalc[2],
			       eccCalc[3], eccCalc[4], eccCalc[5], eccCalc[6],
			       eccCalc[7], eccCalc[8], eccCalc[9], eccCalc[10],
			       eccCalc[11], eccCalc[12]);
			BUG();
#endif
			mtd->ecc_stats.failed++;
		} else {
#if defined(NAND_BCM_UMI_DEBUG)
			if (stat > 0) {
				printk(KERN_INFO
				       "%s %d correctable_errors detected\n",
				       __func__, stat);
			}
#endif
			mtd->ecc_stats.corrected += stat;
		}
	}
	return 0;
}

/****************************************************************************
 *
 *  bcm_umi_bch_write_page_hwecc - hardware ecc based page write function
 *  @mtd:	mtd info structure
 *  @chip:	nand chip info structure
 *  @buf:	data buffer
 *
 *  Writes the page one ECC sector at a time with the hardware BCH engine
 *  enabled, letting the controller deposit the computed ECC bytes into
 *  each sector's slice of chip->oob_poi, then writes the whole OOB area
 *  out in one final transfer.
 *
 ***************************************************************************/
static void bcm_umi_bch_write_page_hwecc(struct mtd_info *mtd,
	struct nand_chip *chip, const uint8_t *buf)
{
	int sectorIdx = 0;
	int eccsize = chip->ecc.size;
	int eccsteps = chip->ecc.steps;
	const uint8_t *datap = buf;
	uint8_t *oobp = chip->oob_poi;
	int sectorOobSize = mtd->oobsize / eccsteps;

	for (sectorIdx = 0; sectorIdx < eccsteps;
	     sectorIdx++, datap += eccsize, oobp += sectorOobSize) {
		/* Enable hardware ECC before writing the buf */
		nand_bcm_umi_bch_enable_write_hwecc();
		bcm_umi_nand_write_buf(mtd, datap, eccsize);
		nand_bcm_umi_bch_write_oobEcc(mtd->writesize, oobp,
					      NAND_ECC_NUM_BYTES);
	}

	bcm_umi_nand_write_buf(mtd, chip->oob_poi, mtd->oobsize);
}
gpl-2.0
ChaOSChriS/android_kernel_google_msm
drivers/mfd/wm8350-gpio.c
10425
6211
/* * wm8350-core.c -- Device access for Wolfson WM8350 * * Copyright 2007, 2008 Wolfson Microelectronics PLC. * * Author: Liam Girdwood * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/errno.h> #include <linux/mfd/wm8350/core.h> #include <linux/mfd/wm8350/gpio.h> #include <linux/mfd/wm8350/pmic.h> static int gpio_set_dir(struct wm8350 *wm8350, int gpio, int dir) { int ret; wm8350_reg_unlock(wm8350); if (dir == WM8350_GPIO_DIR_OUT) ret = wm8350_clear_bits(wm8350, WM8350_GPIO_CONFIGURATION_I_O, 1 << gpio); else ret = wm8350_set_bits(wm8350, WM8350_GPIO_CONFIGURATION_I_O, 1 << gpio); wm8350_reg_lock(wm8350); return ret; } static int wm8350_gpio_set_debounce(struct wm8350 *wm8350, int gpio, int db) { if (db == WM8350_GPIO_DEBOUNCE_ON) return wm8350_set_bits(wm8350, WM8350_GPIO_DEBOUNCE, 1 << gpio); else return wm8350_clear_bits(wm8350, WM8350_GPIO_DEBOUNCE, 1 << gpio); } static int gpio_set_func(struct wm8350 *wm8350, int gpio, int func) { u16 reg; wm8350_reg_unlock(wm8350); switch (gpio) { case 0: reg = wm8350_reg_read(wm8350, WM8350_GPIO_FUNCTION_SELECT_1) & ~WM8350_GP0_FN_MASK; wm8350_reg_write(wm8350, WM8350_GPIO_FUNCTION_SELECT_1, reg | ((func & 0xf) << 0)); break; case 1: reg = wm8350_reg_read(wm8350, WM8350_GPIO_FUNCTION_SELECT_1) & ~WM8350_GP1_FN_MASK; wm8350_reg_write(wm8350, WM8350_GPIO_FUNCTION_SELECT_1, reg | ((func & 0xf) << 4)); break; case 2: reg = wm8350_reg_read(wm8350, WM8350_GPIO_FUNCTION_SELECT_1) & ~WM8350_GP2_FN_MASK; wm8350_reg_write(wm8350, WM8350_GPIO_FUNCTION_SELECT_1, reg | ((func & 0xf) << 8)); break; case 3: reg = wm8350_reg_read(wm8350, WM8350_GPIO_FUNCTION_SELECT_1) & ~WM8350_GP3_FN_MASK; wm8350_reg_write(wm8350, WM8350_GPIO_FUNCTION_SELECT_1, reg | ((func & 0xf) << 
12)); break; case 4: reg = wm8350_reg_read(wm8350, WM8350_GPIO_FUNCTION_SELECT_2) & ~WM8350_GP4_FN_MASK; wm8350_reg_write(wm8350, WM8350_GPIO_FUNCTION_SELECT_2, reg | ((func & 0xf) << 0)); break; case 5: reg = wm8350_reg_read(wm8350, WM8350_GPIO_FUNCTION_SELECT_2) & ~WM8350_GP5_FN_MASK; wm8350_reg_write(wm8350, WM8350_GPIO_FUNCTION_SELECT_2, reg | ((func & 0xf) << 4)); break; case 6: reg = wm8350_reg_read(wm8350, WM8350_GPIO_FUNCTION_SELECT_2) & ~WM8350_GP6_FN_MASK; wm8350_reg_write(wm8350, WM8350_GPIO_FUNCTION_SELECT_2, reg | ((func & 0xf) << 8)); break; case 7: reg = wm8350_reg_read(wm8350, WM8350_GPIO_FUNCTION_SELECT_2) & ~WM8350_GP7_FN_MASK; wm8350_reg_write(wm8350, WM8350_GPIO_FUNCTION_SELECT_2, reg | ((func & 0xf) << 12)); break; case 8: reg = wm8350_reg_read(wm8350, WM8350_GPIO_FUNCTION_SELECT_3) & ~WM8350_GP8_FN_MASK; wm8350_reg_write(wm8350, WM8350_GPIO_FUNCTION_SELECT_3, reg | ((func & 0xf) << 0)); break; case 9: reg = wm8350_reg_read(wm8350, WM8350_GPIO_FUNCTION_SELECT_3) & ~WM8350_GP9_FN_MASK; wm8350_reg_write(wm8350, WM8350_GPIO_FUNCTION_SELECT_3, reg | ((func & 0xf) << 4)); break; case 10: reg = wm8350_reg_read(wm8350, WM8350_GPIO_FUNCTION_SELECT_3) & ~WM8350_GP10_FN_MASK; wm8350_reg_write(wm8350, WM8350_GPIO_FUNCTION_SELECT_3, reg | ((func & 0xf) << 8)); break; case 11: reg = wm8350_reg_read(wm8350, WM8350_GPIO_FUNCTION_SELECT_3) & ~WM8350_GP11_FN_MASK; wm8350_reg_write(wm8350, WM8350_GPIO_FUNCTION_SELECT_3, reg | ((func & 0xf) << 12)); break; case 12: reg = wm8350_reg_read(wm8350, WM8350_GPIO_FUNCTION_SELECT_4) & ~WM8350_GP12_FN_MASK; wm8350_reg_write(wm8350, WM8350_GPIO_FUNCTION_SELECT_4, reg | ((func & 0xf) << 0)); break; default: wm8350_reg_lock(wm8350); return -EINVAL; } wm8350_reg_lock(wm8350); return 0; } static int gpio_set_pull_up(struct wm8350 *wm8350, int gpio, int up) { if (up) return wm8350_set_bits(wm8350, WM8350_GPIO_PIN_PULL_UP_CONTROL, 1 << gpio); else return wm8350_clear_bits(wm8350, WM8350_GPIO_PIN_PULL_UP_CONTROL, 1 << gpio); } 
static int gpio_set_pull_down(struct wm8350 *wm8350, int gpio, int down) { if (down) return wm8350_set_bits(wm8350, WM8350_GPIO_PULL_DOWN_CONTROL, 1 << gpio); else return wm8350_clear_bits(wm8350, WM8350_GPIO_PULL_DOWN_CONTROL, 1 << gpio); } static int gpio_set_polarity(struct wm8350 *wm8350, int gpio, int pol) { if (pol == WM8350_GPIO_ACTIVE_HIGH) return wm8350_set_bits(wm8350, WM8350_GPIO_PIN_POLARITY_TYPE, 1 << gpio); else return wm8350_clear_bits(wm8350, WM8350_GPIO_PIN_POLARITY_TYPE, 1 << gpio); } static int gpio_set_invert(struct wm8350 *wm8350, int gpio, int invert) { if (invert == WM8350_GPIO_INVERT_ON) return wm8350_set_bits(wm8350, WM8350_GPIO_INT_MODE, 1 << gpio); else return wm8350_clear_bits(wm8350, WM8350_GPIO_INT_MODE, 1 << gpio); } int wm8350_gpio_config(struct wm8350 *wm8350, int gpio, int dir, int func, int pol, int pull, int invert, int debounce) { /* make sure we never pull up and down at the same time */ if (pull == WM8350_GPIO_PULL_NONE) { if (gpio_set_pull_up(wm8350, gpio, 0)) goto err; if (gpio_set_pull_down(wm8350, gpio, 0)) goto err; } else if (pull == WM8350_GPIO_PULL_UP) { if (gpio_set_pull_down(wm8350, gpio, 0)) goto err; if (gpio_set_pull_up(wm8350, gpio, 1)) goto err; } else if (pull == WM8350_GPIO_PULL_DOWN) { if (gpio_set_pull_up(wm8350, gpio, 0)) goto err; if (gpio_set_pull_down(wm8350, gpio, 1)) goto err; } if (gpio_set_invert(wm8350, gpio, invert)) goto err; if (gpio_set_polarity(wm8350, gpio, pol)) goto err; if (wm8350_gpio_set_debounce(wm8350, gpio, debounce)) goto err; if (gpio_set_dir(wm8350, gpio, dir)) goto err; return gpio_set_func(wm8350, gpio, func); err: return -EIO; } EXPORT_SYMBOL_GPL(wm8350_gpio_config);
gpl-2.0
rudij7/green_machine_bacon
drivers/mfd/wm8350-gpio.c
10425
6211
/* * wm8350-core.c -- Device access for Wolfson WM8350 * * Copyright 2007, 2008 Wolfson Microelectronics PLC. * * Author: Liam Girdwood * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/errno.h> #include <linux/mfd/wm8350/core.h> #include <linux/mfd/wm8350/gpio.h> #include <linux/mfd/wm8350/pmic.h> static int gpio_set_dir(struct wm8350 *wm8350, int gpio, int dir) { int ret; wm8350_reg_unlock(wm8350); if (dir == WM8350_GPIO_DIR_OUT) ret = wm8350_clear_bits(wm8350, WM8350_GPIO_CONFIGURATION_I_O, 1 << gpio); else ret = wm8350_set_bits(wm8350, WM8350_GPIO_CONFIGURATION_I_O, 1 << gpio); wm8350_reg_lock(wm8350); return ret; } static int wm8350_gpio_set_debounce(struct wm8350 *wm8350, int gpio, int db) { if (db == WM8350_GPIO_DEBOUNCE_ON) return wm8350_set_bits(wm8350, WM8350_GPIO_DEBOUNCE, 1 << gpio); else return wm8350_clear_bits(wm8350, WM8350_GPIO_DEBOUNCE, 1 << gpio); } static int gpio_set_func(struct wm8350 *wm8350, int gpio, int func) { u16 reg; wm8350_reg_unlock(wm8350); switch (gpio) { case 0: reg = wm8350_reg_read(wm8350, WM8350_GPIO_FUNCTION_SELECT_1) & ~WM8350_GP0_FN_MASK; wm8350_reg_write(wm8350, WM8350_GPIO_FUNCTION_SELECT_1, reg | ((func & 0xf) << 0)); break; case 1: reg = wm8350_reg_read(wm8350, WM8350_GPIO_FUNCTION_SELECT_1) & ~WM8350_GP1_FN_MASK; wm8350_reg_write(wm8350, WM8350_GPIO_FUNCTION_SELECT_1, reg | ((func & 0xf) << 4)); break; case 2: reg = wm8350_reg_read(wm8350, WM8350_GPIO_FUNCTION_SELECT_1) & ~WM8350_GP2_FN_MASK; wm8350_reg_write(wm8350, WM8350_GPIO_FUNCTION_SELECT_1, reg | ((func & 0xf) << 8)); break; case 3: reg = wm8350_reg_read(wm8350, WM8350_GPIO_FUNCTION_SELECT_1) & ~WM8350_GP3_FN_MASK; wm8350_reg_write(wm8350, WM8350_GPIO_FUNCTION_SELECT_1, reg | ((func & 0xf) << 
12)); break; case 4: reg = wm8350_reg_read(wm8350, WM8350_GPIO_FUNCTION_SELECT_2) & ~WM8350_GP4_FN_MASK; wm8350_reg_write(wm8350, WM8350_GPIO_FUNCTION_SELECT_2, reg | ((func & 0xf) << 0)); break; case 5: reg = wm8350_reg_read(wm8350, WM8350_GPIO_FUNCTION_SELECT_2) & ~WM8350_GP5_FN_MASK; wm8350_reg_write(wm8350, WM8350_GPIO_FUNCTION_SELECT_2, reg | ((func & 0xf) << 4)); break; case 6: reg = wm8350_reg_read(wm8350, WM8350_GPIO_FUNCTION_SELECT_2) & ~WM8350_GP6_FN_MASK; wm8350_reg_write(wm8350, WM8350_GPIO_FUNCTION_SELECT_2, reg | ((func & 0xf) << 8)); break; case 7: reg = wm8350_reg_read(wm8350, WM8350_GPIO_FUNCTION_SELECT_2) & ~WM8350_GP7_FN_MASK; wm8350_reg_write(wm8350, WM8350_GPIO_FUNCTION_SELECT_2, reg | ((func & 0xf) << 12)); break; case 8: reg = wm8350_reg_read(wm8350, WM8350_GPIO_FUNCTION_SELECT_3) & ~WM8350_GP8_FN_MASK; wm8350_reg_write(wm8350, WM8350_GPIO_FUNCTION_SELECT_3, reg | ((func & 0xf) << 0)); break; case 9: reg = wm8350_reg_read(wm8350, WM8350_GPIO_FUNCTION_SELECT_3) & ~WM8350_GP9_FN_MASK; wm8350_reg_write(wm8350, WM8350_GPIO_FUNCTION_SELECT_3, reg | ((func & 0xf) << 4)); break; case 10: reg = wm8350_reg_read(wm8350, WM8350_GPIO_FUNCTION_SELECT_3) & ~WM8350_GP10_FN_MASK; wm8350_reg_write(wm8350, WM8350_GPIO_FUNCTION_SELECT_3, reg | ((func & 0xf) << 8)); break; case 11: reg = wm8350_reg_read(wm8350, WM8350_GPIO_FUNCTION_SELECT_3) & ~WM8350_GP11_FN_MASK; wm8350_reg_write(wm8350, WM8350_GPIO_FUNCTION_SELECT_3, reg | ((func & 0xf) << 12)); break; case 12: reg = wm8350_reg_read(wm8350, WM8350_GPIO_FUNCTION_SELECT_4) & ~WM8350_GP12_FN_MASK; wm8350_reg_write(wm8350, WM8350_GPIO_FUNCTION_SELECT_4, reg | ((func & 0xf) << 0)); break; default: wm8350_reg_lock(wm8350); return -EINVAL; } wm8350_reg_lock(wm8350); return 0; } static int gpio_set_pull_up(struct wm8350 *wm8350, int gpio, int up) { if (up) return wm8350_set_bits(wm8350, WM8350_GPIO_PIN_PULL_UP_CONTROL, 1 << gpio); else return wm8350_clear_bits(wm8350, WM8350_GPIO_PIN_PULL_UP_CONTROL, 1 << gpio); } 
static int gpio_set_pull_down(struct wm8350 *wm8350, int gpio, int down) { if (down) return wm8350_set_bits(wm8350, WM8350_GPIO_PULL_DOWN_CONTROL, 1 << gpio); else return wm8350_clear_bits(wm8350, WM8350_GPIO_PULL_DOWN_CONTROL, 1 << gpio); } static int gpio_set_polarity(struct wm8350 *wm8350, int gpio, int pol) { if (pol == WM8350_GPIO_ACTIVE_HIGH) return wm8350_set_bits(wm8350, WM8350_GPIO_PIN_POLARITY_TYPE, 1 << gpio); else return wm8350_clear_bits(wm8350, WM8350_GPIO_PIN_POLARITY_TYPE, 1 << gpio); } static int gpio_set_invert(struct wm8350 *wm8350, int gpio, int invert) { if (invert == WM8350_GPIO_INVERT_ON) return wm8350_set_bits(wm8350, WM8350_GPIO_INT_MODE, 1 << gpio); else return wm8350_clear_bits(wm8350, WM8350_GPIO_INT_MODE, 1 << gpio); } int wm8350_gpio_config(struct wm8350 *wm8350, int gpio, int dir, int func, int pol, int pull, int invert, int debounce) { /* make sure we never pull up and down at the same time */ if (pull == WM8350_GPIO_PULL_NONE) { if (gpio_set_pull_up(wm8350, gpio, 0)) goto err; if (gpio_set_pull_down(wm8350, gpio, 0)) goto err; } else if (pull == WM8350_GPIO_PULL_UP) { if (gpio_set_pull_down(wm8350, gpio, 0)) goto err; if (gpio_set_pull_up(wm8350, gpio, 1)) goto err; } else if (pull == WM8350_GPIO_PULL_DOWN) { if (gpio_set_pull_up(wm8350, gpio, 0)) goto err; if (gpio_set_pull_down(wm8350, gpio, 1)) goto err; } if (gpio_set_invert(wm8350, gpio, invert)) goto err; if (gpio_set_polarity(wm8350, gpio, pol)) goto err; if (wm8350_gpio_set_debounce(wm8350, gpio, debounce)) goto err; if (gpio_set_dir(wm8350, gpio, dir)) goto err; return gpio_set_func(wm8350, gpio, func); err: return -EIO; } EXPORT_SYMBOL_GPL(wm8350_gpio_config);
gpl-2.0
MinimalOS/android_kernel_lge_mako
arch/m68k/sun3/dvma.c
11705
1265
/* * linux/arch/m68k/sun3/dvma.c * * Written by Sam Creasey * * Sun3 IOMMU routines used for dvma accesses. * */ #include <linux/kernel.h> #include <linux/mm.h> #include <linux/bootmem.h> #include <linux/list.h> #include <asm/page.h> #include <asm/pgtable.h> #include <asm/sun3mmu.h> #include <asm/dvma.h> static unsigned long ptelist[120]; static unsigned long dvma_page(unsigned long kaddr, unsigned long vaddr) { unsigned long pte; unsigned long j; pte_t ptep; j = *(volatile unsigned long *)kaddr; *(volatile unsigned long *)kaddr = j; ptep = pfn_pte(virt_to_pfn(kaddr), PAGE_KERNEL); pte = pte_val(ptep); // printk("dvma_remap: addr %lx -> %lx pte %08lx len %x\n", // kaddr, vaddr, pte, len); if(ptelist[(vaddr & 0xff000) >> PAGE_SHIFT] != pte) { sun3_put_pte(vaddr, pte); ptelist[(vaddr & 0xff000) >> PAGE_SHIFT] = pte; } return (vaddr + (kaddr & ~PAGE_MASK)); } int dvma_map_iommu(unsigned long kaddr, unsigned long baddr, int len) { unsigned long end; unsigned long vaddr; vaddr = dvma_btov(baddr); end = vaddr + len; while(vaddr < end) { dvma_page(kaddr, vaddr); kaddr += PAGE_SIZE; vaddr += PAGE_SIZE; } return 0; } void sun3_dvma_init(void) { memset(ptelist, 0, sizeof(ptelist)); }
gpl-2.0
AOKP/kernel_sony_common
arch/avr32/kernel/stacktrace.c
13753
1223
/* * Stack trace management functions * * Copyright (C) 2007 Atmel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/sched.h> #include <linux/stacktrace.h> #include <linux/thread_info.h> #include <linux/module.h> register unsigned long current_frame_pointer asm("r7"); struct stackframe { unsigned long lr; unsigned long fp; }; /* * Save stack-backtrace addresses into a stack_trace buffer. */ void save_stack_trace(struct stack_trace *trace) { unsigned long low, high; unsigned long fp; struct stackframe *frame; int skip = trace->skip; low = (unsigned long)task_stack_page(current); high = low + THREAD_SIZE; fp = current_frame_pointer; while (fp >= low && fp <= (high - 8)) { frame = (struct stackframe *)fp; if (skip) { skip--; } else { trace->entries[trace->nr_entries++] = frame->lr; if (trace->nr_entries >= trace->max_entries) break; } /* * The next frame must be at a higher address than the * current frame. */ low = fp + 8; fp = frame->fp; } } EXPORT_SYMBOL_GPL(save_stack_trace);
gpl-2.0
alef78/x86_ramos_i9_kernel
kernel/arch/avr32/kernel/stacktrace.c
13753
1223
/* * Stack trace management functions * * Copyright (C) 2007 Atmel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/sched.h> #include <linux/stacktrace.h> #include <linux/thread_info.h> #include <linux/module.h> register unsigned long current_frame_pointer asm("r7"); struct stackframe { unsigned long lr; unsigned long fp; }; /* * Save stack-backtrace addresses into a stack_trace buffer. */ void save_stack_trace(struct stack_trace *trace) { unsigned long low, high; unsigned long fp; struct stackframe *frame; int skip = trace->skip; low = (unsigned long)task_stack_page(current); high = low + THREAD_SIZE; fp = current_frame_pointer; while (fp >= low && fp <= (high - 8)) { frame = (struct stackframe *)fp; if (skip) { skip--; } else { trace->entries[trace->nr_entries++] = frame->lr; if (trace->nr_entries >= trace->max_entries) break; } /* * The next frame must be at a higher address than the * current frame. */ low = fp + 8; fp = frame->fp; } } EXPORT_SYMBOL_GPL(save_stack_trace);
gpl-2.0
faux123/private-pyramid
drivers/usb/gadget/s3c-hsotg.c
698
88856
/* linux/drivers/usb/gadget/s3c-hsotg.c * * Copyright 2008 Openmoko, Inc. * Copyright 2008 Simtec Electronics * Ben Dooks <ben@simtec.co.uk> * http://armlinux.simtec.co.uk/ * * S3C USB2.0 High-speed / OtG driver * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/spinlock.h> #include <linux/interrupt.h> #include <linux/platform_device.h> #include <linux/dma-mapping.h> #include <linux/debugfs.h> #include <linux/seq_file.h> #include <linux/delay.h> #include <linux/io.h> #include <linux/slab.h> #include <linux/usb/ch9.h> #include <linux/usb/gadget.h> #include <mach/map.h> #include <plat/regs-usb-hsotg-phy.h> #include <plat/regs-usb-hsotg.h> #include <mach/regs-sys.h> #include <plat/udc-hs.h> #define DMA_ADDR_INVALID (~((dma_addr_t)0)) /* EP0_MPS_LIMIT * * Unfortunately there seems to be a limit of the amount of data that can * be transfered by IN transactions on EP0. This is either 127 bytes or 3 * packets (which practially means 1 packet and 63 bytes of data) when the * MPS is set to 64. * * This means if we are wanting to move >127 bytes of data, we need to * split the transactions up, but just doing one packet at a time does * not work (this may be an implicit DATA0 PID on first packet of the * transaction) and doing 2 packets is outside the controller's limits. * * If we try to lower the MPS size for EP0, then no transfers work properly * for EP0, and the system will fail basic enumeration. As no cause for this * has currently been found, we cannot support any large IN transfers for * EP0. */ #define EP0_MPS_LIMIT 64 struct s3c_hsotg; struct s3c_hsotg_req; /** * struct s3c_hsotg_ep - driver endpoint definition. * @ep: The gadget layer representation of the endpoint. * @name: The driver generated name for the endpoint. 
* @queue: Queue of requests for this endpoint. * @parent: Reference back to the parent device structure. * @req: The current request that the endpoint is processing. This is * used to indicate an request has been loaded onto the endpoint * and has yet to be completed (maybe due to data move, or simply * awaiting an ack from the core all the data has been completed). * @debugfs: File entry for debugfs file for this endpoint. * @lock: State lock to protect contents of endpoint. * @dir_in: Set to true if this endpoint is of the IN direction, which * means that it is sending data to the Host. * @index: The index for the endpoint registers. * @name: The name array passed to the USB core. * @halted: Set if the endpoint has been halted. * @periodic: Set if this is a periodic ep, such as Interrupt * @sent_zlp: Set if we've sent a zero-length packet. * @total_data: The total number of data bytes done. * @fifo_size: The size of the FIFO (for periodic IN endpoints) * @fifo_load: The amount of data loaded into the FIFO (periodic IN) * @last_load: The offset of data for the last start of request. * @size_loaded: The last loaded size for DxEPTSIZE for periodic IN * * This is the driver's state for each registered enpoint, allowing it * to keep track of transactions that need doing. Each endpoint has a * lock to protect the state, to try and avoid using an overall lock * for the host controller as much as possible. * * For periodic IN endpoints, we have fifo_size and fifo_load to try * and keep track of the amount of data in the periodic FIFO for each * of these as we don't have a status register that tells us how much * is in each of them. 
*/ struct s3c_hsotg_ep { struct usb_ep ep; struct list_head queue; struct s3c_hsotg *parent; struct s3c_hsotg_req *req; struct dentry *debugfs; spinlock_t lock; unsigned long total_data; unsigned int size_loaded; unsigned int last_load; unsigned int fifo_load; unsigned short fifo_size; unsigned char dir_in; unsigned char index; unsigned int halted:1; unsigned int periodic:1; unsigned int sent_zlp:1; char name[10]; }; #define S3C_HSOTG_EPS (8+1) /* limit to 9 for the moment */ /** * struct s3c_hsotg - driver state. * @dev: The parent device supplied to the probe function * @driver: USB gadget driver * @plat: The platform specific configuration data. * @regs: The memory area mapped for accessing registers. * @regs_res: The resource that was allocated when claiming register space. * @irq: The IRQ number we are using * @debug_root: root directrory for debugfs. * @debug_file: main status file for debugfs. * @debug_fifo: FIFO status file for debugfs. * @ep0_reply: Request used for ep0 reply. * @ep0_buff: Buffer for EP0 reply data, if needed. * @ctrl_buff: Buffer for EP0 control requests. * @ctrl_req: Request for EP0 control packets. * @eps: The endpoints being supplied to the gadget framework */ struct s3c_hsotg { struct device *dev; struct usb_gadget_driver *driver; struct s3c_hsotg_plat *plat; void __iomem *regs; struct resource *regs_res; int irq; struct dentry *debug_root; struct dentry *debug_file; struct dentry *debug_fifo; struct usb_request *ep0_reply; struct usb_request *ctrl_req; u8 ep0_buff[8]; u8 ctrl_buff[8]; struct usb_gadget gadget; struct s3c_hsotg_ep eps[]; }; /** * struct s3c_hsotg_req - data transfer request * @req: The USB gadget request * @queue: The list of requests for the endpoint this is queued for. * @in_progress: Has already had size/packets written to core * @mapped: DMA buffer for this request has been mapped via dma_map_single(). 
*/ struct s3c_hsotg_req { struct usb_request req; struct list_head queue; unsigned char in_progress; unsigned char mapped; }; /* conversion functions */ static inline struct s3c_hsotg_req *our_req(struct usb_request *req) { return container_of(req, struct s3c_hsotg_req, req); } static inline struct s3c_hsotg_ep *our_ep(struct usb_ep *ep) { return container_of(ep, struct s3c_hsotg_ep, ep); } static inline struct s3c_hsotg *to_hsotg(struct usb_gadget *gadget) { return container_of(gadget, struct s3c_hsotg, gadget); } static inline void __orr32(void __iomem *ptr, u32 val) { writel(readl(ptr) | val, ptr); } static inline void __bic32(void __iomem *ptr, u32 val) { writel(readl(ptr) & ~val, ptr); } /* forward decleration of functions */ static void s3c_hsotg_dump(struct s3c_hsotg *hsotg); /** * using_dma - return the DMA status of the driver. * @hsotg: The driver state. * * Return true if we're using DMA. * * Currently, we have the DMA support code worked into everywhere * that needs it, but the AMBA DMA implementation in the hardware can * only DMA from 32bit aligned addresses. This means that gadgets such * as the CDC Ethernet cannot work as they often pass packets which are * not 32bit aligned. * * Unfortunately the choice to use DMA or not is global to the controller * and seems to be only settable when the controller is being put through * a core reset. This means we either need to fix the gadgets to take * account of DMA alignment, or add bounce buffers (yuerk). * * Until this issue is sorted out, we always return 'false'. 
*/ static inline bool using_dma(struct s3c_hsotg *hsotg) { return false; /* support is not complete */ } /** * s3c_hsotg_en_gsint - enable one or more of the general interrupt * @hsotg: The device state * @ints: A bitmask of the interrupts to enable */ static void s3c_hsotg_en_gsint(struct s3c_hsotg *hsotg, u32 ints) { u32 gsintmsk = readl(hsotg->regs + S3C_GINTMSK); u32 new_gsintmsk; new_gsintmsk = gsintmsk | ints; if (new_gsintmsk != gsintmsk) { dev_dbg(hsotg->dev, "gsintmsk now 0x%08x\n", new_gsintmsk); writel(new_gsintmsk, hsotg->regs + S3C_GINTMSK); } } /** * s3c_hsotg_disable_gsint - disable one or more of the general interrupt * @hsotg: The device state * @ints: A bitmask of the interrupts to enable */ static void s3c_hsotg_disable_gsint(struct s3c_hsotg *hsotg, u32 ints) { u32 gsintmsk = readl(hsotg->regs + S3C_GINTMSK); u32 new_gsintmsk; new_gsintmsk = gsintmsk & ~ints; if (new_gsintmsk != gsintmsk) writel(new_gsintmsk, hsotg->regs + S3C_GINTMSK); } /** * s3c_hsotg_ctrl_epint - enable/disable an endpoint irq * @hsotg: The device state * @ep: The endpoint index * @dir_in: True if direction is in. * @en: The enable value, true to enable * * Set or clear the mask for an individual endpoint's interrupt * request. */ static void s3c_hsotg_ctrl_epint(struct s3c_hsotg *hsotg, unsigned int ep, unsigned int dir_in, unsigned int en) { unsigned long flags; u32 bit = 1 << ep; u32 daint; if (!dir_in) bit <<= 16; local_irq_save(flags); daint = readl(hsotg->regs + S3C_DAINTMSK); if (en) daint |= bit; else daint &= ~bit; writel(daint, hsotg->regs + S3C_DAINTMSK); local_irq_restore(flags); } /** * s3c_hsotg_init_fifo - initialise non-periodic FIFOs * @hsotg: The device instance. 
*/ static void s3c_hsotg_init_fifo(struct s3c_hsotg *hsotg) { unsigned int ep; unsigned int addr; unsigned int size; int timeout; u32 val; /* the ryu 2.6.24 release ahs writel(0x1C0, hsotg->regs + S3C_GRXFSIZ); writel(S3C_GNPTXFSIZ_NPTxFStAddr(0x200) | S3C_GNPTXFSIZ_NPTxFDep(0x1C0), hsotg->regs + S3C_GNPTXFSIZ); */ /* set FIFO sizes to 2048/0x1C0 */ writel(2048, hsotg->regs + S3C_GRXFSIZ); writel(S3C_GNPTXFSIZ_NPTxFStAddr(2048) | S3C_GNPTXFSIZ_NPTxFDep(0x1C0), hsotg->regs + S3C_GNPTXFSIZ); /* arange all the rest of the TX FIFOs, as some versions of this * block have overlapping default addresses. This also ensures * that if the settings have been changed, then they are set to * known values. */ /* start at the end of the GNPTXFSIZ, rounded up */ addr = 2048 + 1024; size = 768; /* currently we allocate TX FIFOs for all possible endpoints, * and assume that they are all the same size. */ for (ep = 0; ep <= 15; ep++) { val = addr; val |= size << S3C_DPTXFSIZn_DPTxFSize_SHIFT; addr += size; writel(val, hsotg->regs + S3C_DPTXFSIZn(ep)); } /* according to p428 of the design guide, we need to ensure that * all fifos are flushed before continuing */ writel(S3C_GRSTCTL_TxFNum(0x10) | S3C_GRSTCTL_TxFFlsh | S3C_GRSTCTL_RxFFlsh, hsotg->regs + S3C_GRSTCTL); /* wait until the fifos are both flushed */ timeout = 100; while (1) { val = readl(hsotg->regs + S3C_GRSTCTL); if ((val & (S3C_GRSTCTL_TxFFlsh | S3C_GRSTCTL_RxFFlsh)) == 0) break; if (--timeout == 0) { dev_err(hsotg->dev, "%s: timeout flushing fifos (GRSTCTL=%08x)\n", __func__, val); } udelay(1); } dev_dbg(hsotg->dev, "FIFOs reset, timeout at %d\n", timeout); } /** * @ep: USB endpoint to allocate request for. 
* @flags: Allocation flags * * Allocate a new USB request structure appropriate for the specified endpoint */ static struct usb_request *s3c_hsotg_ep_alloc_request(struct usb_ep *ep, gfp_t flags) { struct s3c_hsotg_req *req; req = kzalloc(sizeof(struct s3c_hsotg_req), flags); if (!req) return NULL; INIT_LIST_HEAD(&req->queue); req->req.dma = DMA_ADDR_INVALID; return &req->req; } /** * is_ep_periodic - return true if the endpoint is in periodic mode. * @hs_ep: The endpoint to query. * * Returns true if the endpoint is in periodic mode, meaning it is being * used for an Interrupt or ISO transfer. */ static inline int is_ep_periodic(struct s3c_hsotg_ep *hs_ep) { return hs_ep->periodic; } /** * s3c_hsotg_unmap_dma - unmap the DMA memory being used for the request * @hsotg: The device state. * @hs_ep: The endpoint for the request * @hs_req: The request being processed. * * This is the reverse of s3c_hsotg_map_dma(), called for the completion * of a request to ensure the buffer is ready for access by the caller. */ static void s3c_hsotg_unmap_dma(struct s3c_hsotg *hsotg, struct s3c_hsotg_ep *hs_ep, struct s3c_hsotg_req *hs_req) { struct usb_request *req = &hs_req->req; enum dma_data_direction dir; dir = hs_ep->dir_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE; /* ignore this if we're not moving any data */ if (hs_req->req.length == 0) return; if (hs_req->mapped) { /* we mapped this, so unmap and remove the dma */ dma_unmap_single(hsotg->dev, req->dma, req->length, dir); req->dma = DMA_ADDR_INVALID; hs_req->mapped = 0; } else { dma_sync_single_for_cpu(hsotg->dev, req->dma, req->length, dir); } } /** * s3c_hsotg_write_fifo - write packet Data to the TxFIFO * @hsotg: The controller state. * @hs_ep: The endpoint we're going to write for. * @hs_req: The request to write data for. * * This is called when the TxFIFO has some space in it to hold a new * transmission and we have something to give it. 
The actual setup of * the data size is done elsewhere, so all we have to do is to actually * write the data. * * The return value is zero if there is more space (or nothing was done) * otherwise -ENOSPC is returned if the FIFO space was used up. * * This routine is only needed for PIO */ static int s3c_hsotg_write_fifo(struct s3c_hsotg *hsotg, struct s3c_hsotg_ep *hs_ep, struct s3c_hsotg_req *hs_req) { bool periodic = is_ep_periodic(hs_ep); u32 gnptxsts = readl(hsotg->regs + S3C_GNPTXSTS); int buf_pos = hs_req->req.actual; int to_write = hs_ep->size_loaded; void *data; int can_write; int pkt_round; to_write -= (buf_pos - hs_ep->last_load); /* if there's nothing to write, get out early */ if (to_write == 0) return 0; if (periodic) { u32 epsize = readl(hsotg->regs + S3C_DIEPTSIZ(hs_ep->index)); int size_left; int size_done; /* work out how much data was loaded so we can calculate * how much data is left in the fifo. */ size_left = S3C_DxEPTSIZ_XferSize_GET(epsize); dev_dbg(hsotg->dev, "%s: left=%d, load=%d, fifo=%d, size %d\n", __func__, size_left, hs_ep->size_loaded, hs_ep->fifo_load, hs_ep->fifo_size); /* how much of the data has moved */ size_done = hs_ep->size_loaded - size_left; /* how much data is left in the fifo */ can_write = hs_ep->fifo_load - size_done; dev_dbg(hsotg->dev, "%s: => can_write1=%d\n", __func__, can_write); can_write = hs_ep->fifo_size - can_write; dev_dbg(hsotg->dev, "%s: => can_write2=%d\n", __func__, can_write); if (can_write <= 0) { s3c_hsotg_en_gsint(hsotg, S3C_GINTSTS_PTxFEmp); return -ENOSPC; } } else { if (S3C_GNPTXSTS_NPTxQSpcAvail_GET(gnptxsts) == 0) { dev_dbg(hsotg->dev, "%s: no queue slots available (0x%08x)\n", __func__, gnptxsts); s3c_hsotg_en_gsint(hsotg, S3C_GINTSTS_NPTxFEmp); return -ENOSPC; } can_write = S3C_GNPTXSTS_NPTxFSpcAvail_GET(gnptxsts); } dev_dbg(hsotg->dev, "%s: GNPTXSTS=%08x, can=%d, to=%d, mps %d\n", __func__, gnptxsts, can_write, to_write, hs_ep->ep.maxpacket); /* limit to 512 bytes of data, it seems at least on 
the non-periodic * FIFO, requests of >512 cause the endpoint to get stuck with a * fragment of the end of the transfer in it. */ if (can_write > 512) can_write = 512; /* see if we can write data */ if (to_write > can_write) { to_write = can_write; pkt_round = to_write % hs_ep->ep.maxpacket; /* Not sure, but we probably shouldn't be writing partial * packets into the FIFO, so round the write down to an * exact number of packets. * * Note, we do not currently check to see if we can ever * write a full packet or not to the FIFO. */ if (pkt_round) to_write -= pkt_round; /* enable correct FIFO interrupt to alert us when there * is more room left. */ s3c_hsotg_en_gsint(hsotg, periodic ? S3C_GINTSTS_PTxFEmp : S3C_GINTSTS_NPTxFEmp); } dev_dbg(hsotg->dev, "write %d/%d, can_write %d, done %d\n", to_write, hs_req->req.length, can_write, buf_pos); if (to_write <= 0) return -ENOSPC; hs_req->req.actual = buf_pos + to_write; hs_ep->total_data += to_write; if (periodic) hs_ep->fifo_load += to_write; to_write = DIV_ROUND_UP(to_write, 4); data = hs_req->req.buf + buf_pos; writesl(hsotg->regs + S3C_EPFIFO(hs_ep->index), data, to_write); return (to_write >= can_write) ? -ENOSPC : 0; } /** * get_ep_limit - get the maximum data legnth for this endpoint * @hs_ep: The endpoint * * Return the maximum data that can be queued in one go on a given endpoint * so that transfers that are too long can be split. */ static unsigned get_ep_limit(struct s3c_hsotg_ep *hs_ep) { int index = hs_ep->index; unsigned maxsize; unsigned maxpkt; if (index != 0) { maxsize = S3C_DxEPTSIZ_XferSize_LIMIT + 1; maxpkt = S3C_DxEPTSIZ_PktCnt_LIMIT + 1; } else { if (hs_ep->dir_in) { /* maxsize = S3C_DIEPTSIZ0_XferSize_LIMIT + 1; */ maxsize = 64+64+1; maxpkt = S3C_DIEPTSIZ0_PktCnt_LIMIT + 1; } else { maxsize = 0x3f; maxpkt = 2; } } /* we made the constant loading easier above by using +1 */ maxpkt--; maxsize--; /* constrain by packet count if maxpkts*pktsize is greater * than the length register size. 
*/ if ((maxpkt * hs_ep->ep.maxpacket) < maxsize) maxsize = maxpkt * hs_ep->ep.maxpacket; return maxsize; } /** * s3c_hsotg_start_req - start a USB request from an endpoint's queue * @hsotg: The controller state. * @hs_ep: The endpoint to process a request for * @hs_req: The request to start. * @continuing: True if we are doing more for the current request. * * Start the given request running by setting the endpoint registers * appropriately, and writing any data to the FIFOs. */ static void s3c_hsotg_start_req(struct s3c_hsotg *hsotg, struct s3c_hsotg_ep *hs_ep, struct s3c_hsotg_req *hs_req, bool continuing) { struct usb_request *ureq = &hs_req->req; int index = hs_ep->index; int dir_in = hs_ep->dir_in; u32 epctrl_reg; u32 epsize_reg; u32 epsize; u32 ctrl; unsigned length; unsigned packets; unsigned maxreq; if (index != 0) { if (hs_ep->req && !continuing) { dev_err(hsotg->dev, "%s: active request\n", __func__); WARN_ON(1); return; } else if (hs_ep->req != hs_req && continuing) { dev_err(hsotg->dev, "%s: continue different req\n", __func__); WARN_ON(1); return; } } epctrl_reg = dir_in ? S3C_DIEPCTL(index) : S3C_DOEPCTL(index); epsize_reg = dir_in ? S3C_DIEPTSIZ(index) : S3C_DOEPTSIZ(index); dev_dbg(hsotg->dev, "%s: DxEPCTL=0x%08x, ep %d, dir %s\n", __func__, readl(hsotg->regs + epctrl_reg), index, hs_ep->dir_in ? "in" : "out"); length = ureq->length - ureq->actual; if (0) dev_dbg(hsotg->dev, "REQ buf %p len %d dma 0x%08x noi=%d zp=%d snok=%d\n", ureq->buf, length, ureq->dma, ureq->no_interrupt, ureq->zero, ureq->short_not_ok); maxreq = get_ep_limit(hs_ep); if (length > maxreq) { int round = maxreq % hs_ep->ep.maxpacket; dev_dbg(hsotg->dev, "%s: length %d, max-req %d, r %d\n", __func__, length, maxreq, round); /* round down to multiple of packets */ if (round) maxreq -= round; length = maxreq; } if (length) packets = DIV_ROUND_UP(length, hs_ep->ep.maxpacket); else packets = 1; /* send one packet if length is zero. 
*/ if (dir_in && index != 0) epsize = S3C_DxEPTSIZ_MC(1); else epsize = 0; if (index != 0 && ureq->zero) { /* test for the packets being exactly right for the * transfer */ if (length == (packets * hs_ep->ep.maxpacket)) packets++; } epsize |= S3C_DxEPTSIZ_PktCnt(packets); epsize |= S3C_DxEPTSIZ_XferSize(length); dev_dbg(hsotg->dev, "%s: %d@%d/%d, 0x%08x => 0x%08x\n", __func__, packets, length, ureq->length, epsize, epsize_reg); /* store the request as the current one we're doing */ hs_ep->req = hs_req; /* write size / packets */ writel(epsize, hsotg->regs + epsize_reg); ctrl = readl(hsotg->regs + epctrl_reg); if (ctrl & S3C_DxEPCTL_Stall) { dev_warn(hsotg->dev, "%s: ep%d is stalled\n", __func__, index); /* not sure what we can do here, if it is EP0 then we should * get this cleared once the endpoint has transmitted the * STALL packet, otherwise it needs to be cleared by the * host. */ } if (using_dma(hsotg)) { unsigned int dma_reg; /* write DMA address to control register, buffer already * synced by s3c_hsotg_ep_queue(). */ dma_reg = dir_in ? S3C_DIEPDMA(index) : S3C_DOEPDMA(index); writel(ureq->dma, hsotg->regs + dma_reg); dev_dbg(hsotg->dev, "%s: 0x%08x => 0x%08x\n", __func__, ureq->dma, dma_reg); } ctrl |= S3C_DxEPCTL_EPEna; /* ensure ep enabled */ ctrl |= S3C_DxEPCTL_USBActEp; ctrl |= S3C_DxEPCTL_CNAK; /* clear NAK set by core */ dev_dbg(hsotg->dev, "%s: DxEPCTL=0x%08x\n", __func__, ctrl); writel(ctrl, hsotg->regs + epctrl_reg); /* set these, it seems that DMA support increments past the end * of the packet buffer so we need to calculate the length from * this information. */ hs_ep->size_loaded = length; hs_ep->last_load = ureq->actual; if (dir_in && !using_dma(hsotg)) { /* set these anyway, we may need them for non-periodic in */ hs_ep->fifo_load = 0; s3c_hsotg_write_fifo(hsotg, hs_ep, hs_req); } /* clear the INTknTXFEmpMsk when we start request, more as a aide * to debugging to see what is going on. 
*/ if (dir_in) writel(S3C_DIEPMSK_INTknTXFEmpMsk, hsotg->regs + S3C_DIEPINT(index)); /* Note, trying to clear the NAK here causes problems with transmit * on the S3C6400 ending up with the TXFIFO becomming full. */ /* check ep is enabled */ if (!(readl(hsotg->regs + epctrl_reg) & S3C_DxEPCTL_EPEna)) dev_warn(hsotg->dev, "ep%d: failed to become enabled (DxEPCTL=0x%08x)?\n", index, readl(hsotg->regs + epctrl_reg)); dev_dbg(hsotg->dev, "%s: DxEPCTL=0x%08x\n", __func__, readl(hsotg->regs + epctrl_reg)); } /** * s3c_hsotg_map_dma - map the DMA memory being used for the request * @hsotg: The device state. * @hs_ep: The endpoint the request is on. * @req: The request being processed. * * We've been asked to queue a request, so ensure that the memory buffer * is correctly setup for DMA. If we've been passed an extant DMA address * then ensure the buffer has been synced to memory. If our buffer has no * DMA memory, then we map the memory and mark our request to allow us to * cleanup on completion. */ static int s3c_hsotg_map_dma(struct s3c_hsotg *hsotg, struct s3c_hsotg_ep *hs_ep, struct usb_request *req) { enum dma_data_direction dir; struct s3c_hsotg_req *hs_req = our_req(req); dir = hs_ep->dir_in ? 
DMA_TO_DEVICE : DMA_FROM_DEVICE; /* if the length is zero, ignore the DMA data */ if (hs_req->req.length == 0) return 0; if (req->dma == DMA_ADDR_INVALID) { dma_addr_t dma; dma = dma_map_single(hsotg->dev, req->buf, req->length, dir); if (unlikely(dma_mapping_error(hsotg->dev, dma))) goto dma_error; if (dma & 3) { dev_err(hsotg->dev, "%s: unaligned dma buffer\n", __func__); dma_unmap_single(hsotg->dev, dma, req->length, dir); return -EINVAL; } hs_req->mapped = 1; req->dma = dma; } else { dma_sync_single_for_cpu(hsotg->dev, req->dma, req->length, dir); hs_req->mapped = 0; } return 0; dma_error: dev_err(hsotg->dev, "%s: failed to map buffer %p, %d bytes\n", __func__, req->buf, req->length); return -EIO; } static int s3c_hsotg_ep_queue(struct usb_ep *ep, struct usb_request *req, gfp_t gfp_flags) { struct s3c_hsotg_req *hs_req = our_req(req); struct s3c_hsotg_ep *hs_ep = our_ep(ep); struct s3c_hsotg *hs = hs_ep->parent; unsigned long irqflags; bool first; dev_dbg(hs->dev, "%s: req %p: %d@%p, noi=%d, zero=%d, snok=%d\n", ep->name, req, req->length, req->buf, req->no_interrupt, req->zero, req->short_not_ok); /* initialise status of the request */ INIT_LIST_HEAD(&hs_req->queue); req->actual = 0; req->status = -EINPROGRESS; /* if we're using DMA, sync the buffers as necessary */ if (using_dma(hs)) { int ret = s3c_hsotg_map_dma(hs, hs_ep, req); if (ret) return ret; } spin_lock_irqsave(&hs_ep->lock, irqflags); first = list_empty(&hs_ep->queue); list_add_tail(&hs_req->queue, &hs_ep->queue); if (first) s3c_hsotg_start_req(hs, hs_ep, hs_req, false); spin_unlock_irqrestore(&hs_ep->lock, irqflags); return 0; } static void s3c_hsotg_ep_free_request(struct usb_ep *ep, struct usb_request *req) { struct s3c_hsotg_req *hs_req = our_req(req); kfree(hs_req); } /** * s3c_hsotg_complete_oursetup - setup completion callback * @ep: The endpoint the request was on. * @req: The request completed. * * Called on completion of any requests the driver itself * submitted that need cleaning up. 
*/ static void s3c_hsotg_complete_oursetup(struct usb_ep *ep, struct usb_request *req) { struct s3c_hsotg_ep *hs_ep = our_ep(ep); struct s3c_hsotg *hsotg = hs_ep->parent; dev_dbg(hsotg->dev, "%s: ep %p, req %p\n", __func__, ep, req); s3c_hsotg_ep_free_request(ep, req); } /** * ep_from_windex - convert control wIndex value to endpoint * @hsotg: The driver state. * @windex: The control request wIndex field (in host order). * * Convert the given wIndex into a pointer to an driver endpoint * structure, or return NULL if it is not a valid endpoint. */ static struct s3c_hsotg_ep *ep_from_windex(struct s3c_hsotg *hsotg, u32 windex) { struct s3c_hsotg_ep *ep = &hsotg->eps[windex & 0x7F]; int dir = (windex & USB_DIR_IN) ? 1 : 0; int idx = windex & 0x7F; if (windex >= 0x100) return NULL; if (idx > S3C_HSOTG_EPS) return NULL; if (idx && ep->dir_in != dir) return NULL; return ep; } /** * s3c_hsotg_send_reply - send reply to control request * @hsotg: The device state * @ep: Endpoint 0 * @buff: Buffer for request * @length: Length of reply. * * Create a request and queue it on the given endpoint. This is useful as * an internal method of sending replies to certain control requests, etc. 
*/ static int s3c_hsotg_send_reply(struct s3c_hsotg *hsotg, struct s3c_hsotg_ep *ep, void *buff, int length) { struct usb_request *req; int ret; dev_dbg(hsotg->dev, "%s: buff %p, len %d\n", __func__, buff, length); req = s3c_hsotg_ep_alloc_request(&ep->ep, GFP_ATOMIC); hsotg->ep0_reply = req; if (!req) { dev_warn(hsotg->dev, "%s: cannot alloc req\n", __func__); return -ENOMEM; } req->buf = hsotg->ep0_buff; req->length = length; req->zero = 1; /* always do zero-length final transfer */ req->complete = s3c_hsotg_complete_oursetup; if (length) memcpy(req->buf, buff, length); else ep->sent_zlp = 1; ret = s3c_hsotg_ep_queue(&ep->ep, req, GFP_ATOMIC); if (ret) { dev_warn(hsotg->dev, "%s: cannot queue req\n", __func__); return ret; } return 0; } /** * s3c_hsotg_process_req_status - process request GET_STATUS * @hsotg: The device state * @ctrl: USB control request */ static int s3c_hsotg_process_req_status(struct s3c_hsotg *hsotg, struct usb_ctrlrequest *ctrl) { struct s3c_hsotg_ep *ep0 = &hsotg->eps[0]; struct s3c_hsotg_ep *ep; __le16 reply; int ret; dev_dbg(hsotg->dev, "%s: USB_REQ_GET_STATUS\n", __func__); if (!ep0->dir_in) { dev_warn(hsotg->dev, "%s: direction out?\n", __func__); return -EINVAL; } switch (ctrl->bRequestType & USB_RECIP_MASK) { case USB_RECIP_DEVICE: reply = cpu_to_le16(0); /* bit 0 => self powered, * bit 1 => remote wakeup */ break; case USB_RECIP_INTERFACE: /* currently, the data result should be zero */ reply = cpu_to_le16(0); break; case USB_RECIP_ENDPOINT: ep = ep_from_windex(hsotg, le16_to_cpu(ctrl->wIndex)); if (!ep) return -ENOENT; reply = cpu_to_le16(ep->halted ? 
1 : 0); break; default: return 0; } if (le16_to_cpu(ctrl->wLength) != 2) return -EINVAL; ret = s3c_hsotg_send_reply(hsotg, ep0, &reply, 2); if (ret) { dev_err(hsotg->dev, "%s: failed to send reply\n", __func__); return ret; } return 1; } static int s3c_hsotg_ep_sethalt(struct usb_ep *ep, int value); /** * s3c_hsotg_process_req_featire - process request {SET,CLEAR}_FEATURE * @hsotg: The device state * @ctrl: USB control request */ static int s3c_hsotg_process_req_feature(struct s3c_hsotg *hsotg, struct usb_ctrlrequest *ctrl) { bool set = (ctrl->bRequest == USB_REQ_SET_FEATURE); struct s3c_hsotg_ep *ep; dev_dbg(hsotg->dev, "%s: %s_FEATURE\n", __func__, set ? "SET" : "CLEAR"); if (ctrl->bRequestType == USB_RECIP_ENDPOINT) { ep = ep_from_windex(hsotg, le16_to_cpu(ctrl->wIndex)); if (!ep) { dev_dbg(hsotg->dev, "%s: no endpoint for 0x%04x\n", __func__, le16_to_cpu(ctrl->wIndex)); return -ENOENT; } switch (le16_to_cpu(ctrl->wValue)) { case USB_ENDPOINT_HALT: s3c_hsotg_ep_sethalt(&ep->ep, set); break; default: return -ENOENT; } } else return -ENOENT; /* currently only deal with endpoint */ return 1; } /** * s3c_hsotg_process_control - process a control request * @hsotg: The device state * @ctrl: The control request received * * The controller has received the SETUP phase of a control request, and * needs to work out what to do next (and whether to pass it on to the * gadget driver). */ static void s3c_hsotg_process_control(struct s3c_hsotg *hsotg, struct usb_ctrlrequest *ctrl) { struct s3c_hsotg_ep *ep0 = &hsotg->eps[0]; int ret = 0; u32 dcfg; ep0->sent_zlp = 0; dev_dbg(hsotg->dev, "ctrl Req=%02x, Type=%02x, V=%04x, L=%04x\n", ctrl->bRequest, ctrl->bRequestType, ctrl->wValue, ctrl->wLength); /* record the direction of the request, for later use when enquing * packets onto EP0. */ ep0->dir_in = (ctrl->bRequestType & USB_DIR_IN) ? 
1 : 0; dev_dbg(hsotg->dev, "ctrl: dir_in=%d\n", ep0->dir_in); /* if we've no data with this request, then the last part of the * transaction is going to implicitly be IN. */ if (ctrl->wLength == 0) ep0->dir_in = 1; if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) { switch (ctrl->bRequest) { case USB_REQ_SET_ADDRESS: dcfg = readl(hsotg->regs + S3C_DCFG); dcfg &= ~S3C_DCFG_DevAddr_MASK; dcfg |= ctrl->wValue << S3C_DCFG_DevAddr_SHIFT; writel(dcfg, hsotg->regs + S3C_DCFG); dev_info(hsotg->dev, "new address %d\n", ctrl->wValue); ret = s3c_hsotg_send_reply(hsotg, ep0, NULL, 0); return; case USB_REQ_GET_STATUS: ret = s3c_hsotg_process_req_status(hsotg, ctrl); break; case USB_REQ_CLEAR_FEATURE: case USB_REQ_SET_FEATURE: ret = s3c_hsotg_process_req_feature(hsotg, ctrl); break; } } /* as a fallback, try delivering it to the driver to deal with */ if (ret == 0 && hsotg->driver) { ret = hsotg->driver->setup(&hsotg->gadget, ctrl); if (ret < 0) dev_dbg(hsotg->dev, "driver->setup() ret %d\n", ret); } if (ret > 0) { if (!ep0->dir_in) { /* need to generate zlp in reply or take data */ /* todo - deal with any data we might be sent? */ ret = s3c_hsotg_send_reply(hsotg, ep0, NULL, 0); } } /* the request is either unhandlable, or is not formatted correctly * so respond with a STALL for the status stage to indicate failure. */ if (ret < 0) { u32 reg; u32 ctrl; dev_dbg(hsotg->dev, "ep0 stall (dir=%d)\n", ep0->dir_in); reg = (ep0->dir_in) ? S3C_DIEPCTL0 : S3C_DOEPCTL0; /* S3C_DxEPCTL_Stall will be cleared by EP once it has * taken effect, so no need to clear later. 
*/ ctrl = readl(hsotg->regs + reg); ctrl |= S3C_DxEPCTL_Stall; ctrl |= S3C_DxEPCTL_CNAK; writel(ctrl, hsotg->regs + reg); dev_dbg(hsotg->dev, "writen DxEPCTL=0x%08x to %08x (DxEPCTL=0x%08x)\n", ctrl, reg, readl(hsotg->regs + reg)); /* don't belive we need to anything more to get the EP * to reply with a STALL packet */ } } static void s3c_hsotg_enqueue_setup(struct s3c_hsotg *hsotg); /** * s3c_hsotg_complete_setup - completion of a setup transfer * @ep: The endpoint the request was on. * @req: The request completed. * * Called on completion of any requests the driver itself submitted for * EP0 setup packets */ static void s3c_hsotg_complete_setup(struct usb_ep *ep, struct usb_request *req) { struct s3c_hsotg_ep *hs_ep = our_ep(ep); struct s3c_hsotg *hsotg = hs_ep->parent; if (req->status < 0) { dev_dbg(hsotg->dev, "%s: failed %d\n", __func__, req->status); return; } if (req->actual == 0) s3c_hsotg_enqueue_setup(hsotg); else s3c_hsotg_process_control(hsotg, req->buf); } /** * s3c_hsotg_enqueue_setup - start a request for EP0 packets * @hsotg: The device state. * * Enqueue a request on EP0 if necessary to received any SETUP packets * received from the host. */ static void s3c_hsotg_enqueue_setup(struct s3c_hsotg *hsotg) { struct usb_request *req = hsotg->ctrl_req; struct s3c_hsotg_req *hs_req = our_req(req); int ret; dev_dbg(hsotg->dev, "%s: queueing setup request\n", __func__); req->zero = 0; req->length = 8; req->buf = hsotg->ctrl_buff; req->complete = s3c_hsotg_complete_setup; if (!list_empty(&hs_req->queue)) { dev_dbg(hsotg->dev, "%s already queued???\n", __func__); return; } hsotg->eps[0].dir_in = 0; ret = s3c_hsotg_ep_queue(&hsotg->eps[0].ep, req, GFP_ATOMIC); if (ret < 0) { dev_err(hsotg->dev, "%s: failed queue (%d)\n", __func__, ret); /* Don't think there's much we can do other than watch the * driver fail. */ } } /** * get_ep_head - return the first request on the endpoint * @hs_ep: The controller endpoint to get * * Get the first request on the endpoint. 
*/
static struct s3c_hsotg_req *get_ep_head(struct s3c_hsotg_ep *hs_ep)
{
	if (list_empty(&hs_ep->queue))
		return NULL;

	return list_first_entry(&hs_ep->queue, struct s3c_hsotg_req, queue);
}

/**
 * s3c_hsotg_complete_request - complete a request given to us
 * @hsotg: The device state.
 * @hs_ep: The endpoint the request was on.
 * @hs_req: The request to complete.
 * @result: The result code (0 => Ok, otherwise errno)
 *
 * The given request has finished, so call the necessary completion
 * if it has one and then look to see if we can start a new request
 * on the endpoint.
 *
 * Note, expects the ep to already be locked as appropriate.
 */
static void s3c_hsotg_complete_request(struct s3c_hsotg *hsotg,
				       struct s3c_hsotg_ep *hs_ep,
				       struct s3c_hsotg_req *hs_req,
				       int result)
{
	bool restart;

	if (!hs_req) {
		dev_dbg(hsotg->dev, "%s: nothing to complete?\n", __func__);
		return;
	}

	dev_dbg(hsotg->dev, "complete: ep %p %s, req %p, %d => %p\n",
		hs_ep, hs_ep->ep.name, hs_req, result, hs_req->req.complete);

	/* only replace the status if we've not already set an error
	 * from a previous transaction */

	if (hs_req->req.status == -EINPROGRESS)
		hs_req->req.status = result;

	hs_ep->req = NULL;
	list_del_init(&hs_req->queue);

	if (using_dma(hsotg))
		s3c_hsotg_unmap_dma(hsotg, hs_ep, hs_req);

	/* call the complete request with the locks off, just in case the
	 * request tries to queue more work for this endpoint. */

	if (hs_req->req.complete) {
		spin_unlock(&hs_ep->lock);
		hs_req->req.complete(&hs_ep->ep, &hs_req->req);
		spin_lock(&hs_ep->lock);
	}

	/* Look to see if there is anything else to do. Note, the completion
	 * of the previous request may have caused a new request to be started
	 * so be careful when doing this. */

	if (!hs_ep->req && result >= 0) {
		restart = !list_empty(&hs_ep->queue);
		if (restart) {
			hs_req = get_ep_head(hs_ep);
			s3c_hsotg_start_req(hsotg, hs_ep, hs_req, false);
		}
	}
}

/**
 * s3c_hsotg_complete_request_lock - complete a request given to us (locked)
 * @hsotg: The device state.
 * @hs_ep: The endpoint the request was on.
 * @hs_req: The request to complete.
 * @result: The result code (0 => Ok, otherwise errno)
 *
 * See s3c_hsotg_complete_request(), but called with the endpoint's
 * lock held.
 */
static void s3c_hsotg_complete_request_lock(struct s3c_hsotg *hsotg,
					    struct s3c_hsotg_ep *hs_ep,
					    struct s3c_hsotg_req *hs_req,
					    int result)
{
	unsigned long flags;

	spin_lock_irqsave(&hs_ep->lock, flags);
	s3c_hsotg_complete_request(hsotg, hs_ep, hs_req, result);
	spin_unlock_irqrestore(&hs_ep->lock, flags);
}

/**
 * s3c_hsotg_rx_data - receive data from the FIFO for an endpoint
 * @hsotg: The device state.
 * @ep_idx: The endpoint index for the data
 * @size: The size of data in the fifo, in bytes
 *
 * The FIFO status shows there is data to read from the FIFO for a given
 * endpoint, so sort out whether we need to read the data into a request
 * that has been made for that endpoint.
 */
static void s3c_hsotg_rx_data(struct s3c_hsotg *hsotg, int ep_idx, int size)
{
	struct s3c_hsotg_ep *hs_ep = &hsotg->eps[ep_idx];
	struct s3c_hsotg_req *hs_req = hs_ep->req;
	void __iomem *fifo = hsotg->regs + S3C_EPFIFO(ep_idx);
	int to_read;
	int max_req;
	int read_ptr;

	if (!hs_req) {
		u32 epctl = readl(hsotg->regs + S3C_DOEPCTL(ep_idx));
		int ptr;

		dev_warn(hsotg->dev,
			 "%s: FIFO %d bytes on ep%d but no req (DxEPCTl=0x%08x)\n",
			 __func__, size, ep_idx, epctl);

		/* dump the data from the FIFO, we've nothing we can do */
		for (ptr = 0; ptr < size; ptr += 4)
			(void)readl(fifo);

		return;
	}

	spin_lock(&hs_ep->lock);

	to_read = size;
	read_ptr = hs_req->req.actual;
	max_req = hs_req->req.length - read_ptr;

	if (to_read > max_req) {
		/* more data appeared than we were willing
		 * to deal with in this request.
		 */

		/* currently we don't deal with this */
		WARN_ON_ONCE(1);
	}

	dev_dbg(hsotg->dev, "%s: read %d/%d, done %d/%d\n",
		__func__, to_read, max_req, read_ptr, hs_req->req.length);

	hs_ep->total_data += to_read;
	hs_req->req.actual += to_read;

	/* round up to a whole number of 32-bit FIFO words */
	to_read = DIV_ROUND_UP(to_read, 4);

	/* note, we might over-write the buffer end by 3 bytes depending on
	 * alignment of the data. */
	readsl(fifo, hs_req->req.buf + read_ptr, to_read);

	spin_unlock(&hs_ep->lock);
}

/**
 * s3c_hsotg_send_zlp - send zero-length packet on control endpoint
 * @hsotg: The device instance
 * @req: The request currently on this endpoint
 *
 * Generate a zero-length IN packet request for terminating a SETUP
 * transaction.
 *
 * Note, since we don't write any data to the TxFIFO, then it is
 * currently believed that we do not need to wait for any space in
 * the TxFIFO.
 */
static void s3c_hsotg_send_zlp(struct s3c_hsotg *hsotg,
			       struct s3c_hsotg_req *req)
{
	u32 ctrl;

	if (!req) {
		dev_warn(hsotg->dev, "%s: no request?\n", __func__);
		return;
	}

	if (req->req.length == 0) {
		/* the request itself was zero-length; nothing to terminate,
		 * just re-arm EP0 for the next SETUP */
		hsotg->eps[0].sent_zlp = 1;
		s3c_hsotg_enqueue_setup(hsotg);
		return;
	}

	hsotg->eps[0].dir_in = 1;
	hsotg->eps[0].sent_zlp = 1;

	dev_dbg(hsotg->dev, "sending zero-length packet\n");

	/* issue a zero-sized packet to terminate this */
	writel(S3C_DxEPTSIZ_MC(1) | S3C_DxEPTSIZ_PktCnt(1) |
	       S3C_DxEPTSIZ_XferSize(0), hsotg->regs + S3C_DIEPTSIZ(0));

	ctrl = readl(hsotg->regs + S3C_DIEPCTL0);
	ctrl |= S3C_DxEPCTL_CNAK;  /* clear NAK set by core */
	ctrl |= S3C_DxEPCTL_EPEna; /* ensure ep enabled */
	ctrl |= S3C_DxEPCTL_USBActEp;
	writel(ctrl, hsotg->regs + S3C_DIEPCTL0);
}

/**
 * s3c_hsotg_handle_outdone - handle receiving OutDone/SetupDone from RXFIFO
 * @hsotg: The device instance
 * @epnum: The endpoint received from
 * @was_setup: Set if processing a SetupDone event.
 *
 * The RXFIFO has delivered an OutDone event, which means that the data
 * transfer for an OUT endpoint has been completed, either by a short
 * packet or by the finish of a transfer.
*/ static void s3c_hsotg_handle_outdone(struct s3c_hsotg *hsotg, int epnum, bool was_setup) { struct s3c_hsotg_ep *hs_ep = &hsotg->eps[epnum]; struct s3c_hsotg_req *hs_req = hs_ep->req; struct usb_request *req = &hs_req->req; int result = 0; if (!hs_req) { dev_dbg(hsotg->dev, "%s: no request active\n", __func__); return; } if (using_dma(hsotg)) { u32 epsize = readl(hsotg->regs + S3C_DOEPTSIZ(epnum)); unsigned size_done; unsigned size_left; /* Calculate the size of the transfer by checking how much * is left in the endpoint size register and then working it * out from the amount we loaded for the transfer. * * We need to do this as DMA pointers are always 32bit aligned * so may overshoot/undershoot the transfer. */ size_left = S3C_DxEPTSIZ_XferSize_GET(epsize); size_done = hs_ep->size_loaded - size_left; size_done += hs_ep->last_load; req->actual = size_done; } if (req->actual < req->length && req->short_not_ok) { dev_dbg(hsotg->dev, "%s: got %d/%d (short not ok) => error\n", __func__, req->actual, req->length); /* todo - what should we return here? there's no one else * even bothering to check the status. */ } if (epnum == 0) { if (!was_setup && req->complete != s3c_hsotg_complete_setup) s3c_hsotg_send_zlp(hsotg, hs_req); } s3c_hsotg_complete_request_lock(hsotg, hs_ep, hs_req, result); } /** * s3c_hsotg_read_frameno - read current frame number * @hsotg: The device instance * * Return the current frame number */ static u32 s3c_hsotg_read_frameno(struct s3c_hsotg *hsotg) { u32 dsts; dsts = readl(hsotg->regs + S3C_DSTS); dsts &= S3C_DSTS_SOFFN_MASK; dsts >>= S3C_DSTS_SOFFN_SHIFT; return dsts; } /** * s3c_hsotg_handle_rx - RX FIFO has data * @hsotg: The device instance * * The IRQ handler has detected that the RX FIFO has some data in it * that requires processing, so find out what is in there and do the * appropriate read. 
*
 * The RXFIFO is a true FIFO, the packets coming out are still in packet
 * chunks, so if you have x packets received on an endpoint you'll get x
 * FIFO events delivered, each with a packet's worth of data in it.
 *
 * When using DMA, we should not be processing events from the RXFIFO
 * as the actual data should be sent to the memory directly and we turn
 * on the completion interrupts to get notifications of transfer completion.
 */
static void s3c_hsotg_handle_rx(struct s3c_hsotg *hsotg)
{
	/* note, GRXSTSP is the "pop" register: this read consumes the entry */
	u32 grxstsr = readl(hsotg->regs + S3C_GRXSTSP);
	u32 epnum, status, size;

	WARN_ON(using_dma(hsotg));

	epnum = grxstsr & S3C_GRXSTS_EPNum_MASK;
	status = grxstsr & S3C_GRXSTS_PktSts_MASK;

	size = grxstsr & S3C_GRXSTS_ByteCnt_MASK;
	size >>= S3C_GRXSTS_ByteCnt_SHIFT;

	if (1)
		dev_dbg(hsotg->dev, "%s: GRXSTSP=0x%08x (%d@%d)\n",
			__func__, grxstsr, size, epnum);

#define __status(x) ((x) >> S3C_GRXSTS_PktSts_SHIFT)

	switch (status >> S3C_GRXSTS_PktSts_SHIFT) {
	case __status(S3C_GRXSTS_PktSts_GlobalOutNAK):
		dev_dbg(hsotg->dev, "GlobalOutNAK\n");
		break;

	case __status(S3C_GRXSTS_PktSts_OutDone):
		dev_dbg(hsotg->dev, "OutDone (Frame=0x%08x)\n",
			s3c_hsotg_read_frameno(hsotg));

		if (!using_dma(hsotg))
			s3c_hsotg_handle_outdone(hsotg, epnum, false);
		break;

	case __status(S3C_GRXSTS_PktSts_SetupDone):
		dev_dbg(hsotg->dev,
			"SetupDone (Frame=0x%08x, DOPEPCTL=0x%08x)\n",
			s3c_hsotg_read_frameno(hsotg),
			readl(hsotg->regs + S3C_DOEPCTL(0)));

		s3c_hsotg_handle_outdone(hsotg, epnum, true);
		break;

	case __status(S3C_GRXSTS_PktSts_OutRX):
		s3c_hsotg_rx_data(hsotg, epnum, size);
		break;

	case __status(S3C_GRXSTS_PktSts_SetupRX):
		dev_dbg(hsotg->dev,
			"SetupRX (Frame=0x%08x, DOPEPCTL=0x%08x)\n",
			s3c_hsotg_read_frameno(hsotg),
			readl(hsotg->regs + S3C_DOEPCTL(0)));

		s3c_hsotg_rx_data(hsotg, epnum, size);
		break;

	default:
		dev_warn(hsotg->dev, "%s: unknown status %08x\n",
			 __func__, grxstsr);

		s3c_hsotg_dump(hsotg);
		break;
	}
}

/**
 * s3c_hsotg_ep0_mps - turn max packet size into register setting
 * @mps: The maximum packet size in bytes.
 *
 * Returns the D0EPCTL MPS field encoding for @mps, or (u32)-1 (with a
 * WARN) if @mps is not one of the four sizes EP0 supports.
 */
static u32 s3c_hsotg_ep0_mps(unsigned int mps)
{
	switch (mps) {
	case 64:
		return S3C_D0EPCTL_MPS_64;
	case 32:
		return S3C_D0EPCTL_MPS_32;
	case 16:
		return S3C_D0EPCTL_MPS_16;
	case 8:
		return S3C_D0EPCTL_MPS_8;
	}

	/* bad max packet size, warn and return invalid result */
	WARN_ON(1);
	return (u32)-1;
}

/**
 * s3c_hsotg_set_ep_maxpacket - set endpoint's max-packet field
 * @hsotg: The driver state.
 * @ep: The index number of the endpoint
 * @mps: The maximum packet size in bytes
 *
 * Configure the maximum packet size for the given endpoint, updating
 * the hardware control registers to reflect this.
 */
static void s3c_hsotg_set_ep_maxpacket(struct s3c_hsotg *hsotg,
				       unsigned int ep, unsigned int mps)
{
	struct s3c_hsotg_ep *hs_ep = &hsotg->eps[ep];
	void __iomem *regs = hsotg->regs;
	u32 mpsval;
	u32 reg;

	if (ep == 0) {
		/* EP0 is a special case: MPS is a 2-bit encoded field */
		mpsval = s3c_hsotg_ep0_mps(mps);
		if (mpsval > 3)
			goto bad_mps;
	} else {
		/* other endpoints take the raw size, bounded by the limit */
		if (mps >= S3C_DxEPCTL_MPS_LIMIT+1)
			goto bad_mps;

		mpsval = mps;
	}

	hs_ep->ep.maxpacket = mps;

	/* update both the in and out endpoint control registers, even
	 * if one of the directions may not be in use. */

	reg = readl(regs + S3C_DIEPCTL(ep));
	reg &= ~S3C_DxEPCTL_MPS_MASK;
	reg |= mpsval;
	writel(reg, regs + S3C_DIEPCTL(ep));

	reg = readl(regs + S3C_DOEPCTL(ep));
	reg &= ~S3C_DxEPCTL_MPS_MASK;
	reg |= mpsval;
	writel(reg, regs + S3C_DOEPCTL(ep));

	return;

bad_mps:
	dev_err(hsotg->dev, "ep%d: bad mps of %d\n", ep, mps);
}

/**
 * s3c_hsotg_trytx - check to see if anything needs transmitting
 * @hsotg: The driver state
 * @hs_ep: The driver endpoint to check.
 *
 * Check to see if there is a request that has data to send, and if so
 * make an attempt to write data into the FIFO.
*/
static int s3c_hsotg_trytx(struct s3c_hsotg *hsotg,
			   struct s3c_hsotg_ep *hs_ep)
{
	struct s3c_hsotg_req *hs_req = hs_ep->req;

	/* nothing to do for OUT endpoints or when no request is active */
	if (!hs_ep->dir_in || !hs_req)
		return 0;

	if (hs_req->req.actual < hs_req->req.length) {
		dev_dbg(hsotg->dev, "trying to write more for ep%d\n",
			hs_ep->index);
		return s3c_hsotg_write_fifo(hsotg, hs_ep, hs_req);
	}

	return 0;
}

/**
 * s3c_hsotg_complete_in - complete IN transfer
 * @hsotg: The device state.
 * @hs_ep: The endpoint that has just completed.
 *
 * An IN transfer has been completed, update the transfer's state and then
 * call the relevant completion routines.
 */
static void s3c_hsotg_complete_in(struct s3c_hsotg *hsotg,
				  struct s3c_hsotg_ep *hs_ep)
{
	struct s3c_hsotg_req *hs_req = hs_ep->req;
	u32 epsize = readl(hsotg->regs + S3C_DIEPTSIZ(hs_ep->index));
	int size_left, size_done;

	if (!hs_req) {
		dev_dbg(hsotg->dev, "XferCompl but no req\n");
		return;
	}

	/* Calculate the size of the transfer by checking how much is left
	 * in the endpoint size register and then working it out from
	 * the amount we loaded for the transfer.
	 *
	 * We do this even for DMA, as the transfer may have incremented
	 * past the end of the buffer (DMA transfers are always 32bit
	 * aligned).
	 */

	size_left = S3C_DxEPTSIZ_XferSize_GET(epsize);

	size_done = hs_ep->size_loaded - size_left;
	size_done += hs_ep->last_load;

	if (hs_req->req.actual != size_done)
		dev_dbg(hsotg->dev, "%s: adjusting size done %d => %d\n",
			__func__, hs_req->req.actual, size_done);

	hs_req->req.actual = size_done;

	/* if we did all of the transfer, and there is more data left
	 * around, then try restarting the rest of the request */

	if (!size_left && hs_req->req.actual < hs_req->req.length) {
		dev_dbg(hsotg->dev, "%s trying more for req...\n", __func__);
		s3c_hsotg_start_req(hsotg, hs_ep, hs_req, true);
	} else
		s3c_hsotg_complete_request_lock(hsotg, hs_ep, hs_req, 0);
}

/**
 * s3c_hsotg_epint - handle an in/out endpoint interrupt
 * @hsotg: The driver state
 * @idx: The index for the endpoint (0..15)
 * @dir_in: Set if this is an IN endpoint
 *
 * Process and clear any interrupt pending for an individual endpoint.
 * Handled bits are accumulated in 'clear' and written back to the
 * interrupt register in one go at the end.
 */
static void s3c_hsotg_epint(struct s3c_hsotg *hsotg, unsigned int idx,
			    int dir_in)
{
	struct s3c_hsotg_ep *hs_ep = &hsotg->eps[idx];
	u32 epint_reg = dir_in ? S3C_DIEPINT(idx) : S3C_DOEPINT(idx);
	u32 epctl_reg = dir_in ? S3C_DIEPCTL(idx) : S3C_DOEPCTL(idx);
	u32 epsiz_reg = dir_in ? S3C_DIEPTSIZ(idx) : S3C_DOEPTSIZ(idx);
	u32 ints;
	u32 clear = 0;

	ints = readl(hsotg->regs + epint_reg);

	dev_dbg(hsotg->dev, "%s: ep%d(%s) DxEPINT=0x%08x\n",
		__func__, idx, dir_in ? "in" : "out", ints);

	if (ints & S3C_DxEPINT_XferCompl) {
		dev_dbg(hsotg->dev,
			"%s: XferCompl: DxEPCTL=0x%08x, DxEPTSIZ=%08x\n",
			__func__, readl(hsotg->regs + epctl_reg),
			readl(hsotg->regs + epsiz_reg));

		/* we get OutDone from the FIFO, so we only need to look
		 * at completing IN requests here */
		if (dir_in) {
			s3c_hsotg_complete_in(hsotg, hs_ep);

			/* EP0 completion means the control transfer is done;
			 * re-arm for the next SETUP */
			if (idx == 0)
				s3c_hsotg_enqueue_setup(hsotg);
		} else if (using_dma(hsotg)) {
			/* We're using DMA, we need to fire an OutDone here
			 * as we ignore the RXFIFO. */

			s3c_hsotg_handle_outdone(hsotg, idx, false);
		}

		clear |= S3C_DxEPINT_XferCompl;
	}

	if (ints & S3C_DxEPINT_EPDisbld) {
		dev_dbg(hsotg->dev, "%s: EPDisbld\n", __func__);
		clear |= S3C_DxEPINT_EPDisbld;
	}

	if (ints & S3C_DxEPINT_AHBErr) {
		dev_dbg(hsotg->dev, "%s: AHBErr\n", __func__);
		clear |= S3C_DxEPINT_AHBErr;
	}

	if (ints & S3C_DxEPINT_Setup) {  /* Setup or Timeout */
		dev_dbg(hsotg->dev, "%s: Setup/Timeout\n",  __func__);

		if (using_dma(hsotg) && idx == 0) {
			/* this is the notification we've received a
			 * setup packet. In non-DMA mode we'd get this
			 * from the RXFIFO, instead we need to process
			 * the setup here. */

			if (dir_in)
				WARN_ON_ONCE(1);
			else
				s3c_hsotg_handle_outdone(hsotg, 0, true);
		}

		clear |= S3C_DxEPINT_Setup;
	}

	if (ints & S3C_DxEPINT_Back2BackSetup) {
		dev_dbg(hsotg->dev, "%s: B2BSetup/INEPNakEff\n", __func__);
		clear |= S3C_DxEPINT_Back2BackSetup;
	}

	if (dir_in) {
		/* not sure if this is important, but we'll clear it anyway */
		if (ints & S3C_DIEPMSK_INTknTXFEmpMsk) {
			dev_dbg(hsotg->dev, "%s: ep%d: INTknTXFEmpMsk\n",
				__func__, idx);
			clear |= S3C_DIEPMSK_INTknTXFEmpMsk;
		}

		/* this probably means something bad is happening */
		if (ints & S3C_DIEPMSK_INTknEPMisMsk) {
			dev_warn(hsotg->dev, "%s: ep%d: INTknEP\n",
				 __func__, idx);
			clear |= S3C_DIEPMSK_INTknEPMisMsk;
		}
	}

	writel(clear, hsotg->regs + epint_reg);
}

/**
 * s3c_hsotg_irq_enumdone - Handle EnumDone interrupt (enumeration done)
 * @hsotg: The device state.
 *
 * Handle updating the device settings after the enumeration phase has
 * been completed.
 */
static void s3c_hsotg_irq_enumdone(struct s3c_hsotg *hsotg)
{
	u32 dsts = readl(hsotg->regs + S3C_DSTS);
	int ep0_mps = 0, ep_mps;

	/* This should signal the finish of the enumeration phase
	 * of the USB handshaking, so we should now know what rate
	 * we connected at. */

	dev_dbg(hsotg->dev, "EnumDone (DSTS=0x%08x)\n", dsts);

	/* note, since we're limited by the size of transfer on EP0, and
	 * it seems IN transfers must be a even number of packets we do
	 * not advertise a 64byte MPS on EP0. */

	/* catch both EnumSpd_FS and EnumSpd_FS48 */
	switch (dsts & S3C_DSTS_EnumSpd_MASK) {
	case S3C_DSTS_EnumSpd_FS:
	case S3C_DSTS_EnumSpd_FS48:
		hsotg->gadget.speed = USB_SPEED_FULL;
		dev_info(hsotg->dev, "new device is full-speed\n");

		ep0_mps = EP0_MPS_LIMIT;
		ep_mps = 64;
		break;

	case S3C_DSTS_EnumSpd_HS:
		dev_info(hsotg->dev, "new device is high-speed\n");
		hsotg->gadget.speed = USB_SPEED_HIGH;

		ep0_mps = EP0_MPS_LIMIT;
		ep_mps = 512;
		break;

	case S3C_DSTS_EnumSpd_LS:
		hsotg->gadget.speed = USB_SPEED_LOW;
		dev_info(hsotg->dev, "new device is low-speed\n");

		/* note, we don't actually support LS in this driver at the
		 * moment, and the documentation seems to imply that it isn't
		 * supported by the PHYs on some of the devices. */
		break;
	}

	/* we should now know the maximum packet size for an
	 * endpoint, so set the endpoints to a default value. */

	if (ep0_mps) {
		int i;
		s3c_hsotg_set_ep_maxpacket(hsotg, 0, ep0_mps);
		for (i = 1; i < S3C_HSOTG_EPS; i++)
			s3c_hsotg_set_ep_maxpacket(hsotg, i, ep_mps);
	}

	/* ensure after enumeration our EP0 is active */

	s3c_hsotg_enqueue_setup(hsotg);

	dev_dbg(hsotg->dev, "EP0: DIEPCTL0=0x%08x, DOEPCTL0=0x%08x\n",
		readl(hsotg->regs + S3C_DIEPCTL0),
		readl(hsotg->regs + S3C_DOEPCTL0));
}

/**
 * kill_all_requests - remove all requests from the endpoint's queue
 * @hsotg: The device state.
 * @ep: The endpoint the requests may be on.
 * @result: The result code to use.
 * @force: Force removal of any current requests
 *
 * Go through the requests on the given endpoint and mark them
 * completed with the given result code.
*/
static void kill_all_requests(struct s3c_hsotg *hsotg,
			      struct s3c_hsotg_ep *ep,
			      int result, bool force)
{
	struct s3c_hsotg_req *req, *treq;
	unsigned long flags;

	spin_lock_irqsave(&ep->lock, flags);

	list_for_each_entry_safe(req, treq, &ep->queue, queue) {
		/* currently, we can't do much about an already
		 * running request on an in endpoint */

		if (ep->req == req && ep->dir_in && !force)
			continue;

		s3c_hsotg_complete_request(hsotg, ep, req, result);
	}

	spin_unlock_irqrestore(&ep->lock, flags);
}

/* invoke a gadget driver callback only when one is bound and connected */
#define call_gadget(_hs, _entry) \
	if ((_hs)->gadget.speed != USB_SPEED_UNKNOWN &&	\
	    (_hs)->driver && (_hs)->driver->_entry)	\
		(_hs)->driver->_entry(&(_hs)->gadget);

/**
 * s3c_hsotg_disconnect_irq - disconnect irq service
 * @hsotg: The device state.
 *
 * A disconnect IRQ has been received, meaning that the host has
 * lost contact with the bus. Remove all current transactions
 * and signal the gadget driver that this has happened.
 */
static void s3c_hsotg_disconnect_irq(struct s3c_hsotg *hsotg)
{
	unsigned ep;

	for (ep = 0; ep < S3C_HSOTG_EPS; ep++)
		kill_all_requests(hsotg, &hsotg->eps[ep], -ESHUTDOWN, true);

	call_gadget(hsotg, disconnect);
}

/**
 * s3c_hsotg_irq_fifoempty - TX FIFO empty interrupt handler
 * @hsotg: The device state:
 * @periodic: True if this is a periodic FIFO interrupt
 */
static void s3c_hsotg_irq_fifoempty(struct s3c_hsotg *hsotg, bool periodic)
{
	struct s3c_hsotg_ep *ep;
	int epno, ret;

	/* look through for any more data to transmit */

	for (epno = 0; epno < S3C_HSOTG_EPS; epno++) {
		ep = &hsotg->eps[epno];

		if (!ep->dir_in)
			continue;

		/* only service endpoints matching the FIFO type signalled */
		if ((periodic && !ep->periodic) ||
		    (!periodic &&  ep->periodic))
			continue;

		ret = s3c_hsotg_trytx(hsotg, ep);
		if (ret < 0)
			break;
	}
}

/* single-instance device state; set at probe time (not visible here) */
static struct s3c_hsotg *our_hsotg;

/* IRQ flags which will trigger a retry around the IRQ loop */
#define IRQ_RETRY_MASK (S3C_GINTSTS_NPTxFEmp | \
			S3C_GINTSTS_PTxFEmp | \
			S3C_GINTSTS_RxFLvl)

/**
 * s3c_hsotg_irq - handle device interrupt
 * @irq: The IRQ number triggered
 * @pw: The pw value when registered
the handler.
 */
static irqreturn_t s3c_hsotg_irq(int irq, void *pw)
{
	struct s3c_hsotg *hsotg = pw;
	int retry_count = 8;
	u32 gintsts;
	u32 gintmsk;

irq_retry:
	gintsts = readl(hsotg->regs + S3C_GINTSTS);
	gintmsk = readl(hsotg->regs + S3C_GINTMSK);

	dev_dbg(hsotg->dev, "%s: %08x %08x (%08x) retry %d\n",
		__func__, gintsts, gintsts & gintmsk, gintmsk, retry_count);

	/* only act on interrupts that are currently unmasked */
	gintsts &= gintmsk;

	if (gintsts & S3C_GINTSTS_OTGInt) {
		u32 otgint = readl(hsotg->regs + S3C_GOTGINT);

		dev_info(hsotg->dev, "OTGInt: %08x\n", otgint);

		writel(otgint, hsotg->regs + S3C_GOTGINT);
		writel(S3C_GINTSTS_OTGInt, hsotg->regs + S3C_GINTSTS);
	}

	if (gintsts & S3C_GINTSTS_DisconnInt) {
		dev_dbg(hsotg->dev, "%s: DisconnInt\n", __func__);

		writel(S3C_GINTSTS_DisconnInt, hsotg->regs + S3C_GINTSTS);

		s3c_hsotg_disconnect_irq(hsotg);
	}

	if (gintsts & S3C_GINTSTS_SessReqInt) {
		dev_dbg(hsotg->dev, "%s: SessReqInt\n", __func__);
		writel(S3C_GINTSTS_SessReqInt, hsotg->regs + S3C_GINTSTS);
	}

	if (gintsts & S3C_GINTSTS_EnumDone) {
		s3c_hsotg_irq_enumdone(hsotg);
		writel(S3C_GINTSTS_EnumDone, hsotg->regs + S3C_GINTSTS);
	}

	if (gintsts & S3C_GINTSTS_ConIDStsChng) {
		dev_dbg(hsotg->dev, "ConIDStsChg (DSTS=0x%08x, GOTCTL=%08x)\n",
			readl(hsotg->regs + S3C_DSTS),
			readl(hsotg->regs + S3C_GOTGCTL));

		writel(S3C_GINTSTS_ConIDStsChng, hsotg->regs + S3C_GINTSTS);
	}

	if (gintsts & (S3C_GINTSTS_OEPInt | S3C_GINTSTS_IEPInt)) {
		u32 daint = readl(hsotg->regs + S3C_DAINT);
		u32 daint_out = daint >> S3C_DAINT_OutEP_SHIFT;
		u32 daint_in = daint & ~(daint_out << S3C_DAINT_OutEP_SHIFT);
		int ep;

		dev_dbg(hsotg->dev, "%s: daint=%08x\n", __func__, daint);

		/* dispatch per-endpoint handlers; the bitmask shrinks as
		 * we shift so the loop ends as soon as no bits remain */
		for (ep = 0; ep < 15 && daint_out; ep++, daint_out >>= 1) {
			if (daint_out & 1)
				s3c_hsotg_epint(hsotg, ep, 0);
		}

		for (ep = 0; ep < 15 && daint_in; ep++, daint_in >>= 1) {
			if (daint_in & 1)
				s3c_hsotg_epint(hsotg, ep, 1);
		}

		writel(daint, hsotg->regs + S3C_DAINT);
		writel(gintsts & (S3C_GINTSTS_OEPInt | S3C_GINTSTS_IEPInt),
		       hsotg->regs + S3C_GINTSTS);
	}

	if (gintsts & S3C_GINTSTS_USBRst) {
		dev_info(hsotg->dev, "%s: USBRst\n", __func__);
		dev_dbg(hsotg->dev, "GNPTXSTS=%08x\n",
			readl(hsotg->regs + S3C_GNPTXSTS));

		kill_all_requests(hsotg, &hsotg->eps[0], -ECONNRESET, true);

		/* it seems after a reset we can end up with a situation
		 * where the TXFIFO still has data in it... try flushing
		 * it to remove anything that may still be in it.
		 */

		if (1) {
			writel(S3C_GRSTCTL_TxFNum(0) | S3C_GRSTCTL_TxFFlsh,
			       hsotg->regs + S3C_GRSTCTL);

			dev_info(hsotg->dev, "GNPTXSTS=%08x\n",
				 readl(hsotg->regs + S3C_GNPTXSTS));
		}

		s3c_hsotg_enqueue_setup(hsotg);

		writel(S3C_GINTSTS_USBRst, hsotg->regs + S3C_GINTSTS);
	}

	/* check both FIFOs */

	if (gintsts & S3C_GINTSTS_NPTxFEmp) {
		dev_dbg(hsotg->dev, "NPTxFEmp\n");

		/* Disable the interrupt to stop it happening again
		 * unless one of these endpoint routines decides that
		 * it needs re-enabling */

		s3c_hsotg_disable_gsint(hsotg, S3C_GINTSTS_NPTxFEmp);
		s3c_hsotg_irq_fifoempty(hsotg, false);

		writel(S3C_GINTSTS_NPTxFEmp, hsotg->regs + S3C_GINTSTS);
	}

	if (gintsts & S3C_GINTSTS_PTxFEmp) {
		dev_dbg(hsotg->dev, "PTxFEmp\n");

		/* See note in S3C_GINTSTS_NPTxFEmp */

		s3c_hsotg_disable_gsint(hsotg, S3C_GINTSTS_PTxFEmp);
		s3c_hsotg_irq_fifoempty(hsotg, true);

		writel(S3C_GINTSTS_PTxFEmp, hsotg->regs + S3C_GINTSTS);
	}

	if (gintsts & S3C_GINTSTS_RxFLvl) {
		/* note, since GINTSTS_RxFLvl doubles as FIFO-not-empty,
		 * we need to retry s3c_hsotg_handle_rx if this is still
		 * set. */

		s3c_hsotg_handle_rx(hsotg);
		writel(S3C_GINTSTS_RxFLvl, hsotg->regs + S3C_GINTSTS);
	}

	if (gintsts & S3C_GINTSTS_ModeMis) {
		dev_warn(hsotg->dev, "warning, mode mismatch triggered\n");
		writel(S3C_GINTSTS_ModeMis, hsotg->regs + S3C_GINTSTS);
	}

	if (gintsts & S3C_GINTSTS_USBSusp) {
		dev_info(hsotg->dev, "S3C_GINTSTS_USBSusp\n");
		writel(S3C_GINTSTS_USBSusp, hsotg->regs + S3C_GINTSTS);

		call_gadget(hsotg, suspend);
	}

	if (gintsts & S3C_GINTSTS_WkUpInt) {
		dev_info(hsotg->dev, "S3C_GINTSTS_WkUpIn\n");
		writel(S3C_GINTSTS_WkUpInt, hsotg->regs + S3C_GINTSTS);

		call_gadget(hsotg, resume);
	}

	if (gintsts & S3C_GINTSTS_ErlySusp) {
		dev_dbg(hsotg->dev, "S3C_GINTSTS_ErlySusp\n");
		writel(S3C_GINTSTS_ErlySusp, hsotg->regs + S3C_GINTSTS);
	}

	/* these next two seem to crop-up occasionally causing the core
	 * to shutdown the USB transfer, so try clearing them and logging
	 * the occurrence. */

	if (gintsts & S3C_GINTSTS_GOUTNakEff) {
		dev_info(hsotg->dev, "GOUTNakEff triggered\n");

		s3c_hsotg_dump(hsotg);

		writel(S3C_DCTL_CGOUTNak, hsotg->regs + S3C_DCTL);
		writel(S3C_GINTSTS_GOUTNakEff, hsotg->regs + S3C_GINTSTS);
	}

	if (gintsts & S3C_GINTSTS_GINNakEff) {
		dev_info(hsotg->dev, "GINNakEff triggered\n");

		s3c_hsotg_dump(hsotg);

		writel(S3C_DCTL_CGNPInNAK, hsotg->regs + S3C_DCTL);
		writel(S3C_GINTSTS_GINNakEff, hsotg->regs + S3C_GINTSTS);
	}

	/* if we've had fifo events, we should try and go around the
	 * loop again to see if there's any point in returning yet. */

	if (gintsts & IRQ_RETRY_MASK && --retry_count > 0)
		goto irq_retry;

	return IRQ_HANDLED;
}

/**
 * s3c_hsotg_ep_enable - enable the given endpoint
 * @ep: The USB endpint to configure
 * @desc: The USB endpoint descriptor to configure with.
 *
 * This is called from the USB gadget code's usb_ep_enable().
*/
static int s3c_hsotg_ep_enable(struct usb_ep *ep,
			       const struct usb_endpoint_descriptor *desc)
{
	struct s3c_hsotg_ep *hs_ep = our_ep(ep);
	struct s3c_hsotg *hsotg = hs_ep->parent;
	unsigned long flags;
	int index = hs_ep->index;
	u32 epctrl_reg;
	u32 epctrl;
	u32 mps;
	int dir_in;
	int ret = 0;

	dev_dbg(hsotg->dev,
		"%s: ep %s: a 0x%02x, attr 0x%02x, mps 0x%04x, intr %d\n",
		__func__, ep->name, desc->bEndpointAddress, desc->bmAttributes,
		desc->wMaxPacketSize, desc->bInterval);

	/* not to be called for EP0 */
	WARN_ON(index == 0);

	dir_in = (desc->bEndpointAddress & USB_ENDPOINT_DIR_MASK) ? 1 : 0;
	if (dir_in != hs_ep->dir_in) {
		dev_err(hsotg->dev, "%s: direction mismatch!\n", __func__);
		return -EINVAL;
	}

	mps = le16_to_cpu(desc->wMaxPacketSize);

	/* note, we handle this here instead of s3c_hsotg_set_ep_maxpacket */

	epctrl_reg = dir_in ? S3C_DIEPCTL(index) : S3C_DOEPCTL(index);
	epctrl = readl(hsotg->regs + epctrl_reg);

	dev_dbg(hsotg->dev, "%s: read DxEPCTL=0x%08x from 0x%08x\n",
		__func__, epctrl, epctrl_reg);

	spin_lock_irqsave(&hs_ep->lock, flags);

	epctrl &= ~(S3C_DxEPCTL_EPType_MASK | S3C_DxEPCTL_MPS_MASK);
	epctrl |= S3C_DxEPCTL_MPS(mps);

	/* mark the endpoint as active, otherwise the core may ignore
	 * transactions entirely for this endpoint */
	epctrl |= S3C_DxEPCTL_USBActEp;

	/* set the NAK status on the endpoint, otherwise we might try and
	 * do something with data that we've yet got a request to process
	 * since the RXFIFO will take data for an endpoint even if the
	 * size register hasn't been set.
	 */

	epctrl |= S3C_DxEPCTL_SNAK;

	/* update the endpoint state */
	hs_ep->ep.maxpacket = mps;

	/* default, set to non-periodic */
	hs_ep->periodic = 0;

	switch (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) {
	case USB_ENDPOINT_XFER_ISOC:
		dev_err(hsotg->dev, "no current ISOC support\n");
		ret = -EINVAL;
		goto out;

	case USB_ENDPOINT_XFER_BULK:
		epctrl |= S3C_DxEPCTL_EPType_Bulk;
		break;

	case USB_ENDPOINT_XFER_INT:
		if (dir_in) {
			/* Allocate our TxFNum by simply using the index
			 * of the endpoint for the moment. We could do
			 * something better if the host indicates how
			 * many FIFOs we are expecting to use. */

			hs_ep->periodic = 1;
			epctrl |= S3C_DxEPCTL_TxFNum(index);
		}

		epctrl |= S3C_DxEPCTL_EPType_Intterupt;
		break;

	case USB_ENDPOINT_XFER_CONTROL:
		epctrl |= S3C_DxEPCTL_EPType_Control;
		break;
	}

	/* for non control endpoints, set PID to D0 */
	if (index)
		epctrl |= S3C_DxEPCTL_SetD0PID;

	dev_dbg(hsotg->dev, "%s: write DxEPCTL=0x%08x\n",
		__func__, epctrl);

	writel(epctrl, hsotg->regs + epctrl_reg);
	dev_dbg(hsotg->dev, "%s: read DxEPCTL=0x%08x\n",
		__func__, readl(hsotg->regs + epctrl_reg));

	/* enable the endpoint interrupt */
	s3c_hsotg_ctrl_epint(hsotg, index, dir_in, 1);

out:
	spin_unlock_irqrestore(&hs_ep->lock, flags);
	return ret;
}

/*
 * s3c_hsotg_ep_disable - disable the given endpoint
 *
 * Shuts down the endpoint: kills outstanding requests, clears the
 * enable/active bits and masks its interrupts. EP0 may not be disabled.
 */
static int s3c_hsotg_ep_disable(struct usb_ep *ep)
{
	struct s3c_hsotg_ep *hs_ep = our_ep(ep);
	struct s3c_hsotg *hsotg = hs_ep->parent;
	int dir_in = hs_ep->dir_in;
	int index = hs_ep->index;
	unsigned long flags;
	u32 epctrl_reg;
	u32 ctrl;

	dev_info(hsotg->dev, "%s(ep %p)\n", __func__, ep);

	if (ep == &hsotg->eps[0].ep) {
		dev_err(hsotg->dev, "%s: called for ep0\n", __func__);
		return -EINVAL;
	}

	epctrl_reg = dir_in ? S3C_DIEPCTL(index) : S3C_DOEPCTL(index);

	/* terminate all requests with shutdown */
	kill_all_requests(hsotg, hs_ep, -ESHUTDOWN, false);

	spin_lock_irqsave(&hs_ep->lock, flags);

	ctrl = readl(hsotg->regs + epctrl_reg);
	ctrl &= ~S3C_DxEPCTL_EPEna;
	ctrl &= ~S3C_DxEPCTL_USBActEp;
	ctrl |= S3C_DxEPCTL_SNAK;

	dev_dbg(hsotg->dev, "%s: DxEPCTL=0x%08x\n", __func__, ctrl);
	writel(ctrl, hsotg->regs + epctrl_reg);

	/* disable endpoint interrupts */
	s3c_hsotg_ctrl_epint(hsotg, hs_ep->index, hs_ep->dir_in, 0);

	spin_unlock_irqrestore(&hs_ep->lock, flags);
	return 0;
}

/**
 * on_list - check request is on the given endpoint
 * @ep: The endpoint to check.
 * @test: The request to test if it is on the endpoint.
 */
static bool on_list(struct s3c_hsotg_ep *ep, struct s3c_hsotg_req *test)
{
	struct s3c_hsotg_req *req, *treq;

	list_for_each_entry_safe(req, treq, &ep->queue, queue) {
		if (req == test)
			return true;
	}

	return false;
}

/*
 * s3c_hsotg_ep_dequeue - remove a queued (not yet running) request
 *
 * Refuses to cancel the currently-active request (-EINPROGRESS); a
 * request not on this endpoint's queue gives -EINVAL.
 */
static int s3c_hsotg_ep_dequeue(struct usb_ep *ep, struct usb_request *req)
{
	struct s3c_hsotg_req *hs_req = our_req(req);
	struct s3c_hsotg_ep *hs_ep = our_ep(ep);
	struct s3c_hsotg *hs = hs_ep->parent;
	unsigned long flags;

	dev_info(hs->dev, "ep_dequeue(%p,%p)\n", ep, req);

	if (hs_req == hs_ep->req) {
		dev_dbg(hs->dev, "%s: already in progress\n", __func__);
		return -EINPROGRESS;
	}

	spin_lock_irqsave(&hs_ep->lock, flags);

	if (!on_list(hs_ep, hs_req)) {
		spin_unlock_irqrestore(&hs_ep->lock, flags);
		return -EINVAL;
	}

	s3c_hsotg_complete_request(hs, hs_ep, hs_req, -ECONNRESET);
	spin_unlock_irqrestore(&hs_ep->lock, flags);

	return 0;
}

/*
 * s3c_hsotg_ep_sethalt - set/clear the STALL condition on an endpoint
 *
 * Applies the Stall bit to both the IN and OUT control registers for
 * the endpoint index. (Continues on the following lines.)
 */
static int s3c_hsotg_ep_sethalt(struct usb_ep *ep, int value)
{
	struct s3c_hsotg_ep *hs_ep = our_ep(ep);
	struct s3c_hsotg *hs = hs_ep->parent;
	int index = hs_ep->index;
	unsigned long irqflags;
	u32 epreg;
	u32 epctl;

	dev_info(hs->dev, "%s(ep %p %s, %d)\n", __func__, ep, ep->name, value);

	spin_lock_irqsave(&hs_ep->lock, irqflags);

	/* write both IN and OUT control registers */

	epreg = S3C_DIEPCTL(index);
	epctl = readl(hs->regs + epreg);

	if (value)
		epctl
|= S3C_DxEPCTL_Stall;
	else
		epctl &= ~S3C_DxEPCTL_Stall;

	writel(epctl, hs->regs + epreg);

	epreg = S3C_DOEPCTL(index);
	epctl = readl(hs->regs + epreg);

	if (value)
		epctl |= S3C_DxEPCTL_Stall;
	else
		epctl &= ~S3C_DxEPCTL_Stall;

	writel(epctl, hs->regs + epreg);

	spin_unlock_irqrestore(&hs_ep->lock, irqflags);

	return 0;
}

/* gadget-framework endpoint operations for this controller */
static struct usb_ep_ops s3c_hsotg_ep_ops = {
	.enable		= s3c_hsotg_ep_enable,
	.disable	= s3c_hsotg_ep_disable,
	.alloc_request	= s3c_hsotg_ep_alloc_request,
	.free_request	= s3c_hsotg_ep_free_request,
	.queue		= s3c_hsotg_ep_queue,
	.dequeue	= s3c_hsotg_ep_dequeue,
	.set_halt	= s3c_hsotg_ep_sethalt,
	/* note, don't believe we have any call for the fifo routines */
};

/**
 * s3c_hsotg_corereset - issue softreset to the core
 * @hsotg: The device state
 *
 * Issue a soft reset to the core, and await the core finishing it.
 * First poll for CSftRst to be seen asserted, then for the core to
 * clear it again with AHBIdle set; -ETIMEDOUT if it never completes.
 */
static int s3c_hsotg_corereset(struct s3c_hsotg *hsotg)
{
	int timeout;
	u32 grstctl;

	dev_dbg(hsotg->dev, "resetting core\n");

	/* issue soft reset */
	writel(S3C_GRSTCTL_CSftRst, hsotg->regs + S3C_GRSTCTL);

	timeout = 1000;
	do {
		grstctl = readl(hsotg->regs + S3C_GRSTCTL);
	} while (!(grstctl & S3C_GRSTCTL_CSftRst) && timeout-- > 0);

	if (!(grstctl & S3C_GRSTCTL_CSftRst)) {
		dev_err(hsotg->dev, "Failed to get CSftRst asserted\n");
		return -EINVAL;
	}

	timeout = 1000;

	while (1) {
		u32 grstctl = readl(hsotg->regs + S3C_GRSTCTL);

		if (timeout-- < 0) {
			dev_info(hsotg->dev,
				 "%s: reset failed, GRSTCTL=%08x\n",
				 __func__, grstctl);
			return -ETIMEDOUT;
		}

		/* CSftRst is self-clearing: wait for it to drop and for
		 * the AHB master to go idle before declaring success */
		if (grstctl & S3C_GRSTCTL_CSftRst)
			continue;

		if (!(grstctl & S3C_GRSTCTL_AHBIdle))
			continue;

		break;		/* reset done */
	}

	dev_dbg(hsotg->dev, "reset successful\n");
	return 0;
}

int usb_gadget_register_driver(struct usb_gadget_driver *driver)
{
	struct s3c_hsotg *hsotg = our_hsotg;
	int ret;

	if (!hsotg) {
		printk(KERN_ERR "%s: called with no device\n", __func__);
		return -ENODEV;
	}

	if (!driver) {
		dev_err(hsotg->dev, "%s: no driver\n", __func__);
		return -EINVAL;
	}

	if (driver->speed != USB_SPEED_HIGH &&
	    driver->speed !=
USB_SPEED_FULL) { dev_err(hsotg->dev, "%s: bad speed\n", __func__); } if (!driver->bind || !driver->setup) { dev_err(hsotg->dev, "%s: missing entry points\n", __func__); return -EINVAL; } WARN_ON(hsotg->driver); driver->driver.bus = NULL; hsotg->driver = driver; hsotg->gadget.dev.driver = &driver->driver; hsotg->gadget.dev.dma_mask = hsotg->dev->dma_mask; hsotg->gadget.speed = USB_SPEED_UNKNOWN; ret = device_add(&hsotg->gadget.dev); if (ret) { dev_err(hsotg->dev, "failed to register gadget device\n"); goto err; } ret = driver->bind(&hsotg->gadget); if (ret) { dev_err(hsotg->dev, "failed bind %s\n", driver->driver.name); hsotg->gadget.dev.driver = NULL; hsotg->driver = NULL; goto err; } /* we must now enable ep0 ready for host detection and then * set configuration. */ s3c_hsotg_corereset(hsotg); /* set the PLL on, remove the HNP/SRP and set the PHY */ writel(S3C_GUSBCFG_PHYIf16 | S3C_GUSBCFG_TOutCal(7) | (0x5 << 10), hsotg->regs + S3C_GUSBCFG); /* looks like soft-reset changes state of FIFOs */ s3c_hsotg_init_fifo(hsotg); __orr32(hsotg->regs + S3C_DCTL, S3C_DCTL_SftDiscon); writel(1 << 18 | S3C_DCFG_DevSpd_HS, hsotg->regs + S3C_DCFG); writel(S3C_GINTSTS_DisconnInt | S3C_GINTSTS_SessReqInt | S3C_GINTSTS_ConIDStsChng | S3C_GINTSTS_USBRst | S3C_GINTSTS_EnumDone | S3C_GINTSTS_OTGInt | S3C_GINTSTS_USBSusp | S3C_GINTSTS_WkUpInt | S3C_GINTSTS_GOUTNakEff | S3C_GINTSTS_GINNakEff | S3C_GINTSTS_ErlySusp, hsotg->regs + S3C_GINTMSK); if (using_dma(hsotg)) writel(S3C_GAHBCFG_GlblIntrEn | S3C_GAHBCFG_DMAEn | S3C_GAHBCFG_HBstLen_Incr4, hsotg->regs + S3C_GAHBCFG); else writel(S3C_GAHBCFG_GlblIntrEn, hsotg->regs + S3C_GAHBCFG); /* Enabling INTknTXFEmpMsk here seems to be a big mistake, we end * up being flooded with interrupts if the host is polling the * endpoint to try and read data. 
*/ writel(S3C_DIEPMSK_TimeOUTMsk | S3C_DIEPMSK_AHBErrMsk | S3C_DIEPMSK_INTknEPMisMsk | S3C_DIEPMSK_EPDisbldMsk | S3C_DIEPMSK_XferComplMsk, hsotg->regs + S3C_DIEPMSK); /* don't need XferCompl, we get that from RXFIFO in slave mode. In * DMA mode we may need this. */ writel(S3C_DOEPMSK_SetupMsk | S3C_DOEPMSK_AHBErrMsk | S3C_DOEPMSK_EPDisbldMsk | (using_dma(hsotg) ? (S3C_DIEPMSK_XferComplMsk | S3C_DIEPMSK_TimeOUTMsk) : 0), hsotg->regs + S3C_DOEPMSK); writel(0, hsotg->regs + S3C_DAINTMSK); dev_info(hsotg->dev, "EP0: DIEPCTL0=0x%08x, DOEPCTL0=0x%08x\n", readl(hsotg->regs + S3C_DIEPCTL0), readl(hsotg->regs + S3C_DOEPCTL0)); /* enable in and out endpoint interrupts */ s3c_hsotg_en_gsint(hsotg, S3C_GINTSTS_OEPInt | S3C_GINTSTS_IEPInt); /* Enable the RXFIFO when in slave mode, as this is how we collect * the data. In DMA mode, we get events from the FIFO but also * things we cannot process, so do not use it. */ if (!using_dma(hsotg)) s3c_hsotg_en_gsint(hsotg, S3C_GINTSTS_RxFLvl); /* Enable interrupts for EP0 in and out */ s3c_hsotg_ctrl_epint(hsotg, 0, 0, 1); s3c_hsotg_ctrl_epint(hsotg, 0, 1, 1); __orr32(hsotg->regs + S3C_DCTL, S3C_DCTL_PWROnPrgDone); udelay(10); /* see openiboot */ __bic32(hsotg->regs + S3C_DCTL, S3C_DCTL_PWROnPrgDone); dev_info(hsotg->dev, "DCTL=0x%08x\n", readl(hsotg->regs + S3C_DCTL)); /* S3C_DxEPCTL_USBActEp says RO in manual, but seems to be set by writing to the EPCTL register.. 
*/ /* set to read 1 8byte packet */ writel(S3C_DxEPTSIZ_MC(1) | S3C_DxEPTSIZ_PktCnt(1) | S3C_DxEPTSIZ_XferSize(8), hsotg->regs + DOEPTSIZ0); writel(s3c_hsotg_ep0_mps(hsotg->eps[0].ep.maxpacket) | S3C_DxEPCTL_CNAK | S3C_DxEPCTL_EPEna | S3C_DxEPCTL_USBActEp, hsotg->regs + S3C_DOEPCTL0); /* enable, but don't activate EP0in */ writel(s3c_hsotg_ep0_mps(hsotg->eps[0].ep.maxpacket) | S3C_DxEPCTL_USBActEp, hsotg->regs + S3C_DIEPCTL0); s3c_hsotg_enqueue_setup(hsotg); dev_info(hsotg->dev, "EP0: DIEPCTL0=0x%08x, DOEPCTL0=0x%08x\n", readl(hsotg->regs + S3C_DIEPCTL0), readl(hsotg->regs + S3C_DOEPCTL0)); /* clear global NAKs */ writel(S3C_DCTL_CGOUTNak | S3C_DCTL_CGNPInNAK, hsotg->regs + S3C_DCTL); /* must be at-least 3ms to allow bus to see disconnect */ msleep(3); /* remove the soft-disconnect and let's go */ __bic32(hsotg->regs + S3C_DCTL, S3C_DCTL_SftDiscon); /* report to the user, and return */ dev_info(hsotg->dev, "bound driver %s\n", driver->driver.name); return 0; err: hsotg->driver = NULL; hsotg->gadget.dev.driver = NULL; return ret; } EXPORT_SYMBOL(usb_gadget_register_driver); int usb_gadget_unregister_driver(struct usb_gadget_driver *driver) { struct s3c_hsotg *hsotg = our_hsotg; int ep; if (!hsotg) return -ENODEV; if (!driver || driver != hsotg->driver || !driver->unbind) return -EINVAL; /* all endpoints should be shutdown */ for (ep = 0; ep < S3C_HSOTG_EPS; ep++) s3c_hsotg_ep_disable(&hsotg->eps[ep].ep); call_gadget(hsotg, disconnect); driver->unbind(&hsotg->gadget); hsotg->driver = NULL; hsotg->gadget.speed = USB_SPEED_UNKNOWN; device_del(&hsotg->gadget.dev); dev_info(hsotg->dev, "unregistered gadget driver '%s'\n", driver->driver.name); return 0; } EXPORT_SYMBOL(usb_gadget_unregister_driver); static int s3c_hsotg_gadget_getframe(struct usb_gadget *gadget) { return s3c_hsotg_read_frameno(to_hsotg(gadget)); } static struct usb_gadget_ops s3c_hsotg_gadget_ops = { .get_frame = s3c_hsotg_gadget_getframe, }; /** * s3c_hsotg_initep - initialise a single endpoint * 
@hsotg: The device state. * @hs_ep: The endpoint to be initialised. * @epnum: The endpoint number * * Initialise the given endpoint (as part of the probe and device state * creation) to give to the gadget driver. Setup the endpoint name, any * direction information and other state that may be required. */ static void __devinit s3c_hsotg_initep(struct s3c_hsotg *hsotg, struct s3c_hsotg_ep *hs_ep, int epnum) { u32 ptxfifo; char *dir; if (epnum == 0) dir = ""; else if ((epnum % 2) == 0) { dir = "out"; } else { dir = "in"; hs_ep->dir_in = 1; } hs_ep->index = epnum; snprintf(hs_ep->name, sizeof(hs_ep->name), "ep%d%s", epnum, dir); INIT_LIST_HEAD(&hs_ep->queue); INIT_LIST_HEAD(&hs_ep->ep.ep_list); spin_lock_init(&hs_ep->lock); /* add to the list of endpoints known by the gadget driver */ if (epnum) list_add_tail(&hs_ep->ep.ep_list, &hsotg->gadget.ep_list); hs_ep->parent = hsotg; hs_ep->ep.name = hs_ep->name; hs_ep->ep.maxpacket = epnum ? 512 : EP0_MPS_LIMIT; hs_ep->ep.ops = &s3c_hsotg_ep_ops; /* Read the FIFO size for the Periodic TX FIFO, even if we're * an OUT endpoint, we may as well do this if in future the * code is changed to make each endpoint's direction changeable. */ ptxfifo = readl(hsotg->regs + S3C_DPTXFSIZn(epnum)); hs_ep->fifo_size = S3C_DPTXFSIZn_DPTxFSize_GET(ptxfifo); /* if we're using dma, we need to set the next-endpoint pointer * to be something valid. */ if (using_dma(hsotg)) { u32 next = S3C_DxEPCTL_NextEp((epnum + 1) % 15); writel(next, hsotg->regs + S3C_DIEPCTL(epnum)); writel(next, hsotg->regs + S3C_DOEPCTL(epnum)); } } /** * s3c_hsotg_otgreset - reset the OtG phy block * @hsotg: The host state. * * Power up the phy, set the basic configuration and start the PHY. */ static void s3c_hsotg_otgreset(struct s3c_hsotg *hsotg) { u32 osc; writel(0, S3C_PHYPWR); mdelay(1); osc = hsotg->plat->is_osc ? 
S3C_PHYCLK_EXT_OSC : 0; writel(osc | 0x10, S3C_PHYCLK); /* issue a full set of resets to the otg and core */ writel(S3C_RSTCON_PHY, S3C_RSTCON); udelay(20); /* at-least 10uS */ writel(0, S3C_RSTCON); } static void s3c_hsotg_init(struct s3c_hsotg *hsotg) { /* unmask subset of endpoint interrupts */ writel(S3C_DIEPMSK_TimeOUTMsk | S3C_DIEPMSK_AHBErrMsk | S3C_DIEPMSK_EPDisbldMsk | S3C_DIEPMSK_XferComplMsk, hsotg->regs + S3C_DIEPMSK); writel(S3C_DOEPMSK_SetupMsk | S3C_DOEPMSK_AHBErrMsk | S3C_DOEPMSK_EPDisbldMsk | S3C_DOEPMSK_XferComplMsk, hsotg->regs + S3C_DOEPMSK); writel(0, hsotg->regs + S3C_DAINTMSK); /* Be in disconnected state until gadget is registered */ __orr32(hsotg->regs + S3C_DCTL, S3C_DCTL_SftDiscon); if (0) { /* post global nak until we're ready */ writel(S3C_DCTL_SGNPInNAK | S3C_DCTL_SGOUTNak, hsotg->regs + S3C_DCTL); } /* setup fifos */ dev_info(hsotg->dev, "GRXFSIZ=0x%08x, GNPTXFSIZ=0x%08x\n", readl(hsotg->regs + S3C_GRXFSIZ), readl(hsotg->regs + S3C_GNPTXFSIZ)); s3c_hsotg_init_fifo(hsotg); /* set the PLL on, remove the HNP/SRP and set the PHY */ writel(S3C_GUSBCFG_PHYIf16 | S3C_GUSBCFG_TOutCal(7) | (0x5 << 10), hsotg->regs + S3C_GUSBCFG); writel(using_dma(hsotg) ? 
S3C_GAHBCFG_DMAEn : 0x0, hsotg->regs + S3C_GAHBCFG); } static void s3c_hsotg_dump(struct s3c_hsotg *hsotg) { struct device *dev = hsotg->dev; void __iomem *regs = hsotg->regs; u32 val; int idx; dev_info(dev, "DCFG=0x%08x, DCTL=0x%08x, DIEPMSK=%08x\n", readl(regs + S3C_DCFG), readl(regs + S3C_DCTL), readl(regs + S3C_DIEPMSK)); dev_info(dev, "GAHBCFG=0x%08x, 0x44=0x%08x\n", readl(regs + S3C_GAHBCFG), readl(regs + 0x44)); dev_info(dev, "GRXFSIZ=0x%08x, GNPTXFSIZ=0x%08x\n", readl(regs + S3C_GRXFSIZ), readl(regs + S3C_GNPTXFSIZ)); /* show periodic fifo settings */ for (idx = 1; idx <= 15; idx++) { val = readl(regs + S3C_DPTXFSIZn(idx)); dev_info(dev, "DPTx[%d] FSize=%d, StAddr=0x%08x\n", idx, val >> S3C_DPTXFSIZn_DPTxFSize_SHIFT, val & S3C_DPTXFSIZn_DPTxFStAddr_MASK); } for (idx = 0; idx < 15; idx++) { dev_info(dev, "ep%d-in: EPCTL=0x%08x, SIZ=0x%08x, DMA=0x%08x\n", idx, readl(regs + S3C_DIEPCTL(idx)), readl(regs + S3C_DIEPTSIZ(idx)), readl(regs + S3C_DIEPDMA(idx))); val = readl(regs + S3C_DOEPCTL(idx)); dev_info(dev, "ep%d-out: EPCTL=0x%08x, SIZ=0x%08x, DMA=0x%08x\n", idx, readl(regs + S3C_DOEPCTL(idx)), readl(regs + S3C_DOEPTSIZ(idx)), readl(regs + S3C_DOEPDMA(idx))); } dev_info(dev, "DVBUSDIS=0x%08x, DVBUSPULSE=%08x\n", readl(regs + S3C_DVBUSDIS), readl(regs + S3C_DVBUSPULSE)); } /** * state_show - debugfs: show overall driver and device state. * @seq: The seq file to write to. * @v: Unused parameter. * * This debugfs entry shows the overall state of the hardware and * some general information about each of the endpoints available * to the system. 
*/ static int state_show(struct seq_file *seq, void *v) { struct s3c_hsotg *hsotg = seq->private; void __iomem *regs = hsotg->regs; int idx; seq_printf(seq, "DCFG=0x%08x, DCTL=0x%08x, DSTS=0x%08x\n", readl(regs + S3C_DCFG), readl(regs + S3C_DCTL), readl(regs + S3C_DSTS)); seq_printf(seq, "DIEPMSK=0x%08x, DOEPMASK=0x%08x\n", readl(regs + S3C_DIEPMSK), readl(regs + S3C_DOEPMSK)); seq_printf(seq, "GINTMSK=0x%08x, GINTSTS=0x%08x\n", readl(regs + S3C_GINTMSK), readl(regs + S3C_GINTSTS)); seq_printf(seq, "DAINTMSK=0x%08x, DAINT=0x%08x\n", readl(regs + S3C_DAINTMSK), readl(regs + S3C_DAINT)); seq_printf(seq, "GNPTXSTS=0x%08x, GRXSTSR=%08x\n", readl(regs + S3C_GNPTXSTS), readl(regs + S3C_GRXSTSR)); seq_printf(seq, "\nEndpoint status:\n"); for (idx = 0; idx < 15; idx++) { u32 in, out; in = readl(regs + S3C_DIEPCTL(idx)); out = readl(regs + S3C_DOEPCTL(idx)); seq_printf(seq, "ep%d: DIEPCTL=0x%08x, DOEPCTL=0x%08x", idx, in, out); in = readl(regs + S3C_DIEPTSIZ(idx)); out = readl(regs + S3C_DOEPTSIZ(idx)); seq_printf(seq, ", DIEPTSIZ=0x%08x, DOEPTSIZ=0x%08x", in, out); seq_printf(seq, "\n"); } return 0; } static int state_open(struct inode *inode, struct file *file) { return single_open(file, state_show, inode->i_private); } static const struct file_operations state_fops = { .owner = THIS_MODULE, .open = state_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; /** * fifo_show - debugfs: show the fifo information * @seq: The seq_file to write data to. * @v: Unused parameter. * * Show the FIFO information for the overall fifo and all the * periodic transmission FIFOs. 
*/ static int fifo_show(struct seq_file *seq, void *v) { struct s3c_hsotg *hsotg = seq->private; void __iomem *regs = hsotg->regs; u32 val; int idx; seq_printf(seq, "Non-periodic FIFOs:\n"); seq_printf(seq, "RXFIFO: Size %d\n", readl(regs + S3C_GRXFSIZ)); val = readl(regs + S3C_GNPTXFSIZ); seq_printf(seq, "NPTXFIFO: Size %d, Start 0x%08x\n", val >> S3C_GNPTXFSIZ_NPTxFDep_SHIFT, val & S3C_GNPTXFSIZ_NPTxFStAddr_MASK); seq_printf(seq, "\nPeriodic TXFIFOs:\n"); for (idx = 1; idx <= 15; idx++) { val = readl(regs + S3C_DPTXFSIZn(idx)); seq_printf(seq, "\tDPTXFIFO%2d: Size %d, Start 0x%08x\n", idx, val >> S3C_DPTXFSIZn_DPTxFSize_SHIFT, val & S3C_DPTXFSIZn_DPTxFStAddr_MASK); } return 0; } static int fifo_open(struct inode *inode, struct file *file) { return single_open(file, fifo_show, inode->i_private); } static const struct file_operations fifo_fops = { .owner = THIS_MODULE, .open = fifo_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static const char *decode_direction(int is_in) { return is_in ? "in" : "out"; } /** * ep_show - debugfs: show the state of an endpoint. * @seq: The seq_file to write data to. * @v: Unused parameter. * * This debugfs entry shows the state of the given endpoint (one is * registered for each available). 
*/ static int ep_show(struct seq_file *seq, void *v) { struct s3c_hsotg_ep *ep = seq->private; struct s3c_hsotg *hsotg = ep->parent; struct s3c_hsotg_req *req; void __iomem *regs = hsotg->regs; int index = ep->index; int show_limit = 15; unsigned long flags; seq_printf(seq, "Endpoint index %d, named %s, dir %s:\n", ep->index, ep->ep.name, decode_direction(ep->dir_in)); /* first show the register state */ seq_printf(seq, "\tDIEPCTL=0x%08x, DOEPCTL=0x%08x\n", readl(regs + S3C_DIEPCTL(index)), readl(regs + S3C_DOEPCTL(index))); seq_printf(seq, "\tDIEPDMA=0x%08x, DOEPDMA=0x%08x\n", readl(regs + S3C_DIEPDMA(index)), readl(regs + S3C_DOEPDMA(index))); seq_printf(seq, "\tDIEPINT=0x%08x, DOEPINT=0x%08x\n", readl(regs + S3C_DIEPINT(index)), readl(regs + S3C_DOEPINT(index))); seq_printf(seq, "\tDIEPTSIZ=0x%08x, DOEPTSIZ=0x%08x\n", readl(regs + S3C_DIEPTSIZ(index)), readl(regs + S3C_DOEPTSIZ(index))); seq_printf(seq, "\n"); seq_printf(seq, "mps %d\n", ep->ep.maxpacket); seq_printf(seq, "total_data=%ld\n", ep->total_data); seq_printf(seq, "request list (%p,%p):\n", ep->queue.next, ep->queue.prev); spin_lock_irqsave(&ep->lock, flags); list_for_each_entry(req, &ep->queue, queue) { if (--show_limit < 0) { seq_printf(seq, "not showing more requests...\n"); break; } seq_printf(seq, "%c req %p: %d bytes @%p, ", req == ep->req ? '*' : ' ', req, req->req.length, req->req.buf); seq_printf(seq, "%d done, res %d\n", req->req.actual, req->req.status); } spin_unlock_irqrestore(&ep->lock, flags); return 0; } static int ep_open(struct inode *inode, struct file *file) { return single_open(file, ep_show, inode->i_private); } static const struct file_operations ep_fops = { .owner = THIS_MODULE, .open = ep_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; /** * s3c_hsotg_create_debug - create debugfs directory and files * @hsotg: The driver state * * Create the debugfs files to allow the user to get information * about the state of the system. 
The directory name is created * with the same name as the device itself, in case we end up * with multiple blocks in future systems. */ static void __devinit s3c_hsotg_create_debug(struct s3c_hsotg *hsotg) { struct dentry *root; unsigned epidx; root = debugfs_create_dir(dev_name(hsotg->dev), NULL); hsotg->debug_root = root; if (IS_ERR(root)) { dev_err(hsotg->dev, "cannot create debug root\n"); return; } /* create general state file */ hsotg->debug_file = debugfs_create_file("state", 0444, root, hsotg, &state_fops); if (IS_ERR(hsotg->debug_file)) dev_err(hsotg->dev, "%s: failed to create state\n", __func__); hsotg->debug_fifo = debugfs_create_file("fifo", 0444, root, hsotg, &fifo_fops); if (IS_ERR(hsotg->debug_fifo)) dev_err(hsotg->dev, "%s: failed to create fifo\n", __func__); /* create one file for each endpoint */ for (epidx = 0; epidx < S3C_HSOTG_EPS; epidx++) { struct s3c_hsotg_ep *ep = &hsotg->eps[epidx]; ep->debugfs = debugfs_create_file(ep->name, 0444, root, ep, &ep_fops); if (IS_ERR(ep->debugfs)) dev_err(hsotg->dev, "failed to create %s debug file\n", ep->name); } } /** * s3c_hsotg_delete_debug - cleanup debugfs entries * @hsotg: The driver state * * Cleanup (remove) the debugfs files for use on module exit. */ static void __devexit s3c_hsotg_delete_debug(struct s3c_hsotg *hsotg) { unsigned epidx; for (epidx = 0; epidx < S3C_HSOTG_EPS; epidx++) { struct s3c_hsotg_ep *ep = &hsotg->eps[epidx]; debugfs_remove(ep->debugfs); } debugfs_remove(hsotg->debug_file); debugfs_remove(hsotg->debug_fifo); debugfs_remove(hsotg->debug_root); } /** * s3c_hsotg_gate - set the hardware gate for the block * @pdev: The device we bound to * @on: On or off. * * Set the hardware gate setting into the block. If we end up on * something other than an S3C64XX, then we might need to change this * to using a platform data callback, or some other mechanism. 
*/ static void s3c_hsotg_gate(struct platform_device *pdev, bool on) { unsigned long flags; u32 others; local_irq_save(flags); others = __raw_readl(S3C64XX_OTHERS); if (on) others |= S3C64XX_OTHERS_USBMASK; else others &= ~S3C64XX_OTHERS_USBMASK; __raw_writel(others, S3C64XX_OTHERS); local_irq_restore(flags); } static struct s3c_hsotg_plat s3c_hsotg_default_pdata; static int __devinit s3c_hsotg_probe(struct platform_device *pdev) { struct s3c_hsotg_plat *plat = pdev->dev.platform_data; struct device *dev = &pdev->dev; struct s3c_hsotg *hsotg; struct resource *res; int epnum; int ret; if (!plat) plat = &s3c_hsotg_default_pdata; hsotg = kzalloc(sizeof(struct s3c_hsotg) + sizeof(struct s3c_hsotg_ep) * S3C_HSOTG_EPS, GFP_KERNEL); if (!hsotg) { dev_err(dev, "cannot get memory\n"); return -ENOMEM; } hsotg->dev = dev; hsotg->plat = plat; platform_set_drvdata(pdev, hsotg); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { dev_err(dev, "cannot find register resource 0\n"); ret = -EINVAL; goto err_mem; } hsotg->regs_res = request_mem_region(res->start, resource_size(res), dev_name(dev)); if (!hsotg->regs_res) { dev_err(dev, "cannot reserve registers\n"); ret = -ENOENT; goto err_mem; } hsotg->regs = ioremap(res->start, resource_size(res)); if (!hsotg->regs) { dev_err(dev, "cannot map registers\n"); ret = -ENXIO; goto err_regs_res; } ret = platform_get_irq(pdev, 0); if (ret < 0) { dev_err(dev, "cannot find IRQ\n"); goto err_regs; } hsotg->irq = ret; ret = request_irq(ret, s3c_hsotg_irq, 0, dev_name(dev), hsotg); if (ret < 0) { dev_err(dev, "cannot claim IRQ\n"); goto err_regs; } dev_info(dev, "regs %p, irq %d\n", hsotg->regs, hsotg->irq); device_initialize(&hsotg->gadget.dev); dev_set_name(&hsotg->gadget.dev, "gadget"); hsotg->gadget.is_dualspeed = 1; hsotg->gadget.ops = &s3c_hsotg_gadget_ops; hsotg->gadget.name = dev_name(dev); hsotg->gadget.dev.parent = dev; hsotg->gadget.dev.dma_mask = dev->dma_mask; /* setup endpoint information */ 
INIT_LIST_HEAD(&hsotg->gadget.ep_list); hsotg->gadget.ep0 = &hsotg->eps[0].ep; /* allocate EP0 request */ hsotg->ctrl_req = s3c_hsotg_ep_alloc_request(&hsotg->eps[0].ep, GFP_KERNEL); if (!hsotg->ctrl_req) { dev_err(dev, "failed to allocate ctrl req\n"); goto err_regs; } /* reset the system */ s3c_hsotg_gate(pdev, true); s3c_hsotg_otgreset(hsotg); s3c_hsotg_corereset(hsotg); s3c_hsotg_init(hsotg); /* initialise the endpoints now the core has been initialised */ for (epnum = 0; epnum < S3C_HSOTG_EPS; epnum++) s3c_hsotg_initep(hsotg, &hsotg->eps[epnum], epnum); s3c_hsotg_create_debug(hsotg); s3c_hsotg_dump(hsotg); our_hsotg = hsotg; return 0; err_regs: iounmap(hsotg->regs); err_regs_res: release_resource(hsotg->regs_res); kfree(hsotg->regs_res); err_mem: kfree(hsotg); return ret; } static int __devexit s3c_hsotg_remove(struct platform_device *pdev) { struct s3c_hsotg *hsotg = platform_get_drvdata(pdev); s3c_hsotg_delete_debug(hsotg); usb_gadget_unregister_driver(hsotg->driver); free_irq(hsotg->irq, hsotg); iounmap(hsotg->regs); release_resource(hsotg->regs_res); kfree(hsotg->regs_res); s3c_hsotg_gate(pdev, false); kfree(hsotg); return 0; } #if 1 #define s3c_hsotg_suspend NULL #define s3c_hsotg_resume NULL #endif static struct platform_driver s3c_hsotg_driver = { .driver = { .name = "s3c-hsotg", .owner = THIS_MODULE, }, .probe = s3c_hsotg_probe, .remove = __devexit_p(s3c_hsotg_remove), .suspend = s3c_hsotg_suspend, .resume = s3c_hsotg_resume, }; static int __init s3c_hsotg_modinit(void) { return platform_driver_register(&s3c_hsotg_driver); } static void __exit s3c_hsotg_modexit(void) { platform_driver_unregister(&s3c_hsotg_driver); } module_init(s3c_hsotg_modinit); module_exit(s3c_hsotg_modexit); MODULE_DESCRIPTION("Samsung S3C USB High-speed/OtG device"); MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:s3c-hsotg");
gpl-2.0
jdkernel/lexikon_aosp_2.6.35
arch/x86/oprofile/backtrace.c
954
1994
/** * @file backtrace.c * * @remark Copyright 2002 OProfile authors * @remark Read the file COPYING * * @author John Levon * @author David Smith */ #include <linux/oprofile.h> #include <linux/sched.h> #include <linux/mm.h> #include <asm/ptrace.h> #include <asm/uaccess.h> #include <asm/stacktrace.h> static void backtrace_warning_symbol(void *data, char *msg, unsigned long symbol) { /* Ignore warnings */ } static void backtrace_warning(void *data, char *msg) { /* Ignore warnings */ } static int backtrace_stack(void *data, char *name) { /* Yes, we want all stacks */ return 0; } static void backtrace_address(void *data, unsigned long addr, int reliable) { unsigned int *depth = data; if ((*depth)--) oprofile_add_trace(addr); } static struct stacktrace_ops backtrace_ops = { .warning = backtrace_warning, .warning_symbol = backtrace_warning_symbol, .stack = backtrace_stack, .address = backtrace_address, .walk_stack = print_context_stack, }; struct frame_head { struct frame_head *bp; unsigned long ret; } __attribute__((packed)); static struct frame_head *dump_user_backtrace(struct frame_head *head) { struct frame_head bufhead[2]; /* Also check accessibility of one struct frame_head beyond */ if (!access_ok(VERIFY_READ, head, sizeof(bufhead))) return NULL; if (__copy_from_user_inatomic(bufhead, head, sizeof(bufhead))) return NULL; oprofile_add_trace(bufhead[0].ret); /* frame pointers should strictly progress back up the stack * (towards higher addresses) */ if (head >= bufhead[0].bp) return NULL; return bufhead[0].bp; } void x86_backtrace(struct pt_regs * const regs, unsigned int depth) { struct frame_head *head = (struct frame_head *)frame_pointer(regs); if (!user_mode_vm(regs)) { unsigned long stack = kernel_stack_pointer(regs); if (depth) dump_trace(NULL, regs, (unsigned long *)stack, 0, &backtrace_ops, &depth); return; } while (depth-- && head) head = dump_user_backtrace(head); }
gpl-2.0
mtk00874/kernel-mediatek
net/netfilter/nft_payload.c
954
4499
/* * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * Development of this code funded by Astaro AG (http://www.astaro.com/) */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/module.h> #include <linux/netlink.h> #include <linux/netfilter.h> #include <linux/netfilter/nf_tables.h> #include <net/netfilter/nf_tables_core.h> #include <net/netfilter/nf_tables.h> static void nft_payload_eval(const struct nft_expr *expr, struct nft_data data[NFT_REG_MAX + 1], const struct nft_pktinfo *pkt) { const struct nft_payload *priv = nft_expr_priv(expr); const struct sk_buff *skb = pkt->skb; struct nft_data *dest = &data[priv->dreg]; int offset; switch (priv->base) { case NFT_PAYLOAD_LL_HEADER: if (!skb_mac_header_was_set(skb)) goto err; offset = skb_mac_header(skb) - skb->data; break; case NFT_PAYLOAD_NETWORK_HEADER: offset = skb_network_offset(skb); break; case NFT_PAYLOAD_TRANSPORT_HEADER: offset = pkt->xt.thoff; break; default: BUG(); } offset += priv->offset; if (skb_copy_bits(skb, offset, dest->data, priv->len) < 0) goto err; return; err: data[NFT_REG_VERDICT].verdict = NFT_BREAK; } static const struct nla_policy nft_payload_policy[NFTA_PAYLOAD_MAX + 1] = { [NFTA_PAYLOAD_DREG] = { .type = NLA_U32 }, [NFTA_PAYLOAD_BASE] = { .type = NLA_U32 }, [NFTA_PAYLOAD_OFFSET] = { .type = NLA_U32 }, [NFTA_PAYLOAD_LEN] = { .type = NLA_U32 }, }; static int nft_payload_init(const struct nft_ctx *ctx, const struct nft_expr *expr, const struct nlattr * const tb[]) { struct nft_payload *priv = nft_expr_priv(expr); int err; priv->base = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE])); priv->offset = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_OFFSET])); priv->len = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_LEN])); priv->dreg = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_DREG])); err = 
nft_validate_output_register(priv->dreg); if (err < 0) return err; return nft_validate_data_load(ctx, priv->dreg, NULL, NFT_DATA_VALUE); } static int nft_payload_dump(struct sk_buff *skb, const struct nft_expr *expr) { const struct nft_payload *priv = nft_expr_priv(expr); if (nla_put_be32(skb, NFTA_PAYLOAD_DREG, htonl(priv->dreg)) || nla_put_be32(skb, NFTA_PAYLOAD_BASE, htonl(priv->base)) || nla_put_be32(skb, NFTA_PAYLOAD_OFFSET, htonl(priv->offset)) || nla_put_be32(skb, NFTA_PAYLOAD_LEN, htonl(priv->len))) goto nla_put_failure; return 0; nla_put_failure: return -1; } static struct nft_expr_type nft_payload_type; static const struct nft_expr_ops nft_payload_ops = { .type = &nft_payload_type, .size = NFT_EXPR_SIZE(sizeof(struct nft_payload)), .eval = nft_payload_eval, .init = nft_payload_init, .dump = nft_payload_dump, }; const struct nft_expr_ops nft_payload_fast_ops = { .type = &nft_payload_type, .size = NFT_EXPR_SIZE(sizeof(struct nft_payload)), .eval = nft_payload_eval, .init = nft_payload_init, .dump = nft_payload_dump, }; static const struct nft_expr_ops * nft_payload_select_ops(const struct nft_ctx *ctx, const struct nlattr * const tb[]) { enum nft_payload_bases base; unsigned int offset, len; if (tb[NFTA_PAYLOAD_DREG] == NULL || tb[NFTA_PAYLOAD_BASE] == NULL || tb[NFTA_PAYLOAD_OFFSET] == NULL || tb[NFTA_PAYLOAD_LEN] == NULL) return ERR_PTR(-EINVAL); base = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE])); switch (base) { case NFT_PAYLOAD_LL_HEADER: case NFT_PAYLOAD_NETWORK_HEADER: case NFT_PAYLOAD_TRANSPORT_HEADER: break; default: return ERR_PTR(-EOPNOTSUPP); } offset = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_OFFSET])); len = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_LEN])); if (len == 0 || len > FIELD_SIZEOF(struct nft_data, data)) return ERR_PTR(-EINVAL); if (len <= 4 && is_power_of_2(len) && IS_ALIGNED(offset, len) && base != NFT_PAYLOAD_LL_HEADER) return &nft_payload_fast_ops; else return &nft_payload_ops; } static struct nft_expr_type nft_payload_type __read_mostly = { 
.name = "payload", .select_ops = nft_payload_select_ops, .policy = nft_payload_policy, .maxattr = NFTA_PAYLOAD_MAX, .owner = THIS_MODULE, }; int __init nft_payload_module_init(void) { return nft_register_expr(&nft_payload_type); } void nft_payload_module_exit(void) { nft_unregister_expr(&nft_payload_type); }
gpl-2.0
amphorion/kernel_pyramidv2
drivers/net/sfc/ethtool.c
954
30184
/**************************************************************************** * Driver for Solarflare Solarstorm network controllers and boards * Copyright 2005-2006 Fen Systems Ltd. * Copyright 2006-2010 Solarflare Communications Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published * by the Free Software Foundation, incorporated herein by reference. */ #include <linux/netdevice.h> #include <linux/ethtool.h> #include <linux/rtnetlink.h> #include <linux/in.h> #include "net_driver.h" #include "workarounds.h" #include "selftest.h" #include "efx.h" #include "filter.h" #include "nic.h" struct ethtool_string { char name[ETH_GSTRING_LEN]; }; struct efx_ethtool_stat { const char *name; enum { EFX_ETHTOOL_STAT_SOURCE_mac_stats, EFX_ETHTOOL_STAT_SOURCE_nic, EFX_ETHTOOL_STAT_SOURCE_channel, EFX_ETHTOOL_STAT_SOURCE_tx_queue } source; unsigned offset; u64(*get_stat) (void *field); /* Reader function */ }; /* Initialiser for a struct #efx_ethtool_stat with type-checking */ #define EFX_ETHTOOL_STAT(stat_name, source_name, field, field_type, \ get_stat_function) { \ .name = #stat_name, \ .source = EFX_ETHTOOL_STAT_SOURCE_##source_name, \ .offset = ((((field_type *) 0) == \ &((struct efx_##source_name *)0)->field) ? 
\ offsetof(struct efx_##source_name, field) : \ offsetof(struct efx_##source_name, field)), \ .get_stat = get_stat_function, \ } static u64 efx_get_uint_stat(void *field) { return *(unsigned int *)field; } static u64 efx_get_ulong_stat(void *field) { return *(unsigned long *)field; } static u64 efx_get_u64_stat(void *field) { return *(u64 *) field; } static u64 efx_get_atomic_stat(void *field) { return atomic_read((atomic_t *) field); } #define EFX_ETHTOOL_ULONG_MAC_STAT(field) \ EFX_ETHTOOL_STAT(field, mac_stats, field, \ unsigned long, efx_get_ulong_stat) #define EFX_ETHTOOL_U64_MAC_STAT(field) \ EFX_ETHTOOL_STAT(field, mac_stats, field, \ u64, efx_get_u64_stat) #define EFX_ETHTOOL_UINT_NIC_STAT(name) \ EFX_ETHTOOL_STAT(name, nic, n_##name, \ unsigned int, efx_get_uint_stat) #define EFX_ETHTOOL_ATOMIC_NIC_ERROR_STAT(field) \ EFX_ETHTOOL_STAT(field, nic, field, \ atomic_t, efx_get_atomic_stat) #define EFX_ETHTOOL_UINT_CHANNEL_STAT(field) \ EFX_ETHTOOL_STAT(field, channel, n_##field, \ unsigned int, efx_get_uint_stat) #define EFX_ETHTOOL_UINT_TXQ_STAT(field) \ EFX_ETHTOOL_STAT(tx_##field, tx_queue, field, \ unsigned int, efx_get_uint_stat) static struct efx_ethtool_stat efx_ethtool_stats[] = { EFX_ETHTOOL_U64_MAC_STAT(tx_bytes), EFX_ETHTOOL_U64_MAC_STAT(tx_good_bytes), EFX_ETHTOOL_U64_MAC_STAT(tx_bad_bytes), EFX_ETHTOOL_ULONG_MAC_STAT(tx_packets), EFX_ETHTOOL_ULONG_MAC_STAT(tx_bad), EFX_ETHTOOL_ULONG_MAC_STAT(tx_pause), EFX_ETHTOOL_ULONG_MAC_STAT(tx_control), EFX_ETHTOOL_ULONG_MAC_STAT(tx_unicast), EFX_ETHTOOL_ULONG_MAC_STAT(tx_multicast), EFX_ETHTOOL_ULONG_MAC_STAT(tx_broadcast), EFX_ETHTOOL_ULONG_MAC_STAT(tx_lt64), EFX_ETHTOOL_ULONG_MAC_STAT(tx_64), EFX_ETHTOOL_ULONG_MAC_STAT(tx_65_to_127), EFX_ETHTOOL_ULONG_MAC_STAT(tx_128_to_255), EFX_ETHTOOL_ULONG_MAC_STAT(tx_256_to_511), EFX_ETHTOOL_ULONG_MAC_STAT(tx_512_to_1023), EFX_ETHTOOL_ULONG_MAC_STAT(tx_1024_to_15xx), EFX_ETHTOOL_ULONG_MAC_STAT(tx_15xx_to_jumbo), EFX_ETHTOOL_ULONG_MAC_STAT(tx_gtjumbo), 
EFX_ETHTOOL_ULONG_MAC_STAT(tx_collision), EFX_ETHTOOL_ULONG_MAC_STAT(tx_single_collision), EFX_ETHTOOL_ULONG_MAC_STAT(tx_multiple_collision), EFX_ETHTOOL_ULONG_MAC_STAT(tx_excessive_collision), EFX_ETHTOOL_ULONG_MAC_STAT(tx_deferred), EFX_ETHTOOL_ULONG_MAC_STAT(tx_late_collision), EFX_ETHTOOL_ULONG_MAC_STAT(tx_excessive_deferred), EFX_ETHTOOL_ULONG_MAC_STAT(tx_non_tcpudp), EFX_ETHTOOL_ULONG_MAC_STAT(tx_mac_src_error), EFX_ETHTOOL_ULONG_MAC_STAT(tx_ip_src_error), EFX_ETHTOOL_UINT_TXQ_STAT(tso_bursts), EFX_ETHTOOL_UINT_TXQ_STAT(tso_long_headers), EFX_ETHTOOL_UINT_TXQ_STAT(tso_packets), EFX_ETHTOOL_UINT_TXQ_STAT(pushes), EFX_ETHTOOL_U64_MAC_STAT(rx_bytes), EFX_ETHTOOL_U64_MAC_STAT(rx_good_bytes), EFX_ETHTOOL_U64_MAC_STAT(rx_bad_bytes), EFX_ETHTOOL_ULONG_MAC_STAT(rx_packets), EFX_ETHTOOL_ULONG_MAC_STAT(rx_good), EFX_ETHTOOL_ULONG_MAC_STAT(rx_bad), EFX_ETHTOOL_ULONG_MAC_STAT(rx_pause), EFX_ETHTOOL_ULONG_MAC_STAT(rx_control), EFX_ETHTOOL_ULONG_MAC_STAT(rx_unicast), EFX_ETHTOOL_ULONG_MAC_STAT(rx_multicast), EFX_ETHTOOL_ULONG_MAC_STAT(rx_broadcast), EFX_ETHTOOL_ULONG_MAC_STAT(rx_lt64), EFX_ETHTOOL_ULONG_MAC_STAT(rx_64), EFX_ETHTOOL_ULONG_MAC_STAT(rx_65_to_127), EFX_ETHTOOL_ULONG_MAC_STAT(rx_128_to_255), EFX_ETHTOOL_ULONG_MAC_STAT(rx_256_to_511), EFX_ETHTOOL_ULONG_MAC_STAT(rx_512_to_1023), EFX_ETHTOOL_ULONG_MAC_STAT(rx_1024_to_15xx), EFX_ETHTOOL_ULONG_MAC_STAT(rx_15xx_to_jumbo), EFX_ETHTOOL_ULONG_MAC_STAT(rx_gtjumbo), EFX_ETHTOOL_ULONG_MAC_STAT(rx_bad_lt64), EFX_ETHTOOL_ULONG_MAC_STAT(rx_bad_64_to_15xx), EFX_ETHTOOL_ULONG_MAC_STAT(rx_bad_15xx_to_jumbo), EFX_ETHTOOL_ULONG_MAC_STAT(rx_bad_gtjumbo), EFX_ETHTOOL_ULONG_MAC_STAT(rx_overflow), EFX_ETHTOOL_ULONG_MAC_STAT(rx_missed), EFX_ETHTOOL_ULONG_MAC_STAT(rx_false_carrier), EFX_ETHTOOL_ULONG_MAC_STAT(rx_symbol_error), EFX_ETHTOOL_ULONG_MAC_STAT(rx_align_error), EFX_ETHTOOL_ULONG_MAC_STAT(rx_length_error), EFX_ETHTOOL_ULONG_MAC_STAT(rx_internal_error), EFX_ETHTOOL_UINT_NIC_STAT(rx_nodesc_drop_cnt), 
EFX_ETHTOOL_ATOMIC_NIC_ERROR_STAT(rx_reset), EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_tobe_disc), EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_ip_hdr_chksum_err), EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_tcp_udp_chksum_err), EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_mcast_mismatch), EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_frm_trunc), }; /* Number of ethtool statistics */ #define EFX_ETHTOOL_NUM_STATS ARRAY_SIZE(efx_ethtool_stats) #define EFX_ETHTOOL_EEPROM_MAGIC 0xEFAB /************************************************************************** * * Ethtool operations * ************************************************************************** */ /* Identify device by flashing LEDs */ static int efx_ethtool_phys_id(struct net_device *net_dev, enum ethtool_phys_id_state state) { struct efx_nic *efx = netdev_priv(net_dev); enum efx_led_mode mode = EFX_LED_DEFAULT; switch (state) { case ETHTOOL_ID_ON: mode = EFX_LED_ON; break; case ETHTOOL_ID_OFF: mode = EFX_LED_OFF; break; case ETHTOOL_ID_INACTIVE: mode = EFX_LED_DEFAULT; break; case ETHTOOL_ID_ACTIVE: return 1; /* cycle on/off once per second */ } efx->type->set_id_led(efx, mode); return 0; } /* This must be called with rtnl_lock held. */ static int efx_ethtool_get_settings(struct net_device *net_dev, struct ethtool_cmd *ecmd) { struct efx_nic *efx = netdev_priv(net_dev); struct efx_link_state *link_state = &efx->link_state; mutex_lock(&efx->mac_lock); efx->phy_op->get_settings(efx, ecmd); mutex_unlock(&efx->mac_lock); /* GMAC does not support 1000Mbps HD */ ecmd->supported &= ~SUPPORTED_1000baseT_Half; /* Both MACs support pause frames (bidirectional and respond-only) */ ecmd->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause; if (LOOPBACK_INTERNAL(efx)) { ethtool_cmd_speed_set(ecmd, link_state->speed); ecmd->duplex = link_state->fd ? DUPLEX_FULL : DUPLEX_HALF; } return 0; } /* This must be called with rtnl_lock held. 
*/ static int efx_ethtool_set_settings(struct net_device *net_dev, struct ethtool_cmd *ecmd) { struct efx_nic *efx = netdev_priv(net_dev); int rc; /* GMAC does not support 1000Mbps HD */ if ((ethtool_cmd_speed(ecmd) == SPEED_1000) && (ecmd->duplex != DUPLEX_FULL)) { netif_dbg(efx, drv, efx->net_dev, "rejecting unsupported 1000Mbps HD setting\n"); return -EINVAL; } mutex_lock(&efx->mac_lock); rc = efx->phy_op->set_settings(efx, ecmd); mutex_unlock(&efx->mac_lock); return rc; } static void efx_ethtool_get_drvinfo(struct net_device *net_dev, struct ethtool_drvinfo *info) { struct efx_nic *efx = netdev_priv(net_dev); strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver)); strlcpy(info->version, EFX_DRIVER_VERSION, sizeof(info->version)); if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) efx_mcdi_print_fwver(efx, info->fw_version, sizeof(info->fw_version)); strlcpy(info->bus_info, pci_name(efx->pci_dev), sizeof(info->bus_info)); } static int efx_ethtool_get_regs_len(struct net_device *net_dev) { return efx_nic_get_regs_len(netdev_priv(net_dev)); } static void efx_ethtool_get_regs(struct net_device *net_dev, struct ethtool_regs *regs, void *buf) { struct efx_nic *efx = netdev_priv(net_dev); regs->version = efx->type->revision; efx_nic_get_regs(efx, buf); } static u32 efx_ethtool_get_msglevel(struct net_device *net_dev) { struct efx_nic *efx = netdev_priv(net_dev); return efx->msg_enable; } static void efx_ethtool_set_msglevel(struct net_device *net_dev, u32 msg_enable) { struct efx_nic *efx = netdev_priv(net_dev); efx->msg_enable = msg_enable; } /** * efx_fill_test - fill in an individual self-test entry * @test_index: Index of the test * @strings: Ethtool strings, or %NULL * @data: Ethtool test results, or %NULL * @test: Pointer to test result (used only if data != %NULL) * @unit_format: Unit name format (e.g. "chan\%d") * @unit_id: Unit id (e.g. 0 for "chan0") * @test_format: Test name format (e.g. "loopback.\%s.tx.sent") * @test_id: Test id (e.g. 
"PHYXS" for "loopback.PHYXS.tx_sent") * * Fill in an individual self-test entry. */ static void efx_fill_test(unsigned int test_index, struct ethtool_string *strings, u64 *data, int *test, const char *unit_format, int unit_id, const char *test_format, const char *test_id) { struct ethtool_string unit_str, test_str; /* Fill data value, if applicable */ if (data) data[test_index] = *test; /* Fill string, if applicable */ if (strings) { if (strchr(unit_format, '%')) snprintf(unit_str.name, sizeof(unit_str.name), unit_format, unit_id); else strcpy(unit_str.name, unit_format); snprintf(test_str.name, sizeof(test_str.name), test_format, test_id); snprintf(strings[test_index].name, sizeof(strings[test_index].name), "%-6s %-24s", unit_str.name, test_str.name); } } #define EFX_CHANNEL_NAME(_channel) "chan%d", _channel->channel #define EFX_TX_QUEUE_NAME(_tx_queue) "txq%d", _tx_queue->queue #define EFX_RX_QUEUE_NAME(_rx_queue) "rxq%d", _rx_queue->queue #define EFX_LOOPBACK_NAME(_mode, _counter) \ "loopback.%s." 
_counter, STRING_TABLE_LOOKUP(_mode, efx_loopback_mode) /** * efx_fill_loopback_test - fill in a block of loopback self-test entries * @efx: Efx NIC * @lb_tests: Efx loopback self-test results structure * @mode: Loopback test mode * @test_index: Starting index of the test * @strings: Ethtool strings, or %NULL * @data: Ethtool test results, or %NULL */ static int efx_fill_loopback_test(struct efx_nic *efx, struct efx_loopback_self_tests *lb_tests, enum efx_loopback_mode mode, unsigned int test_index, struct ethtool_string *strings, u64 *data) { struct efx_channel *channel = efx_get_channel(efx, 0); struct efx_tx_queue *tx_queue; efx_for_each_channel_tx_queue(tx_queue, channel) { efx_fill_test(test_index++, strings, data, &lb_tests->tx_sent[tx_queue->queue], EFX_TX_QUEUE_NAME(tx_queue), EFX_LOOPBACK_NAME(mode, "tx_sent")); efx_fill_test(test_index++, strings, data, &lb_tests->tx_done[tx_queue->queue], EFX_TX_QUEUE_NAME(tx_queue), EFX_LOOPBACK_NAME(mode, "tx_done")); } efx_fill_test(test_index++, strings, data, &lb_tests->rx_good, "rx", 0, EFX_LOOPBACK_NAME(mode, "rx_good")); efx_fill_test(test_index++, strings, data, &lb_tests->rx_bad, "rx", 0, EFX_LOOPBACK_NAME(mode, "rx_bad")); return test_index; } /** * efx_ethtool_fill_self_tests - get self-test details * @efx: Efx NIC * @tests: Efx self-test results structure, or %NULL * @strings: Ethtool strings, or %NULL * @data: Ethtool test results, or %NULL */ static int efx_ethtool_fill_self_tests(struct efx_nic *efx, struct efx_self_tests *tests, struct ethtool_string *strings, u64 *data) { struct efx_channel *channel; unsigned int n = 0, i; enum efx_loopback_mode mode; efx_fill_test(n++, strings, data, &tests->phy_alive, "phy", 0, "alive", NULL); efx_fill_test(n++, strings, data, &tests->nvram, "core", 0, "nvram", NULL); efx_fill_test(n++, strings, data, &tests->interrupt, "core", 0, "interrupt", NULL); /* Event queues */ efx_for_each_channel(channel, efx) { efx_fill_test(n++, strings, data, 
&tests->eventq_dma[channel->channel], EFX_CHANNEL_NAME(channel), "eventq.dma", NULL); efx_fill_test(n++, strings, data, &tests->eventq_int[channel->channel], EFX_CHANNEL_NAME(channel), "eventq.int", NULL); efx_fill_test(n++, strings, data, &tests->eventq_poll[channel->channel], EFX_CHANNEL_NAME(channel), "eventq.poll", NULL); } efx_fill_test(n++, strings, data, &tests->registers, "core", 0, "registers", NULL); if (efx->phy_op->run_tests != NULL) { EFX_BUG_ON_PARANOID(efx->phy_op->test_name == NULL); for (i = 0; true; ++i) { const char *name; EFX_BUG_ON_PARANOID(i >= EFX_MAX_PHY_TESTS); name = efx->phy_op->test_name(efx, i); if (name == NULL) break; efx_fill_test(n++, strings, data, &tests->phy_ext[i], "phy", 0, name, NULL); } } /* Loopback tests */ for (mode = LOOPBACK_NONE; mode <= LOOPBACK_TEST_MAX; mode++) { if (!(efx->loopback_modes & (1 << mode))) continue; n = efx_fill_loopback_test(efx, &tests->loopback[mode], mode, n, strings, data); } return n; } static int efx_ethtool_get_sset_count(struct net_device *net_dev, int string_set) { switch (string_set) { case ETH_SS_STATS: return EFX_ETHTOOL_NUM_STATS; case ETH_SS_TEST: return efx_ethtool_fill_self_tests(netdev_priv(net_dev), NULL, NULL, NULL); default: return -EINVAL; } } static void efx_ethtool_get_strings(struct net_device *net_dev, u32 string_set, u8 *strings) { struct efx_nic *efx = netdev_priv(net_dev); struct ethtool_string *ethtool_strings = (struct ethtool_string *)strings; int i; switch (string_set) { case ETH_SS_STATS: for (i = 0; i < EFX_ETHTOOL_NUM_STATS; i++) strncpy(ethtool_strings[i].name, efx_ethtool_stats[i].name, sizeof(ethtool_strings[i].name)); break; case ETH_SS_TEST: efx_ethtool_fill_self_tests(efx, NULL, ethtool_strings, NULL); break; default: /* No other string sets */ break; } } static void efx_ethtool_get_stats(struct net_device *net_dev, struct ethtool_stats *stats, u64 *data) { struct efx_nic *efx = netdev_priv(net_dev); struct efx_mac_stats *mac_stats = &efx->mac_stats; struct 
efx_ethtool_stat *stat; struct efx_channel *channel; struct efx_tx_queue *tx_queue; struct rtnl_link_stats64 temp; int i; EFX_BUG_ON_PARANOID(stats->n_stats != EFX_ETHTOOL_NUM_STATS); /* Update MAC and NIC statistics */ dev_get_stats(net_dev, &temp); /* Fill detailed statistics buffer */ for (i = 0; i < EFX_ETHTOOL_NUM_STATS; i++) { stat = &efx_ethtool_stats[i]; switch (stat->source) { case EFX_ETHTOOL_STAT_SOURCE_mac_stats: data[i] = stat->get_stat((void *)mac_stats + stat->offset); break; case EFX_ETHTOOL_STAT_SOURCE_nic: data[i] = stat->get_stat((void *)efx + stat->offset); break; case EFX_ETHTOOL_STAT_SOURCE_channel: data[i] = 0; efx_for_each_channel(channel, efx) data[i] += stat->get_stat((void *)channel + stat->offset); break; case EFX_ETHTOOL_STAT_SOURCE_tx_queue: data[i] = 0; efx_for_each_channel(channel, efx) { efx_for_each_channel_tx_queue(tx_queue, channel) data[i] += stat->get_stat((void *)tx_queue + stat->offset); } break; } } } static void efx_ethtool_self_test(struct net_device *net_dev, struct ethtool_test *test, u64 *data) { struct efx_nic *efx = netdev_priv(net_dev); struct efx_self_tests *efx_tests; int already_up; int rc = -ENOMEM; efx_tests = kzalloc(sizeof(*efx_tests), GFP_KERNEL); if (!efx_tests) goto fail; ASSERT_RTNL(); if (efx->state != STATE_RUNNING) { rc = -EIO; goto fail1; } netif_info(efx, drv, efx->net_dev, "starting %sline testing\n", (test->flags & ETH_TEST_FL_OFFLINE) ? "off" : "on"); /* We need rx buffers and interrupts. */ already_up = (efx->net_dev->flags & IFF_UP); if (!already_up) { rc = dev_open(efx->net_dev); if (rc) { netif_err(efx, drv, efx->net_dev, "failed opening device.\n"); goto fail1; } } rc = efx_selftest(efx, efx_tests, test->flags); if (!already_up) dev_close(efx->net_dev); netif_info(efx, drv, efx->net_dev, "%s %sline self-tests\n", rc == 0 ? "passed" : "failed", (test->flags & ETH_TEST_FL_OFFLINE) ? 
"off" : "on"); fail1: /* Fill ethtool results structures */ efx_ethtool_fill_self_tests(efx, efx_tests, NULL, data); kfree(efx_tests); fail: if (rc) test->flags |= ETH_TEST_FL_FAILED; } /* Restart autonegotiation */ static int efx_ethtool_nway_reset(struct net_device *net_dev) { struct efx_nic *efx = netdev_priv(net_dev); return mdio45_nway_restart(&efx->mdio); } static int efx_ethtool_get_coalesce(struct net_device *net_dev, struct ethtool_coalesce *coalesce) { struct efx_nic *efx = netdev_priv(net_dev); struct efx_channel *channel; memset(coalesce, 0, sizeof(*coalesce)); /* Find lowest IRQ moderation across all used TX queues */ coalesce->tx_coalesce_usecs_irq = ~((u32) 0); efx_for_each_channel(channel, efx) { if (!efx_channel_has_tx_queues(channel)) continue; if (channel->irq_moderation < coalesce->tx_coalesce_usecs_irq) { if (channel->channel < efx->n_rx_channels) coalesce->tx_coalesce_usecs_irq = channel->irq_moderation; else coalesce->tx_coalesce_usecs_irq = 0; } } coalesce->use_adaptive_rx_coalesce = efx->irq_rx_adaptive; coalesce->rx_coalesce_usecs_irq = efx->irq_rx_moderation; coalesce->tx_coalesce_usecs_irq *= EFX_IRQ_MOD_RESOLUTION; coalesce->rx_coalesce_usecs_irq *= EFX_IRQ_MOD_RESOLUTION; return 0; } /* Set coalescing parameters * The difficulties occur for shared channels */ static int efx_ethtool_set_coalesce(struct net_device *net_dev, struct ethtool_coalesce *coalesce) { struct efx_nic *efx = netdev_priv(net_dev); struct efx_channel *channel; unsigned tx_usecs, rx_usecs, adaptive; if (coalesce->use_adaptive_tx_coalesce) return -EOPNOTSUPP; if (coalesce->rx_coalesce_usecs || coalesce->tx_coalesce_usecs) { netif_err(efx, drv, efx->net_dev, "invalid coalescing setting. 
" "Only rx/tx_coalesce_usecs_irq are supported\n"); return -EOPNOTSUPP; } rx_usecs = coalesce->rx_coalesce_usecs_irq; tx_usecs = coalesce->tx_coalesce_usecs_irq; adaptive = coalesce->use_adaptive_rx_coalesce; /* If the channel is shared only allow RX parameters to be set */ efx_for_each_channel(channel, efx) { if (efx_channel_has_rx_queue(channel) && efx_channel_has_tx_queues(channel) && tx_usecs) { netif_err(efx, drv, efx->net_dev, "Channel is shared. " "Only RX coalescing may be set\n"); return -EOPNOTSUPP; } } efx_init_irq_moderation(efx, tx_usecs, rx_usecs, adaptive); efx_for_each_channel(channel, efx) efx->type->push_irq_moderation(channel); return 0; } static void efx_ethtool_get_ringparam(struct net_device *net_dev, struct ethtool_ringparam *ring) { struct efx_nic *efx = netdev_priv(net_dev); ring->rx_max_pending = EFX_MAX_DMAQ_SIZE; ring->tx_max_pending = EFX_MAX_DMAQ_SIZE; ring->rx_mini_max_pending = 0; ring->rx_jumbo_max_pending = 0; ring->rx_pending = efx->rxq_entries; ring->tx_pending = efx->txq_entries; ring->rx_mini_pending = 0; ring->rx_jumbo_pending = 0; } static int efx_ethtool_set_ringparam(struct net_device *net_dev, struct ethtool_ringparam *ring) { struct efx_nic *efx = netdev_priv(net_dev); u32 txq_entries; if (ring->rx_mini_pending || ring->rx_jumbo_pending || ring->rx_pending > EFX_MAX_DMAQ_SIZE || ring->tx_pending > EFX_MAX_DMAQ_SIZE) return -EINVAL; if (ring->rx_pending < EFX_RXQ_MIN_ENT) { netif_err(efx, drv, efx->net_dev, "RX queues cannot be smaller than %u\n", EFX_RXQ_MIN_ENT); return -EINVAL; } txq_entries = max(ring->tx_pending, EFX_TXQ_MIN_ENT(efx)); if (txq_entries != ring->tx_pending) netif_warn(efx, drv, efx->net_dev, "increasing TX queue size to minimum of %u\n", txq_entries); return efx_realloc_channels(efx, ring->rx_pending, txq_entries); } static int efx_ethtool_set_pauseparam(struct net_device *net_dev, struct ethtool_pauseparam *pause) { struct efx_nic *efx = netdev_priv(net_dev); u8 wanted_fc, old_fc; u32 old_adv; bool 
reset; int rc = 0; mutex_lock(&efx->mac_lock); wanted_fc = ((pause->rx_pause ? EFX_FC_RX : 0) | (pause->tx_pause ? EFX_FC_TX : 0) | (pause->autoneg ? EFX_FC_AUTO : 0)); if ((wanted_fc & EFX_FC_TX) && !(wanted_fc & EFX_FC_RX)) { netif_dbg(efx, drv, efx->net_dev, "Flow control unsupported: tx ON rx OFF\n"); rc = -EINVAL; goto out; } if ((wanted_fc & EFX_FC_AUTO) && !efx->link_advertising) { netif_dbg(efx, drv, efx->net_dev, "Autonegotiation is disabled\n"); rc = -EINVAL; goto out; } /* TX flow control may automatically turn itself off if the * link partner (intermittently) stops responding to pause * frames. There isn't any indication that this has happened, * so the best we do is leave it up to the user to spot this * and fix it be cycling transmit flow control on this end. */ reset = (wanted_fc & EFX_FC_TX) && !(efx->wanted_fc & EFX_FC_TX); if (EFX_WORKAROUND_11482(efx) && reset) { if (efx_nic_rev(efx) == EFX_REV_FALCON_B0) { /* Recover by resetting the EM block */ falcon_stop_nic_stats(efx); falcon_drain_tx_fifo(efx); efx->mac_op->reconfigure(efx); falcon_start_nic_stats(efx); } else { /* Schedule a reset to recover */ efx_schedule_reset(efx, RESET_TYPE_INVISIBLE); } } old_adv = efx->link_advertising; old_fc = efx->wanted_fc; efx_link_set_wanted_fc(efx, wanted_fc); if (efx->link_advertising != old_adv || (efx->wanted_fc ^ old_fc) & EFX_FC_AUTO) { rc = efx->phy_op->reconfigure(efx); if (rc) { netif_err(efx, drv, efx->net_dev, "Unable to advertise requested flow " "control setting\n"); goto out; } } /* Reconfigure the MAC. 
The PHY *may* generate a link state change event * if the user just changed the advertised capabilities, but there's no * harm doing this twice */ efx->mac_op->reconfigure(efx); out: mutex_unlock(&efx->mac_lock); return rc; } static void efx_ethtool_get_pauseparam(struct net_device *net_dev, struct ethtool_pauseparam *pause) { struct efx_nic *efx = netdev_priv(net_dev); pause->rx_pause = !!(efx->wanted_fc & EFX_FC_RX); pause->tx_pause = !!(efx->wanted_fc & EFX_FC_TX); pause->autoneg = !!(efx->wanted_fc & EFX_FC_AUTO); } static void efx_ethtool_get_wol(struct net_device *net_dev, struct ethtool_wolinfo *wol) { struct efx_nic *efx = netdev_priv(net_dev); return efx->type->get_wol(efx, wol); } static int efx_ethtool_set_wol(struct net_device *net_dev, struct ethtool_wolinfo *wol) { struct efx_nic *efx = netdev_priv(net_dev); return efx->type->set_wol(efx, wol->wolopts); } static int efx_ethtool_reset(struct net_device *net_dev, u32 *flags) { struct efx_nic *efx = netdev_priv(net_dev); enum reset_type method; enum { ETH_RESET_EFX_INVISIBLE = (ETH_RESET_DMA | ETH_RESET_FILTER | ETH_RESET_OFFLOAD | ETH_RESET_MAC) }; /* Check for minimal reset flags */ if ((*flags & ETH_RESET_EFX_INVISIBLE) != ETH_RESET_EFX_INVISIBLE) return -EINVAL; *flags ^= ETH_RESET_EFX_INVISIBLE; method = RESET_TYPE_INVISIBLE; if (*flags & ETH_RESET_PHY) { *flags ^= ETH_RESET_PHY; method = RESET_TYPE_ALL; } if ((*flags & efx->type->reset_world_flags) == efx->type->reset_world_flags) { *flags ^= efx->type->reset_world_flags; method = RESET_TYPE_WORLD; } return efx_reset(efx, method); } static int efx_ethtool_get_rxnfc(struct net_device *net_dev, struct ethtool_rxnfc *info, void *rules __always_unused) { struct efx_nic *efx = netdev_priv(net_dev); switch (info->cmd) { case ETHTOOL_GRXRINGS: info->data = efx->n_rx_channels; return 0; case ETHTOOL_GRXFH: { unsigned min_revision = 0; info->data = 0; switch (info->flow_type) { case TCP_V4_FLOW: info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; /* fall through */ 
case UDP_V4_FLOW: case SCTP_V4_FLOW: case AH_ESP_V4_FLOW: case IPV4_FLOW: info->data |= RXH_IP_SRC | RXH_IP_DST; min_revision = EFX_REV_FALCON_B0; break; case TCP_V6_FLOW: info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; /* fall through */ case UDP_V6_FLOW: case SCTP_V6_FLOW: case AH_ESP_V6_FLOW: case IPV6_FLOW: info->data |= RXH_IP_SRC | RXH_IP_DST; min_revision = EFX_REV_SIENA_A0; break; default: break; } if (efx_nic_rev(efx) < min_revision) info->data = 0; return 0; } default: return -EOPNOTSUPP; } } static int efx_ethtool_set_rx_ntuple(struct net_device *net_dev, struct ethtool_rx_ntuple *ntuple) { struct efx_nic *efx = netdev_priv(net_dev); struct ethtool_tcpip4_spec *ip_entry = &ntuple->fs.h_u.tcp_ip4_spec; struct ethtool_tcpip4_spec *ip_mask = &ntuple->fs.m_u.tcp_ip4_spec; struct ethhdr *mac_entry = &ntuple->fs.h_u.ether_spec; struct ethhdr *mac_mask = &ntuple->fs.m_u.ether_spec; struct efx_filter_spec filter; int rc; /* Range-check action */ if (ntuple->fs.action < ETHTOOL_RXNTUPLE_ACTION_CLEAR || ntuple->fs.action >= (s32)efx->n_rx_channels) return -EINVAL; if (~ntuple->fs.data_mask) return -EINVAL; efx_filter_init_rx(&filter, EFX_FILTER_PRI_MANUAL, 0, (ntuple->fs.action == ETHTOOL_RXNTUPLE_ACTION_DROP) ? 0xfff : ntuple->fs.action); switch (ntuple->fs.flow_type) { case TCP_V4_FLOW: case UDP_V4_FLOW: { u8 proto = (ntuple->fs.flow_type == TCP_V4_FLOW ? 
IPPROTO_TCP : IPPROTO_UDP); /* Must match all of destination, */ if (ip_mask->ip4dst | ip_mask->pdst) return -EINVAL; /* all or none of source, */ if ((ip_mask->ip4src | ip_mask->psrc) && ((__force u32)~ip_mask->ip4src | (__force u16)~ip_mask->psrc)) return -EINVAL; /* and nothing else */ if ((u8)~ip_mask->tos | (u16)~ntuple->fs.vlan_tag_mask) return -EINVAL; if (!ip_mask->ip4src) rc = efx_filter_set_ipv4_full(&filter, proto, ip_entry->ip4dst, ip_entry->pdst, ip_entry->ip4src, ip_entry->psrc); else rc = efx_filter_set_ipv4_local(&filter, proto, ip_entry->ip4dst, ip_entry->pdst); if (rc) return rc; break; } case ETHER_FLOW: /* Must match all of destination, */ if (!is_zero_ether_addr(mac_mask->h_dest)) return -EINVAL; /* all or none of VID, */ if (ntuple->fs.vlan_tag_mask != 0xf000 && ntuple->fs.vlan_tag_mask != 0xffff) return -EINVAL; /* and nothing else */ if (!is_broadcast_ether_addr(mac_mask->h_source) || mac_mask->h_proto != htons(0xffff)) return -EINVAL; rc = efx_filter_set_eth_local( &filter, (ntuple->fs.vlan_tag_mask == 0xf000) ? ntuple->fs.vlan_tag : EFX_FILTER_VID_UNSPEC, mac_entry->h_dest); if (rc) return rc; break; default: return -EINVAL; } if (ntuple->fs.action == ETHTOOL_RXNTUPLE_ACTION_CLEAR) return efx_filter_remove_filter(efx, &filter); rc = efx_filter_insert_filter(efx, &filter, true); return rc < 0 ? 
rc : 0; } static int efx_ethtool_get_rxfh_indir(struct net_device *net_dev, struct ethtool_rxfh_indir *indir) { struct efx_nic *efx = netdev_priv(net_dev); size_t copy_size = min_t(size_t, indir->size, ARRAY_SIZE(efx->rx_indir_table)); if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) return -EOPNOTSUPP; indir->size = ARRAY_SIZE(efx->rx_indir_table); memcpy(indir->ring_index, efx->rx_indir_table, copy_size * sizeof(indir->ring_index[0])); return 0; } static int efx_ethtool_set_rxfh_indir(struct net_device *net_dev, const struct ethtool_rxfh_indir *indir) { struct efx_nic *efx = netdev_priv(net_dev); size_t i; if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) return -EOPNOTSUPP; /* Validate size and indices */ if (indir->size != ARRAY_SIZE(efx->rx_indir_table)) return -EINVAL; for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); i++) if (indir->ring_index[i] >= efx->n_rx_channels) return -EINVAL; memcpy(efx->rx_indir_table, indir->ring_index, sizeof(efx->rx_indir_table)); efx_nic_push_rx_indir_table(efx); return 0; } const struct ethtool_ops efx_ethtool_ops = { .get_settings = efx_ethtool_get_settings, .set_settings = efx_ethtool_set_settings, .get_drvinfo = efx_ethtool_get_drvinfo, .get_regs_len = efx_ethtool_get_regs_len, .get_regs = efx_ethtool_get_regs, .get_msglevel = efx_ethtool_get_msglevel, .set_msglevel = efx_ethtool_set_msglevel, .nway_reset = efx_ethtool_nway_reset, .get_link = ethtool_op_get_link, .get_coalesce = efx_ethtool_get_coalesce, .set_coalesce = efx_ethtool_set_coalesce, .get_ringparam = efx_ethtool_get_ringparam, .set_ringparam = efx_ethtool_set_ringparam, .get_pauseparam = efx_ethtool_get_pauseparam, .set_pauseparam = efx_ethtool_set_pauseparam, .get_sset_count = efx_ethtool_get_sset_count, .self_test = efx_ethtool_self_test, .get_strings = efx_ethtool_get_strings, .set_phys_id = efx_ethtool_phys_id, .get_ethtool_stats = efx_ethtool_get_stats, .get_wol = efx_ethtool_get_wol, .set_wol = efx_ethtool_set_wol, .reset = efx_ethtool_reset, .get_rxnfc = 
efx_ethtool_get_rxnfc, .set_rx_ntuple = efx_ethtool_set_rx_ntuple, .get_rxfh_indir = efx_ethtool_get_rxfh_indir, .set_rxfh_indir = efx_ethtool_set_rxfh_indir, };
gpl-2.0
playfulgod/Kernel-p930-Nitro
drivers/power/test_power.c
1210
4325
/* * Power supply driver for testing. * * Copyright 2010 Anton Vorontsov <cbouatmailru@gmail.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/power_supply.h> #include <linux/errno.h> #include <linux/delay.h> #include <linux/vermagic.h> static int test_power_ac_online = 1; static int test_power_battery_status = POWER_SUPPLY_STATUS_CHARGING; static int test_power_get_ac_property(struct power_supply *psy, enum power_supply_property psp, union power_supply_propval *val) { switch (psp) { case POWER_SUPPLY_PROP_ONLINE: val->intval = test_power_ac_online; break; default: return -EINVAL; } return 0; } static int test_power_get_battery_property(struct power_supply *psy, enum power_supply_property psp, union power_supply_propval *val) { switch (psp) { case POWER_SUPPLY_PROP_MODEL_NAME: val->strval = "Test battery"; break; case POWER_SUPPLY_PROP_MANUFACTURER: val->strval = "Linux"; break; case POWER_SUPPLY_PROP_SERIAL_NUMBER: val->strval = UTS_RELEASE; break; case POWER_SUPPLY_PROP_STATUS: val->intval = test_power_battery_status; break; case POWER_SUPPLY_PROP_CHARGE_TYPE: val->intval = POWER_SUPPLY_CHARGE_TYPE_FAST; break; case POWER_SUPPLY_PROP_HEALTH: val->intval = POWER_SUPPLY_HEALTH_GOOD; break; case POWER_SUPPLY_PROP_TECHNOLOGY: val->intval = POWER_SUPPLY_TECHNOLOGY_LION; break; case POWER_SUPPLY_PROP_CAPACITY_LEVEL: val->intval = POWER_SUPPLY_CAPACITY_LEVEL_NORMAL; break; case POWER_SUPPLY_PROP_CAPACITY: val->intval = 50; break; case POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG: case POWER_SUPPLY_PROP_TIME_TO_FULL_NOW: val->intval = 3600; break; default: pr_info("%s: some properties deliberately report errors.\n", __func__); return -EINVAL; } return 0; } static enum power_supply_property test_power_ac_props[] = { POWER_SUPPLY_PROP_ONLINE, }; static enum 
power_supply_property test_power_battery_props[] = { POWER_SUPPLY_PROP_STATUS, POWER_SUPPLY_PROP_CHARGE_TYPE, POWER_SUPPLY_PROP_HEALTH, POWER_SUPPLY_PROP_TECHNOLOGY, POWER_SUPPLY_PROP_CHARGE_FULL, POWER_SUPPLY_PROP_CHARGE_EMPTY, POWER_SUPPLY_PROP_CAPACITY, POWER_SUPPLY_PROP_CAPACITY_LEVEL, POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG, POWER_SUPPLY_PROP_TIME_TO_FULL_NOW, POWER_SUPPLY_PROP_MODEL_NAME, POWER_SUPPLY_PROP_MANUFACTURER, POWER_SUPPLY_PROP_SERIAL_NUMBER, }; static char *test_power_ac_supplied_to[] = { "test_battery", }; static struct power_supply test_power_supplies[] = { { .name = "test_ac", .type = POWER_SUPPLY_TYPE_MAINS, .supplied_to = test_power_ac_supplied_to, .num_supplicants = ARRAY_SIZE(test_power_ac_supplied_to), .properties = test_power_ac_props, .num_properties = ARRAY_SIZE(test_power_ac_props), .get_property = test_power_get_ac_property, }, { .name = "test_battery", .type = POWER_SUPPLY_TYPE_BATTERY, .properties = test_power_battery_props, .num_properties = ARRAY_SIZE(test_power_battery_props), .get_property = test_power_get_battery_property, }, }; static int __init test_power_init(void) { int i; int ret; for (i = 0; i < ARRAY_SIZE(test_power_supplies); i++) { ret = power_supply_register(NULL, &test_power_supplies[i]); if (ret) { pr_err("%s: failed to register %s\n", __func__, test_power_supplies[i].name); goto failed; } } return 0; failed: while (--i >= 0) power_supply_unregister(&test_power_supplies[i]); return ret; } module_init(test_power_init); static void __exit test_power_exit(void) { int i; /* Let's see how we handle changes... 
*/ test_power_ac_online = 0; test_power_battery_status = POWER_SUPPLY_STATUS_DISCHARGING; for (i = 0; i < ARRAY_SIZE(test_power_supplies); i++) power_supply_changed(&test_power_supplies[i]); pr_info("%s: 'changed' event sent, sleeping for 10 seconds...\n", __func__); ssleep(10); for (i = 0; i < ARRAY_SIZE(test_power_supplies); i++) power_supply_unregister(&test_power_supplies[i]); } module_exit(test_power_exit); MODULE_DESCRIPTION("Power supply driver for testing"); MODULE_AUTHOR("Anton Vorontsov <cbouatmailru@gmail.com>"); MODULE_LICENSE("GPL");
gpl-2.0
slayher/android_kernel_samsung_trlte
fs/reiserfs/ioctl.c
2234
5492
/* * Copyright 2000 by Hans Reiser, licensing governed by reiserfs/README */ #include <linux/capability.h> #include <linux/fs.h> #include <linux/mount.h> #include "reiserfs.h" #include <linux/time.h> #include <asm/uaccess.h> #include <linux/pagemap.h> #include <linux/compat.h> /* * reiserfs_ioctl - handler for ioctl for inode * supported commands: * 1) REISERFS_IOC_UNPACK - try to unpack tail from direct item into indirect * and prevent packing file (argument arg has to be non-zero) * 2) REISERFS_IOC_[GS]ETFLAGS, REISERFS_IOC_[GS]ETVERSION * 3) That's all for a while ... */ long reiserfs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { struct inode *inode = file_inode(filp); unsigned int flags; int err = 0; reiserfs_write_lock(inode->i_sb); switch (cmd) { case REISERFS_IOC_UNPACK: if (S_ISREG(inode->i_mode)) { if (arg) err = reiserfs_unpack(inode, filp); } else err = -ENOTTY; break; /* * following two cases are taken from fs/ext2/ioctl.c by Remy * Card (card@masi.ibp.fr) */ case REISERFS_IOC_GETFLAGS: if (!reiserfs_attrs(inode->i_sb)) { err = -ENOTTY; break; } flags = REISERFS_I(inode)->i_attrs; i_attrs_to_sd_attrs(inode, (__u16 *) & flags); err = put_user(flags, (int __user *)arg); break; case REISERFS_IOC_SETFLAGS:{ if (!reiserfs_attrs(inode->i_sb)) { err = -ENOTTY; break; } err = mnt_want_write_file(filp); if (err) break; if (!inode_owner_or_capable(inode)) { err = -EPERM; goto setflags_out; } if (get_user(flags, (int __user *)arg)) { err = -EFAULT; goto setflags_out; } /* * Is it quota file? 
Do not allow user to mess with it */ if (IS_NOQUOTA(inode)) { err = -EPERM; goto setflags_out; } if (((flags ^ REISERFS_I(inode)-> i_attrs) & (REISERFS_IMMUTABLE_FL | REISERFS_APPEND_FL)) && !capable(CAP_LINUX_IMMUTABLE)) { err = -EPERM; goto setflags_out; } if ((flags & REISERFS_NOTAIL_FL) && S_ISREG(inode->i_mode)) { int result; result = reiserfs_unpack(inode, filp); if (result) { err = result; goto setflags_out; } } sd_attrs_to_i_attrs(flags, inode); REISERFS_I(inode)->i_attrs = flags; inode->i_ctime = CURRENT_TIME_SEC; mark_inode_dirty(inode); setflags_out: mnt_drop_write_file(filp); break; } case REISERFS_IOC_GETVERSION: err = put_user(inode->i_generation, (int __user *)arg); break; case REISERFS_IOC_SETVERSION: if (!inode_owner_or_capable(inode)) { err = -EPERM; break; } err = mnt_want_write_file(filp); if (err) break; if (get_user(inode->i_generation, (int __user *)arg)) { err = -EFAULT; goto setversion_out; } inode->i_ctime = CURRENT_TIME_SEC; mark_inode_dirty(inode); setversion_out: mnt_drop_write_file(filp); break; default: err = -ENOTTY; } reiserfs_write_unlock(inode->i_sb); return err; } #ifdef CONFIG_COMPAT long reiserfs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { /* These are just misnamed, they actually get/put from/to user an int */ switch (cmd) { case REISERFS_IOC32_UNPACK: cmd = REISERFS_IOC_UNPACK; break; case REISERFS_IOC32_GETFLAGS: cmd = REISERFS_IOC_GETFLAGS; break; case REISERFS_IOC32_SETFLAGS: cmd = REISERFS_IOC_SETFLAGS; break; case REISERFS_IOC32_GETVERSION: cmd = REISERFS_IOC_GETVERSION; break; case REISERFS_IOC32_SETVERSION: cmd = REISERFS_IOC_SETVERSION; break; default: return -ENOIOCTLCMD; } return reiserfs_ioctl(file, cmd, (unsigned long) compat_ptr(arg)); } #endif int reiserfs_commit_write(struct file *f, struct page *page, unsigned from, unsigned to); /* ** reiserfs_unpack ** Function try to convert tail from direct item into indirect. 
** It set up nopack attribute in the REISERFS_I(inode)->nopack */ int reiserfs_unpack(struct inode *inode, struct file *filp) { int retval = 0; int depth; int index; struct page *page; struct address_space *mapping; unsigned long write_from; unsigned long blocksize = inode->i_sb->s_blocksize; if (inode->i_size == 0) { REISERFS_I(inode)->i_flags |= i_nopack_mask; return 0; } /* ioctl already done */ if (REISERFS_I(inode)->i_flags & i_nopack_mask) { return 0; } depth = reiserfs_write_lock_once(inode->i_sb); /* we need to make sure nobody is changing the file size beneath us */ reiserfs_mutex_lock_safe(&inode->i_mutex, inode->i_sb); write_from = inode->i_size & (blocksize - 1); /* if we are on a block boundary, we are already unpacked. */ if (write_from == 0) { REISERFS_I(inode)->i_flags |= i_nopack_mask; goto out; } /* we unpack by finding the page with the tail, and calling ** __reiserfs_write_begin on that page. This will force a ** reiserfs_get_block to unpack the tail for us. */ index = inode->i_size >> PAGE_CACHE_SHIFT; mapping = inode->i_mapping; page = grab_cache_page(mapping, index); retval = -ENOMEM; if (!page) { goto out; } retval = __reiserfs_write_begin(page, write_from, 0); if (retval) goto out_unlock; /* conversion can change page contents, must flush */ flush_dcache_page(page); retval = reiserfs_commit_write(NULL, page, write_from, write_from); REISERFS_I(inode)->i_flags |= i_nopack_mask; out_unlock: unlock_page(page); page_cache_release(page); out: mutex_unlock(&inode->i_mutex); reiserfs_write_unlock_once(inode->i_sb, depth); return retval; }
gpl-2.0
androidbftab1/bf-kernel
arch/arm/mach-mmp/clock-mmp2.c
4026
3454
#include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/list.h> #include <linux/io.h> #include <linux/clk.h> #include <mach/addr-map.h> #include "common.h" #include "clock.h" /* * APB Clock register offsets for MMP2 */ #define APBC_RTC APBC_REG(0x000) #define APBC_TWSI1 APBC_REG(0x004) #define APBC_TWSI2 APBC_REG(0x008) #define APBC_TWSI3 APBC_REG(0x00c) #define APBC_TWSI4 APBC_REG(0x010) #define APBC_KPC APBC_REG(0x018) #define APBC_UART1 APBC_REG(0x02c) #define APBC_UART2 APBC_REG(0x030) #define APBC_UART3 APBC_REG(0x034) #define APBC_GPIO APBC_REG(0x038) #define APBC_PWM0 APBC_REG(0x03c) #define APBC_PWM1 APBC_REG(0x040) #define APBC_PWM2 APBC_REG(0x044) #define APBC_PWM3 APBC_REG(0x048) #define APBC_SSP0 APBC_REG(0x04c) #define APBC_SSP1 APBC_REG(0x050) #define APBC_SSP2 APBC_REG(0x054) #define APBC_SSP3 APBC_REG(0x058) #define APBC_SSP4 APBC_REG(0x05c) #define APBC_SSP5 APBC_REG(0x060) #define APBC_TWSI5 APBC_REG(0x07c) #define APBC_TWSI6 APBC_REG(0x080) #define APBC_UART4 APBC_REG(0x088) #define APMU_USB APMU_REG(0x05c) #define APMU_NAND APMU_REG(0x060) #define APMU_SDH0 APMU_REG(0x054) #define APMU_SDH1 APMU_REG(0x058) #define APMU_SDH2 APMU_REG(0x0e8) #define APMU_SDH3 APMU_REG(0x0ec) static void sdhc_clk_enable(struct clk *clk) { uint32_t clk_rst; clk_rst = __raw_readl(clk->clk_rst); clk_rst |= clk->enable_val; __raw_writel(clk_rst, clk->clk_rst); } static void sdhc_clk_disable(struct clk *clk) { uint32_t clk_rst; clk_rst = __raw_readl(clk->clk_rst); clk_rst &= ~clk->enable_val; __raw_writel(clk_rst, clk->clk_rst); } struct clkops sdhc_clk_ops = { .enable = sdhc_clk_enable, .disable = sdhc_clk_disable, }; /* APB peripheral clocks */ static APBC_CLK(uart1, UART1, 1, 26000000); static APBC_CLK(uart2, UART2, 1, 26000000); static APBC_CLK(uart3, UART3, 1, 26000000); static APBC_CLK(uart4, UART4, 1, 26000000); static APBC_CLK(twsi1, TWSI1, 0, 26000000); static APBC_CLK(twsi2, TWSI2, 0, 26000000); static APBC_CLK(twsi3, TWSI3, 
0, 26000000); static APBC_CLK(twsi4, TWSI4, 0, 26000000); static APBC_CLK(twsi5, TWSI5, 0, 26000000); static APBC_CLK(twsi6, TWSI6, 0, 26000000); static APBC_CLK(gpio, GPIO, 0, 26000000); static APMU_CLK(nand, NAND, 0xbf, 100000000); static APMU_CLK_OPS(sdh0, SDH0, 0x1b, 200000000, &sdhc_clk_ops); static APMU_CLK_OPS(sdh1, SDH1, 0x1b, 200000000, &sdhc_clk_ops); static APMU_CLK_OPS(sdh2, SDH2, 0x1b, 200000000, &sdhc_clk_ops); static APMU_CLK_OPS(sdh3, SDH3, 0x1b, 200000000, &sdhc_clk_ops); static struct clk_lookup mmp2_clkregs[] = { INIT_CLKREG(&clk_uart1, "pxa2xx-uart.0", NULL), INIT_CLKREG(&clk_uart2, "pxa2xx-uart.1", NULL), INIT_CLKREG(&clk_uart3, "pxa2xx-uart.2", NULL), INIT_CLKREG(&clk_uart4, "pxa2xx-uart.3", NULL), INIT_CLKREG(&clk_twsi1, "pxa2xx-i2c.0", NULL), INIT_CLKREG(&clk_twsi2, "pxa2xx-i2c.1", NULL), INIT_CLKREG(&clk_twsi3, "pxa2xx-i2c.2", NULL), INIT_CLKREG(&clk_twsi4, "pxa2xx-i2c.3", NULL), INIT_CLKREG(&clk_twsi5, "pxa2xx-i2c.4", NULL), INIT_CLKREG(&clk_twsi6, "pxa2xx-i2c.5", NULL), INIT_CLKREG(&clk_nand, "pxa3xx-nand", NULL), INIT_CLKREG(&clk_gpio, "mmp2-gpio", NULL), INIT_CLKREG(&clk_sdh0, "sdhci-pxav3.0", "PXA-SDHCLK"), INIT_CLKREG(&clk_sdh1, "sdhci-pxav3.1", "PXA-SDHCLK"), INIT_CLKREG(&clk_sdh2, "sdhci-pxav3.2", "PXA-SDHCLK"), INIT_CLKREG(&clk_sdh3, "sdhci-pxav3.3", "PXA-SDHCLK"), }; void __init mmp2_clk_init(void) { clkdev_add_table(ARRAY_AND_SIZE(mmp2_clkregs)); }
gpl-2.0
friedrich420/Note-3-Kernel
arch/arm/mach-sa1100/pm.c
4282
2648
/* * SA1100 Power Management Routines * * Copyright (c) 2001 Cliff Brake <cbrake@accelent.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License. * * History: * * 2001-02-06: Cliff Brake Initial code * * 2001-02-25: Sukjae Cho <sjcho@east.isi.edu> & * Chester Kuo <chester@linux.org.tw> * Save more value for the resume function! Support * Bitsy/Assabet/Freebird board * * 2001-08-29: Nicolas Pitre <nico@fluxnic.net> * Cleaned up, pushed platform dependent stuff * in the platform specific files. * * 2002-05-27: Nicolas Pitre Killed sleep.h and the kmalloced save array. * Storage is local on the stack now. */ #include <linux/init.h> #include <linux/suspend.h> #include <linux/errno.h> #include <linux/time.h> #include <mach/hardware.h> #include <asm/memory.h> #include <asm/suspend.h> #include <asm/mach/time.h> extern int sa1100_finish_suspend(unsigned long); #define SAVE(x) sleep_save[SLEEP_SAVE_##x] = x #define RESTORE(x) x = sleep_save[SLEEP_SAVE_##x] /* * List of global SA11x0 peripheral registers to preserve. * More ones like CP and general purpose register values are preserved * on the stack and then the stack pointer is stored last in sleep.S. 
*/ enum { SLEEP_SAVE_GPDR, SLEEP_SAVE_GAFR, SLEEP_SAVE_PPDR, SLEEP_SAVE_PPSR, SLEEP_SAVE_PPAR, SLEEP_SAVE_PSDR, SLEEP_SAVE_Ser1SDCR0, SLEEP_SAVE_COUNT }; static int sa11x0_pm_enter(suspend_state_t state) { unsigned long gpio, sleep_save[SLEEP_SAVE_COUNT]; gpio = GPLR; /* save vital registers */ SAVE(GPDR); SAVE(GAFR); SAVE(PPDR); SAVE(PPSR); SAVE(PPAR); SAVE(PSDR); SAVE(Ser1SDCR0); /* Clear previous reset status */ RCSR = RCSR_HWR | RCSR_SWR | RCSR_WDR | RCSR_SMR; /* set resume return address */ PSPR = virt_to_phys(cpu_resume); /* go zzz */ cpu_suspend(0, sa1100_finish_suspend); /* * Ensure not to come back here if it wasn't intended */ PSPR = 0; /* * Ensure interrupt sources are disabled; we will re-init * the interrupt subsystem via the device manager. */ ICLR = 0; ICCR = 1; ICMR = 0; /* restore registers */ RESTORE(GPDR); RESTORE(GAFR); RESTORE(PPDR); RESTORE(PPSR); RESTORE(PPAR); RESTORE(PSDR); RESTORE(Ser1SDCR0); GPSR = gpio; GPCR = ~gpio; /* * Clear the peripheral sleep-hold bit. */ PSSR = PSSR_PH; return 0; } static const struct platform_suspend_ops sa11x0_pm_ops = { .enter = sa11x0_pm_enter, .valid = suspend_valid_only_mem, }; static int __init sa11x0_pm_init(void) { suspend_set_ops(&sa11x0_pm_ops); return 0; } late_initcall(sa11x0_pm_init);
gpl-2.0
STR4NG3R/android_kernel_motorola_msm8226
arch/arm/mach-pxa/spitz.c
4282
25709
/* * Support for Sharp SL-Cxx00 Series of PDAs * Models: SL-C3000 (Spitz), SL-C1000 (Akita) and SL-C3100 (Borzoi) * * Copyright (c) 2005 Richard Purdie * * Based on Sharp's 2.4 kernel patches/lubbock.c * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * */ #include <linux/kernel.h> #include <linux/platform_device.h> #include <linux/delay.h> #include <linux/gpio_keys.h> #include <linux/gpio.h> #include <linux/leds.h> #include <linux/i2c.h> #include <linux/i2c/pxa-i2c.h> #include <linux/i2c/pca953x.h> #include <linux/spi/spi.h> #include <linux/spi/ads7846.h> #include <linux/spi/corgi_lcd.h> #include <linux/spi/pxa2xx_spi.h> #include <linux/mtd/sharpsl.h> #include <linux/mtd/physmap.h> #include <linux/input/matrix_keypad.h> #include <linux/regulator/machine.h> #include <linux/io.h> #include <linux/module.h> #include <asm/setup.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> #include <asm/mach/sharpsl_param.h> #include <asm/hardware/scoop.h> #include <mach/pxa27x.h> #include <mach/pxa27x-udc.h> #include <mach/reset.h> #include <mach/irda.h> #include <mach/mmc.h> #include <mach/ohci.h> #include <mach/pxafb.h> #include <mach/spitz.h> #include <mach/sharpsl_pm.h> #include <mach/smemc.h> #include "generic.h" #include "devices.h" /****************************************************************************** * Pin configuration ******************************************************************************/ static unsigned long spitz_pin_config[] __initdata = { /* Chip Selects */ GPIO78_nCS_2, /* SCOOP #2 */ GPIO79_nCS_3, /* NAND */ GPIO80_nCS_4, /* SCOOP #1 */ /* LCD - 16bpp Active TFT */ GPIOxx_LCD_TFT_16BPP, /* PC Card */ GPIO48_nPOE, GPIO49_nPWE, GPIO50_nPIOR, GPIO51_nPIOW, GPIO85_nPCE_1, GPIO54_nPCE_2, GPIO55_nPREG, GPIO56_nPWAIT, GPIO57_nIOIS16, GPIO104_PSKTSEL, /* I2S */ GPIO28_I2S_BITCLK_OUT, GPIO29_I2S_SDATA_IN, 
GPIO30_I2S_SDATA_OUT, GPIO31_I2S_SYNC, /* MMC */ GPIO32_MMC_CLK, GPIO112_MMC_CMD, GPIO92_MMC_DAT_0, GPIO109_MMC_DAT_1, GPIO110_MMC_DAT_2, GPIO111_MMC_DAT_3, /* GPIOs */ GPIO9_GPIO, /* SPITZ_GPIO_nSD_DETECT */ GPIO16_GPIO, /* SPITZ_GPIO_SYNC */ GPIO81_GPIO, /* SPITZ_GPIO_nSD_WP */ GPIO41_GPIO, /* SPITZ_GPIO_USB_CONNECT */ GPIO37_GPIO, /* SPITZ_GPIO_USB_HOST */ GPIO35_GPIO, /* SPITZ_GPIO_USB_DEVICE */ GPIO22_GPIO, /* SPITZ_GPIO_HSYNC */ GPIO94_GPIO, /* SPITZ_GPIO_CF_CD */ GPIO105_GPIO, /* SPITZ_GPIO_CF_IRQ */ GPIO106_GPIO, /* SPITZ_GPIO_CF2_IRQ */ /* GPIO matrix keypad */ GPIO88_GPIO, /* column 0 */ GPIO23_GPIO, /* column 1 */ GPIO24_GPIO, /* column 2 */ GPIO25_GPIO, /* column 3 */ GPIO26_GPIO, /* column 4 */ GPIO27_GPIO, /* column 5 */ GPIO52_GPIO, /* column 6 */ GPIO103_GPIO, /* column 7 */ GPIO107_GPIO, /* column 8 */ GPIO108_GPIO, /* column 9 */ GPIO114_GPIO, /* column 10 */ GPIO12_GPIO, /* row 0 */ GPIO17_GPIO, /* row 1 */ GPIO91_GPIO, /* row 2 */ GPIO34_GPIO, /* row 3 */ GPIO36_GPIO, /* row 4 */ GPIO38_GPIO, /* row 5 */ GPIO39_GPIO, /* row 6 */ /* I2C */ GPIO117_I2C_SCL, GPIO118_I2C_SDA, GPIO0_GPIO | WAKEUP_ON_EDGE_RISE, /* SPITZ_GPIO_KEY_INT */ GPIO1_GPIO | WAKEUP_ON_EDGE_FALL, /* SPITZ_GPIO_RESET */ }; /****************************************************************************** * Scoop GPIO expander ******************************************************************************/ #if defined(CONFIG_SHARP_SCOOP) || defined(CONFIG_SHARP_SCOOP_MODULE) /* SCOOP Device #1 */ static struct resource spitz_scoop_1_resources[] = { [0] = { .start = 0x10800000, .end = 0x10800fff, .flags = IORESOURCE_MEM, }, }; static struct scoop_config spitz_scoop_1_setup = { .io_dir = SPITZ_SCP_IO_DIR, .io_out = SPITZ_SCP_IO_OUT, .suspend_clr = SPITZ_SCP_SUS_CLR, .suspend_set = SPITZ_SCP_SUS_SET, .gpio_base = SPITZ_SCP_GPIO_BASE, }; struct platform_device spitz_scoop_1_device = { .name = "sharp-scoop", .id = 0, .dev = { .platform_data = &spitz_scoop_1_setup, }, .num_resources = 
ARRAY_SIZE(spitz_scoop_1_resources), .resource = spitz_scoop_1_resources, }; /* SCOOP Device #2 */ static struct resource spitz_scoop_2_resources[] = { [0] = { .start = 0x08800040, .end = 0x08800fff, .flags = IORESOURCE_MEM, }, }; static struct scoop_config spitz_scoop_2_setup = { .io_dir = SPITZ_SCP2_IO_DIR, .io_out = SPITZ_SCP2_IO_OUT, .suspend_clr = SPITZ_SCP2_SUS_CLR, .suspend_set = SPITZ_SCP2_SUS_SET, .gpio_base = SPITZ_SCP2_GPIO_BASE, }; struct platform_device spitz_scoop_2_device = { .name = "sharp-scoop", .id = 1, .dev = { .platform_data = &spitz_scoop_2_setup, }, .num_resources = ARRAY_SIZE(spitz_scoop_2_resources), .resource = spitz_scoop_2_resources, }; static void __init spitz_scoop_init(void) { platform_device_register(&spitz_scoop_1_device); /* Akita doesn't have the second SCOOP chip */ if (!machine_is_akita()) platform_device_register(&spitz_scoop_2_device); } /* Power control is shared with between one of the CF slots and SD */ static void spitz_card_pwr_ctrl(uint8_t enable, uint8_t new_cpr) { unsigned short cpr; unsigned long flags; if (new_cpr & 0x7) { gpio_set_value(SPITZ_GPIO_CF_POWER, 1); mdelay(5); } local_irq_save(flags); cpr = read_scoop_reg(&spitz_scoop_1_device.dev, SCOOP_CPR); if (enable & new_cpr) cpr |= new_cpr; else cpr &= ~enable; write_scoop_reg(&spitz_scoop_1_device.dev, SCOOP_CPR, cpr); local_irq_restore(flags); if (!(cpr & 0x7)) { mdelay(1); gpio_set_value(SPITZ_GPIO_CF_POWER, 0); } } #else static inline void spitz_scoop_init(void) {} static inline void spitz_card_pwr_ctrl(uint8_t enable, uint8_t new_cpr) {} #endif /****************************************************************************** * PCMCIA ******************************************************************************/ #if defined(CONFIG_PCMCIA_PXA2XX) || defined(CONFIG_PCMCIA_PXA2XX_MODULE) static void spitz_pcmcia_pwr(struct device *scoop, uint16_t cpr, int nr) { /* Only need to override behaviour for slot 0 */ if (nr == 0) spitz_card_pwr_ctrl( cpr & 
(SCOOP_CPR_CF_3V | SCOOP_CPR_CF_XV), cpr); else write_scoop_reg(scoop, SCOOP_CPR, cpr); } static struct scoop_pcmcia_dev spitz_pcmcia_scoop[] = { { .dev = &spitz_scoop_1_device.dev, .irq = SPITZ_IRQ_GPIO_CF_IRQ, .cd_irq = SPITZ_IRQ_GPIO_CF_CD, .cd_irq_str = "PCMCIA0 CD", }, { .dev = &spitz_scoop_2_device.dev, .irq = SPITZ_IRQ_GPIO_CF2_IRQ, .cd_irq = -1, }, }; static struct scoop_pcmcia_config spitz_pcmcia_config = { .devs = &spitz_pcmcia_scoop[0], .num_devs = 2, .power_ctrl = spitz_pcmcia_pwr, }; static void __init spitz_pcmcia_init(void) { /* Akita has only one PCMCIA slot used */ if (machine_is_akita()) spitz_pcmcia_config.num_devs = 1; platform_scoop_config = &spitz_pcmcia_config; } #else static inline void spitz_pcmcia_init(void) {} #endif /****************************************************************************** * GPIO keyboard ******************************************************************************/ #if defined(CONFIG_KEYBOARD_MATRIX) || defined(CONFIG_KEYBOARD_MATRIX_MODULE) #define SPITZ_KEY_CALENDAR KEY_F1 #define SPITZ_KEY_ADDRESS KEY_F2 #define SPITZ_KEY_FN KEY_F3 #define SPITZ_KEY_CANCEL KEY_F4 #define SPITZ_KEY_EXOK KEY_F5 #define SPITZ_KEY_EXCANCEL KEY_F6 #define SPITZ_KEY_EXJOGDOWN KEY_F7 #define SPITZ_KEY_EXJOGUP KEY_F8 #define SPITZ_KEY_JAP1 KEY_LEFTALT #define SPITZ_KEY_JAP2 KEY_RIGHTCTRL #define SPITZ_KEY_SYNC KEY_F9 #define SPITZ_KEY_MAIL KEY_F10 #define SPITZ_KEY_OK KEY_F11 #define SPITZ_KEY_MENU KEY_F12 static const uint32_t spitz_keymap[] = { KEY(0, 0, KEY_LEFTCTRL), KEY(0, 1, KEY_1), KEY(0, 2, KEY_3), KEY(0, 3, KEY_5), KEY(0, 4, KEY_6), KEY(0, 5, KEY_7), KEY(0, 6, KEY_9), KEY(0, 7, KEY_0), KEY(0, 8, KEY_BACKSPACE), KEY(0, 9, SPITZ_KEY_EXOK), /* EXOK */ KEY(0, 10, SPITZ_KEY_EXCANCEL), /* EXCANCEL */ KEY(1, 1, KEY_2), KEY(1, 2, KEY_4), KEY(1, 3, KEY_R), KEY(1, 4, KEY_Y), KEY(1, 5, KEY_8), KEY(1, 6, KEY_I), KEY(1, 7, KEY_O), KEY(1, 8, KEY_P), KEY(1, 9, SPITZ_KEY_EXJOGDOWN), /* EXJOGDOWN */ KEY(1, 10, SPITZ_KEY_EXJOGUP), /* EXJOGUP */ 
KEY(2, 0, KEY_TAB), KEY(2, 1, KEY_Q), KEY(2, 2, KEY_E), KEY(2, 3, KEY_T), KEY(2, 4, KEY_G), KEY(2, 5, KEY_U), KEY(2, 6, KEY_J), KEY(2, 7, KEY_K), KEY(3, 0, SPITZ_KEY_ADDRESS), /* ADDRESS */ KEY(3, 1, KEY_W), KEY(3, 2, KEY_S), KEY(3, 3, KEY_F), KEY(3, 4, KEY_V), KEY(3, 5, KEY_H), KEY(3, 6, KEY_M), KEY(3, 7, KEY_L), KEY(3, 9, KEY_RIGHTSHIFT), KEY(4, 0, SPITZ_KEY_CALENDAR), /* CALENDAR */ KEY(4, 1, KEY_A), KEY(4, 2, KEY_D), KEY(4, 3, KEY_C), KEY(4, 4, KEY_B), KEY(4, 5, KEY_N), KEY(4, 6, KEY_DOT), KEY(4, 8, KEY_ENTER), KEY(4, 9, KEY_LEFTSHIFT), KEY(5, 0, SPITZ_KEY_MAIL), /* MAIL */ KEY(5, 1, KEY_Z), KEY(5, 2, KEY_X), KEY(5, 3, KEY_MINUS), KEY(5, 4, KEY_SPACE), KEY(5, 5, KEY_COMMA), KEY(5, 7, KEY_UP), KEY(5, 10, SPITZ_KEY_FN), /* FN */ KEY(6, 0, KEY_SYSRQ), KEY(6, 1, SPITZ_KEY_JAP1), /* JAP1 */ KEY(6, 2, SPITZ_KEY_JAP2), /* JAP2 */ KEY(6, 3, SPITZ_KEY_CANCEL), /* CANCEL */ KEY(6, 4, SPITZ_KEY_OK), /* OK */ KEY(6, 5, SPITZ_KEY_MENU), /* MENU */ KEY(6, 6, KEY_LEFT), KEY(6, 7, KEY_DOWN), KEY(6, 8, KEY_RIGHT), }; static const struct matrix_keymap_data spitz_keymap_data = { .keymap = spitz_keymap, .keymap_size = ARRAY_SIZE(spitz_keymap), }; static const uint32_t spitz_row_gpios[] = { 12, 17, 91, 34, 36, 38, 39 }; static const uint32_t spitz_col_gpios[] = { 88, 23, 24, 25, 26, 27, 52, 103, 107, 108, 114 }; static struct matrix_keypad_platform_data spitz_mkp_pdata = { .keymap_data = &spitz_keymap_data, .row_gpios = spitz_row_gpios, .col_gpios = spitz_col_gpios, .num_row_gpios = ARRAY_SIZE(spitz_row_gpios), .num_col_gpios = ARRAY_SIZE(spitz_col_gpios), .col_scan_delay_us = 10, .debounce_ms = 10, .wakeup = 1, }; static struct platform_device spitz_mkp_device = { .name = "matrix-keypad", .id = -1, .dev = { .platform_data = &spitz_mkp_pdata, }, }; static void __init spitz_mkp_init(void) { platform_device_register(&spitz_mkp_device); } #else static inline void spitz_mkp_init(void) {} #endif /****************************************************************************** * GPIO keys 
******************************************************************************/ #if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE) static struct gpio_keys_button spitz_gpio_keys[] = { { .type = EV_PWR, .code = KEY_SUSPEND, .gpio = SPITZ_GPIO_ON_KEY, .desc = "On Off", .wakeup = 1, }, /* Two buttons detecting the lid state */ { .type = EV_SW, .code = 0, .gpio = SPITZ_GPIO_SWA, .desc = "Display Down", }, { .type = EV_SW, .code = 1, .gpio = SPITZ_GPIO_SWB, .desc = "Lid Closed", }, }; static struct gpio_keys_platform_data spitz_gpio_keys_platform_data = { .buttons = spitz_gpio_keys, .nbuttons = ARRAY_SIZE(spitz_gpio_keys), }; static struct platform_device spitz_gpio_keys_device = { .name = "gpio-keys", .id = -1, .dev = { .platform_data = &spitz_gpio_keys_platform_data, }, }; static void __init spitz_keys_init(void) { platform_device_register(&spitz_gpio_keys_device); } #else static inline void spitz_keys_init(void) {} #endif /****************************************************************************** * LEDs ******************************************************************************/ #if defined(CONFIG_LEDS_GPIO) || defined(CONFIG_LEDS_GPIO_MODULE) static struct gpio_led spitz_gpio_leds[] = { { .name = "spitz:amber:charge", .default_trigger = "sharpsl-charge", .gpio = SPITZ_GPIO_LED_ORANGE, }, { .name = "spitz:green:hddactivity", .default_trigger = "ide-disk", .gpio = SPITZ_GPIO_LED_GREEN, }, }; static struct gpio_led_platform_data spitz_gpio_leds_info = { .leds = spitz_gpio_leds, .num_leds = ARRAY_SIZE(spitz_gpio_leds), }; static struct platform_device spitz_led_device = { .name = "leds-gpio", .id = -1, .dev = { .platform_data = &spitz_gpio_leds_info, }, }; static void __init spitz_leds_init(void) { platform_device_register(&spitz_led_device); } #else static inline void spitz_leds_init(void) {} #endif /****************************************************************************** * SSP Devices 
******************************************************************************/ #if defined(CONFIG_SPI_PXA2XX) || defined(CONFIG_SPI_PXA2XX_MODULE) static void spitz_ads7846_wait_for_hsync(void) { while (gpio_get_value(SPITZ_GPIO_HSYNC)) cpu_relax(); while (!gpio_get_value(SPITZ_GPIO_HSYNC)) cpu_relax(); } static struct ads7846_platform_data spitz_ads7846_info = { .model = 7846, .vref_delay_usecs = 100, .x_plate_ohms = 419, .y_plate_ohms = 486, .pressure_max = 1024, .gpio_pendown = SPITZ_GPIO_TP_INT, .wait_for_sync = spitz_ads7846_wait_for_hsync, }; static struct pxa2xx_spi_chip spitz_ads7846_chip = { .gpio_cs = SPITZ_GPIO_ADS7846_CS, }; static void spitz_bl_kick_battery(void) { void (*kick_batt)(void); kick_batt = symbol_get(sharpsl_battery_kick); if (kick_batt) { kick_batt(); symbol_put(sharpsl_battery_kick); } } static struct corgi_lcd_platform_data spitz_lcdcon_info = { .init_mode = CORGI_LCD_MODE_VGA, .max_intensity = 0x2f, .default_intensity = 0x1f, .limit_mask = 0x0b, .gpio_backlight_cont = SPITZ_GPIO_BACKLIGHT_CONT, .gpio_backlight_on = SPITZ_GPIO_BACKLIGHT_ON, .kick_battery = spitz_bl_kick_battery, }; static struct pxa2xx_spi_chip spitz_lcdcon_chip = { .gpio_cs = SPITZ_GPIO_LCDCON_CS, }; static struct pxa2xx_spi_chip spitz_max1111_chip = { .gpio_cs = SPITZ_GPIO_MAX1111_CS, }; static struct spi_board_info spitz_spi_devices[] = { { .modalias = "ads7846", .max_speed_hz = 1200000, .bus_num = 2, .chip_select = 0, .platform_data = &spitz_ads7846_info, .controller_data = &spitz_ads7846_chip, .irq = PXA_GPIO_TO_IRQ(SPITZ_GPIO_TP_INT), }, { .modalias = "corgi-lcd", .max_speed_hz = 50000, .bus_num = 2, .chip_select = 1, .platform_data = &spitz_lcdcon_info, .controller_data = &spitz_lcdcon_chip, }, { .modalias = "max1111", .max_speed_hz = 450000, .bus_num = 2, .chip_select = 2, .controller_data = &spitz_max1111_chip, }, }; static struct pxa2xx_spi_master spitz_spi_info = { .num_chipselect = 3, }; static void __init spitz_spi_init(void) { struct 
corgi_lcd_platform_data *lcd_data = &spitz_lcdcon_info; if (machine_is_akita()) { lcd_data->gpio_backlight_cont = AKITA_GPIO_BACKLIGHT_CONT; lcd_data->gpio_backlight_on = AKITA_GPIO_BACKLIGHT_ON; } pxa2xx_set_spi_info(2, &spitz_spi_info); spi_register_board_info(ARRAY_AND_SIZE(spitz_spi_devices)); } #else static inline void spitz_spi_init(void) {} #endif /****************************************************************************** * SD/MMC card controller ******************************************************************************/ #if defined(CONFIG_MMC_PXA) || defined(CONFIG_MMC_PXA_MODULE) /* * NOTE: The card detect interrupt isn't debounced so we delay it by 250ms to * give the card a chance to fully insert/eject. */ static void spitz_mci_setpower(struct device *dev, unsigned int vdd) { struct pxamci_platform_data* p_d = dev->platform_data; if ((1 << vdd) & p_d->ocr_mask) spitz_card_pwr_ctrl(SCOOP_CPR_SD_3V, SCOOP_CPR_SD_3V); else spitz_card_pwr_ctrl(SCOOP_CPR_SD_3V, 0x0); } static struct pxamci_platform_data spitz_mci_platform_data = { .detect_delay_ms = 250, .ocr_mask = MMC_VDD_32_33|MMC_VDD_33_34, .setpower = spitz_mci_setpower, .gpio_card_detect = SPITZ_GPIO_nSD_DETECT, .gpio_card_ro = SPITZ_GPIO_nSD_WP, .gpio_power = -1, }; static void __init spitz_mmc_init(void) { pxa_set_mci_info(&spitz_mci_platform_data); } #else static inline void spitz_mmc_init(void) {} #endif /****************************************************************************** * USB Host ******************************************************************************/ #if defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE) static int spitz_ohci_init(struct device *dev) { int err; err = gpio_request(SPITZ_GPIO_USB_HOST, "USB_HOST"); if (err) return err; /* Only Port 2 is connected, setup USB Port 2 Output Control Register */ UP2OCR = UP2OCR_HXS | UP2OCR_HXOE | UP2OCR_DPPDE | UP2OCR_DMPDE; return gpio_direction_output(SPITZ_GPIO_USB_HOST, 1); } static void 
spitz_ohci_exit(struct device *dev) { gpio_free(SPITZ_GPIO_USB_HOST); } static struct pxaohci_platform_data spitz_ohci_platform_data = { .port_mode = PMM_NPS_MODE, .init = spitz_ohci_init, .exit = spitz_ohci_exit, .flags = ENABLE_PORT_ALL | NO_OC_PROTECTION, .power_budget = 150, }; static void __init spitz_uhc_init(void) { pxa_set_ohci_info(&spitz_ohci_platform_data); } #else static inline void spitz_uhc_init(void) {} #endif /****************************************************************************** * IrDA ******************************************************************************/ #if defined(CONFIG_PXA_FICP) || defined(CONFIG_PXA_FICP_MODULE) static struct pxaficp_platform_data spitz_ficp_platform_data = { .transceiver_cap = IR_SIRMODE | IR_OFF, }; static void __init spitz_irda_init(void) { if (machine_is_akita()) spitz_ficp_platform_data.gpio_pwdown = AKITA_GPIO_IR_ON; else spitz_ficp_platform_data.gpio_pwdown = SPITZ_GPIO_IR_ON; pxa_set_ficp_info(&spitz_ficp_platform_data); } #else static inline void spitz_irda_init(void) {} #endif /****************************************************************************** * Framebuffer ******************************************************************************/ #if defined(CONFIG_FB_PXA) || defined(CONFIG_FB_PXA_MODULE) static struct pxafb_mode_info spitz_pxafb_modes[] = { { .pixclock = 19231, .xres = 480, .yres = 640, .bpp = 16, .hsync_len = 40, .left_margin = 46, .right_margin = 125, .vsync_len = 3, .upper_margin = 1, .lower_margin = 0, .sync = 0, }, { .pixclock = 134617, .xres = 240, .yres = 320, .bpp = 16, .hsync_len = 20, .left_margin = 20, .right_margin = 46, .vsync_len = 2, .upper_margin = 1, .lower_margin = 0, .sync = 0, }, }; static struct pxafb_mach_info spitz_pxafb_info = { .modes = spitz_pxafb_modes, .num_modes = ARRAY_SIZE(spitz_pxafb_modes), .fixed_modes = 1, .lcd_conn = LCD_COLOR_TFT_16BPP | LCD_ALTERNATE_MAPPING, }; static void __init spitz_lcd_init(void) { pxa_set_fb_info(NULL, 
&spitz_pxafb_info); } #else static inline void spitz_lcd_init(void) {} #endif /****************************************************************************** * Framebuffer ******************************************************************************/ #if defined(CONFIG_MTD_NAND_SHARPSL) || defined(CONFIG_MTD_NAND_SHARPSL_MODULE) static struct mtd_partition spitz_nand_partitions[] = { { .name = "System Area", .offset = 0, .size = 7 * 1024 * 1024, }, { .name = "Root Filesystem", .offset = 7 * 1024 * 1024, }, { .name = "Home Filesystem", .offset = MTDPART_OFS_APPEND, .size = MTDPART_SIZ_FULL, }, }; static uint8_t scan_ff_pattern[] = { 0xff, 0xff }; static struct nand_bbt_descr spitz_nand_bbt = { .options = 0, .offs = 4, .len = 2, .pattern = scan_ff_pattern }; static struct nand_ecclayout akita_oobinfo = { .oobfree = { {0x08, 0x09} }, .eccbytes = 24, .eccpos = { 0x05, 0x01, 0x02, 0x03, 0x06, 0x07, 0x15, 0x11, 0x12, 0x13, 0x16, 0x17, 0x25, 0x21, 0x22, 0x23, 0x26, 0x27, 0x35, 0x31, 0x32, 0x33, 0x36, 0x37, }, }; static struct sharpsl_nand_platform_data spitz_nand_pdata = { .badblock_pattern = &spitz_nand_bbt, .partitions = spitz_nand_partitions, .nr_partitions = ARRAY_SIZE(spitz_nand_partitions), }; static struct resource spitz_nand_resources[] = { { .start = PXA_CS3_PHYS, .end = PXA_CS3_PHYS + SZ_4K - 1, .flags = IORESOURCE_MEM, }, }; static struct platform_device spitz_nand_device = { .name = "sharpsl-nand", .id = -1, .resource = spitz_nand_resources, .num_resources = ARRAY_SIZE(spitz_nand_resources), .dev = { .platform_data = &spitz_nand_pdata, } }; static void __init spitz_nand_init(void) { if (machine_is_spitz()) { spitz_nand_partitions[1].size = 5 * 1024 * 1024; } else if (machine_is_akita()) { spitz_nand_partitions[1].size = 58 * 1024 * 1024; spitz_nand_bbt.len = 1; spitz_nand_pdata.ecc_layout = &akita_oobinfo; } else if (machine_is_borzoi()) { spitz_nand_partitions[1].size = 32 * 1024 * 1024; spitz_nand_bbt.len = 1; spitz_nand_pdata.ecc_layout = &akita_oobinfo; } 
platform_device_register(&spitz_nand_device); } #else static inline void spitz_nand_init(void) {} #endif /****************************************************************************** * NOR Flash ******************************************************************************/ #if defined(CONFIG_MTD_PHYSMAP) || defined(CONFIG_MTD_PHYSMAP_MODULE) static struct mtd_partition spitz_rom_parts[] = { { .name ="Boot PROM Filesystem", .offset = 0x00140000, .size = MTDPART_SIZ_FULL, }, }; static struct physmap_flash_data spitz_rom_data = { .width = 2, .nr_parts = ARRAY_SIZE(spitz_rom_parts), .parts = spitz_rom_parts, }; static struct resource spitz_rom_resources[] = { { .start = PXA_CS0_PHYS, .end = PXA_CS0_PHYS + SZ_8M - 1, .flags = IORESOURCE_MEM, }, }; static struct platform_device spitz_rom_device = { .name = "physmap-flash", .id = -1, .resource = spitz_rom_resources, .num_resources = ARRAY_SIZE(spitz_rom_resources), .dev = { .platform_data = &spitz_rom_data, }, }; static void __init spitz_nor_init(void) { platform_device_register(&spitz_rom_device); } #else static inline void spitz_nor_init(void) {} #endif /****************************************************************************** * GPIO expander ******************************************************************************/ #if defined(CONFIG_I2C_PXA) || defined(CONFIG_I2C_PXA_MODULE) static struct pca953x_platform_data akita_pca953x_pdata = { .gpio_base = AKITA_IOEXP_GPIO_BASE, }; static struct i2c_board_info spitz_i2c_devs[] = { { .type = "wm8750", .addr = 0x1b, }, { .type = "max7310", .addr = 0x18, .platform_data = &akita_pca953x_pdata, }, }; static struct regulator_consumer_supply isl6271a_consumers[] = { { .supply = "vcc_core", } }; static struct regulator_init_data isl6271a_info[] = { { .constraints = { .name = "vcc_core range", .min_uV = 850000, .max_uV = 1600000, .always_on = 1, .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE, }, .consumer_supplies = isl6271a_consumers, .num_consumer_supplies = 
ARRAY_SIZE(isl6271a_consumers), } }; static struct i2c_board_info spitz_pi2c_devs[] = { { .type = "isl6271a", .addr = 0x0c, .platform_data = &isl6271a_info, }, }; static void __init spitz_i2c_init(void) { int size = ARRAY_SIZE(spitz_i2c_devs); /* Only Akita has the max7310 chip */ if (!machine_is_akita()) size--; pxa_set_i2c_info(NULL); pxa27x_set_i2c_power_info(NULL); i2c_register_board_info(0, spitz_i2c_devs, size); i2c_register_board_info(1, ARRAY_AND_SIZE(spitz_pi2c_devs)); } #else static inline void spitz_i2c_init(void) {} #endif /****************************************************************************** * Machine init ******************************************************************************/ static void spitz_poweroff(void) { pxa_restart('g', NULL); } static void spitz_restart(char mode, const char *cmd) { uint32_t msc0 = __raw_readl(MSC0); /* Bootloader magic for a reboot */ if ((msc0 & 0xffff0000) == 0x7ff00000) __raw_writel((msc0 & 0xffff) | 0x7ee00000, MSC0); spitz_poweroff(); } static void __init spitz_init(void) { init_gpio_reset(SPITZ_GPIO_ON_RESET, 1, 0); pm_power_off = spitz_poweroff; PMCR = 0x00; /* Stop 3.6MHz and drive HIGH to PCMCIA and CS */ PCFR |= PCFR_OPDE; pxa2xx_mfp_config(ARRAY_AND_SIZE(spitz_pin_config)); pxa_set_ffuart_info(NULL); pxa_set_btuart_info(NULL); pxa_set_stuart_info(NULL); spitz_spi_init(); spitz_scoop_init(); spitz_mkp_init(); spitz_keys_init(); spitz_leds_init(); spitz_mmc_init(); spitz_pcmcia_init(); spitz_irda_init(); spitz_uhc_init(); spitz_lcd_init(); spitz_nor_init(); spitz_nand_init(); spitz_i2c_init(); } static void __init spitz_fixup(struct tag *tags, char **cmdline, struct meminfo *mi) { sharpsl_save_param(); mi->nr_banks = 1; mi->bank[0].start = 0xa0000000; mi->bank[0].size = (64*1024*1024); } #ifdef CONFIG_MACH_SPITZ MACHINE_START(SPITZ, "SHARP Spitz") .restart_mode = 'g', .fixup = spitz_fixup, .map_io = pxa27x_map_io, .nr_irqs = PXA_NR_IRQS, .init_irq = pxa27x_init_irq, .handle_irq = pxa27x_handle_irq, 
.init_machine = spitz_init, .timer = &pxa_timer, .restart = spitz_restart, MACHINE_END #endif #ifdef CONFIG_MACH_BORZOI MACHINE_START(BORZOI, "SHARP Borzoi") .restart_mode = 'g', .fixup = spitz_fixup, .map_io = pxa27x_map_io, .nr_irqs = PXA_NR_IRQS, .init_irq = pxa27x_init_irq, .handle_irq = pxa27x_handle_irq, .init_machine = spitz_init, .timer = &pxa_timer, .restart = spitz_restart, MACHINE_END #endif #ifdef CONFIG_MACH_AKITA MACHINE_START(AKITA, "SHARP Akita") .restart_mode = 'g', .fixup = spitz_fixup, .map_io = pxa27x_map_io, .nr_irqs = PXA_NR_IRQS, .init_irq = pxa27x_init_irq, .handle_irq = pxa27x_handle_irq, .init_machine = spitz_init, .timer = &pxa_timer, .restart = spitz_restart, MACHINE_END #endif
gpl-2.0
Tkkg1994/Hulk-Kernel
net/ax25/ax25_ip.c
4794
5405
/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * Copyright (C) Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk) */ #include <linux/errno.h> #include <linux/types.h> #include <linux/socket.h> #include <linux/in.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/timer.h> #include <linux/string.h> #include <linux/sockios.h> #include <linux/net.h> #include <linux/slab.h> #include <net/ax25.h> #include <linux/inet.h> #include <linux/netdevice.h> #include <linux/if_arp.h> #include <linux/skbuff.h> #include <net/sock.h> #include <asm/uaccess.h> #include <linux/fcntl.h> #include <linux/termios.h> /* For TIOCINQ/OUTQ */ #include <linux/mm.h> #include <linux/interrupt.h> #include <linux/notifier.h> #include <linux/proc_fs.h> #include <linux/stat.h> #include <linux/netfilter.h> #include <linux/sysctl.h> #include <net/ip.h> #include <net/arp.h> /* * IP over AX.25 encapsulation. */ /* * Shove an AX.25 UI header on an IP packet and handle ARP */ #ifdef CONFIG_INET int ax25_hard_header(struct sk_buff *skb, struct net_device *dev, unsigned short type, const void *daddr, const void *saddr, unsigned len) { unsigned char *buff; /* they sometimes come back to us... 
*/ if (type == ETH_P_AX25) return 0; /* header is an AX.25 UI frame from us to them */ buff = skb_push(skb, AX25_HEADER_LEN); *buff++ = 0x00; /* KISS DATA */ if (daddr != NULL) memcpy(buff, daddr, dev->addr_len); /* Address specified */ buff[6] &= ~AX25_CBIT; buff[6] &= ~AX25_EBIT; buff[6] |= AX25_SSSID_SPARE; buff += AX25_ADDR_LEN; if (saddr != NULL) memcpy(buff, saddr, dev->addr_len); else memcpy(buff, dev->dev_addr, dev->addr_len); buff[6] &= ~AX25_CBIT; buff[6] |= AX25_EBIT; buff[6] |= AX25_SSSID_SPARE; buff += AX25_ADDR_LEN; *buff++ = AX25_UI; /* UI */ /* Append a suitable AX.25 PID */ switch (type) { case ETH_P_IP: *buff++ = AX25_P_IP; break; case ETH_P_ARP: *buff++ = AX25_P_ARP; break; default: printk(KERN_ERR "AX.25: ax25_hard_header - wrong protocol type 0x%2.2x\n", type); *buff++ = 0; break; } if (daddr != NULL) return AX25_HEADER_LEN; return -AX25_HEADER_LEN; /* Unfinished header */ } int ax25_rebuild_header(struct sk_buff *skb) { struct sk_buff *ourskb; unsigned char *bp = skb->data; ax25_route *route; struct net_device *dev = NULL; ax25_address *src, *dst; ax25_digi *digipeat = NULL; ax25_dev *ax25_dev; ax25_cb *ax25; char ip_mode = ' '; dst = (ax25_address *)(bp + 1); src = (ax25_address *)(bp + 8); if (arp_find(bp + 1, skb)) return 1; route = ax25_get_route(dst, NULL); if (route) { digipeat = route->digipeat; dev = route->dev; ip_mode = route->ip_mode; } if (dev == NULL) dev = skb->dev; if ((ax25_dev = ax25_dev_ax25dev(dev)) == NULL) { goto put; } if (bp[16] == AX25_P_IP) { if (ip_mode == 'V' || (ip_mode == ' ' && ax25_dev->values[AX25_VALUES_IPDEFMODE])) { /* * We copy the buffer and release the original thereby * keeping it straight * * Note: we report 1 back so the caller will * not feed the frame direct to the physical device * We don't want that to happen. (It won't be upset * as we have pulled the frame from the queue by * freeing it). 
* * NB: TCP modifies buffers that are still * on a device queue, thus we use skb_copy() * instead of using skb_clone() unless this * gets fixed. */ ax25_address src_c; ax25_address dst_c; if ((ourskb = skb_copy(skb, GFP_ATOMIC)) == NULL) { kfree_skb(skb); goto put; } if (skb->sk != NULL) skb_set_owner_w(ourskb, skb->sk); kfree_skb(skb); /* dl9sau: bugfix * after kfree_skb(), dst and src which were pointer * to bp which is part of skb->data would not be valid * anymore hope that after skb_pull(ourskb, ..) our * dsc_c and src_c will not become invalid */ bp = ourskb->data; dst_c = *(ax25_address *)(bp + 1); src_c = *(ax25_address *)(bp + 8); skb_pull(ourskb, AX25_HEADER_LEN - 1); /* Keep PID */ skb_reset_network_header(ourskb); ax25=ax25_send_frame( ourskb, ax25_dev->values[AX25_VALUES_PACLEN], &src_c, &dst_c, digipeat, dev); if (ax25) { ax25_cb_put(ax25); } goto put; } } bp[7] &= ~AX25_CBIT; bp[7] &= ~AX25_EBIT; bp[7] |= AX25_SSSID_SPARE; bp[14] &= ~AX25_CBIT; bp[14] |= AX25_EBIT; bp[14] |= AX25_SSSID_SPARE; skb_pull(skb, AX25_KISS_HEADER_LEN); if (digipeat != NULL) { if ((ourskb = ax25_rt_build_path(skb, src, dst, route->digipeat)) == NULL) { kfree_skb(skb); goto put; } skb = ourskb; } ax25_queue_xmit(skb, dev); put: if (route) ax25_put_route(route); return 1; } #else /* INET */ int ax25_hard_header(struct sk_buff *skb, struct net_device *dev, unsigned short type, const void *daddr, const void *saddr, unsigned len) { return -AX25_HEADER_LEN; } int ax25_rebuild_header(struct sk_buff *skb) { return 1; } #endif const struct header_ops ax25_header_ops = { .create = ax25_hard_header, .rebuild = ax25_rebuild_header, }; EXPORT_SYMBOL(ax25_hard_header); EXPORT_SYMBOL(ax25_rebuild_header); EXPORT_SYMBOL(ax25_header_ops);
gpl-2.0
sbreen94/Zeus_d2tmo
drivers/isdn/hardware/eicon/idifunc.c
5050
6425
/* $Id: idifunc.c,v 1.14.4.4 2004/08/28 20:03:53 armin Exp $ * * Driver for Eicon DIVA Server ISDN cards. * User Mode IDI Interface * * Copyright 2000-2003 by Armin Schindler (mac@melware.de) * Copyright 2000-2003 Cytronics & Melware (info@melware.de) * * This software may be used and distributed according to the terms * of the GNU General Public License, incorporated herein by reference. */ #include "platform.h" #include "di_defs.h" #include "divasync.h" #include "um_xdi.h" #include "um_idi.h" #define DBG_MINIMUM (DL_LOG + DL_FTL + DL_ERR) #define DBG_DEFAULT (DBG_MINIMUM + DL_XLOG + DL_REG) extern char *DRIVERRELEASE_IDI; extern void DIVA_DIDD_Read(void *, int); extern int diva_user_mode_idi_create_adapter(const DESCRIPTOR *, int); extern void diva_user_mode_idi_remove_adapter(int); static dword notify_handle; static DESCRIPTOR DAdapter; static DESCRIPTOR MAdapter; static void no_printf(unsigned char *x, ...) { /* dummy debug function */ } #include "debuglib.c" /* * stop debug */ static void stop_dbg(void) { DbgDeregister(); memset(&MAdapter, 0, sizeof(MAdapter)); dprintf = no_printf; } typedef struct _udiva_card { struct list_head list; int Id; DESCRIPTOR d; } udiva_card; static LIST_HEAD(cards); static diva_os_spin_lock_t ll_lock; /* * find card in list */ static udiva_card *find_card_in_list(DESCRIPTOR * d) { udiva_card *card; struct list_head *tmp; diva_os_spin_lock_magic_t old_irql; diva_os_enter_spin_lock(&ll_lock, &old_irql, "find card"); list_for_each(tmp, &cards) { card = list_entry(tmp, udiva_card, list); if (card->d.request == d->request) { diva_os_leave_spin_lock(&ll_lock, &old_irql, "find card"); return (card); } } diva_os_leave_spin_lock(&ll_lock, &old_irql, "find card"); return ((udiva_card *) NULL); } /* * new card */ static void um_new_card(DESCRIPTOR * d) { int adapter_nr = 0; udiva_card *card = NULL; IDI_SYNC_REQ sync_req; diva_os_spin_lock_magic_t old_irql; if (!(card = diva_os_malloc(0, sizeof(udiva_card)))) { DBG_ERR(("cannot get buffer for 
card")); return; } memcpy(&card->d, d, sizeof(DESCRIPTOR)); sync_req.xdi_logical_adapter_number.Req = 0; sync_req.xdi_logical_adapter_number.Rc = IDI_SYNC_REQ_XDI_GET_LOGICAL_ADAPTER_NUMBER; card->d.request((ENTITY *) & sync_req); adapter_nr = sync_req.xdi_logical_adapter_number.info.logical_adapter_number; card->Id = adapter_nr; if (!(diva_user_mode_idi_create_adapter(d, adapter_nr))) { diva_os_enter_spin_lock(&ll_lock, &old_irql, "add card"); list_add_tail(&card->list, &cards); diva_os_leave_spin_lock(&ll_lock, &old_irql, "add card"); } else { DBG_ERR(("could not create user mode idi card %d", adapter_nr)); diva_os_free(0, card); } } /* * remove card */ static void um_remove_card(DESCRIPTOR * d) { diva_os_spin_lock_magic_t old_irql; udiva_card *card = NULL; if (!(card = find_card_in_list(d))) { DBG_ERR(("cannot find card to remove")); return; } diva_user_mode_idi_remove_adapter(card->Id); diva_os_enter_spin_lock(&ll_lock, &old_irql, "remove card"); list_del(&card->list); diva_os_leave_spin_lock(&ll_lock, &old_irql, "remove card"); DBG_LOG(("idi proc entry removed for card %d", card->Id)); diva_os_free(0, card); } /* * remove all adapter */ static void DIVA_EXIT_FUNCTION remove_all_idi_proc(void) { udiva_card *card; diva_os_spin_lock_magic_t old_irql; rescan: diva_os_enter_spin_lock(&ll_lock, &old_irql, "remove all"); if (!list_empty(&cards)) { card = list_entry(cards.next, udiva_card, list); list_del(&card->list); diva_os_leave_spin_lock(&ll_lock, &old_irql, "remove all"); diva_user_mode_idi_remove_adapter(card->Id); diva_os_free(0, card); goto rescan; } diva_os_leave_spin_lock(&ll_lock, &old_irql, "remove all"); } /* * DIDD notify callback */ static void *didd_callback(void *context, DESCRIPTOR * adapter, int removal) { if (adapter->type == IDI_DADAPTER) { DBG_ERR(("Notification about IDI_DADAPTER change ! 
Oops.")); return (NULL); } else if (adapter->type == IDI_DIMAINT) { if (removal) { stop_dbg(); } else { memcpy(&MAdapter, adapter, sizeof(MAdapter)); dprintf = (DIVA_DI_PRINTF) MAdapter.request; DbgRegister("User IDI", DRIVERRELEASE_IDI, DBG_DEFAULT); } } else if ((adapter->type > 0) && (adapter->type < 16)) { /* IDI Adapter */ if (removal) { um_remove_card(adapter); } else { um_new_card(adapter); } } return (NULL); } /* * connect DIDD */ static int DIVA_INIT_FUNCTION connect_didd(void) { int x = 0; int dadapter = 0; IDI_SYNC_REQ req; DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS]; DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table)); for (x = 0; x < MAX_DESCRIPTORS; x++) { if (DIDD_Table[x].type == IDI_DADAPTER) { /* DADAPTER found */ dadapter = 1; memcpy(&DAdapter, &DIDD_Table[x], sizeof(DAdapter)); req.didd_notify.e.Req = 0; req.didd_notify.e.Rc = IDI_SYNC_REQ_DIDD_REGISTER_ADAPTER_NOTIFY; req.didd_notify.info.callback = (void *)didd_callback; req.didd_notify.info.context = NULL; DAdapter.request((ENTITY *) & req); if (req.didd_notify.e.Rc != 0xff) { stop_dbg(); return (0); } notify_handle = req.didd_notify.info.handle; } else if (DIDD_Table[x].type == IDI_DIMAINT) { /* MAINT found */ memcpy(&MAdapter, &DIDD_Table[x], sizeof(DAdapter)); dprintf = (DIVA_DI_PRINTF) MAdapter.request; DbgRegister("User IDI", DRIVERRELEASE_IDI, DBG_DEFAULT); } else if ((DIDD_Table[x].type > 0) && (DIDD_Table[x].type < 16)) { /* IDI Adapter found */ um_new_card(&DIDD_Table[x]); } } if (!dadapter) { stop_dbg(); } return (dadapter); } /* * Disconnect from DIDD */ static void DIVA_EXIT_FUNCTION disconnect_didd(void) { IDI_SYNC_REQ req; stop_dbg(); req.didd_notify.e.Req = 0; req.didd_notify.e.Rc = IDI_SYNC_REQ_DIDD_REMOVE_ADAPTER_NOTIFY; req.didd_notify.info.handle = notify_handle; DAdapter.request((ENTITY *) & req); } /* * init */ int DIVA_INIT_FUNCTION idifunc_init(void) { diva_os_initialize_spin_lock(&ll_lock, "idifunc"); if (diva_user_mode_idi_init()) { DBG_ERR(("init: init failed.")); return (0); 
} if (!connect_didd()) { diva_user_mode_idi_finit(); DBG_ERR(("init: failed to connect to DIDD.")); return (0); } return (1); } /* * finit */ void DIVA_EXIT_FUNCTION idifunc_finit(void) { diva_user_mode_idi_finit(); disconnect_didd(); remove_all_idi_proc(); }
gpl-2.0
l0rdg3x/AK-OnePlusOne-CAF
arch/arm/plat-mxc/iomux-v1.c
5562
4884
/* * arch/arm/plat-mxc/iomux-v1.c * * Copyright (C) 2004 Sascha Hauer, Synertronixx GmbH * Copyright (C) 2009 Uwe Kleine-Koenig, Pengutronix * * Common code for i.MX1, i.MX21 and i.MX27 * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */ #include <linux/errno.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/string.h> #include <linux/gpio.h> #include <mach/hardware.h> #include <asm/mach/map.h> #include <mach/iomux-v1.h> static void __iomem *imx_iomuxv1_baseaddr; static unsigned imx_iomuxv1_numports; static inline unsigned long imx_iomuxv1_readl(unsigned offset) { return __raw_readl(imx_iomuxv1_baseaddr + offset); } static inline void imx_iomuxv1_writel(unsigned long val, unsigned offset) { __raw_writel(val, imx_iomuxv1_baseaddr + offset); } static inline void imx_iomuxv1_rmwl(unsigned offset, unsigned long mask, unsigned long value) { unsigned long reg = imx_iomuxv1_readl(offset); reg &= ~mask; reg |= value; imx_iomuxv1_writel(reg, offset); } static inline void imx_iomuxv1_set_puen( unsigned int port, unsigned int pin, int on) { unsigned long mask = 1 << pin; imx_iomuxv1_rmwl(MXC_PUEN(port), mask, on ? mask : 0); } static inline void imx_iomuxv1_set_ddir( unsigned int port, unsigned int pin, int out) { unsigned long mask = 1 << pin; imx_iomuxv1_rmwl(MXC_DDIR(port), mask, out ? 
mask : 0); } static inline void imx_iomuxv1_set_gpr( unsigned int port, unsigned int pin, int af) { unsigned long mask = 1 << pin; imx_iomuxv1_rmwl(MXC_GPR(port), mask, af ? mask : 0); } static inline void imx_iomuxv1_set_gius( unsigned int port, unsigned int pin, int inuse) { unsigned long mask = 1 << pin; imx_iomuxv1_rmwl(MXC_GIUS(port), mask, inuse ? mask : 0); } static inline void imx_iomuxv1_set_ocr( unsigned int port, unsigned int pin, unsigned int ocr) { unsigned long shift = (pin & 0xf) << 1; unsigned long mask = 3 << shift; unsigned long value = ocr << shift; unsigned long offset = pin < 16 ? MXC_OCR1(port) : MXC_OCR2(port); imx_iomuxv1_rmwl(offset, mask, value); } static inline void imx_iomuxv1_set_iconfa( unsigned int port, unsigned int pin, unsigned int aout) { unsigned long shift = (pin & 0xf) << 1; unsigned long mask = 3 << shift; unsigned long value = aout << shift; unsigned long offset = pin < 16 ? MXC_ICONFA1(port) : MXC_ICONFA2(port); imx_iomuxv1_rmwl(offset, mask, value); } static inline void imx_iomuxv1_set_iconfb( unsigned int port, unsigned int pin, unsigned int bout) { unsigned long shift = (pin & 0xf) << 1; unsigned long mask = 3 << shift; unsigned long value = bout << shift; unsigned long offset = pin < 16 ? MXC_ICONFB1(port) : MXC_ICONFB2(port); imx_iomuxv1_rmwl(offset, mask, value); } int mxc_gpio_mode(int gpio_mode) { unsigned int pin = gpio_mode & GPIO_PIN_MASK; unsigned int port = (gpio_mode & GPIO_PORT_MASK) >> GPIO_PORT_SHIFT; unsigned int ocr = (gpio_mode & GPIO_OCR_MASK) >> GPIO_OCR_SHIFT; unsigned int aout = (gpio_mode >> GPIO_AOUT_SHIFT) & 3; unsigned int bout = (gpio_mode >> GPIO_BOUT_SHIFT) & 3; if (port >= imx_iomuxv1_numports) return -EINVAL; /* Pullup enable */ imx_iomuxv1_set_puen(port, pin, gpio_mode & GPIO_PUEN); /* Data direction */ imx_iomuxv1_set_ddir(port, pin, gpio_mode & GPIO_OUT); /* Primary / alternate function */ imx_iomuxv1_set_gpr(port, pin, gpio_mode & GPIO_AF); /* use as gpio? 
*/ imx_iomuxv1_set_gius(port, pin, !(gpio_mode & (GPIO_PF | GPIO_AF))); imx_iomuxv1_set_ocr(port, pin, ocr); imx_iomuxv1_set_iconfa(port, pin, aout); imx_iomuxv1_set_iconfb(port, pin, bout); return 0; } EXPORT_SYMBOL(mxc_gpio_mode); static int imx_iomuxv1_setup_multiple(const int *list, unsigned count) { size_t i; int ret = 0; for (i = 0; i < count; ++i) { ret = mxc_gpio_mode(list[i]); if (ret) return ret; } return ret; } int mxc_gpio_setup_multiple_pins(const int *pin_list, unsigned count, const char *label) { int ret; ret = imx_iomuxv1_setup_multiple(pin_list, count); return ret; } EXPORT_SYMBOL(mxc_gpio_setup_multiple_pins); int __init imx_iomuxv1_init(void __iomem *base, int numports) { imx_iomuxv1_baseaddr = base; imx_iomuxv1_numports = numports; return 0; }
gpl-2.0
omerjerk/CodyKernel-hammerhead
drivers/scsi/scsi_ioctl.c
11962
9361
/* * Changes: * Arnaldo Carvalho de Melo <acme@conectiva.com.br> 08/23/2000 * - get rid of some verify_areas and use __copy*user and __get/put_user * for the ones that remain */ #include <linux/module.h> #include <linux/blkdev.h> #include <linux/interrupt.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/mm.h> #include <linux/string.h> #include <asm/uaccess.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_device.h> #include <scsi/scsi_eh.h> #include <scsi/scsi_host.h> #include <scsi/scsi_ioctl.h> #include <scsi/sg.h> #include <scsi/scsi_dbg.h> #include "scsi_logging.h" #define NORMAL_RETRIES 5 #define IOCTL_NORMAL_TIMEOUT (10 * HZ) #define MAX_BUF PAGE_SIZE /** * ioctl_probe -- return host identification * @host: host to identify * @buffer: userspace buffer for identification * * Return an identifying string at @buffer, if @buffer is non-NULL, filling * to the length stored at * (int *) @buffer. */ static int ioctl_probe(struct Scsi_Host *host, void __user *buffer) { unsigned int len, slen; const char *string; if (buffer) { if (get_user(len, (unsigned int __user *) buffer)) return -EFAULT; if (host->hostt->info) string = host->hostt->info(host); else string = host->hostt->name; if (string) { slen = strlen(string); if (len > slen) len = slen + 1; if (copy_to_user(buffer, string, len)) return -EFAULT; } } return 1; } /* * The SCSI_IOCTL_SEND_COMMAND ioctl sends a command out to the SCSI host. * The IOCTL_NORMAL_TIMEOUT and NORMAL_RETRIES variables are used. * * dev is the SCSI device struct ptr, *(int *) arg is the length of the * input data, if any, not including the command string & counts, * *((int *)arg + 1) is the output buffer size in bytes. * * *(char *) ((int *) arg)[2] the actual command byte. * * Note that if more than MAX_BUF bytes are requested to be transferred, * the ioctl will fail with error EINVAL. * * This size *does not* include the initial lengths that were passed. 
* * The SCSI command is read from the memory location immediately after the * length words, and the input data is right after the command. The SCSI * routines know the command size based on the opcode decode. * * The output area is then filled in starting from the command byte. */ static int ioctl_internal_command(struct scsi_device *sdev, char *cmd, int timeout, int retries) { int result; struct scsi_sense_hdr sshdr; SCSI_LOG_IOCTL(1, printk("Trying ioctl with scsi command %d\n", *cmd)); result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0, &sshdr, timeout, retries, NULL); SCSI_LOG_IOCTL(2, printk("Ioctl returned 0x%x\n", result)); if ((driver_byte(result) & DRIVER_SENSE) && (scsi_sense_valid(&sshdr))) { switch (sshdr.sense_key) { case ILLEGAL_REQUEST: if (cmd[0] == ALLOW_MEDIUM_REMOVAL) sdev->lockable = 0; else printk(KERN_INFO "ioctl_internal_command: " "ILLEGAL REQUEST asc=0x%x ascq=0x%x\n", sshdr.asc, sshdr.ascq); break; case NOT_READY: /* This happens if there is no disc in drive */ if (sdev->removable) break; case UNIT_ATTENTION: if (sdev->removable) { sdev->changed = 1; result = 0; /* This is no longer considered an error */ break; } default: /* Fall through for non-removable media */ sdev_printk(KERN_INFO, sdev, "ioctl_internal_command return code = %x\n", result); scsi_print_sense_hdr(" ", &sshdr); break; } } SCSI_LOG_IOCTL(2, printk("IOCTL Releasing command\n")); return result; } int scsi_set_medium_removal(struct scsi_device *sdev, char state) { char scsi_cmd[MAX_COMMAND_SIZE]; int ret; if (!sdev->removable || !sdev->lockable) return 0; scsi_cmd[0] = ALLOW_MEDIUM_REMOVAL; scsi_cmd[1] = 0; scsi_cmd[2] = 0; scsi_cmd[3] = 0; scsi_cmd[4] = state; scsi_cmd[5] = 0; ret = ioctl_internal_command(sdev, scsi_cmd, IOCTL_NORMAL_TIMEOUT, NORMAL_RETRIES); if (ret == 0) sdev->locked = (state == SCSI_REMOVAL_PREVENT); return ret; } EXPORT_SYMBOL(scsi_set_medium_removal); /* * The scsi_ioctl_get_pci() function places into arg the value * pci_dev::slot_name (8 
characters) for the PCI device (if any). * Returns: 0 on success * -ENXIO if there isn't a PCI device pointer * (could be because the SCSI driver hasn't been * updated yet, or because it isn't a SCSI * device) * any copy_to_user() error on failure there */ static int scsi_ioctl_get_pci(struct scsi_device *sdev, void __user *arg) { struct device *dev = scsi_get_device(sdev->host); const char *name; if (!dev) return -ENXIO; name = dev_name(dev); /* compatibility with old ioctl which only returned * 20 characters */ return copy_to_user(arg, name, min(strlen(name), (size_t)20)) ? -EFAULT: 0; } /** * scsi_ioctl - Dispatch ioctl to scsi device * @sdev: scsi device receiving ioctl * @cmd: which ioctl is it * @arg: data associated with ioctl * * Description: The scsi_ioctl() function differs from most ioctls in that it * does not take a major/minor number as the dev field. Rather, it takes * a pointer to a &struct scsi_device. */ int scsi_ioctl(struct scsi_device *sdev, int cmd, void __user *arg) { char scsi_cmd[MAX_COMMAND_SIZE]; /* No idea how this happens.... */ if (!sdev) return -ENXIO; /* * If we are in the middle of error recovery, don't let anyone * else try and use this device. Also, if error recovery fails, it * may try and take the device offline, in which case all further * access to the device is prohibited. */ if (!scsi_block_when_processing_errors(sdev)) return -ENODEV; /* Check for deprecated ioctls ... 
all the ioctls which don't * follow the new unique numbering scheme are deprecated */ switch (cmd) { case SCSI_IOCTL_SEND_COMMAND: case SCSI_IOCTL_TEST_UNIT_READY: case SCSI_IOCTL_BENCHMARK_COMMAND: case SCSI_IOCTL_SYNC: case SCSI_IOCTL_START_UNIT: case SCSI_IOCTL_STOP_UNIT: printk(KERN_WARNING "program %s is using a deprecated SCSI " "ioctl, please convert it to SG_IO\n", current->comm); break; default: break; } switch (cmd) { case SCSI_IOCTL_GET_IDLUN: if (!access_ok(VERIFY_WRITE, arg, sizeof(struct scsi_idlun))) return -EFAULT; __put_user((sdev->id & 0xff) + ((sdev->lun & 0xff) << 8) + ((sdev->channel & 0xff) << 16) + ((sdev->host->host_no & 0xff) << 24), &((struct scsi_idlun __user *)arg)->dev_id); __put_user(sdev->host->unique_id, &((struct scsi_idlun __user *)arg)->host_unique_id); return 0; case SCSI_IOCTL_GET_BUS_NUMBER: return put_user(sdev->host->host_no, (int __user *)arg); case SCSI_IOCTL_PROBE_HOST: return ioctl_probe(sdev->host, arg); case SCSI_IOCTL_SEND_COMMAND: if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO)) return -EACCES; return sg_scsi_ioctl(sdev->request_queue, NULL, 0, arg); case SCSI_IOCTL_DOORLOCK: return scsi_set_medium_removal(sdev, SCSI_REMOVAL_PREVENT); case SCSI_IOCTL_DOORUNLOCK: return scsi_set_medium_removal(sdev, SCSI_REMOVAL_ALLOW); case SCSI_IOCTL_TEST_UNIT_READY: return scsi_test_unit_ready(sdev, IOCTL_NORMAL_TIMEOUT, NORMAL_RETRIES, NULL); case SCSI_IOCTL_START_UNIT: scsi_cmd[0] = START_STOP; scsi_cmd[1] = 0; scsi_cmd[2] = scsi_cmd[3] = scsi_cmd[5] = 0; scsi_cmd[4] = 1; return ioctl_internal_command(sdev, scsi_cmd, START_STOP_TIMEOUT, NORMAL_RETRIES); case SCSI_IOCTL_STOP_UNIT: scsi_cmd[0] = START_STOP; scsi_cmd[1] = 0; scsi_cmd[2] = scsi_cmd[3] = scsi_cmd[5] = 0; scsi_cmd[4] = 0; return ioctl_internal_command(sdev, scsi_cmd, START_STOP_TIMEOUT, NORMAL_RETRIES); case SCSI_IOCTL_GET_PCI: return scsi_ioctl_get_pci(sdev, arg); default: if (sdev->host->hostt->ioctl) return sdev->host->hostt->ioctl(sdev, cmd, arg); } return 
-EINVAL; } EXPORT_SYMBOL(scsi_ioctl); /** * scsi_nonblockable_ioctl() - Handle SG_SCSI_RESET * @sdev: scsi device receiving ioctl * @cmd: Must be SC_SCSI_RESET * @arg: pointer to int containing SG_SCSI_RESET_{DEVICE,BUS,HOST} * @ndelay: file mode O_NDELAY flag */ int scsi_nonblockable_ioctl(struct scsi_device *sdev, int cmd, void __user *arg, int ndelay) { int val, result; /* The first set of iocts may be executed even if we're doing * error processing, as long as the device was opened * non-blocking */ if (ndelay) { if (scsi_host_in_recovery(sdev->host)) return -ENODEV; } else if (!scsi_block_when_processing_errors(sdev)) return -ENODEV; switch (cmd) { case SG_SCSI_RESET: result = get_user(val, (int __user *)arg); if (result) return result; if (val == SG_SCSI_RESET_NOTHING) return 0; switch (val) { case SG_SCSI_RESET_DEVICE: val = SCSI_TRY_RESET_DEVICE; break; case SG_SCSI_RESET_TARGET: val = SCSI_TRY_RESET_TARGET; break; case SG_SCSI_RESET_BUS: val = SCSI_TRY_RESET_BUS; break; case SG_SCSI_RESET_HOST: val = SCSI_TRY_RESET_HOST; break; default: return -EINVAL; } if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO)) return -EACCES; return (scsi_reset_provider(sdev, val) == SUCCESS) ? 0 : -EIO; } return -ENODEV; } EXPORT_SYMBOL(scsi_nonblockable_ioctl);
gpl-2.0
Envious-Data/shinano-sirius_msm8974abpro
arch/sh/kernel/cpu/sh4a/ubc.c
13242
2998
/* * arch/sh/kernel/cpu/sh4a/ubc.c * * On-chip UBC support for SH-4A CPUs. * * Copyright (C) 2009 - 2010 Paul Mundt * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ #include <linux/init.h> #include <linux/err.h> #include <linux/clk.h> #include <linux/io.h> #include <asm/hw_breakpoint.h> #define UBC_CBR(idx) (0xff200000 + (0x20 * idx)) #define UBC_CRR(idx) (0xff200004 + (0x20 * idx)) #define UBC_CAR(idx) (0xff200008 + (0x20 * idx)) #define UBC_CAMR(idx) (0xff20000c + (0x20 * idx)) #define UBC_CCMFR 0xff200600 #define UBC_CBCR 0xff200620 /* CRR */ #define UBC_CRR_PCB (1 << 1) #define UBC_CRR_BIE (1 << 0) /* CBR */ #define UBC_CBR_CE (1 << 0) static struct sh_ubc sh4a_ubc; static void sh4a_ubc_enable(struct arch_hw_breakpoint *info, int idx) { __raw_writel(UBC_CBR_CE | info->len | info->type, UBC_CBR(idx)); __raw_writel(info->address, UBC_CAR(idx)); } static void sh4a_ubc_disable(struct arch_hw_breakpoint *info, int idx) { __raw_writel(0, UBC_CBR(idx)); __raw_writel(0, UBC_CAR(idx)); } static void sh4a_ubc_enable_all(unsigned long mask) { int i; for (i = 0; i < sh4a_ubc.num_events; i++) if (mask & (1 << i)) __raw_writel(__raw_readl(UBC_CBR(i)) | UBC_CBR_CE, UBC_CBR(i)); } static void sh4a_ubc_disable_all(void) { int i; for (i = 0; i < sh4a_ubc.num_events; i++) __raw_writel(__raw_readl(UBC_CBR(i)) & ~UBC_CBR_CE, UBC_CBR(i)); } static unsigned long sh4a_ubc_active_mask(void) { unsigned long active = 0; int i; for (i = 0; i < sh4a_ubc.num_events; i++) if (__raw_readl(UBC_CBR(i)) & UBC_CBR_CE) active |= (1 << i); return active; } static unsigned long sh4a_ubc_triggered_mask(void) { return __raw_readl(UBC_CCMFR); } static void sh4a_ubc_clear_triggered_mask(unsigned long mask) { __raw_writel(__raw_readl(UBC_CCMFR) & ~mask, UBC_CCMFR); } static struct sh_ubc sh4a_ubc = { .name = "SH-4A", .num_events = 2, .trap_nr = 0x1e0, .enable = sh4a_ubc_enable, 
.disable = sh4a_ubc_disable, .enable_all = sh4a_ubc_enable_all, .disable_all = sh4a_ubc_disable_all, .active_mask = sh4a_ubc_active_mask, .triggered_mask = sh4a_ubc_triggered_mask, .clear_triggered_mask = sh4a_ubc_clear_triggered_mask, }; static int __init sh4a_ubc_init(void) { struct clk *ubc_iclk = clk_get(NULL, "ubc0"); int i; /* * The UBC MSTP bit is optional, as not all platforms will have * it. Just ignore it if we can't find it. */ if (IS_ERR(ubc_iclk)) ubc_iclk = NULL; clk_enable(ubc_iclk); __raw_writel(0, UBC_CBCR); for (i = 0; i < sh4a_ubc.num_events; i++) { __raw_writel(0, UBC_CAMR(i)); __raw_writel(0, UBC_CBR(i)); __raw_writel(UBC_CRR_BIE | UBC_CRR_PCB, UBC_CRR(i)); /* dummy read for write posting */ (void)__raw_readl(UBC_CRR(i)); } clk_disable(ubc_iclk); sh4a_ubc.clk = ubc_iclk; return register_sh_ubc(&sh4a_ubc); } arch_initcall(sh4a_ubc_init);
gpl-2.0
namagi/android_kernel_motorola_msm8960-common
arch/sh/kernel/cpu/sh4a/ubc.c
13242
2998
/* * arch/sh/kernel/cpu/sh4a/ubc.c * * On-chip UBC support for SH-4A CPUs. * * Copyright (C) 2009 - 2010 Paul Mundt * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ #include <linux/init.h> #include <linux/err.h> #include <linux/clk.h> #include <linux/io.h> #include <asm/hw_breakpoint.h> #define UBC_CBR(idx) (0xff200000 + (0x20 * idx)) #define UBC_CRR(idx) (0xff200004 + (0x20 * idx)) #define UBC_CAR(idx) (0xff200008 + (0x20 * idx)) #define UBC_CAMR(idx) (0xff20000c + (0x20 * idx)) #define UBC_CCMFR 0xff200600 #define UBC_CBCR 0xff200620 /* CRR */ #define UBC_CRR_PCB (1 << 1) #define UBC_CRR_BIE (1 << 0) /* CBR */ #define UBC_CBR_CE (1 << 0) static struct sh_ubc sh4a_ubc; static void sh4a_ubc_enable(struct arch_hw_breakpoint *info, int idx) { __raw_writel(UBC_CBR_CE | info->len | info->type, UBC_CBR(idx)); __raw_writel(info->address, UBC_CAR(idx)); } static void sh4a_ubc_disable(struct arch_hw_breakpoint *info, int idx) { __raw_writel(0, UBC_CBR(idx)); __raw_writel(0, UBC_CAR(idx)); } static void sh4a_ubc_enable_all(unsigned long mask) { int i; for (i = 0; i < sh4a_ubc.num_events; i++) if (mask & (1 << i)) __raw_writel(__raw_readl(UBC_CBR(i)) | UBC_CBR_CE, UBC_CBR(i)); } static void sh4a_ubc_disable_all(void) { int i; for (i = 0; i < sh4a_ubc.num_events; i++) __raw_writel(__raw_readl(UBC_CBR(i)) & ~UBC_CBR_CE, UBC_CBR(i)); } static unsigned long sh4a_ubc_active_mask(void) { unsigned long active = 0; int i; for (i = 0; i < sh4a_ubc.num_events; i++) if (__raw_readl(UBC_CBR(i)) & UBC_CBR_CE) active |= (1 << i); return active; } static unsigned long sh4a_ubc_triggered_mask(void) { return __raw_readl(UBC_CCMFR); } static void sh4a_ubc_clear_triggered_mask(unsigned long mask) { __raw_writel(__raw_readl(UBC_CCMFR) & ~mask, UBC_CCMFR); } static struct sh_ubc sh4a_ubc = { .name = "SH-4A", .num_events = 2, .trap_nr = 0x1e0, .enable = sh4a_ubc_enable, 
.disable = sh4a_ubc_disable, .enable_all = sh4a_ubc_enable_all, .disable_all = sh4a_ubc_disable_all, .active_mask = sh4a_ubc_active_mask, .triggered_mask = sh4a_ubc_triggered_mask, .clear_triggered_mask = sh4a_ubc_clear_triggered_mask, }; static int __init sh4a_ubc_init(void) { struct clk *ubc_iclk = clk_get(NULL, "ubc0"); int i; /* * The UBC MSTP bit is optional, as not all platforms will have * it. Just ignore it if we can't find it. */ if (IS_ERR(ubc_iclk)) ubc_iclk = NULL; clk_enable(ubc_iclk); __raw_writel(0, UBC_CBCR); for (i = 0; i < sh4a_ubc.num_events; i++) { __raw_writel(0, UBC_CAMR(i)); __raw_writel(0, UBC_CBR(i)); __raw_writel(UBC_CRR_BIE | UBC_CRR_PCB, UBC_CRR(i)); /* dummy read for write posting */ (void)__raw_readl(UBC_CRR(i)); } clk_disable(ubc_iclk); sh4a_ubc.clk = ubc_iclk; return register_sh_ubc(&sh4a_ubc); } arch_initcall(sh4a_ubc_init);
gpl-2.0
FireLord1/android_kernel_motorola_msm8916
drivers/usb/serial/ark3116.c
1979
21804
/* * Copyright (C) 2009 by Bart Hartgers (bart.hartgers+ark3116@gmail.com) * Original version: * Copyright (C) 2006 * Simon Schulz (ark3116_driver <at> auctionant.de) * * ark3116 * - implements a driver for the arkmicro ark3116 chipset (vendor=0x6547, * productid=0x0232) (used in a datacable called KQ-U8A) * * Supports full modem status lines, break, hardware flow control. Does not * support software flow control, since I do not know how to enable it in hw. * * This driver is a essentially new implementation. I initially dug * into the old ark3116.c driver and suddenly realized the ark3116 is * a 16450 with a USB interface glued to it. See comments at the * bottom of this file. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/ioctl.h> #include <linux/tty.h> #include <linux/slab.h> #include <linux/tty_flip.h> #include <linux/module.h> #include <linux/usb.h> #include <linux/usb/serial.h> #include <linux/serial.h> #include <linux/serial_reg.h> #include <linux/uaccess.h> #include <linux/mutex.h> #include <linux/spinlock.h> #define DRIVER_AUTHOR "Bart Hartgers <bart.hartgers+ark3116@gmail.com>" #define DRIVER_DESC "USB ARK3116 serial/IrDA driver" #define DRIVER_DEV_DESC "ARK3116 RS232/IrDA" #define DRIVER_NAME "ark3116" /* usb timeout of 1 second */ #define ARK_TIMEOUT 1000 static const struct usb_device_id id_table[] = { { USB_DEVICE(0x6547, 0x0232) }, { USB_DEVICE(0x18ec, 0x3118) }, /* USB to IrDA adapter */ { }, }; MODULE_DEVICE_TABLE(usb, id_table); static int is_irda(struct usb_serial *serial) { struct usb_device *dev = serial->dev; if (le16_to_cpu(dev->descriptor.idVendor) == 0x18ec && le16_to_cpu(dev->descriptor.idProduct) == 0x3118) return 1; return 0; } struct ark3116_private { int irda; /* 1 for 
irda device */ /* protects hw register updates */ struct mutex hw_lock; int quot; /* baudrate divisor */ __u32 lcr; /* line control register value */ __u32 hcr; /* handshake control register (0x8) * value */ __u32 mcr; /* modem contol register value */ /* protects the status values below */ spinlock_t status_lock; __u32 msr; /* modem status register value */ __u32 lsr; /* line status register value */ }; static int ark3116_write_reg(struct usb_serial *serial, unsigned reg, __u8 val) { int result; /* 0xfe 0x40 are magic values taken from original driver */ result = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0), 0xfe, 0x40, val, reg, NULL, 0, ARK_TIMEOUT); return result; } static int ark3116_read_reg(struct usb_serial *serial, unsigned reg, unsigned char *buf) { int result; /* 0xfe 0xc0 are magic values taken from original driver */ result = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0), 0xfe, 0xc0, 0, reg, buf, 1, ARK_TIMEOUT); if (result < 0) return result; else return buf[0]; } static inline int calc_divisor(int bps) { /* Original ark3116 made some exceptions in rounding here * because windows did the same. Assume that is not really * necessary. * Crystal is 12MHz, probably because of USB, but we divide by 4? 
*/ return (12000000 + 2*bps) / (4*bps); } static int ark3116_attach(struct usb_serial *serial) { /* make sure we have our end-points */ if ((serial->num_bulk_in == 0) || (serial->num_bulk_out == 0) || (serial->num_interrupt_in == 0)) { dev_err(&serial->dev->dev, "%s - missing endpoint - " "bulk in: %d, bulk out: %d, int in %d\n", KBUILD_MODNAME, serial->num_bulk_in, serial->num_bulk_out, serial->num_interrupt_in); return -EINVAL; } return 0; } static int ark3116_port_probe(struct usb_serial_port *port) { struct usb_serial *serial = port->serial; struct ark3116_private *priv; priv = kzalloc(sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; mutex_init(&priv->hw_lock); spin_lock_init(&priv->status_lock); priv->irda = is_irda(serial); usb_set_serial_port_data(port, priv); /* setup the hardware */ ark3116_write_reg(serial, UART_IER, 0); /* disable DMA */ ark3116_write_reg(serial, UART_FCR, 0); /* handshake control */ priv->hcr = 0; ark3116_write_reg(serial, 0x8 , 0); /* modem control */ priv->mcr = 0; ark3116_write_reg(serial, UART_MCR, 0); if (!(priv->irda)) { ark3116_write_reg(serial, 0xb , 0); } else { ark3116_write_reg(serial, 0xb , 1); ark3116_write_reg(serial, 0xc , 0); ark3116_write_reg(serial, 0xd , 0x41); ark3116_write_reg(serial, 0xa , 1); } /* setup baudrate */ ark3116_write_reg(serial, UART_LCR, UART_LCR_DLAB); /* setup for 9600 8N1 */ priv->quot = calc_divisor(9600); ark3116_write_reg(serial, UART_DLL, priv->quot & 0xff); ark3116_write_reg(serial, UART_DLM, (priv->quot>>8) & 0xff); priv->lcr = UART_LCR_WLEN8; ark3116_write_reg(serial, UART_LCR, UART_LCR_WLEN8); ark3116_write_reg(serial, 0xe, 0); if (priv->irda) ark3116_write_reg(serial, 0x9, 0); dev_info(&serial->dev->dev, "%s using %s mode\n", KBUILD_MODNAME, priv->irda ? 
"IrDA" : "RS232"); return 0; } static int ark3116_port_remove(struct usb_serial_port *port) { struct ark3116_private *priv = usb_get_serial_port_data(port); /* device is closed, so URBs and DMA should be down */ mutex_destroy(&priv->hw_lock); kfree(priv); return 0; } static void ark3116_init_termios(struct tty_struct *tty) { struct ktermios *termios = &tty->termios; *termios = tty_std_termios; termios->c_cflag = B9600 | CS8 | CREAD | HUPCL | CLOCAL; termios->c_ispeed = 9600; termios->c_ospeed = 9600; } static void ark3116_set_termios(struct tty_struct *tty, struct usb_serial_port *port, struct ktermios *old_termios) { struct usb_serial *serial = port->serial; struct ark3116_private *priv = usb_get_serial_port_data(port); struct ktermios *termios = &tty->termios; unsigned int cflag = termios->c_cflag; int bps = tty_get_baud_rate(tty); int quot; __u8 lcr, hcr, eval; /* set data bit count */ switch (cflag & CSIZE) { case CS5: lcr = UART_LCR_WLEN5; break; case CS6: lcr = UART_LCR_WLEN6; break; case CS7: lcr = UART_LCR_WLEN7; break; default: case CS8: lcr = UART_LCR_WLEN8; break; } if (cflag & CSTOPB) lcr |= UART_LCR_STOP; if (cflag & PARENB) lcr |= UART_LCR_PARITY; if (!(cflag & PARODD)) lcr |= UART_LCR_EPAR; #ifdef CMSPAR if (cflag & CMSPAR) lcr |= UART_LCR_SPAR; #endif /* handshake control */ hcr = (cflag & CRTSCTS) ? 
0x03 : 0x00; /* calc baudrate */ dev_dbg(&port->dev, "%s - setting bps to %d\n", __func__, bps); eval = 0; switch (bps) { case 0: quot = calc_divisor(9600); break; default: if ((bps < 75) || (bps > 3000000)) bps = 9600; quot = calc_divisor(bps); break; case 460800: eval = 1; quot = calc_divisor(bps); break; case 921600: eval = 2; quot = calc_divisor(bps); break; } /* Update state: synchronize */ mutex_lock(&priv->hw_lock); /* keep old LCR_SBC bit */ lcr |= (priv->lcr & UART_LCR_SBC); dev_dbg(&port->dev, "%s - setting hcr:0x%02x,lcr:0x%02x,quot:%d\n", __func__, hcr, lcr, quot); /* handshake control */ if (priv->hcr != hcr) { priv->hcr = hcr; ark3116_write_reg(serial, 0x8, hcr); } /* baudrate */ if (priv->quot != quot) { priv->quot = quot; priv->lcr = lcr; /* need to write lcr anyway */ /* disable DMA since transmit/receive is * shadowed by UART_DLL */ ark3116_write_reg(serial, UART_FCR, 0); ark3116_write_reg(serial, UART_LCR, lcr|UART_LCR_DLAB); ark3116_write_reg(serial, UART_DLL, quot & 0xff); ark3116_write_reg(serial, UART_DLM, (quot>>8) & 0xff); /* restore lcr */ ark3116_write_reg(serial, UART_LCR, lcr); /* magic baudrate thingy: not sure what it does, * but windows does this as well. 
*/ ark3116_write_reg(serial, 0xe, eval); /* enable DMA */ ark3116_write_reg(serial, UART_FCR, UART_FCR_DMA_SELECT); } else if (priv->lcr != lcr) { priv->lcr = lcr; ark3116_write_reg(serial, UART_LCR, lcr); } mutex_unlock(&priv->hw_lock); /* check for software flow control */ if (I_IXOFF(tty) || I_IXON(tty)) { dev_warn(&serial->dev->dev, "%s: don't know how to do software flow control\n", KBUILD_MODNAME); } /* Don't rewrite B0 */ if (tty_termios_baud_rate(termios)) tty_termios_encode_baud_rate(termios, bps, bps); } static void ark3116_close(struct usb_serial_port *port) { struct usb_serial *serial = port->serial; /* disable DMA */ ark3116_write_reg(serial, UART_FCR, 0); /* deactivate interrupts */ ark3116_write_reg(serial, UART_IER, 0); usb_serial_generic_close(port); if (serial->num_interrupt_in) usb_kill_urb(port->interrupt_in_urb); } static int ark3116_open(struct tty_struct *tty, struct usb_serial_port *port) { struct ark3116_private *priv = usb_get_serial_port_data(port); struct usb_serial *serial = port->serial; unsigned char *buf; int result; buf = kmalloc(1, GFP_KERNEL); if (buf == NULL) return -ENOMEM; result = usb_serial_generic_open(tty, port); if (result) { dev_dbg(&port->dev, "%s - usb_serial_generic_open failed: %d\n", __func__, result); goto err_out; } /* remove any data still left: also clears error state */ ark3116_read_reg(serial, UART_RX, buf); /* read modem status */ priv->msr = ark3116_read_reg(serial, UART_MSR, buf); /* read line status */ priv->lsr = ark3116_read_reg(serial, UART_LSR, buf); result = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL); if (result) { dev_err(&port->dev, "submit irq_in urb failed %d\n", result); ark3116_close(port); goto err_out; } /* activate interrupts */ ark3116_write_reg(port->serial, UART_IER, UART_IER_MSI|UART_IER_RLSI); /* enable DMA */ ark3116_write_reg(port->serial, UART_FCR, UART_FCR_DMA_SELECT); /* setup termios */ if (tty) ark3116_set_termios(tty, port, NULL); err_out: kfree(buf); return result; } 
static int ark3116_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg) { struct usb_serial_port *port = tty->driver_data; struct serial_struct serstruct; void __user *user_arg = (void __user *)arg; switch (cmd) { case TIOCGSERIAL: /* XXX: Some of these values are probably wrong. */ memset(&serstruct, 0, sizeof(serstruct)); serstruct.type = PORT_16654; serstruct.line = port->serial->minor; serstruct.port = port->number; serstruct.custom_divisor = 0; serstruct.baud_base = 460800; if (copy_to_user(user_arg, &serstruct, sizeof(serstruct))) return -EFAULT; return 0; case TIOCSSERIAL: if (copy_from_user(&serstruct, user_arg, sizeof(serstruct))) return -EFAULT; return 0; } return -ENOIOCTLCMD; } static int ark3116_tiocmget(struct tty_struct *tty) { struct usb_serial_port *port = tty->driver_data; struct ark3116_private *priv = usb_get_serial_port_data(port); __u32 status; __u32 ctrl; unsigned long flags; mutex_lock(&priv->hw_lock); ctrl = priv->mcr; mutex_unlock(&priv->hw_lock); spin_lock_irqsave(&priv->status_lock, flags); status = priv->msr; spin_unlock_irqrestore(&priv->status_lock, flags); return (status & UART_MSR_DSR ? TIOCM_DSR : 0) | (status & UART_MSR_CTS ? TIOCM_CTS : 0) | (status & UART_MSR_RI ? TIOCM_RI : 0) | (status & UART_MSR_DCD ? TIOCM_CD : 0) | (ctrl & UART_MCR_DTR ? TIOCM_DTR : 0) | (ctrl & UART_MCR_RTS ? TIOCM_RTS : 0) | (ctrl & UART_MCR_OUT1 ? TIOCM_OUT1 : 0) | (ctrl & UART_MCR_OUT2 ? 
TIOCM_OUT2 : 0); } static int ark3116_tiocmset(struct tty_struct *tty, unsigned set, unsigned clr) { struct usb_serial_port *port = tty->driver_data; struct ark3116_private *priv = usb_get_serial_port_data(port); /* we need to take the mutex here, to make sure that the value * in priv->mcr is actually the one that is in the hardware */ mutex_lock(&priv->hw_lock); if (set & TIOCM_RTS) priv->mcr |= UART_MCR_RTS; if (set & TIOCM_DTR) priv->mcr |= UART_MCR_DTR; if (set & TIOCM_OUT1) priv->mcr |= UART_MCR_OUT1; if (set & TIOCM_OUT2) priv->mcr |= UART_MCR_OUT2; if (clr & TIOCM_RTS) priv->mcr &= ~UART_MCR_RTS; if (clr & TIOCM_DTR) priv->mcr &= ~UART_MCR_DTR; if (clr & TIOCM_OUT1) priv->mcr &= ~UART_MCR_OUT1; if (clr & TIOCM_OUT2) priv->mcr &= ~UART_MCR_OUT2; ark3116_write_reg(port->serial, UART_MCR, priv->mcr); mutex_unlock(&priv->hw_lock); return 0; } static void ark3116_break_ctl(struct tty_struct *tty, int break_state) { struct usb_serial_port *port = tty->driver_data; struct ark3116_private *priv = usb_get_serial_port_data(port); /* LCR is also used for other things: protect access */ mutex_lock(&priv->hw_lock); if (break_state) priv->lcr |= UART_LCR_SBC; else priv->lcr &= ~UART_LCR_SBC; ark3116_write_reg(port->serial, UART_LCR, priv->lcr); mutex_unlock(&priv->hw_lock); } static void ark3116_update_msr(struct usb_serial_port *port, __u8 msr) { struct ark3116_private *priv = usb_get_serial_port_data(port); unsigned long flags; spin_lock_irqsave(&priv->status_lock, flags); priv->msr = msr; spin_unlock_irqrestore(&priv->status_lock, flags); if (msr & UART_MSR_ANY_DELTA) { /* update input line counters */ if (msr & UART_MSR_DCTS) port->icount.cts++; if (msr & UART_MSR_DDSR) port->icount.dsr++; if (msr & UART_MSR_DDCD) port->icount.dcd++; if (msr & UART_MSR_TERI) port->icount.rng++; wake_up_interruptible(&port->port.delta_msr_wait); } } static void ark3116_update_lsr(struct usb_serial_port *port, __u8 lsr) { struct ark3116_private *priv = usb_get_serial_port_data(port); 
unsigned long flags; spin_lock_irqsave(&priv->status_lock, flags); /* combine bits */ priv->lsr |= lsr; spin_unlock_irqrestore(&priv->status_lock, flags); if (lsr&UART_LSR_BRK_ERROR_BITS) { if (lsr & UART_LSR_BI) port->icount.brk++; if (lsr & UART_LSR_FE) port->icount.frame++; if (lsr & UART_LSR_PE) port->icount.parity++; if (lsr & UART_LSR_OE) port->icount.overrun++; } } static void ark3116_read_int_callback(struct urb *urb) { struct usb_serial_port *port = urb->context; int status = urb->status; const __u8 *data = urb->transfer_buffer; int result; switch (status) { case -ECONNRESET: case -ENOENT: case -ESHUTDOWN: /* this urb is terminated, clean up */ dev_dbg(&port->dev, "%s - urb shutting down with status: %d\n", __func__, status); return; default: dev_dbg(&port->dev, "%s - nonzero urb status received: %d\n", __func__, status); break; case 0: /* success */ /* discovered this by trail and error... */ if ((urb->actual_length == 4) && (data[0] == 0xe8)) { const __u8 id = data[1]&UART_IIR_ID; dev_dbg(&port->dev, "%s: iir=%02x\n", __func__, data[1]); if (id == UART_IIR_MSI) { dev_dbg(&port->dev, "%s: msr=%02x\n", __func__, data[3]); ark3116_update_msr(port, data[3]); break; } else if (id == UART_IIR_RLSI) { dev_dbg(&port->dev, "%s: lsr=%02x\n", __func__, data[2]); ark3116_update_lsr(port, data[2]); break; } } /* * Not sure what this data meant... */ usb_serial_debug_data(&port->dev, __func__, urb->actual_length, urb->transfer_buffer); break; } result = usb_submit_urb(urb, GFP_ATOMIC); if (result) dev_err(&urb->dev->dev, "%s - Error %d submitting interrupt urb\n", __func__, result); } /* Data comes in via the bulk (data) URB, erors/interrupts via the int URB. * This means that we cannot be sure which data byte has an associated error * condition, so we report an error for all data in the next bulk read. 
* * Actually, there might even be a window between the bulk data leaving the * ark and reading/resetting the lsr in the read_bulk_callback where an * interrupt for the next data block could come in. * Without somekind of ordering on the ark, we would have to report the * error for the next block of data as well... * For now, let's pretend this can't happen. */ static void ark3116_process_read_urb(struct urb *urb) { struct usb_serial_port *port = urb->context; struct ark3116_private *priv = usb_get_serial_port_data(port); unsigned char *data = urb->transfer_buffer; char tty_flag = TTY_NORMAL; unsigned long flags; __u32 lsr; /* update line status */ spin_lock_irqsave(&priv->status_lock, flags); lsr = priv->lsr; priv->lsr &= ~UART_LSR_BRK_ERROR_BITS; spin_unlock_irqrestore(&priv->status_lock, flags); if (!urb->actual_length) return; if (lsr & UART_LSR_BRK_ERROR_BITS) { if (lsr & UART_LSR_BI) tty_flag = TTY_BREAK; else if (lsr & UART_LSR_PE) tty_flag = TTY_PARITY; else if (lsr & UART_LSR_FE) tty_flag = TTY_FRAME; /* overrun is special, not associated with a char */ if (lsr & UART_LSR_OE) tty_insert_flip_char(&port->port, 0, TTY_OVERRUN); } tty_insert_flip_string_fixed_flag(&port->port, data, tty_flag, urb->actual_length); tty_flip_buffer_push(&port->port); } static struct usb_serial_driver ark3116_device = { .driver = { .owner = THIS_MODULE, .name = "ark3116", }, .id_table = id_table, .num_ports = 1, .attach = ark3116_attach, .port_probe = ark3116_port_probe, .port_remove = ark3116_port_remove, .set_termios = ark3116_set_termios, .init_termios = ark3116_init_termios, .ioctl = ark3116_ioctl, .tiocmget = ark3116_tiocmget, .tiocmset = ark3116_tiocmset, .tiocmiwait = usb_serial_generic_tiocmiwait, .get_icount = usb_serial_generic_get_icount, .open = ark3116_open, .close = ark3116_close, .break_ctl = ark3116_break_ctl, .read_int_callback = ark3116_read_int_callback, .process_read_urb = ark3116_process_read_urb, }; static struct usb_serial_driver * const serial_drivers[] = { 
&ark3116_device, NULL }; module_usb_serial_driver(serial_drivers, id_table); MODULE_LICENSE("GPL"); MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); /* * The following describes what I learned from studying the old * ark3116.c driver, disassembling the windows driver, and some lucky * guesses. Since I do not have any datasheet or other * documentation, inaccuracies are almost guaranteed. * * Some specs for the ARK3116 can be found here: * http://web.archive.org/web/20060318000438/ * www.arkmicro.com/en/products/view.php?id=10 * On that page, 2 GPIO pins are mentioned: I assume these are the * OUT1 and OUT2 pins of the UART, so I added support for those * through the MCR. Since the pins are not available on my hardware, * I could not verify this. * Also, it states there is "on-chip hardware flow control". I have * discovered how to enable that. Unfortunately, I do not know how to * enable XON/XOFF (software) flow control, which would need support * from the chip as well to work. Because of the wording on the web * page there is a real possibility the chip simply does not support * software flow control. * * I got my ark3116 as part of a mobile phone adapter cable. On the * PCB, the following numbered contacts are present: * * 1:- +5V * 2:o DTR * 3:i RX * 4:i DCD * 5:o RTS * 6:o TX * 7:i RI * 8:i DSR * 10:- 0V * 11:i CTS * * On my chip, all signals seem to be 3.3V, but 5V tolerant. But that * may be different for the one you have ;-). * * The windows driver limits the registers to 0-F, so I assume there * are actually 16 present on the device. * * On an UART interrupt, 4 bytes of data come in on the interrupt * endpoint. The bytes are 0xe8 IIR LSR MSR. * * The baudrate seems to be generated from the 12MHz crystal, using * 4-times subsampling. So quot=12e6/(4*baud). Also see description * of register E. * * Registers 0-7: * These seem to be the same as for a regular 16450. 
The FCR is set * to UART_FCR_DMA_SELECT (0x8), I guess to enable transfers between * the UART and the USB bridge/DMA engine. * * Register 8: * By trial and error, I found out that bit 0 enables hardware CTS, * stopping TX when CTS is +5V. Bit 1 does the same for RTS, making * RTS +5V when the 3116 cannot transfer the data to the USB bus * (verified by disabling the reading URB). Note that as far as I can * tell, the windows driver does NOT use this, so there might be some * hardware bug or something. * * According to a patch provided here * (http://lkml.org/lkml/2009/7/26/56), the ARK3116 can also be used * as an IrDA dongle. Since I do not have such a thing, I could not * investigate that aspect. However, I can speculate ;-). * * - IrDA encodes data differently than RS232. Most likely, one of * the bits in registers 9..E enables the IR ENDEC (encoder/decoder). * - Depending on the IR transceiver, the input and output need to be * inverted, so there are probably bits for that as well. * - IrDA is half-duplex, so there should be a bit for selecting that. * * This still leaves at least two registers unaccounted for. Perhaps * The chip can do XON/XOFF or CRC in HW? * * Register 9: * Set to 0x00 for IrDA, when the baudrate is initialised. * * Register A: * Set to 0x01 for IrDA, at init. * * Register B: * Set to 0x01 for IrDA, 0x00 for RS232, at init. * * Register C: * Set to 00 for IrDA, at init. * * Register D: * Set to 0x41 for IrDA, at init. * * Register E: * Somekind of baudrate override. The windows driver seems to set * this to 0x00 for normal baudrates, 0x01 for 460800, 0x02 for 921600. * Since 460800 and 921600 cannot be obtained by dividing 3MHz by an integer, * it could be somekind of subdivisor thingy. * However,it does not seem to do anything: selecting 921600 (divisor 3, * reg E=2), still gets 1 MHz. I also checked if registers 9, C or F would * work, but they don't. * * Register F: unknown */
gpl-2.0
dohclude/Chucky-LINARO-DragunBall
drivers/mfd/twl4030-madc.c
2235
22294
/* * * TWL4030 MADC module driver-This driver monitors the real time * conversion of analog signals like battery temperature, * battery type, battery level etc. * * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/ * J Keerthy <j-keerthy@ti.com> * * Based on twl4030-madc.c * Copyright (C) 2008 Nokia Corporation * Mikko Ylinen <mikko.k.ylinen@nokia.com> * * Amit Kucheria <amit.kucheria@canonical.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA * 02110-1301 USA * */ #include <linux/init.h> #include <linux/device.h> #include <linux/interrupt.h> #include <linux/kernel.h> #include <linux/delay.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/i2c/twl.h> #include <linux/i2c/twl4030-madc.h> #include <linux/module.h> #include <linux/stddef.h> #include <linux/mutex.h> #include <linux/bitops.h> #include <linux/jiffies.h> #include <linux/types.h> #include <linux/gfp.h> #include <linux/err.h> /* * struct twl4030_madc_data - a container for madc info * @dev - pointer to device structure for madc * @lock - mutex protecting this data structure * @requests - Array of request struct corresponding to SW1, SW2 and RT * @imr - Interrupt mask register of MADC * @isr - Interrupt status register of MADC */ struct twl4030_madc_data { struct device *dev; struct mutex lock; /* mutex protecting this data structure */ struct twl4030_madc_request 
requests[TWL4030_MADC_NUM_METHODS]; int imr; int isr; }; static struct twl4030_madc_data *twl4030_madc; struct twl4030_prescale_divider_ratios { s16 numerator; s16 denominator; }; static const struct twl4030_prescale_divider_ratios twl4030_divider_ratios[16] = { {1, 1}, /* CHANNEL 0 No Prescaler */ {1, 1}, /* CHANNEL 1 No Prescaler */ {6, 10}, /* CHANNEL 2 */ {6, 10}, /* CHANNEL 3 */ {6, 10}, /* CHANNEL 4 */ {6, 10}, /* CHANNEL 5 */ {6, 10}, /* CHANNEL 6 */ {6, 10}, /* CHANNEL 7 */ {3, 14}, /* CHANNEL 8 */ {1, 3}, /* CHANNEL 9 */ {1, 1}, /* CHANNEL 10 No Prescaler */ {15, 100}, /* CHANNEL 11 */ {1, 4}, /* CHANNEL 12 */ {1, 1}, /* CHANNEL 13 Reserved channels */ {1, 1}, /* CHANNEL 14 Reseved channels */ {5, 11}, /* CHANNEL 15 */ }; /* * Conversion table from -3 to 55 degree Celcius */ static int therm_tbl[] = { 30800, 29500, 28300, 27100, 26000, 24900, 23900, 22900, 22000, 21100, 20300, 19400, 18700, 17900, 17200, 16500, 15900, 15300, 14700, 14100, 13600, 13100, 12600, 12100, 11600, 11200, 10800, 10400, 10000, 9630, 9280, 8950, 8620, 8310, 8020, 7730, 7460, 7200, 6950, 6710, 6470, 6250, 6040, 5830, 5640, 5450, 5260, 5090, 4920, 4760, 4600, 4450, 4310, 4170, 4040, 3910, 3790, 3670, 3550 }; /* * Structure containing the registers * of different conversion methods supported by MADC. * Hardware or RT real time conversion request initiated by external host * processor for RT Signal conversions. * External host processors can also request for non RT conversions * SW1 and SW2 software conversions also called asynchronous or GPC request. 
*/ static const struct twl4030_madc_conversion_method twl4030_conversion_methods[] = { [TWL4030_MADC_RT] = { .sel = TWL4030_MADC_RTSELECT_LSB, .avg = TWL4030_MADC_RTAVERAGE_LSB, .rbase = TWL4030_MADC_RTCH0_LSB, }, [TWL4030_MADC_SW1] = { .sel = TWL4030_MADC_SW1SELECT_LSB, .avg = TWL4030_MADC_SW1AVERAGE_LSB, .rbase = TWL4030_MADC_GPCH0_LSB, .ctrl = TWL4030_MADC_CTRL_SW1, }, [TWL4030_MADC_SW2] = { .sel = TWL4030_MADC_SW2SELECT_LSB, .avg = TWL4030_MADC_SW2AVERAGE_LSB, .rbase = TWL4030_MADC_GPCH0_LSB, .ctrl = TWL4030_MADC_CTRL_SW2, }, }; /* * Function to read a particular channel value. * @madc - pointer to struct twl4030_madc_data * @reg - lsb of ADC Channel * If the i2c read fails it returns an error else returns 0. */ static int twl4030_madc_channel_raw_read(struct twl4030_madc_data *madc, u8 reg) { u8 msb, lsb; int ret; /* * For each ADC channel, we have MSB and LSB register pair. MSB address * is always LSB address+1. reg parameter is the address of LSB register */ ret = twl_i2c_read_u8(TWL4030_MODULE_MADC, &msb, reg + 1); if (ret) { dev_err(madc->dev, "unable to read MSB register 0x%X\n", reg + 1); return ret; } ret = twl_i2c_read_u8(TWL4030_MODULE_MADC, &lsb, reg); if (ret) { dev_err(madc->dev, "unable to read LSB register 0x%X\n", reg); return ret; } return (int)(((msb << 8) | lsb) >> 6); } /* * Return battery temperature * Or < 0 on failure. 
*/ static int twl4030battery_temperature(int raw_volt) { u8 val; int temp, curr, volt, res, ret; volt = (raw_volt * TEMP_STEP_SIZE) / TEMP_PSR_R; /* Getting and calculating the supply current in micro ampers */ ret = twl_i2c_read_u8(TWL4030_MODULE_MAIN_CHARGE, &val, REG_BCICTL2); if (ret < 0) return ret; curr = ((val & TWL4030_BCI_ITHEN) + 1) * 10; /* Getting and calculating the thermistor resistance in ohms */ res = volt * 1000 / curr; /* calculating temperature */ for (temp = 58; temp >= 0; temp--) { int actual = therm_tbl[temp]; if ((actual - res) >= 0) break; } return temp + 1; } static int twl4030battery_current(int raw_volt) { int ret; u8 val; ret = twl_i2c_read_u8(TWL4030_MODULE_MAIN_CHARGE, &val, TWL4030_BCI_BCICTL1); if (ret) return ret; if (val & TWL4030_BCI_CGAIN) /* slope of 0.44 mV/mA */ return (raw_volt * CURR_STEP_SIZE) / CURR_PSR_R1; else /* slope of 0.88 mV/mA */ return (raw_volt * CURR_STEP_SIZE) / CURR_PSR_R2; } /* * Function to read channel values * @madc - pointer to twl4030_madc_data struct * @reg_base - Base address of the first channel * @Channels - 16 bit bitmap. If the bit is set, channel value is read * @buf - The channel values are stored here. if read fails error * value is stored * Returns the number of successfully read channels. 
*/ static int twl4030_madc_read_channels(struct twl4030_madc_data *madc, u8 reg_base, unsigned long channels, int *buf) { int count = 0, count_req = 0, i; u8 reg; for_each_set_bit(i, &channels, TWL4030_MADC_MAX_CHANNELS) { reg = reg_base + 2 * i; buf[i] = twl4030_madc_channel_raw_read(madc, reg); if (buf[i] < 0) { dev_err(madc->dev, "Unable to read register 0x%X\n", reg); count_req++; continue; } switch (i) { case 10: buf[i] = twl4030battery_current(buf[i]); if (buf[i] < 0) { dev_err(madc->dev, "err reading current\n"); count_req++; } else { count++; buf[i] = buf[i] - 750; } break; case 1: buf[i] = twl4030battery_temperature(buf[i]); if (buf[i] < 0) { dev_err(madc->dev, "err reading temperature\n"); count_req++; } else { buf[i] -= 3; count++; } break; default: count++; /* Analog Input (V) = conv_result * step_size / R * conv_result = decimal value of 10-bit conversion * result * step size = 1.5 / (2 ^ 10 -1) * R = Prescaler ratio for input channels. * Result given in mV hence multiplied by 1000. */ buf[i] = (buf[i] * 3 * 1000 * twl4030_divider_ratios[i].denominator) / (2 * 1023 * twl4030_divider_ratios[i].numerator); } } if (count_req) dev_err(madc->dev, "%d channel conversion failed\n", count_req); return count; } /* * Enables irq. * @madc - pointer to twl4030_madc_data struct * @id - irq number to be enabled * can take one of TWL4030_MADC_RT, TWL4030_MADC_SW1, TWL4030_MADC_SW2 * corresponding to RT, SW1, SW2 conversion requests. * If the i2c read fails it returns an error else returns 0. */ static int twl4030_madc_enable_irq(struct twl4030_madc_data *madc, u8 id) { u8 val; int ret; ret = twl_i2c_read_u8(TWL4030_MODULE_MADC, &val, madc->imr); if (ret) { dev_err(madc->dev, "unable to read imr register 0x%X\n", madc->imr); return ret; } val &= ~(1 << id); ret = twl_i2c_write_u8(TWL4030_MODULE_MADC, val, madc->imr); if (ret) { dev_err(madc->dev, "unable to write imr register 0x%X\n", madc->imr); return ret; } return 0; } /* * Disables irq. 
* @madc - pointer to twl4030_madc_data struct * @id - irq number to be disabled * can take one of TWL4030_MADC_RT, TWL4030_MADC_SW1, TWL4030_MADC_SW2 * corresponding to RT, SW1, SW2 conversion requests. * Returns error if i2c read/write fails. */ static int twl4030_madc_disable_irq(struct twl4030_madc_data *madc, u8 id) { u8 val; int ret; ret = twl_i2c_read_u8(TWL4030_MODULE_MADC, &val, madc->imr); if (ret) { dev_err(madc->dev, "unable to read imr register 0x%X\n", madc->imr); return ret; } val |= (1 << id); ret = twl_i2c_write_u8(TWL4030_MODULE_MADC, val, madc->imr); if (ret) { dev_err(madc->dev, "unable to write imr register 0x%X\n", madc->imr); return ret; } return 0; } static irqreturn_t twl4030_madc_threaded_irq_handler(int irq, void *_madc) { struct twl4030_madc_data *madc = _madc; const struct twl4030_madc_conversion_method *method; u8 isr_val, imr_val; int i, len, ret; struct twl4030_madc_request *r; mutex_lock(&madc->lock); ret = twl_i2c_read_u8(TWL4030_MODULE_MADC, &isr_val, madc->isr); if (ret) { dev_err(madc->dev, "unable to read isr register 0x%X\n", madc->isr); goto err_i2c; } ret = twl_i2c_read_u8(TWL4030_MODULE_MADC, &imr_val, madc->imr); if (ret) { dev_err(madc->dev, "unable to read imr register 0x%X\n", madc->imr); goto err_i2c; } isr_val &= ~imr_val; for (i = 0; i < TWL4030_MADC_NUM_METHODS; i++) { if (!(isr_val & (1 << i))) continue; ret = twl4030_madc_disable_irq(madc, i); if (ret < 0) dev_dbg(madc->dev, "Disable interrupt failed%d\n", i); madc->requests[i].result_pending = 1; } for (i = 0; i < TWL4030_MADC_NUM_METHODS; i++) { r = &madc->requests[i]; /* No pending results for this method, move to next one */ if (!r->result_pending) continue; method = &twl4030_conversion_methods[r->method]; /* Read results */ len = twl4030_madc_read_channels(madc, method->rbase, r->channels, r->rbuf); /* Return results to caller */ if (r->func_cb != NULL) { r->func_cb(len, r->channels, r->rbuf); r->func_cb = NULL; } /* Free request */ r->result_pending = 0; 
r->active = 0; } mutex_unlock(&madc->lock); return IRQ_HANDLED; err_i2c: /* * In case of error check whichever request is active * and service the same. */ for (i = 0; i < TWL4030_MADC_NUM_METHODS; i++) { r = &madc->requests[i]; if (r->active == 0) continue; method = &twl4030_conversion_methods[r->method]; /* Read results */ len = twl4030_madc_read_channels(madc, method->rbase, r->channels, r->rbuf); /* Return results to caller */ if (r->func_cb != NULL) { r->func_cb(len, r->channels, r->rbuf); r->func_cb = NULL; } /* Free request */ r->result_pending = 0; r->active = 0; } mutex_unlock(&madc->lock); return IRQ_HANDLED; } static int twl4030_madc_set_irq(struct twl4030_madc_data *madc, struct twl4030_madc_request *req) { struct twl4030_madc_request *p; int ret; p = &madc->requests[req->method]; memcpy(p, req, sizeof(*req)); ret = twl4030_madc_enable_irq(madc, req->method); if (ret < 0) { dev_err(madc->dev, "enable irq failed!!\n"); return ret; } return 0; } /* * Function which enables the madc conversion * by writing to the control register. * @madc - pointer to twl4030_madc_data struct * @conv_method - can be TWL4030_MADC_RT, TWL4030_MADC_SW2, TWL4030_MADC_SW1 * corresponding to RT SW1 or SW2 conversion methods. 
* Returns 0 if succeeds else a negative error value */ static int twl4030_madc_start_conversion(struct twl4030_madc_data *madc, int conv_method) { const struct twl4030_madc_conversion_method *method; int ret = 0; method = &twl4030_conversion_methods[conv_method]; switch (conv_method) { case TWL4030_MADC_SW1: case TWL4030_MADC_SW2: ret = twl_i2c_write_u8(TWL4030_MODULE_MADC, TWL4030_MADC_SW_START, method->ctrl); if (ret) { dev_err(madc->dev, "unable to write ctrl register 0x%X\n", method->ctrl); return ret; } break; default: break; } return 0; } /* * Function that waits for conversion to be ready * @madc - pointer to twl4030_madc_data struct * @timeout_ms - timeout value in milliseconds * @status_reg - ctrl register * returns 0 if succeeds else a negative error value */ static int twl4030_madc_wait_conversion_ready(struct twl4030_madc_data *madc, unsigned int timeout_ms, u8 status_reg) { unsigned long timeout; int ret; timeout = jiffies + msecs_to_jiffies(timeout_ms); do { u8 reg; ret = twl_i2c_read_u8(TWL4030_MODULE_MADC, &reg, status_reg); if (ret) { dev_err(madc->dev, "unable to read status register 0x%X\n", status_reg); return ret; } if (!(reg & TWL4030_MADC_BUSY) && (reg & TWL4030_MADC_EOC_SW)) return 0; usleep_range(500, 2000); } while (!time_after(jiffies, timeout)); dev_err(madc->dev, "conversion timeout!\n"); return -EAGAIN; } /* * An exported function which can be called from other kernel drivers. * @req twl4030_madc_request structure * req->rbuf will be filled with read values of channels based on the * channel index. If a particular channel reading fails there will * be a negative error value in the corresponding array element. 
* returns 0 if succeeds else error value */ int twl4030_madc_conversion(struct twl4030_madc_request *req) { const struct twl4030_madc_conversion_method *method; u8 ch_msb, ch_lsb; int ret; if (!req || !twl4030_madc) return -EINVAL; mutex_lock(&twl4030_madc->lock); if (req->method < TWL4030_MADC_RT || req->method > TWL4030_MADC_SW2) { ret = -EINVAL; goto out; } /* Do we have a conversion request ongoing */ if (twl4030_madc->requests[req->method].active) { ret = -EBUSY; goto out; } ch_msb = (req->channels >> 8) & 0xff; ch_lsb = req->channels & 0xff; method = &twl4030_conversion_methods[req->method]; /* Select channels to be converted */ ret = twl_i2c_write_u8(TWL4030_MODULE_MADC, ch_msb, method->sel + 1); if (ret) { dev_err(twl4030_madc->dev, "unable to write sel register 0x%X\n", method->sel + 1); goto out; } ret = twl_i2c_write_u8(TWL4030_MODULE_MADC, ch_lsb, method->sel); if (ret) { dev_err(twl4030_madc->dev, "unable to write sel register 0x%X\n", method->sel + 1); goto out; } /* Select averaging for all channels if do_avg is set */ if (req->do_avg) { ret = twl_i2c_write_u8(TWL4030_MODULE_MADC, ch_msb, method->avg + 1); if (ret) { dev_err(twl4030_madc->dev, "unable to write avg register 0x%X\n", method->avg + 1); goto out; } ret = twl_i2c_write_u8(TWL4030_MODULE_MADC, ch_lsb, method->avg); if (ret) { dev_err(twl4030_madc->dev, "unable to write sel reg 0x%X\n", method->sel + 1); goto out; } } if (req->type == TWL4030_MADC_IRQ_ONESHOT && req->func_cb != NULL) { ret = twl4030_madc_set_irq(twl4030_madc, req); if (ret < 0) goto out; ret = twl4030_madc_start_conversion(twl4030_madc, req->method); if (ret < 0) goto out; twl4030_madc->requests[req->method].active = 1; ret = 0; goto out; } /* With RT method we should not be here anymore */ if (req->method == TWL4030_MADC_RT) { ret = -EINVAL; goto out; } ret = twl4030_madc_start_conversion(twl4030_madc, req->method); if (ret < 0) goto out; twl4030_madc->requests[req->method].active = 1; /* Wait until conversion is ready 
(ctrl register returns EOC) */ ret = twl4030_madc_wait_conversion_ready(twl4030_madc, 5, method->ctrl); if (ret) { twl4030_madc->requests[req->method].active = 0; goto out; } ret = twl4030_madc_read_channels(twl4030_madc, method->rbase, req->channels, req->rbuf); twl4030_madc->requests[req->method].active = 0; out: mutex_unlock(&twl4030_madc->lock); return ret; } EXPORT_SYMBOL_GPL(twl4030_madc_conversion); /* * Return channel value * Or < 0 on failure. */ int twl4030_get_madc_conversion(int channel_no) { struct twl4030_madc_request req; int temp = 0; int ret; req.channels = (1 << channel_no); req.method = TWL4030_MADC_SW2; req.active = 0; req.func_cb = NULL; ret = twl4030_madc_conversion(&req); if (ret < 0) return ret; if (req.rbuf[channel_no] > 0) temp = req.rbuf[channel_no]; return temp; } EXPORT_SYMBOL_GPL(twl4030_get_madc_conversion); /* * Function to enable or disable bias current for * main battery type reading or temperature sensing * @madc - pointer to twl4030_madc_data struct * @chan - can be one of the two values * TWL4030_BCI_ITHEN - Enables bias current for main battery type reading * TWL4030_BCI_TYPEN - Enables bias current for main battery temperature * sensing * @on - enable or disable chan. */ static int twl4030_madc_set_current_generator(struct twl4030_madc_data *madc, int chan, int on) { int ret; u8 regval; ret = twl_i2c_read_u8(TWL4030_MODULE_MAIN_CHARGE, &regval, TWL4030_BCI_BCICTL1); if (ret) { dev_err(madc->dev, "unable to read BCICTL1 reg 0x%X", TWL4030_BCI_BCICTL1); return ret; } if (on) regval |= chan ? TWL4030_BCI_ITHEN : TWL4030_BCI_TYPEN; else regval &= chan ? 
~TWL4030_BCI_ITHEN : ~TWL4030_BCI_TYPEN; ret = twl_i2c_write_u8(TWL4030_MODULE_MAIN_CHARGE, regval, TWL4030_BCI_BCICTL1); if (ret) { dev_err(madc->dev, "unable to write BCICTL1 reg 0x%X\n", TWL4030_BCI_BCICTL1); return ret; } return 0; } /* * Function that sets MADC software power on bit to enable MADC * @madc - pointer to twl4030_madc_data struct * @on - Enable or disable MADC software powen on bit. * returns error if i2c read/write fails else 0 */ static int twl4030_madc_set_power(struct twl4030_madc_data *madc, int on) { u8 regval; int ret; ret = twl_i2c_read_u8(TWL4030_MODULE_MAIN_CHARGE, &regval, TWL4030_MADC_CTRL1); if (ret) { dev_err(madc->dev, "unable to read madc ctrl1 reg 0x%X\n", TWL4030_MADC_CTRL1); return ret; } if (on) regval |= TWL4030_MADC_MADCON; else regval &= ~TWL4030_MADC_MADCON; ret = twl_i2c_write_u8(TWL4030_MODULE_MADC, regval, TWL4030_MADC_CTRL1); if (ret) { dev_err(madc->dev, "unable to write madc ctrl1 reg 0x%X\n", TWL4030_MADC_CTRL1); return ret; } return 0; } /* * Initialize MADC and request for threaded irq */ static int __devinit twl4030_madc_probe(struct platform_device *pdev) { struct twl4030_madc_data *madc; struct twl4030_madc_platform_data *pdata = pdev->dev.platform_data; int ret; u8 regval; if (!pdata) { dev_err(&pdev->dev, "platform_data not available\n"); return -EINVAL; } madc = kzalloc(sizeof(*madc), GFP_KERNEL); if (!madc) return -ENOMEM; madc->dev = &pdev->dev; /* * Phoenix provides 2 interrupt lines. The first one is connected to * the OMAP. The other one can be connected to the other processor such * as modem. Hence two separate ISR and IMR registers. */ madc->imr = (pdata->irq_line == 1) ? TWL4030_MADC_IMR1 : TWL4030_MADC_IMR2; madc->isr = (pdata->irq_line == 1) ? 
TWL4030_MADC_ISR1 : TWL4030_MADC_ISR2; ret = twl4030_madc_set_power(madc, 1); if (ret < 0) goto err_power; ret = twl4030_madc_set_current_generator(madc, 0, 1); if (ret < 0) goto err_current_generator; ret = twl_i2c_read_u8(TWL4030_MODULE_MAIN_CHARGE, &regval, TWL4030_BCI_BCICTL1); if (ret) { dev_err(&pdev->dev, "unable to read reg BCI CTL1 0x%X\n", TWL4030_BCI_BCICTL1); goto err_i2c; } regval |= TWL4030_BCI_MESBAT; ret = twl_i2c_write_u8(TWL4030_MODULE_MAIN_CHARGE, regval, TWL4030_BCI_BCICTL1); if (ret) { dev_err(&pdev->dev, "unable to write reg BCI Ctl1 0x%X\n", TWL4030_BCI_BCICTL1); goto err_i2c; } /* Check that MADC clock is on */ ret = twl_i2c_read_u8(TWL4030_MODULE_INTBR, &regval, TWL4030_REG_GPBR1); if (ret) { dev_err(&pdev->dev, "unable to read reg GPBR1 0x%X\n", TWL4030_REG_GPBR1); goto err_i2c; } /* If MADC clk is not on, turn it on */ if (!(regval & TWL4030_GPBR1_MADC_HFCLK_EN)) { dev_info(&pdev->dev, "clk disabled, enabling\n"); regval |= TWL4030_GPBR1_MADC_HFCLK_EN; ret = twl_i2c_write_u8(TWL4030_MODULE_INTBR, regval, TWL4030_REG_GPBR1); if (ret) { dev_err(&pdev->dev, "unable to write reg GPBR1 0x%X\n", TWL4030_REG_GPBR1); goto err_i2c; } } platform_set_drvdata(pdev, madc); mutex_init(&madc->lock); ret = request_threaded_irq(platform_get_irq(pdev, 0), NULL, twl4030_madc_threaded_irq_handler, IRQF_TRIGGER_RISING, "twl4030_madc", madc); if (ret) { dev_dbg(&pdev->dev, "could not request irq\n"); goto err_irq; } twl4030_madc = madc; return 0; err_irq: platform_set_drvdata(pdev, NULL); err_i2c: twl4030_madc_set_current_generator(madc, 0, 0); err_current_generator: twl4030_madc_set_power(madc, 0); err_power: kfree(madc); return ret; } static int __devexit twl4030_madc_remove(struct platform_device *pdev) { struct twl4030_madc_data *madc = platform_get_drvdata(pdev); free_irq(platform_get_irq(pdev, 0), madc); platform_set_drvdata(pdev, NULL); twl4030_madc_set_current_generator(madc, 0, 0); twl4030_madc_set_power(madc, 0); kfree(madc); return 0; } static 
struct platform_driver twl4030_madc_driver = { .probe = twl4030_madc_probe, .remove = __exit_p(twl4030_madc_remove), .driver = { .name = "twl4030_madc", .owner = THIS_MODULE, }, }; static int __init twl4030_madc_init(void) { return platform_driver_register(&twl4030_madc_driver); } module_init(twl4030_madc_init); static void __exit twl4030_madc_exit(void) { platform_driver_unregister(&twl4030_madc_driver); } module_exit(twl4030_madc_exit); MODULE_DESCRIPTION("TWL4030 ADC driver"); MODULE_LICENSE("GPL"); MODULE_AUTHOR("J Keerthy"); MODULE_ALIAS("platform:twl4030_madc");
gpl-2.0
jomeister15/ICS-SGH-I727-kernel
fs/reiserfs/namei.c
2491
45139
/* * Copyright 2000 by Hans Reiser, licensing governed by reiserfs/README * * Trivial changes by Alan Cox to remove EHASHCOLLISION for compatibility * * Trivial Changes: * Rights granted to Hans Reiser to redistribute under other terms providing * he accepts all liability including but not limited to patent, fitness * for purpose, and direct or indirect claims arising from failure to perform. * * NO WARRANTY */ #include <linux/time.h> #include <linux/bitops.h> #include <linux/slab.h> #include <linux/reiserfs_fs.h> #include <linux/reiserfs_acl.h> #include <linux/reiserfs_xattr.h> #include <linux/quotaops.h> #define INC_DIR_INODE_NLINK(i) if (i->i_nlink != 1) { inc_nlink(i); if (i->i_nlink >= REISERFS_LINK_MAX) i->i_nlink=1; } #define DEC_DIR_INODE_NLINK(i) if (i->i_nlink != 1) drop_nlink(i); // directory item contains array of entry headers. This performs // binary search through that array static int bin_search_in_dir_item(struct reiserfs_dir_entry *de, loff_t off) { struct item_head *ih = de->de_ih; struct reiserfs_de_head *deh = de->de_deh; int rbound, lbound, j; lbound = 0; rbound = I_ENTRY_COUNT(ih) - 1; for (j = (rbound + lbound) / 2; lbound <= rbound; j = (rbound + lbound) / 2) { if (off < deh_offset(deh + j)) { rbound = j - 1; continue; } if (off > deh_offset(deh + j)) { lbound = j + 1; continue; } // this is not name found, but matched third key component de->de_entry_num = j; return NAME_FOUND; } de->de_entry_num = lbound; return NAME_NOT_FOUND; } // comment? maybe something like set de to point to what the path points to? 
static inline void set_de_item_location(struct reiserfs_dir_entry *de,
					struct treepath *path)
{
	de->de_bh = get_last_bh(path);
	de->de_ih = get_ih(path);
	de->de_deh = B_I_DEH(de->de_bh, de->de_ih);
	de->de_item_num = PATH_LAST_POSITION(path);
}

// de_bh, de_ih, de_deh (points to first element of array), de_item_num is set
// Derive de_name/de_namelen for entry de->de_entry_num.  Entry length may
// include a stat-data record (old format); a NUL inside the stored name
// means the name is shorter than the on-disk slot, so recompute via strlen.
inline void set_de_name_and_namelen(struct reiserfs_dir_entry *de)
{
	struct reiserfs_de_head *deh = de->de_deh + de->de_entry_num;

	BUG_ON(de->de_entry_num >= ih_entry_count(de->de_ih));

	de->de_entrylen = entry_length(de->de_bh, de->de_ih, de->de_entry_num);
	de->de_namelen = de->de_entrylen - (de_with_sd(deh) ? SD_SIZE : 0);
	de->de_name = B_I_PITEM(de->de_bh, de->de_ih) + deh_location(deh);
	if (de->de_name[de->de_namelen - 1] == 0)
		de->de_namelen = strlen(de->de_name);
}

// what entry points to: copy the (dir_id, objectid) key of the object the
// current entry refers to
static inline void set_de_object_key(struct reiserfs_dir_entry *de)
{
	BUG_ON(de->de_entry_num >= ih_entry_count(de->de_ih));
	de->de_dir_id = deh_dir_id(&(de->de_deh[de->de_entry_num]));
	de->de_objectid = deh_objectid(&(de->de_deh[de->de_entry_num]));
}

// Record the cpu key of the entry itself (not the object it points to),
// so the entry can be re-found later, e.g. by rename/unlink.
static inline void store_de_entry_key(struct reiserfs_dir_entry *de)
{
	struct reiserfs_de_head *deh = de->de_deh + de->de_entry_num;

	BUG_ON(de->de_entry_num >= ih_entry_count(de->de_ih));

	/* store key of the found entry */
	de->de_entry_key.version = KEY_FORMAT_3_5;
	de->de_entry_key.on_disk_key.k_dir_id =
	    le32_to_cpu(de->de_ih->ih_key.k_dir_id);
	de->de_entry_key.on_disk_key.k_objectid =
	    le32_to_cpu(de->de_ih->ih_key.k_objectid);
	set_cpu_key_k_offset(&(de->de_entry_key), deh_offset(deh));
	set_cpu_key_k_type(&(de->de_entry_key), TYPE_DIRENTRY);
}

/* We assign a key to each directory item, and place multiple entries in a
   single directory item.  A directory item has a key equal to the key of
   the first directory entry in it.

   This function first calls search_by_key, then, if item whose first entry
   matches is not found it looks for the entry inside directory item found
   by search_by_key.
   Fills the path to the entry, and to the entry position in the item

*/

/* The function is NOT SCHEDULE-SAFE! */
int search_by_entry_key(struct super_block *sb, const struct cpu_key *key,
			struct treepath *path, struct reiserfs_dir_entry *de)
{
	int retval;

	retval = search_item(sb, key, path);
	switch (retval) {
	case ITEM_NOT_FOUND:
		if (!PATH_LAST_POSITION(path)) {
			reiserfs_error(sb, "vs-7000", "search_by_key "
				       "returned item position == 0");
			pathrelse(path);
			return IO_ERROR;
		}
		/* step back to the item that covers this key range */
		PATH_LAST_POSITION(path)--;
		/* fall through */
	case ITEM_FOUND:
		break;
	case IO_ERROR:
		return retval;
	default:
		pathrelse(path);
		reiserfs_error(sb, "vs-7002", "no path to here");
		return IO_ERROR;
	}

	set_de_item_location(de, path);

#ifdef CONFIG_REISERFS_CHECK
	if (!is_direntry_le_ih(de->de_ih) ||
	    COMP_SHORT_KEYS(&(de->de_ih->ih_key), key)) {
		print_block(de->de_bh, 0, -1, -1);
		reiserfs_panic(sb, "vs-7005", "found item %h is not directory "
			       "item or does not belong to the same directory "
			       "as key %K", de->de_ih, key);
	}
#endif				/* CONFIG_REISERFS_CHECK */

	/* binary search in directory item by third component of the
	   key. sets de->de_entry_num of de */
	retval = bin_search_in_dir_item(de, cpu_key_k_offset(key));
	path->pos_in_item = de->de_entry_num;
	if (retval != NAME_NOT_FOUND) {
		// ugly, but rename needs de_bh, de_deh, de_name, de_namelen, de_objectid set
		set_de_name_and_namelen(de);
		set_de_object_key(de);
	}
	return retval;
}

/* Keyed 32-bit hash function using TEA in a Davis-Meyer function */

/* The third component is hashed, and you can choose from more than one
   hash function.  Per directory hashes are not yet implemented but are
   thought about. This function should be moved to hashes.c
   Jedi, please do so.  -Hans */
static __u32 get_third_component(struct super_block *s,
				 const char *name, int len)
{
	__u32 res;

	if (!len || (len == 1 && name[0] == '.'))
		return DOT_OFFSET;
	if (len == 2 && name[0] == '.'
&& name[1] == '.') return DOT_DOT_OFFSET; res = REISERFS_SB(s)->s_hash_function(name, len); // take bits from 7-th to 30-th including both bounds res = GET_HASH_VALUE(res); if (res == 0) // needed to have no names before "." and ".." those have hash // value == 0 and generation conters 1 and 2 accordingly res = 128; return res + MAX_GENERATION_NUMBER; } static int reiserfs_match(struct reiserfs_dir_entry *de, const char *name, int namelen) { int retval = NAME_NOT_FOUND; if ((namelen == de->de_namelen) && !memcmp(de->de_name, name, de->de_namelen)) retval = (de_visible(de->de_deh + de->de_entry_num) ? NAME_FOUND : NAME_FOUND_INVISIBLE); return retval; } /* de's de_bh, de_ih, de_deh, de_item_num, de_entry_num are set already */ /* used when hash collisions exist */ static int linear_search_in_dir_item(struct cpu_key *key, struct reiserfs_dir_entry *de, const char *name, int namelen) { struct reiserfs_de_head *deh = de->de_deh; int retval; int i; i = de->de_entry_num; if (i == I_ENTRY_COUNT(de->de_ih) || GET_HASH_VALUE(deh_offset(deh + i)) != GET_HASH_VALUE(cpu_key_k_offset(key))) { i--; } RFALSE(de->de_deh != B_I_DEH(de->de_bh, de->de_ih), "vs-7010: array of entry headers not found"); deh += i; for (; i >= 0; i--, deh--) { if (GET_HASH_VALUE(deh_offset(deh)) != GET_HASH_VALUE(cpu_key_k_offset(key))) { // hash value does not match, no need to check whole name return NAME_NOT_FOUND; } /* mark, that this generation number is used */ if (de->de_gen_number_bit_string) set_bit(GET_GENERATION_NUMBER(deh_offset(deh)), de->de_gen_number_bit_string); // calculate pointer to name and namelen de->de_entry_num = i; set_de_name_and_namelen(de); if ((retval = reiserfs_match(de, name, namelen)) != NAME_NOT_FOUND) { // de's de_name, de_namelen, de_recordlen are set. 
Fill the rest: // key of pointed object set_de_object_key(de); store_de_entry_key(de); // retval can be NAME_FOUND or NAME_FOUND_INVISIBLE return retval; } } if (GET_GENERATION_NUMBER(le_ih_k_offset(de->de_ih)) == 0) /* we have reached left most entry in the node. In common we have to go to the left neighbor, but if generation counter is 0 already, we know for sure, that there is no name with the same hash value */ // FIXME: this work correctly only because hash value can not // be 0. Btw, in case of Yura's hash it is probably possible, // so, this is a bug return NAME_NOT_FOUND; RFALSE(de->de_item_num, "vs-7015: two diritems of the same directory in one node?"); return GOTO_PREVIOUS_ITEM; } // may return NAME_FOUND, NAME_FOUND_INVISIBLE, NAME_NOT_FOUND // FIXME: should add something like IOERROR static int reiserfs_find_entry(struct inode *dir, const char *name, int namelen, struct treepath *path_to_entry, struct reiserfs_dir_entry *de) { struct cpu_key key_to_search; int retval; if (namelen > REISERFS_MAX_NAME(dir->i_sb->s_blocksize)) return NAME_NOT_FOUND; /* we will search for this key in the tree */ make_cpu_key(&key_to_search, dir, get_third_component(dir->i_sb, name, namelen), TYPE_DIRENTRY, 3); while (1) { retval = search_by_entry_key(dir->i_sb, &key_to_search, path_to_entry, de); if (retval == IO_ERROR) { reiserfs_error(dir->i_sb, "zam-7001", "io error"); return IO_ERROR; } /* compare names for all entries having given hash value */ retval = linear_search_in_dir_item(&key_to_search, de, name, namelen); if (retval != GOTO_PREVIOUS_ITEM) { /* there is no need to scan directory anymore. 
Given entry found or does not exist */ path_to_entry->pos_in_item = de->de_entry_num; return retval; } /* there is left neighboring item of this directory and given entry can be there */ set_cpu_key_k_offset(&key_to_search, le_ih_k_offset(de->de_ih) - 1); pathrelse(path_to_entry); } /* while (1) */ } static struct dentry *reiserfs_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd) { int retval; int lock_depth; struct inode *inode = NULL; struct reiserfs_dir_entry de; INITIALIZE_PATH(path_to_entry); if (REISERFS_MAX_NAME(dir->i_sb->s_blocksize) < dentry->d_name.len) return ERR_PTR(-ENAMETOOLONG); /* * Might be called with or without the write lock, must be careful * to not recursively hold it in case we want to release the lock * before rescheduling. */ lock_depth = reiserfs_write_lock_once(dir->i_sb); de.de_gen_number_bit_string = NULL; retval = reiserfs_find_entry(dir, dentry->d_name.name, dentry->d_name.len, &path_to_entry, &de); pathrelse(&path_to_entry); if (retval == NAME_FOUND) { inode = reiserfs_iget(dir->i_sb, (struct cpu_key *)&(de.de_dir_id)); if (!inode || IS_ERR(inode)) { reiserfs_write_unlock_once(dir->i_sb, lock_depth); return ERR_PTR(-EACCES); } /* Propagate the private flag so we know we're * in the priv tree */ if (IS_PRIVATE(dir)) inode->i_flags |= S_PRIVATE; } reiserfs_write_unlock_once(dir->i_sb, lock_depth); if (retval == IO_ERROR) { return ERR_PTR(-EIO); } return d_splice_alias(inode, dentry); } /* ** looks up the dentry of the parent directory for child. 
** taken from ext2_get_parent */ struct dentry *reiserfs_get_parent(struct dentry *child) { int retval; struct inode *inode = NULL; struct reiserfs_dir_entry de; INITIALIZE_PATH(path_to_entry); struct inode *dir = child->d_inode; if (dir->i_nlink == 0) { return ERR_PTR(-ENOENT); } de.de_gen_number_bit_string = NULL; reiserfs_write_lock(dir->i_sb); retval = reiserfs_find_entry(dir, "..", 2, &path_to_entry, &de); pathrelse(&path_to_entry); if (retval != NAME_FOUND) { reiserfs_write_unlock(dir->i_sb); return ERR_PTR(-ENOENT); } inode = reiserfs_iget(dir->i_sb, (struct cpu_key *)&(de.de_dir_id)); reiserfs_write_unlock(dir->i_sb); return d_obtain_alias(inode); } /* add entry to the directory (entry can be hidden). insert definition of when hidden directories are used here -Hans Does not mark dir inode dirty, do it after successesfull call to it */ static int reiserfs_add_entry(struct reiserfs_transaction_handle *th, struct inode *dir, const char *name, int namelen, struct inode *inode, int visible) { struct cpu_key entry_key; struct reiserfs_de_head *deh; INITIALIZE_PATH(path); struct reiserfs_dir_entry de; DECLARE_BITMAP(bit_string, MAX_GENERATION_NUMBER + 1); int gen_number; char small_buf[32 + DEH_SIZE]; /* 48 bytes now and we avoid kmalloc if we create file with short name */ char *buffer; int buflen, paste_size; int retval; BUG_ON(!th->t_trans_id); /* cannot allow items to be added into a busy deleted directory */ if (!namelen) return -EINVAL; if (namelen > REISERFS_MAX_NAME(dir->i_sb->s_blocksize)) return -ENAMETOOLONG; /* each entry has unique key. compose it */ make_cpu_key(&entry_key, dir, get_third_component(dir->i_sb, name, namelen), TYPE_DIRENTRY, 3); /* get memory for composing the entry */ buflen = DEH_SIZE + ROUND_UP(namelen); if (buflen > sizeof(small_buf)) { buffer = kmalloc(buflen, GFP_NOFS); if (!buffer) return -ENOMEM; } else buffer = small_buf; paste_size = (get_inode_sd_version(dir) == STAT_DATA_V1) ? 
(DEH_SIZE + namelen) : buflen; /* fill buffer : directory entry head, name[, dir objectid | , stat data | ,stat data, dir objectid ] */ deh = (struct reiserfs_de_head *)buffer; deh->deh_location = 0; /* JDM Endian safe if 0 */ put_deh_offset(deh, cpu_key_k_offset(&entry_key)); deh->deh_state = 0; /* JDM Endian safe if 0 */ /* put key (ino analog) to de */ deh->deh_dir_id = INODE_PKEY(inode)->k_dir_id; /* safe: k_dir_id is le */ deh->deh_objectid = INODE_PKEY(inode)->k_objectid; /* safe: k_objectid is le */ /* copy name */ memcpy((char *)(deh + 1), name, namelen); /* padd by 0s to the 4 byte boundary */ padd_item((char *)(deh + 1), ROUND_UP(namelen), namelen); /* entry is ready to be pasted into tree, set 'visibility' and 'stat data in entry' attributes */ mark_de_without_sd(deh); visible ? mark_de_visible(deh) : mark_de_hidden(deh); /* find the proper place for the new entry */ memset(bit_string, 0, sizeof(bit_string)); de.de_gen_number_bit_string = bit_string; retval = reiserfs_find_entry(dir, name, namelen, &path, &de); if (retval != NAME_NOT_FOUND) { if (buffer != small_buf) kfree(buffer); pathrelse(&path); if (retval == IO_ERROR) { return -EIO; } if (retval != NAME_FOUND) { reiserfs_error(dir->i_sb, "zam-7002", "reiserfs_find_entry() returned " "unexpected value (%d)", retval); } return -EEXIST; } gen_number = find_first_zero_bit(bit_string, MAX_GENERATION_NUMBER + 1); if (gen_number > MAX_GENERATION_NUMBER) { /* there is no free generation number */ reiserfs_warning(dir->i_sb, "reiserfs-7010", "Congratulations! 
we have got hash function " "screwed up"); if (buffer != small_buf) kfree(buffer); pathrelse(&path); return -EBUSY; } /* adjust offset of directory enrty */ put_deh_offset(deh, SET_GENERATION_NUMBER(deh_offset(deh), gen_number)); set_cpu_key_k_offset(&entry_key, deh_offset(deh)); /* update max-hash-collisions counter in reiserfs_sb_info */ PROC_INFO_MAX(th->t_super, max_hash_collisions, gen_number); if (gen_number != 0) { /* we need to re-search for the insertion point */ if (search_by_entry_key(dir->i_sb, &entry_key, &path, &de) != NAME_NOT_FOUND) { reiserfs_warning(dir->i_sb, "vs-7032", "entry with this key (%K) already " "exists", &entry_key); if (buffer != small_buf) kfree(buffer); pathrelse(&path); return -EBUSY; } } /* perform the insertion of the entry that we have prepared */ retval = reiserfs_paste_into_item(th, &path, &entry_key, dir, buffer, paste_size); if (buffer != small_buf) kfree(buffer); if (retval) { reiserfs_check_path(&path); return retval; } dir->i_size += paste_size; dir->i_mtime = dir->i_ctime = CURRENT_TIME_SEC; if (!S_ISDIR(inode->i_mode) && visible) // reiserfs_mkdir or reiserfs_rename will do that by itself reiserfs_update_sd(th, dir); reiserfs_check_path(&path); return 0; } /* quota utility function, call if you've had to abort after calling ** new_inode_init, and have not called reiserfs_new_inode yet. ** This should only be called on inodes that do not have stat data ** inserted into the tree yet. */ static int drop_new_inode(struct inode *inode) { dquot_drop(inode); make_bad_inode(inode); inode->i_flags |= S_NOQUOTA; iput(inode); return 0; } /* utility function that does setup for reiserfs_new_inode. ** dquot_initialize needs lots of credits so it's better to have it ** outside of a transaction, so we had to pull some bits of ** reiserfs_new_inode out into this func. 
*/ static int new_inode_init(struct inode *inode, struct inode *dir, int mode) { /* Make inode invalid - just in case we are going to drop it before * the initialization happens */ INODE_PKEY(inode)->k_objectid = 0; /* the quota init calls have to know who to charge the quota to, so ** we have to set uid and gid here */ inode_init_owner(inode, dir, mode); dquot_initialize(inode); return 0; } static int reiserfs_create(struct inode *dir, struct dentry *dentry, int mode, struct nameidata *nd) { int retval; struct inode *inode; /* We need blocks for transaction + (user+group)*(quotas for new inode + update of quota for directory owner) */ int jbegin_count = JOURNAL_PER_BALANCE_CNT * 2 + 2 * (REISERFS_QUOTA_INIT_BLOCKS(dir->i_sb) + REISERFS_QUOTA_TRANS_BLOCKS(dir->i_sb)); struct reiserfs_transaction_handle th; struct reiserfs_security_handle security; dquot_initialize(dir); if (!(inode = new_inode(dir->i_sb))) { return -ENOMEM; } new_inode_init(inode, dir, mode); jbegin_count += reiserfs_cache_default_acl(dir); retval = reiserfs_security_init(dir, inode, &dentry->d_name, &security); if (retval < 0) { drop_new_inode(inode); return retval; } jbegin_count += retval; reiserfs_write_lock(dir->i_sb); retval = journal_begin(&th, dir->i_sb, jbegin_count); if (retval) { drop_new_inode(inode); goto out_failed; } retval = reiserfs_new_inode(&th, dir, mode, NULL, 0 /*i_size */ , dentry, inode, &security); if (retval) goto out_failed; inode->i_op = &reiserfs_file_inode_operations; inode->i_fop = &reiserfs_file_operations; inode->i_mapping->a_ops = &reiserfs_address_space_operations; retval = reiserfs_add_entry(&th, dir, dentry->d_name.name, dentry->d_name.len, inode, 1 /*visible */ ); if (retval) { int err; inode->i_nlink--; reiserfs_update_sd(&th, inode); err = journal_end(&th, dir->i_sb, jbegin_count); if (err) retval = err; unlock_new_inode(inode); iput(inode); goto out_failed; } reiserfs_update_inode_transaction(inode); reiserfs_update_inode_transaction(dir); 
d_instantiate(dentry, inode); unlock_new_inode(inode); retval = journal_end(&th, dir->i_sb, jbegin_count); out_failed: reiserfs_write_unlock(dir->i_sb); return retval; } static int reiserfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t rdev) { int retval; struct inode *inode; struct reiserfs_transaction_handle th; struct reiserfs_security_handle security; /* We need blocks for transaction + (user+group)*(quotas for new inode + update of quota for directory owner) */ int jbegin_count = JOURNAL_PER_BALANCE_CNT * 3 + 2 * (REISERFS_QUOTA_INIT_BLOCKS(dir->i_sb) + REISERFS_QUOTA_TRANS_BLOCKS(dir->i_sb)); if (!new_valid_dev(rdev)) return -EINVAL; dquot_initialize(dir); if (!(inode = new_inode(dir->i_sb))) { return -ENOMEM; } new_inode_init(inode, dir, mode); jbegin_count += reiserfs_cache_default_acl(dir); retval = reiserfs_security_init(dir, inode, &dentry->d_name, &security); if (retval < 0) { drop_new_inode(inode); return retval; } jbegin_count += retval; reiserfs_write_lock(dir->i_sb); retval = journal_begin(&th, dir->i_sb, jbegin_count); if (retval) { drop_new_inode(inode); goto out_failed; } retval = reiserfs_new_inode(&th, dir, mode, NULL, 0 /*i_size */ , dentry, inode, &security); if (retval) { goto out_failed; } inode->i_op = &reiserfs_special_inode_operations; init_special_inode(inode, inode->i_mode, rdev); //FIXME: needed for block and char devices only reiserfs_update_sd(&th, inode); reiserfs_update_inode_transaction(inode); reiserfs_update_inode_transaction(dir); retval = reiserfs_add_entry(&th, dir, dentry->d_name.name, dentry->d_name.len, inode, 1 /*visible */ ); if (retval) { int err; inode->i_nlink--; reiserfs_update_sd(&th, inode); err = journal_end(&th, dir->i_sb, jbegin_count); if (err) retval = err; unlock_new_inode(inode); iput(inode); goto out_failed; } d_instantiate(dentry, inode); unlock_new_inode(inode); retval = journal_end(&th, dir->i_sb, jbegin_count); out_failed: reiserfs_write_unlock(dir->i_sb); return retval; } static int 
reiserfs_mkdir(struct inode *dir, struct dentry *dentry, int mode) { int retval; struct inode *inode; struct reiserfs_transaction_handle th; struct reiserfs_security_handle security; int lock_depth; /* We need blocks for transaction + (user+group)*(quotas for new inode + update of quota for directory owner) */ int jbegin_count = JOURNAL_PER_BALANCE_CNT * 3 + 2 * (REISERFS_QUOTA_INIT_BLOCKS(dir->i_sb) + REISERFS_QUOTA_TRANS_BLOCKS(dir->i_sb)); dquot_initialize(dir); #ifdef DISPLACE_NEW_PACKING_LOCALITIES /* set flag that new packing locality created and new blocks for the content * of that directory are not displaced yet */ REISERFS_I(dir)->new_packing_locality = 1; #endif mode = S_IFDIR | mode; if (!(inode = new_inode(dir->i_sb))) { return -ENOMEM; } new_inode_init(inode, dir, mode); jbegin_count += reiserfs_cache_default_acl(dir); retval = reiserfs_security_init(dir, inode, &dentry->d_name, &security); if (retval < 0) { drop_new_inode(inode); return retval; } jbegin_count += retval; lock_depth = reiserfs_write_lock_once(dir->i_sb); retval = journal_begin(&th, dir->i_sb, jbegin_count); if (retval) { drop_new_inode(inode); goto out_failed; } /* inc the link count now, so another writer doesn't overflow it while ** we sleep later on. */ INC_DIR_INODE_NLINK(dir) retval = reiserfs_new_inode(&th, dir, mode, NULL /*symlink */ , old_format_only(dir->i_sb) ? 
EMPTY_DIR_SIZE_V1 : EMPTY_DIR_SIZE, dentry, inode, &security); if (retval) { DEC_DIR_INODE_NLINK(dir) goto out_failed; } reiserfs_update_inode_transaction(inode); reiserfs_update_inode_transaction(dir); inode->i_op = &reiserfs_dir_inode_operations; inode->i_fop = &reiserfs_dir_operations; // note, _this_ add_entry will not update dir's stat data retval = reiserfs_add_entry(&th, dir, dentry->d_name.name, dentry->d_name.len, inode, 1 /*visible */ ); if (retval) { int err; inode->i_nlink = 0; DEC_DIR_INODE_NLINK(dir); reiserfs_update_sd(&th, inode); err = journal_end(&th, dir->i_sb, jbegin_count); if (err) retval = err; unlock_new_inode(inode); iput(inode); goto out_failed; } // the above add_entry did not update dir's stat data reiserfs_update_sd(&th, dir); d_instantiate(dentry, inode); unlock_new_inode(inode); retval = journal_end(&th, dir->i_sb, jbegin_count); out_failed: reiserfs_write_unlock_once(dir->i_sb, lock_depth); return retval; } static inline int reiserfs_empty_dir(struct inode *inode) { /* we can cheat because an old format dir cannot have ** EMPTY_DIR_SIZE, and a new format dir cannot have ** EMPTY_DIR_SIZE_V1. So, if the inode is either size, ** regardless of disk format version, the directory is empty. */ if (inode->i_size != EMPTY_DIR_SIZE && inode->i_size != EMPTY_DIR_SIZE_V1) { return 0; } return 1; } static int reiserfs_rmdir(struct inode *dir, struct dentry *dentry) { int retval, err; struct inode *inode; struct reiserfs_transaction_handle th; int jbegin_count; INITIALIZE_PATH(path); struct reiserfs_dir_entry de; /* we will be doing 2 balancings and update 2 stat data, we change quotas * of the owner of the directory and of the owner of the parent directory. 
* The quota structure is possibly deleted only on last iput => outside * of this transaction */ jbegin_count = JOURNAL_PER_BALANCE_CNT * 2 + 2 + 4 * REISERFS_QUOTA_TRANS_BLOCKS(dir->i_sb); dquot_initialize(dir); reiserfs_write_lock(dir->i_sb); retval = journal_begin(&th, dir->i_sb, jbegin_count); if (retval) goto out_rmdir; de.de_gen_number_bit_string = NULL; if ((retval = reiserfs_find_entry(dir, dentry->d_name.name, dentry->d_name.len, &path, &de)) == NAME_NOT_FOUND) { retval = -ENOENT; goto end_rmdir; } else if (retval == IO_ERROR) { retval = -EIO; goto end_rmdir; } inode = dentry->d_inode; reiserfs_update_inode_transaction(inode); reiserfs_update_inode_transaction(dir); if (de.de_objectid != inode->i_ino) { // FIXME: compare key of an object and a key found in the // entry retval = -EIO; goto end_rmdir; } if (!reiserfs_empty_dir(inode)) { retval = -ENOTEMPTY; goto end_rmdir; } /* cut entry from dir directory */ retval = reiserfs_cut_from_item(&th, &path, &(de.de_entry_key), dir, NULL, /* page */ 0 /*new file size - not used here */ ); if (retval < 0) goto end_rmdir; if (inode->i_nlink != 2 && inode->i_nlink != 1) reiserfs_error(inode->i_sb, "reiserfs-7040", "empty directory has nlink != 2 (%d)", inode->i_nlink); clear_nlink(inode); inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME_SEC; reiserfs_update_sd(&th, inode); DEC_DIR_INODE_NLINK(dir) dir->i_size -= (DEH_SIZE + de.de_entrylen); reiserfs_update_sd(&th, dir); /* prevent empty directory from getting lost */ add_save_link(&th, inode, 0 /* not truncate */ ); retval = journal_end(&th, dir->i_sb, jbegin_count); reiserfs_check_path(&path); out_rmdir: reiserfs_write_unlock(dir->i_sb); return retval; end_rmdir: /* we must release path, because we did not call reiserfs_cut_from_item, or reiserfs_cut_from_item does not release path if operation was not complete */ pathrelse(&path); err = journal_end(&th, dir->i_sb, jbegin_count); reiserfs_write_unlock(dir->i_sb); return err ? 
err : retval; } static int reiserfs_unlink(struct inode *dir, struct dentry *dentry) { int retval, err; struct inode *inode; struct reiserfs_dir_entry de; INITIALIZE_PATH(path); struct reiserfs_transaction_handle th; int jbegin_count; unsigned long savelink; int depth; dquot_initialize(dir); inode = dentry->d_inode; /* in this transaction we can be doing at max two balancings and update * two stat datas, we change quotas of the owner of the directory and of * the owner of the parent directory. The quota structure is possibly * deleted only on iput => outside of this transaction */ jbegin_count = JOURNAL_PER_BALANCE_CNT * 2 + 2 + 4 * REISERFS_QUOTA_TRANS_BLOCKS(dir->i_sb); depth = reiserfs_write_lock_once(dir->i_sb); retval = journal_begin(&th, dir->i_sb, jbegin_count); if (retval) goto out_unlink; de.de_gen_number_bit_string = NULL; if ((retval = reiserfs_find_entry(dir, dentry->d_name.name, dentry->d_name.len, &path, &de)) == NAME_NOT_FOUND) { retval = -ENOENT; goto end_unlink; } else if (retval == IO_ERROR) { retval = -EIO; goto end_unlink; } reiserfs_update_inode_transaction(inode); reiserfs_update_inode_transaction(dir); if (de.de_objectid != inode->i_ino) { // FIXME: compare key of an object and a key found in the // entry retval = -EIO; goto end_unlink; } if (!inode->i_nlink) { reiserfs_warning(inode->i_sb, "reiserfs-7042", "deleting nonexistent file (%lu), %d", inode->i_ino, inode->i_nlink); inode->i_nlink = 1; } drop_nlink(inode); /* * we schedule before doing the add_save_link call, save the link * count so we don't race */ savelink = inode->i_nlink; retval = reiserfs_cut_from_item(&th, &path, &(de.de_entry_key), dir, NULL, 0); if (retval < 0) { inc_nlink(inode); goto end_unlink; } inode->i_ctime = CURRENT_TIME_SEC; reiserfs_update_sd(&th, inode); dir->i_size -= (de.de_entrylen + DEH_SIZE); dir->i_ctime = dir->i_mtime = CURRENT_TIME_SEC; reiserfs_update_sd(&th, dir); if (!savelink) /* prevent file from getting lost */ add_save_link(&th, inode, 0 /* not 
truncate */ ); retval = journal_end(&th, dir->i_sb, jbegin_count); reiserfs_check_path(&path); reiserfs_write_unlock_once(dir->i_sb, depth); return retval; end_unlink: pathrelse(&path); err = journal_end(&th, dir->i_sb, jbegin_count); reiserfs_check_path(&path); if (err) retval = err; out_unlink: reiserfs_write_unlock_once(dir->i_sb, depth); return retval; } static int reiserfs_symlink(struct inode *parent_dir, struct dentry *dentry, const char *symname) { int retval; struct inode *inode; char *name; int item_len; struct reiserfs_transaction_handle th; struct reiserfs_security_handle security; int mode = S_IFLNK | S_IRWXUGO; /* We need blocks for transaction + (user+group)*(quotas for new inode + update of quota for directory owner) */ int jbegin_count = JOURNAL_PER_BALANCE_CNT * 3 + 2 * (REISERFS_QUOTA_INIT_BLOCKS(parent_dir->i_sb) + REISERFS_QUOTA_TRANS_BLOCKS(parent_dir->i_sb)); dquot_initialize(parent_dir); if (!(inode = new_inode(parent_dir->i_sb))) { return -ENOMEM; } new_inode_init(inode, parent_dir, mode); retval = reiserfs_security_init(parent_dir, inode, &dentry->d_name, &security); if (retval < 0) { drop_new_inode(inode); return retval; } jbegin_count += retval; reiserfs_write_lock(parent_dir->i_sb); item_len = ROUND_UP(strlen(symname)); if (item_len > MAX_DIRECT_ITEM_LEN(parent_dir->i_sb->s_blocksize)) { retval = -ENAMETOOLONG; drop_new_inode(inode); goto out_failed; } name = kmalloc(item_len, GFP_NOFS); if (!name) { drop_new_inode(inode); retval = -ENOMEM; goto out_failed; } memcpy(name, symname, strlen(symname)); padd_item(name, item_len, strlen(symname)); retval = journal_begin(&th, parent_dir->i_sb, jbegin_count); if (retval) { drop_new_inode(inode); kfree(name); goto out_failed; } retval = reiserfs_new_inode(&th, parent_dir, mode, name, strlen(symname), dentry, inode, &security); kfree(name); if (retval) { /* reiserfs_new_inode iputs for us */ goto out_failed; } reiserfs_update_inode_transaction(inode); 
reiserfs_update_inode_transaction(parent_dir); inode->i_op = &reiserfs_symlink_inode_operations; inode->i_mapping->a_ops = &reiserfs_address_space_operations; // must be sure this inode is written with this transaction // //reiserfs_update_sd (&th, inode, READ_BLOCKS); retval = reiserfs_add_entry(&th, parent_dir, dentry->d_name.name, dentry->d_name.len, inode, 1 /*visible */ ); if (retval) { int err; inode->i_nlink--; reiserfs_update_sd(&th, inode); err = journal_end(&th, parent_dir->i_sb, jbegin_count); if (err) retval = err; unlock_new_inode(inode); iput(inode); goto out_failed; } d_instantiate(dentry, inode); unlock_new_inode(inode); retval = journal_end(&th, parent_dir->i_sb, jbegin_count); out_failed: reiserfs_write_unlock(parent_dir->i_sb); return retval; } static int reiserfs_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry) { int retval; struct inode *inode = old_dentry->d_inode; struct reiserfs_transaction_handle th; /* We need blocks for transaction + update of quotas for the owners of the directory */ int jbegin_count = JOURNAL_PER_BALANCE_CNT * 3 + 2 * REISERFS_QUOTA_TRANS_BLOCKS(dir->i_sb); dquot_initialize(dir); reiserfs_write_lock(dir->i_sb); if (inode->i_nlink >= REISERFS_LINK_MAX) { //FIXME: sd_nlink is 32 bit for new files reiserfs_write_unlock(dir->i_sb); return -EMLINK; } /* inc before scheduling so reiserfs_unlink knows we are here */ inc_nlink(inode); retval = journal_begin(&th, dir->i_sb, jbegin_count); if (retval) { inode->i_nlink--; reiserfs_write_unlock(dir->i_sb); return retval; } /* create new entry */ retval = reiserfs_add_entry(&th, dir, dentry->d_name.name, dentry->d_name.len, inode, 1 /*visible */ ); reiserfs_update_inode_transaction(inode); reiserfs_update_inode_transaction(dir); if (retval) { int err; inode->i_nlink--; err = journal_end(&th, dir->i_sb, jbegin_count); reiserfs_write_unlock(dir->i_sb); return err ? 
err : retval; } inode->i_ctime = CURRENT_TIME_SEC; reiserfs_update_sd(&th, inode); ihold(inode); d_instantiate(dentry, inode); retval = journal_end(&th, dir->i_sb, jbegin_count); reiserfs_write_unlock(dir->i_sb); return retval; } /* de contains information pointing to an entry which */ static int de_still_valid(const char *name, int len, struct reiserfs_dir_entry *de) { struct reiserfs_dir_entry tmp = *de; // recalculate pointer to name and name length set_de_name_and_namelen(&tmp); // FIXME: could check more if (tmp.de_namelen != len || memcmp(name, de->de_name, len)) return 0; return 1; } static int entry_points_to_object(const char *name, int len, struct reiserfs_dir_entry *de, struct inode *inode) { if (!de_still_valid(name, len, de)) return 0; if (inode) { if (!de_visible(de->de_deh + de->de_entry_num)) reiserfs_panic(inode->i_sb, "vs-7042", "entry must be visible"); return (de->de_objectid == inode->i_ino) ? 1 : 0; } /* this must be added hidden entry */ if (de_visible(de->de_deh + de->de_entry_num)) reiserfs_panic(NULL, "vs-7043", "entry must be visible"); return 1; } /* sets key of objectid the entry has to point to */ static void set_ino_in_dir_entry(struct reiserfs_dir_entry *de, struct reiserfs_key *key) { /* JDM These operations are endian safe - both are le */ de->de_deh[de->de_entry_num].deh_dir_id = key->k_dir_id; de->de_deh[de->de_entry_num].deh_objectid = key->k_objectid; } /* * process, that is going to call fix_nodes/do_balance must hold only * one path. 
If it holds 2 or more, it can get into endless waiting in * get_empty_nodes or its clones */ static int reiserfs_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry) { int retval; INITIALIZE_PATH(old_entry_path); INITIALIZE_PATH(new_entry_path); INITIALIZE_PATH(dot_dot_entry_path); struct item_head new_entry_ih, old_entry_ih, dot_dot_ih; struct reiserfs_dir_entry old_de, new_de, dot_dot_de; struct inode *old_inode, *new_dentry_inode; struct reiserfs_transaction_handle th; int jbegin_count; umode_t old_inode_mode; unsigned long savelink = 1; struct timespec ctime; /* three balancings: (1) old name removal, (2) new name insertion and (3) maybe "save" link insertion stat data updates: (1) old directory, (2) new directory and (3) maybe old object stat data (when it is directory) and (4) maybe stat data of object to which new entry pointed initially and (5) maybe block containing ".." of renamed directory quota updates: two parent directories */ jbegin_count = JOURNAL_PER_BALANCE_CNT * 3 + 5 + 4 * REISERFS_QUOTA_TRANS_BLOCKS(old_dir->i_sb); dquot_initialize(old_dir); dquot_initialize(new_dir); old_inode = old_dentry->d_inode; new_dentry_inode = new_dentry->d_inode; // make sure, that oldname still exists and points to an object we // are going to rename old_de.de_gen_number_bit_string = NULL; reiserfs_write_lock(old_dir->i_sb); retval = reiserfs_find_entry(old_dir, old_dentry->d_name.name, old_dentry->d_name.len, &old_entry_path, &old_de); pathrelse(&old_entry_path); if (retval == IO_ERROR) { reiserfs_write_unlock(old_dir->i_sb); return -EIO; } if (retval != NAME_FOUND || old_de.de_objectid != old_inode->i_ino) { reiserfs_write_unlock(old_dir->i_sb); return -ENOENT; } old_inode_mode = old_inode->i_mode; if (S_ISDIR(old_inode_mode)) { // make sure, that directory being renamed has correct ".." 
// and that its new parent directory has not too many links // already if (new_dentry_inode) { if (!reiserfs_empty_dir(new_dentry_inode)) { reiserfs_write_unlock(old_dir->i_sb); return -ENOTEMPTY; } } /* directory is renamed, its parent directory will be changed, ** so find ".." entry */ dot_dot_de.de_gen_number_bit_string = NULL; retval = reiserfs_find_entry(old_inode, "..", 2, &dot_dot_entry_path, &dot_dot_de); pathrelse(&dot_dot_entry_path); if (retval != NAME_FOUND) { reiserfs_write_unlock(old_dir->i_sb); return -EIO; } /* inode number of .. must equal old_dir->i_ino */ if (dot_dot_de.de_objectid != old_dir->i_ino) { reiserfs_write_unlock(old_dir->i_sb); return -EIO; } } retval = journal_begin(&th, old_dir->i_sb, jbegin_count); if (retval) { reiserfs_write_unlock(old_dir->i_sb); return retval; } /* add new entry (or find the existing one) */ retval = reiserfs_add_entry(&th, new_dir, new_dentry->d_name.name, new_dentry->d_name.len, old_inode, 0); if (retval == -EEXIST) { if (!new_dentry_inode) { reiserfs_panic(old_dir->i_sb, "vs-7050", "new entry is found, new inode == 0"); } } else if (retval) { int err = journal_end(&th, old_dir->i_sb, jbegin_count); reiserfs_write_unlock(old_dir->i_sb); return err ? 
err : retval; } reiserfs_update_inode_transaction(old_dir); reiserfs_update_inode_transaction(new_dir); /* this makes it so an fsync on an open fd for the old name will ** commit the rename operation */ reiserfs_update_inode_transaction(old_inode); if (new_dentry_inode) reiserfs_update_inode_transaction(new_dentry_inode); while (1) { // look for old name using corresponding entry key (found by reiserfs_find_entry) if ((retval = search_by_entry_key(new_dir->i_sb, &old_de.de_entry_key, &old_entry_path, &old_de)) != NAME_FOUND) { pathrelse(&old_entry_path); journal_end(&th, old_dir->i_sb, jbegin_count); reiserfs_write_unlock(old_dir->i_sb); return -EIO; } copy_item_head(&old_entry_ih, get_ih(&old_entry_path)); reiserfs_prepare_for_journal(old_inode->i_sb, old_de.de_bh, 1); // look for new name by reiserfs_find_entry new_de.de_gen_number_bit_string = NULL; retval = reiserfs_find_entry(new_dir, new_dentry->d_name.name, new_dentry->d_name.len, &new_entry_path, &new_de); // reiserfs_add_entry should not return IO_ERROR, because it is called with essentially same parameters from // reiserfs_add_entry above, and we'll catch any i/o errors before we get here. if (retval != NAME_FOUND_INVISIBLE && retval != NAME_FOUND) { pathrelse(&new_entry_path); pathrelse(&old_entry_path); journal_end(&th, old_dir->i_sb, jbegin_count); reiserfs_write_unlock(old_dir->i_sb); return -EIO; } copy_item_head(&new_entry_ih, get_ih(&new_entry_path)); reiserfs_prepare_for_journal(old_inode->i_sb, new_de.de_bh, 1); if (S_ISDIR(old_inode->i_mode)) { if ((retval = search_by_entry_key(new_dir->i_sb, &dot_dot_de.de_entry_key, &dot_dot_entry_path, &dot_dot_de)) != NAME_FOUND) { pathrelse(&dot_dot_entry_path); pathrelse(&new_entry_path); pathrelse(&old_entry_path); journal_end(&th, old_dir->i_sb, jbegin_count); reiserfs_write_unlock(old_dir->i_sb); return -EIO; } copy_item_head(&dot_dot_ih, get_ih(&dot_dot_entry_path)); // node containing ".." 
gets into transaction reiserfs_prepare_for_journal(old_inode->i_sb, dot_dot_de.de_bh, 1); } /* we should check seals here, not do this stuff, yes? Then, having gathered everything into RAM we should lock the buffers, yes? -Hans */ /* probably. our rename needs to hold more ** than one path at once. The seals would ** have to be written to deal with multi-path ** issues -chris */ /* sanity checking before doing the rename - avoid races many ** of the above checks could have scheduled. We have to be ** sure our items haven't been shifted by another process. */ if (item_moved(&new_entry_ih, &new_entry_path) || !entry_points_to_object(new_dentry->d_name.name, new_dentry->d_name.len, &new_de, new_dentry_inode) || item_moved(&old_entry_ih, &old_entry_path) || !entry_points_to_object(old_dentry->d_name.name, old_dentry->d_name.len, &old_de, old_inode)) { reiserfs_restore_prepared_buffer(old_inode->i_sb, new_de.de_bh); reiserfs_restore_prepared_buffer(old_inode->i_sb, old_de.de_bh); if (S_ISDIR(old_inode_mode)) reiserfs_restore_prepared_buffer(old_inode-> i_sb, dot_dot_de. de_bh); continue; } if (S_ISDIR(old_inode_mode)) { if (item_moved(&dot_dot_ih, &dot_dot_entry_path) || !entry_points_to_object("..", 2, &dot_dot_de, old_dir)) { reiserfs_restore_prepared_buffer(old_inode-> i_sb, old_de.de_bh); reiserfs_restore_prepared_buffer(old_inode-> i_sb, new_de.de_bh); reiserfs_restore_prepared_buffer(old_inode-> i_sb, dot_dot_de. de_bh); continue; } } RFALSE(S_ISDIR(old_inode_mode) && !buffer_journal_prepared(dot_dot_de.de_bh), ""); break; } /* ok, all the changes can be done in one fell swoop when we have claimed all the buffers needed. 
*/ mark_de_visible(new_de.de_deh + new_de.de_entry_num); set_ino_in_dir_entry(&new_de, INODE_PKEY(old_inode)); journal_mark_dirty(&th, old_dir->i_sb, new_de.de_bh); mark_de_hidden(old_de.de_deh + old_de.de_entry_num); journal_mark_dirty(&th, old_dir->i_sb, old_de.de_bh); ctime = CURRENT_TIME_SEC; old_dir->i_ctime = old_dir->i_mtime = ctime; new_dir->i_ctime = new_dir->i_mtime = ctime; /* thanks to Alex Adriaanse <alex_a@caltech.edu> for patch which adds ctime update of renamed object */ old_inode->i_ctime = ctime; if (new_dentry_inode) { // adjust link number of the victim if (S_ISDIR(new_dentry_inode->i_mode)) { clear_nlink(new_dentry_inode); } else { drop_nlink(new_dentry_inode); } new_dentry_inode->i_ctime = ctime; savelink = new_dentry_inode->i_nlink; } if (S_ISDIR(old_inode_mode)) { /* adjust ".." of renamed directory */ set_ino_in_dir_entry(&dot_dot_de, INODE_PKEY(new_dir)); journal_mark_dirty(&th, new_dir->i_sb, dot_dot_de.de_bh); if (!new_dentry_inode) /* there (in new_dir) was no directory, so it got new link (".." of renamed directory) */ INC_DIR_INODE_NLINK(new_dir); /* old directory lost one link - ".. " of renamed directory */ DEC_DIR_INODE_NLINK(old_dir); } // looks like in 2.3.99pre3 brelse is atomic. so we can use pathrelse pathrelse(&new_entry_path); pathrelse(&dot_dot_entry_path); // FIXME: this reiserfs_cut_from_item's return value may screw up // anybody, but it will panic if will not be able to find the // entry. This needs one more clean up if (reiserfs_cut_from_item (&th, &old_entry_path, &(old_de.de_entry_key), old_dir, NULL, 0) < 0) reiserfs_error(old_dir->i_sb, "vs-7060", "couldn't not cut old name. 
Fsck later?"); old_dir->i_size -= DEH_SIZE + old_de.de_entrylen; reiserfs_update_sd(&th, old_dir); reiserfs_update_sd(&th, new_dir); reiserfs_update_sd(&th, old_inode); if (new_dentry_inode) { if (savelink == 0) add_save_link(&th, new_dentry_inode, 0 /* not truncate */ ); reiserfs_update_sd(&th, new_dentry_inode); } retval = journal_end(&th, old_dir->i_sb, jbegin_count); reiserfs_write_unlock(old_dir->i_sb); return retval; } /* * directories can handle most operations... */ const struct inode_operations reiserfs_dir_inode_operations = { //&reiserfs_dir_operations, /* default_file_ops */ .create = reiserfs_create, .lookup = reiserfs_lookup, .link = reiserfs_link, .unlink = reiserfs_unlink, .symlink = reiserfs_symlink, .mkdir = reiserfs_mkdir, .rmdir = reiserfs_rmdir, .mknod = reiserfs_mknod, .rename = reiserfs_rename, .setattr = reiserfs_setattr, .setxattr = reiserfs_setxattr, .getxattr = reiserfs_getxattr, .listxattr = reiserfs_listxattr, .removexattr = reiserfs_removexattr, .permission = reiserfs_permission, }; /* * symlink operations.. same as page_symlink_inode_operations, with xattr * stuff added */ const struct inode_operations reiserfs_symlink_inode_operations = { .readlink = generic_readlink, .follow_link = page_follow_link_light, .put_link = page_put_link, .setattr = reiserfs_setattr, .setxattr = reiserfs_setxattr, .getxattr = reiserfs_getxattr, .listxattr = reiserfs_listxattr, .removexattr = reiserfs_removexattr, .permission = reiserfs_permission, }; /* * special file operations.. just xattr/acl stuff */ const struct inode_operations reiserfs_special_inode_operations = { .setattr = reiserfs_setattr, .setxattr = reiserfs_setxattr, .getxattr = reiserfs_getxattr, .listxattr = reiserfs_listxattr, .removexattr = reiserfs_removexattr, .permission = reiserfs_permission, };
gpl-2.0
djcapelis/linux-kernel-opensparc-fpga
drivers/staging/octeon/ethernet-rx.c
2491
15546
/********************************************************************** * Author: Cavium Networks * * Contact: support@caviumnetworks.com * This file is part of the OCTEON SDK * * Copyright (c) 2003-2010 Cavium Networks * * This file is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License, Version 2, as * published by the Free Software Foundation. * * This file is distributed in the hope that it will be useful, but * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or * NONINFRINGEMENT. See the GNU General Public License for more * details. * * You should have received a copy of the GNU General Public License * along with this file; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * or visit http://www.gnu.org/licenses/. * * This file may also be available under a different license from Cavium. 
* Contact Cavium Networks for more information **********************************************************************/ #include <linux/module.h> #include <linux/kernel.h> #include <linux/cache.h> #include <linux/cpumask.h> #include <linux/netdevice.h> #include <linux/init.h> #include <linux/etherdevice.h> #include <linux/ip.h> #include <linux/string.h> #include <linux/prefetch.h> #include <linux/ratelimit.h> #include <linux/smp.h> #include <linux/interrupt.h> #include <net/dst.h> #ifdef CONFIG_XFRM #include <linux/xfrm.h> #include <net/xfrm.h> #endif /* CONFIG_XFRM */ #include <linux/atomic.h> #include <asm/octeon/octeon.h> #include "ethernet-defines.h" #include "ethernet-mem.h" #include "ethernet-rx.h" #include "octeon-ethernet.h" #include "ethernet-util.h" #include <asm/octeon/cvmx-helper.h> #include <asm/octeon/cvmx-wqe.h> #include <asm/octeon/cvmx-fau.h> #include <asm/octeon/cvmx-pow.h> #include <asm/octeon/cvmx-pip.h> #include <asm/octeon/cvmx-scratch.h> #include <asm/octeon/cvmx-gmxx-defs.h> struct cvm_napi_wrapper { struct napi_struct napi; } ____cacheline_aligned_in_smp; static struct cvm_napi_wrapper cvm_oct_napi[NR_CPUS] __cacheline_aligned_in_smp; struct cvm_oct_core_state { int baseline_cores; /* * The number of additional cores that could be processing * input packtes. */ atomic_t available_cores; cpumask_t cpu_state; } ____cacheline_aligned_in_smp; static struct cvm_oct_core_state core_state __cacheline_aligned_in_smp; static void cvm_oct_enable_napi(void *_) { int cpu = smp_processor_id(); napi_schedule(&cvm_oct_napi[cpu].napi); } static void cvm_oct_enable_one_cpu(void) { int v; int cpu; /* Check to see if more CPUs are available for receive processing... */ v = atomic_sub_if_positive(1, &core_state.available_cores); if (v < 0) return; /* ... if a CPU is available, Turn on NAPI polling for that CPU. 
*/ for_each_online_cpu(cpu) { if (!cpu_test_and_set(cpu, core_state.cpu_state)) { v = smp_call_function_single(cpu, cvm_oct_enable_napi, NULL, 0); if (v) panic("Can't enable NAPI."); break; } } } static void cvm_oct_no_more_work(void) { int cpu = smp_processor_id(); /* * CPU zero is special. It always has the irq enabled when * waiting for incoming packets. */ if (cpu == 0) { enable_irq(OCTEON_IRQ_WORKQ0 + pow_receive_group); return; } cpu_clear(cpu, core_state.cpu_state); atomic_add(1, &core_state.available_cores); } /** * cvm_oct_do_interrupt - interrupt handler. * * The interrupt occurs whenever the POW has packets in our group. * */ static irqreturn_t cvm_oct_do_interrupt(int cpl, void *dev_id) { /* Disable the IRQ and start napi_poll. */ disable_irq_nosync(OCTEON_IRQ_WORKQ0 + pow_receive_group); cvm_oct_enable_napi(NULL); return IRQ_HANDLED; } /** * cvm_oct_check_rcv_error - process receive errors * @work: Work queue entry pointing to the packet. * * Returns Non-zero if the packet can be dropped, zero otherwise. */ static inline int cvm_oct_check_rcv_error(cvmx_wqe_t *work) { if ((work->word2.snoip.err_code == 10) && (work->len <= 64)) { /* * Ignore length errors on min size packets. Some * equipment incorrectly pads packets to 64+4FCS * instead of 60+4FCS. Note these packets still get * counted as frame errors. */ } else if (USE_10MBPS_PREAMBLE_WORKAROUND && ((work->word2.snoip.err_code == 5) || (work->word2.snoip.err_code == 7))) { /* * We received a packet with either an alignment error * or a FCS error. This may be signalling that we are * running 10Mbps with GMXX_RXX_FRM_CTL[PRE_CHK] * off. If this is the case we need to parse the * packet to determine if we can remove a non spec * preamble and generate a correct packet. 
*/ int interface = cvmx_helper_get_interface_num(work->ipprt); int index = cvmx_helper_get_interface_index_num(work->ipprt); union cvmx_gmxx_rxx_frm_ctl gmxx_rxx_frm_ctl; gmxx_rxx_frm_ctl.u64 = cvmx_read_csr(CVMX_GMXX_RXX_FRM_CTL(index, interface)); if (gmxx_rxx_frm_ctl.s.pre_chk == 0) { uint8_t *ptr = cvmx_phys_to_ptr(work->packet_ptr.s.addr); int i = 0; while (i < work->len - 1) { if (*ptr != 0x55) break; ptr++; i++; } if (*ptr == 0xd5) { /* printk_ratelimited("Port %d received 0xd5 preamble\n", work->ipprt); */ work->packet_ptr.s.addr += i + 1; work->len -= i + 5; } else if ((*ptr & 0xf) == 0xd) { /* printk_ratelimited("Port %d received 0x?d preamble\n", work->ipprt); */ work->packet_ptr.s.addr += i; work->len -= i + 4; for (i = 0; i < work->len; i++) { *ptr = ((*ptr & 0xf0) >> 4) | ((*(ptr + 1) & 0xf) << 4); ptr++; } } else { printk_ratelimited("Port %d unknown preamble, packet " "dropped\n", work->ipprt); /* cvmx_helper_dump_packet(work); */ cvm_oct_free_work(work); return 1; } } } else { printk_ratelimited("Port %d receive error code %d, packet dropped\n", work->ipprt, work->word2.snoip.err_code); cvm_oct_free_work(work); return 1; } return 0; } /** * cvm_oct_napi_poll - the NAPI poll function. * @napi: The NAPI instance, or null if called from cvm_oct_poll_controller * @budget: Maximum number of packets to receive. * * Returns the number of packets processed. 
*/ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget) { const int coreid = cvmx_get_core_num(); uint64_t old_group_mask; uint64_t old_scratch; int rx_count = 0; int did_work_request = 0; int packet_not_copied; /* Prefetch cvm_oct_device since we know we need it soon */ prefetch(cvm_oct_device); if (USE_ASYNC_IOBDMA) { /* Save scratch in case userspace is using it */ CVMX_SYNCIOBDMA; old_scratch = cvmx_scratch_read64(CVMX_SCR_SCRATCH); } /* Only allow work for our group (and preserve priorities) */ old_group_mask = cvmx_read_csr(CVMX_POW_PP_GRP_MSKX(coreid)); cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(coreid), (old_group_mask & ~0xFFFFull) | 1 << pow_receive_group); if (USE_ASYNC_IOBDMA) { cvmx_pow_work_request_async(CVMX_SCR_SCRATCH, CVMX_POW_NO_WAIT); did_work_request = 1; } while (rx_count < budget) { struct sk_buff *skb = NULL; struct sk_buff **pskb = NULL; int skb_in_hw; cvmx_wqe_t *work; if (USE_ASYNC_IOBDMA && did_work_request) work = cvmx_pow_work_response_async(CVMX_SCR_SCRATCH); else work = cvmx_pow_work_request_sync(CVMX_POW_NO_WAIT); prefetch(work); did_work_request = 0; if (work == NULL) { union cvmx_pow_wq_int wq_int; wq_int.u64 = 0; wq_int.s.iq_dis = 1 << pow_receive_group; wq_int.s.wq_int = 1 << pow_receive_group; cvmx_write_csr(CVMX_POW_WQ_INT, wq_int.u64); break; } pskb = (struct sk_buff **)(cvm_oct_get_buffer_ptr(work->packet_ptr) - sizeof(void *)); prefetch(pskb); if (USE_ASYNC_IOBDMA && rx_count < (budget - 1)) { cvmx_pow_work_request_async_nocheck(CVMX_SCR_SCRATCH, CVMX_POW_NO_WAIT); did_work_request = 1; } if (rx_count == 0) { /* * First time through, see if there is enough * work waiting to merit waking another * CPU. 
*/ union cvmx_pow_wq_int_cntx counts; int backlog; int cores_in_use = core_state.baseline_cores - atomic_read(&core_state.available_cores); counts.u64 = cvmx_read_csr(CVMX_POW_WQ_INT_CNTX(pow_receive_group)); backlog = counts.s.iq_cnt + counts.s.ds_cnt; if (backlog > budget * cores_in_use && napi != NULL) cvm_oct_enable_one_cpu(); } skb_in_hw = USE_SKBUFFS_IN_HW && work->word2.s.bufs == 1; if (likely(skb_in_hw)) { skb = *pskb; prefetch(&skb->head); prefetch(&skb->len); } prefetch(cvm_oct_device[work->ipprt]); /* Immediately throw away all packets with receive errors */ if (unlikely(work->word2.snoip.rcv_error)) { if (cvm_oct_check_rcv_error(work)) continue; } /* * We can only use the zero copy path if skbuffs are * in the FPA pool and the packet fits in a single * buffer. */ if (likely(skb_in_hw)) { skb->data = skb->head + work->packet_ptr.s.addr - cvmx_ptr_to_phys(skb->head); prefetch(skb->data); skb->len = work->len; skb_set_tail_pointer(skb, skb->len); packet_not_copied = 1; } else { /* * We have to copy the packet. First allocate * an skbuff for it. */ skb = dev_alloc_skb(work->len); if (!skb) { printk_ratelimited("Port %d failed to allocate " "skbuff, packet dropped\n", work->ipprt); cvm_oct_free_work(work); continue; } /* * Check if we've received a packet that was * entirely stored in the work entry. */ if (unlikely(work->word2.s.bufs == 0)) { uint8_t *ptr = work->packet_data; if (likely(!work->word2.s.not_IP)) { /* * The beginning of the packet * moves for IP packets. */ if (work->word2.s.is_v6) ptr += 2; else ptr += 6; } memcpy(skb_put(skb, work->len), ptr, work->len); /* No packet buffers to free */ } else { int segments = work->word2.s.bufs; union cvmx_buf_ptr segment_ptr = work->packet_ptr; int len = work->len; while (segments--) { union cvmx_buf_ptr next_ptr = *(union cvmx_buf_ptr *)cvmx_phys_to_ptr(segment_ptr.s.addr - 8); /* * Octeon Errata PKI-100: The segment size is * wrong. 
Until it is fixed, calculate the * segment size based on the packet pool * buffer size. When it is fixed, the * following line should be replaced with this * one: int segment_size = * segment_ptr.s.size; */ int segment_size = CVMX_FPA_PACKET_POOL_SIZE - (segment_ptr.s.addr - (((segment_ptr.s.addr >> 7) - segment_ptr.s.back) << 7)); /* * Don't copy more than what * is left in the packet. */ if (segment_size > len) segment_size = len; /* Copy the data into the packet */ memcpy(skb_put(skb, segment_size), cvmx_phys_to_ptr(segment_ptr.s.addr), segment_size); len -= segment_size; segment_ptr = next_ptr; } } packet_not_copied = 0; } if (likely((work->ipprt < TOTAL_NUMBER_OF_PORTS) && cvm_oct_device[work->ipprt])) { struct net_device *dev = cvm_oct_device[work->ipprt]; struct octeon_ethernet *priv = netdev_priv(dev); /* * Only accept packets for devices that are * currently up. */ if (likely(dev->flags & IFF_UP)) { skb->protocol = eth_type_trans(skb, dev); skb->dev = dev; if (unlikely(work->word2.s.not_IP || work->word2.s.IP_exc || work->word2.s.L4_error || !work->word2.s.tcp_or_udp)) skb->ip_summed = CHECKSUM_NONE; else skb->ip_summed = CHECKSUM_UNNECESSARY; /* Increment RX stats for virtual ports */ if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) { #ifdef CONFIG_64BIT atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets); atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes); #else atomic_add(1, (atomic_t *)&priv->stats.rx_packets); atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes); #endif } netif_receive_skb(skb); rx_count++; } else { /* Drop any packet received for a device that isn't up */ /* printk_ratelimited("%s: Device not up, packet dropped\n", dev->name); */ #ifdef CONFIG_64BIT atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped); #else atomic_add(1, (atomic_t *)&priv->stats.rx_dropped); #endif dev_kfree_skb_irq(skb); } } else { /* * Drop any packet received for a device that * doesn't exist. 
*/ printk_ratelimited("Port %d not controlled by Linux, packet dropped\n", work->ipprt); dev_kfree_skb_irq(skb); } /* * Check to see if the skbuff and work share the same * packet buffer. */ if (USE_SKBUFFS_IN_HW && likely(packet_not_copied)) { /* * This buffer needs to be replaced, increment * the number of buffers we need to free by * one. */ cvmx_fau_atomic_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 1); cvmx_fpa_free(work, CVMX_FPA_WQE_POOL, DONT_WRITEBACK(1)); } else { cvm_oct_free_work(work); } } /* Restore the original POW group mask */ cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(coreid), old_group_mask); if (USE_ASYNC_IOBDMA) { /* Restore the scratch area */ cvmx_scratch_write64(CVMX_SCR_SCRATCH, old_scratch); } cvm_oct_rx_refill_pool(0); if (rx_count < budget && napi != NULL) { /* No more work */ napi_complete(napi); cvm_oct_no_more_work(); } return rx_count; } #ifdef CONFIG_NET_POLL_CONTROLLER /** * cvm_oct_poll_controller - poll for receive packets * device. * * @dev: Device to poll. Unused */ void cvm_oct_poll_controller(struct net_device *dev) { cvm_oct_napi_poll(NULL, 16); } #endif void cvm_oct_rx_initialize(void) { int i; struct net_device *dev_for_napi = NULL; union cvmx_pow_wq_int_thrx int_thr; union cvmx_pow_wq_int_pc int_pc; for (i = 0; i < TOTAL_NUMBER_OF_PORTS; i++) { if (cvm_oct_device[i]) { dev_for_napi = cvm_oct_device[i]; break; } } if (NULL == dev_for_napi) panic("No net_devices were allocated."); if (max_rx_cpus > 1 && max_rx_cpus < num_online_cpus()) atomic_set(&core_state.available_cores, max_rx_cpus); else atomic_set(&core_state.available_cores, num_online_cpus()); core_state.baseline_cores = atomic_read(&core_state.available_cores); core_state.cpu_state = CPU_MASK_NONE; for_each_possible_cpu(i) { netif_napi_add(dev_for_napi, &cvm_oct_napi[i].napi, cvm_oct_napi_poll, rx_napi_weight); napi_enable(&cvm_oct_napi[i].napi); } /* Register an IRQ hander for to receive POW interrupts */ i = request_irq(OCTEON_IRQ_WORKQ0 + pow_receive_group, 
cvm_oct_do_interrupt, 0, "Ethernet", cvm_oct_device); if (i) panic("Could not acquire Ethernet IRQ %d\n", OCTEON_IRQ_WORKQ0 + pow_receive_group); disable_irq_nosync(OCTEON_IRQ_WORKQ0 + pow_receive_group); int_thr.u64 = 0; int_thr.s.tc_en = 1; int_thr.s.tc_thr = 1; /* Enable POW interrupt when our port has at least one packet */ cvmx_write_csr(CVMX_POW_WQ_INT_THRX(pow_receive_group), int_thr.u64); int_pc.u64 = 0; int_pc.s.pc_thr = 5; cvmx_write_csr(CVMX_POW_WQ_INT_PC, int_pc.u64); /* Scheduld NAPI now. This will indirectly enable interrupts. */ cvm_oct_enable_one_cpu(); } void cvm_oct_rx_shutdown(void) { int i; /* Shutdown all of the NAPIs */ for_each_possible_cpu(i) netif_napi_del(&cvm_oct_napi[i].napi); }
gpl-2.0
Oebbler/elite-boeffla-kernel-cm12.1-i9300
drivers/regulator/tps6507x-regulator.c
2747
16204
/* * tps6507x-regulator.c * * Regulator driver for TPS65073 PMIC * * Copyright (C) 2009 Texas Instrument Incorporated - http://www.ti.com/ * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation version 2. * * This program is distributed "as is" WITHOUT ANY WARRANTY of any kind, * whether express or implied; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/err.h> #include <linux/platform_device.h> #include <linux/regulator/driver.h> #include <linux/regulator/machine.h> #include <linux/regulator/tps6507x.h> #include <linux/delay.h> #include <linux/slab.h> #include <linux/mfd/tps6507x.h> /* DCDC's */ #define TPS6507X_DCDC_1 0 #define TPS6507X_DCDC_2 1 #define TPS6507X_DCDC_3 2 /* LDOs */ #define TPS6507X_LDO_1 3 #define TPS6507X_LDO_2 4 #define TPS6507X_MAX_REG_ID TPS6507X_LDO_2 /* Number of step-down converters available */ #define TPS6507X_NUM_DCDC 3 /* Number of LDO voltage regulators available */ #define TPS6507X_NUM_LDO 2 /* Number of total regulators available */ #define TPS6507X_NUM_REGULATOR (TPS6507X_NUM_DCDC + TPS6507X_NUM_LDO) /* Supported voltage values for regulators (in milliVolts) */ static const u16 VDCDCx_VSEL_table[] = { 725, 750, 775, 800, 825, 850, 875, 900, 925, 950, 975, 1000, 1025, 1050, 1075, 1100, 1125, 1150, 1175, 1200, 1225, 1250, 1275, 1300, 1325, 1350, 1375, 1400, 1425, 1450, 1475, 1500, 1550, 1600, 1650, 1700, 1750, 1800, 1850, 1900, 1950, 2000, 2050, 2100, 2150, 2200, 2250, 2300, 2350, 2400, 2450, 2500, 2550, 2600, 2650, 2700, 2750, 2800, 2850, 2900, 3000, 3100, 3200, 3300, }; static const u16 LDO1_VSEL_table[] = { 1000, 1100, 1200, 1250, 1300, 1350, 1400, 1500, 1600, 1800, 2500, 2750, 2800, 3000, 3100, 3300, }; static const u16 
LDO2_VSEL_table[] = { 725, 750, 775, 800, 825, 850, 875, 900, 925, 950, 975, 1000, 1025, 1050, 1075, 1100, 1125, 1150, 1175, 1200, 1225, 1250, 1275, 1300, 1325, 1350, 1375, 1400, 1425, 1450, 1475, 1500, 1550, 1600, 1650, 1700, 1750, 1800, 1850, 1900, 1950, 2000, 2050, 2100, 2150, 2200, 2250, 2300, 2350, 2400, 2450, 2500, 2550, 2600, 2650, 2700, 2750, 2800, 2850, 2900, 3000, 3100, 3200, 3300, }; static unsigned int num_voltages[] = {ARRAY_SIZE(VDCDCx_VSEL_table), ARRAY_SIZE(VDCDCx_VSEL_table), ARRAY_SIZE(VDCDCx_VSEL_table), ARRAY_SIZE(LDO1_VSEL_table), ARRAY_SIZE(LDO2_VSEL_table)}; struct tps_info { const char *name; unsigned min_uV; unsigned max_uV; u8 table_len; const u16 *table; /* Does DCDC high or the low register defines output voltage? */ bool defdcdc_default; }; static struct tps_info tps6507x_pmic_regs[] = { { .name = "VDCDC1", .min_uV = 725000, .max_uV = 3300000, .table_len = ARRAY_SIZE(VDCDCx_VSEL_table), .table = VDCDCx_VSEL_table, }, { .name = "VDCDC2", .min_uV = 725000, .max_uV = 3300000, .table_len = ARRAY_SIZE(VDCDCx_VSEL_table), .table = VDCDCx_VSEL_table, }, { .name = "VDCDC3", .min_uV = 725000, .max_uV = 3300000, .table_len = ARRAY_SIZE(VDCDCx_VSEL_table), .table = VDCDCx_VSEL_table, }, { .name = "LDO1", .min_uV = 1000000, .max_uV = 3300000, .table_len = ARRAY_SIZE(LDO1_VSEL_table), .table = LDO1_VSEL_table, }, { .name = "LDO2", .min_uV = 725000, .max_uV = 3300000, .table_len = ARRAY_SIZE(LDO2_VSEL_table), .table = LDO2_VSEL_table, }, }; struct tps6507x_pmic { struct regulator_desc desc[TPS6507X_NUM_REGULATOR]; struct tps6507x_dev *mfd; struct regulator_dev *rdev[TPS6507X_NUM_REGULATOR]; struct tps_info *info[TPS6507X_NUM_REGULATOR]; struct mutex io_lock; }; static inline int tps6507x_pmic_read(struct tps6507x_pmic *tps, u8 reg) { u8 val; int err; err = tps->mfd->read_dev(tps->mfd, reg, 1, &val); if (err) return err; return val; } static inline int tps6507x_pmic_write(struct tps6507x_pmic *tps, u8 reg, u8 val) { return 
tps->mfd->write_dev(tps->mfd, reg, 1, &val); } static int tps6507x_pmic_set_bits(struct tps6507x_pmic *tps, u8 reg, u8 mask) { int err, data; mutex_lock(&tps->io_lock); data = tps6507x_pmic_read(tps, reg); if (data < 0) { dev_err(tps->mfd->dev, "Read from reg 0x%x failed\n", reg); err = data; goto out; } data |= mask; err = tps6507x_pmic_write(tps, reg, data); if (err) dev_err(tps->mfd->dev, "Write for reg 0x%x failed\n", reg); out: mutex_unlock(&tps->io_lock); return err; } static int tps6507x_pmic_clear_bits(struct tps6507x_pmic *tps, u8 reg, u8 mask) { int err, data; mutex_lock(&tps->io_lock); data = tps6507x_pmic_read(tps, reg); if (data < 0) { dev_err(tps->mfd->dev, "Read from reg 0x%x failed\n", reg); err = data; goto out; } data &= ~mask; err = tps6507x_pmic_write(tps, reg, data); if (err) dev_err(tps->mfd->dev, "Write for reg 0x%x failed\n", reg); out: mutex_unlock(&tps->io_lock); return err; } static int tps6507x_pmic_reg_read(struct tps6507x_pmic *tps, u8 reg) { int data; mutex_lock(&tps->io_lock); data = tps6507x_pmic_read(tps, reg); if (data < 0) dev_err(tps->mfd->dev, "Read from reg 0x%x failed\n", reg); mutex_unlock(&tps->io_lock); return data; } static int tps6507x_pmic_reg_write(struct tps6507x_pmic *tps, u8 reg, u8 val) { int err; mutex_lock(&tps->io_lock); err = tps6507x_pmic_write(tps, reg, val); if (err < 0) dev_err(tps->mfd->dev, "Write for reg 0x%x failed\n", reg); mutex_unlock(&tps->io_lock); return err; } static int tps6507x_pmic_dcdc_is_enabled(struct regulator_dev *dev) { struct tps6507x_pmic *tps = rdev_get_drvdata(dev); int data, dcdc = rdev_get_id(dev); u8 shift; if (dcdc < TPS6507X_DCDC_1 || dcdc > TPS6507X_DCDC_3) return -EINVAL; shift = TPS6507X_MAX_REG_ID - dcdc; data = tps6507x_pmic_reg_read(tps, TPS6507X_REG_CON_CTRL1); if (data < 0) return data; else return (data & 1<<shift) ? 
1 : 0; } static int tps6507x_pmic_ldo_is_enabled(struct regulator_dev *dev) { struct tps6507x_pmic *tps = rdev_get_drvdata(dev); int data, ldo = rdev_get_id(dev); u8 shift; if (ldo < TPS6507X_LDO_1 || ldo > TPS6507X_LDO_2) return -EINVAL; shift = TPS6507X_MAX_REG_ID - ldo; data = tps6507x_pmic_reg_read(tps, TPS6507X_REG_CON_CTRL1); if (data < 0) return data; else return (data & 1<<shift) ? 1 : 0; } static int tps6507x_pmic_dcdc_enable(struct regulator_dev *dev) { struct tps6507x_pmic *tps = rdev_get_drvdata(dev); int dcdc = rdev_get_id(dev); u8 shift; if (dcdc < TPS6507X_DCDC_1 || dcdc > TPS6507X_DCDC_3) return -EINVAL; shift = TPS6507X_MAX_REG_ID - dcdc; return tps6507x_pmic_set_bits(tps, TPS6507X_REG_CON_CTRL1, 1 << shift); } static int tps6507x_pmic_dcdc_disable(struct regulator_dev *dev) { struct tps6507x_pmic *tps = rdev_get_drvdata(dev); int dcdc = rdev_get_id(dev); u8 shift; if (dcdc < TPS6507X_DCDC_1 || dcdc > TPS6507X_DCDC_3) return -EINVAL; shift = TPS6507X_MAX_REG_ID - dcdc; return tps6507x_pmic_clear_bits(tps, TPS6507X_REG_CON_CTRL1, 1 << shift); } static int tps6507x_pmic_ldo_enable(struct regulator_dev *dev) { struct tps6507x_pmic *tps = rdev_get_drvdata(dev); int ldo = rdev_get_id(dev); u8 shift; if (ldo < TPS6507X_LDO_1 || ldo > TPS6507X_LDO_2) return -EINVAL; shift = TPS6507X_MAX_REG_ID - ldo; return tps6507x_pmic_set_bits(tps, TPS6507X_REG_CON_CTRL1, 1 << shift); } static int tps6507x_pmic_ldo_disable(struct regulator_dev *dev) { struct tps6507x_pmic *tps = rdev_get_drvdata(dev); int ldo = rdev_get_id(dev); u8 shift; if (ldo < TPS6507X_LDO_1 || ldo > TPS6507X_LDO_2) return -EINVAL; shift = TPS6507X_MAX_REG_ID - ldo; return tps6507x_pmic_clear_bits(tps, TPS6507X_REG_CON_CTRL1, 1 << shift); } static int tps6507x_pmic_dcdc_get_voltage(struct regulator_dev *dev) { struct tps6507x_pmic *tps = rdev_get_drvdata(dev); int data, dcdc = rdev_get_id(dev); u8 reg; switch (dcdc) { case TPS6507X_DCDC_1: reg = TPS6507X_REG_DEFDCDC1; break; case TPS6507X_DCDC_2: 
if (tps->info[dcdc]->defdcdc_default) reg = TPS6507X_REG_DEFDCDC2_HIGH; else reg = TPS6507X_REG_DEFDCDC2_LOW; break; case TPS6507X_DCDC_3: if (tps->info[dcdc]->defdcdc_default) reg = TPS6507X_REG_DEFDCDC3_HIGH; else reg = TPS6507X_REG_DEFDCDC3_LOW; break; default: return -EINVAL; } data = tps6507x_pmic_reg_read(tps, reg); if (data < 0) return data; data &= TPS6507X_DEFDCDCX_DCDC_MASK; return tps->info[dcdc]->table[data] * 1000; } static int tps6507x_pmic_dcdc_set_voltage(struct regulator_dev *dev, int min_uV, int max_uV, unsigned *selector) { struct tps6507x_pmic *tps = rdev_get_drvdata(dev); int data, vsel, dcdc = rdev_get_id(dev); u8 reg; switch (dcdc) { case TPS6507X_DCDC_1: reg = TPS6507X_REG_DEFDCDC1; break; case TPS6507X_DCDC_2: if (tps->info[dcdc]->defdcdc_default) reg = TPS6507X_REG_DEFDCDC2_HIGH; else reg = TPS6507X_REG_DEFDCDC2_LOW; break; case TPS6507X_DCDC_3: if (tps->info[dcdc]->defdcdc_default) reg = TPS6507X_REG_DEFDCDC3_HIGH; else reg = TPS6507X_REG_DEFDCDC3_LOW; break; default: return -EINVAL; } if (min_uV < tps->info[dcdc]->min_uV || min_uV > tps->info[dcdc]->max_uV) return -EINVAL; if (max_uV < tps->info[dcdc]->min_uV || max_uV > tps->info[dcdc]->max_uV) return -EINVAL; for (vsel = 0; vsel < tps->info[dcdc]->table_len; vsel++) { int mV = tps->info[dcdc]->table[vsel]; int uV = mV * 1000; /* Break at the first in-range value */ if (min_uV <= uV && uV <= max_uV) break; } /* write to the register in case we found a match */ if (vsel == tps->info[dcdc]->table_len) return -EINVAL; *selector = vsel; data = tps6507x_pmic_reg_read(tps, reg); if (data < 0) return data; data &= ~TPS6507X_DEFDCDCX_DCDC_MASK; data |= vsel; return tps6507x_pmic_reg_write(tps, reg, data); } static int tps6507x_pmic_ldo_get_voltage(struct regulator_dev *dev) { struct tps6507x_pmic *tps = rdev_get_drvdata(dev); int data, ldo = rdev_get_id(dev); u8 reg, mask; if (ldo < TPS6507X_LDO_1 || ldo > TPS6507X_LDO_2) return -EINVAL; else { reg = (ldo == TPS6507X_LDO_1 ? 
TPS6507X_REG_LDO_CTRL1 : TPS6507X_REG_DEFLDO2); mask = (ldo == TPS6507X_LDO_1 ? TPS6507X_REG_LDO_CTRL1_LDO1_MASK : TPS6507X_REG_DEFLDO2_LDO2_MASK); } data = tps6507x_pmic_reg_read(tps, reg); if (data < 0) return data; data &= mask; return tps->info[ldo]->table[data] * 1000; } static int tps6507x_pmic_ldo_set_voltage(struct regulator_dev *dev, int min_uV, int max_uV, unsigned *selector) { struct tps6507x_pmic *tps = rdev_get_drvdata(dev); int data, vsel, ldo = rdev_get_id(dev); u8 reg, mask; if (ldo < TPS6507X_LDO_1 || ldo > TPS6507X_LDO_2) return -EINVAL; else { reg = (ldo == TPS6507X_LDO_1 ? TPS6507X_REG_LDO_CTRL1 : TPS6507X_REG_DEFLDO2); mask = (ldo == TPS6507X_LDO_1 ? TPS6507X_REG_LDO_CTRL1_LDO1_MASK : TPS6507X_REG_DEFLDO2_LDO2_MASK); } if (min_uV < tps->info[ldo]->min_uV || min_uV > tps->info[ldo]->max_uV) return -EINVAL; if (max_uV < tps->info[ldo]->min_uV || max_uV > tps->info[ldo]->max_uV) return -EINVAL; for (vsel = 0; vsel < tps->info[ldo]->table_len; vsel++) { int mV = tps->info[ldo]->table[vsel]; int uV = mV * 1000; /* Break at the first in-range value */ if (min_uV <= uV && uV <= max_uV) break; } if (vsel == tps->info[ldo]->table_len) return -EINVAL; *selector = vsel; data = tps6507x_pmic_reg_read(tps, reg); if (data < 0) return data; data &= ~mask; data |= vsel; return tps6507x_pmic_reg_write(tps, reg, data); } static int tps6507x_pmic_dcdc_list_voltage(struct regulator_dev *dev, unsigned selector) { struct tps6507x_pmic *tps = rdev_get_drvdata(dev); int dcdc = rdev_get_id(dev); if (dcdc < TPS6507X_DCDC_1 || dcdc > TPS6507X_DCDC_3) return -EINVAL; if (selector >= tps->info[dcdc]->table_len) return -EINVAL; else return tps->info[dcdc]->table[selector] * 1000; } static int tps6507x_pmic_ldo_list_voltage(struct regulator_dev *dev, unsigned selector) { struct tps6507x_pmic *tps = rdev_get_drvdata(dev); int ldo = rdev_get_id(dev); if (ldo < TPS6507X_LDO_1 || ldo > TPS6507X_LDO_2) return -EINVAL; if (selector >= tps->info[ldo]->table_len) return -EINVAL; 
else return tps->info[ldo]->table[selector] * 1000; } /* Operations permitted on VDCDCx */ static struct regulator_ops tps6507x_pmic_dcdc_ops = { .is_enabled = tps6507x_pmic_dcdc_is_enabled, .enable = tps6507x_pmic_dcdc_enable, .disable = tps6507x_pmic_dcdc_disable, .get_voltage = tps6507x_pmic_dcdc_get_voltage, .set_voltage = tps6507x_pmic_dcdc_set_voltage, .list_voltage = tps6507x_pmic_dcdc_list_voltage, }; /* Operations permitted on LDOx */ static struct regulator_ops tps6507x_pmic_ldo_ops = { .is_enabled = tps6507x_pmic_ldo_is_enabled, .enable = tps6507x_pmic_ldo_enable, .disable = tps6507x_pmic_ldo_disable, .get_voltage = tps6507x_pmic_ldo_get_voltage, .set_voltage = tps6507x_pmic_ldo_set_voltage, .list_voltage = tps6507x_pmic_ldo_list_voltage, }; static __devinit int tps6507x_pmic_probe(struct platform_device *pdev) { struct tps6507x_dev *tps6507x_dev = dev_get_drvdata(pdev->dev.parent); struct tps_info *info = &tps6507x_pmic_regs[0]; struct regulator_init_data *init_data; struct regulator_dev *rdev; struct tps6507x_pmic *tps; struct tps6507x_board *tps_board; int i; int error; /** * tps_board points to pmic related constants * coming from the board-evm file. */ tps_board = dev_get_platdata(tps6507x_dev->dev); if (!tps_board) return -EINVAL; /** * init_data points to array of regulator_init structures * coming from the board-evm file. 
*/ init_data = tps_board->tps6507x_pmic_init_data; if (!init_data) return -EINVAL; tps = kzalloc(sizeof(*tps), GFP_KERNEL); if (!tps) return -ENOMEM; mutex_init(&tps->io_lock); /* common for all regulators */ tps->mfd = tps6507x_dev; for (i = 0; i < TPS6507X_NUM_REGULATOR; i++, info++, init_data++) { /* Register the regulators */ tps->info[i] = info; if (init_data->driver_data) { struct tps6507x_reg_platform_data *data = init_data->driver_data; tps->info[i]->defdcdc_default = data->defdcdc_default; } tps->desc[i].name = info->name; tps->desc[i].id = i; tps->desc[i].n_voltages = num_voltages[i]; tps->desc[i].ops = (i > TPS6507X_DCDC_3 ? &tps6507x_pmic_ldo_ops : &tps6507x_pmic_dcdc_ops); tps->desc[i].type = REGULATOR_VOLTAGE; tps->desc[i].owner = THIS_MODULE; rdev = regulator_register(&tps->desc[i], tps6507x_dev->dev, init_data, tps); if (IS_ERR(rdev)) { dev_err(tps6507x_dev->dev, "failed to register %s regulator\n", pdev->name); error = PTR_ERR(rdev); goto fail; } /* Save regulator for cleanup */ tps->rdev[i] = rdev; } tps6507x_dev->pmic = tps; platform_set_drvdata(pdev, tps6507x_dev); return 0; fail: while (--i >= 0) regulator_unregister(tps->rdev[i]); kfree(tps); return error; } static int __devexit tps6507x_pmic_remove(struct platform_device *pdev) { struct tps6507x_dev *tps6507x_dev = platform_get_drvdata(pdev); struct tps6507x_pmic *tps = tps6507x_dev->pmic; int i; for (i = 0; i < TPS6507X_NUM_REGULATOR; i++) regulator_unregister(tps->rdev[i]); kfree(tps); return 0; } static struct platform_driver tps6507x_pmic_driver = { .driver = { .name = "tps6507x-pmic", .owner = THIS_MODULE, }, .probe = tps6507x_pmic_probe, .remove = __devexit_p(tps6507x_pmic_remove), }; /** * tps6507x_pmic_init * * Module init function */ static int __init tps6507x_pmic_init(void) { return platform_driver_register(&tps6507x_pmic_driver); } subsys_initcall(tps6507x_pmic_init); /** * tps6507x_pmic_cleanup * * Module exit function */ static void __exit tps6507x_pmic_cleanup(void) { 
platform_driver_unregister(&tps6507x_pmic_driver); } module_exit(tps6507x_pmic_cleanup); MODULE_AUTHOR("Texas Instruments"); MODULE_DESCRIPTION("TPS6507x voltage regulator driver"); MODULE_LICENSE("GPL v2"); MODULE_ALIAS("platform:tps6507x-pmic");
gpl-2.0
miv8229/android_kernel_dns_msm8610
drivers/net/ethernet/sfc/ethtool.c
3259
33161
/**************************************************************************** * Driver for Solarflare Solarstorm network controllers and boards * Copyright 2005-2006 Fen Systems Ltd. * Copyright 2006-2010 Solarflare Communications Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published * by the Free Software Foundation, incorporated herein by reference. */ #include <linux/netdevice.h> #include <linux/ethtool.h> #include <linux/rtnetlink.h> #include <linux/in.h> #include "net_driver.h" #include "workarounds.h" #include "selftest.h" #include "efx.h" #include "filter.h" #include "nic.h" struct ethtool_string { char name[ETH_GSTRING_LEN]; }; struct efx_ethtool_stat { const char *name; enum { EFX_ETHTOOL_STAT_SOURCE_mac_stats, EFX_ETHTOOL_STAT_SOURCE_nic, EFX_ETHTOOL_STAT_SOURCE_channel, EFX_ETHTOOL_STAT_SOURCE_tx_queue } source; unsigned offset; u64(*get_stat) (void *field); /* Reader function */ }; /* Initialiser for a struct #efx_ethtool_stat with type-checking */ #define EFX_ETHTOOL_STAT(stat_name, source_name, field, field_type, \ get_stat_function) { \ .name = #stat_name, \ .source = EFX_ETHTOOL_STAT_SOURCE_##source_name, \ .offset = ((((field_type *) 0) == \ &((struct efx_##source_name *)0)->field) ? 
\ offsetof(struct efx_##source_name, field) : \ offsetof(struct efx_##source_name, field)), \ .get_stat = get_stat_function, \ } static u64 efx_get_uint_stat(void *field) { return *(unsigned int *)field; } static u64 efx_get_u64_stat(void *field) { return *(u64 *) field; } static u64 efx_get_atomic_stat(void *field) { return atomic_read((atomic_t *) field); } #define EFX_ETHTOOL_U64_MAC_STAT(field) \ EFX_ETHTOOL_STAT(field, mac_stats, field, \ u64, efx_get_u64_stat) #define EFX_ETHTOOL_UINT_NIC_STAT(name) \ EFX_ETHTOOL_STAT(name, nic, n_##name, \ unsigned int, efx_get_uint_stat) #define EFX_ETHTOOL_ATOMIC_NIC_ERROR_STAT(field) \ EFX_ETHTOOL_STAT(field, nic, field, \ atomic_t, efx_get_atomic_stat) #define EFX_ETHTOOL_UINT_CHANNEL_STAT(field) \ EFX_ETHTOOL_STAT(field, channel, n_##field, \ unsigned int, efx_get_uint_stat) #define EFX_ETHTOOL_UINT_TXQ_STAT(field) \ EFX_ETHTOOL_STAT(tx_##field, tx_queue, field, \ unsigned int, efx_get_uint_stat) static const struct efx_ethtool_stat efx_ethtool_stats[] = { EFX_ETHTOOL_U64_MAC_STAT(tx_bytes), EFX_ETHTOOL_U64_MAC_STAT(tx_good_bytes), EFX_ETHTOOL_U64_MAC_STAT(tx_bad_bytes), EFX_ETHTOOL_U64_MAC_STAT(tx_packets), EFX_ETHTOOL_U64_MAC_STAT(tx_bad), EFX_ETHTOOL_U64_MAC_STAT(tx_pause), EFX_ETHTOOL_U64_MAC_STAT(tx_control), EFX_ETHTOOL_U64_MAC_STAT(tx_unicast), EFX_ETHTOOL_U64_MAC_STAT(tx_multicast), EFX_ETHTOOL_U64_MAC_STAT(tx_broadcast), EFX_ETHTOOL_U64_MAC_STAT(tx_lt64), EFX_ETHTOOL_U64_MAC_STAT(tx_64), EFX_ETHTOOL_U64_MAC_STAT(tx_65_to_127), EFX_ETHTOOL_U64_MAC_STAT(tx_128_to_255), EFX_ETHTOOL_U64_MAC_STAT(tx_256_to_511), EFX_ETHTOOL_U64_MAC_STAT(tx_512_to_1023), EFX_ETHTOOL_U64_MAC_STAT(tx_1024_to_15xx), EFX_ETHTOOL_U64_MAC_STAT(tx_15xx_to_jumbo), EFX_ETHTOOL_U64_MAC_STAT(tx_gtjumbo), EFX_ETHTOOL_U64_MAC_STAT(tx_collision), EFX_ETHTOOL_U64_MAC_STAT(tx_single_collision), EFX_ETHTOOL_U64_MAC_STAT(tx_multiple_collision), EFX_ETHTOOL_U64_MAC_STAT(tx_excessive_collision), EFX_ETHTOOL_U64_MAC_STAT(tx_deferred), 
EFX_ETHTOOL_U64_MAC_STAT(tx_late_collision), EFX_ETHTOOL_U64_MAC_STAT(tx_excessive_deferred), EFX_ETHTOOL_U64_MAC_STAT(tx_non_tcpudp), EFX_ETHTOOL_U64_MAC_STAT(tx_mac_src_error), EFX_ETHTOOL_U64_MAC_STAT(tx_ip_src_error), EFX_ETHTOOL_UINT_TXQ_STAT(tso_bursts), EFX_ETHTOOL_UINT_TXQ_STAT(tso_long_headers), EFX_ETHTOOL_UINT_TXQ_STAT(tso_packets), EFX_ETHTOOL_UINT_TXQ_STAT(pushes), EFX_ETHTOOL_U64_MAC_STAT(rx_bytes), EFX_ETHTOOL_U64_MAC_STAT(rx_good_bytes), EFX_ETHTOOL_U64_MAC_STAT(rx_bad_bytes), EFX_ETHTOOL_U64_MAC_STAT(rx_packets), EFX_ETHTOOL_U64_MAC_STAT(rx_good), EFX_ETHTOOL_U64_MAC_STAT(rx_bad), EFX_ETHTOOL_U64_MAC_STAT(rx_pause), EFX_ETHTOOL_U64_MAC_STAT(rx_control), EFX_ETHTOOL_U64_MAC_STAT(rx_unicast), EFX_ETHTOOL_U64_MAC_STAT(rx_multicast), EFX_ETHTOOL_U64_MAC_STAT(rx_broadcast), EFX_ETHTOOL_U64_MAC_STAT(rx_lt64), EFX_ETHTOOL_U64_MAC_STAT(rx_64), EFX_ETHTOOL_U64_MAC_STAT(rx_65_to_127), EFX_ETHTOOL_U64_MAC_STAT(rx_128_to_255), EFX_ETHTOOL_U64_MAC_STAT(rx_256_to_511), EFX_ETHTOOL_U64_MAC_STAT(rx_512_to_1023), EFX_ETHTOOL_U64_MAC_STAT(rx_1024_to_15xx), EFX_ETHTOOL_U64_MAC_STAT(rx_15xx_to_jumbo), EFX_ETHTOOL_U64_MAC_STAT(rx_gtjumbo), EFX_ETHTOOL_U64_MAC_STAT(rx_bad_lt64), EFX_ETHTOOL_U64_MAC_STAT(rx_bad_64_to_15xx), EFX_ETHTOOL_U64_MAC_STAT(rx_bad_15xx_to_jumbo), EFX_ETHTOOL_U64_MAC_STAT(rx_bad_gtjumbo), EFX_ETHTOOL_U64_MAC_STAT(rx_overflow), EFX_ETHTOOL_U64_MAC_STAT(rx_missed), EFX_ETHTOOL_U64_MAC_STAT(rx_false_carrier), EFX_ETHTOOL_U64_MAC_STAT(rx_symbol_error), EFX_ETHTOOL_U64_MAC_STAT(rx_align_error), EFX_ETHTOOL_U64_MAC_STAT(rx_length_error), EFX_ETHTOOL_U64_MAC_STAT(rx_internal_error), EFX_ETHTOOL_UINT_NIC_STAT(rx_nodesc_drop_cnt), EFX_ETHTOOL_ATOMIC_NIC_ERROR_STAT(rx_reset), EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_tobe_disc), EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_ip_hdr_chksum_err), EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_tcp_udp_chksum_err), EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_mcast_mismatch), EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_frm_trunc), }; /* Number of ethtool statistics */ 
#define EFX_ETHTOOL_NUM_STATS ARRAY_SIZE(efx_ethtool_stats) #define EFX_ETHTOOL_EEPROM_MAGIC 0xEFAB /************************************************************************** * * Ethtool operations * ************************************************************************** */ /* Identify device by flashing LEDs */ static int efx_ethtool_phys_id(struct net_device *net_dev, enum ethtool_phys_id_state state) { struct efx_nic *efx = netdev_priv(net_dev); enum efx_led_mode mode = EFX_LED_DEFAULT; switch (state) { case ETHTOOL_ID_ON: mode = EFX_LED_ON; break; case ETHTOOL_ID_OFF: mode = EFX_LED_OFF; break; case ETHTOOL_ID_INACTIVE: mode = EFX_LED_DEFAULT; break; case ETHTOOL_ID_ACTIVE: return 1; /* cycle on/off once per second */ } efx->type->set_id_led(efx, mode); return 0; } /* This must be called with rtnl_lock held. */ static int efx_ethtool_get_settings(struct net_device *net_dev, struct ethtool_cmd *ecmd) { struct efx_nic *efx = netdev_priv(net_dev); struct efx_link_state *link_state = &efx->link_state; mutex_lock(&efx->mac_lock); efx->phy_op->get_settings(efx, ecmd); mutex_unlock(&efx->mac_lock); /* GMAC does not support 1000Mbps HD */ ecmd->supported &= ~SUPPORTED_1000baseT_Half; /* Both MACs support pause frames (bidirectional and respond-only) */ ecmd->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause; if (LOOPBACK_INTERNAL(efx)) { ethtool_cmd_speed_set(ecmd, link_state->speed); ecmd->duplex = link_state->fd ? DUPLEX_FULL : DUPLEX_HALF; } return 0; } /* This must be called with rtnl_lock held. 
*/ static int efx_ethtool_set_settings(struct net_device *net_dev, struct ethtool_cmd *ecmd) { struct efx_nic *efx = netdev_priv(net_dev); int rc; /* GMAC does not support 1000Mbps HD */ if ((ethtool_cmd_speed(ecmd) == SPEED_1000) && (ecmd->duplex != DUPLEX_FULL)) { netif_dbg(efx, drv, efx->net_dev, "rejecting unsupported 1000Mbps HD setting\n"); return -EINVAL; } mutex_lock(&efx->mac_lock); rc = efx->phy_op->set_settings(efx, ecmd); mutex_unlock(&efx->mac_lock); return rc; } static void efx_ethtool_get_drvinfo(struct net_device *net_dev, struct ethtool_drvinfo *info) { struct efx_nic *efx = netdev_priv(net_dev); strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver)); strlcpy(info->version, EFX_DRIVER_VERSION, sizeof(info->version)); if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) efx_mcdi_print_fwver(efx, info->fw_version, sizeof(info->fw_version)); strlcpy(info->bus_info, pci_name(efx->pci_dev), sizeof(info->bus_info)); } static int efx_ethtool_get_regs_len(struct net_device *net_dev) { return efx_nic_get_regs_len(netdev_priv(net_dev)); } static void efx_ethtool_get_regs(struct net_device *net_dev, struct ethtool_regs *regs, void *buf) { struct efx_nic *efx = netdev_priv(net_dev); regs->version = efx->type->revision; efx_nic_get_regs(efx, buf); } static u32 efx_ethtool_get_msglevel(struct net_device *net_dev) { struct efx_nic *efx = netdev_priv(net_dev); return efx->msg_enable; } static void efx_ethtool_set_msglevel(struct net_device *net_dev, u32 msg_enable) { struct efx_nic *efx = netdev_priv(net_dev); efx->msg_enable = msg_enable; } /** * efx_fill_test - fill in an individual self-test entry * @test_index: Index of the test * @strings: Ethtool strings, or %NULL * @data: Ethtool test results, or %NULL * @test: Pointer to test result (used only if data != %NULL) * @unit_format: Unit name format (e.g. "chan\%d") * @unit_id: Unit id (e.g. 0 for "chan0") * @test_format: Test name format (e.g. "loopback.\%s.tx.sent") * @test_id: Test id (e.g. 
"PHYXS" for "loopback.PHYXS.tx_sent") * * Fill in an individual self-test entry. */ static void efx_fill_test(unsigned int test_index, struct ethtool_string *strings, u64 *data, int *test, const char *unit_format, int unit_id, const char *test_format, const char *test_id) { struct ethtool_string unit_str, test_str; /* Fill data value, if applicable */ if (data) data[test_index] = *test; /* Fill string, if applicable */ if (strings) { if (strchr(unit_format, '%')) snprintf(unit_str.name, sizeof(unit_str.name), unit_format, unit_id); else strcpy(unit_str.name, unit_format); snprintf(test_str.name, sizeof(test_str.name), test_format, test_id); snprintf(strings[test_index].name, sizeof(strings[test_index].name), "%-6s %-24s", unit_str.name, test_str.name); } } #define EFX_CHANNEL_NAME(_channel) "chan%d", _channel->channel #define EFX_TX_QUEUE_NAME(_tx_queue) "txq%d", _tx_queue->queue #define EFX_RX_QUEUE_NAME(_rx_queue) "rxq%d", _rx_queue->queue #define EFX_LOOPBACK_NAME(_mode, _counter) \ "loopback.%s." 
_counter, STRING_TABLE_LOOKUP(_mode, efx_loopback_mode) /** * efx_fill_loopback_test - fill in a block of loopback self-test entries * @efx: Efx NIC * @lb_tests: Efx loopback self-test results structure * @mode: Loopback test mode * @test_index: Starting index of the test * @strings: Ethtool strings, or %NULL * @data: Ethtool test results, or %NULL */ static int efx_fill_loopback_test(struct efx_nic *efx, struct efx_loopback_self_tests *lb_tests, enum efx_loopback_mode mode, unsigned int test_index, struct ethtool_string *strings, u64 *data) { struct efx_channel *channel = efx_get_channel(efx, 0); struct efx_tx_queue *tx_queue; efx_for_each_channel_tx_queue(tx_queue, channel) { efx_fill_test(test_index++, strings, data, &lb_tests->tx_sent[tx_queue->queue], EFX_TX_QUEUE_NAME(tx_queue), EFX_LOOPBACK_NAME(mode, "tx_sent")); efx_fill_test(test_index++, strings, data, &lb_tests->tx_done[tx_queue->queue], EFX_TX_QUEUE_NAME(tx_queue), EFX_LOOPBACK_NAME(mode, "tx_done")); } efx_fill_test(test_index++, strings, data, &lb_tests->rx_good, "rx", 0, EFX_LOOPBACK_NAME(mode, "rx_good")); efx_fill_test(test_index++, strings, data, &lb_tests->rx_bad, "rx", 0, EFX_LOOPBACK_NAME(mode, "rx_bad")); return test_index; } /** * efx_ethtool_fill_self_tests - get self-test details * @efx: Efx NIC * @tests: Efx self-test results structure, or %NULL * @strings: Ethtool strings, or %NULL * @data: Ethtool test results, or %NULL */ static int efx_ethtool_fill_self_tests(struct efx_nic *efx, struct efx_self_tests *tests, struct ethtool_string *strings, u64 *data) { struct efx_channel *channel; unsigned int n = 0, i; enum efx_loopback_mode mode; efx_fill_test(n++, strings, data, &tests->phy_alive, "phy", 0, "alive", NULL); efx_fill_test(n++, strings, data, &tests->nvram, "core", 0, "nvram", NULL); efx_fill_test(n++, strings, data, &tests->interrupt, "core", 0, "interrupt", NULL); /* Event queues */ efx_for_each_channel(channel, efx) { efx_fill_test(n++, strings, data, 
&tests->eventq_dma[channel->channel], EFX_CHANNEL_NAME(channel), "eventq.dma", NULL); efx_fill_test(n++, strings, data, &tests->eventq_int[channel->channel], EFX_CHANNEL_NAME(channel), "eventq.int", NULL); } efx_fill_test(n++, strings, data, &tests->registers, "core", 0, "registers", NULL); if (efx->phy_op->run_tests != NULL) { EFX_BUG_ON_PARANOID(efx->phy_op->test_name == NULL); for (i = 0; true; ++i) { const char *name; EFX_BUG_ON_PARANOID(i >= EFX_MAX_PHY_TESTS); name = efx->phy_op->test_name(efx, i); if (name == NULL) break; efx_fill_test(n++, strings, data, &tests->phy_ext[i], "phy", 0, name, NULL); } } /* Loopback tests */ for (mode = LOOPBACK_NONE; mode <= LOOPBACK_TEST_MAX; mode++) { if (!(efx->loopback_modes & (1 << mode))) continue; n = efx_fill_loopback_test(efx, &tests->loopback[mode], mode, n, strings, data); } return n; } static int efx_ethtool_get_sset_count(struct net_device *net_dev, int string_set) { switch (string_set) { case ETH_SS_STATS: return EFX_ETHTOOL_NUM_STATS; case ETH_SS_TEST: return efx_ethtool_fill_self_tests(netdev_priv(net_dev), NULL, NULL, NULL); default: return -EINVAL; } } static void efx_ethtool_get_strings(struct net_device *net_dev, u32 string_set, u8 *strings) { struct efx_nic *efx = netdev_priv(net_dev); struct ethtool_string *ethtool_strings = (struct ethtool_string *)strings; int i; switch (string_set) { case ETH_SS_STATS: for (i = 0; i < EFX_ETHTOOL_NUM_STATS; i++) strncpy(ethtool_strings[i].name, efx_ethtool_stats[i].name, sizeof(ethtool_strings[i].name)); break; case ETH_SS_TEST: efx_ethtool_fill_self_tests(efx, NULL, ethtool_strings, NULL); break; default: /* No other string sets */ break; } } static void efx_ethtool_get_stats(struct net_device *net_dev, struct ethtool_stats *stats, u64 *data) { struct efx_nic *efx = netdev_priv(net_dev); struct efx_mac_stats *mac_stats = &efx->mac_stats; const struct efx_ethtool_stat *stat; struct efx_channel *channel; struct efx_tx_queue *tx_queue; int i; 
EFX_BUG_ON_PARANOID(stats->n_stats != EFX_ETHTOOL_NUM_STATS); spin_lock_bh(&efx->stats_lock); /* Update MAC and NIC statistics */ efx->type->update_stats(efx); /* Fill detailed statistics buffer */ for (i = 0; i < EFX_ETHTOOL_NUM_STATS; i++) { stat = &efx_ethtool_stats[i]; switch (stat->source) { case EFX_ETHTOOL_STAT_SOURCE_mac_stats: data[i] = stat->get_stat((void *)mac_stats + stat->offset); break; case EFX_ETHTOOL_STAT_SOURCE_nic: data[i] = stat->get_stat((void *)efx + stat->offset); break; case EFX_ETHTOOL_STAT_SOURCE_channel: data[i] = 0; efx_for_each_channel(channel, efx) data[i] += stat->get_stat((void *)channel + stat->offset); break; case EFX_ETHTOOL_STAT_SOURCE_tx_queue: data[i] = 0; efx_for_each_channel(channel, efx) { efx_for_each_channel_tx_queue(tx_queue, channel) data[i] += stat->get_stat((void *)tx_queue + stat->offset); } break; } } spin_unlock_bh(&efx->stats_lock); } static void efx_ethtool_self_test(struct net_device *net_dev, struct ethtool_test *test, u64 *data) { struct efx_nic *efx = netdev_priv(net_dev); struct efx_self_tests *efx_tests; int already_up; int rc = -ENOMEM; efx_tests = kzalloc(sizeof(*efx_tests), GFP_KERNEL); if (!efx_tests) goto fail; ASSERT_RTNL(); if (efx->state != STATE_RUNNING) { rc = -EIO; goto fail1; } netif_info(efx, drv, efx->net_dev, "starting %sline testing\n", (test->flags & ETH_TEST_FL_OFFLINE) ? "off" : "on"); /* We need rx buffers and interrupts. */ already_up = (efx->net_dev->flags & IFF_UP); if (!already_up) { rc = dev_open(efx->net_dev); if (rc) { netif_err(efx, drv, efx->net_dev, "failed opening device.\n"); goto fail1; } } rc = efx_selftest(efx, efx_tests, test->flags); if (!already_up) dev_close(efx->net_dev); netif_info(efx, drv, efx->net_dev, "%s %sline self-tests\n", rc == 0 ? "passed" : "failed", (test->flags & ETH_TEST_FL_OFFLINE) ? 
"off" : "on"); fail1: /* Fill ethtool results structures */ efx_ethtool_fill_self_tests(efx, efx_tests, NULL, data); kfree(efx_tests); fail: if (rc) test->flags |= ETH_TEST_FL_FAILED; } /* Restart autonegotiation */ static int efx_ethtool_nway_reset(struct net_device *net_dev) { struct efx_nic *efx = netdev_priv(net_dev); return mdio45_nway_restart(&efx->mdio); } /* * Each channel has a single IRQ and moderation timer, started by any * completion (or other event). Unless the module parameter * separate_tx_channels is set, IRQs and moderation are therefore * shared between RX and TX completions. In this case, when RX IRQ * moderation is explicitly changed then TX IRQ moderation is * automatically changed too, but otherwise we fail if the two values * are requested to be different. * * The hardware does not support a limit on the number of completions * before an IRQ, so we do not use the max_frames fields. We should * report and require that max_frames == (usecs != 0), but this would * invalidate existing user documentation. * * The hardware does not have distinct settings for interrupt * moderation while the previous IRQ is being handled, so we should * not use the 'irq' fields. However, an earlier developer * misunderstood the meaning of the 'irq' fields and the driver did * not support the standard fields. To avoid invalidating existing * user documentation, we report and accept changes through either the * standard or 'irq' fields. If both are changed at the same time, we * prefer the standard field. * * We implement adaptive IRQ moderation, but use a different algorithm * from that assumed in the definition of struct ethtool_coalesce. * Therefore we do not use any of the adaptive moderation parameters * in it. 
*/ static int efx_ethtool_get_coalesce(struct net_device *net_dev, struct ethtool_coalesce *coalesce) { struct efx_nic *efx = netdev_priv(net_dev); unsigned int tx_usecs, rx_usecs; bool rx_adaptive; efx_get_irq_moderation(efx, &tx_usecs, &rx_usecs, &rx_adaptive); coalesce->tx_coalesce_usecs = tx_usecs; coalesce->tx_coalesce_usecs_irq = tx_usecs; coalesce->rx_coalesce_usecs = rx_usecs; coalesce->rx_coalesce_usecs_irq = rx_usecs; coalesce->use_adaptive_rx_coalesce = rx_adaptive; return 0; } static int efx_ethtool_set_coalesce(struct net_device *net_dev, struct ethtool_coalesce *coalesce) { struct efx_nic *efx = netdev_priv(net_dev); struct efx_channel *channel; unsigned int tx_usecs, rx_usecs; bool adaptive, rx_may_override_tx; int rc; if (coalesce->use_adaptive_tx_coalesce) return -EINVAL; efx_get_irq_moderation(efx, &tx_usecs, &rx_usecs, &adaptive); if (coalesce->rx_coalesce_usecs != rx_usecs) rx_usecs = coalesce->rx_coalesce_usecs; else rx_usecs = coalesce->rx_coalesce_usecs_irq; adaptive = coalesce->use_adaptive_rx_coalesce; /* If channels are shared, TX IRQ moderation can be quietly * overridden unless it is changed from its old value. 
*/ rx_may_override_tx = (coalesce->tx_coalesce_usecs == tx_usecs && coalesce->tx_coalesce_usecs_irq == tx_usecs); if (coalesce->tx_coalesce_usecs != tx_usecs) tx_usecs = coalesce->tx_coalesce_usecs; else tx_usecs = coalesce->tx_coalesce_usecs_irq; rc = efx_init_irq_moderation(efx, tx_usecs, rx_usecs, adaptive, rx_may_override_tx); if (rc != 0) return rc; efx_for_each_channel(channel, efx) efx->type->push_irq_moderation(channel); return 0; } static void efx_ethtool_get_ringparam(struct net_device *net_dev, struct ethtool_ringparam *ring) { struct efx_nic *efx = netdev_priv(net_dev); ring->rx_max_pending = EFX_MAX_DMAQ_SIZE; ring->tx_max_pending = EFX_MAX_DMAQ_SIZE; ring->rx_pending = efx->rxq_entries; ring->tx_pending = efx->txq_entries; } static int efx_ethtool_set_ringparam(struct net_device *net_dev, struct ethtool_ringparam *ring) { struct efx_nic *efx = netdev_priv(net_dev); if (ring->rx_mini_pending || ring->rx_jumbo_pending || ring->rx_pending > EFX_MAX_DMAQ_SIZE || ring->tx_pending > EFX_MAX_DMAQ_SIZE) return -EINVAL; if (ring->rx_pending < EFX_MIN_RING_SIZE || ring->tx_pending < EFX_MIN_RING_SIZE) { netif_err(efx, drv, efx->net_dev, "TX and RX queues cannot be smaller than %ld\n", EFX_MIN_RING_SIZE); return -EINVAL; } return efx_realloc_channels(efx, ring->rx_pending, ring->tx_pending); } static int efx_ethtool_set_pauseparam(struct net_device *net_dev, struct ethtool_pauseparam *pause) { struct efx_nic *efx = netdev_priv(net_dev); u8 wanted_fc, old_fc; u32 old_adv; bool reset; int rc = 0; mutex_lock(&efx->mac_lock); wanted_fc = ((pause->rx_pause ? EFX_FC_RX : 0) | (pause->tx_pause ? EFX_FC_TX : 0) | (pause->autoneg ? 
EFX_FC_AUTO : 0)); if ((wanted_fc & EFX_FC_TX) && !(wanted_fc & EFX_FC_RX)) { netif_dbg(efx, drv, efx->net_dev, "Flow control unsupported: tx ON rx OFF\n"); rc = -EINVAL; goto out; } if ((wanted_fc & EFX_FC_AUTO) && !efx->link_advertising) { netif_dbg(efx, drv, efx->net_dev, "Autonegotiation is disabled\n"); rc = -EINVAL; goto out; } /* TX flow control may automatically turn itself off if the * link partner (intermittently) stops responding to pause * frames. There isn't any indication that this has happened, * so the best we do is leave it up to the user to spot this * and fix it be cycling transmit flow control on this end. */ reset = (wanted_fc & EFX_FC_TX) && !(efx->wanted_fc & EFX_FC_TX); if (EFX_WORKAROUND_11482(efx) && reset) { if (efx_nic_rev(efx) == EFX_REV_FALCON_B0) { /* Recover by resetting the EM block */ falcon_stop_nic_stats(efx); falcon_drain_tx_fifo(efx); falcon_reconfigure_xmac(efx); falcon_start_nic_stats(efx); } else { /* Schedule a reset to recover */ efx_schedule_reset(efx, RESET_TYPE_INVISIBLE); } } old_adv = efx->link_advertising; old_fc = efx->wanted_fc; efx_link_set_wanted_fc(efx, wanted_fc); if (efx->link_advertising != old_adv || (efx->wanted_fc ^ old_fc) & EFX_FC_AUTO) { rc = efx->phy_op->reconfigure(efx); if (rc) { netif_err(efx, drv, efx->net_dev, "Unable to advertise requested flow " "control setting\n"); goto out; } } /* Reconfigure the MAC. 
The PHY *may* generate a link state change event * if the user just changed the advertised capabilities, but there's no * harm doing this twice */ efx->type->reconfigure_mac(efx); out: mutex_unlock(&efx->mac_lock); return rc; } static void efx_ethtool_get_pauseparam(struct net_device *net_dev, struct ethtool_pauseparam *pause) { struct efx_nic *efx = netdev_priv(net_dev); pause->rx_pause = !!(efx->wanted_fc & EFX_FC_RX); pause->tx_pause = !!(efx->wanted_fc & EFX_FC_TX); pause->autoneg = !!(efx->wanted_fc & EFX_FC_AUTO); } static void efx_ethtool_get_wol(struct net_device *net_dev, struct ethtool_wolinfo *wol) { struct efx_nic *efx = netdev_priv(net_dev); return efx->type->get_wol(efx, wol); } static int efx_ethtool_set_wol(struct net_device *net_dev, struct ethtool_wolinfo *wol) { struct efx_nic *efx = netdev_priv(net_dev); return efx->type->set_wol(efx, wol->wolopts); } static int efx_ethtool_reset(struct net_device *net_dev, u32 *flags) { struct efx_nic *efx = netdev_priv(net_dev); int rc; rc = efx->type->map_reset_flags(flags); if (rc < 0) return rc; return efx_reset(efx, rc); } /* MAC address mask including only MC flag */ static const u8 mac_addr_mc_mask[ETH_ALEN] = { 0x01, 0, 0, 0, 0, 0 }; static int efx_ethtool_get_class_rule(struct efx_nic *efx, struct ethtool_rx_flow_spec *rule) { struct ethtool_tcpip4_spec *ip_entry = &rule->h_u.tcp_ip4_spec; struct ethtool_tcpip4_spec *ip_mask = &rule->m_u.tcp_ip4_spec; struct ethhdr *mac_entry = &rule->h_u.ether_spec; struct ethhdr *mac_mask = &rule->m_u.ether_spec; struct efx_filter_spec spec; u16 vid; u8 proto; int rc; rc = efx_filter_get_filter_safe(efx, EFX_FILTER_PRI_MANUAL, rule->location, &spec); if (rc) return rc; if (spec.dmaq_id == 0xfff) rule->ring_cookie = RX_CLS_FLOW_DISC; else rule->ring_cookie = spec.dmaq_id; if (spec.type == EFX_FILTER_MC_DEF || spec.type == EFX_FILTER_UC_DEF) { rule->flow_type = ETHER_FLOW; memcpy(mac_mask->h_dest, mac_addr_mc_mask, ETH_ALEN); if (spec.type == EFX_FILTER_MC_DEF) 
memcpy(mac_entry->h_dest, mac_addr_mc_mask, ETH_ALEN); return 0; } rc = efx_filter_get_eth_local(&spec, &vid, mac_entry->h_dest); if (rc == 0) { rule->flow_type = ETHER_FLOW; memset(mac_mask->h_dest, ~0, ETH_ALEN); if (vid != EFX_FILTER_VID_UNSPEC) { rule->flow_type |= FLOW_EXT; rule->h_ext.vlan_tci = htons(vid); rule->m_ext.vlan_tci = htons(0xfff); } return 0; } rc = efx_filter_get_ipv4_local(&spec, &proto, &ip_entry->ip4dst, &ip_entry->pdst); if (rc != 0) { rc = efx_filter_get_ipv4_full( &spec, &proto, &ip_entry->ip4src, &ip_entry->psrc, &ip_entry->ip4dst, &ip_entry->pdst); EFX_WARN_ON_PARANOID(rc); ip_mask->ip4src = ~0; ip_mask->psrc = ~0; } rule->flow_type = (proto == IPPROTO_TCP) ? TCP_V4_FLOW : UDP_V4_FLOW; ip_mask->ip4dst = ~0; ip_mask->pdst = ~0; return rc; } static int efx_ethtool_get_rxnfc(struct net_device *net_dev, struct ethtool_rxnfc *info, u32 *rule_locs) { struct efx_nic *efx = netdev_priv(net_dev); switch (info->cmd) { case ETHTOOL_GRXRINGS: info->data = efx->n_rx_channels; return 0; case ETHTOOL_GRXFH: { unsigned min_revision = 0; info->data = 0; switch (info->flow_type) { case TCP_V4_FLOW: info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; /* fall through */ case UDP_V4_FLOW: case SCTP_V4_FLOW: case AH_ESP_V4_FLOW: case IPV4_FLOW: info->data |= RXH_IP_SRC | RXH_IP_DST; min_revision = EFX_REV_FALCON_B0; break; case TCP_V6_FLOW: info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; /* fall through */ case UDP_V6_FLOW: case SCTP_V6_FLOW: case AH_ESP_V6_FLOW: case IPV6_FLOW: info->data |= RXH_IP_SRC | RXH_IP_DST; min_revision = EFX_REV_SIENA_A0; break; default: break; } if (efx_nic_rev(efx) < min_revision) info->data = 0; return 0; } case ETHTOOL_GRXCLSRLCNT: info->data = efx_filter_get_rx_id_limit(efx); if (info->data == 0) return -EOPNOTSUPP; info->data |= RX_CLS_LOC_SPECIAL; info->rule_cnt = efx_filter_count_rx_used(efx, EFX_FILTER_PRI_MANUAL); return 0; case ETHTOOL_GRXCLSRULE: if (efx_filter_get_rx_id_limit(efx) == 0) return -EOPNOTSUPP; return 
efx_ethtool_get_class_rule(efx, &info->fs); case ETHTOOL_GRXCLSRLALL: { s32 rc; info->data = efx_filter_get_rx_id_limit(efx); if (info->data == 0) return -EOPNOTSUPP; rc = efx_filter_get_rx_ids(efx, EFX_FILTER_PRI_MANUAL, rule_locs, info->rule_cnt); if (rc < 0) return rc; info->rule_cnt = rc; return 0; } default: return -EOPNOTSUPP; } } static int efx_ethtool_set_class_rule(struct efx_nic *efx, struct ethtool_rx_flow_spec *rule) { struct ethtool_tcpip4_spec *ip_entry = &rule->h_u.tcp_ip4_spec; struct ethtool_tcpip4_spec *ip_mask = &rule->m_u.tcp_ip4_spec; struct ethhdr *mac_entry = &rule->h_u.ether_spec; struct ethhdr *mac_mask = &rule->m_u.ether_spec; struct efx_filter_spec spec; int rc; /* Check that user wants us to choose the location */ if (rule->location != RX_CLS_LOC_ANY && rule->location != RX_CLS_LOC_FIRST && rule->location != RX_CLS_LOC_LAST) return -EINVAL; /* Range-check ring_cookie */ if (rule->ring_cookie >= efx->n_rx_channels && rule->ring_cookie != RX_CLS_FLOW_DISC) return -EINVAL; /* Check for unsupported extensions */ if ((rule->flow_type & FLOW_EXT) && (rule->m_ext.vlan_etype | rule->m_ext.data[0] | rule->m_ext.data[1])) return -EINVAL; efx_filter_init_rx(&spec, EFX_FILTER_PRI_MANUAL, (rule->location == RX_CLS_LOC_FIRST) ? EFX_FILTER_FLAG_RX_OVERRIDE_IP : 0, (rule->ring_cookie == RX_CLS_FLOW_DISC) ? 0xfff : rule->ring_cookie); switch (rule->flow_type) { case TCP_V4_FLOW: case UDP_V4_FLOW: { u8 proto = (rule->flow_type == TCP_V4_FLOW ? 
IPPROTO_TCP : IPPROTO_UDP); /* Must match all of destination, */ if ((__force u32)~ip_mask->ip4dst | (__force u16)~ip_mask->pdst) return -EINVAL; /* all or none of source, */ if ((ip_mask->ip4src | ip_mask->psrc) && ((__force u32)~ip_mask->ip4src | (__force u16)~ip_mask->psrc)) return -EINVAL; /* and nothing else */ if (ip_mask->tos | rule->m_ext.vlan_tci) return -EINVAL; if (ip_mask->ip4src) rc = efx_filter_set_ipv4_full(&spec, proto, ip_entry->ip4dst, ip_entry->pdst, ip_entry->ip4src, ip_entry->psrc); else rc = efx_filter_set_ipv4_local(&spec, proto, ip_entry->ip4dst, ip_entry->pdst); if (rc) return rc; break; } case ETHER_FLOW | FLOW_EXT: case ETHER_FLOW: { u16 vlan_tag_mask = (rule->flow_type & FLOW_EXT ? ntohs(rule->m_ext.vlan_tci) : 0); /* Must not match on source address or Ethertype */ if (!is_zero_ether_addr(mac_mask->h_source) || mac_mask->h_proto) return -EINVAL; /* Is it a default UC or MC filter? */ if (!compare_ether_addr(mac_mask->h_dest, mac_addr_mc_mask) && vlan_tag_mask == 0) { if (is_multicast_ether_addr(mac_entry->h_dest)) rc = efx_filter_set_mc_def(&spec); else rc = efx_filter_set_uc_def(&spec); } /* Otherwise, it must match all of destination and all * or none of VID. */ else if (is_broadcast_ether_addr(mac_mask->h_dest) && (vlan_tag_mask == 0xfff || vlan_tag_mask == 0)) { rc = efx_filter_set_eth_local( &spec, vlan_tag_mask ? 
ntohs(rule->h_ext.vlan_tci) : EFX_FILTER_VID_UNSPEC, mac_entry->h_dest); } else { rc = -EINVAL; } if (rc) return rc; break; } default: return -EINVAL; } rc = efx_filter_insert_filter(efx, &spec, true); if (rc < 0) return rc; rule->location = rc; return 0; } static int efx_ethtool_set_rxnfc(struct net_device *net_dev, struct ethtool_rxnfc *info) { struct efx_nic *efx = netdev_priv(net_dev); if (efx_filter_get_rx_id_limit(efx) == 0) return -EOPNOTSUPP; switch (info->cmd) { case ETHTOOL_SRXCLSRLINS: return efx_ethtool_set_class_rule(efx, &info->fs); case ETHTOOL_SRXCLSRLDEL: return efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_MANUAL, info->fs.location); default: return -EOPNOTSUPP; } } static u32 efx_ethtool_get_rxfh_indir_size(struct net_device *net_dev) { struct efx_nic *efx = netdev_priv(net_dev); return ((efx_nic_rev(efx) < EFX_REV_FALCON_B0 || efx->n_rx_channels == 1) ? 0 : ARRAY_SIZE(efx->rx_indir_table)); } static int efx_ethtool_get_rxfh_indir(struct net_device *net_dev, u32 *indir) { struct efx_nic *efx = netdev_priv(net_dev); memcpy(indir, efx->rx_indir_table, sizeof(efx->rx_indir_table)); return 0; } static int efx_ethtool_set_rxfh_indir(struct net_device *net_dev, const u32 *indir) { struct efx_nic *efx = netdev_priv(net_dev); memcpy(efx->rx_indir_table, indir, sizeof(efx->rx_indir_table)); efx_nic_push_rx_indir_table(efx); return 0; } const struct ethtool_ops efx_ethtool_ops = { .get_settings = efx_ethtool_get_settings, .set_settings = efx_ethtool_set_settings, .get_drvinfo = efx_ethtool_get_drvinfo, .get_regs_len = efx_ethtool_get_regs_len, .get_regs = efx_ethtool_get_regs, .get_msglevel = efx_ethtool_get_msglevel, .set_msglevel = efx_ethtool_set_msglevel, .nway_reset = efx_ethtool_nway_reset, .get_link = ethtool_op_get_link, .get_coalesce = efx_ethtool_get_coalesce, .set_coalesce = efx_ethtool_set_coalesce, .get_ringparam = efx_ethtool_get_ringparam, .set_ringparam = efx_ethtool_set_ringparam, .get_pauseparam = efx_ethtool_get_pauseparam, 
.set_pauseparam = efx_ethtool_set_pauseparam, .get_sset_count = efx_ethtool_get_sset_count, .self_test = efx_ethtool_self_test, .get_strings = efx_ethtool_get_strings, .set_phys_id = efx_ethtool_phys_id, .get_ethtool_stats = efx_ethtool_get_stats, .get_wol = efx_ethtool_get_wol, .set_wol = efx_ethtool_set_wol, .reset = efx_ethtool_reset, .get_rxnfc = efx_ethtool_get_rxnfc, .set_rxnfc = efx_ethtool_set_rxnfc, .get_rxfh_indir_size = efx_ethtool_get_rxfh_indir_size, .get_rxfh_indir = efx_ethtool_get_rxfh_indir, .set_rxfh_indir = efx_ethtool_set_rxfh_indir, };
gpl-2.0
Kurre/kernel_msm
drivers/gpu/drm/exynos/exynos_drm_drv.c
4795
9360
/*
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 * Authors:
 *	Inki Dae <inki.dae@samsung.com>
 *	Joonyoung Shim <jy0922.shim@samsung.com>
 *	Seung-Woo Kim <sw0312.kim@samsung.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "drmP.h"
#include "drm.h"
#include "drm_crtc_helper.h"

#include <drm/exynos_drm.h>

#include "exynos_drm_drv.h"
#include "exynos_drm_crtc.h"
#include "exynos_drm_encoder.h"
#include "exynos_drm_fbdev.h"
#include "exynos_drm_fb.h"
#include "exynos_drm_gem.h"
#include "exynos_drm_plane.h"
#include "exynos_drm_vidi.h"

#define DRIVER_NAME	"exynos"
#define DRIVER_DESC	"Samsung SoC DRM"
#define DRIVER_DATE	"20110530"
#define DRIVER_MAJOR	1
#define DRIVER_MINOR	0

#define VBLANK_OFF_DELAY	50000

/*
 * DRM load callback: allocate the driver-private structure, set up mode
 * config, CRTCs, planes, vblank, the registered sub drivers and fbdev.
 */
static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
{
	struct exynos_drm_private *priv;
	int ret;
	int i;

	DRM_DEBUG_DRIVER("%s\n", __FILE__);

	priv = kzalloc(sizeof(struct exynos_drm_private), GFP_KERNEL);
	if (!priv) {
		DRM_ERROR("failed to allocate private\n");
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&priv->pageflip_event_list);
	dev->dev_private = (void *)priv;

	drm_mode_config_init(dev);

	/* init kms poll for handling hpd */
	drm_kms_helper_poll_init(dev);

	exynos_drm_mode_config_init(dev);

	/*
	 * EXYNOS4 is enough to have two CRTCs and each crtc would be used
	 * without dependency of hardware.
	 */
	for (i = 0; i < MAX_CRTC; i++) {
		ret = exynos_drm_crtc_create(dev, i);
		if (ret)
			goto err_crtc;
	}

	for (i = 0; i < MAX_PLANE; i++) {
		ret = exynos_plane_init(dev, i);
		if (ret)
			goto err_crtc;
	}

	ret = drm_vblank_init(dev, MAX_CRTC);
	if (ret)
		goto err_crtc;

	/*
	 * probe sub drivers such as display controller and hdmi driver,
	 * that were registered at probe() of platform driver
	 * to the sub driver and create encoder and connector for them.
	 */
	ret = exynos_drm_device_register(dev);
	if (ret)
		goto err_vblank;

	/* setup possible_clones. */
	exynos_drm_encoder_setup(dev);

	/* create and configure fb helper and the exynos-specific fbdev. */
	ret = exynos_drm_fbdev_init(dev);
	if (ret) {
		DRM_ERROR("failed to initialize drm fbdev\n");
		goto err_drm_device;
	}

	drm_vblank_offdelay = VBLANK_OFF_DELAY;

	return 0;

err_drm_device:
	exynos_drm_device_unregister(dev);
err_vblank:
	drm_vblank_cleanup(dev);
err_crtc:
	drm_mode_config_cleanup(dev);
	kfree(priv);
	return ret;
}

/* Tear down everything exynos_drm_load() created, in reverse order. */
static int exynos_drm_unload(struct drm_device *dev)
{
	DRM_DEBUG_DRIVER("%s\n", __FILE__);

	exynos_drm_fbdev_fini(dev);
	exynos_drm_device_unregister(dev);
	drm_vblank_cleanup(dev);
	drm_kms_helper_poll_fini(dev);
	drm_mode_config_cleanup(dev);
	kfree(dev->dev_private);

	dev->dev_private = NULL;

	return 0;
}

static int exynos_drm_open(struct drm_device *dev, struct drm_file *file)
{
	DRM_DEBUG_DRIVER("%s\n", __FILE__);

	return exynos_drm_subdrv_open(dev, file);
}

/*
 * Drop any pending page-flip events that belong to the closing file,
 * then notify the sub drivers.
 */
static void exynos_drm_preclose(struct drm_device *dev,
				struct drm_file *file)
{
	struct exynos_drm_private *priv = dev->dev_private;
	struct drm_pending_vblank_event *event, *tmp;
	unsigned long flags;

	DRM_DEBUG_DRIVER("%s\n", __FILE__);

	/* release events of current file */
	spin_lock_irqsave(&dev->event_lock, flags);
	list_for_each_entry_safe(event, tmp, &priv->pageflip_event_list,
				 base.link) {
		if (event->base.file_priv == file) {
			list_del(&event->base.link);
			event->base.destroy(&event->base);
		}
	}
	spin_unlock_irqrestore(&dev->event_lock, flags);

	exynos_drm_subdrv_close(dev, file);
}

static void exynos_drm_postclose(struct drm_device *dev, struct drm_file *file)
{
	DRM_DEBUG_DRIVER("%s\n", __FILE__);

	if (!file->driver_priv)
		return;

	kfree(file->driver_priv);
	file->driver_priv = NULL;
}

static void exynos_drm_lastclose(struct drm_device *dev)
{
	DRM_DEBUG_DRIVER("%s\n", __FILE__);

	exynos_drm_fbdev_restore_mode(dev);
}

static struct vm_operations_struct exynos_drm_gem_vm_ops = {
	.fault = exynos_drm_gem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

static struct drm_ioctl_desc exynos_ioctls[] = {
	DRM_IOCTL_DEF_DRV(EXYNOS_GEM_CREATE, exynos_drm_gem_create_ioctl,
			DRM_UNLOCKED | DRM_AUTH),
	DRM_IOCTL_DEF_DRV(EXYNOS_GEM_MAP_OFFSET,
			exynos_drm_gem_map_offset_ioctl, DRM_UNLOCKED |
			DRM_AUTH),
	DRM_IOCTL_DEF_DRV(EXYNOS_GEM_MMAP,
			exynos_drm_gem_mmap_ioctl, DRM_UNLOCKED | DRM_AUTH),
	DRM_IOCTL_DEF_DRV(EXYNOS_PLANE_SET_ZPOS, exynos_plane_set_zpos_ioctl,
			DRM_UNLOCKED | DRM_AUTH),
	DRM_IOCTL_DEF_DRV(EXYNOS_VIDI_CONNECTION, vidi_connection_ioctl,
			DRM_UNLOCKED | DRM_AUTH),
};

static const struct file_operations exynos_drm_driver_fops = {
	.owner		= THIS_MODULE,
	.open		= drm_open,
	.mmap		= exynos_drm_gem_mmap,
	.poll		= drm_poll,
	.read		= drm_read,
	.unlocked_ioctl	= drm_ioctl,
	.release	= drm_release,
};

static struct drm_driver exynos_drm_driver = {
	.driver_features	= DRIVER_HAVE_IRQ | DRIVER_BUS_PLATFORM |
				  DRIVER_MODESET | DRIVER_GEM,
	.load			= exynos_drm_load,
	.unload			= exynos_drm_unload,
	.open			= exynos_drm_open,
	.preclose		= exynos_drm_preclose,
	.lastclose		= exynos_drm_lastclose,
	.postclose		= exynos_drm_postclose,
	.get_vblank_counter	= drm_vblank_count,
	.enable_vblank		= exynos_drm_crtc_enable_vblank,
	.disable_vblank		= exynos_drm_crtc_disable_vblank,
	.gem_init_object	= exynos_drm_gem_init_object,
	.gem_free_object	= exynos_drm_gem_free_object,
	.gem_vm_ops		= &exynos_drm_gem_vm_ops,
	.dumb_create		= exynos_drm_gem_dumb_create,
	.dumb_map_offset	= exynos_drm_gem_dumb_map_offset,
	.dumb_destroy		= exynos_drm_gem_dumb_destroy,
	.ioctls			= exynos_ioctls,
	.fops			= &exynos_drm_driver_fops,
	.name			= DRIVER_NAME,
	.desc			= DRIVER_DESC,
	.date			= DRIVER_DATE,
	.major			= DRIVER_MAJOR,
	.minor			= DRIVER_MINOR,
};

static int exynos_drm_platform_probe(struct platform_device *pdev)
{
	DRM_DEBUG_DRIVER("%s\n", __FILE__);

	exynos_drm_driver.num_ioctls = DRM_ARRAY_SIZE(exynos_ioctls);

	return drm_platform_init(&exynos_drm_driver, pdev);
}

static int exynos_drm_platform_remove(struct platform_device *pdev)
{
	DRM_DEBUG_DRIVER("%s\n", __FILE__);

	drm_platform_exit(&exynos_drm_driver, pdev);

	return 0;
}

static struct platform_driver exynos_drm_platform_driver = {
	.probe		= exynos_drm_platform_probe,
	.remove		= __devexit_p(exynos_drm_platform_remove),
	.driver		= {
		.owner	= THIS_MODULE,
		.name	= "exynos-drm",
	},
};

/*
 * Register the configured sub drivers (fimd/hdmi/mixer/vidi) and then
 * the top-level platform driver, unwinding in reverse on any failure.
 */
static int __init exynos_drm_init(void)
{
	int ret;

	DRM_DEBUG_DRIVER("%s\n", __FILE__);

#ifdef CONFIG_DRM_EXYNOS_FIMD
	ret = platform_driver_register(&fimd_driver);
	if (ret < 0)
		goto out_fimd;
#endif

#ifdef CONFIG_DRM_EXYNOS_HDMI
	ret = platform_driver_register(&hdmi_driver);
	if (ret < 0)
		goto out_hdmi;

	ret = platform_driver_register(&mixer_driver);
	if (ret < 0)
		goto out_mixer;

	ret = platform_driver_register(&exynos_drm_common_hdmi_driver);
	if (ret < 0)
		goto out_common_hdmi;
#endif

#ifdef CONFIG_DRM_EXYNOS_VIDI
	ret = platform_driver_register(&vidi_driver);
	if (ret < 0)
		goto out_vidi;
#endif

	ret = platform_driver_register(&exynos_drm_platform_driver);
	if (ret < 0)
		goto out;

	return 0;

out:
#ifdef CONFIG_DRM_EXYNOS_VIDI
out_vidi:
	platform_driver_unregister(&vidi_driver);
#endif

#ifdef CONFIG_DRM_EXYNOS_HDMI
	platform_driver_unregister(&exynos_drm_common_hdmi_driver);
out_common_hdmi:
	platform_driver_unregister(&mixer_driver);
out_mixer:
	platform_driver_unregister(&hdmi_driver);
out_hdmi:
#endif

#ifdef CONFIG_DRM_EXYNOS_FIMD
	platform_driver_unregister(&fimd_driver);
out_fimd:
#endif
	return ret;
}

static void __exit exynos_drm_exit(void)
{
	DRM_DEBUG_DRIVER("%s\n", __FILE__);

	platform_driver_unregister(&exynos_drm_platform_driver);

#ifdef CONFIG_DRM_EXYNOS_HDMI
	platform_driver_unregister(&exynos_drm_common_hdmi_driver);
	platform_driver_unregister(&mixer_driver);
	platform_driver_unregister(&hdmi_driver);
#endif

#ifdef CONFIG_DRM_EXYNOS_VIDI
	platform_driver_unregister(&vidi_driver);
#endif

#ifdef CONFIG_DRM_EXYNOS_FIMD
	platform_driver_unregister(&fimd_driver);
#endif
}

module_init(exynos_drm_init);
module_exit(exynos_drm_exit);

MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>");
MODULE_AUTHOR("Joonyoung Shim <jy0922.shim@samsung.com>");
MODULE_AUTHOR("Seung-Woo Kim <sw0312.kim@samsung.com>");
MODULE_DESCRIPTION("Samsung SoC DRM Driver");
MODULE_LICENSE("GPL");
gpl-2.0
Pantech-Discover/android_kernel_pantech_magnus
arch/arm/mach-netx/generic.c
5051
4511
/*
 * arch/arm/mach-netx/generic.c
 *
 * Copyright (C) 2005 Sascha Hauer <s.hauer@pengutronix.de>, Pengutronix
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#include <linux/device.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <mach/hardware.h>
#include <asm/mach/map.h>
#include <asm/hardware/vic.h>
#include <mach/netx-regs.h>
#include <asm/mach/irq.h>

/* Static virtual mapping for the netX on-chip I/O region. */
static struct map_desc netx_io_desc[] __initdata = {
	{
		.virtual	= NETX_IO_VIRT,
		.pfn		= __phys_to_pfn(NETX_IO_PHYS),
		.length		= NETX_IO_SIZE,
		.type		= MT_DEVICE
	}
};

void __init netx_map_io(void)
{
	iotable_init(netx_io_desc, ARRAY_SIZE(netx_io_desc));
}

static struct resource netx_rtc_resources[] = {
	[0] = {
		.start	= 0x00101200,
		.end	= 0x00101220,
		.flags	= IORESOURCE_MEM,
	},
};

static struct platform_device netx_rtc_device = {
	.name		= "netx-rtc",
	.id		= 0,
	.num_resources	= ARRAY_SIZE(netx_rtc_resources),
	.resource	= netx_rtc_resources,
};

static struct platform_device *devices[] __initdata = {
	&netx_rtc_device,
};

#if 0
#define DEBUG_IRQ(fmt...)	printk(fmt)
#else
/*
 * Fix: the previous no-op expansion "while (0) {}" is unsafe when the
 * macro is used as "DEBUG_IRQ(...);" inside an unbraced if/else — the
 * trailing semicolon becomes a separate empty statement and orphans the
 * else branch.  "do { } while (0)" is the standard safe idiom.
 */
#define DEBUG_IRQ(fmt...)	do { } while (0)
#endif

/*
 * Demux handler for the chained HIF interrupt: reads the enabled and
 * pending bits (bits 24..28 of the DPMAS interrupt registers) and calls
 * the generic handler for each set bit.
 */
static void
netx_hif_demux_handler(unsigned int irq_unused, struct irq_desc *desc)
{
	unsigned int irq = NETX_IRQ_HIF_CHAINED(0);
	unsigned int stat;

	stat = ((readl(NETX_DPMAS_INT_EN) &
		readl(NETX_DPMAS_INT_STAT)) >> 24) & 0x1f;

	while (stat) {
		if (stat & 1) {
			DEBUG_IRQ("handling irq %d\n", irq);
			generic_handle_irq(irq);
		}
		irq++;
		stat >>= 1;
	}
}

/*
 * Configure the trigger type of a HIF interrupt via bit (26 + irq) of
 * NETX_DPMAS_IF_CONF1: set for rising edge / high level, cleared for
 * falling edge / low level.
 */
static int
netx_hif_irq_type(struct irq_data *d, unsigned int type)
{
	unsigned int val, irq;

	val = readl(NETX_DPMAS_IF_CONF1);

	irq = d->irq - NETX_IRQ_HIF_CHAINED(0);

	if (type & IRQ_TYPE_EDGE_RISING) {
		DEBUG_IRQ("rising edges\n");
		val |= (1 << 26) << irq;
	}
	if (type & IRQ_TYPE_EDGE_FALLING) {
		DEBUG_IRQ("falling edges\n");
		val &= ~((1 << 26) << irq);
	}
	if (type & IRQ_TYPE_LEVEL_LOW) {
		DEBUG_IRQ("low level\n");
		val &= ~((1 << 26) << irq);
	}
	if (type & IRQ_TYPE_LEVEL_HIGH) {
		DEBUG_IRQ("high level\n");
		val |= (1 << 26) << irq;
	}

	writel(val, NETX_DPMAS_IF_CONF1);

	return 0;
}

/* Acknowledge a HIF interrupt: clear its status bit and disable it. */
static void
netx_hif_ack_irq(struct irq_data *d)
{
	unsigned int val, irq;

	irq = d->irq - NETX_IRQ_HIF_CHAINED(0);
	writel((1 << 24) << irq, NETX_DPMAS_INT_STAT);

	val = readl(NETX_DPMAS_INT_EN);
	val &= ~((1 << 24) << irq);
	writel(val, NETX_DPMAS_INT_EN);

	DEBUG_IRQ("%s: irq %d\n", __func__, d->irq);
}

/* Mask a HIF interrupt by clearing its enable bit. */
static void
netx_hif_mask_irq(struct irq_data *d)
{
	unsigned int val, irq;

	irq = d->irq - NETX_IRQ_HIF_CHAINED(0);
	val = readl(NETX_DPMAS_INT_EN);
	val &= ~((1 << 24) << irq);
	writel(val, NETX_DPMAS_INT_EN);
	DEBUG_IRQ("%s: irq %d\n", __func__, d->irq);
}

/* Unmask a HIF interrupt by setting its enable bit. */
static void
netx_hif_unmask_irq(struct irq_data *d)
{
	unsigned int val, irq;

	irq = d->irq - NETX_IRQ_HIF_CHAINED(0);
	val = readl(NETX_DPMAS_INT_EN);
	val |= (1 << 24) << irq;
	writel(val, NETX_DPMAS_INT_EN);
	DEBUG_IRQ("%s: irq %d\n", __func__, d->irq);
}

static struct irq_chip netx_hif_chip = {
	.irq_ack	= netx_hif_ack_irq,
	.irq_mask	= netx_hif_mask_irq,
	.irq_unmask	= netx_hif_unmask_irq,
	.irq_set_type	= netx_hif_irq_type,
};

/*
 * Initialize the VIC, register the chained HIF interrupts, globally
 * enable the DPMAS interrupt block and install the demux handler.
 */
void __init netx_init_irq(void)
{
	int irq;

	vic_init(io_p2v(NETX_PA_VIC), 0, ~0, 0);

	for (irq = NETX_IRQ_HIF_CHAINED(0); irq <= NETX_IRQ_HIF_LAST; irq++) {
		irq_set_chip_and_handler(irq, &netx_hif_chip,
					 handle_level_irq);
		set_irq_flags(irq, IRQF_VALID);
	}

	writel(NETX_DPMAS_INT_EN_GLB_EN, NETX_DPMAS_INT_EN);
	irq_set_chained_handler(NETX_IRQ_HIF, netx_hif_demux_handler);
}

static int __init netx_init(void)
{
	return platform_add_devices(devices, ARRAY_SIZE(devices));
}

subsys_initcall(netx_init);

/* Restart by asserting the firmware reset bits in the system control reg. */
void netx_restart(char mode, const char *cmd)
{
	writel(NETX_SYSTEM_RES_CR_FIRMW_RES_EN | NETX_SYSTEM_RES_CR_FIRMW_RES,
	       NETX_SYSTEM_RES_CR);
}
gpl-2.0
mightysween/vs980-kernel
drivers/zorro/proc.c
5307
3848
/* * Procfs interface for the Zorro bus. * * Copyright (C) 1998-2003 Geert Uytterhoeven * * Heavily based on the procfs interface for the PCI bus, which is * * Copyright (C) 1997, 1998 Martin Mares <mj@atrey.karlin.mff.cuni.cz> */ #include <linux/types.h> #include <linux/zorro.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/init.h> #include <linux/export.h> #include <asm/uaccess.h> #include <asm/amigahw.h> #include <asm/setup.h> static loff_t proc_bus_zorro_lseek(struct file *file, loff_t off, int whence) { loff_t new = -1; struct inode *inode = file->f_path.dentry->d_inode; mutex_lock(&inode->i_mutex); switch (whence) { case 0: new = off; break; case 1: new = file->f_pos + off; break; case 2: new = sizeof(struct ConfigDev) + off; break; } if (new < 0 || new > sizeof(struct ConfigDev)) new = -EINVAL; else file->f_pos = new; mutex_unlock(&inode->i_mutex); return new; } static ssize_t proc_bus_zorro_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos) { struct inode *ino = file->f_path.dentry->d_inode; struct proc_dir_entry *dp = PDE(ino); struct zorro_dev *z = dp->data; struct ConfigDev cd; loff_t pos = *ppos; if (pos >= sizeof(struct ConfigDev)) return 0; if (nbytes >= sizeof(struct ConfigDev)) nbytes = sizeof(struct ConfigDev); if (pos + nbytes > sizeof(struct ConfigDev)) nbytes = sizeof(struct ConfigDev) - pos; /* Construct a ConfigDev */ memset(&cd, 0, sizeof(cd)); cd.cd_Rom = z->rom; cd.cd_SlotAddr = z->slotaddr; cd.cd_SlotSize = z->slotsize; cd.cd_BoardAddr = (void *)zorro_resource_start(z); cd.cd_BoardSize = zorro_resource_len(z); if (copy_to_user(buf, (void *)&cd + pos, nbytes)) return -EFAULT; *ppos += nbytes; return nbytes; } static const struct file_operations proc_bus_zorro_operations = { .owner = THIS_MODULE, .llseek = proc_bus_zorro_lseek, .read = proc_bus_zorro_read, }; static void * zorro_seq_start(struct seq_file *m, loff_t *pos) { return (*pos < zorro_num_autocon) ? 
pos : NULL; } static void * zorro_seq_next(struct seq_file *m, void *v, loff_t *pos) { (*pos)++; return (*pos < zorro_num_autocon) ? pos : NULL; } static void zorro_seq_stop(struct seq_file *m, void *v) { } static int zorro_seq_show(struct seq_file *m, void *v) { unsigned int slot = *(loff_t *)v; struct zorro_dev *z = &zorro_autocon[slot]; seq_printf(m, "%02x\t%08x\t%08lx\t%08lx\t%02x\n", slot, z->id, (unsigned long)zorro_resource_start(z), (unsigned long)zorro_resource_len(z), z->rom.er_Type); return 0; } static const struct seq_operations zorro_devices_seq_ops = { .start = zorro_seq_start, .next = zorro_seq_next, .stop = zorro_seq_stop, .show = zorro_seq_show, }; static int zorro_devices_proc_open(struct inode *inode, struct file *file) { return seq_open(file, &zorro_devices_seq_ops); } static const struct file_operations zorro_devices_proc_fops = { .owner = THIS_MODULE, .open = zorro_devices_proc_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release, }; static struct proc_dir_entry *proc_bus_zorro_dir; static int __init zorro_proc_attach_device(unsigned int slot) { struct proc_dir_entry *entry; char name[4]; sprintf(name, "%02x", slot); entry = proc_create_data(name, 0, proc_bus_zorro_dir, &proc_bus_zorro_operations, &zorro_autocon[slot]); if (!entry) return -ENOMEM; entry->size = sizeof(struct zorro_dev); return 0; } static int __init zorro_proc_init(void) { unsigned int slot; if (MACH_IS_AMIGA && AMIGAHW_PRESENT(ZORRO)) { proc_bus_zorro_dir = proc_mkdir("bus/zorro", NULL); proc_create("devices", 0, proc_bus_zorro_dir, &zorro_devices_proc_fops); for (slot = 0; slot < zorro_num_autocon; slot++) zorro_proc_attach_device(slot); } return 0; } device_initcall(zorro_proc_init);
gpl-2.0
AndroidDeveloperAlliance/ZenKernel_TUNA
fs/jffs2/xattr_user.c
12731
1418
/* * JFFS2 -- Journalling Flash File System, Version 2. * * Copyright © 2006 NEC Corporation * * Created by KaiGai Kohei <kaigai@ak.jp.nec.com> * * For licensing information, see the file 'LICENCE' in this directory. * */ #include <linux/kernel.h> #include <linux/fs.h> #include <linux/jffs2.h> #include <linux/xattr.h> #include <linux/mtd/mtd.h> #include "nodelist.h" static int jffs2_user_getxattr(struct dentry *dentry, const char *name, void *buffer, size_t size, int type) { if (!strcmp(name, "")) return -EINVAL; return do_jffs2_getxattr(dentry->d_inode, JFFS2_XPREFIX_USER, name, buffer, size); } static int jffs2_user_setxattr(struct dentry *dentry, const char *name, const void *buffer, size_t size, int flags, int type) { if (!strcmp(name, "")) return -EINVAL; return do_jffs2_setxattr(dentry->d_inode, JFFS2_XPREFIX_USER, name, buffer, size, flags); } static size_t jffs2_user_listxattr(struct dentry *dentry, char *list, size_t list_size, const char *name, size_t name_len, int type) { size_t retlen = XATTR_USER_PREFIX_LEN + name_len + 1; if (list && retlen <= list_size) { strcpy(list, XATTR_USER_PREFIX); strcpy(list + XATTR_USER_PREFIX_LEN, name); } return retlen; } const struct xattr_handler jffs2_user_xattr_handler = { .prefix = XATTR_USER_PREFIX, .list = jffs2_user_listxattr, .set = jffs2_user_setxattr, .get = jffs2_user_getxattr };
gpl-2.0
Red--Code/base-andro-
fs/jffs2/xattr_trusted.c
12731
1447
/* * JFFS2 -- Journalling Flash File System, Version 2. * * Copyright © 2006 NEC Corporation * * Created by KaiGai Kohei <kaigai@ak.jp.nec.com> * * For licensing information, see the file 'LICENCE' in this directory. * */ #include <linux/kernel.h> #include <linux/fs.h> #include <linux/jffs2.h> #include <linux/xattr.h> #include <linux/mtd/mtd.h> #include "nodelist.h" static int jffs2_trusted_getxattr(struct dentry *dentry, const char *name, void *buffer, size_t size, int type) { if (!strcmp(name, "")) return -EINVAL; return do_jffs2_getxattr(dentry->d_inode, JFFS2_XPREFIX_TRUSTED, name, buffer, size); } static int jffs2_trusted_setxattr(struct dentry *dentry, const char *name, const void *buffer, size_t size, int flags, int type) { if (!strcmp(name, "")) return -EINVAL; return do_jffs2_setxattr(dentry->d_inode, JFFS2_XPREFIX_TRUSTED, name, buffer, size, flags); } static size_t jffs2_trusted_listxattr(struct dentry *dentry, char *list, size_t list_size, const char *name, size_t name_len, int type) { size_t retlen = XATTR_TRUSTED_PREFIX_LEN + name_len + 1; if (list && retlen<=list_size) { strcpy(list, XATTR_TRUSTED_PREFIX); strcpy(list + XATTR_TRUSTED_PREFIX_LEN, name); } return retlen; } const struct xattr_handler jffs2_trusted_xattr_handler = { .prefix = XATTR_TRUSTED_PREFIX, .list = jffs2_trusted_listxattr, .set = jffs2_trusted_setxattr, .get = jffs2_trusted_getxattr };
gpl-2.0
jakeclawson/linux
kernel/sched/stop_task.c
444
2970
#include "sched.h" /* * stop-task scheduling class. * * The stop task is the highest priority task in the system, it preempts * everything and will be preempted by nothing. * * See kernel/stop_machine.c */ #ifdef CONFIG_SMP static int select_task_rq_stop(struct task_struct *p, int cpu, int sd_flag, int flags) { return task_cpu(p); /* stop tasks as never migrate */ } #endif /* CONFIG_SMP */ static void check_preempt_curr_stop(struct rq *rq, struct task_struct *p, int flags) { /* we're never preempted */ } static struct task_struct * pick_next_task_stop(struct rq *rq, struct task_struct *prev) { struct task_struct *stop = rq->stop; if (!stop || !task_on_rq_queued(stop)) return NULL; put_prev_task(rq, prev); stop->se.exec_start = rq_clock_task(rq); return stop; } static void enqueue_task_stop(struct rq *rq, struct task_struct *p, int flags) { add_nr_running(rq, 1); } static void dequeue_task_stop(struct rq *rq, struct task_struct *p, int flags) { sub_nr_running(rq, 1); } static void yield_task_stop(struct rq *rq) { BUG(); /* the stop task should never yield, its pointless. 
*/ } static void put_prev_task_stop(struct rq *rq, struct task_struct *prev) { struct task_struct *curr = rq->curr; u64 delta_exec; delta_exec = rq_clock_task(rq) - curr->se.exec_start; if (unlikely((s64)delta_exec < 0)) delta_exec = 0; schedstat_set(curr->se.statistics.exec_max, max(curr->se.statistics.exec_max, delta_exec)); curr->se.sum_exec_runtime += delta_exec; account_group_exec_runtime(curr, delta_exec); curr->se.exec_start = rq_clock_task(rq); cpuacct_charge(curr, delta_exec); } static void task_tick_stop(struct rq *rq, struct task_struct *curr, int queued) { } static void set_curr_task_stop(struct rq *rq) { struct task_struct *stop = rq->stop; stop->se.exec_start = rq_clock_task(rq); } static void switched_to_stop(struct rq *rq, struct task_struct *p) { BUG(); /* its impossible to change to this class */ } static void prio_changed_stop(struct rq *rq, struct task_struct *p, int oldprio) { BUG(); /* how!?, what priority? */ } static unsigned int get_rr_interval_stop(struct rq *rq, struct task_struct *task) { return 0; } static void update_curr_stop(struct rq *rq) { } /* * Simple, special scheduling class for the per-CPU stop tasks: */ const struct sched_class stop_sched_class = { .next = &dl_sched_class, .enqueue_task = enqueue_task_stop, .dequeue_task = dequeue_task_stop, .yield_task = yield_task_stop, .check_preempt_curr = check_preempt_curr_stop, .pick_next_task = pick_next_task_stop, .put_prev_task = put_prev_task_stop, #ifdef CONFIG_SMP .select_task_rq = select_task_rq_stop, .set_cpus_allowed = set_cpus_allowed_common, #endif .set_curr_task = set_curr_task_stop, .task_tick = task_tick_stop, .get_rr_interval = get_rr_interval_stop, .prio_changed = prio_changed_stop, .switched_to = switched_to_stop, .update_curr = update_curr_stop, };
gpl-2.0
houzhenggang/bcm63xx-next
drivers/media/platform/davinci/vpss.c
956
13681
/* * Copyright (C) 2009 Texas Instruments. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * * common vpss system module platform driver for all video drivers. */ #include <linux/module.h> #include <linux/platform_device.h> #include <linux/io.h> #include <linux/pm_runtime.h> #include <linux/err.h> #include <media/davinci/vpss.h> MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("VPSS Driver"); MODULE_AUTHOR("Texas Instruments"); /* DM644x defines */ #define DM644X_SBL_PCR_VPSS (4) #define DM355_VPSSBL_INTSEL 0x10 #define DM355_VPSSBL_EVTSEL 0x14 /* vpss BL register offsets */ #define DM355_VPSSBL_CCDCMUX 0x1c /* vpss CLK register offsets */ #define DM355_VPSSCLK_CLKCTRL 0x04 /* masks and shifts */ #define VPSS_HSSISEL_SHIFT 4 /* * VDINT0 - vpss_int0, VDINT1 - vpss_int1, H3A - vpss_int4, * IPIPE_INT1_SDR - vpss_int5 */ #define DM355_VPSSBL_INTSEL_DEFAULT 0xff83ff10 /* VENCINT - vpss_int8 */ #define DM355_VPSSBL_EVTSEL_DEFAULT 0x4 #define DM365_ISP5_PCCR 0x04 #define DM365_ISP5_PCCR_BL_CLK_ENABLE BIT(0) #define DM365_ISP5_PCCR_ISIF_CLK_ENABLE BIT(1) #define DM365_ISP5_PCCR_H3A_CLK_ENABLE BIT(2) #define DM365_ISP5_PCCR_RSZ_CLK_ENABLE BIT(3) #define DM365_ISP5_PCCR_IPIPE_CLK_ENABLE BIT(4) #define DM365_ISP5_PCCR_IPIPEIF_CLK_ENABLE BIT(5) #define DM365_ISP5_PCCR_RSV BIT(6) #define DM365_ISP5_BCR 0x08 #define DM365_ISP5_BCR_ISIF_OUT_ENABLE BIT(1) #define 
DM365_ISP5_INTSEL1 0x10 #define DM365_ISP5_INTSEL2 0x14 #define DM365_ISP5_INTSEL3 0x18 #define DM365_ISP5_CCDCMUX 0x20 #define DM365_ISP5_PG_FRAME_SIZE 0x28 #define DM365_VPBE_CLK_CTRL 0x00 #define VPSS_CLK_CTRL 0x01c40044 #define VPSS_CLK_CTRL_VENCCLKEN BIT(3) #define VPSS_CLK_CTRL_DACCLKEN BIT(4) /* * vpss interrupts. VDINT0 - vpss_int0, VDINT1 - vpss_int1, * AF - vpss_int3 */ #define DM365_ISP5_INTSEL1_DEFAULT 0x0b1f0100 /* AEW - vpss_int6, RSZ_INT_DMA - vpss_int5 */ #define DM365_ISP5_INTSEL2_DEFAULT 0x1f0a0f1f /* VENC - vpss_int8 */ #define DM365_ISP5_INTSEL3_DEFAULT 0x00000015 /* masks and shifts for DM365*/ #define DM365_CCDC_PG_VD_POL_SHIFT 0 #define DM365_CCDC_PG_HD_POL_SHIFT 1 #define CCD_SRC_SEL_MASK (BIT_MASK(5) | BIT_MASK(4)) #define CCD_SRC_SEL_SHIFT 4 /* Different SoC platforms supported by this driver */ enum vpss_platform_type { DM644X, DM355, DM365, }; /* * vpss operations. Depends on platform. Not all functions are available * on all platforms. The api, first check if a function is available before * invoking it. In the probe, the function ptrs are initialized based on * vpss name. vpss name can be "dm355_vpss", "dm644x_vpss" etc. 
*/ struct vpss_hw_ops { /* enable clock */ int (*enable_clock)(enum vpss_clock_sel clock_sel, int en); /* select input to ccdc */ void (*select_ccdc_source)(enum vpss_ccdc_source_sel src_sel); /* clear wbl overflow bit */ int (*clear_wbl_overflow)(enum vpss_wbl_sel wbl_sel); /* set sync polarity */ void (*set_sync_pol)(struct vpss_sync_pol); /* set the PG_FRAME_SIZE register*/ void (*set_pg_frame_size)(struct vpss_pg_frame_size); /* check and clear interrupt if occurred */ int (*dma_complete_interrupt)(void); }; /* vpss configuration */ struct vpss_oper_config { __iomem void *vpss_regs_base0; __iomem void *vpss_regs_base1; resource_size_t *vpss_regs_base2; enum vpss_platform_type platform; spinlock_t vpss_lock; struct vpss_hw_ops hw_ops; }; static struct vpss_oper_config oper_cfg; /* register access routines */ static inline u32 bl_regr(u32 offset) { return __raw_readl(oper_cfg.vpss_regs_base0 + offset); } static inline void bl_regw(u32 val, u32 offset) { __raw_writel(val, oper_cfg.vpss_regs_base0 + offset); } static inline u32 vpss_regr(u32 offset) { return __raw_readl(oper_cfg.vpss_regs_base1 + offset); } static inline void vpss_regw(u32 val, u32 offset) { __raw_writel(val, oper_cfg.vpss_regs_base1 + offset); } /* For DM365 only */ static inline u32 isp5_read(u32 offset) { return __raw_readl(oper_cfg.vpss_regs_base0 + offset); } /* For DM365 only */ static inline void isp5_write(u32 val, u32 offset) { __raw_writel(val, oper_cfg.vpss_regs_base0 + offset); } static void dm365_select_ccdc_source(enum vpss_ccdc_source_sel src_sel) { u32 temp = isp5_read(DM365_ISP5_CCDCMUX) & ~CCD_SRC_SEL_MASK; /* if we are using pattern generator, enable it */ if (src_sel == VPSS_PGLPBK || src_sel == VPSS_CCDCPG) temp |= 0x08; temp |= (src_sel << CCD_SRC_SEL_SHIFT); isp5_write(temp, DM365_ISP5_CCDCMUX); } static void dm355_select_ccdc_source(enum vpss_ccdc_source_sel src_sel) { bl_regw(src_sel << VPSS_HSSISEL_SHIFT, DM355_VPSSBL_CCDCMUX); } int vpss_dma_complete_interrupt(void) { if 
(!oper_cfg.hw_ops.dma_complete_interrupt) return 2; return oper_cfg.hw_ops.dma_complete_interrupt(); } EXPORT_SYMBOL(vpss_dma_complete_interrupt); int vpss_select_ccdc_source(enum vpss_ccdc_source_sel src_sel) { if (!oper_cfg.hw_ops.select_ccdc_source) return -EINVAL; oper_cfg.hw_ops.select_ccdc_source(src_sel); return 0; } EXPORT_SYMBOL(vpss_select_ccdc_source); static int dm644x_clear_wbl_overflow(enum vpss_wbl_sel wbl_sel) { u32 mask = 1, val; if (wbl_sel < VPSS_PCR_AEW_WBL_0 || wbl_sel > VPSS_PCR_CCDC_WBL_O) return -EINVAL; /* writing a 0 clear the overflow */ mask = ~(mask << wbl_sel); val = bl_regr(DM644X_SBL_PCR_VPSS) & mask; bl_regw(val, DM644X_SBL_PCR_VPSS); return 0; } void vpss_set_sync_pol(struct vpss_sync_pol sync) { if (!oper_cfg.hw_ops.set_sync_pol) return; oper_cfg.hw_ops.set_sync_pol(sync); } EXPORT_SYMBOL(vpss_set_sync_pol); int vpss_clear_wbl_overflow(enum vpss_wbl_sel wbl_sel) { if (!oper_cfg.hw_ops.clear_wbl_overflow) return -EINVAL; return oper_cfg.hw_ops.clear_wbl_overflow(wbl_sel); } EXPORT_SYMBOL(vpss_clear_wbl_overflow); /* * dm355_enable_clock - Enable VPSS Clock * @clock_sel: Clock to be enabled/disabled * @en: enable/disable flag * * This is called to enable or disable a vpss clock */ static int dm355_enable_clock(enum vpss_clock_sel clock_sel, int en) { unsigned long flags; u32 utemp, mask = 0x1, shift = 0; switch (clock_sel) { case VPSS_VPBE_CLOCK: /* nothing since lsb */ break; case VPSS_VENC_CLOCK_SEL: shift = 2; break; case VPSS_CFALD_CLOCK: shift = 3; break; case VPSS_H3A_CLOCK: shift = 4; break; case VPSS_IPIPE_CLOCK: shift = 5; break; case VPSS_CCDC_CLOCK: shift = 6; break; default: printk(KERN_ERR "dm355_enable_clock:" " Invalid selector: %d\n", clock_sel); return -EINVAL; } spin_lock_irqsave(&oper_cfg.vpss_lock, flags); utemp = vpss_regr(DM355_VPSSCLK_CLKCTRL); if (!en) utemp &= ~(mask << shift); else utemp |= (mask << shift); vpss_regw(utemp, DM355_VPSSCLK_CLKCTRL); spin_unlock_irqrestore(&oper_cfg.vpss_lock, flags); return 
0; } static int dm365_enable_clock(enum vpss_clock_sel clock_sel, int en) { unsigned long flags; u32 utemp, mask = 0x1, shift = 0, offset = DM365_ISP5_PCCR; u32 (*read)(u32 offset) = isp5_read; void(*write)(u32 val, u32 offset) = isp5_write; switch (clock_sel) { case VPSS_BL_CLOCK: break; case VPSS_CCDC_CLOCK: shift = 1; break; case VPSS_H3A_CLOCK: shift = 2; break; case VPSS_RSZ_CLOCK: shift = 3; break; case VPSS_IPIPE_CLOCK: shift = 4; break; case VPSS_IPIPEIF_CLOCK: shift = 5; break; case VPSS_PCLK_INTERNAL: shift = 6; break; case VPSS_PSYNC_CLOCK_SEL: shift = 7; break; case VPSS_VPBE_CLOCK: read = vpss_regr; write = vpss_regw; offset = DM365_VPBE_CLK_CTRL; break; case VPSS_VENC_CLOCK_SEL: shift = 2; read = vpss_regr; write = vpss_regw; offset = DM365_VPBE_CLK_CTRL; break; case VPSS_LDC_CLOCK: shift = 3; read = vpss_regr; write = vpss_regw; offset = DM365_VPBE_CLK_CTRL; break; case VPSS_FDIF_CLOCK: shift = 4; read = vpss_regr; write = vpss_regw; offset = DM365_VPBE_CLK_CTRL; break; case VPSS_OSD_CLOCK_SEL: shift = 6; read = vpss_regr; write = vpss_regw; offset = DM365_VPBE_CLK_CTRL; break; case VPSS_LDC_CLOCK_SEL: shift = 7; read = vpss_regr; write = vpss_regw; offset = DM365_VPBE_CLK_CTRL; break; default: printk(KERN_ERR "dm365_enable_clock: Invalid selector: %d\n", clock_sel); return -1; } spin_lock_irqsave(&oper_cfg.vpss_lock, flags); utemp = read(offset); if (!en) { mask = ~mask; utemp &= (mask << shift); } else utemp |= (mask << shift); write(utemp, offset); spin_unlock_irqrestore(&oper_cfg.vpss_lock, flags); return 0; } int vpss_enable_clock(enum vpss_clock_sel clock_sel, int en) { if (!oper_cfg.hw_ops.enable_clock) return -EINVAL; return oper_cfg.hw_ops.enable_clock(clock_sel, en); } EXPORT_SYMBOL(vpss_enable_clock); void dm365_vpss_set_sync_pol(struct vpss_sync_pol sync) { int val = 0; val = isp5_read(DM365_ISP5_CCDCMUX); val |= (sync.ccdpg_hdpol << DM365_CCDC_PG_HD_POL_SHIFT); val |= (sync.ccdpg_vdpol << DM365_CCDC_PG_VD_POL_SHIFT); isp5_write(val, 
DM365_ISP5_CCDCMUX); } EXPORT_SYMBOL(dm365_vpss_set_sync_pol); void vpss_set_pg_frame_size(struct vpss_pg_frame_size frame_size) { if (!oper_cfg.hw_ops.set_pg_frame_size) return; oper_cfg.hw_ops.set_pg_frame_size(frame_size); } EXPORT_SYMBOL(vpss_set_pg_frame_size); void dm365_vpss_set_pg_frame_size(struct vpss_pg_frame_size frame_size) { int current_reg = ((frame_size.hlpfr >> 1) - 1) << 16; current_reg |= (frame_size.pplen - 1); isp5_write(current_reg, DM365_ISP5_PG_FRAME_SIZE); } EXPORT_SYMBOL(dm365_vpss_set_pg_frame_size); static int vpss_probe(struct platform_device *pdev) { struct resource *res; char *platform_name; if (!pdev->dev.platform_data) { dev_err(&pdev->dev, "no platform data\n"); return -ENOENT; } platform_name = pdev->dev.platform_data; if (!strcmp(platform_name, "dm355_vpss")) oper_cfg.platform = DM355; else if (!strcmp(platform_name, "dm365_vpss")) oper_cfg.platform = DM365; else if (!strcmp(platform_name, "dm644x_vpss")) oper_cfg.platform = DM644X; else { dev_err(&pdev->dev, "vpss driver not supported on" " this platform\n"); return -ENODEV; } dev_info(&pdev->dev, "%s vpss probed\n", platform_name); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); oper_cfg.vpss_regs_base0 = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(oper_cfg.vpss_regs_base0)) return PTR_ERR(oper_cfg.vpss_regs_base0); if (oper_cfg.platform == DM355 || oper_cfg.platform == DM365) { res = platform_get_resource(pdev, IORESOURCE_MEM, 1); oper_cfg.vpss_regs_base1 = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(oper_cfg.vpss_regs_base1)) return PTR_ERR(oper_cfg.vpss_regs_base1); } if (oper_cfg.platform == DM355) { oper_cfg.hw_ops.enable_clock = dm355_enable_clock; oper_cfg.hw_ops.select_ccdc_source = dm355_select_ccdc_source; /* Setup vpss interrupts */ bl_regw(DM355_VPSSBL_INTSEL_DEFAULT, DM355_VPSSBL_INTSEL); bl_regw(DM355_VPSSBL_EVTSEL_DEFAULT, DM355_VPSSBL_EVTSEL); } else if (oper_cfg.platform == DM365) { oper_cfg.hw_ops.enable_clock = dm365_enable_clock; 
oper_cfg.hw_ops.select_ccdc_source = dm365_select_ccdc_source; /* Setup vpss interrupts */ isp5_write((isp5_read(DM365_ISP5_PCCR) | DM365_ISP5_PCCR_BL_CLK_ENABLE | DM365_ISP5_PCCR_ISIF_CLK_ENABLE | DM365_ISP5_PCCR_H3A_CLK_ENABLE | DM365_ISP5_PCCR_RSZ_CLK_ENABLE | DM365_ISP5_PCCR_IPIPE_CLK_ENABLE | DM365_ISP5_PCCR_IPIPEIF_CLK_ENABLE | DM365_ISP5_PCCR_RSV), DM365_ISP5_PCCR); isp5_write((isp5_read(DM365_ISP5_BCR) | DM365_ISP5_BCR_ISIF_OUT_ENABLE), DM365_ISP5_BCR); isp5_write(DM365_ISP5_INTSEL1_DEFAULT, DM365_ISP5_INTSEL1); isp5_write(DM365_ISP5_INTSEL2_DEFAULT, DM365_ISP5_INTSEL2); isp5_write(DM365_ISP5_INTSEL3_DEFAULT, DM365_ISP5_INTSEL3); } else oper_cfg.hw_ops.clear_wbl_overflow = dm644x_clear_wbl_overflow; pm_runtime_enable(&pdev->dev); pm_runtime_get(&pdev->dev); spin_lock_init(&oper_cfg.vpss_lock); dev_info(&pdev->dev, "%s vpss probe success\n", platform_name); return 0; } static int vpss_remove(struct platform_device *pdev) { pm_runtime_disable(&pdev->dev); return 0; } static int vpss_suspend(struct device *dev) { pm_runtime_put(dev); return 0; } static int vpss_resume(struct device *dev) { pm_runtime_get(dev); return 0; } static const struct dev_pm_ops vpss_pm_ops = { .suspend = vpss_suspend, .resume = vpss_resume, }; static struct platform_driver vpss_driver = { .driver = { .name = "vpss", .owner = THIS_MODULE, .pm = &vpss_pm_ops, }, .remove = vpss_remove, .probe = vpss_probe, }; static void vpss_exit(void) { iounmap(oper_cfg.vpss_regs_base2); release_mem_region(VPSS_CLK_CTRL, 4); platform_driver_unregister(&vpss_driver); } static int __init vpss_init(void) { if (!request_mem_region(VPSS_CLK_CTRL, 4, "vpss_clock_control")) return -EBUSY; oper_cfg.vpss_regs_base2 = ioremap(VPSS_CLK_CTRL, 4); writel(VPSS_CLK_CTRL_VENCCLKEN | VPSS_CLK_CTRL_DACCLKEN, oper_cfg.vpss_regs_base2); return platform_driver_register(&vpss_driver); } subsys_initcall(vpss_init); module_exit(vpss_exit);
gpl-2.0
lucaspcamargo/litmus-rt
sound/soc/codecs/ad1836.c
956
10677
/*
 * Audio Codec driver supporting:
 *  AD1835A, AD1836, AD1837A, AD1838A, AD1839A
 *
 * Copyright 2009-2011 Analog Devices Inc.
 *
 * Licensed under the GPL-2 or later.
 */

#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/initval.h>
#include <sound/soc.h>
#include <sound/tlv.h>
#include <linux/spi/spi.h>
#include <linux/regmap.h>
#include "ad1836.h"

/* Chip variants; index into ad183x_dais[] (DAC/ADC counts differ). */
enum ad1836_type {
	AD1835,
	AD1836,
	AD1838,
};

/* codec private data */
struct ad1836_priv {
	enum ad1836_type type;	/* which chip variant was probed */
	struct regmap *regmap;	/* SPI-backed register map */
};

/*
 * AD1836 volume/mute/de-emphasis etc. controls
 */
static const char *ad1836_deemp[] = {"None", "44.1kHz", "32kHz", "48kHz"};

static SOC_ENUM_SINGLE_DECL(ad1836_deemp_enum,
			    AD1836_DAC_CTRL1, 8, ad1836_deemp);

/* Per-channel-pair control constructors, parameterized by DAC/ADC index. */
#define AD1836_DAC_VOLUME(x) \
	SOC_DOUBLE_R("DAC" #x " Playback Volume", AD1836_DAC_L_VOL(x), \
			AD1836_DAC_R_VOL(x), 0, 0x3FF, 0)

#define AD1836_DAC_SWITCH(x) \
	SOC_DOUBLE("DAC" #x " Playback Switch", AD1836_DAC_CTRL2, \
			AD1836_MUTE_LEFT(x), AD1836_MUTE_RIGHT(x), 1, 1)

#define AD1836_ADC_SWITCH(x) \
	SOC_DOUBLE("ADC" #x " Capture Switch", AD1836_ADC_CTRL2, \
		AD1836_MUTE_LEFT(x), AD1836_MUTE_RIGHT(x), 1, 1)

static const struct snd_kcontrol_new ad183x_dac_controls[] = {
	AD1836_DAC_VOLUME(1),
	AD1836_DAC_SWITCH(1),
	AD1836_DAC_VOLUME(2),
	AD1836_DAC_SWITCH(2),
	AD1836_DAC_VOLUME(3),
	AD1836_DAC_SWITCH(3),
	AD1836_DAC_VOLUME(4),
	AD1836_DAC_SWITCH(4),
};

static const struct snd_soc_dapm_widget ad183x_dac_dapm_widgets[] = {
	SND_SOC_DAPM_OUTPUT("DAC1OUT"),
	SND_SOC_DAPM_OUTPUT("DAC2OUT"),
	SND_SOC_DAPM_OUTPUT("DAC3OUT"),
	SND_SOC_DAPM_OUTPUT("DAC4OUT"),
};

static const struct snd_soc_dapm_route ad183x_dac_routes[] = {
	{ "DAC1OUT", NULL, "DAC" },
	{ "DAC2OUT", NULL, "DAC" },
	{ "DAC3OUT", NULL, "DAC" },
	{ "DAC4OUT", NULL, "DAC" },
};

static const struct snd_kcontrol_new ad183x_adc_controls[] = {
	AD1836_ADC_SWITCH(1),
	AD1836_ADC_SWITCH(2),
	AD1836_ADC_SWITCH(3),
};

static const struct snd_soc_dapm_widget ad183x_adc_dapm_widgets[] = {
	SND_SOC_DAPM_INPUT("ADC1IN"),
	SND_SOC_DAPM_INPUT("ADC2IN"),
};

static const struct snd_soc_dapm_route ad183x_adc_routes[] = {
	{ "ADC", NULL, "ADC1IN" },
	{ "ADC", NULL, "ADC2IN" },
};

/* Controls common to every supported variant. */
static const struct snd_kcontrol_new ad183x_controls[] = {
	/* ADC high-pass filter */
	SOC_SINGLE("ADC High Pass Filter Switch", AD1836_ADC_CTRL1,
			AD1836_ADC_HIGHPASS_FILTER, 1, 0),

	/* DAC de-emphasis */
	SOC_ENUM("Playback Deemphasis", ad1836_deemp_enum),
};

static const struct snd_soc_dapm_widget ad183x_dapm_widgets[] = {
	SND_SOC_DAPM_DAC("DAC", "Playback", AD1836_DAC_CTRL1,
			AD1836_DAC_POWERDOWN, 1),
	SND_SOC_DAPM_ADC("ADC", "Capture", SND_SOC_NOPM, 0, 0),
	SND_SOC_DAPM_SUPPLY("ADC_PWR", AD1836_ADC_CTRL1,
			AD1836_ADC_POWERDOWN, 1, NULL, 0),
};

static const struct snd_soc_dapm_route ad183x_dapm_routes[] = {
	{ "DAC", NULL, "ADC_PWR" },
	{ "ADC", NULL, "ADC_PWR" },
};

static const DECLARE_TLV_DB_SCALE(ad1836_in_tlv, 0, 300, 0);

/* Controls only present on the AD1836 itself (added at probe time). */
static const struct snd_kcontrol_new ad1836_controls[] = {
	SOC_DOUBLE_TLV("ADC2 Capture Volume", AD1836_ADC_CTRL1, 3, 0, 4, 0,
			ad1836_in_tlv),
};

/*
 * DAI ops entries
 */

/*
 * Validate the requested DAI format.  Only DSP_A / inverted-frame /
 * codec-master is accepted; nothing is written to the hardware here.
 */
static int ad1836_set_dai_fmt(struct snd_soc_dai *codec_dai,
		unsigned int fmt)
{
	switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
	/* at present, we support adc aux mode to interface with
	 * blackfin sport tdm mode
	 */
	case SND_SOC_DAIFMT_DSP_A:
		break;
	default:
		return -EINVAL;
	}

	switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
	case SND_SOC_DAIFMT_IB_IF:
		break;
	default:
		return -EINVAL;
	}

	switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
	/* ALCLK,ABCLK are both output, AD1836 can only be master */
	case SND_SOC_DAIFMT_CBM_CFM:
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/* Map the sample width to the chip word-length field for DAC and ADC. */
static int ad1836_hw_params(struct snd_pcm_substream *substream,
		struct snd_pcm_hw_params *params,
		struct snd_soc_dai *dai)
{
	struct ad1836_priv *ad1836 = snd_soc_codec_get_drvdata(dai->codec);
	int word_len = 0;

	/* bit size */
	switch (params_width(params)) {
	case 16:
		word_len = AD1836_WORD_LEN_16;
		break;
	case 20:
		word_len = AD1836_WORD_LEN_20;
		break;
	case 24:
	case 32:
		word_len = AD1836_WORD_LEN_24;
		break;
	default:
		return -EINVAL;
	}

	regmap_update_bits(ad1836->regmap, AD1836_DAC_CTRL1,
		AD1836_DAC_WORD_LEN_MASK,
		word_len << AD1836_DAC_WORD_LEN_OFFSET);

	regmap_update_bits(ad1836->regmap, AD1836_ADC_CTRL2,
		AD1836_ADC_WORD_LEN_MASK,
		word_len << AD1836_ADC_WORD_OFFSET);

	return 0;
}

static const struct snd_soc_dai_ops ad1836_dai_ops = {
	.hw_params = ad1836_hw_params,
	.set_fmt = ad1836_set_dai_fmt,
};

/* DAI template; channels_max encodes the variant's DAC/ADC pair count. */
#define AD183X_DAI(_name, num_dacs, num_adcs) \
{ \
	.name = _name "-hifi", \
	.playback = { \
		.stream_name = "Playback", \
		.channels_min = 2, \
		.channels_max = (num_dacs) * 2, \
		.rates = SNDRV_PCM_RATE_48000, \
		.formats = SNDRV_PCM_FMTBIT_S32_LE | SNDRV_PCM_FMTBIT_S16_LE | \
			SNDRV_PCM_FMTBIT_S20_3LE | SNDRV_PCM_FMTBIT_S24_LE, \
	}, \
	.capture = { \
		.stream_name = "Capture", \
		.channels_min = 2, \
		.channels_max = (num_adcs) * 2, \
		.rates = SNDRV_PCM_RATE_48000, \
		.formats = SNDRV_PCM_FMTBIT_S32_LE | SNDRV_PCM_FMTBIT_S16_LE | \
			SNDRV_PCM_FMTBIT_S20_3LE | SNDRV_PCM_FMTBIT_S24_LE, \
	}, \
	.ops = &ad1836_dai_ops, \
}

static struct snd_soc_dai_driver ad183x_dais[] = {
	[AD1835] = AD183X_DAI("ad1835", 4, 1),
	[AD1836] = AD183X_DAI("ad1836", 3, 2),
	[AD1838] = AD183X_DAI("ad1838", 3, 1),
};

#ifdef CONFIG_PM
static int ad1836_suspend(struct snd_soc_codec *codec)
{
	struct ad1836_priv *ad1836 = snd_soc_codec_get_drvdata(codec);
	/* reset clock control mode */
	return regmap_update_bits(ad1836->regmap, AD1836_ADC_CTRL2,
		AD1836_ADC_SERFMT_MASK, 0);
}

static int ad1836_resume(struct snd_soc_codec *codec)
{
	struct ad1836_priv *ad1836 = snd_soc_codec_get_drvdata(codec);
	/* restore clock control mode */
	return regmap_update_bits(ad1836->regmap, AD1836_ADC_CTRL2,
		AD1836_ADC_SERFMT_MASK, AD1836_ADC_AUX);
}
#else
#define ad1836_suspend NULL
#define ad1836_resume  NULL
#endif

/*
 * Program the power-on register defaults, then register the controls,
 * DAPM widgets and routes sized to the probed variant's DAC/ADC counts.
 */
static int ad1836_probe(struct snd_soc_codec *codec)
{
	struct ad1836_priv *ad1836 = snd_soc_codec_get_drvdata(codec);
	struct snd_soc_dapm_context *dapm = &codec->dapm;
	int num_dacs, num_adcs;
	int ret = 0;
	int i;

	num_dacs = ad183x_dais[ad1836->type].playback.channels_max / 2;
	num_adcs = ad183x_dais[ad1836->type].capture.channels_max / 2;

	/* default setting for ad1836 */
	/* de-emphasis: 48kHz, power-on dac */
	regmap_write(ad1836->regmap, AD1836_DAC_CTRL1, 0x300);
	/* unmute dac channels */
	regmap_write(ad1836->regmap, AD1836_DAC_CTRL2, 0x0);
	/* high-pass filter enable, power-on adc */
	regmap_write(ad1836->regmap, AD1836_ADC_CTRL1, 0x100);
	/* unmute adc channles, adc aux mode */
	regmap_write(ad1836->regmap, AD1836_ADC_CTRL2, 0x180);
	/* volume */
	for (i = 1; i <= num_dacs; ++i) {
		regmap_write(ad1836->regmap, AD1836_DAC_L_VOL(i), 0x3FF);
		regmap_write(ad1836->regmap, AD1836_DAC_R_VOL(i), 0x3FF);
	}

	if (ad1836->type == AD1836) {
		/* left/right diff:PGA/MUX */
		regmap_write(ad1836->regmap, AD1836_ADC_CTRL3, 0x3A);
		ret = snd_soc_add_codec_controls(codec, ad1836_controls,
				ARRAY_SIZE(ad1836_controls));
		if (ret)
			return ret;
	} else {
		regmap_write(ad1836->regmap, AD1836_ADC_CTRL3, 0x00);
	}

	ret = snd_soc_add_codec_controls(codec, ad183x_dac_controls,
			num_dacs * 2);
	if (ret)
		return ret;

	ret = snd_soc_add_codec_controls(codec, ad183x_adc_controls, num_adcs);
	if (ret)
		return ret;

	ret = snd_soc_dapm_new_controls(dapm, ad183x_dac_dapm_widgets,
			num_dacs);
	if (ret)
		return ret;

	ret = snd_soc_dapm_new_controls(dapm, ad183x_adc_dapm_widgets,
			num_adcs);
	if (ret)
		return ret;

	ret = snd_soc_dapm_add_routes(dapm, ad183x_dac_routes, num_dacs);
	if (ret)
		return ret;

	ret = snd_soc_dapm_add_routes(dapm, ad183x_adc_routes, num_adcs);
	if (ret)
		return ret;

	return ret;
}

/* power down chip */
static int ad1836_remove(struct snd_soc_codec *codec)
{
	struct ad1836_priv *ad1836 = snd_soc_codec_get_drvdata(codec);
	/* reset clock control mode */
	return regmap_update_bits(ad1836->regmap, AD1836_ADC_CTRL2,
		AD1836_ADC_SERFMT_MASK, 0);
}

static struct snd_soc_codec_driver soc_codec_dev_ad1836 = {
	.probe = ad1836_probe,
	.remove = ad1836_remove,
	.suspend = ad1836_suspend,
	.resume = ad1836_resume,

	.controls = ad183x_controls,
	.num_controls = ARRAY_SIZE(ad183x_controls),
	.dapm_widgets = ad183x_dapm_widgets,
	.num_dapm_widgets = ARRAY_SIZE(ad183x_dapm_widgets),
	.dapm_routes = ad183x_dapm_routes,
	.num_dapm_routes = ARRAY_SIZE(ad183x_dapm_routes),
};

/*
 * NOTE(review): these defaults index the DAC volume registers 0..3 while
 * ad1836_probe() writes AD1836_DAC_L_VOL(1..num_dacs) — confirm against
 * the AD1836_DAC_L_VOL()/AD1836_DAC_R_VOL() macro definitions in ad1836.h
 * whether the indexing bases agree.
 */
static const struct reg_default ad1836_reg_defaults[] = {
	{ AD1836_DAC_CTRL1, 0x0000 },
	{ AD1836_DAC_CTRL2, 0x0000 },
	{ AD1836_DAC_L_VOL(0), 0x0000 },
	{ AD1836_DAC_R_VOL(0), 0x0000 },
	{ AD1836_DAC_L_VOL(1), 0x0000 },
	{ AD1836_DAC_R_VOL(1), 0x0000 },
	{ AD1836_DAC_L_VOL(2), 0x0000 },
	{ AD1836_DAC_R_VOL(2), 0x0000 },
	{ AD1836_DAC_L_VOL(3), 0x0000 },
	{ AD1836_DAC_R_VOL(3), 0x0000 },
	{ AD1836_ADC_CTRL1, 0x0000 },
	{ AD1836_ADC_CTRL2, 0x0000 },
	{ AD1836_ADC_CTRL3, 0x0000 },
};

static const struct regmap_config ad1836_regmap_config = {
	.val_bits = 12,
	.reg_bits = 4,
	.read_flag_mask = 0x08,

	.max_register = AD1836_ADC_CTRL3,
	.reg_defaults = ad1836_reg_defaults,
	.num_reg_defaults = ARRAY_SIZE(ad1836_reg_defaults),
	.cache_type = REGCACHE_RBTREE,
};

/* Allocate driver state, create the SPI regmap and register the codec. */
static int ad1836_spi_probe(struct spi_device *spi)
{
	struct ad1836_priv *ad1836;
	int ret;

	ad1836 = devm_kzalloc(&spi->dev, sizeof(struct ad1836_priv),
			      GFP_KERNEL);
	if (ad1836 == NULL)
		return -ENOMEM;

	ad1836->regmap = devm_regmap_init_spi(spi, &ad1836_regmap_config);
	if (IS_ERR(ad1836->regmap))
		return PTR_ERR(ad1836->regmap);

	ad1836->type = spi_get_device_id(spi)->driver_data;

	spi_set_drvdata(spi, ad1836);

	ret = snd_soc_register_codec(&spi->dev,
			&soc_codec_dev_ad1836, &ad183x_dais[ad1836->type], 1);
	return ret;
}

static int ad1836_spi_remove(struct spi_device *spi)
{
	snd_soc_unregister_codec(&spi->dev);
	return 0;
}

/* A-suffix parts reuse the nearest supported variant's DAI table. */
static const struct spi_device_id ad1836_ids[] = {
	{ "ad1835", AD1835 },
	{ "ad1836", AD1836 },
	{ "ad1837", AD1835 },
	{ "ad1838", AD1838 },
	{ "ad1839", AD1838 },
	{ },
};
MODULE_DEVICE_TABLE(spi, ad1836_ids);

static struct spi_driver ad1836_spi_driver = {
	.driver = {
		.name	= "ad1836",
		.owner	= THIS_MODULE,
	},
	.probe		= ad1836_spi_probe,
	.remove		= ad1836_spi_remove,
	.id_table	= ad1836_ids,
};

module_spi_driver(ad1836_spi_driver);

MODULE_DESCRIPTION("ASoC ad1836 driver");
MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
MODULE_LICENSE("GPL");
gpl-2.0
Nick73/King_Kernel
drivers/net/sfc/falcon.c
1724
50611
/**************************************************************************** * Driver for Solarflare Solarstorm network controllers and boards * Copyright 2005-2006 Fen Systems Ltd. * Copyright 2006-2010 Solarflare Communications Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published * by the Free Software Foundation, incorporated herein by reference. */ #include <linux/bitops.h> #include <linux/delay.h> #include <linux/pci.h> #include <linux/module.h> #include <linux/seq_file.h> #include <linux/i2c.h> #include <linux/mii.h> #include <linux/slab.h> #include "net_driver.h" #include "bitfield.h" #include "efx.h" #include "mac.h" #include "spi.h" #include "nic.h" #include "regs.h" #include "io.h" #include "phy.h" #include "workarounds.h" /* Hardware control for SFC4000 (aka Falcon). */ static const unsigned int /* "Large" EEPROM device: Atmel AT25640 or similar * 8 KB, 16-bit address, 32 B write block */ large_eeprom_type = ((13 << SPI_DEV_TYPE_SIZE_LBN) | (2 << SPI_DEV_TYPE_ADDR_LEN_LBN) | (5 << SPI_DEV_TYPE_BLOCK_SIZE_LBN)), /* Default flash device: Atmel AT25F1024 * 128 KB, 24-bit address, 32 KB erase block, 256 B write block */ default_flash_type = ((17 << SPI_DEV_TYPE_SIZE_LBN) | (3 << SPI_DEV_TYPE_ADDR_LEN_LBN) | (0x52 << SPI_DEV_TYPE_ERASE_CMD_LBN) | (15 << SPI_DEV_TYPE_ERASE_SIZE_LBN) | (8 << SPI_DEV_TYPE_BLOCK_SIZE_LBN)); /************************************************************************** * * I2C bus - this is a bit-bashing interface using GPIO pins * Note that it uses the output enables to tristate the outputs * SDA is the data pin and SCL is the clock * ************************************************************************** */ static void falcon_setsda(void *data, int state) { struct efx_nic *efx = (struct efx_nic *)data; efx_oword_t reg; efx_reado(efx, &reg, FR_AB_GPIO_CTL); EFX_SET_OWORD_FIELD(reg, FRF_AB_GPIO3_OEN, !state); efx_writeo(efx, 
&reg, FR_AB_GPIO_CTL); } static void falcon_setscl(void *data, int state) { struct efx_nic *efx = (struct efx_nic *)data; efx_oword_t reg; efx_reado(efx, &reg, FR_AB_GPIO_CTL); EFX_SET_OWORD_FIELD(reg, FRF_AB_GPIO0_OEN, !state); efx_writeo(efx, &reg, FR_AB_GPIO_CTL); } static int falcon_getsda(void *data) { struct efx_nic *efx = (struct efx_nic *)data; efx_oword_t reg; efx_reado(efx, &reg, FR_AB_GPIO_CTL); return EFX_OWORD_FIELD(reg, FRF_AB_GPIO3_IN); } static int falcon_getscl(void *data) { struct efx_nic *efx = (struct efx_nic *)data; efx_oword_t reg; efx_reado(efx, &reg, FR_AB_GPIO_CTL); return EFX_OWORD_FIELD(reg, FRF_AB_GPIO0_IN); } static struct i2c_algo_bit_data falcon_i2c_bit_operations = { .setsda = falcon_setsda, .setscl = falcon_setscl, .getsda = falcon_getsda, .getscl = falcon_getscl, .udelay = 5, /* Wait up to 50 ms for slave to let us pull SCL high */ .timeout = DIV_ROUND_UP(HZ, 20), }; static void falcon_push_irq_moderation(struct efx_channel *channel) { efx_dword_t timer_cmd; struct efx_nic *efx = channel->efx; /* Set timer register */ if (channel->irq_moderation) { EFX_POPULATE_DWORD_2(timer_cmd, FRF_AB_TC_TIMER_MODE, FFE_BB_TIMER_MODE_INT_HLDOFF, FRF_AB_TC_TIMER_VAL, channel->irq_moderation - 1); } else { EFX_POPULATE_DWORD_2(timer_cmd, FRF_AB_TC_TIMER_MODE, FFE_BB_TIMER_MODE_DIS, FRF_AB_TC_TIMER_VAL, 0); } BUILD_BUG_ON(FR_AA_TIMER_COMMAND_KER != FR_BZ_TIMER_COMMAND_P0); efx_writed_page_locked(efx, &timer_cmd, FR_BZ_TIMER_COMMAND_P0, channel->channel); } static void falcon_deconfigure_mac_wrapper(struct efx_nic *efx); static void falcon_prepare_flush(struct efx_nic *efx) { falcon_deconfigure_mac_wrapper(efx); /* Wait for the tx and rx fifo's to get to the next packet boundary * (~1ms without back-pressure), then to drain the remainder of the * fifo's at data path speeds (negligible), with a healthy margin. */ msleep(10); } /* Acknowledge a legacy interrupt from Falcon * * This acknowledges a legacy (not MSI) interrupt via INT_ACK_KER_REG. 
* * Due to SFC bug 3706 (silicon revision <=A1) reads can be duplicated in the * BIU. Interrupt acknowledge is read sensitive so must write instead * (then read to ensure the BIU collector is flushed) * * NB most hardware supports MSI interrupts */ inline void falcon_irq_ack_a1(struct efx_nic *efx) { efx_dword_t reg; EFX_POPULATE_DWORD_1(reg, FRF_AA_INT_ACK_KER_FIELD, 0xb7eb7e); efx_writed(efx, &reg, FR_AA_INT_ACK_KER); efx_readd(efx, &reg, FR_AA_WORK_AROUND_BROKEN_PCI_READS); } irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id) { struct efx_nic *efx = dev_id; efx_oword_t *int_ker = efx->irq_status.addr; int syserr; int queues; /* Check to see if this is our interrupt. If it isn't, we * exit without having touched the hardware. */ if (unlikely(EFX_OWORD_IS_ZERO(*int_ker))) { netif_vdbg(efx, intr, efx->net_dev, "IRQ %d on CPU %d not for me\n", irq, raw_smp_processor_id()); return IRQ_NONE; } efx->last_irq_cpu = raw_smp_processor_id(); netif_vdbg(efx, intr, efx->net_dev, "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n", irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker)); /* Determine interrupting queues, clear interrupt status * register and acknowledge the device interrupt. 
*/ BUILD_BUG_ON(FSF_AZ_NET_IVEC_INT_Q_WIDTH > EFX_MAX_CHANNELS); queues = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_INT_Q); /* Check to see if we have a serious error condition */ if (queues & (1U << efx->fatal_irq_level)) { syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT); if (unlikely(syserr)) return efx_nic_fatal_interrupt(efx); } EFX_ZERO_OWORD(*int_ker); wmb(); /* Ensure the vector is cleared before interrupt ack */ falcon_irq_ack_a1(efx); if (queues & 1) efx_schedule_channel(efx_get_channel(efx, 0)); if (queues & 2) efx_schedule_channel(efx_get_channel(efx, 1)); return IRQ_HANDLED; } /************************************************************************** * * EEPROM/flash * ************************************************************************** */ #define FALCON_SPI_MAX_LEN sizeof(efx_oword_t) static int falcon_spi_poll(struct efx_nic *efx) { efx_oword_t reg; efx_reado(efx, &reg, FR_AB_EE_SPI_HCMD); return EFX_OWORD_FIELD(reg, FRF_AB_EE_SPI_HCMD_CMD_EN) ? -EBUSY : 0; } /* Wait for SPI command completion */ static int falcon_spi_wait(struct efx_nic *efx) { /* Most commands will finish quickly, so we start polling at * very short intervals. Sometimes the command may have to * wait for VPD or expansion ROM access outside of our * control, so we allow up to 100 ms. 
	 */
	unsigned long timeout = jiffies + 1 + DIV_ROUND_UP(HZ, 10);
	int i;

	/* Fast path: busy-poll briefly (10 x 10us) before sleeping */
	for (i = 0; i < 10; i++) {
		if (!falcon_spi_poll(efx))
			return 0;
		udelay(10);
	}

	/* Slow path: sleep between polls until the timeout expires */
	for (;;) {
		if (!falcon_spi_poll(efx))
			return 0;
		if (time_after_eq(jiffies, timeout)) {
			netif_err(efx, hw, efx->net_dev,
				  "timed out waiting for SPI\n");
			return -ETIMEDOUT;
		}
		schedule_timeout_uninterruptible(1);
	}
}

/* Issue a single command to the SPI flash/EEPROM host interface.
 * @command: SPI command byte
 * @address: device address, or negative for an unaddressed command
 * @in: data to write, or NULL
 * @out: buffer for data to read, or NULL
 * @len: data length, at most FALCON_SPI_MAX_LEN bytes
 * Returns 0 on success or a negative errno.
 */
int falcon_spi_cmd(struct efx_nic *efx, const struct efx_spi_device *spi,
		   unsigned int command, int address,
		   const void *in, void *out, size_t len)
{
	bool addressed = (address >= 0);
	bool reading = (out != NULL);
	efx_oword_t reg;
	int rc;

	/* Input validation */
	if (len > FALCON_SPI_MAX_LEN)
		return -EINVAL;

	/* Check that previous command is not still running */
	rc = falcon_spi_poll(efx);
	if (rc)
		return rc;

	/* Program address register, if we have an address */
	if (addressed) {
		EFX_POPULATE_OWORD_1(reg, FRF_AB_EE_SPI_HADR_ADR, address);
		efx_writeo(efx, &reg, FR_AB_EE_SPI_HADR);
	}

	/* Program data register, if we have data */
	if (in != NULL) {
		memcpy(&reg, in, len);
		efx_writeo(efx, &reg, FR_AB_EE_SPI_HDATA);
	}

	/* Issue read/write command */
	EFX_POPULATE_OWORD_7(reg,
			     FRF_AB_EE_SPI_HCMD_CMD_EN, 1,
			     FRF_AB_EE_SPI_HCMD_SF_SEL, spi->device_id,
			     FRF_AB_EE_SPI_HCMD_DABCNT, len,
			     FRF_AB_EE_SPI_HCMD_READ, reading,
			     FRF_AB_EE_SPI_HCMD_DUBCNT, 0,
			     FRF_AB_EE_SPI_HCMD_ADBCNT,
			     (addressed ?
			      spi->addr_len : 0),
			     FRF_AB_EE_SPI_HCMD_ENC, command);
	efx_writeo(efx, &reg, FR_AB_EE_SPI_HCMD);

	/* Wait for read/write to complete */
	rc = falcon_spi_wait(efx);
	if (rc)
		return rc;

	/* Read data */
	if (out != NULL) {
		efx_reado(efx, &reg, FR_AB_EE_SPI_HDATA);
		memcpy(out, &reg, len);
	}

	return 0;
}

/* Limit a write so it does not cross a write-block boundary of the
 * device (and never exceeds FALCON_SPI_MAX_LEN).
 */
static size_t
falcon_spi_write_limit(const struct efx_spi_device *spi, size_t start)
{
	return min(FALCON_SPI_MAX_LEN,
		   (spi->block_size - (start & (spi->block_size - 1))));
}

/* For devices with munge_address set, fold address bit 8 into bit 3
 * of the command byte (address itself only carries the low bits).
 */
static inline u8
efx_spi_munge_command(const struct efx_spi_device *spi,
		      const u8 command, const unsigned int address)
{
	return command | (((address >> 8) & spi->munge_address) << 3);
}

/* Wait up to 10 ms for buffered write completion */
int
falcon_spi_wait_write(struct efx_nic *efx, const struct efx_spi_device *spi)
{
	unsigned long timeout = jiffies + 1 + DIV_ROUND_UP(HZ, 100);
	u8 status;
	int rc;

	/* Poll the device status register (RDSR) until the not-ready
	 * bit clears or we time out.
	 */
	for (;;) {
		rc = falcon_spi_cmd(efx, spi, SPI_RDSR, -1, NULL,
				    &status, sizeof(status));
		if (rc)
			return rc;
		if (!(status & SPI_STATUS_NRDY))
			return 0;
		if (time_after_eq(jiffies, timeout)) {
			netif_err(efx, hw, efx->net_dev,
				  "SPI write timeout on device %d"
				  " last status=0x%02x\n",
				  spi->device_id, status);
			return -ETIMEDOUT;
		}
		schedule_timeout_uninterruptible(1);
	}
}

/* Read @len bytes starting at @start, in chunks of at most
 * FALCON_SPI_MAX_LEN.  *retlen (if non-NULL) is set to the number of
 * bytes actually read, even on error.
 */
int falcon_spi_read(struct efx_nic *efx, const struct efx_spi_device *spi,
		    loff_t start, size_t len, size_t *retlen, u8 *buffer)
{
	size_t block_len, pos = 0;
	unsigned int command;
	int rc = 0;

	while (pos < len) {
		block_len = min(len - pos, FALCON_SPI_MAX_LEN);

		command = efx_spi_munge_command(spi, SPI_READ, start + pos);
		rc = falcon_spi_cmd(efx, spi, command, start + pos, NULL,
				    buffer + pos, block_len);
		if (rc)
			break;
		pos += block_len;

		/* Avoid locking up the system */
		cond_resched();
		if (signal_pending(current)) {
			rc = -EINTR;
			break;
		}
	}

	if (retlen)
		*retlen = pos;
	return rc;
}

int
falcon_spi_write(struct efx_nic *efx, const struct efx_spi_device *spi,
		 loff_t start, size_t len, size_t *retlen, const u8 *buffer)
{
	u8 verify_buffer[FALCON_SPI_MAX_LEN];
size_t block_len, pos = 0; unsigned int command; int rc = 0; while (pos < len) { rc = falcon_spi_cmd(efx, spi, SPI_WREN, -1, NULL, NULL, 0); if (rc) break; block_len = min(len - pos, falcon_spi_write_limit(spi, start + pos)); command = efx_spi_munge_command(spi, SPI_WRITE, start + pos); rc = falcon_spi_cmd(efx, spi, command, start + pos, buffer + pos, NULL, block_len); if (rc) break; rc = falcon_spi_wait_write(efx, spi); if (rc) break; command = efx_spi_munge_command(spi, SPI_READ, start + pos); rc = falcon_spi_cmd(efx, spi, command, start + pos, NULL, verify_buffer, block_len); if (memcmp(verify_buffer, buffer + pos, block_len)) { rc = -EIO; break; } pos += block_len; /* Avoid locking up the system */ cond_resched(); if (signal_pending(current)) { rc = -EINTR; break; } } if (retlen) *retlen = pos; return rc; } /************************************************************************** * * MAC wrapper * ************************************************************************** */ static void falcon_push_multicast_hash(struct efx_nic *efx) { union efx_multicast_hash *mc_hash = &efx->multicast_hash; WARN_ON(!mutex_is_locked(&efx->mac_lock)); efx_writeo(efx, &mc_hash->oword[0], FR_AB_MAC_MC_HASH_REG0); efx_writeo(efx, &mc_hash->oword[1], FR_AB_MAC_MC_HASH_REG1); } static void falcon_reset_macs(struct efx_nic *efx) { struct falcon_nic_data *nic_data = efx->nic_data; efx_oword_t reg, mac_ctrl; int count; if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) { /* It's not safe to use GLB_CTL_REG to reset the * macs, so instead use the internal MAC resets */ EFX_POPULATE_OWORD_1(reg, FRF_AB_XM_CORE_RST, 1); efx_writeo(efx, &reg, FR_AB_XM_GLB_CFG); for (count = 0; count < 10000; count++) { efx_reado(efx, &reg, FR_AB_XM_GLB_CFG); if (EFX_OWORD_FIELD(reg, FRF_AB_XM_CORE_RST) == 0) return; udelay(10); } netif_err(efx, hw, efx->net_dev, "timed out waiting for XMAC core reset\n"); } /* Mac stats will fail whist the TX fifo is draining */ WARN_ON(nic_data->stats_disable_count == 0); 
	/* Enable TX FIFO drain before resetting, preserving the rest of
	 * MAC_CTRL so it can be restored afterwards.
	 */
	efx_reado(efx, &mac_ctrl, FR_AB_MAC_CTRL);
	EFX_SET_OWORD_FIELD(mac_ctrl, FRF_BB_TXFIFO_DRAIN_EN, 1);
	efx_writeo(efx, &mac_ctrl, FR_AB_MAC_CTRL);

	/* Initiate reset of the XGTX, XGRX and EM blocks via GLB_CTL */
	efx_reado(efx, &reg, FR_AB_GLB_CTL);
	EFX_SET_OWORD_FIELD(reg, FRF_AB_RST_XGTX, 1);
	EFX_SET_OWORD_FIELD(reg, FRF_AB_RST_XGRX, 1);
	EFX_SET_OWORD_FIELD(reg, FRF_AB_RST_EM, 1);
	efx_writeo(efx, &reg, FR_AB_GLB_CTL);

	/* Poll until all three reset bits self-clear (up to ~21 loops) */
	count = 0;
	while (1) {
		efx_reado(efx, &reg, FR_AB_GLB_CTL);
		if (!EFX_OWORD_FIELD(reg, FRF_AB_RST_XGTX) &&
		    !EFX_OWORD_FIELD(reg, FRF_AB_RST_XGRX) &&
		    !EFX_OWORD_FIELD(reg, FRF_AB_RST_EM)) {
			netif_dbg(efx, hw, efx->net_dev,
				  "Completed MAC reset after %d loops\n",
				  count);
			break;
		}
		if (count > 20) {
			netif_err(efx, hw, efx->net_dev, "MAC reset failed\n");
			break;
		}
		count++;
		udelay(10);
	}

	/* Ensure the correct MAC is selected before statistics
	 * are re-enabled by the caller
	 */
	efx_writeo(efx, &mac_ctrl, FR_AB_MAC_CTRL);

	falcon_setup_xaui(efx);
}

/* Drain the TX FIFO by triggering a MAC reset with TXFIFO_DRAIN_EN
 * set.  No-op on pre-B0 silicon or in loopback, and skipped if a
 * drain is already in progress.
 */
void falcon_drain_tx_fifo(struct efx_nic *efx)
{
	efx_oword_t reg;

	if ((efx_nic_rev(efx) < EFX_REV_FALCON_B0) ||
	    (efx->loopback_mode != LOOPBACK_NONE))
		return;

	efx_reado(efx, &reg, FR_AB_MAC_CTRL);
	/* There is no point in draining more than once */
	if (EFX_OWORD_FIELD(reg, FRF_BB_TXFIFO_DRAIN_EN))
		return;

	falcon_reset_macs(efx);
}

/* Isolate the MAC from the datapath on both sides (B0 only) */
static void falcon_deconfigure_mac_wrapper(struct efx_nic *efx)
{
	efx_oword_t reg;

	if (efx_nic_rev(efx) < EFX_REV_FALCON_B0)
		return;

	/* Isolate the MAC -> RX */
	efx_reado(efx, &reg, FR_AZ_RX_CFG);
	EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_INGR_EN, 0);
	efx_writeo(efx, &reg, FR_AZ_RX_CFG);

	/* Isolate TX -> MAC */
	falcon_drain_tx_fifo(efx);
}

/* Program MAC_CTRL and RX_CFG to match the current link state,
 * restoring the multicast hash and un-isolating the MAC unless a
 * reset is pending.
 */
void falcon_reconfigure_mac_wrapper(struct efx_nic *efx)
{
	struct efx_link_state *link_state = &efx->link_state;
	efx_oword_t reg;
	int link_speed, isolate;

	isolate = (efx->reset_pending != RESET_TYPE_NONE);

	/* Encode the link speed in the hardware's 2-bit field */
	switch (link_state->speed) {
	case 10000: link_speed = 3; break;
	case 1000:  link_speed = 2; break;
	case 100:   link_speed = 1; break;
	default:    link_speed = 0; break;
	}
	/* MAC_LINK_STATUS controls MAC backpressure but doesn't work
	 * as advertised.  Disable to ensure packets are not
	 * indefinitely held and TX queue can be flushed at any point
	 * while the link is down.
	 */
	EFX_POPULATE_OWORD_5(reg,
			     FRF_AB_MAC_XOFF_VAL, 0xffff /* max pause time */,
			     FRF_AB_MAC_BCAD_ACPT, 1,
			     FRF_AB_MAC_UC_PROM, efx->promiscuous,
			     FRF_AB_MAC_LINK_STATUS, 1, /* always set */
			     FRF_AB_MAC_SPEED, link_speed);
	/* On B0, MAC backpressure can be disabled and packets get
	 * discarded.
	 */
	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
		EFX_SET_OWORD_FIELD(reg, FRF_BB_TXFIFO_DRAIN_EN,
				    !link_state->up || isolate);
	}

	efx_writeo(efx, &reg, FR_AB_MAC_CTRL);

	/* Restore the multicast hash registers. */
	falcon_push_multicast_hash(efx);

	efx_reado(efx, &reg, FR_AZ_RX_CFG);
	/* Enable XOFF signal from RX FIFO (we enabled it during NIC
	 * initialisation but it may read back as 0) */
	EFX_SET_OWORD_FIELD(reg, FRF_AZ_RX_XOFF_MAC_EN, 1);
	/* Unisolate the MAC -> RX */
	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
		EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_INGR_EN, !isolate);
	efx_writeo(efx, &reg, FR_AZ_RX_CFG);
}

/* Start a DMA of MAC statistics into efx->stats_buffer and arm the
 * completion timer.  The callers visible in this file hold
 * efx->stats_lock when calling this.
 */
static void falcon_stats_request(struct efx_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	efx_oword_t reg;

	WARN_ON(nic_data->stats_pending);
	WARN_ON(nic_data->stats_disable_count);

	if (nic_data->stats_dma_done == NULL)
		return;	/* no mac selected */
	*nic_data->stats_dma_done = FALCON_STATS_NOT_DONE;
	nic_data->stats_pending = true;
	wmb(); /* ensure done flag is clear */

	/* Initiate DMA transfer of stats */
	EFX_POPULATE_OWORD_2(reg,
			     FRF_AB_MAC_STAT_DMA_CMD, 1,
			     FRF_AB_MAC_STAT_DMA_ADR,
			     efx->stats_buffer.dma_addr);
	efx_writeo(efx, &reg, FR_AB_MAC_STAT_DMA);

	mod_timer(&nic_data->stats_timer, round_jiffies_up(jiffies + HZ / 2));
}

/* Harvest a completed statistics DMA, or report a timeout if the
 * done flag has not been written by the hardware yet.
 */
static void falcon_stats_complete(struct efx_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;

	if (!nic_data->stats_pending)
		return;

	nic_data->stats_pending = 0;
	if (*nic_data->stats_dma_done == FALCON_STATS_DONE) {
		rmb(); /* read the done flag before the stats */
		efx->mac_op->update_stats(efx);
	} else {
		netif_err(efx, hw, efx->net_dev,
			  "timed out waiting for statistics\n");
	}
}

/* Timer callback: harvest the previous statistics DMA and, unless
 * stats are disabled, start the next one.  Serialised against other
 * stats users by efx->stats_lock.
 */
static void falcon_stats_timer_func(unsigned long context)
{
	struct efx_nic *efx = (struct efx_nic *)context;
	struct falcon_nic_data *nic_data = efx->nic_data;

	spin_lock(&efx->stats_lock);
	falcon_stats_complete(efx);
	if (nic_data->stats_disable_count == 0)
		falcon_stats_request(efx);
	spin_unlock(&efx->stats_lock);
}

/* Synthesise link state for internal loopback (full duplex, 10G, up).
 * Returns true if this changed efx->link_state.
 */
static bool falcon_loopback_link_poll(struct efx_nic *efx)
{
	struct efx_link_state old_state = efx->link_state;

	WARN_ON(!mutex_is_locked(&efx->mac_lock));
	WARN_ON(!LOOPBACK_INTERNAL(efx));

	efx->link_state.fd = true;
	efx->link_state.fc = efx->wanted_fc;
	efx->link_state.up = true;
	efx->link_state.speed = 10000;

	return !efx_link_state_equal(&efx->link_state, &old_state);
}

/* Reconfigure PHY and MAC after a port-level change: poll link,
 * isolate and reset the MAC, reconfigure PHY then MAC, restart stats
 * and notify the stack of the link state.
 */
static int falcon_reconfigure_port(struct efx_nic *efx)
{
	int rc;

	WARN_ON(efx_nic_rev(efx) > EFX_REV_FALCON_B0);

	/* Poll the PHY link state *before* reconfiguring it. This means we
	 * will pick up the correct speed (in loopback) to select the correct
	 * MAC.
	 */
	if (LOOPBACK_INTERNAL(efx))
		falcon_loopback_link_poll(efx);
	else
		efx->phy_op->poll(efx);

	falcon_stop_nic_stats(efx);
	falcon_deconfigure_mac_wrapper(efx);

	falcon_reset_macs(efx);

	efx->phy_op->reconfigure(efx);
	rc = efx->mac_op->reconfigure(efx);
	BUG_ON(rc);

	falcon_start_nic_stats(efx);

	/* Synchronise efx->link_state with the kernel */
	efx_link_status_changed(efx);

	return 0;
}

/**************************************************************************
 *
 * PHY access via GMII
 *
 **************************************************************************
 */

/* Wait for GMII access to complete */
static int falcon_gmii_wait(struct efx_nic *efx)
{
	efx_oword_t md_stat;
	int count;

	/* wait up to 50ms - taken max from datasheet */
	/* 5000 iterations x 10us = 50ms; on completion also check the
	 * line-fault and bus-error status bits.
	 */
	for (count = 0; count < 5000; count++) {
		efx_reado(efx, &md_stat, FR_AB_MD_STAT);
		if (EFX_OWORD_FIELD(md_stat, FRF_AB_MD_BSY) == 0) {
			if (EFX_OWORD_FIELD(md_stat, FRF_AB_MD_LNFL) != 0 ||
			    EFX_OWORD_FIELD(md_stat, FRF_AB_MD_BSERR) != 0) {
				netif_err(efx, hw, efx->net_dev,
					  "error from GMII access "
					  EFX_OWORD_FMT"\n",
					  EFX_OWORD_VAL(md_stat));
				return -EIO;
			}
			return 0;
		}
		udelay(10);
	}
	netif_err(efx, hw, efx->net_dev, "timed out waiting for GMII\n");
	return -ETIMEDOUT;
}

/* Write an MDIO register of a PHY connected to Falcon.
 */
static int falcon_mdio_write(struct net_device *net_dev,
			     int prtad, int devad, u16 addr, u16 value)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct falcon_nic_data *nic_data = efx->nic_data;
	efx_oword_t reg;
	int rc;

	netif_vdbg(efx, hw, efx->net_dev,
		   "writing MDIO %d register %d.%d with 0x%04x\n",
		    prtad, devad, addr, value);

	mutex_lock(&nic_data->mdio_lock);

	/* Check MDIO not currently being accessed */
	rc = falcon_gmii_wait(efx);
	if (rc)
		goto out;

	/* Write the address/ID register */
	EFX_POPULATE_OWORD_1(reg, FRF_AB_MD_PHY_ADR, addr);
	efx_writeo(efx, &reg, FR_AB_MD_PHY_ADR);

	EFX_POPULATE_OWORD_2(reg, FRF_AB_MD_PRT_ADR, prtad,
			     FRF_AB_MD_DEV_ADR, devad);
	efx_writeo(efx, &reg, FR_AB_MD_ID);

	/* Write data */
	EFX_POPULATE_OWORD_1(reg, FRF_AB_MD_TXD, value);
	efx_writeo(efx, &reg, FR_AB_MD_TXD);

	EFX_POPULATE_OWORD_2(reg, FRF_AB_MD_WRC, 1,
			     FRF_AB_MD_GC, 0);
	efx_writeo(efx, &reg, FR_AB_MD_CS);

	/* Wait for data to be written */
	rc = falcon_gmii_wait(efx);
	if (rc) {
		/* Abort the write operation */
		EFX_POPULATE_OWORD_2(reg,
				     FRF_AB_MD_WRC, 0,
				     FRF_AB_MD_GC, 1);
		efx_writeo(efx, &reg, FR_AB_MD_CS);
		udelay(10);
	}

out:
	mutex_unlock(&nic_data->mdio_lock);
	return rc;
}

/* Read an MDIO register of a PHY connected to Falcon.
 */
static int falcon_mdio_read(struct net_device *net_dev,
			    int prtad, int devad, u16 addr)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct falcon_nic_data *nic_data = efx->nic_data;
	efx_oword_t reg;
	int rc;

	mutex_lock(&nic_data->mdio_lock);

	/* Check MDIO not currently being accessed */
	rc = falcon_gmii_wait(efx);
	if (rc)
		goto out;

	EFX_POPULATE_OWORD_1(reg, FRF_AB_MD_PHY_ADR, addr);
	efx_writeo(efx, &reg, FR_AB_MD_PHY_ADR);

	EFX_POPULATE_OWORD_2(reg, FRF_AB_MD_PRT_ADR, prtad,
			     FRF_AB_MD_DEV_ADR, devad);
	efx_writeo(efx, &reg, FR_AB_MD_ID);

	/* Request data to be read */
	EFX_POPULATE_OWORD_2(reg, FRF_AB_MD_RDC, 1, FRF_AB_MD_GC, 0);
	efx_writeo(efx, &reg, FR_AB_MD_CS);

	/* Wait for data to become available */
	rc = falcon_gmii_wait(efx);
	if (rc == 0) {
		/* On success the register value is returned in rc */
		efx_reado(efx, &reg, FR_AB_MD_RXD);
		rc = EFX_OWORD_FIELD(reg, FRF_AB_MD_RXD);
		netif_vdbg(efx, hw, efx->net_dev,
			   "read from MDIO %d register %d.%d, got %04x\n",
			   prtad, devad, addr, rc);
	} else {
		/* Abort the read operation */
		EFX_POPULATE_OWORD_2(reg,
				     FRF_AB_MD_RIC, 0,
				     FRF_AB_MD_GC, 1);
		efx_writeo(efx, &reg, FR_AB_MD_CS);

		netif_dbg(efx, hw, efx->net_dev,
			  "read from MDIO %d register %d.%d, got error %d\n",
			  prtad, devad, addr, rc);
	}

out:
	mutex_unlock(&nic_data->mdio_lock);
	return rc;
}

/* This call is responsible for hooking in the MAC and PHY operations */
static int falcon_probe_port(struct efx_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	int rc;

	/* Select PHY operations to match the PHY type read from NVRAM */
	switch (efx->phy_type) {
	case PHY_TYPE_SFX7101:
		efx->phy_op = &falcon_sfx7101_phy_ops;
		break;
	case PHY_TYPE_QT2022C2:
	case PHY_TYPE_QT2025C:
		efx->phy_op = &falcon_qt202x_phy_ops;
		break;
	case PHY_TYPE_TXC43128:
		efx->phy_op = &falcon_txc_phy_ops;
		break;
	default:
		netif_err(efx, probe, efx->net_dev, "Unknown PHY type %d\n",
			  efx->phy_type);
		return -ENODEV;
	}

	/* Fill out MDIO structure and loopback modes */
	mutex_init(&nic_data->mdio_lock);
	efx->mdio.mdio_read = falcon_mdio_read;
	efx->mdio.mdio_write = falcon_mdio_write;
	rc = efx->phy_op->probe(efx);
	if (rc != 0)
		return rc;

	/* Initial assumption */
	efx->link_state.speed = 10000;
	efx->link_state.fd = true;

	/* Hardware flow ctrl. FalconA RX FIFO too small for pause generation */
	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
		efx->wanted_fc = EFX_FC_RX | EFX_FC_TX;
	else
		efx->wanted_fc = EFX_FC_RX;
	if (efx->mdio.mmds & MDIO_DEVS_AN)
		efx->wanted_fc |= EFX_FC_AUTO;

	/* Allocate buffer for stats */
	rc = efx_nic_alloc_buffer(efx, &efx->stats_buffer,
				  FALCON_MAC_STATS_SIZE);
	if (rc)
		return rc;
	netif_dbg(efx, probe, efx->net_dev,
		  "stats buffer at %llx (virt %p phys %llx)\n",
		  (u64)efx->stats_buffer.dma_addr,
		  efx->stats_buffer.addr,
		  (u64)virt_to_phys(efx->stats_buffer.addr));
	nic_data->stats_dma_done = efx->stats_buffer.addr + XgDmaDone_offset;

	return 0;
}

/* Undo falcon_probe_port: remove the PHY and free the stats buffer */
static void falcon_remove_port(struct efx_nic *efx)
{
	efx->phy_op->remove(efx);
	efx_nic_free_buffer(efx, &efx->stats_buffer);
}

/* Global events are basically PHY events */
static bool
falcon_handle_global_event(struct efx_channel *channel, efx_qword_t *event)
{
	struct efx_nic *efx = channel->efx;
	struct falcon_nic_data *nic_data = efx->nic_data;

	if (EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_G_PHY0_INTR) ||
	    EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_XG_PHY0_INTR) ||
	    EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_XFP_PHY0_INTR))
		/* Ignored */
		return true;

	if ((efx_nic_rev(efx) == EFX_REV_FALCON_B0) &&
	    EFX_QWORD_FIELD(*event, FSF_BB_GLB_EV_XG_MGT_INTR)) {
		nic_data->xmac_poll_required = true;
		return true;
	}

	/* The RX_RECOVERY flag lives in different fields on A1 vs B0 */
	if (efx_nic_rev(efx) <= EFX_REV_FALCON_A1 ?
	    EFX_QWORD_FIELD(*event, FSF_AA_GLB_EV_RX_RECOVERY) :
	    EFX_QWORD_FIELD(*event, FSF_BB_GLB_EV_RX_RECOVERY)) {
		netif_err(efx, rx_err, efx->net_dev,
			  "channel %d seen global RX_RESET event. Resetting.\n",
			  channel->channel);

		atomic_inc(&efx->rx_reset);
		efx_schedule_reset(efx, EFX_WORKAROUND_6555(efx) ?
				   RESET_TYPE_RX_RECOVERY :
				   RESET_TYPE_DISABLE);
		return true;
	}

	return false;
}

/**************************************************************************
 *
 * Falcon test code
 *
 **************************************************************************/

/* Read and validate the NVRAM configuration region from flash or
 * EEPROM (whichever is present).  The checksummed span depends on
 * board_struct_ver: versions 2-3 cover the nvconfig structure only,
 * version 4+ covers the whole region.  If @nvconfig_out is non-NULL
 * the parsed config is copied to it.  Returns 0 or a negative errno.
 */
static int
falcon_read_nvram(struct efx_nic *efx, struct falcon_nvconfig *nvconfig_out)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	struct falcon_nvconfig *nvconfig;
	struct efx_spi_device *spi;
	void *region;
	int rc, magic_num, struct_ver;
	__le16 *word, *limit;
	u32 csum;

	if (efx_spi_present(&nic_data->spi_flash))
		spi = &nic_data->spi_flash;
	else if (efx_spi_present(&nic_data->spi_eeprom))
		spi = &nic_data->spi_eeprom;
	else
		return -EINVAL;

	region = kmalloc(FALCON_NVCONFIG_END, GFP_KERNEL);
	if (!region)
		return -ENOMEM;
	nvconfig = region + FALCON_NVCONFIG_OFFSET;

	mutex_lock(&nic_data->spi_lock);
	rc = falcon_spi_read(efx, spi, 0, FALCON_NVCONFIG_END, NULL, region);
	mutex_unlock(&nic_data->spi_lock);
	if (rc) {
		netif_err(efx, hw, efx->net_dev, "Failed to read %s\n",
			  efx_spi_present(&nic_data->spi_flash) ?
			  "flash" : "EEPROM");
		rc = -EIO;
		goto out;
	}

	magic_num = le16_to_cpu(nvconfig->board_magic_num);
	struct_ver = le16_to_cpu(nvconfig->board_struct_ver);

	rc = -EINVAL;
	if (magic_num != FALCON_NVCONFIG_BOARD_MAGIC_NUM) {
		netif_err(efx, hw, efx->net_dev,
			  "NVRAM bad magic 0x%x\n", magic_num);
		goto out;
	}
	if (struct_ver < 2) {
		netif_err(efx, hw, efx->net_dev,
			  "NVRAM has ancient version 0x%x\n", struct_ver);
		goto out;
	} else if (struct_ver < 4) {
		word = &nvconfig->board_magic_num;
		limit = (__le16 *) (nvconfig + 1);
	} else {
		word = region;
		limit = region + FALCON_NVCONFIG_END;
	}
	/* 16-bit one's-style checksum: all words must sum to 0xffff */
	for (csum = 0; word < limit; ++word)
		csum += le16_to_cpu(*word);

	if (~csum & 0xffff) {
		netif_err(efx, hw, efx->net_dev,
			  "NVRAM has incorrect checksum\n");
		goto out;
	}

	rc = 0;
	if (nvconfig_out)
		memcpy(nvconfig_out, nvconfig, sizeof(*nvconfig));

 out:
	kfree(region);
	return rc;
}

/* Self-test entry point: validate NVRAM without keeping the contents */
static int falcon_test_nvram(struct efx_nic *efx)
{
	return falcon_read_nvram(efx, NULL);
}

/* Register/mask pairs exercised by the B0 register self-test */
static const struct efx_nic_register_test falcon_b0_register_tests[] = {
	{ FR_AZ_ADR_REGION,
	  EFX_OWORD32(0x0003FFFF, 0x0003FFFF, 0x0003FFFF, 0x0003FFFF) },
	{ FR_AZ_RX_CFG,
	  EFX_OWORD32(0xFFFFFFFE, 0x00017FFF, 0x00000000, 0x00000000) },
	{ FR_AZ_TX_CFG,
	  EFX_OWORD32(0x7FFF0037, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AZ_TX_RESERVED,
	  EFX_OWORD32(0xFFFEFE80, 0x1FFFFFFF, 0x020000FE, 0x007FFFFF) },
	{ FR_AB_MAC_CTRL,
	  EFX_OWORD32(0xFFFF0000, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AZ_SRM_TX_DC_CFG,
	  EFX_OWORD32(0x001FFFFF, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AZ_RX_DC_CFG,
	  EFX_OWORD32(0x0000000F, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AZ_RX_DC_PF_WM,
	  EFX_OWORD32(0x000003FF, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_BZ_DP_CTRL,
	  EFX_OWORD32(0x00000FFF, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AB_GM_CFG2,
	  EFX_OWORD32(0x00007337, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AB_GMF_CFG0,
	  EFX_OWORD32(0x00001F1F, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AB_XM_GLB_CFG,
	  EFX_OWORD32(0x00000C68, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AB_XM_TX_CFG,
	  EFX_OWORD32(0x00080164, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AB_XM_RX_CFG,
	  EFX_OWORD32(0x07100A0C, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AB_XM_RX_PARAM,
	  EFX_OWORD32(0x00001FF8, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AB_XM_FC,
	  EFX_OWORD32(0xFFFF0001, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AB_XM_ADR_LO,
	  EFX_OWORD32(0xFFFFFFFF, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AB_XX_SD_CTL,
	  EFX_OWORD32(0x0003FF0F, 0x00000000, 0x00000000, 0x00000000) },
};

/* Run the generic register self-test over the B0 table above */
static int falcon_b0_test_registers(struct efx_nic *efx)
{
	return efx_nic_test_registers(efx, falcon_b0_register_tests,
				      ARRAY_SIZE(falcon_b0_register_tests));
}

/**************************************************************************
 *
 * Device reset
 *
 **************************************************************************
 */

/* Resets NIC to known state.  This routine must be called in process
 * context and is allowed to sleep. */
static int __falcon_reset_hw(struct efx_nic *efx, enum reset_type method)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	efx_oword_t glb_ctl_reg_ker;
	int rc;

	netif_dbg(efx, hw, efx->net_dev, "performing %s hardware reset\n",
		  RESET_TYPE(method));

	/* Initiate device reset */
	if (method == RESET_TYPE_WORLD) {
		/* A WORLD reset wipes PCI config space, so save it for
		 * both PCI functions first.
		 */
		rc = pci_save_state(efx->pci_dev);
		if (rc) {
			netif_err(efx, drv, efx->net_dev,
				  "failed to backup PCI state of primary "
				  "function prior to hardware reset\n");
			goto fail1;
		}
		if (efx_nic_is_dual_func(efx)) {
			rc = pci_save_state(nic_data->pci_dev2);
			if (rc) {
				netif_err(efx, drv, efx->net_dev,
					  "failed to backup PCI state of "
					  "secondary function prior to "
					  "hardware reset\n");
				goto fail2;
			}
		}

		EFX_POPULATE_OWORD_2(glb_ctl_reg_ker,
				     FRF_AB_EXT_PHY_RST_DUR,
				     FFE_AB_EXT_PHY_RST_DUR_10240US,
				     FRF_AB_SWRST, 1);
	} else {
		EFX_POPULATE_OWORD_7(glb_ctl_reg_ker,
				     /* exclude PHY from "invisible" reset */
				     FRF_AB_EXT_PHY_RST_CTL,
				     method == RESET_TYPE_INVISIBLE,
				     /* exclude EEPROM/flash and PCIe */
				     FRF_AB_PCIE_CORE_RST_CTL, 1,
				     FRF_AB_PCIE_NSTKY_RST_CTL, 1,
				     FRF_AB_PCIE_SD_RST_CTL, 1,
				     FRF_AB_EE_RST_CTL, 1,
				     FRF_AB_EXT_PHY_RST_DUR,
				     FFE_AB_EXT_PHY_RST_DUR_10240US,
				     FRF_AB_SWRST, 1);
	}
	efx_writeo(efx, &glb_ctl_reg_ker, FR_AB_GLB_CTL);

	netif_dbg(efx, hw, efx->net_dev, "waiting for hardware reset\n");
	schedule_timeout_uninterruptible(HZ / 20);

	/* Restore PCI configuration if needed */
	if (method == RESET_TYPE_WORLD) {
		if (efx_nic_is_dual_func(efx))
			pci_restore_state(nic_data->pci_dev2);
		pci_restore_state(efx->pci_dev);
		netif_dbg(efx, drv, efx->net_dev,
			  "successfully restored PCI config\n");
	}

	/* Assert that reset complete */
	efx_reado(efx, &glb_ctl_reg_ker, FR_AB_GLB_CTL);
	if (EFX_OWORD_FIELD(glb_ctl_reg_ker, FRF_AB_SWRST) != 0) {
		rc = -ETIMEDOUT;
		netif_err(efx, hw, efx->net_dev,
			  "timed out waiting for hardware reset\n");
		goto fail3;
	}
	netif_dbg(efx, hw, efx->net_dev, "hardware reset complete\n");

	return 0;

	/* pci_save_state() and pci_restore_state() MUST be called in pairs */
fail2:
	pci_restore_state(efx->pci_dev);
fail1:
fail3:
	return rc;
}

/* Reset wrapper that serialises against SPI accesses, since a reset
 * also resets the flash/EEPROM interface.
 */
static int falcon_reset_hw(struct efx_nic *efx, enum reset_type method)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	int rc;

	mutex_lock(&nic_data->spi_lock);
	rc = __falcon_reset_hw(efx, method);
	mutex_unlock(&nic_data->spi_lock);

	return rc;
}

/* Periodic monitor: check the board sensors, poll the link and, if
 * the link state changed, reset and reconfigure the MAC.  Caller
 * must hold efx->mac_lock (asserted below).
 */
static void falcon_monitor(struct efx_nic *efx)
{
	bool link_changed;
	int rc;

	BUG_ON(!mutex_is_locked(&efx->mac_lock));

	rc = falcon_board(efx)->type->monitor(efx);
	if (rc) {
		netif_err(efx, hw, efx->net_dev,
			  "Board sensor %s; shutting down PHY\n",
			  (rc == -ERANGE) ?
			  "reported fault" : "failed");
		efx->phy_mode |= PHY_MODE_LOW_POWER;
		rc = __efx_reconfigure_port(efx);
		WARN_ON(rc);
	}

	if (LOOPBACK_INTERNAL(efx))
		link_changed = falcon_loopback_link_poll(efx);
	else
		link_changed = efx->phy_op->poll(efx);

	if (link_changed) {
		falcon_stop_nic_stats(efx);
		falcon_deconfigure_mac_wrapper(efx);

		falcon_reset_macs(efx);
		rc = efx->mac_op->reconfigure(efx);
		BUG_ON(rc);

		falcon_start_nic_stats(efx);

		efx_link_status_changed(efx);
	}

	falcon_poll_xmac(efx);
}

/* Zeroes out the SRAM contents.  This routine must be called in
 * process context and is allowed to sleep.
 */
static int falcon_reset_sram(struct efx_nic *efx)
{
	efx_oword_t srm_cfg_reg_ker, gpio_cfg_reg_ker;
	int count;

	/* Set the SRAM wake/sleep GPIO appropriately. */
	efx_reado(efx, &gpio_cfg_reg_ker, FR_AB_GPIO_CTL);
	EFX_SET_OWORD_FIELD(gpio_cfg_reg_ker, FRF_AB_GPIO1_OEN, 1);
	EFX_SET_OWORD_FIELD(gpio_cfg_reg_ker, FRF_AB_GPIO1_OUT, 1);
	efx_writeo(efx, &gpio_cfg_reg_ker, FR_AB_GPIO_CTL);

	/* Initiate SRAM reset */
	EFX_POPULATE_OWORD_2(srm_cfg_reg_ker,
			     FRF_AZ_SRM_INIT_EN, 1,
			     FRF_AZ_SRM_NB_SZ, 0);
	efx_writeo(efx, &srm_cfg_reg_ker, FR_AZ_SRM_CFG);

	/* Wait for SRAM reset to complete */
	count = 0;
	do {
		netif_dbg(efx, hw, efx->net_dev,
			  "waiting for SRAM reset (attempt %d)...\n", count);

		/* SRAM reset is slow; expect around 16ms */
		schedule_timeout_uninterruptible(HZ / 50);

		/* Check for reset complete */
		efx_reado(efx, &srm_cfg_reg_ker, FR_AZ_SRM_CFG);
		if (!EFX_OWORD_FIELD(srm_cfg_reg_ker, FRF_AZ_SRM_INIT_EN)) {
			netif_dbg(efx, hw, efx->net_dev,
				  "SRAM reset complete\n");

			return 0;
		}
	} while (++count < 20);	/* wait up to 0.4 sec */

	netif_err(efx, hw, efx->net_dev, "timed out waiting for SRAM reset\n");
	return -ETIMEDOUT;
}

/* Decode a packed SPI device-type word into @spi_device's fields.
 * A device_type of 0 means the device is absent (size set to 0).
 */
static void falcon_spi_device_init(struct efx_nic *efx,
				   struct efx_spi_device *spi_device,
				   unsigned int device_id, u32 device_type)
{
	if (device_type != 0) {
		spi_device->device_id = device_id;
		spi_device->size =
			1 << SPI_DEV_TYPE_FIELD(device_type, SPI_DEV_TYPE_SIZE);
		spi_device->addr_len =
			SPI_DEV_TYPE_FIELD(device_type, SPI_DEV_TYPE_ADDR_LEN);
		/* Only 512-byte devices with 1-byte addressing need the
		 * address-in-command munging (see efx_spi_munge_command)
		 */
		spi_device->munge_address = (spi_device->size == 1 << 9 &&
					     spi_device->addr_len == 1);
		spi_device->erase_command =
			SPI_DEV_TYPE_FIELD(device_type, SPI_DEV_TYPE_ERASE_CMD);
		spi_device->erase_size =
			1 << SPI_DEV_TYPE_FIELD(device_type,
						SPI_DEV_TYPE_ERASE_SIZE);
		spi_device->block_size =
			1 << SPI_DEV_TYPE_FIELD(device_type,
						SPI_DEV_TYPE_BLOCK_SIZE);
	} else {
		spi_device->size = 0;
	}
}

/* Extract non-volatile configuration */
static int falcon_probe_nvconfig(struct efx_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	struct falcon_nvconfig *nvconfig;
	int rc;

	nvconfig = kmalloc(sizeof(*nvconfig), GFP_KERNEL);
	if (!nvconfig)
		return -ENOMEM;

	rc = falcon_read_nvram(efx, nvconfig);
	if (rc)
		goto out;

	efx->phy_type = nvconfig->board_v2.port0_phy_type;
	efx->mdio.prtad = nvconfig->board_v2.port0_phy_addr;

	/* v3+ configs carry exact SPI device types; re-init from them */
	if (le16_to_cpu(nvconfig->board_struct_ver) >= 3) {
		falcon_spi_device_init(
			efx, &nic_data->spi_flash, FFE_AB_SPI_DEVICE_FLASH,
			le32_to_cpu(nvconfig->board_v3
				    .spi_device_type[FFE_AB_SPI_DEVICE_FLASH]));
		falcon_spi_device_init(
			efx, &nic_data->spi_eeprom, FFE_AB_SPI_DEVICE_EEPROM,
			le32_to_cpu(nvconfig->board_v3
				    .spi_device_type[FFE_AB_SPI_DEVICE_EEPROM]));
	}

	/* Read the MAC addresses */
	memcpy(efx->net_dev->perm_addr, nvconfig->mac_address[0], ETH_ALEN);

	netif_dbg(efx, probe, efx->net_dev, "PHY is %d phy_id %d\n",
		  efx->phy_type, efx->mdio.prtad);

	rc = falcon_probe_board(efx,
				le16_to_cpu(nvconfig->board_v2.board_revision));
out:
	kfree(nvconfig);
	return rc;
}

/* Probe all SPI devices on the NIC */
static void falcon_probe_spi_devices(struct efx_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	efx_oword_t nic_stat, gpio_ctl, ee_vpd_cfg;
	int boot_dev;

	efx_reado(efx, &gpio_ctl, FR_AB_GPIO_CTL);
	efx_reado(efx, &nic_stat, FR_AB_NIC_STAT);
	efx_reado(efx, &ee_vpd_cfg, FR_AB_EE_VPD_CFG0);

	/* GPIO3 power-up strap set: boot device chosen by SF_PRST */
	if (EFX_OWORD_FIELD(gpio_ctl, FRF_AB_GPIO3_PWRUP_VALUE)) {
		boot_dev = (EFX_OWORD_FIELD(nic_stat,
					    FRF_AB_SF_PRST) ?
			    FFE_AB_SPI_DEVICE_FLASH : FFE_AB_SPI_DEVICE_EEPROM);
		netif_dbg(efx, probe, efx->net_dev,
			  "Booted from %s\n",
			  boot_dev == FFE_AB_SPI_DEVICE_FLASH ?
			  "flash" : "EEPROM");
	} else {
		/* Disable VPD and set clock dividers to safe
		 * values for initial programming.
		 */
		boot_dev = -1;
		netif_dbg(efx, probe, efx->net_dev,
			  "Booted from internal ASIC settings;"
			  " setting SPI config\n");
		EFX_POPULATE_OWORD_3(ee_vpd_cfg, FRF_AB_EE_VPD_EN, 0,
				     /* 125 MHz / 7 ~= 20 MHz */
				     FRF_AB_EE_SF_CLOCK_DIV, 7,
				     /* 125 MHz / 63 ~= 2 MHz */
				     FRF_AB_EE_EE_CLOCK_DIV, 63);
		efx_writeo(efx, &ee_vpd_cfg, FR_AB_EE_VPD_CFG0);
	}

	mutex_init(&nic_data->spi_lock);

	/* Assume default device geometry for the boot device; may be
	 * refined later by falcon_probe_nvconfig()
	 */
	if (boot_dev == FFE_AB_SPI_DEVICE_FLASH)
		falcon_spi_device_init(efx, &nic_data->spi_flash,
				       FFE_AB_SPI_DEVICE_FLASH,
				       default_flash_type);
	if (boot_dev == FFE_AB_SPI_DEVICE_EEPROM)
		falcon_spi_device_init(efx, &nic_data->spi_eeprom,
				       FFE_AB_SPI_DEVICE_EEPROM,
				       large_eeprom_type);
}

/* Probe the NIC: validate the silicon revision, find the secondary
 * PCI function on A1, reset the hardware, allocate the interrupt
 * status block, read NVRAM and set up the board/I2C support.
 */
static int falcon_probe_nic(struct efx_nic *efx)
{
	struct falcon_nic_data *nic_data;
	struct falcon_board *board;
	int rc;

	/* Allocate storage for hardware specific data */
	nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL);
	if (!nic_data)
		return -ENOMEM;
	efx->nic_data = nic_data;

	rc = -ENODEV;

	if (efx_nic_fpga_ver(efx) != 0) {
		netif_err(efx, probe, efx->net_dev,
			  "Falcon FPGA not supported\n");
		goto fail1;
	}

	if (efx_nic_rev(efx) <= EFX_REV_FALCON_A1) {
		efx_oword_t nic_stat;
		struct pci_dev *dev;
		u8 pci_rev = efx->pci_dev->revision;

		if ((pci_rev == 0xff) || (pci_rev == 0)) {
			netif_err(efx, probe, efx->net_dev,
				  "Falcon rev A0 not supported\n");
			goto fail1;
		}
		efx_reado(efx, &nic_stat, FR_AB_NIC_STAT);
		if (EFX_OWORD_FIELD(nic_stat, FRF_AB_STRAP_10G) == 0) {
			netif_err(efx, probe, efx->net_dev,
				  "Falcon rev A1 1G not supported\n");
			goto fail1;
		}
		if (EFX_OWORD_FIELD(nic_stat, FRF_AA_STRAP_PCIE) == 0) {
			netif_err(efx, probe, efx->net_dev,
				  "Falcon rev A1 PCI-X not supported\n");
			goto fail1;
		}

		/* A1 exposes a second PCI function at devfn + 1; find it
		 * and hold a reference until remove.
		 */
		dev = pci_dev_get(efx->pci_dev);
		while ((dev = pci_get_device(EFX_VENDID_SFC,
					     FALCON_A_S_DEVID, dev))) {
			if (dev->bus == efx->pci_dev->bus &&
			    dev->devfn == efx->pci_dev->devfn + 1) {
				nic_data->pci_dev2 = dev;
				break;
			}
		}
		if (!nic_data->pci_dev2) {
			netif_err(efx, probe, efx->net_dev,
				  "failed to find secondary function\n");
			rc = -ENODEV;
			goto fail2;
		}
	}

	/* Now we can reset the NIC */
	rc = __falcon_reset_hw(efx, RESET_TYPE_ALL);
	if (rc) {
		netif_err(efx, probe, efx->net_dev, "failed to reset NIC\n");
		goto fail3;
	}

	/* Allocate memory for INT_KER */
	rc = efx_nic_alloc_buffer(efx, &efx->irq_status, sizeof(efx_oword_t));
	if (rc)
		goto fail4;
	BUG_ON(efx->irq_status.dma_addr & 0x0f);
	netif_dbg(efx, probe, efx->net_dev,
		  "INT_KER at %llx (virt %p phys %llx)\n",
		  (u64)efx->irq_status.dma_addr,
		  efx->irq_status.addr,
		  (u64)virt_to_phys(efx->irq_status.addr));

	falcon_probe_spi_devices(efx);

	/* Read in the non-volatile configuration */
	rc = falcon_probe_nvconfig(efx);
	if (rc) {
		if (rc == -EINVAL)
			netif_err(efx, probe, efx->net_dev,
				  "NVRAM is invalid\n");
		goto fail5;
	}

	/* Initialise I2C adapter */
	board = falcon_board(efx);
	board->i2c_adap.owner = THIS_MODULE;
	board->i2c_data = falcon_i2c_bit_operations;
	board->i2c_data.data = efx;
	board->i2c_adap.algo_data = &board->i2c_data;
	board->i2c_adap.dev.parent = &efx->pci_dev->dev;
	strlcpy(board->i2c_adap.name, "SFC4000 GPIO",
		sizeof(board->i2c_adap.name));
	rc = i2c_bit_add_bus(&board->i2c_adap);
	if (rc)
		goto fail5;

	rc = falcon_board(efx)->type->init(efx);
	if (rc) {
		netif_err(efx, probe, efx->net_dev,
			  "failed to initialise board\n");
		goto fail6;
	}

	/* Stats start disabled; falcon_start_nic_stats() enables them */
	nic_data->stats_disable_count = 1;
	setup_timer(&nic_data->stats_timer, &falcon_stats_timer_func,
		    (unsigned long)efx);

	return 0;

 fail6:
	BUG_ON(i2c_del_adapter(&board->i2c_adap));
	memset(&board->i2c_adap, 0, sizeof(board->i2c_adap));
 fail5:
	efx_nic_free_buffer(efx, &efx->irq_status);
 fail4:
 fail3:
	if (nic_data->pci_dev2) {
		pci_dev_put(nic_data->pci_dev2);
		nic_data->pci_dev2 = NULL;
	}
 fail2:
 fail1:
	kfree(efx->nic_data);
	return rc;
}

static void falcon_init_rx_cfg(struct efx_nic
*efx)
{
	/* Prior to Siena the RX DMA engine will split each frame at
	 * intervals of RX_USR_BUF_SIZE (32-byte units). We set it to
	 * be so large that that never happens.
	 */
	const unsigned huge_buf_size = (3 * 4096) >> 5;
	/* RX control FIFO thresholds (32 entries) */
	const unsigned ctrl_xon_thr = 20;
	const unsigned ctrl_xoff_thr = 25;
	efx_oword_t reg;

	efx_reado(efx, &reg, FR_AZ_RX_CFG);
	if (efx_nic_rev(efx) <= EFX_REV_FALCON_A1) {
		/* Data FIFO size is 5.5K */
		EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_DESC_PUSH_EN, 0);
		EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_USR_BUF_SIZE,
				    huge_buf_size);
		EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XON_MAC_TH, 512 >> 8);
		EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XOFF_MAC_TH, 2048 >> 8);
		EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XON_TX_TH, ctrl_xon_thr);
		EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XOFF_TX_TH, ctrl_xoff_thr);
	} else {
		/* Data FIFO size is 80K; register fields moved */
		EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_DESC_PUSH_EN, 0);
		EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_USR_BUF_SIZE,
				    huge_buf_size);
		/* Send XON and XOFF at ~3 * max MTU away from empty/full */
		EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XON_MAC_TH, 27648 >> 8);
		EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XOFF_MAC_TH, 54272 >> 8);
		EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XON_TX_TH, ctrl_xon_thr);
		EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XOFF_TX_TH, ctrl_xoff_thr);
		EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_INGR_EN, 1);

		/* Enable hash insertion. This is broken for the
		 * 'Falcon' hash so also select Toeplitz TCP/IPv4 and
		 * IPv4 hashes. */
		EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_HASH_INSRT_HDR, 1);
		EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_HASH_ALG, 1);
		EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_IP_HASH, 1);
	}
	/* Always enable XOFF signal from RX FIFO.  We enable
	 * or disable transmission of pause frames at the MAC. */
	EFX_SET_OWORD_FIELD(reg, FRF_AZ_RX_XOFF_MAC_EN, 1);
	efx_writeo(efx, &reg, FR_AZ_RX_CFG);
}

/* This call performs hardware-specific global initialisation, such as
 * defining the descriptor cache sizes and number of RSS channels.
 * It does not set up any buffers, descriptor rings or event queues.
 */
static int falcon_init_nic(struct efx_nic *efx)
{
	efx_oword_t temp;
	int rc;

	/* Use on-chip SRAM */
	efx_reado(efx, &temp, FR_AB_NIC_STAT);
	EFX_SET_OWORD_FIELD(temp, FRF_AB_ONCHIP_SRAM, 1);
	efx_writeo(efx, &temp, FR_AB_NIC_STAT);

	rc = falcon_reset_sram(efx);
	if (rc)
		return rc;

	/* Clear the parity enables on the TX data fifos as
	 * they produce false parity errors because of timing issues
	 */
	if (EFX_WORKAROUND_5129(efx)) {
		efx_reado(efx, &temp, FR_AZ_CSR_SPARE);
		EFX_SET_OWORD_FIELD(temp, FRF_AB_MEM_PERR_EN_TX_DATA, 0);
		efx_writeo(efx, &temp, FR_AZ_CSR_SPARE);
	}

	if (EFX_WORKAROUND_7244(efx)) {
		efx_reado(efx, &temp, FR_BZ_RX_FILTER_CTL);
		EFX_SET_OWORD_FIELD(temp, FRF_BZ_UDP_FULL_SRCH_LIMIT, 8);
		EFX_SET_OWORD_FIELD(temp, FRF_BZ_UDP_WILD_SRCH_LIMIT, 8);
		EFX_SET_OWORD_FIELD(temp, FRF_BZ_TCP_FULL_SRCH_LIMIT, 8);
		EFX_SET_OWORD_FIELD(temp, FRF_BZ_TCP_WILD_SRCH_LIMIT, 8);
		efx_writeo(efx, &temp, FR_BZ_RX_FILTER_CTL);
	}

	/* XXX This is documented only for Falcon A0/A1 */
	/* Setup RX.  Wait for descriptor is broken and must
	 * be disabled.  RXDP recovery shouldn't be needed, but is.
	 */
	efx_reado(efx, &temp, FR_AA_RX_SELF_RST);
	EFX_SET_OWORD_FIELD(temp, FRF_AA_RX_NODESC_WAIT_DIS, 1);
	EFX_SET_OWORD_FIELD(temp, FRF_AA_RX_SELF_RST_EN, 1);
	if (EFX_WORKAROUND_5583(efx))
		EFX_SET_OWORD_FIELD(temp, FRF_AA_RX_ISCSI_DIS, 1);
	efx_writeo(efx, &temp, FR_AA_RX_SELF_RST);

	/* Do not enable TX_NO_EOP_DISC_EN, since it limits packets to 16
	 * descriptors (which is bad).
	 */
	efx_reado(efx, &temp, FR_AZ_TX_CFG);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_NO_EOP_DISC_EN, 0);
	efx_writeo(efx, &temp, FR_AZ_TX_CFG);

	falcon_init_rx_cfg(efx);

	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
		/* Set hash key for IPv4 */
		memcpy(&temp, efx->rx_hash_key, sizeof(temp));
		efx_writeo(efx, &temp, FR_BZ_RX_RSS_TKEY);

		/* Set destination of both TX and RX Flush events */
		EFX_POPULATE_OWORD_1(temp, FRF_BZ_FLS_EVQ_ID, 0);
		efx_writeo(efx, &temp, FR_BZ_DP_CTRL);
	}

	efx_nic_init_common(efx);

	return 0;
}

/* Undo falcon_probe_nic: tear down board/I2C, free the interrupt
 * status buffer, reset the hardware and release the secondary PCI
 * function and private state.
 */
static void falcon_remove_nic(struct efx_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	struct falcon_board *board = falcon_board(efx);
	int rc;

	board->type->fini(efx);

	/* Remove I2C adapter and clear it in preparation for a retry */
	rc = i2c_del_adapter(&board->i2c_adap);
	BUG_ON(rc);
	memset(&board->i2c_adap, 0, sizeof(board->i2c_adap));

	efx_nic_free_buffer(efx, &efx->irq_status);

	__falcon_reset_hw(efx, RESET_TYPE_ALL);

	/* Release the second function after the reset */
	if (nic_data->pci_dev2) {
		pci_dev_put(nic_data->pci_dev2);
		nic_data->pci_dev2 = NULL;
	}

	/* Tear down the private nic state */
	kfree(efx->nic_data);
	efx->nic_data = NULL;
}

/* Accumulate the RX no-descriptor drop counter and harvest a pending
 * stats DMA if it has completed.  No-op while stats are disabled.
 */
static void falcon_update_nic_stats(struct efx_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	efx_oword_t cnt;

	if (nic_data->stats_disable_count)
		return;

	efx_reado(efx, &cnt, FR_AZ_RX_NODESC_DROP);
	efx->n_rx_nodesc_drop_cnt +=
		EFX_OWORD_FIELD(cnt, FRF_AB_RX_NODESC_DROP_CNT);

	if (nic_data->stats_pending &&
	    *nic_data->stats_dma_done == FALCON_STATS_DONE) {
		nic_data->stats_pending = false;
		rmb(); /* read the done flag before the stats */
		efx->mac_op->update_stats(efx);
	}
}

/* Decrement the stats-disable count; kick off collection when it
 * reaches zero.  Pairs with falcon_stop_nic_stats().
 */
void falcon_start_nic_stats(struct efx_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;

	spin_lock_bh(&efx->stats_lock);
	if (--nic_data->stats_disable_count == 0)
		falcon_stats_request(efx);
	spin_unlock_bh(&efx->stats_lock);
}

void falcon_stop_nic_stats(struct efx_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	int i;
might_sleep(); spin_lock_bh(&efx->stats_lock); ++nic_data->stats_disable_count; spin_unlock_bh(&efx->stats_lock); del_timer_sync(&nic_data->stats_timer); /* Wait enough time for the most recent transfer to * complete. */ for (i = 0; i < 4 && nic_data->stats_pending; i++) { if (*nic_data->stats_dma_done == FALCON_STATS_DONE) break; msleep(1); } spin_lock_bh(&efx->stats_lock); falcon_stats_complete(efx); spin_unlock_bh(&efx->stats_lock); } static void falcon_set_id_led(struct efx_nic *efx, enum efx_led_mode mode) { falcon_board(efx)->type->set_id_led(efx, mode); } /************************************************************************** * * Wake on LAN * ************************************************************************** */ static void falcon_get_wol(struct efx_nic *efx, struct ethtool_wolinfo *wol) { wol->supported = 0; wol->wolopts = 0; memset(&wol->sopass, 0, sizeof(wol->sopass)); } static int falcon_set_wol(struct efx_nic *efx, u32 type) { if (type != 0) return -EINVAL; return 0; } /************************************************************************** * * Revision-dependent attributes used by efx.c and nic.c * ************************************************************************** */ const struct efx_nic_type falcon_a1_nic_type = { .probe = falcon_probe_nic, .remove = falcon_remove_nic, .init = falcon_init_nic, .fini = efx_port_dummy_op_void, .monitor = falcon_monitor, .reset = falcon_reset_hw, .probe_port = falcon_probe_port, .remove_port = falcon_remove_port, .handle_global_event = falcon_handle_global_event, .prepare_flush = falcon_prepare_flush, .update_stats = falcon_update_nic_stats, .start_stats = falcon_start_nic_stats, .stop_stats = falcon_stop_nic_stats, .set_id_led = falcon_set_id_led, .push_irq_moderation = falcon_push_irq_moderation, .push_multicast_hash = falcon_push_multicast_hash, .reconfigure_port = falcon_reconfigure_port, .get_wol = falcon_get_wol, .set_wol = falcon_set_wol, .resume_wol = efx_port_dummy_op_void, .test_nvram = 
falcon_test_nvram, .default_mac_ops = &falcon_xmac_operations, .revision = EFX_REV_FALCON_A1, .mem_map_size = 0x20000, .txd_ptr_tbl_base = FR_AA_TX_DESC_PTR_TBL_KER, .rxd_ptr_tbl_base = FR_AA_RX_DESC_PTR_TBL_KER, .buf_tbl_base = FR_AA_BUF_FULL_TBL_KER, .evq_ptr_tbl_base = FR_AA_EVQ_PTR_TBL_KER, .evq_rptr_tbl_base = FR_AA_EVQ_RPTR_KER, .max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH), .rx_buffer_padding = 0x24, .max_interrupt_mode = EFX_INT_MODE_MSI, .phys_addr_channels = 4, .tx_dc_base = 0x130000, .rx_dc_base = 0x100000, .offload_features = NETIF_F_IP_CSUM, .reset_world_flags = ETH_RESET_IRQ, }; const struct efx_nic_type falcon_b0_nic_type = { .probe = falcon_probe_nic, .remove = falcon_remove_nic, .init = falcon_init_nic, .fini = efx_port_dummy_op_void, .monitor = falcon_monitor, .reset = falcon_reset_hw, .probe_port = falcon_probe_port, .remove_port = falcon_remove_port, .handle_global_event = falcon_handle_global_event, .prepare_flush = falcon_prepare_flush, .update_stats = falcon_update_nic_stats, .start_stats = falcon_start_nic_stats, .stop_stats = falcon_stop_nic_stats, .set_id_led = falcon_set_id_led, .push_irq_moderation = falcon_push_irq_moderation, .push_multicast_hash = falcon_push_multicast_hash, .reconfigure_port = falcon_reconfigure_port, .get_wol = falcon_get_wol, .set_wol = falcon_set_wol, .resume_wol = efx_port_dummy_op_void, .test_registers = falcon_b0_test_registers, .test_nvram = falcon_test_nvram, .default_mac_ops = &falcon_xmac_operations, .revision = EFX_REV_FALCON_B0, /* Map everything up to and including the RSS indirection * table. Don't map MSI-X table, MSI-X PBA since Linux * requires that they not be mapped. 
*/ .mem_map_size = (FR_BZ_RX_INDIRECTION_TBL + FR_BZ_RX_INDIRECTION_TBL_STEP * FR_BZ_RX_INDIRECTION_TBL_ROWS), .txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL, .rxd_ptr_tbl_base = FR_BZ_RX_DESC_PTR_TBL, .buf_tbl_base = FR_BZ_BUF_FULL_TBL, .evq_ptr_tbl_base = FR_BZ_EVQ_PTR_TBL, .evq_rptr_tbl_base = FR_BZ_EVQ_RPTR, .max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH), .rx_buffer_hash_size = 0x10, .rx_buffer_padding = 0, .max_interrupt_mode = EFX_INT_MODE_MSIX, .phys_addr_channels = 32, /* Hardware limit is 64, but the legacy * interrupt handler only supports 32 * channels */ .tx_dc_base = 0x130000, .rx_dc_base = 0x100000, .offload_features = NETIF_F_IP_CSUM | NETIF_F_RXHASH | NETIF_F_NTUPLE, .reset_world_flags = ETH_RESET_IRQ, };
gpl-2.0
menghang/android_kernel_xiaomi_msm8996
arch/mn10300/mm/init.c
2236
3754
/* MN10300 Memory management initialisation * * Copyright (C) 2007 Matsushita Electric Industrial Co., Ltd. * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. * Modified by David Howells (dhowells@redhat.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public Licence * as published by the Free Software Foundation; either version * 2 of the Licence, or (at your option) any later version. */ #include <linux/signal.h> #include <linux/sched.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/types.h> #include <linux/ptrace.h> #include <linux/mman.h> #include <linux/fs.h> #include <linux/mm.h> #include <linux/swap.h> #include <linux/smp.h> #include <linux/init.h> #include <linux/initrd.h> #include <linux/highmem.h> #include <linux/pagemap.h> #include <linux/bootmem.h> #include <linux/gfp.h> #include <asm/processor.h> #include <asm/uaccess.h> #include <asm/pgtable.h> #include <asm/pgalloc.h> #include <asm/dma.h> #include <asm/tlb.h> #include <asm/sections.h> unsigned long highstart_pfn, highend_pfn; #ifdef CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT static struct vm_struct user_iomap_vm; #endif /* * set up paging */ void __init paging_init(void) { unsigned long zones_size[MAX_NR_ZONES] = {0,}; pte_t *ppte; int loop; /* main kernel space -> RAM mapping is handled as 1:1 transparent by * the MMU */ memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir)); memset(kernel_vmalloc_ptes, 0, sizeof(kernel_vmalloc_ptes)); /* load the VMALLOC area PTE table addresses into the kernel PGD */ ppte = kernel_vmalloc_ptes; for (loop = VMALLOC_START / (PAGE_SIZE * PTRS_PER_PTE); loop < VMALLOC_END / (PAGE_SIZE * PTRS_PER_PTE); loop++ ) { set_pgd(swapper_pg_dir + loop, __pgd(__pa(ppte) | _PAGE_TABLE)); ppte += PAGE_SIZE / sizeof(pte_t); } /* declare the sizes of the RAM zones (only use the normal zone) */ zones_size[ZONE_NORMAL] = contig_page_data.bdata->node_low_pfn - 
contig_page_data.bdata->node_min_pfn; /* pass the memory from the bootmem allocator to the main allocator */ free_area_init(zones_size); #ifdef CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT /* The Atomic Operation Unit registers need to be mapped to userspace * for all processes. The following uses vm_area_register_early() to * reserve the first page of the vmalloc area and sets the pte for that * page. * * glibc hardcodes this virtual mapping, so we're pretty much stuck with * it from now on. */ user_iomap_vm.flags = VM_USERMAP; user_iomap_vm.size = 1 << PAGE_SHIFT; vm_area_register_early(&user_iomap_vm, PAGE_SIZE); ppte = kernel_vmalloc_ptes; set_pte(ppte, pfn_pte(USER_ATOMIC_OPS_PAGE_ADDR >> PAGE_SHIFT, PAGE_USERIO)); #endif local_flush_tlb_all(); } /* * transfer all the memory from the bootmem allocator to the runtime allocator */ void __init mem_init(void) { BUG_ON(!mem_map); #define START_PFN (contig_page_data.bdata->node_min_pfn) #define MAX_LOW_PFN (contig_page_data.bdata->node_low_pfn) max_mapnr = MAX_LOW_PFN - START_PFN; high_memory = (void *) __va(MAX_LOW_PFN * PAGE_SIZE); /* clear the zero-page */ memset(empty_zero_page, 0, PAGE_SIZE); /* this will put all low memory onto the freelists */ free_all_bootmem(); mem_init_print_info(NULL); } /* * recycle memory containing stuff only required for initialisation */ void free_initmem(void) { free_initmem_default(POISON_FREE_INITMEM); } /* * dispose of the memory on which the initial ramdisk resided */ #ifdef CONFIG_BLK_DEV_INITRD void free_initrd_mem(unsigned long start, unsigned long end) { free_reserved_area((void *)start, (void *)end, POISON_FREE_INITMEM, "initrd"); } #endif
gpl-2.0
glewarne/testing
drivers/staging/tidspbridge/core/io_sm.c
2748
60597
/* * io_sm.c * * DSP-BIOS Bridge driver support functions for TI OMAP processors. * * IO dispatcher for a shared memory channel driver. * * Copyright (C) 2005-2006 Texas Instruments, Inc. * * This package is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. */ /* * Channel Invariant: * There is an important invariant condition which must be maintained per * channel outside of bridge_chnl_get_ioc() and IO_Dispatch(), violation of * which may cause timeouts and/or failure of the sync_wait_on_event * function. */ #include <linux/types.h> #include <linux/list.h> /* Host OS */ #include <dspbridge/host_os.h> #include <linux/workqueue.h> /* ----------------------------------- DSP/BIOS Bridge */ #include <dspbridge/dbdefs.h> /* Services Layer */ #include <dspbridge/ntfy.h> #include <dspbridge/sync.h> /* Hardware Abstraction Layer */ #include <hw_defs.h> #include <hw_mmu.h> /* Bridge Driver */ #include <dspbridge/dspdeh.h> #include <dspbridge/dspio.h> #include <dspbridge/dspioctl.h> #include <dspbridge/wdt.h> #include <_tiomap.h> #include <tiomap_io.h> #include <_tiomap_pwr.h> /* Platform Manager */ #include <dspbridge/cod.h> #include <dspbridge/node.h> #include <dspbridge/dev.h> /* Others */ #include <dspbridge/rms_sh.h> #include <dspbridge/mgr.h> #include <dspbridge/drv.h> #include "_cmm.h" #include "module_list.h" /* This */ #include <dspbridge/io_sm.h> #include "_msg_sm.h" /* Defines, Data Structures, Typedefs */ #define OUTPUTNOTREADY 0xffff #define NOTENABLED 0xffff /* Channel(s) not enabled */ #define EXTEND "_EXT_END" #define SWAP_WORD(x) (x) #define UL_PAGE_ALIGN_SIZE 0x10000 /* Page Align Size */ #define MAX_PM_REQS 32 #define MMU_FAULT_HEAD1 0xa5a5a5a5 
#define MMU_FAULT_HEAD2 0x96969696 #define POLL_MAX 1000 #define MAX_MMU_DBGBUFF 10240 /* IO Manager: only one created per board */ struct io_mgr { /* These four fields must be the first fields in a io_mgr_ struct */ /* Bridge device context */ struct bridge_dev_context *bridge_context; /* Function interface to Bridge driver */ struct bridge_drv_interface *intf_fxns; struct dev_object *dev_obj; /* Device this board represents */ /* These fields initialized in bridge_io_create() */ struct chnl_mgr *chnl_mgr; struct shm *shared_mem; /* Shared Memory control */ u8 *input; /* Address of input channel */ u8 *output; /* Address of output channel */ struct msg_mgr *msg_mgr; /* Message manager */ /* Msg control for from DSP messages */ struct msg_ctrl *msg_input_ctrl; /* Msg control for to DSP messages */ struct msg_ctrl *msg_output_ctrl; u8 *msg_input; /* Address of input messages */ u8 *msg_output; /* Address of output messages */ u32 sm_buf_size; /* Size of a shared memory I/O channel */ bool shared_irq; /* Is this IRQ shared? */ u32 word_size; /* Size in bytes of DSP word */ u16 intr_val; /* Interrupt value */ /* Private extnd proc info; mmu setup */ struct mgr_processorextinfo ext_proc_info; struct cmm_object *cmm_mgr; /* Shared Mem Mngr */ struct work_struct io_workq; /* workqueue */ #if defined(CONFIG_TIDSPBRIDGE_BACKTRACE) u32 trace_buffer_begin; /* Trace message start address */ u32 trace_buffer_end; /* Trace message end address */ u32 trace_buffer_current; /* Trace message current address */ u32 gpp_read_pointer; /* GPP Read pointer to Trace buffer */ u8 *msg; u32 gpp_va; u32 dsp_va; #endif /* IO Dpc */ u32 dpc_req; /* Number of requested DPC's. */ u32 dpc_sched; /* Number of executed DPC's. 
*/ struct tasklet_struct dpc_tasklet; spinlock_t dpc_lock; }; struct shm_symbol_val { u32 shm_base; u32 shm_lim; u32 msg_base; u32 msg_lim; u32 shm0_end; u32 dyn_ext; u32 ext_end; }; /* Function Prototypes */ static void io_dispatch_pm(struct io_mgr *pio_mgr); static void notify_chnl_complete(struct chnl_object *pchnl, struct chnl_irp *chnl_packet_obj); static void input_chnl(struct io_mgr *pio_mgr, struct chnl_object *pchnl, u8 io_mode); static void output_chnl(struct io_mgr *pio_mgr, struct chnl_object *pchnl, u8 io_mode); static void input_msg(struct io_mgr *pio_mgr, struct msg_mgr *hmsg_mgr); static void output_msg(struct io_mgr *pio_mgr, struct msg_mgr *hmsg_mgr); static u32 find_ready_output(struct chnl_mgr *chnl_mgr_obj, struct chnl_object *pchnl, u32 mask); /* Bus Addr (cached kernel) */ static int register_shm_segs(struct io_mgr *hio_mgr, struct cod_manager *cod_man, u32 dw_gpp_base_pa); static inline void set_chnl_free(struct shm *sm, u32 chnl) { sm->host_free_mask &= ~(1 << chnl); } static inline void set_chnl_busy(struct shm *sm, u32 chnl) { sm->host_free_mask |= 1 << chnl; } /* * ======== bridge_io_create ======== * Create an IO manager object. */ int bridge_io_create(struct io_mgr **io_man, struct dev_object *hdev_obj, const struct io_attrs *mgr_attrts) { struct io_mgr *pio_mgr = NULL; struct bridge_dev_context *hbridge_context = NULL; struct cfg_devnode *dev_node_obj; struct chnl_mgr *hchnl_mgr; u8 dev_type; /* Check requirements */ if (!io_man || !mgr_attrts || mgr_attrts->word_size == 0) return -EFAULT; *io_man = NULL; dev_get_chnl_mgr(hdev_obj, &hchnl_mgr); if (!hchnl_mgr || hchnl_mgr->iomgr) return -EFAULT; /* * Message manager will be created when a file is loaded, since * size of message buffer in shared memory is configurable in * the base image. 
*/ dev_get_bridge_context(hdev_obj, &hbridge_context); if (!hbridge_context) return -EFAULT; dev_get_dev_type(hdev_obj, &dev_type); /* Allocate IO manager object */ pio_mgr = kzalloc(sizeof(struct io_mgr), GFP_KERNEL); if (!pio_mgr) return -ENOMEM; /* Initialize chnl_mgr object */ pio_mgr->chnl_mgr = hchnl_mgr; pio_mgr->word_size = mgr_attrts->word_size; if (dev_type == DSP_UNIT) { /* Create an IO DPC */ tasklet_init(&pio_mgr->dpc_tasklet, io_dpc, (u32) pio_mgr); /* Initialize DPC counters */ pio_mgr->dpc_req = 0; pio_mgr->dpc_sched = 0; spin_lock_init(&pio_mgr->dpc_lock); if (dev_get_dev_node(hdev_obj, &dev_node_obj)) { bridge_io_destroy(pio_mgr); return -EIO; } } pio_mgr->bridge_context = hbridge_context; pio_mgr->shared_irq = mgr_attrts->irq_shared; if (dsp_wdt_init()) { bridge_io_destroy(pio_mgr); return -EPERM; } /* Return IO manager object to caller... */ hchnl_mgr->iomgr = pio_mgr; *io_man = pio_mgr; return 0; } /* * ======== bridge_io_destroy ======== * Purpose: * Disable interrupts, destroy the IO manager. 
*/ int bridge_io_destroy(struct io_mgr *hio_mgr) { int status = 0; if (hio_mgr) { /* Free IO DPC object */ tasklet_kill(&hio_mgr->dpc_tasklet); #if defined(CONFIG_TIDSPBRIDGE_BACKTRACE) kfree(hio_mgr->msg); #endif dsp_wdt_exit(); /* Free this IO manager object */ kfree(hio_mgr); } else { status = -EFAULT; } return status; } struct shm_symbol_val *_get_shm_symbol_values(struct io_mgr *hio_mgr) { struct shm_symbol_val *s; struct cod_manager *cod_man; int status; s = kzalloc(sizeof(*s), GFP_KERNEL); if (!s) return ERR_PTR(-ENOMEM); status = dev_get_cod_mgr(hio_mgr->dev_obj, &cod_man); if (status) goto free_symbol; /* Get start and length of channel part of shared memory */ status = cod_get_sym_value(cod_man, CHNL_SHARED_BUFFER_BASE_SYM, &s->shm_base); if (status) goto free_symbol; status = cod_get_sym_value(cod_man, CHNL_SHARED_BUFFER_LIMIT_SYM, &s->shm_lim); if (status) goto free_symbol; if (s->shm_lim <= s->shm_base) { status = -EINVAL; goto free_symbol; } /* Get start and length of message part of shared memory */ status = cod_get_sym_value(cod_man, MSG_SHARED_BUFFER_BASE_SYM, &s->msg_base); if (status) goto free_symbol; status = cod_get_sym_value(cod_man, MSG_SHARED_BUFFER_LIMIT_SYM, &s->msg_lim); if (status) goto free_symbol; if (s->msg_lim <= s->msg_base) { status = -EINVAL; goto free_symbol; } #ifdef CONFIG_TIDSPBRIDGE_BACKTRACE status = cod_get_sym_value(cod_man, DSP_TRACESEC_END, &s->shm0_end); #else status = cod_get_sym_value(cod_man, SHM0_SHARED_END_SYM, &s->shm0_end); #endif if (status) goto free_symbol; status = cod_get_sym_value(cod_man, DYNEXTBASE, &s->dyn_ext); if (status) goto free_symbol; status = cod_get_sym_value(cod_man, EXTEND, &s->ext_end); if (status) goto free_symbol; return s; free_symbol: kfree(s); return ERR_PTR(status); } /* * ======== bridge_io_on_loaded ======== * Purpose: * Called when a new program is loaded to get shared memory buffer * parameters from COFF file. ulSharedBufferBase and ulSharedBufferLimit * are in DSP address units. 
*/ int bridge_io_on_loaded(struct io_mgr *hio_mgr) { struct bridge_dev_context *dc = hio_mgr->bridge_context; struct cfg_hostres *cfg_res = dc->resources; struct bridge_ioctl_extproc *eproc; struct cod_manager *cod_man; struct chnl_mgr *hchnl_mgr; struct msg_mgr *hmsg_mgr; struct shm_symbol_val *s; int status; u8 num_procs; s32 ndx; u32 i; u32 mem_sz, msg_sz, pad_sz, shm_sz, shm_base_offs; u32 seg0_sz, seg1_sz; u32 pa, va, da; u32 pa_curr, va_curr, da_curr; u32 bytes; u32 all_bits = 0; u32 page_size[] = { HW_PAGE_SIZE16MB, HW_PAGE_SIZE1MB, HW_PAGE_SIZE64KB, HW_PAGE_SIZE4KB }; u32 map_attrs = DSP_MAPLITTLEENDIAN | DSP_MAPPHYSICALADDR | DSP_MAPELEMSIZE32 | DSP_MAPDONOTLOCK; status = dev_get_cod_mgr(hio_mgr->dev_obj, &cod_man); if (status) return status; hchnl_mgr = hio_mgr->chnl_mgr; /* The message manager is destroyed when the board is stopped */ dev_get_msg_mgr(hio_mgr->dev_obj, &hio_mgr->msg_mgr); hmsg_mgr = hio_mgr->msg_mgr; if (!hchnl_mgr || !hmsg_mgr) return -EFAULT; if (hio_mgr->shared_mem) hio_mgr->shared_mem = NULL; s = _get_shm_symbol_values(hio_mgr); if (IS_ERR(s)) return PTR_ERR(s); /* Get total length in bytes */ shm_sz = (s->shm_lim - s->shm_base + 1) * hio_mgr->word_size; /* Calculate size of a PROCCOPY shared memory region */ dev_dbg(bridge, "%s: (proc)proccopy shmmem size: 0x%x bytes\n", __func__, shm_sz - sizeof(struct shm)); /* Length (bytes) of messaging part of shared memory */ msg_sz = (s->msg_lim - s->msg_base + 1) * hio_mgr->word_size; /* Total length (bytes) of shared memory: chnl + msg */ mem_sz = shm_sz + msg_sz; /* Get memory reserved in host resources */ (void)mgr_enum_processor_info(0, (struct dsp_processorinfo *) &hio_mgr->ext_proc_info, sizeof(struct mgr_processorextinfo), &num_procs); /* IO supports only one DSP for now */ if (num_procs != 1) { status = -EINVAL; goto free_symbol; } /* The first MMU TLB entry(TLB_0) in DCD is ShmBase */ pa = cfg_res->mem_phys[1]; va = cfg_res->mem_base[1]; /* This is the virtual uncached ioremapped 
address!!! */ /* Why can't we directly take the DSPVA from the symbols? */ da = hio_mgr->ext_proc_info.ty_tlb[0].dsp_virt; seg0_sz = (s->shm0_end - da) * hio_mgr->word_size; seg1_sz = (s->ext_end - s->dyn_ext) * hio_mgr->word_size; /* 4K align */ seg1_sz = (seg1_sz + 0xFFF) & (~0xFFFUL); /* 64K align */ seg0_sz = (seg0_sz + 0xFFFF) & (~0xFFFFUL); pad_sz = UL_PAGE_ALIGN_SIZE - ((pa + seg1_sz) % UL_PAGE_ALIGN_SIZE); if (pad_sz == UL_PAGE_ALIGN_SIZE) pad_sz = 0x0; dev_dbg(bridge, "%s: pa %x, va %x, da %x\n", __func__, pa, va, da); dev_dbg(bridge, "shm0_end %x, dyn_ext %x, ext_end %x, seg0_sz %x seg1_sz %x\n", s->shm0_end, s->dyn_ext, s->ext_end, seg0_sz, seg1_sz); if ((seg0_sz + seg1_sz + pad_sz) > cfg_res->mem_length[1]) { pr_err("%s: shm Error, reserved 0x%x required 0x%x\n", __func__, cfg_res->mem_length[1], seg0_sz + seg1_sz + pad_sz); status = -ENOMEM; goto free_symbol; } pa_curr = pa; va_curr = s->dyn_ext * hio_mgr->word_size; da_curr = va; bytes = seg1_sz; /* * Try to fit into TLB entries. If not possible, push them to page * tables. It is quite possible that if sections are not on * bigger page boundary, we may end up making several small pages. * So, push them onto page tables, if that is the case. */ while (bytes) { /* * To find the max. page size with which both PA & VA are * aligned. */ all_bits = pa_curr | va_curr; dev_dbg(bridge, "seg all_bits %x, pa_curr %x, va_curr %x, bytes %x\n", all_bits, pa_curr, va_curr, bytes); for (i = 0; i < 4; i++) { if ((bytes >= page_size[i]) && ((all_bits & (page_size[i] - 1)) == 0)) { status = hio_mgr->intf_fxns->brd_mem_map(dc, pa_curr, va_curr, page_size[i], map_attrs, NULL); if (status) goto free_symbol; pa_curr += page_size[i]; va_curr += page_size[i]; da_curr += page_size[i]; bytes -= page_size[i]; /* * Don't try smaller sizes. Hopefully we have * reached an address aligned to a bigger page * size. 
*/ break; } } } pa_curr += pad_sz; va_curr += pad_sz; da_curr += pad_sz; bytes = seg0_sz; va_curr = da * hio_mgr->word_size; eproc = kzalloc(sizeof(*eproc) * BRDIOCTL_NUMOFMMUTLB, GFP_KERNEL); if (!eproc) { status = -ENOMEM; goto free_symbol; } ndx = 0; /* Configure the TLB entries for the next cacheable segment */ while (bytes) { /* * To find the max. page size with which both PA & VA are * aligned. */ all_bits = pa_curr | va_curr; dev_dbg(bridge, "seg1 all_bits %x, pa_curr %x, va_curr %x, bytes %x\n", all_bits, pa_curr, va_curr, bytes); for (i = 0; i < 4; i++) { if (!(bytes >= page_size[i]) || !((all_bits & (page_size[i] - 1)) == 0)) continue; if (ndx >= MAX_LOCK_TLB_ENTRIES) { status = hio_mgr->intf_fxns->brd_mem_map(dc, pa_curr, va_curr, page_size[i], map_attrs, NULL); dev_dbg(bridge, "PTE pa %x va %x dsp_va %x sz %x\n", eproc[ndx].gpp_pa, eproc[ndx].gpp_va, eproc[ndx].dsp_va * hio_mgr->word_size, page_size[i]); if (status) goto free_eproc; } /* This is the physical address written to DSP MMU */ eproc[ndx].gpp_pa = pa_curr; /* * This is the virtual uncached ioremapped * address!!! */ eproc[ndx].gpp_va = da_curr; eproc[ndx].dsp_va = va_curr / hio_mgr->word_size; eproc[ndx].size = page_size[i]; eproc[ndx].endianism = HW_LITTLE_ENDIAN; eproc[ndx].elem_size = HW_ELEM_SIZE16BIT; eproc[ndx].mixed_mode = HW_MMU_CPUES; dev_dbg(bridge, "%s: tlb pa %x va %x dsp_va %x sz %x\n", __func__, eproc[ndx].gpp_pa, eproc[ndx].gpp_va, eproc[ndx].dsp_va * hio_mgr->word_size, page_size[i]); ndx++; pa_curr += page_size[i]; va_curr += page_size[i]; da_curr += page_size[i]; bytes -= page_size[i]; /* * Don't try smaller sizes. Hopefully we have reached * an address aligned to a bigger page size. */ break; } } /* * Copy remaining entries from CDB. All entries are 1 MB and * should not conflict with shm entries on MPU or DSP side. 
*/ for (i = 3; i < 7 && ndx < BRDIOCTL_NUMOFMMUTLB; i++) { struct mgr_processorextinfo *ep = &hio_mgr->ext_proc_info; u32 word_sz = hio_mgr->word_size; if (ep->ty_tlb[i].gpp_phys == 0) continue; if ((ep->ty_tlb[i].gpp_phys > pa - 0x100000 && ep->ty_tlb[i].gpp_phys <= pa + seg0_sz) || (ep->ty_tlb[i].dsp_virt > da - 0x100000 / word_sz && ep->ty_tlb[i].dsp_virt <= da + seg0_sz / word_sz)) { dev_dbg(bridge, "err cdb%d pa %x da %x shm pa %x da %x sz %x\n", i, ep->ty_tlb[i].gpp_phys, ep->ty_tlb[i].dsp_virt, pa, da, seg0_sz); status = -EPERM; goto free_eproc; } if (ndx >= MAX_LOCK_TLB_ENTRIES) { status = hio_mgr->intf_fxns->brd_mem_map(dc, ep->ty_tlb[i].gpp_phys, ep->ty_tlb[i].dsp_virt, 0x100000, map_attrs, NULL); if (status) goto free_eproc; } eproc[ndx].dsp_va = ep->ty_tlb[i].dsp_virt; eproc[ndx].gpp_pa = ep->ty_tlb[i].gpp_phys; eproc[ndx].gpp_va = 0; /* 1 MB */ eproc[ndx].size = 0x100000; dev_dbg(bridge, "shm MMU entry pa %x da 0x%x\n", eproc[ndx].gpp_pa, eproc[ndx].dsp_va); ndx++; } /* Map the L4 peripherals */ i = 0; while (l4_peripheral_table[i].phys_addr) { status = hio_mgr->intf_fxns->brd_mem_map(dc, l4_peripheral_table[i].phys_addr, l4_peripheral_table[i].dsp_virt_addr, HW_PAGE_SIZE4KB, map_attrs, NULL); if (status) goto free_eproc; i++; } for (i = ndx; i < BRDIOCTL_NUMOFMMUTLB; i++) { eproc[i].dsp_va = 0; eproc[i].gpp_pa = 0; eproc[i].gpp_va = 0; eproc[i].size = 0; } /* * Set the shm physical address entry (grayed out in CDB file) * to the virtual uncached ioremapped address of shm reserved * on MPU. */ hio_mgr->ext_proc_info.ty_tlb[0].gpp_phys = (va + seg1_sz + pad_sz); /* * Need shm Phys addr. IO supports only one DSP for now: * num_procs = 1. */ if (!hio_mgr->ext_proc_info.ty_tlb[0].gpp_phys) return -EFAULT; if (eproc[0].dsp_va > s->shm_base) return -EPERM; /* shm_base may not be at ul_dsp_va address */ shm_base_offs = (s->shm_base - eproc[0].dsp_va) * hio_mgr->word_size; /* * bridge_dev_ctrl() will set dev context dsp-mmu info. 
In * bridge_brd_start() the MMU will be re-programed with MMU * DSPVa-GPPPa pair info while DSP is in a known * (reset) state. */ status = hio_mgr->intf_fxns->dev_cntrl(hio_mgr->bridge_context, BRDIOCTL_SETMMUCONFIG, eproc); if (status) goto free_eproc; s->shm_base = hio_mgr->ext_proc_info.ty_tlb[0].gpp_phys; s->shm_base += shm_base_offs; s->shm_base = (u32) MEM_LINEAR_ADDRESS((void *)s->shm_base, mem_sz); if (!s->shm_base) { status = -EFAULT; goto free_eproc; } /* Register SM */ status = register_shm_segs(hio_mgr, cod_man, eproc[0].gpp_pa); hio_mgr->shared_mem = (struct shm *)s->shm_base; hio_mgr->input = (u8 *) hio_mgr->shared_mem + sizeof(struct shm); hio_mgr->output = hio_mgr->input + (shm_sz - sizeof(struct shm)) / 2; hio_mgr->sm_buf_size = hio_mgr->output - hio_mgr->input; /* Set up Shared memory addresses for messaging */ hio_mgr->msg_input_ctrl = (struct msg_ctrl *)((u8 *) hio_mgr->shared_mem + shm_sz); hio_mgr->msg_input = (u8 *) hio_mgr->msg_input_ctrl + sizeof(struct msg_ctrl); hio_mgr->msg_output_ctrl = (struct msg_ctrl *)((u8 *) hio_mgr->msg_input_ctrl + msg_sz / 2); hio_mgr->msg_output = (u8 *) hio_mgr->msg_output_ctrl + sizeof(struct msg_ctrl); hmsg_mgr->max_msgs = ((u8 *) hio_mgr->msg_output_ctrl - hio_mgr->msg_input) / sizeof(struct msg_dspmsg); dev_dbg(bridge, "IO MGR shm details: shared_mem %p, input %p, " "output %p, msg_input_ctrl %p, msg_input %p, " "msg_output_ctrl %p, msg_output %p\n", (u8 *) hio_mgr->shared_mem, hio_mgr->input, hio_mgr->output, (u8 *) hio_mgr->msg_input_ctrl, hio_mgr->msg_input, (u8 *) hio_mgr->msg_output_ctrl, hio_mgr->msg_output); dev_dbg(bridge, "(proc) Mas msgs in shared memory: 0x%x\n", hmsg_mgr->max_msgs); memset((void *)hio_mgr->shared_mem, 0, sizeof(struct shm)); #if defined(CONFIG_TIDSPBRIDGE_BACKTRACE) /* Get the start address of trace buffer */ status = cod_get_sym_value(cod_man, SYS_PUTCBEG, &hio_mgr->trace_buffer_begin); if (status) goto free_eproc; hio_mgr->gpp_read_pointer = hio_mgr->trace_buffer_begin = (va 
+ seg1_sz + pad_sz) + (hio_mgr->trace_buffer_begin - da); /* Get the end address of trace buffer */ status = cod_get_sym_value(cod_man, SYS_PUTCEND, &hio_mgr->trace_buffer_end); if (status) goto free_eproc; hio_mgr->trace_buffer_end = (va + seg1_sz + pad_sz) + (hio_mgr->trace_buffer_end - da); /* Get the current address of DSP write pointer */ status = cod_get_sym_value(cod_man, BRIDGE_SYS_PUTC_CURRENT, &hio_mgr->trace_buffer_current); if (status) goto free_eproc; hio_mgr->trace_buffer_current = (va + seg1_sz + pad_sz) + (hio_mgr->trace_buffer_current - da); /* Calculate the size of trace buffer */ kfree(hio_mgr->msg); hio_mgr->msg = kmalloc(((hio_mgr->trace_buffer_end - hio_mgr->trace_buffer_begin) * hio_mgr->word_size) + 2, GFP_KERNEL); if (!hio_mgr->msg) { status = -ENOMEM; goto free_eproc; } hio_mgr->dsp_va = da; hio_mgr->gpp_va = (va + seg1_sz + pad_sz); #endif free_eproc: kfree(eproc); free_symbol: kfree(s); return status; } /* * ======== io_buf_size ======== * Size of shared memory I/O channel. */ u32 io_buf_size(struct io_mgr *hio_mgr) { if (hio_mgr) return hio_mgr->sm_buf_size; else return 0; } /* * ======== io_cancel_chnl ======== * Cancel IO on a given PCPY channel. 
*/ void io_cancel_chnl(struct io_mgr *hio_mgr, u32 chnl) { struct io_mgr *pio_mgr = (struct io_mgr *)hio_mgr; struct shm *sm; if (!hio_mgr) goto func_end; sm = hio_mgr->shared_mem; /* Inform DSP that we have no more buffers on this channel */ set_chnl_free(sm, chnl); sm_interrupt_dsp(pio_mgr->bridge_context, MBX_PCPY_CLASS); func_end: return; } /* * ======== io_dispatch_pm ======== * Performs I/O dispatch on PM related messages from DSP */ static void io_dispatch_pm(struct io_mgr *pio_mgr) { int status; u32 parg[2]; /* Perform Power message processing here */ parg[0] = pio_mgr->intr_val; /* Send the command to the Bridge clk/pwr manager to handle */ if (parg[0] == MBX_PM_HIBERNATE_EN) { dev_dbg(bridge, "PM: Hibernate command\n"); status = pio_mgr->intf_fxns-> dev_cntrl(pio_mgr->bridge_context, BRDIOCTL_PWR_HIBERNATE, parg); if (status) pr_err("%s: hibernate cmd failed 0x%x\n", __func__, status); } else if (parg[0] == MBX_PM_OPP_REQ) { parg[1] = pio_mgr->shared_mem->opp_request.rqst_opp_pt; dev_dbg(bridge, "PM: Requested OPP = 0x%x\n", parg[1]); status = pio_mgr->intf_fxns-> dev_cntrl(pio_mgr->bridge_context, BRDIOCTL_CONSTRAINT_REQUEST, parg); if (status) dev_dbg(bridge, "PM: Failed to set constraint " "= 0x%x\n", parg[1]); } else { dev_dbg(bridge, "PM: clk control value of msg = 0x%x\n", parg[0]); status = pio_mgr->intf_fxns-> dev_cntrl(pio_mgr->bridge_context, BRDIOCTL_CLK_CTRL, parg); if (status) dev_dbg(bridge, "PM: Failed to ctrl the DSP clk" "= 0x%x\n", *parg); } } /* * ======== io_dpc ======== * Deferred procedure call for shared memory channel driver ISR. Carries * out the dispatch of I/O as a non-preemptible event. It can only be * pre-empted by an ISR. 
*/
void io_dpc(unsigned long ref_data)
{
	struct io_mgr *pio_mgr = (struct io_mgr *)ref_data;
	struct chnl_mgr *chnl_mgr_obj;
	struct msg_mgr *msg_mgr_obj;
	struct deh_mgr *hdeh_mgr;
	u32 requested;
	u32 serviced;

	if (!pio_mgr)
		goto func_end;
	chnl_mgr_obj = pio_mgr->chnl_mgr;
	dev_get_msg_mgr(pio_mgr->dev_obj, &msg_mgr_obj);
	dev_get_deh_mgr(pio_mgr->dev_obj, &hdeh_mgr);
	if (!chnl_mgr_obj)
		goto func_end;

	/*
	 * dpc_req is bumped (under dpc_lock) for every mailbox interrupt;
	 * dpc_sched counts how many of those requests this tasklet has
	 * already serviced.  'requested' is a snapshot, so interrupts that
	 * arrive during the loop are handled by the next tasklet run.
	 */
	requested = pio_mgr->dpc_req;
	serviced = pio_mgr->dpc_sched;

	if (serviced == requested)
		goto func_end;

	/* Process pending DPC's */
	do {
		/* Check value of interrupt reg to ensure it's a valid error */
		if ((pio_mgr->intr_val > DEH_BASE) &&
		    (pio_mgr->intr_val < DEH_LIMIT)) {
			/* Notify DSP/BIOS exception */
			if (hdeh_mgr) {
#ifdef CONFIG_TIDSPBRIDGE_BACKTRACE
				print_dsp_debug_trace(pio_mgr);
#endif
				bridge_deh_notify(hdeh_mgr, DSP_SYSERROR,
						  pio_mgr->intr_val);
			}
		}
		/* Proc-copy channel dispatch */
		input_chnl(pio_mgr, NULL, IO_SERVICE);
		output_chnl(pio_mgr, NULL, IO_SERVICE);

#ifdef CHNL_MESSAGES
		if (msg_mgr_obj) {
			/* Perform I/O dispatch on message queues */
			input_msg(pio_mgr, msg_mgr_obj);
			output_msg(pio_mgr, msg_mgr_obj);
		}
#endif
#ifdef CONFIG_TIDSPBRIDGE_BACKTRACE
		if (pio_mgr->intr_val & MBX_DBG_SYSPRINTF) {
			/* Notify DSP Trace message */
			print_dsp_debug_trace(pio_mgr);
		}
#endif
		serviced++;
	} while (serviced != requested);
	/* Publish how far we got; next run picks up from here */
	pio_mgr->dpc_sched = requested;
func_end:
	return;
}

/*
 * ======== io_mbox_msg ========
 * Main interrupt handler for the shared memory IO manager.
 * Calls the Bridge's CHNL_ISR to determine if this interrupt is ours, then
 * schedules a DPC to dispatch I/O.
*/ int io_mbox_msg(struct notifier_block *self, unsigned long len, void *msg) { struct io_mgr *pio_mgr; struct dev_object *dev_obj; unsigned long flags; dev_obj = dev_get_first(); dev_get_io_mgr(dev_obj, &pio_mgr); if (!pio_mgr) return NOTIFY_BAD; pio_mgr->intr_val = (u16)((u32)msg); if (pio_mgr->intr_val & MBX_PM_CLASS) io_dispatch_pm(pio_mgr); if (pio_mgr->intr_val == MBX_DEH_RESET) { pio_mgr->intr_val = 0; } else { spin_lock_irqsave(&pio_mgr->dpc_lock, flags); pio_mgr->dpc_req++; spin_unlock_irqrestore(&pio_mgr->dpc_lock, flags); tasklet_schedule(&pio_mgr->dpc_tasklet); } return NOTIFY_OK; } /* * ======== io_request_chnl ======== * Purpose: * Request channel I/O from the DSP. Sets flags in shared memory, then * interrupts the DSP. */ void io_request_chnl(struct io_mgr *io_manager, struct chnl_object *pchnl, u8 io_mode, u16 *mbx_val) { struct chnl_mgr *chnl_mgr_obj; struct shm *sm; if (!pchnl || !mbx_val) goto func_end; chnl_mgr_obj = io_manager->chnl_mgr; sm = io_manager->shared_mem; if (io_mode == IO_INPUT) { /* Indicate to the DSP we have a buffer available for input */ set_chnl_busy(sm, pchnl->chnl_id); *mbx_val = MBX_PCPY_CLASS; } else if (io_mode == IO_OUTPUT) { /* * Record the fact that we have a buffer available for * output. */ chnl_mgr_obj->output_mask |= (1 << pchnl->chnl_id); } else { } func_end: return; } /* * ======== iosm_schedule ======== * Schedule DPC for IO. */ void iosm_schedule(struct io_mgr *io_manager) { unsigned long flags; if (!io_manager) return; /* Increment count of DPC's pending. */ spin_lock_irqsave(&io_manager->dpc_lock, flags); io_manager->dpc_req++; spin_unlock_irqrestore(&io_manager->dpc_lock, flags); /* Schedule DPC */ tasklet_schedule(&io_manager->dpc_tasklet); } /* * ======== find_ready_output ======== * Search for a host output channel which is ready to send. 
If this is * called as a result of servicing the DPC, then implement a round * robin search; otherwise, this was called by a client thread (via * IO_Dispatch()), so just start searching from the current channel id. */ static u32 find_ready_output(struct chnl_mgr *chnl_mgr_obj, struct chnl_object *pchnl, u32 mask) { u32 ret = OUTPUTNOTREADY; u32 id, start_id; u32 shift; id = (pchnl != NULL ? pchnl->chnl_id : (chnl_mgr_obj->last_output + 1)); id = ((id == CHNL_MAXCHANNELS) ? 0 : id); if (id >= CHNL_MAXCHANNELS) goto func_end; if (mask) { shift = (1 << id); start_id = id; do { if (mask & shift) { ret = id; if (pchnl == NULL) chnl_mgr_obj->last_output = id; break; } id = id + 1; id = ((id == CHNL_MAXCHANNELS) ? 0 : id); shift = (1 << id); } while (id != start_id); } func_end: return ret; } /* * ======== input_chnl ======== * Dispatch a buffer on an input channel. */ static void input_chnl(struct io_mgr *pio_mgr, struct chnl_object *pchnl, u8 io_mode) { struct chnl_mgr *chnl_mgr_obj; struct shm *sm; u32 chnl_id; u32 bytes; struct chnl_irp *chnl_packet_obj = NULL; u32 dw_arg; bool clear_chnl = false; bool notify_client = false; sm = pio_mgr->shared_mem; chnl_mgr_obj = pio_mgr->chnl_mgr; /* Attempt to perform input */ if (!sm->input_full) goto func_end; bytes = sm->input_size * chnl_mgr_obj->word_size; chnl_id = sm->input_id; dw_arg = sm->arg; if (chnl_id >= CHNL_MAXCHANNELS) { /* Shouldn't be here: would indicate corrupted shm. */ goto func_end; } pchnl = chnl_mgr_obj->channels[chnl_id]; if ((pchnl != NULL) && CHNL_IS_INPUT(pchnl->chnl_mode)) { if ((pchnl->state & ~CHNL_STATEEOS) == CHNL_STATEREADY) { /* Get the I/O request, and attempt a transfer */ if (!list_empty(&pchnl->io_requests)) { if (!pchnl->cio_reqs) goto func_end; chnl_packet_obj = list_first_entry( &pchnl->io_requests, struct chnl_irp, link); list_del(&chnl_packet_obj->link); pchnl->cio_reqs--; /* * Ensure we don't overflow the client's * buffer. 
*/ bytes = min(bytes, chnl_packet_obj->byte_size); memcpy(chnl_packet_obj->host_sys_buf, pio_mgr->input, bytes); pchnl->bytes_moved += bytes; chnl_packet_obj->byte_size = bytes; chnl_packet_obj->arg = dw_arg; chnl_packet_obj->status = CHNL_IOCSTATCOMPLETE; if (bytes == 0) { /* * This assertion fails if the DSP * sends EOS more than once on this * channel. */ if (pchnl->state & CHNL_STATEEOS) goto func_end; /* * Zero bytes indicates EOS. Update * IOC status for this chirp, and also * the channel state. */ chnl_packet_obj->status |= CHNL_IOCSTATEOS; pchnl->state |= CHNL_STATEEOS; /* * Notify that end of stream has * occurred. */ ntfy_notify(pchnl->ntfy_obj, DSP_STREAMDONE); } /* Tell DSP if no more I/O buffers available */ if (list_empty(&pchnl->io_requests)) set_chnl_free(sm, pchnl->chnl_id); clear_chnl = true; notify_client = true; } else { /* * Input full for this channel, but we have no * buffers available. The channel must be * "idling". Clear out the physical input * channel. */ clear_chnl = true; } } else { /* Input channel cancelled: clear input channel */ clear_chnl = true; } } else { /* DPC fired after host closed channel: clear input channel */ clear_chnl = true; } if (clear_chnl) { /* Indicate to the DSP we have read the input */ sm->input_full = 0; sm_interrupt_dsp(pio_mgr->bridge_context, MBX_PCPY_CLASS); } if (notify_client) { /* Notify client with IO completion record */ notify_chnl_complete(pchnl, chnl_packet_obj); } func_end: return; } /* * ======== input_msg ======== * Copies messages from shared memory to the message queues. 
*/
static void input_msg(struct io_mgr *pio_mgr, struct msg_mgr *hmsg_mgr)
{
	u32 num_msgs;
	u32 i;
	u8 *msg_input;
	struct msg_queue *msg_queue_obj;
	struct msg_frame *pmsg;
	struct msg_dspmsg msg;
	struct msg_ctrl *msg_ctr_obj;
	u32 input_empty;
	u32 addr;

	msg_ctr_obj = pio_mgr->msg_input_ctrl;
	/* Get the number of input messages to be read */
	input_empty = msg_ctr_obj->buf_empty;
	/*
	 * NOTE(review): num_msgs comes straight from DSP-written shared
	 * memory and is not range-checked before driving the copy loop —
	 * verify it is bounded elsewhere.
	 */
	num_msgs = msg_ctr_obj->size;
	if (input_empty)
		return;

	msg_input = pio_mgr->msg_input;
	for (i = 0; i < num_msgs; i++) {
		/* Read the next message; each field is fetched individually
		 * through the external-memory accessor */
		addr = (u32) &(((struct msg_dspmsg *)msg_input)->msg.cmd);
		msg.msg.cmd =
			read_ext32_bit_dsp_data(pio_mgr->bridge_context, addr);

		addr = (u32) &(((struct msg_dspmsg *)msg_input)->msg.arg1);
		msg.msg.arg1 =
			read_ext32_bit_dsp_data(pio_mgr->bridge_context, addr);

		addr = (u32) &(((struct msg_dspmsg *)msg_input)->msg.arg2);
		msg.msg.arg2 =
			read_ext32_bit_dsp_data(pio_mgr->bridge_context, addr);

		addr = (u32) &(((struct msg_dspmsg *)msg_input)->msgq_id);
		msg.msgq_id =
			read_ext32_bit_dsp_data(pio_mgr->bridge_context, addr);

		msg_input += sizeof(struct msg_dspmsg);

		/* Determine which queue to put the message in */
		dev_dbg(bridge,	"input msg: cmd=0x%x arg1=0x%x "
			"arg2=0x%x msgq_id=0x%x\n", msg.msg.cmd,
			msg.msg.arg1, msg.msg.arg2, msg.msgq_id);
		/*
		 * Interrupt may occur before shared memory and message
		 * input locations have been set up. If all nodes were
		 * cleaned up, hmsg_mgr->max_msgs should be 0.
		 */
		list_for_each_entry(msg_queue_obj, &hmsg_mgr->queue_list,
				list_elem) {
			if (msg.msgq_id != msg_queue_obj->msgq_id)
				continue;
			/* Found it */
			if (msg.msg.cmd == RMS_EXITACK) {
				/*
				 * Call the node exit notification.
				 * The exit message does not get
				 * queued.
				 */
				(*hmsg_mgr->on_exit)(msg_queue_obj->arg,
						msg.msg.arg1);
				break;
			}
			/*
			 * Not an exit acknowledgement, queue
			 * the message.
			 */
			if (list_empty(&msg_queue_obj->msg_free_list)) {
				/*
				 * No free frame to copy the
				 * message into.
				 */
				pr_err("%s: no free msg frames,"
						" discarding msg\n",
						__func__);
				break;
			}

			pmsg = list_first_entry(&msg_queue_obj->msg_free_list,
					struct msg_frame, list_elem);
			list_del(&pmsg->list_elem);
			pmsg->msg_data = msg;
			list_add_tail(&pmsg->list_elem,
					&msg_queue_obj->msg_used_list);
			ntfy_notify(msg_queue_obj->ntfy_obj,
					DSP_NODEMESSAGEREADY);
			sync_set_event(msg_queue_obj->sync_event);
		}
	}
	/* Set the post SWI flag */
	if (num_msgs > 0) {
		/* Tell the DSP we've read the messages */
		msg_ctr_obj->buf_empty = true;
		msg_ctr_obj->post_swi = true;
		sm_interrupt_dsp(pio_mgr->bridge_context, MBX_PCPY_CLASS);
	}
}

/*
 * ======== notify_chnl_complete ========
 * Purpose:
 *     Signal the channel event, notifying the client that I/O has completed.
 */
static void notify_chnl_complete(struct chnl_object *pchnl,
				 struct chnl_irp *chnl_packet_obj)
{
	bool signal_event;

	if (!pchnl || !pchnl->sync_event || !chnl_packet_obj)
		goto func_end;

	/*
	 * Note: we signal the channel event only if the queue of IO
	 * completions is empty.  If it is not empty, the event is sure to be
	 * signalled by the only IO completion list consumer:
	 * bridge_chnl_get_ioc().
	 */
	signal_event = list_empty(&pchnl->io_completions);
	/* Enqueue the IO completion info for the client */
	list_add_tail(&chnl_packet_obj->link, &pchnl->io_completions);
	pchnl->cio_cs++;

	/* Sanity guard: more completions than packets means corrupted
	 * channel state — skip the event/notify in that case */
	if (pchnl->cio_cs > pchnl->chnl_packets)
		goto func_end;
	/* Signal the channel event (if not already set) that IO is complete */
	if (signal_event)
		sync_set_event(pchnl->sync_event);

	/* Notify that IO is complete */
	ntfy_notify(pchnl->ntfy_obj, DSP_STREAMIOCOMPLETION);
func_end:
	return;
}

/*
 * ======== output_chnl ========
 * Purpose:
 *     Dispatch a buffer on an output channel.
*/
static void output_chnl(struct io_mgr *pio_mgr, struct chnl_object *pchnl,
			u8 io_mode)
{
	struct chnl_mgr *chnl_mgr_obj;
	struct shm *sm;
	u32 chnl_id;
	struct chnl_irp *chnl_packet_obj;
	u32 dw_dsp_f_mask;

	chnl_mgr_obj = pio_mgr->chnl_mgr;
	sm = pio_mgr->shared_mem;
	/* Attempt to perform output */
	if (sm->output_full)
		goto func_end;

	if (pchnl && !((pchnl->state & ~CHNL_STATEEOS) == CHNL_STATEREADY))
		goto func_end;

	/* Look to see if both a PC and DSP output channel are ready */
	dw_dsp_f_mask = sm->dsp_free_mask;
	chnl_id =
	    find_ready_output(chnl_mgr_obj, pchnl,
			      (chnl_mgr_obj->output_mask & dw_dsp_f_mask));
	if (chnl_id == OUTPUTNOTREADY)
		goto func_end;

	pchnl = chnl_mgr_obj->channels[chnl_id];
	if (!pchnl || list_empty(&pchnl->io_requests)) {
		/* Shouldn't get here */
		goto func_end;
	}

	if (!pchnl->cio_reqs)
		goto func_end;

	/* Get the I/O request, and attempt a transfer */
	chnl_packet_obj = list_first_entry(&pchnl->io_requests,
					   struct chnl_irp, link);
	list_del(&chnl_packet_obj->link);
	pchnl->cio_reqs--;

	/* Record fact that no more I/O buffers available */
	if (list_empty(&pchnl->io_requests))
		chnl_mgr_obj->output_mask &= ~(1 << chnl_id);

	/* Transfer buffer to DSP side; clamp to the shared buffer size */
	chnl_packet_obj->byte_size = min(pio_mgr->sm_buf_size,
					 chnl_packet_obj->byte_size);
	memcpy(pio_mgr->output, chnl_packet_obj->host_sys_buf,
	       chnl_packet_obj->byte_size);
	pchnl->bytes_moved += chnl_packet_obj->byte_size;
	/* Write all 32 bits of arg */
	sm->arg = chnl_packet_obj->arg;
#if _CHNL_WORDSIZE == 2
	/* Access can be different SM access word size (e.g. 16/32 bit words) */
	sm->output_id = (u16) chnl_id;
	sm->output_size = (u16) (chnl_packet_obj->byte_size +
				chnl_mgr_obj->word_size - 1) /
				(u16) chnl_mgr_obj->word_size;
#else
	sm->output_id = chnl_id;
	sm->output_size = (chnl_packet_obj->byte_size +
			chnl_mgr_obj->word_size - 1) / chnl_mgr_obj->word_size;
#endif
	sm->output_full = 1;
	/* Indicate to the DSP we have written the output */
	sm_interrupt_dsp(pio_mgr->bridge_context, MBX_PCPY_CLASS);
	/* Notify client with IO completion record (keep EOS) */
	chnl_packet_obj->status &= CHNL_IOCSTATEOS;
	notify_chnl_complete(pchnl, chnl_packet_obj);
	/* Notify if stream is done. */
	if (chnl_packet_obj->status & CHNL_IOCSTATEOS)
		ntfy_notify(pchnl->ntfy_obj, DSP_STREAMDONE);
func_end:
	return;
}

/*
 * ======== output_msg ========
 * Copies messages from the message queues to the shared memory.
 */
static void output_msg(struct io_mgr *pio_mgr, struct msg_mgr *hmsg_mgr)
{
	u32 num_msgs = 0;
	u32 i;
	struct msg_dspmsg *msg_output;
	struct msg_frame *pmsg;
	struct msg_ctrl *msg_ctr_obj;
	u32 val;
	u32 addr;

	msg_ctr_obj = pio_mgr->msg_output_ctrl;

	/* Check if output has been cleared */
	if (!msg_ctr_obj->buf_empty)
		return;

	num_msgs = (hmsg_mgr->msgs_pending > hmsg_mgr->max_msgs) ?
	    hmsg_mgr->max_msgs : hmsg_mgr->msgs_pending;
	msg_output = (struct msg_dspmsg *) pio_mgr->msg_output;

	/* Copy num_msgs messages into shared memory */
	for (i = 0; i < num_msgs; i++) {
		/*
		 * NOTE(review): if msg_used_list runs dry early, the loop
		 * 'continue's but num_msgs is still used below for both
		 * msgs_pending and msg_ctr_obj->size, so fewer messages may
		 * actually have been written than advertised — confirm
		 * msgs_pending always matches the list length.
		 */
		if (list_empty(&hmsg_mgr->msg_used_list))
			continue;

		pmsg = list_first_entry(&hmsg_mgr->msg_used_list,
				struct msg_frame, list_elem);
		list_del(&pmsg->list_elem);

		/* Write each field through the external-memory accessor */
		val = (pmsg->msg_data).msgq_id;
		addr = (u32) &msg_output->msgq_id;
		write_ext32_bit_dsp_data(pio_mgr->bridge_context, addr, val);

		val = (pmsg->msg_data).msg.cmd;
		addr = (u32) &msg_output->msg.cmd;
		write_ext32_bit_dsp_data(pio_mgr->bridge_context, addr, val);

		val = (pmsg->msg_data).msg.arg1;
		addr = (u32) &msg_output->msg.arg1;
		write_ext32_bit_dsp_data(pio_mgr->bridge_context, addr, val);

		val = (pmsg->msg_data).msg.arg2;
		addr = (u32) &msg_output->msg.arg2;
		write_ext32_bit_dsp_data(pio_mgr->bridge_context, addr, val);

		msg_output++;
		list_add_tail(&pmsg->list_elem, &hmsg_mgr->msg_free_list);
		sync_set_event(hmsg_mgr->sync_event);
	}

	if (num_msgs > 0) {
		hmsg_mgr->msgs_pending -= num_msgs;
#if _CHNL_WORDSIZE == 2
		/*
		 * Access can be different SM access word size
		 * (e.g. 16/32 bit words)
		 */
		msg_ctr_obj->size = (u16) num_msgs;
#else
		msg_ctr_obj->size = num_msgs;
#endif
		msg_ctr_obj->buf_empty = false;
		/* Set the post SWI flag */
		msg_ctr_obj->post_swi = true;
		/* Tell the DSP we have written the output. */
		sm_interrupt_dsp(pio_mgr->bridge_context, MBX_PCPY_CLASS);
	}
}

/*
 * ======== register_shm_segs ========
 * purpose:
 *     Registers GPP SM segment with CMM.
 */
static int register_shm_segs(struct io_mgr *hio_mgr,
			     struct cod_manager *cod_man, u32 dw_gpp_base_pa)
{
	int status = 0;
	u32 ul_shm0_base = 0;
	u32 shm0_end = 0;
	u32 ul_shm0_rsrvd_start = 0;
	u32 ul_rsrvd_size = 0;
	u32 ul_gpp_phys;
	u32 ul_dsp_virt;
	u32 ul_shm_seg_id0 = 0;
	u32 dw_offset, dw_gpp_base_va, ul_dsp_size;

	/*
	 * Read address and size info for first SM region.
	 * Get start of 1st SM Heap region.
	 */
	status = cod_get_sym_value(cod_man, SHM0_SHARED_BASE_SYM,
				   &ul_shm0_base);
	if (ul_shm0_base == 0) {
		status = -EPERM;
		goto func_end;
	}
	/* Get end of 1st SM Heap region */
	if (!status) {
		/* Get start and length of message part of shared memory */
		status = cod_get_sym_value(cod_man, SHM0_SHARED_END_SYM,
					   &shm0_end);
		if (shm0_end == 0) {
			status = -EPERM;
			goto func_end;
		}
	}
	/* Start of Gpp reserved region */
	if (!status) {
		/* Get start and length of message part of shared memory */
		status = cod_get_sym_value(cod_man,
					   SHM0_SHARED_RESERVED_BASE_SYM,
					   &ul_shm0_rsrvd_start);
		if (ul_shm0_rsrvd_start == 0) {
			status = -EPERM;
			goto func_end;
		}
	}
	/* Register with CMM */
	if (!status) {
		status = dev_get_cmm_mgr(hio_mgr->dev_obj, &hio_mgr->cmm_mgr);
		if (!status) {
			status = cmm_un_register_gppsm_seg(hio_mgr->cmm_mgr,
							   CMM_ALLSEGMENTS);
		}
	}
	/* Register new SM region(s) */
	if (!status && (shm0_end - ul_shm0_base) > 0) {
		/* Calc size (bytes) of SM the GPP can alloc from */
		ul_rsrvd_size = (shm0_end - ul_shm0_rsrvd_start + 1) *
		    hio_mgr->word_size;
		/* NOTE(review): ul_rsrvd_size is u32, so '<= 0' only
		 * catches 0 — a negative (wrapped) size passes */
		if (ul_rsrvd_size <= 0) {
			status = -EPERM;
			goto func_end;
		}
		/* Calc size of SM DSP can alloc from */
		ul_dsp_size = (ul_shm0_rsrvd_start - ul_shm0_base) *
		    hio_mgr->word_size;
		/* NOTE(review): same u32 '<= 0' caveat as above */
		if (ul_dsp_size <= 0) {
			status = -EPERM;
			goto func_end;
		}
		/* First TLB entry reserved for Bridge SM use. */
		ul_gpp_phys = hio_mgr->ext_proc_info.ty_tlb[0].gpp_phys;
		/* Get size in bytes */
		ul_dsp_virt = hio_mgr->ext_proc_info.ty_tlb[0].dsp_virt *
		    hio_mgr->word_size;
		/*
		 * Calc byte offset used to convert GPP phys <-> DSP byte
		 * address.
		 */
		if (dw_gpp_base_pa > ul_dsp_virt)
			dw_offset = dw_gpp_base_pa - ul_dsp_virt;
		else
			dw_offset = ul_dsp_virt - dw_gpp_base_pa;

		if (ul_shm0_rsrvd_start * hio_mgr->word_size < ul_dsp_virt) {
			status = -EPERM;
			goto func_end;
		}
		/*
		 * Calc Gpp phys base of SM region.
		 * This is actually uncached kernel virtual address.
		 */
		dw_gpp_base_va = ul_gpp_phys + ul_shm0_rsrvd_start *
		    hio_mgr->word_size - ul_dsp_virt;
		/*
		 * Calc Gpp phys base of SM region.
		 * This is the physical address.
		 */
		dw_gpp_base_pa = dw_gpp_base_pa + ul_shm0_rsrvd_start *
		    hio_mgr->word_size - ul_dsp_virt;
		/* Register SM Segment 0. */
		status = cmm_register_gppsm_seg(hio_mgr->cmm_mgr,
						dw_gpp_base_pa, ul_rsrvd_size,
						dw_offset,
						(dw_gpp_base_pa >
						 ul_dsp_virt) ?
						CMM_ADDTODSPPA :
						CMM_SUBFROMDSPPA,
						(u32) (ul_shm0_base *
						       hio_mgr->word_size),
						ul_dsp_size, &ul_shm_seg_id0,
						dw_gpp_base_va);
		/* First SM region is seg_id = 1 */
		if (ul_shm_seg_id0 != 1)
			status = -EPERM;
	}
func_end:
	return status;
}

/* ZCPY IO routines. */
/*
 * ======== IO_SHMcontrol ========
 * Sets the requested shm setting.
 */
int io_sh_msetting(struct io_mgr *hio_mgr, u8 desc, void *pargs)
{
#ifdef CONFIG_TIDSPBRIDGE_DVFS
	u32 i;
	struct dspbridge_platform_data *pdata =
	    omap_dspbridge_dev->dev.platform_data;

	switch (desc) {
	case SHM_CURROPP:
		/* Update the shared memory with requested OPP information */
		if (pargs != NULL)
			hio_mgr->shared_mem->opp_table_struct.curr_opp_pt =
			    *(u32 *) pargs;
		else
			return -EPERM;
		break;
	case SHM_OPPINFO:
		/*
		 * Update the shared memory with the voltage, frequency,
		 * min and max frequency values for an OPP.
		 */
		for (i = 0; i <= dsp_max_opps; i++) {
			hio_mgr->shared_mem->opp_table_struct.opp_point[i].
			    voltage = vdd1_dsp_freq[i][0];
			dev_dbg(bridge, "OPP-shm: voltage: %d\n",
				vdd1_dsp_freq[i][0]);
			hio_mgr->shared_mem->opp_table_struct.
			    opp_point[i].frequency = vdd1_dsp_freq[i][1];
			dev_dbg(bridge, "OPP-shm: frequency: %d\n",
				vdd1_dsp_freq[i][1]);
			hio_mgr->shared_mem->opp_table_struct.opp_point[i].
			    min_freq = vdd1_dsp_freq[i][2];
			dev_dbg(bridge, "OPP-shm: min freq: %d\n",
				vdd1_dsp_freq[i][2]);
			hio_mgr->shared_mem->opp_table_struct.opp_point[i].
			    max_freq = vdd1_dsp_freq[i][3];
			dev_dbg(bridge, "OPP-shm: max freq: %d\n",
				vdd1_dsp_freq[i][3]);
		}
		hio_mgr->shared_mem->opp_table_struct.num_opp_pts =
		    dsp_max_opps;
		dev_dbg(bridge, "OPP-shm: max OPP number: %d\n", dsp_max_opps);
		/*
		 * Update the current OPP number.
		 * NOTE(review): if pdata->dsp_get_opp is NULL, 'i' keeps its
		 * post-loop value (dsp_max_opps + 1) and that is what gets
		 * written to curr_opp_pt — confirm this is intended.
		 */
		if (pdata->dsp_get_opp)
			i = (*pdata->dsp_get_opp) ();
		hio_mgr->shared_mem->opp_table_struct.curr_opp_pt = i;
		dev_dbg(bridge, "OPP-shm: value programmed = %d\n", i);
		break;
	case SHM_GETOPP:
		/* Get the OPP that DSP has requested */
		*(u32 *) pargs = hio_mgr->shared_mem->opp_request.rqst_opp_pt;
		break;
	default:
		break;
	}
#endif
	return 0;
}

/*
 * ======== bridge_io_get_proc_load ========
 * Gets the Processor's Load information
 */
int bridge_io_get_proc_load(struct io_mgr *hio_mgr,
			    struct dsp_procloadstat *proc_lstat)
{
	if (!hio_mgr->shared_mem)
		return -EFAULT;

	proc_lstat->curr_load =
	    hio_mgr->shared_mem->load_mon_info.curr_dsp_load;
	proc_lstat->predicted_load =
	    hio_mgr->shared_mem->load_mon_info.pred_dsp_load;
	proc_lstat->curr_dsp_freq =
	    hio_mgr->shared_mem->load_mon_info.curr_dsp_freq;
	proc_lstat->predicted_freq =
	    hio_mgr->shared_mem->load_mon_info.pred_dsp_freq;

	dev_dbg(bridge, "Curr Load = %d, Pred Load = %d, Curr Freq = %d, "
		"Pred Freq = %d\n", proc_lstat->curr_load,
		proc_lstat->predicted_load,
		proc_lstat->curr_dsp_freq, proc_lstat->predicted_freq);
	return 0;
}

#if defined(CONFIG_TIDSPBRIDGE_BACKTRACE)
/* Drain and print any new DSP trace text between the GPP read pointer and
 * the DSP's current write pointer, handling ring-buffer wraparound. */
void print_dsp_debug_trace(struct io_mgr *hio_mgr)
{
	u32 ul_new_message_length = 0, ul_gpp_cur_pointer;

	while (true) {
		/* Get the DSP current pointer */
		ul_gpp_cur_pointer =
		    *(u32 *) (hio_mgr->trace_buffer_current);
		ul_gpp_cur_pointer =
		    hio_mgr->gpp_va + (ul_gpp_cur_pointer -
				       hio_mgr->dsp_va);

		/* No new debug messages available yet */
		if (ul_gpp_cur_pointer == hio_mgr->gpp_read_pointer) {
			break;
		} else if (ul_gpp_cur_pointer > hio_mgr->gpp_read_pointer) {
			/* Continuous data */
			ul_new_message_length =
			    ul_gpp_cur_pointer - hio_mgr->gpp_read_pointer;

			memcpy(hio_mgr->msg,
			       (char *)hio_mgr->gpp_read_pointer,
			       ul_new_message_length);
			hio_mgr->msg[ul_new_message_length] = '\0';
			/*
			 * Advance the GPP trace pointer to DSP current
			 * pointer.
			 */
			hio_mgr->gpp_read_pointer += ul_new_message_length;
			/* Print the trace messages */
			pr_info("DSPTrace: %s\n", hio_mgr->msg);
		} else if (ul_gpp_cur_pointer < hio_mgr->gpp_read_pointer) {
			/* Handle trace buffer wraparound: copy the tail of
			 * the ring, then the wrapped head */
			memcpy(hio_mgr->msg,
			       (char *)hio_mgr->gpp_read_pointer,
			       hio_mgr->trace_buffer_end -
			       hio_mgr->gpp_read_pointer);
			ul_new_message_length =
			    ul_gpp_cur_pointer - hio_mgr->trace_buffer_begin;
			memcpy(&hio_mgr->msg[hio_mgr->trace_buffer_end -
					     hio_mgr->gpp_read_pointer],
			       (char *)hio_mgr->trace_buffer_begin,
			       ul_new_message_length);
			hio_mgr->msg[hio_mgr->trace_buffer_end -
				     hio_mgr->gpp_read_pointer +
				     ul_new_message_length] = '\0';
			/*
			 * Advance the GPP trace pointer to DSP current
			 * pointer.
			 */
			hio_mgr->gpp_read_pointer =
			    hio_mgr->trace_buffer_begin +
			    ul_new_message_length;
			/* Print the trace messages */
			pr_info("DSPTrace: %s\n", hio_mgr->msg);
		}
	}
}
#endif

#ifdef CONFIG_TIDSPBRIDGE_BACKTRACE
/*
 * ======== print_dsp_trace_buffer ========
 * Prints the trace buffer returned from the DSP (if DBG_Trace is enabled).
 * Parameters:
 *	hdeh_mgr:	Handle to DEH manager object
 *			number of extra carriage returns to generate.
 * Returns:
 *	0:		Success.
 *	-ENOMEM:	Unable to allocate memory.
 * Requires:
 *	hdeh_mgr must be valid.  Checked in bridge_deh_notify.
*/
/*
 * Read the whole DSP trace ring into a temporary buffer and print it,
 * newline-split, starting at the DSP's current write position.
 *
 * Fix: the two early 'goto func_end' error paths after brd_read() used to
 * leak psz_buf; the buffer is now freed before bailing out.
 */
int print_dsp_trace_buffer(struct bridge_dev_context *hbridge_context)
{
	int status = 0;
	struct cod_manager *cod_mgr;
	u32 ul_trace_end;
	u32 ul_trace_begin;
	u32 trace_cur_pos;
	u32 ul_num_bytes = 0;
	u32 ul_num_words = 0;
	u32 ul_word_size = 2;
	char *psz_buf;
	char *str_beg;
	char *trace_end;
	char *buf_end;
	char *new_line;
	struct bridge_dev_context *pbridge_context = hbridge_context;
	struct bridge_drv_interface *intf_fxns;
	struct dev_object *dev_obj = (struct dev_object *)
	    pbridge_context->dev_obj;

	status = dev_get_cod_mgr(dev_obj, &cod_mgr);
	if (cod_mgr) {
		/* Look for SYS_PUTCBEG/SYS_PUTCEND */
		status =
		    cod_get_sym_value(cod_mgr, COD_TRACEBEG, &ul_trace_begin);
	} else {
		status = -EFAULT;
	}
	if (!status)
		status =
		    cod_get_sym_value(cod_mgr, COD_TRACEEND, &ul_trace_end);

	if (!status)
		/* trace_cur_pos will hold the address of a DSP pointer */
		status = cod_get_sym_value(cod_mgr, COD_TRACECURPOS,
					   &trace_cur_pos);

	if (status)
		goto func_end;

	ul_num_bytes = (ul_trace_end - ul_trace_begin);
	ul_num_words = ul_num_bytes * ul_word_size;
	status = dev_get_intf_fxns(dev_obj, &intf_fxns);
	if (status)
		goto func_end;

	psz_buf = kzalloc(ul_num_bytes + 2, GFP_ATOMIC);
	if (psz_buf != NULL) {
		/* Read trace buffer data */
		status = (*intf_fxns->brd_read)(pbridge_context,
			(u8 *)psz_buf, (u32)ul_trace_begin,
			ul_num_bytes, 0);

		if (status) {
			kfree(psz_buf);	/* was leaked here */
			goto func_end;
		}

		/* Pack and do newline conversion */
		pr_debug("PrintDspTraceBuffer: "
			 "before pack and unpack.\n");
		pr_debug("%s: DSP Trace Buffer Begin:\n"
			 "=======================\n%s\n",
			 __func__, psz_buf);

		/* Read the value at the DSP address in trace_cur_pos. */
		status = (*intf_fxns->brd_read)(pbridge_context,
			(u8 *)&trace_cur_pos, (u32)trace_cur_pos,
			4, 0);
		if (status) {
			kfree(psz_buf);	/* was leaked here */
			goto func_end;
		}
		/* Pack and do newline conversion */
		pr_info("DSP Trace Buffer Begin:\n"
			"=======================\n%s\n",
			psz_buf);

		/* convert to offset */
		trace_cur_pos = trace_cur_pos - ul_trace_begin;

		if (ul_num_bytes) {
			/*
			 * The buffer is not full, find the end of the
			 * data -- buf_end will be >= pszBuf after
			 * while.
			 */
			buf_end = &psz_buf[ul_num_bytes+1];
			/* DSP print position */
			trace_end = &psz_buf[trace_cur_pos];

			/*
			 * Search buffer for a new_line and replace it
			 * with '\0', then print as string.
			 * Continue until end of buffer is reached.
			 */
			str_beg = trace_end;
			ul_num_bytes = buf_end - str_beg;

			while (str_beg < buf_end) {
				new_line = strnchr(str_beg, ul_num_bytes,
						   '\n');
				if (new_line && new_line < buf_end) {
					*new_line = 0;
					pr_debug("%s\n", str_beg);
					str_beg = ++new_line;
					ul_num_bytes = buf_end - str_beg;
				} else {
					/*
					 * Assume buffer empty if it contains
					 * a zero
					 */
					if (*str_beg != '\0') {
						str_beg[ul_num_bytes] = 0;
						pr_debug("%s\n", str_beg);
					}
					str_beg = buf_end;
					ul_num_bytes = 0;
				}
			}
			/*
			 * Search buffer for a nNewLine and replace it
			 * with '\0', then print as string.
			 * Continue until buffer is exhausted.
			 */
			str_beg = psz_buf;
			ul_num_bytes = trace_end - str_beg;

			while (str_beg < trace_end) {
				new_line = strnchr(str_beg, ul_num_bytes,
						   '\n');
				if (new_line != NULL && new_line < trace_end) {
					*new_line = 0;
					pr_debug("%s\n", str_beg);
					str_beg = ++new_line;
					ul_num_bytes = trace_end - str_beg;
				} else {
					/*
					 * Assume buffer empty if it contains
					 * a zero
					 */
					if (*str_beg != '\0') {
						str_beg[ul_num_bytes] = 0;
						pr_debug("%s\n", str_beg);
					}
					str_beg = trace_end;
					ul_num_bytes = 0;
				}
			}
		}
		pr_info("\n=======================\n"
			"DSP Trace Buffer End:\n");
		kfree(psz_buf);
	} else {
		status = -ENOMEM;
	}
func_end:
	if (status)
		dev_dbg(bridge, "%s Failed, status 0x%x\n", __func__, status);
	return status;
}

/**
 * dump_dsp_stack() - This function dumps the data on the DSP stack.
* @bridge_context: Bridge driver's device context pointer. * */ int dump_dsp_stack(struct bridge_dev_context *bridge_context) { int status = 0; struct cod_manager *code_mgr; struct node_mgr *node_mgr; u32 trace_begin; char name[256]; struct { u32 head[2]; u32 size; } mmu_fault_dbg_info; u32 *buffer; u32 *buffer_beg; u32 *buffer_end; u32 exc_type; u32 dyn_ext_base; u32 i; u32 offset_output; u32 total_size; u32 poll_cnt; const char *dsp_regs[] = {"EFR", "IERR", "ITSR", "NTSR", "IRP", "NRP", "AMR", "SSR", "ILC", "RILC", "IER", "CSR"}; const char *exec_ctxt[] = {"Task", "SWI", "HWI", "Unknown"}; struct bridge_drv_interface *intf_fxns; struct dev_object *dev_object = bridge_context->dev_obj; status = dev_get_cod_mgr(dev_object, &code_mgr); if (!code_mgr) { pr_debug("%s: Failed on dev_get_cod_mgr.\n", __func__); status = -EFAULT; } if (!status) { status = dev_get_node_manager(dev_object, &node_mgr); if (!node_mgr) { pr_debug("%s: Failed on dev_get_node_manager.\n", __func__); status = -EFAULT; } } if (!status) { /* Look for SYS_PUTCBEG/SYS_PUTCEND: */ status = cod_get_sym_value(code_mgr, COD_TRACEBEG, &trace_begin); pr_debug("%s: trace_begin Value 0x%x\n", __func__, trace_begin); if (status) pr_debug("%s: Failed on cod_get_sym_value.\n", __func__); } if (!status) status = dev_get_intf_fxns(dev_object, &intf_fxns); /* * Check for the "magic number" in the trace buffer. If it has * yet to appear then poll the trace buffer to wait for it. Its * appearance signals that the DSP has finished dumping its state. */ mmu_fault_dbg_info.head[0] = 0; mmu_fault_dbg_info.head[1] = 0; if (!status) { poll_cnt = 0; while ((mmu_fault_dbg_info.head[0] != MMU_FAULT_HEAD1 || mmu_fault_dbg_info.head[1] != MMU_FAULT_HEAD2) && poll_cnt < POLL_MAX) { /* Read DSP dump size from the DSP trace buffer... 
*/ status = (*intf_fxns->brd_read)(bridge_context, (u8 *)&mmu_fault_dbg_info, (u32)trace_begin, sizeof(mmu_fault_dbg_info), 0); if (status) break; poll_cnt++; } if (mmu_fault_dbg_info.head[0] != MMU_FAULT_HEAD1 && mmu_fault_dbg_info.head[1] != MMU_FAULT_HEAD2) { status = -ETIME; pr_err("%s:No DSP MMU-Fault information available.\n", __func__); } } if (!status) { total_size = mmu_fault_dbg_info.size; /* Limit the size in case DSP went crazy */ if (total_size > MAX_MMU_DBGBUFF) total_size = MAX_MMU_DBGBUFF; buffer = kzalloc(total_size, GFP_ATOMIC); if (!buffer) { status = -ENOMEM; pr_debug("%s: Failed to " "allocate stack dump buffer.\n", __func__); goto func_end; } buffer_beg = buffer; buffer_end = buffer + total_size / 4; /* Read bytes from the DSP trace buffer... */ status = (*intf_fxns->brd_read)(bridge_context, (u8 *)buffer, (u32)trace_begin, total_size, 0); if (status) { pr_debug("%s: Failed to Read Trace Buffer.\n", __func__); goto func_end; } pr_err("\nAproximate Crash Position:\n" "--------------------------\n"); exc_type = buffer[3]; if (!exc_type) i = buffer[79]; /* IRP */ else i = buffer[80]; /* NRP */ status = cod_get_sym_value(code_mgr, DYNEXTBASE, &dyn_ext_base); if (status) { status = -EFAULT; goto func_end; } if ((i > dyn_ext_base) && (node_find_addr(node_mgr, i, 0x1000, &offset_output, name) == 0)) pr_err("0x%-8x [\"%s\" + 0x%x]\n", i, name, i - offset_output); else pr_err("0x%-8x [Unable to match to a symbol.]\n", i); buffer += 4; pr_err("\nExecution Info:\n" "---------------\n"); if (*buffer < ARRAY_SIZE(exec_ctxt)) { pr_err("Execution context \t%s\n", exec_ctxt[*buffer++]); } else { pr_err("Execution context corrupt\n"); kfree(buffer_beg); return -EFAULT; } pr_err("Task Handle\t\t0x%x\n", *buffer++); pr_err("Stack Pointer\t\t0x%x\n", *buffer++); pr_err("Stack Top\t\t0x%x\n", *buffer++); pr_err("Stack Bottom\t\t0x%x\n", *buffer++); pr_err("Stack Size\t\t0x%x\n", *buffer++); pr_err("Stack Size In Use\t0x%x\n", *buffer++); pr_err("\nCPU Registers\n" 
"---------------\n"); for (i = 0; i < 32; i++) { if (i == 4 || i == 6 || i == 8) pr_err("A%d 0x%-8x [Function Argument %d]\n", i, *buffer++, i-3); else if (i == 15) pr_err("A15 0x%-8x [Frame Pointer]\n", *buffer++); else pr_err("A%d 0x%x\n", i, *buffer++); } pr_err("\nB0 0x%x\n", *buffer++); pr_err("B1 0x%x\n", *buffer++); pr_err("B2 0x%x\n", *buffer++); if ((*buffer > dyn_ext_base) && (node_find_addr(node_mgr, *buffer, 0x1000, &offset_output, name) == 0)) pr_err("B3 0x%-8x [Function Return Pointer:" " \"%s\" + 0x%x]\n", *buffer, name, *buffer - offset_output); else pr_err("B3 0x%-8x [Function Return Pointer:" "Unable to match to a symbol.]\n", *buffer); buffer++; for (i = 4; i < 32; i++) { if (i == 4 || i == 6 || i == 8) pr_err("B%d 0x%-8x [Function Argument %d]\n", i, *buffer++, i-2); else if (i == 14) pr_err("B14 0x%-8x [Data Page Pointer]\n", *buffer++); else pr_err("B%d 0x%x\n", i, *buffer++); } pr_err("\n"); for (i = 0; i < ARRAY_SIZE(dsp_regs); i++) pr_err("%s 0x%x\n", dsp_regs[i], *buffer++); pr_err("\nStack:\n" "------\n"); for (i = 0; buffer < buffer_end; i++, buffer++) { if ((*buffer > dyn_ext_base) && ( node_find_addr(node_mgr, *buffer , 0x600, &offset_output, name) == 0)) pr_err("[%d] 0x%-8x [\"%s\" + 0x%x]\n", i, *buffer, name, *buffer - offset_output); else pr_err("[%d] 0x%x\n", i, *buffer); } kfree(buffer_beg); } func_end: return status; } /** * dump_dl_modules() - This functions dumps the _DLModules loaded in DSP side * @bridge_context: Bridge driver's device context pointer. 
 *
 */
void dump_dl_modules(struct bridge_dev_context *bridge_context)
{
	struct cod_manager *code_mgr;
	struct bridge_drv_interface *intf_fxns;
	struct bridge_dev_context *bridge_ctxt = bridge_context;
	struct dev_object *dev_object = bridge_ctxt->dev_obj;
	struct modules_header modules_hdr;
	struct dll_module *module_struct = NULL;
	u32 module_dsp_addr;
	u32 module_size;
	u32 module_struct_size = 0;
	u32 sect_ndx;
	char *sect_str ;
	int status = 0;

	status = dev_get_intf_fxns(dev_object, &intf_fxns);
	if (status) {
		pr_debug("%s: Failed on dev_get_intf_fxns.\n", __func__);
		goto func_end;
	}

	status = dev_get_cod_mgr(dev_object, &code_mgr);
	if (!code_mgr) {
		pr_debug("%s: Failed on dev_get_cod_mgr.\n", __func__);
		status = -EFAULT;
		goto func_end;
	}

	/* Lookup the address of the modules_header structure */
	status = cod_get_sym_value(code_mgr, "_DLModules", &module_dsp_addr);
	if (status) {
		pr_debug("%s: Failed on cod_get_sym_value for _DLModules.\n",
			 __func__);
		goto func_end;
	}

	pr_debug("%s: _DLModules at 0x%x\n", __func__, module_dsp_addr);

	/* Copy the modules_header structure from DSP memory. */
	status = (*intf_fxns->brd_read)(bridge_context, (u8 *) &modules_hdr,
				(u32) module_dsp_addr, sizeof(modules_hdr), 0);

	if (status) {
		pr_debug("%s: Failed failed to read modules header.\n",
			 __func__);
		goto func_end;
	}

	module_dsp_addr = modules_hdr.first_module;
	module_size = modules_hdr.first_module_size;

	pr_debug("%s: dll_module_header 0x%x %d\n", __func__, module_dsp_addr,
		 module_size);

	pr_err("\nDynamically Loaded Modules:\n"
	       "---------------------------\n");

	/* For each dll_module structure in the list... */
	while (module_size) {
		/*
		 * Allocate/re-allocate memory to hold the dll_module
		 * structure. The memory is re-allocated only if the existing
		 * allocation is too small.
		 */
		if (module_size > module_struct_size) {
			kfree(module_struct);
			/* +128 slack for the trailing section-name strings */
			module_struct = kzalloc(module_size+128, GFP_ATOMIC);
			module_struct_size = module_size+128;
			pr_debug("%s: allocated module struct %p %d\n",
				 __func__, module_struct, module_struct_size);
			/* kfree(NULL) at func_end is a safe no-op */
			if (!module_struct)
				goto func_end;
		}
		/* Copy the dll_module structure from DSP memory */
		status = (*intf_fxns->brd_read)(bridge_context,
			(u8 *)module_struct, module_dsp_addr, module_size, 0);

		if (status) {
			pr_debug(
			"%s: Failed to read dll_module struct for 0x%x.\n",
			__func__, module_dsp_addr);
			break;
		}

		/* Update info regarding the _next_ module in the list. */
		module_dsp_addr = module_struct->next_module;
		module_size = module_struct->next_module_size;

		pr_debug("%s: next module 0x%x %d, this module num sects %d\n",
			 __func__, module_dsp_addr, module_size,
			 module_struct->num_sects);

		/*
		 * The section name strings start immediately following
		 * the array of dll_sect structures.
		 */
		sect_str = (char *) &module_struct->
					sects[module_struct->num_sects];
		pr_err("%s\n", sect_str);

		/*
		 * Advance to the first section name string.
		 * Each string follows the one before.
		 */
		sect_str += strlen(sect_str) + 1;

		/* Access each dll_sect structure and its name string. */
		for (sect_ndx = 0;
		     sect_ndx < module_struct->num_sects; sect_ndx++) {
			pr_err(" Section: 0x%x ",
			       module_struct->sects[sect_ndx].sect_load_adr);

			/* Bounds-check before dereferencing the name string */
			if (((u32) sect_str - (u32) module_struct) <
			    module_struct_size) {
				pr_err("%s\n", sect_str);
				/* Each string follows the one before. */
				sect_str += strlen(sect_str)+1;
			} else {
				pr_err("<string error>\n");
				pr_debug("%s: section name sting address "
					 "is invalid %p\n", __func__,
					 sect_str);
			}
		}
	}
func_end:
	kfree(module_struct);
}
#endif
gpl-2.0
RoGod/Kernel_SleeDry_HTC
arch/mips/ath79/common.c
3004
2301
/* * Atheros AR71XX/AR724X/AR913X common routines * * Copyright (C) 2008-2010 Gabor Juhos <juhosg@openwrt.org> * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published * by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/types.h> #include <linux/spinlock.h> #include <asm/mach-ath79/ath79.h> #include <asm/mach-ath79/ar71xx_regs.h> #include "common.h" static DEFINE_SPINLOCK(ath79_device_reset_lock); u32 ath79_cpu_freq; EXPORT_SYMBOL_GPL(ath79_cpu_freq); u32 ath79_ahb_freq; EXPORT_SYMBOL_GPL(ath79_ahb_freq); u32 ath79_ddr_freq; EXPORT_SYMBOL_GPL(ath79_ddr_freq); enum ath79_soc_type ath79_soc; void __iomem *ath79_pll_base; void __iomem *ath79_reset_base; EXPORT_SYMBOL_GPL(ath79_reset_base); void __iomem *ath79_ddr_base; void ath79_ddr_wb_flush(u32 reg) { void __iomem *flush_reg = ath79_ddr_base + reg; /* Flush the DDR write buffer. */ __raw_writel(0x1, flush_reg); while (__raw_readl(flush_reg) & 0x1) ; /* It must be run twice. 
*/ __raw_writel(0x1, flush_reg); while (__raw_readl(flush_reg) & 0x1) ; } EXPORT_SYMBOL_GPL(ath79_ddr_wb_flush); void ath79_device_reset_set(u32 mask) { unsigned long flags; u32 reg; u32 t; if (soc_is_ar71xx()) reg = AR71XX_RESET_REG_RESET_MODULE; else if (soc_is_ar724x()) reg = AR724X_RESET_REG_RESET_MODULE; else if (soc_is_ar913x()) reg = AR913X_RESET_REG_RESET_MODULE; else BUG(); spin_lock_irqsave(&ath79_device_reset_lock, flags); t = ath79_reset_rr(reg); ath79_reset_wr(reg, t | mask); spin_unlock_irqrestore(&ath79_device_reset_lock, flags); } EXPORT_SYMBOL_GPL(ath79_device_reset_set); void ath79_device_reset_clear(u32 mask) { unsigned long flags; u32 reg; u32 t; if (soc_is_ar71xx()) reg = AR71XX_RESET_REG_RESET_MODULE; else if (soc_is_ar724x()) reg = AR724X_RESET_REG_RESET_MODULE; else if (soc_is_ar913x()) reg = AR913X_RESET_REG_RESET_MODULE; else BUG(); spin_lock_irqsave(&ath79_device_reset_lock, flags); t = ath79_reset_rr(reg); ath79_reset_wr(reg, t & ~mask); spin_unlock_irqrestore(&ath79_device_reset_lock, flags); } EXPORT_SYMBOL_GPL(ath79_device_reset_clear);
gpl-2.0
gchild320/flo
drivers/video/msm/mipi_renesas_video_fwvga_pt.c
3516
4509
/* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

/*
 * Panel registration for a Renesas FWVGA (480x864) DSI panel driven in
 * video mode as the primary display.  All values below are board/panel
 * tuning data; do not reorder the table entries.
 */

#include "msm_fb.h"
#include "mipi_dsi.h"
#include "mipi_renesas.h"

/* Filled in by the init function below, then handed to the MIPI core. */
static struct msm_panel_info pinfo;

/*
 * DSI PHY configuration.  Two variants are compiled depending on the
 * target MDP: MDP303 uses a 500 MHz / 2-lane setup, everything else a
 * 400 MHz / 1-lane setup (2-lane when RENESAS_FWVGA_TWO_LANE is set).
 * The raw values come from vendor calibration — treat as opaque.
 */
static struct mipi_dsi_phy_ctrl dsi_video_mode_phy_db = {
#ifdef CONFIG_FB_MSM_MDP303
	/* DSI Bit Clock at 500 MHz, 2 lane, RGB888 */
	/* regulator */
	{0x03, 0x01, 0x01, 0x00},
	/* timing */
	{0xb9, 0x8e, 0x1f, 0x00, 0x98, 0x9c, 0x22, 0x90,
	0x18, 0x03, 0x04},
	/* phy ctrl */
	{0x7f, 0x00, 0x00, 0x00},
	/* strength */
	{0xbb, 0x02, 0x06, 0x00},
	/* pll control */
	{0x00, 0xec, 0x31, 0xd2, 0x00, 0x40, 0x37, 0x62,
	0x01, 0x0f, 0x07,
	0x05, 0x14, 0x03, 0x0, 0x0, 0x0, 0x20, 0x0, 0x02, 0x0},
#else
	/* DSI_BIT_CLK at 400MHz, 1 lane, RGB888 */
	/* regulator */
	{0x03, 0x01, 0x01, 0x00},
	/* timing */
	{0xaa, 0x3b, 0x1b, 0x00, 0x52, 0x58, 0x20, 0x3f,
	0x2e, 0x03, 0x04},
	/* phy ctrl */
	{0x7f, 0x00, 0x00, 0x00},
	/* strength */
	{0xee, 0x00, 0x86, 0x00},
	/* pll control */
	{0x40, 0xc7, 0xb0, 0xda, 0x00, 0x50, 0x48, 0x63,
#if defined(RENESAS_FWVGA_TWO_LANE)
	0x30, 0x07, 0x03,
#else
	/* default set to 1 lane */
	0x30, 0x07, 0x07,
#endif
	0x05, 0x14, 0x03, 0x0, 0x0, 0x54, 0x06, 0x10, 0x04, 0x0},
#endif
};

/*
 * Populate pinfo (geometry, porches, DSI mode flags) and register the
 * panel with the Renesas MIPI driver.  Returns 0 on success, or skips
 * registration entirely when another client already claimed the panel.
 */
static int __init mipi_video_renesas_fwvga_pt_init(void)
{
	int ret;

	/* Bail out if the framebuffer core detected a different client. */
	if (msm_fb_detect_client("mipi_video_renesas_fwvga"))
		return 0;

	/* FWVGA portrait geometry. */
	pinfo.xres = 480;
	pinfo.yres = 864;
	pinfo.type = MIPI_VIDEO_PANEL;
	pinfo.pdest = DISPLAY_1;
	pinfo.wait_cycle = 0;
	pinfo.bpp = 24;

	/* Horizontal/vertical porch timing — varies per MDP and lane count. */
#ifdef CONFIG_FB_MSM_MDP303
	pinfo.lcdc.h_back_porch = 100;
	pinfo.lcdc.h_front_porch = 100;
	pinfo.lcdc.h_pulse_width = 8;
	pinfo.lcdc.v_back_porch = 20;
	pinfo.lcdc.v_front_porch = 20;
	pinfo.lcdc.v_pulse_width = 1;
	pinfo.clk_rate = 499000000;
#else
#if defined(RENESAS_FWVGA_TWO_LANE)
	pinfo.lcdc.h_back_porch = 400;
#else
	pinfo.lcdc.h_back_porch = 50;
#endif
	pinfo.lcdc.h_front_porch = 50;
#if defined(RENESAS_FWVGA_TWO_LANE)
	pinfo.lcdc.h_pulse_width = 5;
#else
	pinfo.lcdc.h_pulse_width = 20;
#endif
#if defined(RENESAS_FWVGA_TWO_LANE)
	pinfo.lcdc.v_back_porch = 75;
	pinfo.lcdc.v_front_porch = 5;
	pinfo.lcdc.v_pulse_width = 1;
#else
	pinfo.lcdc.v_back_porch = 10;
	pinfo.lcdc.v_front_porch = 10;
	pinfo.lcdc.v_pulse_width = 5;
#endif
#endif

	pinfo.lcdc.border_clr = 0;	/* blk */
	pinfo.lcdc.underflow_clr = 0xff;	/* blue */
	pinfo.lcdc.hsync_skew = 0;
	pinfo.bl_max = 255;
	pinfo.bl_min = 1;
	pinfo.fb_num = 2;

	/* Video-mode DSI with low-power stops in all blanking intervals. */
	pinfo.mipi.mode = DSI_VIDEO_MODE;
	pinfo.mipi.pulse_mode_hsa_he = TRUE;
	pinfo.mipi.hfp_power_stop = TRUE;
	pinfo.mipi.hbp_power_stop = TRUE;
	pinfo.mipi.hsa_power_stop = TRUE;
	pinfo.mipi.eof_bllp_power_stop = TRUE;
	pinfo.mipi.bllp_power_stop = TRUE;

#ifdef CONFIG_FB_MSM_MDP303
	pinfo.mipi.traffic_mode = DSI_BURST_MODE;
	pinfo.mipi.dst_format = DSI_VIDEO_DST_FORMAT_RGB888;
	pinfo.mipi.vc = 0;
	pinfo.mipi.rgb_swap = DSI_RGB_SWAP_RGB;
	pinfo.mipi.data_lane0 = TRUE;
	pinfo.mipi.data_lane1 = TRUE;
	pinfo.mipi.t_clk_post = 0x20;
	pinfo.mipi.t_clk_pre = 0x2F;
	pinfo.mipi.stream = 0; /* dma_p */
	pinfo.mipi.mdp_trigger = DSI_CMD_TRIGGER_NONE;
	pinfo.mipi.dma_trigger = DSI_CMD_TRIGGER_SW;
	pinfo.mipi.frame_rate = 60;
	pinfo.mipi.dsi_phy_db = &dsi_video_mode_phy_db;
	pinfo.mipi.dlane_swap = 0x01;
	pinfo.mipi.tx_eot_append = 0x01;
#else
	pinfo.mipi.traffic_mode = DSI_NON_BURST_SYNCH_PULSE;
	pinfo.mipi.dst_format = DSI_VIDEO_DST_FORMAT_RGB888;
	pinfo.mipi.vc = 0;
	pinfo.mipi.rgb_swap = DSI_RGB_SWAP_BGR;
	pinfo.mipi.data_lane0 = TRUE;
#if defined(RENESAS_FWVGA_TWO_LANE)
	pinfo.mipi.data_lane1 = TRUE;
#else
	pinfo.mipi.data_lane1 = FALSE;
#endif
	pinfo.mipi.t_clk_post = 0x03;
	pinfo.mipi.t_clk_pre = 0x24;
	pinfo.mipi.stream = 0; /* dma_p */
	pinfo.mipi.mdp_trigger = DSI_CMD_TRIGGER_SW;
	pinfo.mipi.dma_trigger = DSI_CMD_TRIGGER_SW;
	pinfo.mipi.frame_rate = 60;
	pinfo.mipi.dsi_phy_db = &dsi_video_mode_phy_db;
#endif

	ret = mipi_renesas_device_register(&pinfo, MIPI_DSI_PRIM,
						MIPI_DSI_PANEL_FWVGA_PT);
	if (ret)
		pr_err("%s: failed to register device!\n", __func__);

	return ret;
}

module_init(mipi_video_renesas_fwvga_pt_init);
gpl-2.0
troth/linux-kernel
drivers/media/dvb-frontends/lnbp22.c
3772
3903
/*
 * lnbp22.c - driver for lnb supply and control ic lnbp22
 *
 * Copyright (C) 2006 Dominik Kuhlen
 * Based on lnbp21 driver
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 * Or, point your browser to http://www.gnu.org/copyleft/gpl.html
 *
 *
 * the project's page is at http://www.linuxtv.org
 */

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/string.h>
#include <linux/slab.h>

#include "dvb_frontend.h"
#include "lnbp22.h"

static int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Turn on/off debugging (default:off).");

/*
 * Wrapped in do { } while (0) so the macro expands to exactly one
 * statement: the previous bare "if (...) printk(arg)" form could
 * capture an 'else' from the surrounding code (dangling-else hazard)
 * when used unbraced inside an if/else.
 */
#define dprintk(lvl, arg...)			\
	do {					\
		if (debug >= (lvl))		\
			printk(arg);		\
	} while (0)

/* Per-device state: shadow of the 4-byte register image plus the bus. */
struct lnbp22 {
	u8 config[4];		/* config[3] carries EN/VSEL/LLC control bits */
	struct i2c_adapter *i2c;
};

/*
 * Program the LNB supply voltage (13V/18V) or power the supply down.
 * Writes the full 4-byte config block to the chip at I2C address 0x08.
 * Returns 0 on success, -EINVAL for an unknown voltage, -EIO on bus error.
 */
static int lnbp22_set_voltage(struct dvb_frontend *fe, fe_sec_voltage_t voltage)
{
	struct lnbp22 *lnbp22 = (struct lnbp22 *)fe->sec_priv;
	struct i2c_msg msg = {
		.addr = 0x08,
		.flags = 0,
		.buf = (char *)&lnbp22->config,
		.len = sizeof(lnbp22->config),
	};

	dprintk(1, "%s: %d (18V=%d 13V=%d)\n", __func__, voltage,
	       SEC_VOLTAGE_18, SEC_VOLTAGE_13);

	lnbp22->config[3] = 0x60; /* Power down */
	switch (voltage) {
	case SEC_VOLTAGE_OFF:
		break;
	case SEC_VOLTAGE_13:
		lnbp22->config[3] |= LNBP22_EN;
		break;
	case SEC_VOLTAGE_18:
		lnbp22->config[3] |= (LNBP22_EN | LNBP22_VSEL);
		break;
	default:
		return -EINVAL;
	}

	dprintk(1, "%s: 0x%02x)\n", __func__, lnbp22->config[3]);
	return (i2c_transfer(lnbp22->i2c, &msg, 1) == 1) ? 0 : -EIO;
}

/*
 * Toggle the long-line-compensation bit (slightly raised voltage for
 * long cable runs) and rewrite the config block.
 */
static int lnbp22_enable_high_lnb_voltage(struct dvb_frontend *fe, long arg)
{
	struct lnbp22 *lnbp22 = (struct lnbp22 *) fe->sec_priv;
	struct i2c_msg msg = {
		.addr = 0x08,
		.flags = 0,
		.buf = (char *)&lnbp22->config,
		.len = sizeof(lnbp22->config),
	};

	dprintk(1, "%s: %d\n", __func__, (int)arg);
	if (arg)
		lnbp22->config[3] |= LNBP22_LLC;
	else
		lnbp22->config[3] &= ~LNBP22_LLC;

	return (i2c_transfer(lnbp22->i2c, &msg, 1) == 1) ? 0 : -EIO;
}

/* Frontend teardown: power the LNB supply off and free our state. */
static void lnbp22_release(struct dvb_frontend *fe)
{
	dprintk(1, "%s\n", __func__);
	/* LNBP power off */
	lnbp22_set_voltage(fe, SEC_VOLTAGE_OFF);

	/* free data */
	kfree(fe->sec_priv);
	fe->sec_priv = NULL;
}

/*
 * Attach the LNBP22 SEC controller to @fe.  Probes the chip by issuing
 * a power-off write; on success overrides the frontend's voltage ops
 * and returns @fe, otherwise returns NULL.  Ownership of the allocated
 * state passes to the frontend (freed in lnbp22_release).
 */
struct dvb_frontend *lnbp22_attach(struct dvb_frontend *fe,
					struct i2c_adapter *i2c)
{
	struct lnbp22 *lnbp22 = kmalloc(sizeof(struct lnbp22), GFP_KERNEL);

	if (!lnbp22)
		return NULL;

	/* default configuration */
	lnbp22->config[0] = 0x00; /* ? */
	lnbp22->config[1] = 0x28; /* ? */
	lnbp22->config[2] = 0x48; /* ? */
	lnbp22->config[3] = 0x60; /* Power down */
	lnbp22->i2c = i2c;
	fe->sec_priv = lnbp22;

	/* detect if it is present or not */
	if (lnbp22_set_voltage(fe, SEC_VOLTAGE_OFF)) {
		dprintk(0, "%s LNBP22 not found\n", __func__);
		kfree(lnbp22);
		fe->sec_priv = NULL;
		return NULL;
	}

	/* install release callback */
	fe->ops.release_sec = lnbp22_release;

	/* override frontend ops */
	fe->ops.set_voltage = lnbp22_set_voltage;
	fe->ops.enable_high_lnb_voltage = lnbp22_enable_high_lnb_voltage;

	return fe;
}
EXPORT_SYMBOL(lnbp22_attach);

MODULE_DESCRIPTION("Driver for lnb supply and control ic lnbp22");
MODULE_AUTHOR("Dominik Kuhlen");
MODULE_LICENSE("GPL");
gpl-2.0
ktd2004/linux-stable
drivers/media/dvb-frontends/dvb_dummy_fe.c
3772
6942
/* * Driver for Dummy Frontend * * Written by Emard <emard@softhome.net> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.= */ #include <linux/module.h> #include <linux/init.h> #include <linux/string.h> #include <linux/slab.h> #include "dvb_frontend.h" #include "dvb_dummy_fe.h" struct dvb_dummy_fe_state { struct dvb_frontend frontend; }; static int dvb_dummy_fe_read_status(struct dvb_frontend* fe, fe_status_t* status) { *status = FE_HAS_SIGNAL | FE_HAS_CARRIER | FE_HAS_VITERBI | FE_HAS_SYNC | FE_HAS_LOCK; return 0; } static int dvb_dummy_fe_read_ber(struct dvb_frontend* fe, u32* ber) { *ber = 0; return 0; } static int dvb_dummy_fe_read_signal_strength(struct dvb_frontend* fe, u16* strength) { *strength = 0; return 0; } static int dvb_dummy_fe_read_snr(struct dvb_frontend* fe, u16* snr) { *snr = 0; return 0; } static int dvb_dummy_fe_read_ucblocks(struct dvb_frontend* fe, u32* ucblocks) { *ucblocks = 0; return 0; } /* * Only needed if it actually reads something from the hardware */ static int dvb_dummy_fe_get_frontend(struct dvb_frontend *fe) { return 0; } static int dvb_dummy_fe_set_frontend(struct dvb_frontend *fe) { if (fe->ops.tuner_ops.set_params) { fe->ops.tuner_ops.set_params(fe); if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 0); } return 0; } static int dvb_dummy_fe_sleep(struct dvb_frontend* fe) { return 0; } static int 
dvb_dummy_fe_init(struct dvb_frontend* fe) { return 0; } static int dvb_dummy_fe_set_tone(struct dvb_frontend* fe, fe_sec_tone_mode_t tone) { return 0; } static int dvb_dummy_fe_set_voltage(struct dvb_frontend* fe, fe_sec_voltage_t voltage) { return 0; } static void dvb_dummy_fe_release(struct dvb_frontend* fe) { struct dvb_dummy_fe_state* state = fe->demodulator_priv; kfree(state); } static struct dvb_frontend_ops dvb_dummy_fe_ofdm_ops; struct dvb_frontend* dvb_dummy_fe_ofdm_attach(void) { struct dvb_dummy_fe_state* state = NULL; /* allocate memory for the internal state */ state = kzalloc(sizeof(struct dvb_dummy_fe_state), GFP_KERNEL); if (!state) return NULL; /* create dvb_frontend */ memcpy(&state->frontend.ops, &dvb_dummy_fe_ofdm_ops, sizeof(struct dvb_frontend_ops)); state->frontend.demodulator_priv = state; return &state->frontend; } static struct dvb_frontend_ops dvb_dummy_fe_qpsk_ops; struct dvb_frontend *dvb_dummy_fe_qpsk_attach(void) { struct dvb_dummy_fe_state* state = NULL; /* allocate memory for the internal state */ state = kzalloc(sizeof(struct dvb_dummy_fe_state), GFP_KERNEL); if (!state) return NULL; /* create dvb_frontend */ memcpy(&state->frontend.ops, &dvb_dummy_fe_qpsk_ops, sizeof(struct dvb_frontend_ops)); state->frontend.demodulator_priv = state; return &state->frontend; } static struct dvb_frontend_ops dvb_dummy_fe_qam_ops; struct dvb_frontend *dvb_dummy_fe_qam_attach(void) { struct dvb_dummy_fe_state* state = NULL; /* allocate memory for the internal state */ state = kzalloc(sizeof(struct dvb_dummy_fe_state), GFP_KERNEL); if (!state) return NULL; /* create dvb_frontend */ memcpy(&state->frontend.ops, &dvb_dummy_fe_qam_ops, sizeof(struct dvb_frontend_ops)); state->frontend.demodulator_priv = state; return &state->frontend; } static struct dvb_frontend_ops dvb_dummy_fe_ofdm_ops = { .delsys = { SYS_DVBT }, .info = { .name = "Dummy DVB-T", .frequency_min = 0, .frequency_max = 863250000, .frequency_stepsize = 62500, .caps = FE_CAN_FEC_1_2 | 
FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 | FE_CAN_FEC_4_5 | FE_CAN_FEC_5_6 | FE_CAN_FEC_6_7 | FE_CAN_FEC_7_8 | FE_CAN_FEC_8_9 | FE_CAN_FEC_AUTO | FE_CAN_QAM_16 | FE_CAN_QAM_64 | FE_CAN_QAM_AUTO | FE_CAN_TRANSMISSION_MODE_AUTO | FE_CAN_GUARD_INTERVAL_AUTO | FE_CAN_HIERARCHY_AUTO, }, .release = dvb_dummy_fe_release, .init = dvb_dummy_fe_init, .sleep = dvb_dummy_fe_sleep, .set_frontend = dvb_dummy_fe_set_frontend, .get_frontend = dvb_dummy_fe_get_frontend, .read_status = dvb_dummy_fe_read_status, .read_ber = dvb_dummy_fe_read_ber, .read_signal_strength = dvb_dummy_fe_read_signal_strength, .read_snr = dvb_dummy_fe_read_snr, .read_ucblocks = dvb_dummy_fe_read_ucblocks, }; static struct dvb_frontend_ops dvb_dummy_fe_qam_ops = { .delsys = { SYS_DVBC_ANNEX_A }, .info = { .name = "Dummy DVB-C", .frequency_stepsize = 62500, .frequency_min = 51000000, .frequency_max = 858000000, .symbol_rate_min = (57840000/2)/64, /* SACLK/64 == (XIN/2)/64 */ .symbol_rate_max = (57840000/2)/4, /* SACLK/4 */ .caps = FE_CAN_QAM_16 | FE_CAN_QAM_32 | FE_CAN_QAM_64 | FE_CAN_QAM_128 | FE_CAN_QAM_256 | FE_CAN_FEC_AUTO | FE_CAN_INVERSION_AUTO }, .release = dvb_dummy_fe_release, .init = dvb_dummy_fe_init, .sleep = dvb_dummy_fe_sleep, .set_frontend = dvb_dummy_fe_set_frontend, .get_frontend = dvb_dummy_fe_get_frontend, .read_status = dvb_dummy_fe_read_status, .read_ber = dvb_dummy_fe_read_ber, .read_signal_strength = dvb_dummy_fe_read_signal_strength, .read_snr = dvb_dummy_fe_read_snr, .read_ucblocks = dvb_dummy_fe_read_ucblocks, }; static struct dvb_frontend_ops dvb_dummy_fe_qpsk_ops = { .delsys = { SYS_DVBS }, .info = { .name = "Dummy DVB-S", .frequency_min = 950000, .frequency_max = 2150000, .frequency_stepsize = 250, /* kHz for QPSK frontends */ .frequency_tolerance = 29500, .symbol_rate_min = 1000000, .symbol_rate_max = 45000000, .caps = FE_CAN_INVERSION_AUTO | FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 | FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO | FE_CAN_QPSK }, .release = 
dvb_dummy_fe_release, .init = dvb_dummy_fe_init, .sleep = dvb_dummy_fe_sleep, .set_frontend = dvb_dummy_fe_set_frontend, .get_frontend = dvb_dummy_fe_get_frontend, .read_status = dvb_dummy_fe_read_status, .read_ber = dvb_dummy_fe_read_ber, .read_signal_strength = dvb_dummy_fe_read_signal_strength, .read_snr = dvb_dummy_fe_read_snr, .read_ucblocks = dvb_dummy_fe_read_ucblocks, .set_voltage = dvb_dummy_fe_set_voltage, .set_tone = dvb_dummy_fe_set_tone, }; MODULE_DESCRIPTION("DVB DUMMY Frontend"); MODULE_AUTHOR("Emard"); MODULE_LICENSE("GPL"); EXPORT_SYMBOL(dvb_dummy_fe_ofdm_attach); EXPORT_SYMBOL(dvb_dummy_fe_qam_attach); EXPORT_SYMBOL(dvb_dummy_fe_qpsk_attach);
gpl-2.0
djvoleur/V_925P_BOF7
sound/core/oss/pcm_oss.c
4028
87234
/* * Digital Audio (PCM) abstract layer / OSS compatible * Copyright (c) by Jaroslav Kysela <perex@perex.cz> * * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #if 0 #define PLUGIN_DEBUG #endif #if 0 #define OSS_DEBUG #endif #include <linux/init.h> #include <linux/slab.h> #include <linux/time.h> #include <linux/vmalloc.h> #include <linux/module.h> #include <linux/math64.h> #include <linux/string.h> #include <sound/core.h> #include <sound/minors.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include "pcm_plugin.h" #include <sound/info.h> #include <linux/soundcard.h> #include <sound/initval.h> #include <sound/mixer_oss.h> #define OSS_ALSAEMULVER _SIOR ('M', 249, int) static int dsp_map[SNDRV_CARDS]; static int adsp_map[SNDRV_CARDS] = {[0 ... 
(SNDRV_CARDS-1)] = 1}; static bool nonblock_open = 1; MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>, Abramo Bagnara <abramo@alsa-project.org>"); MODULE_DESCRIPTION("PCM OSS emulation for ALSA."); MODULE_LICENSE("GPL"); module_param_array(dsp_map, int, NULL, 0444); MODULE_PARM_DESC(dsp_map, "PCM device number assigned to 1st OSS device."); module_param_array(adsp_map, int, NULL, 0444); MODULE_PARM_DESC(adsp_map, "PCM device number assigned to 2nd OSS device."); module_param(nonblock_open, bool, 0644); MODULE_PARM_DESC(nonblock_open, "Don't block opening busy PCM devices."); MODULE_ALIAS_SNDRV_MINOR(SNDRV_MINOR_OSS_PCM); MODULE_ALIAS_SNDRV_MINOR(SNDRV_MINOR_OSS_PCM1); static int snd_pcm_oss_get_rate(struct snd_pcm_oss_file *pcm_oss_file); static int snd_pcm_oss_get_channels(struct snd_pcm_oss_file *pcm_oss_file); static int snd_pcm_oss_get_format(struct snd_pcm_oss_file *pcm_oss_file); static inline mm_segment_t snd_enter_user(void) { mm_segment_t fs = get_fs(); set_fs(get_ds()); return fs; } static inline void snd_leave_user(mm_segment_t fs) { set_fs(fs); } /* * helper functions to process hw_params */ static int snd_interval_refine_min(struct snd_interval *i, unsigned int min, int openmin) { int changed = 0; if (i->min < min) { i->min = min; i->openmin = openmin; changed = 1; } else if (i->min == min && !i->openmin && openmin) { i->openmin = 1; changed = 1; } if (i->integer) { if (i->openmin) { i->min++; i->openmin = 0; } } if (snd_interval_checkempty(i)) { snd_interval_none(i); return -EINVAL; } return changed; } static int snd_interval_refine_max(struct snd_interval *i, unsigned int max, int openmax) { int changed = 0; if (i->max > max) { i->max = max; i->openmax = openmax; changed = 1; } else if (i->max == max && !i->openmax && openmax) { i->openmax = 1; changed = 1; } if (i->integer) { if (i->openmax) { i->max--; i->openmax = 0; } } if (snd_interval_checkempty(i)) { snd_interval_none(i); return -EINVAL; } return changed; } static int 
snd_interval_refine_set(struct snd_interval *i, unsigned int val) { struct snd_interval t; t.empty = 0; t.min = t.max = val; t.openmin = t.openmax = 0; t.integer = 1; return snd_interval_refine(i, &t); } /** * snd_pcm_hw_param_value_min * @params: the hw_params instance * @var: parameter to retrieve * @dir: pointer to the direction (-1,0,1) or NULL * * Return the minimum value for field PAR. */ static unsigned int snd_pcm_hw_param_value_min(const struct snd_pcm_hw_params *params, snd_pcm_hw_param_t var, int *dir) { if (hw_is_mask(var)) { if (dir) *dir = 0; return snd_mask_min(hw_param_mask_c(params, var)); } if (hw_is_interval(var)) { const struct snd_interval *i = hw_param_interval_c(params, var); if (dir) *dir = i->openmin; return snd_interval_min(i); } return -EINVAL; } /** * snd_pcm_hw_param_value_max * @params: the hw_params instance * @var: parameter to retrieve * @dir: pointer to the direction (-1,0,1) or NULL * * Return the maximum value for field PAR. */ static unsigned int snd_pcm_hw_param_value_max(const struct snd_pcm_hw_params *params, snd_pcm_hw_param_t var, int *dir) { if (hw_is_mask(var)) { if (dir) *dir = 0; return snd_mask_max(hw_param_mask_c(params, var)); } if (hw_is_interval(var)) { const struct snd_interval *i = hw_param_interval_c(params, var); if (dir) *dir = - (int) i->openmax; return snd_interval_max(i); } return -EINVAL; } static int _snd_pcm_hw_param_mask(struct snd_pcm_hw_params *params, snd_pcm_hw_param_t var, const struct snd_mask *val) { int changed; changed = snd_mask_refine(hw_param_mask(params, var), val); if (changed) { params->cmask |= 1 << var; params->rmask |= 1 << var; } return changed; } static int snd_pcm_hw_param_mask(struct snd_pcm_substream *pcm, struct snd_pcm_hw_params *params, snd_pcm_hw_param_t var, const struct snd_mask *val) { int changed = _snd_pcm_hw_param_mask(params, var, val); if (changed < 0) return changed; if (params->rmask) { int err = snd_pcm_hw_refine(pcm, params); if (err < 0) return err; } return 0; } 
/*
 * Remove from parameter @var all values below @val.
 * @dir > 0 makes the bound exclusive ("> val"); @dir < 0 with val > 0
 * is normalized to an open bound at val-1 (i.e. ">= val" again).
 * Flags cmask/rmask on change; returns the refine status.
 */
static int _snd_pcm_hw_param_min(struct snd_pcm_hw_params *params,
				 snd_pcm_hw_param_t var, unsigned int val,
				 int dir)
{
	int changed;
	int open = 0;
	if (dir) {
		if (dir > 0) {
			open = 1;
		} else if (dir < 0) {
			if (val > 0) {
				open = 1;
				val--;	/* (val-1, ...) open == [val, ...) closed */
			}
		}
	}
	if (hw_is_mask(var))
		/* masks have no "open" notion: open bound => min = val+1 */
		changed = snd_mask_refine_min(hw_param_mask(params, var),
					      val + !!open);
	else if (hw_is_interval(var))
		changed = snd_interval_refine_min(hw_param_interval(params, var),
						  val, open);
	else
		return -EINVAL;
	if (changed) {
		params->cmask |= 1 << var;
		params->rmask |= 1 << var;
	}
	return changed;
}

/**
 * snd_pcm_hw_param_min
 * @pcm: PCM instance
 * @params: the hw_params instance
 * @var: parameter to retrieve
 * @val: minimal value
 * @dir: pointer to the direction (-1,0,1) or NULL
 *
 * Inside configuration space defined by PARAMS remove from PAR all
 * values < VAL. Reduce configuration space accordingly.
 * Return new minimum or -EINVAL if the configuration space is empty
 */
static int snd_pcm_hw_param_min(struct snd_pcm_substream *pcm,
				struct snd_pcm_hw_params *params,
				snd_pcm_hw_param_t var, unsigned int val,
				int *dir)
{
	int changed = _snd_pcm_hw_param_min(params, var, val, dir ? *dir : 0);
	if (changed < 0)
		return changed;
	if (params->rmask) {
		/* something changed: propagate through driver constraints */
		int err = snd_pcm_hw_refine(pcm, params);
		if (err < 0)
			return err;
	}
	return snd_pcm_hw_param_value_min(params, var, dir);
}

/*
 * Remove from parameter @var all values above @val (mirror of
 * _snd_pcm_hw_param_min).  @dir < 0 makes the bound exclusive;
 * @dir > 0 is normalized to an open bound at val+1.
 * An open maximum of 0 for a mask empties it (-EINVAL).
 */
static int _snd_pcm_hw_param_max(struct snd_pcm_hw_params *params,
				 snd_pcm_hw_param_t var, unsigned int val,
				 int dir)
{
	int changed;
	int open = 0;
	if (dir) {
		if (dir < 0) {
			open = 1;
		} else if (dir > 0) {
			open = 1;
			val++;	/* (..., val+1) open == (..., val] closed */
		}
	}
	if (hw_is_mask(var)) {
		if (val == 0 && open) {
			/* nothing can be < 0: empty set */
			snd_mask_none(hw_param_mask(params, var));
			changed = -EINVAL;
		} else
			changed = snd_mask_refine_max(hw_param_mask(params, var),
						      val - !!open);
	} else if (hw_is_interval(var))
		changed = snd_interval_refine_max(hw_param_interval(params, var),
						  val, open);
	else
		return -EINVAL;
	if (changed) {
		params->cmask |= 1 << var;
		params->rmask |= 1 << var;
	}
	return changed;
}

/**
 * snd_pcm_hw_param_max
 * @pcm: PCM instance
 * @params: the hw_params instance
 * @var: parameter to retrieve
 * @val: maximal value
 * @dir: pointer to the direction (-1,0,1) or NULL
 *
 * Inside configuration space defined by PARAMS remove from PAR all
 * values >= VAL + 1. Reduce configuration space accordingly.
 * Return new maximum or -EINVAL if the configuration space is empty
 */
static int snd_pcm_hw_param_max(struct snd_pcm_substream *pcm,
				struct snd_pcm_hw_params *params,
				snd_pcm_hw_param_t var, unsigned int val,
				int *dir)
{
	int changed = _snd_pcm_hw_param_max(params, var, val, dir ? *dir : 0);
	if (changed < 0)
		return changed;
	if (params->rmask) {
		int err = snd_pcm_hw_refine(pcm, params);
		if (err < 0)
			return err;
	}
	return snd_pcm_hw_param_value_max(params, var, dir);
}

/*
 * (value, direction) boundary arithmetic: *c/*cdir = (a,adir) - (b,bdir).
 * Directions are first clamped to {-1, 0, 1}.
 * (Function continues on the next chunk line.)
 */
static int boundary_sub(int a, int adir, int b, int bdir, int *c, int *cdir)
{
	adir = adir < 0 ? -1 : (adir > 0 ? 1 : 0);
	bdir = bdir < 0 ? -1 : (bdir > 0 ?
1 : 0);
	/* continuation of boundary_sub(): fold a direction overflow of +-2
	 * back into the value part */
	*c = a - b;
	*cdir = adir - bdir;
	if (*cdir == -2) {
		(*c)--;
	} else if (*cdir == 2) {
		(*c)++;
	}
	return 0;
}

/*
 * "a < b" on (value, direction) boundary pairs; each boundary is first
 * normalized so that an open bound compares strictly above its value.
 */
static int boundary_lt(unsigned int a, int adir, unsigned int b, int bdir)
{
	if (adir < 0) {
		a--;
		adir = 1;
	} else if (adir > 0)
		adir = 1;
	if (bdir < 0) {
		b--;
		bdir = 1;
	} else if (bdir > 0)
		bdir = 1;
	return a < b || (a == b && adir < bdir);
}

/* Return 1 if min is nearer to best than max */
static int boundary_nearer(int min, int mindir, int best, int bestdir,
			   int max, int maxdir)
{
	int dmin, dmindir;
	int dmax, dmaxdir;
	boundary_sub(best, bestdir, min, mindir, &dmin, &dmindir);
	boundary_sub(max, maxdir, best, bestdir, &dmax, &dmaxdir);
	return boundary_lt(dmin, dmindir, dmax, dmaxdir);
}

/**
 * snd_pcm_hw_param_near
 * @pcm: PCM instance
 * @params: the hw_params instance
 * @var: parameter to retrieve
 * @best: value to set
 * @dir: pointer to the direction (-1,0,1) or NULL
 *
 * Inside configuration space defined by PARAMS set PAR to the available value
 * nearest to VAL. Reduce configuration space accordingly.
 * This function cannot be called for SNDRV_PCM_HW_PARAM_ACCESS,
 * SNDRV_PCM_HW_PARAM_FORMAT, SNDRV_PCM_HW_PARAM_SUBFORMAT.
 * Return the value found.
 *
 * Strategy: try restricting to ">= best" and "< best" on two copies of
 * the space and keep whichever candidate boundary_nearer() prefers.
 */
static int snd_pcm_hw_param_near(struct snd_pcm_substream *pcm,
				 struct snd_pcm_hw_params *params,
				 snd_pcm_hw_param_t var, unsigned int best,
				 int *dir)
{
	struct snd_pcm_hw_params *save = NULL;
	int v;
	unsigned int saved_min;
	int last = 0;
	int min, max;
	int mindir, maxdir;
	int valdir = dir ? *dir : 0;
	/* FIXME */
	if (best > INT_MAX)
		best = INT_MAX;	/* min/max below are signed ints */
	min = max = best;
	mindir = maxdir = valdir;
	/* make "max" the boundary just below (best, valdir) */
	if (maxdir > 0)
		maxdir = 0;
	else if (maxdir == 0)
		maxdir = -1;
	else {
		maxdir = 1;
		max--;
	}
	save = kmalloc(sizeof(*save), GFP_KERNEL);
	if (save == NULL)
		return -ENOMEM;
	*save = *params;	/* keep a pristine copy to retry from */
	saved_min = min;
	min = snd_pcm_hw_param_min(pcm, params, var, min, &mindir);
	if (min >= 0) {
		struct snd_pcm_hw_params *params1;
		if (max < 0)
			goto _end;
		/* exact hit: ">= best" already starts at best */
		if ((unsigned int)min == saved_min && mindir == valdir)
			goto _end;
		params1 = kmalloc(sizeof(*params1), GFP_KERNEL);
		if (params1 == NULL) {
			kfree(save);
			return -ENOMEM;
		}
		*params1 = *save;
		max = snd_pcm_hw_param_max(pcm, params1, var, max, &maxdir);
		if (max < 0) {
			kfree(params1);
			goto _end;
		}
		if (boundary_nearer(max, maxdir, best, valdir, min, mindir)) {
			/* the "< best" side wins: adopt that space */
			*params = *params1;
			last = 1;
		}
		kfree(params1);
	} else {
		/* nothing >= best: fall back to the "< best" side only */
		*params = *save;
		max = snd_pcm_hw_param_max(pcm, params, var, max, &maxdir);
		if (max < 0) {
			kfree(save);
			return max;
		}
		last = 1;
	}
 _end:
	kfree(save);
	if (last)
		v = snd_pcm_hw_param_last(pcm, params, var, dir);
	else
		v = snd_pcm_hw_param_first(pcm, params, var, dir);
	snd_BUG_ON(v < 0);
	return v;
}

/*
 * Restrict parameter @var to the single value @val (with @dir shifting
 * to the adjacent open interval for interval parameters).
 * Flags cmask/rmask on change.  (Tail continues on the next chunk line.)
 */
static int _snd_pcm_hw_param_set(struct snd_pcm_hw_params *params,
				 snd_pcm_hw_param_t var, unsigned int val,
				 int dir)
{
	int changed;
	if (hw_is_mask(var)) {
		struct snd_mask *m = hw_param_mask(params, var);
		if (val == 0 && dir < 0) {
			/* "< 0" is impossible: empty the mask */
			changed = -EINVAL;
			snd_mask_none(m);
		} else {
			if (dir > 0)
				val++;
			else if (dir < 0)
				val--;
			changed = snd_mask_refine_set(hw_param_mask(params, var), val);
		}
	} else if (hw_is_interval(var)) {
		struct snd_interval *i = hw_param_interval(params, var);
		if (val == 0 && dir < 0) {
			changed = -EINVAL;
			snd_interval_none(i);
		} else if (dir == 0)
			changed = snd_interval_refine_set(i, val);
		else {
			/* open unit interval next to val, on the @dir side */
			struct snd_interval t;
			t.openmin = 1;
			t.openmax = 1;
			t.empty = 0;
			t.integer = 0;
			if (dir < 0) {
				t.min = val - 1;
				t.max = val;
			} else {
				t.min = val;
				t.max = val+1;
			}
			changed = snd_interval_refine(i, &t);
		}
	} else
		return -EINVAL;
	if
(changed) {
		/* continuation of _snd_pcm_hw_param_set(): record the change */
		params->cmask |= 1 << var;
		params->rmask |= 1 << var;
	}
	return changed;
}

/**
 * snd_pcm_hw_param_set
 * @pcm: PCM instance
 * @params: the hw_params instance
 * @var: parameter to retrieve
 * @val: value to set
 * @dir: pointer to the direction (-1,0,1) or NULL
 *
 * Inside configuration space defined by PARAMS remove from PAR all
 * values != VAL. Reduce configuration space accordingly.
 * Return VAL or -EINVAL if the configuration space is empty
 */
static int snd_pcm_hw_param_set(struct snd_pcm_substream *pcm,
				struct snd_pcm_hw_params *params,
				snd_pcm_hw_param_t var, unsigned int val,
				int dir)
{
	int changed = _snd_pcm_hw_param_set(params, var, val, dir);
	if (changed < 0)
		return changed;
	if (params->rmask) {
		int err = snd_pcm_hw_refine(pcm, params);
		if (err < 0)
			return err;
	}
	return snd_pcm_hw_param_value(params, var, NULL);
}

/*
 * Restrict interval parameter @var to integer values only;
 * flags cmask/rmask on change.
 */
static int _snd_pcm_hw_param_setinteger(struct snd_pcm_hw_params *params,
					snd_pcm_hw_param_t var)
{
	int changed;
	changed = snd_interval_setinteger(hw_param_interval(params, var));
	if (changed) {
		params->cmask |= 1 << var;
		params->rmask |= 1 << var;
	}
	return changed;
}

/*
 * plugin
 */
#ifdef CONFIG_SND_PCM_OSS_PLUGINS
/*
 * Free the whole OSS conversion-plugin chain of @substream.
 * NOTE(review): list manipulation here is not visibly locked in this
 * chunk — presumably serialized by oss.params_lock at the callers;
 * confirm before relying on it.
 */
static int snd_pcm_oss_plugin_clear(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct snd_pcm_plugin *plugin, *next;
	plugin = runtime->oss.plugin_first;
	while (plugin) {
		next = plugin->next;
		snd_pcm_plugin_free(plugin);
		plugin = next;
	}
	runtime->oss.plugin_first = runtime->oss.plugin_last = NULL;
	return 0;
}

/* Link @plugin at the head of its runtime's OSS plugin chain. */
static int snd_pcm_plugin_insert(struct snd_pcm_plugin *plugin)
{
	struct snd_pcm_runtime *runtime = plugin->plug->runtime;
	plugin->next = runtime->oss.plugin_first;
	plugin->prev = NULL;
	if (runtime->oss.plugin_first) {
		runtime->oss.plugin_first->prev = plugin;
		runtime->oss.plugin_first = plugin;
	} else {
		runtime->oss.plugin_last =
		runtime->oss.plugin_first = plugin;
	}
	return 0;
}

/* Link @plugin at the tail of its runtime's OSS plugin chain. */
int snd_pcm_plugin_append(struct snd_pcm_plugin *plugin)
{
	struct snd_pcm_runtime *runtime = plugin->plug->runtime;
	plugin->next = NULL;
	plugin->prev = runtime->oss.plugin_last;
	if (runtime->oss.plugin_last) {
		runtime->oss.plugin_last->next = plugin;
		runtime->oss.plugin_last = plugin;
	} else {
		runtime->oss.plugin_last =
		runtime->oss.plugin_first = plugin;
	}
	return 0;
}
#endif /* CONFIG_SND_PCM_OSS_PLUGINS */

/*
 * Convert ALSA frames to OSS byte count, rescaled by the ratio of the
 * OSS-visible buffer size to the real ALSA buffer size.
 * Uses 64-bit math on 32-bit kernels to avoid overflow.
 */
static long snd_pcm_oss_bytes(struct snd_pcm_substream *substream, long frames)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	long buffer_size = snd_pcm_lib_buffer_bytes(substream);
	long bytes = frames_to_bytes(runtime, frames);
	if (buffer_size == runtime->oss.buffer_bytes)
		return bytes;	/* no rescaling needed */
#if BITS_PER_LONG >= 64
	return runtime->oss.buffer_bytes * bytes / buffer_size;
#else
	{
		u64 bsize = (u64)runtime->oss.buffer_bytes * (u64)bytes;
		return div_u64(bsize, buffer_size);
	}
#endif
}

/*
 * Convert OSS bytes back to ALSA frames (inverse rescaling of
 * snd_pcm_oss_bytes).
 */
static long snd_pcm_alsa_frames(struct snd_pcm_substream *substream, long bytes)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	long buffer_size = snd_pcm_lib_buffer_bytes(substream);
	if (buffer_size == runtime->oss.buffer_bytes)
		return bytes_to_frames(runtime, bytes);
	return bytes_to_frames(runtime,
			       (buffer_size * bytes) / runtime->oss.buffer_bytes);
}

/* Last period-interrupt hardware pointer of @runtime. */
static inline
snd_pcm_uframes_t get_hw_ptr_period(struct snd_pcm_runtime *runtime)
{
	return runtime->hw_ptr_interrupt;
}

/* define extended formats in the recent OSS versions (if any) */
/* linear formats */
#define AFMT_S32_LE      0x00001000
#define AFMT_S32_BE      0x00002000
#define AFMT_S24_LE      0x00008000
#define AFMT_S24_BE      0x00010000
#define AFMT_S24_PACKED  0x00040000

/* other supported formats */
#define AFMT_FLOAT       0x00004000
#define AFMT_SPDIF_RAW   0x00020000

/* unsupported formats */
#define AFMT_AC3         0x00000400
#define AFMT_VORBIS      0x00000800

/*
 * Map an OSS AFMT_* code to the corresponding ALSA format.
 * Unknown codes silently fall back to U8 (see default in the tail of
 * this switch, continued on the next chunk line).
 */
static snd_pcm_format_t snd_pcm_oss_format_from(int format)
{
	switch (format) {
	case AFMT_MU_LAW:
		return SNDRV_PCM_FORMAT_MU_LAW;
	case AFMT_A_LAW:
		return SNDRV_PCM_FORMAT_A_LAW;
	case AFMT_IMA_ADPCM:
		return SNDRV_PCM_FORMAT_IMA_ADPCM;
	case AFMT_U8:
		return SNDRV_PCM_FORMAT_U8;
case AFMT_S16_LE:
		/* continuation of snd_pcm_oss_format_from() */
		return SNDRV_PCM_FORMAT_S16_LE;
	case AFMT_S16_BE:
		return SNDRV_PCM_FORMAT_S16_BE;
	case AFMT_S8:
		return SNDRV_PCM_FORMAT_S8;
	case AFMT_U16_LE:
		return SNDRV_PCM_FORMAT_U16_LE;
	case AFMT_U16_BE:
		return SNDRV_PCM_FORMAT_U16_BE;
	case AFMT_MPEG:
		return SNDRV_PCM_FORMAT_MPEG;
	case AFMT_S32_LE:
		return SNDRV_PCM_FORMAT_S32_LE;
	case AFMT_S32_BE:
		return SNDRV_PCM_FORMAT_S32_BE;
	case AFMT_S24_LE:
		return SNDRV_PCM_FORMAT_S24_LE;
	case AFMT_S24_BE:
		return SNDRV_PCM_FORMAT_S24_BE;
	case AFMT_S24_PACKED:
		return SNDRV_PCM_FORMAT_S24_3LE;
	case AFMT_FLOAT:
		return SNDRV_PCM_FORMAT_FLOAT;
	case AFMT_SPDIF_RAW:
		return SNDRV_PCM_FORMAT_IEC958_SUBFRAME;
	default:
		return SNDRV_PCM_FORMAT_U8;	/* silent fallback */
	}
}

/*
 * Map an ALSA format back to its OSS AFMT_* code, or -EINVAL for
 * formats OSS cannot express.
 */
static int snd_pcm_oss_format_to(snd_pcm_format_t format)
{
	switch (format) {
	case SNDRV_PCM_FORMAT_MU_LAW:
		return AFMT_MU_LAW;
	case SNDRV_PCM_FORMAT_A_LAW:
		return AFMT_A_LAW;
	case SNDRV_PCM_FORMAT_IMA_ADPCM:
		return AFMT_IMA_ADPCM;
	case SNDRV_PCM_FORMAT_U8:
		return AFMT_U8;
	case SNDRV_PCM_FORMAT_S16_LE:
		return AFMT_S16_LE;
	case SNDRV_PCM_FORMAT_S16_BE:
		return AFMT_S16_BE;
	case SNDRV_PCM_FORMAT_S8:
		return AFMT_S8;
	case SNDRV_PCM_FORMAT_U16_LE:
		return AFMT_U16_LE;
	case SNDRV_PCM_FORMAT_U16_BE:
		return AFMT_U16_BE;
	case SNDRV_PCM_FORMAT_MPEG:
		return AFMT_MPEG;
	case SNDRV_PCM_FORMAT_S32_LE:
		return AFMT_S32_LE;
	case SNDRV_PCM_FORMAT_S32_BE:
		return AFMT_S32_BE;
	case SNDRV_PCM_FORMAT_S24_LE:
		return AFMT_S24_LE;
	case SNDRV_PCM_FORMAT_S24_BE:
		return AFMT_S24_BE;
	case SNDRV_PCM_FORMAT_S24_3LE:
		return AFMT_S24_PACKED;
	case SNDRV_PCM_FORMAT_FLOAT:
		return AFMT_FLOAT;
	case SNDRV_PCM_FORMAT_IEC958_SUBFRAME:
		return AFMT_SPDIF_RAW;
	default:
		return -EINVAL;
	}
}

/*
 * Choose the OSS-visible period (fragment) size and period count from
 * user hints (setup.period_size, fragshift, subdivision, maxfrags) and
 * the slave's hardware limits, then store the results into
 * runtime->oss.{period_bytes, periods}.  All sizes are in OSS bytes;
 * period sizes are forced to powers of two, per OSS fragment semantics.
 * Returns 0 or -EINVAL if no usable fragment size remains.
 */
static int snd_pcm_oss_period_size(struct snd_pcm_substream *substream,
				   struct snd_pcm_hw_params *oss_params,
				   struct snd_pcm_hw_params *slave_params)
{
	size_t s;
	size_t oss_buffer_size, oss_period_size, oss_periods;
	size_t min_period_size, max_period_size;
	struct snd_pcm_runtime *runtime = substream->runtime;
	size_t oss_frame_size;

	oss_frame_size = snd_pcm_format_physical_width(params_format(oss_params)) *
			 params_channels(oss_params) / 8;
	/* largest buffer the slave allows, as a power of two in OSS bytes */
	oss_buffer_size = snd_pcm_plug_client_size(substream,
						   snd_pcm_hw_param_value_max(slave_params, SNDRV_PCM_HW_PARAM_BUFFER_SIZE, NULL)) * oss_frame_size;
	oss_buffer_size = 1 << ld2(oss_buffer_size);
	if (atomic_read(&substream->mmap_count)) {
		if (oss_buffer_size > runtime->oss.mmap_bytes)
			oss_buffer_size = runtime->oss.mmap_bytes;
	}

	if (substream->oss.setup.period_size > 16)
		/* explicit per-device override from the setup procfs */
		oss_period_size = substream->oss.setup.period_size;
	else if (runtime->oss.fragshift) {
		/* application asked via SNDCTL_DSP_SETFRAGMENT */
		oss_period_size = 1 << runtime->oss.fragshift;
		if (oss_period_size > oss_buffer_size / 2)
			oss_period_size = oss_buffer_size / 2;
	} else {
		int sd;
		size_t bytes_per_sec = params_rate(oss_params) *
			snd_pcm_format_physical_width(params_format(oss_params)) *
			params_channels(oss_params) / 8;

		/* heuristic default: <= 1 s of audio, then subdivide */
		oss_period_size = oss_buffer_size;
		do {
			oss_period_size /= 2;
		} while (oss_period_size > bytes_per_sec);
		if (runtime->oss.subdivision == 0) {
			sd = 4;
			if (oss_period_size / sd > 4096)
				sd *= 2;
			if (oss_period_size / sd < 4096)
				sd = 1;
		} else
			sd = runtime->oss.subdivision;
		oss_period_size /= sd;
		if (oss_period_size < 16)
			oss_period_size = 16;
	}

	/* clamp into the slave's period-size range (power-of-two rounded) */
	min_period_size = snd_pcm_plug_client_size(substream,
						   snd_pcm_hw_param_value_min(slave_params, SNDRV_PCM_HW_PARAM_PERIOD_SIZE, NULL));
	min_period_size *= oss_frame_size;
	min_period_size = 1 << (ld2(min_period_size - 1) + 1);	/* round up */
	if (oss_period_size < min_period_size)
		oss_period_size = min_period_size;

	max_period_size = snd_pcm_plug_client_size(substream,
						   snd_pcm_hw_param_value_max(slave_params, SNDRV_PCM_HW_PARAM_PERIOD_SIZE, NULL));
	max_period_size *= oss_frame_size;
	max_period_size = 1 << ld2(max_period_size);	/* round down */
	if (oss_period_size > max_period_size)
		oss_period_size = max_period_size;

	oss_periods = oss_buffer_size / oss_period_size;

	if (substream->oss.setup.periods > 1)
		oss_periods = substream->oss.setup.periods;

	s = snd_pcm_hw_param_value_max(slave_params, SNDRV_PCM_HW_PARAM_PERIODS, NULL);
	if (runtime->oss.maxfrags && s > runtime->oss.maxfrags)
		s = runtime->oss.maxfrags;
	if (oss_periods > s)
		oss_periods = s;

	s = snd_pcm_hw_param_value_min(slave_params, SNDRV_PCM_HW_PARAM_PERIODS, NULL);
	if (s < 2)
		s = 2;
	if (oss_periods < s)
		oss_periods = s;

	/* shrink the fragment until everything fits the buffer */
	while (oss_period_size * oss_periods > oss_buffer_size)
		oss_period_size /= 2;

	if (oss_period_size < 16)
		return -EINVAL;
	runtime->oss.period_bytes = oss_period_size;
	runtime->oss.period_frames = 1;	/* recomputed later in change_params */
	runtime->oss.periods = oss_periods;
	return 0;
}

/*
 * Pick a slave rate for @best_rate: first try exact integer multiples
 * (cheap resampling), else fall back to the nearest supported rate.
 */
static int choose_rate(struct snd_pcm_substream *substream,
		       struct snd_pcm_hw_params *params, unsigned int best_rate)
{
	struct snd_interval *it;
	struct snd_pcm_hw_params *save;
	unsigned int rate, prev;

	save = kmalloc(sizeof(*save), GFP_KERNEL);
	if (save == NULL)
		return -ENOMEM;
	*save = *params;
	it = hw_param_interval(save, SNDRV_PCM_HW_PARAM_RATE);

	/* try multiples of the best rate */
	rate = best_rate;
	for (;;) {
		if (it->max < rate || (it->max == rate && it->openmax))
			break;	/* above the supported range */
		if (it->min < rate || (it->min == rate && !it->openmin)) {
			int ret;
			ret = snd_pcm_hw_param_set(substream, params,
						   SNDRV_PCM_HW_PARAM_RATE,
						   rate, 0);
			if (ret == (int)rate) {
				kfree(save);
				return rate;
			}
			*params = *save;	/* rejected: restore and retry */
		}
		prev = rate;
		rate += best_rate;
		if (rate <= prev)
			break;	/* unsigned overflow guard */
	}

	/* not found, use the nearest rate */
	kfree(save);
	return snd_pcm_hw_param_near(substream, params,
				     SNDRV_PCM_HW_PARAM_RATE, best_rate, NULL);
}

/*
 * Re-negotiate the full hardware/software configuration of @substream
 * from the cached OSS settings (rate/channels/format/fragments).
 * Serialized by runtime->oss.params_lock.
 * (Body continues on the following chunk lines.)
 */
static int snd_pcm_oss_change_params(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct snd_pcm_hw_params *params, *sparams;
	struct snd_pcm_sw_params *sw_params;
	ssize_t oss_buffer_size, oss_period_size;
	size_t oss_frame_size;
	int err;
	int direct;
	snd_pcm_format_t format, sformat;
	int n;
	struct snd_mask sformat_mask;
	struct snd_mask mask;

	if (mutex_lock_interruptible(&runtime->oss.params_lock))
		return -EINTR;
	sw_params = kmalloc(sizeof(*sw_params), GFP_KERNEL);
	params = kmalloc(sizeof(*params), GFP_KERNEL);
	sparams = kmalloc(sizeof(*sparams),
GFP_KERNEL);
	/* continuation of snd_pcm_oss_change_params() */
	if (!sw_params || !params || !sparams) {
		snd_printd("No memory\n");
		err = -ENOMEM;
		goto failure;
	}

	/* mmap mode forces direct (no conversion plugins) */
	if (atomic_read(&substream->mmap_count))
		direct = 1;
	else
		direct = substream->oss.setup.direct;

	_snd_pcm_hw_params_any(sparams);
	_snd_pcm_hw_param_setinteger(sparams, SNDRV_PCM_HW_PARAM_PERIODS);
	_snd_pcm_hw_param_min(sparams, SNDRV_PCM_HW_PARAM_PERIODS, 2, 0);
	snd_mask_none(&mask);
	if (atomic_read(&substream->mmap_count))
		snd_mask_set(&mask, (__force int)SNDRV_PCM_ACCESS_MMAP_INTERLEAVED);
	else {
		snd_mask_set(&mask, (__force int)SNDRV_PCM_ACCESS_RW_INTERLEAVED);
		if (!direct)
			snd_mask_set(&mask, (__force int)SNDRV_PCM_ACCESS_RW_NONINTERLEAVED);
	}
	err = snd_pcm_hw_param_mask(substream, sparams, SNDRV_PCM_HW_PARAM_ACCESS, &mask);
	if (err < 0) {
		snd_printd("No usable accesses\n");
		err = -EINVAL;
		goto failure;
	}
	choose_rate(substream, sparams, runtime->oss.rate);
	snd_pcm_hw_param_near(substream, sparams, SNDRV_PCM_HW_PARAM_CHANNELS,
			      runtime->oss.channels, NULL);

	format = snd_pcm_oss_format_from(runtime->oss.format);

	sformat_mask = *hw_param_mask(sparams, SNDRV_PCM_HW_PARAM_FORMAT);
	if (direct)
		sformat = format;
	else
		sformat = snd_pcm_plug_slave_format(format, &sformat_mask);

	if ((__force int)sformat < 0 ||
	    !snd_mask_test(&sformat_mask, (__force int)sformat)) {
		/* scan for any slave format we can convert to OSS */
		for (sformat = (__force snd_pcm_format_t)0;
		     (__force int)sformat <= (__force int)SNDRV_PCM_FORMAT_LAST;
		     sformat = (__force snd_pcm_format_t)((__force int)sformat + 1)) {
			if (snd_mask_test(&sformat_mask, (__force int)sformat) &&
			    snd_pcm_oss_format_to(sformat) >= 0)
				break;
		}
		if ((__force int)sformat > (__force int)SNDRV_PCM_FORMAT_LAST) {
			snd_printd("Cannot find a format!!!\n");
			err = -EINVAL;
			goto failure;
		}
	}
	err = _snd_pcm_hw_param_set(sparams, SNDRV_PCM_HW_PARAM_FORMAT,
				    (__force int)sformat, 0);
	if (err < 0)
		goto failure;

	if (direct) {
		memcpy(params, sparams, sizeof(*params));
	} else {
		/* client-side (pre-plugin) parameter space */
		_snd_pcm_hw_params_any(params);
		_snd_pcm_hw_param_set(params, SNDRV_PCM_HW_PARAM_ACCESS,
				      (__force int)SNDRV_PCM_ACCESS_RW_INTERLEAVED, 0);
		_snd_pcm_hw_param_set(params, SNDRV_PCM_HW_PARAM_FORMAT,
				      (__force int)snd_pcm_oss_format_from(runtime->oss.format), 0);
		_snd_pcm_hw_param_set(params, SNDRV_PCM_HW_PARAM_CHANNELS,
				      runtime->oss.channels, 0);
		_snd_pcm_hw_param_set(params, SNDRV_PCM_HW_PARAM_RATE,
				      runtime->oss.rate, 0);
		pdprintf("client: access = %i, format = %i, channels = %i, rate = %i\n",
			 params_access(params), params_format(params),
			 params_channels(params), params_rate(params));
	}
	pdprintf("slave: access = %i, format = %i, channels = %i, rate = %i\n",
		 params_access(sparams), params_format(sparams),
		 params_channels(sparams), params_rate(sparams));

	oss_frame_size = snd_pcm_format_physical_width(params_format(params)) *
			 params_channels(params) / 8;

#ifdef CONFIG_SND_PCM_OSS_PLUGINS
	snd_pcm_oss_plugin_clear(substream);
	if (!direct) {
		/* add necessary plugins */
		snd_pcm_oss_plugin_clear(substream);
		if ((err = snd_pcm_plug_format_plugins(substream,
						       params,
						       sparams)) < 0) {
			snd_printd("snd_pcm_plug_format_plugins failed: %i\n", err);
			snd_pcm_oss_plugin_clear(substream);
			goto failure;
		}
		if (runtime->oss.plugin_first) {
			struct snd_pcm_plugin *plugin;
			if ((err = snd_pcm_plugin_build_io(substream, sparams, &plugin)) < 0) {
				snd_printd("snd_pcm_plugin_build_io failed: %i\n", err);
				snd_pcm_oss_plugin_clear(substream);
				goto failure;
			}
			/* I/O plugin goes at the slave end of the chain */
			if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
				err = snd_pcm_plugin_append(plugin);
			} else {
				err = snd_pcm_plugin_insert(plugin);
			}
			if (err < 0) {
				snd_pcm_oss_plugin_clear(substream);
				goto failure;
			}
		}
	}
#endif

	err = snd_pcm_oss_period_size(substream, params, sparams);
	if (err < 0)
		goto failure;

	n = snd_pcm_plug_slave_size(substream, runtime->oss.period_bytes / oss_frame_size);
	err = snd_pcm_hw_param_near(substream, sparams, SNDRV_PCM_HW_PARAM_PERIOD_SIZE, n, NULL);
	if (err < 0)
		goto failure;

	err = snd_pcm_hw_param_near(substream, sparams, SNDRV_PCM_HW_PARAM_PERIODS,
				     runtime->oss.periods, NULL);
	if (err < 0)
		goto failure;

	snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_DROP, NULL);

	if ((err = snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_HW_PARAMS, sparams)) < 0) {
		snd_printd("HW_PARAMS failed: %i\n", err);
		goto failure;
	}

	memset(sw_params, 0, sizeof(*sw_params));
	if (runtime->oss.trigger) {
		sw_params->start_threshold = 1;
	} else {
		/* triggering disabled: never auto-start */
		sw_params->start_threshold = runtime->boundary;
	}
	if (atomic_read(&substream->mmap_count) ||
	    substream->stream == SNDRV_PCM_STREAM_CAPTURE)
		sw_params->stop_threshold = runtime->boundary;	/* free-running */
	else
		sw_params->stop_threshold = runtime->buffer_size;
	sw_params->tstamp_mode = SNDRV_PCM_TSTAMP_NONE;
	sw_params->period_step = 1;
	sw_params->avail_min = substream->stream == SNDRV_PCM_STREAM_PLAYBACK ?
		1 : runtime->period_size;
	if (atomic_read(&substream->mmap_count) ||
	    substream->oss.setup.nosilence) {
		sw_params->silence_threshold = 0;
		sw_params->silence_size = 0;
	} else {
		/* auto-fill silence slightly ahead of the playback pointer */
		snd_pcm_uframes_t frames;
		frames = runtime->period_size + 16;
		if (frames > runtime->buffer_size)
			frames = runtime->buffer_size;
		sw_params->silence_threshold = frames;
		sw_params->silence_size = frames;
	}

	if ((err = snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_SW_PARAMS, sw_params)) < 0) {
		snd_printd("SW_PARAMS failed: %i\n", err);
		goto failure;
	}

	runtime->oss.periods = params_periods(sparams);
	oss_period_size = snd_pcm_plug_client_size(substream, params_period_size(sparams));
	if (oss_period_size < 0) {
		err = -EINVAL;
		goto failure;
	}
#ifdef CONFIG_SND_PCM_OSS_PLUGINS
	if (runtime->oss.plugin_first) {
		err = snd_pcm_plug_alloc(substream, oss_period_size);
		if (err < 0)
			goto failure;
	}
#endif
	oss_period_size *= oss_frame_size;

	oss_buffer_size = oss_period_size * runtime->oss.periods;
	if (oss_buffer_size < 0) {
		err = -EINVAL;
		goto failure;
	}

	runtime->oss.period_bytes = oss_period_size;
	runtime->oss.buffer_bytes = oss_buffer_size;

	pdprintf("oss: period bytes = %i, buffer bytes = %i\n",
		 runtime->oss.period_bytes,
		 runtime->oss.buffer_bytes);
	pdprintf("slave: period_size = %i, buffer_size = %i\n",
		 params_period_size(sparams),
		 params_buffer_size(sparams));

	runtime->oss.format = snd_pcm_oss_format_to(params_format(params));
	runtime->oss.channels = params_channels(params);
	runtime->oss.rate = params_rate(params);

	/* staging buffer for one OSS fragment of partial/converted data */
	vfree(runtime->oss.buffer);
	runtime->oss.buffer = vmalloc(runtime->oss.period_bytes);
	if (!runtime->oss.buffer) {
		err = -ENOMEM;
		goto failure;
	}

	runtime->oss.params = 0;	/* configuration is now up to date */
	runtime->oss.prepare = 1;
	runtime->oss.buffer_used = 0;
	if (runtime->dma_area)
		snd_pcm_format_set_silence(runtime->format, runtime->dma_area,
					   bytes_to_samples(runtime, runtime->dma_bytes));

	runtime->oss.period_frames = snd_pcm_alsa_frames(substream, oss_period_size);

	err = 0;
failure:
	kfree(sw_params);
	kfree(params);
	kfree(sparams);
	mutex_unlock(&runtime->oss.params_lock);
	return err;
}

/*
 * Find the first open substream of the OSS file, re-negotiating any
 * stream whose parameters are stale.  Returns it via @r_substream.
 */
static int snd_pcm_oss_get_active_substream(struct snd_pcm_oss_file *pcm_oss_file,
					    struct snd_pcm_substream **r_substream)
{
	int idx, err;
	struct snd_pcm_substream *asubstream = NULL, *substream;

	for (idx = 0; idx < 2; idx++) {
		substream = pcm_oss_file->streams[idx];
		if (substream == NULL)
			continue;
		if (asubstream == NULL)
			asubstream = substream;
		if (substream->runtime->oss.params) {
			err = snd_pcm_oss_change_params(substream);
			if (err < 0)
				return err;
		}
	}
	if (!asubstream)
		return -EIO;
	if (r_substream)
		*r_substream = asubstream;
	return 0;
}

/*
 * Issue PREPARE on the substream and reset the OSS bookkeeping
 * (staging buffer position and hw-pointer tracking).
 */
static int snd_pcm_oss_prepare(struct snd_pcm_substream *substream)
{
	int err;
	struct snd_pcm_runtime *runtime = substream->runtime;

	err = snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_PREPARE, NULL);
	if (err < 0) {
		snd_printd("snd_pcm_oss_prepare: SNDRV_PCM_IOCTL_PREPARE failed\n");
		return err;
	}
	runtime->oss.prepare = 0;
	runtime->oss.prev_hw_ptr_period = 0;
	runtime->oss.period_ptr = 0;
	runtime->oss.buffer_used = 0;

	return 0;
}

/*
 * Ensure @substream has current parameters and is prepared; NULL is
 * accepted (no-op).  (Tail continues on the next chunk line.)
 */
static int snd_pcm_oss_make_ready(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime;
	int err;

	if (substream == NULL)
		return 0;
	runtime = substream->runtime;
	if (runtime->oss.params) {
		err =
snd_pcm_oss_change_params(substream);
		/* continuation of snd_pcm_oss_make_ready() */
		if (err < 0)
			return err;
	}
	if (runtime->oss.prepare) {
		err = snd_pcm_oss_prepare(substream);
		if (err < 0)
			return err;
	}
	return 0;
}

/*
 * Query the capture delay; on overrun, drop whole periods (like the
 * OSS/Linux driver) until the delay fits within the buffer again.
 */
static int snd_pcm_oss_capture_position_fixup(struct snd_pcm_substream *substream,
					      snd_pcm_sframes_t *delay)
{
	struct snd_pcm_runtime *runtime;
	snd_pcm_uframes_t frames;
	int err = 0;

	while (1) {
		err = snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_DELAY, delay);
		if (err < 0)
			break;
		runtime = substream->runtime;
		if (*delay <= (snd_pcm_sframes_t)runtime->buffer_size)
			break;
		/* in case of overrun, skip whole periods like OSS/Linux driver does */
		/* until avail(delay) <= buffer_size */
		frames = (*delay - runtime->buffer_size) + runtime->period_size - 1;
		frames /= runtime->period_size;
		frames *= runtime->period_size;
		err = snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_FORWARD, &frames);
		if (err < 0)
			break;
	}
	return err;
}

/*
 * Blocking-write @frames from @ptr, transparently recovering from XRUN
 * and SUSPEND.  @in_kernel selects a kernel-space source buffer (the
 * __user cast plus snd_enter_user() bypasses the address-space check).
 */
snd_pcm_sframes_t snd_pcm_oss_write3(struct snd_pcm_substream *substream,
				     const char *ptr, snd_pcm_uframes_t frames,
				     int in_kernel)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	int ret;
	while (1) {
		if (runtime->status->state == SNDRV_PCM_STATE_XRUN ||
		    runtime->status->state == SNDRV_PCM_STATE_SUSPENDED) {
#ifdef OSS_DEBUG
			if (runtime->status->state == SNDRV_PCM_STATE_XRUN)
				printk(KERN_DEBUG "pcm_oss: write: "
				       "recovering from XRUN\n");
			else
				printk(KERN_DEBUG "pcm_oss: write: "
				       "recovering from SUSPEND\n");
#endif
			ret = snd_pcm_oss_prepare(substream);
			if (ret < 0)
				break;
		}
		if (in_kernel) {
			mm_segment_t fs;
			fs = snd_enter_user();
			ret = snd_pcm_lib_write(substream, (void __force __user *)ptr, frames);
			snd_leave_user(fs);
		} else {
			ret = snd_pcm_lib_write(substream, (void __force __user *)ptr, frames);
		}
		if (ret != -EPIPE && ret != -ESTRPIPE)
			break;
		/* test, if we can't store new data, because the stream */
		/* has not been started */
		if (runtime->status->state == SNDRV_PCM_STATE_PREPARED)
			return -EAGAIN;
	}
	return ret;
}

/*
 * Blocking-read counterpart of snd_pcm_oss_write3(); additionally
 * drains/restarts around XRUN and fixes up capture overruns.
 */
snd_pcm_sframes_t snd_pcm_oss_read3(struct snd_pcm_substream *substream,
				    char *ptr, snd_pcm_uframes_t frames,
				    int in_kernel)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	snd_pcm_sframes_t delay;
	int ret;
	while (1) {
		if (runtime->status->state == SNDRV_PCM_STATE_XRUN ||
		    runtime->status->state == SNDRV_PCM_STATE_SUSPENDED) {
#ifdef OSS_DEBUG
			if (runtime->status->state == SNDRV_PCM_STATE_XRUN)
				printk(KERN_DEBUG "pcm_oss: read: "
				       "recovering from XRUN\n");
			else
				printk(KERN_DEBUG "pcm_oss: read: "
				       "recovering from SUSPEND\n");
#endif
			ret = snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_DRAIN, NULL);
			if (ret < 0)
				break;
		} else if (runtime->status->state == SNDRV_PCM_STATE_SETUP) {
			ret = snd_pcm_oss_prepare(substream);
			if (ret < 0)
				break;
		}
		ret = snd_pcm_oss_capture_position_fixup(substream, &delay);
		if (ret < 0)
			break;
		if (in_kernel) {
			mm_segment_t fs;
			fs = snd_enter_user();
			ret = snd_pcm_lib_read(substream, (void __force __user *)ptr, frames);
			snd_leave_user(fs);
		} else {
			ret = snd_pcm_lib_read(substream, (void __force __user *)ptr, frames);
		}
		if (ret == -EPIPE) {
			if (runtime->status->state == SNDRV_PCM_STATE_DRAINING) {
				/* drained dry: stop and retry */
				ret = snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_DROP, NULL);
				if (ret < 0)
					break;
			}
			continue;
		}
		if (ret != -ESTRPIPE)
			break;
	}
	return ret;
}

/*
 * Vector (non-interleaved) variant of snd_pcm_oss_write3().
 */
snd_pcm_sframes_t snd_pcm_oss_writev3(struct snd_pcm_substream *substream,
				      void **bufs, snd_pcm_uframes_t frames,
				      int in_kernel)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	int ret;
	while (1) {
		if (runtime->status->state == SNDRV_PCM_STATE_XRUN ||
		    runtime->status->state == SNDRV_PCM_STATE_SUSPENDED) {
#ifdef OSS_DEBUG
			if (runtime->status->state == SNDRV_PCM_STATE_XRUN)
				printk(KERN_DEBUG "pcm_oss: writev: "
				       "recovering from XRUN\n");
			else
				printk(KERN_DEBUG "pcm_oss: writev: "
				       "recovering from SUSPEND\n");
#endif
			ret = snd_pcm_oss_prepare(substream);
			if (ret < 0)
				break;
		}
		if (in_kernel) {
			mm_segment_t fs;
			fs = snd_enter_user();
			ret = snd_pcm_lib_writev(substream, (void __user **)bufs, frames);
			snd_leave_user(fs);
		} else {
			ret = snd_pcm_lib_writev(substream, (void __user **)bufs, frames);
		}
		if (ret != -EPIPE && ret != -ESTRPIPE)
			break;
		/* test, if we can't store new data, because the stream */
		/* has not been started */
		if (runtime->status->state == SNDRV_PCM_STATE_PREPARED)
			return -EAGAIN;
	}
	return ret;
}

/*
 * Vector (non-interleaved) variant of snd_pcm_oss_read3().
 */
snd_pcm_sframes_t snd_pcm_oss_readv3(struct snd_pcm_substream *substream,
				     void **bufs, snd_pcm_uframes_t frames,
				     int in_kernel)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	int ret;
	while (1) {
		if (runtime->status->state == SNDRV_PCM_STATE_XRUN ||
		    runtime->status->state == SNDRV_PCM_STATE_SUSPENDED) {
#ifdef OSS_DEBUG
			if (runtime->status->state == SNDRV_PCM_STATE_XRUN)
				printk(KERN_DEBUG "pcm_oss: readv: "
				       "recovering from XRUN\n");
			else
				printk(KERN_DEBUG "pcm_oss: readv: "
				       "recovering from SUSPEND\n");
#endif
			ret = snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_DRAIN, NULL);
			if (ret < 0)
				break;
		} else if (runtime->status->state == SNDRV_PCM_STATE_SETUP) {
			ret = snd_pcm_oss_prepare(substream);
			if (ret < 0)
				break;
		}
		if (in_kernel) {
			mm_segment_t fs;
			fs = snd_enter_user();
			ret = snd_pcm_lib_readv(substream, (void __user **)bufs, frames);
			snd_leave_user(fs);
		} else {
			ret = snd_pcm_lib_readv(substream, (void __user **)bufs, frames);
		}
		if (ret != -EPIPE && ret != -ESTRPIPE)
			break;
	}
	return ret;
}

/*
 * Mid-layer write: push @bytes through the plugin chain when one is
 * configured, otherwise straight to snd_pcm_oss_write3().
 * Returns bytes actually consumed or a negative error.
 * (Body continues on the next chunk line.)
 */
static ssize_t snd_pcm_oss_write2(struct snd_pcm_substream *substream,
				  const char *buf, size_t bytes, int in_kernel)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	snd_pcm_sframes_t frames, frames1;
#ifdef CONFIG_SND_PCM_OSS_PLUGINS
	if (runtime->oss.plugin_first) {
		struct snd_pcm_plugin_channel *channels;
		size_t oss_frame_bytes = (runtime->oss.plugin_first->src_width *
					  runtime->oss.plugin_first->src_format.channels) / 8;
		if (!in_kernel) {
			/* stage user data into the kernel-side OSS buffer */
			if (copy_from_user(runtime->oss.buffer,
					   (const char __force __user *)buf,
					   bytes))
				return -EFAULT;
			buf = runtime->oss.buffer;
		}
		frames = bytes / oss_frame_bytes;
		frames1 = snd_pcm_plug_client_channels_buf(substream, (char *)buf, frames,
&channels);
		/* continuation of snd_pcm_oss_write2(): plugin path */
		if (frames1 < 0)
			return frames1;
		frames1 = snd_pcm_plug_write_transfer(substream, channels, frames1);
		if (frames1 <= 0)
			return frames1;
		bytes = frames1 * oss_frame_bytes;
	} else
#endif
	{
		frames = bytes_to_frames(runtime, bytes);
		frames1 = snd_pcm_oss_write3(substream, buf, frames, in_kernel);
		if (frames1 <= 0)
			return frames1;
		bytes = frames_to_bytes(runtime, frames1);
	}
	return bytes;
}

/*
 * Top-level OSS write: accumulate user data into the fragment staging
 * buffer and flush full fragments (or partial ones, per setup flag)
 * through snd_pcm_oss_write2().  Full-fragment-aligned tails bypass the
 * staging buffer.  Serialized by oss.params_lock; returns bytes written
 * or, if nothing was written, the first error.
 */
static ssize_t snd_pcm_oss_write1(struct snd_pcm_substream *substream,
				  const char __user *buf, size_t bytes)
{
	size_t xfer = 0;
	ssize_t tmp;
	struct snd_pcm_runtime *runtime = substream->runtime;

	if (atomic_read(&substream->mmap_count))
		return -ENXIO;	/* writes forbidden while mmapped */

	if ((tmp = snd_pcm_oss_make_ready(substream)) < 0)
		return tmp;
	mutex_lock(&runtime->oss.params_lock);
	while (bytes > 0) {
		if (bytes < runtime->oss.period_bytes || runtime->oss.buffer_used > 0) {
			/* partial fragment: append to the staging buffer */
			tmp = bytes;
			if (tmp + runtime->oss.buffer_used > runtime->oss.period_bytes)
				tmp = runtime->oss.period_bytes - runtime->oss.buffer_used;
			if (tmp > 0) {
				if (copy_from_user(runtime->oss.buffer + runtime->oss.buffer_used, buf, tmp)) {
					tmp = -EFAULT;
					goto err;
				}
			}
			runtime->oss.buffer_used += tmp;
			buf += tmp;
			bytes -= tmp;
			xfer += tmp;
			if (substream->oss.setup.partialfrag ||
			    runtime->oss.buffer_used == runtime->oss.period_bytes) {
				tmp = snd_pcm_oss_write2(substream, runtime->oss.buffer + runtime->oss.period_ptr,
							 runtime->oss.buffer_used - runtime->oss.period_ptr, 1);
				if (tmp <= 0)
					goto err;
				runtime->oss.bytes += tmp;
				runtime->oss.period_ptr += tmp;
				runtime->oss.period_ptr %= runtime->oss.period_bytes;
				if (runtime->oss.period_ptr == 0 ||
				    runtime->oss.period_ptr == runtime->oss.buffer_used)
					runtime->oss.buffer_used = 0;
				else if ((substream->f_flags & O_NONBLOCK) != 0) {
					tmp = -EAGAIN;
					goto err;
				}
			}
		} else {
			/* whole fragments: write user memory directly */
			tmp = snd_pcm_oss_write2(substream,
						 (const char __force *)buf,
						 runtime->oss.period_bytes, 0);
			if (tmp <= 0)
				goto err;
			runtime->oss.bytes += tmp;
			buf += tmp;
			bytes -= tmp;
			xfer += tmp;
			if ((substream->f_flags & O_NONBLOCK) != 0 &&
			    tmp != runtime->oss.period_bytes)
				break;
		}
	}
	mutex_unlock(&runtime->oss.params_lock);
	return xfer;

 err:
	mutex_unlock(&runtime->oss.params_lock);
	return xfer > 0 ? (snd_pcm_sframes_t)xfer : tmp;
}

/*
 * Mid-layer read: pull @bytes through the plugin chain when one is
 * configured, otherwise straight from snd_pcm_oss_read3().
 * Returns bytes produced or a negative error.
 */
static ssize_t snd_pcm_oss_read2(struct snd_pcm_substream *substream,
				 char *buf, size_t bytes, int in_kernel)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	snd_pcm_sframes_t frames, frames1;
#ifdef CONFIG_SND_PCM_OSS_PLUGINS
	char __user *final_dst = (char __force __user *)buf;
	if (runtime->oss.plugin_first) {
		struct snd_pcm_plugin_channel *channels;
		size_t oss_frame_bytes = (runtime->oss.plugin_last->dst_width *
					  runtime->oss.plugin_last->dst_format.channels) / 8;
		if (!in_kernel)
			buf = runtime->oss.buffer;	/* convert in kernel space */
		frames = bytes / oss_frame_bytes;
		frames1 = snd_pcm_plug_client_channels_buf(substream, buf, frames, &channels);
		if (frames1 < 0)
			return frames1;
		frames1 = snd_pcm_plug_read_transfer(substream, channels, frames1);
		if (frames1 <= 0)
			return frames1;
		bytes = frames1 * oss_frame_bytes;
		if (!in_kernel && copy_to_user(final_dst, buf, bytes))
			return -EFAULT;
	} else
#endif
	{
		frames = bytes_to_frames(runtime, bytes);
		frames1 = snd_pcm_oss_read3(substream, buf, frames, in_kernel);
		if (frames1 <= 0)
			return frames1;
		bytes = frames_to_bytes(runtime, frames1);
	}
	return bytes;
}

/*
 * Top-level OSS read, mirror of snd_pcm_oss_write1(): sub-fragment
 * requests are served from the staging buffer (refilled one fragment at
 * a time), fragment-aligned requests read into user memory directly.
 */
static ssize_t snd_pcm_oss_read1(struct snd_pcm_substream *substream,
				 char __user *buf, size_t bytes)
{
	size_t xfer = 0;
	ssize_t tmp;
	struct snd_pcm_runtime *runtime = substream->runtime;

	if (atomic_read(&substream->mmap_count))
		return -ENXIO;

	if ((tmp = snd_pcm_oss_make_ready(substream)) < 0)
		return tmp;
	mutex_lock(&runtime->oss.params_lock);
	while (bytes > 0) {
		if (bytes < runtime->oss.period_bytes || runtime->oss.buffer_used > 0) {
			if (runtime->oss.buffer_used == 0) {
				/* refill the staging buffer with one fragment */
				tmp = snd_pcm_oss_read2(substream, runtime->oss.buffer,
							runtime->oss.period_bytes, 1);
				if (tmp <= 0)
					goto err;
				runtime->oss.bytes += tmp;
				runtime->oss.period_ptr = tmp;
				runtime->oss.buffer_used = tmp;
			}
			tmp = bytes;
			if ((size_t) tmp > runtime->oss.buffer_used)
				tmp = runtime->oss.buffer_used;
			if (copy_to_user(buf,
					 runtime->oss.buffer + (runtime->oss.period_ptr - runtime->oss.buffer_used),
					 tmp)) {
				tmp = -EFAULT;
				goto err;
			}
			buf += tmp;
			bytes -= tmp;
			xfer += tmp;
			runtime->oss.buffer_used -= tmp;
		} else {
			tmp = snd_pcm_oss_read2(substream, (char __force *)buf,
						runtime->oss.period_bytes, 0);
			if (tmp <= 0)
				goto err;
			runtime->oss.bytes += tmp;
			buf += tmp;
			bytes -= tmp;
			xfer += tmp;
		}
	}
	mutex_unlock(&runtime->oss.params_lock);
	return xfer;

 err:
	mutex_unlock(&runtime->oss.params_lock);
	return xfer > 0 ? (snd_pcm_sframes_t)xfer : tmp;
}

/*
 * SNDCTL_DSP_RESET: drop both streams and reset OSS state so the next
 * access re-prepares them.
 */
static int snd_pcm_oss_reset(struct snd_pcm_oss_file *pcm_oss_file)
{
	struct snd_pcm_substream *substream;
	struct snd_pcm_runtime *runtime;
	int i;

	for (i = 0; i < 2; i++) {
		substream = pcm_oss_file->streams[i];
		if (!substream)
			continue;
		runtime = substream->runtime;
		snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_DROP, NULL);
		runtime->oss.prepare = 1;
		runtime->oss.buffer_used = 0;
		runtime->oss.prev_hw_ptr_period = 0;
		runtime->oss.period_ptr = 0;
	}
	return 0;
}

/*
 * SNDCTL_DSP_POST: kick off playback if possible; start errors are
 * deliberately ignored (OSS applications cannot handle them).
 */
static int snd_pcm_oss_post(struct snd_pcm_oss_file *pcm_oss_file)
{
	struct snd_pcm_substream *substream;
	int err;

	substream = pcm_oss_file->streams[SNDRV_PCM_STREAM_PLAYBACK];
	if (substream != NULL) {
		if ((err = snd_pcm_oss_make_ready(substream)) < 0)
			return err;
		snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_START, NULL);
	}
	/* note: all errors from the start action are ignored */
	/* OSS apps do not know, how to handle them */
	return 0;
}

/*
 * Flush the residual @size bytes held in the staging buffer before a
 * sync/drain, sleeping on the runtime waitqueue between attempts.
 * NOTE(review): function is truncated at the end of this chunk; the
 * remainder of its body lies outside the visible range.
 */
static int snd_pcm_oss_sync1(struct snd_pcm_substream *substream, size_t size)
{
	struct snd_pcm_runtime *runtime;
	ssize_t result = 0;
	snd_pcm_state_t state;
	long res;
	wait_queue_t wait;

	runtime = substream->runtime;
	init_waitqueue_entry(&wait, current);
	add_wait_queue(&runtime->sleep, &wait);
#ifdef OSS_DEBUG
	printk(KERN_DEBUG "sync1: size = %li\n", size);
#endif
	while (1) {
		result = snd_pcm_oss_write2(substream, runtime->oss.buffer, size, 1);
		if (result > 0) {
runtime->oss.buffer_used = 0; result = 0; break; } if (result != 0 && result != -EAGAIN) break; result = 0; set_current_state(TASK_INTERRUPTIBLE); snd_pcm_stream_lock_irq(substream); state = runtime->status->state; snd_pcm_stream_unlock_irq(substream); if (state != SNDRV_PCM_STATE_RUNNING) { set_current_state(TASK_RUNNING); break; } res = schedule_timeout(10 * HZ); if (signal_pending(current)) { result = -ERESTARTSYS; break; } if (res == 0) { snd_printk(KERN_ERR "OSS sync error - DMA timeout\n"); result = -EIO; break; } } remove_wait_queue(&runtime->sleep, &wait); return result; } static int snd_pcm_oss_sync(struct snd_pcm_oss_file *pcm_oss_file) { int err = 0; unsigned int saved_f_flags; struct snd_pcm_substream *substream; struct snd_pcm_runtime *runtime; snd_pcm_format_t format; unsigned long width; size_t size; substream = pcm_oss_file->streams[SNDRV_PCM_STREAM_PLAYBACK]; if (substream != NULL) { runtime = substream->runtime; if (atomic_read(&substream->mmap_count)) goto __direct; if ((err = snd_pcm_oss_make_ready(substream)) < 0) return err; format = snd_pcm_oss_format_from(runtime->oss.format); width = snd_pcm_format_physical_width(format); mutex_lock(&runtime->oss.params_lock); if (runtime->oss.buffer_used > 0) { #ifdef OSS_DEBUG printk(KERN_DEBUG "sync: buffer_used\n"); #endif size = (8 * (runtime->oss.period_bytes - runtime->oss.buffer_used) + 7) / width; snd_pcm_format_set_silence(format, runtime->oss.buffer + runtime->oss.buffer_used, size); err = snd_pcm_oss_sync1(substream, runtime->oss.period_bytes); if (err < 0) { mutex_unlock(&runtime->oss.params_lock); return err; } } else if (runtime->oss.period_ptr > 0) { #ifdef OSS_DEBUG printk(KERN_DEBUG "sync: period_ptr\n"); #endif size = runtime->oss.period_bytes - runtime->oss.period_ptr; snd_pcm_format_set_silence(format, runtime->oss.buffer, size * 8 / width); err = snd_pcm_oss_sync1(substream, size); if (err < 0) { mutex_unlock(&runtime->oss.params_lock); return err; } } /* * The ALSA's period might be a 
bit larger than the OSS one. * Fill the remaining portion of the ALSA period with zeros. */
runtime->oss.rate = rate;
		}
	}
	return snd_pcm_oss_get_rate(pcm_oss_file);
}

/*
 * Return the OSS sample rate currently recorded for the active substream,
 * or a negative error code when no substream is available.
 */
static int snd_pcm_oss_get_rate(struct snd_pcm_oss_file *pcm_oss_file)
{
	struct snd_pcm_substream *substream;
	int err;

	if ((err = snd_pcm_oss_get_active_substream(pcm_oss_file, &substream)) < 0)
		return err;
	return substream->runtime->oss.rate;
}

/*
 * Record a new OSS channel count for both directions.  Values below 1 are
 * clamped to 1, values above 128 rejected with -EINVAL.  The change is
 * applied lazily: oss.params is set so the hw params get rebuilt later.
 */
static int snd_pcm_oss_set_channels(struct snd_pcm_oss_file *pcm_oss_file, unsigned int channels)
{
	int idx;

	if (channels < 1)
		channels = 1;
	if (channels > 128)
		return -EINVAL;
	/* iterate over both substream directions (idx = 1 down to 0) */
	for (idx = 1; idx >= 0; --idx) {
		struct snd_pcm_substream *substream = pcm_oss_file->streams[idx];
		struct snd_pcm_runtime *runtime;
		if (substream == NULL)
			continue;
		runtime = substream->runtime;
		if (runtime->oss.channels != channels) {
			runtime->oss.params = 1;	/* force lazy params rebuild */
			runtime->oss.channels = channels;
		}
	}
	return snd_pcm_oss_get_channels(pcm_oss_file);
}

/*
 * Return the OSS channel count recorded for the active substream.
 */
static int snd_pcm_oss_get_channels(struct snd_pcm_oss_file *pcm_oss_file)
{
	struct snd_pcm_substream *substream;
	int err;

	if ((err = snd_pcm_oss_get_active_substream(pcm_oss_file, &substream)) < 0)
		return err;
	return substream->runtime->oss.channels;
}

/*
 * Return the OSS fragment (period) size in bytes for the active substream
 * (SNDCTL_DSP_GETBLKSIZE).
 */
static int snd_pcm_oss_get_block_size(struct snd_pcm_oss_file *pcm_oss_file)
{
	struct snd_pcm_substream *substream;
	int err;

	if ((err = snd_pcm_oss_get_active_substream(pcm_oss_file, &substream)) < 0)
		return err;
	return substream->runtime->oss.period_bytes;
}

/*
 * Build the AFMT_* bitmask of sample formats available to the caller
 * (SNDCTL_DSP_GETFMTS).  In non-direct mode a fixed list of formats is
 * reported without consulting the hardware (presumably format conversion
 * is handled by the OSS plugin layer); in direct mode — which mmap always
 * implies — the mask is derived from the hardware-refined format mask.
 */
static int snd_pcm_oss_get_formats(struct snd_pcm_oss_file *pcm_oss_file)
{
	struct snd_pcm_substream *substream;
	int err;
	int direct;
	struct snd_pcm_hw_params *params;
	unsigned int formats = 0;
	struct snd_mask format_mask;
	int fmt;

	if ((err = snd_pcm_oss_get_active_substream(pcm_oss_file, &substream)) < 0)
		return err;
	if (atomic_read(&substream->mmap_count))
		direct = 1;	/* mmapped streams always use direct mode */
	else
		direct = substream->oss.setup.direct;
	if (!direct)
		return AFMT_MU_LAW | AFMT_U8 |
		       AFMT_S16_LE | AFMT_S16_BE |
		       AFMT_S8 | AFMT_U16_LE |
		       AFMT_U16_BE |
		       AFMT_S32_LE | AFMT_S32_BE |
		       AFMT_S24_LE | AFMT_S24_BE |
		       AFMT_S24_PACKED;
	params = kmalloc(sizeof(*params),
GFP_KERNEL); if (!params) return -ENOMEM; _snd_pcm_hw_params_any(params); err = snd_pcm_hw_refine(substream, params); format_mask = *hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT); kfree(params); if (err < 0) return err; for (fmt = 0; fmt < 32; ++fmt) { if (snd_mask_test(&format_mask, fmt)) { int f = snd_pcm_oss_format_to(fmt); if (f >= 0) formats |= f; } } return formats; } static int snd_pcm_oss_set_format(struct snd_pcm_oss_file *pcm_oss_file, int format) { int formats, idx; if (format != AFMT_QUERY) { formats = snd_pcm_oss_get_formats(pcm_oss_file); if (formats < 0) return formats; if (!(formats & format)) format = AFMT_U8; for (idx = 1; idx >= 0; --idx) { struct snd_pcm_substream *substream = pcm_oss_file->streams[idx]; struct snd_pcm_runtime *runtime; if (substream == NULL) continue; runtime = substream->runtime; if (runtime->oss.format != format) { runtime->oss.params = 1; runtime->oss.format = format; } } } return snd_pcm_oss_get_format(pcm_oss_file); } static int snd_pcm_oss_get_format(struct snd_pcm_oss_file *pcm_oss_file) { struct snd_pcm_substream *substream; int err; if ((err = snd_pcm_oss_get_active_substream(pcm_oss_file, &substream)) < 0) return err; return substream->runtime->oss.format; } static int snd_pcm_oss_set_subdivide1(struct snd_pcm_substream *substream, int subdivide) { struct snd_pcm_runtime *runtime; if (substream == NULL) return 0; runtime = substream->runtime; if (subdivide == 0) { subdivide = runtime->oss.subdivision; if (subdivide == 0) subdivide = 1; return subdivide; } if (runtime->oss.subdivision || runtime->oss.fragshift) return -EINVAL; if (subdivide != 1 && subdivide != 2 && subdivide != 4 && subdivide != 8 && subdivide != 16) return -EINVAL; runtime->oss.subdivision = subdivide; runtime->oss.params = 1; return subdivide; } static int snd_pcm_oss_set_subdivide(struct snd_pcm_oss_file *pcm_oss_file, int subdivide) { int err = -EINVAL, idx; for (idx = 1; idx >= 0; --idx) { struct snd_pcm_substream *substream = 
pcm_oss_file->streams[idx];
		if (substream == NULL)
			continue;
		if ((err = snd_pcm_oss_set_subdivide1(substream, subdivide)) < 0)
			return err;
	}
	return err;
}

/*
 * Apply an OSS SNDCTL_DSP_SETFRAGMENT request to one substream: the low
 * 16 bits of val encode the fragment size as a power of two (fragshift),
 * the high 16 bits the maximum number of fragments.  The request is only
 * recorded here; hw params are rebuilt lazily via oss.params.
 */
static int snd_pcm_oss_set_fragment1(struct snd_pcm_substream *substream, unsigned int val)
{
	struct snd_pcm_runtime *runtime;

	if (substream == NULL)
		return 0;
	runtime = substream->runtime;
	/* subdivision and fragment settings are mutually exclusive and
	 * may only be set once */
	if (runtime->oss.subdivision || runtime->oss.fragshift)
		return -EINVAL;
	runtime->oss.fragshift = val & 0xffff;
	runtime->oss.maxfrags = (val >> 16) & 0xffff;
	if (runtime->oss.fragshift < 4)		/* < 16 */
		runtime->oss.fragshift = 4;
	if (runtime->oss.maxfrags < 2)
		runtime->oss.maxfrags = 2;
	runtime->oss.params = 1;
	return 0;
}

/*
 * Apply a SETFRAGMENT request to both directions; returns the first
 * error, or -EINVAL when no substream is open at all.
 */
static int snd_pcm_oss_set_fragment(struct snd_pcm_oss_file *pcm_oss_file, unsigned int val)
{
	int err = -EINVAL, idx;

	for (idx = 1; idx >= 0; --idx) {
		struct snd_pcm_substream *substream = pcm_oss_file->streams[idx];
		if (substream == NULL)
			continue;
		if ((err = snd_pcm_oss_set_fragment1(substream, val)) < 0)
			return err;
	}
	return err;
}

/*
 * SNDCTL_DSP_NONBLOCK: switch the file to non-blocking mode.
 * f_lock protects f_flags against concurrent fcntl() updates.
 */
static int snd_pcm_oss_nonblock(struct file * file)
{
	spin_lock(&file->f_lock);
	file->f_flags |= O_NONBLOCK;
	spin_unlock(&file->f_lock);
	return 0;
}

/*
 * Adjust the capability bitmask for one substream: drop DSP_CAP_DUPLEX
 * when the direction is missing, add DSP_CAP_MULTI for multi-substream
 * playback devices.
 */
static int snd_pcm_oss_get_caps1(struct snd_pcm_substream *substream, int res)
{
	if (substream == NULL) {
		res &= ~DSP_CAP_DUPLEX;
		return res;
	}
#ifdef DSP_CAP_MULTI
	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
		if (substream->pstr->substream_count > 1)
			res |= DSP_CAP_MULTI;
#endif
	/* DSP_CAP_REALTIME is set at all times: */
	/* all ALSA drivers can return the actual pointer in the ring buffer */
#if defined(DSP_CAP_REALTIME) && 0
	{
		struct snd_pcm_runtime *runtime = substream->runtime;
		if (runtime->info & (SNDRV_PCM_INFO_BLOCK_TRANSFER|SNDRV_PCM_INFO_BATCH))
			res &= ~DSP_CAP_REALTIME;
	}
#endif
	return res;
}

/*
 * SNDCTL_DSP_GETCAPS: combined capability bitmask for the file.
 */
static int snd_pcm_oss_get_caps(struct snd_pcm_oss_file *pcm_oss_file)
{
	int result, idx;

	result = DSP_CAP_TRIGGER | DSP_CAP_MMAP	| DSP_CAP_DUPLEX | DSP_CAP_REALTIME;
	for (idx = 0; idx < 2; idx++) {
		struct snd_pcm_substream *substream =
pcm_oss_file->streams[idx]; result = snd_pcm_oss_get_caps1(substream, result); } result |= 0x0001; /* revision - same as SB AWE 64 */ return result; } static void snd_pcm_oss_simulate_fill(struct snd_pcm_substream *substream, snd_pcm_uframes_t hw_ptr) { struct snd_pcm_runtime *runtime = substream->runtime; snd_pcm_uframes_t appl_ptr; appl_ptr = hw_ptr + runtime->buffer_size; appl_ptr %= runtime->boundary; runtime->control->appl_ptr = appl_ptr; } static int snd_pcm_oss_set_trigger(struct snd_pcm_oss_file *pcm_oss_file, int trigger) { struct snd_pcm_runtime *runtime; struct snd_pcm_substream *psubstream = NULL, *csubstream = NULL; int err, cmd; #ifdef OSS_DEBUG printk(KERN_DEBUG "pcm_oss: trigger = 0x%x\n", trigger); #endif psubstream = pcm_oss_file->streams[SNDRV_PCM_STREAM_PLAYBACK]; csubstream = pcm_oss_file->streams[SNDRV_PCM_STREAM_CAPTURE]; if (psubstream) { if ((err = snd_pcm_oss_make_ready(psubstream)) < 0) return err; } if (csubstream) { if ((err = snd_pcm_oss_make_ready(csubstream)) < 0) return err; } if (psubstream) { runtime = psubstream->runtime; if (trigger & PCM_ENABLE_OUTPUT) { if (runtime->oss.trigger) goto _skip1; if (atomic_read(&psubstream->mmap_count)) snd_pcm_oss_simulate_fill(psubstream, get_hw_ptr_period(runtime)); runtime->oss.trigger = 1; runtime->start_threshold = 1; cmd = SNDRV_PCM_IOCTL_START; } else { if (!runtime->oss.trigger) goto _skip1; runtime->oss.trigger = 0; runtime->start_threshold = runtime->boundary; cmd = SNDRV_PCM_IOCTL_DROP; runtime->oss.prepare = 1; } err = snd_pcm_kernel_ioctl(psubstream, cmd, NULL); if (err < 0) return err; } _skip1: if (csubstream) { runtime = csubstream->runtime; if (trigger & PCM_ENABLE_INPUT) { if (runtime->oss.trigger) goto _skip2; runtime->oss.trigger = 1; runtime->start_threshold = 1; cmd = SNDRV_PCM_IOCTL_START; } else { if (!runtime->oss.trigger) goto _skip2; runtime->oss.trigger = 0; runtime->start_threshold = runtime->boundary; cmd = SNDRV_PCM_IOCTL_DROP; runtime->oss.prepare = 1; } err = 
snd_pcm_kernel_ioctl(csubstream, cmd, NULL);
		if (err < 0)
			return err;
	}
 _skip2:
	return 0;
}

/*
 * SNDCTL_DSP_GETTRIGGER: report PCM_ENABLE_OUTPUT/PCM_ENABLE_INPUT from
 * the recorded trigger state of each direction.
 */
static int snd_pcm_oss_get_trigger(struct snd_pcm_oss_file *pcm_oss_file)
{
	struct snd_pcm_substream *psubstream = NULL, *csubstream = NULL;
	int result = 0;

	psubstream = pcm_oss_file->streams[SNDRV_PCM_STREAM_PLAYBACK];
	csubstream = pcm_oss_file->streams[SNDRV_PCM_STREAM_CAPTURE];
	if (psubstream && psubstream->runtime && psubstream->runtime->oss.trigger)
		result |= PCM_ENABLE_OUTPUT;
	if (csubstream && csubstream->runtime && csubstream->runtime->oss.trigger)
		result |= PCM_ENABLE_INPUT;
	return result;
}

/*
 * SNDCTL_DSP_GETODELAY: number of bytes still queued for playback.
 * Returns 0 while params/prepare are pending; an XRUN (-EPIPE) from the
 * delay ioctl is also reported as zero delay rather than an error.
 */
static int snd_pcm_oss_get_odelay(struct snd_pcm_oss_file *pcm_oss_file)
{
	struct snd_pcm_substream *substream;
	struct snd_pcm_runtime *runtime;
	snd_pcm_sframes_t delay;
	int err;

	substream = pcm_oss_file->streams[SNDRV_PCM_STREAM_PLAYBACK];
	if (substream == NULL)
		return -EINVAL;
	if ((err = snd_pcm_oss_make_ready(substream)) < 0)
		return err;
	runtime = substream->runtime;
	if (runtime->oss.params || runtime->oss.prepare)
		return 0;
	err = snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_DELAY, &delay);
	if (err == -EPIPE)
		delay = 0;	/* hack for broken OSS applications */
	else if (err < 0)
		return err;
	return snd_pcm_oss_bytes(substream, delay);
}

/*
 * SNDCTL_DSP_GETIPTR/GETOPTR: fill a count_info structure for the given
 * stream direction and copy it to user space.
 */
static int snd_pcm_oss_get_ptr(struct snd_pcm_oss_file *pcm_oss_file, int stream, struct count_info __user * _info)
{
	struct snd_pcm_substream *substream;
	struct snd_pcm_runtime *runtime;
	snd_pcm_sframes_t delay;
	int fixup;
	struct count_info info;
	int err;

	if (_info == NULL)
		return -EFAULT;
	substream = pcm_oss_file->streams[stream];
	if (substream == NULL)
		return -EINVAL;
	if ((err = snd_pcm_oss_make_ready(substream)) < 0)
		return err;
	runtime = substream->runtime;
	if (runtime->oss.params || runtime->oss.prepare) {
		/* not yet configured: report all-zero counters */
		memset(&info, 0, sizeof(info));
		if (copy_to_user(_info, &info, sizeof(info)))
			return -EFAULT;
		return 0;
	}
	if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
		err = snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_DELAY, &delay);
		if (err == -EPIPE 
|| err == -ESTRPIPE || (! err && delay < 0)) { err = 0; delay = 0; fixup = 0; } else { fixup = runtime->oss.buffer_used; } } else { err = snd_pcm_oss_capture_position_fixup(substream, &delay); fixup = -runtime->oss.buffer_used; } if (err < 0) return err; info.ptr = snd_pcm_oss_bytes(substream, runtime->status->hw_ptr % runtime->buffer_size); if (atomic_read(&substream->mmap_count)) { snd_pcm_sframes_t n; delay = get_hw_ptr_period(runtime); n = delay - runtime->oss.prev_hw_ptr_period; if (n < 0) n += runtime->boundary; info.blocks = n / runtime->period_size; runtime->oss.prev_hw_ptr_period = delay; if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) snd_pcm_oss_simulate_fill(substream, delay); info.bytes = snd_pcm_oss_bytes(substream, runtime->status->hw_ptr) & INT_MAX; } else { delay = snd_pcm_oss_bytes(substream, delay); if (stream == SNDRV_PCM_STREAM_PLAYBACK) { if (substream->oss.setup.buggyptr) info.blocks = (runtime->oss.buffer_bytes - delay - fixup) / runtime->oss.period_bytes; else info.blocks = (delay + fixup) / runtime->oss.period_bytes; info.bytes = (runtime->oss.bytes - delay) & INT_MAX; } else { delay += fixup; info.blocks = delay / runtime->oss.period_bytes; info.bytes = (runtime->oss.bytes + delay) & INT_MAX; } } if (copy_to_user(_info, &info, sizeof(info))) return -EFAULT; return 0; } static int snd_pcm_oss_get_space(struct snd_pcm_oss_file *pcm_oss_file, int stream, struct audio_buf_info __user *_info) { struct snd_pcm_substream *substream; struct snd_pcm_runtime *runtime; snd_pcm_sframes_t avail; int fixup; struct audio_buf_info info; int err; if (_info == NULL) return -EFAULT; substream = pcm_oss_file->streams[stream]; if (substream == NULL) return -EINVAL; runtime = substream->runtime; if (runtime->oss.params && (err = snd_pcm_oss_change_params(substream)) < 0) return err; info.fragsize = runtime->oss.period_bytes; info.fragstotal = runtime->periods; if (runtime->oss.prepare) { if (stream == SNDRV_PCM_STREAM_PLAYBACK) { info.bytes = 
runtime->oss.period_bytes * runtime->oss.periods;
			info.fragments = runtime->oss.periods;
		} else {
			info.bytes = 0;
			info.fragments = 0;
		}
	} else {
		if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
			err = snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_DELAY, &avail);
			if (err == -EPIPE || err == -ESTRPIPE || (! err && avail < 0)) {
				/* XRUN, suspend or bogus delay: report a full buffer */
				avail = runtime->buffer_size;
				err = 0;
				fixup = 0;
			} else {
				avail = runtime->buffer_size - avail;
				fixup = -runtime->oss.buffer_used;
			}
		} else {
			err = snd_pcm_oss_capture_position_fixup(substream, &avail);
			fixup = runtime->oss.buffer_used;
		}
		if (err < 0)
			return err;
		info.bytes = snd_pcm_oss_bytes(substream, avail) + fixup;
		info.fragments = info.bytes / runtime->oss.period_bytes;
	}
#ifdef OSS_DEBUG
	printk(KERN_DEBUG "pcm_oss: space: bytes = %i, fragments = %i, "
	       "fragstotal = %i, fragsize = %i\n",
	       info.bytes, info.fragments, info.fragstotal, info.fragsize);
#endif
	if (copy_to_user(_info, &info, sizeof(info)))
		return -EFAULT;
	return 0;
}

/*
 * SNDCTL_DSP_MAPINBUF/MAPOUTBUF: not supported.
 */
static int snd_pcm_oss_get_mapbuf(struct snd_pcm_oss_file *pcm_oss_file, int stream, struct buffmem_desc __user * _info)
{
	// it won't be probably implemented
	// snd_printd("TODO: snd_pcm_oss_get_mapbuf\n");
	return -EINVAL;
}

/*
 * Return a pointer just past the last '/' in path, or NULL when path
 * contains no '/'.
 */
static const char *strip_task_path(const char *path)
{
	const char *ptr, *ptrl = NULL;

	for (ptr = path; *ptr; ptr++) {
		if (*ptr == '/')
			ptrl = ptr + 1;
	}
	return ptrl;
}

/*
 * Look up the per-task OSS setup entry matching task_name, retrying with
 * the leading path stripped.  On a match the entry is copied to *rsetup;
 * otherwise *rsetup is left untouched.
 */
static void snd_pcm_oss_look_for_setup(struct snd_pcm *pcm, int stream, const char *task_name, struct snd_pcm_oss_setup *rsetup)
{
	struct snd_pcm_oss_setup *setup;

	mutex_lock(&pcm->streams[stream].oss.setup_mutex);
	do {
		for (setup = pcm->streams[stream].oss.setup_list; setup;
		     setup = setup->next) {
			if (!strcmp(setup->task_name, task_name))
				goto out;
		}
	} while ((task_name = strip_task_path(task_name)) != NULL);
 out:
	/* setup is NULL here when the search was exhausted */
	if (setup)
		*rsetup = *setup;
	mutex_unlock(&pcm->streams[stream].oss.setup_mutex);
}

/*
 * Substream release hook: free the OSS staging buffer and (if built)
 * the conversion plugin chain, then clear the OSS-in-use flag.
 */
static void snd_pcm_oss_release_substream(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime;
	runtime =
substream->runtime; vfree(runtime->oss.buffer); runtime->oss.buffer = NULL; #ifdef CONFIG_SND_PCM_OSS_PLUGINS snd_pcm_oss_plugin_clear(substream); #endif substream->oss.oss = 0; } static void snd_pcm_oss_init_substream(struct snd_pcm_substream *substream, struct snd_pcm_oss_setup *setup, int minor) { struct snd_pcm_runtime *runtime; substream->oss.oss = 1; substream->oss.setup = *setup; if (setup->nonblock) substream->f_flags |= O_NONBLOCK; else if (setup->block) substream->f_flags &= ~O_NONBLOCK; runtime = substream->runtime; runtime->oss.params = 1; runtime->oss.trigger = 1; runtime->oss.rate = 8000; mutex_init(&runtime->oss.params_lock); switch (SNDRV_MINOR_OSS_DEVICE(minor)) { case SNDRV_MINOR_OSS_PCM_8: runtime->oss.format = AFMT_U8; break; case SNDRV_MINOR_OSS_PCM_16: runtime->oss.format = AFMT_S16_LE; break; default: runtime->oss.format = AFMT_MU_LAW; } runtime->oss.channels = 1; runtime->oss.fragshift = 0; runtime->oss.maxfrags = 0; runtime->oss.subdivision = 0; substream->pcm_release = snd_pcm_oss_release_substream; } static int snd_pcm_oss_release_file(struct snd_pcm_oss_file *pcm_oss_file) { int cidx; if (!pcm_oss_file) return 0; for (cidx = 0; cidx < 2; ++cidx) { struct snd_pcm_substream *substream = pcm_oss_file->streams[cidx]; if (substream) snd_pcm_release_substream(substream); } kfree(pcm_oss_file); return 0; } static int snd_pcm_oss_open_file(struct file *file, struct snd_pcm *pcm, struct snd_pcm_oss_file **rpcm_oss_file, int minor, struct snd_pcm_oss_setup *setup) { int idx, err; struct snd_pcm_oss_file *pcm_oss_file; struct snd_pcm_substream *substream; fmode_t f_mode = file->f_mode; if (rpcm_oss_file) *rpcm_oss_file = NULL; pcm_oss_file = kzalloc(sizeof(*pcm_oss_file), GFP_KERNEL); if (pcm_oss_file == NULL) return -ENOMEM; if ((f_mode & (FMODE_WRITE|FMODE_READ)) == (FMODE_WRITE|FMODE_READ) && (pcm->info_flags & SNDRV_PCM_INFO_HALF_DUPLEX)) f_mode = FMODE_WRITE; file->f_flags &= ~O_APPEND; for (idx = 0; idx < 2; idx++) { if (setup[idx].disable) 
continue; if (! pcm->streams[idx].substream_count) continue; /* no matching substream */ if (idx == SNDRV_PCM_STREAM_PLAYBACK) { if (! (f_mode & FMODE_WRITE)) continue; } else { if (! (f_mode & FMODE_READ)) continue; } err = snd_pcm_open_substream(pcm, idx, file, &substream); if (err < 0) { snd_pcm_oss_release_file(pcm_oss_file); return err; } pcm_oss_file->streams[idx] = substream; substream->file = pcm_oss_file; snd_pcm_oss_init_substream(substream, &setup[idx], minor); } if (!pcm_oss_file->streams[0] && !pcm_oss_file->streams[1]) { snd_pcm_oss_release_file(pcm_oss_file); return -EINVAL; } file->private_data = pcm_oss_file; if (rpcm_oss_file) *rpcm_oss_file = pcm_oss_file; return 0; } static int snd_task_name(struct task_struct *task, char *name, size_t size) { unsigned int idx; if (snd_BUG_ON(!task || !name || size < 2)) return -EINVAL; for (idx = 0; idx < sizeof(task->comm) && idx + 1 < size; idx++) name[idx] = task->comm[idx]; name[idx] = '\0'; return 0; } static int snd_pcm_oss_open(struct inode *inode, struct file *file) { int err; char task_name[32]; struct snd_pcm *pcm; struct snd_pcm_oss_file *pcm_oss_file; struct snd_pcm_oss_setup setup[2]; int nonblock; wait_queue_t wait; err = nonseekable_open(inode, file); if (err < 0) return err; pcm = snd_lookup_oss_minor_data(iminor(inode), SNDRV_OSS_DEVICE_TYPE_PCM); if (pcm == NULL) { err = -ENODEV; goto __error1; } err = snd_card_file_add(pcm->card, file); if (err < 0) goto __error1; if (!try_module_get(pcm->card->module)) { err = -EFAULT; goto __error2; } if (snd_task_name(current, task_name, sizeof(task_name)) < 0) { err = -EFAULT; goto __error; } memset(setup, 0, sizeof(setup)); if (file->f_mode & FMODE_WRITE) snd_pcm_oss_look_for_setup(pcm, SNDRV_PCM_STREAM_PLAYBACK, task_name, &setup[0]); if (file->f_mode & FMODE_READ) snd_pcm_oss_look_for_setup(pcm, SNDRV_PCM_STREAM_CAPTURE, task_name, &setup[1]); nonblock = !!(file->f_flags & O_NONBLOCK); if (!nonblock) nonblock = nonblock_open; 
init_waitqueue_entry(&wait, current); add_wait_queue(&pcm->open_wait, &wait); mutex_lock(&pcm->open_mutex); while (1) { err = snd_pcm_oss_open_file(file, pcm, &pcm_oss_file, iminor(inode), setup); if (err >= 0) break; if (err == -EAGAIN) { if (nonblock) { err = -EBUSY; break; } } else break; set_current_state(TASK_INTERRUPTIBLE); mutex_unlock(&pcm->open_mutex); schedule(); mutex_lock(&pcm->open_mutex); if (pcm->card->shutdown) { err = -ENODEV; break; } if (signal_pending(current)) { err = -ERESTARTSYS; break; } } remove_wait_queue(&pcm->open_wait, &wait); mutex_unlock(&pcm->open_mutex); if (err < 0) goto __error; snd_card_unref(pcm->card); return err; __error: module_put(pcm->card->module); __error2: snd_card_file_remove(pcm->card, file); __error1: if (pcm) snd_card_unref(pcm->card); return err; } static int snd_pcm_oss_release(struct inode *inode, struct file *file) { struct snd_pcm *pcm; struct snd_pcm_substream *substream; struct snd_pcm_oss_file *pcm_oss_file; pcm_oss_file = file->private_data; substream = pcm_oss_file->streams[SNDRV_PCM_STREAM_PLAYBACK]; if (substream == NULL) substream = pcm_oss_file->streams[SNDRV_PCM_STREAM_CAPTURE]; if (snd_BUG_ON(!substream)) return -ENXIO; pcm = substream->pcm; if (!pcm->card->shutdown) snd_pcm_oss_sync(pcm_oss_file); mutex_lock(&pcm->open_mutex); snd_pcm_oss_release_file(pcm_oss_file); mutex_unlock(&pcm->open_mutex); wake_up(&pcm->open_wait); module_put(pcm->card->module); snd_card_file_remove(pcm->card, file); return 0; } static long snd_pcm_oss_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct snd_pcm_oss_file *pcm_oss_file; int __user *p = (int __user *)arg; int res; pcm_oss_file = file->private_data; if (cmd == OSS_GETVERSION) return put_user(SNDRV_OSS_VERSION, p); if (cmd == OSS_ALSAEMULVER) return put_user(1, p); #if defined(CONFIG_SND_MIXER_OSS) || (defined(MODULE) && defined(CONFIG_SND_MIXER_OSS_MODULE)) if (((cmd >> 8) & 0xff) == 'M') { /* mixer ioctl - for OSS compatibility */ struct 
snd_pcm_substream *substream; int idx; for (idx = 0; idx < 2; ++idx) { substream = pcm_oss_file->streams[idx]; if (substream != NULL) break; } if (snd_BUG_ON(idx >= 2)) return -ENXIO; return snd_mixer_oss_ioctl_card(substream->pcm->card, cmd, arg); } #endif if (((cmd >> 8) & 0xff) != 'P') return -EINVAL; #ifdef OSS_DEBUG printk(KERN_DEBUG "pcm_oss: ioctl = 0x%x\n", cmd); #endif switch (cmd) { case SNDCTL_DSP_RESET: return snd_pcm_oss_reset(pcm_oss_file); case SNDCTL_DSP_SYNC: return snd_pcm_oss_sync(pcm_oss_file); case SNDCTL_DSP_SPEED: if (get_user(res, p)) return -EFAULT; if ((res = snd_pcm_oss_set_rate(pcm_oss_file, res))<0) return res; return put_user(res, p); case SOUND_PCM_READ_RATE: res = snd_pcm_oss_get_rate(pcm_oss_file); if (res < 0) return res; return put_user(res, p); case SNDCTL_DSP_STEREO: if (get_user(res, p)) return -EFAULT; res = res > 0 ? 2 : 1; if ((res = snd_pcm_oss_set_channels(pcm_oss_file, res)) < 0) return res; return put_user(--res, p); case SNDCTL_DSP_GETBLKSIZE: res = snd_pcm_oss_get_block_size(pcm_oss_file); if (res < 0) return res; return put_user(res, p); case SNDCTL_DSP_SETFMT: if (get_user(res, p)) return -EFAULT; res = snd_pcm_oss_set_format(pcm_oss_file, res); if (res < 0) return res; return put_user(res, p); case SOUND_PCM_READ_BITS: res = snd_pcm_oss_get_format(pcm_oss_file); if (res < 0) return res; return put_user(res, p); case SNDCTL_DSP_CHANNELS: if (get_user(res, p)) return -EFAULT; res = snd_pcm_oss_set_channels(pcm_oss_file, res); if (res < 0) return res; return put_user(res, p); case SOUND_PCM_READ_CHANNELS: res = snd_pcm_oss_get_channels(pcm_oss_file); if (res < 0) return res; return put_user(res, p); case SOUND_PCM_WRITE_FILTER: case SOUND_PCM_READ_FILTER: return -EIO; case SNDCTL_DSP_POST: return snd_pcm_oss_post(pcm_oss_file); case SNDCTL_DSP_SUBDIVIDE: if (get_user(res, p)) return -EFAULT; res = snd_pcm_oss_set_subdivide(pcm_oss_file, res); if (res < 0) return res; return put_user(res, p); case 
SNDCTL_DSP_SETFRAGMENT: if (get_user(res, p)) return -EFAULT; return snd_pcm_oss_set_fragment(pcm_oss_file, res); case SNDCTL_DSP_GETFMTS: res = snd_pcm_oss_get_formats(pcm_oss_file); if (res < 0) return res; return put_user(res, p); case SNDCTL_DSP_GETOSPACE: case SNDCTL_DSP_GETISPACE: return snd_pcm_oss_get_space(pcm_oss_file, cmd == SNDCTL_DSP_GETISPACE ? SNDRV_PCM_STREAM_CAPTURE : SNDRV_PCM_STREAM_PLAYBACK, (struct audio_buf_info __user *) arg); case SNDCTL_DSP_NONBLOCK: return snd_pcm_oss_nonblock(file); case SNDCTL_DSP_GETCAPS: res = snd_pcm_oss_get_caps(pcm_oss_file); if (res < 0) return res; return put_user(res, p); case SNDCTL_DSP_GETTRIGGER: res = snd_pcm_oss_get_trigger(pcm_oss_file); if (res < 0) return res; return put_user(res, p); case SNDCTL_DSP_SETTRIGGER: if (get_user(res, p)) return -EFAULT; return snd_pcm_oss_set_trigger(pcm_oss_file, res); case SNDCTL_DSP_GETIPTR: case SNDCTL_DSP_GETOPTR: return snd_pcm_oss_get_ptr(pcm_oss_file, cmd == SNDCTL_DSP_GETIPTR ? SNDRV_PCM_STREAM_CAPTURE : SNDRV_PCM_STREAM_PLAYBACK, (struct count_info __user *) arg); case SNDCTL_DSP_MAPINBUF: case SNDCTL_DSP_MAPOUTBUF: return snd_pcm_oss_get_mapbuf(pcm_oss_file, cmd == SNDCTL_DSP_MAPINBUF ? SNDRV_PCM_STREAM_CAPTURE : SNDRV_PCM_STREAM_PLAYBACK, (struct buffmem_desc __user *) arg); case SNDCTL_DSP_SETSYNCRO: /* stop DMA now.. 
*/ return 0; case SNDCTL_DSP_SETDUPLEX: if (snd_pcm_oss_get_caps(pcm_oss_file) & DSP_CAP_DUPLEX) return 0; return -EIO; case SNDCTL_DSP_GETODELAY: res = snd_pcm_oss_get_odelay(pcm_oss_file); if (res < 0) { /* it's for sure, some broken apps don't check for error codes */ put_user(0, p); return res; } return put_user(res, p); case SNDCTL_DSP_PROFILE: return 0; /* silently ignore */ default: snd_printd("pcm_oss: unknown command = 0x%x\n", cmd); } return -EINVAL; } #ifdef CONFIG_COMPAT /* all compatible */ #define snd_pcm_oss_ioctl_compat snd_pcm_oss_ioctl #else #define snd_pcm_oss_ioctl_compat NULL #endif static ssize_t snd_pcm_oss_read(struct file *file, char __user *buf, size_t count, loff_t *offset) { struct snd_pcm_oss_file *pcm_oss_file; struct snd_pcm_substream *substream; pcm_oss_file = file->private_data; substream = pcm_oss_file->streams[SNDRV_PCM_STREAM_CAPTURE]; if (substream == NULL) return -ENXIO; substream->f_flags = file->f_flags & O_NONBLOCK; #ifndef OSS_DEBUG return snd_pcm_oss_read1(substream, buf, count); #else { ssize_t res = snd_pcm_oss_read1(substream, buf, count); printk(KERN_DEBUG "pcm_oss: read %li bytes " "(returned %li bytes)\n", (long)count, (long)res); return res; } #endif } static ssize_t snd_pcm_oss_write(struct file *file, const char __user *buf, size_t count, loff_t *offset) { struct snd_pcm_oss_file *pcm_oss_file; struct snd_pcm_substream *substream; long result; pcm_oss_file = file->private_data; substream = pcm_oss_file->streams[SNDRV_PCM_STREAM_PLAYBACK]; if (substream == NULL) return -ENXIO; substream->f_flags = file->f_flags & O_NONBLOCK; result = snd_pcm_oss_write1(substream, buf, count); #ifdef OSS_DEBUG printk(KERN_DEBUG "pcm_oss: write %li bytes (wrote %li bytes)\n", (long)count, (long)result); #endif return result; } static int snd_pcm_oss_playback_ready(struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime = substream->runtime; if (atomic_read(&substream->mmap_count)) return runtime->oss.prev_hw_ptr_period 
!= get_hw_ptr_period(runtime); else return snd_pcm_playback_avail(runtime) >= runtime->oss.period_frames; } static int snd_pcm_oss_capture_ready(struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime = substream->runtime; if (atomic_read(&substream->mmap_count)) return runtime->oss.prev_hw_ptr_period != get_hw_ptr_period(runtime); else return snd_pcm_capture_avail(runtime) >= runtime->oss.period_frames; } static unsigned int snd_pcm_oss_poll(struct file *file, poll_table * wait) { struct snd_pcm_oss_file *pcm_oss_file; unsigned int mask; struct snd_pcm_substream *psubstream = NULL, *csubstream = NULL; pcm_oss_file = file->private_data; psubstream = pcm_oss_file->streams[SNDRV_PCM_STREAM_PLAYBACK]; csubstream = pcm_oss_file->streams[SNDRV_PCM_STREAM_CAPTURE]; mask = 0; if (psubstream != NULL) { struct snd_pcm_runtime *runtime = psubstream->runtime; poll_wait(file, &runtime->sleep, wait); snd_pcm_stream_lock_irq(psubstream); if (runtime->status->state != SNDRV_PCM_STATE_DRAINING && (runtime->status->state != SNDRV_PCM_STATE_RUNNING || snd_pcm_oss_playback_ready(psubstream))) mask |= POLLOUT | POLLWRNORM; snd_pcm_stream_unlock_irq(psubstream); } if (csubstream != NULL) { struct snd_pcm_runtime *runtime = csubstream->runtime; snd_pcm_state_t ostate; poll_wait(file, &runtime->sleep, wait); snd_pcm_stream_lock_irq(csubstream); if ((ostate = runtime->status->state) != SNDRV_PCM_STATE_RUNNING || snd_pcm_oss_capture_ready(csubstream)) mask |= POLLIN | POLLRDNORM; snd_pcm_stream_unlock_irq(csubstream); if (ostate != SNDRV_PCM_STATE_RUNNING && runtime->oss.trigger) { struct snd_pcm_oss_file ofile; memset(&ofile, 0, sizeof(ofile)); ofile.streams[SNDRV_PCM_STREAM_CAPTURE] = pcm_oss_file->streams[SNDRV_PCM_STREAM_CAPTURE]; runtime->oss.trigger = 0; snd_pcm_oss_set_trigger(&ofile, PCM_ENABLE_INPUT); } } return mask; } static int snd_pcm_oss_mmap(struct file *file, struct vm_area_struct *area) { struct snd_pcm_oss_file *pcm_oss_file; struct snd_pcm_substream 
*substream = NULL; struct snd_pcm_runtime *runtime; int err; #ifdef OSS_DEBUG printk(KERN_DEBUG "pcm_oss: mmap begin\n"); #endif pcm_oss_file = file->private_data; switch ((area->vm_flags & (VM_READ | VM_WRITE))) { case VM_READ | VM_WRITE: substream = pcm_oss_file->streams[SNDRV_PCM_STREAM_PLAYBACK]; if (substream) break; /* Fall through */ case VM_READ: substream = pcm_oss_file->streams[SNDRV_PCM_STREAM_CAPTURE]; break; case VM_WRITE: substream = pcm_oss_file->streams[SNDRV_PCM_STREAM_PLAYBACK]; break; default: return -EINVAL; } /* set VM_READ access as well to fix memset() routines that do reads before writes (to improve performance) */ area->vm_flags |= VM_READ; if (substream == NULL) return -ENXIO; runtime = substream->runtime; if (!(runtime->info & SNDRV_PCM_INFO_MMAP_VALID)) return -EIO; if (runtime->info & SNDRV_PCM_INFO_INTERLEAVED) runtime->access = SNDRV_PCM_ACCESS_MMAP_INTERLEAVED; else return -EIO; if (runtime->oss.params) { if ((err = snd_pcm_oss_change_params(substream)) < 0) return err; } #ifdef CONFIG_SND_PCM_OSS_PLUGINS if (runtime->oss.plugin_first != NULL) return -EIO; #endif if (area->vm_pgoff != 0) return -EINVAL; err = snd_pcm_mmap_data(substream, file, area); if (err < 0) return err; runtime->oss.mmap_bytes = area->vm_end - area->vm_start; runtime->silence_threshold = 0; runtime->silence_size = 0; #ifdef OSS_DEBUG printk(KERN_DEBUG "pcm_oss: mmap ok, bytes = 0x%x\n", runtime->oss.mmap_bytes); #endif /* In mmap mode we never stop */ runtime->stop_threshold = runtime->boundary; return 0; } #ifdef CONFIG_SND_VERBOSE_PROCFS /* * /proc interface */ static void snd_pcm_oss_proc_read(struct snd_info_entry *entry, struct snd_info_buffer *buffer) { struct snd_pcm_str *pstr = entry->private_data; struct snd_pcm_oss_setup *setup = pstr->oss.setup_list; mutex_lock(&pstr->oss.setup_mutex); while (setup) { snd_iprintf(buffer, "%s %u %u%s%s%s%s%s%s\n", setup->task_name, setup->periods, setup->period_size, setup->disable ? " disable" : "", setup->direct ? 
" direct" : "", setup->block ? " block" : "", setup->nonblock ? " non-block" : "", setup->partialfrag ? " partial-frag" : "", setup->nosilence ? " no-silence" : ""); setup = setup->next; } mutex_unlock(&pstr->oss.setup_mutex); } static void snd_pcm_oss_proc_free_setup_list(struct snd_pcm_str * pstr) { struct snd_pcm_oss_setup *setup, *setupn; for (setup = pstr->oss.setup_list, pstr->oss.setup_list = NULL; setup; setup = setupn) { setupn = setup->next; kfree(setup->task_name); kfree(setup); } pstr->oss.setup_list = NULL; } static void snd_pcm_oss_proc_write(struct snd_info_entry *entry, struct snd_info_buffer *buffer) { struct snd_pcm_str *pstr = entry->private_data; char line[128], str[32], task_name[32]; const char *ptr; int idx1; struct snd_pcm_oss_setup *setup, *setup1, template; while (!snd_info_get_line(buffer, line, sizeof(line))) { mutex_lock(&pstr->oss.setup_mutex); memset(&template, 0, sizeof(template)); ptr = snd_info_get_str(task_name, line, sizeof(task_name)); if (!strcmp(task_name, "clear") || !strcmp(task_name, "erase")) { snd_pcm_oss_proc_free_setup_list(pstr); mutex_unlock(&pstr->oss.setup_mutex); continue; } for (setup = pstr->oss.setup_list; setup; setup = setup->next) { if (!strcmp(setup->task_name, task_name)) { template = *setup; break; } } ptr = snd_info_get_str(str, ptr, sizeof(str)); template.periods = simple_strtoul(str, NULL, 10); ptr = snd_info_get_str(str, ptr, sizeof(str)); template.period_size = simple_strtoul(str, NULL, 10); for (idx1 = 31; idx1 >= 0; idx1--) if (template.period_size & (1 << idx1)) break; for (idx1--; idx1 >= 0; idx1--) template.period_size &= ~(1 << idx1); do { ptr = snd_info_get_str(str, ptr, sizeof(str)); if (!strcmp(str, "disable")) { template.disable = 1; } else if (!strcmp(str, "direct")) { template.direct = 1; } else if (!strcmp(str, "block")) { template.block = 1; } else if (!strcmp(str, "non-block")) { template.nonblock = 1; } else if (!strcmp(str, "partial-frag")) { template.partialfrag = 1; } else if 
(!strcmp(str, "no-silence")) { template.nosilence = 1; } else if (!strcmp(str, "buggy-ptr")) { template.buggyptr = 1; } } while (*str); if (setup == NULL) { setup = kmalloc(sizeof(*setup), GFP_KERNEL); if (! setup) { buffer->error = -ENOMEM; mutex_unlock(&pstr->oss.setup_mutex); return; } if (pstr->oss.setup_list == NULL) pstr->oss.setup_list = setup; else { for (setup1 = pstr->oss.setup_list; setup1->next; setup1 = setup1->next); setup1->next = setup; } template.task_name = kstrdup(task_name, GFP_KERNEL); if (! template.task_name) { kfree(setup); buffer->error = -ENOMEM; mutex_unlock(&pstr->oss.setup_mutex); return; } } *setup = template; mutex_unlock(&pstr->oss.setup_mutex); } } static void snd_pcm_oss_proc_init(struct snd_pcm *pcm) { int stream; for (stream = 0; stream < 2; ++stream) { struct snd_info_entry *entry; struct snd_pcm_str *pstr = &pcm->streams[stream]; if (pstr->substream_count == 0) continue; if ((entry = snd_info_create_card_entry(pcm->card, "oss", pstr->proc_root)) != NULL) { entry->content = SNDRV_INFO_CONTENT_TEXT; entry->mode = S_IFREG | S_IRUGO | S_IWUSR; entry->c.text.read = snd_pcm_oss_proc_read; entry->c.text.write = snd_pcm_oss_proc_write; entry->private_data = pstr; if (snd_info_register(entry) < 0) { snd_info_free_entry(entry); entry = NULL; } } pstr->oss.proc_entry = entry; } } static void snd_pcm_oss_proc_done(struct snd_pcm *pcm) { int stream; for (stream = 0; stream < 2; ++stream) { struct snd_pcm_str *pstr = &pcm->streams[stream]; snd_info_free_entry(pstr->oss.proc_entry); pstr->oss.proc_entry = NULL; snd_pcm_oss_proc_free_setup_list(pstr); } } #else /* !CONFIG_SND_VERBOSE_PROCFS */ #define snd_pcm_oss_proc_init(pcm) #define snd_pcm_oss_proc_done(pcm) #endif /* CONFIG_SND_VERBOSE_PROCFS */ /* * ENTRY functions */ static const struct file_operations snd_pcm_oss_f_reg = { .owner = THIS_MODULE, .read = snd_pcm_oss_read, .write = snd_pcm_oss_write, .open = snd_pcm_oss_open, .release = snd_pcm_oss_release, .llseek = no_llseek, .poll = 
snd_pcm_oss_poll, .unlocked_ioctl = snd_pcm_oss_ioctl, .compat_ioctl = snd_pcm_oss_ioctl_compat, .mmap = snd_pcm_oss_mmap, }; static void register_oss_dsp(struct snd_pcm *pcm, int index) { char name[128]; sprintf(name, "dsp%i%i", pcm->card->number, pcm->device); if (snd_register_oss_device(SNDRV_OSS_DEVICE_TYPE_PCM, pcm->card, index, &snd_pcm_oss_f_reg, pcm, name) < 0) { snd_printk(KERN_ERR "unable to register OSS PCM device %i:%i\n", pcm->card->number, pcm->device); } } static int snd_pcm_oss_register_minor(struct snd_pcm *pcm) { pcm->oss.reg = 0; if (dsp_map[pcm->card->number] == (int)pcm->device) { char name[128]; int duplex; register_oss_dsp(pcm, 0); duplex = (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream_count > 0 && pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream_count && !(pcm->info_flags & SNDRV_PCM_INFO_HALF_DUPLEX)); sprintf(name, "%s%s", pcm->name, duplex ? " (DUPLEX)" : ""); #ifdef SNDRV_OSS_INFO_DEV_AUDIO snd_oss_info_register(SNDRV_OSS_INFO_DEV_AUDIO, pcm->card->number, name); #endif pcm->oss.reg++; pcm->oss.reg_mask |= 1; } if (adsp_map[pcm->card->number] == (int)pcm->device) { register_oss_dsp(pcm, 1); pcm->oss.reg++; pcm->oss.reg_mask |= 2; } if (pcm->oss.reg) snd_pcm_oss_proc_init(pcm); return 0; } static int snd_pcm_oss_disconnect_minor(struct snd_pcm *pcm) { if (pcm->oss.reg) { if (pcm->oss.reg_mask & 1) { pcm->oss.reg_mask &= ~1; snd_unregister_oss_device(SNDRV_OSS_DEVICE_TYPE_PCM, pcm->card, 0); } if (pcm->oss.reg_mask & 2) { pcm->oss.reg_mask &= ~2; snd_unregister_oss_device(SNDRV_OSS_DEVICE_TYPE_PCM, pcm->card, 1); } if (dsp_map[pcm->card->number] == (int)pcm->device) { #ifdef SNDRV_OSS_INFO_DEV_AUDIO snd_oss_info_unregister(SNDRV_OSS_INFO_DEV_AUDIO, pcm->card->number); #endif } pcm->oss.reg = 0; } return 0; } static int snd_pcm_oss_unregister_minor(struct snd_pcm *pcm) { snd_pcm_oss_disconnect_minor(pcm); snd_pcm_oss_proc_done(pcm); return 0; } static struct snd_pcm_notify snd_pcm_oss_notify = { .n_register = 
snd_pcm_oss_register_minor, .n_disconnect = snd_pcm_oss_disconnect_minor, .n_unregister = snd_pcm_oss_unregister_minor, }; static int __init alsa_pcm_oss_init(void) { int i; int err; /* check device map table */ for (i = 0; i < SNDRV_CARDS; i++) { if (dsp_map[i] < 0 || dsp_map[i] >= SNDRV_PCM_DEVICES) { snd_printk(KERN_ERR "invalid dsp_map[%d] = %d\n", i, dsp_map[i]); dsp_map[i] = 0; } if (adsp_map[i] < 0 || adsp_map[i] >= SNDRV_PCM_DEVICES) { snd_printk(KERN_ERR "invalid adsp_map[%d] = %d\n", i, adsp_map[i]); adsp_map[i] = 1; } } if ((err = snd_pcm_notify(&snd_pcm_oss_notify, 0)) < 0) return err; return 0; } static void __exit alsa_pcm_oss_exit(void) { snd_pcm_notify(&snd_pcm_oss_notify, 1); } module_init(alsa_pcm_oss_init) module_exit(alsa_pcm_oss_exit)
gpl-2.0
lostemp/port_linux-2.6.30.4
arch/ia64/xen/hypervisor.c
4540
2837
/******************************************************************************
 * arch/ia64/xen/hypervisor.c
 *
 * Copyright (c) 2006 Isaku Yamahata <yamahata at valinux co jp>
 *                    VA Linux Systems Japan K.K.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

#include <linux/efi.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/privop.h>

#include "irq_xen.h"

/* On ia64 the Xen shared info page lives at the fixed XSI_BASE mapping. */
struct shared_info *HYPERVISOR_shared_info __read_mostly =
	(struct shared_info *)XSI_BASE;
EXPORT_SYMBOL(HYPERVISOR_shared_info);

/* Per-CPU pointer into the vcpu_info array of the shared info page. */
DEFINE_PER_CPU(struct vcpu_info *, xen_vcpu);

struct start_info *xen_start_info;
EXPORT_SYMBOL(xen_start_info);

EXPORT_SYMBOL(xen_domain_type);

EXPORT_SYMBOL(__hypercall);

/* Stolen from arch/x86/xen/enlighten.c */

/*
 * Flag to determine whether vcpu info placement is available on all
 * VCPUs.  We assume it is to start with, and then set it to zero on
 * the first failure.  This is because it can succeed on some VCPUs
 * and not others, since it can involve hypervisor memory allocation,
 * or because the guest failed to guarantee all the appropriate
 * constraints on all VCPUs (ie buffer can't cross a page boundary).
 *
 * Note that any particular CPU may be using a placed vcpu structure,
 * but we can only optimise if the all are.
 *
 * 0: not available, 1: available
 */

/*
 * Point this CPU's xen_vcpu at its slot inside the shared info page.
 * The BUILD_BUG_ON guards the assumption that the whole shared_info
 * structure (and hence every vcpu_info slot) fits within one page.
 */
static void __init xen_vcpu_setup(int cpu)
{
	/*
	 * WARNING:
	 * before changing MAX_VIRT_CPUS,
	 * check that shared_info fits on a page
	 */
	BUILD_BUG_ON(sizeof(struct shared_info) > PAGE_SIZE);
	per_cpu(xen_vcpu, cpu) = &HYPERVISOR_shared_info->vcpu_info[cpu];
}

/* Wire up the xen_vcpu pointer for every possible CPU. */
void __init xen_setup_vcpu_info_placement(void)
{
	int i;

	for_each_possible_cpu(i)
		xen_vcpu_setup(i);
}

/* Per-CPU bring-up hook: install the Xen SMP interrupt handlers. */
void __cpuinit xen_cpu_init(void)
{
	xen_smp_intr_init();
}

/**************************************************************************
 * opt feature
 */
/*
 * Ask the hypervisor to enable the region 7 identity-map optimization
 * for this (Linux) guest.
 */
void
xen_ia64_enable_opt_feature(void)
{
	struct xen_ia64_opt_feature optf = {
		.cmd	= XEN_IA64_OPTF_IDENT_MAP_REG7,
		.on	= XEN_IA64_OPTF_ON,
		.pgprot	= pgprot_val(PAGE_KERNEL),
		.key	= 0,	/* No key on linux. */
	};

	HYPERVISOR_opt_feature(&optf);
}
gpl-2.0
Solitarily/jolla-kernel-Stock_KK
arch/powerpc/platforms/pseries/msi.c
4540
10862
/*
 * Copyright 2006 Jake Moilanen <moilanen@austin.ibm.com>, IBM Corp.
 * Copyright 2006-2007 Michael Ellerman, IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2 of the
 * License.
 *
 */

#include <linux/device.h>
#include <linux/irq.h>
#include <linux/msi.h>

#include <asm/rtas.h>
#include <asm/hw_irq.h>
#include <asm/ppc-pci.h>

/* RTAS tokens for the two firmware calls we use; looked up at init time. */
static int query_token, change_token;

#define RTAS_QUERY_FN		0
#define RTAS_CHANGE_FN		1
#define RTAS_RESET_FN		2
#define RTAS_CHANGE_MSI_FN	3
#define RTAS_CHANGE_MSIX_FN	4

/* Map a pci_dev to its pci_dn via the device tree; NULL if either is missing. */
static struct pci_dn *get_pdn(struct pci_dev *pdev)
{
	struct device_node *dn;
	struct pci_dn *pdn;

	dn = pci_device_to_OF_node(pdev);
	if (!dn) {
		dev_dbg(&pdev->dev, "rtas_msi: No OF device node\n");
		return NULL;
	}

	pdn = PCI_DN(dn);
	if (!pdn) {
		dev_dbg(&pdev->dev, "rtas_msi: No PCI DN\n");
		return NULL;
	}

	return pdn;
}

/* RTAS Helpers */

/*
 * Call ibm,change-msi for @pdn, requesting @num_irqs interrupts via
 * function @func.  The call is a sequence: RTAS returns a sequence
 * number we must feed back in while it reports busy.
 *
 * Returns the number of interrupts actually allocated, or a negative
 * error code.
 */
static int rtas_change_msi(struct pci_dn *pdn, u32 func, u32 num_irqs)
{
	u32 addr, seq_num, rtas_ret[3];
	unsigned long buid;
	int rc;

	addr = rtas_config_addr(pdn->busno, pdn->devfn, 0);
	buid = pdn->phb->buid;

	seq_num = 1;
	do {
		/* The MSI/MSI-X specific functions take an extra output. */
		if (func == RTAS_CHANGE_MSI_FN || func == RTAS_CHANGE_MSIX_FN)
			rc = rtas_call(change_token, 6, 4, rtas_ret, addr,
					BUID_HI(buid), BUID_LO(buid),
					func, num_irqs, seq_num);
		else
			rc = rtas_call(change_token, 6, 3, rtas_ret, addr,
					BUID_HI(buid), BUID_LO(buid),
					func, num_irqs, seq_num);

		seq_num = rtas_ret[1];
	} while (rtas_busy_delay(rc));

	/*
	 * If the RTAS call succeeded, return the number of irqs allocated.
	 * If not, make sure we return a negative error code.
	 */
	if (rc == 0)
		rc = rtas_ret[0];
	else if (rc > 0)
		rc = -rc;

	pr_debug("rtas_msi: ibm,change_msi(func=%d,num=%d), got %d rc = %d\n",
		 func, num_irqs, rtas_ret[0], rc);

	return rc;
}

/* Tell firmware to release all MSIs/MSI-Xs assigned to @pdev. */
static void rtas_disable_msi(struct pci_dev *pdev)
{
	struct pci_dn *pdn;

	pdn = get_pdn(pdev);
	if (!pdn)
		return;

	/*
	 * disabling MSI with the explicit interface also disables MSI-X
	 */
	if (rtas_change_msi(pdn, RTAS_CHANGE_MSI_FN, 0) != 0) {
		/*
		 * may have failed because explicit interface is not
		 * present
		 */
		if (rtas_change_msi(pdn, RTAS_CHANGE_FN, 0) != 0) {
			pr_debug("rtas_msi: Setting MSIs to 0 failed!\n");
		}
	}
}

/*
 * Ask firmware for the hardware interrupt number of MSI entry @offset
 * on @pdn.  Returns the hwirq, or a negative RTAS error code.
 */
static int rtas_query_irq_number(struct pci_dn *pdn, int offset)
{
	u32 addr, rtas_ret[2];
	unsigned long buid;
	int rc;

	addr = rtas_config_addr(pdn->busno, pdn->devfn, 0);
	buid = pdn->phb->buid;

	do {
		rc = rtas_call(query_token, 4, 3, rtas_ret, addr,
			       BUID_HI(buid), BUID_LO(buid), offset);
	} while (rtas_busy_delay(rc));

	if (rc) {
		pr_debug("rtas_msi: error (%d) querying source number\n", rc);
		return rc;
	}

	return rtas_ret[0];
}

/* ppc_md hook: unmap every MSI virq of @pdev, then release them in firmware. */
static void rtas_teardown_msi_irqs(struct pci_dev *pdev)
{
	struct msi_desc *entry;

	list_for_each_entry(entry, &pdev->msi_list, list) {
		if (entry->irq == NO_IRQ)
			continue;

		irq_set_msi_desc(entry->irq, NULL);
		irq_dispose_mapping(entry->irq);
	}

	rtas_disable_msi(pdev);
}

/*
 * Check the device-tree property @prop_name (the firmware-advertised
 * MSI request count) against @nvec.  Returns 0 if @nvec is supported,
 * the supported count if it is lower, or a negative error code.
 */
static int check_req(struct pci_dev *pdev, int nvec, char *prop_name)
{
	struct device_node *dn;
	struct pci_dn *pdn;
	const u32 *req_msi;

	pdn = get_pdn(pdev);
	if (!pdn)
		return -ENODEV;

	dn = pdn->node;

	req_msi = of_get_property(dn, prop_name, NULL);
	if (!req_msi) {
		pr_debug("rtas_msi: No %s on %s\n", prop_name, dn->full_name);
		return -ENOENT;
	}

	if (*req_msi < nvec) {
		pr_debug("rtas_msi: %s requests < %d MSIs\n", prop_name, nvec);

		if (*req_msi == 0) /* Be paranoid */
			return -ENOSPC;

		return *req_msi;
	}

	return 0;
}

static int check_req_msi(struct pci_dev *pdev, int nvec)
{
	return check_req(pdev, nvec, "ibm,req#msi");
}

static int check_req_msix(struct pci_dev *pdev, int nvec)
{
	return check_req(pdev, nvec, "ibm,req#msi-x");
}

/* Quota calculation */

/*
 * Walk up from @dev looking for an "ibm,pe-total-#msi" property; on
 * success store the total in *@total and return the node (with a
 * reference held).  Returns NULL if no ancestor carries the property.
 */
static struct device_node *find_pe_total_msi(struct pci_dev *dev, int *total)
{
	struct device_node *dn;
	const u32 *p;

	dn = of_node_get(pci_device_to_OF_node(dev));
	while (dn) {
		p = of_get_property(dn, "ibm,pe-total-#msi", NULL);
		if (p) {
			pr_debug("rtas_msi: found prop on dn %s\n",
				dn->full_name);
			*total = *p;
			return dn;
		}

		dn = of_get_next_parent(dn);
	}

	return NULL;
}

/*
 * Fallback for old firmware: locate the PE node for @dev via EEH and
 * hardcode a total of 8 MSIs.  Returns the parent of the PE node with
 * a reference held, or NULL.
 */
static struct device_node *find_pe_dn(struct pci_dev *dev, int *total)
{
	struct device_node *dn;

	/* Found our PE and assume 8 at that point. */

	dn = pci_device_to_OF_node(dev);
	if (!dn)
		return NULL;

	dn = eeh_find_device_pe(dn);
	if (!dn)
		return NULL;

	/* We actually want the parent */
	dn = of_get_parent(dn);
	if (!dn)
		return NULL;

	/* Hardcode of 8 for old firmwares */
	*total = 8;
	pr_debug("rtas_msi: using PE dn %s\n", dn->full_name);

	return dn;
}

/* Accumulator passed through traverse_pci_devices() during quota calc. */
struct msi_counts {
	struct device_node *requestor;	/* the device asking for MSIs */
	int num_devices;		/* non-bridge devices under the PE */
	int request;			/* the requestor's desired count */
	int quota;			/* per-device fair share */
	int spare;			/* MSIs left unclaimed below quota */
	int over_quota;			/* devices wanting more than quota */
};

/* traverse callback: count every non-PCI-bridge device under the PE. */
static void *count_non_bridge_devices(struct device_node *dn, void *data)
{
	struct msi_counts *counts = data;
	const u32 *p;
	u32 class;

	pr_debug("rtas_msi: counting %s\n", dn->full_name);

	p = of_get_property(dn, "class-code", NULL);
	class = p ? *p : 0;

	if ((class >> 8) != PCI_CLASS_BRIDGE_PCI)
		counts->num_devices++;

	return NULL;
}

/* traverse callback: tally spare MSIs and over-quota requestors. */
static void *count_spare_msis(struct device_node *dn, void *data)
{
	struct msi_counts *counts = data;
	const u32 *p;
	int req;

	if (dn == counts->requestor)
		req = counts->request;
	else {
		/* We don't know if a driver will try to use MSI or MSI-X,
		 * so we just have to punt and use the larger of the two. */
		req = 0;
		p = of_get_property(dn, "ibm,req#msi", NULL);
		if (p)
			req = *p;

		p = of_get_property(dn, "ibm,req#msi-x", NULL);
		if (p)
			req = max(req, (int)*p);
	}

	if (req < counts->quota)
		counts->spare += counts->quota - req;
	else if (req > counts->quota)
		counts->over_quota++;

	return NULL;
}

/*
 * Compute how many MSIs @dev may actually have, sharing the PE's
 * total fairly between all devices under it.  Returns the (possibly
 * clamped) request.
 */
static int msi_quota_for_device(struct pci_dev *dev, int request)
{
	struct device_node *pe_dn;
	struct msi_counts counts;
	int total;

	pr_debug("rtas_msi: calc quota for %s, request %d\n", pci_name(dev),
		  request);

	pe_dn = find_pe_total_msi(dev, &total);
	if (!pe_dn)
		pe_dn = find_pe_dn(dev, &total);

	if (!pe_dn) {
		pr_err("rtas_msi: couldn't find PE for %s\n", pci_name(dev));
		goto out;
	}

	pr_debug("rtas_msi: found PE %s\n", pe_dn->full_name);

	memset(&counts, 0, sizeof(struct msi_counts));

	/* Work out how many devices we have below this PE */
	traverse_pci_devices(pe_dn, count_non_bridge_devices, &counts);

	if (counts.num_devices == 0) {
		pr_err("rtas_msi: found 0 devices under PE for %s\n",
			pci_name(dev));
		goto out;
	}

	counts.quota = total / counts.num_devices;
	if (request <= counts.quota)
		goto out;

	/* else, we have some more calculating to do */
	counts.requestor = pci_device_to_OF_node(dev);
	counts.request = request;
	traverse_pci_devices(pe_dn, count_spare_msis, &counts);

	/* If the quota isn't an integer multiple of the total, we can
	 * use the remainder as spare MSIs for anyone that wants them. */
	counts.spare += total % counts.num_devices;

	/* Divide any spare by the number of over-quota requestors */
	if (counts.over_quota)
		counts.quota += counts.spare / counts.over_quota;

	/* And finally clamp the request to the possibly adjusted quota */
	request = min(counts.quota, request);

	pr_debug("rtas_msi: request clamped to quota %d\n", request);
out:
	of_node_put(pe_dn);

	return request;
}

/* ppc_md hook: can @pdev get @nvec interrupts of @type?  0, quota or error. */
static int rtas_msi_check_device(struct pci_dev *pdev, int nvec, int type)
{
	int quota, rc;

	if (type == PCI_CAP_ID_MSIX)
		rc = check_req_msix(pdev, nvec);
	else
		rc = check_req_msi(pdev, nvec);

	if (rc)
		return rc;

	quota = msi_quota_for_device(pdev, nvec);

	if (quota && quota < nvec)
		return quota;

	return 0;
}

/*
 * Firmware can only allocate a contiguous, zero-based range of MSI-X
 * entries; reject any request that is not shaped that way.
 */
static int check_msix_entries(struct pci_dev *pdev)
{
	struct msi_desc *entry;
	int expected;

	/* There's no way for us to express to firmware that we want
	 * a discontiguous, or non-zero based, range of MSI-X entries.
	 * So we must reject such requests. */

	expected = 0;
	list_for_each_entry(entry, &pdev->msi_list, list) {
		if (entry->msi_attrib.entry_nr != expected) {
			pr_debug("rtas_msi: bad MSI-X entries.\n");
			return -EINVAL;
		}
		expected++;
	}

	return 0;
}

/*
 * ppc_md hook: allocate @nvec MSI/MSI-X interrupts for @pdev via RTAS
 * and map each firmware hwirq to a Linux virq.
 */
static int rtas_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
{
	struct pci_dn *pdn;
	int hwirq, virq, i, rc;
	struct msi_desc *entry;
	struct msi_msg msg;

	pdn = get_pdn(pdev);
	if (!pdn)
		return -ENODEV;

	if (type == PCI_CAP_ID_MSIX && check_msix_entries(pdev))
		return -EINVAL;

	/*
	 * Try the new more explicit firmware interface, if that fails fall
	 * back to the old interface. The old interface is known to never
	 * return MSI-Xs.
	 */
	if (type == PCI_CAP_ID_MSI) {
		rc = rtas_change_msi(pdn, RTAS_CHANGE_MSI_FN, nvec);

		if (rc < 0) {
			pr_debug("rtas_msi: trying the old firmware call.\n");
			rc = rtas_change_msi(pdn, RTAS_CHANGE_FN, nvec);
		}
	} else
		rc = rtas_change_msi(pdn, RTAS_CHANGE_MSIX_FN, nvec);

	if (rc != nvec) {
		pr_debug("rtas_msi: rtas_change_msi() failed\n");
		return rc;
	}

	i = 0;
	list_for_each_entry(entry, &pdev->msi_list, list) {
		hwirq = rtas_query_irq_number(pdn, i++);
		if (hwirq < 0) {
			/* Fix: report the actual error (hwirq), not the stale
			 * rc left over from the rtas_change_msi() call above. */
			pr_debug("rtas_msi: error (%d) getting hwirq\n", hwirq);
			return hwirq;
		}

		virq = irq_create_mapping(NULL, hwirq);

		if (virq == NO_IRQ) {
			pr_debug("rtas_msi: Failed mapping hwirq %d\n", hwirq);
			return -ENOSPC;
		}

		dev_dbg(&pdev->dev, "rtas_msi: allocated virq %d\n", virq);
		irq_set_msi_desc(virq, entry);

		/* Read config space back so we can restore after reset */
		read_msi_msg(virq, &msg);
		entry->msg = msg;
	}

	return 0;
}

/*
 * pci_irq_fixup hook: if firmware left MSIs configured on a device that
 * has an LSI assigned, disable them so the LSI can be used.
 */
static void rtas_msi_pci_irq_fixup(struct pci_dev *pdev)
{
	/* No LSI -> leave MSIs (if any) configured */
	if (pdev->irq == NO_IRQ) {
		dev_dbg(&pdev->dev, "rtas_msi: no LSI, nothing to do.\n");
		return;
	}

	/* No MSI -> MSIs can't have been assigned by fw, leave LSI */
	if (check_req_msi(pdev, 1) && check_req_msix(pdev, 1)) {
		dev_dbg(&pdev->dev, "rtas_msi: no req#msi/x, nothing to do.\n");
		return;
	}

	dev_dbg(&pdev->dev, "rtas_msi: disabling existing MSI.\n");
	rtas_disable_msi(pdev);
}

/* Look up the RTAS tokens and register our MSI callbacks with ppc_md. */
static int rtas_msi_init(void)
{
	query_token  = rtas_token("ibm,query-interrupt-source-number");
	change_token = rtas_token("ibm,change-msi");

	if ((query_token == RTAS_UNKNOWN_SERVICE) ||
			(change_token == RTAS_UNKNOWN_SERVICE)) {
		pr_debug("rtas_msi: no RTAS tokens, no MSI support.\n");
		return -1;
	}

	pr_debug("rtas_msi: Registering RTAS MSI callbacks.\n");

	WARN_ON(ppc_md.setup_msi_irqs);
	ppc_md.setup_msi_irqs = rtas_setup_msi_irqs;
	ppc_md.teardown_msi_irqs = rtas_teardown_msi_irqs;
	ppc_md.msi_check_device = rtas_msi_check_device;

	WARN_ON(ppc_md.pci_irq_fixup);
	ppc_md.pci_irq_fixup = rtas_msi_pci_irq_fixup;

	return 0;
}
arch_initcall(rtas_msi_init);
gpl-2.0
meefik/tinykernel-flo
drivers/net/wireless/rtlwifi/rtl8192ce/dm.c
5052
3846
/******************************************************************************
 *
 * Copyright(c) 2009-2012  Realtek Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 * wlanfae <wlanfae@realtek.com>
 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
 * Hsinchu 300, Taiwan.
 *
 * Larry Finger <Larry.Finger@lwfinger.net>
 *
 *****************************************************************************/

#include "../wifi.h"
#include "../base.h"
#include "../pci.h"
#include "reg.h"
#include "def.h"
#include "phy.h"
#include "dm.h"
#include "../rtl8192c/fw_common.h"

/*
 * Dynamic TX power control: pick a high-power level from the smoothed
 * PWDB (signal strength) and reprogram the PHY TX power when the level
 * changes.
 */
void rtl92ce_dm_dynamic_txpower(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_phy *rtlphy = &(rtlpriv->phy);
	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
	long pwdb;

	if (!rtlpriv->dm.dynamic_txpower_enable)
		return;

	if (rtlpriv->dm.dm_flag & HAL_DM_HIPWR_DISABLE) {
		rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL;
		return;
	}

	/* Not associated and no client signal yet: force normal power. */
	if ((mac->link_state < MAC80211_LINKED) &&
	    (rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb == 0)) {
		RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE,
			 "Not connected to any\n");

		rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL;
		rtlpriv->dm.last_dtp_lvl = TXHIGHPWRLEVEL_NORMAL;
		return;
	}

	/* Select which smoothed PWDB to use for the role we are in. */
	if (mac->link_state < MAC80211_LINKED) {
		pwdb = rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
		RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
			 "AP Ext Port PWDB = 0x%lx\n", pwdb);
	} else if (mac->opmode == NL80211_IFTYPE_ADHOC) {
		pwdb = rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
		RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
			 "AP Client PWDB = 0x%lx\n", pwdb);
	} else {
		pwdb = rtlpriv->dm.undecorated_smoothed_pwdb;
		RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
			 "STA Default Port PWDB = 0x%lx\n", pwdb);
	}

	/*
	 * Hysteresis thresholds: the level only drops once the signal
	 * falls a few dB below the threshold that raised it.
	 */
	if (pwdb >= TX_POWER_NEAR_FIELD_THRESH_LVL2) {
		rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_LEVEL1;
		RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
			 "TXHIGHPWRLEVEL_LEVEL1 (TxPwr=0x0)\n");
	} else if (pwdb < (TX_POWER_NEAR_FIELD_THRESH_LVL2 - 3) &&
		   pwdb >= TX_POWER_NEAR_FIELD_THRESH_LVL1) {
		rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_LEVEL1;
		RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
			 "TXHIGHPWRLEVEL_LEVEL1 (TxPwr=0x10)\n");
	} else if (pwdb < (TX_POWER_NEAR_FIELD_THRESH_LVL1 - 5)) {
		rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL;
		RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
			 "TXHIGHPWRLEVEL_NORMAL\n");
	}

	/* Only touch the hardware when the level actually changed. */
	if (rtlpriv->dm.dynamic_txhighpower_lvl != rtlpriv->dm.last_dtp_lvl) {
		RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
			 "PHY_SetTxPowerLevel8192S() Channel = %d\n",
			 rtlphy->current_channel);
		rtl92c_phy_set_txpower_level(hw, rtlphy->current_channel);
	}

	rtlpriv->dm.last_dtp_lvl = rtlpriv->dm.dynamic_txhighpower_lvl;
}
gpl-2.0
RickeysWorld/linux
drivers/media/video/pwc/pwc-ctrl.c
5052
14614
/* Driver for Philips webcam Functions that send various control messages to the webcam, including video modes. (C) 1999-2003 Nemosoft Unv. (C) 2004-2006 Luc Saillard (luc@saillard.org) (C) 2011 Hans de Goede <hdegoede@redhat.com> NOTE: this version of pwc is an unofficial (modified) release of pwc & pcwx driver and thus may have bugs that are not present in the original version. Please send bug reports and support requests to <luc@saillard.org>. NOTE: this version of pwc is an unofficial (modified) release of pwc & pcwx driver and thus may have bugs that are not present in the original version. Please send bug reports and support requests to <luc@saillard.org>. The decompression routines have been implemented by reverse-engineering the Nemosoft binary pwcx module. Caveat emptor. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /* Changes 2001/08/03 Alvarado Added methods for changing white balance and red/green gains */ /* Control functions for the cam; brightness, contrast, video mode, etc. 
*/ #ifdef __KERNEL__ #include <asm/uaccess.h> #endif #include <asm/errno.h> #include "pwc.h" #include "pwc-kiara.h" #include "pwc-timon.h" #include "pwc-dec1.h" #include "pwc-dec23.h" /* Selectors for status controls used only in this file */ #define GET_STATUS_B00 0x0B00 #define SENSOR_TYPE_FORMATTER1 0x0C00 #define GET_STATUS_3000 0x3000 #define READ_RAW_Y_MEAN_FORMATTER 0x3100 #define SET_POWER_SAVE_MODE_FORMATTER 0x3200 #define MIRROR_IMAGE_FORMATTER 0x3300 #define LED_FORMATTER 0x3400 #define LOWLIGHT 0x3500 #define GET_STATUS_3600 0x3600 #define SENSOR_TYPE_FORMATTER2 0x3700 #define GET_STATUS_3800 0x3800 #define GET_STATUS_4000 0x4000 #define GET_STATUS_4100 0x4100 /* Get */ #define CTL_STATUS_4200 0x4200 /* [GS] 1 */ /* Formatters for the Video Endpoint controls [GS]ET_EP_STREAM_CTL */ #define VIDEO_OUTPUT_CONTROL_FORMATTER 0x0100 static const char *size2name[PSZ_MAX] = { "subQCIF", "QSIF", "QCIF", "SIF", "CIF", "VGA", }; /********/ /* Entries for the Nala (645/646) camera; the Nala doesn't have compression preferences, so you either get compressed or non-compressed streams. An alternate value of 0 means this mode is not available at all. 
*/ #define PWC_FPS_MAX_NALA 8 struct Nala_table_entry { char alternate; /* USB alternate setting */ int compressed; /* Compressed yes/no */ unsigned char mode[3]; /* precomputed mode table */ }; static unsigned int Nala_fps_vector[PWC_FPS_MAX_NALA] = { 4, 5, 7, 10, 12, 15, 20, 24 }; static struct Nala_table_entry Nala_table[PSZ_MAX][PWC_FPS_MAX_NALA] = { #include "pwc-nala.h" }; /****************************************************************************/ static int recv_control_msg(struct pwc_device *pdev, u8 request, u16 value, int recv_count) { int rc; rc = usb_control_msg(pdev->udev, usb_rcvctrlpipe(pdev->udev, 0), request, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, value, pdev->vcinterface, pdev->ctrl_buf, recv_count, USB_CTRL_GET_TIMEOUT); if (rc < 0) PWC_ERROR("recv_control_msg error %d req %02x val %04x\n", rc, request, value); return rc; } static inline int send_video_command(struct pwc_device *pdev, int index, const unsigned char *buf, int buflen) { int rc; memcpy(pdev->ctrl_buf, buf, buflen); rc = usb_control_msg(pdev->udev, usb_sndctrlpipe(pdev->udev, 0), SET_EP_STREAM_CTL, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, VIDEO_OUTPUT_CONTROL_FORMATTER, index, pdev->ctrl_buf, buflen, USB_CTRL_SET_TIMEOUT); if (rc >= 0) memcpy(pdev->cmd_buf, buf, buflen); else PWC_ERROR("send_video_command error %d\n", rc); return rc; } int send_control_msg(struct pwc_device *pdev, u8 request, u16 value, void *buf, int buflen) { return usb_control_msg(pdev->udev, usb_sndctrlpipe(pdev->udev, 0), request, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, value, pdev->vcinterface, buf, buflen, USB_CTRL_SET_TIMEOUT); } static int set_video_mode_Nala(struct pwc_device *pdev, int size, int pixfmt, int frames, int *compression, int send_to_cam) { int fps, ret = 0; struct Nala_table_entry *pEntry; int frames2frames[31] = { /* closest match of framerate */ 0, 0, 0, 0, 4, /* 0-4 */ 5, 5, 7, 7, 10, /* 5-9 */ 10, 10, 12, 12, 15, /* 10-14 */ 15, 15, 15, 20, 20, /* 15-19 */ 
20, 20, 20, 24, 24, /* 20-24 */ 24, 24, 24, 24, 24, /* 25-29 */ 24 /* 30 */ }; int frames2table[31] = { 0, 0, 0, 0, 0, /* 0-4 */ 1, 1, 1, 2, 2, /* 5-9 */ 3, 3, 4, 4, 4, /* 10-14 */ 5, 5, 5, 5, 5, /* 15-19 */ 6, 6, 6, 6, 7, /* 20-24 */ 7, 7, 7, 7, 7, /* 25-29 */ 7 /* 30 */ }; if (size < 0 || size > PSZ_CIF) return -EINVAL; if (frames < 4) frames = 4; else if (frames > 25) frames = 25; frames = frames2frames[frames]; fps = frames2table[frames]; pEntry = &Nala_table[size][fps]; if (pEntry->alternate == 0) return -EINVAL; if (send_to_cam) ret = send_video_command(pdev, pdev->vendpoint, pEntry->mode, 3); if (ret < 0) return ret; if (pEntry->compressed && pixfmt == V4L2_PIX_FMT_YUV420) pwc_dec1_init(pdev, pEntry->mode); /* Set various parameters */ pdev->pixfmt = pixfmt; pdev->vframes = frames; pdev->valternate = pEntry->alternate; pdev->width = pwc_image_sizes[size][0]; pdev->height = pwc_image_sizes[size][1]; pdev->frame_size = (pdev->width * pdev->height * 3) / 2; if (pEntry->compressed) { if (pdev->release < 5) { /* 4 fold compression */ pdev->vbandlength = 528; pdev->frame_size /= 4; } else { pdev->vbandlength = 704; pdev->frame_size /= 3; } } else pdev->vbandlength = 0; /* Let pwc-if.c:isoc_init know we don't support higher compression */ *compression = 3; return 0; } static int set_video_mode_Timon(struct pwc_device *pdev, int size, int pixfmt, int frames, int *compression, int send_to_cam) { const struct Timon_table_entry *pChoose; int fps, ret = 0; if (size >= PSZ_MAX || *compression < 0 || *compression > 3) return -EINVAL; if (frames < 5) frames = 5; else if (size == PSZ_VGA && frames > 15) frames = 15; else if (frames > 30) frames = 30; fps = (frames / 5) - 1; /* Find a supported framerate with progressively higher compression */ pChoose = NULL; while (*compression <= 3) { pChoose = &Timon_table[size][fps][*compression]; if (pChoose->alternate != 0) break; (*compression)++; } if (pChoose == NULL || pChoose->alternate == 0) return -ENOENT; /* Not supported. 
*/ if (send_to_cam) ret = send_video_command(pdev, pdev->vendpoint, pChoose->mode, 13); if (ret < 0) return ret; if (pChoose->bandlength > 0 && pixfmt == V4L2_PIX_FMT_YUV420) pwc_dec23_init(pdev, pChoose->mode); /* Set various parameters */ pdev->pixfmt = pixfmt; pdev->vframes = (fps + 1) * 5; pdev->valternate = pChoose->alternate; pdev->width = pwc_image_sizes[size][0]; pdev->height = pwc_image_sizes[size][1]; pdev->vbandlength = pChoose->bandlength; if (pChoose->bandlength > 0) pdev->frame_size = (pChoose->bandlength * pdev->height) / 4; else pdev->frame_size = (pdev->width * pdev->height * 12) / 8; return 0; } static int set_video_mode_Kiara(struct pwc_device *pdev, int size, int pixfmt, int frames, int *compression, int send_to_cam) { const struct Kiara_table_entry *pChoose = NULL; int fps, ret = 0; if (size >= PSZ_MAX || *compression < 0 || *compression > 3) return -EINVAL; if (frames < 5) frames = 5; else if (size == PSZ_VGA && frames > 15) frames = 15; else if (frames > 30) frames = 30; fps = (frames / 5) - 1; /* Find a supported framerate with progressively higher compression */ while (*compression <= 3) { pChoose = &Kiara_table[size][fps][*compression]; if (pChoose->alternate != 0) break; (*compression)++; } if (pChoose == NULL || pChoose->alternate == 0) return -ENOENT; /* Not supported. 
*/ /* Firmware bug: video endpoint is 5, but commands are sent to endpoint 4 */ if (send_to_cam) ret = send_video_command(pdev, 4, pChoose->mode, 12); if (ret < 0) return ret; if (pChoose->bandlength > 0 && pixfmt == V4L2_PIX_FMT_YUV420) pwc_dec23_init(pdev, pChoose->mode); /* All set and go */ pdev->pixfmt = pixfmt; pdev->vframes = (fps + 1) * 5; pdev->valternate = pChoose->alternate; pdev->width = pwc_image_sizes[size][0]; pdev->height = pwc_image_sizes[size][1]; pdev->vbandlength = pChoose->bandlength; if (pdev->vbandlength > 0) pdev->frame_size = (pdev->vbandlength * pdev->height) / 4; else pdev->frame_size = (pdev->width * pdev->height * 12) / 8; PWC_TRACE("frame_size=%d, vframes=%d, vsize=%d, vbandlength=%d\n", pdev->frame_size, pdev->vframes, size, pdev->vbandlength); return 0; } int pwc_set_video_mode(struct pwc_device *pdev, int width, int height, int pixfmt, int frames, int *compression, int send_to_cam) { int ret, size; PWC_DEBUG_FLOW("set_video_mode(%dx%d @ %d, pixfmt %08x).\n", width, height, frames, pixfmt); size = pwc_get_size(pdev, width, height); PWC_TRACE("decode_size = %d.\n", size); if (DEVICE_USE_CODEC1(pdev->type)) { ret = set_video_mode_Nala(pdev, size, pixfmt, frames, compression, send_to_cam); } else if (DEVICE_USE_CODEC3(pdev->type)) { ret = set_video_mode_Kiara(pdev, size, pixfmt, frames, compression, send_to_cam); } else { ret = set_video_mode_Timon(pdev, size, pixfmt, frames, compression, send_to_cam); } if (ret < 0) { PWC_ERROR("Failed to set video mode %s@%d fps; return code = %d\n", size2name[size], frames, ret); return ret; } pdev->frame_total_size = pdev->frame_size + pdev->frame_header_size + pdev->frame_trailer_size; PWC_DEBUG_SIZE("Set resolution to %dx%d\n", pdev->width, pdev->height); return 0; } static unsigned int pwc_get_fps_Nala(struct pwc_device *pdev, unsigned int index, unsigned int size) { unsigned int i; for (i = 0; i < PWC_FPS_MAX_NALA; i++) { if (Nala_table[size][i].alternate) { if (index--==0) return 
Nala_fps_vector[i]; } } return 0; } static unsigned int pwc_get_fps_Kiara(struct pwc_device *pdev, unsigned int index, unsigned int size) { unsigned int i; for (i = 0; i < PWC_FPS_MAX_KIARA; i++) { if (Kiara_table[size][i][3].alternate) { if (index--==0) return Kiara_fps_vector[i]; } } return 0; } static unsigned int pwc_get_fps_Timon(struct pwc_device *pdev, unsigned int index, unsigned int size) { unsigned int i; for (i=0; i < PWC_FPS_MAX_TIMON; i++) { if (Timon_table[size][i][3].alternate) { if (index--==0) return Timon_fps_vector[i]; } } return 0; } unsigned int pwc_get_fps(struct pwc_device *pdev, unsigned int index, unsigned int size) { unsigned int ret; if (DEVICE_USE_CODEC1(pdev->type)) { ret = pwc_get_fps_Nala(pdev, index, size); } else if (DEVICE_USE_CODEC3(pdev->type)) { ret = pwc_get_fps_Kiara(pdev, index, size); } else { ret = pwc_get_fps_Timon(pdev, index, size); } return ret; } int pwc_get_u8_ctrl(struct pwc_device *pdev, u8 request, u16 value, int *data) { int ret; ret = recv_control_msg(pdev, request, value, 1); if (ret < 0) return ret; *data = pdev->ctrl_buf[0]; return 0; } int pwc_set_u8_ctrl(struct pwc_device *pdev, u8 request, u16 value, u8 data) { int ret; pdev->ctrl_buf[0] = data; ret = send_control_msg(pdev, request, value, pdev->ctrl_buf, 1); if (ret < 0) return ret; return 0; } int pwc_get_s8_ctrl(struct pwc_device *pdev, u8 request, u16 value, int *data) { int ret; ret = recv_control_msg(pdev, request, value, 1); if (ret < 0) return ret; *data = ((s8 *)pdev->ctrl_buf)[0]; return 0; } int pwc_get_u16_ctrl(struct pwc_device *pdev, u8 request, u16 value, int *data) { int ret; ret = recv_control_msg(pdev, request, value, 2); if (ret < 0) return ret; *data = (pdev->ctrl_buf[1] << 8) | pdev->ctrl_buf[0]; return 0; } int pwc_set_u16_ctrl(struct pwc_device *pdev, u8 request, u16 value, u16 data) { int ret; pdev->ctrl_buf[0] = data & 0xff; pdev->ctrl_buf[1] = data >> 8; ret = send_control_msg(pdev, request, value, pdev->ctrl_buf, 2); if (ret < 0) 
return ret; return 0; } int pwc_button_ctrl(struct pwc_device *pdev, u16 value) { int ret; ret = send_control_msg(pdev, SET_STATUS_CTL, value, NULL, 0); if (ret < 0) return ret; return 0; } /* POWER */ void pwc_camera_power(struct pwc_device *pdev, int power) { int r; if (!pdev->power_save) return; if (pdev->type < 675 || (pdev->type < 730 && pdev->release < 6)) return; /* Not supported by Nala or Timon < release 6 */ if (power) pdev->ctrl_buf[0] = 0x00; /* active */ else pdev->ctrl_buf[0] = 0xFF; /* power save */ r = send_control_msg(pdev, SET_STATUS_CTL, SET_POWER_SAVE_MODE_FORMATTER, pdev->ctrl_buf, 1); if (r < 0) PWC_ERROR("Failed to power %s camera (%d)\n", power ? "on" : "off", r); } int pwc_set_leds(struct pwc_device *pdev, int on_value, int off_value) { int r; if (pdev->type < 730) return 0; on_value /= 100; off_value /= 100; if (on_value < 0) on_value = 0; if (on_value > 0xff) on_value = 0xff; if (off_value < 0) off_value = 0; if (off_value > 0xff) off_value = 0xff; pdev->ctrl_buf[0] = on_value; pdev->ctrl_buf[1] = off_value; r = send_control_msg(pdev, SET_STATUS_CTL, LED_FORMATTER, pdev->ctrl_buf, 2); if (r < 0) PWC_ERROR("Failed to set LED on/off time (%d)\n", r); return r; } #ifdef CONFIG_USB_PWC_DEBUG int pwc_get_cmos_sensor(struct pwc_device *pdev, int *sensor) { int ret = -1, request; if (pdev->type < 675) request = SENSOR_TYPE_FORMATTER1; else if (pdev->type < 730) return -1; /* The Vesta series doesn't have this call */ else request = SENSOR_TYPE_FORMATTER2; ret = recv_control_msg(pdev, GET_STATUS_CTL, request, 1); if (ret < 0) return ret; if (pdev->type < 675) *sensor = pdev->ctrl_buf[0] | 0x100; else *sensor = pdev->ctrl_buf[0]; return 0; } #endif
gpl-2.0
ZTE-Dev/android_kernel_zte_p892e10
arch/blackfin/kernel/traps.c
7868
16078
/* * Main exception handling logic. * * Copyright 2004-2010 Analog Devices Inc. * * Licensed under the GPL-2 or later */ #include <linux/bug.h> #include <linux/uaccess.h> #include <linux/module.h> #include <asm/traps.h> #include <asm/cplb.h> #include <asm/blackfin.h> #include <asm/irq_handler.h> #include <linux/irq.h> #include <asm/trace.h> #include <asm/fixed_code.h> #include <asm/pseudo_instructions.h> #include <asm/pda.h> #ifdef CONFIG_KGDB # include <linux/kgdb.h> # define CHK_DEBUGGER_TRAP() \ do { \ kgdb_handle_exception(trapnr, sig, info.si_code, fp); \ } while (0) # define CHK_DEBUGGER_TRAP_MAYBE() \ do { \ if (kgdb_connected) \ CHK_DEBUGGER_TRAP(); \ } while (0) #else # define CHK_DEBUGGER_TRAP() do { } while (0) # define CHK_DEBUGGER_TRAP_MAYBE() do { } while (0) #endif #ifdef CONFIG_DEBUG_VERBOSE #define verbose_printk(fmt, arg...) \ printk(fmt, ##arg) #else #define verbose_printk(fmt, arg...) \ ({ if (0) printk(fmt, ##arg); 0; }) #endif #if defined(CONFIG_DEBUG_MMRS) || defined(CONFIG_DEBUG_MMRS_MODULE) u32 last_seqstat; #ifdef CONFIG_DEBUG_MMRS_MODULE EXPORT_SYMBOL(last_seqstat); #endif #endif /* Initiate the event table handler */ void __init trap_init(void) { CSYNC(); bfin_write_EVT3(trap); CSYNC(); } static int kernel_mode_regs(struct pt_regs *regs) { return regs->ipend & 0xffc0; } asmlinkage notrace void trap_c(struct pt_regs *fp) { #ifdef CONFIG_DEBUG_BFIN_HWTRACE_ON int j; #endif #ifdef CONFIG_BFIN_PSEUDODBG_INSNS int opcode; #endif unsigned int cpu = raw_smp_processor_id(); const char *strerror = NULL; int sig = 0; siginfo_t info; unsigned long trapnr = fp->seqstat & SEQSTAT_EXCAUSE; trace_buffer_save(j); #if defined(CONFIG_DEBUG_MMRS) || defined(CONFIG_DEBUG_MMRS_MODULE) last_seqstat = (u32)fp->seqstat; #endif /* Important - be very careful dereferncing pointers - will lead to * double faults if the stack has become corrupt */ /* trap_c() will be called for exceptions. During exceptions * processing, the pc value should be set with retx value. 
* With this change we can cleanup some code in signal.c- TODO */ fp->orig_pc = fp->retx; /* printk("exception: 0x%x, ipend=%x, reti=%x, retx=%x\n", trapnr, fp->ipend, fp->pc, fp->retx); */ /* send the appropriate signal to the user program */ switch (trapnr) { /* This table works in conjunction with the one in ./mach-common/entry.S * Some exceptions are handled there (in assembly, in exception space) * Some are handled here, (in C, in interrupt space) * Some, like CPLB, are handled in both, where the normal path is * handled in assembly/exception space, and the error path is handled * here */ /* 0x00 - Linux Syscall, getting here is an error */ /* 0x01 - userspace gdb breakpoint, handled here */ case VEC_EXCPT01: info.si_code = TRAP_ILLTRAP; sig = SIGTRAP; CHK_DEBUGGER_TRAP_MAYBE(); /* Check if this is a breakpoint in kernel space */ if (kernel_mode_regs(fp)) goto traps_done; else break; /* 0x03 - User Defined, userspace stack overflow */ case VEC_EXCPT03: info.si_code = SEGV_STACKFLOW; sig = SIGSEGV; strerror = KERN_NOTICE EXC_0x03(KERN_NOTICE); CHK_DEBUGGER_TRAP_MAYBE(); break; /* 0x02 - KGDB initial connection and break signal trap */ case VEC_EXCPT02: #ifdef CONFIG_KGDB info.si_code = TRAP_ILLTRAP; sig = SIGTRAP; CHK_DEBUGGER_TRAP(); goto traps_done; #endif /* 0x04 - User Defined */ /* 0x05 - User Defined */ /* 0x06 - User Defined */ /* 0x07 - User Defined */ /* 0x08 - User Defined */ /* 0x09 - User Defined */ /* 0x0A - User Defined */ /* 0x0B - User Defined */ /* 0x0C - User Defined */ /* 0x0D - User Defined */ /* 0x0E - User Defined */ /* 0x0F - User Defined */ /* If we got here, it is most likely that someone was trying to use a * custom exception handler, and it is not actually installed properly */ case VEC_EXCPT04 ... 
VEC_EXCPT15: info.si_code = ILL_ILLPARAOP; sig = SIGILL; strerror = KERN_NOTICE EXC_0x04(KERN_NOTICE); CHK_DEBUGGER_TRAP_MAYBE(); break; /* 0x10 HW Single step, handled here */ case VEC_STEP: info.si_code = TRAP_STEP; sig = SIGTRAP; CHK_DEBUGGER_TRAP_MAYBE(); /* Check if this is a single step in kernel space */ if (kernel_mode_regs(fp)) goto traps_done; else break; /* 0x11 - Trace Buffer Full, handled here */ case VEC_OVFLOW: info.si_code = TRAP_TRACEFLOW; sig = SIGTRAP; strerror = KERN_NOTICE EXC_0x11(KERN_NOTICE); CHK_DEBUGGER_TRAP_MAYBE(); break; /* 0x12 - Reserved, Caught by default */ /* 0x13 - Reserved, Caught by default */ /* 0x14 - Reserved, Caught by default */ /* 0x15 - Reserved, Caught by default */ /* 0x16 - Reserved, Caught by default */ /* 0x17 - Reserved, Caught by default */ /* 0x18 - Reserved, Caught by default */ /* 0x19 - Reserved, Caught by default */ /* 0x1A - Reserved, Caught by default */ /* 0x1B - Reserved, Caught by default */ /* 0x1C - Reserved, Caught by default */ /* 0x1D - Reserved, Caught by default */ /* 0x1E - Reserved, Caught by default */ /* 0x1F - Reserved, Caught by default */ /* 0x20 - Reserved, Caught by default */ /* 0x21 - Undefined Instruction, handled here */ case VEC_UNDEF_I: #ifdef CONFIG_BUG if (kernel_mode_regs(fp)) { switch (report_bug(fp->pc, fp)) { case BUG_TRAP_TYPE_NONE: break; case BUG_TRAP_TYPE_WARN: dump_bfin_trace_buffer(); fp->pc += 2; goto traps_done; case BUG_TRAP_TYPE_BUG: /* call to panic() will dump trace, and it is * off at this point, so it won't be clobbered */ panic("BUG()"); } } #endif #ifdef CONFIG_BFIN_PSEUDODBG_INSNS /* * Support for the fake instructions, if the instruction fails, * then just execute a illegal opcode failure (like normal). 
* Don't support these instructions inside the kernel */ if (!kernel_mode_regs(fp) && get_instruction(&opcode, (unsigned short *)fp->pc)) { if (execute_pseudodbg_assert(fp, opcode)) goto traps_done; if (execute_pseudodbg(fp, opcode)) goto traps_done; } #endif info.si_code = ILL_ILLOPC; sig = SIGILL; strerror = KERN_NOTICE EXC_0x21(KERN_NOTICE); CHK_DEBUGGER_TRAP_MAYBE(); break; /* 0x22 - Illegal Instruction Combination, handled here */ case VEC_ILGAL_I: info.si_code = ILL_ILLPARAOP; sig = SIGILL; strerror = KERN_NOTICE EXC_0x22(KERN_NOTICE); CHK_DEBUGGER_TRAP_MAYBE(); break; /* 0x23 - Data CPLB protection violation, handled here */ case VEC_CPLB_VL: info.si_code = ILL_CPLB_VI; sig = SIGSEGV; strerror = KERN_NOTICE EXC_0x23(KERN_NOTICE); CHK_DEBUGGER_TRAP_MAYBE(); break; /* 0x24 - Data access misaligned, handled here */ case VEC_MISALI_D: info.si_code = BUS_ADRALN; sig = SIGBUS; strerror = KERN_NOTICE EXC_0x24(KERN_NOTICE); CHK_DEBUGGER_TRAP_MAYBE(); break; /* 0x25 - Unrecoverable Event, handled here */ case VEC_UNCOV: info.si_code = ILL_ILLEXCPT; sig = SIGILL; strerror = KERN_NOTICE EXC_0x25(KERN_NOTICE); CHK_DEBUGGER_TRAP_MAYBE(); break; /* 0x26 - Data CPLB Miss, normal case is handled in _cplb_hdr, error case is handled here */ case VEC_CPLB_M: info.si_code = BUS_ADRALN; sig = SIGBUS; strerror = KERN_NOTICE EXC_0x26(KERN_NOTICE); break; /* 0x27 - Data CPLB Multiple Hits - Linux Trap Zero, handled here */ case VEC_CPLB_MHIT: info.si_code = ILL_CPLB_MULHIT; sig = SIGSEGV; #ifdef CONFIG_DEBUG_HUNT_FOR_ZERO if (cpu_pda[cpu].dcplb_fault_addr < FIXED_CODE_START) strerror = KERN_NOTICE "NULL pointer access\n"; else #endif strerror = KERN_NOTICE EXC_0x27(KERN_NOTICE); CHK_DEBUGGER_TRAP_MAYBE(); break; /* 0x28 - Emulation Watchpoint, handled here */ case VEC_WATCH: info.si_code = TRAP_WATCHPT; sig = SIGTRAP; pr_debug(EXC_0x28(KERN_DEBUG)); CHK_DEBUGGER_TRAP_MAYBE(); /* Check if this is a watchpoint in kernel space */ if (kernel_mode_regs(fp)) goto traps_done; else break; 
#ifdef CONFIG_BF535 /* 0x29 - Instruction fetch access error (535 only) */ case VEC_ISTRU_VL: /* ADSP-BF535 only (MH) */ info.si_code = BUS_OPFETCH; sig = SIGBUS; strerror = KERN_NOTICE "BF535: VEC_ISTRU_VL\n"; CHK_DEBUGGER_TRAP_MAYBE(); break; #else /* 0x29 - Reserved, Caught by default */ #endif /* 0x2A - Instruction fetch misaligned, handled here */ case VEC_MISALI_I: info.si_code = BUS_ADRALN; sig = SIGBUS; strerror = KERN_NOTICE EXC_0x2A(KERN_NOTICE); CHK_DEBUGGER_TRAP_MAYBE(); break; /* 0x2B - Instruction CPLB protection violation, handled here */ case VEC_CPLB_I_VL: info.si_code = ILL_CPLB_VI; sig = SIGBUS; strerror = KERN_NOTICE EXC_0x2B(KERN_NOTICE); CHK_DEBUGGER_TRAP_MAYBE(); break; /* 0x2C - Instruction CPLB miss, handled in _cplb_hdr */ case VEC_CPLB_I_M: info.si_code = ILL_CPLB_MISS; sig = SIGBUS; strerror = KERN_NOTICE EXC_0x2C(KERN_NOTICE); break; /* 0x2D - Instruction CPLB Multiple Hits, handled here */ case VEC_CPLB_I_MHIT: info.si_code = ILL_CPLB_MULHIT; sig = SIGSEGV; #ifdef CONFIG_DEBUG_HUNT_FOR_ZERO if (cpu_pda[cpu].icplb_fault_addr < FIXED_CODE_START) strerror = KERN_NOTICE "Jump to NULL address\n"; else #endif strerror = KERN_NOTICE EXC_0x2D(KERN_NOTICE); CHK_DEBUGGER_TRAP_MAYBE(); break; /* 0x2E - Illegal use of Supervisor Resource, handled here */ case VEC_ILL_RES: info.si_code = ILL_PRVOPC; sig = SIGILL; strerror = KERN_NOTICE EXC_0x2E(KERN_NOTICE); CHK_DEBUGGER_TRAP_MAYBE(); break; /* 0x2F - Reserved, Caught by default */ /* 0x30 - Reserved, Caught by default */ /* 0x31 - Reserved, Caught by default */ /* 0x32 - Reserved, Caught by default */ /* 0x33 - Reserved, Caught by default */ /* 0x34 - Reserved, Caught by default */ /* 0x35 - Reserved, Caught by default */ /* 0x36 - Reserved, Caught by default */ /* 0x37 - Reserved, Caught by default */ /* 0x38 - Reserved, Caught by default */ /* 0x39 - Reserved, Caught by default */ /* 0x3A - Reserved, Caught by default */ /* 0x3B - Reserved, Caught by default */ /* 0x3C - Reserved, Caught by 
default */ /* 0x3D - Reserved, Caught by default */ /* 0x3E - Reserved, Caught by default */ /* 0x3F - Reserved, Caught by default */ case VEC_HWERR: info.si_code = BUS_ADRALN; sig = SIGBUS; switch (fp->seqstat & SEQSTAT_HWERRCAUSE) { /* System MMR Error */ case (SEQSTAT_HWERRCAUSE_SYSTEM_MMR): info.si_code = BUS_ADRALN; sig = SIGBUS; strerror = KERN_NOTICE HWC_x2(KERN_NOTICE); break; /* External Memory Addressing Error */ case (SEQSTAT_HWERRCAUSE_EXTERN_ADDR): if (ANOMALY_05000310) { static unsigned long anomaly_rets; if ((fp->pc >= (L1_CODE_START + L1_CODE_LENGTH - 512)) && (fp->pc < (L1_CODE_START + L1_CODE_LENGTH))) { /* * A false hardware error will happen while fetching at * the L1 instruction SRAM boundary. Ignore it. */ anomaly_rets = fp->rets; goto traps_done; } else if (fp->rets == anomaly_rets) { /* * While boundary code returns to a function, at the ret * point, a new false hardware error might occur too based * on tests. Ignore it too. */ goto traps_done; } else if ((fp->rets >= (L1_CODE_START + L1_CODE_LENGTH - 512)) && (fp->rets < (L1_CODE_START + L1_CODE_LENGTH))) { /* * If boundary code calls a function, at the entry point, * a new false hardware error maybe happen based on tests. * Ignore it too. 
*/ goto traps_done; } else anomaly_rets = 0; } info.si_code = BUS_ADRERR; sig = SIGBUS; strerror = KERN_NOTICE HWC_x3(KERN_NOTICE); break; /* Performance Monitor Overflow */ case (SEQSTAT_HWERRCAUSE_PERF_FLOW): strerror = KERN_NOTICE HWC_x12(KERN_NOTICE); break; /* RAISE 5 instruction */ case (SEQSTAT_HWERRCAUSE_RAISE_5): printk(KERN_NOTICE HWC_x18(KERN_NOTICE)); break; default: /* Reserved */ printk(KERN_NOTICE HWC_default(KERN_NOTICE)); break; } CHK_DEBUGGER_TRAP_MAYBE(); break; /* * We should be handling all known exception types above, * if we get here we hit a reserved one, so panic */ default: info.si_code = ILL_ILLPARAOP; sig = SIGILL; verbose_printk(KERN_EMERG "Caught Unhandled Exception, code = %08lx\n", (fp->seqstat & SEQSTAT_EXCAUSE)); CHK_DEBUGGER_TRAP_MAYBE(); break; } BUG_ON(sig == 0); /* If the fault was caused by a kernel thread, or interrupt handler * we will kernel panic, so the system reboots. */ if (kernel_mode_regs(fp) || (current && !current->mm)) { console_verbose(); oops_in_progress = 1; } if (sig != SIGTRAP) { if (strerror) verbose_printk(strerror); dump_bfin_process(fp); dump_bfin_mem(fp); show_regs(fp); /* Print out the trace buffer if it makes sense */ #ifndef CONFIG_DEBUG_BFIN_NO_KERN_HWTRACE if (trapnr == VEC_CPLB_I_M || trapnr == VEC_CPLB_M) verbose_printk(KERN_NOTICE "No trace since you do not have " "CONFIG_DEBUG_BFIN_NO_KERN_HWTRACE enabled\n\n"); else #endif dump_bfin_trace_buffer(); if (oops_in_progress) { /* Dump the current kernel stack */ verbose_printk(KERN_NOTICE "Kernel Stack\n"); show_stack(current, NULL); print_modules(); #ifndef CONFIG_ACCESS_CHECK verbose_printk(KERN_EMERG "Please turn on " "CONFIG_ACCESS_CHECK\n"); #endif panic("Kernel exception"); } else { #ifdef CONFIG_DEBUG_VERBOSE unsigned long *stack; /* Dump the user space stack */ stack = (unsigned long *)rdusp(); verbose_printk(KERN_NOTICE "Userspace Stack\n"); show_stack(NULL, stack); #endif } } #ifdef CONFIG_IPIPE if (!ipipe_trap_notify(fp->seqstat & 0x3f, 
fp)) #endif { info.si_signo = sig; info.si_errno = 0; switch (trapnr) { case VEC_CPLB_VL: case VEC_MISALI_D: case VEC_CPLB_M: case VEC_CPLB_MHIT: info.si_addr = (void __user *)cpu_pda[cpu].dcplb_fault_addr; break; default: info.si_addr = (void __user *)fp->pc; break; } force_sig_info(sig, &info, current); } if ((ANOMALY_05000461 && trapnr == VEC_HWERR && !access_ok(VERIFY_READ, fp->pc, 8)) || (ANOMALY_05000281 && trapnr == VEC_HWERR) || (ANOMALY_05000189 && (trapnr == VEC_CPLB_I_VL || trapnr == VEC_CPLB_VL))) fp->pc = SAFE_USER_INSTRUCTION; traps_done: trace_buffer_restore(j); } asmlinkage void double_fault_c(struct pt_regs *fp) { #ifdef CONFIG_DEBUG_BFIN_HWTRACE_ON int j; trace_buffer_save(j); #endif console_verbose(); oops_in_progress = 1; #ifdef CONFIG_DEBUG_VERBOSE printk(KERN_EMERG "Double Fault\n"); #ifdef CONFIG_DEBUG_DOUBLEFAULT_PRINT if (((long)fp->seqstat & SEQSTAT_EXCAUSE) == VEC_UNCOV) { unsigned int cpu = raw_smp_processor_id(); char buf[150]; decode_address(buf, cpu_pda[cpu].retx_doublefault); printk(KERN_EMERG "While handling exception (EXCAUSE = 0x%x) at %s:\n", (unsigned int)cpu_pda[cpu].seqstat_doublefault & SEQSTAT_EXCAUSE, buf); decode_address(buf, cpu_pda[cpu].dcplb_doublefault_addr); printk(KERN_NOTICE " DCPLB_FAULT_ADDR: %s\n", buf); decode_address(buf, cpu_pda[cpu].icplb_doublefault_addr); printk(KERN_NOTICE " ICPLB_FAULT_ADDR: %s\n", buf); decode_address(buf, fp->retx); printk(KERN_NOTICE "The instruction at %s caused a double exception\n", buf); } else #endif { dump_bfin_process(fp); dump_bfin_mem(fp); show_regs(fp); dump_bfin_trace_buffer(); } #endif panic("Double Fault - unrecoverable event"); } void panic_cplb_error(int cplb_panic, struct pt_regs *fp) { switch (cplb_panic) { case CPLB_NO_UNLOCKED: printk(KERN_EMERG "All CPLBs are locked\n"); break; case CPLB_PROT_VIOL: return; case CPLB_NO_ADDR_MATCH: return; case CPLB_UNKNOWN_ERR: printk(KERN_EMERG "Unknown CPLB Exception\n"); break; } oops_in_progress = 1; dump_bfin_process(fp); 
dump_bfin_mem(fp); show_regs(fp); dump_stack(); panic("Unrecoverable event"); } #ifdef CONFIG_BUG int is_valid_bugaddr(unsigned long addr) { unsigned int opcode; if (!get_instruction(&opcode, (unsigned short *)addr)) return 0; return opcode == BFIN_BUG_OPCODE; } #endif /* stub this out */ #ifndef CONFIG_DEBUG_VERBOSE void show_regs(struct pt_regs *fp) { } #endif
gpl-2.0
juldiadia/kernel_stock_g3815
net/ipv6/netfilter/ip6table_raw.c
8892
2121
/*
 * IPv6 raw table, a port of the IPv4 raw table to IPv6
 *
 * Copyright (C) 2003 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
 */
#include <linux/module.h>
#include <linux/netfilter_ipv6/ip6_tables.h>
#include <linux/slab.h>

/* The raw table attaches only at PRE_ROUTING and LOCAL_OUT, i.e. before
 * connection tracking sees the packet in either direction. */
#define RAW_VALID_HOOKS ((1 << NF_INET_PRE_ROUTING) | (1 << NF_INET_LOCAL_OUT))

/* Table template; a per-netns instance is created from it in net_init. */
static const struct xt_table packet_raw = {
	.name = "raw",
	.valid_hooks = RAW_VALID_HOOKS,
	.me = THIS_MODULE,
	.af = NFPROTO_IPV6,
	.priority = NF_IP6_PRI_RAW,
};

/* The work comes in here from netfilter.c. */
/* Hook entry point: dispatch the skb through this netns's raw table.
 * One of in/out is NULL depending on the hook point, so the netns is
 * derived from whichever device is present. */
static unsigned int
ip6table_raw_hook(unsigned int hook, struct sk_buff *skb,
		  const struct net_device *in, const struct net_device *out,
		  int (*okfn)(struct sk_buff *))
{
	const struct net *net = dev_net((in != NULL) ? in : out);

	return ip6t_do_table(skb, hook, in, out, net->ipv6.ip6table_raw);
}

static struct nf_hook_ops *rawtable_ops __read_mostly;

/* Per-netns init: register a fresh raw table built from the template.
 * The initial replace blob is only needed during registration and is
 * freed immediately afterwards. */
static int __net_init ip6table_raw_net_init(struct net *net)
{
	struct ip6t_replace *repl;

	repl = ip6t_alloc_initial_table(&packet_raw);
	if (repl == NULL)
		return -ENOMEM;
	net->ipv6.ip6table_raw = ip6t_register_table(net, &packet_raw, repl);
	kfree(repl);
	if (IS_ERR(net->ipv6.ip6table_raw))
		return PTR_ERR(net->ipv6.ip6table_raw);
	return 0;
}

/* Per-netns teardown: release this namespace's raw table. */
static void __net_exit ip6table_raw_net_exit(struct net *net)
{
	ip6t_unregister_table(net, net->ipv6.ip6table_raw);
}

static struct pernet_operations ip6table_raw_net_ops = {
	.init = ip6table_raw_net_init,
	.exit = ip6table_raw_net_exit,
};

/* Module init: create per-netns tables first, then attach the netfilter
 * hooks; unwind the pernet registration if hook linking fails. */
static int __init ip6table_raw_init(void)
{
	int ret;

	ret = register_pernet_subsys(&ip6table_raw_net_ops);
	if (ret < 0)
		return ret;

	/* Register hooks */
	rawtable_ops = xt_hook_link(&packet_raw, ip6table_raw_hook);
	if (IS_ERR(rawtable_ops)) {
		ret = PTR_ERR(rawtable_ops);
		goto cleanup_table;
	}

	return ret;

 cleanup_table:
	unregister_pernet_subsys(&ip6table_raw_net_ops);
	return ret;
}

/* Module exit: detach hooks before tearing down the per-netns tables
 * (reverse of init order). */
static void __exit ip6table_raw_fini(void)
{
	xt_hook_unlink(&packet_raw, rawtable_ops);
	unregister_pernet_subsys(&ip6table_raw_net_ops);
}

module_init(ip6table_raw_init);
module_exit(ip6table_raw_fini);
MODULE_LICENSE("GPL");
gpl-2.0
OMFGBKANG/nk2
net/ipv6/netfilter/ip6table_raw.c
8892
2121
/* * IPv6 raw table, a port of the IPv4 raw table to IPv6 * * Copyright (C) 2003 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu> */ #include <linux/module.h> #include <linux/netfilter_ipv6/ip6_tables.h> #include <linux/slab.h> #define RAW_VALID_HOOKS ((1 << NF_INET_PRE_ROUTING) | (1 << NF_INET_LOCAL_OUT)) static const struct xt_table packet_raw = { .name = "raw", .valid_hooks = RAW_VALID_HOOKS, .me = THIS_MODULE, .af = NFPROTO_IPV6, .priority = NF_IP6_PRI_RAW, }; /* The work comes in here from netfilter.c. */ static unsigned int ip6table_raw_hook(unsigned int hook, struct sk_buff *skb, const struct net_device *in, const struct net_device *out, int (*okfn)(struct sk_buff *)) { const struct net *net = dev_net((in != NULL) ? in : out); return ip6t_do_table(skb, hook, in, out, net->ipv6.ip6table_raw); } static struct nf_hook_ops *rawtable_ops __read_mostly; static int __net_init ip6table_raw_net_init(struct net *net) { struct ip6t_replace *repl; repl = ip6t_alloc_initial_table(&packet_raw); if (repl == NULL) return -ENOMEM; net->ipv6.ip6table_raw = ip6t_register_table(net, &packet_raw, repl); kfree(repl); if (IS_ERR(net->ipv6.ip6table_raw)) return PTR_ERR(net->ipv6.ip6table_raw); return 0; } static void __net_exit ip6table_raw_net_exit(struct net *net) { ip6t_unregister_table(net, net->ipv6.ip6table_raw); } static struct pernet_operations ip6table_raw_net_ops = { .init = ip6table_raw_net_init, .exit = ip6table_raw_net_exit, }; static int __init ip6table_raw_init(void) { int ret; ret = register_pernet_subsys(&ip6table_raw_net_ops); if (ret < 0) return ret; /* Register hooks */ rawtable_ops = xt_hook_link(&packet_raw, ip6table_raw_hook); if (IS_ERR(rawtable_ops)) { ret = PTR_ERR(rawtable_ops); goto cleanup_table; } return ret; cleanup_table: unregister_pernet_subsys(&ip6table_raw_net_ops); return ret; } static void __exit ip6table_raw_fini(void) { xt_hook_unlink(&packet_raw, rawtable_ops); unregister_pernet_subsys(&ip6table_raw_net_ops); } module_init(ip6table_raw_init); 
module_exit(ip6table_raw_fini); MODULE_LICENSE("GPL");
gpl-2.0
squirrel20/linux-4.8.15
drivers/clk/tegra/clk-tegra210.c
189
93119
/* * Copyright (c) 2012-2014 NVIDIA CORPORATION. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include <linux/io.h> #include <linux/clk.h> #include <linux/clk-provider.h> #include <linux/clkdev.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/delay.h> #include <linux/export.h> #include <linux/clk/tegra.h> #include <dt-bindings/clock/tegra210-car.h> #include "clk.h" #include "clk-id.h" /* * TEGRA210_CAR_BANK_COUNT: the number of peripheral clock register * banks present in the Tegra210 CAR IP block. The banks are * identified by single letters, e.g.: L, H, U, V, W, X, Y. 
 * See periph_regs[] in drivers/clk/tegra/clk.c
 */
#define TEGRA210_CAR_BANK_COUNT 7

/* CAR register offsets for peripheral clock sources */
#define CLK_SOURCE_CSITE 0x1d4
#define CLK_SOURCE_EMC 0x19c

/* Per-PLL base/output/MISC register offsets within the CAR aperture */
#define PLLC_BASE 0x80
#define PLLC_OUT 0x84
#define PLLC_MISC0 0x88
#define PLLC_MISC1 0x8c
#define PLLC_MISC2 0x5d0
#define PLLC_MISC3 0x5d4
#define PLLC2_BASE 0x4e8
#define PLLC2_MISC0 0x4ec
#define PLLC2_MISC1 0x4f0
#define PLLC2_MISC2 0x4f4
#define PLLC2_MISC3 0x4f8
#define PLLC3_BASE 0x4fc
#define PLLC3_MISC0 0x500
#define PLLC3_MISC1 0x504
#define PLLC3_MISC2 0x508
#define PLLC3_MISC3 0x50c
#define PLLM_BASE 0x90
#define PLLM_MISC1 0x98
#define PLLM_MISC2 0x9c
#define PLLP_BASE 0xa0
#define PLLP_MISC0 0xac
#define PLLP_MISC1 0x680
#define PLLA_BASE 0xb0
#define PLLA_MISC0 0xbc
#define PLLA_MISC1 0xb8
#define PLLA_MISC2 0x5d8
#define PLLD_BASE 0xd0
#define PLLD_MISC0 0xdc
#define PLLD_MISC1 0xd8
#define PLLU_BASE 0xc0
#define PLLU_OUTA 0xc4
#define PLLU_MISC0 0xcc
#define PLLU_MISC1 0xc8
#define PLLX_BASE 0xe0
#define PLLX_MISC0 0xe4
#define PLLX_MISC1 0x510
#define PLLX_MISC2 0x514
#define PLLX_MISC3 0x518
#define PLLX_MISC4 0x5f0
#define PLLX_MISC5 0x5f4
#define PLLE_BASE 0xe8
#define PLLE_MISC0 0xec
#define PLLD2_BASE 0x4b8
#define PLLD2_MISC0 0x4bc
#define PLLD2_MISC1 0x570
#define PLLD2_MISC2 0x574
#define PLLD2_MISC3 0x578
#define PLLE_AUX 0x48c
#define PLLRE_BASE 0x4c4
#define PLLRE_MISC0 0x4c8
#define PLLRE_OUT1 0x4cc
#define PLLDP_BASE 0x590
#define PLLDP_MISC 0x594
#define PLLC4_BASE 0x5a4
#define PLLC4_MISC0 0x5a8
#define PLLC4_OUT 0x5e4
#define PLLMB_BASE 0x5e8
#define PLLMB_MISC1 0x5ec
#define PLLA1_BASE 0x6a4
#define PLLA1_MISC0 0x6a8
#define PLLA1_MISC1 0x6ac
#define PLLA1_MISC2 0x6b0
#define PLLA1_MISC3 0x6b4

/* IDDQ (analog power-down) control bit positions, per PLL */
#define PLLU_IDDQ_BIT 31
#define PLLCX_IDDQ_BIT 27
#define PLLRE_IDDQ_BIT 24
#define PLLA_IDDQ_BIT 25
#define PLLD_IDDQ_BIT 20
#define PLLSS_IDDQ_BIT 18
#define PLLM_IDDQ_BIT 5
#define PLLMB_IDDQ_BIT 17
#define PLLXP_IDDQ_BIT 3

#define PLLCX_RESET_BIT 30

/* Lock status bits and lock-detect enable bit positions */
#define PLL_BASE_LOCK BIT(27)
#define PLLCX_BASE_LOCK BIT(26)
#define PLLE_MISC_LOCK BIT(11)
#define PLLRE_MISC_LOCK BIT(27)

#define PLL_MISC_LOCK_ENABLE 18
#define PLLC_MISC_LOCK_ENABLE 24
#define PLLDU_MISC_LOCK_ENABLE 22
#define PLLU_MISC_LOCK_ENABLE 29
#define PLLE_MISC_LOCK_ENABLE 9
#define PLLRE_MISC_LOCK_ENABLE 30
#define PLLSS_MISC_LOCK_ENABLE 30
#define PLLP_MISC_LOCK_ENABLE 18
#define PLLM_MISC_LOCK_ENABLE 4
#define PLLMB_MISC_LOCK_ENABLE 16
#define PLLA_MISC_LOCK_ENABLE 28
/* NOTE(review): identical duplicate of PLLU_MISC_LOCK_ENABLE above — harmless
 * redefinition (same replacement list), but a candidate for removal. */
#define PLLU_MISC_LOCK_ENABLE 29
#define PLLD_MISC_LOCK_ENABLE 18

/* Fractional (sigma-delta modulator) divider controls */
#define PLLA_SDM_DIN_MASK 0xffff
#define PLLA_SDM_EN_MASK BIT(26)
#define PLLD_SDM_EN_MASK BIT(16)
#define PLLD2_SDM_EN_MASK BIT(31)
#define PLLD2_SSC_EN_MASK BIT(30)
#define PLLDP_SS_CFG 0x598
#define PLLDP_SDM_EN_MASK BIT(31)
#define PLLDP_SSC_EN_MASK BIT(30)
#define PLLDP_SS_CTRL1 0x59c
#define PLLDP_SS_CTRL2 0x5a0

/* PMC-side PLLM override registers */
#define PMC_PLLM_WB0_OVERRIDE 0x1dc
#define PMC_PLLM_WB0_OVERRIDE_2 0x2b0

/* Pad-PLL hardware power sequencer controls (SATA / XUSB / UTMI / PLLU) */
#define SATA_PLL_CFG0 0x490
#define SATA_PLL_CFG0_PADPLL_RESET_SWCTL BIT(0)
#define SATA_PLL_CFG0_PADPLL_USE_LOCKDET BIT(2)
#define SATA_PLL_CFG0_PADPLL_SLEEP_IDDQ BIT(13)
#define SATA_PLL_CFG0_SEQ_ENABLE BIT(24)

#define XUSBIO_PLL_CFG0 0x51c
#define XUSBIO_PLL_CFG0_PADPLL_RESET_SWCTL BIT(0)
#define XUSBIO_PLL_CFG0_CLK_ENABLE_SWCTL BIT(2)
#define XUSBIO_PLL_CFG0_PADPLL_USE_LOCKDET BIT(6)
#define XUSBIO_PLL_CFG0_PADPLL_SLEEP_IDDQ BIT(13)
#define XUSBIO_PLL_CFG0_SEQ_ENABLE BIT(24)

#define UTMIPLL_HW_PWRDN_CFG0 0x52c
#define UTMIPLL_HW_PWRDN_CFG0_UTMIPLL_LOCK BIT(31)
#define UTMIPLL_HW_PWRDN_CFG0_SEQ_START_STATE BIT(25)
#define UTMIPLL_HW_PWRDN_CFG0_SEQ_ENABLE BIT(24)
#define UTMIPLL_HW_PWRDN_CFG0_IDDQ_PD_INCLUDE BIT(7)
#define UTMIPLL_HW_PWRDN_CFG0_USE_LOCKDET BIT(6)
#define UTMIPLL_HW_PWRDN_CFG0_SEQ_RESET_INPUT_VALUE BIT(5)
#define UTMIPLL_HW_PWRDN_CFG0_SEQ_IN_SWCTL BIT(4)
#define UTMIPLL_HW_PWRDN_CFG0_CLK_ENABLE_SWCTL BIT(2)
#define UTMIPLL_HW_PWRDN_CFG0_IDDQ_OVERRIDE BIT(1)
#define UTMIPLL_HW_PWRDN_CFG0_IDDQ_SWCTL BIT(0)

#define PLLU_HW_PWRDN_CFG0 0x530
#define PLLU_HW_PWRDN_CFG0_IDDQ_PD_INCLUDE BIT(28)
#define PLLU_HW_PWRDN_CFG0_SEQ_ENABLE BIT(24)
#define PLLU_HW_PWRDN_CFG0_USE_SWITCH_DETECT BIT(7)
#define PLLU_HW_PWRDN_CFG0_USE_LOCKDET BIT(6)
#define PLLU_HW_PWRDN_CFG0_CLK_ENABLE_SWCTL BIT(2)
#define PLLU_HW_PWRDN_CFG0_CLK_SWITCH_SWCTL BIT(0)

#define XUSB_PLL_CFG0 0x534
#define XUSB_PLL_CFG0_UTMIPLL_LOCK_DLY 0x3ff
#define XUSB_PLL_CFG0_PLLU_LOCK_DLY_MASK (0x3ff << 14)

#define SPARE_REG0 0x55c
#define CLK_M_DIVISOR_SHIFT 2
#define CLK_M_DIVISOR_MASK 0x3

/*
 * SDM fractional divisor is 16-bit 2's complement signed number within
 * (-2^12 ... 2^12-1) range. Represented in PLL data structure as unsigned
 * 16-bit value, with "0" divisor mapped to 0xFFFF. Data "0" is used to
 * indicate that SDM is disabled.
 *
 * Effective ndiv value when SDM is enabled: ndiv + 1/2 + sdm_din/2^13
 */
#define PLL_SDM_COEFF BIT(13)
#define sdin_din_to_data(din)	((u16)((din) ? : 0xFFFFU))
#define sdin_data_to_din(dat)	(((dat) == 0xFFFFU) ? 0 : (s16)dat)

/* Tegra CPU clock and reset control regs */
#define CLK_RST_CONTROLLER_CPU_CMPLX_STATUS	0x470

#ifdef CONFIG_PM_SLEEP
/* CSITE clock source saved across suspend/resume */
static struct cpu_clk_suspend_context {
	u32 clk_csite_src;
} tegra210_cpu_clk_sctx;
#endif

/* ioremapped bases of the CAR and PMC register apertures */
static void __iomem *clk_base;
static void __iomem *pmc_base;

static unsigned long osc_freq;
static unsigned long pll_ref_freq;

/* Locks serializing read-modify-write access to shared PLL registers */
static DEFINE_SPINLOCK(pll_d_lock);
static DEFINE_SPINLOCK(pll_e_lock);
static DEFINE_SPINLOCK(pll_re_lock);
static DEFINE_SPINLOCK(pll_u_lock);
static DEFINE_SPINLOCK(emc_lock);

/* possible OSC frequencies in Hz */
static unsigned long tegra210_input_freq[] = {
	[5] = 38400000,
	[8] = 12000000,
};

/*
 * Parent names for the pllm/c/p/clk_m mux, indexed by the hardware mux
 * field value.
 * NOTE(review): "pll_mb" appears twice — presumably two mux settings map
 * to the same parent; verify against the Tegra210 TRM mux encoding.
 */
static const char *mux_pllmcp_clkm[] = {
	"pll_m", "pll_c", "pll_p", "clk_m", "pll_m_ud", "pll_mb", "pll_mb",
	"pll_p",
};
#define mux_pllmcp_clkm_idx NULL

#define PLL_ENABLE		(1 << 30)

/* PLLCX: misc control bits and boot-default register images */
#define PLLCX_MISC1_IDDQ	(1 << 27)
#define PLLCX_MISC0_RESET	(1 << 30)

#define PLLCX_MISC0_DEFAULT_VALUE	0x40080000
#define PLLCX_MISC0_WRITE_MASK		0x400ffffb
#define PLLCX_MISC1_DEFAULT_VALUE	0x08000000
#define PLLCX_MISC1_WRITE_MASK		0x08003cff
#define PLLCX_MISC2_DEFAULT_VALUE	0x1f720f05
#define PLLCX_MISC2_WRITE_MASK		0xffffff17
#define PLLCX_MISC3_DEFAULT_VALUE	0x000000c4
#define PLLCX_MISC3_WRITE_MASK		0x00ffffff

/* PLLA */
#define PLLA_BASE_IDDQ		(1 << 25)
#define PLLA_BASE_LOCK		(1 << 27)

#define PLLA_MISC0_LOCK_ENABLE	(1 << 28)
#define PLLA_MISC0_LOCK_OVERRIDE	(1 << 27)

#define PLLA_MISC2_EN_SDM	(1 << 26)
#define PLLA_MISC2_EN_DYNRAMP	(1 << 25)

#define PLLA_MISC0_DEFAULT_VALUE	0x12000020
#define PLLA_MISC0_WRITE_MASK		0x7fffffff
#define PLLA_MISC2_DEFAULT_VALUE	0x0
#define PLLA_MISC2_WRITE_MASK		0x06ffffff

/* PLLD */
#define PLLD_MISC0_EN_SDM	(1 << 16)
#define PLLD_MISC0_LOCK_OVERRIDE	(1 << 17)
#define PLLD_MISC0_LOCK_ENABLE	(1 << 18)
#define PLLD_MISC0_IDDQ		(1 << 20)
#define PLLD_MISC0_DSI_CLKENABLE	(1 << 21)

#define PLLD_MISC0_DEFAULT_VALUE	0x00140000
#define PLLD_MISC0_WRITE_MASK		0x3ff7ffff
#define PLLD_MISC1_DEFAULT_VALUE	0x20
#define PLLD_MISC1_WRITE_MASK		0x00ffffff

/* PLLD2 and PLLDP and PLLC4 */
#define PLLDSS_BASE_LOCK	(1 << 27)
#define PLLDSS_BASE_LOCK_OVERRIDE	(1 << 24)
#define PLLDSS_BASE_IDDQ	(1 << 18)
#define PLLDSS_BASE_REF_SEL_SHIFT	25
#define PLLDSS_BASE_REF_SEL_MASK	(0x3 << PLLDSS_BASE_REF_SEL_SHIFT)

#define PLLDSS_MISC0_LOCK_ENABLE	(1 << 30)

#define PLLDSS_MISC1_CFG_EN_SDM		(1 << 31)
#define PLLDSS_MISC1_CFG_EN_SSC		(1 << 30)

#define PLLD2_MISC0_DEFAULT_VALUE	0x40000020
#define PLLD2_MISC1_CFG_DEFAULT_VALUE	0x10000000
#define PLLD2_MISC2_CTRL1_DEFAULT_VALUE	0x0
#define PLLD2_MISC3_CTRL2_DEFAULT_VALUE	0x0

#define PLLDP_MISC0_DEFAULT_VALUE	0x40000020
#define PLLDP_MISC1_CFG_DEFAULT_VALUE	0xc0000000
#define PLLDP_MISC2_CTRL1_DEFAULT_VALUE	0xf400f0da
#define PLLDP_MISC3_CTRL2_DEFAULT_VALUE	0x2004f400

#define PLLDSS_MISC0_WRITE_MASK		0x47ffffff
#define PLLDSS_MISC1_CFG_WRITE_MASK	0xf8000000
#define PLLDSS_MISC2_CTRL1_WRITE_MASK	0xffffffff
#define PLLDSS_MISC3_CTRL2_WRITE_MASK	0xffffffff

#define PLLC4_MISC0_DEFAULT_VALUE	0x40000000

/* PLLRE */
#define PLLRE_MISC0_LOCK_ENABLE		(1 << 30)
#define PLLRE_MISC0_LOCK_OVERRIDE	(1 << 29)
#define PLLRE_MISC0_LOCK		(1 << 27)
#define PLLRE_MISC0_IDDQ		(1 << 24)

#define PLLRE_BASE_DEFAULT_VALUE	0x0
#define PLLRE_MISC0_DEFAULT_VALUE	0x41000000

#define PLLRE_BASE_DEFAULT_MASK		0x1c000000
#define PLLRE_MISC0_WRITE_MASK		0x67ffffff

/* PLLX */
#define PLLX_USE_DYN_RAMP	1
#define PLLX_BASE_LOCK		(1 << 27)

#define PLLX_MISC0_FO_G_DISABLE		(0x1 << 28)
#define PLLX_MISC0_LOCK_ENABLE		(0x1 << 18)

#define PLLX_MISC2_DYNRAMP_STEPB_SHIFT	24
#define PLLX_MISC2_DYNRAMP_STEPB_MASK	(0xFF << PLLX_MISC2_DYNRAMP_STEPB_SHIFT)
#define PLLX_MISC2_DYNRAMP_STEPA_SHIFT	16
#define PLLX_MISC2_DYNRAMP_STEPA_MASK	(0xFF << PLLX_MISC2_DYNRAMP_STEPA_SHIFT)
#define PLLX_MISC2_NDIV_NEW_SHIFT	8
#define PLLX_MISC2_NDIV_NEW_MASK	(0xFF << PLLX_MISC2_NDIV_NEW_SHIFT)
#define PLLX_MISC2_LOCK_OVERRIDE	(0x1 << 4)
#define PLLX_MISC2_DYNRAMP_DONE		(0x1 << 2)
#define PLLX_MISC2_EN_DYNRAMP		(0x1 << 0)

#define PLLX_MISC3_IDDQ			(0x1 << 3)

#define PLLX_MISC0_DEFAULT_VALUE	PLLX_MISC0_LOCK_ENABLE
#define PLLX_MISC0_WRITE_MASK		0x10c40000
#define PLLX_MISC1_DEFAULT_VALUE	0x20
#define PLLX_MISC1_WRITE_MASK		0x00ffffff
#define PLLX_MISC2_DEFAULT_VALUE	0x0
#define PLLX_MISC2_WRITE_MASK		0xffffff11
#define PLLX_MISC3_DEFAULT_VALUE	PLLX_MISC3_IDDQ
#define PLLX_MISC3_WRITE_MASK		0x01ff0f0f
#define PLLX_MISC4_DEFAULT_VALUE	0x0
#define PLLX_MISC4_WRITE_MASK		0x8000ffff
#define PLLX_MISC5_DEFAULT_VALUE	0x0
#define PLLX_MISC5_WRITE_MASK		0x0000ffff

#define PLLX_HW_CTRL_CFG	0x548
#define PLLX_HW_CTRL_CFG_SWCTRL	(0x1 << 0)

/* PLLMB */
#define PLLMB_BASE_LOCK		(1 << 27)

#define PLLMB_MISC1_LOCK_OVERRIDE	(1 << 18)
#define PLLMB_MISC1_IDDQ		(1 << 17)
#define PLLMB_MISC1_LOCK_ENABLE		(1 << 16)

#define PLLMB_MISC1_DEFAULT_VALUE	0x00030000
#define PLLMB_MISC1_WRITE_MASK		0x0007ffff

/* PLLP */
#define PLLP_BASE_OVERRIDE	(1 << 28)
#define PLLP_BASE_LOCK		(1 << 27)

#define PLLP_MISC0_LOCK_ENABLE	(1 << 18)
#define PLLP_MISC0_LOCK_OVERRIDE	(1 << 17)
#define PLLP_MISC0_IDDQ		(1 << 3)

#define PLLP_MISC1_HSIO_EN_SHIFT	29
#define PLLP_MISC1_HSIO_EN		(1 << PLLP_MISC1_HSIO_EN_SHIFT)
#define PLLP_MISC1_XUSB_EN_SHIFT	28
#define PLLP_MISC1_XUSB_EN		(1 << PLLP_MISC1_XUSB_EN_SHIFT)

#define PLLP_MISC0_DEFAULT_VALUE	0x00040008
#define PLLP_MISC1_DEFAULT_VALUE	0x0

#define PLLP_MISC0_WRITE_MASK		0xdc6000f
#define PLLP_MISC1_WRITE_MASK		0x70ffffff

/* PLLU */
#define PLLU_BASE_LOCK		(1 << 27)
#define PLLU_BASE_OVERRIDE	(1 << 24)
#define PLLU_BASE_CLKENABLE_USB	(1 << 21)
#define PLLU_BASE_CLKENABLE_HSIC	(1 << 22)
#define PLLU_BASE_CLKENABLE_ICUSB	(1 << 23)
#define PLLU_BASE_CLKENABLE_48M	(1 << 25)
#define PLLU_BASE_CLKENABLE_ALL	(PLLU_BASE_CLKENABLE_USB |\
				 PLLU_BASE_CLKENABLE_HSIC |\
				 PLLU_BASE_CLKENABLE_ICUSB |\
				 PLLU_BASE_CLKENABLE_48M)

#define PLLU_MISC0_IDDQ		(1 << 31)
#define PLLU_MISC0_LOCK_ENABLE	(1 << 29)
#define PLLU_MISC1_LOCK_OVERRIDE	(1 << 0)

#define PLLU_MISC0_DEFAULT_VALUE	0xa0000000
#define PLLU_MISC1_DEFAULT_VALUE	0x0

#define PLLU_MISC0_WRITE_MASK		0xbfffffff
#define PLLU_MISC1_WRITE_MASK		0x00000007

/*
 * Hand the XUSBIO pad PLL over to the hardware power sequencer:
 * clear the software-control bits and enable lock-detect gating and
 * sleep-IDDQ so the sequencer can manage the PLL autonomously.
 */
void tegra210_xusb_pll_hw_control_enable(void)
{
	u32 val;

	val = readl_relaxed(clk_base + XUSBIO_PLL_CFG0);
	val &= ~(XUSBIO_PLL_CFG0_CLK_ENABLE_SWCTL |
		 XUSBIO_PLL_CFG0_PADPLL_RESET_SWCTL);
	val |= XUSBIO_PLL_CFG0_PADPLL_USE_LOCKDET |
	       XUSBIO_PLL_CFG0_PADPLL_SLEEP_IDDQ;
	writel_relaxed(val, clk_base + XUSBIO_PLL_CFG0);
}
EXPORT_SYMBOL_GPL(tegra210_xusb_pll_hw_control_enable);

/* Kick off the XUSBIO pad PLL hardware power sequencer. */
void tegra210_xusb_pll_hw_sequence_start(void)
{
	u32 val;

	val = readl_relaxed(clk_base + XUSBIO_PLL_CFG0);
	val |= XUSBIO_PLL_CFG0_SEQ_ENABLE;
	writel_relaxed(val, clk_base + XUSBIO_PLL_CFG0);
}
EXPORT_SYMBOL_GPL(tegra210_xusb_pll_hw_sequence_start);

/* Hand the SATA pad PLL over to the hardware power sequencer. */
void tegra210_sata_pll_hw_control_enable(void)
{
	u32 val;

	val = readl_relaxed(clk_base + SATA_PLL_CFG0);
	val &= ~SATA_PLL_CFG0_PADPLL_RESET_SWCTL;
	val |= SATA_PLL_CFG0_PADPLL_USE_LOCKDET |
	       SATA_PLL_CFG0_PADPLL_SLEEP_IDDQ;
	writel_relaxed(val, clk_base + SATA_PLL_CFG0);
}
EXPORT_SYMBOL_GPL(tegra210_sata_pll_hw_control_enable);

/* Kick off the SATA pad PLL hardware power sequencer. */
void tegra210_sata_pll_hw_sequence_start(void)
{
	u32 val;

	val = readl_relaxed(clk_base + SATA_PLL_CFG0);
	val |= SATA_PLL_CFG0_SEQ_ENABLE;
	writel_relaxed(val, clk_base + SATA_PLL_CFG0);
}
EXPORT_SYMBOL_GPL(tegra210_sata_pll_hw_sequence_start);

/*
 * Compare the boot-time value of MISC<misc_num> (masked by @mask) against
 * the expected default @default_val; warn and clear params->defaults_set
 * on mismatch so callers know the bootloader left non-default state.
 */
static inline void _pll_misc_chk_default(void __iomem *base,
	struct tegra_clk_pll_params *params, u8 misc_num,
	u32 default_val, u32 mask)
{
	u32 boot_val = readl_relaxed(base + params->ext_misc_reg[misc_num]);

	boot_val &= mask;
	default_val &= mask;
	if (boot_val != default_val) {
		pr_warn("boot misc%d 0x%x: expected 0x%x\n",
			misc_num, boot_val, default_val);
		pr_warn(" (comparison mask = 0x%x)\n", mask);
		params->defaults_set = false;
	}
}

/*
 * PLLCX: PLLC, PLLC2, PLLC3, PLLA1
 * Hybrid PLLs with dynamic ramp. Dynamic ramp is allowed for any transition
 * that changes NDIV only, while PLL is already locked.
 */
static void pllcx_check_defaults(struct tegra_clk_pll_params *params)
{
	u32 default_val;

	/* RESET/IDDQ are masked out: they differ while the PLL is running */
	default_val = PLLCX_MISC0_DEFAULT_VALUE & (~PLLCX_MISC0_RESET);
	_pll_misc_chk_default(clk_base, params, 0, default_val,
			PLLCX_MISC0_WRITE_MASK);

	default_val = PLLCX_MISC1_DEFAULT_VALUE & (~PLLCX_MISC1_IDDQ);
	_pll_misc_chk_default(clk_base, params, 1, default_val,
			PLLCX_MISC1_WRITE_MASK);

	default_val = PLLCX_MISC2_DEFAULT_VALUE;
	_pll_misc_chk_default(clk_base, params, 2, default_val,
			PLLCX_MISC2_WRITE_MASK);

	default_val = PLLCX_MISC3_DEFAULT_VALUE;
	_pll_misc_chk_default(clk_base, params, 3, default_val,
			PLLCX_MISC3_WRITE_MASK);
}

/*
 * Program PLLCX-family MISC defaults. If the bootloader already enabled
 * the PLL only verify the state; otherwise write the full default images
 * (which assert PLL reset and set IDDQ).
 */
static void tegra210_pllcx_set_defaults(const char *name,
					struct tegra_clk_pll *pllcx)
{
	pllcx->params->defaults_set = true;

	if (readl_relaxed(clk_base + pllcx->params->base_reg) & PLL_ENABLE) {
		/* PLL is ON: only check if defaults already set */
		pllcx_check_defaults(pllcx->params);
		pr_warn("%s already enabled. Postponing set full defaults\n",
			name);
		return;
	}

	/* Defaults assert PLL reset, and set IDDQ */
	writel_relaxed(PLLCX_MISC0_DEFAULT_VALUE,
			clk_base + pllcx->params->ext_misc_reg[0]);
	writel_relaxed(PLLCX_MISC1_DEFAULT_VALUE,
			clk_base + pllcx->params->ext_misc_reg[1]);
	writel_relaxed(PLLCX_MISC2_DEFAULT_VALUE,
			clk_base + pllcx->params->ext_misc_reg[2]);
	writel_relaxed(PLLCX_MISC3_DEFAULT_VALUE,
			clk_base + pllcx->params->ext_misc_reg[3]);
	udelay(1);
}

static void _pllc_set_defaults(struct tegra_clk_pll *pllcx)
{
	tegra210_pllcx_set_defaults("PLL_C", pllcx);
}

static void _pllc2_set_defaults(struct tegra_clk_pll *pllcx)
{
	tegra210_pllcx_set_defaults("PLL_C2", pllcx);
}

static void _pllc3_set_defaults(struct tegra_clk_pll *pllcx)
{
	tegra210_pllcx_set_defaults("PLL_C3", pllcx);
}

static void _plla1_set_defaults(struct tegra_clk_pll *pllcx)
{
	tegra210_pllcx_set_defaults("PLL_A1", pllcx);
}

/*
 * PLLA
 * PLL with dynamic ramp and fractional SDM. Dynamic ramp is not used.
 * Fractional SDM is allowed to provide exact audio rates.
 */
static void tegra210_plla_set_defaults(struct tegra_clk_pll *plla)
{
	u32 mask;
	u32 val = readl_relaxed(clk_base + plla->params->base_reg);

	plla->params->defaults_set = true;

	if (val & PLL_ENABLE) {
		/*
		 * PLL is ON: check if defaults already set, then set those
		 * that can be updated in flight.
		 */
		if (val & PLLA_BASE_IDDQ) {
			pr_warn("PLL_A boot enabled with IDDQ set\n");
			plla->params->defaults_set = false;
		}

		pr_warn("PLL_A already enabled. Postponing set full defaults\n");

		val = PLLA_MISC0_DEFAULT_VALUE;	/* ignore lock enable */
		mask = PLLA_MISC0_LOCK_ENABLE | PLLA_MISC0_LOCK_OVERRIDE;
		_pll_misc_chk_default(clk_base, plla->params, 0, val,
				~mask & PLLA_MISC0_WRITE_MASK);

		val = PLLA_MISC2_DEFAULT_VALUE; /* ignore all but control bit */
		_pll_misc_chk_default(clk_base, plla->params, 2, val,
				PLLA_MISC2_EN_DYNRAMP);

		/* Enable lock detect */
		val = readl_relaxed(clk_base + plla->params->ext_misc_reg[0]);
		val &= ~mask;
		val |= PLLA_MISC0_DEFAULT_VALUE & mask;
		writel_relaxed(val, clk_base + plla->params->ext_misc_reg[0]);
		udelay(1);

		return;
	}

	/* set IDDQ, enable lock detect, disable dynamic ramp and SDM */
	val |= PLLA_BASE_IDDQ;
	writel_relaxed(val, clk_base + plla->params->base_reg);
	writel_relaxed(PLLA_MISC0_DEFAULT_VALUE,
			clk_base + plla->params->ext_misc_reg[0]);
	writel_relaxed(PLLA_MISC2_DEFAULT_VALUE,
			clk_base + plla->params->ext_misc_reg[2]);
	udelay(1);
}

/*
 * PLLD
 * PLL with fractional SDM.
 */
static void tegra210_plld_set_defaults(struct tegra_clk_pll *plld)
{
	u32 val;
	u32 mask = 0xffff;	/* low 16 bits always ignored in MISC0 check */

	plld->params->defaults_set = true;

	if (readl_relaxed(clk_base + plld->params->base_reg) & PLL_ENABLE) {
		pr_warn("PLL_D already enabled. Postponing set full defaults\n");
		/*
		 * PLL is ON: check if defaults already set, then set those
		 * that can be updated in flight.
		 */
		val = PLLD_MISC1_DEFAULT_VALUE;
		_pll_misc_chk_default(clk_base, plld->params, 1, val,
				PLLD_MISC1_WRITE_MASK);

		/* ignore lock, DSI and SDM controls, make sure IDDQ not set */
		val = PLLD_MISC0_DEFAULT_VALUE & (~PLLD_MISC0_IDDQ);
		mask |= PLLD_MISC0_DSI_CLKENABLE | PLLD_MISC0_LOCK_ENABLE |
			PLLD_MISC0_LOCK_OVERRIDE | PLLD_MISC0_EN_SDM;
		_pll_misc_chk_default(clk_base, plld->params, 0, val,
				~mask & PLLD_MISC0_WRITE_MASK);

		/* Enable lock detect */
		mask = PLLD_MISC0_LOCK_ENABLE | PLLD_MISC0_LOCK_OVERRIDE;
		val = readl_relaxed(clk_base + plld->params->ext_misc_reg[0]);
		val &= ~mask;
		val |= PLLD_MISC0_DEFAULT_VALUE & mask;
		writel_relaxed(val, clk_base + plld->params->ext_misc_reg[0]);
		udelay(1);

		return;
	}

	/* preserve only the DSI clock-enable bit across the defaults write */
	val = readl_relaxed(clk_base + plld->params->ext_misc_reg[0]);
	val &= PLLD_MISC0_DSI_CLKENABLE;
	val |= PLLD_MISC0_DEFAULT_VALUE;
	/* set IDDQ, enable lock detect, disable SDM */
	writel_relaxed(val, clk_base + plld->params->ext_misc_reg[0]);
	writel_relaxed(PLLD_MISC1_DEFAULT_VALUE,
			clk_base + plld->params->ext_misc_reg[1]);
	udelay(1);
}

/*
 * PLLD2, PLLDP
 * PLL with fractional SDM and Spread Spectrum (SDM is a must if SSC is used).
 */
static void plldss_defaults(const char *pll_name, struct tegra_clk_pll *plldss,
		u32 misc0_val, u32 misc1_val, u32 misc2_val, u32 misc3_val)
{
	u32 default_val;
	u32 val = readl_relaxed(clk_base + plldss->params->base_reg);

	plldss->params->defaults_set = true;

	if (val & PLL_ENABLE) {
		pr_warn("%s already enabled. Postponing set full defaults\n",
			pll_name);
		/*
		 * PLL is ON: check if defaults already set, then set those
		 * that can be updated in flight.
		 */
		if (val & PLLDSS_BASE_IDDQ) {
			pr_warn("plldss boot enabled with IDDQ set\n");
			plldss->params->defaults_set = false;
		}

		/* ignore lock enable */
		default_val = misc0_val;
		_pll_misc_chk_default(clk_base, plldss->params, 0, default_val,
				PLLDSS_MISC0_WRITE_MASK &
				(~PLLDSS_MISC0_LOCK_ENABLE));

		/*
		 * If SSC is used, check all settings, otherwise just confirm
		 * that SSC is not used on boot as well. Do nothing when using
		 * this function for PLLC4 that has only MISC0.
		 */
		if (plldss->params->ssc_ctrl_en_mask) {
			default_val = misc1_val;
			_pll_misc_chk_default(clk_base, plldss->params, 1,
					default_val,
					PLLDSS_MISC1_CFG_WRITE_MASK);
			default_val = misc2_val;
			_pll_misc_chk_default(clk_base, plldss->params, 2,
					default_val,
					PLLDSS_MISC2_CTRL1_WRITE_MASK);
			default_val = misc3_val;
			_pll_misc_chk_default(clk_base, plldss->params, 3,
					default_val,
					PLLDSS_MISC3_CTRL2_WRITE_MASK);
		} else if (plldss->params->ext_misc_reg[1]) {
			default_val = misc1_val;
			_pll_misc_chk_default(clk_base, plldss->params, 1,
					default_val,
					PLLDSS_MISC1_CFG_WRITE_MASK &
					(~PLLDSS_MISC1_CFG_EN_SDM));
		}

		/* Enable lock detect */
		if (val & PLLDSS_BASE_LOCK_OVERRIDE) {
			val &= ~PLLDSS_BASE_LOCK_OVERRIDE;
			writel_relaxed(val, clk_base +
					plldss->params->base_reg);
		}

		val = readl_relaxed(clk_base + plldss->params->ext_misc_reg[0]);
		val &= ~PLLDSS_MISC0_LOCK_ENABLE;
		val |= misc0_val & PLLDSS_MISC0_LOCK_ENABLE;
		writel_relaxed(val, clk_base + plldss->params->ext_misc_reg[0]);
		udelay(1);

		return;
	}

	/* set IDDQ, enable lock detect, configure SDM/SSC */
	val |= PLLDSS_BASE_IDDQ;
	val &= ~PLLDSS_BASE_LOCK_OVERRIDE;
	writel_relaxed(val, clk_base + plldss->params->base_reg);

	/* When using this function for PLLC4 exit here */
	if (!plldss->params->ext_misc_reg[1]) {
		writel_relaxed(misc0_val, clk_base +
				plldss->params->ext_misc_reg[0]);
		udelay(1);
		return;
	}

	writel_relaxed(misc0_val, clk_base + plldss->params->ext_misc_reg[0]);
	/* if SSC used set by 1st enable */
	writel_relaxed(misc1_val & (~PLLDSS_MISC1_CFG_EN_SSC),
			clk_base + plldss->params->ext_misc_reg[1]);
	writel_relaxed(misc2_val, clk_base + plldss->params->ext_misc_reg[2]);
	writel_relaxed(misc3_val, clk_base + plldss->params->ext_misc_reg[3]);
	udelay(1);
}

static void tegra210_plld2_set_defaults(struct tegra_clk_pll *plld2)
{
	plldss_defaults("PLL_D2", plld2, PLLD2_MISC0_DEFAULT_VALUE,
			PLLD2_MISC1_CFG_DEFAULT_VALUE,
			PLLD2_MISC2_CTRL1_DEFAULT_VALUE,
			PLLD2_MISC3_CTRL2_DEFAULT_VALUE);
}

static void tegra210_plldp_set_defaults(struct tegra_clk_pll *plldp)
{
	plldss_defaults("PLL_DP", plldp, PLLDP_MISC0_DEFAULT_VALUE,
			PLLDP_MISC1_CFG_DEFAULT_VALUE,
			PLLDP_MISC2_CTRL1_DEFAULT_VALUE,
			PLLDP_MISC3_CTRL2_DEFAULT_VALUE);
}

/*
 * PLLC4
 * Base and misc0 layout is the same as PLLD2/PLLDP, but no SDM/SSC support.
 * VCO is exposed to the clock tree via fixed 1/3 and 1/5 dividers.
 */
static void tegra210_pllc4_set_defaults(struct tegra_clk_pll *pllc4)
{
	plldss_defaults("PLL_C4", pllc4, PLLC4_MISC0_DEFAULT_VALUE, 0, 0, 0);
}

/*
 * PLLRE
 * VCO is exposed to the clock tree directly along with post-divider output
 */
static void tegra210_pllre_set_defaults(struct tegra_clk_pll *pllre)
{
	u32 mask;
	u32 val = readl_relaxed(clk_base + pllre->params->base_reg);

	pllre->params->defaults_set = true;

	if (val & PLL_ENABLE) {
		pr_warn("PLL_RE already enabled. Postponing set full defaults\n");
		/*
		 * PLL is ON: check if defaults already set, then set those
		 * that can be updated in flight.
		 */
		val &= PLLRE_BASE_DEFAULT_MASK;
		if (val != PLLRE_BASE_DEFAULT_VALUE) {
			pr_warn("pllre boot base 0x%x : expected 0x%x\n",
				val, PLLRE_BASE_DEFAULT_VALUE);
			pr_warn("(comparison mask = 0x%x)\n",
				PLLRE_BASE_DEFAULT_MASK);
			pllre->params->defaults_set = false;
		}

		/* Ignore lock enable */
		val = PLLRE_MISC0_DEFAULT_VALUE & (~PLLRE_MISC0_IDDQ);
		mask = PLLRE_MISC0_LOCK_ENABLE | PLLRE_MISC0_LOCK_OVERRIDE;
		_pll_misc_chk_default(clk_base, pllre->params, 0, val,
				~mask & PLLRE_MISC0_WRITE_MASK);

		/* Enable lock detect */
		val = readl_relaxed(clk_base + pllre->params->ext_misc_reg[0]);
		val &= ~mask;
		val |= PLLRE_MISC0_DEFAULT_VALUE & mask;
		writel_relaxed(val, clk_base + pllre->params->ext_misc_reg[0]);
		udelay(1);

		return;
	}

	/* set IDDQ, enable lock detect */
	val &= ~PLLRE_BASE_DEFAULT_MASK;
	val |= PLLRE_BASE_DEFAULT_VALUE & PLLRE_BASE_DEFAULT_MASK;
	writel_relaxed(val, clk_base + pllre->params->base_reg);
	writel_relaxed(PLLRE_MISC0_DEFAULT_VALUE,
			clk_base + pllre->params->ext_misc_reg[0]);
	udelay(1);
}

/*
 * Select PLLX dynamic-ramp step coefficients (step A/B) for the
 * comparison-frequency derived from the parent rate; falls back to the
 * 38.4 MHz OSC value when the parent clock is not yet available.
 */
static void pllx_get_dyn_steps(struct clk_hw *hw, u32 *step_a, u32 *step_b)
{
	unsigned long input_rate;

	/* cf rate */
	if (!IS_ERR_OR_NULL(hw->clk))
		input_rate = clk_hw_get_rate(clk_hw_get_parent(hw));
	else
		input_rate = 38400000;

	input_rate /= tegra_pll_get_fixed_mdiv(hw, input_rate);

	switch (input_rate) {
	case 12000000:
	case 12800000:
	case 13000000:
		*step_a = 0x2B;
		*step_b = 0x0B;
		return;
	case 19200000:
		*step_a = 0x12;
		*step_b = 0x08;
		return;
	case 38400000:
		*step_a = 0x04;
		*step_b = 0x05;
		return;
	default:
		pr_err("%s: Unexpected reference rate %lu\n",
			__func__, input_rate);
		BUG();
	}
}

/* Verify all six PLLX MISC registers against their boot defaults. */
static void pllx_check_defaults(struct tegra_clk_pll *pll)
{
	u32 default_val;

	default_val = PLLX_MISC0_DEFAULT_VALUE;
	/* ignore lock enable */
	_pll_misc_chk_default(clk_base, pll->params, 0, default_val,
			PLLX_MISC0_WRITE_MASK & (~PLLX_MISC0_LOCK_ENABLE));

	default_val = PLLX_MISC1_DEFAULT_VALUE;
	_pll_misc_chk_default(clk_base, pll->params, 1, default_val,
			PLLX_MISC1_WRITE_MASK);

	/* ignore all but control bit */
	default_val = PLLX_MISC2_DEFAULT_VALUE;
	_pll_misc_chk_default(clk_base, pll->params, 2, default_val,
			PLLX_MISC2_EN_DYNRAMP);

	default_val = PLLX_MISC3_DEFAULT_VALUE & (~PLLX_MISC3_IDDQ);
	_pll_misc_chk_default(clk_base, pll->params, 3, default_val,
			PLLX_MISC3_WRITE_MASK);

	default_val = PLLX_MISC4_DEFAULT_VALUE;
	_pll_misc_chk_default(clk_base, pll->params, 4, default_val,
			PLLX_MISC4_WRITE_MASK);

	default_val = PLLX_MISC5_DEFAULT_VALUE;
	_pll_misc_chk_default(clk_base, pll->params, 5, default_val,
			PLLX_MISC5_WRITE_MASK);
}

/*
 * Program PLLX defaults including the dynamic-ramp state machine step
 * coefficients; if the bootloader enabled PLLX, only verify state and
 * update the in-flight-safe bits (dyn ramp config, lock detect).
 */
static void tegra210_pllx_set_defaults(struct tegra_clk_pll *pllx)
{
	u32 val;
	u32 step_a, step_b;

	pllx->params->defaults_set = true;

	/* Get ready dyn ramp state machine settings */
	pllx_get_dyn_steps(&pllx->hw, &step_a, &step_b);
	val = PLLX_MISC2_DEFAULT_VALUE & (~PLLX_MISC2_DYNRAMP_STEPA_MASK) &
		(~PLLX_MISC2_DYNRAMP_STEPB_MASK);
	val |= step_a << PLLX_MISC2_DYNRAMP_STEPA_SHIFT;
	val |= step_b << PLLX_MISC2_DYNRAMP_STEPB_SHIFT;

	if (readl_relaxed(clk_base + pllx->params->base_reg) & PLL_ENABLE) {
		pr_warn("PLL_X already enabled. Postponing set full defaults\n");
		/*
		 * PLL is ON: check if defaults already set, then set those
		 * that can be updated in flight.
		 */
		pllx_check_defaults(pllx);

		/* Configure dyn ramp, disable lock override */
		writel_relaxed(val, clk_base + pllx->params->ext_misc_reg[2]);

		/* Enable lock detect */
		val = readl_relaxed(clk_base + pllx->params->ext_misc_reg[0]);
		val &= ~PLLX_MISC0_LOCK_ENABLE;
		val |= PLLX_MISC0_DEFAULT_VALUE & PLLX_MISC0_LOCK_ENABLE;
		writel_relaxed(val, clk_base + pllx->params->ext_misc_reg[0]);
		udelay(1);

		return;
	}

	/* Enable lock detect and CPU output */
	writel_relaxed(PLLX_MISC0_DEFAULT_VALUE, clk_base +
			pllx->params->ext_misc_reg[0]);
	/* Setup */
	writel_relaxed(PLLX_MISC1_DEFAULT_VALUE, clk_base +
			pllx->params->ext_misc_reg[1]);
	/* Configure dyn ramp state machine, disable lock override */
	writel_relaxed(val, clk_base + pllx->params->ext_misc_reg[2]);
	/* Set IDDQ */
	writel_relaxed(PLLX_MISC3_DEFAULT_VALUE, clk_base +
			pllx->params->ext_misc_reg[3]);
	/* Disable SDM */
	writel_relaxed(PLLX_MISC4_DEFAULT_VALUE, clk_base +
			pllx->params->ext_misc_reg[4]);
	writel_relaxed(PLLX_MISC5_DEFAULT_VALUE, clk_base +
			pllx->params->ext_misc_reg[5]);
	udelay(1);
}

/* PLLMB */
static void tegra210_pllmb_set_defaults(struct tegra_clk_pll *pllmb)
{
	u32 mask, val = readl_relaxed(clk_base + pllmb->params->base_reg);

	pllmb->params->defaults_set = true;

	if (val & PLL_ENABLE) {
		pr_warn("PLL_MB already enabled. Postponing set full defaults\n");
		/*
		 * PLL is ON: check if defaults already set, then set those
		 * that can be updated in flight.
		 * (PLLMB's only MISC register is MISC1, wired to
		 * ext_misc_reg[0].)
		 */
		val = PLLMB_MISC1_DEFAULT_VALUE & (~PLLMB_MISC1_IDDQ);
		mask = PLLMB_MISC1_LOCK_ENABLE | PLLMB_MISC1_LOCK_OVERRIDE;
		_pll_misc_chk_default(clk_base, pllmb->params, 0, val,
				~mask & PLLMB_MISC1_WRITE_MASK);

		/* Enable lock detect */
		val = readl_relaxed(clk_base + pllmb->params->ext_misc_reg[0]);
		val &= ~mask;
		val |= PLLMB_MISC1_DEFAULT_VALUE & mask;
		writel_relaxed(val, clk_base + pllmb->params->ext_misc_reg[0]);
		udelay(1);

		return;
	}

	/* set IDDQ, enable lock detect */
	writel_relaxed(PLLMB_MISC1_DEFAULT_VALUE,
			clk_base + pllmb->params->ext_misc_reg[0]);
	udelay(1);
}

/*
 * PLLP
 * VCO is exposed to the clock tree directly along with post-divider output.
 * Both VCO and post-divider output rates are fixed at 408MHz and 204MHz,
 * respectively.
 */
static void pllp_check_defaults(struct tegra_clk_pll *pll, bool enabled)
{
	u32 val, mask;

	/* Ignore lock enable (will be set), make sure not in IDDQ if enabled */
	val = PLLP_MISC0_DEFAULT_VALUE & (~PLLP_MISC0_IDDQ);
	mask = PLLP_MISC0_LOCK_ENABLE | PLLP_MISC0_LOCK_OVERRIDE;
	if (!enabled)
		mask |= PLLP_MISC0_IDDQ;
	_pll_misc_chk_default(clk_base, pll->params, 0, val,
			~mask & PLLP_MISC0_WRITE_MASK);

	/* Ignore branch controls */
	val = PLLP_MISC1_DEFAULT_VALUE;
	mask = PLLP_MISC1_HSIO_EN | PLLP_MISC1_XUSB_EN;
	_pll_misc_chk_default(clk_base, pll->params, 1, val,
			~mask & PLLP_MISC1_WRITE_MASK);
}

static void tegra210_pllp_set_defaults(struct tegra_clk_pll *pllp)
{
	u32 mask;
	u32 val = readl_relaxed(clk_base + pllp->params->base_reg);

	pllp->params->defaults_set = true;

	if (val & PLL_ENABLE) {
		pr_warn("PLL_P already enabled. Postponing set full defaults\n");
		/*
		 * PLL is ON: check if defaults already set, then set those
		 * that can be updated in flight.
		 */
		pllp_check_defaults(pllp, true);

		/* Enable lock detect */
		val = readl_relaxed(clk_base + pllp->params->ext_misc_reg[0]);
		mask = PLLP_MISC0_LOCK_ENABLE | PLLP_MISC0_LOCK_OVERRIDE;
		val &= ~mask;
		val |= PLLP_MISC0_DEFAULT_VALUE & mask;
		writel_relaxed(val, clk_base + pllp->params->ext_misc_reg[0]);
		udelay(1);

		return;
	}

	/* set IDDQ, enable lock detect */
	writel_relaxed(PLLP_MISC0_DEFAULT_VALUE,
			clk_base + pllp->params->ext_misc_reg[0]);

	/* Preserve branch control */
	val = readl_relaxed(clk_base + pllp->params->ext_misc_reg[1]);
	mask = PLLP_MISC1_HSIO_EN | PLLP_MISC1_XUSB_EN;
	val &= mask;
	val |= ~mask & PLLP_MISC1_DEFAULT_VALUE;
	writel_relaxed(val, clk_base + pllp->params->ext_misc_reg[1]);
	udelay(1);
}

/*
 * PLLU
 * VCO is exposed to the clock tree directly along with post-divider output.
 * Both VCO and post-divider output rates are fixed at 480MHz and 240MHz,
 * respectively.
 */
static void pllu_check_defaults(struct tegra_clk_pll *pll, bool hw_control)
{
	u32 val, mask;

	/* Ignore lock enable (will be set) and IDDQ if under h/w control */
	val = PLLU_MISC0_DEFAULT_VALUE & (~PLLU_MISC0_IDDQ);
	mask = PLLU_MISC0_LOCK_ENABLE | (hw_control ? PLLU_MISC0_IDDQ : 0);
	_pll_misc_chk_default(clk_base, pll->params, 0, val,
			~mask & PLLU_MISC0_WRITE_MASK);

	val = PLLU_MISC1_DEFAULT_VALUE;
	mask = PLLU_MISC1_LOCK_OVERRIDE;
	_pll_misc_chk_default(clk_base, pll->params, 1, val,
			~mask & PLLU_MISC1_WRITE_MASK);
}

static void tegra210_pllu_set_defaults(struct tegra_clk_pll *pllu)
{
	u32 val = readl_relaxed(clk_base + pllu->params->base_reg);

	pllu->params->defaults_set = true;

	if (val & PLL_ENABLE) {
		pr_warn("PLL_U already enabled. Postponing set full defaults\n");
		/*
		 * PLL is ON: check if defaults already set, then set those
		 * that can be updated in flight.
		 */
		pllu_check_defaults(pllu, false);

		/* Enable lock detect */
		val = readl_relaxed(clk_base + pllu->params->ext_misc_reg[0]);
		val &= ~PLLU_MISC0_LOCK_ENABLE;
		val |= PLLU_MISC0_DEFAULT_VALUE & PLLU_MISC0_LOCK_ENABLE;
		writel_relaxed(val, clk_base + pllu->params->ext_misc_reg[0]);

		val = readl_relaxed(clk_base + pllu->params->ext_misc_reg[1]);
		val &= ~PLLU_MISC1_LOCK_OVERRIDE;
		val |= PLLU_MISC1_DEFAULT_VALUE & PLLU_MISC1_LOCK_OVERRIDE;
		writel_relaxed(val, clk_base + pllu->params->ext_misc_reg[1]);
		udelay(1);

		return;
	}

	/* set IDDQ, enable lock detect */
	writel_relaxed(PLLU_MISC0_DEFAULT_VALUE,
			clk_base + pllu->params->ext_misc_reg[0]);
	writel_relaxed(PLLU_MISC1_DEFAULT_VALUE,
			clk_base + pllu->params->ext_misc_reg[1]);
	udelay(1);
}

/* Helpers extracting the M/N/P divider fields described by div_nmp */
#define mask(w) ((1 << (w)) - 1)
#define divm_mask(p) mask(p->params->div_nmp->divm_width)
#define divn_mask(p) mask(p->params->div_nmp->divn_width)
#define divp_mask(p) (p->params->flags & TEGRA_PLLU ? PLLU_POST_DIVP_MASK :\
		      mask(p->params->div_nmp->divp_width))
#define divm_shift(p) ((p)->params->div_nmp->divm_shift)
#define divn_shift(p) ((p)->params->div_nmp->divn_shift)
#define divp_shift(p) ((p)->params->div_nmp->divp_shift)
#define divm_mask_shifted(p) (divm_mask(p) << divm_shift(p))
#define divn_mask_shifted(p) (divn_mask(p) << divn_shift(p))
#define divp_mask_shifted(p) (divp_mask(p) << divp_shift(p))

#define PLL_LOCKDET_DELAY 2	/* Lock detection safety delays */

/*
 * Poll @reg until all bits in @mask are set, bounded by the PLL's
 * lock_delay. Returns 0 on success, -ETIMEDOUT otherwise.
 */
static int tegra210_wait_for_mask(struct tegra_clk_pll *pll,
				  u32 reg, u32 mask)
{
	int i;
	u32 val = 0;

	for (i = 0; i < pll->params->lock_delay / PLL_LOCKDET_DELAY + 1; i++) {
		udelay(PLL_LOCKDET_DELAY);
		val = readl_relaxed(clk_base + reg);
		if ((val & mask) == mask) {
			udelay(PLL_LOCKDET_DELAY);
			return 0;
		}
	}
	return -ETIMEDOUT;
}

/*
 * Glitchless NDIV-only rate change: load the new NDIV into MISC2,
 * trigger the dynamic-ramp state machine, wait for completion, commit
 * the new NDIV to the base register, then disable the ramp engine.
 */
static int tegra210_pllx_dyn_ramp(struct tegra_clk_pll *pllx,
		struct tegra_clk_pll_freq_table *cfg)
{
	u32 val, base, ndiv_new_mask;

	ndiv_new_mask = (divn_mask(pllx) >> pllx->params->div_nmp->divn_shift)
			 << PLLX_MISC2_NDIV_NEW_SHIFT;

	val = readl_relaxed(clk_base + pllx->params->ext_misc_reg[2]);
	val &= (~ndiv_new_mask);
	val |= cfg->n << PLLX_MISC2_NDIV_NEW_SHIFT;
	writel_relaxed(val, clk_base + pllx->params->ext_misc_reg[2]);
	udelay(1);

	val = readl_relaxed(clk_base + pllx->params->ext_misc_reg[2]);
	val |= PLLX_MISC2_EN_DYNRAMP;
	writel_relaxed(val, clk_base + pllx->params->ext_misc_reg[2]);
	udelay(1);

	tegra210_wait_for_mask(pllx, pllx->params->ext_misc_reg[2],
			       PLLX_MISC2_DYNRAMP_DONE);

	base = readl_relaxed(clk_base + pllx->params->base_reg) &
		(~divn_mask_shifted(pllx));
	base |= cfg->n << pllx->params->div_nmp->divn_shift;
	writel_relaxed(base, clk_base + pllx->params->base_reg);
	udelay(1);

	val &= ~PLLX_MISC2_EN_DYNRAMP;
	writel_relaxed(val, clk_base + pllx->params->ext_misc_reg[2]);
	udelay(1);

	pr_debug("%s: dynamic ramp to m = %u n = %u p = %u, Fout = %lu kHz\n",
		 __clk_get_name(pllx->hw.clk), cfg->m, cfg->n, cfg->p,
		 cfg->input_rate / cfg->m * cfg->n /
		 pllx->params->pdiv_tohw[cfg->p].pdiv / 1000);

	return 0;
}

/*
 * Common configuration for PLLs with fixed input divider policy:
 * - always set fixed M-value based on the reference rate
 * - always set P-value value 1:1 for output rates above VCO minimum, and
 *   choose minimum necessary P-value for output rates below VCO maximum
 * - calculate N-value based on selected M and P
 * - calculate SDM_DIN fractional part
 */
static int tegra210_pll_fixed_mdiv_cfg(struct clk_hw *hw,
	       struct tegra_clk_pll_freq_table *cfg,
	       unsigned long rate, unsigned long input_rate)
{
	struct tegra_clk_pll *pll = to_clk_pll(hw);
	struct tegra_clk_pll_params *params = pll->params;
	int p;
	unsigned long cf, p_rate;
	u32 pdiv;

	if (!rate)
		return -EINVAL;

	if (!(params->flags & TEGRA_PLL_VCO_OUT)) {
		p = DIV_ROUND_UP(params->vco_min, rate);
		p = params->round_p_to_pdiv(p, &pdiv);
	} else {
		/* VCO exposed directly: cannot divide below vco_min */
		p = rate >= params->vco_min ? 1 : -EINVAL;
	}

	if (p < 0)
		return -EINVAL;

	cfg->m = tegra_pll_get_fixed_mdiv(hw, input_rate);
	cfg->p = p;

	/* Store P as HW value, as that is what is expected */
	cfg->p = tegra_pll_p_div_to_hw(pll, cfg->p);

	p_rate = rate * p;
	if (p_rate > params->vco_max)
		p_rate = params->vco_max;
	cf = input_rate / cfg->m;
	cfg->n = p_rate / cf;

	cfg->sdm_data = 0;
	if (params->sdm_ctrl_reg) {
		unsigned long rem = p_rate - cf * cfg->n;
		/* If ssc is enabled SDM enabled as well, even for integer n */
		if (rem || params->ssc_ctrl_reg) {
			u64 s = rem * PLL_SDM_COEFF;

			do_div(s, cf);
			s -= PLL_SDM_COEFF / 2;
			cfg->sdm_data = sdin_din_to_data(s);
		}
	}

	cfg->input_rate = input_rate;
	cfg->output_rate = rate;

	return 0;
}

/*
 * clk_pll_set_gain - set gain to m, n to calculate correct VCO rate
 *
 * @cfg: struct tegra_clk_pll_freq_table * cfg
 *
 * For Normal mode:
 *     Fvco = Fref * NDIV / MDIV
 *
 * For fractional mode:
 *     Fvco = Fref * (NDIV + 0.5 + SDM_DIN / PLL_SDM_COEFF) / MDIV
 */
static void tegra210_clk_pll_set_gain(struct tegra_clk_pll_freq_table *cfg)
{
	cfg->n = cfg->n * PLL_SDM_COEFF + PLL_SDM_COEFF/2 +
			sdin_data_to_din(cfg->sdm_data);
	cfg->m *= PLL_SDM_COEFF;
}

/*
 * Raise vco_min so that the SDM fractional adjustment always fits, and
 * return the smaller of the old/new minimum for callers that need it.
 */
static unsigned long
tegra210_clk_adjust_vco_min(struct tegra_clk_pll_params *params,
			    unsigned long parent_rate)
{
	unsigned long vco_min = params->vco_min;

	params->vco_min += DIV_ROUND_UP(parent_rate, PLL_SDM_COEFF);
	vco_min = min(vco_min, params->vco_min);

	return vco_min;
}

/* PLLX M/N/P divider field layout in its base register */
static struct div_nmp pllx_nmp = {
	.divm_shift = 0,
	.divm_width = 8,
	.divn_shift = 8,
	.divn_width = 8,
	.divp_shift = 20,
	.divp_width = 5,
};

/*
 * PLL post divider maps - two types: quasi-linear and exponential
 * post divider.
 */
*/ #define PLL_QLIN_PDIV_MAX 16 static const struct pdiv_map pll_qlin_pdiv_to_hw[] = { { .pdiv = 1, .hw_val = 0 }, { .pdiv = 2, .hw_val = 1 }, { .pdiv = 3, .hw_val = 2 }, { .pdiv = 4, .hw_val = 3 }, { .pdiv = 5, .hw_val = 4 }, { .pdiv = 6, .hw_val = 5 }, { .pdiv = 8, .hw_val = 6 }, { .pdiv = 9, .hw_val = 7 }, { .pdiv = 10, .hw_val = 8 }, { .pdiv = 12, .hw_val = 9 }, { .pdiv = 15, .hw_val = 10 }, { .pdiv = 16, .hw_val = 11 }, { .pdiv = 18, .hw_val = 12 }, { .pdiv = 20, .hw_val = 13 }, { .pdiv = 24, .hw_val = 14 }, { .pdiv = 30, .hw_val = 15 }, { .pdiv = 32, .hw_val = 16 }, }; static u32 pll_qlin_p_to_pdiv(u32 p, u32 *pdiv) { int i; if (p) { for (i = 0; i <= PLL_QLIN_PDIV_MAX; i++) { if (p <= pll_qlin_pdiv_to_hw[i].pdiv) { if (pdiv) *pdiv = i; return pll_qlin_pdiv_to_hw[i].pdiv; } } } return -EINVAL; } #define PLL_EXPO_PDIV_MAX 7 static const struct pdiv_map pll_expo_pdiv_to_hw[] = { { .pdiv = 1, .hw_val = 0 }, { .pdiv = 2, .hw_val = 1 }, { .pdiv = 4, .hw_val = 2 }, { .pdiv = 8, .hw_val = 3 }, { .pdiv = 16, .hw_val = 4 }, { .pdiv = 32, .hw_val = 5 }, { .pdiv = 64, .hw_val = 6 }, { .pdiv = 128, .hw_val = 7 }, }; static u32 pll_expo_p_to_pdiv(u32 p, u32 *pdiv) { if (p) { u32 i = fls(p); if (i == ffs(p)) i--; if (i <= PLL_EXPO_PDIV_MAX) { if (pdiv) *pdiv = i; return 1 << i; } } return -EINVAL; } static struct tegra_clk_pll_freq_table pll_x_freq_table[] = { /* 1 GHz */ { 12000000, 1000000000, 166, 1, 2, 0 }, /* actual: 996.0 MHz */ { 13000000, 1000000000, 153, 1, 2, 0 }, /* actual: 994.0 MHz */ { 38400000, 1000000000, 156, 3, 2, 0 }, /* actual: 998.4 MHz */ { 0, 0, 0, 0, 0, 0 }, }; static struct tegra_clk_pll_params pll_x_params = { .input_min = 12000000, .input_max = 800000000, .cf_min = 12000000, .cf_max = 38400000, .vco_min = 1350000000, .vco_max = 3000000000UL, .base_reg = PLLX_BASE, .misc_reg = PLLX_MISC0, .lock_mask = PLL_BASE_LOCK, .lock_enable_bit_idx = PLL_MISC_LOCK_ENABLE, .lock_delay = 300, .ext_misc_reg[0] = PLLX_MISC0, .ext_misc_reg[1] = PLLX_MISC1, 
.ext_misc_reg[2] = PLLX_MISC2, .ext_misc_reg[3] = PLLX_MISC3, .ext_misc_reg[4] = PLLX_MISC4, .ext_misc_reg[5] = PLLX_MISC5, .iddq_reg = PLLX_MISC3, .iddq_bit_idx = PLLXP_IDDQ_BIT, .max_p = PLL_QLIN_PDIV_MAX, .mdiv_default = 2, .dyn_ramp_reg = PLLX_MISC2, .stepa_shift = 16, .stepb_shift = 24, .round_p_to_pdiv = pll_qlin_p_to_pdiv, .pdiv_tohw = pll_qlin_pdiv_to_hw, .div_nmp = &pllx_nmp, .freq_table = pll_x_freq_table, .flags = TEGRA_PLL_USE_LOCK | TEGRA_PLL_HAS_LOCK_ENABLE, .dyn_ramp = tegra210_pllx_dyn_ramp, .set_defaults = tegra210_pllx_set_defaults, .calc_rate = tegra210_pll_fixed_mdiv_cfg, };

/* Bit positions/widths of the M, N and P divider fields in PLLC_BASE. */
static struct div_nmp pllc_nmp = { .divm_shift = 0, .divm_width = 8, .divn_shift = 10, .divn_width = 8, .divp_shift = 20, .divp_width = 5, };

/* Shared rate table for PLLC/PLLC2/PLLC3/PLLA1 (510 MHz target). */
static struct tegra_clk_pll_freq_table pll_cx_freq_table[] = { { 12000000, 510000000, 85, 1, 2, 0 }, { 13000000, 510000000, 78, 1, 2, 0 }, /* actual: 507.0 MHz */ { 38400000, 510000000, 79, 3, 2, 0 }, /* actual: 505.6 MHz */ { 0, 0, 0, 0, 0, 0 }, };

/* PLLC hardware description: register offsets, limits and callbacks. */
static struct tegra_clk_pll_params pll_c_params = { .input_min = 12000000, .input_max = 700000000, .cf_min = 12000000, .cf_max = 50000000, .vco_min = 600000000, .vco_max = 1200000000, .base_reg = PLLC_BASE, .misc_reg = PLLC_MISC0, .lock_mask = PLL_BASE_LOCK, .lock_delay = 300, .iddq_reg = PLLC_MISC1, .iddq_bit_idx = PLLCX_IDDQ_BIT, .reset_reg = PLLC_MISC0, .reset_bit_idx = PLLCX_RESET_BIT, .max_p = PLL_QLIN_PDIV_MAX, .ext_misc_reg[0] = PLLC_MISC0, .ext_misc_reg[1] = PLLC_MISC1, .ext_misc_reg[2] = PLLC_MISC2, .ext_misc_reg[3] = PLLC_MISC3, .round_p_to_pdiv = pll_qlin_p_to_pdiv, .pdiv_tohw = pll_qlin_pdiv_to_hw, .mdiv_default = 3, .div_nmp = &pllc_nmp, .freq_table = pll_cx_freq_table, .flags = TEGRA_PLL_USE_LOCK, .set_defaults = _pllc_set_defaults, .calc_rate = tegra210_pll_fixed_mdiv_cfg, };

/* M/N/P field layout shared by PLLC2/PLLC3 base registers. */
static struct div_nmp pllcx_nmp = { .divm_shift = 0, .divm_width = 8, .divn_shift = 10, .divn_width = 8, .divp_shift = 20, .divp_width = 5, };

/* PLLC2 hardware description (continues on the next chunk line). */
static struct tegra_clk_pll_params pll_c2_params = {
.input_min = 12000000, .input_max = 700000000, .cf_min = 12000000, .cf_max = 50000000, .vco_min = 600000000, .vco_max = 1200000000, .base_reg = PLLC2_BASE, .misc_reg = PLLC2_MISC0, .iddq_reg = PLLC2_MISC1, .iddq_bit_idx = PLLCX_IDDQ_BIT, .reset_reg = PLLC2_MISC0, .reset_bit_idx = PLLCX_RESET_BIT, .lock_mask = PLLCX_BASE_LOCK, .lock_delay = 300, .round_p_to_pdiv = pll_qlin_p_to_pdiv, .pdiv_tohw = pll_qlin_pdiv_to_hw, .mdiv_default = 3, .div_nmp = &pllcx_nmp, .max_p = PLL_QLIN_PDIV_MAX, .ext_misc_reg[0] = PLLC2_MISC0, .ext_misc_reg[1] = PLLC2_MISC1, .ext_misc_reg[2] = PLLC2_MISC2, .ext_misc_reg[3] = PLLC2_MISC3, .freq_table = pll_cx_freq_table, .flags = TEGRA_PLL_USE_LOCK, .set_defaults = _pllc2_set_defaults, .calc_rate = tegra210_pll_fixed_mdiv_cfg, };

/* PLLC3: identical limits to PLLC2, its own register set. */
static struct tegra_clk_pll_params pll_c3_params = { .input_min = 12000000, .input_max = 700000000, .cf_min = 12000000, .cf_max = 50000000, .vco_min = 600000000, .vco_max = 1200000000, .base_reg = PLLC3_BASE, .misc_reg = PLLC3_MISC0, .lock_mask = PLLCX_BASE_LOCK, .lock_delay = 300, .iddq_reg = PLLC3_MISC1, .iddq_bit_idx = PLLCX_IDDQ_BIT, .reset_reg = PLLC3_MISC0, .reset_bit_idx = PLLCX_RESET_BIT, .round_p_to_pdiv = pll_qlin_p_to_pdiv, .pdiv_tohw = pll_qlin_pdiv_to_hw, .mdiv_default = 3, .div_nmp = &pllcx_nmp, .max_p = PLL_QLIN_PDIV_MAX, .ext_misc_reg[0] = PLLC3_MISC0, .ext_misc_reg[1] = PLLC3_MISC1, .ext_misc_reg[2] = PLLC3_MISC2, .ext_misc_reg[3] = PLLC3_MISC3, .freq_table = pll_cx_freq_table, .flags = TEGRA_PLL_USE_LOCK, .set_defaults = _pllc3_set_defaults, .calc_rate = tegra210_pll_fixed_mdiv_cfg, };

/* M/N/P field layout shared by the PLLSS-style PLLs (C4/D2/DP). */
static struct div_nmp pllss_nmp = { .divm_shift = 0, .divm_width = 8, .divn_shift = 8, .divn_width = 8, .divp_shift = 19, .divp_width = 5, };

/* Pre-qualified 600 MHz VCO settings for PLLC4. */
static struct tegra_clk_pll_freq_table pll_c4_vco_freq_table[] = { { 12000000, 600000000, 50, 1, 1, 0 }, { 13000000, 600000000, 46, 1, 1, 0 }, /* actual: 598.0 MHz */ { 38400000, 600000000, 62, 4, 1, 0 }, /* actual: 595.2 MHz */ { 0, 0, 0, 0, 0, 0 }, };

/* VCO post-divider encodings (definition continues on next chunk line). */
static const struct
clk_div_table pll_vco_post_div_table[] = { { .val = 0, .div = 1 }, { .val = 1, .div = 2 }, { .val = 2, .div = 3 }, { .val = 3, .div = 4 }, { .val = 4, .div = 5 }, { .val = 5, .div = 6 }, { .val = 6, .div = 8 }, { .val = 7, .div = 10 }, { .val = 8, .div = 12 }, { .val = 9, .div = 16 }, /* NOTE(review): vals 10/11 repeat div 12/16 — matches upstream encoding, not a typo */ { .val = 10, .div = 12 }, { .val = 11, .div = 16 }, { .val = 12, .div = 20 }, { .val = 13, .div = 24 }, { .val = 14, .div = 32 }, { .val = 0, .div = 0 }, };

/* PLLC4 VCO hardware description (TEGRA_PLL_VCO_OUT: VCO is the output). */
static struct tegra_clk_pll_params pll_c4_vco_params = { .input_min = 9600000, .input_max = 800000000, .cf_min = 9600000, .cf_max = 19200000, .vco_min = 500000000, .vco_max = 1080000000, .base_reg = PLLC4_BASE, .misc_reg = PLLC4_MISC0, .lock_mask = PLL_BASE_LOCK, .lock_delay = 300, .max_p = PLL_QLIN_PDIV_MAX, .ext_misc_reg[0] = PLLC4_MISC0, .iddq_reg = PLLC4_BASE, .iddq_bit_idx = PLLSS_IDDQ_BIT, .round_p_to_pdiv = pll_qlin_p_to_pdiv, .pdiv_tohw = pll_qlin_pdiv_to_hw, .mdiv_default = 3, .div_nmp = &pllss_nmp, .freq_table = pll_c4_vco_freq_table, .set_defaults = tegra210_pllc4_set_defaults, .flags = TEGRA_PLL_USE_LOCK | TEGRA_PLL_VCO_OUT, .calc_rate = tegra210_pll_fixed_mdiv_cfg, };

/* Pre-qualified memory PLL (PLLM/PLLMB) rates. */
static struct tegra_clk_pll_freq_table pll_m_freq_table[] = { { 12000000, 800000000, 66, 1, 1, 0 }, /* actual: 792.0 MHz */ { 13000000, 800000000, 61, 1, 1, 0 }, /* actual: 793.0 MHz */ { 38400000, 297600000, 93, 4, 3, 0 }, { 38400000, 400000000, 125, 4, 3, 0 }, { 38400000, 532800000, 111, 4, 2, 0 }, { 38400000, 665600000, 104, 3, 2, 0 }, { 38400000, 800000000, 125, 3, 2, 0 }, { 38400000, 931200000, 97, 4, 1, 0 }, { 38400000, 1065600000, 111, 4, 1, 0 }, { 38400000, 1200000000, 125, 4, 1, 0 }, { 38400000, 1331200000, 104, 3, 1, 0 }, { 38400000, 1459200000, 76, 2, 1, 0 }, { 38400000, 1600000000, 125, 3, 1, 0 }, { 0, 0, 0, 0, 0, 0 }, };

/* PLLM divider layout, incl. PMC override field positions. */
static struct div_nmp pllm_nmp = { .divm_shift = 0, .divm_width = 8, .override_divm_shift = 0, .divn_shift = 8, .divn_width = 8, .override_divn_shift = 8, .divp_shift = 20, .divp_width = 5, .override_divp_shift = 27, };
/* PLLM (memory PLL): divider overrides live in PMC scratch registers. */
static struct tegra_clk_pll_params pll_m_params = { .input_min = 9600000, .input_max = 500000000, .cf_min = 9600000, .cf_max = 19200000, .vco_min = 800000000, .vco_max = 1866000000, .base_reg = PLLM_BASE, .misc_reg = PLLM_MISC2, .lock_mask = PLL_BASE_LOCK, .lock_enable_bit_idx = PLLM_MISC_LOCK_ENABLE, .lock_delay = 300, .iddq_reg = PLLM_MISC2, .iddq_bit_idx = PLLM_IDDQ_BIT, .max_p = PLL_QLIN_PDIV_MAX, .ext_misc_reg[0] = PLLM_MISC2, .ext_misc_reg[1] = PLLM_MISC1, .round_p_to_pdiv = pll_qlin_p_to_pdiv, .pdiv_tohw = pll_qlin_pdiv_to_hw, .div_nmp = &pllm_nmp, .pmc_divnm_reg = PMC_PLLM_WB0_OVERRIDE, .pmc_divp_reg = PMC_PLLM_WB0_OVERRIDE_2, .freq_table = pll_m_freq_table, .flags = TEGRA_PLL_USE_LOCK | TEGRA_PLL_HAS_LOCK_ENABLE, .calc_rate = tegra210_pll_fixed_mdiv_cfg, };

/* PLLMB: shadow memory PLL, same limits/freq table as PLLM. */
static struct tegra_clk_pll_params pll_mb_params = { .input_min = 9600000, .input_max = 500000000, .cf_min = 9600000, .cf_max = 19200000, .vco_min = 800000000, .vco_max = 1866000000, .base_reg = PLLMB_BASE, .misc_reg = PLLMB_MISC1, .lock_mask = PLL_BASE_LOCK, .lock_delay = 300, .iddq_reg = PLLMB_MISC1, .iddq_bit_idx = PLLMB_IDDQ_BIT, .max_p = PLL_QLIN_PDIV_MAX, .ext_misc_reg[0] = PLLMB_MISC1, .round_p_to_pdiv = pll_qlin_p_to_pdiv, .pdiv_tohw = pll_qlin_pdiv_to_hw, .div_nmp = &pllm_nmp, .freq_table = pll_m_freq_table, .flags = TEGRA_PLL_USE_LOCK, .set_defaults = tegra210_pllmb_set_defaults, .calc_rate = tegra210_pll_fixed_mdiv_cfg, };

static struct tegra_clk_pll_freq_table pll_e_freq_table[] = { /* PLLE special case: use cpcon field to store cml divider value */ { 672000000, 100000000, 125, 42, 0, 13 }, { 624000000, 100000000, 125, 39, 0, 13 }, { 336000000, 100000000, 125, 21, 0, 13 }, { 312000000, 100000000, 200, 26, 0, 14 }, { 38400000, 100000000, 125, 2, 0, 14 }, { 12000000, 100000000, 200, 1, 0, 14 }, { 0, 0, 0, 0, 0, 0 }, };

/* PLLE divider field layout. */
static struct div_nmp plle_nmp = { .divm_shift = 0, .divm_width = 8, .divn_shift = 8, .divn_width = 8, .divp_shift = 24, .divp_width = 5, };

/* PLLE (PCIe/SATA 100 MHz): definition continues on next chunk line. */
static struct tegra_clk_pll_params
pll_e_params = { .input_min = 12000000, .input_max = 800000000, .cf_min = 12000000, .cf_max = 38400000, .vco_min = 1600000000, .vco_max = 2500000000U, .base_reg = PLLE_BASE, .misc_reg = PLLE_MISC0, .aux_reg = PLLE_AUX, .lock_mask = PLLE_MISC_LOCK, .lock_enable_bit_idx = PLLE_MISC_LOCK_ENABLE, .lock_delay = 300, .div_nmp = &plle_nmp, .freq_table = pll_e_freq_table, .flags = TEGRA_PLL_FIXED | TEGRA_PLL_LOCK_MISC | TEGRA_PLL_USE_LOCK | TEGRA_PLL_HAS_LOCK_ENABLE, .fixed_rate = 100000000, .calc_rate = tegra210_pll_fixed_mdiv_cfg, };

/* Pre-qualified 672 MHz VCO settings for PLLREFE. */
static struct tegra_clk_pll_freq_table pll_re_vco_freq_table[] = { { 12000000, 672000000, 56, 1, 1, 0 }, { 13000000, 672000000, 51, 1, 1, 0 }, /* actual: 663.0 MHz */ { 38400000, 672000000, 70, 4, 1, 0 }, { 0, 0, 0, 0, 0, 0 }, };

/* PLLREFE divider field layout. */
static struct div_nmp pllre_nmp = { .divm_shift = 0, .divm_width = 8, .divn_shift = 8, .divn_width = 8, .divp_shift = 16, .divp_width = 5, };

/* PLLREFE VCO hardware description (lock status read from MISC). */
static struct tegra_clk_pll_params pll_re_vco_params = { .input_min = 9600000, .input_max = 800000000, .cf_min = 9600000, .cf_max = 19200000, .vco_min = 350000000, .vco_max = 700000000, .base_reg = PLLRE_BASE, .misc_reg = PLLRE_MISC0, .lock_mask = PLLRE_MISC_LOCK, .lock_delay = 300, .max_p = PLL_QLIN_PDIV_MAX, .ext_misc_reg[0] = PLLRE_MISC0, .iddq_reg = PLLRE_MISC0, .iddq_bit_idx = PLLRE_IDDQ_BIT, .round_p_to_pdiv = pll_qlin_p_to_pdiv, .pdiv_tohw = pll_qlin_pdiv_to_hw, .div_nmp = &pllre_nmp, .freq_table = pll_re_vco_freq_table, .flags = TEGRA_PLL_USE_LOCK | TEGRA_PLL_LOCK_MISC | TEGRA_PLL_VCO_OUT, .set_defaults = tegra210_pllre_set_defaults, .calc_rate = tegra210_pll_fixed_mdiv_cfg, };

/* PLLP divider field layout. */
static struct div_nmp pllp_nmp = { .divm_shift = 0, .divm_width = 8, .divn_shift = 10, .divn_width = 8, .divp_shift = 20, .divp_width = 5, };

/* Fixed 408 MHz settings for PLLP. */
static struct tegra_clk_pll_freq_table pll_p_freq_table[] = { { 12000000, 408000000, 34, 1, 1, 0 }, { 38400000, 408000000, 85, 8, 1, 0 }, /* cf = 4.8MHz, allowed exception */ { 0, 0, 0, 0, 0, 0 }, };

/* PLLP (peripheral PLL, fixed rate): continues on next chunk line. */
static struct tegra_clk_pll_params pll_p_params = {
.input_min = 9600000, .input_max = 800000000, .cf_min = 9600000, .cf_max = 19200000, .vco_min = 350000000, .vco_max = 700000000, .base_reg = PLLP_BASE, .misc_reg = PLLP_MISC0, .lock_mask = PLL_BASE_LOCK, .lock_delay = 300, .iddq_reg = PLLP_MISC0, .iddq_bit_idx = PLLXP_IDDQ_BIT, .ext_misc_reg[0] = PLLP_MISC0, .ext_misc_reg[1] = PLLP_MISC1, .div_nmp = &pllp_nmp, .freq_table = pll_p_freq_table, .fixed_rate = 408000000, .flags = TEGRA_PLL_FIXED | TEGRA_PLL_USE_LOCK | TEGRA_PLL_VCO_OUT, .set_defaults = tegra210_pllp_set_defaults, .calc_rate = tegra210_pll_fixed_mdiv_cfg, };

/* PLLA1: same limits and freq table as the PLLC family. */
static struct tegra_clk_pll_params pll_a1_params = { .input_min = 12000000, .input_max = 700000000, .cf_min = 12000000, .cf_max = 50000000, .vco_min = 600000000, .vco_max = 1200000000, .base_reg = PLLA1_BASE, .misc_reg = PLLA1_MISC0, .lock_mask = PLLCX_BASE_LOCK, .lock_delay = 300, .iddq_reg = PLLA1_MISC0, .iddq_bit_idx = PLLCX_IDDQ_BIT, .reset_reg = PLLA1_MISC0, .reset_bit_idx = PLLCX_RESET_BIT, .round_p_to_pdiv = pll_qlin_p_to_pdiv, .pdiv_tohw = pll_qlin_pdiv_to_hw, .div_nmp = &pllc_nmp, .ext_misc_reg[0] = PLLA1_MISC0, .ext_misc_reg[1] = PLLA1_MISC1, .ext_misc_reg[2] = PLLA1_MISC2, .ext_misc_reg[3] = PLLA1_MISC3, .freq_table = pll_cx_freq_table, .flags = TEGRA_PLL_USE_LOCK, .set_defaults = _plla1_set_defaults, .calc_rate = tegra210_pll_fixed_mdiv_cfg, };

/* PLLA divider field layout. */
static struct div_nmp plla_nmp = { .divm_shift = 0, .divm_width = 8, .divn_shift = 8, .divn_width = 8, .divp_shift = 20, .divp_width = 5, };

/*
 * PLLA (audio) rate table; 7th column appears to be SDM data for the
 * fractional-N entries (matches .sdm_data usage elsewhere) — confirm
 * against struct tegra_clk_pll_freq_table.
 */
static struct tegra_clk_pll_freq_table pll_a_freq_table[] = { { 12000000, 282240000, 47, 1, 2, 1, 0xf148 }, /* actual: 282240234 */ { 12000000, 368640000, 61, 1, 2, 1, 0xfe15 }, /* actual: 368640381 */ { 12000000, 240000000, 60, 1, 3, 1, 0 }, { 13000000, 282240000, 43, 1, 2, 1, 0xfd7d }, /* actual: 282239807 */ { 13000000, 368640000, 56, 1, 2, 1, 0x06d8 }, /* actual: 368640137 */ { 13000000, 240000000, 55, 1, 3, 1, 0 }, /* actual: 238.3 MHz */ { 38400000, 282240000, 44, 3, 2, 1, 0xf333 }, /* actual: 282239844
*/ { 38400000, 368640000, 57, 3, 2, 1, 0x0333 }, /* actual: 368639844 */ { 38400000, 240000000, 75, 3, 3, 1, 0 }, { 0, 0, 0, 0, 0, 0, 0 }, };

/* PLLA (audio): fractional-N via SDM; uses set_gain/adjust_vco hooks. */
static struct tegra_clk_pll_params pll_a_params = { .input_min = 12000000, .input_max = 800000000, .cf_min = 12000000, .cf_max = 19200000, .vco_min = 500000000, .vco_max = 1000000000, .base_reg = PLLA_BASE, .misc_reg = PLLA_MISC0, .lock_mask = PLL_BASE_LOCK, .lock_delay = 300, .round_p_to_pdiv = pll_qlin_p_to_pdiv, .pdiv_tohw = pll_qlin_pdiv_to_hw, .iddq_reg = PLLA_BASE, .iddq_bit_idx = PLLA_IDDQ_BIT, .div_nmp = &plla_nmp, .sdm_din_reg = PLLA_MISC1, .sdm_din_mask = PLLA_SDM_DIN_MASK, .sdm_ctrl_reg = PLLA_MISC2, .sdm_ctrl_en_mask = PLLA_SDM_EN_MASK, .ext_misc_reg[0] = PLLA_MISC0, .ext_misc_reg[1] = PLLA_MISC1, .ext_misc_reg[2] = PLLA_MISC2, .freq_table = pll_a_freq_table, .flags = TEGRA_PLL_USE_LOCK | TEGRA_MDIV_NEW, .set_defaults = tegra210_plla_set_defaults, .calc_rate = tegra210_pll_fixed_mdiv_cfg, .set_gain = tegra210_clk_pll_set_gain, .adjust_vco = tegra210_clk_adjust_vco_min, };

/* PLLD divider field layout (3-bit exponential P divider). */
static struct div_nmp plld_nmp = { .divm_shift = 0, .divm_width = 8, .divn_shift = 11, .divn_width = 8, .divp_shift = 20, .divp_width = 3, };

/* Pre-qualified 594 MHz (display) settings for PLLD. */
static struct tegra_clk_pll_freq_table pll_d_freq_table[] = { { 12000000, 594000000, 99, 1, 2, 0, 0 }, { 13000000, 594000000, 91, 1, 2, 0, 0xfc4f }, /* actual: 594000183 */ { 38400000, 594000000, 30, 1, 2, 0, 0x0e00 }, { 0, 0, 0, 0, 0, 0, 0 }, };

/* PLLD (display): exponential post divider; continues on next chunk line. */
static struct tegra_clk_pll_params pll_d_params = { .input_min = 12000000, .input_max = 800000000, .cf_min = 12000000, .cf_max = 38400000, .vco_min = 750000000, .vco_max = 1500000000, .base_reg = PLLD_BASE, .misc_reg = PLLD_MISC0, .lock_mask = PLL_BASE_LOCK, .lock_delay = 1000, .iddq_reg = PLLD_MISC0, .iddq_bit_idx = PLLD_IDDQ_BIT, .round_p_to_pdiv = pll_expo_p_to_pdiv, .pdiv_tohw = pll_expo_pdiv_to_hw, .div_nmp = &plld_nmp, .sdm_din_reg = PLLD_MISC0, .sdm_din_mask = PLLA_SDM_DIN_MASK, .sdm_ctrl_reg = PLLD_MISC0, .sdm_ctrl_en_mask = PLLD_SDM_EN_MASK,
.ext_misc_reg[0] = PLLD_MISC0, .ext_misc_reg[1] = PLLD_MISC1, .freq_table = pll_d_freq_table, .flags = TEGRA_PLL_USE_LOCK, .mdiv_default = 1, .set_defaults = tegra210_plld_set_defaults, .calc_rate = tegra210_pll_fixed_mdiv_cfg, .set_gain = tegra210_clk_pll_set_gain, .adjust_vco = tegra210_clk_adjust_vco_min, };

/* Pre-qualified 594 MHz settings for PLLD2. */
static struct tegra_clk_pll_freq_table tegra210_pll_d2_freq_table[] = { { 12000000, 594000000, 99, 1, 2, 0, 0xf000 }, { 13000000, 594000000, 91, 1, 2, 0, 0xfc4f }, /* actual: 594000183 */ { 38400000, 594000000, 30, 1, 2, 0, 0x0e00 }, { 0, 0, 0, 0, 0, 0, 0 }, };

/* s/w policy, always tegra_pll_ref */
static struct tegra_clk_pll_params pll_d2_params = { .input_min = 12000000, .input_max = 800000000, .cf_min = 12000000, .cf_max = 38400000, .vco_min = 750000000, .vco_max = 1500000000, .base_reg = PLLD2_BASE, .misc_reg = PLLD2_MISC0, .lock_mask = PLL_BASE_LOCK, .lock_delay = 300, .iddq_reg = PLLD2_BASE, .iddq_bit_idx = PLLSS_IDDQ_BIT, .sdm_din_reg = PLLD2_MISC3, .sdm_din_mask = PLLA_SDM_DIN_MASK, .sdm_ctrl_reg = PLLD2_MISC1, .sdm_ctrl_en_mask = PLLD2_SDM_EN_MASK, /* disable spread-spectrum for pll_d2 */ .ssc_ctrl_reg = 0, .ssc_ctrl_en_mask = 0, .round_p_to_pdiv = pll_qlin_p_to_pdiv, .pdiv_tohw = pll_qlin_pdiv_to_hw, .div_nmp = &pllss_nmp, .ext_misc_reg[0] = PLLD2_MISC0, .ext_misc_reg[1] = PLLD2_MISC1, .ext_misc_reg[2] = PLLD2_MISC2, .ext_misc_reg[3] = PLLD2_MISC3, .max_p = PLL_QLIN_PDIV_MAX, .mdiv_default = 1, .freq_table = tegra210_pll_d2_freq_table, .set_defaults = tegra210_plld2_set_defaults, .flags = TEGRA_PLL_USE_LOCK, .calc_rate = tegra210_pll_fixed_mdiv_cfg, .set_gain = tegra210_clk_pll_set_gain, .adjust_vco = tegra210_clk_adjust_vco_min, };

/* Pre-qualified 270 MHz (DisplayPort) settings for PLLDP. */
static struct tegra_clk_pll_freq_table pll_dp_freq_table[] = { { 12000000, 270000000, 90, 1, 4, 0, 0xf000 }, { 13000000, 270000000, 83, 1, 4, 0, 0xf000 }, /* actual: 269.8 MHz */ { 38400000, 270000000, 28, 1, 4, 0, 0xf400 }, { 0, 0, 0, 0, 0, 0, 0 }, };

/* PLLDP: SDM + spread-spectrum capable; continues on next chunk line. */
static struct tegra_clk_pll_params pll_dp_params = { .input_min =
12000000, .input_max = 800000000, .cf_min = 12000000, .cf_max = 38400000, .vco_min = 750000000, .vco_max = 1500000000, .base_reg = PLLDP_BASE, .misc_reg = PLLDP_MISC, .lock_mask = PLL_BASE_LOCK, .lock_delay = 300, .iddq_reg = PLLDP_BASE, .iddq_bit_idx = PLLSS_IDDQ_BIT, .sdm_din_reg = PLLDP_SS_CTRL2, .sdm_din_mask = PLLA_SDM_DIN_MASK, .sdm_ctrl_reg = PLLDP_SS_CFG, .sdm_ctrl_en_mask = PLLDP_SDM_EN_MASK, .ssc_ctrl_reg = PLLDP_SS_CFG, .ssc_ctrl_en_mask = PLLDP_SSC_EN_MASK, .round_p_to_pdiv = pll_qlin_p_to_pdiv, .pdiv_tohw = pll_qlin_pdiv_to_hw, .div_nmp = &pllss_nmp, .ext_misc_reg[0] = PLLDP_MISC, .ext_misc_reg[1] = PLLDP_SS_CFG, .ext_misc_reg[2] = PLLDP_SS_CTRL1, .ext_misc_reg[3] = PLLDP_SS_CTRL2, .max_p = PLL_QLIN_PDIV_MAX, .mdiv_default = 1, .freq_table = pll_dp_freq_table, .set_defaults = tegra210_plldp_set_defaults, .flags = TEGRA_PLL_USE_LOCK, .calc_rate = tegra210_pll_fixed_mdiv_cfg, .set_gain = tegra210_clk_pll_set_gain, .adjust_vco = tegra210_clk_adjust_vco_min, };

/* PLLU divider field layout. */
static struct div_nmp pllu_nmp = { .divm_shift = 0, .divm_width = 8, .divn_shift = 8, .divn_width = 8, .divp_shift = 16, .divp_width = 5, };

/* Pre-qualified 480 MHz (USB) settings for PLLU. */
static struct tegra_clk_pll_freq_table pll_u_freq_table[] = { { 12000000, 480000000, 40, 1, 1, 0 }, { 13000000, 480000000, 36, 1, 1, 0 }, /* actual: 468.0 MHz */ { 38400000, 480000000, 25, 2, 1, 0 }, { 0, 0, 0, 0, 0, 0 }, };

/* PLLU VCO hardware description; initializer finishes on next chunk line. */
static struct tegra_clk_pll_params pll_u_vco_params = { .input_min = 9600000, .input_max = 800000000, .cf_min = 9600000, .cf_max = 19200000, .vco_min = 350000000, .vco_max = 700000000, .base_reg = PLLU_BASE, .misc_reg = PLLU_MISC0, .lock_mask = PLL_BASE_LOCK, .lock_delay = 1000, .iddq_reg = PLLU_MISC0, .iddq_bit_idx = PLLU_IDDQ_BIT, .ext_misc_reg[0] = PLLU_MISC0, .ext_misc_reg[1] = PLLU_MISC1, .round_p_to_pdiv = pll_qlin_p_to_pdiv, .pdiv_tohw = pll_qlin_pdiv_to_hw, .div_nmp = &pllu_nmp, .freq_table = pll_u_freq_table, .flags = TEGRA_PLLU | TEGRA_PLL_USE_LOCK | TEGRA_PLL_VCO_OUT, .set_defaults = tegra210_pllu_set_defaults, .calc_rate =
tegra210_pll_fixed_mdiv_cfg, };

/*
 * Master per-SoC clock table: marks which entries of the generic
 * tegra_clk enumeration exist on Tegra210 and maps each to its
 * TEGRA210_CLK_* device-tree binding ID.
 */
static struct tegra_clk tegra210_clks[tegra_clk_max] __initdata = { [tegra_clk_ispb] = { .dt_id = TEGRA210_CLK_ISPB, .present = true }, [tegra_clk_rtc] = { .dt_id = TEGRA210_CLK_RTC, .present = true }, [tegra_clk_timer] = { .dt_id = TEGRA210_CLK_TIMER, .present = true }, [tegra_clk_uarta_8] = { .dt_id = TEGRA210_CLK_UARTA, .present = true }, [tegra_clk_sdmmc2_9] = { .dt_id = TEGRA210_CLK_SDMMC2, .present = true }, [tegra_clk_i2s1] = { .dt_id = TEGRA210_CLK_I2S1, .present = true }, [tegra_clk_i2c1] = { .dt_id = TEGRA210_CLK_I2C1, .present = true }, [tegra_clk_sdmmc1_9] = { .dt_id = TEGRA210_CLK_SDMMC1, .present = true }, [tegra_clk_sdmmc4_9] = { .dt_id = TEGRA210_CLK_SDMMC4, .present = true }, [tegra_clk_pwm] = { .dt_id = TEGRA210_CLK_PWM, .present = true }, [tegra_clk_i2s2] = { .dt_id = TEGRA210_CLK_I2S2, .present = true }, [tegra_clk_usbd] = { .dt_id = TEGRA210_CLK_USBD, .present = true }, [tegra_clk_isp_9] = { .dt_id = TEGRA210_CLK_ISP, .present = true }, [tegra_clk_disp2_8] = { .dt_id = TEGRA210_CLK_DISP2, .present = true }, [tegra_clk_disp1_8] = { .dt_id = TEGRA210_CLK_DISP1, .present = true }, [tegra_clk_host1x_9] = { .dt_id = TEGRA210_CLK_HOST1X, .present = true }, [tegra_clk_i2s0] = { .dt_id = TEGRA210_CLK_I2S0, .present = true }, [tegra_clk_apbdma] = { .dt_id = TEGRA210_CLK_APBDMA, .present = true }, [tegra_clk_kfuse] = { .dt_id = TEGRA210_CLK_KFUSE, .present = true }, [tegra_clk_sbc1_9] = { .dt_id = TEGRA210_CLK_SBC1, .present = true }, [tegra_clk_sbc2_9] = { .dt_id = TEGRA210_CLK_SBC2, .present = true }, [tegra_clk_sbc3_9] = { .dt_id = TEGRA210_CLK_SBC3, .present = true }, [tegra_clk_i2c5] = { .dt_id = TEGRA210_CLK_I2C5, .present = true }, [tegra_clk_csi] = { .dt_id = TEGRA210_CLK_CSI, .present = true }, [tegra_clk_i2c2] = { .dt_id = TEGRA210_CLK_I2C2, .present = true }, [tegra_clk_uartc_8] = { .dt_id = TEGRA210_CLK_UARTC, .present = true }, [tegra_clk_mipi_cal] = { .dt_id = TEGRA210_CLK_MIPI_CAL, .present = true },
[tegra_clk_emc] = { .dt_id = TEGRA210_CLK_EMC, .present = true }, [tegra_clk_usb2] = { .dt_id = TEGRA210_CLK_USB2, .present = true }, [tegra_clk_bsev] = { .dt_id = TEGRA210_CLK_BSEV, .present = true }, [tegra_clk_uartd_8] = { .dt_id = TEGRA210_CLK_UARTD, .present = true }, [tegra_clk_i2c3] = { .dt_id = TEGRA210_CLK_I2C3, .present = true }, [tegra_clk_sbc4_9] = { .dt_id = TEGRA210_CLK_SBC4, .present = true }, [tegra_clk_sdmmc3_9] = { .dt_id = TEGRA210_CLK_SDMMC3, .present = true }, [tegra_clk_pcie] = { .dt_id = TEGRA210_CLK_PCIE, .present = true }, [tegra_clk_owr_8] = { .dt_id = TEGRA210_CLK_OWR, .present = true }, [tegra_clk_afi] = { .dt_id = TEGRA210_CLK_AFI, .present = true }, [tegra_clk_csite_8] = { .dt_id = TEGRA210_CLK_CSITE, .present = true }, [tegra_clk_soc_therm_8] = { .dt_id = TEGRA210_CLK_SOC_THERM, .present = true }, [tegra_clk_dtv] = { .dt_id = TEGRA210_CLK_DTV, .present = true }, [tegra_clk_i2cslow] = { .dt_id = TEGRA210_CLK_I2CSLOW, .present = true }, [tegra_clk_tsec_8] = { .dt_id = TEGRA210_CLK_TSEC, .present = true }, [tegra_clk_xusb_host] = { .dt_id = TEGRA210_CLK_XUSB_HOST, .present = true }, [tegra_clk_csus] = { .dt_id = TEGRA210_CLK_CSUS, .present = true }, [tegra_clk_mselect] = { .dt_id = TEGRA210_CLK_MSELECT, .present = true }, [tegra_clk_tsensor] = { .dt_id = TEGRA210_CLK_TSENSOR, .present = true }, [tegra_clk_i2s3] = { .dt_id = TEGRA210_CLK_I2S3, .present = true }, [tegra_clk_i2s4] = { .dt_id = TEGRA210_CLK_I2S4, .present = true }, [tegra_clk_i2c4] = { .dt_id = TEGRA210_CLK_I2C4, .present = true }, [tegra_clk_d_audio] = { .dt_id = TEGRA210_CLK_D_AUDIO, .present = true }, [tegra_clk_hda2codec_2x_8] = { .dt_id = TEGRA210_CLK_HDA2CODEC_2X, .present = true }, [tegra_clk_spdif_2x] = { .dt_id = TEGRA210_CLK_SPDIF_2X, .present = true }, [tegra_clk_actmon] = { .dt_id = TEGRA210_CLK_ACTMON, .present = true }, [tegra_clk_extern1] = { .dt_id = TEGRA210_CLK_EXTERN1, .present = true }, [tegra_clk_extern2] = { .dt_id = TEGRA210_CLK_EXTERN2, .present =
true }, [tegra_clk_extern3] = { .dt_id = TEGRA210_CLK_EXTERN3, .present = true }, [tegra_clk_sata_oob_8] = { .dt_id = TEGRA210_CLK_SATA_OOB, .present = true }, [tegra_clk_sata_8] = { .dt_id = TEGRA210_CLK_SATA, .present = true }, [tegra_clk_hda_8] = { .dt_id = TEGRA210_CLK_HDA, .present = true }, [tegra_clk_hda2hdmi] = { .dt_id = TEGRA210_CLK_HDA2HDMI, .present = true }, [tegra_clk_cilab] = { .dt_id = TEGRA210_CLK_CILAB, .present = true }, [tegra_clk_cilcd] = { .dt_id = TEGRA210_CLK_CILCD, .present = true }, [tegra_clk_cile] = { .dt_id = TEGRA210_CLK_CILE, .present = true }, [tegra_clk_dsialp] = { .dt_id = TEGRA210_CLK_DSIALP, .present = true }, [tegra_clk_dsiblp] = { .dt_id = TEGRA210_CLK_DSIBLP, .present = true }, [tegra_clk_entropy_8] = { .dt_id = TEGRA210_CLK_ENTROPY, .present = true }, [tegra_clk_xusb_ss] = { .dt_id = TEGRA210_CLK_XUSB_SS, .present = true }, [tegra_clk_i2c6] = { .dt_id = TEGRA210_CLK_I2C6, .present = true }, [tegra_clk_vim2_clk] = { .dt_id = TEGRA210_CLK_VIM2_CLK, .present = true }, [tegra_clk_clk72Mhz_8] = { .dt_id = TEGRA210_CLK_CLK72MHZ, .present = true }, [tegra_clk_vic03_8] = { .dt_id = TEGRA210_CLK_VIC03, .present = true }, [tegra_clk_dpaux] = { .dt_id = TEGRA210_CLK_DPAUX, .present = true }, [tegra_clk_dpaux1] = { .dt_id = TEGRA210_CLK_DPAUX1, .present = true }, [tegra_clk_sor0] = { .dt_id = TEGRA210_CLK_SOR0, .present = true }, [tegra_clk_sor0_lvds] = { .dt_id = TEGRA210_CLK_SOR0_LVDS, .present = true }, [tegra_clk_sor1] = { .dt_id = TEGRA210_CLK_SOR1, .present = true }, [tegra_clk_sor1_src] = { .dt_id = TEGRA210_CLK_SOR1_SRC, .present = true }, [tegra_clk_gpu] = { .dt_id = TEGRA210_CLK_GPU, .present = true }, [tegra_clk_pll_g_ref] = { .dt_id = TEGRA210_CLK_PLL_G_REF, .present = true, }, [tegra_clk_uartb_8] = { .dt_id = TEGRA210_CLK_UARTB, .present = true }, [tegra_clk_vfir] = { .dt_id = TEGRA210_CLK_VFIR, .present = true }, [tegra_clk_spdif_in_8] = { .dt_id = TEGRA210_CLK_SPDIF_IN, .present = true }, [tegra_clk_spdif_out] = { .dt_id =
TEGRA210_CLK_SPDIF_OUT, .present = true }, [tegra_clk_vi_10] = { .dt_id = TEGRA210_CLK_VI, .present = true }, [tegra_clk_vi_sensor_8] = { .dt_id = TEGRA210_CLK_VI_SENSOR, .present = true }, [tegra_clk_fuse] = { .dt_id = TEGRA210_CLK_FUSE, .present = true }, [tegra_clk_fuse_burn] = { .dt_id = TEGRA210_CLK_FUSE_BURN, .present = true }, [tegra_clk_clk_32k] = { .dt_id = TEGRA210_CLK_CLK_32K, .present = true }, [tegra_clk_clk_m] = { .dt_id = TEGRA210_CLK_CLK_M, .present = true }, [tegra_clk_clk_m_div2] = { .dt_id = TEGRA210_CLK_CLK_M_DIV2, .present = true }, [tegra_clk_clk_m_div4] = { .dt_id = TEGRA210_CLK_CLK_M_DIV4, .present = true }, [tegra_clk_pll_ref] = { .dt_id = TEGRA210_CLK_PLL_REF, .present = true }, [tegra_clk_pll_c] = { .dt_id = TEGRA210_CLK_PLL_C, .present = true }, [tegra_clk_pll_c_out1] = { .dt_id = TEGRA210_CLK_PLL_C_OUT1, .present = true }, [tegra_clk_pll_c2] = { .dt_id = TEGRA210_CLK_PLL_C2, .present = true }, [tegra_clk_pll_c3] = { .dt_id = TEGRA210_CLK_PLL_C3, .present = true }, [tegra_clk_pll_m] = { .dt_id = TEGRA210_CLK_PLL_M, .present = true }, [tegra_clk_pll_m_out1] = { .dt_id = TEGRA210_CLK_PLL_M_OUT1, .present = true }, [tegra_clk_pll_p] = { .dt_id = TEGRA210_CLK_PLL_P, .present = true }, [tegra_clk_pll_p_out1] = { .dt_id = TEGRA210_CLK_PLL_P_OUT1, .present = true }, [tegra_clk_pll_p_out3] = { .dt_id = TEGRA210_CLK_PLL_P_OUT3, .present = true }, [tegra_clk_pll_p_out4_cpu] = { .dt_id = TEGRA210_CLK_PLL_P_OUT4, .present = true }, [tegra_clk_pll_p_out_hsio] = { .dt_id = TEGRA210_CLK_PLL_P_OUT_HSIO, .present = true }, [tegra_clk_pll_p_out_xusb] = { .dt_id = TEGRA210_CLK_PLL_P_OUT_XUSB, .present = true }, [tegra_clk_pll_p_out_cpu] = { .dt_id = TEGRA210_CLK_PLL_P_OUT_CPU, .present = true }, [tegra_clk_pll_p_out_adsp] = { .dt_id = TEGRA210_CLK_PLL_P_OUT_ADSP, .present = true }, [tegra_clk_pll_a] = { .dt_id = TEGRA210_CLK_PLL_A, .present = true }, [tegra_clk_pll_a_out0] = { .dt_id = TEGRA210_CLK_PLL_A_OUT0, .present = true }, [tegra_clk_pll_d] = {
.dt_id = TEGRA210_CLK_PLL_D, .present = true }, [tegra_clk_pll_d_out0] = { .dt_id = TEGRA210_CLK_PLL_D_OUT0, .present = true }, [tegra_clk_pll_d2] = { .dt_id = TEGRA210_CLK_PLL_D2, .present = true }, [tegra_clk_pll_d2_out0] = { .dt_id = TEGRA210_CLK_PLL_D2_OUT0, .present = true }, [tegra_clk_pll_u] = { .dt_id = TEGRA210_CLK_PLL_U, .present = true }, [tegra_clk_pll_u_out] = { .dt_id = TEGRA210_CLK_PLL_U_OUT, .present = true }, [tegra_clk_pll_u_out1] = { .dt_id = TEGRA210_CLK_PLL_U_OUT1, .present = true }, [tegra_clk_pll_u_out2] = { .dt_id = TEGRA210_CLK_PLL_U_OUT2, .present = true }, [tegra_clk_pll_u_480m] = { .dt_id = TEGRA210_CLK_PLL_U_480M, .present = true }, [tegra_clk_pll_u_60m] = { .dt_id = TEGRA210_CLK_PLL_U_60M, .present = true }, [tegra_clk_pll_u_48m] = { .dt_id = TEGRA210_CLK_PLL_U_48M, .present = true }, [tegra_clk_pll_x] = { .dt_id = TEGRA210_CLK_PLL_X, .present = true }, [tegra_clk_pll_x_out0] = { .dt_id = TEGRA210_CLK_PLL_X_OUT0, .present = true }, [tegra_clk_pll_re_vco] = { .dt_id = TEGRA210_CLK_PLL_RE_VCO, .present = true }, [tegra_clk_pll_re_out] = { .dt_id = TEGRA210_CLK_PLL_RE_OUT, .present = true }, [tegra_clk_spdif_in_sync] = { .dt_id = TEGRA210_CLK_SPDIF_IN_SYNC, .present = true }, [tegra_clk_i2s0_sync] = { .dt_id = TEGRA210_CLK_I2S0_SYNC, .present = true }, [tegra_clk_i2s1_sync] = { .dt_id = TEGRA210_CLK_I2S1_SYNC, .present = true }, [tegra_clk_i2s2_sync] = { .dt_id = TEGRA210_CLK_I2S2_SYNC, .present = true }, [tegra_clk_i2s3_sync] = { .dt_id = TEGRA210_CLK_I2S3_SYNC, .present = true }, [tegra_clk_i2s4_sync] = { .dt_id = TEGRA210_CLK_I2S4_SYNC, .present = true }, [tegra_clk_vimclk_sync] = { .dt_id = TEGRA210_CLK_VIMCLK_SYNC, .present = true }, [tegra_clk_audio0] = { .dt_id = TEGRA210_CLK_AUDIO0, .present = true }, [tegra_clk_audio1] = { .dt_id = TEGRA210_CLK_AUDIO1, .present = true }, [tegra_clk_audio2] = { .dt_id = TEGRA210_CLK_AUDIO2, .present = true }, [tegra_clk_audio3] = { .dt_id = TEGRA210_CLK_AUDIO3, .present = true },
[tegra_clk_audio4] = { .dt_id = TEGRA210_CLK_AUDIO4, .present = true }, [tegra_clk_spdif] = { .dt_id = TEGRA210_CLK_SPDIF, .present = true }, [tegra_clk_clk_out_1] = { .dt_id = TEGRA210_CLK_CLK_OUT_1, .present = true }, [tegra_clk_clk_out_2] = { .dt_id = TEGRA210_CLK_CLK_OUT_2, .present = true }, [tegra_clk_clk_out_3] = { .dt_id = TEGRA210_CLK_CLK_OUT_3, .present = true }, [tegra_clk_blink] = { .dt_id = TEGRA210_CLK_BLINK, .present = true }, [tegra_clk_xusb_gate] = { .dt_id = TEGRA210_CLK_XUSB_GATE, .present = true }, [tegra_clk_xusb_host_src_8] = { .dt_id = TEGRA210_CLK_XUSB_HOST_SRC, .present = true }, [tegra_clk_xusb_falcon_src_8] = { .dt_id = TEGRA210_CLK_XUSB_FALCON_SRC, .present = true }, [tegra_clk_xusb_fs_src] = { .dt_id = TEGRA210_CLK_XUSB_FS_SRC, .present = true }, [tegra_clk_xusb_ss_src_8] = { .dt_id = TEGRA210_CLK_XUSB_SS_SRC, .present = true }, [tegra_clk_xusb_ss_div2] = { .dt_id = TEGRA210_CLK_XUSB_SS_DIV2, .present = true }, [tegra_clk_xusb_dev_src_8] = { .dt_id = TEGRA210_CLK_XUSB_DEV_SRC, .present = true }, [tegra_clk_xusb_dev] = { .dt_id = TEGRA210_CLK_XUSB_DEV, .present = true }, [tegra_clk_xusb_hs_src_4] = { .dt_id = TEGRA210_CLK_XUSB_HS_SRC, .present = true }, [tegra_clk_xusb_ssp_src] = { .dt_id = TEGRA210_CLK_XUSB_SSP_SRC, .present = true }, [tegra_clk_usb2_hsic_trk] = { .dt_id = TEGRA210_CLK_USB2_HSIC_TRK, .present = true }, [tegra_clk_hsic_trk] = { .dt_id = TEGRA210_CLK_HSIC_TRK, .present = true }, [tegra_clk_usb2_trk] = { .dt_id = TEGRA210_CLK_USB2_TRK, .present = true }, [tegra_clk_sclk] = { .dt_id = TEGRA210_CLK_SCLK, .present = true }, [tegra_clk_sclk_mux] = { .dt_id = TEGRA210_CLK_SCLK_MUX, .present = true }, [tegra_clk_hclk] = { .dt_id = TEGRA210_CLK_HCLK, .present = true }, [tegra_clk_pclk] = { .dt_id = TEGRA210_CLK_PCLK, .present = true }, [tegra_clk_cclk_g] = { .dt_id = TEGRA210_CLK_CCLK_G, .present = true }, [tegra_clk_cclk_lp] = { .dt_id = TEGRA210_CLK_CCLK_LP, .present = true }, [tegra_clk_dfll_ref] = { .dt_id =
TEGRA210_CLK_DFLL_REF, .present = true }, [tegra_clk_dfll_soc] = { .dt_id = TEGRA210_CLK_DFLL_SOC, .present = true }, [tegra_clk_vi_sensor2_8] = { .dt_id = TEGRA210_CLK_VI_SENSOR2, .present = true }, [tegra_clk_pll_p_out5] = { .dt_id = TEGRA210_CLK_PLL_P_OUT5, .present = true }, [tegra_clk_pll_c4] = { .dt_id = TEGRA210_CLK_PLL_C4, .present = true }, [tegra_clk_pll_dp] = { .dt_id = TEGRA210_CLK_PLL_DP, .present = true }, [tegra_clk_audio0_mux] = { .dt_id = TEGRA210_CLK_AUDIO0_MUX, .present = true }, [tegra_clk_audio1_mux] = { .dt_id = TEGRA210_CLK_AUDIO1_MUX, .present = true }, [tegra_clk_audio2_mux] = { .dt_id = TEGRA210_CLK_AUDIO2_MUX, .present = true }, [tegra_clk_audio3_mux] = { .dt_id = TEGRA210_CLK_AUDIO3_MUX, .present = true }, [tegra_clk_audio4_mux] = { .dt_id = TEGRA210_CLK_AUDIO4_MUX, .present = true }, [tegra_clk_spdif_mux] = { .dt_id = TEGRA210_CLK_SPDIF_MUX, .present = true }, [tegra_clk_clk_out_1_mux] = { .dt_id = TEGRA210_CLK_CLK_OUT_1_MUX, .present = true }, [tegra_clk_clk_out_2_mux] = { .dt_id = TEGRA210_CLK_CLK_OUT_2_MUX, .present = true }, [tegra_clk_clk_out_3_mux] = { .dt_id = TEGRA210_CLK_CLK_OUT_3_MUX, .present = true }, [tegra_clk_maud] = { .dt_id = TEGRA210_CLK_MAUD, .present = true }, [tegra_clk_mipibif] = { .dt_id = TEGRA210_CLK_MIPIBIF, .present = true }, [tegra_clk_qspi] = { .dt_id = TEGRA210_CLK_QSPI, .present = true }, [tegra_clk_sdmmc_legacy] = { .dt_id = TEGRA210_CLK_SDMMC_LEGACY, .present = true }, [tegra_clk_tsecb] = { .dt_id = TEGRA210_CLK_TSECB, .present = true }, [tegra_clk_uartape] = { .dt_id = TEGRA210_CLK_UARTAPE, .present = true }, [tegra_clk_vi_i2c] = { .dt_id = TEGRA210_CLK_VI_I2C, .present = true }, [tegra_clk_ape] = { .dt_id = TEGRA210_CLK_APE, .present = true }, [tegra_clk_dbgapb] = { .dt_id = TEGRA210_CLK_DBGAPB, .present = true }, [tegra_clk_nvdec] = { .dt_id = TEGRA210_CLK_NVDEC, .present = true }, [tegra_clk_nvenc] = { .dt_id = TEGRA210_CLK_NVENC, .present = true }, [tegra_clk_nvjpg] = { .dt_id = TEGRA210_CLK_NVJPG,
.present = true }, [tegra_clk_pll_c4_out0] = { .dt_id = TEGRA210_CLK_PLL_C4_OUT0, .present = true }, [tegra_clk_pll_c4_out1] = { .dt_id = TEGRA210_CLK_PLL_C4_OUT1, .present = true }, [tegra_clk_pll_c4_out2] = { .dt_id = TEGRA210_CLK_PLL_C4_OUT2, .present = true }, [tegra_clk_pll_c4_out3] = { .dt_id = TEGRA210_CLK_PLL_C4_OUT3, .present = true }, [tegra_clk_apb2ape] = { .dt_id = TEGRA210_CLK_APB2APE, .present = true }, };

/*
 * clkdev lookup entries: map con_id/dev_id pairs used by drivers to the
 * device-tree clock IDs above.  The initializer continues past this chunk.
 */
static struct tegra_devclk devclks[] __initdata = { { .con_id = "clk_m", .dt_id = TEGRA210_CLK_CLK_M }, { .con_id = "pll_ref", .dt_id = TEGRA210_CLK_PLL_REF }, { .con_id = "clk_32k", .dt_id = TEGRA210_CLK_CLK_32K }, { .con_id = "clk_m_div2", .dt_id = TEGRA210_CLK_CLK_M_DIV2 }, { .con_id = "clk_m_div4", .dt_id = TEGRA210_CLK_CLK_M_DIV4 }, { .con_id = "pll_c", .dt_id = TEGRA210_CLK_PLL_C }, { .con_id = "pll_c_out1", .dt_id = TEGRA210_CLK_PLL_C_OUT1 }, { .con_id = "pll_c2", .dt_id = TEGRA210_CLK_PLL_C2 }, { .con_id = "pll_c3", .dt_id = TEGRA210_CLK_PLL_C3 }, { .con_id = "pll_p", .dt_id = TEGRA210_CLK_PLL_P }, { .con_id = "pll_p_out1", .dt_id = TEGRA210_CLK_PLL_P_OUT1 }, { .con_id = "pll_p_out2", .dt_id = TEGRA210_CLK_PLL_P_OUT2 }, { .con_id = "pll_p_out3", .dt_id = TEGRA210_CLK_PLL_P_OUT3 }, { .con_id = "pll_p_out4", .dt_id = TEGRA210_CLK_PLL_P_OUT4 }, { .con_id = "pll_m", .dt_id = TEGRA210_CLK_PLL_M }, { .con_id = "pll_m_out1", .dt_id = TEGRA210_CLK_PLL_M_OUT1 }, { .con_id = "pll_x", .dt_id = TEGRA210_CLK_PLL_X }, { .con_id = "pll_x_out0", .dt_id = TEGRA210_CLK_PLL_X_OUT0 }, { .con_id = "pll_u", .dt_id = TEGRA210_CLK_PLL_U }, { .con_id = "pll_u_out", .dt_id = TEGRA210_CLK_PLL_U_OUT }, { .con_id = "pll_u_out1", .dt_id = TEGRA210_CLK_PLL_U_OUT1 }, { .con_id = "pll_u_out2", .dt_id = TEGRA210_CLK_PLL_U_OUT2 }, { .con_id = "pll_u_480M", .dt_id = TEGRA210_CLK_PLL_U_480M }, { .con_id = "pll_u_60M", .dt_id = TEGRA210_CLK_PLL_U_60M }, { .con_id = "pll_u_48M", .dt_id = TEGRA210_CLK_PLL_U_48M }, { .con_id = "pll_d", .dt_id = TEGRA210_CLK_PLL_D }, { .con_id =
"pll_d_out0", .dt_id = TEGRA210_CLK_PLL_D_OUT0 }, { .con_id = "pll_d2", .dt_id = TEGRA210_CLK_PLL_D2 }, { .con_id = "pll_d2_out0", .dt_id = TEGRA210_CLK_PLL_D2_OUT0 }, { .con_id = "pll_a", .dt_id = TEGRA210_CLK_PLL_A }, { .con_id = "pll_a_out0", .dt_id = TEGRA210_CLK_PLL_A_OUT0 }, { .con_id = "pll_re_vco", .dt_id = TEGRA210_CLK_PLL_RE_VCO }, { .con_id = "pll_re_out", .dt_id = TEGRA210_CLK_PLL_RE_OUT }, { .con_id = "spdif_in_sync", .dt_id = TEGRA210_CLK_SPDIF_IN_SYNC }, { .con_id = "i2s0_sync", .dt_id = TEGRA210_CLK_I2S0_SYNC }, { .con_id = "i2s1_sync", .dt_id = TEGRA210_CLK_I2S1_SYNC }, { .con_id = "i2s2_sync", .dt_id = TEGRA210_CLK_I2S2_SYNC }, { .con_id = "i2s3_sync", .dt_id = TEGRA210_CLK_I2S3_SYNC }, { .con_id = "i2s4_sync", .dt_id = TEGRA210_CLK_I2S4_SYNC }, { .con_id = "vimclk_sync", .dt_id = TEGRA210_CLK_VIMCLK_SYNC }, { .con_id = "audio0", .dt_id = TEGRA210_CLK_AUDIO0 }, { .con_id = "audio1", .dt_id = TEGRA210_CLK_AUDIO1 }, { .con_id = "audio2", .dt_id = TEGRA210_CLK_AUDIO2 }, { .con_id = "audio3", .dt_id = TEGRA210_CLK_AUDIO3 }, { .con_id = "audio4", .dt_id = TEGRA210_CLK_AUDIO4 }, { .con_id = "spdif", .dt_id = TEGRA210_CLK_SPDIF }, { .con_id = "spdif_2x", .dt_id = TEGRA210_CLK_SPDIF_2X }, { .con_id = "extern1", .dev_id = "clk_out_1", .dt_id = TEGRA210_CLK_EXTERN1 }, { .con_id = "extern2", .dev_id = "clk_out_2", .dt_id = TEGRA210_CLK_EXTERN2 }, { .con_id = "extern3", .dev_id = "clk_out_3", .dt_id = TEGRA210_CLK_EXTERN3 }, { .con_id = "blink", .dt_id = TEGRA210_CLK_BLINK }, { .con_id = "cclk_g", .dt_id = TEGRA210_CLK_CCLK_G }, { .con_id = "cclk_lp", .dt_id = TEGRA210_CLK_CCLK_LP }, { .con_id = "sclk", .dt_id = TEGRA210_CLK_SCLK }, { .con_id = "hclk", .dt_id = TEGRA210_CLK_HCLK }, { .con_id = "pclk", .dt_id = TEGRA210_CLK_PCLK }, { .con_id = "fuse", .dt_id = TEGRA210_CLK_FUSE }, { .dev_id = "rtc-tegra", .dt_id = TEGRA210_CLK_RTC }, { .dev_id = "timer", .dt_id = TEGRA210_CLK_TIMER }, { .con_id = "pll_c4_out0", .dt_id = TEGRA210_CLK_PLL_C4_OUT0 }, { .con_id = 
"pll_c4_out1", .dt_id = TEGRA210_CLK_PLL_C4_OUT1 }, { .con_id = "pll_c4_out2", .dt_id = TEGRA210_CLK_PLL_C4_OUT2 }, { .con_id = "pll_c4_out3", .dt_id = TEGRA210_CLK_PLL_C4_OUT3 }, { .con_id = "dpaux", .dt_id = TEGRA210_CLK_DPAUX }, { .con_id = "sor0", .dt_id = TEGRA210_CLK_SOR0 }, }; static struct tegra_audio_clk_info tegra210_audio_plls[] = { { "pll_a", &pll_a_params, tegra_clk_pll_a, "pll_ref" }, { "pll_a1", &pll_a1_params, tegra_clk_pll_a1, "pll_ref" }, }; static struct clk **clks; static __init void tegra210_periph_clk_init(void __iomem *clk_base, void __iomem *pmc_base) { struct clk *clk; /* xusb_ss_div2 */ clk = clk_register_fixed_factor(NULL, "xusb_ss_div2", "xusb_ss_src", 0, 1, 2); clks[TEGRA210_CLK_XUSB_SS_DIV2] = clk; clk = tegra_clk_register_periph_fixed("sor_safe", "pll_p", 0, clk_base, 1, 17, 222); clks[TEGRA210_CLK_SOR_SAFE] = clk; clk = tegra_clk_register_periph_fixed("dpaux", "sor_safe", 0, clk_base, 1, 17, 181); clks[TEGRA210_CLK_DPAUX] = clk; clk = tegra_clk_register_periph_fixed("dpaux1", "sor_safe", 0, clk_base, 1, 17, 207); clks[TEGRA210_CLK_DPAUX1] = clk; /* pll_d_dsi_out */ clk = clk_register_gate(NULL, "pll_d_dsi_out", "pll_d_out0", 0, clk_base + PLLD_MISC0, 21, 0, &pll_d_lock); clks[TEGRA210_CLK_PLL_D_DSI_OUT] = clk; /* dsia */ clk = tegra_clk_register_periph_gate("dsia", "pll_d_dsi_out", 0, clk_base, 0, 48, periph_clk_enb_refcnt); clks[TEGRA210_CLK_DSIA] = clk; /* dsib */ clk = tegra_clk_register_periph_gate("dsib", "pll_d_dsi_out", 0, clk_base, 0, 82, periph_clk_enb_refcnt); clks[TEGRA210_CLK_DSIB] = clk; /* emc mux */ clk = clk_register_mux(NULL, "emc_mux", mux_pllmcp_clkm, ARRAY_SIZE(mux_pllmcp_clkm), 0, clk_base + CLK_SOURCE_EMC, 29, 3, 0, &emc_lock); clk = tegra_clk_register_mc("mc", "emc_mux", clk_base + CLK_SOURCE_EMC, &emc_lock); clks[TEGRA210_CLK_MC] = clk; /* cml0 */ clk = clk_register_gate(NULL, "cml0", "pll_e", 0, clk_base + PLLE_AUX, 0, 0, &pll_e_lock); clk_register_clkdev(clk, "cml0", NULL); clks[TEGRA210_CLK_CML0] = clk; /* 
cml1 */ clk = clk_register_gate(NULL, "cml1", "pll_e", 0, clk_base + PLLE_AUX, 1, 0, &pll_e_lock); clk_register_clkdev(clk, "cml1", NULL); clks[TEGRA210_CLK_CML1] = clk; tegra_periph_clk_init(clk_base, pmc_base, tegra210_clks, &pll_p_params); } static void __init tegra210_pll_init(void __iomem *clk_base, void __iomem *pmc) { struct clk *clk; /* PLLC */ clk = tegra_clk_register_pllxc_tegra210("pll_c", "pll_ref", clk_base, pmc, 0, &pll_c_params, NULL); if (!WARN_ON(IS_ERR(clk))) clk_register_clkdev(clk, "pll_c", NULL); clks[TEGRA210_CLK_PLL_C] = clk; /* PLLC_OUT1 */ clk = tegra_clk_register_divider("pll_c_out1_div", "pll_c", clk_base + PLLC_OUT, 0, TEGRA_DIVIDER_ROUND_UP, 8, 8, 1, NULL); clk = tegra_clk_register_pll_out("pll_c_out1", "pll_c_out1_div", clk_base + PLLC_OUT, 1, 0, CLK_SET_RATE_PARENT, 0, NULL); clk_register_clkdev(clk, "pll_c_out1", NULL); clks[TEGRA210_CLK_PLL_C_OUT1] = clk; /* PLLC_UD */ clk = clk_register_fixed_factor(NULL, "pll_c_ud", "pll_c", CLK_SET_RATE_PARENT, 1, 1); clk_register_clkdev(clk, "pll_c_ud", NULL); clks[TEGRA210_CLK_PLL_C_UD] = clk; /* PLLC2 */ clk = tegra_clk_register_pllc_tegra210("pll_c2", "pll_ref", clk_base, pmc, 0, &pll_c2_params, NULL); clk_register_clkdev(clk, "pll_c2", NULL); clks[TEGRA210_CLK_PLL_C2] = clk; /* PLLC3 */ clk = tegra_clk_register_pllc_tegra210("pll_c3", "pll_ref", clk_base, pmc, 0, &pll_c3_params, NULL); clk_register_clkdev(clk, "pll_c3", NULL); clks[TEGRA210_CLK_PLL_C3] = clk; /* PLLM */ clk = tegra_clk_register_pllm("pll_m", "osc", clk_base, pmc, CLK_SET_RATE_GATE, &pll_m_params, NULL); clk_register_clkdev(clk, "pll_m", NULL); clks[TEGRA210_CLK_PLL_M] = clk; /* PLLMB */ clk = tegra_clk_register_pllmb("pll_mb", "osc", clk_base, pmc, CLK_SET_RATE_GATE, &pll_mb_params, NULL); clk_register_clkdev(clk, "pll_mb", NULL); clks[TEGRA210_CLK_PLL_MB] = clk; clk_register_clkdev(clk, "pll_m_out1", NULL); clks[TEGRA210_CLK_PLL_M_OUT1] = clk; /* PLLM_UD */ clk = clk_register_fixed_factor(NULL, "pll_m_ud", "pll_m", 
CLK_SET_RATE_PARENT, 1, 1); clk_register_clkdev(clk, "pll_m_ud", NULL); clks[TEGRA210_CLK_PLL_M_UD] = clk; /* PLLU_VCO */ clk = tegra_clk_register_pllu_tegra210("pll_u_vco", "pll_ref", clk_base, 0, &pll_u_vco_params, &pll_u_lock); clk_register_clkdev(clk, "pll_u_vco", NULL); clks[TEGRA210_CLK_PLL_U] = clk; /* PLLU_OUT */ clk = clk_register_divider_table(NULL, "pll_u_out", "pll_u_vco", 0, clk_base + PLLU_BASE, 16, 4, 0, pll_vco_post_div_table, NULL); clk_register_clkdev(clk, "pll_u_out", NULL); clks[TEGRA210_CLK_PLL_U_OUT] = clk; /* PLLU_OUT1 */ clk = tegra_clk_register_divider("pll_u_out1_div", "pll_u_out", clk_base + PLLU_OUTA, 0, TEGRA_DIVIDER_ROUND_UP, 8, 8, 1, &pll_u_lock); clk = tegra_clk_register_pll_out("pll_u_out1", "pll_u_out1_div", clk_base + PLLU_OUTA, 1, 0, CLK_SET_RATE_PARENT, 0, &pll_u_lock); clk_register_clkdev(clk, "pll_u_out1", NULL); clks[TEGRA210_CLK_PLL_U_OUT1] = clk; /* PLLU_OUT2 */ clk = tegra_clk_register_divider("pll_u_out2_div", "pll_u_out", clk_base + PLLU_OUTA, 0, TEGRA_DIVIDER_ROUND_UP, 24, 8, 1, &pll_u_lock); clk = tegra_clk_register_pll_out("pll_u_out2", "pll_u_out2_div", clk_base + PLLU_OUTA, 17, 16, CLK_SET_RATE_PARENT, 0, &pll_u_lock); clk_register_clkdev(clk, "pll_u_out2", NULL); clks[TEGRA210_CLK_PLL_U_OUT2] = clk; /* PLLU_480M */ clk = clk_register_gate(NULL, "pll_u_480M", "pll_u_vco", CLK_SET_RATE_PARENT, clk_base + PLLU_BASE, 22, 0, &pll_u_lock); clk_register_clkdev(clk, "pll_u_480M", NULL); clks[TEGRA210_CLK_PLL_U_480M] = clk; /* PLLU_60M */ clk = clk_register_gate(NULL, "pll_u_60M", "pll_u_out2", CLK_SET_RATE_PARENT, clk_base + PLLU_BASE, 23, 0, NULL); clk_register_clkdev(clk, "pll_u_60M", NULL); clks[TEGRA210_CLK_PLL_U_60M] = clk; /* PLLU_48M */ clk = clk_register_gate(NULL, "pll_u_48M", "pll_u_out1", CLK_SET_RATE_PARENT, clk_base + PLLU_BASE, 25, 0, NULL); clk_register_clkdev(clk, "pll_u_48M", NULL); clks[TEGRA210_CLK_PLL_U_48M] = clk; /* PLLD */ clk = tegra_clk_register_pll("pll_d", "pll_ref", clk_base, pmc, 0, 
&pll_d_params, &pll_d_lock); clk_register_clkdev(clk, "pll_d", NULL); clks[TEGRA210_CLK_PLL_D] = clk; /* PLLD_OUT0 */ clk = clk_register_fixed_factor(NULL, "pll_d_out0", "pll_d", CLK_SET_RATE_PARENT, 1, 2); clk_register_clkdev(clk, "pll_d_out0", NULL); clks[TEGRA210_CLK_PLL_D_OUT0] = clk; /* PLLRE */ clk = tegra_clk_register_pllre_tegra210("pll_re_vco", "pll_ref", clk_base, pmc, 0, &pll_re_vco_params, &pll_re_lock, pll_ref_freq); clk_register_clkdev(clk, "pll_re_vco", NULL); clks[TEGRA210_CLK_PLL_RE_VCO] = clk; clk = clk_register_divider_table(NULL, "pll_re_out", "pll_re_vco", 0, clk_base + PLLRE_BASE, 16, 5, 0, pll_vco_post_div_table, &pll_re_lock); clk_register_clkdev(clk, "pll_re_out", NULL); clks[TEGRA210_CLK_PLL_RE_OUT] = clk; clk = tegra_clk_register_divider("pll_re_out1_div", "pll_re_vco", clk_base + PLLRE_OUT1, 0, TEGRA_DIVIDER_ROUND_UP, 8, 8, 1, NULL); clk = tegra_clk_register_pll_out("pll_re_out1", "pll_re_out1_div", clk_base + PLLRE_OUT1, 1, 0, CLK_SET_RATE_PARENT, 0, NULL); clks[TEGRA210_CLK_PLL_RE_OUT1] = clk; /* PLLE */ clk = tegra_clk_register_plle_tegra210("pll_e", "pll_ref", clk_base, 0, &pll_e_params, NULL); clk_register_clkdev(clk, "pll_e", NULL); clks[TEGRA210_CLK_PLL_E] = clk; /* PLLC4 */ clk = tegra_clk_register_pllre("pll_c4_vco", "pll_ref", clk_base, pmc, 0, &pll_c4_vco_params, NULL, pll_ref_freq); clk_register_clkdev(clk, "pll_c4_vco", NULL); clks[TEGRA210_CLK_PLL_C4] = clk; /* PLLC4_OUT0 */ clk = clk_register_divider_table(NULL, "pll_c4_out0", "pll_c4_vco", 0, clk_base + PLLC4_BASE, 19, 4, 0, pll_vco_post_div_table, NULL); clk_register_clkdev(clk, "pll_c4_out0", NULL); clks[TEGRA210_CLK_PLL_C4_OUT0] = clk; /* PLLC4_OUT1 */ clk = clk_register_fixed_factor(NULL, "pll_c4_out1", "pll_c4_vco", CLK_SET_RATE_PARENT, 1, 3); clk_register_clkdev(clk, "pll_c4_out1", NULL); clks[TEGRA210_CLK_PLL_C4_OUT1] = clk; /* PLLC4_OUT2 */ clk = clk_register_fixed_factor(NULL, "pll_c4_out2", "pll_c4_vco", CLK_SET_RATE_PARENT, 1, 5); clk_register_clkdev(clk, 
"pll_c4_out2", NULL); clks[TEGRA210_CLK_PLL_C4_OUT2] = clk; /* PLLC4_OUT3 */ clk = tegra_clk_register_divider("pll_c4_out3_div", "pll_c4_out0", clk_base + PLLC4_OUT, 0, TEGRA_DIVIDER_ROUND_UP, 8, 8, 1, NULL); clk = tegra_clk_register_pll_out("pll_c4_out3", "pll_c4_out3_div", clk_base + PLLC4_OUT, 1, 0, CLK_SET_RATE_PARENT, 0, NULL); clk_register_clkdev(clk, "pll_c4_out3", NULL); clks[TEGRA210_CLK_PLL_C4_OUT3] = clk; /* PLLDP */ clk = tegra_clk_register_pllss_tegra210("pll_dp", "pll_ref", clk_base, 0, &pll_dp_params, NULL); clk_register_clkdev(clk, "pll_dp", NULL); clks[TEGRA210_CLK_PLL_DP] = clk; /* PLLD2 */ clk = tegra_clk_register_pllss_tegra210("pll_d2", "pll_ref", clk_base, 0, &pll_d2_params, NULL); clk_register_clkdev(clk, "pll_d2", NULL); clks[TEGRA210_CLK_PLL_D2] = clk; /* PLLD2_OUT0 */ clk = clk_register_fixed_factor(NULL, "pll_d2_out0", "pll_d2", CLK_SET_RATE_PARENT, 1, 1); clk_register_clkdev(clk, "pll_d2_out0", NULL); clks[TEGRA210_CLK_PLL_D2_OUT0] = clk; /* PLLP_OUT2 */ clk = clk_register_fixed_factor(NULL, "pll_p_out2", "pll_p", CLK_SET_RATE_PARENT, 1, 2); clk_register_clkdev(clk, "pll_p_out2", NULL); clks[TEGRA210_CLK_PLL_P_OUT2] = clk; } /* Tegra210 CPU clock and reset control functions */ static void tegra210_wait_cpu_in_reset(u32 cpu) { unsigned int reg; do { reg = readl(clk_base + CLK_RST_CONTROLLER_CPU_CMPLX_STATUS); cpu_relax(); } while (!(reg & (1 << cpu))); /* check CPU been reset or not */ } static void tegra210_disable_cpu_clock(u32 cpu) { /* flow controller would take care in the power sequence. 
*/ } #ifdef CONFIG_PM_SLEEP static void tegra210_cpu_clock_suspend(void) { /* switch coresite to clk_m, save off original source */ tegra210_cpu_clk_sctx.clk_csite_src = readl(clk_base + CLK_SOURCE_CSITE); writel(3 << 30, clk_base + CLK_SOURCE_CSITE); } static void tegra210_cpu_clock_resume(void) { writel(tegra210_cpu_clk_sctx.clk_csite_src, clk_base + CLK_SOURCE_CSITE); } #endif static struct tegra_cpu_car_ops tegra210_cpu_car_ops = { .wait_for_reset = tegra210_wait_cpu_in_reset, .disable_clock = tegra210_disable_cpu_clock, #ifdef CONFIG_PM_SLEEP .suspend = tegra210_cpu_clock_suspend, .resume = tegra210_cpu_clock_resume, #endif }; static const struct of_device_id pmc_match[] __initconst = { { .compatible = "nvidia,tegra210-pmc" }, { }, }; static struct tegra_clk_init_table init_table[] __initdata = { { TEGRA210_CLK_UARTA, TEGRA210_CLK_PLL_P, 408000000, 0 }, { TEGRA210_CLK_UARTB, TEGRA210_CLK_PLL_P, 408000000, 0 }, { TEGRA210_CLK_UARTC, TEGRA210_CLK_PLL_P, 408000000, 0 }, { TEGRA210_CLK_UARTD, TEGRA210_CLK_PLL_P, 408000000, 0 }, { TEGRA210_CLK_PLL_A, TEGRA210_CLK_CLK_MAX, 564480000, 1 }, { TEGRA210_CLK_PLL_A_OUT0, TEGRA210_CLK_CLK_MAX, 11289600, 1 }, { TEGRA210_CLK_EXTERN1, TEGRA210_CLK_PLL_A_OUT0, 0, 1 }, { TEGRA210_CLK_CLK_OUT_1_MUX, TEGRA210_CLK_EXTERN1, 0, 1 }, { TEGRA210_CLK_CLK_OUT_1, TEGRA210_CLK_CLK_MAX, 0, 1 }, { TEGRA210_CLK_I2S0, TEGRA210_CLK_PLL_A_OUT0, 11289600, 0 }, { TEGRA210_CLK_I2S1, TEGRA210_CLK_PLL_A_OUT0, 11289600, 0 }, { TEGRA210_CLK_I2S2, TEGRA210_CLK_PLL_A_OUT0, 11289600, 0 }, { TEGRA210_CLK_I2S3, TEGRA210_CLK_PLL_A_OUT0, 11289600, 0 }, { TEGRA210_CLK_I2S4, TEGRA210_CLK_PLL_A_OUT0, 11289600, 0 }, { TEGRA210_CLK_HOST1X, TEGRA210_CLK_PLL_P, 136000000, 1 }, { TEGRA210_CLK_SCLK_MUX, TEGRA210_CLK_PLL_P, 0, 1 }, { TEGRA210_CLK_SCLK, TEGRA210_CLK_CLK_MAX, 102000000, 1 }, { TEGRA210_CLK_DFLL_SOC, TEGRA210_CLK_PLL_P, 51000000, 1 }, { TEGRA210_CLK_DFLL_REF, TEGRA210_CLK_PLL_P, 51000000, 1 }, { TEGRA210_CLK_SBC4, TEGRA210_CLK_PLL_P, 12000000, 1 }, { 
TEGRA210_CLK_PLL_RE_VCO, TEGRA210_CLK_CLK_MAX, 672000000, 1 }, { TEGRA210_CLK_XUSB_GATE, TEGRA210_CLK_CLK_MAX, 0, 1 }, { TEGRA210_CLK_XUSB_SS_SRC, TEGRA210_CLK_PLL_U_480M, 120000000, 0 }, { TEGRA210_CLK_XUSB_FS_SRC, TEGRA210_CLK_PLL_U_48M, 48000000, 0 }, { TEGRA210_CLK_XUSB_HS_SRC, TEGRA210_CLK_XUSB_SS_SRC, 120000000, 0 }, { TEGRA210_CLK_XUSB_SSP_SRC, TEGRA210_CLK_XUSB_SS_SRC, 120000000, 0 }, { TEGRA210_CLK_XUSB_FALCON_SRC, TEGRA210_CLK_PLL_P_OUT_XUSB, 204000000, 0 }, { TEGRA210_CLK_XUSB_HOST_SRC, TEGRA210_CLK_PLL_P_OUT_XUSB, 102000000, 0 }, { TEGRA210_CLK_XUSB_DEV_SRC, TEGRA210_CLK_PLL_P_OUT_XUSB, 102000000, 0 }, { TEGRA210_CLK_SATA, TEGRA210_CLK_PLL_P, 104000000, 0 }, { TEGRA210_CLK_SATA_OOB, TEGRA210_CLK_PLL_P, 204000000, 0 }, { TEGRA210_CLK_EMC, TEGRA210_CLK_CLK_MAX, 0, 1 }, { TEGRA210_CLK_MSELECT, TEGRA210_CLK_CLK_MAX, 0, 1 }, { TEGRA210_CLK_CSITE, TEGRA210_CLK_CLK_MAX, 0, 1 }, { TEGRA210_CLK_TSENSOR, TEGRA210_CLK_CLK_M, 400000, 0 }, { TEGRA210_CLK_I2C1, TEGRA210_CLK_PLL_P, 0, 0 }, { TEGRA210_CLK_I2C2, TEGRA210_CLK_PLL_P, 0, 0 }, { TEGRA210_CLK_I2C3, TEGRA210_CLK_PLL_P, 0, 0 }, { TEGRA210_CLK_I2C4, TEGRA210_CLK_PLL_P, 0, 0 }, { TEGRA210_CLK_I2C5, TEGRA210_CLK_PLL_P, 0, 0 }, { TEGRA210_CLK_I2C6, TEGRA210_CLK_PLL_P, 0, 0 }, { TEGRA210_CLK_PLL_DP, TEGRA210_CLK_CLK_MAX, 270000000, 0 }, { TEGRA210_CLK_SOC_THERM, TEGRA210_CLK_PLL_P, 51000000, 0 }, { TEGRA210_CLK_CCLK_G, TEGRA210_CLK_CLK_MAX, 0, 1 }, /* This MUST be the last entry. */ { TEGRA210_CLK_CLK_MAX, TEGRA210_CLK_CLK_MAX, 0, 0 }, }; /** * tegra210_clock_apply_init_table - initialize clocks on Tegra210 SoCs * * Program an initial clock rate and enable or disable clocks needed * by the rest of the kernel, for Tegra210 SoCs. It is intended to be * called by assigning a pointer to it to tegra_clk_apply_init_table - * this will be called as an arch_initcall. No return value. 
*/ static void __init tegra210_clock_apply_init_table(void) { tegra_init_from_table(init_table, clks, TEGRA210_CLK_CLK_MAX); } /** * tegra210_clock_init - Tegra210-specific clock initialization * @np: struct device_node * of the DT node for the SoC CAR IP block * * Register most SoC clocks for the Tegra210 system-on-chip. Intended * to be called by the OF init code when a DT node with the * "nvidia,tegra210-car" string is encountered, and declared with * CLK_OF_DECLARE. No return value. */ static void __init tegra210_clock_init(struct device_node *np) { struct device_node *node; u32 value, clk_m_div; clk_base = of_iomap(np, 0); if (!clk_base) { pr_err("ioremap tegra210 CAR failed\n"); return; } node = of_find_matching_node(NULL, pmc_match); if (!node) { pr_err("Failed to find pmc node\n"); WARN_ON(1); return; } pmc_base = of_iomap(node, 0); if (!pmc_base) { pr_err("Can't map pmc registers\n"); WARN_ON(1); return; } clks = tegra_clk_init(clk_base, TEGRA210_CLK_CLK_MAX, TEGRA210_CAR_BANK_COUNT); if (!clks) return; value = clk_readl(clk_base + SPARE_REG0) >> CLK_M_DIVISOR_SHIFT; clk_m_div = (value & CLK_M_DIVISOR_MASK) + 1; if (tegra_osc_clk_init(clk_base, tegra210_clks, tegra210_input_freq, ARRAY_SIZE(tegra210_input_freq), clk_m_div, &osc_freq, &pll_ref_freq) < 0) return; tegra_fixed_clk_init(tegra210_clks); tegra210_pll_init(clk_base, pmc_base); tegra210_periph_clk_init(clk_base, pmc_base); tegra_audio_clk_init(clk_base, pmc_base, tegra210_clks, tegra210_audio_plls, ARRAY_SIZE(tegra210_audio_plls)); tegra_pmc_clk_init(pmc_base, tegra210_clks); /* For Tegra210, PLLD is the only source for DSIA & DSIB */ value = clk_readl(clk_base + PLLD_BASE); value &= ~BIT(25); clk_writel(value, clk_base + PLLD_BASE); tegra_clk_apply_init_table = tegra210_clock_apply_init_table; tegra_super_clk_gen5_init(clk_base, pmc_base, tegra210_clks, &pll_x_params); tegra_add_of_provider(np); tegra_register_devclks(devclks, ARRAY_SIZE(devclks)); tegra_cpu_car_ops = &tegra210_cpu_car_ops; } 
CLK_OF_DECLARE(tegra210, "nvidia,tegra210-car", tegra210_clock_init);
gpl-2.0
CyanogenModXT720/android_kernel
drivers/staging/otus/80211core/cagg.c
189
110445
/* * Copyright (c) 2007-2008 Atheros Communications Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ /* */ /* Module Name : cagg.c */ /* */ /* Abstract */ /* This module contains A-MPDU aggregation related functions. */ /* */ /* NOTES */ /* None */ /* */ /************************************************************************/ #include "cprecomp.h" extern u8_t zcUpToAc[8]; const u8_t pri[] = {3,3,2,3,2,1,3,2,1,0}; u16_t aggr_count; u32_t success_mpdu; u32_t total_mpdu; void zfAggInit(zdev_t* dev) { u16_t i,j; zmw_get_wlan_dev(dev); zmw_declare_for_critical_section(); /* * reset sta information */ zmw_enter_critical_section(dev); wd->aggInitiated = 0; wd->addbaComplete = 0; wd->addbaCount = 0; wd->reorder = 1; for (i=0; i<ZM_MAX_STA_SUPPORT; i++) { for (j=0; j<ZM_AC; j++) { //wd->aggSta[i].aggQNumber[j] = ZM_AGG_POOL_SIZE; wd->aggSta[i].aggFlag[j] = wd->aggSta[i].count[j] = 0; wd->aggSta[i].tid_tx[j] = NULL; wd->aggSta[i].tid_tx[j+1] = NULL; } } /* * reset Tx/Rx aggregation queue information */ wd->aggState = 0; for (i=0; i<ZM_AGG_POOL_SIZE; i++) { /* * reset tx aggregation queue */ wd->aggQPool[i] = zfwMemAllocate(dev, sizeof(struct aggQueue)); if(!wd->aggQPool[i]) { zmw_leave_critical_section(dev); return; } wd->aggQPool[i]->aggHead = wd->aggQPool[i]->aggTail = wd->aggQPool[i]->aggQEnabled = 
wd->aggQPool[i]->aggReady = wd->aggQPool[i]->clearFlag = wd->aggQPool[i]->deleteFlag = 0; //wd->aggQPool[i]->aggSize = 16; /* * reset rx aggregation queue */ wd->tid_rx[i] = zfwMemAllocate(dev, sizeof(struct agg_tid_rx)); if (!wd->tid_rx[i]) { zmw_leave_critical_section(dev); return; } wd->tid_rx[i]->aid = ZM_MAX_STA_SUPPORT; wd->tid_rx[i]->seq_start = wd->tid_rx[i]->baw_head = \ wd->tid_rx[i]->baw_tail = 0; wd->tid_rx[i]->sq_exceed_count = wd->tid_rx[i]->sq_behind_count = 0; for (j=0; j<=ZM_AGG_BAW_SIZE; j++) wd->tid_rx[i]->frame[j].buf = 0; /* * reset ADDBA exchange status code * 0: NULL * 1: ADDBA Request sent/received * 2: ACK for ADDBA Request sent/received * 3: ADDBA Response sent/received * 4: ACK for ADDBA Response sent/received */ wd->tid_rx[i]->addBaExchangeStatusCode = 0; } zmw_leave_critical_section(dev); zfAggTallyReset(dev); DESTQ.init = zfAggDestInit; DESTQ.init(dev); wd->aggInitiated = 1; aggr_count = 0; success_mpdu = 0; total_mpdu = 0; #ifdef ZM_ENABLE_AGGREGATION #ifndef ZM_ENABLE_FW_BA_RETRANSMISSION //disable BAW BAW = zfwMemAllocate(dev, sizeof(struct baw_enabler)); if(!BAW) { return; } BAW->init = zfBawInit; BAW->init(dev); #endif //disable BAW #endif } /************************************************************************/ /* */ /* FUNCTION DESCRIPTION zfAggGetSta */ /* return STA AID. */ /* take buf as input, use the dest address of buf as index to */ /* search STA AID. 
*/ /* */ /* INPUTS */ /* dev : device pointer */ /* buf : buffer for one particular packet */ /* */ /* OUTPUTS */ /* AID */ /* */ /* AUTHOR */ /* Honda ZyDAS Technology Corporation 2006.11 */ /* */ /************************************************************************/ u16_t zfAggGetSta(zdev_t* dev, zbuf_t* buf) { u16_t id; u16_t dst[3]; zmw_get_wlan_dev(dev); zmw_declare_for_critical_section(); dst[0] = zmw_rx_buf_readh(dev, buf, 0); dst[1] = zmw_rx_buf_readh(dev, buf, 2); dst[2] = zmw_rx_buf_readh(dev, buf, 4); zmw_enter_critical_section(dev); if(wd->wlanMode == ZM_MODE_AP) { id = zfApFindSta(dev, dst); } else { id = 0; } zmw_leave_critical_section(dev); #if ZM_AGG_FPGA_DEBUG id = 0; #endif return id; } /************************************************************************/ /* */ /* FUNCTION DESCRIPTION zfAggTxGetQueue */ /* return Queue Pool index. */ /* take aid as input, look for the queue index associated */ /* with this aid. */ /* */ /* INPUTS */ /* dev : device pointer */ /* aid : associated id */ /* */ /* OUTPUTS */ /* Queue number */ /* */ /* AUTHOR */ /* Honda ZyDAS Technology Corporation 2006.11 */ /* */ /************************************************************************/ TID_TX zfAggTxGetQueue(zdev_t* dev, u16_t aid, u16_t tid) { //u16_t i; TID_TX tid_tx; zmw_get_wlan_dev(dev); //zmw_declare_for_critical_section(); /* * not a STA aid */ if (0xffff == aid) return NULL; //zmw_enter_critical_section(dev); tid_tx = wd->aggSta[aid].tid_tx[tid]; if (!tid_tx) return NULL; if (0 == tid_tx->aggQEnabled) return NULL; //zmw_leave_critical_section(dev); return tid_tx; } /************************************************************************/ /* */ /* FUNCTION DESCRIPTION zfAggTxNewQueue */ /* return Queue Pool index. */ /* take aid as input, find a new queue for this aid. 
*/ /* */ /* INPUTS */ /* dev : device pointer */ /* aid : associated id */ /* */ /* OUTPUTS */ /* Queue number */ /* */ /* AUTHOR */ /* Honda ZyDAS Technology Corporation 2006.12 */ /* */ /************************************************************************/ TID_TX zfAggTxNewQueue(zdev_t* dev, u16_t aid, u16_t tid, zbuf_t* buf) { u16_t i; TID_TX tid_tx=NULL; u16_t ac = zcUpToAc[tid&0x7] & 0x3; zmw_get_wlan_dev(dev); zmw_declare_for_critical_section(); /* * not a STA aid */ if (0xffff == aid) return NULL; zmw_enter_critical_section(dev); /* * find one new queue for sta */ for (i=0; i<ZM_AGG_POOL_SIZE; i++) { if (wd->aggQPool[i]->aggQEnabled) { /* * this q is enabled */ } else { tid_tx = wd->aggQPool[i]; tid_tx->aggQEnabled = 1; tid_tx->aggQSTA = aid; tid_tx->ac = ac; tid_tx->tid = tid; tid_tx->aggHead = tid_tx->aggTail = tid_tx->size = 0; tid_tx->aggReady = 0; wd->aggSta[aid].tid_tx[tid] = tid_tx; tid_tx->dst[0] = zmw_rx_buf_readh(dev, buf, 0); tid_tx->dst[1] = zmw_rx_buf_readh(dev, buf, 2); tid_tx->dst[2] = zmw_rx_buf_readh(dev, buf, 4); break; } } zmw_leave_critical_section(dev); return tid_tx; } /************************************************************************/ /* */ /* FUNCTION DESCRIPTION zfAggTxEnqueue */ /* return Status code ZM_SUCCESS or error code */ /* take (aid,ac,qnum,buf) as input */ /* */ /* INPUTS */ /* dev : device pointer */ /* aid : associated id */ /* ac : access category */ /* qnum: the queue number to which will be enqueued */ /* buf : the packet to be queued */ /* */ /* OUTPUTS */ /* status code */ /* */ /* AUTHOR */ /* Honda Atheros Communications, INC. 
2006.12 */ /* */ /************************************************************************/ u16_t zfAggTxEnqueue(zdev_t* dev, zbuf_t* buf, u16_t aid, TID_TX tid_tx) { //u16_t qlen, frameLen; u32_t time; zmw_get_wlan_dev(dev); zmw_declare_for_critical_section(); zmw_enter_critical_section(dev); tid_tx->size = zm_agg_qlen(dev, tid_tx->aggHead, tid_tx->aggTail); if (tid_tx->size < (ZM_AGGQ_SIZE - 2)) { /* Queue not full */ /* * buffer copy * in zfwBufFree will return a ndismsendcomplete * to resolve the synchronize problem in aggregate */ u8_t sendComplete = 0; tid_tx->aggvtxq[tid_tx->aggHead].buf = buf; time = zm_agg_GetTime(); tid_tx->aggvtxq[tid_tx->aggHead].arrivalTime = time; tid_tx->aggvtxq[tid_tx->aggHead].baw_retransmit = 0; tid_tx->aggHead = ((tid_tx->aggHead + 1) & ZM_AGGQ_SIZE_MASK); tid_tx->lastArrival = time; tid_tx->size++; tid_tx->size = zm_agg_qlen(dev, tid_tx->aggHead, tid_tx->aggTail); if (buf && (tid_tx->size < (ZM_AGGQ_SIZE - 10))) { tid_tx->complete = tid_tx->aggHead; sendComplete = 1; } zmw_leave_critical_section(dev); if (!DESTQ.exist(dev, 0, tid_tx->ac, tid_tx, NULL)) { DESTQ.insert(dev, 0, tid_tx->ac, tid_tx, NULL); } zm_msg1_agg(ZM_LV_0, "tid_tx->size=", tid_tx->size); //zm_debug_msg1("tid_tx->size=", tid_tx->size); if (buf && sendComplete && wd->zfcbSendCompleteIndication) { //zmw_leave_critical_section(dev); wd->zfcbSendCompleteIndication(dev, buf); } /*if (tid_tx->size >= 16 && zfHpGetFreeTxdCount(dev) > 20) zfAggTxSend(dev, zfHpGetFreeTxdCount(dev), tid_tx); */ return ZM_SUCCESS; } else { zm_msg1_agg(ZM_LV_0, "can't enqueue, tid_tx->size=", tid_tx->size); /* * Queue Full */ /* * zm_msg1_agg(ZM_LV_0, "Queue full, qnum = ", qnum); * wd->commTally.txQosDropCount[ac]++; * zfwBufFree(dev, buf, ZM_SUCCESS); * zm_msg1_agg(ZM_LV_1, "Packet discarded, VTXQ full, ac=", ac); * * return ZM_ERR_EXCEED_PRIORITY_THRESHOLD; */ } zmw_leave_critical_section(dev); if (!DESTQ.exist(dev, 0, tid_tx->ac, tid_tx, NULL)) { DESTQ.insert(dev, 0, tid_tx->ac, tid_tx, 
NULL); } return ZM_ERR_EXCEED_PRIORITY_THRESHOLD; } u16_t zfAggDestExist(zdev_t* dev, u16_t Qtype, u16_t ac, TID_TX tid_tx, void* vtxq) { struct dest* dest; u16_t exist = 0; zmw_get_wlan_dev(dev); zmw_declare_for_critical_section(); zmw_enter_critical_section(dev); if (!DESTQ.Head[ac]) { exist = 0; } else { dest = DESTQ.Head[ac]; if (dest->tid_tx == tid_tx) { exist = 1; } else { while (dest->next != DESTQ.Head[ac]) { dest = dest->next; if (dest->tid_tx == tid_tx){ exist = 1; break; } } } } zmw_leave_critical_section(dev); return exist; } void zfAggDestInsert(zdev_t* dev, u16_t Qtype, u16_t ac, TID_TX tid_tx, void* vtxq) { struct dest* new_dest; zmw_get_wlan_dev(dev); zmw_declare_for_critical_section(); new_dest = zfwMemAllocate(dev, sizeof(struct dest)); if(!new_dest) { return; } new_dest->Qtype = Qtype; new_dest->tid_tx = tid_tx; if (0 == Qtype) new_dest->tid_tx = tid_tx; else new_dest->vtxq = vtxq; if (!DESTQ.Head[ac]) { zmw_enter_critical_section(dev); new_dest->next = new_dest; DESTQ.Head[ac] = DESTQ.dest[ac] = new_dest; zmw_leave_critical_section(dev); } else { zmw_enter_critical_section(dev); new_dest->next = DESTQ.dest[ac]->next; DESTQ.dest[ac]->next = new_dest; zmw_leave_critical_section(dev); } //DESTQ.size[ac]++; return; } void zfAggDestDelete(zdev_t* dev, u16_t Qtype, TID_TX tid_tx, void* vtxq) { struct dest* dest, *temp; u16_t i; zmw_get_wlan_dev(dev); zmw_declare_for_critical_section(); zmw_enter_critical_section(dev); if (wd->destLock) { zmw_leave_critical_section(dev); return; } //zmw_declare_for_critical_section(); for (i=0; i<4; i++) { if (!DESTQ.Head[i]) continue; dest = DESTQ.Head[i]; if (!dest) continue; while (dest && (dest->next != DESTQ.Head[i])) { if (Qtype == 0 && dest->next->tid_tx == tid_tx){ break; } if (Qtype == 1 && dest->next->vtxq == vtxq) { break; } dest = dest->next; } if ((Qtype == 0 && dest->next->tid_tx == tid_tx) || (Qtype == 1 && dest->next->vtxq == vtxq)) { tid_tx->size = zm_agg_qlen(dev, tid_tx->aggHead, tid_tx->aggTail); if 
(tid_tx->size) { zmw_leave_critical_section(dev); return; } if (!DESTQ.Head[i]) { temp = NULL; } else { temp = dest->next; if (temp == dest) { DESTQ.Head[i] = DESTQ.dest[i] = NULL; //DESTQ.size[i] = 0; } else { dest->next = dest->next->next; } } if (temp == NULL) {/* do nothing */} //zfwMemFree(dev, temp, sizeof(struct dest)); else zfwMemFree(dev, temp, sizeof(struct dest)); /*zmw_enter_critical_section(dev); if (DESTQ.size[i] > 0) DESTQ.size[i]--; zmw_leave_critical_section(dev); */ } } zmw_leave_critical_section(dev); return; } void zfAggDestInit(zdev_t* dev) { u16_t i; zmw_get_wlan_dev(dev); //zmw_declare_for_critical_section(); for (i=0; i<4; i++) { //wd->destQ.Head[i].next = wd->destQ.Head[i]; //wd->destQ.dest[i] = wd->destQ.Head[i]; //DESTQ.size[i] = 0; DESTQ.Head[i] = NULL; } DESTQ.insert = zfAggDestInsert; DESTQ.delete = zfAggDestDelete; DESTQ.init = zfAggDestInit; DESTQ.getNext = zfAggDestGetNext; DESTQ.exist = zfAggDestExist; DESTQ.ppri = 0; return; } struct dest* zfAggDestGetNext(zdev_t* dev, u16_t ac) { struct dest *dest = NULL; zmw_get_wlan_dev(dev); zmw_declare_for_critical_section(); zmw_enter_critical_section(dev); if (DESTQ.dest[ac]) { dest = DESTQ.dest[ac]; DESTQ.dest[ac] = DESTQ.dest[ac]->next; } else { dest = NULL; } zmw_leave_critical_section(dev); return dest; } #ifdef ZM_ENABLE_AGGREGATION #ifndef ZM_ENABLE_FW_BA_RETRANSMISSION //disable BAW u16_t zfAggTidTxInsertHead(zdev_t* dev, struct bufInfo *buf_info,TID_TX tid_tx) { zbuf_t* buf; u32_t time; struct baw_header *baw_header; zmw_get_wlan_dev(dev); zmw_declare_for_critical_section(); buf = buf_info->buf; zmw_enter_critical_section(dev); tid_tx->size = zm_agg_qlen(dev, tid_tx->aggHead, tid_tx->aggTail); zmw_leave_critical_section(dev); if (tid_tx->size >= (ZM_AGGQ_SIZE - 2)) { zfwBufFree(dev, buf, ZM_SUCCESS); return 0; } zmw_enter_critical_section(dev); tid_tx->aggTail = (tid_tx->aggTail == 0)? 
ZM_AGGQ_SIZE_MASK: tid_tx->aggTail - 1; tid_tx->aggvtxq[tid_tx->aggTail].buf = buf; //time = zm_agg_GetTime(); tid_tx->aggvtxq[tid_tx->aggTail].arrivalTime = buf_info->timestamp; tid_tx->aggvtxq[tid_tx->aggTail].baw_retransmit = buf_info->baw_retransmit; baw_header = &tid_tx->aggvtxq[tid_tx->aggTail].baw_header; baw_header->headerLen = buf_info->baw_header->headerLen; baw_header->micLen = buf_info->baw_header->micLen; baw_header->snapLen = buf_info->baw_header->snapLen; baw_header->removeLen = buf_info->baw_header->removeLen; baw_header->keyIdx = buf_info->baw_header->keyIdx; zfwMemoryCopy((u8_t *)baw_header->header, (u8_t *)buf_info->baw_header->header, 58); zfwMemoryCopy((u8_t *)baw_header->mic , (u8_t *)buf_info->baw_header->mic , 8); zfwMemoryCopy((u8_t *)baw_header->snap , (u8_t *)buf_info->baw_header->snap , 8); tid_tx->size++; tid_tx->size = zm_agg_qlen(dev, tid_tx->aggHead, tid_tx->aggTail); zmw_leave_critical_section(dev); //tid_tx->lastArrival = time; if (1 == tid_tx->size) { DESTQ.insert(dev, 0, tid_tx->ac, tid_tx, NULL); } zm_msg1_agg(ZM_LV_0, "0xC2:insertHead, tid_tx->size=", tid_tx->size); return TRUE; } #endif //disable BAW #endif void zfiTxComplete(zdev_t* dev) { zmw_get_wlan_dev(dev); //zmw_declare_for_critical_section(); if( (wd->wlanMode == ZM_MODE_AP) || (wd->wlanMode == ZM_MODE_INFRASTRUCTURE && wd->sta.EnableHT) || (wd->wlanMode == ZM_MODE_PSEUDO) ) { zfAggTxScheduler(dev, 0); } return; } TID_TX zfAggTxReady(zdev_t* dev) { //struct dest* dest; u16_t i; TID_TX tid_tx = NULL; zmw_get_wlan_dev(dev); zmw_declare_for_critical_section(); zmw_enter_critical_section(dev); for (i=0; i<ZM_AGG_POOL_SIZE; i++) { if (wd->aggQPool[i]->aggQEnabled) { if (wd->aggQPool[i]->size >= 16) { tid_tx = wd->aggQPool[i]; break; } } else { } } zmw_leave_critical_section(dev); return tid_tx; } u16_t zfAggValidTidTx(zdev_t* dev, TID_TX tid_tx) { u16_t i, valid = 0; zmw_get_wlan_dev(dev); zmw_declare_for_critical_section(); zmw_enter_critical_section(dev); for (i=0; 
i<ZM_AGG_POOL_SIZE; i++) { if (wd->aggQPool[i] == tid_tx) { valid = 1; break; } else { } } zmw_leave_critical_section(dev); return valid; } void zfAggTxScheduler(zdev_t* dev, u8_t ScanAndClear) { TID_TX tid_tx = NULL; void* vtxq; struct dest* dest; zbuf_t* buf; u32_t txql, min_txql; //u16_t aggr_size = 1; u16_t txq_threshold; zmw_get_wlan_dev(dev); zmw_declare_for_critical_section(); if (!wd->aggInitiated) { return; } /* debug */ txql = TXQL; min_txql = AGG_MIN_TXQL; if(wd->txq_threshold) txq_threshold = wd->txq_threshold; else txq_threshold = AGG_MIN_TXQL; tid_tx = zfAggTxReady(dev); if (tid_tx) ScanAndClear = 0; while (zfHpGetFreeTxdCount(dev) > 20 && (TXQL < txq_threshold || tid_tx)) { //while (zfHpGetFreeTxdCount(dev) > 20 && (ScanAndClear || tid_tx)) { //while (TXQL < txq_threshold) { u16_t i; u8_t ac; s8_t destQ_count = 0; //while ((zfHpGetFreeTxdCount(dev)) > 32) { //DbgPrint("zfAggTxScheduler: in while loop"); for (i=0; i<4; i++) { if (DESTQ.Head[i]) destQ_count++; } if (0 >= destQ_count) break; zmw_enter_critical_section(dev); ac = pri[DESTQ.ppri]; DESTQ.ppri = (DESTQ.ppri + 1) % 10; zmw_leave_critical_section(dev); for (i=0; i<10; i++){ if(DESTQ.Head[ac]) break; zmw_enter_critical_section(dev); ac = pri[DESTQ.ppri]; DESTQ.ppri = (DESTQ.ppri + 1) % 10; zmw_leave_critical_section(dev); } if (i == 10) break; //DbgPrint("zfAggTxScheduler: have dest Q"); zmw_enter_critical_section(dev); wd->destLock = 1; zmw_leave_critical_section(dev); dest = DESTQ.getNext(dev, ac); if (!dest) { zmw_enter_critical_section(dev); wd->destLock = 0; zmw_leave_critical_section(dev); DbgPrint("bug report! 
DESTQ.getNext got nothing!"); break; } if (dest->Qtype == 0) { tid_tx = dest->tid_tx; //DbgPrint("zfAggTxScheduler: have tid_tx Q"); if(tid_tx && zfAggValidTidTx(dev, tid_tx)) tid_tx->size = zm_agg_qlen(dev, tid_tx->aggHead, tid_tx->aggTail); else { zmw_enter_critical_section(dev); wd->destLock = 0; zmw_leave_critical_section(dev); tid_tx = zfAggTxReady(dev); continue; } zmw_enter_critical_section(dev); wd->destLock = 0; zmw_leave_critical_section(dev); //zmw_enter_critical_section(dev); if (tid_tx && !tid_tx->size) { //zmw_leave_critical_section(dev); //DESTQ.delete(dev, 0, tid_tx, NULL); } else if(wd->aggState == 0){ //wd->aggState = 1; //zmw_leave_critical_section(dev); zfAggTxSend(dev, zfHpGetFreeTxdCount(dev), tid_tx); //wd->aggState = 0; } else { //zmw_leave_critical_section(dev); break; } } else { vtxq = dest->vtxq; buf = zfGetVtxq(dev, ac); zm_assert( buf != 0 ); zfTxSendEth(dev, buf, 0, ZM_EXTERNAL_ALLOC_BUF, 0); } /*flush all but < 16 frames in tid_tx to TXQ*/ tid_tx = zfAggTxReady(dev); } /*while ((zfHpGetFreeTxdCount(dev)) > 32) { //while ((zfHpGetFreeTxdCount(dev)) > 32) { destQ_count = 0; for (i=0; i<4; i++) destQ_count += wd->destQ.size[i]; if (0 >= destQ_count) break; ac = pri[wd->destQ.ppri]; wd->destQ.ppri = (wd->destQ.ppri + 1) % 10; for (i=0; i<10; i++){ if(wd->destQ.size[ac]!=0) break; ac = pri[wd->destQ.ppri]; wd->destQ.ppri = (wd->destQ.ppri + 1) % 10; } if (i == 10) break; dest = wd->destQ.getNext(dev, ac); if (dest->Qtype == 0) { tid_tx = dest->tid_tx; tid_tx->size = zm_agg_qlen(dev, tid_tx->aggHead, tid_tx->aggTail); if (!tid_tx->size) { wd->destQ.delete(dev, 0, tid_tx, NULL); break; } else if((wd->aggState == 0) && (tid_tx->size >= 16)){ zfAggTxSend(dev, zfHpGetFreeTxdCount(dev), tid_tx); } else { break; } } } */ return; } /************************************************************************/ /* */ /* FUNCTION DESCRIPTION zfAggTx */ /* return Status code ZM_SUCCESS or error code */ /* management A-MPDU aggregation function, */ /* 
management aggregation queue, calculate arrivalrate, */ /* add/delete an aggregation queue of a stream, */ /* enqueue packets into responsible aggregate queue. */ /* take (dev, buf, ac) as input */ /* */ /* INPUTS */ /* dev : device pointer */ /* buf : packet buff */ /* ac : access category */ /* */ /* OUTPUTS */ /* status code */ /* */ /* AUTHOR */ /* Honda Atheros Communications, INC. 2006.12 */ /* */ /************************************************************************/ u16_t zfAggTx(zdev_t* dev, zbuf_t* buf, u16_t tid) { u16_t aid; //u16_t qnum; //u16_t aggflag = 0; //u16_t arrivalrate = 0; TID_TX tid_tx; zmw_get_wlan_dev(dev); zmw_declare_for_critical_section(); if(!wd->aggInitiated) { return ZM_ERR_TX_BUFFER_UNAVAILABLE; } aid = zfAggGetSta(dev, buf); //arrivalrate = zfAggTxArrivalRate(dev, aid, tid); if (0xffff == aid) { /* * STA not associated, this is a BC/MC or STA->AP packet */ return ZM_ERR_TX_BUFFER_UNAVAILABLE; } /* * STA associated, a unicast packet */ tid_tx = zfAggTxGetQueue(dev, aid, tid); /*tid_q.tid_tx = tid_tx; wd->destQ.insert = zfAggDestInsert; wd->destQ.insert(dev, 0, tid_q); */ if (tid_tx != NULL) { /* * this (aid, ac) is aggregated */ //if (arrivalrate < ZM_AGG_LOW_THRESHOLD) if (0) { /* * arrival rate too low * delete this aggregate queue */ zmw_enter_critical_section(dev); //wd->aggQPool[qnum]->clearFlag = wd->aggQPool[qnum]->deleteFlag =1; zmw_leave_critical_section(dev); } return zfAggTxEnqueue(dev, buf, aid, tid_tx); } else { /* * this (aid, ac) not yet aggregated * queue not found */ //if (arrivalrate > ZM_AGG_HIGH_THRESHOLD) if (1) { /* * arrivalrate high enough to get a new agg queue */ tid_tx = zfAggTxNewQueue(dev, aid, tid, buf); //zm_msg1_agg(ZM_LV_0, "get new AggQueue qnum = ", tid_tx->); if (tid_tx) { /* * got a new aggregate queue */ //zmw_enter_critical_section(dev); //wd->aggSta[aid].aggFlag[ac] = 1; //zmw_leave_critical_section(dev); /* * add ADDBA functions here * return ZM_ERR_TX_BUFFER_UNAVAILABLE; */ 
//zfAggSendAddbaRequest(dev, tid_tx->dst, tid_tx->ac, tid_tx->tid); //zmw_enter_critical_section(dev); //wd->aggSta[aid].aggFlag[ac] = 0; //zmw_leave_critical_section(dev); return zfAggTxEnqueue(dev, buf, aid, tid_tx); } else { /* * just can't get a new aggregate queue */ return ZM_ERR_TX_BUFFER_UNAVAILABLE; } } else { /* * arrival rate is not high enough to get a new agg queue */ return ZM_ERR_TX_BUFFER_UNAVAILABLE; } } } /************************************************************************/ /* */ /* FUNCTION DESCRIPTION zfAggTxReadyCount */ /* return counter of ready to aggregate queues. */ /* take (dev, ac) as input, only calculate the ready to aggregate */ /* queues of one particular ac. */ /* */ /* INPUTS */ /* dev : device pointer */ /* ac : access category */ /* */ /* OUTPUTS */ /* counter of ready to aggregate queues */ /* */ /* AUTHOR */ /* Honda Atheros Communications, INC. 2006.12 */ /* */ /************************************************************************/ u16_t zfAggTxReadyCount(zdev_t* dev, u16_t ac) { u16_t i; u16_t readycount = 0; zmw_get_wlan_dev(dev); zmw_declare_for_critical_section(); zmw_enter_critical_section(dev); for (i=0 ; i<ZM_AGG_POOL_SIZE; i++) { if (wd->aggQPool[i]->aggQEnabled && (wd->aggQPool[i]->aggReady || \ wd->aggQPool[i]->clearFlag) && ac == wd->aggQPool[i]->ac) readycount++; } zmw_leave_critical_section(dev); return readycount; } /************************************************************************/ /* */ /* FUNCTION DESCRIPTION zfAggTxPartial */ /* return the number that Vtxq has to send. */ /* take (dev, ac, readycount) as input, calculate the ratio of */ /* Vtxq length to (Vtxq length + readycount) of a particular ac, */ /* and returns the Vtxq length * the ratio */ /* */ /* INPUTS */ /* dev : device pointer */ /* ac : access category */ /* readycount: the number of ready to aggregate queues of this ac */ /* */ /* OUTPUTS */ /* Vtxq length * ratio */ /* */ /* AUTHOR */ /* Honda Atheros Communications, INC. 
2006.12 */ /* */ /************************************************************************/ u16_t zfAggTxPartial(zdev_t* dev, u16_t ac, u16_t readycount) { u16_t qlen; u16_t partial; zmw_get_wlan_dev(dev); zmw_declare_for_critical_section(); zmw_enter_critical_section(dev); qlen = zm_agg_qlen(dev, wd->vtxqHead[ac], wd->vtxqTail[ac]); if ((qlen + readycount) > 0) { partial = (u16_t)( zm_agg_weight(ac) * ((u16_t)qlen/(qlen + \ readycount)) ); } else { partial = 0; } zmw_leave_critical_section(dev); if (partial > qlen) partial = qlen; return partial; } /************************************************************************/ /* */ /* FUNCTION DESCRIPTION zfAggTxSend */ /* return sentcount */ /* take (dev, ac, n) as input, n is the number of scheduled agg */ /* queues to be sent of the particular ac. */ /* */ /* INPUTS */ /* dev : device pointer */ /* ac : access category */ /* n : the number of scheduled aggregation queues to be sent */ /* */ /* OUTPUTS */ /* sentcount */ /* */ /* AUTHOR */ /* Honda Atheros Communications, INC. 2006.12 */ /* */ /************************************************************************/ u16_t zfAggTxSend(zdev_t* dev, u32_t freeTxd, TID_TX tid_tx) { //u16_t qnum; //u16_t qlen; u16_t j; //u16_t sentcount = 0; zbuf_t* buf; struct aggControl aggControl; u16_t aggLen; //zbuf_t* newBuf; //u16_t bufLen; //TID_BAW tid_baw = NULL; //struct bufInfo *buf_info; zmw_get_wlan_dev(dev); zmw_declare_for_critical_section(); //while (tid_tx->size > 0) zmw_enter_critical_section(dev); tid_tx->size = zm_agg_qlen(dev, tid_tx->aggHead, tid_tx->aggTail); aggLen = zm_agg_min(16, zm_agg_min(tid_tx->size, (u16_t)(freeTxd - 2))); zmw_leave_critical_section(dev); /* * why there have to be 2 free Txd? 
*/ if (aggLen <=0 ) return 0; if (aggLen == 1) { buf = zfAggTxGetVtxq(dev, tid_tx); if (buf) zfTxSendEth(dev, buf, 0, ZM_EXTERNAL_ALLOC_BUF, 0); if (tid_tx->size == 0) { //DESTQ.delete(dev, 0, tid_tx, NULL); } return 1; } /* * Free Txd queue is big enough to put aggregation */ zmw_enter_critical_section(dev); if (wd->aggState == 1) { zmw_leave_critical_section(dev); return 0; } wd->aggState = 1; zmw_leave_critical_section(dev); zm_msg1_agg(ZM_LV_0, "aggLen=", aggLen); tid_tx->aggFrameSize = 0; for (j=0; j < aggLen; j++) { buf = zfAggTxGetVtxq(dev, tid_tx); zmw_enter_critical_section(dev); tid_tx->size = zm_agg_qlen(dev, tid_tx->aggHead, tid_tx->aggTail); zmw_leave_critical_section(dev); if ( buf ) { //struct aggTally *agg_tal; u16_t completeIndex; if (0 == j) { aggControl.ampduIndication = ZM_AGG_FIRST_MPDU; } else if ((j == (aggLen - 1)) || tid_tx->size == 0) { aggControl.ampduIndication = ZM_AGG_LAST_MPDU; //wd->aggState = 0; } else { aggControl.ampduIndication = ZM_AGG_MIDDLE_MPDU; /* the packet is delayed more than 500 ms, drop it */ } tid_tx->aggFrameSize += zfwBufGetSize(dev, buf); aggControl.addbaIndication = 0; aggControl.aggEnabled = 1; #ifdef ZM_AGG_TALLY agg_tal = &wd->agg_tal; agg_tal->sent_packets_sum++; #endif zfAggTxSendEth(dev, buf, 0, ZM_EXTERNAL_ALLOC_BUF, 0, &aggControl, tid_tx); zmw_enter_critical_section(dev); completeIndex = tid_tx->complete; if(zm_agg_inQ(tid_tx, tid_tx->complete)) zm_agg_plus(tid_tx->complete); zmw_leave_critical_section(dev); if(zm_agg_inQ(tid_tx, completeIndex) && wd->zfcbSendCompleteIndication && tid_tx->aggvtxq[completeIndex].buf) { wd->zfcbSendCompleteIndication(dev, tid_tx->aggvtxq[completeIndex].buf); zm_debug_msg0("in queue complete worked!"); } } else { /* * this aggregation queue is empty */ zm_msg1_agg(ZM_LV_0, "aggLen not reached, but no more frame, j=", j); break; } } zmw_enter_critical_section(dev); wd->aggState = 0; zmw_leave_critical_section(dev); //zm_acquire_agg_spin_lock(Adapter); tid_tx->size = 
zm_agg_qlen(dev, tid_tx->aggHead, tid_tx->aggTail); //zm_release_agg_spin_lock(Adapter); if (tid_tx->size == 0) { //DESTQ.delete(dev, 0, tid_tx, NULL); } //zfAggInvokeBar(dev, tid_tx); if(j>0) { aggr_count++; zm_msg1_agg(ZM_LV_0, "0xC2:sent 1 aggr, aggr_count=", aggr_count); zm_msg1_agg(ZM_LV_0, "0xC2:sent 1 aggr, aggr_size=", j); } return j; } /************************************************************************/ /* */ /* FUNCTION DESCRIPTION zfAggTxGetReadyQueue */ /* return the number of the aggregation queue */ /* take (dev, ac) as input, find the agg queue with smallest */ /* arrival time (waited longest) among those ready or clearFlag */ /* set queues. */ /* */ /* INPUTS */ /* dev : device pointer */ /* ac : access category */ /* */ /* OUTPUTS */ /* aggregation queue number */ /* */ /* AUTHOR */ /* Honda Atheros Communications, INC. 2006.12 */ /* */ /************************************************************************/ TID_TX zfAggTxGetReadyQueue(zdev_t* dev, u16_t ac) { //u16_t qnum = ZM_AGG_POOL_SIZE; u16_t i; u32_t time = 0; TID_TX tid_tx = NULL; zmw_get_wlan_dev(dev); zmw_declare_for_critical_section(); zmw_enter_critical_section(dev); for (i=0 ;i<ZM_AGG_POOL_SIZE; i++) { if (1 == wd->aggQPool[i]->aggQEnabled && ac == wd->aggQPool[i]->ac && (wd->aggQPool[i]->size > 0)) { if (0 == time || time > wd->aggQPool[i]->aggvtxq[ \ wd->aggQPool[i]->aggHead ].arrivalTime) { tid_tx = wd->aggQPool[i]; time = tid_tx->aggvtxq[ tid_tx->aggHead ].arrivalTime; } } } zmw_leave_critical_section(dev); return tid_tx; } /************************************************************************/ /* */ /* FUNCTION DESCRIPTION zfAggTxGetVtxq */ /* return an MSDU */ /* take (dev, qnum) as input, return an MSDU out of the agg queue. */ /* */ /* INPUTS */ /* dev : device pointer */ /* qnum: queue number */ /* */ /* OUTPUTS */ /* a MSDU */ /* */ /* AUTHOR */ /* Honda Atheros Communications, INC. 
2006.12 */ /* */ /************************************************************************/ zbuf_t* zfAggTxGetVtxq(zdev_t* dev, TID_TX tid_tx) { zbuf_t* buf = NULL; zmw_declare_for_critical_section(); if (tid_tx->aggHead != tid_tx->aggTail) { buf = tid_tx->aggvtxq[ tid_tx->aggTail ].buf; tid_tx->aggvtxq[tid_tx->aggTail].buf = NULL; zmw_enter_critical_section(dev); tid_tx->aggTail = ((tid_tx->aggTail + 1) & ZM_AGGQ_SIZE_MASK); if(tid_tx->size > 0) tid_tx->size--; tid_tx->size = zm_agg_qlen(dev, tid_tx->aggHead, tid_tx->aggTail); if (NULL == buf) { //tid_tx->aggTail = tid_tx->aggHead = tid_tx->size = 0; //zm_msg1_agg(ZM_LV_0, "GetVtxq buf == NULL, tid_tx->size=", tid_tx->size); } zmw_leave_critical_section(dev); } else { /* * queue is empty */ zm_msg1_agg(ZM_LV_0, "tid_tx->aggHead == tid_tx->aggTail, tid_tx->size=", tid_tx->size); } if (zm_agg_qlen(dev, tid_tx->aggHead, tid_tx->aggTail) != tid_tx->size) zm_msg1_agg(ZM_LV_0, "qlen!=tid_tx->size! tid_tx->size=", tid_tx->size); return buf; } /************************************************************************/ /* */ /* FUNCTION DESCRIPTION zfAggTxDeleteQueue */ /* return ZM_SUCCESS (can't fail) */ /* take (dev, qnum) as input, reset (delete) this aggregate queue, */ /* this queue is virtually returned to the aggregate queue pool. */ /* */ /* INPUTS */ /* dev : device pointer */ /* qnum: queue number */ /* */ /* OUTPUTS */ /* ZM_SUCCESS */ /* */ /* AUTHOR */ /* Honda Atheros Communications, INC. 
2006.12 */ /* */ /************************************************************************/ u16_t zfAggTxDeleteQueue(zdev_t* dev, u16_t qnum) { u16_t ac, tid; struct aggQueue *tx_tid; struct aggSta *agg_sta; zmw_get_wlan_dev(dev); zmw_declare_for_critical_section(); tx_tid = wd->aggQPool[qnum]; agg_sta = &wd->aggSta[tx_tid->aggQSTA]; ac = tx_tid->ac; tid = tx_tid->tid; zmw_enter_critical_section(dev); tx_tid->aggQEnabled = 0; tx_tid->aggHead = tx_tid->aggTail = 0; tx_tid->aggReady = 0; tx_tid->clearFlag = tx_tid->deleteFlag = 0; tx_tid->size = 0; agg_sta->count[ac] = 0; agg_sta->tid_tx[tid] = NULL; agg_sta->aggFlag[ac] = 0; zmw_leave_critical_section(dev); zm_msg1_agg(ZM_LV_0, "queue deleted! qnum=", qnum); return ZM_SUCCESS; } #ifdef ZM_ENABLE_AGGREGATION #ifndef ZM_ENABLE_FW_BA_RETRANSMISSION //disable BAW void zfBawCore(zdev_t* dev, u16_t baw_seq, u32_t bitmap, u16_t aggLen) { TID_BAW tid_baw; s16_t i; zbuf_t* buf; struct bufInfo *buf_info; zmw_get_wlan_dev(dev); //zmw_declare_for_critical_section(); tid_baw = BAW->getQ(dev, baw_seq); //tid_baw = NULL; if (NULL == tid_baw) return; total_mpdu += aggLen; for (i = aggLen - 1; i>=0; i--) { if (((bitmap >> i) & 0x1) == 0) { buf_info = BAW->pop(dev, i, tid_baw); buf = buf_info->buf; if (buf) { //wd->zfcbSetBawQ(dev, buf, 0); zfAggTidTxInsertHead(dev, buf_info, tid_baw->tid_tx); } } else { success_mpdu++; } } BAW->disable(dev, tid_baw); zfAggTxScheduler(dev); zm_debug_msg1("success_mpdu = ", success_mpdu); zm_debug_msg1(" total_mpdu = ", total_mpdu); } void zfBawInit(zdev_t* dev) { TID_BAW tid_baw; u16_t i,j; zmw_get_wlan_dev(dev); //zmw_declare_for_critical_section(); for (i=0; i<ZM_BAW_POOL_SIZE; i++){ tid_baw = &BAW->tid_baw[i]; for (j=0; j<ZM_VTXQ_SIZE; j++) { tid_baw->frame[j].buf = NULL; } tid_baw->enabled = tid_baw->head = tid_baw->tail = tid_baw->size = 0; tid_baw->start_seq = 0; } BAW->delPoint = 0; BAW->core = zfBawCore; BAW->getNewQ = zfBawGetNewQ; BAW->insert = zfBawInsert; BAW->pop = zfBawPop; BAW->enable 
= zfBawEnable; BAW->disable = zfBawDisable; BAW->getQ = zfBawGetQ; } TID_BAW zfBawGetNewQ(zdev_t* dev, u16_t start_seq, TID_TX tid_tx) { TID_BAW tid_baw=NULL; TID_BAW next_baw=NULL; u16_t i; zmw_get_wlan_dev(dev); //zmw_declare_for_critical_section(); /* for (i=0; i<ZM_BAW_POOL_SIZE; i++){ tid_baw = &BAW->tid_baw[i]; if (FALSE == tid_baw->enabled) break; } */ tid_baw = &BAW->tid_baw[BAW->delPoint]; i = BAW->delPoint; //if (ZM_BAW_POOL_SIZE == i) { //return NULL; // u8_t temp = BAW->delPoint; // tid_baw = &BAW->tid_baw[BAW->delPoint]; // BAW->disable(dev, tid_baw); // BAW->delPoint = (BAW->delPoint < (ZM_BAW_POOL_SIZE - 1))? (BAW->delPoint + 1): 0; // temp = BAW->delPoint; //} zm_msg1_agg(ZM_LV_0, "get new tid_baw, index=", i); BAW->delPoint = (i < (ZM_BAW_POOL_SIZE -1))? (i + 1): 0; next_baw = &BAW->tid_baw[BAW->delPoint]; if (1 == next_baw->enabled) BAW->disable(dev, next_baw); BAW->enable(dev, tid_baw, start_seq); tid_baw->tid_tx = tid_tx; return tid_baw; } u16_t zfBawInsert(zdev_t* dev, zbuf_t* buf, u16_t baw_seq, TID_BAW tid_baw, u8_t baw_retransmit, struct baw_header_r *header_r) { //TID_BAW tid_baw; //u16_t bufLen; //zmw_get_wlan_dev(dev); //zmw_declare_for_critical_section(); if(tid_baw->size < (ZM_VTXQ_SIZE - 1)) { struct baw_header *baw_header = &tid_baw->frame[tid_baw->head].baw_header; baw_header->headerLen = header_r->headerLen; baw_header->micLen = header_r->micLen; baw_header->snapLen = header_r->snapLen; baw_header->removeLen = header_r->removeLen; baw_header->keyIdx = header_r->keyIdx; zfwMemoryCopy((u8_t *)baw_header->header, (u8_t *)header_r->header, 58); zfwMemoryCopy((u8_t *)baw_header->mic , (u8_t *)header_r->mic , 8); zfwMemoryCopy((u8_t *)baw_header->snap , (u8_t *)header_r->snap , 8); //wd->zfcbSetBawQ(dev, buf, 1); tid_baw->frame[tid_baw->head].buf = buf; tid_baw->frame[tid_baw->head].baw_seq = baw_seq; tid_baw->frame[tid_baw->head].baw_retransmit = baw_retransmit + 1; //tid_baw->frame[tid_baw->head].data = pBuf->data; tid_baw->head++; 
tid_baw->size++; } else { //wd->zfcbSetBawQ(dev, buf, 0); zfwBufFree(dev, buf, ZM_SUCCESS); return FALSE; } return TRUE; } struct bufInfo* zfBawPop(zdev_t* dev, u16_t index, TID_BAW tid_baw) { //TID_BAW tid_baw; //zbuf_t* buf; struct bufInfo *buf_info; zmw_get_wlan_dev(dev); buf_info = &wd->buf_info; buf_info->baw_header = NULL; if (NULL == (buf_info->buf = tid_baw->frame[index].buf)) return buf_info; buf_info->baw_retransmit = tid_baw->frame[index].baw_retransmit; buf_info->baw_header = &tid_baw->frame[index].baw_header; buf_info->timestamp = tid_baw->frame[index].timestamp; //pBuf->data = pBuf->buffer; //wd->zfcbRestoreBufData(dev, buf); tid_baw->frame[index].buf = NULL; return buf_info; } void zfBawEnable(zdev_t* dev, TID_BAW tid_baw, u16_t start_seq) { //TID_BAW tid_baw; //zmw_get_wlan_dev(dev); //zmw_declare_for_critical_section(); tid_baw->enabled = TRUE; tid_baw->head = tid_baw->tail = tid_baw->size = 0; tid_baw->start_seq = start_seq; } void zfBawDisable(zdev_t* dev, TID_BAW tid_baw) { //TID_BAW tid_baw; u16_t i; //zmw_get_wlan_dev(dev); //zmw_declare_for_critical_section(); for (i=0; i<ZM_VTXQ_SIZE; i++) { if (tid_baw->frame[i].buf) { //wd->zfcbSetBawQ(dev, tid_baw->frame[i].buf, 0); zfwBufFree(dev, tid_baw->frame[i].buf, ZM_SUCCESS); tid_baw->frame[i].buf = NULL; } } tid_baw->enabled = FALSE; } TID_BAW zfBawGetQ(zdev_t* dev, u16_t baw_seq) { TID_BAW tid_baw=NULL; u16_t i; zmw_get_wlan_dev(dev); //zmw_declare_for_critical_section(); for (i=0; i<ZM_BAW_POOL_SIZE; i++){ tid_baw = &BAW->tid_baw[i]; if (TRUE == tid_baw->enabled) { zm_msg1_agg(ZM_LV_0, "get an old tid_baw, baw_seq=", baw_seq); zm_msg1_agg(ZM_LV_0, "check a tid_baw->start_seq=", tid_baw->start_seq); if(baw_seq == tid_baw->start_seq) break; } } if (ZM_BAW_POOL_SIZE == i) return NULL; return tid_baw; } #endif //disable BAW #endif u16_t zfAggTallyReset(zdev_t* dev) { struct aggTally* agg_tal; zmw_get_wlan_dev(dev); //zmw_declare_for_critical_section(); agg_tal = &wd->agg_tal; 
agg_tal->got_packets_sum = 0; agg_tal->got_bytes_sum = 0; agg_tal->sent_bytes_sum = 0; agg_tal->sent_packets_sum = 0; agg_tal->avg_got_packets = 0; agg_tal->avg_got_bytes = 0; agg_tal->avg_sent_packets = 0; agg_tal->avg_sent_bytes = 0; agg_tal->time = 0; return 0; } /************************************************************************/ /* */ /* FUNCTION DESCRIPTION zfAggScanAndClear */ /* If the packets in a queue have waited for too long, clear and */ /* delete this aggregation queue. */ /* */ /* INPUTS */ /* dev : device pointer */ /* time : current time */ /* */ /* OUTPUTS */ /* ZM_SUCCESS */ /* */ /* AUTHOR */ /* Honda Atheros Communications, INC. 2006.12 */ /* */ /************************************************************************/ u16_t zfAggScanAndClear(zdev_t* dev, u32_t time) { u16_t i; u16_t head; u16_t tail; u32_t tick; u32_t arrivalTime; //u16_t aid, ac; TID_TX tid_tx; zmw_get_wlan_dev(dev); zmw_declare_for_critical_section(); if(!(wd->state == ZM_WLAN_STATE_ENABLED)) return 0; zfAggTxScheduler(dev, 1); tick = zm_agg_GetTime(); for (i=0; i<ZM_AGG_POOL_SIZE; i++) { if (!wd->aggQPool[i]) return 0; if (1 == wd->aggQPool[i]->aggQEnabled) { tid_tx = wd->aggQPool[i]; zmw_enter_critical_section(dev); head = tid_tx->aggHead; tail = tid_tx->aggTail; arrivalTime = (u32_t)tid_tx->aggvtxq[tid_tx->aggTail].arrivalTime; if((tick - arrivalTime) <= ZM_AGG_CLEAR_TIME) { } else if((tid_tx->size = zm_agg_qlen(dev, tid_tx->aggHead, tid_tx->aggTail)) > 0) { tid_tx->clearFlag = 1; //zm_msg1_agg(ZM_LV_0, "clear queue tick =", tick); //zm_msg1_agg(ZM_LV_0, "clear queue arrival =", arrivalTime); //zmw_leave_critical_section(dev); //zfAggTxScheduler(dev); //zmw_enter_critical_section(dev); } if (tid_tx->size == 0) { /* * queue empty */ if (tick - tid_tx->lastArrival > ZM_AGG_DELETE_TIME) { zm_msg1_agg(ZM_LV_0, "delete queue, idle for n sec. 
n = ", \ ZM_AGG_DELETE_TIME/10); zmw_leave_critical_section(dev); zfAggTxDeleteQueue(dev, i); zmw_enter_critical_section(dev); } } zmw_leave_critical_section(dev); } } zfAggRxClear(dev, time); #ifdef ZM_AGG_TALLY if((wd->tick % 100) == 0) { zfAggPrintTally(dev); } #endif return ZM_SUCCESS; } u16_t zfAggPrintTally(zdev_t* dev) { struct aggTally* agg_tal; zmw_get_wlan_dev(dev); //zmw_declare_for_critical_section(); agg_tal = &wd->agg_tal; if(agg_tal->got_packets_sum < 10) { zfAggTallyReset(dev); return 0; } agg_tal->time++; agg_tal->avg_got_packets = (agg_tal->avg_got_packets * (agg_tal->time - 1) + agg_tal->got_packets_sum) / agg_tal->time; agg_tal->avg_got_bytes = (agg_tal->avg_got_bytes * (agg_tal->time - 1) + agg_tal->got_bytes_sum) / agg_tal->time; agg_tal->avg_sent_packets = (agg_tal->avg_sent_packets * (agg_tal->time - 1) + agg_tal->sent_packets_sum) / agg_tal->time; agg_tal->avg_sent_bytes = (agg_tal->avg_sent_bytes * (agg_tal->time - 1) + agg_tal->sent_bytes_sum) / agg_tal->time; zm_msg1_agg(ZM_LV_0, "got_packets_sum =", agg_tal->got_packets_sum); zm_msg1_agg(ZM_LV_0, " got_bytes_sum =", agg_tal->got_bytes_sum); zm_msg1_agg(ZM_LV_0, "sent_packets_sum=", agg_tal->sent_packets_sum); zm_msg1_agg(ZM_LV_0, " sent_bytes_sum =", agg_tal->sent_bytes_sum); agg_tal->got_packets_sum = agg_tal->got_bytes_sum =agg_tal->sent_packets_sum = agg_tal->sent_bytes_sum = 0; zm_msg1_agg(ZM_LV_0, "avg_got_packets =", agg_tal->avg_got_packets); zm_msg1_agg(ZM_LV_0, " avg_got_bytes =", agg_tal->avg_got_bytes); zm_msg1_agg(ZM_LV_0, "avg_sent_packets=", agg_tal->avg_sent_packets); zm_msg1_agg(ZM_LV_0, " avg_sent_bytes =", agg_tal->avg_sent_bytes); if ((wd->commTally.BA_Fail == 0) || (wd->commTally.Hw_Tx_MPDU == 0)) { zm_msg1_agg(ZM_LV_0, "Hardware Tx MPDU=", wd->commTally.Hw_Tx_MPDU); zm_msg1_agg(ZM_LV_0, " BA Fail number=", wd->commTally.BA_Fail); } else zm_msg1_agg(ZM_LV_0, "1/(BA fail rate)=", wd->commTally.Hw_Tx_MPDU/wd->commTally.BA_Fail); return 0; } u16_t zfAggRxClear(zdev_t* 
dev, u32_t time) { u16_t i; struct agg_tid_rx *tid_rx; zmw_get_wlan_dev(dev); zmw_declare_for_critical_section(); for (i=0; i<ZM_AGG_POOL_SIZE; i++) { zmw_enter_critical_section(dev); tid_rx = wd->tid_rx[i]; if (tid_rx->baw_head != tid_rx->baw_tail) { u16_t j = tid_rx->baw_tail; while ((j != tid_rx->baw_head) && !tid_rx->frame[j].buf) { j = (j + 1) & ZM_AGG_BAW_MASK; } if ((j != tid_rx->baw_head) && (time - tid_rx->frame[j].arrivalTime) > (ZM_AGG_CLEAR_TIME - 5)) { zmw_leave_critical_section(dev); zm_msg0_agg(ZM_LV_1, "queue RxFlush by RxClear"); zfAggRxFlush(dev, 0, tid_rx); zmw_enter_critical_section(dev); } } zmw_leave_critical_section(dev); } return ZM_SUCCESS; } struct agg_tid_rx* zfAggRxEnabled(zdev_t* dev, zbuf_t* buf) { u16_t dst0, src[3], ac, aid, fragOff; u8_t up; u16_t offset = 0; u16_t seq_no; u16_t frameType; u16_t frameCtrl; u16_t frameSubtype; u32_t tcp_seq; //struct aggSta *agg_sta; #if ZM_AGG_FPGA_REORDERING struct agg_tid_rx *tid_rx; #endif zmw_get_wlan_dev(dev); //zmw_declare_for_critical_section(); seq_no = zmw_rx_buf_readh(dev, buf, 22) >> 4; //DbgPrint("Rx seq=%d\n", seq_no); if (wd->sta.EnableHT == 0) { return NULL; } frameCtrl = zmw_rx_buf_readb(dev, buf, 0); frameType = frameCtrl & 0xf; frameSubtype = frameCtrl & 0xf0; if (frameType != ZM_WLAN_DATA_FRAME) //non-Qos Data? 
(frameSubtype&0x80) { return NULL; } #ifdef ZM_ENABLE_PERFORMANCE_EVALUATION tcp_seq = zmw_rx_buf_readb(dev, buf, 22+36) << 24; tcp_seq += zmw_rx_buf_readb(dev, buf, 22+37) << 16; tcp_seq += zmw_rx_buf_readb(dev, buf, 22+38) << 8; tcp_seq += zmw_rx_buf_readb(dev, buf, 22+39); #endif ZM_SEQ_DEBUG("In %5d, %12u\n", seq_no, tcp_seq); dst0 = zmw_rx_buf_readh(dev, buf, offset+4); src[0] = zmw_rx_buf_readh(dev, buf, offset+10); src[1] = zmw_rx_buf_readh(dev, buf, offset+12); src[2] = zmw_rx_buf_readh(dev, buf, offset+14); #if ZM_AGG_FPGA_DEBUG aid = 0; #else aid = zfApFindSta(dev, src); #endif //agg_sta = &wd->aggSta[aid]; //zfTxGetIpTosAndFrag(dev, buf, &up, &fragOff); //ac = zcUpToAc[up&0x7] & 0x3; /* * Filter unicast frame only, aid == 0 is for debug only */ if ((dst0 & 0x1) == 0 && aid == 0) { #if ZM_AGG_FPGA_REORDERING tid_rx = zfAggRxGetQueue(dev, buf) ; if(!tid_rx) return NULL; else { //if (tid_rx->addBaExchangeStatusCode == ZM_AGG_ADDBA_RESPONSE) return tid_rx; } #else return NULL; #endif } return NULL; } u16_t zfAggRx(zdev_t* dev, zbuf_t* buf, struct zsAdditionInfo *addInfo, struct agg_tid_rx *tid_rx) { u16_t seq_no; s16_t index; u16_t offset = 0; zbuf_t* pbuf; u8_t frameSubType; zmw_get_wlan_dev(dev); zmw_declare_for_critical_section(); ZM_BUFFER_TRACE(dev, buf) ZM_PERFORMANCE_RX_REORDER(dev); seq_no = zmw_rx_buf_readh(dev, buf, offset+22) >> 4; index = seq_no - tid_rx->seq_start; /* * for debug */ /* zm_msg2_agg(ZM_LV_0, "queue seq = ", seq_no); * DbgPrint("%s:%s%lxh %s%lxh\n", __func__, "queue seq=", seq_no, * "; seq_start=", tid_rx->seq_start); */ //DbgPrint("seq_no=%d, seq_start=%d\n", seq_no, tid_rx->seq_start); /* In some APs, we found that it might transmit NULL data whose sequence number is out or order. In order to avoid this problem, we ignore these NULL data. 
*/ frameSubType = (zmw_rx_buf_readh(dev, buf, 0) & 0xF0) >> 4; /* If this is a NULL data instead of Qos NULL data */ if ((frameSubType & 0x0C) == 0x04) { s16_t seq_diff; seq_diff = (seq_no > tid_rx->seq_start) ? seq_no - tid_rx->seq_start : tid_rx->seq_start - seq_no; if (seq_diff > ZM_AGG_BAW_SIZE) { zm_debug_msg0("Free Rx NULL data in zfAggRx"); /* Free Rx buffer */ zfwBufFree(dev, buf, 0); return ZM_ERR_OUT_OF_ORDER_NULL_DATA; } } /* * sequence number wrap at 4k */ if (tid_rx->seq_start > seq_no) { //index += 4096; zmw_enter_critical_section(dev); if (tid_rx->seq_start >= 4096) { tid_rx->seq_start = 0; } zmw_leave_critical_section(dev); } if (tid_rx->seq_start == seq_no) { zmw_enter_critical_section(dev); if (((tid_rx->baw_head - tid_rx->baw_tail) & ZM_AGG_BAW_MASK) > 0) { //DbgPrint("head=%d, tail=%d", tid_rx->baw_head, tid_rx->baw_tail); tid_rx->baw_tail = (tid_rx->baw_tail + 1) & ZM_AGG_BAW_MASK; } tid_rx->seq_start = (tid_rx->seq_start + 1) & (4096 - 1); zmw_leave_critical_section(dev); ZM_PERFORMANCE_RX_SEQ(dev, buf); if (wd->zfcbRecv80211 != NULL) { //seq_no = zmw_rx_buf_readh(dev, buf, offset+22) >> 4; //DbgPrint("Recv indicate seq=%d\n", seq_no); //DbgPrint("1. 
seq=%d\n", seq_no); wd->zfcbRecv80211(dev, buf, addInfo); } else { zfiRecv80211(dev, buf, addInfo); } } else if (!zfAggRxEnqueue(dev, buf, tid_rx, addInfo)) { /* * duplicated packet */ return 1; } while (tid_rx->baw_head != tid_rx->baw_tail) {// && tid_rx->frame[tid_rx->baw_tail].buf) u16_t tailIndex; zmw_enter_critical_section(dev); tailIndex = tid_rx->baw_tail; pbuf = tid_rx->frame[tailIndex].buf; tid_rx->frame[tailIndex].buf = 0; if (!pbuf) { zmw_leave_critical_section(dev); break; } tid_rx->baw_tail = (tid_rx->baw_tail + 1) & ZM_AGG_BAW_MASK; tid_rx->seq_start = (tid_rx->seq_start + 1) & (4096 - 1); //if(pbuf && tid_rx->baw_size > 0) // tid_rx->baw_size--; zmw_leave_critical_section(dev); ZM_PERFORMANCE_RX_SEQ(dev, pbuf); if (wd->zfcbRecv80211 != NULL) { //seq_no = zmw_rx_buf_readh(dev, pbuf, offset+22) >> 4; //DbgPrint("Recv indicate seq=%d\n", seq_no); //DbgPrint("1. seq=%d\n", seq_no); wd->zfcbRecv80211(dev, pbuf, addInfo); } else { //seq_no = zmw_rx_buf_readh(dev, pbuf, offset+22) >> 4; //DbgPrint("Recv indicate seq=%d\n", seq_no); zfiRecv80211(dev, pbuf, addInfo); } } return 1; } struct agg_tid_rx *zfAggRxGetQueue(zdev_t* dev, zbuf_t* buf) { u16_t src[3]; u16_t aid, ac, i; u16_t offset = 0; struct agg_tid_rx *tid_rx = NULL; zmw_get_wlan_dev(dev); //zmw_declare_for_critical_section(); src[0] = zmw_rx_buf_readh(dev, buf, offset+10); src[1] = zmw_rx_buf_readh(dev, buf, offset+12); src[2] = zmw_rx_buf_readh(dev, buf, offset+14); aid = zfApFindSta(dev, src); ac = (zmw_rx_buf_readh(dev, buf, 24) & 0xF); // mark by spin lock debug //zmw_enter_critical_section(dev); for (i=0; i<ZM_AGG_POOL_SIZE ; i++) { if((wd->tid_rx[i]->aid == aid) && (wd->tid_rx[i]->ac == ac)) { tid_rx = wd->tid_rx[i]; break; } } // mark by spin lock debug //zmw_leave_critical_section(dev); return tid_rx; } u16_t zfAggRxEnqueue(zdev_t* dev, zbuf_t* buf, struct agg_tid_rx *tid_rx, struct zsAdditionInfo *addInfo) { u16_t seq_no, offset = 0; u16_t q_index; s16_t index; u8_t bdropframe = 0; 
zmw_get_wlan_dev(dev); zmw_declare_for_critical_section(); ZM_BUFFER_TRACE(dev, buf) seq_no = zmw_rx_buf_readh(dev, buf, offset+22) >> 4; index = seq_no - tid_rx->seq_start; /* * sequence number wrap at 4k * -1000: check for duplicate past packet */ bdropframe = 0; if (tid_rx->seq_start > seq_no) { if ((tid_rx->seq_start > 3967) && (seq_no < 128)) { index += 4096; } else if (tid_rx->seq_start - seq_no > 70) { zmw_enter_critical_section(dev); tid_rx->sq_behind_count++; if (tid_rx->sq_behind_count > 3) { tid_rx->sq_behind_count = 0; } else { bdropframe = 1; } zmw_leave_critical_section(dev); } else { bdropframe = 1; } } else { if (seq_no - tid_rx->seq_start > 70) { zmw_enter_critical_section(dev); tid_rx->sq_exceed_count++; if (tid_rx->sq_exceed_count > 3) { tid_rx->sq_exceed_count = 0; } else { bdropframe = 1; } zmw_leave_critical_section(dev); } } if (bdropframe == 1) { /*if (wd->zfcbRecv80211 != NULL) { wd->zfcbRecv80211(dev, buf, addInfo); } else { zfiRecv80211(dev, buf, addInfo); }*/ ZM_PERFORMANCE_FREE(dev, buf); zfwBufFree(dev, buf, 0); /*zfAggRxFlush(dev, seq_no, tid_rx); tid_rx->seq_start = seq_no; index = seq_no - tid_rx->seq_start; */ //DbgPrint("Free an old packet, seq_start=%d, seq_no=%d\n", tid_rx->seq_start, seq_no); /* * duplicate past packet * happens only in simulated aggregation environment */ return 0; } else { zmw_enter_critical_section(dev); if (tid_rx->sq_exceed_count > 0){ tid_rx->sq_exceed_count--; } if (tid_rx->sq_behind_count > 0) { tid_rx->sq_behind_count--; } zmw_leave_critical_section(dev); } if (index < 0) { zfAggRxFlush(dev, seq_no, tid_rx); tid_rx->seq_start = seq_no; index = 0; } //if (index >= (ZM_AGG_BAW_SIZE - 1)) if (index >= (ZM_AGG_BAW_MASK)) { /* * queue full */ //DbgPrint("index >= 64, seq_start=%d, seq_no=%d\n", tid_rx->seq_start, seq_no); zfAggRxFlush(dev, seq_no, tid_rx); //tid_rx->seq_start = seq_no; index = seq_no - tid_rx->seq_start; if ((tid_rx->seq_start > seq_no) && (tid_rx->seq_start > 1000) && (tid_rx->seq_start - 
1000) > seq_no) { //index = seq_no - tid_rx->seq_start; index += 4096; } //index = seq_no - tid_rx->seq_start; while (index >= (ZM_AGG_BAW_MASK)) { //DbgPrint("index >= 64, seq_start=%d, seq_no=%d\n", tid_rx->seq_start, seq_no); tid_rx->seq_start = (tid_rx->seq_start + ZM_AGG_BAW_MASK) & (4096 - 1); index = seq_no - tid_rx->seq_start; if ((tid_rx->seq_start > seq_no) && (tid_rx->seq_start > 1000) && (tid_rx->seq_start - 1000) > seq_no) { index += 4096; } } } q_index = (tid_rx->baw_tail + index) & ZM_AGG_BAW_MASK; if (tid_rx->frame[q_index].buf && (((tid_rx->baw_head - tid_rx->baw_tail) & ZM_AGG_BAW_MASK) > (((q_index) - tid_rx->baw_tail) & ZM_AGG_BAW_MASK))) { ZM_PERFORMANCE_DUP(dev, tid_rx->frame[q_index].buf, buf); zfwBufFree(dev, buf, 0); //DbgPrint("Free a duplicate packet, seq_start=%d, seq_no=%d\n", tid_rx->seq_start, seq_no); //DbgPrint("head=%d, tail=%d", tid_rx->baw_head, tid_rx->baw_tail); /* * duplicate packet */ return 0; } zmw_enter_critical_section(dev); if(tid_rx->frame[q_index].buf) { zfwBufFree(dev, tid_rx->frame[q_index].buf, 0); tid_rx->frame[q_index].buf = 0; } tid_rx->frame[q_index].buf = buf; tid_rx->frame[q_index].arrivalTime = zm_agg_GetTime(); zfwMemoryCopy((void*)&tid_rx->frame[q_index].addInfo, (void*)addInfo, sizeof(struct zsAdditionInfo)); /* * for debug simulated aggregation only, * should be done in rx of ADDBA Request */ //tid_rx->addInfo = addInfo; if (((tid_rx->baw_head - tid_rx->baw_tail) & ZM_AGG_BAW_MASK) <= index) { //tid_rx->baw_size = index + 1; if (((tid_rx->baw_head - tid_rx->baw_tail) & ZM_AGG_BAW_MASK) <= //((q_index + 1) & ZM_AGG_BAW_MASK)) (((q_index) - tid_rx->baw_tail) & ZM_AGG_BAW_MASK))//tid_rx->baw_size ) tid_rx->baw_head = (q_index + 1) & ZM_AGG_BAW_MASK; } zmw_leave_critical_section(dev); /* * success */ //DbgPrint("head=%d, tail=%d, start=%d", tid_rx->baw_head, tid_rx->baw_tail, tid_rx->seq_start); return 1; } u16_t zfAggRxFlush(zdev_t* dev, u16_t seq_no, struct agg_tid_rx *tid_rx) { zbuf_t* pbuf; u16_t seq; 
struct zsAdditionInfo addInfo; zmw_get_wlan_dev(dev); zmw_declare_for_critical_section(); ZM_PERFORMANCE_RX_FLUSH(dev); while (1) { zmw_enter_critical_section(dev); if (tid_rx->baw_tail == tid_rx->baw_head) { zmw_leave_critical_section(dev); break; } pbuf = tid_rx->frame[tid_rx->baw_tail].buf; zfwMemoryCopy((void*)&addInfo, (void*)&tid_rx->frame[tid_rx->baw_tail].addInfo, sizeof(struct zsAdditionInfo)); tid_rx->frame[tid_rx->baw_tail].buf = 0; //if(pbuf && tid_rx->baw_size > 0) tid_rx->baw_size--; tid_rx->baw_tail = (tid_rx->baw_tail + 1) & ZM_AGG_BAW_MASK; tid_rx->seq_start = (tid_rx->seq_start + 1) & (4096 - 1); zmw_leave_critical_section(dev); if (pbuf) { ZM_PERFORMANCE_RX_SEQ(dev, pbuf); if (wd->zfcbRecv80211 != NULL) { seq = zmw_rx_buf_readh(dev, pbuf, 22) >> 4; //DbgPrint("Recv indicate seq=%d\n", seq); //DbgPrint("2. seq=%d\n", seq); wd->zfcbRecv80211(dev, pbuf, &addInfo); } else { seq = zmw_rx_buf_readh(dev, pbuf, 22) >> 4; //DbgPrint("Recv indicate seq=%d\n", seq); zfiRecv80211(dev, pbuf, &addInfo); } } } zmw_enter_critical_section(dev); tid_rx->baw_head = tid_rx->baw_tail = 0; zmw_leave_critical_section(dev); return 1; } /************************************************************************/ /* */ /* FUNCTION DESCRIPTION zfAggRxFreeBuf */ /* Frees all queued packets in buffer when the driver is down. */ /* The zfFreeResource() will check if the buffer is all freed. */ /* */ /* INPUTS */ /* dev : device pointer */ /* */ /* OUTPUTS */ /* ZM_SUCCESS */ /* */ /* AUTHOR */ /* Honda Atheros Communications, INC. 
2006.12 */ /* */ /************************************************************************/ u16_t zfAggRxFreeBuf(zdev_t* dev, u16_t destroy) { u16_t i; zbuf_t* buf; struct agg_tid_rx *tid_rx; TID_TX tid_tx; //struct bufInfo *buf_info; zmw_get_wlan_dev(dev); zmw_declare_for_critical_section(); for (i=0; i<ZM_AGG_POOL_SIZE; i++) { u16_t j; tid_rx = wd->tid_rx[i]; for(j=0; j <= ZM_AGG_BAW_SIZE; j++) { zmw_enter_critical_section(dev); buf = tid_rx->frame[j].buf; tid_rx->frame[j].buf = 0; zmw_leave_critical_section(dev); if (buf) { zfwBufFree(dev, buf, 0); } } #if 0 if ( tid_rx->baw_head != tid_rx->baw_tail ) { while (tid_rx->baw_head != tid_rx->baw_tail) { buf = tid_rx->frame[tid_rx->baw_tail].buf; tid_rx->frame[tid_rx->baw_tail].buf = 0; if (buf) { zfwBufFree(dev, buf, 0); zmw_enter_critical_section(dev); tid_rx->frame[tid_rx->baw_tail].buf = 0; zmw_leave_critical_section(dev); } zmw_enter_critical_section(dev); //if (tid_rx->baw_size > 0)tid_rx->baw_size--; tid_rx->baw_tail = (tid_rx->baw_tail + 1) & ZM_AGG_BAW_MASK; tid_rx->seq_start++; zmw_leave_critical_section(dev); } } #endif zmw_enter_critical_section(dev); tid_rx->seq_start = 0; tid_rx->baw_head = tid_rx->baw_tail = 0; tid_rx->aid = ZM_MAX_STA_SUPPORT; zmw_leave_critical_section(dev); #ifdef ZM_ENABLE_AGGREGATION #ifndef ZM_ENABLE_FW_BA_RETRANSMISSION //disable BAW if (tid_baw->enabled) { zm_msg1_agg(ZM_LV_0, "Device down, clear BAW queue:", i); BAW->disable(dev, tid_baw); } #endif #endif if (1 == wd->aggQPool[i]->aggQEnabled) { tid_tx = wd->aggQPool[i]; buf = zfAggTxGetVtxq(dev, tid_tx); while (buf) { zfwBufFree(dev, buf, 0); buf = zfAggTxGetVtxq(dev, tid_tx); } } if(destroy) { zfwMemFree(dev, wd->aggQPool[i], sizeof(struct aggQueue)); zfwMemFree(dev, wd->tid_rx[i], sizeof(struct agg_tid_rx)); } } #ifdef ZM_ENABLE_AGGREGATION #ifndef ZM_ENABLE_FW_BA_RETRANSMISSION //disable BAW if(destroy) zfwMemFree(dev, BAW, sizeof(struct baw_enabler)); #endif #endif return ZM_SUCCESS; } void zfAggRecvBAR(zdev_t* dev, 
zbuf_t *buf) { u16_t start_seq, len; u8_t i, bitmap[8]; len = zfwBufGetSize(dev, buf); start_seq = zmw_rx_buf_readh(dev, buf, len-2); DbgPrint("Received a BAR Control frame, start_seq=%d", start_seq>>4); /* todo: set the bitmap by reordering buffer! */ for (i=0; i<8; i++) bitmap[i]=0; zfSendBA(dev, start_seq, bitmap); } #ifdef ZM_ENABLE_AGGREGATION #ifndef ZM_ENABLE_FW_BA_RETRANSMISSION //disable BAW void zfAggTxRetransmit(zdev_t* dev, struct bufInfo *buf_info, struct aggControl *aggControl, TID_TX tid_tx) { u16_t removeLen; u16_t err; zmw_get_wlan_dev(dev); if (aggControl && (ZM_AGG_FIRST_MPDU == aggControl->ampduIndication) ) { tid_tx->bar_ssn = buf_info->baw_header->header[15]; aggControl->tid_baw->start_seq = tid_tx->bar_ssn >> 4; zm_msg1_agg(ZM_LV_0, "start seq=", tid_tx->bar_ssn >> 4); } buf_info->baw_header->header[4] |= (1 << 11); if (aggControl && aggControl->aggEnabled) { //if (wd->enableAggregation==0 && !(buf_info->baw_header->header[6]&0x1)) //{ //if (((buf_info->baw_header->header[2] & 0x3) == 2)) //{ /* Enable aggregation */ buf_info->baw_header->header[1] |= 0x20; if (ZM_AGG_LAST_MPDU == aggControl->ampduIndication) { buf_info->baw_header->header[1] |= 0x4000; } else { buf_info->baw_header->header[1] &= ~0x4000; //zm_debug_msg0("ZM_AGG_LAST_MPDU"); } //} //else { // zm_debug_msg1("no aggr, header[2]&0x3 = ",buf_info->baw_header->header[2] & 0x3) // aggControl->aggEnabled = 0; //} //} //else { // zm_debug_msg1("no aggr, wd->enableAggregation = ", wd->enableAggregation); // zm_debug_msg1("no aggr, !header[6]&0x1 = ",!(buf_info->baw_header->header[6]&0x1)); // aggControl->aggEnabled = 0; //} } /*if (aggControl->tid_baw) { struct baw_header_r header_r; header_r.header = buf_info->baw_header->header; header_r.mic = buf_info->baw_header->mic; header_r.snap = buf_info->baw_header->snap; header_r.headerLen = buf_info->baw_header->headerLen; header_r.micLen = buf_info->baw_header->micLen; header_r.snapLen = buf_info->baw_header->snapLen; header_r.removeLen = 
buf_info->baw_header->removeLen; header_r.keyIdx = buf_info->baw_header->keyIdx; BAW->insert(dev, buf_info->buf, tid_tx->bar_ssn >> 4, aggControl->tid_baw, buf_info->baw_retransmit, &header_r); }*/ if ((err = zfHpSend(dev, buf_info->baw_header->header, buf_info->baw_header->headerLen, buf_info->baw_header->snap, buf_info->baw_header->snapLen, buf_info->baw_header->mic, buf_info->baw_header->micLen, buf_info->buf, buf_info->baw_header->removeLen, ZM_EXTERNAL_ALLOC_BUF, (u8_t)tid_tx->ac, buf_info->baw_header->keyIdx)) != ZM_SUCCESS) { goto zlError; } return; zlError: zfwBufFree(dev, buf_info->buf, 0); return; } #endif //disable BAW #endif /************************************************************************/ /* */ /* FUNCTION DESCRIPTION zfAggTxSendEth */ /* Called to transmit Ethernet frame from upper elayer. */ /* */ /* INPUTS */ /* dev : device pointer */ /* buf : buffer pointer */ /* port : WLAN port, 0=>standard, 0x10-0x17=>VAP, 0x20-0x25=>WDS */ /* */ /* OUTPUTS */ /* error code */ /* */ /* AUTHOR */ /* Stephen, Honda Atheros Communications, Inc. 
2006.12 */ /* */ /************************************************************************/ u16_t zfAggTxSendEth(zdev_t* dev, zbuf_t* buf, u16_t port, u16_t bufType, u8_t flag, struct aggControl *aggControl, TID_TX tid_tx) { u16_t err; //u16_t addrTblSize; //struct zsAddrTbl addrTbl; u16_t removeLen; u16_t header[(8+30+2+18)/2]; /* ctr+(4+a1+a2+a3+2+a4)+qos+iv */ u16_t headerLen; u16_t mic[8/2]; u16_t micLen; u16_t snap[8/2]; u16_t snapLen; u16_t fragLen; u16_t frameLen; u16_t fragNum; struct zsFrag frag; u16_t i, id; u16_t da[3]; u16_t sa[3]; u8_t up; u8_t qosType, keyIdx = 0; u16_t fragOff; zmw_get_wlan_dev(dev); zmw_declare_for_critical_section(); zm_msg1_tx(ZM_LV_2, "zfTxSendEth(), port=", port); /* Get IP TOS for QoS AC and IP frag offset */ zfTxGetIpTosAndFrag(dev, buf, &up, &fragOff); #ifdef ZM_ENABLE_NATIVE_WIFI if ( wd->wlanMode == ZM_MODE_INFRASTRUCTURE ) { /* DA */ da[0] = zmw_tx_buf_readh(dev, buf, 16); da[1] = zmw_tx_buf_readh(dev, buf, 18); da[2] = zmw_tx_buf_readh(dev, buf, 20); /* SA */ sa[0] = zmw_tx_buf_readh(dev, buf, 10); sa[1] = zmw_tx_buf_readh(dev, buf, 12); sa[2] = zmw_tx_buf_readh(dev, buf, 14); } else if ( wd->wlanMode == ZM_MODE_IBSS ) { /* DA */ da[0] = zmw_tx_buf_readh(dev, buf, 4); da[1] = zmw_tx_buf_readh(dev, buf, 6); da[2] = zmw_tx_buf_readh(dev, buf, 8); /* SA */ sa[0] = zmw_tx_buf_readh(dev, buf, 10); sa[1] = zmw_tx_buf_readh(dev, buf, 12); sa[2] = zmw_tx_buf_readh(dev, buf, 14); } else if ( wd->wlanMode == ZM_MODE_AP ) { /* DA */ da[0] = zmw_tx_buf_readh(dev, buf, 4); da[1] = zmw_tx_buf_readh(dev, buf, 6); da[2] = zmw_tx_buf_readh(dev, buf, 8); /* SA */ sa[0] = zmw_tx_buf_readh(dev, buf, 16); sa[1] = zmw_tx_buf_readh(dev, buf, 18); sa[2] = zmw_tx_buf_readh(dev, buf, 20); } else { // } #else /* DA */ da[0] = zmw_tx_buf_readh(dev, buf, 0); da[1] = zmw_tx_buf_readh(dev, buf, 2); da[2] = zmw_tx_buf_readh(dev, buf, 4); /* SA */ sa[0] = zmw_tx_buf_readh(dev, buf, 6); sa[1] = zmw_tx_buf_readh(dev, buf, 8); sa[2] = zmw_tx_buf_readh(dev, 
buf, 10); #endif //Decide Key Index in ATOM, No meaning in OTUS--CWYang(m) if (wd->wlanMode == ZM_MODE_AP) { keyIdx = wd->ap.bcHalKeyIdx[port]; id = zfApFindSta(dev, da); if (id != 0xffff) { switch (wd->ap.staTable[id].encryMode) { case ZM_AES: case ZM_TKIP: #ifdef ZM_ENABLE_CENC case ZM_CENC: #endif //ZM_ENABLE_CENC keyIdx = wd->ap.staTable[id].keyIdx; break; } } } else { switch (wd->sta.encryMode) { case ZM_WEP64: case ZM_WEP128: case ZM_WEP256: keyIdx = wd->sta.keyId; break; case ZM_AES: case ZM_TKIP: if ((da[0]& 0x1)) keyIdx = 5; else keyIdx = 4; break; #ifdef ZM_ENABLE_CENC case ZM_CENC: keyIdx = wd->sta.cencKeyId; break; #endif //ZM_ENABLE_CENC } } /* Create SNAP */ removeLen = zfTxGenWlanSnap(dev, buf, snap, &snapLen); //zm_msg1_tx(ZM_LV_0, "fragOff=", fragOff); fragLen = wd->fragThreshold; frameLen = zfwBufGetSize(dev, buf); frameLen -= removeLen; #if 0 /* Create MIC */ if ( (wd->wlanMode == ZM_MODE_INFRASTRUCTURE)&& (wd->sta.encryMode == ZM_TKIP) ) { if ( frameLen > fragLen ) { micLen = zfTxGenWlanTail(dev, buf, snap, snapLen, mic); } else { /* append MIC by HMAC */ micLen = 8; } } else { micLen = 0; } #else if ( frameLen > fragLen ) { micLen = zfTxGenWlanTail(dev, buf, snap, snapLen, mic); } else { /* append MIC by HMAC */ micLen = 0; } #endif /* Access Category */ if (wd->wlanMode == ZM_MODE_AP) { zfApGetStaQosType(dev, da, &qosType); if (qosType == 0) { up = 0; } } else if (wd->wlanMode == ZM_MODE_INFRASTRUCTURE) { if (wd->sta.wmeConnected == 0) { up = 0; } } else { /* TODO : STA QoS control field */ up = 0; } /* Assign sequence number */ zmw_enter_critical_section(dev); frag.seq[0] = ((wd->seq[zcUpToAc[up&0x7]]++) << 4); if (aggControl && (ZM_AGG_FIRST_MPDU == aggControl->ampduIndication) ) { tid_tx->bar_ssn = frag.seq[0]; zm_msg1_agg(ZM_LV_0, "start seq=", tid_tx->bar_ssn >> 4); } //tid_tx->baw_buf[tid_tx->baw_head-1].baw_seq=frag.seq[0]; zmw_leave_critical_section(dev); frag.buf[0] = buf; frag.bufType[0] = bufType; frag.flag[0] = flag; fragNum = 1; 
for (i=0; i<fragNum; i++) { /* Create WLAN header(Control Setting + 802.11 header + IV) */ if (up !=0 ) zm_debug_msg1("up not 0, up=",up); headerLen = zfTxGenWlanHeader(dev, frag.buf[i], header, frag.seq[i], frag.flag[i], snapLen+micLen, removeLen, port, da, sa, up, &micLen, snap, snapLen, aggControl); /* Get buffer DMA address */ //if ((addrTblSize = zfwBufMapDma(dev, frag.buf[i], &addrTbl)) == 0) //if ((addrTblSize = zfwMapTxDma(dev, frag.buf[i], &addrTbl)) == 0) //{ // err = ZM_ERR_BUFFER_DMA_ADDR; // goto zlError; //} /* Flush buffer on cache */ //zfwBufFlush(dev, frag.buf[i]); #if 0 zm_msg1_tx(ZM_LV_0, "headerLen=", headerLen); zm_msg1_tx(ZM_LV_0, "snapLen=", snapLen); zm_msg1_tx(ZM_LV_0, "micLen=", micLen); zm_msg1_tx(ZM_LV_0, "removeLen=", removeLen); zm_msg1_tx(ZM_LV_0, "addrTblSize=", addrTblSize); zm_msg1_tx(ZM_LV_0, "frag.bufType[0]=", frag.bufType[0]); #endif fragLen = zfwBufGetSize(dev, frag.buf[i]); if ((da[0]&0x1) == 0) { wd->commTally.txUnicastFrm++; wd->commTally.txUnicastOctets += (fragLen+snapLen); } else if ((da[0]& 0x1)) { wd->commTally.txBroadcastFrm++; wd->commTally.txBroadcastOctets += (fragLen+snapLen); } else { wd->commTally.txMulticastFrm++; wd->commTally.txMulticastOctets += (fragLen+snapLen); } wd->ledStruct.txTraffic++; #if 0 //Who care this? 
if ( (i)&&(i == (fragNum-1)) ) { wd->trafTally.txDataByteCount -= micLen; } #endif /*if (aggControl->tid_baw && aggControl->aggEnabled) { struct baw_header_r header_r; header_r.header = header; header_r.mic = mic; header_r.snap = snap; header_r.headerLen = headerLen; header_r.micLen = micLen; header_r.snapLen = snapLen; header_r.removeLen = removeLen; header_r.keyIdx = keyIdx; BAW->insert(dev, buf, tid_tx->bar_ssn >> 4, aggControl->tid_baw, 0, &header_r); }*/ if ((err = zfHpSend(dev, header, headerLen, snap, snapLen, mic, micLen, frag.buf[i], removeLen, frag.bufType[i], zcUpToAc[up&0x7], keyIdx)) != ZM_SUCCESS) { goto zlError; } continue; zlError: if (frag.bufType[i] == ZM_EXTERNAL_ALLOC_BUF) { zfwBufFree(dev, frag.buf[i], err); } else if (frag.bufType[i] == ZM_INTERNAL_ALLOC_BUF) { zfwBufFree(dev, frag.buf[i], 0); } else { zm_assert(0); } } /* for (i=0; i<fragNum; i++) */ return ZM_SUCCESS; } /* * zfAggSendADDBA() refers zfSendMmFrame() in cmm.c */ u16_t zfAggSendAddbaRequest(zdev_t* dev, u16_t *dst, u16_t ac, u16_t up) { zbuf_t* buf; //u16_t addrTblSize; //struct zsAddrTbl addrTbl; //u16_t err; u16_t offset = 0; u16_t hlen = 32; u16_t header[(24+25+1)/2]; u16_t vap = 0; u16_t i; u8_t encrypt = 0; //zmw_get_wlan_dev(dev); //zmw_declare_for_critical_section(); /* * TBD : Maximum size of managment frame */ if ((buf = zfwBufAllocate(dev, 1024)) == NULL) { zm_msg0_mm(ZM_LV_0, "Alloc mm buf Fail!"); return ZM_SUCCESS; } /* * Reserve room for wlan header */ offset = hlen; /* * add addba frame body */ offset = zfAggSetAddbaFrameBody(dev, buf, offset, ac, up); zfwBufSetSize(dev, buf, offset); /* * Copy wlan header */ zfAggGenAddbaHeader(dev, dst, header, offset-hlen, buf, vap, encrypt); for (i=0; i<(hlen>>1); i++) { zmw_tx_buf_writeh(dev, buf, i*2, header[i]); } /* Get buffer DMA address */ //if ((addrTblSize = zfwBufMapDma(dev, buf, &addrTbl)) == 0) //if ((addrTblSize = zfwMapTxDma(dev, buf, &addrTbl)) == 0) //{ // goto zlError; //} //zm_msg2_mm(ZM_LV_2, "offset=", 
offset); //zm_msg2_mm(ZM_LV_2, "hlen=", hlen); //zm_msg2_mm(ZM_LV_2, "addrTblSize=", addrTblSize); //zm_msg2_mm(ZM_LV_2, "addrTbl.len[0]=", addrTbl.len[0]); //zm_msg2_mm(ZM_LV_2, "addrTbl.physAddrl[0]=", addrTbl.physAddrl[0]); //zm_msg2_mm(ZM_LV_2, "buf->data=", buf->data); #if 0 if ((err = zfHpSend(dev, NULL, 0, NULL, 0, NULL, 0, buf, 0, ZM_INTERNAL_ALLOC_BUF, 0, 0xff)) != ZM_SUCCESS) { goto zlError; } #else zfPutVmmq(dev, buf); zfPushVtxq(dev); #endif return ZM_SUCCESS; } u16_t zfAggSetAddbaFrameBody(zdev_t* dev, zbuf_t* buf, u16_t offset, u16_t ac, u16_t up) { u16_t ba_parameter, start_seq; zmw_get_wlan_dev(dev); //zmw_declare_for_critical_section(); /* * ADDBA Request frame body */ /* * Category */ zmw_tx_buf_writeb(dev, buf, offset++, 3); /* * Action details = 0 */ zmw_tx_buf_writeb(dev, buf, offset++, ZM_WLAN_ADDBA_REQUEST_FRAME); /* * Dialog Token = nonzero * TBD: define how to get dialog token? */ zmw_tx_buf_writeb(dev, buf, offset++, 2); /* * Block Ack parameter set * BA policy = 1 for immediate BA, 0 for delayed BA * TID(4bits) & buffer size(4bits) (TID=up & buffer size=0x80) * TBD: how to get buffer size? 
* ¢z¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢s¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢s¢w¢w¢w¢w¢w¢w¢w¢w¢s¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢{ * ¢x B0 ¢x B1 ¢x B2 B5 ¢x B6 B15 ¢x * ¢u¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢q¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢q¢w¢w¢w¢w¢w¢w¢w¢w¢q¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢t * ¢x Reserved ¢x BA policy ¢x TID ¢x Buffer size ¢x * ¢|¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢r¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢r¢w¢w¢w¢w¢w¢w¢w¢w¢r¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢} */ ba_parameter = 1 << 12; // buffer size = 0x40(64) ba_parameter |= up << 2; // tid = up ba_parameter |= 2; // ba policy = 1 zmw_tx_buf_writeh(dev, buf, offset, ba_parameter); offset+=2; /* * BA timeout value */ zmw_tx_buf_writeh(dev, buf, offset, 0); offset+=2; /* * BA starting sequence number * ¢z¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢s¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢{ * ¢x B0 B3 ¢x B4 B15 ¢x * ¢u¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢q¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢t * ¢x Frag num(0) ¢x BA starting seq num ¢x * ¢|¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢r¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢} */ start_seq = ((wd->seq[ac]) << 4) & 0xFFF0; zmw_tx_buf_writeh(dev, buf, offset, start_seq); offset+=2; return offset; } u16_t zfAggGenAddbaHeader(zdev_t* dev, u16_t* dst, u16_t* header, u16_t len, zbuf_t* buf, u16_t vap, u8_t encrypt) { u8_t hlen = 32; // MAC ctrl + PHY ctrl + 802.11 MM header //u8_t frameType = ZM_WLAN_FRAME_TYPE_ACTION; zmw_get_wlan_dev(dev); zmw_declare_for_critical_section(); /* * Generate control setting */ //bodyLen = zfwBufGetSize(dev, buf); header[0] = 24+len+4; //Length header[1] = 0x8; //MAC control, backoff + (ack) #if 0 /* CCK 1M */ header[2] = 0x0f00; //PHY control L header[3] = 0x0000; //PHY control H #else /* OFDM 6M */ header[2] = 0x0f01; //PHY control L header[3] = 0x000B; //PHY control H #endif /* * Generate WLAN header * Frame control frame type and subtype */ header[4+0] = ZM_WLAN_FRAME_TYPE_ACTION; /* * Duration */ header[4+1] = 0; if (wd->wlanMode == ZM_MODE_INFRASTRUCTURE) { header[4+8] = wd->sta.bssid[0]; header[4+9] = wd->sta.bssid[1]; header[4+10] = 
wd->sta.bssid[2];
	} else if ( wd->wlanMode == ZM_MODE_PSEUDO) {
		/* Address 3 = 00:00:00:00:00:00 */
		header[4+8] = 0;
		header[4+9] = 0;
		header[4+10] = 0;
	} else if ( wd->wlanMode == ZM_MODE_IBSS ) {
		header[4+8] = wd->sta.bssid[0];
		header[4+9] = wd->sta.bssid[1];
		header[4+10] = wd->sta.bssid[2];
	} else if (wd->wlanMode == ZM_MODE_AP) {
		/* Address 3 = BSSID; VAP index goes into the high byte of word 2 */
		header[4+8] = wd->macAddr[0];
		header[4+9] = wd->macAddr[1];
		header[4+10] = wd->macAddr[2] + (vap<<8);
	}

	/* Address 1 = DA */
	header[4+2] = dst[0];
	header[4+3] = dst[1];
	header[4+4] = dst[2];

	/* Address 2 = SA */
	header[4+5] = wd->macAddr[0];
	header[4+6] = wd->macAddr[1];
	if (wd->wlanMode == ZM_MODE_AP)
	{
		header[4+7] = wd->macAddr[2] + (vap<<8);
	}
	else
	{
		header[4+7] = wd->macAddr[2];
	}

	/* Sequence Control: management sequence counter, fragment number 0 */
	zmw_enter_critical_section(dev);
	header[4+11] = ((wd->mmseq++)<<4);
	zmw_leave_critical_section(dev);

	return hlen;
}

/*
 * Dispatch a received 802.11 Action frame by its Category field
 * (frame byte 24).  Only the Block Ack category is handled here;
 * other categories are silently ignored.
 */
u16_t zfAggProcessAction(zdev_t* dev, zbuf_t* buf)
{
	u16_t category;

	//zmw_get_wlan_dev(dev);

	//zmw_declare_for_critical_section();

	category = zmw_rx_buf_readb(dev, buf, 24);

	switch (category)
	{
	case ZM_WLAN_BLOCK_ACK_ACTION_FRAME:
		zfAggBlockAckActionFrame(dev, buf);
		break;
	}

	return ZM_SUCCESS;
}

/*
 * Dispatch a Block Ack category Action frame by its Action field
 * (frame byte 25): ADDBA Request, ADDBA Response or DELBA.
 * The dispatch body is compiled out unless ZM_ENABLE_AGGREGATION
 * is defined, in which case the frame is read but ignored.
 */
u16_t zfAggBlockAckActionFrame(zdev_t* dev, zbuf_t* buf)
{
	u8_t action;

	//zmw_get_wlan_dev(dev);

	//zmw_declare_for_critical_section();

	action = zmw_rx_buf_readb(dev, buf, 25);

#ifdef ZM_ENABLE_AGGREGATION
	switch (action)
	{
	case ZM_WLAN_ADDBA_REQUEST_FRAME:
		zm_msg0_agg(ZM_LV_0, "Received BA Action frame is ADDBA request");
		zfAggRecvAddbaRequest(dev, buf);
		break;
	case ZM_WLAN_ADDBA_RESPONSE_FRAME:
		zm_msg0_agg(ZM_LV_0, "Received BA Action frame is ADDBA response");
		zfAggRecvAddbaResponse(dev, buf);
		break;
	case ZM_WLAN_DELBA_FRAME:
		zfAggRecvDelba(dev, buf);
		break;
	}
#endif

	return ZM_SUCCESS;
}

/*
 * Handle a received ADDBA Request: parse the frame body into an
 * aggBaFrameParameter, send the ADDBA Response, and bind an RX
 * reorder (TID) slot for the requesting station.
 */
u16_t zfAggRecvAddbaRequest(zdev_t* dev, zbuf_t* buf)
{
	//u16_t dialog;
	struct aggBaFrameParameter bf;
	u16_t i;

	//zmw_get_wlan_dev(dev);

	//zmw_declare_for_critical_section();

	bf.buf = buf;
	/* Dialog Token is at frame offset 26 */
	bf.dialog =
zmw_rx_buf_readb(dev, buf, 26); /* * ba parameter set */ bf.ba_parameter = zmw_rx_buf_readh(dev, buf, 27); bf.ba_policy = (bf.ba_parameter >> 1) & 1; bf.tid = (bf.ba_parameter >> 2) & 0xF; bf.buffer_size = (bf.ba_parameter >> 6); /* * BA timeout value */ bf.ba_timeout = zmw_rx_buf_readh(dev, buf, 29); /* * BA starting sequence number */ bf.ba_start_seq = zmw_rx_buf_readh(dev, buf, 31) >> 4; i=26; while(i < 32) { zm_debug_msg2("Recv ADDBA Req:", zmw_rx_buf_readb(dev,buf,i)); i++; } zfAggSendAddbaResponse(dev, &bf); zfAggAddbaSetTidRx(dev, buf, &bf); return ZM_SUCCESS; } u16_t zfAggAddbaSetTidRx(zdev_t* dev, zbuf_t* buf, struct aggBaFrameParameter *bf) { u16_t i, ac, aid, fragOff; u16_t src[3]; u16_t offset = 0; u8_t up; struct agg_tid_rx *tid_rx = NULL; zmw_get_wlan_dev(dev); zmw_declare_for_critical_section(); src[0] = zmw_rx_buf_readh(dev, buf, offset+10); src[1] = zmw_rx_buf_readh(dev, buf, offset+12); src[2] = zmw_rx_buf_readh(dev, buf, offset+14); aid = zfApFindSta(dev, src); zfTxGetIpTosAndFrag(dev, buf, &up, &fragOff); ac = zcUpToAc[up&0x7] & 0x3; ac = bf->tid; for (i=0; i<ZM_AGG_POOL_SIZE ; i++) { if((wd->tid_rx[i]->aid == aid) && (wd->tid_rx[i]->ac == ac)) { tid_rx = wd->tid_rx[i]; break; } } if (!tid_rx) { for (i=0; i<ZM_AGG_POOL_SIZE; i++) { if (wd->tid_rx[i]->aid == ZM_MAX_STA_SUPPORT) { tid_rx = wd->tid_rx[i]; break; } } if (!tid_rx) return 0; } zmw_enter_critical_section(dev); tid_rx->aid = aid; tid_rx->ac = ac; tid_rx->addBaExchangeStatusCode = ZM_AGG_ADDBA_RESPONSE; tid_rx->seq_start = bf->ba_start_seq; tid_rx->baw_head = tid_rx->baw_tail = 0; tid_rx->sq_exceed_count = tid_rx->sq_behind_count = 0; zmw_leave_critical_section(dev); return 0; } u16_t zfAggRecvAddbaResponse(zdev_t* dev, zbuf_t* buf) { u16_t i,ac, aid=0; u16_t src[3]; struct aggBaFrameParameter bf; zmw_get_wlan_dev(dev); //zmw_declare_for_critical_section(); src[0] = zmw_rx_buf_readh(dev, buf, 10); src[1] = zmw_rx_buf_readh(dev, buf, 12); src[2] = zmw_rx_buf_readh(dev, buf, 14); if 
(wd->wlanMode == ZM_MODE_AP) aid = zfApFindSta(dev, src); bf.buf = buf; bf.dialog = zmw_rx_buf_readb(dev, buf, 26); bf.status_code = zmw_rx_buf_readh(dev, buf, 27); if (!bf.status_code) { wd->addbaComplete=1; } /* * ba parameter set */ bf.ba_parameter = zmw_rx_buf_readh(dev, buf, 29); bf.ba_policy = (bf.ba_parameter >> 1) & 1; bf.tid = (bf.ba_parameter >> 2) & 0xF; bf.buffer_size = (bf.ba_parameter >> 6); /* * BA timeout value */ bf.ba_timeout = zmw_rx_buf_readh(dev, buf, 31); i=26; while(i < 32) { zm_debug_msg2("Recv ADDBA Rsp:", zmw_rx_buf_readb(dev,buf,i)); i++; } ac = zcUpToAc[bf.tid&0x7] & 0x3; //zmw_enter_critical_section(dev); //wd->aggSta[aid].aggFlag[ac] = 0; //zmw_leave_critical_section(dev); return ZM_SUCCESS; } u16_t zfAggRecvDelba(zdev_t* dev, zbuf_t* buf) { //zmw_get_wlan_dev(dev); //zmw_declare_for_critical_section(); return ZM_SUCCESS; } u16_t zfAggSendAddbaResponse(zdev_t* dev, struct aggBaFrameParameter *bf) { zbuf_t* buf; //u16_t addrTblSize; //struct zsAddrTbl addrTbl; //u16_t err; u16_t offset = 0; u16_t hlen = 32; u16_t header[(24+25+1)/2]; u16_t vap = 0; u16_t i; u8_t encrypt = 0; u16_t dst[3]; //zmw_get_wlan_dev(dev); //zmw_declare_for_critical_section(); /* * TBD : Maximum size of managment frame */ if ((buf = zfwBufAllocate(dev, 1024)) == NULL) { zm_msg0_mm(ZM_LV_0, "Alloc mm buf Fail!"); return ZM_SUCCESS; } /* * Reserve room for wlan header */ offset = hlen; /* * add addba frame body */ offset = zfAggSetAddbaResponseFrameBody(dev, buf, bf, offset); zfwBufSetSize(dev, buf, offset); /* * Copy wlan header */ dst[0] = zmw_rx_buf_readh(dev, bf->buf, 10); dst[1] = zmw_rx_buf_readh(dev, bf->buf, 12); dst[2] = zmw_rx_buf_readh(dev, bf->buf, 14); zfAggGenAddbaHeader(dev, dst, header, offset-hlen, buf, vap, encrypt); for (i=0; i<(hlen>>1); i++) { zmw_tx_buf_writeh(dev, buf, i*2, header[i]); } /* Get buffer DMA address */ //if ((addrTblSize = zfwBufMapDma(dev, buf, &addrTbl)) == 0) //if ((addrTblSize = zfwMapTxDma(dev, buf, &addrTbl)) == 0) //{ // 
goto zlError; //} //zm_msg2_mm(ZM_LV_2, "offset=", offset); //zm_msg2_mm(ZM_LV_2, "hlen=", hlen); //zm_msg2_mm(ZM_LV_2, "addrTblSize=", addrTblSize); //zm_msg2_mm(ZM_LV_2, "addrTbl.len[0]=", addrTbl.len[0]); //zm_msg2_mm(ZM_LV_2, "addrTbl.physAddrl[0]=", addrTbl.physAddrl[0]); //zm_msg2_mm(ZM_LV_2, "buf->data=", buf->data); #if 0 if ((err = zfHpSend(dev, NULL, 0, NULL, 0, NULL, 0, buf, 0, ZM_INTERNAL_ALLOC_BUF, 0, 0xff)) != ZM_SUCCESS) { goto zlError; } #else zfPutVmmq(dev, buf); zfPushVtxq(dev); #endif //zfAggSendAddbaRequest(dev, dst, zcUpToAc[bf->tid&0x7] & 0x3, bf->tid); return ZM_SUCCESS; } u16_t zfAggSetAddbaResponseFrameBody(zdev_t* dev, zbuf_t* buf, struct aggBaFrameParameter *bf, u16_t offset) { //zmw_get_wlan_dev(dev); //zmw_declare_for_critical_section(); /* * ADDBA Request frame body */ /* * Category */ zmw_tx_buf_writeb(dev, buf, offset++, 3); /* * Action details = 0 */ zmw_tx_buf_writeb(dev, buf, offset++, ZM_WLAN_ADDBA_RESPONSE_FRAME); /* * Dialog Token = nonzero */ zmw_tx_buf_writeb(dev, buf, offset++, bf->dialog); /* * Status code */ zmw_tx_buf_writeh(dev, buf, offset, 0); offset+=2; /* * Block Ack parameter set * BA policy = 1 for immediate BA, 0 for delayed BA * TID(4bits) & buffer size(4bits) (TID=0x1 & buffer size=0x80) * TBD: how to get TID number and buffer size? 
* ¢z¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢s¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢s¢w¢w¢w¢w¢w¢w¢w¢w¢s¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢{ * ¢x B0 ¢x B1 ¢x B2 B5 ¢x B6 B15 ¢x * ¢u¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢q¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢q¢w¢w¢w¢w¢w¢w¢w¢w¢q¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢t * ¢x Reserved ¢x BA policy ¢x TID ¢x Buffer size ¢x * ¢|¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢r¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢r¢w¢w¢w¢w¢w¢w¢w¢w¢r¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢} */ zmw_tx_buf_writeh(dev, buf, offset, bf->ba_parameter); offset+=2; /* * BA timeout value */ zmw_tx_buf_writeh(dev, buf, offset, bf->ba_timeout); offset+=2; return offset; } void zfAggInvokeBar(zdev_t* dev, TID_TX tid_tx) { struct aggBarControl aggBarControl; //zmw_get_wlan_dev(dev); //zmw_declare_for_critical_section(); //bar_control = aggBarControl->tid_info << 12 | aggBarControl->compressed_bitmap << 2 // | aggBarControl->multi_tid << 1 | aggBarControl->bar_ack_policy; aggBarControl.bar_ack_policy = 0; aggBarControl.multi_tid = 0; aggBarControl.compressed_bitmap = 0; aggBarControl.tid_info = tid_tx->tid; zfAggSendBar(dev, tid_tx, &aggBarControl); return; } /* * zfAggSendBar() refers zfAggSendAddbaRequest() */ u16_t zfAggSendBar(zdev_t* dev, TID_TX tid_tx, struct aggBarControl *aggBarControl) { zbuf_t* buf; //u16_t addrTblSize; //struct zsAddrTbl addrTbl; //u16_t err; u16_t offset = 0; u16_t hlen = 16+8; /* mac header + control headers*/ u16_t header[(8+24+1)/2]; u16_t vap = 0; u16_t i; u8_t encrypt = 0; //zmw_get_wlan_dev(dev); //zmw_declare_for_critical_section(); /* * TBD : Maximum size of managment frame */ if ((buf = zfwBufAllocate(dev, 1024)) == NULL) { zm_msg0_mm(ZM_LV_0, "Alloc mm buf Fail!"); return ZM_SUCCESS; } /* * Reserve room for wlan header */ offset = hlen; /* * add addba frame body */ offset = zfAggSetBarBody(dev, buf, offset, tid_tx, aggBarControl); zfwBufSetSize(dev, buf, offset); /* * Copy wlan header */ zfAggGenBarHeader(dev, tid_tx->dst, header, offset-hlen, buf, vap, encrypt); for (i=0; i<(hlen>>1); i++) { zmw_tx_buf_writeh(dev, buf, i*2, header[i]); } /* Get buffer 
DMA address */ //if ((addrTblSize = zfwBufMapDma(dev, buf, &addrTbl)) == 0) //if ((addrTblSize = zfwMapTxDma(dev, buf, &addrTbl)) == 0) //{ // goto zlError; //} //zm_msg2_mm(ZM_LV_2, "offset=", offset); //zm_msg2_mm(ZM_LV_2, "hlen=", hlen); //zm_msg2_mm(ZM_LV_2, "addrTblSize=", addrTblSize); //zm_msg2_mm(ZM_LV_2, "addrTbl.len[0]=", addrTbl.len[0]); //zm_msg2_mm(ZM_LV_2, "addrTbl.physAddrl[0]=", addrTbl.physAddrl[0]); //zm_msg2_mm(ZM_LV_2, "buf->data=", buf->data); #if 0 if ((err = zfHpSend(dev, NULL, 0, NULL, 0, NULL, 0, buf, 0, ZM_INTERNAL_ALLOC_BUF, 0, 0xff)) != ZM_SUCCESS) { goto zlError; } #else zfPutVmmq(dev, buf); zfPushVtxq(dev); #endif return ZM_SUCCESS; } u16_t zfAggSetBarBody(zdev_t* dev, zbuf_t* buf, u16_t offset, TID_TX tid_tx, struct aggBarControl *aggBarControl) { u16_t bar_control, start_seq; //zmw_get_wlan_dev(dev); //zmw_declare_for_critical_section(); /* * BAR Control frame body */ /* * BAR Control Field * ¢z¢w¢w¢w¢w¢w¢w¢w¢w¢w¢s¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢s¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢s¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢s¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢{ * ¢x B0 ¢x B1 ¢x B2 ¢x B3 B11 ¢x B12 B15 ¢x * ¢u¢w¢w¢w¢w¢w¢w¢w¢w¢w¢q¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢q¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢q¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢q¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢t * ¢x BAR Ack ¢x Multi-TID ¢x Compressed ¢x Reserved ¢x TID_INFO ¢x * ¢x Policy ¢x ¢x Bitmap ¢x ¢x ¢x * ¢|¢w¢w¢w¢w¢w¢w¢w¢w¢w¢r¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢r¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢r¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢r¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢} */ bar_control = aggBarControl->tid_info << 12 | aggBarControl->compressed_bitmap << 2 | aggBarControl->multi_tid << 1 | aggBarControl->bar_ack_policy; zmw_tx_buf_writeh(dev, buf, offset, bar_control); offset+=2; if (0 == aggBarControl->multi_tid) { /* * BA starting sequence number * ¢z¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢s¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢{ * ¢x B0 B3 ¢x B4 B15 ¢x * ¢u¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢q¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢t * ¢x Frag num(0) ¢x BA starting seq num ¢x * 
¢|¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢r¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢w¢} */ start_seq = (tid_tx->bar_ssn << 4) & 0xFFF0; zmw_tx_buf_writeh(dev, buf, offset, start_seq); offset+=2; } if (1 == aggBarControl->multi_tid && 1 == aggBarControl->compressed_bitmap) { /* multi-tid BlockAckReq variant, not implemented*/ } return offset; } u16_t zfAggGenBarHeader(zdev_t* dev, u16_t* dst, u16_t* header, u16_t len, zbuf_t* buf, u16_t vap, u8_t encrypt) { u8_t hlen = 16+8; // MAC ctrl + PHY ctrl + 802.11 MM header //u8_t frameType = ZM_WLAN_FRAME_TYPE_ACTION; zmw_get_wlan_dev(dev); zmw_declare_for_critical_section(); /* * Generate control setting */ //bodyLen = zfwBufGetSize(dev, buf); header[0] = 16+len+4; //Length header[1] = 0x8; //MAC control, backoff + (ack) #if 1 /* CCK 1M */ header[2] = 0x0f00; //PHY control L header[3] = 0x0000; //PHY control H #else /* CCK 6M */ header[2] = 0x0f01; //PHY control L header[3] = 0x000B; //PHY control H #endif /* * Generate WLAN header * Frame control frame type and subtype */ header[4+0] = ZM_WLAN_FRAME_TYPE_BAR; /* * Duration */ header[4+1] = 0; /* Address 1 = DA */ header[4+2] = dst[0]; header[4+3] = dst[1]; header[4+4] = dst[2]; /* Address 2 = SA */ header[4+5] = wd->macAddr[0]; header[4+6] = wd->macAddr[1]; if (wd->wlanMode == ZM_MODE_AP) { #ifdef ZM_VAPMODE_MULTILE_SSID header[4+7] = wd->macAddr[2]; //Multiple SSID #else header[4+7] = wd->macAddr[2] + (vap<<8); //VAP #endif } else { header[4+7] = wd->macAddr[2]; } /* Sequence Control */ zmw_enter_critical_section(dev); header[4+11] = ((wd->mmseq++)<<4); zmw_leave_critical_section(dev); return hlen; }
gpl-2.0
kumajaya/android_kernel_samsung_universal5422
arch/sh/kernel/dumpstack.c
701
3462
/* * Copyright (C) 1991, 1992 Linus Torvalds * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs * Copyright (C) 2009 Matt Fleming * Copyright (C) 2002 - 2012 Paul Mundt * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ #include <linux/kallsyms.h> #include <linux/ftrace.h> #include <linux/debug_locks.h> #include <linux/kdebug.h> #include <linux/export.h> #include <linux/uaccess.h> #include <asm/unwinder.h> #include <asm/stacktrace.h> void dump_mem(const char *str, unsigned long bottom, unsigned long top) { unsigned long p; int i; printk("%s(0x%08lx to 0x%08lx)\n", str, bottom, top); for (p = bottom & ~31; p < top; ) { printk("%04lx: ", p & 0xffff); for (i = 0; i < 8; i++, p += 4) { unsigned int val; if (p < bottom || p >= top) printk(" "); else { if (__get_user(val, (unsigned int __user *)p)) { printk("\n"); return; } printk("%08x ", val); } } printk("\n"); } } void printk_address(unsigned long address, int reliable) { printk(" [<%p>] %s%pS\n", (void *) address, reliable ? "" : "? 
", (void *) address); } #ifdef CONFIG_FUNCTION_GRAPH_TRACER static void print_ftrace_graph_addr(unsigned long addr, void *data, const struct stacktrace_ops *ops, struct thread_info *tinfo, int *graph) { struct task_struct *task = tinfo->task; unsigned long ret_addr; int index = task->curr_ret_stack; if (addr != (unsigned long)return_to_handler) return; if (!task->ret_stack || index < *graph) return; index -= *graph; ret_addr = task->ret_stack[index].ret; ops->address(data, ret_addr, 1); (*graph)++; } #else static inline void print_ftrace_graph_addr(unsigned long addr, void *data, const struct stacktrace_ops *ops, struct thread_info *tinfo, int *graph) { } #endif void stack_reader_dump(struct task_struct *task, struct pt_regs *regs, unsigned long *sp, const struct stacktrace_ops *ops, void *data) { struct thread_info *context; int graph = 0; context = (struct thread_info *) ((unsigned long)sp & (~(THREAD_SIZE - 1))); while (!kstack_end(sp)) { unsigned long addr = *sp++; if (__kernel_text_address(addr)) { ops->address(data, addr, 1); print_ftrace_graph_addr(addr, data, ops, context, &graph); } } } static int print_trace_stack(void *data, char *name) { printk("%s <%s> ", (char *)data, name); return 0; } /* * Print one address/symbol entries per line. 
*/ static void print_trace_address(void *data, unsigned long addr, int reliable) { printk(data); printk_address(addr, reliable); } static const struct stacktrace_ops print_trace_ops = { .stack = print_trace_stack, .address = print_trace_address, }; void show_trace(struct task_struct *tsk, unsigned long *sp, struct pt_regs *regs) { if (regs && user_mode(regs)) return; printk("\nCall trace:\n"); unwind_stack(tsk, regs, sp, &print_trace_ops, ""); printk("\n"); if (!tsk) tsk = current; debug_show_held_locks(tsk); } void show_stack(struct task_struct *tsk, unsigned long *sp) { unsigned long stack; if (!tsk) tsk = current; if (tsk == current) sp = (unsigned long *)current_stack_pointer; else sp = (unsigned long *)tsk->thread.sp; stack = (unsigned long)sp; dump_mem("Stack: ", stack, THREAD_SIZE + (unsigned long)task_stack_page(tsk)); show_trace(tsk, sp, NULL); }
gpl-2.0
xuhuashan/imx6q
linux-3.10.17-imx/arch/mips/kvm/kvm_mips_emul.c
701
47276
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * KVM/MIPS: Instruction/Exception emulation * * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. * Authors: Sanjay Lal <sanjayl@kymasys.com> */ #include <linux/errno.h> #include <linux/err.h> #include <linux/kvm_host.h> #include <linux/module.h> #include <linux/vmalloc.h> #include <linux/fs.h> #include <linux/bootmem.h> #include <linux/random.h> #include <asm/page.h> #include <asm/cacheflush.h> #include <asm/cpu-info.h> #include <asm/mmu_context.h> #include <asm/tlbflush.h> #include <asm/inst.h> #undef CONFIG_MIPS_MT #include <asm/r4kcache.h> #define CONFIG_MIPS_MT #include "kvm_mips_opcode.h" #include "kvm_mips_int.h" #include "kvm_mips_comm.h" #include "trace.h" /* * Compute the return address and do emulate branch simulation, if required. * This function should be called only in branch delay slot active. */ unsigned long kvm_compute_return_epc(struct kvm_vcpu *vcpu, unsigned long instpc) { unsigned int dspcontrol; union mips_instruction insn; struct kvm_vcpu_arch *arch = &vcpu->arch; long epc = instpc; long nextpc = KVM_INVALID_INST; if (epc & 3) goto unaligned; /* * Read the instruction */ insn.word = kvm_get_inst((uint32_t *) epc, vcpu); if (insn.word == KVM_INVALID_INST) return KVM_INVALID_INST; switch (insn.i_format.opcode) { /* * jr and jalr are in r_format format. */ case spec_op: switch (insn.r_format.func) { case jalr_op: arch->gprs[insn.r_format.rd] = epc + 8; /* Fall through */ case jr_op: nextpc = arch->gprs[insn.r_format.rs]; break; } break; /* * This group contains: * bltz_op, bgez_op, bltzl_op, bgezl_op, * bltzal_op, bgezal_op, bltzall_op, bgezall_op. 
*/ case bcond_op: switch (insn.i_format.rt) { case bltz_op: case bltzl_op: if ((long)arch->gprs[insn.i_format.rs] < 0) epc = epc + 4 + (insn.i_format.simmediate << 2); else epc += 8; nextpc = epc; break; case bgez_op: case bgezl_op: if ((long)arch->gprs[insn.i_format.rs] >= 0) epc = epc + 4 + (insn.i_format.simmediate << 2); else epc += 8; nextpc = epc; break; case bltzal_op: case bltzall_op: arch->gprs[31] = epc + 8; if ((long)arch->gprs[insn.i_format.rs] < 0) epc = epc + 4 + (insn.i_format.simmediate << 2); else epc += 8; nextpc = epc; break; case bgezal_op: case bgezall_op: arch->gprs[31] = epc + 8; if ((long)arch->gprs[insn.i_format.rs] >= 0) epc = epc + 4 + (insn.i_format.simmediate << 2); else epc += 8; nextpc = epc; break; case bposge32_op: if (!cpu_has_dsp) goto sigill; dspcontrol = rddsp(0x01); if (dspcontrol >= 32) { epc = epc + 4 + (insn.i_format.simmediate << 2); } else epc += 8; nextpc = epc; break; } break; /* * These are unconditional and in j_format. */ case jal_op: arch->gprs[31] = instpc + 8; case j_op: epc += 4; epc >>= 28; epc <<= 28; epc |= (insn.j_format.target << 2); nextpc = epc; break; /* * These are conditional and in i_format. */ case beq_op: case beql_op: if (arch->gprs[insn.i_format.rs] == arch->gprs[insn.i_format.rt]) epc = epc + 4 + (insn.i_format.simmediate << 2); else epc += 8; nextpc = epc; break; case bne_op: case bnel_op: if (arch->gprs[insn.i_format.rs] != arch->gprs[insn.i_format.rt]) epc = epc + 4 + (insn.i_format.simmediate << 2); else epc += 8; nextpc = epc; break; case blez_op: /* not really i_format */ case blezl_op: /* rt field assumed to be zero */ if ((long)arch->gprs[insn.i_format.rs] <= 0) epc = epc + 4 + (insn.i_format.simmediate << 2); else epc += 8; nextpc = epc; break; case bgtz_op: case bgtzl_op: /* rt field assumed to be zero */ if ((long)arch->gprs[insn.i_format.rs] > 0) epc = epc + 4 + (insn.i_format.simmediate << 2); else epc += 8; nextpc = epc; break; /* * And now the FPA/cp1 branch instructions. 
*/ case cop1_op: printk("%s: unsupported cop1_op\n", __func__); break; } return nextpc; unaligned: printk("%s: unaligned epc\n", __func__); return nextpc; sigill: printk("%s: DSP branch but not DSP ASE\n", __func__); return nextpc; } enum emulation_result update_pc(struct kvm_vcpu *vcpu, uint32_t cause) { unsigned long branch_pc; enum emulation_result er = EMULATE_DONE; if (cause & CAUSEF_BD) { branch_pc = kvm_compute_return_epc(vcpu, vcpu->arch.pc); if (branch_pc == KVM_INVALID_INST) { er = EMULATE_FAIL; } else { vcpu->arch.pc = branch_pc; kvm_debug("BD update_pc(): New PC: %#lx\n", vcpu->arch.pc); } } else vcpu->arch.pc += 4; kvm_debug("update_pc(): New PC: %#lx\n", vcpu->arch.pc); return er; } /* Everytime the compare register is written to, we need to decide when to fire * the timer that represents timer ticks to the GUEST. * */ enum emulation_result kvm_mips_emulate_count(struct kvm_vcpu *vcpu) { struct mips_coproc *cop0 = vcpu->arch.cop0; enum emulation_result er = EMULATE_DONE; /* If COUNT is enabled */ if (!(kvm_read_c0_guest_cause(cop0) & CAUSEF_DC)) { hrtimer_try_to_cancel(&vcpu->arch.comparecount_timer); hrtimer_start(&vcpu->arch.comparecount_timer, ktime_set(0, MS_TO_NS(10)), HRTIMER_MODE_REL); } else { hrtimer_try_to_cancel(&vcpu->arch.comparecount_timer); } return er; } enum emulation_result kvm_mips_emul_eret(struct kvm_vcpu *vcpu) { struct mips_coproc *cop0 = vcpu->arch.cop0; enum emulation_result er = EMULATE_DONE; if (kvm_read_c0_guest_status(cop0) & ST0_EXL) { kvm_debug("[%#lx] ERET to %#lx\n", vcpu->arch.pc, kvm_read_c0_guest_epc(cop0)); kvm_clear_c0_guest_status(cop0, ST0_EXL); vcpu->arch.pc = kvm_read_c0_guest_epc(cop0); } else if (kvm_read_c0_guest_status(cop0) & ST0_ERL) { kvm_clear_c0_guest_status(cop0, ST0_ERL); vcpu->arch.pc = kvm_read_c0_guest_errorepc(cop0); } else { printk("[%#lx] ERET when MIPS_SR_EXL|MIPS_SR_ERL == 0\n", vcpu->arch.pc); er = EMULATE_FAIL; } return er; } enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu) 
{ enum emulation_result er = EMULATE_DONE; kvm_debug("[%#lx] !!!WAIT!!! (%#lx)\n", vcpu->arch.pc, vcpu->arch.pending_exceptions); ++vcpu->stat.wait_exits; trace_kvm_exit(vcpu, WAIT_EXITS); if (!vcpu->arch.pending_exceptions) { vcpu->arch.wait = 1; kvm_vcpu_block(vcpu); /* We we are runnable, then definitely go off to user space to check if any * I/O interrupts are pending. */ if (kvm_check_request(KVM_REQ_UNHALT, vcpu)) { clear_bit(KVM_REQ_UNHALT, &vcpu->requests); vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN; } } return er; } /* XXXKYMA: Linux doesn't seem to use TLBR, return EMULATE_FAIL for now so that we can catch * this, if things ever change */ enum emulation_result kvm_mips_emul_tlbr(struct kvm_vcpu *vcpu) { struct mips_coproc *cop0 = vcpu->arch.cop0; enum emulation_result er = EMULATE_FAIL; uint32_t pc = vcpu->arch.pc; printk("[%#x] COP0_TLBR [%ld]\n", pc, kvm_read_c0_guest_index(cop0)); return er; } /* Write Guest TLB Entry @ Index */ enum emulation_result kvm_mips_emul_tlbwi(struct kvm_vcpu *vcpu) { struct mips_coproc *cop0 = vcpu->arch.cop0; int index = kvm_read_c0_guest_index(cop0); enum emulation_result er = EMULATE_DONE; struct kvm_mips_tlb *tlb = NULL; uint32_t pc = vcpu->arch.pc; if (index < 0 || index >= KVM_MIPS_GUEST_TLB_SIZE) { printk("%s: illegal index: %d\n", __func__, index); printk ("[%#x] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n", pc, index, kvm_read_c0_guest_entryhi(cop0), kvm_read_c0_guest_entrylo0(cop0), kvm_read_c0_guest_entrylo1(cop0), kvm_read_c0_guest_pagemask(cop0)); index = (index & ~0x80000000) % KVM_MIPS_GUEST_TLB_SIZE; } tlb = &vcpu->arch.guest_tlb[index]; #if 1 /* Probe the shadow host TLB for the entry being overwritten, if one matches, invalidate it */ kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi); #endif tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0); tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0); tlb->tlb_lo0 = kvm_read_c0_guest_entrylo0(cop0); tlb->tlb_lo1 = 
kvm_read_c0_guest_entrylo1(cop0); kvm_debug ("[%#x] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n", pc, index, kvm_read_c0_guest_entryhi(cop0), kvm_read_c0_guest_entrylo0(cop0), kvm_read_c0_guest_entrylo1(cop0), kvm_read_c0_guest_pagemask(cop0)); return er; } /* Write Guest TLB Entry @ Random Index */ enum emulation_result kvm_mips_emul_tlbwr(struct kvm_vcpu *vcpu) { struct mips_coproc *cop0 = vcpu->arch.cop0; enum emulation_result er = EMULATE_DONE; struct kvm_mips_tlb *tlb = NULL; uint32_t pc = vcpu->arch.pc; int index; #if 1 get_random_bytes(&index, sizeof(index)); index &= (KVM_MIPS_GUEST_TLB_SIZE - 1); #else index = jiffies % KVM_MIPS_GUEST_TLB_SIZE; #endif if (index < 0 || index >= KVM_MIPS_GUEST_TLB_SIZE) { printk("%s: illegal index: %d\n", __func__, index); return EMULATE_FAIL; } tlb = &vcpu->arch.guest_tlb[index]; #if 1 /* Probe the shadow host TLB for the entry being overwritten, if one matches, invalidate it */ kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi); #endif tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0); tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0); tlb->tlb_lo0 = kvm_read_c0_guest_entrylo0(cop0); tlb->tlb_lo1 = kvm_read_c0_guest_entrylo1(cop0); kvm_debug ("[%#x] COP0_TLBWR[%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx)\n", pc, index, kvm_read_c0_guest_entryhi(cop0), kvm_read_c0_guest_entrylo0(cop0), kvm_read_c0_guest_entrylo1(cop0)); return er; } enum emulation_result kvm_mips_emul_tlbp(struct kvm_vcpu *vcpu) { struct mips_coproc *cop0 = vcpu->arch.cop0; long entryhi = kvm_read_c0_guest_entryhi(cop0); enum emulation_result er = EMULATE_DONE; uint32_t pc = vcpu->arch.pc; int index = -1; index = kvm_mips_guest_tlb_lookup(vcpu, entryhi); kvm_write_c0_guest_index(cop0, index); kvm_debug("[%#x] COP0_TLBP (entryhi: %#lx), index: %d\n", pc, entryhi, index); return er; } enum emulation_result kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc, uint32_t cause, struct kvm_run *run, struct kvm_vcpu *vcpu) { struct mips_coproc 
*cop0 = vcpu->arch.cop0; enum emulation_result er = EMULATE_DONE; int32_t rt, rd, copz, sel, co_bit, op; uint32_t pc = vcpu->arch.pc; unsigned long curr_pc; /* * Update PC and hold onto current PC in case there is * an error and we want to rollback the PC */ curr_pc = vcpu->arch.pc; er = update_pc(vcpu, cause); if (er == EMULATE_FAIL) { return er; } copz = (inst >> 21) & 0x1f; rt = (inst >> 16) & 0x1f; rd = (inst >> 11) & 0x1f; sel = inst & 0x7; co_bit = (inst >> 25) & 1; /* Verify that the register is valid */ if (rd > MIPS_CP0_DESAVE) { printk("Invalid rd: %d\n", rd); er = EMULATE_FAIL; goto done; } if (co_bit) { op = (inst) & 0xff; switch (op) { case tlbr_op: /* Read indexed TLB entry */ er = kvm_mips_emul_tlbr(vcpu); break; case tlbwi_op: /* Write indexed */ er = kvm_mips_emul_tlbwi(vcpu); break; case tlbwr_op: /* Write random */ er = kvm_mips_emul_tlbwr(vcpu); break; case tlbp_op: /* TLB Probe */ er = kvm_mips_emul_tlbp(vcpu); break; case rfe_op: printk("!!!COP0_RFE!!!\n"); break; case eret_op: er = kvm_mips_emul_eret(vcpu); goto dont_update_pc; break; case wait_op: er = kvm_mips_emul_wait(vcpu); break; } } else { switch (copz) { case mfc_op: #ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS cop0->stat[rd][sel]++; #endif /* Get reg */ if ((rd == MIPS_CP0_COUNT) && (sel == 0)) { /* XXXKYMA: Run the Guest count register @ 1/4 the rate of the host */ vcpu->arch.gprs[rt] = (read_c0_count() >> 2); } else if ((rd == MIPS_CP0_ERRCTL) && (sel == 0)) { vcpu->arch.gprs[rt] = 0x0; #ifdef CONFIG_KVM_MIPS_DYN_TRANS kvm_mips_trans_mfc0(inst, opc, vcpu); #endif } else { vcpu->arch.gprs[rt] = cop0->reg[rd][sel]; #ifdef CONFIG_KVM_MIPS_DYN_TRANS kvm_mips_trans_mfc0(inst, opc, vcpu); #endif } kvm_debug ("[%#x] MFCz[%d][%d], vcpu->arch.gprs[%d]: %#lx\n", pc, rd, sel, rt, vcpu->arch.gprs[rt]); break; case dmfc_op: vcpu->arch.gprs[rt] = cop0->reg[rd][sel]; break; case mtc_op: #ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS cop0->stat[rd][sel]++; #endif if ((rd == MIPS_CP0_TLB_INDEX) && 
(vcpu->arch.gprs[rt] >= KVM_MIPS_GUEST_TLB_SIZE)) { printk("Invalid TLB Index: %ld", vcpu->arch.gprs[rt]); er = EMULATE_FAIL; break; } #define C0_EBASE_CORE_MASK 0xff if ((rd == MIPS_CP0_PRID) && (sel == 1)) { /* Preserve CORE number */ kvm_change_c0_guest_ebase(cop0, ~(C0_EBASE_CORE_MASK), vcpu->arch.gprs[rt]); printk("MTCz, cop0->reg[EBASE]: %#lx\n", kvm_read_c0_guest_ebase(cop0)); } else if (rd == MIPS_CP0_TLB_HI && sel == 0) { uint32_t nasid = vcpu->arch.gprs[rt] & ASID_MASK; if ((KSEGX(vcpu->arch.gprs[rt]) != CKSEG0) && ((kvm_read_c0_guest_entryhi(cop0) & ASID_MASK) != nasid)) { kvm_debug ("MTCz, change ASID from %#lx to %#lx\n", kvm_read_c0_guest_entryhi(cop0) & ASID_MASK, vcpu->arch.gprs[rt] & ASID_MASK); /* Blow away the shadow host TLBs */ kvm_mips_flush_host_tlb(1); } kvm_write_c0_guest_entryhi(cop0, vcpu->arch.gprs[rt]); } /* Are we writing to COUNT */ else if ((rd == MIPS_CP0_COUNT) && (sel == 0)) { /* Linux doesn't seem to write into COUNT, we throw an error * if we notice a write to COUNT */ /*er = EMULATE_FAIL; */ goto done; } else if ((rd == MIPS_CP0_COMPARE) && (sel == 0)) { kvm_debug("[%#x] MTCz, COMPARE %#lx <- %#lx\n", pc, kvm_read_c0_guest_compare(cop0), vcpu->arch.gprs[rt]); /* If we are writing to COMPARE */ /* Clear pending timer interrupt, if any */ kvm_mips_callbacks->dequeue_timer_int(vcpu); kvm_write_c0_guest_compare(cop0, vcpu->arch.gprs[rt]); } else if ((rd == MIPS_CP0_STATUS) && (sel == 0)) { kvm_write_c0_guest_status(cop0, vcpu->arch.gprs[rt]); /* Make sure that CU1 and NMI bits are never set */ kvm_clear_c0_guest_status(cop0, (ST0_CU1 | ST0_NMI)); #ifdef CONFIG_KVM_MIPS_DYN_TRANS kvm_mips_trans_mtc0(inst, opc, vcpu); #endif } else { cop0->reg[rd][sel] = vcpu->arch.gprs[rt]; #ifdef CONFIG_KVM_MIPS_DYN_TRANS kvm_mips_trans_mtc0(inst, opc, vcpu); #endif } kvm_debug("[%#x] MTCz, cop0->reg[%d][%d]: %#lx\n", pc, rd, sel, cop0->reg[rd][sel]); break; case dmtc_op: printk ("!!!!!!![%#lx]dmtc_op: rt: %d, rd: %d, sel: %d!!!!!!\n", 
vcpu->arch.pc, rt, rd, sel); er = EMULATE_FAIL; break; case mfmcz_op: #ifdef KVM_MIPS_DEBUG_COP0_COUNTERS cop0->stat[MIPS_CP0_STATUS][0]++; #endif if (rt != 0) { vcpu->arch.gprs[rt] = kvm_read_c0_guest_status(cop0); } /* EI */ if (inst & 0x20) { kvm_debug("[%#lx] mfmcz_op: EI\n", vcpu->arch.pc); kvm_set_c0_guest_status(cop0, ST0_IE); } else { kvm_debug("[%#lx] mfmcz_op: DI\n", vcpu->arch.pc); kvm_clear_c0_guest_status(cop0, ST0_IE); } break; case wrpgpr_op: { uint32_t css = cop0->reg[MIPS_CP0_STATUS][2] & 0xf; uint32_t pss = (cop0->reg[MIPS_CP0_STATUS][2] >> 6) & 0xf; /* We don't support any shadow register sets, so SRSCtl[PSS] == SRSCtl[CSS] = 0 */ if (css || pss) { er = EMULATE_FAIL; break; } kvm_debug("WRPGPR[%d][%d] = %#lx\n", pss, rd, vcpu->arch.gprs[rt]); vcpu->arch.gprs[rd] = vcpu->arch.gprs[rt]; } break; default: printk ("[%#lx]MachEmulateCP0: unsupported COP0, copz: 0x%x\n", vcpu->arch.pc, copz); er = EMULATE_FAIL; break; } } done: /* * Rollback PC only if emulation was unsuccessful */ if (er == EMULATE_FAIL) { vcpu->arch.pc = curr_pc; } dont_update_pc: /* * This is for special instructions whose emulation * updates the PC, so do not overwrite the PC under * any circumstances */ return er; } enum emulation_result kvm_mips_emulate_store(uint32_t inst, uint32_t cause, struct kvm_run *run, struct kvm_vcpu *vcpu) { enum emulation_result er = EMULATE_DO_MMIO; int32_t op, base, rt, offset; uint32_t bytes; void *data = run->mmio.data; unsigned long curr_pc; /* * Update PC and hold onto current PC in case there is * an error and we want to rollback the PC */ curr_pc = vcpu->arch.pc; er = update_pc(vcpu, cause); if (er == EMULATE_FAIL) return er; rt = (inst >> 16) & 0x1f; base = (inst >> 21) & 0x1f; offset = inst & 0xffff; op = (inst >> 26) & 0x3f; switch (op) { case sb_op: bytes = 1; if (bytes > sizeof(run->mmio.data)) { kvm_err("%s: bad MMIO length: %d\n", __func__, run->mmio.len); } run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(vcpu->arch. 
host_cp0_badvaddr); if (run->mmio.phys_addr == KVM_INVALID_ADDR) { er = EMULATE_FAIL; break; } run->mmio.len = bytes; run->mmio.is_write = 1; vcpu->mmio_needed = 1; vcpu->mmio_is_write = 1; *(u8 *) data = vcpu->arch.gprs[rt]; kvm_debug("OP_SB: eaddr: %#lx, gpr: %#lx, data: %#x\n", vcpu->arch.host_cp0_badvaddr, vcpu->arch.gprs[rt], *(uint8_t *) data); break; case sw_op: bytes = 4; if (bytes > sizeof(run->mmio.data)) { kvm_err("%s: bad MMIO length: %d\n", __func__, run->mmio.len); } run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(vcpu->arch. host_cp0_badvaddr); if (run->mmio.phys_addr == KVM_INVALID_ADDR) { er = EMULATE_FAIL; break; } run->mmio.len = bytes; run->mmio.is_write = 1; vcpu->mmio_needed = 1; vcpu->mmio_is_write = 1; *(uint32_t *) data = vcpu->arch.gprs[rt]; kvm_debug("[%#lx] OP_SW: eaddr: %#lx, gpr: %#lx, data: %#x\n", vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr, vcpu->arch.gprs[rt], *(uint32_t *) data); break; case sh_op: bytes = 2; if (bytes > sizeof(run->mmio.data)) { kvm_err("%s: bad MMIO length: %d\n", __func__, run->mmio.len); } run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(vcpu->arch. 
host_cp0_badvaddr); if (run->mmio.phys_addr == KVM_INVALID_ADDR) { er = EMULATE_FAIL; break; } run->mmio.len = bytes; run->mmio.is_write = 1; vcpu->mmio_needed = 1; vcpu->mmio_is_write = 1; *(uint16_t *) data = vcpu->arch.gprs[rt]; kvm_debug("[%#lx] OP_SH: eaddr: %#lx, gpr: %#lx, data: %#x\n", vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr, vcpu->arch.gprs[rt], *(uint32_t *) data); break; default: printk("Store not yet supported"); er = EMULATE_FAIL; break; } /* * Rollback PC if emulation was unsuccessful */ if (er == EMULATE_FAIL) { vcpu->arch.pc = curr_pc; } return er; } enum emulation_result kvm_mips_emulate_load(uint32_t inst, uint32_t cause, struct kvm_run *run, struct kvm_vcpu *vcpu) { enum emulation_result er = EMULATE_DO_MMIO; int32_t op, base, rt, offset; uint32_t bytes; rt = (inst >> 16) & 0x1f; base = (inst >> 21) & 0x1f; offset = inst & 0xffff; op = (inst >> 26) & 0x3f; vcpu->arch.pending_load_cause = cause; vcpu->arch.io_gpr = rt; switch (op) { case lw_op: bytes = 4; if (bytes > sizeof(run->mmio.data)) { kvm_err("%s: bad MMIO length: %d\n", __func__, run->mmio.len); er = EMULATE_FAIL; break; } run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(vcpu->arch. host_cp0_badvaddr); if (run->mmio.phys_addr == KVM_INVALID_ADDR) { er = EMULATE_FAIL; break; } run->mmio.len = bytes; run->mmio.is_write = 0; vcpu->mmio_needed = 1; vcpu->mmio_is_write = 0; break; case lh_op: case lhu_op: bytes = 2; if (bytes > sizeof(run->mmio.data)) { kvm_err("%s: bad MMIO length: %d\n", __func__, run->mmio.len); er = EMULATE_FAIL; break; } run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(vcpu->arch. 
host_cp0_badvaddr); if (run->mmio.phys_addr == KVM_INVALID_ADDR) { er = EMULATE_FAIL; break; } run->mmio.len = bytes; run->mmio.is_write = 0; vcpu->mmio_needed = 1; vcpu->mmio_is_write = 0; if (op == lh_op) vcpu->mmio_needed = 2; else vcpu->mmio_needed = 1; break; case lbu_op: case lb_op: bytes = 1; if (bytes > sizeof(run->mmio.data)) { kvm_err("%s: bad MMIO length: %d\n", __func__, run->mmio.len); er = EMULATE_FAIL; break; } run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(vcpu->arch. host_cp0_badvaddr); if (run->mmio.phys_addr == KVM_INVALID_ADDR) { er = EMULATE_FAIL; break; } run->mmio.len = bytes; run->mmio.is_write = 0; vcpu->mmio_is_write = 0; if (op == lb_op) vcpu->mmio_needed = 2; else vcpu->mmio_needed = 1; break; default: printk("Load not yet supported"); er = EMULATE_FAIL; break; } return er; } int kvm_mips_sync_icache(unsigned long va, struct kvm_vcpu *vcpu) { unsigned long offset = (va & ~PAGE_MASK); struct kvm *kvm = vcpu->kvm; unsigned long pa; gfn_t gfn; pfn_t pfn; gfn = va >> PAGE_SHIFT; if (gfn >= kvm->arch.guest_pmap_npages) { printk("%s: Invalid gfn: %#llx\n", __func__, gfn); kvm_mips_dump_host_tlbs(); kvm_arch_vcpu_dump_regs(vcpu); return -1; } pfn = kvm->arch.guest_pmap[gfn]; pa = (pfn << PAGE_SHIFT) | offset; printk("%s: va: %#lx, unmapped: %#x\n", __func__, va, CKSEG0ADDR(pa)); mips32_SyncICache(CKSEG0ADDR(pa), 32); return 0; } #define MIPS_CACHE_OP_INDEX_INV 0x0 #define MIPS_CACHE_OP_INDEX_LD_TAG 0x1 #define MIPS_CACHE_OP_INDEX_ST_TAG 0x2 #define MIPS_CACHE_OP_IMP 0x3 #define MIPS_CACHE_OP_HIT_INV 0x4 #define MIPS_CACHE_OP_FILL_WB_INV 0x5 #define MIPS_CACHE_OP_HIT_HB 0x6 #define MIPS_CACHE_OP_FETCH_LOCK 0x7 #define MIPS_CACHE_ICACHE 0x0 #define MIPS_CACHE_DCACHE 0x1 #define MIPS_CACHE_SEC 0x3 enum emulation_result kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc, uint32_t cause, struct kvm_run *run, struct kvm_vcpu *vcpu) { struct mips_coproc *cop0 = vcpu->arch.cop0; extern void (*r4k_blast_dcache) (void); extern void 
(*r4k_blast_icache) (void); enum emulation_result er = EMULATE_DONE; int32_t offset, cache, op_inst, op, base; struct kvm_vcpu_arch *arch = &vcpu->arch; unsigned long va; unsigned long curr_pc; /* * Update PC and hold onto current PC in case there is * an error and we want to rollback the PC */ curr_pc = vcpu->arch.pc; er = update_pc(vcpu, cause); if (er == EMULATE_FAIL) return er; base = (inst >> 21) & 0x1f; op_inst = (inst >> 16) & 0x1f; offset = inst & 0xffff; cache = (inst >> 16) & 0x3; op = (inst >> 18) & 0x7; va = arch->gprs[base] + offset; kvm_debug("CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n", cache, op, base, arch->gprs[base], offset); /* Treat INDEX_INV as a nop, basically issued by Linux on startup to invalidate * the caches entirely by stepping through all the ways/indexes */ if (op == MIPS_CACHE_OP_INDEX_INV) { kvm_debug ("@ %#lx/%#lx CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n", vcpu->arch.pc, vcpu->arch.gprs[31], cache, op, base, arch->gprs[base], offset); if (cache == MIPS_CACHE_DCACHE) r4k_blast_dcache(); else if (cache == MIPS_CACHE_ICACHE) r4k_blast_icache(); else { printk("%s: unsupported CACHE INDEX operation\n", __func__); return EMULATE_FAIL; } #ifdef CONFIG_KVM_MIPS_DYN_TRANS kvm_mips_trans_cache_index(inst, opc, vcpu); #endif goto done; } preempt_disable(); if (KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG0) { if (kvm_mips_host_tlb_lookup(vcpu, va) < 0) { kvm_mips_handle_kseg0_tlb_fault(va, vcpu); } } else if ((KVM_GUEST_KSEGX(va) < KVM_GUEST_KSEG0) || KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG23) { int index; /* If an entry already exists then skip */ if (kvm_mips_host_tlb_lookup(vcpu, va) >= 0) { goto skip_fault; } /* If address not in the guest TLB, then give the guest a fault, the * resulting handler will do the right thing */ index = kvm_mips_guest_tlb_lookup(vcpu, (va & VPN2_MASK) | (kvm_read_c0_guest_entryhi (cop0) & ASID_MASK)); if (index < 0) { vcpu->arch.host_cp0_entryhi = (va & VPN2_MASK); 
vcpu->arch.host_cp0_badvaddr = va; er = kvm_mips_emulate_tlbmiss_ld(cause, NULL, run, vcpu); preempt_enable(); goto dont_update_pc; } else { struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index]; /* Check if the entry is valid, if not then setup a TLB invalid exception to the guest */ if (!TLB_IS_VALID(*tlb, va)) { er = kvm_mips_emulate_tlbinv_ld(cause, NULL, run, vcpu); preempt_enable(); goto dont_update_pc; } else { /* We fault an entry from the guest tlb to the shadow host TLB */ kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, NULL, NULL); } } } else { printk ("INVALID CACHE INDEX/ADDRESS (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n", cache, op, base, arch->gprs[base], offset); er = EMULATE_FAIL; preempt_enable(); goto dont_update_pc; } skip_fault: /* XXXKYMA: Only a subset of cache ops are supported, used by Linux */ if (cache == MIPS_CACHE_DCACHE && (op == MIPS_CACHE_OP_FILL_WB_INV || op == MIPS_CACHE_OP_HIT_INV)) { flush_dcache_line(va); #ifdef CONFIG_KVM_MIPS_DYN_TRANS /* Replace the CACHE instruction, with a SYNCI, not the same, but avoids a trap */ kvm_mips_trans_cache_va(inst, opc, vcpu); #endif } else if (op == MIPS_CACHE_OP_HIT_INV && cache == MIPS_CACHE_ICACHE) { flush_dcache_line(va); flush_icache_line(va); #ifdef CONFIG_KVM_MIPS_DYN_TRANS /* Replace the CACHE instruction, with a SYNCI */ kvm_mips_trans_cache_va(inst, opc, vcpu); #endif } else { printk ("NO-OP CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n", cache, op, base, arch->gprs[base], offset); er = EMULATE_FAIL; preempt_enable(); goto dont_update_pc; } preempt_enable(); dont_update_pc: /* * Rollback PC */ vcpu->arch.pc = curr_pc; done: return er; } enum emulation_result kvm_mips_emulate_inst(unsigned long cause, uint32_t *opc, struct kvm_run *run, struct kvm_vcpu *vcpu) { enum emulation_result er = EMULATE_DONE; uint32_t inst; /* * Fetch the instruction. 
*/ if (cause & CAUSEF_BD) { opc += 1; } inst = kvm_get_inst(opc, vcpu); switch (((union mips_instruction)inst).r_format.opcode) { case cop0_op: er = kvm_mips_emulate_CP0(inst, opc, cause, run, vcpu); break; case sb_op: case sh_op: case sw_op: er = kvm_mips_emulate_store(inst, cause, run, vcpu); break; case lb_op: case lbu_op: case lhu_op: case lh_op: case lw_op: er = kvm_mips_emulate_load(inst, cause, run, vcpu); break; case cache_op: ++vcpu->stat.cache_exits; trace_kvm_exit(vcpu, CACHE_EXITS); er = kvm_mips_emulate_cache(inst, opc, cause, run, vcpu); break; default: printk("Instruction emulation not supported (%p/%#x)\n", opc, inst); kvm_arch_vcpu_dump_regs(vcpu); er = EMULATE_FAIL; break; } return er; } enum emulation_result kvm_mips_emulate_syscall(unsigned long cause, uint32_t *opc, struct kvm_run *run, struct kvm_vcpu *vcpu) { struct mips_coproc *cop0 = vcpu->arch.cop0; struct kvm_vcpu_arch *arch = &vcpu->arch; enum emulation_result er = EMULATE_DONE; if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { /* save old pc */ kvm_write_c0_guest_epc(cop0, arch->pc); kvm_set_c0_guest_status(cop0, ST0_EXL); if (cause & CAUSEF_BD) kvm_set_c0_guest_cause(cop0, CAUSEF_BD); else kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); kvm_debug("Delivering SYSCALL @ pc %#lx\n", arch->pc); kvm_change_c0_guest_cause(cop0, (0xff), (T_SYSCALL << CAUSEB_EXCCODE)); /* Set PC to the exception entry point */ arch->pc = KVM_GUEST_KSEG0 + 0x180; } else { printk("Trying to deliver SYSCALL when EXL is already set\n"); er = EMULATE_FAIL; } return er; } enum emulation_result kvm_mips_emulate_tlbmiss_ld(unsigned long cause, uint32_t *opc, struct kvm_run *run, struct kvm_vcpu *vcpu) { struct mips_coproc *cop0 = vcpu->arch.cop0; struct kvm_vcpu_arch *arch = &vcpu->arch; enum emulation_result er = EMULATE_DONE; unsigned long entryhi = (vcpu->arch. 
host_cp0_badvaddr & VPN2_MASK) | (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK); if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { /* save old pc */ kvm_write_c0_guest_epc(cop0, arch->pc); kvm_set_c0_guest_status(cop0, ST0_EXL); if (cause & CAUSEF_BD) kvm_set_c0_guest_cause(cop0, CAUSEF_BD); else kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); kvm_debug("[EXL == 0] delivering TLB MISS @ pc %#lx\n", arch->pc); /* set pc to the exception entry point */ arch->pc = KVM_GUEST_KSEG0 + 0x0; } else { kvm_debug("[EXL == 1] delivering TLB MISS @ pc %#lx\n", arch->pc); arch->pc = KVM_GUEST_KSEG0 + 0x180; } kvm_change_c0_guest_cause(cop0, (0xff), (T_TLB_LD_MISS << CAUSEB_EXCCODE)); /* setup badvaddr, context and entryhi registers for the guest */ kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr); /* XXXKYMA: is the context register used by linux??? */ kvm_write_c0_guest_entryhi(cop0, entryhi); /* Blow away the shadow host TLBs */ kvm_mips_flush_host_tlb(1); return er; } enum emulation_result kvm_mips_emulate_tlbinv_ld(unsigned long cause, uint32_t *opc, struct kvm_run *run, struct kvm_vcpu *vcpu) { struct mips_coproc *cop0 = vcpu->arch.cop0; struct kvm_vcpu_arch *arch = &vcpu->arch; enum emulation_result er = EMULATE_DONE; unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) | (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK); if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { /* save old pc */ kvm_write_c0_guest_epc(cop0, arch->pc); kvm_set_c0_guest_status(cop0, ST0_EXL); if (cause & CAUSEF_BD) kvm_set_c0_guest_cause(cop0, CAUSEF_BD); else kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); kvm_debug("[EXL == 0] delivering TLB INV @ pc %#lx\n", arch->pc); /* set pc to the exception entry point */ arch->pc = KVM_GUEST_KSEG0 + 0x180; } else { kvm_debug("[EXL == 1] delivering TLB MISS @ pc %#lx\n", arch->pc); arch->pc = KVM_GUEST_KSEG0 + 0x180; } kvm_change_c0_guest_cause(cop0, (0xff), (T_TLB_LD_MISS << CAUSEB_EXCCODE)); /* setup badvaddr, context and entryhi 
registers for the guest */ kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr); /* XXXKYMA: is the context register used by linux??? */ kvm_write_c0_guest_entryhi(cop0, entryhi); /* Blow away the shadow host TLBs */ kvm_mips_flush_host_tlb(1); return er; } enum emulation_result kvm_mips_emulate_tlbmiss_st(unsigned long cause, uint32_t *opc, struct kvm_run *run, struct kvm_vcpu *vcpu) { struct mips_coproc *cop0 = vcpu->arch.cop0; struct kvm_vcpu_arch *arch = &vcpu->arch; enum emulation_result er = EMULATE_DONE; unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) | (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK); if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { /* save old pc */ kvm_write_c0_guest_epc(cop0, arch->pc); kvm_set_c0_guest_status(cop0, ST0_EXL); if (cause & CAUSEF_BD) kvm_set_c0_guest_cause(cop0, CAUSEF_BD); else kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); kvm_debug("[EXL == 0] Delivering TLB MISS @ pc %#lx\n", arch->pc); /* Set PC to the exception entry point */ arch->pc = KVM_GUEST_KSEG0 + 0x0; } else { kvm_debug("[EXL == 1] Delivering TLB MISS @ pc %#lx\n", arch->pc); arch->pc = KVM_GUEST_KSEG0 + 0x180; } kvm_change_c0_guest_cause(cop0, (0xff), (T_TLB_ST_MISS << CAUSEB_EXCCODE)); /* setup badvaddr, context and entryhi registers for the guest */ kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr); /* XXXKYMA: is the context register used by linux??? 
*/ kvm_write_c0_guest_entryhi(cop0, entryhi); /* Blow away the shadow host TLBs */ kvm_mips_flush_host_tlb(1); return er; } enum emulation_result kvm_mips_emulate_tlbinv_st(unsigned long cause, uint32_t *opc, struct kvm_run *run, struct kvm_vcpu *vcpu) { struct mips_coproc *cop0 = vcpu->arch.cop0; struct kvm_vcpu_arch *arch = &vcpu->arch; enum emulation_result er = EMULATE_DONE; unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) | (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK); if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { /* save old pc */ kvm_write_c0_guest_epc(cop0, arch->pc); kvm_set_c0_guest_status(cop0, ST0_EXL); if (cause & CAUSEF_BD) kvm_set_c0_guest_cause(cop0, CAUSEF_BD); else kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); kvm_debug("[EXL == 0] Delivering TLB MISS @ pc %#lx\n", arch->pc); /* Set PC to the exception entry point */ arch->pc = KVM_GUEST_KSEG0 + 0x180; } else { kvm_debug("[EXL == 1] Delivering TLB MISS @ pc %#lx\n", arch->pc); arch->pc = KVM_GUEST_KSEG0 + 0x180; } kvm_change_c0_guest_cause(cop0, (0xff), (T_TLB_ST_MISS << CAUSEB_EXCCODE)); /* setup badvaddr, context and entryhi registers for the guest */ kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr); /* XXXKYMA: is the context register used by linux??? 
*/ kvm_write_c0_guest_entryhi(cop0, entryhi); /* Blow away the shadow host TLBs */ kvm_mips_flush_host_tlb(1); return er; } /* TLBMOD: store into address matching TLB with Dirty bit off */ enum emulation_result kvm_mips_handle_tlbmod(unsigned long cause, uint32_t *opc, struct kvm_run *run, struct kvm_vcpu *vcpu) { enum emulation_result er = EMULATE_DONE; #ifdef DEBUG /* * If address not in the guest TLB, then we are in trouble */ index = kvm_mips_guest_tlb_lookup(vcpu, entryhi); if (index < 0) { /* XXXKYMA Invalidate and retry */ kvm_mips_host_tlb_inv(vcpu, vcpu->arch.host_cp0_badvaddr); kvm_err("%s: host got TLBMOD for %#lx but entry not present in Guest TLB\n", __func__, entryhi); kvm_mips_dump_guest_tlbs(vcpu); kvm_mips_dump_host_tlbs(); return EMULATE_FAIL; } #endif er = kvm_mips_emulate_tlbmod(cause, opc, run, vcpu); return er; } enum emulation_result kvm_mips_emulate_tlbmod(unsigned long cause, uint32_t *opc, struct kvm_run *run, struct kvm_vcpu *vcpu) { struct mips_coproc *cop0 = vcpu->arch.cop0; unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) | (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK); struct kvm_vcpu_arch *arch = &vcpu->arch; enum emulation_result er = EMULATE_DONE; if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { /* save old pc */ kvm_write_c0_guest_epc(cop0, arch->pc); kvm_set_c0_guest_status(cop0, ST0_EXL); if (cause & CAUSEF_BD) kvm_set_c0_guest_cause(cop0, CAUSEF_BD); else kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); kvm_debug("[EXL == 0] Delivering TLB MOD @ pc %#lx\n", arch->pc); arch->pc = KVM_GUEST_KSEG0 + 0x180; } else { kvm_debug("[EXL == 1] Delivering TLB MOD @ pc %#lx\n", arch->pc); arch->pc = KVM_GUEST_KSEG0 + 0x180; } kvm_change_c0_guest_cause(cop0, (0xff), (T_TLB_MOD << CAUSEB_EXCCODE)); /* setup badvaddr, context and entryhi registers for the guest */ kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr); /* XXXKYMA: is the context register used by linux??? 
*/ kvm_write_c0_guest_entryhi(cop0, entryhi); /* Blow away the shadow host TLBs */ kvm_mips_flush_host_tlb(1); return er; } enum emulation_result kvm_mips_emulate_fpu_exc(unsigned long cause, uint32_t *opc, struct kvm_run *run, struct kvm_vcpu *vcpu) { struct mips_coproc *cop0 = vcpu->arch.cop0; struct kvm_vcpu_arch *arch = &vcpu->arch; enum emulation_result er = EMULATE_DONE; if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { /* save old pc */ kvm_write_c0_guest_epc(cop0, arch->pc); kvm_set_c0_guest_status(cop0, ST0_EXL); if (cause & CAUSEF_BD) kvm_set_c0_guest_cause(cop0, CAUSEF_BD); else kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); } arch->pc = KVM_GUEST_KSEG0 + 0x180; kvm_change_c0_guest_cause(cop0, (0xff), (T_COP_UNUSABLE << CAUSEB_EXCCODE)); kvm_change_c0_guest_cause(cop0, (CAUSEF_CE), (0x1 << CAUSEB_CE)); return er; } enum emulation_result kvm_mips_emulate_ri_exc(unsigned long cause, uint32_t *opc, struct kvm_run *run, struct kvm_vcpu *vcpu) { struct mips_coproc *cop0 = vcpu->arch.cop0; struct kvm_vcpu_arch *arch = &vcpu->arch; enum emulation_result er = EMULATE_DONE; if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { /* save old pc */ kvm_write_c0_guest_epc(cop0, arch->pc); kvm_set_c0_guest_status(cop0, ST0_EXL); if (cause & CAUSEF_BD) kvm_set_c0_guest_cause(cop0, CAUSEF_BD); else kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); kvm_debug("Delivering RI @ pc %#lx\n", arch->pc); kvm_change_c0_guest_cause(cop0, (0xff), (T_RES_INST << CAUSEB_EXCCODE)); /* Set PC to the exception entry point */ arch->pc = KVM_GUEST_KSEG0 + 0x180; } else { kvm_err("Trying to deliver RI when EXL is already set\n"); er = EMULATE_FAIL; } return er; } enum emulation_result kvm_mips_emulate_bp_exc(unsigned long cause, uint32_t *opc, struct kvm_run *run, struct kvm_vcpu *vcpu) { struct mips_coproc *cop0 = vcpu->arch.cop0; struct kvm_vcpu_arch *arch = &vcpu->arch; enum emulation_result er = EMULATE_DONE; if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { /* save old pc */ 
kvm_write_c0_guest_epc(cop0, arch->pc); kvm_set_c0_guest_status(cop0, ST0_EXL); if (cause & CAUSEF_BD) kvm_set_c0_guest_cause(cop0, CAUSEF_BD); else kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); kvm_debug("Delivering BP @ pc %#lx\n", arch->pc); kvm_change_c0_guest_cause(cop0, (0xff), (T_BREAK << CAUSEB_EXCCODE)); /* Set PC to the exception entry point */ arch->pc = KVM_GUEST_KSEG0 + 0x180; } else { printk("Trying to deliver BP when EXL is already set\n"); er = EMULATE_FAIL; } return er; } /* * ll/sc, rdhwr, sync emulation */ #define OPCODE 0xfc000000 #define BASE 0x03e00000 #define RT 0x001f0000 #define OFFSET 0x0000ffff #define LL 0xc0000000 #define SC 0xe0000000 #define SPEC0 0x00000000 #define SPEC3 0x7c000000 #define RD 0x0000f800 #define FUNC 0x0000003f #define SYNC 0x0000000f #define RDHWR 0x0000003b enum emulation_result kvm_mips_handle_ri(unsigned long cause, uint32_t *opc, struct kvm_run *run, struct kvm_vcpu *vcpu) { struct mips_coproc *cop0 = vcpu->arch.cop0; struct kvm_vcpu_arch *arch = &vcpu->arch; enum emulation_result er = EMULATE_DONE; unsigned long curr_pc; uint32_t inst; /* * Update PC and hold onto current PC in case there is * an error and we want to rollback the PC */ curr_pc = vcpu->arch.pc; er = update_pc(vcpu, cause); if (er == EMULATE_FAIL) return er; /* * Fetch the instruction. 
*/ if (cause & CAUSEF_BD) opc += 1; inst = kvm_get_inst(opc, vcpu); if (inst == KVM_INVALID_INST) { printk("%s: Cannot get inst @ %p\n", __func__, opc); return EMULATE_FAIL; } if ((inst & OPCODE) == SPEC3 && (inst & FUNC) == RDHWR) { int rd = (inst & RD) >> 11; int rt = (inst & RT) >> 16; switch (rd) { case 0: /* CPU number */ arch->gprs[rt] = 0; break; case 1: /* SYNCI length */ arch->gprs[rt] = min(current_cpu_data.dcache.linesz, current_cpu_data.icache.linesz); break; case 2: /* Read count register */ printk("RDHWR: Cont register\n"); arch->gprs[rt] = kvm_read_c0_guest_count(cop0); break; case 3: /* Count register resolution */ switch (current_cpu_data.cputype) { case CPU_20KC: case CPU_25KF: arch->gprs[rt] = 1; break; default: arch->gprs[rt] = 2; } break; case 29: #if 1 arch->gprs[rt] = kvm_read_c0_guest_userlocal(cop0); #else /* UserLocal not implemented */ er = kvm_mips_emulate_ri_exc(cause, opc, run, vcpu); #endif break; default: printk("RDHWR not supported\n"); er = EMULATE_FAIL; break; } } else { printk("Emulate RI not supported @ %p: %#x\n", opc, inst); er = EMULATE_FAIL; } /* * Rollback PC only if emulation was unsuccessful */ if (er == EMULATE_FAIL) { vcpu->arch.pc = curr_pc; } return er; } enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu, struct kvm_run *run) { unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr]; enum emulation_result er = EMULATE_DONE; unsigned long curr_pc; if (run->mmio.len > sizeof(*gpr)) { printk("Bad MMIO length: %d", run->mmio.len); er = EMULATE_FAIL; goto done; } /* * Update PC and hold onto current PC in case there is * an error and we want to rollback the PC */ curr_pc = vcpu->arch.pc; er = update_pc(vcpu, vcpu->arch.pending_load_cause); if (er == EMULATE_FAIL) return er; switch (run->mmio.len) { case 4: *gpr = *(int32_t *) run->mmio.data; break; case 2: if (vcpu->mmio_needed == 2) *gpr = *(int16_t *) run->mmio.data; else *gpr = *(int16_t *) run->mmio.data; break; case 1: if (vcpu->mmio_needed == 
2) *gpr = *(int8_t *) run->mmio.data; else *gpr = *(u8 *) run->mmio.data; break; } if (vcpu->arch.pending_load_cause & CAUSEF_BD) kvm_debug ("[%#lx] Completing %d byte BD Load to gpr %d (0x%08lx) type %d\n", vcpu->arch.pc, run->mmio.len, vcpu->arch.io_gpr, *gpr, vcpu->mmio_needed); done: return er; } static enum emulation_result kvm_mips_emulate_exc(unsigned long cause, uint32_t *opc, struct kvm_run *run, struct kvm_vcpu *vcpu) { uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f; struct mips_coproc *cop0 = vcpu->arch.cop0; struct kvm_vcpu_arch *arch = &vcpu->arch; enum emulation_result er = EMULATE_DONE; if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { /* save old pc */ kvm_write_c0_guest_epc(cop0, arch->pc); kvm_set_c0_guest_status(cop0, ST0_EXL); if (cause & CAUSEF_BD) kvm_set_c0_guest_cause(cop0, CAUSEF_BD); else kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); kvm_change_c0_guest_cause(cop0, (0xff), (exccode << CAUSEB_EXCCODE)); /* Set PC to the exception entry point */ arch->pc = KVM_GUEST_KSEG0 + 0x180; kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr); kvm_debug("Delivering EXC %d @ pc %#lx, badVaddr: %#lx\n", exccode, kvm_read_c0_guest_epc(cop0), kvm_read_c0_guest_badvaddr(cop0)); } else { printk("Trying to deliver EXC when EXL is already set\n"); er = EMULATE_FAIL; } return er; } enum emulation_result kvm_mips_check_privilege(unsigned long cause, uint32_t *opc, struct kvm_run *run, struct kvm_vcpu *vcpu) { enum emulation_result er = EMULATE_DONE; uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f; unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr; int usermode = !KVM_GUEST_KERNEL_MODE(vcpu); if (usermode) { switch (exccode) { case T_INT: case T_SYSCALL: case T_BREAK: case T_RES_INST: break; case T_COP_UNUSABLE: if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 0) er = EMULATE_PRIV_FAIL; break; case T_TLB_MOD: break; case T_TLB_LD_MISS: /* We we are accessing Guest kernel space, then send an address error exception to the guest */ if (badvaddr 
>= (unsigned long) KVM_GUEST_KSEG0) { printk("%s: LD MISS @ %#lx\n", __func__, badvaddr); cause &= ~0xff; cause |= (T_ADDR_ERR_LD << CAUSEB_EXCCODE); er = EMULATE_PRIV_FAIL; } break; case T_TLB_ST_MISS: /* We we are accessing Guest kernel space, then send an address error exception to the guest */ if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) { printk("%s: ST MISS @ %#lx\n", __func__, badvaddr); cause &= ~0xff; cause |= (T_ADDR_ERR_ST << CAUSEB_EXCCODE); er = EMULATE_PRIV_FAIL; } break; case T_ADDR_ERR_ST: printk("%s: address error ST @ %#lx\n", __func__, badvaddr); if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) { cause &= ~0xff; cause |= (T_TLB_ST_MISS << CAUSEB_EXCCODE); } er = EMULATE_PRIV_FAIL; break; case T_ADDR_ERR_LD: printk("%s: address error LD @ %#lx\n", __func__, badvaddr); if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) { cause &= ~0xff; cause |= (T_TLB_LD_MISS << CAUSEB_EXCCODE); } er = EMULATE_PRIV_FAIL; break; default: er = EMULATE_PRIV_FAIL; break; } } if (er == EMULATE_PRIV_FAIL) { kvm_mips_emulate_exc(cause, opc, run, vcpu); } return er; } /* User Address (UA) fault, this could happen if * (1) TLB entry not present/valid in both Guest and shadow host TLBs, in this * case we pass on the fault to the guest kernel and let it handle it. 
* (2) TLB entry is present in the Guest TLB but not in the shadow, in this * case we inject the TLB from the Guest TLB into the shadow host TLB */ enum emulation_result kvm_mips_handle_tlbmiss(unsigned long cause, uint32_t *opc, struct kvm_run *run, struct kvm_vcpu *vcpu) { enum emulation_result er = EMULATE_DONE; uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f; unsigned long va = vcpu->arch.host_cp0_badvaddr; int index; kvm_debug("kvm_mips_handle_tlbmiss: badvaddr: %#lx, entryhi: %#lx\n", vcpu->arch.host_cp0_badvaddr, vcpu->arch.host_cp0_entryhi); /* KVM would not have got the exception if this entry was valid in the shadow host TLB * Check the Guest TLB, if the entry is not there then send the guest an * exception. The guest exc handler should then inject an entry into the * guest TLB */ index = kvm_mips_guest_tlb_lookup(vcpu, (va & VPN2_MASK) | (kvm_read_c0_guest_entryhi (vcpu->arch.cop0) & ASID_MASK)); if (index < 0) { if (exccode == T_TLB_LD_MISS) { er = kvm_mips_emulate_tlbmiss_ld(cause, opc, run, vcpu); } else if (exccode == T_TLB_ST_MISS) { er = kvm_mips_emulate_tlbmiss_st(cause, opc, run, vcpu); } else { printk("%s: invalid exc code: %d\n", __func__, exccode); er = EMULATE_FAIL; } } else { struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index]; /* Check if the entry is valid, if not then setup a TLB invalid exception to the guest */ if (!TLB_IS_VALID(*tlb, va)) { if (exccode == T_TLB_LD_MISS) { er = kvm_mips_emulate_tlbinv_ld(cause, opc, run, vcpu); } else if (exccode == T_TLB_ST_MISS) { er = kvm_mips_emulate_tlbinv_st(cause, opc, run, vcpu); } else { printk("%s: invalid exc code: %d\n", __func__, exccode); er = EMULATE_FAIL; } } else { #ifdef DEBUG kvm_debug ("Injecting hi: %#lx, lo0: %#lx, lo1: %#lx into shadow host TLB\n", tlb->tlb_hi, tlb->tlb_lo0, tlb->tlb_lo1); #endif /* OK we have a Guest TLB entry, now inject it into the shadow host TLB */ kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, NULL, NULL); } } return er; }
gpl-2.0
BhavySinghal/qassam-le2
drivers/usb/host/ehci-s5p.c
1725
7977
/* * SAMSUNG S5P USB HOST EHCI Controller * * Copyright (C) 2011 Samsung Electronics Co.Ltd * Author: Jingoo Han <jg1.han@samsung.com> * Author: Joonyoung Shim <jy0922.shim@samsung.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * */ #include <linux/clk.h> #include <linux/dma-mapping.h> #include <linux/io.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/of.h> #include <linux/of_gpio.h> #include <linux/platform_device.h> #include <linux/platform_data/usb-ehci-s5p.h> #include <linux/usb/phy.h> #include <linux/usb/samsung_usb_phy.h> #include <linux/usb.h> #include <linux/usb/hcd.h> #include <linux/usb/otg.h> #include "ehci.h" #define DRIVER_DESC "EHCI s5p driver" #define EHCI_INSNREG00(base) (base + 0x90) #define EHCI_INSNREG00_ENA_INCR16 (0x1 << 25) #define EHCI_INSNREG00_ENA_INCR8 (0x1 << 24) #define EHCI_INSNREG00_ENA_INCR4 (0x1 << 23) #define EHCI_INSNREG00_ENA_INCRX_ALIGN (0x1 << 22) #define EHCI_INSNREG00_ENABLE_DMA_BURST \ (EHCI_INSNREG00_ENA_INCR16 | EHCI_INSNREG00_ENA_INCR8 | \ EHCI_INSNREG00_ENA_INCR4 | EHCI_INSNREG00_ENA_INCRX_ALIGN) static const char hcd_name[] = "ehci-s5p"; static struct hc_driver __read_mostly s5p_ehci_hc_driver; struct s5p_ehci_hcd { struct clk *clk; struct usb_phy *phy; struct usb_otg *otg; struct s5p_ehci_platdata *pdata; }; #define to_s5p_ehci(hcd) (struct s5p_ehci_hcd *)(hcd_to_ehci(hcd)->priv) static void s5p_setup_vbus_gpio(struct platform_device *pdev) { struct device *dev = &pdev->dev; int err; int gpio; if (!dev->of_node) return; gpio = of_get_named_gpio(dev->of_node, "samsung,vbus-gpio", 0); if (!gpio_is_valid(gpio)) return; err = devm_gpio_request_one(dev, gpio, GPIOF_OUT_INIT_HIGH, "ehci_vbus_gpio"); if (err) dev_err(dev, "can't request ehci vbus gpio %d", gpio); } static int s5p_ehci_probe(struct 
platform_device *pdev) { struct s5p_ehci_platdata *pdata = pdev->dev.platform_data; struct s5p_ehci_hcd *s5p_ehci; struct usb_hcd *hcd; struct ehci_hcd *ehci; struct resource *res; struct usb_phy *phy; int irq; int err; /* * Right now device-tree probed devices don't get dma_mask set. * Since shared usb code relies on it, set it here for now. * Once we move to full device tree support this will vanish off. */ if (!pdev->dev.dma_mask) pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask; if (!pdev->dev.coherent_dma_mask) pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); s5p_setup_vbus_gpio(pdev); hcd = usb_create_hcd(&s5p_ehci_hc_driver, &pdev->dev, dev_name(&pdev->dev)); if (!hcd) { dev_err(&pdev->dev, "Unable to create HCD\n"); return -ENOMEM; } s5p_ehci = to_s5p_ehci(hcd); phy = devm_usb_get_phy(&pdev->dev, USB_PHY_TYPE_USB2); if (IS_ERR(phy)) { /* Fallback to pdata */ if (!pdata) { usb_put_hcd(hcd); dev_warn(&pdev->dev, "no platform data or transceiver defined\n"); return -EPROBE_DEFER; } else { s5p_ehci->pdata = pdata; } } else { s5p_ehci->phy = phy; s5p_ehci->otg = phy->otg; } s5p_ehci->clk = devm_clk_get(&pdev->dev, "usbhost"); if (IS_ERR(s5p_ehci->clk)) { dev_err(&pdev->dev, "Failed to get usbhost clock\n"); err = PTR_ERR(s5p_ehci->clk); goto fail_clk; } err = clk_prepare_enable(s5p_ehci->clk); if (err) goto fail_clk; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { dev_err(&pdev->dev, "Failed to get I/O memory\n"); err = -ENXIO; goto fail_io; } hcd->rsrc_start = res->start; hcd->rsrc_len = resource_size(res); hcd->regs = devm_ioremap(&pdev->dev, res->start, hcd->rsrc_len); if (!hcd->regs) { dev_err(&pdev->dev, "Failed to remap I/O memory\n"); err = -ENOMEM; goto fail_io; } irq = platform_get_irq(pdev, 0); if (!irq) { dev_err(&pdev->dev, "Failed to get IRQ\n"); err = -ENODEV; goto fail_io; } if (s5p_ehci->otg) s5p_ehci->otg->set_host(s5p_ehci->otg, &hcd->self); if (s5p_ehci->phy) usb_phy_init(s5p_ehci->phy); else if (s5p_ehci->pdata->phy_init) 
s5p_ehci->pdata->phy_init(pdev, USB_PHY_TYPE_HOST); ehci = hcd_to_ehci(hcd); ehci->caps = hcd->regs; /* DMA burst Enable */ writel(EHCI_INSNREG00_ENABLE_DMA_BURST, EHCI_INSNREG00(hcd->regs)); err = usb_add_hcd(hcd, irq, IRQF_SHARED); if (err) { dev_err(&pdev->dev, "Failed to add USB HCD\n"); goto fail_add_hcd; } platform_set_drvdata(pdev, hcd); return 0; fail_add_hcd: if (s5p_ehci->phy) usb_phy_shutdown(s5p_ehci->phy); else if (s5p_ehci->pdata->phy_exit) s5p_ehci->pdata->phy_exit(pdev, USB_PHY_TYPE_HOST); fail_io: clk_disable_unprepare(s5p_ehci->clk); fail_clk: usb_put_hcd(hcd); return err; } static int s5p_ehci_remove(struct platform_device *pdev) { struct usb_hcd *hcd = platform_get_drvdata(pdev); struct s5p_ehci_hcd *s5p_ehci = to_s5p_ehci(hcd); usb_remove_hcd(hcd); if (s5p_ehci->otg) s5p_ehci->otg->set_host(s5p_ehci->otg, &hcd->self); if (s5p_ehci->phy) usb_phy_shutdown(s5p_ehci->phy); else if (s5p_ehci->pdata->phy_exit) s5p_ehci->pdata->phy_exit(pdev, USB_PHY_TYPE_HOST); clk_disable_unprepare(s5p_ehci->clk); usb_put_hcd(hcd); return 0; } static void s5p_ehci_shutdown(struct platform_device *pdev) { struct usb_hcd *hcd = platform_get_drvdata(pdev); if (hcd->driver->shutdown) hcd->driver->shutdown(hcd); } #ifdef CONFIG_PM static int s5p_ehci_suspend(struct device *dev) { struct usb_hcd *hcd = dev_get_drvdata(dev); struct s5p_ehci_hcd *s5p_ehci = to_s5p_ehci(hcd); struct platform_device *pdev = to_platform_device(dev); bool do_wakeup = device_may_wakeup(dev); int rc; rc = ehci_suspend(hcd, do_wakeup); if (s5p_ehci->otg) s5p_ehci->otg->set_host(s5p_ehci->otg, &hcd->self); if (s5p_ehci->phy) usb_phy_shutdown(s5p_ehci->phy); else if (s5p_ehci->pdata->phy_exit) s5p_ehci->pdata->phy_exit(pdev, USB_PHY_TYPE_HOST); clk_disable_unprepare(s5p_ehci->clk); return rc; } static int s5p_ehci_resume(struct device *dev) { struct usb_hcd *hcd = dev_get_drvdata(dev); struct s5p_ehci_hcd *s5p_ehci = to_s5p_ehci(hcd); struct platform_device *pdev = to_platform_device(dev); 
clk_prepare_enable(s5p_ehci->clk); if (s5p_ehci->otg) s5p_ehci->otg->set_host(s5p_ehci->otg, &hcd->self); if (s5p_ehci->phy) usb_phy_init(s5p_ehci->phy); else if (s5p_ehci->pdata->phy_init) s5p_ehci->pdata->phy_init(pdev, USB_PHY_TYPE_HOST); /* DMA burst Enable */ writel(EHCI_INSNREG00_ENABLE_DMA_BURST, EHCI_INSNREG00(hcd->regs)); ehci_resume(hcd, false); return 0; } #else #define s5p_ehci_suspend NULL #define s5p_ehci_resume NULL #endif static const struct dev_pm_ops s5p_ehci_pm_ops = { .suspend = s5p_ehci_suspend, .resume = s5p_ehci_resume, }; #ifdef CONFIG_OF static const struct of_device_id exynos_ehci_match[] = { { .compatible = "samsung,exynos4210-ehci" }, {}, }; MODULE_DEVICE_TABLE(of, exynos_ehci_match); #endif static struct platform_driver s5p_ehci_driver = { .probe = s5p_ehci_probe, .remove = s5p_ehci_remove, .shutdown = s5p_ehci_shutdown, .driver = { .name = "s5p-ehci", .owner = THIS_MODULE, .pm = &s5p_ehci_pm_ops, .of_match_table = of_match_ptr(exynos_ehci_match), } }; static const struct ehci_driver_overrides s5p_overrides __initdata = { .extra_priv_size = sizeof(struct s5p_ehci_hcd), }; static int __init ehci_s5p_init(void) { if (usb_disabled()) return -ENODEV; pr_info("%s: " DRIVER_DESC "\n", hcd_name); ehci_init_driver(&s5p_ehci_hc_driver, &s5p_overrides); return platform_driver_register(&s5p_ehci_driver); } module_init(ehci_s5p_init); static void __exit ehci_s5p_cleanup(void) { platform_driver_unregister(&s5p_ehci_driver); } module_exit(ehci_s5p_cleanup); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_ALIAS("platform:s5p-ehci"); MODULE_AUTHOR("Jingoo Han"); MODULE_AUTHOR("Joonyoung Shim"); MODULE_LICENSE("GPL v2");
gpl-2.0
AD5GB/android_kernel_googlesource-common
fs/xfs/xfs_message.c
2237
2724
/* * Copyright (c) 2011 Red Hat, Inc. All Rights Reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it would be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include "xfs.h" #include "xfs_fs.h" #include "xfs_types.h" #include "xfs_log.h" #include "xfs_trans.h" #include "xfs_sb.h" #include "xfs_ag.h" #include "xfs_mount.h" /* * XFS logging functions */ static void __xfs_printk( const char *level, const struct xfs_mount *mp, struct va_format *vaf) { if (mp && mp->m_fsname) { printk("%sXFS (%s): %pV\n", level, mp->m_fsname, vaf); return; } printk("%sXFS: %pV\n", level, vaf); } #define define_xfs_printk_level(func, kern_level) \ void func(const struct xfs_mount *mp, const char *fmt, ...) \ { \ struct va_format vaf; \ va_list args; \ \ va_start(args, fmt); \ \ vaf.fmt = fmt; \ vaf.va = &args; \ \ __xfs_printk(kern_level, mp, &vaf); \ va_end(args); \ } \ define_xfs_printk_level(xfs_emerg, KERN_EMERG); define_xfs_printk_level(xfs_alert, KERN_ALERT); define_xfs_printk_level(xfs_crit, KERN_CRIT); define_xfs_printk_level(xfs_err, KERN_ERR); define_xfs_printk_level(xfs_warn, KERN_WARNING); define_xfs_printk_level(xfs_notice, KERN_NOTICE); define_xfs_printk_level(xfs_info, KERN_INFO); #ifdef DEBUG define_xfs_printk_level(xfs_debug, KERN_DEBUG); #endif void xfs_alert_tag( const struct xfs_mount *mp, int panic_tag, const char *fmt, ...) 
{ struct va_format vaf; va_list args; int do_panic = 0; if (xfs_panic_mask && (xfs_panic_mask & panic_tag)) { xfs_alert(mp, "Transforming an alert into a BUG."); do_panic = 1; } va_start(args, fmt); vaf.fmt = fmt; vaf.va = &args; __xfs_printk(KERN_ALERT, mp, &vaf); va_end(args); BUG_ON(do_panic); } void asswarn(char *expr, char *file, int line) { xfs_warn(NULL, "Assertion failed: %s, file: %s, line: %d", expr, file, line); WARN_ON(1); } void assfail(char *expr, char *file, int line) { xfs_emerg(NULL, "Assertion failed: %s, file: %s, line: %d", expr, file, line); BUG(); } void xfs_hex_dump(void *p, int length) { print_hex_dump(KERN_ALERT, "", DUMP_PREFIX_ADDRESS, 16, 1, p, length, 1); }
gpl-2.0
dhiru1602/android_kernel_samsung_smdk4412
drivers/media/video/adv7170.c
3261
9861
/* * adv7170 - adv7170, adv7171 video encoder driver version 0.0.1 * * Copyright (C) 2002 Maxim Yevtyushkin <max@linuxmedialabs.com> * * Based on adv7176 driver by: * * Copyright (C) 1998 Dave Perks <dperks@ibm.net> * Copyright (C) 1999 Wolfgang Scherr <scherr@net4you.net> * Copyright (C) 2000 Serguei Miridonov <mirsev@cicese.mx> * - some corrections for Pinnacle Systems Inc. DC10plus card. * * Changes by Ronald Bultje <rbultje@ronald.bitfreak.net> * - moved over to linux>=2.4.x i2c protocol (1/1/2003) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
*/ #include <linux/module.h> #include <linux/types.h> #include <linux/slab.h> #include <linux/ioctl.h> #include <asm/uaccess.h> #include <linux/i2c.h> #include <linux/videodev2.h> #include <media/v4l2-device.h> #include <media/v4l2-chip-ident.h> MODULE_DESCRIPTION("Analog Devices ADV7170 video encoder driver"); MODULE_AUTHOR("Maxim Yevtyushkin"); MODULE_LICENSE("GPL"); static int debug; module_param(debug, int, 0); MODULE_PARM_DESC(debug, "Debug level (0-1)"); /* ----------------------------------------------------------------------- */ struct adv7170 { struct v4l2_subdev sd; unsigned char reg[128]; v4l2_std_id norm; int input; }; static inline struct adv7170 *to_adv7170(struct v4l2_subdev *sd) { return container_of(sd, struct adv7170, sd); } static char *inputs[] = { "pass_through", "play_back" }; /* ----------------------------------------------------------------------- */ static inline int adv7170_write(struct v4l2_subdev *sd, u8 reg, u8 value) { struct i2c_client *client = v4l2_get_subdevdata(sd); struct adv7170 *encoder = to_adv7170(sd); encoder->reg[reg] = value; return i2c_smbus_write_byte_data(client, reg, value); } static inline int adv7170_read(struct v4l2_subdev *sd, u8 reg) { struct i2c_client *client = v4l2_get_subdevdata(sd); return i2c_smbus_read_byte_data(client, reg); } static int adv7170_write_block(struct v4l2_subdev *sd, const u8 *data, unsigned int len) { struct i2c_client *client = v4l2_get_subdevdata(sd); struct adv7170 *encoder = to_adv7170(sd); int ret = -1; u8 reg; /* the adv7170 has an autoincrement function, use it if * the adapter understands raw I2C */ if (i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) { /* do raw I2C, not smbus compatible */ u8 block_data[32]; int block_len; while (len >= 2) { block_len = 0; block_data[block_len++] = reg = data[0]; do { block_data[block_len++] = encoder->reg[reg++] = data[1]; len -= 2; data += 2; } while (len >= 2 && data[0] == reg && block_len < 32); ret = i2c_master_send(client, block_data, 
block_len); if (ret < 0) break; } } else { /* do some slow I2C emulation kind of thing */ while (len >= 2) { reg = *data++; ret = adv7170_write(sd, reg, *data++); if (ret < 0) break; len -= 2; } } return ret; } /* ----------------------------------------------------------------------- */ #define TR0MODE 0x4c #define TR0RST 0x80 #define TR1CAPT 0x00 #define TR1PLAY 0x00 static const unsigned char init_NTSC[] = { 0x00, 0x10, /* MR0 */ 0x01, 0x20, /* MR1 */ 0x02, 0x0e, /* MR2 RTC control: bits 2 and 1 */ 0x03, 0x80, /* MR3 */ 0x04, 0x30, /* MR4 */ 0x05, 0x00, /* Reserved */ 0x06, 0x00, /* Reserved */ 0x07, TR0MODE, /* TM0 */ 0x08, TR1CAPT, /* TM1 */ 0x09, 0x16, /* Fsc0 */ 0x0a, 0x7c, /* Fsc1 */ 0x0b, 0xf0, /* Fsc2 */ 0x0c, 0x21, /* Fsc3 */ 0x0d, 0x00, /* Subcarrier Phase */ 0x0e, 0x00, /* Closed Capt. Ext 0 */ 0x0f, 0x00, /* Closed Capt. Ext 1 */ 0x10, 0x00, /* Closed Capt. 0 */ 0x11, 0x00, /* Closed Capt. 1 */ 0x12, 0x00, /* Pedestal Ctl 0 */ 0x13, 0x00, /* Pedestal Ctl 1 */ 0x14, 0x00, /* Pedestal Ctl 2 */ 0x15, 0x00, /* Pedestal Ctl 3 */ 0x16, 0x00, /* CGMS_WSS_0 */ 0x17, 0x00, /* CGMS_WSS_1 */ 0x18, 0x00, /* CGMS_WSS_2 */ 0x19, 0x00, /* Teletext Ctl */ }; static const unsigned char init_PAL[] = { 0x00, 0x71, /* MR0 */ 0x01, 0x20, /* MR1 */ 0x02, 0x0e, /* MR2 RTC control: bits 2 and 1 */ 0x03, 0x80, /* MR3 */ 0x04, 0x30, /* MR4 */ 0x05, 0x00, /* Reserved */ 0x06, 0x00, /* Reserved */ 0x07, TR0MODE, /* TM0 */ 0x08, TR1CAPT, /* TM1 */ 0x09, 0xcb, /* Fsc0 */ 0x0a, 0x8a, /* Fsc1 */ 0x0b, 0x09, /* Fsc2 */ 0x0c, 0x2a, /* Fsc3 */ 0x0d, 0x00, /* Subcarrier Phase */ 0x0e, 0x00, /* Closed Capt. Ext 0 */ 0x0f, 0x00, /* Closed Capt. Ext 1 */ 0x10, 0x00, /* Closed Capt. 0 */ 0x11, 0x00, /* Closed Capt. 
1 */ 0x12, 0x00, /* Pedestal Ctl 0 */ 0x13, 0x00, /* Pedestal Ctl 1 */ 0x14, 0x00, /* Pedestal Ctl 2 */ 0x15, 0x00, /* Pedestal Ctl 3 */ 0x16, 0x00, /* CGMS_WSS_0 */ 0x17, 0x00, /* CGMS_WSS_1 */ 0x18, 0x00, /* CGMS_WSS_2 */ 0x19, 0x00, /* Teletext Ctl */ }; static int adv7170_s_std_output(struct v4l2_subdev *sd, v4l2_std_id std) { struct adv7170 *encoder = to_adv7170(sd); v4l2_dbg(1, debug, sd, "set norm %llx\n", (unsigned long long)std); if (std & V4L2_STD_NTSC) { adv7170_write_block(sd, init_NTSC, sizeof(init_NTSC)); if (encoder->input == 0) adv7170_write(sd, 0x02, 0x0e); /* Enable genlock */ adv7170_write(sd, 0x07, TR0MODE | TR0RST); adv7170_write(sd, 0x07, TR0MODE); } else if (std & V4L2_STD_PAL) { adv7170_write_block(sd, init_PAL, sizeof(init_PAL)); if (encoder->input == 0) adv7170_write(sd, 0x02, 0x0e); /* Enable genlock */ adv7170_write(sd, 0x07, TR0MODE | TR0RST); adv7170_write(sd, 0x07, TR0MODE); } else { v4l2_dbg(1, debug, sd, "illegal norm: %llx\n", (unsigned long long)std); return -EINVAL; } v4l2_dbg(1, debug, sd, "switched to %llx\n", (unsigned long long)std); encoder->norm = std; return 0; } static int adv7170_s_routing(struct v4l2_subdev *sd, u32 input, u32 output, u32 config) { struct adv7170 *encoder = to_adv7170(sd); /* RJ: input = 0: input is from decoder input = 1: input is from ZR36060 input = 2: color bar */ v4l2_dbg(1, debug, sd, "set input from %s\n", input == 0 ? 
"decoder" : "ZR36060"); switch (input) { case 0: adv7170_write(sd, 0x01, 0x20); adv7170_write(sd, 0x08, TR1CAPT); /* TR1 */ adv7170_write(sd, 0x02, 0x0e); /* Enable genlock */ adv7170_write(sd, 0x07, TR0MODE | TR0RST); adv7170_write(sd, 0x07, TR0MODE); /* udelay(10); */ break; case 1: adv7170_write(sd, 0x01, 0x00); adv7170_write(sd, 0x08, TR1PLAY); /* TR1 */ adv7170_write(sd, 0x02, 0x08); adv7170_write(sd, 0x07, TR0MODE | TR0RST); adv7170_write(sd, 0x07, TR0MODE); /* udelay(10); */ break; default: v4l2_dbg(1, debug, sd, "illegal input: %d\n", input); return -EINVAL; } v4l2_dbg(1, debug, sd, "switched to %s\n", inputs[input]); encoder->input = input; return 0; } static int adv7170_g_chip_ident(struct v4l2_subdev *sd, struct v4l2_dbg_chip_ident *chip) { struct i2c_client *client = v4l2_get_subdevdata(sd); return v4l2_chip_ident_i2c_client(client, chip, V4L2_IDENT_ADV7170, 0); } /* ----------------------------------------------------------------------- */ static const struct v4l2_subdev_core_ops adv7170_core_ops = { .g_chip_ident = adv7170_g_chip_ident, }; static const struct v4l2_subdev_video_ops adv7170_video_ops = { .s_std_output = adv7170_s_std_output, .s_routing = adv7170_s_routing, }; static const struct v4l2_subdev_ops adv7170_ops = { .core = &adv7170_core_ops, .video = &adv7170_video_ops, }; /* ----------------------------------------------------------------------- */ static int adv7170_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct adv7170 *encoder; struct v4l2_subdev *sd; int i; /* Check if the adapter supports the needed features */ if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA)) return -ENODEV; v4l_info(client, "chip found @ 0x%x (%s)\n", client->addr << 1, client->adapter->name); encoder = kzalloc(sizeof(struct adv7170), GFP_KERNEL); if (encoder == NULL) return -ENOMEM; sd = &encoder->sd; v4l2_i2c_subdev_init(sd, client, &adv7170_ops); encoder->norm = V4L2_STD_NTSC; encoder->input = 0; i = 
adv7170_write_block(sd, init_NTSC, sizeof(init_NTSC)); if (i >= 0) { i = adv7170_write(sd, 0x07, TR0MODE | TR0RST); i = adv7170_write(sd, 0x07, TR0MODE); i = adv7170_read(sd, 0x12); v4l2_dbg(1, debug, sd, "revision %d\n", i & 1); } if (i < 0) v4l2_dbg(1, debug, sd, "init error 0x%x\n", i); return 0; } static int adv7170_remove(struct i2c_client *client) { struct v4l2_subdev *sd = i2c_get_clientdata(client); v4l2_device_unregister_subdev(sd); kfree(to_adv7170(sd)); return 0; } /* ----------------------------------------------------------------------- */ static const struct i2c_device_id adv7170_id[] = { { "adv7170", 0 }, { "adv7171", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, adv7170_id); static struct i2c_driver adv7170_driver = { .driver = { .owner = THIS_MODULE, .name = "adv7170", }, .probe = adv7170_probe, .remove = adv7170_remove, .id_table = adv7170_id, }; static __init int init_adv7170(void) { return i2c_add_driver(&adv7170_driver); } static __exit void exit_adv7170(void) { i2c_del_driver(&adv7170_driver); } module_init(init_adv7170); module_exit(exit_adv7170);
gpl-2.0
jgcaap/NewKernel
net/decnet/dn_dev.c
4797
32808
/* * DECnet An implementation of the DECnet protocol suite for the LINUX * operating system. DECnet is implemented using the BSD Socket * interface as the means of communication with the user level. * * DECnet Device Layer * * Authors: Steve Whitehouse <SteveW@ACM.org> * Eduardo Marcelo Serrat <emserrat@geocities.com> * * Changes: * Steve Whitehouse : Devices now see incoming frames so they * can mark on who it came from. * Steve Whitehouse : Fixed bug in creating neighbours. Each neighbour * can now have a device specific setup func. * Steve Whitehouse : Added /proc/sys/net/decnet/conf/<dev>/ * Steve Whitehouse : Fixed bug which sometimes killed timer * Steve Whitehouse : Multiple ifaddr support * Steve Whitehouse : SIOCGIFCONF is now a compile time option * Steve Whitehouse : /proc/sys/net/decnet/conf/<sys>/forwarding * Steve Whitehouse : Removed timer1 - it's a user space issue now * Patrick Caulfield : Fixed router hello message format * Steve Whitehouse : Got rid of constant sizes for blksize for * devices. All mtu based now. 
*/ #include <linux/capability.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/init.h> #include <linux/net.h> #include <linux/netdevice.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/timer.h> #include <linux/string.h> #include <linux/if_addr.h> #include <linux/if_arp.h> #include <linux/if_ether.h> #include <linux/skbuff.h> #include <linux/sysctl.h> #include <linux/notifier.h> #include <linux/slab.h> #include <asm/uaccess.h> #include <net/net_namespace.h> #include <net/neighbour.h> #include <net/dst.h> #include <net/flow.h> #include <net/fib_rules.h> #include <net/netlink.h> #include <net/dn.h> #include <net/dn_dev.h> #include <net/dn_route.h> #include <net/dn_neigh.h> #include <net/dn_fib.h> #define DN_IFREQ_SIZE (sizeof(struct ifreq) - sizeof(struct sockaddr) + sizeof(struct sockaddr_dn)) static char dn_rt_all_end_mcast[ETH_ALEN] = {0xAB,0x00,0x00,0x04,0x00,0x00}; static char dn_rt_all_rt_mcast[ETH_ALEN] = {0xAB,0x00,0x00,0x03,0x00,0x00}; static char dn_hiord[ETH_ALEN] = {0xAA,0x00,0x04,0x00,0x00,0x00}; static unsigned char dn_eco_version[3] = {0x02,0x00,0x00}; extern struct neigh_table dn_neigh_table; /* * decnet_address is kept in network order. 
*/ __le16 decnet_address = 0; static DEFINE_SPINLOCK(dndev_lock); static struct net_device *decnet_default_device; static BLOCKING_NOTIFIER_HEAD(dnaddr_chain); static struct dn_dev *dn_dev_create(struct net_device *dev, int *err); static void dn_dev_delete(struct net_device *dev); static void dn_ifaddr_notify(int event, struct dn_ifaddr *ifa); static int dn_eth_up(struct net_device *); static void dn_eth_down(struct net_device *); static void dn_send_brd_hello(struct net_device *dev, struct dn_ifaddr *ifa); static void dn_send_ptp_hello(struct net_device *dev, struct dn_ifaddr *ifa); static struct dn_dev_parms dn_dev_list[] = { { .type = ARPHRD_ETHER, /* Ethernet */ .mode = DN_DEV_BCAST, .state = DN_DEV_S_RU, .t2 = 1, .t3 = 10, .name = "ethernet", .up = dn_eth_up, .down = dn_eth_down, .timer3 = dn_send_brd_hello, }, { .type = ARPHRD_IPGRE, /* DECnet tunneled over GRE in IP */ .mode = DN_DEV_BCAST, .state = DN_DEV_S_RU, .t2 = 1, .t3 = 10, .name = "ipgre", .timer3 = dn_send_brd_hello, }, #if 0 { .type = ARPHRD_X25, /* Bog standard X.25 */ .mode = DN_DEV_UCAST, .state = DN_DEV_S_DS, .t2 = 1, .t3 = 120, .name = "x25", .timer3 = dn_send_ptp_hello, }, #endif #if 0 { .type = ARPHRD_PPP, /* DECnet over PPP */ .mode = DN_DEV_BCAST, .state = DN_DEV_S_RU, .t2 = 1, .t3 = 10, .name = "ppp", .timer3 = dn_send_brd_hello, }, #endif { .type = ARPHRD_DDCMP, /* DECnet over DDCMP */ .mode = DN_DEV_UCAST, .state = DN_DEV_S_DS, .t2 = 1, .t3 = 120, .name = "ddcmp", .timer3 = dn_send_ptp_hello, }, { .type = ARPHRD_LOOPBACK, /* Loopback interface - always last */ .mode = DN_DEV_BCAST, .state = DN_DEV_S_RU, .t2 = 1, .t3 = 10, .name = "loopback", .timer3 = dn_send_brd_hello, } }; #define DN_DEV_LIST_SIZE ARRAY_SIZE(dn_dev_list) #define DN_DEV_PARMS_OFFSET(x) offsetof(struct dn_dev_parms, x) #ifdef CONFIG_SYSCTL static int min_t2[] = { 1 }; static int max_t2[] = { 60 }; /* No max specified, but this seems sensible */ static int min_t3[] = { 1 }; static int max_t3[] = { 8191 }; /* Must fit in 
16 bits when multiplied by BCT3MULT or T3MULT */ static int min_priority[1]; static int max_priority[] = { 127 }; /* From DECnet spec */ static int dn_forwarding_proc(ctl_table *, int, void __user *, size_t *, loff_t *); static struct dn_dev_sysctl_table { struct ctl_table_header *sysctl_header; ctl_table dn_dev_vars[5]; } dn_dev_sysctl = { NULL, { { .procname = "forwarding", .data = (void *)DN_DEV_PARMS_OFFSET(forwarding), .maxlen = sizeof(int), .mode = 0644, .proc_handler = dn_forwarding_proc, }, { .procname = "priority", .data = (void *)DN_DEV_PARMS_OFFSET(priority), .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &min_priority, .extra2 = &max_priority }, { .procname = "t2", .data = (void *)DN_DEV_PARMS_OFFSET(t2), .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &min_t2, .extra2 = &max_t2 }, { .procname = "t3", .data = (void *)DN_DEV_PARMS_OFFSET(t3), .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &min_t3, .extra2 = &max_t3 }, {0} }, }; static void dn_dev_sysctl_register(struct net_device *dev, struct dn_dev_parms *parms) { struct dn_dev_sysctl_table *t; int i; #define DN_CTL_PATH_DEV 3 struct ctl_path dn_ctl_path[] = { { .procname = "net", }, { .procname = "decnet", }, { .procname = "conf", }, { /* to be set */ }, { }, }; t = kmemdup(&dn_dev_sysctl, sizeof(*t), GFP_KERNEL); if (t == NULL) return; for(i = 0; i < ARRAY_SIZE(t->dn_dev_vars) - 1; i++) { long offset = (long)t->dn_dev_vars[i].data; t->dn_dev_vars[i].data = ((char *)parms) + offset; } if (dev) { dn_ctl_path[DN_CTL_PATH_DEV].procname = dev->name; } else { dn_ctl_path[DN_CTL_PATH_DEV].procname = parms->name; } t->dn_dev_vars[0].extra1 = (void *)dev; t->sysctl_header = register_sysctl_paths(dn_ctl_path, t->dn_dev_vars); if (t->sysctl_header == NULL) kfree(t); else parms->sysctl = t; } static void dn_dev_sysctl_unregister(struct dn_dev_parms *parms) { if (parms->sysctl) { struct 
dn_dev_sysctl_table *t = parms->sysctl; parms->sysctl = NULL; unregister_sysctl_table(t->sysctl_header); kfree(t); } } static int dn_forwarding_proc(ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { #ifdef CONFIG_DECNET_ROUTER struct net_device *dev = table->extra1; struct dn_dev *dn_db; int err; int tmp, old; if (table->extra1 == NULL) return -EINVAL; dn_db = rcu_dereference_raw(dev->dn_ptr); old = dn_db->parms.forwarding; err = proc_dointvec(table, write, buffer, lenp, ppos); if ((err >= 0) && write) { if (dn_db->parms.forwarding < 0) dn_db->parms.forwarding = 0; if (dn_db->parms.forwarding > 2) dn_db->parms.forwarding = 2; /* * What an ugly hack this is... its works, just. It * would be nice if sysctl/proc were just that little * bit more flexible so I don't have to write a special * routine, or suffer hacks like this - SJW */ tmp = dn_db->parms.forwarding; dn_db->parms.forwarding = old; if (dn_db->parms.down) dn_db->parms.down(dev); dn_db->parms.forwarding = tmp; if (dn_db->parms.up) dn_db->parms.up(dev); } return err; #else return -EINVAL; #endif } #else /* CONFIG_SYSCTL */ static void dn_dev_sysctl_unregister(struct dn_dev_parms *parms) { } static void dn_dev_sysctl_register(struct net_device *dev, struct dn_dev_parms *parms) { } #endif /* CONFIG_SYSCTL */ static inline __u16 mtu2blksize(struct net_device *dev) { u32 blksize = dev->mtu; if (blksize > 0xffff) blksize = 0xffff; if (dev->type == ARPHRD_ETHER || dev->type == ARPHRD_PPP || dev->type == ARPHRD_IPGRE || dev->type == ARPHRD_LOOPBACK) blksize -= 2; return (__u16)blksize; } static struct dn_ifaddr *dn_dev_alloc_ifa(void) { struct dn_ifaddr *ifa; ifa = kzalloc(sizeof(*ifa), GFP_KERNEL); return ifa; } static void dn_dev_free_ifa(struct dn_ifaddr *ifa) { kfree_rcu(ifa, rcu); } static void dn_dev_del_ifa(struct dn_dev *dn_db, struct dn_ifaddr __rcu **ifap, int destroy) { struct dn_ifaddr *ifa1 = rtnl_dereference(*ifap); unsigned char mac_addr[6]; struct net_device *dev = 
dn_db->dev; ASSERT_RTNL(); *ifap = ifa1->ifa_next; if (dn_db->dev->type == ARPHRD_ETHER) { if (ifa1->ifa_local != dn_eth2dn(dev->dev_addr)) { dn_dn2eth(mac_addr, ifa1->ifa_local); dev_mc_del(dev, mac_addr); } } dn_ifaddr_notify(RTM_DELADDR, ifa1); blocking_notifier_call_chain(&dnaddr_chain, NETDEV_DOWN, ifa1); if (destroy) { dn_dev_free_ifa(ifa1); if (dn_db->ifa_list == NULL) dn_dev_delete(dn_db->dev); } } static int dn_dev_insert_ifa(struct dn_dev *dn_db, struct dn_ifaddr *ifa) { struct net_device *dev = dn_db->dev; struct dn_ifaddr *ifa1; unsigned char mac_addr[6]; ASSERT_RTNL(); /* Check for duplicates */ for (ifa1 = rtnl_dereference(dn_db->ifa_list); ifa1 != NULL; ifa1 = rtnl_dereference(ifa1->ifa_next)) { if (ifa1->ifa_local == ifa->ifa_local) return -EEXIST; } if (dev->type == ARPHRD_ETHER) { if (ifa->ifa_local != dn_eth2dn(dev->dev_addr)) { dn_dn2eth(mac_addr, ifa->ifa_local); dev_mc_add(dev, mac_addr); } } ifa->ifa_next = dn_db->ifa_list; rcu_assign_pointer(dn_db->ifa_list, ifa); dn_ifaddr_notify(RTM_NEWADDR, ifa); blocking_notifier_call_chain(&dnaddr_chain, NETDEV_UP, ifa); return 0; } static int dn_dev_set_ifa(struct net_device *dev, struct dn_ifaddr *ifa) { struct dn_dev *dn_db = rtnl_dereference(dev->dn_ptr); int rv; if (dn_db == NULL) { int err; dn_db = dn_dev_create(dev, &err); if (dn_db == NULL) return err; } ifa->ifa_dev = dn_db; if (dev->flags & IFF_LOOPBACK) ifa->ifa_scope = RT_SCOPE_HOST; rv = dn_dev_insert_ifa(dn_db, ifa); if (rv) dn_dev_free_ifa(ifa); return rv; } int dn_dev_ioctl(unsigned int cmd, void __user *arg) { char buffer[DN_IFREQ_SIZE]; struct ifreq *ifr = (struct ifreq *)buffer; struct sockaddr_dn *sdn = (struct sockaddr_dn *)&ifr->ifr_addr; struct dn_dev *dn_db; struct net_device *dev; struct dn_ifaddr *ifa = NULL; struct dn_ifaddr __rcu **ifap = NULL; int ret = 0; if (copy_from_user(ifr, arg, DN_IFREQ_SIZE)) return -EFAULT; ifr->ifr_name[IFNAMSIZ-1] = 0; dev_load(&init_net, ifr->ifr_name); switch (cmd) { case SIOCGIFADDR: break; 
case SIOCSIFADDR: if (!capable(CAP_NET_ADMIN)) return -EACCES; if (sdn->sdn_family != AF_DECnet) return -EINVAL; break; default: return -EINVAL; } rtnl_lock(); if ((dev = __dev_get_by_name(&init_net, ifr->ifr_name)) == NULL) { ret = -ENODEV; goto done; } if ((dn_db = rtnl_dereference(dev->dn_ptr)) != NULL) { for (ifap = &dn_db->ifa_list; (ifa = rtnl_dereference(*ifap)) != NULL; ifap = &ifa->ifa_next) if (strcmp(ifr->ifr_name, ifa->ifa_label) == 0) break; } if (ifa == NULL && cmd != SIOCSIFADDR) { ret = -EADDRNOTAVAIL; goto done; } switch (cmd) { case SIOCGIFADDR: *((__le16 *)sdn->sdn_nodeaddr) = ifa->ifa_local; goto rarok; case SIOCSIFADDR: if (!ifa) { if ((ifa = dn_dev_alloc_ifa()) == NULL) { ret = -ENOBUFS; break; } memcpy(ifa->ifa_label, dev->name, IFNAMSIZ); } else { if (ifa->ifa_local == dn_saddr2dn(sdn)) break; dn_dev_del_ifa(dn_db, ifap, 0); } ifa->ifa_local = ifa->ifa_address = dn_saddr2dn(sdn); ret = dn_dev_set_ifa(dev, ifa); } done: rtnl_unlock(); return ret; rarok: if (copy_to_user(arg, ifr, DN_IFREQ_SIZE)) ret = -EFAULT; goto done; } struct net_device *dn_dev_get_default(void) { struct net_device *dev; spin_lock(&dndev_lock); dev = decnet_default_device; if (dev) { if (dev->dn_ptr) dev_hold(dev); else dev = NULL; } spin_unlock(&dndev_lock); return dev; } int dn_dev_set_default(struct net_device *dev, int force) { struct net_device *old = NULL; int rv = -EBUSY; if (!dev->dn_ptr) return -ENODEV; spin_lock(&dndev_lock); if (force || decnet_default_device == NULL) { old = decnet_default_device; decnet_default_device = dev; rv = 0; } spin_unlock(&dndev_lock); if (old) dev_put(old); return rv; } static void dn_dev_check_default(struct net_device *dev) { spin_lock(&dndev_lock); if (dev == decnet_default_device) { decnet_default_device = NULL; } else { dev = NULL; } spin_unlock(&dndev_lock); if (dev) dev_put(dev); } /* * Called with RTNL */ static struct dn_dev *dn_dev_by_index(int ifindex) { struct net_device *dev; struct dn_dev *dn_dev = NULL; dev = 
__dev_get_by_index(&init_net, ifindex); if (dev) dn_dev = rtnl_dereference(dev->dn_ptr); return dn_dev; } static const struct nla_policy dn_ifa_policy[IFA_MAX+1] = { [IFA_ADDRESS] = { .type = NLA_U16 }, [IFA_LOCAL] = { .type = NLA_U16 }, [IFA_LABEL] = { .type = NLA_STRING, .len = IFNAMSIZ - 1 }, }; static int dn_nl_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) { struct net *net = sock_net(skb->sk); struct nlattr *tb[IFA_MAX+1]; struct dn_dev *dn_db; struct ifaddrmsg *ifm; struct dn_ifaddr *ifa; struct dn_ifaddr __rcu **ifap; int err = -EINVAL; if (!net_eq(net, &init_net)) goto errout; err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, dn_ifa_policy); if (err < 0) goto errout; err = -ENODEV; ifm = nlmsg_data(nlh); if ((dn_db = dn_dev_by_index(ifm->ifa_index)) == NULL) goto errout; err = -EADDRNOTAVAIL; for (ifap = &dn_db->ifa_list; (ifa = rtnl_dereference(*ifap)) != NULL; ifap = &ifa->ifa_next) { if (tb[IFA_LOCAL] && nla_memcmp(tb[IFA_LOCAL], &ifa->ifa_local, 2)) continue; if (tb[IFA_LABEL] && nla_strcmp(tb[IFA_LABEL], ifa->ifa_label)) continue; dn_dev_del_ifa(dn_db, ifap, 1); return 0; } errout: return err; } static int dn_nl_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) { struct net *net = sock_net(skb->sk); struct nlattr *tb[IFA_MAX+1]; struct net_device *dev; struct dn_dev *dn_db; struct ifaddrmsg *ifm; struct dn_ifaddr *ifa; int err; if (!net_eq(net, &init_net)) return -EINVAL; err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, dn_ifa_policy); if (err < 0) return err; if (tb[IFA_LOCAL] == NULL) return -EINVAL; ifm = nlmsg_data(nlh); if ((dev = __dev_get_by_index(&init_net, ifm->ifa_index)) == NULL) return -ENODEV; if ((dn_db = rtnl_dereference(dev->dn_ptr)) == NULL) { dn_db = dn_dev_create(dev, &err); if (!dn_db) return err; } if ((ifa = dn_dev_alloc_ifa()) == NULL) return -ENOBUFS; if (tb[IFA_ADDRESS] == NULL) tb[IFA_ADDRESS] = tb[IFA_LOCAL]; ifa->ifa_local = nla_get_le16(tb[IFA_LOCAL]); ifa->ifa_address = 
nla_get_le16(tb[IFA_ADDRESS]); ifa->ifa_flags = ifm->ifa_flags; ifa->ifa_scope = ifm->ifa_scope; ifa->ifa_dev = dn_db; if (tb[IFA_LABEL]) nla_strlcpy(ifa->ifa_label, tb[IFA_LABEL], IFNAMSIZ); else memcpy(ifa->ifa_label, dev->name, IFNAMSIZ); err = dn_dev_insert_ifa(dn_db, ifa); if (err) dn_dev_free_ifa(ifa); return err; } static inline size_t dn_ifaddr_nlmsg_size(void) { return NLMSG_ALIGN(sizeof(struct ifaddrmsg)) + nla_total_size(IFNAMSIZ) /* IFA_LABEL */ + nla_total_size(2) /* IFA_ADDRESS */ + nla_total_size(2); /* IFA_LOCAL */ } static int dn_nl_fill_ifaddr(struct sk_buff *skb, struct dn_ifaddr *ifa, u32 pid, u32 seq, int event, unsigned int flags) { struct ifaddrmsg *ifm; struct nlmsghdr *nlh; nlh = nlmsg_put(skb, pid, seq, event, sizeof(*ifm), flags); if (nlh == NULL) return -EMSGSIZE; ifm = nlmsg_data(nlh); ifm->ifa_family = AF_DECnet; ifm->ifa_prefixlen = 16; ifm->ifa_flags = ifa->ifa_flags | IFA_F_PERMANENT; ifm->ifa_scope = ifa->ifa_scope; ifm->ifa_index = ifa->ifa_dev->dev->ifindex; if (ifa->ifa_address) NLA_PUT_LE16(skb, IFA_ADDRESS, ifa->ifa_address); if (ifa->ifa_local) NLA_PUT_LE16(skb, IFA_LOCAL, ifa->ifa_local); if (ifa->ifa_label[0]) NLA_PUT_STRING(skb, IFA_LABEL, ifa->ifa_label); return nlmsg_end(skb, nlh); nla_put_failure: nlmsg_cancel(skb, nlh); return -EMSGSIZE; } static void dn_ifaddr_notify(int event, struct dn_ifaddr *ifa) { struct sk_buff *skb; int err = -ENOBUFS; skb = alloc_skb(dn_ifaddr_nlmsg_size(), GFP_KERNEL); if (skb == NULL) goto errout; err = dn_nl_fill_ifaddr(skb, ifa, 0, 0, event, 0); if (err < 0) { /* -EMSGSIZE implies BUG in dn_ifaddr_nlmsg_size() */ WARN_ON(err == -EMSGSIZE); kfree_skb(skb); goto errout; } rtnl_notify(skb, &init_net, 0, RTNLGRP_DECnet_IFADDR, NULL, GFP_KERNEL); return; errout: if (err < 0) rtnl_set_sk_err(&init_net, RTNLGRP_DECnet_IFADDR, err); } static int dn_nl_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb) { struct net *net = sock_net(skb->sk); int idx, dn_idx = 0, skip_ndevs, skip_naddr; 
struct net_device *dev; struct dn_dev *dn_db; struct dn_ifaddr *ifa; if (!net_eq(net, &init_net)) return 0; skip_ndevs = cb->args[0]; skip_naddr = cb->args[1]; idx = 0; rcu_read_lock(); for_each_netdev_rcu(&init_net, dev) { if (idx < skip_ndevs) goto cont; else if (idx > skip_ndevs) { /* Only skip over addresses for first dev dumped * in this iteration (idx == skip_ndevs) */ skip_naddr = 0; } if ((dn_db = rcu_dereference(dev->dn_ptr)) == NULL) goto cont; for (ifa = rcu_dereference(dn_db->ifa_list), dn_idx = 0; ifa; ifa = rcu_dereference(ifa->ifa_next), dn_idx++) { if (dn_idx < skip_naddr) continue; if (dn_nl_fill_ifaddr(skb, ifa, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq, RTM_NEWADDR, NLM_F_MULTI) < 0) goto done; } cont: idx++; } done: rcu_read_unlock(); cb->args[0] = idx; cb->args[1] = dn_idx; return skb->len; } static int dn_dev_get_first(struct net_device *dev, __le16 *addr) { struct dn_dev *dn_db; struct dn_ifaddr *ifa; int rv = -ENODEV; rcu_read_lock(); dn_db = rcu_dereference(dev->dn_ptr); if (dn_db == NULL) goto out; ifa = rcu_dereference(dn_db->ifa_list); if (ifa != NULL) { *addr = ifa->ifa_local; rv = 0; } out: rcu_read_unlock(); return rv; } /* * Find a default address to bind to. * * This is one of those areas where the initial VMS concepts don't really * map onto the Linux concepts, and since we introduced multiple addresses * per interface we have to cope with slightly odd ways of finding out what * "our address" really is. Mostly it's not a problem; for this we just guess * a sensible default. Eventually the routing code will take care of all the * nasties for us I hope. 
*/ int dn_dev_bind_default(__le16 *addr) { struct net_device *dev; int rv; dev = dn_dev_get_default(); last_chance: if (dev) { rv = dn_dev_get_first(dev, addr); dev_put(dev); if (rv == 0 || dev == init_net.loopback_dev) return rv; } dev = init_net.loopback_dev; dev_hold(dev); goto last_chance; } static void dn_send_endnode_hello(struct net_device *dev, struct dn_ifaddr *ifa) { struct endnode_hello_message *msg; struct sk_buff *skb = NULL; __le16 *pktlen; struct dn_dev *dn_db = rcu_dereference_raw(dev->dn_ptr); if ((skb = dn_alloc_skb(NULL, sizeof(*msg), GFP_ATOMIC)) == NULL) return; skb->dev = dev; msg = (struct endnode_hello_message *)skb_put(skb,sizeof(*msg)); msg->msgflg = 0x0D; memcpy(msg->tiver, dn_eco_version, 3); dn_dn2eth(msg->id, ifa->ifa_local); msg->iinfo = DN_RT_INFO_ENDN; msg->blksize = cpu_to_le16(mtu2blksize(dev)); msg->area = 0x00; memset(msg->seed, 0, 8); memcpy(msg->neighbor, dn_hiord, ETH_ALEN); if (dn_db->router) { struct dn_neigh *dn = (struct dn_neigh *)dn_db->router; dn_dn2eth(msg->neighbor, dn->addr); } msg->timer = cpu_to_le16((unsigned short)dn_db->parms.t3); msg->mpd = 0x00; msg->datalen = 0x02; memset(msg->data, 0xAA, 2); pktlen = (__le16 *)skb_push(skb,2); *pktlen = cpu_to_le16(skb->len - 2); skb_reset_network_header(skb); dn_rt_finish_output(skb, dn_rt_all_rt_mcast, msg->id); } #define DRDELAY (5 * HZ) static int dn_am_i_a_router(struct dn_neigh *dn, struct dn_dev *dn_db, struct dn_ifaddr *ifa) { /* First check time since device went up */ if ((jiffies - dn_db->uptime) < DRDELAY) return 0; /* If there is no router, then yes... */ if (!dn_db->router) return 1; /* otherwise only if we have a higher priority or.. 
*/ if (dn->priority < dn_db->parms.priority) return 1; /* if we have equal priority and a higher node number */ if (dn->priority != dn_db->parms.priority) return 0; if (le16_to_cpu(dn->addr) < le16_to_cpu(ifa->ifa_local)) return 1; return 0; } static void dn_send_router_hello(struct net_device *dev, struct dn_ifaddr *ifa) { int n; struct dn_dev *dn_db = rcu_dereference_raw(dev->dn_ptr); struct dn_neigh *dn = (struct dn_neigh *)dn_db->router; struct sk_buff *skb; size_t size; unsigned char *ptr; unsigned char *i1, *i2; __le16 *pktlen; char *src; if (mtu2blksize(dev) < (26 + 7)) return; n = mtu2blksize(dev) - 26; n /= 7; if (n > 32) n = 32; size = 2 + 26 + 7 * n; if ((skb = dn_alloc_skb(NULL, size, GFP_ATOMIC)) == NULL) return; skb->dev = dev; ptr = skb_put(skb, size); *ptr++ = DN_RT_PKT_CNTL | DN_RT_PKT_ERTH; *ptr++ = 2; /* ECO */ *ptr++ = 0; *ptr++ = 0; dn_dn2eth(ptr, ifa->ifa_local); src = ptr; ptr += ETH_ALEN; *ptr++ = dn_db->parms.forwarding == 1 ? DN_RT_INFO_L1RT : DN_RT_INFO_L2RT; *((__le16 *)ptr) = cpu_to_le16(mtu2blksize(dev)); ptr += 2; *ptr++ = dn_db->parms.priority; /* Priority */ *ptr++ = 0; /* Area: Reserved */ *((__le16 *)ptr) = cpu_to_le16((unsigned short)dn_db->parms.t3); ptr += 2; *ptr++ = 0; /* MPD: Reserved */ i1 = ptr++; memset(ptr, 0, 7); /* Name: Reserved */ ptr += 7; i2 = ptr++; n = dn_neigh_elist(dev, ptr, n); *i2 = 7 * n; *i1 = 8 + *i2; skb_trim(skb, (27 + *i2)); pktlen = (__le16 *)skb_push(skb, 2); *pktlen = cpu_to_le16(skb->len - 2); skb_reset_network_header(skb); if (dn_am_i_a_router(dn, dn_db, ifa)) { struct sk_buff *skb2 = skb_copy(skb, GFP_ATOMIC); if (skb2) { dn_rt_finish_output(skb2, dn_rt_all_end_mcast, src); } } dn_rt_finish_output(skb, dn_rt_all_rt_mcast, src); } static void dn_send_brd_hello(struct net_device *dev, struct dn_ifaddr *ifa) { struct dn_dev *dn_db = rcu_dereference_raw(dev->dn_ptr); if (dn_db->parms.forwarding == 0) dn_send_endnode_hello(dev, ifa); else dn_send_router_hello(dev, ifa); } static void 
dn_send_ptp_hello(struct net_device *dev, struct dn_ifaddr *ifa)
{
	/*
	 * Build and transmit a point-to-point hello (DN_RT_PKT_HELO) for
	 * address ifa->ifa_local on @dev.  Wire layout written below: one
	 * flag byte, the 2-byte local address, a length byte, then tdlen
	 * bytes of 0252 (octal, 0xAA) test-data padding.
	 */
	int tdlen = 16;
	int size = dev->hard_header_len + 2 + 4 + tdlen;
	struct sk_buff *skb = dn_alloc_skb(NULL, size, GFP_ATOMIC);
	int i;
	unsigned char *ptr;
	char src[ETH_ALEN];

	if (skb == NULL)
		return ;

	skb->dev = dev;
	skb_push(skb, dev->hard_header_len);
	ptr = skb_put(skb, 2 + 4 + tdlen);

	*ptr++ = DN_RT_PKT_HELO;
	*((__le16 *)ptr) = ifa->ifa_local;
	ptr += 2;
	*ptr++ = tdlen;

	for(i = 0; i < tdlen; i++)
		*ptr++ = 0252;	/* 0xAA test pattern, as in the endnode hello */

	dn_dn2eth(src, ifa->ifa_local);
	dn_rt_finish_output(skb, dn_rt_all_rt_mcast, src);
}

/*
 * Device-type "up" hook for Ethernet: join the hello multicast group
 * matching the forwarding mode (end-node vs. router) and select the
 * long header format (use_long = 1).
 */
static int dn_eth_up(struct net_device *dev)
{
	struct dn_dev *dn_db = rcu_dereference_raw(dev->dn_ptr);

	if (dn_db->parms.forwarding == 0)
		dev_mc_add(dev, dn_rt_all_end_mcast);
	else
		dev_mc_add(dev, dn_rt_all_rt_mcast);

	dn_db->use_long = 1;

	return 0;
}

/* Undo dn_eth_up(): leave whichever hello multicast group was joined. */
static void dn_eth_down(struct net_device *dev)
{
	struct dn_dev *dn_db = rcu_dereference_raw(dev->dn_ptr);

	if (dn_db->parms.forwarding == 0)
		dev_mc_del(dev, dn_rt_all_end_mcast);
	else
		dev_mc_del(dev, dn_rt_all_rt_mcast);
}

static void dn_dev_set_timer(struct net_device *dev);

/*
 * Per-device timer handler, fires every t2 seconds.  It counts the t3
 * interval down in steps of t2; when t3 expires it invokes the device
 * type's timer3 hook (a hello transmitter, per dn_dev_list) for every
 * primary (non-IFA_F_SECONDARY) address, resets the countdown, and
 * re-arms itself via dn_dev_set_timer().
 */
static void dn_dev_timer_func(unsigned long arg)
{
	struct net_device *dev = (struct net_device *)arg;
	struct dn_dev *dn_db;
	struct dn_ifaddr *ifa;

	rcu_read_lock();
	dn_db = rcu_dereference(dev->dn_ptr);
	if (dn_db->t3 <= dn_db->parms.t2) {
		if (dn_db->parms.timer3) {
			for (ifa = rcu_dereference(dn_db->ifa_list);
			     ifa;
			     ifa = rcu_dereference(ifa->ifa_next)) {
				if (!(ifa->ifa_flags & IFA_F_SECONDARY))
					dn_db->parms.timer3(dev, ifa);
			}
		}
		dn_db->t3 = dn_db->parms.t3;
	} else {
		dn_db->t3 -= dn_db->parms.t2;
	}
	rcu_read_unlock();

	dn_dev_set_timer(dev);
}

/* Arm the per-device timer to fire in t2 seconds (t2 clamped to t3). */
static void dn_dev_set_timer(struct net_device *dev)
{
	struct dn_dev *dn_db = rcu_dereference_raw(dev->dn_ptr);

	if (dn_db->parms.t2 > dn_db->parms.t3)
		dn_db->parms.t2 = dn_db->parms.t3;

	dn_db->timer.data = (unsigned long)dev;
	dn_db->timer.function = dn_dev_timer_func;
	dn_db->timer.expires = jiffies + (dn_db->parms.t2 * HZ);

	add_timer(&dn_db->timer);
}

static
struct dn_dev *dn_dev_create(struct net_device *dev, int *err) { int i; struct dn_dev_parms *p = dn_dev_list; struct dn_dev *dn_db; for(i = 0; i < DN_DEV_LIST_SIZE; i++, p++) { if (p->type == dev->type) break; } *err = -ENODEV; if (i == DN_DEV_LIST_SIZE) return NULL; *err = -ENOBUFS; if ((dn_db = kzalloc(sizeof(struct dn_dev), GFP_ATOMIC)) == NULL) return NULL; memcpy(&dn_db->parms, p, sizeof(struct dn_dev_parms)); rcu_assign_pointer(dev->dn_ptr, dn_db); dn_db->dev = dev; init_timer(&dn_db->timer); dn_db->uptime = jiffies; dn_db->neigh_parms = neigh_parms_alloc(dev, &dn_neigh_table); if (!dn_db->neigh_parms) { RCU_INIT_POINTER(dev->dn_ptr, NULL); kfree(dn_db); return NULL; } if (dn_db->parms.up) { if (dn_db->parms.up(dev) < 0) { neigh_parms_release(&dn_neigh_table, dn_db->neigh_parms); dev->dn_ptr = NULL; kfree(dn_db); return NULL; } } dn_dev_sysctl_register(dev, &dn_db->parms); dn_dev_set_timer(dev); *err = 0; return dn_db; } /* * This processes a device up event. We only start up * the loopback device & ethernet devices with correct * MAC addresses automatically. Others must be started * specifically. * * FIXME: How should we configure the loopback address ? If we could dispense * with using decnet_address here and for autobind, it will be one less thing * for users to worry about setting up. */ void dn_dev_up(struct net_device *dev) { struct dn_ifaddr *ifa; __le16 addr = decnet_address; int maybe_default = 0; struct dn_dev *dn_db = rtnl_dereference(dev->dn_ptr); if ((dev->type != ARPHRD_ETHER) && (dev->type != ARPHRD_LOOPBACK)) return; /* * Need to ensure that loopback device has a dn_db attached to it * to allow creation of neighbours against it, even though it might * not have a local address of its own. Might as well do the same for * all autoconfigured interfaces. 
*/ if (dn_db == NULL) { int err; dn_db = dn_dev_create(dev, &err); if (dn_db == NULL) return; } if (dev->type == ARPHRD_ETHER) { if (memcmp(dev->dev_addr, dn_hiord, 4) != 0) return; addr = dn_eth2dn(dev->dev_addr); maybe_default = 1; } if (addr == 0) return; if ((ifa = dn_dev_alloc_ifa()) == NULL) return; ifa->ifa_local = ifa->ifa_address = addr; ifa->ifa_flags = 0; ifa->ifa_scope = RT_SCOPE_UNIVERSE; strcpy(ifa->ifa_label, dev->name); dn_dev_set_ifa(dev, ifa); /* * Automagically set the default device to the first automatically * configured ethernet card in the system. */ if (maybe_default) { dev_hold(dev); if (dn_dev_set_default(dev, 0)) dev_put(dev); } } static void dn_dev_delete(struct net_device *dev) { struct dn_dev *dn_db = rtnl_dereference(dev->dn_ptr); if (dn_db == NULL) return; del_timer_sync(&dn_db->timer); dn_dev_sysctl_unregister(&dn_db->parms); dn_dev_check_default(dev); neigh_ifdown(&dn_neigh_table, dev); if (dn_db->parms.down) dn_db->parms.down(dev); dev->dn_ptr = NULL; neigh_parms_release(&dn_neigh_table, dn_db->neigh_parms); neigh_ifdown(&dn_neigh_table, dev); if (dn_db->router) neigh_release(dn_db->router); if (dn_db->peer) neigh_release(dn_db->peer); kfree(dn_db); } void dn_dev_down(struct net_device *dev) { struct dn_dev *dn_db = rtnl_dereference(dev->dn_ptr); struct dn_ifaddr *ifa; if (dn_db == NULL) return; while ((ifa = rtnl_dereference(dn_db->ifa_list)) != NULL) { dn_dev_del_ifa(dn_db, &dn_db->ifa_list, 0); dn_dev_free_ifa(ifa); } dn_dev_delete(dev); } void dn_dev_init_pkt(struct sk_buff *skb) { } void dn_dev_veri_pkt(struct sk_buff *skb) { } void dn_dev_hello(struct sk_buff *skb) { } void dn_dev_devices_off(void) { struct net_device *dev; rtnl_lock(); for_each_netdev(&init_net, dev) dn_dev_down(dev); rtnl_unlock(); } void dn_dev_devices_on(void) { struct net_device *dev; rtnl_lock(); for_each_netdev(&init_net, dev) { if (dev->flags & IFF_UP) dn_dev_up(dev); } rtnl_unlock(); } int register_dnaddr_notifier(struct notifier_block *nb) { return 
blocking_notifier_chain_register(&dnaddr_chain, nb); } int unregister_dnaddr_notifier(struct notifier_block *nb) { return blocking_notifier_chain_unregister(&dnaddr_chain, nb); } #ifdef CONFIG_PROC_FS static inline int is_dn_dev(struct net_device *dev) { return dev->dn_ptr != NULL; } static void *dn_dev_seq_start(struct seq_file *seq, loff_t *pos) __acquires(RCU) { int i; struct net_device *dev; rcu_read_lock(); if (*pos == 0) return SEQ_START_TOKEN; i = 1; for_each_netdev_rcu(&init_net, dev) { if (!is_dn_dev(dev)) continue; if (i++ == *pos) return dev; } return NULL; } static void *dn_dev_seq_next(struct seq_file *seq, void *v, loff_t *pos) { struct net_device *dev; ++*pos; dev = v; if (v == SEQ_START_TOKEN) dev = net_device_entry(&init_net.dev_base_head); for_each_netdev_continue_rcu(&init_net, dev) { if (!is_dn_dev(dev)) continue; return dev; } return NULL; } static void dn_dev_seq_stop(struct seq_file *seq, void *v) __releases(RCU) { rcu_read_unlock(); } static char *dn_type2asc(char type) { switch (type) { case DN_DEV_BCAST: return "B"; case DN_DEV_UCAST: return "U"; case DN_DEV_MPOINT: return "M"; } return "?"; } static int dn_dev_seq_show(struct seq_file *seq, void *v) { if (v == SEQ_START_TOKEN) seq_puts(seq, "Name Flags T1 Timer1 T3 Timer3 BlkSize Pri State DevType Router Peer\n"); else { struct net_device *dev = v; char peer_buf[DN_ASCBUF_LEN]; char router_buf[DN_ASCBUF_LEN]; struct dn_dev *dn_db = rcu_dereference(dev->dn_ptr); seq_printf(seq, "%-8s %1s %04u %04u %04lu %04lu" " %04hu %03d %02x %-10s %-7s %-7s\n", dev->name ? dev->name : "???", dn_type2asc(dn_db->parms.mode), 0, 0, dn_db->t3, dn_db->parms.t3, mtu2blksize(dev), dn_db->parms.priority, dn_db->parms.state, dn_db->parms.name, dn_db->router ? dn_addr2asc(le16_to_cpu(*(__le16 *)dn_db->router->primary_key), router_buf) : "", dn_db->peer ? 
dn_addr2asc(le16_to_cpu(*(__le16 *)dn_db->peer->primary_key), peer_buf) : ""); } return 0; } static const struct seq_operations dn_dev_seq_ops = { .start = dn_dev_seq_start, .next = dn_dev_seq_next, .stop = dn_dev_seq_stop, .show = dn_dev_seq_show, }; static int dn_dev_seq_open(struct inode *inode, struct file *file) { return seq_open(file, &dn_dev_seq_ops); } static const struct file_operations dn_dev_seq_fops = { .owner = THIS_MODULE, .open = dn_dev_seq_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release, }; #endif /* CONFIG_PROC_FS */ static int addr[2]; module_param_array(addr, int, NULL, 0444); MODULE_PARM_DESC(addr, "The DECnet address of this machine: area,node"); void __init dn_dev_init(void) { if (addr[0] > 63 || addr[0] < 0) { printk(KERN_ERR "DECnet: Area must be between 0 and 63"); return; } if (addr[1] > 1023 || addr[1] < 0) { printk(KERN_ERR "DECnet: Node must be between 0 and 1023"); return; } decnet_address = cpu_to_le16((addr[0] << 10) | addr[1]); dn_dev_devices_on(); rtnl_register(PF_DECnet, RTM_NEWADDR, dn_nl_newaddr, NULL, NULL); rtnl_register(PF_DECnet, RTM_DELADDR, dn_nl_deladdr, NULL, NULL); rtnl_register(PF_DECnet, RTM_GETADDR, NULL, dn_nl_dump_ifaddr, NULL); proc_net_fops_create(&init_net, "decnet_dev", S_IRUGO, &dn_dev_seq_fops); #ifdef CONFIG_SYSCTL { int i; for(i = 0; i < DN_DEV_LIST_SIZE; i++) dn_dev_sysctl_register(NULL, &dn_dev_list[i]); } #endif /* CONFIG_SYSCTL */ } void __exit dn_dev_cleanup(void) { #ifdef CONFIG_SYSCTL { int i; for(i = 0; i < DN_DEV_LIST_SIZE; i++) dn_dev_sysctl_unregister(&dn_dev_list[i]); } #endif /* CONFIG_SYSCTL */ proc_net_remove(&init_net, "decnet_dev"); dn_dev_devices_off(); }
gpl-2.0
paul-chambers/netgear-r7800
git_home/linux.git/sourcecode/drivers/scsi/bfa/bfad_attr.c
4797
28933
/*
 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 *
 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */

/*
 * bfa_attr.c Linux driver configuration interface module.
 */

#include "bfad_drv.h"
#include "bfad_im.h"

/*
 * FC transport template entry, get SCSI target port ID.
 *
 * Looks up the initiator-target nexus (itnim) for @starget under
 * bfad_lock and publishes its FC ID through fc_starget_port_id().
 * -1 is reported when no itnim exists for this target id.
 */
static void
bfad_im_get_starget_port_id(struct scsi_target *starget)
{
	struct Scsi_Host *shost;
	struct bfad_im_port_s *im_port;
	struct bfad_s *bfad;
	struct bfad_itnim_s *itnim = NULL;
	u32 fc_id = -1;
	unsigned long flags;

	/* im_port is stashed in hostdata[0] by the driver at host setup */
	shost = dev_to_shost(starget->dev.parent);
	im_port = (struct bfad_im_port_s *) shost->hostdata[0];
	bfad = im_port->bfad;
	spin_lock_irqsave(&bfad->bfad_lock, flags);

	itnim = bfad_get_itnim(im_port, starget->id);
	if (itnim)
		fc_id = bfa_fcs_itnim_get_fcid(&itnim->fcs_itnim);

	fc_starget_port_id(starget) = fc_id;
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
}

/*
 * FC transport template entry, get SCSI target nwwn.
*/
static void
bfad_im_get_starget_node_name(struct scsi_target *starget)
{
	/*
	 * Resolve the itnim for @starget under bfad_lock and report its
	 * node WWN (converted to big-endian) via fc_starget_node_name();
	 * 0 is reported when the target has no itnim.
	 */
	struct Scsi_Host *shost;
	struct bfad_im_port_s *im_port;
	struct bfad_s *bfad;
	struct bfad_itnim_s *itnim = NULL;
	u64 node_name = 0;
	unsigned long flags;

	shost = dev_to_shost(starget->dev.parent);
	im_port = (struct bfad_im_port_s *) shost->hostdata[0];
	bfad = im_port->bfad;
	spin_lock_irqsave(&bfad->bfad_lock, flags);

	itnim = bfad_get_itnim(im_port, starget->id);
	if (itnim)
		node_name = bfa_fcs_itnim_get_nwwn(&itnim->fcs_itnim);

	fc_starget_node_name(starget) = cpu_to_be64(node_name);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
}

/*
 * FC transport template entry, get SCSI target pwwn.
 */
static void
bfad_im_get_starget_port_name(struct scsi_target *starget)
{
	/* Same lookup as above, but reports the port WWN (0 if absent). */
	struct Scsi_Host *shost;
	struct bfad_im_port_s *im_port;
	struct bfad_s *bfad;
	struct bfad_itnim_s *itnim = NULL;
	u64 port_name = 0;
	unsigned long flags;

	shost = dev_to_shost(starget->dev.parent);
	im_port = (struct bfad_im_port_s *) shost->hostdata[0];
	bfad = im_port->bfad;
	spin_lock_irqsave(&bfad->bfad_lock, flags);

	itnim = bfad_get_itnim(im_port, starget->id);
	if (itnim)
		port_name = bfa_fcs_itnim_get_pwwn(&itnim->fcs_itnim);

	fc_starget_port_name(starget) = cpu_to_be64(port_name);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
}

/*
 * FC transport template entry, get SCSI host port ID.
 * Publishes the local port's FC ID (byte-swapped via bfa_hton3b).
 */
static void
bfad_im_get_host_port_id(struct Scsi_Host *shost)
{
	struct bfad_im_port_s *im_port =
			(struct bfad_im_port_s *) shost->hostdata[0];
	struct bfad_port_s *port = im_port->port;

	fc_host_port_id(shost) =
			bfa_hton3b(bfa_fcs_lport_get_fcid(port->fcs_port));
}

/*
 * FC transport template entry, get SCSI host port type.
*/
static void
bfad_im_get_host_port_type(struct Scsi_Host *shost)
{
	/*
	 * Map the BFA lport type of the base fabric port onto the FC
	 * transport's FC_PORTTYPE_* values; anything unrecognised is
	 * reported as FC_PORTTYPE_UNKNOWN.
	 */
	struct bfad_im_port_s *im_port =
			(struct bfad_im_port_s *) shost->hostdata[0];
	struct bfad_s *bfad = im_port->bfad;
	struct bfa_lport_attr_s port_attr;

	bfa_fcs_lport_get_attr(&bfad->bfa_fcs.fabric.bport, &port_attr);

	switch (port_attr.port_type) {
	case BFA_PORT_TYPE_NPORT:
		fc_host_port_type(shost) = FC_PORTTYPE_NPORT;
		break;
	case BFA_PORT_TYPE_NLPORT:
		fc_host_port_type(shost) = FC_PORTTYPE_NLPORT;
		break;
	case BFA_PORT_TYPE_P2P:
		fc_host_port_type(shost) = FC_PORTTYPE_PTP;
		break;
	case BFA_PORT_TYPE_LPORT:
		fc_host_port_type(shost) = FC_PORTTYPE_LPORT;
		break;
	default:
		fc_host_port_type(shost) = FC_PORTTYPE_UNKNOWN;
		break;
	}
}

/*
 * FC transport template entry, get SCSI host port state.
 */
static void
bfad_im_get_host_port_state(struct Scsi_Host *shost)
{
	/*
	 * Map the BFA physical port state onto the FC transport's
	 * FC_PORTSTATE_* values.  Disabled/stopped/IOC-down states all
	 * collapse to OFFLINE; transitional states report UNKNOWN.
	 */
	struct bfad_im_port_s *im_port =
			(struct bfad_im_port_s *) shost->hostdata[0];
	struct bfad_s *bfad = im_port->bfad;
	struct bfa_port_attr_s attr;

	bfa_fcport_get_attr(&bfad->bfa, &attr);

	switch (attr.port_state) {
	case BFA_PORT_ST_LINKDOWN:
		fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
		break;
	case BFA_PORT_ST_LINKUP:
		fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
		break;
	case BFA_PORT_ST_DISABLED:
	case BFA_PORT_ST_STOPPED:
	case BFA_PORT_ST_IOCDOWN:
	case BFA_PORT_ST_IOCDIS:
		fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
		break;
	case BFA_PORT_ST_UNINIT:
	case BFA_PORT_ST_ENABLING_QWAIT:
	case BFA_PORT_ST_ENABLING:
	case BFA_PORT_ST_DISABLING_QWAIT:
	case BFA_PORT_ST_DISABLING:
	default:
		fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
		break;
	}
}

/*
 * FC transport template entry, get SCSI host active fc4s.
*/
static void
bfad_im_get_host_active_fc4s(struct Scsi_Host *shost)
{
	struct bfad_im_port_s *im_port =
			(struct bfad_im_port_s *) shost->hostdata[0];
	struct bfad_port_s *port = im_port->port;

	memset(fc_host_active_fc4s(shost), 0,
	       sizeof(fc_host_active_fc4s(shost)));

	/*
	 * Bytes [2] and [7] of the FC-4 bitmap are set here; presumably
	 * these correspond to the FCP and CT FC-4 types — confirm against
	 * the FC-GS FC-4 TYPEs encoding.
	 */
	if (port->supported_fc4s & BFA_LPORT_ROLE_FCP_IM)
		fc_host_active_fc4s(shost)[2] = 1;

	fc_host_active_fc4s(shost)[7] = 1;
}

/*
 * FC transport template entry, get SCSI host link speed.
 */
static void
bfad_im_get_host_speed(struct Scsi_Host *shost)
{
	struct bfad_im_port_s *im_port =
			(struct bfad_im_port_s *) shost->hostdata[0];
	struct bfad_s *bfad = im_port->bfad;
	struct bfa_port_attr_s attr;

	bfa_fcport_get_attr(&bfad->bfa, &attr);

	/* Translate the negotiated BFA speed to the transport constant. */
	switch (attr.speed) {
	case BFA_PORT_SPEED_10GBPS:
		fc_host_speed(shost) = FC_PORTSPEED_10GBIT;
		break;
	case BFA_PORT_SPEED_16GBPS:
		fc_host_speed(shost) = FC_PORTSPEED_16GBIT;
		break;
	case BFA_PORT_SPEED_8GBPS:
		fc_host_speed(shost) = FC_PORTSPEED_8GBIT;
		break;
	case BFA_PORT_SPEED_4GBPS:
		fc_host_speed(shost) = FC_PORTSPEED_4GBIT;
		break;
	case BFA_PORT_SPEED_2GBPS:
		fc_host_speed(shost) = FC_PORTSPEED_2GBIT;
		break;
	case BFA_PORT_SPEED_1GBPS:
		fc_host_speed(shost) = FC_PORTSPEED_1GBIT;
		break;
	default:
		fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
		break;
	}
}

/*
 * FC transport template entry, get SCSI host port type.
 */
static void
bfad_im_get_host_fabric_name(struct Scsi_Host *shost)
{
	struct bfad_im_port_s *im_port =
			(struct bfad_im_port_s *) shost->hostdata[0];
	struct bfad_port_s *port = im_port->port;
	wwn_t fabric_nwwn = 0;

	fabric_nwwn = bfa_fcs_lport_get_fabric_name(port->fcs_port);

	fc_host_fabric_name(shost) = cpu_to_be64(fabric_nwwn);
}

/*
 * FC transport template entry, get BFAD statistics.
*/
static struct fc_host_statistics *
bfad_im_get_stats(struct Scsi_Host *shost)
{
	struct bfad_im_port_s *im_port =
			(struct bfad_im_port_s *) shost->hostdata[0];
	struct bfad_s *bfad = im_port->bfad;
	struct bfad_hal_comp fcomp;
	union bfa_port_stats_u *fcstats;
	struct fc_host_statistics *hstats;
	bfa_status_t rc;
	unsigned long flags;

	/* Scratch buffer the firmware fills in asynchronously. */
	fcstats = kzalloc(sizeof(union bfa_port_stats_u), GFP_KERNEL);
	if (fcstats == NULL)
		return NULL;

	hstats = &bfad->link_stats;
	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	memset(hstats, 0, sizeof(struct fc_host_statistics));
	rc = bfa_port_get_stats(BFA_FCPORT(&bfad->bfa),
				fcstats, bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (rc != BFA_STATUS_OK) {
		/* Fix: free the stats buffer on the error path; the
		 * original returned NULL here and leaked fcstats. */
		kfree(fcstats);
		return NULL;
	}

	/* Wait for bfad_hcb_comp() to signal firmware completion. */
	wait_for_completion(&fcomp.comp);

	/* Fill the fc_host_statistics structure */
	hstats->seconds_since_last_reset = fcstats->fc.secs_reset;
	hstats->tx_frames = fcstats->fc.tx_frames;
	hstats->tx_words  = fcstats->fc.tx_words;
	hstats->rx_frames = fcstats->fc.rx_frames;
	hstats->rx_words  = fcstats->fc.rx_words;
	hstats->lip_count = fcstats->fc.lip_count;
	hstats->nos_count = fcstats->fc.nos_count;
	hstats->error_frames = fcstats->fc.error_frames;
	hstats->dumped_frames = fcstats->fc.dropped_frames;
	hstats->link_failure_count = fcstats->fc.link_failures;
	hstats->loss_of_sync_count = fcstats->fc.loss_of_syncs;
	hstats->loss_of_signal_count = fcstats->fc.loss_of_signals;
	hstats->prim_seq_protocol_err_count = fcstats->fc.primseq_errs;
	hstats->invalid_crc_count = fcstats->fc.invalid_crcs;

	kfree(fcstats);
	return hstats;
}

/*
 * FC transport template entry, reset BFAD statistics.
*/
static void
bfad_im_reset_stats(struct Scsi_Host *shost)
{
	struct bfad_im_port_s *im_port =
			(struct bfad_im_port_s *) shost->hostdata[0];
	struct bfad_s *bfad = im_port->bfad;
	struct bfad_hal_comp fcomp;
	unsigned long flags;
	bfa_status_t rc;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	rc = bfa_port_clear_stats(BFA_FCPORT(&bfad->bfa), bfad_hcb_comp,
					&fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	/* Errors are silently ignored: this callback returns void. */
	if (rc != BFA_STATUS_OK)
		return;

	/* Block until the firmware acknowledges the clear. */
	wait_for_completion(&fcomp.comp);

	return;
}

/*
 * FC transport template entry, get rport loss timeout.
 */
static void
bfad_im_get_rport_loss_tmo(struct fc_rport *rport)
{
	struct bfad_itnim_data_s *itnim_data = rport->dd_data;
	struct bfad_itnim_s *itnim = itnim_data->itnim;
	struct bfad_s *bfad = itnim->im->bfad;
	unsigned long flags;

	/* dev_loss_tmo mirrors the driver-wide FCP path timeout value. */
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	rport->dev_loss_tmo = bfa_fcpim_path_tov_get(&bfad->bfa);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
}

/*
 * FC transport template entry, set rport loss timeout.
*/
static void
bfad_im_set_rport_loss_tmo(struct fc_rport *rport, u32 timeout)
{
	struct bfad_itnim_data_s *itnim_data = rport->dd_data;
	struct bfad_itnim_s *itnim = itnim_data->itnim;
	struct bfad_s *bfad = itnim->im->bfad;
	unsigned long flags;

	/*
	 * NOTE: the timeout is applied driver-wide via the FCP path TOV,
	 * not per-rport; a zero timeout is ignored.
	 */
	if (timeout > 0) {
		spin_lock_irqsave(&bfad->bfad_lock, flags);
		bfa_fcpim_path_tov_set(&bfad->bfa, timeout);
		rport->dev_loss_tmo = bfa_fcpim_path_tov_get(&bfad->bfa);
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	}
}

/*
 * FC transport template entry, create an NPIV virtual port.
 * Returns 0 on success or a VPCERR_*/FC_VPORT_FAILED code.
 */
static int
bfad_im_vport_create(struct fc_vport *fc_vport, bool disable)
{
	char *vname = fc_vport->symbolic_name;
	struct Scsi_Host *shost = fc_vport->shost;
	struct bfad_im_port_s *im_port =
		(struct bfad_im_port_s *) shost->hostdata[0];
	struct bfad_s *bfad = im_port->bfad;
	struct bfa_lport_cfg_s port_cfg;
	struct bfad_vport_s *vp;
	int status = 0, rc;
	unsigned long flags;

	memset(&port_cfg, 0, sizeof(port_cfg));
	u64_to_wwn(fc_vport->node_name, (u8 *)&port_cfg.nwwn);
	u64_to_wwn(fc_vport->port_name, (u8 *)&port_cfg.pwwn);
	if (strlen(vname) > 0)
		strcpy((char *)&port_cfg.sym_name, vname);
	port_cfg.roles = BFA_LPORT_ROLE_FCP_IM;

	/*
	 * Inherit the preboot flag if this pwwn matches a pre-boot
	 * configured vport.
	 */
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	list_for_each_entry(vp, &bfad->pbc_vport_list, list_entry) {
		if (port_cfg.pwwn ==
				vp->fcs_vport.lport.port_cfg.pwwn) {
			port_cfg.preboot_vp =
				vp->fcs_vport.lport.port_cfg.preboot_vp;
			break;
		}
	}
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	rc = bfad_vport_create(bfad, 0, &port_cfg, &fc_vport->dev);
	if (rc == BFA_STATUS_OK) {
		struct bfad_vport_s *vport;
		struct bfa_fcs_vport_s *fcs_vport;
		struct Scsi_Host *vshost;

		spin_lock_irqsave(&bfad->bfad_lock, flags);
		fcs_vport = bfa_fcs_vport_lookup(&bfad->bfa_fcs, 0,
					port_cfg.pwwn);
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		if (fcs_vport == NULL)
			return VPCERR_BAD_WWN;

		fc_vport_set_state(fc_vport, FC_VPORT_ACTIVE);
		if (disable) {
			/* Created disabled: stop the vport right away. */
			spin_lock_irqsave(&bfad->bfad_lock, flags);
			bfa_fcs_vport_stop(fcs_vport);
			spin_unlock_irqrestore(&bfad->bfad_lock, flags);
			fc_vport_set_state(fc_vport, FC_VPORT_DISABLED);
		}

		vport = fcs_vport->vport_drv;
		vshost = vport->drv_port.im_port->shost;
		fc_host_node_name(vshost) = wwn_to_u64((u8 *)&port_cfg.nwwn);
		fc_host_port_name(vshost) = wwn_to_u64((u8 *)&port_cfg.pwwn);
		fc_vport->dd_data = vport;
		vport->drv_port.im_port->fc_vport = fc_vport;
	} else if (rc == BFA_STATUS_INVALID_WWN)
		return VPCERR_BAD_WWN;
	else if (rc == BFA_STATUS_VPORT_EXISTS)
		return VPCERR_BAD_WWN;
	else if (rc == BFA_STATUS_VPORT_MAX)
		return VPCERR_NO_FABRIC_SUPP;
	else if (rc == BFA_STATUS_VPORT_WWN_BP)
		return VPCERR_BAD_WWN;
	else
		return FC_VPORT_FAILED;

	return status;
}

/*
 * Issue a LIP: implemented as a port disable followed by a port enable,
 * waiting for firmware completion of each step. Returns 0 or -EIO.
 */
int
bfad_im_issue_fc_host_lip(struct Scsi_Host *shost)
{
	struct bfad_im_port_s *im_port =
			(struct bfad_im_port_s *) shost->hostdata[0];
	struct bfad_s *bfad = im_port->bfad;
	struct bfad_hal_comp fcomp;
	unsigned long flags;
	uint32_t status;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	status = bfa_port_disable(&bfad->bfa.modules.port,
					bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	if (status != BFA_STATUS_OK)
		return -EIO;

	wait_for_completion(&fcomp.comp);
	if (fcomp.status != BFA_STATUS_OK)
		return -EIO;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	status = bfa_port_enable(&bfad->bfa.modules.port,
					bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (status != BFA_STATUS_OK)
		return -EIO;

	wait_for_completion(&fcomp.comp);
	if (fcomp.status != BFA_STATUS_OK)
		return -EIO;

	return 0;
}

/*
 * FC transport template entry, delete a virtual port.
 */
static int
bfad_im_vport_delete(struct fc_vport *fc_vport)
{
	struct bfad_vport_s *vport = (struct bfad_vport_s *)fc_vport->dd_data;
	struct bfad_im_port_s *im_port =
			(struct bfad_im_port_s *) vport->drv_port.im_port;
	struct bfad_s *bfad = im_port->bfad;
	struct bfad_port_s *port;
	struct bfa_fcs_vport_s *fcs_vport;
	struct Scsi_Host *vshost;
	wwn_t pwwn;
	int rc;
	unsigned long flags;
	struct completion fcomp;

	/* Port already being torn down elsewhere: just free host data. */
	if (im_port->flags & BFAD_PORT_DELETE) {
		bfad_scsi_host_free(bfad, im_port);
		list_del(&vport->list_entry);
		return 0;
	}

	port = im_port->port;

	vshost = vport->drv_port.im_port->shost;
	u64_to_wwn(fc_host_port_name(vshost), (u8 *)&pwwn);

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_vport = bfa_fcs_vport_lookup(&bfad->bfa_fcs, 0, pwwn);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	if (fcs_vport == NULL)
		return VPCERR_BAD_WWN;

	vport->drv_port.flags |= BFAD_PORT_DELETE;

	vport->comp_del = &fcomp;
	init_completion(vport->comp_del);

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	rc = bfa_fcs_vport_delete(&vport->fcs_vport);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	if (rc == BFA_STATUS_PBC) {
		/* Pre-boot configured vports cannot be deleted: roll back. */
		vport->drv_port.flags &= ~BFAD_PORT_DELETE;
		vport->comp_del = NULL;
		return -1;
	}

	wait_for_completion(vport->comp_del);

	bfad_scsi_host_free(bfad, im_port);
	list_del(&vport->list_entry);
	kfree(vport);

	return 0;
}

/*
 * FC transport template entry, disable or re-enable a virtual port.
 */
static int
bfad_im_vport_disable(struct fc_vport *fc_vport, bool disable)
{
	struct bfad_vport_s *vport;
	struct bfad_s *bfad;
	struct bfa_fcs_vport_s *fcs_vport;
	struct Scsi_Host *vshost;
	wwn_t pwwn;
	unsigned long flags;

	vport = (struct bfad_vport_s *)fc_vport->dd_data;
	bfad = vport->drv_port.bfad;
	vshost = vport->drv_port.im_port->shost;
	u64_to_wwn(fc_host_port_name(vshost), (u8 *)&pwwn);

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_vport = bfa_fcs_vport_lookup(&bfad->bfa_fcs, 0, pwwn);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	if (fcs_vport == NULL)
		return VPCERR_BAD_WWN;

	if (disable) {
		bfa_fcs_vport_stop(fcs_vport);
		fc_vport_set_state(fc_vport, FC_VPORT_DISABLED);
	} else {
		bfa_fcs_vport_start(fcs_vport);
		fc_vport_set_state(fc_vport, FC_VPORT_ACTIVE);
	}

	return 0;
}

/* FC transport template for the physical (base) port. */
struct fc_function_template bfad_im_fc_function_template = {

	/* Target dynamic attributes */
	.get_starget_port_id = bfad_im_get_starget_port_id,
	.show_starget_port_id = 1,
	.get_starget_node_name = bfad_im_get_starget_node_name,
	.show_starget_node_name = 1,
	.get_starget_port_name = bfad_im_get_starget_port_name,
	.show_starget_port_name = 1,

	/* Host dynamic attribute */
	.get_host_port_id = bfad_im_get_host_port_id,
	.show_host_port_id = 1,

	/* Host fixed attributes */
	.show_host_node_name = 1,
	.show_host_port_name = 1,
	.show_host_supported_classes = 1,
	.show_host_supported_fc4s = 1,
	.show_host_supported_speeds = 1,
	.show_host_maxframe_size = 1,

	/* More host dynamic attributes */
	.show_host_port_type = 1,
	.get_host_port_type = bfad_im_get_host_port_type,
	.show_host_port_state = 1,
	.get_host_port_state = bfad_im_get_host_port_state,
	.show_host_active_fc4s = 1,
	.get_host_active_fc4s = bfad_im_get_host_active_fc4s,
	.show_host_speed = 1,
	.get_host_speed = bfad_im_get_host_speed,
	.show_host_fabric_name = 1,
	.get_host_fabric_name = bfad_im_get_host_fabric_name,

	.show_host_symbolic_name = 1,

	/* Statistics */
	.get_fc_host_stats = bfad_im_get_stats,
	.reset_fc_host_stats = bfad_im_reset_stats,

	/* Allocation length for host specific data */
	.dd_fcrport_size = sizeof(struct bfad_itnim_data_s *),

	/* Remote port fixed attributes */
	.show_rport_maxframe_size = 1,
	.show_rport_supported_classes = 1,
	.show_rport_dev_loss_tmo = 1,
	.get_rport_dev_loss_tmo = bfad_im_get_rport_loss_tmo,
	.set_rport_dev_loss_tmo = bfad_im_set_rport_loss_tmo,
	.issue_fc_host_lip = bfad_im_issue_fc_host_lip,
	.vport_create = bfad_im_vport_create,
	.vport_delete = bfad_im_vport_delete,
	.vport_disable = bfad_im_vport_disable,
	.bsg_request = bfad_im_bsg_request,
	.bsg_timeout = bfad_im_bsg_timeout,
};

/*
 * FC transport template for NPIV vports: same as the base-port template
 * minus LIP, vport management and BSG entries.
 */
struct fc_function_template bfad_im_vport_fc_function_template = {

	/* Target dynamic attributes */
	.get_starget_port_id = bfad_im_get_starget_port_id,
	.show_starget_port_id = 1,
	.get_starget_node_name = bfad_im_get_starget_node_name,
	.show_starget_node_name = 1,
	.get_starget_port_name = bfad_im_get_starget_port_name,
	.show_starget_port_name = 1,

	/* Host dynamic attribute */
	.get_host_port_id = bfad_im_get_host_port_id,
	.show_host_port_id = 1,

	/* Host fixed attributes */
	.show_host_node_name = 1,
	.show_host_port_name = 1,
	.show_host_supported_classes = 1,
	.show_host_supported_fc4s = 1,
	.show_host_supported_speeds = 1,
	.show_host_maxframe_size = 1,

	/* More host dynamic attributes */
	.show_host_port_type = 1,
	.get_host_port_type = bfad_im_get_host_port_type,
	.show_host_port_state = 1,
	.get_host_port_state = bfad_im_get_host_port_state,
	.show_host_active_fc4s = 1,
	.get_host_active_fc4s = bfad_im_get_host_active_fc4s,
	.show_host_speed = 1,
	.get_host_speed = bfad_im_get_host_speed,
	.show_host_fabric_name = 1,
	.get_host_fabric_name = bfad_im_get_host_fabric_name,

	.show_host_symbolic_name = 1,

	/* Statistics */
	.get_fc_host_stats = bfad_im_get_stats,
	.reset_fc_host_stats = bfad_im_reset_stats,

	/* Allocation length for host specific data */
	.dd_fcrport_size = sizeof(struct bfad_itnim_data_s *),

	/* Remote port fixed attributes */
	.show_rport_maxframe_size = 1,
	.show_rport_supported_classes = 1,
	.show_rport_dev_loss_tmo = 1,
	.get_rport_dev_loss_tmo = bfad_im_get_rport_loss_tmo,
	.set_rport_dev_loss_tmo = bfad_im_set_rport_loss_tmo,
};

/*
 * Scsi_Host_attrs SCSI host attributes
 */
static ssize_t
bfad_im_serial_num_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct bfad_im_port_s *im_port =
			(struct bfad_im_port_s *) shost->hostdata[0];
	struct bfad_s *bfad = im_port->bfad;
	char serial_num[BFA_ADAPTER_SERIAL_NUM_LEN];

	bfa_get_adapter_serial_num(&bfad->bfa, serial_num);
	return snprintf(buf, PAGE_SIZE, "%s\n", serial_num);
}

static ssize_t
bfad_im_model_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct bfad_im_port_s *im_port =
			(struct bfad_im_port_s *) shost->hostdata[0];
	struct bfad_s *bfad = im_port->bfad;
	char model[BFA_ADAPTER_MODEL_NAME_LEN];

	bfa_get_adapter_model(&bfad->bfa, model);
	return snprintf(buf, PAGE_SIZE, "%s\n", model);
}

/* Map the adapter model string to a human-readable description. */
static ssize_t
bfad_im_model_desc_show(struct device *dev, struct device_attribute *attr,
				char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct bfad_im_port_s *im_port =
			(struct bfad_im_port_s *) shost->hostdata[0];
	struct bfad_s *bfad = im_port->bfad;
	char model[BFA_ADAPTER_MODEL_NAME_LEN];
	char model_descr[BFA_ADAPTER_MODEL_DESCR_LEN];
	int nports = 0;

	bfa_get_adapter_model(&bfad->bfa, model);
	nports = bfa_get_nports(&bfad->bfa);
	if (!strcmp(model, "Brocade-425"))
		snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
			"Brocade 4Gbps PCIe dual port FC HBA");
	else if (!strcmp(model, "Brocade-825"))
		snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
			"Brocade 8Gbps PCIe dual port FC HBA");
	else if (!strcmp(model, "Brocade-42B"))
		snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
			"Brocade 4Gbps PCIe dual port FC HBA for HP");
	else if (!strcmp(model, "Brocade-82B"))
		snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
			"Brocade 8Gbps PCIe dual port FC HBA for HP");
	else if (!strcmp(model, "Brocade-1010"))
		snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
			"Brocade 10Gbps single port CNA");
	else if (!strcmp(model, "Brocade-1020"))
		snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
			"Brocade 10Gbps dual port CNA");
	else if (!strcmp(model, "Brocade-1007"))
		snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
			"Brocade 10Gbps CNA for IBM Blade Center");
	else if (!strcmp(model, "Brocade-415"))
		snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
			"Brocade 4Gbps PCIe single port FC HBA");
	else if (!strcmp(model, "Brocade-815"))
		snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
			"Brocade 8Gbps PCIe single port FC HBA");
	else if (!strcmp(model, "Brocade-41B"))
		snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
			"Brocade 4Gbps PCIe single port FC HBA for HP");
	else if (!strcmp(model, "Brocade-81B"))
		snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
			"Brocade 8Gbps PCIe single port FC HBA for HP");
	else if (!strcmp(model, "Brocade-804"))
		snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
			"Brocade 8Gbps FC HBA for HP Bladesystem C-class");
	else if (!strcmp(model, "Brocade-902") ||
		!strcmp(model, "Brocade-1741"))
		snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
			"Brocade 10Gbps CNA for Dell M-Series Blade Servers");
	else if (strstr(model, "Brocade-1560")) {
		if (nports == 1)
			snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
				"Brocade 16Gbps PCIe single port FC HBA");
		else
			snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
				"Brocade 16Gbps PCIe dual port FC HBA");
	} else if (strstr(model, "Brocade-1710")) {
		if (nports == 1)
			snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
				"Brocade 10Gbps single port CNA");
		else
			snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
				"Brocade 10Gbps dual port CNA");
	} else if (strstr(model, "Brocade-1860")) {
		/* 1860 is FC or CNA depending on the IOC personality. */
		if (nports == 1 && bfa_ioc_is_cna(&bfad->bfa.ioc))
			snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
				"Brocade 10Gbps single port CNA");
		else if (nports == 1 && !bfa_ioc_is_cna(&bfad->bfa.ioc))
			snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
				"Brocade 16Gbps PCIe single port FC HBA");
		else if (nports == 2 && bfa_ioc_is_cna(&bfad->bfa.ioc))
			snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
				"Brocade 10Gbps dual port CNA");
		else if (nports == 2 && !bfa_ioc_is_cna(&bfad->bfa.ioc))
			snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
				"Brocade 16Gbps PCIe dual port FC HBA");
	} else
		snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
			"Invalid Model");

	return snprintf(buf, PAGE_SIZE, "%s\n", model_descr);
}

static ssize_t
bfad_im_node_name_show(struct device *dev, struct device_attribute *attr,
				char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct bfad_im_port_s *im_port =
			(struct bfad_im_port_s *) shost->hostdata[0];
	struct bfad_port_s *port = im_port->port;
	u64 nwwn;

	nwwn = bfa_fcs_lport_get_nwwn(port->fcs_port);
	return snprintf(buf, PAGE_SIZE, "0x%llx\n", cpu_to_be64(nwwn));
}

static ssize_t
bfad_im_symbolic_name_show(struct device *dev, struct device_attribute *attr,
				char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct bfad_im_port_s *im_port =
			(struct bfad_im_port_s *) shost->hostdata[0];
	struct bfad_s *bfad = im_port->bfad;
	struct bfa_lport_attr_s port_attr;
	char symname[BFA_SYMNAME_MAXLEN];

	bfa_fcs_lport_get_attr(&bfad->bfa_fcs.fabric.bport, &port_attr);
	strncpy(symname, port_attr.port_cfg.sym_name.symname,
			BFA_SYMNAME_MAXLEN);
	return snprintf(buf, PAGE_SIZE, "%s\n", symname);
}

static ssize_t
bfad_im_hw_version_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct bfad_im_port_s *im_port =
			(struct bfad_im_port_s *) shost->hostdata[0];
	struct bfad_s *bfad = im_port->bfad;
	char hw_ver[BFA_VERSION_LEN];

	bfa_get_pci_chip_rev(&bfad->bfa, hw_ver);
	return snprintf(buf, PAGE_SIZE, "%s\n", hw_ver);
}

static ssize_t
bfad_im_drv_version_show(struct device *dev, struct device_attribute *attr,
				char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%s\n", BFAD_DRIVER_VERSION);
}

static ssize_t
bfad_im_optionrom_version_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct bfad_im_port_s *im_port =
			(struct bfad_im_port_s *) shost->hostdata[0];
	struct bfad_s *bfad = im_port->bfad;
	char optrom_ver[BFA_VERSION_LEN];

	bfa_get_adapter_optrom_ver(&bfad->bfa, optrom_ver);
	return snprintf(buf, PAGE_SIZE, "%s\n", optrom_ver);
}

static ssize_t
bfad_im_fw_version_show(struct device *dev, struct device_attribute *attr,
				char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct bfad_im_port_s *im_port =
			(struct bfad_im_port_s *) shost->hostdata[0];
	struct bfad_s *bfad = im_port->bfad;
	char fw_ver[BFA_VERSION_LEN];

	bfa_get_adapter_fw_ver(&bfad->bfa, fw_ver);
	return snprintf(buf, PAGE_SIZE, "%s\n", fw_ver);
}

static ssize_t
bfad_im_num_of_ports_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct bfad_im_port_s *im_port =
			(struct bfad_im_port_s *) shost->hostdata[0];
	struct bfad_s *bfad = im_port->bfad;

	return snprintf(buf, PAGE_SIZE, "%d\n",
			bfa_get_nports(&bfad->bfa));
}

static ssize_t
bfad_im_drv_name_show(struct device *dev, struct device_attribute *attr,
				char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%s\n", BFAD_DRIVER_NAME);
}

static ssize_t
bfad_im_num_of_discovered_ports_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct bfad_im_port_s *im_port =
			(struct bfad_im_port_s *) shost->hostdata[0];
	struct bfad_port_s *port = im_port->port;
	struct bfad_s *bfad = im_port->bfad;
	int        nrports = 2048;
	wwn_t	   *rports = NULL;
	unsigned long flags;

	/*
	 * NOTE(review): 2048 * sizeof(wwn_t) with GFP_ATOMIC is a large
	 * atomic allocation; only the count is reported, the WWN list is
	 * discarded.
	 */
	rports = kzalloc(sizeof(wwn_t) * nrports , GFP_ATOMIC);
	if (rports == NULL)
		return snprintf(buf, PAGE_SIZE, "Failed\n");

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	bfa_fcs_lport_get_rports(port->fcs_port, rports, &nrports);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	kfree(rports);

	return snprintf(buf, PAGE_SIZE, "%d\n", nrports);
}

static          DEVICE_ATTR(serial_number, S_IRUGO,
				bfad_im_serial_num_show, NULL);
static          DEVICE_ATTR(model, S_IRUGO, bfad_im_model_show, NULL);
static          DEVICE_ATTR(model_description, S_IRUGO,
				bfad_im_model_desc_show, NULL);
static          DEVICE_ATTR(node_name, S_IRUGO, bfad_im_node_name_show, NULL);
static          DEVICE_ATTR(symbolic_name, S_IRUGO,
				bfad_im_symbolic_name_show, NULL);
static          DEVICE_ATTR(hardware_version, S_IRUGO,
				bfad_im_hw_version_show, NULL);
static          DEVICE_ATTR(driver_version, S_IRUGO,
				bfad_im_drv_version_show, NULL);
static          DEVICE_ATTR(option_rom_version, S_IRUGO,
				bfad_im_optionrom_version_show, NULL);
static          DEVICE_ATTR(firmware_version, S_IRUGO,
				bfad_im_fw_version_show, NULL);
static          DEVICE_ATTR(number_of_ports, S_IRUGO,
				bfad_im_num_of_ports_show, NULL);
static          DEVICE_ATTR(driver_name, S_IRUGO, bfad_im_drv_name_show, NULL);
static          DEVICE_ATTR(number_of_discovered_ports, S_IRUGO,
				bfad_im_num_of_discovered_ports_show, NULL);

/* sysfs attributes exported for the physical port's Scsi_Host. */
struct device_attribute *bfad_im_host_attrs[] = {
	&dev_attr_serial_number,
	&dev_attr_model,
	&dev_attr_model_description,
	&dev_attr_node_name,
	&dev_attr_symbolic_name,
	&dev_attr_hardware_version,
	&dev_attr_driver_version,
	&dev_attr_option_rom_version,
	&dev_attr_firmware_version,
	&dev_attr_number_of_ports,
	&dev_attr_driver_name,
	&dev_attr_number_of_discovered_ports,
	NULL,
};

/* sysfs attributes exported for each vport's Scsi_Host. */
struct device_attribute *bfad_im_vport_attrs[] = {
	&dev_attr_serial_number,
	&dev_attr_model,
	&dev_attr_model_description,
	&dev_attr_node_name,
	&dev_attr_symbolic_name,
	&dev_attr_hardware_version,
	&dev_attr_driver_version,
	&dev_attr_option_rom_version,
	&dev_attr_firmware_version,
	&dev_attr_number_of_ports,
	&dev_attr_driver_name,
	&dev_attr_number_of_discovered_ports,
	NULL,
};
gpl-2.0
davidmueller13/bricked-hammerhead
arch/arm/mach-sa1100/leds-hackkit.c
4797
2268
/*
 * linux/arch/arm/mach-sa1100/leds-hackkit.c
 *
 * based on leds-lart.c
 *
 * (C) Erik Mouw (J.A.K.Mouw@its.tudelft.nl), April 21, 2000
 * (C) Stefan Eletzhofer <stefan.eletzhofer@eletztrick.de>, 2002
 *
 * The HackKit has two leds (GPIO 22/23). The red led (gpio 22) is used
 * as cpu led, the green one is used as timer led.
 */
#include <linux/init.h>

#include <mach/hardware.h>
#include <asm/leds.h>
#include "leds.h"

#define LED_STATE_ENABLED	1
#define LED_STATE_CLAIMED	2

static unsigned int led_state;
static unsigned int hw_led_state;

#define LED_GREEN	GPIO_GPIO23
#define LED_RED		GPIO_GPIO22
#define LED_MASK	(LED_RED | LED_GREEN)

/*
 * LED event handler: updates the cached hw_led_state and pushes it to
 * the GPIO set/clear registers. Runs with interrupts disabled since
 * it may be called from both IRQ and process context.
 */
void hackkit_leds_event(led_event_t evt)
{
	unsigned long flags;

	local_irq_save(flags);

	switch(evt) {
	case led_start:
		/* pin 22/23 are outputs */
		GPDR |= LED_MASK;
		hw_led_state = LED_MASK;
		led_state = LED_STATE_ENABLED;
		break;

	case led_stop:
		led_state &= ~LED_STATE_ENABLED;
		break;

	case led_claim:
		led_state |= LED_STATE_CLAIMED;
		hw_led_state = LED_MASK;
		break;

	case led_release:
		led_state &= ~LED_STATE_CLAIMED;
		hw_led_state = LED_MASK;
		break;

#ifdef CONFIG_LEDS_TIMER
	case led_timer:
		if (!(led_state & LED_STATE_CLAIMED))
			hw_led_state ^= LED_GREEN;
		break;
#endif

#ifdef CONFIG_LEDS_CPU
	case led_idle_start:
		/* The LART people like the LED to be off when the
		   system is idle... */
		if (!(led_state & LED_STATE_CLAIMED))
			hw_led_state &= ~LED_RED;
		break;

	case led_idle_end:
		/* ... and on if the system is not idle */
		if (!(led_state & LED_STATE_CLAIMED))
			hw_led_state |= LED_RED;
		break;
#endif

	/*
	 * NOTE(review): "on" clears the bit and "off" sets it; combined
	 * with the GPSR/GPCR inversion below this implies the LEDs are
	 * wired active-low — confirm against board schematics.
	 */
	case led_red_on:
		if (led_state & LED_STATE_CLAIMED)
			hw_led_state &= ~LED_RED;
		break;

	case led_red_off:
		if (led_state & LED_STATE_CLAIMED)
			hw_led_state |= LED_RED;
		break;

	case led_green_on:
		if (led_state & LED_STATE_CLAIMED)
			hw_led_state &= ~LED_GREEN;
		break;

	case led_green_off:
		if (led_state & LED_STATE_CLAIMED)
			hw_led_state |= LED_GREEN;
		break;

	default:
		break;
	}

	/* Now set the GPIO state, or nothing will happen at all */
	if (led_state & LED_STATE_ENABLED) {
		GPSR = hw_led_state;
		GPCR = hw_led_state ^ LED_MASK;
	}

	local_irq_restore(flags);
}
gpl-2.0
remicks/android_kernel_lge_hammerhead
arch/powerpc/platforms/85xx/socrates_fpga_pic.c
6845
8527
/*
 * Copyright (C) 2008 Ilya Yanok, Emcraft Systems
 *
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#include <linux/irq.h>
#include <linux/of_platform.h>
#include <linux/io.h>

/*
 * The FPGA supports 9 interrupt sources, which can be routed to 3
 * interrupt request lines of the MPIC. The line to be used can be
 * specified through the third cell of FDT property  "interrupts".
 */

#define SOCRATES_FPGA_NUM_IRQS	9

#define FPGA_PIC_IRQCFG		(0x0)
#define FPGA_PIC_IRQMASK(n)	(0x4 + 0x4 * (n))

#define SOCRATES_FPGA_IRQ_MASK	((1 << SOCRATES_FPGA_NUM_IRQS) - 1)

struct socrates_fpga_irq_info {
	unsigned int irq_line;	/* which of the 3 MPIC lines this source uses */
	int type;		/* fixed trigger type, or IRQ_TYPE_NONE */
};

/*
 * Interrupt routing and type table
 *
 * IRQ_TYPE_NONE means the interrupt type is configurable,
 * otherwise it's fixed to the specified value.
 */
static struct socrates_fpga_irq_info fpga_irqs[SOCRATES_FPGA_NUM_IRQS] = {
	[0] = {0, IRQ_TYPE_NONE},
	[1] = {0, IRQ_TYPE_LEVEL_HIGH},
	[2] = {0, IRQ_TYPE_LEVEL_LOW},
	[3] = {0, IRQ_TYPE_NONE},
	[4] = {0, IRQ_TYPE_NONE},
	[5] = {0, IRQ_TYPE_NONE},
	[6] = {0, IRQ_TYPE_NONE},
	[7] = {0, IRQ_TYPE_NONE},
	[8] = {0, IRQ_TYPE_LEVEL_HIGH},
};

static DEFINE_RAW_SPINLOCK(socrates_fpga_pic_lock);

static void __iomem *socrates_fpga_pic_iobase;
static struct irq_domain *socrates_fpga_pic_irq_host;
static unsigned int socrates_fpga_irqs[3];

static inline uint32_t socrates_fpga_pic_read(int reg)
{
	return in_be32(socrates_fpga_pic_iobase + reg);
}

static inline void socrates_fpga_pic_write(int reg, uint32_t val)
{
	out_be32(socrates_fpga_pic_iobase + reg, val);
}

/*
 * Translate a parent (MPIC) irq into the virq of the pending FPGA
 * source, or NO_IRQ if the irq is not one of ours.
 *
 * Status bits appear to live in the upper 16 bits of the mask register
 * (note the +16 shifts here and in the ack/eoi paths) — confirm against
 * the FPGA register specification.
 */
static inline unsigned int socrates_fpga_pic_get_irq(unsigned int irq)
{
	uint32_t cause;
	unsigned long flags;
	int i;

	/* Check irq line routed to the MPIC */
	for (i = 0; i < 3; i++) {
		if (irq == socrates_fpga_irqs[i])
			break;
	}
	if (i == 3)
		return NO_IRQ;

	raw_spin_lock_irqsave(&socrates_fpga_pic_lock, flags);
	cause = socrates_fpga_pic_read(FPGA_PIC_IRQMASK(i));
	raw_spin_unlock_irqrestore(&socrates_fpga_pic_lock, flags);
	/* Scan from the highest source down; the first hit is the one. */
	for (i = SOCRATES_FPGA_NUM_IRQS - 1; i >= 0; i--) {
		if (cause >> (i + 16))
			break;
	}
	return irq_linear_revmap(socrates_fpga_pic_irq_host,
			(irq_hw_number_t)i);
}

/* Chained handler installed on the three MPIC parent interrupts. */
void socrates_fpga_pic_cascade(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	unsigned int cascade_irq;

	/*
	 * See if we actually have an interrupt, call generic handling code if
	 * we do.
	 */
	cascade_irq = socrates_fpga_pic_get_irq(irq);

	if (cascade_irq != NO_IRQ)
		generic_handle_irq(cascade_irq);
	chip->irq_eoi(&desc->irq_data);
}

static void socrates_fpga_pic_ack(struct irq_data *d)
{
	unsigned long flags;
	unsigned int irq_line, hwirq = irqd_to_hwirq(d);
	uint32_t mask;

	irq_line = fpga_irqs[hwirq].irq_line;
	raw_spin_lock_irqsave(&socrates_fpga_pic_lock, flags);
	mask = socrates_fpga_pic_read(FPGA_PIC_IRQMASK(irq_line))
		& SOCRATES_FPGA_IRQ_MASK;
	/* Writing the status bit (upper half) acknowledges the source. */
	mask |= (1 << (hwirq + 16));
	socrates_fpga_pic_write(FPGA_PIC_IRQMASK(irq_line), mask);
	raw_spin_unlock_irqrestore(&socrates_fpga_pic_lock, flags);
}

static void socrates_fpga_pic_mask(struct irq_data *d)
{
	unsigned long flags;
	unsigned int hwirq = irqd_to_hwirq(d);
	int irq_line;
	u32 mask;

	irq_line = fpga_irqs[hwirq].irq_line;
	raw_spin_lock_irqsave(&socrates_fpga_pic_lock, flags);
	mask = socrates_fpga_pic_read(FPGA_PIC_IRQMASK(irq_line))
		& SOCRATES_FPGA_IRQ_MASK;
	mask &= ~(1 << hwirq);
	socrates_fpga_pic_write(FPGA_PIC_IRQMASK(irq_line), mask);
	raw_spin_unlock_irqrestore(&socrates_fpga_pic_lock, flags);
}

static void socrates_fpga_pic_mask_ack(struct irq_data *d)
{
	unsigned long flags;
	unsigned int hwirq = irqd_to_hwirq(d);
	int irq_line;
	u32 mask;

	irq_line = fpga_irqs[hwirq].irq_line;
	raw_spin_lock_irqsave(&socrates_fpga_pic_lock, flags);
	mask = socrates_fpga_pic_read(FPGA_PIC_IRQMASK(irq_line))
		& SOCRATES_FPGA_IRQ_MASK;
	/* mask (clear enable bit) and ack (set status bit) in one write */
	mask &= ~(1 << hwirq);
	mask |= (1 << (hwirq + 16));
	socrates_fpga_pic_write(FPGA_PIC_IRQMASK(irq_line), mask);
	raw_spin_unlock_irqrestore(&socrates_fpga_pic_lock, flags);
}

static void socrates_fpga_pic_unmask(struct irq_data *d)
{
	unsigned long flags;
	unsigned int hwirq = irqd_to_hwirq(d);
	int irq_line;
	u32 mask;

	irq_line = fpga_irqs[hwirq].irq_line;
	raw_spin_lock_irqsave(&socrates_fpga_pic_lock, flags);
	mask = socrates_fpga_pic_read(FPGA_PIC_IRQMASK(irq_line))
		& SOCRATES_FPGA_IRQ_MASK;
	mask |= (1 << hwirq);
	socrates_fpga_pic_write(FPGA_PIC_IRQMASK(irq_line), mask);
	raw_spin_unlock_irqrestore(&socrates_fpga_pic_lock, flags);
}

static void socrates_fpga_pic_eoi(struct irq_data *d)
{
	unsigned long flags;
	unsigned int hwirq = irqd_to_hwirq(d);
	int irq_line;
	u32 mask;

	irq_line = fpga_irqs[hwirq].irq_line;
	raw_spin_lock_irqsave(&socrates_fpga_pic_lock, flags);
	mask = socrates_fpga_pic_read(FPGA_PIC_IRQMASK(irq_line))
		& SOCRATES_FPGA_IRQ_MASK;
	mask |= (1 << (hwirq + 16));
	socrates_fpga_pic_write(FPGA_PIC_IRQMASK(irq_line), mask);
	raw_spin_unlock_irqrestore(&socrates_fpga_pic_lock, flags);
}

/*
 * Configure the trigger polarity for sources whose type is not fixed
 * in fpga_irqs[]. Only level triggers are supported.
 */
static int socrates_fpga_pic_set_type(struct irq_data *d,
		unsigned int flow_type)
{
	unsigned long flags;
	unsigned int hwirq = irqd_to_hwirq(d);
	int polarity;
	u32 mask;

	if (fpga_irqs[hwirq].type != IRQ_TYPE_NONE)
		return -EINVAL;

	switch (flow_type & IRQ_TYPE_SENSE_MASK) {
	case IRQ_TYPE_LEVEL_HIGH:
		polarity = 1;
		break;
	case IRQ_TYPE_LEVEL_LOW:
		polarity = 0;
		break;
	default:
		return -EINVAL;
	}
	raw_spin_lock_irqsave(&socrates_fpga_pic_lock, flags);
	mask = socrates_fpga_pic_read(FPGA_PIC_IRQCFG);
	if (polarity)
		mask |= (1 << hwirq);
	else
		mask &= ~(1 << hwirq);
	socrates_fpga_pic_write(FPGA_PIC_IRQCFG, mask);
	raw_spin_unlock_irqrestore(&socrates_fpga_pic_lock, flags);

	return 0;
}

static struct irq_chip socrates_fpga_pic_chip = {
	.name		= "FPGA-PIC",
	.irq_ack	= socrates_fpga_pic_ack,
	.irq_mask	= socrates_fpga_pic_mask,
	.irq_mask_ack	= socrates_fpga_pic_mask_ack,
	.irq_unmask	= socrates_fpga_pic_unmask,
	.irq_eoi	= socrates_fpga_pic_eoi,
	.irq_set_type	= socrates_fpga_pic_set_type,
};

static int socrates_fpga_pic_host_map(struct irq_domain *h, unsigned int virq,
		irq_hw_number_t hwirq)
{
	/* All interrupts are LEVEL sensitive */
	irq_set_status_flags(virq, IRQ_LEVEL);
	irq_set_chip_and_handler(virq, &socrates_fpga_pic_chip,
			handle_fasteoi_irq);

	return 0;
}

/*
 * Decode the 3-cell FDT interrupt specifier:
 * cell 0 = source, cell 1 = trigger type, cell 2 = MPIC line routing.
 */
static int socrates_fpga_pic_host_xlate(struct irq_domain *h,
		struct device_node *ct,	const u32 *intspec, unsigned int intsize,
		irq_hw_number_t *out_hwirq, unsigned int *out_flags)
{
	struct socrates_fpga_irq_info *fpga_irq = &fpga_irqs[intspec[0]];

	*out_hwirq = intspec[0];
	if  (fpga_irq->type == IRQ_TYPE_NONE) {
		/* type is configurable */
		if (intspec[1] != IRQ_TYPE_LEVEL_LOW &&
		    intspec[1] != IRQ_TYPE_LEVEL_HIGH) {
			pr_warning("FPGA PIC: invalid irq type, "
				   "setting default active low\n");
			*out_flags = IRQ_TYPE_LEVEL_LOW;
		} else {
			*out_flags = intspec[1];
		}
	} else {
		/* type is fixed */
		*out_flags = fpga_irq->type;
	}

	/* Use specified interrupt routing */
	if (intspec[2] <= 2)
		fpga_irq->irq_line = intspec[2];
	else
		pr_warning("FPGA PIC: invalid irq routing\n");

	return 0;
}

static const struct irq_domain_ops socrates_fpga_pic_host_ops = {
	.map    = socrates_fpga_pic_host_map,
	.xlate  = socrates_fpga_pic_host_xlate,
};

/*
 * Probe-time setup: create the irq domain, hook the three cascade
 * interrupts, map the registers and ack/clear all sources.
 */
void socrates_fpga_pic_init(struct device_node *pic)
{
	unsigned long flags;
	int i;

	/* Setup an irq_domain structure */
	socrates_fpga_pic_irq_host = irq_domain_add_linear(pic,
		    SOCRATES_FPGA_NUM_IRQS, &socrates_fpga_pic_host_ops, NULL);
	if (socrates_fpga_pic_irq_host == NULL) {
		pr_err("FPGA PIC: Unable to allocate host\n");
		return;
	}

	for (i = 0; i < 3; i++) {
		socrates_fpga_irqs[i] = irq_of_parse_and_map(pic, i);
		if (socrates_fpga_irqs[i] == NO_IRQ) {
			pr_warning("FPGA PIC: can't get irq%d.\n", i);
			continue;
		}
		irq_set_chained_handler(socrates_fpga_irqs[i],
					socrates_fpga_pic_cascade);
	}

	socrates_fpga_pic_iobase = of_iomap(pic, 0);

	raw_spin_lock_irqsave(&socrates_fpga_pic_lock, flags);
	socrates_fpga_pic_write(FPGA_PIC_IRQMASK(0),
			SOCRATES_FPGA_IRQ_MASK << 16);
	socrates_fpga_pic_write(FPGA_PIC_IRQMASK(1),
			SOCRATES_FPGA_IRQ_MASK << 16);
	socrates_fpga_pic_write(FPGA_PIC_IRQMASK(2),
			SOCRATES_FPGA_IRQ_MASK << 16);
	raw_spin_unlock_irqrestore(&socrates_fpga_pic_lock, flags);

	pr_info("FPGA PIC: Setting up Socrates FPGA PIC\n");
}
gpl-2.0
system1357/pdk7105-3.4
sound/core/seq/seq_ports.c
7613
18925
/*
 *   ALSA sequencer Ports
 *   Copyright (c) 1998 by Frank van de Pol <fvdpol@coil.demon.nl>
 *                         Jaroslav Kysela <perex@perex.cz>
 *
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 *
 */

#include <sound/core.h>
#include <linux/slab.h>
#include <linux/module.h>
#include "seq_system.h"
#include "seq_ports.h"
#include "seq_clientmgr.h"

/*

   registration of client ports

 */


/*

NOTE: the current implementation of the port structure as a linked list is
not optimal for clients that have many ports. For sending messages to all
subscribers of a port we first need to find the address of the port
structure, which means we have to traverse the list. A direct access table
(array) would be better, but big preallocated arrays waste memory.

Possible actions:

1) leave it this way; a client normally does not have more than a few
ports

2) replace the linked list of ports by an array of pointers which is
dynamically kmalloced. When a port is added or deleted we can simply
allocate a new array, copy the corresponding pointers, and delete the old
one. We then only need a pointer to this array, and an integer that tells
us how many elements are in the array.
*/ /* return pointer to port structure - port is locked if found */ struct snd_seq_client_port *snd_seq_port_use_ptr(struct snd_seq_client *client, int num) { struct snd_seq_client_port *port; if (client == NULL) return NULL; read_lock(&client->ports_lock); list_for_each_entry(port, &client->ports_list_head, list) { if (port->addr.port == num) { if (port->closing) break; /* deleting now */ snd_use_lock_use(&port->use_lock); read_unlock(&client->ports_lock); return port; } } read_unlock(&client->ports_lock); return NULL; /* not found */ } /* search for the next port - port is locked if found */ struct snd_seq_client_port *snd_seq_port_query_nearest(struct snd_seq_client *client, struct snd_seq_port_info *pinfo) { int num; struct snd_seq_client_port *port, *found; num = pinfo->addr.port; found = NULL; read_lock(&client->ports_lock); list_for_each_entry(port, &client->ports_list_head, list) { if (port->addr.port < num) continue; if (port->addr.port == num) { found = port; break; } if (found == NULL || port->addr.port < found->addr.port) found = port; } if (found) { if (found->closing) found = NULL; else snd_use_lock_use(&found->use_lock); } read_unlock(&client->ports_lock); return found; } /* initialize snd_seq_port_subs_info */ static void port_subs_info_init(struct snd_seq_port_subs_info *grp) { INIT_LIST_HEAD(&grp->list_head); grp->count = 0; grp->exclusive = 0; rwlock_init(&grp->list_lock); init_rwsem(&grp->list_mutex); grp->open = NULL; grp->close = NULL; } /* create a port, port number is returned (-1 on failure) */ struct snd_seq_client_port *snd_seq_create_port(struct snd_seq_client *client, int port) { unsigned long flags; struct snd_seq_client_port *new_port, *p; int num = -1; /* sanity check */ if (snd_BUG_ON(!client)) return NULL; if (client->num_ports >= SNDRV_SEQ_MAX_PORTS - 1) { snd_printk(KERN_WARNING "too many ports for client %d\n", client->number); return NULL; } /* create a new port */ new_port = kzalloc(sizeof(*new_port), GFP_KERNEL); if (! 
new_port) { snd_printd("malloc failed for registering client port\n"); return NULL; /* failure, out of memory */ } /* init port data */ new_port->addr.client = client->number; new_port->addr.port = -1; new_port->owner = THIS_MODULE; sprintf(new_port->name, "port-%d", num); snd_use_lock_init(&new_port->use_lock); port_subs_info_init(&new_port->c_src); port_subs_info_init(&new_port->c_dest); num = port >= 0 ? port : 0; mutex_lock(&client->ports_mutex); write_lock_irqsave(&client->ports_lock, flags); list_for_each_entry(p, &client->ports_list_head, list) { if (p->addr.port > num) break; if (port < 0) /* auto-probe mode */ num = p->addr.port + 1; } /* insert the new port */ list_add_tail(&new_port->list, &p->list); client->num_ports++; new_port->addr.port = num; /* store the port number in the port */ write_unlock_irqrestore(&client->ports_lock, flags); mutex_unlock(&client->ports_mutex); sprintf(new_port->name, "port-%d", num); return new_port; } /* */ enum group_type { SRC_LIST, DEST_LIST }; static int subscribe_port(struct snd_seq_client *client, struct snd_seq_client_port *port, struct snd_seq_port_subs_info *grp, struct snd_seq_port_subscribe *info, int send_ack); static int unsubscribe_port(struct snd_seq_client *client, struct snd_seq_client_port *port, struct snd_seq_port_subs_info *grp, struct snd_seq_port_subscribe *info, int send_ack); static struct snd_seq_client_port *get_client_port(struct snd_seq_addr *addr, struct snd_seq_client **cp) { struct snd_seq_client_port *p; *cp = snd_seq_client_use_ptr(addr->client); if (*cp) { p = snd_seq_port_use_ptr(*cp, addr->port); if (! p) { snd_seq_client_unlock(*cp); *cp = NULL; } return p; } return NULL; } /* * remove all subscribers on the list * this is called from port_delete, for each src and dest list. 
*/
static void clear_subscriber_list(struct snd_seq_client *client,
				  struct snd_seq_client_port *port,
				  struct snd_seq_port_subs_info *grp,
				  int grptype)
{
	struct list_head *p, *n;

	/* _safe variant: entries are unlinked (and maybe freed) as we go */
	list_for_each_safe(p, n, &grp->list_head) {
		struct snd_seq_subscribers *subs;
		struct snd_seq_client *c;
		struct snd_seq_client_port *aport;

		/* recover the subscription record and look up the peer port */
		if (grptype == SRC_LIST) {
			subs = list_entry(p, struct snd_seq_subscribers, src_list);
			aport = get_client_port(&subs->info.dest, &c);
		} else {
			subs = list_entry(p, struct snd_seq_subscribers, dest_list);
			aport = get_client_port(&subs->info.sender, &c);
		}
		list_del(p);
		unsubscribe_port(client, port, grp, &subs->info, 0);
		if (!aport) {
			/* looks like the connected port is being deleted.
			 * we decrease the counter, and when both ports are deleted
			 * remove the subscriber info
			 */
			if (atomic_dec_and_test(&subs->ref_count))
				kfree(subs);
		} else {
			/* ok we got the connected port: unlink the peer-side
			 * list entry under the peer group's rwsem, then drop
			 * the references taken by get_client_port() */
			struct snd_seq_port_subs_info *agrp;
			agrp = (grptype == SRC_LIST) ? &aport->c_dest : &aport->c_src;
			down_write(&agrp->list_mutex);
			if (grptype == SRC_LIST)
				list_del(&subs->dest_list);
			else
				list_del(&subs->src_list);
			up_write(&agrp->list_mutex);
			unsubscribe_port(c, aport, agrp, &subs->info, 1);
			kfree(subs);
			snd_seq_port_unlock(aport);
			snd_seq_client_unlock(c);
		}
	}
}

/* delete port data: tear down subscriptions, run the private destructor,
 * and free the port.  The caller must already have removed the port from
 * the client's port list.  Always returns 0. */
static int port_delete(struct snd_seq_client *client,
		       struct snd_seq_client_port *port)
{
	/* set closing flag and wait for all port access are gone */
	port->closing = 1;
	snd_use_lock_sync(&port->use_lock);

	/* clear subscribers info */
	clear_subscriber_list(client, port, &port->c_src, SRC_LIST);
	clear_subscriber_list(client, port, &port->c_dest, DEST_LIST);

	if (port->private_free)
		port->private_free(port->private_data);

	/* both subscriber counts must have dropped to zero by now */
	snd_BUG_ON(port->c_src.count != 0);
	snd_BUG_ON(port->c_dest.count != 0);

	kfree(port);
	return 0;
}

/* delete the port with the given port id from @client;
 * returns 0 on success, -ENOENT if no such port exists */
int snd_seq_delete_port(struct snd_seq_client *client, int port)
{
	unsigned long flags;
	struct snd_seq_client_port *found = NULL, *p;

	mutex_lock(&client->ports_mutex);
	write_lock_irqsave(&client->ports_lock, flags);
	list_for_each_entry(p, &client->ports_list_head, list) {
		if (p->addr.port == port) {
			/* ok found.  delete from the list at first */
			list_del(&p->list);
			client->num_ports--;
			found = p;
			break;
		}
	}
	write_unlock_irqrestore(&client->ports_lock, flags);
	mutex_unlock(&client->ports_mutex);
	/* the heavy teardown runs outside the list locks */
	if (found)
		return port_delete(client, found);
	else
		return -ENOENT;
}

/* delete all the ports belonging to the given client; always returns 0 */
int snd_seq_delete_all_ports(struct snd_seq_client *client)
{
	unsigned long flags;
	struct list_head deleted_list;
	struct snd_seq_client_port *port, *tmp;

	/* move the port list to deleted_list, and
	 * clear the port list in the client data.
	 */
	mutex_lock(&client->ports_mutex);
	write_lock_irqsave(&client->ports_lock, flags);
	if (!list_empty(&client->ports_list_head)) {
		list_add(&deleted_list, &client->ports_list_head);
		list_del_init(&client->ports_list_head);
	} else {
		INIT_LIST_HEAD(&deleted_list);
	}
	client->num_ports = 0;
	write_unlock_irqrestore(&client->ports_lock, flags);

	/* remove each port in deleted_list; announcements and teardown
	 * happen outside the list lock, under ports_mutex only */
	list_for_each_entry_safe(port, tmp, &deleted_list, list) {
		list_del(&port->list);
		snd_seq_system_client_ev_port_exit(port->addr.client, port->addr.port);
		port_delete(client, port);
	}
	mutex_unlock(&client->ports_mutex);
	return 0;
}

/* copy the user-supplied fields of @info into @port; returns 0 or -EINVAL
 * on NULL arguments.  An empty info->name leaves the current name alone. */
int snd_seq_set_port_info(struct snd_seq_client_port * port,
			  struct snd_seq_port_info * info)
{
	if (snd_BUG_ON(!port || !info))
		return -EINVAL;

	/* set port name */
	if (info->name[0])
		strlcpy(port->name, info->name, sizeof(port->name));

	/* set capabilities */
	port->capability = info->capability;

	/* get port type */
	port->type = info->type;

	/* information about supported channels/voices */
	port->midi_channels = info->midi_channels;
	port->midi_voices = info->midi_voices;
	port->synth_voices = info->synth_voices;

	/* timestamping */
	port->timestamping = (info->flags & SNDRV_SEQ_PORT_FLG_TIMESTAMP) ? 1 : 0;
	port->time_real = (info->flags & SNDRV_SEQ_PORT_FLG_TIME_REAL) ? 1 : 0;
	port->time_queue = info->time_queue;

	return 0;
}

/* fill @info from @port (inverse of snd_seq_set_port_info);
 * returns 0 or -EINVAL on NULL arguments */
int snd_seq_get_port_info(struct snd_seq_client_port * port,
			  struct snd_seq_port_info * info)
{
	if (snd_BUG_ON(!port || !info))
		return -EINVAL;

	/* get port name */
	strlcpy(info->name, port->name, sizeof(info->name));

	/* get capabilities */
	info->capability = port->capability;

	/* get port type */
	info->type = port->type;

	/* information about supported channels/voices */
	info->midi_channels = port->midi_channels;
	info->midi_voices = port->midi_voices;
	info->synth_voices = port->synth_voices;

	/* get subscriber counts */
	info->read_use = port->c_src.count;
	info->write_use = port->c_dest.count;

	/* timestamping */
	info->flags = 0;
	if (port->timestamping) {
		info->flags |= SNDRV_SEQ_PORT_FLG_TIMESTAMP;
		if (port->time_real)
			info->flags |= SNDRV_SEQ_PORT_FLG_TIME_REAL;
		info->time_queue = port->time_queue;
	}

	return 0;
}


/*
 * call callback functions (if any):
 * the callbacks are invoked only when the first (for connection) or
 * the last subscription (for disconnection) is done.  Second or later
 * subscription results in increment of counter, but no callback is
 * invoked.
 * This feature is useful if these callbacks are associated with
 * initialization or termination of devices (see seq_midi.c).
 *
 * If callback_all option is set, the callback function is invoked
 * at each connection/disconnection.
 */

/* take a module reference and run the group's open callback according to
 * the first-subscription / callback_all policy described above; notifies
 * user-space clients when @send_ack is set.  Returns 0 or a negative
 * error (the count/module ref are rolled back on callback failure). */
static int subscribe_port(struct snd_seq_client *client,
			  struct snd_seq_client_port *port,
			  struct snd_seq_port_subs_info *grp,
			  struct snd_seq_port_subscribe *info,
			  int send_ack)
{
	int err = 0;

	if (!try_module_get(port->owner))
		return -EFAULT;
	grp->count++;
	if (grp->open && (port->callback_all || grp->count == 1)) {
		err = grp->open(port->private_data, info);
		if (err < 0) {
			/* undo the count and the module reference */
			module_put(port->owner);
			grp->count--;
		}
	}
	if (err >= 0 && send_ack && client->type == USER_CLIENT)
		snd_seq_client_notify_subscription(port->addr.client, port->addr.port,
						   info, SNDRV_SEQ_EVENT_PORT_SUBSCRIBED);
	return err;
}

/* mirror of subscribe_port(): drop the count, run the close callback on
 * the last unsubscription (or always with callback_all), notify, and
 * release the module reference.  -EINVAL if the count is already zero. */
static int unsubscribe_port(struct snd_seq_client *client,
			    struct snd_seq_client_port *port,
			    struct snd_seq_port_subs_info *grp,
			    struct snd_seq_port_subscribe *info,
			    int send_ack)
{
	int err = 0;

	if (!grp->count)
		return -EINVAL;
	grp->count--;
	if (grp->close && (port->callback_all || grp->count == 0))
		err = grp->close(port->private_data, info);
	if (send_ack && client->type == USER_CLIENT)
		snd_seq_client_notify_subscription(port->addr.client, port->addr.port,
						   info, SNDRV_SEQ_EVENT_PORT_UNSUBSCRIBED);
	module_put(port->owner);
	return err;
}


/* check if both addresses are identical */
static inline int addr_match(struct snd_seq_addr *r, struct snd_seq_addr *s)
{
	return (r->client == s->client) && (r->port == s->port);
}

/* check the two subscribe info match */
/* if flags is zero, checks only sender and destination addresses */
static int match_subs_info(struct snd_seq_port_subscribe *r,
			   struct snd_seq_port_subscribe *s)
{
	if (addr_match(&r->sender, &s->sender) &&
	    addr_match(&r->dest, &s->dest)) {
		if (r->flags && r->flags == s->flags)
			return r->queue == s->queue;
		else if (!r->flags)
			return 1;
	}
	return 0;
}


/* connect two ports: allocate a subscription record, validate exclusivity
 * and duplicates under both groups' rwsems (src first, dest nested), run
 * the subscribe callbacks, then link the record into both lists under the
 * src group's irqsave rwlock.  Returns 0, -ENOMEM, -EBUSY, or a callback
 * error; on failure everything done so far is rolled back. */
int snd_seq_port_connect(struct snd_seq_client *connector,
			 struct snd_seq_client *src_client,
			 struct snd_seq_client_port *src_port,
			 struct snd_seq_client *dest_client,
			 struct snd_seq_client_port *dest_port,
			 struct snd_seq_port_subscribe *info)
{
	struct snd_seq_port_subs_info *src = &src_port->c_src;
	struct snd_seq_port_subs_info *dest = &dest_port->c_dest;
	struct snd_seq_subscribers *subs, *s;
	int err, src_called = 0;
	unsigned long flags;
	int exclusive;

	subs = kzalloc(sizeof(*subs), GFP_KERNEL);
	if (!subs)
		return -ENOMEM;

	subs->info = *info;
	/* one reference per side; dropped as each side is torn down */
	atomic_set(&subs->ref_count, 2);

	down_write(&src->list_mutex);
	/* always src then dest; nested annotation keeps lockdep happy */
	down_write_nested(&dest->list_mutex, SINGLE_DEPTH_NESTING);

	exclusive = info->flags & SNDRV_SEQ_PORT_SUBS_EXCLUSIVE ? 1 : 0;
	err = -EBUSY;
	if (exclusive) {
		/* an exclusive subscription needs both lists empty */
		if (!list_empty(&src->list_head) || !list_empty(&dest->list_head))
			goto __error;
	} else {
		if (src->exclusive || dest->exclusive)
			goto __error;
		/* check whether already exists */
		list_for_each_entry(s, &src->list_head, src_list) {
			if (match_subs_info(info, &s->info))
				goto __error;
		}
		list_for_each_entry(s, &dest->list_head, dest_list) {
			if (match_subs_info(info, &s->info))
				goto __error;
		}
	}

	/* ack only the side(s) the connector does not own itself */
	if ((err = subscribe_port(src_client, src_port, src, info,
				  connector->number != src_client->number)) < 0)
		goto __error;
	src_called = 1;

	if ((err = subscribe_port(dest_client, dest_port, dest, info,
				  connector->number != dest_client->number)) < 0)
		goto __error;

	/* add to list */
	write_lock_irqsave(&src->list_lock, flags);
	// write_lock(&dest->list_lock); // no other lock yet
	list_add_tail(&subs->src_list, &src->list_head);
	list_add_tail(&subs->dest_list, &dest->list_head);
	// write_unlock(&dest->list_lock); // no other lock yet
	write_unlock_irqrestore(&src->list_lock, flags);

	src->exclusive = dest->exclusive = exclusive;

	up_write(&dest->list_mutex);
	up_write(&src->list_mutex);
	return 0;

 __error:
	if (src_called)
		unsubscribe_port(src_client, src_port, src, info,
				 connector->number != src_client->number);
	kfree(subs);
	up_write(&dest->list_mutex);
	up_write(&src->list_mutex);
	return err;
}


/* remove the connection created by snd_seq_port_connect(); same lock
 * ordering.  Returns 0 on success or -ENOENT if no matching
 * subscription is found on the source list. */
int snd_seq_port_disconnect(struct snd_seq_client *connector,
			    struct snd_seq_client *src_client,
			    struct snd_seq_client_port *src_port,
			    struct snd_seq_client *dest_client,
			    struct snd_seq_client_port *dest_port,
			    struct snd_seq_port_subscribe *info)
{
	struct snd_seq_port_subs_info *src = &src_port->c_src;
	struct snd_seq_port_subs_info *dest = &dest_port->c_dest;
	struct snd_seq_subscribers *subs;
	int err = -ENOENT;
	unsigned long flags;

	down_write(&src->list_mutex);
	down_write_nested(&dest->list_mutex, SINGLE_DEPTH_NESTING);

	/* look for the connection */
	list_for_each_entry(subs, &src->list_head, src_list) {
		if (match_subs_info(info, &subs->info)) {
			write_lock_irqsave(&src->list_lock, flags);
			// write_lock(&dest->list_lock);  // no lock yet
			list_del(&subs->src_list);
			list_del(&subs->dest_list);
			// write_unlock(&dest->list_lock);
			write_unlock_irqrestore(&src->list_lock, flags);
			src->exclusive = dest->exclusive = 0;
			unsubscribe_port(src_client, src_port, src, info,
					 connector->number != src_client->number);
			unsubscribe_port(dest_client, dest_port, dest, info,
					 connector->number != dest_client->number);
			kfree(subs);
			err = 0;
			break;
		}
	}

	up_write(&dest->list_mutex);
	up_write(&src->list_mutex);
	return err;
}


/* find the subscription on @src_grp whose destination is @dest_addr;
 * returns the record or NULL.  NOTE(review): the pointer is returned
 * after the rwsem is dropped — callers apparently rely on other
 * synchronization to keep it valid; confirm at call sites. */
struct snd_seq_subscribers *snd_seq_port_get_subscription(struct snd_seq_port_subs_info *src_grp,
							  struct snd_seq_addr *dest_addr)
{
	struct snd_seq_subscribers *s, *found = NULL;

	down_read(&src_grp->list_mutex);
	list_for_each_entry(s, &src_grp->list_head, src_list) {
		if (addr_match(dest_addr, &s->info.dest)) {
			found = s;
			break;
		}
	}
	up_read(&src_grp->list_mutex);
	return found;
}

/*
 * Attach a device driver that wants to receive events from the
 * sequencer.  Returns the new port number on success.
* A driver that wants to receive the events converted to midi, will * use snd_seq_midisynth_register_port(). */ /* exported */ int snd_seq_event_port_attach(int client, struct snd_seq_port_callback *pcbp, int cap, int type, int midi_channels, int midi_voices, char *portname) { struct snd_seq_port_info portinfo; int ret; /* Set up the port */ memset(&portinfo, 0, sizeof(portinfo)); portinfo.addr.client = client; strlcpy(portinfo.name, portname ? portname : "Unamed port", sizeof(portinfo.name)); portinfo.capability = cap; portinfo.type = type; portinfo.kernel = pcbp; portinfo.midi_channels = midi_channels; portinfo.midi_voices = midi_voices; /* Create it */ ret = snd_seq_kernel_client_ctl(client, SNDRV_SEQ_IOCTL_CREATE_PORT, &portinfo); if (ret >= 0) ret = portinfo.addr.port; return ret; } EXPORT_SYMBOL(snd_seq_event_port_attach); /* * Detach the driver from a port. */ /* exported */ int snd_seq_event_port_detach(int client, int port) { struct snd_seq_port_info portinfo; int err; memset(&portinfo, 0, sizeof(portinfo)); portinfo.addr.client = client; portinfo.addr.port = port; err = snd_seq_kernel_client_ctl(client, SNDRV_SEQ_IOCTL_DELETE_PORT, &portinfo); return err; } EXPORT_SYMBOL(snd_seq_event_port_detach);
gpl-2.0