repo_name
string
path
string
copies
string
size
string
content
string
license
string
marioaugustorama/linux
arch/arm/mach-orion5x/tsx09-common.c
419
2548
/* * QNAP TS-x09 Boards common functions * * Maintainers: Lennert Buytenhek <buytenh@marvell.com> * Byron Bradley <byron.bbradley@gmail.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/kernel.h> #include <linux/pci.h> #include <linux/mv643xx_eth.h> #include <linux/timex.h> #include <linux/serial_reg.h> #include "orion5x.h" #include "tsx09-common.h" #include "common.h" /***************************************************************************** * QNAP TS-x09 specific power off method via UART1-attached PIC ****************************************************************************/ #define UART1_REG(x) (UART1_VIRT_BASE + ((UART_##x) << 2)) void qnap_tsx09_power_off(void) { /* 19200 baud divisor */ const unsigned divisor = ((orion5x_tclk + (8 * 19200)) / (16 * 19200)); pr_info("%s: triggering power-off...\n", __func__); /* hijack uart1 and reset into sane state (19200,8n1) */ writel(0x83, UART1_REG(LCR)); writel(divisor & 0xff, UART1_REG(DLL)); writel((divisor >> 8) & 0xff, UART1_REG(DLM)); writel(0x03, UART1_REG(LCR)); writel(0x00, UART1_REG(IER)); writel(0x00, UART1_REG(FCR)); writel(0x00, UART1_REG(MCR)); /* send the power-off command 'A' to PIC */ writel('A', UART1_REG(TX)); } /***************************************************************************** * Ethernet ****************************************************************************/ struct mv643xx_eth_platform_data qnap_tsx09_eth_data = { .phy_addr = MV643XX_ETH_PHY_ADDR(8), }; static int __init qnap_tsx09_check_mac_addr(const char *addr_str) { u_int8_t addr[6]; if (!mac_pton(addr_str, addr)) return -1; printk(KERN_INFO "tsx09: found ethernet mac address %pM\n", addr); memcpy(qnap_tsx09_eth_data.mac_addr, addr, 6); return 0; } /* * The 'NAS Config' flash partition has an ext2 
filesystem which * contains a file that has the ethernet MAC address in plain text * (format "xx:xx:xx:xx:xx:xx\n"). */ void __init qnap_tsx09_find_mac_addr(u32 mem_base, u32 size) { unsigned long addr; for (addr = mem_base; addr < (mem_base + size); addr += 1024) { void __iomem *nor_page; int ret = 0; nor_page = ioremap(addr, 1024); if (nor_page != NULL) { ret = qnap_tsx09_check_mac_addr((__force const char *)nor_page); iounmap(nor_page); } if (ret == 0) break; } }
gpl-2.0
mir-ror/linux-yocto-dev
drivers/net/ethernet/mellanox/mlx5/core/port.c
419
3035
/* * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/

#include <linux/module.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cmd.h>
#include "mlx5_core.h"

/*
 * Issue an ACCESS_REG firmware command for register @reg_num.
 *
 * @data_in/@size_in:   payload copied into the command mailbox.
 * @data_out/@size_out: buffer receiving the register contents on success.
 * @arg:                register-specific argument word.
 * @write:              non-zero for a write access; note opmod is the
 *                      inversion of @write (opmod 1 == read).
 *
 * Returns 0 on success or a negative errno.  Both mailboxes are
 * vzalloc'ed with room for the variable-size payload and released on
 * all paths via the goto cleanup chain.
 */
int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in,
			 int size_in, void *data_out, int size_out,
			 u16 reg_num, int arg, int write)
{
	struct mlx5_access_reg_mbox_in *in = NULL;
	struct mlx5_access_reg_mbox_out *out = NULL;
	int err = -ENOMEM;

	in = mlx5_vzalloc(sizeof(*in) + size_in);
	if (!in)
		return -ENOMEM;

	out = mlx5_vzalloc(sizeof(*out) + size_out);
	if (!out)
		goto ex1;

	memcpy(in->data, data_in, size_in);
	in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ACCESS_REG);
	in->hdr.opmod = cpu_to_be16(!write);
	in->arg = cpu_to_be32(arg);
	in->register_id = cpu_to_be16(reg_num);
	err = mlx5_cmd_exec(dev, in, sizeof(*in) + size_in, out,
			    sizeof(*out) + size_out);
	if (err)
		goto ex2;

	/* command transported fine; check the firmware status field */
	if (out->hdr.status)
		err = mlx5_cmd_status_to_err(&out->hdr);

	if (!err)
		memcpy(data_out, out->data, size_out);

ex2:
	mlx5_vfree(out);
ex1:
	mlx5_vfree(in);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_access_reg);

/* Wire layout of the PCAP (port capabilities) register. */
struct mlx5_reg_pcap {
	u8			rsvd0;
	u8			port_num;
	u8			rsvd1[2];
	__be32			caps_127_96;
	__be32			caps_95_64;
	__be32			caps_63_32;
	__be32			caps_31_0;
};

/*
 * Set port capability bits for @port_num via the PCAP register.
 * Only the top capability word (bits 127:96) is written; the
 * remaining words are left zeroed.
 */
int mlx5_set_port_caps(struct mlx5_core_dev *dev, u8 port_num, u32 caps)
{
	struct mlx5_reg_pcap in;
	struct mlx5_reg_pcap out;
	int err;

	memset(&in, 0, sizeof(in));
	in.caps_127_96 = cpu_to_be32(caps);
	in.port_num = port_num;

	err = mlx5_core_access_reg(dev, &in, sizeof(in), &out,
				   sizeof(out), MLX5_REG_PCAP, 0, 1);

	return err;
}
EXPORT_SYMBOL_GPL(mlx5_set_port_caps);
gpl-2.0
again4you/linux
block/blk-exec.c
675
3273
/*
 * Functions related to setting various queue properties from drivers
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/sched/sysctl.h>

#include "blk.h"

/*
 * for max sense size
 */
#include <scsi/scsi_cmnd.h>

/**
 * blk_end_sync_rq - executes a completion event on a request
 * @rq: request to complete
 * @error: end I/O status of the request
 */
static void blk_end_sync_rq(struct request *rq, int error)
{
	struct completion *waiting = rq->end_io_data;

	rq->end_io_data = NULL;

	/*
	 * complete last, if this is a stack request the process (and thus
	 * the rq pointer) could be invalid right after this complete()
	 */
	complete(waiting);
}

/**
 * blk_execute_rq_nowait - insert a request into queue for execution
 * @q:		queue to insert the request in
 * @bd_disk:	matching gendisk
 * @rq:		request to insert
 * @at_head:    insert request at head or tail of queue
 * @done:	I/O completion handler
 *
 * Description:
 *    Insert a fully prepared request at the back of the I/O scheduler queue
 *    for execution.  Don't wait for completion.
 *
 * Note:
 *    This function will invoke @done directly if the queue is dead.
 */
void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
			   struct request *rq, int at_head,
			   rq_end_io_fn *done)
{
	int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK;

	WARN_ON(irqs_disabled());
	WARN_ON(rq->cmd_type == REQ_TYPE_FS);

	rq->rq_disk = bd_disk;
	rq->end_io = done;

	/*
	 * don't check dying flag for MQ because the request won't
	 * be resued after dying flag is set
	 */
	if (q->mq_ops) {
		blk_mq_insert_request(rq, at_head, true, false);
		return;
	}

	spin_lock_irq(q->queue_lock);

	if (unlikely(blk_queue_dying(q))) {
		/* queue is going away: fail the request immediately */
		rq->cmd_flags |= REQ_QUIET;
		rq->errors = -ENXIO;
		__blk_end_request_all(rq, rq->errors);
		spin_unlock_irq(q->queue_lock);
		return;
	}

	__elv_add_request(q, rq, where);
	__blk_run_queue(q);
	spin_unlock_irq(q->queue_lock);
}
EXPORT_SYMBOL_GPL(blk_execute_rq_nowait);

/**
 * blk_execute_rq - insert a request into queue for execution
 * @q:		queue to insert the request in
 * @bd_disk:	matching gendisk
 * @rq:		request to insert
 * @at_head:    insert request at head or tail of queue
 *
 * Description:
 *    Insert a fully prepared request at the back of the I/O scheduler queue
 *    for execution and wait for completion.
 */
int blk_execute_rq(struct request_queue *q, struct gendisk *bd_disk,
		   struct request *rq, int at_head)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	char sense[SCSI_SENSE_BUFFERSIZE];
	int err = 0;
	unsigned long hang_check;

	/* provide an on-stack sense buffer if the caller didn't supply one */
	if (!rq->sense) {
		memset(sense, 0, sizeof(sense));
		rq->sense = sense;
		rq->sense_len = 0;
	}

	rq->end_io_data = &wait;
	blk_execute_rq_nowait(q, bd_disk, rq, at_head, blk_end_sync_rq);

	/* Prevent hang_check timer from firing at us during very long I/O */
	hang_check = sysctl_hung_task_timeout_secs;
	if (hang_check)
		/* wake up periodically so the hung-task detector stays quiet */
		while (!wait_for_completion_io_timeout(&wait, hang_check * (HZ/2)));
	else
		wait_for_completion_io(&wait);

	if (rq->errors)
		err = -EIO;

	/* detach our stack sense buffer before the caller sees rq again */
	if (rq->sense == sense)	{
		rq->sense = NULL;
		rq->sense_len = 0;
	}

	return err;
}
EXPORT_SYMBOL(blk_execute_rq);
gpl-2.0
glewarne/S6-UniBase
net/netfilter/interceptor/debug_linux.c
931
1092
/** @copyright Copyright (c) 2011 - 2013, INSIDE Secure Oy. All rights reserved. */

#include "implementation_defs.h"
#include "debug_filter.h"

#include <linux/module.h>

int vprintk(const char *fmt, va_list args);

/*
 * Assertion failure hook: halt the kernel with the failed condition's
 * description.  Only @description is reported; the remaining context
 * parameters are accepted for interface compatibility.
 */
extern void
assert_outputf(
        const char *condition,
        const char *file,
        int line,
        const char *module,
        const char *func,
        const char *description)
{
    panic(description);
}

/*
 * Return the basename component of @str (the text after the last '/').
 * If @str contains no '/', the whole string is returned unchanged.
 *
 * The previous version initialized 'last' to the start of the string and
 * unconditionally returned last + 1, which dropped the first character of
 * any path without a slash (and pointed past the terminator for "").
 */
static const char *
last_slash(const char *str)
{
    const char *last = NULL;
    const char *p = str;

    while (*p != 0)
    {
        if (*p == '/')
            last = p;
        p++;
    }
    return last != NULL ? last + 1 : str;
}

/*
 * Emit one debug line if the (level, flow, module, file, func) tuple
 * passes the debug filter.  Output format: "LEVEL MODULE file:line MSG".
 */
void
debug_outputf(
        const char *level,
        const char *flow,
        const char *module,
        const char *file,
        int line,
        const char *func,
        const char *format,
        ...)
{
    if (debug_filter(level, flow, module, file, func))
    {
        va_list args;

        printk("%s %s %s:%d ", level, module, last_slash(file), line);
        va_start(args, format);
        vprintk(format, args);
        va_end(args);
        printk("\n");
    }
}
gpl-2.0
MinimalOS/android_kernel_moto_shamu
arch/s390/kvm/priv.c
1443
16147
/* * handling privileged instructions * * Copyright IBM Corp. 2008 * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License (version 2 only) * as published by the Free Software Foundation. * * Author(s): Carsten Otte <cotte@de.ibm.com> * Christian Borntraeger <borntraeger@de.ibm.com> */ #include <linux/kvm.h> #include <linux/gfp.h> #include <linux/errno.h> #include <linux/compat.h> #include <asm/asm-offsets.h> #include <asm/current.h> #include <asm/debug.h> #include <asm/ebcdic.h> #include <asm/sysinfo.h> #include <asm/ptrace.h> #include <asm/compat.h> #include "gaccess.h" #include "kvm-s390.h" #include "trace.h" static int handle_set_prefix(struct kvm_vcpu *vcpu) { u64 operand2; u32 address = 0; u8 tmp; vcpu->stat.instruction_spx++; operand2 = kvm_s390_get_base_disp_s(vcpu); /* must be word boundary */ if (operand2 & 3) return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); /* get the value */ if (get_guest(vcpu, address, (u32 __user *) operand2)) return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); address = address & 0x7fffe000u; /* make sure that the new value is valid memory */ if (copy_from_guest_absolute(vcpu, &tmp, address, 1) || (copy_from_guest_absolute(vcpu, &tmp, address + PAGE_SIZE, 1))) return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); kvm_s390_set_prefix(vcpu, address); VCPU_EVENT(vcpu, 5, "setting prefix to %x", address); trace_kvm_s390_handle_prefix(vcpu, 1, address); return 0; } static int handle_store_prefix(struct kvm_vcpu *vcpu) { u64 operand2; u32 address; vcpu->stat.instruction_stpx++; operand2 = kvm_s390_get_base_disp_s(vcpu); /* must be word boundary */ if (operand2 & 3) return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); address = vcpu->arch.sie_block->prefix; address = address & 0x7fffe000u; /* get the value */ if (put_guest(vcpu, address, (u32 __user *)operand2)) return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); VCPU_EVENT(vcpu, 5, 
"storing prefix to %x", address); trace_kvm_s390_handle_prefix(vcpu, 0, address); return 0; } static int handle_store_cpu_address(struct kvm_vcpu *vcpu) { u64 useraddr; vcpu->stat.instruction_stap++; useraddr = kvm_s390_get_base_disp_s(vcpu); if (useraddr & 1) return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); if (put_guest(vcpu, vcpu->vcpu_id, (u16 __user *)useraddr)) return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); VCPU_EVENT(vcpu, 5, "storing cpu address to %llx", useraddr); trace_kvm_s390_handle_stap(vcpu, useraddr); return 0; } static int handle_skey(struct kvm_vcpu *vcpu) { vcpu->stat.instruction_storage_key++; vcpu->arch.sie_block->gpsw.addr -= 4; VCPU_EVENT(vcpu, 4, "%s", "retrying storage key operation"); return 0; } static int handle_tpi(struct kvm_vcpu *vcpu) { struct kvm_s390_interrupt_info *inti; u64 addr; int cc; addr = kvm_s390_get_base_disp_s(vcpu); if (addr & 3) return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); cc = 0; inti = kvm_s390_get_io_int(vcpu->kvm, vcpu->run->s.regs.crs[6], 0); if (!inti) goto no_interrupt; cc = 1; if (addr) { /* * Store the two-word I/O interruption code into the * provided area. */ put_guest(vcpu, inti->io.subchannel_id, (u16 __user *) addr); put_guest(vcpu, inti->io.subchannel_nr, (u16 __user *) (addr + 2)); put_guest(vcpu, inti->io.io_int_parm, (u32 __user *) (addr + 4)); } else { /* * Store the three-word I/O interruption code into * the appropriate lowcore area. */ put_guest(vcpu, inti->io.subchannel_id, (u16 __user *) __LC_SUBCHANNEL_ID); put_guest(vcpu, inti->io.subchannel_nr, (u16 __user *) __LC_SUBCHANNEL_NR); put_guest(vcpu, inti->io.io_int_parm, (u32 __user *) __LC_IO_INT_PARM); put_guest(vcpu, inti->io.io_int_word, (u32 __user *) __LC_IO_INT_WORD); } kfree(inti); no_interrupt: /* Set condition code and we're done. 
*/ vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44); vcpu->arch.sie_block->gpsw.mask |= (cc & 3ul) << 44; return 0; } static int handle_tsch(struct kvm_vcpu *vcpu) { struct kvm_s390_interrupt_info *inti; inti = kvm_s390_get_io_int(vcpu->kvm, 0, vcpu->run->s.regs.gprs[1]); /* * Prepare exit to userspace. * We indicate whether we dequeued a pending I/O interrupt * so that userspace can re-inject it if the instruction gets * a program check. While this may re-order the pending I/O * interrupts, this is no problem since the priority is kept * intact. */ vcpu->run->exit_reason = KVM_EXIT_S390_TSCH; vcpu->run->s390_tsch.dequeued = !!inti; if (inti) { vcpu->run->s390_tsch.subchannel_id = inti->io.subchannel_id; vcpu->run->s390_tsch.subchannel_nr = inti->io.subchannel_nr; vcpu->run->s390_tsch.io_int_parm = inti->io.io_int_parm; vcpu->run->s390_tsch.io_int_word = inti->io.io_int_word; } vcpu->run->s390_tsch.ipb = vcpu->arch.sie_block->ipb; kfree(inti); return -EREMOTE; } static int handle_io_inst(struct kvm_vcpu *vcpu) { VCPU_EVENT(vcpu, 4, "%s", "I/O instruction"); if (vcpu->kvm->arch.css_support) { /* * Most I/O instructions will be handled by userspace. * Exceptions are tpi and the interrupt portion of tsch. */ if (vcpu->arch.sie_block->ipa == 0xb236) return handle_tpi(vcpu); if (vcpu->arch.sie_block->ipa == 0xb235) return handle_tsch(vcpu); /* Handle in userspace. */ return -EOPNOTSUPP; } else { /* * Set condition code 3 to stop the guest from issueing channel * I/O instructions. 
*/ vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44); vcpu->arch.sie_block->gpsw.mask |= (3 & 3ul) << 44; return 0; } } static int handle_stfl(struct kvm_vcpu *vcpu) { unsigned int facility_list; int rc; vcpu->stat.instruction_stfl++; /* only pass the facility bits, which we can handle */ facility_list = S390_lowcore.stfl_fac_list & 0xff00fff3; rc = copy_to_guest(vcpu, offsetof(struct _lowcore, stfl_fac_list), &facility_list, sizeof(facility_list)); if (rc) return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); VCPU_EVENT(vcpu, 5, "store facility list value %x", facility_list); trace_kvm_s390_handle_stfl(vcpu, facility_list); return 0; } static void handle_new_psw(struct kvm_vcpu *vcpu) { /* Check whether the new psw is enabled for machine checks. */ if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_MCHECK) kvm_s390_deliver_pending_machine_checks(vcpu); } #define PSW_MASK_ADDR_MODE (PSW_MASK_EA | PSW_MASK_BA) #define PSW_MASK_UNASSIGNED 0xb80800fe7fffffffUL #define PSW_ADDR_24 0x0000000000ffffffUL #define PSW_ADDR_31 0x000000007fffffffUL static int is_valid_psw(psw_t *psw) { if (psw->mask & PSW_MASK_UNASSIGNED) return 0; if ((psw->mask & PSW_MASK_ADDR_MODE) == PSW_MASK_BA) { if (psw->addr & ~PSW_ADDR_31) return 0; } if (!(psw->mask & PSW_MASK_ADDR_MODE) && (psw->addr & ~PSW_ADDR_24)) return 0; if ((psw->mask & PSW_MASK_ADDR_MODE) == PSW_MASK_EA) return 0; return 1; } int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu) { psw_t *gpsw = &vcpu->arch.sie_block->gpsw; psw_compat_t new_psw; u64 addr; if (gpsw->mask & PSW_MASK_PSTATE) return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OPERATION); addr = kvm_s390_get_base_disp_s(vcpu); if (addr & 7) return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); if (copy_from_guest(vcpu, &new_psw, addr, sizeof(new_psw))) return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); if (!(new_psw.mask & PSW32_MASK_BASE)) return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); gpsw->mask = (new_psw.mask & ~PSW32_MASK_BASE) << 
32; gpsw->mask |= new_psw.addr & PSW32_ADDR_AMODE; gpsw->addr = new_psw.addr & ~PSW32_ADDR_AMODE; if (!is_valid_psw(gpsw)) return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); handle_new_psw(vcpu); return 0; } static int handle_lpswe(struct kvm_vcpu *vcpu) { psw_t new_psw; u64 addr; addr = kvm_s390_get_base_disp_s(vcpu); if (addr & 7) return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); if (copy_from_guest(vcpu, &new_psw, addr, sizeof(new_psw))) return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); vcpu->arch.sie_block->gpsw = new_psw; if (!is_valid_psw(&vcpu->arch.sie_block->gpsw)) return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); handle_new_psw(vcpu); return 0; } static int handle_stidp(struct kvm_vcpu *vcpu) { u64 operand2; vcpu->stat.instruction_stidp++; operand2 = kvm_s390_get_base_disp_s(vcpu); if (operand2 & 7) return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); if (put_guest(vcpu, vcpu->arch.stidp_data, (u64 __user *)operand2)) return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); VCPU_EVENT(vcpu, 5, "%s", "store cpu id"); return 0; } static void handle_stsi_3_2_2(struct kvm_vcpu *vcpu, struct sysinfo_3_2_2 *mem) { struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int; int cpus = 0; int n; spin_lock(&fi->lock); for (n = 0; n < KVM_MAX_VCPUS; n++) if (fi->local_int[n]) cpus++; spin_unlock(&fi->lock); /* deal with other level 3 hypervisors */ if (stsi(mem, 3, 2, 2)) mem->count = 0; if (mem->count < 8) mem->count++; for (n = mem->count - 1; n > 0 ; n--) memcpy(&mem->vm[n], &mem->vm[n - 1], sizeof(mem->vm[0])); mem->vm[0].cpus_total = cpus; mem->vm[0].cpus_configured = cpus; mem->vm[0].cpus_standby = 0; mem->vm[0].cpus_reserved = 0; mem->vm[0].caf = 1000; memcpy(mem->vm[0].name, "KVMguest", 8); ASCEBC(mem->vm[0].name, 8); memcpy(mem->vm[0].cpi, "KVM/Linux ", 16); ASCEBC(mem->vm[0].cpi, 16); } static int handle_stsi(struct kvm_vcpu *vcpu) { int fc = (vcpu->run->s.regs.gprs[0] & 0xf0000000) >> 28; int sel1 = 
vcpu->run->s.regs.gprs[0] & 0xff; int sel2 = vcpu->run->s.regs.gprs[1] & 0xffff; unsigned long mem = 0; u64 operand2; int rc = 0; vcpu->stat.instruction_stsi++; VCPU_EVENT(vcpu, 4, "stsi: fc: %x sel1: %x sel2: %x", fc, sel1, sel2); operand2 = kvm_s390_get_base_disp_s(vcpu); if (operand2 & 0xfff && fc > 0) return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); switch (fc) { case 0: vcpu->run->s.regs.gprs[0] = 3 << 28; vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44); return 0; case 1: /* same handling for 1 and 2 */ case 2: mem = get_zeroed_page(GFP_KERNEL); if (!mem) goto out_no_data; if (stsi((void *) mem, fc, sel1, sel2)) goto out_no_data; break; case 3: if (sel1 != 2 || sel2 != 2) goto out_no_data; mem = get_zeroed_page(GFP_KERNEL); if (!mem) goto out_no_data; handle_stsi_3_2_2(vcpu, (void *) mem); break; default: goto out_no_data; } if (copy_to_guest_absolute(vcpu, operand2, (void *) mem, PAGE_SIZE)) { rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); goto out_exception; } trace_kvm_s390_handle_stsi(vcpu, fc, sel1, sel2, operand2); free_page(mem); vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44); vcpu->run->s.regs.gprs[0] = 0; return 0; out_no_data: /* condition code 3 */ vcpu->arch.sie_block->gpsw.mask |= 3ul << 44; out_exception: free_page(mem); return rc; } static const intercept_handler_t b2_handlers[256] = { [0x02] = handle_stidp, [0x10] = handle_set_prefix, [0x11] = handle_store_prefix, [0x12] = handle_store_cpu_address, [0x29] = handle_skey, [0x2a] = handle_skey, [0x2b] = handle_skey, [0x30] = handle_io_inst, [0x31] = handle_io_inst, [0x32] = handle_io_inst, [0x33] = handle_io_inst, [0x34] = handle_io_inst, [0x35] = handle_io_inst, [0x36] = handle_io_inst, [0x37] = handle_io_inst, [0x38] = handle_io_inst, [0x39] = handle_io_inst, [0x3a] = handle_io_inst, [0x3b] = handle_io_inst, [0x3c] = handle_io_inst, [0x5f] = handle_io_inst, [0x74] = handle_io_inst, [0x76] = handle_io_inst, [0x7d] = handle_stsi, [0xb1] = handle_stfl, [0xb2] = handle_lpswe, 
}; int kvm_s390_handle_b2(struct kvm_vcpu *vcpu) { intercept_handler_t handler; /* * a lot of B2 instructions are priviledged. We first check for * the privileged ones, that we can handle in the kernel. If the * kernel can handle this instruction, we check for the problem * state bit and (a) handle the instruction or (b) send a code 2 * program check. * Anything else goes to userspace.*/ handler = b2_handlers[vcpu->arch.sie_block->ipa & 0x00ff]; if (handler) { if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OPERATION); else return handler(vcpu); } return -EOPNOTSUPP; } static int handle_epsw(struct kvm_vcpu *vcpu) { int reg1, reg2; reg1 = (vcpu->arch.sie_block->ipb & 0x00f00000) >> 24; reg2 = (vcpu->arch.sie_block->ipb & 0x000f0000) >> 16; /* This basically extracts the mask half of the psw. */ vcpu->run->s.regs.gprs[reg1] &= 0xffffffff00000000; vcpu->run->s.regs.gprs[reg1] |= vcpu->arch.sie_block->gpsw.mask >> 32; if (reg2) { vcpu->run->s.regs.gprs[reg2] &= 0xffffffff00000000; vcpu->run->s.regs.gprs[reg2] |= vcpu->arch.sie_block->gpsw.mask & 0x00000000ffffffff; } return 0; } static const intercept_handler_t b9_handlers[256] = { [0x8d] = handle_epsw, [0x9c] = handle_io_inst, }; int kvm_s390_handle_b9(struct kvm_vcpu *vcpu) { intercept_handler_t handler; /* This is handled just as for the B2 instructions. */ handler = b9_handlers[vcpu->arch.sie_block->ipa & 0x00ff]; if (handler) { if ((handler != handle_epsw) && (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)) return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OPERATION); else return handler(vcpu); } return -EOPNOTSUPP; } static const intercept_handler_t eb_handlers[256] = { [0x8a] = handle_io_inst, }; int kvm_s390_handle_priv_eb(struct kvm_vcpu *vcpu) { intercept_handler_t handler; /* All eb instructions that end up here are privileged. 
*/ if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OPERATION); handler = eb_handlers[vcpu->arch.sie_block->ipb & 0xff]; if (handler) return handler(vcpu); return -EOPNOTSUPP; } static int handle_tprot(struct kvm_vcpu *vcpu) { u64 address1, address2; struct vm_area_struct *vma; unsigned long user_address; vcpu->stat.instruction_tprot++; kvm_s390_get_base_disp_sse(vcpu, &address1, &address2); /* we only handle the Linux memory detection case: * access key == 0 * guest DAT == off * everything else goes to userspace. */ if (address2 & 0xf0) return -EOPNOTSUPP; if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT) return -EOPNOTSUPP; down_read(&current->mm->mmap_sem); user_address = __gmap_translate(address1, vcpu->arch.gmap); if (IS_ERR_VALUE(user_address)) goto out_inject; vma = find_vma(current->mm, user_address); if (!vma) goto out_inject; vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44); if (!(vma->vm_flags & VM_WRITE) && (vma->vm_flags & VM_READ)) vcpu->arch.sie_block->gpsw.mask |= (1ul << 44); if (!(vma->vm_flags & VM_WRITE) && !(vma->vm_flags & VM_READ)) vcpu->arch.sie_block->gpsw.mask |= (2ul << 44); up_read(&current->mm->mmap_sem); return 0; out_inject: up_read(&current->mm->mmap_sem); return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); } int kvm_s390_handle_e5(struct kvm_vcpu *vcpu) { /* For e5xx... 
instructions we only handle TPROT */ if ((vcpu->arch.sie_block->ipa & 0x00ff) == 0x01) return handle_tprot(vcpu); return -EOPNOTSUPP; } static int handle_sckpf(struct kvm_vcpu *vcpu) { u32 value; if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OPERATION); if (vcpu->run->s.regs.gprs[0] & 0x00000000ffff0000) return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); value = vcpu->run->s.regs.gprs[0] & 0x000000000000ffff; vcpu->arch.sie_block->todpr = value; return 0; } static const intercept_handler_t x01_handlers[256] = { [0x07] = handle_sckpf, }; int kvm_s390_handle_01(struct kvm_vcpu *vcpu) { intercept_handler_t handler; handler = x01_handlers[vcpu->arch.sie_block->ipa & 0x00ff]; if (handler) return handler(vcpu); return -EOPNOTSUPP; }
gpl-2.0
denggww123/IMX6_DB_Kernel_3.0.35
drivers/w1/w1.c
1699
26216
/* * w1.c * * Copyright (c) 2004 Evgeniy Polyakov <johnpol@2ka.mipt.ru> * * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/delay.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/list.h> #include <linux/interrupt.h> #include <linux/spinlock.h> #include <linux/timer.h> #include <linux/device.h> #include <linux/slab.h> #include <linux/sched.h> #include <linux/kthread.h> #include <linux/freezer.h> #include <asm/atomic.h> #include "w1.h" #include "w1_log.h" #include "w1_int.h" #include "w1_family.h" #include "w1_netlink.h" MODULE_LICENSE("GPL"); MODULE_AUTHOR("Evgeniy Polyakov <johnpol@2ka.mipt.ru>"); MODULE_DESCRIPTION("Driver for 1-wire Dallas network protocol."); static int w1_timeout = 10; int w1_max_slave_count = 10; int w1_max_slave_ttl = 10; module_param_named(timeout, w1_timeout, int, 0); module_param_named(max_slave_count, w1_max_slave_count, int, 0); module_param_named(slave_ttl, w1_max_slave_ttl, int, 0); DEFINE_MUTEX(w1_mlock); LIST_HEAD(w1_masters); static int w1_attach_slave_device(struct w1_master *dev, struct w1_reg_num *rn); static int w1_master_match(struct device *dev, struct device_driver *drv) { return 1; } static int w1_master_probe(struct device *dev) { return -ENODEV; } static void w1_master_release(struct device *dev) { struct w1_master 
*md = dev_to_w1_master(dev); dev_dbg(dev, "%s: Releasing %s.\n", __func__, md->name); memset(md, 0, sizeof(struct w1_master) + sizeof(struct w1_bus_master)); kfree(md); } static void w1_slave_release(struct device *dev) { struct w1_slave *sl = dev_to_w1_slave(dev); dev_dbg(dev, "%s: Releasing %s.\n", __func__, sl->name); while (atomic_read(&sl->refcnt)) { dev_dbg(dev, "Waiting for %s to become free: refcnt=%d.\n", sl->name, atomic_read(&sl->refcnt)); if (msleep_interruptible(1000)) flush_signals(current); } w1_family_put(sl->family); sl->master->slave_count--; complete(&sl->released); } static ssize_t w1_slave_read_name(struct device *dev, struct device_attribute *attr, char *buf) { struct w1_slave *sl = dev_to_w1_slave(dev); return sprintf(buf, "%s\n", sl->name); } static ssize_t w1_slave_read_id(struct device *dev, struct device_attribute *attr, char *buf) { struct w1_slave *sl = dev_to_w1_slave(dev); ssize_t count = sizeof(sl->reg_num); memcpy(buf, (u8 *)&sl->reg_num, count); return count; } static struct device_attribute w1_slave_attr_name = __ATTR(name, S_IRUGO, w1_slave_read_name, NULL); static struct device_attribute w1_slave_attr_id = __ATTR(id, S_IRUGO, w1_slave_read_id, NULL); /* Default family */ static ssize_t w1_default_write(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count) { struct w1_slave *sl = kobj_to_w1_slave(kobj); mutex_lock(&sl->master->mutex); if (w1_reset_select_slave(sl)) { count = 0; goto out_up; } w1_write_block(sl->master, buf, count); out_up: mutex_unlock(&sl->master->mutex); return count; } static ssize_t w1_default_read(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count) { struct w1_slave *sl = kobj_to_w1_slave(kobj); mutex_lock(&sl->master->mutex); w1_read_block(sl->master, buf, count); mutex_unlock(&sl->master->mutex); return count; } static struct bin_attribute w1_default_attr = { .attr = { .name = "rw", .mode = 
S_IRUGO | S_IWUSR, }, .size = PAGE_SIZE, .read = w1_default_read, .write = w1_default_write, }; static int w1_default_add_slave(struct w1_slave *sl) { return sysfs_create_bin_file(&sl->dev.kobj, &w1_default_attr); } static void w1_default_remove_slave(struct w1_slave *sl) { sysfs_remove_bin_file(&sl->dev.kobj, &w1_default_attr); } static struct w1_family_ops w1_default_fops = { .add_slave = w1_default_add_slave, .remove_slave = w1_default_remove_slave, }; static struct w1_family w1_default_family = { .fops = &w1_default_fops, }; static int w1_uevent(struct device *dev, struct kobj_uevent_env *env); static struct bus_type w1_bus_type = { .name = "w1", .match = w1_master_match, .uevent = w1_uevent, }; struct device_driver w1_master_driver = { .name = "w1_master_driver", .bus = &w1_bus_type, .probe = w1_master_probe, }; struct device w1_master_device = { .parent = NULL, .bus = &w1_bus_type, .init_name = "w1 bus master", .driver = &w1_master_driver, .release = &w1_master_release }; static struct device_driver w1_slave_driver = { .name = "w1_slave_driver", .bus = &w1_bus_type, }; #if 0 struct device w1_slave_device = { .parent = NULL, .bus = &w1_bus_type, .init_name = "w1 bus slave", .driver = &w1_slave_driver, .release = &w1_slave_release }; #endif /* 0 */ static ssize_t w1_master_attribute_show_name(struct device *dev, struct device_attribute *attr, char *buf) { struct w1_master *md = dev_to_w1_master(dev); ssize_t count; mutex_lock(&md->mutex); count = sprintf(buf, "%s\n", md->name); mutex_unlock(&md->mutex); return count; } static ssize_t w1_master_attribute_store_search(struct device * dev, struct device_attribute *attr, const char * buf, size_t count) { long tmp; struct w1_master *md = dev_to_w1_master(dev); if (strict_strtol(buf, 0, &tmp) == -EINVAL) return -EINVAL; mutex_lock(&md->mutex); md->search_count = tmp; mutex_unlock(&md->mutex); wake_up_process(md->thread); return count; } static ssize_t w1_master_attribute_show_search(struct device *dev, struct 
device_attribute *attr, char *buf) { struct w1_master *md = dev_to_w1_master(dev); ssize_t count; mutex_lock(&md->mutex); count = sprintf(buf, "%d\n", md->search_count); mutex_unlock(&md->mutex); return count; } static ssize_t w1_master_attribute_store_pullup(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { long tmp; struct w1_master *md = dev_to_w1_master(dev); if (strict_strtol(buf, 0, &tmp) == -EINVAL) return -EINVAL; mutex_lock(&md->mutex); md->enable_pullup = tmp; mutex_unlock(&md->mutex); wake_up_process(md->thread); return count; } static ssize_t w1_master_attribute_show_pullup(struct device *dev, struct device_attribute *attr, char *buf) { struct w1_master *md = dev_to_w1_master(dev); ssize_t count; mutex_lock(&md->mutex); count = sprintf(buf, "%d\n", md->enable_pullup); mutex_unlock(&md->mutex); return count; } static ssize_t w1_master_attribute_show_pointer(struct device *dev, struct device_attribute *attr, char *buf) { struct w1_master *md = dev_to_w1_master(dev); ssize_t count; mutex_lock(&md->mutex); count = sprintf(buf, "0x%p\n", md->bus_master); mutex_unlock(&md->mutex); return count; } static ssize_t w1_master_attribute_show_timeout(struct device *dev, struct device_attribute *attr, char *buf) { ssize_t count; count = sprintf(buf, "%d\n", w1_timeout); return count; } static ssize_t w1_master_attribute_show_max_slave_count(struct device *dev, struct device_attribute *attr, char *buf) { struct w1_master *md = dev_to_w1_master(dev); ssize_t count; mutex_lock(&md->mutex); count = sprintf(buf, "%d\n", md->max_slave_count); mutex_unlock(&md->mutex); return count; } static ssize_t w1_master_attribute_show_attempts(struct device *dev, struct device_attribute *attr, char *buf) { struct w1_master *md = dev_to_w1_master(dev); ssize_t count; mutex_lock(&md->mutex); count = sprintf(buf, "%lu\n", md->attempts); mutex_unlock(&md->mutex); return count; } static ssize_t w1_master_attribute_show_slave_count(struct device *dev, 
struct device_attribute *attr, char *buf) { struct w1_master *md = dev_to_w1_master(dev); ssize_t count; mutex_lock(&md->mutex); count = sprintf(buf, "%d\n", md->slave_count); mutex_unlock(&md->mutex); return count; } static ssize_t w1_master_attribute_show_slaves(struct device *dev, struct device_attribute *attr, char *buf) { struct w1_master *md = dev_to_w1_master(dev); int c = PAGE_SIZE; mutex_lock(&md->mutex); if (md->slave_count == 0) c -= snprintf(buf + PAGE_SIZE - c, c, "not found.\n"); else { struct list_head *ent, *n; struct w1_slave *sl; list_for_each_safe(ent, n, &md->slist) { sl = list_entry(ent, struct w1_slave, w1_slave_entry); c -= snprintf(buf + PAGE_SIZE - c, c, "%s\n", sl->name); } } mutex_unlock(&md->mutex); return PAGE_SIZE - c; } static ssize_t w1_master_attribute_show_add(struct device *dev, struct device_attribute *attr, char *buf) { int c = PAGE_SIZE; c -= snprintf(buf+PAGE_SIZE - c, c, "write device id xx-xxxxxxxxxxxx to add slave\n"); return PAGE_SIZE - c; } static int w1_atoreg_num(struct device *dev, const char *buf, size_t count, struct w1_reg_num *rn) { unsigned int family; unsigned long long id; int i; u64 rn64_le; /* The CRC value isn't read from the user because the sysfs directory * doesn't include it and most messages from the bus search don't * print it either. It would be unreasonable for the user to then * provide it. */ const char *error_msg = "bad slave string format, expecting " "ff-dddddddddddd\n"; if (buf[2] != '-') { dev_err(dev, "%s", error_msg); return -EINVAL; } i = sscanf(buf, "%02x-%012llx", &family, &id); if (i != 2) { dev_err(dev, "%s", error_msg); return -EINVAL; } rn->family = family; rn->id = id; rn64_le = cpu_to_le64(*(u64 *)rn); rn->crc = w1_calc_crc8((u8 *)&rn64_le, 7); #if 0 dev_info(dev, "With CRC device is %02x.%012llx.%02x.\n", rn->family, (unsigned long long)rn->id, rn->crc); #endif return 0; } /* Searches the slaves in the w1_master and returns a pointer or NULL. 
* Note: must hold the mutex */ static struct w1_slave *w1_slave_search_device(struct w1_master *dev, struct w1_reg_num *rn) { struct w1_slave *sl; list_for_each_entry(sl, &dev->slist, w1_slave_entry) { if (sl->reg_num.family == rn->family && sl->reg_num.id == rn->id && sl->reg_num.crc == rn->crc) { return sl; } } return NULL; } static ssize_t w1_master_attribute_store_add(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct w1_master *md = dev_to_w1_master(dev); struct w1_reg_num rn; struct w1_slave *sl; ssize_t result = count; if (w1_atoreg_num(dev, buf, count, &rn)) return -EINVAL; mutex_lock(&md->mutex); sl = w1_slave_search_device(md, &rn); /* It would be nice to do a targeted search one the one-wire bus * for the new device to see if it is out there or not. But the * current search doesn't support that. */ if (sl) { dev_info(dev, "Device %s already exists\n", sl->name); result = -EINVAL; } else { w1_attach_slave_device(md, &rn); } mutex_unlock(&md->mutex); return result; } static ssize_t w1_master_attribute_show_remove(struct device *dev, struct device_attribute *attr, char *buf) { int c = PAGE_SIZE; c -= snprintf(buf+PAGE_SIZE - c, c, "write device id xx-xxxxxxxxxxxx to remove slave\n"); return PAGE_SIZE - c; } static ssize_t w1_master_attribute_store_remove(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct w1_master *md = dev_to_w1_master(dev); struct w1_reg_num rn; struct w1_slave *sl; ssize_t result = count; if (w1_atoreg_num(dev, buf, count, &rn)) return -EINVAL; mutex_lock(&md->mutex); sl = w1_slave_search_device(md, &rn); if (sl) { w1_slave_detach(sl); } else { dev_info(dev, "Device %02x-%012llx doesn't exists\n", rn.family, (unsigned long long)rn.id); result = -EINVAL; } mutex_unlock(&md->mutex); return result; } #define W1_MASTER_ATTR_RO(_name, _mode) \ struct device_attribute w1_master_attribute_##_name = \ __ATTR(w1_master_##_name, _mode, \ 
w1_master_attribute_show_##_name, NULL) #define W1_MASTER_ATTR_RW(_name, _mode) \ struct device_attribute w1_master_attribute_##_name = \ __ATTR(w1_master_##_name, _mode, \ w1_master_attribute_show_##_name, \ w1_master_attribute_store_##_name) static W1_MASTER_ATTR_RO(name, S_IRUGO); static W1_MASTER_ATTR_RO(slaves, S_IRUGO); static W1_MASTER_ATTR_RO(slave_count, S_IRUGO); static W1_MASTER_ATTR_RO(max_slave_count, S_IRUGO); static W1_MASTER_ATTR_RO(attempts, S_IRUGO); static W1_MASTER_ATTR_RO(timeout, S_IRUGO); static W1_MASTER_ATTR_RO(pointer, S_IRUGO); static W1_MASTER_ATTR_RW(search, S_IRUGO | S_IWUSR | S_IWGRP); static W1_MASTER_ATTR_RW(pullup, S_IRUGO | S_IWUSR | S_IWGRP); static W1_MASTER_ATTR_RW(add, S_IRUGO | S_IWUSR | S_IWGRP); static W1_MASTER_ATTR_RW(remove, S_IRUGO | S_IWUSR | S_IWGRP); static struct attribute *w1_master_default_attrs[] = { &w1_master_attribute_name.attr, &w1_master_attribute_slaves.attr, &w1_master_attribute_slave_count.attr, &w1_master_attribute_max_slave_count.attr, &w1_master_attribute_attempts.attr, &w1_master_attribute_timeout.attr, &w1_master_attribute_pointer.attr, &w1_master_attribute_search.attr, &w1_master_attribute_pullup.attr, &w1_master_attribute_add.attr, &w1_master_attribute_remove.attr, NULL }; static struct attribute_group w1_master_defattr_group = { .attrs = w1_master_default_attrs, }; int w1_create_master_attributes(struct w1_master *master) { return sysfs_create_group(&master->dev.kobj, &w1_master_defattr_group); } void w1_destroy_master_attributes(struct w1_master *master) { sysfs_remove_group(&master->dev.kobj, &w1_master_defattr_group); } #ifdef CONFIG_HOTPLUG static int w1_uevent(struct device *dev, struct kobj_uevent_env *env) { struct w1_master *md = NULL; struct w1_slave *sl = NULL; char *event_owner, *name; int err; if (dev->driver == &w1_master_driver) { md = container_of(dev, struct w1_master, dev); event_owner = "master"; name = md->name; } else if (dev->driver == &w1_slave_driver) { sl = 
container_of(dev, struct w1_slave, dev); event_owner = "slave"; name = sl->name; } else { dev_dbg(dev, "Unknown event.\n"); return -EINVAL; } dev_dbg(dev, "Hotplug event for %s %s, bus_id=%s.\n", event_owner, name, dev_name(dev)); if (dev->driver != &w1_slave_driver || !sl) return 0; err = add_uevent_var(env, "W1_FID=%02X", sl->reg_num.family); if (err) return err; err = add_uevent_var(env, "W1_SLAVE_ID=%024LX", (unsigned long long)sl->reg_num.id); if (err) return err; return 0; }; #else static int w1_uevent(struct device *dev, struct kobj_uevent_env *env) { return 0; } #endif static int __w1_attach_slave_device(struct w1_slave *sl) { int err; sl->dev.parent = &sl->master->dev; sl->dev.driver = &w1_slave_driver; sl->dev.bus = &w1_bus_type; sl->dev.release = &w1_slave_release; dev_set_name(&sl->dev, "%02x-%012llx", (unsigned int) sl->reg_num.family, (unsigned long long) sl->reg_num.id); snprintf(&sl->name[0], sizeof(sl->name), "%02x-%012llx", (unsigned int) sl->reg_num.family, (unsigned long long) sl->reg_num.id); dev_dbg(&sl->dev, "%s: registering %s as %p.\n", __func__, dev_name(&sl->dev), sl); err = device_register(&sl->dev); if (err < 0) { dev_err(&sl->dev, "Device registration [%s] failed. err=%d\n", dev_name(&sl->dev), err); return err; } /* Create "name" entry */ err = device_create_file(&sl->dev, &w1_slave_attr_name); if (err < 0) { dev_err(&sl->dev, "sysfs file creation for [%s] failed. err=%d\n", dev_name(&sl->dev), err); goto out_unreg; } /* Create "id" entry */ err = device_create_file(&sl->dev, &w1_slave_attr_id); if (err < 0) { dev_err(&sl->dev, "sysfs file creation for [%s] failed. err=%d\n", dev_name(&sl->dev), err); goto out_rem1; } /* if the family driver needs to initialize something... */ if (sl->family->fops && sl->family->fops->add_slave && ((err = sl->family->fops->add_slave(sl)) < 0)) { dev_err(&sl->dev, "sysfs file creation for [%s] failed. 
err=%d\n", dev_name(&sl->dev), err); goto out_rem2; } list_add_tail(&sl->w1_slave_entry, &sl->master->slist); return 0; out_rem2: device_remove_file(&sl->dev, &w1_slave_attr_id); out_rem1: device_remove_file(&sl->dev, &w1_slave_attr_name); out_unreg: device_unregister(&sl->dev); return err; } static int w1_attach_slave_device(struct w1_master *dev, struct w1_reg_num *rn) { struct w1_slave *sl; struct w1_family *f; int err; struct w1_netlink_msg msg; sl = kzalloc(sizeof(struct w1_slave), GFP_KERNEL); if (!sl) { dev_err(&dev->dev, "%s: failed to allocate new slave device.\n", __func__); return -ENOMEM; } sl->owner = THIS_MODULE; sl->master = dev; set_bit(W1_SLAVE_ACTIVE, (long *)&sl->flags); memset(&msg, 0, sizeof(msg)); memcpy(&sl->reg_num, rn, sizeof(sl->reg_num)); atomic_set(&sl->refcnt, 0); init_completion(&sl->released); spin_lock(&w1_flock); f = w1_family_registered(rn->family); if (!f) { f= &w1_default_family; dev_info(&dev->dev, "Family %x for %02x.%012llx.%02x is not registered.\n", rn->family, rn->family, (unsigned long long)rn->id, rn->crc); } __w1_family_get(f); spin_unlock(&w1_flock); sl->family = f; err = __w1_attach_slave_device(sl); if (err < 0) { dev_err(&dev->dev, "%s: Attaching %s failed.\n", __func__, sl->name); w1_family_put(sl->family); kfree(sl); return err; } sl->ttl = dev->slave_ttl; dev->slave_count++; memcpy(msg.id.id, rn, sizeof(msg.id)); msg.type = W1_SLAVE_ADD; w1_netlink_send(dev, &msg); return 0; } void w1_slave_detach(struct w1_slave *sl) { struct w1_netlink_msg msg; dev_dbg(&sl->dev, "%s: detaching %s [%p].\n", __func__, sl->name, sl); list_del(&sl->w1_slave_entry); if (sl->family->fops && sl->family->fops->remove_slave) sl->family->fops->remove_slave(sl); memset(&msg, 0, sizeof(msg)); memcpy(msg.id.id, &sl->reg_num, sizeof(msg.id)); msg.type = W1_SLAVE_REMOVE; w1_netlink_send(sl->master, &msg); device_remove_file(&sl->dev, &w1_slave_attr_id); device_remove_file(&sl->dev, &w1_slave_attr_name); device_unregister(&sl->dev); 
wait_for_completion(&sl->released); kfree(sl); } struct w1_master *w1_search_master_id(u32 id) { struct w1_master *dev; int found = 0; mutex_lock(&w1_mlock); list_for_each_entry(dev, &w1_masters, w1_master_entry) { if (dev->id == id) { found = 1; atomic_inc(&dev->refcnt); break; } } mutex_unlock(&w1_mlock); return (found)?dev:NULL; } struct w1_slave *w1_search_slave(struct w1_reg_num *id) { struct w1_master *dev; struct w1_slave *sl = NULL; int found = 0; mutex_lock(&w1_mlock); list_for_each_entry(dev, &w1_masters, w1_master_entry) { mutex_lock(&dev->mutex); list_for_each_entry(sl, &dev->slist, w1_slave_entry) { if (sl->reg_num.family == id->family && sl->reg_num.id == id->id && sl->reg_num.crc == id->crc) { found = 1; atomic_inc(&dev->refcnt); atomic_inc(&sl->refcnt); break; } } mutex_unlock(&dev->mutex); if (found) break; } mutex_unlock(&w1_mlock); return (found)?sl:NULL; } void w1_reconnect_slaves(struct w1_family *f, int attach) { struct w1_slave *sl, *sln; struct w1_master *dev; mutex_lock(&w1_mlock); list_for_each_entry(dev, &w1_masters, w1_master_entry) { dev_dbg(&dev->dev, "Reconnecting slaves in device %s " "for family %02x.\n", dev->name, f->fid); mutex_lock(&dev->mutex); list_for_each_entry_safe(sl, sln, &dev->slist, w1_slave_entry) { /* If it is a new family, slaves with the default * family driver and are that family will be * connected. If the family is going away, devices * matching that family are reconneced. 
*/ if ((attach && sl->family->fid == W1_FAMILY_DEFAULT && sl->reg_num.family == f->fid) || (!attach && sl->family->fid == f->fid)) { struct w1_reg_num rn; memcpy(&rn, &sl->reg_num, sizeof(rn)); w1_slave_detach(sl); w1_attach_slave_device(dev, &rn); } } dev_dbg(&dev->dev, "Reconnecting slaves in device %s " "has been finished.\n", dev->name); mutex_unlock(&dev->mutex); } mutex_unlock(&w1_mlock); } void w1_slave_found(struct w1_master *dev, u64 rn) { struct w1_slave *sl; struct w1_reg_num *tmp; u64 rn_le = cpu_to_le64(rn); atomic_inc(&dev->refcnt); tmp = (struct w1_reg_num *) &rn; sl = w1_slave_search_device(dev, tmp); if (sl) { set_bit(W1_SLAVE_ACTIVE, (long *)&sl->flags); } else { if (rn && tmp->crc == w1_calc_crc8((u8 *)&rn_le, 7)) w1_attach_slave_device(dev, tmp); } atomic_dec(&dev->refcnt); } /** * Performs a ROM Search & registers any devices found. * The 1-wire search is a simple binary tree search. * For each bit of the address, we read two bits and write one bit. * The bit written will put to sleep all devies that don't match that bit. * When the two reads differ, the direction choice is obvious. * When both bits are 0, we must choose a path to take. * When we can scan all 64 bits without having to choose a path, we are done. * * See "Application note 187 1-wire search algorithm" at www.maxim-ic.com * * @dev The master device to search * @cb Function to call when a device is found */ void w1_search(struct w1_master *dev, u8 search_type, w1_slave_found_callback cb) { u64 last_rn, rn, tmp64; int i, slave_count = 0; int last_zero, last_device; int search_bit, desc_bit; u8 triplet_ret = 0; search_bit = 0; rn = last_rn = 0; last_device = 0; last_zero = -1; desc_bit = 64; while ( !last_device && (slave_count++ < dev->max_slave_count) ) { last_rn = rn; rn = 0; /* * Reset bus and all 1-wire device state machines * so they can respond to our requests. * * Return 0 - device(s) present, 1 - no devices present. 
*/ if (w1_reset_bus(dev)) { dev_dbg(&dev->dev, "No devices present on the wire.\n"); break; } /* Start the search */ w1_write_8(dev, search_type); for (i = 0; i < 64; ++i) { /* Determine the direction/search bit */ if (i == desc_bit) search_bit = 1; /* took the 0 path last time, so take the 1 path */ else if (i > desc_bit) search_bit = 0; /* take the 0 path on the next branch */ else search_bit = ((last_rn >> i) & 0x1); /** Read two bits and write one bit */ triplet_ret = w1_triplet(dev, search_bit); /* quit if no device responded */ if ( (triplet_ret & 0x03) == 0x03 ) break; /* If both directions were valid, and we took the 0 path... */ if (triplet_ret == 0) last_zero = i; /* extract the direction taken & update the device number */ tmp64 = (triplet_ret >> 2); rn |= (tmp64 << i); if (kthread_should_stop()) { dev_dbg(&dev->dev, "Abort w1_search\n"); return; } } if ( (triplet_ret & 0x03) != 0x03 ) { if ( (desc_bit == last_zero) || (last_zero < 0)) last_device = 1; desc_bit = last_zero; cb(dev, rn); } } } void w1_search_process_cb(struct w1_master *dev, u8 search_type, w1_slave_found_callback cb) { struct w1_slave *sl, *sln; list_for_each_entry(sl, &dev->slist, w1_slave_entry) clear_bit(W1_SLAVE_ACTIVE, (long *)&sl->flags); w1_search_devices(dev, search_type, cb); list_for_each_entry_safe(sl, sln, &dev->slist, w1_slave_entry) { if (!test_bit(W1_SLAVE_ACTIVE, (unsigned long *)&sl->flags) && !--sl->ttl) w1_slave_detach(sl); else if (test_bit(W1_SLAVE_ACTIVE, (unsigned long *)&sl->flags)) sl->ttl = dev->slave_ttl; } if (dev->search_count > 0) dev->search_count--; } static void w1_search_process(struct w1_master *dev, u8 search_type) { w1_search_process_cb(dev, search_type, w1_slave_found); } int w1_process(void *data) { struct w1_master *dev = (struct w1_master *) data; /* As long as w1_timeout is only set by a module parameter the sleep * time can be calculated in jiffies once. 
*/ const unsigned long jtime = msecs_to_jiffies(w1_timeout * 1000); while (!kthread_should_stop()) { if (dev->search_count) { mutex_lock(&dev->mutex); w1_search_process(dev, W1_SEARCH); mutex_unlock(&dev->mutex); } try_to_freeze(); __set_current_state(TASK_INTERRUPTIBLE); if (kthread_should_stop()) break; /* Only sleep when the search is active. */ if (dev->search_count) schedule_timeout(jtime); else schedule(); } atomic_dec(&dev->refcnt); return 0; } static int __init w1_init(void) { int retval; printk(KERN_INFO "Driver for 1-wire Dallas network protocol.\n"); w1_init_netlink(); retval = bus_register(&w1_bus_type); if (retval) { printk(KERN_ERR "Failed to register bus. err=%d.\n", retval); goto err_out_exit_init; } retval = driver_register(&w1_master_driver); if (retval) { printk(KERN_ERR "Failed to register master driver. err=%d.\n", retval); goto err_out_bus_unregister; } retval = driver_register(&w1_slave_driver); if (retval) { printk(KERN_ERR "Failed to register master driver. err=%d.\n", retval); goto err_out_master_unregister; } return 0; #if 0 /* For undoing the slave register if there was a step after it. */ err_out_slave_unregister: driver_unregister(&w1_slave_driver); #endif err_out_master_unregister: driver_unregister(&w1_master_driver); err_out_bus_unregister: bus_unregister(&w1_bus_type); err_out_exit_init: return retval; } static void __exit w1_fini(void) { struct w1_master *dev; /* Set netlink removal messages and some cleanup */ list_for_each_entry(dev, &w1_masters, w1_master_entry) __w1_remove_master_device(dev); w1_fini_netlink(); driver_unregister(&w1_slave_driver); driver_unregister(&w1_master_driver); bus_unregister(&w1_bus_type); } module_init(w1_init); module_exit(w1_fini);
gpl-2.0
fourthwall/GT-N7000-ICS-3.0.y
arch/arm/mach-s3c2412/mach-vstms.c
2979
3547
/* linux/arch/arm/mach-s3c2412/mach-vstms.c * * (C) 2006 Thomas Gleixner <tglx@linutronix.de> * * Derived from mach-smdk2413.c - (C) 2006 Simtec Electronics * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/interrupt.h> #include <linux/list.h> #include <linux/timer.h> #include <linux/init.h> #include <linux/serial_core.h> #include <linux/platform_device.h> #include <linux/io.h> #include <linux/mtd/mtd.h> #include <linux/mtd/nand.h> #include <linux/mtd/nand_ecc.h> #include <linux/mtd/partitions.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> #include <asm/mach/irq.h> #include <mach/hardware.h> #include <asm/setup.h> #include <asm/irq.h> #include <asm/mach-types.h> #include <plat/regs-serial.h> #include <mach/regs-gpio.h> #include <mach/regs-lcd.h> #include <mach/idle.h> #include <mach/fb.h> #include <plat/iic.h> #include <plat/nand.h> #include <plat/s3c2410.h> #include <plat/s3c2412.h> #include <plat/clock.h> #include <plat/devs.h> #include <plat/cpu.h> static struct map_desc vstms_iodesc[] __initdata = { }; static struct s3c2410_uartcfg vstms_uartcfgs[] __initdata = { [0] = { .hwport = 0, .flags = 0, .ucon = 0x3c5, .ulcon = 0x03, .ufcon = 0x51, }, [1] = { .hwport = 1, .flags = 0, .ucon = 0x3c5, .ulcon = 0x03, .ufcon = 0x51, }, [2] = { .hwport = 2, .flags = 0, .ucon = 0x3c5, .ulcon = 0x03, .ufcon = 0x51, } }; static struct mtd_partition __initdata vstms_nand_part[] = { [0] = { .name = "Boot Agent", .size = 0x7C000, .offset = 0, }, [1] = { .name = "UBoot Config", .offset = 0x7C000, .size = 0x4000, }, [2] = { .name = "Kernel", .offset = 0x80000, .size = 0x200000, }, [3] = { .name = "RFS", .offset = 0x280000, .size = 0x3d80000, }, }; static struct s3c2410_nand_set __initdata vstms_nand_sets[] = { [0] = { .name = "NAND", .nr_chips = 1, .nr_partitions = 
ARRAY_SIZE(vstms_nand_part), .partitions = vstms_nand_part, }, }; /* choose a set of timings which should suit most 512Mbit * chips and beyond. */ static struct s3c2410_platform_nand __initdata vstms_nand_info = { .tacls = 20, .twrph0 = 60, .twrph1 = 20, .nr_sets = ARRAY_SIZE(vstms_nand_sets), .sets = vstms_nand_sets, }; static struct platform_device *vstms_devices[] __initdata = { &s3c_device_ohci, &s3c_device_wdt, &s3c_device_i2c0, &s3c_device_iis, &s3c_device_rtc, &s3c_device_nand, }; static void __init vstms_fixup(struct machine_desc *desc, struct tag *tags, char **cmdline, struct meminfo *mi) { if (tags != phys_to_virt(S3C2410_SDRAM_PA + 0x100)) { mi->nr_banks=1; mi->bank[0].start = 0x30000000; mi->bank[0].size = SZ_64M; } } static void __init vstms_map_io(void) { s3c24xx_init_io(vstms_iodesc, ARRAY_SIZE(vstms_iodesc)); s3c24xx_init_clocks(12000000); s3c24xx_init_uarts(vstms_uartcfgs, ARRAY_SIZE(vstms_uartcfgs)); } static void __init vstms_init(void) { s3c_i2c0_set_platdata(NULL); s3c_nand_set_platdata(&vstms_nand_info); platform_add_devices(vstms_devices, ARRAY_SIZE(vstms_devices)); } MACHINE_START(VSTMS, "VSTMS") .boot_params = S3C2410_SDRAM_PA + 0x100, .fixup = vstms_fixup, .init_irq = s3c24xx_init_irq, .init_machine = vstms_init, .map_io = vstms_map_io, .timer = &s3c24xx_timer, MACHINE_END
gpl-2.0
sktjdgns1189/android_kernel_pantech_ef63l-cm
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
3235
334919
/* bnx2x_main.c: Broadcom Everest network driver. * * Copyright (c) 2007-2012 Broadcom Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation. * * Maintained by: Eilon Greenstein <eilong@broadcom.com> * Written by: Eliezer Tamir * Based on code from Michael Chan's bnx2 driver * UDP CSUM errata workaround by Arik Gendelman * Slowpath and fastpath rework by Vladislav Zolotarov * Statistics and Link management by Yitchak Gertner * */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/kernel.h> #include <linux/device.h> /* for dev_info() */ #include <linux/timer.h> #include <linux/errno.h> #include <linux/ioport.h> #include <linux/slab.h> #include <linux/interrupt.h> #include <linux/pci.h> #include <linux/init.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/skbuff.h> #include <linux/dma-mapping.h> #include <linux/bitops.h> #include <linux/irq.h> #include <linux/delay.h> #include <asm/byteorder.h> #include <linux/time.h> #include <linux/ethtool.h> #include <linux/mii.h> #include <linux/if.h> #include <linux/if_vlan.h> #include <net/ip.h> #include <net/ipv6.h> #include <net/tcp.h> #include <net/checksum.h> #include <net/ip6_checksum.h> #include <linux/workqueue.h> #include <linux/crc32.h> #include <linux/crc32c.h> #include <linux/prefetch.h> #include <linux/zlib.h> #include <linux/io.h> #include <linux/semaphore.h> #include <linux/stringify.h> #include <linux/vmalloc.h> #include "bnx2x.h" #include "bnx2x_init.h" #include "bnx2x_init_ops.h" #include "bnx2x_cmn.h" #include "bnx2x_dcb.h" #include "bnx2x_sp.h" #include <linux/firmware.h> #include "bnx2x_fw_file_hdr.h" /* FW files */ #define FW_FILE_VERSION \ __stringify(BCM_5710_FW_MAJOR_VERSION) "." \ __stringify(BCM_5710_FW_MINOR_VERSION) "." \ __stringify(BCM_5710_FW_REVISION_VERSION) "." 
\ __stringify(BCM_5710_FW_ENGINEERING_VERSION) #define FW_FILE_NAME_E1 "bnx2x/bnx2x-e1-" FW_FILE_VERSION ".fw" #define FW_FILE_NAME_E1H "bnx2x/bnx2x-e1h-" FW_FILE_VERSION ".fw" #define FW_FILE_NAME_E2 "bnx2x/bnx2x-e2-" FW_FILE_VERSION ".fw" /* Time in jiffies before concluding the transmitter is hung */ #define TX_TIMEOUT (5*HZ) static char version[] __devinitdata = "Broadcom NetXtreme II 5771x/578xx 10/20-Gigabit Ethernet Driver " DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; MODULE_AUTHOR("Eliezer Tamir"); MODULE_DESCRIPTION("Broadcom NetXtreme II " "BCM57710/57711/57711E/" "57712/57712_MF/57800/57800_MF/57810/57810_MF/" "57840/57840_MF Driver"); MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_MODULE_VERSION); MODULE_FIRMWARE(FW_FILE_NAME_E1); MODULE_FIRMWARE(FW_FILE_NAME_E1H); MODULE_FIRMWARE(FW_FILE_NAME_E2); static int multi_mode = 1; module_param(multi_mode, int, 0); MODULE_PARM_DESC(multi_mode, " Multi queue mode " "(0 Disable; 1 Enable (default))"); int num_queues; module_param(num_queues, int, 0); MODULE_PARM_DESC(num_queues, " Number of queues for multi_mode=1" " (default is as a number of CPUs)"); static int disable_tpa; module_param(disable_tpa, int, 0); MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature"); #define INT_MODE_INTx 1 #define INT_MODE_MSI 2 static int int_mode; module_param(int_mode, int, 0); MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X " "(1 INT#x; 2 MSI)"); static int dropless_fc; module_param(dropless_fc, int, 0); MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring"); static int mrrs = -1; module_param(mrrs, int, 0); MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)"); static int debug; module_param(debug, int, 0); MODULE_PARM_DESC(debug, " Default debug msglevel"); struct workqueue_struct *bnx2x_wq; enum bnx2x_board_type { BCM57710 = 0, BCM57711, BCM57711E, BCM57712, BCM57712_MF, BCM57800, BCM57800_MF, BCM57810, BCM57810_MF, BCM57840, BCM57840_MF }; /* indexed 
by board_type, above */ static struct { char *name; } board_info[] __devinitdata = { { "Broadcom NetXtreme II BCM57710 10 Gigabit PCIe [Everest]" }, { "Broadcom NetXtreme II BCM57711 10 Gigabit PCIe" }, { "Broadcom NetXtreme II BCM57711E 10 Gigabit PCIe" }, { "Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet" }, { "Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet Multi Function" }, { "Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet" }, { "Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet Multi Function" }, { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet" }, { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet Multi Function" }, { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet" }, { "Broadcom NetXtreme II BCM57840 10/20 Gigabit " "Ethernet Multi Function"} }; #ifndef PCI_DEVICE_ID_NX2_57710 #define PCI_DEVICE_ID_NX2_57710 CHIP_NUM_57710 #endif #ifndef PCI_DEVICE_ID_NX2_57711 #define PCI_DEVICE_ID_NX2_57711 CHIP_NUM_57711 #endif #ifndef PCI_DEVICE_ID_NX2_57711E #define PCI_DEVICE_ID_NX2_57711E CHIP_NUM_57711E #endif #ifndef PCI_DEVICE_ID_NX2_57712 #define PCI_DEVICE_ID_NX2_57712 CHIP_NUM_57712 #endif #ifndef PCI_DEVICE_ID_NX2_57712_MF #define PCI_DEVICE_ID_NX2_57712_MF CHIP_NUM_57712_MF #endif #ifndef PCI_DEVICE_ID_NX2_57800 #define PCI_DEVICE_ID_NX2_57800 CHIP_NUM_57800 #endif #ifndef PCI_DEVICE_ID_NX2_57800_MF #define PCI_DEVICE_ID_NX2_57800_MF CHIP_NUM_57800_MF #endif #ifndef PCI_DEVICE_ID_NX2_57810 #define PCI_DEVICE_ID_NX2_57810 CHIP_NUM_57810 #endif #ifndef PCI_DEVICE_ID_NX2_57810_MF #define PCI_DEVICE_ID_NX2_57810_MF CHIP_NUM_57810_MF #endif #ifndef PCI_DEVICE_ID_NX2_57840 #define PCI_DEVICE_ID_NX2_57840 CHIP_NUM_57840 #endif #ifndef PCI_DEVICE_ID_NX2_57840_MF #define PCI_DEVICE_ID_NX2_57840_MF CHIP_NUM_57840_MF #endif static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = { { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 }, { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 }, { PCI_VDEVICE(BROADCOM, 
PCI_DEVICE_ID_NX2_57711E), BCM57711E }, { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712), BCM57712 }, { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712_MF), BCM57712_MF }, { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800), BCM57800 }, { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800_MF), BCM57800_MF }, { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810), BCM57810 }, { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810_MF), BCM57810_MF }, { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840), BCM57840 }, { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_MF), BCM57840_MF }, { 0 } }; MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl); /* Global resources for unloading a previously loaded device */ #define BNX2X_PREV_WAIT_NEEDED 1 static DEFINE_SEMAPHORE(bnx2x_prev_sem); static LIST_HEAD(bnx2x_prev_list); /**************************************************************************** * General service functions ****************************************************************************/ static inline void __storm_memset_dma_mapping(struct bnx2x *bp, u32 addr, dma_addr_t mapping) { REG_WR(bp, addr, U64_LO(mapping)); REG_WR(bp, addr + 4, U64_HI(mapping)); } static inline void storm_memset_spq_addr(struct bnx2x *bp, dma_addr_t mapping, u16 abs_fid) { u32 addr = XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid); __storm_memset_dma_mapping(bp, addr, mapping); } static inline void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid, u16 pf_id) { REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid), pf_id); REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid), pf_id); REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid), pf_id); REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid), pf_id); } static inline void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid, u8 enable) { REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid), enable); REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid), enable); REG_WR8(bp, BAR_TSTRORM_INTMEM 
+ TSTORM_FUNC_EN_OFFSET(abs_fid), enable); REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid), enable); } static inline void storm_memset_eq_data(struct bnx2x *bp, struct event_ring_data *eq_data, u16 pfid) { size_t size = sizeof(struct event_ring_data); u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_DATA_OFFSET(pfid); __storm_memset_struct(bp, addr, size, (u32 *)eq_data); } static inline void storm_memset_eq_prod(struct bnx2x *bp, u16 eq_prod, u16 pfid) { u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_PROD_OFFSET(pfid); REG_WR16(bp, addr, eq_prod); } /* used only at init * locking is done by mcp */ static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val) { pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr); pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val); pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, PCICFG_VENDOR_ID_OFFSET); } static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr) { u32 val; pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr); pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val); pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, PCICFG_VENDOR_ID_OFFSET); return val; } #define DMAE_DP_SRC_GRC "grc src_addr [%08x]" #define DMAE_DP_SRC_PCI "pci src_addr [%x:%08x]" #define DMAE_DP_DST_GRC "grc dst_addr [%08x]" #define DMAE_DP_DST_PCI "pci dst_addr [%x:%08x]" #define DMAE_DP_DST_NONE "dst_addr [none]" static void bnx2x_dp_dmae(struct bnx2x *bp, struct dmae_command *dmae, int msglvl) { u32 src_type = dmae->opcode & DMAE_COMMAND_SRC; switch (dmae->opcode & DMAE_COMMAND_DST) { case DMAE_CMD_DST_PCI: if (src_type == DMAE_CMD_SRC_PCI) DP(msglvl, "DMAE: opcode 0x%08x\n" "src [%x:%08x], len [%d*4], dst [%x:%08x]\n" "comp_addr [%x:%08x], comp_val 0x%08x\n", dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo, dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val); else DP(msglvl, "DMAE: opcode 0x%08x\n" "src [%08x], len [%d*4], dst [%x:%08x]\n" 
"comp_addr [%x:%08x], comp_val 0x%08x\n", dmae->opcode, dmae->src_addr_lo >> 2, dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val); break; case DMAE_CMD_DST_GRC: if (src_type == DMAE_CMD_SRC_PCI) DP(msglvl, "DMAE: opcode 0x%08x\n" "src [%x:%08x], len [%d*4], dst_addr [%08x]\n" "comp_addr [%x:%08x], comp_val 0x%08x\n", dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo, dmae->len, dmae->dst_addr_lo >> 2, dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val); else DP(msglvl, "DMAE: opcode 0x%08x\n" "src [%08x], len [%d*4], dst [%08x]\n" "comp_addr [%x:%08x], comp_val 0x%08x\n", dmae->opcode, dmae->src_addr_lo >> 2, dmae->len, dmae->dst_addr_lo >> 2, dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val); break; default: if (src_type == DMAE_CMD_SRC_PCI) DP(msglvl, "DMAE: opcode 0x%08x\n" "src_addr [%x:%08x] len [%d * 4] dst_addr [none]\n" "comp_addr [%x:%08x] comp_val 0x%08x\n", dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo, dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val); else DP(msglvl, "DMAE: opcode 0x%08x\n" "src_addr [%08x] len [%d * 4] dst_addr [none]\n" "comp_addr [%x:%08x] comp_val 0x%08x\n", dmae->opcode, dmae->src_addr_lo >> 2, dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val); break; } } /* copy command into DMAE command memory and set DMAE command go */ void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx) { u32 cmd_offset; int i; cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx); for (i = 0; i < (sizeof(struct dmae_command)/4); i++) { REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i)); } REG_WR(bp, dmae_reg_go_c[idx], 1); } u32 bnx2x_dmae_opcode_add_comp(u32 opcode, u8 comp_type) { return opcode | ((comp_type << DMAE_COMMAND_C_DST_SHIFT) | DMAE_CMD_C_ENABLE); } u32 bnx2x_dmae_opcode_clr_src_reset(u32 opcode) { return opcode & ~DMAE_CMD_SRC_RESET; } u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type, bool 
with_comp, u8 comp_type)
{
	u32 opcode = 0;

	opcode |= ((src_type << DMAE_COMMAND_SRC_SHIFT) |
		   (dst_type << DMAE_COMMAND_DST_SHIFT));

	opcode |= (DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET);

	opcode |= (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0);
	opcode |= ((BP_VN(bp) << DMAE_CMD_E1HVN_SHIFT) |
		   (BP_VN(bp) << DMAE_COMMAND_DST_VN_SHIFT));
	opcode |= (DMAE_COM_SET_ERR << DMAE_COMMAND_ERR_POLICY_SHIFT);

#ifdef __BIG_ENDIAN
	opcode |= DMAE_CMD_ENDIANITY_B_DW_SWAP;
#else
	opcode |= DMAE_CMD_ENDIANITY_DW_SWAP;
#endif
	if (with_comp)
		opcode = bnx2x_dmae_opcode_add_comp(opcode, comp_type);
	return opcode;
}

/* Zero a DMAE command and fill in the opcode and the PCI completion
 * parameters (completion written to the slowpath wb_comp buffer).
 */
static void bnx2x_prep_dmae_with_comp(struct bnx2x *bp,
				      struct dmae_command *dmae,
				      u8 src_type, u8 dst_type)
{
	memset(dmae, 0, sizeof(struct dmae_command));

	/* set the opcode */
	dmae->opcode = bnx2x_dmae_opcode(bp, src_type, dst_type,
					 true, DMAE_COMP_PCI);

	/* fill in the completion parameters */
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;
}

/* issue a dmae command over the init-channel and wait for completion */
static int bnx2x_issue_dmae_with_comp(struct bnx2x *bp,
				      struct dmae_command *dmae)
{
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = CHIP_REV_IS_SLOW(bp) ? (400000) : 4000;
	int rc = 0;

	/*
	 * Lock the dmae channel. Disable BHs to prevent a dead-lock
	 * as long as this code is called both from syscall context and
	 * from ndo_set_rx_mode() flow that may be called from BH.
	 */
	spin_lock_bh(&bp->dmae_lock);

	/* reset completion */
	*wb_comp = 0;

	/* post the command on the channel used for initializations */
	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	/* wait for completion */
	udelay(5);
	while ((*wb_comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) {

		if (!cnt ||
		    (bp->recovery_state != BNX2X_RECOVERY_DONE &&
		     bp->recovery_state != BNX2X_RECOVERY_NIC_LOADING)) {
			BNX2X_ERR("DMAE timeout!\n");
			rc = DMAE_TIMEOUT;
			goto unlock;
		}
		cnt--;
		udelay(50);
	}
	if (*wb_comp & DMAE_PCI_ERR_FLAG) {
		BNX2X_ERR("DMAE PCI error!\n");
		rc = DMAE_PCI_ERROR;
	}

unlock:
	spin_unlock_bh(&bp->dmae_lock);
	return rc;
}

/* DMA len32 dwords from host memory (dma_addr) into GRC space (dst_addr).
 * Falls back to indirect/string register writes while DMAE is not ready.
 */
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	struct dmae_command dmae;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		if (CHIP_IS_E1(bp))
			bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		else
			bnx2x_init_str_wr(bp, dst_addr, data, len32);
		return;
	}

	/* set opcode and fixed command fields */
	bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_PCI, DMAE_DST_GRC);

	/* fill in addresses and len */
	dmae.src_addr_lo = U64_LO(dma_addr);
	dmae.src_addr_hi = U64_HI(dma_addr);
	dmae.dst_addr_lo = dst_addr >> 2;
	dmae.dst_addr_hi = 0;
	dmae.len = len32;

	bnx2x_dp_dmae(bp, &dmae, BNX2X_MSG_OFF);

	/* issue the command and wait for completion */
	bnx2x_issue_dmae_with_comp(bp, &dmae);
}

/* DMA len32 dwords from GRC space (src_addr) into the slowpath wb_data
 * buffer. Falls back to plain register reads while DMAE is not ready.
 */
void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command dmae;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		if (CHIP_IS_E1(bp))
			for (i = 0; i < len32; i++)
				data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		else
			for (i = 0; i < len32; i++)
				data[i] = REG_RD(bp, src_addr + i*4);
		return;
	}

	/* set opcode and fixed command fields */
	bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_GRC, DMAE_DST_PCI);

	/* fill in addresses and len */
	dmae.src_addr_lo = src_addr >> 2;
	dmae.src_addr_hi = 0;
	dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae.len = len32;
	bnx2x_dp_dmae(bp, &dmae, BNX2X_MSG_OFF);

	/* issue the command and wait for completion */
	bnx2x_issue_dmae_with_comp(bp, &dmae);
}

/* Write a buffer to GRC space in chunks bounded by the HW's maximum DMAE
 * write length.
 */
static void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
				      u32 addr, u32 len)
{
	int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
	int offset = 0;

	while (len > dmae_wr_max) {
		bnx2x_write_dmae(bp, phys_addr + offset,
				 addr + offset, dmae_wr_max);
		offset += dmae_wr_max * 4;
		len -= dmae_wr_max;
	}

	bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
}

/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
	u32 wb_write[2];

	wb_write[0] = val_hi;
	wb_write[1] = val_lo;
	REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 wb_data[2];

	REG_RD_DMAE(bp, reg, wb_data, 2);

	return HILO_U64(wb_data[0], wb_data[1]);
}
#endif

/* Scan the four storm assert lists and print any firmware asserts found.
 * Returns the number of asserts printed (0 means none).
 */
static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, rc = 0;
	u32 row0, row1, row2, row3;

	/* XSTORM */
	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* TSTORM */
	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 =
REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* CSTORM */
	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* USTORM */
	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	return rc;
}

/* Dump the management firmware (MCP) cyclic trace buffer at log level 'lvl'.
 * Requires an operational MCP; bails out with an error otherwise.
 */
void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl)
{
	u32 addr, val;
	u32 mark, offset;
	__be32 data[9];
	int word;
	u32 trace_shmem_base;
	if (BP_NOMCP(bp)) {
		BNX2X_ERR("NO MCP - can not dump\n");
		return;
	}
	netdev_printk(lvl, bp->dev, "bc %d.%d.%d\n",
		(bp->common.bc_ver & 0xff0000) >> 16,
		(bp->common.bc_ver & 0xff00) >> 8,
		(bp->common.bc_ver & 0xff));

	/* two consecutive identical reads suggest the MCP CPU is stuck */
	val = REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER);
	if (val == REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER))
		BNX2X_ERR("%s" "MCP PC at 0x%x\n", lvl, val);

	if (BP_PATH(bp) == 0)
		trace_shmem_base = bp->common.shmem_base;
	else
		trace_shmem_base = SHMEM2_RD(bp, other_shmem_base_addr);
	addr = trace_shmem_base - 0x800;

	/* validate TRCB signature */
	mark = REG_RD(bp, addr);
	if (mark != MFW_TRACE_SIGNATURE) {
		BNX2X_ERR("Trace buffer signature is missing.");
		return ;
	}

	/* read cyclic buffer pointer */
	addr += 4;
	mark = REG_RD(bp, addr);
	mark = (CHIP_IS_E1x(bp) ? MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH)
			+ ((mark + 0x3) & ~0x3) - 0x08000000;
	printk("%s" "begin fw dump (mark 0x%x)\n", lvl, mark);

	/* dump from the cyclic-buffer mark to its end, then wrap around */
	printk("%s", lvl);
	for (offset = mark; offset <= trace_shmem_base; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, offset + 4*word));
		data[8] = 0x0;
		pr_cont("%s", (char *)data);
	}
	for (offset = addr + 4; offset <= mark; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, offset + 4*word));
		data[8] = 0x0;
		pr_cont("%s", (char *)data);
	}
	printk("%s" "end of fw dump\n", lvl);
}

static inline void bnx2x_fw_dump(struct bnx2x *bp)
{
	bnx2x_fw_dump_lvl(bp, KERN_ERR);
}

/* Dump driver, status-block and (optionally) ring state on a fatal error;
 * disables statistics and finishes with a firmware trace + assert scan.
 */
void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j;
	struct hc_sp_status_block_data sp_sb_data;
	int func = BP_FUNC(bp);
#ifdef BNX2X_STOP_ON_ERROR
	u16 start = 0, end = 0;
	u8 cos;
#endif

	bp->stats_state = STATS_STATE_DISABLED;
	bp->eth_stats.unrecoverable_error++;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	/* Indices */
	/* Common */
	BNX2X_ERR("def_idx(0x%x) def_att_idx(0x%x) attn_state(0x%x) spq_prod_idx(0x%x) next_stats_cnt(0x%x)\n",
		  bp->def_idx, bp->def_att_idx, bp->attn_state,
		  bp->spq_prod_idx, bp->stats_counter);
	BNX2X_ERR("DSB: attn bits(0x%x) ack(0x%x) id(0x%x) idx(0x%x)\n",
		  bp->def_status_blk->atten_status_block.attn_bits,
		  bp->def_status_blk->atten_status_block.attn_bits_ack,
		  bp->def_status_blk->atten_status_block.status_block_id,
		  bp->def_status_blk->atten_status_block.attn_bits_index);
	BNX2X_ERR(" def (");
	for (i = 0; i < HC_SP_SB_MAX_INDICES; i++)
		pr_cont("0x%x%s",
			bp->def_status_blk->sp_sb.index_values[i],
			(i == HC_SP_SB_MAX_INDICES - 1) ? ") " : " ");

	/* pull the slow-path status block data out of CSTORM memory */
	for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
		*((u32 *)&sp_sb_data + i) = REG_RD(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
			i*sizeof(u32));

	pr_cont("igu_sb_id(0x%x) igu_seg_id(0x%x) pf_id(0x%x) vnic_id(0x%x) vf_id(0x%x) vf_valid (0x%x) state(0x%x)\n",
		sp_sb_data.igu_sb_id,
		sp_sb_data.igu_seg_id,
		sp_sb_data.p_func.pf_id,
		sp_sb_data.p_func.vnic_id,
		sp_sb_data.p_func.vf_id,
		sp_sb_data.p_func.vf_valid,
		sp_sb_data.state);

	for_each_eth_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		int loop;
		struct hc_status_block_data_e2 sb_data_e2;
		struct hc_status_block_data_e1x sb_data_e1x;
		struct hc_status_block_sm  *hc_sm_p =
			CHIP_IS_E1x(bp) ?
			sb_data_e1x.common.state_machine :
			sb_data_e2.common.state_machine;
		struct hc_index_data *hc_index_p =
			CHIP_IS_E1x(bp) ?
			sb_data_e1x.index_data :
			sb_data_e2.index_data;
		u8 data_size, cos;
		u32 *sb_data_p;
		struct bnx2x_fp_txdata txdata;

		/* Rx */
		BNX2X_ERR("fp%d: rx_bd_prod(0x%x) rx_bd_cons(0x%x) rx_comp_prod(0x%x) rx_comp_cons(0x%x) *rx_cons_sb(0x%x)\n",
			  i, fp->rx_bd_prod, fp->rx_bd_cons,
			  fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR(" rx_sge_prod(0x%x) last_max_sge(0x%x) fp_hc_idx(0x%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge,
			  le16_to_cpu(fp->fp_hc_idx));

		/* Tx */
		for_each_cos_in_tx_queue(fp, cos)
		{
			txdata = fp->txdata[cos];
			BNX2X_ERR("fp%d: tx_pkt_prod(0x%x) tx_pkt_cons(0x%x) tx_bd_prod(0x%x) tx_bd_cons(0x%x) *tx_cons_sb(0x%x)\n",
				  i, txdata.tx_pkt_prod,
				  txdata.tx_pkt_cons, txdata.tx_bd_prod,
				  txdata.tx_bd_cons,
				  le16_to_cpu(*txdata.tx_cons_sb));
		}

		loop = CHIP_IS_E1x(bp) ?
			HC_SB_MAX_INDICES_E1X : HC_SB_MAX_INDICES_E2;

		/* host sb data */

#ifdef BCM_CNIC
		if (IS_FCOE_FP(fp))
			continue;
#endif
		BNX2X_ERR(" run indexes (");
		for (j = 0; j < HC_SB_MAX_SM; j++)
			pr_cont("0x%x%s",
				fp->sb_running_index[j],
				(j == HC_SB_MAX_SM - 1) ? ")" : " ");

		BNX2X_ERR(" indexes (");
		for (j = 0; j < loop; j++)
			pr_cont("0x%x%s",
				fp->sb_index_values[j],
				(j == loop - 1) ? ")" : " ");
		/* fw sb data */
		data_size = CHIP_IS_E1x(bp) ?
			sizeof(struct hc_status_block_data_e1x) :
			sizeof(struct hc_status_block_data_e2);
		data_size /= sizeof(u32);
		sb_data_p = CHIP_IS_E1x(bp) ?
			(u32 *)&sb_data_e1x :
			(u32 *)&sb_data_e2;
		/* copy sb data in here */
		for (j = 0; j < data_size; j++)
			*(sb_data_p + j) = REG_RD(bp, BAR_CSTRORM_INTMEM +
				CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id) +
				j * sizeof(u32));

		if (!CHIP_IS_E1x(bp)) {
			pr_cont("pf_id(0x%x) vf_id(0x%x) vf_valid(0x%x) vnic_id(0x%x) same_igu_sb_1b(0x%x) state(0x%x)\n",
				sb_data_e2.common.p_func.pf_id,
				sb_data_e2.common.p_func.vf_id,
				sb_data_e2.common.p_func.vf_valid,
				sb_data_e2.common.p_func.vnic_id,
				sb_data_e2.common.same_igu_sb_1b,
				sb_data_e2.common.state);
		} else {
			pr_cont("pf_id(0x%x) vf_id(0x%x) vf_valid(0x%x) vnic_id(0x%x) same_igu_sb_1b(0x%x) state(0x%x)\n",
				sb_data_e1x.common.p_func.pf_id,
				sb_data_e1x.common.p_func.vf_id,
				sb_data_e1x.common.p_func.vf_valid,
				sb_data_e1x.common.p_func.vnic_id,
				sb_data_e1x.common.same_igu_sb_1b,
				sb_data_e1x.common.state);
		}

		/* SB_SMs data */
		for (j = 0; j < HC_SB_MAX_SM; j++) {
			pr_cont("SM[%d] __flags (0x%x) igu_sb_id (0x%x) igu_seg_id(0x%x) time_to_expire (0x%x) timer_value(0x%x)\n",
				j, hc_sm_p[j].__flags, hc_sm_p[j].igu_sb_id,
				hc_sm_p[j].igu_seg_id,
				hc_sm_p[j].time_to_expire,
				hc_sm_p[j].timer_value);
		}

		/* Indices data */
		for (j = 0; j < loop; j++) {
			pr_cont("INDEX[%d] flags (0x%x) timeout (0x%x)\n",
				j, hc_index_p[j].flags,
				hc_index_p[j].timeout);
		}
	}

#ifdef BNX2X_STOP_ON_ERROR
	/* Rings */
	/* Rx */
	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j != end; j = RX_BD(j + 1)) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
				  i, j, rx_bd[1], rx_bd[0], sw_bd->data);
		}

		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j != end; j = RX_SGE(j + 1)) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
				  i, j, rx_sge[1],
				  rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j != end; j = RCQ_BD(j + 1)) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
				  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	/* Tx */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		for_each_cos_in_tx_queue(fp, cos) {
			struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];

			start = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) - 10);
			end = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) + 245);
			for (j = start; j != end; j = TX_BD(j + 1)) {
				struct sw_tx_bd *sw_bd =
					&txdata->tx_buf_ring[j];

				BNX2X_ERR("fp%d: txdata %d, packet[%x]=[%p,%x]\n",
					  i, cos, j, sw_bd->skb,
					  sw_bd->first_bd);
			}

			start = TX_BD(txdata->tx_bd_cons - 10);
			end = TX_BD(txdata->tx_bd_cons + 254);
			for (j = start; j != end; j = TX_BD(j + 1)) {
				u32 *tx_bd = (u32 *)&txdata->tx_desc_ring[j];

				BNX2X_ERR("fp%d: txdata %d, tx_bd[%x]=[%x:%x:%x:%x]\n",
					  i, cos, j, tx_bd[0], tx_bd[1],
					  tx_bd[2], tx_bd[3]);
			}
		}
	}
#endif
	bnx2x_fw_dump(bp);
	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");
}

/*
 * FLR Support for E2
 *
 * bnx2x_pf_flr_clnup() is called during nic_load in the per function HW
 * initialization.
 */
#define FLR_WAIT_USEC		10000	/* 10 milliseconds */
#define FLR_WAIT_INTERVAL	50	/* usec */
#define	FLR_POLL_CNT		(FLR_WAIT_USEC/FLR_WAIT_INTERVAL) /* 200 */

/* PBF transmit-buffer credit registers for one port/queue */
struct pbf_pN_buf_regs {
	int pN;
	u32 init_crd;
	u32 crd;
	u32 crd_freed;
};

/* PBF command-queue occupancy registers for one port/queue */
struct pbf_pN_cmd_regs {
	int pN;
	u32 lines_occup;
	u32 lines_freed;
};

/* Poll until the PBF tx buffer for regs->pN drains back to its initial
 * credit count, or the poll budget expires (logged as a timeout).
 */
static void bnx2x_pbf_pN_buf_flushed(struct bnx2x *bp,
				     struct pbf_pN_buf_regs *regs,
				     u32 poll_count)
{
	u32 init_crd, crd, crd_start, crd_freed, crd_freed_start;
	u32 cur_cnt = poll_count;

	crd_freed = crd_freed_start = REG_RD(bp, regs->crd_freed);
	crd = crd_start = REG_RD(bp, regs->crd);
	init_crd = REG_RD(bp, regs->init_crd);

	DP(BNX2X_MSG_SP, "INIT CREDIT[%d] : %x\n", regs->pN, init_crd);
	DP(BNX2X_MSG_SP, "CREDIT[%d] : s:%x\n", regs->pN, crd);
	DP(BNX2X_MSG_SP, "CREDIT_FREED[%d]: s:%x\n", regs->pN, crd_freed);

	while ((crd != init_crd) &&
	       ((u32)SUB_S32(crd_freed, crd_freed_start) <
		(init_crd - crd_start))) {
		if (cur_cnt--) {
			udelay(FLR_WAIT_INTERVAL);
			crd = REG_RD(bp, regs->crd);
			crd_freed = REG_RD(bp, regs->crd_freed);
		} else {
			DP(BNX2X_MSG_SP, "PBF tx buffer[%d] timed out\n",
			   regs->pN);
			DP(BNX2X_MSG_SP, "CREDIT[%d] : c:%x\n",
			   regs->pN, crd);
			DP(BNX2X_MSG_SP, "CREDIT_FREED[%d]: c:%x\n",
			   regs->pN, crd_freed);
			break;
		}
	}
	DP(BNX2X_MSG_SP, "Waited %d*%d usec for PBF tx buffer[%d]\n",
	   poll_count-cur_cnt, FLR_WAIT_INTERVAL, regs->pN);
}

/* Poll until the PBF command queue for regs->pN frees all the lines it
 * occupied at entry, or the poll budget expires (logged as a timeout).
 */
static void bnx2x_pbf_pN_cmd_flushed(struct bnx2x *bp,
				     struct pbf_pN_cmd_regs *regs,
				     u32 poll_count)
{
	u32 occup, to_free, freed, freed_start;
	u32 cur_cnt = poll_count;

	occup = to_free = REG_RD(bp, regs->lines_occup);
	freed = freed_start = REG_RD(bp, regs->lines_freed);

	DP(BNX2X_MSG_SP, "OCCUPANCY[%d] : s:%x\n", regs->pN, occup);
	DP(BNX2X_MSG_SP, "LINES_FREED[%d] : s:%x\n", regs->pN, freed);

	while (occup &&
	       ((u32)SUB_S32(freed, freed_start) < to_free)) {
		if (cur_cnt--) {
			udelay(FLR_WAIT_INTERVAL);
			occup = REG_RD(bp, regs->lines_occup);
			freed = REG_RD(bp, regs->lines_freed);
		} else {
			DP(BNX2X_MSG_SP, "PBF cmd queue[%d] timed out\n",
			   regs->pN);
			DP(BNX2X_MSG_SP, "OCCUPANCY[%d] : s:%x\n",
			   regs->pN, occup);
			DP(BNX2X_MSG_SP, "LINES_FREED[%d] : s:%x\n",
			   regs->pN, freed);
			break;
		}
	}
	DP(BNX2X_MSG_SP, "Waited %d*%d usec for PBF cmd queue[%d]\n",
	   poll_count-cur_cnt, FLR_WAIT_INTERVAL, regs->pN);
}

/* Poll a register until it reads 'expected' or poll_count expires;
 * returns the last value read.
 */
static inline u32 bnx2x_flr_clnup_reg_poll(struct bnx2x *bp, u32 reg,
					   u32 expected, u32 poll_count)
{
	u32 cur_cnt = poll_count;
	u32 val;

	while ((val = REG_RD(bp, reg)) != expected && cur_cnt--)
		udelay(FLR_WAIT_INTERVAL);

	return val;
}

/* Poll a HW usage counter down to zero; returns 1 (and logs msg) on
 * timeout, 0 when the counter reached zero.
 */
static inline int bnx2x_flr_clnup_poll_hw_counter(struct bnx2x *bp, u32 reg,
						  char *msg, u32 poll_cnt)
{
	u32 val = bnx2x_flr_clnup_reg_poll(bp, reg, 0, poll_cnt);
	if (val != 0) {
		BNX2X_ERR("%s usage count=%d\n", msg, val);
		return 1;
	}
	return 0;
}

static u32 bnx2x_flr_clnup_poll_count(struct bnx2x *bp)
{
	/* adjust polling timeout */
	if (CHIP_REV_IS_EMUL(bp))
		return FLR_POLL_CNT * 2000;

	if (CHIP_REV_IS_FPGA(bp))
		return FLR_POLL_CNT * 120;

	return FLR_POLL_CNT;
}

/* Wait for the PBF command queues and tx buffers of ports 0, 1 and 4 to
 * flush; register addresses differ between E3B0 and earlier chips.
 */
static void bnx2x_tx_hw_flushed(struct bnx2x *bp, u32 poll_count)
{
	struct pbf_pN_cmd_regs cmd_regs[] = {
		{0, (CHIP_IS_E3B0(bp)) ?
			PBF_REG_TQ_OCCUPANCY_Q0 :
			PBF_REG_P0_TQ_OCCUPANCY,
		    (CHIP_IS_E3B0(bp)) ?
			PBF_REG_TQ_LINES_FREED_CNT_Q0 :
			PBF_REG_P0_TQ_LINES_FREED_CNT},
		{1, (CHIP_IS_E3B0(bp)) ?
			PBF_REG_TQ_OCCUPANCY_Q1 :
			PBF_REG_P1_TQ_OCCUPANCY,
		    (CHIP_IS_E3B0(bp)) ?
			PBF_REG_TQ_LINES_FREED_CNT_Q1 :
			PBF_REG_P1_TQ_LINES_FREED_CNT},
		{4, (CHIP_IS_E3B0(bp)) ?
			PBF_REG_TQ_OCCUPANCY_LB_Q :
			PBF_REG_P4_TQ_OCCUPANCY,
		    (CHIP_IS_E3B0(bp)) ?
			PBF_REG_TQ_LINES_FREED_CNT_LB_Q :
			PBF_REG_P4_TQ_LINES_FREED_CNT}
	};

	struct pbf_pN_buf_regs buf_regs[] = {
		{0, (CHIP_IS_E3B0(bp)) ?
			PBF_REG_INIT_CRD_Q0 :
			PBF_REG_P0_INIT_CRD ,
		    (CHIP_IS_E3B0(bp)) ?
			PBF_REG_CREDIT_Q0 :
			PBF_REG_P0_CREDIT,
		    (CHIP_IS_E3B0(bp)) ?
			PBF_REG_INTERNAL_CRD_FREED_CNT_Q0 :
			PBF_REG_P0_INTERNAL_CRD_FREED_CNT},
		{1, (CHIP_IS_E3B0(bp)) ?
			PBF_REG_INIT_CRD_Q1 :
			PBF_REG_P1_INIT_CRD,
		    (CHIP_IS_E3B0(bp)) ?
			PBF_REG_CREDIT_Q1 :
			PBF_REG_P1_CREDIT,
		    (CHIP_IS_E3B0(bp)) ?
			PBF_REG_INTERNAL_CRD_FREED_CNT_Q1 :
			PBF_REG_P1_INTERNAL_CRD_FREED_CNT},
		{4, (CHIP_IS_E3B0(bp)) ?
			PBF_REG_INIT_CRD_LB_Q :
			PBF_REG_P4_INIT_CRD,
		    (CHIP_IS_E3B0(bp)) ?
			PBF_REG_CREDIT_LB_Q :
			PBF_REG_P4_CREDIT,
		    (CHIP_IS_E3B0(bp)) ?
			PBF_REG_INTERNAL_CRD_FREED_CNT_LB_Q :
			PBF_REG_P4_INTERNAL_CRD_FREED_CNT},
	};

	int i;

	/* Verify the command queues are flushed P0, P1, P4 */
	for (i = 0; i < ARRAY_SIZE(cmd_regs); i++)
		bnx2x_pbf_pN_cmd_flushed(bp, &cmd_regs[i], poll_count);

	/* Verify the transmission buffers are flushed P0, P1, P4 */
	for (i = 0; i < ARRAY_SIZE(buf_regs); i++)
		bnx2x_pbf_pN_buf_flushed(bp, &buf_regs[i], poll_count);
}

#define OP_GEN_PARAM(param) \
	(((param) << SDM_OP_GEN_COMP_PARAM_SHIFT) & SDM_OP_GEN_COMP_PARAM)

#define OP_GEN_TYPE(type) \
	(((type) << SDM_OP_GEN_COMP_TYPE_SHIFT) & SDM_OP_GEN_COMP_TYPE)

#define OP_GEN_AGG_VECT(index) \
	(((index) << SDM_OP_GEN_AGG_VECT_IDX_SHIFT) & SDM_OP_GEN_AGG_VECT_IDX)

/* Send the firmware "final cleanup" aggregated interrupt for clnup_func and
 * wait for its completion flag in CSTORM; returns 1 on any failure.
 */
static inline int bnx2x_send_final_clnup(struct bnx2x *bp, u8 clnup_func,
					 u32 poll_cnt)
{
	struct sdm_op_gen op_gen = {0};

	u32 comp_addr = BAR_CSTRORM_INTMEM +
			CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(clnup_func);
	int ret = 0;

	if (REG_RD(bp, comp_addr)) {
		BNX2X_ERR("Cleanup complete was not 0 before sending\n");
		return 1;
	}

	op_gen.command |= OP_GEN_PARAM(XSTORM_AGG_INT_FINAL_CLEANUP_INDEX);
	op_gen.command |= OP_GEN_TYPE(XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE);
	op_gen.command |= OP_GEN_AGG_VECT(clnup_func);
	op_gen.command |= 1 << SDM_OP_GEN_AGG_VECT_IDX_VALID_SHIFT;

	DP(BNX2X_MSG_SP, "sending FW Final cleanup\n");
	REG_WR(bp, XSDM_REG_OPERATION_GEN, op_gen.command);
	if (bnx2x_flr_clnup_reg_poll(bp, comp_addr, 1, poll_cnt) != 1) {
		BNX2X_ERR("FW final cleanup did not succeed\n");
		DP(BNX2X_MSG_SP, "At timeout completion address contained %x\n",
		   (REG_RD(bp, comp_addr)));
		ret = 1;
	}
	/* Zero completion for nxt FLR */
	REG_WR(bp, comp_addr, 0);

	return ret;
}

/* Report whether the device still has PCIe transactions pending
 * (DEVSTA.TRPND); returns false if the device has no PCIe capability.
 */
static inline u8 bnx2x_is_pcie_pending(struct pci_dev *dev)
{
	int pos;
	u16 status;

	pos = pci_pcie_cap(dev);
	if (!pos)
		return
false; pci_read_config_word(dev, pos + PCI_EXP_DEVSTA, &status); return status & PCI_EXP_DEVSTA_TRPND; } /* PF FLR specific routines */ static int bnx2x_poll_hw_usage_counters(struct bnx2x *bp, u32 poll_cnt) { /* wait for CFC PF usage-counter to zero (includes all the VFs) */ if (bnx2x_flr_clnup_poll_hw_counter(bp, CFC_REG_NUM_LCIDS_INSIDE_PF, "CFC PF usage counter timed out", poll_cnt)) return 1; /* Wait for DQ PF usage-counter to zero (until DQ cleanup) */ if (bnx2x_flr_clnup_poll_hw_counter(bp, DORQ_REG_PF_USAGE_CNT, "DQ PF usage counter timed out", poll_cnt)) return 1; /* Wait for QM PF usage-counter to zero (until DQ cleanup) */ if (bnx2x_flr_clnup_poll_hw_counter(bp, QM_REG_PF_USG_CNT_0 + 4*BP_FUNC(bp), "QM PF usage counter timed out", poll_cnt)) return 1; /* Wait for Timer PF usage-counters to zero (until DQ cleanup) */ if (bnx2x_flr_clnup_poll_hw_counter(bp, TM_REG_LIN0_VNIC_UC + 4*BP_PORT(bp), "Timers VNIC usage counter timed out", poll_cnt)) return 1; if (bnx2x_flr_clnup_poll_hw_counter(bp, TM_REG_LIN0_NUM_SCANS + 4*BP_PORT(bp), "Timers NUM_SCANS usage counter timed out", poll_cnt)) return 1; /* Wait DMAE PF usage counter to zero */ if (bnx2x_flr_clnup_poll_hw_counter(bp, dmae_reg_go_c[INIT_DMAE_C(bp)], "DMAE dommand register timed out", poll_cnt)) return 1; return 0; } static void bnx2x_hw_enable_status(struct bnx2x *bp) { u32 val; val = REG_RD(bp, CFC_REG_WEAK_ENABLE_PF); DP(BNX2X_MSG_SP, "CFC_REG_WEAK_ENABLE_PF is 0x%x\n", val); val = REG_RD(bp, PBF_REG_DISABLE_PF); DP(BNX2X_MSG_SP, "PBF_REG_DISABLE_PF is 0x%x\n", val); val = REG_RD(bp, IGU_REG_PCI_PF_MSI_EN); DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSI_EN is 0x%x\n", val); val = REG_RD(bp, IGU_REG_PCI_PF_MSIX_EN); DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSIX_EN is 0x%x\n", val); val = REG_RD(bp, IGU_REG_PCI_PF_MSIX_FUNC_MASK); DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSIX_FUNC_MASK is 0x%x\n", val); val = REG_RD(bp, PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR); DP(BNX2X_MSG_SP, "PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR is 0x%x\n", val); 
val = REG_RD(bp, PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR); DP(BNX2X_MSG_SP, "PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR is 0x%x\n", val); val = REG_RD(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER); DP(BNX2X_MSG_SP, "PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER is 0x%x\n", val); } static int bnx2x_pf_flr_clnup(struct bnx2x *bp) { u32 poll_cnt = bnx2x_flr_clnup_poll_count(bp); DP(BNX2X_MSG_SP, "Cleanup after FLR PF[%d]\n", BP_ABS_FUNC(bp)); /* Re-enable PF target read access */ REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1); /* Poll HW usage counters */ DP(BNX2X_MSG_SP, "Polling usage counters\n"); if (bnx2x_poll_hw_usage_counters(bp, poll_cnt)) return -EBUSY; /* Zero the igu 'trailing edge' and 'leading edge' */ /* Send the FW cleanup command */ if (bnx2x_send_final_clnup(bp, (u8)BP_FUNC(bp), poll_cnt)) return -EBUSY; /* ATC cleanup */ /* Verify TX hw is flushed */ bnx2x_tx_hw_flushed(bp, poll_cnt); /* Wait 100ms (not adjusted according to platform) */ msleep(100); /* Verify no pending pci transactions */ if (bnx2x_is_pcie_pending(bp->pdev)) BNX2X_ERR("PCIE Transactions still pending\n"); /* Debug */ bnx2x_hw_enable_status(bp); /* * Master enable - Due to WB DMAE writes performed before this * register is re-initialized as part of the regular function init */ REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1); return 0; } static void bnx2x_hc_int_enable(struct bnx2x *bp) { int port = BP_PORT(bp); u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0; u32 val = REG_RD(bp, addr); int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0; int msi = (bp->flags & USING_MSI_FLAG) ? 
1 : 0;

	if (msix) {
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0);
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else if (msi) {
		val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		if (!CHIP_IS_E1(bp)) {
			DP(NETIF_MSG_IFUP,
			   "write %x to HC %d (addr 0x%x)\n", val, port, addr);

			REG_WR(bp, addr, val);

			val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
		}
	}

	if (CHIP_IS_E1(bp))
		REG_WR(bp, HC_REG_INT_MASK + port*4, 0x1FFFF);

	DP(NETIF_MSG_IFUP,
	   "write %x to HC %d (addr 0x%x) mode %s\n",
	   val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, addr, val);
	/*
	 * Ensure that HC_CONFIG is written before leading/trailing edge config
	 */
	mmiowb();
	barrier();

	if (!CHIP_IS_E1(bp)) {
		/* init leading/trailing edge */
		if (IS_MF(bp)) {
			val = (0xee0f | (1 << (BP_VN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig and gpio3 attention */
				val |= 0x1100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}

	/* Make sure that interrupts are indeed enabled from here on */
	mmiowb();
}

/* Enable interrupts through the IGU block (MSI-X/MSI/INTx per driver
 * flags) and program the leading/trailing edge latches.
 */
static void bnx2x_igu_int_enable(struct bnx2x *bp)
{
	u32 val;
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int msi = (bp->flags & USING_MSI_FLAG) ?
1 : 0;

	val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);

	if (msix) {
		val &= ~(IGU_PF_CONF_INT_LINE_EN |
			 IGU_PF_CONF_SINGLE_ISR_EN);
		val |= (IGU_PF_CONF_FUNC_EN |
			IGU_PF_CONF_MSI_MSIX_EN |
			IGU_PF_CONF_ATTN_BIT_EN);
	} else if (msi) {
		val &= ~IGU_PF_CONF_INT_LINE_EN;
		val |= (IGU_PF_CONF_FUNC_EN |
			IGU_PF_CONF_MSI_MSIX_EN |
			IGU_PF_CONF_ATTN_BIT_EN |
			IGU_PF_CONF_SINGLE_ISR_EN);
	} else {
		val &= ~IGU_PF_CONF_MSI_MSIX_EN;
		val |= (IGU_PF_CONF_FUNC_EN |
			IGU_PF_CONF_INT_LINE_EN |
			IGU_PF_CONF_ATTN_BIT_EN |
			IGU_PF_CONF_SINGLE_ISR_EN);
	}

	DP(NETIF_MSG_IFUP, "write 0x%x to IGU mode %s\n",
	   val, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);

	barrier();

	/* init leading/trailing edge */
	if (IS_MF(bp)) {
		val = (0xee0f | (1 << (BP_VN(bp) + 4)));
		if (bp->port.pmf)
			/* enable nig and gpio3 attention */
			val |= 0x1100;
	} else
		val = 0xffff;

	REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
	REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);

	/* Make sure that interrupts are indeed enabled from here on */
	mmiowb();
}

/* Dispatch interrupt enabling to the HC or IGU path per chip config. */
void bnx2x_int_enable(struct bnx2x *bp)
{
	if (bp->common.int_block == INT_BLOCK_HC)
		bnx2x_hc_int_enable(bp);
	else
		bnx2x_igu_int_enable(bp);
}

/* Disable interrupts via the HC block and verify the write took effect. */
static void bnx2x_hc_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ?
HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	/*
	 * in E1 we must use only PCI configuration space to disable
	 * MSI/MSIX capability
	 * It's forbidden to disable IGU_PF_CONF_MSI_MSIX_EN in HC block
	 */
	if (CHIP_IS_E1(bp)) {
		/* Since IGU_PF_CONF_MSI_MSIX_EN still always on
		 * Use mask register to prevent from HC sending interrupts
		 * after we exit the function
		 */
		REG_WR(bp, HC_REG_INT_MASK + port*4, 0);

		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0 |
			 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0 |
			 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_IFDOWN, "write %x to HC %d (addr 0x%x)\n", val, port, addr);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

/* Disable interrupts via the IGU block and verify the write took effect. */
static void bnx2x_igu_int_disable(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);

	val &= ~(IGU_PF_CONF_MSI_MSIX_EN |
		 IGU_PF_CONF_INT_LINE_EN |
		 IGU_PF_CONF_ATTN_BIT_EN);

	DP(NETIF_MSG_IFDOWN, "write %x to IGU\n", val);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
	if (REG_RD(bp, IGU_REG_PF_CONFIGURATION) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

/* Dispatch interrupt disabling to the HC or IGU path per chip config. */
void bnx2x_int_disable(struct bnx2x *bp)
{
	if (bp->common.int_block == INT_BLOCK_HC)
		bnx2x_hc_int_disable(bp);
	else
		bnx2x_igu_int_disable(bp);
}

/* Optionally disable interrupts in HW, then synchronize with all ISRs and
 * cancel deferred work so no handler is running when this returns.
 */
void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ?
1 : 0;
	int i, offset;

	if (disable_hw)
		/* prevent the HW from sending interrupts */
		bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		synchronize_irq(bp->msix_table[0].vector);
		offset = 1;
#ifdef BCM_CNIC
		offset++;
#endif
		for_each_eth_queue(bp, i)
			synchronize_irq(bp->msix_table[offset++].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_delayed_work(&bp->sp_task);
	cancel_delayed_work(&bp->period_task);
	flush_workqueue(bnx2x_wq);
}

/* fast path */

/*
 * General service functions
 */

/* Return true if succeeded to acquire the lock */
static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	DP(NETIF_MSG_HW | NETIF_MSG_IFUP,
	   "Trying to take a lock on resource %d\n", resource);

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW | NETIF_MSG_IFUP,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return false;
	}

	if (func <= 5)
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	else
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);

	/* Try to acquire the lock */
	REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit)
		return true;

	DP(NETIF_MSG_HW | NETIF_MSG_IFUP,
	   "Failed to get a lock on resource %d\n", resource);
	return false;
}

/**
 * bnx2x_get_leader_lock_resource - get the recovery leader resource id
 *
 * @bp:	driver handle
 *
 * Returns the recovery leader resource id according to the engine this
 * function belongs to. Currently only 2 engines are supported.
 */
static inline int bnx2x_get_leader_lock_resource(struct bnx2x *bp)
{
	if (BP_PATH(bp))
		return HW_LOCK_RESOURCE_RECOVERY_LEADER_1;
	else
		return HW_LOCK_RESOURCE_RECOVERY_LEADER_0;
}

/**
 * bnx2x_trylock_leader_lock- try to acquire a leader lock.
* * @bp: driver handle * * Tries to aquire a leader lock for cuurent engine. */ static inline bool bnx2x_trylock_leader_lock(struct bnx2x *bp) { return bnx2x_trylock_hw_lock(bp, bnx2x_get_leader_lock_resource(bp)); } #ifdef BCM_CNIC static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid, u8 err); #endif void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe) { struct bnx2x *bp = fp->bp; int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data); int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data); enum bnx2x_queue_cmd drv_cmd = BNX2X_Q_CMD_MAX; struct bnx2x_queue_sp_obj *q_obj = &fp->q_obj; DP(BNX2X_MSG_SP, "fp %d cid %d got ramrod #%d state is %x type is %d\n", fp->index, cid, command, bp->state, rr_cqe->ramrod_cqe.ramrod_type); switch (command) { case (RAMROD_CMD_ID_ETH_CLIENT_UPDATE): DP(BNX2X_MSG_SP, "got UPDATE ramrod. CID %d\n", cid); drv_cmd = BNX2X_Q_CMD_UPDATE; break; case (RAMROD_CMD_ID_ETH_CLIENT_SETUP): DP(BNX2X_MSG_SP, "got MULTI[%d] setup ramrod\n", cid); drv_cmd = BNX2X_Q_CMD_SETUP; break; case (RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP): DP(BNX2X_MSG_SP, "got MULTI[%d] tx-only setup ramrod\n", cid); drv_cmd = BNX2X_Q_CMD_SETUP_TX_ONLY; break; case (RAMROD_CMD_ID_ETH_HALT): DP(BNX2X_MSG_SP, "got MULTI[%d] halt ramrod\n", cid); drv_cmd = BNX2X_Q_CMD_HALT; break; case (RAMROD_CMD_ID_ETH_TERMINATE): DP(BNX2X_MSG_SP, "got MULTI[%d] teminate ramrod\n", cid); drv_cmd = BNX2X_Q_CMD_TERMINATE; break; case (RAMROD_CMD_ID_ETH_EMPTY): DP(BNX2X_MSG_SP, "got MULTI[%d] empty ramrod\n", cid); drv_cmd = BNX2X_Q_CMD_EMPTY; break; default: BNX2X_ERR("unexpected MC reply (%d) on fp[%d]\n", command, fp->index); return; } if ((drv_cmd != BNX2X_Q_CMD_MAX) && q_obj->complete_cmd(bp, q_obj, drv_cmd)) /* q_obj->complete_cmd() failure means that this was * an unexpected completion. * * In this case we don't want to increase the bp->spq_left * because apparently we haven't sent this command the first * place. 
		 */
#ifdef BNX2X_STOP_ON_ERROR
		bnx2x_panic();
#else
		return;
#endif

	smp_mb__before_atomic_inc();
	atomic_inc(&bp->cq_spq_left);
	/* push the change in bp->spq_left and towards the memory */
	smp_mb__after_atomic_inc();

	DP(BNX2X_MSG_SP, "bp->cq_spq_left %x\n", atomic_read(&bp->cq_spq_left));

	return;
}

/* Publish the new Rx BD/CQE/SGE producer values to the chip (uSTORM). */
void bnx2x_update_rx_prod(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			u16 bd_prod, u16 rx_comp_prod, u16 rx_sge_prod)
{
	u32 start = BAR_USTRORM_INTMEM + fp->ustorm_rx_prods_offset;

	bnx2x_update_rx_prod_gen(bp, fp, bd_prod, rx_comp_prod, rx_sge_prod,
				 start);
}

/*
 * bnx2x_interrupt - INTx / single-vector interrupt handler.
 *
 * Demultiplexes the aggregated interrupt status: schedules NAPI for each
 * ethernet queue whose status bit is set, forwards the CNIC indication
 * (when BCM_CNIC is configured) and queues the slow-path task for bit 0.
 */
irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
	struct bnx2x *bp = netdev_priv(dev_instance);
	u16 status = bnx2x_ack_int(bp);
	u16 mask;
	int i;
	u8 cos;

	/* Return here if interrupt is shared and it's not for us */
	if (unlikely(status == 0)) {
		DP(NETIF_MSG_INTR, "not our interrupt!\n");
		return IRQ_NONE;
	}
	DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	for_each_eth_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		mask = 0x2 << (fp->index + CNIC_PRESENT);
		if (status & mask) {
			/* Handle Rx or Tx according to SB id */
			prefetch(fp->rx_cons_sb);
			for_each_cos_in_tx_queue(fp, cos)
				prefetch(fp->txdata[cos].tx_cons_sb);
			prefetch(&fp->sb_running_index[SM_RX_ID]);
			napi_schedule(&bnx2x_fp(bp, fp->index, napi));
			status &= ~mask;
		}
	}

#ifdef BCM_CNIC
	mask = 0x2;
	if (status & (mask | 0x1)) {
		struct cnic_ops *c_ops = NULL;

		if (likely(bp->state == BNX2X_STATE_OPEN)) {
			rcu_read_lock();
			c_ops = rcu_dereference(bp->cnic_ops);
			if (c_ops)
				c_ops->cnic_handler(bp->cnic_data, NULL);
			rcu_read_unlock();
		}

		status &= ~mask;
	}
#endif

	if (unlikely(status & 0x1)) {
		queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

		status &= ~0x1;
		if (!status)
			return IRQ_HANDLED;
	}

	if (unlikely(status))
		DP(NETIF_MSG_INTR, "got an unknown interrupt! 
(status 0x%x)\n", status);

	return IRQ_HANDLED;
}

/* Link */

/*
 * General service functions
 */

/*
 * bnx2x_acquire_hw_lock - acquire a HW resource lock.
 *
 * Polls this function's MISC driver-control register for up to
 * 5 seconds (1000 attempts, 5 ms apart) until the bit for @resource
 * is granted.
 *
 * Returns 0 on success, -EINVAL if @resource is out of range,
 * -EEXIST if the lock is already held and -EAGAIN on timeout.
 */
int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;
	int cnt;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		BNX2X_ERR("resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
			  resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	/* Functions 0-5 use DRIVER_CONTROL_1..6, 6-7 use DRIVER_CONTROL_7..8 */
	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
			(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is not already taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit) {
		BNX2X_ERR("lock_status 0x%x resource_bit 0x%x\n",
			  lock_status, resource_bit);
		return -EEXIST;
	}

	/* Try for 5 second every 5ms */
	for (cnt = 0; cnt < 1000; cnt++) {
		/* Try to acquire the lock */
		REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
		lock_status = REG_RD(bp, hw_lock_control_reg);
		if (lock_status & resource_bit)
			return 0;

		msleep(5);
	}
	BNX2X_ERR("Timeout\n");
	return -EAGAIN;
}

/* Release the recovery-leader lock of the engine this function belongs to. */
int bnx2x_release_leader_lock(struct bnx2x *bp)
{
	return bnx2x_release_hw_lock(bp, bnx2x_get_leader_lock_resource(bp));
}

/*
 * bnx2x_release_hw_lock - release a HW resource lock.
 *
 * Returns 0 on success, -EINVAL if @resource is out of range and
 * -EFAULT if the lock was not actually held.
 */
int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		BNX2X_ERR("resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
			  resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	/* Functions 0-5 use DRIVER_CONTROL_1..6, 6-7 use DRIVER_CONTROL_7..8 */
	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
			(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is currently taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (!(lock_status & resource_bit)) {
		BNX2X_ERR("lock_status 0x%x resource_bit 
0x%x. unlock was called but lock wasn't taken!\n",
			  lock_status, resource_bit);
		return -EFAULT;
	}

	REG_WR(bp, hw_lock_control_reg, resource_bit);
	return 0;
}

/*
 * bnx2x_get_gpio - read the current value of one GPIO pin.
 *
 * The pin index is shifted into the other port's bank when the NIG
 * port-swap strap is set and active.
 *
 * Returns the pin level (0/1) or -EINVAL for an invalid GPIO number.
 */
int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;
	int value;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	/* read GPIO value */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO);

	/* get the requested pin value */
	if ((gpio_reg & gpio_mask) == gpio_mask)
		value = 1;
	else
		value = 0;

	DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value);

	return value;
}

/*
 * bnx2x_set_gpio - drive one GPIO pin low/high or float it (input).
 *
 * Performs a locked read-modify-write of MISC_REG_GPIO under the GPIO
 * HW lock.  Returns 0 on success or -EINVAL for an invalid GPIO number.
 */
int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num + (gpio_port ?
MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	/* serialize access to MISC_REG_GPIO across functions */
	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO and mask except the float bits */
	gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK,
		   "Set GPIO %d (shift %d) -> output low\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK,
		   "Set GPIO %d (shift %d) -> output high\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
		break;

	case MISC_REGISTERS_GPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK,
		   "Set GPIO %d (shift %d) -> input\n",
		   gpio_num, gpio_shift);
		/* set FLOAT */
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}

/*
 * bnx2x_set_mult_gpio - apply @mode to a whole mask of GPIO pins at once.
 *
 * Unlike bnx2x_set_gpio(), no port swapping is applied here.
 * Returns 0 on success or -EINVAL for an unknown @mode.
 */
int bnx2x_set_mult_gpio(struct bnx2x *bp, u8 pins, u32 mode)
{
	u32 gpio_reg = 0;
	int rc = 0;

	/* Any port swapping should be handled by caller.
 */
	/* serialize access to MISC_REG_GPIO across functions */
	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO and mask except the float bits */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO);
	gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_FLOAT_POS);
	gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_CLR_POS);
	gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_SET_POS);

	switch (mode) {
	case MISC_REGISTERS_GPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> output low\n", pins);
		/* set CLR */
		gpio_reg |= (pins << MISC_REGISTERS_GPIO_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> output high\n", pins);
		/* set SET */
		gpio_reg |= (pins << MISC_REGISTERS_GPIO_SET_POS);
		break;

	case MISC_REGISTERS_GPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> input\n", pins);
		/* set FLOAT */
		gpio_reg |= (pins << MISC_REGISTERS_GPIO_FLOAT_POS);
		break;

	default:
		BNX2X_ERR("Invalid GPIO mode assignment %d\n", mode);
		rc = -EINVAL;
		break;
	}

	/* only commit the new value when the mode was valid */
	if (rc == 0)
		REG_WR(bp, MISC_REG_GPIO, gpio_reg);

	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return rc;
}

/*
 * bnx2x_set_gpio_int - set or clear the interrupt state of one GPIO pin.
 *
 * The pin index is shifted into the other port's bank when the NIG
 * port-swap strap is set and active.  Returns 0 on success or -EINVAL
 * for an invalid GPIO number.
 */
int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num + (gpio_port ?
MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	/* serialize access to MISC_REG_GPIO_INT across functions */
	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO int */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
		DP(NETIF_MSG_LINK,
		   "Clear GPIO INT %d (shift %d) -> output low\n",
		   gpio_num, gpio_shift);
		/* clear SET and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
		DP(NETIF_MSG_LINK,
		   "Set GPIO INT %d (shift %d) -> output high\n",
		   gpio_num, gpio_shift);
		/* clear CLR and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}

/*
 * bnx2x_set_spio - drive one SPIO pin low/high or float it (input).
 *
 * Only SPIO 4-7 are accepted.  Performs a locked read-modify-write of
 * MISC_REG_SPIO under the SPIO HW lock.  Returns 0 on success or
 * -EINVAL for an invalid SPIO number.
 */
static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
{
	u32 spio_mask = (1 << spio_num);
	u32 spio_reg;

	if ((spio_num < MISC_REGISTERS_SPIO_4) ||
	    (spio_num > MISC_REGISTERS_SPIO_7)) {
		BNX2X_ERR("Invalid SPIO %d\n", spio_num);
		return -EINVAL;
	}

	/* serialize access to MISC_REG_SPIO across functions */
	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
	/* read SPIO and mask except the float bits */
	spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_SPIO_OUTPUT_LOW:
		DP(NETIF_MSG_HW, "Set SPIO %d -> output low\n", spio_num);
		/* clear FLOAT and set CLR */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
		break;

	case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_HW, "Set SPIO %d -> output high\n", spio_num);
		/* clear FLOAT and set SET */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
		break;

	case MISC_REGISTERS_SPIO_INPUT_HI_Z:
		DP(NETIF_MSG_HW,
"Set SPIO %d -> input\n", spio_num);
		/* set FLOAT */
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_SPIO, spio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);

	return 0;
}

/*
 * bnx2x_calc_fc_adv - translate the negotiated IEEE pause bits into
 * ethtool advertising flags for the active link configuration index.
 */
void bnx2x_calc_fc_adv(struct bnx2x *bp)
{
	u8 cfg_idx = bnx2x_get_link_cfg_idx(bp);
	switch (bp->link_vars.ieee_fc &
		MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
		bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
						   ADVERTISED_Pause);
		break;

	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
		bp->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause |
						  ADVERTISED_Pause);
		break;

	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
		bp->port.advertising[cfg_idx] |= ADVERTISED_Asym_Pause;
		break;

	default:
		bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
						   ADVERTISED_Pause);
		break;
	}
}

/*
 * bnx2x_initial_phy_init - first-time PHY/link bring-up.
 *
 * Returns the bnx2x_phy_init() result, or -EINVAL when there is no
 * bootcode (MCP) to drive the link.
 */
u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
{
	if (!BP_NOMCP(bp)) {
		u8 rc;
		int cfx_idx = bnx2x_get_link_cfg_idx(bp);
		u16 req_line_speed = bp->link_params.req_line_speed[cfx_idx];

		/*
		 * Initialize link parameters structure variables
		 * It is recommended to turn off RX FC for jumbo frames
		 * for better performance
		 */
		if (CHIP_IS_E1x(bp) && (bp->dev->mtu > 5000))
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
		else
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;

		bnx2x_acquire_phy_lock(bp);

		if (load_mode == LOAD_DIAG) {
			struct link_params *lp = &bp->link_params;
			lp->loopback_mode = LOOPBACK_XGXS;
			/* do PHY loopback at 10G speed, if possible */
			if (lp->req_line_speed[cfx_idx] < SPEED_10000) {
				if (lp->speed_cap_mask[cfx_idx] &
				    PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
					lp->req_line_speed[cfx_idx] =
					SPEED_10000;
				else
					lp->req_line_speed[cfx_idx] =
					SPEED_1000;
			}
		}

		rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);

		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);

		if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
			bnx2x_link_report(bp);
		} else
queue_delayed_work(bnx2x_wq, &bp->period_task, 0); bp->link_params.req_line_speed[cfx_idx] = req_line_speed; return rc; } BNX2X_ERR("Bootcode is missing - can not initialize link\n"); return -EINVAL; } void bnx2x_link_set(struct bnx2x *bp) { if (!BP_NOMCP(bp)) { bnx2x_acquire_phy_lock(bp); bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1); bnx2x_phy_init(&bp->link_params, &bp->link_vars); bnx2x_release_phy_lock(bp); bnx2x_calc_fc_adv(bp); } else BNX2X_ERR("Bootcode is missing - can not set link\n"); } static void bnx2x__link_reset(struct bnx2x *bp) { if (!BP_NOMCP(bp)) { bnx2x_acquire_phy_lock(bp); bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1); bnx2x_release_phy_lock(bp); } else BNX2X_ERR("Bootcode is missing - can not reset link\n"); } u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes) { u8 rc = 0; if (!BP_NOMCP(bp)) { bnx2x_acquire_phy_lock(bp); rc = bnx2x_test_link(&bp->link_params, &bp->link_vars, is_serdes); bnx2x_release_phy_lock(bp); } else BNX2X_ERR("Bootcode is missing - can not test link\n"); return rc; } static void bnx2x_init_port_minmax(struct bnx2x *bp) { u32 r_param = bp->link_vars.line_speed / 8; u32 fair_periodic_timeout_usec; u32 t_fair; memset(&(bp->cmng.rs_vars), 0, sizeof(struct rate_shaping_vars_per_port)); memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port)); /* 100 usec in SDM ticks = 25 since each tick is 4 usec */ bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4; /* this is the threshold below which no timer arming will occur 1.25 coefficient is for the threshold to be a little bigger than the real time, to compensate for timer in-accuracy */ bp->cmng.rs_vars.rs_threshold = (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4; /* resolution of fairness timer */ fair_periodic_timeout_usec = QM_ARB_BYTES / r_param; /* for 10G it is 1000usec. for 1G it is 10000usec. 
*/ t_fair = T_FAIR_COEF / bp->link_vars.line_speed; /* this is the threshold below which we won't arm the timer anymore */ bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES; /* we multiply by 1e3/8 to get bytes/msec. We don't want the credits to pass a credit of the t_fair*FAIR_MEM (algorithm resolution) */ bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM; /* since each tick is 4 usec */ bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4; } /* Calculates the sum of vn_min_rates. It's needed for further normalizing of the min_rates. Returns: sum of vn_min_rates. or 0 - if all the min_rates are 0. In the later case fainess algorithm should be deactivated. If not all min_rates are zero then those that are zeroes will be set to 1. */ static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp) { int all_zero = 1; int vn; bp->vn_weight_sum = 0; for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) { u32 vn_cfg = bp->mf_config[vn]; u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >> FUNC_MF_CFG_MIN_BW_SHIFT) * 100; /* Skip hidden vns */ if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) continue; /* If min rate is zero - set it to 1 */ if (!vn_min_rate) vn_min_rate = DEF_MIN_RATE; else all_zero = 0; bp->vn_weight_sum += vn_min_rate; } /* if ETS or all min rates are zeros - disable fairness */ if (BNX2X_IS_ETS_ENABLED(bp)) { bp->cmng.flags.cmng_enables &= ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN; DP(NETIF_MSG_IFUP, "Fairness will be disabled due to ETS\n"); } else if (all_zero) { bp->cmng.flags.cmng_enables &= ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN; DP(NETIF_MSG_IFUP, "All MIN values are zeroes" " fairness will be disabled\n"); } else bp->cmng.flags.cmng_enables |= CMNG_FLAGS_PER_PORT_FAIRNESS_VN; } static void bnx2x_init_vn_minmax(struct bnx2x *bp, int vn) { struct rate_shaping_vars_per_vn m_rs_vn; struct fairness_vars_per_vn m_fair_vn; u32 vn_cfg = bp->mf_config[vn]; int func = func_by_vn(bp, vn); u16 vn_min_rate, vn_max_rate; int i; /* If function is hidden - set min and max to 
zeroes */ if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) { vn_min_rate = 0; vn_max_rate = 0; } else { u32 maxCfg = bnx2x_extract_max_cfg(bp, vn_cfg); vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >> FUNC_MF_CFG_MIN_BW_SHIFT) * 100; /* If fairness is enabled (not all min rates are zeroes) and if current min rate is zero - set it to 1. This is a requirement of the algorithm. */ if (bp->vn_weight_sum && (vn_min_rate == 0)) vn_min_rate = DEF_MIN_RATE; if (IS_MF_SI(bp)) /* maxCfg in percents of linkspeed */ vn_max_rate = (bp->link_vars.line_speed * maxCfg) / 100; else /* maxCfg is absolute in 100Mb units */ vn_max_rate = maxCfg * 100; } DP(NETIF_MSG_IFUP, "func %d: vn_min_rate %d vn_max_rate %d vn_weight_sum %d\n", func, vn_min_rate, vn_max_rate, bp->vn_weight_sum); memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn)); memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn)); /* global vn counter - maximal Mbps for this vn */ m_rs_vn.vn_counter.rate = vn_max_rate; /* quota - number of bytes transmitted in this period */ m_rs_vn.vn_counter.quota = (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8; if (bp->vn_weight_sum) { /* credit for each period of the fairness algorithm: number of bytes in T_FAIR (the vn share the port rate). 
vn_weight_sum should not be larger than 10000, thus T_FAIR_COEF / (8 * vn_weight_sum) will always be greater than zero */ m_fair_vn.vn_credit_delta = max_t(u32, (vn_min_rate * (T_FAIR_COEF / (8 * bp->vn_weight_sum))), (bp->cmng.fair_vars.fair_threshold + MIN_ABOVE_THRESH)); DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta %d\n", m_fair_vn.vn_credit_delta); } /* Store it to internal memory */ for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++) REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4, ((u32 *)(&m_rs_vn))[i]); for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++) REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4, ((u32 *)(&m_fair_vn))[i]); } static int bnx2x_get_cmng_fns_mode(struct bnx2x *bp) { if (CHIP_REV_IS_SLOW(bp)) return CMNG_FNS_NONE; if (IS_MF(bp)) return CMNG_FNS_MINMAX; return CMNG_FNS_NONE; } void bnx2x_read_mf_cfg(struct bnx2x *bp) { int vn, n = (CHIP_MODE_IS_4_PORT(bp) ? 2 : 1); if (BP_NOMCP(bp)) return; /* what should be the default bvalue in this case */ /* For 2 port configuration the absolute function number formula * is: * abs_func = 2 * vn + BP_PORT + BP_PATH * * and there are 4 functions per port * * For 4 port configuration it is * abs_func = 4 * vn + 2 * BP_PORT + BP_PATH * * and there are 2 functions per port */ for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) { int /*abs*/func = n * (2 * vn + BP_PORT(bp)) + BP_PATH(bp); if (func >= E1H_FUNC_MAX) break; bp->mf_config[vn] = MF_CFG_RD(bp, func_mf_config[func].config); } } static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type) { if (cmng_type == CMNG_FNS_MINMAX) { int vn; /* clear cmng_enables */ bp->cmng.flags.cmng_enables = 0; /* read mf conf from shmem */ if (read_cfg) bnx2x_read_mf_cfg(bp); /* Init rate shaping and fairness contexts */ bnx2x_init_port_minmax(bp); /* vn_weight_sum and enable fairness if not 0 */ bnx2x_calc_vn_weight_sum(bp); /* calculate and set min-max rate for each vn 
*/ if (bp->port.pmf) for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) bnx2x_init_vn_minmax(bp, vn); /* always enable rate shaping and fairness */ bp->cmng.flags.cmng_enables |= CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN; if (!bp->vn_weight_sum) DP(NETIF_MSG_IFUP, "All MIN values are zeroes" " fairness will be disabled\n"); return; } /* rate shaping and fairness are disabled */ DP(NETIF_MSG_IFUP, "rate shaping and fairness are disabled\n"); } /* This function is called upon link interrupt */ static void bnx2x_link_attn(struct bnx2x *bp) { /* Make sure that we are synced with the current statistics */ bnx2x_stats_handle(bp, STATS_EVENT_STOP); bnx2x_link_update(&bp->link_params, &bp->link_vars); if (bp->link_vars.link_up) { /* dropless flow control */ if (!CHIP_IS_E1(bp) && bp->dropless_fc) { int port = BP_PORT(bp); u32 pause_enabled = 0; if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) pause_enabled = 1; REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_ETH_PAUSE_ENABLED_OFFSET(port), pause_enabled); } if (bp->link_vars.mac_type != MAC_TYPE_EMAC) { struct host_port_stats *pstats; pstats = bnx2x_sp(bp, port_stats); /* reset old mac stats */ memset(&(pstats->mac_stx[0]), 0, sizeof(struct mac_stx)); } if (bp->state == BNX2X_STATE_OPEN) bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP); } if (bp->link_vars.link_up && bp->link_vars.line_speed) { int cmng_fns = bnx2x_get_cmng_fns_mode(bp); if (cmng_fns != CMNG_FNS_NONE) { bnx2x_cmng_fns_init(bp, false, cmng_fns); storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp)); } else /* rate shaping and fairness are disabled */ DP(NETIF_MSG_IFUP, "single function mode without fairness\n"); } __bnx2x_link_report(bp); if (IS_MF(bp)) bnx2x_link_sync_notify(bp); } void bnx2x__link_status_update(struct bnx2x *bp) { if (bp->state != BNX2X_STATE_OPEN) return; /* read updated dcb configuration */ bnx2x_dcbx_pmf_update(bp); bnx2x_link_status_update(&bp->link_params, &bp->link_vars); if (bp->link_vars.link_up) bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP); else 
bnx2x_stats_handle(bp, STATS_EVENT_STOP); /* indicate link status */ bnx2x_link_report(bp); } static void bnx2x_pmf_update(struct bnx2x *bp) { int port = BP_PORT(bp); u32 val; bp->port.pmf = 1; DP(BNX2X_MSG_MCP, "pmf %d\n", bp->port.pmf); /* * We need the mb() to ensure the ordering between the writing to * bp->port.pmf here and reading it from the bnx2x_periodic_task(). */ smp_mb(); /* queue a periodic task */ queue_delayed_work(bnx2x_wq, &bp->period_task, 0); bnx2x_dcbx_pmf_update(bp); /* enable nig attention */ val = (0xff0f | (1 << (BP_VN(bp) + 4))); if (bp->common.int_block == INT_BLOCK_HC) { REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val); REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val); } else if (!CHIP_IS_E1x(bp)) { REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val); REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val); } bnx2x_stats_handle(bp, STATS_EVENT_PMF); } /* end of Link */ /* slow path */ /* * General service functions */ /* send the MCP a request, block until there is a reply */ u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param) { int mb_idx = BP_FW_MB_IDX(bp); u32 seq; u32 rc = 0; u32 cnt = 1; u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10; mutex_lock(&bp->fw_mb_mutex); seq = ++bp->fw_seq; SHMEM_WR(bp, func_mb[mb_idx].drv_mb_param, param); SHMEM_WR(bp, func_mb[mb_idx].drv_mb_header, (command | seq)); DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB param 0x%08x\n", (command | seq), param); do { /* let the FW do it's magic ... */ msleep(delay); rc = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_header); /* Give the FW up to 5 second (500*10ms) */ } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500)); DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n", cnt*delay, rc, seq); /* is this a reply to our command? */ if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) rc &= FW_MSG_CODE_MASK; else { /* FW BUG! 
*/ BNX2X_ERR("FW failed to respond!\n"); bnx2x_fw_dump(bp); rc = 0; } mutex_unlock(&bp->fw_mb_mutex); return rc; } void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p) { if (CHIP_IS_E1x(bp)) { struct tstorm_eth_function_common_config tcfg = {0}; storm_memset_func_cfg(bp, &tcfg, p->func_id); } /* Enable the function in the FW */ storm_memset_vf_to_pf(bp, p->func_id, p->pf_id); storm_memset_func_en(bp, p->func_id, 1); /* spq */ if (p->func_flgs & FUNC_FLG_SPQ) { storm_memset_spq_addr(bp, p->spq_map, p->func_id); REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(p->func_id), p->spq_prod); } } /** * bnx2x_get_tx_only_flags - Return common flags * * @bp device handle * @fp queue handle * @zero_stats TRUE if statistics zeroing is needed * * Return the flags that are common for the Tx-only and not normal connections. */ static inline unsigned long bnx2x_get_common_flags(struct bnx2x *bp, struct bnx2x_fastpath *fp, bool zero_stats) { unsigned long flags = 0; /* PF driver will always initialize the Queue to an ACTIVE state */ __set_bit(BNX2X_Q_FLG_ACTIVE, &flags); /* tx only connections collect statistics (on the same index as the * parent connection). The statistics are zeroed when the parent * connection is initialized. 
*/ __set_bit(BNX2X_Q_FLG_STATS, &flags); if (zero_stats) __set_bit(BNX2X_Q_FLG_ZERO_STATS, &flags); return flags; } static inline unsigned long bnx2x_get_q_flags(struct bnx2x *bp, struct bnx2x_fastpath *fp, bool leading) { unsigned long flags = 0; /* calculate other queue flags */ if (IS_MF_SD(bp)) __set_bit(BNX2X_Q_FLG_OV, &flags); if (IS_FCOE_FP(fp)) __set_bit(BNX2X_Q_FLG_FCOE, &flags); if (!fp->disable_tpa) { __set_bit(BNX2X_Q_FLG_TPA, &flags); __set_bit(BNX2X_Q_FLG_TPA_IPV6, &flags); if (fp->mode == TPA_MODE_GRO) __set_bit(BNX2X_Q_FLG_TPA_GRO, &flags); } if (leading) { __set_bit(BNX2X_Q_FLG_LEADING_RSS, &flags); __set_bit(BNX2X_Q_FLG_MCAST, &flags); } /* Always set HW VLAN stripping */ __set_bit(BNX2X_Q_FLG_VLAN, &flags); return flags | bnx2x_get_common_flags(bp, fp, true); } static void bnx2x_pf_q_prep_general(struct bnx2x *bp, struct bnx2x_fastpath *fp, struct bnx2x_general_setup_params *gen_init, u8 cos) { gen_init->stat_id = bnx2x_stats_id(fp); gen_init->spcl_id = fp->cl_id; /* Always use mini-jumbo MTU for FCoE L2 ring */ if (IS_FCOE_FP(fp)) gen_init->mtu = BNX2X_FCOE_MINI_JUMBO_MTU; else gen_init->mtu = bp->dev->mtu; gen_init->cos = cos; } static void bnx2x_pf_rx_q_prep(struct bnx2x *bp, struct bnx2x_fastpath *fp, struct rxq_pause_params *pause, struct bnx2x_rxq_setup_params *rxq_init) { u8 max_sge = 0; u16 sge_sz = 0; u16 tpa_agg_size = 0; if (!fp->disable_tpa) { pause->sge_th_lo = SGE_TH_LO(bp); pause->sge_th_hi = SGE_TH_HI(bp); /* validate SGE ring has enough to cross high threshold */ WARN_ON(bp->dropless_fc && pause->sge_th_hi + FW_PREFETCH_CNT > MAX_RX_SGE_CNT * NUM_RX_SGE_PAGES); tpa_agg_size = min_t(u32, (min_t(u32, 8, MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff); max_sge = SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT; max_sge = ((max_sge + PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE-1))) >> PAGES_PER_SGE_SHIFT; sge_sz = (u16)min_t(u32, SGE_PAGE_SIZE * PAGES_PER_SGE, 0xffff); } /* pause - not for e1 */ if (!CHIP_IS_E1(bp)) { 
pause->bd_th_lo = BD_TH_LO(bp); pause->bd_th_hi = BD_TH_HI(bp); pause->rcq_th_lo = RCQ_TH_LO(bp); pause->rcq_th_hi = RCQ_TH_HI(bp); /* * validate that rings have enough entries to cross * high thresholds */ WARN_ON(bp->dropless_fc && pause->bd_th_hi + FW_PREFETCH_CNT > bp->rx_ring_size); WARN_ON(bp->dropless_fc && pause->rcq_th_hi + FW_PREFETCH_CNT > NUM_RCQ_RINGS * MAX_RCQ_DESC_CNT); pause->pri_map = 1; } /* rxq setup */ rxq_init->dscr_map = fp->rx_desc_mapping; rxq_init->sge_map = fp->rx_sge_mapping; rxq_init->rcq_map = fp->rx_comp_mapping; rxq_init->rcq_np_map = fp->rx_comp_mapping + BCM_PAGE_SIZE; /* This should be a maximum number of data bytes that may be * placed on the BD (not including paddings). */ rxq_init->buf_sz = fp->rx_buf_size - BNX2X_FW_RX_ALIGN_START - BNX2X_FW_RX_ALIGN_END - IP_HEADER_ALIGNMENT_PADDING; rxq_init->cl_qzone_id = fp->cl_qzone_id; rxq_init->tpa_agg_sz = tpa_agg_size; rxq_init->sge_buf_sz = sge_sz; rxq_init->max_sges_pkt = max_sge; rxq_init->rss_engine_id = BP_FUNC(bp); rxq_init->mcast_engine_id = BP_FUNC(bp); /* Maximum number or simultaneous TPA aggregation for this Queue. * * For PF Clients it should be the maximum avaliable number. * VF driver(s) may want to define it to a smaller value. 
*/ rxq_init->max_tpa_queues = MAX_AGG_QS(bp); rxq_init->cache_line_log = BNX2X_RX_ALIGN_SHIFT; rxq_init->fw_sb_id = fp->fw_sb_id; if (IS_FCOE_FP(fp)) rxq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_RX_CQ_CONS; else rxq_init->sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS; } static void bnx2x_pf_tx_q_prep(struct bnx2x *bp, struct bnx2x_fastpath *fp, struct bnx2x_txq_setup_params *txq_init, u8 cos) { txq_init->dscr_map = fp->txdata[cos].tx_desc_mapping; txq_init->sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS + cos; txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW; txq_init->fw_sb_id = fp->fw_sb_id; /* * set the tss leading client id for TX classfication == * leading RSS client id */ txq_init->tss_leading_cl_id = bnx2x_fp(bp, 0, cl_id); if (IS_FCOE_FP(fp)) { txq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_TX_CQ_CONS; txq_init->traffic_type = LLFC_TRAFFIC_TYPE_FCOE; } } static void bnx2x_pf_init(struct bnx2x *bp) { struct bnx2x_func_init_params func_init = {0}; struct event_ring_data eq_data = { {0} }; u16 flags; if (!CHIP_IS_E1x(bp)) { /* reset IGU PF statistics: MSIX + ATTN */ /* PF */ REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT + BNX2X_IGU_STAS_MSG_VF_CNT*4 + (CHIP_MODE_IS_4_PORT(bp) ? BP_FUNC(bp) : BP_VN(bp))*4, 0); /* ATTN */ REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT + BNX2X_IGU_STAS_MSG_VF_CNT*4 + BNX2X_IGU_STAS_MSG_PF_CNT*4 + (CHIP_MODE_IS_4_PORT(bp) ? BP_FUNC(bp) : BP_VN(bp))*4, 0); } /* function setup flags */ flags = (FUNC_FLG_STATS | FUNC_FLG_LEADING | FUNC_FLG_SPQ); /* This flag is relevant for E1x only. * E2 doesn't have a TPA configuration in a function level. */ flags |= (bp->flags & TPA_ENABLE_FLAG) ? 
FUNC_FLG_TPA : 0;
	func_init.func_flgs = flags;
	func_init.pf_id = BP_FUNC(bp);
	func_init.func_id = BP_FUNC(bp);
	func_init.spq_map = bp->spq_mapping;
	func_init.spq_prod = bp->spq_prod_idx;

	bnx2x_func_init(bp, &func_init);

	memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));

	/*
	 * Congestion management values depend on the link rate
	 * There is no active link so initial link rate is set to 10 Gbps.
	 * When the link comes up The congestion management values are
	 * re-calculated according to the actual link rate.
	 */
	bp->link_vars.line_speed = SPEED_10000;
	bnx2x_cmng_fns_init(bp, true, bnx2x_get_cmng_fns_mode(bp));

	/* Only the PMF sets the HW */
	if (bp->port.pmf)
		storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));

	/* init Event Queue */
	eq_data.base_addr.hi = U64_HI(bp->eq_mapping);
	eq_data.base_addr.lo = U64_LO(bp->eq_mapping);
	eq_data.producer = bp->eq_prod;
	eq_data.index_id = HC_SP_INDEX_EQ_CONS;
	eq_data.sb_id = DEF_SB_ID;
	storm_memset_eq_data(bp, &eq_data, BP_FUNC(bp));
}

/*
 * Quiesce this PF: stop the Tx path and clear its enable bit in the NIG
 * so the HW stops forwarding traffic to/from this function.
 */
static void bnx2x_e1h_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	bnx2x_tx_disable(bp);

	REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
}

/*
 * Re-enable this PF in the NIG and wake all Tx queues.
 * Counterpart of bnx2x_e1h_disable().
 */
static void bnx2x_e1h_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);

	/* Tx queue should be only reenabled */
	netif_tx_wake_all_queues(bp->dev);

	/*
	 * Should not call netif_carrier_on since it will be called if the link
	 * is up when checking for link state
	 */
}

#define DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED 3

/*
 * Fill the ethernet section of the drv_info buffer that is later handed
 * to the management FW (MCP) via shmem2 (see bnx2x_handle_drv_info_req()).
 */
static void bnx2x_drv_info_ether_stat(struct bnx2x *bp)
{
	struct eth_stats_info *ether_stat =
		&bp->slowpath->drv_info_to_mcp.ether_stat;

	/* leave last char as NULL */
	memcpy(ether_stat->version, DRV_MODULE_VERSION,
	       ETH_STAT_INFO_VERSION_LEN - 1);

	/* collect up to 3 locally configured MAC addresses from the
	 * classification object of queue 0
	 */
	bp->fp[0].mac_obj.get_n_elements(bp, &bp->fp[0].mac_obj,
					DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED,
					ether_stat->mac_local);

	ether_stat->mtu_size = bp->dev->mtu;

	if (bp->dev->features & NETIF_F_RXCSUM)
		ether_stat->feature_flags |= FEATURE_ETH_CHKSUM_OFFLOAD_MASK;
	if (bp->dev->features & NETIF_F_TSO)
		ether_stat->feature_flags |= FEATURE_ETH_LSO_MASK;
	ether_stat->feature_flags |= bp->common.boot_mode;

	ether_stat->promiscuous_mode = (bp->dev->flags & IFF_PROMISC) ? 1 : 0;

	ether_stat->txq_size = bp->tx_ring_size;
	ether_stat->rxq_size = bp->rx_ring_size;
}

/*
 * Fill the FCoE section of the drv_info buffer (CNIC builds only):
 * local FIP MAC, DCBX priority, and 64-bit Rx/Tx byte/frame counters
 * accumulated from the FW global FCoE stats plus the per-queue
 * tstorm (Rx) and xstorm (Tx) statistics.
 */
static void bnx2x_drv_info_fcoe_stat(struct bnx2x *bp)
{
#ifdef BCM_CNIC
	struct bnx2x_dcbx_app_params *app = &bp->dcbx_port_params.app;
	struct fcoe_stats_info *fcoe_stat =
		&bp->slowpath->drv_info_to_mcp.fcoe_stat;

	memcpy(fcoe_stat->mac_local, bp->fip_mac, ETH_ALEN);

	fcoe_stat->qos_priority =
		app->traffic_type_priority[LLFC_TRAFFIC_TYPE_FCOE];

	/* insert FCoE stats from ramrod response */
	if (!NO_FCOE(bp)) {
		struct tstorm_per_queue_stats *fcoe_q_tstorm_stats =
			&bp->fw_stats_data->queue_stats[FCOE_IDX].
			tstorm_queue_statistics;

		struct xstorm_per_queue_stats *fcoe_q_xstorm_stats =
			&bp->fw_stats_data->queue_stats[FCOE_IDX].
			xstorm_queue_statistics;

		struct fcoe_statistics_params *fw_fcoe_stat =
			&bp->fw_stats_data->fcoe;

		/* Rx bytes = FW global counter + ucast/bcast/mcast bytes */
		ADD_64(fcoe_stat->rx_bytes_hi, 0, fcoe_stat->rx_bytes_lo,
		       fw_fcoe_stat->rx_stat0.fcoe_rx_byte_cnt);

		ADD_64(fcoe_stat->rx_bytes_hi,
		       fcoe_q_tstorm_stats->rcv_ucast_bytes.hi,
		       fcoe_stat->rx_bytes_lo,
		       fcoe_q_tstorm_stats->rcv_ucast_bytes.lo);

		ADD_64(fcoe_stat->rx_bytes_hi,
		       fcoe_q_tstorm_stats->rcv_bcast_bytes.hi,
		       fcoe_stat->rx_bytes_lo,
		       fcoe_q_tstorm_stats->rcv_bcast_bytes.lo);

		ADD_64(fcoe_stat->rx_bytes_hi,
		       fcoe_q_tstorm_stats->rcv_mcast_bytes.hi,
		       fcoe_stat->rx_bytes_lo,
		       fcoe_q_tstorm_stats->rcv_mcast_bytes.lo);

		/* Rx frames = FW global counter + ucast/bcast/mcast pkts */
		ADD_64(fcoe_stat->rx_frames_hi, 0, fcoe_stat->rx_frames_lo,
		       fw_fcoe_stat->rx_stat0.fcoe_rx_pkt_cnt);

		ADD_64(fcoe_stat->rx_frames_hi, 0, fcoe_stat->rx_frames_lo,
		       fcoe_q_tstorm_stats->rcv_ucast_pkts);

		ADD_64(fcoe_stat->rx_frames_hi, 0, fcoe_stat->rx_frames_lo,
		       fcoe_q_tstorm_stats->rcv_bcast_pkts);

		ADD_64(fcoe_stat->rx_frames_hi, 0, fcoe_stat->rx_frames_lo,
		       fcoe_q_tstorm_stats->rcv_mcast_pkts);

		/* Tx bytes = FW global counter + ucast/bcast/mcast bytes */
		ADD_64(fcoe_stat->tx_bytes_hi, 0, fcoe_stat->tx_bytes_lo,
		       fw_fcoe_stat->tx_stat.fcoe_tx_byte_cnt);

		ADD_64(fcoe_stat->tx_bytes_hi,
		       fcoe_q_xstorm_stats->ucast_bytes_sent.hi,
		       fcoe_stat->tx_bytes_lo,
		       fcoe_q_xstorm_stats->ucast_bytes_sent.lo);

		ADD_64(fcoe_stat->tx_bytes_hi,
		       fcoe_q_xstorm_stats->bcast_bytes_sent.hi,
		       fcoe_stat->tx_bytes_lo,
		       fcoe_q_xstorm_stats->bcast_bytes_sent.lo);

		ADD_64(fcoe_stat->tx_bytes_hi,
		       fcoe_q_xstorm_stats->mcast_bytes_sent.hi,
		       fcoe_stat->tx_bytes_lo,
		       fcoe_q_xstorm_stats->mcast_bytes_sent.lo);

		/* Tx frames = FW global counter + ucast/bcast/mcast pkts */
		ADD_64(fcoe_stat->tx_frames_hi, 0, fcoe_stat->tx_frames_lo,
		       fw_fcoe_stat->tx_stat.fcoe_tx_pkt_cnt);

		ADD_64(fcoe_stat->tx_frames_hi, 0, fcoe_stat->tx_frames_lo,
		       fcoe_q_xstorm_stats->ucast_pkts_sent);

		ADD_64(fcoe_stat->tx_frames_hi, 0, fcoe_stat->tx_frames_lo,
		       fcoe_q_xstorm_stats->bcast_pkts_sent);

		ADD_64(fcoe_stat->tx_frames_hi, 0, fcoe_stat->tx_frames_lo,
		       fcoe_q_xstorm_stats->mcast_pkts_sent);
	}

	/* ask L5 driver to add data to the struct */
	bnx2x_cnic_notify(bp, CNIC_CTL_FCOE_STATS_GET_CMD);
#endif
}

/*
 * Fill the iSCSI section of the drv_info buffer (CNIC builds only):
 * local iSCSI MAC and DCBX priority; the rest is filled by the L5 driver.
 */
static void bnx2x_drv_info_iscsi_stat(struct bnx2x *bp)
{
#ifdef BCM_CNIC
	struct bnx2x_dcbx_app_params *app = &bp->dcbx_port_params.app;
	struct iscsi_stats_info *iscsi_stat =
		&bp->slowpath->drv_info_to_mcp.iscsi_stat;

	memcpy(iscsi_stat->mac_local, bp->cnic_eth_dev.iscsi_mac, ETH_ALEN);

	iscsi_stat->qos_priority =
		app->traffic_type_priority[LLFC_TRAFFIC_TYPE_ISCSI];

	/* ask L5 driver to add data to the struct */
	bnx2x_cnic_notify(bp, CNIC_CTL_ISCSI_STATS_GET_CMD);
#endif
}

/* called due to MCP event (on pmf):
 *	reread new bandwidth configuration
 *	configure FW
 *	notify others function about the change
 */
static inline void bnx2x_config_mf_bw(struct bnx2x *bp)
{
	if (bp->link_vars.link_up) {
		bnx2x_cmng_fns_init(bp, true, CMNG_FNS_MINMAX);
		bnx2x_link_sync_notify(bp);
	}
	storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
}

/* Apply the new min/max bandwidth config and ACK the request to the MCP */
static inline void bnx2x_set_mf_bw(struct bnx2x *bp)
{
	bnx2x_config_mf_bw(bp);
	bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW_ACK, 0);
}

/*
 * Service a drv_info request from the management FW: validate the
 * interface version, fill the requested stats section into the slowpath
 * buffer, publish its DMA address in shmem2 and ACK (or NACK) the MCP.
 */
static void bnx2x_handle_drv_info_req(struct bnx2x *bp)
{
	enum
drv_info_opcode op_code;
	u32 drv_info_ctl = SHMEM2_RD(bp, drv_info_control);

	/* if drv_info version supported by MFW doesn't match - send NACK */
	if ((drv_info_ctl & DRV_INFO_CONTROL_VER_MASK) != DRV_INFO_CUR_VER) {
		bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_NACK, 0);
		return;
	}

	op_code = (drv_info_ctl & DRV_INFO_CONTROL_OP_CODE_MASK) >>
		  DRV_INFO_CONTROL_OP_CODE_SHIFT;

	/* clear the whole union so stale data from a previous request is
	 * never reported to the MCP
	 */
	memset(&bp->slowpath->drv_info_to_mcp, 0,
	       sizeof(union drv_info_to_mcp));

	switch (op_code) {
	case ETH_STATS_OPCODE:
		bnx2x_drv_info_ether_stat(bp);
		break;
	case FCOE_STATS_OPCODE:
		bnx2x_drv_info_fcoe_stat(bp);
		break;
	case ISCSI_STATS_OPCODE:
		bnx2x_drv_info_iscsi_stat(bp);
		break;
	default:
		/* if op code isn't supported - send NACK */
		bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_NACK, 0);
		return;
	}

	/* if we got drv_info attn from MFW then these fields are defined in
	 * shmem2 for sure
	 */
	SHMEM2_WR(bp, drv_info_host_addr_lo,
		U64_LO(bnx2x_sp_mapping(bp, drv_info_to_mcp)));
	SHMEM2_WR(bp, drv_info_host_addr_hi,
		U64_HI(bnx2x_sp_mapping(bp, drv_info_to_mcp)));

	bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_ACK, 0);
}

/*
 * Handle a DCC (Dynamic Control Command) event from the MCP:
 * per-function enable/disable and bandwidth re-allocation. Any bits
 * left unhandled in dcc_event cause a DCC_FAILURE report to the MCP.
 */
static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
{
	DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);

	if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {

		/*
		 * This is the only place besides the function initialization
		 * where the bp->flags can change so it is done without any
		 * locks
		 */
		if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) {
			DP(BNX2X_MSG_MCP, "mf_cfg function disabled\n");
			bp->flags |= MF_FUNC_DIS;

			bnx2x_e1h_disable(bp);
		} else {
			DP(BNX2X_MSG_MCP, "mf_cfg function enabled\n");
			bp->flags &= ~MF_FUNC_DIS;

			bnx2x_e1h_enable(bp);
		}
		dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
	}
	if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
		bnx2x_config_mf_bw(bp);
		dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
	}

	/* Report results to MCP */
	if (dcc_event)
		bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE, 0);
	else
		bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK, 0);
}

/* must be called under the spq lock */
static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
{
	struct eth_spe *next_spe = bp->spq_prod_bd;

	/* advance the producer BD pointer, wrapping to the ring start when
	 * the last BD is reached
	 */
	if (bp->spq_prod_bd == bp->spq_last_bd) {
		bp->spq_prod_bd = bp->spq;
		bp->spq_prod_idx = 0;
		DP(BNX2X_MSG_SP, "end of spq\n");
	} else {
		bp->spq_prod_bd++;
		bp->spq_prod_idx++;
	}

	return next_spe;
}

/* must be called under the spq lock */
static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	/*
	 * Make sure that BD data is updated before writing the producer:
	 * BD data is written to the memory, the producer is read from the
	 * memory, thus we need a full memory barrier to ensure the ordering.
	 */
	mb();

	REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
		 bp->spq_prod_idx);
	mmiowb();
}

/**
 * bnx2x_is_contextless_ramrod - check if the current command ends on EQ
 *
 * @cmd:	command to check
 * @cmd_type:	command type
 */
static inline bool bnx2x_is_contextless_ramrod(int cmd, int cmd_type)
{
	if ((cmd_type == NONE_CONNECTION_TYPE) ||
	    (cmd == RAMROD_CMD_ID_ETH_FORWARD_SETUP) ||
	    (cmd == RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES) ||
	    (cmd == RAMROD_CMD_ID_ETH_FILTER_RULES) ||
	    (cmd == RAMROD_CMD_ID_ETH_MULTICAST_RULES) ||
	    (cmd == RAMROD_CMD_ID_ETH_SET_MAC) ||
	    (cmd == RAMROD_CMD_ID_ETH_RSS_UPDATE))
		return true;
	else
		return false;
}


/**
 * bnx2x_sp_post - place a single command on an SP ring
 *
 * @bp:		driver handle
 * @command:	command to place (e.g. SETUP, FILTER_RULES, etc.)
 * @cid:	SW CID the command is related to
 * @data_hi:	command private data address (high 32 bits)
 * @data_lo:	command private data address (low 32 bits)
 * @cmd_type:	command type (e.g. NONE, ETH)
 *
 * SP data is handled as if it's always an address pair, thus data fields are
 * not swapped to little endian in upper functions. Instead this function swaps
 * data as if it's two u32 fields.
*/ int bnx2x_sp_post(struct bnx2x *bp, int command, int cid, u32 data_hi, u32 data_lo, int cmd_type) { struct eth_spe *spe; u16 type; bool common = bnx2x_is_contextless_ramrod(command, cmd_type); #ifdef BNX2X_STOP_ON_ERROR if (unlikely(bp->panic)) { BNX2X_ERR("Can't post SP when there is panic\n"); return -EIO; } #endif spin_lock_bh(&bp->spq_lock); if (common) { if (!atomic_read(&bp->eq_spq_left)) { BNX2X_ERR("BUG! EQ ring full!\n"); spin_unlock_bh(&bp->spq_lock); bnx2x_panic(); return -EBUSY; } } else if (!atomic_read(&bp->cq_spq_left)) { BNX2X_ERR("BUG! SPQ ring full!\n"); spin_unlock_bh(&bp->spq_lock); bnx2x_panic(); return -EBUSY; } spe = bnx2x_sp_get_next(bp); /* CID needs port number to be encoded int it */ spe->hdr.conn_and_cmd_data = cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) | HW_CID(bp, cid)); type = (cmd_type << SPE_HDR_CONN_TYPE_SHIFT) & SPE_HDR_CONN_TYPE; type |= ((BP_FUNC(bp) << SPE_HDR_FUNCTION_ID_SHIFT) & SPE_HDR_FUNCTION_ID); spe->hdr.type = cpu_to_le16(type); spe->data.update_data_addr.hi = cpu_to_le32(data_hi); spe->data.update_data_addr.lo = cpu_to_le32(data_lo); /* * It's ok if the actual decrement is issued towards the memory * somewhere between the spin_lock and spin_unlock. Thus no * more explict memory barrier is needed. */ if (common) atomic_dec(&bp->eq_spq_left); else atomic_dec(&bp->cq_spq_left); DP(BNX2X_MSG_SP, "SPQE[%x] (%x:%x) (cmd, common?) 
(%d,%d) hw_cid %x data (%x:%x) type(0x%x) left (CQ, EQ) (%x,%x)\n", bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) + (void *)bp->spq_prod_bd - (void *)bp->spq), command, common, HW_CID(bp, cid), data_hi, data_lo, type, atomic_read(&bp->cq_spq_left), atomic_read(&bp->eq_spq_left)); bnx2x_sp_prod_update(bp); spin_unlock_bh(&bp->spq_lock); return 0; } /* acquire split MCP access lock register */ static int bnx2x_acquire_alr(struct bnx2x *bp) { u32 j, val; int rc = 0; might_sleep(); for (j = 0; j < 1000; j++) { val = (1UL << 31); REG_WR(bp, GRCBASE_MCP + 0x9c, val); val = REG_RD(bp, GRCBASE_MCP + 0x9c); if (val & (1L << 31)) break; msleep(5); } if (!(val & (1L << 31))) { BNX2X_ERR("Cannot acquire MCP access lock register\n"); rc = -EBUSY; } return rc; } /* release split MCP access lock register */ static void bnx2x_release_alr(struct bnx2x *bp) { REG_WR(bp, GRCBASE_MCP + 0x9c, 0); } #define BNX2X_DEF_SB_ATT_IDX 0x0001 #define BNX2X_DEF_SB_IDX 0x0002 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp) { struct host_sp_status_block *def_sb = bp->def_status_blk; u16 rc = 0; barrier(); /* status block is written to by the chip */ if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) { bp->def_att_idx = def_sb->atten_status_block.attn_bits_index; rc |= BNX2X_DEF_SB_ATT_IDX; } if (bp->def_idx != def_sb->sp_sb.running_index) { bp->def_idx = def_sb->sp_sb.running_index; rc |= BNX2X_DEF_SB_IDX; } /* Do not reorder: indecies reading should complete before handling */ barrier(); return rc; } /* * slow path service functions */ static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted) { int port = BP_PORT(bp); u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 : MISC_REG_AEU_MASK_ATTN_FUNC_0; u32 nig_int_mask_addr = port ? 
NIG_REG_MASK_INTERRUPT_PORT1 :
				NIG_REG_MASK_INTERRUPT_PORT0;
	u32 aeu_mask;
	u32 nig_mask = 0;
	u32 reg_addr;

	if (bp->attn_state & asserted)
		BNX2X_ERR("IGU ERROR\n");

	/* mask the newly asserted bits in the AEU so they don't re-fire
	 * while being handled; protected by the per-port HW lock
	 */
	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
	aeu_mask = REG_RD(bp, aeu_addr);

	DP(NETIF_MSG_HW, "aeu_mask %x  newly asserted %x\n",
	   aeu_mask, asserted);

	aeu_mask &= ~(asserted & 0x3ff);
	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);

	REG_WR(bp, aeu_addr, aeu_mask);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
	bp->attn_state |= asserted;
	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);

	if (asserted & ATTN_HARD_WIRED_MASK) {
		if (asserted & ATTN_NIG_FOR_FUNC) {

			bnx2x_acquire_phy_lock(bp);

			/* save nig interrupt mask */
			nig_mask = REG_RD(bp, nig_int_mask_addr);

			/* If nig_mask is not set, no need to call the
			 * update function.
			 */
			if (nig_mask) {
				REG_WR(bp, nig_int_mask_addr, 0);

				bnx2x_link_attn(bp);
			}

			/* handle unicore attn? */
		}
		if (asserted & ATTN_SW_TIMER_4_FUNC)
			DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");

		if (asserted & GPIO_2_FUNC)
			DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");

		if (asserted & GPIO_3_FUNC)
			DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");

		if (asserted & GPIO_4_FUNC)
			DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");

		/* general attentions 1-3 belong to port 0, 4-6 to port 1;
		 * each is acknowledged by writing 0 to its AEU register
		 */
		if (port == 0) {
			if (asserted & ATTN_GENERAL_ATTN_1) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_2) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_3) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
			}
		} else {
			if (asserted & ATTN_GENERAL_ATTN_4) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_5) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_6) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
			}
		}

	} /* if hardwired */

	if (bp->common.int_block == INT_BLOCK_HC)
		reg_addr = (HC_REG_COMMAND_REG + port*32 +
			    COMMAND_REG_ATTN_BITS_SET);
	else
		reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_SET_UPPER*8);

	DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", asserted,
	   (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
	REG_WR(bp, reg_addr, asserted);

	/* now set back the mask */
	if (asserted & ATTN_NIG_FOR_FUNC) {
		REG_WR(bp, nig_int_mask_addr, nig_mask);
		bnx2x_release_phy_lock(bp);
	}
}

/*
 * Record a fan failure in shmem, warn the user and schedule an unload
 * of the device from the sp_rtnl task.
 */
static inline void bnx2x_fan_failure(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 ext_phy_config;
	/* mark the failure */
	ext_phy_config =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].external_phy_config);

	ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
	ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
	SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
		 ext_phy_config);

	/* log the failure */
	netdev_err(bp->dev, "Fan Failure on Network Controller has caused the driver to shutdown the card to prevent permanent damage.\n"
			    "Please contact OEM Support for assistance\n");

	/*
	 * Schedule device reset (unload)
	 * This is due to some boards consuming sufficient power when driver is
	 * up to overheat if fan fails.
	 */
	smp_mb__before_clear_bit();
	set_bit(BNX2X_SP_RTNL_FAN_FAILURE, &bp->sp_rtnl_state);
	smp_mb__after_clear_bit();
	schedule_delayed_work(&bp->sp_rtnl_task, 0);

}

/* Handle de-asserted attention bits of AEU group 0 (SPIO5 fan failure,
 * module detect, fatal HW block attentions).
 */
static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
{
	int port = BP_PORT(bp);
	int reg_offset;
	u32 val;

	reg_offset = (port ?
MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
			      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);

	if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {

		/* disable the SPIO5 input in the AEU so the fan-failure
		 * attention does not keep firing
		 */
		val = REG_RD(bp, reg_offset);
		val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("SPIO5 hw attention\n");

		/* Fan failure attention */
		bnx2x_hw_reset_phy(&bp->link_params);
		bnx2x_fan_failure(bp);
	}

	if ((attn & bp->link_vars.aeu_int_mask) && bp->port.pmf) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_handle_module_detect_int(&bp->link_params);
		bnx2x_release_phy_lock(bp);
	}

	if (attn & HW_INTERRUT_ASSERT_SET_0) {

		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
			  (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
		bnx2x_panic();
	}
}

/* Handle de-asserted attention bits of AEU group 1 (doorbell queue and
 * fatal HW block attentions).
 */
static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {

		val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
		BNX2X_ERR("DB hw attention 0x%x\n", val);
		/* DORQ discard attention */
		if (val & 0x2)
			BNX2X_ERR("FATAL error from DORQ\n");
	}

	if (attn & HW_INTERRUT_ASSERT_SET_1) {

		int port = BP_PORT(bp);
		int reg_offset;

		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);

		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
			  (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
		bnx2x_panic();
	}
}

/* Handle de-asserted attention bits of AEU group 2 (CFC, PXP and
 * fatal HW block attentions).
 */
static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {

		val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
		BNX2X_ERR("CFC hw attention 0x%x\n", val);
		/* CFC error attention */
		if (val & 0x2)
			BNX2X_ERR("FATAL error from CFC\n");
	}

	if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
		val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
		BNX2X_ERR("PXP hw attention-0 0x%x\n", val);
		/* RQ_USDMDP_FIFO_OVERFLOW */
		if (val & 0x18000)
			BNX2X_ERR("FATAL error from PXP\n");

		if (!CHIP_IS_E1x(bp)) {
			val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_1);
			BNX2X_ERR("PXP hw attention-1 0x%x\n", val);
		}
	}

	if (attn & HW_INTERRUT_ASSERT_SET_2) {

		int port = BP_PORT(bp);
		int reg_offset;

		reg_offset = (port ?
MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);

		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
			  (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
		bnx2x_panic();
	}
}

/* Handle de-asserted attention bits of AEU group 3: general attentions
 * (PMF link events, MC/MCP asserts) and latched attentions.
 */
static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {

		if (attn & BNX2X_PMF_LINK_ASSERT) {
			int func = BP_FUNC(bp);

			/* ack the per-function general attention and reread
			 * the MF configuration the MCP may have changed
			 */
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
			bp->mf_config[BP_VN(bp)] = MF_CFG_RD(bp,
					func_mf_config[BP_ABS_FUNC(bp)].config);
			val = SHMEM_RD(bp,
				       func_mb[BP_FW_MB_IDX(bp)].drv_status);
			if (val & DRV_STATUS_DCC_EVENT_MASK)
				bnx2x_dcc_event(bp,
					    (val & DRV_STATUS_DCC_EVENT_MASK));

			if (val & DRV_STATUS_SET_MF_BW)
				bnx2x_set_mf_bw(bp);

			if (val & DRV_STATUS_DRV_INFO_REQ)
				bnx2x_handle_drv_info_req(bp);
			if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
				bnx2x_pmf_update(bp);

			if (bp->port.pmf &&
			    (val & DRV_STATUS_DCBX_NEGOTIATION_RESULTS) &&
				bp->dcbx_enabled > 0)
				/* start dcbx state machine */
				bnx2x_dcbx_set_params(bp,
					BNX2X_DCBX_STATE_NEG_RECEIVED);
			if (bp->link_vars.periodic_flags &
			    PERIODIC_FLAGS_LINK_EVENT) {
				/*  sync with link */
				bnx2x_acquire_phy_lock(bp);
				bp->link_vars.periodic_flags &=
					~PERIODIC_FLAGS_LINK_EVENT;
				bnx2x_release_phy_lock(bp);
				if (IS_MF(bp))
					bnx2x_link_sync_notify(bp);
				bnx2x_link_report(bp);
			}
			/* Always call it here: bnx2x_link_report() will
			 * prevent the link indication duplication.
			 */
			bnx2x__link_status_update(bp);
		} else if (attn & BNX2X_MC_ASSERT_BITS) {

			BNX2X_ERR("MC assert!\n");
			bnx2x_mc_assert(bp);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
			bnx2x_panic();

		} else if (attn & BNX2X_MCP_ASSERT) {

			BNX2X_ERR("MCP assert!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
			bnx2x_fw_dump(bp);

		} else
			BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
	}

	if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
		BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
		if (attn & BNX2X_GRC_TIMEOUT) {
			val = CHIP_IS_E1(bp) ? 0 :
					REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN);
			BNX2X_ERR("GRC time-out 0x%08x\n", val);
		}
		if (attn & BNX2X_GRC_RSV) {
			val = CHIP_IS_E1(bp) ? 0 :
					REG_RD(bp, MISC_REG_GRC_RSV_ATTN);
			BNX2X_ERR("GRC reserved 0x%08x\n", val);
		}
		REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
	}
}

/*
 * Bits map:
 * 0-7   - Engine0 load counter.
 * 8-15  - Engine1 load counter.
 * 16    - Engine0 RESET_IN_PROGRESS bit.
 * 17    - Engine1 RESET_IN_PROGRESS bit.
 * 18    - Engine0 ONE_IS_LOADED. Set when there is at least one active function
 *         on the engine
 * 19    - Engine1 ONE_IS_LOADED.
 * 20    - Chip reset flow bit. When set none-leader must wait for both engines
 *         leader to complete (check for both RESET_IN_PROGRESS bits and not for
 *         just the one belonging to its engine).
 *
 */
#define BNX2X_RECOVERY_GLOB_REG		MISC_REG_GENERIC_POR_1

#define BNX2X_PATH0_LOAD_CNT_MASK	0x000000ff
#define BNX2X_PATH0_LOAD_CNT_SHIFT	0
#define BNX2X_PATH1_LOAD_CNT_MASK	0x0000ff00
#define BNX2X_PATH1_LOAD_CNT_SHIFT	8
#define BNX2X_PATH0_RST_IN_PROG_BIT	0x00010000
#define BNX2X_PATH1_RST_IN_PROG_BIT	0x00020000
#define BNX2X_GLOBAL_RESET_BIT		0x00040000

/*
 * Set the GLOBAL_RESET bit.
 *
 * Should be run under rtnl lock
 */
void bnx2x_set_reset_global(struct bnx2x *bp)
{
	u32 val;
	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
	val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
	REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val | BNX2X_GLOBAL_RESET_BIT);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
}

/*
 * Clear the GLOBAL_RESET bit.
 *
 * Should be run under rtnl lock
 */
static inline void bnx2x_clear_reset_global(struct bnx2x *bp)
{
	u32 val;
	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
	val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
	REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val & (~BNX2X_GLOBAL_RESET_BIT));
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
}

/*
 * Checks the GLOBAL_RESET bit.
 *
 * should be run under rtnl lock
 */
static inline bool bnx2x_reset_is_global(struct bnx2x *bp)
{
	u32 val	= REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);

	DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
	return (val & BNX2X_GLOBAL_RESET_BIT) ? true : false;
}

/*
 * Clear RESET_IN_PROGRESS bit for the current engine.
 *
 * Should be run under rtnl lock
 */
static inline void bnx2x_set_reset_done(struct bnx2x *bp)
{
	u32 val;
	u32 bit = BP_PATH(bp) ?
		BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT;
	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
	val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);

	/* Clear the bit */
	val &= ~bit;
	REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);

	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
}

/*
 * Set RESET_IN_PROGRESS for the current engine.
 *
 * should be run under rtnl lock
 */
void bnx2x_set_reset_in_progress(struct bnx2x *bp)
{
	u32 val;
	u32 bit = BP_PATH(bp) ?
		BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT;
	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
	val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);

	/* Set the bit */
	val |= bit;
	REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
}

/*
 * Checks the RESET_IN_PROGRESS bit for the given engine.
 * should be run under rtnl lock
 */
bool bnx2x_reset_is_done(struct bnx2x *bp, int engine)
{
	u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
	u32 bit = engine ?
		BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT;

	/* return false if bit is set */
	return (val & bit) ? false : true;
}

/*
 * set pf load for the current pf.
 *
 * should be run under rtnl lock
 */
void bnx2x_set_pf_load(struct bnx2x *bp)
{
	u32 val1, val;
	u32 mask = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK :
			     BNX2X_PATH0_LOAD_CNT_MASK;
	u32 shift = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_SHIFT :
			     BNX2X_PATH0_LOAD_CNT_SHIFT;

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
	val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);

	DP(NETIF_MSG_IFUP, "Old GEN_REG_VAL=0x%08x\n", val);

	/* get the current counter value */
	val1 = (val & mask) >> shift;

	/* set bit of that PF */
	val1 |= (1 << bp->pf_num);

	/* clear the old value */
	val &= ~mask;

	/* set the new one */
	val |= ((val1 << shift) & mask);

	REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
}

/**
 * bnx2x_clear_pf_load - clear pf load mark
 *
 * @bp:		driver handle
 *
 * Should be run under rtnl lock.
 * Decrements the load counter for the current engine. Returns
 * whether other functions are still loaded
 */
bool bnx2x_clear_pf_load(struct bnx2x *bp)
{
	u32 val1, val;
	u32 mask = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK :
			     BNX2X_PATH0_LOAD_CNT_MASK;
	u32 shift = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_SHIFT :
			     BNX2X_PATH0_LOAD_CNT_SHIFT;

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
	val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
	DP(NETIF_MSG_IFDOWN, "Old GEN_REG_VAL=0x%08x\n", val);

	/* get the current counter value */
	val1 = (val & mask) >> shift;

	/* clear bit of that PF */
	val1 &= ~(1 << bp->pf_num);

	/* clear the old value */
	val &= ~mask;

	/* set the new one */
	val |= ((val1 << shift) & mask);

	REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
	return val1 != 0;
}

/*
 * Read the load status for the current engine.
 *
 * should be run under rtnl lock
 */
static inline bool bnx2x_get_load_status(struct bnx2x *bp, int engine)
{
	u32 mask = (engine ? BNX2X_PATH1_LOAD_CNT_MASK :
			     BNX2X_PATH0_LOAD_CNT_MASK);
	u32 shift = (engine ?
BNX2X_PATH1_LOAD_CNT_SHIFT :
			     BNX2X_PATH0_LOAD_CNT_SHIFT);
	u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);

	DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "GLOB_REG=0x%08x\n", val);

	val = (val & mask) >> shift;

	DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "load mask for engine %d = 0x%x\n",
	   engine, val);

	return val != 0;
}

/*
 * Reset the load status for the current engine.
 */
static inline void bnx2x_clear_load_status(struct bnx2x *bp)
{
	u32 val;
	u32 mask = (BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK :
			BNX2X_PATH0_LOAD_CNT_MASK);
	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
	val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
	REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val & (~mask));
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
}

/* Print one block name in the comma-separated parity report;
 * idx == 0 means this is the first entry (no leading comma).
 */
static inline void _print_next_block(int idx, const char *blk)
{
	pr_cont("%s%s", idx ? ", " : "", blk);
}

/* Walk the asserted bits of AEU parity group 0 and report the
 * corresponding HW block names; returns the updated entry counter.
 */
static inline int bnx2x_check_blocks_with_parity0(u32 sig, int par_num,
						  bool print)
{
	int i = 0;
	u32 cur_bit = 0;
	for (i = 0; sig; i++) {
		cur_bit = ((u32)0x1 << i);
		if (sig & cur_bit) {
			switch (cur_bit) {
			case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
				if (print)
					_print_next_block(par_num++, "BRB");
				break;
			case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
				if (print)
					_print_next_block(par_num++, "PARSER");
				break;
			case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
				if (print)
					_print_next_block(par_num++, "TSDM");
				break;
			case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
				if (print)
					_print_next_block(par_num++,
							  "SEARCHER");
				break;
			case AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR:
				if (print)
					_print_next_block(par_num++, "TCM");
				break;
			case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
				if (print)
					_print_next_block(par_num++, "TSEMI");
				break;
			case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
				if (print)
					_print_next_block(par_num++, "XPB");
				break;
			}

			/* Clear the bit */
			sig &= ~cur_bit;
		}
	}

	return par_num;
}

/* Walk the asserted bits of AEU parity group 1; a VAUX PCI CORE parity
 * error additionally marks the failure as global (chip-wide recovery).
 */
static inline int bnx2x_check_blocks_with_parity1(u32 sig, int par_num,
						  bool *global, bool print)
{
	int i = 0;
	u32 cur_bit = 0;
	for (i = 0; sig; i++) {
		cur_bit = ((u32)0x1 << i);
		if (sig & cur_bit) {
			switch (cur_bit) {
			case AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR:
				if (print)
					_print_next_block(par_num++, "PBF");
				break;
			case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
				if (print)
					_print_next_block(par_num++, "QM");
				break;
			case AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR:
				if (print)
					_print_next_block(par_num++, "TM");
				break;
			case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
				if (print)
					_print_next_block(par_num++, "XSDM");
				break;
			case AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR:
				if (print)
					_print_next_block(par_num++, "XCM");
				break;
			case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
				if (print)
					_print_next_block(par_num++, "XSEMI");
				break;
			case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
				if (print)
					_print_next_block(par_num++,
							  "DOORBELLQ");
				break;
			case AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR:
				if (print)
					_print_next_block(par_num++, "NIG");
				break;
			case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
				if (print)
					_print_next_block(par_num++,
							  "VAUX PCI CORE");
				*global = true;
				break;
			case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
				if (print)
					_print_next_block(par_num++, "DEBUG");
				break;
			case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
				if (print)
					_print_next_block(par_num++, "USDM");
				break;
			case AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR:
				if (print)
					_print_next_block(par_num++, "UCM");
				break;
			case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
				if (print)
					_print_next_block(par_num++, "USEMI");
				break;
			case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
				if (print)
					_print_next_block(par_num++, "UPB");
				break;
			case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
				if (print)
					_print_next_block(par_num++, "CSDM");
				break;
			case AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR:
				if (print)
					_print_next_block(par_num++, "CCM");
				break;
			}

			/* Clear the bit */
			sig &= ~cur_bit;
		}
	}

	return par_num;
}

/* Walk the asserted bits of AEU parity group 2 and report the
 * corresponding HW block names.
 */
static inline int bnx2x_check_blocks_with_parity2(u32 sig, int par_num,
						  bool print)
{
	int i = 0;
	u32 cur_bit = 0;
	for (i = 0; sig; i++) {
		cur_bit = ((u32)0x1 << i);
		if (sig & cur_bit) {
			switch (cur_bit) {
			case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
				if
(print)
					_print_next_block(par_num++, "CSEMI");
				break;
			case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
				if (print)
					_print_next_block(par_num++, "PXP");
				break;
			case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
				if (print)
					_print_next_block(par_num++,
					"PXPPCICLOCKCLIENT");
				break;
			case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
				if (print)
					_print_next_block(par_num++, "CFC");
				break;
			case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
				if (print)
					_print_next_block(par_num++, "CDU");
				break;
			case AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR:
				if (print)
					_print_next_block(par_num++, "DMAE");
				break;
			case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
				if (print)
					_print_next_block(par_num++, "IGU");
				break;
			case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
				if (print)
					_print_next_block(par_num++, "MISC");
				break;
			}

			/* Clear the bit */
			sig &= ~cur_bit;
		}
	}

	return par_num;
}

/* Walk the asserted bits of AEU parity group 3 (MCP latched parities);
 * every hit marks the failure as global (chip-wide recovery required).
 */
static inline int bnx2x_check_blocks_with_parity3(u32 sig, int par_num,
						  bool *global, bool print)
{
	int i = 0;
	u32 cur_bit = 0;
	for (i = 0; sig; i++) {
		cur_bit = ((u32)0x1 << i);
		if (sig & cur_bit) {
			switch (cur_bit) {
			case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
				if (print)
					_print_next_block(par_num++,
							  "MCP ROM");
				*global = true;
				break;
			case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
				if (print)
					_print_next_block(par_num++,
							  "MCP UMP RX");
				*global = true;
				break;
			case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
				if (print)
					_print_next_block(par_num++,
							  "MCP UMP TX");
				*global = true;
				break;
			case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
				if (print)
					_print_next_block(par_num++,
							  "MCP SCPAD");
				*global = true;
				break;
			}

			/* Clear the bit */
			sig &= ~cur_bit;
		}
	}

	return par_num;
}

/* Walk the asserted bits of AEU parity group 4 (E2+ only blocks). */
static inline int bnx2x_check_blocks_with_parity4(u32 sig, int par_num,
						  bool print)
{
	int i = 0;
	u32 cur_bit = 0;
	for (i = 0; sig; i++) {
		cur_bit = ((u32)0x1 << i);
		if (sig & cur_bit) {
			switch (cur_bit) {
			case AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR:
				if (print)
					_print_next_block(par_num++,
							  "PGLUE_B");
				break;
			case AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR:
				if (print)
					_print_next_block(par_num++, "ATC");
				break;
			}

			/* Clear the bit */
			sig &= ~cur_bit;
		}
	}

	return par_num;
}

/* Returns true if any of the five attention signal groups carries a
 * parity error; optionally prints the affected blocks and sets *global
 * when a chip-wide (global) recovery is required.
 */
static inline bool bnx2x_parity_attn(struct bnx2x *bp, bool *global, bool print,
				     u32 *sig)
{
	if ((sig[0] & HW_PRTY_ASSERT_SET_0) ||
	    (sig[1] & HW_PRTY_ASSERT_SET_1) ||
	    (sig[2] & HW_PRTY_ASSERT_SET_2) ||
	    (sig[3] & HW_PRTY_ASSERT_SET_3) ||
	    (sig[4] & HW_PRTY_ASSERT_SET_4)) {
		int par_num = 0;
		DP(NETIF_MSG_HW, "Was parity error: HW block parity attention:\n"
			"[0]:0x%08x [1]:0x%08x [2]:0x%08x [3]:0x%08x [4]:0x%08x\n",
			  sig[0] & HW_PRTY_ASSERT_SET_0,
			  sig[1] & HW_PRTY_ASSERT_SET_1,
			  sig[2] & HW_PRTY_ASSERT_SET_2,
			  sig[3] & HW_PRTY_ASSERT_SET_3,
			  sig[4] & HW_PRTY_ASSERT_SET_4);
		if (print)
			netdev_err(bp->dev,
				   "Parity errors detected in blocks: ");
		par_num = bnx2x_check_blocks_with_parity0(
			sig[0] & HW_PRTY_ASSERT_SET_0, par_num, print);
		par_num = bnx2x_check_blocks_with_parity1(
			sig[1] & HW_PRTY_ASSERT_SET_1, par_num, global, print);
		par_num = bnx2x_check_blocks_with_parity2(
			sig[2] & HW_PRTY_ASSERT_SET_2, par_num, print);
		par_num = bnx2x_check_blocks_with_parity3(
			sig[3] & HW_PRTY_ASSERT_SET_3, par_num, global, print);
		par_num = bnx2x_check_blocks_with_parity4(
			sig[4] & HW_PRTY_ASSERT_SET_4, par_num, print);
		if (print)
			pr_cont("\n");
		return true;
	} else
		return false;
}

/**
 * bnx2x_chk_parity_attn - checks for parity attentions.
 *
 * @bp:		driver handle
 * @global:	true if there was a global attention
 * @print:	show parity attention in syslog
 */
bool bnx2x_chk_parity_attn(struct bnx2x *bp, bool *global, bool print)
{
	struct attn_route attn = { {0} };
	int port = BP_PORT(bp);

	attn.sig[0] = REG_RD(bp,
		MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
			     port*4);
	attn.sig[1] = REG_RD(bp,
		MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 +
			     port*4);
	attn.sig[2] = REG_RD(bp,
		MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 +
			     port*4);
	attn.sig[3] = REG_RD(bp,
		MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 +
			     port*4);

	/* the fifth signal group only exists on E2 and newer chips */
	if (!CHIP_IS_E1x(bp))
		attn.sig[4] = REG_RD(bp,
			MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 +
				     port*4);

	return bnx2x_parity_attn(bp, global, print, attn.sig);
}


/* Handle de-asserted attention bits of AEU group 4 (E2+ only):
 * decode and log PGLUE and ATC HW interrupts and parity errors.
 */
static inline void bnx2x_attn_int_deasserted4(struct bnx2x *bp, u32 attn)
{
	u32 val;
	if (attn & AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT) {

		val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS_CLR);
		BNX2X_ERR("PGLUE hw attention 0x%x\n", val);
		if (val & PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR)
			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR\n");
		if (val & PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR)
			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR\n");
		if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN)
			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN\n");
		if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN)
			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN\n");
		if (val &
		    PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN)
			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN\n");
		if (val &
		    PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN)
			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN\n");
		if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN)
			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN\n");
		if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN)
			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN\n");
		if (val & PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW)
			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW\n");
	}
	if (attn & AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT) {
		val = REG_RD(bp, ATC_REG_ATC_INT_STS_CLR);
		BNX2X_ERR("ATC hw attention 0x%x\n", val);
		if (val & ATC_ATC_INT_STS_REG_ADDRESS_ERROR)
			BNX2X_ERR("ATC_ATC_INT_STS_REG_ADDRESS_ERROR\n");
		if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND)
			BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND\n");
		if (val & ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS)
			BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS\n");
		if (val & ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT)
			BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT\n");
		if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR)
			BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR\n");
		if (val & ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU)
			BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU\n");
	}

	if (attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
		    AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)) {
		BNX2X_ERR("FATAL parity attention set4 0x%x\n",
			  (u32)(attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
			  AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)));
	}

}

/* Dispatch all newly de-asserted attention bits: first check for parity
 * errors (which trigger the recovery flow), then hand each AEU signal
 * group to its handler and finally unmask the bits again.
 */
static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
{
	struct attn_route attn, *group_mask;
	int port = BP_PORT(bp);
	int index;
	u32 reg_addr;
	u32 val;
	u32 aeu_mask;
	bool global = false;

	/* need to take HW lock because MCP or other port might also
	   try to handle this event */
	bnx2x_acquire_alr(bp);

	if (bnx2x_chk_parity_attn(bp, &global, true)) {
#ifndef BNX2X_STOP_ON_ERROR
		bp->recovery_state = BNX2X_RECOVERY_INIT;
		schedule_delayed_work(&bp->sp_rtnl_task, 0);
		/* Disable HW interrupts */
		bnx2x_int_disable(bp);
		/* In case of parity errors don't handle attentions so that
		 * other function would "see" parity errors.
*/ #else bnx2x_panic(); #endif bnx2x_release_alr(bp); return; } attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4); attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4); attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4); attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4); if (!CHIP_IS_E1x(bp)) attn.sig[4] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4); else attn.sig[4] = 0; DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x %08x\n", attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3], attn.sig[4]); for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) { if (deasserted & (1 << index)) { group_mask = &bp->attn_group[index]; DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x %08x\n", index, group_mask->sig[0], group_mask->sig[1], group_mask->sig[2], group_mask->sig[3], group_mask->sig[4]); bnx2x_attn_int_deasserted4(bp, attn.sig[4] & group_mask->sig[4]); bnx2x_attn_int_deasserted3(bp, attn.sig[3] & group_mask->sig[3]); bnx2x_attn_int_deasserted1(bp, attn.sig[1] & group_mask->sig[1]); bnx2x_attn_int_deasserted2(bp, attn.sig[2] & group_mask->sig[2]); bnx2x_attn_int_deasserted0(bp, attn.sig[0] & group_mask->sig[0]); } } bnx2x_release_alr(bp); if (bp->common.int_block == INT_BLOCK_HC) reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR); else reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_CLR_UPPER*8); val = ~deasserted; DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", val, (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr); REG_WR(bp, reg_addr, val); if (~bp->attn_state & deasserted) BNX2X_ERR("IGU ERROR\n"); reg_addr = port ? 
MISC_REG_AEU_MASK_ATTN_FUNC_1 : MISC_REG_AEU_MASK_ATTN_FUNC_0; bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port); aeu_mask = REG_RD(bp, reg_addr); DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n", aeu_mask, deasserted); aeu_mask |= (deasserted & 0x3ff); DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask); REG_WR(bp, reg_addr, aeu_mask); bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port); DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state); bp->attn_state &= ~deasserted; DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state); } static void bnx2x_attn_int(struct bnx2x *bp) { /* read local copy of bits */ u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block. attn_bits); u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block. attn_bits_ack); u32 attn_state = bp->attn_state; /* look for changed bits */ u32 asserted = attn_bits & ~attn_ack & ~attn_state; u32 deasserted = ~attn_bits & attn_ack & attn_state; DP(NETIF_MSG_HW, "attn_bits %x attn_ack %x asserted %x deasserted %x\n", attn_bits, attn_ack, asserted, deasserted); if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state)) BNX2X_ERR("BAD attention state\n"); /* handle bits that were raised */ if (asserted) bnx2x_attn_int_asserted(bp, asserted); if (deasserted) bnx2x_attn_int_deasserted(bp, deasserted); } void bnx2x_igu_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 segment, u16 index, u8 op, u8 update) { u32 igu_addr = BAR_IGU_INTMEM + (IGU_CMD_INT_ACK_BASE + igu_sb_id)*8; bnx2x_igu_ack_sb_gen(bp, igu_sb_id, segment, index, op, update, igu_addr); } static inline void bnx2x_update_eq_prod(struct bnx2x *bp, u16 prod) { /* No memory barriers */ storm_memset_eq_prod(bp, prod, BP_FUNC(bp)); mmiowb(); /* keep prod updates ordered */ } #ifdef BCM_CNIC static int bnx2x_cnic_handle_cfc_del(struct bnx2x *bp, u32 cid, union event_ring_elem *elem) { u8 err = elem->message.error; if (!bp->cnic_eth_dev.starting_cid || (cid < bp->cnic_eth_dev.starting_cid && cid != 
bp->cnic_eth_dev.iscsi_l2_cid))
                return 1;

        DP(BNX2X_MSG_SP, "got delete ramrod for CNIC CID %d\n", cid);

        if (unlikely(err)) {
                BNX2X_ERR("got delete ramrod for CNIC CID %d with error!\n",
                          cid);
                bnx2x_panic_dump(bp);
        }
        bnx2x_cnic_cfc_comp(bp, cid, err);
        return 0;
}
#endif

/*
 * Clear the pending state of the last multicast command and, if further
 * multicast commands are queued on the mcast object, send the next one.
 * Runs under the netdev addr lock.
 */
static inline void bnx2x_handle_mcast_eqe(struct bnx2x *bp)
{
        struct bnx2x_mcast_ramrod_params rparam;
        int rc;

        memset(&rparam, 0, sizeof(rparam));

        rparam.mcast_obj = &bp->mcast_obj;

        netif_addr_lock_bh(bp->dev);

        /* Clear pending state for the last command */
        bp->mcast_obj.raw.clear_pending(&bp->mcast_obj.raw);

        /* If there are pending mcast commands - send them */
        if (bp->mcast_obj.check_pending(&bp->mcast_obj)) {
                rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
                if (rc < 0)
                        BNX2X_ERR("Failed to send pending mcast commands: %d\n",
                                  rc);
        }

        netif_addr_unlock_bh(bp->dev);
}

/*
 * Complete a classification (MAC/multicast rules) ramrod: pick the
 * vlan/mac object matching the echo field and let it schedule the next
 * pending commands (RAMROD_CONT is always set so the chain keeps moving).
 */
static inline void bnx2x_handle_classification_eqe(struct bnx2x *bp,
                                                   union event_ring_elem *elem)
{
        unsigned long ramrod_flags = 0;
        int rc = 0;
        u32 cid = elem->message.data.eth_event.echo & BNX2X_SWCID_MASK;
        struct bnx2x_vlan_mac_obj *vlan_mac_obj;

        /* Always push next commands out, don't wait here */
        __set_bit(RAMROD_CONT, &ramrod_flags);

        switch (elem->message.data.eth_event.echo >> BNX2X_SWCID_SHIFT) {
        case BNX2X_FILTER_MAC_PENDING:
                DP(BNX2X_MSG_SP, "Got SETUP_MAC completions\n");
#ifdef BCM_CNIC
                if (cid == BNX2X_ISCSI_ETH_CID)
                        vlan_mac_obj = &bp->iscsi_l2_mac_obj;
                else
#endif
                        vlan_mac_obj = &bp->fp[cid].mac_obj;

                break;
        case BNX2X_FILTER_MCAST_PENDING:
                DP(BNX2X_MSG_SP, "Got SETUP_MCAST completions\n");
                /* This is only relevant for 57710 where multicast MACs are
                 * configured as unicast MACs using the same ramrod.
                 */
                bnx2x_handle_mcast_eqe(bp);
                return;
        default:
                BNX2X_ERR("Unsupported classification command: %d\n",
                          elem->message.data.eth_event.echo);
                return;
        }

        rc = vlan_mac_obj->complete(bp, vlan_mac_obj, elem, &ramrod_flags);

        if (rc < 0)
                BNX2X_ERR("Failed to schedule new commands: %d\n", rc);
        else if (rc > 0)
                DP(BNX2X_MSG_SP, "Scheduled next pending commands...\n");
}

#ifdef BCM_CNIC
static void bnx2x_set_iscsi_eth_rx_mode(struct bnx2x *bp, bool start);
#endif

/*
 * rx_mode ramrod completed: clear the pending bit and re-issue any
 * rx-mode/iSCSI rx-mode request that was scheduled while it was in flight.
 */
static inline void bnx2x_handle_rx_mode_eqe(struct bnx2x *bp)
{
        netif_addr_lock_bh(bp->dev);

        clear_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state);

        /* Send rx_mode command again if was requested */
        if (test_and_clear_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state))
                bnx2x_set_storm_rx_mode(bp);
#ifdef BCM_CNIC
        else if (test_and_clear_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED,
                                    &bp->sp_state))
                bnx2x_set_iscsi_eth_rx_mode(bp, true);
        else if (test_and_clear_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED,
                                    &bp->sp_state))
                bnx2x_set_iscsi_eth_rx_mode(bp, false);
#endif

        netif_addr_unlock_bh(bp->dev);
}

/* Map a CID to the queue state object of the owning fastpath (or FCoE) */
static inline struct bnx2x_queue_sp_obj *bnx2x_cid_to_q_obj(
        struct bnx2x *bp, u32 cid)
{
        DP(BNX2X_MSG_SP, "retrieving fp from cid %d\n", cid);
#ifdef BCM_CNIC
        if (cid == BNX2X_FCOE_ETH_CID)
                return &bnx2x_fcoe(bp, q_obj);
        else
#endif
                return &bnx2x_fp(bp, CID_TO_FP(cid), q_obj);
}

/*
 * Drain the event queue (EQ): walk from the software consumer up to the
 * hardware consumer, dispatch each completed ramrod by opcode (and, for
 * state-dependent opcodes, by the combined opcode|bp->state key), then
 * return the consumed credits and publish the new producer to FW.
 */
static void bnx2x_eq_int(struct bnx2x *bp)
{
        u16 hw_cons, sw_cons, sw_prod;
        union event_ring_elem *elem;
        u32 cid;
        u8 opcode;
        int spqe_cnt = 0;
        struct bnx2x_queue_sp_obj *q_obj;
        struct bnx2x_func_sp_obj *f_obj = &bp->func_obj;
        struct bnx2x_raw_obj *rss_raw = &bp->rss_conf_obj.raw;

        hw_cons = le16_to_cpu(*bp->eq_cons_sb);

        /* The hw_cos range is 1-255, 257 - the sw_cons range is 0-254, 256.
         * when we get the next-page we need to adjust so the loop
         * condition below will be met. The next element is the size of a
         * regular element and hence incrementing by 1
         */
        if ((hw_cons & EQ_DESC_MAX_PAGE) == EQ_DESC_MAX_PAGE)
                hw_cons++;

        /* This function may never run in parallel with itself for a
         * specific bp, thus there is no need in "paired" read memory
         * barrier here.
         */
        sw_cons = bp->eq_cons;
        sw_prod = bp->eq_prod;

        DP(BNX2X_MSG_SP, "EQ: hw_cons %u sw_cons %u bp->eq_spq_left %x\n",
                        hw_cons, sw_cons, atomic_read(&bp->eq_spq_left));

        for (; sw_cons != hw_cons;
              sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) {

                elem = &bp->eq_ring[EQ_DESC(sw_cons)];

                cid = SW_CID(elem->message.data.cfc_del_event.cid);
                opcode = elem->message.opcode;

                /* handle eq element */
                switch (opcode) {
                case EVENT_RING_OPCODE_STAT_QUERY:
                        DP(BNX2X_MSG_SP | BNX2X_MSG_STATS,
                           "got statistics comp event %d\n",
                           bp->stats_comp++);
                        /* nothing to do with stats comp */
                        goto next_spqe;

                case EVENT_RING_OPCODE_CFC_DEL:
                        /* handle according to cid range */
                        /*
                         * we may want to verify here that the bp state is
                         * HALTING
                         */
                        DP(BNX2X_MSG_SP,
                           "got delete ramrod for MULTI[%d]\n", cid);
#ifdef BCM_CNIC
                        if (!bnx2x_cnic_handle_cfc_del(bp, cid, elem))
                                goto next_spqe;
#endif
                        q_obj = bnx2x_cid_to_q_obj(bp, cid);

                        if (q_obj->complete_cmd(bp, q_obj, BNX2X_Q_CMD_CFC_DEL))
                                break;

                        goto next_spqe;

                case EVENT_RING_OPCODE_STOP_TRAFFIC:
                        DP(BNX2X_MSG_SP | BNX2X_MSG_DCB, "got STOP TRAFFIC\n");
                        if (f_obj->complete_cmd(bp, f_obj,
                                                BNX2X_F_CMD_TX_STOP))
                                break;
                        bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_PAUSED);
                        goto next_spqe;

                case EVENT_RING_OPCODE_START_TRAFFIC:
                        DP(BNX2X_MSG_SP | BNX2X_MSG_DCB, "got START TRAFFIC\n");
                        if (f_obj->complete_cmd(bp, f_obj,
                                                BNX2X_F_CMD_TX_START))
                                break;
                        bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_RELEASED);
                        goto next_spqe;

                case EVENT_RING_OPCODE_FUNCTION_START:
                        DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
                           "got FUNC_START ramrod\n");
                        if (f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_START))
                                break;

                        goto next_spqe;

                case EVENT_RING_OPCODE_FUNCTION_STOP:
                        DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
                           "got FUNC_STOP ramrod\n");
                        if (f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_STOP))
                                break;

                        goto next_spqe;
                }

                switch (opcode | bp->state) {
                case (EVENT_RING_OPCODE_RSS_UPDATE_RULES |
                      BNX2X_STATE_OPEN):
                case (EVENT_RING_OPCODE_RSS_UPDATE_RULES |
                      BNX2X_STATE_OPENING_WAIT4_PORT):
                        cid = elem->message.data.eth_event.echo &
                                BNX2X_SWCID_MASK;
                        DP(BNX2X_MSG_SP, "got RSS_UPDATE ramrod. CID %d\n",
                           cid);
                        rss_raw->clear_pending(rss_raw);
                        break;

                case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_OPEN):
                case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_DIAG):
                case (EVENT_RING_OPCODE_SET_MAC |
                      BNX2X_STATE_CLOSING_WAIT4_HALT):
                case (EVENT_RING_OPCODE_CLASSIFICATION_RULES |
                      BNX2X_STATE_OPEN):
                case (EVENT_RING_OPCODE_CLASSIFICATION_RULES |
                      BNX2X_STATE_DIAG):
                case (EVENT_RING_OPCODE_CLASSIFICATION_RULES |
                      BNX2X_STATE_CLOSING_WAIT4_HALT):
                        DP(BNX2X_MSG_SP, "got (un)set mac ramrod\n");
                        bnx2x_handle_classification_eqe(bp, elem);
                        break;

                case (EVENT_RING_OPCODE_MULTICAST_RULES |
                      BNX2X_STATE_OPEN):
                case (EVENT_RING_OPCODE_MULTICAST_RULES |
                      BNX2X_STATE_DIAG):
                case (EVENT_RING_OPCODE_MULTICAST_RULES |
                      BNX2X_STATE_CLOSING_WAIT4_HALT):
                        DP(BNX2X_MSG_SP, "got mcast ramrod\n");
                        bnx2x_handle_mcast_eqe(bp);
                        break;

                case (EVENT_RING_OPCODE_FILTERS_RULES |
                      BNX2X_STATE_OPEN):
                case (EVENT_RING_OPCODE_FILTERS_RULES |
                      BNX2X_STATE_DIAG):
                case (EVENT_RING_OPCODE_FILTERS_RULES |
                      BNX2X_STATE_CLOSING_WAIT4_HALT):
                        DP(BNX2X_MSG_SP, "got rx_mode ramrod\n");
                        bnx2x_handle_rx_mode_eqe(bp);
                        break;
                default:
                        /* unknown event log error and continue */
                        BNX2X_ERR("Unknown EQ event %d, bp->state 0x%x\n",
                                  elem->message.opcode, bp->state);
                }
next_spqe:
                spqe_cnt++;
        } /* for */

        smp_mb__before_atomic_inc();
        atomic_add(spqe_cnt, &bp->eq_spq_left);

        bp->eq_cons = sw_cons;
        bp->eq_prod = sw_prod;
        /* Make sure that above mem writes were issued towards the memory */
        smp_wmb();

        /* update producer */
        bnx2x_update_eq_prod(bp, bp->eq_prod);
}

/*
 * Slow-path work item: refresh the default status block index and handle
 * HW attentions and SP events (EQ completions), acking towards the IGU.
 */
static void bnx2x_sp_task(struct work_struct *work)
{
        struct bnx2x *bp = container_of(work, struct bnx2x,
sp_task.work);
        u16 status;

        status = bnx2x_update_dsb_idx(bp);
/*	if (status == 0)				     */
/*		BNX2X_ERR("spurious slowpath interrupt!\n"); */

        DP(BNX2X_MSG_SP, "got a slowpath interrupt (status 0x%x)\n", status);

        /* HW attentions */
        if (status & BNX2X_DEF_SB_ATT_IDX) {
                bnx2x_attn_int(bp);
                status &= ~BNX2X_DEF_SB_ATT_IDX;
        }

        /* SP events: STAT_QUERY and others */
        if (status & BNX2X_DEF_SB_IDX) {
#ifdef BCM_CNIC
                struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);

                if ((!NO_FCOE(bp)) &&
                        (bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
                        /*
                         * Prevent local bottom-halves from running as
                         * we are going to change the local NAPI list.
                         */
                        local_bh_disable();
                        napi_schedule(&bnx2x_fcoe(bp, napi));
                        local_bh_enable();
                }
#endif
                /* Handle EQ completions */
                bnx2x_eq_int(bp);

                bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID,
                        le16_to_cpu(bp->def_idx), IGU_INT_NOP, 1);

                status &= ~BNX2X_DEF_SB_IDX;
        }

        if (unlikely(status))
                DP(BNX2X_MSG_SP, "got an unknown interrupt! (status 0x%x)\n",
                   status);

        /* re-enable attention interrupts only after all work is done */
        bnx2x_ack_sb(bp, bp->igu_dsb_id, ATTENTION_ID,
             le16_to_cpu(bp->def_att_idx), IGU_INT_ENABLE, 1);
}

/*
 * Slow-path MSI-X interrupt handler: disable further SP interrupts,
 * notify the CNIC driver if registered, and defer the real work to the
 * sp_task workqueue item.
 */
irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
{
        struct net_device *dev = dev_instance;
        struct bnx2x *bp = netdev_priv(dev);

        bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0,
                     IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
        if (unlikely(bp->panic))
                return IRQ_HANDLED;
#endif

#ifdef BCM_CNIC
        {
                struct cnic_ops *c_ops;

                rcu_read_lock();
                c_ops = rcu_dereference(bp->cnic_ops);
                if (c_ops)
                        c_ops->cnic_handler(bp->cnic_data, NULL);
                rcu_read_unlock();
        }
#endif
        queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

        return IRQ_HANDLED;
}

/* end of slow path */

/* Write the driver heartbeat sequence to the MCP shared-memory mailbox */
void bnx2x_drv_pulse(struct bnx2x *bp)
{
        SHMEM_WR(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb,
                 bp->fw_drv_pulse_wr_seq);
}

/*
 * Periodic driver timer: exchange heartbeat pulses with the MCP firmware
 * (logging if the sequence numbers drift apart), kick a statistics update
 * when the device is open, and re-arm itself.
 */
static void bnx2x_timer(unsigned long data)
{
        struct bnx2x *bp = (struct bnx2x *) data;

        if (!netif_running(bp->dev))
                return;

        if (!BP_NOMCP(bp)) {
                int mb_idx = BP_FW_MB_IDX(bp);
                u32 drv_pulse;
                u32 mcp_pulse;

                ++bp->fw_drv_pulse_wr_seq;
                bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
                /* TBD - add SYSTEM_TIME */
                drv_pulse = bp->fw_drv_pulse_wr_seq;
                bnx2x_drv_pulse(bp);

                mcp_pulse = (SHMEM_RD(bp, func_mb[mb_idx].mcp_pulse_mb) &
                             MCP_PULSE_SEQ_MASK);
                /* The delta between driver pulse and mcp response
                 * should be 1 (before mcp response) or 0 (after mcp response)
                 */
                if ((drv_pulse != mcp_pulse) &&
                    (drv_pulse != ((mcp_pulse + 1) &
                                   MCP_PULSE_SEQ_MASK))) {
                        /* someone lost a heartbeat... */
                        BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
                                  drv_pulse, mcp_pulse);
                }
        }

        if (bp->state == BNX2X_STATE_OPEN)
                bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);

        mod_timer(&bp->timer, jiffies + bp->current_interval);
}

/* end of Statistics */

/* nic init */

/*
 * nic init service functions
 */

/*
 * Fill `len` bytes of device memory at `addr` with `fill`; uses dword
 * writes when both address and length are dword-aligned, byte writes
 * otherwise.
 */
static inline void bnx2x_fill(struct bnx2x *bp, u32 addr, int fill, u32 len)
{
        u32 i;
        if (!(len%4) && !(addr%4))
                for (i = 0; i < len; i += 4)
                        REG_WR(bp, addr + i, fill);
        else
                for (i = 0; i < len; i++)
                        REG_WR8(bp, addr + i, fill);
}

/* helper: writes FP SP data to FW - data_size in dwords */
static inline void bnx2x_wr_fp_sb_data(struct bnx2x *bp,
                                       int fw_sb_id,
                                       u32 *sb_data_p,
                                       u32 data_size)
{
        int index;
        for (index = 0; index < data_size; index++)
                REG_WR(bp, BAR_CSTRORM_INTMEM +
                        CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
                        sizeof(u32)*index,
                        *(sb_data_p + index));
}

/* Disable a fastpath status block in FW and zero its storm memory */
static inline void bnx2x_zero_fp_sb(struct bnx2x *bp, int fw_sb_id)
{
        u32 *sb_data_p;
        u32 data_size = 0;
        struct hc_status_block_data_e2 sb_data_e2;
        struct hc_status_block_data_e1x sb_data_e1x;

        /* disable the function first */
        if (!CHIP_IS_E1x(bp)) {
                memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
                sb_data_e2.common.state = SB_DISABLED;
                sb_data_e2.common.p_func.vf_valid = false;
                sb_data_p = (u32 *)&sb_data_e2;
                data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
        } else {
                memset(&sb_data_e1x, 0,
                       sizeof(struct hc_status_block_data_e1x));
                sb_data_e1x.common.state = SB_DISABLED;
                sb_data_e1x.common.p_func.vf_valid = false;
                sb_data_p = (u32 *)&sb_data_e1x;
                data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
        }
        bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);

        bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
                        CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id), 0,
                        CSTORM_STATUS_BLOCK_SIZE);
        bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
                        CSTORM_SYNC_BLOCK_OFFSET(fw_sb_id), 0,
                        CSTORM_SYNC_BLOCK_SIZE);
}

/* helper: writes SP SB data to FW */
static inline void bnx2x_wr_sp_sb_data(struct bnx2x *bp,
                struct hc_sp_status_block_data *sp_sb_data)
{
        int func = BP_FUNC(bp);
        int i;
        for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32);
             i++)
                REG_WR(bp, BAR_CSTRORM_INTMEM +
                        CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
                        i*sizeof(u32),
                        *((u32 *)sp_sb_data + i));
}

/* Disable the slow-path status block in FW and zero its storm memory */
static inline void bnx2x_zero_sp_sb(struct bnx2x *bp)
{
        int func = BP_FUNC(bp);
        struct hc_sp_status_block_data sp_sb_data;
        memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));

        sp_sb_data.state = SB_DISABLED;
        sp_sb_data.p_func.vf_valid = false;

        bnx2x_wr_sp_sb_data(bp, &sp_sb_data);

        bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
                        CSTORM_SP_STATUS_BLOCK_OFFSET(func), 0,
                        CSTORM_SP_STATUS_BLOCK_SIZE);
        bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
                        CSTORM_SP_SYNC_BLOCK_OFFSET(func), 0,
                        CSTORM_SP_SYNC_BLOCK_SIZE);
}

/* Initialize one NDSB state machine with its IGU ids and default timers */
static inline
void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm,
                                           int igu_sb_id, int igu_seg_id)
{
        hc_sm->igu_sb_id = igu_sb_id;
        hc_sm->igu_seg_id = igu_seg_id;
        hc_sm->timer_value = 0xFF;
        hc_sm->time_to_expire = 0xFFFFFFFF;
}

/* allocates state machine ids.
 */
static inline
void bnx2x_map_sb_state_machines(struct hc_index_data *index_data)
{
        /* zero out state machine indices */
        /* rx indices */
        index_data[HC_INDEX_ETH_RX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID;

        /* tx indices */
        index_data[HC_INDEX_OOO_TX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID;
        index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags &= ~HC_INDEX_DATA_SM_ID;
        index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags &= ~HC_INDEX_DATA_SM_ID;
        index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags &= ~HC_INDEX_DATA_SM_ID;

        /* map indices */
        /* rx indices */
        index_data[HC_INDEX_ETH_RX_CQ_CONS].flags |=
                SM_RX_ID << HC_INDEX_DATA_SM_ID_SHIFT;

        /* tx indices */
        index_data[HC_INDEX_OOO_TX_CQ_CONS].flags |=
                SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
        index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags |=
                SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
        index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags |=
                SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
        index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags |=
                SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
}

/*
 * Program a fastpath status block in FW: disable it first, then fill the
 * chip-specific (E2 vs E1x) SB data with the host DMA address, function
 * ids and state machine mapping, and write it to the CSTORM memory.
 */
static void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
                          u8 vf_valid, int fw_sb_id, int igu_sb_id)
{
        int igu_seg_id;

        struct hc_status_block_data_e2 sb_data_e2;
        struct hc_status_block_data_e1x sb_data_e1x;
        struct hc_status_block_sm *hc_sm_p;
        int data_size;
        u32 *sb_data_p;

        if (CHIP_INT_MODE_IS_BC(bp))
                igu_seg_id = HC_SEG_ACCESS_NORM;
        else
                igu_seg_id = IGU_SEG_ACCESS_NORM;

        bnx2x_zero_fp_sb(bp, fw_sb_id);

        if (!CHIP_IS_E1x(bp)) {
                memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
                sb_data_e2.common.state = SB_ENABLED;
                sb_data_e2.common.p_func.pf_id = BP_FUNC(bp);
                sb_data_e2.common.p_func.vf_id = vfid;
                sb_data_e2.common.p_func.vf_valid = vf_valid;
                sb_data_e2.common.p_func.vnic_id = BP_VN(bp);
                sb_data_e2.common.same_igu_sb_1b = true;
                sb_data_e2.common.host_sb_addr.hi = U64_HI(mapping);
                sb_data_e2.common.host_sb_addr.lo = U64_LO(mapping);
                hc_sm_p = sb_data_e2.common.state_machine;
                sb_data_p = (u32 *)&sb_data_e2;
                data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
                bnx2x_map_sb_state_machines(sb_data_e2.index_data);
        } else {
                memset(&sb_data_e1x, 0,
                       sizeof(struct hc_status_block_data_e1x));
                sb_data_e1x.common.state = SB_ENABLED;
                sb_data_e1x.common.p_func.pf_id = BP_FUNC(bp);
                sb_data_e1x.common.p_func.vf_id = 0xff;
                sb_data_e1x.common.p_func.vf_valid = false;
                sb_data_e1x.common.p_func.vnic_id = BP_VN(bp);
                sb_data_e1x.common.same_igu_sb_1b = true;
                sb_data_e1x.common.host_sb_addr.hi = U64_HI(mapping);
                sb_data_e1x.common.host_sb_addr.lo = U64_LO(mapping);
                hc_sm_p = sb_data_e1x.common.state_machine;
                sb_data_p = (u32 *)&sb_data_e1x;
                data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
                bnx2x_map_sb_state_machines(sb_data_e1x.index_data);
        }

        bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID],
                                       igu_sb_id, igu_seg_id);
        bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_TX_ID],
                                       igu_sb_id, igu_seg_id);

        DP(NETIF_MSG_IFUP, "Init FW SB %d\n", fw_sb_id);

        /* write indices to HW */
        bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
}

/* Program per-SB interrupt coalescing for the rx index and all tx COSes */
static void bnx2x_update_coalesce_sb(struct bnx2x *bp, u8 fw_sb_id,
                                     u16 tx_usec, u16 rx_usec)
{
        bnx2x_update_coalesce_sb_index(bp, fw_sb_id, HC_INDEX_ETH_RX_CQ_CONS,
                                    false, rx_usec);
        bnx2x_update_coalesce_sb_index(bp, fw_sb_id,
                                       HC_INDEX_ETH_TX_CQ_CONS_COS0, false,
                                       tx_usec);
        bnx2x_update_coalesce_sb_index(bp, fw_sb_id,
                                       HC_INDEX_ETH_TX_CQ_CONS_COS1, false,
                                       tx_usec);
        bnx2x_update_coalesce_sb_index(bp, fw_sb_id,
                                       HC_INDEX_ETH_TX_CQ_CONS_COS2, false,
                                       tx_usec);
}

/*
 * Initialize the default (slow-path) status block: reset the attention
 * state, read the per-group AEU enable masks into bp->attn_group[], point
 * HC/IGU at the attention section of the host SB, and enable the SP SB.
 */
static void bnx2x_init_def_sb(struct bnx2x *bp)
{
        struct host_sp_status_block *def_sb = bp->def_status_blk;
        dma_addr_t mapping = bp->def_status_blk_mapping;
        int igu_sp_sb_index;
        int igu_seg_id;
        int port = BP_PORT(bp);
        int func = BP_FUNC(bp);
        int reg_offset, reg_offset_en5;
        u64 section;
        int index;
        struct hc_sp_status_block_data sp_sb_data;
        memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));

        if (CHIP_INT_MODE_IS_BC(bp)) {
                igu_sp_sb_index = DEF_SB_IGU_ID;
                igu_seg_id = HC_SEG_ACCESS_DEF;
        } else {
                igu_sp_sb_index = bp->igu_dsb_id;
                igu_seg_id = IGU_SEG_ACCESS_DEF;
        }

        /* ATTN */
        section = ((u64)mapping) + offsetof(struct host_sp_status_block,
                                            atten_status_block);
        def_sb->atten_status_block.status_block_id = igu_sp_sb_index;

        bp->attn_state = 0;

        reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
                             MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
        reg_offset_en5 = (port ? MISC_REG_AEU_ENABLE5_FUNC_1_OUT_0 :
                                 MISC_REG_AEU_ENABLE5_FUNC_0_OUT_0);
        for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
                int sindex;
                /* take care of sig[0]..sig[4] */
                for (sindex = 0; sindex < 4; sindex++)
                        bp->attn_group[index].sig[sindex] =
                           REG_RD(bp, reg_offset + sindex*0x4 + 0x10*index);

                if (!CHIP_IS_E1x(bp))
                        /*
                         * enable5 is separate from the rest of the registers,
                         * and therefore the address skip is 4
                         * and not 16 between the different groups
                         */
                        bp->attn_group[index].sig[4] = REG_RD(bp,
                                        reg_offset_en5 + 0x4*index);
                else
                        bp->attn_group[index].sig[4] = 0;
        }

        if (bp->common.int_block == INT_BLOCK_HC) {
                reg_offset = (port ?
                              HC_REG_ATTN_MSG1_ADDR_L :
                              HC_REG_ATTN_MSG0_ADDR_L);

                REG_WR(bp, reg_offset, U64_LO(section));
                REG_WR(bp, reg_offset + 4, U64_HI(section));
        } else if (!CHIP_IS_E1x(bp)) {
                REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_L, U64_LO(section));
                REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_H, U64_HI(section));
        }

        section = ((u64)mapping) + offsetof(struct host_sp_status_block,
                                            sp_sb);

        bnx2x_zero_sp_sb(bp);

        sp_sb_data.state = SB_ENABLED;
        sp_sb_data.host_sb_addr.lo = U64_LO(section);
        sp_sb_data.host_sb_addr.hi = U64_HI(section);
        sp_sb_data.igu_sb_id = igu_sp_sb_index;
        sp_sb_data.igu_seg_id = igu_seg_id;
        sp_sb_data.p_func.pf_id = func;
        sp_sb_data.p_func.vnic_id = BP_VN(bp);
        sp_sb_data.p_func.vf_id = 0xff;

        bnx2x_wr_sp_sb_data(bp, &sp_sb_data);

        bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0);
}

/* Re-apply the current tx/rx coalescing ticks to every ethernet queue SB */
void bnx2x_update_coalesce(struct bnx2x *bp)
{
        int i;

        for_each_eth_queue(bp, i)
                bnx2x_update_coalesce_sb(bp, bp->fp[i].fw_sb_id,
                                         bp->tx_ticks, bp->rx_ticks);
}

/* Reset slow-path queue (SPQ) bookkeeping: lock, credits, producer BDs */
static void bnx2x_init_sp_ring(struct bnx2x *bp)
{
        spin_lock_init(&bp->spq_lock);
        atomic_set(&bp->cq_spq_left, MAX_SPQ_PENDING);

        bp->spq_prod_idx = 0;
        bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
        bp->spq_prod_bd = bp->spq;
        bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
}

/*
 * Chain the event queue pages together via their last ("next page")
 * element and reset the EQ consumer/producer and credit counters.
 */
static void bnx2x_init_eq_ring(struct bnx2x *bp)
{
        int i;
        for (i = 1; i <= NUM_EQ_PAGES; i++) {
                union event_ring_elem *elem =
                        &bp->eq_ring[EQ_DESC_CNT_PAGE * i - 1];

                elem->next_page.addr.hi =
                        cpu_to_le32(U64_HI(bp->eq_mapping +
                                   BCM_PAGE_SIZE * (i % NUM_EQ_PAGES)));
                elem->next_page.addr.lo =
                        cpu_to_le32(U64_LO(bp->eq_mapping +
                                   BCM_PAGE_SIZE*(i % NUM_EQ_PAGES)));
        }
        bp->eq_cons = 0;
        bp->eq_prod = NUM_EQ_DESC;
        bp->eq_cons_sb = BNX2X_EQ_INDEX;
        /* we want a warning message before it gets wrought...
 */
        atomic_set(&bp->eq_spq_left,
                   min_t(int, MAX_SP_DESC_CNT - MAX_SPQ_PENDING,
                         NUM_EQ_DESC) - 1);
}

/* called with netif_addr_lock_bh() */
void bnx2x_set_q_rx_mode(struct bnx2x *bp, u8 cl_id,
                         unsigned long rx_mode_flags,
                         unsigned long rx_accept_flags,
                         unsigned long tx_accept_flags,
                         unsigned long ramrod_flags)
{
        struct bnx2x_rx_mode_ramrod_params ramrod_param;
        int rc;

        memset(&ramrod_param, 0, sizeof(ramrod_param));

        /* Prepare ramrod parameters */
        ramrod_param.cid = 0;
        ramrod_param.cl_id = cl_id;
        ramrod_param.rx_mode_obj = &bp->rx_mode_obj;
        ramrod_param.func_id = BP_FUNC(bp);

        ramrod_param.pstate = &bp->sp_state;
        ramrod_param.state = BNX2X_FILTER_RX_MODE_PENDING;

        ramrod_param.rdata = bnx2x_sp(bp, rx_mode_rdata);
        ramrod_param.rdata_mapping = bnx2x_sp_mapping(bp, rx_mode_rdata);

        /* mark the command pending; cleared by bnx2x_handle_rx_mode_eqe() */
        set_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state);

        ramrod_param.ramrod_flags = ramrod_flags;
        ramrod_param.rx_mode_flags = rx_mode_flags;

        ramrod_param.rx_accept_flags = rx_accept_flags;
        ramrod_param.tx_accept_flags = tx_accept_flags;

        rc = bnx2x_config_rx_mode(bp, &ramrod_param);
        if (rc < 0) {
                BNX2X_ERR("Set rx_mode %d failed\n", bp->rx_mode);
                return;
        }
}

/* called with netif_addr_lock_bh() */
void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
{
        unsigned long rx_mode_flags = 0, ramrod_flags = 0;
        unsigned long rx_accept_flags = 0, tx_accept_flags = 0;

#ifdef BCM_CNIC
        if (!NO_FCOE(bp))

                /* Configure rx_mode of FCoE Queue */
                __set_bit(BNX2X_RX_MODE_FCOE_ETH, &rx_mode_flags);
#endif

        /* translate bp->rx_mode into the rx/tx accept flag sets */
        switch (bp->rx_mode) {
        case BNX2X_RX_MODE_NONE:
                /*
                 * 'drop all' supersedes any accept flags that may have been
                 * passed to the function.
                 */
                break;
        case BNX2X_RX_MODE_NORMAL:
                __set_bit(BNX2X_ACCEPT_UNICAST, &rx_accept_flags);
                __set_bit(BNX2X_ACCEPT_MULTICAST, &rx_accept_flags);
                __set_bit(BNX2X_ACCEPT_BROADCAST, &rx_accept_flags);

                /* internal switching mode */
                __set_bit(BNX2X_ACCEPT_UNICAST, &tx_accept_flags);
                __set_bit(BNX2X_ACCEPT_MULTICAST, &tx_accept_flags);
                __set_bit(BNX2X_ACCEPT_BROADCAST, &tx_accept_flags);

                break;
        case BNX2X_RX_MODE_ALLMULTI:
                __set_bit(BNX2X_ACCEPT_UNICAST, &rx_accept_flags);
                __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &rx_accept_flags);
                __set_bit(BNX2X_ACCEPT_BROADCAST, &rx_accept_flags);

                /* internal switching mode */
                __set_bit(BNX2X_ACCEPT_UNICAST, &tx_accept_flags);
                __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &tx_accept_flags);
                __set_bit(BNX2X_ACCEPT_BROADCAST, &tx_accept_flags);

                break;
        case BNX2X_RX_MODE_PROMISC:
                /* According to definition of SI mode, iface in promisc mode
                 * should receive matched and unmatched (in resolution of port)
                 * unicast packets.
                 */
                __set_bit(BNX2X_ACCEPT_UNMATCHED, &rx_accept_flags);
                __set_bit(BNX2X_ACCEPT_UNICAST, &rx_accept_flags);
                __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &rx_accept_flags);
                __set_bit(BNX2X_ACCEPT_BROADCAST, &rx_accept_flags);

                /* internal switching mode */
                __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &tx_accept_flags);
                __set_bit(BNX2X_ACCEPT_BROADCAST, &tx_accept_flags);

                if (IS_MF_SI(bp))
                        __set_bit(BNX2X_ACCEPT_ALL_UNICAST, &tx_accept_flags);
                else
                        __set_bit(BNX2X_ACCEPT_UNICAST, &tx_accept_flags);

                break;
        default:
                BNX2X_ERR("Unknown rx_mode: %d\n", bp->rx_mode);
                return;
        }

        if (bp->rx_mode != BNX2X_RX_MODE_NONE) {
                __set_bit(BNX2X_ACCEPT_ANY_VLAN, &rx_accept_flags);
                __set_bit(BNX2X_ACCEPT_ANY_VLAN, &tx_accept_flags);
        }

        __set_bit(RAMROD_RX, &ramrod_flags);
        __set_bit(RAMROD_TX, &ramrod_flags);

        bnx2x_set_q_rx_mode(bp, bp->fp->cl_id, rx_mode_flags, rx_accept_flags,
                            tx_accept_flags, ramrod_flags);
}

/* One-time (per chip) internal storm memory initialization */
static void bnx2x_init_internal_common(struct bnx2x *bp)
{
        int i;

        if (IS_MF_SI(bp))
                /*
                 * In switch independent mode, the TSTORM needs to accept
                 * packets that failed classification, since approximate match
                 * mac addresses aren't written to NIG LLH
                 */
                REG_WR8(bp, BAR_TSTRORM_INTMEM +
                            TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET, 2);
        else if (!CHIP_IS_E1(bp)) /* 57710 doesn't support MF */
                REG_WR8(bp, BAR_TSTRORM_INTMEM +
                            TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET, 0);

        /* Zero this manually as its initialization is
           currently missing in the initTool */
        for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
                REG_WR(bp, BAR_USTRORM_INTMEM +
                       USTORM_AGG_DATA_OFFSET + i * 4, 0);
        if (!CHIP_IS_E1x(bp)) {
                REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_IGU_MODE_OFFSET,
                        CHIP_INT_MODE_IS_BC(bp) ?
                        HC_IGU_BC_MODE : HC_IGU_NBC_MODE);
        }
}

/*
 * Dispatch internal-memory init according to the MCP load response; the
 * COMMON cases intentionally fall through (only common init is needed
 * here, port/function init happens elsewhere).
 */
static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
{
        switch (load_code) {
        case FW_MSG_CODE_DRV_LOAD_COMMON:
        case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
                bnx2x_init_internal_common(bp);
                /* no break */

        case FW_MSG_CODE_DRV_LOAD_PORT:
                /* nothing to do */
                /* no break */

        case FW_MSG_CODE_DRV_LOAD_FUNCTION:
                /* internal memory per function is
                   initialized inside bnx2x_pf_init */
                break;

        default:
                BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
                break;
        }
}

/* IGU status block id of a fastpath (offset past the CNIC SBs) */
static inline u8 bnx2x_fp_igu_sb_id(struct bnx2x_fastpath *fp)
{
        return fp->bp->igu_base_sb + fp->index + CNIC_PRESENT;
}

/* FW status block id of a fastpath (offset past the CNIC SBs) */
static inline u8 bnx2x_fp_fw_sb_id(struct bnx2x_fastpath *fp)
{
        return fp->bp->base_fw_ndsb + fp->index + CNIC_PRESENT;
}

static inline u8 bnx2x_fp_cl_id(struct bnx2x_fastpath *fp)
{
        if (CHIP_IS_E1x(fp->bp))
                return BP_L_ID(fp->bp) + fp->index;
        else    /* We want Client ID to be the same as IGU SB ID for 57712 */
                return bnx2x_fp_igu_sb_id(fp);
}

/*
 * Initialize one ethernet fastpath: ids, SB shortcuts, per-COS tx data,
 * queue state object, classification objects and its FW status block.
 */
static void bnx2x_init_eth_fp(struct bnx2x *bp, int fp_idx)
{
        struct bnx2x_fastpath *fp = &bp->fp[fp_idx];
        u8 cos;
        unsigned long q_type = 0;
        u32 cids[BNX2X_MULTI_TX_COS] = { 0 };

        fp->rx_queue = fp_idx;
        fp->cid = fp_idx;
        fp->cl_id = bnx2x_fp_cl_id(fp);
        fp->fw_sb_id = bnx2x_fp_fw_sb_id(fp);
        fp->igu_sb_id = bnx2x_fp_igu_sb_id(fp);
        /* qZone id equals to FW (per path) client id */
        fp->cl_qzone_id = bnx2x_fp_qzone_id(fp);

        /* init shortcut */
        fp->ustorm_rx_prods_offset = bnx2x_rx_ustorm_prods_offset(fp);

        /* Setup SB indices */
        fp->rx_cons_sb = BNX2X_RX_SB_INDEX;

        /* Configure Queue State object */
        __set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);
        __set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);

        BUG_ON(fp->max_cos > BNX2X_MULTI_TX_COS);

        /* init tx data */
        for_each_cos_in_tx_queue(fp, cos) {
                bnx2x_init_txdata(bp, &fp->txdata[cos],
                                  CID_COS_TO_TX_ONLY_CID(fp->cid, cos),
                                  FP_COS_TO_TXQ(fp, cos),
                                  BNX2X_TX_SB_INDEX_BASE + cos);
                cids[cos] = fp->txdata[cos].cid;
        }

        bnx2x_init_queue_obj(bp, &fp->q_obj, fp->cl_id, cids, fp->max_cos,
                             BP_FUNC(bp), bnx2x_sp(bp, q_rdata),
                             bnx2x_sp_mapping(bp, q_rdata), q_type);

        /**
         * Configure classification DBs: Always enable Tx switching
         */
        bnx2x_init_vlan_mac_fp_objs(fp, BNX2X_OBJ_TYPE_RX_TX);

        DP(NETIF_MSG_IFUP,
           "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d fw_sb %d igu_sb %d\n",
           fp_idx, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id,
           fp->igu_sb_id);
        bnx2x_init_sb(bp, fp->status_blk_mapping, BNX2X_VF_ID_INVALID, false,
                      fp->fw_sb_id, fp->igu_sb_id);

        bnx2x_update_fpsb_idx(fp);
}

/*
 * Top-level NIC init: bring up every fastpath (plus FCoE/CNIC SBs when
 * built in), the default SB, all rings and internal memories, then flush
 * posted writes and enable interrupts.
 */
void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
{
        int i;

        for_each_eth_queue(bp, i)
                bnx2x_init_eth_fp(bp, i);
#ifdef BCM_CNIC
        if (!NO_FCOE(bp))
                bnx2x_init_fcoe_fp(bp);

        bnx2x_init_sb(bp, bp->cnic_sb_mapping,
                      BNX2X_VF_ID_INVALID, false,
                      bnx2x_cnic_fw_sb_id(bp), bnx2x_cnic_igu_sb_id(bp));

#endif

        /* Initialize MOD_ABS interrupts */
        bnx2x_init_mod_abs_int(bp, &bp->link_vars, bp->common.chip_id,
                               bp->common.shmem_base, bp->common.shmem2_base,
                               BP_PORT(bp));
        /* ensure status block indices were read */
        rmb();

        bnx2x_init_def_sb(bp);
        bnx2x_update_dsb_idx(bp);
        bnx2x_init_rx_rings(bp);
        bnx2x_init_tx_rings(bp);
        bnx2x_init_sp_ring(bp);
        bnx2x_init_eq_ring(bp);
        bnx2x_init_internal(bp, load_code);
        bnx2x_pf_init(bp);
        bnx2x_stats_init(bp);

        /* flush all before enabling interrupts */
        mb();
        mmiowb();

        bnx2x_int_enable(bp);

        /* Check for SPIO5 */
        bnx2x_attn_int_deasserted0(bp,
                REG_RD(bp,
MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) & AEU_INPUTS_ATTN_BITS_SPIO5); } /* end of nic init */ /* * gzip service functions */ static int bnx2x_gunzip_init(struct bnx2x *bp) { bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE, &bp->gunzip_mapping, GFP_KERNEL); if (bp->gunzip_buf == NULL) goto gunzip_nomem1; bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL); if (bp->strm == NULL) goto gunzip_nomem2; bp->strm->workspace = vmalloc(zlib_inflate_workspacesize()); if (bp->strm->workspace == NULL) goto gunzip_nomem3; return 0; gunzip_nomem3: kfree(bp->strm); bp->strm = NULL; gunzip_nomem2: dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf, bp->gunzip_mapping); bp->gunzip_buf = NULL; gunzip_nomem1: BNX2X_ERR("Cannot allocate firmware buffer for un-compression\n"); return -ENOMEM; } static void bnx2x_gunzip_end(struct bnx2x *bp) { if (bp->strm) { vfree(bp->strm->workspace); kfree(bp->strm); bp->strm = NULL; } if (bp->gunzip_buf) { dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf, bp->gunzip_mapping); bp->gunzip_buf = NULL; } } static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len) { int n, rc; /* check gzip header */ if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) { BNX2X_ERR("Bad gzip header\n"); return -EINVAL; } n = 10; #define FNAME 0x8 if (zbuf[3] & FNAME) while ((zbuf[n++] != 0) && (n < len)); bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n; bp->strm->avail_in = len - n; bp->strm->next_out = bp->gunzip_buf; bp->strm->avail_out = FW_BUF_SIZE; rc = zlib_inflateInit2(bp->strm, -MAX_WBITS); if (rc != Z_OK) return rc; rc = zlib_inflate(bp->strm, Z_FINISH); if ((rc != Z_OK) && (rc != Z_STREAM_END)) netdev_err(bp->dev, "Firmware decompression error: %s\n", bp->strm->msg); bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out); if (bp->gunzip_outlen & 0x3) netdev_err(bp->dev, "Firmware decompression error: gunzip_outlen (%d) not aligned\n", bp->gunzip_outlen); bp->gunzip_outlen >>= 
2;

	zlib_inflateEnd(bp->strm);

	if (rc == Z_STREAM_END)
		return 0;
	return rc;
}

/* nic load/unload */

/*
 * General service functions
 */

/*
 * Send a NIG loopback debug packet: a 0x10-byte dummy frame (SOP word
 * followed by an EOP word) written through the NIG debug-packet register.
 * Used only by the internal-memory self test below.
 */
static void bnx2x_lb_pckt(struct bnx2x *bp)
{
	u32 wb_write[3];

	/* Ethernet source and destination addresses */
	wb_write[0] = 0x55555555;
	wb_write[1] = 0x55555555;
	wb_write[2] = 0x20;		/* SOP */
	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);

	/* NON-IP protocol */
	wb_write[0] = 0x09000000;
	wb_write[1] = 0x55555555;
	wb_write[2] = 0x10;		/* EOP, eop_bvalid = 0 */
	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
}

/* some of the internal memories
 * are not directly readable from the driver
 * to test them we send debug packets
 */
static int bnx2x_int_mem_test(struct bnx2x *bp)
{
	int factor;
	int count, i;
	u32 val = 0;

	/* emulation/FPGA platforms are much slower - scale the timeouts */
	if (CHIP_REV_IS_FPGA(bp))
		factor = 120;
	else if (CHIP_REV_IS_EMUL(bp))
		factor = 200;
	else
		factor = 1;

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);

	/* Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send Ethernet packet */
	bnx2x_lb_pckt(bp);

	/* TODO do i reset NIG statistic? */
	/* Wait until NIG register shows 1 packet of size 0x10 */
	count = 1000 * factor;
	while (count) {

		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0x10)
			break;

		msleep(10);
		count--;
	}
	if (val != 0x10) {
		BNX2X_ERR("NIG timeout val = 0x%x\n", val);
		return -1;
	}

	/* Wait until PRS register shows 1 packet */
	count = 1000 * factor;
	while (count) {
		val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
		if (val == 1)
			break;

		msleep(10);
		count--;
	}
	if (val != 0x1) {
		BNX2X_ERR("PRS timeout val = 0x%x\n", val);
		return -2;
	}

	/* Reset and init BRB, PRS */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON);
	bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON);

	DP(NETIF_MSG_HW, "part2\n");

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);

	/* Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send 10 Ethernet packets */
	for (i = 0; i < 10; i++)
		bnx2x_lb_pckt(bp);

	/* Wait until NIG register shows 10 + 1
	   packets of size 11*0x10 = 0xb0 */
	count = 1000 * factor;
	while (count) {

		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0xb0)
			break;

		msleep(10);
		count--;
	}
	if (val != 0xb0) {
		BNX2X_ERR("NIG timeout val = 0x%x\n", val);
		return -3;
	}

	/* Wait until PRS register shows 2 packets */
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 2)
		BNX2X_ERR("PRS timeout val = 0x%x\n", val);

	/* Write 1 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);

	/* Wait until PRS register shows 3 packets */
	msleep(10 * factor);
	/* Wait until NIG register shows 1 packet of size 0x10 */
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 3)
		BNX2X_ERR("PRS timeout val = 0x%x\n", val);

	/* clear NIG EOP FIFO */
	for (i = 0; i < 11; i++)
		REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
	val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
	if (val != 1) {
		BNX2X_ERR("clear of NIG failed\n");
		return -4;
	}

	/* Reset and init BRB, PRS, NIG */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON);
	bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON);
#ifndef BCM_CNIC
	/* set NIC mode */
	REG_WR(bp, PRS_REG_NIC_MODE, 1);
#endif

	/* Enable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
	REG_WR(bp, CFC_REG_DEBUG0, 0x0);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);

	DP(NETIF_MSG_HW, "done\n");

	return 0; /* OK */
}

/*
 * Unmask the interrupt sources of the various HW blocks (a mask value
 * of 0 enables all interrupts of a block unless noted otherwise).
 */
static void bnx2x_enable_blocks_attention(struct bnx2x *bp)
{
	REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
	if (!CHIP_IS_E1x(bp))
		REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0x40);
	else
		REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
	REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
	REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
	/*
	 * mask read length error interrupts in brb for parser
	 * (parsing unit and 'checksum and crc' unit)
	 * these errors are legal (PU reads fixed length and CAC can cause
	 * read length error on truncated packets)
	 */
	REG_WR(bp, BRB1_REG_BRB1_INT_MASK, 0xFC00);
	REG_WR(bp, QM_REG_QM_INT_MASK, 0);
	REG_WR(bp, TM_REG_TM_INT_MASK, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
	REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
	REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
	REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
	REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
	REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
	REG_WR(bp,
CSDM_REG_CSDM_INT_MASK_1, 0);
	REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */

	if (CHIP_REV_IS_FPGA(bp))
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
	else if (!CHIP_IS_E1x(bp))
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0,
			   (PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF
				| PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT
				| PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN
				| PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED
				| PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED));
	else
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
	REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */

	if (!CHIP_IS_E1x(bp))
		/* enable VFC attentions: bits 11 and 12, bits 31:13 reserved */
		REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0x07ff);

	REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
	REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
/*	REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
	REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18);		/* bit 3,4 masked */
}

/*
 * Put the chip blocks shared by both ports into reset.
 * 0x1400 covers the common RESET_REG_2 bits; on E3 the per-port MSTAT
 * blocks are pulled into reset as well.
 */
static void bnx2x_reset_common(struct bnx2x *bp)
{
	u32 val = 0x1400;

	/* reset_common */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       0xd3ffff7f);

	if (CHIP_IS_E3(bp)) {
		val |= MISC_REGISTERS_RESET_REG_2_MSTAT0;
		val |= MISC_REGISTERS_RESET_REG_2_MSTAT1;
	}

	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, val);
}

/* Reset the DMAE state: not ready until the DMAE block is initialized */
static void bnx2x_setup_dmae(struct bnx2x *bp)
{
	bp->dmae_ready = 0;
	spin_lock_init(&bp->dmae_lock);
}

/*
 * Derive PXP read/write request ordering from the PCIe Device Control
 * register (max payload -> write order, max read request -> read order,
 * unless overridden by the "mrrs" module parameter) and program the
 * PXP arbiter accordingly.
 */
static void bnx2x_init_pxp(struct bnx2x *bp)
{
	u16 devctl;
	int r_order, w_order;

	pci_read_config_word(bp->pdev,
			     pci_pcie_cap(bp->pdev) + PCI_EXP_DEVCTL, &devctl);
	DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
	w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
	if (bp->mrrs == -1)
		r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
	else {
		DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
		r_order = bp->mrrs;
	}

	bnx2x_init_pxp_arb(bp, r_order, w_order);
}

/*
 * Read the fan-failure configuration from shmem and, when required
 * (either enabled explicitly or implied by the PHY type on any port),
 * route SPIO5 as an active-low interrupt towards the IGU.
 */
static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
{
	int is_required;
	u32 val;
	int port;

	if (BP_NOMCP(bp))
		return;

	is_required = 0;
	val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
	      SHARED_HW_CFG_FAN_FAILURE_MASK;

	if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
		is_required = 1;

	/*
	 * The fan failure mechanism is usually related to the PHY type since
	 * the power consumption of the board is affected by the PHY. Currently,
	 * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
	 */
	else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
		for (port = PORT_0; port < PORT_MAX; port++) {
			is_required |=
				bnx2x_fan_failure_det_req(
					bp,
					bp->common.shmem_base,
					bp->common.shmem2_base,
					port);
		}

	DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);

	if (is_required == 0)
		return;

	/* Fan failure is indicated by SPIO 5 */
	bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
		       MISC_REGISTERS_SPIO_INPUT_HI_Z);

	/* set to active low mode */
	val = REG_RD(bp, MISC_REG_SPIO_INT);
	val |= ((1 << MISC_REGISTERS_SPIO_5) <<
					MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
	REG_WR(bp, MISC_REG_SPIO_INT, val);

	/* enable interrupt to signal the IGU */
	val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
	val |= (1 << MISC_REGISTERS_SPIO_5);
	REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
}

/*
 * Make subsequent GRC accesses appear to come from function
 * @pretend_func_num (absolute function number).  Used to program
 * per-function registers of other functions; the read-back flushes
 * the write so the pretend takes effect immediately.
 */
static void bnx2x_pretend_func(struct bnx2x *bp, u8 pretend_func_num)
{
	u32 offset = 0;

	if (CHIP_IS_E1(bp))
		return;
	if (CHIP_IS_E1H(bp) && (pretend_func_num >= E1H_FUNC_MAX))
		return;

	switch (BP_ABS_FUNC(bp)) {
	case 0:
		offset = PXP2_REG_PGL_PRETEND_FUNC_F0;
		break;
	case 1:
		offset = PXP2_REG_PGL_PRETEND_FUNC_F1;
		break;
	case 2:
		offset = PXP2_REG_PGL_PRETEND_FUNC_F2;
		break;
	case 3:
		offset = PXP2_REG_PGL_PRETEND_FUNC_F3;
		break;
	case 4:
		offset = PXP2_REG_PGL_PRETEND_FUNC_F4;
		break;
	case 5:
		offset = PXP2_REG_PGL_PRETEND_FUNC_F5;
		break;
	case 6:
		offset = PXP2_REG_PGL_PRETEND_FUNC_F6;
		break;
	case 7:
		offset = PXP2_REG_PGL_PRETEND_FUNC_F7;
		break;
	default:
		return;
	}

	REG_WR(bp, offset, pretend_func_num);
	REG_RD(bp, offset);
	DP(NETIF_MSG_HW,
"Pretending to func %d\n", pretend_func_num);
}

/*
 * Disable this PF in the IGU, the PGLUE master-enable and the CFC.
 * Part of the E2 timers-bug workaround (see bnx2x_init_hw_common()).
 */
void bnx2x_pf_disable(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
	val &= ~IGU_PF_CONF_FUNC_EN;

	REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
	REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
	REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 0);
}

/*
 * Run the common PHY init under the PHY lock.  On E2+ the shmem bases
 * of the other path are passed as well (same ext PHY may be shared).
 */
static inline void bnx2x__common_init_phy(struct bnx2x *bp)
{
	u32 shmem_base[2], shmem2_base[2];
	shmem_base[0] =  bp->common.shmem_base;
	shmem2_base[0] = bp->common.shmem2_base;
	if (!CHIP_IS_E1x(bp)) {
		shmem_base[1] =
			SHMEM2_RD(bp, other_shmem_base_addr);
		shmem2_base[1] =
			SHMEM2_RD(bp, other_shmem2_base_addr);
	}
	bnx2x_acquire_phy_lock(bp);
	bnx2x_common_init_phy(bp, shmem_base, shmem2_base,
			      bp->common.chip_id);
	bnx2x_release_phy_lock(bp);
}

/**
 * bnx2x_init_hw_common - initialize the HW at the COMMON phase.
 *
 * @bp:		driver handle
 *
 * Runs once per chip (first PF to load): resets the common blocks,
 * applies the E2 timers-bug workaround, and initializes every shared
 * HW block in its required order.  Returns 0 or -EBUSY on an init
 * completion timeout.
 */
static int bnx2x_init_hw_common(struct bnx2x *bp)
{
	u32 val;

	DP(NETIF_MSG_HW, "starting common init  func %d\n", BP_ABS_FUNC(bp));

	/*
	 * take the UNDI lock to protect undi_unload flow from accessing
	 * registers while we're resetting the chip
	 */
	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET);

	bnx2x_reset_common(bp);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);

	val = 0xfffc;
	if (CHIP_IS_E3(bp)) {
		val |= MISC_REGISTERS_RESET_REG_2_MSTAT0;
		val |= MISC_REGISTERS_RESET_REG_2_MSTAT1;
	}
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, val);

	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET);

	bnx2x_init_block(bp, BLOCK_MISC, PHASE_COMMON);

	if (!CHIP_IS_E1x(bp)) {
		u8 abs_func_id;

		/*
		 * 4-port mode or 2-port mode we need to turn off master-enable
		 * for everyone, after that, turn it back on for self.
		 * so, we disregard multi-function or not, and always disable
		 * for all functions on the given path, this means 0,2,4,6 for
		 * path 0 and 1,3,5,7 for path 1
		 */
		for (abs_func_id = BP_PATH(bp);
		     abs_func_id < E2_FUNC_MAX*2; abs_func_id += 2) {
			if (abs_func_id == BP_ABS_FUNC(bp)) {
				REG_WR(bp,
				    PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER,
				    1);
				continue;
			}

			bnx2x_pretend_func(bp, abs_func_id);
			/* clear pf enable */
			bnx2x_pf_disable(bp);
			bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
		}
	}

	bnx2x_init_block(bp, BLOCK_PXP, PHASE_COMMON);
	if (CHIP_IS_E1(bp)) {
		/* enable HW interrupt from PXP on USDM overflow
		   bit 16 on INT_MASK_0 */
		REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
	}

	bnx2x_init_block(bp, BLOCK_PXP2, PHASE_COMMON);
	bnx2x_init_pxp(bp);

#ifdef __BIG_ENDIAN
	REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
	/* make sure this value is 0 */
	REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);

/*	REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
	REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
#endif

	bnx2x_ilt_init_page_size(bp, INITOP_SET);

	if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
		REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);

	/* let the HW do it's magic ... */
	msleep(100);
	/* finish PXP init */
	val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
	if (val != 1) {
		BNX2X_ERR("PXP2 CFG failed\n");
		return -EBUSY;
	}
	val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
	if (val != 1) {
		BNX2X_ERR("PXP2 RD_INIT failed\n");
		return -EBUSY;
	}

	/* Timers bug workaround E2 only. We need to set the entire ILT to
	 * have entries with value "0" and valid bit on.
	 * This needs to be done by the first PF that is loaded in a path
	 * (i.e. common phase)
	 */
	if (!CHIP_IS_E1x(bp)) {
/* In E2 there is a bug in the timers block that can cause function 6 / 7
 * (i.e. vnic3) to start even if it is marked as "scan-off".
 * This occurs when a different function (func2,3) is being marked
 * as "scan-off". Real-life scenario for example: if a driver is being
 * load-unloaded while func6,7 are down. This will cause the timer to access
 * the ilt, translate to a logical address and send a request to read/write.
 * Since the ilt for the function that is down is not valid, this will cause
 * a translation error which is unrecoverable.
 * The Workaround is intended to make sure that when this happens nothing fatal
 * will occur. The workaround:
 *	1.  First PF driver which loads on a path will:
 *		a.  After taking the chip out of reset, by using pretend,
 *		    it will write "0" to the following registers of
 *		    the other vnics.
 *		    REG_WR(pdev, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
 *		    REG_WR(pdev, CFC_REG_WEAK_ENABLE_PF,0);
 *		    REG_WR(pdev, CFC_REG_STRONG_ENABLE_PF,0);
 *		    And for itself it will write '1' to
 *		    PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER to enable
 *		    dmae-operations (writing to pram for example.)
 *		    note: can be done for only function 6,7 but cleaner this
 *			  way.
 *		b.  Write zero+valid to the entire ILT.
 *		c.  Init the first_timers_ilt_entry, last_timers_ilt_entry of
 *		    VNIC3 (of that port). The range allocated will be the
 *		    entire ILT. This is needed to prevent  ILT range error.
 *	2.  Any PF driver load flow:
 *		a.  ILT update with the physical addresses of the allocated
 *		    logical pages.
 *		b.  Wait 20msec. - note that this timeout is needed to make
 *		    sure there are no requests in one of the PXP internal
 *		    queues with "old" ILT addresses.
 *		c.  PF enable in the PGLC.
 *		d.  Clear the was_error of the PF in the PGLC. (could have
 *		    occurred while driver was down)
 *		e.  PF enable in the CFC (WEAK + STRONG)
 *		f.  Timers scan enable
 *	3.  PF driver unload flow:
 *		a.  Clear the Timers scan_en.
 *		b.  Polling for scan_on=0 for that PF.
 *		c.  Clear the PF enable bit in the PXP.
 *		d.  Clear the PF enable in the CFC (WEAK + STRONG)
 *		e.  Write zero+valid to all ILT entries (The valid bit must
 *		    stay set)
 *		f.  If this is VNIC 3 of a port then also init
 *		    first_timers_ilt_entry to zero and last_timers_ilt_entry
 *		    to the last entry in the ILT.
 *
 *	Notes:
 *	Currently the PF error in the PGLC is non recoverable.
 *	In the future the there will be a recovery routine for this error.
 *	Currently attention is masked.
 *	Having an MCP lock on the load/unload process does not guarantee that
 *	there is no Timer disable during Func6/7 enable. This is because the
 *	Timers scan is currently being cleared by the MCP on FLR.
 *	Step 2.d can be done only for PF6/7 and the driver can also check if
 *	there is error before clearing it. But the flow above is simpler and
 *	more general.
 *	All ILT entries are written by zero+valid and not just PF6/7
 *	ILT entries since in the future the ILT entries allocation for
 *	PF-s might be dynamic.
 */
		struct ilt_client_info ilt_cli;
		struct bnx2x_ilt ilt;
		memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
		memset(&ilt, 0, sizeof(struct bnx2x_ilt));

		/* initialize dummy TM client */
		ilt_cli.start = 0;
		ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
		ilt_cli.client_num = ILT_CLIENT_TM;

		/* Step 1: set zeroes to all ilt page entries with valid bit on
		 * Step 2: set the timers first/last ilt entry to point
		 * to the entire range to prevent ILT range error for 3rd/4th
		 * vnic	(this code assumes existence of the vnic)
		 *
		 * both steps performed by call to bnx2x_ilt_client_init_op()
		 * with dummy TM client
		 *
		 * we must use pretend since PXP2_REG_RQ_##blk##_FIRST_ILT
		 * and his brother are split registers
		 */
		bnx2x_pretend_func(bp, (BP_PATH(bp) + 6));
		bnx2x_ilt_client_init_op_ilt(bp, &ilt, &ilt_cli, INITOP_CLEAR);
		bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));

		REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN, BNX2X_PXP_DRAM_ALIGN);
		REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_RD, BNX2X_PXP_DRAM_ALIGN);
		REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_SEL, 1);
	}


	REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
	REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);

	if (!CHIP_IS_E1x(bp)) {
		int factor = CHIP_REV_IS_EMUL(bp) ? 1000 :
				(CHIP_REV_IS_FPGA(bp) ? 400 : 0);
		bnx2x_init_block(bp, BLOCK_PGLUE_B, PHASE_COMMON);

		bnx2x_init_block(bp, BLOCK_ATC, PHASE_COMMON);

		/* let the HW do it's magic ... */
		do {
			msleep(200);
			val = REG_RD(bp, ATC_REG_ATC_INIT_DONE);
		} while (factor-- && (val != 1));

		if (val != 1) {
			BNX2X_ERR("ATC_INIT failed\n");
			return -EBUSY;
		}
	}

	bnx2x_init_block(bp, BLOCK_DMAE, PHASE_COMMON);

	/* clean the DMAE memory */
	bp->dmae_ready = 1;
	bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8, 1);

	bnx2x_init_block(bp, BLOCK_TCM, PHASE_COMMON);

	bnx2x_init_block(bp, BLOCK_UCM, PHASE_COMMON);

	bnx2x_init_block(bp, BLOCK_CCM, PHASE_COMMON);

	bnx2x_init_block(bp, BLOCK_XCM, PHASE_COMMON);

	bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);

	bnx2x_init_block(bp, BLOCK_QM, PHASE_COMMON);


	/* QM queues pointers table */
	bnx2x_qm_init_ptr_table(bp, bp->qm_cid_count, INITOP_SET);

	/* soft reset pulse */
	REG_WR(bp, QM_REG_SOFT_RESET, 1);
	REG_WR(bp, QM_REG_SOFT_RESET, 0);

#ifdef BCM_CNIC
	bnx2x_init_block(bp, BLOCK_TM, PHASE_COMMON);
#endif

	bnx2x_init_block(bp, BLOCK_DORQ, PHASE_COMMON);
	REG_WR(bp, DORQ_REG_DPM_CID_OFST, BNX2X_DB_SHIFT);
	if (!CHIP_REV_IS_SLOW(bp))
		/* enable hw interrupt from doorbell Q */
		REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);

	bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON);

	bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON);
	REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);

	if (!CHIP_IS_E1(bp))
		REG_WR(bp, PRS_REG_E1HOV_MODE, bp->path_has_ovlan);

	if (!CHIP_IS_E1x(bp) && !CHIP_IS_E3B0(bp))
		/* Bit-map indicating which L2 hdrs may appear
		 * after the basic Ethernet header
		 */
		REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC,
		       bp->path_has_ovlan ? 7 : 6);

	bnx2x_init_block(bp, BLOCK_TSDM, PHASE_COMMON);
	bnx2x_init_block(bp, BLOCK_CSDM, PHASE_COMMON);
	bnx2x_init_block(bp, BLOCK_USDM, PHASE_COMMON);
	bnx2x_init_block(bp, BLOCK_XSDM, PHASE_COMMON);

	if (!CHIP_IS_E1x(bp)) {
		/* reset VFC memories */
		REG_WR(bp, TSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST,
			   VFC_MEMORIES_RST_REG_CAM_RST |
			   VFC_MEMORIES_RST_REG_RAM_RST);
		REG_WR(bp, XSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST,
			   VFC_MEMORIES_RST_REG_CAM_RST |
			   VFC_MEMORIES_RST_REG_RAM_RST);

		msleep(20);
	}

	bnx2x_init_block(bp, BLOCK_TSEM, PHASE_COMMON);
	bnx2x_init_block(bp, BLOCK_USEM, PHASE_COMMON);
	bnx2x_init_block(bp, BLOCK_CSEM, PHASE_COMMON);
	bnx2x_init_block(bp, BLOCK_XSEM, PHASE_COMMON);

	/* sync semi rtc */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       0x80000000);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
	       0x80000000);

	bnx2x_init_block(bp, BLOCK_UPB, PHASE_COMMON);
	bnx2x_init_block(bp, BLOCK_XPB, PHASE_COMMON);
	bnx2x_init_block(bp, BLOCK_PBF, PHASE_COMMON);

	if (!CHIP_IS_E1x(bp))
		REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC,
		       bp->path_has_ovlan ? 7 : 6);

	REG_WR(bp, SRC_REG_SOFT_RST, 1);

	bnx2x_init_block(bp, BLOCK_SRC, PHASE_COMMON);

#ifdef BCM_CNIC
	REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
	REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
	REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
	REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
	REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
	REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
	REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
	REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
	REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
	REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
#endif
	REG_WR(bp, SRC_REG_SOFT_RST, 0);

	if (sizeof(union cdu_context) != 1024)
		/* we currently assume that a context is 1024 bytes */
		dev_alert(&bp->pdev->dev,
			  "please adjust the size of cdu_context(%ld)\n",
			  (long)sizeof(union cdu_context));

	bnx2x_init_block(bp, BLOCK_CDU, PHASE_COMMON);
	val = (4 << 24) + (0 << 12) + 1024;
	REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);

	bnx2x_init_block(bp, BLOCK_CFC, PHASE_COMMON);
	REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
	/* enable context validation interrupt from CFC */
	REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);

	/* set the thresholds to prevent CFC/CDU race */
	REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);

	bnx2x_init_block(bp, BLOCK_HC, PHASE_COMMON);

	if (!CHIP_IS_E1x(bp) && BP_NOMCP(bp))
		REG_WR(bp, IGU_REG_RESET_MEMORIES, 0x36);

	bnx2x_init_block(bp, BLOCK_IGU, PHASE_COMMON);
	bnx2x_init_block(bp, BLOCK_MISC_AEU, PHASE_COMMON);

	/* Reset PCIE errors for debug */
	REG_WR(bp, 0x2814, 0xffffffff);
	REG_WR(bp, 0x3820, 0xffffffff);

	if (!CHIP_IS_E1x(bp)) {
		REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_CONTROL_5,
			   (PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 |
				PXPCS_TL_CONTROL_5_ERR_UNSPPORT));
		REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC345_STAT,
			   (PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4 |
				PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3 |
				PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2));
		REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC678_STAT,
			   (PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7 |
				PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6 |
				PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5));
	}

	bnx2x_init_block(bp, BLOCK_NIG, PHASE_COMMON);
	if (!CHIP_IS_E1(bp)) {
		/* in E3 this done in per-port section */
		if (!CHIP_IS_E3(bp))
			REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_MF(bp));
	}
	if (CHIP_IS_E1H(bp))
		/* not applicable for E2 (and above ...) */
		REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_MF_SD(bp));

	if (CHIP_REV_IS_SLOW(bp))
		msleep(200);

	/* finish CFC init */
	val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC LL_INIT failed\n");
		return -EBUSY;
	}
	val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC AC_INIT failed\n");
		return -EBUSY;
	}
	val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC CAM_INIT failed\n");
		return -EBUSY;
	}
	REG_WR(bp, CFC_REG_DEBUG0, 0);

	if (CHIP_IS_E1(bp)) {
		/* read NIG statistic
		   to see if this is our first up since powerup */
		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);

		/* do internal memory self test */
		if ((val == 0) && bnx2x_int_mem_test(bp)) {
			BNX2X_ERR("internal mem self test failed\n");
			return -EBUSY;
		}
	}

	bnx2x_setup_fan_failure_detection(bp);

	/* clear PXP2 attentions */
	REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);

	bnx2x_enable_blocks_attention(bp);
	bnx2x_enable_blocks_parity(bp);

	if (!BP_NOMCP(bp)) {
		if (CHIP_IS_E1x(bp))
			bnx2x__common_init_phy(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not initialize link\n");

	return 0;
}

/**
 * bnx2x_init_hw_common_chip - init HW at the COMMON_CHIP phase.
 *
 * @bp:		driver handle
 */
static int bnx2x_init_hw_common_chip(struct bnx2x *bp)
{
	int rc = bnx2x_init_hw_common(bp);

	if (rc)
		return rc;

	/* In E2 2-PORT mode, same ext phy is used for the two paths */
	if (!BP_NOMCP(bp))
		bnx2x__common_init_phy(bp);

	return 0;
}

/*
 * Per-port HW init phase: initializes every block for this port,
 * configures BRB pause thresholds, PBF, NIG classification and the
 * AEU attention masks (including SPIO5 fan-failure routing).
 */
static int bnx2x_init_hw_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int init_phase = port ? PHASE_PORT1 : PHASE_PORT0;
	u32 low, high;
	u32 val;

	bnx2x__link_reset(bp);

	DP(NETIF_MSG_HW, "starting port init  port %d\n", port);

	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	bnx2x_init_block(bp, BLOCK_MISC, init_phase);
	bnx2x_init_block(bp, BLOCK_PXP, init_phase);
	bnx2x_init_block(bp, BLOCK_PXP2, init_phase);

	/* Timers bug workaround: disables the pf_master bit in pglue at
	 * common phase, we need to enable it here before any dmae access are
	 * attempted. Therefore we manually added the enable-master to the
	 * port phase (it also happens in the function phase)
	 */
	if (!CHIP_IS_E1x(bp))
		REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);

	bnx2x_init_block(bp, BLOCK_ATC, init_phase);
	bnx2x_init_block(bp, BLOCK_DMAE, init_phase);
	bnx2x_init_block(bp, BLOCK_PGLUE_B, init_phase);
	bnx2x_init_block(bp, BLOCK_QM, init_phase);

	bnx2x_init_block(bp, BLOCK_TCM, init_phase);
	bnx2x_init_block(bp, BLOCK_UCM, init_phase);
	bnx2x_init_block(bp, BLOCK_CCM, init_phase);
	bnx2x_init_block(bp, BLOCK_XCM, init_phase);

	/* QM cid (connection) count */
	bnx2x_qm_init_cid_count(bp, bp->qm_cid_count, INITOP_SET);

#ifdef BCM_CNIC
	bnx2x_init_block(bp, BLOCK_TM, init_phase);
	REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
	REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
#endif

	bnx2x_init_block(bp, BLOCK_DORQ, init_phase);

	if (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) {
		bnx2x_init_block(bp, BLOCK_BRB1, init_phase);

		/* BRB pause low threshold depends on single/dual port,
		 * MF mode and MTU (units of 256 bytes) */
		if (IS_MF(bp))
			low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
		else if (bp->dev->mtu > 4096) {
			if (bp->flags & ONE_PORT_FLAG)
				low = 160;
			else {
				val = bp->dev->mtu;
				/* (24*1024 + val*4)/256 */
				low = 96 + (val/64) +
						((val % 64) ? 1 : 0);
			}
		} else
			low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
		high = low + 56;	/* 14*1024/256 */
		REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
		REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
	}

	if (CHIP_MODE_IS_4_PORT(bp))
		REG_WR(bp, (BP_PORT(bp) ?
			    BRB1_REG_MAC_GUARANTIED_1 :
			    BRB1_REG_MAC_GUARANTIED_0), 40);


	bnx2x_init_block(bp, BLOCK_PRS, init_phase);
	if (CHIP_IS_E3B0(bp))
		/* Ovlan exists only if we are in multi-function +
		 * switch-dependent mode, in switch-independent there
		 * is no ovlan headers
		 */
		REG_WR(bp, BP_PORT(bp) ?
		       PRS_REG_HDRS_AFTER_BASIC_PORT_1 :
		       PRS_REG_HDRS_AFTER_BASIC_PORT_0,
		       (bp->path_has_ovlan ? 7 : 6));

	bnx2x_init_block(bp, BLOCK_TSDM, init_phase);
	bnx2x_init_block(bp, BLOCK_CSDM, init_phase);
	bnx2x_init_block(bp, BLOCK_USDM, init_phase);
	bnx2x_init_block(bp, BLOCK_XSDM, init_phase);

	bnx2x_init_block(bp, BLOCK_TSEM, init_phase);
	bnx2x_init_block(bp, BLOCK_USEM, init_phase);
	bnx2x_init_block(bp, BLOCK_CSEM, init_phase);
	bnx2x_init_block(bp, BLOCK_XSEM, init_phase);

	bnx2x_init_block(bp, BLOCK_UPB, init_phase);
	bnx2x_init_block(bp, BLOCK_XPB, init_phase);

	bnx2x_init_block(bp, BLOCK_PBF, init_phase);

	if (CHIP_IS_E1x(bp)) {
		/* configure PBF to work without PAUSE mtu 9000 */
		REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);

		/* update threshold */
		REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
		/* update init credit */
		REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);

		/* probe changes */
		REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
		udelay(50);
		REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
	}

#ifdef BCM_CNIC
	bnx2x_init_block(bp, BLOCK_SRC, init_phase);
#endif
	bnx2x_init_block(bp, BLOCK_CDU, init_phase);
	bnx2x_init_block(bp, BLOCK_CFC, init_phase);

	if (CHIP_IS_E1(bp)) {
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
	}
	bnx2x_init_block(bp, BLOCK_HC, init_phase);

	bnx2x_init_block(bp, BLOCK_IGU, init_phase);

	bnx2x_init_block(bp, BLOCK_MISC_AEU, init_phase);
	/* init aeu_mask_attn_func_0/1:
	 *  - SF mode: bits 3-7 are masked. only bits 0-2 are in use
	 *  - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
	 *             bits 4-7 are used for "per vn group attention" */
	val = IS_MF(bp) ? 0xF7 : 0x7;
	/* Enable DCBX attention for all but E1 */
	val |= CHIP_IS_E1(bp) ? 0 : 0x10;
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, val);

	bnx2x_init_block(bp, BLOCK_NIG, init_phase);

	if (!CHIP_IS_E1x(bp)) {
		/* Bit-map indicating which L2 hdrs may appear after the
		 * basic Ethernet header
		 */
		REG_WR(bp, BP_PORT(bp) ?
			   NIG_REG_P1_HDRS_AFTER_BASIC :
			   NIG_REG_P0_HDRS_AFTER_BASIC,
			   IS_MF_SD(bp) ? 7 : 6);

		if (CHIP_IS_E3(bp))
			REG_WR(bp, BP_PORT(bp) ?
				   NIG_REG_LLH1_MF_MODE :
				   NIG_REG_LLH_MF_MODE, IS_MF(bp));
	}
	if (!CHIP_IS_E3(bp))
		REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);

	if (!CHIP_IS_E1(bp)) {
		/* 0x2 disable mf_ov, 0x1 enable */
		REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
		       (IS_MF_SD(bp) ? 0x1 : 0x2));

		if (!CHIP_IS_E1x(bp)) {
			val = 0;
			switch (bp->mf_mode) {
			case MULTI_FUNCTION_SD:
				val = 1;
				break;
			case MULTI_FUNCTION_SI:
				val = 2;
				break;
			}

			REG_WR(bp, (BP_PORT(bp) ? NIG_REG_LLH1_CLS_TYPE :
						  NIG_REG_LLH0_CLS_TYPE), val);
		}
		{
			REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
			REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
			REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
		}
	}


	/* If SPIO5 is set to generate interrupts, enable it for this port */
	val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
	if (val & (1 << MISC_REGISTERS_SPIO_5)) {
		u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
				       MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
		val = REG_RD(bp, reg_addr);
		val |= AEU_INPUTS_ATTN_BITS_SPIO5;
		REG_WR(bp, reg_addr, val);
	}

	return 0;
}

/*
 * Program one ILT (internal lookup table) entry with a physical address.
 * The on-chip address table register differs between E1 and later chips.
 */
static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
{
	int reg;

	if (CHIP_IS_E1(bp))
		reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
	else
		reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;

	bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
}

/* Clear a single status block in the IGU (PF context) */
static inline void bnx2x_igu_clear_sb(struct bnx2x *bp, u8 idu_sb_id)
{
	bnx2x_igu_clear_sb_gen(bp, BP_FUNC(bp), idu_sb_id, true /*PF*/);
}

/* Zero all ILT entries belonging to function @func */
static inline void bnx2x_clear_func_ilt(struct bnx2x *bp, u32 func)
{
	u32 i, base = FUNC_ILT_BASE(func);
	for (i = base; i < base + ILT_PER_FUNC; i++)
		bnx2x_ilt_wr(bp, i, 0);
}

static int bnx2x_init_hw_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int init_phase = PHASE_PF0 + func;
	struct bnx2x_ilt *ilt = BP_ILT(bp);
	u16 cdu_ilt_start;
	u32 addr, val;
	u32 main_mem_base, main_mem_size, main_mem_prty_clr;
	int i, main_mem_width, rc;

	DP(NETIF_MSG_HW, "starting func init  func %d\n", func);

	/* FLR cleanup - hmmm */
	if (!CHIP_IS_E1x(bp)) {
		rc = bnx2x_pf_flr_clnup(bp);
		if (rc)
			return rc;
	}

	/* set MSI reconfigure capability */
	if (bp->common.int_block == INT_BLOCK_HC) {
		addr = (port ?
HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
		/* NOTE(review): tail of bnx2x_init_hw_func — the function
		 * entry (and the declaration of addr/val/i/ilt/...) is above
		 * this chunk.  Enable MSI attention on the per-port HC config
		 * register selected above.
		 */
		val = REG_RD(bp, addr);
		val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
		REG_WR(bp, addr, val);
	}

	bnx2x_init_block(bp, BLOCK_PXP, init_phase);
	bnx2x_init_block(bp, BLOCK_PXP2, init_phase);

	/* Point the CDU ILT lines of this function at the pre-allocated
	 * context memory (bp->context), one ILT line per CDU page.
	 */
	ilt = BP_ILT(bp);
	cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;

	for (i = 0; i < L2_ILT_LINES(bp); i++) {
		ilt->lines[cdu_ilt_start + i].page =
			bp->context.vcxt + (ILT_PAGE_CIDS * i);
		ilt->lines[cdu_ilt_start + i].page_mapping =
			bp->context.cxt_mapping + (CDU_ILT_PAGE_SZ * i);
		/* cdu ilt pages are allocated manually so there's no need to
		 * set the size
		 */
	}
	bnx2x_ilt_init_op(bp, INITOP_SET);

#ifdef BCM_CNIC
	bnx2x_src_init_t2(bp, bp->t2, bp->t2_mapping, SRC_CONN_NUM);

	/* T1 hash bits value determines the T1 number of entries */
	REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, SRC_HASH_BITS);
#endif

#ifndef BCM_CNIC
	/* set NIC mode */
	REG_WR(bp, PRS_REG_NIC_MODE, 1);
#endif  /* BCM_CNIC */

	if (!CHIP_IS_E1x(bp)) {
		u32 pf_conf = IGU_PF_CONF_FUNC_EN;

		/* Turn on a single ISR mode in IGU if driver is going to use
		 * INT#x or MSI
		 */
		if (!(bp->flags & USING_MSIX_FLAG))
			pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
		/*
		 * Timers workaround bug: function init part.
		 * Need to wait 20msec after initializing ILT,
		 * needed to make sure there are no requests in
		 * one of the PXP internal queues with "old" ILT addresses
		 */
		msleep(20);
		/*
		 * Master enable - Due to WB DMAE writes performed before this
		 * register is re-initialized as part of the regular function
		 * init
		 */
		REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
		/* Enable the function in IGU */
		REG_WR(bp, IGU_REG_PF_CONFIGURATION, pf_conf);
	}

	bp->dmae_ready = 1;

	bnx2x_init_block(bp, BLOCK_PGLUE_B, init_phase);

	if (!CHIP_IS_E1x(bp))
		REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, func);

	/* Per-function init of the remaining HW blocks, in HW-mandated
	 * order (do not reorder these calls).
	 */
	bnx2x_init_block(bp, BLOCK_ATC, init_phase);
	bnx2x_init_block(bp, BLOCK_DMAE, init_phase);
	bnx2x_init_block(bp, BLOCK_NIG, init_phase);
	bnx2x_init_block(bp, BLOCK_SRC, init_phase);
	bnx2x_init_block(bp, BLOCK_MISC, init_phase);
	bnx2x_init_block(bp, BLOCK_TCM, init_phase);
	bnx2x_init_block(bp, BLOCK_UCM, init_phase);
	bnx2x_init_block(bp, BLOCK_CCM, init_phase);
	bnx2x_init_block(bp, BLOCK_XCM, init_phase);
	bnx2x_init_block(bp, BLOCK_TSEM, init_phase);
	bnx2x_init_block(bp, BLOCK_USEM, init_phase);
	bnx2x_init_block(bp, BLOCK_CSEM, init_phase);
	bnx2x_init_block(bp, BLOCK_XSEM, init_phase);

	if (!CHIP_IS_E1x(bp))
		REG_WR(bp, QM_REG_PF_EN, 1);

	if (!CHIP_IS_E1x(bp)) {
		REG_WR(bp, TSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
		REG_WR(bp, USEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
		REG_WR(bp, CSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
		REG_WR(bp, XSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
	}
	bnx2x_init_block(bp, BLOCK_QM, init_phase);

	bnx2x_init_block(bp, BLOCK_TM, init_phase);
	bnx2x_init_block(bp, BLOCK_DORQ, init_phase);

	bnx2x_init_block(bp, BLOCK_BRB1, init_phase);
	bnx2x_init_block(bp, BLOCK_PRS, init_phase);
	bnx2x_init_block(bp, BLOCK_TSDM, init_phase);
	bnx2x_init_block(bp, BLOCK_CSDM, init_phase);
	bnx2x_init_block(bp, BLOCK_USDM, init_phase);
	bnx2x_init_block(bp, BLOCK_XSDM, init_phase);
	bnx2x_init_block(bp, BLOCK_UPB, init_phase);
	bnx2x_init_block(bp, BLOCK_XPB, init_phase);
	bnx2x_init_block(bp, BLOCK_PBF, init_phase);
	if (!CHIP_IS_E1x(bp))
		REG_WR(bp, PBF_REG_DISABLE_PF, 0);

	bnx2x_init_block(bp, BLOCK_CDU, init_phase);

	bnx2x_init_block(bp, BLOCK_CFC, init_phase);

	if (!CHIP_IS_E1x(bp))
		REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 1);

	/* In MF mode enable the per-function LLH and program the outer
	 * VLAN id used for this function.
	 */
	if (IS_MF(bp)) {
		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
		REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->mf_ov);
	}

	bnx2x_init_block(bp, BLOCK_MISC_AEU, init_phase);

	/* HC init per function */
	if (bp->common.int_block == INT_BLOCK_HC) {
		if (CHIP_IS_E1H(bp)) {
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);

			REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
			REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
		}
		bnx2x_init_block(bp, BLOCK_HC, init_phase);

	} else {
		int num_segs, sb_idx, prod_offset;

		REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);

		if (!CHIP_IS_E1x(bp)) {
			REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
			REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
		}

		bnx2x_init_block(bp, BLOCK_IGU, init_phase);

		if (!CHIP_IS_E1x(bp)) {
			int dsb_idx = 0;
			/**
			 * Producer memory:
			 * E2 mode: address 0-135 match to the mapping memory;
			 * 136 - PF0 default prod; 137 - PF1 default prod;
			 * 138 - PF2 default prod; 139 - PF3 default prod;
			 * 140 - PF0 attn prod;    141 - PF1 attn prod;
			 * 142 - PF2 attn prod;    143 - PF3 attn prod;
			 * 144-147 reserved.
			 *
			 * E1.5 mode - In backward compatible mode;
			 * for non default SB; each even line in the memory
			 * holds the U producer and each odd line hold
			 * the C producer. The first 128 producers are for
			 * NDSB (PF0 - 0-31; PF1 - 32-63 and so on). The last 20
			 * producers are for the DSB for each PF.
			 * Each PF has five segments: (the order inside each
			 * segment is PF0; PF1; PF2; PF3) - 128-131 U prods;
			 * 132-135 C prods; 136-139 X prods; 140-143 T prods;
			 * 144-147 attn prods;
			 */
			/* non-default-status-blocks */
			num_segs = CHIP_INT_MODE_IS_BC(bp) ?
				IGU_BC_NDSB_NUM_SEGS : IGU_NORM_NDSB_NUM_SEGS;
			for (sb_idx = 0; sb_idx < bp->igu_sb_cnt; sb_idx++) {
				prod_offset = (bp->igu_base_sb + sb_idx) *
					num_segs;

				/* zero every producer line of this SB */
				for (i = 0; i < num_segs; i++) {
					addr = IGU_REG_PROD_CONS_MEMORY +
							(prod_offset + i) * 4;
					REG_WR(bp, addr, 0);
				}
				/* send consumer update with value 0 */
				bnx2x_ack_sb(bp, bp->igu_base_sb + sb_idx,
					     USTORM_ID, 0, IGU_INT_NOP, 1);
				bnx2x_igu_clear_sb(bp,
						   bp->igu_base_sb + sb_idx);
			}

			/* default-status-blocks */
			num_segs = CHIP_INT_MODE_IS_BC(bp) ?
				IGU_BC_DSB_NUM_SEGS : IGU_NORM_DSB_NUM_SEGS;

			if (CHIP_MODE_IS_4_PORT(bp))
				dsb_idx = BP_FUNC(bp);
			else
				dsb_idx = BP_VN(bp);

			prod_offset = (CHIP_INT_MODE_IS_BC(bp) ?
				       IGU_BC_BASE_DSB_PROD + dsb_idx :
				       IGU_NORM_BASE_DSB_PROD + dsb_idx);

			/*
			 * igu prods come in chunks of E1HVN_MAX (4) -
			 * does not matters what is the current chip mode
			 */
			for (i = 0; i < (num_segs * E1HVN_MAX);
			     i += E1HVN_MAX) {
				addr = IGU_REG_PROD_CONS_MEMORY +
							(prod_offset + i)*4;
				REG_WR(bp, addr, 0);
			}
			/* send consumer update with 0 */
			if (CHIP_INT_MODE_IS_BC(bp)) {
				bnx2x_ack_sb(bp, bp->igu_dsb_id,
					     USTORM_ID, 0, IGU_INT_NOP, 1);
				bnx2x_ack_sb(bp, bp->igu_dsb_id,
					     CSTORM_ID, 0, IGU_INT_NOP, 1);
				bnx2x_ack_sb(bp, bp->igu_dsb_id,
					     XSTORM_ID, 0, IGU_INT_NOP, 1);
				bnx2x_ack_sb(bp, bp->igu_dsb_id,
					     TSTORM_ID, 0, IGU_INT_NOP, 1);
				bnx2x_ack_sb(bp, bp->igu_dsb_id,
					     ATTENTION_ID, 0, IGU_INT_NOP, 1);
			} else {
				bnx2x_ack_sb(bp, bp->igu_dsb_id,
					     USTORM_ID, 0, IGU_INT_NOP, 1);
				bnx2x_ack_sb(bp, bp->igu_dsb_id,
					     ATTENTION_ID, 0, IGU_INT_NOP, 1);
			}
			bnx2x_igu_clear_sb(bp, bp->igu_dsb_id);

			/* !!! these should become driver const once
			 * rf-tool supports split-68 const
			 */
			REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
			REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
			REG_WR(bp, IGU_REG_SB_MASK_LSB, 0);
			REG_WR(bp, IGU_REG_SB_MASK_MSB, 0);
			REG_WR(bp, IGU_REG_PBA_STATUS_LSB, 0);
			REG_WR(bp, IGU_REG_PBA_STATUS_MSB, 0);
		}
	}

	/* Reset PCIE errors for debug */
	REG_WR(bp, 0x2114, 0xffffffff);
	REG_WR(bp, 0x2120, 0xffffffff);

	if (CHIP_IS_E1x(bp)) {
		main_mem_size = HC_REG_MAIN_MEMORY_SIZE / 2; /*dwords*/
		main_mem_base = HC_REG_MAIN_MEMORY +
				BP_PORT(bp) * (main_mem_size * 4);
		main_mem_prty_clr = HC_REG_HC_PRTY_STS_CLR;
		main_mem_width = 8;

		val = REG_RD(bp, main_mem_prty_clr);
		if (val)
			DP(NETIF_MSG_HW,
			   "Hmmm... Parity errors in HC block during function init (0x%x)!\n",
			   val);

		/* Clear "false" parity errors in MSI-X table */
		for (i = main_mem_base;
		     i < main_mem_base + main_mem_size * 4;
		     i += main_mem_width) {
			/* read-modify-write cycle scrubs the parity bits */
			bnx2x_read_dmae(bp, i, main_mem_width / 4);
			bnx2x_write_dmae(bp, bnx2x_sp_mapping(bp, wb_data),
					 i, main_mem_width / 4);
		}
		/* Clear HC parity attention */
		REG_RD(bp, main_mem_prty_clr);
	}

#ifdef BNX2X_STOP_ON_ERROR
	/* Enable STORMs SP logging */
	REG_WR8(bp, BAR_USTRORM_INTMEM +
	       USTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1);
	REG_WR8(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1);
	REG_WR8(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1);
	REG_WR8(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1);
#endif

	bnx2x_phy_probe(&bp->link_params);

	return 0;
}

/*
 * bnx2x_free_mem - release all driver DMA/host memory.
 *
 * Mirror of bnx2x_alloc_mem(); safe to call on a partially allocated
 * device (the BNX2X_PCI_FREE/BNX2X_FREE macros are used by the alloc
 * error path below).
 */
void bnx2x_free_mem(struct bnx2x *bp)
{
	/* fastpath */
	bnx2x_free_fp_mem(bp);
	/* end of fastpath */

	BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
		       sizeof(struct host_sp_status_block));

	BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
		       bp->fw_stats_data_sz + bp->fw_stats_req_sz);

	BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
		       sizeof(struct bnx2x_slowpath));

	BNX2X_PCI_FREE(bp->context.vcxt, bp->context.cxt_mapping,
		       bp->context.size);
bnx2x_ilt_mem_op(bp, ILT_MEMOP_FREE);

	BNX2X_FREE(bp->ilt->lines);

#ifdef BCM_CNIC
	if (!CHIP_IS_E1x(bp))
		BNX2X_PCI_FREE(bp->cnic_sb.e2_sb, bp->cnic_sb_mapping,
			       sizeof(struct host_hc_status_block_e2));
	else
		BNX2X_PCI_FREE(bp->cnic_sb.e1x_sb, bp->cnic_sb_mapping,
			       sizeof(struct host_hc_status_block_e1x));

	BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ);
#endif

	BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);

	BNX2X_PCI_FREE(bp->eq_ring, bp->eq_mapping,
		       BCM_PAGE_SIZE * NUM_EQ_PAGES);
}

/*
 * bnx2x_alloc_fw_stats_mem - allocate the FW statistics request/data buffer.
 *
 * Sizes one DMA-coherent buffer holding the statistics request (header +
 * command groups) followed by the statistics data, and sets the
 * bp->fw_stats_* shortcut pointers/mappings into it.
 *
 * Returns 0 on success, -ENOMEM on allocation failure.
 *
 * NOTE(review): BNX2X_PCI_ALLOC presumably jumps to the alloc_mem_err
 * label on failure (the label is otherwise unreachable) — macro definition
 * is not visible in this chunk; confirm before restructuring.
 */
static inline int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
{
	int num_groups;
	int is_fcoe_stats = NO_FCOE(bp) ? 0 : 1;

	/* number of queues for statistics is number of eth queues + FCoE */
	u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe_stats;

	/* Total number of FW statistics requests =
	 * 1 for port stats + 1 for PF stats + potential 1 for FCoE stats +
	 * num of queues
	 */
	bp->fw_stats_num = 2 + is_fcoe_stats + num_queue_stats;


	/* Request is built from stats_query_header and an array
	 * of stats_query_cmd_group each of which contains
	 * STATS_QUERY_CMD_COUNT rules. The real number or requests is
	 * configured in the stats_query_header.
	 */
	num_groups = ((bp->fw_stats_num) / STATS_QUERY_CMD_COUNT) +
		     (((bp->fw_stats_num) % STATS_QUERY_CMD_COUNT) ? 1 : 0);

	bp->fw_stats_req_sz = sizeof(struct stats_query_header) +
			num_groups * sizeof(struct stats_query_cmd_group);

	/* Data for statistics requests + stats_conter
	 *
	 * stats_counter holds per-STORM counters that are incremented
	 * when STORM has finished with the current request.
	 *
	 * memory for FCoE offloaded statistics are counted anyway,
	 * even if they will not be sent.
	 */
	bp->fw_stats_data_sz = sizeof(struct per_port_stats) +
		sizeof(struct per_pf_stats) +
		sizeof(struct fcoe_statistics_params) +
		sizeof(struct per_queue_stats) * num_queue_stats +
		sizeof(struct stats_counter);

	BNX2X_PCI_ALLOC(bp->fw_stats, &bp->fw_stats_mapping,
			bp->fw_stats_data_sz + bp->fw_stats_req_sz);

	/* Set shortcuts */
	bp->fw_stats_req = (struct bnx2x_fw_stats_req *)bp->fw_stats;
	bp->fw_stats_req_mapping = bp->fw_stats_mapping;

	bp->fw_stats_data = (struct bnx2x_fw_stats_data *)
		((u8 *)bp->fw_stats + bp->fw_stats_req_sz);

	bp->fw_stats_data_mapping = bp->fw_stats_mapping +
				   bp->fw_stats_req_sz;

	return 0;

alloc_mem_err:
	BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
		       bp->fw_stats_data_sz + bp->fw_stats_req_sz);
	BNX2X_ERR("Can't allocate memory\n");
	return -ENOMEM;
}

/*
 * bnx2x_alloc_mem - allocate all driver host/DMA memory.
 *
 * Allocates status blocks, slowpath buffer, FW statistics memory, CDU
 * context memory, ILT lines, the slow path ring and the event queue,
 * and finally the fastpath memory (which self-adjusts the RSS queue
 * count).  On any failure everything allocated so far is released via
 * bnx2x_free_mem() and -ENOMEM is returned.
 */
int bnx2x_alloc_mem(struct bnx2x *bp)
{
#ifdef BCM_CNIC
	if (!CHIP_IS_E1x(bp))
		/* size = the status block + ramrod buffers */
		BNX2X_PCI_ALLOC(bp->cnic_sb.e2_sb, &bp->cnic_sb_mapping,
				sizeof(struct host_hc_status_block_e2));
	else
		BNX2X_PCI_ALLOC(bp->cnic_sb.e1x_sb, &bp->cnic_sb_mapping,
				sizeof(struct host_hc_status_block_e1x));

	/* allocate searcher T2 table */
	BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, SRC_T2_SZ);
#endif

	BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
			sizeof(struct host_sp_status_block));

	BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
			sizeof(struct bnx2x_slowpath));

#ifdef BCM_CNIC
	/* write address to which L5 should insert its values */
	bp->cnic_eth_dev.addr_drv_info_to_mcp = &bp->slowpath->drv_info_to_mcp;
#endif

	/* Allocated memory for FW statistics  */
	if (bnx2x_alloc_fw_stats_mem(bp))
		goto alloc_mem_err;

	bp->context.size = sizeof(union cdu_context) * BNX2X_L2_CID_COUNT(bp);

	BNX2X_PCI_ALLOC(bp->context.vcxt, &bp->context.cxt_mapping,
			bp->context.size);

	BNX2X_ALLOC(bp->ilt->lines, sizeof(struct ilt_line) * ILT_MAX_LINES);

	if (bnx2x_ilt_mem_op(bp, ILT_MEMOP_ALLOC))
		goto alloc_mem_err;

	/* Slow path ring */
	BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);

	/* EQ */
	BNX2X_PCI_ALLOC(bp->eq_ring, &bp->eq_mapping,
			BCM_PAGE_SIZE * NUM_EQ_PAGES);


	/* fastpath */
	/* need to be done at the end, since it's self adjusting to amount
	 * of memory available for RSS queues
	 */
	if (bnx2x_alloc_fp_mem(bp))
		goto alloc_mem_err;
	return 0;

alloc_mem_err:
	bnx2x_free_mem(bp);
	BNX2X_ERR("Can't allocate memory\n");
	return -ENOMEM;
}

/*
 * Init service functions
 */

/*
 * bnx2x_set_mac_one - add or delete a single MAC via a vlan_mac ramrod.
 *
 * @bp:		 driver handle
 * @mac:	 MAC address to add/delete (ETH_ALEN bytes)
 * @obj:	 vlan_mac object the ramrod operates on
 * @set:	 true = ADD, false = DEL
 * @mac_type:	 flag bit identifying the MAC class (ETH/UC/...)
 * @ramrod_flags: RAMROD_* control flags; if RAMROD_CONT is set the
 *		 user-request section is left untouched (continuation).
 *
 * Returns the bnx2x_config_vlan_mac() result (< 0 on failure).
 */
int bnx2x_set_mac_one(struct bnx2x *bp, u8 *mac,
		      struct bnx2x_vlan_mac_obj *obj, bool set,
		      int mac_type, unsigned long *ramrod_flags)
{
	int rc;
	struct bnx2x_vlan_mac_ramrod_params ramrod_param;

	memset(&ramrod_param, 0, sizeof(ramrod_param));

	/* Fill general parameters */
	ramrod_param.vlan_mac_obj = obj;
	ramrod_param.ramrod_flags = *ramrod_flags;

	/* Fill a user request section if needed */
	if (!test_bit(RAMROD_CONT, ramrod_flags)) {
		memcpy(ramrod_param.user_req.u.mac.mac, mac, ETH_ALEN);

		__set_bit(mac_type, &ramrod_param.user_req.vlan_mac_flags);

		/* Set the command: ADD or DEL */
		if (set)
			ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD;
		else
			ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_DEL;
	}

	rc = bnx2x_config_vlan_mac(bp, &ramrod_param);
	if (rc < 0)
		BNX2X_ERR("%s MAC failed\n", (set ?
"Set" : "Del")); return rc; } int bnx2x_del_all_macs(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *mac_obj, int mac_type, bool wait_for_comp) { int rc; unsigned long ramrod_flags = 0, vlan_mac_flags = 0; /* Wait for completion of requested */ if (wait_for_comp) __set_bit(RAMROD_COMP_WAIT, &ramrod_flags); /* Set the mac type of addresses we want to clear */ __set_bit(mac_type, &vlan_mac_flags); rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags, &ramrod_flags); if (rc < 0) BNX2X_ERR("Failed to delete MACs: %d\n", rc); return rc; } int bnx2x_set_eth_mac(struct bnx2x *bp, bool set) { unsigned long ramrod_flags = 0; #ifdef BCM_CNIC if (is_zero_ether_addr(bp->dev->dev_addr) && IS_MF_STORAGE_SD(bp)) { DP(NETIF_MSG_IFUP | NETIF_MSG_IFDOWN, "Ignoring Zero MAC for STORAGE SD mode\n"); return 0; } #endif DP(NETIF_MSG_IFUP, "Adding Eth MAC\n"); __set_bit(RAMROD_COMP_WAIT, &ramrod_flags); /* Eth MAC is set on RSS leading client (fp[0]) */ return bnx2x_set_mac_one(bp, bp->dev->dev_addr, &bp->fp->mac_obj, set, BNX2X_ETH_MAC, &ramrod_flags); } int bnx2x_setup_leading(struct bnx2x *bp) { return bnx2x_setup_queue(bp, &bp->fp[0], 1); } /** * bnx2x_set_int_mode - configure interrupt mode * * @bp: driver handle * * In case of MSI-X it will also try to enable MSI-X. */ static void __devinit bnx2x_set_int_mode(struct bnx2x *bp) { switch (int_mode) { case INT_MODE_MSI: bnx2x_enable_msi(bp); /* falling through... 
*/ case INT_MODE_INTx: bp->num_queues = 1 + NON_ETH_CONTEXT_USE; BNX2X_DEV_INFO("set number of queues to 1\n"); break; default: /* Set number of queues according to bp->multi_mode value */ bnx2x_set_num_queues(bp); BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues); /* if we can't use MSI-X we only need one fp, * so try to enable MSI-X with the requested number of fp's * and fallback to MSI or legacy INTx with one fp */ if (bnx2x_enable_msix(bp)) { /* failed to enable MSI-X */ BNX2X_DEV_INFO("Failed to enable MSI-X (%d), set number of queues to %d\n", bp->num_queues, 1 + NON_ETH_CONTEXT_USE); bp->num_queues = 1 + NON_ETH_CONTEXT_USE; /* Try to enable MSI */ if (!(bp->flags & DISABLE_MSI_FLAG)) bnx2x_enable_msi(bp); } break; } } /* must be called prioir to any HW initializations */ static inline u16 bnx2x_cid_ilt_lines(struct bnx2x *bp) { return L2_ILT_LINES(bp); } void bnx2x_ilt_set_info(struct bnx2x *bp) { struct ilt_client_info *ilt_client; struct bnx2x_ilt *ilt = BP_ILT(bp); u16 line = 0; ilt->start_line = FUNC_ILT_BASE(BP_FUNC(bp)); DP(BNX2X_MSG_SP, "ilt starts at line %d\n", ilt->start_line); /* CDU */ ilt_client = &ilt->clients[ILT_CLIENT_CDU]; ilt_client->client_num = ILT_CLIENT_CDU; ilt_client->page_size = CDU_ILT_PAGE_SZ; ilt_client->flags = ILT_CLIENT_SKIP_MEM; ilt_client->start = line; line += bnx2x_cid_ilt_lines(bp); #ifdef BCM_CNIC line += CNIC_ILT_LINES; #endif ilt_client->end = line - 1; DP(NETIF_MSG_IFUP, "ilt client[CDU]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n", ilt_client->start, ilt_client->end, ilt_client->page_size, ilt_client->flags, ilog2(ilt_client->page_size >> 12)); /* QM */ if (QM_INIT(bp->qm_cid_count)) { ilt_client = &ilt->clients[ILT_CLIENT_QM]; ilt_client->client_num = ILT_CLIENT_QM; ilt_client->page_size = QM_ILT_PAGE_SZ; ilt_client->flags = 0; ilt_client->start = line; /* 4 bytes for each cid */ line += DIV_ROUND_UP(bp->qm_cid_count * QM_QUEUES_PER_FUNC * 4, QM_ILT_PAGE_SZ); ilt_client->end = line - 1; 
DP(NETIF_MSG_IFUP, "ilt client[QM]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n", ilt_client->start, ilt_client->end, ilt_client->page_size, ilt_client->flags, ilog2(ilt_client->page_size >> 12)); } /* SRC */ ilt_client = &ilt->clients[ILT_CLIENT_SRC]; #ifdef BCM_CNIC ilt_client->client_num = ILT_CLIENT_SRC; ilt_client->page_size = SRC_ILT_PAGE_SZ; ilt_client->flags = 0; ilt_client->start = line; line += SRC_ILT_LINES; ilt_client->end = line - 1; DP(NETIF_MSG_IFUP, "ilt client[SRC]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n", ilt_client->start, ilt_client->end, ilt_client->page_size, ilt_client->flags, ilog2(ilt_client->page_size >> 12)); #else ilt_client->flags = (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM); #endif /* TM */ ilt_client = &ilt->clients[ILT_CLIENT_TM]; #ifdef BCM_CNIC ilt_client->client_num = ILT_CLIENT_TM; ilt_client->page_size = TM_ILT_PAGE_SZ; ilt_client->flags = 0; ilt_client->start = line; line += TM_ILT_LINES; ilt_client->end = line - 1; DP(NETIF_MSG_IFUP, "ilt client[TM]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n", ilt_client->start, ilt_client->end, ilt_client->page_size, ilt_client->flags, ilog2(ilt_client->page_size >> 12)); #else ilt_client->flags = (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM); #endif BUG_ON(line > ILT_MAX_LINES); } /** * bnx2x_pf_q_prep_init - prepare INIT transition parameters * * @bp: driver handle * @fp: pointer to fastpath * @init_params: pointer to parameters structure * * parameters configured: * - HC configuration * - Queue's CDU context */ static inline void bnx2x_pf_q_prep_init(struct bnx2x *bp, struct bnx2x_fastpath *fp, struct bnx2x_queue_init_params *init_params) { u8 cos; /* FCoE Queue uses Default SB, thus has no HC capabilities */ if (!IS_FCOE_FP(fp)) { __set_bit(BNX2X_Q_FLG_HC, &init_params->rx.flags); __set_bit(BNX2X_Q_FLG_HC, &init_params->tx.flags); /* If HC is supporterd, enable host coalescing in the transition * to INIT state. 
*/
		__set_bit(BNX2X_Q_FLG_HC_EN, &init_params->rx.flags);
		__set_bit(BNX2X_Q_FLG_HC_EN, &init_params->tx.flags);

		/* HC rate */
		init_params->rx.hc_rate = bp->rx_ticks ?
			(1000000 / bp->rx_ticks) : 0;
		init_params->tx.hc_rate = bp->tx_ticks ?
			(1000000 / bp->tx_ticks) : 0;

		/* FW SB ID */
		init_params->rx.fw_sb_id = init_params->tx.fw_sb_id =
			fp->fw_sb_id;

		/*
		 * CQ index among the SB indices: FCoE clients uses the default
		 * SB, therefore it's different.
		 */
		init_params->rx.sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS;
		init_params->tx.sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS;
	}

	/* set maximum number of COSs supported by this queue */
	init_params->max_cos = fp->max_cos;

	DP(NETIF_MSG_IFUP, "fp: %d setting queue params max cos to: %d\n",
	    fp->index, init_params->max_cos);

	/* set the context pointers queue object */
	for (cos = FIRST_TX_COS_INDEX; cos < init_params->max_cos; cos++)
		init_params->cxts[cos] =
			&bp->context.vcxt[fp->txdata[cos].cid].eth;
}

/*
 * bnx2x_setup_tx_only - send a SETUP_TX_ONLY ramrod for one tx-only COS.
 *
 * @bp:		     driver handle
 * @fp:		     fastpath owning the tx-only connection
 * @q_params:	     queue state params (cmd is overwritten here)
 * @tx_only_params:  params struct filled by this function
 * @tx_index:	     COS/cid index of the tx-only connection
 * @leading:	     unused here; kept for symmetry with setup_queue
 *
 * Returns bnx2x_queue_state_change()'s result.
 */
int bnx2x_setup_tx_only(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			struct bnx2x_queue_state_params *q_params,
			struct bnx2x_queue_setup_tx_only_params *tx_only_params,
			int tx_index, bool leading)
{
	memset(tx_only_params, 0, sizeof(*tx_only_params));

	/* Set the command */
	q_params->cmd = BNX2X_Q_CMD_SETUP_TX_ONLY;

	/* Set tx-only QUEUE flags: don't zero statistics */
	tx_only_params->flags = bnx2x_get_common_flags(bp, fp, false);

	/* choose the index of the cid to send the slow path on */
	tx_only_params->cid_index = tx_index;

	/* Set general TX_ONLY_SETUP parameters */
	bnx2x_pf_q_prep_general(bp, fp, &tx_only_params->gen_params,
				tx_index);

	/* Set Tx TX_ONLY_SETUP parameters */
	bnx2x_pf_tx_q_prep(bp, fp, &tx_only_params->txq_params, tx_index);

	DP(NETIF_MSG_IFUP,
	   "preparing to send tx-only ramrod for connection: cos %d, primary cid %d, cid %d, client id %d, sp-client id %d, flags %lx\n",
	   tx_index, q_params->q_obj->cids[FIRST_TX_COS_INDEX],
	   q_params->q_obj->cids[tx_index], q_params->q_obj->cl_id,
	   tx_only_params->gen_params.spcl_id, tx_only_params->flags);

	/* send the ramrod */
	return bnx2x_queue_state_change(bp, q_params);
}

/**
 * bnx2x_setup_queue - setup queue
 *
 * @bp:		driver handle
 * @fp:		pointer to fastpath
 * @leading:	is leading
 *
 * This function performs 2 steps in a Queue state machine
 *      actually: 1) RESET->INIT 2) INIT->SETUP
 */
int bnx2x_setup_queue(struct bnx2x *bp, struct bnx2x_fastpath *fp,
		       bool leading)
{
	struct bnx2x_queue_state_params q_params = {NULL};
	struct bnx2x_queue_setup_params *setup_params =
						&q_params.params.setup;
	struct bnx2x_queue_setup_tx_only_params *tx_only_params =
						&q_params.params.tx_only;
	int rc;
	u8 tx_index;

	DP(NETIF_MSG_IFUP, "setting up queue %d\n", fp->index);

	/* reset IGU state skip FCoE L2 queue */
	if (!IS_FCOE_FP(fp))
		bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0,
			     IGU_INT_ENABLE, 0);

	q_params.q_obj = &fp->q_obj;
	/* We want to wait for completion in this context */
	__set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);

	/* Prepare the INIT parameters */
	bnx2x_pf_q_prep_init(bp, fp, &q_params.params.init);

	/* Set the command */
	q_params.cmd = BNX2X_Q_CMD_INIT;

	/* Change the state to INIT */
	rc = bnx2x_queue_state_change(bp, &q_params);
	if (rc) {
		BNX2X_ERR("Queue(%d) INIT failed\n", fp->index);
		return rc;
	}

	DP(NETIF_MSG_IFUP, "init complete\n");


	/* Now move the Queue to the SETUP state... */
	memset(setup_params, 0, sizeof(*setup_params));

	/* Set QUEUE flags */
	setup_params->flags = bnx2x_get_q_flags(bp, fp, leading);

	/* Set general SETUP parameters */
	bnx2x_pf_q_prep_general(bp, fp, &setup_params->gen_params,
				FIRST_TX_COS_INDEX);

	bnx2x_pf_rx_q_prep(bp, fp, &setup_params->pause_params,
			    &setup_params->rxq_params);

	bnx2x_pf_tx_q_prep(bp, fp, &setup_params->txq_params,
			   FIRST_TX_COS_INDEX);

	/* Set the command */
	q_params.cmd = BNX2X_Q_CMD_SETUP;

	/* Change the state to SETUP */
	rc = bnx2x_queue_state_change(bp, &q_params);
	if (rc) {
		BNX2X_ERR("Queue(%d) SETUP failed\n", fp->index);
		return rc;
	}

	/* loop through the relevant tx-only indices */
	for (tx_index = FIRST_TX_ONLY_COS_INDEX;
	      tx_index < fp->max_cos;
	      tx_index++) {

		/* prepare and send tx-only ramrod*/
		rc = bnx2x_setup_tx_only(bp, fp, &q_params,
					  tx_only_params, tx_index, leading);
		if (rc) {
			BNX2X_ERR("Queue(%d.%d) TX_ONLY_SETUP failed\n",
				  fp->index, tx_index);
			return rc;
		}
	}

	return rc;
}

/*
 * bnx2x_stop_queue - tear down one queue.
 *
 * Terminates and CFC-deletes every tx-only connection of the queue,
 * then halts, terminates and CFC-deletes the primary connection.
 * Each ramrod is waited on synchronously (RAMROD_COMP_WAIT).
 *
 * Returns 0 on success or the first failing state-change result.
 */
static int bnx2x_stop_queue(struct bnx2x *bp, int index)
{
	struct bnx2x_fastpath *fp = &bp->fp[index];
	struct bnx2x_fp_txdata *txdata;
	struct bnx2x_queue_state_params q_params = {NULL};
	int rc, tx_index;

	DP(NETIF_MSG_IFDOWN, "stopping queue %d cid %d\n", index, fp->cid);

	q_params.q_obj = &fp->q_obj;
	/* We want to wait for completion in this context */
	__set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);


	/* close tx-only connections */
	for (tx_index = FIRST_TX_ONLY_COS_INDEX;
	     tx_index < fp->max_cos;
	     tx_index++) {

		/* ascertain this is a normal queue*/
		txdata = &fp->txdata[tx_index];

		DP(NETIF_MSG_IFDOWN, "stopping tx-only queue %d\n",
							txdata->txq_index);

		/* send halt terminate on tx-only connection */
		q_params.cmd = BNX2X_Q_CMD_TERMINATE;
		memset(&q_params.params.terminate, 0,
		       sizeof(q_params.params.terminate));
		q_params.params.terminate.cid_index = tx_index;

		rc = bnx2x_queue_state_change(bp, &q_params);
		if (rc)
			return rc;

		/* send halt terminate on tx-only connection */
		q_params.cmd = BNX2X_Q_CMD_CFC_DEL;
memset(&q_params.params.cfc_del, 0,
		       sizeof(q_params.params.cfc_del));
		q_params.params.cfc_del.cid_index = tx_index;
		rc = bnx2x_queue_state_change(bp, &q_params);
		if (rc)
			return rc;
	}
	/* Stop the primary connection: */
	/* ...halt the connection */
	q_params.cmd = BNX2X_Q_CMD_HALT;
	rc = bnx2x_queue_state_change(bp, &q_params);
	if (rc)
		return rc;

	/* ...terminate the connection */
	q_params.cmd = BNX2X_Q_CMD_TERMINATE;
	memset(&q_params.params.terminate, 0,
	       sizeof(q_params.params.terminate));
	q_params.params.terminate.cid_index = FIRST_TX_COS_INDEX;
	rc = bnx2x_queue_state_change(bp, &q_params);
	if (rc)
		return rc;
	/* ...delete cfc entry */
	q_params.cmd = BNX2X_Q_CMD_CFC_DEL;
	memset(&q_params.params.cfc_del, 0,
	       sizeof(q_params.params.cfc_del));
	q_params.params.cfc_del.cid_index = FIRST_TX_COS_INDEX;
	return bnx2x_queue_state_change(bp, &q_params);
}

/*
 * bnx2x_reset_func - per-function HW shutdown.
 *
 * Disables this PF in all four STORM firmwares, marks its status blocks
 * disabled, clears its SPQ data and IGU/HC edge registers, stops the
 * CNIC timer scan, clears the function's ILT and finally disables the
 * PF (E2+).  Assumes bnx2x_reset_port() already ran (see comment below).
 */
static void bnx2x_reset_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int i;

	/* Disable the function in the FW */
	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(func), 0);
	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(func), 0);
	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(func), 0);
	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(func), 0);

	/* FP SBs */
	for_each_eth_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		REG_WR8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(fp->fw_sb_id),
			   SB_DISABLED);
	}

#ifdef BCM_CNIC
	/* CNIC SB */
	REG_WR8(bp, BAR_CSTRORM_INTMEM +
		CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(bnx2x_cnic_fw_sb_id(bp)),
		SB_DISABLED);
#endif
	/* SP SB */
	REG_WR8(bp, BAR_CSTRORM_INTMEM +
		   CSTORM_SP_STATUS_BLOCK_DATA_STATE_OFFSET(func),
		   SB_DISABLED);

	/* NOTE(review): this loop writes the same offset on every
	 * iteration (no "+ i*4" term) — verify against the HSI whether
	 * the whole SPQ data area should be zeroed here before changing.
	 */
	for (i = 0; i < XSTORM_SPQ_DATA_SIZE / 4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_DATA_OFFSET(func),
		       0);

	/* Configure IGU */
	if (bp->common.int_block == INT_BLOCK_HC) {
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
	} else {
		REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
		REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
	}

#ifdef BCM_CNIC
	/* Disable Timer scan */
	REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
	/*
	 * Wait for at least 10ms and up to 2 second for the timers scan to
	 * complete
	 */
	for (i = 0; i < 200; i++) {
		msleep(10);
		if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
			break;
	}
#endif
	/* Clear ILT */
	bnx2x_clear_func_ilt(bp, func);

	/* Timers workaround bug for E2: if this is vnic-3,
	 * we need to set the entire ilt range for this timers.
	 */
	if (!CHIP_IS_E1x(bp) && BP_VN(bp) == 3) {
		struct ilt_client_info ilt_cli;
		/* use dummy TM client */
		memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
		ilt_cli.start = 0;
		ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
		ilt_cli.client_num = ILT_CLIENT_TM;

		bnx2x_ilt_boundry_init_op(bp, &ilt_cli, 0, INITOP_CLEAR);
	}

	/* this assumes that reset_port() called before reset_func()*/
	if (!CHIP_IS_E1x(bp))
		bnx2x_pf_disable(bp);

	bp->dmae_ready = 0;
}

/*
 * bnx2x_reset_port - per-port HW shutdown.
 *
 * Resets the physical link, masks port interrupts, blocks BRB receive
 * paths and the AEU, then (after a 100ms drain) warns if BRB blocks are
 * still occupied.
 */
static void bnx2x_reset_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	/* Reset physical Link */
	bnx2x__link_reset(bp);

	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	/* Do not rcv packets to BRB */
	REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
	/* Do not direct rcv packets that are not for MCP to the BRB */
	REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
			   NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);

	/* Configure AEU */
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);

	msleep(100);
	/* Check for BRB port occupancy */
	val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
	if (val)
		DP(NETIF_MSG_IFDOWN,
		   "BRB1 is not empty %d blocks are occupied\n", val);

	/* TODO: Close Doorbell port? */
}

/*
 * bnx2x_reset_hw - request a HW_RESET state transition for load_code's
 * phase (FUNC/PORT/COMMON), waiting for completion.
 */
static inline int bnx2x_reset_hw(struct bnx2x *bp, u32 load_code)
{
	struct bnx2x_func_state_params func_params = {NULL};

	/* Prepare parameters for function state transitions */
	__set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);

	func_params.f_obj = &bp->func_obj;
	func_params.cmd = BNX2X_F_CMD_HW_RESET;

	func_params.params.hw_init.load_phase = load_code;

	return bnx2x_func_state_change(bp, &func_params);
}

/*
 * bnx2x_func_stop - send the FUNC_STOP ramrod.
 *
 * Returns 0 on success; on failure (outside debug builds) retries the
 * transition driver-side only (RAMROD_DRV_CLR_ONLY) so a later HW_RESET
 * transition remains possible.
 */
static inline int bnx2x_func_stop(struct bnx2x *bp)
{
	struct bnx2x_func_state_params func_params = {NULL};
	int rc;

	/* Prepare parameters for function state transitions */
	__set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
	func_params.f_obj = &bp->func_obj;
	func_params.cmd = BNX2X_F_CMD_STOP;

	/*
	 * Try to stop the function the 'good way'. If fails (in case
	 * of a parity error during bnx2x_chip_cleanup()) and we are
	 * not in a debug mode, perform a state transaction in order to
	 * enable further HW_RESET transaction.
	 */
	rc = bnx2x_func_state_change(bp, &func_params);
	if (rc) {
#ifdef BNX2X_STOP_ON_ERROR
		return rc;
#else
		BNX2X_ERR("FUNC_STOP ramrod failed. Running a dry transaction\n");
		__set_bit(RAMROD_DRV_CLR_ONLY, &func_params.ramrod_flags);
		return bnx2x_func_state_change(bp, &func_params);
#endif
	}

	return 0;
}

/**
 * bnx2x_send_unload_req - request unload mode from the MCP.
 *
 * @bp:			driver handle
 * @unload_mode:	requested function's unload mode
 *
 * Return unload mode returned by the MCP: COMMON, PORT or FUNC.
 */
u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode)
{
	u32 reset_code = 0;
	int port = BP_PORT(bp);

	/* Select the UNLOAD request mode */
	if (unload_mode == UNLOAD_NORMAL)
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

	else if (bp->flags & NO_WOL_FLAG)
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;

	else if (bp->wol) {
		u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
		u8 *mac_addr = bp->dev->dev_addr;
		u32 val;
		u16 pmc;

		/* The mac address is written to entries 1-4 to
		 * preserve entry 0 which is used by the PMF
		 */
		u8 entry = (BP_VN(bp) + 1)*8;

		/* program the WoL match filter with our MAC */
		val = (mac_addr[0] << 8) | mac_addr[1];
		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);

		val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
		      (mac_addr[4] << 8) | mac_addr[5];
		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);

		/* Enable the PME and clear the status */
		pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmc);
		pmc |= PCI_PM_CTRL_PME_ENABLE | PCI_PM_CTRL_PME_STATUS;
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, pmc);

		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;

	} else
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

	/* Send the request to the MCP */
	if (!BP_NOMCP(bp))
		reset_code = bnx2x_fw_command(bp, reset_code, 0);
	else {
		/* no MCP: emulate its bookkeeping with driver-side
		 * per-path load counters
		 */
		int path = BP_PATH(bp);

		DP(NETIF_MSG_IFDOWN, "NO MCP - load counts[%d] %d, %d, %d\n",
		   path, load_count[path][0], load_count[path][1],
		   load_count[path][2]);
		load_count[path][0]--;
		load_count[path][1 + port]--;
		DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts[%d] %d, %d, %d\n",
		   path, load_count[path][0], load_count[path][1],
		   load_count[path][2]);
		if (load_count[path][0] == 0)
			reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
		else if (load_count[path][1 + port] == 0)
			reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
		else
			reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
	}

	return reset_code;
}

/**
 * bnx2x_send_unload_done - send UNLOAD_DONE command to the MCP.
 *
 * @bp:		driver handle
 */
void bnx2x_send_unload_done(struct bnx2x *bp)
{
	/* Report UNLOAD_DONE to MCP */
	if (!BP_NOMCP(bp))
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
}

/*
 * bnx2x_func_wait_started - wait for the function to reach STARTED.
 *
 * Only meaningful on the PMF; polls the function state machine for up
 * to 50 * 20ms after synchronizing the default-SB IRQ and flushing the
 * slowpath workqueue.
 */
static inline int bnx2x_func_wait_started(struct bnx2x *bp)
{
	int tout = 50;
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;

	if (!bp->port.pmf)
		return 0;

	/*
	 * (assumption: No Attention from MCP at this stage)
	 * PMF probably in the middle of TXdisable/enable transaction
	 * 1.
Sync IRS for default SB * 2. Sync SP queue - this guarantes us that attention handling started * 3. Wait, that TXdisable/enable transaction completes * * 1+2 guranty that if DCBx attention was scheduled it already changed * pending bit of transaction from STARTED-->TX_STOPPED, if we alredy * received complettion for the transaction the state is TX_STOPPED. * State will return to STARTED after completion of TX_STOPPED-->STARTED * transaction. */ /* make sure default SB ISR is done */ if (msix) synchronize_irq(bp->msix_table[0].vector); else synchronize_irq(bp->pdev->irq); flush_workqueue(bnx2x_wq); while (bnx2x_func_get_state(bp, &bp->func_obj) != BNX2X_F_STATE_STARTED && tout--) msleep(20); if (bnx2x_func_get_state(bp, &bp->func_obj) != BNX2X_F_STATE_STARTED) { #ifdef BNX2X_STOP_ON_ERROR BNX2X_ERR("Wrong function state\n"); return -EBUSY; #else /* * Failed to complete the transaction in a "good way" * Force both transactions with CLR bit */ struct bnx2x_func_state_params func_params = {NULL}; DP(NETIF_MSG_IFDOWN, "Hmmm... unexpected function state! 
Forcing STARTED-->TX_ST0PPED-->STARTED\n"); func_params.f_obj = &bp->func_obj; __set_bit(RAMROD_DRV_CLR_ONLY, &func_params.ramrod_flags); /* STARTED-->TX_ST0PPED */ func_params.cmd = BNX2X_F_CMD_TX_STOP; bnx2x_func_state_change(bp, &func_params); /* TX_ST0PPED-->STARTED */ func_params.cmd = BNX2X_F_CMD_TX_START; return bnx2x_func_state_change(bp, &func_params); #endif } return 0; } void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode) { int port = BP_PORT(bp); int i, rc = 0; u8 cos; struct bnx2x_mcast_ramrod_params rparam = {NULL}; u32 reset_code; /* Wait until tx fastpath tasks complete */ for_each_tx_queue(bp, i) { struct bnx2x_fastpath *fp = &bp->fp[i]; for_each_cos_in_tx_queue(fp, cos) rc = bnx2x_clean_tx_queue(bp, &fp->txdata[cos]); #ifdef BNX2X_STOP_ON_ERROR if (rc) return; #endif } /* Give HW time to discard old tx messages */ usleep_range(1000, 1000); /* Clean all ETH MACs */ rc = bnx2x_del_all_macs(bp, &bp->fp[0].mac_obj, BNX2X_ETH_MAC, false); if (rc < 0) BNX2X_ERR("Failed to delete all ETH macs: %d\n", rc); /* Clean up UC list */ rc = bnx2x_del_all_macs(bp, &bp->fp[0].mac_obj, BNX2X_UC_LIST_MAC, true); if (rc < 0) BNX2X_ERR("Failed to schedule DEL commands for UC MACs list: %d\n", rc); /* Disable LLH */ if (!CHIP_IS_E1(bp)) REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0); /* Set "drop all" (stop Rx). * We need to take a netif_addr_lock() here in order to prevent * a race between the completion code and this code. */ netif_addr_lock_bh(bp->dev); /* Schedule the rx_mode command */ if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state)) set_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state); else bnx2x_set_storm_rx_mode(bp); /* Cleanup multicast configuration */ rparam.mcast_obj = &bp->mcast_obj; rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL); if (rc < 0) BNX2X_ERR("Failed to send DEL multicast command: %d\n", rc); netif_addr_unlock_bh(bp->dev); /* * Send the UNLOAD_REQUEST to the MCP. 
This will return if * this function should perform FUNC, PORT or COMMON HW * reset. */ reset_code = bnx2x_send_unload_req(bp, unload_mode); /* * (assumption: No Attention from MCP at this stage) * PMF probably in the middle of TXdisable/enable transaction */ rc = bnx2x_func_wait_started(bp); if (rc) { BNX2X_ERR("bnx2x_func_wait_started failed\n"); #ifdef BNX2X_STOP_ON_ERROR return; #endif } /* Close multi and leading connections * Completions for ramrods are collected in a synchronous way */ for_each_queue(bp, i) if (bnx2x_stop_queue(bp, i)) #ifdef BNX2X_STOP_ON_ERROR return; #else goto unload_error; #endif /* If SP settings didn't get completed so far - something * very wrong has happen. */ if (!bnx2x_wait_sp_comp(bp, ~0x0UL)) BNX2X_ERR("Hmmm... Common slow path ramrods got stuck!\n"); #ifndef BNX2X_STOP_ON_ERROR unload_error: #endif rc = bnx2x_func_stop(bp); if (rc) { BNX2X_ERR("Function stop failed!\n"); #ifdef BNX2X_STOP_ON_ERROR return; #endif } /* Disable HW interrupts, NAPI */ bnx2x_netif_stop(bp, 1); /* Release IRQs */ bnx2x_free_irq(bp); /* Reset the chip */ rc = bnx2x_reset_hw(bp, reset_code); if (rc) BNX2X_ERR("HW_RESET failed\n"); /* Report UNLOAD_DONE to MCP */ bnx2x_send_unload_done(bp); } void bnx2x_disable_close_the_gate(struct bnx2x *bp) { u32 val; DP(NETIF_MSG_IFDOWN, "Disabling \"close the gates\"\n"); if (CHIP_IS_E1(bp)) { int port = BP_PORT(bp); u32 addr = port ? 
MISC_REG_AEU_MASK_ATTN_FUNC_1 : MISC_REG_AEU_MASK_ATTN_FUNC_0; val = REG_RD(bp, addr); val &= ~(0x300); REG_WR(bp, addr, val); } else { val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK); val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK | MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK); REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val); } } /* Close gates #2, #3 and #4: */ static void bnx2x_set_234_gates(struct bnx2x *bp, bool close) { u32 val; /* Gates #2 and #4a are closed/opened for "not E1" only */ if (!CHIP_IS_E1(bp)) { /* #4 */ REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS, !!close); /* #2 */ REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES, !!close); } /* #3 */ if (CHIP_IS_E1x(bp)) { /* Prevent interrupts from HC on both ports */ val = REG_RD(bp, HC_REG_CONFIG_1); REG_WR(bp, HC_REG_CONFIG_1, (!close) ? (val | HC_CONFIG_1_REG_BLOCK_DISABLE_1) : (val & ~(u32)HC_CONFIG_1_REG_BLOCK_DISABLE_1)); val = REG_RD(bp, HC_REG_CONFIG_0); REG_WR(bp, HC_REG_CONFIG_0, (!close) ? (val | HC_CONFIG_0_REG_BLOCK_DISABLE_0) : (val & ~(u32)HC_CONFIG_0_REG_BLOCK_DISABLE_0)); } else { /* Prevent incomming interrupts in IGU */ val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION); REG_WR(bp, IGU_REG_BLOCK_CONFIGURATION, (!close) ? (val | IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE) : (val & ~(u32)IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE)); } DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "%s gates #2, #3 and #4\n", close ? "closing" : "opening"); mmiowb(); } #define SHARED_MF_CLP_MAGIC 0x80000000 /* `magic' bit */ static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val) { /* Do some magic... */ u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb); *magic_val = val & SHARED_MF_CLP_MAGIC; MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC); } /** * bnx2x_clp_reset_done - restore the value of the `magic' bit. * * @bp: driver handle * @magic_val: old value of the `magic' bit. */ static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val) { /* Restore the `magic' bit value... 
*/ u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb); MF_CFG_WR(bp, shared_mf_config.clp_mb, (val & (~SHARED_MF_CLP_MAGIC)) | magic_val); } /** * bnx2x_reset_mcp_prep - prepare for MCP reset. * * @bp: driver handle * @magic_val: old value of 'magic' bit. * * Takes care of CLP configurations. */ static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val) { u32 shmem; u32 validity_offset; DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "Starting\n"); /* Set `magic' bit in order to save MF config */ if (!CHIP_IS_E1(bp)) bnx2x_clp_reset_prep(bp, magic_val); /* Get shmem offset */ shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR); validity_offset = offsetof(struct shmem_region, validity_map[0]); /* Clear validity map flags */ if (shmem > 0) REG_WR(bp, shmem + validity_offset, 0); } #define MCP_TIMEOUT 5000 /* 5 seconds (in ms) */ #define MCP_ONE_TIMEOUT 100 /* 100 ms */ /** * bnx2x_mcp_wait_one - wait for MCP_ONE_TIMEOUT * * @bp: driver handle */ static inline void bnx2x_mcp_wait_one(struct bnx2x *bp) { /* special handling for emulation and FPGA, wait 10 times longer */ if (CHIP_REV_IS_SLOW(bp)) msleep(MCP_ONE_TIMEOUT*10); else msleep(MCP_ONE_TIMEOUT); } /* * initializes bp->common.shmem_base and waits for validity signature to appear */ static int bnx2x_init_shmem(struct bnx2x *bp) { int cnt = 0; u32 val = 0; do { bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR); if (bp->common.shmem_base) { val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]); if (val & SHR_MEM_VALIDITY_MB) return 0; } bnx2x_mcp_wait_one(bp); } while (cnt++ < (MCP_TIMEOUT / MCP_ONE_TIMEOUT)); BNX2X_ERR("BAD MCP validity signature\n"); return -ENODEV; } static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val) { int rc = bnx2x_init_shmem(bp); /* Restore the `magic' bit value */ if (!CHIP_IS_E1(bp)) bnx2x_clp_reset_done(bp, magic_val); return rc; } static void bnx2x_pxp_prep(struct bnx2x *bp) { if (!CHIP_IS_E1(bp)) { REG_WR(bp, PXP2_REG_RD_START_INIT, 0); REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0); mmiowb(); } 
} /* * Reset the whole chip except for: * - PCIE core * - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by * one reset bit) * - IGU * - MISC (including AEU) * - GRC * - RBCN, RBCP */ static void bnx2x_process_kill_chip_reset(struct bnx2x *bp, bool global) { u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2; u32 global_bits2, stay_reset2; /* * Bits that have to be set in reset_mask2 if we want to reset 'global' * (per chip) blocks. */ global_bits2 = MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CPU | MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CORE; /* Don't reset the following blocks */ not_reset_mask1 = MISC_REGISTERS_RESET_REG_1_RST_HC | MISC_REGISTERS_RESET_REG_1_RST_PXPV | MISC_REGISTERS_RESET_REG_1_RST_PXP; not_reset_mask2 = MISC_REGISTERS_RESET_REG_2_RST_PCI_MDIO | MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE | MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE | MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE | MISC_REGISTERS_RESET_REG_2_RST_RBCN | MISC_REGISTERS_RESET_REG_2_RST_GRC | MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE | MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B | MISC_REGISTERS_RESET_REG_2_RST_ATC | MISC_REGISTERS_RESET_REG_2_PGLC; /* * Keep the following blocks in reset: * - all xxMACs are handled by the bnx2x_link code. 
*/ stay_reset2 = MISC_REGISTERS_RESET_REG_2_RST_BMAC0 | MISC_REGISTERS_RESET_REG_2_RST_BMAC1 | MISC_REGISTERS_RESET_REG_2_RST_EMAC0 | MISC_REGISTERS_RESET_REG_2_RST_EMAC1 | MISC_REGISTERS_RESET_REG_2_UMAC0 | MISC_REGISTERS_RESET_REG_2_UMAC1 | MISC_REGISTERS_RESET_REG_2_XMAC | MISC_REGISTERS_RESET_REG_2_XMAC_SOFT; /* Full reset masks according to the chip */ reset_mask1 = 0xffffffff; if (CHIP_IS_E1(bp)) reset_mask2 = 0xffff; else if (CHIP_IS_E1H(bp)) reset_mask2 = 0x1ffff; else if (CHIP_IS_E2(bp)) reset_mask2 = 0xfffff; else /* CHIP_IS_E3 */ reset_mask2 = 0x3ffffff; /* Don't reset global blocks unless we need to */ if (!global) reset_mask2 &= ~global_bits2; /* * In case of attention in the QM, we need to reset PXP * (MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR) before QM * because otherwise QM reset would release 'close the gates' shortly * before resetting the PXP, then the PSWRQ would send a write * request to PGLUE. Then when PXP is reset, PGLUE would try to * read the payload data from PSWWR, but PSWWR would not * respond. The write queue in PGLUE would stuck, dmae commands * would not return. Therefore it's important to reset the second * reset register (containing the * MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR bit) before the * first one (containing the MISC_REGISTERS_RESET_REG_1_RST_QM * bit). */ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, reset_mask2 & (~not_reset_mask2)); REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, reset_mask1 & (~not_reset_mask1)); barrier(); mmiowb(); REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, reset_mask2 & (~stay_reset2)); barrier(); mmiowb(); REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1); mmiowb(); } /** * bnx2x_er_poll_igu_vq - poll for pending writes bit. * It should get cleared in no more than 1s. * * @bp: driver handle * * It should get cleared in no more than 1s. Returns 0 if * pending writes bit gets cleared. 
*/ static int bnx2x_er_poll_igu_vq(struct bnx2x *bp) { u32 cnt = 1000; u32 pend_bits = 0; do { pend_bits = REG_RD(bp, IGU_REG_PENDING_BITS_STATUS); if (pend_bits == 0) break; usleep_range(1000, 1000); } while (cnt-- > 0); if (cnt <= 0) { BNX2X_ERR("Still pending IGU requests pend_bits=%x!\n", pend_bits); return -EBUSY; } return 0; } static int bnx2x_process_kill(struct bnx2x *bp, bool global) { int cnt = 1000; u32 val = 0; u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2; /* Empty the Tetris buffer, wait for 1s */ do { sr_cnt = REG_RD(bp, PXP2_REG_RD_SR_CNT); blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT); port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0); port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1); pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2); if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) && ((port_is_idle_0 & 0x1) == 0x1) && ((port_is_idle_1 & 0x1) == 0x1) && (pgl_exp_rom2 == 0xffffffff)) break; usleep_range(1000, 1000); } while (cnt-- > 0); if (cnt <= 0) { BNX2X_ERR("Tetris buffer didn't get empty or there are still outstanding read requests after 1s!\n"); BNX2X_ERR("sr_cnt=0x%08x, blk_cnt=0x%08x, port_is_idle_0=0x%08x, port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n", sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2); return -EAGAIN; } barrier(); /* Close gates #2, #3 and #4 */ bnx2x_set_234_gates(bp, true); /* Poll for IGU VQs for 57712 and newer chips */ if (!CHIP_IS_E1x(bp) && bnx2x_er_poll_igu_vq(bp)) return -EAGAIN; /* TBD: Indicate that "process kill" is in progress to MCP */ /* Clear "unprepared" bit */ REG_WR(bp, MISC_REG_UNPREPARED, 0); barrier(); /* Make sure all is written to the chip before the reset */ mmiowb(); /* Wait for 1ms to empty GLUE and PCI-E core queues, * PSWHST, GRC and PSWRD Tetris buffer. 
*/ usleep_range(1000, 1000); /* Prepare to chip reset: */ /* MCP */ if (global) bnx2x_reset_mcp_prep(bp, &val); /* PXP */ bnx2x_pxp_prep(bp); barrier(); /* reset the chip */ bnx2x_process_kill_chip_reset(bp, global); barrier(); /* Recover after reset: */ /* MCP */ if (global && bnx2x_reset_mcp_comp(bp, val)) return -EAGAIN; /* TBD: Add resetting the NO_MCP mode DB here */ /* PXP */ bnx2x_pxp_prep(bp); /* Open the gates #2, #3 and #4 */ bnx2x_set_234_gates(bp, false); /* TBD: IGU/AEU preparation bring back the AEU/IGU to a * reset state, re-enable attentions. */ return 0; } int bnx2x_leader_reset(struct bnx2x *bp) { int rc = 0; bool global = bnx2x_reset_is_global(bp); u32 load_code; /* if not going to reset MCP - load "fake" driver to reset HW while * driver is owner of the HW */ if (!global && !BP_NOMCP(bp)) { load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, 0); if (!load_code) { BNX2X_ERR("MCP response failure, aborting\n"); rc = -EAGAIN; goto exit_leader_reset; } if ((load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) && (load_code != FW_MSG_CODE_DRV_LOAD_COMMON)) { BNX2X_ERR("MCP unexpected resp, aborting\n"); rc = -EAGAIN; goto exit_leader_reset2; } load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0); if (!load_code) { BNX2X_ERR("MCP response failure, aborting\n"); rc = -EAGAIN; goto exit_leader_reset2; } } /* Try to recover after the failure */ if (bnx2x_process_kill(bp, global)) { BNX2X_ERR("Something bad had happen on engine %d! Aii!\n", BP_PATH(bp)); rc = -EAGAIN; goto exit_leader_reset2; } /* * Clear RESET_IN_PROGRES and RESET_GLOBAL bits and update the driver * state. 
*/ bnx2x_set_reset_done(bp); if (global) bnx2x_clear_reset_global(bp); exit_leader_reset2: /* unload "fake driver" if it was loaded */ if (!global && !BP_NOMCP(bp)) { bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0); bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0); } exit_leader_reset: bp->is_leader = 0; bnx2x_release_leader_lock(bp); smp_mb(); return rc; } static inline void bnx2x_recovery_failed(struct bnx2x *bp) { netdev_err(bp->dev, "Recovery has failed. Power cycle is needed.\n"); /* Disconnect this device */ netif_device_detach(bp->dev); /* * Block ifup for all function on this engine until "process kill" * or power cycle. */ bnx2x_set_reset_in_progress(bp); /* Shut down the power */ bnx2x_set_power_state(bp, PCI_D3hot); bp->recovery_state = BNX2X_RECOVERY_FAILED; smp_mb(); } /* * Assumption: runs under rtnl lock. This together with the fact * that it's called only from bnx2x_sp_rtnl() ensure that it * will never be called when netif_running(bp->dev) is false. */ static void bnx2x_parity_recover(struct bnx2x *bp) { bool global = false; u32 error_recovered, error_unrecovered; bool is_parity; DP(NETIF_MSG_HW, "Handling parity\n"); while (1) { switch (bp->recovery_state) { case BNX2X_RECOVERY_INIT: DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n"); is_parity = bnx2x_chk_parity_attn(bp, &global, false); WARN_ON(!is_parity); /* Try to get a LEADER_LOCK HW lock */ if (bnx2x_trylock_leader_lock(bp)) { bnx2x_set_reset_in_progress(bp); /* * Check if there is a global attention and if * there was a global attention, set the global * reset bit. */ if (global) bnx2x_set_reset_global(bp); bp->is_leader = 1; } /* Stop the driver */ /* If interface has been removed - break */ if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY)) return; bp->recovery_state = BNX2X_RECOVERY_WAIT; /* Ensure "is_leader", MCP command sequence and * "recovery_state" update values are seen on other * CPUs. 
*/ smp_mb(); break; case BNX2X_RECOVERY_WAIT: DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n"); if (bp->is_leader) { int other_engine = BP_PATH(bp) ? 0 : 1; bool other_load_status = bnx2x_get_load_status(bp, other_engine); bool load_status = bnx2x_get_load_status(bp, BP_PATH(bp)); global = bnx2x_reset_is_global(bp); /* * In case of a parity in a global block, let * the first leader that performs a * leader_reset() reset the global blocks in * order to clear global attentions. Otherwise * the the gates will remain closed for that * engine. */ if (load_status || (global && other_load_status)) { /* Wait until all other functions get * down. */ schedule_delayed_work(&bp->sp_rtnl_task, HZ/10); return; } else { /* If all other functions got down - * try to bring the chip back to * normal. In any case it's an exit * point for a leader. */ if (bnx2x_leader_reset(bp)) { bnx2x_recovery_failed(bp); return; } /* If we are here, means that the * leader has succeeded and doesn't * want to be a leader any more. Try * to continue as a none-leader. */ break; } } else { /* non-leader */ if (!bnx2x_reset_is_done(bp, BP_PATH(bp))) { /* Try to get a LEADER_LOCK HW lock as * long as a former leader may have * been unloaded by the user or * released a leadership by another * reason. */ if (bnx2x_trylock_leader_lock(bp)) { /* I'm a leader now! Restart a * switch case. */ bp->is_leader = 1; break; } schedule_delayed_work(&bp->sp_rtnl_task, HZ/10); return; } else { /* * If there was a global attention, wait * for it to be cleared. */ if (bnx2x_reset_is_global(bp)) { schedule_delayed_work( &bp->sp_rtnl_task, HZ/10); return; } error_recovered = bp->eth_stats.recoverable_error; error_unrecovered = bp->eth_stats.unrecoverable_error; bp->recovery_state = BNX2X_RECOVERY_NIC_LOADING; if (bnx2x_nic_load(bp, LOAD_NORMAL)) { error_unrecovered++; netdev_err(bp->dev, "Recovery failed. 
Power cycle needed\n"); /* Disconnect this device */ netif_device_detach(bp->dev); /* Shut down the power */ bnx2x_set_power_state( bp, PCI_D3hot); smp_mb(); } else { bp->recovery_state = BNX2X_RECOVERY_DONE; error_recovered++; smp_mb(); } bp->eth_stats.recoverable_error = error_recovered; bp->eth_stats.unrecoverable_error = error_unrecovered; return; } } default: return; } } } static int bnx2x_close(struct net_device *dev); /* bnx2x_nic_unload() flushes the bnx2x_wq, thus reset task is * scheduled on a general queue in order to prevent a dead lock. */ static void bnx2x_sp_rtnl_task(struct work_struct *work) { struct bnx2x *bp = container_of(work, struct bnx2x, sp_rtnl_task.work); rtnl_lock(); if (!netif_running(bp->dev)) goto sp_rtnl_exit; /* if stop on error is defined no recovery flows should be executed */ #ifdef BNX2X_STOP_ON_ERROR BNX2X_ERR("recovery flow called but STOP_ON_ERROR defined so reset not done to allow debug dump,\n" "you will need to reboot when done\n"); goto sp_rtnl_not_reset; #endif if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE)) { /* * Clear all pending SP commands as we are going to reset the * function anyway. */ bp->sp_rtnl_state = 0; smp_mb(); bnx2x_parity_recover(bp); goto sp_rtnl_exit; } if (test_and_clear_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state)) { /* * Clear all pending SP commands as we are going to reset the * function anyway. */ bp->sp_rtnl_state = 0; smp_mb(); bnx2x_nic_unload(bp, UNLOAD_NORMAL); bnx2x_nic_load(bp, LOAD_NORMAL); goto sp_rtnl_exit; } #ifdef BNX2X_STOP_ON_ERROR sp_rtnl_not_reset: #endif if (test_and_clear_bit(BNX2X_SP_RTNL_SETUP_TC, &bp->sp_rtnl_state)) bnx2x_setup_tc(bp->dev, bp->dcbx_port_params.ets.num_of_cos); /* * in case of fan failure we need to reset id if the "stop on error" * debug flag is set, since we trying to prevent permanent overheating * damage */ if (test_and_clear_bit(BNX2X_SP_RTNL_FAN_FAILURE, &bp->sp_rtnl_state)) { DP(NETIF_MSG_HW, "fan failure detected. 
Unloading driver\n"); netif_device_detach(bp->dev); bnx2x_close(bp->dev); } sp_rtnl_exit: rtnl_unlock(); } /* end of nic load/unload */ static void bnx2x_period_task(struct work_struct *work) { struct bnx2x *bp = container_of(work, struct bnx2x, period_task.work); if (!netif_running(bp->dev)) goto period_task_exit; if (CHIP_REV_IS_SLOW(bp)) { BNX2X_ERR("period task called on emulation, ignoring\n"); goto period_task_exit; } bnx2x_acquire_phy_lock(bp); /* * The barrier is needed to ensure the ordering between the writing to * the bp->port.pmf in the bnx2x_nic_load() or bnx2x_pmf_update() and * the reading here. */ smp_mb(); if (bp->port.pmf) { bnx2x_period_func(&bp->link_params, &bp->link_vars); /* Re-queue task in 1 sec */ queue_delayed_work(bnx2x_wq, &bp->period_task, 1*HZ); } bnx2x_release_phy_lock(bp); period_task_exit: return; } /* * Init service functions */ static u32 bnx2x_get_pretend_reg(struct bnx2x *bp) { u32 base = PXP2_REG_PGL_PRETEND_FUNC_F0; u32 stride = PXP2_REG_PGL_PRETEND_FUNC_F1 - base; return base + (BP_ABS_FUNC(bp)) * stride; } static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp) { u32 reg = bnx2x_get_pretend_reg(bp); /* Flush all outstanding writes */ mmiowb(); /* Pretend to be function 0 */ REG_WR(bp, reg, 0); REG_RD(bp, reg); /* Flush the GRC transaction (in the chip) */ /* From now we are in the "like-E1" mode */ bnx2x_int_disable(bp); /* Flush all outstanding writes */ mmiowb(); /* Restore the original function */ REG_WR(bp, reg, BP_ABS_FUNC(bp)); REG_RD(bp, reg); } static inline void bnx2x_undi_int_disable(struct bnx2x *bp) { if (CHIP_IS_E1(bp)) bnx2x_int_disable(bp); else bnx2x_undi_int_disable_e1h(bp); } static void __devinit bnx2x_prev_unload_close_mac(struct bnx2x *bp) { u32 val, base_addr, offset, mask, reset_reg; bool mac_stopped = false; u8 port = BP_PORT(bp); reset_reg = REG_RD(bp, MISC_REG_RESET_REG_2); if (!CHIP_IS_E3(bp)) { val = REG_RD(bp, NIG_REG_BMAC0_REGS_OUT_EN + port * 4); mask = MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << 
port; if ((mask & reset_reg) && val) { u32 wb_data[2]; BNX2X_DEV_INFO("Disable bmac Rx\n"); base_addr = BP_PORT(bp) ? NIG_REG_INGRESS_BMAC1_MEM : NIG_REG_INGRESS_BMAC0_MEM; offset = CHIP_IS_E2(bp) ? BIGMAC2_REGISTER_BMAC_CONTROL : BIGMAC_REGISTER_BMAC_CONTROL; /* * use rd/wr since we cannot use dmae. This is safe * since MCP won't access the bus due to the request * to unload, and no function on the path can be * loaded at this time. */ wb_data[0] = REG_RD(bp, base_addr + offset); wb_data[1] = REG_RD(bp, base_addr + offset + 0x4); wb_data[0] &= ~BMAC_CONTROL_RX_ENABLE; REG_WR(bp, base_addr + offset, wb_data[0]); REG_WR(bp, base_addr + offset + 0x4, wb_data[1]); } BNX2X_DEV_INFO("Disable emac Rx\n"); REG_WR(bp, NIG_REG_NIG_EMAC0_EN + BP_PORT(bp)*4, 0); mac_stopped = true; } else { if (reset_reg & MISC_REGISTERS_RESET_REG_2_XMAC) { BNX2X_DEV_INFO("Disable xmac Rx\n"); base_addr = BP_PORT(bp) ? GRCBASE_XMAC1 : GRCBASE_XMAC0; val = REG_RD(bp, base_addr + XMAC_REG_PFC_CTRL_HI); REG_WR(bp, base_addr + XMAC_REG_PFC_CTRL_HI, val & ~(1 << 1)); REG_WR(bp, base_addr + XMAC_REG_PFC_CTRL_HI, val | (1 << 1)); REG_WR(bp, base_addr + XMAC_REG_CTRL, 0); mac_stopped = true; } mask = MISC_REGISTERS_RESET_REG_2_UMAC0 << port; if (mask & reset_reg) { BNX2X_DEV_INFO("Disable umac Rx\n"); base_addr = BP_PORT(bp) ? 
GRCBASE_UMAC1 : GRCBASE_UMAC0; REG_WR(bp, base_addr + UMAC_REG_COMMAND_CONFIG, 0); mac_stopped = true; } } if (mac_stopped) msleep(20); } #define BNX2X_PREV_UNDI_PROD_ADDR(p) (BAR_TSTRORM_INTMEM + 0x1508 + ((p) << 4)) #define BNX2X_PREV_UNDI_RCQ(val) ((val) & 0xffff) #define BNX2X_PREV_UNDI_BD(val) ((val) >> 16 & 0xffff) #define BNX2X_PREV_UNDI_PROD(rcq, bd) ((bd) << 16 | (rcq)) static void __devinit bnx2x_prev_unload_undi_inc(struct bnx2x *bp, u8 port, u8 inc) { u16 rcq, bd; u32 tmp_reg = REG_RD(bp, BNX2X_PREV_UNDI_PROD_ADDR(port)); rcq = BNX2X_PREV_UNDI_RCQ(tmp_reg) + inc; bd = BNX2X_PREV_UNDI_BD(tmp_reg) + inc; tmp_reg = BNX2X_PREV_UNDI_PROD(rcq, bd); REG_WR(bp, BNX2X_PREV_UNDI_PROD_ADDR(port), tmp_reg); BNX2X_DEV_INFO("UNDI producer [%d] rings bd -> 0x%04x, rcq -> 0x%04x\n", port, bd, rcq); } static int __devinit bnx2x_prev_mcp_done(struct bnx2x *bp) { u32 rc = bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0); if (!rc) { BNX2X_ERR("MCP response failure, aborting\n"); return -EBUSY; } return 0; } static bool __devinit bnx2x_prev_is_path_marked(struct bnx2x *bp) { struct bnx2x_prev_path_list *tmp_list; int rc = false; if (down_trylock(&bnx2x_prev_sem)) return false; list_for_each_entry(tmp_list, &bnx2x_prev_list, list) { if (PCI_SLOT(bp->pdev->devfn) == tmp_list->slot && bp->pdev->bus->number == tmp_list->bus && BP_PATH(bp) == tmp_list->path) { rc = true; BNX2X_DEV_INFO("Path %d was already cleaned from previous drivers\n", BP_PATH(bp)); break; } } up(&bnx2x_prev_sem); return rc; } static int __devinit bnx2x_prev_mark_path(struct bnx2x *bp) { struct bnx2x_prev_path_list *tmp_list; int rc; tmp_list = (struct bnx2x_prev_path_list *) kmalloc(sizeof(struct bnx2x_prev_path_list), GFP_KERNEL); if (!tmp_list) { BNX2X_ERR("Failed to allocate 'bnx2x_prev_path_list'\n"); return -ENOMEM; } tmp_list->bus = bp->pdev->bus->number; tmp_list->slot = PCI_SLOT(bp->pdev->devfn); tmp_list->path = BP_PATH(bp); rc = down_interruptible(&bnx2x_prev_sem); if (rc) { BNX2X_ERR("Received 
%d when tried to take lock\n", rc); kfree(tmp_list); } else { BNX2X_DEV_INFO("Marked path [%d] - finished previous unload\n", BP_PATH(bp)); list_add(&tmp_list->list, &bnx2x_prev_list); up(&bnx2x_prev_sem); } return rc; } static bool __devinit bnx2x_can_flr(struct bnx2x *bp) { int pos; u32 cap; struct pci_dev *dev = bp->pdev; pos = pci_pcie_cap(dev); if (!pos) return false; pci_read_config_dword(dev, pos + PCI_EXP_DEVCAP, &cap); if (!(cap & PCI_EXP_DEVCAP_FLR)) return false; return true; } static int __devinit bnx2x_do_flr(struct bnx2x *bp) { int i, pos; u16 status; struct pci_dev *dev = bp->pdev; /* probe the capability first */ if (bnx2x_can_flr(bp)) return -ENOTTY; pos = pci_pcie_cap(dev); if (!pos) return -ENOTTY; /* Wait for Transaction Pending bit clean */ for (i = 0; i < 4; i++) { if (i) msleep((1 << (i - 1)) * 100); pci_read_config_word(dev, pos + PCI_EXP_DEVSTA, &status); if (!(status & PCI_EXP_DEVSTA_TRPND)) goto clear; } dev_err(&dev->dev, "transaction is not cleared; proceeding with reset anyway\n"); clear: if (bp->common.bc_ver < REQ_BC_VER_4_INITIATE_FLR) { BNX2X_ERR("FLR not supported by BC_VER: 0x%x\n", bp->common.bc_ver); return -EINVAL; } bnx2x_fw_command(bp, DRV_MSG_CODE_INITIATE_FLR, 0); return 0; } static int __devinit bnx2x_prev_unload_uncommon(struct bnx2x *bp) { int rc; BNX2X_DEV_INFO("Uncommon unload Flow\n"); /* Test if previous unload process was already finished for this path */ if (bnx2x_prev_is_path_marked(bp)) return bnx2x_prev_mcp_done(bp); /* If function has FLR capabilities, and existing FW version matches * the one required, then FLR will be sufficient to clean any residue * left by previous driver */ if (bnx2x_test_firmware_version(bp, false) && bnx2x_can_flr(bp)) return bnx2x_do_flr(bp); /* Close the MCP request, return failure*/ rc = bnx2x_prev_mcp_done(bp); if (!rc) rc = BNX2X_PREV_WAIT_NEEDED; return rc; } static int __devinit bnx2x_prev_unload_common(struct bnx2x *bp) { u32 reset_reg, tmp_reg = 0, rc; /* It is possible a 
previous function received 'common' answer, * but hasn't loaded yet, therefore creating a scenario of * multiple functions receiving 'common' on the same path. */ BNX2X_DEV_INFO("Common unload Flow\n"); if (bnx2x_prev_is_path_marked(bp)) return bnx2x_prev_mcp_done(bp); reset_reg = REG_RD(bp, MISC_REG_RESET_REG_1); /* Reset should be performed after BRB is emptied */ if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_BRB1) { u32 timer_count = 1000; bool prev_undi = false; /* Close the MAC Rx to prevent BRB from filling up */ bnx2x_prev_unload_close_mac(bp); /* Check if the UNDI driver was previously loaded * UNDI driver initializes CID offset for normal bell to 0x7 */ reset_reg = REG_RD(bp, MISC_REG_RESET_REG_1); if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_DORQ) { tmp_reg = REG_RD(bp, DORQ_REG_NORM_CID_OFST); if (tmp_reg == 0x7) { BNX2X_DEV_INFO("UNDI previously loaded\n"); prev_undi = true; /* clear the UNDI indication */ REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0); } } /* wait until BRB is empty */ tmp_reg = REG_RD(bp, BRB1_REG_NUM_OF_FULL_BLOCKS); while (timer_count) { u32 prev_brb = tmp_reg; tmp_reg = REG_RD(bp, BRB1_REG_NUM_OF_FULL_BLOCKS); if (!tmp_reg) break; BNX2X_DEV_INFO("BRB still has 0x%08x\n", tmp_reg); /* reset timer as long as BRB actually gets emptied */ if (prev_brb > tmp_reg) timer_count = 1000; else timer_count--; /* If UNDI resides in memory, manually increment it */ if (prev_undi) bnx2x_prev_unload_undi_inc(bp, BP_PORT(bp), 1); udelay(10); } if (!timer_count) BNX2X_ERR("Failed to empty BRB, hope for the best\n"); } /* No packets are in the pipeline, path is ready for reset */ bnx2x_reset_common(bp); rc = bnx2x_prev_mark_path(bp); if (rc) { bnx2x_prev_mcp_done(bp); return rc; } return bnx2x_prev_mcp_done(bp); } /* previous driver DMAE transaction may have occurred when pre-boot stage ended * and boot began, or when kdump kernel was loaded. 
Either case would invalidate
 * the addresses of the transaction, resulting in was-error bit set in the pci
 * causing all hw-to-host pcie transactions to timeout. If this happened we want
 * to clear the interrupt which detected this from the pglueb and the was done
 * bit
 */
static void __devinit bnx2x_prev_interrupted_dmae(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS);

	if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) {
		/* Terminate the message with '\n' so it is not merged with
		 * the next printk in the kernel log.
		 */
		BNX2X_ERR("was error bit was found to be set in pglueb upon startup. Clearing\n");
		REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR,
		       1 << BP_FUNC(bp));
	}
}

static int __devinit bnx2x_prev_unload(struct bnx2x *bp)
{
	int time_counter = 10;
	u32 rc, fw, hw_lock_reg, hw_lock_val;

	BNX2X_DEV_INFO("Entering Previous Unload Flow\n");

	/* clear hw from errors which may have resulted from an interrupted
	 * dmae transaction.
	 */
	bnx2x_prev_interrupted_dmae(bp);

	/* Release previously held locks */
	hw_lock_reg = (BP_FUNC(bp) <= 5) ?
		      (MISC_REG_DRIVER_CONTROL_1 + BP_FUNC(bp) * 8) :
		      (MISC_REG_DRIVER_CONTROL_7 + (BP_FUNC(bp) - 6) * 8);

	hw_lock_val = (REG_RD(bp, hw_lock_reg));
	if (hw_lock_val) {
		if (hw_lock_val & HW_LOCK_RESOURCE_NVRAM) {
			BNX2X_DEV_INFO("Release Previously held NVRAM lock\n");
			REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
			       (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << BP_PORT(bp)));
		}

		BNX2X_DEV_INFO("Release Previously held hw lock\n");
		REG_WR(bp, hw_lock_reg, 0xffffffff);
	} else
		BNX2X_DEV_INFO("No need to release hw/nvram locks\n");

	if (MCPR_ACCESS_LOCK_LOCK & REG_RD(bp, MCP_REG_MCPR_ACCESS_LOCK)) {
		BNX2X_DEV_INFO("Release previously held alr\n");
		REG_WR(bp, MCP_REG_MCPR_ACCESS_LOCK, 0);
	}

	do {
		/* Lock MCP using an unload request */
		fw = bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS, 0);
		if (!fw) {
			BNX2X_ERR("MCP response failure, aborting\n");
			rc = -EBUSY;
			break;
		}

		if (fw == FW_MSG_CODE_DRV_UNLOAD_COMMON) {
			rc = bnx2x_prev_unload_common(bp);
			break;
		}

		/* non-common reply from MCP might require looping */
		rc = bnx2x_prev_unload_uncommon(bp);
		if (rc !=
BNX2X_PREV_WAIT_NEEDED) break; msleep(20); } while (--time_counter); if (!time_counter || rc) { BNX2X_ERR("Failed unloading previous driver, aborting\n"); rc = -EBUSY; } BNX2X_DEV_INFO("Finished Previous Unload Flow [%d]\n", rc); return rc; } static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp) { u32 val, val2, val3, val4, id, boot_mode; u16 pmc; /* Get the chip revision id and number. */ /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */ val = REG_RD(bp, MISC_REG_CHIP_NUM); id = ((val & 0xffff) << 16); val = REG_RD(bp, MISC_REG_CHIP_REV); id |= ((val & 0xf) << 12); val = REG_RD(bp, MISC_REG_CHIP_METAL); id |= ((val & 0xff) << 4); val = REG_RD(bp, MISC_REG_BOND_ID); id |= (val & 0xf); bp->common.chip_id = id; /* Set doorbell size */ bp->db_size = (1 << BNX2X_DB_SHIFT); if (!CHIP_IS_E1x(bp)) { val = REG_RD(bp, MISC_REG_PORT4MODE_EN_OVWR); if ((val & 1) == 0) val = REG_RD(bp, MISC_REG_PORT4MODE_EN); else val = (val >> 1) & 1; BNX2X_DEV_INFO("chip is in %s\n", val ? "4_PORT_MODE" : "2_PORT_MODE"); bp->common.chip_port_mode = val ? CHIP_4_PORT_MODE : CHIP_2_PORT_MODE; if (CHIP_MODE_IS_4_PORT(bp)) bp->pfid = (bp->pf_num >> 1); /* 0..3 */ else bp->pfid = (bp->pf_num & 0x6); /* 0, 2, 4, 6 */ } else { bp->common.chip_port_mode = CHIP_PORT_MODE_NONE; /* N/A */ bp->pfid = bp->pf_num; /* 0..7 */ } BNX2X_DEV_INFO("pf_id: %x", bp->pfid); bp->link_params.chip_id = bp->common.chip_id; BNX2X_DEV_INFO("chip ID is 0x%x\n", id); val = (REG_RD(bp, 0x2874) & 0x55); if ((bp->common.chip_id & 0x1) || (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) { bp->flags |= ONE_PORT_FLAG; BNX2X_DEV_INFO("single port device\n"); } val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4); bp->common.flash_size = (BNX2X_NVRAM_1MB_SIZE << (val & MCPR_NVM_CFG4_FLASH_SIZE)); BNX2X_DEV_INFO("flash_size 0x%x (%d)\n", bp->common.flash_size, bp->common.flash_size); bnx2x_init_shmem(bp); bp->common.shmem2_base = REG_RD(bp, (BP_PATH(bp) ? 
MISC_REG_GENERIC_CR_1 : MISC_REG_GENERIC_CR_0)); bp->link_params.shmem_base = bp->common.shmem_base; bp->link_params.shmem2_base = bp->common.shmem2_base; BNX2X_DEV_INFO("shmem offset 0x%x shmem2 offset 0x%x\n", bp->common.shmem_base, bp->common.shmem2_base); if (!bp->common.shmem_base) { BNX2X_DEV_INFO("MCP not active\n"); bp->flags |= NO_MCP_FLAG; return; } bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config); BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config); bp->link_params.hw_led_mode = ((bp->common.hw_config & SHARED_HW_CFG_LED_MODE_MASK) >> SHARED_HW_CFG_LED_MODE_SHIFT); bp->link_params.feature_config_flags = 0; val = SHMEM_RD(bp, dev_info.shared_feature_config.config); if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED) bp->link_params.feature_config_flags |= FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED; else bp->link_params.feature_config_flags &= ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED; val = SHMEM_RD(bp, dev_info.bc_rev) >> 8; bp->common.bc_ver = val; BNX2X_DEV_INFO("bc_ver %X\n", val); if (val < BNX2X_BC_VER) { /* for now only warn * later we might need to enforce this */ BNX2X_ERR("This driver needs bc_ver %X but found %X, please upgrade BC\n", BNX2X_BC_VER, val); } bp->link_params.feature_config_flags |= (val >= REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL) ? FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0; bp->link_params.feature_config_flags |= (val >= REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL) ? FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY : 0; bp->link_params.feature_config_flags |= (val >= REQ_BC_VER_4_SFP_TX_DISABLE_SUPPORTED) ? FEATURE_CONFIG_BC_SUPPORTS_SFP_TX_DISABLED : 0; bp->flags |= (val >= REQ_BC_VER_4_PFC_STATS_SUPPORTED) ? 
BC_SUPPORTS_PFC_STATS : 0; boot_mode = SHMEM_RD(bp, dev_info.port_feature_config[BP_PORT(bp)].mba_config) & PORT_FEATURE_MBA_BOOT_AGENT_TYPE_MASK; switch (boot_mode) { case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_PXE: bp->common.boot_mode = FEATURE_ETH_BOOTMODE_PXE; break; case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_ISCSIB: bp->common.boot_mode = FEATURE_ETH_BOOTMODE_ISCSI; break; case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_FCOE_BOOT: bp->common.boot_mode = FEATURE_ETH_BOOTMODE_FCOE; break; case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_NONE: bp->common.boot_mode = FEATURE_ETH_BOOTMODE_NONE; break; } pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc); bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG; BNX2X_DEV_INFO("%sWoL capable\n", (bp->flags & NO_WOL_FLAG) ? "not " : ""); val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num); val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]); val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]); val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]); dev_info(&bp->pdev->dev, "part number %X-%X-%X-%X\n", val, val2, val3, val4); } #define IGU_FID(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID) #define IGU_VEC(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR) static void __devinit bnx2x_get_igu_cam_info(struct bnx2x *bp) { int pfid = BP_FUNC(bp); int igu_sb_id; u32 val; u8 fid, igu_sb_cnt = 0; bp->igu_base_sb = 0xff; if (CHIP_INT_MODE_IS_BC(bp)) { int vn = BP_VN(bp); igu_sb_cnt = bp->igu_sb_cnt; bp->igu_base_sb = (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn) * FP_SB_MAX_E1x; bp->igu_dsb_id = E1HVN_MAX * FP_SB_MAX_E1x + (CHIP_MODE_IS_4_PORT(bp) ? 
pfid : vn); return; } /* IGU in normal mode - read CAM */ for (igu_sb_id = 0; igu_sb_id < IGU_REG_MAPPING_MEMORY_SIZE; igu_sb_id++) { val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + igu_sb_id * 4); if (!(val & IGU_REG_MAPPING_MEMORY_VALID)) continue; fid = IGU_FID(val); if ((fid & IGU_FID_ENCODE_IS_PF)) { if ((fid & IGU_FID_PF_NUM_MASK) != pfid) continue; if (IGU_VEC(val) == 0) /* default status block */ bp->igu_dsb_id = igu_sb_id; else { if (bp->igu_base_sb == 0xff) bp->igu_base_sb = igu_sb_id; igu_sb_cnt++; } } } #ifdef CONFIG_PCI_MSI /* * It's expected that number of CAM entries for this functions is equal * to the number evaluated based on the MSI-X table size. We want a * harsh warning if these values are different! */ WARN_ON(bp->igu_sb_cnt != igu_sb_cnt); #endif if (igu_sb_cnt == 0) BNX2X_ERR("CAM configuration error\n"); } static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp, u32 switch_cfg) { int cfg_size = 0, idx, port = BP_PORT(bp); /* Aggregation of supported attributes of all external phys */ bp->port.supported[0] = 0; bp->port.supported[1] = 0; switch (bp->link_params.num_phys) { case 1: bp->port.supported[0] = bp->link_params.phy[INT_PHY].supported; cfg_size = 1; break; case 2: bp->port.supported[0] = bp->link_params.phy[EXT_PHY1].supported; cfg_size = 1; break; case 3: if (bp->link_params.multi_phy_config & PORT_HW_CFG_PHY_SWAPPED_ENABLED) { bp->port.supported[1] = bp->link_params.phy[EXT_PHY1].supported; bp->port.supported[0] = bp->link_params.phy[EXT_PHY2].supported; } else { bp->port.supported[0] = bp->link_params.phy[EXT_PHY1].supported; bp->port.supported[1] = bp->link_params.phy[EXT_PHY2].supported; } cfg_size = 2; break; } if (!(bp->port.supported[0] || bp->port.supported[1])) { BNX2X_ERR("NVRAM config error. BAD phy config. 
PHY1 config 0x%x, PHY2 config 0x%x\n", SHMEM_RD(bp, dev_info.port_hw_config[port].external_phy_config), SHMEM_RD(bp, dev_info.port_hw_config[port].external_phy_config2)); return; } if (CHIP_IS_E3(bp)) bp->port.phy_addr = REG_RD(bp, MISC_REG_WC0_CTRL_PHY_ADDR); else { switch (switch_cfg) { case SWITCH_CFG_1G: bp->port.phy_addr = REG_RD( bp, NIG_REG_SERDES0_CTRL_PHY_ADDR + port*0x10); break; case SWITCH_CFG_10G: bp->port.phy_addr = REG_RD( bp, NIG_REG_XGXS0_CTRL_PHY_ADDR + port*0x18); break; default: BNX2X_ERR("BAD switch_cfg link_config 0x%x\n", bp->port.link_config[0]); return; } } BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr); /* mask what we support according to speed_cap_mask per configuration */ for (idx = 0; idx < cfg_size; idx++) { if (!(bp->link_params.speed_cap_mask[idx] & PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF)) bp->port.supported[idx] &= ~SUPPORTED_10baseT_Half; if (!(bp->link_params.speed_cap_mask[idx] & PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL)) bp->port.supported[idx] &= ~SUPPORTED_10baseT_Full; if (!(bp->link_params.speed_cap_mask[idx] & PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF)) bp->port.supported[idx] &= ~SUPPORTED_100baseT_Half; if (!(bp->link_params.speed_cap_mask[idx] & PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL)) bp->port.supported[idx] &= ~SUPPORTED_100baseT_Full; if (!(bp->link_params.speed_cap_mask[idx] & PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) bp->port.supported[idx] &= ~(SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full); if (!(bp->link_params.speed_cap_mask[idx] & PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G)) bp->port.supported[idx] &= ~SUPPORTED_2500baseX_Full; if (!(bp->link_params.speed_cap_mask[idx] & PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) bp->port.supported[idx] &= ~SUPPORTED_10000baseT_Full; } BNX2X_DEV_INFO("supported 0x%x 0x%x\n", bp->port.supported[0], bp->port.supported[1]); } static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp) { u32 link_config, idx, cfg_size = 0; bp->port.advertising[0] = 0; 
bp->port.advertising[1] = 0; switch (bp->link_params.num_phys) { case 1: case 2: cfg_size = 1; break; case 3: cfg_size = 2; break; } for (idx = 0; idx < cfg_size; idx++) { bp->link_params.req_duplex[idx] = DUPLEX_FULL; link_config = bp->port.link_config[idx]; switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) { case PORT_FEATURE_LINK_SPEED_AUTO: if (bp->port.supported[idx] & SUPPORTED_Autoneg) { bp->link_params.req_line_speed[idx] = SPEED_AUTO_NEG; bp->port.advertising[idx] |= bp->port.supported[idx]; if (bp->link_params.phy[EXT_PHY1].type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) bp->port.advertising[idx] |= (SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full); } else { /* force 10G, no AN */ bp->link_params.req_line_speed[idx] = SPEED_10000; bp->port.advertising[idx] |= (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE); continue; } break; case PORT_FEATURE_LINK_SPEED_10M_FULL: if (bp->port.supported[idx] & SUPPORTED_10baseT_Full) { bp->link_params.req_line_speed[idx] = SPEED_10; bp->port.advertising[idx] |= (ADVERTISED_10baseT_Full | ADVERTISED_TP); } else { BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n", link_config, bp->link_params.speed_cap_mask[idx]); return; } break; case PORT_FEATURE_LINK_SPEED_10M_HALF: if (bp->port.supported[idx] & SUPPORTED_10baseT_Half) { bp->link_params.req_line_speed[idx] = SPEED_10; bp->link_params.req_duplex[idx] = DUPLEX_HALF; bp->port.advertising[idx] |= (ADVERTISED_10baseT_Half | ADVERTISED_TP); } else { BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n", link_config, bp->link_params.speed_cap_mask[idx]); return; } break; case PORT_FEATURE_LINK_SPEED_100M_FULL: if (bp->port.supported[idx] & SUPPORTED_100baseT_Full) { bp->link_params.req_line_speed[idx] = SPEED_100; bp->port.advertising[idx] |= (ADVERTISED_100baseT_Full | ADVERTISED_TP); } else { BNX2X_ERR("NVRAM config error. 
Invalid link_config 0x%x speed_cap_mask 0x%x\n", link_config, bp->link_params.speed_cap_mask[idx]); return; } break; case PORT_FEATURE_LINK_SPEED_100M_HALF: if (bp->port.supported[idx] & SUPPORTED_100baseT_Half) { bp->link_params.req_line_speed[idx] = SPEED_100; bp->link_params.req_duplex[idx] = DUPLEX_HALF; bp->port.advertising[idx] |= (ADVERTISED_100baseT_Half | ADVERTISED_TP); } else { BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n", link_config, bp->link_params.speed_cap_mask[idx]); return; } break; case PORT_FEATURE_LINK_SPEED_1G: if (bp->port.supported[idx] & SUPPORTED_1000baseT_Full) { bp->link_params.req_line_speed[idx] = SPEED_1000; bp->port.advertising[idx] |= (ADVERTISED_1000baseT_Full | ADVERTISED_TP); } else { BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n", link_config, bp->link_params.speed_cap_mask[idx]); return; } break; case PORT_FEATURE_LINK_SPEED_2_5G: if (bp->port.supported[idx] & SUPPORTED_2500baseX_Full) { bp->link_params.req_line_speed[idx] = SPEED_2500; bp->port.advertising[idx] |= (ADVERTISED_2500baseX_Full | ADVERTISED_TP); } else { BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n", link_config, bp->link_params.speed_cap_mask[idx]); return; } break; case PORT_FEATURE_LINK_SPEED_10G_CX4: if (bp->port.supported[idx] & SUPPORTED_10000baseT_Full) { bp->link_params.req_line_speed[idx] = SPEED_10000; bp->port.advertising[idx] |= (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE); } else { BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n", link_config, bp->link_params.speed_cap_mask[idx]); return; } break; case PORT_FEATURE_LINK_SPEED_20G: bp->link_params.req_line_speed[idx] = SPEED_20000; break; default: BNX2X_ERR("NVRAM config error. 
BAD link speed link_config 0x%x\n", link_config); bp->link_params.req_line_speed[idx] = SPEED_AUTO_NEG; bp->port.advertising[idx] = bp->port.supported[idx]; break; } bp->link_params.req_flow_ctrl[idx] = (link_config & PORT_FEATURE_FLOW_CONTROL_MASK); if ((bp->link_params.req_flow_ctrl[idx] == BNX2X_FLOW_CTRL_AUTO) && !(bp->port.supported[idx] & SUPPORTED_Autoneg)) { bp->link_params.req_flow_ctrl[idx] = BNX2X_FLOW_CTRL_NONE; } BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x advertising 0x%x\n", bp->link_params.req_line_speed[idx], bp->link_params.req_duplex[idx], bp->link_params.req_flow_ctrl[idx], bp->port.advertising[idx]); } } static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi) { mac_hi = cpu_to_be16(mac_hi); mac_lo = cpu_to_be32(mac_lo); memcpy(mac_buf, &mac_hi, sizeof(mac_hi)); memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo)); } static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp) { int port = BP_PORT(bp); u32 config; u32 ext_phy_type, ext_phy_config; bp->link_params.bp = bp; bp->link_params.port = port; bp->link_params.lane_config = SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config); bp->link_params.speed_cap_mask[0] = SHMEM_RD(bp, dev_info.port_hw_config[port].speed_capability_mask); bp->link_params.speed_cap_mask[1] = SHMEM_RD(bp, dev_info.port_hw_config[port].speed_capability_mask2); bp->port.link_config[0] = SHMEM_RD(bp, dev_info.port_feature_config[port].link_config); bp->port.link_config[1] = SHMEM_RD(bp, dev_info.port_feature_config[port].link_config2); bp->link_params.multi_phy_config = SHMEM_RD(bp, dev_info.port_hw_config[port].multi_phy_config); /* If the device is capable of WoL, set the default state according * to the HW */ config = SHMEM_RD(bp, dev_info.port_feature_config[port].config); bp->wol = (!(bp->flags & NO_WOL_FLAG) && (config & PORT_FEATURE_WOL_ENABLED)); BNX2X_DEV_INFO("lane_config 0x%08x speed_cap_mask0 0x%08x link_config0 0x%08x\n", bp->link_params.lane_config, 
bp->link_params.speed_cap_mask[0], bp->port.link_config[0]); bp->link_params.switch_cfg = (bp->port.link_config[0] & PORT_FEATURE_CONNECTED_SWITCH_MASK); bnx2x_phy_probe(&bp->link_params); bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg); bnx2x_link_settings_requested(bp); /* * If connected directly, work with the internal PHY, otherwise, work * with the external PHY */ ext_phy_config = SHMEM_RD(bp, dev_info.port_hw_config[port].external_phy_config); ext_phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config); if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) bp->mdio.prtad = bp->port.phy_addr; else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) && (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN)) bp->mdio.prtad = XGXS_EXT_PHY_ADDR(ext_phy_config); /* * Check if hw lock is required to access MDC/MDIO bus to the PHY(s) * In MF mode, it is set to cover self test cases */ if (IS_MF(bp)) bp->port.need_hw_lock = 1; else bp->port.need_hw_lock = bnx2x_hw_lock_required(bp, bp->common.shmem_base, bp->common.shmem2_base); } void bnx2x_get_iscsi_info(struct bnx2x *bp) { u32 no_flags = NO_ISCSI_FLAG; #ifdef BCM_CNIC int port = BP_PORT(bp); u32 max_iscsi_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp, drv_lic_key[port].max_iscsi_conn); /* Get the number of maximum allowed iSCSI connections */ bp->cnic_eth_dev.max_iscsi_conn = (max_iscsi_conn & BNX2X_MAX_ISCSI_INIT_CONN_MASK) >> BNX2X_MAX_ISCSI_INIT_CONN_SHIFT; BNX2X_DEV_INFO("max_iscsi_conn 0x%x\n", bp->cnic_eth_dev.max_iscsi_conn); /* * If maximum allowed number of connections is zero - * disable the feature. 
*/ if (!bp->cnic_eth_dev.max_iscsi_conn) bp->flags |= no_flags; #else bp->flags |= no_flags; #endif } #ifdef BCM_CNIC static void __devinit bnx2x_get_ext_wwn_info(struct bnx2x *bp, int func) { /* Port info */ bp->cnic_eth_dev.fcoe_wwn_port_name_hi = MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_port_name_upper); bp->cnic_eth_dev.fcoe_wwn_port_name_lo = MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_port_name_lower); /* Node info */ bp->cnic_eth_dev.fcoe_wwn_node_name_hi = MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_node_name_upper); bp->cnic_eth_dev.fcoe_wwn_node_name_lo = MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_node_name_lower); } #endif static void __devinit bnx2x_get_fcoe_info(struct bnx2x *bp) { #ifdef BCM_CNIC int port = BP_PORT(bp); int func = BP_ABS_FUNC(bp); u32 max_fcoe_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp, drv_lic_key[port].max_fcoe_conn); /* Get the number of maximum allowed FCoE connections */ bp->cnic_eth_dev.max_fcoe_conn = (max_fcoe_conn & BNX2X_MAX_FCOE_INIT_CONN_MASK) >> BNX2X_MAX_FCOE_INIT_CONN_SHIFT; /* Read the WWN: */ if (!IS_MF(bp)) { /* Port info */ bp->cnic_eth_dev.fcoe_wwn_port_name_hi = SHMEM_RD(bp, dev_info.port_hw_config[port]. fcoe_wwn_port_name_upper); bp->cnic_eth_dev.fcoe_wwn_port_name_lo = SHMEM_RD(bp, dev_info.port_hw_config[port]. fcoe_wwn_port_name_lower); /* Node info */ bp->cnic_eth_dev.fcoe_wwn_node_name_hi = SHMEM_RD(bp, dev_info.port_hw_config[port]. fcoe_wwn_node_name_upper); bp->cnic_eth_dev.fcoe_wwn_node_name_lo = SHMEM_RD(bp, dev_info.port_hw_config[port]. fcoe_wwn_node_name_lower); } else if (!IS_MF_SD(bp)) { u32 cfg = MF_CFG_RD(bp, func_ext_config[func].func_cfg); /* * Read the WWN info only if the FCoE feature is enabled for * this function. 
*/ if (cfg & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) bnx2x_get_ext_wwn_info(bp, func); } else if (IS_MF_FCOE_SD(bp)) bnx2x_get_ext_wwn_info(bp, func); BNX2X_DEV_INFO("max_fcoe_conn 0x%x\n", bp->cnic_eth_dev.max_fcoe_conn); /* * If maximum allowed number of connections is zero - * disable the feature. */ if (!bp->cnic_eth_dev.max_fcoe_conn) bp->flags |= NO_FCOE_FLAG; #else bp->flags |= NO_FCOE_FLAG; #endif } static void __devinit bnx2x_get_cnic_info(struct bnx2x *bp) { /* * iSCSI may be dynamically disabled but reading * info here we will decrease memory usage by driver * if the feature is disabled for good */ bnx2x_get_iscsi_info(bp); bnx2x_get_fcoe_info(bp); } static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp) { u32 val, val2; int func = BP_ABS_FUNC(bp); int port = BP_PORT(bp); #ifdef BCM_CNIC u8 *iscsi_mac = bp->cnic_eth_dev.iscsi_mac; u8 *fip_mac = bp->fip_mac; #endif /* Zero primary MAC configuration */ memset(bp->dev->dev_addr, 0, ETH_ALEN); if (BP_NOMCP(bp)) { BNX2X_ERROR("warning: random MAC workaround active\n"); eth_hw_addr_random(bp->dev); } else if (IS_MF(bp)) { val2 = MF_CFG_RD(bp, func_mf_config[func].mac_upper); val = MF_CFG_RD(bp, func_mf_config[func].mac_lower); if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) && (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2); #ifdef BCM_CNIC /* * iSCSI and FCoE NPAR MACs: if there is no either iSCSI or * FCoE MAC then the appropriate feature should be disabled. * * In non SD mode features configuration comes from * struct func_ext_config. */ if (!IS_MF_SD(bp)) { u32 cfg = MF_CFG_RD(bp, func_ext_config[func].func_cfg); if (cfg & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) { val2 = MF_CFG_RD(bp, func_ext_config[func]. iscsi_mac_addr_upper); val = MF_CFG_RD(bp, func_ext_config[func]. 
iscsi_mac_addr_lower); bnx2x_set_mac_buf(iscsi_mac, val, val2); BNX2X_DEV_INFO("Read iSCSI MAC: %pM\n", iscsi_mac); } else bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG; if (cfg & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) { val2 = MF_CFG_RD(bp, func_ext_config[func]. fcoe_mac_addr_upper); val = MF_CFG_RD(bp, func_ext_config[func]. fcoe_mac_addr_lower); bnx2x_set_mac_buf(fip_mac, val, val2); BNX2X_DEV_INFO("Read FCoE L2 MAC: %pM\n", fip_mac); } else bp->flags |= NO_FCOE_FLAG; } else { /* SD MODE */ if (IS_MF_STORAGE_SD(bp)) { if (BNX2X_IS_MF_SD_PROTOCOL_ISCSI(bp)) { /* use primary mac as iscsi mac */ memcpy(iscsi_mac, bp->dev->dev_addr, ETH_ALEN); BNX2X_DEV_INFO("SD ISCSI MODE\n"); BNX2X_DEV_INFO("Read iSCSI MAC: %pM\n", iscsi_mac); } else { /* FCoE */ memcpy(fip_mac, bp->dev->dev_addr, ETH_ALEN); BNX2X_DEV_INFO("SD FCoE MODE\n"); BNX2X_DEV_INFO("Read FIP MAC: %pM\n", fip_mac); } /* Zero primary MAC configuration */ memset(bp->dev->dev_addr, 0, ETH_ALEN); } } #endif } else { /* in SF read MACs from port configuration */ val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper); val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower); bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2); #ifdef BCM_CNIC val2 = SHMEM_RD(bp, dev_info.port_hw_config[port]. iscsi_mac_upper); val = SHMEM_RD(bp, dev_info.port_hw_config[port]. iscsi_mac_lower); bnx2x_set_mac_buf(iscsi_mac, val, val2); val2 = SHMEM_RD(bp, dev_info.port_hw_config[port]. fcoe_fip_mac_upper); val = SHMEM_RD(bp, dev_info.port_hw_config[port]. fcoe_fip_mac_lower); bnx2x_set_mac_buf(fip_mac, val, val2); #endif } memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN); memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN); #ifdef BCM_CNIC /* Disable iSCSI if MAC configuration is * invalid. */ if (!is_valid_ether_addr(iscsi_mac)) { bp->flags |= NO_ISCSI_FLAG; memset(iscsi_mac, 0, ETH_ALEN); } /* Disable FCoE if MAC configuration is * invalid. 
*/ if (!is_valid_ether_addr(fip_mac)) { bp->flags |= NO_FCOE_FLAG; memset(bp->fip_mac, 0, ETH_ALEN); } #endif if (!bnx2x_is_valid_ether_addr(bp, bp->dev->dev_addr)) dev_err(&bp->pdev->dev, "bad Ethernet MAC address configuration: %pM\n" "change it manually before bringing up the appropriate network interface\n", bp->dev->dev_addr); } static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp) { int /*abs*/func = BP_ABS_FUNC(bp); int vn; u32 val = 0; int rc = 0; bnx2x_get_common_hwinfo(bp); /* * initialize IGU parameters */ if (CHIP_IS_E1x(bp)) { bp->common.int_block = INT_BLOCK_HC; bp->igu_dsb_id = DEF_SB_IGU_ID; bp->igu_base_sb = 0; } else { bp->common.int_block = INT_BLOCK_IGU; /* do not allow device reset during IGU info preocessing */ bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET); val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION); if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) { int tout = 5000; BNX2X_DEV_INFO("FORCING Normal Mode\n"); val &= ~(IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN); REG_WR(bp, IGU_REG_BLOCK_CONFIGURATION, val); REG_WR(bp, IGU_REG_RESET_MEMORIES, 0x7f); while (tout && REG_RD(bp, IGU_REG_RESET_MEMORIES)) { tout--; usleep_range(1000, 1000); } if (REG_RD(bp, IGU_REG_RESET_MEMORIES)) { dev_err(&bp->pdev->dev, "FORCING Normal Mode failed!!!\n"); return -EPERM; } } if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) { BNX2X_DEV_INFO("IGU Backward Compatible Mode\n"); bp->common.int_block |= INT_BLOCK_MODE_BW_COMP; } else BNX2X_DEV_INFO("IGU Normal Mode\n"); bnx2x_get_igu_cam_info(bp); bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET); } /* * set base FW non-default (fast path) status block id, this value is * used to initialize the fw_sb_id saved on the fp/queue structure to * determine the id used by the FW. */ if (CHIP_IS_E1x(bp)) bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E1x + BP_L_ID(bp); else /* * 57712 - we currently use one FW SB per IGU SB (Rx and Tx of * the same queue are indicated on the same IGU SB). 
So we prefer * FW and IGU SBs to be the same value. */ bp->base_fw_ndsb = bp->igu_base_sb; BNX2X_DEV_INFO("igu_dsb_id %d igu_base_sb %d igu_sb_cnt %d\n" "base_fw_ndsb %d\n", bp->igu_dsb_id, bp->igu_base_sb, bp->igu_sb_cnt, bp->base_fw_ndsb); /* * Initialize MF configuration */ bp->mf_ov = 0; bp->mf_mode = 0; vn = BP_VN(bp); if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) { BNX2X_DEV_INFO("shmem2base 0x%x, size %d, mfcfg offset %d\n", bp->common.shmem2_base, SHMEM2_RD(bp, size), (u32)offsetof(struct shmem2_region, mf_cfg_addr)); if (SHMEM2_HAS(bp, mf_cfg_addr)) bp->common.mf_cfg_base = SHMEM2_RD(bp, mf_cfg_addr); else bp->common.mf_cfg_base = bp->common.shmem_base + offsetof(struct shmem_region, func_mb) + E1H_FUNC_MAX * sizeof(struct drv_func_mb); /* * get mf configuration: * 1. existence of MF configuration * 2. MAC address must be legal (check only upper bytes) * for Switch-Independent mode; * OVLAN must be legal for Switch-Dependent mode * 3. SF_MODE configures specific MF mode */ if (bp->common.mf_cfg_base != SHMEM_MF_CFG_ADDR_NONE) { /* get mf configuration */ val = SHMEM_RD(bp, dev_info.shared_feature_config.config); val &= SHARED_FEAT_CFG_FORCE_SF_MODE_MASK; switch (val) { case SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT: val = MF_CFG_RD(bp, func_mf_config[func]. 
mac_upper); /* check for legal mac (upper bytes)*/ if (val != 0xffff) { bp->mf_mode = MULTI_FUNCTION_SI; bp->mf_config[vn] = MF_CFG_RD(bp, func_mf_config[func].config); } else BNX2X_DEV_INFO("illegal MAC address for SI\n"); break; case SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED: /* get OV configuration */ val = MF_CFG_RD(bp, func_mf_config[FUNC_0].e1hov_tag); val &= FUNC_MF_CFG_E1HOV_TAG_MASK; if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) { bp->mf_mode = MULTI_FUNCTION_SD; bp->mf_config[vn] = MF_CFG_RD(bp, func_mf_config[func].config); } else BNX2X_DEV_INFO("illegal OV for SD\n"); break; default: /* Unknown configuration: reset mf_config */ bp->mf_config[vn] = 0; BNX2X_DEV_INFO("unknown MF mode 0x%x\n", val); } } BNX2X_DEV_INFO("%s function mode\n", IS_MF(bp) ? "multi" : "single"); switch (bp->mf_mode) { case MULTI_FUNCTION_SD: val = MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) & FUNC_MF_CFG_E1HOV_TAG_MASK; if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) { bp->mf_ov = val; bp->path_has_ovlan = true; BNX2X_DEV_INFO("MF OV for func %d is %d (0x%04x)\n", func, bp->mf_ov, bp->mf_ov); } else { dev_err(&bp->pdev->dev, "No valid MF OV for func %d, aborting\n", func); return -EPERM; } break; case MULTI_FUNCTION_SI: BNX2X_DEV_INFO("func %d is in MF switch-independent mode\n", func); break; default: if (vn) { dev_err(&bp->pdev->dev, "VN %d is in a single function mode, aborting\n", vn); return -EPERM; } break; } /* check if other port on the path needs ovlan: * Since MF configuration is shared between ports * Possible mixed modes are only * {SF, SI} {SF, SD} {SD, SF} {SI, SF} */ if (CHIP_MODE_IS_4_PORT(bp) && !bp->path_has_ovlan && !IS_MF(bp) && bp->common.mf_cfg_base != SHMEM_MF_CFG_ADDR_NONE) { u8 other_port = !BP_PORT(bp); u8 other_func = BP_PATH(bp) + 2*other_port; val = MF_CFG_RD(bp, func_mf_config[other_func].e1hov_tag); if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) bp->path_has_ovlan = true; } } /* adjust igu_sb_cnt to MF for E1x */ if (CHIP_IS_E1x(bp) && IS_MF(bp)) bp->igu_sb_cnt 
/= E1HVN_MAX; /* port info */ bnx2x_get_port_hwinfo(bp); /* Get MAC addresses */ bnx2x_get_mac_hwinfo(bp); bnx2x_get_cnic_info(bp); return rc; } static void __devinit bnx2x_read_fwinfo(struct bnx2x *bp) { int cnt, i, block_end, rodi; char vpd_start[BNX2X_VPD_LEN+1]; char str_id_reg[VENDOR_ID_LEN+1]; char str_id_cap[VENDOR_ID_LEN+1]; char *vpd_data; char *vpd_extended_data = NULL; u8 len; cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_start); memset(bp->fw_ver, 0, sizeof(bp->fw_ver)); if (cnt < BNX2X_VPD_LEN) goto out_not_found; /* VPD RO tag should be first tag after identifier string, hence * we should be able to find it in first BNX2X_VPD_LEN chars */ i = pci_vpd_find_tag(vpd_start, 0, BNX2X_VPD_LEN, PCI_VPD_LRDT_RO_DATA); if (i < 0) goto out_not_found; block_end = i + PCI_VPD_LRDT_TAG_SIZE + pci_vpd_lrdt_size(&vpd_start[i]); i += PCI_VPD_LRDT_TAG_SIZE; if (block_end > BNX2X_VPD_LEN) { vpd_extended_data = kmalloc(block_end, GFP_KERNEL); if (vpd_extended_data == NULL) goto out_not_found; /* read rest of vpd image into vpd_extended_data */ memcpy(vpd_extended_data, vpd_start, BNX2X_VPD_LEN); cnt = pci_read_vpd(bp->pdev, BNX2X_VPD_LEN, block_end - BNX2X_VPD_LEN, vpd_extended_data + BNX2X_VPD_LEN); if (cnt < (block_end - BNX2X_VPD_LEN)) goto out_not_found; vpd_data = vpd_extended_data; } else vpd_data = vpd_start; /* now vpd_data holds full vpd content in both cases */ rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end, PCI_VPD_RO_KEYWORD_MFR_ID); if (rodi < 0) goto out_not_found; len = pci_vpd_info_field_size(&vpd_data[rodi]); if (len != VENDOR_ID_LEN) goto out_not_found; rodi += PCI_VPD_INFO_FLD_HDR_SIZE; /* vendor specific info */ snprintf(str_id_reg, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL); snprintf(str_id_cap, VENDOR_ID_LEN + 1, "%04X", PCI_VENDOR_ID_DELL); if (!strncmp(str_id_reg, &vpd_data[rodi], VENDOR_ID_LEN) || !strncmp(str_id_cap, &vpd_data[rodi], VENDOR_ID_LEN)) { rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end, 
PCI_VPD_RO_KEYWORD_VENDOR0); if (rodi >= 0) { len = pci_vpd_info_field_size(&vpd_data[rodi]); rodi += PCI_VPD_INFO_FLD_HDR_SIZE; if (len < 32 && (len + rodi) <= BNX2X_VPD_LEN) { memcpy(bp->fw_ver, &vpd_data[rodi], len); bp->fw_ver[len] = ' '; } } kfree(vpd_extended_data); return; } out_not_found: kfree(vpd_extended_data); return; } static void __devinit bnx2x_set_modes_bitmap(struct bnx2x *bp) { u32 flags = 0; if (CHIP_REV_IS_FPGA(bp)) SET_FLAGS(flags, MODE_FPGA); else if (CHIP_REV_IS_EMUL(bp)) SET_FLAGS(flags, MODE_EMUL); else SET_FLAGS(flags, MODE_ASIC); if (CHIP_MODE_IS_4_PORT(bp)) SET_FLAGS(flags, MODE_PORT4); else SET_FLAGS(flags, MODE_PORT2); if (CHIP_IS_E2(bp)) SET_FLAGS(flags, MODE_E2); else if (CHIP_IS_E3(bp)) { SET_FLAGS(flags, MODE_E3); if (CHIP_REV(bp) == CHIP_REV_Ax) SET_FLAGS(flags, MODE_E3_A0); else /*if (CHIP_REV(bp) == CHIP_REV_Bx)*/ SET_FLAGS(flags, MODE_E3_B0 | MODE_COS3); } if (IS_MF(bp)) { SET_FLAGS(flags, MODE_MF); switch (bp->mf_mode) { case MULTI_FUNCTION_SD: SET_FLAGS(flags, MODE_MF_SD); break; case MULTI_FUNCTION_SI: SET_FLAGS(flags, MODE_MF_SI); break; } } else SET_FLAGS(flags, MODE_SF); #if defined(__LITTLE_ENDIAN) SET_FLAGS(flags, MODE_LITTLE_ENDIAN); #else /*(__BIG_ENDIAN)*/ SET_FLAGS(flags, MODE_BIG_ENDIAN); #endif INIT_MODE_FLAGS(bp) = flags; } static int __devinit bnx2x_init_bp(struct bnx2x *bp) { int func; int rc; mutex_init(&bp->port.phy_mutex); mutex_init(&bp->fw_mb_mutex); spin_lock_init(&bp->stats_lock); #ifdef BCM_CNIC mutex_init(&bp->cnic_mutex); #endif INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task); INIT_DELAYED_WORK(&bp->sp_rtnl_task, bnx2x_sp_rtnl_task); INIT_DELAYED_WORK(&bp->period_task, bnx2x_period_task); rc = bnx2x_get_hwinfo(bp); if (rc) return rc; bnx2x_set_modes_bitmap(bp); rc = bnx2x_alloc_mem_bp(bp); if (rc) return rc; bnx2x_read_fwinfo(bp); func = BP_FUNC(bp); /* need to reset chip if undi was active */ if (!BP_NOMCP(bp)) { /* init fw_seq */ bp->fw_seq = SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) & 
DRV_MSG_SEQ_NUMBER_MASK; BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq); bnx2x_prev_unload(bp); } if (CHIP_REV_IS_FPGA(bp)) dev_err(&bp->pdev->dev, "FPGA detected\n"); if (BP_NOMCP(bp) && (func == 0)) dev_err(&bp->pdev->dev, "MCP disabled, must load devices in order!\n"); bp->multi_mode = multi_mode; bp->disable_tpa = disable_tpa; #ifdef BCM_CNIC bp->disable_tpa |= IS_MF_STORAGE_SD(bp); #endif /* Set TPA flags */ if (bp->disable_tpa) { bp->flags &= ~(TPA_ENABLE_FLAG | GRO_ENABLE_FLAG); bp->dev->features &= ~NETIF_F_LRO; } else { bp->flags |= (TPA_ENABLE_FLAG | GRO_ENABLE_FLAG); bp->dev->features |= NETIF_F_LRO; } if (CHIP_IS_E1(bp)) bp->dropless_fc = 0; else bp->dropless_fc = dropless_fc; bp->mrrs = mrrs; bp->tx_ring_size = MAX_TX_AVAIL; /* make sure that the numbers are in the right granularity */ bp->tx_ticks = (50 / BNX2X_BTR) * BNX2X_BTR; bp->rx_ticks = (25 / BNX2X_BTR) * BNX2X_BTR; bp->current_interval = CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ; init_timer(&bp->timer); bp->timer.expires = jiffies + bp->current_interval; bp->timer.data = (unsigned long) bp; bp->timer.function = bnx2x_timer; bnx2x_dcbx_set_state(bp, true, BNX2X_DCBX_ENABLED_ON_NEG_ON); bnx2x_dcbx_init_params(bp); #ifdef BCM_CNIC if (CHIP_IS_E1x(bp)) bp->cnic_base_cl_id = FP_SB_MAX_E1x; else bp->cnic_base_cl_id = FP_SB_MAX_E2; #endif /* multiple tx priority */ if (CHIP_IS_E1x(bp)) bp->max_cos = BNX2X_MULTI_TX_COS_E1X; if (CHIP_IS_E2(bp) || CHIP_IS_E3A0(bp)) bp->max_cos = BNX2X_MULTI_TX_COS_E2_E3A0; if (CHIP_IS_E3B0(bp)) bp->max_cos = BNX2X_MULTI_TX_COS_E3B0; bp->gro_check = bnx2x_need_gro_check(bp->dev->mtu); return rc; } /**************************************************************************** * General service functions ****************************************************************************/ /* * net_device service functions */ /* called with rtnl_lock */ static int bnx2x_open(struct net_device *dev) { struct bnx2x *bp = netdev_priv(dev); bool global = false; int other_engine = BP_PATH(bp) ? 
0 : 1; bool other_load_status, load_status; bp->stats_init = true; netif_carrier_off(dev); bnx2x_set_power_state(bp, PCI_D0); other_load_status = bnx2x_get_load_status(bp, other_engine); load_status = bnx2x_get_load_status(bp, BP_PATH(bp)); /* * If parity had happen during the unload, then attentions * and/or RECOVERY_IN_PROGRES may still be set. In this case we * want the first function loaded on the current engine to * complete the recovery. */ if (!bnx2x_reset_is_done(bp, BP_PATH(bp)) || bnx2x_chk_parity_attn(bp, &global, true)) do { /* * If there are attentions and they are in a global * blocks, set the GLOBAL_RESET bit regardless whether * it will be this function that will complete the * recovery or not. */ if (global) bnx2x_set_reset_global(bp); /* * Only the first function on the current engine should * try to recover in open. In case of attentions in * global blocks only the first in the chip should try * to recover. */ if ((!load_status && (!global || !other_load_status)) && bnx2x_trylock_leader_lock(bp) && !bnx2x_leader_reset(bp)) { netdev_info(bp->dev, "Recovered in open\n"); break; } /* recovery has failed... */ bnx2x_set_power_state(bp, PCI_D3hot); bp->recovery_state = BNX2X_RECOVERY_FAILED; BNX2X_ERR("Recovery flow hasn't been properly completed yet. 
Try again later.\n" "If you still see this message after a few retries then power cycle is required.\n"); return -EAGAIN; } while (0); bp->recovery_state = BNX2X_RECOVERY_DONE; return bnx2x_nic_load(bp, LOAD_OPEN); } /* called with rtnl_lock */ static int bnx2x_close(struct net_device *dev) { struct bnx2x *bp = netdev_priv(dev); /* Unload the driver, release IRQs */ bnx2x_nic_unload(bp, UNLOAD_CLOSE); /* Power off */ bnx2x_set_power_state(bp, PCI_D3hot); return 0; } static inline int bnx2x_init_mcast_macs_list(struct bnx2x *bp, struct bnx2x_mcast_ramrod_params *p) { int mc_count = netdev_mc_count(bp->dev); struct bnx2x_mcast_list_elem *mc_mac = kzalloc(sizeof(*mc_mac) * mc_count, GFP_ATOMIC); struct netdev_hw_addr *ha; if (!mc_mac) return -ENOMEM; INIT_LIST_HEAD(&p->mcast_list); netdev_for_each_mc_addr(ha, bp->dev) { mc_mac->mac = bnx2x_mc_addr(ha); list_add_tail(&mc_mac->link, &p->mcast_list); mc_mac++; } p->mcast_list_len = mc_count; return 0; } static inline void bnx2x_free_mcast_macs_list( struct bnx2x_mcast_ramrod_params *p) { struct bnx2x_mcast_list_elem *mc_mac = list_first_entry(&p->mcast_list, struct bnx2x_mcast_list_elem, link); WARN_ON(!mc_mac); kfree(mc_mac); } /** * bnx2x_set_uc_list - configure a new unicast MACs list. * * @bp: driver handle * * We will use zero (0) as a MAC type for these MACs. 
*/ static inline int bnx2x_set_uc_list(struct bnx2x *bp) { int rc; struct net_device *dev = bp->dev; struct netdev_hw_addr *ha; struct bnx2x_vlan_mac_obj *mac_obj = &bp->fp->mac_obj; unsigned long ramrod_flags = 0; /* First schedule a cleanup up of old configuration */ rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_UC_LIST_MAC, false); if (rc < 0) { BNX2X_ERR("Failed to schedule DELETE operations: %d\n", rc); return rc; } netdev_for_each_uc_addr(ha, dev) { rc = bnx2x_set_mac_one(bp, bnx2x_uc_addr(ha), mac_obj, true, BNX2X_UC_LIST_MAC, &ramrod_flags); if (rc < 0) { BNX2X_ERR("Failed to schedule ADD operations: %d\n", rc); return rc; } } /* Execute the pending commands */ __set_bit(RAMROD_CONT, &ramrod_flags); return bnx2x_set_mac_one(bp, NULL, mac_obj, false /* don't care */, BNX2X_UC_LIST_MAC, &ramrod_flags); } static inline int bnx2x_set_mc_list(struct bnx2x *bp) { struct net_device *dev = bp->dev; struct bnx2x_mcast_ramrod_params rparam = {NULL}; int rc = 0; rparam.mcast_obj = &bp->mcast_obj; /* first, clear all configured multicast MACs */ rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL); if (rc < 0) { BNX2X_ERR("Failed to clear multicast configuration: %d\n", rc); return rc; } /* then, configure a new MACs list */ if (netdev_mc_count(dev)) { rc = bnx2x_init_mcast_macs_list(bp, &rparam); if (rc) { BNX2X_ERR("Failed to create multicast MACs list: %d\n", rc); return rc; } /* Now add the new MACs */ rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_ADD); if (rc < 0) BNX2X_ERR("Failed to set a new multicast configuration: %d\n", rc); bnx2x_free_mcast_macs_list(&rparam); } return rc; } /* If bp->state is OPEN, should be called with netif_addr_lock_bh() */ void bnx2x_set_rx_mode(struct net_device *dev) { struct bnx2x *bp = netdev_priv(dev); u32 rx_mode = BNX2X_RX_MODE_NORMAL; if (bp->state != BNX2X_STATE_OPEN) { DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state); return; } DP(NETIF_MSG_IFUP, "dev->flags = %x\n", bp->dev->flags); if (dev->flags & 
IFF_PROMISC) rx_mode = BNX2X_RX_MODE_PROMISC; else if ((dev->flags & IFF_ALLMULTI) || ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp))) rx_mode = BNX2X_RX_MODE_ALLMULTI; else { /* some multicasts */ if (bnx2x_set_mc_list(bp) < 0) rx_mode = BNX2X_RX_MODE_ALLMULTI; if (bnx2x_set_uc_list(bp) < 0) rx_mode = BNX2X_RX_MODE_PROMISC; } bp->rx_mode = rx_mode; #ifdef BCM_CNIC /* handle ISCSI SD mode */ if (IS_MF_ISCSI_SD(bp)) bp->rx_mode = BNX2X_RX_MODE_NONE; #endif /* Schedule the rx_mode command */ if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state)) { set_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state); return; } bnx2x_set_storm_rx_mode(bp); } /* called with rtnl_lock */ static int bnx2x_mdio_read(struct net_device *netdev, int prtad, int devad, u16 addr) { struct bnx2x *bp = netdev_priv(netdev); u16 value; int rc; DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n", prtad, devad, addr); /* The HW expects different devad if CL22 is used */ devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad; bnx2x_acquire_phy_lock(bp); rc = bnx2x_phy_read(&bp->link_params, prtad, devad, addr, &value); bnx2x_release_phy_lock(bp); DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc); if (!rc) rc = value; return rc; } /* called with rtnl_lock */ static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad, u16 addr, u16 value) { struct bnx2x *bp = netdev_priv(netdev); int rc; DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x, value 0x%x\n", prtad, devad, addr, value); /* The HW expects different devad if CL22 is used */ devad = (devad == MDIO_DEVAD_NONE) ? 
DEFAULT_PHY_DEV_ADDR : devad;

	/* PHY accesses are serialized against the link code */
	bnx2x_acquire_phy_lock(bp);
	rc = bnx2x_phy_write(&bp->link_params, prtad, devad, addr, value);
	bnx2x_release_phy_lock(bp);
	return rc;
}

/* called with rtnl_lock */
static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct mii_ioctl_data *mdio = if_mii(ifr);

	DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
	   mdio->phy_id, mdio->reg_num, mdio->val_in);

	if (!netif_running(dev))
		return -EAGAIN;

	/* Delegate MII register access to the generic MDIO layer */
	return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll entry point: mask the IRQ and run the handler inline */
static void poll_bnx2x(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2x_interrupt(bp->pdev->irq, dev);
	enable_irq(bp->pdev->irq);
}
#endif

/* ndo_validate_addr: reject devices carrying an invalid MAC address */
static int bnx2x_validate_addr(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (!bnx2x_is_valid_ether_addr(bp, dev->dev_addr)) {
		BNX2X_ERR("Non-valid Ethernet address\n");
		return -EADDRNOTAVAIL;
	}
	return 0;
}

static const struct net_device_ops bnx2x_netdev_ops = {
	.ndo_open		= bnx2x_open,
	.ndo_stop		= bnx2x_close,
	.ndo_start_xmit		= bnx2x_start_xmit,
	.ndo_select_queue	= bnx2x_select_queue,
	.ndo_set_rx_mode	= bnx2x_set_rx_mode,
	.ndo_set_mac_address	= bnx2x_change_mac_addr,
	.ndo_validate_addr	= bnx2x_validate_addr,
	.ndo_do_ioctl		= bnx2x_ioctl,
	.ndo_change_mtu		= bnx2x_change_mtu,
	.ndo_fix_features	= bnx2x_fix_features,
	.ndo_set_features	= bnx2x_set_features,
	.ndo_tx_timeout		= bnx2x_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= poll_bnx2x,
#endif
	.ndo_setup_tc		= bnx2x_setup_tc,

#if defined(NETDEV_FCOE_WWNN) && defined(BCM_CNIC)
	.ndo_fcoe_get_wwn	= bnx2x_fcoe_get_wwn,
#endif
};

/*
 * Configure DMA masks: prefer 64-bit DMA (recording USING_DAC_FLAG) and
 * fall back to a 32-bit mask if the platform cannot do 64-bit.
 * Returns 0 on success, -EIO if no usable mask could be set.
 */
static inline int bnx2x_set_coherency_mask(struct bnx2x *bp)
{
	struct device *dev = &bp->pdev->dev;

	if (dma_set_mask(dev, DMA_BIT_MASK(64)) == 0) {
		bp->flags |= USING_DAC_FLAG;
		if (dma_set_coherent_mask(dev, DMA_BIT_MASK(64)) != 0) {
			dev_err(dev, "dma_set_coherent_mask failed, aborting\n");
			return -EIO;
} } else if (dma_set_mask(dev, DMA_BIT_MASK(32)) != 0) { dev_err(dev, "System does not support DMA, aborting\n"); return -EIO; } return 0; } static int __devinit bnx2x_init_dev(struct pci_dev *pdev, struct net_device *dev, unsigned long board_type) { struct bnx2x *bp; int rc; u32 pci_cfg_dword; bool chip_is_e1x = (board_type == BCM57710 || board_type == BCM57711 || board_type == BCM57711E); SET_NETDEV_DEV(dev, &pdev->dev); bp = netdev_priv(dev); bp->dev = dev; bp->pdev = pdev; bp->flags = 0; rc = pci_enable_device(pdev); if (rc) { dev_err(&bp->pdev->dev, "Cannot enable PCI device, aborting\n"); goto err_out; } if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { dev_err(&bp->pdev->dev, "Cannot find PCI device base address, aborting\n"); rc = -ENODEV; goto err_out_disable; } if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) { dev_err(&bp->pdev->dev, "Cannot find second PCI device" " base address, aborting\n"); rc = -ENODEV; goto err_out_disable; } if (atomic_read(&pdev->enable_cnt) == 1) { rc = pci_request_regions(pdev, DRV_MODULE_NAME); if (rc) { dev_err(&bp->pdev->dev, "Cannot obtain PCI resources, aborting\n"); goto err_out_disable; } pci_set_master(pdev); pci_save_state(pdev); } bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM); if (bp->pm_cap == 0) { dev_err(&bp->pdev->dev, "Cannot find power management capability, aborting\n"); rc = -EIO; goto err_out_release; } if (!pci_is_pcie(pdev)) { dev_err(&bp->pdev->dev, "Not PCI Express, aborting\n"); rc = -EIO; goto err_out_release; } rc = bnx2x_set_coherency_mask(bp); if (rc) goto err_out_release; dev->mem_start = pci_resource_start(pdev, 0); dev->base_addr = dev->mem_start; dev->mem_end = pci_resource_end(pdev, 0); dev->irq = pdev->irq; bp->regview = pci_ioremap_bar(pdev, 0); if (!bp->regview) { dev_err(&bp->pdev->dev, "Cannot map register space, aborting\n"); rc = -ENOMEM; goto err_out_release; } /* In E1/E1H use pci device function given by kernel. 
* In E2/E3 read physical function from ME register since these chips * support Physical Device Assignment where kernel BDF maybe arbitrary * (depending on hypervisor). */ if (chip_is_e1x) bp->pf_num = PCI_FUNC(pdev->devfn); else {/* chip is E2/3*/ pci_read_config_dword(bp->pdev, PCICFG_ME_REGISTER, &pci_cfg_dword); bp->pf_num = (u8)((pci_cfg_dword & ME_REG_ABS_PF_NUM) >> ME_REG_ABS_PF_NUM_SHIFT); } BNX2X_DEV_INFO("me reg PF num: %d\n", bp->pf_num); bnx2x_set_power_state(bp, PCI_D0); /* clean indirect addresses */ pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, PCICFG_VENDOR_ID_OFFSET); /* * Clean the following indirect addresses for all functions since it * is not used by the driver. */ REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0, 0); REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0, 0); REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0, 0); REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0, 0); if (chip_is_e1x) { REG_WR(bp, PXP2_REG_PGL_ADDR_88_F1, 0); REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F1, 0); REG_WR(bp, PXP2_REG_PGL_ADDR_90_F1, 0); REG_WR(bp, PXP2_REG_PGL_ADDR_94_F1, 0); } /* * Enable internal target-read (in case we are probed after PF FLR). * Must be done prior to any BAR read access. 
Only for 57712 and up */ if (!chip_is_e1x) REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1); /* Reset the load counter */ bnx2x_clear_load_status(bp); dev->watchdog_timeo = TX_TIMEOUT; dev->netdev_ops = &bnx2x_netdev_ops; bnx2x_set_ethtool_ops(dev); dev->priv_flags |= IFF_UNICAST_FLT; dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_GRO | NETIF_F_RXHASH | NETIF_F_HW_VLAN_TX; dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_HIGHDMA; dev->features |= dev->hw_features | NETIF_F_HW_VLAN_RX; if (bp->flags & USING_DAC_FLAG) dev->features |= NETIF_F_HIGHDMA; /* Add Loopback capability to the device */ dev->hw_features |= NETIF_F_LOOPBACK; #ifdef BCM_DCBNL dev->dcbnl_ops = &bnx2x_dcbnl_ops; #endif /* get_port_hwinfo() will set prtad and mmds properly */ bp->mdio.prtad = MDIO_PRTAD_NONE; bp->mdio.mmds = 0; bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22; bp->mdio.dev = dev; bp->mdio.mdio_read = bnx2x_mdio_read; bp->mdio.mdio_write = bnx2x_mdio_write; return 0; err_out_release: if (atomic_read(&pdev->enable_cnt) == 1) pci_release_regions(pdev); err_out_disable: pci_disable_device(pdev); pci_set_drvdata(pdev, NULL); err_out: return rc; } static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp, int *width, int *speed) { u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL); *width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT; /* return value of 1=2.5GHz 2=5GHz */ *speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT; } static int bnx2x_check_firmware(struct bnx2x *bp) { const struct firmware *firmware = bp->firmware; struct bnx2x_fw_file_hdr *fw_hdr; struct bnx2x_fw_file_section *sections; u32 offset, len, num_ops; u16 *ops_offsets; int i; const u8 *fw_ver; if (firmware->size < sizeof(struct bnx2x_fw_file_hdr)) { BNX2X_ERR("Wrong FW 
size\n"); return -EINVAL; } fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data; sections = (struct bnx2x_fw_file_section *)fw_hdr; /* Make sure none of the offsets and sizes make us read beyond * the end of the firmware data */ for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) { offset = be32_to_cpu(sections[i].offset); len = be32_to_cpu(sections[i].len); if (offset + len > firmware->size) { BNX2X_ERR("Section %d length is out of bounds\n", i); return -EINVAL; } } /* Likewise for the init_ops offsets */ offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset); ops_offsets = (u16 *)(firmware->data + offset); num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op); for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) { if (be16_to_cpu(ops_offsets[i]) > num_ops) { BNX2X_ERR("Section offset %d is out of bounds\n", i); return -EINVAL; } } /* Check FW version */ offset = be32_to_cpu(fw_hdr->fw_version.offset); fw_ver = firmware->data + offset; if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) || (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) || (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) || (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) { BNX2X_ERR("Bad FW version:%d.%d.%d.%d. 
Should be %d.%d.%d.%d\n", fw_ver[0], fw_ver[1], fw_ver[2], fw_ver[3], BCM_5710_FW_MAJOR_VERSION, BCM_5710_FW_MINOR_VERSION, BCM_5710_FW_REVISION_VERSION, BCM_5710_FW_ENGINEERING_VERSION); return -EINVAL; } return 0; } static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n) { const __be32 *source = (const __be32 *)_source; u32 *target = (u32 *)_target; u32 i; for (i = 0; i < n/4; i++) target[i] = be32_to_cpu(source[i]); } /* Ops array is stored in the following format: {op(8bit), offset(24bit, big endian), data(32bit, big endian)} */ static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n) { const __be32 *source = (const __be32 *)_source; struct raw_op *target = (struct raw_op *)_target; u32 i, j, tmp; for (i = 0, j = 0; i < n/8; i++, j += 2) { tmp = be32_to_cpu(source[j]); target[i].op = (tmp >> 24) & 0xff; target[i].offset = tmp & 0xffffff; target[i].raw_data = be32_to_cpu(source[j + 1]); } } /** * IRO array is stored in the following format: * {base(24bit), m1(16bit), m2(16bit), m3(16bit), size(16bit) } */ static inline void bnx2x_prep_iro(const u8 *_source, u8 *_target, u32 n) { const __be32 *source = (const __be32 *)_source; struct iro *target = (struct iro *)_target; u32 i, j, tmp; for (i = 0, j = 0; i < n/sizeof(struct iro); i++) { target[i].base = be32_to_cpu(source[j]); j++; tmp = be32_to_cpu(source[j]); target[i].m1 = (tmp >> 16) & 0xffff; target[i].m2 = tmp & 0xffff; j++; tmp = be32_to_cpu(source[j]); target[i].m3 = (tmp >> 16) & 0xffff; target[i].size = tmp & 0xffff; j++; } } static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n) { const __be16 *source = (const __be16 *)_source; u16 *target = (u16 *)_target; u32 i; for (i = 0; i < n/2; i++) target[i] = be16_to_cpu(source[i]); } #define BNX2X_ALLOC_AND_SET(arr, lbl, func) \ do { \ u32 len = be32_to_cpu(fw_hdr->arr.len); \ bp->arr = kmalloc(len, GFP_KERNEL); \ if (!bp->arr) \ goto lbl; \ func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \ (u8 
*)bp->arr, len); \ } while (0) static int bnx2x_init_firmware(struct bnx2x *bp) { const char *fw_file_name; struct bnx2x_fw_file_hdr *fw_hdr; int rc; if (bp->firmware) return 0; if (CHIP_IS_E1(bp)) fw_file_name = FW_FILE_NAME_E1; else if (CHIP_IS_E1H(bp)) fw_file_name = FW_FILE_NAME_E1H; else if (!CHIP_IS_E1x(bp)) fw_file_name = FW_FILE_NAME_E2; else { BNX2X_ERR("Unsupported chip revision\n"); return -EINVAL; } BNX2X_DEV_INFO("Loading %s\n", fw_file_name); rc = request_firmware(&bp->firmware, fw_file_name, &bp->pdev->dev); if (rc) { BNX2X_ERR("Can't load firmware file %s\n", fw_file_name); goto request_firmware_exit; } rc = bnx2x_check_firmware(bp); if (rc) { BNX2X_ERR("Corrupt firmware file %s\n", fw_file_name); goto request_firmware_exit; } fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data; /* Initialize the pointers to the init arrays */ /* Blob */ BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n); /* Opcodes */ BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops); /* Offsets */ BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err, be16_to_cpu_n); /* STORMs firmware */ INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data + be32_to_cpu(fw_hdr->tsem_int_table_data.offset); INIT_TSEM_PRAM_DATA(bp) = bp->firmware->data + be32_to_cpu(fw_hdr->tsem_pram_data.offset); INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data + be32_to_cpu(fw_hdr->usem_int_table_data.offset); INIT_USEM_PRAM_DATA(bp) = bp->firmware->data + be32_to_cpu(fw_hdr->usem_pram_data.offset); INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data + be32_to_cpu(fw_hdr->xsem_int_table_data.offset); INIT_XSEM_PRAM_DATA(bp) = bp->firmware->data + be32_to_cpu(fw_hdr->xsem_pram_data.offset); INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data + be32_to_cpu(fw_hdr->csem_int_table_data.offset); INIT_CSEM_PRAM_DATA(bp) = bp->firmware->data + be32_to_cpu(fw_hdr->csem_pram_data.offset); /* IRO */ BNX2X_ALLOC_AND_SET(iro_arr, iro_alloc_err, bnx2x_prep_iro); return 0; iro_alloc_err: 
kfree(bp->init_ops_offsets);
init_offsets_alloc_err:
	kfree(bp->init_ops);
init_ops_alloc_err:
	kfree(bp->init_data);
request_firmware_exit:
	release_firmware(bp->firmware);
	bp->firmware = NULL;

	return rc;
}

/* Release the firmware blob and every init array derived from it */
static void bnx2x_release_firmware(struct bnx2x *bp)
{
	kfree(bp->init_ops_offsets);
	kfree(bp->init_ops);
	kfree(bp->init_data);
	release_firmware(bp->firmware);
	bp->firmware = NULL;
}

/* Driver callbacks handed to the function state-machine object */
static struct bnx2x_func_sp_drv_ops bnx2x_func_sp_drv = {
	.init_hw_cmn_chip = bnx2x_init_hw_common_chip,
	.init_hw_cmn      = bnx2x_init_hw_common,
	.init_hw_port     = bnx2x_init_hw_port,
	.init_hw_func     = bnx2x_init_hw_func,
	.reset_hw_cmn     = bnx2x_reset_common,
	.reset_hw_port    = bnx2x_reset_port,
	.reset_hw_func    = bnx2x_reset_func,
	.gunzip_init      = bnx2x_gunzip_init,
	.gunzip_end       = bnx2x_gunzip_end,
	.init_fw          = bnx2x_init_firmware,
	.release_fw       = bnx2x_release_firmware,
};

void bnx2x__init_func_obj(struct bnx2x *bp)
{
	/* Prepare DMAE related driver resources */
	bnx2x_setup_dmae(bp);

	bnx2x_init_func_obj(bp,
			    &bp->func_obj,
			    bnx2x_sp(bp, func_rdata),
			    bnx2x_sp_mapping(bp, func_rdata),
			    &bnx2x_func_sp_drv);
}

/* must be called after sriov-enable */
static inline int bnx2x_set_qm_cid_count(struct bnx2x *bp)
{
	int cid_count = BNX2X_L2_CID_COUNT(bp);

#ifdef BCM_CNIC
	cid_count += CNIC_CID_MAX;
#endif
	/* The QM wants the CID count rounded up to its granularity */
	return roundup(cid_count, QM_CID_ROUND);
}

/**
 * bnx2x_get_num_non_def_sbs - return the number of non-default SBs
 *
 * @pdev: pci device
 *
 */
static inline int bnx2x_get_num_non_def_sbs(struct pci_dev *pdev)
{
	int pos;
	u16 control;

	pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);

	/*
	 * If MSI-X is not supported - return number of SBs needed to support
	 * one fast path queue: one FP queue + SB for CNIC
	 */
	if (!pos)
		return 1 + CNIC_PRESENT;

	/*
	 * The value in the PCI configuration space is the index of the last
	 * entry, namely one less than the actual size of the table, which is
	 * exactly what we want to return from this function: number of all SBs
	 * without the default SB.
*/ pci_read_config_word(pdev, pos + PCI_MSI_FLAGS, &control); return control & PCI_MSIX_FLAGS_QSIZE; } static int __devinit bnx2x_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { struct net_device *dev = NULL; struct bnx2x *bp; int pcie_width, pcie_speed; int rc, max_non_def_sbs; int rx_count, tx_count, rss_count; /* * An estimated maximum supported CoS number according to the chip * version. * We will try to roughly estimate the maximum number of CoSes this chip * may support in order to minimize the memory allocated for Tx * netdev_queue's. This number will be accurately calculated during the * initialization of bp->max_cos based on the chip versions AND chip * revision in the bnx2x_init_bp(). */ u8 max_cos_est = 0; switch (ent->driver_data) { case BCM57710: case BCM57711: case BCM57711E: max_cos_est = BNX2X_MULTI_TX_COS_E1X; break; case BCM57712: case BCM57712_MF: max_cos_est = BNX2X_MULTI_TX_COS_E2_E3A0; break; case BCM57800: case BCM57800_MF: case BCM57810: case BCM57810_MF: case BCM57840: case BCM57840_MF: max_cos_est = BNX2X_MULTI_TX_COS_E3B0; break; default: pr_err("Unknown board_type (%ld), aborting\n", ent->driver_data); return -ENODEV; } max_non_def_sbs = bnx2x_get_num_non_def_sbs(pdev); /* !!! FIXME !!! * Do not allow the maximum SB count to grow above 16 * since Special CIDs starts from 16*BNX2X_MULTI_TX_COS=48. * We will use the FP_SB_MAX_E1x macro for this matter. 
*/ max_non_def_sbs = min_t(int, FP_SB_MAX_E1x, max_non_def_sbs); WARN_ON(!max_non_def_sbs); /* Maximum number of RSS queues: one IGU SB goes to CNIC */ rss_count = max_non_def_sbs - CNIC_PRESENT; /* Maximum number of netdev Rx queues: RSS + FCoE L2 */ rx_count = rss_count + FCOE_PRESENT; /* * Maximum number of netdev Tx queues: * Maximum TSS queues * Maximum supported number of CoS + FCoE L2 */ tx_count = MAX_TXQS_PER_COS * max_cos_est + FCOE_PRESENT; /* dev zeroed in init_etherdev */ dev = alloc_etherdev_mqs(sizeof(*bp), tx_count, rx_count); if (!dev) return -ENOMEM; bp = netdev_priv(dev); BNX2X_DEV_INFO("Allocated netdev with %d tx and %d rx queues\n", tx_count, rx_count); bp->igu_sb_cnt = max_non_def_sbs; bp->msg_enable = debug; pci_set_drvdata(pdev, dev); rc = bnx2x_init_dev(pdev, dev, ent->driver_data); if (rc < 0) { free_netdev(dev); return rc; } BNX2X_DEV_INFO("max_non_def_sbs %d\n", max_non_def_sbs); rc = bnx2x_init_bp(bp); if (rc) goto init_one_exit; /* * Map doorbels here as we need the real value of bp->max_cos which * is initialized in bnx2x_init_bp(). */ bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2), min_t(u64, BNX2X_DB_SIZE(bp), pci_resource_len(pdev, 2))); if (!bp->doorbells) { dev_err(&bp->pdev->dev, "Cannot map doorbell space, aborting\n"); rc = -ENOMEM; goto init_one_exit; } /* calc qm_cid_count */ bp->qm_cid_count = bnx2x_set_qm_cid_count(bp); #ifdef BCM_CNIC /* disable FCOE L2 queue for E1x */ if (CHIP_IS_E1x(bp)) bp->flags |= NO_FCOE_FLAG; #endif /* Configure interrupt mode: try to enable MSI-X/MSI if * needed, set bp->num_queues appropriately. 
*/ bnx2x_set_int_mode(bp); /* Add all NAPI objects */ bnx2x_add_all_napi(bp); rc = register_netdev(dev); if (rc) { dev_err(&pdev->dev, "Cannot register net device\n"); goto init_one_exit; } #ifdef BCM_CNIC if (!NO_FCOE(bp)) { /* Add storage MAC address */ rtnl_lock(); dev_addr_add(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN); rtnl_unlock(); } #endif bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed); BNX2X_DEV_INFO( "%s (%c%d) PCI-E x%d %s found at mem %lx, IRQ %d, node addr %pM\n", board_info[ent->driver_data].name, (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4), pcie_width, ((!CHIP_IS_E2(bp) && pcie_speed == 2) || (CHIP_IS_E2(bp) && pcie_speed == 1)) ? "5GHz (Gen2)" : "2.5GHz", dev->base_addr, bp->pdev->irq, dev->dev_addr); return 0; init_one_exit: if (bp->regview) iounmap(bp->regview); if (bp->doorbells) iounmap(bp->doorbells); free_netdev(dev); if (atomic_read(&pdev->enable_cnt) == 1) pci_release_regions(pdev); pci_disable_device(pdev); pci_set_drvdata(pdev, NULL); return rc; } static void __devexit bnx2x_remove_one(struct pci_dev *pdev) { struct net_device *dev = pci_get_drvdata(pdev); struct bnx2x *bp; if (!dev) { dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n"); return; } bp = netdev_priv(dev); #ifdef BCM_CNIC /* Delete storage MAC address */ if (!NO_FCOE(bp)) { rtnl_lock(); dev_addr_del(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN); rtnl_unlock(); } #endif #ifdef BCM_DCBNL /* Delete app tlvs from dcbnl */ bnx2x_dcbnl_update_applist(bp, true); #endif unregister_netdev(dev); /* Delete all NAPI objects */ bnx2x_del_all_napi(bp); /* Power on: we can't let PCI layer write to us while we are in D3 */ bnx2x_set_power_state(bp, PCI_D0); /* Disable MSI/MSI-X */ bnx2x_disable_msi(bp); /* Power off */ bnx2x_set_power_state(bp, PCI_D3hot); /* Make sure RESET task is not scheduled before continuing */ cancel_delayed_work_sync(&bp->sp_rtnl_task); if (bp->regview) iounmap(bp->regview); if (bp->doorbells) iounmap(bp->doorbells); bnx2x_release_firmware(bp); 
bnx2x_free_mem_bp(bp);

	free_netdev(dev);

	/* Release regions only on the last disable of this device */
	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

/*
 * Quiesce the NIC for EEH (PCI error) handling: stop Tx, NAPI and the
 * timer, release IRQs and all driver memory, and mark the device
 * CLOSED.  Always returns 0.
 */
static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
{
	int i;

	bp->state = BNX2X_STATE_ERROR;

	bp->rx_mode = BNX2X_RX_MODE_NONE;

#ifdef BCM_CNIC
	bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
#endif
	/* Stop Tx */
	bnx2x_tx_disable(bp);

	bnx2x_netif_stop(bp, 0);

	del_timer_sync(&bp->timer);

	bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	/* Release IRQs */
	bnx2x_free_irq(bp);

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);

	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);

	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	netif_carrier_off(bp->dev);

	return 0;
}

/* Re-validate the MCP shared memory signature after an EEH reset */
static void bnx2x_eeh_recover(struct bnx2x *bp)
{
	u32 val;

	mutex_init(&bp->port.phy_mutex);

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");
}

/**
 * bnx2x_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	netif_device_detach(dev);

	/* Permanent failure: no point requesting a reset */
	if (state == pci_channel_io_perm_failure) {
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	if (netif_running(dev))
		bnx2x_eeh_nic_unload(bp);

	pci_disable_device(pdev);

	rtnl_unlock();

	/* Request a slot reset */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * bnx2x_io_slot_reset - called after the PCI bus has been reset
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
*/
static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset\n");
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	/* Restore bus mastering and the config space saved at probe */
	pci_set_master(pdev);
	pci_restore_state(pdev);

	if (netif_running(dev))
		bnx2x_set_power_state(bp, PCI_D0);

	rtnl_unlock();

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * bnx2x_io_resume - called when traffic can start flowing again
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * its OK to resume normal operation.
 */
static void bnx2x_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	/* Don't reload while parity recovery is still in progress */
	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		netdev_err(bp->dev, "Handling parity error recovery. Try again later\n");
		return;
	}

	rtnl_lock();

	bnx2x_eeh_recover(bp);

	if (netif_running(dev))
		bnx2x_nic_load(bp, LOAD_NORMAL);

	netif_device_attach(dev);

	rtnl_unlock();
}

static struct pci_error_handlers bnx2x_err_handler = {
	.error_detected = bnx2x_io_error_detected,
	.slot_reset     = bnx2x_io_slot_reset,
	.resume         = bnx2x_io_resume,
};

static struct pci_driver bnx2x_pci_driver = {
	.name        = DRV_MODULE_NAME,
	.id_table    = bnx2x_pci_tbl,
	.probe       = bnx2x_init_one,
	.remove      = __devexit_p(bnx2x_remove_one),
	.suspend     = bnx2x_suspend,
	.resume      = bnx2x_resume,
	.err_handler = &bnx2x_err_handler,
};

/* Module entry: create the slow-path workqueue and register the driver */
static int __init bnx2x_init(void)
{
	int ret;

	pr_info("%s", version);

	bnx2x_wq = create_singlethread_workqueue("bnx2x");
	if (bnx2x_wq == NULL) {
		pr_err("Cannot create workqueue\n");
		return -ENOMEM;
	}

	ret = pci_register_driver(&bnx2x_pci_driver);
	if (ret) {
		pr_err("Cannot register driver\n");
		/* Undo workqueue creation on registration failure */
		destroy_workqueue(bnx2x_wq);
	}
	return ret;
}

static void __exit bnx2x_cleanup(void)
{
	struct list_head *pos, *q;

	pci_unregister_driver(&bnx2x_pci_driver);
	destroy_workqueue(bnx2x_wq);

	/* Free
globally allocated resources */
	list_for_each_safe(pos, q, &bnx2x_prev_list) {
		struct bnx2x_prev_path_list *tmp =
			list_entry(pos, struct bnx2x_prev_path_list, list);
		list_del(pos);
		kfree(tmp);
	}
}

/* Raise a general-attention bit so firmware/other paths see a link change */
void bnx2x_notify_link_changed(struct bnx2x *bp)
{
	REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + BP_FUNC(bp)*sizeof(u32), 1);
}

module_init(bnx2x_init);
module_exit(bnx2x_cleanup);

#ifdef BCM_CNIC
/**
 * bnx2x_set_iscsi_eth_mac_addr - set iSCSI MAC(s).
 *
 * @bp: driver handle
 *
 * This function will wait until the ramrod completion returns.
 * Return 0 if success, -ENODEV if ramrod doesn't return.
 */
static inline int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp)
{
	unsigned long ramrod_flags = 0;

	/* Block until the MAC-add ramrod completes */
	__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
	return bnx2x_set_mac_one(bp, bp->cnic_eth_dev.iscsi_mac,
				 &bp->iscsi_l2_mac_obj, true,
				 BNX2X_ISCSI_ETH_MAC, &ramrod_flags);
}

/* count denotes the number of new completions we have seen */
static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
{
	struct eth_spe *spe;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	spin_lock_bh(&bp->spq_lock);
	BUG_ON(bp->cnic_spq_pending < count);
	bp->cnic_spq_pending -= count;

	/* Drain the CNIC kernel work queue into the slow-path queue */
	for (; bp->cnic_kwq_pending; bp->cnic_kwq_pending--) {
		u16 type = (le16_to_cpu(bp->cnic_kwq_cons->hdr.type)
				& SPE_HDR_CONN_TYPE) >>
				SPE_HDR_CONN_TYPE_SHIFT;
		u8 cmd = (le32_to_cpu(bp->cnic_kwq_cons->hdr.conn_and_cmd_data)
				>> SPE_HDR_CMD_ID_SHIFT) & 0xff;

		/* Set validation for iSCSI L2 client before sending SETUP
		 * ramrod
		 */
		if (type == ETH_CONNECTION_TYPE) {
			if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP)
				bnx2x_set_ctx_validation(bp, &bp->context.
					vcxt[BNX2X_ISCSI_ETH_CID].eth,
					BNX2X_ISCSI_ETH_CID);
		}

		/*
		 * There may be not more than 8 L2, not more than 8 L5 SPEs
		 * and in the air. We also check that number of outstanding
		 * COMMON ramrods is not more than the EQ and SPQ can
		 * accommodate.
*/ if (type == ETH_CONNECTION_TYPE) { if (!atomic_read(&bp->cq_spq_left)) break; else atomic_dec(&bp->cq_spq_left); } else if (type == NONE_CONNECTION_TYPE) { if (!atomic_read(&bp->eq_spq_left)) break; else atomic_dec(&bp->eq_spq_left); } else if ((type == ISCSI_CONNECTION_TYPE) || (type == FCOE_CONNECTION_TYPE)) { if (bp->cnic_spq_pending >= bp->cnic_eth_dev.max_kwqe_pending) break; else bp->cnic_spq_pending++; } else { BNX2X_ERR("Unknown SPE type: %d\n", type); bnx2x_panic(); break; } spe = bnx2x_sp_get_next(bp); *spe = *bp->cnic_kwq_cons; DP(BNX2X_MSG_SP, "pending on SPQ %d, on KWQ %d count %d\n", bp->cnic_spq_pending, bp->cnic_kwq_pending, count); if (bp->cnic_kwq_cons == bp->cnic_kwq_last) bp->cnic_kwq_cons = bp->cnic_kwq; else bp->cnic_kwq_cons++; } bnx2x_sp_prod_update(bp); spin_unlock_bh(&bp->spq_lock); } static int bnx2x_cnic_sp_queue(struct net_device *dev, struct kwqe_16 *kwqes[], u32 count) { struct bnx2x *bp = netdev_priv(dev); int i; #ifdef BNX2X_STOP_ON_ERROR if (unlikely(bp->panic)) { BNX2X_ERR("Can't post to SP queue while panic\n"); return -EIO; } #endif if ((bp->recovery_state != BNX2X_RECOVERY_DONE) && (bp->recovery_state != BNX2X_RECOVERY_NIC_LOADING)) { BNX2X_ERR("Handling parity error recovery. 
Try again later\n"); return -EAGAIN; } spin_lock_bh(&bp->spq_lock); for (i = 0; i < count; i++) { struct eth_spe *spe = (struct eth_spe *)kwqes[i]; if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT) break; *bp->cnic_kwq_prod = *spe; bp->cnic_kwq_pending++; DP(BNX2X_MSG_SP, "L5 SPQE %x %x %x:%x pos %d\n", spe->hdr.conn_and_cmd_data, spe->hdr.type, spe->data.update_data_addr.hi, spe->data.update_data_addr.lo, bp->cnic_kwq_pending); if (bp->cnic_kwq_prod == bp->cnic_kwq_last) bp->cnic_kwq_prod = bp->cnic_kwq; else bp->cnic_kwq_prod++; } spin_unlock_bh(&bp->spq_lock); if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending) bnx2x_cnic_sp_post(bp, 0); return i; } static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl) { struct cnic_ops *c_ops; int rc = 0; mutex_lock(&bp->cnic_mutex); c_ops = rcu_dereference_protected(bp->cnic_ops, lockdep_is_held(&bp->cnic_mutex)); if (c_ops) rc = c_ops->cnic_ctl(bp->cnic_data, ctl); mutex_unlock(&bp->cnic_mutex); return rc; } static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl) { struct cnic_ops *c_ops; int rc = 0; rcu_read_lock(); c_ops = rcu_dereference(bp->cnic_ops); if (c_ops) rc = c_ops->cnic_ctl(bp->cnic_data, ctl); rcu_read_unlock(); return rc; } /* * for commands that have no data */ int bnx2x_cnic_notify(struct bnx2x *bp, int cmd) { struct cnic_ctl_info ctl = {0}; ctl.cmd = cmd; return bnx2x_cnic_ctl_send(bp, &ctl); } static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid, u8 err) { struct cnic_ctl_info ctl = {0}; /* first we tell CNIC and only then we count this as a completion */ ctl.cmd = CNIC_CTL_COMPLETION_CMD; ctl.data.comp.cid = cid; ctl.data.comp.error = err; bnx2x_cnic_ctl_send_bh(bp, &ctl); bnx2x_cnic_sp_post(bp, 0); } /* Called with netif_addr_lock_bh() taken. * Sets an rx_mode config for an iSCSI ETH client. * Doesn't block. * Completion should be checked outside. 
*/ static void bnx2x_set_iscsi_eth_rx_mode(struct bnx2x *bp, bool start) { unsigned long accept_flags = 0, ramrod_flags = 0; u8 cl_id = bnx2x_cnic_eth_cl_id(bp, BNX2X_ISCSI_ETH_CL_ID_IDX); int sched_state = BNX2X_FILTER_ISCSI_ETH_STOP_SCHED; if (start) { /* Start accepting on iSCSI L2 ring. Accept all multicasts * because it's the only way for UIO Queue to accept * multicasts (in non-promiscuous mode only one Queue per * function will receive multicast packets (leading in our * case). */ __set_bit(BNX2X_ACCEPT_UNICAST, &accept_flags); __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &accept_flags); __set_bit(BNX2X_ACCEPT_BROADCAST, &accept_flags); __set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags); /* Clear STOP_PENDING bit if START is requested */ clear_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED, &bp->sp_state); sched_state = BNX2X_FILTER_ISCSI_ETH_START_SCHED; } else /* Clear START_PENDING bit if STOP is requested */ clear_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED, &bp->sp_state); if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state)) set_bit(sched_state, &bp->sp_state); else { __set_bit(RAMROD_RX, &ramrod_flags); bnx2x_set_q_rx_mode(bp, cl_id, 0, accept_flags, 0, ramrod_flags); } } static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl) { struct bnx2x *bp = netdev_priv(dev); int rc = 0; switch (ctl->cmd) { case DRV_CTL_CTXTBL_WR_CMD: { u32 index = ctl->data.io.offset; dma_addr_t addr = ctl->data.io.dma_addr; bnx2x_ilt_wr(bp, index, addr); break; } case DRV_CTL_RET_L5_SPQ_CREDIT_CMD: { int count = ctl->data.credit.credit_count; bnx2x_cnic_sp_post(bp, count); break; } /* rtnl_lock is held. 
*/ case DRV_CTL_START_L2_CMD: { struct cnic_eth_dev *cp = &bp->cnic_eth_dev; unsigned long sp_bits = 0; /* Configure the iSCSI classification object */ bnx2x_init_mac_obj(bp, &bp->iscsi_l2_mac_obj, cp->iscsi_l2_client_id, cp->iscsi_l2_cid, BP_FUNC(bp), bnx2x_sp(bp, mac_rdata), bnx2x_sp_mapping(bp, mac_rdata), BNX2X_FILTER_MAC_PENDING, &bp->sp_state, BNX2X_OBJ_TYPE_RX, &bp->macs_pool); /* Set iSCSI MAC address */ rc = bnx2x_set_iscsi_eth_mac_addr(bp); if (rc) break; mmiowb(); barrier(); /* Start accepting on iSCSI L2 ring */ netif_addr_lock_bh(dev); bnx2x_set_iscsi_eth_rx_mode(bp, true); netif_addr_unlock_bh(dev); /* bits to wait on */ __set_bit(BNX2X_FILTER_RX_MODE_PENDING, &sp_bits); __set_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED, &sp_bits); if (!bnx2x_wait_sp_comp(bp, sp_bits)) BNX2X_ERR("rx_mode completion timed out!\n"); break; } /* rtnl_lock is held. */ case DRV_CTL_STOP_L2_CMD: { unsigned long sp_bits = 0; /* Stop accepting on iSCSI L2 ring */ netif_addr_lock_bh(dev); bnx2x_set_iscsi_eth_rx_mode(bp, false); netif_addr_unlock_bh(dev); /* bits to wait on */ __set_bit(BNX2X_FILTER_RX_MODE_PENDING, &sp_bits); __set_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED, &sp_bits); if (!bnx2x_wait_sp_comp(bp, sp_bits)) BNX2X_ERR("rx_mode completion timed out!\n"); mmiowb(); barrier(); /* Unset iSCSI L2 MAC */ rc = bnx2x_del_all_macs(bp, &bp->iscsi_l2_mac_obj, BNX2X_ISCSI_ETH_MAC, true); break; } case DRV_CTL_RET_L2_SPQ_CREDIT_CMD: { int count = ctl->data.credit.credit_count; smp_mb__before_atomic_inc(); atomic_add(count, &bp->cq_spq_left); smp_mb__after_atomic_inc(); break; } case DRV_CTL_ULP_REGISTER_CMD: { int ulp_type = ctl->data.ulp_type; if (CHIP_IS_E3(bp)) { int idx = BP_FW_MB_IDX(bp); u32 cap; cap = SHMEM2_RD(bp, drv_capabilities_flag[idx]); if (ulp_type == CNIC_ULP_ISCSI) cap |= DRV_FLAGS_CAPABILITIES_LOADED_ISCSI; else if (ulp_type == CNIC_ULP_FCOE) cap |= DRV_FLAGS_CAPABILITIES_LOADED_FCOE; SHMEM2_WR(bp, drv_capabilities_flag[idx], cap); } break; } case 
DRV_CTL_ULP_UNREGISTER_CMD: { int ulp_type = ctl->data.ulp_type; if (CHIP_IS_E3(bp)) { int idx = BP_FW_MB_IDX(bp); u32 cap; cap = SHMEM2_RD(bp, drv_capabilities_flag[idx]); if (ulp_type == CNIC_ULP_ISCSI) cap &= ~DRV_FLAGS_CAPABILITIES_LOADED_ISCSI; else if (ulp_type == CNIC_ULP_FCOE) cap &= ~DRV_FLAGS_CAPABILITIES_LOADED_FCOE; SHMEM2_WR(bp, drv_capabilities_flag[idx], cap); } break; } default: BNX2X_ERR("unknown command %x\n", ctl->cmd); rc = -EINVAL; } return rc; } void bnx2x_setup_cnic_irq_info(struct bnx2x *bp) { struct cnic_eth_dev *cp = &bp->cnic_eth_dev; if (bp->flags & USING_MSIX_FLAG) { cp->drv_state |= CNIC_DRV_STATE_USING_MSIX; cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX; cp->irq_arr[0].vector = bp->msix_table[1].vector; } else { cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX; cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX; } if (!CHIP_IS_E1x(bp)) cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e2_sb; else cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e1x_sb; cp->irq_arr[0].status_blk_num = bnx2x_cnic_fw_sb_id(bp); cp->irq_arr[0].status_blk_num2 = bnx2x_cnic_igu_sb_id(bp); cp->irq_arr[1].status_blk = bp->def_status_blk; cp->irq_arr[1].status_blk_num = DEF_SB_ID; cp->irq_arr[1].status_blk_num2 = DEF_SB_IGU_ID; cp->num_irq = 2; } static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops, void *data) { struct bnx2x *bp = netdev_priv(dev); struct cnic_eth_dev *cp = &bp->cnic_eth_dev; if (ops == NULL) { BNX2X_ERR("NULL ops received\n"); return -EINVAL; } bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL); if (!bp->cnic_kwq) return -ENOMEM; bp->cnic_kwq_cons = bp->cnic_kwq; bp->cnic_kwq_prod = bp->cnic_kwq; bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT; bp->cnic_spq_pending = 0; bp->cnic_kwq_pending = 0; bp->cnic_data = data; cp->num_irq = 0; cp->drv_state |= CNIC_DRV_STATE_REGD; cp->iro_arr = bp->iro_arr; bnx2x_setup_cnic_irq_info(bp); rcu_assign_pointer(bp->cnic_ops, ops); return 0; } static int bnx2x_unregister_cnic(struct net_device *dev) 
{ struct bnx2x *bp = netdev_priv(dev); struct cnic_eth_dev *cp = &bp->cnic_eth_dev; mutex_lock(&bp->cnic_mutex); cp->drv_state = 0; RCU_INIT_POINTER(bp->cnic_ops, NULL); mutex_unlock(&bp->cnic_mutex); synchronize_rcu(); kfree(bp->cnic_kwq); bp->cnic_kwq = NULL; return 0; } struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev) { struct bnx2x *bp = netdev_priv(dev); struct cnic_eth_dev *cp = &bp->cnic_eth_dev; /* If both iSCSI and FCoE are disabled - return NULL in * order to indicate CNIC that it should not try to work * with this device. */ if (NO_ISCSI(bp) && NO_FCOE(bp)) return NULL; cp->drv_owner = THIS_MODULE; cp->chip_id = CHIP_ID(bp); cp->pdev = bp->pdev; cp->io_base = bp->regview; cp->io_base2 = bp->doorbells; cp->max_kwqe_pending = 8; cp->ctx_blk_size = CDU_ILT_PAGE_SZ; cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) + bnx2x_cid_ilt_lines(bp); cp->ctx_tbl_len = CNIC_ILT_LINES; cp->starting_cid = bnx2x_cid_ilt_lines(bp) * ILT_PAGE_CIDS; cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue; cp->drv_ctl = bnx2x_drv_ctl; cp->drv_register_cnic = bnx2x_register_cnic; cp->drv_unregister_cnic = bnx2x_unregister_cnic; cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID; cp->iscsi_l2_client_id = bnx2x_cnic_eth_cl_id(bp, BNX2X_ISCSI_ETH_CL_ID_IDX); cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID; if (NO_ISCSI_OOO(bp)) cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO; if (NO_ISCSI(bp)) cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI; if (NO_FCOE(bp)) cp->drv_state |= CNIC_DRV_STATE_NO_FCOE; BNX2X_DEV_INFO( "page_size %d, tbl_offset %d, tbl_lines %d, starting cid %d\n", cp->ctx_blk_size, cp->ctx_tbl_offset, cp->ctx_tbl_len, cp->starting_cid); return cp; } EXPORT_SYMBOL(bnx2x_cnic_probe); #endif /* BCM_CNIC */
gpl-2.0
pranaysahith/android_kernel_msm_beni
drivers/input/keyboard/imx_keypad.c
4003
16362
/* * Driver for the IMX keypad port. * Copyright (C) 2009 Alberto Panizzo <maramaopercheseimorto@gmail.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * <<Power management needs to be implemented>>. */ #include <linux/clk.h> #include <linux/delay.h> #include <linux/device.h> #include <linux/err.h> #include <linux/init.h> #include <linux/input/matrix_keypad.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/jiffies.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/timer.h> /* * Keypad Controller registers (halfword) */ #define KPCR 0x00 /* Keypad Control Register */ #define KPSR 0x02 /* Keypad Status Register */ #define KBD_STAT_KPKD (0x1 << 0) /* Key Press Interrupt Status bit (w1c) */ #define KBD_STAT_KPKR (0x1 << 1) /* Key Release Interrupt Status bit (w1c) */ #define KBD_STAT_KDSC (0x1 << 2) /* Key Depress Synch Chain Status bit (w1c)*/ #define KBD_STAT_KRSS (0x1 << 3) /* Key Release Synch Status bit (w1c)*/ #define KBD_STAT_KDIE (0x1 << 8) /* Key Depress Interrupt Enable Status bit */ #define KBD_STAT_KRIE (0x1 << 9) /* Key Release Interrupt Enable */ #define KBD_STAT_KPPEN (0x1 << 10) /* Keypad Clock Enable */ #define KDDR 0x04 /* Keypad Data Direction Register */ #define KPDR 0x06 /* Keypad Data Register */ #define MAX_MATRIX_KEY_ROWS 8 #define MAX_MATRIX_KEY_COLS 8 #define MATRIX_ROW_SHIFT 3 #define MAX_MATRIX_KEY_NUM (MAX_MATRIX_KEY_ROWS * MAX_MATRIX_KEY_COLS) struct imx_keypad { struct clk *clk; struct input_dev *input_dev; void __iomem *mmio_base; int irq; struct timer_list check_matrix_timer; /* * The matrix is stable only if no changes are detected after * IMX_KEYPAD_SCANS_FOR_STABILITY scans */ #define IMX_KEYPAD_SCANS_FOR_STABILITY 3 int stable_count; bool enabled; /* Masks for enabled rows/cols */ unsigned 
short rows_en_mask; unsigned short cols_en_mask; unsigned short keycodes[MAX_MATRIX_KEY_NUM]; /* * Matrix states: * -stable: achieved after a complete debounce process. * -unstable: used in the debouncing process. */ unsigned short matrix_stable_state[MAX_MATRIX_KEY_COLS]; unsigned short matrix_unstable_state[MAX_MATRIX_KEY_COLS]; }; /* Scan the matrix and return the new state in *matrix_volatile_state. */ static void imx_keypad_scan_matrix(struct imx_keypad *keypad, unsigned short *matrix_volatile_state) { int col; unsigned short reg_val; for (col = 0; col < MAX_MATRIX_KEY_COLS; col++) { if ((keypad->cols_en_mask & (1 << col)) == 0) continue; /* * Discharge keypad capacitance: * 2. write 1s on column data. * 3. configure columns as totem-pole to discharge capacitance. * 4. configure columns as open-drain. */ reg_val = readw(keypad->mmio_base + KPDR); reg_val |= 0xff00; writew(reg_val, keypad->mmio_base + KPDR); reg_val = readw(keypad->mmio_base + KPCR); reg_val &= ~((keypad->cols_en_mask & 0xff) << 8); writew(reg_val, keypad->mmio_base + KPCR); udelay(2); reg_val = readw(keypad->mmio_base + KPCR); reg_val |= (keypad->cols_en_mask & 0xff) << 8; writew(reg_val, keypad->mmio_base + KPCR); /* * 5. Write a single column to 0, others to 1. * 6. Sample row inputs and save data. * 7. Repeat steps 2 - 6 for remaining columns. */ reg_val = readw(keypad->mmio_base + KPDR); reg_val &= ~(1 << (8 + col)); writew(reg_val, keypad->mmio_base + KPDR); /* * Delay added to avoid propagating the 0 from column to row * when scanning. */ udelay(5); /* * 1s in matrix_volatile_state[col] means key pressures * throw data from non enabled rows. */ reg_val = readw(keypad->mmio_base + KPDR); matrix_volatile_state[col] = (~reg_val) & keypad->rows_en_mask; } /* * Return in standby mode: * 9. 
write 0s to columns */ reg_val = readw(keypad->mmio_base + KPDR); reg_val &= 0x00ff; writew(reg_val, keypad->mmio_base + KPDR); } /* * Compare the new matrix state (volatile) with the stable one stored in * keypad->matrix_stable_state and fire events if changes are detected. */ static void imx_keypad_fire_events(struct imx_keypad *keypad, unsigned short *matrix_volatile_state) { struct input_dev *input_dev = keypad->input_dev; int row, col; for (col = 0; col < MAX_MATRIX_KEY_COLS; col++) { unsigned short bits_changed; int code; if ((keypad->cols_en_mask & (1 << col)) == 0) continue; /* Column is not enabled */ bits_changed = keypad->matrix_stable_state[col] ^ matrix_volatile_state[col]; if (bits_changed == 0) continue; /* Column does not contain changes */ for (row = 0; row < MAX_MATRIX_KEY_ROWS; row++) { if ((keypad->rows_en_mask & (1 << row)) == 0) continue; /* Row is not enabled */ if ((bits_changed & (1 << row)) == 0) continue; /* Row does not contain changes */ code = MATRIX_SCAN_CODE(row, col, MATRIX_ROW_SHIFT); input_event(input_dev, EV_MSC, MSC_SCAN, code); input_report_key(input_dev, keypad->keycodes[code], matrix_volatile_state[col] & (1 << row)); dev_dbg(&input_dev->dev, "Event code: %d, val: %d", keypad->keycodes[code], matrix_volatile_state[col] & (1 << row)); } } input_sync(input_dev); } /* * imx_keypad_check_for_events is the timer handler. 
*/ static void imx_keypad_check_for_events(unsigned long data) { struct imx_keypad *keypad = (struct imx_keypad *) data; unsigned short matrix_volatile_state[MAX_MATRIX_KEY_COLS]; unsigned short reg_val; bool state_changed, is_zero_matrix; int i; memset(matrix_volatile_state, 0, sizeof(matrix_volatile_state)); imx_keypad_scan_matrix(keypad, matrix_volatile_state); state_changed = false; for (i = 0; i < MAX_MATRIX_KEY_COLS; i++) { if ((keypad->cols_en_mask & (1 << i)) == 0) continue; if (keypad->matrix_unstable_state[i] ^ matrix_volatile_state[i]) { state_changed = true; break; } } /* * If the matrix state is changed from the previous scan * (Re)Begin the debouncing process, saving the new state in * keypad->matrix_unstable_state. * else * Increase the count of number of scans with a stable state. */ if (state_changed) { memcpy(keypad->matrix_unstable_state, matrix_volatile_state, sizeof(matrix_volatile_state)); keypad->stable_count = 0; } else keypad->stable_count++; /* * If the matrix is not as stable as we want reschedule scan * in the near future. */ if (keypad->stable_count < IMX_KEYPAD_SCANS_FOR_STABILITY) { mod_timer(&keypad->check_matrix_timer, jiffies + msecs_to_jiffies(10)); return; } /* * If the matrix state is stable, fire the events and save the new * stable state. Note, if the matrix is kept stable for longer * (keypad->stable_count > IMX_KEYPAD_SCANS_FOR_STABILITY) all * events have already been generated. */ if (keypad->stable_count == IMX_KEYPAD_SCANS_FOR_STABILITY) { imx_keypad_fire_events(keypad, matrix_volatile_state); memcpy(keypad->matrix_stable_state, matrix_volatile_state, sizeof(matrix_volatile_state)); } is_zero_matrix = true; for (i = 0; i < MAX_MATRIX_KEY_COLS; i++) { if (matrix_volatile_state[i] != 0) { is_zero_matrix = false; break; } } if (is_zero_matrix) { /* * All keys have been released. Enable only the KDI * interrupt for future key presses (clear the KDI * status bit and its sync chain before that). 
*/ reg_val = readw(keypad->mmio_base + KPSR); reg_val |= KBD_STAT_KPKD | KBD_STAT_KDSC; writew(reg_val, keypad->mmio_base + KPSR); reg_val = readw(keypad->mmio_base + KPSR); reg_val |= KBD_STAT_KDIE; reg_val &= ~KBD_STAT_KRIE; writew(reg_val, keypad->mmio_base + KPSR); } else { /* * Some keys are still pressed. Schedule a rescan in * attempt to detect multiple key presses and enable * the KRI interrupt to react quickly to key release * event. */ mod_timer(&keypad->check_matrix_timer, jiffies + msecs_to_jiffies(60)); reg_val = readw(keypad->mmio_base + KPSR); reg_val |= KBD_STAT_KPKR | KBD_STAT_KRSS; writew(reg_val, keypad->mmio_base + KPSR); reg_val = readw(keypad->mmio_base + KPSR); reg_val |= KBD_STAT_KRIE; reg_val &= ~KBD_STAT_KDIE; writew(reg_val, keypad->mmio_base + KPSR); } } static irqreturn_t imx_keypad_irq_handler(int irq, void *dev_id) { struct imx_keypad *keypad = dev_id; unsigned short reg_val; reg_val = readw(keypad->mmio_base + KPSR); /* Disable both interrupt types */ reg_val &= ~(KBD_STAT_KRIE | KBD_STAT_KDIE); /* Clear interrupts status bits */ reg_val |= KBD_STAT_KPKR | KBD_STAT_KPKD; writew(reg_val, keypad->mmio_base + KPSR); if (keypad->enabled) { /* The matrix is supposed to be changed */ keypad->stable_count = 0; /* Schedule the scanning procedure near in the future */ mod_timer(&keypad->check_matrix_timer, jiffies + msecs_to_jiffies(2)); } return IRQ_HANDLED; } static void imx_keypad_config(struct imx_keypad *keypad) { unsigned short reg_val; /* * Include enabled rows in interrupt generation (KPCR[7:0]) * Configure keypad columns as open-drain (KPCR[15:8]) */ reg_val = readw(keypad->mmio_base + KPCR); reg_val |= keypad->rows_en_mask & 0xff; /* rows */ reg_val |= (keypad->cols_en_mask & 0xff) << 8; /* cols */ writew(reg_val, keypad->mmio_base + KPCR); /* Write 0's to KPDR[15:8] (Colums) */ reg_val = readw(keypad->mmio_base + KPDR); reg_val &= 0x00ff; writew(reg_val, keypad->mmio_base + KPDR); /* Configure columns as output, rows as input 
(KDDR[15:0]) */ writew(0xff00, keypad->mmio_base + KDDR); /* * Clear Key Depress and Key Release status bit. * Clear both synchronizer chain. */ reg_val = readw(keypad->mmio_base + KPSR); reg_val |= KBD_STAT_KPKR | KBD_STAT_KPKD | KBD_STAT_KDSC | KBD_STAT_KRSS; writew(reg_val, keypad->mmio_base + KPSR); /* Enable KDI and disable KRI (avoid false release events). */ reg_val |= KBD_STAT_KDIE; reg_val &= ~KBD_STAT_KRIE; writew(reg_val, keypad->mmio_base + KPSR); } static void imx_keypad_inhibit(struct imx_keypad *keypad) { unsigned short reg_val; /* Inhibit KDI and KRI interrupts. */ reg_val = readw(keypad->mmio_base + KPSR); reg_val &= ~(KBD_STAT_KRIE | KBD_STAT_KDIE); writew(reg_val, keypad->mmio_base + KPSR); /* Colums as open drain and disable all rows */ writew(0xff00, keypad->mmio_base + KPCR); } static void imx_keypad_close(struct input_dev *dev) { struct imx_keypad *keypad = input_get_drvdata(dev); dev_dbg(&dev->dev, ">%s\n", __func__); /* Mark keypad as being inactive */ keypad->enabled = false; synchronize_irq(keypad->irq); del_timer_sync(&keypad->check_matrix_timer); imx_keypad_inhibit(keypad); /* Disable clock unit */ clk_disable(keypad->clk); } static int imx_keypad_open(struct input_dev *dev) { struct imx_keypad *keypad = input_get_drvdata(dev); dev_dbg(&dev->dev, ">%s\n", __func__); /* We became active from now */ keypad->enabled = true; /* Enable the kpp clock */ clk_enable(keypad->clk); imx_keypad_config(keypad); /* Sanity control, not all the rows must be actived now. 
*/ if ((readw(keypad->mmio_base + KPDR) & keypad->rows_en_mask) == 0) { dev_err(&dev->dev, "too many keys pressed, control pins initialisation\n"); goto open_err; } return 0; open_err: imx_keypad_close(dev); return -EIO; } static int __devinit imx_keypad_probe(struct platform_device *pdev) { const struct matrix_keymap_data *keymap_data = pdev->dev.platform_data; struct imx_keypad *keypad; struct input_dev *input_dev; struct resource *res; int irq, error, i; if (keymap_data == NULL) { dev_err(&pdev->dev, "no keymap defined\n"); return -EINVAL; } irq = platform_get_irq(pdev, 0); if (irq < 0) { dev_err(&pdev->dev, "no irq defined in platform data\n"); return -EINVAL; } res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (res == NULL) { dev_err(&pdev->dev, "no I/O memory defined in platform data\n"); return -EINVAL; } res = request_mem_region(res->start, resource_size(res), pdev->name); if (res == NULL) { dev_err(&pdev->dev, "failed to request I/O memory\n"); return -EBUSY; } input_dev = input_allocate_device(); if (!input_dev) { dev_err(&pdev->dev, "failed to allocate the input device\n"); error = -ENOMEM; goto failed_rel_mem; } keypad = kzalloc(sizeof(struct imx_keypad), GFP_KERNEL); if (!keypad) { dev_err(&pdev->dev, "not enough memory for driver data\n"); error = -ENOMEM; goto failed_free_input; } keypad->input_dev = input_dev; keypad->irq = irq; keypad->stable_count = 0; setup_timer(&keypad->check_matrix_timer, imx_keypad_check_for_events, (unsigned long) keypad); keypad->mmio_base = ioremap(res->start, resource_size(res)); if (keypad->mmio_base == NULL) { dev_err(&pdev->dev, "failed to remap I/O memory\n"); error = -ENOMEM; goto failed_free_priv; } keypad->clk = clk_get(&pdev->dev, "kpp"); if (IS_ERR(keypad->clk)) { dev_err(&pdev->dev, "failed to get keypad clock\n"); error = PTR_ERR(keypad->clk); goto failed_unmap; } /* Search for rows and cols enabled */ for (i = 0; i < keymap_data->keymap_size; i++) { keypad->rows_en_mask |= 1 << 
KEY_ROW(keymap_data->keymap[i]); keypad->cols_en_mask |= 1 << KEY_COL(keymap_data->keymap[i]); } if (keypad->rows_en_mask > ((1 << MAX_MATRIX_KEY_ROWS) - 1) || keypad->cols_en_mask > ((1 << MAX_MATRIX_KEY_COLS) - 1)) { dev_err(&pdev->dev, "invalid key data (too many rows or colums)\n"); error = -EINVAL; goto failed_clock_put; } dev_dbg(&pdev->dev, "enabled rows mask: %x\n", keypad->rows_en_mask); dev_dbg(&pdev->dev, "enabled cols mask: %x\n", keypad->cols_en_mask); /* Init the Input device */ input_dev->name = pdev->name; input_dev->id.bustype = BUS_HOST; input_dev->dev.parent = &pdev->dev; input_dev->open = imx_keypad_open; input_dev->close = imx_keypad_close; input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REP); input_dev->keycode = keypad->keycodes; input_dev->keycodesize = sizeof(keypad->keycodes[0]); input_dev->keycodemax = ARRAY_SIZE(keypad->keycodes); matrix_keypad_build_keymap(keymap_data, MATRIX_ROW_SHIFT, keypad->keycodes, input_dev->keybit); input_set_capability(input_dev, EV_MSC, MSC_SCAN); input_set_drvdata(input_dev, keypad); /* Ensure that the keypad will stay dormant until opened */ imx_keypad_inhibit(keypad); error = request_irq(irq, imx_keypad_irq_handler, IRQF_DISABLED, pdev->name, keypad); if (error) { dev_err(&pdev->dev, "failed to request IRQ\n"); goto failed_clock_put; } /* Register the input device */ error = input_register_device(input_dev); if (error) { dev_err(&pdev->dev, "failed to register input device\n"); goto failed_free_irq; } platform_set_drvdata(pdev, keypad); device_init_wakeup(&pdev->dev, 1); return 0; failed_free_irq: free_irq(irq, pdev); failed_clock_put: clk_put(keypad->clk); failed_unmap: iounmap(keypad->mmio_base); failed_free_priv: kfree(keypad); failed_free_input: input_free_device(input_dev); failed_rel_mem: release_mem_region(res->start, resource_size(res)); return error; } static int __devexit imx_keypad_remove(struct platform_device *pdev) { struct imx_keypad *keypad = platform_get_drvdata(pdev); struct resource 
*res; dev_dbg(&pdev->dev, ">%s\n", __func__); platform_set_drvdata(pdev, NULL); input_unregister_device(keypad->input_dev); free_irq(keypad->irq, keypad); clk_put(keypad->clk); iounmap(keypad->mmio_base); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); release_mem_region(res->start, resource_size(res)); kfree(keypad); return 0; } static struct platform_driver imx_keypad_driver = { .driver = { .name = "imx-keypad", .owner = THIS_MODULE, }, .probe = imx_keypad_probe, .remove = __devexit_p(imx_keypad_remove), }; static int __init imx_keypad_init(void) { return platform_driver_register(&imx_keypad_driver); } static void __exit imx_keypad_exit(void) { platform_driver_unregister(&imx_keypad_driver); } module_init(imx_keypad_init); module_exit(imx_keypad_exit); MODULE_AUTHOR("Alberto Panizzo <maramaopercheseimorto@gmail.com>"); MODULE_DESCRIPTION("IMX Keypad Port Driver"); MODULE_LICENSE("GPL v2"); MODULE_ALIAS("platform:imx-keypad");
gpl-2.0
AODP/android_kernel_asus_moorefield
arch/sh/mm/tlbflush_64.c
4515
3604
/* * arch/sh/mm/tlb-flush_64.c * * Copyright (C) 2000, 2001 Paolo Alberelli * Copyright (C) 2003 Richard Curnow (/proc/tlb, bug fixes) * Copyright (C) 2003 - 2012 Paul Mundt * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ #include <linux/signal.h> #include <linux/rwsem.h> #include <linux/sched.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/types.h> #include <linux/ptrace.h> #include <linux/mman.h> #include <linux/mm.h> #include <linux/smp.h> #include <linux/perf_event.h> #include <linux/interrupt.h> #include <asm/io.h> #include <asm/tlb.h> #include <asm/uaccess.h> #include <asm/pgalloc.h> #include <asm/mmu_context.h> void local_flush_tlb_one(unsigned long asid, unsigned long page) { unsigned long long match, pteh=0, lpage; unsigned long tlb; /* * Sign-extend based on neff. */ lpage = neff_sign_extend(page); match = (asid << PTEH_ASID_SHIFT) | PTEH_VALID; match |= lpage; for_each_itlb_entry(tlb) { asm volatile ("getcfg %1, 0, %0" : "=r" (pteh) : "r" (tlb) ); if (pteh == match) { __flush_tlb_slot(tlb); break; } } for_each_dtlb_entry(tlb) { asm volatile ("getcfg %1, 0, %0" : "=r" (pteh) : "r" (tlb) ); if (pteh == match) { __flush_tlb_slot(tlb); break; } } } void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page) { unsigned long flags; if (vma->vm_mm) { page &= PAGE_MASK; local_irq_save(flags); local_flush_tlb_one(get_asid(), page); local_irq_restore(flags); } } void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) { unsigned long flags; unsigned long long match, pteh=0, pteh_epn, pteh_low; unsigned long tlb; unsigned int cpu = smp_processor_id(); struct mm_struct *mm; mm = vma->vm_mm; if (cpu_context(cpu, mm) == NO_CONTEXT) return; local_irq_save(flags); start &= PAGE_MASK; end &= PAGE_MASK; match = (cpu_asid(cpu, mm) << PTEH_ASID_SHIFT) 
| PTEH_VALID; /* Flush ITLB */ for_each_itlb_entry(tlb) { asm volatile ("getcfg %1, 0, %0" : "=r" (pteh) : "r" (tlb) ); pteh_epn = pteh & PAGE_MASK; pteh_low = pteh & ~PAGE_MASK; if (pteh_low == match && pteh_epn >= start && pteh_epn <= end) __flush_tlb_slot(tlb); } /* Flush DTLB */ for_each_dtlb_entry(tlb) { asm volatile ("getcfg %1, 0, %0" : "=r" (pteh) : "r" (tlb) ); pteh_epn = pteh & PAGE_MASK; pteh_low = pteh & ~PAGE_MASK; if (pteh_low == match && pteh_epn >= start && pteh_epn <= end) __flush_tlb_slot(tlb); } local_irq_restore(flags); } void local_flush_tlb_mm(struct mm_struct *mm) { unsigned long flags; unsigned int cpu = smp_processor_id(); if (cpu_context(cpu, mm) == NO_CONTEXT) return; local_irq_save(flags); cpu_context(cpu, mm) = NO_CONTEXT; if (mm == current->mm) activate_context(mm, cpu); local_irq_restore(flags); } void local_flush_tlb_all(void) { /* Invalidate all, including shared pages, excluding fixed TLBs */ unsigned long flags, tlb; local_irq_save(flags); /* Flush each ITLB entry */ for_each_itlb_entry(tlb) __flush_tlb_slot(tlb); /* Flush each DTLB entry */ for_each_dtlb_entry(tlb) __flush_tlb_slot(tlb); local_irq_restore(flags); } void local_flush_tlb_kernel_range(unsigned long start, unsigned long end) { /* FIXME: Optimize this later.. */ flush_tlb_all(); } void __flush_tlb_global(void) { flush_tlb_all(); }
gpl-2.0
tobigun/samsung-kernel-smg800y
arch/arm/mach-realview/hotplug.c
4515
2630
/* * linux/arch/arm/mach-realview/hotplug.c * * Copyright (C) 2002 ARM Ltd. * All Rights Reserved * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/smp.h> #include <asm/cacheflush.h> #include <asm/cp15.h> #include <asm/smp_plat.h> extern volatile int pen_release; static inline void cpu_enter_lowpower(void) { unsigned int v; flush_cache_all(); asm volatile( " mcr p15, 0, %1, c7, c5, 0\n" " mcr p15, 0, %1, c7, c10, 4\n" /* * Turn off coherency */ " mrc p15, 0, %0, c1, c0, 1\n" " bic %0, %0, #0x20\n" " mcr p15, 0, %0, c1, c0, 1\n" " mrc p15, 0, %0, c1, c0, 0\n" " bic %0, %0, %2\n" " mcr p15, 0, %0, c1, c0, 0\n" : "=&r" (v) : "r" (0), "Ir" (CR_C) : "cc"); } static inline void cpu_leave_lowpower(void) { unsigned int v; asm volatile( "mrc p15, 0, %0, c1, c0, 0\n" " orr %0, %0, %1\n" " mcr p15, 0, %0, c1, c0, 0\n" " mrc p15, 0, %0, c1, c0, 1\n" " orr %0, %0, #0x20\n" " mcr p15, 0, %0, c1, c0, 1\n" : "=&r" (v) : "Ir" (CR_C) : "cc"); } static inline void platform_do_lowpower(unsigned int cpu, int *spurious) { /* * there is no power-control hardware on this platform, so all * we can do is put the core into WFI; this is safe as the calling * code will have already disabled interrupts */ for (;;) { /* * here's the WFI */ asm(".word 0xe320f003\n" : : : "memory", "cc"); if (pen_release == cpu_logical_map(cpu)) { /* * OK, proper wakeup, we're done */ break; } /* * Getting here, means that we have come out of WFI without * having been woken up - this shouldn't happen * * Just note it happening - when we're woken, we can report * its occurrence. 
*/ (*spurious)++; } } int platform_cpu_kill(unsigned int cpu) { return 1; } /* * platform-specific code to shutdown a CPU * * Called with IRQs disabled */ void platform_cpu_die(unsigned int cpu) { int spurious = 0; /* * we're ready for shutdown now, so do it */ cpu_enter_lowpower(); platform_do_lowpower(cpu, &spurious); /* * bring this CPU back into the world of cache * coherency, and then restore interrupts */ cpu_leave_lowpower(); if (spurious) pr_warn("CPU%u: %u spurious wakeup calls\n", cpu, spurious); } int platform_cpu_disable(unsigned int cpu) { /* * we don't allow CPU 0 to be shutdown (it is still too special * e.g. clock tick interrupts) */ return cpu == 0 ? -EPERM : 0; }
gpl-2.0
kamma/ace_kernel
arch/sparc/kernel/muldiv.c
4771
5820
/* * muldiv.c: Hardware multiply/division illegal instruction trap * for sun4c/sun4 (which do not have those instructions) * * Copyright (C) 1996 Jakub Jelinek (jj@sunsite.mff.cuni.cz) * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu) * * 2004-12-25 Krzysztof Helt (krzysztof.h1@wp.pl) * - fixed registers constrains in inline assembly declarations */ #include <linux/kernel.h> #include <linux/sched.h> #include <linux/mm.h> #include <asm/ptrace.h> #include <asm/processor.h> #include <asm/system.h> #include <asm/uaccess.h> #include "kernel.h" /* #define DEBUG_MULDIV */ static inline int has_imm13(int insn) { return (insn & 0x2000); } static inline int is_foocc(int insn) { return (insn & 0x800000); } static inline int sign_extend_imm13(int imm) { return imm << 19 >> 19; } static inline void advance(struct pt_regs *regs) { regs->pc = regs->npc; regs->npc += 4; } static inline void maybe_flush_windows(unsigned int rs1, unsigned int rs2, unsigned int rd) { if(rs2 >= 16 || rs1 >= 16 || rd >= 16) { /* Wheee... */ __asm__ __volatile__("save %sp, -0x40, %sp\n\t" "save %sp, -0x40, %sp\n\t" "save %sp, -0x40, %sp\n\t" "save %sp, -0x40, %sp\n\t" "save %sp, -0x40, %sp\n\t" "save %sp, -0x40, %sp\n\t" "save %sp, -0x40, %sp\n\t" "restore; restore; restore; restore;\n\t" "restore; restore; restore;\n\t"); } } #define fetch_reg(reg, regs) ({ \ struct reg_window32 __user *win; \ register unsigned long ret; \ \ if (!(reg)) ret = 0; \ else if ((reg) < 16) { \ ret = regs->u_regs[(reg)]; \ } else { \ /* Ho hum, the slightly complicated case. 
*/ \ win = (struct reg_window32 __user *)regs->u_regs[UREG_FP];\ if (get_user (ret, &win->locals[(reg) - 16])) return -1;\ } \ ret; \ }) static inline int store_reg(unsigned int result, unsigned int reg, struct pt_regs *regs) { struct reg_window32 __user *win; if (!reg) return 0; if (reg < 16) { regs->u_regs[reg] = result; return 0; } else { /* need to use put_user() in this case: */ win = (struct reg_window32 __user *) regs->u_regs[UREG_FP]; return (put_user(result, &win->locals[reg - 16])); } } /* Should return 0 if mul/div emulation succeeded and SIGILL should * not be issued. */ int do_user_muldiv(struct pt_regs *regs, unsigned long pc) { unsigned int insn; int inst; unsigned int rs1, rs2, rdv; if (!pc) return -1; /* This happens to often, I think */ if (get_user (insn, (unsigned int __user *)pc)) return -1; if ((insn & 0xc1400000) != 0x80400000) return -1; inst = ((insn >> 19) & 0xf); if ((inst & 0xe) != 10 && (inst & 0xe) != 14) return -1; /* Now we know we have to do something with umul, smul, udiv or sdiv */ rs1 = (insn >> 14) & 0x1f; rs2 = insn & 0x1f; rdv = (insn >> 25) & 0x1f; if (has_imm13(insn)) { maybe_flush_windows(rs1, 0, rdv); rs2 = sign_extend_imm13(insn); } else { maybe_flush_windows(rs1, rs2, rdv); rs2 = fetch_reg(rs2, regs); } rs1 = fetch_reg(rs1, regs); switch (inst) { case 10: /* umul */ #ifdef DEBUG_MULDIV printk ("unsigned muldiv: 0x%x * 0x%x = ", rs1, rs2); #endif __asm__ __volatile__ ("\n\t" "mov %0, %%o0\n\t" "call .umul\n\t" " mov %1, %%o1\n\t" "mov %%o0, %0\n\t" "mov %%o1, %1\n\t" : "=r" (rs1), "=r" (rs2) : "0" (rs1), "1" (rs2) : "o0", "o1", "o2", "o3", "o4", "o5", "o7", "cc"); #ifdef DEBUG_MULDIV printk ("0x%x%08x\n", rs2, rs1); #endif if (store_reg(rs1, rdv, regs)) return -1; regs->y = rs2; break; case 11: /* smul */ #ifdef DEBUG_MULDIV printk ("signed muldiv: 0x%x * 0x%x = ", rs1, rs2); #endif __asm__ __volatile__ ("\n\t" "mov %0, %%o0\n\t" "call .mul\n\t" " mov %1, %%o1\n\t" "mov %%o0, %0\n\t" "mov %%o1, %1\n\t" : "=r" (rs1), "=r" 
(rs2) : "0" (rs1), "1" (rs2) : "o0", "o1", "o2", "o3", "o4", "o5", "o7", "cc"); #ifdef DEBUG_MULDIV printk ("0x%x%08x\n", rs2, rs1); #endif if (store_reg(rs1, rdv, regs)) return -1; regs->y = rs2; break; case 14: /* udiv */ #ifdef DEBUG_MULDIV printk ("unsigned muldiv: 0x%x%08x / 0x%x = ", regs->y, rs1, rs2); #endif if (!rs2) { #ifdef DEBUG_MULDIV printk ("DIVISION BY ZERO\n"); #endif handle_hw_divzero (regs, pc, regs->npc, regs->psr); return 0; } __asm__ __volatile__ ("\n\t" "mov %2, %%o0\n\t" "mov %0, %%o1\n\t" "mov %%g0, %%o2\n\t" "call __udivdi3\n\t" " mov %1, %%o3\n\t" "mov %%o1, %0\n\t" "mov %%o0, %1\n\t" : "=r" (rs1), "=r" (rs2) : "r" (regs->y), "0" (rs1), "1" (rs2) : "o0", "o1", "o2", "o3", "o4", "o5", "o7", "g1", "g2", "g3", "cc"); #ifdef DEBUG_MULDIV printk ("0x%x\n", rs1); #endif if (store_reg(rs1, rdv, regs)) return -1; break; case 15: /* sdiv */ #ifdef DEBUG_MULDIV printk ("signed muldiv: 0x%x%08x / 0x%x = ", regs->y, rs1, rs2); #endif if (!rs2) { #ifdef DEBUG_MULDIV printk ("DIVISION BY ZERO\n"); #endif handle_hw_divzero (regs, pc, regs->npc, regs->psr); return 0; } __asm__ __volatile__ ("\n\t" "mov %2, %%o0\n\t" "mov %0, %%o1\n\t" "mov %%g0, %%o2\n\t" "call __divdi3\n\t" " mov %1, %%o3\n\t" "mov %%o1, %0\n\t" "mov %%o0, %1\n\t" : "=r" (rs1), "=r" (rs2) : "r" (regs->y), "0" (rs1), "1" (rs2) : "o0", "o1", "o2", "o3", "o4", "o5", "o7", "g1", "g2", "g3", "cc"); #ifdef DEBUG_MULDIV printk ("0x%x\n", rs1); #endif if (store_reg(rs1, rdv, regs)) return -1; break; } if (is_foocc (insn)) { regs->psr &= ~PSR_ICC; if ((inst & 0xe) == 14) { /* ?div */ if (rs2) regs->psr |= PSR_V; } if (!rs1) regs->psr |= PSR_Z; if (((int)rs1) < 0) regs->psr |= PSR_N; #ifdef DEBUG_MULDIV printk ("psr muldiv: %08x\n", regs->psr); #endif } advance(regs); return 0; }
gpl-2.0
nbr11/android_kernel_lge_hammerhead
net/netfilter/ipvs/ip_vs_proto.c
4771
8990
/*
 * ip_vs_proto.c: transport protocol load balancing support for IPVS
 *
 * Authors:     Wensong Zhang <wensong@linuxvirtualserver.org>
 *              Julian Anastasov <ja@ssi.bg>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Changes:
 *
 */

#define KMSG_COMPONENT "IPVS"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/gfp.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <net/protocol.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <linux/stat.h>
#include <linux/proc_fs.h>

#include <net/ip_vs.h>

/*
 * IPVS protocols can only be registered/unregistered when the ipvs
 * module is loaded/unloaded, so no lock is needed in accessing the
 * ipvs protocol table.
 */

#define IP_VS_PROTO_TAB_SIZE		32	/* must be power of 2 */
#define IP_VS_PROTO_HASH(proto)		((proto) & (IP_VS_PROTO_TAB_SIZE-1))

/* Global (module-wide) protocol table, hashed by IP protocol number.
 * Collisions are chained through ip_vs_protocol::next. */
static struct ip_vs_protocol *ip_vs_proto_table[IP_VS_PROTO_TAB_SIZE];


/*
 *	register an ipvs protocol
 *
 *	Links @pp at the head of its hash chain and runs its optional
 *	->init() hook.  Only called at module init (see the locking
 *	comment above), hence no synchronization.
 */
static int __used __init register_ip_vs_protocol(struct ip_vs_protocol *pp)
{
	unsigned hash = IP_VS_PROTO_HASH(pp->protocol);

	pp->next = ip_vs_proto_table[hash];
	ip_vs_proto_table[hash] = pp;

	if (pp->init != NULL)
		pp->init(pp);

	return 0;
}

/*
 *	register an ipvs protocols netns related data
 *
 *	Allocates the per-netns ip_vs_proto_data for @pp, links it into
 *	the netns hash table and runs the optional ->init_netns() hook.
 *	On hook failure the entry is unlinked and freed again.
 *	Returns 0 on success, -ENOMEM or the hook's error code otherwise.
 */
static int
register_ip_vs_proto_netns(struct net *net, struct ip_vs_protocol *pp)
{
	struct netns_ipvs *ipvs = net_ipvs(net);
	unsigned hash = IP_VS_PROTO_HASH(pp->protocol);
	struct ip_vs_proto_data *pd =
			kzalloc(sizeof(struct ip_vs_proto_data), GFP_ATOMIC);

	if (!pd)
		return -ENOMEM;

	pd->pp = pp;	/* For speed issues */
	pd->next = ipvs->proto_data_table[hash];
	ipvs->proto_data_table[hash] = pd;
	atomic_set(&pd->appcnt, 0);	/* Init app counter */

	if (pp->init_netns != NULL) {
		int ret = pp->init_netns(net, pd);
		if (ret) {
			/* unlink and free proto data; pd was just placed at
			 * the chain head, so restoring ->next undoes it */
			ipvs->proto_data_table[hash] = pd->next;
			kfree(pd);
			return ret;
		}
	}
	return 0;
}

/*
 *	unregister an ipvs protocol
 *
 *	Unlinks @pp from its hash chain and runs its optional ->exit()
 *	hook.  Returns -ESRCH if @pp was not registered.
 */
static int unregister_ip_vs_protocol(struct ip_vs_protocol *pp)
{
	struct ip_vs_protocol **pp_p;
	unsigned hash = IP_VS_PROTO_HASH(pp->protocol);

	/* walk via pointer-to-pointer so head removal needs no special case */
	pp_p = &ip_vs_proto_table[hash];
	for (; *pp_p; pp_p = &(*pp_p)->next) {
		if (*pp_p == pp) {
			*pp_p = pp->next;
			if (pp->exit != NULL)
				pp->exit(pp);
			return 0;
		}
	}

	return -ESRCH;
}

/*
 *	unregister an ipvs protocols netns data
 *
 *	Unlinks @pd from the per-netns chain, runs the optional
 *	->exit_netns() hook and frees it.  Returns -ESRCH if not found.
 */
static int
unregister_ip_vs_proto_netns(struct net *net, struct ip_vs_proto_data *pd)
{
	struct netns_ipvs *ipvs = net_ipvs(net);
	struct ip_vs_proto_data **pd_p;
	unsigned hash = IP_VS_PROTO_HASH(pd->pp->protocol);

	pd_p = &ipvs->proto_data_table[hash];
	for (; *pd_p; pd_p = &(*pd_p)->next) {
		if (*pd_p == pd) {
			*pd_p = pd->next;
			if (pd->pp->exit_netns != NULL)
				pd->pp->exit_netns(net, pd);
			kfree(pd);
			return 0;
		}
	}

	return -ESRCH;
}

/*
 *	get ip_vs_protocol object by its proto.
 *
 *	Returns NULL when @proto has no registered handler.
 */
struct ip_vs_protocol * ip_vs_proto_get(unsigned short proto)
{
	struct ip_vs_protocol *pp;
	unsigned hash = IP_VS_PROTO_HASH(proto);

	for (pp = ip_vs_proto_table[hash]; pp; pp = pp->next) {
		if (pp->protocol == proto)
			return pp;
	}

	return NULL;
}
EXPORT_SYMBOL(ip_vs_proto_get);

/*
 *	get ip_vs_protocol object data by netns and proto
 *
 *	Returns NULL when @proto has no per-netns data in @ipvs.
 */
struct ip_vs_proto_data *
__ipvs_proto_data_get(struct netns_ipvs *ipvs, unsigned short proto)
{
	struct ip_vs_proto_data *pd;
	unsigned hash = IP_VS_PROTO_HASH(proto);

	for (pd = ipvs->proto_data_table[hash]; pd; pd = pd->next) {
		if (pd->pp->protocol == proto)
			return pd;
	}

	return NULL;
}

/* Convenience wrapper: look up per-netns proto data from a struct net. */
struct ip_vs_proto_data *
ip_vs_proto_data_get(struct net *net, unsigned short proto)
{
	struct netns_ipvs *ipvs = net_ipvs(net);

	return __ipvs_proto_data_get(ipvs, proto);
}
EXPORT_SYMBOL(ip_vs_proto_data_get);

/*
 *	Propagate event for state change to all protocols
 *
 *	Invokes the optional ->timeout_change() hook of every registered
 *	per-netns protocol with @flags.
 */
void ip_vs_protocol_timeout_change(struct netns_ipvs *ipvs, int flags)
{
	struct ip_vs_proto_data *pd;
	int i;

	for (i = 0; i < IP_VS_PROTO_TAB_SIZE; i++) {
		for (pd = ipvs->proto_data_table[i]; pd; pd = pd->next) {
			if (pd->pp->timeout_change)
				pd->pp->timeout_change(pd, flags);
		}
	}
}


/* Duplicate a protocol's default timeout table (@size is in bytes).
 * Returns NULL on allocation failure; caller owns the copy. */
int *
ip_vs_create_timeout_table(int *table, int size)
{
	return kmemdup(table, size, GFP_ATOMIC);
}


/*
 *	Set timeout value for state specified by name
 *
 *	@names holds @num state names parallel to @table; @to is in
 *	seconds and is stored as jiffies.  Note that !@to is rejected,
 *	so a zero timeout cannot be configured through this helper.
 *	Returns 0 on success, -EINVAL on bad arguments, -ENOENT when
 *	@name does not match any entry.
 */
int
ip_vs_set_state_timeout(int *table, int num, const char *const *names,
			const char *name, int to)
{
	int i;

	if (!table || !name || !to)
		return -EINVAL;

	for (i = 0; i < num; i++) {
		if (strcmp(names[i], name))
			continue;
		table[i] = to * HZ;
		return 0;
	}
	return -ENOENT;
}


/* Map (proto, state) to a human-readable state name for /proc output.
 * "NONE" for IPPROTO_IP, "ERR!" for unknown protocols/states. */
const char * ip_vs_state_name(__u16 proto, int state)
{
	struct ip_vs_protocol *pp = ip_vs_proto_get(proto);

	if (pp == NULL || pp->state_name == NULL)
		return (IPPROTO_IP == proto) ? "NONE" : "ERR!";
	return pp->state_name(state);
}


/* Format and pr_debug() a one-line summary of an IPv4 TCP/UDP packet:
 * "saddr:sport->daddr:dport", or a truncated/fragment note when the
 * headers cannot be read at @offset. */
static void
ip_vs_tcpudp_debug_packet_v4(struct ip_vs_protocol *pp,
			     const struct sk_buff *skb,
			     int offset,
			     const char *msg)
{
	char buf[128];
	struct iphdr _iph, *ih;

	ih = skb_header_pointer(skb, offset, sizeof(_iph), &_iph);
	if (ih == NULL)
		sprintf(buf, "TRUNCATED");
	else if (ih->frag_off & htons(IP_OFFSET))
		/* non-first fragment: no transport header to read ports from */
		sprintf(buf, "%pI4->%pI4 frag", &ih->saddr, &ih->daddr);
	else {
		__be16 _ports[2], *pptr;

		pptr = skb_header_pointer(skb, offset + ih->ihl*4,
					  sizeof(_ports), _ports);
		if (pptr == NULL)
			sprintf(buf, "TRUNCATED %pI4->%pI4",
				&ih->saddr, &ih->daddr);
		else
			sprintf(buf, "%pI4:%u->%pI4:%u",
				&ih->saddr, ntohs(pptr[0]),
				&ih->daddr, ntohs(pptr[1]));
	}

	pr_debug("%s: %s %s\n", msg, pp->name, buf);
}

#ifdef CONFIG_IP_VS_IPV6
/* IPv6 counterpart of the debug formatter above.  NOTE(review): only a
 * nexthdr of IPPROTO_FRAGMENT directly after the IPv6 header is treated
 * as a fragment; other extension headers are not skipped here. */
static void
ip_vs_tcpudp_debug_packet_v6(struct ip_vs_protocol *pp,
			     const struct sk_buff *skb,
			     int offset,
			     const char *msg)
{
	char buf[192];
	struct ipv6hdr _iph, *ih;

	ih = skb_header_pointer(skb, offset, sizeof(_iph), &_iph);
	if (ih == NULL)
		sprintf(buf, "TRUNCATED");
	else if (ih->nexthdr == IPPROTO_FRAGMENT)
		sprintf(buf, "%pI6->%pI6 frag", &ih->saddr, &ih->daddr);
	else {
		__be16 _ports[2], *pptr;

		pptr = skb_header_pointer(skb, offset + sizeof(struct ipv6hdr),
					  sizeof(_ports), _ports);
		if (pptr == NULL)
			sprintf(buf, "TRUNCATED %pI6->%pI6",
				&ih->saddr, &ih->daddr);
		else
			sprintf(buf, "%pI6:%u->%pI6:%u",
				&ih->saddr, ntohs(pptr[0]),
				&ih->daddr, ntohs(pptr[1]));
	}

	pr_debug("%s: %s %s\n", msg, pp->name, buf);
}
#endif


/* Address-family dispatcher for the TCP/UDP debug formatters. */
void
ip_vs_tcpudp_debug_packet(int af, struct ip_vs_protocol *pp,
			  const struct sk_buff *skb,
			  int offset,
			  const char *msg)
{
#ifdef CONFIG_IP_VS_IPV6
	if (af == AF_INET6)
		ip_vs_tcpudp_debug_packet_v6(pp, skb, offset, msg);
	else
#endif
		ip_vs_tcpudp_debug_packet_v4(pp, skb, offset, msg);
}

/*
 * per network name-space init
 *
 * Registers per-netns data for every compiled-in protocol.  On any
 * failure, everything registered so far is torn down again.
 */
int __net_init ip_vs_protocol_net_init(struct net *net)
{
	int i, ret;
	/* static: built once; the compiled-in protocol set never changes */
	static struct ip_vs_protocol *protos[] = {
#ifdef CONFIG_IP_VS_PROTO_TCP
		&ip_vs_protocol_tcp,
#endif
#ifdef CONFIG_IP_VS_PROTO_UDP
		&ip_vs_protocol_udp,
#endif
#ifdef CONFIG_IP_VS_PROTO_SCTP
		&ip_vs_protocol_sctp,
#endif
#ifdef CONFIG_IP_VS_PROTO_AH
		&ip_vs_protocol_ah,
#endif
#ifdef CONFIG_IP_VS_PROTO_ESP
		&ip_vs_protocol_esp,
#endif
	};

	for (i = 0; i < ARRAY_SIZE(protos); i++) {
		ret = register_ip_vs_proto_netns(net, protos[i]);
		if (ret < 0)
			goto cleanup;
	}
	return 0;

cleanup:
	ip_vs_protocol_net_cleanup(net);
	return ret;
}

/* Tear down all per-netns protocol data registered for @net. */
void __net_exit ip_vs_protocol_net_cleanup(struct net *net)
{
	struct netns_ipvs *ipvs = net_ipvs(net);
	struct ip_vs_proto_data *pd;
	int i;

	/* unregister all the ipvs proto data for this netns */
	for (i = 0; i < IP_VS_PROTO_TAB_SIZE; i++) {
		while ((pd = ipvs->proto_data_table[i]) != NULL)
			unregister_ip_vs_proto_netns(net, pd);
	}
}

/* Module init: register all compiled-in protocols and log their names.
 * Each macro expansion appends ", <name>" to the buffer, so the log
 * line skips the leading separator via &protocols[2]; protocols[2] is
 * pre-cleared so the log stays valid when no protocol is configured. */
int __init ip_vs_protocol_init(void)
{
	char protocols[64];
#define REGISTER_PROTOCOL(p)			\
	do {					\
		register_ip_vs_protocol(p);	\
		strcat(protocols, ", ");	\
		strcat(protocols, (p)->name);	\
	} while (0)

	protocols[0] = '\0';
	protocols[2] = '\0';
#ifdef CONFIG_IP_VS_PROTO_TCP
	REGISTER_PROTOCOL(&ip_vs_protocol_tcp);
#endif
#ifdef CONFIG_IP_VS_PROTO_UDP
	REGISTER_PROTOCOL(&ip_vs_protocol_udp);
#endif
#ifdef CONFIG_IP_VS_PROTO_SCTP
	REGISTER_PROTOCOL(&ip_vs_protocol_sctp);
#endif
#ifdef CONFIG_IP_VS_PROTO_AH
	REGISTER_PROTOCOL(&ip_vs_protocol_ah);
#endif
#ifdef CONFIG_IP_VS_PROTO_ESP
	REGISTER_PROTOCOL(&ip_vs_protocol_esp);
#endif
	pr_info("Registered protocols (%s)\n", &protocols[2]);

	return 0;
}


/* Module exit: unregister every protocol from the global table. */
void ip_vs_protocol_cleanup(void)
{
	struct ip_vs_protocol *pp;
	int i;

	/* unregister all the ipvs protocols */
	for (i = 0; i < IP_VS_PROTO_TAB_SIZE; i++) {
		while ((pp = ip_vs_proto_table[i]) != NULL)
			unregister_ip_vs_protocol(pp);
	}
}
gpl-2.0
rootbalance/Kernel_source_jfltexx
drivers/input/mouse/pxa930_trkball.c
5027
6405
/*
 * PXA930 track ball mouse driver
 *
 * Copyright (C) 2007 Marvell International Ltd.
 * 2008-02-28: Yong Yao <yaoyong@marvell.com>
 *             initial version
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/init.h>
#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/slab.h>

#include <mach/hardware.h>
#include <mach/pxa930_trkball.h>

/* Trackball Controller Register Definitions */
#define TBCR		(0x000C)	/* control register */
#define TBCNTR		(0x0010)	/* X/Y movement counters */
#define TBSBC		(0x0014)	/* sticky-bit clear register */

#define TBCR_TBRST	(1 << 1)
#define TBCR_TBSB	(1 << 10)

#define TBCR_Y_FLT(n)	(((n) & 0xf) << 6)
#define TBCR_X_FLT(n)	(((n) & 0xf) << 2)

/* TBCNTR packs four 8-bit counters: Y-/Y+/X-/X+ from MSB to LSB */
#define TBCNTR_YM(n)	(((n) >> 24) & 0xff)
#define TBCNTR_YP(n)	(((n) >> 16) & 0xff)
#define TBCNTR_XM(n)	(((n) >> 8) & 0xff)
#define TBCNTR_XP(n)	((n) & 0xff)

#define TBSBC_TBSBC	(0x1)

/* Per-device state, allocated in probe(). */
struct pxa930_trkball {
	struct pxa930_trkball_platform_data *pdata;

	/* Memory Mapped Register */
	struct resource *mem;	/* NOTE(review): never assigned in this file */
	void __iomem *mmio_base;

	struct input_dev *input;
};

/*
 * IRQ handler: read the movement counters, report relative X/Y motion,
 * then pulse TBSBC to clear the controller's sticky status bit.
 */
static irqreturn_t pxa930_trkball_interrupt(int irq, void *dev_id)
{
	struct pxa930_trkball *trkball = dev_id;
	struct input_dev *input = trkball->input;
	int tbcntr, x, y;

	/* According to the spec software must read TBCNTR twice:
	 * if the read value is the same, the reading is valid
	 */
	tbcntr = __raw_readl(trkball->mmio_base + TBCNTR);

	if (tbcntr == __raw_readl(trkball->mmio_base + TBCNTR)) {
		/* net motion = (positive - negative) counter, halved */
		x = (TBCNTR_XP(tbcntr) - TBCNTR_XM(tbcntr)) / 2;
		y = (TBCNTR_YP(tbcntr) - TBCNTR_YM(tbcntr)) / 2;
		input_report_rel(input, REL_X, x);
		input_report_rel(input, REL_Y, y);
		input_sync(input);
	}

	/* write 1 then 0 to clear the sticky bit */
	__raw_writel(TBSBC_TBSBC, trkball->mmio_base + TBSBC);
	__raw_writel(0, trkball->mmio_base + TBSBC);

	return IRQ_HANDLED;
}

/* For TBCR, we need to wait for a while to make sure it has been modified. */
static int write_tbcr(struct pxa930_trkball *trkball, int v)
{
	int i = 100;	/* poll up to ~100ms (1ms per iteration) */

	__raw_writel(v, trkball->mmio_base + TBCR);

	while (--i) {
		if (__raw_readl(trkball->mmio_base + TBCR) == v)
			break;
		msleep(1);
	}

	if (i == 0) {
		pr_err("%s: timed out writing TBCR(%x)!\n", __func__, v);
		return -ETIMEDOUT;
	}

	return 0;
}

/*
 * Bring the controller into its operational state: program the X/Y
 * filters, pulse the reset bit and clear the sticky status bit.  The
 * ordering of the register writes follows the hardware spec (see the
 * inline comments).
 */
static void pxa930_trkball_config(struct pxa930_trkball *trkball)
{
	uint32_t tbcr;

	/* According to spec, need to write the filters of x,y to 0xf first! */
	tbcr = __raw_readl(trkball->mmio_base + TBCR);
	write_tbcr(trkball, tbcr | TBCR_X_FLT(0xf) | TBCR_Y_FLT(0xf));
	write_tbcr(trkball, TBCR_X_FLT(trkball->pdata->x_filter) |
			    TBCR_Y_FLT(trkball->pdata->y_filter));

	/* According to spec, set TBCR_TBRST first, before clearing it! */
	tbcr = __raw_readl(trkball->mmio_base + TBCR);
	write_tbcr(trkball, tbcr | TBCR_TBRST);
	write_tbcr(trkball, tbcr & ~TBCR_TBRST);

	__raw_writel(TBSBC_TBSBC, trkball->mmio_base + TBSBC);
	__raw_writel(0, trkball->mmio_base + TBSBC);

	pr_debug("%s: final TBCR=%x!\n", __func__,
		 __raw_readl(trkball->mmio_base + TBCR));
}

/* input open(): configure the hardware on first user. */
static int pxa930_trkball_open(struct input_dev *dev)
{
	struct pxa930_trkball *trkball = input_get_drvdata(dev);

	pxa930_trkball_config(trkball);

	return 0;
}

static void pxa930_trkball_disable(struct pxa930_trkball *trkball)
{
	uint32_t tbcr = __raw_readl(trkball->mmio_base + TBCR);

	/* Held in reset, gate the 32-KHz input clock off */
	write_tbcr(trkball, tbcr | TBCR_TBRST);
}

/* input close(): hold the controller in reset while unused. */
static void pxa930_trkball_close(struct input_dev *dev)
{
	struct pxa930_trkball *trkball = input_get_drvdata(dev);

	pxa930_trkball_disable(trkball);
}

/*
 * Probe: map the MMIO region, hold the controller in reset, hook the
 * IRQ and register the input device.  Error paths unwind in reverse
 * acquisition order via the goto labels at the bottom.
 */
static int __devinit pxa930_trkball_probe(struct platform_device *pdev)
{
	struct pxa930_trkball *trkball;
	struct input_dev *input;
	struct resource *res;
	int irq, error;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(&pdev->dev, "failed to get trkball irq\n");
		return -ENXIO;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&pdev->dev, "failed to get register memory\n");
		return -ENXIO;
	}

	trkball = kzalloc(sizeof(struct pxa930_trkball), GFP_KERNEL);
	if (!trkball)
		return -ENOMEM;

	trkball->pdata = pdev->dev.platform_data;
	if (!trkball->pdata) {
		dev_err(&pdev->dev, "no platform data defined\n");
		error = -EINVAL;
		goto failed;
	}

	trkball->mmio_base = ioremap_nocache(res->start, resource_size(res));
	if (!trkball->mmio_base) {
		dev_err(&pdev->dev, "failed to ioremap registers\n");
		error = -ENXIO;
		goto failed;
	}

	/* held the module in reset, will be enabled in open() */
	pxa930_trkball_disable(trkball);

	error = request_irq(irq, pxa930_trkball_interrupt, 0,
			    pdev->name, trkball);
	if (error) {
		dev_err(&pdev->dev, "failed to request irq: %d\n", error);
		goto failed_free_io;
	}

	platform_set_drvdata(pdev, trkball);

	input = input_allocate_device();
	if (!input) {
		dev_err(&pdev->dev, "failed to allocate input device\n");
		error = -ENOMEM;
		goto failed_free_irq;
	}

	input->name = pdev->name;
	input->id.bustype = BUS_HOST;
	input->open = pxa930_trkball_open;
	input->close = pxa930_trkball_close;
	input->dev.parent = &pdev->dev;
	input_set_drvdata(input, trkball);

	trkball->input = input;

	input_set_capability(input, EV_REL, REL_X);
	input_set_capability(input, EV_REL, REL_Y);

	error = input_register_device(input);
	if (error) {
		dev_err(&pdev->dev, "unable to register input device\n");
		goto failed_free_input;
	}

	return 0;

failed_free_input:
	input_free_device(input);
failed_free_irq:
	free_irq(irq, trkball);
failed_free_io:
	iounmap(trkball->mmio_base);
failed:
	kfree(trkball);
	return error;
}

/* Remove: release everything acquired in probe, in reverse order. */
static int __devexit pxa930_trkball_remove(struct platform_device *pdev)
{
	struct pxa930_trkball *trkball = platform_get_drvdata(pdev);
	int irq = platform_get_irq(pdev, 0);

	input_unregister_device(trkball->input);
	free_irq(irq, trkball);
	iounmap(trkball->mmio_base);
	kfree(trkball);

	return 0;
}

static struct platform_driver pxa930_trkball_driver = {
	.driver		= {
		.name	= "pxa930-trkball",
	},
	.probe		= pxa930_trkball_probe,
	.remove		= __devexit_p(pxa930_trkball_remove),
};
module_platform_driver(pxa930_trkball_driver);

MODULE_AUTHOR("Yong Yao <yaoyong@marvell.com>");
MODULE_DESCRIPTION("PXA930 Trackball Mouse Driver");
MODULE_LICENSE("GPL");
gpl-2.0
AMohseni76/Prime_Kernel
drivers/input/keyboard/pxa930_rotary.c
5027
4679
/*
 * Driver for the enhanced rotary controller on pxa930 and pxa935
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/input.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/slab.h>

#include <mach/pxa930_rotary.h>

#define SBCR	(0x04)		/* sticky-bit clear register */
#define ERCR	(0x0c)		/* enhanced rotary count register */

#define SBCR_ERSB	(1 << 5)	/* enhanced-rotary sticky bit */

/* Per-device state, allocated in probe(). */
struct pxa930_rotary {
	struct input_dev	*input_dev;
	void __iomem		*mmio_base;
	int			last_ercr;	/* counter value at last IRQ */

	struct pxa930_rotary_platform_data *pdata;
};

/* Pulse the rotary sticky bit in SBCR (set then clear) to acknowledge
 * the controller and re-arm it. */
static void clear_sbcr(struct pxa930_rotary *r)
{
	uint32_t sbcr = __raw_readl(r->mmio_base + SBCR);

	__raw_writel(sbcr | SBCR_ERSB, r->mmio_base + SBCR);
	__raw_writel(sbcr & ~SBCR_ERSB, r->mmio_base + SBCR);
}

/*
 * IRQ handler: compute the movement delta from the 4-bit hardware
 * counter and report it either as an up/down key press (when the
 * platform data provides both key codes) or as a relative axis event.
 *
 * NOTE(review): delta is a plain difference of successive 4-bit counter
 * readings; wrap-around of the counter is not compensated here --
 * confirm against the controller spec.
 */
static irqreturn_t rotary_irq(int irq, void *dev_id)
{
	struct pxa930_rotary *r = dev_id;
	struct pxa930_rotary_platform_data *pdata = r->pdata;
	int ercr, delta, key;

	ercr = __raw_readl(r->mmio_base + ERCR) & 0xf;
	clear_sbcr(r);

	delta = ercr - r->last_ercr;
	if (delta == 0)
		return IRQ_HANDLED;

	r->last_ercr = ercr;

	if (pdata->up_key && pdata->down_key) {
		key = (delta > 0) ? pdata->up_key : pdata->down_key;
		/* synthesize a full press/release pair for key mode */
		input_report_key(r->input_dev, key, 1);
		input_sync(r->input_dev);
		input_report_key(r->input_dev, key, 0);
	} else
		input_report_rel(r->input_dev, pdata->rel_code, delta);

	input_sync(r->input_dev);

	return IRQ_HANDLED;
}

/* input open(): acknowledge/arm the controller. */
static int pxa930_rotary_open(struct input_dev *dev)
{
	struct pxa930_rotary *r = input_get_drvdata(dev);

	clear_sbcr(r);

	return 0;
}

/* input close(): leave the sticky bit cleared. */
static void pxa930_rotary_close(struct input_dev *dev)
{
	struct pxa930_rotary *r = input_get_drvdata(dev);

	clear_sbcr(r);
}

/*
 * Probe: map the MMIO region, set up the input device (key mode when
 * both up/down key codes are given, relative-axis mode otherwise),
 * request the IRQ and register with the input core.  Error paths
 * unwind in reverse acquisition order via the goto labels.
 */
static int __devinit pxa930_rotary_probe(struct platform_device *pdev)
{
	struct pxa930_rotary_platform_data *pdata = pdev->dev.platform_data;
	struct pxa930_rotary *r;
	struct input_dev *input_dev;
	struct resource *res;
	int irq;
	int err;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(&pdev->dev, "no irq for rotary controller\n");
		return -ENXIO;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&pdev->dev, "no I/O memory defined\n");
		return -ENXIO;
	}

	if (!pdata) {
		dev_err(&pdev->dev, "no platform data defined\n");
		return -EINVAL;
	}

	r = kzalloc(sizeof(struct pxa930_rotary), GFP_KERNEL);
	if (!r)
		return -ENOMEM;

	r->mmio_base = ioremap_nocache(res->start, resource_size(res));
	if (r->mmio_base == NULL) {
		dev_err(&pdev->dev, "failed to remap IO memory\n");
		err = -ENXIO;
		goto failed_free;
	}

	r->pdata = pdata;
	platform_set_drvdata(pdev, r);

	/* allocate and register the input device */
	input_dev = input_allocate_device();
	if (!input_dev) {
		dev_err(&pdev->dev, "failed to allocate input device\n");
		err = -ENOMEM;
		goto failed_free_io;
	}

	input_dev->name = pdev->name;
	input_dev->id.bustype = BUS_HOST;
	input_dev->open = pxa930_rotary_open;
	input_dev->close = pxa930_rotary_close;
	input_dev->dev.parent = &pdev->dev;

	if (pdata->up_key && pdata->down_key) {
		__set_bit(pdata->up_key, input_dev->keybit);
		__set_bit(pdata->down_key, input_dev->keybit);
		__set_bit(EV_KEY, input_dev->evbit);
	} else {
		__set_bit(pdata->rel_code, input_dev->relbit);
		__set_bit(EV_REL, input_dev->evbit);
	}

	r->input_dev = input_dev;
	input_set_drvdata(input_dev, r);

	err = request_irq(irq, rotary_irq, 0,
			"enhanced rotary", r);
	if (err) {
		dev_err(&pdev->dev, "failed to request IRQ\n");
		goto failed_free_input;
	}

	err = input_register_device(input_dev);
	if (err) {
		dev_err(&pdev->dev, "failed to register input device\n");
		goto failed_free_irq;
	}

	return 0;

failed_free_irq:
	free_irq(irq, r);
failed_free_input:
	input_free_device(input_dev);
failed_free_io:
	iounmap(r->mmio_base);
failed_free:
	kfree(r);
	return err;
}

/* Remove: release everything acquired in probe, in reverse order. */
static int __devexit pxa930_rotary_remove(struct platform_device *pdev)
{
	struct pxa930_rotary *r = platform_get_drvdata(pdev);

	free_irq(platform_get_irq(pdev, 0), r);
	input_unregister_device(r->input_dev);
	iounmap(r->mmio_base);
	platform_set_drvdata(pdev, NULL);
	kfree(r);

	return 0;
}

static struct platform_driver pxa930_rotary_driver = {
	.driver		= {
		.name	= "pxa930-rotary",
		.owner	= THIS_MODULE,
	},
	.probe		= pxa930_rotary_probe,
	.remove		= __devexit_p(pxa930_rotary_remove),
};
module_platform_driver(pxa930_rotary_driver);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Driver for PXA93x Enhanced Rotary Controller");
MODULE_AUTHOR("Yao Yong <yaoyong@marvell.com>");
gpl-2.0
Sudokamikaze/XKernel-taoshan
drivers/media/dvb/frontends/dvb-pll.c
5027
20509
/* * descriptions + helper functions for simple dvb plls. * * (c) 2004 Gerd Knorr <kraxel@bytesex.org> [SuSE Labs] * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/slab.h> #include <linux/module.h> #include <linux/dvb/frontend.h> #include <asm/types.h> #include "dvb-pll.h" struct dvb_pll_priv { /* pll number */ int nr; /* i2c details */ int pll_i2c_address; struct i2c_adapter *i2c; /* the PLL descriptor */ struct dvb_pll_desc *pll_desc; /* cached frequency/bandwidth */ u32 frequency; u32 bandwidth; }; #define DVB_PLL_MAX 64 static unsigned int dvb_pll_devcount; static int debug; module_param(debug, int, 0644); MODULE_PARM_DESC(debug, "enable verbose debug messages"); static unsigned int id[DVB_PLL_MAX] = { [ 0 ... 
(DVB_PLL_MAX-1) ] = DVB_PLL_UNDEFINED }; module_param_array(id, int, NULL, 0644); MODULE_PARM_DESC(id, "force pll id to use (DEBUG ONLY)"); /* ----------------------------------------------------------- */ struct dvb_pll_desc { char *name; u32 min; u32 max; u32 iffreq; void (*set)(struct dvb_frontend *fe, u8 *buf); u8 *initdata; u8 *initdata2; u8 *sleepdata; int count; struct { u32 limit; u32 stepsize; u8 config; u8 cb; } entries[12]; }; /* ----------------------------------------------------------- */ /* descriptions */ static struct dvb_pll_desc dvb_pll_thomson_dtt7579 = { .name = "Thomson dtt7579", .min = 177000000, .max = 858000000, .iffreq= 36166667, .sleepdata = (u8[]){ 2, 0xb4, 0x03 }, .count = 4, .entries = { { 443250000, 166667, 0xb4, 0x02 }, { 542000000, 166667, 0xb4, 0x08 }, { 771000000, 166667, 0xbc, 0x08 }, { 999999999, 166667, 0xf4, 0x08 }, }, }; static void thomson_dtt759x_bw(struct dvb_frontend *fe, u8 *buf) { u32 bw = fe->dtv_property_cache.bandwidth_hz; if (bw == 7000000) buf[3] |= 0x10; } static struct dvb_pll_desc dvb_pll_thomson_dtt759x = { .name = "Thomson dtt759x", .min = 177000000, .max = 896000000, .set = thomson_dtt759x_bw, .iffreq= 36166667, .sleepdata = (u8[]){ 2, 0x84, 0x03 }, .count = 5, .entries = { { 264000000, 166667, 0xb4, 0x02 }, { 470000000, 166667, 0xbc, 0x02 }, { 735000000, 166667, 0xbc, 0x08 }, { 835000000, 166667, 0xf4, 0x08 }, { 999999999, 166667, 0xfc, 0x08 }, }, }; static struct dvb_pll_desc dvb_pll_lg_z201 = { .name = "LG z201", .min = 174000000, .max = 862000000, .iffreq= 36166667, .sleepdata = (u8[]){ 2, 0xbc, 0x03 }, .count = 5, .entries = { { 157500000, 166667, 0xbc, 0x01 }, { 443250000, 166667, 0xbc, 0x02 }, { 542000000, 166667, 0xbc, 0x04 }, { 830000000, 166667, 0xf4, 0x04 }, { 999999999, 166667, 0xfc, 0x04 }, }, }; static struct dvb_pll_desc dvb_pll_unknown_1 = { .name = "unknown 1", /* used by dntv live dvb-t */ .min = 174000000, .max = 862000000, .iffreq= 36166667, .count = 9, .entries = { { 150000000, 166667, 
0xb4, 0x01 }, { 173000000, 166667, 0xbc, 0x01 }, { 250000000, 166667, 0xb4, 0x02 }, { 400000000, 166667, 0xbc, 0x02 }, { 420000000, 166667, 0xf4, 0x02 }, { 470000000, 166667, 0xfc, 0x02 }, { 600000000, 166667, 0xbc, 0x08 }, { 730000000, 166667, 0xf4, 0x08 }, { 999999999, 166667, 0xfc, 0x08 }, }, }; /* Infineon TUA6010XS * used in Thomson Cable Tuner */ static struct dvb_pll_desc dvb_pll_tua6010xs = { .name = "Infineon TUA6010XS", .min = 44250000, .max = 858000000, .iffreq= 36125000, .count = 3, .entries = { { 115750000, 62500, 0x8e, 0x03 }, { 403250000, 62500, 0x8e, 0x06 }, { 999999999, 62500, 0x8e, 0x85 }, }, }; /* Panasonic env57h1xd5 (some Philips PLL ?) */ static struct dvb_pll_desc dvb_pll_env57h1xd5 = { .name = "Panasonic ENV57H1XD5", .min = 44250000, .max = 858000000, .iffreq= 36125000, .count = 4, .entries = { { 153000000, 166667, 0xc2, 0x41 }, { 470000000, 166667, 0xc2, 0x42 }, { 526000000, 166667, 0xc2, 0x84 }, { 999999999, 166667, 0xc2, 0xa4 }, }, }; /* Philips TDA6650/TDA6651 * used in Panasonic ENV77H11D5 */ static void tda665x_bw(struct dvb_frontend *fe, u8 *buf) { u32 bw = fe->dtv_property_cache.bandwidth_hz; if (bw == 8000000) buf[3] |= 0x08; } static struct dvb_pll_desc dvb_pll_tda665x = { .name = "Philips TDA6650/TDA6651", .min = 44250000, .max = 858000000, .set = tda665x_bw, .iffreq= 36166667, .initdata = (u8[]){ 4, 0x0b, 0xf5, 0x85, 0xab }, .count = 12, .entries = { { 93834000, 166667, 0xca, 0x61 /* 011 0 0 0 01 */ }, { 123834000, 166667, 0xca, 0xa1 /* 101 0 0 0 01 */ }, { 161000000, 166667, 0xca, 0xa1 /* 101 0 0 0 01 */ }, { 163834000, 166667, 0xca, 0xc2 /* 110 0 0 0 10 */ }, { 253834000, 166667, 0xca, 0x62 /* 011 0 0 0 10 */ }, { 383834000, 166667, 0xca, 0xa2 /* 101 0 0 0 10 */ }, { 443834000, 166667, 0xca, 0xc2 /* 110 0 0 0 10 */ }, { 444000000, 166667, 0xca, 0xc4 /* 110 0 0 1 00 */ }, { 583834000, 166667, 0xca, 0x64 /* 011 0 0 1 00 */ }, { 793834000, 166667, 0xca, 0xa4 /* 101 0 0 1 00 */ }, { 444834000, 166667, 0xca, 0xc4 /* 110 0 0 1 00 */ 
}, { 861000000, 166667, 0xca, 0xe4 /* 111 0 0 1 00 */ }, } }; /* Infineon TUA6034 * used in LG TDTP E102P */ static void tua6034_bw(struct dvb_frontend *fe, u8 *buf) { u32 bw = fe->dtv_property_cache.bandwidth_hz; if (bw == 7000000) buf[3] |= 0x08; } static struct dvb_pll_desc dvb_pll_tua6034 = { .name = "Infineon TUA6034", .min = 44250000, .max = 858000000, .iffreq= 36166667, .count = 3, .set = tua6034_bw, .entries = { { 174500000, 62500, 0xce, 0x01 }, { 230000000, 62500, 0xce, 0x02 }, { 999999999, 62500, 0xce, 0x04 }, }, }; /* ALPS TDED4 * used in Nebula-Cards and USB boxes */ static void tded4_bw(struct dvb_frontend *fe, u8 *buf) { u32 bw = fe->dtv_property_cache.bandwidth_hz; if (bw == 8000000) buf[3] |= 0x04; } static struct dvb_pll_desc dvb_pll_tded4 = { .name = "ALPS TDED4", .min = 47000000, .max = 863000000, .iffreq= 36166667, .set = tded4_bw, .count = 4, .entries = { { 153000000, 166667, 0x85, 0x01 }, { 470000000, 166667, 0x85, 0x02 }, { 823000000, 166667, 0x85, 0x08 }, { 999999999, 166667, 0x85, 0x88 }, } }; /* ALPS TDHU2 * used in AverTVHD MCE A180 */ static struct dvb_pll_desc dvb_pll_tdhu2 = { .name = "ALPS TDHU2", .min = 54000000, .max = 864000000, .iffreq= 44000000, .count = 4, .entries = { { 162000000, 62500, 0x85, 0x01 }, { 426000000, 62500, 0x85, 0x02 }, { 782000000, 62500, 0x85, 0x08 }, { 999999999, 62500, 0x85, 0x88 }, } }; /* Samsung TBMV30111IN / TBMV30712IN1 * used in Air2PC ATSC - 2nd generation (nxt2002) */ static struct dvb_pll_desc dvb_pll_samsung_tbmv = { .name = "Samsung TBMV30111IN / TBMV30712IN1", .min = 54000000, .max = 860000000, .iffreq= 44000000, .count = 6, .entries = { { 172000000, 166667, 0xb4, 0x01 }, { 214000000, 166667, 0xb4, 0x02 }, { 467000000, 166667, 0xbc, 0x02 }, { 721000000, 166667, 0xbc, 0x08 }, { 841000000, 166667, 0xf4, 0x08 }, { 999999999, 166667, 0xfc, 0x02 }, } }; /* * Philips SD1878 Tuner. 
*/ static struct dvb_pll_desc dvb_pll_philips_sd1878_tda8261 = { .name = "Philips SD1878", .min = 950000, .max = 2150000, .iffreq= 249, /* zero-IF, offset 249 is to round up */ .count = 4, .entries = { { 1250000, 500, 0xc4, 0x00}, { 1450000, 500, 0xc4, 0x40}, { 2050000, 500, 0xc4, 0x80}, { 2150000, 500, 0xc4, 0xc0}, }, }; static void opera1_bw(struct dvb_frontend *fe, u8 *buf) { struct dtv_frontend_properties *c = &fe->dtv_property_cache; struct dvb_pll_priv *priv = fe->tuner_priv; u32 b_w = (c->symbol_rate * 27) / 32000; struct i2c_msg msg = { .addr = priv->pll_i2c_address, .flags = 0, .buf = buf, .len = 4 }; int result; u8 lpf; if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 1); result = i2c_transfer(priv->i2c, &msg, 1); if (result != 1) printk(KERN_ERR "%s: i2c_transfer failed:%d", __func__, result); if (b_w <= 10000) lpf = 0xc; else if (b_w <= 12000) lpf = 0x2; else if (b_w <= 14000) lpf = 0xa; else if (b_w <= 16000) lpf = 0x6; else if (b_w <= 18000) lpf = 0xe; else if (b_w <= 20000) lpf = 0x1; else if (b_w <= 22000) lpf = 0x9; else if (b_w <= 24000) lpf = 0x5; else if (b_w <= 26000) lpf = 0xd; else if (b_w <= 28000) lpf = 0x3; else lpf = 0xb; buf[2] ^= 0x1c; /* Flip bits 3-5 */ /* Set lpf */ buf[2] |= ((lpf >> 2) & 0x3) << 3; buf[3] |= (lpf & 0x3) << 2; return; } static struct dvb_pll_desc dvb_pll_opera1 = { .name = "Opera Tuner", .min = 900000, .max = 2250000, .initdata = (u8[]){ 4, 0x08, 0xe5, 0xe1, 0x00 }, .initdata2 = (u8[]){ 4, 0x08, 0xe5, 0xe5, 0x00 }, .iffreq= 0, .set = opera1_bw, .count = 8, .entries = { { 1064000, 500, 0xf9, 0xc2 }, { 1169000, 500, 0xf9, 0xe2 }, { 1299000, 500, 0xf9, 0x20 }, { 1444000, 500, 0xf9, 0x40 }, { 1606000, 500, 0xf9, 0x60 }, { 1777000, 500, 0xf9, 0x80 }, { 1941000, 500, 0xf9, 0xa0 }, { 2250000, 500, 0xf9, 0xc0 }, } }; static void samsung_dtos403ih102a_set(struct dvb_frontend *fe, u8 *buf) { struct dvb_pll_priv *priv = fe->tuner_priv; struct i2c_msg msg = { .addr = priv->pll_i2c_address, .flags = 0, .buf = buf, .len = 4 
}; int result; if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 1); result = i2c_transfer(priv->i2c, &msg, 1); if (result != 1) printk(KERN_ERR "%s: i2c_transfer failed:%d", __func__, result); buf[2] = 0x9e; buf[3] = 0x90; return; } /* unknown pll used in Samsung DTOS403IH102A DVB-C tuner */ static struct dvb_pll_desc dvb_pll_samsung_dtos403ih102a = { .name = "Samsung DTOS403IH102A", .min = 44250000, .max = 858000000, .iffreq = 36125000, .count = 8, .set = samsung_dtos403ih102a_set, .entries = { { 135000000, 62500, 0xbe, 0x01 }, { 177000000, 62500, 0xf6, 0x01 }, { 370000000, 62500, 0xbe, 0x02 }, { 450000000, 62500, 0xf6, 0x02 }, { 466000000, 62500, 0xfe, 0x02 }, { 538000000, 62500, 0xbe, 0x08 }, { 826000000, 62500, 0xf6, 0x08 }, { 999999999, 62500, 0xfe, 0x08 }, } }; /* Samsung TDTC9251DH0 DVB-T NIM, as used on AirStar 2 */ static struct dvb_pll_desc dvb_pll_samsung_tdtc9251dh0 = { .name = "Samsung TDTC9251DH0", .min = 48000000, .max = 863000000, .iffreq = 36166667, .count = 3, .entries = { { 157500000, 166667, 0xcc, 0x09 }, { 443000000, 166667, 0xcc, 0x0a }, { 863000000, 166667, 0xcc, 0x08 }, } }; /* Samsung TBDU18132 DVB-S NIM with TSA5059 PLL, used in SkyStar2 DVB-S 2.3 */ static struct dvb_pll_desc dvb_pll_samsung_tbdu18132 = { .name = "Samsung TBDU18132", .min = 950000, .max = 2150000, /* guesses */ .iffreq = 0, .count = 2, .entries = { { 1550000, 125, 0x84, 0x82 }, { 4095937, 125, 0x84, 0x80 }, } /* TSA5059 PLL has a 17 bit divisor rather than the 15 bits supported * by this driver. The two extra bits are 0x60 in the third byte. 15 * bits is enough for over 4 GHz, which is enough to cover the range * of this tuner. We could use the additional divisor bits by adding * more entries, e.g. 
{ 0x0ffff * 125 + 125/2, 125, 0x84 | 0x20, }, { 0x17fff * 125 + 125/2, 125, 0x84 | 0x40, }, { 0x1ffff * 125 + 125/2, 125, 0x84 | 0x60, }, */ }; /* Samsung TBMU24112 DVB-S NIM with SL1935 zero-IF tuner */ static struct dvb_pll_desc dvb_pll_samsung_tbmu24112 = { .name = "Samsung TBMU24112", .min = 950000, .max = 2150000, /* guesses */ .iffreq = 0, .count = 2, .entries = { { 1500000, 125, 0x84, 0x18 }, { 9999999, 125, 0x84, 0x08 }, } }; /* Alps TDEE4 DVB-C NIM, used on Cablestar 2 */ /* byte 4 : 1 * * AGD R3 R2 R1 R0 * byte 5 : C1 * RE RTS BS4 BS3 BS2 BS1 * AGD = 1, R3 R2 R1 R0 = 0 1 0 1 => byte 4 = 1**10101 = 0x95 * Range(MHz) C1 * RE RTS BS4 BS3 BS2 BS1 Byte 5 * 47 - 153 0 * 0 0 0 0 0 1 0x01 * 153 - 430 0 * 0 0 0 0 1 0 0x02 * 430 - 822 0 * 0 0 1 0 0 0 0x08 * 822 - 862 1 * 0 0 1 0 0 0 0x88 */ static struct dvb_pll_desc dvb_pll_alps_tdee4 = { .name = "ALPS TDEE4", .min = 47000000, .max = 862000000, .iffreq = 36125000, .count = 4, .entries = { { 153000000, 62500, 0x95, 0x01 }, { 430000000, 62500, 0x95, 0x02 }, { 822000000, 62500, 0x95, 0x08 }, { 999999999, 62500, 0x95, 0x88 }, } }; /* ----------------------------------------------------------- */ static struct dvb_pll_desc *pll_list[] = { [DVB_PLL_UNDEFINED] = NULL, [DVB_PLL_THOMSON_DTT7579] = &dvb_pll_thomson_dtt7579, [DVB_PLL_THOMSON_DTT759X] = &dvb_pll_thomson_dtt759x, [DVB_PLL_LG_Z201] = &dvb_pll_lg_z201, [DVB_PLL_UNKNOWN_1] = &dvb_pll_unknown_1, [DVB_PLL_TUA6010XS] = &dvb_pll_tua6010xs, [DVB_PLL_ENV57H1XD5] = &dvb_pll_env57h1xd5, [DVB_PLL_TUA6034] = &dvb_pll_tua6034, [DVB_PLL_TDA665X] = &dvb_pll_tda665x, [DVB_PLL_TDED4] = &dvb_pll_tded4, [DVB_PLL_TDEE4] = &dvb_pll_alps_tdee4, [DVB_PLL_TDHU2] = &dvb_pll_tdhu2, [DVB_PLL_SAMSUNG_TBMV] = &dvb_pll_samsung_tbmv, [DVB_PLL_PHILIPS_SD1878_TDA8261] = &dvb_pll_philips_sd1878_tda8261, [DVB_PLL_OPERA1] = &dvb_pll_opera1, [DVB_PLL_SAMSUNG_DTOS403IH102A] = &dvb_pll_samsung_dtos403ih102a, [DVB_PLL_SAMSUNG_TDTC9251DH0] = &dvb_pll_samsung_tdtc9251dh0, [DVB_PLL_SAMSUNG_TBDU18132] = 
&dvb_pll_samsung_tbdu18132,
	[DVB_PLL_SAMSUNG_TBMU24112] =		&dvb_pll_samsung_tbmu24112,
};

/* ----------------------------------------------------------- */
/* code */

/*
 * Program the PLL divider bytes for @frequency into buf[0..3] and run the
 * per-descriptor fixup hook.  Returns the frequency actually set (divider
 * rounded to the entry's step size, minus the IF), or -EINVAL if the
 * frequency is outside the descriptor's range or matches no band entry.
 */
static int dvb_pll_configure(struct dvb_frontend *fe, u8 *buf,
			     const u32 frequency)
{
	struct dvb_pll_priv *priv = fe->tuner_priv;
	struct dvb_pll_desc *desc = priv->pll_desc;
	u32 div;
	int i;

	/* frequency == 0 skips the range check (used for "don't care"). */
	if (frequency && (frequency < desc->min || frequency > desc->max))
		return -EINVAL;

	/* Pick the first band entry whose upper limit covers @frequency. */
	for (i = 0; i < desc->count; i++) {
		if (frequency > desc->entries[i].limit)
			continue;
		break;
	}

	if (debug)
		printk("pll: %s: freq=%d | i=%d/%d\n", desc->name,
		       frequency, i, desc->count);
	if (i == desc->count)
		return -EINVAL;

	/* Divider = (RF + IF) / stepsize, rounded to nearest step. */
	div = (frequency + desc->iffreq + desc->entries[i].stepsize/2) /
		desc->entries[i].stepsize;
	buf[0] = div >> 8;
	buf[1] = div & 0xff;
	buf[2] = desc->entries[i].config;
	buf[3] = desc->entries[i].cb;

	/* Optional per-tuner tweak of the raw register bytes. */
	if (desc->set)
		desc->set(fe, buf);

	if (debug)
		printk("pll: %s: div=%d | buf=0x%02x,0x%02x,0x%02x,0x%02x\n",
		       desc->name, div, buf[0], buf[1], buf[2], buf[3]);

	// calculate the frequency we set it to
	return (div * desc->entries[i].stepsize) - desc->iffreq;
}

/* Free the private tuner state attached by dvb_pll_attach(). */
static int dvb_pll_release(struct dvb_frontend *fe)
{
	kfree(fe->tuner_priv);
	fe->tuner_priv = NULL;
	return 0;
}

/*
 * Put the PLL to sleep by writing the descriptor's canned sleepdata
 * (sleepdata[0] is the byte count, the payload follows) over I2C.
 * Only installed as a tuner op when sleepdata is present, so the
 * trailing -EINVAL is a should-not-happen guard.
 */
static int dvb_pll_sleep(struct dvb_frontend *fe)
{
	struct dvb_pll_priv *priv = fe->tuner_priv;

	if (priv->i2c == NULL)
		return -EINVAL;

	if (priv->pll_desc->sleepdata) {
		struct i2c_msg msg = { .flags = 0,
			.addr = priv->pll_i2c_address,
			.buf = priv->pll_desc->sleepdata + 1,
			.len = priv->pll_desc->sleepdata[0] };

		int result;

		if (fe->ops.i2c_gate_ctrl)
			fe->ops.i2c_gate_ctrl(fe, 1);
		if ((result = i2c_transfer(priv->i2c, &msg, 1)) != 1) {
			return result;
		}
		return 0;
	}
	/* Shouldn't be called when initdata is NULL, maybe BUG()? */
	return -EINVAL;
}

/*
 * Tune to the frequency cached in the frontend's property cache:
 * compute the four register bytes, open the I2C gate, send them, and
 * remember the resulting frequency/bandwidth for the get_* ops.
 */
static int dvb_pll_set_params(struct dvb_frontend *fe)
{
	struct dtv_frontend_properties *c = &fe->dtv_property_cache;
	struct dvb_pll_priv *priv = fe->tuner_priv;
	u8 buf[4];
	struct i2c_msg msg = { .addr = priv->pll_i2c_address, .flags = 0,
			       .buf = buf, .len = sizeof(buf) };
	int result;
	u32 frequency = 0;

	if (priv->i2c == NULL)
		return -EINVAL;

	result = dvb_pll_configure(fe, buf, c->frequency);
	if (result < 0)
		return result;
	else
		frequency = result;

	if (fe->ops.i2c_gate_ctrl)
		fe->ops.i2c_gate_ctrl(fe, 1);
	if ((result = i2c_transfer(priv->i2c, &msg, 1)) != 1) {
		return result;
	}

	priv->frequency = frequency;
	priv->bandwidth = c->bandwidth_hz;

	return 0;
}

/*
 * Like set_params, but instead of performing the I2C write, return the
 * 5-byte register image (I2C address followed by the 4 PLL bytes) in
 * @buf for a caller that batches the transfer itself.  Returns the
 * number of bytes filled (5) or a negative errno.
 */
static int dvb_pll_calc_regs(struct dvb_frontend *fe,
			     u8 *buf, int buf_len)
{
	struct dtv_frontend_properties *c = &fe->dtv_property_cache;
	struct dvb_pll_priv *priv = fe->tuner_priv;
	int result;
	u32 frequency = 0;

	if (buf_len < 5)
		return -EINVAL;

	result = dvb_pll_configure(fe, buf + 1, c->frequency);
	if (result < 0)
		return result;
	else
		frequency = result;

	buf[0] = priv->pll_i2c_address;

	priv->frequency = frequency;
	priv->bandwidth = c->bandwidth_hz;

	return 5;
}

/* Report the frequency recorded by the last successful tune. */
static int dvb_pll_get_frequency(struct dvb_frontend *fe, u32 *frequency)
{
	struct dvb_pll_priv *priv = fe->tuner_priv;
	*frequency = priv->frequency;
	return 0;
}

/* Report the bandwidth recorded by the last successful tune. */
static int dvb_pll_get_bandwidth(struct dvb_frontend *fe, u32 *bandwidth)
{
	struct dvb_pll_priv *priv = fe->tuner_priv;
	*bandwidth = priv->bandwidth;
	return 0;
}

/*
 * Send the descriptor's initdata (and optional initdata2) sequence to
 * wake the PLL.  Same length-prefixed encoding as sleepdata; only
 * installed as a tuner op when initdata exists.
 */
static int dvb_pll_init(struct dvb_frontend *fe)
{
	struct dvb_pll_priv *priv = fe->tuner_priv;

	if (priv->i2c == NULL)
		return -EINVAL;

	if (priv->pll_desc->initdata) {
		struct i2c_msg msg = { .flags = 0,
			.addr = priv->pll_i2c_address,
			.buf = priv->pll_desc->initdata + 1,
			.len = priv->pll_desc->initdata[0] };

		int result;
		if (fe->ops.i2c_gate_ctrl)
			fe->ops.i2c_gate_ctrl(fe, 1);
		result = i2c_transfer(priv->i2c, &msg, 1);
		if (result != 1)
			return result;
		if (priv->pll_desc->initdata2) {
			/* Reuse msg with the second init sequence. */
			msg.buf = priv->pll_desc->initdata2 + 1;
			msg.len = priv->pll_desc->initdata2[0];
			if (fe->ops.i2c_gate_ctrl)
				fe->ops.i2c_gate_ctrl(fe, 1);
			result = i2c_transfer(priv->i2c, &msg, 1);
			if (result != 1)
				return result;
		}
		return 0;
	}
	/* Shouldn't be called when initdata is NULL, maybe BUG()? */
	return -EINVAL;
}

static struct dvb_tuner_ops dvb_pll_tuner_ops = {
	.release = dvb_pll_release,
	.sleep = dvb_pll_sleep,
	.init = dvb_pll_init,
	.set_params = dvb_pll_set_params,
	.calc_regs = dvb_pll_calc_regs,
	.get_frequency = dvb_pll_get_frequency,
	.get_bandwidth = dvb_pll_get_bandwidth,
};

/*
 * Attach a PLL tuner to @fe.  @pll_desc_id selects the descriptor from
 * pll_list (module parameter "id" may override per-device); a one-byte
 * read probes the chip when an adapter is given.  Installs the tuner
 * ops (dropping init/sleep when the descriptor has no data for them)
 * and returns @fe, or NULL on probe/allocation failure.
 */
struct dvb_frontend *dvb_pll_attach(struct dvb_frontend *fe, int pll_addr,
				    struct i2c_adapter *i2c,
				    unsigned int pll_desc_id)
{
	u8 b1 [] = { 0 };
	struct i2c_msg msg = { .addr = pll_addr, .flags = I2C_M_RD,
			       .buf = b1, .len = 1 };
	struct dvb_pll_priv *priv = NULL;
	int ret;
	struct dvb_pll_desc *desc;

	/* insmod "id=" option overrides the caller's descriptor choice */
	if ((id[dvb_pll_devcount] > DVB_PLL_UNDEFINED) &&
	    (id[dvb_pll_devcount] < ARRAY_SIZE(pll_list)))
		pll_desc_id = id[dvb_pll_devcount];

	BUG_ON(pll_desc_id < 1 || pll_desc_id >= ARRAY_SIZE(pll_list));

	desc = pll_list[pll_desc_id];

	if (i2c != NULL) {
		/* Probe: a single read must ACK at the PLL address. */
		if (fe->ops.i2c_gate_ctrl)
			fe->ops.i2c_gate_ctrl(fe, 1);

		ret = i2c_transfer (i2c, &msg, 1);
		if (ret != 1)
			return NULL;
		if (fe->ops.i2c_gate_ctrl)
			fe->ops.i2c_gate_ctrl(fe, 0);
	}

	priv = kzalloc(sizeof(struct dvb_pll_priv), GFP_KERNEL);
	if (priv == NULL)
		return NULL;

	priv->pll_i2c_address = pll_addr;
	priv->i2c = i2c;
	priv->pll_desc = desc;
	priv->nr = dvb_pll_devcount++;

	memcpy(&fe->ops.tuner_ops, &dvb_pll_tuner_ops,
	       sizeof(struct dvb_tuner_ops));

	/* NOTE(review): strncpy may leave info.name unterminated if
	 * desc->name fills the buffer exactly — confirm buffer sizes. */
	strncpy(fe->ops.tuner_ops.info.name, desc->name,
		sizeof(fe->ops.tuner_ops.info.name));
	fe->ops.tuner_ops.info.frequency_min = desc->min;
	fe->ops.tuner_ops.info.frequency_max = desc->max;
	/* Don't advertise init/sleep the descriptor can't perform. */
	if (!desc->initdata)
		fe->ops.tuner_ops.init = NULL;
	if (!desc->sleepdata)
		fe->ops.tuner_ops.sleep = NULL;

	fe->tuner_priv = priv;

	if ((debug) || (id[priv->nr] == pll_desc_id)) {
		printk("dvb-pll[%d]", priv->nr);
		if (i2c != NULL)
			printk(" %d-%04x", i2c_adapter_id(i2c), pll_addr);
		printk(": id# %d (%s) attached, %s\n", pll_desc_id, desc->name,
		       id[priv->nr] == pll_desc_id ?
				"insmod option" : "autodetected");
	}

	return fe;
}
EXPORT_SYMBOL(dvb_pll_attach);

MODULE_DESCRIPTION("dvb pll library");
MODULE_AUTHOR("Gerd Knorr");
MODULE_LICENSE("GPL");
gpl-2.0
h8rift/android_kernel_htc_apq8064
arch/powerpc/platforms/cell/axon_msi.c
7331
11916
/*
 * Copyright 2007, Michael Ellerman, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/msi.h>
#include <linux/export.h>
#include <linux/of_platform.h>
#include <linux/debugfs.h>
#include <linux/slab.h>

#include <asm/dcr.h>
#include <asm/machdep.h>
#include <asm/prom.h>

/*
 * MSIC registers, specified as offsets from dcr_base
 */
#define MSIC_CTRL_REG	0x0

/* Base Address registers specify FIFO location in BE memory */
#define MSIC_BASE_ADDR_HI_REG	0x3
#define MSIC_BASE_ADDR_LO_REG	0x4

/* Hold the read/write offsets into the FIFO */
#define MSIC_READ_OFFSET_REG	0x5
#define MSIC_WRITE_OFFSET_REG	0x6

/* MSIC control register flags */
#define MSIC_CTRL_ENABLE		0x0001
#define MSIC_CTRL_FIFO_FULL_ENABLE	0x0002
#define MSIC_CTRL_IRQ_ENABLE		0x0008
#define MSIC_CTRL_FULL_STOP_ENABLE	0x0010

/*
 * The MSIC can be configured to use a FIFO of 32KB, 64KB, 128KB or 256KB.
 * Currently we're using a 64KB FIFO size.
 */
#define MSIC_FIFO_SIZE_SHIFT	16
#define MSIC_FIFO_SIZE_BYTES	(1 << MSIC_FIFO_SIZE_SHIFT)

/*
 * To configure the FIFO size as (1 << n) bytes, we write (n - 15) into bits
 * 8-9 of the MSIC control reg.
 */
#define MSIC_CTRL_FIFO_SIZE	(((MSIC_FIFO_SIZE_SHIFT - 15) << 8) & 0x300)

/*
 * We need to mask the read/write offsets to make sure they stay within
 * the bounds of the FIFO. Also they should always be 16-byte aligned.
 */
#define MSIC_FIFO_SIZE_MASK	((MSIC_FIFO_SIZE_BYTES - 1) & ~0xFu)

/* Each entry in the FIFO is 16 bytes, the first 4 bytes hold the irq # */
#define MSIC_FIFO_ENTRY_SIZE	0x10

/* Per-MSIC state: one of these exists for each "ibm,axon-msic" node. */
struct axon_msic {
	struct irq_domain *irq_domain;	/* nomap domain holding MSI virqs */
	__le32 *fifo_virt;		/* kernel view of the HW FIFO */
	dma_addr_t fifo_phys;		/* bus address programmed into MSIC */
	dcr_host_t dcr_host;		/* DCR window for the MSIC registers */
	u32 read_offset;		/* our consumer offset into the FIFO */
#ifdef DEBUG
	u32 __iomem *trigger;		/* MMIO reg to inject an MSI (debugfs) */
#endif
};

#ifdef DEBUG
void axon_msi_debug_setup(struct device_node *dn, struct axon_msic *msic);
#else
static inline void axon_msi_debug_setup(struct device_node *dn,
					struct axon_msic *msic) { }
#endif

/* Thin wrapper around dcr_write that also traces the access. */
static void msic_dcr_write(struct axon_msic *msic, unsigned int dcr_n, u32 val)
{
	pr_devel("axon_msi: dcr_write(0x%x, 0x%x)\n", val, dcr_n);

	dcr_write(msic->dcr_host, dcr_n, val);
}

/*
 * Chained handler for the MSIC's upstream interrupt: drain the FIFO
 * between our read offset and the hardware write offset, dispatching
 * each entry's irq number.  Stale DMA is handled by retrying an invalid
 * entry up to 100 times before skipping it.
 */
static void axon_msi_cascade(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct axon_msic *msic = irq_get_handler_data(irq);
	u32 write_offset, msi;
	int idx;
	int retry = 0;

	write_offset = dcr_read(msic->dcr_host, MSIC_WRITE_OFFSET_REG);
	pr_devel("axon_msi: original write_offset 0x%x\n", write_offset);

	/* write_offset doesn't wrap properly, so we have to mask it */
	write_offset &= MSIC_FIFO_SIZE_MASK;

	while (msic->read_offset != write_offset && retry < 100) {
		idx  = msic->read_offset / sizeof(__le32);
		msi  = le32_to_cpu(msic->fifo_virt[idx]);
		msi &= 0xFFFF;

		pr_devel("axon_msi: woff %x roff %x msi %x\n",
			  write_offset, msic->read_offset, msi);

		if (msi < nr_irqs && irq_get_chip_data(msi) == msic) {
			generic_handle_irq(msi);
			/* Poison the slot so a re-read is detectable. */
			msic->fifo_virt[idx] = cpu_to_le32(0xffffffff);
		} else {
			/*
			 * Reading the MSIC_WRITE_OFFSET_REG does not
			 * reliably flush the outstanding DMA to the
			 * FIFO buffer. Here we were reading stale
			 * data, so we need to retry.
			 */
			udelay(1);
			retry++;
			pr_devel("axon_msi: invalid irq 0x%x!\n", msi);
			continue;
		}

		if (retry) {
			pr_devel("axon_msi: late irq 0x%x, retry %d\n",
				 msi, retry);
			retry = 0;
		}

		msic->read_offset += MSIC_FIFO_ENTRY_SIZE;
		msic->read_offset &= MSIC_FIFO_SIZE_MASK;
	}

	if (retry) {
		printk(KERN_WARNING "axon_msi: irq timed out\n");

		/* Give up on this entry and move past it. */
		msic->read_offset += MSIC_FIFO_ENTRY_SIZE;
		msic->read_offset &= MSIC_FIFO_SIZE_MASK;
	}

	chip->irq_eoi(&desc->irq_data);
}

/*
 * Walk up the device tree from @dev looking for an "msi-translator"
 * phandle, then resolve it to the axon_msic stashed in that node's
 * irq_domain.  Returns NULL if any step fails.
 */
static struct axon_msic *find_msi_translator(struct pci_dev *dev)
{
	struct irq_domain *irq_domain;
	struct device_node *dn, *tmp;
	const phandle *ph;
	struct axon_msic *msic = NULL;

	dn = of_node_get(pci_device_to_OF_node(dev));
	if (!dn) {
		dev_dbg(&dev->dev, "axon_msi: no pci_dn found\n");
		return NULL;
	}

	for (; dn; dn = of_get_next_parent(dn)) {
		ph = of_get_property(dn, "msi-translator", NULL);
		if (ph)
			break;
	}

	if (!ph) {
		dev_dbg(&dev->dev,
			"axon_msi: no msi-translator property found\n");
		goto out_error;
	}

	tmp = dn;
	dn = of_find_node_by_phandle(*ph);
	of_node_put(tmp);
	if (!dn) {
		dev_dbg(&dev->dev,
			"axon_msi: msi-translator doesn't point to a node\n");
		goto out_error;
	}

	irq_domain = irq_find_host(dn);
	if (!irq_domain) {
		dev_dbg(&dev->dev, "axon_msi: no irq_domain found for node %s\n",
			dn->full_name);
		goto out_error;
	}

	msic = irq_domain->host_data;

out_error:
	of_node_put(dn);

	return msic;
}

/* ppc_md hook: only allow MSI setup when a translator is reachable. */
static int axon_msi_check_device(struct pci_dev *dev, int nvec, int type)
{
	if (!find_msi_translator(dev))
		return -ENODEV;

	return 0;
}

/*
 * Fill @msg's address from the first "msi-address-64" (when the device
 * supports 64-bit MSI) or "msi-address-32" property found walking up
 * from the device's OF node.  An 8-byte property supplies hi/lo words,
 * a 4-byte one only the low word.
 */
static int setup_msi_msg_address(struct pci_dev *dev, struct msi_msg *msg)
{
	struct device_node *dn;
	struct msi_desc *entry;
	int len;
	const u32 *prop;

	dn = of_node_get(pci_device_to_OF_node(dev));
	if (!dn) {
		dev_dbg(&dev->dev, "axon_msi: no pci_dn found\n");
		return -ENODEV;
	}

	entry = list_first_entry(&dev->msi_list, struct msi_desc, list);

	for (; dn; dn = of_get_next_parent(dn)) {
		if (entry->msi_attrib.is_64) {
			prop = of_get_property(dn, "msi-address-64", &len);
			if (prop)
				break;
		}

		prop = of_get_property(dn, "msi-address-32", &len);
		if (prop)
			break;
	}

	if (!prop) {
		dev_dbg(&dev->dev,
			"axon_msi: no msi-address-(32|64) properties found\n");
		return -ENOENT;
	}

	switch (len) {
	case 8:
		msg->address_hi = prop[0];
		msg->address_lo = prop[1];
		break;
	case 4:
		msg->address_hi = 0;
		msg->address_lo = prop[0];
		break;
	default:
		dev_dbg(&dev->dev,
			"axon_msi: malformed msi-address-(32|64) property\n");
		of_node_put(dn);
		return -EINVAL;
	}

	of_node_put(dn);

	return 0;
}

/*
 * ppc_md hook: allocate a virq per MSI descriptor and point the
 * device's MSI message at the translator, with the virq as the data
 * payload so the cascade can dispatch it.
 */
static int axon_msi_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
{
	unsigned int virq, rc;
	struct msi_desc *entry;
	struct msi_msg msg;
	struct axon_msic *msic;

	msic = find_msi_translator(dev);
	if (!msic)
		return -ENODEV;

	rc = setup_msi_msg_address(dev, &msg);
	if (rc)
		return rc;

	list_for_each_entry(entry, &dev->msi_list, list) {
		virq = irq_create_direct_mapping(msic->irq_domain);
		if (virq == NO_IRQ) {
			dev_warn(&dev->dev,
				 "axon_msi: virq allocation failed!\n");
			return -1;
		}
		dev_dbg(&dev->dev, "axon_msi: allocated virq 0x%x\n", virq);

		irq_set_msi_desc(virq, entry);
		msg.data = virq;
		write_msi_msg(virq, &msg);
	}

	return 0;
}

/* ppc_md hook: undo axon_msi_setup_msi_irqs for every descriptor. */
static void axon_msi_teardown_msi_irqs(struct pci_dev *dev)
{
	struct msi_desc *entry;

	dev_dbg(&dev->dev, "axon_msi: tearing down msi irqs\n");

	list_for_each_entry(entry, &dev->msi_list, list) {
		if (entry->irq == NO_IRQ)
			continue;

		irq_set_msi_desc(entry->irq, NULL);
		irq_dispose_mapping(entry->irq);
	}
}

static struct irq_chip msic_irq_chip = {
	.irq_mask	= mask_msi_irq,
	.irq_unmask	= unmask_msi_irq,
	.irq_shutdown	= mask_msi_irq,
	.name		= "AXON-MSI",
};

/* irq_domain map op: wire new virqs to the MSIC chip/handler. */
static int msic_host_map(struct irq_domain *h, unsigned int virq,
			 irq_hw_number_t hw)
{
	irq_set_chip_data(virq, h->host_data);
	irq_set_chip_and_handler(virq, &msic_irq_chip, handle_simple_irq);

	return 0;
}

static const struct irq_domain_ops msic_host_ops = {
	.map	= msic_host_map,
};

/* Platform shutdown: clear the MSIC's enable and irq-enable bits. */
static void axon_msi_shutdown(struct platform_device *device)
{
	struct axon_msic *msic = dev_get_drvdata(&device->dev);
	u32 tmp;

	pr_devel("axon_msi: disabling %s\n",
		  msic->irq_domain->of_node->full_name);
	tmp  = dcr_read(msic->dcr_host, MSIC_CTRL_REG);
	tmp &= ~MSIC_CTRL_ENABLE & ~MSIC_CTRL_IRQ_ENABLE;
	msic_dcr_write(msic, MSIC_CTRL_REG, tmp);
}

/*
 * Probe: map the DCR window, allocate the coherent FIFO, create the
 * nomap irq_domain, chain our cascade off the MSIC's upstream irq,
 * enable the hardware and install the ppc_md MSI hooks.
 */
static int axon_msi_probe(struct platform_device *device)
{
	struct device_node *dn = device->dev.of_node;
	struct axon_msic *msic;
	unsigned int virq;
	int dcr_base, dcr_len;

	pr_devel("axon_msi: setting up dn %s\n", dn->full_name);

	msic = kzalloc(sizeof(struct axon_msic), GFP_KERNEL);
	if (!msic) {
		printk(KERN_ERR "axon_msi: couldn't allocate msic for %s\n",
		       dn->full_name);
		goto out;
	}

	dcr_base = dcr_resource_start(dn, 0);
	dcr_len = dcr_resource_len(dn, 0);

	if (dcr_base == 0 || dcr_len == 0) {
		printk(KERN_ERR
		       "axon_msi: couldn't parse dcr properties on %s\n",
			dn->full_name);
		goto out_free_msic;
	}

	msic->dcr_host = dcr_map(dn, dcr_base, dcr_len);
	if (!DCR_MAP_OK(msic->dcr_host)) {
		printk(KERN_ERR "axon_msi: dcr_map failed for %s\n",
		       dn->full_name);
		goto out_free_msic;
	}

	msic->fifo_virt = dma_alloc_coherent(&device->dev, MSIC_FIFO_SIZE_BYTES,
					     &msic->fifo_phys, GFP_KERNEL);
	if (!msic->fifo_virt) {
		printk(KERN_ERR "axon_msi: couldn't allocate fifo for %s\n",
		       dn->full_name);
		goto out_free_msic;
	}

	virq = irq_of_parse_and_map(dn, 0);
	if (virq == NO_IRQ) {
		printk(KERN_ERR "axon_msi: irq parse and map failed for %s\n",
		       dn->full_name);
		goto out_free_fifo;
	}
	/* Pre-poison the FIFO so stale-entry detection works. */
	memset(msic->fifo_virt, 0xff, MSIC_FIFO_SIZE_BYTES);

	/* We rely on being able to stash a virq in a u16, so limit irqs to < 65536 */
	msic->irq_domain = irq_domain_add_nomap(dn, 65536, &msic_host_ops, msic);
	if (!msic->irq_domain) {
		printk(KERN_ERR "axon_msi: couldn't allocate irq_domain for %s\n",
		       dn->full_name);
		goto out_free_fifo;
	}

	irq_set_handler_data(virq, msic);
	irq_set_chained_handler(virq, axon_msi_cascade);
	pr_devel("axon_msi: irq 0x%x setup for axon_msi\n", virq);

	/* Enable the MSIC hardware */
	msic_dcr_write(msic, MSIC_BASE_ADDR_HI_REG, msic->fifo_phys >> 32);
	msic_dcr_write(msic, MSIC_BASE_ADDR_LO_REG,
				  msic->fifo_phys & 0xFFFFFFFF);
	msic_dcr_write(msic, MSIC_CTRL_REG,
			MSIC_CTRL_IRQ_ENABLE | MSIC_CTRL_ENABLE |
			MSIC_CTRL_FIFO_SIZE);

	/* Start consuming where the hardware will next write. */
	msic->read_offset = dcr_read(msic->dcr_host, MSIC_WRITE_OFFSET_REG)
				& MSIC_FIFO_SIZE_MASK;

	dev_set_drvdata(&device->dev, msic);

	ppc_md.setup_msi_irqs = axon_msi_setup_msi_irqs;
	ppc_md.teardown_msi_irqs = axon_msi_teardown_msi_irqs;
	ppc_md.msi_check_device = axon_msi_check_device;

	axon_msi_debug_setup(dn, msic);

	printk(KERN_DEBUG "axon_msi: setup MSIC on %s\n", dn->full_name);

	return 0;

out_free_fifo:
	dma_free_coherent(&device->dev, MSIC_FIFO_SIZE_BYTES, msic->fifo_virt,
			  msic->fifo_phys);
out_free_msic:
	kfree(msic);
out:

	return -1;
}

static const struct of_device_id axon_msi_device_id[] = {
	{
		.compatible	= "ibm,axon-msic"
	},
	{}
};

static struct platform_driver axon_msi_driver = {
	.probe		= axon_msi_probe,
	.shutdown	= axon_msi_shutdown,
	.driver		= {
		.name	= "axon-msi",
		.owner	= THIS_MODULE,
		.of_match_table = axon_msi_device_id,
	},
};

static int __init axon_msi_init(void)
{
	return platform_driver_register(&axon_msi_driver);
}
subsys_initcall(axon_msi_init);


#ifdef DEBUG
/* debugfs write: poke @val at the MSIC trigger register to fake an MSI. */
static int msic_set(void *data, u64 val)
{
	struct axon_msic *msic = data;
	out_le32(msic->trigger, val);
	return 0;
}

/* debugfs read: the trigger register is write-only, always report 0. */
static int msic_get(void *data, u64 *val)
{
	*val = 0;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(fops_msic, msic_get, msic_set, "%llu\n");

void axon_msi_debug_setup(struct device_node *dn, struct axon_msic *msic)
{
	char name[8];
	u64 addr;

	addr = of_translate_address(dn, of_get_property(dn, "reg", NULL));
	if (addr == OF_BAD_ADDR) {
		pr_devel("axon_msi: couldn't translate reg property\n");
		return;
	}

	msic->trigger = ioremap(addr, 0x4);
	if (!msic->trigger) {
		pr_devel("axon_msi: ioremap failed\n");
		return;
	}

	snprintf(name, sizeof(name), "msic_%d", of_node_to_nid(dn));

	if (!debugfs_create_file(name, 0600, powerpc_debugfs_root,
				 msic, &fops_msic)) {
		pr_devel("axon_msi: debugfs_create_file failed!\n");
		return;
	}
}
#endif /* DEBUG */
gpl-2.0
TheTypoMaster/ghost
fs/sysfs/bin.c
7843
10980
/*
 * fs/sysfs/bin.c - sysfs binary file implementation
 *
 * Copyright (c) 2003 Patrick Mochel
 * Copyright (c) 2003 Matthew Wilcox
 * Copyright (c) 2004 Silicon Graphics, Inc.
 * Copyright (c) 2007 SUSE Linux Products GmbH
 * Copyright (c) 2007 Tejun Heo <teheo@suse.de>
 *
 * This file is released under the GPLv2.
 *
 * Please see Documentation/filesystems/sysfs.txt for more information.
 */

#undef DEBUG

#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/kobject.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/mm.h>

#include <asm/uaccess.h>

#include "sysfs.h"

/*
 * There's one bin_buffer for each open file.
 *
 * filp->private_data points to bin_buffer and
 * sysfs_dirent->s_bin_attr.buffers points to a the bin_buffer s
 * sysfs_dirent->s_bin_attr.buffers is protected by sysfs_bin_lock
 */
static DEFINE_MUTEX(sysfs_bin_lock);

/* Per-open-file state for a sysfs binary attribute. */
struct bin_buffer {
	struct mutex			mutex;		/* serializes buffer use */
	void				*buffer;	/* PAGE_SIZE bounce buffer */
	int				mmapped;	/* has mmap() succeeded? */
	const struct vm_operations_struct *vm_ops;	/* attr's original vm_ops */
	struct file			*file;		/* back-pointer to file */
	struct hlist_node		list;		/* on s_bin_attr.buffers */
};

/*
 * Call the attribute's ->read with an active reference held on the
 * sysfs_dirent, so the attribute can't be removed mid-call.
 */
static int
fill_read(struct file *file, char *buffer, loff_t off, size_t count)
{
	struct sysfs_dirent *attr_sd = file->f_path.dentry->d_fsdata;
	struct bin_attribute *attr = attr_sd->s_bin_attr.bin_attr;
	struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
	int rc;

	/* need attr_sd for attr, its parent for kobj */
	if (!sysfs_get_active(attr_sd))
		return -ENODEV;

	rc = -EIO;
	if (attr->read)
		rc = attr->read(file, kobj, attr, buffer, off, count);

	sysfs_put_active(attr_sd);

	return rc;
}

/*
 * read(2): fill the shared bounce buffer under bb->mutex, then copy to
 * userspace via a private temp buffer so copy_to_user happens outside
 * the lock.  At most PAGE_SIZE per call, clamped to i_size when set.
 */
static ssize_t
read(struct file *file, char __user *userbuf, size_t bytes, loff_t *off)
{
	struct bin_buffer *bb = file->private_data;
	int size = file->f_path.dentry->d_inode->i_size;
	loff_t offs = *off;
	int count = min_t(size_t, bytes, PAGE_SIZE);
	char *temp;

	if (!bytes)
		return 0;

	if (size) {
		if (offs > size)
			return 0;
		if (offs + count > size)
			count = size - offs;
	}

	temp = kmalloc(count, GFP_KERNEL);
	if (!temp)
		return -ENOMEM;

	mutex_lock(&bb->mutex);

	count = fill_read(file, bb->buffer, offs, count);
	if (count < 0) {
		mutex_unlock(&bb->mutex);
		goto out_free;
	}

	memcpy(temp, bb->buffer, count);

	mutex_unlock(&bb->mutex);

	if (copy_to_user(userbuf, temp, count)) {
		count = -EFAULT;
		goto out_free;
	}

	pr_debug("offs = %lld, *off = %lld, count = %d\n", offs, *off, count);

	*off = offs + count;

 out_free:
	kfree(temp);
	return count;
}

/*
 * Call the attribute's ->write with an active reference held; mirror
 * of fill_read().
 */
static int
flush_write(struct file *file, char *buffer, loff_t offset, size_t count)
{
	struct sysfs_dirent *attr_sd = file->f_path.dentry->d_fsdata;
	struct bin_attribute *attr = attr_sd->s_bin_attr.bin_attr;
	struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
	int rc;

	/* need attr_sd for attr, its parent for kobj */
	if (!sysfs_get_active(attr_sd))
		return -ENODEV;

	rc = -EIO;
	if (attr->write)
		rc = attr->write(file, kobj, attr, buffer, offset, count);

	sysfs_put_active(attr_sd);

	return rc;
}

/*
 * write(2): copy from userspace first (memdup_user, outside the lock),
 * then stage through the shared bounce buffer under bb->mutex and
 * flush to the attribute.  At most PAGE_SIZE per call.
 */
static ssize_t write(struct file *file, const char __user *userbuf,
		     size_t bytes, loff_t *off)
{
	struct bin_buffer *bb = file->private_data;
	int size = file->f_path.dentry->d_inode->i_size;
	loff_t offs = *off;
	int count = min_t(size_t, bytes, PAGE_SIZE);
	char *temp;

	if (!bytes)
		return 0;

	if (size) {
		if (offs > size)
			return 0;
		if (offs + count > size)
			count = size - offs;
	}

	temp = memdup_user(userbuf, count);
	if (IS_ERR(temp))
		return PTR_ERR(temp);

	mutex_lock(&bb->mutex);

	memcpy(bb->buffer, temp, count);

	count = flush_write(file, bb->buffer, offs, count);
	mutex_unlock(&bb->mutex);

	if (count > 0)
		*off = offs + count;

	kfree(temp);
	return count;
}

/*
 * The following vm hooks all follow the same pattern: forward to the
 * attribute's original vm_ops (saved in bb->vm_ops at mmap time) while
 * holding an active reference on the sysfs_dirent, and fail/no-op if
 * the dirent is already being deleted.
 */
static void bin_vma_open(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct bin_buffer *bb = file->private_data;
	struct sysfs_dirent *attr_sd = file->f_path.dentry->d_fsdata;

	if (!bb->vm_ops)
		return;

	if (!sysfs_get_active(attr_sd))
		return;

	if (bb->vm_ops->open)
		bb->vm_ops->open(vma);

	sysfs_put_active(attr_sd);
}

static int bin_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct file *file = vma->vm_file;
	struct bin_buffer *bb = file->private_data;
	struct sysfs_dirent *attr_sd = file->f_path.dentry->d_fsdata;
	int ret;

	if (!bb->vm_ops)
		return VM_FAULT_SIGBUS;

	if (!sysfs_get_active(attr_sd))
		return VM_FAULT_SIGBUS;

	ret = VM_FAULT_SIGBUS;
	if (bb->vm_ops->fault)
		ret = bb->vm_ops->fault(vma, vmf);

	sysfs_put_active(attr_sd);
	return ret;
}

static int bin_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct file *file = vma->vm_file;
	struct bin_buffer *bb = file->private_data;
	struct sysfs_dirent *attr_sd = file->f_path.dentry->d_fsdata;
	int ret;

	if (!bb->vm_ops)
		return VM_FAULT_SIGBUS;

	if (!sysfs_get_active(attr_sd))
		return VM_FAULT_SIGBUS;

	ret = 0;
	if (bb->vm_ops->page_mkwrite)
		ret = bb->vm_ops->page_mkwrite(vma, vmf);

	sysfs_put_active(attr_sd);
	return ret;
}

static int bin_access(struct vm_area_struct *vma, unsigned long addr,
		  void *buf, int len, int write)
{
	struct file *file = vma->vm_file;
	struct bin_buffer *bb = file->private_data;
	struct sysfs_dirent *attr_sd = file->f_path.dentry->d_fsdata;
	int ret;

	if (!bb->vm_ops)
		return -EINVAL;

	if (!sysfs_get_active(attr_sd))
		return -EINVAL;

	ret = -EINVAL;
	if (bb->vm_ops->access)
		ret = bb->vm_ops->access(vma, addr, buf, len, write);

	sysfs_put_active(attr_sd);
	return ret;
}

#ifdef CONFIG_NUMA
static int bin_set_policy(struct vm_area_struct *vma, struct mempolicy *new)
{
	struct file *file = vma->vm_file;
	struct bin_buffer *bb = file->private_data;
	struct sysfs_dirent *attr_sd = file->f_path.dentry->d_fsdata;
	int ret;

	if (!bb->vm_ops)
		return 0;

	if (!sysfs_get_active(attr_sd))
		return -EINVAL;

	ret = 0;
	if (bb->vm_ops->set_policy)
		ret = bb->vm_ops->set_policy(vma, new);

	sysfs_put_active(attr_sd);
	return ret;
}

static struct mempolicy *bin_get_policy(struct vm_area_struct *vma,
					unsigned long addr)
{
	struct file *file = vma->vm_file;
	struct bin_buffer *bb = file->private_data;
	struct sysfs_dirent *attr_sd = file->f_path.dentry->d_fsdata;
	struct mempolicy *pol;

	if (!bb->vm_ops)
		return vma->vm_policy;

	if (!sysfs_get_active(attr_sd))
		return vma->vm_policy;

	pol = vma->vm_policy;
	if (bb->vm_ops->get_policy)
		pol = bb->vm_ops->get_policy(vma, addr);

	sysfs_put_active(attr_sd);
	return pol;
}

static int bin_migrate(struct vm_area_struct *vma, const nodemask_t *from,
			const nodemask_t *to, unsigned long flags)
{
	struct file *file = vma->vm_file;
	struct bin_buffer *bb = file->private_data;
	struct sysfs_dirent *attr_sd = file->f_path.dentry->d_fsdata;
	int ret;

	if (!bb->vm_ops)
		return 0;

	if (!sysfs_get_active(attr_sd))
		return 0;

	ret = 0;
	if (bb->vm_ops->migrate)
		ret = bb->vm_ops->migrate(vma, from, to, flags);

	sysfs_put_active(attr_sd);
	return ret;
}
#endif

static const struct vm_operations_struct bin_vm_ops = {
	.open		= bin_vma_open,
	.fault		= bin_fault,
	.page_mkwrite	= bin_page_mkwrite,
	.access		= bin_access,
#ifdef CONFIG_NUMA
	.set_policy	= bin_set_policy,
	.get_policy	= bin_get_policy,
	.migrate	= bin_migrate,
#endif
};

/*
 * mmap(2): delegate to the attribute's ->mmap, then interpose our
 * bin_vm_ops over whatever vm_ops it installed (saved in bb->vm_ops)
 * so every vm callback can take an active reference first.
 */
static int mmap(struct file *file, struct vm_area_struct *vma)
{
	struct bin_buffer *bb = file->private_data;
	struct sysfs_dirent *attr_sd = file->f_path.dentry->d_fsdata;
	struct bin_attribute *attr = attr_sd->s_bin_attr.bin_attr;
	struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
	int rc;

	mutex_lock(&bb->mutex);

	/* need attr_sd for attr, its parent for kobj */
	rc = -ENODEV;
	if (!sysfs_get_active(attr_sd))
		goto out_unlock;

	rc = -EINVAL;
	if (!attr->mmap)
		goto out_put;

	rc = attr->mmap(file, kobj, attr, vma);
	if (rc)
		goto out_put;

	/*
	 * PowerPC's pci_mmap of legacy_mem uses shmem_zero_setup()
	 * to satisfy versions of X which crash if the mmap fails: that
	 * substitutes a new vm_file, and we don't then want bin_vm_ops.
	 */
	if (vma->vm_file != file)
		goto out_put;

	/* Repeated mmaps must all agree on the same vm_ops. */
	rc = -EINVAL;
	if (bb->mmapped && bb->vm_ops != vma->vm_ops)
		goto out_put;

	/*
	 * It is not possible to successfully wrap close.
	 * So error if someone is trying to use close.
	 */
	rc = -EINVAL;
	if (vma->vm_ops && vma->vm_ops->close)
		goto out_put;

	rc = 0;
	bb->mmapped = 1;
	bb->vm_ops = vma->vm_ops;
	vma->vm_ops = &bin_vm_ops;
out_put:
	sysfs_put_active(attr_sd);
out_unlock:
	mutex_unlock(&bb->mutex);

	return rc;
}

/*
 * open(2): verify access mode against the attribute's capabilities,
 * allocate the per-open bin_buffer + bounce page and link it on the
 * dirent's buffer list (under sysfs_bin_lock).
 */
static int open(struct inode * inode, struct file * file)
{
	struct sysfs_dirent *attr_sd = file->f_path.dentry->d_fsdata;
	struct bin_attribute *attr = attr_sd->s_bin_attr.bin_attr;
	struct bin_buffer *bb = NULL;
	int error;

	/* binary file operations requires both @sd and its parent */
	if (!sysfs_get_active(attr_sd))
		return -ENODEV;

	error = -EACCES;
	if ((file->f_mode & FMODE_WRITE) && !(attr->write || attr->mmap))
		goto err_out;
	if ((file->f_mode & FMODE_READ) && !(attr->read || attr->mmap))
		goto err_out;

	error = -ENOMEM;
	bb = kzalloc(sizeof(*bb), GFP_KERNEL);
	if (!bb)
		goto err_out;

	bb->buffer = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!bb->buffer)
		goto err_out;

	mutex_init(&bb->mutex);
	bb->file = file;
	file->private_data = bb;

	mutex_lock(&sysfs_bin_lock);
	hlist_add_head(&bb->list, &attr_sd->s_bin_attr.buffers);
	mutex_unlock(&sysfs_bin_lock);

	/* open succeeded, put active references */
	sysfs_put_active(attr_sd);
	return 0;

 err_out:
	sysfs_put_active(attr_sd);
	kfree(bb);
	return error;
}

/* release(2): unlink the bin_buffer and free it with its bounce page. */
static int release(struct inode * inode, struct file * file)
{
	struct bin_buffer *bb = file->private_data;

	mutex_lock(&sysfs_bin_lock);
	hlist_del(&bb->list);
	mutex_unlock(&sysfs_bin_lock);

	kfree(bb->buffer);
	kfree(bb);
	return 0;
}

const struct file_operations bin_fops = {
	.read		= read,
	.write		= write,
	.mmap		= mmap,
	.llseek		= generic_file_llseek,
	.open		= open,
	.release	= release,
};

/*
 * Zap every mapping of a binary attribute that is being removed, so
 * later faults hit bin_fault's SIGBUS path rather than stale vm_ops.
 */
void unmap_bin_file(struct sysfs_dirent *attr_sd)
{
	struct bin_buffer *bb;
	struct hlist_node *tmp;

	if (sysfs_type(attr_sd) != SYSFS_KOBJ_BIN_ATTR)
		return;

	mutex_lock(&sysfs_bin_lock);

	hlist_for_each_entry(bb, tmp, &attr_sd->s_bin_attr.buffers, list) {
		struct inode *inode = bb->file->f_path.dentry->d_inode;

		unmap_mapping_range(inode->i_mapping, 0, 0, 1);
	}

	mutex_unlock(&sysfs_bin_lock);
}

/**
 *	sysfs_create_bin_file - create binary file for object.
 *	@kobj:	object.
 *	@attr:	attribute descriptor.
 */
int sysfs_create_bin_file(struct kobject *kobj,
			  const struct bin_attribute *attr)
{
	BUG_ON(!kobj || !kobj->sd || !attr);

	return sysfs_add_file(kobj->sd, &attr->attr, SYSFS_KOBJ_BIN_ATTR);
}

/**
 *	sysfs_remove_bin_file - remove binary file for object.
 *	@kobj:	object.
 *	@attr:	attribute descriptor.
 */
void sysfs_remove_bin_file(struct kobject *kobj,
			   const struct bin_attribute *attr)
{
	sysfs_hash_and_remove(kobj->sd, NULL, attr->attr.name);
}

EXPORT_SYMBOL_GPL(sysfs_create_bin_file);
EXPORT_SYMBOL_GPL(sysfs_remove_bin_file);
gpl-2.0
gabry3795/android_kernel_huawei_mt7_l09
arch/parisc/lib/memset.c
14243
2442
/* Copyright (C) 1991, 1997 Free Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. */ /* Slight modifications for pa-risc linux - Paul Bame <bame@debian.org> */ #include <linux/types.h> #include <asm/string.h> #define OPSIZ (BITS_PER_LONG/8) typedef unsigned long op_t; void * memset (void *dstpp, int sc, size_t len) { unsigned int c = sc; long int dstp = (long int) dstpp; if (len >= 8) { size_t xlen; op_t cccc; cccc = (unsigned char) c; cccc |= cccc << 8; cccc |= cccc << 16; if (OPSIZ > 4) /* Do the shift in two steps to avoid warning if long has 32 bits. */ cccc |= (cccc << 16) << 16; /* There are at least some bytes to set. No need to test for LEN == 0 in this alignment loop. */ while (dstp % OPSIZ != 0) { ((unsigned char *) dstp)[0] = c; dstp += 1; len -= 1; } /* Write 8 `op_t' per iteration until less than 8 `op_t' remain. */ xlen = len / (OPSIZ * 8); while (xlen > 0) { ((op_t *) dstp)[0] = cccc; ((op_t *) dstp)[1] = cccc; ((op_t *) dstp)[2] = cccc; ((op_t *) dstp)[3] = cccc; ((op_t *) dstp)[4] = cccc; ((op_t *) dstp)[5] = cccc; ((op_t *) dstp)[6] = cccc; ((op_t *) dstp)[7] = cccc; dstp += 8 * OPSIZ; xlen -= 1; } len %= OPSIZ * 8; /* Write 1 `op_t' per iteration until less than OPSIZ bytes remain. 
*/ xlen = len / OPSIZ; while (xlen > 0) { ((op_t *) dstp)[0] = cccc; dstp += OPSIZ; xlen -= 1; } len %= OPSIZ; } /* Write the last few bytes. */ while (len > 0) { ((unsigned char *) dstp)[0] = c; dstp += 1; len -= 1; } return dstpp; }
gpl-2.0
coredumb/linux-grsecurity
sound/soc/soc-core.c
164
120379
/* * soc-core.c -- ALSA SoC Audio Layer * * Copyright 2005 Wolfson Microelectronics PLC. * Copyright 2005 Openedhand Ltd. * Copyright (C) 2010 Slimlogic Ltd. * Copyright (C) 2010 Texas Instruments Inc. * * Author: Liam Girdwood <lrg@slimlogic.co.uk> * with code, comments and ideas from :- * Richard Purdie <richard@openedhand.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * TODO: * o Add hw rules to enforce rates, etc. * o More testing with other codecs/machines. * o Add more codecs and platforms to ensure good API coverage. * o Support TDM on PCM and I2S */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/pm.h> #include <linux/bitops.h> #include <linux/debugfs.h> #include <linux/platform_device.h> #include <linux/pinctrl/consumer.h> #include <linux/ctype.h> #include <linux/slab.h> #include <linux/of.h> #include <linux/gpio.h> #include <linux/of_gpio.h> #include <sound/ac97_codec.h> #include <sound/core.h> #include <sound/jack.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include <sound/soc.h> #include <sound/soc-dpcm.h> #include <sound/initval.h> #define CREATE_TRACE_POINTS #include <trace/events/asoc.h> #define NAME_SIZE 32 #ifdef CONFIG_DEBUG_FS struct dentry *snd_soc_debugfs_root; EXPORT_SYMBOL_GPL(snd_soc_debugfs_root); #endif static DEFINE_MUTEX(client_mutex); static LIST_HEAD(dai_list); static LIST_HEAD(platform_list); static LIST_HEAD(codec_list); static LIST_HEAD(component_list); /* * This is a timeout to do a DAPM powerdown after a stream is closed(). * It can be used to eliminate pops between different playback streams, e.g. * between two audio tracks. 
*/
static int pmdown_time = 5000;
module_param(pmdown_time, int, 0);
MODULE_PARM_DESC(pmdown_time, "DAPM stream powerdown time (msecs)");

/* pinctrl/GPIO configuration used to drive an AC97 link reset.
 * NOTE(review): not referenced in this chunk - presumably consumed by AC97
 * reset helpers elsewhere in the file; confirm before removing. */
struct snd_ac97_reset_cfg {
	struct pinctrl *pctl;
	struct pinctrl_state *pstate_reset;
	struct pinctrl_state *pstate_warm_reset;
	struct pinctrl_state *pstate_run;
	int gpio_sdata;
	int gpio_sync;
	int gpio_reset;
};

/* returns the minimum number of bytes needed to represent
 * a particular given value */
static int min_bytes_needed(unsigned long val)
{
	int c = 0;
	int i;

	/* count leading zero bits from the top down */
	for (i = (sizeof val * 8) - 1; i >= 0; --i, ++c)
		if (val & (1UL << i))
			break;
	c = (sizeof val * 8) - c;
	/* round the significant bit count up to whole bytes */
	if (!c || (c % 8))
		c = (c + 8) / 8;
	else
		c /= 8;
	return c;
}

/* fill buf which is 'len' bytes with a formatted
 * string of the form 'reg: value\n' */
static int format_register_str(struct snd_soc_codec *codec,
			       unsigned int reg, char *buf, size_t len)
{
	int wordsize = min_bytes_needed(codec->driver->reg_cache_size) * 2;
	int regsize = codec->driver->reg_word_size * 2;
	int ret;
	char tmpbuf[len + 1];
	char regbuf[regsize + 1];

	/* since tmpbuf is allocated on the stack, warn the callers if they
	 * try to abuse this function */
	WARN_ON(len > 63);

	/* +2 for ': ' and + 1 for '\n' */
	if (wordsize + regsize + 2 + 1 != len)
		return -EINVAL;

	ret = snd_soc_read(codec, reg);
	if (ret < 0) {
		/* read failed: show 'X' placeholders instead of a value */
		memset(regbuf, 'X', regsize);
		regbuf[regsize] = '\0';
	} else {
		snprintf(regbuf, regsize + 1, "%.*x", regsize, ret);
	}

	/* prepare the buffer */
	snprintf(tmpbuf, len + 1, "%.*x: %s\n", wordsize, reg, regbuf);
	/* copy it back to the caller without the '\0' */
	memcpy(buf, tmpbuf, len);

	return 0;
}

/* codec register dump */
static ssize_t soc_codec_reg_show(struct snd_soc_codec *codec, char *buf,
				  size_t count, loff_t pos)
{
	int i, step = 1;
	int wordsize, regsize;
	int len;
	size_t total = 0;
	loff_t p = 0;

	wordsize = min_bytes_needed(codec->driver->reg_cache_size) * 2;
	regsize = codec->driver->reg_word_size * 2;

	/* bytes per output line: hex reg + ': ' + hex value + '\n' */
	len = wordsize + regsize + 2 + 1;

	if (!codec->driver->reg_cache_size)
		return 0;

	if (codec->driver->reg_cache_step)
		step = codec->driver->reg_cache_step;

	for (i = 0; i < codec->driver->reg_cache_size; i += step) {
		if (!snd_soc_codec_readable_register(codec, i))
			continue;
		if (codec->driver->display_register) {
			count += codec->driver->display_register(codec, buf + count,
							 PAGE_SIZE - count, i);
		} else {
			/* only support larger than PAGE_SIZE bytes debugfs
			 * entries for the default case */
			if (p >= pos) {
				if (total + len >= count - 1)
					break;
				format_register_str(codec, i, buf + total, len);
				total += len;
			}
			p += len;
		}
	}

	total = min(total, count - 1);

	return total;
}

/* sysfs 'codec_reg' show: dump the register cache into one page */
static ssize_t codec_reg_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct snd_soc_pcm_runtime *rtd = dev_get_drvdata(dev);

	return soc_codec_reg_show(rtd->codec, buf, PAGE_SIZE, 0);
}

static DEVICE_ATTR(codec_reg, 0444, codec_reg_show, NULL);

static ssize_t pmdown_time_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct snd_soc_pcm_runtime *rtd = dev_get_drvdata(dev);

	return sprintf(buf, "%ld\n", rtd->pmdown_time);
}

/* sysfs 'pmdown_time' store: parse a decimal msec value */
static ssize_t pmdown_time_set(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t count)
{
	struct snd_soc_pcm_runtime *rtd = dev_get_drvdata(dev);
	int ret;

	ret = kstrtol(buf, 10, &rtd->pmdown_time);
	if (ret)
		return ret;

	return count;
}

static DEVICE_ATTR(pmdown_time, 0644, pmdown_time_show, pmdown_time_set);

#ifdef CONFIG_DEBUG_FS
/* debugfs read of the codec register dump */
static ssize_t codec_reg_read_file(struct file *file, char __user *user_buf,
				   size_t count, loff_t *ppos)
{
	ssize_t ret;
	struct snd_soc_codec *codec = file->private_data;
	char *buf;

	if (*ppos < 0 || !count)
		return -EINVAL;

	buf = kmalloc(count, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = soc_codec_reg_show(codec, buf, count, *ppos);
	if (ret >= 0) {
		if (copy_to_user(user_buf, buf, ret)) {
			kfree(buf);
			return -EFAULT;
		}
		*ppos += ret;
	}

	kfree(buf);
	return ret;
}

/* debugfs write: "<reg> <value>" in hex, written to the codec */
static ssize_t codec_reg_write_file(struct file *file,
		const char __user *user_buf, size_t count, loff_t *ppos)
{
	char
buf[32];
	size_t buf_size;
	char *start = buf;
	unsigned long reg, value;
	struct snd_soc_codec *codec = file->private_data;
	int ret;

	buf_size = min(count, (sizeof(buf)-1));
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;
	buf[buf_size] = 0;

	/* parse "<reg> <value>", both hex, separated by spaces */
	while (*start == ' ')
		start++;
	reg = simple_strtoul(start, &start, 16);
	while (*start == ' ')
		start++;
	ret = kstrtoul(start, 16, &value);
	if (ret)
		return ret;

	/* Userspace has been fiddling around behind the kernel's back */
	add_taint(TAINT_USER, LOCKDEP_NOW_UNRELIABLE);

	snd_soc_write(codec, reg, value);
	return buf_size;
}

static const struct file_operations codec_reg_fops = {
	.open = simple_open,
	.read = codec_reg_read_file,
	.write = codec_reg_write_file,
	.llseek = default_llseek,
};

/* create the per-codec debugfs directory and its entries */
static void soc_init_codec_debugfs(struct snd_soc_codec *codec)
{
	struct dentry *debugfs_card_root = codec->card->debugfs_card_root;

	codec->debugfs_codec_root = debugfs_create_dir(codec->name,
						       debugfs_card_root);
	if (!codec->debugfs_codec_root) {
		dev_warn(codec->dev,
			"ASoC: Failed to create codec debugfs directory\n");
		return;
	}

	debugfs_create_bool("cache_sync", 0444, codec->debugfs_codec_root,
			    &codec->cache_sync);
	debugfs_create_bool("cache_only", 0444, codec->debugfs_codec_root,
			    &codec->cache_only);

	codec->debugfs_reg = debugfs_create_file("codec_reg", 0644,
						 codec->debugfs_codec_root,
						 codec, &codec_reg_fops);
	if (!codec->debugfs_reg)
		dev_warn(codec->dev,
			"ASoC: Failed to create codec register debugfs file\n");

	snd_soc_dapm_debugfs_init(&codec->dapm, codec->debugfs_codec_root);
}

static void soc_cleanup_codec_debugfs(struct snd_soc_codec *codec)
{
	debugfs_remove_recursive(codec->debugfs_codec_root);
}

/* create the per-platform debugfs directory and DAPM entries */
static void soc_init_platform_debugfs(struct snd_soc_platform *platform)
{
	struct dentry *debugfs_card_root = platform->card->debugfs_card_root;

	platform->debugfs_platform_root = debugfs_create_dir(platform->name,
						debugfs_card_root);
	if (!platform->debugfs_platform_root) {
		dev_warn(platform->dev,
			"ASoC: Failed to create platform debugfs directory\n");
		return;
	}

	snd_soc_dapm_debugfs_init(&platform->dapm,
		platform->debugfs_platform_root);
}

static void soc_cleanup_platform_debugfs(struct snd_soc_platform *platform)
{
	debugfs_remove_recursive(platform->debugfs_platform_root);
}

/* debugfs 'codecs': list names of all registered CODECs, one per line */
static ssize_t codec_list_read_file(struct file *file, char __user *user_buf,
				    size_t count, loff_t *ppos)
{
	char *buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	ssize_t len, ret = 0;
	struct snd_soc_codec *codec;

	if (!buf)
		return -ENOMEM;

	list_for_each_entry(codec, &codec_list, list) {
		len = snprintf(buf + ret, PAGE_SIZE - ret, "%s\n",
			       codec->name);
		if (len >= 0)
			ret += len;
		if (ret > PAGE_SIZE) {
			ret = PAGE_SIZE;
			break;
		}
	}

	if (ret >= 0)
		ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret);

	kfree(buf);

	return ret;
}

static const struct file_operations codec_list_fops = {
	.read = codec_list_read_file,
	.llseek = default_llseek,/* read accesses f_pos */
};

/* debugfs 'dais': list names of all registered DAIs, one per line */
static ssize_t dai_list_read_file(struct file *file, char __user *user_buf,
				  size_t count, loff_t *ppos)
{
	char *buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	ssize_t len, ret = 0;
	struct snd_soc_dai *dai;

	if (!buf)
		return -ENOMEM;

	list_for_each_entry(dai, &dai_list, list) {
		len = snprintf(buf + ret, PAGE_SIZE - ret, "%s\n", dai->name);
		if (len >= 0)
			ret += len;
		if (ret > PAGE_SIZE) {
			ret = PAGE_SIZE;
			break;
		}
	}

	ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret);

	kfree(buf);

	return ret;
}

static const struct file_operations dai_list_fops = {
	.read = dai_list_read_file,
	.llseek = default_llseek,/* read accesses f_pos */
};

/* debugfs 'platforms': list names of all registered platforms */
static ssize_t platform_list_read_file(struct file *file,
				       char __user *user_buf,
				       size_t count, loff_t *ppos)
{
	char *buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	ssize_t len, ret = 0;
	struct snd_soc_platform *platform;

	if (!buf)
		return -ENOMEM;

	list_for_each_entry(platform, &platform_list, list) {
		len = snprintf(buf + ret, PAGE_SIZE - ret, "%s\n",
			       platform->name);
		if (len >= 0)
			ret += len;
		if (ret > PAGE_SIZE) {
			ret = PAGE_SIZE;
			break;
		}
	}

	ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret);

	kfree(buf);

	return ret;
}

static const struct file_operations platform_list_fops = {
	.read = platform_list_read_file,
	.llseek = default_llseek,/* read accesses f_pos */
};

/* create the per-card debugfs directory and pop-time entry */
static void soc_init_card_debugfs(struct snd_soc_card *card)
{
	card->debugfs_card_root = debugfs_create_dir(card->name,
						     snd_soc_debugfs_root);
	if (!card->debugfs_card_root) {
		dev_warn(card->dev,
			 "ASoC: Failed to create card debugfs directory\n");
		return;
	}

	card->debugfs_pop_time = debugfs_create_u32("dapm_pop_time", 0644,
						    card->debugfs_card_root,
						    &card->pop_time);
	if (!card->debugfs_pop_time)
		dev_warn(card->dev,
			 "ASoC: Failed to create pop time debugfs file\n");
}

static void soc_cleanup_card_debugfs(struct snd_soc_card *card)
{
	debugfs_remove_recursive(card->debugfs_card_root);
}

#else

/* !CONFIG_DEBUG_FS: all debugfs hooks collapse to no-ops */
static inline void soc_init_codec_debugfs(struct snd_soc_codec *codec)
{
}

static inline void soc_cleanup_codec_debugfs(struct snd_soc_codec *codec)
{
}

static inline void soc_init_platform_debugfs(struct snd_soc_platform *platform)
{
}

static inline void soc_cleanup_platform_debugfs(struct snd_soc_platform *platform)
{
}

static inline void soc_init_card_debugfs(struct snd_soc_card *card)
{
}

static inline void soc_cleanup_card_debugfs(struct snd_soc_card *card)
{
}

#endif

/* look up a no_pcm (back end) DAI link's substream by link name */
struct snd_pcm_substream *snd_soc_get_dai_substream(struct snd_soc_card *card,
		const char *dai_link, int stream)
{
	int i;

	for (i = 0; i < card->num_links; i++) {
		if (card->rtd[i].dai_link->no_pcm &&
			!strcmp(card->rtd[i].dai_link->name, dai_link))
			return card->rtd[i].pcm->streams[stream].substream;
	}
	dev_dbg(card->dev, "ASoC: failed to find dai link %s\n", dai_link);
	return NULL;
}
EXPORT_SYMBOL_GPL(snd_soc_get_dai_substream);

/* look up a card's PCM runtime by DAI link name */
struct snd_soc_pcm_runtime *snd_soc_get_pcm_runtime(struct snd_soc_card *card,
		const char *dai_link)
{
	int i;

	for (i = 0; i < card->num_links; i++) {
		if (!strcmp(card->rtd[i].dai_link->name, dai_link))
			return &card->rtd[i];
	}
	dev_dbg(card->dev, "ASoC: failed to find rtd %s\n", dai_link);
	return NULL;
}
EXPORT_SYMBOL_GPL(snd_soc_get_pcm_runtime);

#ifdef CONFIG_SND_SOC_AC97_BUS
/* unregister ac97 codec */
static int soc_ac97_dev_unregister(struct snd_soc_codec *codec)
{
	if (codec->ac97->dev.bus)
		device_unregister(&codec->ac97->dev);
	return 0;
}

/* stop no dev release warning */
static void soc_ac97_device_release(struct device *dev){}

/* register ac97 codec to bus */
static int soc_ac97_dev_register(struct snd_soc_codec *codec)
{
	int err;

	codec->ac97->dev.bus = &ac97_bus_type;
	codec->ac97->dev.parent = codec->card->dev;
	codec->ac97->dev.release = soc_ac97_device_release;

	dev_set_name(&codec->ac97->dev, "%d-%d:%s",
		     codec->card->snd_card->number, 0, codec->name);
	err = device_register(&codec->ac97->dev);
	if (err < 0) {
		dev_err(codec->dev, "ASoC: Can't register ac97 bus\n");
		codec->ac97->dev.bus = NULL;
		return err;
	}
	return 0;
}
#endif

static void codec2codec_close_delayed_work(struct work_struct *work)
{
	/* Currently nothing to do for c2c links
	 * Since c2c links are internal nodes in the DAPM graph and
	 * don't interface with the outside world or application layer
	 * we don't have to do any special handling on close.
	 */
}

#ifdef CONFIG_PM_SLEEP
/* powers down audio subsystem for suspend */
int snd_soc_suspend(struct device *dev)
{
	struct snd_soc_card *card = dev_get_drvdata(dev);
	struct snd_soc_codec *codec;
	int i;

	/* If the initialization of this soc device failed, there is no codec
	 * associated with it. Just bail out in this case.
	 */
	if (list_empty(&card->codec_dev_list))
		return 0;

	/* Due to the resume being scheduled into a workqueue we could
	 * suspend before that's finished - wait for it to complete.
*/
	snd_power_lock(card->snd_card);
	snd_power_wait(card->snd_card, SNDRV_CTL_POWER_D0);
	snd_power_unlock(card->snd_card);

	/* we're going to block userspace touching us until resume completes */
	snd_power_change_state(card->snd_card, SNDRV_CTL_POWER_D3hot);

	/* mute any active DACs */
	for (i = 0; i < card->num_rtd; i++) {
		struct snd_soc_dai *dai = card->rtd[i].codec_dai;
		struct snd_soc_dai_driver *drv = dai->driver;

		if (card->rtd[i].dai_link->ignore_suspend)
			continue;

		if (drv->ops->digital_mute && dai->playback_active)
			drv->ops->digital_mute(dai, 1);
	}

	/* suspend all pcms */
	for (i = 0; i < card->num_rtd; i++) {
		if (card->rtd[i].dai_link->ignore_suspend)
			continue;

		snd_pcm_suspend_all(card->rtd[i].pcm);
	}

	if (card->suspend_pre)
		card->suspend_pre(card);

	/* suspend non-AC97 CPU DAIs and platforms first */
	for (i = 0; i < card->num_rtd; i++) {
		struct snd_soc_dai *cpu_dai = card->rtd[i].cpu_dai;
		struct snd_soc_platform *platform = card->rtd[i].platform;

		if (card->rtd[i].dai_link->ignore_suspend)
			continue;

		if (cpu_dai->driver->suspend && !cpu_dai->driver->ac97_control)
			cpu_dai->driver->suspend(cpu_dai);
		if (platform->driver->suspend && !platform->suspended) {
			platform->driver->suspend(cpu_dai);
			platform->suspended = 1;
		}
	}

	/* close any waiting streams and save state */
	for (i = 0; i < card->num_rtd; i++) {
		flush_delayed_work(&card->rtd[i].delayed_work);
		card->rtd[i].codec->dapm.suspend_bias_level =
			card->rtd[i].codec->dapm.bias_level;
	}

	for (i = 0; i < card->num_rtd; i++) {

		if (card->rtd[i].dai_link->ignore_suspend)
			continue;

		snd_soc_dapm_stream_event(&card->rtd[i],
					  SNDRV_PCM_STREAM_PLAYBACK,
					  SND_SOC_DAPM_STREAM_SUSPEND);

		snd_soc_dapm_stream_event(&card->rtd[i],
					  SNDRV_PCM_STREAM_CAPTURE,
					  SND_SOC_DAPM_STREAM_SUSPEND);
	}

	/* Recheck all analogue paths too */
	dapm_mark_io_dirty(&card->dapm);
	snd_soc_dapm_sync(&card->dapm);

	/* suspend all CODECs */
	list_for_each_entry(codec, &card->codec_dev_list, card_list) {
		/* If there are paths active then the CODEC will be held with
		 * bias _ON and should not be suspended. */
		if (!codec->suspended && codec->driver->suspend) {
			switch (codec->dapm.bias_level) {
			case SND_SOC_BIAS_STANDBY:
				/*
				 * If the CODEC is capable of idle
				 * bias off then being in STANDBY
				 * means it's doing something,
				 * otherwise fall through.
				 */
				if (codec->dapm.idle_bias_off) {
					dev_dbg(codec->dev,
						"ASoC: idle_bias_off CODEC on over suspend\n");
					break;
				}
				/* fall through */
			case SND_SOC_BIAS_OFF:
				codec->driver->suspend(codec);
				codec->suspended = 1;
				codec->cache_sync = 1;
				if (codec->using_regmap)
					regcache_mark_dirty(codec->control_data);
				/* deactivate pins to sleep state */
				pinctrl_pm_select_sleep_state(codec->dev);
				break;
			default:
				dev_dbg(codec->dev,
					"ASoC: CODEC is on over suspend\n");
				break;
			}
		}
	}

	/* AC97 CPU DAIs are suspended last (see resume ordering below) */
	for (i = 0; i < card->num_rtd; i++) {
		struct snd_soc_dai *cpu_dai = card->rtd[i].cpu_dai;

		if (card->rtd[i].dai_link->ignore_suspend)
			continue;

		if (cpu_dai->driver->suspend && cpu_dai->driver->ac97_control)
			cpu_dai->driver->suspend(cpu_dai);

		/* deactivate pins to sleep state */
		pinctrl_pm_select_sleep_state(cpu_dai->dev);
	}

	if (card->suspend_post)
		card->suspend_post(card);

	return 0;
}
EXPORT_SYMBOL_GPL(snd_soc_suspend);

/* deferred resume work, so resume can complete before we finished
 * setting our codec back up, which can be very slow on I2C
 */
static void soc_resume_deferred(struct work_struct *work)
{
	struct snd_soc_card *card =
			container_of(work, struct snd_soc_card, deferred_resume_work);
	struct snd_soc_codec *codec;
	int i;

	/* our power state is still SNDRV_CTL_POWER_D3hot from suspend time,
	 * so userspace apps are blocked from touching us
	 */

	dev_dbg(card->dev, "ASoC: starting resume work\n");

	/* Bring us up into D2 so that DAPM starts enabling things */
	snd_power_change_state(card->snd_card, SNDRV_CTL_POWER_D2);

	if (card->resume_pre)
		card->resume_pre(card);

	/* resume AC97 DAIs */
	for (i = 0; i < card->num_rtd; i++) {
		struct snd_soc_dai *cpu_dai = card->rtd[i].cpu_dai;

		if (card->rtd[i].dai_link->ignore_suspend)
			continue;

		if (cpu_dai->driver->resume && cpu_dai->driver->ac97_control)
			cpu_dai->driver->resume(cpu_dai);
	}

	list_for_each_entry(codec, &card->codec_dev_list, card_list) {
		/* If the CODEC was idle over suspend then it will have been
		 * left with bias OFF or STANDBY and suspended so we must now
		 * resume. Otherwise the suspend was suppressed.
		 */
		if (codec->driver->resume && codec->suspended) {
			switch (codec->dapm.bias_level) {
			case SND_SOC_BIAS_STANDBY:
			case SND_SOC_BIAS_OFF:
				codec->driver->resume(codec);
				codec->suspended = 0;
				break;
			default:
				dev_dbg(codec->dev,
					"ASoC: CODEC was on over suspend\n");
				break;
			}
		}
	}

	for (i = 0; i < card->num_rtd; i++) {

		if (card->rtd[i].dai_link->ignore_suspend)
			continue;

		snd_soc_dapm_stream_event(&card->rtd[i],
					  SNDRV_PCM_STREAM_PLAYBACK,
					  SND_SOC_DAPM_STREAM_RESUME);

		snd_soc_dapm_stream_event(&card->rtd[i],
					  SNDRV_PCM_STREAM_CAPTURE,
					  SND_SOC_DAPM_STREAM_RESUME);
	}

	/* unmute any active DACs */
	for (i = 0; i < card->num_rtd; i++) {
		struct snd_soc_dai *dai = card->rtd[i].codec_dai;
		struct snd_soc_dai_driver *drv = dai->driver;

		if (card->rtd[i].dai_link->ignore_suspend)
			continue;

		if (drv->ops->digital_mute && dai->playback_active)
			drv->ops->digital_mute(dai, 0);
	}

	/* resume non-AC97 CPU DAIs and platforms last */
	for (i = 0; i < card->num_rtd; i++) {
		struct snd_soc_dai *cpu_dai = card->rtd[i].cpu_dai;
		struct snd_soc_platform *platform = card->rtd[i].platform;

		if (card->rtd[i].dai_link->ignore_suspend)
			continue;

		if (cpu_dai->driver->resume && !cpu_dai->driver->ac97_control)
			cpu_dai->driver->resume(cpu_dai);
		if (platform->driver->resume && platform->suspended) {
			platform->driver->resume(cpu_dai);
			platform->suspended = 0;
		}
	}

	if (card->resume_post)
		card->resume_post(card);

	dev_dbg(card->dev, "ASoC: resume work completed\n");

	/* userspace can access us now we are back as we were before */
	snd_power_change_state(card->snd_card, SNDRV_CTL_POWER_D0);

	/* Recheck all analogue paths too */
	dapm_mark_io_dirty(&card->dapm);
	snd_soc_dapm_sync(&card->dapm);
}

/* powers up audio subsystem after a suspend */
int snd_soc_resume(struct device *dev)
{
	struct snd_soc_card *card = dev_get_drvdata(dev);
	int i, ac97_control = 0;

	/* If the initialization of this soc device failed, there is no codec
	 * associated with it. Just bail out in this case.
	 */
	if (list_empty(&card->codec_dev_list))
		return 0;

	/* activate pins from sleep state */
	for (i = 0; i < card->num_rtd; i++) {
		struct snd_soc_dai *cpu_dai = card->rtd[i].cpu_dai;
		struct snd_soc_dai *codec_dai = card->rtd[i].codec_dai;

		if (cpu_dai->active)
			pinctrl_pm_select_default_state(cpu_dai->dev);
		if (codec_dai->active)
			pinctrl_pm_select_default_state(codec_dai->dev);
	}

	/* AC97 devices might have other drivers hanging off them so
	 * need to resume immediately. Other drivers don't have that
	 * problem and may take a substantial amount of time to resume
	 * due to I/O costs and anti-pop so handle them out of line.
	 */
	for (i = 0; i < card->num_rtd; i++) {
		struct snd_soc_dai *cpu_dai = card->rtd[i].cpu_dai;

		ac97_control |= cpu_dai->driver->ac97_control;
	}
	if (ac97_control) {
		dev_dbg(dev, "ASoC: Resuming AC97 immediately\n");
		soc_resume_deferred(&card->deferred_resume_work);
	} else {
		dev_dbg(dev, "ASoC: Scheduling resume work\n");
		if (!schedule_work(&card->deferred_resume_work))
			dev_err(dev, "ASoC: resume work item may be lost\n");
	}

	return 0;
}
EXPORT_SYMBOL_GPL(snd_soc_resume);
#else
#define snd_soc_suspend NULL
#define snd_soc_resume NULL
#endif

static const struct snd_soc_dai_ops null_dai_ops = {
};

/* bind a DAI link's CPU DAI, CODEC, CODEC DAI and platform together */
static int soc_bind_dai_link(struct snd_soc_card *card, int num)
{
	struct snd_soc_dai_link *dai_link = &card->dai_link[num];
	struct snd_soc_pcm_runtime *rtd = &card->rtd[num];
	struct snd_soc_codec *codec;
	struct snd_soc_platform *platform;
	struct snd_soc_dai *codec_dai, *cpu_dai;
	const char *platform_name;

	dev_dbg(card->dev, "ASoC: binding %s at idx %d\n", dai_link->name, num);

	/* Find CPU DAI from registered DAIs*/
	list_for_each_entry(cpu_dai, &dai_list, list) {
		if (dai_link->cpu_of_node &&
		    (cpu_dai->dev->of_node != dai_link->cpu_of_node))
			continue;
		if (dai_link->cpu_name &&
		    strcmp(dev_name(cpu_dai->dev),
dai_link->cpu_name))
			continue;
		if (dai_link->cpu_dai_name &&
		    strcmp(cpu_dai->name, dai_link->cpu_dai_name))
			continue;

		rtd->cpu_dai = cpu_dai;
	}

	if (!rtd->cpu_dai) {
		dev_err(card->dev, "ASoC: CPU DAI %s not registered\n",
			dai_link->cpu_dai_name);
		return -EPROBE_DEFER;
	}

	/* Find CODEC from registered CODECs */
	list_for_each_entry(codec, &codec_list, list) {
		if (dai_link->codec_of_node) {
			if (codec->dev->of_node != dai_link->codec_of_node)
				continue;
		} else {
			if (strcmp(codec->name, dai_link->codec_name))
				continue;
		}

		rtd->codec = codec;

		/*
		 * CODEC found, so find CODEC DAI from registered DAIs from
		 * this CODEC
		 */
		list_for_each_entry(codec_dai, &dai_list, list) {
			if (codec->dev == codec_dai->dev &&
				!strcmp(codec_dai->name,
					dai_link->codec_dai_name)) {

				rtd->codec_dai = codec_dai;
			}
		}

		if (!rtd->codec_dai) {
			dev_err(card->dev, "ASoC: CODEC DAI %s not registered\n",
				dai_link->codec_dai_name);
			return -EPROBE_DEFER;
		}
	}

	if (!rtd->codec) {
		dev_err(card->dev, "ASoC: CODEC %s not registered\n",
			dai_link->codec_name);
		return -EPROBE_DEFER;
	}

	/* if there's no platform we match on the empty platform */
	platform_name = dai_link->platform_name;
	if (!platform_name && !dai_link->platform_of_node)
		platform_name = "snd-soc-dummy";

	/* find one from the set of registered platforms */
	list_for_each_entry(platform, &platform_list, list) {
		if (dai_link->platform_of_node) {
			if (platform->dev->of_node !=
			    dai_link->platform_of_node)
				continue;
		} else {
			if (strcmp(platform->name, platform_name))
				continue;
		}

		rtd->platform = platform;
	}
	if (!rtd->platform) {
		dev_err(card->dev, "ASoC: platform %s not registered\n",
			dai_link->platform_name);
		return -EPROBE_DEFER;
	}

	card->num_rtd++;

	return 0;
}

/* undo soc_probe_platform(): driver remove, free DAPM, drop module ref */
static int soc_remove_platform(struct snd_soc_platform *platform)
{
	int ret;

	if (platform->driver->remove) {
		ret = platform->driver->remove(platform);
		if (ret < 0)
			dev_err(platform->dev, "ASoC: failed to remove %d\n",
				ret);
	}

	/* Make sure all DAPM widgets are freed */
	snd_soc_dapm_free(&platform->dapm);

	soc_cleanup_platform_debugfs(platform);
	platform->probed = 0;
	list_del(&platform->card_list);
	module_put(platform->dev->driver->owner);

	return 0;
}

/* undo soc_probe_codec(): driver remove, free DAPM, drop module ref */
static void soc_remove_codec(struct snd_soc_codec *codec)
{
	int err;

	if (codec->driver->remove) {
		err = codec->driver->remove(codec);
		if (err < 0)
			dev_err(codec->dev, "ASoC: failed to remove %d\n", err);
	}

	/* Make sure all DAPM widgets are freed */
	snd_soc_dapm_free(&codec->dapm);

	soc_cleanup_codec_debugfs(codec);
	codec->probed = 0;
	list_del(&codec->card_list);
	module_put(codec->dev->driver->owner);
}

/* remove a link's DAIs whose remove_order matches 'order' */
static void soc_remove_link_dais(struct snd_soc_card *card, int num, int order)
{
	struct snd_soc_pcm_runtime *rtd = &card->rtd[num];
	struct snd_soc_dai *codec_dai = rtd->codec_dai, *cpu_dai = rtd->cpu_dai;
	int err;

	/* unregister the rtd device */
	if (rtd->dev_registered) {
		device_remove_file(rtd->dev, &dev_attr_pmdown_time);
		device_remove_file(rtd->dev, &dev_attr_codec_reg);
		device_unregister(rtd->dev);
		rtd->dev_registered = 0;
	}

	/* remove the CODEC DAI */
	if (codec_dai && codec_dai->probed &&
			codec_dai->driver->remove_order == order) {
		if (codec_dai->driver->remove) {
			err = codec_dai->driver->remove(codec_dai);
			if (err < 0)
				dev_err(codec_dai->dev,
					"ASoC: failed to remove %s: %d\n",
					codec_dai->name, err);
		}
		codec_dai->probed = 0;
		list_del(&codec_dai->card_list);
	}

	/* remove the cpu_dai */
	if (cpu_dai && cpu_dai->probed &&
			cpu_dai->driver->remove_order == order) {
		if (cpu_dai->driver->remove) {
			err = cpu_dai->driver->remove(cpu_dai);
			if (err < 0)
				dev_err(cpu_dai->dev,
					"ASoC: failed to remove %s: %d\n",
					cpu_dai->name, err);
		}
		cpu_dai->probed = 0;
		list_del(&cpu_dai->card_list);

		/* a CPU DAI without a CODEC owns its own DAPM/module ref */
		if (!cpu_dai->codec) {
			snd_soc_dapm_free(&cpu_dai->dapm);
			module_put(cpu_dai->dev->driver->owner);
		}
	}
}

/* remove a link's CODECs/platform whose remove_order matches 'order' */
static void soc_remove_link_components(struct snd_soc_card *card, int num,
				       int order)
{
	struct snd_soc_pcm_runtime *rtd = &card->rtd[num];
	struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
	struct snd_soc_dai *codec_dai = rtd->codec_dai;
	struct snd_soc_platform *platform = rtd->platform;
	struct snd_soc_codec *codec;

	/* remove the platform */
	if (platform && platform->probed &&
	    platform->driver->remove_order == order) {
		soc_remove_platform(platform);
	}

	/* remove the CODEC-side CODEC */
	if (codec_dai) {
		codec = codec_dai->codec;
		if (codec && codec->probed &&
		    codec->driver->remove_order == order)
			soc_remove_codec(codec);
	}

	/* remove any CPU-side CODEC */
	if (cpu_dai) {
		codec = cpu_dai->codec;
		if (codec && codec->probed &&
		    codec->driver->remove_order == order)
			soc_remove_codec(codec);
	}
}

/* tear down all links: DAIs first, then components, in remove order */
static void soc_remove_dai_links(struct snd_soc_card *card)
{
	int dai, order;

	for (order = SND_SOC_COMP_ORDER_FIRST; order <= SND_SOC_COMP_ORDER_LAST;
			order++) {
		for (dai = 0; dai < card->num_rtd; dai++)
			soc_remove_link_dais(card, dai, order);
	}

	for (order = SND_SOC_COMP_ORDER_FIRST; order <= SND_SOC_COMP_ORDER_LAST;
			order++) {
		for (dai = 0; dai < card->num_rtd; dai++)
			soc_remove_link_components(card, dai, order);
	}

	card->num_rtd = 0;
}

/* apply any codec_conf name_prefix matching this codec's name */
static void soc_set_name_prefix(struct snd_soc_card *card,
				struct snd_soc_codec *codec)
{
	int i;

	if (card->codec_conf == NULL)
		return;

	for (i = 0; i < card->num_configs; i++) {
		struct snd_soc_codec_conf *map = &card->codec_conf[i];
		if (map->dev_name && !strcmp(codec->name, map->dev_name)) {
			codec->name_prefix = map->name_prefix;
			break;
		}
	}
}

/* probe a CODEC: take module ref, create debugfs/DAPM, call driver probe */
static int soc_probe_codec(struct snd_soc_card *card,
			   struct snd_soc_codec *codec)
{
	int ret = 0;
	const struct snd_soc_codec_driver *driver = codec->driver;
	struct snd_soc_dai *dai;

	codec->card = card;
	codec->dapm.card = card;
	soc_set_name_prefix(card, codec);

	if (!try_module_get(codec->dev->driver->owner))
		return -ENODEV;

	soc_init_codec_debugfs(codec);

	if (driver->dapm_widgets)
		snd_soc_dapm_new_controls(&codec->dapm, driver->dapm_widgets,
					  driver->num_dapm_widgets);

	/* Create DAPM widgets for each DAI stream */
	list_for_each_entry(dai, &dai_list, list) {
		if (dai->dev != codec->dev)
			continue;

		snd_soc_dapm_new_dai_widgets(&codec->dapm, dai);
	}
	codec->dapm.idle_bias_off = driver->idle_bias_off;

	if (driver->probe) {
		ret = driver->probe(codec);
		if (ret < 0) {
			dev_err(codec->dev,
				"ASoC: failed to probe CODEC %d\n", ret);
			goto err_probe;
		}
		WARN(codec->dapm.idle_bias_off &&
			codec->dapm.bias_level != SND_SOC_BIAS_OFF,
			"codec %s can not start from non-off bias with idle_bias_off==1\n",
			codec->name);
	}

	/* If the driver didn't set I/O up try regmap */
	if (!codec->write && dev_get_regmap(codec->dev, NULL))
		snd_soc_codec_set_cache_io(codec, 0, 0, SND_SOC_REGMAP);

	if (driver->controls)
		snd_soc_add_codec_controls(codec, driver->controls,
				     driver->num_controls);
	if (driver->dapm_routes)
		snd_soc_dapm_add_routes(&codec->dapm, driver->dapm_routes,
					driver->num_dapm_routes);

	/* mark codec as probed and add to card codec list */
	codec->probed = 1;
	list_add(&codec->card_list, &card->codec_dev_list);
	list_add(&codec->dapm.list, &card->dapm_list);

	return 0;

err_probe:
	soc_cleanup_codec_debugfs(codec);
	module_put(codec->dev->driver->owner);

	return ret;
}

/* probe a platform: take module ref, create debugfs/DAPM, driver probe */
static int soc_probe_platform(struct snd_soc_card *card,
			      struct snd_soc_platform *platform)
{
	int ret = 0;
	const struct snd_soc_platform_driver *driver = platform->driver;
	struct snd_soc_dai *dai;

	platform->card = card;
	platform->dapm.card = card;

	if (!try_module_get(platform->dev->driver->owner))
		return -ENODEV;

	soc_init_platform_debugfs(platform);

	if (driver->dapm_widgets)
		snd_soc_dapm_new_controls(&platform->dapm,
			driver->dapm_widgets, driver->num_dapm_widgets);

	/* Create DAPM widgets for each DAI stream */
	list_for_each_entry(dai, &dai_list, list) {
		if (dai->dev != platform->dev)
			continue;

		snd_soc_dapm_new_dai_widgets(&platform->dapm, dai);
	}

	platform->dapm.idle_bias_off = 1;

	if (driver->probe) {
		ret = driver->probe(platform);
		if (ret < 0) {
			dev_err(platform->dev,
				"ASoC: failed to probe platform %d\n", ret);
			goto err_probe;
		}
	}

	if (driver->controls)
		snd_soc_add_platform_controls(platform, driver->controls,
				     driver->num_controls);
	if (driver->dapm_routes)
		snd_soc_dapm_add_routes(&platform->dapm, driver->dapm_routes,
					driver->num_dapm_routes);

	/* mark platform as probed and add to card platform list */
	platform->probed = 1;
	list_add(&platform->card_list, &card->platform_dev_list);
	list_add(&platform->dapm.list, &card->dapm_list);

	return 0;

err_probe:
	soc_cleanup_platform_debugfs(platform);
	module_put(platform->dev->driver->owner);

	return ret;
}

/* device release callback for the dynamically allocated rtd device */
static void rtd_release(struct device *dev)
{
	kfree(dev);
}

/* common init after a link's (or aux dev's) components have probed:
 * run machine init, then create and register the runtime device */
static int soc_post_component_init(struct snd_soc_card *card,
				   struct snd_soc_codec *codec,
				   int num, int dailess)
{
	struct snd_soc_dai_link *dai_link = NULL;
	struct snd_soc_aux_dev *aux_dev = NULL;
	struct snd_soc_pcm_runtime *rtd;
	const char *temp, *name;
	int ret = 0;

	if (!dailess) {
		dai_link = &card->dai_link[num];
		rtd = &card->rtd[num];
		name = dai_link->name;
	} else {
		aux_dev = &card->aux_dev[num];
		rtd = &card->rtd_aux[num];
		name = aux_dev->name;
	}
	rtd->card = card;

	/* machine controls, routes and widgets are not prefixed */
	temp = codec->name_prefix;
	codec->name_prefix = NULL;

	/* do machine specific initialization */
	if (!dailess && dai_link->init)
		ret = dai_link->init(rtd);
	else if (dailess && aux_dev->init)
		ret = aux_dev->init(&codec->dapm);
	if (ret < 0) {
		dev_err(card->dev, "ASoC: failed to init %s: %d\n", name, ret);
		return ret;
	}
	codec->name_prefix = temp;

	/* register the rtd device */
	rtd->codec = codec;

	rtd->dev = kzalloc(sizeof(struct device), GFP_KERNEL);
	if (!rtd->dev)
		return -ENOMEM;
	device_initialize(rtd->dev);
	rtd->dev->parent = card->dev;
	rtd->dev->release = rtd_release;
	rtd->dev->init_name = name;
	dev_set_drvdata(rtd->dev, rtd);
	mutex_init(&rtd->pcm_mutex);
	INIT_LIST_HEAD(&rtd->dpcm[SNDRV_PCM_STREAM_PLAYBACK].be_clients);
	INIT_LIST_HEAD(&rtd->dpcm[SNDRV_PCM_STREAM_CAPTURE].be_clients);
	INIT_LIST_HEAD(&rtd->dpcm[SNDRV_PCM_STREAM_PLAYBACK].fe_clients);
	INIT_LIST_HEAD(&rtd->dpcm[SNDRV_PCM_STREAM_CAPTURE].fe_clients);
	ret = device_add(rtd->dev);
	if (ret < 0) {
		/* calling put_device() here to free the rtd->dev */
		put_device(rtd->dev);
		dev_err(card->dev,
			"ASoC: failed to register runtime device: %d\n", ret);
		return ret;
	}
	rtd->dev_registered = 1;

	/* add DAPM sysfs entries for this codec */
	ret = snd_soc_dapm_sys_add(rtd->dev);
	if (ret < 0)
		dev_err(codec->dev,
			"ASoC: failed to add codec dapm sysfs entries: %d\n", ret);

	/* add codec sysfs entries */
	ret = device_create_file(rtd->dev, &dev_attr_codec_reg);
	if (ret < 0)
		dev_err(codec->dev,
			"ASoC: failed to add codec sysfs files: %d\n", ret);

#ifdef CONFIG_DEBUG_FS
	/* add DPCM sysfs entries */
	if (!dailess && !dai_link->dynamic)
		goto out;

	ret = soc_dpcm_debugfs_add(rtd);
	if (ret < 0)
		dev_err(rtd->dev, "ASoC: failed to add dpcm sysfs entries: %d\n", ret);

out:
#endif
	return 0;
}

/* probe a link's CODEC(s) and platform at the given probe order */
static int soc_probe_link_components(struct snd_soc_card *card, int num,
				     int order)
{
	struct snd_soc_pcm_runtime *rtd = &card->rtd[num];
	struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
	struct snd_soc_dai *codec_dai = rtd->codec_dai;
	struct snd_soc_platform *platform = rtd->platform;
	int ret;

	/* probe the CPU-side component, if it is a CODEC */
	if (cpu_dai->codec &&
	    !cpu_dai->codec->probed &&
	    cpu_dai->codec->driver->probe_order == order) {
		ret = soc_probe_codec(card, cpu_dai->codec);
		if (ret < 0)
			return ret;
	}

	/* probe the CODEC-side component */
	if (!codec_dai->codec->probed &&
	    codec_dai->codec->driver->probe_order == order) {
		ret = soc_probe_codec(card, codec_dai->codec);
		if (ret < 0)
			return ret;
	}

	/* probe the platform */
	if (!platform->probed &&
	    platform->driver->probe_order == order) {
		ret = soc_probe_platform(card, platform);
		if (ret < 0)
			return ret;
	}

	return 0;
}

/* probe a link's DAIs; completes link setup on the last probe pass */
static int soc_probe_link_dais(struct snd_soc_card *card, int num, int order)
{
	struct snd_soc_dai_link *dai_link = &card->dai_link[num];
	struct snd_soc_pcm_runtime *rtd = &card->rtd[num];
	struct snd_soc_codec *codec = rtd->codec;
	struct snd_soc_platform *platform = rtd->platform;
	struct snd_soc_dai *codec_dai = rtd->codec_dai;
	struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
	struct
snd_soc_dapm_widget *play_w, *capture_w; int ret; dev_dbg(card->dev, "ASoC: probe %s dai link %d late %d\n", card->name, num, order); /* config components */ cpu_dai->platform = platform; codec_dai->card = card; cpu_dai->card = card; /* set default power off timeout */ rtd->pmdown_time = pmdown_time; /* probe the cpu_dai */ if (!cpu_dai->probed && cpu_dai->driver->probe_order == order) { if (!cpu_dai->codec) { cpu_dai->dapm.card = card; if (!try_module_get(cpu_dai->dev->driver->owner)) return -ENODEV; list_add(&cpu_dai->dapm.list, &card->dapm_list); } if (cpu_dai->driver->probe) { ret = cpu_dai->driver->probe(cpu_dai); if (ret < 0) { dev_err(cpu_dai->dev, "ASoC: failed to probe CPU DAI %s: %d\n", cpu_dai->name, ret); module_put(cpu_dai->dev->driver->owner); return ret; } } cpu_dai->probed = 1; /* mark cpu_dai as probed and add to card dai list */ list_add(&cpu_dai->card_list, &card->dai_dev_list); } /* probe the CODEC DAI */ if (!codec_dai->probed && codec_dai->driver->probe_order == order) { if (codec_dai->driver->probe) { ret = codec_dai->driver->probe(codec_dai); if (ret < 0) { dev_err(codec_dai->dev, "ASoC: failed to probe CODEC DAI %s: %d\n", codec_dai->name, ret); return ret; } } /* mark codec_dai as probed and add to card dai list */ codec_dai->probed = 1; list_add(&codec_dai->card_list, &card->dai_dev_list); } /* complete DAI probe during last probe */ if (order != SND_SOC_COMP_ORDER_LAST) return 0; ret = soc_post_component_init(card, codec, num, 0); if (ret) return ret; ret = device_create_file(rtd->dev, &dev_attr_pmdown_time); if (ret < 0) dev_warn(rtd->dev, "ASoC: failed to add pmdown_time sysfs: %d\n", ret); if (cpu_dai->driver->compress_dai) { /*create compress_device"*/ ret = soc_new_compress(rtd, num); if (ret < 0) { dev_err(card->dev, "ASoC: can't create compress %s\n", dai_link->stream_name); return ret; } } else { if (!dai_link->params) { /* create the pcm */ ret = soc_new_pcm(rtd, num); if (ret < 0) { dev_err(card->dev, "ASoC: can't create pcm %s 
:%d\n", dai_link->stream_name, ret); return ret; } } else { INIT_DELAYED_WORK(&rtd->delayed_work, codec2codec_close_delayed_work); /* link the DAI widgets */ play_w = codec_dai->playback_widget; capture_w = cpu_dai->capture_widget; if (play_w && capture_w) { ret = snd_soc_dapm_new_pcm(card, dai_link->params, capture_w, play_w); if (ret != 0) { dev_err(card->dev, "ASoC: Can't link %s to %s: %d\n", play_w->name, capture_w->name, ret); return ret; } } play_w = cpu_dai->playback_widget; capture_w = codec_dai->capture_widget; if (play_w && capture_w) { ret = snd_soc_dapm_new_pcm(card, dai_link->params, capture_w, play_w); if (ret != 0) { dev_err(card->dev, "ASoC: Can't link %s to %s: %d\n", play_w->name, capture_w->name, ret); return ret; } } } } /* add platform data for AC97 devices */ if (rtd->codec_dai->driver->ac97_control) snd_ac97_dev_add_pdata(codec->ac97, rtd->cpu_dai->ac97_pdata); return 0; } #ifdef CONFIG_SND_SOC_AC97_BUS static int soc_register_ac97_dai_link(struct snd_soc_pcm_runtime *rtd) { int ret; /* Only instantiate AC97 if not already done by the adaptor * for the generic AC97 subsystem. */ if (rtd->codec_dai->driver->ac97_control && !rtd->codec->ac97_registered) { /* * It is possible that the AC97 device is already registered to * the device subsystem. This happens when the device is created * via snd_ac97_mixer(). Currently only SoC codec that does so * is the generic AC97 glue but others migh emerge. * * In those cases we don't try to register the device again. 
	 */
	if (!rtd->codec->ac97_created)
		return 0;

	ret = soc_ac97_dev_register(rtd->codec);
	if (ret < 0) {
		dev_err(rtd->codec->dev,
			"ASoC: AC97 device register failed: %d\n", ret);
		return ret;
	}

	/* remember so we only register (and unregister) once */
	rtd->codec->ac97_registered = 1;
	}
	return 0;
}

/* Undo soc_register_ac97_dai_link(): drop the AC97 device we created. */
static void soc_unregister_ac97_dai_link(struct snd_soc_codec *codec)
{
	if (codec->ac97_registered) {
		soc_ac97_dev_unregister(codec);
		codec->ac97_registered = 0;
	}
}
#endif

/*
 * Check that the CODEC an auxiliary device refers to has been registered;
 * returns -EPROBE_DEFER so the card probe is retried once it appears.
 */
static int soc_check_aux_dev(struct snd_soc_card *card, int num)
{
	struct snd_soc_aux_dev *aux_dev = &card->aux_dev[num];
	struct snd_soc_codec *codec;

	/* find CODEC from registered CODECs*/
	list_for_each_entry(codec, &codec_list, list) {
		if (!strcmp(codec->name, aux_dev->codec_name))
			return 0;
	}

	dev_err(card->dev, "ASoC: %s not registered\n", aux_dev->codec_name);

	return -EPROBE_DEFER;
}

/*
 * Probe the CODEC backing auxiliary device @num of @card and give it a
 * runtime device (soc_post_component_init with aux = 1).
 */
static int soc_probe_aux_dev(struct snd_soc_card *card, int num)
{
	struct snd_soc_aux_dev *aux_dev = &card->aux_dev[num];
	struct snd_soc_codec *codec;
	int ret = -ENODEV;

	/* find CODEC from registered CODECs*/
	list_for_each_entry(codec, &codec_list, list) {
		if (!strcmp(codec->name, aux_dev->codec_name)) {
			if (codec->probed) {
				/* aux CODECs must not be shared with a DAI link */
				dev_err(codec->dev,
					"ASoC: codec already probed");
				ret = -EBUSY;
				goto out;
			}
			goto found;
		}
	}
	/* codec not found */
	dev_err(card->dev, "ASoC: codec %s not found", aux_dev->codec_name);
	return -EPROBE_DEFER;

found:
	ret = soc_probe_codec(card, codec);
	if (ret < 0)
		return ret;

	ret = soc_post_component_init(card, codec, num, 1);

out:
	return ret;
}

/* Tear down one auxiliary device: unregister its rtd device, remove CODEC. */
static void soc_remove_aux_dev(struct snd_soc_card *card, int num)
{
	struct snd_soc_pcm_runtime *rtd = &card->rtd_aux[num];
	struct snd_soc_codec *codec = rtd->codec;

	/* unregister the rtd device */
	if (rtd->dev_registered) {
		device_remove_file(rtd->dev, &dev_attr_codec_reg);
		device_unregister(rtd->dev);
		rtd->dev_registered = 0;
	}

	if (codec && codec->probed)
		soc_remove_codec(codec);
}

/* Initialise a CODEC's register cache exactly once (idempotent). */
static int snd_soc_init_codec_cache(struct snd_soc_codec *codec)
{
	int ret;

	if (codec->cache_init)
		return 0;

	ret = snd_soc_cache_init(codec);
	if (ret < 0) {
		dev_err(codec->dev,
			"ASoC: Failed to set cache compression type: %d\n",
			ret);
		return ret;
	}
	codec->cache_init = 1;
	return 0;
}

/*
 * Bind and probe everything belonging to @card and register the resulting
 * ALSA sound card.  Runs under the card mutex; on any failure it unwinds
 * through the goto ladder at the bottom (aux devs -> DAI links -> card
 * remove -> snd_card_free) and returns the error.
 */
static int snd_soc_instantiate_card(struct snd_soc_card *card)
{
	struct snd_soc_codec *codec;
	struct snd_soc_dai_link *dai_link;
	int ret, i, order, dai_fmt;

	mutex_lock_nested(&card->mutex, SND_SOC_CARD_CLASS_INIT);

	/* bind DAIs */
	for (i = 0; i < card->num_links; i++) {
		ret = soc_bind_dai_link(card, i);
		if (ret != 0)
			goto base_error;
	}

	/* check aux_devs too */
	for (i = 0; i < card->num_aux_devs; i++) {
		ret = soc_check_aux_dev(card, i);
		if (ret != 0)
			goto base_error;
	}

	/* initialize the register cache for each available codec */
	list_for_each_entry(codec, &codec_list, list) {
		if (codec->cache_init)
			continue;
		ret = snd_soc_init_codec_cache(codec);
		if (ret < 0)
			goto base_error;
	}

	/* card bind complete so register a sound card */
	ret = snd_card_create(SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1,
			card->owner, 0, &card->snd_card);
	if (ret < 0) {
		dev_err(card->dev,
			"ASoC: can't create sound card for card %s: %d\n",
			card->name, ret);
		goto base_error;
	}
	card->snd_card->dev = card->dev;

	card->dapm.bias_level = SND_SOC_BIAS_OFF;
	card->dapm.dev = card->dev;
	card->dapm.card = card;
	list_add(&card->dapm.list, &card->dapm_list);

#ifdef CONFIG_DEBUG_FS
	snd_soc_dapm_debugfs_init(&card->dapm, card->debugfs_card_root);
#endif

#ifdef CONFIG_PM_SLEEP
	/* deferred resume work */
	INIT_WORK(&card->deferred_resume_work, soc_resume_deferred);
#endif

	if (card->dapm_widgets)
		snd_soc_dapm_new_controls(&card->dapm, card->dapm_widgets,
					  card->num_dapm_widgets);

	/* initialise the sound card only once */
	if (card->probe) {
		ret = card->probe(card);
		if (ret < 0)
			goto card_probe_error;
	}

	/* probe all components used by DAI links on this card */
	for (order = SND_SOC_COMP_ORDER_FIRST; order <= SND_SOC_COMP_ORDER_LAST;
			order++) {
		for (i = 0; i < card->num_links; i++) {
			ret = soc_probe_link_components(card, i, order);
			if (ret < 0) {
				dev_err(card->dev,
					"ASoC: failed to instantiate card %d\n",
					ret);
				goto probe_dai_err;
			}
		}
	}

	/* probe all DAI links on this card */
	for (order = SND_SOC_COMP_ORDER_FIRST; order <= SND_SOC_COMP_ORDER_LAST;
			order++) {
		for (i = 0; i < card->num_links; i++) {
			ret = soc_probe_link_dais(card, i, order);
			if (ret < 0) {
				dev_err(card->dev,
					"ASoC: failed to instantiate card %d\n",
					ret);
				goto probe_dai_err;
			}
		}
	}

	for (i = 0; i < card->num_aux_devs; i++) {
		ret = soc_probe_aux_dev(card, i);
		if (ret < 0) {
			dev_err(card->dev,
				"ASoC: failed to add auxiliary devices %d\n",
				ret);
			goto probe_aux_dev_err;
		}
	}

	snd_soc_dapm_link_dai_widgets(card);
	snd_soc_dapm_connect_dai_link_widgets(card);

	if (card->controls)
		snd_soc_add_card_controls(card, card->controls,
					  card->num_controls);

	if (card->dapm_routes)
		snd_soc_dapm_add_routes(&card->dapm, card->dapm_routes,
					card->num_dapm_routes);

	/* apply each link's dai_fmt to both ends of the link */
	for (i = 0; i < card->num_links; i++) {
		dai_link = &card->dai_link[i];
		dai_fmt = dai_link->dai_fmt;

		if (dai_fmt) {
			ret = snd_soc_dai_set_fmt(card->rtd[i].codec_dai,
						  dai_fmt);
			if (ret != 0 && ret != -ENOTSUPP)
				dev_warn(card->rtd[i].codec_dai->dev,
					 "ASoC: Failed to set DAI format: %d\n",
					 ret);
		}

		/* If this is a regular CPU link there will be a platform */
		if (dai_fmt &&
		    (dai_link->platform_name || dai_link->platform_of_node)) {
			ret = snd_soc_dai_set_fmt(card->rtd[i].cpu_dai,
						  dai_fmt);
			if (ret != 0 && ret != -ENOTSUPP)
				dev_warn(card->rtd[i].cpu_dai->dev,
					 "ASoC: Failed to set DAI format: %d\n",
					 ret);
		} else if (dai_fmt) {
			/* Flip the polarity for the "CPU" end */
			dai_fmt &= ~SND_SOC_DAIFMT_MASTER_MASK;
			switch (dai_link->dai_fmt &
				SND_SOC_DAIFMT_MASTER_MASK) {
			case SND_SOC_DAIFMT_CBM_CFM:
				dai_fmt |= SND_SOC_DAIFMT_CBS_CFS;
				break;
			case SND_SOC_DAIFMT_CBM_CFS:
				dai_fmt |= SND_SOC_DAIFMT_CBS_CFM;
				break;
			case SND_SOC_DAIFMT_CBS_CFM:
				dai_fmt |= SND_SOC_DAIFMT_CBM_CFS;
				break;
			case SND_SOC_DAIFMT_CBS_CFS:
				dai_fmt |= SND_SOC_DAIFMT_CBM_CFM;
				break;
			}

			ret = snd_soc_dai_set_fmt(card->rtd[i].cpu_dai,
						  dai_fmt);
			if (ret != 0 && ret != -ENOTSUPP)
				dev_warn(card->rtd[i].cpu_dai->dev,
					 "ASoC: Failed to set DAI format: %d\n",
					 ret);
		}
	}

	snprintf(card->snd_card->shortname, sizeof(card->snd_card->shortname),
		 "%s", card->name);
	snprintf(card->snd_card->longname, sizeof(card->snd_card->longname),
		 "%s", card->long_name ? card->long_name : card->name);
	snprintf(card->snd_card->driver, sizeof(card->snd_card->driver),
		 "%s", card->driver_name ? card->driver_name : card->name);
	/* the driver string may not contain characters outside [A-Za-z0-9_-] */
	for (i = 0; i < ARRAY_SIZE(card->snd_card->driver); i++) {
		switch (card->snd_card->driver[i]) {
		case '_':
		case '-':
		case '\0':
			break;
		default:
			if (!isalnum(card->snd_card->driver[i]))
				card->snd_card->driver[i] = '_';
			break;
		}
	}

	if (card->late_probe) {
		ret = card->late_probe(card);
		if (ret < 0) {
			dev_err(card->dev, "ASoC: %s late_probe() failed: %d\n",
				card->name, ret);
			goto probe_aux_dev_err;
		}
	}

	if (card->fully_routed)
		/* routing is complete, so pins not in the map can be NC'd */
		list_for_each_entry(codec, &card->codec_dev_list, card_list)
			snd_soc_dapm_auto_nc_codec_pins(codec);

	snd_soc_dapm_new_widgets(card);

	ret = snd_card_register(card->snd_card);
	if (ret < 0) {
		dev_err(card->dev, "ASoC: failed to register soundcard %d\n",
			ret);
		goto probe_aux_dev_err;
	}

#ifdef CONFIG_SND_SOC_AC97_BUS
	/* register any AC97 codecs */
	for (i = 0; i < card->num_rtd; i++) {
		ret = soc_register_ac97_dai_link(&card->rtd[i]);
		if (ret < 0) {
			dev_err(card->dev,
				"ASoC: failed to register AC97: %d\n", ret);
			while (--i >= 0)
				soc_unregister_ac97_dai_link(card->rtd[i].codec);
			goto probe_aux_dev_err;
		}
	}
#endif

	card->instantiated = 1;
	snd_soc_dapm_sync(&card->dapm);
	mutex_unlock(&card->mutex);

	return 0;

probe_aux_dev_err:
	for (i = 0; i < card->num_aux_devs; i++)
		soc_remove_aux_dev(card, i);

probe_dai_err:
	soc_remove_dai_links(card);

card_probe_error:
	if (card->remove)
		card->remove(card);

	snd_card_free(card->snd_card);

base_error:
	mutex_unlock(&card->mutex);
	return ret;
}

/* probes a new socdev */
static int soc_probe(struct platform_device *pdev)
{
	struct snd_soc_card *card = platform_get_drvdata(pdev);

	/*
	 * no card, so machine driver
	 * should be registering card
	 * we should not be here in that case so ret error
	 */
	if (!card)
		return -EINVAL;

	dev_warn(&pdev->dev,
		 "ASoC: machine %s should use snd_soc_register_card()\n",
		 card->name);

	/* Bodge while we unpick instantiation */
	card->dev = &pdev->dev;

	return snd_soc_register_card(card);
}

/*
 * Release everything snd_soc_instantiate_card() set up: flush pending
 * pmdown work, remove aux devices and DAI links, then free the ALSA card.
 */
static int soc_cleanup_card_resources(struct snd_soc_card *card)
{
	int i;

	/* make sure any delayed work runs */
	for (i = 0; i < card->num_rtd; i++) {
		struct snd_soc_pcm_runtime *rtd = &card->rtd[i];
		flush_delayed_work(&rtd->delayed_work);
	}

	/* remove auxiliary devices */
	for (i = 0; i < card->num_aux_devs; i++)
		soc_remove_aux_dev(card, i);

	/* remove and free each DAI */
	soc_remove_dai_links(card);

	soc_cleanup_card_debugfs(card);

	/* remove the card */
	if (card->remove)
		card->remove(card);

	snd_soc_dapm_free(&card->dapm);

	snd_card_free(card->snd_card);
	return 0;
}

/* removes a socdev */
static int soc_remove(struct platform_device *pdev)
{
	struct snd_soc_card *card = platform_get_drvdata(pdev);

	snd_soc_unregister_card(card);
	return 0;
}

/*
 * Power-off hook for the soc-audio platform device: flush pmdown work and
 * force the DAPM graph down before the machine powers off.
 */
int snd_soc_poweroff(struct device *dev)
{
	struct snd_soc_card *card = dev_get_drvdata(dev);
	int i;

	if (!card->instantiated)
		return 0;

	/* Flush out pmdown_time work - we actually do want to run it
	 * now, we're shutting down so no imminent restart.
	 */
	for (i = 0; i < card->num_rtd; i++) {
		struct snd_soc_pcm_runtime *rtd = &card->rtd[i];
		flush_delayed_work(&rtd->delayed_work);
	}

	snd_soc_dapm_shutdown(card);

	/* deactivate pins to sleep state */
	for (i = 0; i < card->num_rtd; i++) {
		struct snd_soc_dai *cpu_dai = card->rtd[i].cpu_dai;
		struct snd_soc_dai *codec_dai = card->rtd[i].codec_dai;
		pinctrl_pm_select_sleep_state(codec_dai->dev);
		pinctrl_pm_select_sleep_state(cpu_dai->dev);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(snd_soc_poweroff);

const struct dev_pm_ops snd_soc_pm_ops = {
	.suspend = snd_soc_suspend,
	.resume = snd_soc_resume,
	.freeze = snd_soc_suspend,
	.thaw = snd_soc_resume,
	.poweroff = snd_soc_poweroff,
	.restore = snd_soc_resume,
};
EXPORT_SYMBOL_GPL(snd_soc_pm_ops);

/* ASoC platform driver */
static struct platform_driver soc_driver = {
	.driver		= {
		.name		= "soc-audio",
		.owner		= THIS_MODULE,
		.pm		= &snd_soc_pm_ops,
	},
	.probe		= soc_probe,
	.remove		= soc_remove,
};

/**
 * snd_soc_codec_volatile_register: Report if a register is volatile.
 *
 * @codec: CODEC to query.
 * @reg: Register to query.
 *
 * Boolean function indicating if a CODEC register is volatile.
 */
int snd_soc_codec_volatile_register(struct snd_soc_codec *codec,
				    unsigned int reg)
{
	/* default to non-volatile if the driver provides no callback */
	if (codec->volatile_register)
		return codec->volatile_register(codec, reg);
	else
		return 0;
}
EXPORT_SYMBOL_GPL(snd_soc_codec_volatile_register);

/**
 * snd_soc_codec_readable_register: Report if a register is readable.
 *
 * @codec: CODEC to query.
 * @reg: Register to query.
 *
 * Boolean function indicating if a CODEC register is readable.
 */
int snd_soc_codec_readable_register(struct snd_soc_codec *codec,
				    unsigned int reg)
{
	/* default to readable if the driver provides no callback */
	if (codec->readable_register)
		return codec->readable_register(codec, reg);
	else
		return 1;
}
EXPORT_SYMBOL_GPL(snd_soc_codec_readable_register);

/**
 * snd_soc_codec_writable_register: Report if a register is writable.
 *
 * @codec: CODEC to query.
 * @reg: Register to query.
 *
 * Boolean function indicating if a CODEC register is writable.
 */
int snd_soc_codec_writable_register(struct snd_soc_codec *codec,
				    unsigned int reg)
{
	/* default to writable if the driver provides no callback */
	if (codec->writable_register)
		return codec->writable_register(codec, reg);
	else
		return 1;
}
EXPORT_SYMBOL_GPL(snd_soc_codec_writable_register);

/* Read a platform register via the driver callback; -1 if unsupported. */
int snd_soc_platform_read(struct snd_soc_platform *platform,
					unsigned int reg)
{
	unsigned int ret;

	if (!platform->driver->read) {
		dev_err(platform->dev, "ASoC: platform has no read back\n");
		return -1;
	}

	ret = platform->driver->read(platform, reg);
	dev_dbg(platform->dev, "read %x => %x\n", reg, ret);
	trace_snd_soc_preg_read(platform, reg, ret);

	return ret;
}
EXPORT_SYMBOL_GPL(snd_soc_platform_read);

/* Write a platform register via the driver callback; -1 if unsupported. */
int snd_soc_platform_write(struct snd_soc_platform *platform,
					 unsigned int reg, unsigned int val)
{
	if (!platform->driver->write) {
		dev_err(platform->dev, "ASoC: platform has no write back\n");
		return -1;
	}

	dev_dbg(platform->dev, "write %x = %x\n", reg, val);
	trace_snd_soc_preg_write(platform, reg, val);
	return platform->driver->write(platform, reg, val);
}
EXPORT_SYMBOL_GPL(snd_soc_platform_write);

/**
 * snd_soc_new_ac97_codec - initialise AC97 device
 * @codec: audio codec
 * @ops: AC97 bus operations
 * @num: AC97 codec number
 *
 * Initialises AC97 codec resources for use by ad-hoc devices only.
 *
 * Returns 0 on success or -ENOMEM if either allocation fails (in which
 * case no partial state is left behind).
 */
int snd_soc_new_ac97_codec(struct snd_soc_codec *codec,
	struct snd_ac97_bus_ops *ops, int num)
{
	mutex_lock(&codec->mutex);

	codec->ac97 = kzalloc(sizeof(struct snd_ac97), GFP_KERNEL);
	if (codec->ac97 == NULL) {
		mutex_unlock(&codec->mutex);
		return -ENOMEM;
	}

	codec->ac97->bus = kzalloc(sizeof(struct snd_ac97_bus), GFP_KERNEL);
	if (codec->ac97->bus == NULL) {
		/* roll back the first allocation on failure */
		kfree(codec->ac97);
		codec->ac97 = NULL;
		mutex_unlock(&codec->mutex);
		return -ENOMEM;
	}

	codec->ac97->bus->ops = ops;
	codec->ac97->num = num;

	/*
	 * Mark the AC97 device to be created by us. This way we ensure that the
	 * device will be registered with the device subsystem later on.
	 */
	codec->ac97_created = 1;

	mutex_unlock(&codec->mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(snd_soc_new_ac97_codec);

/* Shared reset wiring (pinctrl states + GPIO numbers) for the generic
 * AC97 reset helpers below; filled in by snd_soc_set_ac97_ops_of_reset(). */
static struct snd_ac97_reset_cfg snd_ac97_rst_cfg;

/* Generic AC97 warm reset: pulse SYNC high while in the warm-reset state. */
static void snd_soc_ac97_warm_reset(struct snd_ac97 *ac97)
{
	struct pinctrl *pctl = snd_ac97_rst_cfg.pctl;

	pinctrl_select_state(pctl, snd_ac97_rst_cfg.pstate_warm_reset);

	gpio_direction_output(snd_ac97_rst_cfg.gpio_sync, 1);

	udelay(10);

	gpio_direction_output(snd_ac97_rst_cfg.gpio_sync, 0);

	pinctrl_select_state(pctl, snd_ac97_rst_cfg.pstate_run);
	msleep(2);
}

/* Generic AC97 cold reset: hold SYNC/SDATA low and pulse RESET. */
static void snd_soc_ac97_reset(struct snd_ac97 *ac97)
{
	struct pinctrl *pctl = snd_ac97_rst_cfg.pctl;

	pinctrl_select_state(pctl, snd_ac97_rst_cfg.pstate_reset);

	gpio_direction_output(snd_ac97_rst_cfg.gpio_sync, 0);
	gpio_direction_output(snd_ac97_rst_cfg.gpio_sdata, 0);
	gpio_direction_output(snd_ac97_rst_cfg.gpio_reset, 0);

	udelay(10);

	gpio_direction_output(snd_ac97_rst_cfg.gpio_reset, 1);

	pinctrl_select_state(pctl, snd_ac97_rst_cfg.pstate_run);
	msleep(2);
}

/*
 * Parse the AC97 reset pinctrl states ("ac97-reset", "ac97-warm-reset",
 * "ac97-running") and the three "ac97-gpios" (sync, sdata, reset) from
 * @dev's device-tree node into @cfg.  GPIOs are devm-requested.
 */
static int snd_soc_ac97_parse_pinctl(struct device *dev,
		struct snd_ac97_reset_cfg *cfg)
{
	struct pinctrl *p;
	struct pinctrl_state *state;
	int gpio;
	int ret;

	p = devm_pinctrl_get(dev);
	if (IS_ERR(p)) {
		dev_err(dev, "Failed to get pinctrl\n");
		return PTR_RET(p);
	}
	cfg->pctl = p;

	state = pinctrl_lookup_state(p, "ac97-reset");
	if (IS_ERR(state)) {
		dev_err(dev, "Can't find pinctrl state ac97-reset\n");
		return PTR_RET(state);
	}
	cfg->pstate_reset = state;

	state = pinctrl_lookup_state(p, "ac97-warm-reset");
	if (IS_ERR(state)) {
		dev_err(dev, "Can't find pinctrl state ac97-warm-reset\n");
		return PTR_RET(state);
	}
	cfg->pstate_warm_reset = state;

	state = pinctrl_lookup_state(p, "ac97-running");
	if (IS_ERR(state)) {
		dev_err(dev, "Can't find pinctrl state ac97-running\n");
		return PTR_RET(state);
	}
	cfg->pstate_run = state;

	gpio = of_get_named_gpio(dev->of_node, "ac97-gpios", 0);
	if (gpio < 0) {
		dev_err(dev, "Can't find ac97-sync gpio\n");
		return gpio;
	}
	ret = devm_gpio_request(dev, gpio, "AC97 link sync");
	if (ret) {
		dev_err(dev, "Failed requesting ac97-sync gpio\n");
		return ret;
	}
	cfg->gpio_sync = gpio;

	gpio = of_get_named_gpio(dev->of_node, "ac97-gpios", 1);
	if (gpio < 0) {
		dev_err(dev, "Can't find ac97-sdata gpio %d\n", gpio);
		return gpio;
	}
	ret = devm_gpio_request(dev, gpio, "AC97 link sdata");
	if (ret) {
		dev_err(dev, "Failed requesting ac97-sdata gpio\n");
		return ret;
	}
	cfg->gpio_sdata = gpio;

	gpio = of_get_named_gpio(dev->of_node, "ac97-gpios", 2);
	if (gpio < 0) {
		dev_err(dev, "Can't find ac97-reset gpio\n");
		return gpio;
	}
	ret = devm_gpio_request(dev, gpio, "AC97 link reset");
	if (ret) {
		dev_err(dev, "Failed requesting ac97-reset gpio\n");
		return ret;
	}
	cfg->gpio_reset = gpio;

	return 0;
}

/* Global AC97 bus ops shared by AC97 controller and codec drivers. */
struct snd_ac97_bus_ops *soc_ac97_ops;
EXPORT_SYMBOL_GPL(soc_ac97_ops);

/*
 * Install the global AC97 bus ops.  Setting the same ops again is a no-op;
 * replacing ops that are already set (without clearing first) is refused.
 */
int snd_soc_set_ac97_ops(struct snd_ac97_bus_ops *ops)
{
	if (ops == soc_ac97_ops)
		return 0;

	if (soc_ac97_ops && ops)
		return -EBUSY;

	soc_ac97_ops = ops;

	return 0;
}
EXPORT_SYMBOL_GPL(snd_soc_set_ac97_ops);

/**
 * snd_soc_set_ac97_ops_of_reset - Set ac97 ops with generic ac97 reset functions
 *
 * This function sets the reset and warm_reset properties of ops and parses
 * the device node of pdev to get pinctrl states and gpio numbers to use.
 */
int snd_soc_set_ac97_ops_of_reset(struct snd_ac97_bus_ops *ops,
		struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct snd_ac97_reset_cfg cfg;
	int ret;

	ret = snd_soc_ac97_parse_pinctl(dev, &cfg);
	if (ret)
		return ret;

	ret = snd_soc_set_ac97_ops(ops);
	if (ret)
		return ret;

	ops->warm_reset = snd_soc_ac97_warm_reset;
	ops->reset = snd_soc_ac97_reset;

	/* publish the parsed config only after everything succeeded */
	snd_ac97_rst_cfg = cfg;
	return 0;
}
EXPORT_SYMBOL_GPL(snd_soc_set_ac97_ops_of_reset);

/**
 * snd_soc_free_ac97_codec - free AC97 codec device
 * @codec: audio codec
 *
 * Frees AC97 codec device resources.
 */
void snd_soc_free_ac97_codec(struct snd_soc_codec *codec)
{
	mutex_lock(&codec->mutex);
#ifdef CONFIG_SND_SOC_AC97_BUS
	soc_unregister_ac97_dai_link(codec);
#endif
	kfree(codec->ac97->bus);
	kfree(codec->ac97);
	codec->ac97 = NULL;
	codec->ac97_created = 0;
	mutex_unlock(&codec->mutex);
}
EXPORT_SYMBOL_GPL(snd_soc_free_ac97_codec);

/* Read a CODEC register through the codec's read op, with trace/debug. */
unsigned int snd_soc_read(struct snd_soc_codec *codec, unsigned int reg)
{
	unsigned int ret;

	ret = codec->read(codec, reg);
	dev_dbg(codec->dev, "read %x => %x\n", reg, ret);
	trace_snd_soc_reg_read(codec, reg, ret);
	return ret;
}
EXPORT_SYMBOL_GPL(snd_soc_read);

/* Write a CODEC register through the codec's write op, with trace/debug. */
unsigned int snd_soc_write(struct snd_soc_codec *codec,
			   unsigned int reg, unsigned int val)
{
	dev_dbg(codec->dev, "write %x = %x\n", reg, val);
	trace_snd_soc_reg_write(codec, reg, val);
	return codec->write(codec, reg, val);
}
EXPORT_SYMBOL_GPL(snd_soc_write);

/**
 * snd_soc_update_bits - update codec register bits
 * @codec: audio codec
 * @reg: codec register
 * @mask: register mask
 * @value: new value
 *
 * Writes new register value.
 *
 * Returns 1 for change, 0 for no change, or negative error code.
 */
int snd_soc_update_bits(struct snd_soc_codec *codec, unsigned short reg,
				unsigned int mask, unsigned int value)
{
	bool change;
	unsigned int old, new;
	int ret;

	if (codec->using_regmap) {
		/* regmap does the read-modify-write and change detection */
		ret = regmap_update_bits_check(codec->control_data, reg,
					       mask, value, &change);
	} else {
		ret = snd_soc_read(codec, reg);
		if (ret < 0)
			return ret;

		old = ret;
		new = (old & ~mask) | (value & mask);
		change = old != new;
		/* only touch the hardware when the value actually differs */
		if (change)
			ret = snd_soc_write(codec, reg, new);
	}

	if (ret < 0)
		return ret;

	return change;
}
EXPORT_SYMBOL_GPL(snd_soc_update_bits);

/**
 * snd_soc_update_bits_locked - update codec register bits
 * @codec: audio codec
 * @reg: codec register
 * @mask: register mask
 * @value: new value
 *
 * Writes new register value, and takes the codec mutex.
 *
 * Returns 1 for change else 0.
*/ int snd_soc_update_bits_locked(struct snd_soc_codec *codec, unsigned short reg, unsigned int mask, unsigned int value) { int change; mutex_lock(&codec->mutex); change = snd_soc_update_bits(codec, reg, mask, value); mutex_unlock(&codec->mutex); return change; } EXPORT_SYMBOL_GPL(snd_soc_update_bits_locked); /** * snd_soc_test_bits - test register for change * @codec: audio codec * @reg: codec register * @mask: register mask * @value: new value * * Tests a register with a new value and checks if the new value is * different from the old value. * * Returns 1 for change else 0. */ int snd_soc_test_bits(struct snd_soc_codec *codec, unsigned short reg, unsigned int mask, unsigned int value) { int change; unsigned int old, new; old = snd_soc_read(codec, reg); new = (old & ~mask) | value; change = old != new; return change; } EXPORT_SYMBOL_GPL(snd_soc_test_bits); /** * snd_soc_cnew - create new control * @_template: control template * @data: control private data * @long_name: control long name * @prefix: control name prefix * * Create a new mixer control from a template control. * * Returns 0 for success, else error. 
 */
struct snd_kcontrol *snd_soc_cnew(const struct snd_kcontrol_new *_template,
				  void *data, const char *long_name,
				  const char *prefix)
{
	struct snd_kcontrol_new template;
	struct snd_kcontrol *kcontrol;
	char *name = NULL;

	memcpy(&template, _template, sizeof(template));
	template.index = 0;

	if (!long_name)
		long_name = template.name;

	if (prefix) {
		name = kasprintf(GFP_KERNEL, "%s %s", prefix, long_name);
		if (!name)
			return NULL;

		template.name = name;
	} else {
		template.name = long_name;
	}

	kcontrol = snd_ctl_new1(&template, data);

	/* snd_ctl_new1() copies the name, so the kasprintf buffer can go */
	kfree(name);

	return kcontrol;
}
EXPORT_SYMBOL_GPL(snd_soc_cnew);

/*
 * Add @num_controls controls from @controls to @card, instantiating each
 * via snd_soc_cnew() with the given @prefix and private @data.  Stops and
 * returns the error of the first control that fails to add.
 */
static int snd_soc_add_controls(struct snd_card *card, struct device *dev,
	const struct snd_kcontrol_new *controls, int num_controls,
	const char *prefix, void *data)
{
	int err, i;

	for (i = 0; i < num_controls; i++) {
		const struct snd_kcontrol_new *control = &controls[i];
		err = snd_ctl_add(card, snd_soc_cnew(control, data,
						     control->name, prefix));
		if (err < 0) {
			dev_err(dev, "ASoC: Failed to add %s: %d\n",
				control->name, err);
			return err;
		}
	}

	return 0;
}

/*
 * Look up a card control by exact name; returns NULL when @name is NULL
 * or no control matches.
 */
struct snd_kcontrol *snd_soc_card_get_kcontrol(struct snd_soc_card *soc_card,
					       const char *name)
{
	struct snd_card *card = soc_card->snd_card;
	struct snd_kcontrol *kctl;

	if (unlikely(!name))
		return NULL;

	list_for_each_entry(kctl, &card->controls, list)
		if (!strncmp(kctl->id.name, name, sizeof(kctl->id.name)))
			return kctl;
	return NULL;
}
EXPORT_SYMBOL_GPL(snd_soc_card_get_kcontrol);

/**
 * snd_soc_add_codec_controls - add an array of controls to a codec.
 * Convenience function to add a list of controls. Many codecs were
 * duplicating this code.
 *
 * @codec: codec to add controls to
 * @controls: array of controls to add
 * @num_controls: number of elements in the array
 *
 * Return 0 for success, else error.
 */
int snd_soc_add_codec_controls(struct snd_soc_codec *codec,
	const struct snd_kcontrol_new *controls, int num_controls)
{
	struct snd_card *card = codec->card->snd_card;

	/* codec controls get the codec's name prefix applied */
	return snd_soc_add_controls(card, codec->dev, controls, num_controls,
			codec->name_prefix, codec);
}
EXPORT_SYMBOL_GPL(snd_soc_add_codec_controls);

/**
 * snd_soc_add_platform_controls - add an array of controls to a platform.
 * Convenience function to add a list of controls.
 *
 * @platform: platform to add controls to
 * @controls: array of controls to add
 * @num_controls: number of elements in the array
 *
 * Return 0 for success, else error.
 */
int snd_soc_add_platform_controls(struct snd_soc_platform *platform,
	const struct snd_kcontrol_new *controls, int num_controls)
{
	struct snd_card *card = platform->card->snd_card;

	return snd_soc_add_controls(card, platform->dev, controls, num_controls,
			NULL, platform);
}
EXPORT_SYMBOL_GPL(snd_soc_add_platform_controls);

/**
 * snd_soc_add_card_controls - add an array of controls to a SoC card.
 * Convenience function to add a list of controls.
 *
 * @soc_card: SoC card to add controls to
 * @controls: array of controls to add
 * @num_controls: number of elements in the array
 *
 * Return 0 for success, else error.
 */
int snd_soc_add_card_controls(struct snd_soc_card *soc_card,
	const struct snd_kcontrol_new *controls, int num_controls)
{
	struct snd_card *card = soc_card->snd_card;

	return snd_soc_add_controls(card, soc_card->dev, controls, num_controls,
			NULL, soc_card);
}
EXPORT_SYMBOL_GPL(snd_soc_add_card_controls);

/**
 * snd_soc_add_dai_controls - add an array of controls to a DAI.
 * Convenience function to add a list of controls.
 *
 * @dai: DAI to add controls to
 * @controls: array of controls to add
 * @num_controls: number of elements in the array
 *
 * Return 0 for success, else error.
 */
int snd_soc_add_dai_controls(struct snd_soc_dai *dai,
	const struct snd_kcontrol_new *controls, int num_controls)
{
	struct snd_card *card = dai->card->snd_card;

	return snd_soc_add_controls(card, dai->dev, controls, num_controls,
			NULL, dai);
}
EXPORT_SYMBOL_GPL(snd_soc_add_dai_controls);

/**
 * snd_soc_info_enum_double - enumerated double mixer info callback
 * @kcontrol: mixer control
 * @uinfo: control element information
 *
 * Callback to provide information about a double enumerated
 * mixer control.
 *
 * Returns 0 for success.
 */
int snd_soc_info_enum_double(struct snd_kcontrol *kcontrol,
	struct snd_ctl_elem_info *uinfo)
{
	struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;

	uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
	/* one channel if both halves share a shift, otherwise two */
	uinfo->count = e->shift_l == e->shift_r ? 1 : 2;
	uinfo->value.enumerated.items = e->max;

	/* clamp the requested item before dereferencing the text table */
	if (uinfo->value.enumerated.item > e->max - 1)
		uinfo->value.enumerated.item = e->max - 1;
	strlcpy(uinfo->value.enumerated.name,
		e->texts[uinfo->value.enumerated.item],
		sizeof(uinfo->value.enumerated.name));
	return 0;
}
EXPORT_SYMBOL_GPL(snd_soc_info_enum_double);

/**
 * snd_soc_get_enum_double - enumerated double mixer get callback
 * @kcontrol: mixer control
 * @ucontrol: control element information
 *
 * Callback to get the value of a double enumerated mixer.
 *
 * Returns 0 for success.
 */
int snd_soc_get_enum_double(struct snd_kcontrol *kcontrol,
	struct snd_ctl_elem_value *ucontrol)
{
	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
	struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
	unsigned int val;

	/* both channels live in one register, at shift_l / shift_r */
	val = snd_soc_read(codec, e->reg);
	ucontrol->value.enumerated.item[0]
		= (val >> e->shift_l) & e->mask;
	if (e->shift_l != e->shift_r)
		ucontrol->value.enumerated.item[1] =
			(val >> e->shift_r) & e->mask;

	return 0;
}
EXPORT_SYMBOL_GPL(snd_soc_get_enum_double);

/**
 * snd_soc_put_enum_double - enumerated double mixer put callback
 * @kcontrol: mixer control
 * @ucontrol: control element information
 *
 * Callback to set the value of a double enumerated mixer.
 *
 * Returns 0 for success.
 */
int snd_soc_put_enum_double(struct snd_kcontrol *kcontrol,
	struct snd_ctl_elem_value *ucontrol)
{
	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
	struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
	unsigned int val;
	unsigned int mask;

	/* reject out-of-range items from userspace */
	if (ucontrol->value.enumerated.item[0] > e->max - 1)
		return -EINVAL;
	val = ucontrol->value.enumerated.item[0] << e->shift_l;
	mask = e->mask << e->shift_l;
	if (e->shift_l != e->shift_r) {
		if (ucontrol->value.enumerated.item[1] > e->max - 1)
			return -EINVAL;
		val |= ucontrol->value.enumerated.item[1] << e->shift_r;
		mask |= e->mask << e->shift_r;
	}

	return snd_soc_update_bits_locked(codec, e->reg, mask, val);
}
EXPORT_SYMBOL_GPL(snd_soc_put_enum_double);

/**
 * snd_soc_get_value_enum_double - semi enumerated double mixer get callback
 * @kcontrol: mixer control
 * @ucontrol: control element information
 *
 * Callback to get the value of a double semi enumerated mixer.
 *
 * Semi enumerated mixer: the enumerated items are referred as values. Can be
 * used for handling bitfield coded enumeration for example.
 *
 * Returns 0 for success.
 */
int snd_soc_get_value_enum_double(struct snd_kcontrol *kcontrol,
	struct snd_ctl_elem_value *ucontrol)
{
	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
	struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
	unsigned int reg_val, val, mux;

	reg_val = snd_soc_read(codec, e->reg);
	val = (reg_val >> e->shift_l) & e->mask;
	/* map the raw register value back to its item index;
	 * NOTE(review): an unmatched value leaves mux == e->max */
	for (mux = 0; mux < e->max; mux++) {
		if (val == e->values[mux])
			break;
	}
	ucontrol->value.enumerated.item[0] = mux;
	if (e->shift_l != e->shift_r) {
		val = (reg_val >> e->shift_r) & e->mask;
		for (mux = 0; mux < e->max; mux++) {
			if (val == e->values[mux])
				break;
		}
		ucontrol->value.enumerated.item[1] = mux;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(snd_soc_get_value_enum_double);

/**
 * snd_soc_put_value_enum_double - semi enumerated double mixer put callback
 * @kcontrol: mixer control
 * @ucontrol: control element information
 *
 * Callback to set the value of a double semi enumerated mixer.
 *
 * Semi enumerated mixer: the enumerated items are referred as values. Can be
 * used for handling bitfield coded enumeration for example.
 *
 * Returns 0 for success.
 */
int snd_soc_put_value_enum_double(struct snd_kcontrol *kcontrol,
	struct snd_ctl_elem_value *ucontrol)
{
	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
	struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
	unsigned int val;
	unsigned int mask;

	/* reject out-of-range items from userspace */
	if (ucontrol->value.enumerated.item[0] > e->max - 1)
		return -EINVAL;
	val = e->values[ucontrol->value.enumerated.item[0]] << e->shift_l;
	mask = e->mask << e->shift_l;
	if (e->shift_l != e->shift_r) {
		if (ucontrol->value.enumerated.item[1] > e->max - 1)
			return -EINVAL;
		val |= e->values[ucontrol->value.enumerated.item[1]] << e->shift_r;
		mask |= e->mask << e->shift_r;
	}

	return snd_soc_update_bits_locked(codec, e->reg, mask, val);
}
EXPORT_SYMBOL_GPL(snd_soc_put_value_enum_double);

/**
 * snd_soc_info_volsw - single mixer info callback
 * @kcontrol: mixer control
 * @uinfo: control element information
 *
 * Callback to provide information about a single mixer control, or a double
 * mixer control that spans 2 registers.
 *
 * Returns 0 for success.
 */
int snd_soc_info_volsw(struct snd_kcontrol *kcontrol,
	struct snd_ctl_elem_info *uinfo)
{
	struct soc_mixer_control *mc =
		(struct soc_mixer_control *)kcontrol->private_value;
	int platform_max;

	if (!mc->platform_max)
		mc->platform_max = mc->max;
	platform_max = mc->platform_max;

	/* on/off switches are BOOLEAN unless named as a Volume control */
	if (platform_max == 1 && !strstr(kcontrol->id.name, " Volume"))
		uinfo->type = SNDRV_CTL_ELEM_TYPE_BOOLEAN;
	else
		uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;

	uinfo->count = snd_soc_volsw_is_stereo(mc) ? 2 : 1;
	uinfo->value.integer.min = 0;
	uinfo->value.integer.max = platform_max;
	return 0;
}
EXPORT_SYMBOL_GPL(snd_soc_info_volsw);

/**
 * snd_soc_get_volsw - single mixer get callback
 * @kcontrol: mixer control
 * @ucontrol: control element information
 *
 * Callback to get the value of a single mixer control, or a double mixer
 * control that spans 2 registers.
 *
 * Returns 0 for success.
 */
int snd_soc_get_volsw(struct snd_kcontrol *kcontrol,
	struct snd_ctl_elem_value *ucontrol)
{
	struct soc_mixer_control *mc =
		(struct soc_mixer_control *)kcontrol->private_value;
	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
	unsigned int reg = mc->reg;
	unsigned int reg2 = mc->rreg;
	unsigned int shift = mc->shift;
	unsigned int rshift = mc->rshift;
	int max = mc->max;
	unsigned int mask = (1 << fls(max)) - 1;
	unsigned int invert = mc->invert;

	ucontrol->value.integer.value[0] =
		(snd_soc_read(codec, reg) >> shift) & mask;
	if (invert)
		ucontrol->value.integer.value[0] =
			max - ucontrol->value.integer.value[0];

	if (snd_soc_volsw_is_stereo(mc)) {
		/* same register: second channel at rshift;
		 * separate registers: same shift in reg2 */
		if (reg == reg2)
			ucontrol->value.integer.value[1] =
				(snd_soc_read(codec, reg) >> rshift) & mask;
		else
			ucontrol->value.integer.value[1] =
				(snd_soc_read(codec, reg2) >> shift) & mask;
		if (invert)
			ucontrol->value.integer.value[1] =
				max - ucontrol->value.integer.value[1];
	}

	return 0;
}
EXPORT_SYMBOL_GPL(snd_soc_get_volsw);

/**
 * snd_soc_put_volsw - single mixer put callback
 * @kcontrol: mixer control
 * @ucontrol: control element information
 *
 * Callback to set the value of a single mixer control, or a double mixer
 * control that spans 2 registers.
 *
 * Returns 0 for success.
 */
int snd_soc_put_volsw(struct snd_kcontrol *kcontrol,
	struct snd_ctl_elem_value *ucontrol)
{
	struct soc_mixer_control *mc =
		(struct soc_mixer_control *)kcontrol->private_value;
	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
	unsigned int reg = mc->reg;
	unsigned int reg2 = mc->rreg;
	unsigned int shift = mc->shift;
	unsigned int rshift = mc->rshift;
	int max = mc->max;
	unsigned int mask = (1 << fls(max)) - 1;
	unsigned int invert = mc->invert;
	int err;
	bool type_2r = 0;
	unsigned int val2 = 0;
	unsigned int val, val_mask;

	val = (ucontrol->value.integer.value[0] & mask);
	if (invert)
		val = max - val;
	val_mask = mask << shift;
	val = val << shift;
	if (snd_soc_volsw_is_stereo(mc)) {
		val2 = (ucontrol->value.integer.value[1] & mask);
		if (invert)
			val2 = max - val2;
		if (reg == reg2) {
			/* both channels in one register: merge into one write */
			val_mask |= mask << rshift;
			val |= val2 << rshift;
		} else {
			/* second channel lives in reg2 at the same shift */
			val2 = val2 << shift;
			type_2r = 1;
		}
	}
	err = snd_soc_update_bits_locked(codec, reg, val_mask, val);
	if (err < 0)
		return err;

	if (type_2r)
		err = snd_soc_update_bits_locked(codec, reg2, val_mask,
			val2);

	return err;
}
EXPORT_SYMBOL_GPL(snd_soc_put_volsw);

/**
 * snd_soc_get_volsw_sx - single mixer get callback
 * @kcontrol: mixer control
 * @ucontrol: control element information
 *
 * Callback to get the value of a single mixer control, or a double mixer
 * control that spans 2 registers.
 *
 * Returns 0 for success.
*/ int snd_soc_get_volsw_sx(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol); struct soc_mixer_control *mc = (struct soc_mixer_control *)kcontrol->private_value; unsigned int reg = mc->reg; unsigned int reg2 = mc->rreg; unsigned int shift = mc->shift; unsigned int rshift = mc->rshift; int max = mc->max; int min = mc->min; int mask = (1 << (fls(min + max) - 1)) - 1; ucontrol->value.integer.value[0] = ((snd_soc_read(codec, reg) >> shift) - min) & mask; if (snd_soc_volsw_is_stereo(mc)) ucontrol->value.integer.value[1] = ((snd_soc_read(codec, reg2) >> rshift) - min) & mask; return 0; } EXPORT_SYMBOL_GPL(snd_soc_get_volsw_sx); /** * snd_soc_put_volsw_sx - double mixer set callback * @kcontrol: mixer control * @uinfo: control element information * * Callback to set the value of a double mixer control that spans 2 registers. * * Returns 0 for success. */ int snd_soc_put_volsw_sx(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol); struct soc_mixer_control *mc = (struct soc_mixer_control *)kcontrol->private_value; unsigned int reg = mc->reg; unsigned int reg2 = mc->rreg; unsigned int shift = mc->shift; unsigned int rshift = mc->rshift; int max = mc->max; int min = mc->min; int mask = (1 << (fls(min + max) - 1)) - 1; int err = 0; unsigned short val, val_mask, val2 = 0; val_mask = mask << shift; val = (ucontrol->value.integer.value[0] + min) & mask; val = val << shift; err = snd_soc_update_bits_locked(codec, reg, val_mask, val); if (err < 0) return err; if (snd_soc_volsw_is_stereo(mc)) { val_mask = mask << rshift; val2 = (ucontrol->value.integer.value[1] + min) & mask; val2 = val2 << rshift; if (snd_soc_update_bits_locked(codec, reg2, val_mask, val2)) return err; } return 0; } EXPORT_SYMBOL_GPL(snd_soc_put_volsw_sx); /** * snd_soc_info_volsw_s8 - signed mixer info callback * @kcontrol: mixer control * @uinfo: control 
 *	  element information
 *
 * Callback to provide information about a signed mixer control.
 *
 * Returns 0 for success.
 */
int snd_soc_info_volsw_s8(struct snd_kcontrol *kcontrol,
	struct snd_ctl_elem_info *uinfo)
{
	struct soc_mixer_control *mc =
		(struct soc_mixer_control *)kcontrol->private_value;
	int platform_max;
	int min = mc->min;

	/* Cache the platform limit; default it to the register maximum. */
	if (!mc->platform_max)
		mc->platform_max = mc->max;
	platform_max = mc->platform_max;

	uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
	uinfo->count = 2;
	uinfo->value.integer.min = 0;
	/* Userspace sees a 0-based range; min is added back in put(). */
	uinfo->value.integer.max = platform_max - min;

	return 0;
}
EXPORT_SYMBOL_GPL(snd_soc_info_volsw_s8);

/**
 * snd_soc_get_volsw_s8 - signed mixer get callback
 * @kcontrol: mixer control
 * @ucontrol: control element information
 *
 * Callback to get the value of a signed mixer control.
 *
 * Returns 0 for success.
 */
int snd_soc_get_volsw_s8(struct snd_kcontrol *kcontrol,
	struct snd_ctl_elem_value *ucontrol)
{
	struct soc_mixer_control *mc =
		(struct soc_mixer_control *)kcontrol->private_value;
	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
	unsigned int reg = mc->reg;
	int min = mc->min;
	int val = snd_soc_read(codec, reg);

	/*
	 * Channels are packed as two signed bytes (low byte = channel 0,
	 * high byte = channel 1); rebase each by min for userspace.
	 */
	ucontrol->value.integer.value[0] =
		((signed char)(val & 0xff))-min;
	ucontrol->value.integer.value[1] =
		((signed char)((val >> 8) & 0xff))-min;
	return 0;
}
EXPORT_SYMBOL_GPL(snd_soc_get_volsw_s8);

/**
 * snd_soc_put_volsw_s8 - signed mixer put callback
 * @kcontrol: mixer control
 * @ucontrol: control element information
 *
 * Callback to set the value of a signed mixer control.
 *
 * Returns 0 for success.
 */
int snd_soc_put_volsw_s8(struct snd_kcontrol *kcontrol,
	struct snd_ctl_elem_value *ucontrol)
{
	struct soc_mixer_control *mc =
		(struct soc_mixer_control *)kcontrol->private_value;
	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
	unsigned int reg = mc->reg;
	int min = mc->min;
	unsigned int val;

	/* Pack both channels back into one 16-bit register value. */
	val = (ucontrol->value.integer.value[0]+min) & 0xff;
	val |= ((ucontrol->value.integer.value[1]+min) & 0xff) << 8;

	return snd_soc_update_bits_locked(codec, reg, 0xffff, val);
}
EXPORT_SYMBOL_GPL(snd_soc_put_volsw_s8);

/**
 * snd_soc_info_volsw_range - single mixer info callback with range.
 * @kcontrol: mixer control
 * @uinfo: control element information
 *
 * Callback to provide information, within a range, about a single
 * mixer control.
 *
 * returns 0 for success.
 */
int snd_soc_info_volsw_range(struct snd_kcontrol *kcontrol,
	struct snd_ctl_elem_info *uinfo)
{
	struct soc_mixer_control *mc =
		(struct soc_mixer_control *)kcontrol->private_value;
	int platform_max;
	int min = mc->min;

	/* Default the platform limit to the register maximum. */
	if (!mc->platform_max)
		mc->platform_max = mc->max;
	platform_max = mc->platform_max;

	uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
	uinfo->count = snd_soc_volsw_is_stereo(mc) ? 2 : 1;
	uinfo->value.integer.min = 0;
	uinfo->value.integer.max = platform_max - min;

	return 0;
}
EXPORT_SYMBOL_GPL(snd_soc_info_volsw_range);

/**
 * snd_soc_put_volsw_range - single mixer put value callback with range.
 * @kcontrol: mixer control
 * @ucontrol: control element information
 *
 * Callback to set the value, within a range, for a single mixer control.
 *
 * Returns 0 for success.
 */
int snd_soc_put_volsw_range(struct snd_kcontrol *kcontrol,
	struct snd_ctl_elem_value *ucontrol)
{
	struct soc_mixer_control *mc =
		(struct soc_mixer_control *)kcontrol->private_value;
	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
	unsigned int reg = mc->reg;
	unsigned int rreg = mc->rreg;
	unsigned int shift = mc->shift;
	int min = mc->min;
	int max = mc->max;
	unsigned int mask = (1 << fls(max)) - 1;
	unsigned int invert = mc->invert;
	unsigned int val, val_mask;
	int ret;

	/* Rebase by min, optionally invert, then shift into the field. */
	val = ((ucontrol->value.integer.value[0] + min) & mask);
	if (invert)
		val = max - val;
	val_mask = mask << shift;
	val = val << shift;

	ret = snd_soc_update_bits_locked(codec, reg, val_mask, val);
	if (ret < 0)
		return ret;

	if (snd_soc_volsw_is_stereo(mc)) {
		/* Right channel uses the same shift in register rreg. */
		val = ((ucontrol->value.integer.value[1] + min) & mask);
		if (invert)
			val = max - val;
		val_mask = mask << shift;
		val = val << shift;

		ret = snd_soc_update_bits_locked(codec, rreg, val_mask, val);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(snd_soc_put_volsw_range);

/**
 * snd_soc_get_volsw_range - single mixer get callback with range
 * @kcontrol: mixer control
 * @ucontrol: control element information
 *
 * Callback to get the value, within a range, of a single mixer control.
 *
 * Returns 0 for success.
 */
int snd_soc_get_volsw_range(struct snd_kcontrol *kcontrol,
	struct snd_ctl_elem_value *ucontrol)
{
	struct soc_mixer_control *mc =
		(struct soc_mixer_control *)kcontrol->private_value;
	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
	unsigned int reg = mc->reg;
	unsigned int rreg = mc->rreg;
	unsigned int shift = mc->shift;
	int min = mc->min;
	int max = mc->max;
	unsigned int mask = (1 << fls(max)) - 1;
	unsigned int invert = mc->invert;

	/* Extract, undo inversion, then rebase to the 0-based user range. */
	ucontrol->value.integer.value[0] =
		(snd_soc_read(codec, reg) >> shift) & mask;
	if (invert)
		ucontrol->value.integer.value[0] =
			max - ucontrol->value.integer.value[0];
	ucontrol->value.integer.value[0] =
		ucontrol->value.integer.value[0] - min;

	if (snd_soc_volsw_is_stereo(mc)) {
		ucontrol->value.integer.value[1] =
			(snd_soc_read(codec, rreg) >> shift) & mask;
		if (invert)
			ucontrol->value.integer.value[1] =
				max - ucontrol->value.integer.value[1];
		ucontrol->value.integer.value[1] =
			ucontrol->value.integer.value[1] - min;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(snd_soc_get_volsw_range);

/**
 * snd_soc_limit_volume - Set new limit to an existing volume control.
 *
 * @codec: where to look for the control
 * @name: Name of the control
 * @max: new maximum limit
 *
 * Return 0 for success, else error.
 */
int snd_soc_limit_volume(struct snd_soc_codec *codec,
	const char *name, int max)
{
	struct snd_card *card = codec->card->snd_card;
	struct snd_kcontrol *kctl;
	struct soc_mixer_control *mc;
	int found = 0;
	int ret = -EINVAL;

	/* Sanity check for name and max */
	if (unlikely(!name || max <= 0))
		return -EINVAL;

	/* Locate the named control on the card. */
	list_for_each_entry(kctl, &card->controls, list) {
		if (!strncmp(kctl->id.name, name, sizeof(kctl->id.name))) {
			found = 1;
			break;
		}
	}
	if (found) {
		mc = (struct soc_mixer_control *)kctl->private_value;
		/* Only allow tightening the limit, never widening it. */
		if (max <= mc->max) {
			mc->platform_max = max;
			ret = 0;
		}
	}
	return ret;
}
EXPORT_SYMBOL_GPL(snd_soc_limit_volume);

/* Info callback for raw byte controls: size is regs * bytes-per-reg. */
int snd_soc_bytes_info(struct snd_kcontrol *kcontrol,
		       struct snd_ctl_elem_info *uinfo)
{
	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
	struct soc_bytes *params = (void *)kcontrol->private_value;

	uinfo->type = SNDRV_CTL_ELEM_TYPE_BYTES;
	uinfo->count = params->num_regs * codec->val_bytes;

	return 0;
}
EXPORT_SYMBOL_GPL(snd_soc_bytes_info);

/* Get callback for raw byte controls; only supported with regmap I/O. */
int snd_soc_bytes_get(struct snd_kcontrol *kcontrol,
		      struct snd_ctl_elem_value *ucontrol)
{
	struct soc_bytes *params = (void *)kcontrol->private_value;
	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
	int ret;

	if (codec->using_regmap)
		ret = regmap_raw_read(codec->control_data, params->base,
				      ucontrol->value.bytes.data,
				      params->num_regs * codec->val_bytes);
	else
		ret = -EINVAL;

	/* Hide any masked bytes to ensure consistent data reporting */
	if (ret == 0 && params->mask) {
		switch (codec->val_bytes) {
		case 1:
			ucontrol->value.bytes.data[0] &= ~params->mask;
			break;
		case 2:
			((u16 *)(&ucontrol->value.bytes.data))[0]
				&= cpu_to_be16(~params->mask);
			break;
		case 4:
			((u32 *)(&ucontrol->value.bytes.data))[0]
				&= cpu_to_be32(~params->mask);
			break;
		default:
			return -EINVAL;
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(snd_soc_bytes_get);

/* Put callback for raw byte controls; preserves masked register bits. */
int snd_soc_bytes_put(struct snd_kcontrol *kcontrol,
		      struct snd_ctl_elem_value *ucontrol)
{
	struct soc_bytes *params = (void *)kcontrol->private_value;
	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
	int ret, len;
	unsigned int val;
	void *data;

	if (!codec->using_regmap)
		return -EINVAL;

	len = params->num_regs * codec->val_bytes;

	/* Work on a copy so the incoming control data stays untouched. */
	data = kmemdup(ucontrol->value.bytes.data, len, GFP_KERNEL | GFP_DMA);
	if (!data)
		return -ENOMEM;

	/*
	 * If we've got a mask then we need to preserve the register
	 * bits.  We shouldn't modify the incoming data so take a
	 * copy.
	 */
	if (params->mask) {
		ret = regmap_read(codec->control_data, params->base, &val);
		if (ret != 0)
			goto out;

		val &= params->mask;

		switch (codec->val_bytes) {
		case 1:
			((u8 *)data)[0] &= ~params->mask;
			((u8 *)data)[0] |= val;
			break;
		case 2:
			((u16 *)data)[0] &= cpu_to_be16(~params->mask);
			((u16 *)data)[0] |= cpu_to_be16(val);
			break;
		case 4:
			((u32 *)data)[0] &= cpu_to_be32(~params->mask);
			((u32 *)data)[0] |= cpu_to_be32(val);
			break;
		default:
			ret = -EINVAL;
			goto out;
		}
	}

	ret = regmap_raw_write(codec->control_data, params->base,
			       data, len);

out:
	kfree(data);

	return ret;
}
EXPORT_SYMBOL_GPL(snd_soc_bytes_put);

/**
 * snd_soc_info_xr_sx - signed multi register info callback
 * @kcontrol: mreg control
 * @uinfo: control element information
 *
 * Callback to provide information of a control that can
 * span multiple codec registers which together
 * forms a single signed value in a MSB/LSB manner.
 *
 * Returns 0 for success.
 */
int snd_soc_info_xr_sx(struct snd_kcontrol *kcontrol,
	struct snd_ctl_elem_info *uinfo)
{
	struct soc_mreg_control *mc =
		(struct soc_mreg_control *)kcontrol->private_value;

	uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
	uinfo->count = 1;
	uinfo->value.integer.min = mc->min;
	uinfo->value.integer.max = mc->max;

	return 0;
}
EXPORT_SYMBOL_GPL(snd_soc_info_xr_sx);

/**
 * snd_soc_get_xr_sx - signed multi register get callback
 * @kcontrol: mreg control
 * @ucontrol: control element information
 *
 * Callback to get the value of a control that can span
 * multiple codec registers which together forms a single
 * signed value in a MSB/LSB manner.
 * The control supports
 * specifying total no of bits used to allow for bitfields
 * across the multiple codec registers.
 *
 * Returns 0 for success.
 */
int snd_soc_get_xr_sx(struct snd_kcontrol *kcontrol,
	struct snd_ctl_elem_value *ucontrol)
{
	struct soc_mreg_control *mc =
		(struct soc_mreg_control *)kcontrol->private_value;
	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
	unsigned int regbase = mc->regbase;
	unsigned int regcount = mc->regcount;
	unsigned int regwshift = codec->driver->reg_word_size * BITS_PER_BYTE;
	unsigned int regwmask = (1<<regwshift)-1;
	unsigned int invert = mc->invert;
	unsigned long mask = (1UL<<mc->nbits)-1;
	long min = mc->min;
	long max = mc->max;
	long val = 0;
	unsigned long regval;
	unsigned int i;

	/* Assemble the value MSB-first from the consecutive registers. */
	for (i = 0; i < regcount; i++) {
		regval = snd_soc_read(codec, regbase+i) & regwmask;
		val |= regval << (regwshift*(regcount-i-1));
	}
	val &= mask;
	/* Sign-extend: out-of-range positive values mean a negative field. */
	if (min < 0 && val > max)
		val |= ~mask;
	if (invert)
		val = max - val;
	ucontrol->value.integer.value[0] = val;

	return 0;
}
EXPORT_SYMBOL_GPL(snd_soc_get_xr_sx);

/**
 * snd_soc_put_xr_sx - signed multi register put callback
 * @kcontrol: mreg control
 * @ucontrol: control element information
 *
 * Callback to set the value of a control that can span
 * multiple codec registers which together forms a single
 * signed value in a MSB/LSB manner. The control supports
 * specifying total no of bits used to allow for bitfields
 * across the multiple codec registers.
 *
 * Returns 0 for success.
 */
int snd_soc_put_xr_sx(struct snd_kcontrol *kcontrol,
	struct snd_ctl_elem_value *ucontrol)
{
	struct soc_mreg_control *mc =
		(struct soc_mreg_control *)kcontrol->private_value;
	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
	unsigned int regbase = mc->regbase;
	unsigned int regcount = mc->regcount;
	unsigned int regwshift = codec->driver->reg_word_size * BITS_PER_BYTE;
	unsigned int regwmask = (1<<regwshift)-1;
	unsigned int invert = mc->invert;
	unsigned long mask = (1UL<<mc->nbits)-1;
	long max = mc->max;
	long val = ucontrol->value.integer.value[0];
	unsigned int i, regval, regmask;
	int err;

	if (invert)
		val = max - val;
	val &= mask;
	/* Scatter the value MSB-first across the consecutive registers. */
	for (i = 0; i < regcount; i++) {
		regval = (val >> (regwshift*(regcount-i-1))) & regwmask;
		regmask = (mask >> (regwshift*(regcount-i-1))) & regwmask;
		err = snd_soc_update_bits_locked(codec, regbase+i,
				regmask, regval);
		if (err < 0)
			return err;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(snd_soc_put_xr_sx);

/**
 * snd_soc_get_strobe - strobe get callback
 * @kcontrol: mixer control
 * @ucontrol: control element information
 *
 * Callback get the value of a strobe mixer control.
 *
 * Returns 0 for success.
 */
int snd_soc_get_strobe(struct snd_kcontrol *kcontrol,
	struct snd_ctl_elem_value *ucontrol)
{
	struct soc_mixer_control *mc =
		(struct soc_mixer_control *)kcontrol->private_value;
	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
	unsigned int reg = mc->reg;
	unsigned int shift = mc->shift;
	unsigned int mask = 1 << shift;
	unsigned int invert = mc->invert != 0;
	unsigned int val = snd_soc_read(codec, reg) & mask;

	/* Normalise the masked bit to 0/1 before applying inversion. */
	if (shift != 0 && val != 0)
		val = val >> shift;
	ucontrol->value.enumerated.item[0] = val ^ invert;

	return 0;
}
EXPORT_SYMBOL_GPL(snd_soc_get_strobe);

/**
 * snd_soc_put_strobe - strobe put callback
 * @kcontrol: mixer control
 * @ucontrol: control element information
 *
 * Callback strobe a register bit to high then low (or the inverse)
 * in one pass of a single mixer enum control.
 *
 * Returns 1 for success.
 */
int snd_soc_put_strobe(struct snd_kcontrol *kcontrol,
	struct snd_ctl_elem_value *ucontrol)
{
	struct soc_mixer_control *mc =
		(struct soc_mixer_control *)kcontrol->private_value;
	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
	unsigned int reg = mc->reg;
	unsigned int shift = mc->shift;
	unsigned int mask = 1 << shift;
	unsigned int invert = mc->invert != 0;
	unsigned int strobe = ucontrol->value.enumerated.item[0] != 0;
	/* First write drives the bit one way, second write the other. */
	unsigned int val1 = (strobe ^ invert) ? mask : 0;
	unsigned int val2 = (strobe ^ invert) ? 0 : mask;
	int err;

	err = snd_soc_update_bits_locked(codec, reg, mask, val1);
	if (err < 0)
		return err;

	err = snd_soc_update_bits_locked(codec, reg, mask, val2);
	return err;
}
EXPORT_SYMBOL_GPL(snd_soc_put_strobe);

/**
 * snd_soc_dai_set_sysclk - configure DAI system or master clock.
 * @dai: DAI
 * @clk_id: DAI specific clock ID
 * @freq: new clock frequency in Hz
 * @dir: new clock direction - input/output.
 *
 * Configures the DAI master (MCLK) or system (SYSCLK) clocking.
 */
int snd_soc_dai_set_sysclk(struct snd_soc_dai *dai, int clk_id,
	unsigned int freq, int dir)
{
	/*
	 * NOTE(review): dai->driver->ops is dereferenced without a NULL
	 * check; registration installs null_dai_ops when a driver provides
	 * none (see snd_soc_register_dai*), which presumably makes this
	 * safe for registered DAIs - confirm no other path creates a DAI.
	 */
	if (dai->driver && dai->driver->ops->set_sysclk)
		return dai->driver->ops->set_sysclk(dai, clk_id, freq, dir);
	else if (dai->codec && dai->codec->driver->set_sysclk)
		/* Fall back to the codec-level hook; source is fixed at 0. */
		return dai->codec->driver->set_sysclk(dai->codec, clk_id, 0,
						      freq, dir);
	else
		return -ENOTSUPP;
}
EXPORT_SYMBOL_GPL(snd_soc_dai_set_sysclk);

/**
 * snd_soc_codec_set_sysclk - configure CODEC system or master clock.
 * @codec: CODEC
 * @clk_id: DAI specific clock ID
 * @source: Source for the clock
 * @freq: new clock frequency in Hz
 * @dir: new clock direction - input/output.
 *
 * Configures the CODEC master (MCLK) or system (SYSCLK) clocking.
 */
int snd_soc_codec_set_sysclk(struct snd_soc_codec *codec, int clk_id,
			     int source, unsigned int freq, int dir)
{
	if (codec->driver->set_sysclk)
		return codec->driver->set_sysclk(codec, clk_id, source,
						 freq, dir);
	else
		return -ENOTSUPP;
}
EXPORT_SYMBOL_GPL(snd_soc_codec_set_sysclk);

/**
 * snd_soc_dai_set_clkdiv - configure DAI clock dividers.
 * @dai: DAI
 * @div_id: DAI specific clock divider ID
 * @div: new clock divisor.
 *
 * Configures the clock dividers. This is used to derive the best DAI bit and
 * frame clocks from the system or master clock. It's best to set the DAI bit
 * and frame clocks as low as possible to save system power.
 */
int snd_soc_dai_set_clkdiv(struct snd_soc_dai *dai,
	int div_id, int div)
{
	if (dai->driver && dai->driver->ops->set_clkdiv)
		return dai->driver->ops->set_clkdiv(dai, div_id, div);
	else
		return -EINVAL;
}
EXPORT_SYMBOL_GPL(snd_soc_dai_set_clkdiv);

/**
 * snd_soc_dai_set_pll - configure DAI PLL.
 * @dai: DAI
 * @pll_id: DAI specific PLL ID
 * @source: DAI specific source for the PLL
 * @freq_in: PLL input clock frequency in Hz
 * @freq_out: requested PLL output clock frequency in Hz
 *
 * Configures and enables PLL to generate output clock based on input clock.
 */
int snd_soc_dai_set_pll(struct snd_soc_dai *dai, int pll_id, int source,
	unsigned int freq_in, unsigned int freq_out)
{
	/* Prefer the DAI-level hook, then fall back to the codec driver. */
	if (dai->driver && dai->driver->ops->set_pll)
		return dai->driver->ops->set_pll(dai, pll_id, source,
					 freq_in, freq_out);
	else if (dai->codec && dai->codec->driver->set_pll)
		return dai->codec->driver->set_pll(dai->codec, pll_id, source,
						   freq_in, freq_out);
	else
		return -EINVAL;
}
EXPORT_SYMBOL_GPL(snd_soc_dai_set_pll);

/*
 * snd_soc_codec_set_pll - configure codec PLL.
 * @codec: CODEC
 * @pll_id: DAI specific PLL ID
 * @source: DAI specific source for the PLL
 * @freq_in: PLL input clock frequency in Hz
 * @freq_out: requested PLL output clock frequency in Hz
 *
 * Configures and enables PLL to generate output clock based on input clock.
 */
int snd_soc_codec_set_pll(struct snd_soc_codec *codec, int pll_id, int source,
			  unsigned int freq_in, unsigned int freq_out)
{
	if (codec->driver->set_pll)
		return codec->driver->set_pll(codec, pll_id, source,
					      freq_in, freq_out);
	else
		return -EINVAL;
}
EXPORT_SYMBOL_GPL(snd_soc_codec_set_pll);

/**
 * snd_soc_dai_set_bclk_ratio - configure BCLK to sample rate ratio.
 * @dai: DAI
 * @ratio: Ratio of BCLK to Sample rate.
 *
 * Configures the DAI for a preset BCLK to sample rate ratio.
 */
int snd_soc_dai_set_bclk_ratio(struct snd_soc_dai *dai, unsigned int ratio)
{
	if (dai->driver && dai->driver->ops->set_bclk_ratio)
		return dai->driver->ops->set_bclk_ratio(dai, ratio);
	else
		return -EINVAL;
}
EXPORT_SYMBOL_GPL(snd_soc_dai_set_bclk_ratio);

/**
 * snd_soc_dai_set_fmt - configure DAI hardware audio format.
 * @dai: DAI
 * @fmt: SND_SOC_DAIFMT_ format value.
 *
 * Configures the DAI hardware format and clocking.
 */
int snd_soc_dai_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
{
	if (dai->driver == NULL)
		return -EINVAL;
	/* Distinguish "no driver" (-EINVAL) from "not supported". */
	if (dai->driver->ops->set_fmt == NULL)
		return -ENOTSUPP;
	return dai->driver->ops->set_fmt(dai, fmt);
}
EXPORT_SYMBOL_GPL(snd_soc_dai_set_fmt);

/**
 * snd_soc_dai_set_tdm_slot - configure DAI TDM.
 * @dai: DAI
 * @tx_mask: bitmask representing active TX slots.
 * @rx_mask: bitmask representing active RX slots.
 * @slots: Number of slots in use.
 * @slot_width: Width in bits for each slot.
 *
 * Configures a DAI for TDM operation. Both mask and slots are codec and DAI
 * specific.
 */
int snd_soc_dai_set_tdm_slot(struct snd_soc_dai *dai,
	unsigned int tx_mask, unsigned int rx_mask, int slots, int slot_width)
{
	if (dai->driver && dai->driver->ops->set_tdm_slot)
		return dai->driver->ops->set_tdm_slot(dai, tx_mask, rx_mask,
				slots, slot_width);
	else
		return -EINVAL;
}
EXPORT_SYMBOL_GPL(snd_soc_dai_set_tdm_slot);

/**
 * snd_soc_dai_set_channel_map - configure DAI audio channel map
 * @dai: DAI
 * @tx_num: how many TX channels
 * @tx_slot: pointer to an array which imply the TX slot number channel
 *           0~num-1 uses
 * @rx_num: how many RX channels
 * @rx_slot: pointer to an array which imply the RX slot number channel
 *           0~num-1 uses
 *
 * configure the relationship between channel number and TDM slot number.
 */
int snd_soc_dai_set_channel_map(struct snd_soc_dai *dai,
	unsigned int tx_num, unsigned int *tx_slot,
	unsigned int rx_num, unsigned int *rx_slot)
{
	if (dai->driver && dai->driver->ops->set_channel_map)
		return dai->driver->ops->set_channel_map(dai, tx_num, tx_slot,
			rx_num, rx_slot);
	else
		return -EINVAL;
}
EXPORT_SYMBOL_GPL(snd_soc_dai_set_channel_map);

/**
 * snd_soc_dai_set_tristate - configure DAI system or master clock.
 * @dai: DAI
 * @tristate: tristate enable
 *
 * Tristates the DAI so that others can use it.
 */
int snd_soc_dai_set_tristate(struct snd_soc_dai *dai, int tristate)
{
	if (dai->driver && dai->driver->ops->set_tristate)
		return dai->driver->ops->set_tristate(dai, tristate);
	else
		return -EINVAL;
}
EXPORT_SYMBOL_GPL(snd_soc_dai_set_tristate);

/**
 * snd_soc_dai_digital_mute - configure DAI system or master clock.
 * @dai: DAI
 * @mute: mute enable
 * @direction: stream to mute
 *
 * Mutes the DAI DAC.
 */
int snd_soc_dai_digital_mute(struct snd_soc_dai *dai, int mute,
			     int direction)
{
	if (!dai->driver)
		return -ENOTSUPP;

	/* Prefer the per-stream hook; digital_mute only covers playback. */
	if (dai->driver->ops->mute_stream)
		return dai->driver->ops->mute_stream(dai, mute, direction);
	else if (direction == SNDRV_PCM_STREAM_PLAYBACK &&
		 dai->driver->ops->digital_mute)
		return dai->driver->ops->digital_mute(dai, mute);
	else
		return -ENOTSUPP;
}
EXPORT_SYMBOL_GPL(snd_soc_dai_digital_mute);

/**
 * snd_soc_register_card - Register a card with the ASoC core
 *
 * @card: Card to register
 *
 */
int snd_soc_register_card(struct snd_soc_card *card)
{
	int i, ret;

	if (!card->name || !card->dev)
		return -EINVAL;

	/* Validate every DAI link before touching any global state. */
	for (i = 0; i < card->num_links; i++) {
		struct snd_soc_dai_link *link = &card->dai_link[i];

		/*
		 * Codec must be specified by 1 of name or OF node,
		 * not both or neither.
		 */
		if (!!link->codec_name == !!link->codec_of_node) {
			dev_err(card->dev,
				"ASoC: Neither/both codec name/of_node are set for %s\n",
				link->name);
			return -EINVAL;
		}
		/* Codec DAI name must be specified */
		if (!link->codec_dai_name) {
			dev_err(card->dev,
				"ASoC: codec_dai_name not set for %s\n",
				link->name);
			return -EINVAL;
		}

		/*
		 * Platform may be specified by either name or OF node, but
		 * can be left unspecified, and a dummy platform will be used.
		 */
		if (link->platform_name && link->platform_of_node) {
			dev_err(card->dev,
				"ASoC: Both platform name/of_node are set for %s\n",
				link->name);
			return -EINVAL;
		}

		/*
		 * CPU device may be specified by either name or OF node, but
		 * can be left unspecified, and will be matched based on DAI
		 * name alone..
		 */
		/*
		 * NOTE(review): this only rejects name AND of_node both set,
		 * yet the message says "Neither/both" - message is wider
		 * than the check.
		 */
		if (link->cpu_name && link->cpu_of_node) {
			dev_err(card->dev,
				"ASoC: Neither/both cpu name/of_node are set for %s\n",
				link->name);
			return -EINVAL;
		}
		/*
		 * At least one of CPU DAI name or CPU device name/node must be
		 * specified
		 */
		if (!link->cpu_dai_name &&
		    !(link->cpu_name || link->cpu_of_node)) {
			dev_err(card->dev,
				"ASoC: Neither cpu_dai_name nor cpu_name/of_node are set for %s\n",
				link->name);
			return -EINVAL;
		}
	}

	dev_set_drvdata(card->dev, card);

	snd_soc_initialize_card_lists(card);

	soc_init_card_debugfs(card);

	/* One runtime slot per DAI link plus one per aux device. */
	card->rtd = devm_kzalloc(card->dev,
				 sizeof(struct snd_soc_pcm_runtime) *
				 (card->num_links + card->num_aux_devs),
				 GFP_KERNEL);
	if (card->rtd == NULL)
		return -ENOMEM;
	card->num_rtd = 0;
	card->rtd_aux = &card->rtd[card->num_links];

	for (i = 0; i < card->num_links; i++)
		card->rtd[i].dai_link = &card->dai_link[i];

	INIT_LIST_HEAD(&card->list);
	INIT_LIST_HEAD(&card->dapm_dirty);
	card->instantiated = 0;
	mutex_init(&card->mutex);
	mutex_init(&card->dapm_mutex);

	ret = snd_soc_instantiate_card(card);
	if (ret != 0)
		soc_cleanup_card_debugfs(card);

	/* deactivate pins to sleep state */
	for (i = 0; i < card->num_rtd; i++) {
		struct snd_soc_dai *cpu_dai = card->rtd[i].cpu_dai;
		struct snd_soc_dai *codec_dai = card->rtd[i].codec_dai;
		if (!codec_dai->active)
			pinctrl_pm_select_sleep_state(codec_dai->dev);
		if (!cpu_dai->active)
			pinctrl_pm_select_sleep_state(cpu_dai->dev);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(snd_soc_register_card);

/**
 * snd_soc_unregister_card - Unregister a card with the ASoC core
 *
 * @card: Card to unregister
 *
 */
int snd_soc_unregister_card(struct snd_soc_card *card)
{
	if (card->instantiated)
		soc_cleanup_card_resources(card);
	dev_dbg(card->dev, "ASoC: Unregistered card '%s'\n", card->name);

	return 0;
}
EXPORT_SYMBOL_GPL(snd_soc_unregister_card);

/*
 * Simplify DAI link configuration by removing ".-1" from device names
 * and sanitizing names.
 */
static char *fmt_single_name(struct device *dev, int *id)
{
	char *found, name[NAME_SIZE];
	int id1, id2;

	if (dev_name(dev) == NULL)
		return NULL;

	strlcpy(name, dev_name(dev), NAME_SIZE);

	/* are we a "%s.%d" name (platform and SPI components) */
	found = strstr(name, dev->driver->name);
	if (found) {
		/* get ID */
		if (sscanf(&found[strlen(dev->driver->name)], ".%d", id) == 1) {

			/* discard ID from name if ID == -1 */
			if (*id == -1)
				found[strlen(dev->driver->name)] = '\0';
		}

	} else {
		/* I2C component devices are named "bus-addr" */
		if (sscanf(name, "%x-%x", &id1, &id2) == 2) {
			char tmp[NAME_SIZE];

			/* create unique ID number from I2C addr and bus */
			*id = ((id1 & 0xffff) << 16) + id2;

			/* sanitize component name for DAI link creation */
			snprintf(tmp, NAME_SIZE, "%s.%s",
				 dev->driver->name, name);
			strlcpy(name, tmp, NAME_SIZE);
		} else
			*id = 0;
	}

	/* Caller owns the returned string (kfree). */
	return kstrdup(name, GFP_KERNEL);
}

/*
 * Simplify DAI link naming for single devices with multiple DAIs by removing
 * any ".-1" and using the DAI name (instead of device name).
 */
static inline char *fmt_multiple_name(struct device *dev,
		struct snd_soc_dai_driver *dai_drv)
{
	if (dai_drv->name == NULL) {
		dev_err(dev,
			"ASoC: error - multiple DAI %s registered with no name\n",
			dev_name(dev));
		return NULL;
	}

	/* Caller owns the returned string (kfree). */
	return kstrdup(dai_drv->name, GFP_KERNEL);
}

/**
 * snd_soc_register_dai - Register a DAI with the ASoC core
 *
 * @dai: DAI to register
 */
static int snd_soc_register_dai(struct device *dev,
		struct snd_soc_dai_driver *dai_drv)
{
	struct snd_soc_codec *codec;
	struct snd_soc_dai *dai;

	dev_dbg(dev, "ASoC: dai register %s\n", dev_name(dev));

	dai = kzalloc(sizeof(struct snd_soc_dai), GFP_KERNEL);
	if (dai == NULL)
		return -ENOMEM;

	/* create DAI component name */
	dai->name = fmt_single_name(dev, &dai->id);
	if (dai->name == NULL) {
		kfree(dai);
		return -ENOMEM;
	}

	dai->dev = dev;
	dai->driver = dai_drv;
	dai->dapm.dev = dev;
	/* Guarantee ops is always dereferenceable for this DAI. */
	if (!dai->driver->ops)
		dai->driver->ops = &null_dai_ops;

	mutex_lock(&client_mutex);

	/* Bind the DAI to a codec registered on the same struct device. */
	list_for_each_entry(codec, &codec_list, list) {
		if (codec->dev == dev) {
			dev_dbg(dev, "ASoC: Mapped DAI %s to CODEC %s\n",
				dai->name, codec->name);
			dai->codec = codec;
			break;
		}
	}

	if (!dai->codec)
		dai->dapm.idle_bias_off = 1;

	list_add(&dai->list, &dai_list);

	mutex_unlock(&client_mutex);

	dev_dbg(dev, "ASoC: Registered DAI '%s'\n", dai->name);

	return 0;
}

/**
 * snd_soc_unregister_dai - Unregister a DAI from the ASoC core
 *
 * @dai: DAI to unregister
 */
static void snd_soc_unregister_dai(struct device *dev)
{
	struct snd_soc_dai *dai;

	/* Find the first DAI registered against this device, if any. */
	list_for_each_entry(dai, &dai_list, list) {
		if (dev == dai->dev)
			goto found;
	}
	return;

found:
	mutex_lock(&client_mutex);
	list_del(&dai->list);
	mutex_unlock(&client_mutex);

	dev_dbg(dev, "ASoC: Unregistered DAI '%s'\n", dai->name);
	kfree(dai->name);
	kfree(dai);
}

/**
 * snd_soc_register_dais - Register multiple DAIs with the ASoC core
 *
 * @dai: Array of DAIs to register
 * @count: Number of DAIs
 */
static int snd_soc_register_dais(struct device *dev,
		struct snd_soc_dai_driver *dai_drv, size_t count)
{
	struct snd_soc_codec *codec;
	struct snd_soc_dai *dai;
	int i, ret = 0;

	dev_dbg(dev, "ASoC: dai register %s #%Zu\n", dev_name(dev), count);

	for (i = 0; i < count; i++) {

		dai = kzalloc(sizeof(struct snd_soc_dai), GFP_KERNEL);
		if (dai == NULL) {
			ret = -ENOMEM;
			goto err;
		}

		/* create DAI component name */
		dai->name = fmt_multiple_name(dev, &dai_drv[i]);
		if (dai->name == NULL) {
			kfree(dai);
			ret = -EINVAL;
			goto err;
		}

		dai->dev = dev;
		dai->driver = &dai_drv[i];
		/* Driver-supplied id wins; otherwise use the array index. */
		if (dai->driver->id)
			dai->id = dai->driver->id;
		else
			dai->id = i;
		dai->dapm.dev = dev;
		if (!dai->driver->ops)
			dai->driver->ops = &null_dai_ops;

		mutex_lock(&client_mutex);

		list_for_each_entry(codec, &codec_list, list) {
			if (codec->dev == dev) {
				dev_dbg(dev,
					"ASoC: Mapped DAI %s to CODEC %s\n",
					dai->name, codec->name);
				dai->codec = codec;
				break;
			}
		}

		if (!dai->codec)
			dai->dapm.idle_bias_off = 1;

		list_add(&dai->list, &dai_list);

		mutex_unlock(&client_mutex);

		dev_dbg(dai->dev, "ASoC: Registered DAI '%s'\n", dai->name);
	}

	return 0;

err:
	/* Unwind: unregister the i DAIs registered before the failure. */
	for (i--; i >= 0; i--)
		snd_soc_unregister_dai(dev);

	return ret;
}

/**
 * snd_soc_unregister_dais - Unregister multiple DAIs from the ASoC core
 *
 * @dai: Array of DAIs to unregister
 * @count: Number of DAIs
 */
static void snd_soc_unregister_dais(struct device *dev, size_t count)
{
	int i;

	for (i = 0; i < count; i++)
		snd_soc_unregister_dai(dev);
}

/**
 * snd_soc_register_component - Register a component with the ASoC core
 *
 */
static int
__snd_soc_register_component(struct device *dev,
			     struct snd_soc_component *cmpnt,
			     const struct snd_soc_component_driver *cmpnt_drv,
			     struct snd_soc_dai_driver *dai_drv,
			     int num_dai, bool allow_single_dai)
{
	int ret;

	dev_dbg(dev, "component register %s\n", dev_name(dev));

	if (!cmpnt) {
		dev_err(dev, "ASoC: Failed to connecting component\n");
		return -ENOMEM;
	}

	cmpnt->name = fmt_single_name(dev, &cmpnt->id);
	if (!cmpnt->name) {
		dev_err(dev, "ASoC: Failed to simplifying name\n");
		return -ENOMEM;
	}

	cmpnt->dev	= dev;
	cmpnt->driver	= cmpnt_drv;
	cmpnt->dai_drv	= dai_drv;
	cmpnt->num_dai	= num_dai;

	/*
	 *
snd_soc_register_dai() uses fmt_single_name(), and * snd_soc_register_dais() uses fmt_multiple_name() * for dai->name which is used for name based matching * * this function is used from cpu/codec. * allow_single_dai flag can ignore "codec" driver reworking * since it had been used snd_soc_register_dais(), */ if ((1 == num_dai) && allow_single_dai) ret = snd_soc_register_dai(dev, dai_drv); else ret = snd_soc_register_dais(dev, dai_drv, num_dai); if (ret < 0) { dev_err(dev, "ASoC: Failed to regster DAIs: %d\n", ret); goto error_component_name; } mutex_lock(&client_mutex); list_add(&cmpnt->list, &component_list); mutex_unlock(&client_mutex); dev_dbg(cmpnt->dev, "ASoC: Registered component '%s'\n", cmpnt->name); return ret; error_component_name: kfree(cmpnt->name); return ret; } int snd_soc_register_component(struct device *dev, const struct snd_soc_component_driver *cmpnt_drv, struct snd_soc_dai_driver *dai_drv, int num_dai) { struct snd_soc_component *cmpnt; cmpnt = devm_kzalloc(dev, sizeof(*cmpnt), GFP_KERNEL); if (!cmpnt) { dev_err(dev, "ASoC: Failed to allocate memory\n"); return -ENOMEM; } return __snd_soc_register_component(dev, cmpnt, cmpnt_drv, dai_drv, num_dai, true); } EXPORT_SYMBOL_GPL(snd_soc_register_component); /** * snd_soc_unregister_component - Unregister a component from the ASoC core * */ void snd_soc_unregister_component(struct device *dev) { struct snd_soc_component *cmpnt; list_for_each_entry(cmpnt, &component_list, list) { if (dev == cmpnt->dev) goto found; } return; found: snd_soc_unregister_dais(dev, cmpnt->num_dai); mutex_lock(&client_mutex); list_del(&cmpnt->list); mutex_unlock(&client_mutex); dev_dbg(dev, "ASoC: Unregistered component '%s'\n", cmpnt->name); kfree(cmpnt->name); } EXPORT_SYMBOL_GPL(snd_soc_unregister_component); /** * snd_soc_add_platform - Add a platform to the ASoC core * @dev: The parent device for the platform * @platform: The platform to add * @platform_driver: The driver for the platform */ int 
snd_soc_add_platform(struct device *dev, struct snd_soc_platform *platform, const struct snd_soc_platform_driver *platform_drv) { /* create platform component name */ platform->name = fmt_single_name(dev, &platform->id); if (platform->name == NULL) return -ENOMEM; platform->dev = dev; platform->driver = platform_drv; platform->dapm.dev = dev; platform->dapm.platform = platform; platform->dapm.stream_event = platform_drv->stream_event; mutex_init(&platform->mutex); mutex_lock(&client_mutex); list_add(&platform->list, &platform_list); mutex_unlock(&client_mutex); dev_dbg(dev, "ASoC: Registered platform '%s'\n", platform->name); return 0; } EXPORT_SYMBOL_GPL(snd_soc_add_platform); /** * snd_soc_register_platform - Register a platform with the ASoC core * * @platform: platform to register */ int snd_soc_register_platform(struct device *dev, const struct snd_soc_platform_driver *platform_drv) { struct snd_soc_platform *platform; int ret; dev_dbg(dev, "ASoC: platform register %s\n", dev_name(dev)); platform = kzalloc(sizeof(struct snd_soc_platform), GFP_KERNEL); if (platform == NULL) return -ENOMEM; ret = snd_soc_add_platform(dev, platform, platform_drv); if (ret) kfree(platform); return ret; } EXPORT_SYMBOL_GPL(snd_soc_register_platform); /** * snd_soc_remove_platform - Remove a platform from the ASoC core * @platform: the platform to remove */ void snd_soc_remove_platform(struct snd_soc_platform *platform) { mutex_lock(&client_mutex); list_del(&platform->list); mutex_unlock(&client_mutex); dev_dbg(platform->dev, "ASoC: Unregistered platform '%s'\n", platform->name); kfree(platform->name); } EXPORT_SYMBOL_GPL(snd_soc_remove_platform); struct snd_soc_platform *snd_soc_lookup_platform(struct device *dev) { struct snd_soc_platform *platform; list_for_each_entry(platform, &platform_list, list) { if (dev == platform->dev) return platform; } return NULL; } EXPORT_SYMBOL_GPL(snd_soc_lookup_platform); /** * snd_soc_unregister_platform - Unregister a platform from the ASoC core 
* * @platform: platform to unregister */ void snd_soc_unregister_platform(struct device *dev) { struct snd_soc_platform *platform; platform = snd_soc_lookup_platform(dev); if (!platform) return; snd_soc_remove_platform(platform); kfree(platform); } EXPORT_SYMBOL_GPL(snd_soc_unregister_platform); static u64 codec_format_map[] = { SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S16_BE, SNDRV_PCM_FMTBIT_U16_LE | SNDRV_PCM_FMTBIT_U16_BE, SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S24_BE, SNDRV_PCM_FMTBIT_U24_LE | SNDRV_PCM_FMTBIT_U24_BE, SNDRV_PCM_FMTBIT_S32_LE | SNDRV_PCM_FMTBIT_S32_BE, SNDRV_PCM_FMTBIT_U32_LE | SNDRV_PCM_FMTBIT_U32_BE, SNDRV_PCM_FMTBIT_S24_3LE | SNDRV_PCM_FMTBIT_U24_3BE, SNDRV_PCM_FMTBIT_U24_3LE | SNDRV_PCM_FMTBIT_U24_3BE, SNDRV_PCM_FMTBIT_S20_3LE | SNDRV_PCM_FMTBIT_S20_3BE, SNDRV_PCM_FMTBIT_U20_3LE | SNDRV_PCM_FMTBIT_U20_3BE, SNDRV_PCM_FMTBIT_S18_3LE | SNDRV_PCM_FMTBIT_S18_3BE, SNDRV_PCM_FMTBIT_U18_3LE | SNDRV_PCM_FMTBIT_U18_3BE, SNDRV_PCM_FMTBIT_FLOAT_LE | SNDRV_PCM_FMTBIT_FLOAT_BE, SNDRV_PCM_FMTBIT_FLOAT64_LE | SNDRV_PCM_FMTBIT_FLOAT64_BE, SNDRV_PCM_FMTBIT_IEC958_SUBFRAME_LE | SNDRV_PCM_FMTBIT_IEC958_SUBFRAME_BE, }; /* Fix up the DAI formats for endianness: codecs don't actually see * the endianness of the data but we're using the CPU format * definitions which do need to include endianness so we ensure that * codec DAIs always have both big and little endian variants set. 
*/ static void fixup_codec_formats(struct snd_soc_pcm_stream *stream) { int i; for (i = 0; i < ARRAY_SIZE(codec_format_map); i++) if (stream->formats & codec_format_map[i]) stream->formats |= codec_format_map[i]; } /** * snd_soc_register_codec - Register a codec with the ASoC core * * @codec: codec to register */ int snd_soc_register_codec(struct device *dev, const struct snd_soc_codec_driver *codec_drv, struct snd_soc_dai_driver *dai_drv, int num_dai) { struct snd_soc_codec *codec; int ret, i; dev_dbg(dev, "codec register %s\n", dev_name(dev)); codec = kzalloc(sizeof(struct snd_soc_codec), GFP_KERNEL); if (codec == NULL) return -ENOMEM; /* create CODEC component name */ codec->name = fmt_single_name(dev, &codec->id); if (codec->name == NULL) { ret = -ENOMEM; goto fail_codec; } codec->write = codec_drv->write; codec->read = codec_drv->read; codec->volatile_register = codec_drv->volatile_register; codec->readable_register = codec_drv->readable_register; codec->writable_register = codec_drv->writable_register; codec->ignore_pmdown_time = codec_drv->ignore_pmdown_time; codec->dapm.bias_level = SND_SOC_BIAS_OFF; codec->dapm.dev = dev; codec->dapm.codec = codec; codec->dapm.seq_notifier = codec_drv->seq_notifier; codec->dapm.stream_event = codec_drv->stream_event; codec->dev = dev; codec->driver = codec_drv; codec->num_dai = num_dai; mutex_init(&codec->mutex); for (i = 0; i < num_dai; i++) { fixup_codec_formats(&dai_drv[i].playback); fixup_codec_formats(&dai_drv[i].capture); } mutex_lock(&client_mutex); list_add(&codec->list, &codec_list); mutex_unlock(&client_mutex); /* register component */ ret = __snd_soc_register_component(dev, &codec->component, &codec_drv->component_driver, dai_drv, num_dai, false); if (ret < 0) { dev_err(codec->dev, "ASoC: Failed to regster component: %d\n", ret); goto fail_codec_name; } dev_dbg(codec->dev, "ASoC: Registered codec '%s'\n", codec->name); return 0; fail_codec_name: mutex_lock(&client_mutex); list_del(&codec->list); 
mutex_unlock(&client_mutex); kfree(codec->name); fail_codec: kfree(codec); return ret; } EXPORT_SYMBOL_GPL(snd_soc_register_codec); /** * snd_soc_unregister_codec - Unregister a codec from the ASoC core * * @codec: codec to unregister */ void snd_soc_unregister_codec(struct device *dev) { struct snd_soc_codec *codec; list_for_each_entry(codec, &codec_list, list) { if (dev == codec->dev) goto found; } return; found: snd_soc_unregister_component(dev); mutex_lock(&client_mutex); list_del(&codec->list); mutex_unlock(&client_mutex); dev_dbg(codec->dev, "ASoC: Unregistered codec '%s'\n", codec->name); snd_soc_cache_exit(codec); kfree(codec->name); kfree(codec); } EXPORT_SYMBOL_GPL(snd_soc_unregister_codec); /* Retrieve a card's name from device tree */ int snd_soc_of_parse_card_name(struct snd_soc_card *card, const char *propname) { struct device_node *np = card->dev->of_node; int ret; ret = of_property_read_string_index(np, propname, 0, &card->name); /* * EINVAL means the property does not exist. This is fine providing * card->name was previously set, which is checked later in * snd_soc_register_card. 
*/ if (ret < 0 && ret != -EINVAL) { dev_err(card->dev, "ASoC: Property '%s' could not be read: %d\n", propname, ret); return ret; } return 0; } EXPORT_SYMBOL_GPL(snd_soc_of_parse_card_name); int snd_soc_of_parse_audio_routing(struct snd_soc_card *card, const char *propname) { struct device_node *np = card->dev->of_node; int num_routes; struct snd_soc_dapm_route *routes; int i, ret; num_routes = of_property_count_strings(np, propname); if (num_routes < 0 || num_routes & 1) { dev_err(card->dev, "ASoC: Property '%s' does not exist or its length is not even\n", propname); return -EINVAL; } num_routes /= 2; if (!num_routes) { dev_err(card->dev, "ASoC: Property '%s's length is zero\n", propname); return -EINVAL; } routes = devm_kzalloc(card->dev, num_routes * sizeof(*routes), GFP_KERNEL); if (!routes) { dev_err(card->dev, "ASoC: Could not allocate DAPM route table\n"); return -EINVAL; } for (i = 0; i < num_routes; i++) { ret = of_property_read_string_index(np, propname, 2 * i, &routes[i].sink); if (ret) { dev_err(card->dev, "ASoC: Property '%s' index %d could not be read: %d\n", propname, 2 * i, ret); return -EINVAL; } ret = of_property_read_string_index(np, propname, (2 * i) + 1, &routes[i].source); if (ret) { dev_err(card->dev, "ASoC: Property '%s' index %d could not be read: %d\n", propname, (2 * i) + 1, ret); return -EINVAL; } } card->num_dapm_routes = num_routes; card->dapm_routes = routes; return 0; } EXPORT_SYMBOL_GPL(snd_soc_of_parse_audio_routing); unsigned int snd_soc_of_parse_daifmt(struct device_node *np, const char *prefix) { int ret, i; char prop[128]; unsigned int format = 0; int bit, frame; const char *str; struct { char *name; unsigned int val; } of_fmt_table[] = { { "i2s", SND_SOC_DAIFMT_I2S }, { "right_j", SND_SOC_DAIFMT_RIGHT_J }, { "left_j", SND_SOC_DAIFMT_LEFT_J }, { "dsp_a", SND_SOC_DAIFMT_DSP_A }, { "dsp_b", SND_SOC_DAIFMT_DSP_B }, { "ac97", SND_SOC_DAIFMT_AC97 }, { "pdm", SND_SOC_DAIFMT_PDM}, { "msb", SND_SOC_DAIFMT_MSB }, { "lsb", 
SND_SOC_DAIFMT_LSB }, }; if (!prefix) prefix = ""; /* * check "[prefix]format = xxx" * SND_SOC_DAIFMT_FORMAT_MASK area */ snprintf(prop, sizeof(prop), "%sformat", prefix); ret = of_property_read_string(np, prop, &str); if (ret == 0) { for (i = 0; i < ARRAY_SIZE(of_fmt_table); i++) { if (strcmp(str, of_fmt_table[i].name) == 0) { format |= of_fmt_table[i].val; break; } } } /* * check "[prefix]continuous-clock" * SND_SOC_DAIFMT_CLOCK_MASK area */ snprintf(prop, sizeof(prop), "%scontinuous-clock", prefix); if (of_get_property(np, prop, NULL)) format |= SND_SOC_DAIFMT_CONT; else format |= SND_SOC_DAIFMT_GATED; /* * check "[prefix]bitclock-inversion" * check "[prefix]frame-inversion" * SND_SOC_DAIFMT_INV_MASK area */ snprintf(prop, sizeof(prop), "%sbitclock-inversion", prefix); bit = !!of_get_property(np, prop, NULL); snprintf(prop, sizeof(prop), "%sframe-inversion", prefix); frame = !!of_get_property(np, prop, NULL); switch ((bit << 4) + frame) { case 0x11: format |= SND_SOC_DAIFMT_IB_IF; break; case 0x10: format |= SND_SOC_DAIFMT_IB_NF; break; case 0x01: format |= SND_SOC_DAIFMT_NB_IF; break; default: /* SND_SOC_DAIFMT_NB_NF is default */ break; } /* * check "[prefix]bitclock-master" * check "[prefix]frame-master" * SND_SOC_DAIFMT_MASTER_MASK area */ snprintf(prop, sizeof(prop), "%sbitclock-master", prefix); bit = !!of_get_property(np, prop, NULL); snprintf(prop, sizeof(prop), "%sframe-master", prefix); frame = !!of_get_property(np, prop, NULL); switch ((bit << 4) + frame) { case 0x11: format |= SND_SOC_DAIFMT_CBM_CFM; break; case 0x10: format |= SND_SOC_DAIFMT_CBM_CFS; break; case 0x01: format |= SND_SOC_DAIFMT_CBS_CFM; break; default: format |= SND_SOC_DAIFMT_CBS_CFS; break; } return format; } EXPORT_SYMBOL_GPL(snd_soc_of_parse_daifmt); int snd_soc_of_get_dai_name(struct device_node *of_node, const char **dai_name) { struct snd_soc_component *pos; struct of_phandle_args args; int ret; ret = of_parse_phandle_with_args(of_node, "sound-dai", "#sound-dai-cells", 0, 
&args); if (ret) return ret; ret = -EPROBE_DEFER; mutex_lock(&client_mutex); list_for_each_entry(pos, &component_list, list) { if (pos->dev->of_node != args.np) continue; if (pos->driver->of_xlate_dai_name) { ret = pos->driver->of_xlate_dai_name(pos, &args, dai_name); } else { int id = -1; switch (args.args_count) { case 0: id = 0; /* same as dai_drv[0] */ break; case 1: id = args.args[0]; break; default: /* not supported */ break; } if (id < 0 || id >= pos->num_dai) { ret = -EINVAL; break; } ret = 0; *dai_name = pos->dai_drv[id].name; if (!*dai_name) *dai_name = pos->name; } break; } mutex_unlock(&client_mutex); of_node_put(args.np); return ret; } EXPORT_SYMBOL_GPL(snd_soc_of_get_dai_name); static int __init snd_soc_init(void) { #ifdef CONFIG_DEBUG_FS snd_soc_debugfs_root = debugfs_create_dir("asoc", NULL); if (IS_ERR(snd_soc_debugfs_root) || !snd_soc_debugfs_root) { pr_warn("ASoC: Failed to create debugfs directory\n"); snd_soc_debugfs_root = NULL; } if (!debugfs_create_file("codecs", 0444, snd_soc_debugfs_root, NULL, &codec_list_fops)) pr_warn("ASoC: Failed to create CODEC list debugfs file\n"); if (!debugfs_create_file("dais", 0444, snd_soc_debugfs_root, NULL, &dai_list_fops)) pr_warn("ASoC: Failed to create DAI list debugfs file\n"); if (!debugfs_create_file("platforms", 0444, snd_soc_debugfs_root, NULL, &platform_list_fops)) pr_warn("ASoC: Failed to create platform list debugfs file\n"); #endif snd_soc_util_init(); return platform_driver_register(&soc_driver); } module_init(snd_soc_init); static void __exit snd_soc_exit(void) { snd_soc_util_exit(); #ifdef CONFIG_DEBUG_FS debugfs_remove_recursive(snd_soc_debugfs_root); #endif platform_driver_unregister(&soc_driver); } module_exit(snd_soc_exit); /* Module information */ MODULE_AUTHOR("Liam Girdwood, lrg@slimlogic.co.uk"); MODULE_DESCRIPTION("ALSA SoC Core"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:soc-audio");
gpl-2.0
prototype-U/Hells-Fusion-SGYD
arch/arm/mach-omap2/omap-iommu.c
676
3455
/* * omap iommu: omap device registration * * Copyright (C) 2008-2009 Nokia Corporation * * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/platform_device.h> #include <plat/iommu.h> #include <plat/irqs.h> struct iommu_device { resource_size_t base; int irq; struct iommu_platform_data pdata; struct resource res[2]; }; static struct iommu_device *devices; static int num_iommu_devices; #ifdef CONFIG_ARCH_OMAP3 static struct iommu_device omap3_devices[] = { { .base = 0x480bd400, .irq = 24, .pdata = { .name = "isp", .nr_tlb_entries = 8, .clk_name = "cam_ick", }, }, #if defined(CONFIG_MPU_BRIDGE_IOMMU) { .base = 0x5d000000, .irq = 28, .pdata = { .name = "iva2", .nr_tlb_entries = 32, .clk_name = "iva2_ck", }, }, #endif }; #define NR_OMAP3_IOMMU_DEVICES ARRAY_SIZE(omap3_devices) static struct platform_device *omap3_iommu_pdev[NR_OMAP3_IOMMU_DEVICES]; #else #define omap3_devices NULL #define NR_OMAP3_IOMMU_DEVICES 0 #define omap3_iommu_pdev NULL #endif #ifdef CONFIG_ARCH_OMAP4 static struct iommu_device omap4_devices[] = { { .base = OMAP4_MMU1_BASE, .irq = INT_44XX_DUCATI_MMU_IRQ, .pdata = { .name = "ducati", .nr_tlb_entries = 32, .clk_name = "ducati_ick", }, }, #if defined(CONFIG_MPU_TESLA_IOMMU) { .base = OMAP4_MMU2_BASE, .irq = INT_44XX_DSP_MMU, .pdata = { .name = "tesla", .nr_tlb_entries = 32, .clk_name = "tesla_ick", }, }, #endif }; #define NR_OMAP4_IOMMU_DEVICES ARRAY_SIZE(omap4_devices) static struct platform_device *omap4_iommu_pdev[NR_OMAP4_IOMMU_DEVICES]; #else #define omap4_devices NULL #define NR_OMAP4_IOMMU_DEVICES 0 #define omap4_iommu_pdev NULL #endif static struct platform_device **omap_iommu_pdev; static int __init omap_iommu_init(void) { int i, err; struct resource res[] = { { .flags = IORESOURCE_MEM }, { .flags = IORESOURCE_IRQ }, }; if 
(cpu_is_omap34xx()) { devices = omap3_devices; omap_iommu_pdev = omap3_iommu_pdev; num_iommu_devices = NR_OMAP3_IOMMU_DEVICES; } else if (cpu_is_omap44xx()) { devices = omap4_devices; omap_iommu_pdev = omap4_iommu_pdev; num_iommu_devices = NR_OMAP4_IOMMU_DEVICES; } else return -ENODEV; for (i = 0; i < num_iommu_devices; i++) { struct platform_device *pdev; const struct iommu_device *d = &devices[i]; pdev = platform_device_alloc("omap-iommu", i); if (!pdev) { err = -ENOMEM; goto err_out; } res[0].start = d->base; res[0].end = d->base + MMU_REG_SIZE - 1; res[1].start = res[1].end = d->irq; err = platform_device_add_resources(pdev, res, ARRAY_SIZE(res)); if (err) goto err_out; err = platform_device_add_data(pdev, &d->pdata, sizeof(d->pdata)); if (err) goto err_out; err = platform_device_add(pdev); if (err) goto err_out; omap_iommu_pdev[i] = pdev; } return 0; err_out: while (i--) platform_device_put(omap_iommu_pdev[i]); return err; } module_init(omap_iommu_init); static void __exit omap_iommu_exit(void) { int i; for (i = 0; i < num_iommu_devices; i++) platform_device_unregister(omap_iommu_pdev[i]); } module_exit(omap_iommu_exit); MODULE_AUTHOR("Hiroshi DOYU"); MODULE_DESCRIPTION("omap iommu: omap device registration"); MODULE_LICENSE("GPL v2");
gpl-2.0
MojieBuddhist/linux
drivers/power/apm_power.c
1956
10386
/* * Copyright © 2007 Anton Vorontsov <cbou@mail.ru> * Copyright © 2007 Eugeny Boger <eugenyboger@dgap.mipt.ru> * * Author: Eugeny Boger <eugenyboger@dgap.mipt.ru> * * Use consistent with the GNU GPL is permitted, * provided that this copyright notice is * preserved in its entirety in all copies and derived works. */ #include <linux/module.h> #include <linux/device.h> #include <linux/power_supply.h> #include <linux/apm-emulation.h> #define PSY_PROP(psy, prop, val) (power_supply_get_property(psy, \ POWER_SUPPLY_PROP_##prop, val)) #define _MPSY_PROP(prop, val) (power_supply_get_property(main_battery, \ prop, val)) #define MPSY_PROP(prop, val) _MPSY_PROP(POWER_SUPPLY_PROP_##prop, val) static DEFINE_MUTEX(apm_mutex); static struct power_supply *main_battery; enum apm_source { SOURCE_ENERGY, SOURCE_CHARGE, SOURCE_VOLTAGE, }; struct find_bat_param { struct power_supply *main; struct power_supply *bat; struct power_supply *max_charge_bat; struct power_supply *max_energy_bat; union power_supply_propval full; int max_charge; int max_energy; }; static int __find_main_battery(struct device *dev, void *data) { struct find_bat_param *bp = (struct find_bat_param *)data; bp->bat = dev_get_drvdata(dev); if (bp->bat->desc->use_for_apm) { /* nice, we explicitly asked to report this battery. 
*/ bp->main = bp->bat; return 1; } if (!PSY_PROP(bp->bat, CHARGE_FULL_DESIGN, &bp->full) || !PSY_PROP(bp->bat, CHARGE_FULL, &bp->full)) { if (bp->full.intval > bp->max_charge) { bp->max_charge_bat = bp->bat; bp->max_charge = bp->full.intval; } } else if (!PSY_PROP(bp->bat, ENERGY_FULL_DESIGN, &bp->full) || !PSY_PROP(bp->bat, ENERGY_FULL, &bp->full)) { if (bp->full.intval > bp->max_energy) { bp->max_energy_bat = bp->bat; bp->max_energy = bp->full.intval; } } return 0; } static void find_main_battery(void) { struct find_bat_param bp; int error; memset(&bp, 0, sizeof(struct find_bat_param)); main_battery = NULL; bp.main = main_battery; error = class_for_each_device(power_supply_class, NULL, &bp, __find_main_battery); if (error) { main_battery = bp.main; return; } if ((bp.max_energy_bat && bp.max_charge_bat) && (bp.max_energy_bat != bp.max_charge_bat)) { /* try guess battery with more capacity */ if (!PSY_PROP(bp.max_charge_bat, VOLTAGE_MAX_DESIGN, &bp.full)) { if (bp.max_energy > bp.max_charge * bp.full.intval) main_battery = bp.max_energy_bat; else main_battery = bp.max_charge_bat; } else if (!PSY_PROP(bp.max_energy_bat, VOLTAGE_MAX_DESIGN, &bp.full)) { if (bp.max_charge > bp.max_energy / bp.full.intval) main_battery = bp.max_charge_bat; else main_battery = bp.max_energy_bat; } else { /* give up, choice any */ main_battery = bp.max_energy_bat; } } else if (bp.max_charge_bat) { main_battery = bp.max_charge_bat; } else if (bp.max_energy_bat) { main_battery = bp.max_energy_bat; } else { /* give up, try the last if any */ main_battery = bp.bat; } } static int do_calculate_time(int status, enum apm_source source) { union power_supply_propval full; union power_supply_propval empty; union power_supply_propval cur; union power_supply_propval I; enum power_supply_property full_prop; enum power_supply_property full_design_prop; enum power_supply_property empty_prop; enum power_supply_property empty_design_prop; enum power_supply_property cur_avg_prop; enum 
power_supply_property cur_now_prop; if (MPSY_PROP(CURRENT_AVG, &I)) { /* if battery can't report average value, use momentary */ if (MPSY_PROP(CURRENT_NOW, &I)) return -1; } if (!I.intval) return 0; switch (source) { case SOURCE_CHARGE: full_prop = POWER_SUPPLY_PROP_CHARGE_FULL; full_design_prop = POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN; empty_prop = POWER_SUPPLY_PROP_CHARGE_EMPTY; empty_design_prop = POWER_SUPPLY_PROP_CHARGE_EMPTY; cur_avg_prop = POWER_SUPPLY_PROP_CHARGE_AVG; cur_now_prop = POWER_SUPPLY_PROP_CHARGE_NOW; break; case SOURCE_ENERGY: full_prop = POWER_SUPPLY_PROP_ENERGY_FULL; full_design_prop = POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN; empty_prop = POWER_SUPPLY_PROP_ENERGY_EMPTY; empty_design_prop = POWER_SUPPLY_PROP_CHARGE_EMPTY; cur_avg_prop = POWER_SUPPLY_PROP_ENERGY_AVG; cur_now_prop = POWER_SUPPLY_PROP_ENERGY_NOW; break; case SOURCE_VOLTAGE: full_prop = POWER_SUPPLY_PROP_VOLTAGE_MAX; full_design_prop = POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN; empty_prop = POWER_SUPPLY_PROP_VOLTAGE_MIN; empty_design_prop = POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN; cur_avg_prop = POWER_SUPPLY_PROP_VOLTAGE_AVG; cur_now_prop = POWER_SUPPLY_PROP_VOLTAGE_NOW; break; default: printk(KERN_ERR "Unsupported source: %d\n", source); return -1; } if (_MPSY_PROP(full_prop, &full)) { /* if battery can't report this property, use design value */ if (_MPSY_PROP(full_design_prop, &full)) return -1; } if (_MPSY_PROP(empty_prop, &empty)) { /* if battery can't report this property, use design value */ if (_MPSY_PROP(empty_design_prop, &empty)) empty.intval = 0; } if (_MPSY_PROP(cur_avg_prop, &cur)) { /* if battery can't report average value, use momentary */ if (_MPSY_PROP(cur_now_prop, &cur)) return -1; } if (status == POWER_SUPPLY_STATUS_CHARGING) return ((cur.intval - full.intval) * 60L) / I.intval; else return -((cur.intval - empty.intval) * 60L) / I.intval; } static int calculate_time(int status) { int time; time = do_calculate_time(status, SOURCE_ENERGY); if (time != -1) return time; time = 
do_calculate_time(status, SOURCE_CHARGE); if (time != -1) return time; time = do_calculate_time(status, SOURCE_VOLTAGE); if (time != -1) return time; return -1; } static int calculate_capacity(enum apm_source source) { enum power_supply_property full_prop, empty_prop; enum power_supply_property full_design_prop, empty_design_prop; enum power_supply_property now_prop, avg_prop; union power_supply_propval empty, full, cur; int ret; switch (source) { case SOURCE_CHARGE: full_prop = POWER_SUPPLY_PROP_CHARGE_FULL; empty_prop = POWER_SUPPLY_PROP_CHARGE_EMPTY; full_design_prop = POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN; empty_design_prop = POWER_SUPPLY_PROP_CHARGE_EMPTY_DESIGN; now_prop = POWER_SUPPLY_PROP_CHARGE_NOW; avg_prop = POWER_SUPPLY_PROP_CHARGE_AVG; break; case SOURCE_ENERGY: full_prop = POWER_SUPPLY_PROP_ENERGY_FULL; empty_prop = POWER_SUPPLY_PROP_ENERGY_EMPTY; full_design_prop = POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN; empty_design_prop = POWER_SUPPLY_PROP_ENERGY_EMPTY_DESIGN; now_prop = POWER_SUPPLY_PROP_ENERGY_NOW; avg_prop = POWER_SUPPLY_PROP_ENERGY_AVG; break; case SOURCE_VOLTAGE: full_prop = POWER_SUPPLY_PROP_VOLTAGE_MAX; empty_prop = POWER_SUPPLY_PROP_VOLTAGE_MIN; full_design_prop = POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN; empty_design_prop = POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN; now_prop = POWER_SUPPLY_PROP_VOLTAGE_NOW; avg_prop = POWER_SUPPLY_PROP_VOLTAGE_AVG; break; default: printk(KERN_ERR "Unsupported source: %d\n", source); return -1; } if (_MPSY_PROP(full_prop, &full)) { /* if battery can't report this property, use design value */ if (_MPSY_PROP(full_design_prop, &full)) return -1; } if (_MPSY_PROP(avg_prop, &cur)) { /* if battery can't report average value, use momentary */ if (_MPSY_PROP(now_prop, &cur)) return -1; } if (_MPSY_PROP(empty_prop, &empty)) { /* if battery can't report this property, use design value */ if (_MPSY_PROP(empty_design_prop, &empty)) empty.intval = 0; } if (full.intval - empty.intval) ret = ((cur.intval - empty.intval) * 100L) / 
(full.intval - empty.intval); else return -1; if (ret > 100) return 100; else if (ret < 0) return 0; return ret; } static void apm_battery_apm_get_power_status(struct apm_power_info *info) { union power_supply_propval status; union power_supply_propval capacity, time_to_full, time_to_empty; mutex_lock(&apm_mutex); find_main_battery(); if (!main_battery) { mutex_unlock(&apm_mutex); return; } /* status */ if (MPSY_PROP(STATUS, &status)) status.intval = POWER_SUPPLY_STATUS_UNKNOWN; /* ac line status */ if ((status.intval == POWER_SUPPLY_STATUS_CHARGING) || (status.intval == POWER_SUPPLY_STATUS_NOT_CHARGING) || (status.intval == POWER_SUPPLY_STATUS_FULL)) info->ac_line_status = APM_AC_ONLINE; else info->ac_line_status = APM_AC_OFFLINE; /* battery life (i.e. capacity, in percents) */ if (MPSY_PROP(CAPACITY, &capacity) == 0) { info->battery_life = capacity.intval; } else { /* try calculate using energy */ info->battery_life = calculate_capacity(SOURCE_ENERGY); /* if failed try calculate using charge instead */ if (info->battery_life == -1) info->battery_life = calculate_capacity(SOURCE_CHARGE); if (info->battery_life == -1) info->battery_life = calculate_capacity(SOURCE_VOLTAGE); } /* charging status */ if (status.intval == POWER_SUPPLY_STATUS_CHARGING) { info->battery_status = APM_BATTERY_STATUS_CHARGING; } else { if (info->battery_life > 50) info->battery_status = APM_BATTERY_STATUS_HIGH; else if (info->battery_life > 5) info->battery_status = APM_BATTERY_STATUS_LOW; else info->battery_status = APM_BATTERY_STATUS_CRITICAL; } info->battery_flag = info->battery_status; /* time */ info->units = APM_UNITS_MINS; if (status.intval == POWER_SUPPLY_STATUS_CHARGING) { if (!MPSY_PROP(TIME_TO_FULL_AVG, &time_to_full) || !MPSY_PROP(TIME_TO_FULL_NOW, &time_to_full)) info->time = time_to_full.intval / 60; else info->time = calculate_time(status.intval); } else { if (!MPSY_PROP(TIME_TO_EMPTY_AVG, &time_to_empty) || !MPSY_PROP(TIME_TO_EMPTY_NOW, &time_to_empty)) info->time = 
time_to_empty.intval / 60; else info->time = calculate_time(status.intval); } mutex_unlock(&apm_mutex); } static int __init apm_battery_init(void) { printk(KERN_INFO "APM Battery Driver\n"); apm_get_power_status = apm_battery_apm_get_power_status; return 0; } static void __exit apm_battery_exit(void) { apm_get_power_status = NULL; } module_init(apm_battery_init); module_exit(apm_battery_exit); MODULE_AUTHOR("Eugeny Boger <eugenyboger@dgap.mipt.ru>"); MODULE_DESCRIPTION("APM emulation driver for battery monitoring class"); MODULE_LICENSE("GPL");
gpl-2.0
TheTypoMaster/android_kernel_htc_a31ul
drivers/vlynq/vlynq.c
2468
19737
/* * Copyright (C) 2006, 2007 Eugene Konev <ejka@openwrt.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * * Parts of the VLYNQ specification can be found here: * http://www.ti.com/litv/pdf/sprue36a */ #include <linux/init.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/device.h> #include <linux/module.h> #include <linux/errno.h> #include <linux/platform_device.h> #include <linux/interrupt.h> #include <linux/delay.h> #include <linux/io.h> #include <linux/slab.h> #include <linux/irq.h> #include <linux/vlynq.h> #define VLYNQ_CTRL_PM_ENABLE 0x80000000 #define VLYNQ_CTRL_CLOCK_INT 0x00008000 #define VLYNQ_CTRL_CLOCK_DIV(x) (((x) & 7) << 16) #define VLYNQ_CTRL_INT_LOCAL 0x00004000 #define VLYNQ_CTRL_INT_ENABLE 0x00002000 #define VLYNQ_CTRL_INT_VECTOR(x) (((x) & 0x1f) << 8) #define VLYNQ_CTRL_INT2CFG 0x00000080 #define VLYNQ_CTRL_RESET 0x00000001 #define VLYNQ_CTRL_CLOCK_MASK (0x7 << 16) #define VLYNQ_INT_OFFSET 0x00000014 #define VLYNQ_REMOTE_OFFSET 0x00000080 #define VLYNQ_STATUS_LINK 0x00000001 #define VLYNQ_STATUS_LERROR 0x00000080 #define VLYNQ_STATUS_RERROR 0x00000100 #define VINT_ENABLE 0x00000100 #define VINT_TYPE_EDGE 0x00000080 #define VINT_LEVEL_LOW 0x00000040 #define VINT_VECTOR(x) ((x) & 0x1f) #define VINT_OFFSET(irq) (8 * ((irq) % 4)) #define VLYNQ_AUTONEGO_V2 
0x00010000 struct vlynq_regs { u32 revision; u32 control; u32 status; u32 int_prio; u32 int_status; u32 int_pending; u32 int_ptr; u32 tx_offset; struct vlynq_mapping rx_mapping[4]; u32 chip; u32 autonego; u32 unused[6]; u32 int_device[8]; }; #ifdef CONFIG_VLYNQ_DEBUG static void vlynq_dump_regs(struct vlynq_device *dev) { int i; printk(KERN_DEBUG "VLYNQ local=%p remote=%p\n", dev->local, dev->remote); for (i = 0; i < 32; i++) { printk(KERN_DEBUG "VLYNQ: local %d: %08x\n", i + 1, ((u32 *)dev->local)[i]); printk(KERN_DEBUG "VLYNQ: remote %d: %08x\n", i + 1, ((u32 *)dev->remote)[i]); } } static void vlynq_dump_mem(u32 *base, int count) { int i; for (i = 0; i < (count + 3) / 4; i++) { if (i % 4 == 0) printk(KERN_DEBUG "\nMEM[0x%04x]:", i * 4); printk(KERN_DEBUG " 0x%08x", *(base + i)); } printk(KERN_DEBUG "\n"); } #endif /* Check the VLYNQ link status with a given device */ static int vlynq_linked(struct vlynq_device *dev) { int i; for (i = 0; i < 100; i++) if (readl(&dev->local->status) & VLYNQ_STATUS_LINK) return 1; else cpu_relax(); return 0; } static void vlynq_reset(struct vlynq_device *dev) { writel(readl(&dev->local->control) | VLYNQ_CTRL_RESET, &dev->local->control); /* Wait for the devices to finish resetting */ msleep(5); /* Remove reset bit */ writel(readl(&dev->local->control) & ~VLYNQ_CTRL_RESET, &dev->local->control); /* Give some time for the devices to settle */ msleep(5); } static void vlynq_irq_unmask(struct irq_data *d) { struct vlynq_device *dev = irq_data_get_irq_chip_data(d); int virq; u32 val; BUG_ON(!dev); virq = d->irq - dev->irq_start; val = readl(&dev->remote->int_device[virq >> 2]); val |= (VINT_ENABLE | virq) << VINT_OFFSET(virq); writel(val, &dev->remote->int_device[virq >> 2]); } static void vlynq_irq_mask(struct irq_data *d) { struct vlynq_device *dev = irq_data_get_irq_chip_data(d); int virq; u32 val; BUG_ON(!dev); virq = d->irq - dev->irq_start; val = readl(&dev->remote->int_device[virq >> 2]); val &= ~(VINT_ENABLE << 
VINT_OFFSET(virq)); writel(val, &dev->remote->int_device[virq >> 2]); } static int vlynq_irq_type(struct irq_data *d, unsigned int flow_type) { struct vlynq_device *dev = irq_data_get_irq_chip_data(d); int virq; u32 val; BUG_ON(!dev); virq = d->irq - dev->irq_start; val = readl(&dev->remote->int_device[virq >> 2]); switch (flow_type & IRQ_TYPE_SENSE_MASK) { case IRQ_TYPE_EDGE_RISING: case IRQ_TYPE_EDGE_FALLING: case IRQ_TYPE_EDGE_BOTH: val |= VINT_TYPE_EDGE << VINT_OFFSET(virq); val &= ~(VINT_LEVEL_LOW << VINT_OFFSET(virq)); break; case IRQ_TYPE_LEVEL_HIGH: val &= ~(VINT_TYPE_EDGE << VINT_OFFSET(virq)); val &= ~(VINT_LEVEL_LOW << VINT_OFFSET(virq)); break; case IRQ_TYPE_LEVEL_LOW: val &= ~(VINT_TYPE_EDGE << VINT_OFFSET(virq)); val |= VINT_LEVEL_LOW << VINT_OFFSET(virq); break; default: return -EINVAL; } writel(val, &dev->remote->int_device[virq >> 2]); return 0; } static void vlynq_local_ack(struct irq_data *d) { struct vlynq_device *dev = irq_data_get_irq_chip_data(d); u32 status = readl(&dev->local->status); pr_debug("%s: local status: 0x%08x\n", dev_name(&dev->dev), status); writel(status, &dev->local->status); } static void vlynq_remote_ack(struct irq_data *d) { struct vlynq_device *dev = irq_data_get_irq_chip_data(d); u32 status = readl(&dev->remote->status); pr_debug("%s: remote status: 0x%08x\n", dev_name(&dev->dev), status); writel(status, &dev->remote->status); } static irqreturn_t vlynq_irq(int irq, void *dev_id) { struct vlynq_device *dev = dev_id; u32 status; int virq = 0; status = readl(&dev->local->int_status); writel(status, &dev->local->int_status); if (unlikely(!status)) spurious_interrupt(); while (status) { if (status & 1) do_IRQ(dev->irq_start + virq); status >>= 1; virq++; } return IRQ_HANDLED; } static struct irq_chip vlynq_irq_chip = { .name = "vlynq", .irq_unmask = vlynq_irq_unmask, .irq_mask = vlynq_irq_mask, .irq_set_type = vlynq_irq_type, }; static struct irq_chip vlynq_local_chip = { .name = "vlynq local error", .irq_unmask = 
vlynq_irq_unmask, .irq_mask = vlynq_irq_mask, .irq_ack = vlynq_local_ack, }; static struct irq_chip vlynq_remote_chip = { .name = "vlynq local error", .irq_unmask = vlynq_irq_unmask, .irq_mask = vlynq_irq_mask, .irq_ack = vlynq_remote_ack, }; static int vlynq_setup_irq(struct vlynq_device *dev) { u32 val; int i, virq; if (dev->local_irq == dev->remote_irq) { printk(KERN_ERR "%s: local vlynq irq should be different from remote\n", dev_name(&dev->dev)); return -EINVAL; } /* Clear local and remote error bits */ writel(readl(&dev->local->status), &dev->local->status); writel(readl(&dev->remote->status), &dev->remote->status); /* Now setup interrupts */ val = VLYNQ_CTRL_INT_VECTOR(dev->local_irq); val |= VLYNQ_CTRL_INT_ENABLE | VLYNQ_CTRL_INT_LOCAL | VLYNQ_CTRL_INT2CFG; val |= readl(&dev->local->control); writel(VLYNQ_INT_OFFSET, &dev->local->int_ptr); writel(val, &dev->local->control); val = VLYNQ_CTRL_INT_VECTOR(dev->remote_irq); val |= VLYNQ_CTRL_INT_ENABLE; val |= readl(&dev->remote->control); writel(VLYNQ_INT_OFFSET, &dev->remote->int_ptr); writel(val, &dev->remote->int_ptr); writel(val, &dev->remote->control); for (i = dev->irq_start; i <= dev->irq_end; i++) { virq = i - dev->irq_start; if (virq == dev->local_irq) { irq_set_chip_and_handler(i, &vlynq_local_chip, handle_level_irq); irq_set_chip_data(i, dev); } else if (virq == dev->remote_irq) { irq_set_chip_and_handler(i, &vlynq_remote_chip, handle_level_irq); irq_set_chip_data(i, dev); } else { irq_set_chip_and_handler(i, &vlynq_irq_chip, handle_simple_irq); irq_set_chip_data(i, dev); writel(0, &dev->remote->int_device[virq >> 2]); } } if (request_irq(dev->irq, vlynq_irq, IRQF_SHARED, "vlynq", dev)) { printk(KERN_ERR "%s: request_irq failed\n", dev_name(&dev->dev)); return -EAGAIN; } return 0; } static void vlynq_device_release(struct device *dev) { struct vlynq_device *vdev = to_vlynq_device(dev); kfree(vdev); } static int vlynq_device_match(struct device *dev, struct device_driver *drv) { struct vlynq_device 
*vdev = to_vlynq_device(dev); struct vlynq_driver *vdrv = to_vlynq_driver(drv); struct vlynq_device_id *ids = vdrv->id_table; while (ids->id) { if (ids->id == vdev->dev_id) { vdev->divisor = ids->divisor; vlynq_set_drvdata(vdev, ids); printk(KERN_INFO "Driver found for VLYNQ " "device: %08x\n", vdev->dev_id); return 1; } printk(KERN_DEBUG "Not using the %08x VLYNQ device's driver" " for VLYNQ device: %08x\n", ids->id, vdev->dev_id); ids++; } return 0; } static int vlynq_device_probe(struct device *dev) { struct vlynq_device *vdev = to_vlynq_device(dev); struct vlynq_driver *drv = to_vlynq_driver(dev->driver); struct vlynq_device_id *id = vlynq_get_drvdata(vdev); int result = -ENODEV; if (drv->probe) result = drv->probe(vdev, id); if (result) put_device(dev); return result; } static int vlynq_device_remove(struct device *dev) { struct vlynq_driver *drv = to_vlynq_driver(dev->driver); if (drv->remove) drv->remove(to_vlynq_device(dev)); return 0; } int __vlynq_register_driver(struct vlynq_driver *driver, struct module *owner) { driver->driver.name = driver->name; driver->driver.bus = &vlynq_bus_type; return driver_register(&driver->driver); } EXPORT_SYMBOL(__vlynq_register_driver); void vlynq_unregister_driver(struct vlynq_driver *driver) { driver_unregister(&driver->driver); } EXPORT_SYMBOL(vlynq_unregister_driver); /* * A VLYNQ remote device can clock the VLYNQ bus master * using a dedicated clock line. In that case, both the * remove device and the bus master should have the same * serial clock dividers configured. Iterate through the * 8 possible dividers until we actually link with the * device. */ static int __vlynq_try_remote(struct vlynq_device *dev) { int i; vlynq_reset(dev); for (i = dev->dev_id ? vlynq_rdiv2 : vlynq_rdiv8; dev->dev_id ? i <= vlynq_rdiv8 : i >= vlynq_rdiv2; dev->dev_id ? 
i++ : i--) { if (!vlynq_linked(dev)) break; writel((readl(&dev->remote->control) & ~VLYNQ_CTRL_CLOCK_MASK) | VLYNQ_CTRL_CLOCK_INT | VLYNQ_CTRL_CLOCK_DIV(i - vlynq_rdiv1), &dev->remote->control); writel((readl(&dev->local->control) & ~(VLYNQ_CTRL_CLOCK_INT | VLYNQ_CTRL_CLOCK_MASK)) | VLYNQ_CTRL_CLOCK_DIV(i - vlynq_rdiv1), &dev->local->control); if (vlynq_linked(dev)) { printk(KERN_DEBUG "%s: using remote clock divisor %d\n", dev_name(&dev->dev), i - vlynq_rdiv1 + 1); dev->divisor = i; return 0; } else { vlynq_reset(dev); } } return -ENODEV; } /* * A VLYNQ remote device can be clocked by the VLYNQ bus * master using a dedicated clock line. In that case, only * the bus master configures the serial clock divider. * Iterate through the 8 possible dividers until we * actually get a link with the device. */ static int __vlynq_try_local(struct vlynq_device *dev) { int i; vlynq_reset(dev); for (i = dev->dev_id ? vlynq_ldiv2 : vlynq_ldiv8; dev->dev_id ? i <= vlynq_ldiv8 : i >= vlynq_ldiv2; dev->dev_id ? i++ : i--) { writel((readl(&dev->local->control) & ~VLYNQ_CTRL_CLOCK_MASK) | VLYNQ_CTRL_CLOCK_INT | VLYNQ_CTRL_CLOCK_DIV(i - vlynq_ldiv1), &dev->local->control); if (vlynq_linked(dev)) { printk(KERN_DEBUG "%s: using local clock divisor %d\n", dev_name(&dev->dev), i - vlynq_ldiv1 + 1); dev->divisor = i; return 0; } else { vlynq_reset(dev); } } return -ENODEV; } /* * When using external clocking method, serial clock * is supplied by an external oscillator, therefore we * should mask the local clock bit in the clock control * register for both the bus master and the remote device. 
*/ static int __vlynq_try_external(struct vlynq_device *dev) { vlynq_reset(dev); if (!vlynq_linked(dev)) return -ENODEV; writel((readl(&dev->remote->control) & ~VLYNQ_CTRL_CLOCK_INT), &dev->remote->control); writel((readl(&dev->local->control) & ~VLYNQ_CTRL_CLOCK_INT), &dev->local->control); if (vlynq_linked(dev)) { printk(KERN_DEBUG "%s: using external clock\n", dev_name(&dev->dev)); dev->divisor = vlynq_div_external; return 0; } return -ENODEV; } static int __vlynq_enable_device(struct vlynq_device *dev) { int result; struct plat_vlynq_ops *ops = dev->dev.platform_data; result = ops->on(dev); if (result) return result; switch (dev->divisor) { case vlynq_div_external: case vlynq_div_auto: /* When the device is brought from reset it should have clock * generation negotiated by hardware. * Check which device is generating clocks and perform setup * accordingly */ if (vlynq_linked(dev) && readl(&dev->remote->control) & VLYNQ_CTRL_CLOCK_INT) { if (!__vlynq_try_remote(dev) || !__vlynq_try_local(dev) || !__vlynq_try_external(dev)) return 0; } else { if (!__vlynq_try_external(dev) || !__vlynq_try_local(dev) || !__vlynq_try_remote(dev)) return 0; } break; case vlynq_ldiv1: case vlynq_ldiv2: case vlynq_ldiv3: case vlynq_ldiv4: case vlynq_ldiv5: case vlynq_ldiv6: case vlynq_ldiv7: case vlynq_ldiv8: writel(VLYNQ_CTRL_CLOCK_INT | VLYNQ_CTRL_CLOCK_DIV(dev->divisor - vlynq_ldiv1), &dev->local->control); writel(0, &dev->remote->control); if (vlynq_linked(dev)) { printk(KERN_DEBUG "%s: using local clock divisor %d\n", dev_name(&dev->dev), dev->divisor - vlynq_ldiv1 + 1); return 0; } break; case vlynq_rdiv1: case vlynq_rdiv2: case vlynq_rdiv3: case vlynq_rdiv4: case vlynq_rdiv5: case vlynq_rdiv6: case vlynq_rdiv7: case vlynq_rdiv8: writel(0, &dev->local->control); writel(VLYNQ_CTRL_CLOCK_INT | VLYNQ_CTRL_CLOCK_DIV(dev->divisor - vlynq_rdiv1), &dev->remote->control); if (vlynq_linked(dev)) { printk(KERN_DEBUG "%s: using remote clock divisor %d\n", dev_name(&dev->dev), dev->divisor 
- vlynq_rdiv1 + 1); return 0; } break; } ops->off(dev); return -ENODEV; } int vlynq_enable_device(struct vlynq_device *dev) { struct plat_vlynq_ops *ops = dev->dev.platform_data; int result = -ENODEV; result = __vlynq_enable_device(dev); if (result) return result; result = vlynq_setup_irq(dev); if (result) ops->off(dev); dev->enabled = !result; return result; } EXPORT_SYMBOL(vlynq_enable_device); void vlynq_disable_device(struct vlynq_device *dev) { struct plat_vlynq_ops *ops = dev->dev.platform_data; dev->enabled = 0; free_irq(dev->irq, dev); ops->off(dev); } EXPORT_SYMBOL(vlynq_disable_device); int vlynq_set_local_mapping(struct vlynq_device *dev, u32 tx_offset, struct vlynq_mapping *mapping) { int i; if (!dev->enabled) return -ENXIO; writel(tx_offset, &dev->local->tx_offset); for (i = 0; i < 4; i++) { writel(mapping[i].offset, &dev->local->rx_mapping[i].offset); writel(mapping[i].size, &dev->local->rx_mapping[i].size); } return 0; } EXPORT_SYMBOL(vlynq_set_local_mapping); int vlynq_set_remote_mapping(struct vlynq_device *dev, u32 tx_offset, struct vlynq_mapping *mapping) { int i; if (!dev->enabled) return -ENXIO; writel(tx_offset, &dev->remote->tx_offset); for (i = 0; i < 4; i++) { writel(mapping[i].offset, &dev->remote->rx_mapping[i].offset); writel(mapping[i].size, &dev->remote->rx_mapping[i].size); } return 0; } EXPORT_SYMBOL(vlynq_set_remote_mapping); int vlynq_set_local_irq(struct vlynq_device *dev, int virq) { int irq = dev->irq_start + virq; if (dev->enabled) return -EBUSY; if ((irq < dev->irq_start) || (irq > dev->irq_end)) return -EINVAL; if (virq == dev->remote_irq) return -EINVAL; dev->local_irq = virq; return 0; } EXPORT_SYMBOL(vlynq_set_local_irq); int vlynq_set_remote_irq(struct vlynq_device *dev, int virq) { int irq = dev->irq_start + virq; if (dev->enabled) return -EBUSY; if ((irq < dev->irq_start) || (irq > dev->irq_end)) return -EINVAL; if (virq == dev->local_irq) return -EINVAL; dev->remote_irq = virq; return 0; } 
EXPORT_SYMBOL(vlynq_set_remote_irq); static int vlynq_probe(struct platform_device *pdev) { struct vlynq_device *dev; struct resource *regs_res, *mem_res, *irq_res; int len, result; regs_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs"); if (!regs_res) return -ENODEV; mem_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mem"); if (!mem_res) return -ENODEV; irq_res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "devirq"); if (!irq_res) return -ENODEV; dev = kzalloc(sizeof(*dev), GFP_KERNEL); if (!dev) { printk(KERN_ERR "vlynq: failed to allocate device structure\n"); return -ENOMEM; } dev->id = pdev->id; dev->dev.bus = &vlynq_bus_type; dev->dev.parent = &pdev->dev; dev_set_name(&dev->dev, "vlynq%d", dev->id); dev->dev.platform_data = pdev->dev.platform_data; dev->dev.release = vlynq_device_release; dev->regs_start = regs_res->start; dev->regs_end = regs_res->end; dev->mem_start = mem_res->start; dev->mem_end = mem_res->end; len = resource_size(regs_res); if (!request_mem_region(regs_res->start, len, dev_name(&dev->dev))) { printk(KERN_ERR "%s: Can't request vlynq registers\n", dev_name(&dev->dev)); result = -ENXIO; goto fail_request; } dev->local = ioremap(regs_res->start, len); if (!dev->local) { printk(KERN_ERR "%s: Can't remap vlynq registers\n", dev_name(&dev->dev)); result = -ENXIO; goto fail_remap; } dev->remote = (struct vlynq_regs *)((void *)dev->local + VLYNQ_REMOTE_OFFSET); dev->irq = platform_get_irq_byname(pdev, "irq"); dev->irq_start = irq_res->start; dev->irq_end = irq_res->end; dev->local_irq = dev->irq_end - dev->irq_start; dev->remote_irq = dev->local_irq - 1; if (device_register(&dev->dev)) goto fail_register; platform_set_drvdata(pdev, dev); printk(KERN_INFO "%s: regs 0x%p, irq %d, mem 0x%p\n", dev_name(&dev->dev), (void *)dev->regs_start, dev->irq, (void *)dev->mem_start); dev->dev_id = 0; dev->divisor = vlynq_div_auto; result = __vlynq_enable_device(dev); if (result == 0) { dev->dev_id = readl(&dev->remote->chip); 
((struct plat_vlynq_ops *)(dev->dev.platform_data))->off(dev); } if (dev->dev_id) printk(KERN_INFO "Found a VLYNQ device: %08x\n", dev->dev_id); return 0; fail_register: iounmap(dev->local); fail_remap: fail_request: release_mem_region(regs_res->start, len); kfree(dev); return result; } static int vlynq_remove(struct platform_device *pdev) { struct vlynq_device *dev = platform_get_drvdata(pdev); device_unregister(&dev->dev); iounmap(dev->local); release_mem_region(dev->regs_start, dev->regs_end - dev->regs_start); kfree(dev); return 0; } static struct platform_driver vlynq_platform_driver = { .driver.name = "vlynq", .probe = vlynq_probe, .remove = vlynq_remove, }; struct bus_type vlynq_bus_type = { .name = "vlynq", .match = vlynq_device_match, .probe = vlynq_device_probe, .remove = vlynq_device_remove, }; EXPORT_SYMBOL(vlynq_bus_type); static int vlynq_init(void) { int res = 0; res = bus_register(&vlynq_bus_type); if (res) goto fail_bus; res = platform_driver_register(&vlynq_platform_driver); if (res) goto fail_platform; return 0; fail_platform: bus_unregister(&vlynq_bus_type); fail_bus: return res; } static void vlynq_exit(void) { platform_driver_unregister(&vlynq_platform_driver); bus_unregister(&vlynq_bus_type); } module_init(vlynq_init); module_exit(vlynq_exit);
gpl-2.0
defreez/thesis-ics-kernel
arch/blackfin/kernel/traps.c
2980
16057
/* * Main exception handling logic. * * Copyright 2004-2010 Analog Devices Inc. * * Licensed under the GPL-2 or later */ #include <linux/bug.h> #include <linux/uaccess.h> #include <linux/module.h> #include <asm/traps.h> #include <asm/cplb.h> #include <asm/blackfin.h> #include <asm/irq_handler.h> #include <linux/irq.h> #include <asm/trace.h> #include <asm/fixed_code.h> #include <asm/pseudo_instructions.h> #ifdef CONFIG_KGDB # include <linux/kgdb.h> # define CHK_DEBUGGER_TRAP() \ do { \ kgdb_handle_exception(trapnr, sig, info.si_code, fp); \ } while (0) # define CHK_DEBUGGER_TRAP_MAYBE() \ do { \ if (kgdb_connected) \ CHK_DEBUGGER_TRAP(); \ } while (0) #else # define CHK_DEBUGGER_TRAP() do { } while (0) # define CHK_DEBUGGER_TRAP_MAYBE() do { } while (0) #endif #ifdef CONFIG_DEBUG_VERBOSE #define verbose_printk(fmt, arg...) \ printk(fmt, ##arg) #else #define verbose_printk(fmt, arg...) \ ({ if (0) printk(fmt, ##arg); 0; }) #endif #if defined(CONFIG_DEBUG_MMRS) || defined(CONFIG_DEBUG_MMRS_MODULE) u32 last_seqstat; #ifdef CONFIG_DEBUG_MMRS_MODULE EXPORT_SYMBOL(last_seqstat); #endif #endif /* Initiate the event table handler */ void __init trap_init(void) { CSYNC(); bfin_write_EVT3(trap); CSYNC(); } static int kernel_mode_regs(struct pt_regs *regs) { return regs->ipend & 0xffc0; } asmlinkage notrace void trap_c(struct pt_regs *fp) { #ifdef CONFIG_DEBUG_BFIN_HWTRACE_ON int j; #endif #ifdef CONFIG_BFIN_PSEUDODBG_INSNS int opcode; #endif unsigned int cpu = raw_smp_processor_id(); const char *strerror = NULL; int sig = 0; siginfo_t info; unsigned long trapnr = fp->seqstat & SEQSTAT_EXCAUSE; trace_buffer_save(j); #if defined(CONFIG_DEBUG_MMRS) || defined(CONFIG_DEBUG_MMRS_MODULE) last_seqstat = (u32)fp->seqstat; #endif /* Important - be very careful dereferncing pointers - will lead to * double faults if the stack has become corrupt */ /* trap_c() will be called for exceptions. During exceptions * processing, the pc value should be set with retx value. 
* With this change we can cleanup some code in signal.c- TODO */ fp->orig_pc = fp->retx; /* printk("exception: 0x%x, ipend=%x, reti=%x, retx=%x\n", trapnr, fp->ipend, fp->pc, fp->retx); */ /* send the appropriate signal to the user program */ switch (trapnr) { /* This table works in conjunction with the one in ./mach-common/entry.S * Some exceptions are handled there (in assembly, in exception space) * Some are handled here, (in C, in interrupt space) * Some, like CPLB, are handled in both, where the normal path is * handled in assembly/exception space, and the error path is handled * here */ /* 0x00 - Linux Syscall, getting here is an error */ /* 0x01 - userspace gdb breakpoint, handled here */ case VEC_EXCPT01: info.si_code = TRAP_ILLTRAP; sig = SIGTRAP; CHK_DEBUGGER_TRAP_MAYBE(); /* Check if this is a breakpoint in kernel space */ if (kernel_mode_regs(fp)) goto traps_done; else break; /* 0x03 - User Defined, userspace stack overflow */ case VEC_EXCPT03: info.si_code = SEGV_STACKFLOW; sig = SIGSEGV; strerror = KERN_NOTICE EXC_0x03(KERN_NOTICE); CHK_DEBUGGER_TRAP_MAYBE(); break; /* 0x02 - KGDB initial connection and break signal trap */ case VEC_EXCPT02: #ifdef CONFIG_KGDB info.si_code = TRAP_ILLTRAP; sig = SIGTRAP; CHK_DEBUGGER_TRAP(); goto traps_done; #endif /* 0x04 - User Defined */ /* 0x05 - User Defined */ /* 0x06 - User Defined */ /* 0x07 - User Defined */ /* 0x08 - User Defined */ /* 0x09 - User Defined */ /* 0x0A - User Defined */ /* 0x0B - User Defined */ /* 0x0C - User Defined */ /* 0x0D - User Defined */ /* 0x0E - User Defined */ /* 0x0F - User Defined */ /* If we got here, it is most likely that someone was trying to use a * custom exception handler, and it is not actually installed properly */ case VEC_EXCPT04 ... 
VEC_EXCPT15: info.si_code = ILL_ILLPARAOP; sig = SIGILL; strerror = KERN_NOTICE EXC_0x04(KERN_NOTICE); CHK_DEBUGGER_TRAP_MAYBE(); break; /* 0x10 HW Single step, handled here */ case VEC_STEP: info.si_code = TRAP_STEP; sig = SIGTRAP; CHK_DEBUGGER_TRAP_MAYBE(); /* Check if this is a single step in kernel space */ if (kernel_mode_regs(fp)) goto traps_done; else break; /* 0x11 - Trace Buffer Full, handled here */ case VEC_OVFLOW: info.si_code = TRAP_TRACEFLOW; sig = SIGTRAP; strerror = KERN_NOTICE EXC_0x11(KERN_NOTICE); CHK_DEBUGGER_TRAP_MAYBE(); break; /* 0x12 - Reserved, Caught by default */ /* 0x13 - Reserved, Caught by default */ /* 0x14 - Reserved, Caught by default */ /* 0x15 - Reserved, Caught by default */ /* 0x16 - Reserved, Caught by default */ /* 0x17 - Reserved, Caught by default */ /* 0x18 - Reserved, Caught by default */ /* 0x19 - Reserved, Caught by default */ /* 0x1A - Reserved, Caught by default */ /* 0x1B - Reserved, Caught by default */ /* 0x1C - Reserved, Caught by default */ /* 0x1D - Reserved, Caught by default */ /* 0x1E - Reserved, Caught by default */ /* 0x1F - Reserved, Caught by default */ /* 0x20 - Reserved, Caught by default */ /* 0x21 - Undefined Instruction, handled here */ case VEC_UNDEF_I: #ifdef CONFIG_BUG if (kernel_mode_regs(fp)) { switch (report_bug(fp->pc, fp)) { case BUG_TRAP_TYPE_NONE: break; case BUG_TRAP_TYPE_WARN: dump_bfin_trace_buffer(); fp->pc += 2; goto traps_done; case BUG_TRAP_TYPE_BUG: /* call to panic() will dump trace, and it is * off at this point, so it won't be clobbered */ panic("BUG()"); } } #endif #ifdef CONFIG_BFIN_PSEUDODBG_INSNS /* * Support for the fake instructions, if the instruction fails, * then just execute a illegal opcode failure (like normal). 
* Don't support these instructions inside the kernel */ if (!kernel_mode_regs(fp) && get_instruction(&opcode, (unsigned short *)fp->pc)) { if (execute_pseudodbg_assert(fp, opcode)) goto traps_done; if (execute_pseudodbg(fp, opcode)) goto traps_done; } #endif info.si_code = ILL_ILLOPC; sig = SIGILL; strerror = KERN_NOTICE EXC_0x21(KERN_NOTICE); CHK_DEBUGGER_TRAP_MAYBE(); break; /* 0x22 - Illegal Instruction Combination, handled here */ case VEC_ILGAL_I: info.si_code = ILL_ILLPARAOP; sig = SIGILL; strerror = KERN_NOTICE EXC_0x22(KERN_NOTICE); CHK_DEBUGGER_TRAP_MAYBE(); break; /* 0x23 - Data CPLB protection violation, handled here */ case VEC_CPLB_VL: info.si_code = ILL_CPLB_VI; sig = SIGSEGV; strerror = KERN_NOTICE EXC_0x23(KERN_NOTICE); CHK_DEBUGGER_TRAP_MAYBE(); break; /* 0x24 - Data access misaligned, handled here */ case VEC_MISALI_D: info.si_code = BUS_ADRALN; sig = SIGBUS; strerror = KERN_NOTICE EXC_0x24(KERN_NOTICE); CHK_DEBUGGER_TRAP_MAYBE(); break; /* 0x25 - Unrecoverable Event, handled here */ case VEC_UNCOV: info.si_code = ILL_ILLEXCPT; sig = SIGILL; strerror = KERN_NOTICE EXC_0x25(KERN_NOTICE); CHK_DEBUGGER_TRAP_MAYBE(); break; /* 0x26 - Data CPLB Miss, normal case is handled in _cplb_hdr, error case is handled here */ case VEC_CPLB_M: info.si_code = BUS_ADRALN; sig = SIGBUS; strerror = KERN_NOTICE EXC_0x26(KERN_NOTICE); break; /* 0x27 - Data CPLB Multiple Hits - Linux Trap Zero, handled here */ case VEC_CPLB_MHIT: info.si_code = ILL_CPLB_MULHIT; sig = SIGSEGV; #ifdef CONFIG_DEBUG_HUNT_FOR_ZERO if (cpu_pda[cpu].dcplb_fault_addr < FIXED_CODE_START) strerror = KERN_NOTICE "NULL pointer access\n"; else #endif strerror = KERN_NOTICE EXC_0x27(KERN_NOTICE); CHK_DEBUGGER_TRAP_MAYBE(); break; /* 0x28 - Emulation Watchpoint, handled here */ case VEC_WATCH: info.si_code = TRAP_WATCHPT; sig = SIGTRAP; pr_debug(EXC_0x28(KERN_DEBUG)); CHK_DEBUGGER_TRAP_MAYBE(); /* Check if this is a watchpoint in kernel space */ if (kernel_mode_regs(fp)) goto traps_done; else break; 
#ifdef CONFIG_BF535 /* 0x29 - Instruction fetch access error (535 only) */ case VEC_ISTRU_VL: /* ADSP-BF535 only (MH) */ info.si_code = BUS_OPFETCH; sig = SIGBUS; strerror = KERN_NOTICE "BF535: VEC_ISTRU_VL\n"; CHK_DEBUGGER_TRAP_MAYBE(); break; #else /* 0x29 - Reserved, Caught by default */ #endif /* 0x2A - Instruction fetch misaligned, handled here */ case VEC_MISALI_I: info.si_code = BUS_ADRALN; sig = SIGBUS; strerror = KERN_NOTICE EXC_0x2A(KERN_NOTICE); CHK_DEBUGGER_TRAP_MAYBE(); break; /* 0x2B - Instruction CPLB protection violation, handled here */ case VEC_CPLB_I_VL: info.si_code = ILL_CPLB_VI; sig = SIGBUS; strerror = KERN_NOTICE EXC_0x2B(KERN_NOTICE); CHK_DEBUGGER_TRAP_MAYBE(); break; /* 0x2C - Instruction CPLB miss, handled in _cplb_hdr */ case VEC_CPLB_I_M: info.si_code = ILL_CPLB_MISS; sig = SIGBUS; strerror = KERN_NOTICE EXC_0x2C(KERN_NOTICE); break; /* 0x2D - Instruction CPLB Multiple Hits, handled here */ case VEC_CPLB_I_MHIT: info.si_code = ILL_CPLB_MULHIT; sig = SIGSEGV; #ifdef CONFIG_DEBUG_HUNT_FOR_ZERO if (cpu_pda[cpu].icplb_fault_addr < FIXED_CODE_START) strerror = KERN_NOTICE "Jump to NULL address\n"; else #endif strerror = KERN_NOTICE EXC_0x2D(KERN_NOTICE); CHK_DEBUGGER_TRAP_MAYBE(); break; /* 0x2E - Illegal use of Supervisor Resource, handled here */ case VEC_ILL_RES: info.si_code = ILL_PRVOPC; sig = SIGILL; strerror = KERN_NOTICE EXC_0x2E(KERN_NOTICE); CHK_DEBUGGER_TRAP_MAYBE(); break; /* 0x2F - Reserved, Caught by default */ /* 0x30 - Reserved, Caught by default */ /* 0x31 - Reserved, Caught by default */ /* 0x32 - Reserved, Caught by default */ /* 0x33 - Reserved, Caught by default */ /* 0x34 - Reserved, Caught by default */ /* 0x35 - Reserved, Caught by default */ /* 0x36 - Reserved, Caught by default */ /* 0x37 - Reserved, Caught by default */ /* 0x38 - Reserved, Caught by default */ /* 0x39 - Reserved, Caught by default */ /* 0x3A - Reserved, Caught by default */ /* 0x3B - Reserved, Caught by default */ /* 0x3C - Reserved, Caught by 
default */ /* 0x3D - Reserved, Caught by default */ /* 0x3E - Reserved, Caught by default */ /* 0x3F - Reserved, Caught by default */ case VEC_HWERR: info.si_code = BUS_ADRALN; sig = SIGBUS; switch (fp->seqstat & SEQSTAT_HWERRCAUSE) { /* System MMR Error */ case (SEQSTAT_HWERRCAUSE_SYSTEM_MMR): info.si_code = BUS_ADRALN; sig = SIGBUS; strerror = KERN_NOTICE HWC_x2(KERN_NOTICE); break; /* External Memory Addressing Error */ case (SEQSTAT_HWERRCAUSE_EXTERN_ADDR): if (ANOMALY_05000310) { static unsigned long anomaly_rets; if ((fp->pc >= (L1_CODE_START + L1_CODE_LENGTH - 512)) && (fp->pc < (L1_CODE_START + L1_CODE_LENGTH))) { /* * A false hardware error will happen while fetching at * the L1 instruction SRAM boundary. Ignore it. */ anomaly_rets = fp->rets; goto traps_done; } else if (fp->rets == anomaly_rets) { /* * While boundary code returns to a function, at the ret * point, a new false hardware error might occur too based * on tests. Ignore it too. */ goto traps_done; } else if ((fp->rets >= (L1_CODE_START + L1_CODE_LENGTH - 512)) && (fp->rets < (L1_CODE_START + L1_CODE_LENGTH))) { /* * If boundary code calls a function, at the entry point, * a new false hardware error maybe happen based on tests. * Ignore it too. 
*/ goto traps_done; } else anomaly_rets = 0; } info.si_code = BUS_ADRERR; sig = SIGBUS; strerror = KERN_NOTICE HWC_x3(KERN_NOTICE); break; /* Performance Monitor Overflow */ case (SEQSTAT_HWERRCAUSE_PERF_FLOW): strerror = KERN_NOTICE HWC_x12(KERN_NOTICE); break; /* RAISE 5 instruction */ case (SEQSTAT_HWERRCAUSE_RAISE_5): printk(KERN_NOTICE HWC_x18(KERN_NOTICE)); break; default: /* Reserved */ printk(KERN_NOTICE HWC_default(KERN_NOTICE)); break; } CHK_DEBUGGER_TRAP_MAYBE(); break; /* * We should be handling all known exception types above, * if we get here we hit a reserved one, so panic */ default: info.si_code = ILL_ILLPARAOP; sig = SIGILL; verbose_printk(KERN_EMERG "Caught Unhandled Exception, code = %08lx\n", (fp->seqstat & SEQSTAT_EXCAUSE)); CHK_DEBUGGER_TRAP_MAYBE(); break; } BUG_ON(sig == 0); /* If the fault was caused by a kernel thread, or interrupt handler * we will kernel panic, so the system reboots. */ if (kernel_mode_regs(fp) || (current && !current->mm)) { console_verbose(); oops_in_progress = 1; } if (sig != SIGTRAP) { if (strerror) verbose_printk(strerror); dump_bfin_process(fp); dump_bfin_mem(fp); show_regs(fp); /* Print out the trace buffer if it makes sense */ #ifndef CONFIG_DEBUG_BFIN_NO_KERN_HWTRACE if (trapnr == VEC_CPLB_I_M || trapnr == VEC_CPLB_M) verbose_printk(KERN_NOTICE "No trace since you do not have " "CONFIG_DEBUG_BFIN_NO_KERN_HWTRACE enabled\n\n"); else #endif dump_bfin_trace_buffer(); if (oops_in_progress) { /* Dump the current kernel stack */ verbose_printk(KERN_NOTICE "Kernel Stack\n"); show_stack(current, NULL); print_modules(); #ifndef CONFIG_ACCESS_CHECK verbose_printk(KERN_EMERG "Please turn on " "CONFIG_ACCESS_CHECK\n"); #endif panic("Kernel exception"); } else { #ifdef CONFIG_DEBUG_VERBOSE unsigned long *stack; /* Dump the user space stack */ stack = (unsigned long *)rdusp(); verbose_printk(KERN_NOTICE "Userspace Stack\n"); show_stack(NULL, stack); #endif } } #ifdef CONFIG_IPIPE if (!ipipe_trap_notify(fp->seqstat & 0x3f, 
fp)) #endif { info.si_signo = sig; info.si_errno = 0; switch (trapnr) { case VEC_CPLB_VL: case VEC_MISALI_D: case VEC_CPLB_M: case VEC_CPLB_MHIT: info.si_addr = (void __user *)cpu_pda[cpu].dcplb_fault_addr; break; default: info.si_addr = (void __user *)fp->pc; break; } force_sig_info(sig, &info, current); } if ((ANOMALY_05000461 && trapnr == VEC_HWERR && !access_ok(VERIFY_READ, fp->pc, 8)) || (ANOMALY_05000281 && trapnr == VEC_HWERR) || (ANOMALY_05000189 && (trapnr == VEC_CPLB_I_VL || trapnr == VEC_CPLB_VL))) fp->pc = SAFE_USER_INSTRUCTION; traps_done: trace_buffer_restore(j); } asmlinkage void double_fault_c(struct pt_regs *fp) { #ifdef CONFIG_DEBUG_BFIN_HWTRACE_ON int j; trace_buffer_save(j); #endif console_verbose(); oops_in_progress = 1; #ifdef CONFIG_DEBUG_VERBOSE printk(KERN_EMERG "Double Fault\n"); #ifdef CONFIG_DEBUG_DOUBLEFAULT_PRINT if (((long)fp->seqstat & SEQSTAT_EXCAUSE) == VEC_UNCOV) { unsigned int cpu = raw_smp_processor_id(); char buf[150]; decode_address(buf, cpu_pda[cpu].retx_doublefault); printk(KERN_EMERG "While handling exception (EXCAUSE = 0x%x) at %s:\n", (unsigned int)cpu_pda[cpu].seqstat_doublefault & SEQSTAT_EXCAUSE, buf); decode_address(buf, cpu_pda[cpu].dcplb_doublefault_addr); printk(KERN_NOTICE " DCPLB_FAULT_ADDR: %s\n", buf); decode_address(buf, cpu_pda[cpu].icplb_doublefault_addr); printk(KERN_NOTICE " ICPLB_FAULT_ADDR: %s\n", buf); decode_address(buf, fp->retx); printk(KERN_NOTICE "The instruction at %s caused a double exception\n", buf); } else #endif { dump_bfin_process(fp); dump_bfin_mem(fp); show_regs(fp); dump_bfin_trace_buffer(); } #endif panic("Double Fault - unrecoverable event"); } void panic_cplb_error(int cplb_panic, struct pt_regs *fp) { switch (cplb_panic) { case CPLB_NO_UNLOCKED: printk(KERN_EMERG "All CPLBs are locked\n"); break; case CPLB_PROT_VIOL: return; case CPLB_NO_ADDR_MATCH: return; case CPLB_UNKNOWN_ERR: printk(KERN_EMERG "Unknown CPLB Exception\n"); break; } oops_in_progress = 1; dump_bfin_process(fp); 
dump_bfin_mem(fp); show_regs(fp); dump_stack(); panic("Unrecoverable event"); } #ifdef CONFIG_BUG int is_valid_bugaddr(unsigned long addr) { unsigned int opcode; if (!get_instruction(&opcode, (unsigned short *)addr)) return 0; return opcode == BFIN_BUG_OPCODE; } #endif /* stub this out */ #ifndef CONFIG_DEBUG_VERBOSE void show_regs(struct pt_regs *fp) { } #endif
gpl-2.0
ronasimi/aosp-kernel_lge_geeb
sound/soc/blackfin/bf5xx-ssm2602.c
4772
4141
/* * File: sound/soc/blackfin/bf5xx-ssm2602.c * Author: Cliff Cai <Cliff.Cai@analog.com> * * Created: Tue June 06 2008 * Description: board driver for SSM2602 sound chip * * Modified: * Copyright 2008 Analog Devices Inc. * * Bugs: Enter bugs at http://blackfin.uclinux.org/ * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, see the file COPYING, or write * to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/device.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/soc.h> #include <sound/pcm_params.h> #include <asm/dma.h> #include <asm/portmux.h> #include <linux/gpio.h> #include "../codecs/ssm2602.h" #include "bf5xx-sport.h" #include "bf5xx-i2s-pcm.h" static struct snd_soc_card bf5xx_ssm2602; static int bf5xx_ssm2602_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params) { struct snd_soc_pcm_runtime *rtd = substream->private_data; struct snd_soc_dai *codec_dai = rtd->codec_dai; unsigned int clk = 0; int ret = 0; pr_debug("%s rate %d format %x\n", __func__, params_rate(params), params_format(params)); /* * If you are using a crystal source which frequency is not 12MHz * then modify the below case statement with frequency of the crystal. * * If you are using the SPORT to generate clocking then this is * where to do it. 
*/ switch (params_rate(params)) { case 8000: case 16000: case 48000: case 96000: case 11025: case 22050: case 44100: clk = 12000000; break; } ret = snd_soc_dai_set_sysclk(codec_dai, SSM2602_SYSCLK, clk, SND_SOC_CLOCK_IN); if (ret < 0) return ret; return 0; } static struct snd_soc_ops bf5xx_ssm2602_ops = { .hw_params = bf5xx_ssm2602_hw_params, }; /* CODEC is master for BCLK and LRC in this configuration. */ #define BF5XX_SSM2602_DAIFMT (SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF | \ SND_SOC_DAIFMT_CBM_CFM) static struct snd_soc_dai_link bf5xx_ssm2602_dai[] = { { .name = "ssm2602", .stream_name = "SSM2602", .cpu_dai_name = "bfin-i2s.0", .codec_dai_name = "ssm2602-hifi", .platform_name = "bfin-i2s-pcm-audio", .codec_name = "ssm2602.0-001b", .ops = &bf5xx_ssm2602_ops, .dai_fmt = BF5XX_SSM2602_DAIFMT, }, { .name = "ssm2602", .stream_name = "SSM2602", .cpu_dai_name = "bfin-i2s.1", .codec_dai_name = "ssm2602-hifi", .platform_name = "bfin-i2s-pcm-audio", .codec_name = "ssm2602.0-001b", .ops = &bf5xx_ssm2602_ops, .dai_fmt = BF5XX_SSM2602_DAIFMT, }, }; static struct snd_soc_card bf5xx_ssm2602 = { .name = "bfin-ssm2602", .owner = THIS_MODULE, .dai_link = &bf5xx_ssm2602_dai[CONFIG_SND_BF5XX_SPORT_NUM], .num_links = 1, }; static struct platform_device *bf5xx_ssm2602_snd_device; static int __init bf5xx_ssm2602_init(void) { int ret; pr_debug("%s enter\n", __func__); bf5xx_ssm2602_snd_device = platform_device_alloc("soc-audio", -1); if (!bf5xx_ssm2602_snd_device) return -ENOMEM; platform_set_drvdata(bf5xx_ssm2602_snd_device, &bf5xx_ssm2602); ret = platform_device_add(bf5xx_ssm2602_snd_device); if (ret) platform_device_put(bf5xx_ssm2602_snd_device); return ret; } static void __exit bf5xx_ssm2602_exit(void) { pr_debug("%s enter\n", __func__); platform_device_unregister(bf5xx_ssm2602_snd_device); } module_init(bf5xx_ssm2602_init); module_exit(bf5xx_ssm2602_exit); /* Module information */ MODULE_AUTHOR("Cliff Cai"); MODULE_DESCRIPTION("ALSA SoC SSM2602 BF527-EZKIT"); 
MODULE_LICENSE("GPL");
gpl-2.0
ffolkes/android_kernel_samsung_smdk4412
arch/powerpc/platforms/pasemi/pci.c
4772
5727
/*
 * Copyright (C) 2006 PA Semi, Inc
 *
 * Authors: Kip Walker, PA Semi
 *	    Olof Johansson, PA Semi
 *
 * Maintained by: Olof Johansson <olof@lixom.net>
 *
 * Based on arch/powerpc/platforms/maple/pci.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

#include <linux/kernel.h>
#include <linux/pci.h>

#include <asm/pci-bridge.h>
#include <asm/machdep.h>

#include <asm/ppc-pci.h>

/*
 * Map (bus, devfn, register offset) to an offset within the host
 * bridge's memory-mapped config space window (ECAM-style layout:
 * bus in bits 27:20, devfn in 19:12, register in 11:0).
 */
#define PA_PXP_CFA(bus, devfn, off) (((bus) << 20) | ((devfn) << 12) | (off))

/* Bounds-check a config-space access before it is issued. */
static inline int pa_pxp_offset_valid(u8 bus, u8 devfn, int offset)
{
	/* Device 0 Function 0 is special: It's config space spans function 1 as
	 * well, so allow larger offset. It's really a two-function device but the
	 * second function does not probe.
	 */
	if (bus == 0 && devfn == 0)
		return offset < 8192;
	else
		return offset < 4096;
}

/* Translate a config access to a virtual address in the mapped window. */
static void volatile __iomem *pa_pxp_cfg_addr(struct pci_controller *hose,
				       u8 bus, u8 devfn, int offset)
{
	return hose->cfg_data + PA_PXP_CFA(bus, devfn, offset);
}

/*
 * True for the PCIe root ports (bus 0, slots 16/17, functions 0-3)
 * that are affected by erratum 5945 below.
 */
static inline int is_root_port(int busno, int devfn)
{
	return ((busno == 0) && (PCI_FUNC(devfn) < 4) &&
		 ((PCI_SLOT(devfn) == 16) || (PCI_SLOT(devfn) == 17)));
}

/* Register ranges on the root ports that need the 5945 workaround. */
static inline int is_5945_reg(int reg)
{
	return (((reg >= 0x18) && (reg < 0x34)) ||
		((reg >= 0x158) && (reg < 0x178)));
}

/*
 * Apply the erratum-5945 read sequence when it is needed.
 *
 * Returns 1 (and fills in *val) if the access was handled here,
 * 0 if the caller should perform a normal config read.
 * The exact MMIO ordering below (dummy write, read, write-back) is
 * mandated by the erratum — do not reorder.
 */
static int workaround_5945(struct pci_bus *bus, unsigned int devfn,
			   int offset, int len, u32 *val)
{
	struct pci_controller *hose;
	void volatile __iomem *addr, *dummy;
	int byte;
	u32 tmp;

	if (!is_root_port(bus->number, devfn) || !is_5945_reg(offset))
		return 0;

	hose = pci_bus_to_host(bus);

	addr = pa_pxp_cfg_addr(hose, bus->number, devfn, offset & ~0x3);
	byte = offset & 0x3;

	/* Workaround bug 5945: write 0 to a dummy register before reading,
	 * and write back what we read. We must read/write the full 32-bit
	 * contents so we need to shift and mask by hand.
	 */
	dummy = pa_pxp_cfg_addr(hose, bus->number, devfn, 0x10);
	out_le32(dummy, 0);
	tmp = in_le32(addr);
	out_le32(addr, tmp);

	/* Extract the requested 1/2/4-byte field from the 32-bit word. */
	switch (len) {
	case 1:
		*val = (tmp >> (8*byte)) & 0xff;
		break;
	case 2:
		if (byte == 0)
			*val = tmp & 0xffff;
		else
			*val = (tmp >> 16) & 0xffff;
		break;
	default:
		*val = tmp;
		break;
	}

	return 1;
}

/* pci_ops.read: config-space read with validation and erratum handling. */
static int pa_pxp_read_config(struct pci_bus *bus, unsigned int devfn,
			      int offset, int len, u32 *val)
{
	struct pci_controller *hose;
	void volatile __iomem *addr;

	hose = pci_bus_to_host(bus);
	if (!hose)
		return PCIBIOS_DEVICE_NOT_FOUND;

	if (!pa_pxp_offset_valid(bus->number, devfn, offset))
		return PCIBIOS_BAD_REGISTER_NUMBER;

	if (workaround_5945(bus, devfn, offset, len, val))
		return PCIBIOS_SUCCESSFUL;

	addr = pa_pxp_cfg_addr(hose, bus->number, devfn, offset);

	/*
	 * Note: the caller has already checked that offset is
	 * suitably aligned and that len is 1, 2 or 4.
	 */
	switch (len) {
	case 1:
		*val = in_8(addr);
		break;
	case 2:
		*val = in_le16(addr);
		break;
	default:
		*val = in_le32(addr);
		break;
	}

	return PCIBIOS_SUCCESSFUL;
}

/* pci_ops.write: config-space write with the same validation as reads. */
static int pa_pxp_write_config(struct pci_bus *bus, unsigned int devfn,
			       int offset, int len, u32 val)
{
	struct pci_controller *hose;
	void volatile __iomem *addr;

	hose = pci_bus_to_host(bus);
	if (!hose)
		return PCIBIOS_DEVICE_NOT_FOUND;

	if (!pa_pxp_offset_valid(bus->number, devfn, offset))
		return PCIBIOS_BAD_REGISTER_NUMBER;

	addr = pa_pxp_cfg_addr(hose, bus->number, devfn, offset);

	/*
	 * Note: the caller has already checked that offset is
	 * suitably aligned and that len is 1, 2 or 4.
	 */
	switch (len) {
	case 1:
		out_8(addr, val);
		break;
	case 2:
		out_le16(addr, val);
		break;
	default:
		out_le32(addr, val);
		break;
	}

	return PCIBIOS_SUCCESSFUL;
}

static struct pci_ops pa_pxp_ops = {
	.read = pa_pxp_read_config,
	.write = pa_pxp_write_config,
};

/* Install the config-space accessors and map the 256MB config window. */
static void __init setup_pa_pxp(struct pci_controller *hose)
{
	hose->ops = &pa_pxp_ops;
	hose->cfg_data = ioremap(0xe0000000, 0x10000000);
}

/* Allocate and initialise a PHB for the given device-tree node. */
static int __init pas_add_bridge(struct device_node *dev)
{
	struct pci_controller *hose;

	pr_debug("Adding PCI host bridge %s\n", dev->full_name);

	hose = pcibios_alloc_controller(dev);
	if (!hose)
		return -ENOMEM;

	hose->first_busno = 0;
	hose->last_busno = 0xff;

	setup_pa_pxp(hose);

	printk(KERN_INFO "Found PA-PXP PCI host bridge.\n");

	/* Interpret the "ranges" property */
	pci_process_bridge_OF_ranges(hose, dev, 1);

	return 0;
}

/*
 * Platform entry point: find all "pxp" children of the device-tree
 * root and register each one as a PCI host bridge.
 */
void __init pas_pci_init(void)
{
	struct device_node *np, *root;

	root = of_find_node_by_path("/");
	if (!root) {
		printk(KERN_CRIT "pas_pci_init: can't find root "
			"of device tree\n");
		return;
	}

	/* Take an extra reference on each node we successfully add. */
	for (np = NULL; (np = of_get_next_child(root, np)) != NULL;)
		if (np->name && !strcmp(np->name, "pxp") && !pas_add_bridge(np))
			of_node_get(np);

	of_node_put(root);

	/* Setup the linkage between OF nodes and PHBs */
	pci_devs_phb_init();

	/* Use the common resource allocation mechanism */
	pci_probe_only = 1;
}

/*
 * Return the virtual address of @dev's config space at @offset, for
 * other pasemi platform code that pokes config registers directly.
 */
void __iomem *pasemi_pci_getcfgaddr(struct pci_dev *dev, int offset)
{
	struct pci_controller *hose;

	hose = pci_bus_to_host(dev->bus);

	return (void __iomem *)pa_pxp_cfg_addr(hose, dev->bus->number, dev->devfn, offset);
}
gpl-2.0
C-Aniruddh/Axiom_totoro
arch/sh/kernel/dwarf.c
10916
30049
/*
 * Copyright (C) 2009 Matt Fleming <matt@console-pimps.org>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * This is an implementation of a DWARF unwinder. Its main purpose is
 * for generating stacktrace information. Based on the DWARF 3
 * specification from http://www.dwarfstd.org.
 *
 * TODO:
 *	- DWARF64 doesn't work.
 *	- Registers with DWARF_VAL_OFFSET rules aren't handled properly.
 */

/* #define DEBUG */
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/mempool.h>
#include <linux/mm.h>
#include <linux/elf.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <asm/dwarf.h>
#include <asm/unwinder.h>
#include <asm/sections.h>
#include <asm/unaligned.h>
#include <asm/stacktrace.h>

/* Reserve enough memory for two stack frames */
#define DWARF_FRAME_MIN_REQ	2

/* ... with 4 registers per frame. */
#define DWARF_REG_MIN_REQ	(DWARF_FRAME_MIN_REQ * 4)

/* Slab caches and mempools backing frame/register allocation; the
 * mempools guarantee forward progress for a minimal unwind even
 * under memory pressure (GFP_ATOMIC allocations can fail). */
static struct kmem_cache *dwarf_frame_cachep;
static mempool_t *dwarf_frame_pool;

static struct kmem_cache *dwarf_reg_cachep;
static mempool_t *dwarf_reg_pool;

/* CIEs and FDEs parsed out of .eh_frame, indexed by red-black tree. */
static struct rb_root cie_root;
static DEFINE_SPINLOCK(dwarf_cie_lock);

static struct rb_root fde_root;
static DEFINE_SPINLOCK(dwarf_fde_lock);

/* Most-recently looked-up CIE; FDE lookups usually hit the same CIE. */
static struct dwarf_cie *cached_cie;

/* Set once the unwinder has been fully initialised (see
 * dwarf_unwinder_init()); unwind requests before that bail out. */
static unsigned int dwarf_unwinder_ready;

/**
 * dwarf_frame_alloc_reg - allocate memory for a DWARF register
 * @frame: the DWARF frame whose list of registers we insert on
 * @reg_num: the register number
 *
 * Allocate space for, and initialise, a dwarf reg from
 * dwarf_reg_pool and insert it onto the (unsorted) linked-list of
 * dwarf registers for @frame.
 *
 * Return the initialised DWARF reg.
 */
static struct dwarf_reg *dwarf_frame_alloc_reg(struct dwarf_frame *frame,
					       unsigned int reg_num)
{
	struct dwarf_reg *reg;

	reg = mempool_alloc(dwarf_reg_pool, GFP_ATOMIC);
	if (!reg) {
		printk(KERN_WARNING "Unable to allocate a DWARF register\n");
		/*
		 * Let's just bomb hard here, we have no way to
		 * gracefully recover.
		 */
		UNWINDER_BUG();
	}

	reg->number = reg_num;
	reg->addr = 0;
	reg->flags = 0;

	list_add(&reg->link, &frame->reg_list);

	return reg;
}

/* Release every register allocated for @frame back to the mempool. */
static void dwarf_frame_free_regs(struct dwarf_frame *frame)
{
	struct dwarf_reg *reg, *n;

	list_for_each_entry_safe(reg, n, &frame->reg_list, link) {
		list_del(&reg->link);
		mempool_free(reg, dwarf_reg_pool);
	}
}

/**
 * dwarf_frame_reg - return a DWARF register
 * @frame: the DWARF frame to search in for @reg_num
 * @reg_num: the register number to search for
 *
 * Lookup and return the dwarf reg @reg_num for this frame. Return
 * NULL if @reg_num is an invalid register number.
 */
static struct dwarf_reg *dwarf_frame_reg(struct dwarf_frame *frame,
					 unsigned int reg_num)
{
	struct dwarf_reg *reg;

	list_for_each_entry(reg, &frame->reg_list, link) {
		if (reg->number == reg_num)
			return reg;
	}

	return NULL;
}

/**
 * dwarf_read_addr - read dwarf data
 * @src: source address of data
 * @dst: destination address to store the data to
 *
 * Read 'n' bytes from @src, where 'n' is the size of an address on
 * the native machine. We return the number of bytes read, which
 * should always be 'n'. We also have to be careful when reading
 * from @src and writing to @dst, because they can be arbitrarily
 * aligned. Return 'n' - the number of bytes read.
 *
 * NOTE(review): only a u32 is actually transferred, consistent with
 * the "DWARF64 doesn't work" TODO at the top of this file.
 */
static inline int dwarf_read_addr(unsigned long *src, unsigned long *dst)
{
	u32 val = get_unaligned(src);
	put_unaligned(val, dst);
	return sizeof(unsigned long *);
}

/**
 * dwarf_read_uleb128 - read unsigned LEB128 data
 * @addr: the address where the ULEB128 data is stored
 * @ret: address to store the result
 *
 * Decode an unsigned LEB128 encoded datum. The algorithm is taken
 * from Appendix C of the DWARF 3 spec. For information on the
 * encodings refer to section "7.6 - Variable Length Data". Return
 * the number of bytes read.
 */
static inline unsigned long dwarf_read_uleb128(char *addr, unsigned int *ret)
{
	unsigned int result;
	unsigned char byte;
	int shift, count;

	result = 0;
	shift = 0;
	count = 0;

	while (1) {
		byte = __raw_readb(addr);
		addr++;
		count++;

		result |= (byte & 0x7f) << shift;
		shift += 7;

		if (!(byte & 0x80))
			break;
	}

	*ret = result;

	return count;
}

/**
 * dwarf_read_leb128 - read signed LEB128 data
 * @addr: the address of the LEB128 encoded data
 * @ret: address to store the result
 *
 * Decode signed LEB128 data. The algorithm is taken from Appendix
 * C of the DWARF 3 spec. Return the number of bytes read.
 */
static inline unsigned long dwarf_read_leb128(char *addr, int *ret)
{
	unsigned char byte;
	int result, shift;
	int num_bits;
	int count;

	result = 0;
	shift = 0;
	count = 0;

	while (1) {
		byte = __raw_readb(addr);
		addr++;
		result |= (byte & 0x7f) << shift;
		shift += 7;
		count++;

		if (!(byte & 0x80))
			break;
	}

	/* The number of bits in a signed integer. */
	num_bits = 8 * sizeof(result);

	/* Sign-extend if the value is negative and not full width. */
	if ((shift < num_bits) && (byte & 0x40))
		result |= (-1 << shift);

	*ret = result;

	return count;
}

/**
 * dwarf_read_encoded_value - return the decoded value at @addr
 * @addr: the address of the encoded value
 * @val: where to write the decoded value
 * @encoding: the encoding with which we can decode @addr
 *
 * GCC emits encoded address in the .eh_frame FDE entries. Decode
 * the value at @addr using @encoding. The decoded value is written
 * to @val and the number of bytes read is returned.
 *
 * Only DW_EH_PE_absptr/DW_EH_PE_pcrel application with 4-byte data
 * is handled; anything else trips UNWINDER_BUG().
 */
static int dwarf_read_encoded_value(char *addr, unsigned long *val,
				    char encoding)
{
	unsigned long decoded_addr = 0;
	int count = 0;

	switch (encoding & 0x70) {
	case DW_EH_PE_absptr:
		break;
	case DW_EH_PE_pcrel:
		decoded_addr = (unsigned long)addr;
		break;
	default:
		pr_debug("encoding=0x%x\n", (encoding & 0x70));
		UNWINDER_BUG();
	}

	/* An unspecified data format defaults to 4-byte data. */
	if ((encoding & 0x07) == 0x00)
		encoding |= DW_EH_PE_udata4;

	switch (encoding & 0x0f) {
	case DW_EH_PE_sdata4:
	case DW_EH_PE_udata4:
		count += 4;
		decoded_addr += get_unaligned((u32 *)addr);
		__raw_writel(decoded_addr, val);
		break;
	default:
		pr_debug("encoding=0x%x\n", encoding);
		UNWINDER_BUG();
	}

	return count;
}

/**
 * dwarf_entry_len - return the length of an FDE or CIE
 * @addr: the address of the entry
 * @len: the length of the entry
 *
 * Read the initial_length field of the entry and store the size of
 * the entry in @len. We return the number of bytes read. Return a
 * count of 0 on error.
 */
static inline int dwarf_entry_len(char *addr, unsigned long *len)
{
	u32 initial_len;
	int count;

	initial_len = get_unaligned((u32 *)addr);
	count = 4;

	/*
	 * An initial length field value in the range DW_LEN_EXT_LO -
	 * DW_LEN_EXT_HI indicates an extension, and should not be
	 * interpreted as a length. The only extension that we currently
	 * understand is the use of DWARF64 addresses.
	 */
	if (initial_len >= DW_EXT_LO && initial_len <= DW_EXT_HI) {
		/*
		 * The 64-bit length field immediately follows the
		 * compulsory 32-bit length field.
		 *
		 * NOTE(review): "(u64 *)addr + 4" advances by four u64s
		 * (32 bytes), not 4 bytes; "(u64 *)(addr + 4)" looks like
		 * the intent. Harmless today since DWARF64 is unsupported
		 * (see TODO above) — confirm before enabling DWARF64.
		 */
		if (initial_len == DW_EXT_DWARF64) {
			*len = get_unaligned((u64 *)addr + 4);
			count = 12;
		} else {
			printk(KERN_WARNING "Unknown DWARF extension\n");
			count = 0;
		}
	} else
		*len = initial_len;

	return count;
}

/**
 * dwarf_lookup_cie - locate the cie
 * @cie_ptr: pointer to help with lookup
 */
static struct dwarf_cie *dwarf_lookup_cie(unsigned long cie_ptr)
{
	struct rb_node **rb_node = &cie_root.rb_node;
	struct dwarf_cie *cie = NULL;
	unsigned long flags;

	spin_lock_irqsave(&dwarf_cie_lock, flags);

	/*
	 * We've cached the last CIE we looked up because chances are
	 * that the FDE wants this CIE.
	 */
	if (cached_cie && cached_cie->cie_pointer == cie_ptr) {
		cie = cached_cie;
		goto out;
	}

	while (*rb_node) {
		struct dwarf_cie *cie_tmp;

		cie_tmp = rb_entry(*rb_node, struct dwarf_cie, node);
		BUG_ON(!cie_tmp);

		if (cie_ptr == cie_tmp->cie_pointer) {
			cie = cie_tmp;
			cached_cie = cie_tmp;
			goto out;
		} else {
			if (cie_ptr < cie_tmp->cie_pointer)
				rb_node = &(*rb_node)->rb_left;
			else
				rb_node = &(*rb_node)->rb_right;
		}
	}

out:
	spin_unlock_irqrestore(&dwarf_cie_lock, flags);
	return cie;
}

/**
 * dwarf_lookup_fde - locate the FDE that covers pc
 * @pc: the program counter
 */
struct dwarf_fde *dwarf_lookup_fde(unsigned long pc)
{
	struct rb_node **rb_node = &fde_root.rb_node;
	struct dwarf_fde *fde = NULL;
	unsigned long flags;

	spin_lock_irqsave(&dwarf_fde_lock, flags);

	while (*rb_node) {
		struct dwarf_fde *fde_tmp;
		unsigned long tmp_start, tmp_end;

		fde_tmp = rb_entry(*rb_node, struct dwarf_fde, node);
		BUG_ON(!fde_tmp);

		tmp_start = fde_tmp->initial_location;
		tmp_end = fde_tmp->initial_location + fde_tmp->address_range;

		if (pc < tmp_start) {
			rb_node = &(*rb_node)->rb_left;
		} else {
			if (pc < tmp_end) {
				fde = fde_tmp;
				goto out;
			} else
				rb_node = &(*rb_node)->rb_right;
		}
	}

out:
	spin_unlock_irqrestore(&dwarf_fde_lock, flags);

	return fde;
}

/**
 * dwarf_cfa_execute_insns - execute instructions to calculate a CFA
 * @insn_start: address of the first instruction
 * @insn_end: address of the last instruction
 * @cie: the CIE for this function
 * @fde: the FDE for this function
 * @frame: the instructions calculate the CFA for this frame
 * @pc: the program counter of the address we're interested in
 *
 * Execute the Call Frame instruction sequence starting at
 * @insn_start and ending at @insn_end. The instructions describe
 * how to calculate the Canonical Frame Address of a stackframe.
 * Store the results in @frame.
 */
static int dwarf_cfa_execute_insns(unsigned char *insn_start,
				   unsigned char *insn_end,
				   struct dwarf_cie *cie,
				   struct dwarf_fde *fde,
				   struct dwarf_frame *frame,
				   unsigned long pc)
{
	unsigned char insn;
	unsigned char *current_insn;
	unsigned int count, delta, reg, expr_len, offset;
	struct dwarf_reg *regp;

	current_insn = insn_start;

	/* Stop once the row for @pc has been materialised. */
	while (current_insn < insn_end && frame->pc <= pc) {
		insn = __raw_readb(current_insn++);

		/*
		 * Firstly, handle the opcodes that embed their operands
		 * in the instructions.
		 */
		switch (DW_CFA_opcode(insn)) {
		case DW_CFA_advance_loc:
			delta = DW_CFA_operand(insn);
			delta *= cie->code_alignment_factor;
			frame->pc += delta;
			continue;
			/* NOTREACHED */
		case DW_CFA_offset:
			reg = DW_CFA_operand(insn);
			count = dwarf_read_uleb128(current_insn, &offset);
			current_insn += count;
			offset *= cie->data_alignment_factor;
			regp = dwarf_frame_alloc_reg(frame, reg);
			regp->addr = offset;
			regp->flags |= DWARF_REG_OFFSET;
			continue;
			/* NOTREACHED */
		case DW_CFA_restore:
			/* NOTE(review): operand decoded but no restore is
			 * performed — the register rule is left unchanged. */
			reg = DW_CFA_operand(insn);
			continue;
			/* NOTREACHED */
		}

		/*
		 * Secondly, handle the opcodes that don't embed their
		 * operands in the instruction.
		 */
		switch (insn) {
		case DW_CFA_nop:
			continue;
		case DW_CFA_advance_loc1:
			delta = *current_insn++;
			frame->pc += delta * cie->code_alignment_factor;
			break;
		case DW_CFA_advance_loc2:
			delta = get_unaligned((u16 *)current_insn);
			current_insn += 2;
			frame->pc += delta * cie->code_alignment_factor;
			break;
		case DW_CFA_advance_loc4:
			delta = get_unaligned((u32 *)current_insn);
			current_insn += 4;
			frame->pc += delta * cie->code_alignment_factor;
			break;
		case DW_CFA_offset_extended:
			count = dwarf_read_uleb128(current_insn, &reg);
			current_insn += count;
			count = dwarf_read_uleb128(current_insn, &offset);
			current_insn += count;
			offset *= cie->data_alignment_factor;
			break;
		case DW_CFA_restore_extended:
			count = dwarf_read_uleb128(current_insn, &reg);
			current_insn += count;
			break;
		case DW_CFA_undefined:
			count = dwarf_read_uleb128(current_insn, &reg);
			current_insn += count;
			regp = dwarf_frame_alloc_reg(frame, reg);
			regp->flags |= DWARF_UNDEFINED;
			break;
		case DW_CFA_def_cfa:
			count = dwarf_read_uleb128(current_insn,
						   &frame->cfa_register);
			current_insn += count;
			count = dwarf_read_uleb128(current_insn,
						   &frame->cfa_offset);
			current_insn += count;

			frame->flags |= DWARF_FRAME_CFA_REG_OFFSET;
			break;
		case DW_CFA_def_cfa_register:
			count = dwarf_read_uleb128(current_insn,
						   &frame->cfa_register);
			current_insn += count;

			frame->flags |= DWARF_FRAME_CFA_REG_OFFSET;
			break;
		case DW_CFA_def_cfa_offset:
			count = dwarf_read_uleb128(current_insn, &offset);
			current_insn += count;

			frame->cfa_offset = offset;
			break;
		case DW_CFA_def_cfa_expression:
			count = dwarf_read_uleb128(current_insn, &expr_len);
			current_insn += count;

			frame->cfa_expr = current_insn;
			frame->cfa_expr_len = expr_len;
			current_insn += expr_len;

			frame->flags |= DWARF_FRAME_CFA_REG_EXP;
			break;
		case DW_CFA_offset_extended_sf:
			count = dwarf_read_uleb128(current_insn, &reg);
			current_insn += count;
			count = dwarf_read_leb128(current_insn, &offset);
			current_insn += count;
			offset *= cie->data_alignment_factor;
			regp = dwarf_frame_alloc_reg(frame, reg);
			regp->flags |= DWARF_REG_OFFSET;
			regp->addr = offset;
			break;
		case DW_CFA_val_offset:
			count = dwarf_read_uleb128(current_insn, &reg);
			current_insn += count;
			/* NOTE(review): current_insn is not advanced past
			 * the LEB128 offset read below, unlike the
			 * _extended_sf case above — confirm against the
			 * DWARF 3 spec before relying on multi-insn
			 * sequences containing DW_CFA_val_offset. */
			count = dwarf_read_leb128(current_insn, &offset);
			offset *= cie->data_alignment_factor;
			regp = dwarf_frame_alloc_reg(frame, reg);
			regp->flags |= DWARF_VAL_OFFSET;
			regp->addr = offset;
			break;
		case DW_CFA_GNU_args_size:
			count = dwarf_read_uleb128(current_insn, &offset);
			current_insn += count;
			break;
		case DW_CFA_GNU_negative_offset_extended:
			count = dwarf_read_uleb128(current_insn, &reg);
			current_insn += count;
			/* NOTE(review): same missing advance as
			 * DW_CFA_val_offset above. */
			count = dwarf_read_uleb128(current_insn, &offset);
			offset *= cie->data_alignment_factor;

			regp = dwarf_frame_alloc_reg(frame, reg);
			regp->flags |= DWARF_REG_OFFSET;
			regp->addr = -offset;
			break;
		default:
			pr_debug("unhandled DWARF instruction 0x%x\n", insn);
			UNWINDER_BUG();
			break;
		}
	}

	return 0;
}

/**
 * dwarf_free_frame - free the memory allocated for @frame
 * @frame: the frame to free
 */
void dwarf_free_frame(struct dwarf_frame *frame)
{
	dwarf_frame_free_regs(frame);
	mempool_free(frame, dwarf_frame_pool);
}

extern void ret_from_irq(void);

/**
 * dwarf_unwind_stack - unwind the stack
 *
 * @pc: address of the function to unwind
 * @prev: struct dwarf_frame of the previous stackframe on the callstack
 *
 * Return a struct dwarf_frame representing the most recent frame
 * on the callstack. Each of the lower (older) stack frames are
 * linked via the "prev" member.
 */
struct dwarf_frame *dwarf_unwind_stack(unsigned long pc,
				       struct dwarf_frame *prev)
{
	struct dwarf_frame *frame;
	struct dwarf_cie *cie;
	struct dwarf_fde *fde;
	struct dwarf_reg *reg;
	unsigned long addr;

	/*
	 * If we've been called in to before initialization has
	 * completed, bail out immediately.
	 */
	if (!dwarf_unwinder_ready)
		return NULL;

	/*
	 * If we're starting at the top of the stack we need to get the
	 * contents of a physical register to get the CFA in order to
	 * begin the virtual unwinding of the stack.
	 *
	 * NOTE: the return address is guaranteed to be setup by the
	 * time this function makes its first function call.
	 */
	if (!pc || !prev)
		pc = (unsigned long)current_text_addr();

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	/*
	 * If our stack has been patched by the function graph tracer
	 * then we might see the address of return_to_handler() where we
	 * expected to find the real return address.
	 */
	if (pc == (unsigned long)&return_to_handler) {
		int index = current->curr_ret_stack;

		/*
		 * We currently have no way of tracking how many
		 * return_to_handler()'s we've seen. If there is more
		 * than one patched return address on our stack,
		 * complain loudly.
		 */
		WARN_ON(index > 0);

		pc = current->ret_stack[index].ret;
	}
#endif

	frame = mempool_alloc(dwarf_frame_pool, GFP_ATOMIC);
	if (!frame) {
		printk(KERN_ERR "Unable to allocate a dwarf frame\n");
		UNWINDER_BUG();
	}

	INIT_LIST_HEAD(&frame->reg_list);
	frame->flags = 0;
	frame->prev = prev;
	frame->return_addr = 0;

	fde = dwarf_lookup_fde(pc);
	if (!fde) {
		/*
		 * This is our normal exit path. There are two reasons
		 * why we might exit here,
		 *
		 *	a) pc has no associated DWARF frame info and so
		 *	we don't know how to unwind this frame. This is
		 *	usually the case when we're trying to unwind a
		 *	frame that was called from some assembly code
		 *	that has no DWARF info, e.g. syscalls.
		 *
		 *	b) the DEBUG info for pc is bogus. There's
		 *	really no way to distinguish this case from the
		 *	case above, which sucks because we could print a
		 *	warning here.
		 */
		goto bail;
	}

	cie = dwarf_lookup_cie(fde->cie_pointer);

	frame->pc = fde->initial_location;

	/* CIE initial instructions */
	dwarf_cfa_execute_insns(cie->initial_instructions,
				cie->instructions_end, cie, fde,
				frame, pc);

	/* FDE instructions */
	dwarf_cfa_execute_insns(fde->instructions, fde->end, cie,
				fde, frame, pc);

	/* Calculate the CFA */
	switch (frame->flags) {
	case DWARF_FRAME_CFA_REG_OFFSET:
		if (prev) {
			reg = dwarf_frame_reg(prev, frame->cfa_register);
			UNWINDER_BUG_ON(!reg);
			UNWINDER_BUG_ON(reg->flags != DWARF_REG_OFFSET);

			addr = prev->cfa + reg->addr;
			frame->cfa = __raw_readl(addr);

		} else {
			/*
			 * Again, we're starting from the top of the
			 * stack. We need to physically read
			 * the contents of a register in order to get
			 * the Canonical Frame Address for this
			 * function.
			 */
			frame->cfa = dwarf_read_arch_reg(frame->cfa_register);
		}

		frame->cfa += frame->cfa_offset;
		break;
	default:
		UNWINDER_BUG();
	}

	reg = dwarf_frame_reg(frame, DWARF_ARCH_RA_REG);

	/*
	 * If we haven't seen the return address register or the return
	 * address column is undefined then we must assume that this is
	 * the end of the callstack.
	 */
	if (!reg || reg->flags == DWARF_UNDEFINED)
		goto bail;

	UNWINDER_BUG_ON(reg->flags != DWARF_REG_OFFSET);

	addr = frame->cfa + reg->addr;
	frame->return_addr = __raw_readl(addr);

	/*
	 * Ah, the joys of unwinding through interrupts.
	 *
	 * Interrupts are tricky - the DWARF info needs to be _really_
	 * accurate and unfortunately I'm seeing a lot of bogus DWARF
	 * info. For example, I've seen interrupts occur in epilogues
	 * just after the frame pointer (r14) had been restored. The
	 * problem was that the DWARF info claimed that the CFA could be
	 * reached by using the value of the frame pointer before it was
	 * restored.
	 *
	 * So until the compiler can be trusted to produce reliable
	 * DWARF info when it really matters, let's stop unwinding once
	 * we've calculated the function that was interrupted.
	 */
	if (prev && prev->pc == (unsigned long)ret_from_irq)
		frame->return_addr = 0;

	return frame;

bail:
	dwarf_free_frame(frame);
	return NULL;
}

/*
 * Parse one CIE out of .eh_frame and insert it into cie_root (and,
 * for modules, onto the module's cie_list so it can be removed on
 * unload). Returns 0 on success or -ENOMEM.
 */
static int dwarf_parse_cie(void *entry, void *p, unsigned long len,
			   unsigned char *end, struct module *mod)
{
	struct rb_node **rb_node = &cie_root.rb_node;
	struct rb_node *parent = *rb_node;
	struct dwarf_cie *cie;
	unsigned long flags;
	int count;

	cie = kzalloc(sizeof(*cie), GFP_KERNEL);
	if (!cie)
		return -ENOMEM;

	cie->length = len;

	/*
	 * Record the offset into the .eh_frame section
	 * for this CIE. It allows this CIE to be
	 * quickly and easily looked up from the
	 * corresponding FDE.
	 */
	cie->cie_pointer = (unsigned long)entry;

	cie->version = *(char *)p++;
	UNWINDER_BUG_ON(cie->version != 1);

	cie->augmentation = p;
	p += strlen(cie->augmentation) + 1;

	count = dwarf_read_uleb128(p, &cie->code_alignment_factor);
	p += count;

	count = dwarf_read_leb128(p, &cie->data_alignment_factor);
	p += count;

	/*
	 * Which column in the rule table contains the
	 * return address?
	 */
	if (cie->version == 1) {
		cie->return_address_reg = __raw_readb(p);
		p++;
	} else {
		count = dwarf_read_uleb128(p, &cie->return_address_reg);
		p += count;
	}

	if (cie->augmentation[0] == 'z') {
		unsigned int length, count;
		cie->flags |= DWARF_CIE_Z_AUGMENTATION;

		count = dwarf_read_uleb128(p, &length);
		p += count;

		UNWINDER_BUG_ON((unsigned char *)p > end);

		cie->initial_instructions = p + length;
		cie->augmentation++;
	}

	while (*cie->augmentation) {
		/*
		 * "L" indicates a byte showing how the
		 * LSDA pointer is encoded. Skip it.
		 */
		if (*cie->augmentation == 'L') {
			p++;
			cie->augmentation++;
		} else if (*cie->augmentation == 'R') {
			/*
			 * "R" indicates a byte showing
			 * how FDE addresses are
			 * encoded.
			 */
			cie->encoding = *(char *)p++;
			cie->augmentation++;
		} else if (*cie->augmentation == 'P') {
			/*
			 * "P" indicates a personality
			 * routine in the CIE
			 * augmentation.
			 */
			UNWINDER_BUG();
		} else if (*cie->augmentation == 'S') {
			UNWINDER_BUG();
		} else {
			/*
			 * Unknown augmentation. Assume
			 * 'z' augmentation.
			 */
			p = cie->initial_instructions;
			UNWINDER_BUG_ON(!p);
			break;
		}
	}

	cie->initial_instructions = p;
	cie->instructions_end = end;

	/* Add to list */
	spin_lock_irqsave(&dwarf_cie_lock, flags);

	while (*rb_node) {
		struct dwarf_cie *cie_tmp;

		cie_tmp = rb_entry(*rb_node, struct dwarf_cie, node);

		parent = *rb_node;

		if (cie->cie_pointer < cie_tmp->cie_pointer)
			rb_node = &parent->rb_left;
		else if (cie->cie_pointer >= cie_tmp->cie_pointer)
			rb_node = &parent->rb_right;
		else
			WARN_ON(1);
	}

	rb_link_node(&cie->node, parent, rb_node);
	rb_insert_color(&cie->node, &cie_root);

#ifdef CONFIG_MODULES
	if (mod != NULL)
		list_add_tail(&cie->link, &mod->arch.cie_list);
#endif

	spin_unlock_irqrestore(&dwarf_cie_lock, flags);

	return 0;
}

/*
 * Parse one FDE out of .eh_frame, resolve its CIE, and insert it
 * into fde_root keyed by the address range it covers. Returns 0 on
 * success or -ENOMEM.
 */
static int dwarf_parse_fde(void *entry, u32 entry_type,
			   void *start, unsigned long len,
			   unsigned char *end, struct module *mod)
{
	struct rb_node **rb_node = &fde_root.rb_node;
	struct rb_node *parent = *rb_node;
	struct dwarf_fde *fde;
	struct dwarf_cie *cie;
	unsigned long flags;
	int count;
	void *p = start;

	fde = kzalloc(sizeof(*fde), GFP_KERNEL);
	if (!fde)
		return -ENOMEM;

	fde->length = len;

	/*
	 * In a .eh_frame section the CIE pointer is the
	 * delta between the address within the FDE
	 * (of the CIE-pointer field itself) and the CIE it refers to.
	 */
	fde->cie_pointer = (unsigned long)(p - entry_type - 4);

	cie = dwarf_lookup_cie(fde->cie_pointer);
	fde->cie = cie;

	if (cie->encoding)
		count = dwarf_read_encoded_value(p, &fde->initial_location,
						 cie->encoding);
	else
		count = dwarf_read_addr(p, &fde->initial_location);

	p += count;

	if (cie->encoding)
		count = dwarf_read_encoded_value(p, &fde->address_range,
						 cie->encoding & 0x0f);
	else
		count = dwarf_read_addr(p, &fde->address_range);

	p += count;

	if (fde->cie->flags & DWARF_CIE_Z_AUGMENTATION) {
		unsigned int length;
		count = dwarf_read_uleb128(p, &length);
		p += count + length;
	}

	/* Call frame instructions. */
	fde->instructions = p;
	fde->end = end;

	/* Add to list. */
	spin_lock_irqsave(&dwarf_fde_lock, flags);

	while (*rb_node) {
		struct dwarf_fde *fde_tmp;
		unsigned long tmp_start, tmp_end;
		unsigned long start, end;

		fde_tmp = rb_entry(*rb_node, struct dwarf_fde, node);

		start = fde->initial_location;
		end = fde->initial_location + fde->address_range;

		tmp_start = fde_tmp->initial_location;
		tmp_end = fde_tmp->initial_location + fde_tmp->address_range;

		parent = *rb_node;

		if (start < tmp_start)
			rb_node = &parent->rb_left;
		else if (start >= tmp_end)
			rb_node = &parent->rb_right;
		else
			WARN_ON(1);
	}

	rb_link_node(&fde->node, parent, rb_node);
	rb_insert_color(&fde->node, &fde_root);

#ifdef CONFIG_MODULES
	if (mod != NULL)
		list_add_tail(&fde->link, &mod->arch.fde_list);
#endif

	spin_unlock_irqrestore(&dwarf_fde_lock, flags);

	return 0;
}

/*
 * stacktrace_ops.dump callback: walk the callstack with
 * dwarf_unwind_stack() and report each return address via
 * ops->address().
 */
static void dwarf_unwinder_dump(struct task_struct *task,
				struct pt_regs *regs,
				unsigned long *sp,
				const struct stacktrace_ops *ops,
				void *data)
{
	struct dwarf_frame *frame, *_frame;
	unsigned long return_addr;

	_frame = NULL;
	return_addr = 0;

	while (1) {
		frame = dwarf_unwind_stack(return_addr, _frame);

		if (_frame)
			dwarf_free_frame(_frame);

		_frame = frame;

		if (!frame || !frame->return_addr)
			break;

		return_addr = frame->return_addr;
		ops->address(data, return_addr, 1);
	}

	if (frame)
		dwarf_free_frame(frame);
}

static struct unwinder dwarf_unwinder = {
	.name = "dwarf-unwinder",
	.dump = dwarf_unwinder_dump,
	.rating = 150,
};

static void dwarf_unwinder_cleanup(void)
{
	struct rb_node **fde_rb_node = &fde_root.rb_node;
	struct rb_node **cie_rb_node = &cie_root.rb_node;

	/*
	 * Deallocate all the memory allocated for the DWARF unwinder.
	 * Traverse all the FDE/CIE lists and remove and free all the
	 * memory associated with those data structures.
	 */
	while (*fde_rb_node) {
		struct dwarf_fde *fde;

		fde = rb_entry(*fde_rb_node, struct dwarf_fde, node);
		rb_erase(*fde_rb_node, &fde_root);
		kfree(fde);
	}

	while (*cie_rb_node) {
		struct dwarf_cie *cie;

		cie = rb_entry(*cie_rb_node, struct dwarf_cie, node);
		rb_erase(*cie_rb_node, &cie_root);
		kfree(cie);
	}

	kmem_cache_destroy(dwarf_reg_cachep);
	kmem_cache_destroy(dwarf_frame_cachep);
}

/**
 * dwarf_parse_section - parse DWARF section
 * @eh_frame_start: start address of the .eh_frame section
 * @eh_frame_end: end address of the .eh_frame section
 * @mod: the kernel module containing the .eh_frame section
 *
 * Parse the information in a .eh_frame section.
 */
static int dwarf_parse_section(char *eh_frame_start, char *eh_frame_end,
			       struct module *mod)
{
	u32 entry_type;
	void *p, *entry;
	int count, err = 0;
	unsigned long len = 0;
	unsigned int c_entries, f_entries;
	unsigned char *end;

	c_entries = 0;
	f_entries = 0;
	entry = eh_frame_start;

	while ((char *)entry < eh_frame_end) {
		p = entry;

		count = dwarf_entry_len(p, &len);
		if (count == 0) {
			/*
			 * We read a bogus length field value. There is
			 * nothing we can do here apart from disabling
			 * the DWARF unwinder. We can't even skip this
			 * entry and move to the next one because 'len'
			 * tells us where our next entry is.
			 */
			err = -EINVAL;
			goto out;
		} else
			p += count;

		/* initial length does not include itself */
		end = p + len;

		entry_type = get_unaligned((u32 *)p);
		p += 4;

		if (entry_type == DW_EH_FRAME_CIE) {
			err = dwarf_parse_cie(entry, p, len, end, mod);
			if (err < 0)
				goto out;
			else
				c_entries++;
		} else {
			err = dwarf_parse_fde(entry, entry_type, p, len,
					      end, mod);
			if (err < 0)
				goto out;
			else
				f_entries++;
		}

		entry = (char *)entry + len + 4;
	}

	printk(KERN_INFO "DWARF unwinder initialised: read %u CIEs, %u FDEs\n",
	       c_entries, f_entries);

	return 0;

out:
	return err;
}

#ifdef CONFIG_MODULES
/* Find a module's .eh_frame section (if any) and parse it. */
int module_dwarf_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
			  struct module *me)
{
	unsigned int i, err;
	unsigned long start, end;
	char *secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;

	start = end = 0;

	for (i = 1; i < hdr->e_shnum; i++) {
		/* Alloc bit cleared means "ignore it." */
		if ((sechdrs[i].sh_flags & SHF_ALLOC) &&
		    !strcmp(secstrings+sechdrs[i].sh_name, ".eh_frame")) {
			start = sechdrs[i].sh_addr;
			end = start + sechdrs[i].sh_size;
			break;
		}
	}

	/* Did we find the .eh_frame section? */
	if (i != hdr->e_shnum) {
		INIT_LIST_HEAD(&me->arch.cie_list);
		INIT_LIST_HEAD(&me->arch.fde_list);
		err = dwarf_parse_section((char *)start, (char *)end, me);
		if (err) {
			printk(KERN_WARNING "%s: failed to parse DWARF info\n",
			       me->name);
			return err;
		}
	}

	return 0;
}

/**
 * module_dwarf_cleanup - remove FDE/CIEs associated with @mod
 * @mod: the module that is being unloaded
 *
 * Remove any FDEs and CIEs from the global lists that came from
 * @mod's .eh_frame section because @mod is being unloaded.
 */
void module_dwarf_cleanup(struct module *mod)
{
	struct dwarf_fde *fde, *ftmp;
	struct dwarf_cie *cie, *ctmp;
	unsigned long flags;

	spin_lock_irqsave(&dwarf_cie_lock, flags);

	list_for_each_entry_safe(cie, ctmp, &mod->arch.cie_list, link) {
		list_del(&cie->link);
		rb_erase(&cie->node, &cie_root);
		kfree(cie);
	}

	spin_unlock_irqrestore(&dwarf_cie_lock, flags);

	spin_lock_irqsave(&dwarf_fde_lock, flags);

	list_for_each_entry_safe(fde, ftmp, &mod->arch.fde_list, link) {
		list_del(&fde->link);
		rb_erase(&fde->node, &fde_root);
		kfree(fde);
	}

	spin_unlock_irqrestore(&dwarf_fde_lock, flags);
}
#endif /* CONFIG_MODULES */

/**
 * dwarf_unwinder_init - initialise the dwarf unwinder
 *
 * Build the data structures describing the .dwarf_frame section to
 * make it easier to lookup CIE and FDE entries. Because the
 * .eh_frame section is packed as tightly as possible it is not
 * easy to lookup the FDE for a given PC, so we build a list of FDE
 * and CIE entries that make it easier.
 */
static int __init dwarf_unwinder_init(void)
{
	int err = -ENOMEM;

	dwarf_frame_cachep = kmem_cache_create("dwarf_frames",
			sizeof(struct dwarf_frame), 0,
			SLAB_PANIC | SLAB_HWCACHE_ALIGN | SLAB_NOTRACK, NULL);

	dwarf_reg_cachep = kmem_cache_create("dwarf_regs",
			sizeof(struct dwarf_reg), 0,
			SLAB_PANIC | SLAB_HWCACHE_ALIGN | SLAB_NOTRACK, NULL);

	dwarf_frame_pool = mempool_create(DWARF_FRAME_MIN_REQ,
					  mempool_alloc_slab,
					  mempool_free_slab,
					  dwarf_frame_cachep);
	if (!dwarf_frame_pool)
		goto out;

	dwarf_reg_pool = mempool_create(DWARF_REG_MIN_REQ,
					mempool_alloc_slab,
					mempool_free_slab,
					dwarf_reg_cachep);
	if (!dwarf_reg_pool)
		goto out;

	err = dwarf_parse_section(__start_eh_frame, __stop_eh_frame, NULL);
	if (err)
		goto out;

	err = unwinder_register(&dwarf_unwinder);
	if (err)
		goto out;

	dwarf_unwinder_ready = 1;

	return 0;

out:
	printk(KERN_ERR "Failed to initialise DWARF unwinder: %d\n", err);
	dwarf_unwinder_cleanup();
	return err;
}
early_initcall(dwarf_unwinder_init);
gpl-2.0
3EleVen/kernel_common
sound/synth/emux/emux_hwdep.c
11940
3731
/*
 * Interface for hwdep device
 *
 * Copyright (C) 2004 Takashi Iwai <tiwai@suse.de>
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 *
 */

#include <sound/core.h>
#include <sound/hwdep.h>
#include <asm/uaccess.h>
#include "emux_voice.h"

/* Temporary client id used while loading a soundfont via hwdep. */
#define TMP_CLIENT_ID	0x1001

/*
 * snd_emux_hwdep_load_patch - load a soundfont patch from user space
 * @emu: the emux synth instance
 * @arg: user-space pointer to a struct soundfont_patch_info header,
 *       followed by @patch.len bytes of payload
 *
 * Returns 0 on success or a negative error code.
 */
static int
snd_emux_hwdep_load_patch(struct snd_emux *emu, void __user *arg)
{
	int err;
	struct soundfont_patch_info patch;

	if (copy_from_user(&patch, arg, sizeof(patch)))
		return -EFAULT;

	/*
	 * patch.len comes straight from user space; reject negative
	 * values up front, otherwise "patch.len + sizeof(patch)" below
	 * wraps and passes a bogus size to the loaders.
	 */
	if (patch.len < 0)
		return -EINVAL;

	if (patch.type >= SNDRV_SFNT_LOAD_INFO &&
	    patch.type <= SNDRV_SFNT_PROBE_DATA) {
		err = snd_soundfont_load(emu->sflist, arg,
					 patch.len + sizeof(patch),
					 TMP_CLIENT_ID);
		if (err < 0)
			return err;
	} else {
		/* Unknown types are delegated to the chip-specific
		 * FX loader, if the driver provides one. */
		if (emu->ops.load_fx)
			return emu->ops.load_fx(emu, patch.type, patch.optarg,
						arg, patch.len + sizeof(patch));
		else
			return -EINVAL;
	}
	return 0;
}

/*
 * snd_emux_hwdep_misc_mode - set a misc-mode control value
 * @emu: the emux synth instance
 * @arg: user-space pointer to a struct snd_emux_misc_mode
 *
 * A negative port means "apply to every port".  Returns 0 on success
 * or a negative error code.
 */
static int
snd_emux_hwdep_misc_mode(struct snd_emux *emu, void __user *arg)
{
	struct snd_emux_misc_mode info;
	int i;

	if (copy_from_user(&info, arg, sizeof(info)))
		return -EFAULT;
	if (info.mode < 0 || info.mode >= EMUX_MD_END)
		return -EINVAL;
	/*
	 * NOTE(review): info.mode and info.port are user-controlled array
	 * indices; consider array_index_nospec() here to harden against
	 * speculative out-of-bounds reads (Spectre v1) — confirm against
	 * the tree's mitigation policy.
	 */

	if (info.port < 0) {
		/* apply to all ports */
		for (i = 0; i < emu->num_ports; i++)
			emu->portptrs[i]->ctrls[info.mode] = info.value;
	} else {
		/* out-of-range port numbers are silently ignored,
		 * keeping the historical user-space contract */
		if (info.port < emu->num_ports)
			emu->portptrs[info.port]->ctrls[info.mode] = info.value;
	}
	return 0;
}

/*
 * snd_emux_hwdep_ioctl - hwdep ioctl dispatcher for the emux device
 *
 * Unknown commands are ignored and return 0, preserving the existing
 * user-space behaviour.
 */
static int
snd_emux_hwdep_ioctl(struct snd_hwdep * hw, struct file *file,
		     unsigned int cmd, unsigned long arg)
{
	struct snd_emux *emu = hw->private_data;

	switch (cmd) {
	case SNDRV_EMUX_IOCTL_VERSION:
		return put_user(SNDRV_EMUX_VERSION, (unsigned int __user *)arg);
	case SNDRV_EMUX_IOCTL_LOAD_PATCH:
		return snd_emux_hwdep_load_patch(emu, (void __user *)arg);
	case SNDRV_EMUX_IOCTL_RESET_SAMPLES:
		snd_soundfont_remove_samples(emu->sflist);
		break;
	case SNDRV_EMUX_IOCTL_REMOVE_LAST_SAMPLES:
		snd_soundfont_remove_unlocked(emu->sflist);
		break;
	case SNDRV_EMUX_IOCTL_MEM_AVAIL:
		if (emu->memhdr) {
			int size = snd_util_mem_avail(emu->memhdr);
			return put_user(size, (unsigned int __user *)arg);
		}
		break;
	case SNDRV_EMUX_IOCTL_MISC_MODE:
		return snd_emux_hwdep_misc_mode(emu, (void __user *)arg);
	default:
		/* unknown ioctls are silently accepted (historical) */
		break;
	}
	return 0;
}

/*
 * snd_emux_init_hwdep - create and register the emux hwdep device
 * @emu: the emux synth instance
 *
 * Returns 0 on success or a negative error code from the hwdep/card
 * registration.
 */
int
snd_emux_init_hwdep(struct snd_emux *emu)
{
	struct snd_hwdep *hw;
	int err;

	err = snd_hwdep_new(emu->card, SNDRV_EMUX_HWDEP_NAME,
			    emu->hwdep_idx, &hw);
	if (err < 0)
		return err;
	emu->hwdep = hw;
	strcpy(hw->name, SNDRV_EMUX_HWDEP_NAME);
	hw->iface = SNDRV_HWDEP_IFACE_EMUX_WAVETABLE;
	hw->ops.ioctl = snd_emux_hwdep_ioctl;
	/* The ioctl parameter types are compatible between 32- and
	 * 64-bit architectures, so use the same function. */
	hw->ops.ioctl_compat = snd_emux_hwdep_ioctl;
	hw->exclusive = 1;
	hw->private_data = emu;
	err = snd_card_register(emu->card);
	if (err < 0)
		return err;
	return 0;
}

/*
 * snd_emux_delete_hwdep - unregister and free the emux hwdep device
 * @emu: the emux synth instance (hwdep pointer is cleared afterwards)
 */
void
snd_emux_delete_hwdep(struct snd_emux *emu)
{
	if (emu->hwdep) {
		snd_device_free(emu->card, emu->hwdep);
		emu->hwdep = NULL;
	}
}
gpl-2.0
touchpro/android_kernel_lge_msm8226
arch/alpha/lib/fls.c
11940
1117
/* * arch/alpha/lib/fls.c */ #include <linux/module.h> #include <linux/bitops.h> /* This is fls(x)-1, except zero is held to zero. This allows most efficient input into extbl, plus it allows easy handling of fls(0)=0. */ const unsigned char __flsm1_tab[256] = { 0, 0, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, }; EXPORT_SYMBOL(__flsm1_tab);
gpl-2.0
intervigilium/android_kernel_htc_msm8660
arch/alpha/kernel/err_titan.c
11940
23439
/* * linux/arch/alpha/kernel/err_titan.c * * Copyright (C) 2000 Jeff Wiedemeier (Compaq Computer Corporation) * * Error handling code supporting TITAN systems */ #include <linux/init.h> #include <linux/pci.h> #include <linux/sched.h> #include <asm/io.h> #include <asm/core_titan.h> #include <asm/hwrpb.h> #include <asm/smp.h> #include <asm/err_common.h> #include <asm/err_ev6.h> #include <asm/irq_regs.h> #include "err_impl.h" #include "proto.h" static int titan_parse_c_misc(u64 c_misc, int print) { #ifdef CONFIG_VERBOSE_MCHECK char *src; int nxs = 0; #endif int status = MCHK_DISPOSITION_REPORT; #define TITAN__CCHIP_MISC__NXM (1UL << 28) #define TITAN__CCHIP_MISC__NXS__S (29) #define TITAN__CCHIP_MISC__NXS__M (0x7) if (!(c_misc & TITAN__CCHIP_MISC__NXM)) return MCHK_DISPOSITION_UNKNOWN_ERROR; #ifdef CONFIG_VERBOSE_MCHECK if (!print) return status; nxs = EXTRACT(c_misc, TITAN__CCHIP_MISC__NXS); switch(nxs) { case 0: /* CPU 0 */ case 1: /* CPU 1 */ case 2: /* CPU 2 */ case 3: /* CPU 3 */ src = "CPU"; /* num is already the CPU number */ break; case 4: /* Pchip 0 */ case 5: /* Pchip 1 */ src = "Pchip"; nxs -= 4; break; default:/* reserved */ src = "Unknown, NXS ="; /* leave num untouched */ break; } printk("%s Non-existent memory access from: %s %d\n", err_print_prefix, src, nxs); #endif /* CONFIG_VERBOSE_MCHECK */ return status; } static int titan_parse_p_serror(int which, u64 serror, int print) { int status = MCHK_DISPOSITION_REPORT; #ifdef CONFIG_VERBOSE_MCHECK static const char * const serror_src[] = { "GPCI", "APCI", "AGP HP", "AGP LP" }; static const char * const serror_cmd[] = { "DMA Read", "DMA RMW", "SGTE Read", "Reserved" }; #endif /* CONFIG_VERBOSE_MCHECK */ #define TITAN__PCHIP_SERROR__LOST_UECC (1UL << 0) #define TITAN__PCHIP_SERROR__UECC (1UL << 1) #define TITAN__PCHIP_SERROR__CRE (1UL << 2) #define TITAN__PCHIP_SERROR__NXIO (1UL << 3) #define TITAN__PCHIP_SERROR__LOST_CRE (1UL << 4) #define TITAN__PCHIP_SERROR__ECCMASK (TITAN__PCHIP_SERROR__UECC | \ 
TITAN__PCHIP_SERROR__CRE) #define TITAN__PCHIP_SERROR__ERRMASK (TITAN__PCHIP_SERROR__LOST_UECC | \ TITAN__PCHIP_SERROR__UECC | \ TITAN__PCHIP_SERROR__CRE | \ TITAN__PCHIP_SERROR__NXIO | \ TITAN__PCHIP_SERROR__LOST_CRE) #define TITAN__PCHIP_SERROR__SRC__S (52) #define TITAN__PCHIP_SERROR__SRC__M (0x3) #define TITAN__PCHIP_SERROR__CMD__S (54) #define TITAN__PCHIP_SERROR__CMD__M (0x3) #define TITAN__PCHIP_SERROR__SYN__S (56) #define TITAN__PCHIP_SERROR__SYN__M (0xff) #define TITAN__PCHIP_SERROR__ADDR__S (15) #define TITAN__PCHIP_SERROR__ADDR__M (0xffffffffUL) if (!(serror & TITAN__PCHIP_SERROR__ERRMASK)) return MCHK_DISPOSITION_UNKNOWN_ERROR; #ifdef CONFIG_VERBOSE_MCHECK if (!print) return status; printk("%s PChip %d SERROR: %016llx\n", err_print_prefix, which, serror); if (serror & TITAN__PCHIP_SERROR__ECCMASK) { printk("%s %sorrectable ECC Error:\n" " Source: %-6s Command: %-8s Syndrome: 0x%08x\n" " Address: 0x%llx\n", err_print_prefix, (serror & TITAN__PCHIP_SERROR__UECC) ? "Unc" : "C", serror_src[EXTRACT(serror, TITAN__PCHIP_SERROR__SRC)], serror_cmd[EXTRACT(serror, TITAN__PCHIP_SERROR__CMD)], (unsigned)EXTRACT(serror, TITAN__PCHIP_SERROR__SYN), EXTRACT(serror, TITAN__PCHIP_SERROR__ADDR)); } if (serror & TITAN__PCHIP_SERROR__NXIO) printk("%s Non Existent I/O Error\n", err_print_prefix); if (serror & TITAN__PCHIP_SERROR__LOST_UECC) printk("%s Lost Uncorrectable ECC Error\n", err_print_prefix); if (serror & TITAN__PCHIP_SERROR__LOST_CRE) printk("%s Lost Correctable ECC Error\n", err_print_prefix); #endif /* CONFIG_VERBOSE_MCHECK */ return status; } static int titan_parse_p_perror(int which, int port, u64 perror, int print) { int cmd; unsigned long addr; int status = MCHK_DISPOSITION_REPORT; #ifdef CONFIG_VERBOSE_MCHECK static const char * const perror_cmd[] = { "Interrupt Acknowledge", "Special Cycle", "I/O Read", "I/O Write", "Reserved", "Reserved", "Memory Read", "Memory Write", "Reserved", "Reserved", "Configuration Read", "Configuration Write", "Memory Read 
Multiple", "Dual Address Cycle", "Memory Read Line", "Memory Write and Invalidate" }; #endif /* CONFIG_VERBOSE_MCHECK */ #define TITAN__PCHIP_PERROR__LOST (1UL << 0) #define TITAN__PCHIP_PERROR__SERR (1UL << 1) #define TITAN__PCHIP_PERROR__PERR (1UL << 2) #define TITAN__PCHIP_PERROR__DCRTO (1UL << 3) #define TITAN__PCHIP_PERROR__SGE (1UL << 4) #define TITAN__PCHIP_PERROR__APE (1UL << 5) #define TITAN__PCHIP_PERROR__TA (1UL << 6) #define TITAN__PCHIP_PERROR__DPE (1UL << 7) #define TITAN__PCHIP_PERROR__NDS (1UL << 8) #define TITAN__PCHIP_PERROR__IPTPR (1UL << 9) #define TITAN__PCHIP_PERROR__IPTPW (1UL << 10) #define TITAN__PCHIP_PERROR__ERRMASK (TITAN__PCHIP_PERROR__LOST | \ TITAN__PCHIP_PERROR__SERR | \ TITAN__PCHIP_PERROR__PERR | \ TITAN__PCHIP_PERROR__DCRTO | \ TITAN__PCHIP_PERROR__SGE | \ TITAN__PCHIP_PERROR__APE | \ TITAN__PCHIP_PERROR__TA | \ TITAN__PCHIP_PERROR__DPE | \ TITAN__PCHIP_PERROR__NDS | \ TITAN__PCHIP_PERROR__IPTPR | \ TITAN__PCHIP_PERROR__IPTPW) #define TITAN__PCHIP_PERROR__DAC (1UL << 47) #define TITAN__PCHIP_PERROR__MWIN (1UL << 48) #define TITAN__PCHIP_PERROR__CMD__S (52) #define TITAN__PCHIP_PERROR__CMD__M (0x0f) #define TITAN__PCHIP_PERROR__ADDR__S (14) #define TITAN__PCHIP_PERROR__ADDR__M (0x1fffffffful) if (!(perror & TITAN__PCHIP_PERROR__ERRMASK)) return MCHK_DISPOSITION_UNKNOWN_ERROR; cmd = EXTRACT(perror, TITAN__PCHIP_PERROR__CMD); addr = EXTRACT(perror, TITAN__PCHIP_PERROR__ADDR) << 2; /* * Initializing the BIOS on a video card on a bus without * a south bridge (subtractive decode agent) can result in * master aborts as the BIOS probes the capabilities of the * card. XFree86 does such initialization. If the error * is a master abort (No DevSel as PCI Master) and the command * is an I/O read or write below the address where we start * assigning PCI I/O spaces (SRM uses 0x1000), then mark the * error as dismissable so starting XFree86 doesn't result * in a series of uncorrectable errors being reported. 
Also * dismiss master aborts to VGA frame buffer space * (0xA0000 - 0xC0000) and legacy BIOS space (0xC0000 - 0x100000) * for the same reason. * * Also mark the error dismissible if it looks like the right * error but only the Lost bit is set. Since the BIOS initialization * can cause multiple master aborts and the error interrupt can * be handled on a different CPU than the BIOS code is run on, * it is possible for a second master abort to occur between the * time the PALcode reads PERROR and the time it writes PERROR * to acknowledge the error. If this timing happens, a second * error will be signalled after the first, and if no additional * errors occur, will look like a Lost error with no additional * errors on the same transaction as the previous error. */ if (((perror & TITAN__PCHIP_PERROR__NDS) || ((perror & TITAN__PCHIP_PERROR__ERRMASK) == TITAN__PCHIP_PERROR__LOST)) && ((((cmd & 0xE) == 2) && (addr < 0x1000)) || (((cmd & 0xE) == 6) && (addr >= 0xA0000) && (addr < 0x100000)))) { status = MCHK_DISPOSITION_DISMISS; } #ifdef CONFIG_VERBOSE_MCHECK if (!print) return status; printk("%s PChip %d %cPERROR: %016llx\n", err_print_prefix, which, port ? 
'A' : 'G', perror); if (perror & TITAN__PCHIP_PERROR__IPTPW) printk("%s Invalid Peer-to-Peer Write\n", err_print_prefix); if (perror & TITAN__PCHIP_PERROR__IPTPR) printk("%s Invalid Peer-to-Peer Read\n", err_print_prefix); if (perror & TITAN__PCHIP_PERROR__NDS) printk("%s No DEVSEL as PCI Master [Master Abort]\n", err_print_prefix); if (perror & TITAN__PCHIP_PERROR__DPE) printk("%s Data Parity Error\n", err_print_prefix); if (perror & TITAN__PCHIP_PERROR__TA) printk("%s Target Abort\n", err_print_prefix); if (perror & TITAN__PCHIP_PERROR__APE) printk("%s Address Parity Error\n", err_print_prefix); if (perror & TITAN__PCHIP_PERROR__SGE) printk("%s Scatter-Gather Error, Invalid PTE\n", err_print_prefix); if (perror & TITAN__PCHIP_PERROR__DCRTO) printk("%s Delayed-Completion Retry Timeout\n", err_print_prefix); if (perror & TITAN__PCHIP_PERROR__PERR) printk("%s PERR Asserted\n", err_print_prefix); if (perror & TITAN__PCHIP_PERROR__SERR) printk("%s SERR Asserted\n", err_print_prefix); if (perror & TITAN__PCHIP_PERROR__LOST) printk("%s Lost Error\n", err_print_prefix); printk("%s Command: 0x%x - %s\n" " Address: 0x%lx\n", err_print_prefix, cmd, perror_cmd[cmd], addr); if (perror & TITAN__PCHIP_PERROR__DAC) printk("%s Dual Address Cycle\n", err_print_prefix); if (perror & TITAN__PCHIP_PERROR__MWIN) printk("%s Hit in Monster Window\n", err_print_prefix); #endif /* CONFIG_VERBOSE_MCHECK */ return status; } static int titan_parse_p_agperror(int which, u64 agperror, int print) { int status = MCHK_DISPOSITION_REPORT; #ifdef CONFIG_VERBOSE_MCHECK int cmd, len; unsigned long addr; static const char * const agperror_cmd[] = { "Read (low-priority)", "Read (high-priority)", "Write (low-priority)", "Write (high-priority)", "Reserved", "Reserved", "Flush", "Fence" }; #endif /* CONFIG_VERBOSE_MCHECK */ #define TITAN__PCHIP_AGPERROR__LOST (1UL << 0) #define TITAN__PCHIP_AGPERROR__LPQFULL (1UL << 1) #define TITAN__PCHIP_AGPERROR__HPQFULL (1UL << 2) #define TITAN__PCHIP_AGPERROR__RESCMD 
(1UL << 3) #define TITAN__PCHIP_AGPERROR__IPTE (1UL << 4) #define TITAN__PCHIP_AGPERROR__PTP (1UL << 5) #define TITAN__PCHIP_AGPERROR__NOWINDOW (1UL << 6) #define TITAN__PCHIP_AGPERROR__ERRMASK (TITAN__PCHIP_AGPERROR__LOST | \ TITAN__PCHIP_AGPERROR__LPQFULL | \ TITAN__PCHIP_AGPERROR__HPQFULL | \ TITAN__PCHIP_AGPERROR__RESCMD | \ TITAN__PCHIP_AGPERROR__IPTE | \ TITAN__PCHIP_AGPERROR__PTP | \ TITAN__PCHIP_AGPERROR__NOWINDOW) #define TITAN__PCHIP_AGPERROR__DAC (1UL << 48) #define TITAN__PCHIP_AGPERROR__MWIN (1UL << 49) #define TITAN__PCHIP_AGPERROR__FENCE (1UL << 59) #define TITAN__PCHIP_AGPERROR__CMD__S (50) #define TITAN__PCHIP_AGPERROR__CMD__M (0x07) #define TITAN__PCHIP_AGPERROR__ADDR__S (15) #define TITAN__PCHIP_AGPERROR__ADDR__M (0xffffffffUL) #define TITAN__PCHIP_AGPERROR__LEN__S (53) #define TITAN__PCHIP_AGPERROR__LEN__M (0x3f) if (!(agperror & TITAN__PCHIP_AGPERROR__ERRMASK)) return MCHK_DISPOSITION_UNKNOWN_ERROR; #ifdef CONFIG_VERBOSE_MCHECK if (!print) return status; cmd = EXTRACT(agperror, TITAN__PCHIP_AGPERROR__CMD); addr = EXTRACT(agperror, TITAN__PCHIP_AGPERROR__ADDR) << 3; len = EXTRACT(agperror, TITAN__PCHIP_AGPERROR__LEN); printk("%s PChip %d AGPERROR: %016llx\n", err_print_prefix, which, agperror); if (agperror & TITAN__PCHIP_AGPERROR__NOWINDOW) printk("%s No Window\n", err_print_prefix); if (agperror & TITAN__PCHIP_AGPERROR__PTP) printk("%s Peer-to-Peer set\n", err_print_prefix); if (agperror & TITAN__PCHIP_AGPERROR__IPTE) printk("%s Invalid PTE\n", err_print_prefix); if (agperror & TITAN__PCHIP_AGPERROR__RESCMD) printk("%s Reserved Command\n", err_print_prefix); if (agperror & TITAN__PCHIP_AGPERROR__HPQFULL) printk("%s HP Transaction Received while Queue Full\n", err_print_prefix); if (agperror & TITAN__PCHIP_AGPERROR__LPQFULL) printk("%s LP Transaction Received while Queue Full\n", err_print_prefix); if (agperror & TITAN__PCHIP_AGPERROR__LOST) printk("%s Lost Error\n", err_print_prefix); printk("%s Command: 0x%x - %s, %d Quadwords%s\n" " Address: 
0x%lx\n", err_print_prefix, cmd, agperror_cmd[cmd], len, (agperror & TITAN__PCHIP_AGPERROR__FENCE) ? ", FENCE" : "", addr); if (agperror & TITAN__PCHIP_AGPERROR__DAC) printk("%s Dual Address Cycle\n", err_print_prefix); if (agperror & TITAN__PCHIP_AGPERROR__MWIN) printk("%s Hit in Monster Window\n", err_print_prefix); #endif /* CONFIG_VERBOSE_MCHECK */ return status; } static int titan_parse_p_chip(int which, u64 serror, u64 gperror, u64 aperror, u64 agperror, int print) { int status = MCHK_DISPOSITION_UNKNOWN_ERROR; status |= titan_parse_p_serror(which, serror, print); status |= titan_parse_p_perror(which, 0, gperror, print); status |= titan_parse_p_perror(which, 1, aperror, print); status |= titan_parse_p_agperror(which, agperror, print); return status; } int titan_process_logout_frame(struct el_common *mchk_header, int print) { struct el_TITAN_sysdata_mcheck *tmchk = (struct el_TITAN_sysdata_mcheck *) ((unsigned long)mchk_header + mchk_header->sys_offset); int status = MCHK_DISPOSITION_UNKNOWN_ERROR; status |= titan_parse_c_misc(tmchk->c_misc, print); status |= titan_parse_p_chip(0, tmchk->p0_serror, tmchk->p0_gperror, tmchk->p0_aperror, tmchk->p0_agperror, print); status |= titan_parse_p_chip(1, tmchk->p1_serror, tmchk->p1_gperror, tmchk->p1_aperror, tmchk->p1_agperror, print); return status; } void titan_machine_check(unsigned long vector, unsigned long la_ptr) { struct el_common *mchk_header = (struct el_common *)la_ptr; struct el_TITAN_sysdata_mcheck *tmchk = (struct el_TITAN_sysdata_mcheck *) ((unsigned long)mchk_header + mchk_header->sys_offset); u64 irqmask; /* * Mask of Titan interrupt sources which are reported as machine checks * * 63 - CChip Error * 62 - PChip 0 H_Error * 61 - PChip 1 H_Error * 60 - PChip 0 C_Error * 59 - PChip 1 C_Error */ #define TITAN_MCHECK_INTERRUPT_MASK 0xF800000000000000UL /* * Sync the processor */ mb(); draina(); /* * Only handle system errors here */ if ((vector != SCB_Q_SYSMCHK) && (vector != SCB_Q_SYSERR)) { 
ev6_machine_check(vector, la_ptr); return; } /* * It's a system error, handle it here * * The PALcode has already cleared the error, so just parse it */ /* * Parse the logout frame without printing first. If the only error(s) * found are classified as "dismissable", then just dismiss them and * don't print any message */ if (titan_process_logout_frame(mchk_header, 0) != MCHK_DISPOSITION_DISMISS) { char *saved_err_prefix = err_print_prefix; err_print_prefix = KERN_CRIT; /* * Either a nondismissable error was detected or no * recognized error was detected in the logout frame * -- report the error in either case */ printk("%s" "*System %s Error (Vector 0x%x) reported on CPU %d:\n", err_print_prefix, (vector == SCB_Q_SYSERR)?"Correctable":"Uncorrectable", (unsigned int)vector, (int)smp_processor_id()); #ifdef CONFIG_VERBOSE_MCHECK titan_process_logout_frame(mchk_header, alpha_verbose_mcheck); if (alpha_verbose_mcheck) dik_show_regs(get_irq_regs(), NULL); #endif /* CONFIG_VERBOSE_MCHECK */ err_print_prefix = saved_err_prefix; /* * Convert any pending interrupts which report as system * machine checks to interrupts */ irqmask = tmchk->c_dirx & TITAN_MCHECK_INTERRUPT_MASK; titan_dispatch_irqs(irqmask); } /* * Release the logout frame */ wrmces(0x7); mb(); } /* * Subpacket Annotations */ static char *el_titan_pchip0_extended_annotation[] = { "Subpacket Header", "P0_SCTL", "P0_SERREN", "P0_APCTL", "P0_APERREN", "P0_AGPERREN", "P0_ASPRST", "P0_AWSBA0", "P0_AWSBA1", "P0_AWSBA2", "P0_AWSBA3", "P0_AWSM0", "P0_AWSM1", "P0_AWSM2", "P0_AWSM3", "P0_ATBA0", "P0_ATBA1", "P0_ATBA2", "P0_ATBA3", "P0_GPCTL", "P0_GPERREN", "P0_GSPRST", "P0_GWSBA0", "P0_GWSBA1", "P0_GWSBA2", "P0_GWSBA3", "P0_GWSM0", "P0_GWSM1", "P0_GWSM2", "P0_GWSM3", "P0_GTBA0", "P0_GTBA1", "P0_GTBA2", "P0_GTBA3", NULL }; static char *el_titan_pchip1_extended_annotation[] = { "Subpacket Header", "P1_SCTL", "P1_SERREN", "P1_APCTL", "P1_APERREN", "P1_AGPERREN", "P1_ASPRST", "P1_AWSBA0", "P1_AWSBA1", "P1_AWSBA2", 
"P1_AWSBA3", "P1_AWSM0", "P1_AWSM1", "P1_AWSM2", "P1_AWSM3", "P1_ATBA0", "P1_ATBA1", "P1_ATBA2", "P1_ATBA3", "P1_GPCTL", "P1_GPERREN", "P1_GSPRST", "P1_GWSBA0", "P1_GWSBA1", "P1_GWSBA2", "P1_GWSBA3", "P1_GWSM0", "P1_GWSM1", "P1_GWSM2", "P1_GWSM3", "P1_GTBA0", "P1_GTBA1", "P1_GTBA2", "P1_GTBA3", NULL }; static char *el_titan_memory_extended_annotation[] = { "Subpacket Header", "AAR0", "AAR1", "AAR2", "AAR3", "P0_SCTL", "P0_GPCTL", "P0_APCTL", "P1_SCTL", "P1_GPCTL", "P1_SCTL", NULL }; static struct el_subpacket_annotation el_titan_annotations[] = { SUBPACKET_ANNOTATION(EL_CLASS__REGATTA_FAMILY, EL_TYPE__REGATTA__TITAN_PCHIP0_EXTENDED, 1, "Titan PChip 0 Extended Frame", el_titan_pchip0_extended_annotation), SUBPACKET_ANNOTATION(EL_CLASS__REGATTA_FAMILY, EL_TYPE__REGATTA__TITAN_PCHIP1_EXTENDED, 1, "Titan PChip 1 Extended Frame", el_titan_pchip1_extended_annotation), SUBPACKET_ANNOTATION(EL_CLASS__REGATTA_FAMILY, EL_TYPE__REGATTA__TITAN_MEMORY_EXTENDED, 1, "Titan Memory Extended Frame", el_titan_memory_extended_annotation), SUBPACKET_ANNOTATION(EL_CLASS__REGATTA_FAMILY, EL_TYPE__TERMINATION__TERMINATION, 1, "Termination Subpacket", NULL) }; static struct el_subpacket * el_process_regatta_subpacket(struct el_subpacket *header) { if (header->class != EL_CLASS__REGATTA_FAMILY) { printk("%s ** Unexpected header CLASS %d TYPE %d, aborting\n", err_print_prefix, header->class, header->type); return NULL; } switch(header->type) { case EL_TYPE__REGATTA__PROCESSOR_ERROR_FRAME: case EL_TYPE__REGATTA__SYSTEM_ERROR_FRAME: case EL_TYPE__REGATTA__ENVIRONMENTAL_FRAME: case EL_TYPE__REGATTA__PROCESSOR_DBL_ERROR_HALT: case EL_TYPE__REGATTA__SYSTEM_DBL_ERROR_HALT: printk("%s ** Occurred on CPU %d:\n", err_print_prefix, (int)header->by_type.regatta_frame.cpuid); privateer_process_logout_frame((struct el_common *) header->by_type.regatta_frame.data_start, 1); break; default: printk("%s ** REGATTA TYPE %d SUBPACKET\n", err_print_prefix, header->type); el_annotate_subpacket(header); break; } 
return (struct el_subpacket *)((unsigned long)header + header->length); } static struct el_subpacket_handler titan_subpacket_handler = SUBPACKET_HANDLER_INIT(EL_CLASS__REGATTA_FAMILY, el_process_regatta_subpacket); void __init titan_register_error_handlers(void) { size_t i; for (i = 0; i < ARRAY_SIZE (el_titan_annotations); i++) cdl_register_subpacket_annotation(&el_titan_annotations[i]); cdl_register_subpacket_handler(&titan_subpacket_handler); ev6_register_error_handlers(); } /* * Privateer */ static int privateer_process_680_frame(struct el_common *mchk_header, int print) { int status = MCHK_DISPOSITION_UNKNOWN_ERROR; #ifdef CONFIG_VERBOSE_MCHECK struct el_PRIVATEER_envdata_mcheck *emchk = (struct el_PRIVATEER_envdata_mcheck *) ((unsigned long)mchk_header + mchk_header->sys_offset); /* TODO - categorize errors, for now, no error */ if (!print) return status; /* TODO - decode instead of just dumping... */ printk("%s Summary Flags: %016llx\n" " CChip DIRx: %016llx\n" " System Management IR: %016llx\n" " CPU IR: %016llx\n" " Power Supply IR: %016llx\n" " LM78 Fault Status: %016llx\n" " System Doors: %016llx\n" " Temperature Warning: %016llx\n" " Fan Control: %016llx\n" " Fatal Power Down Code: %016llx\n", err_print_prefix, emchk->summary, emchk->c_dirx, emchk->smir, emchk->cpuir, emchk->psir, emchk->fault, emchk->sys_doors, emchk->temp_warn, emchk->fan_ctrl, emchk->code); #endif /* CONFIG_VERBOSE_MCHECK */ return status; } int privateer_process_logout_frame(struct el_common *mchk_header, int print) { struct el_common_EV6_mcheck *ev6mchk = (struct el_common_EV6_mcheck *)mchk_header; int status = MCHK_DISPOSITION_UNKNOWN_ERROR; /* * Machine check codes */ #define PRIVATEER_MCHK__CORR_ECC 0x86 /* 630 */ #define PRIVATEER_MCHK__DC_TAG_PERR 0x9E /* 630 */ #define PRIVATEER_MCHK__PAL_BUGCHECK 0x8E /* 670 */ #define PRIVATEER_MCHK__OS_BUGCHECK 0x90 /* 670 */ #define PRIVATEER_MCHK__PROC_HRD_ERR 0x98 /* 670 */ #define PRIVATEER_MCHK__ISTREAM_CMOV_PRX 0xA0 /* 670 */ #define 
PRIVATEER_MCHK__ISTREAM_CMOV_FLT 0xA2 /* 670 */ #define PRIVATEER_MCHK__SYS_HRD_ERR 0x202 /* 660 */ #define PRIVATEER_MCHK__SYS_CORR_ERR 0x204 /* 620 */ #define PRIVATEER_MCHK__SYS_ENVIRON 0x206 /* 680 */ switch(ev6mchk->MCHK_Code) { /* * Vector 630 - Processor, Correctable */ case PRIVATEER_MCHK__CORR_ECC: case PRIVATEER_MCHK__DC_TAG_PERR: /* * Fall through to vector 670 for processing... */ /* * Vector 670 - Processor, Uncorrectable */ case PRIVATEER_MCHK__PAL_BUGCHECK: case PRIVATEER_MCHK__OS_BUGCHECK: case PRIVATEER_MCHK__PROC_HRD_ERR: case PRIVATEER_MCHK__ISTREAM_CMOV_PRX: case PRIVATEER_MCHK__ISTREAM_CMOV_FLT: status |= ev6_process_logout_frame(mchk_header, print); break; /* * Vector 620 - System, Correctable */ case PRIVATEER_MCHK__SYS_CORR_ERR: /* * Fall through to vector 660 for processing... */ /* * Vector 660 - System, Uncorrectable */ case PRIVATEER_MCHK__SYS_HRD_ERR: status |= titan_process_logout_frame(mchk_header, print); break; /* * Vector 680 - System, Environmental */ case PRIVATEER_MCHK__SYS_ENVIRON: /* System, Environmental */ status |= privateer_process_680_frame(mchk_header, print); break; /* * Unknown */ default: status |= MCHK_DISPOSITION_REPORT; if (print) { printk("%s** Unknown Error, frame follows\n", err_print_prefix); mchk_dump_logout_frame(mchk_header); } } return status; } void privateer_machine_check(unsigned long vector, unsigned long la_ptr) { struct el_common *mchk_header = (struct el_common *)la_ptr; struct el_TITAN_sysdata_mcheck *tmchk = (struct el_TITAN_sysdata_mcheck *) (la_ptr + mchk_header->sys_offset); u64 irqmask; char *saved_err_prefix = err_print_prefix; #define PRIVATEER_680_INTERRUPT_MASK (0xE00UL) #define PRIVATEER_HOTPLUG_INTERRUPT_MASK (0xE00UL) /* * Sync the processor. */ mb(); draina(); /* * Only handle system events here. 
*/ if (vector != SCB_Q_SYSEVENT) return titan_machine_check(vector, la_ptr); /* * Report the event - System Events should be reported even if no * error is indicated since the event could indicate the return * to normal status. */ err_print_prefix = KERN_CRIT; printk("%s*System Event (Vector 0x%x) reported on CPU %d:\n", err_print_prefix, (unsigned int)vector, (int)smp_processor_id()); privateer_process_680_frame(mchk_header, 1); err_print_prefix = saved_err_prefix; /* * Convert any pending interrupts which report as 680 machine * checks to interrupts. */ irqmask = tmchk->c_dirx & PRIVATEER_680_INTERRUPT_MASK; /* * Dispatch the interrupt(s). */ titan_dispatch_irqs(irqmask); /* * Release the logout frame. */ wrmces(0x7); mb(); }
gpl-2.0
kevinzhang1986/android_kernel_oneplus_msm8994
fs/isofs/util.c
13476
2441
/*
 *  linux/fs/isofs/util.c
 */

#include "isofs.h"

/*
 * We have to convert from a MM/DD/YY format to the Unix ctime format.
 * We have to take into account leap years and all of that good stuff.
 * Unfortunately, the kernel does not have the information on hand to
 * take into account daylight savings time, but it shouldn't matter.
 * The time stored should be localtime (with or without DST in effect),
 * and the timezone offset should hold the offset required to get back
 * to GMT.  Thus we should always be correct.
 */

/*
 * iso_date - decode a 7-byte ISO9660/High Sierra date stamp
 * @p:    raw date field: p[0]=years since 1900, p[1]=month (1-12),
 *        p[2]=day, p[3]=hour, p[4]=minute, p[5]=second,
 *        p[6]=timezone offset in 15-minute units (ISO9660 only)
 * @flag: 0 for ISO9660 (has a timezone byte), non-zero for High
 *        Sierra (no timezone byte)
 *
 * Returns the corresponding Unix time (seconds since 1970-01-01 UTC),
 * or 0 for dates before 1970.
 */
int iso_date(char * p, int flag)
{
	int year, month, day, hour, minute, second, tz;
	int crtime, days, i;

	year = p[0] - 70;	/* p[0] counts from 1900; rebase to 1970 */
	month = p[1];
	day = p[2];
	hour = p[3];
	minute = p[4];
	second = p[5];
	if (flag == 0) tz = p[6]; /* High sierra has no time zone */
	else tz = 0;

	if (year < 0) {
		/* pre-1970 dates cannot be represented; clamp to epoch */
		crtime = 0;
	} else {
		/* static const: the month-length table never changes, so
		   don't rebuild it on the stack on every call */
		static const int monlen[12] = {31,28,31,30,31,30,31,31,30,31,30,31};

		days = year * 365;
		/* one leap day for every 4th year since 1972 */
		if (year > 2)
			days += (year+1) / 4;
		for (i = 1; i < month; i++)
			days += monlen[i-1];
		/* current year is a leap year and we are past February */
		if (((year+2) % 4) == 0 && month > 2)
			days++;
		days += day - 1;
		crtime = ((((days * 24) + hour) * 60 + minute) * 60)
			+ second;

		/*
		 * Sign extend the raw 8-bit timezone value.  The
		 * historical "tz |= (-1 << 8)" left-shifted a negative
		 * value, which is undefined behaviour in C; OR-ing in
		 * the complement of the low byte is well defined and
		 * produces the identical two's-complement result.
		 */
		if (tz & 0x80)
			tz |= ~0xff;

		/*
		 * The timezone offset is unreliable on some disks,
		 * so we make a sanity check.  In no case is it ever
		 * more than 13 hours from GMT, which is 52*15min.
		 * The time is always stored in localtime with the
		 * timezone offset being what get added to GMT to
		 * get to localtime.  Thus we need to subtract the offset
		 * to get to true GMT, which is what we store the time
		 * as internally.  On the local system, the user may set
		 * their timezone any way they wish, of course, so GMT
		 * gets converted back to localtime on the receiving
		 * system.
		 *
		 * NOTE: mkisofs in versions prior to mkisofs-1.10 had
		 * the sign wrong on the timezone offset.  This has now
		 * been corrected there too, but if you are getting screwy
		 * results this may be the explanation.  If enough people
		 * complain, a user configuration option could be added
		 * to add the timezone offset in with the wrong sign
		 * for 'compatibility' with older discs, but I cannot see how
		 * it will matter that much.
		 *
		 * Thanks to kuhlmav@elec.canterbury.ac.nz (Volker Kuhlmann)
		 * for pointing out the sign error.
		 */
		if (-52 <= tz && tz <= 52)
			crtime -= tz * 15 * 60;
	}
	return crtime;
}
gpl-2.0
SdtBarbarossa/kernel-m8qlul
arch/m32r/lib/usercopy.c
13732
9059
/* * User address space access functions. * The non inlined parts of asm-m32r/uaccess.h are here. * * Copyright 1997 Andi Kleen <ak@muc.de> * Copyright 1997 Linus Torvalds * Copyright 2001, 2002, 2004 Hirokazu Takata */ #include <linux/prefetch.h> #include <linux/string.h> #include <linux/thread_info.h> #include <asm/uaccess.h> unsigned long __generic_copy_to_user(void __user *to, const void *from, unsigned long n) { prefetch(from); if (access_ok(VERIFY_WRITE, to, n)) __copy_user(to,from,n); return n; } unsigned long __generic_copy_from_user(void *to, const void __user *from, unsigned long n) { prefetchw(to); if (access_ok(VERIFY_READ, from, n)) __copy_user_zeroing(to,from,n); else memset(to, 0, n); return n; } /* * Copy a null terminated string from userspace. */ #ifdef CONFIG_ISA_DUAL_ISSUE #define __do_strncpy_from_user(dst,src,count,res) \ do { \ int __d0, __d1, __d2; \ __asm__ __volatile__( \ " beqz %1, 2f\n" \ " .fillinsn\n" \ "0: ldb r14, @%3 || addi %3, #1\n" \ " stb r14, @%4 || addi %4, #1\n" \ " beqz r14, 1f\n" \ " addi %1, #-1\n" \ " bnez %1, 0b\n" \ " .fillinsn\n" \ "1: sub %0, %1\n" \ " .fillinsn\n" \ "2:\n" \ ".section .fixup,\"ax\"\n" \ " .balign 4\n" \ "3: seth r14, #high(2b)\n" \ " or3 r14, r14, #low(2b)\n" \ " jmp r14 || ldi %0, #%5\n" \ ".previous\n" \ ".section __ex_table,\"a\"\n" \ " .balign 4\n" \ " .long 0b,3b\n" \ ".previous" \ : "=&r"(res), "=&r"(count), "=&r" (__d0), "=&r" (__d1), \ "=&r" (__d2) \ : "i"(-EFAULT), "0"(count), "1"(count), "3"(src), \ "4"(dst) \ : "r14", "cbit", "memory"); \ } while (0) #else /* not CONFIG_ISA_DUAL_ISSUE */ #define __do_strncpy_from_user(dst,src,count,res) \ do { \ int __d0, __d1, __d2; \ __asm__ __volatile__( \ " beqz %1, 2f\n" \ " .fillinsn\n" \ "0: ldb r14, @%3\n" \ " stb r14, @%4\n" \ " addi %3, #1\n" \ " addi %4, #1\n" \ " beqz r14, 1f\n" \ " addi %1, #-1\n" \ " bnez %1, 0b\n" \ " .fillinsn\n" \ "1: sub %0, %1\n" \ " .fillinsn\n" \ "2:\n" \ ".section .fixup,\"ax\"\n" \ " .balign 4\n" \ "3: ldi %0, #%5\n" 
\ " seth r14, #high(2b)\n" \ " or3 r14, r14, #low(2b)\n" \ " jmp r14\n" \ ".previous\n" \ ".section __ex_table,\"a\"\n" \ " .balign 4\n" \ " .long 0b,3b\n" \ ".previous" \ : "=&r"(res), "=&r"(count), "=&r" (__d0), "=&r" (__d1), \ "=&r" (__d2) \ : "i"(-EFAULT), "0"(count), "1"(count), "3"(src), \ "4"(dst) \ : "r14", "cbit", "memory"); \ } while (0) #endif /* CONFIG_ISA_DUAL_ISSUE */ long __strncpy_from_user(char *dst, const char __user *src, long count) { long res; __do_strncpy_from_user(dst, src, count, res); return res; } long strncpy_from_user(char *dst, const char __user *src, long count) { long res = -EFAULT; if (access_ok(VERIFY_READ, src, 1)) __do_strncpy_from_user(dst, src, count, res); return res; } /* * Zero Userspace */ #ifdef CONFIG_ISA_DUAL_ISSUE #define __do_clear_user(addr,size) \ do { \ int __dst, __c; \ __asm__ __volatile__( \ " beqz %1, 9f\n" \ " and3 r14, %0, #3\n" \ " bnez r14, 2f\n" \ " and3 r14, %1, #3\n" \ " bnez r14, 2f\n" \ " and3 %1, %1, #3\n" \ " beqz %2, 2f\n" \ " addi %0, #-4\n" \ " .fillinsn\n" \ "0: ; word clear \n" \ " st %6, @+%0 || addi %2, #-1\n" \ " bnez %2, 0b\n" \ " beqz %1, 9f\n" \ " .fillinsn\n" \ "2: ; byte clear \n" \ " stb %6, @%0 || addi %1, #-1\n" \ " addi %0, #1\n" \ " bnez %1, 2b\n" \ " .fillinsn\n" \ "9:\n" \ ".section .fixup,\"ax\"\n" \ " .balign 4\n" \ "4: slli %2, #2\n" \ " seth r14, #high(9b)\n" \ " or3 r14, r14, #low(9b)\n" \ " jmp r14 || add %1, %2\n" \ ".previous\n" \ ".section __ex_table,\"a\"\n" \ " .balign 4\n" \ " .long 0b,4b\n" \ " .long 2b,9b\n" \ ".previous\n" \ : "=&r"(__dst), "=&r"(size), "=&r"(__c) \ : "0"(addr), "1"(size), "2"(size / 4), "r"(0) \ : "r14", "cbit", "memory"); \ } while (0) #else /* not CONFIG_ISA_DUAL_ISSUE */ #define __do_clear_user(addr,size) \ do { \ int __dst, __c; \ __asm__ __volatile__( \ " beqz %1, 9f\n" \ " and3 r14, %0, #3\n" \ " bnez r14, 2f\n" \ " and3 r14, %1, #3\n" \ " bnez r14, 2f\n" \ " and3 %1, %1, #3\n" \ " beqz %2, 2f\n" \ " addi %0, #-4\n" \ " .fillinsn\n" \ "0: st 
%6, @+%0 ; word clear \n" \ " addi %2, #-1\n" \ " bnez %2, 0b\n" \ " beqz %1, 9f\n" \ " .fillinsn\n" \ "2: stb %6, @%0 ; byte clear \n" \ " addi %1, #-1\n" \ " addi %0, #1\n" \ " bnez %1, 2b\n" \ " .fillinsn\n" \ "9:\n" \ ".section .fixup,\"ax\"\n" \ " .balign 4\n" \ "4: slli %2, #2\n" \ " add %1, %2\n" \ " seth r14, #high(9b)\n" \ " or3 r14, r14, #low(9b)\n" \ " jmp r14\n" \ ".previous\n" \ ".section __ex_table,\"a\"\n" \ " .balign 4\n" \ " .long 0b,4b\n" \ " .long 2b,9b\n" \ ".previous\n" \ : "=&r"(__dst), "=&r"(size), "=&r"(__c) \ : "0"(addr), "1"(size), "2"(size / 4), "r"(0) \ : "r14", "cbit", "memory"); \ } while (0) #endif /* not CONFIG_ISA_DUAL_ISSUE */ unsigned long clear_user(void __user *to, unsigned long n) { if (access_ok(VERIFY_WRITE, to, n)) __do_clear_user(to, n); return n; } unsigned long __clear_user(void __user *to, unsigned long n) { __do_clear_user(to, n); return n; } /* * Return the size of a string (including the ending 0) * * Return 0 on exception, a value greater than N if too long */ #ifdef CONFIG_ISA_DUAL_ISSUE long strnlen_user(const char __user *s, long n) { unsigned long mask = -__addr_ok(s); unsigned long res; __asm__ __volatile__( " and %0, %5 || mv r1, %1\n" " beqz %0, strnlen_exit\n" " and3 r0, %1, #3\n" " bnez r0, strnlen_byte_loop\n" " cmpui %0, #4\n" " bc strnlen_byte_loop\n" "strnlen_word_loop:\n" "0: ld r0, @%1+\n" " pcmpbz r0\n" " bc strnlen_last_bytes_fixup\n" " addi %0, #-4\n" " beqz %0, strnlen_exit\n" " bgtz %0, strnlen_word_loop\n" "strnlen_last_bytes:\n" " mv %0, %4\n" "strnlen_last_bytes_fixup:\n" " addi %1, #-4\n" "strnlen_byte_loop:\n" "1: ldb r0, @%1 || addi %0, #-1\n" " beqz r0, strnlen_exit\n" " addi %1, #1\n" " bnez %0, strnlen_byte_loop\n" "strnlen_exit:\n" " sub %1, r1\n" " add3 %0, %1, #1\n" " .fillinsn\n" "9:\n" ".section .fixup,\"ax\"\n" " .balign 4\n" "4: addi %1, #-4\n" " .fillinsn\n" "5: seth r1, #high(9b)\n" " or3 r1, r1, #low(9b)\n" " jmp r1 || ldi %0, #0\n" ".previous\n" ".section __ex_table,\"a\"\n" " 
.balign 4\n" " .long 0b,4b\n" " .long 1b,5b\n" ".previous" : "=&r" (res), "=r" (s) : "0" (n), "1" (s), "r" (n & 3), "r" (mask), "r"(0x01010101) : "r0", "r1", "cbit"); /* NOTE: strnlen_user() algorithm: * { * char *p; * for (p = s; n-- && *p != '\0'; ++p) * ; * return p - s + 1; * } */ /* NOTE: If a null char. exists, return 0. * if ((x - 0x01010101) & ~x & 0x80808080)\n" * return 0;\n" */ return res & mask; } #else /* not CONFIG_ISA_DUAL_ISSUE */ long strnlen_user(const char __user *s, long n) { unsigned long mask = -__addr_ok(s); unsigned long res; __asm__ __volatile__( " and %0, %5\n" " mv r1, %1\n" " beqz %0, strnlen_exit\n" " and3 r0, %1, #3\n" " bnez r0, strnlen_byte_loop\n" " cmpui %0, #4\n" " bc strnlen_byte_loop\n" " sll3 r3, %6, #7\n" "strnlen_word_loop:\n" "0: ld r0, @%1+\n" " not r2, r0\n" " sub r0, %6\n" " and r2, r3\n" " and r2, r0\n" " bnez r2, strnlen_last_bytes_fixup\n" " addi %0, #-4\n" " beqz %0, strnlen_exit\n" " bgtz %0, strnlen_word_loop\n" "strnlen_last_bytes:\n" " mv %0, %4\n" "strnlen_last_bytes_fixup:\n" " addi %1, #-4\n" "strnlen_byte_loop:\n" "1: ldb r0, @%1\n" " addi %0, #-1\n" " beqz r0, strnlen_exit\n" " addi %1, #1\n" " bnez %0, strnlen_byte_loop\n" "strnlen_exit:\n" " sub %1, r1\n" " add3 %0, %1, #1\n" " .fillinsn\n" "9:\n" ".section .fixup,\"ax\"\n" " .balign 4\n" "4: addi %1, #-4\n" " .fillinsn\n" "5: ldi %0, #0\n" " seth r1, #high(9b)\n" " or3 r1, r1, #low(9b)\n" " jmp r1\n" ".previous\n" ".section __ex_table,\"a\"\n" " .balign 4\n" " .long 0b,4b\n" " .long 1b,5b\n" ".previous" : "=&r" (res), "=r" (s) : "0" (n), "1" (s), "r" (n & 3), "r" (mask), "r"(0x01010101) : "r0", "r1", "r2", "r3", "cbit"); /* NOTE: strnlen_user() algorithm: * { * char *p; * for (p = s; n-- && *p != '\0'; ++p) * ; * return p - s + 1; * } */ /* NOTE: If a null char. exists, return 0. * if ((x - 0x01010101) & ~x & 0x80808080)\n" * return 0;\n" */ return res & mask; } #endif /* CONFIG_ISA_DUAL_ISSUE */
gpl-2.0
silverbullettechnology/linux-adi
drivers/dma/acpi-dma.c
165
12612
/* * ACPI helpers for DMA request / controller * * Based on of-dma.c * * Copyright (C) 2013, Intel Corporation * Authors: Andy Shevchenko <andriy.shevchenko@linux.intel.com> * Mika Westerberg <mika.westerberg@linux.intel.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/device.h> #include <linux/module.h> #include <linux/list.h> #include <linux/mutex.h> #include <linux/slab.h> #include <linux/ioport.h> #include <linux/acpi.h> #include <linux/acpi_dma.h> static LIST_HEAD(acpi_dma_list); static DEFINE_MUTEX(acpi_dma_lock); /** * acpi_dma_parse_resource_group - match device and parse resource group * @grp: CSRT resource group * @adev: ACPI device to match with * @adma: struct acpi_dma of the given DMA controller * * Returns 1 on success, 0 when no information is available, or appropriate * errno value on error. * * In order to match a device from DSDT table to the corresponding CSRT device * we use MMIO address and IRQ. 
*/ static int acpi_dma_parse_resource_group(const struct acpi_csrt_group *grp, struct acpi_device *adev, struct acpi_dma *adma) { const struct acpi_csrt_shared_info *si; struct list_head resource_list; struct resource_list_entry *rentry; resource_size_t mem = 0, irq = 0; int ret; if (grp->shared_info_length != sizeof(struct acpi_csrt_shared_info)) return -ENODEV; INIT_LIST_HEAD(&resource_list); ret = acpi_dev_get_resources(adev, &resource_list, NULL, NULL); if (ret <= 0) return 0; list_for_each_entry(rentry, &resource_list, node) { if (resource_type(&rentry->res) == IORESOURCE_MEM) mem = rentry->res.start; else if (resource_type(&rentry->res) == IORESOURCE_IRQ) irq = rentry->res.start; } acpi_dev_free_resource_list(&resource_list); /* Consider initial zero values as resource not found */ if (mem == 0 && irq == 0) return 0; si = (const struct acpi_csrt_shared_info *)&grp[1]; /* Match device by MMIO and IRQ */ if (si->mmio_base_low != mem || si->gsi_interrupt != irq) return 0; dev_dbg(&adev->dev, "matches with %.4s%04X (rev %u)\n", (char *)&grp->vendor_id, grp->device_id, grp->revision); /* Check if the request line range is available */ if (si->base_request_line == 0 && si->num_handshake_signals == 0) return 0; adma->base_request_line = si->base_request_line; adma->end_request_line = si->base_request_line + si->num_handshake_signals - 1; dev_dbg(&adev->dev, "request line base: 0x%04x end: 0x%04x\n", adma->base_request_line, adma->end_request_line); return 1; } /** * acpi_dma_parse_csrt - parse CSRT to exctract additional DMA resources * @adev: ACPI device to match with * @adma: struct acpi_dma of the given DMA controller * * CSRT or Core System Resources Table is a proprietary ACPI table * introduced by Microsoft. This table can contain devices that are not in * the system DSDT table. In particular DMA controllers might be described * here. * * We are using this table to get the request line range of the specific DMA * controller to be used later. 
* */ static void acpi_dma_parse_csrt(struct acpi_device *adev, struct acpi_dma *adma) { struct acpi_csrt_group *grp, *end; struct acpi_table_csrt *csrt; acpi_status status; int ret; status = acpi_get_table(ACPI_SIG_CSRT, 0, (struct acpi_table_header **)&csrt); if (ACPI_FAILURE(status)) { if (status != AE_NOT_FOUND) dev_warn(&adev->dev, "failed to get the CSRT table\n"); return; } grp = (struct acpi_csrt_group *)(csrt + 1); end = (struct acpi_csrt_group *)((void *)csrt + csrt->header.length); while (grp < end) { ret = acpi_dma_parse_resource_group(grp, adev, adma); if (ret < 0) { dev_warn(&adev->dev, "error in parsing resource group\n"); return; } grp = (struct acpi_csrt_group *)((void *)grp + grp->length); } } /** * acpi_dma_controller_register - Register a DMA controller to ACPI DMA helpers * @dev: struct device of DMA controller * @acpi_dma_xlate: translation function which converts a dma specifier * into a dma_chan structure * @data pointer to controller specific data to be used by * translation function * * Returns 0 on success or appropriate errno value on error. * * Allocated memory should be freed with appropriate acpi_dma_controller_free() * call. 
*/ int acpi_dma_controller_register(struct device *dev, struct dma_chan *(*acpi_dma_xlate) (struct acpi_dma_spec *, struct acpi_dma *), void *data) { struct acpi_device *adev; struct acpi_dma *adma; if (!dev || !acpi_dma_xlate) return -EINVAL; /* Check if the device was enumerated by ACPI */ if (!ACPI_HANDLE(dev)) return -EINVAL; if (acpi_bus_get_device(ACPI_HANDLE(dev), &adev)) return -EINVAL; adma = kzalloc(sizeof(*adma), GFP_KERNEL); if (!adma) return -ENOMEM; adma->dev = dev; adma->acpi_dma_xlate = acpi_dma_xlate; adma->data = data; acpi_dma_parse_csrt(adev, adma); /* Now queue acpi_dma controller structure in list */ mutex_lock(&acpi_dma_lock); list_add_tail(&adma->dma_controllers, &acpi_dma_list); mutex_unlock(&acpi_dma_lock); return 0; } EXPORT_SYMBOL_GPL(acpi_dma_controller_register); /** * acpi_dma_controller_free - Remove a DMA controller from ACPI DMA helpers list * @dev: struct device of DMA controller * * Memory allocated by acpi_dma_controller_register() is freed here. */ int acpi_dma_controller_free(struct device *dev) { struct acpi_dma *adma; if (!dev) return -EINVAL; mutex_lock(&acpi_dma_lock); list_for_each_entry(adma, &acpi_dma_list, dma_controllers) if (adma->dev == dev) { list_del(&adma->dma_controllers); mutex_unlock(&acpi_dma_lock); kfree(adma); return 0; } mutex_unlock(&acpi_dma_lock); return -ENODEV; } EXPORT_SYMBOL_GPL(acpi_dma_controller_free); static void devm_acpi_dma_release(struct device *dev, void *res) { acpi_dma_controller_free(dev); } /** * devm_acpi_dma_controller_register - resource managed acpi_dma_controller_register() * @dev: device that is registering this DMA controller * @acpi_dma_xlate: translation function * @data pointer to controller specific data * * Managed acpi_dma_controller_register(). DMA controller registered by this * function are automatically freed on driver detach. See * acpi_dma_controller_register() for more information. 
*/ int devm_acpi_dma_controller_register(struct device *dev, struct dma_chan *(*acpi_dma_xlate) (struct acpi_dma_spec *, struct acpi_dma *), void *data) { void *res; int ret; res = devres_alloc(devm_acpi_dma_release, 0, GFP_KERNEL); if (!res) return -ENOMEM; ret = acpi_dma_controller_register(dev, acpi_dma_xlate, data); if (ret) { devres_free(res); return ret; } devres_add(dev, res); return 0; } EXPORT_SYMBOL_GPL(devm_acpi_dma_controller_register); /** * devm_acpi_dma_controller_free - resource managed acpi_dma_controller_free() * * Unregister a DMA controller registered with * devm_acpi_dma_controller_register(). Normally this function will not need to * be called and the resource management code will ensure that the resource is * freed. */ void devm_acpi_dma_controller_free(struct device *dev) { WARN_ON(devres_destroy(dev, devm_acpi_dma_release, NULL, NULL)); } EXPORT_SYMBOL_GPL(devm_acpi_dma_controller_free); /** * acpi_dma_update_dma_spec - prepare dma specifier to pass to translation function * @adma: struct acpi_dma of DMA controller * @dma_spec: dma specifier to update * * Returns 0, if no information is avaiable, -1 on mismatch, and 1 otherwise. * * Accordingly to ACPI 5.0 Specification Table 6-170 "Fixed DMA Resource * Descriptor": * DMA Request Line bits is a platform-relative number uniquely * identifying the request line assigned. Request line-to-Controller * mapping is done in a controller-specific OS driver. * That's why we can safely adjust slave_id when the appropriate controller is * found. 
*/ static int acpi_dma_update_dma_spec(struct acpi_dma *adma, struct acpi_dma_spec *dma_spec) { /* Set link to the DMA controller device */ dma_spec->dev = adma->dev; /* Check if the request line range is available */ if (adma->base_request_line == 0 && adma->end_request_line == 0) return 0; /* Check if slave_id falls to the range */ if (dma_spec->slave_id < adma->base_request_line || dma_spec->slave_id > adma->end_request_line) return -1; /* * Here we adjust slave_id. It should be a relative number to the base * request line. */ dma_spec->slave_id -= adma->base_request_line; return 1; } struct acpi_dma_parser_data { struct acpi_dma_spec dma_spec; size_t index; size_t n; }; /** * acpi_dma_parse_fixed_dma - Parse FixedDMA ACPI resources to a DMA specifier * @res: struct acpi_resource to get FixedDMA resources from * @data: pointer to a helper struct acpi_dma_parser_data */ static int acpi_dma_parse_fixed_dma(struct acpi_resource *res, void *data) { struct acpi_dma_parser_data *pdata = data; if (res->type == ACPI_RESOURCE_TYPE_FIXED_DMA) { struct acpi_resource_fixed_dma *dma = &res->data.fixed_dma; if (pdata->n++ == pdata->index) { pdata->dma_spec.chan_id = dma->channels; pdata->dma_spec.slave_id = dma->request_lines; } } /* Tell the ACPI core to skip this resource */ return 1; } /** * acpi_dma_request_slave_chan_by_index - Get the DMA slave channel * @dev: struct device to get DMA request from * @index: index of FixedDMA descriptor for @dev * * Returns pointer to appropriate dma channel on success or NULL on error. 
*/ struct dma_chan *acpi_dma_request_slave_chan_by_index(struct device *dev, size_t index) { struct acpi_dma_parser_data pdata; struct acpi_dma_spec *dma_spec = &pdata.dma_spec; struct list_head resource_list; struct acpi_device *adev; struct acpi_dma *adma; struct dma_chan *chan = NULL; int found; /* Check if the device was enumerated by ACPI */ if (!dev || !ACPI_HANDLE(dev)) return NULL; if (acpi_bus_get_device(ACPI_HANDLE(dev), &adev)) return NULL; memset(&pdata, 0, sizeof(pdata)); pdata.index = index; /* Initial values for the request line and channel */ dma_spec->chan_id = -1; dma_spec->slave_id = -1; INIT_LIST_HEAD(&resource_list); acpi_dev_get_resources(adev, &resource_list, acpi_dma_parse_fixed_dma, &pdata); acpi_dev_free_resource_list(&resource_list); if (dma_spec->slave_id < 0 || dma_spec->chan_id < 0) return NULL; mutex_lock(&acpi_dma_lock); list_for_each_entry(adma, &acpi_dma_list, dma_controllers) { /* * We are not going to call translation function if slave_id * doesn't fall to the request range. */ found = acpi_dma_update_dma_spec(adma, dma_spec); if (found < 0) continue; chan = adma->acpi_dma_xlate(dma_spec, adma); /* * Try to get a channel only from the DMA controller that * matches the slave_id. See acpi_dma_update_dma_spec() * description for the details. */ if (found > 0 || chan) break; } mutex_unlock(&acpi_dma_lock); return chan; } EXPORT_SYMBOL_GPL(acpi_dma_request_slave_chan_by_index); /** * acpi_dma_request_slave_chan_by_name - Get the DMA slave channel * @dev: struct device to get DMA request from * @name: represents corresponding FixedDMA descriptor for @dev * * In order to support both Device Tree and ACPI in a single driver we * translate the names "tx" and "rx" here based on the most common case where * the first FixedDMA descriptor is TX and second is RX. * * Returns pointer to appropriate dma channel on success or NULL on error. 
*/ struct dma_chan *acpi_dma_request_slave_chan_by_name(struct device *dev, const char *name) { size_t index; if (!strcmp(name, "tx")) index = 0; else if (!strcmp(name, "rx")) index = 1; else return NULL; return acpi_dma_request_slave_chan_by_index(dev, index); } EXPORT_SYMBOL_GPL(acpi_dma_request_slave_chan_by_name); /** * acpi_dma_simple_xlate - Simple ACPI DMA engine translation helper * @dma_spec: pointer to ACPI DMA specifier * @adma: pointer to ACPI DMA controller data * * A simple translation function for ACPI based devices. Passes &struct * dma_spec to the DMA controller driver provided filter function. Returns * pointer to the channel if found or %NULL otherwise. */ struct dma_chan *acpi_dma_simple_xlate(struct acpi_dma_spec *dma_spec, struct acpi_dma *adma) { struct acpi_dma_filter_info *info = adma->data; if (!info || !info->filter_fn) return NULL; return dma_request_channel(info->dma_cap, info->filter_fn, dma_spec); } EXPORT_SYMBOL_GPL(acpi_dma_simple_xlate);
gpl-2.0
thicklizard/m9-patches
net/bluetooth/lib.c
165
2979
/* BlueZ - Bluetooth protocol stack for Linux Copyright (C) 2000-2001 Qualcomm Incorporated Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License version 2 as published by the Free Software Foundation; THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS SOFTWARE IS DISCLAIMED. */ /* Bluetooth kernel library. 
*/ #define pr_fmt(fmt) "Bluetooth: " fmt #include <linux/export.h> #include <net/bluetooth/bluetooth.h> void baswap(bdaddr_t *dst, bdaddr_t *src) { unsigned char *d = (unsigned char *) dst; unsigned char *s = (unsigned char *) src; unsigned int i; for (i = 0; i < 6; i++) d[i] = s[5 - i]; } EXPORT_SYMBOL(baswap); /* Bluetooth error codes to Unix errno mapping */ int bt_to_errno(__u16 code) { switch (code) { case 0: return 0; case 0x01: return EBADRQC; case 0x02: return ENOTCONN; case 0x03: return EIO; case 0x04: return EHOSTDOWN; case 0x05: return EACCES; case 0x06: return EBADE; case 0x07: return ENOMEM; case 0x08: return ETIMEDOUT; case 0x09: return EMLINK; case 0x0a: return EMLINK; case 0x0b: return EALREADY; case 0x0c: return EBUSY; case 0x0d: case 0x0e: case 0x0f: return ECONNREFUSED; case 0x10: return ETIMEDOUT; case 0x11: case 0x27: case 0x29: case 0x20: return EOPNOTSUPP; case 0x12: return EINVAL; case 0x13: case 0x14: case 0x15: return ECONNRESET; case 0x16: return ECONNABORTED; case 0x17: return ELOOP; case 0x18: return EACCES; case 0x1a: return EPROTONOSUPPORT; case 0x1b: return ECONNREFUSED; case 0x19: case 0x1e: case 0x23: case 0x24: case 0x25: return EPROTO; default: return ENOSYS; } } EXPORT_SYMBOL(bt_to_errno); int bt_info(const char *format, ...) { struct va_format vaf; va_list args; int r; va_start(args, format); vaf.fmt = format; vaf.va = &args; r = pr_info("%pKV", &vaf); va_end(args); return r; } EXPORT_SYMBOL(bt_info); int bt_err(const char *format, ...) { struct va_format vaf; va_list args; int r; va_start(args, format); vaf.fmt = format; vaf.va = &args; r = pr_err("%pKV", &vaf); va_end(args); return r; } EXPORT_SYMBOL(bt_err);
gpl-2.0
bbedward/ZenKernel_G920
drivers/usb/serial/usb-serial.c
677
40918
/* * USB Serial Converter driver * * Copyright (C) 2009 - 2013 Johan Hovold (jhovold@gmail.com) * Copyright (C) 1999 - 2012 Greg Kroah-Hartman (greg@kroah.com) * Copyright (C) 2000 Peter Berger (pberger@brimson.com) * Copyright (C) 2000 Al Borchers (borchers@steinerpoint.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version * 2 as published by the Free Software Foundation. * * This driver was originally based on the ACM driver by Armin Fuerst (which was * based on a driver by Brad Keryan) * * See Documentation/usb/usb-serial.txt for more information on using this * driver */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kernel.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/tty.h> #include <linux/tty_driver.h> #include <linux/tty_flip.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/seq_file.h> #include <linux/spinlock.h> #include <linux/mutex.h> #include <linux/list.h> #include <linux/uaccess.h> #include <linux/serial.h> #include <linux/usb.h> #include <linux/usb/serial.h> #include <linux/kfifo.h> #include "pl2303.h" #define DRIVER_AUTHOR "Greg Kroah-Hartman <gregkh@linuxfoundation.org>" #define DRIVER_DESC "USB Serial Driver core" /* There is no MODULE_DEVICE_TABLE for usbserial.c. Instead the MODULE_DEVICE_TABLE declarations in each serial driver cause the "hotplug" program to pull in whatever module is necessary via modprobe, and modprobe will load usbserial because the serial drivers depend on it. */ static struct usb_serial *serial_table[SERIAL_TTY_MINORS]; static DEFINE_MUTEX(table_lock); static LIST_HEAD(usb_serial_driver_list); /* * Look up the serial structure. If it is found and it hasn't been * disconnected, return with its disc_mutex held and its refcount * incremented. Otherwise return NULL. 
*/ struct usb_serial *usb_serial_get_by_index(unsigned index) { struct usb_serial *serial; mutex_lock(&table_lock); serial = serial_table[index]; if (serial) { mutex_lock(&serial->disc_mutex); if (serial->disconnected) { mutex_unlock(&serial->disc_mutex); serial = NULL; } else { kref_get(&serial->kref); } } mutex_unlock(&table_lock); return serial; } static struct usb_serial *get_free_serial(struct usb_serial *serial, int num_ports, unsigned int *minor) { unsigned int i, j; int good_spot; dev_dbg(&serial->interface->dev, "%s %d\n", __func__, num_ports); *minor = 0; mutex_lock(&table_lock); for (i = 0; i < SERIAL_TTY_MINORS; ++i) { if (serial_table[i]) continue; good_spot = 1; for (j = 1; j <= num_ports-1; ++j) if ((i+j >= SERIAL_TTY_MINORS) || (serial_table[i+j])) { good_spot = 0; i += j; break; } if (good_spot == 0) continue; *minor = i; j = 0; dev_dbg(&serial->interface->dev, "%s - minor base = %d\n", __func__, *minor); for (i = *minor; (i < (*minor + num_ports)) && (i < SERIAL_TTY_MINORS); ++i) { serial_table[i] = serial; serial->port[j++]->number = i; } mutex_unlock(&table_lock); return serial; } mutex_unlock(&table_lock); return NULL; } static void return_serial(struct usb_serial *serial) { int i; mutex_lock(&table_lock); for (i = 0; i < serial->num_ports; ++i) serial_table[serial->minor + i] = NULL; mutex_unlock(&table_lock); } static void destroy_serial(struct kref *kref) { struct usb_serial *serial; struct usb_serial_port *port; int i; serial = to_usb_serial(kref); /* return the minor range that this device had */ if (serial->minor != SERIAL_TTY_NO_MINOR) return_serial(serial); if (serial->attached && serial->type->release) serial->type->release(serial); /* Now that nothing is using the ports, they can be freed */ for (i = 0; i < serial->num_port_pointers; ++i) { port = serial->port[i]; if (port) { port->serial = NULL; put_device(&port->dev); } } usb_put_intf(serial->interface); usb_put_dev(serial->dev); kfree(serial); } void usb_serial_put(struct 
usb_serial *serial) { kref_put(&serial->kref, destroy_serial); } /***************************************************************************** * Driver tty interface functions *****************************************************************************/ /** * serial_install - install tty * @driver: the driver (USB in our case) * @tty: the tty being created * * Create the termios objects for this tty. We use the default * USB serial settings but permit them to be overridden by * serial->type->init_termios. * * This is the first place a new tty gets used. Hence this is where we * acquire references to the usb_serial structure and the driver module, * where we store a pointer to the port, and where we do an autoresume. * All these actions are reversed in serial_cleanup(). */ static int serial_install(struct tty_driver *driver, struct tty_struct *tty) { int idx = tty->index; struct usb_serial *serial; struct usb_serial_port *port; int retval = -ENODEV; serial = usb_serial_get_by_index(idx); if (!serial) return retval; port = serial->port[idx - serial->minor]; if (!port) goto error_no_port; if (!try_module_get(serial->type->driver.owner)) goto error_module_get; retval = usb_autopm_get_interface(serial->interface); if (retval) goto error_get_interface; retval = tty_port_install(&port->port, driver, tty); if (retval) goto error_init_termios; mutex_unlock(&serial->disc_mutex); /* allow the driver to update the settings */ if (serial->type->init_termios) serial->type->init_termios(tty); tty->driver_data = port; return retval; error_init_termios: usb_autopm_put_interface(serial->interface); error_get_interface: module_put(serial->type->driver.owner); error_module_get: error_no_port: usb_serial_put(serial); mutex_unlock(&serial->disc_mutex); return retval; } static int serial_port_activate(struct tty_port *tport, struct tty_struct *tty) { struct usb_serial_port *port = container_of(tport, struct usb_serial_port, port); struct usb_serial *serial = port->serial; int retval; 
mutex_lock(&serial->disc_mutex); if (serial->disconnected) retval = -ENODEV; else retval = port->serial->type->open(tty, port); mutex_unlock(&serial->disc_mutex); if (retval < 0) retval = usb_translate_errors(retval); return retval; } static int serial_open(struct tty_struct *tty, struct file *filp) { struct usb_serial_port *port = tty->driver_data; dev_dbg(tty->dev, "%s\n", __func__); return tty_port_open(&port->port, tty, filp); } /** * serial_port_shutdown - shut down hardware * @tport: tty port to shut down * * Shut down a USB serial port. Serialized against activate by the * tport mutex and kept to matching open/close pairs * of calls by the ASYNCB_INITIALIZED flag. * * Not called if tty is console. */ static void serial_port_shutdown(struct tty_port *tport) { struct usb_serial_port *port = container_of(tport, struct usb_serial_port, port); struct usb_serial_driver *drv = port->serial->type; if (drv->close) drv->close(port); } static void serial_hangup(struct tty_struct *tty) { struct usb_serial_port *port = tty->driver_data; dev_dbg(tty->dev, "%s\n", __func__); tty_port_hangup(&port->port); } static void serial_close(struct tty_struct *tty, struct file *filp) { struct usb_serial_port *port = tty->driver_data; dev_dbg(tty->dev, "%s\n", __func__); tty_port_close(&port->port, tty, filp); } /** * serial_cleanup - free resources post close/hangup * @port: port to free up * * Do the resource freeing and refcount dropping for the port. * Avoid freeing the console. * * Called asynchronously after the last tty kref is dropped. */ static void serial_cleanup(struct tty_struct *tty) { struct usb_serial_port *port = tty->driver_data; struct usb_serial *serial; struct module *owner; dev_dbg(tty->dev, "%s\n", __func__); /* The console is magical. Do not hang up the console hardware * or there will be tears. 
*/ if (port->port.console) return; tty->driver_data = NULL; serial = port->serial; owner = serial->type->driver.owner; mutex_lock(&serial->disc_mutex); if (!serial->disconnected) usb_autopm_put_interface(serial->interface); mutex_unlock(&serial->disc_mutex); usb_serial_put(serial); module_put(owner); } static int serial_write(struct tty_struct *tty, const unsigned char *buf, int count) { struct usb_serial_port *port = tty->driver_data; int retval = -ENODEV; if (port->serial->dev->state == USB_STATE_NOTATTACHED) goto exit; dev_dbg(tty->dev, "%s - %d byte(s)\n", __func__, count); retval = port->serial->type->write(tty, port, buf, count); if (retval < 0) retval = usb_translate_errors(retval); exit: return retval; } static int serial_write_room(struct tty_struct *tty) { struct usb_serial_port *port = tty->driver_data; dev_dbg(tty->dev, "%s\n", __func__); return port->serial->type->write_room(tty); } static int serial_chars_in_buffer(struct tty_struct *tty) { struct usb_serial_port *port = tty->driver_data; struct usb_serial *serial = port->serial; dev_dbg(tty->dev, "%s\n", __func__); if (serial->disconnected) return 0; return serial->type->chars_in_buffer(tty); } static void serial_wait_until_sent(struct tty_struct *tty, int timeout) { struct usb_serial_port *port = tty->driver_data; struct usb_serial *serial = port->serial; dev_dbg(tty->dev, "%s\n", __func__); if (!port->serial->type->wait_until_sent) return; mutex_lock(&serial->disc_mutex); if (!serial->disconnected) port->serial->type->wait_until_sent(tty, timeout); mutex_unlock(&serial->disc_mutex); } static void serial_throttle(struct tty_struct *tty) { struct usb_serial_port *port = tty->driver_data; dev_dbg(tty->dev, "%s\n", __func__); if (port->serial->type->throttle) port->serial->type->throttle(tty); } static void serial_unthrottle(struct tty_struct *tty) { struct usb_serial_port *port = tty->driver_data; dev_dbg(tty->dev, "%s\n", __func__); if (port->serial->type->unthrottle) 
port->serial->type->unthrottle(tty); } static int serial_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg) { struct usb_serial_port *port = tty->driver_data; int retval = -ENOIOCTLCMD; dev_dbg(tty->dev, "%s - cmd 0x%.4x\n", __func__, cmd); switch (cmd) { case TIOCMIWAIT: if (port->serial->type->tiocmiwait) retval = port->serial->type->tiocmiwait(tty, arg); break; default: if (port->serial->type->ioctl) retval = port->serial->type->ioctl(tty, cmd, arg); } return retval; } static void serial_set_termios(struct tty_struct *tty, struct ktermios *old) { struct usb_serial_port *port = tty->driver_data; dev_dbg(tty->dev, "%s\n", __func__); if (port->serial->type->set_termios) port->serial->type->set_termios(tty, port, old); else tty_termios_copy_hw(&tty->termios, old); } static int serial_break(struct tty_struct *tty, int break_state) { struct usb_serial_port *port = tty->driver_data; dev_dbg(tty->dev, "%s\n", __func__); if (port->serial->type->break_ctl) port->serial->type->break_ctl(tty, break_state); return 0; } static int serial_proc_show(struct seq_file *m, void *v) { struct usb_serial *serial; int i; char tmp[40]; seq_puts(m, "usbserinfo:1.0 driver:2.0\n"); for (i = 0; i < SERIAL_TTY_MINORS; ++i) { serial = usb_serial_get_by_index(i); if (serial == NULL) continue; seq_printf(m, "%d:", i); if (serial->type->driver.owner) seq_printf(m, " module:%s", module_name(serial->type->driver.owner)); seq_printf(m, " name:\"%s\"", serial->type->description); seq_printf(m, " vendor:%04x product:%04x", le16_to_cpu(serial->dev->descriptor.idVendor), le16_to_cpu(serial->dev->descriptor.idProduct)); seq_printf(m, " num_ports:%d", serial->num_ports); seq_printf(m, " port:%d", i - serial->minor + 1); usb_make_path(serial->dev, tmp, sizeof(tmp)); seq_printf(m, " path:%s", tmp); seq_putc(m, '\n'); usb_serial_put(serial); mutex_unlock(&serial->disc_mutex); } return 0; } static int serial_proc_open(struct inode *inode, struct file *file) { return single_open(file, 
serial_proc_show, NULL); } static const struct file_operations serial_proc_fops = { .owner = THIS_MODULE, .open = serial_proc_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static int serial_tiocmget(struct tty_struct *tty) { struct usb_serial_port *port = tty->driver_data; dev_dbg(tty->dev, "%s\n", __func__); if (port->serial->type->tiocmget) return port->serial->type->tiocmget(tty); return -EINVAL; } static int serial_tiocmset(struct tty_struct *tty, unsigned int set, unsigned int clear) { struct usb_serial_port *port = tty->driver_data; dev_dbg(tty->dev, "%s\n", __func__); if (port->serial->type->tiocmset) return port->serial->type->tiocmset(tty, set, clear); return -EINVAL; } static int serial_get_icount(struct tty_struct *tty, struct serial_icounter_struct *icount) { struct usb_serial_port *port = tty->driver_data; dev_dbg(tty->dev, "%s\n", __func__); if (port->serial->type->get_icount) return port->serial->type->get_icount(tty, icount); return -EINVAL; } /* * We would be calling tty_wakeup here, but unfortunately some line * disciplines have an annoying habit of calling tty->write from * the write wakeup callback (e.g. n_hdlc.c). 
*/ void usb_serial_port_softint(struct usb_serial_port *port) { schedule_work(&port->work); } EXPORT_SYMBOL_GPL(usb_serial_port_softint); static void usb_serial_port_work(struct work_struct *work) { struct usb_serial_port *port = container_of(work, struct usb_serial_port, work); tty_port_tty_wakeup(&port->port); } static void usb_serial_port_poison_urbs(struct usb_serial_port *port) { int i; for (i = 0; i < ARRAY_SIZE(port->read_urbs); ++i) usb_poison_urb(port->read_urbs[i]); for (i = 0; i < ARRAY_SIZE(port->write_urbs); ++i) usb_poison_urb(port->write_urbs[i]); usb_poison_urb(port->interrupt_in_urb); usb_poison_urb(port->interrupt_out_urb); } static void usb_serial_port_unpoison_urbs(struct usb_serial_port *port) { int i; for (i = 0; i < ARRAY_SIZE(port->read_urbs); ++i) usb_unpoison_urb(port->read_urbs[i]); for (i = 0; i < ARRAY_SIZE(port->write_urbs); ++i) usb_unpoison_urb(port->write_urbs[i]); usb_unpoison_urb(port->interrupt_in_urb); usb_unpoison_urb(port->interrupt_out_urb); } static void usb_serial_port_release(struct device *dev) { struct usb_serial_port *port = to_usb_serial_port(dev); int i; dev_dbg(dev, "%s\n", __func__); usb_free_urb(port->interrupt_in_urb); usb_free_urb(port->interrupt_out_urb); for (i = 0; i < ARRAY_SIZE(port->read_urbs); ++i) { usb_free_urb(port->read_urbs[i]); kfree(port->bulk_in_buffers[i]); } for (i = 0; i < ARRAY_SIZE(port->write_urbs); ++i) { usb_free_urb(port->write_urbs[i]); kfree(port->bulk_out_buffers[i]); } kfifo_free(&port->write_fifo); kfree(port->interrupt_in_buffer); kfree(port->interrupt_out_buffer); tty_port_destroy(&port->port); kfree(port); } static struct usb_serial *create_serial(struct usb_device *dev, struct usb_interface *interface, struct usb_serial_driver *driver) { struct usb_serial *serial; serial = kzalloc(sizeof(*serial), GFP_KERNEL); if (!serial) return NULL; serial->dev = usb_get_dev(dev); serial->type = driver; serial->interface = usb_get_intf(interface); kref_init(&serial->kref); 
mutex_init(&serial->disc_mutex); serial->minor = SERIAL_TTY_NO_MINOR; return serial; } static const struct usb_device_id *match_dynamic_id(struct usb_interface *intf, struct usb_serial_driver *drv) { struct usb_dynid *dynid; spin_lock(&drv->dynids.lock); list_for_each_entry(dynid, &drv->dynids.list, node) { if (usb_match_one_id(intf, &dynid->id)) { spin_unlock(&drv->dynids.lock); return &dynid->id; } } spin_unlock(&drv->dynids.lock); return NULL; } static const struct usb_device_id *get_iface_id(struct usb_serial_driver *drv, struct usb_interface *intf) { const struct usb_device_id *id; id = usb_match_id(intf, drv->id_table); if (id) { dev_dbg(&intf->dev, "static descriptor matches\n"); goto exit; } id = match_dynamic_id(intf, drv); if (id) dev_dbg(&intf->dev, "dynamic descriptor matches\n"); exit: return id; } /* Caller must hold table_lock */ static struct usb_serial_driver *search_serial_device( struct usb_interface *iface) { const struct usb_device_id *id = NULL; struct usb_serial_driver *drv; struct usb_driver *driver = to_usb_driver(iface->dev.driver); /* Check if the usb id matches a known device */ list_for_each_entry(drv, &usb_serial_driver_list, driver_list) { if (drv->usb_driver == driver) id = get_iface_id(drv, iface); if (id) return drv; } return NULL; } static int serial_port_carrier_raised(struct tty_port *port) { struct usb_serial_port *p = container_of(port, struct usb_serial_port, port); struct usb_serial_driver *drv = p->serial->type; if (drv->carrier_raised) return drv->carrier_raised(p); /* No carrier control - don't block */ return 1; } static void serial_port_dtr_rts(struct tty_port *port, int on) { struct usb_serial_port *p = container_of(port, struct usb_serial_port, port); struct usb_serial *serial = p->serial; struct usb_serial_driver *drv = serial->type; if (!drv->dtr_rts) return; /* * Work-around bug in the tty-layer which can result in dtr_rts * being called after a disconnect (and tty_unregister_device * has returned). 
Remove once bug has been squashed. */ mutex_lock(&serial->disc_mutex); if (!serial->disconnected) drv->dtr_rts(p, on); mutex_unlock(&serial->disc_mutex); } static const struct tty_port_operations serial_port_ops = { .carrier_raised = serial_port_carrier_raised, .dtr_rts = serial_port_dtr_rts, .activate = serial_port_activate, .shutdown = serial_port_shutdown, }; static int usb_serial_probe(struct usb_interface *interface, const struct usb_device_id *id) { struct device *ddev = &interface->dev; struct usb_device *dev = interface_to_usbdev(interface); struct usb_serial *serial = NULL; struct usb_serial_port *port; struct usb_host_interface *iface_desc; struct usb_endpoint_descriptor *endpoint; struct usb_endpoint_descriptor *interrupt_in_endpoint[MAX_NUM_PORTS]; struct usb_endpoint_descriptor *interrupt_out_endpoint[MAX_NUM_PORTS]; struct usb_endpoint_descriptor *bulk_in_endpoint[MAX_NUM_PORTS]; struct usb_endpoint_descriptor *bulk_out_endpoint[MAX_NUM_PORTS]; struct usb_serial_driver *type = NULL; int retval; unsigned int minor; int buffer_size; int i; int j; int num_interrupt_in = 0; int num_interrupt_out = 0; int num_bulk_in = 0; int num_bulk_out = 0; int num_ports = 0; int max_endpoints; mutex_lock(&table_lock); type = search_serial_device(interface); if (!type) { mutex_unlock(&table_lock); dev_dbg(ddev, "none matched\n"); return -ENODEV; } if (!try_module_get(type->driver.owner)) { mutex_unlock(&table_lock); dev_err(ddev, "module get failed, exiting\n"); return -EIO; } mutex_unlock(&table_lock); serial = create_serial(dev, interface, type); if (!serial) { module_put(type->driver.owner); return -ENOMEM; } /* if this device type has a probe function, call it */ if (type->probe) { const struct usb_device_id *id; id = get_iface_id(type, interface); retval = type->probe(serial, id); if (retval) { dev_dbg(ddev, "sub driver rejected device\n"); usb_serial_put(serial); module_put(type->driver.owner); return retval; } } /* descriptor matches, let's find the endpoints 
needed */ /* check out the endpoints */ iface_desc = interface->cur_altsetting; for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) { endpoint = &iface_desc->endpoint[i].desc; if (usb_endpoint_is_bulk_in(endpoint)) { /* we found a bulk in endpoint */ dev_dbg(ddev, "found bulk in on endpoint %d\n", i); if (num_bulk_in < MAX_NUM_PORTS) { bulk_in_endpoint[num_bulk_in] = endpoint; ++num_bulk_in; } } if (usb_endpoint_is_bulk_out(endpoint)) { /* we found a bulk out endpoint */ dev_dbg(ddev, "found bulk out on endpoint %d\n", i); if (num_bulk_out < MAX_NUM_PORTS) { bulk_out_endpoint[num_bulk_out] = endpoint; ++num_bulk_out; } } if (usb_endpoint_is_int_in(endpoint)) { /* we found a interrupt in endpoint */ dev_dbg(ddev, "found interrupt in on endpoint %d\n", i); if (num_interrupt_in < MAX_NUM_PORTS) { interrupt_in_endpoint[num_interrupt_in] = endpoint; ++num_interrupt_in; } } if (usb_endpoint_is_int_out(endpoint)) { /* we found an interrupt out endpoint */ dev_dbg(ddev, "found interrupt out on endpoint %d\n", i); if (num_interrupt_out < MAX_NUM_PORTS) { interrupt_out_endpoint[num_interrupt_out] = endpoint; ++num_interrupt_out; } } } #if defined(CONFIG_USB_SERIAL_PL2303) || defined(CONFIG_USB_SERIAL_PL2303_MODULE) /* BEGIN HORRIBLE HACK FOR PL2303 */ /* this is needed due to the looney way its endpoints are set up */ if (((le16_to_cpu(dev->descriptor.idVendor) == PL2303_VENDOR_ID) && (le16_to_cpu(dev->descriptor.idProduct) == PL2303_PRODUCT_ID)) || ((le16_to_cpu(dev->descriptor.idVendor) == ATEN_VENDOR_ID) && (le16_to_cpu(dev->descriptor.idProduct) == ATEN_PRODUCT_ID)) || ((le16_to_cpu(dev->descriptor.idVendor) == ALCOR_VENDOR_ID) && (le16_to_cpu(dev->descriptor.idProduct) == ALCOR_PRODUCT_ID)) || ((le16_to_cpu(dev->descriptor.idVendor) == SIEMENS_VENDOR_ID) && (le16_to_cpu(dev->descriptor.idProduct) == SIEMENS_PRODUCT_ID_EF81))) { if (interface != dev->actconfig->interface[0]) { /* check out the endpoints of the other interface*/ iface_desc = 
dev->actconfig->interface[0]->cur_altsetting; for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) { endpoint = &iface_desc->endpoint[i].desc; if (usb_endpoint_is_int_in(endpoint)) { /* we found a interrupt in endpoint */ dev_dbg(ddev, "found interrupt in for Prolific device on separate interface\n"); if (num_interrupt_in < MAX_NUM_PORTS) { interrupt_in_endpoint[num_interrupt_in] = endpoint; ++num_interrupt_in; } } } } /* Now make sure the PL-2303 is configured correctly. * If not, give up now and hope this hack will work * properly during a later invocation of usb_serial_probe */ if (num_bulk_in == 0 || num_bulk_out == 0) { dev_info(ddev, "PL-2303 hack: descriptors matched but endpoints did not\n"); usb_serial_put(serial); module_put(type->driver.owner); return -ENODEV; } } /* END HORRIBLE HACK FOR PL2303 */ #endif #ifdef CONFIG_USB_SERIAL_GENERIC if (type == &usb_serial_generic_device) { num_ports = num_bulk_out; if (num_ports == 0) { dev_err(ddev, "Generic device with no bulk out, not allowed.\n"); usb_serial_put(serial); module_put(type->driver.owner); return -EIO; } dev_info(ddev, "The \"generic\" usb-serial driver is only for testing and one-off prototypes.\n"); dev_info(ddev, "Tell linux-usb@vger.kernel.org to add your device to a proper driver.\n"); } #endif if (!num_ports) { /* if this device type has a calc_num_ports function, call it */ if (type->calc_num_ports) num_ports = type->calc_num_ports(serial); if (!num_ports) num_ports = type->num_ports; } if (num_ports > MAX_NUM_PORTS) { dev_warn(ddev, "too many ports requested: %d\n", num_ports); num_ports = MAX_NUM_PORTS; } serial->num_ports = num_ports; serial->num_bulk_in = num_bulk_in; serial->num_bulk_out = num_bulk_out; serial->num_interrupt_in = num_interrupt_in; serial->num_interrupt_out = num_interrupt_out; /* found all that we need */ dev_info(ddev, "%s converter detected\n", type->description); /* create our ports, we need as many as the max endpoints */ /* we don't use num_ports here because some 
devices have more endpoint pairs than ports */ max_endpoints = max(num_bulk_in, num_bulk_out); max_endpoints = max(max_endpoints, num_interrupt_in); max_endpoints = max(max_endpoints, num_interrupt_out); max_endpoints = max(max_endpoints, (int)serial->num_ports); serial->num_port_pointers = max_endpoints; dev_dbg(ddev, "setting up %d port structures for this device", max_endpoints); for (i = 0; i < max_endpoints; ++i) { port = kzalloc(sizeof(struct usb_serial_port), GFP_KERNEL); if (!port) goto probe_error; tty_port_init(&port->port); port->port.ops = &serial_port_ops; port->serial = serial; spin_lock_init(&port->lock); /* Keep this for private driver use for the moment but should probably go away */ INIT_WORK(&port->work, usb_serial_port_work); serial->port[i] = port; port->dev.parent = &interface->dev; port->dev.driver = NULL; port->dev.bus = &usb_serial_bus_type; port->dev.release = &usb_serial_port_release; device_initialize(&port->dev); } /* set up the endpoint information */ for (i = 0; i < num_bulk_in; ++i) { endpoint = bulk_in_endpoint[i]; port = serial->port[i]; buffer_size = max_t(int, serial->type->bulk_in_size, usb_endpoint_maxp(endpoint)); port->bulk_in_size = buffer_size; port->bulk_in_endpointAddress = endpoint->bEndpointAddress; for (j = 0; j < ARRAY_SIZE(port->read_urbs); ++j) { set_bit(j, &port->read_urbs_free); port->read_urbs[j] = usb_alloc_urb(0, GFP_KERNEL); if (!port->read_urbs[j]) goto probe_error; port->bulk_in_buffers[j] = kmalloc(buffer_size, GFP_KERNEL); if (!port->bulk_in_buffers[j]) goto probe_error; usb_fill_bulk_urb(port->read_urbs[j], dev, usb_rcvbulkpipe(dev, endpoint->bEndpointAddress), port->bulk_in_buffers[j], buffer_size, serial->type->read_bulk_callback, port); } port->read_urb = port->read_urbs[0]; port->bulk_in_buffer = port->bulk_in_buffers[0]; } for (i = 0; i < num_bulk_out; ++i) { endpoint = bulk_out_endpoint[i]; port = serial->port[i]; if (kfifo_alloc(&port->write_fifo, PAGE_SIZE, GFP_KERNEL)) goto probe_error; 
buffer_size = serial->type->bulk_out_size; if (!buffer_size) buffer_size = usb_endpoint_maxp(endpoint); port->bulk_out_size = buffer_size; port->bulk_out_endpointAddress = endpoint->bEndpointAddress; for (j = 0; j < ARRAY_SIZE(port->write_urbs); ++j) { set_bit(j, &port->write_urbs_free); port->write_urbs[j] = usb_alloc_urb(0, GFP_KERNEL); if (!port->write_urbs[j]) goto probe_error; port->bulk_out_buffers[j] = kmalloc(buffer_size, GFP_KERNEL); if (!port->bulk_out_buffers[j]) goto probe_error; usb_fill_bulk_urb(port->write_urbs[j], dev, usb_sndbulkpipe(dev, endpoint->bEndpointAddress), port->bulk_out_buffers[j], buffer_size, serial->type->write_bulk_callback, port); } port->write_urb = port->write_urbs[0]; port->bulk_out_buffer = port->bulk_out_buffers[0]; } if (serial->type->read_int_callback) { for (i = 0; i < num_interrupt_in; ++i) { endpoint = interrupt_in_endpoint[i]; port = serial->port[i]; port->interrupt_in_urb = usb_alloc_urb(0, GFP_KERNEL); if (!port->interrupt_in_urb) goto probe_error; buffer_size = usb_endpoint_maxp(endpoint); port->interrupt_in_endpointAddress = endpoint->bEndpointAddress; port->interrupt_in_buffer = kmalloc(buffer_size, GFP_KERNEL); if (!port->interrupt_in_buffer) goto probe_error; usb_fill_int_urb(port->interrupt_in_urb, dev, usb_rcvintpipe(dev, endpoint->bEndpointAddress), port->interrupt_in_buffer, buffer_size, serial->type->read_int_callback, port, endpoint->bInterval); } } else if (num_interrupt_in) { dev_dbg(ddev, "The device claims to support interrupt in transfers, but read_int_callback is not defined\n"); } if (serial->type->write_int_callback) { for (i = 0; i < num_interrupt_out; ++i) { endpoint = interrupt_out_endpoint[i]; port = serial->port[i]; port->interrupt_out_urb = usb_alloc_urb(0, GFP_KERNEL); if (!port->interrupt_out_urb) goto probe_error; buffer_size = usb_endpoint_maxp(endpoint); port->interrupt_out_size = buffer_size; port->interrupt_out_endpointAddress = endpoint->bEndpointAddress; port->interrupt_out_buffer = 
kmalloc(buffer_size, GFP_KERNEL); if (!port->interrupt_out_buffer) goto probe_error; usb_fill_int_urb(port->interrupt_out_urb, dev, usb_sndintpipe(dev, endpoint->bEndpointAddress), port->interrupt_out_buffer, buffer_size, serial->type->write_int_callback, port, endpoint->bInterval); } } else if (num_interrupt_out) { dev_dbg(ddev, "The device claims to support interrupt out transfers, but write_int_callback is not defined\n"); } usb_set_intfdata(interface, serial); /* if this device type has an attach function, call it */ if (type->attach) { retval = type->attach(serial); if (retval < 0) goto probe_error; serial->attached = 1; if (retval > 0) { /* quietly accept this device, but don't bind to a serial port as it's about to disappear */ serial->num_ports = 0; goto exit; } } else { serial->attached = 1; } /* Avoid race with tty_open and serial_install by setting the * disconnected flag and not clearing it until all ports have been * registered. */ serial->disconnected = 1; if (get_free_serial(serial, num_ports, &minor) == NULL) { dev_err(ddev, "No more free serial devices\n"); goto probe_error; } serial->minor = minor; /* register all of the individual ports with the driver core */ for (i = 0; i < num_ports; ++i) { port = serial->port[i]; dev_set_name(&port->dev, "ttyUSB%d", port->number); dev_dbg(ddev, "registering %s", dev_name(&port->dev)); device_enable_async_suspend(&port->dev); retval = device_add(&port->dev); if (retval) dev_err(ddev, "Error registering port device, continuing\n"); } serial->disconnected = 0; usb_serial_console_init(minor); exit: module_put(type->driver.owner); return 0; probe_error: usb_serial_put(serial); module_put(type->driver.owner); return -EIO; } static void usb_serial_disconnect(struct usb_interface *interface) { int i; struct usb_serial *serial = usb_get_intfdata(interface); struct device *dev = &interface->dev; struct usb_serial_port *port; usb_serial_console_disconnect(serial); mutex_lock(&serial->disc_mutex); /* must set a flag, to 
 * signal subdrivers */
	serial->disconnected = 1;
	mutex_unlock(&serial->disc_mutex);

	/* Hang up, quiesce and unregister every port of this device. */
	for (i = 0; i < serial->num_ports; ++i) {
		port = serial->port[i];
		if (port) {
			struct tty_struct *tty = tty_port_tty_get(&port->port);
			if (tty) {
				tty_vhangup(tty);
				tty_kref_put(tty);
			}
			usb_serial_port_poison_urbs(port);
			wake_up_interruptible(&port->port.delta_msr_wait);
			cancel_work_sync(&port->work);
			if (device_is_registered(&port->dev))
				device_del(&port->dev);
		}
	}
	if (serial->type->disconnect)
		serial->type->disconnect(serial);

	/* let the last holder of this object cause it to be cleaned up */
	usb_serial_put(serial);
	dev_info(dev, "device disconnected\n");
}

/*
 * usb_serial_suspend - PM suspend handler for a usb-serial interface.
 *
 * Marks the device as suspending, gives the subdriver first refusal via
 * its suspend() callback, then poisons all port URBs.  On subdriver
 * failure the suspending flag is cleared and the error returned.
 */
int usb_serial_suspend(struct usb_interface *intf, pm_message_t message)
{
	struct usb_serial *serial = usb_get_intfdata(intf);
	struct usb_serial_port *port;
	int i, r = 0;

	serial->suspending = 1;

	/*
	 * serial->type->suspend() MUST return 0 in system sleep context,
	 * otherwise, the resume callback has to recover device from
	 * previous suspend failure.
	 */
	if (serial->type->suspend) {
		r = serial->type->suspend(serial, message);
		if (r < 0) {
			serial->suspending = 0;
			goto err_out;
		}
	}

	for (i = 0; i < serial->num_ports; ++i) {
		port = serial->port[i];
		if (port)
			usb_serial_port_poison_urbs(port);
	}
err_out:
	return r;
}
EXPORT_SYMBOL(usb_serial_suspend);

/* Unpoison the URBs of every port of @serial (resume-path helper). */
static void usb_serial_unpoison_port_urbs(struct usb_serial *serial)
{
	struct usb_serial_port *port;
	int i;

	for (i = 0; i < serial->num_ports; ++i) {
		port = serial->port[i];
		if (port)
			usb_serial_port_unpoison_urbs(port);
	}
}

/*
 * usb_serial_resume - PM resume handler: re-enable URBs, clear the
 * suspending flag, then let the subdriver (or the generic implementation)
 * restart I/O.
 */
int usb_serial_resume(struct usb_interface *intf)
{
	struct usb_serial *serial = usb_get_intfdata(intf);
	int rv;

	usb_serial_unpoison_port_urbs(serial);

	serial->suspending = 0;
	if (serial->type->resume)
		rv = serial->type->resume(serial);
	else
		rv = usb_serial_generic_resume(serial);

	return rv;
}
EXPORT_SYMBOL(usb_serial_resume);

/*
 * Reset-resume handler: like resume, but only subdrivers that implement
 * reset_resume() can recover; otherwise request rebinding.
 */
static int usb_serial_reset_resume(struct usb_interface *intf)
{
	struct usb_serial *serial = usb_get_intfdata(intf);
	int rv;
usb_serial_unpoison_port_urbs(serial); serial->suspending = 0; if (serial->type->reset_resume) rv = serial->type->reset_resume(serial); else { rv = -EOPNOTSUPP; intf->needs_binding = 1; } return rv; } static const struct tty_operations serial_ops = { .open = serial_open, .close = serial_close, .write = serial_write, .hangup = serial_hangup, .write_room = serial_write_room, .ioctl = serial_ioctl, .set_termios = serial_set_termios, .throttle = serial_throttle, .unthrottle = serial_unthrottle, .break_ctl = serial_break, .chars_in_buffer = serial_chars_in_buffer, .wait_until_sent = serial_wait_until_sent, .tiocmget = serial_tiocmget, .tiocmset = serial_tiocmset, .get_icount = serial_get_icount, .cleanup = serial_cleanup, .install = serial_install, .proc_fops = &serial_proc_fops, }; struct tty_driver *usb_serial_tty_driver; /* Driver structure we register with the USB core */ static struct usb_driver usb_serial_driver = { .name = "usbserial", .probe = usb_serial_probe, .disconnect = usb_serial_disconnect, .suspend = usb_serial_suspend, .resume = usb_serial_resume, .no_dynamic_id = 1, .supports_autosuspend = 1, }; static int __init usb_serial_init(void) { int i; int result; usb_serial_tty_driver = alloc_tty_driver(SERIAL_TTY_MINORS); if (!usb_serial_tty_driver) return -ENOMEM; /* Initialize our global data */ for (i = 0; i < SERIAL_TTY_MINORS; ++i) serial_table[i] = NULL; result = bus_register(&usb_serial_bus_type); if (result) { pr_err("%s - registering bus driver failed\n", __func__); goto exit_bus; } usb_serial_tty_driver->driver_name = "usbserial"; usb_serial_tty_driver->name = "ttyUSB"; usb_serial_tty_driver->major = SERIAL_TTY_MAJOR; usb_serial_tty_driver->minor_start = 0; usb_serial_tty_driver->type = TTY_DRIVER_TYPE_SERIAL; usb_serial_tty_driver->subtype = SERIAL_TYPE_NORMAL; usb_serial_tty_driver->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV; usb_serial_tty_driver->init_termios = tty_std_termios; usb_serial_tty_driver->init_termios.c_cflag = B9600 | CS8 
| CREAD | HUPCL | CLOCAL; usb_serial_tty_driver->init_termios.c_ispeed = 9600; usb_serial_tty_driver->init_termios.c_ospeed = 9600; tty_set_operations(usb_serial_tty_driver, &serial_ops); result = tty_register_driver(usb_serial_tty_driver); if (result) { pr_err("%s - tty_register_driver failed\n", __func__); goto exit_reg_driver; } /* register the USB driver */ result = usb_register(&usb_serial_driver); if (result < 0) { pr_err("%s - usb_register failed\n", __func__); goto exit_tty; } /* register the generic driver, if we should */ result = usb_serial_generic_register(); if (result < 0) { pr_err("%s - registering generic driver failed\n", __func__); goto exit_generic; } return result; exit_generic: usb_deregister(&usb_serial_driver); exit_tty: tty_unregister_driver(usb_serial_tty_driver); exit_reg_driver: bus_unregister(&usb_serial_bus_type); exit_bus: pr_err("%s - returning with error %d\n", __func__, result); put_tty_driver(usb_serial_tty_driver); return result; } static void __exit usb_serial_exit(void) { usb_serial_console_exit(); usb_serial_generic_deregister(); usb_deregister(&usb_serial_driver); tty_unregister_driver(usb_serial_tty_driver); put_tty_driver(usb_serial_tty_driver); bus_unregister(&usb_serial_bus_type); } module_init(usb_serial_init); module_exit(usb_serial_exit); #define set_to_generic_if_null(type, function) \ do { \ if (!type->function) { \ type->function = usb_serial_generic_##function; \ pr_debug("%s: using generic " #function "\n", \ type->driver.name); \ } \ } while (0) static void usb_serial_operations_init(struct usb_serial_driver *device) { set_to_generic_if_null(device, open); set_to_generic_if_null(device, write); set_to_generic_if_null(device, close); set_to_generic_if_null(device, write_room); set_to_generic_if_null(device, chars_in_buffer); if (device->tx_empty) set_to_generic_if_null(device, wait_until_sent); set_to_generic_if_null(device, read_bulk_callback); set_to_generic_if_null(device, write_bulk_callback); 
set_to_generic_if_null(device, process_read_urb); set_to_generic_if_null(device, prepare_write_buffer); } static int usb_serial_register(struct usb_serial_driver *driver) { int retval; if (usb_disabled()) return -ENODEV; if (!driver->description) driver->description = driver->driver.name; if (!driver->usb_driver) { WARN(1, "Serial driver %s has no usb_driver\n", driver->description); return -EINVAL; } usb_serial_operations_init(driver); /* Add this device to our list of devices */ mutex_lock(&table_lock); list_add(&driver->driver_list, &usb_serial_driver_list); retval = usb_serial_bus_register(driver); if (retval) { pr_err("problem %d when registering driver %s\n", retval, driver->description); list_del(&driver->driver_list); } else pr_info("USB Serial support registered for %s\n", driver->description); mutex_unlock(&table_lock); return retval; } static void usb_serial_deregister(struct usb_serial_driver *device) { pr_info("USB Serial deregistering driver %s\n", device->description); mutex_lock(&table_lock); list_del(&device->driver_list); mutex_unlock(&table_lock); usb_serial_bus_deregister(device); } /** * usb_serial_register_drivers - register drivers for a usb-serial module * @serial_drivers: NULL-terminated array of pointers to drivers to be registered * @name: name of the usb_driver for this set of @serial_drivers * @id_table: list of all devices this @serial_drivers set binds to * * Registers all the drivers in the @serial_drivers array, and dynamically * creates a struct usb_driver with the name @name and id_table of @id_table. */ int usb_serial_register_drivers(struct usb_serial_driver *const serial_drivers[], const char *name, const struct usb_device_id *id_table) { int rc; struct usb_driver *udriver; struct usb_serial_driver * const *sd; /* * udriver must be registered before any of the serial drivers, * because the store_new_id() routine for the serial drivers (in * bus.c) probes udriver. 
* * Performance hack: We don't want udriver to be probed until * the serial drivers are registered, because the probe would * simply fail for lack of a matching serial driver. * So we leave udriver's id_table set to NULL until we are all set. * * Suspend/resume support is implemented in the usb-serial core, * so fill in the PM-related fields in udriver. */ udriver = kzalloc(sizeof(*udriver), GFP_KERNEL); if (!udriver) return -ENOMEM; udriver->name = name; udriver->no_dynamic_id = 1; udriver->supports_autosuspend = 1; udriver->suspend = usb_serial_suspend; udriver->resume = usb_serial_resume; udriver->probe = usb_serial_probe; udriver->disconnect = usb_serial_disconnect; /* we only set the reset_resume field if the serial_driver has one */ for (sd = serial_drivers; *sd; ++sd) { if ((*sd)->reset_resume) { udriver->reset_resume = usb_serial_reset_resume; break; } } rc = usb_register(udriver); if (rc) return rc; for (sd = serial_drivers; *sd; ++sd) { (*sd)->usb_driver = udriver; rc = usb_serial_register(*sd); if (rc) goto failed; } /* Now set udriver's id_table and look for matches */ udriver->id_table = id_table; rc = driver_attach(&udriver->drvwrap.driver); return 0; failed: while (sd-- > serial_drivers) usb_serial_deregister(*sd); usb_deregister(udriver); return rc; } EXPORT_SYMBOL_GPL(usb_serial_register_drivers); /** * usb_serial_deregister_drivers - deregister drivers for a usb-serial module * @serial_drivers: NULL-terminated array of pointers to drivers to be deregistered * * Deregisters all the drivers in the @serial_drivers array and deregisters and * frees the struct usb_driver that was created by the call to * usb_serial_register_drivers(). 
 */
void usb_serial_deregister_drivers(struct usb_serial_driver *const serial_drivers[])
{
	/*
	 * All drivers in the array share the one usb_driver allocated by
	 * usb_serial_register_drivers(); grab it from the first entry
	 * before the array is walked, then deregister and free it last.
	 */
	struct usb_driver *udriver = (*serial_drivers)->usb_driver;

	for (; *serial_drivers; ++serial_drivers)
		usb_serial_deregister(*serial_drivers);
	usb_deregister(udriver);
	kfree(udriver);
}
EXPORT_SYMBOL_GPL(usb_serial_deregister_drivers);

MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
gpl-2.0
necioerrante/kernel
fs/ecryptfs/file.c
933
10940
/** * eCryptfs: Linux filesystem encryption layer * * Copyright (C) 1997-2004 Erez Zadok * Copyright (C) 2001-2004 Stony Brook University * Copyright (C) 2004-2007 International Business Machines Corp. * Author(s): Michael A. Halcrow <mhalcrow@us.ibm.com> * Michael C. Thompson <mcthomps@us.ibm.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of the * License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA * 02111-1307, USA. */ #include <linux/file.h> #include <linux/poll.h> #include <linux/slab.h> #include <linux/mount.h> #include <linux/pagemap.h> #include <linux/security.h> #include <linux/compat.h> #include <linux/fs_stack.h> #include <linux/aio.h> #include "ecryptfs_kernel.h" /** * ecryptfs_read_update_atime * * generic_file_read updates the atime of upper layer inode. But, it * doesn't give us a chance to update the atime of the lower layer * inode. This function is a wrapper to generic_file_read. It * updates the atime of the lower level inode if generic_file_read * returns without any errors. This is to be used only for file reads. * The function to be used for directory reads is ecryptfs_read. 
*/ static ssize_t ecryptfs_read_update_atime(struct kiocb *iocb, const struct iovec *iov, unsigned long nr_segs, loff_t pos) { ssize_t rc; struct path lower; struct file *file = iocb->ki_filp; rc = generic_file_aio_read(iocb, iov, nr_segs, pos); /* * Even though this is a async interface, we need to wait * for IO to finish to update atime */ if (-EIOCBQUEUED == rc) rc = wait_on_sync_kiocb(iocb); if (rc >= 0) { lower.dentry = ecryptfs_dentry_to_lower(file->f_path.dentry); lower.mnt = ecryptfs_dentry_to_lower_mnt(file->f_path.dentry); touch_atime(&lower); } return rc; } struct ecryptfs_getdents_callback { void *dirent; struct dentry *dentry; filldir_t filldir; int filldir_called; int entries_written; }; /* Inspired by generic filldir in fs/readdir.c */ static int ecryptfs_filldir(void *dirent, const char *lower_name, int lower_namelen, loff_t offset, u64 ino, unsigned int d_type) { struct ecryptfs_getdents_callback *buf = (struct ecryptfs_getdents_callback *)dirent; size_t name_size; char *name; int rc; buf->filldir_called++; rc = ecryptfs_decode_and_decrypt_filename(&name, &name_size, buf->dentry, lower_name, lower_namelen); if (rc) { printk(KERN_ERR "%s: Error attempting to decode and decrypt " "filename [%s]; rc = [%d]\n", __func__, lower_name, rc); goto out; } rc = buf->filldir(buf->dirent, name, name_size, offset, ino, d_type); kfree(name); if (rc >= 0) buf->entries_written++; out: return rc; } /** * ecryptfs_readdir * @file: The eCryptfs directory file * @dirent: Directory entry handle * @filldir: The filldir callback function */ static int ecryptfs_readdir(struct file *file, void *dirent, filldir_t filldir) { int rc; struct file *lower_file; struct inode *inode; struct ecryptfs_getdents_callback buf; lower_file = ecryptfs_file_to_lower(file); lower_file->f_pos = file->f_pos; inode = file_inode(file); memset(&buf, 0, sizeof(buf)); buf.dirent = dirent; buf.dentry = file->f_path.dentry; buf.filldir = filldir; buf.filldir_called = 0; buf.entries_written = 0; rc = 
vfs_readdir(lower_file, ecryptfs_filldir, (void *)&buf); file->f_pos = lower_file->f_pos; if (rc < 0) goto out; if (buf.filldir_called && !buf.entries_written) goto out; if (rc >= 0) fsstack_copy_attr_atime(inode, file_inode(lower_file)); out: return rc; } struct kmem_cache *ecryptfs_file_info_cache; static int read_or_initialize_metadata(struct dentry *dentry) { struct inode *inode = dentry->d_inode; struct ecryptfs_mount_crypt_stat *mount_crypt_stat; struct ecryptfs_crypt_stat *crypt_stat; int rc; crypt_stat = &ecryptfs_inode_to_private(inode)->crypt_stat; mount_crypt_stat = &ecryptfs_superblock_to_private( inode->i_sb)->mount_crypt_stat; mutex_lock(&crypt_stat->cs_mutex); if (crypt_stat->flags & ECRYPTFS_POLICY_APPLIED && crypt_stat->flags & ECRYPTFS_KEY_VALID) { rc = 0; goto out; } rc = ecryptfs_read_metadata(dentry); if (!rc) goto out; if (mount_crypt_stat->flags & ECRYPTFS_PLAINTEXT_PASSTHROUGH_ENABLED) { crypt_stat->flags &= ~(ECRYPTFS_I_SIZE_INITIALIZED | ECRYPTFS_ENCRYPTED); rc = 0; goto out; } if (!(mount_crypt_stat->flags & ECRYPTFS_XATTR_METADATA_ENABLED) && !i_size_read(ecryptfs_inode_to_lower(inode))) { rc = ecryptfs_initialize_file(dentry, inode); if (!rc) goto out; } rc = -EIO; out: mutex_unlock(&crypt_stat->cs_mutex); return rc; } /** * ecryptfs_open * @inode: inode speciying file to open * @file: Structure to return filled in * * Opens the file specified by inode. 
* * Returns zero on success; non-zero otherwise */ static int ecryptfs_open(struct inode *inode, struct file *file) { int rc = 0; struct ecryptfs_crypt_stat *crypt_stat = NULL; struct ecryptfs_mount_crypt_stat *mount_crypt_stat; struct dentry *ecryptfs_dentry = file->f_path.dentry; /* Private value of ecryptfs_dentry allocated in * ecryptfs_lookup() */ struct ecryptfs_file_info *file_info; mount_crypt_stat = &ecryptfs_superblock_to_private( ecryptfs_dentry->d_sb)->mount_crypt_stat; if ((mount_crypt_stat->flags & ECRYPTFS_ENCRYPTED_VIEW_ENABLED) && ((file->f_flags & O_WRONLY) || (file->f_flags & O_RDWR) || (file->f_flags & O_CREAT) || (file->f_flags & O_TRUNC) || (file->f_flags & O_APPEND))) { printk(KERN_WARNING "Mount has encrypted view enabled; " "files may only be read\n"); rc = -EPERM; goto out; } /* Released in ecryptfs_release or end of function if failure */ file_info = kmem_cache_zalloc(ecryptfs_file_info_cache, GFP_KERNEL); ecryptfs_set_file_private(file, file_info); if (!file_info) { ecryptfs_printk(KERN_ERR, "Error attempting to allocate memory\n"); rc = -ENOMEM; goto out; } crypt_stat = &ecryptfs_inode_to_private(inode)->crypt_stat; mutex_lock(&crypt_stat->cs_mutex); if (!(crypt_stat->flags & ECRYPTFS_POLICY_APPLIED)) { ecryptfs_printk(KERN_DEBUG, "Setting flags for stat...\n"); /* Policy code enabled in future release */ crypt_stat->flags |= (ECRYPTFS_POLICY_APPLIED | ECRYPTFS_ENCRYPTED); } mutex_unlock(&crypt_stat->cs_mutex); rc = ecryptfs_get_lower_file(ecryptfs_dentry, inode); if (rc) { printk(KERN_ERR "%s: Error attempting to initialize " "the lower file for the dentry with name " "[%s]; rc = [%d]\n", __func__, ecryptfs_dentry->d_name.name, rc); goto out_free; } if ((ecryptfs_inode_to_private(inode)->lower_file->f_flags & O_ACCMODE) == O_RDONLY && (file->f_flags & O_ACCMODE) != O_RDONLY) { rc = -EPERM; printk(KERN_WARNING "%s: Lower file is RO; eCryptfs " "file must hence be opened RO\n", __func__); goto out_put; } ecryptfs_set_file_lower( file, 
			       ecryptfs_inode_to_private(inode)->lower_file);
	if (S_ISDIR(ecryptfs_dentry->d_inode->i_mode)) {
		/* Directories are never encrypted; clear the flag and
		 * succeed without reading any crypto metadata. */
		ecryptfs_printk(KERN_DEBUG, "This is a directory\n");
		mutex_lock(&crypt_stat->cs_mutex);
		crypt_stat->flags &= ~(ECRYPTFS_ENCRYPTED);
		mutex_unlock(&crypt_stat->cs_mutex);
		rc = 0;
		goto out;
	}
	rc = read_or_initialize_metadata(ecryptfs_dentry);
	if (rc)
		goto out_put;
	ecryptfs_printk(KERN_DEBUG, "inode w/ addr = [0x%p], i_ino = "
			"[0x%.16lx] size: [0x%.16llx]\n", inode, inode->i_ino,
			(unsigned long long)i_size_read(inode));
	goto out;
out_put:
	ecryptfs_put_lower_file(inode);
out_free:
	kmem_cache_free(ecryptfs_file_info_cache,
			ecryptfs_file_to_private(file));
out:
	return rc;
}

/*
 * ecryptfs_flush - flush dirty pages, then forward the flush to the
 * lower filesystem if it implements one.  No-op (success) otherwise.
 */
static int ecryptfs_flush(struct file *file, fl_owner_t td)
{
	struct file *lower_file = ecryptfs_file_to_lower(file);

	if (lower_file->f_op && lower_file->f_op->flush) {
		filemap_write_and_wait(file->f_mapping);
		return lower_file->f_op->flush(lower_file, td);
	}

	return 0;
}

/*
 * ecryptfs_release - drop the per-open lower-file reference and free the
 * per-file private data allocated in ecryptfs_open().
 */
static int ecryptfs_release(struct inode *inode, struct file *file)
{
	ecryptfs_put_lower_file(inode);
	kmem_cache_free(ecryptfs_file_info_cache,
			ecryptfs_file_to_private(file));
	return 0;
}

/*
 * ecryptfs_fsync - write back the upper mapping, then fsync the lower
 * file so the (encrypted) data actually reaches stable storage.
 */
static int ecryptfs_fsync(struct file *file, loff_t start, loff_t end,
			  int datasync)
{
	int rc;

	rc = filemap_write_and_wait(file->f_mapping);
	if (rc)
		return rc;
	return vfs_fsync(ecryptfs_file_to_lower(file), datasync);
}

/* Forward fasync notification setup to the lower file, if supported. */
static int ecryptfs_fasync(int fd, struct file *file, int flag)
{
	int rc = 0;
	struct file *lower_file = NULL;

	lower_file = ecryptfs_file_to_lower(file);
	if (lower_file->f_op && lower_file->f_op->fasync)
		rc = lower_file->f_op->fasync(fd, lower_file, flag);
	return rc;
}

/*
 * Pass ioctls straight through to the lower file's unlocked_ioctl;
 * -ENOTTY when there is no lower file or it has no handler.
 */
static long
ecryptfs_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct file *lower_file = NULL;
	long rc = -ENOTTY;

	if (ecryptfs_file_to_private(file))
		lower_file = ecryptfs_file_to_lower(file);
	if (lower_file && lower_file->f_op && lower_file->f_op->unlocked_ioctl)
		rc = lower_file->f_op->unlocked_ioctl(lower_file, cmd, arg);
	return rc;
}

#ifdef
CONFIG_COMPAT static long ecryptfs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct file *lower_file = NULL; long rc = -ENOIOCTLCMD; if (ecryptfs_file_to_private(file)) lower_file = ecryptfs_file_to_lower(file); if (lower_file && lower_file->f_op && lower_file->f_op->compat_ioctl) rc = lower_file->f_op->compat_ioctl(lower_file, cmd, arg); return rc; } #endif const struct file_operations ecryptfs_dir_fops = { .readdir = ecryptfs_readdir, .read = generic_read_dir, .unlocked_ioctl = ecryptfs_unlocked_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = ecryptfs_compat_ioctl, #endif .open = ecryptfs_open, .flush = ecryptfs_flush, .release = ecryptfs_release, .fsync = ecryptfs_fsync, .fasync = ecryptfs_fasync, .splice_read = generic_file_splice_read, .llseek = default_llseek, }; const struct file_operations ecryptfs_main_fops = { .llseek = generic_file_llseek, .read = do_sync_read, .aio_read = ecryptfs_read_update_atime, .write = do_sync_write, .aio_write = generic_file_aio_write, .readdir = ecryptfs_readdir, .unlocked_ioctl = ecryptfs_unlocked_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = ecryptfs_compat_ioctl, #endif .mmap = generic_file_mmap, .open = ecryptfs_open, .flush = ecryptfs_flush, .release = ecryptfs_release, .fsync = ecryptfs_fsync, .fasync = ecryptfs_fasync, .splice_read = generic_file_splice_read, };
gpl-2.0
johnhubbard/pnotify-linux-3.12.20
drivers/mtd/maps/physmap.c
933
6936
/* * Normal mappings of chips in physical memory * * Copyright (C) 2003 MontaVista Software Inc. * Author: Jun Sun, jsun@mvista.com or jsun@junsun.net * * 031022 - [jsun] add run-time configure and partition setup */ #include <linux/module.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/device.h> #include <linux/platform_device.h> #include <linux/mtd/mtd.h> #include <linux/mtd/map.h> #include <linux/mtd/partitions.h> #include <linux/mtd/physmap.h> #include <linux/mtd/concat.h> #include <linux/io.h> #define MAX_RESOURCES 4 struct physmap_flash_info { struct mtd_info *mtd[MAX_RESOURCES]; struct mtd_info *cmtd; struct map_info map[MAX_RESOURCES]; spinlock_t vpp_lock; int vpp_refcnt; }; static int physmap_flash_remove(struct platform_device *dev) { struct physmap_flash_info *info; struct physmap_flash_data *physmap_data; int i; info = platform_get_drvdata(dev); if (info == NULL) return 0; physmap_data = dev_get_platdata(&dev->dev); if (info->cmtd) { mtd_device_unregister(info->cmtd); if (info->cmtd != info->mtd[0]) mtd_concat_destroy(info->cmtd); } for (i = 0; i < MAX_RESOURCES; i++) { if (info->mtd[i] != NULL) map_destroy(info->mtd[i]); } if (physmap_data->exit) physmap_data->exit(dev); return 0; } static void physmap_set_vpp(struct map_info *map, int state) { struct platform_device *pdev; struct physmap_flash_data *physmap_data; struct physmap_flash_info *info; unsigned long flags; pdev = (struct platform_device *)map->map_priv_1; physmap_data = dev_get_platdata(&pdev->dev); if (!physmap_data->set_vpp) return; info = platform_get_drvdata(pdev); spin_lock_irqsave(&info->vpp_lock, flags); if (state) { if (++info->vpp_refcnt == 1) /* first nested 'on' */ physmap_data->set_vpp(pdev, 1); } else { if (--info->vpp_refcnt == 0) /* last nested 'off' */ physmap_data->set_vpp(pdev, 0); } spin_unlock_irqrestore(&info->vpp_lock, flags); } static const char * const rom_probe_types[] = { "cfi_probe", 
"jedec_probe", "qinfo_probe", "map_rom", NULL }; static const char * const part_probe_types[] = { "cmdlinepart", "RedBoot", "afs", NULL }; static int physmap_flash_probe(struct platform_device *dev) { struct physmap_flash_data *physmap_data; struct physmap_flash_info *info; const char * const *probe_type; const char * const *part_types; int err = 0; int i; int devices_found = 0; physmap_data = dev_get_platdata(&dev->dev); if (physmap_data == NULL) return -ENODEV; info = devm_kzalloc(&dev->dev, sizeof(struct physmap_flash_info), GFP_KERNEL); if (info == NULL) { err = -ENOMEM; goto err_out; } if (physmap_data->init) { err = physmap_data->init(dev); if (err) goto err_out; } platform_set_drvdata(dev, info); for (i = 0; i < dev->num_resources; i++) { printk(KERN_NOTICE "physmap platform flash device: %.8llx at %.8llx\n", (unsigned long long)resource_size(&dev->resource[i]), (unsigned long long)dev->resource[i].start); if (!devm_request_mem_region(&dev->dev, dev->resource[i].start, resource_size(&dev->resource[i]), dev_name(&dev->dev))) { dev_err(&dev->dev, "Could not reserve memory region\n"); err = -ENOMEM; goto err_out; } info->map[i].name = dev_name(&dev->dev); info->map[i].phys = dev->resource[i].start; info->map[i].size = resource_size(&dev->resource[i]); info->map[i].bankwidth = physmap_data->width; info->map[i].set_vpp = physmap_set_vpp; info->map[i].pfow_base = physmap_data->pfow_base; info->map[i].map_priv_1 = (unsigned long)dev; info->map[i].virt = devm_ioremap(&dev->dev, info->map[i].phys, info->map[i].size); if (info->map[i].virt == NULL) { dev_err(&dev->dev, "Failed to ioremap flash region\n"); err = -EIO; goto err_out; } simple_map_init(&info->map[i]); probe_type = rom_probe_types; if (physmap_data->probe_type == NULL) { for (; info->mtd[i] == NULL && *probe_type != NULL; probe_type++) info->mtd[i] = do_map_probe(*probe_type, &info->map[i]); } else info->mtd[i] = do_map_probe(physmap_data->probe_type, &info->map[i]); if (info->mtd[i] == NULL) { 
dev_err(&dev->dev, "map_probe failed\n"); err = -ENXIO; goto err_out; } else { devices_found++; } info->mtd[i]->owner = THIS_MODULE; info->mtd[i]->dev.parent = &dev->dev; } if (devices_found == 1) { info->cmtd = info->mtd[0]; } else if (devices_found > 1) { /* * We detected multiple devices. Concatenate them together. */ info->cmtd = mtd_concat_create(info->mtd, devices_found, dev_name(&dev->dev)); if (info->cmtd == NULL) err = -ENXIO; } if (err) goto err_out; spin_lock_init(&info->vpp_lock); part_types = physmap_data->part_probe_types ? : part_probe_types; mtd_device_parse_register(info->cmtd, part_types, NULL, physmap_data->parts, physmap_data->nr_parts); return 0; err_out: physmap_flash_remove(dev); return err; } #ifdef CONFIG_PM static void physmap_flash_shutdown(struct platform_device *dev) { struct physmap_flash_info *info = platform_get_drvdata(dev); int i; for (i = 0; i < MAX_RESOURCES && info->mtd[i]; i++) if (mtd_suspend(info->mtd[i]) == 0) mtd_resume(info->mtd[i]); } #else #define physmap_flash_shutdown NULL #endif static struct platform_driver physmap_flash_driver = { .probe = physmap_flash_probe, .remove = physmap_flash_remove, .shutdown = physmap_flash_shutdown, .driver = { .name = "physmap-flash", .owner = THIS_MODULE, }, }; #ifdef CONFIG_MTD_PHYSMAP_COMPAT static struct physmap_flash_data physmap_flash_data = { .width = CONFIG_MTD_PHYSMAP_BANKWIDTH, }; static struct resource physmap_flash_resource = { .start = CONFIG_MTD_PHYSMAP_START, .end = CONFIG_MTD_PHYSMAP_START + CONFIG_MTD_PHYSMAP_LEN - 1, .flags = IORESOURCE_MEM, }; static struct platform_device physmap_flash = { .name = "physmap-flash", .id = 0, .dev = { .platform_data = &physmap_flash_data, }, .num_resources = 1, .resource = &physmap_flash_resource, }; #endif static int __init physmap_init(void) { int err; err = platform_driver_register(&physmap_flash_driver); #ifdef CONFIG_MTD_PHYSMAP_COMPAT if (err == 0) { err = platform_device_register(&physmap_flash); if (err) 
platform_driver_unregister(&physmap_flash_driver); } #endif return err; } static void __exit physmap_exit(void) { #ifdef CONFIG_MTD_PHYSMAP_COMPAT platform_device_unregister(&physmap_flash); #endif platform_driver_unregister(&physmap_flash_driver); } module_init(physmap_init); module_exit(physmap_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>"); MODULE_DESCRIPTION("Generic configurable MTD map driver"); /* legacy platform drivers can't hotplug or coldplg */ #ifndef CONFIG_MTD_PHYSMAP_COMPAT /* work with hotplug and coldplug */ MODULE_ALIAS("platform:physmap-flash"); #endif
gpl-2.0
bryan2894/D851_Kernel
drivers/char/rdbg.c
1701
30244
/* * Copyright (c) 2013, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/types.h> #include <linux/cdev.h> #include <linux/gfp.h> #include <linux/device.h> #include <linux/fs.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/completion.h> #include <linux/of_gpio.h> #include <linux/mutex.h> #include <mach/msm_smsm.h> #include <linux/uaccess.h> #include <asm/system.h> #define SMP2P_NUM_PROCS 8 #define SM_VERSION 1 #define SM_BLOCKSIZE 128 #define SMQ_MAGIC_INIT 0xFF00FF00 #define SMQ_MAGIC_PRODUCER (SMQ_MAGIC_INIT | 0x1) #define SMQ_MAGIC_CONSUMER (SMQ_MAGIC_INIT | 0x2) enum SMQ_STATUS { SMQ_SUCCESS = 0, SMQ_ENOMEMORY = -1, SMQ_EBADPARM = -2, SMQ_UNDERFLOW = -3, SMQ_OVERFLOW = -4 }; enum smq_type { PRODUCER = 1, CONSUMER = 2, INVALID = 3 }; struct smq_block_map { uint32_t index_read; uint32_t num_blocks; uint8_t *map; }; struct smq_node { uint16_t index_block; uint16_t num_blocks; } __attribute__ ((__packed__)); struct smq_hdr { uint8_t producer_version; uint8_t consumer_version; } __attribute__ ((__packed__)); struct smq_out_state { uint32_t init; uint32_t index_check_queue_for_reset; uint32_t index_sent_write; uint32_t index_free_read; } __attribute__ ((__packed__)); struct smq_out { struct smq_out_state s; struct smq_node sent[1]; }; struct smq_in_state { uint32_t init; uint32_t index_check_queue_for_reset_ack; uint32_t index_sent_read; uint32_t index_free_write; } __attribute__ ((__packed__)); struct smq_in { struct smq_in_state s; struct smq_node free[1]; }; struct smq { struct smq_hdr 
*hdr; struct smq_out *out; struct smq_in *in; uint8_t *blocks; uint32_t num_blocks; struct mutex *lock; uint32_t initialized; struct smq_block_map block_map; enum smq_type type; }; struct gpio_info { int gpio_base_id; int irq_base_id; }; struct rdbg_data { struct device *device; struct completion work; struct gpio_info in; struct gpio_info out; bool device_initialized; int gpio_out_offset; bool device_opened; void *smem_addr; size_t smem_size; struct smq producer_smrb; struct smq consumer_smrb; struct mutex write_mutex; }; struct rdbg_device { struct cdev cdev; struct class *class; dev_t dev_no; int num_devices; struct rdbg_data *rdbg_data; }; static struct rdbg_device g_rdbg_instance = { { {0} }, NULL, 0, SMP2P_NUM_PROCS, NULL }; struct processor_specific_info { char *name; unsigned int smem_buffer_addr; size_t smem_buffer_size; }; static struct processor_specific_info proc_info[SMP2P_NUM_PROCS] = { {0}, /*APPS*/ {"rdbg_modem", 0, 0}, /*MODEM*/ {"rdbg_adsp", SMEM_LC_DEBUGGER, 16*1024}, /*ADSP*/ {0}, /*SMP2P_RESERVED_PROC_1*/ {"rdbg_wcnss", 0, 0}, /*WCNSS*/ {0}, /*SMP2P_RESERVED_PROC_2*/ {0}, /*SMP2P_POWER_PROC*/ {0} /*SMP2P_REMOTE_MOCK_PROC*/ }; static int smq_blockmap_get(struct smq_block_map *block_map, uint32_t *block_index, uint32_t n) { uint32_t start; uint32_t mark = 0; uint32_t found = 0; uint32_t i = 0; start = block_map->index_read; if (n == 1) { do { if (!block_map->map[block_map->index_read]) { *block_index = block_map->index_read; block_map->map[block_map->index_read] = 1; block_map->index_read++; block_map->index_read %= block_map->num_blocks; return SMQ_SUCCESS; } block_map->index_read++; } while (start != (block_map->index_read %= block_map->num_blocks)); } else { mark = block_map->num_blocks; do { if (!block_map->map[block_map->index_read]) { if (mark > block_map->index_read) { mark = block_map->index_read; start = block_map->index_read; found = 0; } found++; if (found == n) { *block_index = mark; for (i = 0; i < n; i++) block_map->map[mark + i] = 
(uint8_t)(n - i); block_map->index_read += block_map->map [block_map->index_read] - 1; return SMQ_SUCCESS; } } else { found = 0; block_map->index_read += block_map->map [block_map->index_read] - 1; mark = block_map->num_blocks; } block_map->index_read++; } while (start != (block_map->index_read %= block_map->num_blocks)); } return SMQ_ENOMEMORY; } static void smq_blockmap_put(struct smq_block_map *block_map, uint32_t i) { uint32_t num_blocks = block_map->map[i]; while (num_blocks--) { block_map->map[i] = 0; i++; } } static int smq_blockmap_reset(struct smq_block_map *block_map) { if (!block_map->map) return SMQ_ENOMEMORY; memset(block_map->map, 0 , block_map->num_blocks + 1); block_map->index_read = 0; return SMQ_SUCCESS; } static int smq_blockmap_ctor(struct smq_block_map *block_map, uint32_t num_blocks) { if (num_blocks <= 1) return SMQ_ENOMEMORY; block_map->map = kcalloc(num_blocks, sizeof(uint8_t), GFP_KERNEL); if (!block_map->map) return SMQ_ENOMEMORY; block_map->num_blocks = num_blocks - 1; smq_blockmap_reset(block_map); return SMQ_SUCCESS; } static void smq_blockmap_dtor(struct smq_block_map *block_map) { kfree(block_map->map); block_map->map = NULL; } static int smq_free(struct smq *smq, void *data) { struct smq_node node; uint32_t index_block; int err = SMQ_SUCCESS; if (smq->lock) mutex_lock(smq->lock); if ((SM_VERSION != smq->hdr->producer_version) && (SMQ_MAGIC_PRODUCER != smq->out->s.init)) { err = SMQ_UNDERFLOW; goto bail; } index_block = ((uint8_t *)data - smq->blocks) / SM_BLOCKSIZE; if (index_block >= smq->num_blocks) { err = SMQ_EBADPARM; goto bail; } node.index_block = (uint16_t)index_block; node.num_blocks = 0; *((struct smq_node *)(smq->in->free + smq->in-> s.index_free_write)) = node; smq->in->s.index_free_write = (smq->in->s.index_free_write + 1) % smq->num_blocks; bail: if (smq->lock) mutex_unlock(smq->lock); return err; } static int smq_receive(struct smq *smq, void **pp, int *pnsize, int *pbmore) { struct smq_node *node; int err = 
SMQ_SUCCESS; int more = 0; if ((SM_VERSION != smq->hdr->producer_version) && (SMQ_MAGIC_PRODUCER != smq->out->s.init)) return SMQ_UNDERFLOW; if (smq->in->s.index_sent_read == smq->out->s.index_sent_write) { err = SMQ_UNDERFLOW; goto bail; } node = (struct smq_node *)(smq->out->sent + smq->in->s.index_sent_read); if (node->index_block >= smq->num_blocks) { err = SMQ_EBADPARM; goto bail; } smq->in->s.index_sent_read = (smq->in->s.index_sent_read + 1) % smq->num_blocks; *pp = smq->blocks + (node->index_block * SM_BLOCKSIZE); *pnsize = SM_BLOCKSIZE * node->num_blocks; rmb(); if (smq->in->s.index_sent_read != smq->out->s.index_sent_write) more = 1; bail: *pbmore = more; return err; } static int smq_alloc_send(struct smq *smq, const uint8_t *pcb, int nsize) { void *pv = 0; int num_blocks; uint32_t index_block = 0; int err = SMQ_SUCCESS; struct smq_node *node = NULL; mutex_lock(smq->lock); if ((SMQ_MAGIC_CONSUMER == smq->in->s.init) && (SM_VERSION == smq->hdr->consumer_version)) { if (smq->out->s.index_check_queue_for_reset == smq->in->s.index_check_queue_for_reset_ack) { while (smq->out->s.index_free_read != smq->in->s.index_free_write) { node = (struct smq_node *)( smq->in->free + smq->out->s.index_free_read); if (node->index_block >= smq->num_blocks) { err = SMQ_EBADPARM; goto bail; } smq->out->s.index_free_read = (smq->out->s.index_free_read + 1) % smq->num_blocks; smq_blockmap_put(&smq->block_map, node->index_block); rmb(); } } } num_blocks = ALIGN(nsize, SM_BLOCKSIZE)/SM_BLOCKSIZE; err = smq_blockmap_get(&smq->block_map, &index_block, num_blocks); if (SMQ_SUCCESS != err) goto bail; pv = smq->blocks + (SM_BLOCKSIZE * index_block); err = copy_from_user((void *)pv, (void *)pcb, nsize); if (0 != err) goto bail; ((struct smq_node *)(smq->out->sent + smq->out->s.index_sent_write))->index_block = (uint16_t)index_block; ((struct smq_node *)(smq->out->sent + smq->out->s.index_sent_write))->num_blocks = (uint16_t)num_blocks; smq->out->s.index_sent_write = 
(smq->out->s.index_sent_write + 1) % smq->num_blocks; bail: if (SMQ_SUCCESS != err) { if (pv) smq_blockmap_put(&smq->block_map, index_block); } mutex_unlock(smq->lock); return err; } static int smq_reset_producer_queue_internal(struct smq *smq, uint32_t reset_num) { int retval = 0; uint32_t i; if (PRODUCER != smq->type) goto bail; mutex_lock(smq->lock); if (smq->out->s.index_check_queue_for_reset != reset_num) { smq->out->s.index_check_queue_for_reset = reset_num; for (i = 0; i < smq->num_blocks; i++) (smq->out->sent + i)->index_block = 0xFFFF; smq_blockmap_reset(&smq->block_map); smq->out->s.index_sent_write = 0; smq->out->s.index_free_read = 0; retval = 1; } mutex_unlock(smq->lock); bail: return retval; } static int smq_check_queue_reset(struct smq *p_cons, struct smq *p_prod) { int retval = 0; uint32_t reset_num, i; if ((CONSUMER != p_cons->type) || (SMQ_MAGIC_PRODUCER != p_cons->out->s.init) || (SM_VERSION != p_cons->hdr->producer_version)) goto bail; reset_num = p_cons->out->s.index_check_queue_for_reset; if (p_cons->in->s.index_check_queue_for_reset_ack != reset_num) { p_cons->in->s.index_check_queue_for_reset_ack = reset_num; for (i = 0; i < p_cons->num_blocks; i++) (p_cons->in->free + i)->index_block = 0xFFFF; p_cons->in->s.index_sent_read = 0; p_cons->in->s.index_free_write = 0; retval = smq_reset_producer_queue_internal(p_prod, reset_num); } bail: return retval; } static int check_subsystem_debug_enabled(void *base_addr, int size) { int num_blocks; uint8_t *pb_orig; uint8_t *pb; struct smq smq; int err = 0; pb = pb_orig = (uint8_t *)base_addr; pb += sizeof(struct smq_hdr); pb = PTR_ALIGN(pb, 8); size -= pb - (uint8_t *)pb_orig; num_blocks = (int)((size - sizeof(struct smq_out_state) - sizeof(struct smq_in_state))/(SM_BLOCKSIZE + sizeof(struct smq_node) * 2)); if (0 >= num_blocks) { err = SMQ_EBADPARM; goto bail; } pb += num_blocks * SM_BLOCKSIZE; smq.out = (struct smq_out *)pb; pb += sizeof(struct smq_out_state) + (num_blocks * sizeof(struct smq_node)); 
smq.in = (struct smq_in *)pb; if (SMQ_MAGIC_CONSUMER != smq.in->s.init) { pr_err("%s, smq in consumer not initialized", __func__); err = -ECOMM; } bail: return err; } static void smq_dtor(struct smq *smq) { if (SMQ_MAGIC_INIT == smq->initialized) { switch (smq->type) { case PRODUCER: smq->out->s.init = 0; smq_blockmap_dtor(&smq->block_map); break; case CONSUMER: smq->in->s.init = 0; break; default: case INVALID: break; } smq->initialized = 0; } } /* * The shared memory is used as a circular ring buffer in each direction. * Thus we have a bi-directional shared memory channel between the AP * and a subsystem. We call this SMQ. Each memory channel contains a header, * data and a control mechanism that is used to synchronize read and write * of data between the AP and the remote subsystem. * * Overall SMQ memory view: * * +------------------------------------------------+ * | SMEM buffer | * |-----------------------+------------------------| * |Producer: LA | Producer: Remote | * |Consumer: Remote | subsystem | * | subsystem | Consumer: LA | * | | | * | Producer| Consumer| * +-----------------------+------------------------+ * | | * | | * | +--------------------------------------+ * | | * | | * v v * +--------------------------------------------------------------+ * | Header | Data | Control | * +-----------+---+---+---+-----+----+--+--+-----+---+--+--+-----+ * | | b | b | b | | S |n |n | | S |n |n | | * | Producer | l | l | l | | M |o |o | | M |o |o | | * | Ver | o | o | o | | Q |d |d | | Q |d |d | | * |-----------| c | c | c | ... | |e |e | ... | |e |e | ... 
| * | | k | k | k | | O | | | | I | | | | * | Consumer | | | | | u |0 |1 | | n |0 |1 | | * | Ver | 0 | 1 | 2 | | t | | | | | | | | * +-----------+---+---+---+-----+----+--+--+-----+---+--+--+-----+ * | | * + | * | * +------------------------+ * | * v * +----+----+----+----+ * | SMQ Nodes | * |----|----|----|----| * Node # | 0 | 1 | 2 | ...| * |----|----|----|----| * Starting Block Index # | 0 | 3 | 8 | ...| * |----|----|----|----| * # of blocks | 3 | 5 | 1 | ...| * +----+----+----+----+ * * Header: Contains version numbers for software compatibility to ensure * that both producers and consumers on the AP and subsystems know how to * read from and write to the queue. * Both the producer and consumer versions are 1. * +---------+-------------------+ * | Size | Field | * +---------+-------------------+ * | 1 byte | Producer Version | * +---------+-------------------+ * | 1 byte | Consumer Version | * +---------+-------------------+ * * Data: The data portion contains multiple blocks [0..N] of a fixed size. * The block size SM_BLOCKSIZE is fixed to 128 bytes for header version #1. * Payload sent from the debug agent app is split (if necessary) and placed * in these blocks. The first data block is placed at the next 8 byte aligned * address after the header. * * The number of blocks for a given SMEM allocation is derived as follows: * Number of Blocks = ((Total Size - Alignment - Size of Header * - Size of SMQIn - Size of SMQOut)/(SM_BLOCKSIZE)) * * The producer maintains a private block map of each of these blocks to * determine which of these blocks in the queue is available and which are free. * * Control: * The control portion contains a list of nodes [0..N] where N is number * of available data blocks. Each node identifies the data * block indexes that contain a particular debug message to be transfered, * and the number of blocks it took to hold the contents of the message. 
* * Each node has the following structure: * +---------+-------------------+ * | Size | Field | * +---------+-------------------+ * | 2 bytes |Staring Block Index| * +---------+-------------------+ * | 2 bytes |Number of Blocks | * +---------+-------------------+ * * The producer and the consumer update different parts of the control channel * (SMQOut / SMQIn) respectively. Each of these control data structures contains * information about the last node that was written / read, and the actual nodes * that were written/read. * * SMQOut Structure (R/W by producer, R by consumer): * +---------+-------------------+ * | Size | Field | * +---------+-------------------+ * | 4 bytes | Magic Init Number | * +---------+-------------------+ * | 4 bytes | Reset | * +---------+-------------------+ * | 4 bytes | Last Sent Index | * +---------+-------------------+ * | 4 bytes | Index Free Read | * +---------+-------------------+ * * SMQIn Structure (R/W by consumer, R by producer): * +---------+-------------------+ * | Size | Field | * +---------+-------------------+ * | 4 bytes | Magic Init Number | * +---------+-------------------+ * | 4 bytes | Reset ACK | * +---------+-------------------+ * | 4 bytes | Last Read Index | * +---------+-------------------+ * | 4 bytes | Index Free Write | * +---------+-------------------+ * * Magic Init Number: * Both SMQ Out and SMQ In initialize this field with a predefined magic * number so as to make sure that both the consumer and producer blocks * have fully initialized and have valid data in the shared memory control area. 
* Producer Magic #: 0xFF00FF01 * Consumer Magic #: 0xFF00FF02 */ static int smq_ctor(struct smq *smq, void *base_addr, int size, enum smq_type type, struct mutex *lock_ptr) { int num_blocks; uint8_t *pb_orig; uint8_t *pb; uint32_t i; int err; if (SMQ_MAGIC_INIT == smq->initialized) { err = SMQ_EBADPARM; goto bail; } if (!base_addr || !size) { err = SMQ_EBADPARM; goto bail; } if (type == PRODUCER) smq->lock = lock_ptr; pb_orig = (uint8_t *)base_addr; smq->hdr = (struct smq_hdr *)pb_orig; pb = pb_orig; pb += sizeof(struct smq_hdr); pb = PTR_ALIGN(pb, 8); size -= pb - (uint8_t *)pb_orig; num_blocks = (int)((size - sizeof(struct smq_out_state) - sizeof(struct smq_in_state))/(SM_BLOCKSIZE + sizeof(struct smq_node) * 2)); if (0 >= num_blocks) { err = SMQ_ENOMEMORY; goto bail; } smq->blocks = pb; smq->num_blocks = num_blocks; pb += num_blocks * SM_BLOCKSIZE; smq->out = (struct smq_out *)pb; pb += sizeof(struct smq_out_state) + (num_blocks * sizeof(struct smq_node)); smq->in = (struct smq_in *)pb; smq->type = type; if (PRODUCER == type) { smq->hdr->producer_version = SM_VERSION; for (i = 0; i < smq->num_blocks; i++) (smq->out->sent + i)->index_block = 0xFFFF; err = smq_blockmap_ctor(&smq->block_map, smq->num_blocks); if (SMQ_SUCCESS != err) goto bail; smq->out->s.index_sent_write = 0; smq->out->s.index_free_read = 0; if (smq->out->s.init == SMQ_MAGIC_PRODUCER) { smq->out->s.index_check_queue_for_reset += 1; } else { smq->out->s.index_check_queue_for_reset = 1; smq->out->s.init = SMQ_MAGIC_PRODUCER; } } else { smq->hdr->consumer_version = SM_VERSION; for (i = 0; i < smq->num_blocks; i++) (smq->in->free + i)->index_block = 0xFFFF; smq->in->s.index_sent_read = 0; smq->in->s.index_free_write = 0; if (smq->out->s.init == SMQ_MAGIC_PRODUCER) { smq->in->s.index_check_queue_for_reset_ack = smq->out->s.index_check_queue_for_reset; } else { smq->in->s.index_check_queue_for_reset_ack = 0; } smq->in->s.init = SMQ_MAGIC_CONSUMER; } smq->initialized = SMQ_MAGIC_INIT; err = SMQ_SUCCESS; 
bail: return err; } static void send_interrupt_to_subsystem(struct rdbg_data *rdbgdata) { int offset = rdbgdata->gpio_out_offset; int val = 1 ^ gpio_get_value(rdbgdata->out.gpio_base_id + offset); gpio_set_value(rdbgdata->out.gpio_base_id + offset, val); rdbgdata->gpio_out_offset = (offset + 1) % 32; dev_dbg(rdbgdata->device, "%s: sent interrupt %d to subsystem", __func__, val); } static irqreturn_t on_interrupt_from(int irq, void *ptr) { struct rdbg_data *rdbgdata = (struct rdbg_data *) ptr; dev_dbg(rdbgdata->device, "%s: Received interrupt %d from subsystem", __func__, irq); complete(&(rdbgdata->work)); return IRQ_HANDLED; } static int initialize_smq(struct rdbg_data *rdbgdata) { int err = 0; if (smq_ctor(&(rdbgdata->producer_smrb), (void *)(rdbgdata->smem_addr), ((rdbgdata->smem_size)/2), PRODUCER, &rdbgdata->write_mutex)) { dev_err(rdbgdata->device, "%s: smq producer allocation failed", __func__); err = -ENOMEM; goto bail; } if (smq_ctor(&(rdbgdata->consumer_smrb), (void *)((uint32_t) (rdbgdata->smem_addr) + ((rdbgdata->smem_size)/2)), ((rdbgdata->smem_size)/2), CONSUMER, NULL)) { dev_err(rdbgdata->device, "%s: smq conmsumer allocation failed", __func__); err = -ENOMEM; } bail: return err; } static int rdbg_open(struct inode *inode, struct file *filp) { int device_id = -1; struct rdbg_device *device = &g_rdbg_instance; struct rdbg_data *rdbgdata = NULL; int err = 0; if (!inode || !device->rdbg_data) { pr_err("Memory not allocated yet"); err = -ENODEV; goto bail; } device_id = MINOR(inode->i_rdev); rdbgdata = &device->rdbg_data[device_id]; if (rdbgdata->device_opened) { dev_err(rdbgdata->device, "%s: Device already opened", __func__); err = -EEXIST; goto bail; } rdbgdata->smem_size = proc_info[device_id].smem_buffer_size; if (!rdbgdata->smem_size) { dev_err(rdbgdata->device, "%s: smem not initialized", __func__); err = -ENOMEM; goto bail; } rdbgdata->smem_addr = smem_alloc(proc_info[device_id].smem_buffer_addr, rdbgdata->smem_size); if (!rdbgdata->smem_addr) { 
dev_err(rdbgdata->device, "%s: Could not allocate smem memory", __func__); err = -ENOMEM; goto bail; } dev_dbg(rdbgdata->device, "%s: SMEM address=0x%x smem_size=%d", __func__, (unsigned int)rdbgdata->smem_addr, rdbgdata->smem_size); if (check_subsystem_debug_enabled(rdbgdata->smem_addr, rdbgdata->smem_size/2)) { dev_err(rdbgdata->device, "%s: Subsystem %s is not debug enabled", __func__, proc_info[device_id].name); err = -ECOMM; goto bail; } init_completion(&rdbgdata->work); err = request_irq(rdbgdata->in.irq_base_id, on_interrupt_from, IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING, proc_info[device_id].name, (void *)&device->rdbg_data[device_id]); if (err) { dev_err(rdbgdata->device, "%s: Failed to register interrupt.Err=%d,irqid=%d.", __func__, err, rdbgdata->in.irq_base_id); goto irq_bail; } err = enable_irq_wake(rdbgdata->in.irq_base_id); if (err < 0) { dev_dbg(rdbgdata->device, "enable_irq_wake() failed with err=%d", err); err = 0; } mutex_init(&rdbgdata->write_mutex); err = initialize_smq(rdbgdata); if (err) { dev_err(rdbgdata->device, "Error initializing smq. 
Err=%d", err); goto smq_bail; } rdbgdata->device_opened = 1; filp->private_data = (void *)rdbgdata; return 0; smq_bail: smq_dtor(&(rdbgdata->producer_smrb)); smq_dtor(&(rdbgdata->consumer_smrb)); mutex_destroy(&rdbgdata->write_mutex); irq_bail: free_irq(rdbgdata->in.irq_base_id, (void *) &device->rdbg_data[device_id]); bail: return err; } static int rdbg_release(struct inode *inode, struct file *filp) { int device_id = -1; struct rdbg_device *rdbgdevice = &g_rdbg_instance; struct rdbg_data *rdbgdata = NULL; int err = 0; if (!inode || !rdbgdevice->rdbg_data) { pr_err("Memory not allocated yet"); err = -ENODEV; goto bail; } device_id = MINOR(inode->i_rdev); rdbgdata = &rdbgdevice->rdbg_data[device_id]; if (rdbgdata->device_opened == 1) { dev_dbg(rdbgdata->device, "%s: Destroying %s.", __func__, proc_info[device_id].name); rdbgdata->device_opened = 0; complete(&(rdbgdata->work)); free_irq(rdbgdata->in.irq_base_id, (void *) &rdbgdevice->rdbg_data[device_id]); if (rdbgdevice->rdbg_data[device_id].producer_smrb.initialized) smq_dtor(&(rdbgdevice->rdbg_data[device_id]. producer_smrb)); if (rdbgdevice->rdbg_data[device_id].consumer_smrb.initialized) smq_dtor(&(rdbgdevice->rdbg_data[device_id]. 
consumer_smrb)); mutex_destroy(&rdbgdata->write_mutex); } filp->private_data = NULL; bail: return err; } static ssize_t rdbg_read(struct file *filp, char __user *buf, size_t size, loff_t *offset) { int err = 0; struct rdbg_data *rdbgdata = filp->private_data; void *p_sent_buffer = NULL; int nsize = 0; int more = 0; if (!rdbgdata) { pr_err("Invalid argument"); err = -EINVAL; goto bail; } dev_dbg(rdbgdata->device, "%s: In receive", __func__); err = wait_for_completion_interruptible(&(rdbgdata->work)); if (err) { dev_err(rdbgdata->device, "%s: Error in wait", __func__); goto bail; } smq_check_queue_reset(&(rdbgdata->consumer_smrb), &(rdbgdata->producer_smrb)); if (SMQ_SUCCESS != smq_receive(&(rdbgdata->consumer_smrb), &p_sent_buffer, &nsize, &more)) { dev_err(rdbgdata->device, "%s: Error in smq_recv(). Err code = %d", __func__, err); err = -ENODATA; goto bail; } size = ((size < nsize) ? size : nsize); err = copy_to_user(buf, p_sent_buffer, size); if (err != 0) { dev_err(rdbgdata->device, "%s: Error in copy_to_user(). 
Err code = %d", __func__, err); err = -ENODATA; goto bail; } smq_free(&(rdbgdata->consumer_smrb), p_sent_buffer); err = size; dev_dbg(rdbgdata->device, "%s: Read data to buffer with address 0x%x", __func__, (unsigned int) buf); bail: dev_dbg(rdbgdata->device, "%s: Returning from receive", __func__); return err; } static ssize_t rdbg_write(struct file *filp, const char __user *buf, size_t size, loff_t *offset) { int err = 0; struct rdbg_data *rdbgdata = filp->private_data; if (!rdbgdata) { pr_err("Invalid argument"); err = -EINVAL; goto bail; } if (smq_alloc_send(&(rdbgdata->producer_smrb), buf, size)) { dev_err(rdbgdata->device, "%s, Error sending", __func__); err = -ECOMM; goto bail; } send_interrupt_to_subsystem(rdbgdata); err = size; bail: return err; } static const struct file_operations rdbg_fops = { .open = rdbg_open, .read = rdbg_read, .write = rdbg_write, .release = rdbg_release, }; static int register_smp2p(char *node_name, struct gpio_info *gpio_info_ptr) { struct device_node *node = NULL; int cnt = 0; int id = 0; node = of_find_compatible_node(NULL, NULL, node_name); if (node) { cnt = of_gpio_count(node); if (cnt && gpio_info_ptr) { id = of_get_gpio(node, 0); gpio_info_ptr->gpio_base_id = id; gpio_info_ptr->irq_base_id = gpio_to_irq(id); return 0; } } return -EINVAL; } static int __init rdbg_init(void) { int err = 0; struct rdbg_device *rdbgdevice = &g_rdbg_instance; int minor = 0; int major = 0; int minor_nodes_created = 0; char *rdbg_compatible_string = "qcom,smp2pgpio_client_rdbg_"; int max_len = strlen(rdbg_compatible_string) + strlen("xx_out"); char *node_name = kcalloc(max_len, sizeof(char), GFP_KERNEL); if (!node_name) { pr_err("Not enough memory"); err = -ENOMEM; goto bail; } if (rdbgdevice->num_devices < 1 || rdbgdevice->num_devices > SMP2P_NUM_PROCS) { pr_err("rgdb: invalid num_devices"); err = -EDOM; goto name_bail; } rdbgdevice->rdbg_data = kcalloc(rdbgdevice->num_devices, sizeof(struct rdbg_data), GFP_KERNEL); if (!rdbgdevice->rdbg_data) { 
pr_err("Not enough memory for rdbg devices"); err = -ENOMEM; goto name_bail; } err = alloc_chrdev_region(&rdbgdevice->dev_no, 0, rdbgdevice->num_devices, "rdbgctl"); if (err) { pr_err("Error in alloc_chrdev_region."); goto data_bail; } major = MAJOR(rdbgdevice->dev_no); cdev_init(&rdbgdevice->cdev, &rdbg_fops); rdbgdevice->cdev.owner = THIS_MODULE; err = cdev_add(&rdbgdevice->cdev, MKDEV(major, 0), rdbgdevice->num_devices); if (err) { pr_err("Error in cdev_add"); goto chrdev_bail; } rdbgdevice->class = class_create(THIS_MODULE, "rdbg"); if (IS_ERR(rdbgdevice->class)) { err = PTR_ERR(rdbgdevice->class); pr_err("Error in class_create"); goto cdev_bail; } for (minor = 0; minor < rdbgdevice->num_devices; minor++) { if (!proc_info[minor].name) continue; if (snprintf(node_name, max_len, "%s%d_in", rdbg_compatible_string, minor) <= 0) { pr_err("Error in snprintf"); err = -ENOMEM; goto device_bail; } if (register_smp2p(node_name, &rdbgdevice->rdbg_data[minor].in)) { pr_debug("No incoming device tree entry found for %s", proc_info[minor].name); continue; } if (snprintf(node_name, max_len, "%s%d_out", rdbg_compatible_string, minor) <= 0) { pr_err("Error in snprintf"); err = -ENOMEM; goto device_bail; } if (register_smp2p(node_name, &rdbgdevice->rdbg_data[minor].out)) { pr_err("No outgoing device tree entry found for %s", proc_info[minor].name); err = -EINVAL; goto device_bail; } rdbgdevice->rdbg_data[minor].device = device_create( rdbgdevice->class, NULL, MKDEV(major, minor), NULL, "%s", proc_info[minor].name); if (IS_ERR(rdbgdevice->rdbg_data[minor].device)) { err = PTR_ERR(rdbgdevice->rdbg_data[minor].device); pr_err("Error in device_create"); goto device_bail; } rdbgdevice->rdbg_data[minor].device_initialized = 1; minor_nodes_created++; dev_dbg(rdbgdevice->rdbg_data[minor].device, "%s: created /dev/%s c %d %d'", __func__, proc_info[minor].name, major, minor); } if (!minor_nodes_created) { pr_err("No device tree entries found"); err = -EINVAL; goto class_bail; } goto 
name_bail; device_bail: for (--minor; minor >= 0; minor--) { if (rdbgdevice->rdbg_data[minor].device_initialized) device_destroy(rdbgdevice->class, MKDEV(MAJOR(rdbgdevice->dev_no), minor)); } class_bail: class_destroy(rdbgdevice->class); cdev_bail: cdev_del(&rdbgdevice->cdev); chrdev_bail: unregister_chrdev_region(rdbgdevice->dev_no, rdbgdevice->num_devices); data_bail: kfree(rdbgdevice->rdbg_data); name_bail: kfree(node_name); bail: return err; } static void __exit rdbg_exit(void) { struct rdbg_device *rdbgdevice = &g_rdbg_instance; int minor; for (minor = 0; minor < rdbgdevice->num_devices; minor++) { if (rdbgdevice->rdbg_data[minor].device_initialized) { device_destroy(rdbgdevice->class, MKDEV(MAJOR(rdbgdevice->dev_no), minor)); } } class_destroy(rdbgdevice->class); cdev_del(&rdbgdevice->cdev); unregister_chrdev_region(rdbgdevice->dev_no, 1); kfree(rdbgdevice->rdbg_data); } module_init(rdbg_init); module_exit(rdbg_exit); MODULE_DESCRIPTION("rdbg module"); MODULE_LICENSE("GPL v2");
gpl-2.0
davidmueller13/android_kernel_lge_msm8974
drivers/char/rdbg.c
1701
30244
/* * Copyright (c) 2013, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/types.h> #include <linux/cdev.h> #include <linux/gfp.h> #include <linux/device.h> #include <linux/fs.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/completion.h> #include <linux/of_gpio.h> #include <linux/mutex.h> #include <mach/msm_smsm.h> #include <linux/uaccess.h> #include <asm/system.h> #define SMP2P_NUM_PROCS 8 #define SM_VERSION 1 #define SM_BLOCKSIZE 128 #define SMQ_MAGIC_INIT 0xFF00FF00 #define SMQ_MAGIC_PRODUCER (SMQ_MAGIC_INIT | 0x1) #define SMQ_MAGIC_CONSUMER (SMQ_MAGIC_INIT | 0x2) enum SMQ_STATUS { SMQ_SUCCESS = 0, SMQ_ENOMEMORY = -1, SMQ_EBADPARM = -2, SMQ_UNDERFLOW = -3, SMQ_OVERFLOW = -4 }; enum smq_type { PRODUCER = 1, CONSUMER = 2, INVALID = 3 }; struct smq_block_map { uint32_t index_read; uint32_t num_blocks; uint8_t *map; }; struct smq_node { uint16_t index_block; uint16_t num_blocks; } __attribute__ ((__packed__)); struct smq_hdr { uint8_t producer_version; uint8_t consumer_version; } __attribute__ ((__packed__)); struct smq_out_state { uint32_t init; uint32_t index_check_queue_for_reset; uint32_t index_sent_write; uint32_t index_free_read; } __attribute__ ((__packed__)); struct smq_out { struct smq_out_state s; struct smq_node sent[1]; }; struct smq_in_state { uint32_t init; uint32_t index_check_queue_for_reset_ack; uint32_t index_sent_read; uint32_t index_free_write; } __attribute__ ((__packed__)); struct smq_in { struct smq_in_state s; struct smq_node free[1]; }; struct smq { struct smq_hdr 
*hdr; struct smq_out *out; struct smq_in *in; uint8_t *blocks; uint32_t num_blocks; struct mutex *lock; uint32_t initialized; struct smq_block_map block_map; enum smq_type type; }; struct gpio_info { int gpio_base_id; int irq_base_id; }; struct rdbg_data { struct device *device; struct completion work; struct gpio_info in; struct gpio_info out; bool device_initialized; int gpio_out_offset; bool device_opened; void *smem_addr; size_t smem_size; struct smq producer_smrb; struct smq consumer_smrb; struct mutex write_mutex; }; struct rdbg_device { struct cdev cdev; struct class *class; dev_t dev_no; int num_devices; struct rdbg_data *rdbg_data; }; static struct rdbg_device g_rdbg_instance = { { {0} }, NULL, 0, SMP2P_NUM_PROCS, NULL }; struct processor_specific_info { char *name; unsigned int smem_buffer_addr; size_t smem_buffer_size; }; static struct processor_specific_info proc_info[SMP2P_NUM_PROCS] = { {0}, /*APPS*/ {"rdbg_modem", 0, 0}, /*MODEM*/ {"rdbg_adsp", SMEM_LC_DEBUGGER, 16*1024}, /*ADSP*/ {0}, /*SMP2P_RESERVED_PROC_1*/ {"rdbg_wcnss", 0, 0}, /*WCNSS*/ {0}, /*SMP2P_RESERVED_PROC_2*/ {0}, /*SMP2P_POWER_PROC*/ {0} /*SMP2P_REMOTE_MOCK_PROC*/ }; static int smq_blockmap_get(struct smq_block_map *block_map, uint32_t *block_index, uint32_t n) { uint32_t start; uint32_t mark = 0; uint32_t found = 0; uint32_t i = 0; start = block_map->index_read; if (n == 1) { do { if (!block_map->map[block_map->index_read]) { *block_index = block_map->index_read; block_map->map[block_map->index_read] = 1; block_map->index_read++; block_map->index_read %= block_map->num_blocks; return SMQ_SUCCESS; } block_map->index_read++; } while (start != (block_map->index_read %= block_map->num_blocks)); } else { mark = block_map->num_blocks; do { if (!block_map->map[block_map->index_read]) { if (mark > block_map->index_read) { mark = block_map->index_read; start = block_map->index_read; found = 0; } found++; if (found == n) { *block_index = mark; for (i = 0; i < n; i++) block_map->map[mark + i] = 
(uint8_t)(n - i); block_map->index_read += block_map->map [block_map->index_read] - 1; return SMQ_SUCCESS; } } else { found = 0; block_map->index_read += block_map->map [block_map->index_read] - 1; mark = block_map->num_blocks; } block_map->index_read++; } while (start != (block_map->index_read %= block_map->num_blocks)); } return SMQ_ENOMEMORY; } static void smq_blockmap_put(struct smq_block_map *block_map, uint32_t i) { uint32_t num_blocks = block_map->map[i]; while (num_blocks--) { block_map->map[i] = 0; i++; } } static int smq_blockmap_reset(struct smq_block_map *block_map) { if (!block_map->map) return SMQ_ENOMEMORY; memset(block_map->map, 0 , block_map->num_blocks + 1); block_map->index_read = 0; return SMQ_SUCCESS; } static int smq_blockmap_ctor(struct smq_block_map *block_map, uint32_t num_blocks) { if (num_blocks <= 1) return SMQ_ENOMEMORY; block_map->map = kcalloc(num_blocks, sizeof(uint8_t), GFP_KERNEL); if (!block_map->map) return SMQ_ENOMEMORY; block_map->num_blocks = num_blocks - 1; smq_blockmap_reset(block_map); return SMQ_SUCCESS; } static void smq_blockmap_dtor(struct smq_block_map *block_map) { kfree(block_map->map); block_map->map = NULL; } static int smq_free(struct smq *smq, void *data) { struct smq_node node; uint32_t index_block; int err = SMQ_SUCCESS; if (smq->lock) mutex_lock(smq->lock); if ((SM_VERSION != smq->hdr->producer_version) && (SMQ_MAGIC_PRODUCER != smq->out->s.init)) { err = SMQ_UNDERFLOW; goto bail; } index_block = ((uint8_t *)data - smq->blocks) / SM_BLOCKSIZE; if (index_block >= smq->num_blocks) { err = SMQ_EBADPARM; goto bail; } node.index_block = (uint16_t)index_block; node.num_blocks = 0; *((struct smq_node *)(smq->in->free + smq->in-> s.index_free_write)) = node; smq->in->s.index_free_write = (smq->in->s.index_free_write + 1) % smq->num_blocks; bail: if (smq->lock) mutex_unlock(smq->lock); return err; } static int smq_receive(struct smq *smq, void **pp, int *pnsize, int *pbmore) { struct smq_node *node; int err = 
SMQ_SUCCESS; int more = 0; if ((SM_VERSION != smq->hdr->producer_version) && (SMQ_MAGIC_PRODUCER != smq->out->s.init)) return SMQ_UNDERFLOW; if (smq->in->s.index_sent_read == smq->out->s.index_sent_write) { err = SMQ_UNDERFLOW; goto bail; } node = (struct smq_node *)(smq->out->sent + smq->in->s.index_sent_read); if (node->index_block >= smq->num_blocks) { err = SMQ_EBADPARM; goto bail; } smq->in->s.index_sent_read = (smq->in->s.index_sent_read + 1) % smq->num_blocks; *pp = smq->blocks + (node->index_block * SM_BLOCKSIZE); *pnsize = SM_BLOCKSIZE * node->num_blocks; rmb(); if (smq->in->s.index_sent_read != smq->out->s.index_sent_write) more = 1; bail: *pbmore = more; return err; } static int smq_alloc_send(struct smq *smq, const uint8_t *pcb, int nsize) { void *pv = 0; int num_blocks; uint32_t index_block = 0; int err = SMQ_SUCCESS; struct smq_node *node = NULL; mutex_lock(smq->lock); if ((SMQ_MAGIC_CONSUMER == smq->in->s.init) && (SM_VERSION == smq->hdr->consumer_version)) { if (smq->out->s.index_check_queue_for_reset == smq->in->s.index_check_queue_for_reset_ack) { while (smq->out->s.index_free_read != smq->in->s.index_free_write) { node = (struct smq_node *)( smq->in->free + smq->out->s.index_free_read); if (node->index_block >= smq->num_blocks) { err = SMQ_EBADPARM; goto bail; } smq->out->s.index_free_read = (smq->out->s.index_free_read + 1) % smq->num_blocks; smq_blockmap_put(&smq->block_map, node->index_block); rmb(); } } } num_blocks = ALIGN(nsize, SM_BLOCKSIZE)/SM_BLOCKSIZE; err = smq_blockmap_get(&smq->block_map, &index_block, num_blocks); if (SMQ_SUCCESS != err) goto bail; pv = smq->blocks + (SM_BLOCKSIZE * index_block); err = copy_from_user((void *)pv, (void *)pcb, nsize); if (0 != err) goto bail; ((struct smq_node *)(smq->out->sent + smq->out->s.index_sent_write))->index_block = (uint16_t)index_block; ((struct smq_node *)(smq->out->sent + smq->out->s.index_sent_write))->num_blocks = (uint16_t)num_blocks; smq->out->s.index_sent_write = 
(smq->out->s.index_sent_write + 1) % smq->num_blocks; bail: if (SMQ_SUCCESS != err) { if (pv) smq_blockmap_put(&smq->block_map, index_block); } mutex_unlock(smq->lock); return err; } static int smq_reset_producer_queue_internal(struct smq *smq, uint32_t reset_num) { int retval = 0; uint32_t i; if (PRODUCER != smq->type) goto bail; mutex_lock(smq->lock); if (smq->out->s.index_check_queue_for_reset != reset_num) { smq->out->s.index_check_queue_for_reset = reset_num; for (i = 0; i < smq->num_blocks; i++) (smq->out->sent + i)->index_block = 0xFFFF; smq_blockmap_reset(&smq->block_map); smq->out->s.index_sent_write = 0; smq->out->s.index_free_read = 0; retval = 1; } mutex_unlock(smq->lock); bail: return retval; } static int smq_check_queue_reset(struct smq *p_cons, struct smq *p_prod) { int retval = 0; uint32_t reset_num, i; if ((CONSUMER != p_cons->type) || (SMQ_MAGIC_PRODUCER != p_cons->out->s.init) || (SM_VERSION != p_cons->hdr->producer_version)) goto bail; reset_num = p_cons->out->s.index_check_queue_for_reset; if (p_cons->in->s.index_check_queue_for_reset_ack != reset_num) { p_cons->in->s.index_check_queue_for_reset_ack = reset_num; for (i = 0; i < p_cons->num_blocks; i++) (p_cons->in->free + i)->index_block = 0xFFFF; p_cons->in->s.index_sent_read = 0; p_cons->in->s.index_free_write = 0; retval = smq_reset_producer_queue_internal(p_prod, reset_num); } bail: return retval; } static int check_subsystem_debug_enabled(void *base_addr, int size) { int num_blocks; uint8_t *pb_orig; uint8_t *pb; struct smq smq; int err = 0; pb = pb_orig = (uint8_t *)base_addr; pb += sizeof(struct smq_hdr); pb = PTR_ALIGN(pb, 8); size -= pb - (uint8_t *)pb_orig; num_blocks = (int)((size - sizeof(struct smq_out_state) - sizeof(struct smq_in_state))/(SM_BLOCKSIZE + sizeof(struct smq_node) * 2)); if (0 >= num_blocks) { err = SMQ_EBADPARM; goto bail; } pb += num_blocks * SM_BLOCKSIZE; smq.out = (struct smq_out *)pb; pb += sizeof(struct smq_out_state) + (num_blocks * sizeof(struct smq_node)); 
smq.in = (struct smq_in *)pb; if (SMQ_MAGIC_CONSUMER != smq.in->s.init) { pr_err("%s, smq in consumer not initialized", __func__); err = -ECOMM; } bail: return err; } static void smq_dtor(struct smq *smq) { if (SMQ_MAGIC_INIT == smq->initialized) { switch (smq->type) { case PRODUCER: smq->out->s.init = 0; smq_blockmap_dtor(&smq->block_map); break; case CONSUMER: smq->in->s.init = 0; break; default: case INVALID: break; } smq->initialized = 0; } } /* * The shared memory is used as a circular ring buffer in each direction. * Thus we have a bi-directional shared memory channel between the AP * and a subsystem. We call this SMQ. Each memory channel contains a header, * data and a control mechanism that is used to synchronize read and write * of data between the AP and the remote subsystem. * * Overall SMQ memory view: * * +------------------------------------------------+ * | SMEM buffer | * |-----------------------+------------------------| * |Producer: LA | Producer: Remote | * |Consumer: Remote | subsystem | * | subsystem | Consumer: LA | * | | | * | Producer| Consumer| * +-----------------------+------------------------+ * | | * | | * | +--------------------------------------+ * | | * | | * v v * +--------------------------------------------------------------+ * | Header | Data | Control | * +-----------+---+---+---+-----+----+--+--+-----+---+--+--+-----+ * | | b | b | b | | S |n |n | | S |n |n | | * | Producer | l | l | l | | M |o |o | | M |o |o | | * | Ver | o | o | o | | Q |d |d | | Q |d |d | | * |-----------| c | c | c | ... | |e |e | ... | |e |e | ... 
| * | | k | k | k | | O | | | | I | | | | * | Consumer | | | | | u |0 |1 | | n |0 |1 | | * | Ver | 0 | 1 | 2 | | t | | | | | | | | * +-----------+---+---+---+-----+----+--+--+-----+---+--+--+-----+ * | | * + | * | * +------------------------+ * | * v * +----+----+----+----+ * | SMQ Nodes | * |----|----|----|----| * Node # | 0 | 1 | 2 | ...| * |----|----|----|----| * Starting Block Index # | 0 | 3 | 8 | ...| * |----|----|----|----| * # of blocks | 3 | 5 | 1 | ...| * +----+----+----+----+ * * Header: Contains version numbers for software compatibility to ensure * that both producers and consumers on the AP and subsystems know how to * read from and write to the queue. * Both the producer and consumer versions are 1. * +---------+-------------------+ * | Size | Field | * +---------+-------------------+ * | 1 byte | Producer Version | * +---------+-------------------+ * | 1 byte | Consumer Version | * +---------+-------------------+ * * Data: The data portion contains multiple blocks [0..N] of a fixed size. * The block size SM_BLOCKSIZE is fixed to 128 bytes for header version #1. * Payload sent from the debug agent app is split (if necessary) and placed * in these blocks. The first data block is placed at the next 8 byte aligned * address after the header. * * The number of blocks for a given SMEM allocation is derived as follows: * Number of Blocks = ((Total Size - Alignment - Size of Header * - Size of SMQIn - Size of SMQOut)/(SM_BLOCKSIZE)) * * The producer maintains a private block map of each of these blocks to * determine which of these blocks in the queue is available and which are free. * * Control: * The control portion contains a list of nodes [0..N] where N is number * of available data blocks. Each node identifies the data * block indexes that contain a particular debug message to be transfered, * and the number of blocks it took to hold the contents of the message. 
* * Each node has the following structure: * +---------+-------------------+ * | Size | Field | * +---------+-------------------+ * | 2 bytes |Staring Block Index| * +---------+-------------------+ * | 2 bytes |Number of Blocks | * +---------+-------------------+ * * The producer and the consumer update different parts of the control channel * (SMQOut / SMQIn) respectively. Each of these control data structures contains * information about the last node that was written / read, and the actual nodes * that were written/read. * * SMQOut Structure (R/W by producer, R by consumer): * +---------+-------------------+ * | Size | Field | * +---------+-------------------+ * | 4 bytes | Magic Init Number | * +---------+-------------------+ * | 4 bytes | Reset | * +---------+-------------------+ * | 4 bytes | Last Sent Index | * +---------+-------------------+ * | 4 bytes | Index Free Read | * +---------+-------------------+ * * SMQIn Structure (R/W by consumer, R by producer): * +---------+-------------------+ * | Size | Field | * +---------+-------------------+ * | 4 bytes | Magic Init Number | * +---------+-------------------+ * | 4 bytes | Reset ACK | * +---------+-------------------+ * | 4 bytes | Last Read Index | * +---------+-------------------+ * | 4 bytes | Index Free Write | * +---------+-------------------+ * * Magic Init Number: * Both SMQ Out and SMQ In initialize this field with a predefined magic * number so as to make sure that both the consumer and producer blocks * have fully initialized and have valid data in the shared memory control area. 
* Producer Magic #: 0xFF00FF01 * Consumer Magic #: 0xFF00FF02 */ static int smq_ctor(struct smq *smq, void *base_addr, int size, enum smq_type type, struct mutex *lock_ptr) { int num_blocks; uint8_t *pb_orig; uint8_t *pb; uint32_t i; int err; if (SMQ_MAGIC_INIT == smq->initialized) { err = SMQ_EBADPARM; goto bail; } if (!base_addr || !size) { err = SMQ_EBADPARM; goto bail; } if (type == PRODUCER) smq->lock = lock_ptr; pb_orig = (uint8_t *)base_addr; smq->hdr = (struct smq_hdr *)pb_orig; pb = pb_orig; pb += sizeof(struct smq_hdr); pb = PTR_ALIGN(pb, 8); size -= pb - (uint8_t *)pb_orig; num_blocks = (int)((size - sizeof(struct smq_out_state) - sizeof(struct smq_in_state))/(SM_BLOCKSIZE + sizeof(struct smq_node) * 2)); if (0 >= num_blocks) { err = SMQ_ENOMEMORY; goto bail; } smq->blocks = pb; smq->num_blocks = num_blocks; pb += num_blocks * SM_BLOCKSIZE; smq->out = (struct smq_out *)pb; pb += sizeof(struct smq_out_state) + (num_blocks * sizeof(struct smq_node)); smq->in = (struct smq_in *)pb; smq->type = type; if (PRODUCER == type) { smq->hdr->producer_version = SM_VERSION; for (i = 0; i < smq->num_blocks; i++) (smq->out->sent + i)->index_block = 0xFFFF; err = smq_blockmap_ctor(&smq->block_map, smq->num_blocks); if (SMQ_SUCCESS != err) goto bail; smq->out->s.index_sent_write = 0; smq->out->s.index_free_read = 0; if (smq->out->s.init == SMQ_MAGIC_PRODUCER) { smq->out->s.index_check_queue_for_reset += 1; } else { smq->out->s.index_check_queue_for_reset = 1; smq->out->s.init = SMQ_MAGIC_PRODUCER; } } else { smq->hdr->consumer_version = SM_VERSION; for (i = 0; i < smq->num_blocks; i++) (smq->in->free + i)->index_block = 0xFFFF; smq->in->s.index_sent_read = 0; smq->in->s.index_free_write = 0; if (smq->out->s.init == SMQ_MAGIC_PRODUCER) { smq->in->s.index_check_queue_for_reset_ack = smq->out->s.index_check_queue_for_reset; } else { smq->in->s.index_check_queue_for_reset_ack = 0; } smq->in->s.init = SMQ_MAGIC_CONSUMER; } smq->initialized = SMQ_MAGIC_INIT; err = SMQ_SUCCESS; 
bail: return err; } static void send_interrupt_to_subsystem(struct rdbg_data *rdbgdata) { int offset = rdbgdata->gpio_out_offset; int val = 1 ^ gpio_get_value(rdbgdata->out.gpio_base_id + offset); gpio_set_value(rdbgdata->out.gpio_base_id + offset, val); rdbgdata->gpio_out_offset = (offset + 1) % 32; dev_dbg(rdbgdata->device, "%s: sent interrupt %d to subsystem", __func__, val); } static irqreturn_t on_interrupt_from(int irq, void *ptr) { struct rdbg_data *rdbgdata = (struct rdbg_data *) ptr; dev_dbg(rdbgdata->device, "%s: Received interrupt %d from subsystem", __func__, irq); complete(&(rdbgdata->work)); return IRQ_HANDLED; } static int initialize_smq(struct rdbg_data *rdbgdata) { int err = 0; if (smq_ctor(&(rdbgdata->producer_smrb), (void *)(rdbgdata->smem_addr), ((rdbgdata->smem_size)/2), PRODUCER, &rdbgdata->write_mutex)) { dev_err(rdbgdata->device, "%s: smq producer allocation failed", __func__); err = -ENOMEM; goto bail; } if (smq_ctor(&(rdbgdata->consumer_smrb), (void *)((uint32_t) (rdbgdata->smem_addr) + ((rdbgdata->smem_size)/2)), ((rdbgdata->smem_size)/2), CONSUMER, NULL)) { dev_err(rdbgdata->device, "%s: smq conmsumer allocation failed", __func__); err = -ENOMEM; } bail: return err; } static int rdbg_open(struct inode *inode, struct file *filp) { int device_id = -1; struct rdbg_device *device = &g_rdbg_instance; struct rdbg_data *rdbgdata = NULL; int err = 0; if (!inode || !device->rdbg_data) { pr_err("Memory not allocated yet"); err = -ENODEV; goto bail; } device_id = MINOR(inode->i_rdev); rdbgdata = &device->rdbg_data[device_id]; if (rdbgdata->device_opened) { dev_err(rdbgdata->device, "%s: Device already opened", __func__); err = -EEXIST; goto bail; } rdbgdata->smem_size = proc_info[device_id].smem_buffer_size; if (!rdbgdata->smem_size) { dev_err(rdbgdata->device, "%s: smem not initialized", __func__); err = -ENOMEM; goto bail; } rdbgdata->smem_addr = smem_alloc(proc_info[device_id].smem_buffer_addr, rdbgdata->smem_size); if (!rdbgdata->smem_addr) { 
dev_err(rdbgdata->device, "%s: Could not allocate smem memory", __func__); err = -ENOMEM; goto bail; } dev_dbg(rdbgdata->device, "%s: SMEM address=0x%x smem_size=%d", __func__, (unsigned int)rdbgdata->smem_addr, rdbgdata->smem_size); if (check_subsystem_debug_enabled(rdbgdata->smem_addr, rdbgdata->smem_size/2)) { dev_err(rdbgdata->device, "%s: Subsystem %s is not debug enabled", __func__, proc_info[device_id].name); err = -ECOMM; goto bail; } init_completion(&rdbgdata->work); err = request_irq(rdbgdata->in.irq_base_id, on_interrupt_from, IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING, proc_info[device_id].name, (void *)&device->rdbg_data[device_id]); if (err) { dev_err(rdbgdata->device, "%s: Failed to register interrupt.Err=%d,irqid=%d.", __func__, err, rdbgdata->in.irq_base_id); goto irq_bail; } err = enable_irq_wake(rdbgdata->in.irq_base_id); if (err < 0) { dev_dbg(rdbgdata->device, "enable_irq_wake() failed with err=%d", err); err = 0; } mutex_init(&rdbgdata->write_mutex); err = initialize_smq(rdbgdata); if (err) { dev_err(rdbgdata->device, "Error initializing smq. 
Err=%d", err); goto smq_bail; } rdbgdata->device_opened = 1; filp->private_data = (void *)rdbgdata; return 0; smq_bail: smq_dtor(&(rdbgdata->producer_smrb)); smq_dtor(&(rdbgdata->consumer_smrb)); mutex_destroy(&rdbgdata->write_mutex); irq_bail: free_irq(rdbgdata->in.irq_base_id, (void *) &device->rdbg_data[device_id]); bail: return err; } static int rdbg_release(struct inode *inode, struct file *filp) { int device_id = -1; struct rdbg_device *rdbgdevice = &g_rdbg_instance; struct rdbg_data *rdbgdata = NULL; int err = 0; if (!inode || !rdbgdevice->rdbg_data) { pr_err("Memory not allocated yet"); err = -ENODEV; goto bail; } device_id = MINOR(inode->i_rdev); rdbgdata = &rdbgdevice->rdbg_data[device_id]; if (rdbgdata->device_opened == 1) { dev_dbg(rdbgdata->device, "%s: Destroying %s.", __func__, proc_info[device_id].name); rdbgdata->device_opened = 0; complete(&(rdbgdata->work)); free_irq(rdbgdata->in.irq_base_id, (void *) &rdbgdevice->rdbg_data[device_id]); if (rdbgdevice->rdbg_data[device_id].producer_smrb.initialized) smq_dtor(&(rdbgdevice->rdbg_data[device_id]. producer_smrb)); if (rdbgdevice->rdbg_data[device_id].consumer_smrb.initialized) smq_dtor(&(rdbgdevice->rdbg_data[device_id]. 
consumer_smrb)); mutex_destroy(&rdbgdata->write_mutex); } filp->private_data = NULL; bail: return err; } static ssize_t rdbg_read(struct file *filp, char __user *buf, size_t size, loff_t *offset) { int err = 0; struct rdbg_data *rdbgdata = filp->private_data; void *p_sent_buffer = NULL; int nsize = 0; int more = 0; if (!rdbgdata) { pr_err("Invalid argument"); err = -EINVAL; goto bail; } dev_dbg(rdbgdata->device, "%s: In receive", __func__); err = wait_for_completion_interruptible(&(rdbgdata->work)); if (err) { dev_err(rdbgdata->device, "%s: Error in wait", __func__); goto bail; } smq_check_queue_reset(&(rdbgdata->consumer_smrb), &(rdbgdata->producer_smrb)); if (SMQ_SUCCESS != smq_receive(&(rdbgdata->consumer_smrb), &p_sent_buffer, &nsize, &more)) { dev_err(rdbgdata->device, "%s: Error in smq_recv(). Err code = %d", __func__, err); err = -ENODATA; goto bail; } size = ((size < nsize) ? size : nsize); err = copy_to_user(buf, p_sent_buffer, size); if (err != 0) { dev_err(rdbgdata->device, "%s: Error in copy_to_user(). 
Err code = %d", __func__, err); err = -ENODATA; goto bail; } smq_free(&(rdbgdata->consumer_smrb), p_sent_buffer); err = size; dev_dbg(rdbgdata->device, "%s: Read data to buffer with address 0x%x", __func__, (unsigned int) buf); bail: dev_dbg(rdbgdata->device, "%s: Returning from receive", __func__); return err; } static ssize_t rdbg_write(struct file *filp, const char __user *buf, size_t size, loff_t *offset) { int err = 0; struct rdbg_data *rdbgdata = filp->private_data; if (!rdbgdata) { pr_err("Invalid argument"); err = -EINVAL; goto bail; } if (smq_alloc_send(&(rdbgdata->producer_smrb), buf, size)) { dev_err(rdbgdata->device, "%s, Error sending", __func__); err = -ECOMM; goto bail; } send_interrupt_to_subsystem(rdbgdata); err = size; bail: return err; } static const struct file_operations rdbg_fops = { .open = rdbg_open, .read = rdbg_read, .write = rdbg_write, .release = rdbg_release, }; static int register_smp2p(char *node_name, struct gpio_info *gpio_info_ptr) { struct device_node *node = NULL; int cnt = 0; int id = 0; node = of_find_compatible_node(NULL, NULL, node_name); if (node) { cnt = of_gpio_count(node); if (cnt && gpio_info_ptr) { id = of_get_gpio(node, 0); gpio_info_ptr->gpio_base_id = id; gpio_info_ptr->irq_base_id = gpio_to_irq(id); return 0; } } return -EINVAL; } static int __init rdbg_init(void) { int err = 0; struct rdbg_device *rdbgdevice = &g_rdbg_instance; int minor = 0; int major = 0; int minor_nodes_created = 0; char *rdbg_compatible_string = "qcom,smp2pgpio_client_rdbg_"; int max_len = strlen(rdbg_compatible_string) + strlen("xx_out"); char *node_name = kcalloc(max_len, sizeof(char), GFP_KERNEL); if (!node_name) { pr_err("Not enough memory"); err = -ENOMEM; goto bail; } if (rdbgdevice->num_devices < 1 || rdbgdevice->num_devices > SMP2P_NUM_PROCS) { pr_err("rgdb: invalid num_devices"); err = -EDOM; goto name_bail; } rdbgdevice->rdbg_data = kcalloc(rdbgdevice->num_devices, sizeof(struct rdbg_data), GFP_KERNEL); if (!rdbgdevice->rdbg_data) { 
pr_err("Not enough memory for rdbg devices"); err = -ENOMEM; goto name_bail; } err = alloc_chrdev_region(&rdbgdevice->dev_no, 0, rdbgdevice->num_devices, "rdbgctl"); if (err) { pr_err("Error in alloc_chrdev_region."); goto data_bail; } major = MAJOR(rdbgdevice->dev_no); cdev_init(&rdbgdevice->cdev, &rdbg_fops); rdbgdevice->cdev.owner = THIS_MODULE; err = cdev_add(&rdbgdevice->cdev, MKDEV(major, 0), rdbgdevice->num_devices); if (err) { pr_err("Error in cdev_add"); goto chrdev_bail; } rdbgdevice->class = class_create(THIS_MODULE, "rdbg"); if (IS_ERR(rdbgdevice->class)) { err = PTR_ERR(rdbgdevice->class); pr_err("Error in class_create"); goto cdev_bail; } for (minor = 0; minor < rdbgdevice->num_devices; minor++) { if (!proc_info[minor].name) continue; if (snprintf(node_name, max_len, "%s%d_in", rdbg_compatible_string, minor) <= 0) { pr_err("Error in snprintf"); err = -ENOMEM; goto device_bail; } if (register_smp2p(node_name, &rdbgdevice->rdbg_data[minor].in)) { pr_debug("No incoming device tree entry found for %s", proc_info[minor].name); continue; } if (snprintf(node_name, max_len, "%s%d_out", rdbg_compatible_string, minor) <= 0) { pr_err("Error in snprintf"); err = -ENOMEM; goto device_bail; } if (register_smp2p(node_name, &rdbgdevice->rdbg_data[minor].out)) { pr_err("No outgoing device tree entry found for %s", proc_info[minor].name); err = -EINVAL; goto device_bail; } rdbgdevice->rdbg_data[minor].device = device_create( rdbgdevice->class, NULL, MKDEV(major, minor), NULL, "%s", proc_info[minor].name); if (IS_ERR(rdbgdevice->rdbg_data[minor].device)) { err = PTR_ERR(rdbgdevice->rdbg_data[minor].device); pr_err("Error in device_create"); goto device_bail; } rdbgdevice->rdbg_data[minor].device_initialized = 1; minor_nodes_created++; dev_dbg(rdbgdevice->rdbg_data[minor].device, "%s: created /dev/%s c %d %d'", __func__, proc_info[minor].name, major, minor); } if (!minor_nodes_created) { pr_err("No device tree entries found"); err = -EINVAL; goto class_bail; } goto 
name_bail; device_bail: for (--minor; minor >= 0; minor--) { if (rdbgdevice->rdbg_data[minor].device_initialized) device_destroy(rdbgdevice->class, MKDEV(MAJOR(rdbgdevice->dev_no), minor)); } class_bail: class_destroy(rdbgdevice->class); cdev_bail: cdev_del(&rdbgdevice->cdev); chrdev_bail: unregister_chrdev_region(rdbgdevice->dev_no, rdbgdevice->num_devices); data_bail: kfree(rdbgdevice->rdbg_data); name_bail: kfree(node_name); bail: return err; } static void __exit rdbg_exit(void) { struct rdbg_device *rdbgdevice = &g_rdbg_instance; int minor; for (minor = 0; minor < rdbgdevice->num_devices; minor++) { if (rdbgdevice->rdbg_data[minor].device_initialized) { device_destroy(rdbgdevice->class, MKDEV(MAJOR(rdbgdevice->dev_no), minor)); } } class_destroy(rdbgdevice->class); cdev_del(&rdbgdevice->cdev); unregister_chrdev_region(rdbgdevice->dev_no, 1); kfree(rdbgdevice->rdbg_data); } module_init(rdbg_init); module_exit(rdbg_exit); MODULE_DESCRIPTION("rdbg module"); MODULE_LICENSE("GPL v2");
gpl-2.0
drowningchild/msm-2.6.38
drivers/net/ibm_newemac/debug.c
3493
7498
/* * drivers/net/ibm_newemac/debug.c * * Driver for PowerPC 4xx on-chip ethernet controller, debug print routines. * * Copyright 2007 Benjamin Herrenschmidt, IBM Corp. * <benh@kernel.crashing.org> * * Based on the arch/ppc version of the driver: * * Copyright (c) 2004, 2005 Zultys Technologies * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * */ #include <linux/init.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/netdevice.h> #include <linux/sysrq.h> #include <asm/io.h> #include "core.h" static DEFINE_SPINLOCK(emac_dbg_lock); static void emac_desc_dump(struct emac_instance *p) { int i; printk("** EMAC %s TX BDs **\n" " tx_cnt = %d tx_slot = %d ack_slot = %d\n", p->ofdev->dev.of_node->full_name, p->tx_cnt, p->tx_slot, p->ack_slot); for (i = 0; i < NUM_TX_BUFF / 2; ++i) printk ("bd[%2d] 0x%08x %c 0x%04x %4u - bd[%2d] 0x%08x %c 0x%04x %4u\n", i, p->tx_desc[i].data_ptr, p->tx_skb[i] ? 'V' : ' ', p->tx_desc[i].ctrl, p->tx_desc[i].data_len, NUM_TX_BUFF / 2 + i, p->tx_desc[NUM_TX_BUFF / 2 + i].data_ptr, p->tx_skb[NUM_TX_BUFF / 2 + i] ? 'V' : ' ', p->tx_desc[NUM_TX_BUFF / 2 + i].ctrl, p->tx_desc[NUM_TX_BUFF / 2 + i].data_len); printk("** EMAC %s RX BDs **\n" " rx_slot = %d flags = 0x%lx rx_skb_size = %d rx_sync_size = %d\n" " rx_sg_skb = 0x%p\n", p->ofdev->dev.of_node->full_name, p->rx_slot, p->commac.flags, p->rx_skb_size, p->rx_sync_size, p->rx_sg_skb); for (i = 0; i < NUM_RX_BUFF / 2; ++i) printk ("bd[%2d] 0x%08x %c 0x%04x %4u - bd[%2d] 0x%08x %c 0x%04x %4u\n", i, p->rx_desc[i].data_ptr, p->rx_skb[i] ? 'V' : ' ', p->rx_desc[i].ctrl, p->rx_desc[i].data_len, NUM_RX_BUFF / 2 + i, p->rx_desc[NUM_RX_BUFF / 2 + i].data_ptr, p->rx_skb[NUM_RX_BUFF / 2 + i] ? 
'V' : ' ', p->rx_desc[NUM_RX_BUFF / 2 + i].ctrl, p->rx_desc[NUM_RX_BUFF / 2 + i].data_len); } static void emac_mac_dump(struct emac_instance *dev) { struct emac_regs __iomem *p = dev->emacp; const int xaht_regs = EMAC_XAHT_REGS(dev); u32 *gaht_base = emac_gaht_base(dev); u32 *iaht_base = emac_iaht_base(dev); int emac4sync = emac_has_feature(dev, EMAC_FTR_EMAC4SYNC); int n; printk("** EMAC %s registers **\n" "MR0 = 0x%08x MR1 = 0x%08x TMR0 = 0x%08x TMR1 = 0x%08x\n" "RMR = 0x%08x ISR = 0x%08x ISER = 0x%08x\n" "IAR = %04x%08x VTPID = 0x%04x VTCI = 0x%04x\n", dev->ofdev->dev.of_node->full_name, in_be32(&p->mr0), in_be32(&p->mr1), in_be32(&p->tmr0), in_be32(&p->tmr1), in_be32(&p->rmr), in_be32(&p->isr), in_be32(&p->iser), in_be32(&p->iahr), in_be32(&p->ialr), in_be32(&p->vtpid), in_be32(&p->vtci) ); if (emac4sync) printk("MAR = %04x%08x MMAR = %04x%08x\n", in_be32(&p->u0.emac4sync.mahr), in_be32(&p->u0.emac4sync.malr), in_be32(&p->u0.emac4sync.mmahr), in_be32(&p->u0.emac4sync.mmalr) ); for (n = 0; n < xaht_regs; n++) printk("IAHT%02d = 0x%08x\n", n + 1, in_be32(iaht_base + n)); for (n = 0; n < xaht_regs; n++) printk("GAHT%02d = 0x%08x\n", n + 1, in_be32(gaht_base + n)); printk("LSA = %04x%08x IPGVR = 0x%04x\n" "STACR = 0x%08x TRTR = 0x%08x RWMR = 0x%08x\n" "OCTX = 0x%08x OCRX = 0x%08x\n", in_be32(&p->lsah), in_be32(&p->lsal), in_be32(&p->ipgvr), in_be32(&p->stacr), in_be32(&p->trtr), in_be32(&p->rwmr), in_be32(&p->octx), in_be32(&p->ocrx) ); if (!emac4sync) { printk("IPCR = 0x%08x\n", in_be32(&p->u1.emac4.ipcr) ); } else { printk("REVID = 0x%08x TPC = 0x%08x\n", in_be32(&p->u1.emac4sync.revid), in_be32(&p->u1.emac4sync.tpc) ); } emac_desc_dump(dev); } static void emac_mal_dump(struct mal_instance *mal) { int i; printk("** MAL %s Registers **\n" "CFG = 0x%08x ESR = 0x%08x IER = 0x%08x\n" "TX|CASR = 0x%08x CARR = 0x%08x EOBISR = 0x%08x DEIR = 0x%08x\n" "RX|CASR = 0x%08x CARR = 0x%08x EOBISR = 0x%08x DEIR = 0x%08x\n", mal->ofdev->dev.of_node->full_name, get_mal_dcrn(mal, 
MAL_CFG), get_mal_dcrn(mal, MAL_ESR), get_mal_dcrn(mal, MAL_IER), get_mal_dcrn(mal, MAL_TXCASR), get_mal_dcrn(mal, MAL_TXCARR), get_mal_dcrn(mal, MAL_TXEOBISR), get_mal_dcrn(mal, MAL_TXDEIR), get_mal_dcrn(mal, MAL_RXCASR), get_mal_dcrn(mal, MAL_RXCARR), get_mal_dcrn(mal, MAL_RXEOBISR), get_mal_dcrn(mal, MAL_RXDEIR) ); printk("TX|"); for (i = 0; i < mal->num_tx_chans; ++i) { if (i && !(i % 4)) printk("\n "); printk("CTP%d = 0x%08x ", i, get_mal_dcrn(mal, MAL_TXCTPR(i))); } printk("\nRX|"); for (i = 0; i < mal->num_rx_chans; ++i) { if (i && !(i % 4)) printk("\n "); printk("CTP%d = 0x%08x ", i, get_mal_dcrn(mal, MAL_RXCTPR(i))); } printk("\n "); for (i = 0; i < mal->num_rx_chans; ++i) { u32 r = get_mal_dcrn(mal, MAL_RCBS(i)); if (i && !(i % 3)) printk("\n "); printk("RCBS%d = 0x%08x (%d) ", i, r, r * 16); } printk("\n"); } static struct emac_instance *__emacs[4]; static struct mal_instance *__mals[1]; void emac_dbg_register(struct emac_instance *dev) { unsigned long flags; int i; spin_lock_irqsave(&emac_dbg_lock, flags); for (i = 0; i < ARRAY_SIZE(__emacs); i++) if (__emacs[i] == NULL) { __emacs[i] = dev; break; } spin_unlock_irqrestore(&emac_dbg_lock, flags); } void emac_dbg_unregister(struct emac_instance *dev) { unsigned long flags; int i; spin_lock_irqsave(&emac_dbg_lock, flags); for (i = 0; i < ARRAY_SIZE(__emacs); i++) if (__emacs[i] == dev) { __emacs[i] = NULL; break; } spin_unlock_irqrestore(&emac_dbg_lock, flags); } void mal_dbg_register(struct mal_instance *mal) { unsigned long flags; int i; spin_lock_irqsave(&emac_dbg_lock, flags); for (i = 0; i < ARRAY_SIZE(__mals); i++) if (__mals[i] == NULL) { __mals[i] = mal; break; } spin_unlock_irqrestore(&emac_dbg_lock, flags); } void mal_dbg_unregister(struct mal_instance *mal) { unsigned long flags; int i; spin_lock_irqsave(&emac_dbg_lock, flags); for (i = 0; i < ARRAY_SIZE(__mals); i++) if (__mals[i] == mal) { __mals[i] = NULL; break; } spin_unlock_irqrestore(&emac_dbg_lock, flags); } void emac_dbg_dump_all(void) 
{ unsigned int i; unsigned long flags; spin_lock_irqsave(&emac_dbg_lock, flags); for (i = 0; i < ARRAY_SIZE(__mals); ++i) if (__mals[i]) emac_mal_dump(__mals[i]); for (i = 0; i < ARRAY_SIZE(__emacs); ++i) if (__emacs[i]) emac_mac_dump(__emacs[i]); spin_unlock_irqrestore(&emac_dbg_lock, flags); } #if defined(CONFIG_MAGIC_SYSRQ) static void emac_sysrq_handler(int key) { emac_dbg_dump_all(); } static struct sysrq_key_op emac_sysrq_op = { .handler = emac_sysrq_handler, .help_msg = "emaC", .action_msg = "Show EMAC(s) status", }; int __init emac_init_debug(void) { return register_sysrq_key('c', &emac_sysrq_op); } void __exit emac_fini_debug(void) { unregister_sysrq_key('c', &emac_sysrq_op); } #else int __init emac_init_debug(void) { return 0; } void __exit emac_fini_debug(void) { } #endif /* CONFIG_MAGIC_SYSRQ */
gpl-2.0
MoKee/android_kernel_oppo_find5
arch/x86/kernel/cpu/common.c
4005
31731
#include <linux/bootmem.h> #include <linux/linkage.h> #include <linux/bitops.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/percpu.h> #include <linux/string.h> #include <linux/delay.h> #include <linux/sched.h> #include <linux/init.h> #include <linux/kgdb.h> #include <linux/smp.h> #include <linux/io.h> #include <asm/stackprotector.h> #include <asm/perf_event.h> #include <asm/mmu_context.h> #include <asm/archrandom.h> #include <asm/hypervisor.h> #include <asm/processor.h> #include <asm/debugreg.h> #include <asm/sections.h> #include <linux/topology.h> #include <linux/cpumask.h> #include <asm/pgtable.h> #include <linux/atomic.h> #include <asm/proto.h> #include <asm/setup.h> #include <asm/apic.h> #include <asm/desc.h> #include <asm/i387.h> #include <asm/fpu-internal.h> #include <asm/mtrr.h> #include <linux/numa.h> #include <asm/asm.h> #include <asm/cpu.h> #include <asm/mce.h> #include <asm/msr.h> #include <asm/pat.h> #ifdef CONFIG_X86_LOCAL_APIC #include <asm/uv/uv.h> #endif #include "cpu.h" /* all of these masks are initialized in setup_cpu_local_masks() */ cpumask_var_t cpu_initialized_mask; cpumask_var_t cpu_callout_mask; cpumask_var_t cpu_callin_mask; /* representing cpus for which sibling maps can be computed */ cpumask_var_t cpu_sibling_setup_mask; /* correctly size the local cpu masks */ void __init setup_cpu_local_masks(void) { alloc_bootmem_cpumask_var(&cpu_initialized_mask); alloc_bootmem_cpumask_var(&cpu_callin_mask); alloc_bootmem_cpumask_var(&cpu_callout_mask); alloc_bootmem_cpumask_var(&cpu_sibling_setup_mask); } static void __cpuinit default_init(struct cpuinfo_x86 *c) { #ifdef CONFIG_X86_64 cpu_detect_cache_sizes(c); #else /* Not much we can do here... */ /* Check if at least it has cpuid */ if (c->cpuid_level == -1) { /* No cpuid. 
It must be an ancient CPU */ if (c->x86 == 4) strcpy(c->x86_model_id, "486"); else if (c->x86 == 3) strcpy(c->x86_model_id, "386"); } #endif } static const struct cpu_dev __cpuinitconst default_cpu = { .c_init = default_init, .c_vendor = "Unknown", .c_x86_vendor = X86_VENDOR_UNKNOWN, }; static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu; DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = { #ifdef CONFIG_X86_64 /* * We need valid kernel segments for data and code in long mode too * IRET will check the segment types kkeil 2000/10/28 * Also sysret mandates a special GDT layout * * TLS descriptors are currently at a different place compared to i386. * Hopefully nobody expects them at a fixed place (Wine?) */ [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff), [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff), [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff), [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff), [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff), [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff), #else [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff), [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff), [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff), [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff), /* * Segments used for calling PnP BIOS have byte granularity. * They code segments and data segments have fixed 64k limits, * the transfer segment sizes are set at run time. 
*/ /* 32-bit code */ [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff), /* 16-bit code */ [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff), /* 16-bit data */ [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff), /* 16-bit data */ [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0), /* 16-bit data */ [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0), /* * The APM segments have byte granularity and their bases * are set at run time. All have 64k limits. */ /* 32-bit code */ [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff), /* 16-bit code */ [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff), /* data */ [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff), [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff), [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff), GDT_STACK_CANARY_INIT #endif } }; EXPORT_PER_CPU_SYMBOL_GPL(gdt_page); static int __init x86_xsave_setup(char *s) { setup_clear_cpu_cap(X86_FEATURE_XSAVE); setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT); return 1; } __setup("noxsave", x86_xsave_setup); static int __init x86_xsaveopt_setup(char *s) { setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT); return 1; } __setup("noxsaveopt", x86_xsaveopt_setup); #ifdef CONFIG_X86_32 static int cachesize_override __cpuinitdata = -1; static int disable_x86_serial_nr __cpuinitdata = 1; static int __init cachesize_setup(char *str) { get_option(&str, &cachesize_override); return 1; } __setup("cachesize=", cachesize_setup); static int __init x86_fxsr_setup(char *s) { setup_clear_cpu_cap(X86_FEATURE_FXSR); setup_clear_cpu_cap(X86_FEATURE_XMM); return 1; } __setup("nofxsr", x86_fxsr_setup); static int __init x86_sep_setup(char *s) { setup_clear_cpu_cap(X86_FEATURE_SEP); return 1; } __setup("nosep", x86_sep_setup); /* Standard macro to see if a specific flag is changeable */ static inline int flag_is_changeable_p(u32 flag) { u32 f1, f2; /* * Cyrix and IDT cpus allow disabling of CPUID * so the code below may 
return different results * when it is executed before and after enabling * the CPUID. Add "volatile" to not allow gcc to * optimize the subsequent calls to this function. */ asm volatile ("pushfl \n\t" "pushfl \n\t" "popl %0 \n\t" "movl %0, %1 \n\t" "xorl %2, %0 \n\t" "pushl %0 \n\t" "popfl \n\t" "pushfl \n\t" "popl %0 \n\t" "popfl \n\t" : "=&r" (f1), "=&r" (f2) : "ir" (flag)); return ((f1^f2) & flag) != 0; } /* Probe for the CPUID instruction */ static int __cpuinit have_cpuid_p(void) { return flag_is_changeable_p(X86_EFLAGS_ID); } static void __cpuinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c) { unsigned long lo, hi; if (!cpu_has(c, X86_FEATURE_PN) || !disable_x86_serial_nr) return; /* Disable processor serial number: */ rdmsr(MSR_IA32_BBL_CR_CTL, lo, hi); lo |= 0x200000; wrmsr(MSR_IA32_BBL_CR_CTL, lo, hi); printk(KERN_NOTICE "CPU serial number disabled.\n"); clear_cpu_cap(c, X86_FEATURE_PN); /* Disabling the serial number may affect the cpuid level */ c->cpuid_level = cpuid_eax(0); } static int __init x86_serial_nr_setup(char *s) { disable_x86_serial_nr = 0; return 1; } __setup("serialnumber", x86_serial_nr_setup); #else static inline int flag_is_changeable_p(u32 flag) { return 1; } /* Probe for the CPUID instruction */ static inline int have_cpuid_p(void) { return 1; } static inline void squash_the_stupid_serial_number(struct cpuinfo_x86 *c) { } #endif static int disable_smep __cpuinitdata; static __init int setup_disable_smep(char *arg) { disable_smep = 1; return 1; } __setup("nosmep", setup_disable_smep); static __cpuinit void setup_smep(struct cpuinfo_x86 *c) { if (cpu_has(c, X86_FEATURE_SMEP)) { if (unlikely(disable_smep)) { setup_clear_cpu_cap(X86_FEATURE_SMEP); clear_in_cr4(X86_CR4_SMEP); } else set_in_cr4(X86_CR4_SMEP); } } /* * Some CPU features depend on higher CPUID levels, which may not always * be available due to CPUID level capping or broken virtualization * software. Add those features to this table to auto-disable them. 
*/ struct cpuid_dependent_feature { u32 feature; u32 level; }; static const struct cpuid_dependent_feature __cpuinitconst cpuid_dependent_features[] = { { X86_FEATURE_MWAIT, 0x00000005 }, { X86_FEATURE_DCA, 0x00000009 }, { X86_FEATURE_XSAVE, 0x0000000d }, { 0, 0 } }; static void __cpuinit filter_cpuid_features(struct cpuinfo_x86 *c, bool warn) { const struct cpuid_dependent_feature *df; for (df = cpuid_dependent_features; df->feature; df++) { if (!cpu_has(c, df->feature)) continue; /* * Note: cpuid_level is set to -1 if unavailable, but * extended_extended_level is set to 0 if unavailable * and the legitimate extended levels are all negative * when signed; hence the weird messing around with * signs here... */ if (!((s32)df->level < 0 ? (u32)df->level > (u32)c->extended_cpuid_level : (s32)df->level > (s32)c->cpuid_level)) continue; clear_cpu_cap(c, df->feature); if (!warn) continue; printk(KERN_WARNING "CPU: CPU feature %s disabled, no CPUID level 0x%x\n", x86_cap_flags[df->feature], df->level); } } /* * Naming convention should be: <Name> [(<Codename>)] * This table only is used unless init_<vendor>() below doesn't set it; * in particular, if CPUID levels 0x80000002..4 are supported, this * isn't used */ /* Look up CPU names by table lookup. 
*/ static const char *__cpuinit table_lookup_model(struct cpuinfo_x86 *c) { const struct cpu_model_info *info; if (c->x86_model >= 16) return NULL; /* Range check */ if (!this_cpu) return NULL; info = this_cpu->c_models; while (info && info->family) { if (info->family == c->x86) return info->model_names[c->x86_model]; info++; } return NULL; /* Not found */ } __u32 cpu_caps_cleared[NCAPINTS] __cpuinitdata; __u32 cpu_caps_set[NCAPINTS] __cpuinitdata; void load_percpu_segment(int cpu) { #ifdef CONFIG_X86_32 loadsegment(fs, __KERNEL_PERCPU); #else loadsegment(gs, 0); wrmsrl(MSR_GS_BASE, (unsigned long)per_cpu(irq_stack_union.gs_base, cpu)); #endif load_stack_canary_segment(); } /* * Current gdt points %fs at the "master" per-cpu area: after this, * it's on the real one. */ void switch_to_new_gdt(int cpu) { struct desc_ptr gdt_descr; gdt_descr.address = (long)get_cpu_gdt_table(cpu); gdt_descr.size = GDT_SIZE - 1; load_gdt(&gdt_descr); /* Reload the per-cpu base */ load_percpu_segment(cpu); } static const struct cpu_dev *__cpuinitdata cpu_devs[X86_VENDOR_NUM] = {}; static void __cpuinit get_model_name(struct cpuinfo_x86 *c) { unsigned int *v; char *p, *q; if (c->extended_cpuid_level < 0x80000004) return; v = (unsigned int *)c->x86_model_id; cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]); cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]); cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]); c->x86_model_id[48] = 0; /* * Intel chips right-justify this string for some dumb reason; * undo that brain damage: */ p = q = &c->x86_model_id[0]; while (*p == ' ') p++; if (p != q) { while (*p) *q++ = *p++; while (q <= &c->x86_model_id[48]) *q++ = '\0'; /* Zero-pad the rest */ } } void __cpuinit cpu_detect_cache_sizes(struct cpuinfo_x86 *c) { unsigned int n, dummy, ebx, ecx, edx, l2size; n = c->extended_cpuid_level; if (n >= 0x80000005) { cpuid(0x80000005, &dummy, &ebx, &ecx, &edx); c->x86_cache_size = (ecx>>24) + (edx>>24); #ifdef CONFIG_X86_64 /* On K8 L1 TLB is inclusive, so don't count it 
*/ c->x86_tlbsize = 0; #endif } if (n < 0x80000006) /* Some chips just has a large L1. */ return; cpuid(0x80000006, &dummy, &ebx, &ecx, &edx); l2size = ecx >> 16; #ifdef CONFIG_X86_64 c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff); #else /* do processor-specific cache resizing */ if (this_cpu->c_size_cache) l2size = this_cpu->c_size_cache(c, l2size); /* Allow user to override all this if necessary. */ if (cachesize_override != -1) l2size = cachesize_override; if (l2size == 0) return; /* Again, no L2 cache is possible */ #endif c->x86_cache_size = l2size; } void __cpuinit detect_ht(struct cpuinfo_x86 *c) { #ifdef CONFIG_X86_HT u32 eax, ebx, ecx, edx; int index_msb, core_bits; static bool printed; if (!cpu_has(c, X86_FEATURE_HT)) return; if (cpu_has(c, X86_FEATURE_CMP_LEGACY)) goto out; if (cpu_has(c, X86_FEATURE_XTOPOLOGY)) return; cpuid(1, &eax, &ebx, &ecx, &edx); smp_num_siblings = (ebx & 0xff0000) >> 16; if (smp_num_siblings == 1) { printk_once(KERN_INFO "CPU0: Hyper-Threading is disabled\n"); goto out; } if (smp_num_siblings <= 1) goto out; index_msb = get_count_order(smp_num_siblings); c->phys_proc_id = apic->phys_pkg_id(c->initial_apicid, index_msb); smp_num_siblings = smp_num_siblings / c->x86_max_cores; index_msb = get_count_order(smp_num_siblings); core_bits = get_count_order(c->x86_max_cores); c->cpu_core_id = apic->phys_pkg_id(c->initial_apicid, index_msb) & ((1 << core_bits) - 1); out: if (!printed && (c->x86_max_cores * smp_num_siblings) > 1) { printk(KERN_INFO "CPU: Physical Processor ID: %d\n", c->phys_proc_id); printk(KERN_INFO "CPU: Processor Core ID: %d\n", c->cpu_core_id); printed = 1; } #endif } static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c) { char *v = c->x86_vendor_id; int i; for (i = 0; i < X86_VENDOR_NUM; i++) { if (!cpu_devs[i]) break; if (!strcmp(v, cpu_devs[i]->c_ident[0]) || (cpu_devs[i]->c_ident[1] && !strcmp(v, cpu_devs[i]->c_ident[1]))) { this_cpu = cpu_devs[i]; c->x86_vendor = this_cpu->c_x86_vendor; return; } } 
printk_once(KERN_ERR "CPU: vendor_id '%s' unknown, using generic init.\n" \ "CPU: Your system may be unstable.\n", v); c->x86_vendor = X86_VENDOR_UNKNOWN; this_cpu = &default_cpu; } void __cpuinit cpu_detect(struct cpuinfo_x86 *c) { /* Get vendor name */ cpuid(0x00000000, (unsigned int *)&c->cpuid_level, (unsigned int *)&c->x86_vendor_id[0], (unsigned int *)&c->x86_vendor_id[8], (unsigned int *)&c->x86_vendor_id[4]); c->x86 = 4; /* Intel-defined flags: level 0x00000001 */ if (c->cpuid_level >= 0x00000001) { u32 junk, tfms, cap0, misc; cpuid(0x00000001, &tfms, &misc, &junk, &cap0); c->x86 = (tfms >> 8) & 0xf; c->x86_model = (tfms >> 4) & 0xf; c->x86_mask = tfms & 0xf; if (c->x86 == 0xf) c->x86 += (tfms >> 20) & 0xff; if (c->x86 >= 0x6) c->x86_model += ((tfms >> 16) & 0xf) << 4; if (cap0 & (1<<19)) { c->x86_clflush_size = ((misc >> 8) & 0xff) * 8; c->x86_cache_alignment = c->x86_clflush_size; } } } void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c) { u32 tfms, xlvl; u32 ebx; /* Intel-defined flags: level 0x00000001 */ if (c->cpuid_level >= 0x00000001) { u32 capability, excap; cpuid(0x00000001, &tfms, &ebx, &excap, &capability); c->x86_capability[0] = capability; c->x86_capability[4] = excap; } /* Additional Intel-defined flags: level 0x00000007 */ if (c->cpuid_level >= 0x00000007) { u32 eax, ebx, ecx, edx; cpuid_count(0x00000007, 0, &eax, &ebx, &ecx, &edx); c->x86_capability[9] = ebx; } /* AMD-defined flags: level 0x80000001 */ xlvl = cpuid_eax(0x80000000); c->extended_cpuid_level = xlvl; if ((xlvl & 0xffff0000) == 0x80000000) { if (xlvl >= 0x80000001) { c->x86_capability[1] = cpuid_edx(0x80000001); c->x86_capability[6] = cpuid_ecx(0x80000001); } } if (c->extended_cpuid_level >= 0x80000008) { u32 eax = cpuid_eax(0x80000008); c->x86_virt_bits = (eax >> 8) & 0xff; c->x86_phys_bits = eax & 0xff; } #ifdef CONFIG_X86_32 else if (cpu_has(c, X86_FEATURE_PAE) || cpu_has(c, X86_FEATURE_PSE36)) c->x86_phys_bits = 36; #endif if (c->extended_cpuid_level >= 0x80000007) 
c->x86_power = cpuid_edx(0x80000007); init_scattered_cpuid_features(c); } static void __cpuinit identify_cpu_without_cpuid(struct cpuinfo_x86 *c) { #ifdef CONFIG_X86_32 int i; /* * First of all, decide if this is a 486 or higher * It's a 486 if we can modify the AC flag */ if (flag_is_changeable_p(X86_EFLAGS_AC)) c->x86 = 4; else c->x86 = 3; for (i = 0; i < X86_VENDOR_NUM; i++) if (cpu_devs[i] && cpu_devs[i]->c_identify) { c->x86_vendor_id[0] = 0; cpu_devs[i]->c_identify(c); if (c->x86_vendor_id[0]) { get_cpu_vendor(c); break; } } #endif } /* * Do minimum CPU detection early. * Fields really needed: vendor, cpuid_level, family, model, mask, * cache alignment. * The others are not touched to avoid unwanted side effects. * * WARNING: this function is only called on the BP. Don't add code here * that is supposed to run on all CPUs. */ static void __init early_identify_cpu(struct cpuinfo_x86 *c) { #ifdef CONFIG_X86_64 c->x86_clflush_size = 64; c->x86_phys_bits = 36; c->x86_virt_bits = 48; #else c->x86_clflush_size = 32; c->x86_phys_bits = 32; c->x86_virt_bits = 32; #endif c->x86_cache_alignment = c->x86_clflush_size; memset(&c->x86_capability, 0, sizeof c->x86_capability); c->extended_cpuid_level = 0; if (!have_cpuid_p()) identify_cpu_without_cpuid(c); /* cyrix could have cpuid enabled via c_identify()*/ if (!have_cpuid_p()) return; cpu_detect(c); get_cpu_vendor(c); get_cpu_cap(c); if (this_cpu->c_early_init) this_cpu->c_early_init(c); c->cpu_index = 0; filter_cpuid_features(c, false); setup_smep(c); if (this_cpu->c_bsp_init) this_cpu->c_bsp_init(c); } void __init early_cpu_init(void) { const struct cpu_dev *const *cdev; int count = 0; #ifdef CONFIG_PROCESSOR_SELECT printk(KERN_INFO "KERNEL supported cpus:\n"); #endif for (cdev = __x86_cpu_dev_start; cdev < __x86_cpu_dev_end; cdev++) { const struct cpu_dev *cpudev = *cdev; if (count >= X86_VENDOR_NUM) break; cpu_devs[count] = cpudev; count++; #ifdef CONFIG_PROCESSOR_SELECT { unsigned int j; for (j = 0; j < 2; j++) { if 
(!cpudev->c_ident[j]) continue; printk(KERN_INFO " %s %s\n", cpudev->c_vendor, cpudev->c_ident[j]); } } #endif } early_identify_cpu(&boot_cpu_data); } /* * The NOPL instruction is supposed to exist on all CPUs of family >= 6; * unfortunately, that's not true in practice because of early VIA * chips and (more importantly) broken virtualizers that are not easy * to detect. In the latter case it doesn't even *fail* reliably, so * probing for it doesn't even work. Disable it completely on 32-bit * unless we can find a reliable way to detect all the broken cases. * Enable it explicitly on 64-bit for non-constant inputs of cpu_has(). */ static void __cpuinit detect_nopl(struct cpuinfo_x86 *c) { #ifdef CONFIG_X86_32 clear_cpu_cap(c, X86_FEATURE_NOPL); #else set_cpu_cap(c, X86_FEATURE_NOPL); #endif } static void __cpuinit generic_identify(struct cpuinfo_x86 *c) { c->extended_cpuid_level = 0; if (!have_cpuid_p()) identify_cpu_without_cpuid(c); /* cyrix could have cpuid enabled via c_identify()*/ if (!have_cpuid_p()) return; cpu_detect(c); get_cpu_vendor(c); get_cpu_cap(c); if (c->cpuid_level >= 0x00000001) { c->initial_apicid = (cpuid_ebx(1) >> 24) & 0xFF; #ifdef CONFIG_X86_32 # ifdef CONFIG_X86_HT c->apicid = apic->phys_pkg_id(c->initial_apicid, 0); # else c->apicid = c->initial_apicid; # endif #endif c->phys_proc_id = c->initial_apicid; } setup_smep(c); get_model_name(c); /* Default name */ detect_nopl(c); } /* * This does the hard work of actually picking apart the CPU stuff... */ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c) { int i; c->loops_per_jiffy = loops_per_jiffy; c->x86_cache_size = -1; c->x86_vendor = X86_VENDOR_UNKNOWN; c->x86_model = c->x86_mask = 0; /* So far unknown... 
*/ c->x86_vendor_id[0] = '\0'; /* Unset */ c->x86_model_id[0] = '\0'; /* Unset */ c->x86_max_cores = 1; c->x86_coreid_bits = 0; #ifdef CONFIG_X86_64 c->x86_clflush_size = 64; c->x86_phys_bits = 36; c->x86_virt_bits = 48; #else c->cpuid_level = -1; /* CPUID not detected */ c->x86_clflush_size = 32; c->x86_phys_bits = 32; c->x86_virt_bits = 32; #endif c->x86_cache_alignment = c->x86_clflush_size; memset(&c->x86_capability, 0, sizeof c->x86_capability); generic_identify(c); if (this_cpu->c_identify) this_cpu->c_identify(c); /* Clear/Set all flags overriden by options, after probe */ for (i = 0; i < NCAPINTS; i++) { c->x86_capability[i] &= ~cpu_caps_cleared[i]; c->x86_capability[i] |= cpu_caps_set[i]; } #ifdef CONFIG_X86_64 c->apicid = apic->phys_pkg_id(c->initial_apicid, 0); #endif /* * Vendor-specific initialization. In this section we * canonicalize the feature flags, meaning if there are * features a certain CPU supports which CPUID doesn't * tell us, CPUID claiming incorrect flags, or other bugs, * we handle them here. * * At the end of this section, c->x86_capability better * indicate the features this CPU genuinely supports! */ if (this_cpu->c_init) this_cpu->c_init(c); /* Disable the PN if appropriate */ squash_the_stupid_serial_number(c); /* * The vendor-specific functions might have changed features. * Now we do "generic changes." */ /* Filter out anything that depends on CPUID levels we don't have */ filter_cpuid_features(c, true); /* If the model name is still unset, do table lookup. */ if (!c->x86_model_id[0]) { const char *p; p = table_lookup_model(c); if (p) strcpy(c->x86_model_id, p); else /* Last resort... */ sprintf(c->x86_model_id, "%02x/%02x", c->x86, c->x86_model); } #ifdef CONFIG_X86_64 detect_ht(c); #endif init_hypervisor(c); x86_init_rdrand(c); /* * Clear/Set all flags overriden by options, need do it * before following smp all cpus cap AND. 
*/ for (i = 0; i < NCAPINTS; i++) { c->x86_capability[i] &= ~cpu_caps_cleared[i]; c->x86_capability[i] |= cpu_caps_set[i]; } /* * On SMP, boot_cpu_data holds the common feature set between * all CPUs; so make sure that we indicate which features are * common between the CPUs. The first time this routine gets * executed, c == &boot_cpu_data. */ if (c != &boot_cpu_data) { /* AND the already accumulated flags with these */ for (i = 0; i < NCAPINTS; i++) boot_cpu_data.x86_capability[i] &= c->x86_capability[i]; } /* Init Machine Check Exception if available. */ mcheck_cpu_init(c); select_idle_routine(c); #ifdef CONFIG_NUMA numa_add_cpu(smp_processor_id()); #endif } #ifdef CONFIG_X86_64 static void vgetcpu_set_mode(void) { if (cpu_has(&boot_cpu_data, X86_FEATURE_RDTSCP)) vgetcpu_mode = VGETCPU_RDTSCP; else vgetcpu_mode = VGETCPU_LSL; } #endif void __init identify_boot_cpu(void) { identify_cpu(&boot_cpu_data); init_amd_e400_c1e_mask(); #ifdef CONFIG_X86_32 sysenter_setup(); enable_sep_cpu(); #else vgetcpu_set_mode(); #endif } void __cpuinit identify_secondary_cpu(struct cpuinfo_x86 *c) { BUG_ON(c == &boot_cpu_data); identify_cpu(c); #ifdef CONFIG_X86_32 enable_sep_cpu(); #endif mtrr_ap_init(); } struct msr_range { unsigned min; unsigned max; }; static const struct msr_range msr_range_array[] __cpuinitconst = { { 0x00000000, 0x00000418}, { 0xc0000000, 0xc000040b}, { 0xc0010000, 0xc0010142}, { 0xc0011000, 0xc001103b}, }; static void __cpuinit __print_cpu_msr(void) { unsigned index_min, index_max; unsigned index; u64 val; int i; for (i = 0; i < ARRAY_SIZE(msr_range_array); i++) { index_min = msr_range_array[i].min; index_max = msr_range_array[i].max; for (index = index_min; index < index_max; index++) { if (rdmsrl_amd_safe(index, &val)) continue; printk(KERN_INFO " MSR%08x: %016llx\n", index, val); } } } static int show_msr __cpuinitdata; static __init int setup_show_msr(char *arg) { int num; get_option(&arg, &num); if (num > 0) show_msr = num; return 1; } 
__setup("show_msr=", setup_show_msr); static __init int setup_noclflush(char *arg) { setup_clear_cpu_cap(X86_FEATURE_CLFLSH); return 1; } __setup("noclflush", setup_noclflush); void __cpuinit print_cpu_info(struct cpuinfo_x86 *c) { const char *vendor = NULL; if (c->x86_vendor < X86_VENDOR_NUM) { vendor = this_cpu->c_vendor; } else { if (c->cpuid_level >= 0) vendor = c->x86_vendor_id; } if (vendor && !strstr(c->x86_model_id, vendor)) printk(KERN_CONT "%s ", vendor); if (c->x86_model_id[0]) printk(KERN_CONT "%s", c->x86_model_id); else printk(KERN_CONT "%d86", c->x86); if (c->x86_mask || c->cpuid_level >= 0) printk(KERN_CONT " stepping %02x\n", c->x86_mask); else printk(KERN_CONT "\n"); print_cpu_msr(c); } void __cpuinit print_cpu_msr(struct cpuinfo_x86 *c) { if (c->cpu_index < show_msr) __print_cpu_msr(); } static __init int setup_disablecpuid(char *arg) { int bit; if (get_option(&arg, &bit) && bit < NCAPINTS*32) setup_clear_cpu_cap(bit); else return 0; return 1; } __setup("clearcpuid=", setup_disablecpuid); #ifdef CONFIG_X86_64 struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table }; struct desc_ptr nmi_idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) nmi_idt_table }; DEFINE_PER_CPU_FIRST(union irq_stack_union, irq_stack_union) __aligned(PAGE_SIZE); /* * The following four percpu variables are hot. Align current_task to * cacheline size such that all four fall in the same cacheline. 
*/ DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned = &init_task; EXPORT_PER_CPU_SYMBOL(current_task); DEFINE_PER_CPU(unsigned long, kernel_stack) = (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE; EXPORT_PER_CPU_SYMBOL(kernel_stack); DEFINE_PER_CPU(char *, irq_stack_ptr) = init_per_cpu_var(irq_stack_union.irq_stack) + IRQ_STACK_SIZE - 64; DEFINE_PER_CPU(unsigned int, irq_count) = -1; DEFINE_PER_CPU(struct task_struct *, fpu_owner_task); /* * Special IST stacks which the CPU switches to when it calls * an IST-marked descriptor entry. Up to 7 stacks (hardware * limit), all of them are 4K, except the debug stack which * is 8K. */ static const unsigned int exception_stack_sizes[N_EXCEPTION_STACKS] = { [0 ... N_EXCEPTION_STACKS - 1] = EXCEPTION_STKSZ, [DEBUG_STACK - 1] = DEBUG_STKSZ }; static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks [(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ]); /* May not be marked __init: used by software suspend */ void syscall_init(void) { /* * LSTAR and STAR live in a bit strange symbiosis. * They both write to the same internal register. STAR allows to * set CS/DS but only a 32bit target. LSTAR sets the 64bit rip. */ wrmsrl(MSR_STAR, ((u64)__USER32_CS)<<48 | ((u64)__KERNEL_CS)<<32); wrmsrl(MSR_LSTAR, system_call); wrmsrl(MSR_CSTAR, ignore_sysret); #ifdef CONFIG_IA32_EMULATION syscall32_cpu_init(); #endif /* Flags to clear on syscall */ wrmsrl(MSR_SYSCALL_MASK, X86_EFLAGS_TF|X86_EFLAGS_DF|X86_EFLAGS_IF|X86_EFLAGS_IOPL); } unsigned long kernel_eflags; /* * Copies of the original ist values from the tss are only accessed during * debugging, no special alignment required. 
*/ DEFINE_PER_CPU(struct orig_ist, orig_ist); static DEFINE_PER_CPU(unsigned long, debug_stack_addr); DEFINE_PER_CPU(int, debug_stack_usage); int is_debug_stack(unsigned long addr) { return __get_cpu_var(debug_stack_usage) || (addr <= __get_cpu_var(debug_stack_addr) && addr > (__get_cpu_var(debug_stack_addr) - DEBUG_STKSZ)); } void debug_stack_set_zero(void) { load_idt((const struct desc_ptr *)&nmi_idt_descr); } void debug_stack_reset(void) { load_idt((const struct desc_ptr *)&idt_descr); } #else /* CONFIG_X86_64 */ DEFINE_PER_CPU(struct task_struct *, current_task) = &init_task; EXPORT_PER_CPU_SYMBOL(current_task); DEFINE_PER_CPU(struct task_struct *, fpu_owner_task); #ifdef CONFIG_CC_STACKPROTECTOR DEFINE_PER_CPU_ALIGNED(struct stack_canary, stack_canary); #endif /* Make sure %fs and %gs are initialized properly in idle threads */ struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs) { memset(regs, 0, sizeof(struct pt_regs)); regs->fs = __KERNEL_PERCPU; regs->gs = __KERNEL_STACK_CANARY; return regs; } #endif /* CONFIG_X86_64 */ /* * Clear all 6 debug registers: */ static void clear_all_debug_regs(void) { int i; for (i = 0; i < 8; i++) { /* Ignore db4, db5 */ if ((i == 4) || (i == 5)) continue; set_debugreg(0, i); } } #ifdef CONFIG_KGDB /* * Restore debug regs if using kgdbwait and you have a kernel debugger * connection established. */ static void dbg_restore_debug_regs(void) { if (unlikely(kgdb_connected && arch_kgdb_ops.correct_hw_break)) arch_kgdb_ops.correct_hw_break(); } #else /* ! CONFIG_KGDB */ #define dbg_restore_debug_regs() #endif /* ! CONFIG_KGDB */ /* * cpu_init() initializes state that is per-CPU. Some data is already * initialized (naturally) in the bootstrap process, such as the GDT * and IDT. We reload them nevertheless, this function acts as a * 'CPU state barrier', nothing should get across. 
* A lot of state is already set up in PDA init for 64 bit */ #ifdef CONFIG_X86_64 void __cpuinit cpu_init(void) { struct orig_ist *oist; struct task_struct *me; struct tss_struct *t; unsigned long v; int cpu; int i; cpu = stack_smp_processor_id(); t = &per_cpu(init_tss, cpu); oist = &per_cpu(orig_ist, cpu); #ifdef CONFIG_NUMA if (cpu != 0 && percpu_read(numa_node) == 0 && early_cpu_to_node(cpu) != NUMA_NO_NODE) set_numa_node(early_cpu_to_node(cpu)); #endif me = current; if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) panic("CPU#%d already initialized!\n", cpu); pr_debug("Initializing CPU#%d\n", cpu); clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE); /* * Initialize the per-CPU GDT with the boot GDT, * and set up the GDT descriptor: */ switch_to_new_gdt(cpu); loadsegment(fs, 0); load_idt((const struct desc_ptr *)&idt_descr); memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8); syscall_init(); wrmsrl(MSR_FS_BASE, 0); wrmsrl(MSR_KERNEL_GS_BASE, 0); barrier(); x86_configure_nx(); if (cpu != 0) enable_x2apic(); /* * set up and load the per-CPU TSS */ if (!oist->ist[0]) { char *estacks = per_cpu(exception_stacks, cpu); for (v = 0; v < N_EXCEPTION_STACKS; v++) { estacks += exception_stack_sizes[v]; oist->ist[v] = t->x86_tss.ist[v] = (unsigned long)estacks; if (v == DEBUG_STACK-1) per_cpu(debug_stack_addr, cpu) = (unsigned long)estacks; } } t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap); /* * <= is required because the CPU will access up to * 8 bits beyond the end of the IO permission bitmap. 
*/ for (i = 0; i <= IO_BITMAP_LONGS; i++) t->io_bitmap[i] = ~0UL; atomic_inc(&init_mm.mm_count); me->active_mm = &init_mm; BUG_ON(me->mm); enter_lazy_tlb(&init_mm, me); load_sp0(t, &current->thread); set_tss_desc(cpu, t); load_TR_desc(); load_LDT(&init_mm.context); clear_all_debug_regs(); dbg_restore_debug_regs(); fpu_init(); xsave_init(); raw_local_save_flags(kernel_eflags); if (is_uv_system()) uv_cpu_init(); } #else void __cpuinit cpu_init(void) { int cpu = smp_processor_id(); struct task_struct *curr = current; struct tss_struct *t = &per_cpu(init_tss, cpu); struct thread_struct *thread = &curr->thread; if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) { printk(KERN_WARNING "CPU#%d already initialized!\n", cpu); for (;;) local_irq_enable(); } printk(KERN_INFO "Initializing CPU#%d\n", cpu); if (cpu_has_vme || cpu_has_tsc || cpu_has_de) clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE); load_idt(&idt_descr); switch_to_new_gdt(cpu); /* * Set up and load the per-CPU TSS and LDT */ atomic_inc(&init_mm.mm_count); curr->active_mm = &init_mm; BUG_ON(curr->mm); enter_lazy_tlb(&init_mm, curr); load_sp0(t, thread); set_tss_desc(cpu, t); load_TR_desc(); load_LDT(&init_mm.context); t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap); #ifdef CONFIG_DOUBLEFAULT /* Set up doublefault TSS pointer in the GDT */ __set_tss_desc(cpu, GDT_ENTRY_DOUBLEFAULT_TSS, &doublefault_tss); #endif clear_all_debug_regs(); dbg_restore_debug_regs(); fpu_init(); xsave_init(); } #endif
gpl-2.0
profglavcho/test
drivers/macintosh/windfarm_pm112.c
4261
18443
/* * Windfarm PowerMac thermal control. * Control loops for machines with SMU and PPC970MP processors. * * Copyright (C) 2005 Paul Mackerras, IBM Corp. <paulus@samba.org> * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corp. * * Use and redistribute under the terms of the GNU GPL v2. */ #include <linux/types.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/device.h> #include <linux/platform_device.h> #include <linux/reboot.h> #include <asm/prom.h> #include <asm/smu.h> #include "windfarm.h" #include "windfarm_pid.h" #define VERSION "0.2" #define DEBUG #undef LOTSA_DEBUG #ifdef DEBUG #define DBG(args...) printk(args) #else #define DBG(args...) do { } while(0) #endif #ifdef LOTSA_DEBUG #define DBG_LOTS(args...) printk(args) #else #define DBG_LOTS(args...) do { } while(0) #endif /* define this to force CPU overtemp to 60 degree, useful for testing * the overtemp code */ #undef HACKED_OVERTEMP /* We currently only handle 2 chips, 4 cores... */ #define NR_CHIPS 2 #define NR_CORES 4 #define NR_CPU_FANS 3 * NR_CHIPS /* Controls and sensors */ static struct wf_sensor *sens_cpu_temp[NR_CORES]; static struct wf_sensor *sens_cpu_power[NR_CORES]; static struct wf_sensor *hd_temp; static struct wf_sensor *slots_power; static struct wf_sensor *u4_temp; static struct wf_control *cpu_fans[NR_CPU_FANS]; static char *cpu_fan_names[NR_CPU_FANS] = { "cpu-rear-fan-0", "cpu-rear-fan-1", "cpu-front-fan-0", "cpu-front-fan-1", "cpu-pump-0", "cpu-pump-1", }; static struct wf_control *cpufreq_clamp; /* Second pump isn't required (and isn't actually present) */ #define CPU_FANS_REQD (NR_CPU_FANS - 2) #define FIRST_PUMP 4 #define LAST_PUMP 5 /* We keep a temperature history for average calculation of 180s */ #define CPU_TEMP_HIST_SIZE 180 /* Scale factor for fan speed, *100 */ static int cpu_fan_scale[NR_CPU_FANS] = { 100, 100, 97, /* inlet fans run at 97% of exhaust fan */ 97, 100, /* updated later */ 100, /* updated later */ }; static struct wf_control *backside_fan; 
static struct wf_control *slots_fan; static struct wf_control *drive_bay_fan; /* PID loop state */ static struct wf_cpu_pid_state cpu_pid[NR_CORES]; static u32 cpu_thist[CPU_TEMP_HIST_SIZE]; static int cpu_thist_pt; static s64 cpu_thist_total; static s32 cpu_all_tmax = 100 << 16; static int cpu_last_target; static struct wf_pid_state backside_pid; static int backside_tick; static struct wf_pid_state slots_pid; static int slots_started; static struct wf_pid_state drive_bay_pid; static int drive_bay_tick; static int nr_cores; static int have_all_controls; static int have_all_sensors; static int started; static int failure_state; #define FAILURE_SENSOR 1 #define FAILURE_FAN 2 #define FAILURE_PERM 4 #define FAILURE_LOW_OVERTEMP 8 #define FAILURE_HIGH_OVERTEMP 16 /* Overtemp values */ #define LOW_OVER_AVERAGE 0 #define LOW_OVER_IMMEDIATE (10 << 16) #define LOW_OVER_CLEAR ((-10) << 16) #define HIGH_OVER_IMMEDIATE (14 << 16) #define HIGH_OVER_AVERAGE (10 << 16) #define HIGH_OVER_IMMEDIATE (14 << 16) /* Implementation... */ static int create_cpu_loop(int cpu) { int chip = cpu / 2; int core = cpu & 1; struct smu_sdbp_header *hdr; struct smu_sdbp_cpupiddata *piddata; struct wf_cpu_pid_param pid; struct wf_control *main_fan = cpu_fans[0]; s32 tmax; int fmin; /* Get PID params from the appropriate SAT */ hdr = smu_sat_get_sdb_partition(chip, 0xC8 + core, NULL); if (hdr == NULL) { printk(KERN_WARNING"windfarm: can't get CPU PID fan config\n"); return -EINVAL; } piddata = (struct smu_sdbp_cpupiddata *)&hdr[1]; /* Get FVT params to get Tmax; if not found, assume default */ hdr = smu_sat_get_sdb_partition(chip, 0xC4 + core, NULL); if (hdr) { struct smu_sdbp_fvt *fvt = (struct smu_sdbp_fvt *)&hdr[1]; tmax = fvt->maxtemp << 16; } else tmax = 95 << 16; /* default to 95 degrees C */ /* We keep a global tmax for overtemp calculations */ if (tmax < cpu_all_tmax) cpu_all_tmax = tmax; /* * Darwin has a minimum fan speed of 1000 rpm for the 4-way and * 515 for the 2-way. 
That appears to be overkill, so for now, * impose a minimum of 750 or 515. */ fmin = (nr_cores > 2) ? 750 : 515; /* Initialize PID loop */ pid.interval = 1; /* seconds */ pid.history_len = piddata->history_len; pid.gd = piddata->gd; pid.gp = piddata->gp; pid.gr = piddata->gr / piddata->history_len; pid.pmaxadj = (piddata->max_power << 16) - (piddata->power_adj << 8); pid.ttarget = tmax - (piddata->target_temp_delta << 16); pid.tmax = tmax; pid.min = main_fan->ops->get_min(main_fan); pid.max = main_fan->ops->get_max(main_fan); if (pid.min < fmin) pid.min = fmin; wf_cpu_pid_init(&cpu_pid[cpu], &pid); return 0; } static void cpu_max_all_fans(void) { int i; /* We max all CPU fans in case of a sensor error. We also do the * cpufreq clamping now, even if it's supposedly done later by the * generic code anyway, we do it earlier here to react faster */ if (cpufreq_clamp) wf_control_set_max(cpufreq_clamp); for (i = 0; i < NR_CPU_FANS; ++i) if (cpu_fans[i]) wf_control_set_max(cpu_fans[i]); } static int cpu_check_overtemp(s32 temp) { int new_state = 0; s32 t_avg, t_old; /* First check for immediate overtemps */ if (temp >= (cpu_all_tmax + LOW_OVER_IMMEDIATE)) { new_state |= FAILURE_LOW_OVERTEMP; if ((failure_state & FAILURE_LOW_OVERTEMP) == 0) printk(KERN_ERR "windfarm: Overtemp due to immediate CPU" " temperature !\n"); } if (temp >= (cpu_all_tmax + HIGH_OVER_IMMEDIATE)) { new_state |= FAILURE_HIGH_OVERTEMP; if ((failure_state & FAILURE_HIGH_OVERTEMP) == 0) printk(KERN_ERR "windfarm: Critical overtemp due to" " immediate CPU temperature !\n"); } /* We calculate a history of max temperatures and use that for the * overtemp management */ t_old = cpu_thist[cpu_thist_pt]; cpu_thist[cpu_thist_pt] = temp; cpu_thist_pt = (cpu_thist_pt + 1) % CPU_TEMP_HIST_SIZE; cpu_thist_total -= t_old; cpu_thist_total += temp; t_avg = cpu_thist_total / CPU_TEMP_HIST_SIZE; DBG_LOTS("t_avg = %d.%03d (out: %d.%03d, in: %d.%03d)\n", FIX32TOPRINT(t_avg), FIX32TOPRINT(t_old), FIX32TOPRINT(temp)); /* Now 
check for average overtemps */ if (t_avg >= (cpu_all_tmax + LOW_OVER_AVERAGE)) { new_state |= FAILURE_LOW_OVERTEMP; if ((failure_state & FAILURE_LOW_OVERTEMP) == 0) printk(KERN_ERR "windfarm: Overtemp due to average CPU" " temperature !\n"); } if (t_avg >= (cpu_all_tmax + HIGH_OVER_AVERAGE)) { new_state |= FAILURE_HIGH_OVERTEMP; if ((failure_state & FAILURE_HIGH_OVERTEMP) == 0) printk(KERN_ERR "windfarm: Critical overtemp due to" " average CPU temperature !\n"); } /* Now handle overtemp conditions. We don't currently use the windfarm * overtemp handling core as it's not fully suited to the needs of those * new machine. This will be fixed later. */ if (new_state) { /* High overtemp -> immediate shutdown */ if (new_state & FAILURE_HIGH_OVERTEMP) machine_power_off(); if ((failure_state & new_state) != new_state) cpu_max_all_fans(); failure_state |= new_state; } else if ((failure_state & FAILURE_LOW_OVERTEMP) && (temp < (cpu_all_tmax + LOW_OVER_CLEAR))) { printk(KERN_ERR "windfarm: Overtemp condition cleared !\n"); failure_state &= ~FAILURE_LOW_OVERTEMP; } return failure_state & (FAILURE_LOW_OVERTEMP | FAILURE_HIGH_OVERTEMP); } static void cpu_fans_tick(void) { int err, cpu; s32 greatest_delta = 0; s32 temp, power, t_max = 0; int i, t, target = 0; struct wf_sensor *sr; struct wf_control *ct; struct wf_cpu_pid_state *sp; DBG_LOTS(KERN_DEBUG); for (cpu = 0; cpu < nr_cores; ++cpu) { /* Get CPU core temperature */ sr = sens_cpu_temp[cpu]; err = sr->ops->get_value(sr, &temp); if (err) { DBG("\n"); printk(KERN_WARNING "windfarm: CPU %d temperature " "sensor error %d\n", cpu, err); failure_state |= FAILURE_SENSOR; cpu_max_all_fans(); return; } /* Keep track of highest temp */ t_max = max(t_max, temp); /* Get CPU power */ sr = sens_cpu_power[cpu]; err = sr->ops->get_value(sr, &power); if (err) { DBG("\n"); printk(KERN_WARNING "windfarm: CPU %d power " "sensor error %d\n", cpu, err); failure_state |= FAILURE_SENSOR; cpu_max_all_fans(); return; } /* Run PID */ sp = 
&cpu_pid[cpu]; t = wf_cpu_pid_run(sp, power, temp); if (cpu == 0 || sp->last_delta > greatest_delta) { greatest_delta = sp->last_delta; target = t; } DBG_LOTS("[%d] P=%d.%.3d T=%d.%.3d ", cpu, FIX32TOPRINT(power), FIX32TOPRINT(temp)); } DBG_LOTS("fans = %d, t_max = %d.%03d\n", target, FIX32TOPRINT(t_max)); /* Darwin limits decrease to 20 per iteration */ if (target < (cpu_last_target - 20)) target = cpu_last_target - 20; cpu_last_target = target; for (cpu = 0; cpu < nr_cores; ++cpu) cpu_pid[cpu].target = target; /* Handle possible overtemps */ if (cpu_check_overtemp(t_max)) return; /* Set fans */ for (i = 0; i < NR_CPU_FANS; ++i) { ct = cpu_fans[i]; if (ct == NULL) continue; err = ct->ops->set_value(ct, target * cpu_fan_scale[i] / 100); if (err) { printk(KERN_WARNING "windfarm: fan %s reports " "error %d\n", ct->name, err); failure_state |= FAILURE_FAN; break; } } } /* Backside/U4 fan */ static struct wf_pid_param backside_param = { .interval = 5, .history_len = 2, .gd = 48 << 20, .gp = 5 << 20, .gr = 0, .itarget = 64 << 16, .additive = 1, }; static void backside_fan_tick(void) { s32 temp; int speed; int err; if (!backside_fan || !u4_temp) return; if (!backside_tick) { /* first time; initialize things */ printk(KERN_INFO "windfarm: Backside control loop started.\n"); backside_param.min = backside_fan->ops->get_min(backside_fan); backside_param.max = backside_fan->ops->get_max(backside_fan); wf_pid_init(&backside_pid, &backside_param); backside_tick = 1; } if (--backside_tick > 0) return; backside_tick = backside_pid.param.interval; err = u4_temp->ops->get_value(u4_temp, &temp); if (err) { printk(KERN_WARNING "windfarm: U4 temp sensor error %d\n", err); failure_state |= FAILURE_SENSOR; wf_control_set_max(backside_fan); return; } speed = wf_pid_run(&backside_pid, temp); DBG_LOTS("backside PID temp=%d.%.3d speed=%d\n", FIX32TOPRINT(temp), speed); err = backside_fan->ops->set_value(backside_fan, speed); if (err) { printk(KERN_WARNING "windfarm: backside fan error 
%d\n", err); failure_state |= FAILURE_FAN; } } /* Drive bay fan */ static struct wf_pid_param drive_bay_prm = { .interval = 5, .history_len = 2, .gd = 30 << 20, .gp = 5 << 20, .gr = 0, .itarget = 40 << 16, .additive = 1, }; static void drive_bay_fan_tick(void) { s32 temp; int speed; int err; if (!drive_bay_fan || !hd_temp) return; if (!drive_bay_tick) { /* first time; initialize things */ printk(KERN_INFO "windfarm: Drive bay control loop started.\n"); drive_bay_prm.min = drive_bay_fan->ops->get_min(drive_bay_fan); drive_bay_prm.max = drive_bay_fan->ops->get_max(drive_bay_fan); wf_pid_init(&drive_bay_pid, &drive_bay_prm); drive_bay_tick = 1; } if (--drive_bay_tick > 0) return; drive_bay_tick = drive_bay_pid.param.interval; err = hd_temp->ops->get_value(hd_temp, &temp); if (err) { printk(KERN_WARNING "windfarm: drive bay temp sensor " "error %d\n", err); failure_state |= FAILURE_SENSOR; wf_control_set_max(drive_bay_fan); return; } speed = wf_pid_run(&drive_bay_pid, temp); DBG_LOTS("drive_bay PID temp=%d.%.3d speed=%d\n", FIX32TOPRINT(temp), speed); err = drive_bay_fan->ops->set_value(drive_bay_fan, speed); if (err) { printk(KERN_WARNING "windfarm: drive bay fan error %d\n", err); failure_state |= FAILURE_FAN; } } /* PCI slots area fan */ /* This makes the fan speed proportional to the power consumed */ static struct wf_pid_param slots_param = { .interval = 1, .history_len = 2, .gd = 0, .gp = 0, .gr = 0x1277952, .itarget = 0, .min = 1560, .max = 3510, }; static void slots_fan_tick(void) { s32 power; int speed; int err; if (!slots_fan || !slots_power) return; if (!slots_started) { /* first time; initialize things */ printk(KERN_INFO "windfarm: Slots control loop started.\n"); wf_pid_init(&slots_pid, &slots_param); slots_started = 1; } err = slots_power->ops->get_value(slots_power, &power); if (err) { printk(KERN_WARNING "windfarm: slots power sensor error %d\n", err); failure_state |= FAILURE_SENSOR; wf_control_set_max(slots_fan); return; } speed = 
wf_pid_run(&slots_pid, power); DBG_LOTS("slots PID power=%d.%.3d speed=%d\n", FIX32TOPRINT(power), speed); err = slots_fan->ops->set_value(slots_fan, speed); if (err) { printk(KERN_WARNING "windfarm: slots fan error %d\n", err); failure_state |= FAILURE_FAN; } } static void set_fail_state(void) { int i; if (cpufreq_clamp) wf_control_set_max(cpufreq_clamp); for (i = 0; i < NR_CPU_FANS; ++i) if (cpu_fans[i]) wf_control_set_max(cpu_fans[i]); if (backside_fan) wf_control_set_max(backside_fan); if (slots_fan) wf_control_set_max(slots_fan); if (drive_bay_fan) wf_control_set_max(drive_bay_fan); } static void pm112_tick(void) { int i, last_failure; if (!started) { started = 1; printk(KERN_INFO "windfarm: CPUs control loops started.\n"); for (i = 0; i < nr_cores; ++i) { if (create_cpu_loop(i) < 0) { failure_state = FAILURE_PERM; set_fail_state(); break; } } DBG_LOTS("cpu_all_tmax=%d.%03d\n", FIX32TOPRINT(cpu_all_tmax)); #ifdef HACKED_OVERTEMP cpu_all_tmax = 60 << 16; #endif } /* Permanent failure, bail out */ if (failure_state & FAILURE_PERM) return; /* Clear all failure bits except low overtemp which will be eventually * cleared by the control loop itself */ last_failure = failure_state; failure_state &= FAILURE_LOW_OVERTEMP; cpu_fans_tick(); backside_fan_tick(); slots_fan_tick(); drive_bay_fan_tick(); DBG_LOTS("last_failure: 0x%x, failure_state: %x\n", last_failure, failure_state); /* Check for failures. 
Any failure causes cpufreq clamping */ if (failure_state && last_failure == 0 && cpufreq_clamp) wf_control_set_max(cpufreq_clamp); if (failure_state == 0 && last_failure && cpufreq_clamp) wf_control_set_min(cpufreq_clamp); /* That's it for now, we might want to deal with other failures * differently in the future though */ } static void pm112_new_control(struct wf_control *ct) { int i, max_exhaust; if (cpufreq_clamp == NULL && !strcmp(ct->name, "cpufreq-clamp")) { if (wf_get_control(ct) == 0) cpufreq_clamp = ct; } for (i = 0; i < NR_CPU_FANS; ++i) { if (!strcmp(ct->name, cpu_fan_names[i])) { if (cpu_fans[i] == NULL && wf_get_control(ct) == 0) cpu_fans[i] = ct; break; } } if (i >= NR_CPU_FANS) { /* not a CPU fan, try the others */ if (!strcmp(ct->name, "backside-fan")) { if (backside_fan == NULL && wf_get_control(ct) == 0) backside_fan = ct; } else if (!strcmp(ct->name, "slots-fan")) { if (slots_fan == NULL && wf_get_control(ct) == 0) slots_fan = ct; } else if (!strcmp(ct->name, "drive-bay-fan")) { if (drive_bay_fan == NULL && wf_get_control(ct) == 0) drive_bay_fan = ct; } return; } for (i = 0; i < CPU_FANS_REQD; ++i) if (cpu_fans[i] == NULL) return; /* work out pump scaling factors */ max_exhaust = cpu_fans[0]->ops->get_max(cpu_fans[0]); for (i = FIRST_PUMP; i <= LAST_PUMP; ++i) if ((ct = cpu_fans[i]) != NULL) cpu_fan_scale[i] = ct->ops->get_max(ct) * 100 / max_exhaust; have_all_controls = 1; } static void pm112_new_sensor(struct wf_sensor *sr) { unsigned int i; if (!strncmp(sr->name, "cpu-temp-", 9)) { i = sr->name[9] - '0'; if (sr->name[10] == 0 && i < NR_CORES && sens_cpu_temp[i] == NULL && wf_get_sensor(sr) == 0) sens_cpu_temp[i] = sr; } else if (!strncmp(sr->name, "cpu-power-", 10)) { i = sr->name[10] - '0'; if (sr->name[11] == 0 && i < NR_CORES && sens_cpu_power[i] == NULL && wf_get_sensor(sr) == 0) sens_cpu_power[i] = sr; } else if (!strcmp(sr->name, "hd-temp")) { if (hd_temp == NULL && wf_get_sensor(sr) == 0) hd_temp = sr; } else if (!strcmp(sr->name, 
"slots-power")) { if (slots_power == NULL && wf_get_sensor(sr) == 0) slots_power = sr; } else if (!strcmp(sr->name, "backside-temp")) { if (u4_temp == NULL && wf_get_sensor(sr) == 0) u4_temp = sr; } else return; /* check if we have all the sensors we need */ for (i = 0; i < nr_cores; ++i) if (sens_cpu_temp[i] == NULL || sens_cpu_power[i] == NULL) return; have_all_sensors = 1; } static int pm112_wf_notify(struct notifier_block *self, unsigned long event, void *data) { switch (event) { case WF_EVENT_NEW_SENSOR: pm112_new_sensor(data); break; case WF_EVENT_NEW_CONTROL: pm112_new_control(data); break; case WF_EVENT_TICK: if (have_all_controls && have_all_sensors) pm112_tick(); } return 0; } static struct notifier_block pm112_events = { .notifier_call = pm112_wf_notify, }; static int wf_pm112_probe(struct platform_device *dev) { wf_register_client(&pm112_events); return 0; } static int wf_pm112_remove(struct platform_device *dev) { wf_unregister_client(&pm112_events); /* should release all sensors and controls */ return 0; } static struct platform_driver wf_pm112_driver = { .probe = wf_pm112_probe, .remove = wf_pm112_remove, .driver = { .name = "windfarm", .owner = THIS_MODULE, }, }; static int __init wf_pm112_init(void) { struct device_node *cpu; if (!of_machine_is_compatible("PowerMac11,2")) return -ENODEV; /* Count the number of CPU cores */ nr_cores = 0; for_each_node_by_type(cpu, "cpu") ++nr_cores; printk(KERN_INFO "windfarm: initializing for dual-core desktop G5\n"); #ifdef MODULE request_module("windfarm_smu_controls"); request_module("windfarm_smu_sensors"); request_module("windfarm_smu_sat"); request_module("windfarm_lm75_sensor"); request_module("windfarm_max6690_sensor"); request_module("windfarm_cpufreq_clamp"); #endif /* MODULE */ platform_driver_register(&wf_pm112_driver); return 0; } static void __exit wf_pm112_exit(void) { platform_driver_unregister(&wf_pm112_driver); } module_init(wf_pm112_init); module_exit(wf_pm112_exit); MODULE_AUTHOR("Paul Mackerras 
<paulus@samba.org>"); MODULE_DESCRIPTION("Thermal control for PowerMac11,2"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:windfarm");
gpl-2.0
jthatch12/SKJT
arch/arm/mach-s3c64xx/setup-ide.c
8101
1223
/* linux/arch/arm/mach-s3c64xx/setup-ide.c * * Copyright (c) 2010 Samsung Electronics Co., Ltd. * http://www.samsung.com/ * * S3C64XX setup information for IDE * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/gpio.h> #include <linux/io.h> #include <mach/map.h> #include <mach/regs-clock.h> #include <plat/gpio-cfg.h> #include <plat/ata.h> void s3c64xx_ide_setup_gpio(void) { u32 reg; reg = readl(S3C_MEM_SYS_CFG) & (~0x3f); /* Independent CF interface, CF chip select configuration */ writel(reg | MEM_SYS_CFG_INDEP_CF | MEM_SYS_CFG_EBI_FIX_PRI_CFCON, S3C_MEM_SYS_CFG); s3c_gpio_cfgpin(S3C64XX_GPB(4), S3C_GPIO_SFN(4)); /* Set XhiDATA[15:0] pins as CF Data[15:0] */ s3c_gpio_cfgpin_range(S3C64XX_GPK(0), 16, S3C_GPIO_SFN(5)); /* Set XhiADDR[2:0] pins as CF ADDR[2:0] */ s3c_gpio_cfgpin_range(S3C64XX_GPL(0), 3, S3C_GPIO_SFN(6)); /* Set Xhi ctrl pins as CF ctrl pins(IORDY, IOWR, IORD, CE[0:1]) */ s3c_gpio_cfgpin(S3C64XX_GPM(5), S3C_GPIO_SFN(1)); s3c_gpio_cfgpin_range(S3C64XX_GPM(0), 5, S3C_GPIO_SFN(6)); }
gpl-2.0
billchen1977/kernel_sony_msm8x60
drivers/input/joystick/magellan.c
9893
5973
/* * Copyright (c) 1999-2001 Vojtech Pavlik */ /* * Magellan and Space Mouse 6dof controller driver for Linux */ /* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * Should you need to contact me, the author, you can do so either by * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail: * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/input.h> #include <linux/serio.h> #include <linux/init.h> #define DRIVER_DESC "Magellan and SpaceMouse 6dof controller driver" MODULE_AUTHOR("Vojtech Pavlik <vojtech@ucw.cz>"); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL"); /* * Definitions & global arrays. */ #define MAGELLAN_MAX_LENGTH 32 static int magellan_buttons[] = { BTN_0, BTN_1, BTN_2, BTN_3, BTN_4, BTN_5, BTN_6, BTN_7, BTN_8 }; static int magellan_axes[] = { ABS_X, ABS_Y, ABS_Z, ABS_RX, ABS_RY, ABS_RZ }; /* * Per-Magellan data. */ struct magellan { struct input_dev *dev; int idx; unsigned char data[MAGELLAN_MAX_LENGTH]; char phys[32]; }; /* * magellan_crunch_nibbles() verifies that the bytes sent from the Magellan * have correct upper nibbles for the lower ones, if not, the packet will * be thrown away. It also strips these upper halves to simplify further * processing. 
*/ static int magellan_crunch_nibbles(unsigned char *data, int count) { static unsigned char nibbles[16] = "0AB3D56GH9:K<MN?"; do { if (data[count] == nibbles[data[count] & 0xf]) data[count] = data[count] & 0xf; else return -1; } while (--count); return 0; } static void magellan_process_packet(struct magellan* magellan) { struct input_dev *dev = magellan->dev; unsigned char *data = magellan->data; int i, t; if (!magellan->idx) return; switch (magellan->data[0]) { case 'd': /* Axis data */ if (magellan->idx != 25) return; if (magellan_crunch_nibbles(data, 24)) return; for (i = 0; i < 6; i++) input_report_abs(dev, magellan_axes[i], (data[(i << 2) + 1] << 12 | data[(i << 2) + 2] << 8 | data[(i << 2) + 3] << 4 | data[(i << 2) + 4]) - 32768); break; case 'k': /* Button data */ if (magellan->idx != 4) return; if (magellan_crunch_nibbles(data, 3)) return; t = (data[1] << 1) | (data[2] << 5) | data[3]; for (i = 0; i < 9; i++) input_report_key(dev, magellan_buttons[i], (t >> i) & 1); break; } input_sync(dev); } static irqreturn_t magellan_interrupt(struct serio *serio, unsigned char data, unsigned int flags) { struct magellan* magellan = serio_get_drvdata(serio); if (data == '\r') { magellan_process_packet(magellan); magellan->idx = 0; } else { if (magellan->idx < MAGELLAN_MAX_LENGTH) magellan->data[magellan->idx++] = data; } return IRQ_HANDLED; } /* * magellan_disconnect() is the opposite of magellan_connect() */ static void magellan_disconnect(struct serio *serio) { struct magellan* magellan = serio_get_drvdata(serio); serio_close(serio); serio_set_drvdata(serio, NULL); input_unregister_device(magellan->dev); kfree(magellan); } /* * magellan_connect() is the routine that is called when someone adds a * new serio device that supports Magellan protocol and registers it as * an input device. 
*/ static int magellan_connect(struct serio *serio, struct serio_driver *drv) { struct magellan *magellan; struct input_dev *input_dev; int err = -ENOMEM; int i; magellan = kzalloc(sizeof(struct magellan), GFP_KERNEL); input_dev = input_allocate_device(); if (!magellan || !input_dev) goto fail1; magellan->dev = input_dev; snprintf(magellan->phys, sizeof(magellan->phys), "%s/input0", serio->phys); input_dev->name = "LogiCad3D Magellan / SpaceMouse"; input_dev->phys = magellan->phys; input_dev->id.bustype = BUS_RS232; input_dev->id.vendor = SERIO_MAGELLAN; input_dev->id.product = 0x0001; input_dev->id.version = 0x0100; input_dev->dev.parent = &serio->dev; input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS); for (i = 0; i < 9; i++) set_bit(magellan_buttons[i], input_dev->keybit); for (i = 0; i < 6; i++) input_set_abs_params(input_dev, magellan_axes[i], -360, 360, 0, 0); serio_set_drvdata(serio, magellan); err = serio_open(serio, drv); if (err) goto fail2; err = input_register_device(magellan->dev); if (err) goto fail3; return 0; fail3: serio_close(serio); fail2: serio_set_drvdata(serio, NULL); fail1: input_free_device(input_dev); kfree(magellan); return err; } /* * The serio driver structure. */ static struct serio_device_id magellan_serio_ids[] = { { .type = SERIO_RS232, .proto = SERIO_MAGELLAN, .id = SERIO_ANY, .extra = SERIO_ANY, }, { 0 } }; MODULE_DEVICE_TABLE(serio, magellan_serio_ids); static struct serio_driver magellan_drv = { .driver = { .name = "magellan", }, .description = DRIVER_DESC, .id_table = magellan_serio_ids, .interrupt = magellan_interrupt, .connect = magellan_connect, .disconnect = magellan_disconnect, }; /* * The functions for inserting/removing us as a module. */ static int __init magellan_init(void) { return serio_register_driver(&magellan_drv); } static void __exit magellan_exit(void) { serio_unregister_driver(&magellan_drv); } module_init(magellan_init); module_exit(magellan_exit);
gpl-2.0
arnavgosain/android_kernel_sony_msm8x27
arch/alpha/kernel/core_lca.c
11941
14094
/* * linux/arch/alpha/kernel/core_lca.c * * Written by David Mosberger (davidm@cs.arizona.edu) with some code * taken from Dave Rusling's (david.rusling@reo.mts.dec.com) 32-bit * bios code. * * Code common to all LCA core logic chips. */ #define __EXTERN_INLINE inline #include <asm/io.h> #include <asm/core_lca.h> #undef __EXTERN_INLINE #include <linux/types.h> #include <linux/pci.h> #include <linux/init.h> #include <linux/tty.h> #include <asm/ptrace.h> #include <asm/irq_regs.h> #include <asm/smp.h> #include "proto.h" #include "pci_impl.h" /* * BIOS32-style PCI interface: */ /* * Machine check reasons. Defined according to PALcode sources * (osf.h and platform.h). */ #define MCHK_K_TPERR 0x0080 #define MCHK_K_TCPERR 0x0082 #define MCHK_K_HERR 0x0084 #define MCHK_K_ECC_C 0x0086 #define MCHK_K_ECC_NC 0x0088 #define MCHK_K_UNKNOWN 0x008A #define MCHK_K_CACKSOFT 0x008C #define MCHK_K_BUGCHECK 0x008E #define MCHK_K_OS_BUGCHECK 0x0090 #define MCHK_K_DCPERR 0x0092 #define MCHK_K_ICPERR 0x0094 /* * Platform-specific machine-check reasons: */ #define MCHK_K_SIO_SERR 0x204 /* all platforms so far */ #define MCHK_K_SIO_IOCHK 0x206 /* all platforms so far */ #define MCHK_K_DCSR 0x208 /* all but Noname */ /* * Given a bus, device, and function number, compute resulting * configuration space address and setup the LCA_IOC_CONF register * accordingly. It is therefore not safe to have concurrent * invocations to configuration space access routines, but there * really shouldn't be any need for this. * * Type 0: * * 3 3|3 3 2 2|2 2 2 2|2 2 2 2|1 1 1 1|1 1 1 1|1 1 * 3 2|1 0 9 8|7 6 5 4|3 2 1 0|9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | | | | | | | | | | | | | | | | | | | | | | | |F|F|F|R|R|R|R|R|R|0|0| * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * * 31:11 Device select bit. 
* 10:8 Function number * 7:2 Register number * * Type 1: * * 3 3|3 3 2 2|2 2 2 2|2 2 2 2|1 1 1 1|1 1 1 1|1 1 * 3 2|1 0 9 8|7 6 5 4|3 2 1 0|9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | | | | | | | | | | |B|B|B|B|B|B|B|B|D|D|D|D|D|F|F|F|R|R|R|R|R|R|0|1| * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * * 31:24 reserved * 23:16 bus number (8 bits = 128 possible buses) * 15:11 Device number (5 bits) * 10:8 function number * 7:2 register number * * Notes: * The function number selects which function of a multi-function device * (e.g., SCSI and Ethernet). * * The register selects a DWORD (32 bit) register offset. Hence it * doesn't get shifted by 2 bits as we want to "drop" the bottom two * bits. */ static int mk_conf_addr(struct pci_bus *pbus, unsigned int device_fn, int where, unsigned long *pci_addr) { unsigned long addr; u8 bus = pbus->number; if (bus == 0) { int device = device_fn >> 3; int func = device_fn & 0x7; /* Type 0 configuration cycle. */ if (device > 12) { return -1; } *(vulp)LCA_IOC_CONF = 0; addr = (1 << (11 + device)) | (func << 8) | where; } else { /* Type 1 configuration cycle. */ *(vulp)LCA_IOC_CONF = 1; addr = (bus << 16) | (device_fn << 8) | where; } *pci_addr = addr; return 0; } static unsigned int conf_read(unsigned long addr) { unsigned long flags, code, stat0; unsigned int value; local_irq_save(flags); /* Reset status register to avoid losing errors. */ stat0 = *(vulp)LCA_IOC_STAT0; *(vulp)LCA_IOC_STAT0 = stat0; mb(); /* Access configuration space. */ value = *(vuip)addr; draina(); stat0 = *(vulp)LCA_IOC_STAT0; if (stat0 & LCA_IOC_STAT0_ERR) { code = ((stat0 >> LCA_IOC_STAT0_CODE_SHIFT) & LCA_IOC_STAT0_CODE_MASK); if (code != 1) { printk("lca.c:conf_read: got stat0=%lx\n", stat0); } /* Reset error status. */ *(vulp)LCA_IOC_STAT0 = stat0; mb(); /* Reset machine check. 
*/ wrmces(0x7); value = 0xffffffff; } local_irq_restore(flags); return value; } static void conf_write(unsigned long addr, unsigned int value) { unsigned long flags, code, stat0; local_irq_save(flags); /* avoid getting hit by machine check */ /* Reset status register to avoid losing errors. */ stat0 = *(vulp)LCA_IOC_STAT0; *(vulp)LCA_IOC_STAT0 = stat0; mb(); /* Access configuration space. */ *(vuip)addr = value; draina(); stat0 = *(vulp)LCA_IOC_STAT0; if (stat0 & LCA_IOC_STAT0_ERR) { code = ((stat0 >> LCA_IOC_STAT0_CODE_SHIFT) & LCA_IOC_STAT0_CODE_MASK); if (code != 1) { printk("lca.c:conf_write: got stat0=%lx\n", stat0); } /* Reset error status. */ *(vulp)LCA_IOC_STAT0 = stat0; mb(); /* Reset machine check. */ wrmces(0x7); } local_irq_restore(flags); } static int lca_read_config(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *value) { unsigned long addr, pci_addr; long mask; int shift; if (mk_conf_addr(bus, devfn, where, &pci_addr)) return PCIBIOS_DEVICE_NOT_FOUND; shift = (where & 3) * 8; mask = (size - 1) * 8; addr = (pci_addr << 5) + mask + LCA_CONF; *value = conf_read(addr) >> (shift); return PCIBIOS_SUCCESSFUL; } static int lca_write_config(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 value) { unsigned long addr, pci_addr; long mask; if (mk_conf_addr(bus, devfn, where, &pci_addr)) return PCIBIOS_DEVICE_NOT_FOUND; mask = (size - 1) * 8; addr = (pci_addr << 5) + mask + LCA_CONF; conf_write(addr, value << ((where & 3) * 8)); return PCIBIOS_SUCCESSFUL; } struct pci_ops lca_pci_ops = { .read = lca_read_config, .write = lca_write_config, }; void lca_pci_tbi(struct pci_controller *hose, dma_addr_t start, dma_addr_t end) { wmb(); *(vulp)LCA_IOC_TBIA = 0; mb(); } void __init lca_init_arch(void) { struct pci_controller *hose; /* * Create our single hose. 
*/ pci_isa_hose = hose = alloc_pci_controller(); hose->io_space = &ioport_resource; hose->mem_space = &iomem_resource; hose->index = 0; hose->sparse_mem_base = LCA_SPARSE_MEM - IDENT_ADDR; hose->dense_mem_base = LCA_DENSE_MEM - IDENT_ADDR; hose->sparse_io_base = LCA_IO - IDENT_ADDR; hose->dense_io_base = 0; /* * Set up the PCI to main memory translation windows. * * Mimic the SRM settings for the direct-map window. * Window 0 is scatter-gather 8MB at 8MB (for isa). * Window 1 is direct access 1GB at 1GB. * * Note that we do not try to save any of the DMA window CSRs * before setting them, since we cannot read those CSRs on LCA. */ hose->sg_isa = iommu_arena_new(hose, 0x00800000, 0x00800000, 0); hose->sg_pci = NULL; __direct_map_base = 0x40000000; __direct_map_size = 0x40000000; *(vulp)LCA_IOC_W_BASE0 = hose->sg_isa->dma_base | (3UL << 32); *(vulp)LCA_IOC_W_MASK0 = (hose->sg_isa->size - 1) & 0xfff00000; *(vulp)LCA_IOC_T_BASE0 = virt_to_phys(hose->sg_isa->ptes); *(vulp)LCA_IOC_W_BASE1 = __direct_map_base | (2UL << 32); *(vulp)LCA_IOC_W_MASK1 = (__direct_map_size - 1) & 0xfff00000; *(vulp)LCA_IOC_T_BASE1 = 0; *(vulp)LCA_IOC_TB_ENA = 0x80; lca_pci_tbi(hose, 0, -1); /* * Disable PCI parity for now. The NCR53c810 chip has * troubles meeting the PCI spec which results in * data parity errors. */ *(vulp)LCA_IOC_PAR_DIS = 1UL<<5; /* * Finally, set up for restoring the correct HAE if using SRM. * Again, since we cannot read many of the CSRs on the LCA, * one of which happens to be the HAE, we save the value that * the SRM will expect... */ if (alpha_using_srm) srm_hae = 0x80000000UL; } /* * Constants used during machine-check handling. I suppose these * could be moved into lca.h but I don't see much reason why anybody * else would want to use them. 
*/ #define ESR_EAV (1UL<< 0) /* error address valid */ #define ESR_CEE (1UL<< 1) /* correctable error */ #define ESR_UEE (1UL<< 2) /* uncorrectable error */ #define ESR_WRE (1UL<< 3) /* write-error */ #define ESR_SOR (1UL<< 4) /* error source */ #define ESR_CTE (1UL<< 7) /* cache-tag error */ #define ESR_MSE (1UL<< 9) /* multiple soft errors */ #define ESR_MHE (1UL<<10) /* multiple hard errors */ #define ESR_NXM (1UL<<12) /* non-existent memory */ #define IOC_ERR ( 1<<4) /* ioc logs an error */ #define IOC_CMD_SHIFT 0 #define IOC_CMD (0xf<<IOC_CMD_SHIFT) #define IOC_CODE_SHIFT 8 #define IOC_CODE (0xf<<IOC_CODE_SHIFT) #define IOC_LOST ( 1<<5) #define IOC_P_NBR ((__u32) ~((1<<13) - 1)) static void mem_error(unsigned long esr, unsigned long ear) { printk(" %s %s error to %s occurred at address %x\n", ((esr & ESR_CEE) ? "Correctable" : (esr & ESR_UEE) ? "Uncorrectable" : "A"), (esr & ESR_WRE) ? "write" : "read", (esr & ESR_SOR) ? "memory" : "b-cache", (unsigned) (ear & 0x1ffffff8)); if (esr & ESR_CTE) { printk(" A b-cache tag parity error was detected.\n"); } if (esr & ESR_MSE) { printk(" Several other correctable errors occurred.\n"); } if (esr & ESR_MHE) { printk(" Several other uncorrectable errors occurred.\n"); } if (esr & ESR_NXM) { printk(" Attempted to access non-existent memory.\n"); } } static void ioc_error(__u32 stat0, __u32 stat1) { static const char * const pci_cmd[] = { "Interrupt Acknowledge", "Special", "I/O Read", "I/O Write", "Rsvd 1", "Rsvd 2", "Memory Read", "Memory Write", "Rsvd3", "Rsvd4", "Configuration Read", "Configuration Write", "Memory Read Multiple", "Dual Address", "Memory Read Line", "Memory Write and Invalidate" }; static const char * const err_name[] = { "exceeded retry limit", "no device", "bad data parity", "target abort", "bad address parity", "page table read error", "invalid page", "data error" }; unsigned code = (stat0 & IOC_CODE) >> IOC_CODE_SHIFT; unsigned cmd = (stat0 & IOC_CMD) >> IOC_CMD_SHIFT; printk(" %s initiated PCI %s 
cycle to address %x" " failed due to %s.\n", code > 3 ? "PCI" : "CPU", pci_cmd[cmd], stat1, err_name[code]); if (code == 5 || code == 6) { printk(" (Error occurred at PCI memory address %x.)\n", (stat0 & ~IOC_P_NBR)); } if (stat0 & IOC_LOST) { printk(" Other PCI errors occurred simultaneously.\n"); } } void lca_machine_check(unsigned long vector, unsigned long la_ptr) { const char * reason; union el_lca el; el.c = (struct el_common *) la_ptr; wrmces(rdmces()); /* reset machine check pending flag */ printk(KERN_CRIT "LCA machine check: vector=%#lx pc=%#lx code=%#x\n", vector, get_irq_regs()->pc, (unsigned int) el.c->code); /* * The first quadword after the common header always seems to * be the machine check reason---don't know why this isn't * part of the common header instead. In the case of a long * logout frame, the upper 32 bits is the machine check * revision level, which we ignore for now. */ switch ((unsigned int) el.c->code) { case MCHK_K_TPERR: reason = "tag parity error"; break; case MCHK_K_TCPERR: reason = "tag control parity error"; break; case MCHK_K_HERR: reason = "access to non-existent memory"; break; case MCHK_K_ECC_C: reason = "correctable ECC error"; break; case MCHK_K_ECC_NC: reason = "non-correctable ECC error"; break; case MCHK_K_CACKSOFT: reason = "MCHK_K_CACKSOFT"; break; case MCHK_K_BUGCHECK: reason = "illegal exception in PAL mode"; break; case MCHK_K_OS_BUGCHECK: reason = "callsys in kernel mode"; break; case MCHK_K_DCPERR: reason = "d-cache parity error"; break; case MCHK_K_ICPERR: reason = "i-cache parity error"; break; case MCHK_K_SIO_SERR: reason = "SIO SERR occurred on PCI bus"; break; case MCHK_K_SIO_IOCHK: reason = "SIO IOCHK occurred on ISA bus"; break; case MCHK_K_DCSR: reason = "MCHK_K_DCSR"; break; case MCHK_K_UNKNOWN: default: reason = "unknown"; break; } switch (el.c->size) { case sizeof(struct el_lca_mcheck_short): printk(KERN_CRIT " Reason: %s (short frame%s, dc_stat=%#lx):\n", reason, el.c->retry ? 
", retryable" : "", el.s->dc_stat); if (el.s->esr & ESR_EAV) { mem_error(el.s->esr, el.s->ear); } if (el.s->ioc_stat0 & IOC_ERR) { ioc_error(el.s->ioc_stat0, el.s->ioc_stat1); } break; case sizeof(struct el_lca_mcheck_long): printk(KERN_CRIT " Reason: %s (long frame%s):\n", reason, el.c->retry ? ", retryable" : ""); printk(KERN_CRIT " reason: %#lx exc_addr: %#lx dc_stat: %#lx\n", el.l->pt[0], el.l->exc_addr, el.l->dc_stat); printk(KERN_CRIT " car: %#lx\n", el.l->car); if (el.l->esr & ESR_EAV) { mem_error(el.l->esr, el.l->ear); } if (el.l->ioc_stat0 & IOC_ERR) { ioc_error(el.l->ioc_stat0, el.l->ioc_stat1); } break; default: printk(KERN_CRIT " Unknown errorlog size %d\n", el.c->size); } /* Dump the logout area to give all info. */ #ifdef CONFIG_VERBOSE_MCHECK if (alpha_verbose_mcheck > 1) { unsigned long * ptr = (unsigned long *) la_ptr; long i; for (i = 0; i < el.c->size / sizeof(long); i += 2) { printk(KERN_CRIT " +%8lx %016lx %016lx\n", i*sizeof(long), ptr[i], ptr[i+1]); } } #endif /* CONFIG_VERBOSE_MCHECK */ } /* * The following routines are needed to support the SPEED changing * necessary to successfully manage the thermal problem on the AlphaBook1. */ void lca_clock_print(void) { long pmr_reg; pmr_reg = LCA_READ_PMR; printk("Status of clock control:\n"); printk("\tPrimary clock divisor\t0x%lx\n", LCA_GET_PRIMARY(pmr_reg)); printk("\tOverride clock divisor\t0x%lx\n", LCA_GET_OVERRIDE(pmr_reg)); printk("\tInterrupt override is %s\n", (pmr_reg & LCA_PMR_INTO) ? "on" : "off"); printk("\tDMA override is %s\n", (pmr_reg & LCA_PMR_DMAO) ? "on" : "off"); } int lca_get_clock(void) { long pmr_reg; pmr_reg = LCA_READ_PMR; return(LCA_GET_PRIMARY(pmr_reg)); } void lca_clock_fiddle(int divisor) { long pmr_reg; pmr_reg = LCA_READ_PMR; LCA_SET_PRIMARY_CLOCK(pmr_reg, divisor); /* lca_norm_clock = divisor; */ LCA_WRITE_PMR(pmr_reg); mb(); }
gpl-2.0
SlimForce/kernel_lge_hammerhead
arch/alpha/kernel/core_mcpcia.c
11941
16130
/* * linux/arch/alpha/kernel/core_mcpcia.c * * Based on code written by David A Rusling (david.rusling@reo.mts.dec.com). * * Code common to all MCbus-PCI Adaptor core logic chipsets */ #define __EXTERN_INLINE inline #include <asm/io.h> #include <asm/core_mcpcia.h> #undef __EXTERN_INLINE #include <linux/types.h> #include <linux/pci.h> #include <linux/sched.h> #include <linux/init.h> #include <linux/delay.h> #include <asm/ptrace.h> #include "proto.h" #include "pci_impl.h" /* * NOTE: Herein lie back-to-back mb instructions. They are magic. * One plausible explanation is that the i/o controller does not properly * handle the system transaction. Another involves timing. Ho hum. */ /* * BIOS32-style PCI interface: */ #define DEBUG_CFG 0 #if DEBUG_CFG # define DBG_CFG(args) printk args #else # define DBG_CFG(args) #endif /* * Given a bus, device, and function number, compute resulting * configuration space address and setup the MCPCIA_HAXR2 register * accordingly. It is therefore not safe to have concurrent * invocations to configuration space access routines, but there * really shouldn't be any need for this. * * Type 0: * * 3 3|3 3 2 2|2 2 2 2|2 2 2 2|1 1 1 1|1 1 1 1|1 1 * 3 2|1 0 9 8|7 6 5 4|3 2 1 0|9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | | |D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|F|F|F|R|R|R|R|R|R|0|0| * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * * 31:11 Device select bit. 
* 10:8 Function number * 7:2 Register number * * Type 1: * * 3 3|3 3 2 2|2 2 2 2|2 2 2 2|1 1 1 1|1 1 1 1|1 1 * 3 2|1 0 9 8|7 6 5 4|3 2 1 0|9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | | | | | | | | | | |B|B|B|B|B|B|B|B|D|D|D|D|D|F|F|F|R|R|R|R|R|R|0|1| * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * * 31:24 reserved * 23:16 bus number (8 bits = 128 possible buses) * 15:11 Device number (5 bits) * 10:8 function number * 7:2 register number * * Notes: * The function number selects which function of a multi-function device * (e.g., SCSI and Ethernet). * * The register selects a DWORD (32 bit) register offset. Hence it * doesn't get shifted by 2 bits as we want to "drop" the bottom two * bits. */ static unsigned int conf_read(unsigned long addr, unsigned char type1, struct pci_controller *hose) { unsigned long flags; unsigned long mid = MCPCIA_HOSE2MID(hose->index); unsigned int stat0, value, cpu; cpu = smp_processor_id(); local_irq_save(flags); DBG_CFG(("conf_read(addr=0x%lx, type1=%d, hose=%d)\n", addr, type1, mid)); /* Reset status register to avoid losing errors. */ stat0 = *(vuip)MCPCIA_CAP_ERR(mid); *(vuip)MCPCIA_CAP_ERR(mid) = stat0; mb(); *(vuip)MCPCIA_CAP_ERR(mid); DBG_CFG(("conf_read: MCPCIA_CAP_ERR(%d) was 0x%x\n", mid, stat0)); mb(); draina(); mcheck_expected(cpu) = 1; mcheck_taken(cpu) = 0; mcheck_extra(cpu) = mid; mb(); /* Access configuration space. 
*/ value = *((vuip)addr); mb(); mb(); /* magic */ if (mcheck_taken(cpu)) { mcheck_taken(cpu) = 0; value = 0xffffffffU; mb(); } mcheck_expected(cpu) = 0; mb(); DBG_CFG(("conf_read(): finished\n")); local_irq_restore(flags); return value; } static void conf_write(unsigned long addr, unsigned int value, unsigned char type1, struct pci_controller *hose) { unsigned long flags; unsigned long mid = MCPCIA_HOSE2MID(hose->index); unsigned int stat0, cpu; cpu = smp_processor_id(); local_irq_save(flags); /* avoid getting hit by machine check */ /* Reset status register to avoid losing errors. */ stat0 = *(vuip)MCPCIA_CAP_ERR(mid); *(vuip)MCPCIA_CAP_ERR(mid) = stat0; mb(); *(vuip)MCPCIA_CAP_ERR(mid); DBG_CFG(("conf_write: MCPCIA CAP_ERR(%d) was 0x%x\n", mid, stat0)); draina(); mcheck_expected(cpu) = 1; mcheck_extra(cpu) = mid; mb(); /* Access configuration space. */ *((vuip)addr) = value; mb(); mb(); /* magic */ *(vuip)MCPCIA_CAP_ERR(mid); /* read to force the write */ mcheck_expected(cpu) = 0; mb(); DBG_CFG(("conf_write(): finished\n")); local_irq_restore(flags); } static int mk_conf_addr(struct pci_bus *pbus, unsigned int devfn, int where, struct pci_controller *hose, unsigned long *pci_addr, unsigned char *type1) { u8 bus = pbus->number; unsigned long addr; DBG_CFG(("mk_conf_addr(bus=%d,devfn=0x%x,hose=%d,where=0x%x," " pci_addr=0x%p, type1=0x%p)\n", bus, devfn, hose->index, where, pci_addr, type1)); /* Type 1 configuration cycle for *ALL* busses. */ *type1 = 1; if (!pbus->parent) /* No parent means peer PCI bus. 
*/ bus = 0; addr = (bus << 16) | (devfn << 8) | (where); addr <<= 5; /* swizzle for SPARSE */ addr |= hose->config_space_base; *pci_addr = addr; DBG_CFG(("mk_conf_addr: returning pci_addr 0x%lx\n", addr)); return 0; } static int mcpcia_read_config(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *value) { struct pci_controller *hose = bus->sysdata; unsigned long addr, w; unsigned char type1; if (mk_conf_addr(bus, devfn, where, hose, &addr, &type1)) return PCIBIOS_DEVICE_NOT_FOUND; addr |= (size - 1) * 8; w = conf_read(addr, type1, hose); switch (size) { case 1: *value = __kernel_extbl(w, where & 3); break; case 2: *value = __kernel_extwl(w, where & 3); break; case 4: *value = w; break; } return PCIBIOS_SUCCESSFUL; } static int mcpcia_write_config(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 value) { struct pci_controller *hose = bus->sysdata; unsigned long addr; unsigned char type1; if (mk_conf_addr(bus, devfn, where, hose, &addr, &type1)) return PCIBIOS_DEVICE_NOT_FOUND; addr |= (size - 1) * 8; value = __kernel_insql(value, where & 3); conf_write(addr, value, type1, hose); return PCIBIOS_SUCCESSFUL; } struct pci_ops mcpcia_pci_ops = { .read = mcpcia_read_config, .write = mcpcia_write_config, }; void mcpcia_pci_tbi(struct pci_controller *hose, dma_addr_t start, dma_addr_t end) { wmb(); *(vuip)MCPCIA_SG_TBIA(MCPCIA_HOSE2MID(hose->index)) = 0; mb(); } static int __init mcpcia_probe_hose(int h) { int cpu = smp_processor_id(); int mid = MCPCIA_HOSE2MID(h); unsigned int pci_rev; /* Gotta be REAL careful. If hose is absent, we get an mcheck. */ mb(); mb(); draina(); wrmces(7); mcheck_expected(cpu) = 2; /* indicates probing */ mcheck_taken(cpu) = 0; mcheck_extra(cpu) = mid; mb(); /* Access the bus revision word. 
*/ pci_rev = *(vuip)MCPCIA_REV(mid); mb(); mb(); /* magic */ if (mcheck_taken(cpu)) { mcheck_taken(cpu) = 0; pci_rev = 0xffffffff; mb(); } mcheck_expected(cpu) = 0; mb(); return (pci_rev >> 16) == PCI_CLASS_BRIDGE_HOST; } static void __init mcpcia_new_hose(int h) { struct pci_controller *hose; struct resource *io, *mem, *hae_mem; int mid = MCPCIA_HOSE2MID(h); hose = alloc_pci_controller(); if (h == 0) pci_isa_hose = hose; io = alloc_resource(); mem = alloc_resource(); hae_mem = alloc_resource(); hose->io_space = io; hose->mem_space = hae_mem; hose->sparse_mem_base = MCPCIA_SPARSE(mid) - IDENT_ADDR; hose->dense_mem_base = MCPCIA_DENSE(mid) - IDENT_ADDR; hose->sparse_io_base = MCPCIA_IO(mid) - IDENT_ADDR; hose->dense_io_base = 0; hose->config_space_base = MCPCIA_CONF(mid); hose->index = h; io->start = MCPCIA_IO(mid) - MCPCIA_IO_BIAS; io->end = io->start + 0xffff; io->name = pci_io_names[h]; io->flags = IORESOURCE_IO; mem->start = MCPCIA_DENSE(mid) - MCPCIA_MEM_BIAS; mem->end = mem->start + 0xffffffff; mem->name = pci_mem_names[h]; mem->flags = IORESOURCE_MEM; hae_mem->start = mem->start; hae_mem->end = mem->start + MCPCIA_MEM_MASK; hae_mem->name = pci_hae0_name; hae_mem->flags = IORESOURCE_MEM; if (request_resource(&ioport_resource, io) < 0) printk(KERN_ERR "Failed to request IO on hose %d\n", h); if (request_resource(&iomem_resource, mem) < 0) printk(KERN_ERR "Failed to request MEM on hose %d\n", h); if (request_resource(mem, hae_mem) < 0) printk(KERN_ERR "Failed to request HAE_MEM on hose %d\n", h); } static void mcpcia_pci_clr_err(int mid) { *(vuip)MCPCIA_CAP_ERR(mid); *(vuip)MCPCIA_CAP_ERR(mid) = 0xffffffff; /* Clear them all. */ mb(); *(vuip)MCPCIA_CAP_ERR(mid); /* Re-read for force write. */ } static void __init mcpcia_startup_hose(struct pci_controller *hose) { int mid = MCPCIA_HOSE2MID(hose->index); unsigned int tmp; mcpcia_pci_clr_err(mid); /* * Set up error reporting. 
*/ tmp = *(vuip)MCPCIA_CAP_ERR(mid); tmp |= 0x0006; /* master/target abort */ *(vuip)MCPCIA_CAP_ERR(mid) = tmp; mb(); tmp = *(vuip)MCPCIA_CAP_ERR(mid); /* * Set up the PCI->physical memory translation windows. * * Window 0 is scatter-gather 8MB at 8MB (for isa) * Window 1 is scatter-gather (up to) 1GB at 1GB (for pci) * Window 2 is direct access 2GB at 2GB */ hose->sg_isa = iommu_arena_new(hose, 0x00800000, 0x00800000, 0); hose->sg_pci = iommu_arena_new(hose, 0x40000000, size_for_memory(0x40000000), 0); __direct_map_base = 0x80000000; __direct_map_size = 0x80000000; *(vuip)MCPCIA_W0_BASE(mid) = hose->sg_isa->dma_base | 3; *(vuip)MCPCIA_W0_MASK(mid) = (hose->sg_isa->size - 1) & 0xfff00000; *(vuip)MCPCIA_T0_BASE(mid) = virt_to_phys(hose->sg_isa->ptes) >> 8; *(vuip)MCPCIA_W1_BASE(mid) = hose->sg_pci->dma_base | 3; *(vuip)MCPCIA_W1_MASK(mid) = (hose->sg_pci->size - 1) & 0xfff00000; *(vuip)MCPCIA_T1_BASE(mid) = virt_to_phys(hose->sg_pci->ptes) >> 8; *(vuip)MCPCIA_W2_BASE(mid) = __direct_map_base | 1; *(vuip)MCPCIA_W2_MASK(mid) = (__direct_map_size - 1) & 0xfff00000; *(vuip)MCPCIA_T2_BASE(mid) = 0; *(vuip)MCPCIA_W3_BASE(mid) = 0x0; mcpcia_pci_tbi(hose, 0, -1); *(vuip)MCPCIA_HBASE(mid) = 0x0; mb(); *(vuip)MCPCIA_HAE_MEM(mid) = 0U; mb(); *(vuip)MCPCIA_HAE_MEM(mid); /* read it back. */ *(vuip)MCPCIA_HAE_IO(mid) = 0; mb(); *(vuip)MCPCIA_HAE_IO(mid); /* read it back. */ } void __init mcpcia_init_arch(void) { /* With multiple PCI busses, we play with I/O as physical addrs. */ ioport_resource.end = ~0UL; /* Allocate hose 0. That's the one that all the ISA junk hangs off of, from which we'll be registering stuff here in a bit. Other hose detection is done in mcpcia_init_hoses, which is called from init_IRQ. */ mcpcia_new_hose(0); } /* This is called from init_IRQ, since we cannot take interrupts before then. Which means we cannot do this in init_arch. 
*/ void __init mcpcia_init_hoses(void) { struct pci_controller *hose; int hose_count; int h; /* First, find how many hoses we have. */ hose_count = 0; for (h = 0; h < MCPCIA_MAX_HOSES; ++h) { if (mcpcia_probe_hose(h)) { if (h != 0) mcpcia_new_hose(h); hose_count++; } } printk("mcpcia_init_hoses: found %d hoses\n", hose_count); /* Now do init for each hose. */ for (hose = hose_head; hose; hose = hose->next) mcpcia_startup_hose(hose); } static void mcpcia_print_uncorrectable(struct el_MCPCIA_uncorrected_frame_mcheck *logout) { struct el_common_EV5_uncorrectable_mcheck *frame; int i; frame = &logout->procdata; /* Print PAL fields */ for (i = 0; i < 24; i += 2) { printk(" paltmp[%d-%d] = %16lx %16lx\n", i, i+1, frame->paltemp[i], frame->paltemp[i+1]); } for (i = 0; i < 8; i += 2) { printk(" shadow[%d-%d] = %16lx %16lx\n", i, i+1, frame->shadow[i], frame->shadow[i+1]); } printk(" Addr of excepting instruction = %16lx\n", frame->exc_addr); printk(" Summary of arithmetic traps = %16lx\n", frame->exc_sum); printk(" Exception mask = %16lx\n", frame->exc_mask); printk(" Base address for PALcode = %16lx\n", frame->pal_base); printk(" Interrupt Status Reg = %16lx\n", frame->isr); printk(" CURRENT SETUP OF EV5 IBOX = %16lx\n", frame->icsr); printk(" I-CACHE Reg %s parity error = %16lx\n", (frame->ic_perr_stat & 0x800L) ? 
"Data" : "Tag", frame->ic_perr_stat); printk(" D-CACHE error Reg = %16lx\n", frame->dc_perr_stat); if (frame->dc_perr_stat & 0x2) { switch (frame->dc_perr_stat & 0x03c) { case 8: printk(" Data error in bank 1\n"); break; case 4: printk(" Data error in bank 0\n"); break; case 20: printk(" Tag error in bank 1\n"); break; case 10: printk(" Tag error in bank 0\n"); break; } } printk(" Effective VA = %16lx\n", frame->va); printk(" Reason for D-stream = %16lx\n", frame->mm_stat); printk(" EV5 SCache address = %16lx\n", frame->sc_addr); printk(" EV5 SCache TAG/Data parity = %16lx\n", frame->sc_stat); printk(" EV5 BC_TAG_ADDR = %16lx\n", frame->bc_tag_addr); printk(" EV5 EI_ADDR: Phys addr of Xfer = %16lx\n", frame->ei_addr); printk(" Fill Syndrome = %16lx\n", frame->fill_syndrome); printk(" EI_STAT reg = %16lx\n", frame->ei_stat); printk(" LD_LOCK = %16lx\n", frame->ld_lock); } static void mcpcia_print_system_area(unsigned long la_ptr) { struct el_common *frame; struct pci_controller *hose; struct IOD_subpacket { unsigned long base; unsigned int whoami; unsigned int rsvd1; unsigned int pci_rev; unsigned int cap_ctrl; unsigned int hae_mem; unsigned int hae_io; unsigned int int_ctl; unsigned int int_reg; unsigned int int_mask0; unsigned int int_mask1; unsigned int mc_err0; unsigned int mc_err1; unsigned int cap_err; unsigned int rsvd2; unsigned int pci_err1; unsigned int mdpa_stat; unsigned int mdpa_syn; unsigned int mdpb_stat; unsigned int mdpb_syn; unsigned int rsvd3; unsigned int rsvd4; unsigned int rsvd5; } *iodpp; frame = (struct el_common *)la_ptr; iodpp = (struct IOD_subpacket *) (la_ptr + frame->sys_offset); for (hose = hose_head; hose; hose = hose->next, iodpp++) { printk("IOD %d Register Subpacket - Bridge Base Address %16lx\n", hose->index, iodpp->base); printk(" WHOAMI = %8x\n", iodpp->whoami); printk(" PCI_REV = %8x\n", iodpp->pci_rev); printk(" CAP_CTRL = %8x\n", iodpp->cap_ctrl); printk(" HAE_MEM = %8x\n", iodpp->hae_mem); printk(" HAE_IO = %8x\n", 
iodpp->hae_io); printk(" INT_CTL = %8x\n", iodpp->int_ctl); printk(" INT_REG = %8x\n", iodpp->int_reg); printk(" INT_MASK0 = %8x\n", iodpp->int_mask0); printk(" INT_MASK1 = %8x\n", iodpp->int_mask1); printk(" MC_ERR0 = %8x\n", iodpp->mc_err0); printk(" MC_ERR1 = %8x\n", iodpp->mc_err1); printk(" CAP_ERR = %8x\n", iodpp->cap_err); printk(" PCI_ERR1 = %8x\n", iodpp->pci_err1); printk(" MDPA_STAT = %8x\n", iodpp->mdpa_stat); printk(" MDPA_SYN = %8x\n", iodpp->mdpa_syn); printk(" MDPB_STAT = %8x\n", iodpp->mdpb_stat); printk(" MDPB_SYN = %8x\n", iodpp->mdpb_syn); } } void mcpcia_machine_check(unsigned long vector, unsigned long la_ptr) { struct el_MCPCIA_uncorrected_frame_mcheck *mchk_logout; unsigned int cpu = smp_processor_id(); int expected; mchk_logout = (struct el_MCPCIA_uncorrected_frame_mcheck *)la_ptr; expected = mcheck_expected(cpu); mb(); mb(); /* magic */ draina(); switch (expected) { case 0: { /* FIXME: how do we figure out which hose the error was on? */ struct pci_controller *hose; for (hose = hose_head; hose; hose = hose->next) mcpcia_pci_clr_err(MCPCIA_HOSE2MID(hose->index)); break; } case 1: mcpcia_pci_clr_err(mcheck_extra(cpu)); break; default: /* Otherwise, we're being called from mcpcia_probe_hose and there's no hose clear an error from. */ break; } wrmces(0x7); mb(); process_mcheck_info(vector, la_ptr, "MCPCIA", expected != 0); if (!expected && vector != 0x620 && vector != 0x630) { mcpcia_print_uncorrectable(mchk_logout); mcpcia_print_system_area(la_ptr); } }
gpl-2.0
SlimRoms/kernel_lge_hammerhead
tools/perf/util/run-command.c
12197
4121
#include "cache.h" #include "run-command.h" #include "exec_cmd.h" static inline void close_pair(int fd[2]) { close(fd[0]); close(fd[1]); } static inline void dup_devnull(int to) { int fd = open("/dev/null", O_RDWR); dup2(fd, to); close(fd); } int start_command(struct child_process *cmd) { int need_in, need_out, need_err; int fdin[2], fdout[2], fderr[2]; /* * In case of errors we must keep the promise to close FDs * that have been passed in via ->in and ->out. */ need_in = !cmd->no_stdin && cmd->in < 0; if (need_in) { if (pipe(fdin) < 0) { if (cmd->out > 0) close(cmd->out); return -ERR_RUN_COMMAND_PIPE; } cmd->in = fdin[1]; } need_out = !cmd->no_stdout && !cmd->stdout_to_stderr && cmd->out < 0; if (need_out) { if (pipe(fdout) < 0) { if (need_in) close_pair(fdin); else if (cmd->in) close(cmd->in); return -ERR_RUN_COMMAND_PIPE; } cmd->out = fdout[0]; } need_err = !cmd->no_stderr && cmd->err < 0; if (need_err) { if (pipe(fderr) < 0) { if (need_in) close_pair(fdin); else if (cmd->in) close(cmd->in); if (need_out) close_pair(fdout); else if (cmd->out) close(cmd->out); return -ERR_RUN_COMMAND_PIPE; } cmd->err = fderr[0]; } fflush(NULL); cmd->pid = fork(); if (!cmd->pid) { if (cmd->no_stdin) dup_devnull(0); else if (need_in) { dup2(fdin[0], 0); close_pair(fdin); } else if (cmd->in) { dup2(cmd->in, 0); close(cmd->in); } if (cmd->no_stderr) dup_devnull(2); else if (need_err) { dup2(fderr[1], 2); close_pair(fderr); } if (cmd->no_stdout) dup_devnull(1); else if (cmd->stdout_to_stderr) dup2(2, 1); else if (need_out) { dup2(fdout[1], 1); close_pair(fdout); } else if (cmd->out > 1) { dup2(cmd->out, 1); close(cmd->out); } if (cmd->dir && chdir(cmd->dir)) die("exec %s: cd to %s failed (%s)", cmd->argv[0], cmd->dir, strerror(errno)); if (cmd->env) { for (; *cmd->env; cmd->env++) { if (strchr(*cmd->env, '=')) putenv((char*)*cmd->env); else unsetenv(*cmd->env); } } if (cmd->preexec_cb) cmd->preexec_cb(); if (cmd->perf_cmd) { execv_perf_cmd(cmd->argv); } else { execvp(cmd->argv[0], 
(char *const*) cmd->argv); } exit(127); } if (cmd->pid < 0) { int err = errno; if (need_in) close_pair(fdin); else if (cmd->in) close(cmd->in); if (need_out) close_pair(fdout); else if (cmd->out) close(cmd->out); if (need_err) close_pair(fderr); return err == ENOENT ? -ERR_RUN_COMMAND_EXEC : -ERR_RUN_COMMAND_FORK; } if (need_in) close(fdin[0]); else if (cmd->in) close(cmd->in); if (need_out) close(fdout[1]); else if (cmd->out) close(cmd->out); if (need_err) close(fderr[1]); return 0; } static int wait_or_whine(pid_t pid) { for (;;) { int status, code; pid_t waiting = waitpid(pid, &status, 0); if (waiting < 0) { if (errno == EINTR) continue; error("waitpid failed (%s)", strerror(errno)); return -ERR_RUN_COMMAND_WAITPID; } if (waiting != pid) return -ERR_RUN_COMMAND_WAITPID_WRONG_PID; if (WIFSIGNALED(status)) return -ERR_RUN_COMMAND_WAITPID_SIGNAL; if (!WIFEXITED(status)) return -ERR_RUN_COMMAND_WAITPID_NOEXIT; code = WEXITSTATUS(status); switch (code) { case 127: return -ERR_RUN_COMMAND_EXEC; case 0: return 0; default: return -code; } } } int finish_command(struct child_process *cmd) { return wait_or_whine(cmd->pid); } int run_command(struct child_process *cmd) { int code = start_command(cmd); if (code) return code; return finish_command(cmd); } static void prepare_run_command_v_opt(struct child_process *cmd, const char **argv, int opt) { memset(cmd, 0, sizeof(*cmd)); cmd->argv = argv; cmd->no_stdin = opt & RUN_COMMAND_NO_STDIN ? 1 : 0; cmd->perf_cmd = opt & RUN_PERF_CMD ? 1 : 0; cmd->stdout_to_stderr = opt & RUN_COMMAND_STDOUT_TO_STDERR ? 1 : 0; } int run_command_v_opt(const char **argv, int opt) { struct child_process cmd; prepare_run_command_v_opt(&cmd, argv, opt); return run_command(&cmd); }
gpl-2.0
RomanHargrave/pf-kernel
net/netfilter/xt_state.c
12709
2008
/* Kernel module to match connection tracking information. */ /* (C) 1999-2001 Paul `Rusty' Russell * (C) 2002-2005 Netfilter Core Team <coreteam@netfilter.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/module.h> #include <linux/skbuff.h> #include <net/netfilter/nf_conntrack.h> #include <linux/netfilter/x_tables.h> #include <linux/netfilter/xt_state.h> MODULE_LICENSE("GPL"); MODULE_AUTHOR("Rusty Russell <rusty@rustcorp.com.au>"); MODULE_DESCRIPTION("ip[6]_tables connection tracking state match module"); MODULE_ALIAS("ipt_state"); MODULE_ALIAS("ip6t_state"); static bool state_mt(const struct sk_buff *skb, struct xt_action_param *par) { const struct xt_state_info *sinfo = par->matchinfo; enum ip_conntrack_info ctinfo; unsigned int statebit; struct nf_conn *ct = nf_ct_get(skb, &ctinfo); if (!ct) statebit = XT_STATE_INVALID; else { if (nf_ct_is_untracked(ct)) statebit = XT_STATE_UNTRACKED; else statebit = XT_STATE_BIT(ctinfo); } return (sinfo->statemask & statebit); } static int state_mt_check(const struct xt_mtchk_param *par) { int ret; ret = nf_ct_l3proto_try_module_get(par->family); if (ret < 0) pr_info("cannot load conntrack support for proto=%u\n", par->family); return ret; } static void state_mt_destroy(const struct xt_mtdtor_param *par) { nf_ct_l3proto_module_put(par->family); } static struct xt_match state_mt_reg __read_mostly = { .name = "state", .family = NFPROTO_UNSPEC, .checkentry = state_mt_check, .match = state_mt, .destroy = state_mt_destroy, .matchsize = sizeof(struct xt_state_info), .me = THIS_MODULE, }; static int __init state_mt_init(void) { return xt_register_match(&state_mt_reg); } static void __exit state_mt_exit(void) { xt_unregister_match(&state_mt_reg); } module_init(state_mt_init); module_exit(state_mt_exit);
gpl-2.0
thepasto/kernel_acer_salsa
arch/mips/sgi-ip32/ip32-setup.c
13477
2405
/* * IP32 basic setup * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 2000 Harald Koerfgen * Copyright (C) 2002, 2003, 2005 Ilya A. Volynets * Copyright (C) 2006 Ralf Baechle <ralf@linux-mips.org> */ #include <linux/console.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/mc146818rtc.h> #include <linux/param.h> #include <linux/sched.h> #include <asm/bootinfo.h> #include <asm/mc146818-time.h> #include <asm/mipsregs.h> #include <asm/mmu_context.h> #include <asm/sgialib.h> #include <asm/time.h> #include <asm/traps.h> #include <asm/io.h> #include <asm/ip32/crime.h> #include <asm/ip32/mace.h> #include <asm/ip32/ip32_ints.h> extern void ip32_be_init(void); extern void crime_init(void); #ifdef CONFIG_SGI_O2MACE_ETH /* * This is taken care of in here 'cause they say using Arc later on is * problematic */ extern char o2meth_eaddr[8]; static inline unsigned char str2hexnum(unsigned char c) { if (c >= '0' && c <= '9') return c - '0'; if (c >= 'a' && c <= 'f') return c - 'a' + 10; return 0; /* foo */ } static inline void str2eaddr(unsigned char *ea, unsigned char *str) { int i; for (i = 0; i < 6; i++) { unsigned char num; if(*str == ':') str++; num = str2hexnum(*str++) << 4; num |= (str2hexnum(*str++)); ea[i] = num; } } #endif /* An arbitrary time; this can be decreased if reliability looks good */ #define WAIT_MS 10 void __init plat_time_init(void) { printk(KERN_INFO "Calibrating system timer... 
"); write_c0_count(0); crime->timer = 0; while (crime->timer < CRIME_MASTER_FREQ * WAIT_MS / 1000) ; mips_hpt_frequency = read_c0_count() * 1000 / WAIT_MS; printk("%d MHz CPU detected\n", mips_hpt_frequency * 2 / 1000000); } void __init plat_mem_setup(void) { board_be_init = ip32_be_init; #ifdef CONFIG_SGI_O2MACE_ETH { char *mac = ArcGetEnvironmentVariable("eaddr"); str2eaddr(o2meth_eaddr, mac); } #endif #if defined(CONFIG_SERIAL_CORE_CONSOLE) { char* con = ArcGetEnvironmentVariable("console"); if (con && *con == 'd') { static char options[8] __initdata; char *baud = ArcGetEnvironmentVariable("dbaud"); if (baud) strcpy(options, baud); add_preferred_console("ttyS", *(con + 1) == '2' ? 1 : 0, baud ? options : NULL); } } #endif }
gpl-2.0
leshak/i5700-leshak-kernel
fs/char_dev.c
166
13526
/*
 *  linux/fs/char_dev.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/kdev_t.h>
#include <linux/slab.h>
#include <linux/string.h>

#include <linux/major.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/smp_lock.h>
#include <linux/seq_file.h>

#include <linux/kobject.h>
#include <linux/kobj_map.h>
#include <linux/cdev.h>
#include <linux/mutex.h>
#include <linux/backing-dev.h>

#include "internal.h"

/*
 * capabilities for /dev/mem, /dev/kmem and similar directly mappable character
 * devices
 * - permits shared-mmap for read, write and/or exec
 * - does not permit private mmap in NOMMU mode (can't do COW)
 * - no readahead or I/O queue unplugging required
 */
struct backing_dev_info directly_mappable_cdev_bdi = {
	.capabilities	= (
#ifdef CONFIG_MMU
		/* permit private copies of the data to be taken */
		BDI_CAP_MAP_COPY |
#endif
		/* permit direct mmap, for read, write or exec */
		BDI_CAP_MAP_DIRECT |
		BDI_CAP_READ_MAP | BDI_CAP_WRITE_MAP | BDI_CAP_EXEC_MAP),
};

/* dev_t -> cdev mapping used by chrdev_open(); built lazily in chrdev_init() */
static struct kobj_map *cdev_map;

/* protects the chrdevs[] hash table below */
static DEFINE_MUTEX(chrdevs_lock);

/*
 * One entry per registered (major, baseminor, minorct) range, kept in
 * singly-linked lists hashed by major.  Lists are sorted by major, then
 * by base minor (see the insertion loop in __register_chrdev_region()).
 */
static struct char_device_struct {
	struct char_device_struct *next;
	unsigned int major;
	unsigned int baseminor;
	int minorct;
	char name[64];
	struct cdev *cdev;		/* will die */
} *chrdevs[CHRDEV_MAJOR_HASH_SIZE];

/* index in the above */
static inline int major_to_index(int major)
{
	return major % CHRDEV_MAJOR_HASH_SIZE;
}

#ifdef CONFIG_PROC_FS

/* Emit one hash bucket of registered majors for /proc/devices. */
void chrdev_show(struct seq_file *f, off_t offset)
{
	struct char_device_struct *cd;

	if (offset < CHRDEV_MAJOR_HASH_SIZE) {
		mutex_lock(&chrdevs_lock);
		for (cd = chrdevs[offset]; cd; cd = cd->next)
			seq_printf(f, "%3d %s\n", cd->major, cd->name);
		mutex_unlock(&chrdevs_lock);
	}
}

#endif /* CONFIG_PROC_FS */

/*
 * Register a single major with a specified minor range.
 *
 * If major == 0 this functions will dynamically allocate a major and return
 * its number.
 *
 * If major > 0 this function will attempt to reserve the passed range of
 * minors and will return zero on success.
 *
 * Returns a -ve errno on failure.
 */
static struct char_device_struct *
__register_chrdev_region(unsigned int major, unsigned int baseminor,
			   int minorct, const char *name)
{
	struct char_device_struct *cd, **cp;
	int ret = 0;
	int i;

	cd = kzalloc(sizeof(struct char_device_struct), GFP_KERNEL);
	if (cd == NULL)
		return ERR_PTR(-ENOMEM);

	mutex_lock(&chrdevs_lock);

	/* temporary */
	if (major == 0) {
		/*
		 * Dynamic majors are handed out from the top of the hash
		 * table downwards; index 0 is deliberately never used.
		 */
		for (i = ARRAY_SIZE(chrdevs)-1; i > 0; i--) {
			if (chrdevs[i] == NULL)
				break;
		}

		if (i == 0) {
			ret = -EBUSY;
			goto out;
		}
		major = i;
		ret = major;
	}

	cd->major = major;
	cd->baseminor = baseminor;
	cd->minorct = minorct;
	strlcpy(cd->name, name, sizeof(cd->name));

	i = major_to_index(major);

	/* Find the insertion point that keeps the bucket sorted. */
	for (cp = &chrdevs[i]; *cp; cp = &(*cp)->next)
		if ((*cp)->major > major ||
		    ((*cp)->major == major &&
		     (((*cp)->baseminor >= baseminor) ||
		      ((*cp)->baseminor + (*cp)->minorct > baseminor))))
			break;

	/* Check for overlapping minor ranges.  */
	if (*cp && (*cp)->major == major) {
		int old_min = (*cp)->baseminor;
		int old_max = (*cp)->baseminor + (*cp)->minorct - 1;
		int new_min = baseminor;
		int new_max = baseminor + minorct - 1;

		/* New driver overlaps from the left.  */
		if (new_max >= old_min && new_max <= old_max) {
			ret = -EBUSY;
			goto out;
		}

		/* New driver overlaps from the right.  */
		if (new_min <= old_max && new_min >= old_min) {
			ret = -EBUSY;
			goto out;
		}
	}

	cd->next = *cp;
	*cp = cd;
	mutex_unlock(&chrdevs_lock);
	return cd;
out:
	mutex_unlock(&chrdevs_lock);
	kfree(cd);
	return ERR_PTR(ret);
}

/*
 * Unlink and return the entry matching (major, baseminor, minorct) exactly,
 * or NULL if no such registration exists.  Caller frees the result.
 */
static struct char_device_struct *
__unregister_chrdev_region(unsigned major, unsigned baseminor, int minorct)
{
	struct char_device_struct *cd = NULL, **cp;
	int i = major_to_index(major);

	mutex_lock(&chrdevs_lock);
	for (cp = &chrdevs[i]; *cp; cp = &(*cp)->next)
		if ((*cp)->major == major &&
		    (*cp)->baseminor == baseminor &&
		    (*cp)->minorct == minorct)
			break;
	if (*cp) {
		cd = *cp;
		*cp = cd->next;
	}
	mutex_unlock(&chrdevs_lock);
	return cd;
}

/**
 * register_chrdev_region() - register a range of device numbers
 * @from: the first in the desired range of device numbers; must include
 *        the major number.
 * @count: the number of consecutive device numbers required
 * @name: the name of the device or driver.
 *
 * Return value is zero on success, a negative error code on failure.
 */
int register_chrdev_region(dev_t from, unsigned count, const char *name)
{
	struct char_device_struct *cd;
	dev_t to = from + count;
	dev_t n, next;

	/* A request may span several majors; register one major at a time. */
	for (n = from; n < to; n = next) {
		next = MKDEV(MAJOR(n)+1, 0);
		if (next > to)
			next = to;
		cd = __register_chrdev_region(MAJOR(n), MINOR(n),
			       next - n, name);
		if (IS_ERR(cd))
			goto fail;
	}
	return 0;
fail:
	/* Roll back every chunk registered before the failure. */
	to = n;
	for (n = from; n < to; n = next) {
		next = MKDEV(MAJOR(n)+1, 0);
		kfree(__unregister_chrdev_region(MAJOR(n), MINOR(n), next - n));
	}
	return PTR_ERR(cd);
}

/**
 * alloc_chrdev_region() - register a range of char device numbers
 * @dev: output parameter for first assigned number
 * @baseminor: first of the requested range of minor numbers
 * @count: the number of minor numbers required
 * @name: the name of the associated device or driver
 *
 * Allocates a range of char device numbers.  The major number will be
 * chosen dynamically, and returned (along with the first minor number)
 * in @dev.  Returns zero or a negative error code.
 */
int alloc_chrdev_region(dev_t *dev, unsigned baseminor, unsigned count,
			const char *name)
{
	struct char_device_struct *cd;
	cd = __register_chrdev_region(0, baseminor, count, name);
	if (IS_ERR(cd))
		return PTR_ERR(cd);
	*dev = MKDEV(cd->major, cd->baseminor);
	return 0;
}

/**
 * register_chrdev() - Register a major number for character devices.
 * @major: major device number or 0 for dynamic allocation
 * @name: name of this range of devices
 * @fops: file operations associated with this devices
 *
 * If @major == 0 this functions will dynamically allocate a major and return
 * its number.
 *
 * If @major > 0 this function will attempt to reserve a device with the given
 * major number and will return zero on success.
 *
 * Returns a -ve errno on failure.
 *
 * The name of this device has nothing to do with the name of the device in
 * /dev. It only helps to keep track of the different owners of devices. If
 * your module name has only one type of devices it's ok to use e.g. the name
 * of the module here.
 *
 * This function registers a range of 256 minor numbers. The first minor number
 * is 0.
 */
int register_chrdev(unsigned int major, const char *name,
		    const struct file_operations *fops)
{
	struct char_device_struct *cd;
	struct cdev *cdev;
	char *s;
	int err = -ENOMEM;

	cd = __register_chrdev_region(major, 0, 256, name);
	if (IS_ERR(cd))
		return PTR_ERR(cd);

	cdev = cdev_alloc();
	if (!cdev)
		goto out2;

	cdev->owner = fops->owner;
	cdev->ops = fops;
	kobject_set_name(&cdev->kobj, "%s", name);
	/* '/' is not allowed in a kobject name; replace it like block devs do */
	for (s = strchr(kobject_name(&cdev->kobj),'/'); s; s = strchr(s, '/'))
		*s = '!';

	err = cdev_add(cdev, MKDEV(cd->major, 0), 256);
	if (err)
		goto out;

	cd->cdev = cdev;

	/* for dynamic allocation (major == 0) report the chosen major */
	return major ? 0 : cd->major;
out:
	kobject_put(&cdev->kobj);
out2:
	kfree(__unregister_chrdev_region(cd->major, 0, 256));
	return err;
}

/**
 * unregister_chrdev_region() - return a range of device numbers
 * @from: the first in the range of numbers to unregister
 * @count: the number of device numbers to unregister
 *
 * This function will unregister a range of @count device numbers,
 * starting with @from.  The caller should normally be the one who
 * allocated those numbers in the first place...
 */
void unregister_chrdev_region(dev_t from, unsigned count)
{
	dev_t to = from + count;
	dev_t n, next;

	for (n = from; n < to; n = next) {
		next = MKDEV(MAJOR(n)+1, 0);
		if (next > to)
			next = to;
		kfree(__unregister_chrdev_region(MAJOR(n), MINOR(n), next - n));
	}
}

/*
 * Undo a register_chrdev(): drop the 256-minor region registered under
 * @major and delete the cdev that was created for it.
 */
void unregister_chrdev(unsigned int major, const char *name)
{
	struct char_device_struct *cd;
	cd = __unregister_chrdev_region(major, 0, 256);
	if (cd && cd->cdev)
		cdev_del(cd->cdev);
	kfree(cd);
}

/* protects inode->i_cdev / i_devices attachment, see chrdev_open() */
static DEFINE_SPINLOCK(cdev_lock);

/*
 * Take a reference on @p's kobject, pinning its owning module first.
 * Returns NULL if the module is going away or the kobject is dead.
 */
static struct kobject *cdev_get(struct cdev *p)
{
	struct module *owner = p->owner;
	struct kobject *kobj;

	if (owner && !try_module_get(owner))
		return NULL;
	kobj = kobject_get(&p->kobj);
	if (!kobj)
		module_put(owner);
	return kobj;
}

/* Drop the kobject and module references taken by cdev_get(). */
void cdev_put(struct cdev *p)
{
	if (p) {
		struct module *owner = p->owner;
		kobject_put(&p->kobj);
		module_put(owner);
	}
}

/*
 * Called every time a character special file is opened
 */
static int chrdev_open(struct inode *inode, struct file *filp)
{
	struct cdev *p;
	struct cdev *new = NULL;
	int ret = 0;

	spin_lock(&cdev_lock);
	p = inode->i_cdev;
	if (!p) {
		struct kobject *kobj;
		int idx;
		/* kobj_lookup() may sleep/probe, so drop the spinlock first */
		spin_unlock(&cdev_lock);
		kobj = kobj_lookup(cdev_map, inode->i_rdev, &idx);
		if (!kobj)
			return -ENXIO;
		new = container_of(kobj, struct cdev, kobj);
		spin_lock(&cdev_lock);
		/* Check i_cdev again in case somebody beat us to it while
		   we dropped the lock. */
		p = inode->i_cdev;
		if (!p) {
			inode->i_cdev = p = new;
			inode->i_cindex = idx;
			list_add(&inode->i_devices, &p->list);
			new = NULL;
		} else if (!cdev_get(p))
			ret = -ENXIO;
	} else if (!cdev_get(p))
		ret = -ENXIO;
	spin_unlock(&cdev_lock);
	/* drop the extra ref if we lost the race and didn't install 'new' */
	cdev_put(new);
	if (ret)
		return ret;

	ret = -ENXIO;
	filp->f_op = fops_get(p->ops);
	if (!filp->f_op)
		goto out_cdev_put;

	if (filp->f_op->open) {
		ret = filp->f_op->open(inode,filp);
		if (ret)
			goto out_cdev_put;
	}

	return 0;

 out_cdev_put:
	cdev_put(p);
	return ret;
}

/* Detach @inode from its cdev's inode list (e.g. when the inode dies). */
void cd_forget(struct inode *inode)
{
	spin_lock(&cdev_lock);
	list_del_init(&inode->i_devices);
	inode->i_cdev = NULL;
	spin_unlock(&cdev_lock);
}

/* Detach every inode still pointing at @cdev; used when the cdev is freed. */
static void cdev_purge(struct cdev *cdev)
{
	spin_lock(&cdev_lock);
	while (!list_empty(&cdev->list)) {
		struct inode *inode;
		inode = container_of(cdev->list.next, struct inode, i_devices);
		list_del_init(&inode->i_devices);
		inode->i_cdev = NULL;
	}
	spin_unlock(&cdev_lock);
}

/*
 * Dummy default file-operations: the only thing this does
 * is contain the open that then fills in the correct operations
 * depending on the special file...
 */
const struct file_operations def_chr_fops = {
	.open = chrdev_open,
};

/* kobj_map callbacks: a cdev covers its whole range, no per-minor parts */
static struct kobject *exact_match(dev_t dev, int *part, void *data)
{
	struct cdev *p = data;
	return &p->kobj;
}

static int exact_lock(dev_t dev, void *data)
{
	struct cdev *p = data;
	return cdev_get(p) ? 0 : -1;
}

/**
 * cdev_add() - add a char device to the system
 * @p: the cdev structure for the device
 * @dev: the first device number for which this device is responsible
 * @count: the number of consecutive minor numbers corresponding to this
 *         device
 *
 * cdev_add() adds the device represented by @p to the system, making it
 * live immediately.  A negative error code is returned on failure.
 */
int cdev_add(struct cdev *p, dev_t dev, unsigned count)
{
	p->dev = dev;
	p->count = count;
	return kobj_map(cdev_map, dev, count, NULL, exact_match, exact_lock, p);
}

static void cdev_unmap(dev_t dev, unsigned count)
{
	kobj_unmap(cdev_map, dev, count);
}

/**
 * cdev_del() - remove a cdev from the system
 * @p: the cdev structure to be removed
 *
 * cdev_del() removes @p from the system, possibly freeing the structure
 * itself.
 */
void cdev_del(struct cdev *p)
{
	cdev_unmap(p->dev, p->count);
	kobject_put(&p->kobj);
}

/* release for caller-owned cdevs (cdev_init): don't free the struct */
static void cdev_default_release(struct kobject *kobj)
{
	struct cdev *p = container_of(kobj, struct cdev, kobj);
	cdev_purge(p);
}

/* release for cdev_alloc()'d cdevs: we own the memory, so free it too */
static void cdev_dynamic_release(struct kobject *kobj)
{
	struct cdev *p = container_of(kobj, struct cdev, kobj);
	cdev_purge(p);
	kfree(p);
}

static struct kobj_type ktype_cdev_default = {
	.release	= cdev_default_release,
};

static struct kobj_type ktype_cdev_dynamic = {
	.release	= cdev_dynamic_release,
};

/**
 * cdev_alloc() - allocate a cdev structure
 *
 * Allocates and returns a cdev structure, or NULL on failure.
 */
struct cdev *cdev_alloc(void)
{
	struct cdev *p = kzalloc(sizeof(struct cdev), GFP_KERNEL);
	if (p) {
		INIT_LIST_HEAD(&p->list);
		kobject_init(&p->kobj, &ktype_cdev_dynamic);
	}
	return p;
}

/**
 * cdev_init() - initialize a cdev structure
 * @cdev: the structure to initialize
 * @fops: the file_operations for this device
 *
 * Initializes @cdev, remembering @fops, making it ready to add to the
 * system with cdev_add().
 */
void cdev_init(struct cdev *cdev, const struct file_operations *fops)
{
	memset(cdev, 0, sizeof *cdev);
	INIT_LIST_HEAD(&cdev->list);
	kobject_init(&cdev->kobj, &ktype_cdev_default);
	cdev->ops = fops;
}

/*
 * Fallback probe for unmapped device numbers: try to autoload a driver
 * module by its "char-major-M-m" alias (and the old 2.4-style alias).
 */
static struct kobject *base_probe(dev_t dev, int *part, void *data)
{
	if (request_module("char-major-%d-%d", MAJOR(dev), MINOR(dev)) > 0)
		/* Make old-style 2.4 aliases work */
		request_module("char-major-%d", MAJOR(dev));
	return NULL;
}

/* One-time setup of the cdev map and the directly-mappable bdi. */
void __init chrdev_init(void)
{
	cdev_map = kobj_map_init(base_probe, &chrdevs_lock);
	bdi_init(&directly_mappable_cdev_bdi);
}


/* Let modules do char dev stuff */
EXPORT_SYMBOL(register_chrdev_region);
EXPORT_SYMBOL(unregister_chrdev_region);
EXPORT_SYMBOL(alloc_chrdev_region);
EXPORT_SYMBOL(cdev_init);
EXPORT_SYMBOL(cdev_alloc);
EXPORT_SYMBOL(cdev_del);
EXPORT_SYMBOL(cdev_add);
EXPORT_SYMBOL(register_chrdev);
EXPORT_SYMBOL(unregister_chrdev);
EXPORT_SYMBOL(directly_mappable_cdev_bdi);
gpl-2.0
sudipm-mukherjee/parport
arch/alpha/lib/udelay.c
678
1143
/* * Copyright (C) 1993, 2000 Linus Torvalds * * Delay routines, using a pre-computed "loops_per_jiffy" value. */ #include <linux/module.h> #include <linux/sched.h> /* for udelay's use of smp_processor_id */ #include <asm/param.h> #include <asm/smp.h> #include <linux/delay.h> /* * Use only for very small delays (< 1 msec). * * The active part of our cycle counter is only 32-bits wide, and * we're treating the difference between two marks as signed. On * a 1GHz box, that's about 2 seconds. */ void __delay(int loops) { int tmp; __asm__ __volatile__( " rpcc %0\n" " addl %1,%0,%1\n" "1: rpcc %0\n" " subl %1,%0,%0\n" " bgt %0,1b" : "=&r" (tmp), "=r" (loops) : "1"(loops)); } EXPORT_SYMBOL(__delay); #ifdef CONFIG_SMP #define LPJ cpu_data[smp_processor_id()].loops_per_jiffy #else #define LPJ loops_per_jiffy #endif void udelay(unsigned long usecs) { usecs *= (((unsigned long)HZ << 32) / 1000000) * LPJ; __delay((long)usecs >> 32); } EXPORT_SYMBOL(udelay); void ndelay(unsigned long nsecs) { nsecs *= (((unsigned long)HZ << 32) / 1000000000) * LPJ; __delay((long)nsecs >> 32); } EXPORT_SYMBOL(ndelay);
gpl-2.0
pavgup/WRT1900AC
target/linux/ar71xx/files/arch/mips/ath79/mach-whr-hp-g300n.c
678
4125
/*
 *  Buffalo WHR-HP-G300N board support
 *
 *  based on ...
 *
 *  TP-LINK TL-WR741ND board support
 *
 *  Copyright (C) 2009-2010 Gabor Juhos <juhosg@openwrt.org>
 *
 *  This program is free software; you can redistribute it and/or modify it
 *  under the terms of the GNU General Public License version 2 as published
 *  by the Free Software Foundation.
 */

#include <asm/mach-ath79/ath79.h>
#include <asm/mach-ath79/ar71xx_regs.h>

#include "common.h"
#include "dev-ap9x-pci.h"
#include "dev-eth.h"
#include "dev-gpio-buttons.h"
#include "dev-leds-gpio.h"
#include "dev-m25p80.h"
#include "machtypes.h"

/* GPIO line assignments for the board's LEDs and buttons */
#define WHRHPG300N_GPIO_LED_SECURITY		0
#define WHRHPG300N_GPIO_LED_DIAG		1
#define WHRHPG300N_GPIO_LED_ROUTER		6

#define WHRHPG300N_GPIO_BTN_ROUTER_ON		7
#define WHRHPG300N_GPIO_BTN_ROUTER_AUTO		8
#define WHRHPG300N_GPIO_BTN_RESET		11
#define WHRHPG300N_GPIO_BTN_AOSS		12
#define WHRHPG300N_GPIO_LED_LAN1		13
#define WHRHPG300N_GPIO_LED_LAN2		14
#define WHRHPG300N_GPIO_LED_LAN3		15
#define WHRHPG300N_GPIO_LED_LAN4		16
#define WHRHPG300N_GPIO_LED_WAN			17

#define WHRHPG300N_KEYS_POLL_INTERVAL	20	/* msecs */
#define WHRHPG300N_KEYS_DEBOUNCE_INTERVAL (3 * WHRHPG300N_KEYS_POLL_INTERVAL)

/* byte offset of the MAC address within the calibration/art partition */
#define WHRHPG300N_MAC_OFFSET		0x20c

/* All LEDs on this board are wired active-low. */
static struct gpio_led whrhpg300n_leds_gpio[] __initdata = {
	{
		.name		= "buffalo:orange:security",
		.gpio		= WHRHPG300N_GPIO_LED_SECURITY,
		.active_low	= 1,
	}, {
		.name		= "buffalo:red:diag",
		.gpio		= WHRHPG300N_GPIO_LED_DIAG,
		.active_low	= 1,
	}, {
		.name		= "buffalo:green:router",
		.gpio		= WHRHPG300N_GPIO_LED_ROUTER,
		.active_low	= 1,
	}, {
		.name		= "buffalo:green:wan",
		.gpio		= WHRHPG300N_GPIO_LED_WAN,
		.active_low	= 1,
	}, {
		.name		= "buffalo:green:lan1",
		.gpio		= WHRHPG300N_GPIO_LED_LAN1,
		.active_low	= 1,
	}, {
		.name		= "buffalo:green:lan2",
		.gpio		= WHRHPG300N_GPIO_LED_LAN2,
		.active_low	= 1,
	}, {
		.name		= "buffalo:green:lan3",
		.gpio		= WHRHPG300N_GPIO_LED_LAN3,
		.active_low	= 1,
	}, {
		.name		= "buffalo:green:lan4",
		.gpio		= WHRHPG300N_GPIO_LED_LAN4,
		.active_low	= 1,
	}
};

/* Polled (non-IRQ) buttons; all active-low. */
static struct gpio_keys_button whrhpg300n_gpio_keys[] __initdata = {
	{
		.desc		= "reset",
		.type		= EV_KEY,
		.code		= KEY_RESTART,
		.debounce_interval = WHRHPG300N_KEYS_DEBOUNCE_INTERVAL,
		.gpio		= WHRHPG300N_GPIO_BTN_RESET,
		.active_low	= 1,
	}, {
		.desc		= "aoss/wps",
		.type		= EV_KEY,
		.code		= KEY_WPS_BUTTON,
		.gpio		= WHRHPG300N_GPIO_BTN_AOSS,
		.debounce_interval = WHRHPG300N_KEYS_DEBOUNCE_INTERVAL,
		.active_low	= 1,
	}, {
		.desc		= "router_on",
		.type		= EV_KEY,
		.code		= BTN_2,
		.gpio		= WHRHPG300N_GPIO_BTN_ROUTER_ON,
		.debounce_interval = WHRHPG300N_KEYS_DEBOUNCE_INTERVAL,
		.active_low	= 1,
	}, {
		.desc		= "router_auto",
		.type		= EV_KEY,
		.code		= BTN_3,
		.gpio		= WHRHPG300N_GPIO_BTN_ROUTER_AUTO,
		.debounce_interval = WHRHPG300N_KEYS_DEBOUNCE_INTERVAL,
		.active_low	= 1,
	}
};

/*
 * Board init: register flash, LEDs, buttons, both ethernet MACs and the
 * ap91 wireless PCI device.  'ee' points at the flash-resident calibration
 * data (presumably the ART partition at 0x1fff1000 — verify per board);
 * the board MAC address lives WHRHPG300N_MAC_OFFSET bytes into it.
 */
static void __init whrhpg300n_setup(void)
{
	u8 *ee = (u8 *) KSEG1ADDR(0x1fff1000);
	u8 *mac = (u8 *) KSEG1ADDR(ee + WHRHPG300N_MAC_OFFSET);

	ath79_register_m25p80(NULL);

	/* hand the switch-LED pins over to GPIO so the LED driver can use them */
	ath79_gpio_function_disable(AR724X_GPIO_FUNC_ETH_SWITCH_LED0_EN |
				    AR724X_GPIO_FUNC_ETH_SWITCH_LED1_EN |
				    AR724X_GPIO_FUNC_ETH_SWITCH_LED2_EN |
				    AR724X_GPIO_FUNC_ETH_SWITCH_LED3_EN |
				    AR724X_GPIO_FUNC_ETH_SWITCH_LED4_EN);

	ath79_register_leds_gpio(-1, ARRAY_SIZE(whrhpg300n_leds_gpio),
				 whrhpg300n_leds_gpio);

	ath79_register_gpio_keys_polled(-1, WHRHPG300N_KEYS_POLL_INTERVAL,
					ARRAY_SIZE(whrhpg300n_gpio_keys),
					whrhpg300n_gpio_keys);

	/* eth0/eth1 MACs are derived from the base MAC (+0 and +1) */
	ath79_init_mac(ath79_eth0_data.mac_addr, mac, 0);
	ath79_init_mac(ath79_eth1_data.mac_addr, mac, 1);

	ath79_register_mdio(0, 0x0);

	/* LAN ports */
	ath79_register_eth(1);
	/* WAN port */
	ath79_register_eth(0);

	ap9x_pci_setup_wmac_led_pin(0, 1);

	ap91_pci_init(ee, mac);
}

/* Three Buffalo models share this board setup. */
MIPS_MACHINE(ATH79_MACH_WHR_HP_G300N, "WHR-HP-G300N", "Buffalo WHR-HP-G300N",
	     whrhpg300n_setup);

MIPS_MACHINE(ATH79_MACH_WHR_G301N, "WHR-G301N", "Buffalo WHR-G301N",
	     whrhpg300n_setup);

MIPS_MACHINE(ATH79_MACH_WHR_HP_GN, "WHR-HP-GN", "Buffalo WHR-HP-GN",
	     whrhpg300n_setup);
gpl-2.0
mukulsoni/android_kernel_samsung_ms013g-1
drivers/usb/core/urb.c
2726
28965
#include <linux/module.h> #include <linux/string.h> #include <linux/bitops.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/log2.h> #include <linux/usb.h> #include <linux/wait.h> #include <linux/usb/hcd.h> #define to_urb(d) container_of(d, struct urb, kref) static void urb_destroy(struct kref *kref) { struct urb *urb = to_urb(kref); if (urb->transfer_flags & URB_FREE_BUFFER) kfree(urb->transfer_buffer); kfree(urb); } /** * usb_init_urb - initializes a urb so that it can be used by a USB driver * @urb: pointer to the urb to initialize * * Initializes a urb so that the USB subsystem can use it properly. * * If a urb is created with a call to usb_alloc_urb() it is not * necessary to call this function. Only use this if you allocate the * space for a struct urb on your own. If you call this function, be * careful when freeing the memory for your urb that it is no longer in * use by the USB core. * * Only use this function if you _really_ understand what you are doing. */ void usb_init_urb(struct urb *urb) { if (urb) { memset(urb, 0, sizeof(*urb)); kref_init(&urb->kref); INIT_LIST_HEAD(&urb->anchor_list); } } EXPORT_SYMBOL_GPL(usb_init_urb); /** * usb_alloc_urb - creates a new urb for a USB driver to use * @iso_packets: number of iso packets for this urb * @mem_flags: the type of memory to allocate, see kmalloc() for a list of * valid options for this. * * Creates an urb for the USB driver to use, initializes a few internal * structures, incrementes the usage counter, and returns a pointer to it. * * If no memory is available, NULL is returned. * * If the driver want to use this urb for interrupt, control, or bulk * endpoints, pass '0' as the number of iso packets. * * The driver must call usb_free_urb() when it is finished with the urb. 
*/ struct urb *usb_alloc_urb(int iso_packets, gfp_t mem_flags) { struct urb *urb; urb = kmalloc(sizeof(struct urb) + iso_packets * sizeof(struct usb_iso_packet_descriptor), mem_flags); if (!urb) { printk(KERN_ERR "alloc_urb: kmalloc failed\n"); return NULL; } usb_init_urb(urb); return urb; } EXPORT_SYMBOL_GPL(usb_alloc_urb); /** * usb_free_urb - frees the memory used by a urb when all users of it are finished * @urb: pointer to the urb to free, may be NULL * * Must be called when a user of a urb is finished with it. When the last user * of the urb calls this function, the memory of the urb is freed. * * Note: The transfer buffer associated with the urb is not freed unless the * URB_FREE_BUFFER transfer flag is set. */ void usb_free_urb(struct urb *urb) { if (urb) kref_put(&urb->kref, urb_destroy); } EXPORT_SYMBOL_GPL(usb_free_urb); /** * usb_get_urb - increments the reference count of the urb * @urb: pointer to the urb to modify, may be NULL * * This must be called whenever a urb is transferred from a device driver to a * host controller driver. This allows proper reference counting to happen * for urbs. * * A pointer to the urb with the incremented reference counter is returned. 
*/ struct urb *usb_get_urb(struct urb *urb) { if (urb) kref_get(&urb->kref); return urb; } EXPORT_SYMBOL_GPL(usb_get_urb); /** * usb_anchor_urb - anchors an URB while it is processed * @urb: pointer to the urb to anchor * @anchor: pointer to the anchor * * This can be called to have access to URBs which are to be executed * without bothering to track them */ void usb_anchor_urb(struct urb *urb, struct usb_anchor *anchor) { unsigned long flags; spin_lock_irqsave(&anchor->lock, flags); usb_get_urb(urb); list_add_tail(&urb->anchor_list, &anchor->urb_list); urb->anchor = anchor; if (unlikely(anchor->poisoned)) { atomic_inc(&urb->reject); } spin_unlock_irqrestore(&anchor->lock, flags); } EXPORT_SYMBOL_GPL(usb_anchor_urb); /* Callers must hold anchor->lock */ static void __usb_unanchor_urb(struct urb *urb, struct usb_anchor *anchor) { urb->anchor = NULL; list_del(&urb->anchor_list); usb_put_urb(urb); if (list_empty(&anchor->urb_list)) wake_up(&anchor->wait); } /** * usb_unanchor_urb - unanchors an URB * @urb: pointer to the urb to anchor * * Call this to stop the system keeping track of this URB */ void usb_unanchor_urb(struct urb *urb) { unsigned long flags; struct usb_anchor *anchor; if (!urb) return; anchor = urb->anchor; if (!anchor) return; spin_lock_irqsave(&anchor->lock, flags); /* * At this point, we could be competing with another thread which * has the same intention. To protect the urb from being unanchored * twice, only the winner of the race gets the job. */ if (likely(anchor == urb->anchor)) __usb_unanchor_urb(urb, anchor); spin_unlock_irqrestore(&anchor->lock, flags); } EXPORT_SYMBOL_GPL(usb_unanchor_urb); /*-------------------------------------------------------------------*/ /** * usb_submit_urb - issue an asynchronous transfer request for an endpoint * @urb: pointer to the urb describing the request * @mem_flags: the type of memory to allocate, see kmalloc() for a list * of valid options for this. 
* * This submits a transfer request, and transfers control of the URB * describing that request to the USB subsystem. Request completion will * be indicated later, asynchronously, by calling the completion handler. * The three types of completion are success, error, and unlink * (a software-induced fault, also called "request cancellation"). * * URBs may be submitted in interrupt context. * * The caller must have correctly initialized the URB before submitting * it. Functions such as usb_fill_bulk_urb() and usb_fill_control_urb() are * available to ensure that most fields are correctly initialized, for * the particular kind of transfer, although they will not initialize * any transfer flags. * * Successful submissions return 0; otherwise this routine returns a * negative error number. If the submission is successful, the complete() * callback from the URB will be called exactly once, when the USB core and * Host Controller Driver (HCD) are finished with the URB. When the completion * function is called, control of the URB is returned to the device * driver which issued the request. The completion handler may then * immediately free or reuse that URB. * * With few exceptions, USB device drivers should never access URB fields * provided by usbcore or the HCD until its complete() is called. * The exceptions relate to periodic transfer scheduling. For both * interrupt and isochronous urbs, as part of successful URB submission * urb->interval is modified to reflect the actual transfer period used * (normally some power of two units). And for isochronous urbs, * urb->start_frame is modified to reflect when the URB's transfers were * scheduled to start. Not all isochronous transfer scheduling policies * will work, but most host controller drivers should easily handle ISO * queues going from now until 10-200 msec into the future. * * For control endpoints, the synchronous usb_control_msg() call is * often used (in non-interrupt context) instead of this call. 
* That is often used through convenience wrappers, for the requests * that are standardized in the USB 2.0 specification. For bulk * endpoints, a synchronous usb_bulk_msg() call is available. * * Request Queuing: * * URBs may be submitted to endpoints before previous ones complete, to * minimize the impact of interrupt latencies and system overhead on data * throughput. With that queuing policy, an endpoint's queue would never * be empty. This is required for continuous isochronous data streams, * and may also be required for some kinds of interrupt transfers. Such * queuing also maximizes bandwidth utilization by letting USB controllers * start work on later requests before driver software has finished the * completion processing for earlier (successful) requests. * * As of Linux 2.6, all USB endpoint transfer queues support depths greater * than one. This was previously a HCD-specific behavior, except for ISO * transfers. Non-isochronous endpoint queues are inactive during cleanup * after faults (transfer errors or cancellation). * * Reserved Bandwidth Transfers: * * Periodic transfers (interrupt or isochronous) are performed repeatedly, * using the interval specified in the urb. Submitting the first urb to * the endpoint reserves the bandwidth necessary to make those transfers. * If the USB subsystem can't allocate sufficient bandwidth to perform * the periodic request, submitting such a periodic request should fail. * * For devices under xHCI, the bandwidth is reserved at configuration time, or * when the alt setting is selected. If there is not enough bus bandwidth, the * configuration/alt setting request will fail. Therefore, submissions to * periodic endpoints on devices under xHCI should never fail due to bandwidth * constraints. * * Device drivers must explicitly request that repetition, by ensuring that * some URB is always on the endpoint's queue (except possibly for short * periods during completion callacks). 
When there is no longer an urb * queued, the endpoint's bandwidth reservation is canceled. This means * drivers can use their completion handlers to ensure they keep bandwidth * they need, by reinitializing and resubmitting the just-completed urb * until the driver longer needs that periodic bandwidth. * * Memory Flags: * * The general rules for how to decide which mem_flags to use * are the same as for kmalloc. There are four * different possible values; GFP_KERNEL, GFP_NOFS, GFP_NOIO and * GFP_ATOMIC. * * GFP_NOFS is not ever used, as it has not been implemented yet. * * GFP_ATOMIC is used when * (a) you are inside a completion handler, an interrupt, bottom half, * tasklet or timer, or * (b) you are holding a spinlock or rwlock (does not apply to * semaphores), or * (c) current->state != TASK_RUNNING, this is the case only after * you've changed it. * * GFP_NOIO is used in the block io path and error handling of storage * devices. * * All other situations use GFP_KERNEL. * * Some more specific rules for mem_flags can be inferred, such as * (1) start_xmit, timeout, and receive methods of network drivers must * use GFP_ATOMIC (they are called with a spinlock held); * (2) queuecommand methods of scsi drivers must use GFP_ATOMIC (also * called with a spinlock held); * (3) If you use a kernel thread with a network driver you must use * GFP_NOIO, unless (b) or (c) apply; * (4) after you have done a down() you can use GFP_KERNEL, unless (b) or (c) * apply or your are in a storage driver's block io path; * (5) USB probe and disconnect can use GFP_KERNEL unless (b) or (c) apply; and * (6) changing firmware on a running storage or net device uses * GFP_NOIO, unless b) or c) apply * */ int usb_submit_urb(struct urb *urb, gfp_t mem_flags) { int xfertype, max; struct usb_device *dev; struct usb_host_endpoint *ep; int is_out; if (!urb || urb->hcpriv || !urb->complete) return -EINVAL; dev = urb->dev; if ((!dev) || (dev->state < USB_STATE_UNAUTHENTICATED)) return -ENODEV; /* For 
now, get the endpoint from the pipe. Eventually drivers * will be required to set urb->ep directly and we will eliminate * urb->pipe. */ ep = usb_pipe_endpoint(dev, urb->pipe); if (!ep) return -ENOENT; urb->ep = ep; urb->status = -EINPROGRESS; urb->actual_length = 0; /* Lots of sanity checks, so HCDs can rely on clean data * and don't need to duplicate tests */ xfertype = usb_endpoint_type(&ep->desc); if (xfertype == USB_ENDPOINT_XFER_CONTROL) { struct usb_ctrlrequest *setup = (struct usb_ctrlrequest *) urb->setup_packet; if (!setup) return -ENOEXEC; is_out = !(setup->bRequestType & USB_DIR_IN) || !setup->wLength; } else { is_out = usb_endpoint_dir_out(&ep->desc); } /* Clear the internal flags and cache the direction for later use */ urb->transfer_flags &= ~(URB_DIR_MASK | URB_DMA_MAP_SINGLE | URB_DMA_MAP_PAGE | URB_DMA_MAP_SG | URB_MAP_LOCAL | URB_SETUP_MAP_SINGLE | URB_SETUP_MAP_LOCAL | URB_DMA_SG_COMBINED); urb->transfer_flags |= (is_out ? URB_DIR_OUT : URB_DIR_IN); if (xfertype != USB_ENDPOINT_XFER_CONTROL && dev->state < USB_STATE_CONFIGURED) return -ENODEV; max = usb_endpoint_maxp(&ep->desc); if (max <= 0) { dev_dbg(&dev->dev, "bogus endpoint ep%d%s in %s (bad maxpacket %d)\n", usb_endpoint_num(&ep->desc), is_out ? "out" : "in", __func__, max); return -EMSGSIZE; } /* periodic transfers limit size per frame/uframe, * but drivers only control those sizes for ISO. * while we're checking, initialize return status. */ if (xfertype == USB_ENDPOINT_XFER_ISOC) { int n, len; /* SuperSpeed isoc endpoints have up to 16 bursts of up to * 3 packets each */ if (dev->speed == USB_SPEED_SUPER) { int burst = 1 + ep->ss_ep_comp.bMaxBurst; int mult = USB_SS_MULT(ep->ss_ep_comp.bmAttributes); max *= burst; max *= mult; } /* "high bandwidth" mode, 1-3 packets/uframe? 
*/ if (dev->speed == USB_SPEED_HIGH) { int mult = 1 + ((max >> 11) & 0x03); max &= 0x07ff; max *= mult; } if (urb->number_of_packets <= 0) return -EINVAL; for (n = 0; n < urb->number_of_packets; n++) { len = urb->iso_frame_desc[n].length; if (len < 0 || len > max) return -EMSGSIZE; urb->iso_frame_desc[n].status = -EXDEV; urb->iso_frame_desc[n].actual_length = 0; } } /* the I/O buffer must be mapped/unmapped, except when length=0 */ if (urb->transfer_buffer_length > INT_MAX) return -EMSGSIZE; #ifdef DEBUG /* stuff that drivers shouldn't do, but which shouldn't * cause problems in HCDs if they get it wrong. */ { unsigned int allowed; static int pipetypes[4] = { PIPE_CONTROL, PIPE_ISOCHRONOUS, PIPE_BULK, PIPE_INTERRUPT }; /* Check that the pipe's type matches the endpoint's type */ if (usb_pipetype(urb->pipe) != pipetypes[xfertype]) dev_WARN(&dev->dev, "BOGUS urb xfer, pipe %x != type %x\n", usb_pipetype(urb->pipe), pipetypes[xfertype]); /* Check against a simple/standard policy */ allowed = (URB_NO_TRANSFER_DMA_MAP | URB_NO_INTERRUPT | URB_DIR_MASK | URB_FREE_BUFFER); switch (xfertype) { case USB_ENDPOINT_XFER_BULK: if (is_out) allowed |= URB_ZERO_PACKET; /* FALLTHROUGH */ case USB_ENDPOINT_XFER_CONTROL: allowed |= URB_NO_FSBR; /* only affects UHCI */ /* FALLTHROUGH */ default: /* all non-iso endpoints */ if (!is_out) allowed |= URB_SHORT_NOT_OK; break; case USB_ENDPOINT_XFER_ISOC: allowed |= URB_ISO_ASAP; break; } allowed &= urb->transfer_flags; /* warn if submitter gave bogus flags */ if (allowed != urb->transfer_flags) dev_WARN(&dev->dev, "BOGUS urb flags, %x --> %x\n", urb->transfer_flags, allowed); } #endif /* * Force periodic transfer intervals to be legal values that are * a power of two (so HCDs don't need to). * * FIXME want bus->{intr,iso}_sched_horizon values here. Each HC * supports different values... this uses EHCI/UHCI defaults (and * EHCI can use smaller non-default values). 
*/ switch (xfertype) { case USB_ENDPOINT_XFER_ISOC: case USB_ENDPOINT_XFER_INT: /* too small? */ switch (dev->speed) { case USB_SPEED_WIRELESS: if (urb->interval < 6) return -EINVAL; break; default: if (urb->interval <= 0) return -EINVAL; break; } /* too big? */ switch (dev->speed) { case USB_SPEED_SUPER: /* units are 125us */ /* Handle up to 2^(16-1) microframes */ if (urb->interval > (1 << 15)) return -EINVAL; max = 1 << 15; break; case USB_SPEED_WIRELESS: if (urb->interval > 16) return -EINVAL; break; case USB_SPEED_HIGH: /* units are microframes */ /* NOTE usb handles 2^15 */ if (urb->interval > (1024 * 8)) urb->interval = 1024 * 8; max = 1024 * 8; break; case USB_SPEED_FULL: /* units are frames/msec */ case USB_SPEED_LOW: if (xfertype == USB_ENDPOINT_XFER_INT) { if (urb->interval > 255) return -EINVAL; /* NOTE ohci only handles up to 32 */ max = 128; } else { if (urb->interval > 1024) urb->interval = 1024; /* NOTE usb and ohci handle up to 2^15 */ max = 1024; } break; default: return -EINVAL; } if (dev->speed != USB_SPEED_WIRELESS) { /* Round down to a power of 2, no more than max */ urb->interval = min(max, 1 << ilog2(urb->interval)); } } return usb_hcd_submit_urb(urb, mem_flags); } EXPORT_SYMBOL_GPL(usb_submit_urb); /*-------------------------------------------------------------------*/ /** * usb_unlink_urb - abort/cancel a transfer request for an endpoint * @urb: pointer to urb describing a previously submitted request, * may be NULL * * This routine cancels an in-progress request. URBs complete only once * per submission, and may be canceled only once per submission. * Successful cancellation means termination of @urb will be expedited * and the completion handler will be called with a status code * indicating that the request has been canceled (rather than any other * code). * * Drivers should not call this routine or related routines, such as * usb_kill_urb() or usb_unlink_anchored_urbs(), after their disconnect * method has returned. 
The disconnect function should synchronize with * a driver's I/O routines to insure that all URB-related activity has * completed before it returns. * * This request is asynchronous, however the HCD might call the ->complete() * callback during unlink. Therefore when drivers call usb_unlink_urb(), they * must not hold any locks that may be taken by the completion function. * Success is indicated by returning -EINPROGRESS, at which time the URB will * probably not yet have been given back to the device driver. When it is * eventually called, the completion function will see @urb->status == * -ECONNRESET. * Failure is indicated by usb_unlink_urb() returning any other value. * Unlinking will fail when @urb is not currently "linked" (i.e., it was * never submitted, or it was unlinked before, or the hardware is already * finished with it), even if the completion handler has not yet run. * * The URB must not be deallocated while this routine is running. In * particular, when a driver calls this routine, it must insure that the * completion handler cannot deallocate the URB. * * Unlinking and Endpoint Queues: * * [The behaviors and guarantees described below do not apply to virtual * root hubs but only to endpoint queues for physical USB devices.] * * Host Controller Drivers (HCDs) place all the URBs for a particular * endpoint in a queue. Normally the queue advances as the controller * hardware processes each request. But when an URB terminates with an * error its queue generally stops (see below), at least until that URB's * completion routine returns. It is guaranteed that a stopped queue * will not restart until all its unlinked URBs have been fully retired, * with their completion routines run, even if that's not until some time * after the original completion handler returns. The same behavior and * guarantee apply when an URB terminates because it was unlinked. 
 *
 * Bulk and interrupt endpoint queues are guaranteed to stop whenever an
 * URB terminates with any sort of error, including -ECONNRESET, -ENOENT,
 * and -EREMOTEIO.  Control endpoint queues behave the same way except
 * that they are not guaranteed to stop for -EREMOTEIO errors.  Queues
 * for isochronous endpoints are treated differently, because they must
 * advance at fixed rates.  Such queues do not stop when an URB
 * encounters an error or is unlinked.  An unlinked isochronous URB may
 * leave a gap in the stream of packets; it is undefined whether such
 * gaps can be filled in.
 *
 * Note that early termination of an URB because a short packet was
 * received will generate a -EREMOTEIO error if and only if the
 * URB_SHORT_NOT_OK flag is set.  By setting this flag, USB device
 * drivers can build deep queues for large or complex bulk transfers
 * and clean them up reliably after any sort of aborted transfer by
 * unlinking all pending URBs at the first fault.
 *
 * When a control URB terminates with an error other than -EREMOTEIO, it
 * is quite likely that the status stage of the transfer will not take
 * place.
 */
int usb_unlink_urb(struct urb *urb)
{
	if (!urb)
		return -EINVAL;
	/* no device: nothing to unlink from */
	if (!urb->dev)
		return -ENODEV;
	/* no endpoint: the URB is not currently "linked" (see above) */
	if (!urb->ep)
		return -EIDRM;
	return usb_hcd_unlink_urb(urb, -ECONNRESET);
}
EXPORT_SYMBOL_GPL(usb_unlink_urb);

/**
 * usb_kill_urb - cancel a transfer request and wait for it to finish
 * @urb: pointer to URB describing a previously submitted request,
 *	may be NULL
 *
 * This routine cancels an in-progress request.  It is guaranteed that
 * upon return all completion handlers will have finished and the URB
 * will be totally idle and available for reuse.  These features make
 * this an ideal way to stop I/O in a disconnect() callback or close()
 * function.  If the request has not already finished or been unlinked
 * the completion handler will see urb->status == -ENOENT.
 *
 * While the routine is running, attempts to resubmit the URB will fail
 * with error -EPERM.
 * Thus even if the URB's completion handler always
 * tries to resubmit, it will not succeed and the URB will become idle.
 *
 * The URB must not be deallocated while this routine is running.  In
 * particular, when a driver calls this routine, it must insure that the
 * completion handler cannot deallocate the URB.
 *
 * This routine may not be used in an interrupt context (such as a bottom
 * half or a completion handler), or when holding a spinlock, or in other
 * situations where the caller can't schedule().
 *
 * This routine should not be called by a driver after its disconnect
 * method has returned.
 */
void usb_kill_urb(struct urb *urb)
{
	might_sleep();
	if (!(urb && urb->dev && urb->ep))
		return;
	/* Raise ->reject so resubmission attempts fail with -EPERM (see the
	 * kernel-doc above) while we wait for the URB to go idle. */
	atomic_inc(&urb->reject);

	usb_hcd_unlink_urb(urb, -ENOENT);
	/* Sleep until the HCD has fully retired the URB (->use_count == 0). */
	wait_event(usb_kill_urb_queue, atomic_read(&urb->use_count) == 0);

	/* Unlike usb_poison_urb(), drop ->reject again so the URB is
	 * available for reuse. */
	atomic_dec(&urb->reject);
}
EXPORT_SYMBOL_GPL(usb_kill_urb);

/**
 * usb_poison_urb - reliably kill a transfer and prevent further use of an URB
 * @urb: pointer to URB describing a previously submitted request,
 *	may be NULL
 *
 * This routine cancels an in-progress request.  It is guaranteed that
 * upon return all completion handlers will have finished and the URB
 * will be totally idle and cannot be reused.  These features make
 * this an ideal way to stop I/O in a disconnect() callback.
 * If the request has not already finished or been unlinked
 * the completion handler will see urb->status == -ENOENT.
 *
 * After and while the routine runs, attempts to resubmit the URB will fail
 * with error -EPERM.  Thus even if the URB's completion handler always
 * tries to resubmit, it will not succeed and the URB will become idle.
 *
 * The URB must not be deallocated while this routine is running.  In
 * particular, when a driver calls this routine, it must insure that the
 * completion handler cannot deallocate the URB.
 *
 * This routine may not be used in an interrupt context (such as a bottom
 * half or a completion handler), or when holding a spinlock, or in other
 * situations where the caller can't schedule().
 *
 * This routine should not be called by a driver after its disconnect
 * method has returned.
 */
void usb_poison_urb(struct urb *urb)
{
	might_sleep();
	if (!(urb && urb->dev && urb->ep))
		return;
	/* ->reject stays elevated on return, so resubmission keeps failing
	 * until usb_unpoison_urb() is called. */
	atomic_inc(&urb->reject);

	usb_hcd_unlink_urb(urb, -ENOENT);
	wait_event(usb_kill_urb_queue, atomic_read(&urb->use_count) == 0);
}
EXPORT_SYMBOL_GPL(usb_poison_urb);

/* Reverse usb_poison_urb(): drop the ->reject count so the URB can be
 * submitted again. */
void usb_unpoison_urb(struct urb *urb)
{
	if (!urb)
		return;

	atomic_dec(&urb->reject);
}
EXPORT_SYMBOL_GPL(usb_unpoison_urb);

/**
 * usb_kill_anchored_urbs - cancel transfer requests en masse
 * @anchor: anchor the requests are bound to
 *
 * this allows all outstanding URBs to be killed starting
 * from the back of the queue
 *
 * This routine should not be called by a driver after its disconnect
 * method has returned.
 */
void usb_kill_anchored_urbs(struct usb_anchor *anchor)
{
	struct urb *victim;

	spin_lock_irq(&anchor->lock);
	while (!list_empty(&anchor->urb_list)) {
		victim = list_entry(anchor->urb_list.prev, struct urb,
				    anchor_list);
		/* we must make sure the URB isn't freed before we kill it */
		usb_get_urb(victim);
		/* usb_kill_urb() sleeps, so the anchor lock is dropped around
		 * it; the list is re-checked after relocking. */
		spin_unlock_irq(&anchor->lock);
		/* this will unanchor the URB */
		usb_kill_urb(victim);
		usb_put_urb(victim);
		spin_lock_irq(&anchor->lock);
	}
	spin_unlock_irq(&anchor->lock);
}
EXPORT_SYMBOL_GPL(usb_kill_anchored_urbs);

/**
 * usb_poison_anchored_urbs - cease all traffic from an anchor
 * @anchor: anchor the requests are bound to
 *
 * this allows all outstanding URBs to be poisoned starting
 * from the back of the queue.  Newly added URBs will also be
 * poisoned
 *
 * This routine should not be called by a driver after its disconnect
 * method has returned.
 */
void usb_poison_anchored_urbs(struct usb_anchor *anchor)
{
	struct urb *victim;

	spin_lock_irq(&anchor->lock);
	/* Mark the anchor first so URBs anchored from now on are also
	 * poisoned, not only the ones already on the list. */
	anchor->poisoned = 1;
	while (!list_empty(&anchor->urb_list)) {
		victim = list_entry(anchor->urb_list.prev, struct urb,
				    anchor_list);
		/* we must make sure the URB isn't freed before we kill it */
		usb_get_urb(victim);
		/* usb_poison_urb() sleeps, so drop the lock around it */
		spin_unlock_irq(&anchor->lock);
		/* this will unanchor the URB */
		usb_poison_urb(victim);
		usb_put_urb(victim);
		spin_lock_irq(&anchor->lock);
	}
	spin_unlock_irq(&anchor->lock);
}
EXPORT_SYMBOL_GPL(usb_poison_anchored_urbs);

/**
 * usb_unpoison_anchored_urbs - let an anchor be used successfully again
 * @anchor: anchor the requests are bound to
 *
 * Reverses the effect of usb_poison_anchored_urbs
 * the anchor can be used normally after it returns
 */
void usb_unpoison_anchored_urbs(struct usb_anchor *anchor)
{
	unsigned long flags;
	struct urb *lazarus;

	spin_lock_irqsave(&anchor->lock, flags);
	list_for_each_entry(lazarus, &anchor->urb_list, anchor_list) {
		usb_unpoison_urb(lazarus);
	}
	/* allow new URBs to be anchored again */
	anchor->poisoned = 0;
	spin_unlock_irqrestore(&anchor->lock, flags);
}
EXPORT_SYMBOL_GPL(usb_unpoison_anchored_urbs);

/**
 * usb_unlink_anchored_urbs - asynchronously cancel transfer requests en masse
 * @anchor: anchor the requests are bound to
 *
 * this allows all outstanding URBs to be unlinked starting
 * from the back of the queue.  This function is asynchronous.
 * The unlinking is just triggered.  It may happen after this
 * function has returned.
 *
 * This routine should not be called by a driver after its disconnect
 * method has returned.
 */
void usb_unlink_anchored_urbs(struct usb_anchor *anchor)
{
	struct urb *victim;

	/* usb_get_from_anchor() unanchors and returns the oldest URB, so
	 * this loop drains the anchor without holding its lock here. */
	while ((victim = usb_get_from_anchor(anchor)) != NULL) {
		usb_unlink_urb(victim);
		usb_put_urb(victim);
	}
}
EXPORT_SYMBOL_GPL(usb_unlink_anchored_urbs);

/**
 * usb_wait_anchor_empty_timeout - wait for an anchor to be unused
 * @anchor: the anchor you want to become unused
 * @timeout: how long you are willing to wait in milliseconds
 *
 * Call this if you want to be sure all an anchor's
 * URBs have finished
 */
int usb_wait_anchor_empty_timeout(struct usb_anchor *anchor,
				  unsigned int timeout)
{
	return wait_event_timeout(anchor->wait, list_empty(&anchor->urb_list),
				  msecs_to_jiffies(timeout));
}
EXPORT_SYMBOL_GPL(usb_wait_anchor_empty_timeout);

/**
 * usb_get_from_anchor - get an anchor's oldest urb
 * @anchor: the anchor whose urb you want
 *
 * this will take the oldest urb from an anchor,
 * unanchor and return it
 */
struct urb *usb_get_from_anchor(struct usb_anchor *anchor)
{
	struct urb *victim;
	unsigned long flags;

	spin_lock_irqsave(&anchor->lock, flags);
	if (!list_empty(&anchor->urb_list)) {
		/* the oldest entry is at the head of the list */
		victim = list_entry(anchor->urb_list.next, struct urb,
				    anchor_list);
		/* take a reference for the caller before unanchoring */
		usb_get_urb(victim);
		__usb_unanchor_urb(victim, anchor);
	} else {
		victim = NULL;
	}
	spin_unlock_irqrestore(&anchor->lock, flags);

	return victim;
}
EXPORT_SYMBOL_GPL(usb_get_from_anchor);

/**
 * usb_scuttle_anchored_urbs - unanchor all an anchor's urbs
 * @anchor: the anchor whose urbs you want to unanchor
 *
 * use this to get rid of all an anchor's urbs
 */
void usb_scuttle_anchored_urbs(struct usb_anchor *anchor)
{
	struct urb *victim;
	unsigned long flags;

	spin_lock_irqsave(&anchor->lock, flags);
	while (!list_empty(&anchor->urb_list)) {
		victim = list_entry(anchor->urb_list.prev, struct urb,
				    anchor_list);
		__usb_unanchor_urb(victim, anchor);
	}
	spin_unlock_irqrestore(&anchor->lock, flags);
}
EXPORT_SYMBOL_GPL(usb_scuttle_anchored_urbs);

/**
 * usb_anchor_empty - is an anchor empty
 * @anchor: the anchor you want to query
 *
 * returns 1 if the anchor
has no urbs associated with it */ int usb_anchor_empty(struct usb_anchor *anchor) { return list_empty(&anchor->urb_list); } EXPORT_SYMBOL_GPL(usb_anchor_empty);
gpl-2.0
BanBxda/Linux-3.0.X
drivers/of/irq.c
2726
10923
/*
 * Derived from arch/i386/kernel/irq.c
 * Copyright (C) 1992 Linus Torvalds
 * Adapted from arch/i386 by Gary Thomas
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Updated and modified by Cort Dougan <cort@fsmlabs.com>
 * Copyright (C) 1996-2001 Cort Dougan
 * Adapted for Power Macintosh by Paul Mackerras
 * Copyright (C) 1996 Paul Mackerras (paulus@cs.anu.edu.au)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * This file contains the code used to make IRQ descriptions in the
 * device tree to actual irq numbers on an interrupt controller
 * driver.
 */
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/string.h>

/* For archs that don't support NO_IRQ (such as x86), provide a dummy value */
#ifndef NO_IRQ
#define NO_IRQ 0
#endif

/**
 * irq_of_parse_and_map - Parse and map an interrupt into linux virq space
 * @dev: Device node of the device whose interrupt is to be mapped
 * @index: Index of the interrupt to map
 *
 * This function is a wrapper that chains of_irq_map_one() and
 * irq_create_of_mapping() to make things easier to callers
 */
unsigned int irq_of_parse_and_map(struct device_node *dev, int index)
{
	struct of_irq oirq;

	/* Resolve the controller node and raw specifier first; map failure
	 * is reported to the caller as NO_IRQ. */
	if (of_irq_map_one(dev, index, &oirq))
		return NO_IRQ;

	return irq_create_of_mapping(oirq.controller, oirq.specifier,
				     oirq.size);
}
EXPORT_SYMBOL_GPL(irq_of_parse_and_map);

/**
 * of_irq_find_parent - Given a device node, find its interrupt parent node
 * @child: pointer to device node
 *
 * Returns a pointer to the interrupt parent node, or NULL if the interrupt
 * parent could not be determined.
*/ struct device_node *of_irq_find_parent(struct device_node *child) { struct device_node *p; const __be32 *parp; if (!of_node_get(child)) return NULL; do { parp = of_get_property(child, "interrupt-parent", NULL); if (parp == NULL) p = of_get_parent(child); else { if (of_irq_workarounds & OF_IMAP_NO_PHANDLE) p = of_node_get(of_irq_dflt_pic); else p = of_find_node_by_phandle(be32_to_cpup(parp)); } of_node_put(child); child = p; } while (p && of_get_property(p, "#interrupt-cells", NULL) == NULL); return p; } /** * of_irq_map_raw - Low level interrupt tree parsing * @parent: the device interrupt parent * @intspec: interrupt specifier ("interrupts" property of the device) * @ointsize: size of the passed in interrupt specifier * @addr: address specifier (start of "reg" property of the device) * @out_irq: structure of_irq filled by this function * * Returns 0 on success and a negative number on error * * This function is a low-level interrupt tree walking function. It * can be used to do a partial walk with synthetized reg and interrupts * properties, for example when resolving PCI interrupts when no device * node exist for the parent. */ int of_irq_map_raw(struct device_node *parent, const __be32 *intspec, u32 ointsize, const __be32 *addr, struct of_irq *out_irq) { struct device_node *ipar, *tnode, *old = NULL, *newpar = NULL; const __be32 *tmp, *imap, *imask; u32 intsize = 1, addrsize, newintsize = 0, newaddrsize = 0; int imaplen, match, i; pr_debug("of_irq_map_raw: par=%s,intspec=[0x%08x 0x%08x...],ointsize=%d\n", parent->full_name, be32_to_cpup(intspec), be32_to_cpup(intspec + 1), ointsize); ipar = of_node_get(parent); /* First get the #interrupt-cells property of the current cursor * that tells us how to interpret the passed-in intspec. 
If there * is none, we are nice and just walk up the tree */ do { tmp = of_get_property(ipar, "#interrupt-cells", NULL); if (tmp != NULL) { intsize = be32_to_cpu(*tmp); break; } tnode = ipar; ipar = of_irq_find_parent(ipar); of_node_put(tnode); } while (ipar); if (ipar == NULL) { pr_debug(" -> no parent found !\n"); goto fail; } pr_debug("of_irq_map_raw: ipar=%s, size=%d\n", ipar->full_name, intsize); if (ointsize != intsize) return -EINVAL; /* Look for this #address-cells. We have to implement the old linux * trick of looking for the parent here as some device-trees rely on it */ old = of_node_get(ipar); do { tmp = of_get_property(old, "#address-cells", NULL); tnode = of_get_parent(old); of_node_put(old); old = tnode; } while (old && tmp == NULL); of_node_put(old); old = NULL; addrsize = (tmp == NULL) ? 2 : be32_to_cpu(*tmp); pr_debug(" -> addrsize=%d\n", addrsize); /* Now start the actual "proper" walk of the interrupt tree */ while (ipar != NULL) { /* Now check if cursor is an interrupt-controller and if it is * then we are done */ if (of_get_property(ipar, "interrupt-controller", NULL) != NULL) { pr_debug(" -> got it !\n"); for (i = 0; i < intsize; i++) out_irq->specifier[i] = of_read_number(intspec +i, 1); out_irq->size = intsize; out_irq->controller = ipar; of_node_put(old); return 0; } /* Now look for an interrupt-map */ imap = of_get_property(ipar, "interrupt-map", &imaplen); /* No interrupt map, check for an interrupt parent */ if (imap == NULL) { pr_debug(" -> no map, getting parent\n"); newpar = of_irq_find_parent(ipar); goto skiplevel; } imaplen /= sizeof(u32); /* Look for a mask */ imask = of_get_property(ipar, "interrupt-map-mask", NULL); /* If we were passed no "reg" property and we attempt to parse * an interrupt-map, then #address-cells must be 0. * Fail if it's not. 
*/ if (addr == NULL && addrsize != 0) { pr_debug(" -> no reg passed in when needed !\n"); goto fail; } /* Parse interrupt-map */ match = 0; while (imaplen > (addrsize + intsize + 1) && !match) { /* Compare specifiers */ match = 1; for (i = 0; i < addrsize && match; ++i) { u32 mask = imask ? imask[i] : 0xffffffffu; match = ((addr[i] ^ imap[i]) & mask) == 0; } for (; i < (addrsize + intsize) && match; ++i) { u32 mask = imask ? imask[i] : 0xffffffffu; match = ((intspec[i-addrsize] ^ imap[i]) & mask) == 0; } imap += addrsize + intsize; imaplen -= addrsize + intsize; pr_debug(" -> match=%d (imaplen=%d)\n", match, imaplen); /* Get the interrupt parent */ if (of_irq_workarounds & OF_IMAP_NO_PHANDLE) newpar = of_node_get(of_irq_dflt_pic); else newpar = of_find_node_by_phandle(be32_to_cpup(imap)); imap++; --imaplen; /* Check if not found */ if (newpar == NULL) { pr_debug(" -> imap parent not found !\n"); goto fail; } /* Get #interrupt-cells and #address-cells of new * parent */ tmp = of_get_property(newpar, "#interrupt-cells", NULL); if (tmp == NULL) { pr_debug(" -> parent lacks #interrupt-cells!\n"); goto fail; } newintsize = be32_to_cpu(*tmp); tmp = of_get_property(newpar, "#address-cells", NULL); newaddrsize = (tmp == NULL) ? 0 : be32_to_cpu(*tmp); pr_debug(" -> newintsize=%d, newaddrsize=%d\n", newintsize, newaddrsize); /* Check for malformed properties */ if (imaplen < (newaddrsize + newintsize)) goto fail; imap += newaddrsize + newintsize; imaplen -= newaddrsize + newintsize; pr_debug(" -> imaplen=%d\n", imaplen); } if (!match) goto fail; of_node_put(old); old = of_node_get(newpar); addrsize = newaddrsize; intsize = newintsize; intspec = imap - intsize; addr = intspec - addrsize; skiplevel: /* Iterate again with new parent */ pr_debug(" -> new parent: %s\n", newpar ? 
newpar->full_name : "<>"); of_node_put(ipar); ipar = newpar; newpar = NULL; } fail: of_node_put(ipar); of_node_put(old); of_node_put(newpar); return -EINVAL; } EXPORT_SYMBOL_GPL(of_irq_map_raw); /** * of_irq_map_one - Resolve an interrupt for a device * @device: the device whose interrupt is to be resolved * @index: index of the interrupt to resolve * @out_irq: structure of_irq filled by this function * * This function resolves an interrupt, walking the tree, for a given * device-tree node. It's the high level pendant to of_irq_map_raw(). */ int of_irq_map_one(struct device_node *device, int index, struct of_irq *out_irq) { struct device_node *p; const __be32 *intspec, *tmp, *addr; u32 intsize, intlen; int res = -EINVAL; pr_debug("of_irq_map_one: dev=%s, index=%d\n", device->full_name, index); /* OldWorld mac stuff is "special", handle out of line */ if (of_irq_workarounds & OF_IMAP_OLDWORLD_MAC) return of_irq_map_oldworld(device, index, out_irq); /* Get the interrupts property */ intspec = of_get_property(device, "interrupts", &intlen); if (intspec == NULL) return -EINVAL; intlen /= sizeof(*intspec); pr_debug(" intspec=%d intlen=%d\n", be32_to_cpup(intspec), intlen); /* Get the reg property (if any) */ addr = of_get_property(device, "reg", NULL); /* Look for the interrupt parent. 
 */
	p = of_irq_find_parent(device);
	if (p == NULL)
		return -EINVAL;

	/* Get size of interrupt specifier */
	tmp = of_get_property(p, "#interrupt-cells", NULL);
	if (tmp == NULL)
		goto out;
	intsize = be32_to_cpu(*tmp);

	pr_debug(" intsize=%d intlen=%d\n", intsize, intlen);

	/* Check index */
	if ((index + 1) * intsize > intlen)
		goto out;

	/* Get new specifier and map it */
	res = of_irq_map_raw(p, intspec + index * intsize, intsize,
			     addr, out_irq);
 out:
	of_node_put(p);
	return res;
}
EXPORT_SYMBOL_GPL(of_irq_map_one);

/**
 * of_irq_to_resource - Decode a node's IRQ and return it as a resource
 * @dev: pointer to device tree node
 * @index: zero-based index of the irq
 * @r: pointer to resource structure to return result into.
 */
int of_irq_to_resource(struct device_node *dev, int index, struct resource *r)
{
	int irq = irq_of_parse_and_map(dev, index);

	/* Only dereference the resource if both the
	 * resource and the irq are valid. */
	if (r && irq != NO_IRQ) {
		r->start = r->end = irq;
		r->flags = IORESOURCE_IRQ;
		r->name = dev->full_name;
	}
	return irq;
}
EXPORT_SYMBOL_GPL(of_irq_to_resource);

/**
 * of_irq_count - Count the number of IRQs a node uses
 * @dev: pointer to device tree node
 */
int of_irq_count(struct device_node *dev)
{
	int nr = 0;

	/* Probe successive indices until one fails to map. */
	while (of_irq_to_resource(dev, nr, NULL) != NO_IRQ)
		nr++;

	return nr;
}

/**
 * of_irq_to_resource_table - Fill in resource table with node's IRQ info
 * @dev: pointer to device tree node
 * @res: array of resources to fill in
 * @nr_irqs: the number of IRQs (and upper bound for num of @res elements)
 *
 * Returns the size of the filled in table (up to @nr_irqs).
 */
int of_irq_to_resource_table(struct device_node *dev, struct resource *res,
			     int nr_irqs)
{
	int i;

	/* Stop at the first index that does not map to a valid IRQ. */
	for (i = 0; i < nr_irqs; i++, res++)
		if (of_irq_to_resource(dev, i, res) == NO_IRQ)
			break;

	return i;
}
gpl-2.0
sbreen94/Zeus_d2spr
arch/sh/kernel/setup.c
2726
7930
/* * arch/sh/kernel/setup.c * * This file handles the architecture-dependent parts of initialization * * Copyright (C) 1999 Niibe Yutaka * Copyright (C) 2002 - 2010 Paul Mundt */ #include <linux/screen_info.h> #include <linux/ioport.h> #include <linux/init.h> #include <linux/initrd.h> #include <linux/bootmem.h> #include <linux/console.h> #include <linux/root_dev.h> #include <linux/utsname.h> #include <linux/nodemask.h> #include <linux/cpu.h> #include <linux/pfn.h> #include <linux/fs.h> #include <linux/mm.h> #include <linux/kexec.h> #include <linux/module.h> #include <linux/smp.h> #include <linux/err.h> #include <linux/crash_dump.h> #include <linux/mmzone.h> #include <linux/clk.h> #include <linux/delay.h> #include <linux/platform_device.h> #include <linux/memblock.h> #include <asm/uaccess.h> #include <asm/io.h> #include <asm/page.h> #include <asm/elf.h> #include <asm/sections.h> #include <asm/irq.h> #include <asm/setup.h> #include <asm/clock.h> #include <asm/smp.h> #include <asm/mmu_context.h> #include <asm/mmzone.h> #include <asm/sparsemem.h> /* * Initialize loops_per_jiffy as 10000000 (1000MIPS). * This value will be used at the very early stage of serial setup. * The bigger value means no problem. */ struct sh_cpuinfo cpu_data[NR_CPUS] __read_mostly = { [0] = { .type = CPU_SH_NONE, .family = CPU_FAMILY_UNKNOWN, .loops_per_jiffy = 10000000, .phys_bits = MAX_PHYSMEM_BITS, }, }; EXPORT_SYMBOL(cpu_data); /* * The machine vector. First entry in .machvec.init, or clobbered by * sh_mv= on the command line, prior to .machvec.init teardown. 
*/ struct sh_machine_vector sh_mv = { .mv_name = "generic", }; EXPORT_SYMBOL(sh_mv); #ifdef CONFIG_VT struct screen_info screen_info; #endif extern int root_mountflags; #define RAMDISK_IMAGE_START_MASK 0x07FF #define RAMDISK_PROMPT_FLAG 0x8000 #define RAMDISK_LOAD_FLAG 0x4000 static char __initdata command_line[COMMAND_LINE_SIZE] = { 0, }; static struct resource code_resource = { .name = "Kernel code", .flags = IORESOURCE_BUSY | IORESOURCE_MEM, }; static struct resource data_resource = { .name = "Kernel data", .flags = IORESOURCE_BUSY | IORESOURCE_MEM, }; static struct resource bss_resource = { .name = "Kernel bss", .flags = IORESOURCE_BUSY | IORESOURCE_MEM, }; unsigned long memory_start; EXPORT_SYMBOL(memory_start); unsigned long memory_end = 0; EXPORT_SYMBOL(memory_end); unsigned long memory_limit = 0; static struct resource mem_resources[MAX_NUMNODES]; int l1i_cache_shape, l1d_cache_shape, l2_cache_shape; static int __init early_parse_mem(char *p) { if (!p) return 1; memory_limit = PAGE_ALIGN(memparse(p, &p)); pr_notice("Memory limited to %ldMB\n", memory_limit >> 20); return 0; } early_param("mem", early_parse_mem); void __init check_for_initrd(void) { #ifdef CONFIG_BLK_DEV_INITRD unsigned long start, end; /* * Check for the rare cases where boot loaders adhere to the boot * ABI. 
*/ if (!LOADER_TYPE || !INITRD_START || !INITRD_SIZE) goto disable; start = INITRD_START + __MEMORY_START; end = start + INITRD_SIZE; if (unlikely(end <= start)) goto disable; if (unlikely(start & ~PAGE_MASK)) { pr_err("initrd must be page aligned\n"); goto disable; } if (unlikely(start < __MEMORY_START)) { pr_err("initrd start (%08lx) < __MEMORY_START(%x)\n", start, __MEMORY_START); goto disable; } if (unlikely(end > memblock_end_of_DRAM())) { pr_err("initrd extends beyond end of memory " "(0x%08lx > 0x%08lx)\ndisabling initrd\n", end, (unsigned long)memblock_end_of_DRAM()); goto disable; } /* * If we got this far in spite of the boot loader's best efforts * to the contrary, assume we actually have a valid initrd and * fix up the root dev. */ ROOT_DEV = Root_RAM0; /* * Address sanitization */ initrd_start = (unsigned long)__va(start); initrd_end = initrd_start + INITRD_SIZE; memblock_reserve(__pa(initrd_start), INITRD_SIZE); return; disable: pr_info("initrd disabled\n"); initrd_start = initrd_end = 0; #endif } void __cpuinit calibrate_delay(void) { struct clk *clk = clk_get(NULL, "cpu_clk"); if (IS_ERR(clk)) panic("Need a sane CPU clock definition!"); loops_per_jiffy = (clk_get_rate(clk) >> 1) / HZ; printk(KERN_INFO "Calibrating delay loop (skipped)... 
" "%lu.%02lu BogoMIPS PRESET (lpj=%lu)\n", loops_per_jiffy/(500000/HZ), (loops_per_jiffy/(5000/HZ)) % 100, loops_per_jiffy); } void __init __add_active_range(unsigned int nid, unsigned long start_pfn, unsigned long end_pfn) { struct resource *res = &mem_resources[nid]; unsigned long start, end; WARN_ON(res->name); /* max one active range per node for now */ start = start_pfn << PAGE_SHIFT; end = end_pfn << PAGE_SHIFT; res->name = "System RAM"; res->start = start; res->end = end - 1; res->flags = IORESOURCE_MEM | IORESOURCE_BUSY; if (request_resource(&iomem_resource, res)) { pr_err("unable to request memory_resource 0x%lx 0x%lx\n", start_pfn, end_pfn); return; } /* * We don't know which RAM region contains kernel data, * so we try it repeatedly and let the resource manager * test it. */ request_resource(res, &code_resource); request_resource(res, &data_resource); request_resource(res, &bss_resource); /* * Also make sure that there is a PMB mapping that covers this * range before we attempt to activate it, to avoid reset by MMU. * We can hit this path with NUMA or memory hot-add. */ pmb_bolt_mapping((unsigned long)__va(start), start, end - start, PAGE_KERNEL); add_active_range(nid, start_pfn, end_pfn); } void __init __weak plat_early_device_setup(void) { } void __init setup_arch(char **cmdline_p) { enable_mmu(); ROOT_DEV = old_decode_dev(ORIG_ROOT_DEV); printk(KERN_NOTICE "Boot params:\n" "... MOUNT_ROOT_RDONLY - %08lx\n" "... RAMDISK_FLAGS - %08lx\n" "... ORIG_ROOT_DEV - %08lx\n" "... LOADER_TYPE - %08lx\n" "... INITRD_START - %08lx\n" "... 
INITRD_SIZE - %08lx\n", MOUNT_ROOT_RDONLY, RAMDISK_FLAGS, ORIG_ROOT_DEV, LOADER_TYPE, INITRD_START, INITRD_SIZE); #ifdef CONFIG_BLK_DEV_RAM rd_image_start = RAMDISK_FLAGS & RAMDISK_IMAGE_START_MASK; rd_prompt = ((RAMDISK_FLAGS & RAMDISK_PROMPT_FLAG) != 0); rd_doload = ((RAMDISK_FLAGS & RAMDISK_LOAD_FLAG) != 0); #endif if (!MOUNT_ROOT_RDONLY) root_mountflags &= ~MS_RDONLY; init_mm.start_code = (unsigned long) _text; init_mm.end_code = (unsigned long) _etext; init_mm.end_data = (unsigned long) _edata; init_mm.brk = (unsigned long) _end; code_resource.start = virt_to_phys(_text); code_resource.end = virt_to_phys(_etext)-1; data_resource.start = virt_to_phys(_etext); data_resource.end = virt_to_phys(_edata)-1; bss_resource.start = virt_to_phys(__bss_start); bss_resource.end = virt_to_phys(_ebss)-1; #ifdef CONFIG_CMDLINE_OVERWRITE strlcpy(command_line, CONFIG_CMDLINE, sizeof(command_line)); #else strlcpy(command_line, COMMAND_LINE, sizeof(command_line)); #ifdef CONFIG_CMDLINE_EXTEND strlcat(command_line, " ", sizeof(command_line)); strlcat(command_line, CONFIG_CMDLINE, sizeof(command_line)); #endif #endif /* Save unparsed command line copy for /proc/cmdline */ memcpy(boot_command_line, command_line, COMMAND_LINE_SIZE); *cmdline_p = command_line; parse_early_param(); plat_early_device_setup(); sh_mv_setup(); /* Let earlyprintk output early console messages */ early_platform_driver_probe("earlyprintk", 1, 1); paging_init(); #ifdef CONFIG_DUMMY_CONSOLE conswitchp = &dummy_con; #endif /* Perform the machine specific initialisation */ if (likely(sh_mv.mv_setup)) sh_mv.mv_setup(cmdline_p); plat_smp_setup(); } /* processor boot mode configuration */ int generic_mode_pins(void) { pr_warning("generic_mode_pins(): missing mode pin configuration\n"); return 0; } int test_mode_pin(int pin) { return sh_mv.mv_mode_pins() & pin; }
gpl-2.0
zeroprobe/ZeroGolf-Overclocked
kernel/sched_stoptask.c
2726
2148
/*
 * stop-task scheduling class.
 *
 * The stop task is the highest priority task in the system, it preempts
 * everything and will be preempted by nothing.
 *
 * See kernel/stop_machine.c
 */

#ifdef CONFIG_SMP
static int
select_task_rq_stop(struct task_struct *p, int sd_flag, int flags)
{
	return task_cpu(p); /* stop tasks never migrate */
}
#endif /* CONFIG_SMP */

static void
check_preempt_curr_stop(struct rq *rq, struct task_struct *p, int flags)
{
	/* we're never preempted */
}

/* Return the per-CPU stop task when it is runnable, NULL otherwise. */
static struct task_struct *pick_next_task_stop(struct rq *rq)
{
	struct task_struct *stop = rq->stop;

	if (stop && stop->on_rq)
		return stop;

	return NULL;
}

/* Enqueue/dequeue are deliberately empty: pick_next_task_stop() looks at
 * rq->stop directly rather than at any runqueue list. */
static void
enqueue_task_stop(struct rq *rq, struct task_struct *p, int flags)
{
}

static void
dequeue_task_stop(struct rq *rq, struct task_struct *p, int flags)
{
}

static void yield_task_stop(struct rq *rq)
{
	BUG(); /* the stop task should never yield, it's pointless. */
}

static void put_prev_task_stop(struct rq *rq, struct task_struct *prev)
{
}

static void task_tick_stop(struct rq *rq, struct task_struct *curr, int queued)
{
}

static void set_curr_task_stop(struct rq *rq)
{
}

static void switched_to_stop(struct rq *rq, struct task_struct *p)
{
	BUG(); /* it's impossible to change to this class */
}

static void
prio_changed_stop(struct rq *rq, struct task_struct *p, int oldprio)
{
	BUG(); /* how!?, what priority?
 */
}

/* The stop task has no round-robin timeslice. */
static unsigned int
get_rr_interval_stop(struct rq *rq, struct task_struct *task)
{
	return 0;
}

/*
 * Simple, special scheduling class for the per-CPU stop tasks:
 */
static const struct sched_class stop_sched_class = {
	.next			= &rt_sched_class,

	.enqueue_task		= enqueue_task_stop,
	.dequeue_task		= dequeue_task_stop,
	.yield_task		= yield_task_stop,

	.check_preempt_curr	= check_preempt_curr_stop,

	.pick_next_task		= pick_next_task_stop,
	.put_prev_task		= put_prev_task_stop,

#ifdef CONFIG_SMP
	.select_task_rq		= select_task_rq_stop,
#endif

	.set_curr_task		= set_curr_task_stop,
	.task_tick		= task_tick_stop,

	.get_rr_interval	= get_rr_interval_stop,

	.prio_changed		= prio_changed_stop,
	.switched_to		= switched_to_stop,
};
gpl-2.0
4u2ore/memul-aurora
drivers/ata/libata-eh.c
3238
108312
/* * libata-eh.c - libata error handling * * Maintained by: Jeff Garzik <jgarzik@pobox.com> * Please ALWAYS copy linux-ide@vger.kernel.org * on emails. * * Copyright 2006 Tejun Heo <htejun@gmail.com> * * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; see the file COPYING. If not, write to * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, * USA. * * * libata documentation is available via 'make {ps|pdf}docs', * as Documentation/DocBook/libata.* * * Hardware documentation available from http://www.t13.org/ and * http://www.sata-io.org/ * */ #include <linux/kernel.h> #include <linux/blkdev.h> #include <linux/export.h> #include <linux/pci.h> #include <scsi/scsi.h> #include <scsi/scsi_host.h> #include <scsi/scsi_eh.h> #include <scsi/scsi_device.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_dbg.h> #include "../scsi/scsi_transport_api.h" #include <linux/libata.h> #include "libata.h" enum { /* speed down verdicts */ ATA_EH_SPDN_NCQ_OFF = (1 << 0), ATA_EH_SPDN_SPEED_DOWN = (1 << 1), ATA_EH_SPDN_FALLBACK_TO_PIO = (1 << 2), ATA_EH_SPDN_KEEP_ERRORS = (1 << 3), /* error flags */ ATA_EFLAG_IS_IO = (1 << 0), ATA_EFLAG_DUBIOUS_XFER = (1 << 1), ATA_EFLAG_OLD_ER = (1 << 31), /* error categories */ ATA_ECAT_NONE = 0, ATA_ECAT_ATA_BUS = 1, ATA_ECAT_TOUT_HSM = 2, ATA_ECAT_UNK_DEV = 3, ATA_ECAT_DUBIOUS_NONE = 4, ATA_ECAT_DUBIOUS_ATA_BUS = 5, ATA_ECAT_DUBIOUS_TOUT_HSM = 6, ATA_ECAT_DUBIOUS_UNK_DEV = 7, ATA_ECAT_NR = 8, ATA_EH_CMD_DFL_TIMEOUT = 5000, 
/* always put at least this amount of time between resets */ ATA_EH_RESET_COOL_DOWN = 5000, /* Waiting in ->prereset can never be reliable. It's * sometimes nice to wait there but it can't be depended upon; * otherwise, we wouldn't be resetting. Just give it enough * time for most drives to spin up. */ ATA_EH_PRERESET_TIMEOUT = 10000, ATA_EH_FASTDRAIN_INTERVAL = 3000, ATA_EH_UA_TRIES = 5, /* probe speed down parameters, see ata_eh_schedule_probe() */ ATA_EH_PROBE_TRIAL_INTERVAL = 60000, /* 1 min */ ATA_EH_PROBE_TRIALS = 2, }; /* The following table determines how we sequence resets. Each entry * represents timeout for that try. The first try can be soft or * hardreset. All others are hardreset if available. In most cases * the first reset w/ 10sec timeout should succeed. Following entries * are mostly for error handling, hotplug and retarded devices. */ static const unsigned long ata_eh_reset_timeouts[] = { 10000, /* most drives spin up by 10sec */ 10000, /* > 99% working drives spin up before 20sec */ 35000, /* give > 30 secs of idleness for retarded devices */ 5000, /* and sweet one last chance */ ULONG_MAX, /* > 1 min has elapsed, give up */ }; static const unsigned long ata_eh_identify_timeouts[] = { 5000, /* covers > 99% of successes and not too boring on failures */ 10000, /* combined time till here is enough even for media access */ 30000, /* for true idiots */ ULONG_MAX, }; static const unsigned long ata_eh_flush_timeouts[] = { 15000, /* be generous with flush */ 15000, /* ditto */ 30000, /* and even more generous */ ULONG_MAX, }; static const unsigned long ata_eh_other_timeouts[] = { 5000, /* same rationale as identify timeout */ 10000, /* ditto */ /* but no merciful 30sec for other commands, it just isn't worth it */ ULONG_MAX, }; struct ata_eh_cmd_timeout_ent { const u8 *commands; const unsigned long *timeouts; }; /* The following table determines timeouts to use for EH internal * commands. 
Each table entry is a command class and matches the * commands the entry applies to and the timeout table to use. * * On the retry after a command timed out, the next timeout value from * the table is used. If the table doesn't contain further entries, * the last value is used. * * ehc->cmd_timeout_idx keeps track of which timeout to use per * command class, so if SET_FEATURES times out on the first try, the * next try will use the second timeout value only for that class. */ #define CMDS(cmds...) (const u8 []){ cmds, 0 } static const struct ata_eh_cmd_timeout_ent ata_eh_cmd_timeout_table[ATA_EH_CMD_TIMEOUT_TABLE_SIZE] = { { .commands = CMDS(ATA_CMD_ID_ATA, ATA_CMD_ID_ATAPI), .timeouts = ata_eh_identify_timeouts, }, { .commands = CMDS(ATA_CMD_READ_NATIVE_MAX, ATA_CMD_READ_NATIVE_MAX_EXT), .timeouts = ata_eh_other_timeouts, }, { .commands = CMDS(ATA_CMD_SET_MAX, ATA_CMD_SET_MAX_EXT), .timeouts = ata_eh_other_timeouts, }, { .commands = CMDS(ATA_CMD_SET_FEATURES), .timeouts = ata_eh_other_timeouts, }, { .commands = CMDS(ATA_CMD_INIT_DEV_PARAMS), .timeouts = ata_eh_other_timeouts, }, { .commands = CMDS(ATA_CMD_FLUSH, ATA_CMD_FLUSH_EXT), .timeouts = ata_eh_flush_timeouts }, }; #undef CMDS static void __ata_port_freeze(struct ata_port *ap); #ifdef CONFIG_PM static void ata_eh_handle_port_suspend(struct ata_port *ap); static void ata_eh_handle_port_resume(struct ata_port *ap); #else /* CONFIG_PM */ static void ata_eh_handle_port_suspend(struct ata_port *ap) { } static void ata_eh_handle_port_resume(struct ata_port *ap) { } #endif /* CONFIG_PM */ static void __ata_ehi_pushv_desc(struct ata_eh_info *ehi, const char *fmt, va_list args) { ehi->desc_len += vscnprintf(ehi->desc + ehi->desc_len, ATA_EH_DESC_LEN - ehi->desc_len, fmt, args); } /** * __ata_ehi_push_desc - push error description without adding separator * @ehi: target EHI * @fmt: printf format string * * Format string according to @fmt and append it to @ehi->desc. 
* * LOCKING: * spin_lock_irqsave(host lock) */ void __ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...) { va_list args; va_start(args, fmt); __ata_ehi_pushv_desc(ehi, fmt, args); va_end(args); } /** * ata_ehi_push_desc - push error description with separator * @ehi: target EHI * @fmt: printf format string * * Format string according to @fmt and append it to @ehi->desc. * If @ehi->desc is not empty, ", " is added in-between. * * LOCKING: * spin_lock_irqsave(host lock) */ void ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...) { va_list args; if (ehi->desc_len) __ata_ehi_push_desc(ehi, ", "); va_start(args, fmt); __ata_ehi_pushv_desc(ehi, fmt, args); va_end(args); } /** * ata_ehi_clear_desc - clean error description * @ehi: target EHI * * Clear @ehi->desc. * * LOCKING: * spin_lock_irqsave(host lock) */ void ata_ehi_clear_desc(struct ata_eh_info *ehi) { ehi->desc[0] = '\0'; ehi->desc_len = 0; } /** * ata_port_desc - append port description * @ap: target ATA port * @fmt: printf format string * * Format string according to @fmt and append it to port * description. If port description is not empty, " " is added * in-between. This function is to be used while initializing * ata_host. The description is printed on host registration. * * LOCKING: * None. */ void ata_port_desc(struct ata_port *ap, const char *fmt, ...) { va_list args; WARN_ON(!(ap->pflags & ATA_PFLAG_INITIALIZING)); if (ap->link.eh_info.desc_len) __ata_ehi_push_desc(&ap->link.eh_info, " "); va_start(args, fmt); __ata_ehi_pushv_desc(&ap->link.eh_info, fmt, args); va_end(args); } #ifdef CONFIG_PCI /** * ata_port_pbar_desc - append PCI BAR description * @ap: target ATA port * @bar: target PCI BAR * @offset: offset into PCI BAR * @name: name of the area * * If @offset is negative, this function formats a string which * contains the name, address, size and type of the BAR and * appends it to the port description. 
If @offset is zero or * positive, only name and offsetted address is appended. * * LOCKING: * None. */ void ata_port_pbar_desc(struct ata_port *ap, int bar, ssize_t offset, const char *name) { struct pci_dev *pdev = to_pci_dev(ap->host->dev); char *type = ""; unsigned long long start, len; if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) type = "m"; else if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) type = "i"; start = (unsigned long long)pci_resource_start(pdev, bar); len = (unsigned long long)pci_resource_len(pdev, bar); if (offset < 0) ata_port_desc(ap, "%s %s%llu@0x%llx", name, type, len, start); else ata_port_desc(ap, "%s 0x%llx", name, start + (unsigned long long)offset); } #endif /* CONFIG_PCI */ static int ata_lookup_timeout_table(u8 cmd) { int i; for (i = 0; i < ATA_EH_CMD_TIMEOUT_TABLE_SIZE; i++) { const u8 *cur; for (cur = ata_eh_cmd_timeout_table[i].commands; *cur; cur++) if (*cur == cmd) return i; } return -1; } /** * ata_internal_cmd_timeout - determine timeout for an internal command * @dev: target device * @cmd: internal command to be issued * * Determine timeout for internal command @cmd for @dev. * * LOCKING: * EH context. * * RETURNS: * Determined timeout. */ unsigned long ata_internal_cmd_timeout(struct ata_device *dev, u8 cmd) { struct ata_eh_context *ehc = &dev->link->eh_context; int ent = ata_lookup_timeout_table(cmd); int idx; if (ent < 0) return ATA_EH_CMD_DFL_TIMEOUT; idx = ehc->cmd_timeout_idx[dev->devno][ent]; return ata_eh_cmd_timeout_table[ent].timeouts[idx]; } /** * ata_internal_cmd_timed_out - notification for internal command timeout * @dev: target device * @cmd: internal command which timed out * * Notify EH that internal command @cmd for @dev timed out. This * function should be called only for commands whose timeouts are * determined using ata_internal_cmd_timeout(). * * LOCKING: * EH context. 
*/ void ata_internal_cmd_timed_out(struct ata_device *dev, u8 cmd) { struct ata_eh_context *ehc = &dev->link->eh_context; int ent = ata_lookup_timeout_table(cmd); int idx; if (ent < 0) return; idx = ehc->cmd_timeout_idx[dev->devno][ent]; if (ata_eh_cmd_timeout_table[ent].timeouts[idx + 1] != ULONG_MAX) ehc->cmd_timeout_idx[dev->devno][ent]++; } static void ata_ering_record(struct ata_ering *ering, unsigned int eflags, unsigned int err_mask) { struct ata_ering_entry *ent; WARN_ON(!err_mask); ering->cursor++; ering->cursor %= ATA_ERING_SIZE; ent = &ering->ring[ering->cursor]; ent->eflags = eflags; ent->err_mask = err_mask; ent->timestamp = get_jiffies_64(); } static struct ata_ering_entry *ata_ering_top(struct ata_ering *ering) { struct ata_ering_entry *ent = &ering->ring[ering->cursor]; if (ent->err_mask) return ent; return NULL; } int ata_ering_map(struct ata_ering *ering, int (*map_fn)(struct ata_ering_entry *, void *), void *arg) { int idx, rc = 0; struct ata_ering_entry *ent; idx = ering->cursor; do { ent = &ering->ring[idx]; if (!ent->err_mask) break; rc = map_fn(ent, arg); if (rc) break; idx = (idx - 1 + ATA_ERING_SIZE) % ATA_ERING_SIZE; } while (idx != ering->cursor); return rc; } int ata_ering_clear_cb(struct ata_ering_entry *ent, void *void_arg) { ent->eflags |= ATA_EFLAG_OLD_ER; return 0; } static void ata_ering_clear(struct ata_ering *ering) { ata_ering_map(ering, ata_ering_clear_cb, NULL); } static unsigned int ata_eh_dev_action(struct ata_device *dev) { struct ata_eh_context *ehc = &dev->link->eh_context; return ehc->i.action | ehc->i.dev_action[dev->devno]; } static void ata_eh_clear_action(struct ata_link *link, struct ata_device *dev, struct ata_eh_info *ehi, unsigned int action) { struct ata_device *tdev; if (!dev) { ehi->action &= ~action; ata_for_each_dev(tdev, link, ALL) ehi->dev_action[tdev->devno] &= ~action; } else { /* doesn't make sense for port-wide EH actions */ WARN_ON(!(action & ATA_EH_PERDEV_MASK)); /* break ehi->action into 
ehi->dev_action */ if (ehi->action & action) { ata_for_each_dev(tdev, link, ALL) ehi->dev_action[tdev->devno] |= ehi->action & action; ehi->action &= ~action; } /* turn off the specified per-dev action */ ehi->dev_action[dev->devno] &= ~action; } } /** * ata_eh_acquire - acquire EH ownership * @ap: ATA port to acquire EH ownership for * * Acquire EH ownership for @ap. This is the basic exclusion * mechanism for ports sharing a host. Only one port hanging off * the same host can claim the ownership of EH. * * LOCKING: * EH context. */ void ata_eh_acquire(struct ata_port *ap) { mutex_lock(&ap->host->eh_mutex); WARN_ON_ONCE(ap->host->eh_owner); ap->host->eh_owner = current; } /** * ata_eh_release - release EH ownership * @ap: ATA port to release EH ownership for * * Release EH ownership for @ap if the caller. The caller must * have acquired EH ownership using ata_eh_acquire() previously. * * LOCKING: * EH context. */ void ata_eh_release(struct ata_port *ap) { WARN_ON_ONCE(ap->host->eh_owner != current); ap->host->eh_owner = NULL; mutex_unlock(&ap->host->eh_mutex); } /** * ata_scsi_timed_out - SCSI layer time out callback * @cmd: timed out SCSI command * * Handles SCSI layer timeout. We race with normal completion of * the qc for @cmd. If the qc is already gone, we lose and let * the scsi command finish (EH_HANDLED). Otherwise, the qc has * timed out and EH should be invoked. Prevent ata_qc_complete() * from finishing it by setting EH_SCHEDULED and return * EH_NOT_HANDLED. * * TODO: kill this function once old EH is gone. 
* * LOCKING: * Called from timer context * * RETURNS: * EH_HANDLED or EH_NOT_HANDLED */ enum blk_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd) { struct Scsi_Host *host = cmd->device->host; struct ata_port *ap = ata_shost_to_port(host); unsigned long flags; struct ata_queued_cmd *qc; enum blk_eh_timer_return ret; DPRINTK("ENTER\n"); if (ap->ops->error_handler) { ret = BLK_EH_NOT_HANDLED; goto out; } ret = BLK_EH_HANDLED; spin_lock_irqsave(ap->lock, flags); qc = ata_qc_from_tag(ap, ap->link.active_tag); if (qc) { WARN_ON(qc->scsicmd != cmd); qc->flags |= ATA_QCFLAG_EH_SCHEDULED; qc->err_mask |= AC_ERR_TIMEOUT; ret = BLK_EH_NOT_HANDLED; } spin_unlock_irqrestore(ap->lock, flags); out: DPRINTK("EXIT, ret=%d\n", ret); return ret; } static void ata_eh_unload(struct ata_port *ap) { struct ata_link *link; struct ata_device *dev; unsigned long flags; /* Restore SControl IPM and SPD for the next driver and * disable attached devices. */ ata_for_each_link(link, ap, PMP_FIRST) { sata_scr_write(link, SCR_CONTROL, link->saved_scontrol & 0xff0); ata_for_each_dev(dev, link, ALL) ata_dev_disable(dev); } /* freeze and set UNLOADED */ spin_lock_irqsave(ap->lock, flags); ata_port_freeze(ap); /* won't be thawed */ ap->pflags &= ~ATA_PFLAG_EH_PENDING; /* clear pending from freeze */ ap->pflags |= ATA_PFLAG_UNLOADED; spin_unlock_irqrestore(ap->lock, flags); } /** * ata_scsi_error - SCSI layer error handler callback * @host: SCSI host on which error occurred * * Handles SCSI-layer-thrown error events. * * LOCKING: * Inherited from SCSI layer (none, can sleep) * * RETURNS: * Zero. 
*/ void ata_scsi_error(struct Scsi_Host *host) { struct ata_port *ap = ata_shost_to_port(host); unsigned long flags; LIST_HEAD(eh_work_q); DPRINTK("ENTER\n"); spin_lock_irqsave(host->host_lock, flags); list_splice_init(&host->eh_cmd_q, &eh_work_q); spin_unlock_irqrestore(host->host_lock, flags); ata_scsi_cmd_error_handler(host, ap, &eh_work_q); /* If we timed raced normal completion and there is nothing to recover nr_timedout == 0 why exactly are we doing error recovery ? */ ata_scsi_port_error_handler(host, ap); /* finish or retry handled scmd's and clean up */ WARN_ON(host->host_failed || !list_empty(&eh_work_q)); DPRINTK("EXIT\n"); } /** * ata_scsi_cmd_error_handler - error callback for a list of commands * @host: scsi host containing the port * @ap: ATA port within the host * @eh_work_q: list of commands to process * * process the given list of commands and return those finished to the * ap->eh_done_q. This function is the first part of the libata error * handler which processes a given list of failed commands. */ void ata_scsi_cmd_error_handler(struct Scsi_Host *host, struct ata_port *ap, struct list_head *eh_work_q) { int i; unsigned long flags; /* make sure sff pio task is not running */ ata_sff_flush_pio_task(ap); /* synchronize with host lock and sort out timeouts */ /* For new EH, all qcs are finished in one of three ways - * normal completion, error completion, and SCSI timeout. * Both completions can race against SCSI timeout. When normal * completion wins, the qc never reaches EH. When error * completion wins, the qc has ATA_QCFLAG_FAILED set. * * When SCSI timeout wins, things are a bit more complex. * Normal or error completion can occur after the timeout but * before this point. In such cases, both types of * completions are honored. A scmd is determined to have * timed out iff its associated qc is active and not failed. 
*/ if (ap->ops->error_handler) { struct scsi_cmnd *scmd, *tmp; int nr_timedout = 0; spin_lock_irqsave(ap->lock, flags); /* This must occur under the ap->lock as we don't want a polled recovery to race the real interrupt handler The lost_interrupt handler checks for any completed but non-notified command and completes much like an IRQ handler. We then fall into the error recovery code which will treat this as if normal completion won the race */ if (ap->ops->lost_interrupt) ap->ops->lost_interrupt(ap); list_for_each_entry_safe(scmd, tmp, eh_work_q, eh_entry) { struct ata_queued_cmd *qc; for (i = 0; i < ATA_MAX_QUEUE; i++) { qc = __ata_qc_from_tag(ap, i); if (qc->flags & ATA_QCFLAG_ACTIVE && qc->scsicmd == scmd) break; } if (i < ATA_MAX_QUEUE) { /* the scmd has an associated qc */ if (!(qc->flags & ATA_QCFLAG_FAILED)) { /* which hasn't failed yet, timeout */ qc->err_mask |= AC_ERR_TIMEOUT; qc->flags |= ATA_QCFLAG_FAILED; nr_timedout++; } } else { /* Normal completion occurred after * SCSI timeout but before this point. * Successfully complete it. */ scmd->retries = scmd->allowed; scsi_eh_finish_cmd(scmd, &ap->eh_done_q); } } /* If we have timed out qcs. They belong to EH from * this point but the state of the controller is * unknown. Freeze the port to make sure the IRQ * handler doesn't diddle with those qcs. This must * be done atomically w.r.t. setting QCFLAG_FAILED. */ if (nr_timedout) __ata_port_freeze(ap); spin_unlock_irqrestore(ap->lock, flags); /* initialize eh_tries */ ap->eh_tries = ATA_EH_MAX_TRIES; } else spin_unlock_wait(ap->lock); } EXPORT_SYMBOL(ata_scsi_cmd_error_handler); /** * ata_scsi_port_error_handler - recover the port after the commands * @host: SCSI host containing the port * @ap: the ATA port * * Handle the recovery of the port @ap after all the commands * have been recovered. 
*/ void ata_scsi_port_error_handler(struct Scsi_Host *host, struct ata_port *ap) { unsigned long flags; /* invoke error handler */ if (ap->ops->error_handler) { struct ata_link *link; /* acquire EH ownership */ ata_eh_acquire(ap); repeat: /* kill fast drain timer */ del_timer_sync(&ap->fastdrain_timer); /* process port resume request */ ata_eh_handle_port_resume(ap); /* fetch & clear EH info */ spin_lock_irqsave(ap->lock, flags); ata_for_each_link(link, ap, HOST_FIRST) { struct ata_eh_context *ehc = &link->eh_context; struct ata_device *dev; memset(&link->eh_context, 0, sizeof(link->eh_context)); link->eh_context.i = link->eh_info; memset(&link->eh_info, 0, sizeof(link->eh_info)); ata_for_each_dev(dev, link, ENABLED) { int devno = dev->devno; ehc->saved_xfer_mode[devno] = dev->xfer_mode; if (ata_ncq_enabled(dev)) ehc->saved_ncq_enabled |= 1 << devno; } } ap->pflags |= ATA_PFLAG_EH_IN_PROGRESS; ap->pflags &= ~ATA_PFLAG_EH_PENDING; ap->excl_link = NULL; /* don't maintain exclusion over EH */ spin_unlock_irqrestore(ap->lock, flags); /* invoke EH, skip if unloading or suspended */ if (!(ap->pflags & (ATA_PFLAG_UNLOADING | ATA_PFLAG_SUSPENDED))) ap->ops->error_handler(ap); else { /* if unloading, commence suicide */ if ((ap->pflags & ATA_PFLAG_UNLOADING) && !(ap->pflags & ATA_PFLAG_UNLOADED)) ata_eh_unload(ap); ata_eh_finish(ap); } /* process port suspend request */ ata_eh_handle_port_suspend(ap); /* Exception might have happened after ->error_handler * recovered the port but before this point. Repeat * EH in such case. 
*/ spin_lock_irqsave(ap->lock, flags); if (ap->pflags & ATA_PFLAG_EH_PENDING) { if (--ap->eh_tries) { spin_unlock_irqrestore(ap->lock, flags); goto repeat; } ata_port_err(ap, "EH pending after %d tries, giving up\n", ATA_EH_MAX_TRIES); ap->pflags &= ~ATA_PFLAG_EH_PENDING; } /* this run is complete, make sure EH info is clear */ ata_for_each_link(link, ap, HOST_FIRST) memset(&link->eh_info, 0, sizeof(link->eh_info)); /* Clear host_eh_scheduled while holding ap->lock such * that if exception occurs after this point but * before EH completion, SCSI midlayer will * re-initiate EH. */ host->host_eh_scheduled = 0; spin_unlock_irqrestore(ap->lock, flags); ata_eh_release(ap); } else { WARN_ON(ata_qc_from_tag(ap, ap->link.active_tag) == NULL); ap->ops->eng_timeout(ap); } scsi_eh_flush_done_q(&ap->eh_done_q); /* clean up */ spin_lock_irqsave(ap->lock, flags); if (ap->pflags & ATA_PFLAG_LOADING) ap->pflags &= ~ATA_PFLAG_LOADING; else if (ap->pflags & ATA_PFLAG_SCSI_HOTPLUG) schedule_delayed_work(&ap->hotplug_task, 0); if (ap->pflags & ATA_PFLAG_RECOVERED) ata_port_info(ap, "EH complete\n"); ap->pflags &= ~(ATA_PFLAG_SCSI_HOTPLUG | ATA_PFLAG_RECOVERED); /* tell wait_eh that we're done */ ap->pflags &= ~ATA_PFLAG_EH_IN_PROGRESS; wake_up_all(&ap->eh_wait_q); spin_unlock_irqrestore(ap->lock, flags); } EXPORT_SYMBOL_GPL(ata_scsi_port_error_handler); /** * ata_port_wait_eh - Wait for the currently pending EH to complete * @ap: Port to wait EH for * * Wait until the currently pending EH is complete. * * LOCKING: * Kernel thread context (may sleep). 
 */
void ata_port_wait_eh(struct ata_port *ap)
{
	unsigned long flags;
	DEFINE_WAIT(wait);

 retry:
	spin_lock_irqsave(ap->lock, flags);

	/* sleep until neither EH-pending nor EH-in-progress is set;
	 * the lock is dropped around each schedule() */
	while (ap->pflags & (ATA_PFLAG_EH_PENDING | ATA_PFLAG_EH_IN_PROGRESS)) {
		prepare_to_wait(&ap->eh_wait_q, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock_irqrestore(ap->lock, flags);
		schedule();
		spin_lock_irqsave(ap->lock, flags);
	}
	finish_wait(&ap->eh_wait_q, &wait);

	spin_unlock_irqrestore(ap->lock, flags);

	/* make sure SCSI EH is complete */
	if (scsi_host_in_recovery(ap->scsi_host)) {
		ata_msleep(ap, 10);
		goto retry;
	}
}
EXPORT_SYMBOL_GPL(ata_port_wait_eh);

/*
 * Number of qcs currently in flight on @ap.
 * NOTE(review): callers below invoke this under ap->lock — confirm
 * that holding ap->lock is a requirement of this helper.
 */
static int ata_eh_nr_in_flight(struct ata_port *ap)
{
	unsigned int tag;
	int nr = 0;

	/* count only non-internal commands */
	for (tag = 0; tag < ATA_MAX_QUEUE - 1; tag++)
		if (ata_qc_from_tag(ap, tag))
			nr++;

	return nr;
}

/*
 * Fast drain timer callback, armed by ata_eh_set_pending().  If no
 * command completed during the last interval, time out everything
 * still in flight and freeze the port so EH can start; otherwise
 * remember the new count and re-arm for another interval.
 */
void ata_eh_fastdrain_timerfn(unsigned long arg)
{
	struct ata_port *ap = (void *)arg;
	unsigned long flags;
	int cnt;

	spin_lock_irqsave(ap->lock, flags);

	cnt = ata_eh_nr_in_flight(ap);

	/* are we done? */
	if (!cnt)
		goto out_unlock;

	if (cnt == ap->fastdrain_cnt) {
		unsigned int tag;

		/* No progress during the last interval, tag all
		 * in-flight qcs as timed out and freeze the port.
		 */
		for (tag = 0; tag < ATA_MAX_QUEUE - 1; tag++) {
			struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);
			if (qc)
				qc->err_mask |= AC_ERR_TIMEOUT;
		}

		ata_port_freeze(ap);
	} else {
		/* some qcs have finished, give it another chance */
		ap->fastdrain_cnt = cnt;
		ap->fastdrain_timer.expires =
			ata_deadline(jiffies, ATA_EH_FASTDRAIN_INTERVAL);
		add_timer(&ap->fastdrain_timer);
	}

 out_unlock:
	spin_unlock_irqrestore(ap->lock, flags);
}

/**
 * ata_eh_set_pending - set ATA_PFLAG_EH_PENDING and activate fast drain
 * @ap: target ATA port
 * @fastdrain: activate fast drain
 *
 * Set ATA_PFLAG_EH_PENDING and activate fast drain if @fastdrain
 * is non-zero and EH wasn't pending before.  Fast drain ensures
 * that EH kicks in in a timely manner.
* * LOCKING: * spin_lock_irqsave(host lock) */ static void ata_eh_set_pending(struct ata_port *ap, int fastdrain) { int cnt; /* already scheduled? */ if (ap->pflags & ATA_PFLAG_EH_PENDING) return; ap->pflags |= ATA_PFLAG_EH_PENDING; if (!fastdrain) return; /* do we have in-flight qcs? */ cnt = ata_eh_nr_in_flight(ap); if (!cnt) return; /* activate fast drain */ ap->fastdrain_cnt = cnt; ap->fastdrain_timer.expires = ata_deadline(jiffies, ATA_EH_FASTDRAIN_INTERVAL); add_timer(&ap->fastdrain_timer); } /** * ata_qc_schedule_eh - schedule qc for error handling * @qc: command to schedule error handling for * * Schedule error handling for @qc. EH will kick in as soon as * other commands are drained. * * LOCKING: * spin_lock_irqsave(host lock) */ void ata_qc_schedule_eh(struct ata_queued_cmd *qc) { struct ata_port *ap = qc->ap; struct request_queue *q = qc->scsicmd->device->request_queue; unsigned long flags; WARN_ON(!ap->ops->error_handler); qc->flags |= ATA_QCFLAG_FAILED; ata_eh_set_pending(ap, 1); /* The following will fail if timeout has already expired. * ata_scsi_error() takes care of such scmds on EH entry. * Note that ATA_QCFLAG_FAILED is unconditionally set after * this function completes. */ spin_lock_irqsave(q->queue_lock, flags); blk_abort_request(qc->scsicmd->request); spin_unlock_irqrestore(q->queue_lock, flags); } /** * ata_port_schedule_eh - schedule error handling without a qc * @ap: ATA port to schedule EH for * * Schedule error handling for @ap. EH will kick in as soon as * all commands are drained. 
* * LOCKING: * spin_lock_irqsave(host lock) */ void ata_port_schedule_eh(struct ata_port *ap) { WARN_ON(!ap->ops->error_handler); if (ap->pflags & ATA_PFLAG_INITIALIZING) return; ata_eh_set_pending(ap, 1); scsi_schedule_eh(ap->scsi_host); DPRINTK("port EH scheduled\n"); } static int ata_do_link_abort(struct ata_port *ap, struct ata_link *link) { int tag, nr_aborted = 0; WARN_ON(!ap->ops->error_handler); /* we're gonna abort all commands, no need for fast drain */ ata_eh_set_pending(ap, 0); for (tag = 0; tag < ATA_MAX_QUEUE; tag++) { struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag); if (qc && (!link || qc->dev->link == link)) { qc->flags |= ATA_QCFLAG_FAILED; ata_qc_complete(qc); nr_aborted++; } } if (!nr_aborted) ata_port_schedule_eh(ap); return nr_aborted; } /** * ata_link_abort - abort all qc's on the link * @link: ATA link to abort qc's for * * Abort all active qc's active on @link and schedule EH. * * LOCKING: * spin_lock_irqsave(host lock) * * RETURNS: * Number of aborted qc's. */ int ata_link_abort(struct ata_link *link) { return ata_do_link_abort(link->ap, link); } /** * ata_port_abort - abort all qc's on the port * @ap: ATA port to abort qc's for * * Abort all active qc's of @ap and schedule EH. * * LOCKING: * spin_lock_irqsave(host_set lock) * * RETURNS: * Number of aborted qc's. */ int ata_port_abort(struct ata_port *ap) { return ata_do_link_abort(ap, NULL); } /** * __ata_port_freeze - freeze port * @ap: ATA port to freeze * * This function is called when HSM violation or some other * condition disrupts normal operation of the port. Frozen port * is not allowed to perform any operation until the port is * thawed, which usually follows a successful reset. * * ap->ops->freeze() callback can be used for freezing the port * hardware-wise (e.g. mask interrupt and stop DMA engine). If a * port cannot be frozen hardware-wise, the interrupt handler * must ack and clear interrupts unconditionally while the port * is frozen. 
* * LOCKING: * spin_lock_irqsave(host lock) */ static void __ata_port_freeze(struct ata_port *ap) { WARN_ON(!ap->ops->error_handler); if (ap->ops->freeze) ap->ops->freeze(ap); ap->pflags |= ATA_PFLAG_FROZEN; DPRINTK("ata%u port frozen\n", ap->print_id); } /** * ata_port_freeze - abort & freeze port * @ap: ATA port to freeze * * Abort and freeze @ap. The freeze operation must be called * first, because some hardware requires special operations * before the taskfile registers are accessible. * * LOCKING: * spin_lock_irqsave(host lock) * * RETURNS: * Number of aborted commands. */ int ata_port_freeze(struct ata_port *ap) { int nr_aborted; WARN_ON(!ap->ops->error_handler); __ata_port_freeze(ap); nr_aborted = ata_port_abort(ap); return nr_aborted; } /** * sata_async_notification - SATA async notification handler * @ap: ATA port where async notification is received * * Handler to be called when async notification via SDB FIS is * received. This function schedules EH if necessary. * * LOCKING: * spin_lock_irqsave(host lock) * * RETURNS: * 1 if EH is scheduled, 0 otherwise. */ int sata_async_notification(struct ata_port *ap) { u32 sntf; int rc; if (!(ap->flags & ATA_FLAG_AN)) return 0; rc = sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf); if (rc == 0) sata_scr_write(&ap->link, SCR_NOTIFICATION, sntf); if (!sata_pmp_attached(ap) || rc) { /* PMP is not attached or SNTF is not available */ if (!sata_pmp_attached(ap)) { /* PMP is not attached. Check whether ATAPI * AN is configured. If so, notify media * change. */ struct ata_device *dev = ap->link.device; if ((dev->class == ATA_DEV_ATAPI) && (dev->flags & ATA_DFLAG_AN)) ata_scsi_media_change_notify(dev); return 0; } else { /* PMP is attached but SNTF is not available. * ATAPI async media change notification is * not used. The PMP must be reporting PHY * status change, schedule EH. 
*/ ata_port_schedule_eh(ap); return 1; } } else { /* PMP is attached and SNTF is available */ struct ata_link *link; /* check and notify ATAPI AN */ ata_for_each_link(link, ap, EDGE) { if (!(sntf & (1 << link->pmp))) continue; if ((link->device->class == ATA_DEV_ATAPI) && (link->device->flags & ATA_DFLAG_AN)) ata_scsi_media_change_notify(link->device); } /* If PMP is reporting that PHY status of some * downstream ports has changed, schedule EH. */ if (sntf & (1 << SATA_PMP_CTRL_PORT)) { ata_port_schedule_eh(ap); return 1; } return 0; } } /** * ata_eh_freeze_port - EH helper to freeze port * @ap: ATA port to freeze * * Freeze @ap. * * LOCKING: * None. */ void ata_eh_freeze_port(struct ata_port *ap) { unsigned long flags; if (!ap->ops->error_handler) return; spin_lock_irqsave(ap->lock, flags); __ata_port_freeze(ap); spin_unlock_irqrestore(ap->lock, flags); } /** * ata_port_thaw_port - EH helper to thaw port * @ap: ATA port to thaw * * Thaw frozen port @ap. * * LOCKING: * None. */ void ata_eh_thaw_port(struct ata_port *ap) { unsigned long flags; if (!ap->ops->error_handler) return; spin_lock_irqsave(ap->lock, flags); ap->pflags &= ~ATA_PFLAG_FROZEN; if (ap->ops->thaw) ap->ops->thaw(ap); spin_unlock_irqrestore(ap->lock, flags); DPRINTK("ata%u port thawed\n", ap->print_id); } static void ata_eh_scsidone(struct scsi_cmnd *scmd) { /* nada */ } static void __ata_eh_qc_complete(struct ata_queued_cmd *qc) { struct ata_port *ap = qc->ap; struct scsi_cmnd *scmd = qc->scsicmd; unsigned long flags; spin_lock_irqsave(ap->lock, flags); qc->scsidone = ata_eh_scsidone; __ata_qc_complete(qc); WARN_ON(ata_tag_valid(qc->tag)); spin_unlock_irqrestore(ap->lock, flags); scsi_eh_finish_cmd(scmd, &ap->eh_done_q); } /** * ata_eh_qc_complete - Complete an active ATA command from EH * @qc: Command to complete * * Indicate to the mid and upper layers that an ATA command has * completed. To be used from EH. 
 */
void ata_eh_qc_complete(struct ata_queued_cmd *qc)
{
	struct scsi_cmnd *scmd = qc->scsicmd;
	/* restore the full retry budget so the EH completion itself
	 * doesn't count against the midlayer's retry limit */
	scmd->retries = scmd->allowed;
	__ata_eh_qc_complete(qc);
}

/**
 * ata_eh_qc_retry - Tell midlayer to retry an ATA command after EH
 * @qc: Command to retry
 *
 * Indicate to the mid and upper layers that an ATA command
 * should be retried.  To be used from EH.
 *
 * SCSI midlayer limits the number of retries to scmd->allowed.
 * scmd->retries is decremented for commands which get retried
 * due to unrelated failures (qc->err_mask is zero).
 */
void ata_eh_qc_retry(struct ata_queued_cmd *qc)
{
	struct scsi_cmnd *scmd = qc->scsicmd;
	/* a retry caused by an unrelated failure (no error on this qc)
	 * consumes one retry so a wedged device can't retry forever */
	if (!qc->err_mask && scmd->retries)
		scmd->retries--;
	__ata_eh_qc_complete(qc);
}

/**
 * ata_dev_disable - disable ATA device
 * @dev: ATA device to disable
 *
 * Disable @dev.
 *
 * Locking:
 * EH context.
 */
void ata_dev_disable(struct ata_device *dev)
{
	if (!ata_dev_enabled(dev))
		return;

	if (ata_msg_drv(dev->link->ap))
		ata_dev_warn(dev, "disabled\n");
	ata_acpi_on_disable(dev);
	/* force the slowest transfer mode; QUIET suppresses the warning */
	ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 | ATA_DNXFER_QUIET);
	/* bump class to its disabled counterpart — presumably the
	 * *_UNSUP classes are defined as class + 1 in libata.h; verify */
	dev->class++;

	/* From now till the next successful probe, ering is used to
	 * track probe failures.  Clear accumulated device error info.
	 */
	ata_ering_clear(&dev->ering);
}

/**
 * ata_eh_detach_dev - detach ATA device
 * @dev: ATA device to detach
 *
 * Detach @dev.
 *
 * LOCKING:
 * None.
*/ void ata_eh_detach_dev(struct ata_device *dev) { struct ata_link *link = dev->link; struct ata_port *ap = link->ap; struct ata_eh_context *ehc = &link->eh_context; unsigned long flags; ata_dev_disable(dev); spin_lock_irqsave(ap->lock, flags); dev->flags &= ~ATA_DFLAG_DETACH; if (ata_scsi_offline_dev(dev)) { dev->flags |= ATA_DFLAG_DETACHED; ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG; } /* clear per-dev EH info */ ata_eh_clear_action(link, dev, &link->eh_info, ATA_EH_PERDEV_MASK); ata_eh_clear_action(link, dev, &link->eh_context.i, ATA_EH_PERDEV_MASK); ehc->saved_xfer_mode[dev->devno] = 0; ehc->saved_ncq_enabled &= ~(1 << dev->devno); spin_unlock_irqrestore(ap->lock, flags); } /** * ata_eh_about_to_do - about to perform eh_action * @link: target ATA link * @dev: target ATA dev for per-dev action (can be NULL) * @action: action about to be performed * * Called just before performing EH actions to clear related bits * in @link->eh_info such that eh actions are not unnecessarily * repeated. * * LOCKING: * None. */ void ata_eh_about_to_do(struct ata_link *link, struct ata_device *dev, unsigned int action) { struct ata_port *ap = link->ap; struct ata_eh_info *ehi = &link->eh_info; struct ata_eh_context *ehc = &link->eh_context; unsigned long flags; spin_lock_irqsave(ap->lock, flags); ata_eh_clear_action(link, dev, ehi, action); /* About to take EH action, set RECOVERED. Ignore actions on * slave links as master will do them again. */ if (!(ehc->i.flags & ATA_EHI_QUIET) && link != ap->slave_link) ap->pflags |= ATA_PFLAG_RECOVERED; spin_unlock_irqrestore(ap->lock, flags); } /** * ata_eh_done - EH action complete * @ap: target ATA port * @dev: target ATA dev for per-dev action (can be NULL) * @action: action just completed * * Called right after performing EH actions to clear related bits * in @link->eh_context. * * LOCKING: * None. 
*/ void ata_eh_done(struct ata_link *link, struct ata_device *dev, unsigned int action) { struct ata_eh_context *ehc = &link->eh_context; ata_eh_clear_action(link, dev, &ehc->i, action); } /** * ata_err_string - convert err_mask to descriptive string * @err_mask: error mask to convert to string * * Convert @err_mask to descriptive string. Errors are * prioritized according to severity and only the most severe * error is reported. * * LOCKING: * None. * * RETURNS: * Descriptive string for @err_mask */ static const char *ata_err_string(unsigned int err_mask) { if (err_mask & AC_ERR_HOST_BUS) return "host bus error"; if (err_mask & AC_ERR_ATA_BUS) return "ATA bus error"; if (err_mask & AC_ERR_TIMEOUT) return "timeout"; if (err_mask & AC_ERR_HSM) return "HSM violation"; if (err_mask & AC_ERR_SYSTEM) return "internal error"; if (err_mask & AC_ERR_MEDIA) return "media error"; if (err_mask & AC_ERR_INVALID) return "invalid argument"; if (err_mask & AC_ERR_DEV) return "device error"; return "unknown error"; } /** * ata_read_log_page - read a specific log page * @dev: target device * @page: page to read * @buf: buffer to store read page * @sectors: number of sectors to read * * Read log page using READ_LOG_EXT command. * * LOCKING: * Kernel thread context (may sleep). * * RETURNS: * 0 on success, AC_ERR_* mask otherwise. 
*/
static unsigned int ata_read_log_page(struct ata_device *dev,
				      u8 page, void *buf, unsigned int sectors)
{
	struct ata_taskfile tf;
	unsigned int err_mask;

	DPRINTK("read log page - page %d\n", page);

	ata_tf_init(dev, &tf);
	tf.command = ATA_CMD_READ_LOG_EXT;
	tf.lbal = page;
	tf.nsect = sectors;
	tf.hob_nsect = sectors >> 8;	/* high byte of sector count (LBA48) */
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_LBA48 | ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_PIO;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
				     buf, sectors * ATA_SECT_SIZE, 0);

	DPRINTK("EXIT, err_mask=%x\n", err_mask);
	return err_mask;
}

/**
 * ata_eh_read_log_10h - Read log page 10h for NCQ error details
 * @dev: Device to read log page 10h from
 * @tag: Resulting tag of the failed command
 * @tf: Resulting taskfile registers of the failed command
 *
 * Read log page 10h to obtain NCQ error details and clear error
 * condition.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 *
 * RETURNS:
 * 0 on success, -errno otherwise.
 */
static int ata_eh_read_log_10h(struct ata_device *dev,
			       int *tag, struct ata_taskfile *tf)
{
	u8 *buf = dev->link->ap->sector_buf;
	unsigned int err_mask;
	u8 csum;
	int i;

	err_mask = ata_read_log_page(dev, ATA_LOG_SATA_NCQ, buf, 1);
	if (err_mask)
		return -EIO;

	/* whole page sums to zero when valid; warn but continue otherwise */
	csum = 0;
	for (i = 0; i < ATA_SECT_SIZE; i++)
		csum += buf[i];
	if (csum)
		ata_dev_warn(dev, "invalid checksum 0x%x on log page 10h\n",
			     csum);

	/* bit 7 of byte 0 set means no NCQ error to report */
	if (buf[0] & 0x80)
		return -ENOENT;

	*tag = buf[0] & 0x1f;

	/* extract the failed command's taskfile from the log page layout */
	tf->command = buf[2];
	tf->feature = buf[3];
	tf->lbal = buf[4];
	tf->lbam = buf[5];
	tf->lbah = buf[6];
	tf->device = buf[7];
	tf->hob_lbal = buf[8];
	tf->hob_lbam = buf[9];
	tf->hob_lbah = buf[10];
	tf->nsect = buf[12];
	tf->hob_nsect = buf[13];

	return 0;
}

/**
 * atapi_eh_tur - perform ATAPI TEST_UNIT_READY
 * @dev: target ATAPI device
 * @r_sense_key: out parameter for sense_key
 *
 * Perform ATAPI TEST_UNIT_READY.
 *
 * LOCKING:
 * EH context (may sleep).
 *
 * RETURNS:
 * 0 on success, AC_ERR_* mask on failure.
*/
static unsigned int atapi_eh_tur(struct ata_device *dev, u8 *r_sense_key)
{
	u8 cdb[ATAPI_CDB_LEN] =
		{ TEST_UNIT_READY, 0, 0, 0, 0, 0 };
	struct ata_taskfile tf;
	unsigned int err_mask;

	ata_tf_init(dev, &tf);

	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf.command = ATA_CMD_PACKET;
	tf.protocol = ATAPI_PROT_NODATA;

	err_mask = ata_exec_internal(dev, &tf, cdb, DMA_NONE, NULL, 0, 0);
	/* on a device error, the sense key is in the upper nibble of the
	 * returned feature register
	 */
	if (err_mask == AC_ERR_DEV)
		*r_sense_key = tf.feature >> 4;
	return err_mask;
}

/**
 * atapi_eh_request_sense - perform ATAPI REQUEST_SENSE
 * @dev: device to perform REQUEST_SENSE to
 * @sense_buf: result sense data buffer (SCSI_SENSE_BUFFERSIZE bytes long)
 * @dfl_sense_key: default sense key to use
 *
 * Perform ATAPI REQUEST_SENSE after the device reported CHECK
 * SENSE.  This function is EH helper.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 *
 * RETURNS:
 * 0 on success, AC_ERR_* mask on failure
 */
static unsigned int atapi_eh_request_sense(struct ata_device *dev,
					   u8 *sense_buf, u8 dfl_sense_key)
{
	u8 cdb[ATAPI_CDB_LEN] =
		{ REQUEST_SENSE, 0, 0, 0, SCSI_SENSE_BUFFERSIZE, 0 };
	struct ata_port *ap = dev->link->ap;
	struct ata_taskfile tf;

	DPRINTK("ATAPI request sense\n");

	/* FIXME: is this needed? */
	memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);

	/* initialize sense_buf with the error register,
	 * for the case where they are -not- overwritten
	 */
	sense_buf[0] = 0x70;
	sense_buf[2] = dfl_sense_key;

	/* some devices time out if garbage left in tf */
	ata_tf_init(dev, &tf);

	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf.command = ATA_CMD_PACKET;

	/* is it pointless to prefer PIO for "safety reasons"? */
	if (ap->flags & ATA_FLAG_PIO_DMA) {
		tf.protocol = ATAPI_PROT_DMA;
		tf.feature |= ATAPI_PKT_DMA;
	} else {
		tf.protocol = ATAPI_PROT_PIO;
		/* PIO transfer length goes in the byte-count registers */
		tf.lbam = SCSI_SENSE_BUFFERSIZE;
		tf.lbah = 0;
	}

	return ata_exec_internal(dev, &tf, cdb, DMA_FROM_DEVICE,
				 sense_buf, SCSI_SENSE_BUFFERSIZE, 0);
}

/**
 * ata_eh_analyze_serror - analyze SError for a failed port
 * @link: ATA link to analyze SError for
 *
 * Analyze SError if available and further determine cause of
 * failure.
 *
 * LOCKING:
 * None.
 */
static void ata_eh_analyze_serror(struct ata_link *link)
{
	struct ata_eh_context *ehc = &link->eh_context;
	u32 serror = ehc->i.serror;
	unsigned int err_mask = 0, action = 0;
	u32 hotplug_mask;

	if (serror & (SERR_PERSISTENT | SERR_DATA)) {
		err_mask |= AC_ERR_ATA_BUS;
		action |= ATA_EH_RESET;
	}
	if (serror & SERR_PROTOCOL) {
		err_mask |= AC_ERR_HSM;
		action |= ATA_EH_RESET;
	}
	if (serror & SERR_INTERNAL) {
		err_mask |= AC_ERR_SYSTEM;
		action |= ATA_EH_RESET;
	}

	/* Determine whether a hotplug event has occurred.  Both
	 * SError.N/X are considered hotplug events for enabled or
	 * host links.  For disabled PMP links, only N bit is
	 * considered as X bit is left at 1 for link plugging.
	 */
	if (link->lpm_policy > ATA_LPM_MAX_POWER)
		hotplug_mask = 0;	/* hotplug doesn't work w/ LPM */
	else if (!(link->flags & ATA_LFLAG_DISABLED) || ata_is_host_link(link))
		hotplug_mask = SERR_PHYRDY_CHG | SERR_DEV_XCHG;
	else
		hotplug_mask = SERR_PHYRDY_CHG;

	if (serror & hotplug_mask)
		ata_ehi_hotplugged(&ehc->i);

	ehc->i.err_mask |= err_mask;
	ehc->i.action |= action;
}

/**
 * ata_eh_analyze_ncq_error - analyze NCQ error
 * @link: ATA link to analyze NCQ error for
 *
 * Read log page 10h, determine the offending qc and acquire
 * error status TF.  For NCQ device errors, all LLDDs have to do
 * is setting AC_ERR_DEV in ehi->err_mask.  This function takes
 * care of the rest.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
*/
void ata_eh_analyze_ncq_error(struct ata_link *link)
{
	struct ata_port *ap = link->ap;
	struct ata_eh_context *ehc = &link->eh_context;
	struct ata_device *dev = link->device;
	struct ata_queued_cmd *qc;
	struct ata_taskfile tf;
	int tag, rc;

	/* if frozen, we can't do much */
	if (ap->pflags & ATA_PFLAG_FROZEN)
		return;

	/* is it NCQ device error? */
	if (!link->sactive || !(ehc->i.err_mask & AC_ERR_DEV))
		return;

	/* has LLDD analyzed already? */
	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
		qc = __ata_qc_from_tag(ap, tag);

		if (!(qc->flags & ATA_QCFLAG_FAILED))
			continue;

		/* a failed qc already carries an err_mask - nothing to do */
		if (qc->err_mask)
			return;
	}

	/* okay, this error is ours */
	memset(&tf, 0, sizeof(tf));
	rc = ata_eh_read_log_10h(dev, &tag, &tf);
	if (rc) {
		ata_link_err(link, "failed to read log page 10h (errno=%d)\n",
			     rc);
		return;
	}

	/* sanity check: reported tag must be among the outstanding ones */
	if (!(link->sactive & (1 << tag))) {
		ata_link_err(link, "log page 10h reported inactive tag %d\n",
			     tag);
		return;
	}

	/* we've got the perpetrator, condemn it */
	qc = __ata_qc_from_tag(ap, tag);
	memcpy(&qc->result_tf, &tf, sizeof(tf));
	qc->result_tf.flags = ATA_TFLAG_ISADDR | ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
	qc->err_mask |= AC_ERR_DEV | AC_ERR_NCQ;
	ehc->i.err_mask &= ~AC_ERR_DEV;
}

/**
 * ata_eh_analyze_tf - analyze taskfile of a failed qc
 * @qc: qc to analyze
 * @tf: Taskfile registers to analyze
 *
 * Analyze taskfile of @qc and further determine cause of
 * failure.  This function also requests ATAPI sense data if
 * available.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
*
 * RETURNS:
 * Determined recovery action
 */
static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc,
				      const struct ata_taskfile *tf)
{
	unsigned int tmp, action = 0;
	u8 stat = tf->command, err = tf->feature;

	/* status register must show DRDY alone; anything else is an HSM
	 * violation and warrants a reset
	 */
	if ((stat & (ATA_BUSY | ATA_DRQ | ATA_DRDY)) != ATA_DRDY) {
		qc->err_mask |= AC_ERR_HSM;
		return ATA_EH_RESET;
	}

	if (stat & (ATA_ERR | ATA_DF))
		qc->err_mask |= AC_ERR_DEV;
	else
		return 0;

	switch (qc->dev->class) {
	case ATA_DEV_ATA:
		/* refine AC_ERR_DEV using the error register bits */
		if (err & ATA_ICRC)
			qc->err_mask |= AC_ERR_ATA_BUS;
		if (err & ATA_UNC)
			qc->err_mask |= AC_ERR_MEDIA;
		if (err & ATA_IDNF)
			qc->err_mask |= AC_ERR_INVALID;
		break;

	case ATA_DEV_ATAPI:
		if (!(qc->ap->pflags & ATA_PFLAG_FROZEN)) {
			tmp = atapi_eh_request_sense(qc->dev,
						qc->scsicmd->sense_buffer,
						qc->result_tf.feature >> 4);
			if (!tmp) {
				/* ATA_QCFLAG_SENSE_VALID is used to
				 * tell atapi_qc_complete() that sense
				 * data is already valid.
				 *
				 * TODO: interpret sense data and set
				 * appropriate err_mask.
				 */
				qc->flags |= ATA_QCFLAG_SENSE_VALID;
			} else
				qc->err_mask |= tmp;
		}
	}

	if (qc->err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT | AC_ERR_ATA_BUS))
		action |= ATA_EH_RESET;

	return action;
}

/* Map (@eflags, @err_mask) to an ATA_ECAT_* category.  Until a verified
 * data transfer has been seen (*xfer_ok), categories are offset into
 * their DUBIOUS_* variants via @base.
 */
static int ata_eh_categorize_error(unsigned int eflags, unsigned int err_mask,
				   int *xfer_ok)
{
	int base = 0;

	if (!(eflags & ATA_EFLAG_DUBIOUS_XFER))
		*xfer_ok = 1;

	if (!*xfer_ok)
		base = ATA_ECAT_DUBIOUS_NONE;

	if (err_mask & AC_ERR_ATA_BUS)
		return base + ATA_ECAT_ATA_BUS;

	if (err_mask & AC_ERR_TIMEOUT)
		return base + ATA_ECAT_TOUT_HSM;

	if (eflags & ATA_EFLAG_IS_IO) {
		if (err_mask & AC_ERR_HSM)
			return base + ATA_ECAT_TOUT_HSM;
		if ((err_mask &
		     (AC_ERR_DEV|AC_ERR_MEDIA|AC_ERR_INVALID)) == AC_ERR_DEV)
			return base + ATA_ECAT_UNK_DEV;
	}

	return 0;
}

/* accumulator passed to speed_down_verdict_cb() via ata_ering_map() */
struct speed_down_verdict_arg {
	u64 since;			/* ignore entries older than this */
	int xfer_ok;
	int nr_errors[ATA_ECAT_NR];	/* per-category error counts */
};

/* ering iteration callback: count the entry's error category, stopping
 * (return -1) at entries that are too old or from a previous error run
 */
static int speed_down_verdict_cb(struct ata_ering_entry *ent, void *void_arg)
{
	struct speed_down_verdict_arg *arg = void_arg;
	int cat;

	if ((ent->eflags & ATA_EFLAG_OLD_ER) || (ent->timestamp < arg->since))
		return -1;

	cat = ata_eh_categorize_error(ent->eflags, ent->err_mask,
				      &arg->xfer_ok);
	arg->nr_errors[cat]++;

	return 0;
}

/**
 * ata_eh_speed_down_verdict - Determine speed down verdict
 * @dev: Device of interest
 *
 * This function examines error ring of @dev and determines
 * whether NCQ needs to be turned off, transfer speed should be
 * stepped down, or falling back to PIO is necessary.
 *
 * ECAT_ATA_BUS	: ATA_BUS error for any command
 *
 * ECAT_TOUT_HSM : TIMEOUT for any command or HSM violation for
 *		  IO commands
 *
 * ECAT_UNK_DEV	: Unknown DEV error for IO commands
 *
 * ECAT_DUBIOUS_* : Identical to above three but occurred while
 *		  data transfer hasn't been verified.
 *
 * Verdicts are
 *
 * NCQ_OFF	: Turn off NCQ.
 *
 * SPEED_DOWN	: Speed down transfer speed but don't fall back
 *		  to PIO.
 *
 * FALLBACK_TO_PIO : Fall back to PIO.
 *
 * Even if multiple verdicts are returned, only one action is
 * taken per error.  An action triggered by non-DUBIOUS errors
 * clears ering, while one triggered by DUBIOUS_* errors doesn't.
 * This is to expedite speed down decisions right after device is
 * initially configured.
 *
 * The followings are speed down rules.  #1 and #2 deal with
 * DUBIOUS errors.
 *
 * 1. If more than one DUBIOUS_ATA_BUS or DUBIOUS_TOUT_HSM errors
 *    occurred during last 5 mins, SPEED_DOWN and FALLBACK_TO_PIO.
 *
 * 2. If more than one DUBIOUS_TOUT_HSM or DUBIOUS_UNK_DEV errors
 *    occurred during last 5 mins, NCQ_OFF.
 *
 * 3. If more than 8 ATA_BUS, TOUT_HSM or UNK_DEV errors
 *    occurred during last 5 mins, FALLBACK_TO_PIO
 *
 * 4. If more than 3 TOUT_HSM or UNK_DEV errors occurred
 *    during last 10 mins, NCQ_OFF.
 *
 * 5. If more than 3 ATA_BUS or TOUT_HSM errors, or more than 6
 *    UNK_DEV errors occurred during last 10 mins, SPEED_DOWN.
 *
 * LOCKING:
 * Inherited from caller.
 *
 * RETURNS:
 * OR of ATA_EH_SPDN_* flags.
*/
static unsigned int ata_eh_speed_down_verdict(struct ata_device *dev)
{
	const u64 j5mins = 5LLU * 60 * HZ, j10mins = 10LLU * 60 * HZ;
	u64 j64 = get_jiffies_64();
	struct speed_down_verdict_arg arg;
	unsigned int verdict = 0;

	/* scan past 5 mins of error history */
	memset(&arg, 0, sizeof(arg));
	arg.since = j64 - min(j64, j5mins);	/* clamp to avoid underflow */
	ata_ering_map(&dev->ering, speed_down_verdict_cb, &arg);

	if (arg.nr_errors[ATA_ECAT_DUBIOUS_ATA_BUS] +
	    arg.nr_errors[ATA_ECAT_DUBIOUS_TOUT_HSM] > 1)
		verdict |= ATA_EH_SPDN_SPEED_DOWN |
			ATA_EH_SPDN_FALLBACK_TO_PIO | ATA_EH_SPDN_KEEP_ERRORS;

	if (arg.nr_errors[ATA_ECAT_DUBIOUS_TOUT_HSM] +
	    arg.nr_errors[ATA_ECAT_DUBIOUS_UNK_DEV] > 1)
		verdict |= ATA_EH_SPDN_NCQ_OFF | ATA_EH_SPDN_KEEP_ERRORS;

	if (arg.nr_errors[ATA_ECAT_ATA_BUS] +
	    arg.nr_errors[ATA_ECAT_TOUT_HSM] +
	    arg.nr_errors[ATA_ECAT_UNK_DEV] > 6)
		verdict |= ATA_EH_SPDN_FALLBACK_TO_PIO;

	/* scan past 10 mins of error history */
	memset(&arg, 0, sizeof(arg));
	arg.since = j64 - min(j64, j10mins);
	ata_ering_map(&dev->ering, speed_down_verdict_cb, &arg);

	if (arg.nr_errors[ATA_ECAT_TOUT_HSM] +
	    arg.nr_errors[ATA_ECAT_UNK_DEV] > 3)
		verdict |= ATA_EH_SPDN_NCQ_OFF;

	if (arg.nr_errors[ATA_ECAT_ATA_BUS] +
	    arg.nr_errors[ATA_ECAT_TOUT_HSM] > 3 ||
	    arg.nr_errors[ATA_ECAT_UNK_DEV] > 6)
		verdict |= ATA_EH_SPDN_SPEED_DOWN;

	return verdict;
}

/**
 * ata_eh_speed_down - record error and speed down if necessary
 * @dev: Failed device
 * @eflags: mask of ATA_EFLAG_* flags
 * @err_mask: err_mask of the error
 *
 * Record error and examine error history to determine whether
 * adjusting transmission speed is necessary.  It also sets
 * transmission limits appropriately if such adjustment is
 * necessary.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 *
 * RETURNS:
 * Determined recovery action.
*/
static unsigned int ata_eh_speed_down(struct ata_device *dev,
				      unsigned int eflags,
				      unsigned int err_mask)
{
	struct ata_link *link = ata_dev_phys_link(dev);
	int xfer_ok = 0;
	unsigned int verdict;
	unsigned int action = 0;

	/* don't bother if Cat-0 error */
	if (ata_eh_categorize_error(eflags, err_mask, &xfer_ok) == 0)
		return 0;

	/* record error and determine whether speed down is necessary */
	ata_ering_record(&dev->ering, eflags, err_mask);
	verdict = ata_eh_speed_down_verdict(dev);

	/* turn off NCQ? */
	if ((verdict & ATA_EH_SPDN_NCQ_OFF) &&
	    (dev->flags & (ATA_DFLAG_PIO | ATA_DFLAG_NCQ |
			   ATA_DFLAG_NCQ_OFF)) == ATA_DFLAG_NCQ) {
		dev->flags |= ATA_DFLAG_NCQ_OFF;
		ata_dev_warn(dev, "NCQ disabled due to excessive errors\n");
		goto done;
	}

	/* speed down? */
	if (verdict & ATA_EH_SPDN_SPEED_DOWN) {
		/* speed down SATA link speed if possible */
		if (sata_down_spd_limit(link, 0) == 0) {
			action |= ATA_EH_RESET;
			goto done;
		}

		/* lower transfer mode */
		if (dev->spdn_cnt < 2) {
			static const int dma_dnxfer_sel[] =
				{ ATA_DNXFER_DMA, ATA_DNXFER_40C };
			static const int pio_dnxfer_sel[] =
				{ ATA_DNXFER_PIO, ATA_DNXFER_FORCE_PIO0 };
			int sel;

			/* pick the next step down, indexed by how many
			 * times we've already stepped (spdn_cnt)
			 */
			if (dev->xfer_shift != ATA_SHIFT_PIO)
				sel = dma_dnxfer_sel[dev->spdn_cnt];
			else
				sel = pio_dnxfer_sel[dev->spdn_cnt];

			dev->spdn_cnt++;

			if (ata_down_xfermask_limit(dev, sel) == 0) {
				action |= ATA_EH_RESET;
				goto done;
			}
		}
	}

	/* Fall back to PIO?  Slowing down to PIO is meaningless for
	 * SATA ATA devices.  Consider it only for PATA and SATAPI.
	 */
	if ((verdict & ATA_EH_SPDN_FALLBACK_TO_PIO) && (dev->spdn_cnt >= 2) &&
	    (link->ap->cbl != ATA_CBL_SATA || dev->class == ATA_DEV_ATAPI) &&
	    (dev->xfer_shift != ATA_SHIFT_PIO)) {
		if (ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO) == 0) {
			dev->spdn_cnt = 0;
			action |= ATA_EH_RESET;
			goto done;
		}
	}

	return 0;
 done:
	/* device has been slowed down, blow error history */
	if (!(verdict & ATA_EH_SPDN_KEEP_ERRORS))
		ata_ering_clear(&dev->ering);
	return action;
}

/**
 * ata_eh_link_autopsy - analyze error and determine recovery action
 * @link: host link to perform autopsy on
 *
 * Analyze why @link failed and determine which recovery actions
 * are needed.  This function also sets more detailed AC_ERR_*
 * values and fills sense data for ATAPI CHECK SENSE.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 */
static void ata_eh_link_autopsy(struct ata_link *link)
{
	struct ata_port *ap = link->ap;
	struct ata_eh_context *ehc = &link->eh_context;
	struct ata_device *dev;
	unsigned int all_err_mask = 0, eflags = 0;
	int tag;
	u32 serror;
	int rc;

	DPRINTK("ENTER\n");

	if (ehc->i.flags & ATA_EHI_NO_AUTOPSY)
		return;

	/* obtain and analyze SError */
	rc = sata_scr_read(link, SCR_ERROR, &serror);
	if (rc == 0) {
		ehc->i.serror |= serror;
		ata_eh_analyze_serror(link);
	} else if (rc != -EOPNOTSUPP) {
		/* SError read failed, force reset and probing */
		ehc->i.probe_mask |= ATA_ALL_DEVICES;
		ehc->i.action |= ATA_EH_RESET;
		ehc->i.err_mask |= AC_ERR_OTHER;
	}

	/* analyze NCQ failure */
	ata_eh_analyze_ncq_error(link);

	/* any real error trumps AC_ERR_OTHER */
	if (ehc->i.err_mask & ~AC_ERR_OTHER)
		ehc->i.err_mask &= ~AC_ERR_OTHER;

	all_err_mask |= ehc->i.err_mask;

	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
		struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);

		if (!(qc->flags & ATA_QCFLAG_FAILED) ||
		    ata_dev_phys_link(qc->dev) != link)
			continue;

		/* inherit upper level err_mask */
		qc->err_mask |= ehc->i.err_mask;

		/* analyze TF */
		ehc->i.action |= ata_eh_analyze_tf(qc, &qc->result_tf);

		/* DEV errors are probably spurious in case of ATA_BUS error */
		if (qc->err_mask & AC_ERR_ATA_BUS)
			qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_MEDIA |
					  AC_ERR_INVALID);

		/* any real error trumps unknown error */
		if (qc->err_mask & ~AC_ERR_OTHER)
			qc->err_mask &= ~AC_ERR_OTHER;

		/* SENSE_VALID trumps dev/unknown error and revalidation */
		if (qc->flags & ATA_QCFLAG_SENSE_VALID)
			qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_OTHER);

		/* determine whether the command is worth retrying */
		if (qc->flags & ATA_QCFLAG_IO ||
		    (!(qc->err_mask & AC_ERR_INVALID) &&
		     qc->err_mask != AC_ERR_DEV))
			qc->flags |= ATA_QCFLAG_RETRY;

		/* accumulate error info */
		ehc->i.dev = qc->dev;
		all_err_mask |= qc->err_mask;
		if (qc->flags & ATA_QCFLAG_IO)
			eflags |= ATA_EFLAG_IS_IO;
	}

	/* enforce default EH actions */
	if (ap->pflags & ATA_PFLAG_FROZEN ||
	    all_err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT))
		ehc->i.action |= ATA_EH_RESET;
	else if (((eflags & ATA_EFLAG_IS_IO) && all_err_mask) ||
		 (!(eflags & ATA_EFLAG_IS_IO) && (all_err_mask & ~AC_ERR_DEV)))
		ehc->i.action |= ATA_EH_REVALIDATE;

	/* If we have offending qcs and the associated failed device,
	 * perform per-dev EH action only on the offending device.
	 */
	if (ehc->i.dev) {
		ehc->i.dev_action[ehc->i.dev->devno] |=
			ehc->i.action & ATA_EH_PERDEV_MASK;
		ehc->i.action &= ~ATA_EH_PERDEV_MASK;
	}

	/* propagate timeout to host link */
	if ((all_err_mask & AC_ERR_TIMEOUT) && !ata_is_host_link(link))
		ap->link.eh_context.i.err_mask |= AC_ERR_TIMEOUT;

	/* record error and consider speeding down */
	dev = ehc->i.dev;
	if (!dev && ((ata_link_max_devices(link) == 1 &&
		      ata_dev_enabled(link->device))))
		dev = link->device;

	if (dev) {
		if (dev->flags & ATA_DFLAG_DUBIOUS_XFER)
			eflags |= ATA_EFLAG_DUBIOUS_XFER;
		ehc->i.action |= ata_eh_speed_down(dev, eflags, all_err_mask);
	}

	DPRINTK("EXIT\n");
}

/**
 * ata_eh_autopsy - analyze error and determine recovery action
 * @ap: host port to perform autopsy on
 *
 * Analyze all links of @ap and determine why they failed and
 * which recovery actions are needed.
*
 * LOCKING:
 * Kernel thread context (may sleep).
 */
void ata_eh_autopsy(struct ata_port *ap)
{
	struct ata_link *link;

	ata_for_each_link(link, ap, EDGE)
		ata_eh_link_autopsy(link);

	/* Handle the frigging slave link.  Autopsy is done similarly
	 * but actions and flags are transferred over to the master
	 * link and handled from there.
	 */
	if (ap->slave_link) {
		struct ata_eh_context *mehc = &ap->link.eh_context;
		struct ata_eh_context *sehc = &ap->slave_link->eh_context;

		/* transfer control flags from master to slave */
		sehc->i.flags |= mehc->i.flags & ATA_EHI_TO_SLAVE_MASK;

		/* perform autopsy on the slave link */
		ata_eh_link_autopsy(ap->slave_link);

		/* transfer actions from slave to master and clear slave */
		ata_eh_about_to_do(ap->slave_link, NULL, ATA_EH_ALL_ACTIONS);
		mehc->i.action |= sehc->i.action;
		mehc->i.dev_action[1] |= sehc->i.dev_action[1];
		mehc->i.flags |= sehc->i.flags;
		ata_eh_done(ap->slave_link, NULL, ATA_EH_ALL_ACTIONS);
	}

	/* Autopsy of fanout ports can affect host link autopsy.
	 * Perform host link autopsy last.
	 */
	if (sata_pmp_attached(ap))
		ata_eh_link_autopsy(&ap->link);
}

/**
 * ata_get_cmd_descript - get description for ATA command
 * @command: ATA command code to get description for
 *
 * Return a textual description of the given command, or NULL if the
 * command is not known.
*
 * LOCKING:
 * None
 */
const char *ata_get_cmd_descript(u8 command)
{
#ifdef CONFIG_ATA_VERBOSE_ERROR
	/* opcode -> human-readable name lookup table, NULL-text terminated */
	static const struct
	{
		u8 command;
		const char *text;
	} cmd_descr[] = {
		{ ATA_CMD_DEV_RESET,		"DEVICE RESET" },
		{ ATA_CMD_CHK_POWER,		"CHECK POWER MODE" },
		{ ATA_CMD_STANDBY,		"STANDBY" },
		{ ATA_CMD_IDLE,			"IDLE" },
		{ ATA_CMD_EDD,			"EXECUTE DEVICE DIAGNOSTIC" },
		{ ATA_CMD_DOWNLOAD_MICRO,	"DOWNLOAD MICROCODE" },
		{ ATA_CMD_NOP,			"NOP" },
		{ ATA_CMD_FLUSH,		"FLUSH CACHE" },
		{ ATA_CMD_FLUSH_EXT,		"FLUSH CACHE EXT" },
		{ ATA_CMD_ID_ATA,		"IDENTIFY DEVICE" },
		{ ATA_CMD_ID_ATAPI,		"IDENTIFY PACKET DEVICE" },
		{ ATA_CMD_SERVICE,		"SERVICE" },
		{ ATA_CMD_READ,			"READ DMA" },
		{ ATA_CMD_READ_EXT,		"READ DMA EXT" },
		{ ATA_CMD_READ_QUEUED,		"READ DMA QUEUED" },
		{ ATA_CMD_READ_STREAM_EXT,	"READ STREAM EXT" },
		{ ATA_CMD_READ_STREAM_DMA_EXT,	"READ STREAM DMA EXT" },
		{ ATA_CMD_WRITE,		"WRITE DMA" },
		{ ATA_CMD_WRITE_EXT,		"WRITE DMA EXT" },
		{ ATA_CMD_WRITE_QUEUED,		"WRITE DMA QUEUED EXT" },
		{ ATA_CMD_WRITE_STREAM_EXT,	"WRITE STREAM EXT" },
		{ ATA_CMD_WRITE_STREAM_DMA_EXT,	"WRITE STREAM DMA EXT" },
		{ ATA_CMD_WRITE_FUA_EXT,	"WRITE DMA FUA EXT" },
		{ ATA_CMD_WRITE_QUEUED_FUA_EXT,	"WRITE DMA QUEUED FUA EXT" },
		{ ATA_CMD_FPDMA_READ,		"READ FPDMA QUEUED" },
		{ ATA_CMD_FPDMA_WRITE,		"WRITE FPDMA QUEUED" },
		{ ATA_CMD_PIO_READ,		"READ SECTOR(S)" },
		{ ATA_CMD_PIO_READ_EXT,		"READ SECTOR(S) EXT" },
		{ ATA_CMD_PIO_WRITE,		"WRITE SECTOR(S)" },
		{ ATA_CMD_PIO_WRITE_EXT,	"WRITE SECTOR(S) EXT" },
		{ ATA_CMD_READ_MULTI,		"READ MULTIPLE" },
		{ ATA_CMD_READ_MULTI_EXT,	"READ MULTIPLE EXT" },
		{ ATA_CMD_WRITE_MULTI,		"WRITE MULTIPLE" },
		{ ATA_CMD_WRITE_MULTI_EXT,	"WRITE MULTIPLE EXT" },
		{ ATA_CMD_WRITE_MULTI_FUA_EXT,	"WRITE MULTIPLE FUA EXT" },
		{ ATA_CMD_SET_FEATURES,		"SET FEATURES" },
		{ ATA_CMD_SET_MULTI,		"SET MULTIPLE MODE" },
		{ ATA_CMD_VERIFY,		"READ VERIFY SECTOR(S)" },
		{ ATA_CMD_VERIFY_EXT,		"READ VERIFY SECTOR(S) EXT" },
		{ ATA_CMD_WRITE_UNCORR_EXT,	"WRITE UNCORRECTABLE EXT" },
		{ ATA_CMD_STANDBYNOW1,		"STANDBY IMMEDIATE" },
		{ ATA_CMD_IDLEIMMEDIATE,	"IDLE IMMEDIATE" },
		{ ATA_CMD_SLEEP,		"SLEEP" },
		{ ATA_CMD_INIT_DEV_PARAMS,	"INITIALIZE DEVICE PARAMETERS" },
		{ ATA_CMD_READ_NATIVE_MAX,	"READ NATIVE MAX ADDRESS" },
		{ ATA_CMD_READ_NATIVE_MAX_EXT,	"READ NATIVE MAX ADDRESS EXT" },
		{ ATA_CMD_SET_MAX,		"SET MAX ADDRESS" },
		{ ATA_CMD_SET_MAX_EXT,		"SET MAX ADDRESS EXT" },
		{ ATA_CMD_READ_LOG_EXT,		"READ LOG EXT" },
		{ ATA_CMD_WRITE_LOG_EXT,	"WRITE LOG EXT" },
		{ ATA_CMD_READ_LOG_DMA_EXT,	"READ LOG DMA EXT" },
		{ ATA_CMD_WRITE_LOG_DMA_EXT,	"WRITE LOG DMA EXT" },
		{ ATA_CMD_TRUSTED_RCV,		"TRUSTED RECEIVE" },
		{ ATA_CMD_TRUSTED_RCV_DMA,	"TRUSTED RECEIVE DMA" },
		{ ATA_CMD_TRUSTED_SND,		"TRUSTED SEND" },
		{ ATA_CMD_TRUSTED_SND_DMA,	"TRUSTED SEND DMA" },
		{ ATA_CMD_PMP_READ,		"READ BUFFER" },
		{ ATA_CMD_PMP_WRITE,		"WRITE BUFFER" },
		{ ATA_CMD_CONF_OVERLAY,		"DEVICE CONFIGURATION OVERLAY" },
		{ ATA_CMD_SEC_SET_PASS,		"SECURITY SET PASSWORD" },
		{ ATA_CMD_SEC_UNLOCK,		"SECURITY UNLOCK" },
		{ ATA_CMD_SEC_ERASE_PREP,	"SECURITY ERASE PREPARE" },
		{ ATA_CMD_SEC_ERASE_UNIT,	"SECURITY ERASE UNIT" },
		{ ATA_CMD_SEC_FREEZE_LOCK,	"SECURITY FREEZE LOCK" },
		{ ATA_CMD_SEC_DISABLE_PASS,	"SECURITY DISABLE PASSWORD" },
		{ ATA_CMD_CONFIG_STREAM,	"CONFIGURE STREAM" },
		{ ATA_CMD_SMART,		"SMART" },
		{ ATA_CMD_MEDIA_LOCK,		"DOOR LOCK" },
		{ ATA_CMD_MEDIA_UNLOCK,		"DOOR UNLOCK" },
		{ ATA_CMD_DSM,			"DATA SET MANAGEMENT" },
		{ ATA_CMD_CHK_MED_CRD_TYP,	"CHECK MEDIA CARD TYPE" },
		{ ATA_CMD_CFA_REQ_EXT_ERR,	"CFA REQUEST EXTENDED ERROR" },
		{ ATA_CMD_CFA_WRITE_NE,		"CFA WRITE SECTORS WITHOUT ERASE" },
		{ ATA_CMD_CFA_TRANS_SECT,	"CFA TRANSLATE SECTOR" },
		{ ATA_CMD_CFA_ERASE,		"CFA ERASE SECTORS" },
		{ ATA_CMD_CFA_WRITE_MULT_NE,	"CFA WRITE MULTIPLE WITHOUT ERASE" },
		{ ATA_CMD_READ_LONG,		"READ LONG (with retries)" },
		{ ATA_CMD_READ_LONG_ONCE,	"READ LONG (without retries)" },
		{ ATA_CMD_WRITE_LONG,		"WRITE LONG (with retries)" },
		{ ATA_CMD_WRITE_LONG_ONCE,	"WRITE LONG (without retries)" },
		{ ATA_CMD_RESTORE,		"RECALIBRATE" },
		{ 0,				NULL } /* terminate list */
	};

	unsigned int i;
	for (i = 0; cmd_descr[i].text; i++)
		if (cmd_descr[i].command == command)
			return cmd_descr[i].text;
#endif

	return NULL;
}

/**
 * ata_eh_link_report - report error handling to user
 * @link: ATA link EH is going on
 *
 * Report EH to user.
 *
 * LOCKING:
 * None.
 */
static void ata_eh_link_report(struct ata_link *link)
{
	struct ata_port *ap = link->ap;
	struct ata_eh_context *ehc = &link->eh_context;
	const char *frozen, *desc;
	char tries_buf[6];
	int tag, nr_failed = 0;

	if (ehc->i.flags & ATA_EHI_QUIET)
		return;

	desc = NULL;
	if (ehc->i.desc[0] != '\0')
		desc = ehc->i.desc;

	/* count failed qcs that are actually worth reporting */
	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
		struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);

		if (!(qc->flags & ATA_QCFLAG_FAILED) ||
		    ata_dev_phys_link(qc->dev) != link ||
		    ((qc->flags & ATA_QCFLAG_QUIET) &&
		     qc->err_mask == AC_ERR_DEV))
			continue;
		if (qc->flags & ATA_QCFLAG_SENSE_VALID && !qc->err_mask)
			continue;

		nr_failed++;
	}

	if (!nr_failed && !ehc->i.err_mask)
		return;

	frozen = "";
	if (ap->pflags & ATA_PFLAG_FROZEN)
		frozen = " frozen";

	memset(tries_buf, 0, sizeof(tries_buf));
	if (ap->eh_tries < ATA_EH_MAX_TRIES)
		snprintf(tries_buf, sizeof(tries_buf) - 1, " t%d",
			 ap->eh_tries);

	if (ehc->i.dev) {
		ata_dev_err(ehc->i.dev, "exception Emask 0x%x "
			    "SAct 0x%x SErr 0x%x action 0x%x%s%s\n",
			    ehc->i.err_mask, link->sactive, ehc->i.serror,
			    ehc->i.action, frozen, tries_buf);
		if (desc)
			ata_dev_err(ehc->i.dev, "%s\n", desc);
	} else {
		ata_link_err(link, "exception Emask 0x%x "
			     "SAct 0x%x SErr 0x%x action 0x%x%s%s\n",
			     ehc->i.err_mask, link->sactive, ehc->i.serror,
			     ehc->i.action, frozen, tries_buf);
		if (desc)
			ata_link_err(link, "%s\n", desc);
	}

#ifdef CONFIG_ATA_VERBOSE_ERROR
	if (ehc->i.serror)
		ata_link_err(link,
		  "SError: { %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s}\n",
		  ehc->i.serror & SERR_DATA_RECOVERED ? "RecovData " : "",
		  ehc->i.serror & SERR_COMM_RECOVERED ? "RecovComm " : "",
		  ehc->i.serror & SERR_DATA ? "UnrecovData " : "",
		  ehc->i.serror & SERR_PERSISTENT ? "Persist " : "",
		  ehc->i.serror & SERR_PROTOCOL ? "Proto " : "",
		  ehc->i.serror & SERR_INTERNAL ? "HostInt " : "",
		  ehc->i.serror & SERR_PHYRDY_CHG ? "PHYRdyChg " : "",
		  ehc->i.serror & SERR_PHY_INT_ERR ? "PHYInt " : "",
		  ehc->i.serror & SERR_COMM_WAKE ? "CommWake " : "",
		  ehc->i.serror & SERR_10B_8B_ERR ? "10B8B " : "",
		  ehc->i.serror & SERR_DISPARITY ? "Dispar " : "",
		  ehc->i.serror & SERR_CRC ? "BadCRC " : "",
		  ehc->i.serror & SERR_HANDSHAKE ? "Handshk " : "",
		  ehc->i.serror & SERR_LINK_SEQ_ERR ? "LinkSeq " : "",
		  ehc->i.serror & SERR_TRANS_ST_ERROR ? "TrStaTrns " : "",
		  ehc->i.serror & SERR_UNRECOG_FIS ? "UnrecFIS " : "",
		  ehc->i.serror & SERR_DEV_XCHG ? "DevExch " : "");
#endif

	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
		struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
		struct ata_taskfile *cmd = &qc->tf, *res = &qc->result_tf;
		const u8 *cdb = qc->cdb;
		char data_buf[20] = "";
		char cdb_buf[70] = "";

		if (!(qc->flags & ATA_QCFLAG_FAILED) ||
		    ata_dev_phys_link(qc->dev) != link || !qc->err_mask)
			continue;

		if (qc->dma_dir != DMA_NONE) {
			static const char *dma_str[] = {
				[DMA_BIDIRECTIONAL]	= "bidi",
				[DMA_TO_DEVICE]		= "out",
				[DMA_FROM_DEVICE]	= "in",
			};
			static const char *prot_str[] = {
				[ATA_PROT_PIO]		= "pio",
				[ATA_PROT_DMA]		= "dma",
				[ATA_PROT_NCQ]		= "ncq",
				[ATAPI_PROT_PIO]	= "pio",
				[ATAPI_PROT_DMA]	= "dma",
			};

			snprintf(data_buf, sizeof(data_buf), " %s %u %s",
				 prot_str[qc->tf.protocol], qc->nbytes,
				 dma_str[qc->dma_dir]);
		}

		if (ata_is_atapi(qc->tf.protocol)) {
			if (qc->scsicmd)
				scsi_print_command(qc->scsicmd);
			else
				snprintf(cdb_buf, sizeof(cdb_buf),
				 "cdb %02x %02x %02x %02x %02x %02x %02x %02x "
				 "%02x %02x %02x %02x %02x %02x %02x %02x\n ",
				 cdb[0], cdb[1], cdb[2], cdb[3],
				 cdb[4], cdb[5], cdb[6], cdb[7],
				 cdb[8], cdb[9], cdb[10], cdb[11],
				 cdb[12], cdb[13], cdb[14], cdb[15]);
		} else {
			const char *descr = ata_get_cmd_descript(cmd->command);
			if (descr)
				ata_dev_err(qc->dev, "failed command: %s\n",
					    descr);
		}

		ata_dev_err(qc->dev,
			"cmd %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x "
			"tag %d%s\n %s"
			"res %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x "
			"Emask 0x%x (%s)%s\n",
			cmd->command, cmd->feature, cmd->nsect,
			cmd->lbal, cmd->lbam, cmd->lbah,
			cmd->hob_feature, cmd->hob_nsect,
			cmd->hob_lbal, cmd->hob_lbam, cmd->hob_lbah,
			cmd->device, qc->tag, data_buf, cdb_buf,
			res->command, res->feature, res->nsect,
			res->lbal, res->lbam, res->lbah,
			res->hob_feature, res->hob_nsect,
			res->hob_lbal, res->hob_lbam, res->hob_lbah,
			res->device, qc->err_mask, ata_err_string(qc->err_mask),
			qc->err_mask & AC_ERR_NCQ ? " <F>" : "");

#ifdef CONFIG_ATA_VERBOSE_ERROR
		if (res->command & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ |
				    ATA_ERR)) {
			if (res->command & ATA_BUSY)
				ata_dev_err(qc->dev, "status: { Busy }\n");
			else
				ata_dev_err(qc->dev, "status: { %s%s%s%s}\n",
				  res->command & ATA_DRDY ? "DRDY " : "",
				  res->command & ATA_DF ? "DF " : "",
				  res->command & ATA_DRQ ? "DRQ " : "",
				  res->command & ATA_ERR ? "ERR " : "");
		}

		if (cmd->command != ATA_CMD_PACKET &&
		    (res->feature & (ATA_ICRC | ATA_UNC | ATA_IDNF |
				     ATA_ABORTED)))
			ata_dev_err(qc->dev, "error: { %s%s%s%s}\n",
			  res->feature & ATA_ICRC ? "ICRC " : "",
			  res->feature & ATA_UNC ? "UNC " : "",
			  res->feature & ATA_IDNF ? "IDNF " : "",
			  res->feature & ATA_ABORTED ? "ABRT " : "");
#endif
	}
}

/**
 * ata_eh_report - report error handling to user
 * @ap: ATA port to report EH about
 *
 * Report EH to user.
 *
 * LOCKING:
 * None.
*/
void ata_eh_report(struct ata_port *ap)
{
	struct ata_link *link;

	ata_for_each_link(link, ap, HOST_FIRST)
		ata_eh_link_report(link);
}

/* Invoke @reset on @link; when @clear_classes is set, reset all device
 * classes to ATA_DEV_UNKNOWN first so stale results can't leak through.
 */
static int ata_do_reset(struct ata_link *link, ata_reset_fn_t reset,
			unsigned int *classes, unsigned long deadline,
			bool clear_classes)
{
	struct ata_device *dev;

	if (clear_classes)
		ata_for_each_dev(dev, link, ALL)
			classes[dev->devno] = ATA_DEV_UNKNOWN;

	return reset(link, classes, deadline);
}

/* Returns 1 when a follow-up softreset is needed after a reset attempt
 * that returned @rc: never when SRST is unsupported or the link is
 * offline; otherwise on -EAGAIN, or on PMP-capable host links.
 */
static int ata_eh_followup_srst_needed(struct ata_link *link, int rc)
{
	if ((link->flags & ATA_LFLAG_NO_SRST) || ata_link_offline(link))
		return 0;
	if (rc == -EAGAIN)
		return 1;
	if (sata_pmp_supported(link->ap) && ata_is_host_link(link))
		return 1;
	return 0;
}

int ata_eh_reset(struct ata_link *link, int classify,
		 ata_prereset_fn_t prereset, ata_reset_fn_t softreset,
		 ata_reset_fn_t hardreset, ata_postreset_fn_t postreset)
{
	struct ata_port *ap = link->ap;
	struct ata_link *slave = ap->slave_link;
	struct ata_eh_context *ehc = &link->eh_context;
	struct ata_eh_context *sehc = slave ?
&slave->eh_context : NULL; unsigned int *classes = ehc->classes; unsigned int lflags = link->flags; int verbose = !(ehc->i.flags & ATA_EHI_QUIET); int max_tries = 0, try = 0; struct ata_link *failed_link; struct ata_device *dev; unsigned long deadline, now; ata_reset_fn_t reset; unsigned long flags; u32 sstatus; int nr_unknown, rc; /* * Prepare to reset */ while (ata_eh_reset_timeouts[max_tries] != ULONG_MAX) max_tries++; if (link->flags & ATA_LFLAG_NO_HRST) hardreset = NULL; if (link->flags & ATA_LFLAG_NO_SRST) softreset = NULL; /* make sure each reset attempt is at least COOL_DOWN apart */ if (ehc->i.flags & ATA_EHI_DID_RESET) { now = jiffies; WARN_ON(time_after(ehc->last_reset, now)); deadline = ata_deadline(ehc->last_reset, ATA_EH_RESET_COOL_DOWN); if (time_before(now, deadline)) schedule_timeout_uninterruptible(deadline - now); } spin_lock_irqsave(ap->lock, flags); ap->pflags |= ATA_PFLAG_RESETTING; spin_unlock_irqrestore(ap->lock, flags); ata_eh_about_to_do(link, NULL, ATA_EH_RESET); ata_for_each_dev(dev, link, ALL) { /* If we issue an SRST then an ATA drive (not ATAPI) * may change configuration and be in PIO0 timing. If * we do a hard reset (or are coming from power on) * this is true for ATA or ATAPI. Until we've set a * suitable controller mode we should not touch the * bus as we may be talking too fast. */ dev->pio_mode = XFER_PIO_0; /* If the controller has a pio mode setup function * then use it to set the chipset to rights. Don't * touch the DMA setup as that will be dealt with when * configuring devices. 
*/ if (ap->ops->set_piomode) ap->ops->set_piomode(ap, dev); } /* prefer hardreset */ reset = NULL; ehc->i.action &= ~ATA_EH_RESET; if (hardreset) { reset = hardreset; ehc->i.action |= ATA_EH_HARDRESET; } else if (softreset) { reset = softreset; ehc->i.action |= ATA_EH_SOFTRESET; } if (prereset) { unsigned long deadline = ata_deadline(jiffies, ATA_EH_PRERESET_TIMEOUT); if (slave) { sehc->i.action &= ~ATA_EH_RESET; sehc->i.action |= ehc->i.action; } rc = prereset(link, deadline); /* If present, do prereset on slave link too. Reset * is skipped iff both master and slave links report * -ENOENT or clear ATA_EH_RESET. */ if (slave && (rc == 0 || rc == -ENOENT)) { int tmp; tmp = prereset(slave, deadline); if (tmp != -ENOENT) rc = tmp; ehc->i.action |= sehc->i.action; } if (rc) { if (rc == -ENOENT) { ata_link_dbg(link, "port disabled--ignoring\n"); ehc->i.action &= ~ATA_EH_RESET; ata_for_each_dev(dev, link, ALL) classes[dev->devno] = ATA_DEV_NONE; rc = 0; } else ata_link_err(link, "prereset failed (errno=%d)\n", rc); goto out; } /* prereset() might have cleared ATA_EH_RESET. If so, * bang classes, thaw and return. */ if (reset && !(ehc->i.action & ATA_EH_RESET)) { ata_for_each_dev(dev, link, ALL) classes[dev->devno] = ATA_DEV_NONE; if ((ap->pflags & ATA_PFLAG_FROZEN) && ata_is_host_link(link)) ata_eh_thaw_port(ap); rc = 0; goto out; } } retry: /* * Perform reset */ if (ata_is_host_link(link)) ata_eh_freeze_port(ap); deadline = ata_deadline(jiffies, ata_eh_reset_timeouts[try++]); if (reset) { if (verbose) ata_link_info(link, "%s resetting link\n", reset == softreset ? 
"soft" : "hard"); /* mark that this EH session started with reset */ ehc->last_reset = jiffies; if (reset == hardreset) ehc->i.flags |= ATA_EHI_DID_HARDRESET; else ehc->i.flags |= ATA_EHI_DID_SOFTRESET; rc = ata_do_reset(link, reset, classes, deadline, true); if (rc && rc != -EAGAIN) { failed_link = link; goto fail; } /* hardreset slave link if existent */ if (slave && reset == hardreset) { int tmp; if (verbose) ata_link_info(slave, "hard resetting link\n"); ata_eh_about_to_do(slave, NULL, ATA_EH_RESET); tmp = ata_do_reset(slave, reset, classes, deadline, false); switch (tmp) { case -EAGAIN: rc = -EAGAIN; case 0: break; default: failed_link = slave; rc = tmp; goto fail; } } /* perform follow-up SRST if necessary */ if (reset == hardreset && ata_eh_followup_srst_needed(link, rc)) { reset = softreset; if (!reset) { ata_link_err(link, "follow-up softreset required but no softreset available\n"); failed_link = link; rc = -EINVAL; goto fail; } ata_eh_about_to_do(link, NULL, ATA_EH_RESET); rc = ata_do_reset(link, reset, classes, deadline, true); if (rc) { failed_link = link; goto fail; } } } else { if (verbose) ata_link_info(link, "no reset method available, skipping reset\n"); if (!(lflags & ATA_LFLAG_ASSUME_CLASS)) lflags |= ATA_LFLAG_ASSUME_ATA; } /* * Post-reset processing */ ata_for_each_dev(dev, link, ALL) { /* After the reset, the device state is PIO 0 and the * controller state is undefined. Reset also wakes up * drives from sleeping mode. 
*/ dev->pio_mode = XFER_PIO_0; dev->flags &= ~ATA_DFLAG_SLEEPING; if (ata_phys_link_offline(ata_dev_phys_link(dev))) continue; /* apply class override */ if (lflags & ATA_LFLAG_ASSUME_ATA) classes[dev->devno] = ATA_DEV_ATA; else if (lflags & ATA_LFLAG_ASSUME_SEMB) classes[dev->devno] = ATA_DEV_SEMB_UNSUP; } /* record current link speed */ if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0) link->sata_spd = (sstatus >> 4) & 0xf; if (slave && sata_scr_read(slave, SCR_STATUS, &sstatus) == 0) slave->sata_spd = (sstatus >> 4) & 0xf; /* thaw the port */ if (ata_is_host_link(link)) ata_eh_thaw_port(ap); /* postreset() should clear hardware SError. Although SError * is cleared during link resume, clearing SError here is * necessary as some PHYs raise hotplug events after SRST. * This introduces race condition where hotplug occurs between * reset and here. This race is mediated by cross checking * link onlineness and classification result later. */ if (postreset) { postreset(link, classes); if (slave) postreset(slave, classes); } /* * Some controllers can't be frozen very well and may set spurious * error conditions during reset. Clear accumulated error * information and re-thaw the port if frozen. As reset is the * final recovery action and we cross check link onlineness against * device classification later, no hotplug event is lost by this. */ spin_lock_irqsave(link->ap->lock, flags); memset(&link->eh_info, 0, sizeof(link->eh_info)); if (slave) memset(&slave->eh_info, 0, sizeof(link->eh_info)); ap->pflags &= ~ATA_PFLAG_EH_PENDING; spin_unlock_irqrestore(link->ap->lock, flags); if (ap->pflags & ATA_PFLAG_FROZEN) ata_eh_thaw_port(ap); /* * Make sure onlineness and classification result correspond. * Hotplug could have happened during reset and some * controllers fail to wait while a drive is spinning up after * being hotplugged causing misdetection. By cross checking * link on/offlineness and classification result, those * conditions can be reliably detected and retried. 
*/ nr_unknown = 0; ata_for_each_dev(dev, link, ALL) { if (ata_phys_link_online(ata_dev_phys_link(dev))) { if (classes[dev->devno] == ATA_DEV_UNKNOWN) { ata_dev_dbg(dev, "link online but device misclassified\n"); classes[dev->devno] = ATA_DEV_NONE; nr_unknown++; } } else if (ata_phys_link_offline(ata_dev_phys_link(dev))) { if (ata_class_enabled(classes[dev->devno])) ata_dev_dbg(dev, "link offline, clearing class %d to NONE\n", classes[dev->devno]); classes[dev->devno] = ATA_DEV_NONE; } else if (classes[dev->devno] == ATA_DEV_UNKNOWN) { ata_dev_dbg(dev, "link status unknown, clearing UNKNOWN to NONE\n"); classes[dev->devno] = ATA_DEV_NONE; } } if (classify && nr_unknown) { if (try < max_tries) { ata_link_warn(link, "link online but %d devices misclassified, retrying\n", nr_unknown); failed_link = link; rc = -EAGAIN; goto fail; } ata_link_warn(link, "link online but %d devices misclassified, " "device detection might fail\n", nr_unknown); } /* reset successful, schedule revalidation */ ata_eh_done(link, NULL, ATA_EH_RESET); if (slave) ata_eh_done(slave, NULL, ATA_EH_RESET); ehc->last_reset = jiffies; /* update to completion time */ ehc->i.action |= ATA_EH_REVALIDATE; link->lpm_policy = ATA_LPM_UNKNOWN; /* reset LPM state */ rc = 0; out: /* clear hotplug flag */ ehc->i.flags &= ~ATA_EHI_HOTPLUGGED; if (slave) sehc->i.flags &= ~ATA_EHI_HOTPLUGGED; spin_lock_irqsave(ap->lock, flags); ap->pflags &= ~ATA_PFLAG_RESETTING; spin_unlock_irqrestore(ap->lock, flags); return rc; fail: /* if SCR isn't accessible on a fan-out port, PMP needs to be reset */ if (!ata_is_host_link(link) && sata_scr_read(link, SCR_STATUS, &sstatus)) rc = -ERESTART; if (try >= max_tries) { /* * Thaw host port even if reset failed, so that the port * can be retried on the next phy event. This risks * repeated EH runs but seems to be a better tradeoff than * shutting down a port after a botched hotplug attempt. 
*/ if (ata_is_host_link(link)) ata_eh_thaw_port(ap); goto out; } now = jiffies; if (time_before(now, deadline)) { unsigned long delta = deadline - now; ata_link_warn(failed_link, "reset failed (errno=%d), retrying in %u secs\n", rc, DIV_ROUND_UP(jiffies_to_msecs(delta), 1000)); ata_eh_release(ap); while (delta) delta = schedule_timeout_uninterruptible(delta); ata_eh_acquire(ap); } /* * While disks spinup behind PMP, some controllers fail sending SRST. * They need to be reset - as well as the PMP - before retrying. */ if (rc == -ERESTART) { if (ata_is_host_link(link)) ata_eh_thaw_port(ap); goto out; } if (try == max_tries - 1) { sata_down_spd_limit(link, 0); if (slave) sata_down_spd_limit(slave, 0); } else if (rc == -EPIPE) sata_down_spd_limit(failed_link, 0); if (hardreset) reset = hardreset; goto retry; } static inline void ata_eh_pull_park_action(struct ata_port *ap) { struct ata_link *link; struct ata_device *dev; unsigned long flags; /* * This function can be thought of as an extended version of * ata_eh_about_to_do() specially crafted to accommodate the * requirements of ATA_EH_PARK handling. Since the EH thread * does not leave the do {} while () loop in ata_eh_recover as * long as the timeout for a park request to *one* device on * the port has not expired, and since we still want to pick * up park requests to other devices on the same port or * timeout updates for the same device, we have to pull * ATA_EH_PARK actions from eh_info into eh_context.i * ourselves at the beginning of each pass over the loop. * * Additionally, all write accesses to &ap->park_req_pending * through INIT_COMPLETION() (see below) or complete_all() * (see ata_scsi_park_store()) are protected by the host lock. * As a result we have that park_req_pending.done is zero on * exit from this function, i.e. when ATA_EH_PARK actions for * *all* devices on port ap have been pulled into the * respective eh_context structs. 
If, and only if, * park_req_pending.done is non-zero by the time we reach * wait_for_completion_timeout(), another ATA_EH_PARK action * has been scheduled for at least one of the devices on port * ap and we have to cycle over the do {} while () loop in * ata_eh_recover() again. */ spin_lock_irqsave(ap->lock, flags); INIT_COMPLETION(ap->park_req_pending); ata_for_each_link(link, ap, EDGE) { ata_for_each_dev(dev, link, ALL) { struct ata_eh_info *ehi = &link->eh_info; link->eh_context.i.dev_action[dev->devno] |= ehi->dev_action[dev->devno] & ATA_EH_PARK; ata_eh_clear_action(link, dev, ehi, ATA_EH_PARK); } } spin_unlock_irqrestore(ap->lock, flags); } static void ata_eh_park_issue_cmd(struct ata_device *dev, int park) { struct ata_eh_context *ehc = &dev->link->eh_context; struct ata_taskfile tf; unsigned int err_mask; ata_tf_init(dev, &tf); if (park) { ehc->unloaded_mask |= 1 << dev->devno; tf.command = ATA_CMD_IDLEIMMEDIATE; tf.feature = 0x44; tf.lbal = 0x4c; tf.lbam = 0x4e; tf.lbah = 0x55; } else { ehc->unloaded_mask &= ~(1 << dev->devno); tf.command = ATA_CMD_CHK_POWER; } tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR; tf.protocol |= ATA_PROT_NODATA; err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0); if (park && (err_mask || tf.lbal != 0xc4)) { ata_dev_err(dev, "head unload failed!\n"); ehc->unloaded_mask &= ~(1 << dev->devno); } } static int ata_eh_revalidate_and_attach(struct ata_link *link, struct ata_device **r_failed_dev) { struct ata_port *ap = link->ap; struct ata_eh_context *ehc = &link->eh_context; struct ata_device *dev; unsigned int new_mask = 0; unsigned long flags; int rc = 0; DPRINTK("ENTER\n"); /* For PATA drive side cable detection to work, IDENTIFY must * be done backwards such that PDIAG- is released by the slave * device before the master device is identified. 
*/ ata_for_each_dev(dev, link, ALL_REVERSE) { unsigned int action = ata_eh_dev_action(dev); unsigned int readid_flags = 0; if (ehc->i.flags & ATA_EHI_DID_RESET) readid_flags |= ATA_READID_POSTRESET; if ((action & ATA_EH_REVALIDATE) && ata_dev_enabled(dev)) { WARN_ON(dev->class == ATA_DEV_PMP); if (ata_phys_link_offline(ata_dev_phys_link(dev))) { rc = -EIO; goto err; } ata_eh_about_to_do(link, dev, ATA_EH_REVALIDATE); rc = ata_dev_revalidate(dev, ehc->classes[dev->devno], readid_flags); if (rc) goto err; ata_eh_done(link, dev, ATA_EH_REVALIDATE); /* Configuration may have changed, reconfigure * transfer mode. */ ehc->i.flags |= ATA_EHI_SETMODE; /* schedule the scsi_rescan_device() here */ schedule_work(&(ap->scsi_rescan_task)); } else if (dev->class == ATA_DEV_UNKNOWN && ehc->tries[dev->devno] && ata_class_enabled(ehc->classes[dev->devno])) { /* Temporarily set dev->class, it will be * permanently set once all configurations are * complete. This is necessary because new * device configuration is done in two * separate loops. */ dev->class = ehc->classes[dev->devno]; if (dev->class == ATA_DEV_PMP) rc = sata_pmp_attach(dev); else rc = ata_dev_read_id(dev, &dev->class, readid_flags, dev->id); /* read_id might have changed class, store and reset */ ehc->classes[dev->devno] = dev->class; dev->class = ATA_DEV_UNKNOWN; switch (rc) { case 0: /* clear error info accumulated during probe */ ata_ering_clear(&dev->ering); new_mask |= 1 << dev->devno; break; case -ENOENT: /* IDENTIFY was issued to non-existent * device. No need to reset. Just * thaw and ignore the device. */ ata_eh_thaw_port(ap); break; default: goto err; } } } /* PDIAG- should have been released, ask cable type if post-reset */ if ((ehc->i.flags & ATA_EHI_DID_RESET) && ata_is_host_link(link)) { if (ap->ops->cable_detect) ap->cbl = ap->ops->cable_detect(ap); ata_force_cbl(ap); } /* Configure new devices forward such that user doesn't see * device detection messages backwards. 
*/ ata_for_each_dev(dev, link, ALL) { if (!(new_mask & (1 << dev->devno))) continue; dev->class = ehc->classes[dev->devno]; if (dev->class == ATA_DEV_PMP) continue; ehc->i.flags |= ATA_EHI_PRINTINFO; rc = ata_dev_configure(dev); ehc->i.flags &= ~ATA_EHI_PRINTINFO; if (rc) { dev->class = ATA_DEV_UNKNOWN; goto err; } spin_lock_irqsave(ap->lock, flags); ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG; spin_unlock_irqrestore(ap->lock, flags); /* new device discovered, configure xfermode */ ehc->i.flags |= ATA_EHI_SETMODE; } return 0; err: *r_failed_dev = dev; DPRINTK("EXIT rc=%d\n", rc); return rc; } /** * ata_set_mode - Program timings and issue SET FEATURES - XFER * @link: link on which timings will be programmed * @r_failed_dev: out parameter for failed device * * Set ATA device disk transfer mode (PIO3, UDMA6, etc.). If * ata_set_mode() fails, pointer to the failing device is * returned in @r_failed_dev. * * LOCKING: * PCI/etc. bus probe sem. * * RETURNS: * 0 on success, negative errno otherwise */ int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev) { struct ata_port *ap = link->ap; struct ata_device *dev; int rc; /* if data transfer is verified, clear DUBIOUS_XFER on ering top */ ata_for_each_dev(dev, link, ENABLED) { if (!(dev->flags & ATA_DFLAG_DUBIOUS_XFER)) { struct ata_ering_entry *ent; ent = ata_ering_top(&dev->ering); if (ent) ent->eflags &= ~ATA_EFLAG_DUBIOUS_XFER; } } /* has private set_mode? 
*/ if (ap->ops->set_mode) rc = ap->ops->set_mode(link, r_failed_dev); else rc = ata_do_set_mode(link, r_failed_dev); /* if transfer mode has changed, set DUBIOUS_XFER on device */ ata_for_each_dev(dev, link, ENABLED) { struct ata_eh_context *ehc = &link->eh_context; u8 saved_xfer_mode = ehc->saved_xfer_mode[dev->devno]; u8 saved_ncq = !!(ehc->saved_ncq_enabled & (1 << dev->devno)); if (dev->xfer_mode != saved_xfer_mode || ata_ncq_enabled(dev) != saved_ncq) dev->flags |= ATA_DFLAG_DUBIOUS_XFER; } return rc; } /** * atapi_eh_clear_ua - Clear ATAPI UNIT ATTENTION after reset * @dev: ATAPI device to clear UA for * * Resets and other operations can make an ATAPI device raise * UNIT ATTENTION which causes the next operation to fail. This * function clears UA. * * LOCKING: * EH context (may sleep). * * RETURNS: * 0 on success, -errno on failure. */ static int atapi_eh_clear_ua(struct ata_device *dev) { int i; for (i = 0; i < ATA_EH_UA_TRIES; i++) { u8 *sense_buffer = dev->link->ap->sector_buf; u8 sense_key = 0; unsigned int err_mask; err_mask = atapi_eh_tur(dev, &sense_key); if (err_mask != 0 && err_mask != AC_ERR_DEV) { ata_dev_warn(dev, "TEST_UNIT_READY failed (err_mask=0x%x)\n", err_mask); return -EIO; } if (!err_mask || sense_key != UNIT_ATTENTION) return 0; err_mask = atapi_eh_request_sense(dev, sense_buffer, sense_key); if (err_mask) { ata_dev_warn(dev, "failed to clear " "UNIT ATTENTION (err_mask=0x%x)\n", err_mask); return -EIO; } } ata_dev_warn(dev, "UNIT ATTENTION persists after %d tries\n", ATA_EH_UA_TRIES); return 0; } /** * ata_eh_maybe_retry_flush - Retry FLUSH if necessary * @dev: ATA device which may need FLUSH retry * * If @dev failed FLUSH, it needs to be reported upper layer * immediately as it means that @dev failed to remap and already * lost at least a sector and further FLUSH retrials won't make * any difference to the lost sector. However, if FLUSH failed * for other reasons, for example transmission error, FLUSH needs * to be retried. 
* * This function determines whether FLUSH failure retry is * necessary and performs it if so. * * RETURNS: * 0 if EH can continue, -errno if EH needs to be repeated. */ static int ata_eh_maybe_retry_flush(struct ata_device *dev) { struct ata_link *link = dev->link; struct ata_port *ap = link->ap; struct ata_queued_cmd *qc; struct ata_taskfile tf; unsigned int err_mask; int rc = 0; /* did flush fail for this device? */ if (!ata_tag_valid(link->active_tag)) return 0; qc = __ata_qc_from_tag(ap, link->active_tag); if (qc->dev != dev || (qc->tf.command != ATA_CMD_FLUSH_EXT && qc->tf.command != ATA_CMD_FLUSH)) return 0; /* if the device failed it, it should be reported to upper layers */ if (qc->err_mask & AC_ERR_DEV) return 0; /* flush failed for some other reason, give it another shot */ ata_tf_init(dev, &tf); tf.command = qc->tf.command; tf.flags |= ATA_TFLAG_DEVICE; tf.protocol = ATA_PROT_NODATA; ata_dev_warn(dev, "retrying FLUSH 0x%x Emask 0x%x\n", tf.command, qc->err_mask); err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0); if (!err_mask) { /* * FLUSH is complete but there's no way to * successfully complete a failed command from EH. * Making sure retry is allowed at least once and * retrying it should do the trick - whatever was in * the cache is already on the platter and this won't * cause infinite loop. */ qc->scsicmd->allowed = max(qc->scsicmd->allowed, 1); } else { ata_dev_warn(dev, "FLUSH failed Emask 0x%x\n", err_mask); rc = -EIO; /* if device failed it, report it to upper layers */ if (err_mask & AC_ERR_DEV) { qc->err_mask |= AC_ERR_DEV; qc->result_tf = tf; if (!(ap->pflags & ATA_PFLAG_FROZEN)) rc = 0; } } return rc; } /** * ata_eh_set_lpm - configure SATA interface power management * @link: link to configure power management * @policy: the link power management policy * @r_failed_dev: out parameter for failed device * * Enable SATA Interface power management. 
This will enable * Device Interface Power Management (DIPM) for min_power * policy, and then call driver specific callbacks for * enabling Host Initiated Power management. * * LOCKING: * EH context. * * RETURNS: * 0 on success, -errno on failure. */ static int ata_eh_set_lpm(struct ata_link *link, enum ata_lpm_policy policy, struct ata_device **r_failed_dev) { struct ata_port *ap = ata_is_host_link(link) ? link->ap : NULL; struct ata_eh_context *ehc = &link->eh_context; struct ata_device *dev, *link_dev = NULL, *lpm_dev = NULL; enum ata_lpm_policy old_policy = link->lpm_policy; bool no_dipm = link->ap->flags & ATA_FLAG_NO_DIPM; unsigned int hints = ATA_LPM_EMPTY | ATA_LPM_HIPM; unsigned int err_mask; int rc; /* if the link or host doesn't do LPM, noop */ if ((link->flags & ATA_LFLAG_NO_LPM) || (ap && !ap->ops->set_lpm)) return 0; /* * DIPM is enabled only for MIN_POWER as some devices * misbehave when the host NACKs transition to SLUMBER. Order * device and link configurations such that the host always * allows DIPM requests. */ ata_for_each_dev(dev, link, ENABLED) { bool hipm = ata_id_has_hipm(dev->id); bool dipm = ata_id_has_dipm(dev->id) && !no_dipm; /* find the first enabled and LPM enabled devices */ if (!link_dev) link_dev = dev; if (!lpm_dev && (hipm || dipm)) lpm_dev = dev; hints &= ~ATA_LPM_EMPTY; if (!hipm) hints &= ~ATA_LPM_HIPM; /* disable DIPM before changing link config */ if (policy != ATA_LPM_MIN_POWER && dipm) { err_mask = ata_dev_set_feature(dev, SETFEATURES_SATA_DISABLE, SATA_DIPM); if (err_mask && err_mask != AC_ERR_DEV) { ata_dev_warn(dev, "failed to disable DIPM, Emask 0x%x\n", err_mask); rc = -EIO; goto fail; } } } if (ap) { rc = ap->ops->set_lpm(link, policy, hints); if (!rc && ap->slave_link) rc = ap->ops->set_lpm(ap->slave_link, policy, hints); } else rc = sata_pmp_set_lpm(link, policy, hints); /* * Attribute link config failure to the first (LPM) enabled * device on the link. 
*/ if (rc) { if (rc == -EOPNOTSUPP) { link->flags |= ATA_LFLAG_NO_LPM; return 0; } dev = lpm_dev ? lpm_dev : link_dev; goto fail; } /* * Low level driver acked the transition. Issue DIPM command * with the new policy set. */ link->lpm_policy = policy; if (ap && ap->slave_link) ap->slave_link->lpm_policy = policy; /* host config updated, enable DIPM if transitioning to MIN_POWER */ ata_for_each_dev(dev, link, ENABLED) { if (policy == ATA_LPM_MIN_POWER && !no_dipm && ata_id_has_dipm(dev->id)) { err_mask = ata_dev_set_feature(dev, SETFEATURES_SATA_ENABLE, SATA_DIPM); if (err_mask && err_mask != AC_ERR_DEV) { ata_dev_warn(dev, "failed to enable DIPM, Emask 0x%x\n", err_mask); rc = -EIO; goto fail; } } } return 0; fail: /* restore the old policy */ link->lpm_policy = old_policy; if (ap && ap->slave_link) ap->slave_link->lpm_policy = old_policy; /* if no device or only one more chance is left, disable LPM */ if (!dev || ehc->tries[dev->devno] <= 2) { ata_link_warn(link, "disabling LPM on the link\n"); link->flags |= ATA_LFLAG_NO_LPM; } if (r_failed_dev) *r_failed_dev = dev; return rc; } int ata_link_nr_enabled(struct ata_link *link) { struct ata_device *dev; int cnt = 0; ata_for_each_dev(dev, link, ENABLED) cnt++; return cnt; } static int ata_link_nr_vacant(struct ata_link *link) { struct ata_device *dev; int cnt = 0; ata_for_each_dev(dev, link, ALL) if (dev->class == ATA_DEV_UNKNOWN) cnt++; return cnt; } static int ata_eh_skip_recovery(struct ata_link *link) { struct ata_port *ap = link->ap; struct ata_eh_context *ehc = &link->eh_context; struct ata_device *dev; /* skip disabled links */ if (link->flags & ATA_LFLAG_DISABLED) return 1; /* skip if explicitly requested */ if (ehc->i.flags & ATA_EHI_NO_RECOVERY) return 1; /* thaw frozen port and recover failed devices */ if ((ap->pflags & ATA_PFLAG_FROZEN) || ata_link_nr_enabled(link)) return 0; /* reset at least once if reset is requested */ if ((ehc->i.action & ATA_EH_RESET) && !(ehc->i.flags & ATA_EHI_DID_RESET)) return 
0; /* skip if class codes for all vacant slots are ATA_DEV_NONE */ ata_for_each_dev(dev, link, ALL) { if (dev->class == ATA_DEV_UNKNOWN && ehc->classes[dev->devno] != ATA_DEV_NONE) return 0; } return 1; } static int ata_count_probe_trials_cb(struct ata_ering_entry *ent, void *void_arg) { u64 interval = msecs_to_jiffies(ATA_EH_PROBE_TRIAL_INTERVAL); u64 now = get_jiffies_64(); int *trials = void_arg; if ((ent->eflags & ATA_EFLAG_OLD_ER) || (ent->timestamp < now - min(now, interval))) return -1; (*trials)++; return 0; } static int ata_eh_schedule_probe(struct ata_device *dev) { struct ata_eh_context *ehc = &dev->link->eh_context; struct ata_link *link = ata_dev_phys_link(dev); int trials = 0; if (!(ehc->i.probe_mask & (1 << dev->devno)) || (ehc->did_probe_mask & (1 << dev->devno))) return 0; ata_eh_detach_dev(dev); ata_dev_init(dev); ehc->did_probe_mask |= (1 << dev->devno); ehc->i.action |= ATA_EH_RESET; ehc->saved_xfer_mode[dev->devno] = 0; ehc->saved_ncq_enabled &= ~(1 << dev->devno); /* the link maybe in a deep sleep, wake it up */ if (link->lpm_policy > ATA_LPM_MAX_POWER) { if (ata_is_host_link(link)) link->ap->ops->set_lpm(link, ATA_LPM_MAX_POWER, ATA_LPM_EMPTY); else sata_pmp_set_lpm(link, ATA_LPM_MAX_POWER, ATA_LPM_EMPTY); } /* Record and count probe trials on the ering. The specific * error mask used is irrelevant. Because a successful device * detection clears the ering, this count accumulates only if * there are consecutive failed probes. * * If the count is equal to or higher than ATA_EH_PROBE_TRIALS * in the last ATA_EH_PROBE_TRIAL_INTERVAL, link speed is * forced to 1.5Gbps. * * This is to work around cases where failed link speed * negotiation results in device misdetection leading to * infinite DEVXCHG or PHRDY CHG events. 
*/ ata_ering_record(&dev->ering, 0, AC_ERR_OTHER); ata_ering_map(&dev->ering, ata_count_probe_trials_cb, &trials); if (trials > ATA_EH_PROBE_TRIALS) sata_down_spd_limit(link, 1); return 1; } static int ata_eh_handle_dev_fail(struct ata_device *dev, int err) { struct ata_eh_context *ehc = &dev->link->eh_context; /* -EAGAIN from EH routine indicates retry without prejudice. * The requester is responsible for ensuring forward progress. */ if (err != -EAGAIN) ehc->tries[dev->devno]--; switch (err) { case -ENODEV: /* device missing or wrong IDENTIFY data, schedule probing */ ehc->i.probe_mask |= (1 << dev->devno); case -EINVAL: /* give it just one more chance */ ehc->tries[dev->devno] = min(ehc->tries[dev->devno], 1); case -EIO: if (ehc->tries[dev->devno] == 1) { /* This is the last chance, better to slow * down than lose it. */ sata_down_spd_limit(ata_dev_phys_link(dev), 0); if (dev->pio_mode > XFER_PIO_0) ata_down_xfermask_limit(dev, ATA_DNXFER_PIO); } } if (ata_dev_enabled(dev) && !ehc->tries[dev->devno]) { /* disable device if it has used up all its chances */ ata_dev_disable(dev); /* detach if offline */ if (ata_phys_link_offline(ata_dev_phys_link(dev))) ata_eh_detach_dev(dev); /* schedule probe if necessary */ if (ata_eh_schedule_probe(dev)) { ehc->tries[dev->devno] = ATA_EH_DEV_TRIES; memset(ehc->cmd_timeout_idx[dev->devno], 0, sizeof(ehc->cmd_timeout_idx[dev->devno])); } return 1; } else { ehc->i.action |= ATA_EH_RESET; return 0; } } /** * ata_eh_recover - recover host port after error * @ap: host port to recover * @prereset: prereset method (can be NULL) * @softreset: softreset method (can be NULL) * @hardreset: hardreset method (can be NULL) * @postreset: postreset method (can be NULL) * @r_failed_link: out parameter for failed link * * This is the alpha and omega, eum and yang, heart and soul of * libata exception handling. On entry, actions required to * recover each link and hotplug requests are recorded in the * link's eh_context. 
This function executes all the operations * with appropriate retrials and fallbacks to resurrect failed * devices, detach goners and greet newcomers. * * LOCKING: * Kernel thread context (may sleep). * * RETURNS: * 0 on success, -errno on failure. */ int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset, ata_reset_fn_t softreset, ata_reset_fn_t hardreset, ata_postreset_fn_t postreset, struct ata_link **r_failed_link) { struct ata_link *link; struct ata_device *dev; int rc, nr_fails; unsigned long flags, deadline; DPRINTK("ENTER\n"); /* prep for recovery */ ata_for_each_link(link, ap, EDGE) { struct ata_eh_context *ehc = &link->eh_context; /* re-enable link? */ if (ehc->i.action & ATA_EH_ENABLE_LINK) { ata_eh_about_to_do(link, NULL, ATA_EH_ENABLE_LINK); spin_lock_irqsave(ap->lock, flags); link->flags &= ~ATA_LFLAG_DISABLED; spin_unlock_irqrestore(ap->lock, flags); ata_eh_done(link, NULL, ATA_EH_ENABLE_LINK); } ata_for_each_dev(dev, link, ALL) { if (link->flags & ATA_LFLAG_NO_RETRY) ehc->tries[dev->devno] = 1; else ehc->tries[dev->devno] = ATA_EH_DEV_TRIES; /* collect port action mask recorded in dev actions */ ehc->i.action |= ehc->i.dev_action[dev->devno] & ~ATA_EH_PERDEV_MASK; ehc->i.dev_action[dev->devno] &= ATA_EH_PERDEV_MASK; /* process hotplug request */ if (dev->flags & ATA_DFLAG_DETACH) ata_eh_detach_dev(dev); /* schedule probe if necessary */ if (!ata_dev_enabled(dev)) ata_eh_schedule_probe(dev); } } retry: rc = 0; /* if UNLOADING, finish immediately */ if (ap->pflags & ATA_PFLAG_UNLOADING) goto out; /* prep for EH */ ata_for_each_link(link, ap, EDGE) { struct ata_eh_context *ehc = &link->eh_context; /* skip EH if possible. 
*/ if (ata_eh_skip_recovery(link)) ehc->i.action = 0; ata_for_each_dev(dev, link, ALL) ehc->classes[dev->devno] = ATA_DEV_UNKNOWN; } /* reset */ ata_for_each_link(link, ap, EDGE) { struct ata_eh_context *ehc = &link->eh_context; if (!(ehc->i.action & ATA_EH_RESET)) continue; rc = ata_eh_reset(link, ata_link_nr_vacant(link), prereset, softreset, hardreset, postreset); if (rc) { ata_link_err(link, "reset failed, giving up\n"); goto out; } } do { unsigned long now; /* * clears ATA_EH_PARK in eh_info and resets * ap->park_req_pending */ ata_eh_pull_park_action(ap); deadline = jiffies; ata_for_each_link(link, ap, EDGE) { ata_for_each_dev(dev, link, ALL) { struct ata_eh_context *ehc = &link->eh_context; unsigned long tmp; if (dev->class != ATA_DEV_ATA) continue; if (!(ehc->i.dev_action[dev->devno] & ATA_EH_PARK)) continue; tmp = dev->unpark_deadline; if (time_before(deadline, tmp)) deadline = tmp; else if (time_before_eq(tmp, jiffies)) continue; if (ehc->unloaded_mask & (1 << dev->devno)) continue; ata_eh_park_issue_cmd(dev, 1); } } now = jiffies; if (time_before_eq(deadline, now)) break; ata_eh_release(ap); deadline = wait_for_completion_timeout(&ap->park_req_pending, deadline - now); ata_eh_acquire(ap); } while (deadline); ata_for_each_link(link, ap, EDGE) { ata_for_each_dev(dev, link, ALL) { if (!(link->eh_context.unloaded_mask & (1 << dev->devno))) continue; ata_eh_park_issue_cmd(dev, 0); ata_eh_done(link, dev, ATA_EH_PARK); } } /* the rest */ nr_fails = 0; ata_for_each_link(link, ap, PMP_FIRST) { struct ata_eh_context *ehc = &link->eh_context; if (sata_pmp_attached(ap) && ata_is_host_link(link)) goto config_lpm; /* revalidate existing devices and attach new ones */ rc = ata_eh_revalidate_and_attach(link, &dev); if (rc) goto rest_fail; /* if PMP got attached, return, pmp EH will take care of it */ if (link->device->class == ATA_DEV_PMP) { ehc->i.action = 0; return 0; } /* configure transfer mode if necessary */ if (ehc->i.flags & ATA_EHI_SETMODE) { rc = 
ata_set_mode(link, &dev); if (rc) goto rest_fail; ehc->i.flags &= ~ATA_EHI_SETMODE; } /* If reset has been issued, clear UA to avoid * disrupting the current users of the device. */ if (ehc->i.flags & ATA_EHI_DID_RESET) { ata_for_each_dev(dev, link, ALL) { if (dev->class != ATA_DEV_ATAPI) continue; rc = atapi_eh_clear_ua(dev); if (rc) goto rest_fail; } } /* retry flush if necessary */ ata_for_each_dev(dev, link, ALL) { if (dev->class != ATA_DEV_ATA) continue; rc = ata_eh_maybe_retry_flush(dev); if (rc) goto rest_fail; } config_lpm: /* configure link power saving */ if (link->lpm_policy != ap->target_lpm_policy) { rc = ata_eh_set_lpm(link, ap->target_lpm_policy, &dev); if (rc) goto rest_fail; } /* this link is okay now */ ehc->i.flags = 0; continue; rest_fail: nr_fails++; if (dev) ata_eh_handle_dev_fail(dev, rc); if (ap->pflags & ATA_PFLAG_FROZEN) { /* PMP reset requires working host port. * Can't retry if it's frozen. */ if (sata_pmp_attached(ap)) goto out; break; } } if (nr_fails) goto retry; out: if (rc && r_failed_link) *r_failed_link = link; DPRINTK("EXIT, rc=%d\n", rc); return rc; } /** * ata_eh_finish - finish up EH * @ap: host port to finish EH for * * Recovery is complete. Clean up EH states and retry or finish * failed qcs. * * LOCKING: * None. */ void ata_eh_finish(struct ata_port *ap) { int tag; /* retry or finish qcs */ for (tag = 0; tag < ATA_MAX_QUEUE; tag++) { struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag); if (!(qc->flags & ATA_QCFLAG_FAILED)) continue; if (qc->err_mask) { /* FIXME: Once EH migration is complete, * generate sense data in this function, * considering both err_mask and tf. 
*/ if (qc->flags & ATA_QCFLAG_RETRY) ata_eh_qc_retry(qc); else ata_eh_qc_complete(qc); } else { if (qc->flags & ATA_QCFLAG_SENSE_VALID) { ata_eh_qc_complete(qc); } else { /* feed zero TF to sense generation */ memset(&qc->result_tf, 0, sizeof(qc->result_tf)); ata_eh_qc_retry(qc); } } } /* make sure nr_active_links is zero after EH */ WARN_ON(ap->nr_active_links); ap->nr_active_links = 0; } /** * ata_do_eh - do standard error handling * @ap: host port to handle error for * * @prereset: prereset method (can be NULL) * @softreset: softreset method (can be NULL) * @hardreset: hardreset method (can be NULL) * @postreset: postreset method (can be NULL) * * Perform standard error handling sequence. * * LOCKING: * Kernel thread context (may sleep). */ void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset, ata_reset_fn_t softreset, ata_reset_fn_t hardreset, ata_postreset_fn_t postreset) { struct ata_device *dev; int rc; ata_eh_autopsy(ap); ata_eh_report(ap); rc = ata_eh_recover(ap, prereset, softreset, hardreset, postreset, NULL); if (rc) { ata_for_each_dev(dev, &ap->link, ALL) ata_dev_disable(dev); } ata_eh_finish(ap); } /** * ata_std_error_handler - standard error handler * @ap: host port to handle error for * * Standard error handler * * LOCKING: * Kernel thread context (may sleep). */ void ata_std_error_handler(struct ata_port *ap) { struct ata_port_operations *ops = ap->ops; ata_reset_fn_t hardreset = ops->hardreset; /* ignore built-in hardreset if SCR access is not available */ if (hardreset == sata_std_hardreset && !sata_scr_valid(&ap->link)) hardreset = NULL; ata_do_eh(ap, ops->prereset, ops->softreset, hardreset, ops->postreset); } #ifdef CONFIG_PM /** * ata_eh_handle_port_suspend - perform port suspend operation * @ap: port to suspend * * Suspend @ap. * * LOCKING: * Kernel thread context (may sleep). */ static void ata_eh_handle_port_suspend(struct ata_port *ap) { unsigned long flags; int rc = 0; /* are we suspending? 
*/ spin_lock_irqsave(ap->lock, flags); if (!(ap->pflags & ATA_PFLAG_PM_PENDING) || ap->pm_mesg.event == PM_EVENT_ON) { spin_unlock_irqrestore(ap->lock, flags); return; } spin_unlock_irqrestore(ap->lock, flags); WARN_ON(ap->pflags & ATA_PFLAG_SUSPENDED); /* tell ACPI we're suspending */ rc = ata_acpi_on_suspend(ap); if (rc) goto out; /* suspend */ ata_eh_freeze_port(ap); if (ap->ops->port_suspend) rc = ap->ops->port_suspend(ap, ap->pm_mesg); ata_acpi_set_state(ap, PMSG_SUSPEND); out: /* report result */ spin_lock_irqsave(ap->lock, flags); ap->pflags &= ~ATA_PFLAG_PM_PENDING; if (rc == 0) ap->pflags |= ATA_PFLAG_SUSPENDED; else if (ap->pflags & ATA_PFLAG_FROZEN) ata_port_schedule_eh(ap); if (ap->pm_result) { *ap->pm_result = rc; ap->pm_result = NULL; } spin_unlock_irqrestore(ap->lock, flags); return; } /** * ata_eh_handle_port_resume - perform port resume operation * @ap: port to resume * * Resume @ap. * * LOCKING: * Kernel thread context (may sleep). */ static void ata_eh_handle_port_resume(struct ata_port *ap) { struct ata_link *link; struct ata_device *dev; unsigned long flags; int rc = 0; /* are we resuming? */ spin_lock_irqsave(ap->lock, flags); if (!(ap->pflags & ATA_PFLAG_PM_PENDING) || ap->pm_mesg.event != PM_EVENT_ON) { spin_unlock_irqrestore(ap->lock, flags); return; } spin_unlock_irqrestore(ap->lock, flags); WARN_ON(!(ap->pflags & ATA_PFLAG_SUSPENDED)); /* * Error timestamps are in jiffies which doesn't run while * suspended and PHY events during resume isn't too uncommon. * When the two are combined, it can lead to unnecessary speed * downs if the machine is suspended and resumed repeatedly. * Clear error history. 
*/ ata_for_each_link(link, ap, HOST_FIRST) ata_for_each_dev(dev, link, ALL) ata_ering_clear(&dev->ering); ata_acpi_set_state(ap, PMSG_ON); if (ap->ops->port_resume) rc = ap->ops->port_resume(ap); /* tell ACPI that we're resuming */ ata_acpi_on_resume(ap); /* report result */ spin_lock_irqsave(ap->lock, flags); ap->pflags &= ~(ATA_PFLAG_PM_PENDING | ATA_PFLAG_SUSPENDED); if (ap->pm_result) { *ap->pm_result = rc; ap->pm_result = NULL; } spin_unlock_irqrestore(ap->lock, flags); } #endif /* CONFIG_PM */
gpl-2.0
FreonRoms/freon_kernel_samsung_d2
drivers/usb/gadget/tcm_usb_gadget.c
3494
2970
/* Target based USB-Gadget Function
 *
 * UAS protocol handling, target callbacks, configfs handling,
 * BBB (USB Mass Storage Class Bulk-Only (BBB) and Transport protocol handling.
 *
 * Author: Sebastian Andrzej Siewior <bigeasy at linutronix dot de>
 * License: GPLv2 as published by FSF.
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/usb/composite.h>
#include <linux/usb/gadget.h>

/* NOTE(review): pre-libcomposite style — the shared gadget framework
 * sources are compiled directly into this translation unit by including
 * them as .c files, so everything below links against those symbols. */
#include "usbstring.c"
#include "epautoconf.c"
#include "config.c"
#include "composite.c"

#include "f_tcm.c"

/* Device identity: reuses the NetChip development VID/PID pair. */
#define UAS_VENDOR_ID	0x0525	/* NetChip */
#define UAS_PRODUCT_ID	0xa4a5	/* Linux-USB File-backed Storage Gadget */

/* String-descriptor IDs referenced by the device/config descriptors
 * below; they index into usbg_us_strings[]. */
#define USB_G_STR_MANUFACTOR    1
#define USB_G_STR_PRODUCT       2
#define USB_G_STR_SERIAL        3
#define USB_G_STR_CONFIG        4

/* USB device descriptor advertised during enumeration. */
static struct usb_device_descriptor usbg_device_desc = {
	.bLength =		sizeof(usbg_device_desc),
	.bDescriptorType =	USB_DT_DEVICE,
	.bcdUSB =		cpu_to_le16(0x0200),
	.bDeviceClass =		USB_CLASS_PER_INTERFACE,
	.idVendor =		cpu_to_le16(UAS_VENDOR_ID),
	.idProduct =		cpu_to_le16(UAS_PRODUCT_ID),
	.iManufacturer =	USB_G_STR_MANUFACTOR,
	.iProduct =		USB_G_STR_PRODUCT,
	.iSerialNumber =	USB_G_STR_SERIAL,
	.bNumConfigurations =	1,
};

/* Backing text for the string-descriptor IDs above (NUL entry ends it). */
static struct usb_string usbg_us_strings[] = {
	{ USB_G_STR_MANUFACTOR,	"Target Manufactor"},
	{ USB_G_STR_PRODUCT,	"Target Product"},
	{ USB_G_STR_SERIAL,	"000000000001"},
	{ USB_G_STR_CONFIG,	"default config"},
	{ },
};

static struct usb_gadget_strings usbg_stringtab = {
	.language = 0x0409,	/* LANGID 0x0409 = en-US */
	.strings = usbg_us_strings,
};

/* NULL-terminated list of string tables handed to the composite core. */
static struct usb_gadget_strings *usbg_strings[] = {
	&usbg_stringtab,
	NULL,
};

/* Single self-powered configuration exposed by this gadget. */
static struct usb_configuration usbg_config_driver = {
	.label			= "Linux Target",
	.bConfigurationValue	= 1,
	.iConfiguration		= USB_G_STR_CONFIG,
	.bmAttributes		= USB_CONFIG_ATT_SELFPOWER,
};

/* Config bind callback: attach the TCM (target) function from f_tcm.c. */
static int usbg_cfg_bind(struct usb_configuration *c)
{
	return tcm_bind_config(c);
}

/* Composite bind: register our one configuration with the gadget core. */
static int usb_target_bind(struct usb_composite_dev *cdev)
{
	int ret;

	ret = usb_add_config(cdev, &usbg_config_driver,
			usbg_cfg_bind);
	return ret;
}

/* Nothing to undo here; teardown is handled by the composite core. */
static int guas_unbind(struct usb_composite_dev *cdev)
{
	return 0;
}

static struct usb_composite_driver usbg_driver = {
	.name		= "g_target",
	.dev		= &usbg_device_desc,
	.strings	= usbg_strings,
	.max_speed	= USB_SPEED_SUPER,
	.unbind		= guas_unbind,
};

/* Callback invoked by f_tcm when the fabric is enabled/disabled via
 * configfs: registers or unregisters the composite driver accordingly. */
static int usbg_attach_cb(bool connect)
{
	int ret = 0;

	if (connect)
		ret = usb_composite_probe(&usbg_driver, usb_target_bind);
	else
		usb_composite_unregister(&usbg_driver);

	return ret;
}

/* Module init: hand our attach callback to the TCM fabric layer; actual
 * USB registration is deferred until usbg_attach_cb(true) fires. */
static int __init usb_target_gadget_init(void)
{
	int ret;

	ret = f_tcm_init(&usbg_attach_cb);
	return ret;
}
module_init(usb_target_gadget_init);

static void __exit usb_target_gadget_exit(void)
{
	f_tcm_exit();
}
module_exit(usb_target_gadget_exit);

MODULE_AUTHOR("Sebastian Andrzej Siewior <bigeasy@linutronix.de>");
MODULE_DESCRIPTION("usb-gadget fabric");
MODULE_LICENSE("GPL v2");
gpl-2.0
CyanideL/android_kernel_lge_g3
drivers/tty/serial/sunsab.c
4262
30015
/* sunsab.c: ASYNC Driver for the SIEMENS SAB82532 DUSCC. * * Copyright (C) 1997 Eddie C. Dost (ecd@skynet.be) * Copyright (C) 2002, 2006 David S. Miller (davem@davemloft.net) * * Rewrote buffer handling to use CIRC(Circular Buffer) macros. * Maxim Krasnyanskiy <maxk@qualcomm.com> * * Fixed to use tty_get_baud_rate, and to allow for arbitrary baud * rates to be programmed into the UART. Also eliminated a lot of * duplicated code in the console setup. * Theodore Ts'o <tytso@mit.edu>, 2001-Oct-12 * * Ported to new 2.5.x UART layer. * David S. Miller <davem@davemloft.net> */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/tty.h> #include <linux/tty_flip.h> #include <linux/major.h> #include <linux/string.h> #include <linux/ptrace.h> #include <linux/ioport.h> #include <linux/circ_buf.h> #include <linux/serial.h> #include <linux/sysrq.h> #include <linux/console.h> #include <linux/spinlock.h> #include <linux/slab.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/of_device.h> #include <asm/io.h> #include <asm/irq.h> #include <asm/prom.h> #include <asm/setup.h> #if defined(CONFIG_SERIAL_SUNSAB_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ) #define SUPPORT_SYSRQ #endif #include <linux/serial_core.h> #include <linux/sunserialcore.h> #include "sunsab.h" struct uart_sunsab_port { struct uart_port port; /* Generic UART port */ union sab82532_async_regs __iomem *regs; /* Chip registers */ unsigned long irqflags; /* IRQ state flags */ int dsr; /* Current DSR state */ unsigned int cec_timeout; /* Chip poll timeout... 
*/ unsigned int tec_timeout; /* likewise */ unsigned char interrupt_mask0;/* ISR0 masking */ unsigned char interrupt_mask1;/* ISR1 masking */ unsigned char pvr_dtr_bit; /* Which PVR bit is DTR */ unsigned char pvr_dsr_bit; /* Which PVR bit is DSR */ unsigned int gis_shift; int type; /* SAB82532 version */ /* Setting configuration bits while the transmitter is active * can cause garbage characters to get emitted by the chip. * Therefore, we cache such writes here and do the real register * write the next time the transmitter becomes idle. */ unsigned int cached_ebrg; unsigned char cached_mode; unsigned char cached_pvr; unsigned char cached_dafo; }; /* * This assumes you have a 29.4912 MHz clock for your UART. */ #define SAB_BASE_BAUD ( 29491200 / 16 ) static char *sab82532_version[16] = { "V1.0", "V2.0", "V3.2", "V(0x03)", "V(0x04)", "V(0x05)", "V(0x06)", "V(0x07)", "V(0x08)", "V(0x09)", "V(0x0a)", "V(0x0b)", "V(0x0c)", "V(0x0d)", "V(0x0e)", "V(0x0f)" }; #define SAB82532_MAX_TEC_TIMEOUT 200000 /* 1 character time (at 50 baud) */ #define SAB82532_MAX_CEC_TIMEOUT 50000 /* 2.5 TX CLKs (at 50 baud) */ #define SAB82532_RECV_FIFO_SIZE 32 /* Standard async fifo sizes */ #define SAB82532_XMIT_FIFO_SIZE 32 static __inline__ void sunsab_tec_wait(struct uart_sunsab_port *up) { int timeout = up->tec_timeout; while ((readb(&up->regs->r.star) & SAB82532_STAR_TEC) && --timeout) udelay(1); } static __inline__ void sunsab_cec_wait(struct uart_sunsab_port *up) { int timeout = up->cec_timeout; while ((readb(&up->regs->r.star) & SAB82532_STAR_CEC) && --timeout) udelay(1); } static struct tty_struct * receive_chars(struct uart_sunsab_port *up, union sab82532_irq_status *stat) { struct tty_struct *tty = NULL; unsigned char buf[32]; int saw_console_brk = 0; int free_fifo = 0; int count = 0; int i; if (up->port.state != NULL) /* Unopened serial console */ tty = up->port.state->port.tty; /* Read number of BYTES (Character + Status) available. 
*/ if (stat->sreg.isr0 & SAB82532_ISR0_RPF) { count = SAB82532_RECV_FIFO_SIZE; free_fifo++; } if (stat->sreg.isr0 & SAB82532_ISR0_TCD) { count = readb(&up->regs->r.rbcl) & (SAB82532_RECV_FIFO_SIZE - 1); free_fifo++; } /* Issue a FIFO read command in case we where idle. */ if (stat->sreg.isr0 & SAB82532_ISR0_TIME) { sunsab_cec_wait(up); writeb(SAB82532_CMDR_RFRD, &up->regs->w.cmdr); return tty; } if (stat->sreg.isr0 & SAB82532_ISR0_RFO) free_fifo++; /* Read the FIFO. */ for (i = 0; i < count; i++) buf[i] = readb(&up->regs->r.rfifo[i]); /* Issue Receive Message Complete command. */ if (free_fifo) { sunsab_cec_wait(up); writeb(SAB82532_CMDR_RMC, &up->regs->w.cmdr); } /* Count may be zero for BRK, so we check for it here */ if ((stat->sreg.isr1 & SAB82532_ISR1_BRK) && (up->port.line == up->port.cons->index)) saw_console_brk = 1; for (i = 0; i < count; i++) { unsigned char ch = buf[i], flag; if (tty == NULL) { uart_handle_sysrq_char(&up->port, ch); continue; } flag = TTY_NORMAL; up->port.icount.rx++; if (unlikely(stat->sreg.isr0 & (SAB82532_ISR0_PERR | SAB82532_ISR0_FERR | SAB82532_ISR0_RFO)) || unlikely(stat->sreg.isr1 & SAB82532_ISR1_BRK)) { /* * For statistics only */ if (stat->sreg.isr1 & SAB82532_ISR1_BRK) { stat->sreg.isr0 &= ~(SAB82532_ISR0_PERR | SAB82532_ISR0_FERR); up->port.icount.brk++; /* * We do the SysRQ and SAK checking * here because otherwise the break * may get masked by ignore_status_mask * or read_status_mask. */ if (uart_handle_break(&up->port)) continue; } else if (stat->sreg.isr0 & SAB82532_ISR0_PERR) up->port.icount.parity++; else if (stat->sreg.isr0 & SAB82532_ISR0_FERR) up->port.icount.frame++; if (stat->sreg.isr0 & SAB82532_ISR0_RFO) up->port.icount.overrun++; /* * Mask off conditions which should be ingored. 
*/ stat->sreg.isr0 &= (up->port.read_status_mask & 0xff); stat->sreg.isr1 &= ((up->port.read_status_mask >> 8) & 0xff); if (stat->sreg.isr1 & SAB82532_ISR1_BRK) { flag = TTY_BREAK; } else if (stat->sreg.isr0 & SAB82532_ISR0_PERR) flag = TTY_PARITY; else if (stat->sreg.isr0 & SAB82532_ISR0_FERR) flag = TTY_FRAME; } if (uart_handle_sysrq_char(&up->port, ch)) continue; if ((stat->sreg.isr0 & (up->port.ignore_status_mask & 0xff)) == 0 && (stat->sreg.isr1 & ((up->port.ignore_status_mask >> 8) & 0xff)) == 0) tty_insert_flip_char(tty, ch, flag); if (stat->sreg.isr0 & SAB82532_ISR0_RFO) tty_insert_flip_char(tty, 0, TTY_OVERRUN); } if (saw_console_brk) sun_do_break(); return tty; } static void sunsab_stop_tx(struct uart_port *); static void sunsab_tx_idle(struct uart_sunsab_port *); static void transmit_chars(struct uart_sunsab_port *up, union sab82532_irq_status *stat) { struct circ_buf *xmit = &up->port.state->xmit; int i; if (stat->sreg.isr1 & SAB82532_ISR1_ALLS) { up->interrupt_mask1 |= SAB82532_IMR1_ALLS; writeb(up->interrupt_mask1, &up->regs->w.imr1); set_bit(SAB82532_ALLS, &up->irqflags); } #if 0 /* bde@nwlink.com says this check causes problems */ if (!(stat->sreg.isr1 & SAB82532_ISR1_XPR)) return; #endif if (!(readb(&up->regs->r.star) & SAB82532_STAR_XFW)) return; set_bit(SAB82532_XPR, &up->irqflags); sunsab_tx_idle(up); if (uart_circ_empty(xmit) || uart_tx_stopped(&up->port)) { up->interrupt_mask1 |= SAB82532_IMR1_XPR; writeb(up->interrupt_mask1, &up->regs->w.imr1); return; } up->interrupt_mask1 &= ~(SAB82532_IMR1_ALLS|SAB82532_IMR1_XPR); writeb(up->interrupt_mask1, &up->regs->w.imr1); clear_bit(SAB82532_ALLS, &up->irqflags); /* Stuff 32 bytes into Transmit FIFO. */ clear_bit(SAB82532_XPR, &up->irqflags); for (i = 0; i < up->port.fifosize; i++) { writeb(xmit->buf[xmit->tail], &up->regs->w.xfifo[i]); xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); up->port.icount.tx++; if (uart_circ_empty(xmit)) break; } /* Issue a Transmit Frame command. 
*/ sunsab_cec_wait(up); writeb(SAB82532_CMDR_XF, &up->regs->w.cmdr); if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) uart_write_wakeup(&up->port); if (uart_circ_empty(xmit)) sunsab_stop_tx(&up->port); } static void check_status(struct uart_sunsab_port *up, union sab82532_irq_status *stat) { if (stat->sreg.isr0 & SAB82532_ISR0_CDSC) uart_handle_dcd_change(&up->port, !(readb(&up->regs->r.vstr) & SAB82532_VSTR_CD)); if (stat->sreg.isr1 & SAB82532_ISR1_CSC) uart_handle_cts_change(&up->port, (readb(&up->regs->r.star) & SAB82532_STAR_CTS)); if ((readb(&up->regs->r.pvr) & up->pvr_dsr_bit) ^ up->dsr) { up->dsr = (readb(&up->regs->r.pvr) & up->pvr_dsr_bit) ? 0 : 1; up->port.icount.dsr++; } wake_up_interruptible(&up->port.state->port.delta_msr_wait); } static irqreturn_t sunsab_interrupt(int irq, void *dev_id) { struct uart_sunsab_port *up = dev_id; struct tty_struct *tty; union sab82532_irq_status status; unsigned long flags; unsigned char gis; spin_lock_irqsave(&up->port.lock, flags); status.stat = 0; gis = readb(&up->regs->r.gis) >> up->gis_shift; if (gis & 1) status.sreg.isr0 = readb(&up->regs->r.isr0); if (gis & 2) status.sreg.isr1 = readb(&up->regs->r.isr1); tty = NULL; if (status.stat) { if ((status.sreg.isr0 & (SAB82532_ISR0_TCD | SAB82532_ISR0_TIME | SAB82532_ISR0_RFO | SAB82532_ISR0_RPF)) || (status.sreg.isr1 & SAB82532_ISR1_BRK)) tty = receive_chars(up, &status); if ((status.sreg.isr0 & SAB82532_ISR0_CDSC) || (status.sreg.isr1 & SAB82532_ISR1_CSC)) check_status(up, &status); if (status.sreg.isr1 & (SAB82532_ISR1_ALLS | SAB82532_ISR1_XPR)) transmit_chars(up, &status); } spin_unlock_irqrestore(&up->port.lock, flags); if (tty) tty_flip_buffer_push(tty); return IRQ_HANDLED; } /* port->lock is not held. */ static unsigned int sunsab_tx_empty(struct uart_port *port) { struct uart_sunsab_port *up = (struct uart_sunsab_port *) port; int ret; /* Do not need a lock for a state test like this. 
*/ if (test_bit(SAB82532_ALLS, &up->irqflags)) ret = TIOCSER_TEMT; else ret = 0; return ret; } /* port->lock held by caller. */ static void sunsab_set_mctrl(struct uart_port *port, unsigned int mctrl) { struct uart_sunsab_port *up = (struct uart_sunsab_port *) port; if (mctrl & TIOCM_RTS) { up->cached_mode &= ~SAB82532_MODE_FRTS; up->cached_mode |= SAB82532_MODE_RTS; } else { up->cached_mode |= (SAB82532_MODE_FRTS | SAB82532_MODE_RTS); } if (mctrl & TIOCM_DTR) { up->cached_pvr &= ~(up->pvr_dtr_bit); } else { up->cached_pvr |= up->pvr_dtr_bit; } set_bit(SAB82532_REGS_PENDING, &up->irqflags); if (test_bit(SAB82532_XPR, &up->irqflags)) sunsab_tx_idle(up); } /* port->lock is held by caller and interrupts are disabled. */ static unsigned int sunsab_get_mctrl(struct uart_port *port) { struct uart_sunsab_port *up = (struct uart_sunsab_port *) port; unsigned char val; unsigned int result; result = 0; val = readb(&up->regs->r.pvr); result |= (val & up->pvr_dsr_bit) ? 0 : TIOCM_DSR; val = readb(&up->regs->r.vstr); result |= (val & SAB82532_VSTR_CD) ? 0 : TIOCM_CAR; val = readb(&up->regs->r.star); result |= (val & SAB82532_STAR_CTS) ? TIOCM_CTS : 0; return result; } /* port->lock held by caller. */ static void sunsab_stop_tx(struct uart_port *port) { struct uart_sunsab_port *up = (struct uart_sunsab_port *) port; up->interrupt_mask1 |= SAB82532_IMR1_XPR; writeb(up->interrupt_mask1, &up->regs->w.imr1); } /* port->lock held by caller. */ static void sunsab_tx_idle(struct uart_sunsab_port *up) { if (test_bit(SAB82532_REGS_PENDING, &up->irqflags)) { u8 tmp; clear_bit(SAB82532_REGS_PENDING, &up->irqflags); writeb(up->cached_mode, &up->regs->rw.mode); writeb(up->cached_pvr, &up->regs->rw.pvr); writeb(up->cached_dafo, &up->regs->w.dafo); writeb(up->cached_ebrg & 0xff, &up->regs->w.bgr); tmp = readb(&up->regs->rw.ccr2); tmp &= ~0xc0; tmp |= (up->cached_ebrg >> 2) & 0xc0; writeb(tmp, &up->regs->rw.ccr2); } } /* port->lock held by caller. 
*/ static void sunsab_start_tx(struct uart_port *port) { struct uart_sunsab_port *up = (struct uart_sunsab_port *) port; struct circ_buf *xmit = &up->port.state->xmit; int i; up->interrupt_mask1 &= ~(SAB82532_IMR1_ALLS|SAB82532_IMR1_XPR); writeb(up->interrupt_mask1, &up->regs->w.imr1); if (!test_bit(SAB82532_XPR, &up->irqflags)) return; clear_bit(SAB82532_ALLS, &up->irqflags); clear_bit(SAB82532_XPR, &up->irqflags); for (i = 0; i < up->port.fifosize; i++) { writeb(xmit->buf[xmit->tail], &up->regs->w.xfifo[i]); xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); up->port.icount.tx++; if (uart_circ_empty(xmit)) break; } /* Issue a Transmit Frame command. */ sunsab_cec_wait(up); writeb(SAB82532_CMDR_XF, &up->regs->w.cmdr); } /* port->lock is not held. */ static void sunsab_send_xchar(struct uart_port *port, char ch) { struct uart_sunsab_port *up = (struct uart_sunsab_port *) port; unsigned long flags; spin_lock_irqsave(&up->port.lock, flags); sunsab_tec_wait(up); writeb(ch, &up->regs->w.tic); spin_unlock_irqrestore(&up->port.lock, flags); } /* port->lock held by caller. */ static void sunsab_stop_rx(struct uart_port *port) { struct uart_sunsab_port *up = (struct uart_sunsab_port *) port; up->interrupt_mask0 |= SAB82532_IMR0_TCD; writeb(up->interrupt_mask1, &up->regs->w.imr0); } /* port->lock held by caller. */ static void sunsab_enable_ms(struct uart_port *port) { /* For now we always receive these interrupts. */ } /* port->lock is not held. */ static void sunsab_break_ctl(struct uart_port *port, int break_state) { struct uart_sunsab_port *up = (struct uart_sunsab_port *) port; unsigned long flags; unsigned char val; spin_lock_irqsave(&up->port.lock, flags); val = up->cached_dafo; if (break_state) val |= SAB82532_DAFO_XBRK; else val &= ~SAB82532_DAFO_XBRK; up->cached_dafo = val; set_bit(SAB82532_REGS_PENDING, &up->irqflags); if (test_bit(SAB82532_XPR, &up->irqflags)) sunsab_tx_idle(up); spin_unlock_irqrestore(&up->port.lock, flags); } /* port->lock is not held. 
*/ static int sunsab_startup(struct uart_port *port) { struct uart_sunsab_port *up = (struct uart_sunsab_port *) port; unsigned long flags; unsigned char tmp; int err = request_irq(up->port.irq, sunsab_interrupt, IRQF_SHARED, "sab", up); if (err) return err; spin_lock_irqsave(&up->port.lock, flags); /* * Wait for any commands or immediate characters */ sunsab_cec_wait(up); sunsab_tec_wait(up); /* * Clear the FIFO buffers. */ writeb(SAB82532_CMDR_RRES, &up->regs->w.cmdr); sunsab_cec_wait(up); writeb(SAB82532_CMDR_XRES, &up->regs->w.cmdr); /* * Clear the interrupt registers. */ (void) readb(&up->regs->r.isr0); (void) readb(&up->regs->r.isr1); /* * Now, initialize the UART */ writeb(0, &up->regs->w.ccr0); /* power-down */ writeb(SAB82532_CCR0_MCE | SAB82532_CCR0_SC_NRZ | SAB82532_CCR0_SM_ASYNC, &up->regs->w.ccr0); writeb(SAB82532_CCR1_ODS | SAB82532_CCR1_BCR | 7, &up->regs->w.ccr1); writeb(SAB82532_CCR2_BDF | SAB82532_CCR2_SSEL | SAB82532_CCR2_TOE, &up->regs->w.ccr2); writeb(0, &up->regs->w.ccr3); writeb(SAB82532_CCR4_MCK4 | SAB82532_CCR4_EBRG, &up->regs->w.ccr4); up->cached_mode = (SAB82532_MODE_RTS | SAB82532_MODE_FCTS | SAB82532_MODE_RAC); writeb(up->cached_mode, &up->regs->w.mode); writeb(SAB82532_RFC_DPS|SAB82532_RFC_RFTH_32, &up->regs->w.rfc); tmp = readb(&up->regs->rw.ccr0); tmp |= SAB82532_CCR0_PU; /* power-up */ writeb(tmp, &up->regs->rw.ccr0); /* * Finally, enable interrupts */ up->interrupt_mask0 = (SAB82532_IMR0_PERR | SAB82532_IMR0_FERR | SAB82532_IMR0_PLLA); writeb(up->interrupt_mask0, &up->regs->w.imr0); up->interrupt_mask1 = (SAB82532_IMR1_BRKT | SAB82532_IMR1_ALLS | SAB82532_IMR1_XOFF | SAB82532_IMR1_TIN | SAB82532_IMR1_CSC | SAB82532_IMR1_XON | SAB82532_IMR1_XPR); writeb(up->interrupt_mask1, &up->regs->w.imr1); set_bit(SAB82532_ALLS, &up->irqflags); set_bit(SAB82532_XPR, &up->irqflags); spin_unlock_irqrestore(&up->port.lock, flags); return 0; } /* port->lock is not held. 
*/ static void sunsab_shutdown(struct uart_port *port) { struct uart_sunsab_port *up = (struct uart_sunsab_port *) port; unsigned long flags; spin_lock_irqsave(&up->port.lock, flags); /* Disable Interrupts */ up->interrupt_mask0 = 0xff; writeb(up->interrupt_mask0, &up->regs->w.imr0); up->interrupt_mask1 = 0xff; writeb(up->interrupt_mask1, &up->regs->w.imr1); /* Disable break condition */ up->cached_dafo = readb(&up->regs->rw.dafo); up->cached_dafo &= ~SAB82532_DAFO_XBRK; writeb(up->cached_dafo, &up->regs->rw.dafo); /* Disable Receiver */ up->cached_mode &= ~SAB82532_MODE_RAC; writeb(up->cached_mode, &up->regs->rw.mode); /* * XXX FIXME * * If the chip is powered down here the system hangs/crashes during * reboot or shutdown. This needs to be investigated further, * similar behaviour occurs in 2.4 when the driver is configured * as a module only. One hint may be that data is sometimes * transmitted at 9600 baud during shutdown (regardless of the * speed the chip was configured for when the port was open). */ #if 0 /* Power Down */ tmp = readb(&up->regs->rw.ccr0); tmp &= ~SAB82532_CCR0_PU; writeb(tmp, &up->regs->rw.ccr0); #endif spin_unlock_irqrestore(&up->port.lock, flags); free_irq(up->port.irq, up); } /* * This is used to figure out the divisor speeds. * * The formula is: Baud = SAB_BASE_BAUD / ((N + 1) * (1 << M)), * * with 0 <= N < 64 and 0 <= M < 16 */ static void calc_ebrg(int baud, int *n_ret, int *m_ret) { int n, m; if (baud == 0) { *n_ret = 0; *m_ret = 0; return; } /* * We scale numbers by 10 so that we get better accuracy * without having to use floating point. Here we increment m * until n is within the valid range. */ n = (SAB_BASE_BAUD * 10) / baud; m = 0; while (n >= 640) { n = n / 2; m++; } n = (n+5) / 10; /* * We try very hard to avoid speeds with M == 0 since they may * not work correctly for XTAL frequences above 10 MHz. 
*/ if ((m == 0) && ((n & 1) == 0)) { n = n / 2; m++; } *n_ret = n - 1; *m_ret = m; } /* Internal routine, port->lock is held and local interrupts are disabled. */ static void sunsab_convert_to_sab(struct uart_sunsab_port *up, unsigned int cflag, unsigned int iflag, unsigned int baud, unsigned int quot) { unsigned char dafo; int bits, n, m; /* Byte size and parity */ switch (cflag & CSIZE) { case CS5: dafo = SAB82532_DAFO_CHL5; bits = 7; break; case CS6: dafo = SAB82532_DAFO_CHL6; bits = 8; break; case CS7: dafo = SAB82532_DAFO_CHL7; bits = 9; break; case CS8: dafo = SAB82532_DAFO_CHL8; bits = 10; break; /* Never happens, but GCC is too dumb to figure it out */ default: dafo = SAB82532_DAFO_CHL5; bits = 7; break; } if (cflag & CSTOPB) { dafo |= SAB82532_DAFO_STOP; bits++; } if (cflag & PARENB) { dafo |= SAB82532_DAFO_PARE; bits++; } if (cflag & PARODD) { dafo |= SAB82532_DAFO_PAR_ODD; } else { dafo |= SAB82532_DAFO_PAR_EVEN; } up->cached_dafo = dafo; calc_ebrg(baud, &n, &m); up->cached_ebrg = n | (m << 6); up->tec_timeout = (10 * 1000000) / baud; up->cec_timeout = up->tec_timeout >> 2; /* CTS flow control flags */ /* We encode read_status_mask and ignore_status_mask like so: * * --------------------- * | ... | ISR1 | ISR0 | * --------------------- * .. 
15 8 7 0 */ up->port.read_status_mask = (SAB82532_ISR0_TCD | SAB82532_ISR0_TIME | SAB82532_ISR0_RFO | SAB82532_ISR0_RPF | SAB82532_ISR0_CDSC); up->port.read_status_mask |= (SAB82532_ISR1_CSC | SAB82532_ISR1_ALLS | SAB82532_ISR1_XPR) << 8; if (iflag & INPCK) up->port.read_status_mask |= (SAB82532_ISR0_PERR | SAB82532_ISR0_FERR); if (iflag & (BRKINT | PARMRK)) up->port.read_status_mask |= (SAB82532_ISR1_BRK << 8); /* * Characteres to ignore */ up->port.ignore_status_mask = 0; if (iflag & IGNPAR) up->port.ignore_status_mask |= (SAB82532_ISR0_PERR | SAB82532_ISR0_FERR); if (iflag & IGNBRK) { up->port.ignore_status_mask |= (SAB82532_ISR1_BRK << 8); /* * If we're ignoring parity and break indicators, * ignore overruns too (for real raw support). */ if (iflag & IGNPAR) up->port.ignore_status_mask |= SAB82532_ISR0_RFO; } /* * ignore all characters if CREAD is not set */ if ((cflag & CREAD) == 0) up->port.ignore_status_mask |= (SAB82532_ISR0_RPF | SAB82532_ISR0_TCD); uart_update_timeout(&up->port, cflag, (up->port.uartclk / (16 * quot))); /* Now schedule a register update when the chip's * transmitter is idle. */ up->cached_mode |= SAB82532_MODE_RAC; set_bit(SAB82532_REGS_PENDING, &up->irqflags); if (test_bit(SAB82532_XPR, &up->irqflags)) sunsab_tx_idle(up); } /* port->lock is not held. 
*/ static void sunsab_set_termios(struct uart_port *port, struct ktermios *termios, struct ktermios *old) { struct uart_sunsab_port *up = (struct uart_sunsab_port *) port; unsigned long flags; unsigned int baud = uart_get_baud_rate(port, termios, old, 0, 4000000); unsigned int quot = uart_get_divisor(port, baud); spin_lock_irqsave(&up->port.lock, flags); sunsab_convert_to_sab(up, termios->c_cflag, termios->c_iflag, baud, quot); spin_unlock_irqrestore(&up->port.lock, flags); } static const char *sunsab_type(struct uart_port *port) { struct uart_sunsab_port *up = (void *)port; static char buf[36]; sprintf(buf, "SAB82532 %s", sab82532_version[up->type]); return buf; } static void sunsab_release_port(struct uart_port *port) { } static int sunsab_request_port(struct uart_port *port) { return 0; } static void sunsab_config_port(struct uart_port *port, int flags) { } static int sunsab_verify_port(struct uart_port *port, struct serial_struct *ser) { return -EINVAL; } static struct uart_ops sunsab_pops = { .tx_empty = sunsab_tx_empty, .set_mctrl = sunsab_set_mctrl, .get_mctrl = sunsab_get_mctrl, .stop_tx = sunsab_stop_tx, .start_tx = sunsab_start_tx, .send_xchar = sunsab_send_xchar, .stop_rx = sunsab_stop_rx, .enable_ms = sunsab_enable_ms, .break_ctl = sunsab_break_ctl, .startup = sunsab_startup, .shutdown = sunsab_shutdown, .set_termios = sunsab_set_termios, .type = sunsab_type, .release_port = sunsab_release_port, .request_port = sunsab_request_port, .config_port = sunsab_config_port, .verify_port = sunsab_verify_port, }; static struct uart_driver sunsab_reg = { .owner = THIS_MODULE, .driver_name = "sunsab", .dev_name = "ttyS", .major = TTY_MAJOR, }; static struct uart_sunsab_port *sunsab_ports; #ifdef CONFIG_SERIAL_SUNSAB_CONSOLE static void sunsab_console_putchar(struct uart_port *port, int c) { struct uart_sunsab_port *up = (struct uart_sunsab_port *)port; sunsab_tec_wait(up); writeb(c, &up->regs->w.tic); } static void sunsab_console_write(struct console *con, const 
char *s, unsigned n) { struct uart_sunsab_port *up = &sunsab_ports[con->index]; unsigned long flags; int locked = 1; local_irq_save(flags); if (up->port.sysrq) { locked = 0; } else if (oops_in_progress) { locked = spin_trylock(&up->port.lock); } else spin_lock(&up->port.lock); uart_console_write(&up->port, s, n, sunsab_console_putchar); sunsab_tec_wait(up); if (locked) spin_unlock(&up->port.lock); local_irq_restore(flags); } static int sunsab_console_setup(struct console *con, char *options) { struct uart_sunsab_port *up = &sunsab_ports[con->index]; unsigned long flags; unsigned int baud, quot; /* * The console framework calls us for each and every port * registered. Defer the console setup until the requested * port has been properly discovered. A bit of a hack, * though... */ if (up->port.type != PORT_SUNSAB) return -1; printk("Console: ttyS%d (SAB82532)\n", (sunsab_reg.minor - 64) + con->index); sunserial_console_termios(con, up->port.dev->of_node); switch (con->cflag & CBAUD) { case B150: baud = 150; break; case B300: baud = 300; break; case B600: baud = 600; break; case B1200: baud = 1200; break; case B2400: baud = 2400; break; case B4800: baud = 4800; break; default: case B9600: baud = 9600; break; case B19200: baud = 19200; break; case B38400: baud = 38400; break; case B57600: baud = 57600; break; case B115200: baud = 115200; break; case B230400: baud = 230400; break; case B460800: baud = 460800; break; }; /* * Temporary fix. 
*/ spin_lock_init(&up->port.lock); /* * Initialize the hardware */ sunsab_startup(&up->port); spin_lock_irqsave(&up->port.lock, flags); /* * Finally, enable interrupts */ up->interrupt_mask0 = SAB82532_IMR0_PERR | SAB82532_IMR0_FERR | SAB82532_IMR0_PLLA | SAB82532_IMR0_CDSC; writeb(up->interrupt_mask0, &up->regs->w.imr0); up->interrupt_mask1 = SAB82532_IMR1_BRKT | SAB82532_IMR1_ALLS | SAB82532_IMR1_XOFF | SAB82532_IMR1_TIN | SAB82532_IMR1_CSC | SAB82532_IMR1_XON | SAB82532_IMR1_XPR; writeb(up->interrupt_mask1, &up->regs->w.imr1); quot = uart_get_divisor(&up->port, baud); sunsab_convert_to_sab(up, con->cflag, 0, baud, quot); sunsab_set_mctrl(&up->port, TIOCM_DTR | TIOCM_RTS); spin_unlock_irqrestore(&up->port.lock, flags); return 0; } static struct console sunsab_console = { .name = "ttyS", .write = sunsab_console_write, .device = uart_console_device, .setup = sunsab_console_setup, .flags = CON_PRINTBUFFER, .index = -1, .data = &sunsab_reg, }; static inline struct console *SUNSAB_CONSOLE(void) { return &sunsab_console; } #else #define SUNSAB_CONSOLE() (NULL) #define sunsab_console_init() do { } while (0) #endif static int __devinit sunsab_init_one(struct uart_sunsab_port *up, struct platform_device *op, unsigned long offset, int line) { up->port.line = line; up->port.dev = &op->dev; up->port.mapbase = op->resource[0].start + offset; up->port.membase = of_ioremap(&op->resource[0], offset, sizeof(union sab82532_async_regs), "sab"); if (!up->port.membase) return -ENOMEM; up->regs = (union sab82532_async_regs __iomem *) up->port.membase; up->port.irq = op->archdata.irqs[0]; up->port.fifosize = SAB82532_XMIT_FIFO_SIZE; up->port.iotype = UPIO_MEM; writeb(SAB82532_IPC_IC_ACT_LOW, &up->regs->w.ipc); up->port.ops = &sunsab_pops; up->port.type = PORT_SUNSAB; up->port.uartclk = SAB_BASE_BAUD; up->type = readb(&up->regs->r.vstr) & 0x0f; writeb(~((1 << 1) | (1 << 2) | (1 << 4)), &up->regs->w.pcr); writeb(0xff, &up->regs->w.pim); if ((up->port.line & 0x1) == 0) { up->pvr_dsr_bit = 
(1 << 0); up->pvr_dtr_bit = (1 << 1); up->gis_shift = 2; } else { up->pvr_dsr_bit = (1 << 3); up->pvr_dtr_bit = (1 << 2); up->gis_shift = 0; } up->cached_pvr = (1 << 1) | (1 << 2) | (1 << 4); writeb(up->cached_pvr, &up->regs->w.pvr); up->cached_mode = readb(&up->regs->rw.mode); up->cached_mode |= SAB82532_MODE_FRTS; writeb(up->cached_mode, &up->regs->rw.mode); up->cached_mode |= SAB82532_MODE_RTS; writeb(up->cached_mode, &up->regs->rw.mode); up->tec_timeout = SAB82532_MAX_TEC_TIMEOUT; up->cec_timeout = SAB82532_MAX_CEC_TIMEOUT; return 0; } static int __devinit sab_probe(struct platform_device *op) { static int inst; struct uart_sunsab_port *up; int err; up = &sunsab_ports[inst * 2]; err = sunsab_init_one(&up[0], op, 0, (inst * 2) + 0); if (err) goto out; err = sunsab_init_one(&up[1], op, sizeof(union sab82532_async_regs), (inst * 2) + 1); if (err) goto out1; sunserial_console_match(SUNSAB_CONSOLE(), op->dev.of_node, &sunsab_reg, up[0].port.line, false); sunserial_console_match(SUNSAB_CONSOLE(), op->dev.of_node, &sunsab_reg, up[1].port.line, false); err = uart_add_one_port(&sunsab_reg, &up[0].port); if (err) goto out2; err = uart_add_one_port(&sunsab_reg, &up[1].port); if (err) goto out3; dev_set_drvdata(&op->dev, &up[0]); inst++; return 0; out3: uart_remove_one_port(&sunsab_reg, &up[0].port); out2: of_iounmap(&op->resource[0], up[1].port.membase, sizeof(union sab82532_async_regs)); out1: of_iounmap(&op->resource[0], up[0].port.membase, sizeof(union sab82532_async_regs)); out: return err; } static int __devexit sab_remove(struct platform_device *op) { struct uart_sunsab_port *up = dev_get_drvdata(&op->dev); uart_remove_one_port(&sunsab_reg, &up[1].port); uart_remove_one_port(&sunsab_reg, &up[0].port); of_iounmap(&op->resource[0], up[1].port.membase, sizeof(union sab82532_async_regs)); of_iounmap(&op->resource[0], up[0].port.membase, sizeof(union sab82532_async_regs)); dev_set_drvdata(&op->dev, NULL); return 0; } static const struct of_device_id sab_match[] = { { 
.name = "se", }, { .name = "serial", .compatible = "sab82532", }, {}, }; MODULE_DEVICE_TABLE(of, sab_match); static struct platform_driver sab_driver = { .driver = { .name = "sab", .owner = THIS_MODULE, .of_match_table = sab_match, }, .probe = sab_probe, .remove = __devexit_p(sab_remove), }; static int __init sunsab_init(void) { struct device_node *dp; int err; int num_channels = 0; for_each_node_by_name(dp, "se") num_channels += 2; for_each_node_by_name(dp, "serial") { if (of_device_is_compatible(dp, "sab82532")) num_channels += 2; } if (num_channels) { sunsab_ports = kzalloc(sizeof(struct uart_sunsab_port) * num_channels, GFP_KERNEL); if (!sunsab_ports) return -ENOMEM; err = sunserial_register_minors(&sunsab_reg, num_channels); if (err) { kfree(sunsab_ports); sunsab_ports = NULL; return err; } } return platform_driver_register(&sab_driver); } static void __exit sunsab_exit(void) { platform_driver_unregister(&sab_driver); if (sunsab_reg.nr) { sunserial_unregister_minors(&sunsab_reg, sunsab_reg.nr); } kfree(sunsab_ports); sunsab_ports = NULL; } module_init(sunsab_init); module_exit(sunsab_exit); MODULE_AUTHOR("Eddie C. Dost and David S. Miller"); MODULE_DESCRIPTION("Sun SAB82532 serial port driver"); MODULE_LICENSE("GPL");
gpl-2.0
SiddheshK15/android_kernel_cyanogen_msm8916
drivers/ide/sis5513.c
4518
18494
/* * Copyright (C) 1999-2000 Andre Hedrick <andre@linux-ide.org> * Copyright (C) 2002 Lionel Bouton <Lionel.Bouton@inet6.fr>, Maintainer * Copyright (C) 2003 Vojtech Pavlik <vojtech@suse.cz> * Copyright (C) 2007-2009 Bartlomiej Zolnierkiewicz * * May be copied or modified under the terms of the GNU General Public License * * * Thanks : * * SiS Taiwan : for direct support and hardware. * Daniela Engert : for initial ATA100 advices and numerous others. * John Fremlin, Manfred Spraul, Dave Morgan, Peter Kjellerstedt : * for checking code correctness, providing patches. * * * Original tests and design on the SiS620 chipset. * ATA100 tests and design on the SiS735 chipset. * ATA16/33 support from specs * ATA133 support for SiS961/962 by L.C. Chang <lcchang@sis.com.tw> * ATA133 961/962/963 fixes by Vojtech Pavlik <vojtech@suse.cz> * * Documentation: * SiS chipset documentation available under NDA to companies only * (not to individuals). */ /* * The original SiS5513 comes from a SiS5511/55112/5513 chipset. The original * SiS5513 was also used in the SiS5596/5513 chipset. Thus if we see a SiS5511 * or SiS5596, we can assume we see the first MWDMA-16 capable SiS5513 chip. * * Later SiS chipsets integrated the 5513 functionality into the NorthBridge, * starting with SiS5571 and up to SiS745. The PCI ID didn't change, though. We * can figure out that we have a more modern and more capable 5513 by looking * for the respective NorthBridge IDs. * * Even later (96x family) SiS chipsets use the MuTIOL link and place the 5513 * into the SouthBrige. Here we cannot rely on looking up the NorthBridge PCI * ID, while the now ATA-133 capable 5513 still has the same PCI ID. * Fortunately the 5513 can be 'unmasked' by fiddling with some config space * bits, changing its device id to the true one - 5517 for 961 and 5518 for * 962/963. 
*/ #include <linux/types.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/pci.h> #include <linux/init.h> #include <linux/ide.h> #define DRV_NAME "sis5513" /* registers layout and init values are chipset family dependent */ #define ATA_16 0x01 #define ATA_33 0x02 #define ATA_66 0x03 #define ATA_100a 0x04 /* SiS730/SiS550 is ATA100 with ATA66 layout */ #define ATA_100 0x05 #define ATA_133a 0x06 /* SiS961b with 133 support */ #define ATA_133 0x07 /* SiS962/963 */ static u8 chipset_family; /* * Devices supported */ static const struct { const char *name; u16 host_id; u8 chipset_family; u8 flags; } SiSHostChipInfo[] = { { "SiS968", PCI_DEVICE_ID_SI_968, ATA_133 }, { "SiS966", PCI_DEVICE_ID_SI_966, ATA_133 }, { "SiS965", PCI_DEVICE_ID_SI_965, ATA_133 }, { "SiS745", PCI_DEVICE_ID_SI_745, ATA_100 }, { "SiS735", PCI_DEVICE_ID_SI_735, ATA_100 }, { "SiS733", PCI_DEVICE_ID_SI_733, ATA_100 }, { "SiS635", PCI_DEVICE_ID_SI_635, ATA_100 }, { "SiS633", PCI_DEVICE_ID_SI_633, ATA_100 }, { "SiS730", PCI_DEVICE_ID_SI_730, ATA_100a }, { "SiS550", PCI_DEVICE_ID_SI_550, ATA_100a }, { "SiS640", PCI_DEVICE_ID_SI_640, ATA_66 }, { "SiS630", PCI_DEVICE_ID_SI_630, ATA_66 }, { "SiS620", PCI_DEVICE_ID_SI_620, ATA_66 }, { "SiS540", PCI_DEVICE_ID_SI_540, ATA_66 }, { "SiS530", PCI_DEVICE_ID_SI_530, ATA_66 }, { "SiS5600", PCI_DEVICE_ID_SI_5600, ATA_33 }, { "SiS5598", PCI_DEVICE_ID_SI_5598, ATA_33 }, { "SiS5597", PCI_DEVICE_ID_SI_5597, ATA_33 }, { "SiS5591/2", PCI_DEVICE_ID_SI_5591, ATA_33 }, { "SiS5582", PCI_DEVICE_ID_SI_5582, ATA_33 }, { "SiS5581", PCI_DEVICE_ID_SI_5581, ATA_33 }, { "SiS5596", PCI_DEVICE_ID_SI_5596, ATA_16 }, { "SiS5571", PCI_DEVICE_ID_SI_5571, ATA_16 }, { "SiS5517", PCI_DEVICE_ID_SI_5517, ATA_16 }, { "SiS551x", PCI_DEVICE_ID_SI_5511, ATA_16 }, }; /* Cycle time bits and values vary across chip dma capabilities These three arrays hold the register layout and the values to set. 
Indexed by chipset_family and (dma_mode - XFER_UDMA_0) */ /* {0, ATA_16, ATA_33, ATA_66, ATA_100a, ATA_100, ATA_133} */ static u8 cycle_time_offset[] = { 0, 0, 5, 4, 4, 0, 0 }; static u8 cycle_time_range[] = { 0, 0, 2, 3, 3, 4, 4 }; static u8 cycle_time_value[][XFER_UDMA_6 - XFER_UDMA_0 + 1] = { { 0, 0, 0, 0, 0, 0, 0 }, /* no UDMA */ { 0, 0, 0, 0, 0, 0, 0 }, /* no UDMA */ { 3, 2, 1, 0, 0, 0, 0 }, /* ATA_33 */ { 7, 5, 3, 2, 1, 0, 0 }, /* ATA_66 */ { 7, 5, 3, 2, 1, 0, 0 }, /* ATA_100a (730 specific), different cycle_time range and offset */ { 11, 7, 5, 4, 2, 1, 0 }, /* ATA_100 */ { 15, 10, 7, 5, 3, 2, 1 }, /* ATA_133a (earliest 691 southbridges) */ { 15, 10, 7, 5, 3, 2, 1 }, /* ATA_133 */ }; /* CRC Valid Setup Time vary across IDE clock setting 33/66/100/133 See SiS962 data sheet for more detail */ static u8 cvs_time_value[][XFER_UDMA_6 - XFER_UDMA_0 + 1] = { { 0, 0, 0, 0, 0, 0, 0 }, /* no UDMA */ { 0, 0, 0, 0, 0, 0, 0 }, /* no UDMA */ { 2, 1, 1, 0, 0, 0, 0 }, { 4, 3, 2, 1, 0, 0, 0 }, { 4, 3, 2, 1, 0, 0, 0 }, { 6, 4, 3, 1, 1, 1, 0 }, { 9, 6, 4, 2, 2, 2, 2 }, { 9, 6, 4, 2, 2, 2, 2 }, }; /* Initialize time, Active time, Recovery time vary across IDE clock settings. 
These 3 arrays hold the register value for PIO0/1/2/3/4 and DMA0/1/2 mode in order */ static u8 ini_time_value[][8] = { { 0, 0, 0, 0, 0, 0, 0, 0 }, { 0, 0, 0, 0, 0, 0, 0, 0 }, { 2, 1, 0, 0, 0, 1, 0, 0 }, { 4, 3, 1, 1, 1, 3, 1, 1 }, { 4, 3, 1, 1, 1, 3, 1, 1 }, { 6, 4, 2, 2, 2, 4, 2, 2 }, { 9, 6, 3, 3, 3, 6, 3, 3 }, { 9, 6, 3, 3, 3, 6, 3, 3 }, }; static u8 act_time_value[][8] = { { 0, 0, 0, 0, 0, 0, 0, 0 }, { 0, 0, 0, 0, 0, 0, 0, 0 }, { 9, 9, 9, 2, 2, 7, 2, 2 }, { 19, 19, 19, 5, 4, 14, 5, 4 }, { 19, 19, 19, 5, 4, 14, 5, 4 }, { 28, 28, 28, 7, 6, 21, 7, 6 }, { 38, 38, 38, 10, 9, 28, 10, 9 }, { 38, 38, 38, 10, 9, 28, 10, 9 }, }; static u8 rco_time_value[][8] = { { 0, 0, 0, 0, 0, 0, 0, 0 }, { 0, 0, 0, 0, 0, 0, 0, 0 }, { 9, 2, 0, 2, 0, 7, 1, 1 }, { 19, 5, 1, 5, 2, 16, 3, 2 }, { 19, 5, 1, 5, 2, 16, 3, 2 }, { 30, 9, 3, 9, 4, 25, 6, 4 }, { 40, 12, 4, 12, 5, 34, 12, 5 }, { 40, 12, 4, 12, 5, 34, 12, 5 }, }; /* * Printing configuration */ /* Used for chipset type printing at boot time */ static char *chipset_capability[] = { "ATA", "ATA 16", "ATA 33", "ATA 66", "ATA 100 (1st gen)", "ATA 100 (2nd gen)", "ATA 133 (1st gen)", "ATA 133 (2nd gen)" }; /* * Configuration functions */ static u8 sis_ata133_get_base(ide_drive_t *drive) { struct pci_dev *dev = to_pci_dev(drive->hwif->dev); u32 reg54 = 0; pci_read_config_dword(dev, 0x54, &reg54); return ((reg54 & 0x40000000) ? 
0x70 : 0x40) + drive->dn * 4; } static void sis_ata16_program_timings(ide_drive_t *drive, const u8 mode) { struct pci_dev *dev = to_pci_dev(drive->hwif->dev); u16 t1 = 0; u8 drive_pci = 0x40 + drive->dn * 2; const u16 pio_timings[] = { 0x000, 0x607, 0x404, 0x303, 0x301 }; const u16 mwdma_timings[] = { 0x008, 0x302, 0x301 }; pci_read_config_word(dev, drive_pci, &t1); /* clear active/recovery timings */ t1 &= ~0x070f; if (mode >= XFER_MW_DMA_0) { if (chipset_family > ATA_16) t1 &= ~0x8000; /* disable UDMA */ t1 |= mwdma_timings[mode - XFER_MW_DMA_0]; } else t1 |= pio_timings[mode - XFER_PIO_0]; pci_write_config_word(dev, drive_pci, t1); } static void sis_ata100_program_timings(ide_drive_t *drive, const u8 mode) { struct pci_dev *dev = to_pci_dev(drive->hwif->dev); u8 t1, drive_pci = 0x40 + drive->dn * 2; /* timing bits: 7:4 active 3:0 recovery */ const u8 pio_timings[] = { 0x00, 0x67, 0x44, 0x33, 0x31 }; const u8 mwdma_timings[] = { 0x08, 0x32, 0x31 }; if (mode >= XFER_MW_DMA_0) { u8 t2 = 0; pci_read_config_byte(dev, drive_pci, &t2); t2 &= ~0x80; /* disable UDMA */ pci_write_config_byte(dev, drive_pci, t2); t1 = mwdma_timings[mode - XFER_MW_DMA_0]; } else t1 = pio_timings[mode - XFER_PIO_0]; pci_write_config_byte(dev, drive_pci + 1, t1); } static void sis_ata133_program_timings(ide_drive_t *drive, const u8 mode) { struct pci_dev *dev = to_pci_dev(drive->hwif->dev); u32 t1 = 0; u8 drive_pci = sis_ata133_get_base(drive), clk, idx; pci_read_config_dword(dev, drive_pci, &t1); t1 &= 0xc0c00fff; clk = (t1 & 0x08) ? 
ATA_133 : ATA_100; if (mode >= XFER_MW_DMA_0) { t1 &= ~0x04; /* disable UDMA */ idx = mode - XFER_MW_DMA_0 + 5; } else idx = mode - XFER_PIO_0; t1 |= ini_time_value[clk][idx] << 12; t1 |= act_time_value[clk][idx] << 16; t1 |= rco_time_value[clk][idx] << 24; pci_write_config_dword(dev, drive_pci, t1); } static void sis_program_timings(ide_drive_t *drive, const u8 mode) { if (chipset_family < ATA_100) /* ATA_16/33/66/100a */ sis_ata16_program_timings(drive, mode); else if (chipset_family < ATA_133) /* ATA_100/133a */ sis_ata100_program_timings(drive, mode); else /* ATA_133 */ sis_ata133_program_timings(drive, mode); } static void config_drive_art_rwp(ide_drive_t *drive) { ide_hwif_t *hwif = drive->hwif; struct pci_dev *dev = to_pci_dev(hwif->dev); u8 reg4bh = 0; u8 rw_prefetch = 0; pci_read_config_byte(dev, 0x4b, &reg4bh); rw_prefetch = reg4bh & ~(0x11 << drive->dn); if (drive->media == ide_disk) rw_prefetch |= 0x11 << drive->dn; if (reg4bh != rw_prefetch) pci_write_config_byte(dev, 0x4b, rw_prefetch); } static void sis_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive) { config_drive_art_rwp(drive); sis_program_timings(drive, drive->pio_mode); } static void sis_ata133_program_udma_timings(ide_drive_t *drive, const u8 mode) { struct pci_dev *dev = to_pci_dev(drive->hwif->dev); u32 regdw = 0; u8 drive_pci = sis_ata133_get_base(drive), clk, idx; pci_read_config_dword(dev, drive_pci, &regdw); regdw |= 0x04; regdw &= 0xfffff00f; /* check if ATA133 enable */ clk = (regdw & 0x08) ? 
ATA_133 : ATA_100; idx = mode - XFER_UDMA_0; regdw |= cycle_time_value[clk][idx] << 4; regdw |= cvs_time_value[clk][idx] << 8; pci_write_config_dword(dev, drive_pci, regdw); } static void sis_ata33_program_udma_timings(ide_drive_t *drive, const u8 mode) { struct pci_dev *dev = to_pci_dev(drive->hwif->dev); u8 drive_pci = 0x40 + drive->dn * 2, reg = 0, i = chipset_family; pci_read_config_byte(dev, drive_pci + 1, &reg); /* force the UDMA bit on if we want to use UDMA */ reg |= 0x80; /* clean reg cycle time bits */ reg &= ~((0xff >> (8 - cycle_time_range[i])) << cycle_time_offset[i]); /* set reg cycle time bits */ reg |= cycle_time_value[i][mode - XFER_UDMA_0] << cycle_time_offset[i]; pci_write_config_byte(dev, drive_pci + 1, reg); } static void sis_program_udma_timings(ide_drive_t *drive, const u8 mode) { if (chipset_family >= ATA_133) /* ATA_133 */ sis_ata133_program_udma_timings(drive, mode); else /* ATA_33/66/100a/100/133a */ sis_ata33_program_udma_timings(drive, mode); } static void sis_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive) { const u8 speed = drive->dma_mode; if (speed >= XFER_UDMA_0) sis_program_udma_timings(drive, speed); else sis_program_timings(drive, speed); } static u8 sis_ata133_udma_filter(ide_drive_t *drive) { struct pci_dev *dev = to_pci_dev(drive->hwif->dev); u32 regdw = 0; u8 drive_pci = sis_ata133_get_base(drive); pci_read_config_dword(dev, drive_pci, &regdw); /* if ATA133 disable, we should not set speed above UDMA5 */ return (regdw & 0x08) ? 
ATA_UDMA6 : ATA_UDMA5; } static int sis_find_family(struct pci_dev *dev) { struct pci_dev *host; int i = 0; chipset_family = 0; for (i = 0; i < ARRAY_SIZE(SiSHostChipInfo) && !chipset_family; i++) { host = pci_get_device(PCI_VENDOR_ID_SI, SiSHostChipInfo[i].host_id, NULL); if (!host) continue; chipset_family = SiSHostChipInfo[i].chipset_family; /* Special case for SiS630 : 630S/ET is ATA_100a */ if (SiSHostChipInfo[i].host_id == PCI_DEVICE_ID_SI_630) { if (host->revision >= 0x30) chipset_family = ATA_100a; } pci_dev_put(host); printk(KERN_INFO DRV_NAME " %s: %s %s controller\n", pci_name(dev), SiSHostChipInfo[i].name, chipset_capability[chipset_family]); } if (!chipset_family) { /* Belongs to pci-quirks */ u32 idemisc; u16 trueid; /* Disable ID masking and register remapping */ pci_read_config_dword(dev, 0x54, &idemisc); pci_write_config_dword(dev, 0x54, (idemisc & 0x7fffffff)); pci_read_config_word(dev, PCI_DEVICE_ID, &trueid); pci_write_config_dword(dev, 0x54, idemisc); if (trueid == 0x5518) { printk(KERN_INFO DRV_NAME " %s: SiS 962/963 MuTIOL IDE UDMA133 controller\n", pci_name(dev)); chipset_family = ATA_133; /* Check for 5513 compatibility mapping * We must use this, else the port enabled code will fail, * as it expects the enablebits at 0x4a. 
*/ if ((idemisc & 0x40000000) == 0) { pci_write_config_dword(dev, 0x54, idemisc | 0x40000000); printk(KERN_INFO DRV_NAME " %s: Switching to 5513 register mapping\n", pci_name(dev)); } } } if (!chipset_family) { /* Belongs to pci-quirks */ struct pci_dev *lpc_bridge; u16 trueid; u8 prefctl; u8 idecfg; pci_read_config_byte(dev, 0x4a, &idecfg); pci_write_config_byte(dev, 0x4a, idecfg | 0x10); pci_read_config_word(dev, PCI_DEVICE_ID, &trueid); pci_write_config_byte(dev, 0x4a, idecfg); if (trueid == 0x5517) { /* SiS 961/961B */ lpc_bridge = pci_get_slot(dev->bus, 0x10); /* Bus 0, Dev 2, Fn 0 */ pci_read_config_byte(dev, 0x49, &prefctl); pci_dev_put(lpc_bridge); if (lpc_bridge->revision == 0x10 && (prefctl & 0x80)) { printk(KERN_INFO DRV_NAME " %s: SiS 961B MuTIOL IDE UDMA133 controller\n", pci_name(dev)); chipset_family = ATA_133a; } else { printk(KERN_INFO DRV_NAME " %s: SiS 961 MuTIOL IDE UDMA100 controller\n", pci_name(dev)); chipset_family = ATA_100; } } } return chipset_family; } static int init_chipset_sis5513(struct pci_dev *dev) { /* Make general config ops here 1/ tell IDE channels to operate in Compatibility mode only 2/ tell old chips to allow per drive IDE timings */ u8 reg; u16 regw; switch (chipset_family) { case ATA_133: /* SiS962 operation mode */ pci_read_config_word(dev, 0x50, &regw); if (regw & 0x08) pci_write_config_word(dev, 0x50, regw&0xfff7); pci_read_config_word(dev, 0x52, &regw); if (regw & 0x08) pci_write_config_word(dev, 0x52, regw&0xfff7); break; case ATA_133a: case ATA_100: /* Fixup latency */ pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0x80); /* Set compatibility bit */ pci_read_config_byte(dev, 0x49, &reg); if (!(reg & 0x01)) pci_write_config_byte(dev, 0x49, reg|0x01); break; case ATA_100a: case ATA_66: /* Fixup latency */ pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0x10); /* On ATA_66 chips the bit was elsewhere */ pci_read_config_byte(dev, 0x52, &reg); if (!(reg & 0x04)) pci_write_config_byte(dev, 0x52, reg|0x04); break; case ATA_33: 
/* On ATA_33 we didn't have a single bit to set */ pci_read_config_byte(dev, 0x09, &reg); if ((reg & 0x0f) != 0x00) pci_write_config_byte(dev, 0x09, reg&0xf0); case ATA_16: /* force per drive recovery and active timings needed on ATA_33 and below chips */ pci_read_config_byte(dev, 0x52, &reg); if (!(reg & 0x08)) pci_write_config_byte(dev, 0x52, reg|0x08); break; } return 0; } struct sis_laptop { u16 device; u16 subvendor; u16 subdevice; }; static const struct sis_laptop sis_laptop[] = { /* devid, subvendor, subdev */ { 0x5513, 0x1043, 0x1107 }, /* ASUS A6K */ { 0x5513, 0x1734, 0x105f }, /* FSC Amilo A1630 */ { 0x5513, 0x1071, 0x8640 }, /* EasyNote K5305 */ /* end marker */ { 0, } }; static u8 sis_cable_detect(ide_hwif_t *hwif) { struct pci_dev *pdev = to_pci_dev(hwif->dev); const struct sis_laptop *lap = &sis_laptop[0]; u8 ata66 = 0; while (lap->device) { if (lap->device == pdev->device && lap->subvendor == pdev->subsystem_vendor && lap->subdevice == pdev->subsystem_device) return ATA_CBL_PATA40_SHORT; lap++; } if (chipset_family >= ATA_133) { u16 regw = 0; u16 reg_addr = hwif->channel ? 0x52: 0x50; pci_read_config_word(pdev, reg_addr, &regw); ata66 = (regw & 0x8000) ? 0 : 1; } else if (chipset_family >= ATA_66) { u8 reg48h = 0; u8 mask = hwif->channel ? 0x20 : 0x10; pci_read_config_byte(pdev, 0x48, &reg48h); ata66 = (reg48h & mask) ? 0 : 1; } return ata66 ? 
ATA_CBL_PATA80 : ATA_CBL_PATA40; } static const struct ide_port_ops sis_port_ops = { .set_pio_mode = sis_set_pio_mode, .set_dma_mode = sis_set_dma_mode, .cable_detect = sis_cable_detect, }; static const struct ide_port_ops sis_ata133_port_ops = { .set_pio_mode = sis_set_pio_mode, .set_dma_mode = sis_set_dma_mode, .udma_filter = sis_ata133_udma_filter, .cable_detect = sis_cable_detect, }; static const struct ide_port_info sis5513_chipset = { .name = DRV_NAME, .init_chipset = init_chipset_sis5513, .enablebits = { {0x4a, 0x02, 0x02}, {0x4a, 0x04, 0x04} }, .host_flags = IDE_HFLAG_NO_AUTODMA, .pio_mask = ATA_PIO4, .mwdma_mask = ATA_MWDMA2, }; static int sis5513_init_one(struct pci_dev *dev, const struct pci_device_id *id) { struct ide_port_info d = sis5513_chipset; u8 udma_rates[] = { 0x00, 0x00, 0x07, 0x1f, 0x3f, 0x3f, 0x7f, 0x7f }; int rc; rc = pci_enable_device(dev); if (rc) return rc; if (sis_find_family(dev) == 0) return -ENOTSUPP; if (chipset_family >= ATA_133) d.port_ops = &sis_ata133_port_ops; else d.port_ops = &sis_port_ops; d.udma_mask = udma_rates[chipset_family]; return ide_pci_init_one(dev, &d, NULL); } static void sis5513_remove(struct pci_dev *dev) { ide_pci_remove(dev); pci_disable_device(dev); } static const struct pci_device_id sis5513_pci_tbl[] = { { PCI_VDEVICE(SI, PCI_DEVICE_ID_SI_5513), 0 }, { PCI_VDEVICE(SI, PCI_DEVICE_ID_SI_5518), 0 }, { PCI_VDEVICE(SI, PCI_DEVICE_ID_SI_1180), 0 }, { 0, }, }; MODULE_DEVICE_TABLE(pci, sis5513_pci_tbl); static struct pci_driver sis5513_pci_driver = { .name = "SIS_IDE", .id_table = sis5513_pci_tbl, .probe = sis5513_init_one, .remove = sis5513_remove, .suspend = ide_pci_suspend, .resume = ide_pci_resume, }; static int __init sis5513_ide_init(void) { return ide_pci_register_driver(&sis5513_pci_driver); } static void __exit sis5513_ide_exit(void) { pci_unregister_driver(&sis5513_pci_driver); } module_init(sis5513_ide_init); module_exit(sis5513_ide_exit); MODULE_AUTHOR("Lionel Bouton, L C Chang, Andre Hedrick, Vojtech 
Pavlik"); MODULE_DESCRIPTION("PCI driver module for SIS IDE"); MODULE_LICENSE("GPL");
gpl-2.0
Grace5921/android_kernel_kylevexx
scripts/kconfig/symbol.c
4774
27852
/* * Copyright (C) 2002 Roman Zippel <zippel@linux-m68k.org> * Released under the terms of the GNU GPL v2.0. */ #include <ctype.h> #include <stdlib.h> #include <string.h> #include <regex.h> #include <sys/utsname.h> #include "lkc.h" struct symbol symbol_yes = { .name = "y", .curr = { "y", yes }, .flags = SYMBOL_CONST|SYMBOL_VALID, }, symbol_mod = { .name = "m", .curr = { "m", mod }, .flags = SYMBOL_CONST|SYMBOL_VALID, }, symbol_no = { .name = "n", .curr = { "n", no }, .flags = SYMBOL_CONST|SYMBOL_VALID, }, symbol_empty = { .name = "", .curr = { "", no }, .flags = SYMBOL_VALID, }; struct symbol *sym_defconfig_list; struct symbol *modules_sym; tristate modules_val; struct expr *sym_env_list; static void sym_add_default(struct symbol *sym, const char *def) { struct property *prop = prop_alloc(P_DEFAULT, sym); prop->expr = expr_alloc_symbol(sym_lookup(def, SYMBOL_CONST)); } void sym_init(void) { struct symbol *sym; struct utsname uts; static bool inited = false; if (inited) return; inited = true; uname(&uts); sym = sym_lookup("UNAME_RELEASE", 0); sym->type = S_STRING; sym->flags |= SYMBOL_AUTO; sym_add_default(sym, uts.release); } enum symbol_type sym_get_type(struct symbol *sym) { enum symbol_type type = sym->type; if (type == S_TRISTATE) { if (sym_is_choice_value(sym) && sym->visible == yes) type = S_BOOLEAN; else if (modules_val == no) type = S_BOOLEAN; } return type; } const char *sym_type_name(enum symbol_type type) { switch (type) { case S_BOOLEAN: return "boolean"; case S_TRISTATE: return "tristate"; case S_INT: return "integer"; case S_HEX: return "hex"; case S_STRING: return "string"; case S_UNKNOWN: return "unknown"; case S_OTHER: break; } return "???"; } struct property *sym_get_choice_prop(struct symbol *sym) { struct property *prop; for_all_choices(sym, prop) return prop; return NULL; } struct property *sym_get_env_prop(struct symbol *sym) { struct property *prop; for_all_properties(sym, prop, P_ENV) return prop; return NULL; } struct property 
*sym_get_default_prop(struct symbol *sym) { struct property *prop; for_all_defaults(sym, prop) { prop->visible.tri = expr_calc_value(prop->visible.expr); if (prop->visible.tri != no) return prop; } return NULL; } static struct property *sym_get_range_prop(struct symbol *sym) { struct property *prop; for_all_properties(sym, prop, P_RANGE) { prop->visible.tri = expr_calc_value(prop->visible.expr); if (prop->visible.tri != no) return prop; } return NULL; } static int sym_get_range_val(struct symbol *sym, int base) { sym_calc_value(sym); switch (sym->type) { case S_INT: base = 10; break; case S_HEX: base = 16; break; default: break; } return strtol(sym->curr.val, NULL, base); } static void sym_validate_range(struct symbol *sym) { struct property *prop; int base, val, val2; char str[64]; switch (sym->type) { case S_INT: base = 10; break; case S_HEX: base = 16; break; default: return; } prop = sym_get_range_prop(sym); if (!prop) return; val = strtol(sym->curr.val, NULL, base); val2 = sym_get_range_val(prop->expr->left.sym, base); if (val >= val2) { val2 = sym_get_range_val(prop->expr->right.sym, base); if (val <= val2) return; } if (sym->type == S_INT) sprintf(str, "%d", val2); else sprintf(str, "0x%x", val2); sym->curr.val = strdup(str); } static void sym_calc_visibility(struct symbol *sym) { struct property *prop; tristate tri; /* any prompt visible? 
*/ tri = no; for_all_prompts(sym, prop) { prop->visible.tri = expr_calc_value(prop->visible.expr); tri = EXPR_OR(tri, prop->visible.tri); } if (tri == mod && (sym->type != S_TRISTATE || modules_val == no)) tri = yes; if (sym->visible != tri) { sym->visible = tri; sym_set_changed(sym); } if (sym_is_choice_value(sym)) return; /* defaulting to "yes" if no explicit "depends on" are given */ tri = yes; if (sym->dir_dep.expr) tri = expr_calc_value(sym->dir_dep.expr); if (tri == mod) tri = yes; if (sym->dir_dep.tri != tri) { sym->dir_dep.tri = tri; sym_set_changed(sym); } tri = no; if (sym->rev_dep.expr) tri = expr_calc_value(sym->rev_dep.expr); if (tri == mod && sym_get_type(sym) == S_BOOLEAN) tri = yes; if (sym->rev_dep.tri != tri) { sym->rev_dep.tri = tri; sym_set_changed(sym); } } /* * Find the default symbol for a choice. * First try the default values for the choice symbol * Next locate the first visible choice value * Return NULL if none was found */ struct symbol *sym_choice_default(struct symbol *sym) { struct symbol *def_sym; struct property *prop; struct expr *e; /* any of the defaults visible? */ for_all_defaults(sym, prop) { prop->visible.tri = expr_calc_value(prop->visible.expr); if (prop->visible.tri == no) continue; def_sym = prop_get_symbol(prop); if (def_sym->visible != no) return def_sym; } /* just get the first visible value */ prop = sym_get_choice_prop(sym); expr_list_for_each_sym(prop->expr, e, def_sym) if (def_sym->visible != no) return def_sym; /* failed to locate any defaults */ return NULL; } static struct symbol *sym_calc_choice(struct symbol *sym) { struct symbol *def_sym; struct property *prop; struct expr *e; int flags; /* first calculate all choice values' visibilities */ flags = sym->flags; prop = sym_get_choice_prop(sym); expr_list_for_each_sym(prop->expr, e, def_sym) { sym_calc_visibility(def_sym); if (def_sym->visible != no) flags &= def_sym->flags; } sym->flags &= flags | ~SYMBOL_DEF_USER; /* is the user choice visible? 
*/ def_sym = sym->def[S_DEF_USER].val; if (def_sym && def_sym->visible != no) return def_sym; def_sym = sym_choice_default(sym); if (def_sym == NULL) /* no choice? reset tristate value */ sym->curr.tri = no; return def_sym; } void sym_calc_value(struct symbol *sym) { struct symbol_value newval, oldval; struct property *prop; struct expr *e; if (!sym) return; if (sym->flags & SYMBOL_VALID) return; sym->flags |= SYMBOL_VALID; oldval = sym->curr; switch (sym->type) { case S_INT: case S_HEX: case S_STRING: newval = symbol_empty.curr; break; case S_BOOLEAN: case S_TRISTATE: newval = symbol_no.curr; break; default: sym->curr.val = sym->name; sym->curr.tri = no; return; } if (!sym_is_choice_value(sym)) sym->flags &= ~SYMBOL_WRITE; sym_calc_visibility(sym); /* set default if recursively called */ sym->curr = newval; switch (sym_get_type(sym)) { case S_BOOLEAN: case S_TRISTATE: if (sym_is_choice_value(sym) && sym->visible == yes) { prop = sym_get_choice_prop(sym); newval.tri = (prop_get_symbol(prop)->curr.val == sym) ? 
yes : no; } else { if (sym->visible != no) { /* if the symbol is visible use the user value * if available, otherwise try the default value */ sym->flags |= SYMBOL_WRITE; if (sym_has_value(sym)) { newval.tri = EXPR_AND(sym->def[S_DEF_USER].tri, sym->visible); goto calc_newval; } } if (sym->rev_dep.tri != no) sym->flags |= SYMBOL_WRITE; if (!sym_is_choice(sym)) { prop = sym_get_default_prop(sym); if (prop) { sym->flags |= SYMBOL_WRITE; newval.tri = EXPR_AND(expr_calc_value(prop->expr), prop->visible.tri); } } calc_newval: if (sym->dir_dep.tri == no && sym->rev_dep.tri != no) { struct expr *e; e = expr_simplify_unmet_dep(sym->rev_dep.expr, sym->dir_dep.expr); fprintf(stderr, "warning: ("); expr_fprint(e, stderr); fprintf(stderr, ") selects %s which has unmet direct dependencies (", sym->name); expr_fprint(sym->dir_dep.expr, stderr); fprintf(stderr, ")\n"); expr_free(e); } newval.tri = EXPR_OR(newval.tri, sym->rev_dep.tri); } if (newval.tri == mod && sym_get_type(sym) == S_BOOLEAN) newval.tri = yes; break; case S_STRING: case S_HEX: case S_INT: if (sym->visible != no) { sym->flags |= SYMBOL_WRITE; if (sym_has_value(sym)) { newval.val = sym->def[S_DEF_USER].val; break; } } prop = sym_get_default_prop(sym); if (prop) { struct symbol *ds = prop_get_symbol(prop); if (ds) { sym->flags |= SYMBOL_WRITE; sym_calc_value(ds); newval.val = ds->curr.val; } } break; default: ; } sym->curr = newval; if (sym_is_choice(sym) && newval.tri == yes) sym->curr.val = sym_calc_choice(sym); sym_validate_range(sym); if (memcmp(&oldval, &sym->curr, sizeof(oldval))) { sym_set_changed(sym); if (modules_sym == sym) { sym_set_all_changed(); modules_val = modules_sym->curr.tri; } } if (sym_is_choice(sym)) { struct symbol *choice_sym; prop = sym_get_choice_prop(sym); expr_list_for_each_sym(prop->expr, e, choice_sym) { if ((sym->flags & SYMBOL_WRITE) && choice_sym->visible != no) choice_sym->flags |= SYMBOL_WRITE; if (sym->flags & SYMBOL_CHANGED) sym_set_changed(choice_sym); } } if (sym->flags & 
SYMBOL_AUTO) sym->flags &= ~SYMBOL_WRITE; } void sym_clear_all_valid(void) { struct symbol *sym; int i; for_all_symbols(i, sym) sym->flags &= ~SYMBOL_VALID; sym_add_change_count(1); if (modules_sym) sym_calc_value(modules_sym); } void sym_set_changed(struct symbol *sym) { struct property *prop; sym->flags |= SYMBOL_CHANGED; for (prop = sym->prop; prop; prop = prop->next) { if (prop->menu) prop->menu->flags |= MENU_CHANGED; } } void sym_set_all_changed(void) { struct symbol *sym; int i; for_all_symbols(i, sym) sym_set_changed(sym); } bool sym_tristate_within_range(struct symbol *sym, tristate val) { int type = sym_get_type(sym); if (sym->visible == no) return false; if (type != S_BOOLEAN && type != S_TRISTATE) return false; if (type == S_BOOLEAN && val == mod) return false; if (sym->visible <= sym->rev_dep.tri) return false; if (sym_is_choice_value(sym) && sym->visible == yes) return val == yes; return val >= sym->rev_dep.tri && val <= sym->visible; } bool sym_set_tristate_value(struct symbol *sym, tristate val) { tristate oldval = sym_get_tristate_value(sym); if (oldval != val && !sym_tristate_within_range(sym, val)) return false; if (!(sym->flags & SYMBOL_DEF_USER)) { sym->flags |= SYMBOL_DEF_USER; sym_set_changed(sym); } /* * setting a choice value also resets the new flag of the choice * symbol and all other choice values. 
*/ if (sym_is_choice_value(sym) && val == yes) { struct symbol *cs = prop_get_symbol(sym_get_choice_prop(sym)); struct property *prop; struct expr *e; cs->def[S_DEF_USER].val = sym; cs->flags |= SYMBOL_DEF_USER; prop = sym_get_choice_prop(cs); for (e = prop->expr; e; e = e->left.expr) { if (e->right.sym->visible != no) e->right.sym->flags |= SYMBOL_DEF_USER; } } sym->def[S_DEF_USER].tri = val; if (oldval != val) sym_clear_all_valid(); return true; } tristate sym_toggle_tristate_value(struct symbol *sym) { tristate oldval, newval; oldval = newval = sym_get_tristate_value(sym); do { switch (newval) { case no: newval = mod; break; case mod: newval = yes; break; case yes: newval = no; break; } if (sym_set_tristate_value(sym, newval)) break; } while (oldval != newval); return newval; } bool sym_string_valid(struct symbol *sym, const char *str) { signed char ch; switch (sym->type) { case S_STRING: return true; case S_INT: ch = *str++; if (ch == '-') ch = *str++; if (!isdigit(ch)) return false; if (ch == '0' && *str != 0) return false; while ((ch = *str++)) { if (!isdigit(ch)) return false; } return true; case S_HEX: if (str[0] == '0' && (str[1] == 'x' || str[1] == 'X')) str += 2; ch = *str++; do { if (!isxdigit(ch)) return false; } while ((ch = *str++)); return true; case S_BOOLEAN: case S_TRISTATE: switch (str[0]) { case 'y': case 'Y': case 'm': case 'M': case 'n': case 'N': return true; } return false; default: return false; } } bool sym_string_within_range(struct symbol *sym, const char *str) { struct property *prop; int val; switch (sym->type) { case S_STRING: return sym_string_valid(sym, str); case S_INT: if (!sym_string_valid(sym, str)) return false; prop = sym_get_range_prop(sym); if (!prop) return true; val = strtol(str, NULL, 10); return val >= sym_get_range_val(prop->expr->left.sym, 10) && val <= sym_get_range_val(prop->expr->right.sym, 10); case S_HEX: if (!sym_string_valid(sym, str)) return false; prop = sym_get_range_prop(sym); if (!prop) return true; val = 
strtol(str, NULL, 16); return val >= sym_get_range_val(prop->expr->left.sym, 16) && val <= sym_get_range_val(prop->expr->right.sym, 16); case S_BOOLEAN: case S_TRISTATE: switch (str[0]) { case 'y': case 'Y': return sym_tristate_within_range(sym, yes); case 'm': case 'M': return sym_tristate_within_range(sym, mod); case 'n': case 'N': return sym_tristate_within_range(sym, no); } return false; default: return false; } } bool sym_set_string_value(struct symbol *sym, const char *newval) { const char *oldval; char *val; int size; switch (sym->type) { case S_BOOLEAN: case S_TRISTATE: switch (newval[0]) { case 'y': case 'Y': return sym_set_tristate_value(sym, yes); case 'm': case 'M': return sym_set_tristate_value(sym, mod); case 'n': case 'N': return sym_set_tristate_value(sym, no); } return false; default: ; } if (!sym_string_within_range(sym, newval)) return false; if (!(sym->flags & SYMBOL_DEF_USER)) { sym->flags |= SYMBOL_DEF_USER; sym_set_changed(sym); } oldval = sym->def[S_DEF_USER].val; size = strlen(newval) + 1; if (sym->type == S_HEX && (newval[0] != '0' || (newval[1] != 'x' && newval[1] != 'X'))) { size += 2; sym->def[S_DEF_USER].val = val = malloc(size); *val++ = '0'; *val++ = 'x'; } else if (!oldval || strcmp(oldval, newval)) sym->def[S_DEF_USER].val = val = malloc(size); else return true; strcpy(val, newval); free((void *)oldval); sym_clear_all_valid(); return true; } /* * Find the default value associated to a symbol. * For tristate symbol handle the modules=n case * in which case "m" becomes "y". * If the symbol does not have any default then fallback * to the fixed default values. 
*/ const char *sym_get_string_default(struct symbol *sym) { struct property *prop; struct symbol *ds; const char *str; tristate val; sym_calc_visibility(sym); sym_calc_value(modules_sym); val = symbol_no.curr.tri; str = symbol_empty.curr.val; /* If symbol has a default value look it up */ prop = sym_get_default_prop(sym); if (prop != NULL) { switch (sym->type) { case S_BOOLEAN: case S_TRISTATE: /* The visibility may limit the value from yes => mod */ val = EXPR_AND(expr_calc_value(prop->expr), prop->visible.tri); break; default: /* * The following fails to handle the situation * where a default value is further limited by * the valid range. */ ds = prop_get_symbol(prop); if (ds != NULL) { sym_calc_value(ds); str = (const char *)ds->curr.val; } } } /* Handle select statements */ val = EXPR_OR(val, sym->rev_dep.tri); /* transpose mod to yes if modules are not enabled */ if (val == mod) if (!sym_is_choice_value(sym) && modules_sym->curr.tri == no) val = yes; /* transpose mod to yes if type is bool */ if (sym->type == S_BOOLEAN && val == mod) val = yes; switch (sym->type) { case S_BOOLEAN: case S_TRISTATE: switch (val) { case no: return "n"; case mod: return "m"; case yes: return "y"; } case S_INT: case S_HEX: return str; case S_STRING: return str; case S_OTHER: case S_UNKNOWN: break; } return ""; } const char *sym_get_string_value(struct symbol *sym) { tristate val; switch (sym->type) { case S_BOOLEAN: case S_TRISTATE: val = sym_get_tristate_value(sym); switch (val) { case no: return "n"; case mod: sym_calc_value(modules_sym); return (modules_sym->curr.tri == no) ? 
"n" : "m"; case yes: return "y"; } break; default: ; } return (const char *)sym->curr.val; } bool sym_is_changable(struct symbol *sym) { return sym->visible > sym->rev_dep.tri; } static unsigned strhash(const char *s) { /* fnv32 hash */ unsigned hash = 2166136261U; for (; *s; s++) hash = (hash ^ *s) * 0x01000193; return hash; } struct symbol *sym_lookup(const char *name, int flags) { struct symbol *symbol; char *new_name; int hash; if (name) { if (name[0] && !name[1]) { switch (name[0]) { case 'y': return &symbol_yes; case 'm': return &symbol_mod; case 'n': return &symbol_no; } } hash = strhash(name) % SYMBOL_HASHSIZE; for (symbol = symbol_hash[hash]; symbol; symbol = symbol->next) { if (symbol->name && !strcmp(symbol->name, name) && (flags ? symbol->flags & flags : !(symbol->flags & (SYMBOL_CONST|SYMBOL_CHOICE)))) return symbol; } new_name = strdup(name); } else { new_name = NULL; hash = 0; } symbol = malloc(sizeof(*symbol)); memset(symbol, 0, sizeof(*symbol)); symbol->name = new_name; symbol->type = S_UNKNOWN; symbol->flags |= flags; symbol->next = symbol_hash[hash]; symbol_hash[hash] = symbol; return symbol; } struct symbol *sym_find(const char *name) { struct symbol *symbol = NULL; int hash = 0; if (!name) return NULL; if (name[0] && !name[1]) { switch (name[0]) { case 'y': return &symbol_yes; case 'm': return &symbol_mod; case 'n': return &symbol_no; } } hash = strhash(name) % SYMBOL_HASHSIZE; for (symbol = symbol_hash[hash]; symbol; symbol = symbol->next) { if (symbol->name && !strcmp(symbol->name, name) && !(symbol->flags & SYMBOL_CONST)) break; } return symbol; } /* * Expand symbol's names embedded in the string given in argument. Symbols' * name to be expanded shall be prefixed by a '$'. Unknown symbol expands to * the empty string. 
*/ const char *sym_expand_string_value(const char *in) { const char *src; char *res; size_t reslen; reslen = strlen(in) + 1; res = malloc(reslen); res[0] = '\0'; while ((src = strchr(in, '$'))) { char *p, name[SYMBOL_MAXLENGTH]; const char *symval = ""; struct symbol *sym; size_t newlen; strncat(res, in, src - in); src++; p = name; while (isalnum(*src) || *src == '_') *p++ = *src++; *p = '\0'; sym = sym_find(name); if (sym != NULL) { sym_calc_value(sym); symval = sym_get_string_value(sym); } newlen = strlen(res) + strlen(symval) + strlen(src) + 1; if (newlen > reslen) { reslen = newlen; res = realloc(res, reslen); } strcat(res, symval); in = src; } strcat(res, in); return res; } const char *sym_escape_string_value(const char *in) { const char *p; size_t reslen; char *res; size_t l; reslen = strlen(in) + strlen("\"\"") + 1; p = in; for (;;) { l = strcspn(p, "\"\\"); p += l; if (p[0] == '\0') break; reslen++; p++; } res = malloc(reslen); res[0] = '\0'; strcat(res, "\""); p = in; for (;;) { l = strcspn(p, "\"\\"); strncat(res, p, l); p += l; if (p[0] == '\0') break; strcat(res, "\\"); strncat(res, p++, 1); } strcat(res, "\""); return res; } struct symbol **sym_re_search(const char *pattern) { struct symbol *sym, **sym_arr = NULL; int i, cnt, size; regex_t re; cnt = size = 0; /* Skip if empty */ if (strlen(pattern) == 0) return NULL; if (regcomp(&re, pattern, REG_EXTENDED|REG_NOSUB|REG_ICASE)) return NULL; for_all_symbols(i, sym) { if (sym->flags & SYMBOL_CONST || !sym->name) continue; if (regexec(&re, sym->name, 0, NULL, 0)) continue; if (cnt + 1 >= size) { void *tmp = sym_arr; size += 16; sym_arr = realloc(sym_arr, size * sizeof(struct symbol *)); if (!sym_arr) { free(tmp); return NULL; } } sym_calc_value(sym); sym_arr[cnt++] = sym; } if (sym_arr) sym_arr[cnt] = NULL; regfree(&re); return sym_arr; } /* * When we check for recursive dependencies we use a stack to save * current state so we can print out relevant info to user. 
* The entries are located on the call stack so no need to free memory. * Note inser() remove() must always match to properly clear the stack. */ static struct dep_stack { struct dep_stack *prev, *next; struct symbol *sym; struct property *prop; struct expr *expr; } *check_top; static void dep_stack_insert(struct dep_stack *stack, struct symbol *sym) { memset(stack, 0, sizeof(*stack)); if (check_top) check_top->next = stack; stack->prev = check_top; stack->sym = sym; check_top = stack; } static void dep_stack_remove(void) { check_top = check_top->prev; if (check_top) check_top->next = NULL; } /* * Called when we have detected a recursive dependency. * check_top point to the top of the stact so we use * the ->prev pointer to locate the bottom of the stack. */ static void sym_check_print_recursive(struct symbol *last_sym) { struct dep_stack *stack; struct symbol *sym, *next_sym; struct menu *menu = NULL; struct property *prop; struct dep_stack cv_stack; if (sym_is_choice_value(last_sym)) { dep_stack_insert(&cv_stack, last_sym); last_sym = prop_get_symbol(sym_get_choice_prop(last_sym)); } for (stack = check_top; stack != NULL; stack = stack->prev) if (stack->sym == last_sym) break; if (!stack) { fprintf(stderr, "unexpected recursive dependency error\n"); return; } for (; stack; stack = stack->next) { sym = stack->sym; next_sym = stack->next ? stack->next->sym : last_sym; prop = stack->prop; if (prop == NULL) prop = stack->sym->prop; /* for choice values find the menu entry (used below) */ if (sym_is_choice(sym) || sym_is_choice_value(sym)) { for (prop = sym->prop; prop; prop = prop->next) { menu = prop->menu; if (prop->menu) break; } } if (stack->sym == last_sym) fprintf(stderr, "%s:%d:error: recursive dependency detected!\n", prop->file->name, prop->lineno); if (stack->expr) { fprintf(stderr, "%s:%d:\tsymbol %s %s value contains %s\n", prop->file->name, prop->lineno, sym->name ? sym->name : "<choice>", prop_get_type_name(prop->type), next_sym->name ? 
next_sym->name : "<choice>"); } else if (stack->prop) { fprintf(stderr, "%s:%d:\tsymbol %s depends on %s\n", prop->file->name, prop->lineno, sym->name ? sym->name : "<choice>", next_sym->name ? next_sym->name : "<choice>"); } else if (sym_is_choice(sym)) { fprintf(stderr, "%s:%d:\tchoice %s contains symbol %s\n", menu->file->name, menu->lineno, sym->name ? sym->name : "<choice>", next_sym->name ? next_sym->name : "<choice>"); } else if (sym_is_choice_value(sym)) { fprintf(stderr, "%s:%d:\tsymbol %s is part of choice %s\n", menu->file->name, menu->lineno, sym->name ? sym->name : "<choice>", next_sym->name ? next_sym->name : "<choice>"); } else { fprintf(stderr, "%s:%d:\tsymbol %s is selected by %s\n", prop->file->name, prop->lineno, sym->name ? sym->name : "<choice>", next_sym->name ? next_sym->name : "<choice>"); } } if (check_top == &cv_stack) dep_stack_remove(); } static struct symbol *sym_check_expr_deps(struct expr *e) { struct symbol *sym; if (!e) return NULL; switch (e->type) { case E_OR: case E_AND: sym = sym_check_expr_deps(e->left.expr); if (sym) return sym; return sym_check_expr_deps(e->right.expr); case E_NOT: return sym_check_expr_deps(e->left.expr); case E_EQUAL: case E_UNEQUAL: sym = sym_check_deps(e->left.sym); if (sym) return sym; return sym_check_deps(e->right.sym); case E_SYMBOL: return sym_check_deps(e->left.sym); default: break; } printf("Oops! 
How to check %d?\n", e->type); return NULL; } /* return NULL when dependencies are OK */ static struct symbol *sym_check_sym_deps(struct symbol *sym) { struct symbol *sym2; struct property *prop; struct dep_stack stack; dep_stack_insert(&stack, sym); sym2 = sym_check_expr_deps(sym->rev_dep.expr); if (sym2) goto out; for (prop = sym->prop; prop; prop = prop->next) { if (prop->type == P_CHOICE || prop->type == P_SELECT) continue; stack.prop = prop; sym2 = sym_check_expr_deps(prop->visible.expr); if (sym2) break; if (prop->type != P_DEFAULT || sym_is_choice(sym)) continue; stack.expr = prop->expr; sym2 = sym_check_expr_deps(prop->expr); if (sym2) break; stack.expr = NULL; } out: dep_stack_remove(); return sym2; } static struct symbol *sym_check_choice_deps(struct symbol *choice) { struct symbol *sym, *sym2; struct property *prop; struct expr *e; struct dep_stack stack; dep_stack_insert(&stack, choice); prop = sym_get_choice_prop(choice); expr_list_for_each_sym(prop->expr, e, sym) sym->flags |= (SYMBOL_CHECK | SYMBOL_CHECKED); choice->flags |= (SYMBOL_CHECK | SYMBOL_CHECKED); sym2 = sym_check_sym_deps(choice); choice->flags &= ~SYMBOL_CHECK; if (sym2) goto out; expr_list_for_each_sym(prop->expr, e, sym) { sym2 = sym_check_sym_deps(sym); if (sym2) break; } out: expr_list_for_each_sym(prop->expr, e, sym) sym->flags &= ~SYMBOL_CHECK; if (sym2 && sym_is_choice_value(sym2) && prop_get_symbol(sym_get_choice_prop(sym2)) == choice) sym2 = choice; dep_stack_remove(); return sym2; } struct symbol *sym_check_deps(struct symbol *sym) { struct symbol *sym2; struct property *prop; if (sym->flags & SYMBOL_CHECK) { sym_check_print_recursive(sym); return sym; } if (sym->flags & SYMBOL_CHECKED) return NULL; if (sym_is_choice_value(sym)) { struct dep_stack stack; /* for choice groups start the check with main choice symbol */ dep_stack_insert(&stack, sym); prop = sym_get_choice_prop(sym); sym2 = sym_check_deps(prop_get_symbol(prop)); dep_stack_remove(); } else if (sym_is_choice(sym)) { 
sym2 = sym_check_choice_deps(sym); } else { sym->flags |= (SYMBOL_CHECK | SYMBOL_CHECKED); sym2 = sym_check_sym_deps(sym); sym->flags &= ~SYMBOL_CHECK; } if (sym2 && sym2 == sym) sym2 = NULL; return sym2; } struct property *prop_alloc(enum prop_type type, struct symbol *sym) { struct property *prop; struct property **propp; prop = malloc(sizeof(*prop)); memset(prop, 0, sizeof(*prop)); prop->type = type; prop->sym = sym; prop->file = current_file; prop->lineno = zconf_lineno(); /* append property to the prop list of symbol */ if (sym) { for (propp = &sym->prop; *propp; propp = &(*propp)->next) ; *propp = prop; } return prop; } struct symbol *prop_get_symbol(struct property *prop) { if (prop->expr && (prop->expr->type == E_SYMBOL || prop->expr->type == E_LIST)) return prop->expr->left.sym; return NULL; } const char *prop_get_type_name(enum prop_type type) { switch (type) { case P_PROMPT: return "prompt"; case P_ENV: return "env"; case P_COMMENT: return "comment"; case P_MENU: return "menu"; case P_DEFAULT: return "default"; case P_CHOICE: return "choice"; case P_SELECT: return "select"; case P_RANGE: return "range"; case P_SYMBOL: return "symbol"; case P_UNKNOWN: break; } return "unknown"; } static void prop_add_env(const char *env) { struct symbol *sym, *sym2; struct property *prop; char *p; sym = current_entry->sym; sym->flags |= SYMBOL_AUTO; for_all_properties(sym, prop, P_ENV) { sym2 = prop_get_symbol(prop); if (strcmp(sym2->name, env)) menu_warn(current_entry, "redefining environment symbol from %s", sym2->name); return; } prop = prop_alloc(P_ENV, sym); prop->expr = expr_alloc_symbol(sym_lookup(env, SYMBOL_CONST)); sym_env_list = expr_alloc_one(E_LIST, sym_env_list); sym_env_list->right.sym = sym; p = getenv(env); if (p) sym_add_default(sym, p); else menu_warn(current_entry, "environment variable %s undefined", env); }
gpl-2.0
pacerom/kernel_google_msm
drivers/staging/omapdrm/omap_drv.c
4774
21092
/*
 * drivers/staging/omapdrm/omap_drv.c
 *
 * Copyright (C) 2011 Texas Instruments
 * Author: Rob Clark <rob@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "omap_drv.h"

#include "drm_crtc_helper.h"
#include "drm_fb_helper.h"
#include "omap_dmm_tiler.h"

#define DRIVER_NAME		MODULE_NAME
#define DRIVER_DESC		"OMAP DRM"
#define DRIVER_DATE		"20110917"
#define DRIVER_MAJOR		1
#define DRIVER_MINOR		0
#define DRIVER_PATCHLEVEL	0

struct drm_device *drm_device;

/* bounded by CONFIG; runtime-tunable via module parameter */
static int num_crtc = CONFIG_DRM_OMAP_NUM_CRTCS;

MODULE_PARM_DESC(num_crtc, "Number of overlays to use as CRTCs");
module_param(num_crtc, int, 0600);

/*
 * mode config funcs
 */

/* Notes about mapping DSS and DRM entities:
 *   CRTC:      overlay
 *   encoder:   manager.. with some extension to allow one primary CRTC
 *              and zero or more video CRTC's to be mapped to one encoder?
 *   connector: dssdev.. manager can be attached/detached from different
 *              devices
 */

static void omap_fb_output_poll_changed(struct drm_device *dev)
{
	struct omap_drm_private *priv = dev->dev_private;
	DBG("dev=%p", dev);
	if (priv->fbdev) {
		drm_fb_helper_hotplug_event(priv->fbdev);
	}
}

static struct drm_mode_config_funcs omap_mode_config_funcs = {
	.fb_create = omap_framebuffer_create,
	.output_poll_changed = omap_fb_output_poll_changed,
};

/* Map a DSS display type onto the DRM connector type reported to
 * userspace.  DPI panels named "dvi" are exposed as DVI-D. */
static int get_connector_type(struct omap_dss_device *dssdev)
{
	switch (dssdev->type) {
	case OMAP_DISPLAY_TYPE_HDMI:
		return DRM_MODE_CONNECTOR_HDMIA;
	case OMAP_DISPLAY_TYPE_DPI:
		if (!strcmp(dssdev->name, "dvi"))
			return DRM_MODE_CONNECTOR_DVID;
		/* fallthrough */
	default:
		return DRM_MODE_CONNECTOR_Unknown;
	}
}

#if 0 /* enable when dss2 supports hotplug */
static int omap_drm_notifier(struct notifier_block *nb,
		unsigned long evt, void *arg)
{
	switch (evt) {
	case OMAP_DSS_SIZE_CHANGE:
	case OMAP_DSS_HOTPLUG_CONNECT:
	case OMAP_DSS_HOTPLUG_DISCONNECT: {
		struct drm_device *dev = drm_device;
		DBG("hotplug event: evt=%d, dev=%p", evt, dev);
		if (dev) {
			drm_sysfs_hotplug_event(dev);
		}
		return NOTIFY_OK;
	}
	default:  /* don't care about other events for now */
		return NOTIFY_DONE;
	}
}
#endif

/* Debug helper: log the overlay -> manager -> display wiring. */
static void dump_video_chains(void)
{
	int i;

	DBG("dumping video chains: ");
	for (i = 0; i < omap_dss_get_num_overlays(); i++) {
		struct omap_overlay *ovl = omap_dss_get_overlay(i);
		struct omap_overlay_manager *mgr = ovl->manager;
		struct omap_dss_device *dssdev = mgr ? mgr->device : NULL;
		if (dssdev) {
			DBG("%d: %s -> %s -> %s", i, ovl->name, mgr->name,
						dssdev->name);
		} else if (mgr) {
			DBG("%d: %s -> %s", i, ovl->name, mgr->name);
		} else {
			DBG("%d: %s", i, ovl->name);
		}
	}
}

/* create encoders for each manager */
static int create_encoder(struct drm_device *dev,
		struct omap_overlay_manager *mgr)
{
	struct omap_drm_private *priv = dev->dev_private;
	struct drm_encoder *encoder = omap_encoder_init(dev, mgr);

	if (!encoder) {
		dev_err(dev->dev, "could not create encoder: %s\n",
				mgr->name);
		return -ENOMEM;
	}

	BUG_ON(priv->num_encoders >= ARRAY_SIZE(priv->encoders));

	priv->encoders[priv->num_encoders++] = encoder;

	return 0;
}

/* create connectors for each display device */
static int create_connector(struct drm_device *dev,
		struct omap_dss_device *dssdev)
{
	struct omap_drm_private *priv = dev->dev_private;
	/* NOTE(review): 'static' here looks unnecessary — the pointer is
	 * reassigned before any use on every call; confirm before removing */
	static struct notifier_block *notifier;
	struct drm_connector *connector;
	int j;

	if (!dssdev->driver) {
		dev_warn(dev->dev, "%s has no driver.. skipping it\n",
				dssdev->name);
		return 0;
	}

	if (!(dssdev->driver->get_timings || dssdev->driver->read_edid)) {
		dev_warn(dev->dev, "%s driver does not support "
			"get_timings or read_edid.. skipping it!\n",
			dssdev->name);
		return 0;
	}

	connector = omap_connector_init(dev,
			get_connector_type(dssdev), dssdev);

	if (!connector) {
		dev_err(dev->dev, "could not create connector: %s\n",
				dssdev->name);
		return -ENOMEM;
	}

	BUG_ON(priv->num_connectors >= ARRAY_SIZE(priv->connectors));

	priv->connectors[priv->num_connectors++] = connector;

#if 0 /* enable when dss2 supports hotplug */
	notifier = kzalloc(sizeof(struct notifier_block), GFP_KERNEL);
	notifier->notifier_call = omap_drm_notifier;
	omap_dss_add_notify(dssdev, notifier);
#else
	notifier = NULL;
#endif

	/* attach to every encoder whose manager drives this display */
	for (j = 0; j < priv->num_encoders; j++) {
		struct omap_overlay_manager *mgr =
			omap_encoder_get_manager(priv->encoders[j]);
		if (mgr->device == dssdev) {
			drm_mode_connector_attach_encoder(connector,
					priv->encoders[j]);
		}
	}

	return 0;
}

/* create up to max_overlays CRTCs mapping to overlays.. by default,
 * connect the overlays to different managers/encoders, giving priority
 * to encoders connected to connectors with a detected connection
 */
static int create_crtc(struct drm_device *dev, struct omap_overlay *ovl,
		int *j, unsigned int connected_connectors)
{
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_overlay_manager *mgr = NULL;
	struct drm_crtc *crtc;

	/* find next best connector, ones with detected connection first */
	while (*j < priv->num_connectors && !mgr) {
		if (connected_connectors & (1 << *j)) {
			struct drm_encoder *encoder =
				omap_connector_attached_encoder(
						priv->connectors[*j]);
			if (encoder) {
				mgr = omap_encoder_get_manager(encoder);
			}
		}
		(*j)++;
	}

	/* if we couldn't find another connected connector, lets start
	 * looking at the unconnected connectors:
	 *
	 * note: it might not be immediately apparent, but thanks to
	 * the !mgr check in both this loop and the one above, the only
	 * way to enter this loop is with *j == priv->num_connectors,
	 * so idx can never go negative.
	 */
	while (*j < 2 * priv->num_connectors && !mgr) {
		int idx = *j - priv->num_connectors;
		if (!(connected_connectors & (1 << idx))) {
			struct drm_encoder *encoder =
				omap_connector_attached_encoder(
						priv->connectors[idx]);
			if (encoder) {
				mgr = omap_encoder_get_manager(encoder);
			}
		}
		(*j)++;
	}

	crtc = omap_crtc_init(dev, ovl, priv->num_crtcs);

	if (!crtc) {
		dev_err(dev->dev, "could not create CRTC: %s\n",
				ovl->name);
		return -ENOMEM;
	}

	BUG_ON(priv->num_crtcs >= ARRAY_SIZE(priv->crtcs));

	priv->crtcs[priv->num_crtcs++] = crtc;

	return 0;
}

/* Wrap a leftover overlay as a DRM plane usable by any CRTC in
 * @possible_crtcs. */
static int create_plane(struct drm_device *dev, struct omap_overlay *ovl,
		unsigned int possible_crtcs)
{
	struct omap_drm_private *priv = dev->dev_private;
	struct drm_plane *plane =
			omap_plane_init(dev, ovl, possible_crtcs, false);

	if (!plane) {
		dev_err(dev->dev, "could not create plane: %s\n",
				ovl->name);
		return -ENOMEM;
	}

	BUG_ON(priv->num_planes >= ARRAY_SIZE(priv->planes));

	priv->planes[priv->num_planes++] = plane;

	return 0;
}

/* Match callback for omap_dss_find_device(): compare device name. */
static int match_dev_name(struct omap_dss_device *dssdev, void *data)
{
	return !strcmp(dssdev->name, data);
}

/* Return a bitmask with one bit set per connector that currently has a
 * detected connection. */
static unsigned int detect_connectors(struct drm_device *dev)
{
	struct omap_drm_private *priv = dev->dev_private;
	unsigned int connected_connectors = 0;
	int i;

	for (i = 0; i < priv->num_connectors; i++) {
		struct drm_connector *connector = priv->connectors[i];
		if (omap_connector_detect(connector, true) ==
				connector_status_connected) {
			connected_connectors |= (1 << i);
		}
	}

	return connected_connectors;
}

/* Build the KMS object graph (encoders, connectors, CRTCs, planes),
 * either from board-file platform data or by probing DSS. */
static int omap_modeset_init(struct drm_device *dev)
{
	const struct omap_drm_platform_data *pdata = dev->dev->platform_data;
	struct omap_kms_platform_data *kms_pdata = NULL;
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_dss_device *dssdev = NULL;
	int i, j;
	unsigned int connected_connectors = 0;

	drm_mode_config_init(dev);

	if (pdata && pdata->kms_pdata) {
		kms_pdata = pdata->kms_pdata;

		/* if platform data is provided by the board file, use it to
		 * control which overlays, managers, and devices we own.
		 */
		for (i = 0; i < kms_pdata->mgr_cnt; i++) {
			struct omap_overlay_manager *mgr =
				omap_dss_get_overlay_manager(
						kms_pdata->mgr_ids[i]);
			create_encoder(dev, mgr);
		}

		for (i = 0; i < kms_pdata->dev_cnt; i++) {
			struct omap_dss_device *dssdev =
				omap_dss_find_device(
					(void *)kms_pdata->dev_names[i],
					match_dev_name);
			if (!dssdev) {
				dev_warn(dev->dev, "no such dssdev: %s\n",
					kms_pdata->dev_names[i]);
				continue;
			}
			create_connector(dev, dssdev);
		}

		connected_connectors = detect_connectors(dev);

		j = 0;
		for (i = 0; i < kms_pdata->ovl_cnt; i++) {
			struct omap_overlay *ovl =
				omap_dss_get_overlay(kms_pdata->ovl_ids[i]);
			create_crtc(dev, ovl, &j, connected_connectors);
		}

		for (i = 0; i < kms_pdata->pln_cnt; i++) {
			struct omap_overlay *ovl =
				omap_dss_get_overlay(kms_pdata->pln_ids[i]);
			create_plane(dev, ovl, (1 << priv->num_crtcs) - 1);
		}
	} else {
		/* otherwise just grab up to CONFIG_DRM_OMAP_NUM_CRTCS and try
		 * to make educated guesses about everything else
		 */
		int max_overlays = min(omap_dss_get_num_overlays(), num_crtc);

		for (i = 0; i < omap_dss_get_num_overlay_managers(); i++) {
			create_encoder(dev, omap_dss_get_overlay_manager(i));
		}

		for_each_dss_dev(dssdev) {
			create_connector(dev, dssdev);
		}

		connected_connectors = detect_connectors(dev);

		j = 0;
		for (i = 0; i < max_overlays; i++) {
			create_crtc(dev, omap_dss_get_overlay(i), &j,
					connected_connectors);
		}

		/* use any remaining overlays as drm planes */
		for (; i < omap_dss_get_num_overlays(); i++) {
			struct omap_overlay *ovl = omap_dss_get_overlay(i);
			create_plane(dev, ovl, (1 << priv->num_crtcs) - 1);
		}
	}

	/* for now keep the mapping of CRTCs and encoders static..
*/ for (i = 0; i < priv->num_encoders; i++) { struct drm_encoder *encoder = priv->encoders[i]; struct omap_overlay_manager *mgr = omap_encoder_get_manager(encoder); encoder->possible_crtcs = (1 << priv->num_crtcs) - 1; DBG("%s: possible_crtcs=%08x", mgr->name, encoder->possible_crtcs); } dump_video_chains(); dev->mode_config.min_width = 32; dev->mode_config.min_height = 32; /* note: eventually will need some cpu_is_omapXYZ() type stuff here * to fill in these limits properly on different OMAP generations.. */ dev->mode_config.max_width = 2048; dev->mode_config.max_height = 2048; dev->mode_config.funcs = &omap_mode_config_funcs; return 0; } static void omap_modeset_free(struct drm_device *dev) { drm_mode_config_cleanup(dev); } /* * drm ioctl funcs */ static int ioctl_get_param(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_omap_param *args = data; DBG("%p: param=%llu", dev, args->param); switch (args->param) { case OMAP_PARAM_CHIPSET_ID: args->value = GET_OMAP_TYPE; break; default: DBG("unknown parameter %lld", args->param); return -EINVAL; } return 0; } static int ioctl_set_param(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_omap_param *args = data; switch (args->param) { default: DBG("unknown parameter %lld", args->param); return -EINVAL; } return 0; } static int ioctl_gem_new(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_omap_gem_new *args = data; DBG("%p:%p: size=0x%08x, flags=%08x", dev, file_priv, args->size.bytes, args->flags); return omap_gem_new_handle(dev, file_priv, args->size, args->flags, &args->handle); } static int ioctl_gem_cpu_prep(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_omap_gem_cpu_prep *args = data; struct drm_gem_object *obj; int ret; VERB("%p:%p: handle=%d, op=%x", dev, file_priv, args->handle, args->op); obj = drm_gem_object_lookup(dev, file_priv, args->handle); if (!obj) { return -ENOENT; } ret = 
omap_gem_op_sync(obj, args->op); if (!ret) { ret = omap_gem_op_start(obj, args->op); } drm_gem_object_unreference_unlocked(obj); return ret; } static int ioctl_gem_cpu_fini(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_omap_gem_cpu_fini *args = data; struct drm_gem_object *obj; int ret; VERB("%p:%p: handle=%d", dev, file_priv, args->handle); obj = drm_gem_object_lookup(dev, file_priv, args->handle); if (!obj) { return -ENOENT; } /* XXX flushy, flushy */ ret = 0; if (!ret) { ret = omap_gem_op_finish(obj, args->op); } drm_gem_object_unreference_unlocked(obj); return ret; } static int ioctl_gem_info(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_omap_gem_info *args = data; struct drm_gem_object *obj; int ret = 0; DBG("%p:%p: handle=%d", dev, file_priv, args->handle); obj = drm_gem_object_lookup(dev, file_priv, args->handle); if (!obj) { return -ENOENT; } args->size = omap_gem_mmap_size(obj); args->offset = omap_gem_mmap_offset(obj); drm_gem_object_unreference_unlocked(obj); return ret; } struct drm_ioctl_desc ioctls[DRM_COMMAND_END - DRM_COMMAND_BASE] = { DRM_IOCTL_DEF_DRV(OMAP_GET_PARAM, ioctl_get_param, DRM_UNLOCKED|DRM_AUTH), DRM_IOCTL_DEF_DRV(OMAP_SET_PARAM, ioctl_set_param, DRM_UNLOCKED|DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF_DRV(OMAP_GEM_NEW, ioctl_gem_new, DRM_UNLOCKED|DRM_AUTH), DRM_IOCTL_DEF_DRV(OMAP_GEM_CPU_PREP, ioctl_gem_cpu_prep, DRM_UNLOCKED|DRM_AUTH), DRM_IOCTL_DEF_DRV(OMAP_GEM_CPU_FINI, ioctl_gem_cpu_fini, DRM_UNLOCKED|DRM_AUTH), DRM_IOCTL_DEF_DRV(OMAP_GEM_INFO, ioctl_gem_info, DRM_UNLOCKED|DRM_AUTH), }; /* * drm driver funcs */ /** * load - setup chip and create an initial config * @dev: DRM device * @flags: startup flags * * The driver load routine has to do several things: * - initialize the memory manager * - allocate initial config memory * - setup the DRM framebuffer with the allocated memory */ static int dev_load(struct drm_device *dev, unsigned long flags) { struct 
omap_drm_private *priv; int ret; DBG("load: dev=%p", dev); drm_device = dev; priv = kzalloc(sizeof(*priv), GFP_KERNEL); if (!priv) { dev_err(dev->dev, "could not allocate priv\n"); return -ENOMEM; } dev->dev_private = priv; priv->wq = alloc_workqueue("omapdrm", WQ_UNBOUND | WQ_NON_REENTRANT, 1); INIT_LIST_HEAD(&priv->obj_list); omap_gem_init(dev); ret = omap_modeset_init(dev); if (ret) { dev_err(dev->dev, "omap_modeset_init failed: ret=%d\n", ret); dev->dev_private = NULL; kfree(priv); return ret; } priv->fbdev = omap_fbdev_init(dev); if (!priv->fbdev) { dev_warn(dev->dev, "omap_fbdev_init failed\n"); /* well, limp along without an fbdev.. maybe X11 will work? */ } drm_kms_helper_poll_init(dev); ret = drm_vblank_init(dev, priv->num_crtcs); if (ret) { dev_warn(dev->dev, "could not init vblank\n"); } return 0; } static int dev_unload(struct drm_device *dev) { struct omap_drm_private *priv = dev->dev_private; DBG("unload: dev=%p", dev); drm_vblank_cleanup(dev); drm_kms_helper_poll_fini(dev); omap_fbdev_free(dev); omap_modeset_free(dev); omap_gem_deinit(dev); flush_workqueue(priv->wq); destroy_workqueue(priv->wq); kfree(dev->dev_private); dev->dev_private = NULL; return 0; } static int dev_open(struct drm_device *dev, struct drm_file *file) { file->driver_priv = NULL; DBG("open: dev=%p, file=%p", dev, file); return 0; } static int dev_firstopen(struct drm_device *dev) { DBG("firstopen: dev=%p", dev); return 0; } /** * lastclose - clean up after all DRM clients have exited * @dev: DRM device * * Take care of cleaning up after all DRM clients have exited. In the * mode setting case, we want to restore the kernel's initial mode (just * in case the last client left us in a bad state). */ static void dev_lastclose(struct drm_device *dev) { /* we don't support vga-switcheroo.. 
so just make sure the fbdev * mode is active */ struct omap_drm_private *priv = dev->dev_private; int ret; DBG("lastclose: dev=%p", dev); ret = drm_fb_helper_restore_fbdev_mode(priv->fbdev); if (ret) DBG("failed to restore crtc mode"); } static void dev_preclose(struct drm_device *dev, struct drm_file *file) { DBG("preclose: dev=%p", dev); } static void dev_postclose(struct drm_device *dev, struct drm_file *file) { DBG("postclose: dev=%p, file=%p", dev, file); } /** * enable_vblank - enable vblank interrupt events * @dev: DRM device * @crtc: which irq to enable * * Enable vblank interrupts for @crtc. If the device doesn't have * a hardware vblank counter, this routine should be a no-op, since * interrupts will have to stay on to keep the count accurate. * * RETURNS * Zero on success, appropriate errno if the given @crtc's vblank * interrupt cannot be enabled. */ static int dev_enable_vblank(struct drm_device *dev, int crtc) { DBG("enable_vblank: dev=%p, crtc=%d", dev, crtc); return 0; } /** * disable_vblank - disable vblank interrupt events * @dev: DRM device * @crtc: which irq to enable * * Disable vblank interrupts for @crtc. If the device doesn't have * a hardware vblank counter, this routine should be a no-op, since * interrupts will have to stay on to keep the count accurate. 
*/ static void dev_disable_vblank(struct drm_device *dev, int crtc) { DBG("disable_vblank: dev=%p, crtc=%d", dev, crtc); } static irqreturn_t dev_irq_handler(DRM_IRQ_ARGS) { return IRQ_HANDLED; } static void dev_irq_preinstall(struct drm_device *dev) { DBG("irq_preinstall: dev=%p", dev); } static int dev_irq_postinstall(struct drm_device *dev) { DBG("irq_postinstall: dev=%p", dev); return 0; } static void dev_irq_uninstall(struct drm_device *dev) { DBG("irq_uninstall: dev=%p", dev); } static struct vm_operations_struct omap_gem_vm_ops = { .fault = omap_gem_fault, .open = drm_gem_vm_open, .close = drm_gem_vm_close, }; static const struct file_operations omapdriver_fops = { .owner = THIS_MODULE, .open = drm_open, .unlocked_ioctl = drm_ioctl, .release = drm_release, .mmap = omap_gem_mmap, .poll = drm_poll, .fasync = drm_fasync, .read = drm_read, .llseek = noop_llseek, }; static struct drm_driver omap_drm_driver = { .driver_features = DRIVER_HAVE_IRQ | DRIVER_MODESET | DRIVER_GEM, .load = dev_load, .unload = dev_unload, .open = dev_open, .firstopen = dev_firstopen, .lastclose = dev_lastclose, .preclose = dev_preclose, .postclose = dev_postclose, .get_vblank_counter = drm_vblank_count, .enable_vblank = dev_enable_vblank, .disable_vblank = dev_disable_vblank, .irq_preinstall = dev_irq_preinstall, .irq_postinstall = dev_irq_postinstall, .irq_uninstall = dev_irq_uninstall, .irq_handler = dev_irq_handler, .reclaim_buffers = drm_core_reclaim_buffers, #ifdef CONFIG_DEBUG_FS .debugfs_init = omap_debugfs_init, .debugfs_cleanup = omap_debugfs_cleanup, #endif .gem_init_object = omap_gem_init_object, .gem_free_object = omap_gem_free_object, .gem_vm_ops = &omap_gem_vm_ops, .dumb_create = omap_gem_dumb_create, .dumb_map_offset = omap_gem_dumb_map_offset, .dumb_destroy = omap_gem_dumb_destroy, .ioctls = ioctls, .num_ioctls = DRM_OMAP_NUM_IOCTLS, .fops = &omapdriver_fops, .name = DRIVER_NAME, .desc = DRIVER_DESC, .date = DRIVER_DATE, .major = DRIVER_MAJOR, .minor = DRIVER_MINOR, 
.patchlevel = DRIVER_PATCHLEVEL, }; static int pdev_suspend(struct platform_device *pDevice, pm_message_t state) { DBG(""); return 0; } static int pdev_resume(struct platform_device *device) { DBG(""); return 0; } static void pdev_shutdown(struct platform_device *device) { DBG(""); } static int pdev_probe(struct platform_device *device) { DBG("%s", device->name); return drm_platform_init(&omap_drm_driver, device); } static int pdev_remove(struct platform_device *device) { DBG(""); drm_platform_exit(&omap_drm_driver, device); platform_driver_unregister(&omap_dmm_driver); return 0; } struct platform_driver pdev = { .driver = { .name = DRIVER_NAME, .owner = THIS_MODULE, }, .probe = pdev_probe, .remove = pdev_remove, .suspend = pdev_suspend, .resume = pdev_resume, .shutdown = pdev_shutdown, }; static int __init omap_drm_init(void) { DBG("init"); if (platform_driver_register(&omap_dmm_driver)) { /* we can continue on without DMM.. so not fatal */ dev_err(NULL, "DMM registration failed\n"); } return platform_driver_register(&pdev); } static void __exit omap_drm_fini(void) { DBG("fini"); platform_driver_unregister(&pdev); } /* need late_initcall() so we load after dss_driver's are loaded */ late_initcall(omap_drm_init); module_exit(omap_drm_fini); MODULE_AUTHOR("Rob Clark <rob@ti.com>"); MODULE_DESCRIPTION("OMAP DRM Display Driver"); MODULE_ALIAS("platform:" DRIVER_NAME); MODULE_LICENSE("GPL v2");
gpl-2.0
Tomoms/android_kernel_sony_msm8x60
fs/nfs/objlayout/objlayout.c
4774
20449
/*
 *  pNFS Objects layout driver high level definitions
 *
 *  Copyright (C) 2007 Panasas Inc. [year of first publication]
 *  All rights reserved.
 *
 *  Benny Halevy <bhalevy@panasas.com>
 *  Boaz Harrosh <bharrosh@panasas.com>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2
 *  See the file COPYING included with this distribution for more details.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *  3. Neither the name of the Panasas company nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 *  DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 *  FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 *  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 *  LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 *  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 *  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/kmod.h>
#include <linux/moduleparam.h>
#include <linux/ratelimit.h>
#include <scsi/osd_initiator.h>
#include "objlayout.h"

#define NFSDBG_FACILITY         NFSDBG_PNFS_LD

/*
 * Create a objlayout layout structure for the given inode and return it.
 */
struct pnfs_layout_hdr *
objlayout_alloc_layout_hdr(struct inode *inode, gfp_t gfp_flags)
{
	struct objlayout *objlay;

	objlay = kzalloc(sizeof(struct objlayout), gfp_flags);
	if (!objlay)
		/* fix: previously returned &objlay->pnfs_layout with a NULL
		 * objlay, which is undefined behavior (it only "worked" when
		 * pnfs_layout happened to sit at offset zero) */
		return NULL;
	spin_lock_init(&objlay->lock);
	INIT_LIST_HEAD(&objlay->err_list);
	dprintk("%s: Return %p\n", __func__, objlay);
	return &objlay->pnfs_layout;
}

/*
 * Free an objlayout layout structure
 */
void
objlayout_free_layout_hdr(struct pnfs_layout_hdr *lo)
{
	struct objlayout *objlay = OBJLAYOUT(lo);

	dprintk("%s: objlay %p\n", __func__, objlay);

	WARN_ON(!list_empty(&objlay->err_list));
	kfree(objlay);
}

/*
 * Unmarshall layout and store it in pnfslay.
 */
struct pnfs_layout_segment *
objlayout_alloc_lseg(struct pnfs_layout_hdr *pnfslay,
	struct nfs4_layoutget_res *lgr,
	gfp_t gfp_flags)
{
	int status = -ENOMEM;
	struct xdr_stream stream;
	struct xdr_buf buf = {
		.pages =  lgr->layoutp->pages,
		.page_len =  lgr->layoutp->len,
		.buflen =  lgr->layoutp->len,
		.len = lgr->layoutp->len,
	};
	struct page *scratch;
	struct pnfs_layout_segment *lseg;

	dprintk("%s: Begin pnfslay %p\n", __func__, pnfslay);

	scratch = alloc_page(gfp_flags);
	if (!scratch)
		goto err_nofree;

	xdr_init_decode(&stream, &buf, NULL);
	xdr_set_scratch_buffer(&stream, page_address(scratch), PAGE_SIZE);

	status = objio_alloc_lseg(&lseg, pnfslay, &lgr->range, &stream, gfp_flags);
	if (unlikely(status)) {
		dprintk("%s: objio_alloc_lseg Return err %d\n", __func__,
			status);
		goto err;
	}

	__free_page(scratch);

	dprintk("%s: Return %p\n", __func__, lseg);
	return lseg;

err:
	__free_page(scratch);
err_nofree:
	dprintk("%s: Err Return=>%d\n", __func__, status);
	return ERR_PTR(status);
}

/*
 * Free a layout segment
 */
void
objlayout_free_lseg(struct pnfs_layout_segment *lseg)
{
	dprintk("%s: freeing layout segment %p\n", __func__, lseg);

	if (unlikely(!lseg))
		return;

	objio_free_lseg(lseg);
}

/*
 * I/O Operations
 */

/* End of the byte range [start, start+len), saturating at the protocol
 * maximum on overflow. */
static inline u64
end_offset(u64 start, u64 len)
{
	u64 end;

	end = start + len;
	return end >= start ? end : NFS4_MAX_UINT64;
}

/* last octet in a range */
static inline u64
last_byte_offset(u64 start, u64 len)
{
	u64 end;

	BUG_ON(!len);
	end = start + len;
	return end > start ? end - 1 : NFS4_MAX_UINT64;
}

/* Sanity-check an I/O against its layout segment and normalize a
 * pgbase larger than one page into the page array pointer. */
static void _fix_verify_io_params(struct pnfs_layout_segment *lseg,
			   struct page ***p_pages, unsigned *p_pgbase,
			   u64 offset, unsigned long count)
{
	u64 lseg_end_offset;

	BUG_ON(offset < lseg->pls_range.offset);
	lseg_end_offset = end_offset(lseg->pls_range.offset,
				     lseg->pls_range.length);
	BUG_ON(offset >= lseg_end_offset);
	WARN_ON(offset + count > lseg_end_offset);

	if (*p_pgbase > PAGE_SIZE) {
		dprintk("%s: pgbase(0x%x) > PAGE_SIZE\n", __func__, *p_pgbase);
		*p_pages += *p_pgbase >> PAGE_SHIFT;
		*p_pgbase &= ~PAGE_MASK;
	}
}

/*
 * I/O done common code
 */
static void
objlayout_iodone(struct objlayout_io_res *oir)
{
	if (likely(oir->status >= 0)) {
		objio_free_result(oir);
	} else {
		struct objlayout *objlay = oir->objlay;

		spin_lock(&objlay->lock);
		objlay->delta_space_valid = OBJ_DSU_INVALID;
		/* NOTE(review): argument order looks swapped vs the usual
		 * list_add(new, head) idiom — presumably intentional splicing
		 * of the layout's list onto the result's; confirm against the
		 * err_list consumer before changing */
		list_add(&objlay->err_list, &oir->err_list);
		spin_unlock(&objlay->lock);
	}
}

/*
 * objlayout_io_set_result - Set an osd_error code on a specific osd comp.
 *
 * The @index component IO failed (error returned from target). Register
 * the error for later reporting at layout-return.
 */
void
objlayout_io_set_result(struct objlayout_io_res *oir, unsigned index,
			struct pnfs_osd_objid *pooid, int osd_error,
			u64 offset, u64 length, bool is_write)
{
	struct pnfs_osd_ioerr *ioerr = &oir->ioerrs[index];

	BUG_ON(index >= oir->num_comps);
	if (osd_error) {
		ioerr->oer_component = *pooid;
		ioerr->oer_comp_offset = offset;
		ioerr->oer_comp_length = length;
		ioerr->oer_iswrite = is_write;
		ioerr->oer_errno = osd_error;

		dprintk("%s: err[%d]: errno=%d is_write=%d dev(%llx:%llx) "
			"par=0x%llx obj=0x%llx offset=0x%llx length=0x%llx\n",
			__func__, index, ioerr->oer_errno,
			ioerr->oer_iswrite,
			_DEVID_LO(&ioerr->oer_component.oid_device_id),
			_DEVID_HI(&ioerr->oer_component.oid_device_id),
			ioerr->oer_component.oid_partition_id,
			ioerr->oer_component.oid_object_id,
			ioerr->oer_comp_offset,
			ioerr->oer_comp_length);
	} else {
		/* User need not call if no error is reported */
		ioerr->oer_errno = 0;
	}
}

/* Function scheduled on rpc workqueue to call ->nfs_readlist_complete().
 * This is because the osd completion is called with ints-off from
 * the block layer
 */
static void _rpc_read_complete(struct work_struct *work)
{
	struct rpc_task *task;
	struct nfs_read_data *rdata;

	dprintk("%s enter\n", __func__);
	task = container_of(work, struct rpc_task, u.tk_work);
	rdata = container_of(task, struct nfs_read_data, task);

	pnfs_ld_read_done(rdata);
}

void
objlayout_read_done(struct objlayout_io_res *oir, ssize_t status, bool sync)
{
	struct nfs_read_data *rdata = oir->rpcdata;

	oir->status = rdata->task.tk_status = status;
	if (status >= 0)
		rdata->res.count = status;
	else
		rdata->pnfs_error = status;
	objlayout_iodone(oir);
	/* must not use oir after this point */

	dprintk("%s: Return status=%zd eof=%d sync=%d\n", __func__,
		status, rdata->res.eof, sync);

	if (sync)
		pnfs_ld_read_done(rdata);
	else {
		/* defer completion to process context: the OSD completion
		 * runs with interrupts off from the block layer */
		INIT_WORK(&rdata->task.u.tk_work, _rpc_read_complete);
		schedule_work(&rdata->task.u.tk_work);
	}
}

/*
 * Perform sync or async reads.
*/ enum pnfs_try_status objlayout_read_pagelist(struct nfs_read_data *rdata) { loff_t offset = rdata->args.offset; size_t count = rdata->args.count; int err; loff_t eof; eof = i_size_read(rdata->inode); if (unlikely(offset + count > eof)) { if (offset >= eof) { err = 0; rdata->res.count = 0; rdata->res.eof = 1; /*FIXME: do we need to call pnfs_ld_read_done() */ goto out; } count = eof - offset; } rdata->res.eof = (offset + count) >= eof; _fix_verify_io_params(rdata->lseg, &rdata->args.pages, &rdata->args.pgbase, rdata->args.offset, rdata->args.count); dprintk("%s: inode(%lx) offset 0x%llx count 0x%Zx eof=%d\n", __func__, rdata->inode->i_ino, offset, count, rdata->res.eof); err = objio_read_pagelist(rdata); out: if (unlikely(err)) { rdata->pnfs_error = err; dprintk("%s: Returned Error %d\n", __func__, err); return PNFS_NOT_ATTEMPTED; } return PNFS_ATTEMPTED; } /* Function scheduled on rpc workqueue to call ->nfs_writelist_complete(). * This is because the osd completion is called with ints-off from * the block layer */ static void _rpc_write_complete(struct work_struct *work) { struct rpc_task *task; struct nfs_write_data *wdata; dprintk("%s enter\n", __func__); task = container_of(work, struct rpc_task, u.tk_work); wdata = container_of(task, struct nfs_write_data, task); pnfs_ld_write_done(wdata); } void objlayout_write_done(struct objlayout_io_res *oir, ssize_t status, bool sync) { struct nfs_write_data *wdata = oir->rpcdata; oir->status = wdata->task.tk_status = status; if (status >= 0) { wdata->res.count = status; wdata->verf.committed = oir->committed; } else { wdata->pnfs_error = status; } objlayout_iodone(oir); /* must not use oir after this point */ dprintk("%s: Return status %zd committed %d sync=%d\n", __func__, status, wdata->verf.committed, sync); if (sync) pnfs_ld_write_done(wdata); else { INIT_WORK(&wdata->task.u.tk_work, _rpc_write_complete); schedule_work(&wdata->task.u.tk_work); } } /* * Perform sync or async writes. 
*/ enum pnfs_try_status objlayout_write_pagelist(struct nfs_write_data *wdata, int how) { int err; _fix_verify_io_params(wdata->lseg, &wdata->args.pages, &wdata->args.pgbase, wdata->args.offset, wdata->args.count); err = objio_write_pagelist(wdata, how); if (unlikely(err)) { wdata->pnfs_error = err; dprintk("%s: Returned Error %d\n", __func__, err); return PNFS_NOT_ATTEMPTED; } return PNFS_ATTEMPTED; } void objlayout_encode_layoutcommit(struct pnfs_layout_hdr *pnfslay, struct xdr_stream *xdr, const struct nfs4_layoutcommit_args *args) { struct objlayout *objlay = OBJLAYOUT(pnfslay); struct pnfs_osd_layoutupdate lou; __be32 *start; dprintk("%s: Begin\n", __func__); spin_lock(&objlay->lock); lou.dsu_valid = (objlay->delta_space_valid == OBJ_DSU_VALID); lou.dsu_delta = objlay->delta_space_used; objlay->delta_space_used = 0; objlay->delta_space_valid = OBJ_DSU_INIT; lou.olu_ioerr_flag = !list_empty(&objlay->err_list); spin_unlock(&objlay->lock); start = xdr_reserve_space(xdr, 4); BUG_ON(pnfs_osd_xdr_encode_layoutupdate(xdr, &lou)); *start = cpu_to_be32((xdr->p - start - 1) * 4); dprintk("%s: Return delta_space_used %lld err %d\n", __func__, lou.dsu_delta, lou.olu_ioerr_flag); } static int err_prio(u32 oer_errno) { switch (oer_errno) { case 0: return 0; case PNFS_OSD_ERR_RESOURCE: return OSD_ERR_PRI_RESOURCE; case PNFS_OSD_ERR_BAD_CRED: return OSD_ERR_PRI_BAD_CRED; case PNFS_OSD_ERR_NO_ACCESS: return OSD_ERR_PRI_NO_ACCESS; case PNFS_OSD_ERR_UNREACHABLE: return OSD_ERR_PRI_UNREACHABLE; case PNFS_OSD_ERR_NOT_FOUND: return OSD_ERR_PRI_NOT_FOUND; case PNFS_OSD_ERR_NO_SPACE: return OSD_ERR_PRI_NO_SPACE; default: WARN_ON(1); /* fallthrough */ case PNFS_OSD_ERR_EIO: return OSD_ERR_PRI_EIO; } } static void merge_ioerr(struct pnfs_osd_ioerr *dest_err, const struct pnfs_osd_ioerr *src_err) { u64 dest_end, src_end; if (!dest_err->oer_errno) { *dest_err = *src_err; /* accumulated device must be blank */ memset(&dest_err->oer_component.oid_device_id, 0, 
sizeof(dest_err->oer_component.oid_device_id)); return; } if (dest_err->oer_component.oid_partition_id != src_err->oer_component.oid_partition_id) dest_err->oer_component.oid_partition_id = 0; if (dest_err->oer_component.oid_object_id != src_err->oer_component.oid_object_id) dest_err->oer_component.oid_object_id = 0; if (dest_err->oer_comp_offset > src_err->oer_comp_offset) dest_err->oer_comp_offset = src_err->oer_comp_offset; dest_end = end_offset(dest_err->oer_comp_offset, dest_err->oer_comp_length); src_end = end_offset(src_err->oer_comp_offset, src_err->oer_comp_length); if (dest_end < src_end) dest_end = src_end; dest_err->oer_comp_length = dest_end - dest_err->oer_comp_offset; if ((src_err->oer_iswrite == dest_err->oer_iswrite) && (err_prio(src_err->oer_errno) > err_prio(dest_err->oer_errno))) { dest_err->oer_errno = src_err->oer_errno; } else if (src_err->oer_iswrite) { dest_err->oer_iswrite = true; dest_err->oer_errno = src_err->oer_errno; } } static void encode_accumulated_error(struct objlayout *objlay, __be32 *p) { struct objlayout_io_res *oir, *tmp; struct pnfs_osd_ioerr accumulated_err = {.oer_errno = 0}; list_for_each_entry_safe(oir, tmp, &objlay->err_list, err_list) { unsigned i; for (i = 0; i < oir->num_comps; i++) { struct pnfs_osd_ioerr *ioerr = &oir->ioerrs[i]; if (!ioerr->oer_errno) continue; printk(KERN_ERR "NFS: %s: err[%d]: errno=%d " "is_write=%d dev(%llx:%llx) par=0x%llx " "obj=0x%llx offset=0x%llx length=0x%llx\n", __func__, i, ioerr->oer_errno, ioerr->oer_iswrite, _DEVID_LO(&ioerr->oer_component.oid_device_id), _DEVID_HI(&ioerr->oer_component.oid_device_id), ioerr->oer_component.oid_partition_id, ioerr->oer_component.oid_object_id, ioerr->oer_comp_offset, ioerr->oer_comp_length); merge_ioerr(&accumulated_err, ioerr); } list_del(&oir->err_list); objio_free_result(oir); } pnfs_osd_xdr_encode_ioerr(p, &accumulated_err); } void objlayout_encode_layoutreturn(struct pnfs_layout_hdr *pnfslay, struct xdr_stream *xdr, const struct 
nfs4_layoutreturn_args *args) { struct objlayout *objlay = OBJLAYOUT(pnfslay); struct objlayout_io_res *oir, *tmp; __be32 *start; dprintk("%s: Begin\n", __func__); start = xdr_reserve_space(xdr, 4); BUG_ON(!start); spin_lock(&objlay->lock); list_for_each_entry_safe(oir, tmp, &objlay->err_list, err_list) { __be32 *last_xdr = NULL, *p; unsigned i; int res = 0; for (i = 0; i < oir->num_comps; i++) { struct pnfs_osd_ioerr *ioerr = &oir->ioerrs[i]; if (!ioerr->oer_errno) continue; dprintk("%s: err[%d]: errno=%d is_write=%d " "dev(%llx:%llx) par=0x%llx obj=0x%llx " "offset=0x%llx length=0x%llx\n", __func__, i, ioerr->oer_errno, ioerr->oer_iswrite, _DEVID_LO(&ioerr->oer_component.oid_device_id), _DEVID_HI(&ioerr->oer_component.oid_device_id), ioerr->oer_component.oid_partition_id, ioerr->oer_component.oid_object_id, ioerr->oer_comp_offset, ioerr->oer_comp_length); p = pnfs_osd_xdr_ioerr_reserve_space(xdr); if (unlikely(!p)) { res = -E2BIG; break; /* accumulated_error */ } last_xdr = p; pnfs_osd_xdr_encode_ioerr(p, &oir->ioerrs[i]); } /* TODO: use xdr_write_pages */ if (unlikely(res)) { /* no space for even one error descriptor */ BUG_ON(!last_xdr); /* we've encountered a situation with lots and lots of * errors and no space to encode them all. Use the last * available slot to report the union of all the * remaining errors. */ encode_accumulated_error(objlay, last_xdr); goto loop_done; } list_del(&oir->err_list); objio_free_result(oir); } loop_done: spin_unlock(&objlay->lock); *start = cpu_to_be32((xdr->p - start - 1) * 4); dprintk("%s: Return\n", __func__); } /* * Get Device Info API for io engines */ struct objlayout_deviceinfo { struct page *page; struct pnfs_osd_deviceaddr da; /* This must be last */ }; /* Initialize and call nfs_getdeviceinfo, then decode and return a * "struct pnfs_osd_deviceaddr *" Eventually objlayout_put_deviceinfo() * should be called. 
*/ int objlayout_get_deviceinfo(struct pnfs_layout_hdr *pnfslay, struct nfs4_deviceid *d_id, struct pnfs_osd_deviceaddr **deviceaddr, gfp_t gfp_flags) { struct objlayout_deviceinfo *odi; struct pnfs_device pd; struct page *page, **pages; u32 *p; int err; page = alloc_page(gfp_flags); if (!page) return -ENOMEM; pages = &page; pd.pages = pages; memcpy(&pd.dev_id, d_id, sizeof(*d_id)); pd.layout_type = LAYOUT_OSD2_OBJECTS; pd.pages = &page; pd.pgbase = 0; pd.pglen = PAGE_SIZE; pd.mincount = 0; err = nfs4_proc_getdeviceinfo(NFS_SERVER(pnfslay->plh_inode), &pd); dprintk("%s nfs_getdeviceinfo returned %d\n", __func__, err); if (err) goto err_out; p = page_address(page); odi = kzalloc(sizeof(*odi), gfp_flags); if (!odi) { err = -ENOMEM; goto err_out; } pnfs_osd_xdr_decode_deviceaddr(&odi->da, p); odi->page = page; *deviceaddr = &odi->da; return 0; err_out: __free_page(page); return err; } void objlayout_put_deviceinfo(struct pnfs_osd_deviceaddr *deviceaddr) { struct objlayout_deviceinfo *odi = container_of(deviceaddr, struct objlayout_deviceinfo, da); __free_page(odi->page); kfree(odi); } enum { OBJLAYOUT_MAX_URI_LEN = 256, OBJLAYOUT_MAX_OSDNAME_LEN = 64, OBJLAYOUT_MAX_SYSID_HEX_LEN = OSD_SYSTEMID_LEN * 2 + 1, OSD_LOGIN_UPCALL_PATHLEN = 256 }; static char osd_login_prog[OSD_LOGIN_UPCALL_PATHLEN] = "/sbin/osd_login"; module_param_string(osd_login_prog, osd_login_prog, sizeof(osd_login_prog), 0600); MODULE_PARM_DESC(osd_login_prog, "Path to the osd_login upcall program"); struct __auto_login { char uri[OBJLAYOUT_MAX_URI_LEN]; char osdname[OBJLAYOUT_MAX_OSDNAME_LEN]; char systemid_hex[OBJLAYOUT_MAX_SYSID_HEX_LEN]; }; static int __objlayout_upcall(struct __auto_login *login) { static char *envp[] = { "HOME=/", "TERM=linux", "PATH=/sbin:/usr/sbin:/bin:/usr/bin", NULL }; char *argv[8]; int ret; if (unlikely(!osd_login_prog[0])) { dprintk("%s: osd_login_prog is disabled\n", __func__); return -EACCES; } dprintk("%s uri: %s\n", __func__, login->uri); dprintk("%s osdname %s\n", 
__func__, login->osdname); dprintk("%s systemid_hex %s\n", __func__, login->systemid_hex); argv[0] = (char *)osd_login_prog; argv[1] = "-u"; argv[2] = login->uri; argv[3] = "-o"; argv[4] = login->osdname; argv[5] = "-s"; argv[6] = login->systemid_hex; argv[7] = NULL; ret = call_usermodehelper(argv[0], argv, envp, UMH_WAIT_PROC); /* * Disable the upcall mechanism if we're getting an ENOENT or * EACCES error. The admin can re-enable it on the fly by using * sysfs to set the objlayoutdriver.osd_login_prog module parameter once * the problem has been fixed. */ if (ret == -ENOENT || ret == -EACCES) { printk(KERN_ERR "PNFS-OBJ: %s was not found please set " "objlayoutdriver.osd_login_prog kernel parameter!\n", osd_login_prog); osd_login_prog[0] = '\0'; } dprintk("%s %s return value: %d\n", __func__, osd_login_prog, ret); return ret; } /* Assume dest is all zeros */ static void __copy_nfsS_and_zero_terminate(struct nfs4_string s, char *dest, int max_len, const char *var_name) { if (!s.len) return; if (s.len >= max_len) { pr_warn_ratelimited( "objlayout_autologin: %s: s.len(%d) >= max_len(%d)", var_name, s.len, max_len); s.len = max_len - 1; /* space for null terminator */ } memcpy(dest, s.data, s.len); } /* Assume sysid is all zeros */ static void _sysid_2_hex(struct nfs4_string s, char sysid[OBJLAYOUT_MAX_SYSID_HEX_LEN]) { int i; char *cur; if (!s.len) return; if (s.len != OSD_SYSTEMID_LEN) { pr_warn_ratelimited( "objlayout_autologin: systemid_len(%d) != OSD_SYSTEMID_LEN", s.len); if (s.len > OSD_SYSTEMID_LEN) s.len = OSD_SYSTEMID_LEN; } cur = sysid; for (i = 0; i < s.len; i++) cur = hex_byte_pack(cur, s.data[i]); } int objlayout_autologin(struct pnfs_osd_deviceaddr *deviceaddr) { int rc; struct __auto_login login; if (!deviceaddr->oda_targetaddr.ota_netaddr.r_addr.len) return -ENODEV; memset(&login, 0, sizeof(login)); __copy_nfsS_and_zero_terminate( deviceaddr->oda_targetaddr.ota_netaddr.r_addr, login.uri, sizeof(login.uri), "URI"); __copy_nfsS_and_zero_terminate( 
deviceaddr->oda_osdname, login.osdname, sizeof(login.osdname), "OSDNAME"); _sysid_2_hex(deviceaddr->oda_systemid, login.systemid_hex); rc = __objlayout_upcall(&login); if (rc > 0) /* script returns positive values */ rc = -ENODEV; return rc; }
gpl-2.0
CyanogenMod/android_kernel_htc_msm8960
drivers/staging/media/easycap/easycap_low.c
5030
24963
/***************************************************************************** * * * * * easycap_low.c * * * * * *****************************************************************************/ /* * * Copyright (C) 2010 R.M. Thomas <rmthomas@sciolus.org> * * * This is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * The software is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this software; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ /*****************************************************************************/ /* * ACKNOWLEGEMENTS AND REFERENCES * ------------------------------ * This driver makes use of register information contained in the Syntek * Semicon DC-1125 driver hosted at * http://sourceforge.net/projects/syntekdriver/. * Particularly useful has been a patch to the latter driver provided by * Ivor Hewitt in January 2009. The NTSC implementation is taken from the * work of Ben Trask. 
*/ /****************************************************************************/ #include "easycap.h" #define GET(X, Y, Z) do { \ int __rc; \ *(Z) = (u16)0; \ __rc = regget(X, Y, Z, sizeof(u8)); \ if (0 > __rc) { \ JOT(8, ":-(%i\n", __LINE__); return __rc; \ } \ } while (0) #define SET(X, Y, Z) do { \ int __rc; \ __rc = regset(X, Y, Z); \ if (0 > __rc) { \ JOT(8, ":-(%i\n", __LINE__); return __rc; \ } \ } while (0) /*--------------------------------------------------------------------------*/ static const struct stk1160config { u16 reg; u16 set; } stk1160configPAL[] = { {0x000, 0x0098}, {0x002, 0x0093}, {0x001, 0x0003}, {0x003, 0x0080}, {0x00D, 0x0000}, {0x00F, 0x0002}, {0x018, 0x0010}, {0x019, 0x0000}, {0x01A, 0x0014}, {0x01B, 0x000E}, {0x01C, 0x0046}, {0x100, 0x0033}, {0x103, 0x0000}, {0x104, 0x0000}, {0x105, 0x0000}, {0x106, 0x0000}, /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */ /* * RESOLUTION 640x480 */ /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */ {0x110, 0x0008}, {0x111, 0x0000}, {0x112, 0x0020}, {0x113, 0x0000}, {0x114, 0x0508}, {0x115, 0x0005}, {0x116, 0x0110}, {0x117, 0x0001}, /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */ {0x202, 0x000F}, {0x203, 0x004A}, {0x2FF, 0x0000}, {0xFFF, 0xFFFF} }; /*--------------------------------------------------------------------------*/ static const struct stk1160config stk1160configNTSC[] = { {0x000, 0x0098}, {0x002, 0x0093}, {0x001, 0x0003}, {0x003, 0x0080}, {0x00D, 0x0000}, {0x00F, 0x0002}, {0x018, 0x0010}, {0x019, 0x0000}, {0x01A, 0x0014}, {0x01B, 0x000E}, {0x01C, 0x0046}, {0x100, 0x0033}, {0x103, 0x0000}, {0x104, 0x0000}, {0x105, 0x0000}, {0x106, 0x0000}, /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */ /* * RESOLUTION 640x480 */ /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */ {0x110, 0x0008}, {0x111, 0x0000}, {0x112, 0x0003}, {0x113, 0x0000}, {0x114, 
0x0508}, {0x115, 0x0005}, {0x116, 0x00F3}, {0x117, 0x0000}, /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */ {0x202, 0x000F}, {0x203, 0x004A}, {0x2FF, 0x0000}, {0xFFF, 0xFFFF} }; /*--------------------------------------------------------------------------*/ static const struct saa7113config { u8 reg; u8 set; } saa7113configPAL[] = { {0x01, 0x08}, {0x02, 0x80}, {0x03, 0x33}, {0x04, 0x00}, {0x05, 0x00}, {0x06, 0xE9}, {0x07, 0x0D}, {0x08, 0x38}, {0x09, 0x00}, {0x0A, SAA_0A_DEFAULT}, {0x0B, SAA_0B_DEFAULT}, {0x0C, SAA_0C_DEFAULT}, {0x0D, SAA_0D_DEFAULT}, {0x0E, 0x01}, {0x0F, 0x36}, {0x10, 0x00}, {0x11, 0x0C}, {0x12, 0xE7}, {0x13, 0x00}, {0x15, 0x00}, {0x16, 0x00}, {0x40, 0x02}, {0x41, 0xFF}, {0x42, 0xFF}, {0x43, 0xFF}, {0x44, 0xFF}, {0x45, 0xFF}, {0x46, 0xFF}, {0x47, 0xFF}, {0x48, 0xFF}, {0x49, 0xFF}, {0x4A, 0xFF}, {0x4B, 0xFF}, {0x4C, 0xFF}, {0x4D, 0xFF}, {0x4E, 0xFF}, {0x4F, 0xFF}, {0x50, 0xFF}, {0x51, 0xFF}, {0x52, 0xFF}, {0x53, 0xFF}, {0x54, 0xFF}, {0x55, 0xFF}, {0x56, 0xFF}, {0x57, 0xFF}, {0x58, 0x40}, {0x59, 0x54}, {0x5A, 0x07}, {0x5B, 0x83}, {0xFF, 0xFF} }; /*--------------------------------------------------------------------------*/ static const struct saa7113config saa7113configNTSC[] = { {0x01, 0x08}, {0x02, 0x80}, {0x03, 0x33}, {0x04, 0x00}, {0x05, 0x00}, {0x06, 0xE9}, {0x07, 0x0D}, {0x08, 0x78}, {0x09, 0x00}, {0x0A, SAA_0A_DEFAULT}, {0x0B, SAA_0B_DEFAULT}, {0x0C, SAA_0C_DEFAULT}, {0x0D, SAA_0D_DEFAULT}, {0x0E, 0x01}, {0x0F, 0x36}, {0x10, 0x00}, {0x11, 0x0C}, {0x12, 0xE7}, {0x13, 0x00}, {0x15, 0x00}, {0x16, 0x00}, {0x40, 0x82}, {0x41, 0xFF}, {0x42, 0xFF}, {0x43, 0xFF}, {0x44, 0xFF}, {0x45, 0xFF}, {0x46, 0xFF}, {0x47, 0xFF}, {0x48, 0xFF}, {0x49, 0xFF}, {0x4A, 0xFF}, {0x4B, 0xFF}, {0x4C, 0xFF}, {0x4D, 0xFF}, {0x4E, 0xFF}, {0x4F, 0xFF}, {0x50, 0xFF}, {0x51, 0xFF}, {0x52, 0xFF}, {0x53, 0xFF}, {0x54, 0xFF}, {0x55, 0xFF}, {0x56, 0xFF}, {0x57, 0xFF}, {0x58, 0x40}, {0x59, 0x54}, {0x5A, 0x0A}, {0x5B, 0x83}, {0xFF, 0xFF} }; static int 
regget(struct usb_device *pusb_device, u16 index, void *reg, int reg_size)
{
	/* Read @reg_size bytes from STK1160 register @index via a vendor
	 * control-IN transfer.  Returns the usb_control_msg() result
	 * (byte count on success, negative errno on failure).
	 * NOTE(review): the timeout here is 50000 where regset() uses
	 * 500; usb_control_msg() timeouts are in milliseconds — confirm
	 * whether 50 seconds is intended.
	 */
	int rc;

	if (!pusb_device)
		return -ENODEV;

	rc = usb_control_msg(pusb_device, usb_rcvctrlpipe(pusb_device, 0),
			0x00,
			(USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE),
			0x00,
			index, reg, reg_size, 50000);

	return rc;
}

static int regset(struct usb_device *pusb_device, u16 index, u16 value)
{
	/* Write @value to STK1160 register @index via a vendor
	 * control-OUT transfer.  When the easycap_readback module knob
	 * is on, read the register back and warn (via JOT) when the
	 * low byte does not match what was written — except for
	 * registers known to read back differently (write-only or
	 * self-clearing ones in the switch below).
	 */
	int rc;

	if (!pusb_device)
		return -ENODEV;

	rc = usb_control_msg(pusb_device, usb_sndctrlpipe(pusb_device, 0),
			0x01,
			(USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE),
			value, index, NULL, 0, 500);

	if (rc < 0)
		return rc;

	if (easycap_readback) {
		u16 igot = 0;

		rc = regget(pusb_device, index, &igot, sizeof(igot));
		igot = 0xFF & igot;
		switch (index) {
		case 0x000:  /* registers whose read-back is meaningless */
		case 0x500:
		case 0x502:
		case 0x503:
		case 0x504:
		case 0x506:
		case 0x507:
			break;
		case 0x204:  /* registers expected to read back zero */
		case 0x205:
		case 0x350:
		case 0x351:
			if (igot)
				JOT(8, "unexpected 0x%02X "
					"for STK register 0x%03X\n",
					igot, index);
			break;
		default:
			if ((0xFF & value) != igot)
				JOT(8, "unexpected 0x%02X != 0x%02X "
					"for STK register 0x%03X\n",
					igot, value, index);
			break;
		}
	}
	return rc;
}
/*--------------------------------------------------------------------------*/
/*
 *  FUNCTION wait_i2c() RETURNS 0 ON SUCCESS
 */
/*--------------------------------------------------------------------------*/
static int wait_i2c(struct usb_device *p)
{
	/* Poll STK1160 I2C status register 0x0201 until the transaction
	 * completes (0x01 or 0x04), retrying up to @max times with a
	 * 20 ms sleep while it reads busy (0x00).  Any other status is
	 * returned as (status - 1); -1 means the poll timed out.
	 */
	u16 get0;
	u8 igot;
	const int max = 2;
	int k;

	if (!p)
		return -ENODEV;

	for (k = 0; k < max; k++) {
		GET(p, 0x0201, &igot);
		get0 = igot;
		switch (get0) {
		case 0x04:
		case 0x01:
			return 0;
		case 0x00:
			msleep(20);
			continue;
		default:
			return get0 - 1;
		}
	}
	return -1;
}
/****************************************************************************/
int write_saa(struct usb_device *p, u16 reg0, u16 set0)
{
	/* Write @set0 to SAA7113 register @reg0 over the STK1160's I2C
	 * bridge: target register in 0x204, value in 0x205, then kick
	 * the write by toggling 0x200, and wait for completion.
	 */
	if (!p)
		return -ENODEV;
	SET(p, 0x200, 0x00);
	SET(p, 0x204, reg0);
	SET(p, 0x205, set0);
	SET(p, 0x200, 0x01);
	return wait_i2c(p);
}
/****************************************************************************/
/*--------------------------------------------------------------------------*/ /* * REGISTER 500: SETTING VALUE TO 0x008B READS FROM VT1612A (?) * REGISTER 500: SETTING VALUE TO 0x008C WRITES TO VT1612A * REGISTER 502: LEAST SIGNIFICANT BYTE OF VALUE TO SET * REGISTER 503: MOST SIGNIFICANT BYTE OF VALUE TO SET * REGISTER 504: TARGET ADDRESS ON VT1612A */ /*--------------------------------------------------------------------------*/ static int write_vt(struct usb_device *p, u16 reg0, u16 set0) { u8 igot; u16 got502, got503; u16 set502, set503; if (!p) return -ENODEV; SET(p, 0x0504, reg0); SET(p, 0x0500, 0x008B); GET(p, 0x0502, &igot); got502 = (0xFF & igot); GET(p, 0x0503, &igot); got503 = (0xFF & igot); JOT(16, "write_vt(., 0x%04X, 0x%04X): was 0x%04X\n", reg0, set0, ((got503 << 8) | got502)); set502 = (0x00FF & set0); set503 = ((0xFF00 & set0) >> 8); SET(p, 0x0504, reg0); SET(p, 0x0502, set502); SET(p, 0x0503, set503); SET(p, 0x0500, 0x008C); return 0; } /****************************************************************************/ /*--------------------------------------------------------------------------*/ /* * REGISTER 500: SETTING VALUE TO 0x008B READS FROM VT1612A (?) 
* REGISTER 500: SETTING VALUE TO 0x008C WRITES TO VT1612A * REGISTER 502: LEAST SIGNIFICANT BYTE OF VALUE TO GET * REGISTER 503: MOST SIGNIFICANT BYTE OF VALUE TO GET * REGISTER 504: TARGET ADDRESS ON VT1612A */ /*--------------------------------------------------------------------------*/ static int read_vt(struct usb_device *p, u16 reg0) { u8 igot; u16 got502, got503; if (!p) return -ENODEV; SET(p, 0x0504, reg0); SET(p, 0x0500, 0x008B); GET(p, 0x0502, &igot); got502 = (0xFF & igot); GET(p, 0x0503, &igot); got503 = (0xFF & igot); JOT(16, "read_vt(., 0x%04X): has 0x%04X\n", reg0, ((got503 << 8) | got502)); return (got503 << 8) | got502; } /****************************************************************************/ /*--------------------------------------------------------------------------*/ /* * THESE APPEAR TO HAVE NO EFFECT ON EITHER VIDEO OR AUDIO. */ /*--------------------------------------------------------------------------*/ static int write_300(struct usb_device *p) { if (!p) return -ENODEV; SET(p, 0x300, 0x0012); SET(p, 0x350, 0x002D); SET(p, 0x351, 0x0001); SET(p, 0x352, 0x0000); SET(p, 0x353, 0x0000); SET(p, 0x300, 0x0080); return 0; } /****************************************************************************/ /****************************************************************************/ int setup_stk(struct usb_device *p, bool ntsc) { int i; const struct stk1160config *cfg; if (!p) return -ENODEV; cfg = (ntsc) ? stk1160configNTSC : stk1160configPAL; for (i = 0; cfg[i].reg != 0xFFF; i++) SET(p, cfg[i].reg, cfg[i].set); write_300(p); return 0; } /****************************************************************************/ int setup_saa(struct usb_device *p, bool ntsc) { int i, rc; const struct saa7113config *cfg; if (!p) return -ENODEV; cfg = (ntsc) ? 
saa7113configNTSC : saa7113configPAL; for (i = 0; cfg[i].reg != 0xFF; i++) { rc = write_saa(p, cfg[i].reg, cfg[i].set); if (rc) dev_err(&p->dev, "Failed to set SAA register %d", cfg[i].reg); } return 0; } /****************************************************************************/ int merit_saa(struct usb_device *p) { int rc; if (!p) return -ENODEV; rc = read_saa(p, 0x1F); return ((0 > rc) || (0x02 & rc)) ? 1 : 0; } /****************************************************************************/ int ready_saa(struct usb_device *p) { int j, rc, rate; const int max = 5, marktime = PATIENCE/5; /*--------------------------------------------------------------------------*/ /* * RETURNS 0 FOR INTERLACED 50 Hz * 1 FOR NON-INTERLACED 50 Hz * 2 FOR INTERLACED 60 Hz * 3 FOR NON-INTERLACED 60 Hz */ /*--------------------------------------------------------------------------*/ if (!p) return -ENODEV; j = 0; while (max > j) { rc = read_saa(p, 0x1F); if (0 <= rc) { if (0 == (0x40 & rc)) break; if (1 == (0x01 & rc)) break; } msleep(marktime); j++; } if (max == j) return -1; if (0x20 & rc) { rate = 2; JOT(8, "hardware detects 60 Hz\n"); } else { rate = 0; JOT(8, "hardware detects 50 Hz\n"); } if (0x80 & rc) JOT(8, "hardware detects interlacing\n"); else { rate++; JOT(8, "hardware detects no interlacing\n"); } return 0; } /****************************************************************************/ int read_saa(struct usb_device *p, u16 reg0) { u8 igot; if (!p) return -ENODEV; SET(p, 0x208, reg0); SET(p, 0x200, 0x20); if (0 != wait_i2c(p)) return -1; igot = 0; GET(p, 0x0209, &igot); return igot; } /****************************************************************************/ static int read_stk(struct usb_device *p, u32 reg0) { u8 igot; if (!p) return -ENODEV; igot = 0; GET(p, reg0, &igot); return igot; } int select_input(struct usb_device *p, int input, int mode) { int ir; if (!p) return -ENODEV; stop_100(p); switch (input) { case 0: case 1: { if (0 != write_saa(p, 0x02, 0x80)) 
SAY("ERROR: failed to set SAA register 0x02 " "for input %i\n", input); SET(p, 0x0000, 0x0098); SET(p, 0x0002, 0x0078); break; } case 2: { if (0 != write_saa(p, 0x02, 0x80)) SAY("ERROR: failed to set SAA register 0x02 " "for input %i\n", input); SET(p, 0x0000, 0x0090); SET(p, 0x0002, 0x0078); break; } case 3: { if (0 != write_saa(p, 0x02, 0x80)) SAY("ERROR: failed to set SAA register 0x02 " " for input %i\n", input); SET(p, 0x0000, 0x0088); SET(p, 0x0002, 0x0078); break; } case 4: { if (0 != write_saa(p, 0x02, 0x80)) { SAY("ERROR: failed to set SAA register 0x02 " "for input %i\n", input); } SET(p, 0x0000, 0x0080); SET(p, 0x0002, 0x0078); break; } case 5: { if (9 != mode) mode = 7; switch (mode) { case 7: { if (0 != write_saa(p, 0x02, 0x87)) SAY("ERROR: failed to set SAA register 0x02 " "for input %i\n", input); if (0 != write_saa(p, 0x05, 0xFF)) SAY("ERROR: failed to set SAA register 0x05 " "for input %i\n", input); break; } case 9: { if (0 != write_saa(p, 0x02, 0x89)) SAY("ERROR: failed to set SAA register 0x02 " "for input %i\n", input); if (0 != write_saa(p, 0x05, 0x00)) SAY("ERROR: failed to set SAA register 0x05 " "for input %i\n", input); break; } default: SAY("MISTAKE: bad mode: %i\n", mode); return -1; } if (0 != write_saa(p, 0x04, 0x00)) SAY("ERROR: failed to set SAA register 0x04 " "for input %i\n", input); if (0 != write_saa(p, 0x09, 0x80)) SAY("ERROR: failed to set SAA register 0x09 " "for input %i\n", input); SET(p, 0x0002, 0x0093); break; } default: SAY("ERROR: bad input: %i\n", input); return -1; } ir = read_stk(p, 0x00); JOT(8, "STK register 0x00 has 0x%02X\n", ir); ir = read_saa(p, 0x02); JOT(8, "SAA register 0x02 has 0x%02X\n", ir); start_100(p); return 0; } /****************************************************************************/ int set_resolution(struct usb_device *p, u16 set0, u16 set1, u16 set2, u16 set3) { u16 u0x0111, u0x0113, u0x0115, u0x0117; if (!p) return -ENODEV; u0x0111 = ((0xFF00 & set0) >> 8); u0x0113 = ((0xFF00 & set1) >> 8); 
u0x0115 = ((0xFF00 & set2) >> 8);
	u0x0117 = ((0xFF00 & set3) >> 8);
	/* Capture-window registers come in little-endian pairs:
	 * low byte in the even register, high byte in the odd one.
	 */
	SET(p, 0x0110, (0x00FF & set0));
	SET(p, 0x0111, u0x0111);
	SET(p, 0x0112, (0x00FF & set1));
	SET(p, 0x0113, u0x0113);
	SET(p, 0x0114, (0x00FF & set2));
	SET(p, 0x0115, u0x0115);
	SET(p, 0x0116, (0x00FF & set3));
	SET(p, 0x0117, u0x0117);
	return 0;
}
/****************************************************************************/
/*
 * Start capture: save registers 0x0116/0x0117, zero them, set bit 7 of
 * control register 0x0100, then restore 0x0116/0x0117.  The save/zero/
 * restore sequence around the enable bit is order-sensitive —
 * presumably a hardware quirk of the STK1160; do not reorder.
 */
int start_100(struct usb_device *p)
{
	u16 get116, get117, get0;
	u8 igot116, igot117, igot;

	if (!p)
		return -ENODEV;
	GET(p, 0x0116, &igot116);
	get116 = igot116;
	GET(p, 0x0117, &igot117);
	get117 = igot117;
	SET(p, 0x0116, 0x0000);
	SET(p, 0x0117, 0x0000);
	GET(p, 0x0100, &igot);
	get0 = igot;
	SET(p, 0x0100, (0x80 | get0));
	SET(p, 0x0116, get116);
	SET(p, 0x0117, get117);
	return 0;
}
/****************************************************************************/
/* Stop capture by clearing bit 7 of control register 0x0100. */
int stop_100(struct usb_device *p)
{
	u16 get0;
	u8 igot;

	if (!p)
		return -ENODEV;
	GET(p, 0x0100, &igot);
	get0 = igot;
	SET(p, 0x0100, (0x7F & get0));
	return 0;
}
/****************************************************************************/
/****************************************************************************/
/*****************************************************************************/
/*
 * Issue a standard SET_FEATURE(DEVICE_REMOTE_WAKEUP) request to the
 * device.  Returns the usb_control_msg() result.
 * NOTE(review): the 50000 timeout is in milliseconds per the USB core
 * API — confirm a 50 s wait is intended.
 */
int easycap_wakeup_device(struct usb_device *pusb_device)
{
	if (!pusb_device)
		return -ENODEV;
	return usb_control_msg(pusb_device, usb_sndctrlpipe(pusb_device, 0),
			USB_REQ_SET_FEATURE,
			USB_DIR_OUT |
			USB_TYPE_STANDARD |
			USB_RECIP_DEVICE,
			USB_DEVICE_REMOTE_WAKEUP,
			0, NULL, 0, 50000);
}
/*****************************************************************************/
int easycap_audio_setup(struct easycap *peasycap)
{
	struct usb_device *pusb_device;
	u8 buffer[1];
	int rc, id1, id2;
/*---------------------------------------------------------------------------*/
/*
 *  IMPORTANT:
 *  THE MESSAGE OF TYPE (USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE)
 *  CAUSES MUTING IF THE VALUE 0x0100 IS SENT.
 *  TO ENABLE AUDIO THE VALUE 0x0200 MUST BE SENT. 
*/ /*---------------------------------------------------------------------------*/ const u8 request = 0x01; const u8 requesttype = USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE; const u16 value_unmute = 0x0200; const u16 index = 0x0301; const u16 length = 1; if (!peasycap) return -EFAULT; pusb_device = peasycap->pusb_device; if (!pusb_device) return -ENODEV; JOM(8, "%02X %02X %02X %02X %02X %02X %02X %02X\n", requesttype, request, (0x00FF & value_unmute), (0xFF00 & value_unmute) >> 8, (0x00FF & index), (0xFF00 & index) >> 8, (0x00FF & length), (0xFF00 & length) >> 8); buffer[0] = 0x01; rc = usb_control_msg(pusb_device, usb_sndctrlpipe(pusb_device, 0), request, requesttype, value_unmute, index, &buffer[0], length, 50000); JOT(8, "0x%02X=buffer\n", buffer[0]); if (rc != (int)length) { switch (rc) { case -EPIPE: SAY("usb_control_msg returned -EPIPE\n"); break; default: SAY("ERROR: usb_control_msg returned %i\n", rc); break; } } /*--------------------------------------------------------------------------*/ /* * REGISTER 500: SETTING VALUE TO 0x0094 RESETS AUDIO CONFIGURATION ??? * REGISTER 506: ANALOGUE AUDIO ATTENTUATOR ??? * FOR THE CVBS+S-VIDEO HARDWARE: * SETTING VALUE TO 0x0000 GIVES QUIET SOUND. * THE UPPER BYTE SEEMS TO HAVE NO EFFECT. * FOR THE FOUR-CVBS HARDWARE: * SETTING VALUE TO 0x0000 SEEMS TO HAVE NO EFFECT. * REGISTER 507: ANALOGUE AUDIO PREAMPLIFIER ON/OFF ??? * FOR THE CVBS-S-VIDEO HARDWARE: * SETTING VALUE TO 0x0001 GIVES VERY LOUD, DISTORTED SOUND. * THE UPPER BYTE SEEMS TO HAVE NO EFFECT. 
*/ /*--------------------------------------------------------------------------*/ SET(pusb_device, 0x0500, 0x0094); SET(pusb_device, 0x0500, 0x008C); SET(pusb_device, 0x0506, 0x0001); SET(pusb_device, 0x0507, 0x0000); id1 = read_vt(pusb_device, 0x007C); id2 = read_vt(pusb_device, 0x007E); SAM("0x%04X:0x%04X is audio vendor id\n", id1, id2); /*---------------------------------------------------------------------------*/ /* * SELECT AUDIO SOURCE "LINE IN" AND SET THE AUDIO GAIN. */ /*---------------------------------------------------------------------------*/ if (easycap_audio_gainset(pusb_device, peasycap->gain)) SAY("ERROR: audio_gainset() failed\n"); check_vt(pusb_device); return 0; } /*****************************************************************************/ int check_vt(struct usb_device *pusb_device) { int igot; if (!pusb_device) return -ENODEV; igot = read_vt(pusb_device, 0x0002); if (0 > igot) SAY("ERROR: failed to read VT1612A register 0x02\n"); if (0x8000 & igot) SAY("register 0x%02X muted\n", 0x02); igot = read_vt(pusb_device, 0x000E); if (0 > igot) SAY("ERROR: failed to read VT1612A register 0x0E\n"); if (0x8000 & igot) SAY("register 0x%02X muted\n", 0x0E); igot = read_vt(pusb_device, 0x0010); if (0 > igot) SAY("ERROR: failed to read VT1612A register 0x10\n"); if (0x8000 & igot) SAY("register 0x%02X muted\n", 0x10); igot = read_vt(pusb_device, 0x0012); if (0 > igot) SAY("ERROR: failed to read VT1612A register 0x12\n"); if (0x8000 & igot) SAY("register 0x%02X muted\n", 0x12); igot = read_vt(pusb_device, 0x0014); if (0 > igot) SAY("ERROR: failed to read VT1612A register 0x14\n"); if (0x8000 & igot) SAY("register 0x%02X muted\n", 0x14); igot = read_vt(pusb_device, 0x0016); if (0 > igot) SAY("ERROR: failed to read VT1612A register 0x16\n"); if (0x8000 & igot) SAY("register 0x%02X muted\n", 0x16); igot = read_vt(pusb_device, 0x0018); if (0 > igot) SAY("ERROR: failed to read VT1612A register 0x18\n"); if (0x8000 & igot) SAY("register 0x%02X muted\n", 
0x18); igot = read_vt(pusb_device, 0x001C); if (0 > igot) SAY("ERROR: failed to read VT1612A register 0x1C\n"); if (0x8000 & igot) SAY("register 0x%02X muted\n", 0x1C); return 0; } /*****************************************************************************/ /*---------------------------------------------------------------------------*/ /* NOTE: THIS DOES INCREASE THE VOLUME DRAMATICALLY: * audio_gainset(pusb_device, 0x000F); * * loud dB register 0x10 dB register 0x1C dB total * 0 -34.5 0 -34.5 * .. .... . .... * 15 10.5 0 10.5 * 16 12.0 0 12.0 * 17 12.0 1.5 13.5 * .. .... .... .... * 31 12.0 22.5 34.5 */ /*---------------------------------------------------------------------------*/ int easycap_audio_gainset(struct usb_device *pusb_device, s8 loud) { int igot; u8 tmp; u16 mute; if (!pusb_device) return -ENODEV; if (0 > loud) loud = 0; if (31 < loud) loud = 31; write_vt(pusb_device, 0x0002, 0x8000); /*---------------------------------------------------------------------------*/ igot = read_vt(pusb_device, 0x000E); if (0 > igot) { SAY("ERROR: failed to read VT1612A register 0x0E\n"); mute = 0x0000; } else mute = 0x8000 & ((unsigned int)igot); mute = 0; if (16 > loud) tmp = 0x01 | (0x001F & (((u8)(15 - loud)) << 1)); else tmp = 0; JOT(8, "0x%04X=(mute|tmp) for VT1612A register 0x0E\n", mute | tmp); write_vt(pusb_device, 0x000E, (mute | tmp)); /*---------------------------------------------------------------------------*/ igot = read_vt(pusb_device, 0x0010); if (0 > igot) { SAY("ERROR: failed to read VT1612A register 0x10\n"); mute = 0x0000; } else mute = 0x8000 & ((unsigned int)igot); mute = 0; JOT(8, "0x%04X=(mute|tmp|(tmp<<8)) for VT1612A register 0x10,...0x18\n", mute | tmp | (tmp << 8)); write_vt(pusb_device, 0x0010, (mute | tmp | (tmp << 8))); write_vt(pusb_device, 0x0012, (mute | tmp | (tmp << 8))); write_vt(pusb_device, 0x0014, (mute | tmp | (tmp << 8))); write_vt(pusb_device, 0x0016, (mute | tmp | (tmp << 8))); write_vt(pusb_device, 0x0018, (mute | tmp | 
(tmp << 8))); /*---------------------------------------------------------------------------*/ igot = read_vt(pusb_device, 0x001C); if (0 > igot) { SAY("ERROR: failed to read VT1612A register 0x1C\n"); mute = 0x0000; } else mute = 0x8000 & ((unsigned int)igot); mute = 0; if (16 <= loud) tmp = 0x000F & (u8)(loud - 16); else tmp = 0; JOT(8, "0x%04X=(mute|tmp|(tmp<<8)) for VT1612A register 0x1C\n", mute | tmp | (tmp << 8)); write_vt(pusb_device, 0x001C, (mute | tmp | (tmp << 8))); write_vt(pusb_device, 0x001A, 0x0404); write_vt(pusb_device, 0x0002, 0x0000); return 0; } /*****************************************************************************/
gpl-2.0
simar7/singhdroid
kernel/drivers/media/dvb/dvb-usb/dvb-usb-dvb.c
5030
8081
/* dvb-usb-dvb.c is part of the DVB USB library. * * Copyright (C) 2004-6 Patrick Boettcher (patrick.boettcher@desy.de) * see dvb-usb-init.c for copyright information. * * This file contains functions for initializing and handling the * linux-dvb API. */ #include "dvb-usb-common.h" /* does the complete input transfer handling */ static int dvb_usb_ctrl_feed(struct dvb_demux_feed *dvbdmxfeed, int onoff) { struct dvb_usb_adapter *adap = dvbdmxfeed->demux->priv; int newfeedcount, ret; if (adap == NULL) return -ENODEV; if ((adap->active_fe < 0) || (adap->active_fe >= adap->num_frontends_initialized)) { return -EINVAL; } newfeedcount = adap->feedcount + (onoff ? 1 : -1); /* stop feed before setting a new pid if there will be no pid anymore */ if (newfeedcount == 0) { deb_ts("stop feeding\n"); usb_urb_kill(&adap->fe_adap[adap->active_fe].stream); if (adap->props.fe[adap->active_fe].streaming_ctrl != NULL) { ret = adap->props.fe[adap->active_fe].streaming_ctrl(adap, 0); if (ret < 0) { err("error while stopping stream."); return ret; } } } adap->feedcount = newfeedcount; /* activate the pid on the device specific pid_filter */ deb_ts("setting pid (%s): %5d %04x at index %d '%s'\n", adap->fe_adap[adap->active_fe].pid_filtering ? "yes" : "no", dvbdmxfeed->pid, dvbdmxfeed->pid, dvbdmxfeed->index, onoff ? "on" : "off"); if (adap->props.fe[adap->active_fe].caps & DVB_USB_ADAP_HAS_PID_FILTER && adap->fe_adap[adap->active_fe].pid_filtering && adap->props.fe[adap->active_fe].pid_filter != NULL) adap->props.fe[adap->active_fe].pid_filter(adap, dvbdmxfeed->index, dvbdmxfeed->pid, onoff); /* start the feed if this was the first feed and there is still a feed * for reception. 
*/ if (adap->feedcount == onoff && adap->feedcount > 0) { deb_ts("submitting all URBs\n"); usb_urb_submit(&adap->fe_adap[adap->active_fe].stream); deb_ts("controlling pid parser\n"); if (adap->props.fe[adap->active_fe].caps & DVB_USB_ADAP_HAS_PID_FILTER && adap->props.fe[adap->active_fe].caps & DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF && adap->props.fe[adap->active_fe].pid_filter_ctrl != NULL) { ret = adap->props.fe[adap->active_fe].pid_filter_ctrl(adap, adap->fe_adap[adap->active_fe].pid_filtering); if (ret < 0) { err("could not handle pid_parser"); return ret; } } deb_ts("start feeding\n"); if (adap->props.fe[adap->active_fe].streaming_ctrl != NULL) { ret = adap->props.fe[adap->active_fe].streaming_ctrl(adap, 1); if (ret < 0) { err("error while enabling fifo."); return ret; } } } return 0; } static int dvb_usb_start_feed(struct dvb_demux_feed *dvbdmxfeed) { deb_ts("start pid: 0x%04x, feedtype: %d\n", dvbdmxfeed->pid,dvbdmxfeed->type); return dvb_usb_ctrl_feed(dvbdmxfeed,1); } static int dvb_usb_stop_feed(struct dvb_demux_feed *dvbdmxfeed) { deb_ts("stop pid: 0x%04x, feedtype: %d\n", dvbdmxfeed->pid, dvbdmxfeed->type); return dvb_usb_ctrl_feed(dvbdmxfeed,0); } int dvb_usb_adapter_dvb_init(struct dvb_usb_adapter *adap, short *adapter_nums) { int i; int ret = dvb_register_adapter(&adap->dvb_adap, adap->dev->desc->name, adap->dev->owner, &adap->dev->udev->dev, adapter_nums); if (ret < 0) { deb_info("dvb_register_adapter failed: error %d", ret); goto err; } adap->dvb_adap.priv = adap; adap->dvb_adap.fe_ioctl_override = adap->props.fe_ioctl_override; if (adap->dev->props.read_mac_address) { if (adap->dev->props.read_mac_address(adap->dev,adap->dvb_adap.proposed_mac) == 0) info("MAC address: %pM",adap->dvb_adap.proposed_mac); else err("MAC address reading failed."); } adap->demux.dmx.capabilities = DMX_TS_FILTERING | DMX_SECTION_FILTERING; adap->demux.priv = adap; adap->demux.filternum = 0; for (i = 0; i < adap->props.num_frontends; i++) { if (adap->demux.filternum < 
adap->fe_adap[i].max_feed_count) adap->demux.filternum = adap->fe_adap[i].max_feed_count; } adap->demux.feednum = adap->demux.filternum; adap->demux.start_feed = dvb_usb_start_feed; adap->demux.stop_feed = dvb_usb_stop_feed; adap->demux.write_to_decoder = NULL; if ((ret = dvb_dmx_init(&adap->demux)) < 0) { err("dvb_dmx_init failed: error %d",ret); goto err_dmx; } adap->dmxdev.filternum = adap->demux.filternum; adap->dmxdev.demux = &adap->demux.dmx; adap->dmxdev.capabilities = 0; if ((ret = dvb_dmxdev_init(&adap->dmxdev, &adap->dvb_adap)) < 0) { err("dvb_dmxdev_init failed: error %d",ret); goto err_dmx_dev; } if ((ret = dvb_net_init(&adap->dvb_adap, &adap->dvb_net, &adap->demux.dmx)) < 0) { err("dvb_net_init failed: error %d",ret); goto err_net_init; } adap->state |= DVB_USB_ADAP_STATE_DVB; return 0; err_net_init: dvb_dmxdev_release(&adap->dmxdev); err_dmx_dev: dvb_dmx_release(&adap->demux); err_dmx: dvb_unregister_adapter(&adap->dvb_adap); err: return ret; } int dvb_usb_adapter_dvb_exit(struct dvb_usb_adapter *adap) { if (adap->state & DVB_USB_ADAP_STATE_DVB) { deb_info("unregistering DVB part\n"); dvb_net_release(&adap->dvb_net); adap->demux.dmx.close(&adap->demux.dmx); dvb_dmxdev_release(&adap->dmxdev); dvb_dmx_release(&adap->demux); dvb_unregister_adapter(&adap->dvb_adap); adap->state &= ~DVB_USB_ADAP_STATE_DVB; } return 0; } static int dvb_usb_set_active_fe(struct dvb_frontend *fe, int onoff) { struct dvb_usb_adapter *adap = fe->dvb->priv; int ret = (adap->props.frontend_ctrl) ? 
adap->props.frontend_ctrl(fe, onoff) : 0; if (ret < 0) { err("frontend_ctrl request failed"); return ret; } if (onoff) adap->active_fe = fe->id; return 0; } static int dvb_usb_fe_wakeup(struct dvb_frontend *fe) { struct dvb_usb_adapter *adap = fe->dvb->priv; dvb_usb_device_power_ctrl(adap->dev, 1); dvb_usb_set_active_fe(fe, 1); if (adap->fe_adap[fe->id].fe_init) adap->fe_adap[fe->id].fe_init(fe); return 0; } static int dvb_usb_fe_sleep(struct dvb_frontend *fe) { struct dvb_usb_adapter *adap = fe->dvb->priv; if (adap->fe_adap[fe->id].fe_sleep) adap->fe_adap[fe->id].fe_sleep(fe); dvb_usb_set_active_fe(fe, 0); return dvb_usb_device_power_ctrl(adap->dev, 0); } int dvb_usb_adapter_frontend_init(struct dvb_usb_adapter *adap) { int ret, i; /* register all given adapter frontends */ for (i = 0; i < adap->props.num_frontends; i++) { if (adap->props.fe[i].frontend_attach == NULL) { err("strange: '%s' #%d,%d " "doesn't want to attach a frontend.", adap->dev->desc->name, adap->id, i); return 0; } ret = adap->props.fe[i].frontend_attach(adap); if (ret || adap->fe_adap[i].fe == NULL) { /* only print error when there is no FE at all */ if (i == 0) err("no frontend was attached by '%s'", adap->dev->desc->name); return 0; } adap->fe_adap[i].fe->id = i; /* re-assign sleep and wakeup functions */ adap->fe_adap[i].fe_init = adap->fe_adap[i].fe->ops.init; adap->fe_adap[i].fe->ops.init = dvb_usb_fe_wakeup; adap->fe_adap[i].fe_sleep = adap->fe_adap[i].fe->ops.sleep; adap->fe_adap[i].fe->ops.sleep = dvb_usb_fe_sleep; if (dvb_register_frontend(&adap->dvb_adap, adap->fe_adap[i].fe)) { err("Frontend %d registration failed.", i); dvb_frontend_detach(adap->fe_adap[i].fe); adap->fe_adap[i].fe = NULL; /* In error case, do not try register more FEs, * still leaving already registered FEs alive. 
*/ if (i == 0) return -ENODEV; else return 0; } /* only attach the tuner if the demod is there */ if (adap->props.fe[i].tuner_attach != NULL) adap->props.fe[i].tuner_attach(adap); adap->num_frontends_initialized++; } return 0; } int dvb_usb_adapter_frontend_exit(struct dvb_usb_adapter *adap) { int i = adap->num_frontends_initialized - 1; /* unregister all given adapter frontends */ for (; i >= 0; i--) { if (adap->fe_adap[i].fe != NULL) { dvb_unregister_frontend(adap->fe_adap[i].fe); dvb_frontend_detach(adap->fe_adap[i].fe); } } adap->num_frontends_initialized = 0; return 0; }
gpl-2.0
Desterly/android_kernel_motorola_msm8994
arch/ia64/sn/kernel/sn2/sn2_smp.c
7334
15742
/* * SN2 Platform specific SMP Support * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 2000-2006 Silicon Graphics, Inc. All rights reserved. */ #include <linux/init.h> #include <linux/kernel.h> #include <linux/spinlock.h> #include <linux/threads.h> #include <linux/sched.h> #include <linux/smp.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/mmzone.h> #include <linux/module.h> #include <linux/bitops.h> #include <linux/nodemask.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <asm/processor.h> #include <asm/irq.h> #include <asm/sal.h> #include <asm/delay.h> #include <asm/io.h> #include <asm/smp.h> #include <asm/tlb.h> #include <asm/numa.h> #include <asm/hw_irq.h> #include <asm/current.h> #include <asm/sn/sn_cpuid.h> #include <asm/sn/sn_sal.h> #include <asm/sn/addrs.h> #include <asm/sn/shub_mmr.h> #include <asm/sn/nodepda.h> #include <asm/sn/rw_mmr.h> #include <asm/sn/sn_feature_sets.h> DEFINE_PER_CPU(struct ptc_stats, ptcstats); DECLARE_PER_CPU(struct ptc_stats, ptcstats); static __cacheline_aligned DEFINE_SPINLOCK(sn2_global_ptc_lock); /* 0 = old algorithm (no IPI flushes), 1 = ipi deadlock flush, 2 = ipi instead of SHUB ptc, >2 = always ipi */ static int sn2_flush_opt = 0; extern unsigned long sn2_ptc_deadlock_recovery_core(volatile unsigned long *, unsigned long, volatile unsigned long *, unsigned long, volatile unsigned long *, unsigned long); void sn2_ptc_deadlock_recovery(short *, short, short, int, volatile unsigned long *, unsigned long, volatile unsigned long *, unsigned long); /* * Note: some is the following is captured here to make degugging easier * (the macros make more sense if you see the debug patch - not posted) */ #define sn2_ptctest 0 #define local_node_uses_ptc_ga(sh1) ((sh1) ? 1 : 0) #define max_active_pio(sh1) ((sh1) ? 
32 : 7) #define reset_max_active_on_deadlock() 1 #define PTC_LOCK(sh1) ((sh1) ? &sn2_global_ptc_lock : &sn_nodepda->ptc_lock) struct ptc_stats { unsigned long ptc_l; unsigned long change_rid; unsigned long shub_ptc_flushes; unsigned long nodes_flushed; unsigned long deadlocks; unsigned long deadlocks2; unsigned long lock_itc_clocks; unsigned long shub_itc_clocks; unsigned long shub_itc_clocks_max; unsigned long shub_ptc_flushes_not_my_mm; unsigned long shub_ipi_flushes; unsigned long shub_ipi_flushes_itc_clocks; }; #define sn2_ptctest 0 static inline unsigned long wait_piowc(void) { volatile unsigned long *piows; unsigned long zeroval, ws; piows = pda->pio_write_status_addr; zeroval = pda->pio_write_status_val; do { cpu_relax(); } while (((ws = *piows) & SH_PIO_WRITE_STATUS_PENDING_WRITE_COUNT_MASK) != zeroval); return (ws & SH_PIO_WRITE_STATUS_WRITE_DEADLOCK_MASK) != 0; } /** * sn_migrate - SN-specific task migration actions * @task: Task being migrated to new CPU * * SN2 PIO writes from separate CPUs are not guaranteed to arrive in order. * Context switching user threads which have memory-mapped MMIO may cause * PIOs to issue from separate CPUs, thus the PIO writes must be drained * from the previous CPU's Shub before execution resumes on the new CPU. 
*/ void sn_migrate(struct task_struct *task) { pda_t *last_pda = pdacpu(task_thread_info(task)->last_cpu); volatile unsigned long *adr = last_pda->pio_write_status_addr; unsigned long val = last_pda->pio_write_status_val; /* Drain PIO writes from old CPU's Shub */ while (unlikely((*adr & SH_PIO_WRITE_STATUS_PENDING_WRITE_COUNT_MASK) != val)) cpu_relax(); } void sn_tlb_migrate_finish(struct mm_struct *mm) { /* flush_tlb_mm is inefficient if more than 1 users of mm */ if (mm == current->mm && mm && atomic_read(&mm->mm_users) == 1) flush_tlb_mm(mm); } static void sn2_ipi_flush_all_tlb(struct mm_struct *mm) { unsigned long itc; itc = ia64_get_itc(); smp_flush_tlb_cpumask(*mm_cpumask(mm)); itc = ia64_get_itc() - itc; __get_cpu_var(ptcstats).shub_ipi_flushes_itc_clocks += itc; __get_cpu_var(ptcstats).shub_ipi_flushes++; } /** * sn2_global_tlb_purge - globally purge translation cache of virtual address range * @mm: mm_struct containing virtual address range * @start: start of virtual address range * @end: end of virtual address range * @nbits: specifies number of bytes to purge per instruction (num = 1<<(nbits & 0xfc)) * * Purges the translation caches of all processors of the given virtual address * range. * * Note: * - cpu_vm_mask is a bit mask that indicates which cpus have loaded the context. * - cpu_vm_mask is converted into a nodemask of the nodes containing the * cpus in cpu_vm_mask. * - if only one bit is set in cpu_vm_mask & it is the current cpu & the * process is purging its own virtual address range, then only the * local TLB needs to be flushed. This flushing can be done using * ptc.l. This is the common case & avoids the global spinlock. * - if multiple cpus have loaded the context, then flushing has to be * done with ptc.g/MMRs under protection of the global ptc_lock. 
*/ void sn2_global_tlb_purge(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long nbits) { int i, ibegin, shub1, cnode, mynasid, cpu, lcpu = 0, nasid; int mymm = (mm == current->active_mm && mm == current->mm); int use_cpu_ptcga; volatile unsigned long *ptc0, *ptc1; unsigned long itc, itc2, flags, data0 = 0, data1 = 0, rr_value, old_rr = 0; short nasids[MAX_NUMNODES], nix; nodemask_t nodes_flushed; int active, max_active, deadlock, flush_opt = sn2_flush_opt; if (flush_opt > 2) { sn2_ipi_flush_all_tlb(mm); return; } nodes_clear(nodes_flushed); i = 0; for_each_cpu(cpu, mm_cpumask(mm)) { cnode = cpu_to_node(cpu); node_set(cnode, nodes_flushed); lcpu = cpu; i++; } if (i == 0) return; preempt_disable(); if (likely(i == 1 && lcpu == smp_processor_id() && mymm)) { do { ia64_ptcl(start, nbits << 2); start += (1UL << nbits); } while (start < end); ia64_srlz_i(); __get_cpu_var(ptcstats).ptc_l++; preempt_enable(); return; } if (atomic_read(&mm->mm_users) == 1 && mymm) { flush_tlb_mm(mm); __get_cpu_var(ptcstats).change_rid++; preempt_enable(); return; } if (flush_opt == 2) { sn2_ipi_flush_all_tlb(mm); preempt_enable(); return; } itc = ia64_get_itc(); nix = 0; for_each_node_mask(cnode, nodes_flushed) nasids[nix++] = cnodeid_to_nasid(cnode); rr_value = (mm->context << 3) | REGION_NUMBER(start); shub1 = is_shub1(); if (shub1) { data0 = (1UL << SH1_PTC_0_A_SHFT) | (nbits << SH1_PTC_0_PS_SHFT) | (rr_value << SH1_PTC_0_RID_SHFT) | (1UL << SH1_PTC_0_START_SHFT); ptc0 = (long *)GLOBAL_MMR_PHYS_ADDR(0, SH1_PTC_0); ptc1 = (long *)GLOBAL_MMR_PHYS_ADDR(0, SH1_PTC_1); } else { data0 = (1UL << SH2_PTC_A_SHFT) | (nbits << SH2_PTC_PS_SHFT) | (1UL << SH2_PTC_START_SHFT); ptc0 = (long *)GLOBAL_MMR_PHYS_ADDR(0, SH2_PTC + (rr_value << SH2_PTC_RID_SHFT)); ptc1 = NULL; } mynasid = get_nasid(); use_cpu_ptcga = local_node_uses_ptc_ga(shub1); max_active = max_active_pio(shub1); itc = ia64_get_itc(); spin_lock_irqsave(PTC_LOCK(shub1), flags); itc2 = ia64_get_itc(); 
__get_cpu_var(ptcstats).lock_itc_clocks += itc2 - itc; __get_cpu_var(ptcstats).shub_ptc_flushes++; __get_cpu_var(ptcstats).nodes_flushed += nix; if (!mymm) __get_cpu_var(ptcstats).shub_ptc_flushes_not_my_mm++; if (use_cpu_ptcga && !mymm) { old_rr = ia64_get_rr(start); ia64_set_rr(start, (old_rr & 0xff) | (rr_value << 8)); ia64_srlz_d(); } wait_piowc(); do { if (shub1) data1 = start | (1UL << SH1_PTC_1_START_SHFT); else data0 = (data0 & ~SH2_PTC_ADDR_MASK) | (start & SH2_PTC_ADDR_MASK); deadlock = 0; active = 0; for (ibegin = 0, i = 0; i < nix; i++) { nasid = nasids[i]; if (use_cpu_ptcga && unlikely(nasid == mynasid)) { ia64_ptcga(start, nbits << 2); ia64_srlz_i(); } else { ptc0 = CHANGE_NASID(nasid, ptc0); if (ptc1) ptc1 = CHANGE_NASID(nasid, ptc1); pio_atomic_phys_write_mmrs(ptc0, data0, ptc1, data1); active++; } if (active >= max_active || i == (nix - 1)) { if ((deadlock = wait_piowc())) { if (flush_opt == 1) goto done; sn2_ptc_deadlock_recovery(nasids, ibegin, i, mynasid, ptc0, data0, ptc1, data1); if (reset_max_active_on_deadlock()) max_active = 1; } active = 0; ibegin = i + 1; } } start += (1UL << nbits); } while (start < end); done: itc2 = ia64_get_itc() - itc2; __get_cpu_var(ptcstats).shub_itc_clocks += itc2; if (itc2 > __get_cpu_var(ptcstats).shub_itc_clocks_max) __get_cpu_var(ptcstats).shub_itc_clocks_max = itc2; if (old_rr) { ia64_set_rr(start, old_rr); ia64_srlz_d(); } spin_unlock_irqrestore(PTC_LOCK(shub1), flags); if (flush_opt == 1 && deadlock) { __get_cpu_var(ptcstats).deadlocks++; sn2_ipi_flush_all_tlb(mm); } preempt_enable(); } /* * sn2_ptc_deadlock_recovery * * Recover from PTC deadlocks conditions. Recovery requires stepping thru each * TLB flush transaction. The recovery sequence is somewhat tricky & is * coded in assembly language. 
*/ void sn2_ptc_deadlock_recovery(short *nasids, short ib, short ie, int mynasid, volatile unsigned long *ptc0, unsigned long data0, volatile unsigned long *ptc1, unsigned long data1) { short nasid, i; unsigned long *piows, zeroval, n; __get_cpu_var(ptcstats).deadlocks++; piows = (unsigned long *) pda->pio_write_status_addr; zeroval = pda->pio_write_status_val; for (i=ib; i <= ie; i++) { nasid = nasids[i]; if (local_node_uses_ptc_ga(is_shub1()) && nasid == mynasid) continue; ptc0 = CHANGE_NASID(nasid, ptc0); if (ptc1) ptc1 = CHANGE_NASID(nasid, ptc1); n = sn2_ptc_deadlock_recovery_core(ptc0, data0, ptc1, data1, piows, zeroval); __get_cpu_var(ptcstats).deadlocks2 += n; } } /** * sn_send_IPI_phys - send an IPI to a Nasid and slice * @nasid: nasid to receive the interrupt (may be outside partition) * @physid: physical cpuid to receive the interrupt. * @vector: command to send * @delivery_mode: delivery mechanism * * Sends an IPI (interprocessor interrupt) to the processor specified by * @physid * * @delivery_mode can be one of the following * * %IA64_IPI_DM_INT - pend an interrupt * %IA64_IPI_DM_PMI - pend a PMI * %IA64_IPI_DM_NMI - pend an NMI * %IA64_IPI_DM_INIT - pend an INIT interrupt */ void sn_send_IPI_phys(int nasid, long physid, int vector, int delivery_mode) { long val; unsigned long flags = 0; volatile long *p; p = (long *)GLOBAL_MMR_PHYS_ADDR(nasid, SH_IPI_INT); val = (1UL << SH_IPI_INT_SEND_SHFT) | (physid << SH_IPI_INT_PID_SHFT) | ((long)delivery_mode << SH_IPI_INT_TYPE_SHFT) | ((long)vector << SH_IPI_INT_IDX_SHFT) | (0x000feeUL << SH_IPI_INT_BASE_SHFT); mb(); if (enable_shub_wars_1_1()) { spin_lock_irqsave(&sn2_global_ptc_lock, flags); } pio_phys_write_mmr(p, val); if (enable_shub_wars_1_1()) { wait_piowc(); spin_unlock_irqrestore(&sn2_global_ptc_lock, flags); } } EXPORT_SYMBOL(sn_send_IPI_phys); /** * sn2_send_IPI - send an IPI to a processor * @cpuid: target of the IPI * @vector: command to send * @delivery_mode: delivery mechanism * @redirect: 
redirect the IPI? * * Sends an IPI (InterProcessor Interrupt) to the processor specified by * @cpuid. @vector specifies the command to send, while @delivery_mode can * be one of the following * * %IA64_IPI_DM_INT - pend an interrupt * %IA64_IPI_DM_PMI - pend a PMI * %IA64_IPI_DM_NMI - pend an NMI * %IA64_IPI_DM_INIT - pend an INIT interrupt */ void sn2_send_IPI(int cpuid, int vector, int delivery_mode, int redirect) { long physid; int nasid; physid = cpu_physical_id(cpuid); nasid = cpuid_to_nasid(cpuid); /* the following is used only when starting cpus at boot time */ if (unlikely(nasid == -1)) ia64_sn_get_sapic_info(physid, &nasid, NULL, NULL); sn_send_IPI_phys(nasid, physid, vector, delivery_mode); } #ifdef CONFIG_HOTPLUG_CPU /** * sn_cpu_disable_allowed - Determine if a CPU can be disabled. * @cpu - CPU that is requested to be disabled. * * CPU disable is only allowed on SHub2 systems running with a PROM * that supports CPU disable. It is not permitted to disable the boot processor. */ bool sn_cpu_disable_allowed(int cpu) { if (is_shub2() && sn_prom_feature_available(PRF_CPU_DISABLE_SUPPORT)) { if (cpu != 0) return true; else printk(KERN_WARNING "Disabling the boot processor is not allowed.\n"); } else printk(KERN_WARNING "CPU disable is not supported on this system.\n"); return false; } #endif /* CONFIG_HOTPLUG_CPU */ #ifdef CONFIG_PROC_FS #define PTC_BASENAME "sgi_sn/ptc_statistics" static void *sn2_ptc_seq_start(struct seq_file *file, loff_t * offset) { if (*offset < nr_cpu_ids) return offset; return NULL; } static void *sn2_ptc_seq_next(struct seq_file *file, void *data, loff_t * offset) { (*offset)++; if (*offset < nr_cpu_ids) return offset; return NULL; } static void sn2_ptc_seq_stop(struct seq_file *file, void *data) { } static int sn2_ptc_seq_show(struct seq_file *file, void *data) { struct ptc_stats *stat; int cpu; cpu = *(loff_t *) data; if (!cpu) { seq_printf(file, "# cpu ptc_l newrid ptc_flushes nodes_flushed deadlocks lock_nsec shub_nsec 
shub_nsec_max not_my_mm deadlock2 ipi_fluches ipi_nsec\n"); seq_printf(file, "# ptctest %d, flushopt %d\n", sn2_ptctest, sn2_flush_opt); } if (cpu < nr_cpu_ids && cpu_online(cpu)) { stat = &per_cpu(ptcstats, cpu); seq_printf(file, "cpu %d %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld\n", cpu, stat->ptc_l, stat->change_rid, stat->shub_ptc_flushes, stat->nodes_flushed, stat->deadlocks, 1000 * stat->lock_itc_clocks / per_cpu(ia64_cpu_info, cpu).cyc_per_usec, 1000 * stat->shub_itc_clocks / per_cpu(ia64_cpu_info, cpu).cyc_per_usec, 1000 * stat->shub_itc_clocks_max / per_cpu(ia64_cpu_info, cpu).cyc_per_usec, stat->shub_ptc_flushes_not_my_mm, stat->deadlocks2, stat->shub_ipi_flushes, 1000 * stat->shub_ipi_flushes_itc_clocks / per_cpu(ia64_cpu_info, cpu).cyc_per_usec); } return 0; } static ssize_t sn2_ptc_proc_write(struct file *file, const char __user *user, size_t count, loff_t *data) { int cpu; char optstr[64]; if (count == 0 || count > sizeof(optstr)) return -EINVAL; if (copy_from_user(optstr, user, count)) return -EFAULT; optstr[count - 1] = '\0'; sn2_flush_opt = simple_strtoul(optstr, NULL, 0); for_each_online_cpu(cpu) memset(&per_cpu(ptcstats, cpu), 0, sizeof(struct ptc_stats)); return count; } static const struct seq_operations sn2_ptc_seq_ops = { .start = sn2_ptc_seq_start, .next = sn2_ptc_seq_next, .stop = sn2_ptc_seq_stop, .show = sn2_ptc_seq_show }; static int sn2_ptc_proc_open(struct inode *inode, struct file *file) { return seq_open(file, &sn2_ptc_seq_ops); } static const struct file_operations proc_sn2_ptc_operations = { .open = sn2_ptc_proc_open, .read = seq_read, .write = sn2_ptc_proc_write, .llseek = seq_lseek, .release = seq_release, }; static struct proc_dir_entry *proc_sn2_ptc; static int __init sn2_ptc_init(void) { if (!ia64_platform_is("sn2")) return 0; proc_sn2_ptc = proc_create(PTC_BASENAME, 0444, NULL, &proc_sn2_ptc_operations); if (!proc_sn2_ptc) { printk(KERN_ERR "unable to create %s proc entry", PTC_BASENAME); return -EINVAL; } 
spin_lock_init(&sn2_global_ptc_lock); return 0; } static void __exit sn2_ptc_exit(void) { remove_proc_entry(PTC_BASENAME, NULL); } module_init(sn2_ptc_init); module_exit(sn2_ptc_exit); #endif /* CONFIG_PROC_FS */
gpl-2.0
davros-/elite_kernel_jf
arch/ia64/sn/kernel/sn2/sn2_smp.c
7334
15742
/* * SN2 Platform specific SMP Support * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 2000-2006 Silicon Graphics, Inc. All rights reserved. */ #include <linux/init.h> #include <linux/kernel.h> #include <linux/spinlock.h> #include <linux/threads.h> #include <linux/sched.h> #include <linux/smp.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/mmzone.h> #include <linux/module.h> #include <linux/bitops.h> #include <linux/nodemask.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <asm/processor.h> #include <asm/irq.h> #include <asm/sal.h> #include <asm/delay.h> #include <asm/io.h> #include <asm/smp.h> #include <asm/tlb.h> #include <asm/numa.h> #include <asm/hw_irq.h> #include <asm/current.h> #include <asm/sn/sn_cpuid.h> #include <asm/sn/sn_sal.h> #include <asm/sn/addrs.h> #include <asm/sn/shub_mmr.h> #include <asm/sn/nodepda.h> #include <asm/sn/rw_mmr.h> #include <asm/sn/sn_feature_sets.h> DEFINE_PER_CPU(struct ptc_stats, ptcstats); DECLARE_PER_CPU(struct ptc_stats, ptcstats); static __cacheline_aligned DEFINE_SPINLOCK(sn2_global_ptc_lock); /* 0 = old algorithm (no IPI flushes), 1 = ipi deadlock flush, 2 = ipi instead of SHUB ptc, >2 = always ipi */ static int sn2_flush_opt = 0; extern unsigned long sn2_ptc_deadlock_recovery_core(volatile unsigned long *, unsigned long, volatile unsigned long *, unsigned long, volatile unsigned long *, unsigned long); void sn2_ptc_deadlock_recovery(short *, short, short, int, volatile unsigned long *, unsigned long, volatile unsigned long *, unsigned long); /* * Note: some is the following is captured here to make degugging easier * (the macros make more sense if you see the debug patch - not posted) */ #define sn2_ptctest 0 #define local_node_uses_ptc_ga(sh1) ((sh1) ? 1 : 0) #define max_active_pio(sh1) ((sh1) ? 
32 : 7) #define reset_max_active_on_deadlock() 1 #define PTC_LOCK(sh1) ((sh1) ? &sn2_global_ptc_lock : &sn_nodepda->ptc_lock) struct ptc_stats { unsigned long ptc_l; unsigned long change_rid; unsigned long shub_ptc_flushes; unsigned long nodes_flushed; unsigned long deadlocks; unsigned long deadlocks2; unsigned long lock_itc_clocks; unsigned long shub_itc_clocks; unsigned long shub_itc_clocks_max; unsigned long shub_ptc_flushes_not_my_mm; unsigned long shub_ipi_flushes; unsigned long shub_ipi_flushes_itc_clocks; }; #define sn2_ptctest 0 static inline unsigned long wait_piowc(void) { volatile unsigned long *piows; unsigned long zeroval, ws; piows = pda->pio_write_status_addr; zeroval = pda->pio_write_status_val; do { cpu_relax(); } while (((ws = *piows) & SH_PIO_WRITE_STATUS_PENDING_WRITE_COUNT_MASK) != zeroval); return (ws & SH_PIO_WRITE_STATUS_WRITE_DEADLOCK_MASK) != 0; } /** * sn_migrate - SN-specific task migration actions * @task: Task being migrated to new CPU * * SN2 PIO writes from separate CPUs are not guaranteed to arrive in order. * Context switching user threads which have memory-mapped MMIO may cause * PIOs to issue from separate CPUs, thus the PIO writes must be drained * from the previous CPU's Shub before execution resumes on the new CPU. 
*/ void sn_migrate(struct task_struct *task) { pda_t *last_pda = pdacpu(task_thread_info(task)->last_cpu); volatile unsigned long *adr = last_pda->pio_write_status_addr; unsigned long val = last_pda->pio_write_status_val; /* Drain PIO writes from old CPU's Shub */ while (unlikely((*adr & SH_PIO_WRITE_STATUS_PENDING_WRITE_COUNT_MASK) != val)) cpu_relax(); } void sn_tlb_migrate_finish(struct mm_struct *mm) { /* flush_tlb_mm is inefficient if more than 1 users of mm */ if (mm == current->mm && mm && atomic_read(&mm->mm_users) == 1) flush_tlb_mm(mm); } static void sn2_ipi_flush_all_tlb(struct mm_struct *mm) { unsigned long itc; itc = ia64_get_itc(); smp_flush_tlb_cpumask(*mm_cpumask(mm)); itc = ia64_get_itc() - itc; __get_cpu_var(ptcstats).shub_ipi_flushes_itc_clocks += itc; __get_cpu_var(ptcstats).shub_ipi_flushes++; } /** * sn2_global_tlb_purge - globally purge translation cache of virtual address range * @mm: mm_struct containing virtual address range * @start: start of virtual address range * @end: end of virtual address range * @nbits: specifies number of bytes to purge per instruction (num = 1<<(nbits & 0xfc)) * * Purges the translation caches of all processors of the given virtual address * range. * * Note: * - cpu_vm_mask is a bit mask that indicates which cpus have loaded the context. * - cpu_vm_mask is converted into a nodemask of the nodes containing the * cpus in cpu_vm_mask. * - if only one bit is set in cpu_vm_mask & it is the current cpu & the * process is purging its own virtual address range, then only the * local TLB needs to be flushed. This flushing can be done using * ptc.l. This is the common case & avoids the global spinlock. * - if multiple cpus have loaded the context, then flushing has to be * done with ptc.g/MMRs under protection of the global ptc_lock. 
*/ void sn2_global_tlb_purge(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long nbits) { int i, ibegin, shub1, cnode, mynasid, cpu, lcpu = 0, nasid; int mymm = (mm == current->active_mm && mm == current->mm); int use_cpu_ptcga; volatile unsigned long *ptc0, *ptc1; unsigned long itc, itc2, flags, data0 = 0, data1 = 0, rr_value, old_rr = 0; short nasids[MAX_NUMNODES], nix; nodemask_t nodes_flushed; int active, max_active, deadlock, flush_opt = sn2_flush_opt; if (flush_opt > 2) { sn2_ipi_flush_all_tlb(mm); return; } nodes_clear(nodes_flushed); i = 0; for_each_cpu(cpu, mm_cpumask(mm)) { cnode = cpu_to_node(cpu); node_set(cnode, nodes_flushed); lcpu = cpu; i++; } if (i == 0) return; preempt_disable(); if (likely(i == 1 && lcpu == smp_processor_id() && mymm)) { do { ia64_ptcl(start, nbits << 2); start += (1UL << nbits); } while (start < end); ia64_srlz_i(); __get_cpu_var(ptcstats).ptc_l++; preempt_enable(); return; } if (atomic_read(&mm->mm_users) == 1 && mymm) { flush_tlb_mm(mm); __get_cpu_var(ptcstats).change_rid++; preempt_enable(); return; } if (flush_opt == 2) { sn2_ipi_flush_all_tlb(mm); preempt_enable(); return; } itc = ia64_get_itc(); nix = 0; for_each_node_mask(cnode, nodes_flushed) nasids[nix++] = cnodeid_to_nasid(cnode); rr_value = (mm->context << 3) | REGION_NUMBER(start); shub1 = is_shub1(); if (shub1) { data0 = (1UL << SH1_PTC_0_A_SHFT) | (nbits << SH1_PTC_0_PS_SHFT) | (rr_value << SH1_PTC_0_RID_SHFT) | (1UL << SH1_PTC_0_START_SHFT); ptc0 = (long *)GLOBAL_MMR_PHYS_ADDR(0, SH1_PTC_0); ptc1 = (long *)GLOBAL_MMR_PHYS_ADDR(0, SH1_PTC_1); } else { data0 = (1UL << SH2_PTC_A_SHFT) | (nbits << SH2_PTC_PS_SHFT) | (1UL << SH2_PTC_START_SHFT); ptc0 = (long *)GLOBAL_MMR_PHYS_ADDR(0, SH2_PTC + (rr_value << SH2_PTC_RID_SHFT)); ptc1 = NULL; } mynasid = get_nasid(); use_cpu_ptcga = local_node_uses_ptc_ga(shub1); max_active = max_active_pio(shub1); itc = ia64_get_itc(); spin_lock_irqsave(PTC_LOCK(shub1), flags); itc2 = ia64_get_itc(); 
__get_cpu_var(ptcstats).lock_itc_clocks += itc2 - itc; __get_cpu_var(ptcstats).shub_ptc_flushes++; __get_cpu_var(ptcstats).nodes_flushed += nix; if (!mymm) __get_cpu_var(ptcstats).shub_ptc_flushes_not_my_mm++; if (use_cpu_ptcga && !mymm) { old_rr = ia64_get_rr(start); ia64_set_rr(start, (old_rr & 0xff) | (rr_value << 8)); ia64_srlz_d(); } wait_piowc(); do { if (shub1) data1 = start | (1UL << SH1_PTC_1_START_SHFT); else data0 = (data0 & ~SH2_PTC_ADDR_MASK) | (start & SH2_PTC_ADDR_MASK); deadlock = 0; active = 0; for (ibegin = 0, i = 0; i < nix; i++) { nasid = nasids[i]; if (use_cpu_ptcga && unlikely(nasid == mynasid)) { ia64_ptcga(start, nbits << 2); ia64_srlz_i(); } else { ptc0 = CHANGE_NASID(nasid, ptc0); if (ptc1) ptc1 = CHANGE_NASID(nasid, ptc1); pio_atomic_phys_write_mmrs(ptc0, data0, ptc1, data1); active++; } if (active >= max_active || i == (nix - 1)) { if ((deadlock = wait_piowc())) { if (flush_opt == 1) goto done; sn2_ptc_deadlock_recovery(nasids, ibegin, i, mynasid, ptc0, data0, ptc1, data1); if (reset_max_active_on_deadlock()) max_active = 1; } active = 0; ibegin = i + 1; } } start += (1UL << nbits); } while (start < end); done: itc2 = ia64_get_itc() - itc2; __get_cpu_var(ptcstats).shub_itc_clocks += itc2; if (itc2 > __get_cpu_var(ptcstats).shub_itc_clocks_max) __get_cpu_var(ptcstats).shub_itc_clocks_max = itc2; if (old_rr) { ia64_set_rr(start, old_rr); ia64_srlz_d(); } spin_unlock_irqrestore(PTC_LOCK(shub1), flags); if (flush_opt == 1 && deadlock) { __get_cpu_var(ptcstats).deadlocks++; sn2_ipi_flush_all_tlb(mm); } preempt_enable(); } /* * sn2_ptc_deadlock_recovery * * Recover from PTC deadlocks conditions. Recovery requires stepping thru each * TLB flush transaction. The recovery sequence is somewhat tricky & is * coded in assembly language. 
*/ void sn2_ptc_deadlock_recovery(short *nasids, short ib, short ie, int mynasid, volatile unsigned long *ptc0, unsigned long data0, volatile unsigned long *ptc1, unsigned long data1) { short nasid, i; unsigned long *piows, zeroval, n; __get_cpu_var(ptcstats).deadlocks++; piows = (unsigned long *) pda->pio_write_status_addr; zeroval = pda->pio_write_status_val; for (i=ib; i <= ie; i++) { nasid = nasids[i]; if (local_node_uses_ptc_ga(is_shub1()) && nasid == mynasid) continue; ptc0 = CHANGE_NASID(nasid, ptc0); if (ptc1) ptc1 = CHANGE_NASID(nasid, ptc1); n = sn2_ptc_deadlock_recovery_core(ptc0, data0, ptc1, data1, piows, zeroval); __get_cpu_var(ptcstats).deadlocks2 += n; } } /** * sn_send_IPI_phys - send an IPI to a Nasid and slice * @nasid: nasid to receive the interrupt (may be outside partition) * @physid: physical cpuid to receive the interrupt. * @vector: command to send * @delivery_mode: delivery mechanism * * Sends an IPI (interprocessor interrupt) to the processor specified by * @physid * * @delivery_mode can be one of the following * * %IA64_IPI_DM_INT - pend an interrupt * %IA64_IPI_DM_PMI - pend a PMI * %IA64_IPI_DM_NMI - pend an NMI * %IA64_IPI_DM_INIT - pend an INIT interrupt */ void sn_send_IPI_phys(int nasid, long physid, int vector, int delivery_mode) { long val; unsigned long flags = 0; volatile long *p; p = (long *)GLOBAL_MMR_PHYS_ADDR(nasid, SH_IPI_INT); val = (1UL << SH_IPI_INT_SEND_SHFT) | (physid << SH_IPI_INT_PID_SHFT) | ((long)delivery_mode << SH_IPI_INT_TYPE_SHFT) | ((long)vector << SH_IPI_INT_IDX_SHFT) | (0x000feeUL << SH_IPI_INT_BASE_SHFT); mb(); if (enable_shub_wars_1_1()) { spin_lock_irqsave(&sn2_global_ptc_lock, flags); } pio_phys_write_mmr(p, val); if (enable_shub_wars_1_1()) { wait_piowc(); spin_unlock_irqrestore(&sn2_global_ptc_lock, flags); } } EXPORT_SYMBOL(sn_send_IPI_phys); /** * sn2_send_IPI - send an IPI to a processor * @cpuid: target of the IPI * @vector: command to send * @delivery_mode: delivery mechanism * @redirect: 
redirect the IPI? * * Sends an IPI (InterProcessor Interrupt) to the processor specified by * @cpuid. @vector specifies the command to send, while @delivery_mode can * be one of the following * * %IA64_IPI_DM_INT - pend an interrupt * %IA64_IPI_DM_PMI - pend a PMI * %IA64_IPI_DM_NMI - pend an NMI * %IA64_IPI_DM_INIT - pend an INIT interrupt */ void sn2_send_IPI(int cpuid, int vector, int delivery_mode, int redirect) { long physid; int nasid; physid = cpu_physical_id(cpuid); nasid = cpuid_to_nasid(cpuid); /* the following is used only when starting cpus at boot time */ if (unlikely(nasid == -1)) ia64_sn_get_sapic_info(physid, &nasid, NULL, NULL); sn_send_IPI_phys(nasid, physid, vector, delivery_mode); } #ifdef CONFIG_HOTPLUG_CPU /** * sn_cpu_disable_allowed - Determine if a CPU can be disabled. * @cpu - CPU that is requested to be disabled. * * CPU disable is only allowed on SHub2 systems running with a PROM * that supports CPU disable. It is not permitted to disable the boot processor. */ bool sn_cpu_disable_allowed(int cpu) { if (is_shub2() && sn_prom_feature_available(PRF_CPU_DISABLE_SUPPORT)) { if (cpu != 0) return true; else printk(KERN_WARNING "Disabling the boot processor is not allowed.\n"); } else printk(KERN_WARNING "CPU disable is not supported on this system.\n"); return false; } #endif /* CONFIG_HOTPLUG_CPU */ #ifdef CONFIG_PROC_FS #define PTC_BASENAME "sgi_sn/ptc_statistics" static void *sn2_ptc_seq_start(struct seq_file *file, loff_t * offset) { if (*offset < nr_cpu_ids) return offset; return NULL; } static void *sn2_ptc_seq_next(struct seq_file *file, void *data, loff_t * offset) { (*offset)++; if (*offset < nr_cpu_ids) return offset; return NULL; } static void sn2_ptc_seq_stop(struct seq_file *file, void *data) { } static int sn2_ptc_seq_show(struct seq_file *file, void *data) { struct ptc_stats *stat; int cpu; cpu = *(loff_t *) data; if (!cpu) { seq_printf(file, "# cpu ptc_l newrid ptc_flushes nodes_flushed deadlocks lock_nsec shub_nsec 
shub_nsec_max not_my_mm deadlock2 ipi_fluches ipi_nsec\n"); seq_printf(file, "# ptctest %d, flushopt %d\n", sn2_ptctest, sn2_flush_opt); } if (cpu < nr_cpu_ids && cpu_online(cpu)) { stat = &per_cpu(ptcstats, cpu); seq_printf(file, "cpu %d %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld\n", cpu, stat->ptc_l, stat->change_rid, stat->shub_ptc_flushes, stat->nodes_flushed, stat->deadlocks, 1000 * stat->lock_itc_clocks / per_cpu(ia64_cpu_info, cpu).cyc_per_usec, 1000 * stat->shub_itc_clocks / per_cpu(ia64_cpu_info, cpu).cyc_per_usec, 1000 * stat->shub_itc_clocks_max / per_cpu(ia64_cpu_info, cpu).cyc_per_usec, stat->shub_ptc_flushes_not_my_mm, stat->deadlocks2, stat->shub_ipi_flushes, 1000 * stat->shub_ipi_flushes_itc_clocks / per_cpu(ia64_cpu_info, cpu).cyc_per_usec); } return 0; } static ssize_t sn2_ptc_proc_write(struct file *file, const char __user *user, size_t count, loff_t *data) { int cpu; char optstr[64]; if (count == 0 || count > sizeof(optstr)) return -EINVAL; if (copy_from_user(optstr, user, count)) return -EFAULT; optstr[count - 1] = '\0'; sn2_flush_opt = simple_strtoul(optstr, NULL, 0); for_each_online_cpu(cpu) memset(&per_cpu(ptcstats, cpu), 0, sizeof(struct ptc_stats)); return count; } static const struct seq_operations sn2_ptc_seq_ops = { .start = sn2_ptc_seq_start, .next = sn2_ptc_seq_next, .stop = sn2_ptc_seq_stop, .show = sn2_ptc_seq_show }; static int sn2_ptc_proc_open(struct inode *inode, struct file *file) { return seq_open(file, &sn2_ptc_seq_ops); } static const struct file_operations proc_sn2_ptc_operations = { .open = sn2_ptc_proc_open, .read = seq_read, .write = sn2_ptc_proc_write, .llseek = seq_lseek, .release = seq_release, }; static struct proc_dir_entry *proc_sn2_ptc; static int __init sn2_ptc_init(void) { if (!ia64_platform_is("sn2")) return 0; proc_sn2_ptc = proc_create(PTC_BASENAME, 0444, NULL, &proc_sn2_ptc_operations); if (!proc_sn2_ptc) { printk(KERN_ERR "unable to create %s proc entry", PTC_BASENAME); return -EINVAL; } 
spin_lock_init(&sn2_global_ptc_lock); return 0; } static void __exit sn2_ptc_exit(void) { remove_proc_entry(PTC_BASENAME, NULL); } module_init(sn2_ptc_init); module_exit(sn2_ptc_exit); #endif /* CONFIG_PROC_FS */
gpl-2.0
h0tw1r3/kernel_samsung_sghi717
drivers/block/mg_disk.c
8358
26577
/* * drivers/block/mg_disk.c * * Support for the mGine m[g]flash IO mode. * Based on legacy hd.c * * (c) 2008 mGine Co.,LTD * (c) 2008 unsik Kim <donari75@gmail.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/fs.h> #include <linux/blkdev.h> #include <linux/hdreg.h> #include <linux/ata.h> #include <linux/interrupt.h> #include <linux/delay.h> #include <linux/platform_device.h> #include <linux/gpio.h> #include <linux/mg_disk.h> #include <linux/slab.h> #define MG_RES_SEC (CONFIG_MG_DISK_RES << 1) /* name for block device */ #define MG_DISK_NAME "mgd" #define MG_DISK_MAJ 0 #define MG_DISK_MAX_PART 16 #define MG_SECTOR_SIZE 512 #define MG_MAX_SECTS 256 /* Register offsets */ #define MG_BUFF_OFFSET 0x8000 #define MG_REG_OFFSET 0xC000 #define MG_REG_FEATURE (MG_REG_OFFSET + 2) /* write case */ #define MG_REG_ERROR (MG_REG_OFFSET + 2) /* read case */ #define MG_REG_SECT_CNT (MG_REG_OFFSET + 4) #define MG_REG_SECT_NUM (MG_REG_OFFSET + 6) #define MG_REG_CYL_LOW (MG_REG_OFFSET + 8) #define MG_REG_CYL_HIGH (MG_REG_OFFSET + 0xA) #define MG_REG_DRV_HEAD (MG_REG_OFFSET + 0xC) #define MG_REG_COMMAND (MG_REG_OFFSET + 0xE) /* write case */ #define MG_REG_STATUS (MG_REG_OFFSET + 0xE) /* read case */ #define MG_REG_DRV_CTRL (MG_REG_OFFSET + 0x10) #define MG_REG_BURST_CTRL (MG_REG_OFFSET + 0x12) /* handy status */ #define MG_STAT_READY (ATA_DRDY | ATA_DSC) #define MG_READY_OK(s) (((s) & (MG_STAT_READY | (ATA_BUSY | ATA_DF | \ ATA_ERR))) == MG_STAT_READY) /* error code for others */ #define MG_ERR_NONE 0 #define MG_ERR_TIMEOUT 0x100 #define MG_ERR_INIT_STAT 0x101 #define MG_ERR_TRANSLATION 0x102 #define MG_ERR_CTRL_RST 0x103 #define MG_ERR_INV_STAT 0x104 #define MG_ERR_RSTOUT 0x105 #define MG_MAX_ERRORS 6 /* Max read/write errors */ /* command */ #define MG_CMD_RD 0x20 #define 
MG_CMD_WR 0x30 #define MG_CMD_SLEEP 0x99 #define MG_CMD_WAKEUP 0xC3 #define MG_CMD_ID 0xEC #define MG_CMD_WR_CONF 0x3C #define MG_CMD_RD_CONF 0x40 /* operation mode */ #define MG_OP_CASCADE (1 << 0) #define MG_OP_CASCADE_SYNC_RD (1 << 1) #define MG_OP_CASCADE_SYNC_WR (1 << 2) #define MG_OP_INTERLEAVE (1 << 3) /* synchronous */ #define MG_BURST_LAT_4 (3 << 4) #define MG_BURST_LAT_5 (4 << 4) #define MG_BURST_LAT_6 (5 << 4) #define MG_BURST_LAT_7 (6 << 4) #define MG_BURST_LAT_8 (7 << 4) #define MG_BURST_LEN_4 (1 << 1) #define MG_BURST_LEN_8 (2 << 1) #define MG_BURST_LEN_16 (3 << 1) #define MG_BURST_LEN_32 (4 << 1) #define MG_BURST_LEN_CONT (0 << 1) /* timeout value (unit: ms) */ #define MG_TMAX_CONF_TO_CMD 1 #define MG_TMAX_WAIT_RD_DRQ 10 #define MG_TMAX_WAIT_WR_DRQ 500 #define MG_TMAX_RST_TO_BUSY 10 #define MG_TMAX_HDRST_TO_RDY 500 #define MG_TMAX_SWRST_TO_RDY 500 #define MG_TMAX_RSTOUT 3000 #define MG_DEV_MASK (MG_BOOT_DEV | MG_STORAGE_DEV | MG_STORAGE_DEV_SKIP_RST) /* main structure for mflash driver */ struct mg_host { struct device *dev; struct request_queue *breq; struct request *req; spinlock_t lock; struct gendisk *gd; struct timer_list timer; void (*mg_do_intr) (struct mg_host *); u16 id[ATA_ID_WORDS]; u16 cyls; u16 heads; u16 sectors; u32 n_sectors; u32 nres_sectors; void __iomem *dev_base; unsigned int irq; unsigned int rst; unsigned int rstout; u32 major; u32 error; }; /* * Debugging macro and defines */ #undef DO_MG_DEBUG #ifdef DO_MG_DEBUG # define MG_DBG(fmt, args...) \ printk(KERN_DEBUG "%s:%d "fmt, __func__, __LINE__, ##args) #else /* CONFIG_MG_DEBUG */ # define MG_DBG(fmt, args...) 
do { } while (0) #endif /* CONFIG_MG_DEBUG */ static void mg_request(struct request_queue *); static bool mg_end_request(struct mg_host *host, int err, unsigned int nr_bytes) { if (__blk_end_request(host->req, err, nr_bytes)) return true; host->req = NULL; return false; } static bool mg_end_request_cur(struct mg_host *host, int err) { return mg_end_request(host, err, blk_rq_cur_bytes(host->req)); } static void mg_dump_status(const char *msg, unsigned int stat, struct mg_host *host) { char *name = MG_DISK_NAME; if (host->req) name = host->req->rq_disk->disk_name; printk(KERN_ERR "%s: %s: status=0x%02x { ", name, msg, stat & 0xff); if (stat & ATA_BUSY) printk("Busy "); if (stat & ATA_DRDY) printk("DriveReady "); if (stat & ATA_DF) printk("WriteFault "); if (stat & ATA_DSC) printk("SeekComplete "); if (stat & ATA_DRQ) printk("DataRequest "); if (stat & ATA_CORR) printk("CorrectedError "); if (stat & ATA_ERR) printk("Error "); printk("}\n"); if ((stat & ATA_ERR) == 0) { host->error = 0; } else { host->error = inb((unsigned long)host->dev_base + MG_REG_ERROR); printk(KERN_ERR "%s: %s: error=0x%02x { ", name, msg, host->error & 0xff); if (host->error & ATA_BBK) printk("BadSector "); if (host->error & ATA_UNC) printk("UncorrectableError "); if (host->error & ATA_IDNF) printk("SectorIdNotFound "); if (host->error & ATA_ABORTED) printk("DriveStatusError "); if (host->error & ATA_AMNF) printk("AddrMarkNotFound "); printk("}"); if (host->error & (ATA_BBK | ATA_UNC | ATA_IDNF | ATA_AMNF)) { if (host->req) printk(", sector=%u", (unsigned int)blk_rq_pos(host->req)); } printk("\n"); } } static unsigned int mg_wait(struct mg_host *host, u32 expect, u32 msec) { u8 status; unsigned long expire, cur_jiffies; struct mg_drv_data *prv_data = host->dev->platform_data; host->error = MG_ERR_NONE; expire = jiffies + msecs_to_jiffies(msec); /* These 2 times dummy status read prevents reading invalid * status. 
A very little time (3 times of mflash operating clk) * is required for busy bit is set. Use dummy read instead of * busy wait, because mflash's PLL is machine dependent. */ if (prv_data->use_polling) { status = inb((unsigned long)host->dev_base + MG_REG_STATUS); status = inb((unsigned long)host->dev_base + MG_REG_STATUS); } status = inb((unsigned long)host->dev_base + MG_REG_STATUS); do { cur_jiffies = jiffies; if (status & ATA_BUSY) { if (expect == ATA_BUSY) break; } else { /* Check the error condition! */ if (status & ATA_ERR) { mg_dump_status("mg_wait", status, host); break; } if (expect == MG_STAT_READY) if (MG_READY_OK(status)) break; if (expect == ATA_DRQ) if (status & ATA_DRQ) break; } if (!msec) { mg_dump_status("not ready", status, host); return MG_ERR_INV_STAT; } status = inb((unsigned long)host->dev_base + MG_REG_STATUS); } while (time_before(cur_jiffies, expire)); if (time_after_eq(cur_jiffies, expire) && msec) host->error = MG_ERR_TIMEOUT; return host->error; } static unsigned int mg_wait_rstout(u32 rstout, u32 msec) { unsigned long expire; expire = jiffies + msecs_to_jiffies(msec); while (time_before(jiffies, expire)) { if (gpio_get_value(rstout) == 1) return MG_ERR_NONE; msleep(10); } return MG_ERR_RSTOUT; } static void mg_unexpected_intr(struct mg_host *host) { u32 status = inb((unsigned long)host->dev_base + MG_REG_STATUS); mg_dump_status("mg_unexpected_intr", status, host); } static irqreturn_t mg_irq(int irq, void *dev_id) { struct mg_host *host = dev_id; void (*handler)(struct mg_host *) = host->mg_do_intr; spin_lock(&host->lock); host->mg_do_intr = NULL; del_timer(&host->timer); if (!handler) handler = mg_unexpected_intr; handler(host); spin_unlock(&host->lock); return IRQ_HANDLED; } /* local copy of ata_id_string() */ static void mg_id_string(const u16 *id, unsigned char *s, unsigned int ofs, unsigned int len) { unsigned int c; BUG_ON(len & 1); while (len > 0) { c = id[ofs] >> 8; *s = c; s++; c = id[ofs] & 0xff; *s = c; s++; ofs++; len -= 2; } 
} /* local copy of ata_id_c_string() */ static void mg_id_c_string(const u16 *id, unsigned char *s, unsigned int ofs, unsigned int len) { unsigned char *p; mg_id_string(id, s, ofs, len - 1); p = s + strnlen(s, len - 1); while (p > s && p[-1] == ' ') p--; *p = '\0'; } static int mg_get_disk_id(struct mg_host *host) { u32 i; s32 err; const u16 *id = host->id; struct mg_drv_data *prv_data = host->dev->platform_data; char fwrev[ATA_ID_FW_REV_LEN + 1]; char model[ATA_ID_PROD_LEN + 1]; char serial[ATA_ID_SERNO_LEN + 1]; if (!prv_data->use_polling) outb(ATA_NIEN, (unsigned long)host->dev_base + MG_REG_DRV_CTRL); outb(MG_CMD_ID, (unsigned long)host->dev_base + MG_REG_COMMAND); err = mg_wait(host, ATA_DRQ, MG_TMAX_WAIT_RD_DRQ); if (err) return err; for (i = 0; i < (MG_SECTOR_SIZE >> 1); i++) host->id[i] = le16_to_cpu(inw((unsigned long)host->dev_base + MG_BUFF_OFFSET + i * 2)); outb(MG_CMD_RD_CONF, (unsigned long)host->dev_base + MG_REG_COMMAND); err = mg_wait(host, MG_STAT_READY, MG_TMAX_CONF_TO_CMD); if (err) return err; if ((id[ATA_ID_FIELD_VALID] & 1) == 0) return MG_ERR_TRANSLATION; host->n_sectors = ata_id_u32(id, ATA_ID_LBA_CAPACITY); host->cyls = id[ATA_ID_CYLS]; host->heads = id[ATA_ID_HEADS]; host->sectors = id[ATA_ID_SECTORS]; if (MG_RES_SEC && host->heads && host->sectors) { /* modify cyls, n_sectors */ host->cyls = (host->n_sectors - MG_RES_SEC) / host->heads / host->sectors; host->nres_sectors = host->n_sectors - host->cyls * host->heads * host->sectors; host->n_sectors -= host->nres_sectors; } mg_id_c_string(id, fwrev, ATA_ID_FW_REV, sizeof(fwrev)); mg_id_c_string(id, model, ATA_ID_PROD, sizeof(model)); mg_id_c_string(id, serial, ATA_ID_SERNO, sizeof(serial)); printk(KERN_INFO "mg_disk: model: %s\n", model); printk(KERN_INFO "mg_disk: firm: %.8s\n", fwrev); printk(KERN_INFO "mg_disk: serial: %s\n", serial); printk(KERN_INFO "mg_disk: %d + reserved %d sectors\n", host->n_sectors, host->nres_sectors); if (!prv_data->use_polling) outb(0, (unsigned 
long)host->dev_base + MG_REG_DRV_CTRL); return err; } static int mg_disk_init(struct mg_host *host) { struct mg_drv_data *prv_data = host->dev->platform_data; s32 err; u8 init_status; /* hdd rst low */ gpio_set_value(host->rst, 0); err = mg_wait(host, ATA_BUSY, MG_TMAX_RST_TO_BUSY); if (err) return err; /* hdd rst high */ gpio_set_value(host->rst, 1); err = mg_wait(host, MG_STAT_READY, MG_TMAX_HDRST_TO_RDY); if (err) return err; /* soft reset on */ outb(ATA_SRST | (prv_data->use_polling ? ATA_NIEN : 0), (unsigned long)host->dev_base + MG_REG_DRV_CTRL); err = mg_wait(host, ATA_BUSY, MG_TMAX_RST_TO_BUSY); if (err) return err; /* soft reset off */ outb(prv_data->use_polling ? ATA_NIEN : 0, (unsigned long)host->dev_base + MG_REG_DRV_CTRL); err = mg_wait(host, MG_STAT_READY, MG_TMAX_SWRST_TO_RDY); if (err) return err; init_status = inb((unsigned long)host->dev_base + MG_REG_STATUS) & 0xf; if (init_status == 0xf) return MG_ERR_INIT_STAT; return err; } static void mg_bad_rw_intr(struct mg_host *host) { if (host->req) if (++host->req->errors >= MG_MAX_ERRORS || host->error == MG_ERR_TIMEOUT) mg_end_request_cur(host, -EIO); } static unsigned int mg_out(struct mg_host *host, unsigned int sect_num, unsigned int sect_cnt, unsigned int cmd, void (*intr_addr)(struct mg_host *)) { struct mg_drv_data *prv_data = host->dev->platform_data; if (mg_wait(host, MG_STAT_READY, MG_TMAX_CONF_TO_CMD)) return host->error; if (!prv_data->use_polling) { host->mg_do_intr = intr_addr; mod_timer(&host->timer, jiffies + 3 * HZ); } if (MG_RES_SEC) sect_num += MG_RES_SEC; outb((u8)sect_cnt, (unsigned long)host->dev_base + MG_REG_SECT_CNT); outb((u8)sect_num, (unsigned long)host->dev_base + MG_REG_SECT_NUM); outb((u8)(sect_num >> 8), (unsigned long)host->dev_base + MG_REG_CYL_LOW); outb((u8)(sect_num >> 16), (unsigned long)host->dev_base + MG_REG_CYL_HIGH); outb((u8)((sect_num >> 24) | ATA_LBA | ATA_DEVICE_OBS), (unsigned long)host->dev_base + MG_REG_DRV_HEAD); outb(cmd, (unsigned long)host->dev_base 
+ MG_REG_COMMAND); return MG_ERR_NONE; } static void mg_read_one(struct mg_host *host, struct request *req) { u16 *buff = (u16 *)req->buffer; u32 i; for (i = 0; i < MG_SECTOR_SIZE >> 1; i++) *buff++ = inw((unsigned long)host->dev_base + MG_BUFF_OFFSET + (i << 1)); } static void mg_read(struct request *req) { struct mg_host *host = req->rq_disk->private_data; if (mg_out(host, blk_rq_pos(req), blk_rq_sectors(req), MG_CMD_RD, NULL) != MG_ERR_NONE) mg_bad_rw_intr(host); MG_DBG("requested %d sects (from %ld), buffer=0x%p\n", blk_rq_sectors(req), blk_rq_pos(req), req->buffer); do { if (mg_wait(host, ATA_DRQ, MG_TMAX_WAIT_RD_DRQ) != MG_ERR_NONE) { mg_bad_rw_intr(host); return; } mg_read_one(host, req); outb(MG_CMD_RD_CONF, (unsigned long)host->dev_base + MG_REG_COMMAND); } while (mg_end_request(host, 0, MG_SECTOR_SIZE)); } static void mg_write_one(struct mg_host *host, struct request *req) { u16 *buff = (u16 *)req->buffer; u32 i; for (i = 0; i < MG_SECTOR_SIZE >> 1; i++) outw(*buff++, (unsigned long)host->dev_base + MG_BUFF_OFFSET + (i << 1)); } static void mg_write(struct request *req) { struct mg_host *host = req->rq_disk->private_data; unsigned int rem = blk_rq_sectors(req); if (mg_out(host, blk_rq_pos(req), rem, MG_CMD_WR, NULL) != MG_ERR_NONE) { mg_bad_rw_intr(host); return; } MG_DBG("requested %d sects (from %ld), buffer=0x%p\n", rem, blk_rq_pos(req), req->buffer); if (mg_wait(host, ATA_DRQ, MG_TMAX_WAIT_WR_DRQ) != MG_ERR_NONE) { mg_bad_rw_intr(host); return; } do { mg_write_one(host, req); outb(MG_CMD_WR_CONF, (unsigned long)host->dev_base + MG_REG_COMMAND); rem--; if (rem > 1 && mg_wait(host, ATA_DRQ, MG_TMAX_WAIT_WR_DRQ) != MG_ERR_NONE) { mg_bad_rw_intr(host); return; } else if (mg_wait(host, MG_STAT_READY, MG_TMAX_WAIT_WR_DRQ) != MG_ERR_NONE) { mg_bad_rw_intr(host); return; } } while (mg_end_request(host, 0, MG_SECTOR_SIZE)); } static void mg_read_intr(struct mg_host *host) { struct request *req = host->req; u32 i; /* check status */ do { i = inb((unsigned 
long)host->dev_base + MG_REG_STATUS); if (i & ATA_BUSY) break; if (!MG_READY_OK(i)) break; if (i & ATA_DRQ) goto ok_to_read; } while (0); mg_dump_status("mg_read_intr", i, host); mg_bad_rw_intr(host); mg_request(host->breq); return; ok_to_read: mg_read_one(host, req); MG_DBG("sector %ld, remaining=%ld, buffer=0x%p\n", blk_rq_pos(req), blk_rq_sectors(req) - 1, req->buffer); /* send read confirm */ outb(MG_CMD_RD_CONF, (unsigned long)host->dev_base + MG_REG_COMMAND); if (mg_end_request(host, 0, MG_SECTOR_SIZE)) { /* set handler if read remains */ host->mg_do_intr = mg_read_intr; mod_timer(&host->timer, jiffies + 3 * HZ); } else /* goto next request */ mg_request(host->breq); } static void mg_write_intr(struct mg_host *host) { struct request *req = host->req; u32 i; bool rem; /* check status */ do { i = inb((unsigned long)host->dev_base + MG_REG_STATUS); if (i & ATA_BUSY) break; if (!MG_READY_OK(i)) break; if ((blk_rq_sectors(req) <= 1) || (i & ATA_DRQ)) goto ok_to_write; } while (0); mg_dump_status("mg_write_intr", i, host); mg_bad_rw_intr(host); mg_request(host->breq); return; ok_to_write: if ((rem = mg_end_request(host, 0, MG_SECTOR_SIZE))) { /* write 1 sector and set handler if remains */ mg_write_one(host, req); MG_DBG("sector %ld, remaining=%ld, buffer=0x%p\n", blk_rq_pos(req), blk_rq_sectors(req), req->buffer); host->mg_do_intr = mg_write_intr; mod_timer(&host->timer, jiffies + 3 * HZ); } /* send write confirm */ outb(MG_CMD_WR_CONF, (unsigned long)host->dev_base + MG_REG_COMMAND); if (!rem) mg_request(host->breq); } void mg_times_out(unsigned long data) { struct mg_host *host = (struct mg_host *)data; char *name; spin_lock_irq(&host->lock); if (!host->req) goto out_unlock; host->mg_do_intr = NULL; name = host->req->rq_disk->disk_name; printk(KERN_DEBUG "%s: timeout\n", name); host->error = MG_ERR_TIMEOUT; mg_bad_rw_intr(host); out_unlock: mg_request(host->breq); spin_unlock_irq(&host->lock); } static void mg_request_poll(struct request_queue *q) { struct 
mg_host *host = q->queuedata; while (1) { if (!host->req) { host->req = blk_fetch_request(q); if (!host->req) break; } if (unlikely(host->req->cmd_type != REQ_TYPE_FS)) { mg_end_request_cur(host, -EIO); continue; } if (rq_data_dir(host->req) == READ) mg_read(host->req); else mg_write(host->req); } } static unsigned int mg_issue_req(struct request *req, struct mg_host *host, unsigned int sect_num, unsigned int sect_cnt) { switch (rq_data_dir(req)) { case READ: if (mg_out(host, sect_num, sect_cnt, MG_CMD_RD, &mg_read_intr) != MG_ERR_NONE) { mg_bad_rw_intr(host); return host->error; } break; case WRITE: /* TODO : handler */ outb(ATA_NIEN, (unsigned long)host->dev_base + MG_REG_DRV_CTRL); if (mg_out(host, sect_num, sect_cnt, MG_CMD_WR, &mg_write_intr) != MG_ERR_NONE) { mg_bad_rw_intr(host); return host->error; } del_timer(&host->timer); mg_wait(host, ATA_DRQ, MG_TMAX_WAIT_WR_DRQ); outb(0, (unsigned long)host->dev_base + MG_REG_DRV_CTRL); if (host->error) { mg_bad_rw_intr(host); return host->error; } mg_write_one(host, req); mod_timer(&host->timer, jiffies + 3 * HZ); outb(MG_CMD_WR_CONF, (unsigned long)host->dev_base + MG_REG_COMMAND); break; } return MG_ERR_NONE; } /* This function also called from IRQ context */ static void mg_request(struct request_queue *q) { struct mg_host *host = q->queuedata; struct request *req; u32 sect_num, sect_cnt; while (1) { if (!host->req) { host->req = blk_fetch_request(q); if (!host->req) break; } req = host->req; /* check unwanted request call */ if (host->mg_do_intr) return; del_timer(&host->timer); sect_num = blk_rq_pos(req); /* deal whole segments */ sect_cnt = blk_rq_sectors(req); /* sanity check */ if (sect_num >= get_capacity(req->rq_disk) || ((sect_num + sect_cnt) > get_capacity(req->rq_disk))) { printk(KERN_WARNING "%s: bad access: sector=%d, count=%d\n", req->rq_disk->disk_name, sect_num, sect_cnt); mg_end_request_cur(host, -EIO); continue; } if (unlikely(req->cmd_type != REQ_TYPE_FS)) { mg_end_request_cur(host, -EIO); 
continue; } if (!mg_issue_req(req, host, sect_num, sect_cnt)) return; } } static int mg_getgeo(struct block_device *bdev, struct hd_geometry *geo) { struct mg_host *host = bdev->bd_disk->private_data; geo->cylinders = (unsigned short)host->cyls; geo->heads = (unsigned char)host->heads; geo->sectors = (unsigned char)host->sectors; return 0; } static const struct block_device_operations mg_disk_ops = { .getgeo = mg_getgeo }; static int mg_suspend(struct platform_device *plat_dev, pm_message_t state) { struct mg_drv_data *prv_data = plat_dev->dev.platform_data; struct mg_host *host = prv_data->host; if (mg_wait(host, MG_STAT_READY, MG_TMAX_CONF_TO_CMD)) return -EIO; if (!prv_data->use_polling) outb(ATA_NIEN, (unsigned long)host->dev_base + MG_REG_DRV_CTRL); outb(MG_CMD_SLEEP, (unsigned long)host->dev_base + MG_REG_COMMAND); /* wait until mflash deep sleep */ msleep(1); if (mg_wait(host, MG_STAT_READY, MG_TMAX_CONF_TO_CMD)) { if (!prv_data->use_polling) outb(0, (unsigned long)host->dev_base + MG_REG_DRV_CTRL); return -EIO; } return 0; } static int mg_resume(struct platform_device *plat_dev) { struct mg_drv_data *prv_data = plat_dev->dev.platform_data; struct mg_host *host = prv_data->host; if (mg_wait(host, MG_STAT_READY, MG_TMAX_CONF_TO_CMD)) return -EIO; outb(MG_CMD_WAKEUP, (unsigned long)host->dev_base + MG_REG_COMMAND); /* wait until mflash wakeup */ msleep(1); if (mg_wait(host, MG_STAT_READY, MG_TMAX_CONF_TO_CMD)) return -EIO; if (!prv_data->use_polling) outb(0, (unsigned long)host->dev_base + MG_REG_DRV_CTRL); return 0; } static int mg_probe(struct platform_device *plat_dev) { struct mg_host *host; struct resource *rsc; struct mg_drv_data *prv_data = plat_dev->dev.platform_data; int err = 0; if (!prv_data) { printk(KERN_ERR "%s:%d fail (no driver_data)\n", __func__, __LINE__); err = -EINVAL; goto probe_err; } /* alloc mg_host */ host = kzalloc(sizeof(struct mg_host), GFP_KERNEL); if (!host) { printk(KERN_ERR "%s:%d fail (no memory for mg_host)\n", __func__, 
__LINE__); err = -ENOMEM; goto probe_err; } host->major = MG_DISK_MAJ; /* link each other */ prv_data->host = host; host->dev = &plat_dev->dev; /* io remap */ rsc = platform_get_resource(plat_dev, IORESOURCE_MEM, 0); if (!rsc) { printk(KERN_ERR "%s:%d platform_get_resource fail\n", __func__, __LINE__); err = -EINVAL; goto probe_err_2; } host->dev_base = ioremap(rsc->start, resource_size(rsc)); if (!host->dev_base) { printk(KERN_ERR "%s:%d ioremap fail\n", __func__, __LINE__); err = -EIO; goto probe_err_2; } MG_DBG("dev_base = 0x%x\n", (u32)host->dev_base); /* get reset pin */ rsc = platform_get_resource_byname(plat_dev, IORESOURCE_IO, MG_RST_PIN); if (!rsc) { printk(KERN_ERR "%s:%d get reset pin fail\n", __func__, __LINE__); err = -EIO; goto probe_err_3; } host->rst = rsc->start; /* init rst pin */ err = gpio_request(host->rst, MG_RST_PIN); if (err) goto probe_err_3; gpio_direction_output(host->rst, 1); /* reset out pin */ if (!(prv_data->dev_attr & MG_DEV_MASK)) goto probe_err_3a; if (prv_data->dev_attr != MG_BOOT_DEV) { rsc = platform_get_resource_byname(plat_dev, IORESOURCE_IO, MG_RSTOUT_PIN); if (!rsc) { printk(KERN_ERR "%s:%d get reset-out pin fail\n", __func__, __LINE__); err = -EIO; goto probe_err_3a; } host->rstout = rsc->start; err = gpio_request(host->rstout, MG_RSTOUT_PIN); if (err) goto probe_err_3a; gpio_direction_input(host->rstout); } /* disk reset */ if (prv_data->dev_attr == MG_STORAGE_DEV) { /* If POR seq. 
not yet finised, wait */ err = mg_wait_rstout(host->rstout, MG_TMAX_RSTOUT); if (err) goto probe_err_3b; err = mg_disk_init(host); if (err) { printk(KERN_ERR "%s:%d fail (err code : %d)\n", __func__, __LINE__, err); err = -EIO; goto probe_err_3b; } } /* get irq resource */ if (!prv_data->use_polling) { host->irq = platform_get_irq(plat_dev, 0); if (host->irq == -ENXIO) { err = host->irq; goto probe_err_3b; } err = request_irq(host->irq, mg_irq, IRQF_DISABLED | IRQF_TRIGGER_RISING, MG_DEV_NAME, host); if (err) { printk(KERN_ERR "%s:%d fail (request_irq err=%d)\n", __func__, __LINE__, err); goto probe_err_3b; } } /* get disk id */ err = mg_get_disk_id(host); if (err) { printk(KERN_ERR "%s:%d fail (err code : %d)\n", __func__, __LINE__, err); err = -EIO; goto probe_err_4; } err = register_blkdev(host->major, MG_DISK_NAME); if (err < 0) { printk(KERN_ERR "%s:%d register_blkdev fail (err code : %d)\n", __func__, __LINE__, err); goto probe_err_4; } if (!host->major) host->major = err; spin_lock_init(&host->lock); if (prv_data->use_polling) host->breq = blk_init_queue(mg_request_poll, &host->lock); else host->breq = blk_init_queue(mg_request, &host->lock); if (!host->breq) { err = -ENOMEM; printk(KERN_ERR "%s:%d (blk_init_queue) fail\n", __func__, __LINE__); goto probe_err_5; } host->breq->queuedata = host; /* mflash is random device, thanx for the noop */ err = elevator_change(host->breq, "noop"); if (err) { printk(KERN_ERR "%s:%d (elevator_init) fail\n", __func__, __LINE__); goto probe_err_6; } blk_queue_max_hw_sectors(host->breq, MG_MAX_SECTS); blk_queue_logical_block_size(host->breq, MG_SECTOR_SIZE); init_timer(&host->timer); host->timer.function = mg_times_out; host->timer.data = (unsigned long)host; host->gd = alloc_disk(MG_DISK_MAX_PART); if (!host->gd) { printk(KERN_ERR "%s:%d (alloc_disk) fail\n", __func__, __LINE__); err = -ENOMEM; goto probe_err_7; } host->gd->major = host->major; host->gd->first_minor = 0; host->gd->fops = &mg_disk_ops; host->gd->queue = 
host->breq; host->gd->private_data = host; sprintf(host->gd->disk_name, MG_DISK_NAME"a"); set_capacity(host->gd, host->n_sectors); add_disk(host->gd); return err; probe_err_7: del_timer_sync(&host->timer); probe_err_6: blk_cleanup_queue(host->breq); probe_err_5: unregister_blkdev(MG_DISK_MAJ, MG_DISK_NAME); probe_err_4: if (!prv_data->use_polling) free_irq(host->irq, host); probe_err_3b: gpio_free(host->rstout); probe_err_3a: gpio_free(host->rst); probe_err_3: iounmap(host->dev_base); probe_err_2: kfree(host); probe_err: return err; } static int mg_remove(struct platform_device *plat_dev) { struct mg_drv_data *prv_data = plat_dev->dev.platform_data; struct mg_host *host = prv_data->host; int err = 0; /* delete timer */ del_timer_sync(&host->timer); /* remove disk */ if (host->gd) { del_gendisk(host->gd); put_disk(host->gd); } /* remove queue */ if (host->breq) blk_cleanup_queue(host->breq); /* unregister blk device */ unregister_blkdev(host->major, MG_DISK_NAME); /* free irq */ if (!prv_data->use_polling) free_irq(host->irq, host); /* free reset-out pin */ if (prv_data->dev_attr != MG_BOOT_DEV) gpio_free(host->rstout); /* free rst pin */ if (host->rst) gpio_free(host->rst); /* unmap io */ if (host->dev_base) iounmap(host->dev_base); /* free mg_host */ kfree(host); return err; } static struct platform_driver mg_disk_driver = { .probe = mg_probe, .remove = mg_remove, .suspend = mg_suspend, .resume = mg_resume, .driver = { .name = MG_DEV_NAME, .owner = THIS_MODULE, } }; /**************************************************************************** * * Module stuff * ****************************************************************************/ static int __init mg_init(void) { printk(KERN_INFO "mGine mflash driver, (c) 2008 mGine Co.\n"); return platform_driver_register(&mg_disk_driver); } static void __exit mg_exit(void) { printk(KERN_INFO "mflash driver : bye bye\n"); platform_driver_unregister(&mg_disk_driver); } module_init(mg_init); module_exit(mg_exit); 
MODULE_LICENSE("GPL"); MODULE_AUTHOR("unsik Kim <donari75@gmail.com>"); MODULE_DESCRIPTION("mGine m[g]flash device driver");
gpl-2.0
balika011/android_kernel_lenovo_spark
arch/x86/mm/kmemcheck/opcode.c
12710
1920
#include <linux/types.h> #include "opcode.h" static bool opcode_is_prefix(uint8_t b) { return /* Group 1 */ b == 0xf0 || b == 0xf2 || b == 0xf3 /* Group 2 */ || b == 0x2e || b == 0x36 || b == 0x3e || b == 0x26 || b == 0x64 || b == 0x65 /* Group 3 */ || b == 0x66 /* Group 4 */ || b == 0x67; } #ifdef CONFIG_X86_64 static bool opcode_is_rex_prefix(uint8_t b) { return (b & 0xf0) == 0x40; } #else static bool opcode_is_rex_prefix(uint8_t b) { return false; } #endif #define REX_W (1 << 3) /* * This is a VERY crude opcode decoder. We only need to find the size of the * load/store that caused our #PF and this should work for all the opcodes * that we care about. Moreover, the ones who invented this instruction set * should be shot. */ void kmemcheck_opcode_decode(const uint8_t *op, unsigned int *size) { /* Default operand size */ int operand_size_override = 4; /* prefixes */ for (; opcode_is_prefix(*op); ++op) { if (*op == 0x66) operand_size_override = 2; } /* REX prefix */ if (opcode_is_rex_prefix(*op)) { uint8_t rex = *op; ++op; if (rex & REX_W) { switch (*op) { case 0x63: *size = 4; return; case 0x0f: ++op; switch (*op) { case 0xb6: case 0xbe: *size = 1; return; case 0xb7: case 0xbf: *size = 2; return; } break; } *size = 8; return; } } /* escape opcode */ if (*op == 0x0f) { ++op; /* * This is move with zero-extend and sign-extend, respectively; * we don't have to think about 0xb6/0xbe, because this is * already handled in the conditional below. */ if (*op == 0xb7 || *op == 0xbf) operand_size_override = 2; } *size = (*op & 1) ? operand_size_override : 1; } const uint8_t *kmemcheck_opcode_get_primary(const uint8_t *op) { /* skip prefixes */ while (opcode_is_prefix(*op)) ++op; if (opcode_is_rex_prefix(*op)) ++op; return op; }
gpl-2.0
110440/fastsocket
kernel/sound/isa/gus/gus_mixer.c
13734
6149
/* * Copyright (c) by Jaroslav Kysela <perex@perex.cz> * Routines for control of ICS 2101 chip and "mixer" in GF1 chip * * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/time.h> #include <linux/wait.h> #include <sound/core.h> #include <sound/control.h> #include <sound/gus.h> /* * */ #define GF1_SINGLE(xname, xindex, shift, invert) \ { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, .index = xindex, \ .info = snd_gf1_info_single, \ .get = snd_gf1_get_single, .put = snd_gf1_put_single, \ .private_value = shift | (invert << 8) } #define snd_gf1_info_single snd_ctl_boolean_mono_info static int snd_gf1_get_single(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_gus_card *gus = snd_kcontrol_chip(kcontrol); int shift = kcontrol->private_value & 0xff; int invert = (kcontrol->private_value >> 8) & 1; ucontrol->value.integer.value[0] = (gus->mix_cntrl_reg >> shift) & 1; if (invert) ucontrol->value.integer.value[0] ^= 1; return 0; } static int snd_gf1_put_single(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_gus_card *gus = snd_kcontrol_chip(kcontrol); unsigned long flags; int shift = kcontrol->private_value & 0xff; int invert = (kcontrol->private_value >> 8) & 1; int change; unsigned char oval, nval; nval = ucontrol->value.integer.value[0] & 1; if 
(invert) nval ^= 1; nval <<= shift; spin_lock_irqsave(&gus->reg_lock, flags); oval = gus->mix_cntrl_reg; nval = (oval & ~(1 << shift)) | nval; change = nval != oval; outb(gus->mix_cntrl_reg = nval, GUSP(gus, MIXCNTRLREG)); outb(gus->gf1.active_voice = 0, GUSP(gus, GF1PAGE)); spin_unlock_irqrestore(&gus->reg_lock, flags); return change; } #define ICS_DOUBLE(xname, xindex, addr) \ { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, .index = xindex, \ .info = snd_ics_info_double, \ .get = snd_ics_get_double, .put = snd_ics_put_double, \ .private_value = addr } static int snd_ics_info_double(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER; uinfo->count = 2; uinfo->value.integer.min = 0; uinfo->value.integer.max = 127; return 0; } static int snd_ics_get_double(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_gus_card *gus = snd_kcontrol_chip(kcontrol); unsigned long flags; int addr = kcontrol->private_value & 0xff; unsigned char left, right; spin_lock_irqsave(&gus->reg_lock, flags); left = gus->gf1.ics_regs[addr][0]; right = gus->gf1.ics_regs[addr][1]; spin_unlock_irqrestore(&gus->reg_lock, flags); ucontrol->value.integer.value[0] = left & 127; ucontrol->value.integer.value[1] = right & 127; return 0; } static int snd_ics_put_double(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_gus_card *gus = snd_kcontrol_chip(kcontrol); unsigned long flags; int addr = kcontrol->private_value & 0xff; int change; unsigned char val1, val2, oval1, oval2, tmp; val1 = ucontrol->value.integer.value[0] & 127; val2 = ucontrol->value.integer.value[1] & 127; spin_lock_irqsave(&gus->reg_lock, flags); oval1 = gus->gf1.ics_regs[addr][0]; oval2 = gus->gf1.ics_regs[addr][1]; change = val1 != oval1 || val2 != oval2; gus->gf1.ics_regs[addr][0] = val1; gus->gf1.ics_regs[addr][1] = val2; if (gus->ics_flag && gus->ics_flipped && (addr == SNDRV_ICS_GF1_DEV || addr == 
SNDRV_ICS_MASTER_DEV)) { tmp = val1; val1 = val2; val2 = tmp; } addr <<= 3; outb(addr | 0, GUSP(gus, MIXCNTRLPORT)); outb(1, GUSP(gus, MIXDATAPORT)); outb(addr | 2, GUSP(gus, MIXCNTRLPORT)); outb((unsigned char) val1, GUSP(gus, MIXDATAPORT)); outb(addr | 1, GUSP(gus, MIXCNTRLPORT)); outb(2, GUSP(gus, MIXDATAPORT)); outb(addr | 3, GUSP(gus, MIXCNTRLPORT)); outb((unsigned char) val2, GUSP(gus, MIXDATAPORT)); spin_unlock_irqrestore(&gus->reg_lock, flags); return change; } static struct snd_kcontrol_new snd_gf1_controls[] = { GF1_SINGLE("Master Playback Switch", 0, 1, 1), GF1_SINGLE("Line Switch", 0, 0, 1), GF1_SINGLE("Mic Switch", 0, 2, 0) }; static struct snd_kcontrol_new snd_ics_controls[] = { GF1_SINGLE("Master Playback Switch", 0, 1, 1), ICS_DOUBLE("Master Playback Volume", 0, SNDRV_ICS_MASTER_DEV), ICS_DOUBLE("Synth Playback Volume", 0, SNDRV_ICS_GF1_DEV), GF1_SINGLE("Line Switch", 0, 0, 1), ICS_DOUBLE("Line Playback Volume", 0, SNDRV_ICS_LINE_DEV), GF1_SINGLE("Mic Switch", 0, 2, 0), ICS_DOUBLE("Mic Playback Volume", 0, SNDRV_ICS_MIC_DEV), ICS_DOUBLE("CD Playback Volume", 0, SNDRV_ICS_CD_DEV) }; int snd_gf1_new_mixer(struct snd_gus_card * gus) { struct snd_card *card; unsigned int idx, max; int err; if (snd_BUG_ON(!gus)) return -EINVAL; card = gus->card; if (snd_BUG_ON(!card)) return -EINVAL; if (gus->ics_flag) snd_component_add(card, "ICS2101"); if (card->mixername[0] == '\0') { strcpy(card->mixername, gus->ics_flag ? "GF1,ICS2101" : "GF1"); } else { if (gus->ics_flag) strcat(card->mixername, ",ICS2101"); strcat(card->mixername, ",GF1"); } if (!gus->ics_flag) { max = gus->ess_flag ? 1 : ARRAY_SIZE(snd_gf1_controls); for (idx = 0; idx < max; idx++) { if ((err = snd_ctl_add(card, snd_ctl_new1(&snd_gf1_controls[idx], gus))) < 0) return err; } } else { for (idx = 0; idx < ARRAY_SIZE(snd_ics_controls); idx++) { if ((err = snd_ctl_add(card, snd_ctl_new1(&snd_ics_controls[idx], gus))) < 0) return err; } } return 0; }
gpl-2.0
TeamWin/android_kernel_samsung_goyave
sound/pci/au88x0/au88x0_a3ddata.c
15014
2908
/*************************************************************************** * au88x0_a3ddata.c * * Wed Nov 19 21:11:32 2003 * Copyright 2003 mjander * mjander@users.sourceforge.org ****************************************************************************/ /* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Library General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* Constant initializer values. 
*/ static const a3d_Hrtf_t A3dHrirZeros = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; static const a3d_Hrtf_t A3dHrirImpulse = { 0x7fff, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; static const a3d_Hrtf_t A3dHrirOnes = { 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff }; static const a3d_Hrtf_t A3dHrirSatTest = { 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x7fff, 0x8001, 0x8001, 0x8001, 0x8001, 0x8001, 0x8001, 0x8001, 0x8001, 0x8001, 0x8001, 0x7fff, 0x0000, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; static const a3d_Hrtf_t A3dHrirDImpulse = { 0, 0x7fff, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; static const a3d_ItdDline_t A3dItdDlineZeros = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; static short const GainTCDefault = 0x300; static short const ItdTCDefault = 0x0C8; static short const HrtfTCDefault = 0x147; static short const CoefTCDefault = 0x300;
gpl-2.0
shirishpargaonkar/cifsclient
drivers/staging/iio/accel/lis3l02dq_ring.c
423
10478
#include <linux/interrupt.h> #include <linux/gpio.h> #include <linux/mutex.h> #include <linux/kernel.h> #include <linux/spi/spi.h> #include <linux/slab.h> #include <linux/export.h> #include <linux/iio/iio.h> #include <linux/iio/kfifo_buf.h> #include <linux/iio/trigger.h> #include <linux/iio/trigger_consumer.h> #include "lis3l02dq.h" /** * combine_8_to_16() utility function to munge two u8s into u16 **/ static inline u16 combine_8_to_16(u8 lower, u8 upper) { u16 _lower = lower; u16 _upper = upper; return _lower | (_upper << 8); } /** * lis3l02dq_data_rdy_trig_poll() the event handler for the data rdy trig **/ irqreturn_t lis3l02dq_data_rdy_trig_poll(int irq, void *private) { struct iio_dev *indio_dev = private; struct lis3l02dq_state *st = iio_priv(indio_dev); if (st->trigger_on) { iio_trigger_poll(st->trig, iio_get_time_ns()); return IRQ_HANDLED; } else return IRQ_WAKE_THREAD; } static const u8 read_all_tx_array[] = { LIS3L02DQ_READ_REG(LIS3L02DQ_REG_OUT_X_L_ADDR), 0, LIS3L02DQ_READ_REG(LIS3L02DQ_REG_OUT_X_H_ADDR), 0, LIS3L02DQ_READ_REG(LIS3L02DQ_REG_OUT_Y_L_ADDR), 0, LIS3L02DQ_READ_REG(LIS3L02DQ_REG_OUT_Y_H_ADDR), 0, LIS3L02DQ_READ_REG(LIS3L02DQ_REG_OUT_Z_L_ADDR), 0, LIS3L02DQ_READ_REG(LIS3L02DQ_REG_OUT_Z_H_ADDR), 0, }; /** * lis3l02dq_read_all() Reads all channels currently selected * @indio_dev: IIO device state * @rx_array: (dma capable) receive array, must be at least * 4*number of channels **/ static int lis3l02dq_read_all(struct iio_dev *indio_dev, u8 *rx_array) { struct lis3l02dq_state *st = iio_priv(indio_dev); struct spi_transfer *xfers; struct spi_message msg; int ret, i, j = 0; xfers = kcalloc(bitmap_weight(indio_dev->active_scan_mask, indio_dev->masklength) * 2, sizeof(*xfers), GFP_KERNEL); if (!xfers) return -ENOMEM; mutex_lock(&st->buf_lock); for (i = 0; i < ARRAY_SIZE(read_all_tx_array)/4; i++) if (test_bit(i, indio_dev->active_scan_mask)) { /* lower byte */ xfers[j].tx_buf = st->tx + 2*j; st->tx[2*j] = read_all_tx_array[i*4]; st->tx[2*j + 1] = 0; 
if (rx_array) xfers[j].rx_buf = rx_array + j*2; xfers[j].bits_per_word = 8; xfers[j].len = 2; xfers[j].cs_change = 1; j++; /* upper byte */ xfers[j].tx_buf = st->tx + 2*j; st->tx[2*j] = read_all_tx_array[i*4 + 2]; st->tx[2*j + 1] = 0; if (rx_array) xfers[j].rx_buf = rx_array + j*2; xfers[j].bits_per_word = 8; xfers[j].len = 2; xfers[j].cs_change = 1; j++; } /* After these are transmitted, the rx_buff should have * values in alternate bytes */ spi_message_init(&msg); for (j = 0; j < bitmap_weight(indio_dev->active_scan_mask, indio_dev->masklength) * 2; j++) spi_message_add_tail(&xfers[j], &msg); ret = spi_sync(st->us, &msg); mutex_unlock(&st->buf_lock); kfree(xfers); return ret; } static int lis3l02dq_get_buffer_element(struct iio_dev *indio_dev, u8 *buf) { int ret, i; u8 *rx_array; s16 *data = (s16 *)buf; int scan_count = bitmap_weight(indio_dev->active_scan_mask, indio_dev->masklength); rx_array = kzalloc(4 * scan_count, GFP_KERNEL); if (rx_array == NULL) return -ENOMEM; ret = lis3l02dq_read_all(indio_dev, rx_array); if (ret < 0) { kfree(rx_array); return ret; } for (i = 0; i < scan_count; i++) data[i] = combine_8_to_16(rx_array[i*4+1], rx_array[i*4+3]); kfree(rx_array); return i*sizeof(data[0]); } static irqreturn_t lis3l02dq_trigger_handler(int irq, void *p) { struct iio_poll_func *pf = p; struct iio_dev *indio_dev = pf->indio_dev; int len = 0; char *data; data = kmalloc(indio_dev->scan_bytes, GFP_KERNEL); if (data == NULL) goto done; if (!bitmap_empty(indio_dev->active_scan_mask, indio_dev->masklength)) len = lis3l02dq_get_buffer_element(indio_dev, data); iio_push_to_buffers_with_timestamp(indio_dev, data, pf->timestamp); kfree(data); done: iio_trigger_notify_done(indio_dev->trig); return IRQ_HANDLED; } /* Caller responsible for locking as necessary. 
*/ static int __lis3l02dq_write_data_ready_config(struct iio_dev *indio_dev, bool state) { int ret; u8 valold; bool currentlyset; struct lis3l02dq_state *st = iio_priv(indio_dev); /* Get the current event mask register */ ret = lis3l02dq_spi_read_reg_8(indio_dev, LIS3L02DQ_REG_CTRL_2_ADDR, &valold); if (ret) goto error_ret; /* Find out if data ready is already on */ currentlyset = valold & LIS3L02DQ_REG_CTRL_2_ENABLE_DATA_READY_GENERATION; /* Disable requested */ if (!state && currentlyset) { /* Disable the data ready signal */ valold &= ~LIS3L02DQ_REG_CTRL_2_ENABLE_DATA_READY_GENERATION; /* The double write is to overcome a hardware bug? */ ret = lis3l02dq_spi_write_reg_8(indio_dev, LIS3L02DQ_REG_CTRL_2_ADDR, valold); if (ret) goto error_ret; ret = lis3l02dq_spi_write_reg_8(indio_dev, LIS3L02DQ_REG_CTRL_2_ADDR, valold); if (ret) goto error_ret; st->trigger_on = false; /* Enable requested */ } else if (state && !currentlyset) { /* If not set, enable requested * first disable all events */ ret = lis3l02dq_disable_all_events(indio_dev); if (ret < 0) goto error_ret; valold = ret | LIS3L02DQ_REG_CTRL_2_ENABLE_DATA_READY_GENERATION; st->trigger_on = true; ret = lis3l02dq_spi_write_reg_8(indio_dev, LIS3L02DQ_REG_CTRL_2_ADDR, valold); if (ret) goto error_ret; } return 0; error_ret: return ret; } /** * lis3l02dq_data_rdy_trigger_set_state() set datardy interrupt state * * If disabling the interrupt also does a final read to ensure it is clear. * This is only important in some cases where the scan enable elements are * switched before the buffer is reenabled. **/ static int lis3l02dq_data_rdy_trigger_set_state(struct iio_trigger *trig, bool state) { struct iio_dev *indio_dev = iio_trigger_get_drvdata(trig); int ret = 0; u8 t; __lis3l02dq_write_data_ready_config(indio_dev, state); if (!state) { /* * A possible quirk with the handler is currently worked around * by ensuring outstanding read events are cleared. 
*/ ret = lis3l02dq_read_all(indio_dev, NULL); } lis3l02dq_spi_read_reg_8(indio_dev, LIS3L02DQ_REG_WAKE_UP_SRC_ADDR, &t); return ret; } /** * lis3l02dq_trig_try_reen() try reenabling irq for data rdy trigger * @trig: the datardy trigger */ static int lis3l02dq_trig_try_reen(struct iio_trigger *trig) { struct iio_dev *indio_dev = iio_trigger_get_drvdata(trig); struct lis3l02dq_state *st = iio_priv(indio_dev); int i; /* If gpio still high (or high again) * In theory possible we will need to do this several times */ for (i = 0; i < 5; i++) if (gpio_get_value(st->gpio)) lis3l02dq_read_all(indio_dev, NULL); else break; if (i == 5) pr_info("Failed to clear the interrupt for lis3l02dq\n"); /* irq reenabled so success! */ return 0; } static const struct iio_trigger_ops lis3l02dq_trigger_ops = { .owner = THIS_MODULE, .set_trigger_state = &lis3l02dq_data_rdy_trigger_set_state, .try_reenable = &lis3l02dq_trig_try_reen, }; int lis3l02dq_probe_trigger(struct iio_dev *indio_dev) { int ret; struct lis3l02dq_state *st = iio_priv(indio_dev); st->trig = iio_trigger_alloc("lis3l02dq-dev%d", indio_dev->id); if (!st->trig) { ret = -ENOMEM; goto error_ret; } st->trig->dev.parent = &st->us->dev; st->trig->ops = &lis3l02dq_trigger_ops; iio_trigger_set_drvdata(st->trig, indio_dev); ret = iio_trigger_register(st->trig); if (ret) goto error_free_trig; return 0; error_free_trig: iio_trigger_free(st->trig); error_ret: return ret; } void lis3l02dq_remove_trigger(struct iio_dev *indio_dev) { struct lis3l02dq_state *st = iio_priv(indio_dev); iio_trigger_unregister(st->trig); iio_trigger_free(st->trig); } void lis3l02dq_unconfigure_buffer(struct iio_dev *indio_dev) { iio_dealloc_pollfunc(indio_dev->pollfunc); iio_kfifo_free(indio_dev->buffer); } static int lis3l02dq_buffer_postenable(struct iio_dev *indio_dev) { /* Disable unwanted channels otherwise the interrupt will not clear */ u8 t; int ret; bool oneenabled = false; ret = lis3l02dq_spi_read_reg_8(indio_dev, LIS3L02DQ_REG_CTRL_1_ADDR, &t); if 
(ret) goto error_ret; if (test_bit(0, indio_dev->active_scan_mask)) { t |= LIS3L02DQ_REG_CTRL_1_AXES_X_ENABLE; oneenabled = true; } else t &= ~LIS3L02DQ_REG_CTRL_1_AXES_X_ENABLE; if (test_bit(1, indio_dev->active_scan_mask)) { t |= LIS3L02DQ_REG_CTRL_1_AXES_Y_ENABLE; oneenabled = true; } else t &= ~LIS3L02DQ_REG_CTRL_1_AXES_Y_ENABLE; if (test_bit(2, indio_dev->active_scan_mask)) { t |= LIS3L02DQ_REG_CTRL_1_AXES_Z_ENABLE; oneenabled = true; } else t &= ~LIS3L02DQ_REG_CTRL_1_AXES_Z_ENABLE; if (!oneenabled) /* what happens in this case is unknown */ return -EINVAL; ret = lis3l02dq_spi_write_reg_8(indio_dev, LIS3L02DQ_REG_CTRL_1_ADDR, t); if (ret) goto error_ret; return iio_triggered_buffer_postenable(indio_dev); error_ret: return ret; } /* Turn all channels on again */ static int lis3l02dq_buffer_predisable(struct iio_dev *indio_dev) { u8 t; int ret; ret = iio_triggered_buffer_predisable(indio_dev); if (ret) goto error_ret; ret = lis3l02dq_spi_read_reg_8(indio_dev, LIS3L02DQ_REG_CTRL_1_ADDR, &t); if (ret) goto error_ret; t |= LIS3L02DQ_REG_CTRL_1_AXES_X_ENABLE | LIS3L02DQ_REG_CTRL_1_AXES_Y_ENABLE | LIS3L02DQ_REG_CTRL_1_AXES_Z_ENABLE; ret = lis3l02dq_spi_write_reg_8(indio_dev, LIS3L02DQ_REG_CTRL_1_ADDR, t); error_ret: return ret; } static const struct iio_buffer_setup_ops lis3l02dq_buffer_setup_ops = { .postenable = &lis3l02dq_buffer_postenable, .predisable = &lis3l02dq_buffer_predisable, }; int lis3l02dq_configure_buffer(struct iio_dev *indio_dev) { int ret; struct iio_buffer *buffer; buffer = iio_kfifo_allocate(indio_dev); if (!buffer) return -ENOMEM; iio_device_attach_buffer(indio_dev, buffer); buffer->scan_timestamp = true; indio_dev->setup_ops = &lis3l02dq_buffer_setup_ops; /* Functions are NULL as we set handler below */ indio_dev->pollfunc = iio_alloc_pollfunc(&iio_pollfunc_store_time, &lis3l02dq_trigger_handler, 0, indio_dev, "lis3l02dq_consumer%d", indio_dev->id); if (indio_dev->pollfunc == NULL) { ret = -ENOMEM; goto error_iio_sw_rb_free; } indio_dev->modes 
|= INDIO_BUFFER_TRIGGERED; return 0; error_iio_sw_rb_free: iio_kfifo_free(indio_dev->buffer); return ret; }
gpl-2.0
CTCaer/CTCaer-ICS-Xperia2011
drivers/net/hamradio/6pack.c
423
24736
/* * 6pack.c This module implements the 6pack protocol for kernel-based * devices like TTY. It interfaces between a raw TTY and the * kernel's AX.25 protocol layers. * * Authors: Andreas Könsgen <ajk@comnets.uni-bremen.de> * Ralf Baechle DL5RB <ralf@linux-mips.org> * * Quite a lot of stuff "stolen" by Joerg Reuter from slip.c, written by * * Laurence Culhane, <loz@holmes.demon.co.uk> * Fred N. van Kempen, <waltje@uwalt.nl.mugnet.org> */ #include <linux/module.h> #include <asm/system.h> #include <asm/uaccess.h> #include <linux/bitops.h> #include <linux/string.h> #include <linux/mm.h> #include <linux/interrupt.h> #include <linux/in.h> #include <linux/tty.h> #include <linux/errno.h> #include <linux/netdevice.h> #include <linux/timer.h> #include <net/ax25.h> #include <linux/etherdevice.h> #include <linux/skbuff.h> #include <linux/rtnetlink.h> #include <linux/spinlock.h> #include <linux/if_arp.h> #include <linux/init.h> #include <linux/ip.h> #include <linux/tcp.h> #include <linux/semaphore.h> #include <asm/atomic.h> #define SIXPACK_VERSION "Revision: 0.3.0" /* sixpack priority commands */ #define SIXP_SEOF 0x40 /* start and end of a 6pack frame */ #define SIXP_TX_URUN 0x48 /* transmit overrun */ #define SIXP_RX_ORUN 0x50 /* receive overrun */ #define SIXP_RX_BUF_OVL 0x58 /* receive buffer overflow */ #define SIXP_CHKSUM 0xFF /* valid checksum of a 6pack frame */ /* masks to get certain bits out of the status bytes sent by the TNC */ #define SIXP_CMD_MASK 0xC0 #define SIXP_CHN_MASK 0x07 #define SIXP_PRIO_CMD_MASK 0x80 #define SIXP_STD_CMD_MASK 0x40 #define SIXP_PRIO_DATA_MASK 0x38 #define SIXP_TX_MASK 0x20 #define SIXP_RX_MASK 0x10 #define SIXP_RX_DCD_MASK 0x18 #define SIXP_LEDS_ON 0x78 #define SIXP_LEDS_OFF 0x60 #define SIXP_CON 0x08 #define SIXP_STA 0x10 #define SIXP_FOUND_TNC 0xe9 #define SIXP_CON_ON 0x68 #define SIXP_DCD_MASK 0x08 #define SIXP_DAMA_OFF 0 /* default level 2 parameters */ #define SIXP_TXDELAY (HZ/4) /* in 1 s */ #define SIXP_PERSIST 50 /* in 256ths */ 
#define SIXP_SLOTTIME (HZ/10) /* in 1 s */ #define SIXP_INIT_RESYNC_TIMEOUT (3*HZ/2) /* in 1 s */ #define SIXP_RESYNC_TIMEOUT 5*HZ /* in 1 s */ /* 6pack configuration. */ #define SIXP_NRUNIT 31 /* MAX number of 6pack channels */ #define SIXP_MTU 256 /* Default MTU */ enum sixpack_flags { SIXPF_ERROR, /* Parity, etc. error */ }; struct sixpack { /* Various fields. */ struct tty_struct *tty; /* ptr to TTY structure */ struct net_device *dev; /* easy for intr handling */ /* These are pointers to the malloc()ed frame buffers. */ unsigned char *rbuff; /* receiver buffer */ int rcount; /* received chars counter */ unsigned char *xbuff; /* transmitter buffer */ unsigned char *xhead; /* next byte to XMIT */ int xleft; /* bytes left in XMIT queue */ unsigned char raw_buf[4]; unsigned char cooked_buf[400]; unsigned int rx_count; unsigned int rx_count_cooked; int mtu; /* Our mtu (to spot changes!) */ int buffsize; /* Max buffers sizes */ unsigned long flags; /* Flag values/ mode etc */ unsigned char mode; /* 6pack mode */ /* 6pack stuff */ unsigned char tx_delay; unsigned char persistence; unsigned char slottime; unsigned char duplex; unsigned char led_state; unsigned char status; unsigned char status1; unsigned char status2; unsigned char tx_enable; unsigned char tnc_state; struct timer_list tx_t; struct timer_list resync_t; atomic_t refcnt; struct semaphore dead_sem; spinlock_t lock; }; #define AX25_6PACK_HEADER_LEN 0 static void sixpack_decode(struct sixpack *, unsigned char[], int); static int encode_sixpack(unsigned char *, unsigned char *, int, unsigned char); /* * Perform the persistence/slottime algorithm for CSMA access. If the * persistence check was successful, write the data to the serial driver. * Note that in case of DAMA operation, the data is not sent here. 
*/ static void sp_xmit_on_air(unsigned long channel) { struct sixpack *sp = (struct sixpack *) channel; int actual, when = sp->slottime; static unsigned char random; random = random * 17 + 41; if (((sp->status1 & SIXP_DCD_MASK) == 0) && (random < sp->persistence)) { sp->led_state = 0x70; sp->tty->ops->write(sp->tty, &sp->led_state, 1); sp->tx_enable = 1; actual = sp->tty->ops->write(sp->tty, sp->xbuff, sp->status2); sp->xleft -= actual; sp->xhead += actual; sp->led_state = 0x60; sp->tty->ops->write(sp->tty, &sp->led_state, 1); sp->status2 = 0; } else mod_timer(&sp->tx_t, jiffies + ((when + 1) * HZ) / 100); } /* ----> 6pack timer interrupt handler and friends. <---- */ /* Encapsulate one AX.25 frame and stuff into a TTY queue. */ static void sp_encaps(struct sixpack *sp, unsigned char *icp, int len) { unsigned char *msg, *p = icp; int actual, count; if (len > sp->mtu) { /* sp->mtu = AX25_MTU = max. PACLEN = 256 */ msg = "oversized transmit packet!"; goto out_drop; } if (len > sp->mtu) { /* sp->mtu = AX25_MTU = max. PACLEN = 256 */ msg = "oversized transmit packet!"; goto out_drop; } if (p[0] > 5) { msg = "invalid KISS command"; goto out_drop; } if ((p[0] != 0) && (len > 2)) { msg = "KISS control packet too long"; goto out_drop; } if ((p[0] == 0) && (len < 15)) { msg = "bad AX.25 packet to transmit"; goto out_drop; } count = encode_sixpack(p, sp->xbuff, len, sp->tx_delay); set_bit(TTY_DO_WRITE_WAKEUP, &sp->tty->flags); switch (p[0]) { case 1: sp->tx_delay = p[1]; return; case 2: sp->persistence = p[1]; return; case 3: sp->slottime = p[1]; return; case 4: /* ignored */ return; case 5: sp->duplex = p[1]; return; } if (p[0] != 0) return; /* * In case of fullduplex or DAMA operation, we don't take care about the * state of the DCD or of any timers, as the determination of the * correct time to send is the job of the AX.25 layer. We send * immediately after data has arrived. 
*/ if (sp->duplex == 1) { sp->led_state = 0x70; sp->tty->ops->write(sp->tty, &sp->led_state, 1); sp->tx_enable = 1; actual = sp->tty->ops->write(sp->tty, sp->xbuff, count); sp->xleft = count - actual; sp->xhead = sp->xbuff + actual; sp->led_state = 0x60; sp->tty->ops->write(sp->tty, &sp->led_state, 1); } else { sp->xleft = count; sp->xhead = sp->xbuff; sp->status2 = count; sp_xmit_on_air((unsigned long)sp); } return; out_drop: sp->dev->stats.tx_dropped++; netif_start_queue(sp->dev); if (net_ratelimit()) printk(KERN_DEBUG "%s: %s - dropped.\n", sp->dev->name, msg); } /* Encapsulate an IP datagram and kick it into a TTY queue. */ static netdev_tx_t sp_xmit(struct sk_buff *skb, struct net_device *dev) { struct sixpack *sp = netdev_priv(dev); spin_lock_bh(&sp->lock); /* We were not busy, so we are now... :-) */ netif_stop_queue(dev); dev->stats.tx_bytes += skb->len; sp_encaps(sp, skb->data, skb->len); spin_unlock_bh(&sp->lock); dev_kfree_skb(skb); return NETDEV_TX_OK; } static int sp_open_dev(struct net_device *dev) { struct sixpack *sp = netdev_priv(dev); if (sp->tty == NULL) return -ENODEV; return 0; } /* Close the low-level part of the 6pack channel. */ static int sp_close(struct net_device *dev) { struct sixpack *sp = netdev_priv(dev); spin_lock_bh(&sp->lock); if (sp->tty) { /* TTY discipline is running. 
*/ clear_bit(TTY_DO_WRITE_WAKEUP, &sp->tty->flags); } netif_stop_queue(dev); spin_unlock_bh(&sp->lock); return 0; } /* Return the frame type ID */ static int sp_header(struct sk_buff *skb, struct net_device *dev, unsigned short type, const void *daddr, const void *saddr, unsigned len) { #ifdef CONFIG_INET if (type != ETH_P_AX25) return ax25_hard_header(skb, dev, type, daddr, saddr, len); #endif return 0; } static int sp_set_mac_address(struct net_device *dev, void *addr) { struct sockaddr_ax25 *sa = addr; netif_tx_lock_bh(dev); netif_addr_lock(dev); memcpy(dev->dev_addr, &sa->sax25_call, AX25_ADDR_LEN); netif_addr_unlock(dev); netif_tx_unlock_bh(dev); return 0; } static int sp_rebuild_header(struct sk_buff *skb) { #ifdef CONFIG_INET return ax25_rebuild_header(skb); #else return 0; #endif } static const struct header_ops sp_header_ops = { .create = sp_header, .rebuild = sp_rebuild_header, }; static const struct net_device_ops sp_netdev_ops = { .ndo_open = sp_open_dev, .ndo_stop = sp_close, .ndo_start_xmit = sp_xmit, .ndo_set_mac_address = sp_set_mac_address, }; static void sp_setup(struct net_device *dev) { /* Finish setting up the DEVICE info. */ dev->netdev_ops = &sp_netdev_ops; dev->destructor = free_netdev; dev->mtu = SIXP_MTU; dev->hard_header_len = AX25_MAX_HEADER_LEN; dev->header_ops = &sp_header_ops; dev->addr_len = AX25_ADDR_LEN; dev->type = ARPHRD_AX25; dev->tx_queue_len = 10; /* Only activated in AX.25 mode */ memcpy(dev->broadcast, &ax25_bcast, AX25_ADDR_LEN); memcpy(dev->dev_addr, &ax25_defaddr, AX25_ADDR_LEN); dev->flags = 0; } /* Send one completely decapsulated IP datagram to the IP layer. */ /* * This is the routine that sends the received data to the kernel AX.25. * 'cmd' is the KISS command. For AX.25 data, it is zero. 
*/ static void sp_bump(struct sixpack *sp, char cmd) { struct sk_buff *skb; int count; unsigned char *ptr; count = sp->rcount + 1; sp->dev->stats.rx_bytes += count; if ((skb = dev_alloc_skb(count)) == NULL) goto out_mem; ptr = skb_put(skb, count); *ptr++ = cmd; /* KISS command */ memcpy(ptr, sp->cooked_buf + 1, count); skb->protocol = ax25_type_trans(skb, sp->dev); netif_rx(skb); sp->dev->stats.rx_packets++; return; out_mem: sp->dev->stats.rx_dropped++; } /* ----------------------------------------------------------------------- */ /* * We have a potential race on dereferencing tty->disc_data, because the tty * layer provides no locking at all - thus one cpu could be running * sixpack_receive_buf while another calls sixpack_close, which zeroes * tty->disc_data and frees the memory that sixpack_receive_buf is using. The * best way to fix this is to use a rwlock in the tty struct, but for now we * use a single global rwlock for all ttys in ppp line discipline. */ static DEFINE_RWLOCK(disc_data_lock); static struct sixpack *sp_get(struct tty_struct *tty) { struct sixpack *sp; read_lock(&disc_data_lock); sp = tty->disc_data; if (sp) atomic_inc(&sp->refcnt); read_unlock(&disc_data_lock); return sp; } static void sp_put(struct sixpack *sp) { if (atomic_dec_and_test(&sp->refcnt)) up(&sp->dead_sem); } /* * Called by the TTY driver when there's room for more data. If we have * more packets to send, we send them here. 
*/ static void sixpack_write_wakeup(struct tty_struct *tty) { struct sixpack *sp = sp_get(tty); int actual; if (!sp) return; if (sp->xleft <= 0) { /* Now serial buffer is almost free & we can start * transmission of another packet */ sp->dev->stats.tx_packets++; clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags); sp->tx_enable = 0; netif_wake_queue(sp->dev); goto out; } if (sp->tx_enable) { actual = tty->ops->write(tty, sp->xhead, sp->xleft); sp->xleft -= actual; sp->xhead += actual; } out: sp_put(sp); } /* ----------------------------------------------------------------------- */ /* * Handle the 'receiver data ready' interrupt. * This function is called by the 'tty_io' module in the kernel when * a block of 6pack data has been received, which can now be decapsulated * and sent on to some IP layer for further processing. */ static void sixpack_receive_buf(struct tty_struct *tty, const unsigned char *cp, char *fp, int count) { struct sixpack *sp; unsigned char buf[512]; int count1; if (!count) return; sp = sp_get(tty); if (!sp) return; memcpy(buf, cp, count < sizeof(buf) ? count : sizeof(buf)); /* Read the characters out of the buffer */ count1 = count; while (count) { count--; if (fp && *fp++) { if (!test_and_set_bit(SIXPF_ERROR, &sp->flags)) sp->dev->stats.rx_errors++; continue; } } sixpack_decode(sp, buf, count1); sp_put(sp); tty_unthrottle(tty); } /* * Try to resync the TNC. Called by the resync timer defined in * decode_prio_command */ #define TNC_UNINITIALIZED 0 #define TNC_UNSYNC_STARTUP 1 #define TNC_UNSYNCED 2 #define TNC_IN_SYNC 3 static void __tnc_set_sync_state(struct sixpack *sp, int new_tnc_state) { char *msg; switch (new_tnc_state) { default: /* gcc oh piece-o-crap ... 
*/ case TNC_UNSYNC_STARTUP: msg = "Synchronizing with TNC"; break; case TNC_UNSYNCED: msg = "Lost synchronization with TNC\n"; break; case TNC_IN_SYNC: msg = "Found TNC"; break; } sp->tnc_state = new_tnc_state; printk(KERN_INFO "%s: %s\n", sp->dev->name, msg); } static inline void tnc_set_sync_state(struct sixpack *sp, int new_tnc_state) { int old_tnc_state = sp->tnc_state; if (old_tnc_state != new_tnc_state) __tnc_set_sync_state(sp, new_tnc_state); } static void resync_tnc(unsigned long channel) { struct sixpack *sp = (struct sixpack *) channel; static char resync_cmd = 0xe8; /* clear any data that might have been received */ sp->rx_count = 0; sp->rx_count_cooked = 0; /* reset state machine */ sp->status = 1; sp->status1 = 1; sp->status2 = 0; /* resync the TNC */ sp->led_state = 0x60; sp->tty->ops->write(sp->tty, &sp->led_state, 1); sp->tty->ops->write(sp->tty, &resync_cmd, 1); /* Start resync timer again -- the TNC might be still absent */ del_timer(&sp->resync_t); sp->resync_t.data = (unsigned long) sp; sp->resync_t.function = resync_tnc; sp->resync_t.expires = jiffies + SIXP_RESYNC_TIMEOUT; add_timer(&sp->resync_t); } static inline int tnc_init(struct sixpack *sp) { unsigned char inbyte = 0xe8; tnc_set_sync_state(sp, TNC_UNSYNC_STARTUP); sp->tty->ops->write(sp->tty, &inbyte, 1); del_timer(&sp->resync_t); sp->resync_t.data = (unsigned long) sp; sp->resync_t.function = resync_tnc; sp->resync_t.expires = jiffies + SIXP_RESYNC_TIMEOUT; add_timer(&sp->resync_t); return 0; } /* * Open the high-level part of the 6pack channel. * This function is called by the TTY module when the * 6pack line discipline is called for. Because we are * sure the tty line exists, we only have to link it to * a free 6pcack channel... 
*/ static int sixpack_open(struct tty_struct *tty) { char *rbuff = NULL, *xbuff = NULL; struct net_device *dev; struct sixpack *sp; unsigned long len; int err = 0; if (!capable(CAP_NET_ADMIN)) return -EPERM; if (tty->ops->write == NULL) return -EOPNOTSUPP; dev = alloc_netdev(sizeof(struct sixpack), "sp%d", sp_setup); if (!dev) { err = -ENOMEM; goto out; } sp = netdev_priv(dev); sp->dev = dev; spin_lock_init(&sp->lock); atomic_set(&sp->refcnt, 1); init_MUTEX_LOCKED(&sp->dead_sem); /* !!! length of the buffers. MTU is IP MTU, not PACLEN! */ len = dev->mtu * 2; rbuff = kmalloc(len + 4, GFP_KERNEL); xbuff = kmalloc(len + 4, GFP_KERNEL); if (rbuff == NULL || xbuff == NULL) { err = -ENOBUFS; goto out_free; } spin_lock_bh(&sp->lock); sp->tty = tty; sp->rbuff = rbuff; sp->xbuff = xbuff; sp->mtu = AX25_MTU + 73; sp->buffsize = len; sp->rcount = 0; sp->rx_count = 0; sp->rx_count_cooked = 0; sp->xleft = 0; sp->flags = 0; /* Clear ESCAPE & ERROR flags */ sp->duplex = 0; sp->tx_delay = SIXP_TXDELAY; sp->persistence = SIXP_PERSIST; sp->slottime = SIXP_SLOTTIME; sp->led_state = 0x60; sp->status = 1; sp->status1 = 1; sp->status2 = 0; sp->tx_enable = 0; netif_start_queue(dev); init_timer(&sp->tx_t); sp->tx_t.function = sp_xmit_on_air; sp->tx_t.data = (unsigned long) sp; init_timer(&sp->resync_t); spin_unlock_bh(&sp->lock); /* Done. We have linked the TTY line to a channel. */ tty->disc_data = sp; tty->receive_room = 65536; /* Now we're ready to register. */ if (register_netdev(dev)) goto out_free; tnc_init(sp); return 0; out_free: kfree(xbuff); kfree(rbuff); if (dev) free_netdev(dev); out: return err; } /* * Close down a 6pack channel. * This means flushing out any pending queues, and then restoring the * TTY line discipline to what it was before it got hooked to 6pack * (which usually is TTY again). 
*/ static void sixpack_close(struct tty_struct *tty) { struct sixpack *sp; write_lock(&disc_data_lock); sp = tty->disc_data; tty->disc_data = NULL; write_unlock(&disc_data_lock); if (!sp) return; /* * We have now ensured that nobody can start using ap from now on, but * we have to wait for all existing users to finish. */ if (!atomic_dec_and_test(&sp->refcnt)) down(&sp->dead_sem); unregister_netdev(sp->dev); del_timer(&sp->tx_t); del_timer(&sp->resync_t); /* Free all 6pack frame buffers. */ kfree(sp->rbuff); kfree(sp->xbuff); } /* Perform I/O control on an active 6pack channel. */ static int sixpack_ioctl(struct tty_struct *tty, struct file *file, unsigned int cmd, unsigned long arg) { struct sixpack *sp = sp_get(tty); struct net_device *dev; unsigned int tmp, err; if (!sp) return -ENXIO; dev = sp->dev; switch(cmd) { case SIOCGIFNAME: err = copy_to_user((void __user *) arg, dev->name, strlen(dev->name) + 1) ? -EFAULT : 0; break; case SIOCGIFENCAP: err = put_user(0, (int __user *) arg); break; case SIOCSIFENCAP: if (get_user(tmp, (int __user *) arg)) { err = -EFAULT; break; } sp->mode = tmp; dev->addr_len = AX25_ADDR_LEN; dev->hard_header_len = AX25_KISS_HEADER_LEN + AX25_MAX_HEADER_LEN + 3; dev->type = ARPHRD_AX25; err = 0; break; case SIOCSIFHWADDR: { char addr[AX25_ADDR_LEN]; if (copy_from_user(&addr, (void __user *) arg, AX25_ADDR_LEN)) { err = -EFAULT; break; } netif_tx_lock_bh(dev); memcpy(dev->dev_addr, &addr, AX25_ADDR_LEN); netif_tx_unlock_bh(dev); err = 0; break; } default: err = tty_mode_ioctl(tty, file, cmd, arg); } sp_put(sp); return err; } static struct tty_ldisc_ops sp_ldisc = { .owner = THIS_MODULE, .magic = TTY_LDISC_MAGIC, .name = "6pack", .open = sixpack_open, .close = sixpack_close, .ioctl = sixpack_ioctl, .receive_buf = sixpack_receive_buf, .write_wakeup = sixpack_write_wakeup, }; /* Initialize 6pack control device -- register 6pack line discipline */ static const char msg_banner[] __initdata = KERN_INFO \ "AX.25: 6pack driver, " SIXPACK_VERSION 
"\n"; static const char msg_regfail[] __initdata = KERN_ERR \ "6pack: can't register line discipline (err = %d)\n"; static int __init sixpack_init_driver(void) { int status; printk(msg_banner); /* Register the provided line protocol discipline */ if ((status = tty_register_ldisc(N_6PACK, &sp_ldisc)) != 0) printk(msg_regfail, status); return status; } static const char msg_unregfail[] __exitdata = KERN_ERR \ "6pack: can't unregister line discipline (err = %d)\n"; static void __exit sixpack_exit_driver(void) { int ret; if ((ret = tty_unregister_ldisc(N_6PACK))) printk(msg_unregfail, ret); } /* encode an AX.25 packet into 6pack */ static int encode_sixpack(unsigned char *tx_buf, unsigned char *tx_buf_raw, int length, unsigned char tx_delay) { int count = 0; unsigned char checksum = 0, buf[400]; int raw_count = 0; tx_buf_raw[raw_count++] = SIXP_PRIO_CMD_MASK | SIXP_TX_MASK; tx_buf_raw[raw_count++] = SIXP_SEOF; buf[0] = tx_delay; for (count = 1; count < length; count++) buf[count] = tx_buf[count]; for (count = 0; count < length; count++) checksum += buf[count]; buf[length] = (unsigned char) 0xff - checksum; for (count = 0; count <= length; count++) { if ((count % 3) == 0) { tx_buf_raw[raw_count++] = (buf[count] & 0x3f); tx_buf_raw[raw_count] = ((buf[count] >> 2) & 0x30); } else if ((count % 3) == 1) { tx_buf_raw[raw_count++] |= (buf[count] & 0x0f); tx_buf_raw[raw_count] = ((buf[count] >> 2) & 0x3c); } else { tx_buf_raw[raw_count++] |= (buf[count] & 0x03); tx_buf_raw[raw_count++] = (buf[count] >> 2); } } if ((length % 3) != 2) raw_count++; tx_buf_raw[raw_count++] = SIXP_SEOF; return raw_count; } /* decode 4 sixpack-encoded bytes into 3 data bytes */ static void decode_data(struct sixpack *sp, unsigned char inbyte) { unsigned char *buf; if (sp->rx_count != 3) { sp->raw_buf[sp->rx_count++] = inbyte; return; } buf = sp->raw_buf; sp->cooked_buf[sp->rx_count_cooked++] = buf[0] | ((buf[1] << 2) & 0xc0); sp->cooked_buf[sp->rx_count_cooked++] = (buf[1] & 0x0f) | ((buf[2] << 2) & 
0xf0); sp->cooked_buf[sp->rx_count_cooked++] = (buf[2] & 0x03) | (inbyte << 2); sp->rx_count = 0; } /* identify and execute a 6pack priority command byte */ static void decode_prio_command(struct sixpack *sp, unsigned char cmd) { unsigned char channel; int actual; channel = cmd & SIXP_CHN_MASK; if ((cmd & SIXP_PRIO_DATA_MASK) != 0) { /* idle ? */ /* RX and DCD flags can only be set in the same prio command, if the DCD flag has been set without the RX flag in the previous prio command. If DCD has not been set before, something in the transmission has gone wrong. In this case, RX and DCD are cleared in order to prevent the decode_data routine from reading further data that might be corrupt. */ if (((sp->status & SIXP_DCD_MASK) == 0) && ((cmd & SIXP_RX_DCD_MASK) == SIXP_RX_DCD_MASK)) { if (sp->status != 1) printk(KERN_DEBUG "6pack: protocol violation\n"); else sp->status = 0; cmd &= ~SIXP_RX_DCD_MASK; } sp->status = cmd & SIXP_PRIO_DATA_MASK; } else { /* output watchdog char if idle */ if ((sp->status2 != 0) && (sp->duplex == 1)) { sp->led_state = 0x70; sp->tty->ops->write(sp->tty, &sp->led_state, 1); sp->tx_enable = 1; actual = sp->tty->ops->write(sp->tty, sp->xbuff, sp->status2); sp->xleft -= actual; sp->xhead += actual; sp->led_state = 0x60; sp->status2 = 0; } } /* needed to trigger the TNC watchdog */ sp->tty->ops->write(sp->tty, &sp->led_state, 1); /* if the state byte has been received, the TNC is present, so the resync timer can be reset. 
*/ if (sp->tnc_state == TNC_IN_SYNC) { del_timer(&sp->resync_t); sp->resync_t.data = (unsigned long) sp; sp->resync_t.function = resync_tnc; sp->resync_t.expires = jiffies + SIXP_INIT_RESYNC_TIMEOUT; add_timer(&sp->resync_t); } sp->status1 = cmd & SIXP_PRIO_DATA_MASK; } /* identify and execute a standard 6pack command byte */ static void decode_std_command(struct sixpack *sp, unsigned char cmd) { unsigned char checksum = 0, rest = 0, channel; short i; channel = cmd & SIXP_CHN_MASK; switch (cmd & SIXP_CMD_MASK) { /* normal command */ case SIXP_SEOF: if ((sp->rx_count == 0) && (sp->rx_count_cooked == 0)) { if ((sp->status & SIXP_RX_DCD_MASK) == SIXP_RX_DCD_MASK) { sp->led_state = 0x68; sp->tty->ops->write(sp->tty, &sp->led_state, 1); } } else { sp->led_state = 0x60; /* fill trailing bytes with zeroes */ sp->tty->ops->write(sp->tty, &sp->led_state, 1); rest = sp->rx_count; if (rest != 0) for (i = rest; i <= 3; i++) decode_data(sp, 0); if (rest == 2) sp->rx_count_cooked -= 2; else if (rest == 3) sp->rx_count_cooked -= 1; for (i = 0; i < sp->rx_count_cooked; i++) checksum += sp->cooked_buf[i]; if (checksum != SIXP_CHKSUM) { printk(KERN_DEBUG "6pack: bad checksum %2.2x\n", checksum); } else { sp->rcount = sp->rx_count_cooked-2; sp_bump(sp, 0); } sp->rx_count_cooked = 0; } break; case SIXP_TX_URUN: printk(KERN_DEBUG "6pack: TX underrun\n"); break; case SIXP_RX_ORUN: printk(KERN_DEBUG "6pack: RX overrun\n"); break; case SIXP_RX_BUF_OVL: printk(KERN_DEBUG "6pack: RX buffer overflow\n"); } } /* decode a 6pack packet */ static void sixpack_decode(struct sixpack *sp, unsigned char *pre_rbuff, int count) { unsigned char inbyte; int count1; for (count1 = 0; count1 < count; count1++) { inbyte = pre_rbuff[count1]; if (inbyte == SIXP_FOUND_TNC) { tnc_set_sync_state(sp, TNC_IN_SYNC); del_timer(&sp->resync_t); } if ((inbyte & SIXP_PRIO_CMD_MASK) != 0) decode_prio_command(sp, inbyte); else if ((inbyte & SIXP_STD_CMD_MASK) != 0) decode_std_command(sp, inbyte); else if ((sp->status & 
SIXP_RX_DCD_MASK) == SIXP_RX_DCD_MASK) decode_data(sp, inbyte); } } MODULE_AUTHOR("Ralf Baechle DO1GRB <ralf@linux-mips.org>"); MODULE_DESCRIPTION("6pack driver for AX.25"); MODULE_LICENSE("GPL"); MODULE_ALIAS_LDISC(N_6PACK); module_init(sixpack_init_driver); module_exit(sixpack_exit_driver);
gpl-2.0
cherifyass/s4-gpe-kernel
sound/soc/msm/msm-multi-ch-pcm-q6.c
679
24397
/* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/init.h> #include <linux/err.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/time.h> #include <linux/wait.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/dma-mapping.h> #include <linux/android_pmem.h> #include <asm/dma.h> #include <sound/core.h> #include <sound/soc.h> #include <sound/soc-dapm.h> #include <sound/pcm.h> #include <sound/initval.h> #include <sound/control.h> #include <sound/timer.h> #include "msm-pcm-q6.h" #include "msm-pcm-routing.h" static struct audio_locks the_locks; struct snd_msm { struct snd_card *card; struct snd_pcm *pcm; }; struct snd_msm_volume { struct msm_audio *prtd; unsigned volume; }; static struct snd_msm_volume multi_ch_pcm_audio = {NULL, 0x2000}; #define PLAYBACK_NUM_PERIODS 8 #define PLAYBACK_MAX_PERIOD_SIZE 12288 #define PLAYBACK_MIN_PERIOD_SIZE 256 #define CAPTURE_NUM_PERIODS 16 #define CAPTURE_MIN_PERIOD_SIZE 320 #define CAPTURE_MAX_PERIOD_SIZE 12288 static struct snd_pcm_hardware msm_pcm_hardware_capture = { .info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_BLOCK_TRANSFER | SNDRV_PCM_INFO_MMAP_VALID | SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_RESUME), .formats = SNDRV_PCM_FMTBIT_S16_LE, .rates = SNDRV_PCM_RATE_8000_48000, .rate_min = 8000, .rate_max = 48000, .channels_min = 1, .channels_max = 8, .buffer_bytes_max = CAPTURE_NUM_PERIODS * CAPTURE_MAX_PERIOD_SIZE, .period_bytes_min = CAPTURE_MIN_PERIOD_SIZE, .period_bytes_max = 
CAPTURE_MAX_PERIOD_SIZE, .periods_min = CAPTURE_NUM_PERIODS, .periods_max = CAPTURE_NUM_PERIODS, .fifo_size = 0, }; static struct snd_pcm_hardware msm_pcm_hardware_playback = { .info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_BLOCK_TRANSFER | SNDRV_PCM_INFO_MMAP_VALID | SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_RESUME), .formats = SNDRV_PCM_FMTBIT_S16_LE, .rates = SNDRV_PCM_RATE_8000_48000 | SNDRV_PCM_RATE_KNOT, .rate_min = 8000, .rate_max = 48000, .channels_min = 1, .channels_max = 8, .buffer_bytes_max = PLAYBACK_NUM_PERIODS * PLAYBACK_MAX_PERIOD_SIZE, .period_bytes_min = PLAYBACK_MIN_PERIOD_SIZE, .period_bytes_max = PLAYBACK_MAX_PERIOD_SIZE, .periods_min = PLAYBACK_NUM_PERIODS, .periods_max = PLAYBACK_NUM_PERIODS, .fifo_size = 0, }; /* Conventional and unconventional sample rate supported */ static unsigned int supported_sample_rates[] = { 8000, 11025, 12000, 16000, 22050, 24000, 32000, 44100, 48000 }; static uint32_t in_frame_info[CAPTURE_NUM_PERIODS][2]; static struct snd_pcm_hw_constraint_list constraints_sample_rates = { .count = ARRAY_SIZE(supported_sample_rates), .list = supported_sample_rates, .mask = 0, }; static void event_handler(uint32_t opcode, uint32_t token, uint32_t *payload, void *priv) { struct msm_audio *prtd = priv; struct snd_pcm_substream *substream = prtd->substream; struct snd_pcm_runtime *runtime = substream->runtime; struct audio_aio_write_param param; struct audio_buffer *buf = NULL; uint32_t *ptrmem = (uint32_t *)payload; int i = 0; uint32_t idx = 0; uint32_t size = 0; pr_debug("%s\n", __func__); switch (opcode) { case ASM_DATA_EVENT_WRITE_DONE: { uint32_t *ptrmem = (uint32_t *)&param; pr_debug("ASM_DATA_EVENT_WRITE_DONE\n"); pr_debug("Buffer Consumed = 0x%08x\n", *ptrmem); prtd->pcm_irq_pos += prtd->pcm_count; if (atomic_read(&prtd->start)) snd_pcm_period_elapsed(substream); atomic_inc(&prtd->out_count); wake_up(&the_locks.write_wait); if (!atomic_read(&prtd->start)) break; if (!prtd->mmap_flag) break; buf = 
prtd->audio_client->port[IN].buf; pr_debug("%s:writing %d bytes of buffer[%d] to dsp 2\n", __func__, prtd->pcm_count, prtd->out_head); pr_debug("%s:writing buffer[%d] from 0x%08x\n", __func__, prtd->out_head, ((unsigned int)buf[0].phys + (prtd->out_head * prtd->pcm_count))); param.paddr = (unsigned long)buf[0].phys + (prtd->out_head * prtd->pcm_count); param.len = prtd->pcm_count; param.msw_ts = 0; param.lsw_ts = 0; param.flags = NO_TIMESTAMP; param.uid = (unsigned long)buf[0].phys + (prtd->out_head * prtd->pcm_count); for (i = 0; i < sizeof(struct audio_aio_write_param)/4; i++, ++ptrmem) pr_debug("cmd[%d]=0x%08x\n", i, *ptrmem); if (q6asm_async_write(prtd->audio_client, &param) < 0) pr_err("%s:q6asm_async_write failed\n", __func__); else prtd->out_head = (prtd->out_head + 1) & (runtime->periods - 1); break; } case ASM_DATA_CMDRSP_EOS: pr_debug("ASM_DATA_CMDRSP_EOS\n"); prtd->cmd_ack = 1; wake_up(&the_locks.eos_wait); break; case ASM_DATA_EVENT_READ_DONE: { pr_debug("ASM_DATA_EVENT_READ_DONE\n"); pr_debug("token = 0x%08x\n", token); for (i = 0; i < 8; i++, ++ptrmem) pr_debug("cmd[%d]=0x%08x\n", i, *ptrmem); in_frame_info[token][0] = payload[2]; in_frame_info[token][1] = payload[3]; prtd->pcm_irq_pos += in_frame_info[token][0]; pr_debug("pcm_irq_pos=%d\n", prtd->pcm_irq_pos); if (atomic_read(&prtd->start)) snd_pcm_period_elapsed(substream); if (atomic_read(&prtd->in_count) <= prtd->periods) atomic_inc(&prtd->in_count); wake_up(&the_locks.read_wait); if (prtd->mmap_flag && q6asm_is_cpu_buf_avail_nolock(OUT, prtd->audio_client, &size, &idx)) q6asm_read_nolock(prtd->audio_client); break; } case APR_BASIC_RSP_RESULT: { switch (payload[0]) { case ASM_SESSION_CMD_RUN: if (substream->stream != SNDRV_PCM_STREAM_PLAYBACK) { atomic_set(&prtd->start, 1); break; } if (prtd->mmap_flag) { pr_debug("%s:writing %d bytes"\ " of buffer[%d] to dsp\n", __func__, prtd->pcm_count, prtd->out_head); buf = prtd->audio_client->port[IN].buf; pr_debug("%s:writing buffer[%d] from 0x%08x\n", 
__func__, prtd->out_head, ((unsigned int)buf[0].phys + (prtd->out_head * prtd->pcm_count))); param.paddr = (unsigned long) buf[prtd->out_head].phys; param.len = prtd->pcm_count; param.msw_ts = 0; param.lsw_ts = 0; param.flags = NO_TIMESTAMP; param.uid = (unsigned long) buf[prtd->out_head].phys; if (q6asm_async_write(prtd->audio_client, &param) < 0) pr_err("%s:q6asm_async_write failed\n", __func__); else prtd->out_head = (prtd->out_head + 1) & (runtime->periods - 1); } else { while (atomic_read(&prtd->out_needed)) { pr_debug("%s:writing %d bytesi" \ " of buffer to dsp\n", \ __func__, \ prtd->pcm_count); q6asm_write_nolock(prtd->audio_client, prtd->pcm_count, 0, 0, NO_TIMESTAMP); atomic_dec(&prtd->out_needed); wake_up(&the_locks.write_wait); }; } atomic_set(&prtd->start, 1); break; default: break; } } break; default: pr_debug("Not Supported Event opcode[0x%x]\n", opcode); break; } } static int msm_pcm_playback_prepare(struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime = substream->runtime; struct msm_audio *prtd = runtime->private_data; int ret; pr_debug("%s\n", __func__); if (prtd->mmap_flag) { ret = q6asm_set_io_mode(prtd->audio_client, ASYNC_IO_MODE); if (ret < 0) { pr_err("%s: Set IO mode failed\n", __func__); return -ENOMEM; } } prtd->pcm_size = snd_pcm_lib_buffer_bytes(substream); prtd->pcm_count = snd_pcm_lib_period_bytes(substream); prtd->pcm_irq_pos = 0; /* rate and channels are sent to audio driver */ prtd->samp_rate = runtime->rate; prtd->channel_mode = runtime->channels; if (prtd->enabled) return 0; ret = q6asm_media_format_block_multi_ch_pcm(prtd->audio_client, runtime->rate, runtime->channels); if (ret < 0) pr_info("%s: CMD Format block failed\n", __func__); atomic_set(&prtd->out_count, runtime->periods); prtd->enabled = 1; prtd->cmd_ack = 0; return 0; } static int msm_pcm_capture_prepare(struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime = substream->runtime; struct msm_audio *prtd = runtime->private_data; int 
ret = 0; int i = 0; pr_debug("%s\n", __func__); prtd->pcm_size = snd_pcm_lib_buffer_bytes(substream); prtd->pcm_count = snd_pcm_lib_period_bytes(substream); prtd->pcm_irq_pos = 0; /* rate and channels are sent to audio driver */ prtd->samp_rate = runtime->rate; prtd->channel_mode = runtime->channels; if (prtd->enabled) return 0; pr_debug("Samp_rate = %d\n", prtd->samp_rate); pr_debug("Channel = %d\n", prtd->channel_mode); ret = q6asm_enc_cfg_blk_multi_ch_pcm(prtd->audio_client, prtd->samp_rate, prtd->channel_mode); if (ret < 0) pr_debug("%s: cmd cfg pcm was block failed", __func__); for (i = 0; i < runtime->periods; i++) q6asm_read(prtd->audio_client); prtd->periods = runtime->periods; prtd->enabled = 1; return ret; } static int msm_pcm_trigger(struct snd_pcm_substream *substream, int cmd) { int ret = 0; struct snd_pcm_runtime *runtime = substream->runtime; struct msm_audio *prtd = runtime->private_data; switch (cmd) { case SNDRV_PCM_TRIGGER_START: case SNDRV_PCM_TRIGGER_RESUME: case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: pr_debug("%s: Trigger start\n", __func__); q6asm_run_nowait(prtd->audio_client, 0, 0, 0); break; case SNDRV_PCM_TRIGGER_STOP: pr_debug("SNDRV_PCM_TRIGGER_STOP\n"); atomic_set(&prtd->start, 0); if (substream->stream != SNDRV_PCM_STREAM_PLAYBACK) break; prtd->cmd_ack = 0; q6asm_cmd_nowait(prtd->audio_client, CMD_EOS); break; case SNDRV_PCM_TRIGGER_SUSPEND: case SNDRV_PCM_TRIGGER_PAUSE_PUSH: pr_debug("SNDRV_PCM_TRIGGER_PAUSE\n"); q6asm_cmd_nowait(prtd->audio_client, CMD_PAUSE); atomic_set(&prtd->start, 0); break; default: ret = -EINVAL; break; } return ret; } static int msm_pcm_open(struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime = substream->runtime; struct snd_soc_pcm_runtime *soc_prtd = substream->private_data; struct msm_audio *prtd; int ret = 0; struct asm_softpause_params softpause = { .enable = SOFT_PAUSE_ENABLE, .period = SOFT_PAUSE_PERIOD, .step = SOFT_PAUSE_STEP, .rampingcurve = SOFT_PAUSE_CURVE_LINEAR, }; struct 
asm_softvolume_params softvol = { .period = SOFT_VOLUME_PERIOD, .step = SOFT_VOLUME_STEP, .rampingcurve = SOFT_VOLUME_CURVE_LINEAR, }; pr_debug("%s\n", __func__); prtd = kzalloc(sizeof(struct msm_audio), GFP_KERNEL); if (prtd == NULL) { pr_err("Failed to allocate memory for msm_audio\n"); return -ENOMEM; } prtd->substream = substream; prtd->audio_client = q6asm_audio_client_alloc( (app_cb)event_handler, prtd); if (!prtd->audio_client) { pr_err("%s: Could not allocate memory\n", __func__); kfree(prtd); return -ENOMEM; } prtd->audio_client->perf_mode = false; if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { runtime->hw = msm_pcm_hardware_playback; ret = q6asm_open_write(prtd->audio_client, FORMAT_MULTI_CHANNEL_LINEAR_PCM); if (ret < 0) { pr_err("%s: pcm out open failed\n", __func__); q6asm_audio_client_free(prtd->audio_client); kfree(prtd); return -ENOMEM; } } /* Capture path */ if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) { runtime->hw = msm_pcm_hardware_capture; ret = q6asm_open_read(prtd->audio_client, FORMAT_MULTI_CHANNEL_LINEAR_PCM); if (ret < 0) { pr_err("%s: pcm in open failed\n", __func__); q6asm_audio_client_free(prtd->audio_client); kfree(prtd); return -ENOMEM; } } pr_debug("%s: session ID %d\n", __func__, prtd->audio_client->session); prtd->session_id = prtd->audio_client->session; msm_pcm_routing_reg_phy_stream(soc_prtd->dai_link->be_id, prtd->audio_client->perf_mode, prtd->session_id, substream->stream); if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) prtd->cmd_ack = 1; ret = snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_RATE, &constraints_sample_rates); if (ret < 0) pr_err("snd_pcm_hw_constraint_list failed\n"); /* Ensure that buffer size is a multiple of period size */ ret = snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS); if (ret < 0) pr_err("snd_pcm_hw_constraint_integer failed\n"); if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { ret = snd_pcm_hw_constraint_minmax(runtime, 
SNDRV_PCM_HW_PARAM_BUFFER_BYTES, PLAYBACK_NUM_PERIODS * PLAYBACK_MIN_PERIOD_SIZE, PLAYBACK_NUM_PERIODS * PLAYBACK_MAX_PERIOD_SIZE); if (ret < 0) { pr_err("constraint for buffer bytes min max ret = %d\n", ret); } } if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) { ret = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_BUFFER_BYTES, CAPTURE_NUM_PERIODS * CAPTURE_MIN_PERIOD_SIZE, CAPTURE_NUM_PERIODS * CAPTURE_MAX_PERIOD_SIZE); if (ret < 0) { pr_err("constraint for buffer bytes min max ret = %d\n", ret); } } prtd->dsp_cnt = 0; runtime->private_data = prtd; pr_debug("substream->pcm->device = %d\n", substream->pcm->device); pr_debug("soc_prtd->dai_link->be_id = %d\n", soc_prtd->dai_link->be_id); multi_ch_pcm_audio.prtd = prtd; ret = multi_ch_pcm_set_volume(multi_ch_pcm_audio.volume); if (ret < 0) pr_err("%s : Set Volume failed : %d", __func__, ret); ret = q6asm_set_softpause(multi_ch_pcm_audio.prtd->audio_client, &softpause); if (ret < 0) pr_err("%s: Send SoftPause Param failed ret=%d\n", __func__, ret); ret = q6asm_set_softvolume(multi_ch_pcm_audio.prtd->audio_client, &softvol); if (ret < 0) pr_err("%s: Send SoftVolume Param failed ret=%d\n", __func__, ret); return 0; } int multi_ch_pcm_set_volume(unsigned volume) { int rc = 0; pr_err("multi_ch_pcm_set_volume\n"); if (multi_ch_pcm_audio.prtd && multi_ch_pcm_audio.prtd->audio_client) { pr_err("%s q6asm_set_volume\n", __func__); rc = q6asm_set_volume(multi_ch_pcm_audio.prtd->audio_client, volume); if (rc < 0) { pr_err("%s: Send Volume command failed" " rc=%d\n", __func__, rc); } } multi_ch_pcm_audio.volume = volume; return rc; } static int msm_pcm_playback_copy(struct snd_pcm_substream *substream, int a, snd_pcm_uframes_t hwoff, void __user *buf, snd_pcm_uframes_t frames) { int ret = 0; int fbytes = 0; int xfer = 0; char *bufptr = NULL; void *data = NULL; uint32_t idx = 0; uint32_t size = 0; struct snd_pcm_runtime *runtime = substream->runtime; struct msm_audio *prtd = runtime->private_data; fbytes = 
frames_to_bytes(runtime, frames); pr_debug("%s: prtd->out_count = %d\n", __func__, atomic_read(&prtd->out_count)); ret = wait_event_timeout(the_locks.write_wait, (atomic_read(&prtd->out_count)), 5 * HZ); if (!ret) { pr_err("%s: wait_event_timeout failed\n", __func__); goto fail; } if (!atomic_read(&prtd->out_count)) { pr_err("%s: pcm stopped out_count 0\n", __func__); return 0; } data = q6asm_is_cpu_buf_avail(IN, prtd->audio_client, &size, &idx); bufptr = data; if (bufptr) { pr_debug("%s:fbytes =%d: xfer=%d size=%d\n", __func__, fbytes, xfer, size); xfer = fbytes; if (copy_from_user(bufptr, buf, xfer)) { ret = -EFAULT; goto fail; } buf += xfer; fbytes -= xfer; pr_debug("%s:fbytes = %d: xfer=%d\n", __func__, fbytes, xfer); if (atomic_read(&prtd->start)) { pr_debug("%s:writing %d bytes of buffer to dsp\n", __func__, xfer); ret = q6asm_write(prtd->audio_client, xfer, 0, 0, NO_TIMESTAMP); if (ret < 0) { ret = -EFAULT; goto fail; } } else atomic_inc(&prtd->out_needed); atomic_dec(&prtd->out_count); } fail: return ret; } static int msm_pcm_playback_close(struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime = substream->runtime; struct snd_soc_pcm_runtime *soc_prtd = substream->private_data; struct msm_audio *prtd = runtime->private_data; int dir = 0; int ret = 0; pr_debug("%s\n", __func__); dir = IN; ret = wait_event_timeout(the_locks.eos_wait, prtd->cmd_ack, 5 * HZ); if (!ret) pr_err("%s: CMD_EOS failed\n", __func__); q6asm_cmd(prtd->audio_client, CMD_CLOSE); q6asm_audio_client_buf_free_contiguous(dir, prtd->audio_client); msm_pcm_routing_dereg_phy_stream(soc_prtd->dai_link->be_id, SNDRV_PCM_STREAM_PLAYBACK); multi_ch_pcm_audio.prtd = NULL; q6asm_audio_client_free(prtd->audio_client); kfree(prtd); return 0; } static int msm_pcm_capture_copy(struct snd_pcm_substream *substream, int channel, snd_pcm_uframes_t hwoff, void __user *buf, snd_pcm_uframes_t frames) { int ret = 0; int fbytes = 0; int xfer; char *bufptr; void *data = NULL; static uint32_t idx; 
static uint32_t size; uint32_t offset = 0; struct snd_pcm_runtime *runtime = substream->runtime; struct msm_audio *prtd = substream->runtime->private_data; pr_debug("%s\n", __func__); fbytes = frames_to_bytes(runtime, frames); pr_debug("appl_ptr %d\n", (int)runtime->control->appl_ptr); pr_debug("hw_ptr %d\n", (int)runtime->status->hw_ptr); pr_debug("avail_min %d\n", (int)runtime->control->avail_min); ret = wait_event_timeout(the_locks.read_wait, (atomic_read(&prtd->in_count)), 5 * HZ); if (!ret) { pr_debug("%s: wait_event_timeout failed\n", __func__); goto fail; } if (!atomic_read(&prtd->in_count)) { pr_debug("%s: pcm stopped in_count 0\n", __func__); return 0; } pr_debug("Checking if valid buffer is available...%08x\n", (unsigned int) data); data = q6asm_is_cpu_buf_avail(OUT, prtd->audio_client, &size, &idx); bufptr = data; pr_debug("Size = %d\n", size); pr_debug("fbytes = %d\n", fbytes); pr_debug("idx = %d\n", idx); if (bufptr) { xfer = fbytes; if (xfer > size) xfer = size; offset = in_frame_info[idx][1]; pr_debug("Offset value = %d\n", offset); if (copy_to_user(buf, bufptr+offset, xfer)) { pr_err("Failed to copy buf to user\n"); ret = -EFAULT; goto fail; } fbytes -= xfer; size -= xfer; in_frame_info[idx][1] += xfer; pr_debug("%s:fbytes = %d: size=%d: xfer=%d\n", __func__, fbytes, size, xfer); pr_debug(" Sending next buffer to dsp\n"); memset(&in_frame_info[idx], 0, sizeof(uint32_t) * 2); atomic_dec(&prtd->in_count); ret = q6asm_read(prtd->audio_client); if (ret < 0) { pr_err("q6asm read failed\n"); ret = -EFAULT; goto fail; } } else pr_err("No valid buffer\n"); pr_debug("Returning from capture_copy... 
%d\n", ret); fail: return ret; } static int msm_pcm_capture_close(struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime = substream->runtime; struct snd_soc_pcm_runtime *soc_prtd = substream->private_data; struct msm_audio *prtd = runtime->private_data; int dir = OUT; pr_debug("%s\n", __func__); q6asm_cmd(prtd->audio_client, CMD_CLOSE); q6asm_audio_client_buf_free_contiguous(dir, prtd->audio_client); msm_pcm_routing_dereg_phy_stream(soc_prtd->dai_link->be_id, SNDRV_PCM_STREAM_CAPTURE); q6asm_audio_client_free(prtd->audio_client); kfree(prtd); return 0; } static int msm_pcm_copy(struct snd_pcm_substream *substream, int a, snd_pcm_uframes_t hwoff, void __user *buf, snd_pcm_uframes_t frames) { int ret = 0; if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) ret = msm_pcm_playback_copy(substream, a, hwoff, buf, frames); else if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) ret = msm_pcm_capture_copy(substream, a, hwoff, buf, frames); return ret; } static int msm_pcm_close(struct snd_pcm_substream *substream) { int ret = 0; if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) ret = msm_pcm_playback_close(substream); else if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) ret = msm_pcm_capture_close(substream); return ret; } static int msm_pcm_prepare(struct snd_pcm_substream *substream) { int ret = 0; if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) ret = msm_pcm_playback_prepare(substream); else if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) ret = msm_pcm_capture_prepare(substream); return ret; } static snd_pcm_uframes_t msm_pcm_pointer(struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime = substream->runtime; struct msm_audio *prtd = runtime->private_data; if (prtd->pcm_irq_pos >= prtd->pcm_size) prtd->pcm_irq_pos = 0; pr_debug("pcm_irq_pos = %d\n", prtd->pcm_irq_pos); return bytes_to_frames(runtime, (prtd->pcm_irq_pos)); } static int msm_pcm_mmap(struct snd_pcm_substream *substream, struct vm_area_struct *vma) { int result = 
0; struct snd_pcm_runtime *runtime = substream->runtime; struct msm_audio *prtd = runtime->private_data; pr_debug("%s\n", __func__); prtd->mmap_flag = 1; if (runtime->dma_addr && runtime->dma_bytes) { vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); result = remap_pfn_range(vma, vma->vm_start, runtime->dma_addr >> PAGE_SHIFT, runtime->dma_bytes, vma->vm_page_prot); } else { pr_err("Physical address or size of buf is NULL"); return -EINVAL; } return result; } static int msm_pcm_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params) { struct snd_pcm_runtime *runtime = substream->runtime; struct msm_audio *prtd = runtime->private_data; struct snd_dma_buffer *dma_buf = &substream->dma_buffer; struct audio_buffer *buf; int dir, ret; if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) dir = IN; else dir = OUT; ret = q6asm_audio_client_buf_alloc_contiguous(dir, prtd->audio_client, (params_buffer_bytes(params) / params_periods(params)), params_periods(params)); if (ret < 0) { pr_err("Audio Start: Buffer Allocation failed rc = %d\n", ret); return -ENOMEM; } buf = prtd->audio_client->port[dir].buf; pr_debug("%s:buf = %p\n", __func__, buf); dma_buf->dev.type = SNDRV_DMA_TYPE_DEV; dma_buf->dev.dev = substream->pcm->card->dev; dma_buf->private_data = NULL; dma_buf->area = buf[0].data; dma_buf->addr = buf[0].phys; dma_buf->bytes = params_buffer_bytes(params); if (!dma_buf->area) return -ENOMEM; snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer); return 0; } static struct snd_pcm_ops msm_pcm_ops = { .open = msm_pcm_open, .copy = msm_pcm_copy, .hw_params = msm_pcm_hw_params, .close = msm_pcm_close, .ioctl = snd_pcm_lib_ioctl, .prepare = msm_pcm_prepare, .trigger = msm_pcm_trigger, .pointer = msm_pcm_pointer, .mmap = msm_pcm_mmap, }; static int msm_asoc_pcm_new(struct snd_soc_pcm_runtime *rtd) { struct snd_card *card = rtd->card->snd_card; int ret = 0; if (!card->dev->coherent_dma_mask) card->dev->coherent_dma_mask = DMA_BIT_MASK(32); 
return ret; } static struct snd_soc_platform_driver msm_soc_platform = { .ops = &msm_pcm_ops, .pcm_new = msm_asoc_pcm_new, }; static __devinit int msm_pcm_probe(struct platform_device *pdev) { pr_info("%s: dev name %s\n", __func__, dev_name(&pdev->dev)); return snd_soc_register_platform(&pdev->dev, &msm_soc_platform); } static int msm_pcm_remove(struct platform_device *pdev) { snd_soc_unregister_platform(&pdev->dev); return 0; } static struct platform_driver msm_pcm_driver = { .driver = { .name = "msm-multi-ch-pcm-dsp", .owner = THIS_MODULE, }, .probe = msm_pcm_probe, .remove = __devexit_p(msm_pcm_remove), }; static int __init msm_soc_platform_init(void) { init_waitqueue_head(&the_locks.enable_wait); init_waitqueue_head(&the_locks.eos_wait); init_waitqueue_head(&the_locks.write_wait); init_waitqueue_head(&the_locks.read_wait); return platform_driver_register(&msm_pcm_driver); } module_init(msm_soc_platform_init); static void __exit msm_soc_platform_exit(void) { platform_driver_unregister(&msm_pcm_driver); } module_exit(msm_soc_platform_exit); MODULE_DESCRIPTION("Multi channel PCM module platform driver"); MODULE_LICENSE("GPL v2");
gpl-2.0
shuiziliuBUPT/linuxkernel
drivers/usb/atm/usbatm.c
1191
36538
/****************************************************************************** * usbatm.c - Generic USB xDSL driver core * * Copyright (C) 2001, Alcatel * Copyright (C) 2003, Duncan Sands, SolNegro, Josep Comas * Copyright (C) 2004, David Woodhouse, Roman Kagan * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 2 of the License, or (at your option) * any later version. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., 59 * Temple Place - Suite 330, Boston, MA 02111-1307, USA. * ******************************************************************************/ /* * Written by Johan Verrept, Duncan Sands (duncan.sands@free.fr) and David Woodhouse * * 1.7+: - See the check-in logs * * 1.6: - No longer opens a connection if the firmware is not loaded * - Added support for the speedtouch 330 * - Removed the limit on the number of devices * - Module now autoloads on device plugin * - Merged relevant parts of sarlib * - Replaced the kernel thread with a tasklet * - New packet transmission code * - Changed proc file contents * - Fixed all known SMP races * - Many fixes and cleanups * - Various fixes by Oliver Neukum (oliver@neukum.name) * * 1.5A: - Version for inclusion in 2.5 series kernel * - Modifications by Richard Purdie (rpurdie@rpsys.net) * - made compatible with kernel 2.5.6 onwards by changing * usbatm_usb_send_data_context->urb to a pointer and adding code * to alloc and free it * - remove_wait_queue() added to usbatm_atm_processqueue_thread() * * 1.5: - fixed memory leak when 
atmsar_decode_aal5 returned NULL. * (reported by stephen.robinson@zen.co.uk) * * 1.4: - changed the spin_lock() under interrupt to spin_lock_irqsave() * - unlink all active send urbs of a vcc that is being closed. * * 1.3.1: - added the version number * * 1.3: - Added multiple send urb support * - fixed memory leak and vcc->tx_inuse starvation bug * when not enough memory left in vcc. * * 1.2: - Fixed race condition in usbatm_usb_send_data() * 1.1: - Turned off packet debugging * */ #include "usbatm.h" #include <asm/uaccess.h> #include <linux/crc32.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/netdevice.h> #include <linux/proc_fs.h> #include <linux/sched.h> #include <linux/signal.h> #include <linux/slab.h> #include <linux/stat.h> #include <linux/timer.h> #include <linux/wait.h> #include <linux/kthread.h> #include <linux/ratelimit.h> #ifdef VERBOSE_DEBUG static int usbatm_print_packet(struct usbatm_data *instance, const unsigned char *data, int len); #define PACKETDEBUG(arg...) usbatm_print_packet(arg) #define vdbg(arg...) dev_dbg(arg) #else #define PACKETDEBUG(arg...) #define vdbg(arg...) 
#endif #define DRIVER_AUTHOR "Johan Verrept, Duncan Sands <duncan.sands@free.fr>" #define DRIVER_VERSION "1.10" #define DRIVER_DESC "Generic USB ATM/DSL I/O, version " DRIVER_VERSION static const char usbatm_driver_name[] = "usbatm"; #define UDSL_MAX_RCV_URBS 16 #define UDSL_MAX_SND_URBS 16 #define UDSL_MAX_BUF_SIZE 65536 #define UDSL_DEFAULT_RCV_URBS 4 #define UDSL_DEFAULT_SND_URBS 4 #define UDSL_DEFAULT_RCV_BUF_SIZE 3392 /* 64 * ATM_CELL_SIZE */ #define UDSL_DEFAULT_SND_BUF_SIZE 3392 /* 64 * ATM_CELL_SIZE */ #define ATM_CELL_HEADER (ATM_CELL_SIZE - ATM_CELL_PAYLOAD) #define THROTTLE_MSECS 100 /* delay to recover processing after urb submission fails */ static unsigned int num_rcv_urbs = UDSL_DEFAULT_RCV_URBS; static unsigned int num_snd_urbs = UDSL_DEFAULT_SND_URBS; static unsigned int rcv_buf_bytes = UDSL_DEFAULT_RCV_BUF_SIZE; static unsigned int snd_buf_bytes = UDSL_DEFAULT_SND_BUF_SIZE; module_param(num_rcv_urbs, uint, S_IRUGO); MODULE_PARM_DESC(num_rcv_urbs, "Number of urbs used for reception (range: 0-" __MODULE_STRING(UDSL_MAX_RCV_URBS) ", default: " __MODULE_STRING(UDSL_DEFAULT_RCV_URBS) ")"); module_param(num_snd_urbs, uint, S_IRUGO); MODULE_PARM_DESC(num_snd_urbs, "Number of urbs used for transmission (range: 0-" __MODULE_STRING(UDSL_MAX_SND_URBS) ", default: " __MODULE_STRING(UDSL_DEFAULT_SND_URBS) ")"); module_param(rcv_buf_bytes, uint, S_IRUGO); MODULE_PARM_DESC(rcv_buf_bytes, "Size of the buffers used for reception, in bytes (range: 1-" __MODULE_STRING(UDSL_MAX_BUF_SIZE) ", default: " __MODULE_STRING(UDSL_DEFAULT_RCV_BUF_SIZE) ")"); module_param(snd_buf_bytes, uint, S_IRUGO); MODULE_PARM_DESC(snd_buf_bytes, "Size of the buffers used for transmission, in bytes (range: 1-" __MODULE_STRING(UDSL_MAX_BUF_SIZE) ", default: " __MODULE_STRING(UDSL_DEFAULT_SND_BUF_SIZE) ")"); /* receive */ struct usbatm_vcc_data { /* vpi/vci lookup */ struct list_head list; short vpi; int vci; struct atm_vcc *vcc; /* raw cell reassembly */ struct sk_buff *sarb; }; /* send */ 
struct usbatm_control { struct atm_skb_data atm; u32 len; u32 crc; }; #define UDSL_SKB(x) ((struct usbatm_control *)(x)->cb) /* ATM */ static void usbatm_atm_dev_close(struct atm_dev *atm_dev); static int usbatm_atm_open(struct atm_vcc *vcc); static void usbatm_atm_close(struct atm_vcc *vcc); static int usbatm_atm_ioctl(struct atm_dev *atm_dev, unsigned int cmd, void __user *arg); static int usbatm_atm_send(struct atm_vcc *vcc, struct sk_buff *skb); static int usbatm_atm_proc_read(struct atm_dev *atm_dev, loff_t *pos, char *page); static struct atmdev_ops usbatm_atm_devops = { .dev_close = usbatm_atm_dev_close, .open = usbatm_atm_open, .close = usbatm_atm_close, .ioctl = usbatm_atm_ioctl, .send = usbatm_atm_send, .proc_read = usbatm_atm_proc_read, .owner = THIS_MODULE, }; /*********** ** misc ** ***********/ static inline unsigned int usbatm_pdu_length(unsigned int length) { length += ATM_CELL_PAYLOAD - 1 + ATM_AAL5_TRAILER; return length - length % ATM_CELL_PAYLOAD; } static inline void usbatm_pop(struct atm_vcc *vcc, struct sk_buff *skb) { if (vcc->pop) vcc->pop(vcc, skb); else dev_kfree_skb_any(skb); } /*********** ** urbs ** ************/ static struct urb *usbatm_pop_urb(struct usbatm_channel *channel) { struct urb *urb; spin_lock_irq(&channel->lock); if (list_empty(&channel->list)) { spin_unlock_irq(&channel->lock); return NULL; } urb = list_entry(channel->list.next, struct urb, urb_list); list_del(&urb->urb_list); spin_unlock_irq(&channel->lock); return urb; } static int usbatm_submit_urb(struct urb *urb) { struct usbatm_channel *channel = urb->context; int ret; /* vdbg("%s: submitting urb 0x%p, size %u", __func__, urb, urb->transfer_buffer_length); */ ret = usb_submit_urb(urb, GFP_ATOMIC); if (ret) { if (printk_ratelimit()) atm_warn(channel->usbatm, "%s: urb 0x%p submission failed (%d)!\n", __func__, urb, ret); /* consider all errors transient and return the buffer back to the queue */ urb->status = -EAGAIN; spin_lock_irq(&channel->lock); /* must add to the 
front when sending; doesn't matter when receiving */ list_add(&urb->urb_list, &channel->list); spin_unlock_irq(&channel->lock); /* make sure the channel doesn't stall */ mod_timer(&channel->delay, jiffies + msecs_to_jiffies(THROTTLE_MSECS)); } return ret; } static void usbatm_complete(struct urb *urb) { struct usbatm_channel *channel = urb->context; unsigned long flags; int status = urb->status; /* vdbg("%s: urb 0x%p, status %d, actual_length %d", __func__, urb, status, urb->actual_length); */ /* usually in_interrupt(), but not always */ spin_lock_irqsave(&channel->lock, flags); /* must add to the back when receiving; doesn't matter when sending */ list_add_tail(&urb->urb_list, &channel->list); spin_unlock_irqrestore(&channel->lock, flags); if (unlikely(status) && (!(channel->usbatm->flags & UDSL_IGNORE_EILSEQ) || status != -EILSEQ)) { if (status == -ESHUTDOWN) return; if (printk_ratelimit()) atm_warn(channel->usbatm, "%s: urb 0x%p failed (%d)!\n", __func__, urb, status); /* throttle processing in case of an error */ mod_timer(&channel->delay, jiffies + msecs_to_jiffies(THROTTLE_MSECS)); } else tasklet_schedule(&channel->tasklet); } /************* ** decode ** *************/ static inline struct usbatm_vcc_data *usbatm_find_vcc(struct usbatm_data *instance, short vpi, int vci) { struct usbatm_vcc_data *vcc_data; list_for_each_entry(vcc_data, &instance->vcc_list, list) if ((vcc_data->vci == vci) && (vcc_data->vpi == vpi)) return vcc_data; return NULL; } static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char *source) { struct atm_vcc *vcc; struct sk_buff *sarb; short vpi = ((source[0] & 0x0f) << 4) | (source[1] >> 4); int vci = ((source[1] & 0x0f) << 12) | (source[2] << 4) | (source[3] >> 4); u8 pti = ((source[3] & 0xe) >> 1); if ((vci != instance->cached_vci) || (vpi != instance->cached_vpi)) { instance->cached_vpi = vpi; instance->cached_vci = vci; instance->cached_vcc = usbatm_find_vcc(instance, vpi, vci); if (!instance->cached_vcc) 
atm_rldbg(instance, "%s: unknown vpi/vci (%hd/%d)!\n", __func__, vpi, vci); } if (!instance->cached_vcc) return; vcc = instance->cached_vcc->vcc; /* OAM F5 end-to-end */ if (pti == ATM_PTI_E2EF5) { if (printk_ratelimit()) atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n", __func__, vpi, vci); atomic_inc(&vcc->stats->rx_err); return; } sarb = instance->cached_vcc->sarb; if (sarb->tail + ATM_CELL_PAYLOAD > sarb->end) { atm_rldbg(instance, "%s: buffer overrun (sarb->len %u, vcc: 0x%p)!\n", __func__, sarb->len, vcc); /* discard cells already received */ skb_trim(sarb, 0); } memcpy(skb_tail_pointer(sarb), source + ATM_CELL_HEADER, ATM_CELL_PAYLOAD); __skb_put(sarb, ATM_CELL_PAYLOAD); if (pti & 1) { struct sk_buff *skb; unsigned int length; unsigned int pdu_length; length = (source[ATM_CELL_SIZE - 6] << 8) + source[ATM_CELL_SIZE - 5]; /* guard against overflow */ if (length > ATM_MAX_AAL5_PDU) { atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n", __func__, length, vcc); atomic_inc(&vcc->stats->rx_err); goto out; } pdu_length = usbatm_pdu_length(length); if (sarb->len < pdu_length) { atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n", __func__, pdu_length, sarb->len, vcc); atomic_inc(&vcc->stats->rx_err); goto out; } if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) { atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n", __func__, vcc); atomic_inc(&vcc->stats->rx_err); goto out; } vdbg(&instance->usb_intf->dev, "%s: got packet (length: %u, pdu_length: %u, vcc: 0x%p)", __func__, length, pdu_length, vcc); if (!(skb = dev_alloc_skb(length))) { if (printk_ratelimit()) atm_err(instance, "%s: no memory for skb (length: %u)!\n", __func__, length); atomic_inc(&vcc->stats->rx_drop); goto out; } vdbg(&instance->usb_intf->dev, "%s: allocated new sk_buff (skb: 0x%p, skb->truesize: %u)", __func__, skb, skb->truesize); if (!atm_charge(vcc, skb->truesize)) { atm_rldbg(instance, "%s: failed atm_charge 
(skb->truesize: %u)!\n", __func__, skb->truesize); dev_kfree_skb_any(skb); goto out; /* atm_charge increments rx_drop */ } skb_copy_to_linear_data(skb, skb_tail_pointer(sarb) - pdu_length, length); __skb_put(skb, length); vdbg(&instance->usb_intf->dev, "%s: sending skb 0x%p, skb->len %u, skb->truesize %u", __func__, skb, skb->len, skb->truesize); PACKETDEBUG(instance, skb->data, skb->len); vcc->push(vcc, skb); atomic_inc(&vcc->stats->rx); out: skb_trim(sarb, 0); } } static void usbatm_extract_cells(struct usbatm_data *instance, unsigned char *source, unsigned int avail_data) { unsigned int stride = instance->rx_channel.stride; unsigned int buf_usage = instance->buf_usage; /* extract cells from incoming data, taking into account that * the length of avail data may not be a multiple of stride */ if (buf_usage > 0) { /* we have a partially received atm cell */ unsigned char *cell_buf = instance->cell_buf; unsigned int space_left = stride - buf_usage; if (avail_data >= space_left) { /* add new data and process cell */ memcpy(cell_buf + buf_usage, source, space_left); source += space_left; avail_data -= space_left; usbatm_extract_one_cell(instance, cell_buf); instance->buf_usage = 0; } else { /* not enough data to fill the cell */ memcpy(cell_buf + buf_usage, source, avail_data); instance->buf_usage = buf_usage + avail_data; return; } } for (; avail_data >= stride; avail_data -= stride, source += stride) usbatm_extract_one_cell(instance, source); if (avail_data > 0) { /* length was not a multiple of stride - * save remaining data for next call */ memcpy(instance->cell_buf, source, avail_data); instance->buf_usage = avail_data; } } /************* ** encode ** *************/ static unsigned int usbatm_write_cells(struct usbatm_data *instance, struct sk_buff *skb, u8 *target, unsigned int avail_space) { struct usbatm_control *ctrl = UDSL_SKB(skb); struct atm_vcc *vcc = ctrl->atm.vcc; unsigned int bytes_written; unsigned int stride = instance->tx_channel.stride; for 
(bytes_written = 0; bytes_written < avail_space && ctrl->len; bytes_written += stride, target += stride) { unsigned int data_len = min_t(unsigned int, skb->len, ATM_CELL_PAYLOAD); unsigned int left = ATM_CELL_PAYLOAD - data_len; u8 *ptr = target; ptr[0] = vcc->vpi >> 4; ptr[1] = (vcc->vpi << 4) | (vcc->vci >> 12); ptr[2] = vcc->vci >> 4; ptr[3] = vcc->vci << 4; ptr[4] = 0xec; ptr += ATM_CELL_HEADER; skb_copy_from_linear_data(skb, ptr, data_len); ptr += data_len; __skb_pull(skb, data_len); if (!left) continue; memset(ptr, 0, left); if (left >= ATM_AAL5_TRAILER) { /* trailer will go in this cell */ u8 *trailer = target + ATM_CELL_SIZE - ATM_AAL5_TRAILER; /* trailer[0] = 0; UU = 0 */ /* trailer[1] = 0; CPI = 0 */ trailer[2] = ctrl->len >> 8; trailer[3] = ctrl->len; ctrl->crc = ~crc32_be(ctrl->crc, ptr, left - 4); trailer[4] = ctrl->crc >> 24; trailer[5] = ctrl->crc >> 16; trailer[6] = ctrl->crc >> 8; trailer[7] = ctrl->crc; target[3] |= 0x2; /* adjust PTI */ ctrl->len = 0; /* tag this skb finished */ } else ctrl->crc = crc32_be(ctrl->crc, ptr, left); } return bytes_written; } /************** ** receive ** **************/ static void usbatm_rx_process(unsigned long data) { struct usbatm_data *instance = (struct usbatm_data *)data; struct urb *urb; while ((urb = usbatm_pop_urb(&instance->rx_channel))) { vdbg(&instance->usb_intf->dev, "%s: processing urb 0x%p", __func__, urb); if (usb_pipeisoc(urb->pipe)) { unsigned char *merge_start = NULL; unsigned int merge_length = 0; const unsigned int packet_size = instance->rx_channel.packet_size; int i; for (i = 0; i < urb->number_of_packets; i++) { if (!urb->iso_frame_desc[i].status) { unsigned int actual_length = urb->iso_frame_desc[i].actual_length; if (!merge_length) merge_start = (unsigned char *)urb->transfer_buffer + urb->iso_frame_desc[i].offset; merge_length += actual_length; if (merge_length && (actual_length < packet_size)) { usbatm_extract_cells(instance, merge_start, merge_length); merge_length = 0; } } else { 
atm_rldbg(instance, "%s: status %d in frame %d!\n", __func__, urb->status, i); if (merge_length) usbatm_extract_cells(instance, merge_start, merge_length); merge_length = 0; instance->buf_usage = 0; } } if (merge_length) usbatm_extract_cells(instance, merge_start, merge_length); } else if (!urb->status) usbatm_extract_cells(instance, urb->transfer_buffer, urb->actual_length); else instance->buf_usage = 0; if (usbatm_submit_urb(urb)) return; } } /*********** ** send ** ***********/ static void usbatm_tx_process(unsigned long data) { struct usbatm_data *instance = (struct usbatm_data *)data; struct sk_buff *skb = instance->current_skb; struct urb *urb = NULL; const unsigned int buf_size = instance->tx_channel.buf_size; unsigned int bytes_written = 0; u8 *buffer = NULL; if (!skb) skb = skb_dequeue(&instance->sndqueue); while (skb) { if (!urb) { urb = usbatm_pop_urb(&instance->tx_channel); if (!urb) break; /* no more senders */ buffer = urb->transfer_buffer; bytes_written = (urb->status == -EAGAIN) ? 
urb->transfer_buffer_length : 0; } bytes_written += usbatm_write_cells(instance, skb, buffer + bytes_written, buf_size - bytes_written); vdbg(&instance->usb_intf->dev, "%s: wrote %u bytes from skb 0x%p to urb 0x%p", __func__, bytes_written, skb, urb); if (!UDSL_SKB(skb)->len) { struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc; usbatm_pop(vcc, skb); atomic_inc(&vcc->stats->tx); skb = skb_dequeue(&instance->sndqueue); } if (bytes_written == buf_size || (!skb && bytes_written)) { urb->transfer_buffer_length = bytes_written; if (usbatm_submit_urb(urb)) break; urb = NULL; } } instance->current_skb = skb; } static void usbatm_cancel_send(struct usbatm_data *instance, struct atm_vcc *vcc) { struct sk_buff *skb, *n; spin_lock_irq(&instance->sndqueue.lock); skb_queue_walk_safe(&instance->sndqueue, skb, n) { if (UDSL_SKB(skb)->atm.vcc == vcc) { atm_dbg(instance, "%s: popping skb 0x%p\n", __func__, skb); __skb_unlink(skb, &instance->sndqueue); usbatm_pop(vcc, skb); } } spin_unlock_irq(&instance->sndqueue.lock); tasklet_disable(&instance->tx_channel.tasklet); if ((skb = instance->current_skb) && (UDSL_SKB(skb)->atm.vcc == vcc)) { atm_dbg(instance, "%s: popping current skb (0x%p)\n", __func__, skb); instance->current_skb = NULL; usbatm_pop(vcc, skb); } tasklet_enable(&instance->tx_channel.tasklet); } static int usbatm_atm_send(struct atm_vcc *vcc, struct sk_buff *skb) { struct usbatm_data *instance = vcc->dev->dev_data; struct usbatm_control *ctrl = UDSL_SKB(skb); int err; /* racy disconnection check - fine */ if (!instance || instance->disconnected) { #ifdef VERBOSE_DEBUG printk_ratelimited(KERN_DEBUG "%s: %s!\n", __func__, instance ? 
"disconnected" : "NULL instance"); #endif err = -ENODEV; goto fail; } if (vcc->qos.aal != ATM_AAL5) { atm_rldbg(instance, "%s: unsupported ATM type %d!\n", __func__, vcc->qos.aal); err = -EINVAL; goto fail; } if (skb->len > ATM_MAX_AAL5_PDU) { atm_rldbg(instance, "%s: packet too long (%d vs %d)!\n", __func__, skb->len, ATM_MAX_AAL5_PDU); err = -EINVAL; goto fail; } PACKETDEBUG(instance, skb->data, skb->len); /* initialize the control block */ ctrl->atm.vcc = vcc; ctrl->len = skb->len; ctrl->crc = crc32_be(~0, skb->data, skb->len); skb_queue_tail(&instance->sndqueue, skb); tasklet_schedule(&instance->tx_channel.tasklet); return 0; fail: usbatm_pop(vcc, skb); return err; } /******************** ** bean counting ** ********************/ static void usbatm_destroy_instance(struct kref *kref) { struct usbatm_data *instance = container_of(kref, struct usbatm_data, refcount); tasklet_kill(&instance->rx_channel.tasklet); tasklet_kill(&instance->tx_channel.tasklet); usb_put_dev(instance->usb_dev); kfree(instance); } static void usbatm_get_instance(struct usbatm_data *instance) { kref_get(&instance->refcount); } static void usbatm_put_instance(struct usbatm_data *instance) { kref_put(&instance->refcount, usbatm_destroy_instance); } /********** ** ATM ** **********/ static void usbatm_atm_dev_close(struct atm_dev *atm_dev) { struct usbatm_data *instance = atm_dev->dev_data; if (!instance) return; atm_dev->dev_data = NULL; /* catch bugs */ usbatm_put_instance(instance); /* taken in usbatm_atm_init */ } static int usbatm_atm_proc_read(struct atm_dev *atm_dev, loff_t *pos, char *page) { struct usbatm_data *instance = atm_dev->dev_data; int left = *pos; if (!instance) return -ENODEV; if (!left--) return sprintf(page, "%s\n", instance->description); if (!left--) return sprintf(page, "MAC: %pM\n", atm_dev->esi); if (!left--) return sprintf(page, "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n", atomic_read(&atm_dev->stats.aal5.tx), atomic_read(&atm_dev->stats.aal5.tx_err), 
atomic_read(&atm_dev->stats.aal5.rx), atomic_read(&atm_dev->stats.aal5.rx_err), atomic_read(&atm_dev->stats.aal5.rx_drop)); if (!left--) { if (instance->disconnected) return sprintf(page, "Disconnected\n"); else switch (atm_dev->signal) { case ATM_PHY_SIG_FOUND: return sprintf(page, "Line up\n"); case ATM_PHY_SIG_LOST: return sprintf(page, "Line down\n"); default: return sprintf(page, "Line state unknown\n"); } } return 0; } static int usbatm_atm_open(struct atm_vcc *vcc) { struct usbatm_data *instance = vcc->dev->dev_data; struct usbatm_vcc_data *new = NULL; int ret; int vci = vcc->vci; short vpi = vcc->vpi; if (!instance) return -ENODEV; /* only support AAL5 */ if ((vcc->qos.aal != ATM_AAL5)) { atm_warn(instance, "%s: unsupported ATM type %d!\n", __func__, vcc->qos.aal); return -EINVAL; } /* sanity checks */ if ((vcc->qos.rxtp.max_sdu < 0) || (vcc->qos.rxtp.max_sdu > ATM_MAX_AAL5_PDU)) { atm_dbg(instance, "%s: max_sdu %d out of range!\n", __func__, vcc->qos.rxtp.max_sdu); return -EINVAL; } mutex_lock(&instance->serialize); /* vs self, usbatm_atm_close, usbatm_usb_disconnect */ if (instance->disconnected) { atm_dbg(instance, "%s: disconnected!\n", __func__); ret = -ENODEV; goto fail; } if (usbatm_find_vcc(instance, vpi, vci)) { atm_dbg(instance, "%s: %hd/%d already in use!\n", __func__, vpi, vci); ret = -EADDRINUSE; goto fail; } if (!(new = kzalloc(sizeof(struct usbatm_vcc_data), GFP_KERNEL))) { atm_err(instance, "%s: no memory for vcc_data!\n", __func__); ret = -ENOMEM; goto fail; } new->vcc = vcc; new->vpi = vpi; new->vci = vci; new->sarb = alloc_skb(usbatm_pdu_length(vcc->qos.rxtp.max_sdu), GFP_KERNEL); if (!new->sarb) { atm_err(instance, "%s: no memory for SAR buffer!\n", __func__); ret = -ENOMEM; goto fail; } vcc->dev_data = new; tasklet_disable(&instance->rx_channel.tasklet); instance->cached_vcc = new; instance->cached_vpi = vpi; instance->cached_vci = vci; list_add(&new->list, &instance->vcc_list); tasklet_enable(&instance->rx_channel.tasklet); 
set_bit(ATM_VF_ADDR, &vcc->flags); set_bit(ATM_VF_PARTIAL, &vcc->flags); set_bit(ATM_VF_READY, &vcc->flags); mutex_unlock(&instance->serialize); atm_dbg(instance, "%s: allocated vcc data 0x%p\n", __func__, new); return 0; fail: kfree(new); mutex_unlock(&instance->serialize); return ret; } static void usbatm_atm_close(struct atm_vcc *vcc) { struct usbatm_data *instance = vcc->dev->dev_data; struct usbatm_vcc_data *vcc_data = vcc->dev_data; if (!instance || !vcc_data) return; usbatm_cancel_send(instance, vcc); mutex_lock(&instance->serialize); /* vs self, usbatm_atm_open, usbatm_usb_disconnect */ tasklet_disable(&instance->rx_channel.tasklet); if (instance->cached_vcc == vcc_data) { instance->cached_vcc = NULL; instance->cached_vpi = ATM_VPI_UNSPEC; instance->cached_vci = ATM_VCI_UNSPEC; } list_del(&vcc_data->list); tasklet_enable(&instance->rx_channel.tasklet); kfree_skb(vcc_data->sarb); vcc_data->sarb = NULL; kfree(vcc_data); vcc->dev_data = NULL; vcc->vpi = ATM_VPI_UNSPEC; vcc->vci = ATM_VCI_UNSPEC; clear_bit(ATM_VF_READY, &vcc->flags); clear_bit(ATM_VF_PARTIAL, &vcc->flags); clear_bit(ATM_VF_ADDR, &vcc->flags); mutex_unlock(&instance->serialize); } static int usbatm_atm_ioctl(struct atm_dev *atm_dev, unsigned int cmd, void __user *arg) { struct usbatm_data *instance = atm_dev->dev_data; if (!instance || instance->disconnected) return -ENODEV; switch (cmd) { case ATM_QUERYLOOP: return put_user(ATM_LM_NONE, (int __user *)arg) ? -EFAULT : 0; default: return -ENOIOCTLCMD; } } static int usbatm_atm_init(struct usbatm_data *instance) { struct atm_dev *atm_dev; int ret, i; /* ATM init. The ATM initialization scheme suffers from an intrinsic race * condition: callbacks we register can be executed at once, before we have * initialized the struct atm_dev. To protect against this, all callbacks * abort if atm_dev->dev_data is NULL. 
*/ atm_dev = atm_dev_register(instance->driver_name, &instance->usb_intf->dev, &usbatm_atm_devops, -1, NULL); if (!atm_dev) { usb_err(instance, "%s: failed to register ATM device!\n", __func__); return -1; } instance->atm_dev = atm_dev; atm_dev->ci_range.vpi_bits = ATM_CI_MAX; atm_dev->ci_range.vci_bits = ATM_CI_MAX; atm_dev->signal = ATM_PHY_SIG_UNKNOWN; /* temp init ATM device, set to 128kbit */ atm_dev->link_rate = 128 * 1000 / 424; if (instance->driver->atm_start && ((ret = instance->driver->atm_start(instance, atm_dev)) < 0)) { atm_err(instance, "%s: atm_start failed: %d!\n", __func__, ret); goto fail; } usbatm_get_instance(instance); /* dropped in usbatm_atm_dev_close */ /* ready for ATM callbacks */ mb(); atm_dev->dev_data = instance; /* submit all rx URBs */ for (i = 0; i < num_rcv_urbs; i++) usbatm_submit_urb(instance->urbs[i]); return 0; fail: instance->atm_dev = NULL; atm_dev_deregister(atm_dev); /* usbatm_atm_dev_close will eventually be called */ return ret; } /********** ** USB ** **********/ static int usbatm_do_heavy_init(void *arg) { struct usbatm_data *instance = arg; int ret; allow_signal(SIGTERM); complete(&instance->thread_started); ret = instance->driver->heavy_init(instance, instance->usb_intf); if (!ret) ret = usbatm_atm_init(instance); mutex_lock(&instance->serialize); instance->thread = NULL; mutex_unlock(&instance->serialize); complete_and_exit(&instance->thread_exited, ret); } static int usbatm_heavy_init(struct usbatm_data *instance) { struct task_struct *t; t = kthread_create(usbatm_do_heavy_init, instance, "%s", instance->driver->driver_name); if (IS_ERR(t)) { usb_err(instance, "%s: failed to create kernel_thread (%ld)!\n", __func__, PTR_ERR(t)); return PTR_ERR(t); } instance->thread = t; wake_up_process(t); wait_for_completion(&instance->thread_started); return 0; } static void usbatm_tasklet_schedule(unsigned long data) { tasklet_schedule((struct tasklet_struct *) data); } static void usbatm_init_channel(struct usbatm_channel 
*channel) { spin_lock_init(&channel->lock); INIT_LIST_HEAD(&channel->list); channel->delay.function = usbatm_tasklet_schedule; channel->delay.data = (unsigned long) &channel->tasklet; init_timer(&channel->delay); } int usbatm_usb_probe(struct usb_interface *intf, const struct usb_device_id *id, struct usbatm_driver *driver) { struct device *dev = &intf->dev; struct usb_device *usb_dev = interface_to_usbdev(intf); struct usbatm_data *instance; char *buf; int error = -ENOMEM; int i, length; unsigned int maxpacket, num_packets; /* instance init */ instance = kzalloc(sizeof(*instance) + sizeof(struct urb *) * (num_rcv_urbs + num_snd_urbs), GFP_KERNEL); if (!instance) { dev_err(dev, "%s: no memory for instance data!\n", __func__); return -ENOMEM; } /* public fields */ instance->driver = driver; strlcpy(instance->driver_name, driver->driver_name, sizeof(instance->driver_name)); instance->usb_dev = usb_dev; instance->usb_intf = intf; buf = instance->description; length = sizeof(instance->description); if ((i = usb_string(usb_dev, usb_dev->descriptor.iProduct, buf, length)) < 0) goto bind; buf += i; length -= i; i = scnprintf(buf, length, " ("); buf += i; length -= i; if (length <= 0 || (i = usb_make_path(usb_dev, buf, length)) < 0) goto bind; buf += i; length -= i; snprintf(buf, length, ")"); bind: if (driver->bind && (error = driver->bind(instance, intf, id)) < 0) { dev_err(dev, "%s: bind failed: %d!\n", __func__, error); goto fail_free; } /* private fields */ kref_init(&instance->refcount); /* dropped in usbatm_usb_disconnect */ mutex_init(&instance->serialize); instance->thread = NULL; init_completion(&instance->thread_started); init_completion(&instance->thread_exited); INIT_LIST_HEAD(&instance->vcc_list); skb_queue_head_init(&instance->sndqueue); usbatm_init_channel(&instance->rx_channel); usbatm_init_channel(&instance->tx_channel); tasklet_init(&instance->rx_channel.tasklet, usbatm_rx_process, (unsigned long)instance); tasklet_init(&instance->tx_channel.tasklet, 
usbatm_tx_process, (unsigned long)instance); instance->rx_channel.stride = ATM_CELL_SIZE + driver->rx_padding; instance->tx_channel.stride = ATM_CELL_SIZE + driver->tx_padding; instance->rx_channel.usbatm = instance->tx_channel.usbatm = instance; if ((instance->flags & UDSL_USE_ISOC) && driver->isoc_in) instance->rx_channel.endpoint = usb_rcvisocpipe(usb_dev, driver->isoc_in); else instance->rx_channel.endpoint = usb_rcvbulkpipe(usb_dev, driver->bulk_in); instance->tx_channel.endpoint = usb_sndbulkpipe(usb_dev, driver->bulk_out); /* tx buffer size must be a positive multiple of the stride */ instance->tx_channel.buf_size = max(instance->tx_channel.stride, snd_buf_bytes - (snd_buf_bytes % instance->tx_channel.stride)); /* rx buffer size must be a positive multiple of the endpoint maxpacket */ maxpacket = usb_maxpacket(usb_dev, instance->rx_channel.endpoint, 0); if ((maxpacket < 1) || (maxpacket > UDSL_MAX_BUF_SIZE)) { dev_err(dev, "%s: invalid endpoint %02x!\n", __func__, usb_pipeendpoint(instance->rx_channel.endpoint)); error = -EINVAL; goto fail_unbind; } num_packets = max(1U, (rcv_buf_bytes + maxpacket / 2) / maxpacket); /* round */ if (num_packets * maxpacket > UDSL_MAX_BUF_SIZE) num_packets--; instance->rx_channel.buf_size = num_packets * maxpacket; instance->rx_channel.packet_size = maxpacket; for (i = 0; i < 2; i++) { struct usbatm_channel *channel = i ? &instance->tx_channel : &instance->rx_channel; dev_dbg(dev, "%s: using %d byte buffer for %s channel 0x%p\n", __func__, channel->buf_size, i ? "tx" : "rx", channel); } /* initialize urbs */ for (i = 0; i < num_rcv_urbs + num_snd_urbs; i++) { u8 *buffer; struct usbatm_channel *channel = i < num_rcv_urbs ? &instance->rx_channel : &instance->tx_channel; struct urb *urb; unsigned int iso_packets = usb_pipeisoc(channel->endpoint) ? 
channel->buf_size / channel->packet_size : 0; urb = usb_alloc_urb(iso_packets, GFP_KERNEL); if (!urb) { dev_err(dev, "%s: no memory for urb %d!\n", __func__, i); error = -ENOMEM; goto fail_unbind; } instance->urbs[i] = urb; /* zero the tx padding to avoid leaking information */ buffer = kzalloc(channel->buf_size, GFP_KERNEL); if (!buffer) { dev_err(dev, "%s: no memory for buffer %d!\n", __func__, i); error = -ENOMEM; goto fail_unbind; } usb_fill_bulk_urb(urb, instance->usb_dev, channel->endpoint, buffer, channel->buf_size, usbatm_complete, channel); if (iso_packets) { int j; urb->interval = 1; urb->transfer_flags = URB_ISO_ASAP; urb->number_of_packets = iso_packets; for (j = 0; j < iso_packets; j++) { urb->iso_frame_desc[j].offset = channel->packet_size * j; urb->iso_frame_desc[j].length = channel->packet_size; } } /* put all tx URBs on the list of spares */ if (i >= num_rcv_urbs) list_add_tail(&urb->urb_list, &channel->list); vdbg(&intf->dev, "%s: alloced buffer 0x%p buf size %u urb 0x%p", __func__, urb->transfer_buffer, urb->transfer_buffer_length, urb); } instance->cached_vpi = ATM_VPI_UNSPEC; instance->cached_vci = ATM_VCI_UNSPEC; instance->cell_buf = kmalloc(instance->rx_channel.stride, GFP_KERNEL); if (!instance->cell_buf) { dev_err(dev, "%s: no memory for cell buffer!\n", __func__); error = -ENOMEM; goto fail_unbind; } if (!(instance->flags & UDSL_SKIP_HEAVY_INIT) && driver->heavy_init) { error = usbatm_heavy_init(instance); } else { complete(&instance->thread_exited); /* pretend that heavy_init was run */ error = usbatm_atm_init(instance); } if (error < 0) goto fail_unbind; usb_get_dev(usb_dev); usb_set_intfdata(intf, instance); return 0; fail_unbind: if (instance->driver->unbind) instance->driver->unbind(instance, intf); fail_free: kfree(instance->cell_buf); for (i = 0; i < num_rcv_urbs + num_snd_urbs; i++) { if (instance->urbs[i]) kfree(instance->urbs[i]->transfer_buffer); usb_free_urb(instance->urbs[i]); } kfree(instance); return error; } 
EXPORT_SYMBOL_GPL(usbatm_usb_probe); void usbatm_usb_disconnect(struct usb_interface *intf) { struct device *dev = &intf->dev; struct usbatm_data *instance = usb_get_intfdata(intf); struct usbatm_vcc_data *vcc_data; int i; if (!instance) { dev_dbg(dev, "%s: NULL instance!\n", __func__); return; } usb_set_intfdata(intf, NULL); mutex_lock(&instance->serialize); instance->disconnected = 1; if (instance->thread != NULL) send_sig(SIGTERM, instance->thread, 1); mutex_unlock(&instance->serialize); wait_for_completion(&instance->thread_exited); mutex_lock(&instance->serialize); list_for_each_entry(vcc_data, &instance->vcc_list, list) vcc_release_async(vcc_data->vcc, -EPIPE); mutex_unlock(&instance->serialize); tasklet_disable(&instance->rx_channel.tasklet); tasklet_disable(&instance->tx_channel.tasklet); for (i = 0; i < num_rcv_urbs + num_snd_urbs; i++) usb_kill_urb(instance->urbs[i]); del_timer_sync(&instance->rx_channel.delay); del_timer_sync(&instance->tx_channel.delay); /* turn usbatm_[rt]x_process into something close to a no-op */ /* no need to take the spinlock */ INIT_LIST_HEAD(&instance->rx_channel.list); INIT_LIST_HEAD(&instance->tx_channel.list); tasklet_enable(&instance->rx_channel.tasklet); tasklet_enable(&instance->tx_channel.tasklet); if (instance->atm_dev && instance->driver->atm_stop) instance->driver->atm_stop(instance, instance->atm_dev); if (instance->driver->unbind) instance->driver->unbind(instance, intf); instance->driver_data = NULL; for (i = 0; i < num_rcv_urbs + num_snd_urbs; i++) { kfree(instance->urbs[i]->transfer_buffer); usb_free_urb(instance->urbs[i]); } kfree(instance->cell_buf); /* ATM finalize */ if (instance->atm_dev) { atm_dev_deregister(instance->atm_dev); instance->atm_dev = NULL; } usbatm_put_instance(instance); /* taken in usbatm_usb_probe */ } EXPORT_SYMBOL_GPL(usbatm_usb_disconnect); /*********** ** init ** ***********/ static int __init usbatm_usb_init(void) { if (sizeof(struct usbatm_control) > FIELD_SIZEOF(struct sk_buff, cb)) { 
printk(KERN_ERR "%s unusable with this kernel!\n", usbatm_driver_name); return -EIO; } if ((num_rcv_urbs > UDSL_MAX_RCV_URBS) || (num_snd_urbs > UDSL_MAX_SND_URBS) || (rcv_buf_bytes < 1) || (rcv_buf_bytes > UDSL_MAX_BUF_SIZE) || (snd_buf_bytes < 1) || (snd_buf_bytes > UDSL_MAX_BUF_SIZE)) return -EINVAL; return 0; } module_init(usbatm_usb_init); static void __exit usbatm_usb_exit(void) { } module_exit(usbatm_usb_exit); MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL"); MODULE_VERSION(DRIVER_VERSION); /************ ** debug ** ************/ #ifdef VERBOSE_DEBUG static int usbatm_print_packet(struct usbatm_data *instance, const unsigned char *data, int len) { unsigned char buffer[256]; int i = 0, j = 0; for (i = 0; i < len;) { buffer[0] = '\0'; sprintf(buffer, "%.3d :", i); for (j = 0; (j < 16) && (i < len); j++, i++) sprintf(buffer, "%s %2.2x", buffer, data[i]); dev_dbg(&instance->usb_intf->dev, "%s", buffer); } return i; } #endif
gpl-2.0
TeamRefuse/android_kernel_samsung_dempsey
arch/arm/mach-at91/board-usb-a9263.c
1447
5692
/* * linux/arch/arm/mach-at91/board-usb-a9263.c * * Copyright (C) 2005 SAN People * Copyright (C) 2007 Atmel Corporation. * Copyright (C) 2007 Calao-systems * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/types.h> #include <linux/init.h> #include <linux/mm.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/spi/spi.h> #include <linux/gpio_keys.h> #include <linux/input.h> #include <asm/setup.h> #include <asm/mach-types.h> #include <asm/irq.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> #include <asm/mach/irq.h> #include <mach/hardware.h> #include <mach/board.h> #include <mach/gpio.h> #include <mach/at91sam9_smc.h> #include <mach/at91_shdwc.h> #include "sam9_smc.h" #include "generic.h" static void __init ek_map_io(void) { /* Initialize processor: 12.00 MHz crystal */ at91sam9263_initialize(12000000); /* DBGU on ttyS0. (Rx & Tx only) */ at91_register_uart(0, 0, 0); /* set serial console to ttyS0 (ie, DBGU) */ at91_set_serial_console(0); } static void __init ek_init_irq(void) { at91sam9263_init_interrupts(NULL); } /* * USB Host port */ static struct at91_usbh_data __initdata ek_usbh_data = { .ports = 2, }; /* * USB Device port */ static struct at91_udc_data __initdata ek_udc_data = { .vbus_pin = AT91_PIN_PB11, .pullup_pin = 0, /* pull-up driven by UDC */ }; /* * SPI devices. 
*/ static struct spi_board_info ek_spi_devices[] = { #if !defined(CONFIG_MMC_AT91) { /* DataFlash chip */ .modalias = "mtd_dataflash", .chip_select = 0, .max_speed_hz = 15 * 1000 * 1000, .bus_num = 0, } #endif }; /* * MACB Ethernet device */ static struct at91_eth_data __initdata ek_macb_data = { .phy_irq_pin = AT91_PIN_PE31, .is_rmii = 1, }; /* * NAND flash */ static struct mtd_partition __initdata ek_nand_partition[] = { { .name = "Linux Kernel", .offset = 0, .size = SZ_16M, }, { .name = "Root FS", .offset = MTDPART_OFS_NXTBLK, .size = 120 * SZ_1M, }, { .name = "FS", .offset = MTDPART_OFS_NXTBLK, .size = 120 * SZ_1M, } }; static struct mtd_partition * __init nand_partitions(int size, int *num_partitions) { *num_partitions = ARRAY_SIZE(ek_nand_partition); return ek_nand_partition; } static struct atmel_nand_data __initdata ek_nand_data = { .ale = 21, .cle = 22, // .det_pin = ... not connected .rdy_pin = AT91_PIN_PA22, .enable_pin = AT91_PIN_PD15, .partition_info = nand_partitions, }; static struct sam9_smc_config __initdata ek_nand_smc_config = { .ncs_read_setup = 0, .nrd_setup = 1, .ncs_write_setup = 0, .nwe_setup = 1, .ncs_read_pulse = 3, .nrd_pulse = 3, .ncs_write_pulse = 3, .nwe_pulse = 3, .read_cycle = 5, .write_cycle = 5, .mode = AT91_SMC_READMODE | AT91_SMC_WRITEMODE | AT91_SMC_EXNWMODE_DISABLE | AT91_SMC_DBW_8, .tdf_cycles = 2, }; static void __init ek_add_device_nand(void) { /* configure chip-select 3 (NAND) */ sam9_smc_configure(3, &ek_nand_smc_config); at91_add_device_nand(&ek_nand_data); } /* * GPIO Buttons */ #if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE) static struct gpio_keys_button ek_buttons[] = { { /* USER PUSH BUTTON */ .code = KEY_ENTER, .gpio = AT91_PIN_PB10, .active_low = 1, .desc = "user_pb", .wakeup = 1, } }; static struct gpio_keys_platform_data ek_button_data = { .buttons = ek_buttons, .nbuttons = ARRAY_SIZE(ek_buttons), }; static struct platform_device ek_button_device = { .name = "gpio-keys", .id = -1, 
.num_resources = 0, .dev = { .platform_data = &ek_button_data, } }; static void __init ek_add_device_buttons(void) { at91_set_GPIO_periph(AT91_PIN_PB10, 1); /* user push button, pull up enabled */ at91_set_deglitch(AT91_PIN_PB10, 1); platform_device_register(&ek_button_device); } #else static void __init ek_add_device_buttons(void) {} #endif /* * LEDs */ static struct gpio_led ek_leds[] = { { /* user_led (green) */ .name = "user_led", .gpio = AT91_PIN_PB21, .active_low = 1, .default_trigger = "heartbeat", } }; static void __init ek_board_init(void) { /* Serial */ at91_add_device_serial(); /* USB Host */ at91_add_device_usbh(&ek_usbh_data); /* USB Device */ at91_add_device_udc(&ek_udc_data); /* SPI */ at91_add_device_spi(ek_spi_devices, ARRAY_SIZE(ek_spi_devices)); /* Ethernet */ at91_add_device_eth(&ek_macb_data); /* NAND */ ek_add_device_nand(); /* I2C */ at91_add_device_i2c(NULL, 0); /* Push Buttons */ ek_add_device_buttons(); /* LEDs */ at91_gpio_leds(ek_leds, ARRAY_SIZE(ek_leds)); /* shutdown controller, wakeup button (5 msec low) */ at91_sys_write(AT91_SHDW_MR, AT91_SHDW_CPTWK0_(10) | AT91_SHDW_WKMODE0_LOW | AT91_SHDW_RTTWKEN); } MACHINE_START(USB_A9263, "CALAO USB_A9263") /* Maintainer: calao-systems */ .phys_io = AT91_BASE_SYS, .io_pg_offst = (AT91_VA_BASE_SYS >> 18) & 0xfffc, .boot_params = AT91_SDRAM_BASE + 0x100, .timer = &at91sam926x_timer, .map_io = ek_map_io, .init_irq = ek_init_irq, .init_machine = ek_board_init, MACHINE_END
gpl-2.0
Ander-Alvarez/UltraKernel
kernel/relay.c
1959
33563
/* * Public API and common code for kernel->userspace relay file support. * * See Documentation/filesystems/relay.txt for an overview. * * Copyright (C) 2002-2005 - Tom Zanussi (zanussi@us.ibm.com), IBM Corp * Copyright (C) 1999-2005 - Karim Yaghmour (karim@opersys.com) * * Moved to kernel/relay.c by Paul Mundt, 2006. * November 2006 - CPU hotplug support by Mathieu Desnoyers * (mathieu.desnoyers@polymtl.ca) * * This file is released under the GPL. */ #include <linux/errno.h> #include <linux/stddef.h> #include <linux/slab.h> #include <linux/export.h> #include <linux/string.h> #include <linux/relay.h> #include <linux/vmalloc.h> #include <linux/mm.h> #include <linux/cpu.h> #include <linux/splice.h> /* list of open channels, for cpu hotplug */ static DEFINE_MUTEX(relay_channels_mutex); static LIST_HEAD(relay_channels); /* * close() vm_op implementation for relay file mapping. */ static void relay_file_mmap_close(struct vm_area_struct *vma) { struct rchan_buf *buf = vma->vm_private_data; buf->chan->cb->buf_unmapped(buf, vma->vm_file); } /* * fault() vm_op implementation for relay file mapping. */ static int relay_buf_fault(struct vm_area_struct *vma, struct vm_fault *vmf) { struct page *page; struct rchan_buf *buf = vma->vm_private_data; pgoff_t pgoff = vmf->pgoff; if (!buf) return VM_FAULT_OOM; page = vmalloc_to_page(buf->start + (pgoff << PAGE_SHIFT)); if (!page) return VM_FAULT_SIGBUS; get_page(page); vmf->page = page; return 0; } /* * vm_ops for relay file mappings. 
*/ static const struct vm_operations_struct relay_file_mmap_ops = { .fault = relay_buf_fault, .close = relay_file_mmap_close, }; /* * allocate an array of pointers of struct page */ static struct page **relay_alloc_page_array(unsigned int n_pages) { const size_t pa_size = n_pages * sizeof(struct page *); if (pa_size > PAGE_SIZE) return vzalloc(pa_size); return kzalloc(pa_size, GFP_KERNEL); } /* * free an array of pointers of struct page */ static void relay_free_page_array(struct page **array) { if (is_vmalloc_addr(array)) vfree(array); else kfree(array); } /** * relay_mmap_buf: - mmap channel buffer to process address space * @buf: relay channel buffer * @vma: vm_area_struct describing memory to be mapped * * Returns 0 if ok, negative on error * * Caller should already have grabbed mmap_sem. */ static int relay_mmap_buf(struct rchan_buf *buf, struct vm_area_struct *vma) { unsigned long length = vma->vm_end - vma->vm_start; struct file *filp = vma->vm_file; if (!buf) return -EBADF; if (length != (unsigned long)buf->chan->alloc_size) return -EINVAL; vma->vm_ops = &relay_file_mmap_ops; vma->vm_flags |= VM_DONTEXPAND; vma->vm_private_data = buf; buf->chan->cb->buf_mapped(buf, filp); return 0; } /** * relay_alloc_buf - allocate a channel buffer * @buf: the buffer struct * @size: total size of the buffer * * Returns a pointer to the resulting buffer, %NULL if unsuccessful. The * passed in size will get page aligned, if it isn't already. 
*/ static void *relay_alloc_buf(struct rchan_buf *buf, size_t *size) { void *mem; unsigned int i, j, n_pages; *size = PAGE_ALIGN(*size); n_pages = *size >> PAGE_SHIFT; buf->page_array = relay_alloc_page_array(n_pages); if (!buf->page_array) return NULL; for (i = 0; i < n_pages; i++) { buf->page_array[i] = alloc_page(GFP_KERNEL); if (unlikely(!buf->page_array[i])) goto depopulate; set_page_private(buf->page_array[i], (unsigned long)buf); } mem = vmap(buf->page_array, n_pages, VM_MAP, PAGE_KERNEL); if (!mem) goto depopulate; memset(mem, 0, *size); buf->page_count = n_pages; return mem; depopulate: for (j = 0; j < i; j++) __free_page(buf->page_array[j]); relay_free_page_array(buf->page_array); return NULL; } /** * relay_create_buf - allocate and initialize a channel buffer * @chan: the relay channel * * Returns channel buffer if successful, %NULL otherwise. */ static struct rchan_buf *relay_create_buf(struct rchan *chan) { struct rchan_buf *buf; if (chan->n_subbufs > UINT_MAX / sizeof(size_t *)) return NULL; buf = kzalloc(sizeof(struct rchan_buf), GFP_KERNEL); if (!buf) return NULL; buf->padding = kmalloc(chan->n_subbufs * sizeof(size_t *), GFP_KERNEL); if (!buf->padding) goto free_buf; buf->start = relay_alloc_buf(buf, &chan->alloc_size); if (!buf->start) goto free_buf; buf->chan = chan; kref_get(&buf->chan->kref); return buf; free_buf: kfree(buf->padding); kfree(buf); return NULL; } /** * relay_destroy_channel - free the channel struct * @kref: target kernel reference that contains the relay channel * * Should only be called from kref_put(). 
*/ static void relay_destroy_channel(struct kref *kref) { struct rchan *chan = container_of(kref, struct rchan, kref); kfree(chan); } /** * relay_destroy_buf - destroy an rchan_buf struct and associated buffer * @buf: the buffer struct */ static void relay_destroy_buf(struct rchan_buf *buf) { struct rchan *chan = buf->chan; unsigned int i; if (likely(buf->start)) { vunmap(buf->start); for (i = 0; i < buf->page_count; i++) __free_page(buf->page_array[i]); relay_free_page_array(buf->page_array); } chan->buf[buf->cpu] = NULL; kfree(buf->padding); kfree(buf); kref_put(&chan->kref, relay_destroy_channel); } /** * relay_remove_buf - remove a channel buffer * @kref: target kernel reference that contains the relay buffer * * Removes the file from the fileystem, which also frees the * rchan_buf_struct and the channel buffer. Should only be called from * kref_put(). */ static void relay_remove_buf(struct kref *kref) { struct rchan_buf *buf = container_of(kref, struct rchan_buf, kref); buf->chan->cb->remove_buf_file(buf->dentry); relay_destroy_buf(buf); } /** * relay_buf_empty - boolean, is the channel buffer empty? * @buf: channel buffer * * Returns 1 if the buffer is empty, 0 otherwise. */ static int relay_buf_empty(struct rchan_buf *buf) { return (buf->subbufs_produced - buf->subbufs_consumed) ? 0 : 1; } /** * relay_buf_full - boolean, is the channel buffer full? * @buf: channel buffer * * Returns 1 if the buffer is full, 0 otherwise. */ int relay_buf_full(struct rchan_buf *buf) { size_t ready = buf->subbufs_produced - buf->subbufs_consumed; return (ready >= buf->chan->n_subbufs) ? 1 : 0; } EXPORT_SYMBOL_GPL(relay_buf_full); /* * High-level relay kernel API and associated functions. */ /* * rchan_callback implementations defining default channel behavior. Used * in place of corresponding NULL values in client callback struct. */ /* * subbuf_start() default callback. Does nothing. 
*/ static int subbuf_start_default_callback (struct rchan_buf *buf, void *subbuf, void *prev_subbuf, size_t prev_padding) { if (relay_buf_full(buf)) return 0; return 1; } /* * buf_mapped() default callback. Does nothing. */ static void buf_mapped_default_callback(struct rchan_buf *buf, struct file *filp) { } /* * buf_unmapped() default callback. Does nothing. */ static void buf_unmapped_default_callback(struct rchan_buf *buf, struct file *filp) { } /* * create_buf_file_create() default callback. Does nothing. */ static struct dentry *create_buf_file_default_callback(const char *filename, struct dentry *parent, umode_t mode, struct rchan_buf *buf, int *is_global) { return NULL; } /* * remove_buf_file() default callback. Does nothing. */ static int remove_buf_file_default_callback(struct dentry *dentry) { return -EINVAL; } /* relay channel default callbacks */ static struct rchan_callbacks default_channel_callbacks = { .subbuf_start = subbuf_start_default_callback, .buf_mapped = buf_mapped_default_callback, .buf_unmapped = buf_unmapped_default_callback, .create_buf_file = create_buf_file_default_callback, .remove_buf_file = remove_buf_file_default_callback, }; /** * wakeup_readers - wake up readers waiting on a channel * @data: contains the channel buffer * * This is the timer function used to defer reader waking. */ static void wakeup_readers(unsigned long data) { struct rchan_buf *buf = (struct rchan_buf *)data; wake_up_interruptible(&buf->read_wait); } /** * __relay_reset - reset a channel buffer * @buf: the channel buffer * @init: 1 if this is a first-time initialization * * See relay_reset() for description of effect. 
*/ static void __relay_reset(struct rchan_buf *buf, unsigned int init) { size_t i; if (init) { init_waitqueue_head(&buf->read_wait); kref_init(&buf->kref); setup_timer(&buf->timer, wakeup_readers, (unsigned long)buf); } else del_timer_sync(&buf->timer); buf->subbufs_produced = 0; buf->subbufs_consumed = 0; buf->bytes_consumed = 0; buf->finalized = 0; buf->data = buf->start; buf->offset = 0; for (i = 0; i < buf->chan->n_subbufs; i++) buf->padding[i] = 0; buf->chan->cb->subbuf_start(buf, buf->data, NULL, 0); } /** * relay_reset - reset the channel * @chan: the channel * * This has the effect of erasing all data from all channel buffers * and restarting the channel in its initial state. The buffers * are not freed, so any mappings are still in effect. * * NOTE. Care should be taken that the channel isn't actually * being used by anything when this call is made. */ void relay_reset(struct rchan *chan) { unsigned int i; if (!chan) return; if (chan->is_global && chan->buf[0]) { __relay_reset(chan->buf[0], 0); return; } mutex_lock(&relay_channels_mutex); for_each_possible_cpu(i) if (chan->buf[i]) __relay_reset(chan->buf[i], 0); mutex_unlock(&relay_channels_mutex); } EXPORT_SYMBOL_GPL(relay_reset); static inline void relay_set_buf_dentry(struct rchan_buf *buf, struct dentry *dentry) { buf->dentry = dentry; buf->dentry->d_inode->i_size = buf->early_bytes; } static struct dentry *relay_create_buf_file(struct rchan *chan, struct rchan_buf *buf, unsigned int cpu) { struct dentry *dentry; char *tmpname; tmpname = kzalloc(NAME_MAX + 1, GFP_KERNEL); if (!tmpname) return NULL; snprintf(tmpname, NAME_MAX, "%s%d", chan->base_filename, cpu); /* Create file in fs */ dentry = chan->cb->create_buf_file(tmpname, chan->parent, S_IRUSR, buf, &chan->is_global); kfree(tmpname); return dentry; } /* * relay_open_buf - create a new relay channel buffer * * used by relay_open() and CPU hotplug. 
*/ static struct rchan_buf *relay_open_buf(struct rchan *chan, unsigned int cpu) { struct rchan_buf *buf = NULL; struct dentry *dentry; if (chan->is_global) return chan->buf[0]; buf = relay_create_buf(chan); if (!buf) return NULL; if (chan->has_base_filename) { dentry = relay_create_buf_file(chan, buf, cpu); if (!dentry) goto free_buf; relay_set_buf_dentry(buf, dentry); } buf->cpu = cpu; __relay_reset(buf, 1); if(chan->is_global) { chan->buf[0] = buf; buf->cpu = 0; } return buf; free_buf: relay_destroy_buf(buf); return NULL; } /** * relay_close_buf - close a channel buffer * @buf: channel buffer * * Marks the buffer finalized and restores the default callbacks. * The channel buffer and channel buffer data structure are then freed * automatically when the last reference is given up. */ static void relay_close_buf(struct rchan_buf *buf) { buf->finalized = 1; del_timer_sync(&buf->timer); kref_put(&buf->kref, relay_remove_buf); } static void setup_callbacks(struct rchan *chan, struct rchan_callbacks *cb) { if (!cb) { chan->cb = &default_channel_callbacks; return; } if (!cb->subbuf_start) cb->subbuf_start = subbuf_start_default_callback; if (!cb->buf_mapped) cb->buf_mapped = buf_mapped_default_callback; if (!cb->buf_unmapped) cb->buf_unmapped = buf_unmapped_default_callback; if (!cb->create_buf_file) cb->create_buf_file = create_buf_file_default_callback; if (!cb->remove_buf_file) cb->remove_buf_file = remove_buf_file_default_callback; chan->cb = cb; } /** * relay_hotcpu_callback - CPU hotplug callback * @nb: notifier block * @action: hotplug action to take * @hcpu: CPU number * * Returns the success/failure of the operation. 
(%NOTIFY_OK, %NOTIFY_BAD) */ static int __cpuinit relay_hotcpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu) { unsigned int hotcpu = (unsigned long)hcpu; struct rchan *chan; switch(action) { case CPU_UP_PREPARE: case CPU_UP_PREPARE_FROZEN: mutex_lock(&relay_channels_mutex); list_for_each_entry(chan, &relay_channels, list) { if (chan->buf[hotcpu]) continue; chan->buf[hotcpu] = relay_open_buf(chan, hotcpu); if(!chan->buf[hotcpu]) { printk(KERN_ERR "relay_hotcpu_callback: cpu %d buffer " "creation failed\n", hotcpu); mutex_unlock(&relay_channels_mutex); return notifier_from_errno(-ENOMEM); } } mutex_unlock(&relay_channels_mutex); break; case CPU_DEAD: case CPU_DEAD_FROZEN: /* No need to flush the cpu : will be flushed upon * final relay_flush() call. */ break; } return NOTIFY_OK; } /** * relay_open - create a new relay channel * @base_filename: base name of files to create, %NULL for buffering only * @parent: dentry of parent directory, %NULL for root directory or buffer * @subbuf_size: size of sub-buffers * @n_subbufs: number of sub-buffers * @cb: client callback functions * @private_data: user-defined data * * Returns channel pointer if successful, %NULL otherwise. * * Creates a channel buffer for each cpu using the sizes and * attributes specified. The created channel buffer files * will be named base_filename0...base_filenameN-1. File * permissions will be %S_IRUSR. 
*/ struct rchan *relay_open(const char *base_filename, struct dentry *parent, size_t subbuf_size, size_t n_subbufs, struct rchan_callbacks *cb, void *private_data) { unsigned int i; struct rchan *chan; if (!(subbuf_size && n_subbufs)) return NULL; if (subbuf_size > UINT_MAX / n_subbufs) return NULL; chan = kzalloc(sizeof(struct rchan), GFP_KERNEL); if (!chan) return NULL; chan->version = RELAYFS_CHANNEL_VERSION; chan->n_subbufs = n_subbufs; chan->subbuf_size = subbuf_size; chan->alloc_size = FIX_SIZE(subbuf_size * n_subbufs); chan->parent = parent; chan->private_data = private_data; if (base_filename) { chan->has_base_filename = 1; strlcpy(chan->base_filename, base_filename, NAME_MAX); } setup_callbacks(chan, cb); kref_init(&chan->kref); mutex_lock(&relay_channels_mutex); for_each_online_cpu(i) { chan->buf[i] = relay_open_buf(chan, i); if (!chan->buf[i]) goto free_bufs; } list_add(&chan->list, &relay_channels); mutex_unlock(&relay_channels_mutex); return chan; free_bufs: for_each_possible_cpu(i) { if (chan->buf[i]) relay_close_buf(chan->buf[i]); } kref_put(&chan->kref, relay_destroy_channel); mutex_unlock(&relay_channels_mutex); return NULL; } EXPORT_SYMBOL_GPL(relay_open); struct rchan_percpu_buf_dispatcher { struct rchan_buf *buf; struct dentry *dentry; }; /* Called in atomic context. */ static void __relay_set_buf_dentry(void *info) { struct rchan_percpu_buf_dispatcher *p = info; relay_set_buf_dentry(p->buf, p->dentry); } /** * relay_late_setup_files - triggers file creation * @chan: channel to operate on * @base_filename: base name of files to create * @parent: dentry of parent directory, %NULL for root directory * * Returns 0 if successful, non-zero otherwise. * * Use to setup files for a previously buffer-only channel. * Useful to do early tracing in kernel, before VFS is up, for example. 
 */
int relay_late_setup_files(struct rchan *chan,
			   const char *base_filename,
			   struct dentry *parent)
{
	int err = 0;
	unsigned int i, curr_cpu;
	unsigned long flags;
	struct dentry *dentry;
	struct rchan_percpu_buf_dispatcher disp;

	if (!chan || !base_filename)
		return -EINVAL;

	strlcpy(chan->base_filename, base_filename, NAME_MAX);

	mutex_lock(&relay_channels_mutex);
	/* Is chan already set up? */
	if (unlikely(chan->has_base_filename)) {
		mutex_unlock(&relay_channels_mutex);
		return -EEXIST;
	}
	chan->has_base_filename = 1;
	chan->parent = parent;
	curr_cpu = get_cpu();
	/*
	 * The CPU hotplug notifier ran before us and created buffers with
	 * no files associated. So it's safe to call relay_setup_buf_file()
	 * on all currently online CPUs.
	 */
	for_each_online_cpu(i) {
		if (unlikely(!chan->buf[i])) {
			WARN_ONCE(1, KERN_ERR "CPU has no buffer!\n");
			err = -EINVAL;
			break;
		}

		dentry = relay_create_buf_file(chan, chan->buf[i], i);
		if (unlikely(!dentry)) {
			err = -EINVAL;
			break;
		}

		if (curr_cpu == i) {
			/* Our own cpu: install the dentry directly, with
			 * irqs off to keep the update atomic locally. */
			local_irq_save(flags);
			relay_set_buf_dentry(chan->buf[i], dentry);
			local_irq_restore(flags);
		} else {
			/* Remote cpu: dispatch the update there and wait. */
			disp.buf = chan->buf[i];
			disp.dentry = dentry;
			smp_mb();
			/* relay_channels_mutex must be held, so wait. */
			err = smp_call_function_single(i,
						       __relay_set_buf_dentry,
						       &disp, 1);
		}
		if (unlikely(err))
			break;
	}
	put_cpu();
	mutex_unlock(&relay_channels_mutex);

	return err;
}

/**
 *	relay_switch_subbuf - switch to a new sub-buffer
 *	@buf: channel buffer
 *	@length: size of current event
 *
 *	Returns either the length passed in or 0 if full.
 *
 *	Performs sub-buffer-switch tasks such as invoking callbacks,
 *	updating padding counts, waking up readers, etc.
 */
size_t relay_switch_subbuf(struct rchan_buf *buf, size_t length)
{
	void *old, *new;
	size_t old_subbuf, new_subbuf;

	if (unlikely(length > buf->chan->subbuf_size))
		goto toobig;

	/* offset == subbuf_size + 1 marks a previously-failed switch;
	 * otherwise finish off the old sub-buffer first. */
	if (buf->offset != buf->chan->subbuf_size + 1) {
		buf->prev_padding = buf->chan->subbuf_size - buf->offset;
		old_subbuf = buf->subbufs_produced % buf->chan->n_subbufs;
		buf->padding[old_subbuf] = buf->prev_padding;
		buf->subbufs_produced++;
		/* Account produced bytes either on the file (if one exists)
		 * or in early_bytes for buffer-only channels. */
		if (buf->dentry)
			buf->dentry->d_inode->i_size +=
				buf->chan->subbuf_size -
				buf->padding[old_subbuf];
		else
			buf->early_bytes += buf->chan->subbuf_size -
					    buf->padding[old_subbuf];
		smp_mb();
		if (waitqueue_active(&buf->read_wait))
			/*
			 * Calling wake_up_interruptible() from here
			 * will deadlock if we happen to be logging
			 * from the scheduler (trying to re-grab
			 * rq->lock), so defer it.
			 */
			mod_timer(&buf->timer, jiffies + 1);
	}

	old = buf->data;
	new_subbuf = buf->subbufs_produced % buf->chan->n_subbufs;
	new = buf->start + new_subbuf * buf->chan->subbuf_size;
	buf->offset = 0;
	/* Let the client veto the switch (e.g. no-overwrite mode is full). */
	if (!buf->chan->cb->subbuf_start(buf, new, old, buf->prev_padding)) {
		buf->offset = buf->chan->subbuf_size + 1;
		return 0;
	}
	buf->data = new;
	buf->padding[new_subbuf] = 0;

	if (unlikely(length + buf->offset > buf->chan->subbuf_size))
		goto toobig;

	return length;

toobig:
	buf->chan->last_toobig = length;
	return 0;
}
EXPORT_SYMBOL_GPL(relay_switch_subbuf);

/**
 *	relay_subbufs_consumed - update the buffer's sub-buffers-consumed count
 *	@chan: the channel
 *	@cpu: the cpu associated with the channel buffer to update
 *	@subbufs_consumed: number of sub-buffers to add to current buf's count
 *
 *	Adds to the channel buffer's consumed sub-buffer count.
 *	subbufs_consumed should be the number of sub-buffers newly consumed,
 *	not the total consumed.
 *
 *	NOTE. Kernel clients don't need to call this function if the channel
 *	mode is 'overwrite'.
 */
void relay_subbufs_consumed(struct rchan *chan,
			    unsigned int cpu,
			    size_t subbufs_consumed)
{
	struct rchan_buf *buf;

	if (!chan)
		return;

	if (cpu >= NR_CPUS || !chan->buf[cpu] ||
					subbufs_consumed > chan->n_subbufs)
		return;

	buf = chan->buf[cpu];
	/* Never let consumed overtake produced; clamp instead. */
	if (subbufs_consumed > buf->subbufs_produced - buf->subbufs_consumed)
		buf->subbufs_consumed = buf->subbufs_produced;
	else
		buf->subbufs_consumed += subbufs_consumed;
}
EXPORT_SYMBOL_GPL(relay_subbufs_consumed);

/**
 *	relay_close - close the channel
 *	@chan: the channel
 *
 *	Closes all channel buffers and frees the channel.
 */
void relay_close(struct rchan *chan)
{
	unsigned int i;

	if (!chan)
		return;

	mutex_lock(&relay_channels_mutex);
	/* A global channel shares buf[0] across cpus; close it once. */
	if (chan->is_global && chan->buf[0])
		relay_close_buf(chan->buf[0]);
	else
		for_each_possible_cpu(i)
			if (chan->buf[i])
				relay_close_buf(chan->buf[i]);

	if (chan->last_toobig)
		printk(KERN_WARNING "relay: one or more items not logged "
		       "[item size (%Zd) > sub-buffer size (%Zd)]\n",
		       chan->last_toobig, chan->subbuf_size);

	list_del(&chan->list);
	kref_put(&chan->kref, relay_destroy_channel);
	mutex_unlock(&relay_channels_mutex);
}
EXPORT_SYMBOL_GPL(relay_close);

/**
 *	relay_flush - close the channel
 *	@chan: the channel
 *
 *	Flushes all channel buffers, i.e. forces buffer switch.
 */
void relay_flush(struct rchan *chan)
{
	unsigned int i;

	if (!chan)
		return;

	if (chan->is_global && chan->buf[0]) {
		relay_switch_subbuf(chan->buf[0], 0);
		return;
	}

	mutex_lock(&relay_channels_mutex);
	for_each_possible_cpu(i)
		if (chan->buf[i])
			relay_switch_subbuf(chan->buf[i], 0);
	mutex_unlock(&relay_channels_mutex);
}
EXPORT_SYMBOL_GPL(relay_flush);

/**
 *	relay_file_open - open file op for relay files
 *	@inode: the inode
 *	@filp: the file
 *
 *	Increments the channel buffer refcount.
 */
static int relay_file_open(struct inode *inode, struct file *filp)
{
	struct rchan_buf *buf = inode->i_private;
	/* Pin the buffer for the lifetime of this open file. */
	kref_get(&buf->kref);
	filp->private_data = buf;

	return nonseekable_open(inode, filp);
}

/**
 *	relay_file_mmap - mmap file op for relay files
 *	@filp: the file
 *	@vma: the vma describing what to map
 *
 *	Calls upon relay_mmap_buf() to map the file into user space.
 */
static int relay_file_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct rchan_buf *buf = filp->private_data;
	return relay_mmap_buf(buf, vma);
}

/**
 *	relay_file_poll - poll file op for relay files
 *	@filp: the file
 *	@wait: poll table
 *
 *	Poll implementation.
 */
static unsigned int relay_file_poll(struct file *filp, poll_table *wait)
{
	unsigned int mask = 0;
	struct rchan_buf *buf = filp->private_data;

	if (buf->finalized)
		return POLLERR;

	if (filp->f_mode & FMODE_READ) {
		poll_wait(filp, &buf->read_wait, wait);
		if (!relay_buf_empty(buf))
			mask |= POLLIN | POLLRDNORM;
	}

	return mask;
}

/**
 *	relay_file_release - release file op for relay files
 *	@inode: the inode
 *	@filp: the file
 *
 *	Decrements the channel refcount, as the filesystem is
 *	no longer using it.
 */
static int relay_file_release(struct inode *inode, struct file *filp)
{
	struct rchan_buf *buf = filp->private_data;
	kref_put(&buf->kref, relay_remove_buf);

	return 0;
}

/*
 *	relay_file_read_consume - update the consumed count for the buffer
 */
static void relay_file_read_consume(struct rchan_buf *buf,
				    size_t read_pos,
				    size_t bytes_consumed)
{
	size_t subbuf_size = buf->chan->subbuf_size;
	size_t n_subbufs = buf->chan->n_subbufs;
	size_t read_subbuf;

	/* Nothing produced beyond what was already consumed: nothing to do. */
	if (buf->subbufs_produced == buf->subbufs_consumed &&
	    buf->offset == buf->bytes_consumed)
		return;

	/* This read crosses into the next sub-buffer: retire the old one. */
	if (buf->bytes_consumed + bytes_consumed > subbuf_size) {
		relay_subbufs_consumed(buf->chan, buf->cpu, 1);
		buf->bytes_consumed = 0;
	}

	buf->bytes_consumed += bytes_consumed;
	if (!read_pos)
		read_subbuf = buf->subbufs_consumed % n_subbufs;
	else
		read_subbuf = read_pos / buf->chan->subbuf_size;
	/* Consumed data plus padding fills the sub-buffer: retire it, unless
	 * it is the one currently being written (not yet switched away). */
	if (buf->bytes_consumed + buf->padding[read_subbuf] == subbuf_size) {
		if ((read_subbuf == buf->subbufs_produced % n_subbufs) &&
		    (buf->offset == subbuf_size))
			return;
		relay_subbufs_consumed(buf->chan, buf->cpu, 1);
		buf->bytes_consumed = 0;
	}
}

/*
 *	relay_file_read_avail - boolean, are there unconsumed bytes available?
 */
static int relay_file_read_avail(struct rchan_buf *buf, size_t read_pos)
{
	size_t subbuf_size = buf->chan->subbuf_size;
	size_t n_subbufs = buf->chan->n_subbufs;
	size_t produced = buf->subbufs_produced;
	size_t consumed = buf->subbufs_consumed;

	relay_file_read_consume(buf, read_pos, 0);

	/* relay_file_read_consume() may have advanced the consumed count. */
	consumed = buf->subbufs_consumed;

	/* offset > subbuf_size marks a failed sub-buffer switch; in that
	 * state only whole retired sub-buffers count as available. */
	if (unlikely(buf->offset > subbuf_size)) {
		if (produced == consumed)
			return 0;
		return 1;
	}

	/* Writer lapped the reader: resync the consumed counters to the
	 * oldest still-valid sub-buffer. */
	if (unlikely(produced - consumed >= n_subbufs)) {
		consumed = produced - n_subbufs + 1;
		buf->subbufs_consumed = consumed;
		buf->bytes_consumed = 0;
	}

	/* Compare byte positions rather than sub-buffer counts. */
	produced = (produced % n_subbufs) * subbuf_size + buf->offset;
	consumed = (consumed % n_subbufs) * subbuf_size + buf->bytes_consumed;

	if (consumed > produced)
		produced += n_subbufs * subbuf_size;

	if (consumed == produced) {
		if (buf->offset == subbuf_size &&
		    buf->subbufs_produced > buf->subbufs_consumed)
			return 1;
		return 0;
	}

	return 1;
}

/**
 *	relay_file_read_subbuf_avail - return bytes available in sub-buffer
 *	@read_pos: file read position
 *	@buf: relay channel buffer
 */
static size_t relay_file_read_subbuf_avail(size_t read_pos,
					   struct rchan_buf *buf)
{
	size_t padding, avail = 0;
	size_t read_subbuf, read_offset, write_subbuf, write_offset;
	size_t subbuf_size = buf->chan->subbuf_size;

	write_subbuf = (buf->data - buf->start) / subbuf_size;
	write_offset = buf->offset > subbuf_size ? subbuf_size : buf->offset;
	read_subbuf = read_pos / subbuf_size;
	read_offset = read_pos % subbuf_size;
	padding = buf->padding[read_subbuf];

	if (read_subbuf == write_subbuf) {
		/* Reading the sub-buffer being written: stop at the writer. */
		if (read_offset + padding < write_offset)
			avail = write_offset - (read_offset + padding);
	} else
		/* A retired sub-buffer: everything up to its padding. */
		avail = (subbuf_size - padding) - read_offset;

	return avail;
}

/**
 *	relay_file_read_start_pos - find the first available byte to read
 *	@read_pos: file read position
 *	@buf: relay channel buffer
 *
 *	If the @read_pos is in the middle of padding, return the
 *	position of the first actually available byte, otherwise
 *	return the original value.
 */
static size_t relay_file_read_start_pos(size_t read_pos,
					struct rchan_buf *buf)
{
	size_t read_subbuf, padding, padding_start, padding_end;
	size_t subbuf_size = buf->chan->subbuf_size;
	size_t n_subbufs = buf->chan->n_subbufs;
	size_t consumed = buf->subbufs_consumed % n_subbufs;

	/* A zero read_pos means "resume from the consumed position". */
	if (!read_pos)
		read_pos = consumed * subbuf_size + buf->bytes_consumed;
	read_subbuf = read_pos / subbuf_size;
	padding = buf->padding[read_subbuf];
	padding_start = (read_subbuf + 1) * subbuf_size - padding;
	padding_end = (read_subbuf + 1) * subbuf_size;
	/* Skip over trailing padding into the next sub-buffer. */
	if (read_pos >= padding_start && read_pos < padding_end) {
		read_subbuf = (read_subbuf + 1) % n_subbufs;
		read_pos = read_subbuf * subbuf_size;
	}

	return read_pos;
}

/**
 *	relay_file_read_end_pos - return the new read position
 *	@read_pos: file read position
 *	@buf: relay channel buffer
 *	@count: number of bytes to be read
 */
static size_t relay_file_read_end_pos(struct rchan_buf *buf,
				      size_t read_pos,
				      size_t count)
{
	size_t read_subbuf, padding, end_pos;
	size_t subbuf_size = buf->chan->subbuf_size;
	size_t n_subbufs = buf->chan->n_subbufs;

	read_subbuf = read_pos / subbuf_size;
	padding = buf->padding[read_subbuf];
	/* If this read exhausts the data portion, jump past the padding. */
	if (read_pos % subbuf_size + count + padding == subbuf_size)
		end_pos = (read_subbuf + 1) * subbuf_size;
	else
		end_pos = read_pos + count;
	/* Wrap around the circular buffer. */
	if (end_pos >= subbuf_size * n_subbufs)
		end_pos = 0;

	return end_pos;
}

/*
 *	subbuf_read_actor - read up to one subbuf's worth of data
 */
static int subbuf_read_actor(size_t read_start,
			     struct rchan_buf *buf,
			     size_t avail,
			     read_descriptor_t *desc,
			     read_actor_t actor)
{
	void *from;
	int ret = 0;

	from = buf->start + read_start;
	ret = avail;
	if (copy_to_user(desc->arg.buf, from, avail)) {
		desc->error = -EFAULT;
		ret = 0;
	}
	/* On fault ret is 0, so the descriptor is left unchanged. */
	desc->arg.data += ret;
	desc->written += ret;
	desc->count -= ret;

	return ret;
}

typedef int (*subbuf_actor_t) (size_t read_start,
			       struct rchan_buf *buf,
			       size_t avail,
			       read_descriptor_t *desc,
			       read_actor_t actor);

/*
 *	relay_file_read_subbufs - read count bytes, bridging subbuf boundaries
 */
static ssize_t relay_file_read_subbufs(struct file *filp, loff_t *ppos,
					subbuf_actor_t subbuf_actor,
					read_actor_t actor,
					read_descriptor_t *desc)
{
	struct rchan_buf *buf = filp->private_data;
	size_t read_start, avail;
	int ret;

	if (!desc->count)
		return 0;

	/* Serialize readers of this file via the inode mutex. */
	mutex_lock(&filp->f_path.dentry->d_inode->i_mutex);
	do {
		if (!relay_file_read_avail(buf, *ppos))
			break;

		read_start = relay_file_read_start_pos(*ppos, buf);
		avail = relay_file_read_subbuf_avail(read_start, buf);
		if (!avail)
			break;

		avail = min(desc->count, avail);
		ret = subbuf_actor(read_start, buf, avail, desc, actor);
		if (desc->error < 0)
			break;

		if (ret) {
			relay_file_read_consume(buf, read_start, ret);
			*ppos = relay_file_read_end_pos(buf, read_start, ret);
		}
	} while (desc->count && ret);
	mutex_unlock(&filp->f_path.dentry->d_inode->i_mutex);

	return desc->written;
}

static ssize_t relay_file_read(struct file *filp,
			       char __user *buffer,
			       size_t count,
			       loff_t *ppos)
{
	read_descriptor_t desc;
	desc.written = 0;
	desc.count = count;
	desc.arg.buf = buffer;
	desc.error = 0;
	return relay_file_read_subbufs(filp, ppos, subbuf_read_actor,
				       NULL, &desc);
}

/* Credit bytes released by the pipe back to the relay buffer. */
static void relay_consume_bytes(struct rchan_buf *rbuf, int bytes_consumed)
{
	rbuf->bytes_consumed += bytes_consumed;

	if (rbuf->bytes_consumed >= rbuf->chan->subbuf_size) {
		relay_subbufs_consumed(rbuf->chan, rbuf->cpu, 1);
		rbuf->bytes_consumed %= rbuf->chan->subbuf_size;
	}
}

static void relay_pipe_buf_release(struct pipe_inode_info *pipe,
				   struct pipe_buffer *buf)
{
	struct rchan_buf *rbuf;

	/* The owning rchan_buf was stashed in the page's private field. */
	rbuf = (struct rchan_buf *)page_private(buf->page);
	relay_consume_bytes(rbuf, buf->private);
}

static const struct pipe_buf_operations relay_pipe_buf_ops = {
	.can_merge = 0,
	.map = generic_pipe_buf_map,
	.unmap = generic_pipe_buf_unmap,
	.confirm = generic_pipe_buf_confirm,
	.release = relay_pipe_buf_release,
	.steal = generic_pipe_buf_steal,
	.get = generic_pipe_buf_get,
};

/* Pages belong to the relay buffer, so splice must not free them. */
static void relay_page_release(struct splice_pipe_desc *spd, unsigned int i)
{
}

/*
 *	subbuf_splice_actor - splice up to one
subbuf's worth of data
 */
static ssize_t subbuf_splice_actor(struct file *in,
				   loff_t *ppos,
				   struct pipe_inode_info *pipe,
				   size_t len,
				   unsigned int flags,
				   int *nonpad_ret)
{
	unsigned int pidx, poff, total_len, subbuf_pages, nr_pages;
	struct rchan_buf *rbuf = in->private_data;
	unsigned int subbuf_size = rbuf->chan->subbuf_size;
	uint64_t pos = (uint64_t) *ppos;
	uint32_t alloc_size = (uint32_t) rbuf->chan->alloc_size;
	/* Fold the file position into an offset within the buffer. */
	size_t read_start = (size_t) do_div(pos, alloc_size);
	size_t read_subbuf = read_start / subbuf_size;
	size_t padding = rbuf->padding[read_subbuf];
	size_t nonpad_end = read_subbuf * subbuf_size + subbuf_size - padding;
	struct page *pages[PIPE_DEF_BUFFERS];
	struct partial_page partial[PIPE_DEF_BUFFERS];
	struct splice_pipe_desc spd = {
		.pages = pages,
		.nr_pages = 0,
		.nr_pages_max = PIPE_DEF_BUFFERS,
		.partial = partial,
		.flags = flags,
		.ops = &relay_pipe_buf_ops,
		.spd_release = relay_page_release,
	};
	ssize_t ret;

	if (rbuf->subbufs_produced == rbuf->subbufs_consumed)
		return 0;
	if (splice_grow_spd(pipe, &spd))
		return -ENOMEM;

	/*
	 * Adjust read len, if longer than what is available
	 */
	if (len > (subbuf_size - read_start % subbuf_size))
		len = subbuf_size - read_start % subbuf_size;

	subbuf_pages = rbuf->chan->alloc_size >> PAGE_SHIFT;
	pidx = (read_start / PAGE_SIZE) % subbuf_pages;
	poff = read_start & ~PAGE_MASK;
	nr_pages = min_t(unsigned int, subbuf_pages, pipe->buffers);

	/* Hand the buffer's pages to the pipe one page at a time,
	 * trimming the last page so padding is never spliced out. */
	for (total_len = 0; spd.nr_pages < nr_pages; spd.nr_pages++) {
		unsigned int this_len, this_end, private;
		unsigned int cur_pos = read_start + total_len;

		if (!len)
			break;

		this_len = min_t(unsigned long, len, PAGE_SIZE - poff);
		private = this_len;

		spd.pages[spd.nr_pages] = rbuf->page_array[pidx];
		spd.partial[spd.nr_pages].offset = poff;

		this_end = cur_pos + this_len;
		if (this_end >= nonpad_end) {
			this_len = nonpad_end - cur_pos;
			/* private carries padding so release() can credit it */
			private = this_len + padding;
		}
		spd.partial[spd.nr_pages].len = this_len;
		spd.partial[spd.nr_pages].private = private;

		len -= this_len;
		total_len += this_len;
		poff = 0;
		pidx = (pidx + 1) % subbuf_pages;

		if (this_end >= nonpad_end) {
			spd.nr_pages++;
			break;
		}
	}

	ret = 0;
	if (!spd.nr_pages)
		goto out;

	ret = *nonpad_ret = splice_to_pipe(pipe, &spd);
	if (ret < 0 || ret < total_len)
		goto out;

	/* Advance past the sub-buffer's padding once its data is spliced. */
	if (read_start + ret == nonpad_end)
		ret += padding;

out:
	splice_shrink_spd(&spd);
	return ret;
}

static ssize_t relay_file_splice_read(struct file *in,
				      loff_t *ppos,
				      struct pipe_inode_info *pipe,
				      size_t len,
				      unsigned int flags)
{
	ssize_t spliced;
	int ret;
	int nonpad_ret = 0;

	ret = 0;
	spliced = 0;

	while (len && !spliced) {
		ret = subbuf_splice_actor(in, ppos, pipe, len, flags,
					  &nonpad_ret);
		if (ret < 0)
			break;
		else if (!ret) {
			if (flags & SPLICE_F_NONBLOCK)
				ret = -EAGAIN;
			break;
		}

		/* ret may include padding and so exceed the requested len. */
		*ppos += ret;
		if (ret > len)
			len = 0;
		else
			len -= ret;
		/* Report only non-padding bytes to the caller. */
		spliced += nonpad_ret;
		nonpad_ret = 0;
	}

	if (spliced)
		return spliced;

	return ret;
}

const struct file_operations relay_file_operations = {
	.open		= relay_file_open,
	.poll		= relay_file_poll,
	.mmap		= relay_file_mmap,
	.read		= relay_file_read,
	.llseek		= no_llseek,
	.release	= relay_file_release,
	.splice_read	= relay_file_splice_read,
};
EXPORT_SYMBOL_GPL(relay_file_operations);

static __init int relay_init(void)
{
	hotcpu_notifier(relay_hotcpu_callback, 0);
	return 0;
}

early_initcall(relay_init);
gpl-2.0
CyanogenMod/android_kernel_hardkernel_odroidc1
drivers/platform/x86/panasonic-laptop.c
2215
18669
/* * Panasonic HotKey and LCD brightness control driver * (C) 2004 Hiroshi Miura <miura@da-cha.org> * (C) 2004 NTT DATA Intellilink Co. http://www.intellilink.co.jp/ * (C) YOKOTA Hiroshi <yokota (at) netlab. is. tsukuba. ac. jp> * (C) 2004 David Bronaugh <dbronaugh> * (C) 2006-2008 Harald Welte <laforge@gnumonks.org> * * derived from toshiba_acpi.c, Copyright (C) 2002-2004 John Belmonte * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * publicshed by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * *--------------------------------------------------------------------------- * * ChangeLog: * Sep.23, 2008 Harald Welte <laforge@gnumonks.org> * -v0.95 rename driver from drivers/acpi/pcc_acpi.c to * drivers/misc/panasonic-laptop.c * * Jul.04, 2008 Harald Welte <laforge@gnumonks.org> * -v0.94 replace /proc interface with device attributes * support {set,get}keycode on th input device * * Jun.27, 2008 Harald Welte <laforge@gnumonks.org> * -v0.92 merge with 2.6.26-rc6 input API changes * remove broken <= 2.6.15 kernel support * resolve all compiler warnings * various coding style fixes (checkpatch.pl) * add support for backlight api * major code restructuring * * Dac.28, 2007 Harald Welte <laforge@gnumonks.org> * -v0.91 merge with 2.6.24-rc6 ACPI changes * * Nov.04, 2006 Hiroshi Miura <miura@da-cha.org> * -v0.9 remove warning about section reference. 
* remove acpi_os_free * add /proc/acpi/pcc/brightness interface for HAL access * merge dbronaugh's enhancement * Aug.17, 2004 David Bronaugh (dbronaugh) * - Added screen brightness setting interface * Thanks to FreeBSD crew (acpi_panasonic.c) * for the ideas I needed to accomplish it * * May.29, 2006 Hiroshi Miura <miura@da-cha.org> * -v0.8.4 follow to change keyinput structure * thanks Fabian Yamaguchi <fabs@cs.tu-berlin.de>, * Jacob Bower <jacob.bower@ic.ac.uk> and * Hiroshi Yokota for providing solutions. * * Oct.02, 2004 Hiroshi Miura <miura@da-cha.org> * -v0.8.2 merge code of YOKOTA Hiroshi * <yokota@netlab.is.tsukuba.ac.jp>. * Add sticky key mode interface. * Refactoring acpi_pcc_generate_keyinput(). * * Sep.15, 2004 Hiroshi Miura <miura@da-cha.org> * -v0.8 Generate key input event on input subsystem. * This is based on yet another driver written by * Ryuta Nakanishi. * * Sep.10, 2004 Hiroshi Miura <miura@da-cha.org> * -v0.7 Change proc interface functions using seq_file * facility as same as other ACPI drivers. * * Aug.28, 2004 Hiroshi Miura <miura@da-cha.org> * -v0.6.4 Fix a silly error with status checking * * Aug.25, 2004 Hiroshi Miura <miura@da-cha.org> * -v0.6.3 replace read_acpi_int by standard function * acpi_evaluate_integer * some clean up and make smart copyright notice. 
* fix return value of pcc_acpi_get_key() * fix checking return value of acpi_bus_register_driver() * * Aug.22, 2004 David Bronaugh <dbronaugh@linuxboxen.org> * -v0.6.2 Add check on ACPI data (num_sifr) * Coding style cleanups, better error messages/handling * Fixed an off-by-one error in memory allocation * * Aug.21, 2004 David Bronaugh <dbronaugh@linuxboxen.org> * -v0.6.1 Fix a silly error with status checking * * Aug.20, 2004 David Bronaugh <dbronaugh@linuxboxen.org> * - v0.6 Correct brightness controls to reflect reality * based on information gleaned by Hiroshi Miura * and discussions with Hiroshi Miura * * Aug.10, 2004 Hiroshi Miura <miura@da-cha.org> * - v0.5 support LCD brightness control * based on the disclosed information by MEI. * * Jul.25, 2004 Hiroshi Miura <miura@da-cha.org> * - v0.4 first post version * add function to retrive SIFR * * Jul.24, 2004 Hiroshi Miura <miura@da-cha.org> * - v0.3 get proper status of hotkey * * Jul.22, 2004 Hiroshi Miura <miura@da-cha.org> * - v0.2 add HotKey handler * * Jul.17, 2004 Hiroshi Miura <miura@da-cha.org> * - v0.1 start from toshiba_acpi driver written by John Belmonte * */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/types.h> #include <linux/backlight.h> #include <linux/ctype.h> #include <linux/seq_file.h> #include <linux/uaccess.h> #include <linux/slab.h> #include <acpi/acpi_bus.h> #include <acpi/acpi_drivers.h> #include <linux/input.h> #include <linux/input/sparse-keymap.h> #ifndef ACPI_HOTKEY_COMPONENT #define ACPI_HOTKEY_COMPONENT 0x10000000 #endif #define _COMPONENT ACPI_HOTKEY_COMPONENT MODULE_AUTHOR("Hiroshi Miura, David Bronaugh and Harald Welte"); MODULE_DESCRIPTION("ACPI HotKey driver for Panasonic Let's Note laptops"); MODULE_LICENSE("GPL"); #define LOGPREFIX "pcc_acpi: " /* Define ACPI PATHs */ /* Lets note hotkeys */ #define METHOD_HKEY_QUERY "HINF" #define METHOD_HKEY_SQTY "SQTY" #define METHOD_HKEY_SINF "SINF" #define METHOD_HKEY_SSET "SSET" #define 
HKEY_NOTIFY			 0x80

#define ACPI_PCC_DRIVER_NAME	"Panasonic Laptop Support"
#define ACPI_PCC_DEVICE_NAME	"Hotkey"
#define ACPI_PCC_CLASS		"pcc"

#define ACPI_PCC_INPUT_PHYS	"panasonic/hkey0"

/* LCD_TYPEs: 0 = Normal, 1 = Semi-transparent
   ENV_STATEs: Normal temp=0x01, High temp=0x81, N/A=0x00
*/
/* Indices into the SINF package returned by the HKEY.SINF ACPI method. */
enum SINF_BITS { SINF_NUM_BATTERIES = 0,
		 SINF_LCD_TYPE,
		 SINF_AC_MAX_BRIGHT,
		 SINF_AC_MIN_BRIGHT,
		 SINF_AC_CUR_BRIGHT,
		 SINF_DC_MAX_BRIGHT,
		 SINF_DC_MIN_BRIGHT,
		 SINF_DC_CUR_BRIGHT,
		 SINF_MUTE,
		 SINF_RESERVED,
		 SINF_ENV_STATE,
		 SINF_STICKY_KEY = 0x80,
	};
/* R1 handles SINF_AC_CUR_BRIGHT as SINF_CUR_BRIGHT, doesn't know AC state */

static int acpi_pcc_hotkey_add(struct acpi_device *device);
static int acpi_pcc_hotkey_remove(struct acpi_device *device);
static void acpi_pcc_hotkey_notify(struct acpi_device *device, u32 event);

/* ACPI IDs of the Matsushita/Panasonic hotkey devices this driver binds to */
static const struct acpi_device_id pcc_device_ids[] = {
	{ "MAT0012", 0},
	{ "MAT0013", 0},
	{ "MAT0018", 0},
	{ "MAT0019", 0},
	{ "", 0},
};
MODULE_DEVICE_TABLE(acpi, pcc_device_ids);

#ifdef CONFIG_PM_SLEEP
static int acpi_pcc_hotkey_resume(struct device *dev);
#endif
static SIMPLE_DEV_PM_OPS(acpi_pcc_hotkey_pm, NULL, acpi_pcc_hotkey_resume);

static struct acpi_driver acpi_pcc_driver = {
	.name =		ACPI_PCC_DRIVER_NAME,
	.class =	ACPI_PCC_CLASS,
	.ids =		pcc_device_ids,
	.ops =		{
				.add =		acpi_pcc_hotkey_add,
				.remove =	acpi_pcc_hotkey_remove,
				.notify =	acpi_pcc_hotkey_notify,
			},
	.drv.pm =	&acpi_pcc_hotkey_pm,
};

/* Maps HINF hotkey codes to input-layer key events. */
static const struct key_entry panasonic_keymap[] = {
	{ KE_KEY, 0, { KEY_RESERVED } },
	{ KE_KEY, 1, { KEY_BRIGHTNESSDOWN } },
	{ KE_KEY, 2, { KEY_BRIGHTNESSUP } },
	{ KE_KEY, 3, { KEY_DISPLAYTOGGLE } },
	{ KE_KEY, 4, { KEY_MUTE } },
	{ KE_KEY, 5, { KEY_VOLUMEDOWN } },
	{ KE_KEY, 6, { KEY_VOLUMEUP } },
	{ KE_KEY, 7, { KEY_SLEEP } },
	{ KE_KEY, 8, { KEY_PROG1 } }, /* Change CPU boost */
	{ KE_KEY, 9, { KEY_BATTERY } },
	{ KE_KEY, 10, { KEY_SUSPEND } },
	{ KE_END, 0 }
};

/* Per-device driver state. */
struct pcc_acpi {
	acpi_handle		handle;
	unsigned long		num_sifr;	/* number of SINF entries (from SQTY) */
	int			sticky_mode;
	u32			*sinf;		/* cached SINF values */
	struct acpi_device	*device;
	struct input_dev	*input_dev;
	struct backlight_device	*backlight;
};

struct pcc_keyinput {
	struct acpi_hotkey      *hotkey;
};

/* method access functions */

/* Invoke the HKEY.SSET ACPI method with (func, val); 0 on success, -EIO on
 * ACPI evaluation failure. */
static int acpi_pcc_write_sset(struct pcc_acpi *pcc, int func, int val)
{
	union acpi_object in_objs[] = {
		{ .integer.type  = ACPI_TYPE_INTEGER,
		  .integer.value = func, },
		{ .integer.type  = ACPI_TYPE_INTEGER,
		  .integer.value = val, },
	};
	struct acpi_object_list params = {
		.count   = ARRAY_SIZE(in_objs),
		.pointer = in_objs,
	};
	acpi_status status = AE_OK;

	status = acpi_evaluate_object(pcc->handle, METHOD_HKEY_SSET,
				      &params, NULL);

	return (status == AE_OK) ? 0 : -EIO;
}

/* Evaluate HKEY.SQTY; returns the SINF entry count or -EINVAL on failure. */
static inline int acpi_pcc_get_sqty(struct acpi_device *device)
{
	unsigned long long s;
	acpi_status status;

	status = acpi_evaluate_integer(device->handle, METHOD_HKEY_SQTY,
				       NULL, &s);
	if (ACPI_SUCCESS(status))
		return s;
	else {
		ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
				  "evaluation error HKEY.SQTY\n"));
		return -EINVAL;
	}
}

/* Refresh pcc->sinf[] from the HKEY.SINF package; returns non-zero on
 * success, 0 on failure. */
static int acpi_pcc_retrieve_biosdata(struct pcc_acpi *pcc)
{
	acpi_status status;
	struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
	union acpi_object *hkey = NULL;
	int i;

	status = acpi_evaluate_object(pcc->handle, METHOD_HKEY_SINF, NULL,
				      &buffer);
	if (ACPI_FAILURE(status)) {
		ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
				 "evaluation error HKEY.SINF\n"));
		return 0;
	}

	hkey = buffer.pointer;
	if (!hkey || (hkey->type != ACPI_TYPE_PACKAGE)) {
		ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid HKEY.SINF\n"));
		status = AE_ERROR;
		goto end;
	}

	/* sinf[] was sized from SQTY; refuse a larger package. */
	if (pcc->num_sifr < hkey->package.count) {
		ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
				  "SQTY reports bad SINF length\n"));
		status = AE_ERROR;
		goto end;
	}

	for (i = 0; i < hkey->package.count; i++) {
		union acpi_object *element = &(hkey->package.elements[i]);
		if (likely(element->type == ACPI_TYPE_INTEGER)) {
			pcc->sinf[i] = element->integer.value;
		} else
			ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
					  "Invalid HKEY.SINF data\n"));
	}
	/* Sentinel terminator after the last valid entry. */
	pcc->sinf[hkey->package.count] = -1;

end:
	kfree(buffer.pointer);
	return status == AE_OK;
}

/* backlight API interface functions */

/* This driver currently treats AC and DC brightness identical,
 * since we don't need to invent an interface to the core ACPI
 * logic to receive events in case a power supply is plugged in
 * or removed */

static int bl_get(struct backlight_device *bd)
{
	struct pcc_acpi *pcc = bl_get_data(bd);

	if (!acpi_pcc_retrieve_biosdata(pcc))
		return -EIO;

	return pcc->sinf[SINF_AC_CUR_BRIGHT];
}

static int bl_set_status(struct backlight_device *bd)
{
	struct pcc_acpi *pcc = bl_get_data(bd);
	int bright = bd->props.brightness;
	int rc;

	if (!acpi_pcc_retrieve_biosdata(pcc))
		return -EIO;

	/* Clamp up to the larger of the AC and DC minimums. */
	if (bright < pcc->sinf[SINF_AC_MIN_BRIGHT])
		bright = pcc->sinf[SINF_AC_MIN_BRIGHT];

	if (bright < pcc->sinf[SINF_DC_MIN_BRIGHT])
		bright = pcc->sinf[SINF_DC_MIN_BRIGHT];

	/* NOTE(review): the first clause below can never be true after the
	 * AC-minimum clamp above; only the AC-maximum check is effective. */
	if (bright < pcc->sinf[SINF_AC_MIN_BRIGHT] ||
	    bright > pcc->sinf[SINF_AC_MAX_BRIGHT])
		return -EINVAL;

	rc = acpi_pcc_write_sset(pcc, SINF_AC_CUR_BRIGHT, bright);
	if (rc < 0)
		return rc;

	return acpi_pcc_write_sset(pcc, SINF_DC_CUR_BRIGHT, bright);
}

static const struct backlight_ops pcc_backlight_ops = {
	.get_brightness	= bl_get,
	.update_status	= bl_set_status,
};

/* sysfs user interface functions */

static ssize_t show_numbatt(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct acpi_device *acpi = to_acpi_device(dev);
	struct pcc_acpi *pcc = acpi_driver_data(acpi);

	if (!acpi_pcc_retrieve_biosdata(pcc))
		return -EIO;

	return snprintf(buf, PAGE_SIZE, "%u\n", pcc->sinf[SINF_NUM_BATTERIES]);
}

static ssize_t show_lcdtype(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct acpi_device *acpi = to_acpi_device(dev);
	struct pcc_acpi *pcc = acpi_driver_data(acpi);

	if (!acpi_pcc_retrieve_biosdata(pcc))
		return -EIO;

	return snprintf(buf, PAGE_SIZE, "%u\n", pcc->sinf[SINF_LCD_TYPE]);
}

static ssize_t show_mute(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct acpi_device *acpi = to_acpi_device(dev);
	struct pcc_acpi *pcc = acpi_driver_data(acpi);

	if (!acpi_pcc_retrieve_biosdata(pcc))
		return -EIO;
return snprintf(buf, PAGE_SIZE, "%u\n", pcc->sinf[SINF_MUTE]); } static ssize_t show_sticky(struct device *dev, struct device_attribute *attr, char *buf) { struct acpi_device *acpi = to_acpi_device(dev); struct pcc_acpi *pcc = acpi_driver_data(acpi); if (!acpi_pcc_retrieve_biosdata(pcc)) return -EIO; return snprintf(buf, PAGE_SIZE, "%u\n", pcc->sinf[SINF_STICKY_KEY]); } static ssize_t set_sticky(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct acpi_device *acpi = to_acpi_device(dev); struct pcc_acpi *pcc = acpi_driver_data(acpi); int val; if (count && sscanf(buf, "%i", &val) == 1 && (val == 0 || val == 1)) { acpi_pcc_write_sset(pcc, SINF_STICKY_KEY, val); pcc->sticky_mode = val; } return count; } static DEVICE_ATTR(numbatt, S_IRUGO, show_numbatt, NULL); static DEVICE_ATTR(lcdtype, S_IRUGO, show_lcdtype, NULL); static DEVICE_ATTR(mute, S_IRUGO, show_mute, NULL); static DEVICE_ATTR(sticky_key, S_IRUGO | S_IWUSR, show_sticky, set_sticky); static struct attribute *pcc_sysfs_entries[] = { &dev_attr_numbatt.attr, &dev_attr_lcdtype.attr, &dev_attr_mute.attr, &dev_attr_sticky_key.attr, NULL, }; static struct attribute_group pcc_attr_group = { .name = NULL, /* put in device directory */ .attrs = pcc_sysfs_entries, }; /* hotkey input device driver */ static void acpi_pcc_generate_keyinput(struct pcc_acpi *pcc) { struct input_dev *hotk_input_dev = pcc->input_dev; int rc; unsigned long long result; rc = acpi_evaluate_integer(pcc->handle, METHOD_HKEY_QUERY, NULL, &result); if (!ACPI_SUCCESS(rc)) { ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "error getting hotkey status\n")); return; } acpi_bus_generate_proc_event(pcc->device, HKEY_NOTIFY, result); if (!sparse_keymap_report_event(hotk_input_dev, result & 0xf, result & 0x80, false)) ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Unknown hotkey event: %d\n", result)); } static void acpi_pcc_hotkey_notify(struct acpi_device *device, u32 event) { struct pcc_acpi *pcc = acpi_driver_data(device); switch (event) { 
case HKEY_NOTIFY: acpi_pcc_generate_keyinput(pcc); break; default: /* nothing to do */ break; } } static int acpi_pcc_init_input(struct pcc_acpi *pcc) { struct input_dev *input_dev; int error; input_dev = input_allocate_device(); if (!input_dev) { ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Couldn't allocate input device for hotkey")); return -ENOMEM; } input_dev->name = ACPI_PCC_DRIVER_NAME; input_dev->phys = ACPI_PCC_INPUT_PHYS; input_dev->id.bustype = BUS_HOST; input_dev->id.vendor = 0x0001; input_dev->id.product = 0x0001; input_dev->id.version = 0x0100; error = sparse_keymap_setup(input_dev, panasonic_keymap, NULL); if (error) { ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Unable to setup input device keymap\n")); goto err_free_dev; } error = input_register_device(input_dev); if (error) { ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Unable to register input device\n")); goto err_free_keymap; } pcc->input_dev = input_dev; return 0; err_free_keymap: sparse_keymap_free(input_dev); err_free_dev: input_free_device(input_dev); return error; } static void acpi_pcc_destroy_input(struct pcc_acpi *pcc) { sparse_keymap_free(pcc->input_dev); input_unregister_device(pcc->input_dev); /* * No need to input_free_device() since core input API refcounts * and free()s the device. 
*/ } /* kernel module interface */ #ifdef CONFIG_PM_SLEEP static int acpi_pcc_hotkey_resume(struct device *dev) { struct pcc_acpi *pcc; if (!dev) return -EINVAL; pcc = acpi_driver_data(to_acpi_device(dev)); if (!pcc) return -EINVAL; ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Sticky mode restore: %d\n", pcc->sticky_mode)); return acpi_pcc_write_sset(pcc, SINF_STICKY_KEY, pcc->sticky_mode); } #endif static int acpi_pcc_hotkey_add(struct acpi_device *device) { struct backlight_properties props; struct pcc_acpi *pcc; int num_sifr, result; if (!device) return -EINVAL; num_sifr = acpi_pcc_get_sqty(device); if (num_sifr < 0 || num_sifr > 255) { ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "num_sifr out of range")); return -ENODEV; } pcc = kzalloc(sizeof(struct pcc_acpi), GFP_KERNEL); if (!pcc) { ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Couldn't allocate mem for pcc")); return -ENOMEM; } pcc->sinf = kzalloc(sizeof(u32) * (num_sifr + 1), GFP_KERNEL); if (!pcc->sinf) { result = -ENOMEM; goto out_hotkey; } pcc->device = device; pcc->handle = device->handle; pcc->num_sifr = num_sifr; device->driver_data = pcc; strcpy(acpi_device_name(device), ACPI_PCC_DEVICE_NAME); strcpy(acpi_device_class(device), ACPI_PCC_CLASS); result = acpi_pcc_init_input(pcc); if (result) { ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Error installing keyinput handler\n")); goto out_sinf; } if (!acpi_pcc_retrieve_biosdata(pcc)) { ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Couldn't retrieve BIOS data\n")); result = -EIO; goto out_input; } /* initialize backlight */ memset(&props, 0, sizeof(struct backlight_properties)); props.type = BACKLIGHT_PLATFORM; props.max_brightness = pcc->sinf[SINF_AC_MAX_BRIGHT]; pcc->backlight = backlight_device_register("panasonic", NULL, pcc, &pcc_backlight_ops, &props); if (IS_ERR(pcc->backlight)) { result = PTR_ERR(pcc->backlight); goto out_input; } /* read the initial brightness setting from the hardware */ pcc->backlight->props.brightness = pcc->sinf[SINF_AC_CUR_BRIGHT]; /* read the initial sticky key mode from the 
hardware */ pcc->sticky_mode = pcc->sinf[SINF_STICKY_KEY]; /* add sysfs attributes */ result = sysfs_create_group(&device->dev.kobj, &pcc_attr_group); if (result) goto out_backlight; return 0; out_backlight: backlight_device_unregister(pcc->backlight); out_input: acpi_pcc_destroy_input(pcc); out_sinf: kfree(pcc->sinf); out_hotkey: kfree(pcc); return result; } static int __init acpi_pcc_init(void) { int result = 0; if (acpi_disabled) return -ENODEV; result = acpi_bus_register_driver(&acpi_pcc_driver); if (result < 0) { ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Error registering hotkey driver\n")); return -ENODEV; } return 0; } static int acpi_pcc_hotkey_remove(struct acpi_device *device) { struct pcc_acpi *pcc = acpi_driver_data(device); if (!device || !pcc) return -EINVAL; sysfs_remove_group(&device->dev.kobj, &pcc_attr_group); backlight_device_unregister(pcc->backlight); acpi_pcc_destroy_input(pcc); kfree(pcc->sinf); kfree(pcc); return 0; } static void __exit acpi_pcc_exit(void) { acpi_bus_unregister_driver(&acpi_pcc_driver); } module_init(acpi_pcc_init); module_exit(acpi_pcc_exit);
gpl-2.0
XileForce/Vindicator-S6-Uni-Old
drivers/edac/i82975x_edac.c
2471
18760
/* * Intel 82975X Memory Controller kernel module * (C) 2007 aCarLab (India) Pvt. Ltd. (http://acarlab.com) * (C) 2007 jetzbroadband (http://jetzbroadband.com) * This file may be distributed under the terms of the * GNU General Public License. * * Written by Arvind R. * Copied from i82875p_edac.c source: */ #include <linux/module.h> #include <linux/init.h> #include <linux/pci.h> #include <linux/pci_ids.h> #include <linux/edac.h> #include "edac_core.h" #define I82975X_REVISION " Ver: 1.0.0" #define EDAC_MOD_STR "i82975x_edac" #define i82975x_printk(level, fmt, arg...) \ edac_printk(level, "i82975x", fmt, ##arg) #define i82975x_mc_printk(mci, level, fmt, arg...) \ edac_mc_chipset_printk(mci, level, "i82975x", fmt, ##arg) #ifndef PCI_DEVICE_ID_INTEL_82975_0 #define PCI_DEVICE_ID_INTEL_82975_0 0x277c #endif /* PCI_DEVICE_ID_INTEL_82975_0 */ #define I82975X_NR_DIMMS 8 #define I82975X_NR_CSROWS(nr_chans) (I82975X_NR_DIMMS / (nr_chans)) /* Intel 82975X register addresses - device 0 function 0 - DRAM Controller */ #define I82975X_EAP 0x58 /* Dram Error Address Pointer (32b) * * 31:7 128 byte cache-line address * 6:1 reserved * 0 0: CH0; 1: CH1 */ #define I82975X_DERRSYN 0x5c /* Dram Error SYNdrome (8b) * * 7:0 DRAM ECC Syndrome */ #define I82975X_DES 0x5d /* Dram ERRor DeSTination (8b) * 0h: Processor Memory Reads * 1h:7h reserved * More - See Page 65 of Intel DocSheet. */ #define I82975X_ERRSTS 0xc8 /* Error Status Register (16b) * * 15:12 reserved * 11 Thermal Sensor Event * 10 reserved * 9 non-DRAM lock error (ndlock) * 8 Refresh Timeout * 7:2 reserved * 1 ECC UE (multibit DRAM error) * 0 ECC CE (singlebit DRAM error) */ /* Error Reporting is supported by 3 mechanisms: 1. DMI SERR generation ( ERRCMD ) 2. SMI DMI generation ( SMICMD ) 3. 
SCI DMI generation ( SCICMD ) NOTE: Only ONE of the three must be enabled */ #define I82975X_ERRCMD 0xca /* Error Command (16b) * * 15:12 reserved * 11 Thermal Sensor Event * 10 reserved * 9 non-DRAM lock error (ndlock) * 8 Refresh Timeout * 7:2 reserved * 1 ECC UE (multibit DRAM error) * 0 ECC CE (singlebit DRAM error) */ #define I82975X_SMICMD 0xcc /* Error Command (16b) * * 15:2 reserved * 1 ECC UE (multibit DRAM error) * 0 ECC CE (singlebit DRAM error) */ #define I82975X_SCICMD 0xce /* Error Command (16b) * * 15:2 reserved * 1 ECC UE (multibit DRAM error) * 0 ECC CE (singlebit DRAM error) */ #define I82975X_XEAP 0xfc /* Extended Dram Error Address Pointer (8b) * * 7:1 reserved * 0 Bit32 of the Dram Error Address */ #define I82975X_MCHBAR 0x44 /* * * 31:14 Base Addr of 16K memory-mapped * configuration space * 13:1 reserverd * 0 mem-mapped config space enable */ /* NOTE: Following addresses have to indexed using MCHBAR offset (44h, 32b) */ /* Intel 82975x memory mapped register space */ #define I82975X_DRB_SHIFT 25 /* fixed 32MiB grain */ #define I82975X_DRB 0x100 /* DRAM Row Boundary (8b x 8) * * 7 set to 1 in highest DRB of * channel if 4GB in ch. * 6:2 upper boundary of rank in * 32MB grains * 1:0 set to 0 */ #define I82975X_DRB_CH0R0 0x100 #define I82975X_DRB_CH0R1 0x101 #define I82975X_DRB_CH0R2 0x102 #define I82975X_DRB_CH0R3 0x103 #define I82975X_DRB_CH1R0 0x180 #define I82975X_DRB_CH1R1 0x181 #define I82975X_DRB_CH1R2 0x182 #define I82975X_DRB_CH1R3 0x183 #define I82975X_DRA 0x108 /* DRAM Row Attribute (4b x 8) * defines the PAGE SIZE to be used * for the rank * 7 reserved * 6:4 row attr of odd rank, i.e. 1 * 3 reserved * 2:0 row attr of even rank, i.e. 
0 * * 000 = unpopulated * 001 = reserved * 010 = 4KiB * 011 = 8KiB * 100 = 16KiB * others = reserved */ #define I82975X_DRA_CH0R01 0x108 #define I82975X_DRA_CH0R23 0x109 #define I82975X_DRA_CH1R01 0x188 #define I82975X_DRA_CH1R23 0x189 #define I82975X_BNKARC 0x10e /* Type of device in each rank - Bank Arch (16b) * * 15:8 reserved * 7:6 Rank 3 architecture * 5:4 Rank 2 architecture * 3:2 Rank 1 architecture * 1:0 Rank 0 architecture * * 00 => 4 banks * 01 => 8 banks */ #define I82975X_C0BNKARC 0x10e #define I82975X_C1BNKARC 0x18e #define I82975X_DRC 0x120 /* DRAM Controller Mode0 (32b) * * 31:30 reserved * 29 init complete * 28:11 reserved, according to Intel * 22:21 number of channels * 00=1 01=2 in 82875 * seems to be ECC mode * bits in 82975 in Asus * P5W * 19:18 Data Integ Mode * 00=none 01=ECC in 82875 * 10:8 refresh mode * 7 reserved * 6:4 mode select * 3:2 reserved * 1:0 DRAM type 10=Second Revision * DDR2 SDRAM * 00, 01, 11 reserved */ #define I82975X_DRC_CH0M0 0x120 #define I82975X_DRC_CH1M0 0x1A0 #define I82975X_DRC_M1 0x124 /* DRAM Controller Mode1 (32b) * 31 0=Standard Address Map * 1=Enhanced Address Map * 30:0 reserved */ #define I82975X_DRC_CH0M1 0x124 #define I82975X_DRC_CH1M1 0x1A4 enum i82975x_chips { I82975X = 0, }; struct i82975x_pvt { void __iomem *mch_window; }; struct i82975x_dev_info { const char *ctl_name; }; struct i82975x_error_info { u16 errsts; u32 eap; u8 des; u8 derrsyn; u16 errsts2; u8 chan; /* the channel is bit 0 of EAP */ u8 xeap; /* extended eap bit */ }; static const struct i82975x_dev_info i82975x_devs[] = { [I82975X] = { .ctl_name = "i82975x" }, }; static struct pci_dev *mci_pdev; /* init dev: in case that AGP code has * already registered driver */ static int i82975x_registered = 1; static void i82975x_get_error_info(struct mem_ctl_info *mci, struct i82975x_error_info *info) { struct pci_dev *pdev; pdev = to_pci_dev(mci->pdev); /* * This is a mess because there is no atomic way to read all the * registers at once and the 
registers can transition from CE being * overwritten by UE. */ pci_read_config_word(pdev, I82975X_ERRSTS, &info->errsts); pci_read_config_dword(pdev, I82975X_EAP, &info->eap); pci_read_config_byte(pdev, I82975X_XEAP, &info->xeap); pci_read_config_byte(pdev, I82975X_DES, &info->des); pci_read_config_byte(pdev, I82975X_DERRSYN, &info->derrsyn); pci_read_config_word(pdev, I82975X_ERRSTS, &info->errsts2); pci_write_bits16(pdev, I82975X_ERRSTS, 0x0003, 0x0003); /* * If the error is the same then we can for both reads then * the first set of reads is valid. If there is a change then * there is a CE no info and the second set of reads is valid * and should be UE info. */ if (!(info->errsts2 & 0x0003)) return; if ((info->errsts ^ info->errsts2) & 0x0003) { pci_read_config_dword(pdev, I82975X_EAP, &info->eap); pci_read_config_byte(pdev, I82975X_XEAP, &info->xeap); pci_read_config_byte(pdev, I82975X_DES, &info->des); pci_read_config_byte(pdev, I82975X_DERRSYN, &info->derrsyn); } } static int i82975x_process_error_info(struct mem_ctl_info *mci, struct i82975x_error_info *info, int handle_errors) { int row, chan; unsigned long offst, page; if (!(info->errsts2 & 0x0003)) return 0; if (!handle_errors) return 1; if ((info->errsts ^ info->errsts2) & 0x0003) { edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, 0, 0, 0, -1, -1, -1, "UE overwrote CE", ""); info->errsts = info->errsts2; } page = (unsigned long) info->eap; page >>= 1; if (info->xeap & 1) page |= 0x80000000; page >>= (PAGE_SHIFT - 1); row = edac_mc_find_csrow_by_page(mci, page); if (row == -1) { i82975x_mc_printk(mci, KERN_ERR, "error processing EAP:\n" "\tXEAP=%u\n" "\t EAP=0x%08x\n" "\tPAGE=0x%08x\n", (info->xeap & 1) ? 1 : 0, info->eap, (unsigned int) page); return 0; } chan = (mci->csrows[row]->nr_channels == 1) ? 
0 : info->eap & 1; offst = info->eap & ((1 << PAGE_SHIFT) - (1 << mci->csrows[row]->channels[chan]->dimm->grain)); if (info->errsts & 0x0002) edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, page, offst, 0, row, -1, -1, "i82975x UE", ""); else edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1, page, offst, info->derrsyn, row, chan ? chan : 0, -1, "i82975x CE", ""); return 1; } static void i82975x_check(struct mem_ctl_info *mci) { struct i82975x_error_info info; edac_dbg(1, "MC%d\n", mci->mc_idx); i82975x_get_error_info(mci, &info); i82975x_process_error_info(mci, &info, 1); } /* Return 1 if dual channel mode is active. Else return 0. */ static int dual_channel_active(void __iomem *mch_window) { /* * We treat interleaved-symmetric configuration as dual-channel - EAP's * bit-0 giving the channel of the error location. * * All other configurations are treated as single channel - the EAP's * bit-0 will resolve ok in symmetric area of mixed * (symmetric/asymmetric) configurations */ u8 drb[4][2]; int row; int dualch; for (dualch = 1, row = 0; dualch && (row < 4); row++) { drb[row][0] = readb(mch_window + I82975X_DRB + row); drb[row][1] = readb(mch_window + I82975X_DRB + row + 0x80); dualch = dualch && (drb[row][0] == drb[row][1]); } return dualch; } static enum dev_type i82975x_dram_type(void __iomem *mch_window, int rank) { /* * ECC is possible on i92975x ONLY with DEV_X8 */ return DEV_X8; } static void i82975x_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev, void __iomem *mch_window) { struct csrow_info *csrow; unsigned long last_cumul_size; u8 value; u32 cumul_size, nr_pages; int index, chan; struct dimm_info *dimm; enum dev_type dtype; last_cumul_size = 0; /* * 82875 comment: * The dram row boundary (DRB) reg values are boundary address * for each DRAM row with a granularity of 32 or 64MB (single/dual * channel operation). DRB regs are cumulative; therefore DRB7 will * contain the total memory contained in all rows. 
* */ for (index = 0; index < mci->nr_csrows; index++) { csrow = mci->csrows[index]; value = readb(mch_window + I82975X_DRB + index + ((index >= 4) ? 0x80 : 0)); cumul_size = value; cumul_size <<= (I82975X_DRB_SHIFT - PAGE_SHIFT); /* * Adjust cumul_size w.r.t number of channels * */ if (csrow->nr_channels > 1) cumul_size <<= 1; edac_dbg(3, "(%d) cumul_size 0x%x\n", index, cumul_size); nr_pages = cumul_size - last_cumul_size; if (!nr_pages) continue; /* * Initialise dram labels * index values: * [0-7] for single-channel; i.e. csrow->nr_channels = 1 * [0-3] for dual-channel; i.e. csrow->nr_channels = 2 */ dtype = i82975x_dram_type(mch_window, index); for (chan = 0; chan < csrow->nr_channels; chan++) { dimm = mci->csrows[index]->channels[chan]->dimm; dimm->nr_pages = nr_pages / csrow->nr_channels; snprintf(csrow->channels[chan]->dimm->label, EDAC_MC_LABEL_LEN, "DIMM %c%d", (chan == 0) ? 'A' : 'B', index); dimm->grain = 1 << 7; /* 128Byte cache-line resolution */ dimm->dtype = i82975x_dram_type(mch_window, index); dimm->mtype = MEM_DDR2; /* I82975x supports only DDR2 */ dimm->edac_mode = EDAC_SECDED; /* only supported */ } csrow->first_page = last_cumul_size; csrow->last_page = cumul_size - 1; last_cumul_size = cumul_size; } } /* #define i82975x_DEBUG_IOMEM */ #ifdef i82975x_DEBUG_IOMEM static void i82975x_print_dram_timings(void __iomem *mch_window) { /* * The register meanings are from Intel specs; * (shows 13-5-5-5 for 800-DDR2) * Asus P5W Bios reports 15-5-4-4 * What's your religion? 
*/ static const int caslats[4] = { 5, 4, 3, 6 }; u32 dtreg[2]; dtreg[0] = readl(mch_window + 0x114); dtreg[1] = readl(mch_window + 0x194); i82975x_printk(KERN_INFO, "DRAM Timings : Ch0 Ch1\n" " RAS Active Min = %d %d\n" " CAS latency = %d %d\n" " RAS to CAS = %d %d\n" " RAS precharge = %d %d\n", (dtreg[0] >> 19 ) & 0x0f, (dtreg[1] >> 19) & 0x0f, caslats[(dtreg[0] >> 8) & 0x03], caslats[(dtreg[1] >> 8) & 0x03], ((dtreg[0] >> 4) & 0x07) + 2, ((dtreg[1] >> 4) & 0x07) + 2, (dtreg[0] & 0x07) + 2, (dtreg[1] & 0x07) + 2 ); } #endif static int i82975x_probe1(struct pci_dev *pdev, int dev_idx) { int rc = -ENODEV; struct mem_ctl_info *mci; struct edac_mc_layer layers[2]; struct i82975x_pvt *pvt; void __iomem *mch_window; u32 mchbar; u32 drc[2]; struct i82975x_error_info discard; int chans; #ifdef i82975x_DEBUG_IOMEM u8 c0drb[4]; u8 c1drb[4]; #endif edac_dbg(0, "\n"); pci_read_config_dword(pdev, I82975X_MCHBAR, &mchbar); if (!(mchbar & 1)) { edac_dbg(3, "failed, MCHBAR disabled!\n"); goto fail0; } mchbar &= 0xffffc000; /* bits 31:14 used for 16K window */ mch_window = ioremap_nocache(mchbar, 0x1000); #ifdef i82975x_DEBUG_IOMEM i82975x_printk(KERN_INFO, "MCHBAR real = %0x, remapped = %p\n", mchbar, mch_window); c0drb[0] = readb(mch_window + I82975X_DRB_CH0R0); c0drb[1] = readb(mch_window + I82975X_DRB_CH0R1); c0drb[2] = readb(mch_window + I82975X_DRB_CH0R2); c0drb[3] = readb(mch_window + I82975X_DRB_CH0R3); c1drb[0] = readb(mch_window + I82975X_DRB_CH1R0); c1drb[1] = readb(mch_window + I82975X_DRB_CH1R1); c1drb[2] = readb(mch_window + I82975X_DRB_CH1R2); c1drb[3] = readb(mch_window + I82975X_DRB_CH1R3); i82975x_printk(KERN_INFO, "DRBCH0R0 = 0x%02x\n", c0drb[0]); i82975x_printk(KERN_INFO, "DRBCH0R1 = 0x%02x\n", c0drb[1]); i82975x_printk(KERN_INFO, "DRBCH0R2 = 0x%02x\n", c0drb[2]); i82975x_printk(KERN_INFO, "DRBCH0R3 = 0x%02x\n", c0drb[3]); i82975x_printk(KERN_INFO, "DRBCH1R0 = 0x%02x\n", c1drb[0]); i82975x_printk(KERN_INFO, "DRBCH1R1 = 0x%02x\n", c1drb[1]); 
i82975x_printk(KERN_INFO, "DRBCH1R2 = 0x%02x\n", c1drb[2]); i82975x_printk(KERN_INFO, "DRBCH1R3 = 0x%02x\n", c1drb[3]); #endif drc[0] = readl(mch_window + I82975X_DRC_CH0M0); drc[1] = readl(mch_window + I82975X_DRC_CH1M0); #ifdef i82975x_DEBUG_IOMEM i82975x_printk(KERN_INFO, "DRC_CH0 = %0x, %s\n", drc[0], ((drc[0] >> 21) & 3) == 1 ? "ECC enabled" : "ECC disabled"); i82975x_printk(KERN_INFO, "DRC_CH1 = %0x, %s\n", drc[1], ((drc[1] >> 21) & 3) == 1 ? "ECC enabled" : "ECC disabled"); i82975x_printk(KERN_INFO, "C0 BNKARC = %0x\n", readw(mch_window + I82975X_C0BNKARC)); i82975x_printk(KERN_INFO, "C1 BNKARC = %0x\n", readw(mch_window + I82975X_C1BNKARC)); i82975x_print_dram_timings(mch_window); goto fail1; #endif if (!(((drc[0] >> 21) & 3) == 1 || ((drc[1] >> 21) & 3) == 1)) { i82975x_printk(KERN_INFO, "ECC disabled on both channels.\n"); goto fail1; } chans = dual_channel_active(mch_window) + 1; /* assuming only one controller, index thus is 0 */ layers[0].type = EDAC_MC_LAYER_CHIP_SELECT; layers[0].size = I82975X_NR_DIMMS; layers[0].is_virt_csrow = true; layers[1].type = EDAC_MC_LAYER_CHANNEL; layers[1].size = I82975X_NR_CSROWS(chans); layers[1].is_virt_csrow = false; mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*pvt)); if (!mci) { rc = -ENOMEM; goto fail1; } edac_dbg(3, "init mci\n"); mci->pdev = &pdev->dev; mci->mtype_cap = MEM_FLAG_DDR2; mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED; mci->edac_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED; mci->mod_name = EDAC_MOD_STR; mci->mod_ver = I82975X_REVISION; mci->ctl_name = i82975x_devs[dev_idx].ctl_name; mci->dev_name = pci_name(pdev); mci->edac_check = i82975x_check; mci->ctl_page_to_phys = NULL; edac_dbg(3, "init pvt\n"); pvt = (struct i82975x_pvt *) mci->pvt_info; pvt->mch_window = mch_window; i82975x_init_csrows(mci, pdev, mch_window); mci->scrub_mode = SCRUB_HW_SRC; i82975x_get_error_info(mci, &discard); /* clear counters */ /* finalize this instance of memory controller with edac core */ if 
(edac_mc_add_mc(mci)) { edac_dbg(3, "failed edac_mc_add_mc()\n"); goto fail2; } /* get this far and it's successful */ edac_dbg(3, "success\n"); return 0; fail2: edac_mc_free(mci); fail1: iounmap(mch_window); fail0: return rc; } /* returns count (>= 0), or negative on error */ static int i82975x_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { int rc; edac_dbg(0, "\n"); if (pci_enable_device(pdev) < 0) return -EIO; rc = i82975x_probe1(pdev, ent->driver_data); if (mci_pdev == NULL) mci_pdev = pci_dev_get(pdev); return rc; } static void i82975x_remove_one(struct pci_dev *pdev) { struct mem_ctl_info *mci; struct i82975x_pvt *pvt; edac_dbg(0, "\n"); mci = edac_mc_del_mc(&pdev->dev); if (mci == NULL) return; pvt = mci->pvt_info; if (pvt->mch_window) iounmap( pvt->mch_window ); edac_mc_free(mci); } static DEFINE_PCI_DEVICE_TABLE(i82975x_pci_tbl) = { { PCI_VEND_DEV(INTEL, 82975_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0, I82975X }, { 0, } /* 0 terminated list. */ }; MODULE_DEVICE_TABLE(pci, i82975x_pci_tbl); static struct pci_driver i82975x_driver = { .name = EDAC_MOD_STR, .probe = i82975x_init_one, .remove = i82975x_remove_one, .id_table = i82975x_pci_tbl, }; static int __init i82975x_init(void) { int pci_rc; edac_dbg(3, "\n"); /* Ensure that the OPSTATE is set correctly for POLL or NMI */ opstate_init(); pci_rc = pci_register_driver(&i82975x_driver); if (pci_rc < 0) goto fail0; if (mci_pdev == NULL) { mci_pdev = pci_get_device(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82975_0, NULL); if (!mci_pdev) { edac_dbg(0, "i82975x pci_get_device fail\n"); pci_rc = -ENODEV; goto fail1; } pci_rc = i82975x_init_one(mci_pdev, i82975x_pci_tbl); if (pci_rc < 0) { edac_dbg(0, "i82975x init fail\n"); pci_rc = -ENODEV; goto fail1; } } return 0; fail1: pci_unregister_driver(&i82975x_driver); fail0: if (mci_pdev != NULL) pci_dev_put(mci_pdev); return pci_rc; } static void __exit i82975x_exit(void) { edac_dbg(3, "\n"); pci_unregister_driver(&i82975x_driver); if (!i82975x_registered) { 
i82975x_remove_one(mci_pdev); pci_dev_put(mci_pdev); } } module_init(i82975x_init); module_exit(i82975x_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Arvind R. <arvino55@gmail.com>"); MODULE_DESCRIPTION("MC support for Intel 82975 memory hub controllers"); module_param(edac_op_state, int, 0444); MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
gpl-2.0
Garcia98/kernel-amami
fs/lockd/mon.c
3239
13881
/* * linux/fs/lockd/mon.c * * The kernel statd client. * * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de> */ #include <linux/types.h> #include <linux/utsname.h> #include <linux/kernel.h> #include <linux/ktime.h> #include <linux/slab.h> #include <linux/sunrpc/clnt.h> #include <linux/sunrpc/xprtsock.h> #include <linux/sunrpc/svc.h> #include <linux/lockd/lockd.h> #include <asm/unaligned.h> #define NLMDBG_FACILITY NLMDBG_MONITOR #define NSM_PROGRAM 100024 #define NSM_VERSION 1 enum { NSMPROC_NULL, NSMPROC_STAT, NSMPROC_MON, NSMPROC_UNMON, NSMPROC_UNMON_ALL, NSMPROC_SIMU_CRASH, NSMPROC_NOTIFY, }; struct nsm_args { struct nsm_private *priv; u32 prog; /* RPC callback info */ u32 vers; u32 proc; char *mon_name; }; struct nsm_res { u32 status; u32 state; }; static const struct rpc_program nsm_program; static LIST_HEAD(nsm_handles); static DEFINE_SPINLOCK(nsm_lock); /* * Local NSM state */ u32 __read_mostly nsm_local_state; bool __read_mostly nsm_use_hostnames; static inline struct sockaddr *nsm_addr(const struct nsm_handle *nsm) { return (struct sockaddr *)&nsm->sm_addr; } static struct rpc_clnt *nsm_create(struct net *net) { struct sockaddr_in sin = { .sin_family = AF_INET, .sin_addr.s_addr = htonl(INADDR_LOOPBACK), }; struct rpc_create_args args = { .net = net, .protocol = XPRT_TRANSPORT_UDP, .address = (struct sockaddr *)&sin, .addrsize = sizeof(sin), .servername = "rpc.statd", .program = &nsm_program, .version = NSM_VERSION, .authflavor = RPC_AUTH_NULL, .flags = RPC_CLNT_CREATE_NOPING, }; return rpc_create(&args); } static int nsm_mon_unmon(struct nsm_handle *nsm, u32 proc, struct nsm_res *res, struct net *net) { struct rpc_clnt *clnt; int status; struct nsm_args args = { .priv = &nsm->sm_priv, .prog = NLM_PROGRAM, .vers = 3, .proc = NLMPROC_NSM_NOTIFY, .mon_name = nsm->sm_mon_name, }; struct rpc_message msg = { .rpc_argp = &args, .rpc_resp = res, }; clnt = nsm_create(net); if (IS_ERR(clnt)) { status = PTR_ERR(clnt); dprintk("lockd: failed to create NSM upcall 
transport, " "status=%d\n", status); goto out; } memset(res, 0, sizeof(*res)); msg.rpc_proc = &clnt->cl_procinfo[proc]; status = rpc_call_sync(clnt, &msg, 0); if (status < 0) dprintk("lockd: NSM upcall RPC failed, status=%d\n", status); else status = 0; rpc_shutdown_client(clnt); out: return status; } /** * nsm_monitor - Notify a peer in case we reboot * @host: pointer to nlm_host of peer to notify * * If this peer is not already monitored, this function sends an * upcall to the local rpc.statd to record the name/address of * the peer to notify in case we reboot. * * Returns zero if the peer is monitored by the local rpc.statd; * otherwise a negative errno value is returned. */ int nsm_monitor(const struct nlm_host *host) { struct nsm_handle *nsm = host->h_nsmhandle; struct nsm_res res; int status; dprintk("lockd: nsm_monitor(%s)\n", nsm->sm_name); if (nsm->sm_monitored) return 0; /* * Choose whether to record the caller_name or IP address of * this peer in the local rpc.statd's database. */ nsm->sm_mon_name = nsm_use_hostnames ? nsm->sm_name : nsm->sm_addrbuf; status = nsm_mon_unmon(nsm, NSMPROC_MON, &res, host->net); if (unlikely(res.status != 0)) status = -EIO; if (unlikely(status < 0)) { printk(KERN_NOTICE "lockd: cannot monitor %s\n", nsm->sm_name); return status; } nsm->sm_monitored = 1; if (unlikely(nsm_local_state != res.state)) { nsm_local_state = res.state; dprintk("lockd: NSM state changed to %d\n", nsm_local_state); } return 0; } /** * nsm_unmonitor - Unregister peer notification * @host: pointer to nlm_host of peer to stop monitoring * * If this peer is monitored, this function sends an upcall to * tell the local rpc.statd not to send this peer a notification * when we reboot. 
*/ void nsm_unmonitor(const struct nlm_host *host) { struct nsm_handle *nsm = host->h_nsmhandle; struct nsm_res res; int status; if (atomic_read(&nsm->sm_count) == 1 && nsm->sm_monitored && !nsm->sm_sticky) { dprintk("lockd: nsm_unmonitor(%s)\n", nsm->sm_name); status = nsm_mon_unmon(nsm, NSMPROC_UNMON, &res, host->net); if (res.status != 0) status = -EIO; if (status < 0) printk(KERN_NOTICE "lockd: cannot unmonitor %s\n", nsm->sm_name); else nsm->sm_monitored = 0; } } static struct nsm_handle *nsm_lookup_hostname(const char *hostname, const size_t len) { struct nsm_handle *nsm; list_for_each_entry(nsm, &nsm_handles, sm_link) if (strlen(nsm->sm_name) == len && memcmp(nsm->sm_name, hostname, len) == 0) return nsm; return NULL; } static struct nsm_handle *nsm_lookup_addr(const struct sockaddr *sap) { struct nsm_handle *nsm; list_for_each_entry(nsm, &nsm_handles, sm_link) if (rpc_cmp_addr(nsm_addr(nsm), sap)) return nsm; return NULL; } static struct nsm_handle *nsm_lookup_priv(const struct nsm_private *priv) { struct nsm_handle *nsm; list_for_each_entry(nsm, &nsm_handles, sm_link) if (memcmp(nsm->sm_priv.data, priv->data, sizeof(priv->data)) == 0) return nsm; return NULL; } /* * Construct a unique cookie to match this nsm_handle to this monitored * host. It is passed to the local rpc.statd via NSMPROC_MON, and * returned via NLMPROC_SM_NOTIFY, in the "priv" field of these * requests. * * The NSM protocol requires that these cookies be unique while the * system is running. We prefer a stronger requirement of making them * unique across reboots. If user space bugs cause a stale cookie to * be sent to the kernel, it could cause the wrong host to lose its * lock state if cookies were not unique across reboots. * * The cookies are exposed only to local user space via loopback. They * do not appear on the physical network. If we want greater security * for some reason, nsm_init_private() could perform a one-way hash to * obscure the contents of the cookie. 
 */
static void nsm_init_private(struct nsm_handle *nsm)
{
	u64 *p = (u64 *)&nsm->sm_priv.data;
	struct timespec ts;
	s64 ns;

	/* Cookie = current timestamp (ns) followed by the handle's
	 * kernel address; written unaligned into sm_priv.data. */
	ktime_get_ts(&ts);
	ns = timespec_to_ns(&ts);
	put_unaligned(ns, p);
	put_unaligned((unsigned long)nsm, p + 1);
}

/*
 * Allocate and initialise a fresh nsm_handle for the given peer.
 * The hostname string is stored in memory allocated together with
 * the handle itself (right after the struct).  The new handle starts
 * with a reference count of one.  Returns NULL on allocation failure.
 */
static struct nsm_handle *nsm_create_handle(const struct sockaddr *sap,
					    const size_t salen,
					    const char *hostname,
					    const size_t hostname_len)
{
	struct nsm_handle *new;

	/* One allocation covers the struct plus the NUL-terminated
	 * hostname copy. */
	new = kzalloc(sizeof(*new) + hostname_len + 1, GFP_KERNEL);
	if (unlikely(new == NULL))
		return NULL;

	atomic_set(&new->sm_count, 1);
	new->sm_name = (char *)(new + 1);
	memcpy(nsm_addr(new), sap, salen);
	new->sm_addrlen = salen;
	nsm_init_private(new);

	/* Pre-compute a presentation string for the peer address;
	 * fall back to a fixed marker if rpc_ntop cannot format it. */
	if (rpc_ntop(nsm_addr(new), new->sm_addrbuf,
					sizeof(new->sm_addrbuf)) == 0)
		(void)snprintf(new->sm_addrbuf, sizeof(new->sm_addrbuf),
				"unsupported address family");
	memcpy(new->sm_name, hostname, hostname_len);
	new->sm_name[hostname_len] = '\0';

	return new;
}

/**
 * nsm_get_handle - Find or create a cached nsm_handle
 * @sap: pointer to socket address of handle to find
 * @salen: length of socket address
 * @hostname: pointer to C string containing hostname to find
 * @hostname_len: length of C string
 *
 * Behavior is modulated by the global nsm_use_hostnames variable.
 *
 * Returns a cached nsm_handle after bumping its ref count, or
 * returns a fresh nsm_handle if a handle that matches @sap and/or
 * @hostname cannot be found in the handle cache.  Returns NULL if
 * an error occurs.
 */
struct nsm_handle *nsm_get_handle(const struct sockaddr *sap,
				  const size_t salen, const char *hostname,
				  const size_t hostname_len)
{
	struct nsm_handle *cached, *new = NULL;

	/* Reject caller names containing '/', with a rate-limited
	 * warning so a misbehaving client cannot flood the log. */
	if (hostname && memchr(hostname, '/', hostname_len) != NULL) {
		if (printk_ratelimit()) {
			printk(KERN_WARNING "Invalid hostname \"%.*s\" "
					    "in NFS lock request\n",
				(int)hostname_len, hostname);
		}
		return NULL;
	}

retry:
	spin_lock(&nsm_lock);

	if (nsm_use_hostnames && hostname != NULL)
		cached = nsm_lookup_hostname(hostname, hostname_len);
	else
		cached = nsm_lookup_addr(sap);

	if (cached != NULL) {
		atomic_inc(&cached->sm_count);
		spin_unlock(&nsm_lock);
		/* Another task may have inserted a matching handle while
		 * we were allocating; discard our unused allocation. */
		kfree(new);
		dprintk("lockd: found nsm_handle for %s (%s), "
				"cnt %d\n", cached->sm_name,
				cached->sm_addrbuf,
				atomic_read(&cached->sm_count));
		return cached;
	}

	if (new != NULL) {
		list_add(&new->sm_link, &nsm_handles);
		spin_unlock(&nsm_lock);
		dprintk("lockd: created nsm_handle for %s (%s)\n",
				new->sm_name, new->sm_addrbuf);
		return new;
	}

	spin_unlock(&nsm_lock);

	/* Allocation happens with nsm_lock dropped (nsm_create_handle
	 * uses GFP_KERNEL); retry the lookup afterwards in case of a
	 * concurrent insertion. */
	new = nsm_create_handle(sap, salen, hostname, hostname_len);
	if (unlikely(new == NULL))
		return NULL;
	goto retry;
}

/**
 * nsm_reboot_lookup - match NLMPROC_SM_NOTIFY arguments to an nsm_handle
 * @info: pointer to NLMPROC_SM_NOTIFY arguments
 *
 * Returns a matching nsm_handle if found in the nsm cache.  The returned
 * nsm_handle's reference count is bumped.  Otherwise returns NULL if some
 * error occurred.
 */
struct nsm_handle *nsm_reboot_lookup(const struct nlm_reboot *info)
{
	struct nsm_handle *cached;

	spin_lock(&nsm_lock);

	/* Match the notification to a handle by the private cookie we
	 * handed to statd in NSMPROC_MON. */
	cached = nsm_lookup_priv(&info->priv);
	if (unlikely(cached == NULL)) {
		spin_unlock(&nsm_lock);
		dprintk("lockd: never saw rebooted peer '%.*s' before\n",
				info->len, info->mon);
		return cached;
	}

	atomic_inc(&cached->sm_count);
	spin_unlock(&nsm_lock);

	dprintk("lockd: host %s (%s) rebooted, cnt %d\n",
			cached->sm_name, cached->sm_addrbuf,
			atomic_read(&cached->sm_count));
	return cached;
}

/**
 * nsm_release - Release an NSM handle
 * @nsm: pointer to handle to be released
 *
 */
void nsm_release(struct nsm_handle *nsm)
{
	/* Drop one reference; the final put unhashes the handle under
	 * nsm_lock and frees it. */
	if (atomic_dec_and_lock(&nsm->sm_count, &nsm_lock)) {
		list_del(&nsm->sm_link);
		spin_unlock(&nsm_lock);
		dprintk("lockd: destroyed nsm_handle for %s (%s)\n",
				nsm->sm_name, nsm->sm_addrbuf);
		kfree(nsm);
	}
}

/*
 * XDR functions for NSM.
 *
 * See http://www.opengroup.org/ for details on the Network
 * Status Monitor wire protocol.
 */

/*
 * Encode a variable-length opaque string.  Strings longer than
 * SM_MAXSTRLEN indicate a caller bug, hence the BUG_ON.
 */
static void encode_nsm_string(struct xdr_stream *xdr, const char *string)
{
	const u32 len = strlen(string);
	__be32 *p;

	BUG_ON(len > SM_MAXSTRLEN);
	p = xdr_reserve_space(xdr, 4 + len);
	xdr_encode_opaque(p, string, len);
}

/*
 * "mon_name" specifies the host to be monitored.
 */
static void encode_mon_name(struct xdr_stream *xdr, const struct nsm_args *argp)
{
	encode_nsm_string(xdr, argp->mon_name);
}

/*
 * The "my_id" argument specifies the hostname and RPC procedure
 * to be called when the status manager receives notification
 * (via the NLMPROC_SM_NOTIFY call) that the state of host "mon_name"
 * has changed.
 */
static void encode_my_id(struct xdr_stream *xdr, const struct nsm_args *argp)
{
	__be32 *p;

	/* my_name is always our own nodename. */
	encode_nsm_string(xdr, utsname()->nodename);
	p = xdr_reserve_space(xdr, 4 + 4 + 4);
	*p++ = cpu_to_be32(argp->prog);
	*p++ = cpu_to_be32(argp->vers);
	*p = cpu_to_be32(argp->proc);
}

/*
 * The "mon_id" argument specifies the non-private arguments
 * of an NSMPROC_MON or NSMPROC_UNMON call.
 */
static void encode_mon_id(struct xdr_stream *xdr, const struct nsm_args *argp)
{
	encode_mon_name(xdr, argp);
	encode_my_id(xdr, argp);
}

/*
 * The "priv" argument may contain private information required
 * by the NSMPROC_MON call.  This information will be supplied in the
 * NLMPROC_SM_NOTIFY call.
 */
static void encode_priv(struct xdr_stream *xdr, const struct nsm_args *argp)
{
	__be32 *p;

	p = xdr_reserve_space(xdr, SM_PRIV_SIZE);
	xdr_encode_opaque_fixed(p, argp->priv->data, SM_PRIV_SIZE);
}

/* Encode the arguments of an NSMPROC_MON request: mon_id + priv. */
static void nsm_xdr_enc_mon(struct rpc_rqst *req, struct xdr_stream *xdr,
			    const struct nsm_args *argp)
{
	encode_mon_id(xdr, argp);
	encode_priv(xdr, argp);
}

/* Encode the arguments of an NSMPROC_UNMON request: mon_id only. */
static void nsm_xdr_enc_unmon(struct rpc_rqst *req, struct xdr_stream *xdr,
			      const struct nsm_args *argp)
{
	encode_mon_id(xdr, argp);
}

/*
 * Decode an NSMPROC_MON reply: a status word followed by the NSM
 * state number.  Returns 0 on success or -EIO on a short reply.
 */
static int nsm_xdr_dec_stat_res(struct rpc_rqst *rqstp,
				struct xdr_stream *xdr,
				struct nsm_res *resp)
{
	__be32 *p;

	p = xdr_inline_decode(xdr, 4 + 4);
	if (unlikely(p == NULL))
		return -EIO;
	resp->status = be32_to_cpup(p++);
	resp->state = be32_to_cpup(p);

	dprintk("lockd: %s status %d state %d\n",
		__func__, resp->status, resp->state);
	return 0;
}

/*
 * Decode an NSMPROC_UNMON reply: just the NSM state number.
 * Returns 0 on success or -EIO on a short reply.
 */
static int nsm_xdr_dec_stat(struct rpc_rqst *rqstp,
			    struct xdr_stream *xdr,
			    struct nsm_res *resp)
{
	__be32 *p;

	p = xdr_inline_decode(xdr, 4);
	if (unlikely(p == NULL))
		return -EIO;
	resp->state = be32_to_cpup(p);

	dprintk("lockd: %s state %d\n", __func__, resp->state);
	return 0;
}

/* On-the-wire argument/reply sizes, in XDR words. */
#define SM_my_name_sz	(1+XDR_QUADLEN(SM_MAXSTRLEN))
#define SM_my_id_sz	(SM_my_name_sz+3)
#define SM_mon_name_sz	(1+XDR_QUADLEN(SM_MAXSTRLEN))
#define SM_mon_id_sz	(SM_mon_name_sz+SM_my_id_sz)
#define SM_priv_sz	(XDR_QUADLEN(SM_PRIV_SIZE))
#define SM_mon_sz	(SM_mon_id_sz+SM_priv_sz)
#define SM_monres_sz	2
#define SM_unmonres_sz	1

/* RPC procedure table for NSM version 1. */
static struct rpc_procinfo	nsm_procedures[] = {
[NSMPROC_MON] = {
		.p_proc		= NSMPROC_MON,
		.p_encode	= (kxdreproc_t)nsm_xdr_enc_mon,
		.p_decode	= (kxdrdproc_t)nsm_xdr_dec_stat_res,
		.p_arglen	= SM_mon_sz,
		.p_replen	= SM_monres_sz,
		.p_statidx	= NSMPROC_MON,
		.p_name		= "MONITOR",
	},
[NSMPROC_UNMON] = {
		.p_proc		= NSMPROC_UNMON,
		.p_encode	= (kxdreproc_t)nsm_xdr_enc_unmon,
		.p_decode	= (kxdrdproc_t)nsm_xdr_dec_stat,
		.p_arglen	= SM_mon_id_sz,
		.p_replen	= SM_unmonres_sz,
		.p_statidx	= NSMPROC_UNMON,
		.p_name		= "UNMONITOR",
	},
};

static const struct rpc_version	nsm_version1 = {
	.number		= 1,
	.nrprocs	= ARRAY_SIZE(nsm_procedures),
	.procs		= nsm_procedures
};

static const struct rpc_version *nsm_version[] = {
	[1] = &nsm_version1,
};

static struct rpc_stat		nsm_stats;

/* Program descriptor used when creating the statd upcall client. */
static const struct rpc_program	nsm_program = {
	.name		= "statd",
	.number		= NSM_PROGRAM,
	.nrvers		= ARRAY_SIZE(nsm_version),
	.version	= nsm_version,
	.stats		= &nsm_stats
};
gpl-2.0
IllusionRom-deprecated/android_kernel_lge_zee
drivers/net/ethernet/davicom/dm9000.c
3495
39386
/* * Davicom DM9000 Fast Ethernet driver for Linux. * Copyright (C) 1997 Sten Wang * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * (C) Copyright 1997-1998 DAVICOM Semiconductor,Inc. All Rights Reserved. * * Additional updates, Copyright: * Ben Dooks <ben@simtec.co.uk> * Sascha Hauer <s.hauer@pengutronix.de> */ #include <linux/module.h> #include <linux/ioport.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/skbuff.h> #include <linux/spinlock.h> #include <linux/crc32.h> #include <linux/mii.h> #include <linux/ethtool.h> #include <linux/dm9000.h> #include <linux/delay.h> #include <linux/platform_device.h> #include <linux/irq.h> #include <linux/slab.h> #include <asm/delay.h> #include <asm/irq.h> #include <asm/io.h> #include "dm9000.h" /* Board/System/Debug information/definition ---------------- */ #define DM9000_PHY 0x40 /* PHY address 0x01 */ #define CARDNAME "dm9000" #define DRV_VERSION "1.31" /* * Transmit timeout, default 5 seconds. */ static int watchdog = 5000; module_param(watchdog, int, 0400); MODULE_PARM_DESC(watchdog, "transmit timeout in milliseconds"); /* * Debug messages level */ static int debug; module_param(debug, int, 0644); MODULE_PARM_DESC(debug, "dm9000 debug level (0-4)"); /* DM9000 register address locking. * * The DM9000 uses an address register to control where data written * to the data register goes. This means that the address register * must be preserved over interrupts or similar calls. 
* * During interrupt and other critical calls, a spinlock is used to * protect the system, but the calls themselves save the address * in the address register in case they are interrupting another * access to the device. * * For general accesses a lock is provided so that calls which are * allowed to sleep are serialised so that the address register does * not need to be saved. This lock also serves to serialise access * to the EEPROM and PHY access registers which are shared between * these two devices. */ /* The driver supports the original DM9000E, and now the two newer * devices, DM9000A and DM9000B. */ enum dm9000_type { TYPE_DM9000E, /* original DM9000 */ TYPE_DM9000A, TYPE_DM9000B }; /* Structure/enum declaration ------------------------------- */ typedef struct board_info { void __iomem *io_addr; /* Register I/O base address */ void __iomem *io_data; /* Data I/O address */ u16 irq; /* IRQ */ u16 tx_pkt_cnt; u16 queue_pkt_len; u16 queue_start_addr; u16 queue_ip_summed; u16 dbug_cnt; u8 io_mode; /* 0:word, 2:byte */ u8 phy_addr; u8 imr_all; unsigned int flags; unsigned int in_suspend :1; unsigned int wake_supported :1; enum dm9000_type type; void (*inblk)(void __iomem *port, void *data, int length); void (*outblk)(void __iomem *port, void *data, int length); void (*dumpblk)(void __iomem *port, int length); struct device *dev; /* parent device */ struct resource *addr_res; /* resources found */ struct resource *data_res; struct resource *addr_req; /* resources requested */ struct resource *data_req; struct resource *irq_res; int irq_wake; struct mutex addr_lock; /* phy and eeprom access lock */ struct delayed_work phy_poll; struct net_device *ndev; spinlock_t lock; struct mii_if_info mii; u32 msg_enable; u32 wake_state; int ip_summed; } board_info_t; /* debug code */ #define dm9000_dbg(db, lev, msg...) 
do { \ if ((lev) < debug) { \ dev_dbg(db->dev, msg); \ } \ } while (0) static inline board_info_t *to_dm9000_board(struct net_device *dev) { return netdev_priv(dev); } /* DM9000 network board routine ---------------------------- */ static void dm9000_reset(board_info_t * db) { dev_dbg(db->dev, "resetting device\n"); /* RESET device */ writeb(DM9000_NCR, db->io_addr); udelay(200); writeb(NCR_RST, db->io_data); udelay(200); } /* * Read a byte from I/O port */ static u8 ior(board_info_t * db, int reg) { writeb(reg, db->io_addr); return readb(db->io_data); } /* * Write a byte to I/O port */ static void iow(board_info_t * db, int reg, int value) { writeb(reg, db->io_addr); writeb(value, db->io_data); } /* routines for sending block to chip */ static void dm9000_outblk_8bit(void __iomem *reg, void *data, int count) { writesb(reg, data, count); } static void dm9000_outblk_16bit(void __iomem *reg, void *data, int count) { writesw(reg, data, (count+1) >> 1); } static void dm9000_outblk_32bit(void __iomem *reg, void *data, int count) { writesl(reg, data, (count+3) >> 2); } /* input block from chip to memory */ static void dm9000_inblk_8bit(void __iomem *reg, void *data, int count) { readsb(reg, data, count); } static void dm9000_inblk_16bit(void __iomem *reg, void *data, int count) { readsw(reg, data, (count+1) >> 1); } static void dm9000_inblk_32bit(void __iomem *reg, void *data, int count) { readsl(reg, data, (count+3) >> 2); } /* dump block from chip to null */ static void dm9000_dumpblk_8bit(void __iomem *reg, int count) { int i; int tmp; for (i = 0; i < count; i++) tmp = readb(reg); } static void dm9000_dumpblk_16bit(void __iomem *reg, int count) { int i; int tmp; count = (count + 1) >> 1; for (i = 0; i < count; i++) tmp = readw(reg); } static void dm9000_dumpblk_32bit(void __iomem *reg, int count) { int i; int tmp; count = (count + 3) >> 2; for (i = 0; i < count; i++) tmp = readl(reg); } /* dm9000_set_io * * select the specified set of io routines to use with the * 
device */ static void dm9000_set_io(struct board_info *db, int byte_width) { /* use the size of the data resource to work out what IO * routines we want to use */ switch (byte_width) { case 1: db->dumpblk = dm9000_dumpblk_8bit; db->outblk = dm9000_outblk_8bit; db->inblk = dm9000_inblk_8bit; break; case 3: dev_dbg(db->dev, ": 3 byte IO, falling back to 16bit\n"); case 2: db->dumpblk = dm9000_dumpblk_16bit; db->outblk = dm9000_outblk_16bit; db->inblk = dm9000_inblk_16bit; break; case 4: default: db->dumpblk = dm9000_dumpblk_32bit; db->outblk = dm9000_outblk_32bit; db->inblk = dm9000_inblk_32bit; break; } } static void dm9000_schedule_poll(board_info_t *db) { if (db->type == TYPE_DM9000E) schedule_delayed_work(&db->phy_poll, HZ * 2); } static int dm9000_ioctl(struct net_device *dev, struct ifreq *req, int cmd) { board_info_t *dm = to_dm9000_board(dev); if (!netif_running(dev)) return -EINVAL; return generic_mii_ioctl(&dm->mii, if_mii(req), cmd, NULL); } static unsigned int dm9000_read_locked(board_info_t *db, int reg) { unsigned long flags; unsigned int ret; spin_lock_irqsave(&db->lock, flags); ret = ior(db, reg); spin_unlock_irqrestore(&db->lock, flags); return ret; } static int dm9000_wait_eeprom(board_info_t *db) { unsigned int status; int timeout = 8; /* wait max 8msec */ /* The DM9000 data sheets say we should be able to * poll the ERRE bit in EPCR to wait for the EEPROM * operation. From testing several chips, this bit * does not seem to work. * * We attempt to use the bit, but fall back to the * timeout (which is why we do not return an error * on expiry) to say that the EEPROM operation has * completed. 
*/ while (1) { status = dm9000_read_locked(db, DM9000_EPCR); if ((status & EPCR_ERRE) == 0) break; msleep(1); if (timeout-- < 0) { dev_dbg(db->dev, "timeout waiting EEPROM\n"); break; } } return 0; } /* * Read a word data from EEPROM */ static void dm9000_read_eeprom(board_info_t *db, int offset, u8 *to) { unsigned long flags; if (db->flags & DM9000_PLATF_NO_EEPROM) { to[0] = 0xff; to[1] = 0xff; return; } mutex_lock(&db->addr_lock); spin_lock_irqsave(&db->lock, flags); iow(db, DM9000_EPAR, offset); iow(db, DM9000_EPCR, EPCR_ERPRR); spin_unlock_irqrestore(&db->lock, flags); dm9000_wait_eeprom(db); /* delay for at-least 150uS */ msleep(1); spin_lock_irqsave(&db->lock, flags); iow(db, DM9000_EPCR, 0x0); to[0] = ior(db, DM9000_EPDRL); to[1] = ior(db, DM9000_EPDRH); spin_unlock_irqrestore(&db->lock, flags); mutex_unlock(&db->addr_lock); } /* * Write a word data to SROM */ static void dm9000_write_eeprom(board_info_t *db, int offset, u8 *data) { unsigned long flags; if (db->flags & DM9000_PLATF_NO_EEPROM) return; mutex_lock(&db->addr_lock); spin_lock_irqsave(&db->lock, flags); iow(db, DM9000_EPAR, offset); iow(db, DM9000_EPDRH, data[1]); iow(db, DM9000_EPDRL, data[0]); iow(db, DM9000_EPCR, EPCR_WEP | EPCR_ERPRW); spin_unlock_irqrestore(&db->lock, flags); dm9000_wait_eeprom(db); mdelay(1); /* wait at least 150uS to clear */ spin_lock_irqsave(&db->lock, flags); iow(db, DM9000_EPCR, 0); spin_unlock_irqrestore(&db->lock, flags); mutex_unlock(&db->addr_lock); } /* ethtool ops */ static void dm9000_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { board_info_t *dm = to_dm9000_board(dev); strcpy(info->driver, CARDNAME); strcpy(info->version, DRV_VERSION); strcpy(info->bus_info, to_platform_device(dm->dev)->name); } static u32 dm9000_get_msglevel(struct net_device *dev) { board_info_t *dm = to_dm9000_board(dev); return dm->msg_enable; } static void dm9000_set_msglevel(struct net_device *dev, u32 value) { board_info_t *dm = to_dm9000_board(dev); dm->msg_enable = 
value; } static int dm9000_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) { board_info_t *dm = to_dm9000_board(dev); mii_ethtool_gset(&dm->mii, cmd); return 0; } static int dm9000_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) { board_info_t *dm = to_dm9000_board(dev); return mii_ethtool_sset(&dm->mii, cmd); } static int dm9000_nway_reset(struct net_device *dev) { board_info_t *dm = to_dm9000_board(dev); return mii_nway_restart(&dm->mii); } static int dm9000_set_features(struct net_device *dev, netdev_features_t features) { board_info_t *dm = to_dm9000_board(dev); netdev_features_t changed = dev->features ^ features; unsigned long flags; if (!(changed & NETIF_F_RXCSUM)) return 0; spin_lock_irqsave(&dm->lock, flags); iow(dm, DM9000_RCSR, (features & NETIF_F_RXCSUM) ? RCSR_CSUM : 0); spin_unlock_irqrestore(&dm->lock, flags); return 0; } static u32 dm9000_get_link(struct net_device *dev) { board_info_t *dm = to_dm9000_board(dev); u32 ret; if (dm->flags & DM9000_PLATF_EXT_PHY) ret = mii_link_ok(&dm->mii); else ret = dm9000_read_locked(dm, DM9000_NSR) & NSR_LINKST ? 
1 : 0; return ret; } #define DM_EEPROM_MAGIC (0x444D394B) static int dm9000_get_eeprom_len(struct net_device *dev) { return 128; } static int dm9000_get_eeprom(struct net_device *dev, struct ethtool_eeprom *ee, u8 *data) { board_info_t *dm = to_dm9000_board(dev); int offset = ee->offset; int len = ee->len; int i; /* EEPROM access is aligned to two bytes */ if ((len & 1) != 0 || (offset & 1) != 0) return -EINVAL; if (dm->flags & DM9000_PLATF_NO_EEPROM) return -ENOENT; ee->magic = DM_EEPROM_MAGIC; for (i = 0; i < len; i += 2) dm9000_read_eeprom(dm, (offset + i) / 2, data + i); return 0; } static int dm9000_set_eeprom(struct net_device *dev, struct ethtool_eeprom *ee, u8 *data) { board_info_t *dm = to_dm9000_board(dev); int offset = ee->offset; int len = ee->len; int done; /* EEPROM access is aligned to two bytes */ if (dm->flags & DM9000_PLATF_NO_EEPROM) return -ENOENT; if (ee->magic != DM_EEPROM_MAGIC) return -EINVAL; while (len > 0) { if (len & 1 || offset & 1) { int which = offset & 1; u8 tmp[2]; dm9000_read_eeprom(dm, offset / 2, tmp); tmp[which] = *data; dm9000_write_eeprom(dm, offset / 2, tmp); done = 1; } else { dm9000_write_eeprom(dm, offset / 2, data); done = 2; } data += done; offset += done; len -= done; } return 0; } static void dm9000_get_wol(struct net_device *dev, struct ethtool_wolinfo *w) { board_info_t *dm = to_dm9000_board(dev); memset(w, 0, sizeof(struct ethtool_wolinfo)); /* note, we could probably support wake-phy too */ w->supported = dm->wake_supported ? 
WAKE_MAGIC : 0; w->wolopts = dm->wake_state; } static int dm9000_set_wol(struct net_device *dev, struct ethtool_wolinfo *w) { board_info_t *dm = to_dm9000_board(dev); unsigned long flags; u32 opts = w->wolopts; u32 wcr = 0; if (!dm->wake_supported) return -EOPNOTSUPP; if (opts & ~WAKE_MAGIC) return -EINVAL; if (opts & WAKE_MAGIC) wcr |= WCR_MAGICEN; mutex_lock(&dm->addr_lock); spin_lock_irqsave(&dm->lock, flags); iow(dm, DM9000_WCR, wcr); spin_unlock_irqrestore(&dm->lock, flags); mutex_unlock(&dm->addr_lock); if (dm->wake_state != opts) { /* change in wol state, update IRQ state */ if (!dm->wake_state) irq_set_irq_wake(dm->irq_wake, 1); else if (dm->wake_state && !opts) irq_set_irq_wake(dm->irq_wake, 0); } dm->wake_state = opts; return 0; } static const struct ethtool_ops dm9000_ethtool_ops = { .get_drvinfo = dm9000_get_drvinfo, .get_settings = dm9000_get_settings, .set_settings = dm9000_set_settings, .get_msglevel = dm9000_get_msglevel, .set_msglevel = dm9000_set_msglevel, .nway_reset = dm9000_nway_reset, .get_link = dm9000_get_link, .get_wol = dm9000_get_wol, .set_wol = dm9000_set_wol, .get_eeprom_len = dm9000_get_eeprom_len, .get_eeprom = dm9000_get_eeprom, .set_eeprom = dm9000_set_eeprom, }; static void dm9000_show_carrier(board_info_t *db, unsigned carrier, unsigned nsr) { struct net_device *ndev = db->ndev; unsigned ncr = dm9000_read_locked(db, DM9000_NCR); if (carrier) dev_info(db->dev, "%s: link up, %dMbps, %s-duplex, no LPA\n", ndev->name, (nsr & NSR_SPEED) ? 10 : 100, (ncr & NCR_FDX) ? "full" : "half"); else dev_info(db->dev, "%s: link down\n", ndev->name); } static void dm9000_poll_work(struct work_struct *w) { struct delayed_work *dw = to_delayed_work(w); board_info_t *db = container_of(dw, board_info_t, phy_poll); struct net_device *ndev = db->ndev; if (db->flags & DM9000_PLATF_SIMPLE_PHY && !(db->flags & DM9000_PLATF_EXT_PHY)) { unsigned nsr = dm9000_read_locked(db, DM9000_NSR); unsigned old_carrier = netif_carrier_ok(ndev) ? 
1 : 0; unsigned new_carrier; new_carrier = (nsr & NSR_LINKST) ? 1 : 0; if (old_carrier != new_carrier) { if (netif_msg_link(db)) dm9000_show_carrier(db, new_carrier, nsr); if (!new_carrier) netif_carrier_off(ndev); else netif_carrier_on(ndev); } } else mii_check_media(&db->mii, netif_msg_link(db), 0); if (netif_running(ndev)) dm9000_schedule_poll(db); } /* dm9000_release_board * * release a board, and any mapped resources */ static void dm9000_release_board(struct platform_device *pdev, struct board_info *db) { /* unmap our resources */ iounmap(db->io_addr); iounmap(db->io_data); /* release the resources */ release_resource(db->data_req); kfree(db->data_req); release_resource(db->addr_req); kfree(db->addr_req); } static unsigned char dm9000_type_to_char(enum dm9000_type type) { switch (type) { case TYPE_DM9000E: return 'e'; case TYPE_DM9000A: return 'a'; case TYPE_DM9000B: return 'b'; } return '?'; } /* * Set DM9000 multicast address */ static void dm9000_hash_table_unlocked(struct net_device *dev) { board_info_t *db = netdev_priv(dev); struct netdev_hw_addr *ha; int i, oft; u32 hash_val; u16 hash_table[4]; u8 rcr = RCR_DIS_LONG | RCR_DIS_CRC | RCR_RXEN; dm9000_dbg(db, 1, "entering %s\n", __func__); for (i = 0, oft = DM9000_PAR; i < 6; i++, oft++) iow(db, oft, dev->dev_addr[i]); /* Clear Hash Table */ for (i = 0; i < 4; i++) hash_table[i] = 0x0; /* broadcast address */ hash_table[3] = 0x8000; if (dev->flags & IFF_PROMISC) rcr |= RCR_PRMSC; if (dev->flags & IFF_ALLMULTI) rcr |= RCR_ALL; /* the multicast address in Hash Table : 64 bits */ netdev_for_each_mc_addr(ha, dev) { hash_val = ether_crc_le(6, ha->addr) & 0x3f; hash_table[hash_val / 16] |= (u16) 1 << (hash_val % 16); } /* Write the hash table to MAC MD table */ for (i = 0, oft = DM9000_MAR; i < 4; i++) { iow(db, oft++, hash_table[i]); iow(db, oft++, hash_table[i] >> 8); } iow(db, DM9000_RCR, rcr); } static void dm9000_hash_table(struct net_device *dev) { board_info_t *db = netdev_priv(dev); unsigned long flags; 
spin_lock_irqsave(&db->lock, flags); dm9000_hash_table_unlocked(dev); spin_unlock_irqrestore(&db->lock, flags); } /* * Initialize dm9000 board */ static void dm9000_init_dm9000(struct net_device *dev) { board_info_t *db = netdev_priv(dev); unsigned int imr; unsigned int ncr; dm9000_dbg(db, 1, "entering %s\n", __func__); /* I/O mode */ db->io_mode = ior(db, DM9000_ISR) >> 6; /* ISR bit7:6 keeps I/O mode */ /* Checksum mode */ if (dev->hw_features & NETIF_F_RXCSUM) iow(db, DM9000_RCSR, (dev->features & NETIF_F_RXCSUM) ? RCSR_CSUM : 0); iow(db, DM9000_GPCR, GPCR_GEP_CNTL); /* Let GPIO0 output */ ncr = (db->flags & DM9000_PLATF_EXT_PHY) ? NCR_EXT_PHY : 0; /* if wol is needed, then always set NCR_WAKEEN otherwise we end * up dumping the wake events if we disable this. There is already * a wake-mask in DM9000_WCR */ if (db->wake_supported) ncr |= NCR_WAKEEN; iow(db, DM9000_NCR, ncr); /* Program operating register */ iow(db, DM9000_TCR, 0); /* TX Polling clear */ iow(db, DM9000_BPTR, 0x3f); /* Less 3Kb, 200us */ iow(db, DM9000_FCR, 0xff); /* Flow Control */ iow(db, DM9000_SMCR, 0); /* Special Mode */ /* clear TX status */ iow(db, DM9000_NSR, NSR_WAKEST | NSR_TX2END | NSR_TX1END); iow(db, DM9000_ISR, ISR_CLR_STATUS); /* Clear interrupt status */ /* Set address filter table */ dm9000_hash_table_unlocked(dev); imr = IMR_PAR | IMR_PTM | IMR_PRM; if (db->type != TYPE_DM9000E) imr |= IMR_LNKCHNG; db->imr_all = imr; /* Enable TX/RX interrupt mask */ iow(db, DM9000_IMR, imr); /* Init Driver variable */ db->tx_pkt_cnt = 0; db->queue_pkt_len = 0; dev->trans_start = jiffies; } /* Our watchdog timed out. 
   Called by the networking layer */
static void dm9000_timeout(struct net_device *dev)
{
	board_info_t *db = netdev_priv(dev);
	u8 reg_save;
	unsigned long flags;

	/* Save previous register address; the timeout may interrupt
	 * another access that has already selected a register. */
	spin_lock_irqsave(&db->lock, flags);
	reg_save = readb(db->io_addr);

	/* Full reset and re-init is the recovery path for a stuck TX. */
	netif_stop_queue(dev);
	dm9000_reset(db);
	dm9000_init_dm9000(dev);
	/* We can accept TX packets again */
	dev->trans_start = jiffies; /* prevent tx timeout */
	netif_wake_queue(dev);

	/* Restore previous register address */
	writeb(reg_save, db->io_addr);
	spin_unlock_irqrestore(&db->lock, flags);
}

/*
 * Kick off transmission of a packet already copied into TX SRAM:
 * program the checksum-offload mode if it changed, set the length
 * registers, then issue the TX request.  Caller holds db->lock.
 */
static void dm9000_send_packet(struct net_device *dev,
			       int ip_summed,
			       u16 pkt_len)
{
	board_info_t *dm = to_dm9000_board(dev);

	/* The DM9000 is not smart enough to leave fragmented packets alone. */
	if (dm->ip_summed != ip_summed) {
		if (ip_summed == CHECKSUM_NONE)
			iow(dm, DM9000_TCCR, 0);
		else
			iow(dm, DM9000_TCCR, TCCR_IP | TCCR_UDP | TCCR_TCP);
		dm->ip_summed = ip_summed;
	}

	/* Set TX length to DM9000 */
	iow(dm, DM9000_TXPLL, pkt_len);
	iow(dm, DM9000_TXPLH, pkt_len >> 8);

	/* Issue TX polling command */
	iow(dm, DM9000_TCR, TCR_TXREQ);	/* Cleared after TX complete */
}

/*
 *  Hardware start transmission.
 *  Send a packet to media from the upper layer.
*/ static int dm9000_start_xmit(struct sk_buff *skb, struct net_device *dev) { unsigned long flags; board_info_t *db = netdev_priv(dev); dm9000_dbg(db, 3, "%s:\n", __func__); if (db->tx_pkt_cnt > 1) return NETDEV_TX_BUSY; spin_lock_irqsave(&db->lock, flags); /* Move data to DM9000 TX RAM */ writeb(DM9000_MWCMD, db->io_addr); (db->outblk)(db->io_data, skb->data, skb->len); dev->stats.tx_bytes += skb->len; db->tx_pkt_cnt++; /* TX control: First packet immediately send, second packet queue */ if (db->tx_pkt_cnt == 1) { dm9000_send_packet(dev, skb->ip_summed, skb->len); } else { /* Second packet */ db->queue_pkt_len = skb->len; db->queue_ip_summed = skb->ip_summed; netif_stop_queue(dev); } spin_unlock_irqrestore(&db->lock, flags); /* free this SKB */ dev_kfree_skb(skb); return NETDEV_TX_OK; } /* * DM9000 interrupt handler * receive the packet to upper layer, free the transmitted packet */ static void dm9000_tx_done(struct net_device *dev, board_info_t *db) { int tx_status = ior(db, DM9000_NSR); /* Got TX status */ if (tx_status & (NSR_TX2END | NSR_TX1END)) { /* One packet sent complete */ db->tx_pkt_cnt--; dev->stats.tx_packets++; if (netif_msg_tx_done(db)) dev_dbg(db->dev, "tx done, NSR %02x\n", tx_status); /* Queue packet check & send */ if (db->tx_pkt_cnt > 0) dm9000_send_packet(dev, db->queue_ip_summed, db->queue_pkt_len); netif_wake_queue(dev); } } struct dm9000_rxhdr { u8 RxPktReady; u8 RxStatus; __le16 RxLen; } __packed; /* * Received a packet and pass to upper layer */ static void dm9000_rx(struct net_device *dev) { board_info_t *db = netdev_priv(dev); struct dm9000_rxhdr rxhdr; struct sk_buff *skb; u8 rxbyte, *rdptr; bool GoodPacket; int RxLen; /* Check packet ready or not */ do { ior(db, DM9000_MRCMDX); /* Dummy read */ /* Get most updated data */ rxbyte = readb(db->io_data); /* Status check: this byte must be 0 or 1 */ if (rxbyte & DM9000_PKT_ERR) { dev_warn(db->dev, "status check fail: %d\n", rxbyte); iow(db, DM9000_RCR, 0x00); /* Stop Device */ iow(db, 
DM9000_ISR, IMR_PAR); /* Stop INT request */ return; } if (!(rxbyte & DM9000_PKT_RDY)) return; /* A packet ready now & Get status/length */ GoodPacket = true; writeb(DM9000_MRCMD, db->io_addr); (db->inblk)(db->io_data, &rxhdr, sizeof(rxhdr)); RxLen = le16_to_cpu(rxhdr.RxLen); if (netif_msg_rx_status(db)) dev_dbg(db->dev, "RX: status %02x, length %04x\n", rxhdr.RxStatus, RxLen); /* Packet Status check */ if (RxLen < 0x40) { GoodPacket = false; if (netif_msg_rx_err(db)) dev_dbg(db->dev, "RX: Bad Packet (runt)\n"); } if (RxLen > DM9000_PKT_MAX) { dev_dbg(db->dev, "RST: RX Len:%x\n", RxLen); } /* rxhdr.RxStatus is identical to RSR register. */ if (rxhdr.RxStatus & (RSR_FOE | RSR_CE | RSR_AE | RSR_PLE | RSR_RWTO | RSR_LCS | RSR_RF)) { GoodPacket = false; if (rxhdr.RxStatus & RSR_FOE) { if (netif_msg_rx_err(db)) dev_dbg(db->dev, "fifo error\n"); dev->stats.rx_fifo_errors++; } if (rxhdr.RxStatus & RSR_CE) { if (netif_msg_rx_err(db)) dev_dbg(db->dev, "crc error\n"); dev->stats.rx_crc_errors++; } if (rxhdr.RxStatus & RSR_RF) { if (netif_msg_rx_err(db)) dev_dbg(db->dev, "length error\n"); dev->stats.rx_length_errors++; } } /* Move data from DM9000 */ if (GoodPacket && ((skb = netdev_alloc_skb(dev, RxLen + 4)) != NULL)) { skb_reserve(skb, 2); rdptr = (u8 *) skb_put(skb, RxLen - 4); /* Read received packet from RX SRAM */ (db->inblk)(db->io_data, rdptr, RxLen); dev->stats.rx_bytes += RxLen; /* Pass to upper layer */ skb->protocol = eth_type_trans(skb, dev); if (dev->features & NETIF_F_RXCSUM) { if ((((rxbyte & 0x1c) << 3) & rxbyte) == 0) skb->ip_summed = CHECKSUM_UNNECESSARY; else skb_checksum_none_assert(skb); } netif_rx(skb); dev->stats.rx_packets++; } else { /* need to dump the packet's data */ (db->dumpblk)(db->io_data, RxLen); } } while (rxbyte & DM9000_PKT_RDY); } static irqreturn_t dm9000_interrupt(int irq, void *dev_id) { struct net_device *dev = dev_id; board_info_t *db = netdev_priv(dev); int int_status; unsigned long flags; u8 reg_save; dm9000_dbg(db, 3, "entering 
%s\n", __func__); /* A real interrupt coming */ /* holders of db->lock must always block IRQs */ spin_lock_irqsave(&db->lock, flags); /* Save previous register address */ reg_save = readb(db->io_addr); /* Disable all interrupts */ iow(db, DM9000_IMR, IMR_PAR); /* Got DM9000 interrupt status */ int_status = ior(db, DM9000_ISR); /* Got ISR */ iow(db, DM9000_ISR, int_status); /* Clear ISR status */ if (netif_msg_intr(db)) dev_dbg(db->dev, "interrupt status %02x\n", int_status); /* Received the coming packet */ if (int_status & ISR_PRS) dm9000_rx(dev); /* Trnasmit Interrupt check */ if (int_status & ISR_PTS) dm9000_tx_done(dev, db); if (db->type != TYPE_DM9000E) { if (int_status & ISR_LNKCHNG) { /* fire a link-change request */ schedule_delayed_work(&db->phy_poll, 1); } } /* Re-enable interrupt mask */ iow(db, DM9000_IMR, db->imr_all); /* Restore previous register address */ writeb(reg_save, db->io_addr); spin_unlock_irqrestore(&db->lock, flags); return IRQ_HANDLED; } static irqreturn_t dm9000_wol_interrupt(int irq, void *dev_id) { struct net_device *dev = dev_id; board_info_t *db = netdev_priv(dev); unsigned long flags; unsigned nsr, wcr; spin_lock_irqsave(&db->lock, flags); nsr = ior(db, DM9000_NSR); wcr = ior(db, DM9000_WCR); dev_dbg(db->dev, "%s: NSR=0x%02x, WCR=0x%02x\n", __func__, nsr, wcr); if (nsr & NSR_WAKEST) { /* clear, so we can avoid */ iow(db, DM9000_NSR, NSR_WAKEST); if (wcr & WCR_LINKST) dev_info(db->dev, "wake by link status change\n"); if (wcr & WCR_SAMPLEST) dev_info(db->dev, "wake by sample packet\n"); if (wcr & WCR_MAGICST ) dev_info(db->dev, "wake by magic packet\n"); if (!(wcr & (WCR_LINKST | WCR_SAMPLEST | WCR_MAGICST))) dev_err(db->dev, "wake signalled with no reason? " "NSR=0x%02x, WSR=0x%02x\n", nsr, wcr); } spin_unlock_irqrestore(&db->lock, flags); return (nsr & NSR_WAKEST) ? 
IRQ_HANDLED : IRQ_NONE; } #ifdef CONFIG_NET_POLL_CONTROLLER /* *Used by netconsole */ static void dm9000_poll_controller(struct net_device *dev) { disable_irq(dev->irq); dm9000_interrupt(dev->irq, dev); enable_irq(dev->irq); } #endif /* * Open the interface. * The interface is opened whenever "ifconfig" actives it. */ static int dm9000_open(struct net_device *dev) { board_info_t *db = netdev_priv(dev); unsigned long irqflags = db->irq_res->flags & IRQF_TRIGGER_MASK; if (netif_msg_ifup(db)) dev_dbg(db->dev, "enabling %s\n", dev->name); /* If there is no IRQ type specified, default to something that * may work, and tell the user that this is a problem */ if (irqflags == IRQF_TRIGGER_NONE) dev_warn(db->dev, "WARNING: no IRQ resource flags set.\n"); irqflags |= IRQF_SHARED; /* GPIO0 on pre-activate PHY, Reg 1F is not set by reset */ iow(db, DM9000_GPR, 0); /* REG_1F bit0 activate phyxcer */ mdelay(1); /* delay needs by DM9000B */ /* Initialize DM9000 board */ dm9000_reset(db); dm9000_init_dm9000(dev); if (request_irq(dev->irq, dm9000_interrupt, irqflags, dev->name, dev)) return -EAGAIN; /* Init driver variable */ db->dbug_cnt = 0; mii_check_media(&db->mii, netif_msg_link(db), 1); netif_start_queue(dev); dm9000_schedule_poll(db); return 0; } /* * Sleep, either by using msleep() or if we are suspending, then * use mdelay() to sleep. 
*/ static void dm9000_msleep(board_info_t *db, unsigned int ms) { if (db->in_suspend) mdelay(ms); else msleep(ms); } /* * Read a word from phyxcer */ static int dm9000_phy_read(struct net_device *dev, int phy_reg_unused, int reg) { board_info_t *db = netdev_priv(dev); unsigned long flags; unsigned int reg_save; int ret; mutex_lock(&db->addr_lock); spin_lock_irqsave(&db->lock,flags); /* Save previous register address */ reg_save = readb(db->io_addr); /* Fill the phyxcer register into REG_0C */ iow(db, DM9000_EPAR, DM9000_PHY | reg); iow(db, DM9000_EPCR, EPCR_ERPRR | EPCR_EPOS); /* Issue phyxcer read command */ writeb(reg_save, db->io_addr); spin_unlock_irqrestore(&db->lock,flags); dm9000_msleep(db, 1); /* Wait read complete */ spin_lock_irqsave(&db->lock,flags); reg_save = readb(db->io_addr); iow(db, DM9000_EPCR, 0x0); /* Clear phyxcer read command */ /* The read data keeps on REG_0D & REG_0E */ ret = (ior(db, DM9000_EPDRH) << 8) | ior(db, DM9000_EPDRL); /* restore the previous address */ writeb(reg_save, db->io_addr); spin_unlock_irqrestore(&db->lock,flags); mutex_unlock(&db->addr_lock); dm9000_dbg(db, 5, "phy_read[%02x] -> %04x\n", reg, ret); return ret; } /* * Write a word to phyxcer */ static void dm9000_phy_write(struct net_device *dev, int phyaddr_unused, int reg, int value) { board_info_t *db = netdev_priv(dev); unsigned long flags; unsigned long reg_save; dm9000_dbg(db, 5, "phy_write[%02x] = %04x\n", reg, value); mutex_lock(&db->addr_lock); spin_lock_irqsave(&db->lock,flags); /* Save previous register address */ reg_save = readb(db->io_addr); /* Fill the phyxcer register into REG_0C */ iow(db, DM9000_EPAR, DM9000_PHY | reg); /* Fill the written data into REG_0D & REG_0E */ iow(db, DM9000_EPDRL, value); iow(db, DM9000_EPDRH, value >> 8); iow(db, DM9000_EPCR, EPCR_EPOS | EPCR_ERPRW); /* Issue phyxcer write command */ writeb(reg_save, db->io_addr); spin_unlock_irqrestore(&db->lock, flags); dm9000_msleep(db, 1); /* Wait write complete */ 
spin_lock_irqsave(&db->lock,flags); reg_save = readb(db->io_addr); iow(db, DM9000_EPCR, 0x0); /* Clear phyxcer write command */ /* restore the previous address */ writeb(reg_save, db->io_addr); spin_unlock_irqrestore(&db->lock, flags); mutex_unlock(&db->addr_lock); } static void dm9000_shutdown(struct net_device *dev) { board_info_t *db = netdev_priv(dev); /* RESET device */ dm9000_phy_write(dev, 0, MII_BMCR, BMCR_RESET); /* PHY RESET */ iow(db, DM9000_GPR, 0x01); /* Power-Down PHY */ iow(db, DM9000_IMR, IMR_PAR); /* Disable all interrupt */ iow(db, DM9000_RCR, 0x00); /* Disable RX */ } /* * Stop the interface. * The interface is stopped when it is brought. */ static int dm9000_stop(struct net_device *ndev) { board_info_t *db = netdev_priv(ndev); if (netif_msg_ifdown(db)) dev_dbg(db->dev, "shutting down %s\n", ndev->name); cancel_delayed_work_sync(&db->phy_poll); netif_stop_queue(ndev); netif_carrier_off(ndev); /* free interrupt */ free_irq(ndev->irq, ndev); dm9000_shutdown(ndev); return 0; } static const struct net_device_ops dm9000_netdev_ops = { .ndo_open = dm9000_open, .ndo_stop = dm9000_stop, .ndo_start_xmit = dm9000_start_xmit, .ndo_tx_timeout = dm9000_timeout, .ndo_set_rx_mode = dm9000_hash_table, .ndo_do_ioctl = dm9000_ioctl, .ndo_change_mtu = eth_change_mtu, .ndo_set_features = dm9000_set_features, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = eth_mac_addr, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = dm9000_poll_controller, #endif }; /* * Search DM9000 board, allocate space and register it */ static int __devinit dm9000_probe(struct platform_device *pdev) { struct dm9000_plat_data *pdata = pdev->dev.platform_data; struct board_info *db; /* Point a board information structure */ struct net_device *ndev; const unsigned char *mac_src; int ret = 0; int iosize; int i; u32 id_val; /* Init network device */ ndev = alloc_etherdev(sizeof(struct board_info)); if (!ndev) return -ENOMEM; SET_NETDEV_DEV(ndev, &pdev->dev); 
dev_dbg(&pdev->dev, "dm9000_probe()\n"); /* setup board info structure */ db = netdev_priv(ndev); db->dev = &pdev->dev; db->ndev = ndev; spin_lock_init(&db->lock); mutex_init(&db->addr_lock); INIT_DELAYED_WORK(&db->phy_poll, dm9000_poll_work); db->addr_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); db->data_res = platform_get_resource(pdev, IORESOURCE_MEM, 1); db->irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); if (db->addr_res == NULL || db->data_res == NULL || db->irq_res == NULL) { dev_err(db->dev, "insufficient resources\n"); ret = -ENOENT; goto out; } db->irq_wake = platform_get_irq(pdev, 1); if (db->irq_wake >= 0) { dev_dbg(db->dev, "wakeup irq %d\n", db->irq_wake); ret = request_irq(db->irq_wake, dm9000_wol_interrupt, IRQF_SHARED, dev_name(db->dev), ndev); if (ret) { dev_err(db->dev, "cannot get wakeup irq (%d)\n", ret); } else { /* test to see if irq is really wakeup capable */ ret = irq_set_irq_wake(db->irq_wake, 1); if (ret) { dev_err(db->dev, "irq %d cannot set wakeup (%d)\n", db->irq_wake, ret); ret = 0; } else { irq_set_irq_wake(db->irq_wake, 0); db->wake_supported = 1; } } } iosize = resource_size(db->addr_res); db->addr_req = request_mem_region(db->addr_res->start, iosize, pdev->name); if (db->addr_req == NULL) { dev_err(db->dev, "cannot claim address reg area\n"); ret = -EIO; goto out; } db->io_addr = ioremap(db->addr_res->start, iosize); if (db->io_addr == NULL) { dev_err(db->dev, "failed to ioremap address reg\n"); ret = -EINVAL; goto out; } iosize = resource_size(db->data_res); db->data_req = request_mem_region(db->data_res->start, iosize, pdev->name); if (db->data_req == NULL) { dev_err(db->dev, "cannot claim data reg area\n"); ret = -EIO; goto out; } db->io_data = ioremap(db->data_res->start, iosize); if (db->io_data == NULL) { dev_err(db->dev, "failed to ioremap data reg\n"); ret = -EINVAL; goto out; } /* fill in parameters for net-dev structure */ ndev->base_addr = (unsigned long)db->io_addr; ndev->irq = db->irq_res->start; 
/* ensure at least we have a default set of IO routines */ dm9000_set_io(db, iosize); /* check to see if anything is being over-ridden */ if (pdata != NULL) { /* check to see if the driver wants to over-ride the * default IO width */ if (pdata->flags & DM9000_PLATF_8BITONLY) dm9000_set_io(db, 1); if (pdata->flags & DM9000_PLATF_16BITONLY) dm9000_set_io(db, 2); if (pdata->flags & DM9000_PLATF_32BITONLY) dm9000_set_io(db, 4); /* check to see if there are any IO routine * over-rides */ if (pdata->inblk != NULL) db->inblk = pdata->inblk; if (pdata->outblk != NULL) db->outblk = pdata->outblk; if (pdata->dumpblk != NULL) db->dumpblk = pdata->dumpblk; db->flags = pdata->flags; } #ifdef CONFIG_DM9000_FORCE_SIMPLE_PHY_POLL db->flags |= DM9000_PLATF_SIMPLE_PHY; #endif dm9000_reset(db); /* try multiple times, DM9000 sometimes gets the read wrong */ for (i = 0; i < 8; i++) { id_val = ior(db, DM9000_VIDL); id_val |= (u32)ior(db, DM9000_VIDH) << 8; id_val |= (u32)ior(db, DM9000_PIDL) << 16; id_val |= (u32)ior(db, DM9000_PIDH) << 24; if (id_val == DM9000_ID) break; dev_err(db->dev, "read wrong id 0x%08x\n", id_val); } if (id_val != DM9000_ID) { dev_err(db->dev, "wrong id: 0x%08x\n", id_val); ret = -ENODEV; goto out; } /* Identify what type of DM9000 we are working on */ id_val = ior(db, DM9000_CHIPR); dev_dbg(db->dev, "dm9000 revision 0x%02x\n", id_val); switch (id_val) { case CHIPR_DM9000A: db->type = TYPE_DM9000A; break; case CHIPR_DM9000B: db->type = TYPE_DM9000B; break; default: dev_dbg(db->dev, "ID %02x => defaulting to DM9000E\n", id_val); db->type = TYPE_DM9000E; } /* dm9000a/b are capable of hardware checksum offload */ if (db->type == TYPE_DM9000A || db->type == TYPE_DM9000B) { ndev->hw_features = NETIF_F_RXCSUM | NETIF_F_IP_CSUM; ndev->features |= ndev->hw_features; } /* from this point we assume that we have found a DM9000 */ /* driver system function */ ether_setup(ndev); ndev->netdev_ops = &dm9000_netdev_ops; ndev->watchdog_timeo = msecs_to_jiffies(watchdog); 
ndev->ethtool_ops = &dm9000_ethtool_ops; db->msg_enable = NETIF_MSG_LINK; db->mii.phy_id_mask = 0x1f; db->mii.reg_num_mask = 0x1f; db->mii.force_media = 0; db->mii.full_duplex = 0; db->mii.dev = ndev; db->mii.mdio_read = dm9000_phy_read; db->mii.mdio_write = dm9000_phy_write; mac_src = "eeprom"; /* try reading the node address from the attached EEPROM */ for (i = 0; i < 6; i += 2) dm9000_read_eeprom(db, i / 2, ndev->dev_addr+i); if (!is_valid_ether_addr(ndev->dev_addr) && pdata != NULL) { mac_src = "platform data"; memcpy(ndev->dev_addr, pdata->dev_addr, 6); } if (!is_valid_ether_addr(ndev->dev_addr)) { /* try reading from mac */ mac_src = "chip"; for (i = 0; i < 6; i++) ndev->dev_addr[i] = ior(db, i+DM9000_PAR); } if (!is_valid_ether_addr(ndev->dev_addr)) { dev_warn(db->dev, "%s: Invalid ethernet MAC address. Please " "set using ifconfig\n", ndev->name); eth_hw_addr_random(ndev); mac_src = "random"; } platform_set_drvdata(pdev, ndev); ret = register_netdev(ndev); if (ret == 0) printk(KERN_INFO "%s: dm9000%c at %p,%p IRQ %d MAC: %pM (%s)\n", ndev->name, dm9000_type_to_char(db->type), db->io_addr, db->io_data, ndev->irq, ndev->dev_addr, mac_src); return 0; out: dev_err(db->dev, "not found (%d).\n", ret); dm9000_release_board(pdev, db); free_netdev(ndev); return ret; } static int dm9000_drv_suspend(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct net_device *ndev = platform_get_drvdata(pdev); board_info_t *db; if (ndev) { db = netdev_priv(ndev); db->in_suspend = 1; if (!netif_running(ndev)) return 0; netif_device_detach(ndev); /* only shutdown if not using WoL */ if (!db->wake_state) dm9000_shutdown(ndev); } return 0; } static int dm9000_drv_resume(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct net_device *ndev = platform_get_drvdata(pdev); board_info_t *db = netdev_priv(ndev); if (ndev) { if (netif_running(ndev)) { /* reset if we were not in wake mode to ensure if * the device was powered off 
it is in a known state */ if (!db->wake_state) { dm9000_reset(db); dm9000_init_dm9000(ndev); } netif_device_attach(ndev); } db->in_suspend = 0; } return 0; } static const struct dev_pm_ops dm9000_drv_pm_ops = { .suspend = dm9000_drv_suspend, .resume = dm9000_drv_resume, }; static int __devexit dm9000_drv_remove(struct platform_device *pdev) { struct net_device *ndev = platform_get_drvdata(pdev); platform_set_drvdata(pdev, NULL); unregister_netdev(ndev); dm9000_release_board(pdev, netdev_priv(ndev)); free_netdev(ndev); /* free device structure */ dev_dbg(&pdev->dev, "released and freed device\n"); return 0; } static struct platform_driver dm9000_driver = { .driver = { .name = "dm9000", .owner = THIS_MODULE, .pm = &dm9000_drv_pm_ops, }, .probe = dm9000_probe, .remove = __devexit_p(dm9000_drv_remove), }; static int __init dm9000_init(void) { printk(KERN_INFO "%s Ethernet Driver, V%s\n", CARDNAME, DRV_VERSION); return platform_driver_register(&dm9000_driver); } static void __exit dm9000_cleanup(void) { platform_driver_unregister(&dm9000_driver); } module_init(dm9000_init); module_exit(dm9000_cleanup); MODULE_AUTHOR("Sascha Hauer, Ben Dooks"); MODULE_DESCRIPTION("Davicom DM9000 network driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:dm9000");
gpl-2.0
argentinos/kernel-s5pc100
drivers/rtc/rtc-vt8500.c
3495
8998
/* * drivers/rtc/rtc-vt8500.c * * Copyright (C) 2010 Alexey Charkov <alchark@gmail.com> * * Based on rtc-pxa.c * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/module.h> #include <linux/rtc.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/bcd.h> #include <linux/platform_device.h> #include <linux/slab.h> /* * Register definitions */ #define VT8500_RTC_TS 0x00 /* Time set */ #define VT8500_RTC_DS 0x04 /* Date set */ #define VT8500_RTC_AS 0x08 /* Alarm set */ #define VT8500_RTC_CR 0x0c /* Control */ #define VT8500_RTC_TR 0x10 /* Time read */ #define VT8500_RTC_DR 0x14 /* Date read */ #define VT8500_RTC_WS 0x18 /* Write status */ #define VT8500_RTC_CL 0x20 /* Calibration */ #define VT8500_RTC_IS 0x24 /* Interrupt status */ #define VT8500_RTC_ST 0x28 /* Status */ #define INVALID_TIME_BIT (1 << 31) #define DATE_CENTURY_S 19 #define DATE_YEAR_S 11 #define DATE_YEAR_MASK (0xff << DATE_YEAR_S) #define DATE_MONTH_S 6 #define DATE_MONTH_MASK (0x1f << DATE_MONTH_S) #define DATE_DAY_MASK 0x3f #define TIME_DOW_S 20 #define TIME_DOW_MASK (0x07 << TIME_DOW_S) #define TIME_HOUR_S 14 #define TIME_HOUR_MASK (0x3f << TIME_HOUR_S) #define TIME_MIN_S 7 #define TIME_MIN_MASK (0x7f << TIME_MIN_S) #define TIME_SEC_MASK 0x7f #define ALARM_DAY_S 20 #define ALARM_DAY_MASK (0x3f << ALARM_DAY_S) #define ALARM_DAY_BIT (1 << 29) #define ALARM_HOUR_BIT (1 << 28) #define ALARM_MIN_BIT (1 << 27) #define ALARM_SEC_BIT (1 << 26) #define ALARM_ENABLE_MASK (ALARM_DAY_BIT \ | ALARM_HOUR_BIT \ | ALARM_MIN_BIT \ | ALARM_SEC_BIT) #define 
VT8500_RTC_CR_ENABLE (1 << 0) /* Enable RTC */ #define VT8500_RTC_CR_24H (1 << 1) /* 24h time format */ #define VT8500_RTC_CR_SM_ENABLE (1 << 2) /* Enable periodic irqs */ #define VT8500_RTC_CR_SM_SEC (1 << 3) /* 0: 1Hz/60, 1: 1Hz */ #define VT8500_RTC_CR_CALIB (1 << 4) /* Enable calibration */ #define VT8500_RTC_IS_ALARM (1 << 0) /* Alarm interrupt status */ struct vt8500_rtc { void __iomem *regbase; struct resource *res; int irq_alarm; struct rtc_device *rtc; spinlock_t lock; /* Protects this structure */ }; static irqreturn_t vt8500_rtc_irq(int irq, void *dev_id) { struct vt8500_rtc *vt8500_rtc = dev_id; u32 isr; unsigned long events = 0; spin_lock(&vt8500_rtc->lock); /* clear interrupt sources */ isr = readl(vt8500_rtc->regbase + VT8500_RTC_IS); writel(isr, vt8500_rtc->regbase + VT8500_RTC_IS); spin_unlock(&vt8500_rtc->lock); if (isr & VT8500_RTC_IS_ALARM) events |= RTC_AF | RTC_IRQF; rtc_update_irq(vt8500_rtc->rtc, 1, events); return IRQ_HANDLED; } static int vt8500_rtc_read_time(struct device *dev, struct rtc_time *tm) { struct vt8500_rtc *vt8500_rtc = dev_get_drvdata(dev); u32 date, time; date = readl(vt8500_rtc->regbase + VT8500_RTC_DR); time = readl(vt8500_rtc->regbase + VT8500_RTC_TR); tm->tm_sec = bcd2bin(time & TIME_SEC_MASK); tm->tm_min = bcd2bin((time & TIME_MIN_MASK) >> TIME_MIN_S); tm->tm_hour = bcd2bin((time & TIME_HOUR_MASK) >> TIME_HOUR_S); tm->tm_mday = bcd2bin(date & DATE_DAY_MASK); tm->tm_mon = bcd2bin((date & DATE_MONTH_MASK) >> DATE_MONTH_S); tm->tm_year = bcd2bin((date & DATE_YEAR_MASK) >> DATE_YEAR_S) + ((date >> DATE_CENTURY_S) & 1 ? 
200 : 100); tm->tm_wday = (time & TIME_DOW_MASK) >> TIME_DOW_S; return 0; } static int vt8500_rtc_set_time(struct device *dev, struct rtc_time *tm) { struct vt8500_rtc *vt8500_rtc = dev_get_drvdata(dev); if (tm->tm_year < 100) { dev_warn(dev, "Only years 2000-2199 are supported by the " "hardware!\n"); return -EINVAL; } writel((bin2bcd(tm->tm_year - 100) << DATE_YEAR_S) | (bin2bcd(tm->tm_mon) << DATE_MONTH_S) | (bin2bcd(tm->tm_mday)), vt8500_rtc->regbase + VT8500_RTC_DS); writel((bin2bcd(tm->tm_wday) << TIME_DOW_S) | (bin2bcd(tm->tm_hour) << TIME_HOUR_S) | (bin2bcd(tm->tm_min) << TIME_MIN_S) | (bin2bcd(tm->tm_sec)), vt8500_rtc->regbase + VT8500_RTC_TS); return 0; } static int vt8500_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm) { struct vt8500_rtc *vt8500_rtc = dev_get_drvdata(dev); u32 isr, alarm; alarm = readl(vt8500_rtc->regbase + VT8500_RTC_AS); isr = readl(vt8500_rtc->regbase + VT8500_RTC_IS); alrm->time.tm_mday = bcd2bin((alarm & ALARM_DAY_MASK) >> ALARM_DAY_S); alrm->time.tm_hour = bcd2bin((alarm & TIME_HOUR_MASK) >> TIME_HOUR_S); alrm->time.tm_min = bcd2bin((alarm & TIME_MIN_MASK) >> TIME_MIN_S); alrm->time.tm_sec = bcd2bin((alarm & TIME_SEC_MASK)); alrm->enabled = (alarm & ALARM_ENABLE_MASK) ? 1 : 0; alrm->pending = (isr & VT8500_RTC_IS_ALARM) ? 1 : 0; return rtc_valid_tm(&alrm->time); } static int vt8500_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm) { struct vt8500_rtc *vt8500_rtc = dev_get_drvdata(dev); writel((alrm->enabled ? 
ALARM_ENABLE_MASK : 0) | (bin2bcd(alrm->time.tm_mday) << ALARM_DAY_S) | (bin2bcd(alrm->time.tm_hour) << TIME_HOUR_S) | (bin2bcd(alrm->time.tm_min) << TIME_MIN_S) | (bin2bcd(alrm->time.tm_sec)), vt8500_rtc->regbase + VT8500_RTC_AS); return 0; } static int vt8500_alarm_irq_enable(struct device *dev, unsigned int enabled) { struct vt8500_rtc *vt8500_rtc = dev_get_drvdata(dev); unsigned long tmp = readl(vt8500_rtc->regbase + VT8500_RTC_AS); if (enabled) tmp |= ALARM_ENABLE_MASK; else tmp &= ~ALARM_ENABLE_MASK; writel(tmp, vt8500_rtc->regbase + VT8500_RTC_AS); return 0; } static const struct rtc_class_ops vt8500_rtc_ops = { .read_time = vt8500_rtc_read_time, .set_time = vt8500_rtc_set_time, .read_alarm = vt8500_rtc_read_alarm, .set_alarm = vt8500_rtc_set_alarm, .alarm_irq_enable = vt8500_alarm_irq_enable, }; static int __devinit vt8500_rtc_probe(struct platform_device *pdev) { struct vt8500_rtc *vt8500_rtc; int ret; vt8500_rtc = kzalloc(sizeof(struct vt8500_rtc), GFP_KERNEL); if (!vt8500_rtc) return -ENOMEM; spin_lock_init(&vt8500_rtc->lock); platform_set_drvdata(pdev, vt8500_rtc); vt8500_rtc->res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!vt8500_rtc->res) { dev_err(&pdev->dev, "No I/O memory resource defined\n"); ret = -ENXIO; goto err_free; } vt8500_rtc->irq_alarm = platform_get_irq(pdev, 0); if (vt8500_rtc->irq_alarm < 0) { dev_err(&pdev->dev, "No alarm IRQ resource defined\n"); ret = -ENXIO; goto err_free; } vt8500_rtc->res = request_mem_region(vt8500_rtc->res->start, resource_size(vt8500_rtc->res), "vt8500-rtc"); if (vt8500_rtc->res == NULL) { dev_err(&pdev->dev, "failed to request I/O memory\n"); ret = -EBUSY; goto err_free; } vt8500_rtc->regbase = ioremap(vt8500_rtc->res->start, resource_size(vt8500_rtc->res)); if (!vt8500_rtc->regbase) { dev_err(&pdev->dev, "Unable to map RTC I/O memory\n"); ret = -EBUSY; goto err_release; } /* Enable RTC and set it to 24-hour mode */ writel(VT8500_RTC_CR_ENABLE | VT8500_RTC_CR_24H, vt8500_rtc->regbase + 
VT8500_RTC_CR); vt8500_rtc->rtc = rtc_device_register("vt8500-rtc", &pdev->dev, &vt8500_rtc_ops, THIS_MODULE); if (IS_ERR(vt8500_rtc->rtc)) { ret = PTR_ERR(vt8500_rtc->rtc); dev_err(&pdev->dev, "Failed to register RTC device -> %d\n", ret); goto err_unmap; } ret = request_irq(vt8500_rtc->irq_alarm, vt8500_rtc_irq, 0, "rtc alarm", vt8500_rtc); if (ret < 0) { dev_err(&pdev->dev, "can't get irq %i, err %d\n", vt8500_rtc->irq_alarm, ret); goto err_unreg; } return 0; err_unreg: rtc_device_unregister(vt8500_rtc->rtc); err_unmap: iounmap(vt8500_rtc->regbase); err_release: release_mem_region(vt8500_rtc->res->start, resource_size(vt8500_rtc->res)); err_free: kfree(vt8500_rtc); return ret; } static int __devexit vt8500_rtc_remove(struct platform_device *pdev) { struct vt8500_rtc *vt8500_rtc = platform_get_drvdata(pdev); free_irq(vt8500_rtc->irq_alarm, vt8500_rtc); rtc_device_unregister(vt8500_rtc->rtc); /* Disable alarm matching */ writel(0, vt8500_rtc->regbase + VT8500_RTC_IS); iounmap(vt8500_rtc->regbase); release_mem_region(vt8500_rtc->res->start, resource_size(vt8500_rtc->res)); kfree(vt8500_rtc); platform_set_drvdata(pdev, NULL); return 0; } static struct platform_driver vt8500_rtc_driver = { .probe = vt8500_rtc_probe, .remove = __devexit_p(vt8500_rtc_remove), .driver = { .name = "vt8500-rtc", .owner = THIS_MODULE, }, }; module_platform_driver(vt8500_rtc_driver); MODULE_AUTHOR("Alexey Charkov <alchark@gmail.com>"); MODULE_DESCRIPTION("VIA VT8500 SoC Realtime Clock Driver (RTC)"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:vt8500-rtc");
gpl-2.0
dh-harald/android_kernel_samsung_codina
arch/powerpc/sysdev/ppc4xx_soc.c
3751
6114
/* * IBM/AMCC PPC4xx SoC setup code * * Copyright 2008 DENX Software Engineering, Stefan Roese <sr@denx.de> * * L2 cache routines cloned from arch/ppc/syslib/ibm440gx_common.c which is: * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net> * Copyright (c) 2003 - 2006 Zultys Technologies * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ #include <linux/stddef.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/errno.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/of_platform.h> #include <asm/dcr.h> #include <asm/dcr-regs.h> #include <asm/reg.h> static u32 dcrbase_l2c; /* * L2-cache */ /* Issue L2C diagnostic command */ static inline u32 l2c_diag(u32 addr) { mtdcr(dcrbase_l2c + DCRN_L2C0_ADDR, addr); mtdcr(dcrbase_l2c + DCRN_L2C0_CMD, L2C_CMD_DIAG); while (!(mfdcr(dcrbase_l2c + DCRN_L2C0_SR) & L2C_SR_CC)) ; return mfdcr(dcrbase_l2c + DCRN_L2C0_DATA); } static irqreturn_t l2c_error_handler(int irq, void *dev) { u32 sr = mfdcr(dcrbase_l2c + DCRN_L2C0_SR); if (sr & L2C_SR_CPE) { /* Read cache trapped address */ u32 addr = l2c_diag(0x42000000); printk(KERN_EMERG "L2C: Cache Parity Error, addr[16:26] = 0x%08x\n", addr); } if (sr & L2C_SR_TPE) { /* Read tag trapped address */ u32 addr = l2c_diag(0x82000000) >> 16; printk(KERN_EMERG "L2C: Tag Parity Error, addr[16:26] = 0x%08x\n", addr); } /* Clear parity errors */ if (sr & (L2C_SR_CPE | L2C_SR_TPE)){ mtdcr(dcrbase_l2c + DCRN_L2C0_ADDR, 0); mtdcr(dcrbase_l2c + DCRN_L2C0_CMD, L2C_CMD_CCP | L2C_CMD_CTE); } else { printk(KERN_EMERG "L2C: LRU error\n"); } return IRQ_HANDLED; } static int __init ppc4xx_l2c_probe(void) { struct device_node *np; u32 r; unsigned long flags; int irq; const u32 *dcrreg; u32 dcrbase_isram; int len; const u32 *prop; u32 l2_size; np = 
of_find_compatible_node(NULL, NULL, "ibm,l2-cache"); if (!np) return 0; /* Get l2 cache size */ prop = of_get_property(np, "cache-size", NULL); if (prop == NULL) { printk(KERN_ERR "%s: Can't get cache-size!\n", np->full_name); of_node_put(np); return -ENODEV; } l2_size = prop[0]; /* Map DCRs */ dcrreg = of_get_property(np, "dcr-reg", &len); if (!dcrreg || (len != 4 * sizeof(u32))) { printk(KERN_ERR "%s: Can't get DCR register base !", np->full_name); of_node_put(np); return -ENODEV; } dcrbase_isram = dcrreg[0]; dcrbase_l2c = dcrreg[2]; /* Get and map irq number from device tree */ irq = irq_of_parse_and_map(np, 0); if (irq == NO_IRQ) { printk(KERN_ERR "irq_of_parse_and_map failed\n"); of_node_put(np); return -ENODEV; } /* Install error handler */ if (request_irq(irq, l2c_error_handler, IRQF_DISABLED, "L2C", 0) < 0) { printk(KERN_ERR "Cannot install L2C error handler" ", cache is not enabled\n"); of_node_put(np); return -ENODEV; } local_irq_save(flags); asm volatile ("sync" ::: "memory"); /* Disable SRAM */ mtdcr(dcrbase_isram + DCRN_SRAM0_DPC, mfdcr(dcrbase_isram + DCRN_SRAM0_DPC) & ~SRAM_DPC_ENABLE); mtdcr(dcrbase_isram + DCRN_SRAM0_SB0CR, mfdcr(dcrbase_isram + DCRN_SRAM0_SB0CR) & ~SRAM_SBCR_BU_MASK); mtdcr(dcrbase_isram + DCRN_SRAM0_SB1CR, mfdcr(dcrbase_isram + DCRN_SRAM0_SB1CR) & ~SRAM_SBCR_BU_MASK); mtdcr(dcrbase_isram + DCRN_SRAM0_SB2CR, mfdcr(dcrbase_isram + DCRN_SRAM0_SB2CR) & ~SRAM_SBCR_BU_MASK); mtdcr(dcrbase_isram + DCRN_SRAM0_SB3CR, mfdcr(dcrbase_isram + DCRN_SRAM0_SB3CR) & ~SRAM_SBCR_BU_MASK); /* Enable L2_MODE without ICU/DCU */ r = mfdcr(dcrbase_l2c + DCRN_L2C0_CFG) & ~(L2C_CFG_ICU | L2C_CFG_DCU | L2C_CFG_SS_MASK); r |= L2C_CFG_L2M | L2C_CFG_SS_256; mtdcr(dcrbase_l2c + DCRN_L2C0_CFG, r); mtdcr(dcrbase_l2c + DCRN_L2C0_ADDR, 0); /* Hardware Clear Command */ mtdcr(dcrbase_l2c + DCRN_L2C0_CMD, L2C_CMD_HCC); while (!(mfdcr(dcrbase_l2c + DCRN_L2C0_SR) & L2C_SR_CC)) ; /* Clear Cache Parity and Tag Errors */ mtdcr(dcrbase_l2c + DCRN_L2C0_CMD, L2C_CMD_CCP | 
L2C_CMD_CTE); /* Enable 64G snoop region starting at 0 */ r = mfdcr(dcrbase_l2c + DCRN_L2C0_SNP0) & ~(L2C_SNP_BA_MASK | L2C_SNP_SSR_MASK); r |= L2C_SNP_SSR_32G | L2C_SNP_ESR; mtdcr(dcrbase_l2c + DCRN_L2C0_SNP0, r); r = mfdcr(dcrbase_l2c + DCRN_L2C0_SNP1) & ~(L2C_SNP_BA_MASK | L2C_SNP_SSR_MASK); r |= 0x80000000 | L2C_SNP_SSR_32G | L2C_SNP_ESR; mtdcr(dcrbase_l2c + DCRN_L2C0_SNP1, r); asm volatile ("sync" ::: "memory"); /* Enable ICU/DCU ports */ r = mfdcr(dcrbase_l2c + DCRN_L2C0_CFG); r &= ~(L2C_CFG_DCW_MASK | L2C_CFG_PMUX_MASK | L2C_CFG_PMIM | L2C_CFG_TPEI | L2C_CFG_CPEI | L2C_CFG_NAM | L2C_CFG_NBRM); r |= L2C_CFG_ICU | L2C_CFG_DCU | L2C_CFG_TPC | L2C_CFG_CPC | L2C_CFG_FRAN | L2C_CFG_CPIM | L2C_CFG_TPIM | L2C_CFG_LIM | L2C_CFG_SMCM; /* Check for 460EX/GT special handling */ if (of_device_is_compatible(np, "ibm,l2-cache-460ex") || of_device_is_compatible(np, "ibm,l2-cache-460gt")) r |= L2C_CFG_RDBW; mtdcr(dcrbase_l2c + DCRN_L2C0_CFG, r); asm volatile ("sync; isync" ::: "memory"); local_irq_restore(flags); printk(KERN_INFO "%dk L2-cache enabled\n", l2_size >> 10); of_node_put(np); return 0; } arch_initcall(ppc4xx_l2c_probe); /* * Apply a system reset. Alternatively a board specific value may be * provided via the "reset-type" property in the cpu node. */ void ppc4xx_reset_system(char *cmd) { struct device_node *np; u32 reset_type = DBCR0_RST_SYSTEM; const u32 *prop; np = of_find_node_by_type(NULL, "cpu"); if (np) { prop = of_get_property(np, "reset-type", NULL); /* * Check if property exists and if it is in range: * 1 - PPC4xx core reset * 2 - PPC4xx chip reset * 3 - PPC4xx system reset (default) */ if ((prop) && ((prop[0] >= 1) && (prop[0] <= 3))) reset_type = prop[0] << 28; } mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) | reset_type); while (1) ; /* Just in case the reset doesn't work */ }
gpl-2.0
SlimKat-U8950/chil360-kernel
drivers/gpu/drm/ttm/ttm_lock.c
6055
7397
/************************************************************************** * * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the * "Software"), to deal in the Software without restriction, including * without limitation the rights to use, copy, modify, merge, publish, * distribute, sub license, and/or sell copies of the Software, and to * permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice (including the * next paragraph) shall be included in all copies or substantial portions * of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE * USE OR OTHER DEALINGS IN THE SOFTWARE. 
* **************************************************************************/ /* * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com> */ #include "ttm/ttm_lock.h" #include "ttm/ttm_module.h" #include <linux/atomic.h> #include <linux/errno.h> #include <linux/wait.h> #include <linux/sched.h> #include <linux/module.h> #define TTM_WRITE_LOCK_PENDING (1 << 0) #define TTM_VT_LOCK_PENDING (1 << 1) #define TTM_SUSPEND_LOCK_PENDING (1 << 2) #define TTM_VT_LOCK (1 << 3) #define TTM_SUSPEND_LOCK (1 << 4) void ttm_lock_init(struct ttm_lock *lock) { spin_lock_init(&lock->lock); init_waitqueue_head(&lock->queue); lock->rw = 0; lock->flags = 0; lock->kill_takers = false; lock->signal = SIGKILL; } EXPORT_SYMBOL(ttm_lock_init); void ttm_read_unlock(struct ttm_lock *lock) { spin_lock(&lock->lock); if (--lock->rw == 0) wake_up_all(&lock->queue); spin_unlock(&lock->lock); } EXPORT_SYMBOL(ttm_read_unlock); static bool __ttm_read_lock(struct ttm_lock *lock) { bool locked = false; spin_lock(&lock->lock); if (unlikely(lock->kill_takers)) { send_sig(lock->signal, current, 0); spin_unlock(&lock->lock); return false; } if (lock->rw >= 0 && lock->flags == 0) { ++lock->rw; locked = true; } spin_unlock(&lock->lock); return locked; } int ttm_read_lock(struct ttm_lock *lock, bool interruptible) { int ret = 0; if (interruptible) ret = wait_event_interruptible(lock->queue, __ttm_read_lock(lock)); else wait_event(lock->queue, __ttm_read_lock(lock)); return ret; } EXPORT_SYMBOL(ttm_read_lock); static bool __ttm_read_trylock(struct ttm_lock *lock, bool *locked) { bool block = true; *locked = false; spin_lock(&lock->lock); if (unlikely(lock->kill_takers)) { send_sig(lock->signal, current, 0); spin_unlock(&lock->lock); return false; } if (lock->rw >= 0 && lock->flags == 0) { ++lock->rw; block = false; *locked = true; } else if (lock->flags == 0) { block = false; } spin_unlock(&lock->lock); return !block; } int ttm_read_trylock(struct ttm_lock *lock, bool interruptible) { int ret = 0; bool locked; 
if (interruptible) ret = wait_event_interruptible (lock->queue, __ttm_read_trylock(lock, &locked)); else wait_event(lock->queue, __ttm_read_trylock(lock, &locked)); if (unlikely(ret != 0)) { BUG_ON(locked); return ret; } return (locked) ? 0 : -EBUSY; } void ttm_write_unlock(struct ttm_lock *lock) { spin_lock(&lock->lock); lock->rw = 0; wake_up_all(&lock->queue); spin_unlock(&lock->lock); } EXPORT_SYMBOL(ttm_write_unlock); static bool __ttm_write_lock(struct ttm_lock *lock) { bool locked = false; spin_lock(&lock->lock); if (unlikely(lock->kill_takers)) { send_sig(lock->signal, current, 0); spin_unlock(&lock->lock); return false; } if (lock->rw == 0 && ((lock->flags & ~TTM_WRITE_LOCK_PENDING) == 0)) { lock->rw = -1; lock->flags &= ~TTM_WRITE_LOCK_PENDING; locked = true; } else { lock->flags |= TTM_WRITE_LOCK_PENDING; } spin_unlock(&lock->lock); return locked; } int ttm_write_lock(struct ttm_lock *lock, bool interruptible) { int ret = 0; if (interruptible) { ret = wait_event_interruptible(lock->queue, __ttm_write_lock(lock)); if (unlikely(ret != 0)) { spin_lock(&lock->lock); lock->flags &= ~TTM_WRITE_LOCK_PENDING; wake_up_all(&lock->queue); spin_unlock(&lock->lock); } } else wait_event(lock->queue, __ttm_read_lock(lock)); return ret; } EXPORT_SYMBOL(ttm_write_lock); void ttm_write_lock_downgrade(struct ttm_lock *lock) { spin_lock(&lock->lock); lock->rw = 1; wake_up_all(&lock->queue); spin_unlock(&lock->lock); } static int __ttm_vt_unlock(struct ttm_lock *lock) { int ret = 0; spin_lock(&lock->lock); if (unlikely(!(lock->flags & TTM_VT_LOCK))) ret = -EINVAL; lock->flags &= ~TTM_VT_LOCK; wake_up_all(&lock->queue); spin_unlock(&lock->lock); return ret; } static void ttm_vt_lock_remove(struct ttm_base_object **p_base) { struct ttm_base_object *base = *p_base; struct ttm_lock *lock = container_of(base, struct ttm_lock, base); int ret; *p_base = NULL; ret = __ttm_vt_unlock(lock); BUG_ON(ret != 0); } static bool __ttm_vt_lock(struct ttm_lock *lock) { bool locked = false; 
spin_lock(&lock->lock); if (lock->rw == 0) { lock->flags &= ~TTM_VT_LOCK_PENDING; lock->flags |= TTM_VT_LOCK; locked = true; } else { lock->flags |= TTM_VT_LOCK_PENDING; } spin_unlock(&lock->lock); return locked; } int ttm_vt_lock(struct ttm_lock *lock, bool interruptible, struct ttm_object_file *tfile) { int ret = 0; if (interruptible) { ret = wait_event_interruptible(lock->queue, __ttm_vt_lock(lock)); if (unlikely(ret != 0)) { spin_lock(&lock->lock); lock->flags &= ~TTM_VT_LOCK_PENDING; wake_up_all(&lock->queue); spin_unlock(&lock->lock); return ret; } } else wait_event(lock->queue, __ttm_vt_lock(lock)); /* * Add a base-object, the destructor of which will * make sure the lock is released if the client dies * while holding it. */ ret = ttm_base_object_init(tfile, &lock->base, false, ttm_lock_type, &ttm_vt_lock_remove, NULL); if (ret) (void)__ttm_vt_unlock(lock); else lock->vt_holder = tfile; return ret; } EXPORT_SYMBOL(ttm_vt_lock); int ttm_vt_unlock(struct ttm_lock *lock) { return ttm_ref_object_base_unref(lock->vt_holder, lock->base.hash.key, TTM_REF_USAGE); } EXPORT_SYMBOL(ttm_vt_unlock); void ttm_suspend_unlock(struct ttm_lock *lock) { spin_lock(&lock->lock); lock->flags &= ~TTM_SUSPEND_LOCK; wake_up_all(&lock->queue); spin_unlock(&lock->lock); } EXPORT_SYMBOL(ttm_suspend_unlock); static bool __ttm_suspend_lock(struct ttm_lock *lock) { bool locked = false; spin_lock(&lock->lock); if (lock->rw == 0) { lock->flags &= ~TTM_SUSPEND_LOCK_PENDING; lock->flags |= TTM_SUSPEND_LOCK; locked = true; } else { lock->flags |= TTM_SUSPEND_LOCK_PENDING; } spin_unlock(&lock->lock); return locked; } void ttm_suspend_lock(struct ttm_lock *lock) { wait_event(lock->queue, __ttm_suspend_lock(lock)); } EXPORT_SYMBOL(ttm_suspend_lock);
gpl-2.0
FXITech/kernel
fs/notify/dnotify/dnotify.c
8359
11681
/* * Directory notifications for Linux. * * Copyright (C) 2000,2001,2002 Stephen Rothwell * * Copyright (C) 2009 Eric Paris <Red Hat Inc> * dnotify was largly rewritten to use the new fsnotify infrastructure * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2, or (at your option) any * later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. */ #include <linux/fs.h> #include <linux/module.h> #include <linux/sched.h> #include <linux/dnotify.h> #include <linux/init.h> #include <linux/spinlock.h> #include <linux/slab.h> #include <linux/fdtable.h> #include <linux/fsnotify_backend.h> int dir_notify_enable __read_mostly = 1; static struct kmem_cache *dnotify_struct_cache __read_mostly; static struct kmem_cache *dnotify_mark_cache __read_mostly; static struct fsnotify_group *dnotify_group __read_mostly; static DEFINE_MUTEX(dnotify_mark_mutex); /* * dnotify will attach one of these to each inode (i_fsnotify_marks) which * is being watched by dnotify. If multiple userspace applications are watching * the same directory with dnotify their information is chained in dn */ struct dnotify_mark { struct fsnotify_mark fsn_mark; struct dnotify_struct *dn; }; /* * When a process starts or stops watching an inode the set of events which * dnotify cares about for that inode may change. This function runs the * list of everything receiving dnotify events about this directory and calculates * the set of all those events. After it updates what dnotify is interested in * it calls the fsnotify function so it can update the set of all events relevant * to this inode. 
*/ static void dnotify_recalc_inode_mask(struct fsnotify_mark *fsn_mark) { __u32 new_mask, old_mask; struct dnotify_struct *dn; struct dnotify_mark *dn_mark = container_of(fsn_mark, struct dnotify_mark, fsn_mark); assert_spin_locked(&fsn_mark->lock); old_mask = fsn_mark->mask; new_mask = 0; for (dn = dn_mark->dn; dn != NULL; dn = dn->dn_next) new_mask |= (dn->dn_mask & ~FS_DN_MULTISHOT); fsnotify_set_mark_mask_locked(fsn_mark, new_mask); if (old_mask == new_mask) return; if (fsn_mark->i.inode) fsnotify_recalc_inode_mask(fsn_mark->i.inode); } /* * Mains fsnotify call where events are delivered to dnotify. * Find the dnotify mark on the relevant inode, run the list of dnotify structs * on that mark and determine which of them has expressed interest in receiving * events of this type. When found send the correct process and signal and * destroy the dnotify struct if it was not registered to receive multiple * events. */ static int dnotify_handle_event(struct fsnotify_group *group, struct fsnotify_mark *inode_mark, struct fsnotify_mark *vfsmount_mark, struct fsnotify_event *event) { struct dnotify_mark *dn_mark; struct inode *to_tell; struct dnotify_struct *dn; struct dnotify_struct **prev; struct fown_struct *fown; __u32 test_mask = event->mask & ~FS_EVENT_ON_CHILD; BUG_ON(vfsmount_mark); to_tell = event->to_tell; dn_mark = container_of(inode_mark, struct dnotify_mark, fsn_mark); spin_lock(&inode_mark->lock); prev = &dn_mark->dn; while ((dn = *prev) != NULL) { if ((dn->dn_mask & test_mask) == 0) { prev = &dn->dn_next; continue; } fown = &dn->dn_filp->f_owner; send_sigio(fown, dn->dn_fd, POLL_MSG); if (dn->dn_mask & FS_DN_MULTISHOT) prev = &dn->dn_next; else { *prev = dn->dn_next; kmem_cache_free(dnotify_struct_cache, dn); dnotify_recalc_inode_mask(inode_mark); } } spin_unlock(&inode_mark->lock); return 0; } /* * Given an inode and mask determine if dnotify would be interested in sending * userspace notification for that pair. 
*/ static bool dnotify_should_send_event(struct fsnotify_group *group, struct inode *inode, struct fsnotify_mark *inode_mark, struct fsnotify_mark *vfsmount_mark, __u32 mask, void *data, int data_type) { /* not a dir, dnotify doesn't care */ if (!S_ISDIR(inode->i_mode)) return false; return true; } static void dnotify_free_mark(struct fsnotify_mark *fsn_mark) { struct dnotify_mark *dn_mark = container_of(fsn_mark, struct dnotify_mark, fsn_mark); BUG_ON(dn_mark->dn); kmem_cache_free(dnotify_mark_cache, dn_mark); } static struct fsnotify_ops dnotify_fsnotify_ops = { .handle_event = dnotify_handle_event, .should_send_event = dnotify_should_send_event, .free_group_priv = NULL, .freeing_mark = NULL, .free_event_priv = NULL, }; /* * Called every time a file is closed. Looks first for a dnotify mark on the * inode. If one is found run all of the ->dn structures attached to that * mark for one relevant to this process closing the file and remove that * dnotify_struct. If that was the last dnotify_struct also remove the * fsnotify_mark. 
*/ void dnotify_flush(struct file *filp, fl_owner_t id) { struct fsnotify_mark *fsn_mark; struct dnotify_mark *dn_mark; struct dnotify_struct *dn; struct dnotify_struct **prev; struct inode *inode; inode = filp->f_path.dentry->d_inode; if (!S_ISDIR(inode->i_mode)) return; fsn_mark = fsnotify_find_inode_mark(dnotify_group, inode); if (!fsn_mark) return; dn_mark = container_of(fsn_mark, struct dnotify_mark, fsn_mark); mutex_lock(&dnotify_mark_mutex); spin_lock(&fsn_mark->lock); prev = &dn_mark->dn; while ((dn = *prev) != NULL) { if ((dn->dn_owner == id) && (dn->dn_filp == filp)) { *prev = dn->dn_next; kmem_cache_free(dnotify_struct_cache, dn); dnotify_recalc_inode_mask(fsn_mark); break; } prev = &dn->dn_next; } spin_unlock(&fsn_mark->lock); /* nothing else could have found us thanks to the dnotify_mark_mutex */ if (dn_mark->dn == NULL) fsnotify_destroy_mark(fsn_mark); mutex_unlock(&dnotify_mark_mutex); fsnotify_put_mark(fsn_mark); } /* this conversion is done only at watch creation */ static __u32 convert_arg(unsigned long arg) { __u32 new_mask = FS_EVENT_ON_CHILD; if (arg & DN_MULTISHOT) new_mask |= FS_DN_MULTISHOT; if (arg & DN_DELETE) new_mask |= (FS_DELETE | FS_MOVED_FROM); if (arg & DN_MODIFY) new_mask |= FS_MODIFY; if (arg & DN_ACCESS) new_mask |= FS_ACCESS; if (arg & DN_ATTRIB) new_mask |= FS_ATTRIB; if (arg & DN_RENAME) new_mask |= FS_DN_RENAME; if (arg & DN_CREATE) new_mask |= (FS_CREATE | FS_MOVED_TO); return new_mask; } /* * If multiple processes watch the same inode with dnotify there is only one * dnotify mark in inode->i_fsnotify_marks but we chain a dnotify_struct * onto that mark. This function either attaches the new dnotify_struct onto * that list, or it |= the mask onto an existing dnofiy_struct. */ static int attach_dn(struct dnotify_struct *dn, struct dnotify_mark *dn_mark, fl_owner_t id, int fd, struct file *filp, __u32 mask) { struct dnotify_struct *odn; odn = dn_mark->dn; while (odn != NULL) { /* adding more events to existing dnofiy_struct? 
*/ if ((odn->dn_owner == id) && (odn->dn_filp == filp)) { odn->dn_fd = fd; odn->dn_mask |= mask; return -EEXIST; } odn = odn->dn_next; } dn->dn_mask = mask; dn->dn_fd = fd; dn->dn_filp = filp; dn->dn_owner = id; dn->dn_next = dn_mark->dn; dn_mark->dn = dn; return 0; } /* * When a process calls fcntl to attach a dnotify watch to a directory it ends * up here. Allocate both a mark for fsnotify to add and a dnotify_struct to be * attached to the fsnotify_mark. */ int fcntl_dirnotify(int fd, struct file *filp, unsigned long arg) { struct dnotify_mark *new_dn_mark, *dn_mark; struct fsnotify_mark *new_fsn_mark, *fsn_mark; struct dnotify_struct *dn; struct inode *inode; fl_owner_t id = current->files; struct file *f; int destroy = 0, error = 0; __u32 mask; /* we use these to tell if we need to kfree */ new_fsn_mark = NULL; dn = NULL; if (!dir_notify_enable) { error = -EINVAL; goto out_err; } /* a 0 mask means we are explicitly removing the watch */ if ((arg & ~DN_MULTISHOT) == 0) { dnotify_flush(filp, id); error = 0; goto out_err; } /* dnotify only works on directories */ inode = filp->f_path.dentry->d_inode; if (!S_ISDIR(inode->i_mode)) { error = -ENOTDIR; goto out_err; } /* expect most fcntl to add new rather than augment old */ dn = kmem_cache_alloc(dnotify_struct_cache, GFP_KERNEL); if (!dn) { error = -ENOMEM; goto out_err; } /* new fsnotify mark, we expect most fcntl calls to add a new mark */ new_dn_mark = kmem_cache_alloc(dnotify_mark_cache, GFP_KERNEL); if (!new_dn_mark) { error = -ENOMEM; goto out_err; } /* convert the userspace DN_* "arg" to the internal FS_* defines in fsnotify */ mask = convert_arg(arg); /* set up the new_fsn_mark and new_dn_mark */ new_fsn_mark = &new_dn_mark->fsn_mark; fsnotify_init_mark(new_fsn_mark, dnotify_free_mark); new_fsn_mark->mask = mask; new_dn_mark->dn = NULL; /* this is needed to prevent the fcntl/close race described below */ mutex_lock(&dnotify_mark_mutex); /* add the new_fsn_mark or find an old one. 
*/ fsn_mark = fsnotify_find_inode_mark(dnotify_group, inode); if (fsn_mark) { dn_mark = container_of(fsn_mark, struct dnotify_mark, fsn_mark); spin_lock(&fsn_mark->lock); } else { fsnotify_add_mark(new_fsn_mark, dnotify_group, inode, NULL, 0); spin_lock(&new_fsn_mark->lock); fsn_mark = new_fsn_mark; dn_mark = new_dn_mark; /* we used new_fsn_mark, so don't free it */ new_fsn_mark = NULL; } rcu_read_lock(); f = fcheck(fd); rcu_read_unlock(); /* if (f != filp) means that we lost a race and another task/thread * actually closed the fd we are still playing with before we grabbed * the dnotify_mark_mutex and fsn_mark->lock. Since closing the fd is the * only time we clean up the marks we need to get our mark off * the list. */ if (f != filp) { /* if we added ourselves, shoot ourselves, it's possible that * the flush actually did shoot this fsn_mark. That's fine too * since multiple calls to destroy_mark is perfectly safe, if * we found a dn_mark already attached to the inode, just sod * off silently as the flush at close time dealt with it. */ if (dn_mark == new_dn_mark) destroy = 1; goto out; } error = __f_setown(filp, task_pid(current), PIDTYPE_PID, 0); if (error) { /* if we added, we must shoot */ if (dn_mark == new_dn_mark) destroy = 1; goto out; } error = attach_dn(dn, dn_mark, id, fd, filp, mask); /* !error means that we attached the dn to the dn_mark, so don't free it */ if (!error) dn = NULL; /* -EEXIST means that we didn't add this new dn and used an old one. 
* that isn't an error (and the unused dn should be freed) */ else if (error == -EEXIST) error = 0; dnotify_recalc_inode_mask(fsn_mark); out: spin_unlock(&fsn_mark->lock); if (destroy) fsnotify_destroy_mark(fsn_mark); mutex_unlock(&dnotify_mark_mutex); fsnotify_put_mark(fsn_mark); out_err: if (new_fsn_mark) fsnotify_put_mark(new_fsn_mark); if (dn) kmem_cache_free(dnotify_struct_cache, dn); return error; } static int __init dnotify_init(void) { dnotify_struct_cache = KMEM_CACHE(dnotify_struct, SLAB_PANIC); dnotify_mark_cache = KMEM_CACHE(dnotify_mark, SLAB_PANIC); dnotify_group = fsnotify_alloc_group(&dnotify_fsnotify_ops); if (IS_ERR(dnotify_group)) panic("unable to allocate fsnotify group for dnotify\n"); return 0; } module_init(dnotify_init)
gpl-2.0
matsu/linux-stable
drivers/scsi/aic7xxx/aic79xx_proc.c
8615
10544
/*
 * Copyright (c) 2000-2001 Adaptec Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 * 3. Neither the names of the above-listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 *
 * String handling code courtesy of Gerard Roudier's <groudier@club-internet.fr>
 * sym driver.
 *
 * $Id: //depot/aic7xxx/linux/drivers/scsi/aic7xxx/aic79xx_proc.c#19 $
 */
#include "aic79xx_osm.h"
#include "aic79xx_inline.h"

static void	copy_mem_info(struct info_str *info, char *data, int len);
static int	copy_info(struct info_str *info, char *fmt, ...);
static void	ahd_dump_target_state(struct ahd_softc *ahd,
				      struct info_str *info,
				      u_int our_id, char channel,
				      u_int target_id);
static void	ahd_dump_device_state(struct info_str *info,
				      struct scsi_device *sdev);
static int	ahd_proc_write_seeprom(struct ahd_softc *ahd,
				       char *buffer, int length);

/*
 * Table of syncrates that don't follow the "divisible by 4"
 * rule. This table will be expanded in future SCSI specs.
 */
static const struct {
	u_int period_factor;
	u_int period;	/* in 100ths of ns */
} scsi_syncrates[] = {
	{ 0x08, 625 },	/* FAST-160 */
	{ 0x09, 1250 },	/* FAST-80 */
	{ 0x0a, 2500 },	/* FAST-40 40MHz */
	{ 0x0b, 3030 },	/* FAST-40 33MHz */
	{ 0x0c, 5000 }	/* FAST-20 */
};

/*
 * Return the frequency in kHz corresponding to the given
 * sync period factor.
 */
static u_int
ahd_calc_syncsrate(u_int period_factor)
{
	int i;

	/* See if the period is in the "exception" table */
	for (i = 0; i < ARRAY_SIZE(scsi_syncrates); i++) {

		if (period_factor == scsi_syncrates[i].period_factor) {
			/* Period in kHz */
			return (100000000 / scsi_syncrates[i].period);
		}
	}

	/*
	 * Wasn't in the table, so use the standard
	 * 4 times conversion.
	 */
	return (10000000 / (period_factor * 4 * 10));
}

/*
 * Append @len bytes of @data to the user buffer described by @info,
 * honouring the caller-requested (offset, length) window so the /proc
 * read can be satisfied piecemeal.  Bytes before info->offset are
 * skipped; bytes past the window are truncated.
 */
static void
copy_mem_info(struct info_str *info, char *data, int len)
{
	if (info->pos + len > info->offset + info->length)
		len = info->offset + info->length - info->pos;

	if (info->pos + len < info->offset) {
		info->pos += len;
		return;
	}

	if (info->pos < info->offset) {
		off_t partial;

		partial = info->offset - info->pos;
		data += partial;
		info->pos += partial;
		len  -= partial;
	}

	if (len > 0) {
		memcpy(info->buffer, data, len);
		info->pos += len;
		info->buffer += len;
	}
}

/*
 * printf-style helper that formats into a stack buffer and funnels the
 * result through copy_mem_info().  Returns the formatted length.
 * NOTE(review): buf is 256 bytes and vsprintf() is unbounded — callers
 * must keep each formatted line short; verify no format can exceed 256.
 */
static int
copy_info(struct info_str *info, char *fmt, ...)
{
	va_list args;
	char buf[256];
	int len;

	va_start(args, fmt);
	len = vsprintf(buf, fmt, args);
	va_end(args);

	copy_mem_info(info, buf, len);
	return (len);
}

/*
 * Pretty-print one set of negotiated transfer parameters (speed, sync
 * frequency, PPR option flags, width) into the /proc output.
 */
static void
ahd_format_transinfo(struct info_str *info, struct ahd_transinfo *tinfo)
{
	u_int speed;
	u_int freq;
	u_int mb;

	if (tinfo->period == AHD_PERIOD_UNKNOWN) {
		copy_info(info, "Renegotiation Pending\n");
		return;
	}
	speed = 3300;	/* async baseline, in KB/s */
	freq = 0;
	if (tinfo->offset != 0) {
		freq = ahd_calc_syncsrate(tinfo->period);
		speed = freq;
	}
	speed *= (0x01 << tinfo->width);
	mb = speed / 1000;

	if (mb > 0)
		copy_info(info, "%d.%03dMB/s transfers", mb, speed % 1000);
	else
		copy_info(info, "%dKB/s transfers", speed);

	if (freq != 0) {
		int printed_options;

		printed_options = 0;
		copy_info(info, " (%d.%03dMHz", freq / 1000, freq % 1000);
		if ((tinfo->ppr_options & MSG_EXT_PPR_RD_STRM) != 0) {
			copy_info(info, " RDSTRM");
			printed_options++;
		}
		if ((tinfo->ppr_options & MSG_EXT_PPR_DT_REQ) != 0) {
			copy_info(info, "%s", printed_options ? "|DT" : " DT");
			printed_options++;
		}
		if ((tinfo->ppr_options & MSG_EXT_PPR_IU_REQ) != 0) {
			copy_info(info, "%s", printed_options ? "|IU" : " IU");
			printed_options++;
		}
		if ((tinfo->ppr_options & MSG_EXT_PPR_RTI) != 0) {
			copy_info(info, "%s", printed_options
				  ? "|RTI" : " RTI");
			printed_options++;
		}
		if ((tinfo->ppr_options & MSG_EXT_PPR_QAS_REQ) != 0) {
			copy_info(info, "%s", printed_options
				  ? "|QAS" : " QAS");
			printed_options++;
		}
	}

	if (tinfo->width > 0) {
		if (freq != 0) {
			copy_info(info, ", ");
		} else {
			copy_info(info, " (");
		}
		copy_info(info, "%dbit)", 8 * (0x01 << tinfo->width));
	} else if (freq != 0) {
		copy_info(info, ")");
	}
	copy_info(info, "\n");
}

/*
 * Dump the user/goal/current negotiation settings for one target, then
 * walk its LUNs and dump per-device state.
 */
static void
ahd_dump_target_state(struct ahd_softc *ahd, struct info_str *info,
		      u_int our_id, char channel, u_int target_id)
{
	struct scsi_target *starget;
	struct ahd_initiator_tinfo *tinfo;
	struct ahd_tmode_tstate *tstate;
	int lun;

	tinfo = ahd_fetch_transinfo(ahd, channel, our_id,
				    target_id, &tstate);
	copy_info(info, "Target %d Negotiation Settings\n", target_id);
	copy_info(info, "\tUser: ");
	ahd_format_transinfo(info, &tinfo->user);
	starget = ahd->platform_data->starget[target_id];
	if (starget == NULL)
		return;

	copy_info(info, "\tGoal: ");
	ahd_format_transinfo(info, &tinfo->goal);
	copy_info(info, "\tCurr: ");
	ahd_format_transinfo(info, &tinfo->curr);

	for (lun = 0; lun < AHD_NUM_LUNS; lun++) {
		struct scsi_device *dev;

		dev = scsi_device_lookup_by_target(starget, lun);

		if (dev == NULL)
			continue;

		/* NOTE(review): scsi_device_lookup_by_target() takes a
		 * reference that is never dropped here — verify whether a
		 * scsi_device_put() is needed. */
		ahd_dump_device_state(info, dev);
	}
}

/* Dump queueing statistics for a single scsi_device. */
static void
ahd_dump_device_state(struct info_str *info, struct scsi_device *sdev)
{
	struct ahd_linux_device *dev = scsi_transport_device_data(sdev);

	copy_info(info, "\tChannel %c Target %d Lun %d Settings\n",
		  sdev->sdev_target->channel + 'A',
		  sdev->sdev_target->id, sdev->lun);

	copy_info(info, "\t\tCommands Queued %ld\n", dev->commands_issued);
	copy_info(info, "\t\tCommands Active %d\n", dev->active);
	copy_info(info, "\t\tCommand Openings %d\n", dev->openings);
	copy_info(info, "\t\tMax Tagged Openings %d\n", dev->maxtags);
	copy_info(info, "\t\tDevice Queue Frozen Count %d\n", dev->qfrozen);
}

/*
 * Handle a write to the /proc node: verify the buffer is a complete,
 * checksummed seeprom_config image and burn it into the serial EEPROM.
 * Returns the number of bytes written or -EINVAL.
 */
static int
ahd_proc_write_seeprom(struct ahd_softc *ahd, char *buffer, int length)
{
	ahd_mode_state saved_modes;
	int have_seeprom;
	u_long s;
	int paused;
	int written;

	/* Default to failure. */
	written = -EINVAL;
	ahd_lock(ahd, &s);
	paused = ahd_is_paused(ahd);
	if (!paused)
		ahd_pause(ahd);

	saved_modes = ahd_save_modes(ahd);
	ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
	if (length != sizeof(struct seeprom_config)) {
		printk("ahd_proc_write_seeprom: incorrect buffer size\n");
		goto done;
	}

	have_seeprom = ahd_verify_cksum((struct seeprom_config*)buffer);
	if (have_seeprom == 0) {
		printk("ahd_proc_write_seeprom: cksum verification failed\n");
		goto done;
	}

	have_seeprom = ahd_acquire_seeprom(ahd);
	if (!have_seeprom) {
		printk("ahd_proc_write_seeprom: No Serial EEPROM\n");
		goto done;
	} else {
		u_int start_addr;

		if (ahd->seep_config == NULL) {
			ahd->seep_config = kmalloc(sizeof(*ahd->seep_config),
						   GFP_ATOMIC);
			if (ahd->seep_config == NULL) {
				printk("aic79xx: Unable to allocate serial "
				       "eeprom buffer.  Write failing\n");
				goto done;
			}
		}
		printk("aic79xx: Writing Serial EEPROM\n");
		start_addr = 32 * (ahd->channel - 'A');
		ahd_write_seeprom(ahd, (u_int16_t *)buffer, start_addr,
				  sizeof(struct seeprom_config)/2);
		ahd_read_seeprom(ahd, (uint16_t *)ahd->seep_config,
				 start_addr, sizeof(struct seeprom_config)/2,
				 /*ByteStream*/FALSE);
		ahd_release_seeprom(ahd);
		written = length;
	}

done:
	ahd_restore_modes(ahd, saved_modes);
	if (!paused)
		ahd_unpause(ahd);
	ahd_unlock(ahd, &s);
	return (written);
}

/*
 * Return information to handle /proc support for the driver.
 */
int
ahd_linux_proc_info(struct Scsi_Host *shost, char *buffer, char **start,
		    off_t offset, int length, int inout)
{
	struct	ahd_softc *ahd = *(struct ahd_softc **)shost->hostdata;
	struct	info_str info;
	char	ahd_info[256];
	u_int	max_targ;
	u_int	i;
	int	retval;

	/* Has data been written to the file? */
	if (inout == TRUE) {
		retval = ahd_proc_write_seeprom(ahd, buffer, length);
		goto done;
	}

	if (start)
		*start = buffer;

	info.buffer	= buffer;
	info.length	= length;
	info.offset	= offset;
	info.pos	= 0;

	copy_info(&info, "Adaptec AIC79xx driver version: %s\n",
		  AIC79XX_DRIVER_VERSION);
	copy_info(&info, "%s\n", ahd->description);
	ahd_controller_info(ahd, ahd_info);
	copy_info(&info, "%s\n", ahd_info);
	copy_info(&info, "Allocated SCBs: %d, SG List Length: %d\n\n",
		  ahd->scb_data.numscbs, AHD_NSEG);

	max_targ = 16;

	if (ahd->seep_config == NULL)
		copy_info(&info, "No Serial EEPROM\n");
	else {
		copy_info(&info, "Serial EEPROM:\n");
		/* Hex-dump the cached EEPROM image, 8 words per row. */
		for (i = 0; i < sizeof(*ahd->seep_config)/2; i++) {
			if (((i % 8) == 0) && (i != 0)) {
				copy_info(&info, "\n");
			}
			copy_info(&info, "0x%.4x ",
				  ((uint16_t*)ahd->seep_config)[i]);
		}
		copy_info(&info, "\n");
	}
	copy_info(&info, "\n");

	if ((ahd->features & AHD_WIDE) == 0)
		max_targ = 8;

	for (i = 0; i < max_targ; i++) {

		ahd_dump_target_state(ahd, &info, ahd->our_id,
				      'A', /*target_id*/i);
	}

	retval = info.pos > info.offset ? info.pos - info.offset : 0;
done:
	return (retval);
}
gpl-2.0
henrix/rpi-linux
arch/powerpc/math-emu/fmsubs.c
13735
1154
#include <linux/types.h> #include <linux/errno.h> #include <asm/uaccess.h> #include <asm/sfp-machine.h> #include <math-emu/soft-fp.h> #include <math-emu/double.h> #include <math-emu/single.h> int fmsubs(void *frD, void *frA, void *frB, void *frC) { FP_DECL_D(R); FP_DECL_D(A); FP_DECL_D(B); FP_DECL_D(C); FP_DECL_D(T); FP_DECL_EX; #ifdef DEBUG printk("%s: %p %p %p %p\n", __func__, frD, frA, frB, frC); #endif FP_UNPACK_DP(A, frA); FP_UNPACK_DP(B, frB); FP_UNPACK_DP(C, frC); #ifdef DEBUG printk("A: %ld %lu %lu %ld (%ld)\n", A_s, A_f1, A_f0, A_e, A_c); printk("B: %ld %lu %lu %ld (%ld)\n", B_s, B_f1, B_f0, B_e, B_c); printk("C: %ld %lu %lu %ld (%ld)\n", C_s, C_f1, C_f0, C_e, C_c); #endif if ((A_c == FP_CLS_INF && C_c == FP_CLS_ZERO) || (A_c == FP_CLS_ZERO && C_c == FP_CLS_INF)) FP_SET_EXCEPTION(EFLAG_VXIMZ); FP_MUL_D(T, A, C); if (B_c != FP_CLS_NAN) B_s ^= 1; if (T_s != B_s && T_c == FP_CLS_INF && B_c == FP_CLS_INF) FP_SET_EXCEPTION(EFLAG_VXISI); FP_ADD_D(R, T, B); #ifdef DEBUG printk("D: %ld %lu %lu %ld (%ld)\n", R_s, R_f1, R_f0, R_e, R_c); #endif __FP_PACK_DS(frD, R); return FP_CUR_EXCEPTIONS; }
gpl-2.0
C-Aniruddh/Axiom_totoro
arch/alpha/lib/memcpy.c
13735
4108
/* * linux/arch/alpha/lib/memcpy.c * * Copyright (C) 1995 Linus Torvalds */ /* * This is a reasonably optimized memcpy() routine. */ /* * Note that the C code is written to be optimized into good assembly. However, * at this point gcc is unable to sanely compile "if (n >= 0)", resulting in a * explicit compare against 0 (instead of just using the proper "blt reg, xx" or * "bge reg, xx"). I hope alpha-gcc will be fixed to notice this eventually.. */ #include <linux/types.h> /* * This should be done in one go with ldq_u*2/mask/stq_u. Do it * with a macro so that we can fix it up later.. */ #define ALIGN_DEST_TO8_UP(d,s,n) \ while (d & 7) { \ if (n <= 0) return; \ n--; \ *(char *) d = *(char *) s; \ d++; s++; \ } #define ALIGN_DEST_TO8_DN(d,s,n) \ while (d & 7) { \ if (n <= 0) return; \ n--; \ d--; s--; \ *(char *) d = *(char *) s; \ } /* * This should similarly be done with ldq_u*2/mask/stq. The destination * is aligned, but we don't fill in a full quad-word */ #define DO_REST_UP(d,s,n) \ while (n > 0) { \ n--; \ *(char *) d = *(char *) s; \ d++; s++; \ } #define DO_REST_DN(d,s,n) \ while (n > 0) { \ n--; \ d--; s--; \ *(char *) d = *(char *) s; \ } /* * This should be done with ldq/mask/stq. The source and destination are * aligned, but we don't fill in a full quad-word */ #define DO_REST_ALIGNED_UP(d,s,n) DO_REST_UP(d,s,n) #define DO_REST_ALIGNED_DN(d,s,n) DO_REST_DN(d,s,n) /* * This does unaligned memory copies. We want to avoid storing to * an unaligned address, as that would do a read-modify-write cycle. * We also want to avoid double-reading the unaligned reads. * * Note the ordering to try to avoid load (and address generation) latencies. 
*/ static inline void __memcpy_unaligned_up (unsigned long d, unsigned long s, long n) { ALIGN_DEST_TO8_UP(d,s,n); n -= 8; /* to avoid compare against 8 in the loop */ if (n >= 0) { unsigned long low_word, high_word; __asm__("ldq_u %0,%1":"=r" (low_word):"m" (*(unsigned long *) s)); do { unsigned long tmp; __asm__("ldq_u %0,%1":"=r" (high_word):"m" (*(unsigned long *)(s+8))); n -= 8; __asm__("extql %1,%2,%0" :"=r" (low_word) :"r" (low_word), "r" (s)); __asm__("extqh %1,%2,%0" :"=r" (tmp) :"r" (high_word), "r" (s)); s += 8; *(unsigned long *) d = low_word | tmp; d += 8; low_word = high_word; } while (n >= 0); } n += 8; DO_REST_UP(d,s,n); } static inline void __memcpy_unaligned_dn (unsigned long d, unsigned long s, long n) { /* I don't understand AXP assembler well enough for this. -Tim */ s += n; d += n; while (n--) * (char *) --d = * (char *) --s; } /* * Hmm.. Strange. The __asm__ here is there to make gcc use an integer register * for the load-store. I don't know why, but it would seem that using a floating * point register for the move seems to slow things down (very small difference, * though). * * Note the ordering to try to avoid load (and address generation) latencies. 
*/ static inline void __memcpy_aligned_up (unsigned long d, unsigned long s, long n) { ALIGN_DEST_TO8_UP(d,s,n); n -= 8; while (n >= 0) { unsigned long tmp; __asm__("ldq %0,%1":"=r" (tmp):"m" (*(unsigned long *) s)); n -= 8; s += 8; *(unsigned long *) d = tmp; d += 8; } n += 8; DO_REST_ALIGNED_UP(d,s,n); } static inline void __memcpy_aligned_dn (unsigned long d, unsigned long s, long n) { s += n; d += n; ALIGN_DEST_TO8_DN(d,s,n); n -= 8; while (n >= 0) { unsigned long tmp; s -= 8; __asm__("ldq %0,%1":"=r" (tmp):"m" (*(unsigned long *) s)); n -= 8; d -= 8; *(unsigned long *) d = tmp; } n += 8; DO_REST_ALIGNED_DN(d,s,n); } void * memcpy(void * dest, const void *src, size_t n) { if (!(((unsigned long) dest ^ (unsigned long) src) & 7)) { __memcpy_aligned_up ((unsigned long) dest, (unsigned long) src, n); return dest; } __memcpy_unaligned_up ((unsigned long) dest, (unsigned long) src, n); return dest; } /* For backward modules compatibility, define __memcpy. */ asm("__memcpy = memcpy; .globl __memcpy");
gpl-2.0
mdalexca/marlin
sound/drivers/opl4/opl4_mixer.c
15015
2867
/*
 * OPL4 mixer functions
 * Copyright (c) 2003 by Clemens Ladisch <clemens@ladisch.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

#include "opl4_local.h"
#include <sound/control.h>

/*
 * Both controls are stereo attenuators with a 0..7 range per channel
 * (0 dB .. max attenuation in hardware; exposed inverted as volume).
 */
static int snd_opl4_ctl_info(struct snd_kcontrol *kcontrol,
			     struct snd_ctl_elem_info *uinfo)
{
	uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
	uinfo->count = 2;
	uinfo->value.integer.min = 0;
	uinfo->value.integer.max = 7;
	return 0;
}

/*
 * Read the mix-control register (index in kcontrol->private_value) and
 * convert the two 3-bit hardware attenuation fields into volume values
 * (volume = 7 - attenuation; left in bits 0-2, right in bits 3-5).
 */
static int snd_opl4_ctl_get(struct snd_kcontrol *kcontrol,
			    struct snd_ctl_elem_value *ucontrol)
{
	struct snd_opl4 *opl4 = snd_kcontrol_chip(kcontrol);
	unsigned long flags;
	u8 reg = kcontrol->private_value;
	u8 value;

	spin_lock_irqsave(&opl4->reg_lock, flags);
	value = snd_opl4_read(opl4, reg);
	spin_unlock_irqrestore(&opl4->reg_lock, flags);
	ucontrol->value.integer.value[0] = 7 - (value & 7);
	ucontrol->value.integer.value[1] = 7 - ((value >> 3) & 7);
	return 0;
}

/*
 * Write both channel volumes back as attenuation fields.
 * Returns 1 if the register value changed, 0 otherwise, as required
 * by the ALSA put-callback contract.
 */
static int snd_opl4_ctl_put(struct snd_kcontrol *kcontrol,
			    struct snd_ctl_elem_value *ucontrol)
{
	struct snd_opl4 *opl4 = snd_kcontrol_chip(kcontrol);
	unsigned long flags;
	u8 reg = kcontrol->private_value;
	u8 value, old_value;

	value = (7 - (ucontrol->value.integer.value[0] & 7)) |
		((7 - (ucontrol->value.integer.value[1] & 7)) << 3);
	spin_lock_irqsave(&opl4->reg_lock, flags);
	old_value = snd_opl4_read(opl4, reg);
	snd_opl4_write(opl4, reg, value);
	spin_unlock_irqrestore(&opl4->reg_lock, flags);
	return value != old_value;
}

static struct snd_kcontrol_new snd_opl4_controls[] = {
	{
		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
		.name = "FM Playback Volume",
		.info = snd_opl4_ctl_info,
		.get = snd_opl4_ctl_get,
		.put = snd_opl4_ctl_put,
		.private_value = OPL4_REG_MIX_CONTROL_FM
	},
	{
		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
		.name = "Wavetable Playback Volume",
		.info = snd_opl4_ctl_info,
		.get = snd_opl4_ctl_get,
		.put = snd_opl4_ctl_put,
		.private_value = OPL4_REG_MIX_CONTROL_PCM
	}
};

/*
 * Register the FM and Wavetable volume controls on the card and tag the
 * mixer name.  Returns 0 or the first snd_ctl_add() error.
 */
int snd_opl4_create_mixer(struct snd_opl4 *opl4)
{
	struct snd_card *card = opl4->card;
	int i, err;

	strcat(card->mixername, ",OPL4");

	for (i = 0; i < 2; ++i) {
		err = snd_ctl_add(card, snd_ctl_new1(&snd_opl4_controls[i], opl4));
		if (err < 0)
			return err;
	}
	return 0;
}
gpl-2.0
honor6-dev/android_kernel_huawei_h60
sound/drivers/opl4/opl4_mixer.c
15015
2867
/*
 * OPL4 mixer functions
 * Copyright (c) 2003 by Clemens Ladisch <clemens@ladisch.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

#include "opl4_local.h"
#include <sound/control.h>

/*
 * Both controls are stereo attenuators with a 0..7 range per channel
 * (0 dB .. max attenuation in hardware; exposed inverted as volume).
 */
static int snd_opl4_ctl_info(struct snd_kcontrol *kcontrol,
			     struct snd_ctl_elem_info *uinfo)
{
	uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
	uinfo->count = 2;
	uinfo->value.integer.min = 0;
	uinfo->value.integer.max = 7;
	return 0;
}

/*
 * Read the mix-control register (index in kcontrol->private_value) and
 * convert the two 3-bit hardware attenuation fields into volume values
 * (volume = 7 - attenuation; left in bits 0-2, right in bits 3-5).
 */
static int snd_opl4_ctl_get(struct snd_kcontrol *kcontrol,
			    struct snd_ctl_elem_value *ucontrol)
{
	struct snd_opl4 *opl4 = snd_kcontrol_chip(kcontrol);
	unsigned long flags;
	u8 reg = kcontrol->private_value;
	u8 value;

	spin_lock_irqsave(&opl4->reg_lock, flags);
	value = snd_opl4_read(opl4, reg);
	spin_unlock_irqrestore(&opl4->reg_lock, flags);
	ucontrol->value.integer.value[0] = 7 - (value & 7);
	ucontrol->value.integer.value[1] = 7 - ((value >> 3) & 7);
	return 0;
}

/*
 * Write both channel volumes back as attenuation fields.
 * Returns 1 if the register value changed, 0 otherwise, as required
 * by the ALSA put-callback contract.
 */
static int snd_opl4_ctl_put(struct snd_kcontrol *kcontrol,
			    struct snd_ctl_elem_value *ucontrol)
{
	struct snd_opl4 *opl4 = snd_kcontrol_chip(kcontrol);
	unsigned long flags;
	u8 reg = kcontrol->private_value;
	u8 value, old_value;

	value = (7 - (ucontrol->value.integer.value[0] & 7)) |
		((7 - (ucontrol->value.integer.value[1] & 7)) << 3);
	spin_lock_irqsave(&opl4->reg_lock, flags);
	old_value = snd_opl4_read(opl4, reg);
	snd_opl4_write(opl4, reg, value);
	spin_unlock_irqrestore(&opl4->reg_lock, flags);
	return value != old_value;
}

static struct snd_kcontrol_new snd_opl4_controls[] = {
	{
		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
		.name = "FM Playback Volume",
		.info = snd_opl4_ctl_info,
		.get = snd_opl4_ctl_get,
		.put = snd_opl4_ctl_put,
		.private_value = OPL4_REG_MIX_CONTROL_FM
	},
	{
		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
		.name = "Wavetable Playback Volume",
		.info = snd_opl4_ctl_info,
		.get = snd_opl4_ctl_get,
		.put = snd_opl4_ctl_put,
		.private_value = OPL4_REG_MIX_CONTROL_PCM
	}
};

/*
 * Register the FM and Wavetable volume controls on the card and tag the
 * mixer name.  Returns 0 or the first snd_ctl_add() error.
 */
int snd_opl4_create_mixer(struct snd_opl4 *opl4)
{
	struct snd_card *card = opl4->card;
	int i, err;

	strcat(card->mixername, ",OPL4");

	for (i = 0; i < 2; ++i) {
		err = snd_ctl_add(card, snd_ctl_new1(&snd_opl4_controls[i], opl4));
		if (err < 0)
			return err;
	}
	return 0;
}
gpl-2.0
loverlucia/linux-3.10.101
net/sctp/socket.c
168
207827
/* SCTP kernel implementation * (C) Copyright IBM Corp. 2001, 2004 * Copyright (c) 1999-2000 Cisco, Inc. * Copyright (c) 1999-2001 Motorola, Inc. * Copyright (c) 2001-2003 Intel Corp. * Copyright (c) 2001-2002 Nokia, Inc. * Copyright (c) 2001 La Monte H.P. Yarroll * * This file is part of the SCTP kernel implementation * * These functions interface with the sockets layer to implement the * SCTP Extensions for the Sockets API. * * Note that the descriptions from the specification are USER level * functions--this file is the functions which populate the struct proto * for SCTP which is the BOTTOM of the sockets interface. * * This SCTP implementation is free software; * you can redistribute it and/or modify it under the terms of * the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This SCTP implementation is distributed in the hope that it * will be useful, but WITHOUT ANY WARRANTY; without even the implied * ************************ * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. * See the GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with GNU CC; see the file COPYING. If not, write to * the Free Software Foundation, 59 Temple Place - Suite 330, * Boston, MA 02111-1307, USA. * * Please send any bug reports or fixes you make to the * email address(es): * lksctp developers <lksctp-developers@lists.sourceforge.net> * * Or submit a bug report through the following website: * http://www.sf.net/projects/lksctp * * Written or modified by: * La Monte H.P. 
Yarroll <piggy@acm.org> * Narasimha Budihal <narsi@refcode.org> * Karl Knutson <karl@athena.chicago.il.us> * Jon Grimm <jgrimm@us.ibm.com> * Xingang Guo <xingang.guo@intel.com> * Daisy Chang <daisyc@us.ibm.com> * Sridhar Samudrala <samudrala@us.ibm.com> * Inaky Perez-Gonzalez <inaky.gonzalez@intel.com> * Ardelle Fan <ardelle.fan@intel.com> * Ryan Layer <rmlayer@us.ibm.com> * Anup Pemmaiah <pemmaiah@cc.usu.edu> * Kevin Gao <kevin.gao@intel.com> * * Any bugs reported given to us we will try to fix... any fixes shared will * be incorporated into the next SCTP release. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/types.h> #include <linux/kernel.h> #include <linux/wait.h> #include <linux/time.h> #include <linux/ip.h> #include <linux/capability.h> #include <linux/fcntl.h> #include <linux/poll.h> #include <linux/init.h> #include <linux/crypto.h> #include <linux/slab.h> #include <linux/file.h> #include <linux/compat.h> #include <net/ip.h> #include <net/icmp.h> #include <net/route.h> #include <net/ipv6.h> #include <net/inet_common.h> #include <linux/socket.h> /* for sa_family_t */ #include <linux/export.h> #include <net/sock.h> #include <net/sctp/sctp.h> #include <net/sctp/sm.h> /* WARNING: Please do not remove the SCTP_STATIC attribute to * any of the functions below as they are used to export functions * used by a project regression testsuite. */ /* Forward declarations for internal helper functions. 
*/ static int sctp_writeable(struct sock *sk); static void sctp_wfree(struct sk_buff *skb); static int sctp_wait_for_sndbuf(struct sctp_association *, long *timeo_p, size_t msg_len); static int sctp_wait_for_packet(struct sock * sk, int *err, long *timeo_p); static int sctp_wait_for_connect(struct sctp_association *, long *timeo_p); static int sctp_wait_for_accept(struct sock *sk, long timeo); static void sctp_wait_for_close(struct sock *sk, long timeo); static struct sctp_af *sctp_sockaddr_af(struct sctp_sock *opt, union sctp_addr *addr, int len); static int sctp_bindx_add(struct sock *, struct sockaddr *, int); static int sctp_bindx_rem(struct sock *, struct sockaddr *, int); static int sctp_send_asconf_add_ip(struct sock *, struct sockaddr *, int); static int sctp_send_asconf_del_ip(struct sock *, struct sockaddr *, int); static int sctp_send_asconf(struct sctp_association *asoc, struct sctp_chunk *chunk); static int sctp_do_bind(struct sock *, union sctp_addr *, int); static int sctp_autobind(struct sock *sk); static void sctp_sock_migrate(struct sock *, struct sock *, struct sctp_association *, sctp_socket_type_t); extern struct kmem_cache *sctp_bucket_cachep; extern long sysctl_sctp_mem[3]; extern int sysctl_sctp_rmem[3]; extern int sysctl_sctp_wmem[3]; static int sctp_memory_pressure; static atomic_long_t sctp_memory_allocated; struct percpu_counter sctp_sockets_allocated; static void sctp_enter_memory_pressure(struct sock *sk) { sctp_memory_pressure = 1; } /* Get the sndbuf space available at the time on the association. 
*/ static inline int sctp_wspace(struct sctp_association *asoc) { int amt; if (asoc->ep->sndbuf_policy) amt = asoc->sndbuf_used; else amt = sk_wmem_alloc_get(asoc->base.sk); if (amt >= asoc->base.sk->sk_sndbuf) { if (asoc->base.sk->sk_userlocks & SOCK_SNDBUF_LOCK) amt = 0; else { amt = sk_stream_wspace(asoc->base.sk); if (amt < 0) amt = 0; } } else { amt = asoc->base.sk->sk_sndbuf - amt; } return amt; } /* Increment the used sndbuf space count of the corresponding association by * the size of the outgoing data chunk. * Also, set the skb destructor for sndbuf accounting later. * * Since it is always 1-1 between chunk and skb, and also a new skb is always * allocated for chunk bundling in sctp_packet_transmit(), we can use the * destructor in the data chunk skb for the purpose of the sndbuf space * tracking. */ static inline void sctp_set_owner_w(struct sctp_chunk *chunk) { struct sctp_association *asoc = chunk->asoc; struct sock *sk = asoc->base.sk; /* The sndbuf space is tracked per association. */ sctp_association_hold(asoc); skb_set_owner_w(chunk->skb, sk); chunk->skb->destructor = sctp_wfree; /* Save the chunk pointer in skb for sctp_wfree to use later. */ *((struct sctp_chunk **)(chunk->skb->cb)) = chunk; asoc->sndbuf_used += SCTP_DATA_SNDSIZE(chunk) + sizeof(struct sk_buff) + sizeof(struct sctp_chunk); atomic_add(sizeof(struct sctp_chunk), &sk->sk_wmem_alloc); sk->sk_wmem_queued += chunk->skb->truesize; sk_mem_charge(sk, chunk->skb->truesize); } /* Verify that this is a valid address. */ static inline int sctp_verify_addr(struct sock *sk, union sctp_addr *addr, int len) { struct sctp_af *af; /* Verify basic sockaddr. */ af = sctp_sockaddr_af(sctp_sk(sk), addr, len); if (!af) return -EINVAL; /* Is this a valid SCTP address? */ if (!af->addr_valid(addr, sctp_sk(sk), NULL)) return -EINVAL; if (!sctp_sk(sk)->pf->send_verify(sctp_sk(sk), (addr))) return -EINVAL; return 0; } /* Look up the association by its id. 
If this is not a UDP-style * socket, the ID field is always ignored. */ struct sctp_association *sctp_id2assoc(struct sock *sk, sctp_assoc_t id) { struct sctp_association *asoc = NULL; /* If this is not a UDP-style socket, assoc id should be ignored. */ if (!sctp_style(sk, UDP)) { /* Return NULL if the socket state is not ESTABLISHED. It * could be a TCP-style listening socket or a socket which * hasn't yet called connect() to establish an association. */ if (!sctp_sstate(sk, ESTABLISHED)) return NULL; /* Get the first and the only association from the list. */ if (!list_empty(&sctp_sk(sk)->ep->asocs)) asoc = list_entry(sctp_sk(sk)->ep->asocs.next, struct sctp_association, asocs); return asoc; } /* Otherwise this is a UDP-style socket. */ if (!id || (id == (sctp_assoc_t)-1)) return NULL; spin_lock_bh(&sctp_assocs_id_lock); asoc = (struct sctp_association *)idr_find(&sctp_assocs_id, (int)id); spin_unlock_bh(&sctp_assocs_id_lock); if (!asoc || (asoc->base.sk != sk) || asoc->base.dead) return NULL; return asoc; } /* Look up the transport from an address and an assoc id. If both address and * id are specified, the associations matching the address and the id should be * the same. */ static struct sctp_transport *sctp_addr_id2transport(struct sock *sk, struct sockaddr_storage *addr, sctp_assoc_t id) { struct sctp_association *addr_asoc = NULL, *id_asoc = NULL; struct sctp_transport *transport; union sctp_addr *laddr = (union sctp_addr *)addr; addr_asoc = sctp_endpoint_lookup_assoc(sctp_sk(sk)->ep, laddr, &transport); if (!addr_asoc) return NULL; id_asoc = sctp_id2assoc(sk, id); if (id_asoc && (id_asoc != addr_asoc)) return NULL; sctp_get_pf_specific(sk->sk_family)->addr_v4map(sctp_sk(sk), (union sctp_addr *)addr); return transport; } /* API 3.1.2 bind() - UDP Style Syntax * The syntax of bind() is, * * ret = bind(int sd, struct sockaddr *addr, int addrlen); * * sd - the socket descriptor returned by socket(). 
* addr - the address structure (struct sockaddr_in or struct * sockaddr_in6 [RFC 2553]), * addr_len - the size of the address structure. */ SCTP_STATIC int sctp_bind(struct sock *sk, struct sockaddr *addr, int addr_len) { int retval = 0; sctp_lock_sock(sk); SCTP_DEBUG_PRINTK("sctp_bind(sk: %p, addr: %p, addr_len: %d)\n", sk, addr, addr_len); /* Disallow binding twice. */ if (!sctp_sk(sk)->ep->base.bind_addr.port) retval = sctp_do_bind(sk, (union sctp_addr *)addr, addr_len); else retval = -EINVAL; sctp_release_sock(sk); return retval; } static long sctp_get_port_local(struct sock *, union sctp_addr *); /* Verify this is a valid sockaddr. */ static struct sctp_af *sctp_sockaddr_af(struct sctp_sock *opt, union sctp_addr *addr, int len) { struct sctp_af *af; /* Check minimum size. */ if (len < sizeof (struct sockaddr)) return NULL; /* V4 mapped address are really of AF_INET family */ if (addr->sa.sa_family == AF_INET6 && ipv6_addr_v4mapped(&addr->v6.sin6_addr)) { if (!opt->pf->af_supported(AF_INET, opt)) return NULL; } else { /* Does this PF support this AF? */ if (!opt->pf->af_supported(addr->sa.sa_family, opt)) return NULL; } /* If we get this far, af is valid. */ af = sctp_get_af_specific(addr->sa.sa_family); if (len < af->sockaddr_len) return NULL; return af; } /* Bind a local address either to an endpoint or to an association. */ SCTP_STATIC int sctp_do_bind(struct sock *sk, union sctp_addr *addr, int len) { struct net *net = sock_net(sk); struct sctp_sock *sp = sctp_sk(sk); struct sctp_endpoint *ep = sp->ep; struct sctp_bind_addr *bp = &ep->base.bind_addr; struct sctp_af *af; unsigned short snum; int ret = 0; /* Common sockaddr verification. 
*/ af = sctp_sockaddr_af(sp, addr, len); if (!af) { SCTP_DEBUG_PRINTK("sctp_do_bind(sk: %p, newaddr: %p, len: %d) EINVAL\n", sk, addr, len); return -EINVAL; } snum = ntohs(addr->v4.sin_port); SCTP_DEBUG_PRINTK_IPADDR("sctp_do_bind(sk: %p, new addr: ", ", port: %d, new port: %d, len: %d)\n", sk, addr, bp->port, snum, len); /* PF specific bind() address verification. */ if (!sp->pf->bind_verify(sp, addr)) return -EADDRNOTAVAIL; /* We must either be unbound, or bind to the same port. * It's OK to allow 0 ports if we are already bound. * We'll just inhert an already bound port in this case */ if (bp->port) { if (!snum) snum = bp->port; else if (snum != bp->port) { SCTP_DEBUG_PRINTK("sctp_do_bind:" " New port %d does not match existing port " "%d.\n", snum, bp->port); return -EINVAL; } } if (snum && snum < PROT_SOCK && !ns_capable(net->user_ns, CAP_NET_BIND_SERVICE)) return -EACCES; /* See if the address matches any of the addresses we may have * already bound before checking against other endpoints. */ if (sctp_bind_addr_match(bp, addr, sp)) return -EINVAL; /* Make sure we are allowed to bind here. * The function sctp_get_port_local() does duplicate address * detection. */ addr->v4.sin_port = htons(snum); if ((ret = sctp_get_port_local(sk, addr))) { return -EADDRINUSE; } /* Refresh ephemeral port. */ if (!bp->port) bp->port = inet_sk(sk)->inet_num; /* Add the address to the bind address list. * Use GFP_ATOMIC since BHs will be disabled. */ ret = sctp_add_bind_addr(bp, addr, SCTP_ADDR_SRC, GFP_ATOMIC); /* Copy back into socket for getsockname() use. */ if (!ret) { inet_sk(sk)->inet_sport = htons(inet_sk(sk)->inet_num); af->to_sk_saddr(addr, sk); } return ret; } /* ADDIP Section 4.1.1 Congestion Control of ASCONF Chunks * * R1) One and only one ASCONF Chunk MAY be in transit and unacknowledged * at any one time. 
If a sender, after sending an ASCONF chunk, decides * it needs to transfer another ASCONF Chunk, it MUST wait until the * ASCONF-ACK Chunk returns from the previous ASCONF Chunk before sending a * subsequent ASCONF. Note this restriction binds each side, so at any * time two ASCONF may be in-transit on any given association (one sent * from each endpoint). */ static int sctp_send_asconf(struct sctp_association *asoc, struct sctp_chunk *chunk) { struct net *net = sock_net(asoc->base.sk); int retval = 0; /* If there is an outstanding ASCONF chunk, queue it for later * transmission. */ if (asoc->addip_last_asconf) { list_add_tail(&chunk->list, &asoc->addip_chunk_list); goto out; } /* Hold the chunk until an ASCONF_ACK is received. */ sctp_chunk_hold(chunk); retval = sctp_primitive_ASCONF(net, asoc, chunk); if (retval) sctp_chunk_free(chunk); else asoc->addip_last_asconf = chunk; out: return retval; } /* Add a list of addresses as bind addresses to local endpoint or * association. * * Basically run through each address specified in the addrs/addrcnt * array/length pair, determine if it is IPv6 or IPv4 and call * sctp_do_bind() on it. * * If any of them fails, then the operation will be reversed and the * ones that were added will be removed. * * Only sctp_setsockopt_bindx() is supposed to call this function. */ static int sctp_bindx_add(struct sock *sk, struct sockaddr *addrs, int addrcnt) { int cnt; int retval = 0; void *addr_buf; struct sockaddr *sa_addr; struct sctp_af *af; SCTP_DEBUG_PRINTK("sctp_bindx_add (sk: %p, addrs: %p, addrcnt: %d)\n", sk, addrs, addrcnt); addr_buf = addrs; for (cnt = 0; cnt < addrcnt; cnt++) { /* The list may contain either IPv4 or IPv6 address; * determine the address length for walking thru the list. 
*/ sa_addr = addr_buf; af = sctp_get_af_specific(sa_addr->sa_family); if (!af) { retval = -EINVAL; goto err_bindx_add; } retval = sctp_do_bind(sk, (union sctp_addr *)sa_addr, af->sockaddr_len); addr_buf += af->sockaddr_len; err_bindx_add: if (retval < 0) { /* Failed. Cleanup the ones that have been added */ if (cnt > 0) sctp_bindx_rem(sk, addrs, cnt); return retval; } } return retval; } /* Send an ASCONF chunk with Add IP address parameters to all the peers of the * associations that are part of the endpoint indicating that a list of local * addresses are added to the endpoint. * * If any of the addresses is already in the bind address list of the * association, we do not send the chunk for that association. But it will not * affect other associations. * * Only sctp_setsockopt_bindx() is supposed to call this function. */ static int sctp_send_asconf_add_ip(struct sock *sk, struct sockaddr *addrs, int addrcnt) { struct net *net = sock_net(sk); struct sctp_sock *sp; struct sctp_endpoint *ep; struct sctp_association *asoc; struct sctp_bind_addr *bp; struct sctp_chunk *chunk; struct sctp_sockaddr_entry *laddr; union sctp_addr *addr; union sctp_addr saveaddr; void *addr_buf; struct sctp_af *af; struct list_head *p; int i; int retval = 0; if (!net->sctp.addip_enable) return retval; sp = sctp_sk(sk); ep = sp->ep; SCTP_DEBUG_PRINTK("%s: (sk: %p, addrs: %p, addrcnt: %d)\n", __func__, sk, addrs, addrcnt); list_for_each_entry(asoc, &ep->asocs, asocs) { if (!asoc->peer.asconf_capable) continue; if (asoc->peer.addip_disabled_mask & SCTP_PARAM_ADD_IP) continue; if (!sctp_state(asoc, ESTABLISHED)) continue; /* Check if any address in the packed array of addresses is * in the bind address list of the association. If so, * do not send the asconf chunk to its peer, but continue with * other associations. 
*/ addr_buf = addrs; for (i = 0; i < addrcnt; i++) { addr = addr_buf; af = sctp_get_af_specific(addr->v4.sin_family); if (!af) { retval = -EINVAL; goto out; } if (sctp_assoc_lookup_laddr(asoc, addr)) break; addr_buf += af->sockaddr_len; } if (i < addrcnt) continue; /* Use the first valid address in bind addr list of * association as Address Parameter of ASCONF CHUNK. */ bp = &asoc->base.bind_addr; p = bp->address_list.next; laddr = list_entry(p, struct sctp_sockaddr_entry, list); chunk = sctp_make_asconf_update_ip(asoc, &laddr->a, addrs, addrcnt, SCTP_PARAM_ADD_IP); if (!chunk) { retval = -ENOMEM; goto out; } /* Add the new addresses to the bind address list with * use_as_src set to 0. */ addr_buf = addrs; for (i = 0; i < addrcnt; i++) { addr = addr_buf; af = sctp_get_af_specific(addr->v4.sin_family); memcpy(&saveaddr, addr, af->sockaddr_len); retval = sctp_add_bind_addr(bp, &saveaddr, SCTP_ADDR_NEW, GFP_ATOMIC); addr_buf += af->sockaddr_len; } if (asoc->src_out_of_asoc_ok) { struct sctp_transport *trans; list_for_each_entry(trans, &asoc->peer.transport_addr_list, transports) { /* Clear the source and route cache */ dst_release(trans->dst); trans->cwnd = min(4*asoc->pathmtu, max_t(__u32, 2*asoc->pathmtu, 4380)); trans->ssthresh = asoc->peer.i.a_rwnd; trans->rto = asoc->rto_initial; sctp_max_rto(asoc, trans); trans->rtt = trans->srtt = trans->rttvar = 0; sctp_transport_route(trans, NULL, sctp_sk(asoc->base.sk)); } } retval = sctp_send_asconf(asoc, chunk); } out: return retval; } /* Remove a list of addresses from bind addresses list. Do not remove the * last address. * * Basically run through each address specified in the addrs/addrcnt * array/length pair, determine if it is IPv6 or IPv4 and call * sctp_del_bind() on it. * * If any of them fails, then the operation will be reversed and the * ones that were removed will be added back. * * At least one address has to be left; if only one address is * available, the operation will return -EBUSY. 
* * Only sctp_setsockopt_bindx() is supposed to call this function. */ static int sctp_bindx_rem(struct sock *sk, struct sockaddr *addrs, int addrcnt) { struct sctp_sock *sp = sctp_sk(sk); struct sctp_endpoint *ep = sp->ep; int cnt; struct sctp_bind_addr *bp = &ep->base.bind_addr; int retval = 0; void *addr_buf; union sctp_addr *sa_addr; struct sctp_af *af; SCTP_DEBUG_PRINTK("sctp_bindx_rem (sk: %p, addrs: %p, addrcnt: %d)\n", sk, addrs, addrcnt); addr_buf = addrs; for (cnt = 0; cnt < addrcnt; cnt++) { /* If the bind address list is empty or if there is only one * bind address, there is nothing more to be removed (we need * at least one address here). */ if (list_empty(&bp->address_list) || (sctp_list_single_entry(&bp->address_list))) { retval = -EBUSY; goto err_bindx_rem; } sa_addr = addr_buf; af = sctp_get_af_specific(sa_addr->sa.sa_family); if (!af) { retval = -EINVAL; goto err_bindx_rem; } if (!af->addr_valid(sa_addr, sp, NULL)) { retval = -EADDRNOTAVAIL; goto err_bindx_rem; } if (sa_addr->v4.sin_port && sa_addr->v4.sin_port != htons(bp->port)) { retval = -EINVAL; goto err_bindx_rem; } if (!sa_addr->v4.sin_port) sa_addr->v4.sin_port = htons(bp->port); /* FIXME - There is probably a need to check if sk->sk_saddr and * sk->sk_rcv_addr are currently set to one of the addresses to * be removed. This is something which needs to be looked into * when we are fixing the outstanding issues with multi-homing * socket routing and failover schemes. Refer to comments in * sctp_do_bind(). -daisy */ retval = sctp_del_bind_addr(bp, sa_addr); addr_buf += af->sockaddr_len; err_bindx_rem: if (retval < 0) { /* Failed. Add the ones that has been removed back */ if (cnt > 0) sctp_bindx_add(sk, addrs, cnt); return retval; } } return retval; } /* Send an ASCONF chunk with Delete IP address parameters to all the peers of * the associations that are part of the endpoint indicating that a list of * local addresses are removed from the endpoint. 
* * If any of the addresses is already in the bind address list of the * association, we do not send the chunk for that association. But it will not * affect other associations. * * Only sctp_setsockopt_bindx() is supposed to call this function. */ static int sctp_send_asconf_del_ip(struct sock *sk, struct sockaddr *addrs, int addrcnt) { struct net *net = sock_net(sk); struct sctp_sock *sp; struct sctp_endpoint *ep; struct sctp_association *asoc; struct sctp_transport *transport; struct sctp_bind_addr *bp; struct sctp_chunk *chunk; union sctp_addr *laddr; void *addr_buf; struct sctp_af *af; struct sctp_sockaddr_entry *saddr; int i; int retval = 0; int stored = 0; chunk = NULL; if (!net->sctp.addip_enable) return retval; sp = sctp_sk(sk); ep = sp->ep; SCTP_DEBUG_PRINTK("%s: (sk: %p, addrs: %p, addrcnt: %d)\n", __func__, sk, addrs, addrcnt); list_for_each_entry(asoc, &ep->asocs, asocs) { if (!asoc->peer.asconf_capable) continue; if (asoc->peer.addip_disabled_mask & SCTP_PARAM_DEL_IP) continue; if (!sctp_state(asoc, ESTABLISHED)) continue; /* Check if any address in the packed array of addresses is * not present in the bind address list of the association. * If so, do not send the asconf chunk to its peer, but * continue with other associations. */ addr_buf = addrs; for (i = 0; i < addrcnt; i++) { laddr = addr_buf; af = sctp_get_af_specific(laddr->v4.sin_family); if (!af) { retval = -EINVAL; goto out; } if (!sctp_assoc_lookup_laddr(asoc, laddr)) break; addr_buf += af->sockaddr_len; } if (i < addrcnt) continue; /* Find one address in the association's bind address list * that is not in the packed array of addresses. This is to * make sure that we do not delete all the addresses in the * association. 
*/ bp = &asoc->base.bind_addr; laddr = sctp_find_unmatch_addr(bp, (union sctp_addr *)addrs, addrcnt, sp); if ((laddr == NULL) && (addrcnt == 1)) { if (asoc->asconf_addr_del_pending) continue; asoc->asconf_addr_del_pending = kzalloc(sizeof(union sctp_addr), GFP_ATOMIC); if (asoc->asconf_addr_del_pending == NULL) { retval = -ENOMEM; goto out; } asoc->asconf_addr_del_pending->sa.sa_family = addrs->sa_family; asoc->asconf_addr_del_pending->v4.sin_port = htons(bp->port); if (addrs->sa_family == AF_INET) { struct sockaddr_in *sin; sin = (struct sockaddr_in *)addrs; asoc->asconf_addr_del_pending->v4.sin_addr.s_addr = sin->sin_addr.s_addr; } else if (addrs->sa_family == AF_INET6) { struct sockaddr_in6 *sin6; sin6 = (struct sockaddr_in6 *)addrs; asoc->asconf_addr_del_pending->v6.sin6_addr = sin6->sin6_addr; } SCTP_DEBUG_PRINTK_IPADDR("send_asconf_del_ip: keep the last address asoc: %p ", " at %p\n", asoc, asoc->asconf_addr_del_pending, asoc->asconf_addr_del_pending); asoc->src_out_of_asoc_ok = 1; stored = 1; goto skip_mkasconf; } if (laddr == NULL) return -EINVAL; /* We do not need RCU protection throughout this loop * because this is done under a socket lock from the * setsockopt call. */ chunk = sctp_make_asconf_update_ip(asoc, laddr, addrs, addrcnt, SCTP_PARAM_DEL_IP); if (!chunk) { retval = -ENOMEM; goto out; } skip_mkasconf: /* Reset use_as_src flag for the addresses in the bind address * list that are to be deleted. */ addr_buf = addrs; for (i = 0; i < addrcnt; i++) { laddr = addr_buf; af = sctp_get_af_specific(laddr->v4.sin_family); list_for_each_entry(saddr, &bp->address_list, list) { if (sctp_cmp_addr_exact(&saddr->a, laddr)) saddr->state = SCTP_ADDR_DEL; } addr_buf += af->sockaddr_len; } /* Update the route and saddr entries for all the transports * as some of the addresses in the bind address list are * about to be deleted and cannot be used as source addresses. 
*/ list_for_each_entry(transport, &asoc->peer.transport_addr_list, transports) { dst_release(transport->dst); sctp_transport_route(transport, NULL, sctp_sk(asoc->base.sk)); } if (stored) /* We don't need to transmit ASCONF */ continue; retval = sctp_send_asconf(asoc, chunk); } out: return retval; } /* set addr events to assocs in the endpoint. ep and addr_wq must be locked */ int sctp_asconf_mgmt(struct sctp_sock *sp, struct sctp_sockaddr_entry *addrw) { struct sock *sk = sctp_opt2sk(sp); union sctp_addr *addr; struct sctp_af *af; /* It is safe to write port space in caller. */ addr = &addrw->a; addr->v4.sin_port = htons(sp->ep->base.bind_addr.port); af = sctp_get_af_specific(addr->sa.sa_family); if (!af) return -EINVAL; if (sctp_verify_addr(sk, addr, af->sockaddr_len)) return -EINVAL; if (addrw->state == SCTP_ADDR_NEW) return sctp_send_asconf_add_ip(sk, (struct sockaddr *)addr, 1); else return sctp_send_asconf_del_ip(sk, (struct sockaddr *)addr, 1); } /* Helper for tunneling sctp_bindx() requests through sctp_setsockopt() * * API 8.1 * int sctp_bindx(int sd, struct sockaddr *addrs, int addrcnt, * int flags); * * If sd is an IPv4 socket, the addresses passed must be IPv4 addresses. * If the sd is an IPv6 socket, the addresses passed can either be IPv4 * or IPv6 addresses. * * A single address may be specified as INADDR_ANY or IN6ADDR_ANY, see * Section 3.1.2 for this usage. * * addrs is a pointer to an array of one or more socket addresses. Each * address is contained in its appropriate structure (i.e. struct * sockaddr_in or struct sockaddr_in6) the family of the address type * must be used to distinguish the address length (note that this * representation is termed a "packed array" of addresses). The caller * specifies the number of addresses in the array with addrcnt. * * On success, sctp_bindx() returns 0. On failure, sctp_bindx() returns * -1, and sets errno to the appropriate error code. 
* * For SCTP, the port given in each socket address must be the same, or * sctp_bindx() will fail, setting errno to EINVAL. * * The flags parameter is formed from the bitwise OR of zero or more of * the following currently defined flags: * * SCTP_BINDX_ADD_ADDR * * SCTP_BINDX_REM_ADDR * * SCTP_BINDX_ADD_ADDR directs SCTP to add the given addresses to the * association, and SCTP_BINDX_REM_ADDR directs SCTP to remove the given * addresses from the association. The two flags are mutually exclusive; * if both are given, sctp_bindx() will fail with EINVAL. A caller may * not remove all addresses from an association; sctp_bindx() will * reject such an attempt with EINVAL. * * An application can use sctp_bindx(SCTP_BINDX_ADD_ADDR) to associate * additional addresses with an endpoint after calling bind(). Or use * sctp_bindx(SCTP_BINDX_REM_ADDR) to remove some addresses a listening * socket is associated with so that no new association accepted will be * associated with those addresses. If the endpoint supports dynamic * address a SCTP_BINDX_REM_ADDR or SCTP_BINDX_ADD_ADDR may cause a * endpoint to send the appropriate message to the peer to change the * peers address lists. * * Adding and removing addresses from a connected association is * optional functionality. Implementations that do not support this * functionality should return EOPNOTSUPP. * * Basically do nothing but copying the addresses from user to kernel * land and invoking either sctp_bindx_add() or sctp_bindx_rem() on the sk. * This is used for tunneling the sctp_bindx() request through sctp_setsockopt() * from userspace. * * We don't use copy_from_user() for optimization: we first do the * sanity checks (buffer size -fast- and access check-healthy * pointer); if all of those succeed, then we can alloc the memory * (expensive operation) needed to copy the data to kernel. Then we do * the copying without checking the user space area * (__copy_from_user()). 
* * On exit there is no need to do sockfd_put(), sys_setsockopt() does * it. * * sk The sk of the socket * addrs The pointer to the addresses in user land * addrssize Size of the addrs buffer * op Operation to perform (add or remove, see the flags of * sctp_bindx) * * Returns 0 if ok, <0 errno code on error. */ SCTP_STATIC int sctp_setsockopt_bindx(struct sock* sk, struct sockaddr __user *addrs, int addrs_size, int op) { struct sockaddr *kaddrs; int err; int addrcnt = 0; int walk_size = 0; struct sockaddr *sa_addr; void *addr_buf; struct sctp_af *af; SCTP_DEBUG_PRINTK("sctp_setsockopt_bindx: sk %p addrs %p" " addrs_size %d opt %d\n", sk, addrs, addrs_size, op); if (unlikely(addrs_size <= 0)) return -EINVAL; /* Check the user passed a healthy pointer. */ if (unlikely(!access_ok(VERIFY_READ, addrs, addrs_size))) return -EFAULT; /* Alloc space for the address array in kernel memory. */ kaddrs = kmalloc(addrs_size, GFP_KERNEL); if (unlikely(!kaddrs)) return -ENOMEM; if (__copy_from_user(kaddrs, addrs, addrs_size)) { kfree(kaddrs); return -EFAULT; } /* Walk through the addrs buffer and count the number of addresses. */ addr_buf = kaddrs; while (walk_size < addrs_size) { if (walk_size + sizeof(sa_family_t) > addrs_size) { kfree(kaddrs); return -EINVAL; } sa_addr = addr_buf; af = sctp_get_af_specific(sa_addr->sa_family); /* If the address family is not supported or if this address * causes the address buffer to overflow return EINVAL. */ if (!af || (walk_size + af->sockaddr_len) > addrs_size) { kfree(kaddrs); return -EINVAL; } addrcnt++; addr_buf += af->sockaddr_len; walk_size += af->sockaddr_len; } /* Do the work. 
*/ switch (op) { case SCTP_BINDX_ADD_ADDR: err = sctp_bindx_add(sk, kaddrs, addrcnt); if (err) goto out; err = sctp_send_asconf_add_ip(sk, kaddrs, addrcnt); break; case SCTP_BINDX_REM_ADDR: err = sctp_bindx_rem(sk, kaddrs, addrcnt); if (err) goto out; err = sctp_send_asconf_del_ip(sk, kaddrs, addrcnt); break; default: err = -EINVAL; break; } out: kfree(kaddrs); return err; } /* __sctp_connect(struct sock* sk, struct sockaddr *kaddrs, int addrs_size) * * Common routine for handling connect() and sctp_connectx(). * Connect will come in with just a single address. */ static int __sctp_connect(struct sock* sk, struct sockaddr *kaddrs, int addrs_size, sctp_assoc_t *assoc_id) { struct net *net = sock_net(sk); struct sctp_sock *sp; struct sctp_endpoint *ep; struct sctp_association *asoc = NULL; struct sctp_association *asoc2; struct sctp_transport *transport; union sctp_addr to; struct sctp_af *af; sctp_scope_t scope; long timeo; int err = 0; int addrcnt = 0; int walk_size = 0; union sctp_addr *sa_addr = NULL; void *addr_buf; unsigned short port; unsigned int f_flags = 0; sp = sctp_sk(sk); ep = sp->ep; /* connect() cannot be done on a socket that is already in ESTABLISHED * state - UDP-style peeled off socket or a TCP-style socket that * is already connected. * It cannot be done even on a TCP-style listening socket. */ if (sctp_sstate(sk, ESTABLISHED) || (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING))) { err = -EISCONN; goto out_free; } /* Walk through the addrs buffer and count the number of addresses. */ addr_buf = kaddrs; while (walk_size < addrs_size) { if (walk_size + sizeof(sa_family_t) > addrs_size) { err = -EINVAL; goto out_free; } sa_addr = addr_buf; af = sctp_get_af_specific(sa_addr->sa.sa_family); /* If the address family is not supported or if this address * causes the address buffer to overflow return EINVAL. 
*/ if (!af || (walk_size + af->sockaddr_len) > addrs_size) { err = -EINVAL; goto out_free; } port = ntohs(sa_addr->v4.sin_port); /* Save current address so we can work with it */ memcpy(&to, sa_addr, af->sockaddr_len); err = sctp_verify_addr(sk, &to, af->sockaddr_len); if (err) goto out_free; /* Make sure the destination port is correctly set * in all addresses. */ if (asoc && asoc->peer.port && asoc->peer.port != port) { err = -EINVAL; goto out_free; } /* Check if there already is a matching association on the * endpoint (other than the one created here). */ asoc2 = sctp_endpoint_lookup_assoc(ep, &to, &transport); if (asoc2 && asoc2 != asoc) { if (asoc2->state >= SCTP_STATE_ESTABLISHED) err = -EISCONN; else err = -EALREADY; goto out_free; } /* If we could not find a matching association on the endpoint, * make sure that there is no peeled-off association matching * the peer address even on another socket. */ if (sctp_endpoint_is_peeled_off(ep, &to)) { err = -EADDRNOTAVAIL; goto out_free; } if (!asoc) { /* If a bind() or sctp_bindx() is not called prior to * an sctp_connectx() call, the system picks an * ephemeral port and will choose an address set * equivalent to binding with a wildcard address. */ if (!ep->base.bind_addr.port) { if (sctp_autobind(sk)) { err = -EAGAIN; goto out_free; } } else { /* * If an unprivileged user inherits a 1-many * style socket with open associations on a * privileged port, it MAY be permitted to * accept new associations, but it SHOULD NOT * be permitted to open new associations. */ if (ep->base.bind_addr.port < PROT_SOCK && !ns_capable(net->user_ns, CAP_NET_BIND_SERVICE)) { err = -EACCES; goto out_free; } } scope = sctp_scope(&to); asoc = sctp_association_new(ep, sk, scope, GFP_KERNEL); if (!asoc) { err = -ENOMEM; goto out_free; } err = sctp_assoc_set_bind_addr_from_ep(asoc, scope, GFP_KERNEL); if (err < 0) { goto out_free; } } /* Prime the peer's transport structures. 
*/ transport = sctp_assoc_add_peer(asoc, &to, GFP_KERNEL, SCTP_UNKNOWN); if (!transport) { err = -ENOMEM; goto out_free; } addrcnt++; addr_buf += af->sockaddr_len; walk_size += af->sockaddr_len; } /* In case the user of sctp_connectx() wants an association * id back, assign one now. */ if (assoc_id) { err = sctp_assoc_set_id(asoc, GFP_KERNEL); if (err < 0) goto out_free; } err = sctp_primitive_ASSOCIATE(net, asoc, NULL); if (err < 0) { goto out_free; } /* Initialize sk's dport and daddr for getpeername() */ inet_sk(sk)->inet_dport = htons(asoc->peer.port); af = sctp_get_af_specific(sa_addr->sa.sa_family); af->to_sk_daddr(sa_addr, sk); sk->sk_err = 0; /* in-kernel sockets don't generally have a file allocated to them * if all they do is call sock_create_kern(). */ if (sk->sk_socket->file) f_flags = sk->sk_socket->file->f_flags; timeo = sock_sndtimeo(sk, f_flags & O_NONBLOCK); err = sctp_wait_for_connect(asoc, &timeo); if ((err == 0 || err == -EINPROGRESS) && assoc_id) *assoc_id = asoc->assoc_id; /* Don't free association on exit. */ asoc = NULL; out_free: SCTP_DEBUG_PRINTK("About to exit __sctp_connect() free asoc: %p" " kaddrs: %p err: %d\n", asoc, kaddrs, err); if (asoc) { /* sctp_primitive_ASSOCIATE may have added this association * To the hash table, try to unhash it, just in case, its a noop * if it wasn't hashed so we're safe */ sctp_unhash_established(asoc); sctp_association_free(asoc); } return err; } /* Helper for tunneling sctp_connectx() requests through sctp_setsockopt() * * API 8.9 * int sctp_connectx(int sd, struct sockaddr *addrs, int addrcnt, * sctp_assoc_t *asoc); * * If sd is an IPv4 socket, the addresses passed must be IPv4 addresses. * If the sd is an IPv6 socket, the addresses passed can either be IPv4 * or IPv6 addresses. * * A single address may be specified as INADDR_ANY or IN6ADDR_ANY, see * Section 3.1.2 for this usage. * * addrs is a pointer to an array of one or more socket addresses. 
Each
 * address is contained in its appropriate structure (i.e. struct
 * sockaddr_in or struct sockaddr_in6) the family of the address type
 * must be used to distinguish the address length (note that this
 * representation is termed a "packed array" of addresses). The caller
 * specifies the number of addresses in the array with addrcnt.
 *
 * On success, sctp_connectx() returns 0. It also sets the assoc_id to
 * the association id of the new association.  On failure, sctp_connectx()
 * returns -1, and sets errno to the appropriate error code.  The assoc_id
 * is not touched by the kernel.
 *
 * For SCTP, the port given in each socket address must be the same, or
 * sctp_connectx() will fail, setting errno to EINVAL.
 *
 * An application can use sctp_connectx to initiate an association with
 * an endpoint that is multi-homed.  Much like sctp_bindx() this call
 * allows a caller to specify multiple addresses at which a peer can be
 * reached.  The way the SCTP stack uses the list of addresses to set up
 * the association is implementation dependent.  This function only
 * specifies that the stack will try to make use of all the addresses in
 * the list when needed.
 *
 * Note that the list of addresses passed in is only used for setting up
 * the association.  It does not necessarily equal the set of addresses
 * the peer uses for the resulting association.  If the caller wants to
 * find out the set of peer addresses, it must use sctp_getpaddrs() to
 * retrieve them after the association has been set up.
 *
 * Basically do nothing but copying the addresses from user to kernel
 * land and invoking sctp_connectx().  This is used for tunneling
 * the sctp_connectx() request through sctp_setsockopt() from userspace.
 *
 * We don't use copy_from_user() for optimization: we first do the
 * sanity checks (buffer size -fast- and access check-healthy
 * pointer); if all of those succeed, then we can alloc the memory
 * (expensive operation) needed to copy the data to kernel. 
Then we do
 * the copying without checking the user space area
 * (__copy_from_user()).
 *
 * On exit there is no need to do sockfd_put(), sys_setsockopt() does
 * it.
 *
 * sk        The sk of the socket
 * addrs     The pointer to the addresses in user land
 * addrssize Size of the addrs buffer
 *
 * Returns >=0 if ok, <0 errno code on error.
 */
SCTP_STATIC int __sctp_setsockopt_connectx(struct sock* sk,
				      struct sockaddr __user *addrs,
				      int addrs_size,
				      sctp_assoc_t *assoc_id)
{
	int err = 0;
	struct sockaddr *kaddrs;

	SCTP_DEBUG_PRINTK("%s - sk %p addrs %p addrs_size %d\n",
			  __func__, sk, addrs, addrs_size);

	/* Reject empty or negative buffers before touching user memory. */
	if (unlikely(addrs_size <= 0))
		return -EINVAL;

	/* Check the user passed a healthy pointer.  */
	if (unlikely(!access_ok(VERIFY_READ, addrs, addrs_size)))
		return -EFAULT;

	/* Alloc space for the address array in kernel memory.  */
	kaddrs = kmalloc(addrs_size, GFP_KERNEL);
	if (unlikely(!kaddrs))
		return -ENOMEM;

	/* The range was validated by access_ok() above, so the unchecked
	 * __copy_from_user() variant is safe here.
	 */
	if (__copy_from_user(kaddrs, addrs, addrs_size)) {
		err = -EFAULT;
	} else {
		err = __sctp_connect(sk, kaddrs, addrs_size, assoc_id);
	}

	kfree(kaddrs);

	return err;
}

/*
 * This is an older interface.  It's kept for backward compatibility
 * to the option that doesn't provide association id.
 */
SCTP_STATIC int sctp_setsockopt_connectx_old(struct sock* sk,
				      struct sockaddr __user *addrs,
				      int addrs_size)
{
	return __sctp_setsockopt_connectx(sk, addrs, addrs_size, NULL);
}

/*
 * New interface for the API.  Since the API is done with a socket
 * option, to make it simple we feed back the association id as a return
 * indication to the call.  Error is always negative and association id is
 * always positive.
 */
SCTP_STATIC int sctp_setsockopt_connectx(struct sock* sk,
				      struct sockaddr __user *addrs,
				      int addrs_size)
{
	sctp_assoc_t assoc_id = 0;
	int err = 0;

	err = __sctp_setsockopt_connectx(sk, addrs, addrs_size, &assoc_id);

	if (err)
		return err;
	else
		return assoc_id;
}

/*
 * New (hopefully final) interface for the API.
 * We use the sctp_getaddrs_old structure so that use-space library
 * can avoid any unnecessary allocations. The only different part
 * is that we store the actual length of the address buffer into the
 * addrs_num structure member. That way we can re-use the existing
 * code.
 */
#ifdef CONFIG_COMPAT
/* 32-bit layout of struct sctp_getaddrs_old; the user pointer is carried
 * as a compat_uptr_t and converted with compat_ptr() below.
 */
struct compat_sctp_getaddrs_old {
	sctp_assoc_t	assoc_id;
	s32		addr_num;
	compat_uptr_t	addrs;		/* struct sockaddr * */
};
#endif

SCTP_STATIC int sctp_getsockopt_connectx3(struct sock* sk, int len,
					char __user *optval,
					int __user *optlen)
{
	struct sctp_getaddrs_old param;
	sctp_assoc_t assoc_id = 0;
	int err = 0;

#ifdef CONFIG_COMPAT
	if (is_compat_task()) {
		struct compat_sctp_getaddrs_old param32;

		if (len < sizeof(param32))
			return -EINVAL;
		if (copy_from_user(&param32, optval, sizeof(param32)))
			return -EFAULT;

		param.assoc_id = param32.assoc_id;
		param.addr_num = param32.addr_num;
		param.addrs = compat_ptr(param32.addrs);
	} else
#endif
	{
		if (len < sizeof(param))
			return -EINVAL;
		if (copy_from_user(&param, optval, sizeof(param)))
			return -EFAULT;
	}

	err = __sctp_setsockopt_connectx(sk, (struct sockaddr __user *)
					 param.addrs, param.addr_num,
					 &assoc_id);
	/* -EINPROGRESS means a non-blocking connect was started; the
	 * association id is still valid and is reported back to the caller.
	 */
	if (err == 0 || err == -EINPROGRESS) {
		if (copy_to_user(optval, &assoc_id, sizeof(assoc_id)))
			return -EFAULT;
		if (put_user(sizeof(assoc_id), optlen))
			return -EFAULT;
	}

	return err;
}

/* API 3.1.4 close() - UDP Style Syntax
 * Applications use close() to perform graceful shutdown (as described in
 * Section 10.1 of [SCTP]) on ALL the associations currently represented
 * by a UDP-style socket.
 *
 * The syntax is
 *
 *   ret = close(int sd);
 *
 *   sd      - the socket descriptor of the associations to be closed.
 *
 * To gracefully shutdown a specific association represented by the
 * UDP-style socket, an application should use the sendmsg() call,
 * passing no user data, but including the appropriate flag in the
 * ancillary data (see Section xxxx).
 *
 * If sd in the close() call is a branched-off socket representing only
 * one association, the shutdown is performed on that association only.
 *
 * 4.1.6 close() - TCP Style Syntax
 *
 * Applications use close() to gracefully close down an association.
 *
 * The syntax is:
 *
 *    int close(int sd);
 *
 *      sd      - the socket descriptor of the association to be closed.
 *
 * After an application calls close() on a socket descriptor, no further
 * socket operations will succeed on that descriptor.
 *
 * API 7.1.4 SO_LINGER
 *
 * An application using the TCP-style socket can use this option to
 * perform the SCTP ABORT primitive.  The linger option structure is:
 *
 *  struct  linger {
 *     int     l_onoff;                // option on/off
 *     int     l_linger;               // linger time
 * };
 *
 * To enable the option, set l_onoff to 1.  If the l_linger value is set
 * to 0, calling close() is the same as the ABORT primitive.  If the
 * value is set to a negative value, the setsockopt() call will return
 * an error.  If the value is set to a positive value linger_time, the
 * close() can be blocked for at most linger_time ms.  If the graceful
 * shutdown phase does not finish during this period, close() will
 * return but the graceful shutdown phase continues in the system.
 */
SCTP_STATIC void sctp_close(struct sock *sk, long timeout)
{
	struct net *net = sock_net(sk);
	struct sctp_endpoint *ep;
	struct sctp_association *asoc;
	struct list_head *pos, *temp;
	unsigned int data_was_unread;

	SCTP_DEBUG_PRINTK("sctp_close(sk: 0x%p, timeout:%ld)\n", sk, timeout);

	sctp_lock_sock(sk);
	sk->sk_shutdown = SHUTDOWN_MASK;
	sk->sk_state = SCTP_SS_CLOSING;

	ep = sctp_sk(sk)->ep;

	/* Clean up any skbs sitting on the receive queue.  */
	data_was_unread = sctp_queue_purge_ulpevents(&sk->sk_receive_queue);
	data_was_unread += sctp_queue_purge_ulpevents(&sctp_sk(sk)->pd_lobby);

	/* Walk all associations on an endpoint.  */
	list_for_each_safe(pos, temp, &ep->asocs) {
		asoc = list_entry(pos, struct sctp_association, asocs);

		if (sctp_style(sk, TCP)) {
			/* A closed association can still be in the list if
			 * it belongs to a TCP-style listening socket that is
			 * not yet accepted. If so, free it. If not, send an
			 * ABORT or SHUTDOWN based on the linger options.
			 */
			if (sctp_state(asoc, CLOSED)) {
				sctp_unhash_established(asoc);
				sctp_association_free(asoc);
				continue;
			}
		}

		/* Unread data, pending reassembly/ordering queues, or
		 * SO_LINGER with a zero timeout all mean the graceful
		 * shutdown is skipped and an ABORT is sent instead.
		 */
		if (data_was_unread || !skb_queue_empty(&asoc->ulpq.lobby) ||
		    !skb_queue_empty(&asoc->ulpq.reasm) ||
		    (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime)) {
			struct sctp_chunk *chunk;

			chunk = sctp_make_abort_user(asoc, NULL, 0);
			sctp_primitive_ABORT(net, asoc, chunk);
		} else
			sctp_primitive_SHUTDOWN(net, asoc, NULL);
	}

	/* On a TCP-style socket, block for at most linger_time if set. */
	if (sctp_style(sk, TCP) && timeout)
		sctp_wait_for_close(sk, timeout);

	/* This will run the backlog queue.  */
	sctp_release_sock(sk);

	/* Supposedly, no process has access to the socket, but
	 * the net layers still may.
	 * Also, sctp_destroy_sock() needs to be called with addr_wq_lock
	 * held and that should be grabbed before socket lock.
	 */
	spin_lock_bh(&net->sctp.addr_wq_lock);
	sctp_bh_lock_sock(sk);

	/* Hold the sock, since sk_common_release() will put sock_put()
	 * and we have just a little more cleanup.
	 */
	sock_hold(sk);
	sk_common_release(sk);

	sctp_bh_unlock_sock(sk);
	spin_unlock_bh(&net->sctp.addr_wq_lock);

	/* Drop the reference taken above; this may be the final put. */
	sock_put(sk);

	SCTP_DBG_OBJCNT_DEC(sock);
}

/* Handle EPIPE error.  Maps -EPIPE to any pending socket error (if set)
 * and raises SIGPIPE unless the caller passed MSG_NOSIGNAL.
 */
static int sctp_error(struct sock *sk, int flags, int err)
{
	if (err == -EPIPE)
		err = sock_error(sk) ? : -EPIPE;
	if (err == -EPIPE && !(flags & MSG_NOSIGNAL))
		send_sig(SIGPIPE, current, 0);
	return err;
}

/* API 3.1.3 sendmsg() - UDP Style Syntax
 *
 * An application uses sendmsg() and recvmsg() calls to transmit data to
 * and receive data from its peer.
 *
 * ssize_t sendmsg(int socket, const struct msghdr *message,
 *                  int flags);
 *
 *    socket  - the socket descriptor of the endpoint.
 *    message - pointer to the msghdr structure which contains a single
 *              user message and possibly some ancillary data.
 *
 *              See Section 5 for complete description of the data
 *              structures.
 *
 *    flags   - flags sent or received with the user message, see Section
 *              5 for complete description of the flags.
 *
 * Note:  This function could use a rewrite especially when explicit
 * connect support comes in.
 */
/* BUG:  We do not implement the equivalent of sk_stream_wait_memory(). */

SCTP_STATIC int sctp_msghdr_parse(const struct msghdr *, sctp_cmsgs_t *);

SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
			     struct msghdr *msg, size_t msg_len)
{
	struct net *net = sock_net(sk);
	struct sctp_sock *sp;
	struct sctp_endpoint *ep;
	struct sctp_association *new_asoc=NULL, *asoc=NULL;
	struct sctp_transport *transport, *chunk_tp;
	struct sctp_chunk *chunk;
	union sctp_addr to;
	struct sockaddr *msg_name = NULL;
	struct sctp_sndrcvinfo default_sinfo;
	struct sctp_sndrcvinfo *sinfo;
	struct sctp_initmsg *sinit;
	sctp_assoc_t associd = 0;
	sctp_cmsgs_t cmsgs = { NULL };
	int err;
	sctp_scope_t scope;
	long timeo;
	__u16 sinfo_flags = 0;
	struct sctp_datamsg *datamsg;
	int msg_flags = msg->msg_flags;

	SCTP_DEBUG_PRINTK("sctp_sendmsg(sk: %p, msg: %p, msg_len: %zu)\n",
			  sk, msg, msg_len);

	err = 0;
	sp = sctp_sk(sk);
	ep = sp->ep;

	SCTP_DEBUG_PRINTK("Using endpoint: %p.\n", ep);

	/* We cannot send a message over a TCP-style listening socket. */
	if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING)) {
		err = -EPIPE;
		goto out_nounlock;
	}

	/* Parse out the SCTP CMSGs.  */
	err = sctp_msghdr_parse(msg, &cmsgs);

	if (err) {
		SCTP_DEBUG_PRINTK("msghdr parse err = %x\n", err);
		goto out_nounlock;
	}

	/* Fetch the destination address for this packet.  This
	 * address only selects the association--it is not necessarily
	 * the address we will send to.
	 * For a peeled-off socket, msg_name is ignored.
	 */
	if (!sctp_style(sk, UDP_HIGH_BANDWIDTH) && msg->msg_name) {
		int msg_namelen = msg->msg_namelen;

		err = sctp_verify_addr(sk, (union sctp_addr *)msg->msg_name,
				       msg_namelen);
		if (err)
			return err;

		if (msg_namelen > sizeof(to))
			msg_namelen = sizeof(to);
		memcpy(&to, msg->msg_name, msg_namelen);
		msg_name = msg->msg_name;
	}

	sinfo = cmsgs.info;
	sinit = cmsgs.init;

	/* Did the user specify SNDRCVINFO?  */
	if (sinfo) {
		sinfo_flags = sinfo->sinfo_flags;
		associd = sinfo->sinfo_assoc_id;
	}

	SCTP_DEBUG_PRINTK("msg_len: %zu, sinfo_flags: 0x%x\n",
			  msg_len, sinfo_flags);

	/* SCTP_EOF or SCTP_ABORT cannot be set on a TCP-style socket. */
	if (sctp_style(sk, TCP) && (sinfo_flags & (SCTP_EOF | SCTP_ABORT))) {
		err = -EINVAL;
		goto out_nounlock;
	}

	/* If SCTP_EOF is set, no data can be sent. Disallow sending zero
	 * length messages when SCTP_EOF|SCTP_ABORT is not set.
	 * If SCTP_ABORT is set, the message length could be non zero with
	 * the msg_iov set to the user abort reason.
	 */
	if (((sinfo_flags & SCTP_EOF) && (msg_len > 0)) ||
	    (!(sinfo_flags & (SCTP_EOF|SCTP_ABORT)) && (msg_len == 0))) {
		err = -EINVAL;
		goto out_nounlock;
	}

	/* If SCTP_ADDR_OVER is set, there must be an address
	 * specified in msg_name.
	 */
	if ((sinfo_flags & SCTP_ADDR_OVER) && (!msg->msg_name)) {
		err = -EINVAL;
		goto out_nounlock;
	}

	transport = NULL;

	SCTP_DEBUG_PRINTK("About to look up association.\n");

	sctp_lock_sock(sk);

	/* If a msg_name has been specified, assume this is to be used.
	 * Otherwise fall back to the association id from SNDRCVINFO.
	 */
	if (msg_name) {
		/* Look for a matching association on the endpoint. */
		asoc = sctp_endpoint_lookup_assoc(ep, &to, &transport);
		if (!asoc) {
			/* If we could not find a matching association on the
			 * endpoint, make sure that it is not a TCP-style
			 * socket that already has an association or there is
			 * no peeled-off association on another socket.
			 */
			if ((sctp_style(sk, TCP) &&
			     sctp_sstate(sk, ESTABLISHED)) ||
			    sctp_endpoint_is_peeled_off(ep, &to)) {
				err = -EADDRNOTAVAIL;
				goto out_unlock;
			}
		}
	} else {
		asoc = sctp_id2assoc(sk, associd);
		if (!asoc) {
			err = -EPIPE;
			goto out_unlock;
		}
	}

	if (asoc) {
		SCTP_DEBUG_PRINTK("Just looked up association: %p.\n", asoc);

		/* We cannot send a message on a TCP-style SCTP_SS_ESTABLISHED
		 * socket that has an association in CLOSED state. This can
		 * happen when an accepted socket has an association that is
		 * already CLOSED.
		 */
		if (sctp_state(asoc, CLOSED) && sctp_style(sk, TCP)) {
			err = -EPIPE;
			goto out_unlock;
		}

		if (sinfo_flags & SCTP_EOF) {
			SCTP_DEBUG_PRINTK("Shutting down association: %p\n",
					  asoc);
			sctp_primitive_SHUTDOWN(net, asoc, NULL);
			err = 0;
			goto out_unlock;
		}
		if (sinfo_flags & SCTP_ABORT) {

			chunk = sctp_make_abort_user(asoc, msg, msg_len);
			if (!chunk) {
				err = -ENOMEM;
				goto out_unlock;
			}

			SCTP_DEBUG_PRINTK("Aborting association: %p\n", asoc);
			sctp_primitive_ABORT(net, asoc, chunk);
			err = 0;
			goto out_unlock;
		}
	}

	/* Do we need to create the association?  */
	if (!asoc) {
		SCTP_DEBUG_PRINTK("There is no association yet.\n");

		if (sinfo_flags & (SCTP_EOF | SCTP_ABORT)) {
			err = -EINVAL;
			goto out_unlock;
		}

		/* Check for invalid stream against the stream counts,
		 * either the default or the user specified stream counts.
		 */
		if (sinfo) {
			if (!sinit || (sinit && !sinit->sinit_num_ostreams)) {
				/* Check against the defaults. */
				if (sinfo->sinfo_stream >=
				    sp->initmsg.sinit_num_ostreams) {
					err = -EINVAL;
					goto out_unlock;
				}
			} else {
				/* Check against the requested.  */
				if (sinfo->sinfo_stream >=
				    sinit->sinit_num_ostreams) {
					err = -EINVAL;
					goto out_unlock;
				}
			}
		}

		/*
		 * API 3.1.2 bind() - UDP Style Syntax
		 * If a bind() or sctp_bindx() is not called prior to a
		 * sendmsg() call that initiates a new association, the
		 * system picks an ephemeral port and will choose an address
		 * set equivalent to binding with a wildcard address.
		 */
		if (!ep->base.bind_addr.port) {
			if (sctp_autobind(sk)) {
				err = -EAGAIN;
				goto out_unlock;
			}
		} else {
			/*
			 * If an unprivileged user inherits a one-to-many
			 * style socket with open associations on a privileged
			 * port, it MAY be permitted to accept new associations,
			 * but it SHOULD NOT be permitted to open new
			 * associations.
			 */
			if (ep->base.bind_addr.port < PROT_SOCK &&
			    !ns_capable(net->user_ns, CAP_NET_BIND_SERVICE)) {
				err = -EACCES;
				goto out_unlock;
			}
		}

		scope = sctp_scope(&to);
		new_asoc = sctp_association_new(ep, sk, scope, GFP_KERNEL);
		if (!new_asoc) {
			err = -ENOMEM;
			goto out_unlock;
		}
		asoc = new_asoc;
		err = sctp_assoc_set_bind_addr_from_ep(asoc, scope, GFP_KERNEL);
		if (err < 0) {
			err = -ENOMEM;
			goto out_free;
		}

		/* If the SCTP_INIT ancillary data is specified, set all
		 * the association init values accordingly.
		 */
		if (sinit) {
			if (sinit->sinit_num_ostreams) {
				asoc->c.sinit_num_ostreams =
					sinit->sinit_num_ostreams;
			}
			if (sinit->sinit_max_instreams) {
				asoc->c.sinit_max_instreams =
					sinit->sinit_max_instreams;
			}
			if (sinit->sinit_max_attempts) {
				asoc->max_init_attempts
					= sinit->sinit_max_attempts;
			}
			if (sinit->sinit_max_init_timeo) {
				asoc->max_init_timeo =
				 msecs_to_jiffies(sinit->sinit_max_init_timeo);
			}
		}

		/* Prime the peer's transport structures.  */
		transport = sctp_assoc_add_peer(asoc, &to, GFP_KERNEL, SCTP_UNKNOWN);
		if (!transport) {
			err = -ENOMEM;
			goto out_free;
		}
	}

	/* ASSERT: we have a valid association at this point.  */
	SCTP_DEBUG_PRINTK("We have a valid association.\n");

	if (!sinfo) {
		/* If the user didn't specify SNDRCVINFO, make up one with
		 * some defaults.
		 */
		memset(&default_sinfo, 0, sizeof(default_sinfo));
		default_sinfo.sinfo_stream = asoc->default_stream;
		default_sinfo.sinfo_flags = asoc->default_flags;
		default_sinfo.sinfo_ppid = asoc->default_ppid;
		default_sinfo.sinfo_context = asoc->default_context;
		default_sinfo.sinfo_timetolive = asoc->default_timetolive;
		default_sinfo.sinfo_assoc_id = sctp_assoc2id(asoc);
		sinfo = &default_sinfo;
	}

	/* API 7.1.7, the sndbuf size per association bounds the
	 * maximum size of data that can be sent in a single send call.
	 */
	if (msg_len > sk->sk_sndbuf) {
		err = -EMSGSIZE;
		goto out_free;
	}

	if (asoc->pmtu_pending)
		sctp_assoc_pending_pmtu(sk, asoc);

	/* If fragmentation is disabled and the message length exceeds the
	 * association fragmentation point, return EMSGSIZE.  The I-D
	 * does not specify what this error is, but this looks like
	 * a great fit.
	 */
	if (sctp_sk(sk)->disable_fragments && (msg_len > asoc->frag_point)) {
		err = -EMSGSIZE;
		goto out_free;
	}

	/* Check for invalid stream. */
	if (sinfo->sinfo_stream >= asoc->c.sinit_num_ostreams) {
		err = -EINVAL;
		goto out_free;
	}

	/* Block (subject to the send timeout) until write space is
	 * available for the whole message.
	 */
	timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
	if (!sctp_wspace(asoc)) {
		err = sctp_wait_for_sndbuf(asoc, &timeo, msg_len);
		if (err)
			goto out_free;
	}

	/* If an address is passed with the sendto/sendmsg call, it is used
	 * to override the primary destination address in the TCP model, or
	 * when SCTP_ADDR_OVER flag is set in the UDP model.
	 */
	if ((sctp_style(sk, TCP) && msg_name) ||
	    (sinfo_flags & SCTP_ADDR_OVER)) {
		chunk_tp = sctp_assoc_lookup_paddr(asoc, &to);
		if (!chunk_tp) {
			err = -EINVAL;
			goto out_free;
		}
	} else
		chunk_tp = NULL;

	/* Auto-connect, if we aren't connected already. */
	if (sctp_state(asoc, CLOSED)) {
		err = sctp_primitive_ASSOCIATE(net, asoc, NULL);
		if (err < 0)
			goto out_free;
		SCTP_DEBUG_PRINTK("We associated primitively.\n");
	}

	/* Break the message into multiple chunks of maximum size. */
	datamsg = sctp_datamsg_from_user(asoc, sinfo, msg, msg_len);
	if (IS_ERR(datamsg)) {
		err = PTR_ERR(datamsg);
		goto out_free;
	}

	/* Now send the (possibly) fragmented message. */
	list_for_each_entry(chunk, &datamsg->chunks, frag_list) {
		sctp_chunk_hold(chunk);

		/* Do accounting for the write space.  */
		sctp_set_owner_w(chunk);

		chunk->transport = chunk_tp;
	}

	/* Send it to the lower layers.  Note:  all chunks
	 * must either fail or succeed.   The lower layer
	 * works that way today.  Keep it that way or this
	 * breaks.
	 */
	err = sctp_primitive_SEND(net, asoc, datamsg);
	/* Did the lower layer accept the chunk? */
	if (err)
		sctp_datamsg_free(datamsg);
	else
		sctp_datamsg_put(datamsg);

	SCTP_DEBUG_PRINTK("We sent primitively.\n");

	if (err)
		goto out_free;
	else
		err = msg_len;

	/* If we are already past ASSOCIATE, the lower
	 * layers are responsible for association cleanup.
	 */
	goto out_unlock;

out_free:
	/* Only an association created by this call is torn down on error;
	 * a pre-existing one is left alone.
	 */
	if (new_asoc) {
		sctp_unhash_established(asoc);
		sctp_association_free(asoc);
	}
out_unlock:
	sctp_release_sock(sk);

out_nounlock:
	return sctp_error(sk, msg_flags, err);

#if 0
do_sock_err:
	if (msg_len)
		err = msg_len;
	else
		err = sock_error(sk);
	goto out;

do_interrupted:
	if (msg_len)
		err = msg_len;
	goto out;
#endif /* 0 */
}

/* This is an extended version of skb_pull() that removes the data from the
 * start of a skb even when data is spread across the list of skb's in the
 * frag_list. len specifies the total amount of data that needs to be removed.
 * when 'len' bytes could be removed from the skb, it returns 0.
 * If 'len' exceeds the total skb length, it returns the no. of bytes that
 * could not be removed.
 */
static int sctp_skb_pull(struct sk_buff *skb, int len)
{
	struct sk_buff *list;
	int skb_len = skb_headlen(skb);
	int rlen;

	/* Fast path: the linear head alone covers the request. */
	if (len <= skb_len) {
		__skb_pull(skb, len);
		return 0;
	}
	len -= skb_len;
	__skb_pull(skb, skb_len);

	/* Recurse into each fragment, trimming the parent's byte counts
	 * by however much was actually removed at each step.
	 */
	skb_walk_frags(skb, list) {
		rlen = sctp_skb_pull(list, len);
		skb->len -= (len-rlen);
		skb->data_len -= (len-rlen);

		if (!rlen)
			return 0;

		len = rlen;
	}

	return len;
}

/* API 3.1.3  recvmsg() - UDP Style Syntax
 *
 *  ssize_t recvmsg(int socket, struct msghdr *message,
 *                    int flags);
 *
 *  socket  - the socket descriptor of the endpoint.
 *  message - pointer to the msghdr structure which contains a single
 *            user message and possibly some ancillary data.
 *
 *            See Section 5 for complete description of the data
 *            structures.
 *
 *  flags   - flags sent or received with the user message, see Section
 *            5 for complete description of the flags.
 */
static struct sk_buff *sctp_skb_recv_datagram(struct sock *, int, int, int *);

SCTP_STATIC int sctp_recvmsg(struct kiocb *iocb, struct sock *sk,
			     struct msghdr *msg, size_t len, int noblock,
			     int flags, int *addr_len)
{
	struct sctp_ulpevent *event = NULL;
	struct sctp_sock *sp = sctp_sk(sk);
	struct sk_buff *skb;
	int copied;
	int err = 0;
	int skb_len;

	SCTP_DEBUG_PRINTK("sctp_recvmsg(%s: %p, %s: %p, %s: %zd, %s: %d, %s: "
			  "0x%x, %s: %p)\n",
			  "sk", sk,
			  "msghdr", msg,
			  "len", len,
			  "knoblauch", noblock,
			  "flags", flags,
			  "addr_len", addr_len);

	sctp_lock_sock(sk);

	if (sctp_style(sk, TCP) && !sctp_sstate(sk, ESTABLISHED)) {
		err = -ENOTCONN;
		goto out;
	}

	skb = sctp_skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb)
		goto out;

	/* Get the total length of the skb including any skb's in the
	 * frag_list.  Cache it now, before any pull below changes it.
	 */
	skb_len = skb->len;

	copied = skb_len;
	if (copied > len)
		copied = len;

	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);

	event = sctp_skb2event(skb);

	if (err)
		goto out_free;

	sock_recv_ts_and_drops(msg, sk, skb);
	if (sctp_ulpevent_is_notification(event)) {
		msg->msg_flags |= MSG_NOTIFICATION;
		sp->pf->event_msgname(event, msg->msg_name, addr_len);
	} else {
		sp->pf->skb_msgname(skb, msg->msg_name, addr_len);
	}

	/* Check if we allow SCTP_SNDRCVINFO. */
	if (sp->subscribe.sctp_data_io_event)
		sctp_ulpevent_read_sndrcvinfo(event, msg);
#if 0
	/* FIXME: we should be calling IP/IPv6 layers.  */
	if (sk->sk_protinfo.af_inet.cmsg_flags)
		ip_cmsg_recv(msg, skb);
#endif

	err = copied;

	/* If skb's length exceeds the user's buffer, update the skb and
	 * push it back to the receive_queue so that the next call to
	 * recvmsg() will return the remaining data.  Don't set MSG_EOR.
	 */
	if (skb_len > copied) {
		msg->msg_flags &= ~MSG_EOR;
		if (flags & MSG_PEEK)
			goto out_free;
		sctp_skb_pull(skb, copied);
		skb_queue_head(&sk->sk_receive_queue, skb);

		/* When only partial message is copied to the user, increase
		 * rwnd by that amount. If all the data in the skb is read,
		 * rwnd is updated when the event is freed.
		 */
		if (!sctp_ulpevent_is_notification(event))
			sctp_assoc_rwnd_increase(event->asoc, copied);
		goto out;
	} else if ((event->msg_flags & MSG_NOTIFICATION) ||
		   (event->msg_flags & MSG_EOR))
		msg->msg_flags |= MSG_EOR;
	else
		msg->msg_flags &= ~MSG_EOR;

out_free:
	if (flags & MSG_PEEK) {
		/* Release the skb reference acquired after peeking the skb in
		 * sctp_skb_recv_datagram().
		 */
		kfree_skb(skb);
	} else {
		/* Free the event which includes releasing the reference to
		 * the owner of the skb, freeing the skb and updating the
		 * rwnd.
		 */
		sctp_ulpevent_free(event);
	}
out:
	sctp_release_sock(sk);
	return err;
}

/* 7.1.12 Enable/Disable message fragmentation (SCTP_DISABLE_FRAGMENTS)
 *
 * This option is a on/off flag.  If enabled no SCTP message
 * fragmentation will be performed. 
Instead if a message being sent
 * exceeds the current PMTU size, the message will NOT be sent and
 * instead a error will be indicated to the user.
 */
static int sctp_setsockopt_disable_fragments(struct sock *sk,
					     char __user *optval,
					     unsigned int optlen)
{
	int val;

	/* The option payload is a single int: any non-zero value disables
	 * message fragmentation, zero re-enables it.
	 */
	if (optlen < sizeof(int))
		return -EINVAL;
	if (get_user(val, (int __user *)optval))
		return -EFAULT;

	sctp_sk(sk)->disable_fragments = (val != 0);

	return 0;
}

/* Update the event subscription mask (SCTP_EVENTS) from user space. */
static int sctp_setsockopt_events(struct sock *sk, char __user *optval,
				  unsigned int optlen)
{
	struct sctp_sock *sp = sctp_sk(sk);
	struct sctp_ulpevent *event;
	struct sctp_association *asoc;

	if (optlen > sizeof(struct sctp_event_subscribe))
		return -EINVAL;
	if (copy_from_user(&sp->subscribe, optval, optlen))
		return -EFAULT;

	/*
	 * At the time when a user app subscribes to SCTP_SENDER_DRY_EVENT,
	 * if there is no data to be sent or retransmit, the stack will
	 * immediately send up this notification.
	 */
	if (!sctp_ulpevent_type_enabled(SCTP_SENDER_DRY_EVENT,
					&sp->subscribe))
		return 0;

	asoc = sctp_id2assoc(sk, 0);
	if (!asoc || !sctp_outq_is_empty(&asoc->outqueue))
		return 0;

	event = sctp_ulpevent_make_sender_dry_event(asoc, GFP_ATOMIC);
	if (!event)
		return -ENOMEM;

	sctp_ulpq_tail_event(&asoc->ulpq, event);

	return 0;
}

/* 7.1.8 Automatic Close of associations (SCTP_AUTOCLOSE)
 *
 * This socket option is applicable to the UDP-style socket only.  When
 * set it will cause associations that are idle for more than the
 * specified number of seconds to automatically close.  An association
 * being idle is defined an association that has NOT sent or received
 * user data.  The special value of '0' indicates that no automatic
 * close of any associations should be performed.  The option expects an
 * integer defining the number of seconds of idle time before an
 * association is closed.
 */
static int sctp_setsockopt_autoclose(struct sock *sk, char __user *optval,
				     unsigned int optlen)
{
	struct sctp_sock *sp = sctp_sk(sk);

	/* Applicable to UDP-style socket only */
	if (sctp_style(sk, TCP))
		return -EOPNOTSUPP;
	/* Exactly one int is expected: idle seconds before automatic
	 * close; 0 disables autoclose (see the API comment above).
	 */
	if (optlen != sizeof(int))
		return -EINVAL;
	if (copy_from_user(&sp->autoclose, optval, optlen))
		return -EFAULT;

	return 0;
}

/* 7.1.13 Peer Address Parameters (SCTP_PEER_ADDR_PARAMS)
 *
 * Applications can enable or disable heartbeats for any peer address of
 * an association, modify an address's heartbeat interval, force a
 * heartbeat to be sent immediately, and adjust the address's maximum
 * number of retransmissions sent before an address is considered
 * unreachable.  The following structure is used to access and modify an
 * address's parameters:
 *
 *  struct sctp_paddrparams {
 *     sctp_assoc_t            spp_assoc_id;
 *     struct sockaddr_storage spp_address;
 *     uint32_t                spp_hbinterval;
 *     uint16_t                spp_pathmaxrxt;
 *     uint32_t                spp_pathmtu;
 *     uint32_t                spp_sackdelay;
 *     uint32_t                spp_flags;
 * };
 *
 *   spp_assoc_id    - (one-to-many style socket) This is filled in the
 *                     application, and identifies the association for
 *                     this query.
 *   spp_address     - This specifies which address is of interest.
 *   spp_hbinterval  - This contains the value of the heartbeat interval,
 *                     in milliseconds.  If a value of zero
 *                     is present in this field then no changes are to
 *                     be made to this parameter.
 *   spp_pathmaxrxt  - This contains the maximum number of
 *                     retransmissions before this address shall be
 *                     considered unreachable. If a value of zero
 *                     is present in this field then no changes are to
 *                     be made to this parameter.
 *   spp_pathmtu     - When Path MTU discovery is disabled the value
 *                     specified here will be the "fixed" path mtu.
 *                     Note that if the spp_address field is empty
 *                     then all associations on this address will
 *                     have this fixed path mtu set upon them.
 *
 *   spp_sackdelay   - When delayed sack is enabled, this value specifies
 *                     the number of milliseconds that sacks will be delayed
 *                     for. 
This value will apply to all addresses of an
 *                     association if the spp_address field is empty. Note
 *                     also, that if delayed sack is enabled and this
 *                     value is set to 0, no change is made to the last
 *                     recorded delayed sack timer value.
 *
 *   spp_flags       - These flags are used to control various features
 *                     on an association. The flag field may contain
 *                     zero or more of the following options.
 *
 *                     SPP_HB_ENABLE - Enable heartbeats on the
 *                     specified address. Note that if the address
 *                     field is empty all addresses for the association
 *                     have heartbeats enabled upon them.
 *
 *                     SPP_HB_DISABLE - Disable heartbeats on the
 *                     specified address. Note that if the address
 *                     field is empty all addresses for the association
 *                     will have their heartbeats disabled. Note also
 *                     that SPP_HB_ENABLE and SPP_HB_DISABLE are
 *                     mutually exclusive, only one of these two should
 *                     be specified. Enabling both fields will have
 *                     undetermined results.
 *
 *                     SPP_HB_DEMAND - Request a user initiated heartbeat
 *                     to be made immediately.
 *
 *                     SPP_HB_TIME_IS_ZERO - Specifies that the time for
 *                     heartbeat delay is to be set to the value of 0
 *                     milliseconds.
 *
 *                     SPP_PMTUD_ENABLE - This field will enable PMTU
 *                     discovery upon the specified address. Note that
 *                     if the address field is empty then all addresses
 *                     on the association are affected.
 *
 *                     SPP_PMTUD_DISABLE - This field will disable PMTU
 *                     discovery upon the specified address. Note that
 *                     if the address field is empty then all addresses
 *                     on the association are affected. Note also that
 *                     SPP_PMTUD_ENABLE and SPP_PMTUD_DISABLE are mutually
 *                     exclusive. Enabling both will have undetermined
 *                     results.
 *
 *                     SPP_SACKDELAY_ENABLE - Setting this flag turns
 *                     on delayed sack. The time specified in spp_sackdelay
 *                     is used to specify the sack delay for this address. Note
 *                     that if spp_address is empty then all addresses will
 *                     enable delayed sack and take on the sack delay
 *                     value specified in spp_sackdelay.
* SPP_SACKDELAY_DISABLE - Setting this flag turns
 *             off delayed sack. If the spp_address field is blank then
 *             delayed sack is disabled for the entire association. Note
 *             also that this field is mutually exclusive to
 *             SPP_SACKDELAY_ENABLE, setting both will have undefined
 *             results.
 */
/* Apply one sctp_paddrparams request to a single scope: a transport when
 * @trans is non-NULL, else an association when @asoc is non-NULL, else the
 * socket endpoint (@sp).  Zero-valued fields leave the current setting
 * unchanged.  Returns 0 or a negative errno.
 */
static int sctp_apply_peer_addr_params(struct sctp_paddrparams *params,
                                       struct sctp_transport *trans,
                                       struct sctp_association *asoc,
                                       struct sctp_sock *sp,
                                       int hb_change,
                                       int pmtud_change,
                                       int sackdelay_change)
{
    int error;

    /* SPP_HB_DEMAND: send a user-initiated heartbeat right now.  This is
     * only possible on a concrete transport.
     */
    if (params->spp_flags & SPP_HB_DEMAND && trans) {
        struct net *net = sock_net(trans->asoc->base.sk);

        error = sctp_primitive_REQUESTHEARTBEAT(net, trans->asoc, trans);
        if (error)
            return error;
    }

    /* Note that unless the spp_flag is set to SPP_HB_ENABLE the value of
     * this field is ignored.  Note also that a value of zero indicates
     * the current setting should be left unchanged.
     */
    if (params->spp_flags & SPP_HB_ENABLE) {

        /* Re-zero the interval if the SPP_HB_TIME_IS_ZERO is
         * set.  This lets us use 0 value when this flag
         * is set.
         */
        if (params->spp_flags & SPP_HB_TIME_IS_ZERO)
            params->spp_hbinterval = 0;

        if (params->spp_hbinterval ||
            (params->spp_flags & SPP_HB_TIME_IS_ZERO)) {
            if (trans) {
                trans->hbinterval =
                    msecs_to_jiffies(params->spp_hbinterval);
            } else if (asoc) {
                asoc->hbinterval =
                    msecs_to_jiffies(params->spp_hbinterval);
            } else {
                sp->hbinterval = params->spp_hbinterval;
            }
        }
    }

    if (hb_change) {
        if (trans) {
            trans->param_flags =
                (trans->param_flags & ~SPP_HB) | hb_change;
        } else if (asoc) {
            asoc->param_flags =
                (asoc->param_flags & ~SPP_HB) | hb_change;
        } else {
            sp->param_flags =
                (sp->param_flags & ~SPP_HB) | hb_change;
        }
    }

    /* When Path MTU discovery is disabled the value specified here will
     * be the "fixed" path mtu (i.e. the value of the spp_flags field must
     * include the flag SPP_PMTUD_DISABLE for this field to have any
     * effect).
     */
    if ((params->spp_flags & SPP_PMTUD_DISABLE) && params->spp_pathmtu) {
        if (trans) {
            trans->pathmtu = params->spp_pathmtu;
            sctp_assoc_sync_pmtu(sctp_opt2sk(sp), asoc);
        } else if (asoc) {
            asoc->pathmtu = params->spp_pathmtu;
            sctp_frag_point(asoc, params->spp_pathmtu);
        } else {
            sp->pathmtu = params->spp_pathmtu;
        }
    }

    if (pmtud_change) {
        if (trans) {
            /* Re-probe the path MTU only on a disabled->enabled
             * transition for this transport.
             */
            int update = (trans->param_flags & SPP_PMTUD_DISABLE) &&
                (params->spp_flags & SPP_PMTUD_ENABLE);
            trans->param_flags =
                (trans->param_flags & ~SPP_PMTUD) | pmtud_change;
            if (update) {
                sctp_transport_pmtu(trans, sctp_opt2sk(sp));
                sctp_assoc_sync_pmtu(sctp_opt2sk(sp), asoc);
            }
        } else if (asoc) {
            asoc->param_flags =
                (asoc->param_flags & ~SPP_PMTUD) | pmtud_change;
        } else {
            sp->param_flags =
                (sp->param_flags & ~SPP_PMTUD) | pmtud_change;
        }
    }

    /* Note that unless the spp_flag is set to SPP_SACKDELAY_ENABLE the
     * value of this field is ignored.  Note also that a value of zero
     * indicates the current setting should be left unchanged.
     */
    if ((params->spp_flags & SPP_SACKDELAY_ENABLE) &&
        params->spp_sackdelay) {
        if (trans) {
            trans->sackdelay =
                msecs_to_jiffies(params->spp_sackdelay);
        } else if (asoc) {
            asoc->sackdelay =
                msecs_to_jiffies(params->spp_sackdelay);
        } else {
            sp->sackdelay = params->spp_sackdelay;
        }
    }

    if (sackdelay_change) {
        if (trans) {
            trans->param_flags =
                (trans->param_flags & ~SPP_SACKDELAY) |
                sackdelay_change;
        } else if (asoc) {
            asoc->param_flags =
                (asoc->param_flags & ~SPP_SACKDELAY) |
                sackdelay_change;
        } else {
            sp->param_flags =
                (sp->param_flags & ~SPP_SACKDELAY) |
                sackdelay_change;
        }
    }

    /* Note that a value of zero indicates the current setting should
     * be left unchanged.
     */
    if (params->spp_pathmaxrxt) {
        if (trans) {
            trans->pathmaxrxt = params->spp_pathmaxrxt;
        } else if (asoc) {
            asoc->pathmaxrxt = params->spp_pathmaxrxt;
        } else {
            sp->pathmaxrxt = params->spp_pathmaxrxt;
        }
    }

    return 0;
}

/* Handler for SCTP_PEER_ADDR_PARAMS: validates the request, resolves the
 * optional transport/association target, then applies the parameters; an
 * association-wide request is additionally fanned out to every transport
 * of that association.
 */
static int sctp_setsockopt_peer_addr_params(struct sock *sk,
                                            char __user *optval,
                                            unsigned int optlen)
{
    struct sctp_paddrparams params;
    struct sctp_transport *trans = NULL;
    struct sctp_association *asoc = NULL;
    struct sctp_sock *sp = sctp_sk(sk);
    int error;
    int hb_change, pmtud_change, sackdelay_change;

    if (optlen != sizeof(struct sctp_paddrparams))
        return -EINVAL;

    if (copy_from_user(&params, optval, optlen))
        return -EFAULT;

    /* Validate flags and value parameters. */
    hb_change        = params.spp_flags & SPP_HB;
    pmtud_change     = params.spp_flags & SPP_PMTUD;
    sackdelay_change = params.spp_flags & SPP_SACKDELAY;

    /* Setting both the ENABLE and DISABLE bit of any pair is invalid, as
     * is a sack delay above 500ms or a fixed path MTU below the minimum
     * segment size.
     */
    if (hb_change        == SPP_HB ||
        pmtud_change     == SPP_PMTUD ||
        sackdelay_change == SPP_SACKDELAY ||
        params.spp_sackdelay > 500 ||
        (params.spp_pathmtu &&
         params.spp_pathmtu < SCTP_DEFAULT_MINSEGMENT))
        return -EINVAL;

    /* If an address other than INADDR_ANY is specified, and
     * no transport is found, then the request is invalid.
     */
    if (!sctp_is_any(sk, (union sctp_addr *)&params.spp_address)) {
        trans = sctp_addr_id2transport(sk, &params.spp_address,
                                       params.spp_assoc_id);
        if (!trans)
            return -EINVAL;
    }

    /* Get association, if assoc_id != 0 and the socket is a one
     * to many style socket, and an association was not found, then
     * the id was invalid.
     */
    asoc = sctp_id2assoc(sk, params.spp_assoc_id);
    if (!asoc && params.spp_assoc_id && sctp_style(sk, UDP))
        return -EINVAL;

    /* Heartbeat demand can only be sent on a transport or
     * association, but not a socket.
     */
    if (params.spp_flags & SPP_HB_DEMAND && !trans && !asoc)
        return -EINVAL;

    /* Process parameters. */
    error = sctp_apply_peer_addr_params(&params, trans, asoc, sp,
                                        hb_change, pmtud_change,
                                        sackdelay_change);
    if (error)
        return error;

    /* If changes are for association, also apply parameters to each
     * transport.
     */
    if (!trans && asoc) {
        list_for_each_entry(trans, &asoc->peer.transport_addr_list,
                            transports) {
            sctp_apply_peer_addr_params(&params, trans, asoc, sp,
                                        hb_change, pmtud_change,
                                        sackdelay_change);
        }
    }

    return 0;
}

/*
 * 7.1.23.  Get or set delayed ack timer (SCTP_DELAYED_SACK)
 *
 * This option will effect the way delayed acks are performed.  This
 * option allows you to get or set the delayed ack time, in
 * milliseconds.  It also allows changing the delayed ack frequency.
 * Changing the frequency to 1 disables the delayed sack algorithm.  If
 * the assoc_id is 0, then this sets or gets the endpoints default
 * values.  If the assoc_id field is non-zero, then the set or get
 * effects the specified association for the one to many model (the
 * assoc_id field is ignored by the one to one model).  Note that if
 * sack_delay or sack_freq are 0 when setting this option, then the
 * current values will remain unchanged.
 *
 * struct sctp_sack_info {
 *     sctp_assoc_t       sack_assoc_id;
 *     uint32_t           sack_delay;
 *     uint32_t           sack_freq;
 * };
 *
 * sack_assoc_id -  This parameter, indicates which association the user
 *    is performing an action upon.  Note that if this field's value is
 *    zero then the endpoints default value is changed (effecting future
 *    associations only).
 *
 * sack_delay -  This parameter contains the number of milliseconds that
 *    the user is requesting the delayed ACK timer be set to.  Note that
 *    this value is defined in the standard to be between 200 and 500
 *    milliseconds.
 *
 * sack_freq -  This parameter contains the number of packets that must
 *    be received before a sack is sent without waiting for the delay
 *    timer to expire.  The default value for this is 2, setting this
 *    value to 1 will disable the delayed sack algorithm.
 */
static int sctp_setsockopt_delayed_ack(struct sock *sk,
                                       char __user *optval,
                                       unsigned int optlen)
{
    struct sctp_sack_info params;
    struct sctp_transport *trans = NULL;
    struct sctp_association *asoc = NULL;
    struct sctp_sock *sp = sctp_sk(sk);

    if (optlen == sizeof(struct sctp_sack_info)) {
        if (copy_from_user(&params, optval, optlen))
            return -EFAULT;

        /* Both fields zero: nothing to change. */
        if (params.sack_delay == 0 && params.sack_freq == 0)
            return 0;
    } else if (optlen == sizeof(struct sctp_assoc_value)) {
        pr_warn("Use of struct sctp_assoc_value in delayed_ack socket option deprecated\n");
        pr_warn("Use struct sctp_sack_info instead\n");
        if (copy_from_user(&params, optval, optlen))
            return -EFAULT;

        /* Legacy layout carries only the delay: map delay==0 to
         * "disable" (freq 1), delay!=0 to "leave freq alone" (freq 0).
         */
        if (params.sack_delay == 0)
            params.sack_freq = 1;
        else
            params.sack_freq = 0;
    } else
        return -EINVAL;

    /* Validate value parameter. */
    if (params.sack_delay > 500)
        return -EINVAL;

    /* Get association, if sack_assoc_id != 0 and the socket is a one
     * to many style socket, and an association was not found, then
     * the id was invalid.
     */
    asoc = sctp_id2assoc(sk, params.sack_assoc_id);
    if (!asoc && params.sack_assoc_id && sctp_style(sk, UDP))
        return -EINVAL;

    if (params.sack_delay) {
        if (asoc) {
            asoc->sackdelay =
                msecs_to_jiffies(params.sack_delay);
            asoc->param_flags =
                (asoc->param_flags & ~SPP_SACKDELAY) |
                SPP_SACKDELAY_ENABLE;
        } else {
            sp->sackdelay = params.sack_delay;
            sp->param_flags =
                (sp->param_flags & ~SPP_SACKDELAY) |
                SPP_SACKDELAY_ENABLE;
        }
    }

    if (params.sack_freq == 1) {
        /* Frequency 1 disables the delayed sack algorithm. */
        if (asoc) {
            asoc->param_flags =
                (asoc->param_flags & ~SPP_SACKDELAY) |
                SPP_SACKDELAY_DISABLE;
        } else {
            sp->param_flags =
                (sp->param_flags & ~SPP_SACKDELAY) |
                SPP_SACKDELAY_DISABLE;
        }
    } else if (params.sack_freq > 1) {
        if (asoc) {
            asoc->sackfreq = params.sack_freq;
            asoc->param_flags =
                (asoc->param_flags & ~SPP_SACKDELAY) |
                SPP_SACKDELAY_ENABLE;
        } else {
            sp->sackfreq = params.sack_freq;
            sp->param_flags =
                (sp->param_flags & ~SPP_SACKDELAY) |
                SPP_SACKDELAY_ENABLE;
        }
    }

    /* If change is for association, also apply to each transport.
     */
    if (asoc) {
        list_for_each_entry(trans, &asoc->peer.transport_addr_list,
                            transports) {
            if (params.sack_delay) {
                trans->sackdelay =
                    msecs_to_jiffies(params.sack_delay);
                trans->param_flags =
                    (trans->param_flags & ~SPP_SACKDELAY) |
                    SPP_SACKDELAY_ENABLE;
            }
            if (params.sack_freq == 1) {
                trans->param_flags =
                    (trans->param_flags & ~SPP_SACKDELAY) |
                    SPP_SACKDELAY_DISABLE;
            } else if (params.sack_freq > 1) {
                trans->sackfreq = params.sack_freq;
                trans->param_flags =
                    (trans->param_flags & ~SPP_SACKDELAY) |
                    SPP_SACKDELAY_ENABLE;
            }
        }
    }

    return 0;
}

/* 7.1.3 Initialization Parameters (SCTP_INITMSG)
 *
 * Applications can specify protocol parameters for the default association
 * initialization.  The option name argument to setsockopt() and getsockopt()
 * is SCTP_INITMSG.
 *
 * Setting initialization parameters is effective only on an unconnected
 * socket (for UDP-style sockets only future associations are effected
 * by the change).  With TCP-style sockets, this option is inherited by
 * sockets derived from a listener socket.
 */
static int sctp_setsockopt_initmsg(struct sock *sk, char __user *optval,
                                   unsigned int optlen)
{
    struct sctp_initmsg sinit;
    struct sctp_sock *sp = sctp_sk(sk);

    if (optlen != sizeof(struct sctp_initmsg))
        return -EINVAL;
    if (copy_from_user(&sinit, optval, optlen))
        return -EFAULT;

    /* Zero-valued fields leave the current default unchanged. */
    if (sinit.sinit_num_ostreams)
        sp->initmsg.sinit_num_ostreams = sinit.sinit_num_ostreams;
    if (sinit.sinit_max_instreams)
        sp->initmsg.sinit_max_instreams = sinit.sinit_max_instreams;
    if (sinit.sinit_max_attempts)
        sp->initmsg.sinit_max_attempts = sinit.sinit_max_attempts;
    if (sinit.sinit_max_init_timeo)
        sp->initmsg.sinit_max_init_timeo = sinit.sinit_max_init_timeo;

    return 0;
}

/*
 * 7.1.14 Set default send parameters (SCTP_DEFAULT_SEND_PARAM)
 *
 * Applications that wish to use the sendto() system call may wish to
 * specify a default set of parameters that would normally be supplied
 * through the inclusion of ancillary data.
This socket option allows
 * such an application to set the default sctp_sndrcvinfo structure.
 * The application that wishes to use this socket option simply passes
 * in to this call the sctp_sndrcvinfo structure defined in Section
 * 5.2.2) The input parameters accepted by this call include
 * sinfo_stream, sinfo_flags, sinfo_ppid, sinfo_context,
 * sinfo_timetolive.  The user must provide the sinfo_assoc_id field in
 * to this call if the caller is using the UDP model.
 */
static int sctp_setsockopt_default_send_param(struct sock *sk,
                                              char __user *optval,
                                              unsigned int optlen)
{
    struct sctp_sock *sp = sctp_sk(sk);
    struct sctp_association *asoc;
    struct sctp_sndrcvinfo sinfo;

    /* The payload is exactly one struct sctp_sndrcvinfo. */
    if (optlen != sizeof(sinfo))
        return -EINVAL;
    if (copy_from_user(&sinfo, optval, optlen))
        return -EFAULT;

    /* A non-zero id that resolves to nothing is an error on a
     * one-to-many socket.
     */
    asoc = sctp_id2assoc(sk, sinfo.sinfo_assoc_id);
    if (!asoc && sinfo.sinfo_assoc_id && sctp_style(sk, UDP))
        return -EINVAL;

    if (!asoc) {
        /* No association resolved: update the endpoint defaults. */
        sp->default_stream     = sinfo.sinfo_stream;
        sp->default_flags      = sinfo.sinfo_flags;
        sp->default_ppid       = sinfo.sinfo_ppid;
        sp->default_context    = sinfo.sinfo_context;
        sp->default_timetolive = sinfo.sinfo_timetolive;
    } else {
        /* Update just this association's defaults. */
        asoc->default_stream     = sinfo.sinfo_stream;
        asoc->default_flags      = sinfo.sinfo_flags;
        asoc->default_ppid       = sinfo.sinfo_ppid;
        asoc->default_context    = sinfo.sinfo_context;
        asoc->default_timetolive = sinfo.sinfo_timetolive;
    }

    return 0;
}

/* 7.1.10 Set Primary Address (SCTP_PRIMARY_ADDR)
 *
 * Requests that the local SCTP stack use the enclosed peer address as
 * the association primary.  The enclosed address must be one of the
 * association peer's addresses.
*/ static int sctp_setsockopt_primary_addr(struct sock *sk, char __user *optval, unsigned int optlen) { struct sctp_prim prim; struct sctp_transport *trans; if (optlen != sizeof(struct sctp_prim)) return -EINVAL; if (copy_from_user(&prim, optval, sizeof(struct sctp_prim))) return -EFAULT; trans = sctp_addr_id2transport(sk, &prim.ssp_addr, prim.ssp_assoc_id); if (!trans) return -EINVAL; sctp_assoc_set_primary(trans->asoc, trans); return 0; } /* * 7.1.5 SCTP_NODELAY * * Turn on/off any Nagle-like algorithm. This means that packets are * generally sent as soon as possible and no unnecessary delays are * introduced, at the cost of more packets in the network. Expects an * integer boolean flag. */ static int sctp_setsockopt_nodelay(struct sock *sk, char __user *optval, unsigned int optlen) { int val; if (optlen < sizeof(int)) return -EINVAL; if (get_user(val, (int __user *)optval)) return -EFAULT; sctp_sk(sk)->nodelay = (val == 0) ? 0 : 1; return 0; } /* * * 7.1.1 SCTP_RTOINFO * * The protocol parameters used to initialize and bound retransmission * timeout (RTO) are tunable. sctp_rtoinfo structure is used to access * and modify these parameters. * All parameters are time values, in milliseconds. A value of 0, when * modifying the parameters, indicates that the current value should not * be changed. 
*
 */
static int sctp_setsockopt_rtoinfo(struct sock *sk, char __user *optval,
                                   unsigned int optlen)
{
    struct sctp_rtoinfo rinfo;
    struct sctp_association *asoc;

    if (optlen != sizeof(struct sctp_rtoinfo))
        return -EINVAL;
    if (copy_from_user(&rinfo, optval, optlen))
        return -EFAULT;

    asoc = sctp_id2assoc(sk, rinfo.srto_assoc_id);

    /* A non-zero id that resolves to no association is invalid on a
     * one-to-many socket.
     */
    if (!asoc && rinfo.srto_assoc_id && sctp_style(sk, UDP))
        return -EINVAL;

    if (!asoc) {
        /* No association (id 0): update the endpoint defaults.
         * Zero-valued fields mean "leave unchanged".
         */
        struct sctp_sock *sp = sctp_sk(sk);

        if (rinfo.srto_initial != 0)
            sp->rtoinfo.srto_initial = rinfo.srto_initial;
        if (rinfo.srto_max != 0)
            sp->rtoinfo.srto_max = rinfo.srto_max;
        if (rinfo.srto_min != 0)
            sp->rtoinfo.srto_min = rinfo.srto_min;
    } else {
        /* Apply to the specific association, converting from
         * milliseconds to jiffies.  Zero means "leave unchanged".
         */
        if (rinfo.srto_initial != 0)
            asoc->rto_initial =
                msecs_to_jiffies(rinfo.srto_initial);
        if (rinfo.srto_max != 0)
            asoc->rto_max = msecs_to_jiffies(rinfo.srto_max);
        if (rinfo.srto_min != 0)
            asoc->rto_min = msecs_to_jiffies(rinfo.srto_min);
    }

    return 0;
}

/*
 *
 * 7.1.2 SCTP_ASSOCINFO
 *
 * This option is used to tune the maximum retransmission attempts
 * of the association.
 * Returns an error if the new association retransmission value is
 * greater than the sum of the retransmission value of the peer.
 * See [SCTP] for more information.
*
 */
static int sctp_setsockopt_associnfo(struct sock *sk, char __user *optval,
                                     unsigned int optlen)
{
    struct sctp_assocparams assocparams;
    struct sctp_association *asoc;

    if (optlen != sizeof(struct sctp_assocparams))
        return -EINVAL;
    if (copy_from_user(&assocparams, optval, optlen))
        return -EFAULT;

    asoc = sctp_id2assoc(sk, assocparams.sasoc_assoc_id);
    if (!asoc && assocparams.sasoc_assoc_id && sctp_style(sk, UDP))
        return -EINVAL;

    /* Set the values to the specific association */
    if (asoc) {
        if (assocparams.sasoc_asocmaxrxt != 0) {
            __u32 path_sum = 0;
            int paths = 0;
            struct sctp_transport *peer_addr;

            /* Sum the per-path retransmission limits over all
             * transports of this association.
             */
            list_for_each_entry(peer_addr,
                                &asoc->peer.transport_addr_list,
                                transports) {
                path_sum += peer_addr->pathmaxrxt;
                paths++;
            }

            /* Only validate asocmaxrxt if we have more than
             * one path/transport.  We do this because path
             * retransmissions are only counted when we have more
             * then one path.
             */
            if (paths > 1 &&
                assocparams.sasoc_asocmaxrxt > path_sum)
                return -EINVAL;

            asoc->max_retrans = assocparams.sasoc_asocmaxrxt;
        }

        /* Cookie life is supplied in milliseconds; split it into the
         * timeval's seconds and microseconds parts.
         */
        if (assocparams.sasoc_cookie_life != 0) {
            asoc->cookie_life.tv_sec =
                assocparams.sasoc_cookie_life / 1000;
            asoc->cookie_life.tv_usec =
                (assocparams.sasoc_cookie_life % 1000) * 1000;
        }
    } else {
        /* Set the values to the endpoint */
        struct sctp_sock *sp = sctp_sk(sk);

        if (assocparams.sasoc_asocmaxrxt != 0)
            sp->assocparams.sasoc_asocmaxrxt =
                assocparams.sasoc_asocmaxrxt;
        if (assocparams.sasoc_cookie_life != 0)
            sp->assocparams.sasoc_cookie_life =
                assocparams.sasoc_cookie_life;
    }

    return 0;
}

/*
 * 7.1.16 Set/clear IPv4 mapped addresses (SCTP_I_WANT_MAPPED_V4_ADDR)
 *
 * This socket option is a boolean flag which turns on or off mapped V4
 * addresses.  If this option is turned on and the socket is type
 * PF_INET6, then IPv4 addresses will be mapped to V6 representation.
 * If this option is turned off, then no mapping will be done of V4
 * addresses and a user will receive both PF_INET6 and PF_INET type
 * addresses on the socket.
*/ static int sctp_setsockopt_mappedv4(struct sock *sk, char __user *optval, unsigned int optlen) { int val; struct sctp_sock *sp = sctp_sk(sk); if (optlen < sizeof(int)) return -EINVAL; if (get_user(val, (int __user *)optval)) return -EFAULT; if (val) sp->v4mapped = 1; else sp->v4mapped = 0; return 0; } /* * 8.1.16. Get or Set the Maximum Fragmentation Size (SCTP_MAXSEG) * This option will get or set the maximum size to put in any outgoing * SCTP DATA chunk. If a message is larger than this size it will be * fragmented by SCTP into the specified size. Note that the underlying * SCTP implementation may fragment into smaller sized chunks when the * PMTU of the underlying association is smaller than the value set by * the user. The default value for this option is '0' which indicates * the user is NOT limiting fragmentation and only the PMTU will effect * SCTP's choice of DATA chunk size. Note also that values set larger * than the maximum size of an IP datagram will effectively let SCTP * control fragmentation (i.e. the same as setting this option to 0). * * The following structure is used to access and modify this parameter: * * struct sctp_assoc_value { * sctp_assoc_t assoc_id; * uint32_t assoc_value; * }; * * assoc_id: This parameter is ignored for one-to-one style sockets. * For one-to-many style sockets this parameter indicates which * association the user is performing an action upon. Note that if * this field's value is zero then the endpoints default value is * changed (effecting future associations only). * assoc_value: This parameter specifies the maximum size in bytes. 
 */
static int sctp_setsockopt_maxseg(struct sock *sk, char __user *optval,
                                  unsigned int optlen)
{
    struct sctp_assoc_value params;
    struct sctp_association *asoc;
    struct sctp_sock *sp = sctp_sk(sk);
    int val;

    if (optlen == sizeof(int)) {
        /* Deprecated payload: a bare int, endpoint scope only. */
        pr_warn("Use of int in maxseg socket option deprecated\n");
        pr_warn("Use struct sctp_assoc_value instead\n");
        if (copy_from_user(&val, optval, optlen))
            return -EFAULT;
        params.assoc_id = 0;
    } else if (optlen == sizeof(struct sctp_assoc_value)) {
        if (copy_from_user(&params, optval, optlen))
            return -EFAULT;
        val = params.assoc_value;
    } else
        return -EINVAL;

    /* A non-zero value must lie in [8, SCTP_MAX_CHUNK_LEN]. */
    if ((val != 0) && ((val < 8) || (val > SCTP_MAX_CHUNK_LEN)))
        return -EINVAL;

    asoc = sctp_id2assoc(sk, params.assoc_id);
    if (!asoc && params.assoc_id && sctp_style(sk, UDP))
        return -EINVAL;

    if (asoc) {
        if (val == 0) {
            /* 0 means "no user limit": derive the fragmentation
             * size from the current path MTU minus the network
             * and SCTP header overhead.
             */
            val = asoc->pathmtu;
            val -= sp->pf->af->net_header_len;
            val -= sizeof(struct sctphdr) +
                sizeof(struct sctp_data_chunk);
        }
        asoc->user_frag = val;
        asoc->frag_point = sctp_frag_point(asoc, asoc->pathmtu);
    } else {
        sp->user_frag = val;
    }

    return 0;
}

/*
 * 7.1.9 Set Peer Primary Address (SCTP_SET_PEER_PRIMARY_ADDR)
 *
 * Requests that the peer mark the enclosed address as the association
 * primary.  The enclosed address must be one of the association's
 * locally bound addresses.
The following structure is used to make a
 * set primary request:
 */
static int sctp_setsockopt_peer_primary_addr(struct sock *sk,
                                             char __user *optval,
                                             unsigned int optlen)
{
    struct net *net = sock_net(sk);
    struct sctp_sock *sp;
    struct sctp_association *asoc = NULL;
    struct sctp_setpeerprim prim;
    struct sctp_chunk *chunk;
    struct sctp_af *af;
    int err;

    sp = sctp_sk(sk);

    /* The whole feature hinges on the ASCONF extension being enabled. */
    if (!net->sctp.addip_enable)
        return -EPERM;

    if (optlen != sizeof(struct sctp_setpeerprim))
        return -EINVAL;

    if (copy_from_user(&prim, optval, optlen))
        return -EFAULT;

    asoc = sctp_id2assoc(sk, prim.sspp_assoc_id);
    if (!asoc)
        return -EINVAL;

    /* The peer must support ASCONF and must not have disabled the
     * SET_PRIMARY parameter.
     */
    if (!asoc->peer.asconf_capable)
        return -EPERM;

    if (asoc->peer.addip_disabled_mask & SCTP_PARAM_SET_PRIMARY)
        return -EPERM;

    if (!sctp_state(asoc, ESTABLISHED))
        return -ENOTCONN;

    af = sctp_get_af_specific(prim.sspp_addr.ss_family);
    if (!af)
        return -EINVAL;

    if (!af->addr_valid((union sctp_addr *)&prim.sspp_addr, sp, NULL))
        return -EADDRNOTAVAIL;

    /* The requested primary must be one of our own bound addresses. */
    if (!sctp_assoc_lookup_laddr(asoc, (union sctp_addr *)&prim.sspp_addr))
        return -EADDRNOTAVAIL;

    /* Create an ASCONF chunk with SET_PRIMARY parameter */
    chunk = sctp_make_asconf_set_prim(asoc,
                                      (union sctp_addr *)&prim.sspp_addr);
    if (!chunk)
        return -ENOMEM;

    err = sctp_send_asconf(asoc, chunk);

    SCTP_DEBUG_PRINTK("We set peer primary addr primitively.\n");

    return err;
}

/* Handler for SCTP_ADAPTATION_LAYER: records the adaptation-layer
 * indication to be advertised in future INIT chunks.
 */
static int sctp_setsockopt_adaptation_layer(struct sock *sk,
                                            char __user *optval,
                                            unsigned int optlen)
{
    struct sctp_setadaptation adaptation;

    if (optlen != sizeof(struct sctp_setadaptation))
        return -EINVAL;
    if (copy_from_user(&adaptation, optval, optlen))
        return -EFAULT;

    sctp_sk(sk)->adaptation_ind = adaptation.ssb_adaptation_ind;

    return 0;
}

/*
 * 7.1.29.  Set or Get the default context (SCTP_CONTEXT)
 *
 * The context field in the sctp_sndrcvinfo structure is normally only
 * used when a failed message is retrieved holding the value that was
 * sent down on the actual send call.
This option allows the setting of
 * a default context on an association basis that will be received on
 * reading messages from the peer.  This is especially helpful in the
 * one-2-many model for an application to keep some reference to an
 * internal state machine that is processing messages on the
 * association.  Note that the setting of this value only effects
 * received messages from the peer and does not effect the value that is
 * saved with outbound messages.
 */
static int sctp_setsockopt_context(struct sock *sk, char __user *optval,
                                   unsigned int optlen)
{
    struct sctp_sock *sp = sctp_sk(sk);
    struct sctp_assoc_value av;
    struct sctp_association *asoc;

    if (optlen != sizeof(struct sctp_assoc_value))
        return -EINVAL;
    if (copy_from_user(&av, optval, optlen))
        return -EFAULT;

    if (av.assoc_id == 0) {
        /* Id zero: set the endpoint-wide default. */
        sp->default_rcv_context = av.assoc_value;
        return 0;
    }

    /* Otherwise the id must name an existing association. */
    asoc = sctp_id2assoc(sk, av.assoc_id);
    if (!asoc)
        return -EINVAL;

    asoc->default_rcv_context = av.assoc_value;

    return 0;
}

/*
 * 7.1.24.  Get or set fragmented interleave (SCTP_FRAGMENT_INTERLEAVE)
 *
 * This options will at a minimum specify if the implementation is doing
 * fragmented interleave.  Fragmented interleave, for a one to many
 * socket, is when subsequent calls to receive a message may return
 * parts of messages from different associations.  Some implementations
 * may allow you to turn this value on or off.  If so, when turned off,
 * no fragment interleave will occur (which will cause a head of line
 * blocking amongst multiple associations sharing the same one to many
 * socket).  When this option is turned on, then each receive call may
 * come from a different association (thus the user must receive data
 * with the extended calls (e.g. sctp_recvmsg) to keep track of which
 * association each receive belongs to.
 *
 * This option takes a boolean value.  A non-zero value indicates that
 * fragmented interleave is on.  A value of zero indicates that
 * fragmented interleave is off.
*
 * Note that it is important that an implementation that allows this
 * option to be turned on, have it off by default.  Otherwise an unaware
 * application using the one to many model may become confused and act
 * incorrectly.
 */
static int sctp_setsockopt_fragment_interleave(struct sock *sk,
                                               char __user *optval,
                                               unsigned int optlen)
{
    int val;

    if (optlen != sizeof(int))
        return -EINVAL;
    if (get_user(val, (int __user *)optval))
        return -EFAULT;

    /* Any non-zero value enables fragment interleave. */
    sctp_sk(sk)->frag_interleave = (val == 0) ? 0 : 1;

    return 0;
}

/*
 * 8.1.21.  Set or Get the SCTP Partial Delivery Point
 *       (SCTP_PARTIAL_DELIVERY_POINT)
 *
 * This option will set or get the SCTP partial delivery point.  This
 * point is the size of a message where the partial delivery API will be
 * invoked to help free up rwnd space for the peer.  Setting this to a
 * lower value will cause partial deliveries to happen more often.  The
 * calls argument is an integer that sets or gets the partial delivery
 * point.  Note also that the call will fail if the user attempts to set
 * this value larger than the socket receive buffer size.
 *
 * Note that any single message having a length smaller than or equal to
 * the SCTP partial delivery point will be delivered in one single read
 * call as long as the user provided buffer is large enough to hold the
 * message.
 */
static int sctp_setsockopt_partial_delivery_point(struct sock *sk,
                                                  char __user *optval,
                                                  unsigned int optlen)
{
    u32 val;

    if (optlen != sizeof(u32))
        return -EINVAL;

    /* Fetch through a pointer of the destination's own type.  The old
     * (int __user *) cast only worked because sizeof(int) == sizeof(u32)
     * on all supported architectures.
     */
    if (get_user(val, (u32 __user *)optval))
        return -EFAULT;

    /* Note: We double the receive buffer from what the user sets
     * it to be, also initial rwnd is based on rcvbuf/2.
     */
    if (val > (sk->sk_rcvbuf >> 1))
        return -EINVAL;

    sctp_sk(sk)->pd_point = val;

    return 0;
}

/*
 * 7.1.28.  Set or Get the maximum burst (SCTP_MAX_BURST)
 *
 * This option will allow a user to change the maximum burst of packets
 * that can be emitted by this association.
Note that the default value
 * is 4, and some implementations may restrict this setting so that it
 * can only be lowered.
 *
 * NOTE: This text doesn't seem right.  Do this on a socket basis with
 * future associations inheriting the socket value.
 */
static int sctp_setsockopt_maxburst(struct sock *sk,
                                    char __user *optval,
                                    unsigned int optlen)
{
    struct sctp_assoc_value params;
    struct sctp_sock *sp;
    struct sctp_association *asoc;
    int val;
    int assoc_id = 0;

    if (optlen == sizeof(int)) {
        /* Deprecated payload: a bare int, endpoint scope only
         * (assoc_id stays 0).
         */
        pr_warn("Use of int in max_burst socket option deprecated\n");
        pr_warn("Use struct sctp_assoc_value instead\n");
        if (copy_from_user(&val, optval, optlen))
            return -EFAULT;
    } else if (optlen == sizeof(struct sctp_assoc_value)) {
        if (copy_from_user(&params, optval, optlen))
            return -EFAULT;
        val = params.assoc_value;
        assoc_id = params.assoc_id;
    } else
        return -EINVAL;

    sp = sctp_sk(sk);

    if (assoc_id != 0) {
        asoc = sctp_id2assoc(sk, assoc_id);
        if (!asoc)
            return -EINVAL;
        asoc->max_burst = val;
    } else
        sp->max_burst = val;

    return 0;
}

/*
 * 7.1.18.  Add a chunk that must be authenticated (SCTP_AUTH_CHUNK)
 *
 * This set option adds a chunk type that the user is requesting to be
 * received only in an authenticated way.  Changes to the list of chunks
 * will only effect future associations on the socket.
 */
static int sctp_setsockopt_auth_chunk(struct sock *sk,
                                      char __user *optval,
                                      unsigned int optlen)
{
    struct sctp_endpoint *ep = sctp_sk(sk)->ep;
    struct sctp_authchunk val;

    if (!ep->auth_enable)
        return -EACCES;

    if (optlen != sizeof(struct sctp_authchunk))
        return -EINVAL;
    if (copy_from_user(&val, optval, optlen))
        return -EFAULT;

    /* These chunk types may never require authentication. */
    switch (val.sauth_chunk) {
    case SCTP_CID_INIT:
    case SCTP_CID_INIT_ACK:
    case SCTP_CID_SHUTDOWN_COMPLETE:
    case SCTP_CID_AUTH:
        return -EINVAL;
    }

    /* add this chunk id to the endpoint */
    return sctp_auth_ep_add_chunkid(ep, val.sauth_chunk);
}

/*
 * 7.1.19.
Get or set the list of supported HMAC Identifiers (SCTP_HMAC_IDENT)
 *
 * This option gets or sets the list of HMAC algorithms that the local
 * endpoint requires the peer to use.
 */
static int sctp_setsockopt_hmac_ident(struct sock *sk,
                                      char __user *optval,
                                      unsigned int optlen)
{
    struct sctp_endpoint *ep = sctp_sk(sk)->ep;
    struct sctp_hmacalgo *hmacs;
    u32 idents;
    int err;

    if (!ep->auth_enable)
        return -EACCES;

    if (optlen < sizeof(struct sctp_hmacalgo))
        return -EINVAL;

    /* Copy the variable-length payload into kernel memory. */
    hmacs = memdup_user(optval, optlen);
    if (IS_ERR(hmacs))
        return PTR_ERR(hmacs);

    /* The claimed identifier count must fit inside the buffer that was
     * actually copied in.
     */
    idents = hmacs->shmac_num_idents;
    if (idents == 0 || idents > SCTP_AUTH_NUM_HMACS ||
        (idents * sizeof(u16)) > (optlen - sizeof(struct sctp_hmacalgo))) {
        err = -EINVAL;
        goto out;
    }

    err = sctp_auth_ep_set_hmacs(ep, hmacs);
out:
    kfree(hmacs);
    return err;
}

/*
 * 7.1.20.  Set a shared key (SCTP_AUTH_KEY)
 *
 * This option will set a shared secret key which is used to build an
 * association shared key.
 */
static int sctp_setsockopt_auth_key(struct sock *sk,
                                    char __user *optval,
                                    unsigned int optlen)
{
    struct sctp_endpoint *ep = sctp_sk(sk)->ep;
    struct sctp_authkey *authkey;
    struct sctp_association *asoc;
    int ret;

    if (!ep->auth_enable)
        return -EACCES;

    if (optlen <= sizeof(struct sctp_authkey))
        return -EINVAL;

    authkey = memdup_user(optval, optlen);
    if (IS_ERR(authkey))
        return PTR_ERR(authkey);

    /* The claimed key length must fit inside the buffer that was
     * actually copied in.
     */
    if (authkey->sca_keylength > optlen - sizeof(struct sctp_authkey)) {
        ret = -EINVAL;
        goto out;
    }

    asoc = sctp_id2assoc(sk, authkey->sca_assoc_id);
    if (!asoc && authkey->sca_assoc_id && sctp_style(sk, UDP)) {
        ret = -EINVAL;
        goto out;
    }

    ret = sctp_auth_set_key(ep, asoc, authkey);
out:
    /* Zeroize before freeing: the buffer holds key material. */
    kzfree(authkey);
    return ret;
}

/*
 * 7.1.21.  Get or set the active shared key (SCTP_AUTH_ACTIVE_KEY)
 *
 * This option will get or set the active shared key to be used to build
 * the association shared key.
*/ static int sctp_setsockopt_active_key(struct sock *sk, char __user *optval, unsigned int optlen) { struct sctp_endpoint *ep = sctp_sk(sk)->ep; struct sctp_authkeyid val; struct sctp_association *asoc; if (!ep->auth_enable) return -EACCES; if (optlen != sizeof(struct sctp_authkeyid)) return -EINVAL; if (copy_from_user(&val, optval, optlen)) return -EFAULT; asoc = sctp_id2assoc(sk, val.scact_assoc_id); if (!asoc && val.scact_assoc_id && sctp_style(sk, UDP)) return -EINVAL; return sctp_auth_set_active_key(ep, asoc, val.scact_keynumber); } /* * 7.1.22. Delete a shared key (SCTP_AUTH_DELETE_KEY) * * This set option will delete a shared secret key from use. */ static int sctp_setsockopt_del_key(struct sock *sk, char __user *optval, unsigned int optlen) { struct sctp_endpoint *ep = sctp_sk(sk)->ep; struct sctp_authkeyid val; struct sctp_association *asoc; if (!ep->auth_enable) return -EACCES; if (optlen != sizeof(struct sctp_authkeyid)) return -EINVAL; if (copy_from_user(&val, optval, optlen)) return -EFAULT; asoc = sctp_id2assoc(sk, val.scact_assoc_id); if (!asoc && val.scact_assoc_id && sctp_style(sk, UDP)) return -EINVAL; return sctp_auth_del_key_id(ep, asoc, val.scact_keynumber); } /* * 8.1.23 SCTP_AUTO_ASCONF * * This option will enable or disable the use of the automatic generation of * ASCONF chunks to add and delete addresses to an existing association. Note * that this option has two caveats namely: a) it only affects sockets that * are bound to all addresses available to the SCTP stack, and b) the system * administrator may have an overriding control that turns the ASCONF feature * off no matter what setting the socket option may have. * This option expects an integer boolean flag, where a non-zero value turns on * the option, and a zero value turns off the option. * Note. 
In this implementation, socket operation overrides default parameter
 * being set by sysctl as well as FreeBSD implementation
 */
static int sctp_setsockopt_auto_asconf(struct sock *sk, char __user *optval,
                                       unsigned int optlen)
{
    int val;
    struct sctp_sock *sp = sctp_sk(sk);

    if (optlen < sizeof(int))
        return -EINVAL;
    if (get_user(val, (int __user *)optval))
        return -EFAULT;

    /* Auto-asconf can only be enabled on a bound-to-all endpoint. */
    if (!sctp_is_ep_boundall(sk) && val)
        return -EINVAL;

    /* Nothing to do if the setting is unchanged. */
    if ((val && sp->do_auto_asconf) || (!val && !sp->do_auto_asconf))
        return 0;

    /* The per-net auto_asconf socket list is protected by
     * addr_wq_lock.
     */
    spin_lock_bh(&sock_net(sk)->sctp.addr_wq_lock);
    if (val == 0 && sp->do_auto_asconf) {
        list_del(&sp->auto_asconf_list);
        sp->do_auto_asconf = 0;
    } else if (val && !sp->do_auto_asconf) {
        list_add_tail(&sp->auto_asconf_list,
            &sock_net(sk)->sctp.auto_asconf_splist);
        sp->do_auto_asconf = 1;
    }
    spin_unlock_bh(&sock_net(sk)->sctp.addr_wq_lock);
    return 0;
}

/*
 * SCTP_PEER_ADDR_THLDS
 *
 * This option allows us to alter the partially failed threshold for one or all
 * transports in an association.  See Section 6.1 of:
 * http://www.ietf.org/id/draft-nishida-tsvwg-sctp-failover-05.txt
 */
static int sctp_setsockopt_paddr_thresholds(struct sock *sk,
                                            char __user *optval,
                                            unsigned int optlen)
{
    struct sctp_paddrthlds val;
    struct sctp_transport *trans;
    struct sctp_association *asoc;

    if (optlen < sizeof(struct sctp_paddrthlds))
        return -EINVAL;
    if (copy_from_user(&val, (struct sctp_paddrthlds __user *)optval,
                       sizeof(struct sctp_paddrthlds)))
        return -EFAULT;

    if (sctp_is_any(sk, (const union sctp_addr *)&val.spt_address)) {
        /* Wildcard address: apply to every transport of the
         * association and to the association itself.  A zero
         * spt_pathmaxrxt means "leave pathmaxrxt unchanged".
         */
        asoc = sctp_id2assoc(sk, val.spt_assoc_id);
        if (!asoc)
            return -ENOENT;
        list_for_each_entry(trans, &asoc->peer.transport_addr_list,
                            transports) {
            if (val.spt_pathmaxrxt)
                trans->pathmaxrxt = val.spt_pathmaxrxt;
            trans->pf_retrans = val.spt_pathpfthld;
        }

        if (val.spt_pathmaxrxt)
            asoc->pathmaxrxt = val.spt_pathmaxrxt;
        asoc->pf_retrans = val.spt_pathpfthld;
    } else {
        /* Specific address: apply to that single transport. */
        trans = sctp_addr_id2transport(sk, &val.spt_address,
                                       val.spt_assoc_id);
        if (!trans)
            return -ENOENT;

        if (val.spt_pathmaxrxt)
            trans->pathmaxrxt = val.spt_pathmaxrxt;
        trans->pf_retrans = val.spt_pathpfthld;
    }

    return 0;
}

/* API 6.2 setsockopt(), getsockopt()
 *
 * Applications use setsockopt() and getsockopt() to set or retrieve
 * socket options.  Socket options are used to change the default
 * behavior of sockets calls.  They are described in Section 7.
 *
 * The syntax is:
 *
 *   ret = getsockopt(int sd, int level, int optname, void __user *optval,
 *                    int __user *optlen);
 *   ret = setsockopt(int sd, int level, int optname, const void __user *optval,
 *                    int optlen);
 *
 *   sd      - the socket descriptor.
 *   level   - set to IPPROTO_SCTP for all SCTP options.
 *   optname - the option name.
 *   optval  - the buffer to store the value of the option.
 *   optlen  - the size of the buffer.
 */
SCTP_STATIC int sctp_setsockopt(struct sock *sk, int level, int optname,
                                char __user *optval, unsigned int optlen)
{
    int retval = 0;

    SCTP_DEBUG_PRINTK("sctp_setsockopt(sk: %p... optname: %d)\n",
                      sk, optname);

    /* I can hardly begin to describe how wrong this is.  This is
     * so broken as to be worse than useless.  The API draft
     * REALLY is NOT helpful here...  I am not convinced that the
     * semantics of setsockopt() with a level OTHER THAN SOL_SCTP
     * are at all well-founded.
     */
    if (level != SOL_SCTP) {
        struct sctp_af *af = sctp_sk(sk)->pf->af;
        retval = af->setsockopt(sk, level, optname, optval, optlen);
        goto out_nounlock;
    }

    sctp_lock_sock(sk);

    /* Dispatch to the per-option handler; each handler validates its
     * own optlen/payload.
     */
    switch (optname) {
    case SCTP_SOCKOPT_BINDX_ADD:
        /* 'optlen' is the size of the addresses buffer. */
        retval = sctp_setsockopt_bindx(sk,
                                       (struct sockaddr __user *)optval,
                                       optlen, SCTP_BINDX_ADD_ADDR);
        break;

    case SCTP_SOCKOPT_BINDX_REM:
        /* 'optlen' is the size of the addresses buffer. */
        retval = sctp_setsockopt_bindx(sk,
                                       (struct sockaddr __user *)optval,
                                       optlen, SCTP_BINDX_REM_ADDR);
        break;

    case SCTP_SOCKOPT_CONNECTX_OLD:
        /* 'optlen' is the size of the addresses buffer. */
        retval = sctp_setsockopt_connectx_old(sk,
                                    (struct sockaddr __user *)optval,
                                    optlen);
        break;

    case SCTP_SOCKOPT_CONNECTX:
        /* 'optlen' is the size of the addresses buffer. */
        retval = sctp_setsockopt_connectx(sk,
                                    (struct sockaddr __user *)optval,
                                    optlen);
        break;

    case SCTP_DISABLE_FRAGMENTS:
        retval = sctp_setsockopt_disable_fragments(sk, optval, optlen);
        break;

    case SCTP_EVENTS:
        retval = sctp_setsockopt_events(sk, optval, optlen);
        break;

    case SCTP_AUTOCLOSE:
        retval = sctp_setsockopt_autoclose(sk, optval, optlen);
        break;

    case SCTP_PEER_ADDR_PARAMS:
        retval = sctp_setsockopt_peer_addr_params(sk, optval, optlen);
        break;

    case SCTP_DELAYED_SACK:
        retval = sctp_setsockopt_delayed_ack(sk, optval, optlen);
        break;
    case SCTP_PARTIAL_DELIVERY_POINT:
        retval = sctp_setsockopt_partial_delivery_point(sk, optval, optlen);
        break;

    case SCTP_INITMSG:
        retval = sctp_setsockopt_initmsg(sk, optval, optlen);
        break;
    case SCTP_DEFAULT_SEND_PARAM:
        retval = sctp_setsockopt_default_send_param(sk, optval,
                                                    optlen);
        break;
    case SCTP_PRIMARY_ADDR:
        retval = sctp_setsockopt_primary_addr(sk, optval, optlen);
        break;
    case SCTP_SET_PEER_PRIMARY_ADDR:
        retval = sctp_setsockopt_peer_primary_addr(sk, optval, optlen);
        break;
    case SCTP_NODELAY:
        retval = sctp_setsockopt_nodelay(sk, optval, optlen);
        break;
    case SCTP_RTOINFO:
        retval = sctp_setsockopt_rtoinfo(sk, optval, optlen);
        break;
    case SCTP_ASSOCINFO:
        retval = sctp_setsockopt_associnfo(sk, optval, optlen);
        break;
    case SCTP_I_WANT_MAPPED_V4_ADDR:
        retval = sctp_setsockopt_mappedv4(sk, optval, optlen);
        break;
    case SCTP_MAXSEG:
        retval = sctp_setsockopt_maxseg(sk, optval, optlen);
        break;
    case SCTP_ADAPTATION_LAYER:
        retval = sctp_setsockopt_adaptation_layer(sk, optval, optlen);
        break;
    case SCTP_CONTEXT:
        retval = sctp_setsockopt_context(sk, optval, optlen);
        break;
    case SCTP_FRAGMENT_INTERLEAVE:
        retval = sctp_setsockopt_fragment_interleave(sk, optval, optlen);
        break;
    case SCTP_MAX_BURST:
        retval = sctp_setsockopt_maxburst(sk, optval, optlen);
        break;
    case SCTP_AUTH_CHUNK:
        retval = sctp_setsockopt_auth_chunk(sk, optval, optlen);
        break;
    case SCTP_HMAC_IDENT:
        retval = sctp_setsockopt_hmac_ident(sk, optval, optlen);
        break;
    case SCTP_AUTH_KEY:
        retval = sctp_setsockopt_auth_key(sk, optval, optlen);
        break;
    case SCTP_AUTH_ACTIVE_KEY:
        retval = sctp_setsockopt_active_key(sk, optval, optlen);
        break;
    case SCTP_AUTH_DELETE_KEY:
        retval = sctp_setsockopt_del_key(sk, optval, optlen);
        break;
    case SCTP_AUTO_ASCONF:
        retval = sctp_setsockopt_auto_asconf(sk, optval, optlen);
        break;
    case SCTP_PEER_ADDR_THLDS:
        retval = sctp_setsockopt_paddr_thresholds(sk, optval, optlen);
        break;
    default:
        retval = -ENOPROTOOPT;
        break;
    }

    sctp_release_sock(sk);

out_nounlock:
    return retval;
}

/* API 3.1.6 connect() - UDP Style Syntax
 *
 * An application may use the connect() call in the UDP model to initiate an
 * association without sending data.
 *
 * The syntax is:
 *
 * ret = connect(int sd, const struct sockaddr *nam, socklen_t len);
 *
 * sd: the socket descriptor to have a new association added to.
 *
 * nam: the address structure (either struct sockaddr_in or struct
 *    sockaddr_in6 defined in RFC2553 [7]).
 *
 * len: the size of the address.
 */
SCTP_STATIC int sctp_connect(struct sock *sk, struct sockaddr *addr,
                             int addr_len)
{
    int err = 0;
    struct sctp_af *af;

    sctp_lock_sock(sk);

    SCTP_DEBUG_PRINTK("%s - sk: %p, sockaddr: %p, addr_len: %d\n",
                      __func__, sk, addr, addr_len);

    /* Validate addr_len before calling common connect/connectx routine. */
    af = sctp_get_af_specific(addr->sa_family);
    if (!af || addr_len < af->sockaddr_len) {
        err = -EINVAL;
    } else {
        /* Pass correct addr len to common routine (so it knows there
         * is only one address being passed.)
         */
        err = __sctp_connect(sk, addr, af->sockaddr_len, NULL);
    }

    sctp_release_sock(sk);
    return err;
}

/* FIXME: Write comments.
*/
SCTP_STATIC int sctp_disconnect(struct sock *sk, int flags)
{
	/* Disconnect is not supported on SCTP sockets. */
	return -EOPNOTSUPP; /* STUB */
}

/* 4.1.4 accept() - TCP Style Syntax
 *
 * Applications use accept() call to remove an established SCTP
 * association from the accept queue of the endpoint.  A new socket
 * descriptor will be returned from accept() to represent the newly
 * formed association.
 */
SCTP_STATIC struct sock *sctp_accept(struct sock *sk, int flags, int *err)
{
	struct sctp_sock *sp;
	struct sctp_endpoint *ep;
	struct sock *newsk = NULL;
	struct sctp_association *asoc;
	long timeo;
	int error = 0;

	sctp_lock_sock(sk);

	sp = sctp_sk(sk);
	ep = sp->ep;

	/* accept() is only valid on TCP-style, listening sockets. */
	if (!sctp_style(sk, TCP)) {
		error = -EOPNOTSUPP;
		goto out;
	}

	if (!sctp_sstate(sk, LISTENING)) {
		error = -EINVAL;
		goto out;
	}

	timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);

	error = sctp_wait_for_accept(sk, timeo);
	if (error)
		goto out;

	/* We treat the list of associations on the endpoint as the accept
	 * queue and pick the first association on the list.
	 */
	asoc = list_entry(ep->asocs.next, struct sctp_association, asocs);

	newsk = sp->pf->create_accept_sk(sk, asoc);
	if (!newsk) {
		error = -ENOMEM;
		goto out;
	}

	/* Populate the fields of the newsk from the oldsk and migrate the
	 * asoc to the newsk.
	 */
	sctp_sock_migrate(sk, newsk, asoc, SCTP_SOCKET_TCP);

out:
	sctp_release_sock(sk);
	*err = error;
	return newsk;
}

/* The SCTP ioctl handler. */
SCTP_STATIC int sctp_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
	int rc = -ENOTCONN;

	sctp_lock_sock(sk);

	/*
	 * SEQPACKET-style sockets in LISTENING state are valid, for
	 * SCTP, so only discard TCP-style sockets in LISTENING state.
	 */
	if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING))
		goto out;

	switch (cmd) {
	case SIOCINQ: {
		struct sk_buff *skb;
		unsigned int amount = 0;

		skb = skb_peek(&sk->sk_receive_queue);
		if (skb != NULL) {
			/*
			 * We will only return the amount of this packet since
			 * that is all that will be read.
			 */
			amount = skb->len;
		}
		rc = put_user(amount, (int __user *)arg);
		break;
	}
	default:
		rc = -ENOIOCTLCMD;
		break;
	}
out:
	sctp_release_sock(sk);
	return rc;
}

/* This is the function which gets called during socket creation to
 * initialized the SCTP-specific portion of the sock.
 * The sock structure should already be zero-filled memory.
 */
SCTP_STATIC int sctp_init_sock(struct sock *sk)
{
	struct net *net = sock_net(sk);
	struct sctp_endpoint *ep;
	struct sctp_sock *sp;

	SCTP_DEBUG_PRINTK("sctp_init_sock(sk: %p)\n", sk);

	sp = sctp_sk(sk);

	/* Initialize the SCTP per socket area.  */
	switch (sk->sk_type) {
	case SOCK_SEQPACKET:
		sp->type = SCTP_SOCKET_UDP;
		break;
	case SOCK_STREAM:
		sp->type = SCTP_SOCKET_TCP;
		break;
	default:
		return -ESOCKTNOSUPPORT;
	}

	/* Initialize default send parameters. These parameters can be
	 * modified with the SCTP_DEFAULT_SEND_PARAM socket option.
	 */
	sp->default_stream = 0;
	sp->default_ppid = 0;
	sp->default_flags = 0;
	sp->default_context = 0;
	sp->default_timetolive = 0;

	sp->default_rcv_context = 0;
	sp->max_burst = net->sctp.max_burst;

	sp->sctp_hmac_alg = net->sctp.sctp_hmac_alg;

	/* Initialize default setup parameters. These parameters
	 * can be modified with the SCTP_INITMSG socket option or
	 * overridden by the SCTP_INIT CMSG.
	 */
	sp->initmsg.sinit_num_ostreams = sctp_max_outstreams;
	sp->initmsg.sinit_max_instreams = sctp_max_instreams;
	sp->initmsg.sinit_max_attempts = net->sctp.max_retrans_init;
	sp->initmsg.sinit_max_init_timeo = net->sctp.rto_max;

	/* Initialize default RTO related parameters.  These parameters can
	 * be modified for with the SCTP_RTOINFO socket option.
	 */
	sp->rtoinfo.srto_initial = net->sctp.rto_initial;
	sp->rtoinfo.srto_max = net->sctp.rto_max;
	sp->rtoinfo.srto_min = net->sctp.rto_min;

	/* Initialize default association related parameters. These parameters
	 * can be modified with the SCTP_ASSOCINFO socket option.
	 */
	sp->assocparams.sasoc_asocmaxrxt = net->sctp.max_retrans_association;
	sp->assocparams.sasoc_number_peer_destinations = 0;
	sp->assocparams.sasoc_peer_rwnd = 0;
	sp->assocparams.sasoc_local_rwnd = 0;
	sp->assocparams.sasoc_cookie_life = net->sctp.valid_cookie_life;

	/* Initialize default event subscriptions. By default, all the
	 * options are off.
	 */
	memset(&sp->subscribe, 0, sizeof(struct sctp_event_subscribe));

	/* Default Peer Address Parameters.  These defaults can
	 * be modified via SCTP_PEER_ADDR_PARAMS
	 */
	sp->hbinterval = net->sctp.hb_interval;
	sp->pathmaxrxt = net->sctp.max_retrans_path;
	sp->pathmtu = 0; /* allow default discovery */
	sp->sackdelay = net->sctp.sack_timeout;
	sp->sackfreq = 2;
	sp->param_flags = SPP_HB_ENABLE |
			  SPP_PMTUD_ENABLE |
			  SPP_SACKDELAY_ENABLE;

	/* If enabled no SCTP message fragmentation will be performed.
	 * Configure through SCTP_DISABLE_FRAGMENTS socket option.
	 */
	sp->disable_fragments = 0;

	/* Enable Nagle algorithm by default.  */
	sp->nodelay = 0;

	/* Enable by default. */
	sp->v4mapped = 1;

	/* Auto-close idle associations after the configured
	 * number of seconds.  A value of 0 disables this
	 * feature.  Configure through the SCTP_AUTOCLOSE socket option,
	 * for UDP-style sockets only.
	 */
	sp->autoclose = 0;

	/* User specified fragmentation limit. */
	sp->user_frag = 0;

	sp->adaptation_ind = 0;

	sp->pf = sctp_get_pf_specific(sk->sk_family);

	/* Control variables for partial data delivery. */
	atomic_set(&sp->pd_mode, 0);
	skb_queue_head_init(&sp->pd_lobby);
	sp->frag_interleave = 0;

	/* Create a per socket endpoint structure.  Even if we
	 * change the data structure relationships, this may still
	 * be useful for storing pre-connect address information.
	 */
	ep = sctp_endpoint_new(sk, GFP_KERNEL);
	if (!ep)
		return -ENOMEM;

	sp->ep = ep;
	sp->hmac = NULL;

	SCTP_DBG_OBJCNT_INC(sock);

	local_bh_disable();
	percpu_counter_inc(&sctp_sockets_allocated);
	sock_prot_inuse_add(net, sk->sk_prot, 1);

	/* Nothing can fail after this block, otherwise
	 * sctp_destroy_sock() will be called without addr_wq_lock held
	 */
	if (net->sctp.default_auto_asconf) {
		spin_lock(&sock_net(sk)->sctp.addr_wq_lock);
		list_add_tail(&sp->auto_asconf_list,
		    &net->sctp.auto_asconf_splist);
		sp->do_auto_asconf = 1;
		spin_unlock(&sock_net(sk)->sctp.addr_wq_lock);
	} else {
		sp->do_auto_asconf = 0;
	}

	local_bh_enable();

	return 0;
}

/* Cleanup any SCTP per socket resources.  Must be called with
 * sock_net(sk)->sctp.addr_wq_lock held if sp->do_auto_asconf is true
 */
SCTP_STATIC void sctp_destroy_sock(struct sock *sk)
{
	struct sctp_sock *sp;

	SCTP_DEBUG_PRINTK("sctp_destroy_sock(sk: %p)\n", sk);

	/* Release our hold on the endpoint. */
	sp = sctp_sk(sk);
	/* This could happen during socket init, thus we bail out
	 * early, since the rest of the below is not setup either.
	 */
	if (sp->ep == NULL)
		return;

	if (sp->do_auto_asconf) {
		sp->do_auto_asconf = 0;
		list_del(&sp->auto_asconf_list);
	}
	sctp_endpoint_free(sp->ep);
	local_bh_disable();
	percpu_counter_dec(&sctp_sockets_allocated);
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
	local_bh_enable();
}

/* API 4.1.7 shutdown() - TCP Style Syntax
 *     int shutdown(int socket, int how);
 *
 *     sd      - the socket descriptor of the association to be closed.
 *     how     - Specifies the type of shutdown.  The  values  are
 *               as follows:
 *               SHUT_RD
 *                     Disables further receive operations. No SCTP
 *                     protocol action is taken.
 *               SHUT_WR
 *                     Disables further send operations, and initiates
 *                     the SCTP shutdown sequence.
 *               SHUT_RDWR
 *                     Disables further send  and  receive  operations
 *                     and initiates the SCTP shutdown sequence.
*/
SCTP_STATIC void sctp_shutdown(struct sock *sk, int how)
{
	struct net *net = sock_net(sk);
	struct sctp_endpoint *ep;
	struct sctp_association *asoc;

	/* shutdown() is only meaningful for TCP-style sockets. */
	if (!sctp_style(sk, TCP))
		return;

	/* Only the write side triggers protocol action: send SHUTDOWN
	 * on the first (and only) association of the endpoint.
	 */
	if (how & SEND_SHUTDOWN) {
		ep = sctp_sk(sk)->ep;
		if (!list_empty(&ep->asocs)) {
			asoc = list_entry(ep->asocs.next,
					  struct sctp_association, asocs);
			sctp_primitive_SHUTDOWN(net, asoc, NULL);
		}
	}
}

/* 7.2.1 Association Status (SCTP_STATUS)

 * Applications can retrieve current status information about an
 * association, including association state, peer receiver window size,
 * number of unacked data chunks, and number of data chunks pending
 * receipt.  This information is read-only.
 */
static int sctp_getsockopt_sctp_status(struct sock *sk, int len,
				       char __user *optval,
				       int __user *optlen)
{
	struct sctp_status status;
	struct sctp_association *asoc = NULL;
	struct sctp_transport *transport;
	sctp_assoc_t associd;
	int retval = 0;

	if (len < sizeof(status)) {
		retval = -EINVAL;
		goto out;
	}

	len = sizeof(status);
	if (copy_from_user(&status, optval, len)) {
		retval = -EFAULT;
		goto out;
	}

	/* The caller supplies the association id in sstat_assoc_id. */
	associd = status.sstat_assoc_id;
	asoc = sctp_id2assoc(sk, associd);
	if (!asoc) {
		retval = -EINVAL;
		goto out;
	}

	transport = asoc->peer.primary_path;

	status.sstat_assoc_id = sctp_assoc2id(asoc);
	status.sstat_state = asoc->state;
	status.sstat_rwnd = asoc->peer.rwnd;
	status.sstat_unackdata = asoc->unack_data;

	status.sstat_penddata = sctp_tsnmap_pending(&asoc->peer.tsn_map);
	status.sstat_instrms = asoc->c.sinit_max_instreams;
	status.sstat_outstrms = asoc->c.sinit_num_ostreams;
	status.sstat_fragmentation_point = asoc->frag_point;
	status.sstat_primary.spinfo_assoc_id = sctp_assoc2id(transport->asoc);
	memcpy(&status.sstat_primary.spinfo_address, &transport->ipaddr,
			transport->af_specific->sockaddr_len);
	/* Map ipv4 address into v4-mapped-on-v6 address. */
	sctp_get_pf_specific(sk->sk_family)->addr_v4map(sctp_sk(sk),
		(union sctp_addr *)&status.sstat_primary.spinfo_address);
	status.sstat_primary.spinfo_state = transport->state;
	status.sstat_primary.spinfo_cwnd = transport->cwnd;
	status.sstat_primary.spinfo_srtt = transport->srtt;
	status.sstat_primary.spinfo_rto = jiffies_to_msecs(transport->rto);
	status.sstat_primary.spinfo_mtu = transport->pathmtu;

	/* SCTP_UNKNOWN is internal only; report it as active to users. */
	if (status.sstat_primary.spinfo_state == SCTP_UNKNOWN)
		status.sstat_primary.spinfo_state = SCTP_ACTIVE;

	if (put_user(len, optlen)) {
		retval = -EFAULT;
		goto out;
	}

	SCTP_DEBUG_PRINTK("sctp_getsockopt_sctp_status(%d): %d %d %d\n",
			  len, status.sstat_state, status.sstat_rwnd,
			  status.sstat_assoc_id);

	if (copy_to_user(optval, &status, len)) {
		retval = -EFAULT;
		goto out;
	}

out:
	return retval;
}


/* 7.2.2 Peer Address Information (SCTP_GET_PEER_ADDR_INFO)
 *
 * Applications can retrieve information about a specific peer address
 * of an association, including its reachability state, congestion
 * window, and retransmission timer values.  This information is
 * read-only.
*/ static int sctp_getsockopt_peer_addr_info(struct sock *sk, int len, char __user *optval, int __user *optlen) { struct sctp_paddrinfo pinfo; struct sctp_transport *transport; int retval = 0; if (len < sizeof(pinfo)) { retval = -EINVAL; goto out; } len = sizeof(pinfo); if (copy_from_user(&pinfo, optval, len)) { retval = -EFAULT; goto out; } transport = sctp_addr_id2transport(sk, &pinfo.spinfo_address, pinfo.spinfo_assoc_id); if (!transport) return -EINVAL; pinfo.spinfo_assoc_id = sctp_assoc2id(transport->asoc); pinfo.spinfo_state = transport->state; pinfo.spinfo_cwnd = transport->cwnd; pinfo.spinfo_srtt = transport->srtt; pinfo.spinfo_rto = jiffies_to_msecs(transport->rto); pinfo.spinfo_mtu = transport->pathmtu; if (pinfo.spinfo_state == SCTP_UNKNOWN) pinfo.spinfo_state = SCTP_ACTIVE; if (put_user(len, optlen)) { retval = -EFAULT; goto out; } if (copy_to_user(optval, &pinfo, len)) { retval = -EFAULT; goto out; } out: return retval; } /* 7.1.12 Enable/Disable message fragmentation (SCTP_DISABLE_FRAGMENTS) * * This option is a on/off flag. If enabled no SCTP message * fragmentation will be performed. Instead if a message being sent * exceeds the current PMTU size, the message will NOT be sent and * instead a error will be indicated to the user. */ static int sctp_getsockopt_disable_fragments(struct sock *sk, int len, char __user *optval, int __user *optlen) { int val; if (len < sizeof(int)) return -EINVAL; len = sizeof(int); val = (sctp_sk(sk)->disable_fragments == 1); if (put_user(len, optlen)) return -EFAULT; if (copy_to_user(optval, &val, len)) return -EFAULT; return 0; } /* 7.1.15 Set notification and ancillary events (SCTP_EVENTS) * * This socket option is used to specify various notifications and * ancillary data the user wishes to receive. 
*/
static int sctp_getsockopt_events(struct sock *sk, int len, char __user *optval,
				  int __user *optlen)
{
	if (len <= 0)
		return -EINVAL;
	/* Allow shorter (older) struct layouts; cap at the full size. */
	if (len > sizeof(struct sctp_event_subscribe))
		len = sizeof(struct sctp_event_subscribe);
	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &sctp_sk(sk)->subscribe, len))
		return -EFAULT;
	return 0;
}

/* 7.1.8 Automatic Close of associations (SCTP_AUTOCLOSE)
 *
 * This socket option is applicable to the UDP-style socket only.  When
 * set it will cause associations that are idle for more than the
 * specified number of seconds to automatically close.  An association
 * being idle is defined as an association that has NOT sent or received
 * user data.  The special value of '0' indicates that no automatic
 * close of any associations should be performed.  The option expects an
 * integer defining the number of seconds of idle time before an
 * association is closed.
 */
static int sctp_getsockopt_autoclose(struct sock *sk, int len, char __user *optval, int __user *optlen)
{
	/* Applicable to UDP-style socket only */
	if (sctp_style(sk, TCP))
		return -EOPNOTSUPP;
	if (len < sizeof(int))
		return -EINVAL;
	len = sizeof(int);
	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &sctp_sk(sk)->autoclose, sizeof(int)))
		return -EFAULT;
	return 0;
}

/* Helper routine to branch off an association to a new socket. */
int sctp_do_peeloff(struct sock *sk, sctp_assoc_t id, struct socket **sockp)
{
	struct sctp_association *asoc = sctp_id2assoc(sk, id);
	struct socket *sock;
	struct sctp_af *af;
	int err = 0;

	if (!asoc)
		return -EINVAL;

	/* An association cannot be branched off from an already peeled-off
	 * socket, nor is this supported for tcp style sockets.
	 */
	if (!sctp_style(sk, UDP))
		return -EINVAL;

	/* Create a new socket.  */
	err = sock_create(sk->sk_family, SOCK_SEQPACKET, IPPROTO_SCTP, &sock);
	if (err < 0)
		return err;

	sctp_copy_sock(sock->sk, sk, asoc);

	/* Make peeled-off sockets more like 1-1 accepted sockets.
	 * Set the daddr and initialize id to something more random
	 */
	af = sctp_get_af_specific(asoc->peer.primary_addr.sa.sa_family);
	af->to_sk_daddr(&asoc->peer.primary_addr, sk);

	/* Populate the fields of the newsk from the oldsk and migrate the
	 * asoc to the newsk.
	 */
	sctp_sock_migrate(sk, sock->sk, asoc, SCTP_SOCKET_UDP_HIGH_BANDWIDTH);

	*sockp = sock;

	return err;
}
EXPORT_SYMBOL(sctp_do_peeloff);

/* SCTP_SOCKOPT_PEELOFF: branch an association off to its own socket
 * and return a new file descriptor for it to user space.
 */
static int sctp_getsockopt_peeloff(struct sock *sk, int len, char __user *optval, int __user *optlen)
{
	sctp_peeloff_arg_t peeloff;
	struct socket *newsock;
	struct file *newfile;
	int retval = 0;

	if (len < sizeof(sctp_peeloff_arg_t))
		return -EINVAL;
	len = sizeof(sctp_peeloff_arg_t);
	if (copy_from_user(&peeloff, optval, len))
		return -EFAULT;

	retval = sctp_do_peeloff(sk, peeloff.associd, &newsock);
	if (retval < 0)
		goto out;

	/* Map the socket to an unused fd that can be returned to the user.  */
	retval = get_unused_fd();
	if (retval < 0) {
		sock_release(newsock);
		goto out;
	}

	newfile = sock_alloc_file(newsock, 0, NULL);
	if (unlikely(IS_ERR(newfile))) {
		put_unused_fd(retval);
		sock_release(newsock);
		return PTR_ERR(newfile);
	}

	SCTP_DEBUG_PRINTK("%s: sk: %p newsk: %p sd: %d\n",
			  __func__, sk, newsock->sk, retval);

	/* Return the fd mapped to the new socket.  */
	if (put_user(len, optlen)) {
		fput(newfile);
		put_unused_fd(retval);
		return -EFAULT;
	}
	peeloff.sd = retval;
	if (copy_to_user(optval, &peeloff, len)) {
		fput(newfile);
		put_unused_fd(retval);
		return -EFAULT;
	}
	/* Only publish the fd once nothing else can fail. */
	fd_install(retval, newfile);
out:
	return retval;
}

/* 7.1.13 Peer Address Parameters (SCTP_PEER_ADDR_PARAMS)
 *
 * Applications can enable or disable heartbeats for any peer address of
 * an association, modify an address's heartbeat interval, force a
 * heartbeat to be sent immediately, and adjust the address's maximum
 * number of retransmissions sent before an address is considered
 * unreachable.
The following structure is used to access and modify an
 * address's parameters:
 *
 *  struct sctp_paddrparams {
 *     sctp_assoc_t            spp_assoc_id;
 *     struct sockaddr_storage spp_address;
 *     uint32_t                spp_hbinterval;
 *     uint16_t                spp_pathmaxrxt;
 *     uint32_t                spp_pathmtu;
 *     uint32_t                spp_sackdelay;
 *     uint32_t                spp_flags;
 * };
 *
 *   spp_assoc_id    - (one-to-many style socket) This is filled in the
 *                     application, and identifies the association for
 *                     this query.
 *   spp_address     - This specifies which address is of interest.
 *   spp_hbinterval  - This contains the value of the heartbeat interval,
 *                     in milliseconds.  If a  value of zero
 *                     is present in this field then no changes are to
 *                     be made to this parameter.
 *   spp_pathmaxrxt  - This contains the maximum number of
 *                     retransmissions before this address shall be
 *                     considered unreachable. If a  value of zero
 *                     is present in this field then no changes are to
 *                     be made to this parameter.
 *   spp_pathmtu     - When Path MTU discovery is disabled the value
 *                     specified here will be the "fixed" path mtu.
 *                     Note that if the spp_address field is empty
 *                     then all associations on this  address will
 *                     have this fixed path mtu set upon them.
 *
 *   spp_sackdelay   - When delayed sack is enabled, this value specifies
 *                     the number of milliseconds that sacks will be delayed
 *                     for. This value will apply to all addresses of an
 *                     association if the spp_address field is empty. Note
 *                     also, that if delayed sack is enabled and this
 *                     value is set to 0, no change is made to the last
 *                     recorded delayed sack timer value.
 *
 *   spp_flags       - These flags are used to control various features
 *                     on an association. The flag field may contain
 *                     zero or more of the following options.
 *
 *                     SPP_HB_ENABLE  - Enable heartbeats on the
 *                     specified address. Note that if the address
 *                     field is empty all addresses for the association
 *                     have heartbeats enabled upon them.
 *
 *                     SPP_HB_DISABLE - Disable heartbeats on the
 *                     specified address. Note that if the address
 *                     field is empty all addresses for the association
 *                     will have their heartbeats disabled. Note also
 *                     that SPP_HB_ENABLE and SPP_HB_DISABLE are
 *                     mutually exclusive, only one of these two should
 *                     be specified. Enabling both fields will have
 *                     undetermined results.
 *
 *                     SPP_HB_DEMAND - Request a user initiated heartbeat
 *                     to be made immediately.
 *
 *                     SPP_PMTUD_ENABLE - This field will enable PMTU
 *                     discovery upon the specified address. Note that
 *                     if the address field is empty then all addresses
 *                     on the association are effected.
 *
 *                     SPP_PMTUD_DISABLE - This field will disable PMTU
 *                     discovery upon the specified address. Note that
 *                     if the address field is empty then all addresses
 *                     on the association are effected. Note also that
 *                     SPP_PMTUD_ENABLE and SPP_PMTUD_DISABLE are mutually
 *                     exclusive. Enabling both will have undetermined
 *                     results.
 *
 *                     SPP_SACKDELAY_ENABLE - Setting this flag turns
 *                     on delayed sack. The time specified in spp_sackdelay
 *                     is used to specify the sack delay for this address. Note
 *                     that if spp_address is empty then all addresses will
 *                     enable delayed sack and take on the sack delay
 *                     value specified in spp_sackdelay.
 *                     SPP_SACKDELAY_DISABLE - Setting this flag turns
 *                     off delayed sack. If the spp_address field is blank then
 *                     delayed sack is disabled for the entire association. Note
 *                     also that this field is mutually exclusive to
 *                     SPP_SACKDELAY_ENABLE, setting both will have undefined
 *                     results.
 */
static int sctp_getsockopt_peer_addr_params(struct sock *sk, int len,
					    char __user *optval,
					    int __user *optlen)
{
	struct sctp_paddrparams params;
	struct sctp_transport *trans = NULL;
	struct sctp_association *asoc = NULL;
	struct sctp_sock *sp = sctp_sk(sk);

	if (len < sizeof(struct sctp_paddrparams))
		return -EINVAL;

	len = sizeof(struct sctp_paddrparams);
	if (copy_from_user(&params, optval, len))
		return -EFAULT;

	/* If an address other than INADDR_ANY is specified, and
	 * no transport is found, then the request is invalid.
	 */
	if (!sctp_is_any(sk, (union sctp_addr *)&params.spp_address)) {
		trans = sctp_addr_id2transport(sk, &params.spp_address,
					       params.spp_assoc_id);
		if (!trans) {
			SCTP_DEBUG_PRINTK("Failed no transport\n");
			return -EINVAL;
		}
	}

	/* Get association, if assoc_id != 0 and the socket is a one
	 * to many style socket, and an association was not found, then
	 * the id was invalid.
	 */
	asoc = sctp_id2assoc(sk, params.spp_assoc_id);
	if (!asoc && params.spp_assoc_id && sctp_style(sk, UDP)) {
		SCTP_DEBUG_PRINTK("Failed no association\n");
		return -EINVAL;
	}

	/* Fall back from transport to association to socket defaults,
	 * mirroring the set-side scoping rules.
	 */
	if (trans) {
		/* Fetch transport values. */
		params.spp_hbinterval = jiffies_to_msecs(trans->hbinterval);
		params.spp_pathmtu    = trans->pathmtu;
		params.spp_pathmaxrxt = trans->pathmaxrxt;
		params.spp_sackdelay  = jiffies_to_msecs(trans->sackdelay);

		/*draft-11 doesn't say what to return in spp_flags*/
		params.spp_flags      = trans->param_flags;
	} else if (asoc) {
		/* Fetch association values. */
		params.spp_hbinterval = jiffies_to_msecs(asoc->hbinterval);
		params.spp_pathmtu    = asoc->pathmtu;
		params.spp_pathmaxrxt = asoc->pathmaxrxt;
		params.spp_sackdelay  = jiffies_to_msecs(asoc->sackdelay);

		/*draft-11 doesn't say what to return in spp_flags*/
		params.spp_flags      = asoc->param_flags;
	} else {
		/* Fetch socket values. */
		params.spp_hbinterval = sp->hbinterval;
		params.spp_pathmtu    = sp->pathmtu;
		params.spp_sackdelay  = sp->sackdelay;
		params.spp_pathmaxrxt = sp->pathmaxrxt;

		/*draft-11 doesn't say what to return in spp_flags*/
		params.spp_flags      = sp->param_flags;
	}

	if (copy_to_user(optval, &params, len))
		return -EFAULT;

	if (put_user(len, optlen))
		return -EFAULT;

	return 0;
}

/*
 * 7.1.23.  Get or set delayed ack timer (SCTP_DELAYED_SACK)
 *
 * This option will effect the way delayed acks are performed.  This
 * option allows you to get or set the delayed ack time, in
 * milliseconds.  It also allows changing the delayed ack frequency.
 * Changing the frequency to 1 disables the delayed sack algorithm.
If
 * the assoc_id is 0, then this sets or gets the endpoints default
 * values.  If the assoc_id field is non-zero, then the set or get
 * effects the specified association for the one to many model (the
 * assoc_id field is ignored by the one to one model).  Note that if
 * sack_delay or sack_freq are 0 when setting this option, then the
 * current values will remain unchanged.
 *
 * struct sctp_sack_info {
 *     sctp_assoc_t            sack_assoc_id;
 *     uint32_t                sack_delay;
 *     uint32_t                sack_freq;
 * };
 *
 * sack_assoc_id -  This parameter, indicates which association the user
 *    is performing an action upon.  Note that if this field's value is
 *    zero then the endpoints default value is changed (effecting future
 *    associations only).
 *
 * sack_delay -  This parameter contains the number of milliseconds that
 *    the user is requesting the delayed ACK timer be set to.  Note that
 *    this value is defined in the standard to be between 200 and 500
 *    milliseconds.
 *
 * sack_freq -  This parameter contains the number of packets that must
 *    be received before a sack is sent without waiting for the delay
 *    timer to expire.  The default value for this is 2, setting this
 *    value to 1 will disable the delayed sack algorithm.
 */
static int sctp_getsockopt_delayed_ack(struct sock *sk, int len,
					    char __user *optval,
					    int __user *optlen)
{
	struct sctp_sack_info    params;
	struct sctp_association *asoc = NULL;
	struct sctp_sock        *sp = sctp_sk(sk);

	/* Accept both the current sctp_sack_info layout and the
	 * deprecated sctp_assoc_value layout for compatibility.
	 */
	if (len >= sizeof(struct sctp_sack_info)) {
		len = sizeof(struct sctp_sack_info);

		if (copy_from_user(&params, optval, len))
			return -EFAULT;
	} else if (len == sizeof(struct sctp_assoc_value)) {
		pr_warn("Use of struct sctp_assoc_value in delayed_ack socket option deprecated\n");
		pr_warn("Use struct sctp_sack_info instead\n");
		if (copy_from_user(&params, optval, len))
			return -EFAULT;
	} else
		return -EINVAL;

	/* Get association, if sack_assoc_id != 0 and the socket is a one
	 * to many style socket, and an association was not found, then
	 * the id was invalid.
	 */
	asoc = sctp_id2assoc(sk, params.sack_assoc_id);
	if (!asoc && params.sack_assoc_id && sctp_style(sk, UDP))
		return -EINVAL;

	if (asoc) {
		/* Fetch association values. */
		if (asoc->param_flags & SPP_SACKDELAY_ENABLE) {
			params.sack_delay = jiffies_to_msecs(
				asoc->sackdelay);
			params.sack_freq = asoc->sackfreq;

		} else {
			/* Delayed SACK disabled: report delay 0, freq 1. */
			params.sack_delay = 0;
			params.sack_freq = 1;
		}
	} else {
		/* Fetch socket values. */
		if (sp->param_flags & SPP_SACKDELAY_ENABLE) {
			params.sack_delay  = sp->sackdelay;
			params.sack_freq = sp->sackfreq;
		} else {
			params.sack_delay  = 0;
			params.sack_freq = 1;
		}
	}

	if (copy_to_user(optval, &params, len))
		return -EFAULT;

	if (put_user(len, optlen))
		return -EFAULT;

	return 0;
}

/* 7.1.3 Initialization Parameters (SCTP_INITMSG)
 *
 * Applications can specify protocol parameters for the default association
 * initialization.  The option name argument to setsockopt() and getsockopt()
 * is SCTP_INITMSG.
 *
 * Setting initialization parameters is effective only on an unconnected
 * socket (for UDP-style sockets only future associations are effected
 * by the change).  With TCP-style sockets, this option is inherited by
 * sockets derived from a listener socket.
*/
static int sctp_getsockopt_initmsg(struct sock *sk, int len, char __user *optval, int __user *optlen)
{
	if (len < sizeof(struct sctp_initmsg))
		return -EINVAL;
	len = sizeof(struct sctp_initmsg);
	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &sctp_sk(sk)->initmsg, len))
		return -EFAULT;
	return 0;
}


/* SCTP_GET_PEER_ADDRS: copy all peer addresses of the association
 * identified by getaddrs.assoc_id into the user buffer following the
 * sctp_getaddrs header, and report the count and bytes written.
 */
static int sctp_getsockopt_peer_addrs(struct sock *sk, int len,
				      char __user *optval, int __user *optlen)
{
	struct sctp_association *asoc;
	int cnt = 0;
	struct sctp_getaddrs getaddrs;
	struct sctp_transport *from;
	void __user *to;
	union sctp_addr temp;
	struct sctp_sock *sp = sctp_sk(sk);
	int addrlen;
	size_t space_left;
	int bytes_copied;

	if (len < sizeof(struct sctp_getaddrs))
		return -EINVAL;

	if (copy_from_user(&getaddrs, optval, sizeof(struct sctp_getaddrs)))
		return -EFAULT;

	/* For UDP-style sockets, id specifies the association to query.  */
	asoc = sctp_id2assoc(sk, getaddrs.assoc_id);
	if (!asoc)
		return -EINVAL;

	/* Addresses are written after the sctp_getaddrs header. */
	to = optval + offsetof(struct sctp_getaddrs, addrs);
	space_left = len - offsetof(struct sctp_getaddrs, addrs);

	list_for_each_entry(from, &asoc->peer.transport_addr_list,
				transports) {
		memcpy(&temp, &from->ipaddr, sizeof(temp));
		sctp_get_pf_specific(sk->sk_family)->addr_v4map(sp, &temp);
		addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len;
		if (space_left < addrlen)
			return -ENOMEM;
		if (copy_to_user(to, &temp, addrlen))
			return -EFAULT;
		to += addrlen;
		cnt++;
		space_left -= addrlen;
	}

	if (put_user(cnt, &((struct sctp_getaddrs __user *)optval)->addr_num))
		return -EFAULT;
	bytes_copied = ((char __user *)to) - optval;
	if (put_user(bytes_copied, optlen))
		return -EFAULT;

	return 0;
}

/* Copy the bound-to-ANY local addresses (filtered by the socket's
 * address family and ipv6only setting) from the per-namespace local
 * address list into the kernel buffer 'to'.  Returns the number of
 * addresses copied, or -ENOMEM if 'space_left' runs out.
 */
static int sctp_copy_laddrs(struct sock *sk, __u16 port, void *to,
			    size_t space_left, int *bytes_copied)
{
	struct sctp_sockaddr_entry *addr;
	union sctp_addr temp;
	int cnt = 0;
	int addrlen;
	struct net *net = sock_net(sk);

	/* The local address list is RCU-protected. */
	rcu_read_lock();
	list_for_each_entry_rcu(addr, &net->sctp.local_addr_list, list) {
		if (!addr->valid)
			continue;

		if ((PF_INET == sk->sk_family) &&
		    (AF_INET6 == addr->a.sa.sa_family))
			continue;
		if ((PF_INET6 == sk->sk_family) &&
		    inet_v6_ipv6only(sk) &&
		    (AF_INET == addr->a.sa.sa_family))
			continue;
		memcpy(&temp, &addr->a, sizeof(temp));
		if (!temp.v4.sin_port)
			temp.v4.sin_port = htons(port);

		sctp_get_pf_specific(sk->sk_family)->addr_v4map(sctp_sk(sk),
								&temp);
		addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len;
		if (space_left < addrlen) {
			cnt =  -ENOMEM;
			break;
		}
		memcpy(to, &temp, addrlen);

		to += addrlen;
		cnt ++;
		space_left -= addrlen;
		*bytes_copied += addrlen;
	}
	rcu_read_unlock();

	return cnt;
}


/* SCTP_GET_LOCAL_ADDRS: copy the locally bound addresses (of the
 * endpoint, or of the association named by assoc_id) to user space.
 */
static int sctp_getsockopt_local_addrs(struct sock *sk, int len,
				       char __user *optval, int __user *optlen)
{
	struct sctp_bind_addr *bp;
	struct sctp_association *asoc;
	int cnt = 0;
	struct sctp_getaddrs getaddrs;
	struct sctp_sockaddr_entry *addr;
	void __user *to;
	union sctp_addr temp;
	struct sctp_sock *sp = sctp_sk(sk);
	int addrlen;
	int err = 0;
	size_t space_left;
	int bytes_copied = 0;
	void *addrs;
	void *buf;

	if (len < sizeof(struct sctp_getaddrs))
		return -EINVAL;

	if (copy_from_user(&getaddrs, optval, sizeof(struct sctp_getaddrs)))
		return -EFAULT;

	/*
	 *  For UDP-style sockets, id specifies the association to query.
	 *  If the id field is set to the value '0' then the locally bound
	 *  addresses are returned without regard to any particular
	 *  association.
	 */
	if (0 == getaddrs.assoc_id) {
		bp = &sctp_sk(sk)->ep->base.bind_addr;
	} else {
		asoc = sctp_id2assoc(sk, getaddrs.assoc_id);
		if (!asoc)
			return -EINVAL;
		bp = &asoc->base.bind_addr;
	}

	to = optval + offsetof(struct sctp_getaddrs, addrs);
	space_left = len - offsetof(struct sctp_getaddrs, addrs);

	/* Stage the addresses in a kernel buffer, then copy to user
	 * space in one go at the end.
	 */
	addrs = kmalloc(space_left, GFP_KERNEL);
	if (!addrs)
		return -ENOMEM;

	/* If the endpoint is bound to 0.0.0.0 or ::0, get the valid
	 * addresses from the global local address list.
	 */
	if (sctp_list_single_entry(&bp->address_list)) {
		addr = list_entry(bp->address_list.next,
				  struct sctp_sockaddr_entry, list);
		if (sctp_is_any(sk, &addr->a)) {
			cnt = sctp_copy_laddrs(sk, bp->port, addrs,
						space_left, &bytes_copied);
			if (cnt < 0) {
				err = cnt;
				goto out;
			}
			goto copy_getaddrs;
		}
	}

	buf = addrs;
	/* Protection on the bound address list is not needed since
	 * in the socket option context we hold a socket lock and
	 * thus the bound address list can't change.
	 */
	list_for_each_entry(addr, &bp->address_list, list) {
		memcpy(&temp, &addr->a, sizeof(temp));
		sctp_get_pf_specific(sk->sk_family)->addr_v4map(sp, &temp);
		addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len;
		if (space_left < addrlen) {
			err =  -ENOMEM; /*fixme: right error?*/
			goto out;
		}
		memcpy(buf, &temp, addrlen);
		buf += addrlen;
		bytes_copied += addrlen;
		cnt ++;
		space_left -= addrlen;
	}

copy_getaddrs:
	if (copy_to_user(to, addrs, bytes_copied)) {
		err = -EFAULT;
		goto out;
	}
	if (put_user(cnt, &((struct sctp_getaddrs __user *)optval)->addr_num)) {
		err = -EFAULT;
		goto out;
	}
	if (put_user(bytes_copied, optlen))
		err = -EFAULT;
out:
	kfree(addrs);
	return err;
}

/* 7.1.10 Set Primary Address (SCTP_PRIMARY_ADDR)
 *
 * Requests that the local SCTP stack use the enclosed peer address as
 * the association primary.  The enclosed address must be one of the
 * association peer's addresses.
*/ static int sctp_getsockopt_primary_addr(struct sock *sk, int len, char __user *optval, int __user *optlen) { struct sctp_prim prim; struct sctp_association *asoc; struct sctp_sock *sp = sctp_sk(sk); if (len < sizeof(struct sctp_prim)) return -EINVAL; len = sizeof(struct sctp_prim); if (copy_from_user(&prim, optval, len)) return -EFAULT; asoc = sctp_id2assoc(sk, prim.ssp_assoc_id); if (!asoc) return -EINVAL; if (!asoc->peer.primary_path) return -ENOTCONN; memcpy(&prim.ssp_addr, &asoc->peer.primary_path->ipaddr, asoc->peer.primary_path->af_specific->sockaddr_len); sctp_get_pf_specific(sk->sk_family)->addr_v4map(sp, (union sctp_addr *)&prim.ssp_addr); if (put_user(len, optlen)) return -EFAULT; if (copy_to_user(optval, &prim, len)) return -EFAULT; return 0; } /* * 7.1.11 Set Adaptation Layer Indicator (SCTP_ADAPTATION_LAYER) * * Requests that the local endpoint set the specified Adaptation Layer * Indication parameter for all future INIT and INIT-ACK exchanges. */ static int sctp_getsockopt_adaptation_layer(struct sock *sk, int len, char __user *optval, int __user *optlen) { struct sctp_setadaptation adaptation; if (len < sizeof(struct sctp_setadaptation)) return -EINVAL; len = sizeof(struct sctp_setadaptation); adaptation.ssb_adaptation_ind = sctp_sk(sk)->adaptation_ind; if (put_user(len, optlen)) return -EFAULT; if (copy_to_user(optval, &adaptation, len)) return -EFAULT; return 0; } /* * * 7.1.14 Set default send parameters (SCTP_DEFAULT_SEND_PARAM) * * Applications that wish to use the sendto() system call may wish to * specify a default set of parameters that would normally be supplied * through the inclusion of ancillary data. This socket option allows * such an application to set the default sctp_sndrcvinfo structure. 
* The application that wishes to use this socket option simply passes * in to this call the sctp_sndrcvinfo structure defined in Section * 5.2.2) The input parameters accepted by this call include * sinfo_stream, sinfo_flags, sinfo_ppid, sinfo_context, * sinfo_timetolive. The user must provide the sinfo_assoc_id field in * to this call if the caller is using the UDP model. * * For getsockopt, it get the default sctp_sndrcvinfo structure. */ static int sctp_getsockopt_default_send_param(struct sock *sk, int len, char __user *optval, int __user *optlen) { struct sctp_sndrcvinfo info; struct sctp_association *asoc; struct sctp_sock *sp = sctp_sk(sk); if (len < sizeof(struct sctp_sndrcvinfo)) return -EINVAL; len = sizeof(struct sctp_sndrcvinfo); if (copy_from_user(&info, optval, len)) return -EFAULT; asoc = sctp_id2assoc(sk, info.sinfo_assoc_id); if (!asoc && info.sinfo_assoc_id && sctp_style(sk, UDP)) return -EINVAL; if (asoc) { info.sinfo_stream = asoc->default_stream; info.sinfo_flags = asoc->default_flags; info.sinfo_ppid = asoc->default_ppid; info.sinfo_context = asoc->default_context; info.sinfo_timetolive = asoc->default_timetolive; } else { info.sinfo_stream = sp->default_stream; info.sinfo_flags = sp->default_flags; info.sinfo_ppid = sp->default_ppid; info.sinfo_context = sp->default_context; info.sinfo_timetolive = sp->default_timetolive; } if (put_user(len, optlen)) return -EFAULT; if (copy_to_user(optval, &info, len)) return -EFAULT; return 0; } /* * * 7.1.5 SCTP_NODELAY * * Turn on/off any Nagle-like algorithm. This means that packets are * generally sent as soon as possible and no unnecessary delays are * introduced, at the cost of more packets in the network. Expects an * integer boolean flag. 
*/ static int sctp_getsockopt_nodelay(struct sock *sk, int len, char __user *optval, int __user *optlen) { int val; if (len < sizeof(int)) return -EINVAL; len = sizeof(int); val = (sctp_sk(sk)->nodelay == 1); if (put_user(len, optlen)) return -EFAULT; if (copy_to_user(optval, &val, len)) return -EFAULT; return 0; } /* * * 7.1.1 SCTP_RTOINFO * * The protocol parameters used to initialize and bound retransmission * timeout (RTO) are tunable. sctp_rtoinfo structure is used to access * and modify these parameters. * All parameters are time values, in milliseconds. A value of 0, when * modifying the parameters, indicates that the current value should not * be changed. * */ static int sctp_getsockopt_rtoinfo(struct sock *sk, int len, char __user *optval, int __user *optlen) { struct sctp_rtoinfo rtoinfo; struct sctp_association *asoc; if (len < sizeof (struct sctp_rtoinfo)) return -EINVAL; len = sizeof(struct sctp_rtoinfo); if (copy_from_user(&rtoinfo, optval, len)) return -EFAULT; asoc = sctp_id2assoc(sk, rtoinfo.srto_assoc_id); if (!asoc && rtoinfo.srto_assoc_id && sctp_style(sk, UDP)) return -EINVAL; /* Values corresponding to the specific association. */ if (asoc) { rtoinfo.srto_initial = jiffies_to_msecs(asoc->rto_initial); rtoinfo.srto_max = jiffies_to_msecs(asoc->rto_max); rtoinfo.srto_min = jiffies_to_msecs(asoc->rto_min); } else { /* Values corresponding to the endpoint. */ struct sctp_sock *sp = sctp_sk(sk); rtoinfo.srto_initial = sp->rtoinfo.srto_initial; rtoinfo.srto_max = sp->rtoinfo.srto_max; rtoinfo.srto_min = sp->rtoinfo.srto_min; } if (put_user(len, optlen)) return -EFAULT; if (copy_to_user(optval, &rtoinfo, len)) return -EFAULT; return 0; } /* * * 7.1.2 SCTP_ASSOCINFO * * This option is used to tune the maximum retransmission attempts * of the association. * Returns an error if the new association retransmission value is * greater than the sum of the retransmission value of the peer. * See [SCTP] for more information. 
* */ static int sctp_getsockopt_associnfo(struct sock *sk, int len, char __user *optval, int __user *optlen) { struct sctp_assocparams assocparams; struct sctp_association *asoc; struct list_head *pos; int cnt = 0; if (len < sizeof (struct sctp_assocparams)) return -EINVAL; len = sizeof(struct sctp_assocparams); if (copy_from_user(&assocparams, optval, len)) return -EFAULT; asoc = sctp_id2assoc(sk, assocparams.sasoc_assoc_id); if (!asoc && assocparams.sasoc_assoc_id && sctp_style(sk, UDP)) return -EINVAL; /* Values correspoinding to the specific association */ if (asoc) { assocparams.sasoc_asocmaxrxt = asoc->max_retrans; assocparams.sasoc_peer_rwnd = asoc->peer.rwnd; assocparams.sasoc_local_rwnd = asoc->a_rwnd; assocparams.sasoc_cookie_life = (asoc->cookie_life.tv_sec * 1000) + (asoc->cookie_life.tv_usec / 1000); list_for_each(pos, &asoc->peer.transport_addr_list) { cnt ++; } assocparams.sasoc_number_peer_destinations = cnt; } else { /* Values corresponding to the endpoint */ struct sctp_sock *sp = sctp_sk(sk); assocparams.sasoc_asocmaxrxt = sp->assocparams.sasoc_asocmaxrxt; assocparams.sasoc_peer_rwnd = sp->assocparams.sasoc_peer_rwnd; assocparams.sasoc_local_rwnd = sp->assocparams.sasoc_local_rwnd; assocparams.sasoc_cookie_life = sp->assocparams.sasoc_cookie_life; assocparams.sasoc_number_peer_destinations = sp->assocparams. sasoc_number_peer_destinations; } if (put_user(len, optlen)) return -EFAULT; if (copy_to_user(optval, &assocparams, len)) return -EFAULT; return 0; } /* * 7.1.16 Set/clear IPv4 mapped addresses (SCTP_I_WANT_MAPPED_V4_ADDR) * * This socket option is a boolean flag which turns on or off mapped V4 * addresses. If this option is turned on and the socket is type * PF_INET6, then IPv4 addresses will be mapped to V6 representation. * If this option is turned off, then no mapping will be done of V4 * addresses and a user will receive both PF_INET6 and PF_INET type * addresses on the socket. 
*/ static int sctp_getsockopt_mappedv4(struct sock *sk, int len, char __user *optval, int __user *optlen) { int val; struct sctp_sock *sp = sctp_sk(sk); if (len < sizeof(int)) return -EINVAL; len = sizeof(int); val = sp->v4mapped; if (put_user(len, optlen)) return -EFAULT; if (copy_to_user(optval, &val, len)) return -EFAULT; return 0; } /* * 7.1.29. Set or Get the default context (SCTP_CONTEXT) * (chapter and verse is quoted at sctp_setsockopt_context()) */ static int sctp_getsockopt_context(struct sock *sk, int len, char __user *optval, int __user *optlen) { struct sctp_assoc_value params; struct sctp_sock *sp; struct sctp_association *asoc; if (len < sizeof(struct sctp_assoc_value)) return -EINVAL; len = sizeof(struct sctp_assoc_value); if (copy_from_user(&params, optval, len)) return -EFAULT; sp = sctp_sk(sk); if (params.assoc_id != 0) { asoc = sctp_id2assoc(sk, params.assoc_id); if (!asoc) return -EINVAL; params.assoc_value = asoc->default_rcv_context; } else { params.assoc_value = sp->default_rcv_context; } if (put_user(len, optlen)) return -EFAULT; if (copy_to_user(optval, &params, len)) return -EFAULT; return 0; } /* * 8.1.16. Get or Set the Maximum Fragmentation Size (SCTP_MAXSEG) * This option will get or set the maximum size to put in any outgoing * SCTP DATA chunk. If a message is larger than this size it will be * fragmented by SCTP into the specified size. Note that the underlying * SCTP implementation may fragment into smaller sized chunks when the * PMTU of the underlying association is smaller than the value set by * the user. The default value for this option is '0' which indicates * the user is NOT limiting fragmentation and only the PMTU will effect * SCTP's choice of DATA chunk size. Note also that values set larger * than the maximum size of an IP datagram will effectively let SCTP * control fragmentation (i.e. the same as setting this option to 0). 
* * The following structure is used to access and modify this parameter: * * struct sctp_assoc_value { * sctp_assoc_t assoc_id; * uint32_t assoc_value; * }; * * assoc_id: This parameter is ignored for one-to-one style sockets. * For one-to-many style sockets this parameter indicates which * association the user is performing an action upon. Note that if * this field's value is zero then the endpoints default value is * changed (effecting future associations only). * assoc_value: This parameter specifies the maximum size in bytes. */ static int sctp_getsockopt_maxseg(struct sock *sk, int len, char __user *optval, int __user *optlen) { struct sctp_assoc_value params; struct sctp_association *asoc; if (len == sizeof(int)) { pr_warn("Use of int in maxseg socket option deprecated\n"); pr_warn("Use struct sctp_assoc_value instead\n"); params.assoc_id = 0; } else if (len >= sizeof(struct sctp_assoc_value)) { len = sizeof(struct sctp_assoc_value); if (copy_from_user(&params, optval, sizeof(params))) return -EFAULT; } else return -EINVAL; asoc = sctp_id2assoc(sk, params.assoc_id); if (!asoc && params.assoc_id && sctp_style(sk, UDP)) return -EINVAL; if (asoc) params.assoc_value = asoc->frag_point; else params.assoc_value = sctp_sk(sk)->user_frag; if (put_user(len, optlen)) return -EFAULT; if (len == sizeof(int)) { if (copy_to_user(optval, &params.assoc_value, len)) return -EFAULT; } else { if (copy_to_user(optval, &params, len)) return -EFAULT; } return 0; } /* * 7.1.24. Get or set fragmented interleave (SCTP_FRAGMENT_INTERLEAVE) * (chapter and verse is quoted at sctp_setsockopt_fragment_interleave()) */ static int sctp_getsockopt_fragment_interleave(struct sock *sk, int len, char __user *optval, int __user *optlen) { int val; if (len < sizeof(int)) return -EINVAL; len = sizeof(int); val = sctp_sk(sk)->frag_interleave; if (put_user(len, optlen)) return -EFAULT; if (copy_to_user(optval, &val, len)) return -EFAULT; return 0; } /* * 7.1.25. 
Set or Get the sctp partial delivery point * (chapter and verse is quoted at sctp_setsockopt_partial_delivery_point()) */ static int sctp_getsockopt_partial_delivery_point(struct sock *sk, int len, char __user *optval, int __user *optlen) { u32 val; if (len < sizeof(u32)) return -EINVAL; len = sizeof(u32); val = sctp_sk(sk)->pd_point; if (put_user(len, optlen)) return -EFAULT; if (copy_to_user(optval, &val, len)) return -EFAULT; return 0; } /* * 7.1.28. Set or Get the maximum burst (SCTP_MAX_BURST) * (chapter and verse is quoted at sctp_setsockopt_maxburst()) */ static int sctp_getsockopt_maxburst(struct sock *sk, int len, char __user *optval, int __user *optlen) { struct sctp_assoc_value params; struct sctp_sock *sp; struct sctp_association *asoc; if (len == sizeof(int)) { pr_warn("Use of int in max_burst socket option deprecated\n"); pr_warn("Use struct sctp_assoc_value instead\n"); params.assoc_id = 0; } else if (len >= sizeof(struct sctp_assoc_value)) { len = sizeof(struct sctp_assoc_value); if (copy_from_user(&params, optval, len)) return -EFAULT; } else return -EINVAL; sp = sctp_sk(sk); if (params.assoc_id != 0) { asoc = sctp_id2assoc(sk, params.assoc_id); if (!asoc) return -EINVAL; params.assoc_value = asoc->max_burst; } else params.assoc_value = sp->max_burst; if (len == sizeof(int)) { if (copy_to_user(optval, &params.assoc_value, len)) return -EFAULT; } else { if (copy_to_user(optval, &params, len)) return -EFAULT; } return 0; } static int sctp_getsockopt_hmac_ident(struct sock *sk, int len, char __user *optval, int __user *optlen) { struct sctp_endpoint *ep = sctp_sk(sk)->ep; struct sctp_hmacalgo __user *p = (void __user *)optval; struct sctp_hmac_algo_param *hmacs; __u16 data_len = 0; u32 num_idents; if (!ep->auth_enable) return -EACCES; hmacs = ep->auth_hmacs_list; data_len = ntohs(hmacs->param_hdr.length) - sizeof(sctp_paramhdr_t); if (len < sizeof(struct sctp_hmacalgo) + data_len) return -EINVAL; len = sizeof(struct sctp_hmacalgo) + data_len; 
num_idents = data_len / sizeof(u16); if (put_user(len, optlen)) return -EFAULT; if (put_user(num_idents, &p->shmac_num_idents)) return -EFAULT; if (copy_to_user(p->shmac_idents, hmacs->hmac_ids, data_len)) return -EFAULT; return 0; } static int sctp_getsockopt_active_key(struct sock *sk, int len, char __user *optval, int __user *optlen) { struct sctp_endpoint *ep = sctp_sk(sk)->ep; struct sctp_authkeyid val; struct sctp_association *asoc; if (!ep->auth_enable) return -EACCES; if (len < sizeof(struct sctp_authkeyid)) return -EINVAL; if (copy_from_user(&val, optval, sizeof(struct sctp_authkeyid))) return -EFAULT; asoc = sctp_id2assoc(sk, val.scact_assoc_id); if (!asoc && val.scact_assoc_id && sctp_style(sk, UDP)) return -EINVAL; if (asoc) val.scact_keynumber = asoc->active_key_id; else val.scact_keynumber = ep->active_key_id; len = sizeof(struct sctp_authkeyid); if (put_user(len, optlen)) return -EFAULT; if (copy_to_user(optval, &val, len)) return -EFAULT; return 0; } static int sctp_getsockopt_peer_auth_chunks(struct sock *sk, int len, char __user *optval, int __user *optlen) { struct sctp_endpoint *ep = sctp_sk(sk)->ep; struct sctp_authchunks __user *p = (void __user *)optval; struct sctp_authchunks val; struct sctp_association *asoc; struct sctp_chunks_param *ch; u32 num_chunks = 0; char __user *to; if (!ep->auth_enable) return -EACCES; if (len < sizeof(struct sctp_authchunks)) return -EINVAL; if (copy_from_user(&val, optval, sizeof(struct sctp_authchunks))) return -EFAULT; to = p->gauth_chunks; asoc = sctp_id2assoc(sk, val.gauth_assoc_id); if (!asoc) return -EINVAL; ch = asoc->peer.peer_chunks; if (!ch) goto num; /* See if the user provided enough room for all the data */ num_chunks = ntohs(ch->param_hdr.length) - sizeof(sctp_paramhdr_t); if (len < num_chunks) return -EINVAL; if (copy_to_user(to, ch->chunks, num_chunks)) return -EFAULT; num: len = sizeof(struct sctp_authchunks) + num_chunks; if (put_user(len, optlen)) return -EFAULT; if (put_user(num_chunks, 
&p->gauth_number_of_chunks)) return -EFAULT; return 0; } static int sctp_getsockopt_local_auth_chunks(struct sock *sk, int len, char __user *optval, int __user *optlen) { struct sctp_endpoint *ep = sctp_sk(sk)->ep; struct sctp_authchunks __user *p = (void __user *)optval; struct sctp_authchunks val; struct sctp_association *asoc; struct sctp_chunks_param *ch; u32 num_chunks = 0; char __user *to; if (!ep->auth_enable) return -EACCES; if (len < sizeof(struct sctp_authchunks)) return -EINVAL; if (copy_from_user(&val, optval, sizeof(struct sctp_authchunks))) return -EFAULT; to = p->gauth_chunks; asoc = sctp_id2assoc(sk, val.gauth_assoc_id); if (!asoc && val.gauth_assoc_id && sctp_style(sk, UDP)) return -EINVAL; if (asoc) ch = (struct sctp_chunks_param*)asoc->c.auth_chunks; else ch = ep->auth_chunk_list; if (!ch) goto num; num_chunks = ntohs(ch->param_hdr.length) - sizeof(sctp_paramhdr_t); if (len < sizeof(struct sctp_authchunks) + num_chunks) return -EINVAL; if (copy_to_user(to, ch->chunks, num_chunks)) return -EFAULT; num: len = sizeof(struct sctp_authchunks) + num_chunks; if (put_user(len, optlen)) return -EFAULT; if (put_user(num_chunks, &p->gauth_number_of_chunks)) return -EFAULT; return 0; } /* * 8.2.5. Get the Current Number of Associations (SCTP_GET_ASSOC_NUMBER) * This option gets the current number of associations that are attached * to a one-to-many style socket. The option value is an uint32_t. 
*/ static int sctp_getsockopt_assoc_number(struct sock *sk, int len, char __user *optval, int __user *optlen) { struct sctp_sock *sp = sctp_sk(sk); struct sctp_association *asoc; u32 val = 0; if (sctp_style(sk, TCP)) return -EOPNOTSUPP; if (len < sizeof(u32)) return -EINVAL; len = sizeof(u32); list_for_each_entry(asoc, &(sp->ep->asocs), asocs) { val++; } if (put_user(len, optlen)) return -EFAULT; if (copy_to_user(optval, &val, len)) return -EFAULT; return 0; } /* * 8.1.23 SCTP_AUTO_ASCONF * See the corresponding setsockopt entry as description */ static int sctp_getsockopt_auto_asconf(struct sock *sk, int len, char __user *optval, int __user *optlen) { int val = 0; if (len < sizeof(int)) return -EINVAL; len = sizeof(int); if (sctp_sk(sk)->do_auto_asconf && sctp_is_ep_boundall(sk)) val = 1; if (put_user(len, optlen)) return -EFAULT; if (copy_to_user(optval, &val, len)) return -EFAULT; return 0; } /* * 8.2.6. Get the Current Identifiers of Associations * (SCTP_GET_ASSOC_ID_LIST) * * This option gets the current list of SCTP association identifiers of * the SCTP associations handled by a one-to-many style socket. 
*/ static int sctp_getsockopt_assoc_ids(struct sock *sk, int len, char __user *optval, int __user *optlen) { struct sctp_sock *sp = sctp_sk(sk); struct sctp_association *asoc; struct sctp_assoc_ids *ids; u32 num = 0; if (sctp_style(sk, TCP)) return -EOPNOTSUPP; if (len < sizeof(struct sctp_assoc_ids)) return -EINVAL; list_for_each_entry(asoc, &(sp->ep->asocs), asocs) { num++; } if (len < sizeof(struct sctp_assoc_ids) + sizeof(sctp_assoc_t) * num) return -EINVAL; len = sizeof(struct sctp_assoc_ids) + sizeof(sctp_assoc_t) * num; ids = kmalloc(len, GFP_KERNEL); if (unlikely(!ids)) return -ENOMEM; ids->gaids_number_of_ids = num; num = 0; list_for_each_entry(asoc, &(sp->ep->asocs), asocs) { ids->gaids_assoc_id[num++] = asoc->assoc_id; } if (put_user(len, optlen) || copy_to_user(optval, ids, len)) { kfree(ids); return -EFAULT; } kfree(ids); return 0; } /* * SCTP_PEER_ADDR_THLDS * * This option allows us to fetch the partially failed threshold for one or all * transports in an association. See Section 6.1 of: * http://www.ietf.org/id/draft-nishida-tsvwg-sctp-failover-05.txt */ static int sctp_getsockopt_paddr_thresholds(struct sock *sk, char __user *optval, int len, int __user *optlen) { struct sctp_paddrthlds val; struct sctp_transport *trans; struct sctp_association *asoc; if (len < sizeof(struct sctp_paddrthlds)) return -EINVAL; len = sizeof(struct sctp_paddrthlds); if (copy_from_user(&val, (struct sctp_paddrthlds __user *)optval, len)) return -EFAULT; if (sctp_is_any(sk, (const union sctp_addr *)&val.spt_address)) { asoc = sctp_id2assoc(sk, val.spt_assoc_id); if (!asoc) return -ENOENT; val.spt_pathpfthld = asoc->pf_retrans; val.spt_pathmaxrxt = asoc->pathmaxrxt; } else { trans = sctp_addr_id2transport(sk, &val.spt_address, val.spt_assoc_id); if (!trans) return -ENOENT; val.spt_pathmaxrxt = trans->pathmaxrxt; val.spt_pathpfthld = trans->pf_retrans; } if (put_user(len, optlen) || copy_to_user(optval, &val, len)) return -EFAULT; return 0; } /* * SCTP_GET_ASSOC_STATS * * 
This option retrieves local per endpoint statistics. It is modeled
 * after OpenSolaris' implementation
 */
static int sctp_getsockopt_assoc_stats(struct sock *sk, int len,
				       char __user *optval,
				       int __user *optlen)
{
	struct sctp_assoc_stats sas;
	struct sctp_association *asoc = NULL;

	/* User must provide at least the assoc id */
	if (len < sizeof(sctp_assoc_t))
		return -EINVAL;

	/* Allow the struct to grow and fill in as much as possible */
	len = min_t(size_t, len, sizeof(sas));

	if (copy_from_user(&sas, optval, len))
		return -EFAULT;

	asoc = sctp_id2assoc(sk, sas.sas_assoc_id);
	if (!asoc)
		return -EINVAL;

	/* Snapshot the association's counters into the reply. */
	sas.sas_rtxchunks = asoc->stats.rtxchunks;
	sas.sas_gapcnt = asoc->stats.gapcnt;
	sas.sas_outofseqtsns = asoc->stats.outofseqtsns;
	sas.sas_osacks = asoc->stats.osacks;
	sas.sas_isacks = asoc->stats.isacks;
	sas.sas_octrlchunks = asoc->stats.octrlchunks;
	sas.sas_ictrlchunks = asoc->stats.ictrlchunks;
	sas.sas_oodchunks = asoc->stats.oodchunks;
	sas.sas_iodchunks = asoc->stats.iodchunks;
	sas.sas_ouodchunks = asoc->stats.ouodchunks;
	sas.sas_iuodchunks = asoc->stats.iuodchunks;
	sas.sas_idupchunks = asoc->stats.idupchunks;
	sas.sas_opackets = asoc->stats.opackets;
	sas.sas_ipackets = asoc->stats.ipackets;

	/* New high max rto observed, will return 0 if not a single
	 * RTO update took place. obs_rto_ipaddr will be bogus
	 * in such a case
	 */
	sas.sas_maxrto = asoc->stats.max_obs_rto;
	memcpy(&sas.sas_obs_rto_ipaddr, &asoc->stats.obs_rto_ipaddr,
		sizeof(struct sockaddr_storage));

	/* Mark beginning of a new observation period */
	asoc->stats.max_obs_rto = asoc->rto_min;

	if (put_user(len, optlen))
		return -EFAULT;

	SCTP_DEBUG_PRINTK("sctp_getsockopt_assoc_stat(%d): %d\n",
			  len, sas.sas_assoc_id);

	if (copy_to_user(optval, &sas, len))
		return -EFAULT;

	return 0;
}

/* Top-level SOL_SCTP getsockopt entry point: validates the level,
 * reads the user's buffer length and dispatches to the per-option
 * handler under the socket lock.  Non-SCTP levels are forwarded to
 * the address family's own getsockopt.
 */
SCTP_STATIC int sctp_getsockopt(struct sock *sk, int level, int optname,
				char __user *optval, int __user *optlen)
{
	int retval = 0;
	int len;

	SCTP_DEBUG_PRINTK("sctp_getsockopt(sk: %p... optname: %d)\n",
			  sk, optname);

	/* I can hardly begin to describe how wrong this is.  This is
	 * so broken as to be worse than useless.  The API draft
	 * REALLY is NOT helpful here...  I am not convinced that the
	 * semantics of getsockopt() with a level OTHER THAN SOL_SCTP
	 * are at all well-founded.
	 */
	if (level != SOL_SCTP) {
		struct sctp_af *af = sctp_sk(sk)->pf->af;

		retval = af->getsockopt(sk, level, optname, optval, optlen);
		return retval;
	}

	if (get_user(len, optlen))
		return -EFAULT;

	sctp_lock_sock(sk);

	switch (optname) {
	case SCTP_STATUS:
		retval = sctp_getsockopt_sctp_status(sk, len, optval, optlen);
		break;
	case SCTP_DISABLE_FRAGMENTS:
		retval = sctp_getsockopt_disable_fragments(sk, len, optval,
							   optlen);
		break;
	case SCTP_EVENTS:
		retval = sctp_getsockopt_events(sk, len, optval, optlen);
		break;
	case SCTP_AUTOCLOSE:
		retval = sctp_getsockopt_autoclose(sk, len, optval, optlen);
		break;
	case SCTP_SOCKOPT_PEELOFF:
		retval = sctp_getsockopt_peeloff(sk, len, optval, optlen);
		break;
	case SCTP_PEER_ADDR_PARAMS:
		retval = sctp_getsockopt_peer_addr_params(sk, len, optval,
							  optlen);
		break;
	case SCTP_DELAYED_SACK:
		retval = sctp_getsockopt_delayed_ack(sk, len, optval,
						     optlen);
		break;
	case SCTP_INITMSG:
		retval = sctp_getsockopt_initmsg(sk, len, optval, optlen);
		break;
	case SCTP_GET_PEER_ADDRS:
		retval = sctp_getsockopt_peer_addrs(sk, len, optval,
						    optlen);
		break;
	case SCTP_GET_LOCAL_ADDRS:
		retval = sctp_getsockopt_local_addrs(sk, len, optval,
						     optlen);
		break;
	case SCTP_SOCKOPT_CONNECTX3:
		retval = sctp_getsockopt_connectx3(sk, len, optval, optlen);
		break;
	case SCTP_DEFAULT_SEND_PARAM:
		retval = sctp_getsockopt_default_send_param(sk, len,
							    optval, optlen);
		break;
	case SCTP_PRIMARY_ADDR:
		retval = sctp_getsockopt_primary_addr(sk, len, optval, optlen);
		break;
	case SCTP_NODELAY:
		retval = sctp_getsockopt_nodelay(sk, len, optval, optlen);
		break;
	case SCTP_RTOINFO:
		retval = sctp_getsockopt_rtoinfo(sk, len, optval, optlen);
		break;
	case SCTP_ASSOCINFO:
		retval = sctp_getsockopt_associnfo(sk, len, optval, optlen);
		break;
	case SCTP_I_WANT_MAPPED_V4_ADDR:
		retval = sctp_getsockopt_mappedv4(sk, len, optval, optlen);
		break;
	case SCTP_MAXSEG:
		retval = sctp_getsockopt_maxseg(sk, len, optval, optlen);
		break;
	case SCTP_GET_PEER_ADDR_INFO:
		retval = sctp_getsockopt_peer_addr_info(sk, len, optval,
							optlen);
		break;
	case SCTP_ADAPTATION_LAYER:
		retval = sctp_getsockopt_adaptation_layer(sk, len, optval,
							  optlen);
		break;
	case SCTP_CONTEXT:
		retval = sctp_getsockopt_context(sk, len, optval, optlen);
		break;
	case SCTP_FRAGMENT_INTERLEAVE:
		retval = sctp_getsockopt_fragment_interleave(sk, len, optval,
							     optlen);
		break;
	case SCTP_PARTIAL_DELIVERY_POINT:
		retval = sctp_getsockopt_partial_delivery_point(sk, len, optval,
								optlen);
		break;
	case SCTP_MAX_BURST:
		retval = sctp_getsockopt_maxburst(sk, len, optval, optlen);
		break;
	case SCTP_AUTH_KEY:
	case SCTP_AUTH_CHUNK:
	case SCTP_AUTH_DELETE_KEY:
		/* Write-only options: keys must never be readable. */
		retval = -EOPNOTSUPP;
		break;
	case SCTP_HMAC_IDENT:
		retval = sctp_getsockopt_hmac_ident(sk, len, optval, optlen);
		break;
	case SCTP_AUTH_ACTIVE_KEY:
		retval = sctp_getsockopt_active_key(sk, len, optval, optlen);
		break;
	case SCTP_PEER_AUTH_CHUNKS:
		retval = sctp_getsockopt_peer_auth_chunks(sk, len, optval,
							optlen);
		break;
	case SCTP_LOCAL_AUTH_CHUNKS:
		retval = sctp_getsockopt_local_auth_chunks(sk, len, optval,
							optlen);
		break;
	case SCTP_GET_ASSOC_NUMBER:
		retval = sctp_getsockopt_assoc_number(sk, len, optval, optlen);
		break;
	case SCTP_GET_ASSOC_ID_LIST:
		retval = sctp_getsockopt_assoc_ids(sk, len, optval, optlen);
		break;
	case SCTP_AUTO_ASCONF:
		retval = sctp_getsockopt_auto_asconf(sk, len, optval, optlen);
		break;
	case SCTP_PEER_ADDR_THLDS:
		retval = sctp_getsockopt_paddr_thresholds(sk, optval, len, optlen);
		break;
	case SCTP_GET_ASSOC_STATS:
		retval = sctp_getsockopt_assoc_stats(sk, len, optval, optlen);
		break;
	default:
		retval = -ENOPROTOOPT;
		break;
	}

	sctp_release_sock(sk);
	return retval;
}

static void sctp_hash(struct sock *sk)
{
	/* STUB */
}

static void sctp_unhash(struct sock *sk)
{
	/* STUB */
}

/* Check if port is
acceptable.  Possibly find first available port.
 *
 * The port hash table (contained in the 'global' SCTP protocol storage
 * returned by struct sctp_protocol *sctp_get_protocol()). The hash
 * table is an array of 4096 lists (sctp_bind_hashbucket). Each
 * list (the list number is the port number hashed out, so as you
 * would expect from a hash function, all the ports in a given list have
 * such a number that hashes out to the same list number; you were
 * expecting that, right?); so each list has a set of ports, with a
 * link to the socket (struct sock) that uses it, the port number and
 * a fastreuse flag (FIXME: NPI ipg).
 */
static struct sctp_bind_bucket *sctp_bucket_create(
	struct sctp_bind_hashbucket *head, struct net *, unsigned short snum);

/* Bind @sk to the port carried in @addr, or to an ephemeral port when
 * the port is zero.  Returns 0 on success, non-zero on conflict or
 * exhaustion.  Runs with BHs disabled; each probed hash bucket's
 * spinlock is taken while it is examined.
 */
static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
{
	struct sctp_bind_hashbucket *head; /* hash list */
	struct sctp_bind_bucket *pp;
	unsigned short snum;
	int ret;

	snum = ntohs(addr->v4.sin_port);

	SCTP_DEBUG_PRINTK("sctp_get_port() begins, snum=%d\n", snum);

	sctp_local_bh_disable();

	if (snum == 0) {
		/* Search for an available port. */
		int low, high, remaining, index;
		unsigned int rover;

		inet_get_local_port_range(&low, &high);
		remaining = (high - low) + 1;
		/* Start probing from a random point in the range. */
		rover = net_random() % remaining + low;

		do {
			rover++;
			if ((rover < low) || (rover > high))
				rover = low;
			if (inet_is_reserved_local_port(rover))
				continue;
			index = sctp_phashfn(sock_net(sk), rover);
			head = &sctp_port_hashtable[index];
			sctp_spin_lock(&head->lock);
			/* A bucket hit means the port is taken in this
			 * namespace; move on to the next candidate.
			 */
			sctp_for_each_hentry(pp, &head->chain)
				if ((pp->port == rover) &&
				    net_eq(sock_net(sk), pp->net))
					goto next;
			break;
		next:
			sctp_spin_unlock(&head->lock);
		} while (--remaining > 0);

		/* Exhausted local port range during search? */
		ret = 1;
		if (remaining <= 0)
			goto fail;

		/* OK, here is the one we will use.  HEAD (the port
		 * hash table list entry) is non-NULL and we hold it's
		 * mutex.
		 */
		snum = rover;
	} else {
		/* We are given an specific port number; we verify
		 * that it is not being used. If it is used, we will
		 * exahust the search in the hash list corresponding
		 * to the port number (snum) - we detect that with the
		 * port iterator, pp being NULL.
		 */
		head = &sctp_port_hashtable[sctp_phashfn(sock_net(sk), snum)];
		sctp_spin_lock(&head->lock);
		sctp_for_each_hentry(pp, &head->chain) {
			if ((pp->port == snum) && net_eq(pp->net, sock_net(sk)))
				goto pp_found;
		}
	}
	pp = NULL;
	goto pp_not_found;
pp_found:
	if (!hlist_empty(&pp->owner)) {
		/* We had a port hash table hit - there is an
		 * available port (pp != NULL) and it is being
		 * used by other socket (pp->owner not empty); that other
		 * socket is going to be sk2.
		 */
		int reuse = sk->sk_reuse;
		struct sock *sk2;

		SCTP_DEBUG_PRINTK("sctp_get_port() found a possible match\n");
		/* Fast path: everyone on this port opted into reuse. */
		if (pp->fastreuse && sk->sk_reuse &&
			sk->sk_state != SCTP_SS_LISTENING)
			goto success;

		/* Run through the list of sockets bound to the port
		 * (pp->port) [via the pointers bind_next and
		 * bind_pprev in the struct sock *sk2 (pp->sk)]. On each one,
		 * we get the endpoint they describe and run through
		 * the endpoint's list of IP (v4 or v6) addresses,
		 * comparing each of the addresses with the address of
		 * the socket sk. If we find a match, then that means
		 * that this port/socket (sk) combination are already
		 * in an endpoint.
		 */
		sk_for_each_bound(sk2, &pp->owner) {
			struct sctp_endpoint *ep2;
			ep2 = sctp_sk(sk2)->ep;

			if (sk == sk2 ||
			    (reuse && sk2->sk_reuse &&
			     sk2->sk_state != SCTP_SS_LISTENING))
				continue;

			if (sctp_bind_addr_conflict(&ep2->base.bind_addr, addr,
						 sctp_sk(sk2), sctp_sk(sk))) {
				ret = (long)sk2;
				goto fail_unlock;
			}
		}
		SCTP_DEBUG_PRINTK("sctp_get_port(): Found a match\n");
	}
pp_not_found:
	/* If there was a hash table miss, create a new port.  */
	ret = 1;
	if (!pp && !(pp = sctp_bucket_create(head, sock_net(sk), snum)))
		goto fail_unlock;

	/* In either case (hit or miss), make sure fastreuse is 1 only
	 * if sk->sk_reuse is too (that is, if the caller requested
	 * SO_REUSEADDR on this socket -sk-).
	 */
	if (hlist_empty(&pp->owner)) {
		if (sk->sk_reuse && sk->sk_state != SCTP_SS_LISTENING)
			pp->fastreuse = 1;
		else
			pp->fastreuse = 0;
	} else if (pp->fastreuse &&
		(!sk->sk_reuse || sk->sk_state == SCTP_SS_LISTENING))
		pp->fastreuse = 0;

	/* We are set, so fill up all the data in the hash table
	 * entry, tie the socket list information with the rest of the
	 * sockets FIXME: Blurry, NPI (ipg).
	 */
success:
	if (!sctp_sk(sk)->bind_hash) {
		inet_sk(sk)->inet_num = snum;
		sk_add_bind_node(sk, &pp->owner);
		sctp_sk(sk)->bind_hash = pp;
	}
	ret = 0;

fail_unlock:
	sctp_spin_unlock(&head->lock);

fail:
	sctp_local_bh_enable();
	return ret;
}

/* Assign a 'snum' port to the socket.  If snum == 0, an ephemeral
 * port is requested.
 */
static int sctp_get_port(struct sock *sk, unsigned short snum)
{
	long ret;
	union sctp_addr addr;
	struct sctp_af *af = sctp_sk(sk)->pf->af;

	/* Set up a dummy address struct from the sk. */
	af->from_sk(&addr, sk);
	addr.v4.sin_port = htons(snum);

	/* Note: sk->sk_num gets filled in if ephemeral port request. */
	ret = sctp_get_port_local(sk, &addr);

	return ret ? 1 : 0;
}

/*
 * Move a socket to LISTENING state.
 */
SCTP_STATIC int sctp_listen_start(struct sock *sk, int backlog)
{
	struct sctp_sock *sp = sctp_sk(sk);
	struct sctp_endpoint *ep = sp->ep;
	struct crypto_hash *tfm = NULL;
	char alg[32];

	/* Allocate HMAC for generating cookie. */
	if (!sp->hmac && sp->sctp_hmac_alg) {
		sprintf(alg, "hmac(%s)", sp->sctp_hmac_alg);
		tfm = crypto_alloc_hash(alg, 0, CRYPTO_ALG_ASYNC);
		if (IS_ERR(tfm)) {
			net_info_ratelimited("failed to load transform for %s: %ld\n",
					     sp->sctp_hmac_alg, PTR_ERR(tfm));
			return -ENOSYS;
		}
		sctp_sk(sk)->hmac = tfm;
	}

	/*
	 * If a bind() or sctp_bindx() is not called prior to a listen()
	 * call that allows new associations to be accepted, the system
	 * picks an ephemeral port and will choose an address set equivalent
	 * to binding with a wildcard address.
	 *
	 * This is not currently spelled out in the SCTP sockets
	 * extensions draft, but follows the practice as seen in TCP
	 * sockets.
	 *
	 */
	sk->sk_state = SCTP_SS_LISTENING;
	if (!ep->base.bind_addr.port) {
		if (sctp_autobind(sk))
			return -EAGAIN;
	} else {
		if (sctp_get_port(sk, inet_sk(sk)->inet_num)) {
			/* Port grab failed: roll the state back. */
			sk->sk_state = SCTP_SS_CLOSED;
			return -EADDRINUSE;
		}
	}

	sk->sk_max_ack_backlog = backlog;
	sctp_hash_endpoint(ep);
	return 0;
}

/*
 * 4.1.3 / 5.1.3 listen()
 *
 *   By default, new associations are not accepted for UDP style sockets.
 *   An application uses listen() to mark a socket as being able to
 *   accept new associations.
 *
 *   On TCP style sockets, applications use listen() to ready the SCTP
 *   endpoint for accepting inbound associations.
 *
 *   On both types of endpoints a backlog of '0' disables listening.
 *
 * Move a socket to LISTENING state.
 */
int sctp_inet_listen(struct socket *sock, int backlog)
{
	struct sock *sk = sock->sk;
	struct sctp_endpoint *ep = sctp_sk(sk)->ep;
	int err = -EINVAL;

	if (unlikely(backlog < 0))
		return err;

	sctp_lock_sock(sk);

	/* Peeled-off sockets are not allowed to listen(). */
	if (sctp_style(sk, UDP_HIGH_BANDWIDTH))
		goto out;

	if (sock->state != SS_UNCONNECTED)
		goto out;

	/* If backlog is zero, disable listening. */
	if (!backlog) {
		if (sctp_sstate(sk, CLOSED))
			goto out;

		err = 0;
		sctp_unhash_endpoint(ep);
		sk->sk_state = SCTP_SS_CLOSED;
		if (sk->sk_reuse)
			sctp_sk(sk)->bind_hash->fastreuse = 1;
		goto out;
	}

	/* If we are already listening, just update the backlog */
	if (sctp_sstate(sk, LISTENING))
		sk->sk_max_ack_backlog = backlog;
	else {
		err = sctp_listen_start(sk, backlog);
		if (err)
			goto out;
	}

	err = 0;
out:
	sctp_release_sock(sk);
	return err;
}

/*
 * This function is done by modeling the current datagram_poll() and the
 * tcp_poll().
Note that, based on these implementations, we don't * lock the socket in this function, even though it seems that, * ideally, locking or some other mechanisms can be used to ensure * the integrity of the counters (sndbuf and wmem_alloc) used * in this place. We assume that we don't need locks either until proven * otherwise. * * Another thing to note is that we include the Async I/O support * here, again, by modeling the current TCP/UDP code. We don't have * a good way to test with it yet. */ unsigned int sctp_poll(struct file *file, struct socket *sock, poll_table *wait) { struct sock *sk = sock->sk; struct sctp_sock *sp = sctp_sk(sk); unsigned int mask; poll_wait(file, sk_sleep(sk), wait); /* A TCP-style listening socket becomes readable when the accept queue * is not empty. */ if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING)) return (!list_empty(&sp->ep->asocs)) ? (POLLIN | POLLRDNORM) : 0; mask = 0; /* Is there any exceptional events? */ if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue)) mask |= POLLERR | (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? POLLPRI : 0); if (sk->sk_shutdown & RCV_SHUTDOWN) mask |= POLLRDHUP | POLLIN | POLLRDNORM; if (sk->sk_shutdown == SHUTDOWN_MASK) mask |= POLLHUP; /* Is it readable? Reconsider this code with TCP-style support. */ if (!skb_queue_empty(&sk->sk_receive_queue)) mask |= POLLIN | POLLRDNORM; /* The association is either gone or not ready. */ if (!sctp_style(sk, UDP) && sctp_sstate(sk, CLOSED)) return mask; /* Is it writable? */ if (sctp_writeable(sk)) { mask |= POLLOUT | POLLWRNORM; } else { set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); /* * Since the socket is not locked, the buffer * might be made available after the writeable check and * before the bit is set. This could cause a lost I/O * signal. tcp_poll() has a race breaker for this race * condition. Based on their implementation, we put * in the following code to cover it as well. 
*/ if (sctp_writeable(sk)) mask |= POLLOUT | POLLWRNORM; } return mask; } /******************************************************************** * 2nd Level Abstractions ********************************************************************/ static struct sctp_bind_bucket *sctp_bucket_create( struct sctp_bind_hashbucket *head, struct net *net, unsigned short snum) { struct sctp_bind_bucket *pp; pp = kmem_cache_alloc(sctp_bucket_cachep, GFP_ATOMIC); if (pp) { SCTP_DBG_OBJCNT_INC(bind_bucket); pp->port = snum; pp->fastreuse = 0; INIT_HLIST_HEAD(&pp->owner); pp->net = net; hlist_add_head(&pp->node, &head->chain); } return pp; } /* Caller must hold hashbucket lock for this tb with local BH disabled */ static void sctp_bucket_destroy(struct sctp_bind_bucket *pp) { if (pp && hlist_empty(&pp->owner)) { __hlist_del(&pp->node); kmem_cache_free(sctp_bucket_cachep, pp); SCTP_DBG_OBJCNT_DEC(bind_bucket); } } /* Release this socket's reference to a local port. */ static inline void __sctp_put_port(struct sock *sk) { struct sctp_bind_hashbucket *head = &sctp_port_hashtable[sctp_phashfn(sock_net(sk), inet_sk(sk)->inet_num)]; struct sctp_bind_bucket *pp; sctp_spin_lock(&head->lock); pp = sctp_sk(sk)->bind_hash; __sk_del_bind_node(sk); sctp_sk(sk)->bind_hash = NULL; inet_sk(sk)->inet_num = 0; sctp_bucket_destroy(pp); sctp_spin_unlock(&head->lock); } void sctp_put_port(struct sock *sk) { sctp_local_bh_disable(); __sctp_put_port(sk); sctp_local_bh_enable(); } /* * The system picks an ephemeral port and choose an address set equivalent * to binding with a wildcard address. * One of those addresses will be the primary address for the association. * This automatically enables the multihoming capability of SCTP. */ static int sctp_autobind(struct sock *sk) { union sctp_addr autoaddr; struct sctp_af *af; __be16 port; /* Initialize a local sockaddr structure to INADDR_ANY. 
*/ af = sctp_sk(sk)->pf->af; port = htons(inet_sk(sk)->inet_num); af->inaddr_any(&autoaddr, port); return sctp_do_bind(sk, &autoaddr, af->sockaddr_len); } /* Parse out IPPROTO_SCTP CMSG headers. Perform only minimal validation. * * From RFC 2292 * 4.2 The cmsghdr Structure * * * When ancillary data is sent or received, any number of ancillary data * objects can be specified by the msg_control and msg_controllen members of * the msghdr structure, because each object is preceded by * a cmsghdr structure defining the object's length (the cmsg_len member). * Historically Berkeley-derived implementations have passed only one object * at a time, but this API allows multiple objects to be * passed in a single call to sendmsg() or recvmsg(). The following example * shows two ancillary data objects in a control buffer. * * |<--------------------------- msg_controllen -------------------------->| * | | * * |<----- ancillary data object ----->|<----- ancillary data object ----->| * * |<---------- CMSG_SPACE() --------->|<---------- CMSG_SPACE() --------->| * | | | * * |<---------- cmsg_len ---------->| |<--------- cmsg_len ----------->| | * * |<--------- CMSG_LEN() --------->| |<-------- CMSG_LEN() ---------->| | * | | | | | * * +-----+-----+-----+--+-----------+--+-----+-----+-----+--+-----------+--+ * |cmsg_|cmsg_|cmsg_|XX| |XX|cmsg_|cmsg_|cmsg_|XX| |XX| * * |len |level|type |XX|cmsg_data[]|XX|len |level|type |XX|cmsg_data[]|XX| * * +-----+-----+-----+--+-----------+--+-----+-----+-----+--+-----------+--+ * ^ * | * * msg_control * points here */ SCTP_STATIC int sctp_msghdr_parse(const struct msghdr *msg, sctp_cmsgs_t *cmsgs) { struct cmsghdr *cmsg; struct msghdr *my_msg = (struct msghdr *)msg; for (cmsg = CMSG_FIRSTHDR(msg); cmsg != NULL; cmsg = CMSG_NXTHDR(my_msg, cmsg)) { if (!CMSG_OK(my_msg, cmsg)) return -EINVAL; /* Should we parse this header or ignore? */ if (cmsg->cmsg_level != IPPROTO_SCTP) continue; /* Strictly check lengths following example in SCM code. 
*/ switch (cmsg->cmsg_type) { case SCTP_INIT: /* SCTP Socket API Extension * 5.2.1 SCTP Initiation Structure (SCTP_INIT) * * This cmsghdr structure provides information for * initializing new SCTP associations with sendmsg(). * The SCTP_INITMSG socket option uses this same data * structure. This structure is not used for * recvmsg(). * * cmsg_level cmsg_type cmsg_data[] * ------------ ------------ ---------------------- * IPPROTO_SCTP SCTP_INIT struct sctp_initmsg */ if (cmsg->cmsg_len != CMSG_LEN(sizeof(struct sctp_initmsg))) return -EINVAL; cmsgs->init = (struct sctp_initmsg *)CMSG_DATA(cmsg); break; case SCTP_SNDRCV: /* SCTP Socket API Extension * 5.2.2 SCTP Header Information Structure(SCTP_SNDRCV) * * This cmsghdr structure specifies SCTP options for * sendmsg() and describes SCTP header information * about a received message through recvmsg(). * * cmsg_level cmsg_type cmsg_data[] * ------------ ------------ ---------------------- * IPPROTO_SCTP SCTP_SNDRCV struct sctp_sndrcvinfo */ if (cmsg->cmsg_len != CMSG_LEN(sizeof(struct sctp_sndrcvinfo))) return -EINVAL; cmsgs->info = (struct sctp_sndrcvinfo *)CMSG_DATA(cmsg); /* Minimally, validate the sinfo_flags. */ if (cmsgs->info->sinfo_flags & ~(SCTP_UNORDERED | SCTP_ADDR_OVER | SCTP_ABORT | SCTP_EOF)) return -EINVAL; break; default: return -EINVAL; } } return 0; } /* * Wait for a packet.. * Note: This function is the same function as in core/datagram.c * with a few modifications to make lksctp work. */ static int sctp_wait_for_packet(struct sock * sk, int *err, long *timeo_p) { int error; DEFINE_WAIT(wait); prepare_to_wait_exclusive(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); /* Socket errors? */ error = sock_error(sk); if (error) goto out; if (!skb_queue_empty(&sk->sk_receive_queue)) goto ready; /* Socket shut down? */ if (sk->sk_shutdown & RCV_SHUTDOWN) goto out; /* Sequenced packets can come disconnected. If so we report the * problem. 
*/ error = -ENOTCONN; /* Is there a good reason to think that we may receive some data? */ if (list_empty(&sctp_sk(sk)->ep->asocs) && !sctp_sstate(sk, LISTENING)) goto out; /* Handle signals. */ if (signal_pending(current)) goto interrupted; /* Let another process have a go. Since we are going to sleep * anyway. Note: This may cause odd behaviors if the message * does not fit in the user's buffer, but this seems to be the * only way to honor MSG_DONTWAIT realistically. */ sctp_release_sock(sk); *timeo_p = schedule_timeout(*timeo_p); sctp_lock_sock(sk); ready: finish_wait(sk_sleep(sk), &wait); return 0; interrupted: error = sock_intr_errno(*timeo_p); out: finish_wait(sk_sleep(sk), &wait); *err = error; return error; } /* Receive a datagram. * Note: This is pretty much the same routine as in core/datagram.c * with a few changes to make lksctp work. */ static struct sk_buff *sctp_skb_recv_datagram(struct sock *sk, int flags, int noblock, int *err) { int error; struct sk_buff *skb; long timeo; timeo = sock_rcvtimeo(sk, noblock); SCTP_DEBUG_PRINTK("Timeout: timeo: %ld, MAX: %ld.\n", timeo, MAX_SCHEDULE_TIMEOUT); do { /* Again only user level code calls this function, * so nothing interrupt level * will suddenly eat the receive_queue. * * Look at current nfs client by the way... * However, this function was correct in any case. 8) */ if (flags & MSG_PEEK) { spin_lock_bh(&sk->sk_receive_queue.lock); skb = skb_peek(&sk->sk_receive_queue); if (skb) atomic_inc(&skb->users); spin_unlock_bh(&sk->sk_receive_queue.lock); } else { skb = skb_dequeue(&sk->sk_receive_queue); } if (skb) return skb; /* Caller is allowed not to check sk->sk_err before calling. */ error = sock_error(sk); if (error) goto no_packet; if (sk->sk_shutdown & RCV_SHUTDOWN) break; /* User doesn't want to wait. 
*/ error = -EAGAIN; if (!timeo) goto no_packet; } while (sctp_wait_for_packet(sk, err, &timeo) == 0); return NULL; no_packet: *err = error; return NULL; } /* If sndbuf has changed, wake up per association sndbuf waiters. */ static void __sctp_write_space(struct sctp_association *asoc) { struct sock *sk = asoc->base.sk; struct socket *sock = sk->sk_socket; if ((sctp_wspace(asoc) > 0) && sock) { if (waitqueue_active(&asoc->wait)) wake_up_interruptible(&asoc->wait); if (sctp_writeable(sk)) { wait_queue_head_t *wq = sk_sleep(sk); if (wq && waitqueue_active(wq)) wake_up_interruptible(wq); /* Note that we try to include the Async I/O support * here by modeling from the current TCP/UDP code. * We have not tested with it yet. */ if (!(sk->sk_shutdown & SEND_SHUTDOWN)) sock_wake_async(sock, SOCK_WAKE_SPACE, POLL_OUT); } } } static void sctp_wake_up_waiters(struct sock *sk, struct sctp_association *asoc) { struct sctp_association *tmp = asoc; /* We do accounting for the sndbuf space per association, * so we only need to wake our own association. */ if (asoc->ep->sndbuf_policy) return __sctp_write_space(asoc); /* If association goes down and is just flushing its * outq, then just normally notify others. */ if (asoc->base.dead) return sctp_write_space(sk); /* Accounting for the sndbuf space is per socket, so we * need to wake up others, try to be fair and in case of * other associations, let them have a go first instead * of just doing a sctp_write_space() call. * * Note that we reach sctp_wake_up_waiters() only when * associations free up queued chunks, thus we are under * lock and the list of associations on a socket is * guaranteed not to change. */ for (tmp = list_next_entry(tmp, asocs); 1; tmp = list_next_entry(tmp, asocs)) { /* Manually skip the head element. */ if (&tmp->asocs == &((sctp_sk(sk))->ep->asocs)) continue; /* Wake up association. */ __sctp_write_space(tmp); /* We've reached the end. */ if (tmp == asoc) break; } } /* Do accounting for the sndbuf space. 
* Decrement the used sndbuf space of the corresponding association by the * data size which was just transmitted(freed). */ static void sctp_wfree(struct sk_buff *skb) { struct sctp_association *asoc; struct sctp_chunk *chunk; struct sock *sk; /* Get the saved chunk pointer. */ chunk = *((struct sctp_chunk **)(skb->cb)); asoc = chunk->asoc; sk = asoc->base.sk; asoc->sndbuf_used -= SCTP_DATA_SNDSIZE(chunk) + sizeof(struct sk_buff) + sizeof(struct sctp_chunk); atomic_sub(sizeof(struct sctp_chunk), &sk->sk_wmem_alloc); /* * This undoes what is done via sctp_set_owner_w and sk_mem_charge */ sk->sk_wmem_queued -= skb->truesize; sk_mem_uncharge(sk, skb->truesize); sock_wfree(skb); sctp_wake_up_waiters(sk, asoc); sctp_association_put(asoc); } /* Do accounting for the receive space on the socket. * Accounting for the association is done in ulpevent.c * We set this as a destructor for the cloned data skbs so that * accounting is done at the correct time. */ void sctp_sock_rfree(struct sk_buff *skb) { struct sock *sk = skb->sk; struct sctp_ulpevent *event = sctp_skb2event(skb); atomic_sub(event->rmem_len, &sk->sk_rmem_alloc); /* * Mimic the behavior of sock_rfree */ sk_mem_uncharge(sk, event->rmem_len); } /* Helper function to wait for space in the sndbuf. */ static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p, size_t msg_len) { struct sock *sk = asoc->base.sk; int err = 0; long current_timeo = *timeo_p; DEFINE_WAIT(wait); SCTP_DEBUG_PRINTK("wait_for_sndbuf: asoc=%p, timeo=%ld, msg_len=%zu\n", asoc, (long)(*timeo_p), msg_len); /* Increment the association's refcnt. */ sctp_association_hold(asoc); /* Wait on the association specific sndbuf space. 
*/ for (;;) { prepare_to_wait_exclusive(&asoc->wait, &wait, TASK_INTERRUPTIBLE); if (!*timeo_p) goto do_nonblock; if (sk->sk_err || asoc->state >= SCTP_STATE_SHUTDOWN_PENDING || asoc->base.dead) goto do_error; if (signal_pending(current)) goto do_interrupted; if (msg_len <= sctp_wspace(asoc)) break; /* Let another process have a go. Since we are going * to sleep anyway. */ sctp_release_sock(sk); current_timeo = schedule_timeout(current_timeo); BUG_ON(sk != asoc->base.sk); sctp_lock_sock(sk); *timeo_p = current_timeo; } out: finish_wait(&asoc->wait, &wait); /* Release the association's refcnt. */ sctp_association_put(asoc); return err; do_error: err = -EPIPE; goto out; do_interrupted: err = sock_intr_errno(*timeo_p); goto out; do_nonblock: err = -EAGAIN; goto out; } void sctp_data_ready(struct sock *sk, int len) { struct socket_wq *wq; rcu_read_lock(); wq = rcu_dereference(sk->sk_wq); if (wq_has_sleeper(wq)) wake_up_interruptible_sync_poll(&wq->wait, POLLIN | POLLRDNORM | POLLRDBAND); sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN); rcu_read_unlock(); } /* If socket sndbuf has changed, wake up all per association waiters. */ void sctp_write_space(struct sock *sk) { struct sctp_association *asoc; /* Wake up the tasks in each wait queue. */ list_for_each_entry(asoc, &((sctp_sk(sk))->ep->asocs), asocs) { __sctp_write_space(asoc); } } /* Is there any sndbuf space available on the socket? * * Note that sk_wmem_alloc is the sum of the send buffers on all of the * associations on the same socket. For a UDP-style socket with * multiple associations, it is possible for it to be "unwriteable" * prematurely. I assume that this is acceptable because * a premature "unwriteable" is better than an accidental "writeable" which * would cause an unwanted block under certain circumstances. For the 1-1 * UDP-style sockets or TCP-style sockets, this code should work. 
* - Daisy */ static int sctp_writeable(struct sock *sk) { int amt = 0; amt = sk->sk_sndbuf - sk_wmem_alloc_get(sk); if (amt < 0) amt = 0; return amt; } /* Wait for an association to go into ESTABLISHED state. If timeout is 0, * returns immediately with EINPROGRESS. */ static int sctp_wait_for_connect(struct sctp_association *asoc, long *timeo_p) { struct sock *sk = asoc->base.sk; int err = 0; long current_timeo = *timeo_p; DEFINE_WAIT(wait); SCTP_DEBUG_PRINTK("%s: asoc=%p, timeo=%ld\n", __func__, asoc, (long)(*timeo_p)); /* Increment the association's refcnt. */ sctp_association_hold(asoc); for (;;) { prepare_to_wait_exclusive(&asoc->wait, &wait, TASK_INTERRUPTIBLE); if (!*timeo_p) goto do_nonblock; if (sk->sk_shutdown & RCV_SHUTDOWN) break; if (sk->sk_err || asoc->state >= SCTP_STATE_SHUTDOWN_PENDING || asoc->base.dead) goto do_error; if (signal_pending(current)) goto do_interrupted; if (sctp_state(asoc, ESTABLISHED)) break; /* Let another process have a go. Since we are going * to sleep anyway. */ sctp_release_sock(sk); current_timeo = schedule_timeout(current_timeo); sctp_lock_sock(sk); *timeo_p = current_timeo; } out: finish_wait(&asoc->wait, &wait); /* Release the association's refcnt. 
*/ sctp_association_put(asoc); return err; do_error: if (asoc->init_err_counter + 1 > asoc->max_init_attempts) err = -ETIMEDOUT; else err = -ECONNREFUSED; goto out; do_interrupted: err = sock_intr_errno(*timeo_p); goto out; do_nonblock: err = -EINPROGRESS; goto out; } static int sctp_wait_for_accept(struct sock *sk, long timeo) { struct sctp_endpoint *ep; int err = 0; DEFINE_WAIT(wait); ep = sctp_sk(sk)->ep; for (;;) { prepare_to_wait_exclusive(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); if (list_empty(&ep->asocs)) { sctp_release_sock(sk); timeo = schedule_timeout(timeo); sctp_lock_sock(sk); } err = -EINVAL; if (!sctp_sstate(sk, LISTENING)) break; err = 0; if (!list_empty(&ep->asocs)) break; err = sock_intr_errno(timeo); if (signal_pending(current)) break; err = -EAGAIN; if (!timeo) break; } finish_wait(sk_sleep(sk), &wait); return err; } static void sctp_wait_for_close(struct sock *sk, long timeout) { DEFINE_WAIT(wait); do { prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); if (list_empty(&sctp_sk(sk)->ep->asocs)) break; sctp_release_sock(sk); timeout = schedule_timeout(timeout); sctp_lock_sock(sk); } while (!signal_pending(current) && timeout); finish_wait(sk_sleep(sk), &wait); } static void sctp_skb_set_owner_r_frag(struct sk_buff *skb, struct sock *sk) { struct sk_buff *frag; if (!skb->data_len) goto done; /* Don't forget the fragments. 
*/ skb_walk_frags(skb, frag) sctp_skb_set_owner_r_frag(frag, sk); done: sctp_skb_set_owner_r(skb, sk); } void sctp_copy_sock(struct sock *newsk, struct sock *sk, struct sctp_association *asoc) { struct inet_sock *inet = inet_sk(sk); struct inet_sock *newinet; newsk->sk_type = sk->sk_type; newsk->sk_bound_dev_if = sk->sk_bound_dev_if; newsk->sk_flags = sk->sk_flags; newsk->sk_no_check = sk->sk_no_check; newsk->sk_reuse = sk->sk_reuse; newsk->sk_shutdown = sk->sk_shutdown; newsk->sk_destruct = inet_sock_destruct; newsk->sk_family = sk->sk_family; newsk->sk_protocol = IPPROTO_SCTP; newsk->sk_backlog_rcv = sk->sk_prot->backlog_rcv; newsk->sk_sndbuf = sk->sk_sndbuf; newsk->sk_rcvbuf = sk->sk_rcvbuf; newsk->sk_lingertime = sk->sk_lingertime; newsk->sk_rcvtimeo = sk->sk_rcvtimeo; newsk->sk_sndtimeo = sk->sk_sndtimeo; newinet = inet_sk(newsk); /* Initialize sk's sport, dport, rcv_saddr and daddr for * getsockname() and getpeername() */ newinet->inet_sport = inet->inet_sport; newinet->inet_saddr = inet->inet_saddr; newinet->inet_rcv_saddr = inet->inet_rcv_saddr; newinet->inet_dport = htons(asoc->peer.port); newinet->pmtudisc = inet->pmtudisc; newinet->inet_id = asoc->next_tsn ^ jiffies; newinet->uc_ttl = inet->uc_ttl; newinet->mc_loop = 1; newinet->mc_ttl = 1; newinet->mc_index = 0; newinet->mc_list = NULL; if (newsk->sk_flags & SK_FLAGS_TIMESTAMP) net_enable_timestamp(); } static inline void sctp_copy_descendant(struct sock *sk_to, const struct sock *sk_from) { int ancestor_size = sizeof(struct inet_sock) + sizeof(struct sctp_sock) - offsetof(struct sctp_sock, auto_asconf_list); if (sk_from->sk_family == PF_INET6) ancestor_size += sizeof(struct ipv6_pinfo); __inet_sk_copy_descendant(sk_to, sk_from, ancestor_size); } /* Populate the fields of the newsk from the oldsk and migrate the assoc * and its messages to the newsk. 
*/ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk, struct sctp_association *assoc, sctp_socket_type_t type) { struct sctp_sock *oldsp = sctp_sk(oldsk); struct sctp_sock *newsp = sctp_sk(newsk); struct sctp_bind_bucket *pp; /* hash list port iterator */ struct sctp_endpoint *newep = newsp->ep; struct sk_buff *skb, *tmp; struct sctp_ulpevent *event; struct sctp_bind_hashbucket *head; /* Migrate socket buffer sizes and all the socket level options to the * new socket. */ newsk->sk_sndbuf = oldsk->sk_sndbuf; newsk->sk_rcvbuf = oldsk->sk_rcvbuf; /* Brute force copy old sctp opt. */ sctp_copy_descendant(newsk, oldsk); /* Restore the ep value that was overwritten with the above structure * copy. */ newsp->ep = newep; newsp->hmac = NULL; /* Hook this new socket in to the bind_hash list. */ head = &sctp_port_hashtable[sctp_phashfn(sock_net(oldsk), inet_sk(oldsk)->inet_num)]; sctp_local_bh_disable(); sctp_spin_lock(&head->lock); pp = sctp_sk(oldsk)->bind_hash; sk_add_bind_node(newsk, &pp->owner); sctp_sk(newsk)->bind_hash = pp; inet_sk(newsk)->inet_num = inet_sk(oldsk)->inet_num; sctp_spin_unlock(&head->lock); sctp_local_bh_enable(); /* Copy the bind_addr list from the original endpoint to the new * endpoint so that we can handle restarts properly */ sctp_bind_addr_dup(&newsp->ep->base.bind_addr, &oldsp->ep->base.bind_addr, GFP_KERNEL); /* Move any messages in the old socket's receive queue that are for the * peeled off association to the new socket's receive queue. */ sctp_skb_for_each(skb, &oldsk->sk_receive_queue, tmp) { event = sctp_skb2event(skb); if (event->asoc == assoc) { __skb_unlink(skb, &oldsk->sk_receive_queue); __skb_queue_tail(&newsk->sk_receive_queue, skb); sctp_skb_set_owner_r_frag(skb, newsk); } } /* Clean up any messages pending delivery due to partial * delivery. Three cases: * 1) No partial deliver; no work. * 2) Peeling off partial delivery; keep pd_lobby in new pd_lobby. 
* 3) Peeling off non-partial delivery; move pd_lobby to receive_queue. */ skb_queue_head_init(&newsp->pd_lobby); atomic_set(&sctp_sk(newsk)->pd_mode, assoc->ulpq.pd_mode); if (atomic_read(&sctp_sk(oldsk)->pd_mode)) { struct sk_buff_head *queue; /* Decide which queue to move pd_lobby skbs to. */ if (assoc->ulpq.pd_mode) { queue = &newsp->pd_lobby; } else queue = &newsk->sk_receive_queue; /* Walk through the pd_lobby, looking for skbs that * need moved to the new socket. */ sctp_skb_for_each(skb, &oldsp->pd_lobby, tmp) { event = sctp_skb2event(skb); if (event->asoc == assoc) { __skb_unlink(skb, &oldsp->pd_lobby); __skb_queue_tail(queue, skb); sctp_skb_set_owner_r_frag(skb, newsk); } } /* Clear up any skbs waiting for the partial * delivery to finish. */ if (assoc->ulpq.pd_mode) sctp_clear_pd(oldsk, NULL); } sctp_skb_for_each(skb, &assoc->ulpq.reasm, tmp) sctp_skb_set_owner_r_frag(skb, newsk); sctp_skb_for_each(skb, &assoc->ulpq.lobby, tmp) sctp_skb_set_owner_r_frag(skb, newsk); /* Set the type of socket to indicate that it is peeled off from the * original UDP-style socket or created with the accept() call on a * TCP-style socket.. */ newsp->type = type; /* Mark the new socket "in-use" by the user so that any packets * that may arrive on the association after we've moved it are * queued to the backlog. This prevents a potential race between * backlog processing on the old socket and new-packet processing * on the new socket. * * The caller has just allocated newsk so we can guarantee that other * paths won't try to lock it and then oldsk. */ lock_sock_nested(newsk, SINGLE_DEPTH_NESTING); sctp_assoc_migrate(assoc, newsk); /* If the association on the newsk is already closed before accept() * is called, set RCV_SHUTDOWN flag. */ if (sctp_state(assoc, CLOSED) && sctp_style(newsk, TCP)) newsk->sk_shutdown |= RCV_SHUTDOWN; newsk->sk_state = SCTP_SS_ESTABLISHED; sctp_release_sock(newsk); } /* This proto struct describes the ULP interface for SCTP. 
*/ struct proto sctp_prot = { .name = "SCTP", .owner = THIS_MODULE, .close = sctp_close, .connect = sctp_connect, .disconnect = sctp_disconnect, .accept = sctp_accept, .ioctl = sctp_ioctl, .init = sctp_init_sock, .destroy = sctp_destroy_sock, .shutdown = sctp_shutdown, .setsockopt = sctp_setsockopt, .getsockopt = sctp_getsockopt, .sendmsg = sctp_sendmsg, .recvmsg = sctp_recvmsg, .bind = sctp_bind, .backlog_rcv = sctp_backlog_rcv, .hash = sctp_hash, .unhash = sctp_unhash, .get_port = sctp_get_port, .obj_size = sizeof(struct sctp_sock), .sysctl_mem = sysctl_sctp_mem, .sysctl_rmem = sysctl_sctp_rmem, .sysctl_wmem = sysctl_sctp_wmem, .memory_pressure = &sctp_memory_pressure, .enter_memory_pressure = sctp_enter_memory_pressure, .memory_allocated = &sctp_memory_allocated, .sockets_allocated = &sctp_sockets_allocated, }; #if IS_ENABLED(CONFIG_IPV6) #include <net/transp_v6.h> static void sctp_v6_destroy_sock(struct sock *sk) { sctp_destroy_sock(sk); inet6_destroy_sock(sk); } struct proto sctpv6_prot = { .name = "SCTPv6", .owner = THIS_MODULE, .close = sctp_close, .connect = sctp_connect, .disconnect = sctp_disconnect, .accept = sctp_accept, .ioctl = sctp_ioctl, .init = sctp_init_sock, .destroy = sctp_v6_destroy_sock, .shutdown = sctp_shutdown, .setsockopt = sctp_setsockopt, .getsockopt = sctp_getsockopt, .sendmsg = sctp_sendmsg, .recvmsg = sctp_recvmsg, .bind = sctp_bind, .backlog_rcv = sctp_backlog_rcv, .hash = sctp_hash, .unhash = sctp_unhash, .get_port = sctp_get_port, .obj_size = sizeof(struct sctp6_sock), .sysctl_mem = sysctl_sctp_mem, .sysctl_rmem = sysctl_sctp_rmem, .sysctl_wmem = sysctl_sctp_wmem, .memory_pressure = &sctp_memory_pressure, .enter_memory_pressure = sctp_enter_memory_pressure, .memory_allocated = &sctp_memory_allocated, .sockets_allocated = &sctp_sockets_allocated, }; #endif /* IS_ENABLED(CONFIG_IPV6) */
gpl-2.0
davet321/rpi-linux
arch/cris/arch-v32/drivers/pci/dma.c
168
2000
/* * Dynamic DMA mapping support. * * On cris there is no hardware dynamic DMA address translation, * so consistent alloc/free are merely page allocation/freeing. * The rest of the dynamic DMA mapping interface is implemented * in asm/pci.h. * * Borrowed from i386. */ #include <linux/types.h> #include <linux/mm.h> #include <linux/string.h> #include <linux/pci.h> #include <linux/gfp.h> #include <asm/io.h> static void *v32_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs) { void *ret; /* ignore region specifiers */ gfp &= ~(__GFP_DMA | __GFP_HIGHMEM); if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff)) gfp |= GFP_DMA; ret = (void *)__get_free_pages(gfp, get_order(size)); if (ret != NULL) { memset(ret, 0, size); *dma_handle = virt_to_phys(ret); } return ret; } static void v32_dma_free(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle, unsigned long attrs) { free_pages((unsigned long)vaddr, get_order(size)); } static inline dma_addr_t v32_dma_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction direction, unsigned long attrs) { return page_to_phys(page) + offset; } static inline int v32_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction direction, unsigned long attrs) { printk("Map sg\n"); return nents; } static inline int v32_dma_supported(struct device *dev, u64 mask) { /* * we fall back to GFP_DMA when the mask isn't all 1s, * so we can't guarantee allocations that must be * within a tighter range than GFP_DMA.. */ if (mask < 0x00ffffff) return 0; return 1; } struct dma_map_ops v32_dma_ops = { .alloc = v32_dma_alloc, .free = v32_dma_free, .map_page = v32_dma_map_page, .map_sg = v32_dma_map_sg, .dma_supported = v32_dma_supported, }; EXPORT_SYMBOL(v32_dma_ops);
gpl-2.0
arm-embedded/newlib.debian
newlib/libm/test/asinf_vec.c
168
27646
#include "test.h" one_line_type asinf_vec[] = { { 0,13, 37,__LINE__, 0x40500000, 0x00000000, 0xbff33333, 0x33333333}, /* 64.0000=f(-1.20000)*/ { 0,13, 37,__LINE__, 0x40500000, 0x00000000, 0xbff30a3d, 0x70a3d70a}, /* 64.0000=f(-1.19000)*/ { 0,13, 37,__LINE__, 0x40500000, 0x00000000, 0xbff2e147, 0xae147ae1}, /* 64.0000=f(-1.18000)*/ { 0,13, 37,__LINE__, 0x40500000, 0x00000000, 0xbff2b851, 0xeb851eb8}, /* 64.0000=f(-1.17000)*/ { 0,13, 37,__LINE__, 0x40500000, 0x00000000, 0xbff28f5c, 0x28f5c28f}, /* 64.0000=f(-1.16000)*/ { 0,13, 37,__LINE__, 0x40500000, 0x00000000, 0xbff26666, 0x66666666}, /* 64.0000=f(-1.15000)*/ { 0,13, 37,__LINE__, 0x40500000, 0x00000000, 0xbff23d70, 0xa3d70a3d}, /* 64.0000=f(-1.14000)*/ { 0,13, 37,__LINE__, 0x40500000, 0x00000000, 0xbff2147a, 0xe147ae14}, /* 64.0000=f(-1.13000)*/ { 0,13, 37,__LINE__, 0x40500000, 0x00000000, 0xbff1eb85, 0x1eb851eb}, /* 64.0000=f(-1.12000)*/ { 0,13, 37,__LINE__, 0x40500000, 0x00000000, 0xbff1c28f, 0x5c28f5c2}, /* 64.0000=f(-1.11000)*/ { 0,13, 37,__LINE__, 0x40500000, 0x00000000, 0xbff19999, 0x99999999}, /* 64.0000=f(-1.10000)*/ { 0,13, 37,__LINE__, 0x40500000, 0x00000000, 0xbff170a3, 0xd70a3d70}, /* 64.0000=f(-1.09000)*/ { 0,13, 37,__LINE__, 0x40500000, 0x00000000, 0xbff147ae, 0x147ae147}, /* 64.0000=f(-1.08000)*/ { 0,13, 37,__LINE__, 0x40500000, 0x00000000, 0xbff11eb8, 0x51eb851e}, /* 64.0000=f(-1.07000)*/ { 0,13, 37,__LINE__, 0x40500000, 0x00000000, 0xbff0f5c2, 0x8f5c28f5}, /* 64.0000=f(-1.06000)*/ { 0,13, 37,__LINE__, 0x40500000, 0x00000000, 0xbff0cccc, 0xcccccccc}, /* 64.0000=f(-1.05000)*/ { 0,13, 37,__LINE__, 0x40500000, 0x00000000, 0xbff0a3d7, 0x0a3d70a3}, /* 64.0000=f(-1.04000)*/ { 0,13, 37,__LINE__, 0x40500000, 0x00000000, 0xbff07ae1, 0x47ae147a}, /* 64.0000=f(-1.03000)*/ { 0,13, 37,__LINE__, 0x40500000, 0x00000000, 0xbff051eb, 0x851eb851}, /* 64.0000=f(-1.02000)*/ { 0,13, 37,__LINE__, 0x40500000, 0x00000000, 0xbff028f5, 0xc28f5c28}, /* 64.0000=f(-1.01000)*/ {12, 0,123,__LINE__, 0xbff921fb, 0x60000000, 
0xbfefffff, 0xfffffffe}, /* -1.57079=f(-0.01000)*/ {11, 0,123,__LINE__, 0xbff6de3c, 0x80000000, 0xbfefae14, 0x7ae147ac}, /* -1.42925=f(-0.99000)*/ {11, 0,123,__LINE__, 0xbff5ed69, 0x20000000, 0xbfef5c28, 0xf5c28f5a}, /* -1.37046=f(-0.98000)*/ {11, 0,123,__LINE__, 0xbff53425, 0x60000000, 0xbfef0a3d, 0x70a3d708}, /* -1.32523=f(-0.97000)*/ {11, 0,123,__LINE__, 0xbff4978f, 0xa0000000, 0xbfeeb851, 0xeb851eb6}, /* -1.28700=f(-0.96000)*/ {11, 0,123,__LINE__, 0xbff40d41, 0x20000000, 0xbfee6666, 0x66666664}, /* -1.25323=f(-0.95000)*/ {11, 0,123,__LINE__, 0xbff38fe4, 0xe0000000, 0xbfee147a, 0xe147ae12}, /* -1.22263=f(-0.94000)*/ {11, 0,123,__LINE__, 0xbff31c50, 0xc0000000, 0xbfedc28f, 0x5c28f5c0}, /* -1.19441=f(-0.93000)*/ {11, 0,123,__LINE__, 0xbff2b075, 0x40000000, 0xbfed70a3, 0xd70a3d6e}, /* -1.16808=f(-0.92000)*/ {11, 0,123,__LINE__, 0xbff24ae4, 0x60000000, 0xbfed1eb8, 0x51eb851c}, /* -1.14328=f(-0.91000)*/ {11, 0,123,__LINE__, 0xbff1ea93, 0x80000000, 0xbfeccccc, 0xccccccca}, /* -1.11977=f(-0.90000)*/ {11, 0,123,__LINE__, 0xbff18eb9, 0xc0000000, 0xbfec7ae1, 0x47ae1478}, /* -1.09734=f(-0.89000)*/ {11, 0,123,__LINE__, 0xbff136bb, 0x40000000, 0xbfec28f5, 0xc28f5c26}, /* -1.07586=f(-0.88000)*/ {11, 0,123,__LINE__, 0xbff0e21b, 0xe0000000, 0xbfebd70a, 0x3d70a3d4}, /* -1.05520=f(-0.87000)*/ {11, 0,123,__LINE__, 0xbff09077, 0x00000000, 0xbfeb851e, 0xb851eb82}, /* -1.03527=f(-0.86000)*/ {11, 0,123,__LINE__, 0xbff04179, 0xe0000000, 0xbfeb3333, 0x33333330}, /* -1.01598=f(-0.85000)*/ {13, 0,123,__LINE__, 0xbfefe9be, 0x80000000, 0xbfeae147, 0xae147ade}, /* -0.99728=f(-0.84000)*/ {13, 0,123,__LINE__, 0xbfef54d9, 0xc0000000, 0xbfea8f5c, 0x28f5c28c}, /* -0.97910=f(-0.83000)*/ {13, 0,123,__LINE__, 0xbfeec3e1, 0x20000000, 0xbfea3d70, 0xa3d70a3a}, /* -0.96141=f(-0.82000)*/ {13, 0,123,__LINE__, 0xbfee367e, 0xa0000000, 0xbfe9eb85, 0x1eb851e8}, /* -0.94415=f(-0.81000)*/ {13, 0,123,__LINE__, 0xbfedac67, 0x20000000, 0xbfe99999, 0x99999996}, /* -0.92729=f(-0.80000)*/ {13, 0,123,__LINE__, 
0xbfed2559, 0x20000000, 0xbfe947ae, 0x147ae144}, /* -0.91080=f(-0.79000)*/ {13, 0,123,__LINE__, 0xbfeca11a, 0x20000000, 0xbfe8f5c2, 0x8f5c28f2}, /* -0.89466=f(-0.78000)*/ {13, 0,123,__LINE__, 0xbfec1f77, 0x80000000, 0xbfe8a3d7, 0x0a3d70a0}, /* -0.87884=f(-0.77000)*/ {14, 0,123,__LINE__, 0xbfeba043, 0x00000000, 0xbfe851eb, 0x851eb84e}, /* -0.86331=f(-0.76000)*/ {14, 0,123,__LINE__, 0xbfeb2353, 0x40000000, 0xbfe7ffff, 0xfffffffc}, /* -0.84806=f(-0.75000)*/ {12, 0,123,__LINE__, 0xbfeaa883, 0x60000000, 0xbfe7ae14, 0x7ae147aa}, /* -0.83307=f(-0.74000)*/ {12, 0,123,__LINE__, 0xbfea2fb1, 0xa0000000, 0xbfe75c28, 0xf5c28f58}, /* -0.81832=f(-0.73000)*/ {12, 0,123,__LINE__, 0xbfe9b8bf, 0xe0000000, 0xbfe70a3d, 0x70a3d706}, /* -0.80380=f(-0.72000)*/ {12, 0,123,__LINE__, 0xbfe94391, 0xc0000000, 0xbfe6b851, 0xeb851eb4}, /* -0.78949=f(-0.71000)*/ {12, 0,123,__LINE__, 0xbfe8d00e, 0x80000000, 0xbfe66666, 0x66666662}, /* -0.77539=f(-0.70000)*/ {12, 0,123,__LINE__, 0xbfe85e1e, 0x60000000, 0xbfe6147a, 0xe147ae10}, /* -0.76148=f(-0.69000)*/ {14, 0,123,__LINE__, 0xbfe7edac, 0x00000000, 0xbfe5c28f, 0x5c28f5be}, /* -0.74776=f(-0.68000)*/ {14, 0,123,__LINE__, 0xbfe77ea3, 0xa0000000, 0xbfe570a3, 0xd70a3d6c}, /* -0.73420=f(-0.67000)*/ {14, 0,123,__LINE__, 0xbfe710f2, 0xc0000000, 0xbfe51eb8, 0x51eb851a}, /* -0.72081=f(-0.66000)*/ {14, 0,123,__LINE__, 0xbfe6a488, 0x40000000, 0xbfe4cccc, 0xccccccc8}, /* -0.70758=f(-0.65000)*/ {14, 0,123,__LINE__, 0xbfe63954, 0x80000000, 0xbfe47ae1, 0x47ae1476}, /* -0.69449=f(-0.64000)*/ {15, 0,123,__LINE__, 0xbfe5cf48, 0xc0000000, 0xbfe428f5, 0xc28f5c24}, /* -0.68155=f(-0.63000)*/ {13, 0,123,__LINE__, 0xbfe56657, 0x20000000, 0xbfe3d70a, 0x3d70a3d2}, /* -0.66874=f(-0.62000)*/ {13, 0,123,__LINE__, 0xbfe4fe72, 0xe0000000, 0xbfe3851e, 0xb851eb80}, /* -0.65606=f(-0.61000)*/ {13, 0,123,__LINE__, 0xbfe4978f, 0xc0000000, 0xbfe33333, 0x3333332e}, /* -0.64350=f(-0.60000)*/ {13, 0,123,__LINE__, 0xbfe431a2, 0x40000000, 0xbfe2e147, 0xae147adc}, /* -0.63105=f(-0.59000)*/ {15, 
0,123,__LINE__, 0xbfe3cca0, 0x20000000, 0xbfe28f5c, 0x28f5c28a}, /* -0.61872=f(-0.58000)*/ {15, 0,123,__LINE__, 0xbfe3687f, 0x20000000, 0xbfe23d70, 0xa3d70a38}, /* -0.60650=f(-0.57000)*/ {14, 0,123,__LINE__, 0xbfe30535, 0x60000000, 0xbfe1eb85, 0x1eb851e6}, /* -0.59438=f(-0.56000)*/ {14, 0,123,__LINE__, 0xbfe2a2ba, 0x80000000, 0xbfe19999, 0x99999994}, /* -0.58236=f(-0.55000)*/ {14, 0,123,__LINE__, 0xbfe24105, 0x80000000, 0xbfe147ae, 0x147ae142}, /* -0.57043=f(-0.54000)*/ {15, 0,123,__LINE__, 0xbfe1e00e, 0x40000000, 0xbfe0f5c2, 0x8f5c28f0}, /* -0.55860=f(-0.53000)*/ {15, 0,123,__LINE__, 0xbfe17fcd, 0x80000000, 0xbfe0a3d7, 0x0a3d709e}, /* -0.54685=f(-0.52000)*/ {15, 0,123,__LINE__, 0xbfe1203c, 0x00000000, 0xbfe051eb, 0x851eb84c}, /* -0.53518=f(-0.51000)*/ {16, 0,123,__LINE__, 0xbfe0c152, 0x40000000, 0xbfdfffff, 0xfffffff4}, /* -0.52359=f(-0.50000)*/ {10, 0,123,__LINE__, 0xbfe0630a, 0x20000000, 0xbfdf5c28, 0xf5c28f50}, /* -0.51208=f(-0.49000)*/ {10, 0,123,__LINE__, 0xbfe0055d, 0x00000000, 0xbfdeb851, 0xeb851eac}, /* -0.50065=f(-0.48000)*/ {15, 0,123,__LINE__, 0xbfdf508a, 0x40000000, 0xbfde147a, 0xe147ae08}, /* -0.48929=f(-0.47000)*/ {14, 0,123,__LINE__, 0xbfde9779, 0x40000000, 0xbfdd70a3, 0xd70a3d64}, /* -0.47799=f(-0.46000)*/ {15, 0,123,__LINE__, 0xbfdddf7b, 0xa0000000, 0xbfdccccc, 0xccccccc0}, /* -0.46676=f(-0.45000)*/ {15, 0,123,__LINE__, 0xbfdd2887, 0x60000000, 0xbfdc28f5, 0xc28f5c1c}, /* -0.45559=f(-0.44000)*/ {13, 0,123,__LINE__, 0xbfdc7291, 0xe0000000, 0xbfdb851e, 0xb851eb78}, /* -0.44449=f(-0.43000)*/ {15, 0,123,__LINE__, 0xbfdbbd91, 0x60000000, 0xbfdae147, 0xae147ad4}, /* -0.43344=f(-0.42000)*/ {15, 0,123,__LINE__, 0xbfdb097c, 0xc0000000, 0xbfda3d70, 0xa3d70a30}, /* -0.42245=f(-0.41000)*/ {14, 0,123,__LINE__, 0xbfda564a, 0xc0000000, 0xbfd99999, 0x9999998c}, /* -0.41151=f(-0.40000)*/ {15, 0,123,__LINE__, 0xbfd9a3f2, 0xa0000000, 0xbfd8f5c2, 0x8f5c28e8}, /* -0.40063=f(-0.39000)*/ {16, 0,123,__LINE__, 0xbfd8f26c, 0x20000000, 0xbfd851eb, 0x851eb844}, /* 
-0.38979=f(-0.38000)*/ {12, 0,123,__LINE__, 0xbfd841af, 0x20000000, 0xbfd7ae14, 0x7ae147a0}, /* -0.37900=f(-0.37000)*/ {16, 0,123,__LINE__, 0xbfd791b3, 0x80000000, 0xbfd70a3d, 0x70a3d6fc}, /* -0.36826=f(-0.36000)*/ {16, 0,123,__LINE__, 0xbfd6e271, 0xe0000000, 0xbfd66666, 0x66666658}, /* -0.35757=f(-0.35000)*/ {14, 0,123,__LINE__, 0xbfd633e3, 0x00000000, 0xbfd5c28f, 0x5c28f5b4}, /* -0.34691=f(-0.34000)*/ {16, 0,123,__LINE__, 0xbfd585ff, 0x80000000, 0xbfd51eb8, 0x51eb8510}, /* -0.33630=f(-0.33000)*/ {16, 0,123,__LINE__, 0xbfd4d8c0, 0x80000000, 0xbfd47ae1, 0x47ae146c}, /* -0.32572=f(-0.32000)*/ {13, 0,123,__LINE__, 0xbfd42c1f, 0x60000000, 0xbfd3d70a, 0x3d70a3c8}, /* -0.31519=f(-0.31000)*/ {16, 0,123,__LINE__, 0xbfd38015, 0xa0000000, 0xbfd33333, 0x33333324}, /* -0.30469=f(-0.30000)*/ {17, 0,123,__LINE__, 0xbfd2d49c, 0xc0000000, 0xbfd28f5c, 0x28f5c280}, /* -0.29422=f(-0.29000)*/ {14, 0,123,__LINE__, 0xbfd229ae, 0xc0000000, 0xbfd1eb85, 0x1eb851dc}, /* -0.28379=f(-0.28000)*/ {18, 0,123,__LINE__, 0xbfd17f45, 0x80000000, 0xbfd147ae, 0x147ae138}, /* -0.27339=f(-0.27000)*/ {17, 0,123,__LINE__, 0xbfd0d55b, 0x00000000, 0xbfd0a3d7, 0x0a3d7094}, /* -0.26302=f(-0.26000)*/ {18, 0,123,__LINE__, 0xbfd02be9, 0xc0000000, 0xbfcfffff, 0xffffffe0}, /* -0.25268=f(-0.25000)*/ {15, 0,123,__LINE__, 0xbfcf05d8, 0x20000000, 0xbfceb851, 0xeb851e98}, /* -0.24236=f(-0.24000)*/ {16, 0,123,__LINE__, 0xbfcdb4b8, 0xc0000000, 0xbfcd70a3, 0xd70a3d50}, /* -0.23207=f(-0.23000)*/ {17, 0,123,__LINE__, 0xbfcc646a, 0xa0000000, 0xbfcc28f5, 0xc28f5c08}, /* -0.22181=f(-0.22000)*/ {15, 0,123,__LINE__, 0xbfcb14e3, 0x60000000, 0xbfcae147, 0xae147ac0}, /* -0.21157=f(-0.21000)*/ {17, 0,123,__LINE__, 0xbfc9c618, 0xc0000000, 0xbfc99999, 0x99999978}, /* -0.20135=f(-0.20000)*/ {18, 0,123,__LINE__, 0xbfc87800, 0x40000000, 0xbfc851eb, 0x851eb830}, /* -0.19116=f(-0.19000)*/ {18, 0,123,__LINE__, 0xbfc72a90, 0x80000000, 0xbfc70a3d, 0x70a3d6e8}, /* -0.18098=f(-0.18000)*/ {19, 0,123,__LINE__, 0xbfc5ddbf, 0x20000000, 0xbfc5c28f, 
0x5c28f5a0}, /* -0.17082=f(-0.17000)*/ {16, 0,123,__LINE__, 0xbfc49182, 0xe0000000, 0xbfc47ae1, 0x47ae1458}, /* -0.16069=f(-0.16000)*/ {17, 0,123,__LINE__, 0xbfc345d2, 0x40000000, 0xbfc33333, 0x33333310}, /* -0.15056=f(-0.15000)*/ {19, 0,123,__LINE__, 0xbfc1faa3, 0xc0000000, 0xbfc1eb85, 0x1eb851c8}, /* -0.14046=f(-0.14000)*/ {20, 0,123,__LINE__, 0xbfc0afee, 0x40000000, 0xbfc0a3d7, 0x0a3d7080}, /* -0.13036=f(-0.13000)*/ {17, 0,123,__LINE__, 0xbfbecb51, 0x40000000, 0xbfbeb851, 0xeb851e71}, /* -0.12028=f(-0.12000)*/ {19, 0,123,__LINE__, 0xbfbc3793, 0xe0000000, 0xbfbc28f5, 0xc28f5be2}, /* -0.11022=f(-0.11000)*/ {18, 0,123,__LINE__, 0xbfb9a492, 0x80000000, 0xbfb99999, 0x99999953}, /* -0.10016=f(-0.00100)*/ {19, 0,123,__LINE__, 0xbfb7123b, 0x60000000, 0xbfb70a3d, 0x70a3d6c4}, /* -0.09012=f(-0.09000)*/ {16, 0,123,__LINE__, 0xbfb4807d, 0x00000000, 0xbfb47ae1, 0x47ae1435}, /* -0.08008=f(-0.08000)*/ {21, 0,123,__LINE__, 0xbfb1ef46, 0x60000000, 0xbfb1eb85, 0x1eb851a6}, /* -0.07005=f(-0.07000)*/ {21, 0,123,__LINE__, 0xbfaebd0b, 0xc0000000, 0xbfaeb851, 0xeb851e2d}, /* -0.06003=f(-0.06000)*/ {21, 0,123,__LINE__, 0xbfa99c55, 0x80000000, 0xbfa99999, 0x9999990e}, /* -0.05002=f(-0.05000)*/ {21, 0,123,__LINE__, 0xbfa47c47, 0x60000000, 0xbfa47ae1, 0x47ae13ef}, /* -0.04001=f(-0.04000)*/ {23, 0,123,__LINE__, 0xbf9eb980, 0x00000000, 0xbf9eb851, 0xeb851da0}, /* -0.03000=f(-0.03000)*/ {23, 0,123,__LINE__, 0xbf947b3a, 0xc0000000, 0xbf947ae1, 0x47ae1362}, /* -0.02000=f(-0.02000)*/ {27, 0,123,__LINE__, 0xbf847af7, 0xa0000000, 0xbf847ae1, 0x47ae1249}, /* -0.01000=f(-0.00010)*/ { 2, 0,123,__LINE__, 0x3cd19000, 0x00000000, 0x3cd19000, 0x00000000}, /* 9.74915e-16=f(9.74915e-16)*/ {27, 0,123,__LINE__, 0x3f847af7, 0xa0000000, 0x3f847ae1, 0x47ae16ad}, /* 0.01000=f(0.01000)*/ {23, 0,123,__LINE__, 0x3f947b3a, 0xc0000000, 0x3f947ae1, 0x47ae1594}, /* 0.02000=f(0.02000)*/ {23, 0,123,__LINE__, 0x3f9eb980, 0x00000000, 0x3f9eb851, 0xeb851fd2}, /* 0.03000=f(0.03000)*/ {21, 0,123,__LINE__, 0x3fa47c47, 
0x60000000, 0x3fa47ae1, 0x47ae1508}, /* 0.04001=f(0.04000)*/ {21, 0,123,__LINE__, 0x3fa99c55, 0x80000000, 0x3fa99999, 0x99999a27}, /* 0.05002=f(0.05000)*/ {21, 0,123,__LINE__, 0x3faebd0b, 0xc0000000, 0x3faeb851, 0xeb851f46}, /* 0.06003=f(0.06000)*/ {21, 0,123,__LINE__, 0x3fb1ef46, 0x60000000, 0x3fb1eb85, 0x1eb85232}, /* 0.07005=f(0.07000)*/ {16, 0,123,__LINE__, 0x3fb4807d, 0x00000000, 0x3fb47ae1, 0x47ae14c1}, /* 0.08008=f(0.08000)*/ {19, 0,123,__LINE__, 0x3fb7123b, 0x60000000, 0x3fb70a3d, 0x70a3d750}, /* 0.09012=f(0.09000)*/ {18, 0,123,__LINE__, 0x3fb9a492, 0x80000000, 0x3fb99999, 0x999999df}, /* 0.10016=f(0.10000)*/ {19, 0,123,__LINE__, 0x3fbc3793, 0xe0000000, 0x3fbc28f5, 0xc28f5c6e}, /* 0.11022=f(0.11000)*/ {17, 0,123,__LINE__, 0x3fbecb51, 0x40000000, 0x3fbeb851, 0xeb851efd}, /* 0.12028=f(0.12000)*/ {20, 0,123,__LINE__, 0x3fc0afee, 0x40000000, 0x3fc0a3d7, 0x0a3d70c6}, /* 0.13036=f(0.13000)*/ {19, 0,123,__LINE__, 0x3fc1faa3, 0xc0000000, 0x3fc1eb85, 0x1eb8520e}, /* 0.14046=f(0.14000)*/ {17, 0,123,__LINE__, 0x3fc345d2, 0x40000000, 0x3fc33333, 0x33333356}, /* 0.15056=f(0.15000)*/ {16, 0,123,__LINE__, 0x3fc49182, 0xe0000000, 0x3fc47ae1, 0x47ae149e}, /* 0.16069=f(0.16000)*/ {19, 0,123,__LINE__, 0x3fc5ddbf, 0x20000000, 0x3fc5c28f, 0x5c28f5e6}, /* 0.17082=f(0.17000)*/ {18, 0,123,__LINE__, 0x3fc72a90, 0x80000000, 0x3fc70a3d, 0x70a3d72e}, /* 0.18098=f(0.18000)*/ {18, 0,123,__LINE__, 0x3fc87800, 0x40000000, 0x3fc851eb, 0x851eb876}, /* 0.19116=f(0.19000)*/ {17, 0,123,__LINE__, 0x3fc9c618, 0xc0000000, 0x3fc99999, 0x999999be}, /* 0.20135=f(0.20000)*/ {15, 0,123,__LINE__, 0x3fcb14e3, 0x60000000, 0x3fcae147, 0xae147b06}, /* 0.21157=f(0.21000)*/ {17, 0,123,__LINE__, 0x3fcc646a, 0xa0000000, 0x3fcc28f5, 0xc28f5c4e}, /* 0.22181=f(0.22000)*/ {16, 0,123,__LINE__, 0x3fcdb4b8, 0xc0000000, 0x3fcd70a3, 0xd70a3d96}, /* 0.23207=f(0.23000)*/ {15, 0,123,__LINE__, 0x3fcf05d8, 0x20000000, 0x3fceb851, 0xeb851ede}, /* 0.24236=f(0.24000)*/ {18, 0,123,__LINE__, 0x3fd02be9, 0xc0000000, 0x3fd00000, 
0x00000013}, /* 0.25268=f(0.25000)*/ {17, 0,123,__LINE__, 0x3fd0d55b, 0x00000000, 0x3fd0a3d7, 0x0a3d70b7}, /* 0.26302=f(0.26000)*/ {18, 0,123,__LINE__, 0x3fd17f45, 0x80000000, 0x3fd147ae, 0x147ae15b}, /* 0.27339=f(0.27000)*/ {14, 0,123,__LINE__, 0x3fd229ae, 0xc0000000, 0x3fd1eb85, 0x1eb851ff}, /* 0.28379=f(0.28000)*/ {17, 0,123,__LINE__, 0x3fd2d49c, 0xc0000000, 0x3fd28f5c, 0x28f5c2a3}, /* 0.29422=f(0.29000)*/ {16, 0,123,__LINE__, 0x3fd38015, 0xa0000000, 0x3fd33333, 0x33333347}, /* 0.30469=f(0.30000)*/ {13, 0,123,__LINE__, 0x3fd42c1f, 0x60000000, 0x3fd3d70a, 0x3d70a3eb}, /* 0.31519=f(0.31000)*/ {16, 0,123,__LINE__, 0x3fd4d8c0, 0x80000000, 0x3fd47ae1, 0x47ae148f}, /* 0.32572=f(0.32000)*/ {16, 0,123,__LINE__, 0x3fd585ff, 0x80000000, 0x3fd51eb8, 0x51eb8533}, /* 0.33630=f(0.33000)*/ {14, 0,123,__LINE__, 0x3fd633e3, 0x00000000, 0x3fd5c28f, 0x5c28f5d7}, /* 0.34691=f(0.34000)*/ {16, 0,123,__LINE__, 0x3fd6e271, 0xe0000000, 0x3fd66666, 0x6666667b}, /* 0.35757=f(0.35000)*/ {16, 0,123,__LINE__, 0x3fd791b3, 0x80000000, 0x3fd70a3d, 0x70a3d71f}, /* 0.36826=f(0.36000)*/ {12, 0,123,__LINE__, 0x3fd841af, 0x20000000, 0x3fd7ae14, 0x7ae147c3}, /* 0.37900=f(0.37000)*/ {16, 0,123,__LINE__, 0x3fd8f26c, 0x20000000, 0x3fd851eb, 0x851eb867}, /* 0.38979=f(0.38000)*/ {15, 0,123,__LINE__, 0x3fd9a3f2, 0xa0000000, 0x3fd8f5c2, 0x8f5c290b}, /* 0.40063=f(0.39000)*/ {14, 0,123,__LINE__, 0x3fda564a, 0xc0000000, 0x3fd99999, 0x999999af}, /* 0.41151=f(0.40000)*/ {15, 0,123,__LINE__, 0x3fdb097c, 0xc0000000, 0x3fda3d70, 0xa3d70a53}, /* 0.42245=f(0.41000)*/ {15, 0,123,__LINE__, 0x3fdbbd91, 0x60000000, 0x3fdae147, 0xae147af7}, /* 0.43344=f(0.42000)*/ {13, 0,123,__LINE__, 0x3fdc7291, 0xe0000000, 0x3fdb851e, 0xb851eb9b}, /* 0.44449=f(0.43000)*/ {15, 0,123,__LINE__, 0x3fdd2887, 0x60000000, 0x3fdc28f5, 0xc28f5c3f}, /* 0.45559=f(0.44000)*/ {15, 0,123,__LINE__, 0x3fdddf7b, 0xa0000000, 0x3fdccccc, 0xcccccce3}, /* 0.46676=f(0.45000)*/ {14, 0,123,__LINE__, 0x3fde9779, 0x40000000, 0x3fdd70a3, 0xd70a3d87}, /* 
0.47799=f(0.46000)*/ {15, 0,123,__LINE__, 0x3fdf508a, 0x40000000, 0x3fde147a, 0xe147ae2b}, /* 0.48929=f(0.47000)*/ {10, 0,123,__LINE__, 0x3fe0055d, 0x00000000, 0x3fdeb851, 0xeb851ecf}, /* 0.50065=f(0.48000)*/ {10, 0,123,__LINE__, 0x3fe0630a, 0x20000000, 0x3fdf5c28, 0xf5c28f73}, /* 0.51208=f(0.49000)*/ {16, 0,123,__LINE__, 0x3fe0c152, 0x40000000, 0x3fe00000, 0x0000000b}, /* 0.52359=f(0.50000)*/ {15, 0,123,__LINE__, 0x3fe1203c, 0x00000000, 0x3fe051eb, 0x851eb85d}, /* 0.53518=f(0.51000)*/ {15, 0,123,__LINE__, 0x3fe17fcd, 0x80000000, 0x3fe0a3d7, 0x0a3d70af}, /* 0.54685=f(0.52000)*/ {15, 0,123,__LINE__, 0x3fe1e00e, 0x40000000, 0x3fe0f5c2, 0x8f5c2901}, /* 0.55860=f(0.53000)*/ {14, 0,123,__LINE__, 0x3fe24105, 0x80000000, 0x3fe147ae, 0x147ae153}, /* 0.57043=f(0.54000)*/ {14, 0,123,__LINE__, 0x3fe2a2ba, 0x80000000, 0x3fe19999, 0x999999a5}, /* 0.58236=f(0.55000)*/ {14, 0,123,__LINE__, 0x3fe30535, 0x60000000, 0x3fe1eb85, 0x1eb851f7}, /* 0.59438=f(0.56000)*/ {15, 0,123,__LINE__, 0x3fe3687f, 0x20000000, 0x3fe23d70, 0xa3d70a49}, /* 0.60650=f(0.57000)*/ {15, 0,123,__LINE__, 0x3fe3cca0, 0x20000000, 0x3fe28f5c, 0x28f5c29b}, /* 0.61872=f(0.58000)*/ {13, 0,123,__LINE__, 0x3fe431a2, 0x40000000, 0x3fe2e147, 0xae147aed}, /* 0.63105=f(0.59000)*/ {13, 0,123,__LINE__, 0x3fe4978f, 0xc0000000, 0x3fe33333, 0x3333333f}, /* 0.64350=f(0.60000)*/ {13, 0,123,__LINE__, 0x3fe4fe72, 0xe0000000, 0x3fe3851e, 0xb851eb91}, /* 0.65606=f(0.61000)*/ {13, 0,123,__LINE__, 0x3fe56657, 0x20000000, 0x3fe3d70a, 0x3d70a3e3}, /* 0.66874=f(0.62000)*/ {15, 0,123,__LINE__, 0x3fe5cf48, 0xc0000000, 0x3fe428f5, 0xc28f5c35}, /* 0.68155=f(0.63000)*/ {14, 0,123,__LINE__, 0x3fe63954, 0x80000000, 0x3fe47ae1, 0x47ae1487}, /* 0.69449=f(0.64000)*/ {14, 0,123,__LINE__, 0x3fe6a488, 0x40000000, 0x3fe4cccc, 0xccccccd9}, /* 0.70758=f(0.65000)*/ {14, 0,123,__LINE__, 0x3fe710f2, 0xc0000000, 0x3fe51eb8, 0x51eb852b}, /* 0.72081=f(0.66000)*/ {14, 0,123,__LINE__, 0x3fe77ea3, 0xa0000000, 0x3fe570a3, 0xd70a3d7d}, /* 0.73420=f(0.67000)*/ {14, 
0,123,__LINE__, 0x3fe7edac, 0x00000000, 0x3fe5c28f, 0x5c28f5cf}, /* 0.74776=f(0.68000)*/ {12, 0,123,__LINE__, 0x3fe85e1e, 0x60000000, 0x3fe6147a, 0xe147ae21}, /* 0.76148=f(0.69000)*/ {12, 0,123,__LINE__, 0x3fe8d00e, 0x80000000, 0x3fe66666, 0x66666673}, /* 0.77539=f(0.70000)*/ {12, 0,123,__LINE__, 0x3fe94391, 0xc0000000, 0x3fe6b851, 0xeb851ec5}, /* 0.78949=f(0.71000)*/ {12, 0,123,__LINE__, 0x3fe9b8bf, 0xe0000000, 0x3fe70a3d, 0x70a3d717}, /* 0.80380=f(0.72000)*/ {12, 0,123,__LINE__, 0x3fea2fb1, 0xa0000000, 0x3fe75c28, 0xf5c28f69}, /* 0.81832=f(0.73000)*/ {12, 0,123,__LINE__, 0x3feaa883, 0x60000000, 0x3fe7ae14, 0x7ae147bb}, /* 0.83307=f(0.74000)*/ {14, 0,123,__LINE__, 0x3feb2353, 0x40000000, 0x3fe80000, 0x0000000d}, /* 0.84806=f(0.75000)*/ {14, 0,123,__LINE__, 0x3feba043, 0x00000000, 0x3fe851eb, 0x851eb85f}, /* 0.86331=f(0.76000)*/ {13, 0,123,__LINE__, 0x3fec1f77, 0x80000000, 0x3fe8a3d7, 0x0a3d70b1}, /* 0.87884=f(0.77000)*/ {13, 0,123,__LINE__, 0x3feca11a, 0x20000000, 0x3fe8f5c2, 0x8f5c2903}, /* 0.89466=f(0.78000)*/ {13, 0,123,__LINE__, 0x3fed2559, 0x20000000, 0x3fe947ae, 0x147ae155}, /* 0.91080=f(0.79000)*/ {13, 0,123,__LINE__, 0x3fedac67, 0x20000000, 0x3fe99999, 0x999999a7}, /* 0.92729=f(0.80000)*/ {13, 0,123,__LINE__, 0x3fee367e, 0xa0000000, 0x3fe9eb85, 0x1eb851f9}, /* 0.94415=f(0.81000)*/ {13, 0,123,__LINE__, 0x3feec3e1, 0x20000000, 0x3fea3d70, 0xa3d70a4b}, /* 0.96141=f(0.82000)*/ {13, 0,123,__LINE__, 0x3fef54d9, 0xc0000000, 0x3fea8f5c, 0x28f5c29d}, /* 0.97910=f(0.83000)*/ {13, 0,123,__LINE__, 0x3fefe9be, 0x80000000, 0x3feae147, 0xae147aef}, /* 0.99728=f(0.84000)*/ {11, 0,123,__LINE__, 0x3ff04179, 0xe0000000, 0x3feb3333, 0x33333341}, /* 1.01598=f(0.85000)*/ {11, 0,123,__LINE__, 0x3ff09077, 0x00000000, 0x3feb851e, 0xb851eb93}, /* 1.03527=f(0.86000)*/ {11, 0,123,__LINE__, 0x3ff0e21b, 0xe0000000, 0x3febd70a, 0x3d70a3e5}, /* 1.05520=f(0.87000)*/ {11, 0,123,__LINE__, 0x3ff136bb, 0x40000000, 0x3fec28f5, 0xc28f5c37}, /* 1.07586=f(0.88000)*/ {11, 0,123,__LINE__, 
0x3ff18eb9, 0xc0000000, 0x3fec7ae1, 0x47ae1489}, /* 1.09734=f(0.89000)*/ {11, 0,123,__LINE__, 0x3ff1ea93, 0x80000000, 0x3feccccc, 0xccccccdb}, /* 1.11977=f(0.90000)*/ {11, 0,123,__LINE__, 0x3ff24ae4, 0x60000000, 0x3fed1eb8, 0x51eb852d}, /* 1.14328=f(0.91000)*/ {11, 0,123,__LINE__, 0x3ff2b075, 0x40000000, 0x3fed70a3, 0xd70a3d7f}, /* 1.16808=f(0.92000)*/ {11, 0,123,__LINE__, 0x3ff31c50, 0xc0000000, 0x3fedc28f, 0x5c28f5d1}, /* 1.19441=f(0.93000)*/ {11, 0,123,__LINE__, 0x3ff38fe4, 0xe0000000, 0x3fee147a, 0xe147ae23}, /* 1.22263=f(0.94000)*/ {11, 0,123,__LINE__, 0x3ff40d41, 0x20000000, 0x3fee6666, 0x66666675}, /* 1.25323=f(0.95000)*/ {11, 0,123,__LINE__, 0x3ff4978f, 0xa0000000, 0x3feeb851, 0xeb851ec7}, /* 1.28700=f(0.96000)*/ {11, 0,123,__LINE__, 0x3ff53425, 0x60000000, 0x3fef0a3d, 0x70a3d719}, /* 1.32523=f(0.97000)*/ {11, 0,123,__LINE__, 0x3ff5ed69, 0x20000000, 0x3fef5c28, 0xf5c28f6b}, /* 1.37046=f(0.98000)*/ {11, 0,123,__LINE__, 0x3ff6de3c, 0x80000000, 0x3fefae14, 0x7ae147bd}, /* 1.42925=f(0.99000)*/ {12, 0,123,__LINE__, 0x3ff921fb, 0x60000000, 0x3ff00000, 0x00000007}, /* 1.57079=f(1.00000)*/ { 1,13, 37,__LINE__, 0x40500000, 0x00000000, 0x3ff028f5, 0xc28f5c30}, /* 64.0000=f(1.01000)*/ { 1,13, 37,__LINE__, 0x40500000, 0x00000000, 0x3ff051eb, 0x851eb859}, /* 64.0000=f(1.02000)*/ { 1,13, 37,__LINE__, 0x40500000, 0x00000000, 0x3ff07ae1, 0x47ae1482}, /* 64.0000=f(1.03000)*/ { 1,13, 37,__LINE__, 0x40500000, 0x00000000, 0x3ff0a3d7, 0x0a3d70ab}, /* 64.0000=f(1.04000)*/ { 1,13, 37,__LINE__, 0x40500000, 0x00000000, 0x3ff0cccc, 0xccccccd4}, /* 64.0000=f(1.05000)*/ { 1,13, 37,__LINE__, 0x40500000, 0x00000000, 0x3ff0f5c2, 0x8f5c28fd}, /* 64.0000=f(1.06000)*/ { 1,13, 37,__LINE__, 0x40500000, 0x00000000, 0x3ff11eb8, 0x51eb8526}, /* 64.0000=f(1.07000)*/ { 1,13, 37,__LINE__, 0x40500000, 0x00000000, 0x3ff147ae, 0x147ae14f}, /* 64.0000=f(1.08000)*/ { 1,13, 37,__LINE__, 0x40500000, 0x00000000, 0x3ff170a3, 0xd70a3d78}, /* 64.0000=f(1.09000)*/ { 1,13, 37,__LINE__, 0x40500000, 0x00000000, 
0x3ff19999, 0x999999a1}, /* 64.0000=f(1.10000)*/ { 1,13, 37,__LINE__, 0x40500000, 0x00000000, 0x3ff1c28f, 0x5c28f5ca}, /* 64.0000=f(1.11000)*/ { 1,13, 37,__LINE__, 0x40500000, 0x00000000, 0x3ff1eb85, 0x1eb851f3}, /* 64.0000=f(1.12000)*/ { 1,13, 37,__LINE__, 0x40500000, 0x00000000, 0x3ff2147a, 0xe147ae1c}, /* 64.0000=f(1.13000)*/ { 1,13, 37,__LINE__, 0x40500000, 0x00000000, 0x3ff23d70, 0xa3d70a45}, /* 64.0000=f(1.14000)*/ { 1,13, 37,__LINE__, 0x40500000, 0x00000000, 0x3ff26666, 0x6666666e}, /* 64.0000=f(1.15000)*/ { 1,13, 37,__LINE__, 0x40500000, 0x00000000, 0x3ff28f5c, 0x28f5c297}, /* 64.0000=f(1.16000)*/ { 1,13, 37,__LINE__, 0x40500000, 0x00000000, 0x3ff2b851, 0xeb851ec0}, /* 64.0000=f(1.17000)*/ { 1,13, 37,__LINE__, 0x40500000, 0x00000000, 0x3ff2e147, 0xae147ae9}, /* 64.0000=f(1.18000)*/ { 1,13, 37,__LINE__, 0x40500000, 0x00000000, 0x3ff30a3d, 0x70a3d712}, /* 64.0000=f(1.19000)*/ { 9,13, 37,__LINE__, 0x40500000, 0x00000000, 0xc01921fb, 0x54442d18}, /* 64.0000=f(-6.28318)*/ { 9,13, 37,__LINE__, 0x40500000, 0x00000000, 0xc012d97c, 0x7f3321d2}, /* 64.0000=f(-4.71238)*/ { 9,13, 37,__LINE__, 0x40500000, 0x00000000, 0xc00921fb, 0x54442d18}, /* 64.0000=f(-3.14159)*/ { 0,13, 37,__LINE__, 0x40500000, 0x00000000, 0xbff921fb, 0x54442d18}, /* 64.0000=f(-1.57079)*/ {64, 0,123,__LINE__, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, /* 0.00000=f(0.00000)*/ { 1,13, 37,__LINE__, 0x40500000, 0x00000000, 0x3ff921fb, 0x54442d18}, /* 64.0000=f(1.57079)*/ { 9,13, 37,__LINE__, 0x40500000, 0x00000000, 0x400921fb, 0x54442d18}, /* 64.0000=f(3.14159)*/ { 9,13, 37,__LINE__, 0x40500000, 0x00000000, 0x4012d97c, 0x7f3321d2}, /* 64.0000=f(4.71238)*/ { 9,13, 37,__LINE__, 0x40500000, 0x00000000, 0xc03e0000, 0x00000000}, /* 64.0000=f(-30.0000)*/ { 9,13, 37,__LINE__, 0x40500000, 0x00000000, 0xc03c4ccc, 0xcccccccd}, /* 64.0000=f(-28.3000)*/ { 9,13, 37,__LINE__, 0x40500000, 0x00000000, 0xc03a9999, 0x9999999a}, /* 64.0000=f(-26.6000)*/ { 9,13, 37,__LINE__, 0x40500000, 0x00000000, 0xc038e666, 
0x66666667}, /* 64.0000=f(-24.9000)*/ { 9,13, 37,__LINE__, 0x40500000, 0x00000000, 0xc0373333, 0x33333334}, /* 64.0000=f(-23.2000)*/ { 9,13, 37,__LINE__, 0x40500000, 0x00000000, 0xc0358000, 0x00000001}, /* 64.0000=f(-21.5000)*/ { 9,13, 37,__LINE__, 0x40500000, 0x00000000, 0xc033cccc, 0xccccccce}, /* 64.0000=f(-19.8000)*/ { 9,13, 37,__LINE__, 0x40500000, 0x00000000, 0xc0321999, 0x9999999b}, /* 64.0000=f(-18.1000)*/ { 9,13, 37,__LINE__, 0x40500000, 0x00000000, 0xc0306666, 0x66666668}, /* 64.0000=f(-16.4000)*/ { 9,13, 37,__LINE__, 0x40500000, 0x00000000, 0xc02d6666, 0x6666666a}, /* 64.0000=f(-14.7000)*/ { 9,13, 37,__LINE__, 0x40500000, 0x00000000, 0xc02a0000, 0x00000004}, /* 64.0000=f(-13.0000)*/ { 9,13, 37,__LINE__, 0x40500000, 0x00000000, 0xc0269999, 0x9999999e}, /* 64.0000=f(-11.3000)*/ { 9,13, 37,__LINE__, 0x40500000, 0x00000000, 0xc0233333, 0x33333338}, /* 64.0000=f(-9.60000)*/ { 9,13, 37,__LINE__, 0x40500000, 0x00000000, 0xc01f9999, 0x999999a3}, /* 64.0000=f(-7.90000)*/ { 9,13, 37,__LINE__, 0x40500000, 0x00000000, 0xc018cccc, 0xccccccd6}, /* 64.0000=f(-6.20000)*/ { 9,13, 37,__LINE__, 0x40500000, 0x00000000, 0xc0120000, 0x00000009}, /* 64.0000=f(-4.50000)*/ { 9,13, 37,__LINE__, 0x40500000, 0x00000000, 0xc0066666, 0x66666678}, /* 64.0000=f(-2.80000)*/ { 0,13, 37,__LINE__, 0x40500000, 0x00000000, 0xbff19999, 0x999999bd}, /* 64.0000=f(-1.10000)*/ {13, 0,123,__LINE__, 0x3fe4978f, 0xc0000000, 0x3fe33333, 0x333332ec}, /* 0.64350=f(0.60000)*/ { 9,13, 37,__LINE__, 0x40500000, 0x00000000, 0x40026666, 0x66666654}, /* 64.0000=f(2.30000)*/ { 9,13, 37,__LINE__, 0x40500000, 0x00000000, 0x400fffff, 0xffffffee}, /* 64.0000=f(4.00000)*/ { 9,13, 37,__LINE__, 0x40500000, 0x00000000, 0x4016cccc, 0xccccccc4}, /* 64.0000=f(5.70000)*/ { 9,13, 37,__LINE__, 0x40500000, 0x00000000, 0x401d9999, 0x99999991}, /* 64.0000=f(7.40000)*/ { 9,13, 37,__LINE__, 0x40500000, 0x00000000, 0x40223333, 0x3333332f}, /* 64.0000=f(9.10000)*/ { 9,13, 37,__LINE__, 0x40500000, 0x00000000, 0x40259999, 
0x99999995}, /* 64.0000=f(10.8000)*/ { 9,13, 37,__LINE__, 0x40500000, 0x00000000, 0x4028ffff, 0xfffffffb}, /* 64.0000=f(12.5000)*/ { 9,13, 37,__LINE__, 0x40500000, 0x00000000, 0x402c6666, 0x66666661}, /* 64.0000=f(14.2000)*/ { 9,13, 37,__LINE__, 0x40500000, 0x00000000, 0x402fcccc, 0xccccccc7}, /* 64.0000=f(15.9000)*/ { 9,13, 37,__LINE__, 0x40500000, 0x00000000, 0x40319999, 0x99999997}, /* 64.0000=f(17.6000)*/ { 9,13, 37,__LINE__, 0x40500000, 0x00000000, 0x40334ccc, 0xccccccca}, /* 64.0000=f(19.3000)*/ { 9,13, 37,__LINE__, 0x40500000, 0x00000000, 0x4034ffff, 0xfffffffd}, /* 64.0000=f(21.0000)*/ { 9,13, 37,__LINE__, 0x40500000, 0x00000000, 0x4036b333, 0x33333330}, /* 64.0000=f(22.7000)*/ { 9,13, 37,__LINE__, 0x40500000, 0x00000000, 0x40386666, 0x66666663}, /* 64.0000=f(24.4000)*/ { 9,13, 37,__LINE__, 0x40500000, 0x00000000, 0x403a1999, 0x99999996}, /* 64.0000=f(26.1000)*/ { 9,13, 37,__LINE__, 0x40500000, 0x00000000, 0x403bcccc, 0xccccccc9}, /* 64.0000=f(27.8000)*/ { 9,13, 37,__LINE__, 0x40500000, 0x00000000, 0x403d7fff, 0xfffffffc}, /* 64.0000=f(29.5000)*/ 0,}; test_asinf(m) {run_vector_1(m,asinf_vec,(char *)(asinf),"asinf","ff"); }
gpl-2.0
chenyu105/linux
sound/soc/pxa/magician.c
936
13668
/*
 * SoC audio for HTC Magician
 *
 * Copyright (c) 2006 Philipp Zabel <philipp.zabel@gmail.com>
 *
 * based on spitz.c,
 * Authors: Liam Girdwood <lrg@slimlogic.co.uk>
 *          Richard Purdie <richard@openedhand.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 */

#include <linux/module.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/gpio.h>
#include <linux/i2c.h>

#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
#include <sound/uda1380.h>

#include <mach/magician.h>
#include <asm/mach-types.h>
#include "../codecs/uda1380.h"
#include "pxa2xx-i2s.h"
#include "pxa-ssp.h"

/* Capture source selector values held in magician_in_sel. */
#define MAGICIAN_MIC		0
#define MAGICIAN_MIC_EXT	1

/* Current mixer-control state, applied to DAPM by magician_ext_control(). */
static int magician_hp_switch;			/* headphone path on/off */
static int magician_spk_switch = 1;		/* speaker path on/off (default on) */
static int magician_in_sel = MAGICIAN_MIC;	/* active capture source */

/*
 * Push the current switch/input-select state into the DAPM pins and
 * resynchronize routing.  Called at stream startup and from the
 * mixer-control put handlers, so all pin changes happen under the
 * DAPM mutex.
 */
static void magician_ext_control(struct snd_soc_dapm_context *dapm)
{

	snd_soc_dapm_mutex_lock(dapm);

	if (magician_spk_switch)
		snd_soc_dapm_enable_pin_unlocked(dapm, "Speaker");
	else
		snd_soc_dapm_disable_pin_unlocked(dapm, "Speaker");
	if (magician_hp_switch)
		snd_soc_dapm_enable_pin_unlocked(dapm, "Headphone Jack");
	else
		snd_soc_dapm_disable_pin_unlocked(dapm, "Headphone Jack");

	/* exactly one of the two microphone pins is active at a time */
	switch (magician_in_sel) {
	case MAGICIAN_MIC:
		snd_soc_dapm_disable_pin_unlocked(dapm, "Headset Mic");
		snd_soc_dapm_enable_pin_unlocked(dapm, "Call Mic");
		break;
	case MAGICIAN_MIC_EXT:
		snd_soc_dapm_disable_pin_unlocked(dapm, "Call Mic");
		snd_soc_dapm_enable_pin_unlocked(dapm, "Headset Mic");
		break;
	}

	snd_soc_dapm_sync_unlocked(dapm);

	snd_soc_dapm_mutex_unlock(dapm);
}

/*
 * Stream startup hook shared by playback and capture: refresh the
 * DAPM routing from the cached switch state.  Always returns 0.
 */
static int magician_startup(struct snd_pcm_substream *substream)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;

	/* check the jack status at stream startup */
	magician_ext_control(&rtd->card->dapm);

	return 0;
}

/*
 * Magician uses SSP port for playback.
 *
 * hw_params hook: pick the audio PLL frequency (acps) and dividers
 * (acds, div4) for the requested rate/width, then program both DAI
 * formats and the SSP clock tree.  Returns 0 or a negative error code
 * from the first failing snd_soc_dai_* call.
 */
static int magician_playback_hw_params(struct snd_pcm_substream *substream,
				       struct snd_pcm_hw_params *params)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct snd_soc_dai *codec_dai = rtd->codec_dai;
	struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
	unsigned int acps, acds, width;
	unsigned int div4 = PXA_SSP_CLK_SCDB_4;
	int ret = 0;

	width = snd_pcm_format_physical_width(params_format(params));

	/*
	 * rate = SSPSCLK / (2 * width(16 or 32))
	 * SSPSCLK = (ACPS / ACDS) / SSPSCLKDIV(div4 or div1)
	 */
	switch (params_rate(params)) {
	case 8000:
		/* off by a factor of 2: bug in the PXA27x audio clock? */
		acps = 32842000;
		switch (width) {
		case 16:
			/* 513156 Hz ~= _2_ * 8000 Hz * 32 (+0.23%) */
			acds = PXA_SSP_CLK_AUDIO_DIV_16;
			break;
		default: /* 32 */
			/* 1026312 Hz ~= _2_ * 8000 Hz * 64 (+0.23%) */
			acds = PXA_SSP_CLK_AUDIO_DIV_8;
		}
		break;
	case 11025:
		acps = 5622000;
		switch (width) {
		case 16:
			/* 351375 Hz ~= 11025 Hz * 32 (-0.41%) */
			acds = PXA_SSP_CLK_AUDIO_DIV_4;
			break;
		default: /* 32 */
			/* 702750 Hz ~= 11025 Hz * 64 (-0.41%) */
			acds = PXA_SSP_CLK_AUDIO_DIV_2;
		}
		break;
	case 22050:
		acps = 5622000;
		switch (width) {
		case 16:
			/* 702750 Hz ~= 22050 Hz * 32 (-0.41%) */
			acds = PXA_SSP_CLK_AUDIO_DIV_2;
			break;
		default: /* 32 */
			/* 1405500 Hz ~= 22050 Hz * 64 (-0.41%) */
			acds = PXA_SSP_CLK_AUDIO_DIV_1;
		}
		break;
	case 44100:
		acps = 5622000;
		switch (width) {
		case 16:
			/* 1405500 Hz ~= 44100 Hz * 32 (-0.41%) */
			acds = PXA_SSP_CLK_AUDIO_DIV_2;
			break;
		default: /* 32 */
			/* 2811000 Hz ~= 44100 Hz * 64 (-0.41%) */
			acds = PXA_SSP_CLK_AUDIO_DIV_1;
		}
		break;
	case 48000:
		acps = 12235000;
		switch (width) {
		case 16:
			/* 1529375 Hz ~= 48000 Hz * 32 (-0.44%) */
			acds = PXA_SSP_CLK_AUDIO_DIV_2;
			break;
		default: /* 32 */
			/* 3058750 Hz ~= 48000 Hz * 64 (-0.44%) */
			acds = PXA_SSP_CLK_AUDIO_DIV_1;
		}
		break;
	case 96000:
	default:
		acps = 12235000;
		switch (width) {
		case 16:
			/* 3058750 Hz ~= 96000 Hz * 32 (-0.44%) */
			acds = PXA_SSP_CLK_AUDIO_DIV_1;
			break;
		default: /* 32 */
			/* 6117500 Hz ~= 96000 Hz * 64 (-0.44%) */
			acds = PXA_SSP_CLK_AUDIO_DIV_2;
			div4 = PXA_SSP_CLK_SCDB_1;
			break;
		}
		break;
	}

	/* set codec DAI configuration */
	ret = snd_soc_dai_set_fmt(codec_dai,
			SND_SOC_DAIFMT_MSB | SND_SOC_DAIFMT_NB_NF |
			SND_SOC_DAIFMT_CBS_CFS);
	if (ret < 0)
		return ret;

	/* set cpu DAI configuration */
	ret = snd_soc_dai_set_fmt(cpu_dai,
			SND_SOC_DAIFMT_DSP_A | SND_SOC_DAIFMT_NB_IF |
			SND_SOC_DAIFMT_CBS_CFS);
	if (ret < 0)
		return ret;

	ret = snd_soc_dai_set_tdm_slot(cpu_dai, 1, 0, 1, width);
	if (ret < 0)
		return ret;

	/* set audio clock as clock source */
	ret = snd_soc_dai_set_sysclk(cpu_dai, PXA_SSP_CLK_AUDIO, 0,
			SND_SOC_CLOCK_OUT);
	if (ret < 0)
		return ret;

	/* set the SSP audio system clock ACDS divider */
	ret = snd_soc_dai_set_clkdiv(cpu_dai,
			PXA_SSP_AUDIO_DIV_ACDS, acds);
	if (ret < 0)
		return ret;

	/* set the SSP audio system clock SCDB divider4 */
	ret = snd_soc_dai_set_clkdiv(cpu_dai,
			PXA_SSP_AUDIO_DIV_SCDB, div4);
	if (ret < 0)
		return ret;

	/* set SSP audio pll clock */
	ret = snd_soc_dai_set_pll(cpu_dai, 0, 0, 0, acps);
	if (ret < 0)
		return ret;

	return 0;
}

/*
 * Magician uses I2S for capture.
 *
 * hw_params hook for the capture link: program matching DAI formats
 * on codec and cpu sides and make the I2S system clock an output.
 * Returns 0 or the first negative error code.
 */
static int magician_capture_hw_params(struct snd_pcm_substream *substream,
				      struct snd_pcm_hw_params *params)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct snd_soc_dai *codec_dai = rtd->codec_dai;
	struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
	int ret = 0;

	/* set codec DAI configuration */
	ret = snd_soc_dai_set_fmt(codec_dai,
			SND_SOC_DAIFMT_MSB | SND_SOC_DAIFMT_NB_NF |
			SND_SOC_DAIFMT_CBS_CFS);
	if (ret < 0)
		return ret;

	/* set cpu DAI configuration */
	ret = snd_soc_dai_set_fmt(cpu_dai,
			SND_SOC_DAIFMT_MSB | SND_SOC_DAIFMT_NB_NF |
			SND_SOC_DAIFMT_CBS_CFS);
	if (ret < 0)
		return ret;

	/* set the I2S system clock as output */
	ret = snd_soc_dai_set_sysclk(cpu_dai, PXA2XX_I2S_SYSCLK, 0,
			SND_SOC_CLOCK_OUT);
	if (ret < 0)
		return ret;

	return 0;
}

static struct snd_soc_ops magician_capture_ops = {
	.startup = magician_startup,
	.hw_params = magician_capture_hw_params,
};

static struct snd_soc_ops magician_playback_ops = {
	.startup = magician_startup,
	.hw_params = magician_playback_hw_params,
};

/* "Headphone Switch" control: report the cached state. */
static int magician_get_hp(struct snd_kcontrol *kcontrol,
		struct snd_ctl_elem_value *ucontrol)
{
	ucontrol->value.integer.value[0] = magician_hp_switch;
	return 0;
}

/*
 * "Headphone Switch" control: cache the new state and reroute.
 * Returns 1 when the value changed, 0 when it was already set
 * (standard ALSA put-handler convention).
 */
static int magician_set_hp(struct snd_kcontrol *kcontrol,
		struct snd_ctl_elem_value *ucontrol)
{
	struct snd_soc_card *card = snd_kcontrol_chip(kcontrol);

	if (magician_hp_switch == ucontrol->value.integer.value[0])
		return 0;

	magician_hp_switch = ucontrol->value.integer.value[0];
	magician_ext_control(&card->dapm);
	return 1;
}

/* "Speaker Switch" control: report the cached state. */
static int magician_get_spk(struct snd_kcontrol *kcontrol,
		struct snd_ctl_elem_value *ucontrol)
{
	ucontrol->value.integer.value[0] = magician_spk_switch;
	return 0;
}

/*
 * "Speaker Switch" control: cache the new state and reroute.
 * Returns 1 on change, 0 if unchanged.
 */
static int magician_set_spk(struct snd_kcontrol *kcontrol,
		struct snd_ctl_elem_value *ucontrol)
{
	struct snd_soc_card *card = snd_kcontrol_chip(kcontrol);

	if (magician_spk_switch == ucontrol->value.integer.value[0])
		return 0;

	magician_spk_switch = ucontrol->value.integer.value[0];
	magician_ext_control(&card->dapm);
	return 1;
}
static int magician_get_input(struct snd_kcontrol *kcontrol,
	struct snd_ctl_elem_value *ucontrol)
{
	/* Report the cached input mux selection (MAGICIAN_MIC*). */
	ucontrol->value.integer.value[0] = magician_in_sel;
	return 0;
}

static int magician_set_input(struct snd_kcontrol *kcontrol,
	struct snd_ctl_elem_value *ucontrol)
{
	if (magician_in_sel == ucontrol->value.integer.value[0])
		return 0;

	magician_in_sel = ucontrol->value.integer.value[0];

	/* Route the mic input via the IN_SEL1 egpio.
	 * NOTE(review): IN_SEL0 is driven low once at init and never
	 * touched here -- presumably only SEL1 distinguishes the two
	 * mic sources; confirm against the board schematics. */
	switch (magician_in_sel) {
	case MAGICIAN_MIC:
		gpio_set_value(EGPIO_MAGICIAN_IN_SEL1, 1);
		break;
	case MAGICIAN_MIC_EXT:
		gpio_set_value(EGPIO_MAGICIAN_IN_SEL1, 0);
	}

	return 1;
}

/* DAPM event callbacks: toggle the amplifier/bias supply egpios in
 * lockstep with the corresponding widget power state. */
static int magician_spk_power(struct snd_soc_dapm_widget *w,
				struct snd_kcontrol *k, int event)
{
	gpio_set_value(EGPIO_MAGICIAN_SPK_POWER, SND_SOC_DAPM_EVENT_ON(event));
	return 0;
}

static int magician_hp_power(struct snd_soc_dapm_widget *w,
				struct snd_kcontrol *k, int event)
{
	gpio_set_value(EGPIO_MAGICIAN_EP_POWER, SND_SOC_DAPM_EVENT_ON(event));
	return 0;
}

static int magician_mic_bias(struct snd_soc_dapm_widget *w,
				struct snd_kcontrol *k, int event)
{
	gpio_set_value(EGPIO_MAGICIAN_MIC_POWER, SND_SOC_DAPM_EVENT_ON(event));
	return 0;
}

/* magician machine dapm widgets */
static const struct snd_soc_dapm_widget uda1380_dapm_widgets[] = {
	SND_SOC_DAPM_HP("Headphone Jack", magician_hp_power),
	SND_SOC_DAPM_SPK("Speaker", magician_spk_power),
	SND_SOC_DAPM_MIC("Call Mic", magician_mic_bias),
	SND_SOC_DAPM_MIC("Headset Mic", magician_mic_bias),
};

/* magician machine audio_map */
static const struct snd_soc_dapm_route audio_map[] = {
	/* Headphone connected to VOUTL, VOUTR */
	{"Headphone Jack", NULL, "VOUTL"},
	{"Headphone Jack", NULL, "VOUTR"},

	/* Speaker connected to VOUTL, VOUTR */
	{"Speaker", NULL, "VOUTL"},
	{"Speaker", NULL, "VOUTR"},

	/* Mics are connected to VINM */
	{"VINM", NULL, "Headset Mic"},
	{"VINM", NULL, "Call Mic"},
};

/* Labels for the "Input Select" enum control, in MAGICIAN_MIC* order. */
static const char *input_select[] = {"Call Mic", "Headset Mic"};

static const struct soc_enum magician_in_sel_enum =
	SOC_ENUM_SINGLE_EXT(2, input_select);

static const struct snd_kcontrol_new
uda1380_magician_controls[] = {
	SOC_SINGLE_BOOL_EXT("Headphone Switch",
			(unsigned long)&magician_hp_switch,
			magician_get_hp, magician_set_hp),
	SOC_SINGLE_BOOL_EXT("Speaker Switch",
			(unsigned long)&magician_spk_switch,
			magician_get_spk, magician_set_spk),
	SOC_ENUM_EXT("Input Select", magician_in_sel_enum,
			magician_get_input, magician_set_input),
};

/* magician digital audio interface glue - connects codec <--> CPU */
static struct snd_soc_dai_link magician_dai[] = {
{
	/* Playback runs over SSP1; see magician_playback_ops. */
	.name = "uda1380",
	.stream_name = "UDA1380 Playback",
	.cpu_dai_name = "pxa-ssp-dai.0",
	.codec_dai_name = "uda1380-hifi-playback",
	.platform_name = "pxa-pcm-audio",
	.codec_name = "uda1380-codec.0-0018",
	.ops = &magician_playback_ops,
},
{
	/* Capture uses the PXA I2S controller; see magician_capture_ops.
	 * NOTE(review): both links share .name = "uda1380" -- confirm the
	 * core tolerates duplicate link names on this kernel version. */
	.name = "uda1380",
	.stream_name = "UDA1380 Capture",
	.cpu_dai_name = "pxa2xx-i2s",
	.codec_dai_name = "uda1380-hifi-capture",
	.platform_name = "pxa-pcm-audio",
	.codec_name = "uda1380-codec.0-0018",
	.ops = &magician_capture_ops,
}
};

/* magician audio machine driver */
static struct snd_soc_card snd_soc_card_magician = {
	.name = "Magician",
	.owner = THIS_MODULE,
	.dai_link = magician_dai,
	.num_links = ARRAY_SIZE(magician_dai),

	.controls = uda1380_magician_controls,
	.num_controls = ARRAY_SIZE(uda1380_magician_controls),
	.dapm_widgets = uda1380_dapm_widgets,
	.num_dapm_widgets = ARRAY_SIZE(uda1380_dapm_widgets),
	.dapm_routes = audio_map,
	.num_dapm_routes = ARRAY_SIZE(audio_map),
	.fully_routed = true,
};

static struct platform_device *magician_snd_device;

/*
 * FIXME: move into magician board file once merged into the pxa tree
 */
static struct uda1380_platform_data uda1380_info = {
	.gpio_power = EGPIO_MAGICIAN_CODEC_POWER,
	.gpio_reset = EGPIO_MAGICIAN_CODEC_RESET,
	.dac_clk = UDA1380_DAC_CLK_WSPLL,
};

/* Codec sits at address 0x18 on I2C bus 0. */
static struct i2c_board_info i2c_board_info[] = {
	{
		I2C_BOARD_INFO("uda1380", 0x18),
		.platform_data = &uda1380_info,
	},
};

/*
 * Module init: instantiate the codec I2C device, claim the audio
 * control egpios and register the soc-audio platform device.
 */
static int __init magician_init(void)
{
	int ret;
	struct i2c_adapter *adapter;
	struct i2c_client *client;

	if (!machine_is_magician())
		return -ENODEV;

	adapter =
i2c_get_adapter(0);
	if (!adapter)
		return -ENODEV;
	/* NOTE(review): the created i2c client is never unregistered on the
	 * error paths below or in magician_exit() -- verify ownership. */
	client = i2c_new_device(adapter, i2c_board_info);
	i2c_put_adapter(adapter);
	if (!client)
		return -ENODEV;

	/* Claim every board control egpio before registering the card. */
	ret = gpio_request(EGPIO_MAGICIAN_SPK_POWER, "SPK_POWER");
	if (ret)
		goto err_request_spk;
	ret = gpio_request(EGPIO_MAGICIAN_EP_POWER, "EP_POWER");
	if (ret)
		goto err_request_ep;
	ret = gpio_request(EGPIO_MAGICIAN_MIC_POWER, "MIC_POWER");
	if (ret)
		goto err_request_mic;
	ret = gpio_request(EGPIO_MAGICIAN_IN_SEL0, "IN_SEL0");
	if (ret)
		goto err_request_in_sel0;
	ret = gpio_request(EGPIO_MAGICIAN_IN_SEL1, "IN_SEL1");
	if (ret)
		goto err_request_in_sel1;

	gpio_set_value(EGPIO_MAGICIAN_IN_SEL0, 0);

	magician_snd_device = platform_device_alloc("soc-audio", -1);
	if (!magician_snd_device) {
		ret = -ENOMEM;
		goto err_pdev;
	}

	platform_set_drvdata(magician_snd_device, &snd_soc_card_magician);
	ret = platform_device_add(magician_snd_device);
	if (ret) {
		platform_device_put(magician_snd_device);
		goto err_pdev;
	}

	return 0;

	/* Unwind the egpio requests in reverse order of acquisition. */
err_pdev:
	gpio_free(EGPIO_MAGICIAN_IN_SEL1);
err_request_in_sel1:
	gpio_free(EGPIO_MAGICIAN_IN_SEL0);
err_request_in_sel0:
	gpio_free(EGPIO_MAGICIAN_MIC_POWER);
err_request_mic:
	gpio_free(EGPIO_MAGICIAN_EP_POWER);
err_request_ep:
	gpio_free(EGPIO_MAGICIAN_SPK_POWER);
err_request_spk:
	return ret;
}

/* Module exit: unregister the card, power everything down, release egpios. */
static void __exit magician_exit(void)
{
	platform_device_unregister(magician_snd_device);

	gpio_set_value(EGPIO_MAGICIAN_SPK_POWER, 0);
	gpio_set_value(EGPIO_MAGICIAN_EP_POWER, 0);
	gpio_set_value(EGPIO_MAGICIAN_MIC_POWER, 0);

	gpio_free(EGPIO_MAGICIAN_IN_SEL1);
	gpio_free(EGPIO_MAGICIAN_IN_SEL0);
	gpio_free(EGPIO_MAGICIAN_MIC_POWER);
	gpio_free(EGPIO_MAGICIAN_EP_POWER);
	gpio_free(EGPIO_MAGICIAN_SPK_POWER);
}

module_init(magician_init);
module_exit(magician_exit);

MODULE_AUTHOR("Philipp Zabel");
MODULE_DESCRIPTION("ALSA SoC Magician");
MODULE_LICENSE("GPL");
gpl-2.0
Quarx2k/msm8260_kernel_fly_iq285
net/mac80211/aes_ccm.c
1704
3589
/*
 * Copyright 2003-2004, Instant802 Networks, Inc.
 * Copyright 2005-2006, Devicescape Software, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <net/mac80211.h>
#include "key.h"
#include "aes_ccm.h"

/*
 * Scratch buffer layout (in AES_BLOCK_LEN units) shared by the helpers
 * in this file, as derived from the pointer setup below:
 *   scratch + 0 : b   (CBC-MAC accumulator)
 *   scratch + 1 : s_0 (keystream block used to mask the MIC)
 *   scratch + 2 : e/a (per-block keystream resp. MAC work block)
 *   scratch + 3 : b_0 (first CCM block; bytes 14/15 carry the counter)
 *   scratch + 4 : aad (two blocks of authenticate-only data)
 * b_0 and aad are read here without being written first, so the caller
 * is assumed to have filled them in -- TODO confirm against the caller.
 */
static void aes_ccm_prepare(struct crypto_cipher *tfm, u8 *scratch, u8 *a)
{
	int i;
	u8 *b_0, *aad, *b, *s_0;

	b_0 = scratch + 3 * AES_BLOCK_LEN;
	aad = scratch + 4 * AES_BLOCK_LEN;
	b = scratch;
	s_0 = scratch + AES_BLOCK_LEN;

	/* Seed the CBC-MAC with E(b_0). */
	crypto_cipher_encrypt_one(tfm, b, b_0);

	/* Extra Authenticate-only data (always two AES blocks) */
	for (i = 0; i < AES_BLOCK_LEN; i++)
		aad[i] ^= b[i];
	crypto_cipher_encrypt_one(tfm, b, aad);

	aad += AES_BLOCK_LEN;

	for (i = 0; i < AES_BLOCK_LEN; i++)
		aad[i] ^= b[i];
	crypto_cipher_encrypt_one(tfm, a, aad);

	/* Mask out bits from auth-only-b_0 */
	b_0[0] &= 0x07;

	/* S_0 is used to encrypt T (= MIC) */
	b_0[14] = 0;
	b_0[15] = 0;
	crypto_cipher_encrypt_one(tfm, s_0, b_0);
}

/*
 * Encrypt @data (@data_len bytes) into @cdata and write the CCMP MIC
 * (CCMP_MIC_LEN bytes) to @mic.  Counter-mode encryption and CBC-MAC
 * authentication are interleaved block by block; the block counter is
 * stored big-endian in b_0[14..15].
 */
void ieee80211_aes_ccm_encrypt(struct crypto_cipher *tfm, u8 *scratch,
			       u8 *data, size_t data_len,
			       u8 *cdata, u8 *mic)
{
	int i, j, last_len, num_blocks;
	u8 *pos, *cpos, *b, *s_0, *e, *b_0, *aad;

	b = scratch;
	s_0 = scratch + AES_BLOCK_LEN;
	e = scratch + 2 * AES_BLOCK_LEN;
	b_0 = scratch + 3 * AES_BLOCK_LEN;
	aad = scratch + 4 * AES_BLOCK_LEN;

	num_blocks = DIV_ROUND_UP(data_len, AES_BLOCK_LEN);
	last_len = data_len % AES_BLOCK_LEN;	/* 0 => final block is full */
	aes_ccm_prepare(tfm, scratch, b);

	/* Process payload blocks */
	pos = data;
	cpos = cdata;
	for (j = 1; j <= num_blocks; j++) {
		int blen = (j == num_blocks && last_len) ?
			last_len : AES_BLOCK_LEN;

		/* Authentication followed by encryption */
		for (i = 0; i < blen; i++)
			b[i] ^= pos[i];
		crypto_cipher_encrypt_one(tfm, b, b);

		b_0[14] = (j >> 8) & 0xff;
		b_0[15] = j & 0xff;
		crypto_cipher_encrypt_one(tfm, e, b_0);
		for (i = 0; i < blen; i++)
			*cpos++ = *pos++ ^ e[i];
	}

	/* MIC = CBC-MAC XOR S_0 */
	for (i = 0; i < CCMP_MIC_LEN; i++)
		mic[i] = b[i] ^ s_0[i];
}

/*
 * Decrypt @cdata into @data and verify @mic.  Returns 0 on success,
 * -1 when the computed MIC does not match.
 * NOTE(review): the MIC comparison below is byte-by-byte with early
 * exit, i.e. not constant-time -- evaluate whether that matters here.
 */
int ieee80211_aes_ccm_decrypt(struct crypto_cipher *tfm, u8 *scratch,
			      u8 *cdata, size_t data_len, u8 *mic, u8 *data)
{
	int i, j, last_len, num_blocks;
	u8 *pos, *cpos, *b, *s_0, *a, *b_0, *aad;

	b = scratch;
	s_0 = scratch + AES_BLOCK_LEN;
	a = scratch + 2 * AES_BLOCK_LEN;
	b_0 = scratch + 3 * AES_BLOCK_LEN;
	aad = scratch + 4 * AES_BLOCK_LEN;

	num_blocks = DIV_ROUND_UP(data_len, AES_BLOCK_LEN);
	last_len = data_len % AES_BLOCK_LEN;
	aes_ccm_prepare(tfm, scratch, a);

	/* Process payload blocks */
	cpos = cdata;
	pos = data;
	for (j = 1; j <= num_blocks; j++) {
		int blen = (j == num_blocks && last_len) ?
			last_len : AES_BLOCK_LEN;

		/* Decryption followed by authentication */
		b_0[14] = (j >> 8) & 0xff;
		b_0[15] = j & 0xff;
		crypto_cipher_encrypt_one(tfm, b, b_0);
		for (i = 0; i < blen; i++) {
			*pos = *cpos++ ^ b[i];
			a[i] ^= *pos++;
		}
		crypto_cipher_encrypt_one(tfm, a, a);
	}

	for (i = 0; i < CCMP_MIC_LEN; i++) {
		if ((mic[i] ^ s_0[i]) != a[i])
			return -1;
	}

	return 0;
}

/*
 * Allocate an AES cipher and set the CCMP key.  Returns NULL (not an
 * ERR_PTR) on allocation failure.
 * NOTE(review): the crypto_cipher_setkey() return value is ignored;
 * for a fixed-size AES key this should not fail, but confirm.
 */
struct crypto_cipher *ieee80211_aes_key_setup_encrypt(const u8 key[])
{
	struct crypto_cipher *tfm;

	tfm = crypto_alloc_cipher("aes", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return NULL;

	crypto_cipher_setkey(tfm, key, ALG_CCMP_KEY_LEN);

	return tfm;
}

/* Release a cipher obtained from ieee80211_aes_key_setup_encrypt(). */
void ieee80211_aes_key_free(struct crypto_cipher *tfm)
{
	if (tfm)
		crypto_free_cipher(tfm);
}
gpl-2.0
hiikezoe/android_kernel_fujitsu_f11d
net/netrom/af_netrom.c
1960
33166
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * Copyright Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk)
 * Copyright Alan Cox GW4PTS (alan@lxorguk.ukuu.org.uk)
 * Copyright Darryl Miles G7LED (dlm@g7led.demon.co.uk)
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/stat.h>
#include <net/ax25.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/fcntl.h>
#include <linux/termios.h>	/* For TIOCINQ/OUTQ */
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <net/netrom.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <net/ip.h>
#include <net/tcp_states.h>
#include <net/arp.h>
#include <linux/init.h>

/* Number of virtual NET/ROM network devices created by this module. */
static int nr_ndevs = 4;

/* Protocol tunables; defaults are the NR_DEFAULT_* constants.  Judging
 * by their names these are exposed via sysctl elsewhere in the module. */
int sysctl_netrom_default_path_quality = NR_DEFAULT_QUAL;
int sysctl_netrom_obsolescence_count_initialiser = NR_DEFAULT_OBS;
int sysctl_netrom_network_ttl_initialiser = NR_DEFAULT_TTL;
int sysctl_netrom_transport_timeout = NR_DEFAULT_T1;
int sysctl_netrom_transport_maximum_tries = NR_DEFAULT_N2;
int sysctl_netrom_transport_acknowledge_delay = NR_DEFAULT_T2;
int sysctl_netrom_transport_busy_delay = NR_DEFAULT_T4;
int sysctl_netrom_transport_requested_window_size = NR_DEFAULT_WINDOW;
int sysctl_netrom_transport_no_activity_timeout = NR_DEFAULT_IDLE;
int sysctl_netrom_routing_control = NR_DEFAULT_ROUTING;
int sysctl_netrom_link_fails_count = NR_DEFAULT_FAILS;
int sysctl_netrom_reset_circuit = NR_DEFAULT_RESET;

/* Next circuit id handed out; ids with a zero index or id byte are
 * skipped by nr_find_next_circuit(). */
static unsigned short circuit = 0x101;

/* All NET/ROM sockets, guarded by nr_list_lock. */
static HLIST_HEAD(nr_list);
static DEFINE_SPINLOCK(nr_list_lock);

static const struct proto_ops nr_proto_ops;

/*
 * NETROM network devices are virtual network devices encapsulating NETROM
 * frames into AX.25 which will be sent through an AX.25 device, so form a
 * special "super class" of normal net devices; split their locks off into a
 * separate class since they always nest.
 */
static struct lock_class_key nr_netdev_xmit_lock_key;
static struct lock_class_key nr_netdev_addr_lock_key;

/* Per-queue hook for nr_set_lockdep_key() below. */
static void nr_set_lockdep_one(struct net_device *dev,
			       struct netdev_queue *txq,
			       void *_unused)
{
	lockdep_set_class(&txq->_xmit_lock, &nr_netdev_xmit_lock_key);
}

/* Move a NET/ROM device's xmit/addr-list locks into the private class. */
static void nr_set_lockdep_key(struct net_device *dev)
{
	lockdep_set_class(&dev->addr_list_lock, &nr_netdev_addr_lock_key);
	netdev_for_each_tx_queue(dev, nr_set_lockdep_one, NULL);
}

/*
 * Socket removal during an interrupt is now safe.
 */
static void nr_remove_socket(struct sock *sk)
{
	spin_lock_bh(&nr_list_lock);
	sk_del_node_init(sk);
	spin_unlock_bh(&nr_list_lock);
}

/*
 * Kill all bound sockets on a dropped device.
 */
static void nr_kill_by_device(struct net_device *dev)
{
	struct sock *s;
	struct hlist_node *node;

	spin_lock_bh(&nr_list_lock);
	sk_for_each(s, node, &nr_list)
		if (nr_sk(s)->device == dev)
			nr_disconnect(s, ENETUNREACH);
	spin_unlock_bh(&nr_list_lock);
}

/*
 * Handle device status changes.  Only NETDEV_DOWN in the initial
 * namespace is acted upon: disconnect every socket bound to the device
 * and drop it from the routing tables.
 */
static int nr_device_event(struct notifier_block *this,
			   unsigned long event, void *ptr)
{
	struct net_device *dev = (struct net_device *)ptr;

	if (!net_eq(dev_net(dev), &init_net))
		return NOTIFY_DONE;

	if (event != NETDEV_DOWN)
		return NOTIFY_DONE;

	nr_kill_by_device(dev);
	nr_rt_device_down(dev);

	return NOTIFY_DONE;
}

/*
 * Add a socket to the bound sockets list.
*/
static void nr_insert_socket(struct sock *sk)
{
	spin_lock_bh(&nr_list_lock);
	sk_add_node(sk, &nr_list);
	spin_unlock_bh(&nr_list_lock);
}

/*
 * Find a socket that wants to accept the Connect Request we just
 * received.  The returned socket (if any) is bh-locked; the caller
 * must bh_unlock_sock() it.
 */
static struct sock *nr_find_listener(ax25_address *addr)
{
	struct sock *s;
	struct hlist_node *node;

	spin_lock_bh(&nr_list_lock);
	sk_for_each(s, node, &nr_list)
		if (!ax25cmp(&nr_sk(s)->source_addr, addr) &&
		    s->sk_state == TCP_LISTEN) {
			bh_lock_sock(s);
			goto found;
		}
	s = NULL;
found:
	spin_unlock_bh(&nr_list_lock);
	return s;
}

/*
 * Find a connected NET/ROM socket given my circuit IDs.
 * On success the socket is returned bh-locked.
 */
static struct sock *nr_find_socket(unsigned char index, unsigned char id)
{
	struct sock *s;
	struct hlist_node *node;

	spin_lock_bh(&nr_list_lock);
	sk_for_each(s, node, &nr_list) {
		struct nr_sock *nr = nr_sk(s);

		if (nr->my_index == index && nr->my_id == id) {
			bh_lock_sock(s);
			goto found;
		}
	}
	s = NULL;
found:
	spin_unlock_bh(&nr_list_lock);
	return s;
}

/*
 * Find a connected NET/ROM socket given their circuit IDs.
 * On success the socket is returned bh-locked.
 */
static struct sock *nr_find_peer(unsigned char index, unsigned char id,
	ax25_address *dest)
{
	struct sock *s;
	struct hlist_node *node;

	spin_lock_bh(&nr_list_lock);
	sk_for_each(s, node, &nr_list) {
		struct nr_sock *nr = nr_sk(s);

		if (nr->your_index == index && nr->your_id == id &&
		    !ax25cmp(&nr->dest_addr, dest)) {
			bh_lock_sock(s);
			goto found;
		}
	}
	s = NULL;
found:
	spin_unlock_bh(&nr_list_lock);
	return s;
}

/*
 * Find next free circuit ID.  Ids whose index or id byte is zero are
 * skipped (0/0 is reserved -- see nr_rx_frame()).
 * NOTE(review): this scans until a free id is found and assumes one
 * exists; with all 255*255 circuits in use it would spin forever.
 */
static unsigned short nr_find_next_circuit(void)
{
	unsigned short id = circuit;
	unsigned char i, j;
	struct sock *sk;

	for (;;) {
		i = id / 256;
		j = id % 256;

		if (i != 0 && j != 0) {
			if ((sk=nr_find_socket(i, j)) == NULL)
				break;
			bh_unlock_sock(sk);
		}

		id++;
	}

	return id;
}

/*
 *	Deferred destroy.
 */
void nr_destroy_socket(struct sock *);

/*
 * Handler for deferred kills.
 */
static void nr_destroy_timer(unsigned long data)
{
	struct sock *sk=(struct sock *)data;
	bh_lock_sock(sk);
	sock_hold(sk);
	nr_destroy_socket(sk);
	bh_unlock_sock(sk);
	sock_put(sk);
}

/*
 * This is called from user mode and the timers. Thus it protects itself
 * against interrupt users but doesn't worry about being called during
 * work. Once it is removed from the queue no interrupt or bottom half
 * will touch it and we are (fairly 8-) ) safe.
 */
void nr_destroy_socket(struct sock *sk)
{
	struct sk_buff *skb;

	nr_remove_socket(sk);
	nr_stop_heartbeat(sk);
	nr_stop_t1timer(sk);
	nr_stop_t2timer(sk);
	nr_stop_t4timer(sk);
	nr_stop_idletimer(sk);

	nr_clear_queues(sk);		/* Flush the queues */

	while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
		if (skb->sk != sk) { /* A pending connection */
			/* Queue the unaccepted socket for death */
			sock_set_flag(skb->sk, SOCK_DEAD);
			nr_start_heartbeat(skb->sk);
			nr_sk(skb->sk)->state = NR_STATE_0;
		}

		kfree_skb(skb);
	}

	if (sk_has_allocations(sk)) {
		/* Defer: outstanding buffers */
		sk->sk_timer.function = nr_destroy_timer;
		sk->sk_timer.expires = jiffies + 2 * HZ;
		add_timer(&sk->sk_timer);
	} else
		sock_put(sk);
}

/*
 * Handling for system calls applied via the various interfaces to a
 * NET/ROM socket object.
*/ static int nr_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen) { struct sock *sk = sock->sk; struct nr_sock *nr = nr_sk(sk); int opt; if (level != SOL_NETROM) return -ENOPROTOOPT; if (optlen < sizeof(int)) return -EINVAL; if (get_user(opt, (int __user *)optval)) return -EFAULT; switch (optname) { case NETROM_T1: if (opt < 1) return -EINVAL; nr->t1 = opt * HZ; return 0; case NETROM_T2: if (opt < 1) return -EINVAL; nr->t2 = opt * HZ; return 0; case NETROM_N2: if (opt < 1 || opt > 31) return -EINVAL; nr->n2 = opt; return 0; case NETROM_T4: if (opt < 1) return -EINVAL; nr->t4 = opt * HZ; return 0; case NETROM_IDLE: if (opt < 0) return -EINVAL; nr->idle = opt * 60 * HZ; return 0; default: return -ENOPROTOOPT; } } static int nr_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen) { struct sock *sk = sock->sk; struct nr_sock *nr = nr_sk(sk); int val = 0; int len; if (level != SOL_NETROM) return -ENOPROTOOPT; if (get_user(len, optlen)) return -EFAULT; if (len < 0) return -EINVAL; switch (optname) { case NETROM_T1: val = nr->t1 / HZ; break; case NETROM_T2: val = nr->t2 / HZ; break; case NETROM_N2: val = nr->n2; break; case NETROM_T4: val = nr->t4 / HZ; break; case NETROM_IDLE: val = nr->idle / (60 * HZ); break; default: return -ENOPROTOOPT; } len = min_t(unsigned int, len, sizeof(int)); if (put_user(len, optlen)) return -EFAULT; return copy_to_user(optval, &val, len) ? 
-EFAULT : 0; } static int nr_listen(struct socket *sock, int backlog) { struct sock *sk = sock->sk; lock_sock(sk); if (sk->sk_state != TCP_LISTEN) { memset(&nr_sk(sk)->user_addr, 0, AX25_ADDR_LEN); sk->sk_max_ack_backlog = backlog; sk->sk_state = TCP_LISTEN; release_sock(sk); return 0; } release_sock(sk); return -EOPNOTSUPP; } static struct proto nr_proto = { .name = "NETROM", .owner = THIS_MODULE, .obj_size = sizeof(struct nr_sock), }; static int nr_create(struct net *net, struct socket *sock, int protocol, int kern) { struct sock *sk; struct nr_sock *nr; if (!net_eq(net, &init_net)) return -EAFNOSUPPORT; if (sock->type != SOCK_SEQPACKET || protocol != 0) return -ESOCKTNOSUPPORT; sk = sk_alloc(net, PF_NETROM, GFP_ATOMIC, &nr_proto); if (sk == NULL) return -ENOMEM; nr = nr_sk(sk); sock_init_data(sock, sk); sock->ops = &nr_proto_ops; sk->sk_protocol = protocol; skb_queue_head_init(&nr->ack_queue); skb_queue_head_init(&nr->reseq_queue); skb_queue_head_init(&nr->frag_queue); nr_init_timers(sk); nr->t1 = msecs_to_jiffies(sysctl_netrom_transport_timeout); nr->t2 = msecs_to_jiffies(sysctl_netrom_transport_acknowledge_delay); nr->n2 = msecs_to_jiffies(sysctl_netrom_transport_maximum_tries); nr->t4 = msecs_to_jiffies(sysctl_netrom_transport_busy_delay); nr->idle = msecs_to_jiffies(sysctl_netrom_transport_no_activity_timeout); nr->window = sysctl_netrom_transport_requested_window_size; nr->bpqext = 1; nr->state = NR_STATE_0; return 0; } static struct sock *nr_make_new(struct sock *osk) { struct sock *sk; struct nr_sock *nr, *onr; if (osk->sk_type != SOCK_SEQPACKET) return NULL; sk = sk_alloc(sock_net(osk), PF_NETROM, GFP_ATOMIC, osk->sk_prot); if (sk == NULL) return NULL; nr = nr_sk(sk); sock_init_data(NULL, sk); sk->sk_type = osk->sk_type; sk->sk_priority = osk->sk_priority; sk->sk_protocol = osk->sk_protocol; sk->sk_rcvbuf = osk->sk_rcvbuf; sk->sk_sndbuf = osk->sk_sndbuf; sk->sk_state = TCP_ESTABLISHED; sock_copy_flags(sk, osk); skb_queue_head_init(&nr->ack_queue); 
skb_queue_head_init(&nr->reseq_queue); skb_queue_head_init(&nr->frag_queue); nr_init_timers(sk); onr = nr_sk(osk); nr->t1 = onr->t1; nr->t2 = onr->t2; nr->n2 = onr->n2; nr->t4 = onr->t4; nr->idle = onr->idle; nr->window = onr->window; nr->device = onr->device; nr->bpqext = onr->bpqext; return sk; } static int nr_release(struct socket *sock) { struct sock *sk = sock->sk; struct nr_sock *nr; if (sk == NULL) return 0; sock_hold(sk); sock_orphan(sk); lock_sock(sk); nr = nr_sk(sk); switch (nr->state) { case NR_STATE_0: case NR_STATE_1: case NR_STATE_2: nr_disconnect(sk, 0); nr_destroy_socket(sk); break; case NR_STATE_3: nr_clear_queues(sk); nr->n2count = 0; nr_write_internal(sk, NR_DISCREQ); nr_start_t1timer(sk); nr_stop_t2timer(sk); nr_stop_t4timer(sk); nr_stop_idletimer(sk); nr->state = NR_STATE_2; sk->sk_state = TCP_CLOSE; sk->sk_shutdown |= SEND_SHUTDOWN; sk->sk_state_change(sk); sock_set_flag(sk, SOCK_DESTROY); break; default: break; } sock->sk = NULL; release_sock(sk); sock_put(sk); return 0; } static int nr_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) { struct sock *sk = sock->sk; struct nr_sock *nr = nr_sk(sk); struct full_sockaddr_ax25 *addr = (struct full_sockaddr_ax25 *)uaddr; struct net_device *dev; ax25_uid_assoc *user; ax25_address *source; lock_sock(sk); if (!sock_flag(sk, SOCK_ZAPPED)) { release_sock(sk); return -EINVAL; } if (addr_len < sizeof(struct sockaddr_ax25) || addr_len > sizeof(struct full_sockaddr_ax25)) { release_sock(sk); return -EINVAL; } if (addr_len < (addr->fsa_ax25.sax25_ndigis * sizeof(ax25_address) + sizeof(struct sockaddr_ax25))) { release_sock(sk); return -EINVAL; } if (addr->fsa_ax25.sax25_family != AF_NETROM) { release_sock(sk); return -EINVAL; } if ((dev = nr_dev_get(&addr->fsa_ax25.sax25_call)) == NULL) { release_sock(sk); return -EADDRNOTAVAIL; } /* * Only the super user can set an arbitrary user callsign. 
*/ if (addr->fsa_ax25.sax25_ndigis == 1) { if (!capable(CAP_NET_BIND_SERVICE)) { dev_put(dev); release_sock(sk); return -EACCES; } nr->user_addr = addr->fsa_digipeater[0]; nr->source_addr = addr->fsa_ax25.sax25_call; } else { source = &addr->fsa_ax25.sax25_call; user = ax25_findbyuid(current_euid()); if (user) { nr->user_addr = user->call; ax25_uid_put(user); } else { if (ax25_uid_policy && !capable(CAP_NET_BIND_SERVICE)) { release_sock(sk); dev_put(dev); return -EPERM; } nr->user_addr = *source; } nr->source_addr = *source; } nr->device = dev; nr_insert_socket(sk); sock_reset_flag(sk, SOCK_ZAPPED); dev_put(dev); release_sock(sk); return 0; } static int nr_connect(struct socket *sock, struct sockaddr *uaddr, int addr_len, int flags) { struct sock *sk = sock->sk; struct nr_sock *nr = nr_sk(sk); struct sockaddr_ax25 *addr = (struct sockaddr_ax25 *)uaddr; ax25_address *source = NULL; ax25_uid_assoc *user; struct net_device *dev; int err = 0; lock_sock(sk); if (sk->sk_state == TCP_ESTABLISHED && sock->state == SS_CONNECTING) { sock->state = SS_CONNECTED; goto out_release; /* Connect completed during a ERESTARTSYS event */ } if (sk->sk_state == TCP_CLOSE && sock->state == SS_CONNECTING) { sock->state = SS_UNCONNECTED; err = -ECONNREFUSED; goto out_release; } if (sk->sk_state == TCP_ESTABLISHED) { err = -EISCONN; /* No reconnect on a seqpacket socket */ goto out_release; } sk->sk_state = TCP_CLOSE; sock->state = SS_UNCONNECTED; if (addr_len != sizeof(struct sockaddr_ax25) && addr_len != sizeof(struct full_sockaddr_ax25)) { err = -EINVAL; goto out_release; } if (addr->sax25_family != AF_NETROM) { err = -EINVAL; goto out_release; } if (sock_flag(sk, SOCK_ZAPPED)) { /* Must bind first - autobinding in this may or may not work */ sock_reset_flag(sk, SOCK_ZAPPED); if ((dev = nr_dev_first()) == NULL) { err = -ENETUNREACH; goto out_release; } source = (ax25_address *)dev->dev_addr; user = ax25_findbyuid(current_euid()); if (user) { nr->user_addr = user->call; 
ax25_uid_put(user); } else { if (ax25_uid_policy && !capable(CAP_NET_ADMIN)) { dev_put(dev); err = -EPERM; goto out_release; } nr->user_addr = *source; } nr->source_addr = *source; nr->device = dev; dev_put(dev); nr_insert_socket(sk); /* Finish the bind */ } nr->dest_addr = addr->sax25_call; release_sock(sk); circuit = nr_find_next_circuit(); lock_sock(sk); nr->my_index = circuit / 256; nr->my_id = circuit % 256; circuit++; /* Move to connecting socket, start sending Connect Requests */ sock->state = SS_CONNECTING; sk->sk_state = TCP_SYN_SENT; nr_establish_data_link(sk); nr->state = NR_STATE_1; nr_start_heartbeat(sk); /* Now the loop */ if (sk->sk_state != TCP_ESTABLISHED && (flags & O_NONBLOCK)) { err = -EINPROGRESS; goto out_release; } /* * A Connect Ack with Choke or timeout or failed routing will go to * closed. */ if (sk->sk_state == TCP_SYN_SENT) { DEFINE_WAIT(wait); for (;;) { prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); if (sk->sk_state != TCP_SYN_SENT) break; if (!signal_pending(current)) { release_sock(sk); schedule(); lock_sock(sk); continue; } err = -ERESTARTSYS; break; } finish_wait(sk_sleep(sk), &wait); if (err) goto out_release; } if (sk->sk_state != TCP_ESTABLISHED) { sock->state = SS_UNCONNECTED; err = sock_error(sk); /* Always set at this point */ goto out_release; } sock->state = SS_CONNECTED; out_release: release_sock(sk); return err; } static int nr_accept(struct socket *sock, struct socket *newsock, int flags) { struct sk_buff *skb; struct sock *newsk; DEFINE_WAIT(wait); struct sock *sk; int err = 0; if ((sk = sock->sk) == NULL) return -EINVAL; lock_sock(sk); if (sk->sk_type != SOCK_SEQPACKET) { err = -EOPNOTSUPP; goto out_release; } if (sk->sk_state != TCP_LISTEN) { err = -EINVAL; goto out_release; } /* * The write queue this time is holding sockets ready to use * hooked into the SABM we saved */ for (;;) { prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); skb = skb_dequeue(&sk->sk_receive_queue); if (skb) break; if 
(flags & O_NONBLOCK) { err = -EWOULDBLOCK; break; } if (!signal_pending(current)) { release_sock(sk); schedule(); lock_sock(sk); continue; } err = -ERESTARTSYS; break; } finish_wait(sk_sleep(sk), &wait); if (err) goto out_release; newsk = skb->sk; sock_graft(newsk, newsock); /* Now attach up the new socket */ kfree_skb(skb); sk_acceptq_removed(sk); out_release: release_sock(sk); return err; } static int nr_getname(struct socket *sock, struct sockaddr *uaddr, int *uaddr_len, int peer) { struct full_sockaddr_ax25 *sax = (struct full_sockaddr_ax25 *)uaddr; struct sock *sk = sock->sk; struct nr_sock *nr = nr_sk(sk); lock_sock(sk); if (peer != 0) { if (sk->sk_state != TCP_ESTABLISHED) { release_sock(sk); return -ENOTCONN; } sax->fsa_ax25.sax25_family = AF_NETROM; sax->fsa_ax25.sax25_ndigis = 1; sax->fsa_ax25.sax25_call = nr->user_addr; memset(sax->fsa_digipeater, 0, sizeof(sax->fsa_digipeater)); sax->fsa_digipeater[0] = nr->dest_addr; *uaddr_len = sizeof(struct full_sockaddr_ax25); } else { sax->fsa_ax25.sax25_family = AF_NETROM; sax->fsa_ax25.sax25_ndigis = 0; sax->fsa_ax25.sax25_call = nr->source_addr; *uaddr_len = sizeof(struct sockaddr_ax25); } release_sock(sk); return 0; } int nr_rx_frame(struct sk_buff *skb, struct net_device *dev) { struct sock *sk; struct sock *make; struct nr_sock *nr_make; ax25_address *src, *dest, *user; unsigned short circuit_index, circuit_id; unsigned short peer_circuit_index, peer_circuit_id; unsigned short frametype, flags, window, timeout; int ret; skb->sk = NULL; /* Initially we don't know who it's for */ /* * skb->data points to the netrom frame start */ src = (ax25_address *)(skb->data + 0); dest = (ax25_address *)(skb->data + 7); circuit_index = skb->data[15]; circuit_id = skb->data[16]; peer_circuit_index = skb->data[17]; peer_circuit_id = skb->data[18]; frametype = skb->data[19] & 0x0F; flags = skb->data[19] & 0xF0; /* * Check for an incoming IP over NET/ROM frame. 
*/ if (frametype == NR_PROTOEXT && circuit_index == NR_PROTO_IP && circuit_id == NR_PROTO_IP) { skb_pull(skb, NR_NETWORK_LEN + NR_TRANSPORT_LEN); skb_reset_transport_header(skb); return nr_rx_ip(skb, dev); } /* * Find an existing socket connection, based on circuit ID, if it's * a Connect Request base it on their circuit ID. * * Circuit ID 0/0 is not valid but it could still be a "reset" for a * circuit that no longer exists at the other end ... */ sk = NULL; if (circuit_index == 0 && circuit_id == 0) { if (frametype == NR_CONNACK && flags == NR_CHOKE_FLAG) sk = nr_find_peer(peer_circuit_index, peer_circuit_id, src); } else { if (frametype == NR_CONNREQ) sk = nr_find_peer(circuit_index, circuit_id, src); else sk = nr_find_socket(circuit_index, circuit_id); } if (sk != NULL) { skb_reset_transport_header(skb); if (frametype == NR_CONNACK && skb->len == 22) nr_sk(sk)->bpqext = 1; else nr_sk(sk)->bpqext = 0; ret = nr_process_rx_frame(sk, skb); bh_unlock_sock(sk); return ret; } /* * Now it should be a CONNREQ. */ if (frametype != NR_CONNREQ) { /* * Here it would be nice to be able to send a reset but * NET/ROM doesn't have one. We've tried to extend the protocol * by sending NR_CONNACK | NR_CHOKE_FLAGS replies but that * apparently kills BPQ boxes... :-( * So now we try to follow the established behaviour of * G8PZT's Xrouter which is sending packets with command type 7 * as an extension of the protocol. 
*/ if (sysctl_netrom_reset_circuit && (frametype != NR_RESET || flags != 0)) nr_transmit_reset(skb, 1); return 0; } sk = nr_find_listener(dest); user = (ax25_address *)(skb->data + 21); if (sk == NULL || sk_acceptq_is_full(sk) || (make = nr_make_new(sk)) == NULL) { nr_transmit_refusal(skb, 0); if (sk) bh_unlock_sock(sk); return 0; } window = skb->data[20]; skb->sk = make; make->sk_state = TCP_ESTABLISHED; /* Fill in his circuit details */ nr_make = nr_sk(make); nr_make->source_addr = *dest; nr_make->dest_addr = *src; nr_make->user_addr = *user; nr_make->your_index = circuit_index; nr_make->your_id = circuit_id; bh_unlock_sock(sk); circuit = nr_find_next_circuit(); bh_lock_sock(sk); nr_make->my_index = circuit / 256; nr_make->my_id = circuit % 256; circuit++; /* Window negotiation */ if (window < nr_make->window) nr_make->window = window; /* L4 timeout negotiation */ if (skb->len == 37) { timeout = skb->data[36] * 256 + skb->data[35]; if (timeout * HZ < nr_make->t1) nr_make->t1 = timeout * HZ; nr_make->bpqext = 1; } else { nr_make->bpqext = 0; } nr_write_internal(make, NR_CONNACK); nr_make->condition = 0x00; nr_make->vs = 0; nr_make->va = 0; nr_make->vr = 0; nr_make->vl = 0; nr_make->state = NR_STATE_3; sk_acceptq_added(sk); skb_queue_head(&sk->sk_receive_queue, skb); if (!sock_flag(sk, SOCK_DEAD)) sk->sk_data_ready(sk, skb->len); bh_unlock_sock(sk); nr_insert_socket(make); nr_start_heartbeat(make); nr_start_idletimer(make); return 1; } static int nr_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len) { struct sock *sk = sock->sk; struct nr_sock *nr = nr_sk(sk); struct sockaddr_ax25 *usax = (struct sockaddr_ax25 *)msg->msg_name; int err; struct sockaddr_ax25 sax; struct sk_buff *skb; unsigned char *asmptr; int size; if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_EOR|MSG_CMSG_COMPAT)) return -EINVAL; lock_sock(sk); if (sock_flag(sk, SOCK_ZAPPED)) { err = -EADDRNOTAVAIL; goto out; } if (sk->sk_shutdown & SEND_SHUTDOWN) { send_sig(SIGPIPE, 
current, 0); err = -EPIPE; goto out; } if (nr->device == NULL) { err = -ENETUNREACH; goto out; } if (usax) { if (msg->msg_namelen < sizeof(sax)) { err = -EINVAL; goto out; } sax = *usax; if (ax25cmp(&nr->dest_addr, &sax.sax25_call) != 0) { err = -EISCONN; goto out; } if (sax.sax25_family != AF_NETROM) { err = -EINVAL; goto out; } } else { if (sk->sk_state != TCP_ESTABLISHED) { err = -ENOTCONN; goto out; } sax.sax25_family = AF_NETROM; sax.sax25_call = nr->dest_addr; } /* Build a packet - the conventional user limit is 236 bytes. We can do ludicrously large NetROM frames but must not overflow */ if (len > 65536) { err = -EMSGSIZE; goto out; } size = len + NR_NETWORK_LEN + NR_TRANSPORT_LEN; if ((skb = sock_alloc_send_skb(sk, size, msg->msg_flags & MSG_DONTWAIT, &err)) == NULL) goto out; skb_reserve(skb, size - len); skb_reset_transport_header(skb); /* * Push down the NET/ROM header */ asmptr = skb_push(skb, NR_TRANSPORT_LEN); /* Build a NET/ROM Transport header */ *asmptr++ = nr->your_index; *asmptr++ = nr->your_id; *asmptr++ = 0; /* To be filled in later */ *asmptr++ = 0; /* Ditto */ *asmptr++ = NR_INFO; /* * Put the data on the end */ skb_put(skb, len); /* User data follows immediately after the NET/ROM transport header */ if (memcpy_fromiovec(skb_transport_header(skb), msg->msg_iov, len)) { kfree_skb(skb); err = -EFAULT; goto out; } if (sk->sk_state != TCP_ESTABLISHED) { kfree_skb(skb); err = -ENOTCONN; goto out; } nr_output(sk, skb); /* Shove it onto the queue */ err = len; out: release_sock(sk); return err; } static int nr_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t size, int flags) { struct sock *sk = sock->sk; struct sockaddr_ax25 *sax = (struct sockaddr_ax25 *)msg->msg_name; size_t copied; struct sk_buff *skb; int er; /* * This works for seqpacket too. The receiver has ordered the queue for * us! 
We do one quick check first though */ lock_sock(sk); if (sk->sk_state != TCP_ESTABLISHED) { release_sock(sk); return -ENOTCONN; } /* Now we can treat all alike */ if ((skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, flags & MSG_DONTWAIT, &er)) == NULL) { release_sock(sk); return er; } skb_reset_transport_header(skb); copied = skb->len; if (copied > size) { copied = size; msg->msg_flags |= MSG_TRUNC; } skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); if (sax != NULL) { sax->sax25_family = AF_NETROM; skb_copy_from_linear_data_offset(skb, 7, sax->sax25_call.ax25_call, AX25_ADDR_LEN); } msg->msg_namelen = sizeof(*sax); skb_free_datagram(sk, skb); release_sock(sk); return copied; } static int nr_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) { struct sock *sk = sock->sk; void __user *argp = (void __user *)arg; int ret; switch (cmd) { case TIOCOUTQ: { long amount; lock_sock(sk); amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk); if (amount < 0) amount = 0; release_sock(sk); return put_user(amount, (int __user *)argp); } case TIOCINQ: { struct sk_buff *skb; long amount = 0L; lock_sock(sk); /* These two are safe on a single CPU system as only user tasks fiddle here */ if ((skb = skb_peek(&sk->sk_receive_queue)) != NULL) amount = skb->len; release_sock(sk); return put_user(amount, (int __user *)argp); } case SIOCGSTAMP: lock_sock(sk); ret = sock_get_timestamp(sk, argp); release_sock(sk); return ret; case SIOCGSTAMPNS: lock_sock(sk); ret = sock_get_timestampns(sk, argp); release_sock(sk); return ret; case SIOCGIFADDR: case SIOCSIFADDR: case SIOCGIFDSTADDR: case SIOCSIFDSTADDR: case SIOCGIFBRDADDR: case SIOCSIFBRDADDR: case SIOCGIFNETMASK: case SIOCSIFNETMASK: case SIOCGIFMETRIC: case SIOCSIFMETRIC: return -EINVAL; case SIOCADDRT: case SIOCDELRT: case SIOCNRDECOBS: if (!capable(CAP_NET_ADMIN)) return -EPERM; return nr_rt_ioctl(cmd, argp); default: return -ENOIOCTLCMD; } return 0; } #ifdef CONFIG_PROC_FS static void *nr_info_start(struct seq_file *seq, 
loff_t *pos) { spin_lock_bh(&nr_list_lock); return seq_hlist_start_head(&nr_list, *pos); } static void *nr_info_next(struct seq_file *seq, void *v, loff_t *pos) { return seq_hlist_next(v, &nr_list, pos); } static void nr_info_stop(struct seq_file *seq, void *v) { spin_unlock_bh(&nr_list_lock); } static int nr_info_show(struct seq_file *seq, void *v) { struct sock *s = sk_entry(v); struct net_device *dev; struct nr_sock *nr; const char *devname; char buf[11]; if (v == SEQ_START_TOKEN) seq_puts(seq, "user_addr dest_node src_node dev my your st vs vr va t1 t2 t4 idle n2 wnd Snd-Q Rcv-Q inode\n"); else { bh_lock_sock(s); nr = nr_sk(s); if ((dev = nr->device) == NULL) devname = "???"; else devname = dev->name; seq_printf(seq, "%-9s ", ax2asc(buf, &nr->user_addr)); seq_printf(seq, "%-9s ", ax2asc(buf, &nr->dest_addr)); seq_printf(seq, "%-9s %-3s %02X/%02X %02X/%02X %2d %3d %3d %3d %3lu/%03lu %2lu/%02lu %3lu/%03lu %3lu/%03lu %2d/%02d %3d %5d %5d %ld\n", ax2asc(buf, &nr->source_addr), devname, nr->my_index, nr->my_id, nr->your_index, nr->your_id, nr->state, nr->vs, nr->vr, nr->va, ax25_display_timer(&nr->t1timer) / HZ, nr->t1 / HZ, ax25_display_timer(&nr->t2timer) / HZ, nr->t2 / HZ, ax25_display_timer(&nr->t4timer) / HZ, nr->t4 / HZ, ax25_display_timer(&nr->idletimer) / (60 * HZ), nr->idle / (60 * HZ), nr->n2count, nr->n2, nr->window, sk_wmem_alloc_get(s), sk_rmem_alloc_get(s), s->sk_socket ? 
SOCK_INODE(s->sk_socket)->i_ino : 0L); bh_unlock_sock(s); } return 0; } static const struct seq_operations nr_info_seqops = { .start = nr_info_start, .next = nr_info_next, .stop = nr_info_stop, .show = nr_info_show, }; static int nr_info_open(struct inode *inode, struct file *file) { return seq_open(file, &nr_info_seqops); } static const struct file_operations nr_info_fops = { .owner = THIS_MODULE, .open = nr_info_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release, }; #endif /* CONFIG_PROC_FS */ static const struct net_proto_family nr_family_ops = { .family = PF_NETROM, .create = nr_create, .owner = THIS_MODULE, }; static const struct proto_ops nr_proto_ops = { .family = PF_NETROM, .owner = THIS_MODULE, .release = nr_release, .bind = nr_bind, .connect = nr_connect, .socketpair = sock_no_socketpair, .accept = nr_accept, .getname = nr_getname, .poll = datagram_poll, .ioctl = nr_ioctl, .listen = nr_listen, .shutdown = sock_no_shutdown, .setsockopt = nr_setsockopt, .getsockopt = nr_getsockopt, .sendmsg = nr_sendmsg, .recvmsg = nr_recvmsg, .mmap = sock_no_mmap, .sendpage = sock_no_sendpage, }; static struct notifier_block nr_dev_notifier = { .notifier_call = nr_device_event, }; static struct net_device **dev_nr; static struct ax25_protocol nr_pid = { .pid = AX25_P_NETROM, .func = nr_route_frame }; static struct ax25_linkfail nr_linkfail_notifier = { .func = nr_link_failed, }; static int __init nr_proto_init(void) { int i; int rc = proto_register(&nr_proto, 0); if (rc != 0) goto out; if (nr_ndevs > 0x7fffffff/sizeof(struct net_device *)) { printk(KERN_ERR "NET/ROM: nr_proto_init - nr_ndevs parameter to large\n"); return -1; } dev_nr = kzalloc(nr_ndevs * sizeof(struct net_device *), GFP_KERNEL); if (dev_nr == NULL) { printk(KERN_ERR "NET/ROM: nr_proto_init - unable to allocate device array\n"); return -1; } for (i = 0; i < nr_ndevs; i++) { char name[IFNAMSIZ]; struct net_device *dev; sprintf(name, "nr%d", i); dev = alloc_netdev(0, name, nr_setup); if 
(!dev) { printk(KERN_ERR "NET/ROM: nr_proto_init - unable to allocate device structure\n"); goto fail; } dev->base_addr = i; if (register_netdev(dev)) { printk(KERN_ERR "NET/ROM: nr_proto_init - unable to register network device\n"); free_netdev(dev); goto fail; } nr_set_lockdep_key(dev); dev_nr[i] = dev; } if (sock_register(&nr_family_ops)) { printk(KERN_ERR "NET/ROM: nr_proto_init - unable to register socket family\n"); goto fail; } register_netdevice_notifier(&nr_dev_notifier); ax25_register_pid(&nr_pid); ax25_linkfail_register(&nr_linkfail_notifier); #ifdef CONFIG_SYSCTL nr_register_sysctl(); #endif nr_loopback_init(); proc_net_fops_create(&init_net, "nr", S_IRUGO, &nr_info_fops); proc_net_fops_create(&init_net, "nr_neigh", S_IRUGO, &nr_neigh_fops); proc_net_fops_create(&init_net, "nr_nodes", S_IRUGO, &nr_nodes_fops); out: return rc; fail: while (--i >= 0) { unregister_netdev(dev_nr[i]); free_netdev(dev_nr[i]); } kfree(dev_nr); proto_unregister(&nr_proto); rc = -1; goto out; } module_init(nr_proto_init); module_param(nr_ndevs, int, 0); MODULE_PARM_DESC(nr_ndevs, "number of NET/ROM devices"); MODULE_AUTHOR("Jonathan Naylor G4KLX <g4klx@g4klx.demon.co.uk>"); MODULE_DESCRIPTION("The amateur radio NET/ROM network and transport layer protocol"); MODULE_LICENSE("GPL"); MODULE_ALIAS_NETPROTO(PF_NETROM); static void __exit nr_exit(void) { int i; proc_net_remove(&init_net, "nr"); proc_net_remove(&init_net, "nr_neigh"); proc_net_remove(&init_net, "nr_nodes"); nr_loopback_clear(); nr_rt_free(); #ifdef CONFIG_SYSCTL nr_unregister_sysctl(); #endif ax25_linkfail_release(&nr_linkfail_notifier); ax25_protocol_release(AX25_P_NETROM); unregister_netdevice_notifier(&nr_dev_notifier); sock_unregister(PF_NETROM); for (i = 0; i < nr_ndevs; i++) { struct net_device *dev = dev_nr[i]; if (dev) { unregister_netdev(dev); free_netdev(dev); } } kfree(dev_nr); proto_unregister(&nr_proto); } module_exit(nr_exit);
gpl-2.0
sktjdgns1189/android_kernel_samsung_SHV-E160S
arch/powerpc/kernel/ibmebus.c
2728
16442
/* * IBM PowerPC IBM eBus Infrastructure Support. * * Copyright (c) 2005 IBM Corporation * Joachim Fenkes <fenkes@de.ibm.com> * Heiko J Schick <schickhj@de.ibm.com> * * All rights reserved. * * This source code is distributed under a dual license of GPL v2.0 and OpenIB * BSD. * * OpenIB BSD License * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
*/ #include <linux/init.h> #include <linux/console.h> #include <linux/kobject.h> #include <linux/dma-mapping.h> #include <linux/interrupt.h> #include <linux/of.h> #include <linux/slab.h> #include <linux/of_platform.h> #include <asm/ibmebus.h> #include <asm/abs_addr.h> static struct device ibmebus_bus_device = { /* fake "parent" device */ .init_name = "ibmebus", }; struct bus_type ibmebus_bus_type; /* These devices will automatically be added to the bus during init */ static struct of_device_id __initdata ibmebus_matches[] = { { .compatible = "IBM,lhca" }, { .compatible = "IBM,lhea" }, {}, }; static void *ibmebus_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag) { void *mem; mem = kmalloc(size, flag); *dma_handle = (dma_addr_t)mem; return mem; } static void ibmebus_free_coherent(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle) { kfree(vaddr); } static dma_addr_t ibmebus_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction direction, struct dma_attrs *attrs) { return (dma_addr_t)(page_address(page) + offset); } static void ibmebus_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size, enum dma_data_direction direction, struct dma_attrs *attrs) { return; } static int ibmebus_map_sg(struct device *dev, struct scatterlist *sgl, int nents, enum dma_data_direction direction, struct dma_attrs *attrs) { struct scatterlist *sg; int i; for_each_sg(sgl, sg, nents, i) { sg->dma_address = (dma_addr_t) sg_virt(sg); sg->dma_length = sg->length; } return nents; } static void ibmebus_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction direction, struct dma_attrs *attrs) { return; } static int ibmebus_dma_supported(struct device *dev, u64 mask) { return 1; } static struct dma_map_ops ibmebus_dma_ops = { .alloc_coherent = ibmebus_alloc_coherent, .free_coherent = ibmebus_free_coherent, .map_sg = ibmebus_map_sg, .unmap_sg = 
ibmebus_unmap_sg, .dma_supported = ibmebus_dma_supported, .map_page = ibmebus_map_page, .unmap_page = ibmebus_unmap_page, }; static int ibmebus_match_path(struct device *dev, void *data) { struct device_node *dn = to_platform_device(dev)->dev.of_node; return (dn->full_name && (strcasecmp((char *)data, dn->full_name) == 0)); } static int ibmebus_match_node(struct device *dev, void *data) { return to_platform_device(dev)->dev.of_node == data; } static int ibmebus_create_device(struct device_node *dn) { struct platform_device *dev; int ret; dev = of_device_alloc(dn, NULL, &ibmebus_bus_device); if (!dev) return -ENOMEM; dev->dev.bus = &ibmebus_bus_type; dev->dev.archdata.dma_ops = &ibmebus_dma_ops; ret = of_device_add(dev); if (ret) platform_device_put(dev); return ret; } static int ibmebus_create_devices(const struct of_device_id *matches) { struct device_node *root, *child; int ret = 0; root = of_find_node_by_path("/"); for_each_child_of_node(root, child) { if (!of_match_node(matches, child)) continue; if (bus_find_device(&ibmebus_bus_type, NULL, child, ibmebus_match_node)) continue; ret = ibmebus_create_device(child); if (ret) { printk(KERN_ERR "%s: failed to create device (%i)", __func__, ret); of_node_put(child); break; } } of_node_put(root); return ret; } int ibmebus_register_driver(struct of_platform_driver *drv) { /* If the driver uses devices that ibmebus doesn't know, add them */ ibmebus_create_devices(drv->driver.of_match_table); drv->driver.bus = &ibmebus_bus_type; return driver_register(&drv->driver); } EXPORT_SYMBOL(ibmebus_register_driver); void ibmebus_unregister_driver(struct of_platform_driver *drv) { driver_unregister(&drv->driver); } EXPORT_SYMBOL(ibmebus_unregister_driver); int ibmebus_request_irq(u32 ist, irq_handler_t handler, unsigned long irq_flags, const char *devname, void *dev_id) { unsigned int irq = irq_create_mapping(NULL, ist); if (irq == NO_IRQ) return -EINVAL; return request_irq(irq, handler, irq_flags, devname, dev_id); } 
EXPORT_SYMBOL(ibmebus_request_irq); void ibmebus_free_irq(u32 ist, void *dev_id) { unsigned int irq = irq_find_mapping(NULL, ist); free_irq(irq, dev_id); irq_dispose_mapping(irq); } EXPORT_SYMBOL(ibmebus_free_irq); static char *ibmebus_chomp(const char *in, size_t count) { char *out = kmalloc(count + 1, GFP_KERNEL); if (!out) return NULL; memcpy(out, in, count); out[count] = '\0'; if (out[count - 1] == '\n') out[count - 1] = '\0'; return out; } static ssize_t ibmebus_store_probe(struct bus_type *bus, const char *buf, size_t count) { struct device_node *dn = NULL; char *path; ssize_t rc = 0; path = ibmebus_chomp(buf, count); if (!path) return -ENOMEM; if (bus_find_device(&ibmebus_bus_type, NULL, path, ibmebus_match_path)) { printk(KERN_WARNING "%s: %s has already been probed\n", __func__, path); rc = -EEXIST; goto out; } if ((dn = of_find_node_by_path(path))) { rc = ibmebus_create_device(dn); of_node_put(dn); } else { printk(KERN_WARNING "%s: no such device node: %s\n", __func__, path); rc = -ENODEV; } out: kfree(path); if (rc) return rc; return count; } static ssize_t ibmebus_store_remove(struct bus_type *bus, const char *buf, size_t count) { struct device *dev; char *path; path = ibmebus_chomp(buf, count); if (!path) return -ENOMEM; if ((dev = bus_find_device(&ibmebus_bus_type, NULL, path, ibmebus_match_path))) { of_device_unregister(to_platform_device(dev)); kfree(path); return count; } else { printk(KERN_WARNING "%s: %s not on the bus\n", __func__, path); kfree(path); return -ENODEV; } } static struct bus_attribute ibmebus_bus_attrs[] = { __ATTR(probe, S_IWUSR, NULL, ibmebus_store_probe), __ATTR(remove, S_IWUSR, NULL, ibmebus_store_remove), __ATTR_NULL }; static int ibmebus_bus_bus_match(struct device *dev, struct device_driver *drv) { const struct of_device_id *matches = drv->of_match_table; if (!matches) return 0; return of_match_device(matches, dev) != NULL; } static int ibmebus_bus_device_probe(struct device *dev) { int error = -ENODEV; struct 
of_platform_driver *drv; struct platform_device *of_dev; const struct of_device_id *match; drv = to_of_platform_driver(dev->driver); of_dev = to_platform_device(dev); if (!drv->probe) return error; of_dev_get(of_dev); match = of_match_device(drv->driver.of_match_table, dev); if (match) error = drv->probe(of_dev, match); if (error) of_dev_put(of_dev); return error; } static int ibmebus_bus_device_remove(struct device *dev) { struct platform_device *of_dev = to_platform_device(dev); struct of_platform_driver *drv = to_of_platform_driver(dev->driver); if (dev->driver && drv->remove) drv->remove(of_dev); return 0; } static void ibmebus_bus_device_shutdown(struct device *dev) { struct platform_device *of_dev = to_platform_device(dev); struct of_platform_driver *drv = to_of_platform_driver(dev->driver); if (dev->driver && drv->shutdown) drv->shutdown(of_dev); } /* * ibmebus_bus_device_attrs */ static ssize_t devspec_show(struct device *dev, struct device_attribute *attr, char *buf) { struct platform_device *ofdev; ofdev = to_platform_device(dev); return sprintf(buf, "%s\n", ofdev->dev.of_node->full_name); } static ssize_t name_show(struct device *dev, struct device_attribute *attr, char *buf) { struct platform_device *ofdev; ofdev = to_platform_device(dev); return sprintf(buf, "%s\n", ofdev->dev.of_node->name); } static ssize_t modalias_show(struct device *dev, struct device_attribute *attr, char *buf) { ssize_t len = of_device_get_modalias(dev, buf, PAGE_SIZE - 2); buf[len] = '\n'; buf[len+1] = 0; return len+1; } struct device_attribute ibmebus_bus_device_attrs[] = { __ATTR_RO(devspec), __ATTR_RO(name), __ATTR_RO(modalias), __ATTR_NULL }; #ifdef CONFIG_PM_SLEEP static int ibmebus_bus_legacy_suspend(struct device *dev, pm_message_t mesg) { struct platform_device *of_dev = to_platform_device(dev); struct of_platform_driver *drv = to_of_platform_driver(dev->driver); int ret = 0; if (dev->driver && drv->suspend) ret = drv->suspend(of_dev, mesg); return ret; } static int 
ibmebus_bus_legacy_resume(struct device *dev) { struct platform_device *of_dev = to_platform_device(dev); struct of_platform_driver *drv = to_of_platform_driver(dev->driver); int ret = 0; if (dev->driver && drv->resume) ret = drv->resume(of_dev); return ret; } static int ibmebus_bus_pm_prepare(struct device *dev) { struct device_driver *drv = dev->driver; int ret = 0; if (drv && drv->pm && drv->pm->prepare) ret = drv->pm->prepare(dev); return ret; } static void ibmebus_bus_pm_complete(struct device *dev) { struct device_driver *drv = dev->driver; if (drv && drv->pm && drv->pm->complete) drv->pm->complete(dev); } #ifdef CONFIG_SUSPEND static int ibmebus_bus_pm_suspend(struct device *dev) { struct device_driver *drv = dev->driver; int ret = 0; if (!drv) return 0; if (drv->pm) { if (drv->pm->suspend) ret = drv->pm->suspend(dev); } else { ret = ibmebus_bus_legacy_suspend(dev, PMSG_SUSPEND); } return ret; } static int ibmebus_bus_pm_suspend_noirq(struct device *dev) { struct device_driver *drv = dev->driver; int ret = 0; if (!drv) return 0; if (drv->pm) { if (drv->pm->suspend_noirq) ret = drv->pm->suspend_noirq(dev); } return ret; } static int ibmebus_bus_pm_resume(struct device *dev) { struct device_driver *drv = dev->driver; int ret = 0; if (!drv) return 0; if (drv->pm) { if (drv->pm->resume) ret = drv->pm->resume(dev); } else { ret = ibmebus_bus_legacy_resume(dev); } return ret; } static int ibmebus_bus_pm_resume_noirq(struct device *dev) { struct device_driver *drv = dev->driver; int ret = 0; if (!drv) return 0; if (drv->pm) { if (drv->pm->resume_noirq) ret = drv->pm->resume_noirq(dev); } return ret; } #else /* !CONFIG_SUSPEND */ #define ibmebus_bus_pm_suspend NULL #define ibmebus_bus_pm_resume NULL #define ibmebus_bus_pm_suspend_noirq NULL #define ibmebus_bus_pm_resume_noirq NULL #endif /* !CONFIG_SUSPEND */ #ifdef CONFIG_HIBERNATE_CALLBACKS static int ibmebus_bus_pm_freeze(struct device *dev) { struct device_driver *drv = dev->driver; int ret = 0; if (!drv) return 
0; if (drv->pm) { if (drv->pm->freeze) ret = drv->pm->freeze(dev); } else { ret = ibmebus_bus_legacy_suspend(dev, PMSG_FREEZE); } return ret; } static int ibmebus_bus_pm_freeze_noirq(struct device *dev) { struct device_driver *drv = dev->driver; int ret = 0; if (!drv) return 0; if (drv->pm) { if (drv->pm->freeze_noirq) ret = drv->pm->freeze_noirq(dev); } return ret; } static int ibmebus_bus_pm_thaw(struct device *dev) { struct device_driver *drv = dev->driver; int ret = 0; if (!drv) return 0; if (drv->pm) { if (drv->pm->thaw) ret = drv->pm->thaw(dev); } else { ret = ibmebus_bus_legacy_resume(dev); } return ret; } static int ibmebus_bus_pm_thaw_noirq(struct device *dev) { struct device_driver *drv = dev->driver; int ret = 0; if (!drv) return 0; if (drv->pm) { if (drv->pm->thaw_noirq) ret = drv->pm->thaw_noirq(dev); } return ret; } static int ibmebus_bus_pm_poweroff(struct device *dev) { struct device_driver *drv = dev->driver; int ret = 0; if (!drv) return 0; if (drv->pm) { if (drv->pm->poweroff) ret = drv->pm->poweroff(dev); } else { ret = ibmebus_bus_legacy_suspend(dev, PMSG_HIBERNATE); } return ret; } static int ibmebus_bus_pm_poweroff_noirq(struct device *dev) { struct device_driver *drv = dev->driver; int ret = 0; if (!drv) return 0; if (drv->pm) { if (drv->pm->poweroff_noirq) ret = drv->pm->poweroff_noirq(dev); } return ret; } static int ibmebus_bus_pm_restore(struct device *dev) { struct device_driver *drv = dev->driver; int ret = 0; if (!drv) return 0; if (drv->pm) { if (drv->pm->restore) ret = drv->pm->restore(dev); } else { ret = ibmebus_bus_legacy_resume(dev); } return ret; } static int ibmebus_bus_pm_restore_noirq(struct device *dev) { struct device_driver *drv = dev->driver; int ret = 0; if (!drv) return 0; if (drv->pm) { if (drv->pm->restore_noirq) ret = drv->pm->restore_noirq(dev); } return ret; } #else /* !CONFIG_HIBERNATE_CALLBACKS */ #define ibmebus_bus_pm_freeze NULL #define ibmebus_bus_pm_thaw NULL #define ibmebus_bus_pm_poweroff NULL #define 
ibmebus_bus_pm_restore NULL #define ibmebus_bus_pm_freeze_noirq NULL #define ibmebus_bus_pm_thaw_noirq NULL #define ibmebus_bus_pm_poweroff_noirq NULL #define ibmebus_bus_pm_restore_noirq NULL #endif /* !CONFIG_HIBERNATE_CALLBACKS */ static struct dev_pm_ops ibmebus_bus_dev_pm_ops = { .prepare = ibmebus_bus_pm_prepare, .complete = ibmebus_bus_pm_complete, .suspend = ibmebus_bus_pm_suspend, .resume = ibmebus_bus_pm_resume, .freeze = ibmebus_bus_pm_freeze, .thaw = ibmebus_bus_pm_thaw, .poweroff = ibmebus_bus_pm_poweroff, .restore = ibmebus_bus_pm_restore, .suspend_noirq = ibmebus_bus_pm_suspend_noirq, .resume_noirq = ibmebus_bus_pm_resume_noirq, .freeze_noirq = ibmebus_bus_pm_freeze_noirq, .thaw_noirq = ibmebus_bus_pm_thaw_noirq, .poweroff_noirq = ibmebus_bus_pm_poweroff_noirq, .restore_noirq = ibmebus_bus_pm_restore_noirq, }; #define IBMEBUS_BUS_PM_OPS_PTR (&ibmebus_bus_dev_pm_ops) #else /* !CONFIG_PM_SLEEP */ #define IBMEBUS_BUS_PM_OPS_PTR NULL #endif /* !CONFIG_PM_SLEEP */ struct bus_type ibmebus_bus_type = { .name = "ibmebus", .uevent = of_device_uevent, .bus_attrs = ibmebus_bus_attrs, .match = ibmebus_bus_bus_match, .probe = ibmebus_bus_device_probe, .remove = ibmebus_bus_device_remove, .shutdown = ibmebus_bus_device_shutdown, .dev_attrs = ibmebus_bus_device_attrs, .pm = IBMEBUS_BUS_PM_OPS_PTR, }; EXPORT_SYMBOL(ibmebus_bus_type); static int __init ibmebus_bus_init(void) { int err; printk(KERN_INFO "IBM eBus Device Driver\n"); err = bus_register(&ibmebus_bus_type); if (err) { printk(KERN_ERR "%s: failed to register IBM eBus.\n", __func__); return err; } err = device_register(&ibmebus_bus_device); if (err) { printk(KERN_WARNING "%s: device_register returned %i\n", __func__, err); bus_unregister(&ibmebus_bus_type); return err; } err = ibmebus_create_devices(ibmebus_matches); if (err) { device_unregister(&ibmebus_bus_device); bus_unregister(&ibmebus_bus_type); return err; } return 0; } postcore_initcall(ibmebus_bus_init);
gpl-2.0
CyanideL/android_kernel_moto_shamu
tools/perf/ui/util.c
4520
1588
#include "util.h" #include "../debug.h" /* * Default error logging functions */ static int perf_stdio__error(const char *format, va_list args) { fprintf(stderr, "Error:\n"); vfprintf(stderr, format, args); return 0; } static int perf_stdio__warning(const char *format, va_list args) { fprintf(stderr, "Warning:\n"); vfprintf(stderr, format, args); return 0; } static struct perf_error_ops default_eops = { .error = perf_stdio__error, .warning = perf_stdio__warning, }; static struct perf_error_ops *perf_eops = &default_eops; int ui__error(const char *format, ...) { int ret; va_list args; va_start(args, format); ret = perf_eops->error(format, args); va_end(args); return ret; } int ui__warning(const char *format, ...) { int ret; va_list args; va_start(args, format); ret = perf_eops->warning(format, args); va_end(args); return ret; } /** * perf_error__register - Register error logging functions * @eops: The pointer to error logging function struct * * Register UI-specific error logging functions. Before calling this, * other logging functions should be unregistered, if any. */ int perf_error__register(struct perf_error_ops *eops) { if (perf_eops != &default_eops) return -1; perf_eops = eops; return 0; } /** * perf_error__unregister - Unregister error logging functions * @eops: The pointer to error logging function struct * * Unregister already registered error logging functions. */ int perf_error__unregister(struct perf_error_ops *eops) { if (perf_eops != eops) return -1; perf_eops = &default_eops; return 0; }
gpl-2.0
nbr11/android_kernel_lge_hammerhead
drivers/media/video/bt8xx/bttv-driver.c
4776
120295
/* bttv - Bt848 frame grabber driver Copyright (C) 1996,97,98 Ralph Metzler <rjkm@thp.uni-koeln.de> & Marcus Metzler <mocm@thp.uni-koeln.de> (c) 1999-2002 Gerd Knorr <kraxel@bytesex.org> some v4l2 code lines are taken from Justin's bttv2 driver which is (c) 2000 Justin Schoeman <justin@suntiger.ee.up.ac.za> V4L1 removal from: (c) 2005-2006 Nickolay V. Shmyrev <nshmyrev@yandex.ru> Fixes to be fully V4L2 compliant by (c) 2006 Mauro Carvalho Chehab <mchehab@infradead.org> Cropping and overscan support Copyright (C) 2005, 2006 Michael H. Schimek <mschimek@gmx.at> Sponsored by OPQ Systems AB This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
*/ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/init.h> #include <linux/module.h> #include <linux/delay.h> #include <linux/slab.h> #include <linux/errno.h> #include <linux/fs.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/interrupt.h> #include <linux/kdev_t.h> #include "bttvp.h" #include <media/v4l2-common.h> #include <media/v4l2-ioctl.h> #include <media/tvaudio.h> #include <media/msp3400.h> #include <linux/dma-mapping.h> #include <asm/io.h> #include <asm/byteorder.h> #include <media/saa6588.h> #define BTTV_VERSION "0.9.19" unsigned int bttv_num; /* number of Bt848s in use */ struct bttv *bttvs[BTTV_MAX]; unsigned int bttv_debug; unsigned int bttv_verbose = 1; unsigned int bttv_gpio; /* config variables */ #ifdef __BIG_ENDIAN static unsigned int bigendian=1; #else static unsigned int bigendian; #endif static unsigned int radio[BTTV_MAX]; static unsigned int irq_debug; static unsigned int gbuffers = 8; static unsigned int gbufsize = 0x208000; static unsigned int reset_crop = 1; static int video_nr[BTTV_MAX] = { [0 ... (BTTV_MAX-1)] = -1 }; static int radio_nr[BTTV_MAX] = { [0 ... (BTTV_MAX-1)] = -1 }; static int vbi_nr[BTTV_MAX] = { [0 ... 
(BTTV_MAX-1)] = -1 }; static int debug_latency; static int disable_ir; static unsigned int fdsr; /* options */ static unsigned int combfilter; static unsigned int lumafilter; static unsigned int automute = 1; static unsigned int chroma_agc; static unsigned int adc_crush = 1; static unsigned int whitecrush_upper = 0xCF; static unsigned int whitecrush_lower = 0x7F; static unsigned int vcr_hack; static unsigned int irq_iswitch; static unsigned int uv_ratio = 50; static unsigned int full_luma_range; static unsigned int coring; /* API features (turn on/off stuff for testing) */ static unsigned int v4l2 = 1; /* insmod args */ module_param(bttv_verbose, int, 0644); module_param(bttv_gpio, int, 0644); module_param(bttv_debug, int, 0644); module_param(irq_debug, int, 0644); module_param(debug_latency, int, 0644); module_param(disable_ir, int, 0444); module_param(fdsr, int, 0444); module_param(gbuffers, int, 0444); module_param(gbufsize, int, 0444); module_param(reset_crop, int, 0444); module_param(v4l2, int, 0644); module_param(bigendian, int, 0644); module_param(irq_iswitch, int, 0644); module_param(combfilter, int, 0444); module_param(lumafilter, int, 0444); module_param(automute, int, 0444); module_param(chroma_agc, int, 0444); module_param(adc_crush, int, 0444); module_param(whitecrush_upper, int, 0444); module_param(whitecrush_lower, int, 0444); module_param(vcr_hack, int, 0444); module_param(uv_ratio, int, 0444); module_param(full_luma_range, int, 0444); module_param(coring, int, 0444); module_param_array(radio, int, NULL, 0444); module_param_array(video_nr, int, NULL, 0444); module_param_array(radio_nr, int, NULL, 0444); module_param_array(vbi_nr, int, NULL, 0444); MODULE_PARM_DESC(radio,"The TV card supports radio, default is 0 (no)"); MODULE_PARM_DESC(bigendian,"byte order of the framebuffer, default is native endian"); MODULE_PARM_DESC(bttv_verbose,"verbose startup messages, default is 1 (yes)"); MODULE_PARM_DESC(bttv_gpio,"log gpio changes, default is 0 (no)"); 
MODULE_PARM_DESC(bttv_debug,"debug messages, default is 0 (no)"); MODULE_PARM_DESC(irq_debug,"irq handler debug messages, default is 0 (no)"); MODULE_PARM_DESC(disable_ir, "disable infrared remote support"); MODULE_PARM_DESC(gbuffers,"number of capture buffers. range 2-32, default 8"); MODULE_PARM_DESC(gbufsize,"size of the capture buffers, default is 0x208000"); MODULE_PARM_DESC(reset_crop,"reset cropping parameters at open(), default " "is 1 (yes) for compatibility with older applications"); MODULE_PARM_DESC(automute,"mute audio on bad/missing video signal, default is 1 (yes)"); MODULE_PARM_DESC(chroma_agc,"enables the AGC of chroma signal, default is 0 (no)"); MODULE_PARM_DESC(adc_crush,"enables the luminance ADC crush, default is 1 (yes)"); MODULE_PARM_DESC(whitecrush_upper,"sets the white crush upper value, default is 207"); MODULE_PARM_DESC(whitecrush_lower,"sets the white crush lower value, default is 127"); MODULE_PARM_DESC(vcr_hack,"enables the VCR hack (improves synch on poor VCR tapes), default is 0 (no)"); MODULE_PARM_DESC(irq_iswitch,"switch inputs in irq handler"); MODULE_PARM_DESC(uv_ratio,"ratio between u and v gains, default is 50"); MODULE_PARM_DESC(full_luma_range,"use the full luma range, default is 0 (no)"); MODULE_PARM_DESC(coring,"set the luma coring level, default is 0 (no)"); MODULE_PARM_DESC(video_nr, "video device numbers"); MODULE_PARM_DESC(vbi_nr, "vbi device numbers"); MODULE_PARM_DESC(radio_nr, "radio device numbers"); MODULE_DESCRIPTION("bttv - v4l/v4l2 driver module for bt848/878 based cards"); MODULE_AUTHOR("Ralph Metzler & Marcus Metzler & Gerd Knorr"); MODULE_LICENSE("GPL"); MODULE_VERSION(BTTV_VERSION); /* ----------------------------------------------------------------------- */ /* sysfs */ static ssize_t show_card(struct device *cd, struct device_attribute *attr, char *buf) { struct video_device *vfd = container_of(cd, struct video_device, dev); struct bttv *btv = video_get_drvdata(vfd); return sprintf(buf, "%d\n", btv ? 
btv->c.type : UNSET); } static DEVICE_ATTR(card, S_IRUGO, show_card, NULL); /* ----------------------------------------------------------------------- */ /* dvb auto-load setup */ #if defined(CONFIG_MODULES) && defined(MODULE) static void request_module_async(struct work_struct *work) { request_module("dvb-bt8xx"); } static void request_modules(struct bttv *dev) { INIT_WORK(&dev->request_module_wk, request_module_async); schedule_work(&dev->request_module_wk); } static void flush_request_modules(struct bttv *dev) { flush_work_sync(&dev->request_module_wk); } #else #define request_modules(dev) #define flush_request_modules(dev) #endif /* CONFIG_MODULES */ /* ----------------------------------------------------------------------- */ /* static data */ /* special timing tables from conexant... */ static u8 SRAM_Table[][60] = { /* PAL digital input over GPIO[7:0] */ { 45, // 45 bytes following 0x36,0x11,0x01,0x00,0x90,0x02,0x05,0x10,0x04,0x16, 0x12,0x05,0x11,0x00,0x04,0x12,0xC0,0x00,0x31,0x00, 0x06,0x51,0x08,0x03,0x89,0x08,0x07,0xC0,0x44,0x00, 0x81,0x01,0x01,0xA9,0x0D,0x02,0x02,0x50,0x03,0x37, 0x37,0x00,0xAF,0x21,0x00 }, /* NTSC digital input over GPIO[7:0] */ { 51, // 51 bytes following 0x0C,0xC0,0x00,0x00,0x90,0x02,0x03,0x10,0x03,0x06, 0x10,0x04,0x12,0x12,0x05,0x02,0x13,0x04,0x19,0x00, 0x04,0x39,0x00,0x06,0x59,0x08,0x03,0x83,0x08,0x07, 0x03,0x50,0x00,0xC0,0x40,0x00,0x86,0x01,0x01,0xA6, 0x0D,0x02,0x03,0x11,0x01,0x05,0x37,0x00,0xAC,0x21, 0x00, }, // TGB_NTSC392 // quartzsight // This table has been modified to be used for Fusion Rev D { 0x2A, // size of table = 42 0x06, 0x08, 0x04, 0x0a, 0xc0, 0x00, 0x18, 0x08, 0x03, 0x24, 0x08, 0x07, 0x02, 0x90, 0x02, 0x08, 0x10, 0x04, 0x0c, 0x10, 0x05, 0x2c, 0x11, 0x04, 0x55, 0x48, 0x00, 0x05, 0x50, 0x00, 0xbf, 0x0c, 0x02, 0x2f, 0x3d, 0x00, 0x2f, 0x3f, 0x00, 0xc3, 0x20, 0x00 } }; /* minhdelayx1 first video pixel we can capture on a line and hdelayx1 start of active video, both relative to rising edge of /HRESET pulse (0H) in 1 / 
fCLKx1.
   swidth	width of active video and
   totalwidth	total line width, both in 1 / fCLKx1.
   sqwidth	total line width in square pixels.
   vdelay	start of active video in 2 * field lines relative to
   trailing edge of /VRESET pulse (VDELAY register).
   sheight	height of active video in 2 * field lines.
   videostart0	ITU-R frame line number of the line corresponding
   to vdelay in the first field. */
#define CROPCAP(minhdelayx1, hdelayx1, swidth, totalwidth, sqwidth,	 \
		vdelay, sheight, videostart0)				 \
	.cropcap.bounds.left = minhdelayx1,				 \
	/* * 2 because vertically we count field lines times two, */	 \
	/* e.g. 23 * 2 to 23 * 2 + 576 in PAL-BGHI defrect. */		 \
	.cropcap.bounds.top = (videostart0) * 2 - (vdelay) + MIN_VDELAY, \
	/* 4 is a safety margin at the end of the line. */		 \
	.cropcap.bounds.width = (totalwidth) - (minhdelayx1) - 4,	 \
	.cropcap.bounds.height = (sheight) + (vdelay) - MIN_VDELAY,	 \
	.cropcap.defrect.left = hdelayx1,				 \
	.cropcap.defrect.top = (videostart0) * 2,			 \
	.cropcap.defrect.width = swidth,				 \
	.cropcap.defrect.height = sheight,				 \
	.cropcap.pixelaspect.numerator = totalwidth,			 \
	.cropcap.pixelaspect.denominator = sqwidth,

/* Per-standard Bt848 timing/cropping parameters; indexed by btv->tvnorm. */
const struct bttv_tvnorm bttv_tvnorms[] = {
	/* PAL-BDGHI */
	/* max. active video is actually 922, but 924 is divisible by 4 and 3! */
	/* actually, max active PAL with HSCALE=0 is 948, NTSC is 768 - nil */
	{
		.v4l2_id        = V4L2_STD_PAL,
		.name           = "PAL",
		.Fsc            = 35468950,
		.swidth         = 924,
		.sheight        = 576,
		.totalwidth     = 1135,
		.adelay         = 0x7f,
		.bdelay         = 0x72,
		.iform          = (BT848_IFORM_PAL_BDGHI|BT848_IFORM_XT1),
		.scaledtwidth   = 1135,
		.hdelayx1       = 186,
		.hactivex1      = 924,
		.vdelay         = 0x20,
		.vbipack        = 255, /* min (2048 / 4, 0x1ff) & 0xff */
		.sram           = 0,
		/* ITU-R frame line number of the first VBI line
		   we can capture, of the first and second field.
		   The last line is determined by cropcap.bounds. */
		.vbistart       = { 7, 320 },
		CROPCAP(/* minhdelayx1 */ 68,
			/* hdelayx1 */ 186,
			/* Should be (768 * 1135 + 944 / 2) / 944.
			   cropcap.defrect is used for image width
			   checks, so we keep the old value 924. */
			/* swidth */ 924,
			/* totalwidth */ 1135,
			/* sqwidth */ 944,
			/* vdelay */ 0x20,
			/* sheight */ 576,
			/* videostart0 */ 23)
		/* bt878 (and bt848?) can capture another
		   line below active video. */
		.cropcap.bounds.height = (576 + 2) + 0x20 - 2,
	},{
		.v4l2_id        = V4L2_STD_NTSC_M | V4L2_STD_NTSC_M_KR,
		.name           = "NTSC",
		.Fsc            = 28636363,
		.swidth         = 768,
		.sheight        = 480,
		.totalwidth     = 910,
		.adelay         = 0x68,
		.bdelay         = 0x5d,
		.iform          = (BT848_IFORM_NTSC|BT848_IFORM_XT0),
		.scaledtwidth   = 910,
		.hdelayx1       = 128,
		.hactivex1      = 910,
		.vdelay         = 0x1a,
		.vbipack        = 144, /* min (1600 / 4, 0x1ff) & 0xff */
		.sram           = 1,
		.vbistart	= { 10, 273 },
		CROPCAP(/* minhdelayx1 */ 68,
			/* hdelayx1 */ 128,
			/* Should be (640 * 910 + 780 / 2) / 780? */
			/* swidth */ 768,
			/* totalwidth */ 910,
			/* sqwidth */ 780,
			/* vdelay */ 0x1a,
			/* sheight */ 480,
			/* videostart0 */ 23)
	},{
		.v4l2_id        = V4L2_STD_SECAM,
		.name           = "SECAM",
		.Fsc            = 35468950,
		.swidth         = 924,
		.sheight        = 576,
		.totalwidth     = 1135,
		.adelay         = 0x7f,
		.bdelay         = 0xb0,
		.iform          = (BT848_IFORM_SECAM|BT848_IFORM_XT1),
		.scaledtwidth   = 1135,
		.hdelayx1       = 186,
		.hactivex1      = 922,
		.vdelay         = 0x20,
		.vbipack        = 255,
		.sram           = 0, /* like PAL, correct? */
		.vbistart	= { 7, 320 },
		CROPCAP(/* minhdelayx1 */ 68,
			/* hdelayx1 */ 186,
			/* swidth */ 924,
			/* totalwidth */ 1135,
			/* sqwidth */ 944,
			/* vdelay */ 0x20,
			/* sheight */ 576,
			/* videostart0 */ 23)
	},{
		.v4l2_id        = V4L2_STD_PAL_Nc,
		.name           = "PAL-Nc",
		.Fsc            = 28636363,
		.swidth         = 640,
		.sheight        = 576,
		.totalwidth     = 910,
		.adelay         = 0x68,
		.bdelay         = 0x5d,
		.iform          = (BT848_IFORM_PAL_NC|BT848_IFORM_XT0),
		.scaledtwidth   = 780,
		.hdelayx1       = 130,
		.hactivex1      = 734,
		.vdelay         = 0x1a,
		.vbipack        = 144,
		.sram           = -1,
		.vbistart	= { 7, 320 },
		CROPCAP(/* minhdelayx1 */ 68,
			/* hdelayx1 */ 130,
			/* swidth */ (640 * 910 + 780 / 2) / 780,
			/* totalwidth */ 910,
			/* sqwidth */ 780,
			/* vdelay */ 0x1a,
			/* sheight */ 576,
			/* videostart0 */ 23)
	},{
		.v4l2_id        = V4L2_STD_PAL_M,
		.name           = "PAL-M",
		.Fsc            = 28636363,
		.swidth         = 640,
		.sheight        = 480,
		.totalwidth     = 910,
		.adelay         = 0x68,
		.bdelay         = 0x5d,
		.iform          = (BT848_IFORM_PAL_M|BT848_IFORM_XT0),
		.scaledtwidth   = 780,
		.hdelayx1       = 135,
		.hactivex1      = 754,
		.vdelay         = 0x1a,
		.vbipack        = 144,
		.sram           = -1,
		.vbistart	= { 10, 273 },
		CROPCAP(/* minhdelayx1 */ 68,
			/* hdelayx1 */ 135,
			/* swidth */ (640 * 910 + 780 / 2) / 780,
			/* totalwidth */ 910,
			/* sqwidth */ 780,
			/* vdelay */ 0x1a,
			/* sheight */ 480,
			/* videostart0 */ 23)
	},{
		.v4l2_id        = V4L2_STD_PAL_N,
		.name           = "PAL-N",
		.Fsc            = 35468950,
		.swidth         = 768,
		.sheight        = 576,
		.totalwidth     = 1135,
		.adelay         = 0x7f,
		.bdelay         = 0x72,
		.iform          = (BT848_IFORM_PAL_N|BT848_IFORM_XT1),
		.scaledtwidth   = 944,
		.hdelayx1       = 186,
		.hactivex1      = 922,
		.vdelay         = 0x20,
		.vbipack        = 144,
		.sram           = -1,
		.vbistart       = { 7, 320 },
		CROPCAP(/* minhdelayx1 */ 68,
			/* hdelayx1 */ 186,
			/* swidth */ (768 * 1135 + 944 / 2) / 944,
			/* totalwidth */ 1135,
			/* sqwidth */ 944,
			/* vdelay */ 0x20,
			/* sheight */ 576,
			/* videostart0 */ 23)
	},{
		.v4l2_id        = V4L2_STD_NTSC_M_JP,
		.name           = "NTSC-JP",
		.Fsc            = 28636363,
		.swidth         = 640,
		.sheight        = 480,
		.totalwidth     = 910,
		.adelay         = 0x68,
		.bdelay         = 0x5d,
		.iform          = (BT848_IFORM_NTSC_J|BT848_IFORM_XT0),
		.scaledtwidth   = 780,
		.hdelayx1       = 135,
		.hactivex1      = 754,
		.vdelay         =
0x16, .vbipack = 144, .sram = -1, .vbistart = { 10, 273 }, CROPCAP(/* minhdelayx1 */ 68, /* hdelayx1 */ 135, /* swidth */ (640 * 910 + 780 / 2) / 780, /* totalwidth */ 910, /* sqwidth */ 780, /* vdelay */ 0x16, /* sheight */ 480, /* videostart0 */ 23) },{ /* that one hopefully works with the strange timing * which video recorders produce when playing a NTSC * tape on a PAL TV ... */ .v4l2_id = V4L2_STD_PAL_60, .name = "PAL-60", .Fsc = 35468950, .swidth = 924, .sheight = 480, .totalwidth = 1135, .adelay = 0x7f, .bdelay = 0x72, .iform = (BT848_IFORM_PAL_BDGHI|BT848_IFORM_XT1), .scaledtwidth = 1135, .hdelayx1 = 186, .hactivex1 = 924, .vdelay = 0x1a, .vbipack = 255, .vtotal = 524, .sram = -1, .vbistart = { 10, 273 }, CROPCAP(/* minhdelayx1 */ 68, /* hdelayx1 */ 186, /* swidth */ 924, /* totalwidth */ 1135, /* sqwidth */ 944, /* vdelay */ 0x1a, /* sheight */ 480, /* videostart0 */ 23) } }; static const unsigned int BTTV_TVNORMS = ARRAY_SIZE(bttv_tvnorms); /* ----------------------------------------------------------------------- */ /* bttv format list packed pixel formats must come first */ static const struct bttv_format formats[] = { { .name = "8 bpp, gray", .fourcc = V4L2_PIX_FMT_GREY, .btformat = BT848_COLOR_FMT_Y8, .depth = 8, .flags = FORMAT_FLAGS_PACKED, },{ .name = "8 bpp, dithered color", .fourcc = V4L2_PIX_FMT_HI240, .btformat = BT848_COLOR_FMT_RGB8, .depth = 8, .flags = FORMAT_FLAGS_PACKED | FORMAT_FLAGS_DITHER, },{ .name = "15 bpp RGB, le", .fourcc = V4L2_PIX_FMT_RGB555, .btformat = BT848_COLOR_FMT_RGB15, .depth = 16, .flags = FORMAT_FLAGS_PACKED, },{ .name = "15 bpp RGB, be", .fourcc = V4L2_PIX_FMT_RGB555X, .btformat = BT848_COLOR_FMT_RGB15, .btswap = 0x03, /* byteswap */ .depth = 16, .flags = FORMAT_FLAGS_PACKED, },{ .name = "16 bpp RGB, le", .fourcc = V4L2_PIX_FMT_RGB565, .btformat = BT848_COLOR_FMT_RGB16, .depth = 16, .flags = FORMAT_FLAGS_PACKED, },{ .name = "16 bpp RGB, be", .fourcc = V4L2_PIX_FMT_RGB565X, .btformat = BT848_COLOR_FMT_RGB16, .btswap = 
0x03, /* byteswap */ .depth = 16, .flags = FORMAT_FLAGS_PACKED, },{ .name = "24 bpp RGB, le", .fourcc = V4L2_PIX_FMT_BGR24, .btformat = BT848_COLOR_FMT_RGB24, .depth = 24, .flags = FORMAT_FLAGS_PACKED, },{ .name = "32 bpp RGB, le", .fourcc = V4L2_PIX_FMT_BGR32, .btformat = BT848_COLOR_FMT_RGB32, .depth = 32, .flags = FORMAT_FLAGS_PACKED, },{ .name = "32 bpp RGB, be", .fourcc = V4L2_PIX_FMT_RGB32, .btformat = BT848_COLOR_FMT_RGB32, .btswap = 0x0f, /* byte+word swap */ .depth = 32, .flags = FORMAT_FLAGS_PACKED, },{ .name = "4:2:2, packed, YUYV", .fourcc = V4L2_PIX_FMT_YUYV, .btformat = BT848_COLOR_FMT_YUY2, .depth = 16, .flags = FORMAT_FLAGS_PACKED, },{ .name = "4:2:2, packed, YUYV", .fourcc = V4L2_PIX_FMT_YUYV, .btformat = BT848_COLOR_FMT_YUY2, .depth = 16, .flags = FORMAT_FLAGS_PACKED, },{ .name = "4:2:2, packed, UYVY", .fourcc = V4L2_PIX_FMT_UYVY, .btformat = BT848_COLOR_FMT_YUY2, .btswap = 0x03, /* byteswap */ .depth = 16, .flags = FORMAT_FLAGS_PACKED, },{ .name = "4:2:2, planar, Y-Cb-Cr", .fourcc = V4L2_PIX_FMT_YUV422P, .btformat = BT848_COLOR_FMT_YCrCb422, .depth = 16, .flags = FORMAT_FLAGS_PLANAR, .hshift = 1, .vshift = 0, },{ .name = "4:2:0, planar, Y-Cb-Cr", .fourcc = V4L2_PIX_FMT_YUV420, .btformat = BT848_COLOR_FMT_YCrCb422, .depth = 12, .flags = FORMAT_FLAGS_PLANAR, .hshift = 1, .vshift = 1, },{ .name = "4:2:0, planar, Y-Cr-Cb", .fourcc = V4L2_PIX_FMT_YVU420, .btformat = BT848_COLOR_FMT_YCrCb422, .depth = 12, .flags = FORMAT_FLAGS_PLANAR | FORMAT_FLAGS_CrCb, .hshift = 1, .vshift = 1, },{ .name = "4:1:1, planar, Y-Cb-Cr", .fourcc = V4L2_PIX_FMT_YUV411P, .btformat = BT848_COLOR_FMT_YCrCb411, .depth = 12, .flags = FORMAT_FLAGS_PLANAR, .hshift = 2, .vshift = 0, },{ .name = "4:1:0, planar, Y-Cb-Cr", .fourcc = V4L2_PIX_FMT_YUV410, .btformat = BT848_COLOR_FMT_YCrCb411, .depth = 9, .flags = FORMAT_FLAGS_PLANAR, .hshift = 2, .vshift = 2, },{ .name = "4:1:0, planar, Y-Cr-Cb", .fourcc = V4L2_PIX_FMT_YVU410, .btformat = BT848_COLOR_FMT_YCrCb411, .depth = 9, .flags = 
FORMAT_FLAGS_PLANAR | FORMAT_FLAGS_CrCb, .hshift = 2, .vshift = 2, },{ .name = "raw scanlines", .fourcc = -1, .btformat = BT848_COLOR_FMT_RAW, .depth = 8, .flags = FORMAT_FLAGS_RAW, } }; static const unsigned int FORMATS = ARRAY_SIZE(formats); /* ----------------------------------------------------------------------- */ #define V4L2_CID_PRIVATE_CHROMA_AGC (V4L2_CID_PRIVATE_BASE + 0) #define V4L2_CID_PRIVATE_COMBFILTER (V4L2_CID_PRIVATE_BASE + 1) #define V4L2_CID_PRIVATE_AUTOMUTE (V4L2_CID_PRIVATE_BASE + 2) #define V4L2_CID_PRIVATE_LUMAFILTER (V4L2_CID_PRIVATE_BASE + 3) #define V4L2_CID_PRIVATE_AGC_CRUSH (V4L2_CID_PRIVATE_BASE + 4) #define V4L2_CID_PRIVATE_VCR_HACK (V4L2_CID_PRIVATE_BASE + 5) #define V4L2_CID_PRIVATE_WHITECRUSH_UPPER (V4L2_CID_PRIVATE_BASE + 6) #define V4L2_CID_PRIVATE_WHITECRUSH_LOWER (V4L2_CID_PRIVATE_BASE + 7) #define V4L2_CID_PRIVATE_UV_RATIO (V4L2_CID_PRIVATE_BASE + 8) #define V4L2_CID_PRIVATE_FULL_LUMA_RANGE (V4L2_CID_PRIVATE_BASE + 9) #define V4L2_CID_PRIVATE_CORING (V4L2_CID_PRIVATE_BASE + 10) #define V4L2_CID_PRIVATE_LASTP1 (V4L2_CID_PRIVATE_BASE + 11) static const struct v4l2_queryctrl no_ctl = { .name = "42", .flags = V4L2_CTRL_FLAG_DISABLED, }; static const struct v4l2_queryctrl bttv_ctls[] = { /* --- video --- */ { .id = V4L2_CID_BRIGHTNESS, .name = "Brightness", .minimum = 0, .maximum = 65535, .step = 256, .default_value = 32768, .type = V4L2_CTRL_TYPE_INTEGER, },{ .id = V4L2_CID_CONTRAST, .name = "Contrast", .minimum = 0, .maximum = 65535, .step = 128, .default_value = 32768, .type = V4L2_CTRL_TYPE_INTEGER, },{ .id = V4L2_CID_SATURATION, .name = "Saturation", .minimum = 0, .maximum = 65535, .step = 128, .default_value = 32768, .type = V4L2_CTRL_TYPE_INTEGER, },{ .id = V4L2_CID_HUE, .name = "Hue", .minimum = 0, .maximum = 65535, .step = 256, .default_value = 32768, .type = V4L2_CTRL_TYPE_INTEGER, }, /* --- audio --- */ { .id = V4L2_CID_AUDIO_MUTE, .name = "Mute", .minimum = 0, .maximum = 1, .type = V4L2_CTRL_TYPE_BOOLEAN, },{ .id = 
V4L2_CID_AUDIO_VOLUME,
		.name          = "Volume",
		.minimum       = 0,
		.maximum       = 65535,
		.step          = 65535/100,
		.default_value = 65535,
		.type          = V4L2_CTRL_TYPE_INTEGER,
	},{
		.id            = V4L2_CID_AUDIO_BALANCE,
		.name          = "Balance",
		.minimum       = 0,
		.maximum       = 65535,
		.step          = 65535/100,
		.default_value = 32768,
		.type          = V4L2_CTRL_TYPE_INTEGER,
	},{
		.id            = V4L2_CID_AUDIO_BASS,
		.name          = "Bass",
		.minimum       = 0,
		.maximum       = 65535,
		.step          = 65535/100,
		.default_value = 32768,
		.type          = V4L2_CTRL_TYPE_INTEGER,
	},{
		.id            = V4L2_CID_AUDIO_TREBLE,
		.name          = "Treble",
		.minimum       = 0,
		.maximum       = 65535,
		.step          = 65535/100,
		.default_value = 32768,
		.type          = V4L2_CTRL_TYPE_INTEGER,
	},
	/* --- private --- */
	{
		.id            = V4L2_CID_PRIVATE_CHROMA_AGC,
		.name          = "chroma agc",
		.minimum       = 0,
		.maximum       = 1,
		.type          = V4L2_CTRL_TYPE_BOOLEAN,
	},{
		.id            = V4L2_CID_PRIVATE_COMBFILTER,
		.name          = "combfilter",
		.minimum       = 0,
		.maximum       = 1,
		.type          = V4L2_CTRL_TYPE_BOOLEAN,
	},{
		.id            = V4L2_CID_PRIVATE_AUTOMUTE,
		.name          = "automute",
		.minimum       = 0,
		.maximum       = 1,
		.type          = V4L2_CTRL_TYPE_BOOLEAN,
	},{
		.id            = V4L2_CID_PRIVATE_LUMAFILTER,
		.name          = "luma decimation filter",
		.minimum       = 0,
		.maximum       = 1,
		.type          = V4L2_CTRL_TYPE_BOOLEAN,
	},{
		.id            = V4L2_CID_PRIVATE_AGC_CRUSH,
		.name          = "agc crush",
		.minimum       = 0,
		.maximum       = 1,
		.type          = V4L2_CTRL_TYPE_BOOLEAN,
	},{
		.id            = V4L2_CID_PRIVATE_VCR_HACK,
		.name          = "vcr hack",
		.minimum       = 0,
		.maximum       = 1,
		.type          = V4L2_CTRL_TYPE_BOOLEAN,
	},{
		.id            = V4L2_CID_PRIVATE_WHITECRUSH_UPPER,
		.name          = "whitecrush upper",
		.minimum       = 0,
		.maximum       = 255,
		.step          = 1,
		.default_value = 0xCF,
		.type          = V4L2_CTRL_TYPE_INTEGER,
	},{
		.id            = V4L2_CID_PRIVATE_WHITECRUSH_LOWER,
		.name          = "whitecrush lower",
		.minimum       = 0,
		.maximum       = 255,
		.step          = 1,
		.default_value = 0x7F,
		.type          = V4L2_CTRL_TYPE_INTEGER,
	},{
		.id            = V4L2_CID_PRIVATE_UV_RATIO,
		.name          = "uv ratio",
		.minimum       = 0,
		.maximum       = 100,
		.step          = 1,
		.default_value = 50,
		.type          = V4L2_CTRL_TYPE_INTEGER,
	},{
		.id            = V4L2_CID_PRIVATE_FULL_LUMA_RANGE,
		.name          = "full luma range",
		.minimum       = 0,
		.maximum       = 1,
		.type          = V4L2_CTRL_TYPE_BOOLEAN,
	},{
		.id            = V4L2_CID_PRIVATE_CORING,
		.name          = "coring",
		.minimum       = 0,
		.maximum       = 3,
		.step          = 1,
		.default_value = 0,
		.type          = V4L2_CTRL_TYPE_INTEGER,
	}
};

/* Linear lookup of a control descriptor by id; NULL when unknown. */
static const struct v4l2_queryctrl *ctrl_by_id(int id)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(bttv_ctls); i++)
		if (bttv_ctls[i].id == id)
			return bttv_ctls+i;
	return NULL;
}

/* ----------------------------------------------------------------------- */
/* resource management

   RESOURCE_    allocated by                freed by

   VIDEO_READ   bttv_read 1)                bttv_read 2)

   VIDEO_STREAM VIDIOC_STREAMON             VIDIOC_STREAMOFF
		 VIDIOC_QBUF 1)              bttv_release
		 VIDIOCMCAPTURE 1)

   OVERLAY	 VIDIOCCAPTURE on            VIDIOCCAPTURE off
		 VIDIOC_OVERLAY on           VIDIOC_OVERLAY off
		 3)                          bttv_release

   VBI		 VIDIOC_STREAMON             VIDIOC_STREAMOFF
		 VIDIOC_QBUF 1)              bttv_release
		 bttv_read, bttv_poll 1) 4)

   1) The resource must be allocated when we enter buffer prepare functions
      and remain allocated while buffers are in the DMA queue.
   2) This is a single frame read.
   3) VIDIOC_S_FBUF and VIDIOC_S_FMT (OVERLAY) still work when
      RESOURCE_OVERLAY is allocated.
   4) This is a continuous read, implies VIDIOC_STREAMON.

   Note this driver permits video input and standard changes regardless if
   resources are allocated.
*/

#define VBI_RESOURCES (RESOURCE_VBI)
#define VIDEO_RESOURCES (RESOURCE_VIDEO_READ | \
			 RESOURCE_VIDEO_STREAM | \
			 RESOURCE_OVERLAY)

/* Try to claim resource bits for file handle fh; also reserves the scan
 * lines the resource needs so video and VBI capture cannot overlap.
 * Returns 1 on success, 0 when another handle holds a conflicting bit.
 * NOTE(review): name suggests caller holds btv->lock — confirm at call
 * sites outside this view. */
static
int check_alloc_btres_lock(struct bttv *btv, struct bttv_fh *fh, int bit)
{
	int xbits; /* mutual exclusive resources */

	if (fh->resources & bit)
		/* have it already allocated */
		return 1;

	xbits = bit;
	if (bit & (RESOURCE_VIDEO_READ | RESOURCE_VIDEO_STREAM))
		xbits |= RESOURCE_VIDEO_READ | RESOURCE_VIDEO_STREAM;

	/* is it free? */
	if (btv->resources & xbits) {
		/* no, someone else uses it */
		goto fail;
	}

	if ((bit & VIDEO_RESOURCES)
	    && 0 == (btv->resources & VIDEO_RESOURCES)) {
		/* Do crop - use current, don't - use default parameters. */
		__s32 top = btv->crop[!!fh->do_crop].rect.top;

		if (btv->vbi_end > top)
			goto fail;

		/* We cannot capture the same line as video and VBI data.
		   Claim scan lines crop[].rect.top to bottom. */
		btv->crop_start = top;
	} else if (bit & VBI_RESOURCES) {
		__s32 end = fh->vbi_fmt.end;

		if (end > btv->crop_start)
			goto fail;

		/* Claim scan lines above fh->vbi_fmt.end. */
		btv->vbi_end = end;
	}

	/* it's free, grab it */
	fh->resources  |= bit;
	btv->resources |= bit;
	return 1;

 fail:
	return 0;
}

/* Does this file handle currently own the given resource bit(s)? */
static
int check_btres(struct bttv_fh *fh, int bit)
{
	return (fh->resources & bit);
}

/* Is the given resource bit held by anyone on this device? */
static
int locked_btres(struct bttv *btv, int bit)
{
	return (btv->resources & bit);
}

/* Call with btv->lock down. */
static void
disclaim_vbi_lines(struct bttv *btv)
{
	btv->vbi_end = 0;
}

/* Call with btv->lock down. */
static void
disclaim_video_lines(struct bttv *btv)
{
	const struct bttv_tvnorm *tvnorm;
	u8 crop;

	tvnorm = &bttv_tvnorms[btv->tvnorm];
	btv->crop_start = tvnorm->cropcap.bounds.top
		+ tvnorm->cropcap.bounds.height;

	/* VBI capturing ends at VDELAY, start of video capturing, no
	   matter how many lines the VBI RISC program expects. When video
	   capturing is off, it shall no longer "preempt" VBI capturing,
	   so we set VDELAY to maximum. */
	crop = btread(BT848_E_CROP) | 0xc0;
	btwrite(crop, BT848_E_CROP);
	btwrite(0xfe, BT848_E_VDELAY_LO);
	btwrite(crop, BT848_O_CROP);
	btwrite(0xfe, BT848_O_VDELAY_LO);
}

/* Release resource bits owned by fh and give back the scan lines they
 * reserved once no other handle still needs them. */
static
void free_btres_lock(struct bttv *btv, struct bttv_fh *fh, int bits)
{
	if ((fh->resources & bits) != bits) {
		/* trying to free resources
		   not allocated by us ... */
		pr_err("BUG! (btres)\n");
	}
	fh->resources  &= ~bits;
	btv->resources &= ~bits;

	bits = btv->resources;

	if (0 == (bits & VIDEO_RESOURCES))
		disclaim_video_lines(btv);

	if (0 == (bits & VBI_RESOURCES))
		disclaim_vbi_lines(btv);
}

/* ----------------------------------------------------------------------- */
/* If Bt848a or Bt849, use PLL for PAL/SECAM and crystal for NTSC */

/* Frequency = (F_input / PLL_X) * PLL_I.PLL_F/PLL_C
   PLL_X = Reference pre-divider (0=1, 1=2)
   PLL_C = Post divider (0=6, 1=4)
   PLL_I = Integer input
   PLL_F = Fractional input

   F_input = 28.636363 MHz:
   PAL (CLKx2 = 35.46895 MHz): PLL_X = 1, PLL_I = 0x0E, PLL_F = 0xDCF9, PLL_C = 0
*/

/* Program the Bt848 PLL divider registers for output frequency fout
 * given input frequency fin (both in Hz). */
static void set_pll_freq(struct bttv *btv, unsigned int fin, unsigned int fout)
{
	unsigned char fl, fh, fi;

	/* prevent overflows */
	fin/=4;
	fout/=4;

	fout*=12;
	fi=fout/fin;

	fout=(fout%fin)*256;
	fh=fout/fin;

	fout=(fout%fin)*256;
	fl=fout/fin;

	btwrite(fl, BT848_PLL_F_LO);
	btwrite(fh, BT848_PLL_F_HI);
	btwrite(fi|BT848_PLL_X, BT848_PLL_XCI);
}

/* Switch the chip to the requested PLL output frequency (pll_ofreq),
 * falling back to the crystal when ifreq == ofreq, and polling up to
 * ~100 ms for the PLL to lock.  pll_current caches the active setting;
 * -1 marks a failed/unknown state. */
static void set_pll(struct bttv *btv)
{
	int i;

	if (!btv->pll.pll_crystal)
		return;

	if (btv->pll.pll_ofreq == btv->pll.pll_current) {
		dprintk("%d: PLL: no change required\n", btv->c.nr);
		return;
	}

	if (btv->pll.pll_ifreq == btv->pll.pll_ofreq) {
		/* no PLL needed */
		if (btv->pll.pll_current == 0)
			return;
		if (bttv_verbose)
			pr_info("%d: PLL can sleep, using XTAL (%d)\n",
				btv->c.nr, btv->pll.pll_ifreq);
		btwrite(0x00,BT848_TGCTRL);
		btwrite(0x00,BT848_PLL_XCI);
		btv->pll.pll_current = 0;
		return;
	}

	if (bttv_verbose)
		pr_info("%d: Setting PLL: %d => %d (needs up to 100ms)\n",
			btv->c.nr,
			btv->pll.pll_ifreq, btv->pll.pll_ofreq);
	set_pll_freq(btv, btv->pll.pll_ifreq, btv->pll.pll_ofreq);

	for (i=0; i<10; i++) {
		/*  Let other people run while the PLL stabilizes */
		msleep(10);

		if (btread(BT848_DSTATUS) & BT848_DSTATUS_PLOCK) {
			btwrite(0,BT848_DSTATUS);
		} else {
			btwrite(0x08,BT848_TGCTRL);
			btv->pll.pll_current = btv->pll.pll_ofreq;
			if (bttv_verbose)
				pr_info("PLL set ok\n");
			return;
		}
	}
	btv->pll.pll_current = -1;
	if
(bttv_verbose)
		pr_info("Setting PLL failed\n");
	return;
}

/* used to switch between the bt848's analog/digital video capture modes */
static void bt848A_set_timing(struct bttv *btv)
{
	int i, len;
	int table_idx = bttv_tvnorms[btv->tvnorm].sram;
	int fsc       = bttv_tvnorms[btv->tvnorm].Fsc;

	if (btv->input == btv->dig) {
		dprintk("%d: load digital timing table (table_idx=%d)\n",
			btv->c.nr,table_idx);

		/* timing change...reset timing generator address */
		btwrite(0x00, BT848_TGCTRL);
		btwrite(0x02, BT848_TGCTRL);
		btwrite(0x00, BT848_TGCTRL);

		/* load the SRAM timing table; byte 0 holds its length */
		len=SRAM_Table[table_idx][0];
		for(i = 1; i <= len; i++)
			btwrite(SRAM_Table[table_idx][i],BT848_TGLB);
		btv->pll.pll_ofreq = 27000000;

		set_pll(btv);
		btwrite(0x11, BT848_TGCTRL);
		btwrite(0x41, BT848_DVSIF);
	} else {
		btv->pll.pll_ofreq = fsc;
		set_pll(btv);
		btwrite(0x0, BT848_DVSIF);
	}
}

/* ----------------------------------------------------------------------- */

/* Set brightness: V4L2 gives 0-65535, chip wants -128..127. */
static void bt848_bright(struct bttv *btv, int bright)
{
	int value;

	// printk("set bright: %d\n", bright); // DEBUG
	btv->bright = bright;

	/* We want -128 to 127 we get 0-65535 */
	value = (bright >> 8) - 128;
	btwrite(value & 0xff, BT848_BRIGHT);
}

/* Set hue: V4L2 gives 0-65535, chip wants -128..127. */
static void bt848_hue(struct bttv *btv, int hue)
{
	int value;

	btv->hue = hue;

	/* -128 to 127 */
	value = (hue >> 8) - 128;
	btwrite(value & 0xff, BT848_HUE);
}

/* Set contrast: scale 0-65535 to the chip's 9-bit value, low 8 bits in
 * CONTRAST_LO, bit 8 in the even/odd CONTROL registers. */
static void bt848_contrast(struct bttv *btv, int cont)
{
	int value,hibit;

	btv->contrast = cont;

	/* 0-511 */
	value = (cont  >> 7);
	hibit = (value >> 6) & 4;
	btwrite(value & 0xff, BT848_CONTRAST_LO);
	btaor(hibit, ~4, BT848_E_CONTROL);
	btaor(hibit, ~4, BT848_O_CONTROL);
}

/* Set saturation: derive separate U/V gains from opt_uv_ratio, low bytes
 * in SAT_U_LO/SAT_V_LO, the 9th bits in the even/odd CONTROL registers. */
static void bt848_sat(struct bttv *btv, int color)
{
	int val_u,val_v,hibits;

	btv->saturation = color;

	/* 0-511 for the color */
	val_u   = ((color * btv->opt_uv_ratio) / 50) >> 7;
	val_v   = (((color * (100 - btv->opt_uv_ratio) / 50) >>7)*180L)/254;
	hibits  = (val_u >> 7) & 2;
	hibits |= (val_v >> 8) & 1;
	btwrite(val_u & 0xff, BT848_SAT_U_LO);
	btwrite(val_v & 0xff, BT848_SAT_V_LO);
	btaor(hibits, ~3, BT848_E_CONTROL);
	btaor(hibits, ~3, BT848_O_CONTROL);
}

/* ----------------------------------------------------------------------- */

/* Route video input 'input' to the chip: program composite/S-Video mode
 * and the mux bits in BT848_IFORM, then run any card-specific hook. */
static int
video_mux(struct bttv *btv, unsigned int input)
{
	int mux,mask2;

	if (input >= bttv_tvcards[btv->c.type].video_inputs)
		return -EINVAL;

	/* needed by RemoteVideo MX */
	mask2 = bttv_tvcards[btv->c.type].gpiomask2;
	if (mask2)
		gpio_inout(mask2,mask2);

	if (input == btv->svhs) {
		btor(BT848_CONTROL_COMP, BT848_E_CONTROL);
		btor(BT848_CONTROL_COMP, BT848_O_CONTROL);
	} else {
		btand(~BT848_CONTROL_COMP, BT848_E_CONTROL);
		btand(~BT848_CONTROL_COMP, BT848_O_CONTROL);
	}
	mux = bttv_muxsel(btv, input);
	btaor(mux<<5, ~(3<<5), BT848_IFORM);
	dprintk("%d: video mux: input=%d mux=%d\n",
		btv->c.nr, input, mux);

	/* card specific hook */
	if(bttv_tvcards[btv->c.type].muxsel_hook)
		bttv_tvcards[btv->c.type].muxsel_hook (btv, input);
	return 0;
}

/* Names for gpio tracking; index 4 is the mute state. */
static char *audio_modes[] = {
	"audio: tuner", "audio: radio", "audio: extern",
	"audio: intern", "audio: mute"
};

/* Select the audio input and mute state: drives the card's audio GPIO
 * mux, applies automute on signal loss, and forwards the routing to the
 * msp34xx / tvaudio subdevices when present. */
static int
audio_mux(struct bttv *btv, int input, int mute)
{
	int gpio_val, signal;
	struct v4l2_control ctrl;

	gpio_inout(bttv_tvcards[btv->c.type].gpiomask,
		   bttv_tvcards[btv->c.type].gpiomask);
	signal = btread(BT848_DSTATUS) & BT848_DSTATUS_HLOC;

	btv->mute = mute;
	btv->audio = input;

	/* automute */
	mute = mute || (btv->opt_automute && !signal && !btv->radio_user);

	if (mute)
		gpio_val = bttv_tvcards[btv->c.type].gpiomute;
	else
		gpio_val = bttv_tvcards[btv->c.type].gpiomux[input];

	switch (btv->c.type) {
	case BTTV_BOARD_VOODOOTV_FM:
	case BTTV_BOARD_VOODOOTV_200:
		gpio_val = bttv_tda9880_setnorm(btv, gpio_val);
		break;

	default:
		gpio_bits(bttv_tvcards[btv->c.type].gpiomask, gpio_val);
	}

	if (bttv_gpio)
		bttv_gpio_tracking(btv, audio_modes[mute ? 4 : input]);
	if (in_interrupt())
		return 0;

	ctrl.id = V4L2_CID_AUDIO_MUTE;
	ctrl.value = btv->mute;
	bttv_call_all(btv, core, s_ctrl, &ctrl);
	if (btv->sd_msp34xx) {
		u32 in;

		/* Note: the inputs tuner/radio/extern/intern are translated
		   to msp routings. This assumes common behavior for all msp3400
		   based TV cards. When this assumption fails, then the
		   specific MSP routing must be added to the card table.
		   For now this is sufficient. */
		switch (input) {
		case TVAUDIO_INPUT_RADIO:
			in = MSP_INPUT(MSP_IN_SCART2, MSP_IN_TUNER1,
				    MSP_DSP_IN_SCART, MSP_DSP_IN_SCART);
			break;
		case TVAUDIO_INPUT_EXTERN:
			in = MSP_INPUT(MSP_IN_SCART1, MSP_IN_TUNER1,
				    MSP_DSP_IN_SCART, MSP_DSP_IN_SCART);
			break;
		case TVAUDIO_INPUT_INTERN:
			/* Yes, this is the same input as for RADIO. I doubt
			   if this is ever used. The only board with an INTERN
			   input is the BTTV_BOARD_AVERMEDIA98. I wonder how
			   that was tested. My guess is that the whole INTERN
			   input does not work. */
			in = MSP_INPUT(MSP_IN_SCART2, MSP_IN_TUNER1,
				    MSP_DSP_IN_SCART, MSP_DSP_IN_SCART);
			break;
		case TVAUDIO_INPUT_TUNER:
		default:
			/* This is the only card that uses TUNER2, and afaik,
			   is the only difference between the VOODOOTV_FM
			   and VOODOOTV_200 */
			if (btv->c.type == BTTV_BOARD_VOODOOTV_200)
				in = MSP_INPUT(MSP_IN_SCART1, MSP_IN_TUNER2, \
					MSP_DSP_IN_TUNER, MSP_DSP_IN_TUNER);
			else
				in = MSP_INPUT_DEFAULT;
			break;
		}
		v4l2_subdev_call(btv->sd_msp34xx, audio, s_routing,
			       in, MSP_OUTPUT_DEFAULT, 0);
	}
	if (btv->sd_tvaudio) {
		v4l2_subdev_call(btv->sd_tvaudio, audio, s_routing,
				 input, 0, 0);
	}
	return 0;
}

/* Change mute state, keeping the currently selected audio input. */
static inline int
audio_mute(struct bttv *btv, int mute)
{
	return audio_mux(btv, btv->audio, mute);
}

/* Change audio input, keeping the current mute state. */
static inline int
audio_input(struct bttv *btv, int input)
{
	return audio_mux(btv, input, btv->mute);
}

/* Recompute the min/max scaled image sizes allowed for crop rect c. */
static void
bttv_crop_calc_limits(struct bttv_crop *c)
{
	/* Scale factor min. 1:1, max. 16:1. Min. image size 48 x 32.
	   Scaled width must be a multiple of 4. */

	if (1) {
		/* For bug compatibility with VIDIOCGCAP and image size
		   checks in earlier driver versions.
*/
		c->min_scaled_width = 48;
		c->min_scaled_height = 32;
	} else {
		c->min_scaled_width =
			(max(48, c->rect.width >> 4) + 3) & ~3;
		c->min_scaled_height =
			max(32, c->rect.height >> 4);
	}

	c->max_scaled_width = c->rect.width & ~3;
	c->max_scaled_height = c->rect.height;
}

/* Reset crop rect c to the default rectangle of TV standard 'norm'
 * and recompute its scaling limits. */
static void
bttv_crop_reset(struct bttv_crop *c, unsigned int norm)
{
	c->rect = bttv_tvnorms[norm].cropcap.defrect;
	bttv_crop_calc_limits(c);
}

/* Call with btv->lock down.
 * Switch the device to TV standard 'norm': reset cropping when the
 * standard's cropcap differs, program the Bt848 timing registers, and
 * notify all subdevices of the new std. */
static int
set_tvnorm(struct bttv *btv, unsigned int norm)
{
	const struct bttv_tvnorm *tvnorm;
	v4l2_std_id id;

	BUG_ON(norm >= BTTV_TVNORMS);
	BUG_ON(btv->tvnorm >= BTTV_TVNORMS);

	tvnorm = &bttv_tvnorms[norm];

	if (memcmp(&bttv_tvnorms[btv->tvnorm].cropcap, &tvnorm->cropcap,
		   sizeof (tvnorm->cropcap))) {
		bttv_crop_reset(&btv->crop[0], norm);
		btv->crop[1] = btv->crop[0]; /* current = default */

		if (0 == (btv->resources & VIDEO_RESOURCES)) {
			btv->crop_start = tvnorm->cropcap.bounds.top
				+ tvnorm->cropcap.bounds.height;
		}
	}

	btv->tvnorm = norm;

	btwrite(tvnorm->adelay, BT848_ADELAY);
	btwrite(tvnorm->bdelay, BT848_BDELAY);
	btaor(tvnorm->iform,~(BT848_IFORM_NORM|BT848_IFORM_XTBOTH),
	      BT848_IFORM);
	btwrite(tvnorm->vbipack, BT848_VBI_PACK_SIZE);
	btwrite(1, BT848_VBI_PACK_DEL);
	bt848A_set_timing(btv);

	switch (btv->c.type) {
	case BTTV_BOARD_VOODOOTV_FM:
	case BTTV_BOARD_VOODOOTV_200:
		bttv_tda9880_setnorm(btv, gpio_read());
		break;
	}
	id = tvnorm->v4l2_id;
	bttv_call_all(btv, core, s_std, id);

	return 0;
}

/* Call with btv->lock down.
 * Select video input and TV standard.  With irq_iswitch the actual mux
 * switch is deferred to the irq handler while a capture is active. */
static void
set_input(struct bttv *btv, unsigned int input, unsigned int norm)
{
	unsigned long flags;

	btv->input = input;
	if (irq_iswitch) {
		spin_lock_irqsave(&btv->s_lock,flags);
		if (btv->curr.frame_irq) {
			/* active capture -> delayed input switch */
			btv->new_input = input;
		} else {
			video_mux(btv,input);
		}
		spin_unlock_irqrestore(&btv->s_lock,flags);
	} else {
		video_mux(btv,input);
	}
	audio_input(btv, (btv->tuner_type != TUNER_ABSENT && input == 0) ?
			 TVAUDIO_INPUT_TUNER : TVAUDIO_INPUT_EXTERN);
	set_tvnorm(btv, norm);
}

/* Clear pending interrupts and program the interrupt mask; i2c-only
 * boards (no_video) get just the I2C-done interrupt. */
static void init_irqreg(struct bttv *btv)
{
	/* clear status */
	btwrite(0xfffffUL, BT848_INT_STAT);

	if (bttv_tvcards[btv->c.type].no_video) {
		/* i2c only */
		btwrite(BT848_INT_I2CDONE,
			BT848_INT_MASK);
	} else {
		/* full video */
		btwrite((btv->triton1)  |
			(btv->gpioirq ? BT848_INT_GPINT : 0) |
			BT848_INT_SCERR |
			(fdsr ? BT848_INT_FDSR : 0) |
			BT848_INT_RISCI | BT848_INT_OCERR |
			BT848_INT_FMTCHG|BT848_INT_HLOCK|
			BT848_INT_I2CDONE,
			BT848_INT_MASK);
	}
}

/* Bring the chip into a known state: capture off, color/ADC setup,
 * restore the cached picture controls, then enable interrupts. */
static void init_bt848(struct bttv *btv)
{
	int val;

	if (bttv_tvcards[btv->c.type].no_video) {
		/* very basic init only */
		init_irqreg(btv);
		return;
	}

	btwrite(0x00, BT848_CAP_CTL);
	btwrite(BT848_COLOR_CTL_GAMMA, BT848_COLOR_CTL);
	btwrite(BT848_IFORM_XTAUTO | BT848_IFORM_AUTO, BT848_IFORM);

	/* set planar and packed mode trigger points and         */
	/* set rising edge of inverted GPINTR pin as irq trigger */
	btwrite(BT848_GPIO_DMA_CTL_PKTP_32|
		BT848_GPIO_DMA_CTL_PLTP1_16|
		BT848_GPIO_DMA_CTL_PLTP23_16|
		BT848_GPIO_DMA_CTL_GPINTC|
		BT848_GPIO_DMA_CTL_GPINTI,
		BT848_GPIO_DMA_CTL);

	val = btv->opt_chroma_agc ? BT848_SCLOOP_CAGC : 0;
	btwrite(val, BT848_E_SCLOOP);
	btwrite(val, BT848_O_SCLOOP);

	btwrite(0x20, BT848_E_VSCALE_HI);
	btwrite(0x20, BT848_O_VSCALE_HI);
	btwrite(BT848_ADC_RESERVED | (btv->opt_adc_crush ? BT848_ADC_CRUSH : 0),
		BT848_ADC);

	btwrite(whitecrush_upper, BT848_WC_UP);
	btwrite(whitecrush_lower, BT848_WC_DOWN);

	if (btv->opt_lumafilter) {
		btwrite(0, BT848_E_CONTROL);
		btwrite(0, BT848_O_CONTROL);
	} else {
		btwrite(BT848_CONTROL_LDEC, BT848_E_CONTROL);
		btwrite(BT848_CONTROL_LDEC, BT848_O_CONTROL);
	}

	bt848_bright(btv,   btv->bright);
	bt848_hue(btv,      btv->hue);
	bt848_contrast(btv, btv->contrast);
	bt848_sat(btv,      btv->saturation);

	/* interrupt */
	init_irqreg(btv);
}

/* Recover from a chip upset: stop DMA, reinitialize the chip and
 * reprogram PLL, input and norm. */
static void bttv_reinit_bt848(struct bttv *btv)
{
	unsigned long flags;

	if (bttv_verbose)
		pr_info("%d: reset, reinitialize\n", btv->c.nr);
	spin_lock_irqsave(&btv->s_lock,flags);
	btv->errors=0;
	bttv_set_dma(btv,0);
	spin_unlock_irqrestore(&btv->s_lock,flags);

	init_bt848(btv);
	btv->pll.pll_current = -1;
	set_input(btv, btv->input, btv->tvnorm);
}

/* VIDIOC_G_CTRL: return the cached value of a control; audio controls
 * are forwarded to the subdevices. */
static int bttv_g_ctrl(struct file *file, void *priv,
					struct v4l2_control *c)
{
	struct bttv_fh *fh = priv;
	struct bttv *btv = fh->btv;

	switch (c->id) {
	case V4L2_CID_BRIGHTNESS:
		c->value = btv->bright;
		break;
	case V4L2_CID_HUE:
		c->value = btv->hue;
		break;
	case V4L2_CID_CONTRAST:
		c->value = btv->contrast;
		break;
	case V4L2_CID_SATURATION:
		c->value = btv->saturation;
		break;

	case V4L2_CID_AUDIO_MUTE:
	case V4L2_CID_AUDIO_VOLUME:
	case V4L2_CID_AUDIO_BALANCE:
	case V4L2_CID_AUDIO_BASS:
	case V4L2_CID_AUDIO_TREBLE:
		bttv_call_all(btv, core, g_ctrl, c);
		break;

	case V4L2_CID_PRIVATE_CHROMA_AGC:
		c->value = btv->opt_chroma_agc;
		break;
	case V4L2_CID_PRIVATE_COMBFILTER:
		c->value = btv->opt_combfilter;
		break;
	case V4L2_CID_PRIVATE_LUMAFILTER:
		c->value = btv->opt_lumafilter;
		break;
	case V4L2_CID_PRIVATE_AUTOMUTE:
		c->value = btv->opt_automute;
		break;
	case V4L2_CID_PRIVATE_AGC_CRUSH:
		c->value = btv->opt_adc_crush;
		break;
	case V4L2_CID_PRIVATE_VCR_HACK:
		c->value = btv->opt_vcr_hack;
		break;
	case V4L2_CID_PRIVATE_WHITECRUSH_UPPER:
		c->value = btv->opt_whitecrush_upper;
		break;
	case V4L2_CID_PRIVATE_WHITECRUSH_LOWER:
		c->value = btv->opt_whitecrush_lower;
		break;
	case V4L2_CID_PRIVATE_UV_RATIO:
c->value = btv->opt_uv_ratio;
		break;
	case V4L2_CID_PRIVATE_FULL_LUMA_RANGE:
		c->value = btv->opt_full_luma_range;
		break;
	case V4L2_CID_PRIVATE_CORING:
		c->value = btv->opt_coring;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

/* VIDIOC_S_CTRL: update the cached control value and push it to the
 * hardware registers / subdevices. */
static int bttv_s_ctrl(struct file *file, void *f,
					struct v4l2_control *c)
{
	int err;
	int val;
	struct bttv_fh *fh = f;
	struct bttv *btv = fh->btv;

	err = v4l2_prio_check(&btv->prio, fh->prio);
	if (0 != err)
		return err;

	switch (c->id) {
	case V4L2_CID_BRIGHTNESS:
		bt848_bright(btv, c->value);
		break;
	case V4L2_CID_HUE:
		bt848_hue(btv, c->value);
		break;
	case V4L2_CID_CONTRAST:
		bt848_contrast(btv, c->value);
		break;
	case V4L2_CID_SATURATION:
		bt848_sat(btv, c->value);
		break;
	case V4L2_CID_AUDIO_MUTE:
		audio_mute(btv, c->value);
		/* fall through */
	case V4L2_CID_AUDIO_VOLUME:
		if (btv->volume_gpio)
			btv->volume_gpio(btv, c->value);
		bttv_call_all(btv, core, s_ctrl, c);
		break;
	case V4L2_CID_AUDIO_BALANCE:
	case V4L2_CID_AUDIO_BASS:
	case V4L2_CID_AUDIO_TREBLE:
		bttv_call_all(btv, core, s_ctrl, c);
		break;

	case V4L2_CID_PRIVATE_CHROMA_AGC:
		btv->opt_chroma_agc = c->value;
		val = btv->opt_chroma_agc ? BT848_SCLOOP_CAGC : 0;
		btwrite(val, BT848_E_SCLOOP);
		btwrite(val, BT848_O_SCLOOP);
		break;
	case V4L2_CID_PRIVATE_COMBFILTER:
		btv->opt_combfilter = c->value;
		break;
	case V4L2_CID_PRIVATE_LUMAFILTER:
		btv->opt_lumafilter = c->value;
		if (btv->opt_lumafilter) {
			btand(~BT848_CONTROL_LDEC, BT848_E_CONTROL);
			btand(~BT848_CONTROL_LDEC, BT848_O_CONTROL);
		} else {
			btor(BT848_CONTROL_LDEC, BT848_E_CONTROL);
			btor(BT848_CONTROL_LDEC, BT848_O_CONTROL);
		}
		break;
	case V4L2_CID_PRIVATE_AUTOMUTE:
		btv->opt_automute = c->value;
		break;
	case V4L2_CID_PRIVATE_AGC_CRUSH:
		btv->opt_adc_crush = c->value;
		btwrite(BT848_ADC_RESERVED |
			(btv->opt_adc_crush ? BT848_ADC_CRUSH : 0),
			BT848_ADC);
		break;
	case V4L2_CID_PRIVATE_VCR_HACK:
		btv->opt_vcr_hack = c->value;
		break;
	case V4L2_CID_PRIVATE_WHITECRUSH_UPPER:
		btv->opt_whitecrush_upper = c->value;
		btwrite(c->value, BT848_WC_UP);
		break;
	case V4L2_CID_PRIVATE_WHITECRUSH_LOWER:
		btv->opt_whitecrush_lower = c->value;
		btwrite(c->value, BT848_WC_DOWN);
		break;
	case V4L2_CID_PRIVATE_UV_RATIO:
		btv->opt_uv_ratio = c->value;
		bt848_sat(btv, btv->saturation);
		break;
	case V4L2_CID_PRIVATE_FULL_LUMA_RANGE:
		btv->opt_full_luma_range = c->value;
		btaor((c->value<<7), ~BT848_OFORM_RANGE, BT848_OFORM);
		break;
	case V4L2_CID_PRIVATE_CORING:
		btv->opt_coring = c->value;
		btaor((c->value<<5), ~BT848_OFORM_CORE32, BT848_OFORM);
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

/* ----------------------------------------------------------------------- */

/* Debug helper: dump the GPIO enable/in/out state with a comment tag. */
void bttv_gpio_tracking(struct bttv *btv, char *comment)
{
	unsigned int outbits, data;
	outbits = btread(BT848_GPIO_OUT_EN);
	data    = btread(BT848_GPIO_DATA);
	pr_debug("%d: gpio: en=%08x, out=%08x in=%08x [%s]\n",
		 btv->c.nr, outbits, data & outbits, data & ~outbits, comment);
}

/* Enable the VSYNC interrupt (field counter) while the device has
 * users, disable it and reset the count otherwise. */
static void bttv_field_count(struct bttv *btv)
{
	int need_count = 0;

	if (btv->users)
		need_count++;

	if (need_count) {
		/* start field counter */
		btor(BT848_INT_VSYNC,BT848_INT_MASK);
	} else {
		/* stop field counter */
		btand(~BT848_INT_VSYNC,BT848_INT_MASK);
		btv->field_count = 0;
	}
}

/* Look up a format descriptor by fourcc; skips the raw entry (-1).
 * Returns NULL for unknown fourccs. */
static const struct bttv_format*
format_by_fourcc(int fourcc)
{
	unsigned int i;

	for (i = 0; i < FORMATS; i++) {
		if (-1 == formats[i].fourcc)
			continue;
		if (formats[i].fourcc == fourcc)
			return formats+i;
	}
	return NULL;
}

/* ----------------------------------------------------------------------- */
/* misc helpers                                                            */

/* Swap in a new overlay buffer (or NULL to stop overlay): hand it to the
 * irq logic under s_lock, free the previous one, and release the overlay
 * resource when overlay is turned off. */
static int
bttv_switch_overlay(struct bttv *btv, struct bttv_fh *fh,
		    struct bttv_buffer *new)
{
	struct bttv_buffer *old;
	unsigned long flags;
	int retval = 0;

	dprintk("switch_overlay: enter [new=%p]\n", new);
	if (new)
		new->vb.state = VIDEOBUF_DONE;
	spin_lock_irqsave(&btv->s_lock,flags);
	old =
btv->screen; btv->screen = new; btv->loop_irq |= 1; bttv_set_dma(btv, 0x03); spin_unlock_irqrestore(&btv->s_lock,flags); if (NULL != old) { dprintk("switch_overlay: old=%p state is %d\n", old, old->vb.state); bttv_dma_free(&fh->cap,btv, old); kfree(old); } if (NULL == new) free_btres_lock(btv,fh,RESOURCE_OVERLAY); dprintk("switch_overlay: done\n"); return retval; } /* ----------------------------------------------------------------------- */ /* video4linux (1) interface */ static int bttv_prepare_buffer(struct videobuf_queue *q,struct bttv *btv, struct bttv_buffer *buf, const struct bttv_format *fmt, unsigned int width, unsigned int height, enum v4l2_field field) { struct bttv_fh *fh = q->priv_data; int redo_dma_risc = 0; struct bttv_crop c; int norm; int rc; /* check settings */ if (NULL == fmt) return -EINVAL; if (fmt->btformat == BT848_COLOR_FMT_RAW) { width = RAW_BPL; height = RAW_LINES*2; if (width*height > buf->vb.bsize) return -EINVAL; buf->vb.size = buf->vb.bsize; /* Make sure tvnorm and vbi_end remain consistent until we're done. */ norm = btv->tvnorm; /* In this mode capturing always starts at defrect.top (default VDELAY), ignoring cropping parameters. */ if (btv->vbi_end > bttv_tvnorms[norm].cropcap.defrect.top) { return -EINVAL; } c.rect = bttv_tvnorms[norm].cropcap.defrect; } else { norm = btv->tvnorm; c = btv->crop[!!fh->do_crop]; if (width < c.min_scaled_width || width > c.max_scaled_width || height < c.min_scaled_height) return -EINVAL; switch (field) { case V4L2_FIELD_TOP: case V4L2_FIELD_BOTTOM: case V4L2_FIELD_ALTERNATE: /* btv->crop counts frame lines. Max. scale factor is 16:1 for frames, 8:1 for fields. 
*/ if (height * 2 > c.max_scaled_height) return -EINVAL; break; default: if (height > c.max_scaled_height) return -EINVAL; break; } buf->vb.size = (width * height * fmt->depth) >> 3; if (0 != buf->vb.baddr && buf->vb.bsize < buf->vb.size) return -EINVAL; } /* alloc + fill struct bttv_buffer (if changed) */ if (buf->vb.width != width || buf->vb.height != height || buf->vb.field != field || buf->tvnorm != norm || buf->fmt != fmt || buf->crop.top != c.rect.top || buf->crop.left != c.rect.left || buf->crop.width != c.rect.width || buf->crop.height != c.rect.height) { buf->vb.width = width; buf->vb.height = height; buf->vb.field = field; buf->tvnorm = norm; buf->fmt = fmt; buf->crop = c.rect; redo_dma_risc = 1; } /* alloc risc memory */ if (VIDEOBUF_NEEDS_INIT == buf->vb.state) { redo_dma_risc = 1; if (0 != (rc = videobuf_iolock(q,&buf->vb,&btv->fbuf))) goto fail; } if (redo_dma_risc) if (0 != (rc = bttv_buffer_risc(btv,buf))) goto fail; buf->vb.state = VIDEOBUF_PREPARED; return 0; fail: bttv_dma_free(q,btv,buf); return rc; } static int buffer_setup(struct videobuf_queue *q, unsigned int *count, unsigned int *size) { struct bttv_fh *fh = q->priv_data; *size = fh->fmt->depth*fh->width*fh->height >> 3; if (0 == *count) *count = gbuffers; if (*size * *count > gbuffers * gbufsize) *count = (gbuffers * gbufsize) / *size; return 0; } static int buffer_prepare(struct videobuf_queue *q, struct videobuf_buffer *vb, enum v4l2_field field) { struct bttv_buffer *buf = container_of(vb,struct bttv_buffer,vb); struct bttv_fh *fh = q->priv_data; return bttv_prepare_buffer(q,fh->btv, buf, fh->fmt, fh->width, fh->height, field); } static void buffer_queue(struct videobuf_queue *q, struct videobuf_buffer *vb) { struct bttv_buffer *buf = container_of(vb,struct bttv_buffer,vb); struct bttv_fh *fh = q->priv_data; struct bttv *btv = fh->btv; buf->vb.state = VIDEOBUF_QUEUED; list_add_tail(&buf->vb.queue,&btv->capture); if (!btv->curr.frame_irq) { btv->loop_irq |= 1; bttv_set_dma(btv, 0x03); } 
}

/* videobuf .buf_release op: free the buffer's DMA/RISC resources. */
static void buffer_release(struct videobuf_queue *q, struct videobuf_buffer *vb)
{
	struct bttv_buffer *buf = container_of(vb,struct bttv_buffer,vb);
	struct bttv_fh *fh = q->priv_data;

	bttv_dma_free(q,fh->btv,buf);
}

/* videobuf operations for the video capture queue */
static struct videobuf_queue_ops bttv_video_qops = {
	.buf_setup = buffer_setup,
	.buf_prepare = buffer_prepare,
	.buf_queue = buffer_queue,
	.buf_release = buffer_release,
};

/* VIDIOC_S_STD: switch to the first supported TV norm matching *id. */
static int bttv_s_std(struct file *file, void *priv, v4l2_std_id *id)
{
	struct bttv_fh *fh = priv;
	struct bttv *btv = fh->btv;
	unsigned int i;
	int err;

	/* only a sufficiently prioritized fh may change global state */
	err = v4l2_prio_check(&btv->prio, fh->prio);
	if (err)
		goto err;

	/* pick the first norm whose v4l2 id bit the caller requested */
	for (i = 0; i < BTTV_TVNORMS; i++)
		if (*id & bttv_tvnorms[i].v4l2_id)
			break;
	if (i == BTTV_TVNORMS) {
		err = -EINVAL;
		goto err;
	}

	set_tvnorm(btv, i);

err:
	return err;
}

/* VIDIOC_QUERYSTD: guess 625/50 vs 525/60 from the decoder's NUML bit. */
static int bttv_querystd(struct file *file, void *f, v4l2_std_id *id)
{
	struct bttv_fh *fh = f;
	struct bttv *btv = fh->btv;

	if (btread(BT848_DSTATUS) & BT848_DSTATUS_NUML)
		*id = V4L2_STD_625_50;
	else
		*id = V4L2_STD_525_60;
	return 0;
}

/* VIDIOC_ENUMINPUT: describe video input i->index for this board. */
static int bttv_enum_input(struct file *file, void *priv,
					struct v4l2_input *i)
{
	struct bttv_fh *fh = priv;
	struct bttv *btv = fh->btv;
	int rc = 0;

	if (i->index >= bttv_tvcards[btv->c.type].video_inputs) {
		rc = -EINVAL;
		goto err;
	}

	i->type = V4L2_INPUT_TYPE_CAMERA;
	i->audioset = 1;

	/* input 0 is the tuner whenever the board has one */
	if (btv->tuner_type != TUNER_ABSENT && i->index == 0) {
		sprintf(i->name, "Television");
		i->type  = V4L2_INPUT_TYPE_TUNER;
		i->tuner = 0;
	} else if (i->index == btv->svhs) {
		sprintf(i->name, "S-Video");
	} else {
		sprintf(i->name, "Composite%d", i->index);
	}

	/* signal status is only observable on the selected input */
	if (i->index == btv->input) {
		__u32 dstatus = btread(BT848_DSTATUS);
		if (0 == (dstatus & BT848_DSTATUS_PRES))
			i->status |= V4L2_IN_ST_NO_SIGNAL;
		if (0 == (dstatus & BT848_DSTATUS_HLOC))
			i->status |= V4L2_IN_ST_NO_H_LOCK;
	}

	i->std = BTTV_NORMS;

err:
	return rc;
}

/* VIDIOC_G_INPUT: report the currently selected input. */
static int bttv_g_input(struct file *file, void *priv, unsigned int *i)
{
	struct bttv_fh *fh = priv;
	struct bttv *btv = fh->btv;

	*i = btv->input;
	return 0;
}

/* VIDIOC_S_INPUT (signature continues on the next line) */
static int bttv_s_input(struct
file *file, void *priv, unsigned int i) { struct bttv_fh *fh = priv; struct bttv *btv = fh->btv; int err; err = v4l2_prio_check(&btv->prio, fh->prio); if (unlikely(err)) goto err; if (i > bttv_tvcards[btv->c.type].video_inputs) { err = -EINVAL; goto err; } set_input(btv, i, btv->tvnorm); err: return 0; } static int bttv_s_tuner(struct file *file, void *priv, struct v4l2_tuner *t) { struct bttv_fh *fh = priv; struct bttv *btv = fh->btv; int err; if (unlikely(0 != t->index)) return -EINVAL; if (unlikely(btv->tuner_type == TUNER_ABSENT)) { err = -EINVAL; goto err; } err = v4l2_prio_check(&btv->prio, fh->prio); if (unlikely(err)) goto err; bttv_call_all(btv, tuner, s_tuner, t); if (btv->audio_mode_gpio) btv->audio_mode_gpio(btv, t, 1); err: return 0; } static int bttv_g_frequency(struct file *file, void *priv, struct v4l2_frequency *f) { struct bttv_fh *fh = priv; struct bttv *btv = fh->btv; f->type = btv->radio_user ? V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV; f->frequency = btv->freq; return 0; } static int bttv_s_frequency(struct file *file, void *priv, struct v4l2_frequency *f) { struct bttv_fh *fh = priv; struct bttv *btv = fh->btv; int err; if (unlikely(f->tuner != 0)) return -EINVAL; err = v4l2_prio_check(&btv->prio, fh->prio); if (unlikely(err)) goto err; if (unlikely(f->type != (btv->radio_user ? 
V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV))) { err = -EINVAL; goto err; } btv->freq = f->frequency; bttv_call_all(btv, tuner, s_frequency, f); if (btv->has_matchbox && btv->radio_user) tea5757_set_freq(btv, btv->freq); err: return 0; } static int bttv_log_status(struct file *file, void *f) { struct bttv_fh *fh = f; struct bttv *btv = fh->btv; bttv_call_all(btv, core, log_status); return 0; } #ifdef CONFIG_VIDEO_ADV_DEBUG static int bttv_g_register(struct file *file, void *f, struct v4l2_dbg_register *reg) { struct bttv_fh *fh = f; struct bttv *btv = fh->btv; if (!capable(CAP_SYS_ADMIN)) return -EPERM; if (!v4l2_chip_match_host(&reg->match)) return -EINVAL; /* bt848 has a 12-bit register space */ reg->reg &= 0xfff; reg->val = btread(reg->reg); reg->size = 1; return 0; } static int bttv_s_register(struct file *file, void *f, struct v4l2_dbg_register *reg) { struct bttv_fh *fh = f; struct bttv *btv = fh->btv; if (!capable(CAP_SYS_ADMIN)) return -EPERM; if (!v4l2_chip_match_host(&reg->match)) return -EINVAL; /* bt848 has a 12-bit register space */ reg->reg &= 0xfff; btwrite(reg->val, reg->reg); return 0; } #endif /* Given cropping boundaries b and the scaled width and height of a single field or frame, which must not exceed hardware limits, this function adjusts the cropping parameters c. */ static void bttv_crop_adjust (struct bttv_crop * c, const struct v4l2_rect * b, __s32 width, __s32 height, enum v4l2_field field) { __s32 frame_height = height << !V4L2_FIELD_HAS_BOTH(field); __s32 max_left; __s32 max_top; if (width < c->min_scaled_width) { /* Max. hor. scale factor 16:1. */ c->rect.width = width * 16; } else if (width > c->max_scaled_width) { /* Min. hor. scale factor 1:1. */ c->rect.width = width; max_left = b->left + b->width - width; max_left = min(max_left, (__s32) MAX_HDELAY); if (c->rect.left > max_left) c->rect.left = max_left; } if (height < c->min_scaled_height) { /* Max. vert. scale factor 16:1, single fields 8:1. 
*/ c->rect.height = height * 16; } else if (frame_height > c->max_scaled_height) { /* Min. vert. scale factor 1:1. Top and height count field lines times two. */ c->rect.height = (frame_height + 1) & ~1; max_top = b->top + b->height - c->rect.height; if (c->rect.top > max_top) c->rect.top = max_top; } bttv_crop_calc_limits(c); } /* Returns an error if scaling to a frame or single field with the given width and height is not possible with the current cropping parameters and width aligned according to width_mask. If adjust_size is TRUE the function may adjust the width and/or height instead, rounding width to (width + width_bias) & width_mask. If adjust_crop is TRUE it may also adjust the current cropping parameters to get closer to the desired image size. */ static int limit_scaled_size_lock (struct bttv_fh * fh, __s32 * width, __s32 * height, enum v4l2_field field, unsigned int width_mask, unsigned int width_bias, int adjust_size, int adjust_crop) { struct bttv *btv = fh->btv; const struct v4l2_rect *b; struct bttv_crop *c; __s32 min_width; __s32 min_height; __s32 max_width; __s32 max_height; int rc; BUG_ON((int) width_mask >= 0 || width_bias >= (unsigned int) -width_mask); /* Make sure tvnorm, vbi_end and the current cropping parameters remain consistent until we're done. */ b = &bttv_tvnorms[btv->tvnorm].cropcap.bounds; /* Do crop - use current, don't - use default parameters. */ c = &btv->crop[!!fh->do_crop]; if (fh->do_crop && adjust_size && adjust_crop && !locked_btres(btv, VIDEO_RESOURCES)) { min_width = 48; min_height = 32; /* We cannot scale up. When the scaled image is larger than crop.rect we adjust the crop.rect as required by the V4L2 spec, hence cropcap.bounds are our limit. */ max_width = min(b->width, (__s32) MAX_HACTIVE); max_height = b->height; /* We cannot capture the same line as video and VBI data. Note btv->vbi_end is really a minimum, see bttv_vbi_try_fmt(). 
*/ if (btv->vbi_end > b->top) { max_height -= btv->vbi_end - b->top; rc = -EBUSY; if (min_height > max_height) goto fail; } } else { rc = -EBUSY; if (btv->vbi_end > c->rect.top) goto fail; min_width = c->min_scaled_width; min_height = c->min_scaled_height; max_width = c->max_scaled_width; max_height = c->max_scaled_height; adjust_crop = 0; } min_width = (min_width - width_mask - 1) & width_mask; max_width = max_width & width_mask; /* Max. scale factor is 16:1 for frames, 8:1 for fields. */ min_height = min_height; /* Min. scale factor is 1:1. */ max_height >>= !V4L2_FIELD_HAS_BOTH(field); if (adjust_size) { *width = clamp(*width, min_width, max_width); *height = clamp(*height, min_height, max_height); /* Round after clamping to avoid overflow. */ *width = (*width + width_bias) & width_mask; if (adjust_crop) { bttv_crop_adjust(c, b, *width, *height, field); if (btv->vbi_end > c->rect.top) { /* Move the crop window out of the way. */ c->rect.top = btv->vbi_end; } } } else { rc = -EINVAL; if (*width < min_width || *height < min_height || *width > max_width || *height > max_height || 0 != (*width & ~width_mask)) goto fail; } rc = 0; /* success */ fail: return rc; } /* Returns an error if the given overlay window dimensions are not possible with the current cropping parameters. If adjust_size is TRUE the function may adjust the window width and/or height instead, however it always rounds the horizontal position and width as btcx_align() does. If adjust_crop is TRUE the function may also adjust the current cropping parameters to get closer to the desired window size. 
*/ static int verify_window_lock (struct bttv_fh * fh, struct v4l2_window * win, int adjust_size, int adjust_crop) { enum v4l2_field field; unsigned int width_mask; int rc; if (win->w.width < 48 || win->w.height < 32) return -EINVAL; if (win->clipcount > 2048) return -EINVAL; field = win->field; if (V4L2_FIELD_ANY == field) { __s32 height2; height2 = fh->btv->crop[!!fh->do_crop].rect.height >> 1; field = (win->w.height > height2) ? V4L2_FIELD_INTERLACED : V4L2_FIELD_TOP; } switch (field) { case V4L2_FIELD_TOP: case V4L2_FIELD_BOTTOM: case V4L2_FIELD_INTERLACED: break; default: return -EINVAL; } /* 4-byte alignment. */ if (NULL == fh->ovfmt) return -EINVAL; width_mask = ~0; switch (fh->ovfmt->depth) { case 8: case 24: width_mask = ~3; break; case 16: width_mask = ~1; break; case 32: break; default: BUG(); } win->w.width -= win->w.left & ~width_mask; win->w.left = (win->w.left - width_mask - 1) & width_mask; rc = limit_scaled_size_lock(fh, &win->w.width, &win->w.height, field, width_mask, /* width_bias: round down */ 0, adjust_size, adjust_crop); if (0 != rc) return rc; win->field = field; return 0; } static int setup_window_lock(struct bttv_fh *fh, struct bttv *btv, struct v4l2_window *win, int fixup) { struct v4l2_clip *clips = NULL; int n,size,retval = 0; if (NULL == fh->ovfmt) return -EINVAL; if (!(fh->ovfmt->flags & FORMAT_FLAGS_PACKED)) return -EINVAL; retval = verify_window_lock(fh, win, /* adjust_size */ fixup, /* adjust_crop */ fixup); if (0 != retval) return retval; /* copy clips -- luckily v4l1 + v4l2 are binary compatible here ...*/ n = win->clipcount; size = sizeof(*clips)*(n+4); clips = kmalloc(size,GFP_KERNEL); if (NULL == clips) return -ENOMEM; if (n > 0) { if (copy_from_user(clips,win->clips,sizeof(struct v4l2_clip)*n)) { kfree(clips); return -EFAULT; } } /* clip against screen */ if (NULL != btv->fbuf.base) n = btcx_screen_clips(btv->fbuf.fmt.width, btv->fbuf.fmt.height, &win->w, clips, n); btcx_sort_clips(clips,n); /* 4-byte alignments */ switch 
(fh->ovfmt->depth) { case 8: case 24: btcx_align(&win->w, clips, n, 3); break; case 16: btcx_align(&win->w, clips, n, 1); break; case 32: /* no alignment fixups needed */ break; default: BUG(); } kfree(fh->ov.clips); fh->ov.clips = clips; fh->ov.nclips = n; fh->ov.w = win->w; fh->ov.field = win->field; fh->ov.setup_ok = 1; btv->init.ov.w.width = win->w.width; btv->init.ov.w.height = win->w.height; btv->init.ov.field = win->field; /* update overlay if needed */ retval = 0; if (check_btres(fh, RESOURCE_OVERLAY)) { struct bttv_buffer *new; new = videobuf_sg_alloc(sizeof(*new)); new->crop = btv->crop[!!fh->do_crop].rect; bttv_overlay_risc(btv, &fh->ov, fh->ovfmt, new); retval = bttv_switch_overlay(btv,fh,new); } return retval; } /* ----------------------------------------------------------------------- */ static struct videobuf_queue* bttv_queue(struct bttv_fh *fh) { struct videobuf_queue* q = NULL; switch (fh->type) { case V4L2_BUF_TYPE_VIDEO_CAPTURE: q = &fh->cap; break; case V4L2_BUF_TYPE_VBI_CAPTURE: q = &fh->vbi; break; default: BUG(); } return q; } static int bttv_resource(struct bttv_fh *fh) { int res = 0; switch (fh->type) { case V4L2_BUF_TYPE_VIDEO_CAPTURE: res = RESOURCE_VIDEO_STREAM; break; case V4L2_BUF_TYPE_VBI_CAPTURE: res = RESOURCE_VBI; break; default: BUG(); } return res; } static int bttv_switch_type(struct bttv_fh *fh, enum v4l2_buf_type type) { struct videobuf_queue *q = bttv_queue(fh); int res = bttv_resource(fh); if (check_btres(fh,res)) return -EBUSY; if (videobuf_queue_is_busy(q)) return -EBUSY; fh->type = type; return 0; } static void pix_format_set_size (struct v4l2_pix_format * f, const struct bttv_format * fmt, unsigned int width, unsigned int height) { f->width = width; f->height = height; if (fmt->flags & FORMAT_FLAGS_PLANAR) { f->bytesperline = width; /* Y plane */ f->sizeimage = (width * height * fmt->depth) >> 3; } else { f->bytesperline = (width * fmt->depth) >> 3; f->sizeimage = height * f->bytesperline; } } static int 
bttv_g_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *f) { struct bttv_fh *fh = priv; pix_format_set_size(&f->fmt.pix, fh->fmt, fh->width, fh->height); f->fmt.pix.field = fh->cap.field; f->fmt.pix.pixelformat = fh->fmt->fourcc; return 0; } static int bttv_g_fmt_vid_overlay(struct file *file, void *priv, struct v4l2_format *f) { struct bttv_fh *fh = priv; f->fmt.win.w = fh->ov.w; f->fmt.win.field = fh->ov.field; return 0; } static int bttv_try_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *f) { const struct bttv_format *fmt; struct bttv_fh *fh = priv; struct bttv *btv = fh->btv; enum v4l2_field field; __s32 width, height; int rc; fmt = format_by_fourcc(f->fmt.pix.pixelformat); if (NULL == fmt) return -EINVAL; field = f->fmt.pix.field; if (V4L2_FIELD_ANY == field) { __s32 height2; height2 = btv->crop[!!fh->do_crop].rect.height >> 1; field = (f->fmt.pix.height > height2) ? V4L2_FIELD_INTERLACED : V4L2_FIELD_BOTTOM; } if (V4L2_FIELD_SEQ_BT == field) field = V4L2_FIELD_SEQ_TB; switch (field) { case V4L2_FIELD_TOP: case V4L2_FIELD_BOTTOM: case V4L2_FIELD_ALTERNATE: case V4L2_FIELD_INTERLACED: break; case V4L2_FIELD_SEQ_TB: if (fmt->flags & FORMAT_FLAGS_PLANAR) return -EINVAL; break; default: return -EINVAL; } width = f->fmt.pix.width; height = f->fmt.pix.height; rc = limit_scaled_size_lock(fh, &width, &height, field, /* width_mask: 4 pixels */ ~3, /* width_bias: nearest */ 2, /* adjust_size */ 1, /* adjust_crop */ 0); if (0 != rc) return rc; /* update data for the application */ f->fmt.pix.field = field; pix_format_set_size(&f->fmt.pix, fmt, width, height); return 0; } static int bttv_try_fmt_vid_overlay(struct file *file, void *priv, struct v4l2_format *f) { struct bttv_fh *fh = priv; return verify_window_lock(fh, &f->fmt.win, /* adjust_size */ 1, /* adjust_crop */ 0); } static int bttv_s_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *f) { int retval; const struct bttv_format *fmt; struct bttv_fh *fh = priv; struct bttv 
*btv = fh->btv; __s32 width, height; enum v4l2_field field; retval = bttv_switch_type(fh, f->type); if (0 != retval) return retval; retval = bttv_try_fmt_vid_cap(file, priv, f); if (0 != retval) return retval; width = f->fmt.pix.width; height = f->fmt.pix.height; field = f->fmt.pix.field; retval = limit_scaled_size_lock(fh, &width, &height, f->fmt.pix.field, /* width_mask: 4 pixels */ ~3, /* width_bias: nearest */ 2, /* adjust_size */ 1, /* adjust_crop */ 1); if (0 != retval) return retval; f->fmt.pix.field = field; fmt = format_by_fourcc(f->fmt.pix.pixelformat); /* update our state informations */ fh->fmt = fmt; fh->cap.field = f->fmt.pix.field; fh->cap.last = V4L2_FIELD_NONE; fh->width = f->fmt.pix.width; fh->height = f->fmt.pix.height; btv->init.fmt = fmt; btv->init.width = f->fmt.pix.width; btv->init.height = f->fmt.pix.height; return 0; } static int bttv_s_fmt_vid_overlay(struct file *file, void *priv, struct v4l2_format *f) { struct bttv_fh *fh = priv; struct bttv *btv = fh->btv; if (no_overlay > 0) { pr_err("V4L2_BUF_TYPE_VIDEO_OVERLAY: no_overlay\n"); return -EINVAL; } return setup_window_lock(fh, btv, &f->fmt.win, 1); } static int bttv_querycap(struct file *file, void *priv, struct v4l2_capability *cap) { struct bttv_fh *fh = priv; struct bttv *btv = fh->btv; if (0 == v4l2) return -EINVAL; strlcpy(cap->driver, "bttv", sizeof(cap->driver)); strlcpy(cap->card, btv->video_dev->name, sizeof(cap->card)); snprintf(cap->bus_info, sizeof(cap->bus_info), "PCI:%s", pci_name(btv->c.pci)); cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_VBI_CAPTURE | V4L2_CAP_READWRITE | V4L2_CAP_STREAMING; if (no_overlay <= 0) cap->capabilities |= V4L2_CAP_VIDEO_OVERLAY; /* * No need to lock here: those vars are initialized during board * probe and remains untouched during the rest of the driver lifecycle */ if (btv->has_saa6588) cap->capabilities |= V4L2_CAP_RDS_CAPTURE; if (btv->tuner_type != TUNER_ABSENT) cap->capabilities |= V4L2_CAP_TUNER; return 0; } static int 
bttv_enum_fmt_cap_ovr(struct v4l2_fmtdesc *f) { int index = -1, i; for (i = 0; i < FORMATS; i++) { if (formats[i].fourcc != -1) index++; if ((unsigned int)index == f->index) break; } if (FORMATS == i) return -EINVAL; f->pixelformat = formats[i].fourcc; strlcpy(f->description, formats[i].name, sizeof(f->description)); return i; } static int bttv_enum_fmt_vid_cap(struct file *file, void *priv, struct v4l2_fmtdesc *f) { int rc = bttv_enum_fmt_cap_ovr(f); if (rc < 0) return rc; return 0; } static int bttv_enum_fmt_vid_overlay(struct file *file, void *priv, struct v4l2_fmtdesc *f) { int rc; if (no_overlay > 0) { pr_err("V4L2_BUF_TYPE_VIDEO_OVERLAY: no_overlay\n"); return -EINVAL; } rc = bttv_enum_fmt_cap_ovr(f); if (rc < 0) return rc; if (!(formats[rc].flags & FORMAT_FLAGS_PACKED)) return -EINVAL; return 0; } static int bttv_g_fbuf(struct file *file, void *f, struct v4l2_framebuffer *fb) { struct bttv_fh *fh = f; struct bttv *btv = fh->btv; *fb = btv->fbuf; fb->capability = V4L2_FBUF_CAP_LIST_CLIPPING; if (fh->ovfmt) fb->fmt.pixelformat = fh->ovfmt->fourcc; return 0; } static int bttv_overlay(struct file *file, void *f, unsigned int on) { struct bttv_fh *fh = f; struct bttv *btv = fh->btv; struct bttv_buffer *new; int retval = 0; if (on) { /* verify args */ if (unlikely(!btv->fbuf.base)) { return -EINVAL; } if (unlikely(!fh->ov.setup_ok)) { dprintk("%d: overlay: !setup_ok\n", btv->c.nr); retval = -EINVAL; } if (retval) return retval; } if (!check_alloc_btres_lock(btv, fh, RESOURCE_OVERLAY)) return -EBUSY; if (on) { fh->ov.tvnorm = btv->tvnorm; new = videobuf_sg_alloc(sizeof(*new)); new->crop = btv->crop[!!fh->do_crop].rect; bttv_overlay_risc(btv, &fh->ov, fh->ovfmt, new); } else { new = NULL; } /* switch over */ retval = bttv_switch_overlay(btv, fh, new); return retval; } static int bttv_s_fbuf(struct file *file, void *f, struct v4l2_framebuffer *fb) { struct bttv_fh *fh = f; struct bttv *btv = fh->btv; const struct bttv_format *fmt; int retval; if 
(!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RAWIO)) return -EPERM; /* check args */ fmt = format_by_fourcc(fb->fmt.pixelformat); if (NULL == fmt) return -EINVAL; if (0 == (fmt->flags & FORMAT_FLAGS_PACKED)) return -EINVAL; retval = -EINVAL; if (fb->flags & V4L2_FBUF_FLAG_OVERLAY) { __s32 width = fb->fmt.width; __s32 height = fb->fmt.height; retval = limit_scaled_size_lock(fh, &width, &height, V4L2_FIELD_INTERLACED, /* width_mask */ ~3, /* width_bias */ 2, /* adjust_size */ 0, /* adjust_crop */ 0); if (0 != retval) return retval; } /* ok, accept it */ btv->fbuf.base = fb->base; btv->fbuf.fmt.width = fb->fmt.width; btv->fbuf.fmt.height = fb->fmt.height; if (0 != fb->fmt.bytesperline) btv->fbuf.fmt.bytesperline = fb->fmt.bytesperline; else btv->fbuf.fmt.bytesperline = btv->fbuf.fmt.width*fmt->depth/8; retval = 0; fh->ovfmt = fmt; btv->init.ovfmt = fmt; if (fb->flags & V4L2_FBUF_FLAG_OVERLAY) { fh->ov.w.left = 0; fh->ov.w.top = 0; fh->ov.w.width = fb->fmt.width; fh->ov.w.height = fb->fmt.height; btv->init.ov.w.width = fb->fmt.width; btv->init.ov.w.height = fb->fmt.height; kfree(fh->ov.clips); fh->ov.clips = NULL; fh->ov.nclips = 0; if (check_btres(fh, RESOURCE_OVERLAY)) { struct bttv_buffer *new; new = videobuf_sg_alloc(sizeof(*new)); new->crop = btv->crop[!!fh->do_crop].rect; bttv_overlay_risc(btv, &fh->ov, fh->ovfmt, new); retval = bttv_switch_overlay(btv, fh, new); } } return retval; } static int bttv_reqbufs(struct file *file, void *priv, struct v4l2_requestbuffers *p) { struct bttv_fh *fh = priv; return videobuf_reqbufs(bttv_queue(fh), p); } static int bttv_querybuf(struct file *file, void *priv, struct v4l2_buffer *b) { struct bttv_fh *fh = priv; return videobuf_querybuf(bttv_queue(fh), b); } static int bttv_qbuf(struct file *file, void *priv, struct v4l2_buffer *b) { struct bttv_fh *fh = priv; struct bttv *btv = fh->btv; int res = bttv_resource(fh); if (!check_alloc_btres_lock(btv, fh, res)) return -EBUSY; return videobuf_qbuf(bttv_queue(fh), b); } static int 
bttv_dqbuf(struct file *file, void *priv, struct v4l2_buffer *b)
{
	struct bttv_fh *fh = priv;
	return videobuf_dqbuf(bttv_queue(fh), b,
			file->f_flags & O_NONBLOCK);
}

/* VIDIOC_STREAMON: claim the capture resource, then start streaming. */
static int bttv_streamon(struct file *file, void *priv,
					enum v4l2_buf_type type)
{
	struct bttv_fh *fh = priv;
	struct bttv *btv = fh->btv;
	int res = bttv_resource(fh);

	if (!check_alloc_btres_lock(btv, fh, res))
		return -EBUSY;
	return videobuf_streamon(bttv_queue(fh));
}

/* VIDIOC_STREAMOFF: stop streaming, then release the capture resource. */
static int bttv_streamoff(struct file *file, void *priv,
					enum v4l2_buf_type type)
{
	struct bttv_fh *fh = priv;
	struct bttv *btv = fh->btv;
	int retval;
	int res = bttv_resource(fh);

	retval = videobuf_streamoff(bttv_queue(fh));
	if (retval < 0)
		return retval;
	free_btres_lock(btv, fh, res);
	return 0;
}

/* VIDIOC_QUERYCTRL: describe the control identified by c->id. */
static int bttv_queryctrl(struct file *file, void *priv,
					struct v4l2_queryctrl *c)
{
	struct bttv_fh *fh = priv;
	struct bttv *btv = fh->btv;
	const struct v4l2_queryctrl *ctrl;

	if ((c->id < V4L2_CID_BASE ||
	     c->id >= V4L2_CID_LASTP1) &&
	    (c->id < V4L2_CID_PRIVATE_BASE ||
	     c->id >= V4L2_CID_PRIVATE_LASTP1))
		return -EINVAL;

	/* volume is only a real control when the board has a volume GPIO;
	   otherwise report the disabled placeholder */
	if (!btv->volume_gpio && (c->id == V4L2_CID_AUDIO_VOLUME))
		*c = no_ctl;
	else {
		ctrl = ctrl_by_id(c->id);

		*c = (NULL != ctrl) ? *ctrl : no_ctl;
	}

	return 0;
}

/* VIDIOC_G_PARM: report the frame period of the current TV norm. */
static int bttv_g_parm(struct file *file, void *f,
				struct v4l2_streamparm *parm)
{
	struct bttv_fh *fh = f;
	struct bttv *btv = fh->btv;

	v4l2_video_std_frame_period(bttv_tvnorms[btv->tvnorm].v4l2_id,
				    &parm->parm.capture.timeperframe);
	return 0;
}

/* VIDIOC_G_TUNER: report tuner capabilities and signal status. */
static int bttv_g_tuner(struct file *file, void *priv,
					struct v4l2_tuner *t)
{
	struct bttv_fh *fh = priv;
	struct bttv *btv = fh->btv;

	if (btv->tuner_type == TUNER_ABSENT)
		return -EINVAL;
	if (0 != t->index)
		return -EINVAL;

	t->rxsubchans = V4L2_TUNER_SUB_MONO;
	bttv_call_all(btv, tuner, g_tuner, t);
	strcpy(t->name, "Television");
	t->capability = V4L2_TUNER_CAP_NORM;
	t->type = V4L2_TUNER_ANALOG_TV;
	/* horizontal lock from the decoder implies a usable carrier */
	if (btread(BT848_DSTATUS)&BT848_DSTATUS_HLOC)
		t->signal = 0xffff;

	if (btv->audio_mode_gpio)
		btv->audio_mode_gpio(btv, t, 0);

	return 0;
}

/* VIDIOC_G_PRIORITY */
static int bttv_g_priority(struct file *file, void *f, enum v4l2_priority *p)
{
	struct bttv_fh *fh = f;
	struct bttv *btv = fh->btv;

	*p = v4l2_prio_max(&btv->prio);
	return 0;
}

/* VIDIOC_S_PRIORITY */
static int bttv_s_priority(struct file *file, void *f,
					enum v4l2_priority prio)
{
	struct bttv_fh *fh = f;
	struct bttv *btv = fh->btv;
	int rc;

	rc = v4l2_prio_change(&btv->prio, &fh->prio, prio);

	return rc;
}

/* VIDIOC_CROPCAP: cropping bounds/defaults of the current norm. */
static int bttv_cropcap(struct file *file, void *priv,
				struct v4l2_cropcap *cap)
{
	struct bttv_fh *fh = priv;
	struct bttv *btv = fh->btv;

	if (cap->type != V4L2_BUF_TYPE_VIDEO_CAPTURE &&
	    cap->type != V4L2_BUF_TYPE_VIDEO_OVERLAY)
		return -EINVAL;

	*cap = bttv_tvnorms[btv->tvnorm].cropcap;

	return 0;
}

/* VIDIOC_G_CROP (body continues on the next line) */
static int bttv_g_crop(struct file *file, void *f, struct v4l2_crop *crop)
{
	struct bttv_fh *fh = f;
	struct bttv *btv = fh->btv;

	if (crop->type != V4L2_BUF_TYPE_VIDEO_CAPTURE &&
	    crop->type != V4L2_BUF_TYPE_VIDEO_OVERLAY)
		return -EINVAL;

	/* No fh->do_crop = 1; because btv->crop[1] may be
	   inconsistent with fh->width or fh->height and apps
	   do not expect a change here.
*/ crop->c = btv->crop[!!fh->do_crop].rect; return 0; } static int bttv_s_crop(struct file *file, void *f, struct v4l2_crop *crop) { struct bttv_fh *fh = f; struct bttv *btv = fh->btv; const struct v4l2_rect *b; int retval; struct bttv_crop c; __s32 b_left; __s32 b_top; __s32 b_right; __s32 b_bottom; if (crop->type != V4L2_BUF_TYPE_VIDEO_CAPTURE && crop->type != V4L2_BUF_TYPE_VIDEO_OVERLAY) return -EINVAL; /* Make sure tvnorm, vbi_end and the current cropping parameters remain consistent until we're done. Note read() may change vbi_end in check_alloc_btres_lock(). */ retval = v4l2_prio_check(&btv->prio, fh->prio); if (0 != retval) { return retval; } retval = -EBUSY; if (locked_btres(fh->btv, VIDEO_RESOURCES)) { return retval; } b = &bttv_tvnorms[btv->tvnorm].cropcap.bounds; b_left = b->left; b_right = b_left + b->width; b_bottom = b->top + b->height; b_top = max(b->top, btv->vbi_end); if (b_top + 32 >= b_bottom) { return retval; } /* Min. scaled size 48 x 32. */ c.rect.left = clamp(crop->c.left, b_left, b_right - 48); c.rect.left = min(c.rect.left, (__s32) MAX_HDELAY); c.rect.width = clamp(crop->c.width, 48, b_right - c.rect.left); c.rect.top = clamp(crop->c.top, b_top, b_bottom - 32); /* Top and height must be a multiple of two. 
*/ c.rect.top = (c.rect.top + 1) & ~1; c.rect.height = clamp(crop->c.height, 32, b_bottom - c.rect.top); c.rect.height = (c.rect.height + 1) & ~1; bttv_crop_calc_limits(&c); btv->crop[1] = c; fh->do_crop = 1; if (fh->width < c.min_scaled_width) { fh->width = c.min_scaled_width; btv->init.width = c.min_scaled_width; } else if (fh->width > c.max_scaled_width) { fh->width = c.max_scaled_width; btv->init.width = c.max_scaled_width; } if (fh->height < c.min_scaled_height) { fh->height = c.min_scaled_height; btv->init.height = c.min_scaled_height; } else if (fh->height > c.max_scaled_height) { fh->height = c.max_scaled_height; btv->init.height = c.max_scaled_height; } return 0; } static int bttv_g_audio(struct file *file, void *priv, struct v4l2_audio *a) { if (unlikely(a->index)) return -EINVAL; strcpy(a->name, "audio"); return 0; } static int bttv_s_audio(struct file *file, void *priv, struct v4l2_audio *a) { if (unlikely(a->index)) return -EINVAL; return 0; } static ssize_t bttv_read(struct file *file, char __user *data, size_t count, loff_t *ppos) { struct bttv_fh *fh = file->private_data; int retval = 0; if (fh->btv->errors) bttv_reinit_bt848(fh->btv); dprintk("%d: read count=%d type=%s\n", fh->btv->c.nr, (int)count, v4l2_type_names[fh->type]); switch (fh->type) { case V4L2_BUF_TYPE_VIDEO_CAPTURE: if (!check_alloc_btres_lock(fh->btv, fh, RESOURCE_VIDEO_READ)) { /* VIDEO_READ in use by another fh, or VIDEO_STREAM by any fh. 
*/ return -EBUSY; } retval = videobuf_read_one(&fh->cap, data, count, ppos, file->f_flags & O_NONBLOCK); free_btres_lock(fh->btv, fh, RESOURCE_VIDEO_READ); break; case V4L2_BUF_TYPE_VBI_CAPTURE: if (!check_alloc_btres_lock(fh->btv,fh,RESOURCE_VBI)) return -EBUSY; retval = videobuf_read_stream(&fh->vbi, data, count, ppos, 1, file->f_flags & O_NONBLOCK); break; default: BUG(); } return retval; } static unsigned int bttv_poll(struct file *file, poll_table *wait) { struct bttv_fh *fh = file->private_data; struct bttv_buffer *buf; enum v4l2_field field; unsigned int rc = POLLERR; if (V4L2_BUF_TYPE_VBI_CAPTURE == fh->type) { if (!check_alloc_btres_lock(fh->btv,fh,RESOURCE_VBI)) return POLLERR; return videobuf_poll_stream(file, &fh->vbi, wait); } if (check_btres(fh,RESOURCE_VIDEO_STREAM)) { /* streaming capture */ if (list_empty(&fh->cap.stream)) goto err; buf = list_entry(fh->cap.stream.next,struct bttv_buffer,vb.stream); } else { /* read() capture */ if (NULL == fh->cap.read_buf) { /* need to capture a new frame */ if (locked_btres(fh->btv,RESOURCE_VIDEO_STREAM)) goto err; fh->cap.read_buf = videobuf_sg_alloc(fh->cap.msize); if (NULL == fh->cap.read_buf) goto err; fh->cap.read_buf->memory = V4L2_MEMORY_USERPTR; field = videobuf_next_field(&fh->cap); if (0 != fh->cap.ops->buf_prepare(&fh->cap,fh->cap.read_buf,field)) { kfree (fh->cap.read_buf); fh->cap.read_buf = NULL; goto err; } fh->cap.ops->buf_queue(&fh->cap,fh->cap.read_buf); fh->cap.read_off = 0; } buf = (struct bttv_buffer*)fh->cap.read_buf; } poll_wait(file, &buf->vb.done, wait); if (buf->vb.state == VIDEOBUF_DONE || buf->vb.state == VIDEOBUF_ERROR) rc = POLLIN|POLLRDNORM; else rc = 0; err: return rc; } static int bttv_open(struct file *file) { struct video_device *vdev = video_devdata(file); struct bttv *btv = video_drvdata(file); struct bttv_fh *fh; enum v4l2_buf_type type = 0; dprintk("open dev=%s\n", video_device_node_name(vdev)); if (vdev->vfl_type == VFL_TYPE_GRABBER) { type = V4L2_BUF_TYPE_VIDEO_CAPTURE; } 
else if (vdev->vfl_type == VFL_TYPE_VBI) { type = V4L2_BUF_TYPE_VBI_CAPTURE; } else { WARN_ON(1); return -ENODEV; } dprintk("%d: open called (type=%s)\n", btv->c.nr, v4l2_type_names[type]); /* allocate per filehandle data */ fh = kmalloc(sizeof(*fh), GFP_KERNEL); if (unlikely(!fh)) return -ENOMEM; file->private_data = fh; *fh = btv->init; fh->type = type; fh->ov.setup_ok = 0; v4l2_prio_open(&btv->prio, &fh->prio); videobuf_queue_sg_init(&fh->cap, &bttv_video_qops, &btv->c.pci->dev, &btv->s_lock, V4L2_BUF_TYPE_VIDEO_CAPTURE, V4L2_FIELD_INTERLACED, sizeof(struct bttv_buffer), fh, &btv->lock); videobuf_queue_sg_init(&fh->vbi, &bttv_vbi_qops, &btv->c.pci->dev, &btv->s_lock, V4L2_BUF_TYPE_VBI_CAPTURE, V4L2_FIELD_SEQ_TB, sizeof(struct bttv_buffer), fh, &btv->lock); set_tvnorm(btv,btv->tvnorm); set_input(btv, btv->input, btv->tvnorm); btv->users++; /* The V4L2 spec requires one global set of cropping parameters which only change on request. These are stored in btv->crop[1]. However for compatibility with V4L apps and cropping unaware V4L2 apps we now reset the cropping parameters as seen through this fh, which is to say VIDIOC_G_CROP and scaling limit checks will use btv->crop[0], the default cropping parameters for the current video standard, and VIDIOC_S_FMT will not implicitely change the cropping parameters until VIDIOC_S_CROP has been called. */ fh->do_crop = !reset_crop; /* module parameter */ /* Likewise there should be one global set of VBI capture parameters, but for compatibility with V4L apps and earlier driver versions each fh has its own parameters. 
*/ bttv_vbi_fmt_reset(&fh->vbi_fmt, btv->tvnorm); bttv_field_count(btv); return 0; } static int bttv_release(struct file *file) { struct bttv_fh *fh = file->private_data; struct bttv *btv = fh->btv; /* turn off overlay */ if (check_btres(fh, RESOURCE_OVERLAY)) bttv_switch_overlay(btv,fh,NULL); /* stop video capture */ if (check_btres(fh, RESOURCE_VIDEO_STREAM)) { videobuf_streamoff(&fh->cap); free_btres_lock(btv,fh,RESOURCE_VIDEO_STREAM); } if (fh->cap.read_buf) { buffer_release(&fh->cap,fh->cap.read_buf); kfree(fh->cap.read_buf); } if (check_btres(fh, RESOURCE_VIDEO_READ)) { free_btres_lock(btv, fh, RESOURCE_VIDEO_READ); } /* stop vbi capture */ if (check_btres(fh, RESOURCE_VBI)) { videobuf_stop(&fh->vbi); free_btres_lock(btv,fh,RESOURCE_VBI); } /* free stuff */ videobuf_mmap_free(&fh->cap); videobuf_mmap_free(&fh->vbi); v4l2_prio_close(&btv->prio, fh->prio); file->private_data = NULL; kfree(fh); btv->users--; bttv_field_count(btv); if (!btv->users) audio_mute(btv, 1); return 0; } static int bttv_mmap(struct file *file, struct vm_area_struct *vma) { struct bttv_fh *fh = file->private_data; dprintk("%d: mmap type=%s 0x%lx+%ld\n", fh->btv->c.nr, v4l2_type_names[fh->type], vma->vm_start, vma->vm_end - vma->vm_start); return videobuf_mmap_mapper(bttv_queue(fh),vma); } static const struct v4l2_file_operations bttv_fops = { .owner = THIS_MODULE, .open = bttv_open, .release = bttv_release, .unlocked_ioctl = video_ioctl2, .read = bttv_read, .mmap = bttv_mmap, .poll = bttv_poll, }; static const struct v4l2_ioctl_ops bttv_ioctl_ops = { .vidioc_querycap = bttv_querycap, .vidioc_enum_fmt_vid_cap = bttv_enum_fmt_vid_cap, .vidioc_g_fmt_vid_cap = bttv_g_fmt_vid_cap, .vidioc_try_fmt_vid_cap = bttv_try_fmt_vid_cap, .vidioc_s_fmt_vid_cap = bttv_s_fmt_vid_cap, .vidioc_enum_fmt_vid_overlay = bttv_enum_fmt_vid_overlay, .vidioc_g_fmt_vid_overlay = bttv_g_fmt_vid_overlay, .vidioc_try_fmt_vid_overlay = bttv_try_fmt_vid_overlay, .vidioc_s_fmt_vid_overlay = bttv_s_fmt_vid_overlay, 
.vidioc_g_fmt_vbi_cap = bttv_g_fmt_vbi_cap, .vidioc_try_fmt_vbi_cap = bttv_try_fmt_vbi_cap, .vidioc_s_fmt_vbi_cap = bttv_s_fmt_vbi_cap, .vidioc_g_audio = bttv_g_audio, .vidioc_s_audio = bttv_s_audio, .vidioc_cropcap = bttv_cropcap, .vidioc_reqbufs = bttv_reqbufs, .vidioc_querybuf = bttv_querybuf, .vidioc_qbuf = bttv_qbuf, .vidioc_dqbuf = bttv_dqbuf, .vidioc_s_std = bttv_s_std, .vidioc_enum_input = bttv_enum_input, .vidioc_g_input = bttv_g_input, .vidioc_s_input = bttv_s_input, .vidioc_queryctrl = bttv_queryctrl, .vidioc_g_ctrl = bttv_g_ctrl, .vidioc_s_ctrl = bttv_s_ctrl, .vidioc_streamon = bttv_streamon, .vidioc_streamoff = bttv_streamoff, .vidioc_g_tuner = bttv_g_tuner, .vidioc_s_tuner = bttv_s_tuner, .vidioc_g_crop = bttv_g_crop, .vidioc_s_crop = bttv_s_crop, .vidioc_g_fbuf = bttv_g_fbuf, .vidioc_s_fbuf = bttv_s_fbuf, .vidioc_overlay = bttv_overlay, .vidioc_g_priority = bttv_g_priority, .vidioc_s_priority = bttv_s_priority, .vidioc_g_parm = bttv_g_parm, .vidioc_g_frequency = bttv_g_frequency, .vidioc_s_frequency = bttv_s_frequency, .vidioc_log_status = bttv_log_status, .vidioc_querystd = bttv_querystd, #ifdef CONFIG_VIDEO_ADV_DEBUG .vidioc_g_register = bttv_g_register, .vidioc_s_register = bttv_s_register, #endif }; static struct video_device bttv_video_template = { .fops = &bttv_fops, .ioctl_ops = &bttv_ioctl_ops, .tvnorms = BTTV_NORMS, .current_norm = V4L2_STD_PAL, }; /* ----------------------------------------------------------------------- */ /* radio interface */ static int radio_open(struct file *file) { struct video_device *vdev = video_devdata(file); struct bttv *btv = video_drvdata(file); struct bttv_fh *fh; dprintk("open dev=%s\n", video_device_node_name(vdev)); dprintk("%d: open called (radio)\n", btv->c.nr); /* allocate per filehandle data */ fh = kmalloc(sizeof(*fh), GFP_KERNEL); if (unlikely(!fh)) return -ENOMEM; file->private_data = fh; *fh = btv->init; v4l2_prio_open(&btv->prio, &fh->prio); btv->radio_user++; bttv_call_all(btv, tuner, s_radio); 
audio_input(btv,TVAUDIO_INPUT_RADIO); return 0; } static int radio_release(struct file *file) { struct bttv_fh *fh = file->private_data; struct bttv *btv = fh->btv; struct saa6588_command cmd; v4l2_prio_close(&btv->prio, fh->prio); file->private_data = NULL; kfree(fh); btv->radio_user--; bttv_call_all(btv, core, ioctl, SAA6588_CMD_CLOSE, &cmd); return 0; } static int radio_querycap(struct file *file, void *priv, struct v4l2_capability *cap) { struct bttv_fh *fh = priv; struct bttv *btv = fh->btv; strcpy(cap->driver, "bttv"); strlcpy(cap->card, btv->radio_dev->name, sizeof(cap->card)); sprintf(cap->bus_info, "PCI:%s", pci_name(btv->c.pci)); cap->capabilities = V4L2_CAP_TUNER; return 0; } static int radio_g_tuner(struct file *file, void *priv, struct v4l2_tuner *t) { struct bttv_fh *fh = priv; struct bttv *btv = fh->btv; if (btv->tuner_type == TUNER_ABSENT) return -EINVAL; if (0 != t->index) return -EINVAL; strcpy(t->name, "Radio"); t->type = V4L2_TUNER_RADIO; bttv_call_all(btv, tuner, g_tuner, t); if (btv->audio_mode_gpio) btv->audio_mode_gpio(btv, t, 0); return 0; } static int radio_enum_input(struct file *file, void *priv, struct v4l2_input *i) { if (i->index != 0) return -EINVAL; strcpy(i->name, "Radio"); i->type = V4L2_INPUT_TYPE_TUNER; return 0; } static int radio_g_audio(struct file *file, void *priv, struct v4l2_audio *a) { if (unlikely(a->index)) return -EINVAL; strcpy(a->name, "Radio"); return 0; } static int radio_s_tuner(struct file *file, void *priv, struct v4l2_tuner *t) { struct bttv_fh *fh = priv; struct bttv *btv = fh->btv; if (0 != t->index) return -EINVAL; bttv_call_all(btv, tuner, s_tuner, t); return 0; } static int radio_s_audio(struct file *file, void *priv, struct v4l2_audio *a) { if (unlikely(a->index)) return -EINVAL; return 0; } static int radio_s_input(struct file *filp, void *priv, unsigned int i) { if (unlikely(i)) return -EINVAL; return 0; } static int radio_s_std(struct file *file, void *fh, v4l2_std_id *norm) { return 0; } static int 
radio_queryctrl(struct file *file, void *priv, struct v4l2_queryctrl *c) { const struct v4l2_queryctrl *ctrl; if (c->id < V4L2_CID_BASE || c->id >= V4L2_CID_LASTP1) return -EINVAL; if (c->id == V4L2_CID_AUDIO_MUTE) { ctrl = ctrl_by_id(c->id); *c = *ctrl; } else *c = no_ctl; return 0; } static int radio_g_input(struct file *filp, void *priv, unsigned int *i) { *i = 0; return 0; } static ssize_t radio_read(struct file *file, char __user *data, size_t count, loff_t *ppos) { struct bttv_fh *fh = file->private_data; struct bttv *btv = fh->btv; struct saa6588_command cmd; cmd.block_count = count/3; cmd.buffer = data; cmd.instance = file; cmd.result = -ENODEV; bttv_call_all(btv, core, ioctl, SAA6588_CMD_READ, &cmd); return cmd.result; } static unsigned int radio_poll(struct file *file, poll_table *wait) { struct bttv_fh *fh = file->private_data; struct bttv *btv = fh->btv; struct saa6588_command cmd; cmd.instance = file; cmd.event_list = wait; cmd.result = -ENODEV; bttv_call_all(btv, core, ioctl, SAA6588_CMD_POLL, &cmd); return cmd.result; } static const struct v4l2_file_operations radio_fops = { .owner = THIS_MODULE, .open = radio_open, .read = radio_read, .release = radio_release, .unlocked_ioctl = video_ioctl2, .poll = radio_poll, }; static const struct v4l2_ioctl_ops radio_ioctl_ops = { .vidioc_querycap = radio_querycap, .vidioc_g_tuner = radio_g_tuner, .vidioc_enum_input = radio_enum_input, .vidioc_g_audio = radio_g_audio, .vidioc_s_tuner = radio_s_tuner, .vidioc_s_audio = radio_s_audio, .vidioc_s_input = radio_s_input, .vidioc_s_std = radio_s_std, .vidioc_queryctrl = radio_queryctrl, .vidioc_g_input = radio_g_input, .vidioc_g_ctrl = bttv_g_ctrl, .vidioc_s_ctrl = bttv_s_ctrl, .vidioc_g_frequency = bttv_g_frequency, .vidioc_s_frequency = bttv_s_frequency, }; static struct video_device radio_template = { .fops = &radio_fops, .ioctl_ops = &radio_ioctl_ops, }; /* ----------------------------------------------------------------------- */ /* some debug code */ static int 
bttv_risc_decode(u32 risc) { static char *instr[16] = { [ BT848_RISC_WRITE >> 28 ] = "write", [ BT848_RISC_SKIP >> 28 ] = "skip", [ BT848_RISC_WRITEC >> 28 ] = "writec", [ BT848_RISC_JUMP >> 28 ] = "jump", [ BT848_RISC_SYNC >> 28 ] = "sync", [ BT848_RISC_WRITE123 >> 28 ] = "write123", [ BT848_RISC_SKIP123 >> 28 ] = "skip123", [ BT848_RISC_WRITE1S23 >> 28 ] = "write1s23", }; static int incr[16] = { [ BT848_RISC_WRITE >> 28 ] = 2, [ BT848_RISC_JUMP >> 28 ] = 2, [ BT848_RISC_SYNC >> 28 ] = 2, [ BT848_RISC_WRITE123 >> 28 ] = 5, [ BT848_RISC_SKIP123 >> 28 ] = 2, [ BT848_RISC_WRITE1S23 >> 28 ] = 3, }; static char *bits[] = { "be0", "be1", "be2", "be3/resync", "set0", "set1", "set2", "set3", "clr0", "clr1", "clr2", "clr3", "irq", "res", "eol", "sol", }; int i; pr_cont("0x%08x [ %s", risc, instr[risc >> 28] ? instr[risc >> 28] : "INVALID"); for (i = ARRAY_SIZE(bits)-1; i >= 0; i--) if (risc & (1 << (i + 12))) pr_cont(" %s", bits[i]); pr_cont(" count=%d ]\n", risc & 0xfff); return incr[risc >> 28] ? incr[risc >> 28] : 1; } static void bttv_risc_disasm(struct bttv *btv, struct btcx_riscmem *risc) { unsigned int i,j,n; pr_info("%s: risc disasm: %p [dma=0x%08lx]\n", btv->c.v4l2_dev.name, risc->cpu, (unsigned long)risc->dma); for (i = 0; i < (risc->size >> 2); i += n) { pr_info("%s: 0x%lx: ", btv->c.v4l2_dev.name, (unsigned long)(risc->dma + (i<<2))); n = bttv_risc_decode(le32_to_cpu(risc->cpu[i])); for (j = 1; j < n; j++) pr_info("%s: 0x%lx: 0x%08x [ arg #%d ]\n", btv->c.v4l2_dev.name, (unsigned long)(risc->dma + ((i+j)<<2)), risc->cpu[i+j], j); if (0 == risc->cpu[i]) break; } } static void bttv_print_riscaddr(struct bttv *btv) { pr_info(" main: %08llx\n", (unsigned long long)btv->main.dma); pr_info(" vbi : o=%08llx e=%08llx\n", btv->cvbi ? (unsigned long long)btv->cvbi->top.dma : 0, btv->cvbi ? (unsigned long long)btv->cvbi->bottom.dma : 0); pr_info(" cap : o=%08llx e=%08llx\n", btv->curr.top ? (unsigned long long)btv->curr.top->top.dma : 0, btv->curr.bottom ? 
(unsigned long long)btv->curr.bottom->bottom.dma : 0); pr_info(" scr : o=%08llx e=%08llx\n", btv->screen ? (unsigned long long)btv->screen->top.dma : 0, btv->screen ? (unsigned long long)btv->screen->bottom.dma : 0); bttv_risc_disasm(btv, &btv->main); } /* ----------------------------------------------------------------------- */ /* irq handler */ static char *irq_name[] = { "FMTCHG", // format change detected (525 vs. 625) "VSYNC", // vertical sync (new field) "HSYNC", // horizontal sync "OFLOW", // chroma/luma AGC overflow "HLOCK", // horizontal lock changed "VPRES", // video presence changed "6", "7", "I2CDONE", // hw irc operation finished "GPINT", // gpio port triggered irq "10", "RISCI", // risc instruction triggered irq "FBUS", // pixel data fifo dropped data (high pci bus latencies) "FTRGT", // pixel data fifo overrun "FDSR", // fifo data stream resyncronisation "PPERR", // parity error (data transfer) "RIPERR", // parity error (read risc instructions) "PABORT", // pci abort "OCERR", // risc instruction error "SCERR", // syncronisation error }; static void bttv_print_irqbits(u32 print, u32 mark) { unsigned int i; pr_cont("bits:"); for (i = 0; i < ARRAY_SIZE(irq_name); i++) { if (print & (1 << i)) pr_cont(" %s", irq_name[i]); if (mark & (1 << i)) pr_cont("*"); } } static void bttv_irq_debug_low_latency(struct bttv *btv, u32 rc) { pr_warn("%d: irq: skipped frame [main=%lx,o_vbi=%lx,o_field=%lx,rc=%lx]\n", btv->c.nr, (unsigned long)btv->main.dma, (unsigned long)le32_to_cpu(btv->main.cpu[RISC_SLOT_O_VBI+1]), (unsigned long)le32_to_cpu(btv->main.cpu[RISC_SLOT_O_FIELD+1]), (unsigned long)rc); if (0 == (btread(BT848_DSTATUS) & BT848_DSTATUS_HLOC)) { pr_notice("%d: Oh, there (temporarily?) is no input signal. " "Ok, then this is harmless, don't worry ;)\n", btv->c.nr); return; } pr_notice("%d: Uhm. 
Looks like we have unusual high IRQ latencies\n", btv->c.nr); pr_notice("%d: Lets try to catch the culpit red-handed ...\n", btv->c.nr); dump_stack(); } static int bttv_irq_next_video(struct bttv *btv, struct bttv_buffer_set *set) { struct bttv_buffer *item; memset(set,0,sizeof(*set)); /* capture request ? */ if (!list_empty(&btv->capture)) { set->frame_irq = 1; item = list_entry(btv->capture.next, struct bttv_buffer, vb.queue); if (V4L2_FIELD_HAS_TOP(item->vb.field)) set->top = item; if (V4L2_FIELD_HAS_BOTTOM(item->vb.field)) set->bottom = item; /* capture request for other field ? */ if (!V4L2_FIELD_HAS_BOTH(item->vb.field) && (item->vb.queue.next != &btv->capture)) { item = list_entry(item->vb.queue.next, struct bttv_buffer, vb.queue); /* Mike Isely <isely@pobox.com> - Only check * and set up the bottom field in the logic * below. Don't ever do the top field. This * of course means that if we set up the * bottom field in the above code that we'll * actually skip a field. But that's OK. * Having processed only a single buffer this * time, then the next time around the first * available buffer should be for a top field. * That will then cause us here to set up a * top then a bottom field in the normal way. * The alternative to this understanding is * that we set up the second available buffer * as a top field, but that's out of order * since this driver always processes the top * field first - the effect will be the two * buffers being returned in the wrong order, * with the second buffer also being delayed * by one field time (owing to the fifo nature * of videobuf). Worse still, we'll be stuck * doing fields out of order now every time * until something else causes a field to be * dropped. By effectively forcing a field to * drop this way then we always get back into * sync within a single frame time. (Out of * order fields can screw up deinterlacing * algorithms.) 
*/ if (!V4L2_FIELD_HAS_BOTH(item->vb.field)) { if (NULL == set->bottom && V4L2_FIELD_BOTTOM == item->vb.field) { set->bottom = item; } if (NULL != set->top && NULL != set->bottom) set->top_irq = 2; } } } /* screen overlay ? */ if (NULL != btv->screen) { if (V4L2_FIELD_HAS_BOTH(btv->screen->vb.field)) { if (NULL == set->top && NULL == set->bottom) { set->top = btv->screen; set->bottom = btv->screen; } } else { if (V4L2_FIELD_TOP == btv->screen->vb.field && NULL == set->top) { set->top = btv->screen; } if (V4L2_FIELD_BOTTOM == btv->screen->vb.field && NULL == set->bottom) { set->bottom = btv->screen; } } } dprintk("%d: next set: top=%p bottom=%p [screen=%p,irq=%d,%d]\n", btv->c.nr, set->top, set->bottom, btv->screen, set->frame_irq, set->top_irq); return 0; } static void bttv_irq_wakeup_video(struct bttv *btv, struct bttv_buffer_set *wakeup, struct bttv_buffer_set *curr, unsigned int state) { struct timeval ts; do_gettimeofday(&ts); if (wakeup->top == wakeup->bottom) { if (NULL != wakeup->top && curr->top != wakeup->top) { if (irq_debug > 1) pr_debug("%d: wakeup: both=%p\n", btv->c.nr, wakeup->top); wakeup->top->vb.ts = ts; wakeup->top->vb.field_count = btv->field_count; wakeup->top->vb.state = state; wake_up(&wakeup->top->vb.done); } } else { if (NULL != wakeup->top && curr->top != wakeup->top) { if (irq_debug > 1) pr_debug("%d: wakeup: top=%p\n", btv->c.nr, wakeup->top); wakeup->top->vb.ts = ts; wakeup->top->vb.field_count = btv->field_count; wakeup->top->vb.state = state; wake_up(&wakeup->top->vb.done); } if (NULL != wakeup->bottom && curr->bottom != wakeup->bottom) { if (irq_debug > 1) pr_debug("%d: wakeup: bottom=%p\n", btv->c.nr, wakeup->bottom); wakeup->bottom->vb.ts = ts; wakeup->bottom->vb.field_count = btv->field_count; wakeup->bottom->vb.state = state; wake_up(&wakeup->bottom->vb.done); } } } static void bttv_irq_wakeup_vbi(struct bttv *btv, struct bttv_buffer *wakeup, unsigned int state) { struct timeval ts; if (NULL == wakeup) return; 
	do_gettimeofday(&ts);
	wakeup->vb.ts = ts;
	wakeup->vb.field_count = btv->field_count;
	wakeup->vb.state = state;
	wake_up(&wakeup->vb.done);
}

/*
 * DMA watchdog, run from the btv->timeout kernel timer (timer context,
 * hence spin_lock_irqsave on s_lock).  Deactivates video and VBI DMA,
 * completes the currently active buffers with VIDEOBUF_ERROR and fails
 * every still-queued capture/vbi request, so that no waiter sleeps
 * forever when the hardware stops delivering interrupts.
 */
static void bttv_irq_timeout(unsigned long data)
{
	struct bttv *btv = (struct bttv *)data;
	struct bttv_buffer_set old,new;
	struct bttv_buffer *ovbi;
	struct bttv_buffer *item;
	unsigned long flags;

	if (bttv_verbose) {
		pr_info("%d: timeout: drop=%d irq=%d/%d, risc=%08x, ",
			btv->c.nr, btv->framedrop, btv->irq_me,
			btv->irq_total, btread(BT848_RISC_COUNT));
		bttv_print_irqbits(btread(BT848_INT_STAT),0);
		pr_cont("\n");
	}

	spin_lock_irqsave(&btv->s_lock,flags);

	/* deactivate stuff: program an empty buffer set and stop DMA */
	memset(&new,0,sizeof(new));
	old = btv->curr;
	ovbi = btv->cvbi;
	btv->curr = new;
	btv->cvbi = NULL;
	btv->loop_irq = 0;
	bttv_buffer_activate_video(btv, &new);
	bttv_buffer_activate_vbi(btv, NULL);
	bttv_set_dma(btv, 0);

	/* wake up the buffers that were in flight, flagged as errors */
	bttv_irq_wakeup_video(btv, &old, &new, VIDEOBUF_ERROR);
	bttv_irq_wakeup_vbi(btv, ovbi, VIDEOBUF_ERROR);

	/* cancel all outstanding capture / vbi requests */
	while (!list_empty(&btv->capture)) {
		item = list_entry(btv->capture.next, struct bttv_buffer, vb.queue);
		list_del(&item->vb.queue);
		item->vb.state = VIDEOBUF_ERROR;
		wake_up(&item->vb.done);
	}
	while (!list_empty(&btv->vcapture)) {
		item = list_entry(btv->vcapture.next, struct bttv_buffer, vb.queue);
		list_del(&item->vb.queue);
		item->vb.state = VIDEOBUF_ERROR;
		wake_up(&item->vb.done);
	}

	btv->errors++;
	spin_unlock_irqrestore(&btv->s_lock,flags);
}

/*
 * Complete the currently active top-field buffer.  Called from
 * bttv_irq() when a RISC interrupt reports status nibble bit (2<<28);
 * the odd-field RISC slot is unhooked before the buffer is marked
 * VIDEOBUF_DONE.  Runs in hard-irq context, so plain spin_lock on
 * s_lock is sufficient here.
 */
static void
bttv_irq_wakeup_top(struct bttv *btv)
{
	struct bttv_buffer *wakeup = btv->curr.top;

	if (NULL == wakeup)
		return;

	spin_lock(&btv->s_lock);
	btv->curr.top_irq = 0;
	btv->curr.top = NULL;
	bttv_risc_hook(btv, RISC_SLOT_O_FIELD, NULL, 0);

	do_gettimeofday(&wakeup->vb.ts);
	wakeup->vb.field_count = btv->field_count;
	wakeup->vb.state = VIDEOBUF_DONE;
	wake_up(&wakeup->vb.done);
	spin_unlock(&btv->s_lock);
}

/*
 * Is the RISC program counter 'rc' currently inside the given RISC
 * memory block?  Used by the buffer-switch paths to detect that DMA is
 * still busy with a buffer we would otherwise complete.
 */
static inline int is_active(struct btcx_riscmem *risc, u32 rc)
{
	if (rc < risc->dma)
		return 0;
	if (rc > risc->dma + risc->size)
		return 0;
	return 1;
}

/*
 * RISC-interrupt path for video capture: build the next buffer set,
 * and if the RISC program counter is still inside one of the currently
 * active buffers treat it as a skipped frame (IRQ latency too high) —
 * count it and bail out without switching.  Otherwise activate the new
 * set, perform any deferred input switch, and complete the finished
 * buffers.  Runs in hard-irq context under s_lock.
 */
static void
bttv_irq_switch_video(struct bttv *btv)
{
	struct bttv_buffer_set new;
	struct bttv_buffer_set old;
	dma_addr_t rc;

	spin_lock(&btv->s_lock);

	/* new buffer set */
	bttv_irq_next_video(btv, &new);
	rc = btread(BT848_RISC_COUNT);
	if ((btv->curr.top && is_active(&btv->curr.top->top, rc)) ||
	    (btv->curr.bottom && is_active(&btv->curr.bottom->bottom, rc))) {
		btv->framedrop++;
		if (debug_latency)
			bttv_irq_debug_low_latency(btv, rc);
		spin_unlock(&btv->s_lock);
		return;
	}

	/* switch over */
	old = btv->curr;
	btv->curr = new;
	btv->loop_irq &= ~1;
	bttv_buffer_activate_video(btv, &new);
	bttv_set_dma(btv, 0);

	/* switch input (deferred from the ioctl path to vblank) */
	if (UNSET != btv->new_input) {
		video_mux(btv,btv->new_input);
		btv->new_input = UNSET;
	}

	/* wake up finished buffers */
	bttv_irq_wakeup_video(btv, &old, &new, VIDEOBUF_DONE);
	spin_unlock(&btv->s_lock);
}

/*
 * RISC-interrupt path for VBI capture: same skip-detection scheme as
 * bttv_irq_switch_video(), but for the single cvbi buffer and the
 * vcapture queue.
 */
static void
bttv_irq_switch_vbi(struct bttv *btv)
{
	struct bttv_buffer *new = NULL;
	struct bttv_buffer *old;
	u32 rc;

	spin_lock(&btv->s_lock);
	if (!list_empty(&btv->vcapture))
		new = list_entry(btv->vcapture.next, struct bttv_buffer, vb.queue);
	old = btv->cvbi;

	rc = btread(BT848_RISC_COUNT);
	if (NULL != old && (is_active(&old->top, rc) ||
			    is_active(&old->bottom, rc))) {
		btv->framedrop++;
		if (debug_latency)
			bttv_irq_debug_low_latency(btv, rc);
		spin_unlock(&btv->s_lock);
		return;
	}

	/* switch */
	btv->cvbi = new;
	btv->loop_irq &= ~4;
	bttv_buffer_activate_vbi(btv, new);
	bttv_set_dma(btv, 0);

	bttv_irq_wakeup_vbi(btv, old, VIDEOBUF_DONE);
	spin_unlock(&btv->s_lock);
}

/*
 * Main (shared) interrupt handler.  Loops reading and acknowledging
 * BT848_INT_STAT until no enabled interrupt bits remain, dispatching
 * VSYNC field counting, GPIO/remote, I2C completion, the RISC buffer
 * switch paths above, and automute.  The loop counter guards against
 * IRQ storms (mask is progressively cleared further down).
 */
static irqreturn_t bttv_irq(int irq, void *dev_id)
{
	u32 stat,astat;
	u32 dstat;
	int count;
	struct bttv *btv;
	int handled = 0;

	btv=(struct bttv *)dev_id;
	count=0;

	while (1) {
		/* get/clear interrupt status bits */
		stat=btread(BT848_INT_STAT);
		astat=stat&btread(BT848_INT_MASK);
		if (!astat)
			break;
		handled = 1;
		btwrite(stat,BT848_INT_STAT);

		/* get device status bits */
		dstat=btread(BT848_DSTATUS);

		if (irq_debug) {
			pr_debug("%d: irq loop=%d fc=%d riscs=%x, riscc=%08x, ",
				 btv->c.nr, count,
btv->field_count, stat>>28, btread(BT848_RISC_COUNT)); bttv_print_irqbits(stat,astat); if (stat & BT848_INT_HLOCK) pr_cont(" HLOC => %s", dstat & BT848_DSTATUS_HLOC ? "yes" : "no"); if (stat & BT848_INT_VPRES) pr_cont(" PRES => %s", dstat & BT848_DSTATUS_PRES ? "yes" : "no"); if (stat & BT848_INT_FMTCHG) pr_cont(" NUML => %s", dstat & BT848_DSTATUS_NUML ? "625" : "525"); pr_cont("\n"); } if (astat&BT848_INT_VSYNC) btv->field_count++; if ((astat & BT848_INT_GPINT) && btv->remote) { bttv_input_irq(btv); } if (astat & BT848_INT_I2CDONE) { btv->i2c_done = stat; wake_up(&btv->i2c_queue); } if ((astat & BT848_INT_RISCI) && (stat & (4<<28))) bttv_irq_switch_vbi(btv); if ((astat & BT848_INT_RISCI) && (stat & (2<<28))) bttv_irq_wakeup_top(btv); if ((astat & BT848_INT_RISCI) && (stat & (1<<28))) bttv_irq_switch_video(btv); if ((astat & BT848_INT_HLOCK) && btv->opt_automute) audio_mute(btv, btv->mute); /* trigger automute */ if (astat & (BT848_INT_SCERR|BT848_INT_OCERR)) { pr_info("%d: %s%s @ %08x,", btv->c.nr, (astat & BT848_INT_SCERR) ? "SCERR" : "", (astat & BT848_INT_OCERR) ? 
"OCERR" : "", btread(BT848_RISC_COUNT)); bttv_print_irqbits(stat,astat); pr_cont("\n"); if (bttv_debug) bttv_print_riscaddr(btv); } if (fdsr && astat & BT848_INT_FDSR) { pr_info("%d: FDSR @ %08x\n", btv->c.nr, btread(BT848_RISC_COUNT)); if (bttv_debug) bttv_print_riscaddr(btv); } count++; if (count > 4) { if (count > 8 || !(astat & BT848_INT_GPINT)) { btwrite(0, BT848_INT_MASK); pr_err("%d: IRQ lockup, cleared int mask [", btv->c.nr); } else { pr_err("%d: IRQ lockup, clearing GPINT from int mask [", btv->c.nr); btwrite(btread(BT848_INT_MASK) & (-1 ^ BT848_INT_GPINT), BT848_INT_MASK); }; bttv_print_irqbits(stat,astat); pr_cont("]\n"); } } btv->irq_total++; if (handled) btv->irq_me++; return IRQ_RETVAL(handled); } /* ----------------------------------------------------------------------- */ /* initialitation */ static struct video_device *vdev_init(struct bttv *btv, const struct video_device *template, const char *type_name) { struct video_device *vfd; vfd = video_device_alloc(); if (NULL == vfd) return NULL; *vfd = *template; vfd->v4l2_dev = &btv->c.v4l2_dev; vfd->release = video_device_release; vfd->debug = bttv_debug; video_set_drvdata(vfd, btv); snprintf(vfd->name, sizeof(vfd->name), "BT%d%s %s (%s)", btv->id, (btv->id==848 && btv->revision==0x12) ? 
"A" : "", type_name, bttv_tvcards[btv->c.type].name); return vfd; } static void bttv_unregister_video(struct bttv *btv) { if (btv->video_dev) { if (video_is_registered(btv->video_dev)) video_unregister_device(btv->video_dev); else video_device_release(btv->video_dev); btv->video_dev = NULL; } if (btv->vbi_dev) { if (video_is_registered(btv->vbi_dev)) video_unregister_device(btv->vbi_dev); else video_device_release(btv->vbi_dev); btv->vbi_dev = NULL; } if (btv->radio_dev) { if (video_is_registered(btv->radio_dev)) video_unregister_device(btv->radio_dev); else video_device_release(btv->radio_dev); btv->radio_dev = NULL; } } /* register video4linux devices */ static int __devinit bttv_register_video(struct bttv *btv) { if (no_overlay > 0) pr_notice("Overlay support disabled\n"); /* video */ btv->video_dev = vdev_init(btv, &bttv_video_template, "video"); if (NULL == btv->video_dev) goto err; if (video_register_device(btv->video_dev, VFL_TYPE_GRABBER, video_nr[btv->c.nr]) < 0) goto err; pr_info("%d: registered device %s\n", btv->c.nr, video_device_node_name(btv->video_dev)); if (device_create_file(&btv->video_dev->dev, &dev_attr_card)<0) { pr_err("%d: device_create_file 'card' failed\n", btv->c.nr); goto err; } /* vbi */ btv->vbi_dev = vdev_init(btv, &bttv_video_template, "vbi"); if (NULL == btv->vbi_dev) goto err; if (video_register_device(btv->vbi_dev, VFL_TYPE_VBI, vbi_nr[btv->c.nr]) < 0) goto err; pr_info("%d: registered device %s\n", btv->c.nr, video_device_node_name(btv->vbi_dev)); if (!btv->has_radio) return 0; /* radio */ btv->radio_dev = vdev_init(btv, &radio_template, "radio"); if (NULL == btv->radio_dev) goto err; if (video_register_device(btv->radio_dev, VFL_TYPE_RADIO, radio_nr[btv->c.nr]) < 0) goto err; pr_info("%d: registered device %s\n", btv->c.nr, video_device_node_name(btv->radio_dev)); /* all done */ return 0; err: bttv_unregister_video(btv); return -1; } /* on OpenFirmware machines (PowerMac at least), PCI memory cycle */ /* response on cards with 
no firmware is not enabled by OF */
static void pci_set_command(struct pci_dev *dev)
{
#if defined(__powerpc__)
	unsigned int cmd;

	/* Ensure memory-space decoding is on; OpenFirmware may leave
	 * PCI_COMMAND_MEMORY clear on cards without their own firmware. */
	pci_read_config_dword(dev, PCI_COMMAND, &cmd);
	cmd = (cmd | PCI_COMMAND_MEMORY );
	pci_write_config_dword(dev, PCI_COMMAND, cmd);
#endif
}

/*
 * PCI probe: allocate one struct bttv per card, initialize its locks,
 * queues, watchdog timer and defaults, then (on the following lines)
 * enable the PCI device, map the MMIO window, hook the IRQ and bring
 * up i2c/tuner/V4L2 device nodes.
 *
 * NOTE(review): the early failure paths (pci_enable_device /
 * pci_set_dma_mask / request_mem_region, further down) return without
 * freeing btv or clearing bttvs[bttv_num] — verify against
 * bttv_remove() whether this leak is handled elsewhere.
 */
static int __devinit bttv_probe(struct pci_dev *dev,
				const struct pci_device_id *pci_id)
{
	int result;
	unsigned char lat;
	struct bttv *btv;

	if (bttv_num == BTTV_MAX)
		return -ENOMEM;
	pr_info("Bt8xx card found (%d)\n", bttv_num);
	bttvs[bttv_num] = btv = kzalloc(sizeof(*btv), GFP_KERNEL);
	if (btv == NULL) {
		pr_err("out of memory\n");
		return -ENOMEM;
	}
	btv->c.nr = bttv_num;
	snprintf(btv->c.v4l2_dev.name, sizeof(btv->c.v4l2_dev.name),
			"bttv%d", btv->c.nr);

	/* initialize structs / fill in defaults */
	mutex_init(&btv->lock);
	spin_lock_init(&btv->s_lock);
	spin_lock_init(&btv->gpio_lock);
	init_waitqueue_head(&btv->i2c_queue);
	INIT_LIST_HEAD(&btv->c.subs);
	INIT_LIST_HEAD(&btv->capture);
	INIT_LIST_HEAD(&btv->vcapture);
	v4l2_prio_init(&btv->prio);

	/* DMA watchdog; data carries the bttv instance */
	init_timer(&btv->timeout);
	btv->timeout.function = bttv_irq_timeout;
	btv->timeout.data = (unsigned long)btv;

	btv->i2c_rc = -1;
	btv->tuner_type = UNSET;
	btv->new_input = UNSET;
	btv->has_radio=radio[btv->c.nr];

	/* pci stuff (init, get irq/mmio, ...
*/ btv->c.pci = dev; btv->id = dev->device; if (pci_enable_device(dev)) { pr_warn("%d: Can't enable device\n", btv->c.nr); return -EIO; } if (pci_set_dma_mask(dev, DMA_BIT_MASK(32))) { pr_warn("%d: No suitable DMA available\n", btv->c.nr); return -EIO; } if (!request_mem_region(pci_resource_start(dev,0), pci_resource_len(dev,0), btv->c.v4l2_dev.name)) { pr_warn("%d: can't request iomem (0x%llx)\n", btv->c.nr, (unsigned long long)pci_resource_start(dev, 0)); return -EBUSY; } pci_set_master(dev); pci_set_command(dev); result = v4l2_device_register(&dev->dev, &btv->c.v4l2_dev); if (result < 0) { pr_warn("%d: v4l2_device_register() failed\n", btv->c.nr); goto fail0; } btv->revision = dev->revision; pci_read_config_byte(dev, PCI_LATENCY_TIMER, &lat); pr_info("%d: Bt%d (rev %d) at %s, irq: %d, latency: %d, mmio: 0x%llx\n", bttv_num, btv->id, btv->revision, pci_name(dev), btv->c.pci->irq, lat, (unsigned long long)pci_resource_start(dev, 0)); schedule(); btv->bt848_mmio = ioremap(pci_resource_start(dev, 0), 0x1000); if (NULL == btv->bt848_mmio) { pr_err("%d: ioremap() failed\n", btv->c.nr); result = -EIO; goto fail1; } /* identify card */ bttv_idcard(btv); /* disable irqs, register irq handler */ btwrite(0, BT848_INT_MASK); result = request_irq(btv->c.pci->irq, bttv_irq, IRQF_SHARED | IRQF_DISABLED, btv->c.v4l2_dev.name, (void *)btv); if (result < 0) { pr_err("%d: can't get IRQ %d\n", bttv_num, btv->c.pci->irq); goto fail1; } if (0 != bttv_handle_chipset(btv)) { result = -EIO; goto fail2; } /* init options from insmod args */ btv->opt_combfilter = combfilter; btv->opt_lumafilter = lumafilter; btv->opt_automute = automute; btv->opt_chroma_agc = chroma_agc; btv->opt_adc_crush = adc_crush; btv->opt_vcr_hack = vcr_hack; btv->opt_whitecrush_upper = whitecrush_upper; btv->opt_whitecrush_lower = whitecrush_lower; btv->opt_uv_ratio = uv_ratio; btv->opt_full_luma_range = full_luma_range; btv->opt_coring = coring; /* fill struct bttv with some useful defaults */ btv->init.btv = btv; 
	/*
	 * NOTE(review): this range begins inside bttv_probe(); the function's
	 * opening (and the setup matched by the fail2/fail1/fail0 unwind
	 * labels below) is outside this chunk.  Code is unchanged.
	 */
	btv->init.ov.w.width = 320;
	btv->init.ov.w.height = 240;
	btv->init.fmt = format_by_fourcc(V4L2_PIX_FMT_BGR24);
	btv->init.width = 320;
	btv->init.height = 240;
	btv->input = 0;

	/* initialize hardware */
	if (bttv_gpio)
		bttv_gpio_tracking(btv,"pre-init");

	bttv_risc_init_main(btv);
	init_bt848(btv);

	/* gpio */
	btwrite(0x00, BT848_GPIO_REG_INP);
	btwrite(0x00, BT848_GPIO_OUT_EN);
	if (bttv_verbose)
		bttv_gpio_tracking(btv,"init");

	/* needs to be done before i2c is registered */
	bttv_init_card1(btv);

	/* register i2c + gpio */
	init_bttv_i2c(btv);

	/* some card-specific stuff (needs working i2c) */
	bttv_init_card2(btv);
	bttv_init_tuner(btv);
	init_irqreg(btv);

	/* register video4linux + input */
	if (!bttv_tvcards[btv->c.type].no_video) {
		bttv_register_video(btv);
		/* mid-scale defaults for the picture controls */
		bt848_bright(btv,32768);
		bt848_contrast(btv,32768);
		bt848_hue(btv,32768);
		bt848_sat(btv,32768);
		audio_mute(btv, 1);
		set_input(btv, 0, btv->tvnorm);
		bttv_crop_reset(&btv->crop[0], btv->tvnorm);
		btv->crop[1] = btv->crop[0]; /* current = default */
		disclaim_vbi_lines(btv);
		disclaim_video_lines(btv);
	}

	/* add subdevices and autoload dvb-bt8xx if needed */
	if (bttv_tvcards[btv->c.type].has_dvb) {
		bttv_sub_add_device(&btv->c, "dvb");
		request_modules(btv);
	}

	if (!disable_ir) {
		init_bttv_i2c_ir(btv);
		bttv_input_init(btv);
	}

	/* everything is fine */
	bttv_num++;
	return 0;

	/* error unwind for bttv_probe(); jumped to from code above this chunk */
fail2:
	free_irq(btv->c.pci->irq,btv);
fail1:
	v4l2_device_unregister(&btv->c.v4l2_dev);
fail0:
	if (btv->bt848_mmio)
		iounmap(btv->bt848_mmio);
	release_mem_region(pci_resource_start(btv->c.pci,0),
			   pci_resource_len(btv->c.pci,0));
	return result;
}

/*
 * Undo bttv_probe(): quiesce the chip (DMA + IRQs off), then tear the
 * subsystems down in reverse registration order and release PCI resources.
 */
static void __devexit bttv_remove(struct pci_dev *pci_dev)
{
	struct v4l2_device *v4l2_dev = pci_get_drvdata(pci_dev);
	struct bttv *btv = to_bttv(v4l2_dev);

	if (bttv_verbose)
		pr_info("%d: unloading\n", btv->c.nr);

	if (bttv_tvcards[btv->c.type].has_dvb)
		flush_request_modules(btv);

	/* shutdown everything (DMA+IRQs) */
	btand(~15, BT848_GPIO_DMA_CTL);
	btwrite(0, BT848_INT_MASK);
	btwrite(~0x0, BT848_INT_STAT);
	btwrite(0x0, BT848_GPIO_OUT_EN);
	if (bttv_gpio)
		bttv_gpio_tracking(btv,"cleanup");

	/* tell gpio modules we are leaving ... */
	btv->shutdown=1;
	bttv_input_fini(btv);
	bttv_sub_del_devices(&btv->c);

	/* unregister i2c_bus + input */
	fini_bttv_i2c(btv);

	/* unregister video4linux */
	bttv_unregister_video(btv);

	/* free allocated memory */
	btcx_riscmem_free(btv->c.pci,&btv->main);

	/* free ressources */
	free_irq(btv->c.pci->irq,btv);
	iounmap(btv->bt848_mmio);
	release_mem_region(pci_resource_start(btv->c.pci,0),
			   pci_resource_len(btv->c.pci,0));

	v4l2_device_unregister(&btv->c.v4l2_dev);
	bttvs[btv->c.nr] = NULL;
	kfree(btv);

	return;
}

#ifdef CONFIG_PM
/*
 * Suspend: stop DMA and IRQs under the state lock, stash the active
 * buffer/IRQ state and GPIO registers in btv->state, then save PCI state
 * and enter the requested power state.
 */
static int bttv_suspend(struct pci_dev *pci_dev, pm_message_t state)
{
	struct v4l2_device *v4l2_dev = pci_get_drvdata(pci_dev);
	struct bttv *btv = to_bttv(v4l2_dev);
	struct bttv_buffer_set idle;
	unsigned long flags;

	dprintk("%d: suspend %d\n", btv->c.nr, state.event);

	/* stop dma + irqs */
	spin_lock_irqsave(&btv->s_lock,flags);
	memset(&idle, 0, sizeof(idle));
	btv->state.video = btv->curr;
	btv->state.vbi = btv->cvbi;
	btv->state.loop_irq = btv->loop_irq;
	btv->curr = idle;
	btv->loop_irq = 0;
	bttv_buffer_activate_video(btv, &idle);
	bttv_buffer_activate_vbi(btv, NULL);
	bttv_set_dma(btv, 0);
	btwrite(0, BT848_INT_MASK);
	spin_unlock_irqrestore(&btv->s_lock,flags);

	/* save bt878 state */
	btv->state.gpio_enable = btread(BT848_GPIO_OUT_EN);
	btv->state.gpio_data = gpio_read();

	/* save pci state */
	pci_save_state(pci_dev);
	if (0 != pci_set_power_state(pci_dev, pci_choose_state(pci_dev, state))) {
		/* device refused the low-power state: disable it instead and
		 * remember that resume must re-enable it */
		pci_disable_device(pci_dev);
		btv->state.disabled = 1;
	}
	return 0;
}

/*
 * Resume: re-enable the PCI device if suspend disabled it, restore PCI
 * and bt878 register state saved by bttv_suspend(), and restart DMA with
 * the previously active buffers.
 */
static int bttv_resume(struct pci_dev *pci_dev)
{
	struct v4l2_device *v4l2_dev = pci_get_drvdata(pci_dev);
	struct bttv *btv = to_bttv(v4l2_dev);
	unsigned long flags;
	int err;

	dprintk("%d: resume\n", btv->c.nr);

	/* restore pci state */
	if (btv->state.disabled) {
		err=pci_enable_device(pci_dev);
		if (err) {
			pr_warn("%d: Can't enable device\n", btv->c.nr);
			return err;
		}
		btv->state.disabled = 0;
	}
	err=pci_set_power_state(pci_dev, PCI_D0);
	if (err) {
		pci_disable_device(pci_dev);
		pr_warn("%d: Can't enable device\n", btv->c.nr);
		btv->state.disabled = 1;
		return err;
	}
	pci_restore_state(pci_dev);

	/* restore bt878 state */
	bttv_reinit_bt848(btv);
	gpio_inout(0xffffff, btv->state.gpio_enable);
	gpio_write(btv->state.gpio_data);

	/* restart dma */
	spin_lock_irqsave(&btv->s_lock,flags);
	btv->curr = btv->state.video;
	btv->cvbi = btv->state.vbi;
	btv->loop_irq = btv->state.loop_irq;
	bttv_buffer_activate_video(btv, &btv->curr);
	bttv_buffer_activate_vbi(btv, btv->cvbi);
	bttv_set_dma(btv, 0);
	spin_unlock_irqrestore(&btv->s_lock,flags);
	return 0;
}
#endif

/* PCI IDs of all Brooktree chips handled by this driver */
static struct pci_device_id bttv_pci_tbl[] = {
	{PCI_VDEVICE(BROOKTREE, PCI_DEVICE_ID_BT848), 0},
	{PCI_VDEVICE(BROOKTREE, PCI_DEVICE_ID_BT849), 0},
	{PCI_VDEVICE(BROOKTREE, PCI_DEVICE_ID_BT878), 0},
	{PCI_VDEVICE(BROOKTREE, PCI_DEVICE_ID_BT879), 0},
	{PCI_VDEVICE(BROOKTREE, PCI_DEVICE_ID_FUSION879), 0},
	{0,}
};

MODULE_DEVICE_TABLE(pci, bttv_pci_tbl);

static struct pci_driver bttv_pci_driver = {
	.name = "bttv",
	.id_table = bttv_pci_tbl,
	.probe = bttv_probe,
	.remove = __devexit_p(bttv_remove),
#ifdef CONFIG_PM
	.suspend = bttv_suspend,
	.resume = bttv_resume,
#endif
};

/*
 * Module init: clamp the gbuffers/gbufsize module parameters to sane
 * ranges, then register the bttv sub-device bus and the PCI driver.
 * The bus is unregistered again if PCI registration fails.
 */
static int __init bttv_init_module(void)
{
	int ret;

	bttv_num = 0;

	pr_info("driver version %s loaded\n", BTTV_VERSION);
	if (gbuffers < 2 || gbuffers > VIDEO_MAX_FRAME)
		gbuffers = 2;
	if (gbufsize > BTTV_MAX_FBUF)
		gbufsize = BTTV_MAX_FBUF;
	/* round the capture buffer size up to a whole number of pages */
	gbufsize = (gbufsize + PAGE_SIZE - 1) & PAGE_MASK;
	if (bttv_verbose)
		pr_info("using %d buffers with %dk (%d pages) each for capture\n",
			gbuffers, gbufsize >> 10, gbufsize >> PAGE_SHIFT);

	bttv_check_chipset();

	ret = bus_register(&bttv_sub_bus_type);
	if (ret < 0) {
		pr_warn("bus_register error: %d\n", ret);
		return ret;
	}
	ret = pci_register_driver(&bttv_pci_driver);
	if (ret < 0)
		bus_unregister(&bttv_sub_bus_type);

	return ret;
}

/* Module exit: reverse of bttv_init_module() */
static void __exit bttv_cleanup_module(void)
{
	pci_unregister_driver(&bttv_pci_driver);
	bus_unregister(&bttv_sub_bus_type);
}

module_init(bttv_init_module);
module_exit(bttv_cleanup_module);

/*
 * Local variables:
 * c-basic-offset: 8
 * End:
 */
gpl-2.0
widz4rd/WIDzard-A850K
drivers/net/wireless/ath/ath5k/debug.c
4776
31602
/* * Copyright (c) 2007-2008 Bruno Randolf <bruno@thinktube.com> * * This file is free software: you may copy, redistribute and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation, either version 2 of the License, or (at your * option) any later version. * * This file is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. * * * This file incorporates work covered by the following copyright and * permission notice: * * Copyright (c) 2002-2005 Sam Leffler, Errno Consulting * Copyright (c) 2004-2005 Atheros Communications, Inc. * Copyright (c) 2006 Devicescape Software, Inc. * Copyright (c) 2007 Jiri Slaby <jirislaby@gmail.com> * Copyright (c) 2007 Luis R. Rodriguez <mcgrof@winlab.rutgers.edu> * * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any * redistribution must be conditioned upon including a substantially * similar Disclaimer requirement for further binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. 
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGES.
 */

#include <linux/export.h>
#include <linux/moduleparam.h>
#include <linux/seq_file.h>
#include <linux/list.h>
#include "debug.h"
#include "ath5k.h"
#include "reg.h"
#include "base.h"

/* initial debug-level bitmask, settable as module parameter "debug" */
static unsigned int ath5k_debug;
module_param_named(debug, ath5k_debug, uint, 0);


/* debugfs: registers */

/* name/address pair used to label a hardware register in the dump */
struct reg {
	const char *name;
	int addr;
};

#define REG_STRUCT_INIT(r) { #r, r }

/* just a few random registers, might want to add more */
static const struct reg regs[] = {
	REG_STRUCT_INIT(AR5K_CR),
	REG_STRUCT_INIT(AR5K_RXDP),
	REG_STRUCT_INIT(AR5K_CFG),
	REG_STRUCT_INIT(AR5K_IER),
	REG_STRUCT_INIT(AR5K_BCR),
	REG_STRUCT_INIT(AR5K_RTSD0),
	REG_STRUCT_INIT(AR5K_RTSD1),
	REG_STRUCT_INIT(AR5K_TXCFG),
	REG_STRUCT_INIT(AR5K_RXCFG),
	REG_STRUCT_INIT(AR5K_RXJLA),
	REG_STRUCT_INIT(AR5K_MIBC),
	REG_STRUCT_INIT(AR5K_TOPS),
	REG_STRUCT_INIT(AR5K_RXNOFRM),
	REG_STRUCT_INIT(AR5K_TXNOFRM),
	REG_STRUCT_INIT(AR5K_RPGTO),
	REG_STRUCT_INIT(AR5K_RFCNT),
	REG_STRUCT_INIT(AR5K_MISC),
	REG_STRUCT_INIT(AR5K_QCUDCU_CLKGT),
	REG_STRUCT_INIT(AR5K_ISR),
	REG_STRUCT_INIT(AR5K_PISR),
	REG_STRUCT_INIT(AR5K_SISR0),
	REG_STRUCT_INIT(AR5K_SISR1),
	REG_STRUCT_INIT(AR5K_SISR2),
	REG_STRUCT_INIT(AR5K_SISR3),
	REG_STRUCT_INIT(AR5K_SISR4),
	REG_STRUCT_INIT(AR5K_IMR),
	REG_STRUCT_INIT(AR5K_PIMR),
	REG_STRUCT_INIT(AR5K_SIMR0),
	REG_STRUCT_INIT(AR5K_SIMR1),
	REG_STRUCT_INIT(AR5K_SIMR2),
	REG_STRUCT_INIT(AR5K_SIMR3),
	REG_STRUCT_INIT(AR5K_SIMR4),
	REG_STRUCT_INIT(AR5K_DCM_ADDR),
	REG_STRUCT_INIT(AR5K_DCCFG),
	REG_STRUCT_INIT(AR5K_CCFG),
	REG_STRUCT_INIT(AR5K_CPC0),
	REG_STRUCT_INIT(AR5K_CPC1),
	REG_STRUCT_INIT(AR5K_CPC2),
	REG_STRUCT_INIT(AR5K_CPC3),
	REG_STRUCT_INIT(AR5K_CPCOVF),
	REG_STRUCT_INIT(AR5K_RESET_CTL),
	REG_STRUCT_INIT(AR5K_SLEEP_CTL),
	REG_STRUCT_INIT(AR5K_INTPEND),
	REG_STRUCT_INIT(AR5K_SFR),
	REG_STRUCT_INIT(AR5K_PCICFG),
	REG_STRUCT_INIT(AR5K_GPIOCR),
	REG_STRUCT_INIT(AR5K_GPIODO),
	REG_STRUCT_INIT(AR5K_SREV),
};

/* seq_file iterator: position *pos indexes into regs[] */
static void *reg_start(struct seq_file *seq, loff_t *pos)
{
	return *pos < ARRAY_SIZE(regs) ? (void *)&regs[*pos] : NULL;
}

static void reg_stop(struct seq_file *seq, void *p)
{
	/* nothing to do */
}

static void *reg_next(struct seq_file *seq, void *p, loff_t *pos)
{
	++*pos;
	return *pos < ARRAY_SIZE(regs) ? (void *)&regs[*pos] : NULL;
}

/* print one register: name followed by its current hardware value */
static int reg_show(struct seq_file *seq, void *p)
{
	struct ath5k_hw *ah = seq->private;
	struct reg *r = p;
	seq_printf(seq, "%-25s0x%08x\n", r->name,
		ath5k_hw_reg_read(ah, r->addr));
	return 0;
}

static const struct seq_operations register_seq_ops = {
	.start = reg_start,
	.next = reg_next,
	.stop = reg_stop,
	.show = reg_show
};

/* open hook: attach the per-device ath5k_hw to the seq_file */
static int open_file_registers(struct inode *inode, struct file *file)
{
	struct seq_file *s;
	int res;
	res = seq_open(file, &register_seq_ops);
	if (res == 0) {
		s = file->private_data;
		s->private = inode->i_private;
	}
	return res;
}

static const struct file_operations fops_registers = {
	.open = open_file_registers,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
	.owner = THIS_MODULE,
};


/* debugfs: beacons */

/* dump beacon-related registers and the 64-bit TSF (continues past this
 * chunk boundary) */
static ssize_t read_file_beacon(struct file *file, char __user *user_buf,
				   size_t count, loff_t *ppos)
{
	struct ath5k_hw *ah = file->private_data;
	char buf[500];
	unsigned int len = 0;
	unsigned int v;
	u64 tsf;

	v = ath5k_hw_reg_read(ah, AR5K_BEACON);
	len += snprintf(buf + len, sizeof(buf) - len,
		"%-24s0x%08x\tintval: %d\tTIM: 0x%x\n",
		"AR5K_BEACON", v, v & AR5K_BEACON_PERIOD,
		(v & AR5K_BEACON_TIM) >> AR5K_BEACON_TIM_S);

	len += snprintf(buf + len, sizeof(buf) - len, "%-24s0x%08x\n",
		"AR5K_LAST_TSTP", ath5k_hw_reg_read(ah, AR5K_LAST_TSTP));

	len += snprintf(buf + len, sizeof(buf) - len, "%-24s0x%08x\n\n",
		"AR5K_BEACON_CNT", ath5k_hw_reg_read(ah, AR5K_BEACON_CNT));

	v = ath5k_hw_reg_read(ah, AR5K_TIMER0);
	len += snprintf(buf + len, sizeof(buf) - len, "%-24s0x%08x\tTU: %08x\n",
		"AR5K_TIMER0 (TBTT)", v, v);

	v = ath5k_hw_reg_read(ah, AR5K_TIMER1);
	len += snprintf(buf + len, sizeof(buf) - len, "%-24s0x%08x\tTU: %08x\n",
		"AR5K_TIMER1 (DMA)", v, v >> 3);

	v = ath5k_hw_reg_read(ah, AR5K_TIMER2);
	len += snprintf(buf + len, sizeof(buf) - len, "%-24s0x%08x\tTU: %08x\n",
		"AR5K_TIMER2 (SWBA)", v, v >> 3);

	v = ath5k_hw_reg_read(ah, AR5K_TIMER3);
	len += snprintf(buf + len, sizeof(buf) - len, "%-24s0x%08x\tTU: %08x\n",
		"AR5K_TIMER3 (ATIM)", v, v);

	tsf = ath5k_hw_get_tsf64(ah);
	len += snprintf(buf + len, sizeof(buf) - len,
		"TSF\t\t0x%016llx\tTU: %08x\n",
		(unsigned long long)tsf, TSF_TO_TU(tsf));

	/* snprintf may report more than was written; clamp before copying */
	if (len > sizeof(buf))
		len = sizeof(buf);

	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}

/* accept "disable"/"enable" to toggle hardware beacon transmission */
static ssize_t write_file_beacon(struct file *file,
				   const char __user *userbuf,
				   size_t count, loff_t *ppos)
{
	struct ath5k_hw *ah = file->private_data;
	char buf[20];

	/* NOTE(review): buf is not NUL-terminated when count >= sizeof(buf);
	 * safe only because all comparisons below are length-bounded */
	if (copy_from_user(buf, userbuf, min(count, sizeof(buf))))
		return -EFAULT;

	if (strncmp(buf, "disable", 7) == 0) {
		AR5K_REG_DISABLE_BITS(ah, AR5K_BEACON, AR5K_BEACON_ENABLE);
		printk(KERN_INFO "debugfs disable beacons\n");
	} else if (strncmp(buf, "enable", 6) == 0) {
		AR5K_REG_ENABLE_BITS(ah, AR5K_BEACON, AR5K_BEACON_ENABLE);
		printk(KERN_INFO "debugfs enable beacons\n");
	}
	return count;
}

static const struct file_operations fops_beacon = {
	.read = read_file_beacon,
	.write = write_file_beacon,
	.open = simple_open,
	.owner = THIS_MODULE,
	.llseek = default_llseek,
};


/* debugfs: reset */

/* any write schedules the device reset worker */
static ssize_t write_file_reset(struct file *file,
				   const char __user *userbuf,
				   size_t count, loff_t *ppos)
{
	struct ath5k_hw *ah = file->private_data;
	ATH5K_DBG(ah, ATH5K_DEBUG_RESET, "debug file triggered reset\n");
	ieee80211_queue_work(ah->hw, &ah->reset_work);
	return count;
}

static const struct file_operations fops_reset = {
	.write = write_file_reset,
	.open = simple_open,
	.owner = THIS_MODULE,
	.llseek = noop_llseek,
};


/* debugfs: debug level */

/* one entry per debug-level bit; last entry ("all") covers every bit */
static const struct {
	enum ath5k_debug_level level;
	const char *name;
	const char *desc;
} dbg_info[] = {
	{ ATH5K_DEBUG_RESET, "reset", "reset and initialization" },
	{ ATH5K_DEBUG_INTR, "intr", "interrupt handling" },
	{ ATH5K_DEBUG_MODE, "mode", "mode init/setup" },
	{ ATH5K_DEBUG_XMIT, "xmit", "basic xmit operation" },
	{ ATH5K_DEBUG_BEACON, "beacon", "beacon handling" },
	{ ATH5K_DEBUG_CALIBRATE, "calib", "periodic calibration" },
	{ ATH5K_DEBUG_TXPOWER, "txpower", "transmit power setting" },
	{ ATH5K_DEBUG_LED, "led", "LED management" },
	{ ATH5K_DEBUG_DUMPBANDS, "dumpbands", "dump bands" },
	{ ATH5K_DEBUG_DMA, "dma", "dma start/stop" },
	{ ATH5K_DEBUG_ANI, "ani", "adaptive noise immunity" },
	{ ATH5K_DEBUG_DESC, "desc", "descriptor chains" },
	{ ATH5K_DEBUG_ANY, "all", "show all debug levels" },
};

/* list every debug level, marking the ones currently active with '+' */
static ssize_t read_file_debug(struct file *file, char __user *user_buf,
				   size_t count, loff_t *ppos)
{
	struct ath5k_hw *ah = file->private_data;
	char buf[700];
	unsigned int len = 0;
	unsigned int i;

	len += snprintf(buf + len, sizeof(buf) - len,
			"DEBUG LEVEL: 0x%08x\n\n", ah->debug.level);

	for (i = 0; i < ARRAY_SIZE(dbg_info) - 1; i++) {
		len += snprintf(buf + len, sizeof(buf) - len,
				"%10s %c 0x%08x - %s\n", dbg_info[i].name,
				ah->debug.level & dbg_info[i].level ? '+' : ' ',
				dbg_info[i].level, dbg_info[i].desc);
	}
	/* the final "all" entry is shown as active only on exact match */
	len += snprintf(buf + len, sizeof(buf) - len,
			"%10s %c 0x%08x - %s\n", dbg_info[i].name,
			ah->debug.level == dbg_info[i].level ? '+' : ' ',
			dbg_info[i].level, dbg_info[i].desc);

	if (len > sizeof(buf))
		len = sizeof(buf);

	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}

/* write a level name to toggle the corresponding debug bit */
static ssize_t write_file_debug(struct file *file,
				   const char __user *userbuf,
				   size_t count, loff_t *ppos)
{
	struct ath5k_hw *ah = file->private_data;
	unsigned int i;
	char buf[20];

	if (copy_from_user(buf, userbuf, min(count, sizeof(buf))))
		return -EFAULT;

	for (i = 0; i < ARRAY_SIZE(dbg_info); i++) {
		if (strncmp(buf, dbg_info[i].name,
					strlen(dbg_info[i].name)) == 0) {
			ah->debug.level ^= dbg_info[i].level; /* toggle bit */
			break;
		}
	}
	return count;
}

static const struct file_operations fops_debug = {
	.read = read_file_debug,
	.write = write_file_debug,
	.open = simple_open,
	.owner = THIS_MODULE,
	.llseek = default_llseek,
};


/* debugfs: antenna */

/* dump the antenna mode, per-antenna RX/TX counters and the diversity
 * related registers (continues past this chunk boundary) */
static ssize_t read_file_antenna(struct file *file, char __user *user_buf,
				   size_t count, loff_t *ppos)
{
	struct ath5k_hw *ah = file->private_data;
	char buf[700];
	unsigned int len = 0;
	unsigned int i;
	unsigned int v;

	len += snprintf(buf +
			len, sizeof(buf) - len, "antenna mode\t%d\n",
			ah->ah_ant_mode);
	len += snprintf(buf + len, sizeof(buf) - len,
			"default antenna\t%d\n", ah->ah_def_ant);
	len += snprintf(buf + len, sizeof(buf) - len,
			"tx antenna\t%d\n", ah->ah_tx_ant);

	len += snprintf(buf + len, sizeof(buf) - len,
			"\nANTENNA\t\tRX\tTX\n");
	/* index 0 is the "invalid" bucket, printed separately below */
	for (i = 1; i < ARRAY_SIZE(ah->stats.antenna_rx); i++) {
		len += snprintf(buf + len, sizeof(buf) - len,
				"[antenna %d]\t%d\t%d\n", i,
				ah->stats.antenna_rx[i],
				ah->stats.antenna_tx[i]);
	}
	len += snprintf(buf + len, sizeof(buf) - len,
			"[invalid]\t%d\t%d\n",
			ah->stats.antenna_rx[0], ah->stats.antenna_tx[0]);

	v = ath5k_hw_reg_read(ah, AR5K_DEFAULT_ANTENNA);
	len += snprintf(buf + len, sizeof(buf) - len,
			"\nAR5K_DEFAULT_ANTENNA\t0x%08x\n", v);

	v = ath5k_hw_reg_read(ah, AR5K_STA_ID1);
	len += snprintf(buf + len, sizeof(buf) - len,
			"AR5K_STA_ID1_DEFAULT_ANTENNA\t%d\n",
			(v & AR5K_STA_ID1_DEFAULT_ANTENNA) != 0);
	len += snprintf(buf + len, sizeof(buf) - len,
			"AR5K_STA_ID1_DESC_ANTENNA\t%d\n",
			(v & AR5K_STA_ID1_DESC_ANTENNA) != 0);
	len += snprintf(buf + len, sizeof(buf) - len,
			"AR5K_STA_ID1_RTS_DEF_ANTENNA\t%d\n",
			(v & AR5K_STA_ID1_RTS_DEF_ANTENNA) != 0);
	len += snprintf(buf + len, sizeof(buf) - len,
			"AR5K_STA_ID1_SELFGEN_DEF_ANT\t%d\n",
			(v & AR5K_STA_ID1_SELFGEN_DEF_ANT) != 0);

	v = ath5k_hw_reg_read(ah, AR5K_PHY_AGCCTL);
	len += snprintf(buf + len, sizeof(buf) - len,
			"\nAR5K_PHY_AGCCTL_OFDM_DIV_DIS\t%d\n",
			(v & AR5K_PHY_AGCCTL_OFDM_DIV_DIS) != 0);

	v = ath5k_hw_reg_read(ah, AR5K_PHY_RESTART);
	len += snprintf(buf + len, sizeof(buf) - len,
			"AR5K_PHY_RESTART_DIV_GC\t\t%x\n",
			(v & AR5K_PHY_RESTART_DIV_GC) >>
			AR5K_PHY_RESTART_DIV_GC_S);

	v = ath5k_hw_reg_read(ah, AR5K_PHY_FAST_ANT_DIV);
	len += snprintf(buf + len, sizeof(buf) - len,
			"AR5K_PHY_FAST_ANT_DIV_EN\t%d\n",
			(v & AR5K_PHY_FAST_ANT_DIV_EN) != 0);

	v = ath5k_hw_reg_read(ah, AR5K_PHY_ANT_SWITCH_TABLE_0);
	len += snprintf(buf + len, sizeof(buf) - len,
			"\nAR5K_PHY_ANT_SWITCH_TABLE_0\t0x%08x\n", v);
	v = ath5k_hw_reg_read(ah, AR5K_PHY_ANT_SWITCH_TABLE_1);
	len += snprintf(buf + len, sizeof(buf) - len,
			"AR5K_PHY_ANT_SWITCH_TABLE_1\t0x%08x\n", v);

	if (len > sizeof(buf))
		len = sizeof(buf);

	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}

/* accept "diversity", "fixed-a", "fixed-b" or "clear" */
static ssize_t write_file_antenna(struct file *file,
				   const char __user *userbuf,
				   size_t count, loff_t *ppos)
{
	struct ath5k_hw *ah = file->private_data;
	unsigned int i;
	char buf[20];

	if (copy_from_user(buf, userbuf, min(count, sizeof(buf))))
		return -EFAULT;

	if (strncmp(buf, "diversity", 9) == 0) {
		ath5k_hw_set_antenna_mode(ah, AR5K_ANTMODE_DEFAULT);
		printk(KERN_INFO "ath5k debug: enable diversity\n");
	} else if (strncmp(buf, "fixed-a", 7) == 0) {
		ath5k_hw_set_antenna_mode(ah, AR5K_ANTMODE_FIXED_A);
		printk(KERN_INFO "ath5k debugfs: fixed antenna A\n");
	} else if (strncmp(buf, "fixed-b", 7) == 0) {
		ath5k_hw_set_antenna_mode(ah, AR5K_ANTMODE_FIXED_B);
		printk(KERN_INFO "ath5k debug: fixed antenna B\n");
	} else if (strncmp(buf, "clear", 5) == 0) {
		for (i = 0; i < ARRAY_SIZE(ah->stats.antenna_rx); i++) {
			ah->stats.antenna_rx[i] = 0;
			ah->stats.antenna_tx[i] = 0;
		}
		printk(KERN_INFO "ath5k debug: cleared antenna stats\n");
	}
	return count;
}

static const struct file_operations fops_antenna = {
	.read = read_file_antenna,
	.write = write_file_antenna,
	.open = simple_open,
	.owner = THIS_MODULE,
	.llseek = default_llseek,
};


/* debugfs: misc */

/* dump BSSID mask, RX filter flags (decoded below and past this chunk
 * boundary) and operating mode */
static ssize_t read_file_misc(struct file *file, char __user *user_buf,
				   size_t count, loff_t *ppos)
{
	struct ath5k_hw *ah = file->private_data;
	char buf[700];
	unsigned int len = 0;
	u32 filt = ath5k_hw_get_rx_filter(ah);

	len += snprintf(buf + len, sizeof(buf) - len, "bssid-mask: %pM\n",
			ah->bssidmask);
	len += snprintf(buf + len, sizeof(buf) - len, "filter-flags: 0x%x ",
			filt);
	if (filt & AR5K_RX_FILTER_UCAST)
		len += snprintf(buf + len, sizeof(buf) - len, " UCAST");
	if (filt & AR5K_RX_FILTER_MCAST)
		len += snprintf(buf + len, sizeof(buf) - len, " MCAST");
	if (filt & AR5K_RX_FILTER_BCAST)
		len += snprintf(buf + len,
sizeof(buf) - len, " BCAST"); if (filt & AR5K_RX_FILTER_CONTROL) len += snprintf(buf + len, sizeof(buf) - len, " CONTROL"); if (filt & AR5K_RX_FILTER_BEACON) len += snprintf(buf + len, sizeof(buf) - len, " BEACON"); if (filt & AR5K_RX_FILTER_PROM) len += snprintf(buf + len, sizeof(buf) - len, " PROM"); if (filt & AR5K_RX_FILTER_XRPOLL) len += snprintf(buf + len, sizeof(buf) - len, " XRPOLL"); if (filt & AR5K_RX_FILTER_PROBEREQ) len += snprintf(buf + len, sizeof(buf) - len, " PROBEREQ"); if (filt & AR5K_RX_FILTER_PHYERR_5212) len += snprintf(buf + len, sizeof(buf) - len, " PHYERR-5212"); if (filt & AR5K_RX_FILTER_RADARERR_5212) len += snprintf(buf + len, sizeof(buf) - len, " RADARERR-5212"); if (filt & AR5K_RX_FILTER_PHYERR_5211) snprintf(buf + len, sizeof(buf) - len, " PHYERR-5211"); if (filt & AR5K_RX_FILTER_RADARERR_5211) len += snprintf(buf + len, sizeof(buf) - len, " RADARERR-5211"); len += snprintf(buf + len, sizeof(buf) - len, "\nopmode: %s (%d)\n", ath_opmode_to_string(ah->opmode), ah->opmode); if (len > sizeof(buf)) len = sizeof(buf); return simple_read_from_buffer(user_buf, count, ppos, buf, len); } static const struct file_operations fops_misc = { .read = read_file_misc, .open = simple_open, .owner = THIS_MODULE, }; /* debugfs: frameerrors */ static ssize_t read_file_frameerrors(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct ath5k_hw *ah = file->private_data; struct ath5k_statistics *st = &ah->stats; char buf[700]; unsigned int len = 0; int i; len += snprintf(buf + len, sizeof(buf) - len, "RX\n---------------------\n"); len += snprintf(buf + len, sizeof(buf) - len, "CRC\t%u\t(%u%%)\n", st->rxerr_crc, st->rx_all_count > 0 ? st->rxerr_crc * 100 / st->rx_all_count : 0); len += snprintf(buf + len, sizeof(buf) - len, "PHY\t%u\t(%u%%)\n", st->rxerr_phy, st->rx_all_count > 0 ? 
				st->rxerr_phy * 100 / st->rx_all_count : 0);
	/* break the PHY error total down by 5-bit hardware error code */
	for (i = 0; i < 32; i++) {
		if (st->rxerr_phy_code[i])
			len += snprintf(buf + len, sizeof(buf) - len,
					" phy_err[%u]\t%u\n",
					i, st->rxerr_phy_code[i]);
	}
	len += snprintf(buf + len, sizeof(buf) - len, "FIFO\t%u\t(%u%%)\n",
			st->rxerr_fifo,
			st->rx_all_count > 0 ?
				st->rxerr_fifo * 100 / st->rx_all_count : 0);
	len += snprintf(buf + len, sizeof(buf) - len, "decrypt\t%u\t(%u%%)\n",
			st->rxerr_decrypt,
			st->rx_all_count > 0 ?
				st->rxerr_decrypt * 100 / st->rx_all_count : 0);
	len += snprintf(buf + len, sizeof(buf) - len, "MIC\t%u\t(%u%%)\n",
			st->rxerr_mic,
			st->rx_all_count > 0 ?
				st->rxerr_mic * 100 / st->rx_all_count : 0);
	len += snprintf(buf + len, sizeof(buf) - len, "process\t%u\t(%u%%)\n",
			st->rxerr_proc,
			st->rx_all_count > 0 ?
				st->rxerr_proc * 100 / st->rx_all_count : 0);
	len += snprintf(buf + len, sizeof(buf) - len, "jumbo\t%u\t(%u%%)\n",
			st->rxerr_jumbo,
			st->rx_all_count > 0 ?
				st->rxerr_jumbo * 100 / st->rx_all_count : 0);
	len += snprintf(buf + len, sizeof(buf) - len, "[RX all\t%u]\n",
			st->rx_all_count);
	len += snprintf(buf + len, sizeof(buf) - len, "RX-all-bytes\t%u\n",
			st->rx_bytes_count);

	len += snprintf(buf + len, sizeof(buf) - len,
			"\nTX\n---------------------\n");
	len += snprintf(buf + len, sizeof(buf) - len, "retry\t%u\t(%u%%)\n",
			st->txerr_retry,
			st->tx_all_count > 0 ?
				st->txerr_retry * 100 / st->tx_all_count : 0);
	len += snprintf(buf + len, sizeof(buf) - len, "FIFO\t%u\t(%u%%)\n",
			st->txerr_fifo,
			st->tx_all_count > 0 ?
				st->txerr_fifo * 100 / st->tx_all_count : 0);
	len += snprintf(buf + len, sizeof(buf) - len, "filter\t%u\t(%u%%)\n",
			st->txerr_filt,
			st->tx_all_count > 0 ?
				st->txerr_filt * 100 / st->tx_all_count : 0);
	len += snprintf(buf + len, sizeof(buf) - len, "[TX all\t%u]\n",
			st->tx_all_count);
	len += snprintf(buf + len, sizeof(buf) - len, "TX-all-bytes\t%u\n",
			st->tx_bytes_count);

	if (len > sizeof(buf))
		len = sizeof(buf);

	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}

/* writing "clear" zeroes all RX/TX error counters */
static ssize_t write_file_frameerrors(struct file *file,
				   const char __user *userbuf,
				   size_t count, loff_t *ppos)
{
	struct ath5k_hw *ah = file->private_data;
	struct ath5k_statistics *st = &ah->stats;
	char buf[20];

	if (copy_from_user(buf, userbuf, min(count, sizeof(buf))))
		return -EFAULT;

	if (strncmp(buf, "clear", 5) == 0) {
		st->rxerr_crc = 0;
		st->rxerr_phy = 0;
		st->rxerr_fifo = 0;
		st->rxerr_decrypt = 0;
		st->rxerr_mic = 0;
		st->rxerr_proc = 0;
		st->rxerr_jumbo = 0;
		st->rx_all_count = 0;
		st->txerr_retry = 0;
		st->txerr_fifo = 0;
		st->txerr_filt = 0;
		st->tx_all_count = 0;
		printk(KERN_INFO "ath5k debug: cleared frameerrors stats\n");
	}
	return count;
}

static const struct file_operations fops_frameerrors = {
	.read = read_file_frameerrors,
	.write = write_file_frameerrors,
	.open = simple_open,
	.owner = THIS_MODULE,
	.llseek = default_llseek,
};


/* debugfs: ani */

/* dump the full adaptive-noise-immunity state: capabilities, current
 * levels, profile counters and PHY error counters (continues past this
 * chunk boundary) */
static ssize_t read_file_ani(struct file *file, char __user *user_buf,
				   size_t count, loff_t *ppos)
{
	struct ath5k_hw *ah = file->private_data;
	struct ath5k_statistics *st = &ah->stats;
	struct ath5k_ani_state *as = &ah->ani_state;
	char buf[700];
	unsigned int len = 0;

	len += snprintf(buf + len, sizeof(buf) - len,
			"HW has PHY error counters:\t%s\n",
			ah->ah_capabilities.cap_has_phyerr_counters ?
				"yes" : "no");
	len += snprintf(buf + len, sizeof(buf) - len,
			"HW max spur immunity level:\t%d\n",
			as->max_spur_level);
	len += snprintf(buf + len, sizeof(buf) - len,
		"\nANI state\n--------------------------------------------\n");
	len += snprintf(buf + len, sizeof(buf) - len,
			"operating mode:\t\t\t");
	switch (as->ani_mode) {
	case ATH5K_ANI_MODE_OFF:
		len += snprintf(buf + len, sizeof(buf) - len, "OFF\n");
		break;
	case ATH5K_ANI_MODE_MANUAL_LOW:
		len += snprintf(buf + len, sizeof(buf) - len, "MANUAL LOW\n");
		break;
	case ATH5K_ANI_MODE_MANUAL_HIGH:
		len += snprintf(buf + len, sizeof(buf) - len, "MANUAL HIGH\n");
		break;
	case ATH5K_ANI_MODE_AUTO:
		len += snprintf(buf + len, sizeof(buf) - len, "AUTO\n");
		break;
	default:
		len += snprintf(buf + len, sizeof(buf) - len,
				"??? (not good)\n");
		break;
	}
	len += snprintf(buf + len, sizeof(buf) - len,
			"noise immunity level:\t\t%d\n", as->noise_imm_level);
	len += snprintf(buf + len, sizeof(buf) - len,
			"spur immunity level:\t\t%d\n", as->spur_level);
	len += snprintf(buf + len, sizeof(buf) - len,
			"firstep level:\t\t\t%d\n", as->firstep_level);
	len += snprintf(buf + len, sizeof(buf) - len,
			"OFDM weak signal detection:\t%s\n",
			as->ofdm_weak_sig ? "on" : "off");
	len += snprintf(buf + len, sizeof(buf) - len,
			"CCK weak signal detection:\t%s\n",
			as->cck_weak_sig ? "on" : "off");

	len += snprintf(buf + len, sizeof(buf) - len,
			"\nMIB INTERRUPTS:\t\t%u\n", st->mib_intr);
	len += snprintf(buf + len, sizeof(buf) - len,
			"beacon RSSI average:\t%d\n",
			(int)ewma_read(&ah->ah_beacon_rssi_avg));

/* expands to "count, percent-of-cycles" for one profile counter field */
#define CC_PRINT(_struct, _field) \
	_struct._field, \
	_struct.cycles > 0 ? \
	_struct._field * 100 / _struct.cycles : 0

	len += snprintf(buf + len, sizeof(buf) - len,
			"profcnt tx\t\t%u\t(%d%%)\n",
			CC_PRINT(as->last_cc, tx_frame));
	len += snprintf(buf + len, sizeof(buf) - len,
			"profcnt rx\t\t%u\t(%d%%)\n",
			CC_PRINT(as->last_cc, rx_frame));
	len += snprintf(buf + len, sizeof(buf) - len,
			"profcnt busy\t\t%u\t(%d%%)\n",
			CC_PRINT(as->last_cc, rx_busy));
#undef CC_PRINT
	len += snprintf(buf + len, sizeof(buf) - len,
			"profcnt cycles\t\t%u\n", as->last_cc.cycles);
	len += snprintf(buf + len, sizeof(buf) - len,
			"listen time\t\t%d\tlast: %d\n",
			as->listen_time, as->last_listen);
	len += snprintf(buf + len, sizeof(buf) - len,
			"OFDM errors\t\t%u\tlast: %u\tsum: %u\n",
			as->ofdm_errors, as->last_ofdm_errors,
			as->sum_ofdm_errors);
	len += snprintf(buf + len, sizeof(buf) - len,
			"CCK errors\t\t%u\tlast: %u\tsum: %u\n",
			as->cck_errors, as->last_cck_errors,
			as->sum_cck_errors);
	/* counters count down from the trigger threshold; show both raw
	 * register value and the derived error count */
	len += snprintf(buf + len, sizeof(buf) - len,
			"AR5K_PHYERR_CNT1\t%x\t(=%d)\n",
			ath5k_hw_reg_read(ah, AR5K_PHYERR_CNT1),
			ATH5K_ANI_OFDM_TRIG_HIGH - (ATH5K_PHYERR_CNT_MAX -
			ath5k_hw_reg_read(ah, AR5K_PHYERR_CNT1)));
	len += snprintf(buf + len, sizeof(buf) - len,
			"AR5K_PHYERR_CNT2\t%x\t(=%d)\n",
			ath5k_hw_reg_read(ah, AR5K_PHYERR_CNT2),
			ATH5K_ANI_CCK_TRIG_HIGH - (ATH5K_PHYERR_CNT_MAX -
			ath5k_hw_reg_read(ah, AR5K_PHYERR_CNT2)));

	if (len > sizeof(buf))
		len = sizeof(buf);

	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}

/* accept a fixed set of keywords to change the ANI operating mode or to
 * force individual immunity parameters */
static ssize_t write_file_ani(struct file *file,
				   const char __user *userbuf,
				   size_t count, loff_t *ppos)
{
	struct ath5k_hw *ah = file->private_data;
	char buf[20];

	if (copy_from_user(buf, userbuf, min(count, sizeof(buf))))
		return -EFAULT;

	/* NOTE: "high sensitivity" maps to MANUAL_HIGH immunity and vice
	 * versa — the naming is from the user's point of view */
	if (strncmp(buf, "sens-low", 8) == 0) {
		ath5k_ani_init(ah, ATH5K_ANI_MODE_MANUAL_HIGH);
	} else if (strncmp(buf, "sens-high", 9) == 0) {
		ath5k_ani_init(ah, ATH5K_ANI_MODE_MANUAL_LOW);
	} else if (strncmp(buf, "ani-off", 7) == 0) {
		ath5k_ani_init(ah, ATH5K_ANI_MODE_OFF);
	} else if (strncmp(buf, "ani-on", 6) == 0) {
		ath5k_ani_init(ah, ATH5K_ANI_MODE_AUTO);
	} else if (strncmp(buf, "noise-low", 9) == 0) {
		ath5k_ani_set_noise_immunity_level(ah, 0);
	} else if (strncmp(buf, "noise-high", 10) == 0) {
		ath5k_ani_set_noise_immunity_level(ah,
						   ATH5K_ANI_MAX_NOISE_IMM_LVL);
	} else if (strncmp(buf, "spur-low", 8) == 0) {
		ath5k_ani_set_spur_immunity_level(ah, 0);
	} else if (strncmp(buf, "spur-high", 9) == 0) {
		ath5k_ani_set_spur_immunity_level(ah,
						  ah->ani_state.max_spur_level);
	} else if (strncmp(buf, "fir-low", 7) == 0) {
		ath5k_ani_set_firstep_level(ah, 0);
	} else if (strncmp(buf, "fir-high", 8) == 0) {
		ath5k_ani_set_firstep_level(ah, ATH5K_ANI_MAX_FIRSTEP_LVL);
	} else if (strncmp(buf, "ofdm-off", 8) == 0) {
		ath5k_ani_set_ofdm_weak_signal_detection(ah, false);
	} else if (strncmp(buf, "ofdm-on", 7) == 0) {
		ath5k_ani_set_ofdm_weak_signal_detection(ah, true);
	} else if (strncmp(buf, "cck-off", 7) == 0) {
		ath5k_ani_set_cck_weak_signal_detection(ah, false);
	} else if (strncmp(buf, "cck-on", 6) == 0) {
		ath5k_ani_set_cck_weak_signal_detection(ah, true);
	}
	return count;
}

static const struct file_operations fops_ani = {
	.read = read_file_ani,
	.write = write_file_ani,
	.open = simple_open,
	.owner = THIS_MODULE,
	.llseek = default_llseek,
};


/* debugfs: queues etc */

/* per-TX-queue status: setup flag, software queue length and stuck count
 * (continues past this chunk boundary) */
static ssize_t read_file_queue(struct file *file, char __user *user_buf,
				   size_t count, loff_t *ppos)
{
	struct ath5k_hw *ah = file->private_data;
	char buf[700];
	unsigned int len = 0;
	struct ath5k_txq *txq;
	struct ath5k_buf *bf, *bf0;
	int i, n;

	len += snprintf(buf + len, sizeof(buf) - len,
			"available txbuffers: %d\n", ah->txbuf_len);

	for (i = 0; i < ARRAY_SIZE(ah->txqs); i++) {
		txq = &ah->txqs[i];

		len += snprintf(buf + len, sizeof(buf) - len,
				"%02d: %ssetup\n", i, txq->setup ?
				"" : "not ");

		if (!txq->setup)
			continue;

		/* count buffers currently queued, under the queue lock */
		n = 0;
		spin_lock_bh(&txq->lock);
		list_for_each_entry_safe(bf, bf0, &txq->q, list)
			n++;
		spin_unlock_bh(&txq->lock);

		len += snprintf(buf + len, sizeof(buf) - len,
				" len: %d bufs: %d\n", txq->txq_len, n);
		len += snprintf(buf + len, sizeof(buf) - len,
				" stuck: %d\n", txq->txq_stuck);
	}

	if (len > sizeof(buf))
		len = sizeof(buf);

	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}

/* writing "start"/"stop" wakes or stops all mac80211 queues */
static ssize_t write_file_queue(struct file *file,
				   const char __user *userbuf,
				   size_t count, loff_t *ppos)
{
	struct ath5k_hw *ah = file->private_data;
	char buf[20];

	if (copy_from_user(buf, userbuf, min(count, sizeof(buf))))
		return -EFAULT;

	if (strncmp(buf, "start", 5) == 0)
		ieee80211_wake_queues(ah->hw);
	else if (strncmp(buf, "stop", 4) == 0)
		ieee80211_stop_queues(ah->hw);

	return count;
}

static const struct file_operations fops_queue = {
	.read = read_file_queue,
	.write = write_file_queue,
	.open = simple_open,
	.owner = THIS_MODULE,
	.llseek = default_llseek,
};

/*
 * Create the per-device "ath5k" debugfs directory and all debug files.
 * Also applies the module-parameter debug level to this device.
 */
void ath5k_debug_init_device(struct ath5k_hw *ah)
{
	struct dentry *phydir;

	ah->debug.level = ath5k_debug;

	phydir = debugfs_create_dir("ath5k", ah->hw->wiphy->debugfsdir);
	if (!phydir)
		return;

	debugfs_create_file("debug", S_IWUSR | S_IRUSR, phydir, ah,
			    &fops_debug);
	debugfs_create_file("registers", S_IRUSR, phydir, ah,
			    &fops_registers);
	debugfs_create_file("beacon", S_IWUSR | S_IRUSR, phydir, ah,
			    &fops_beacon);
	debugfs_create_file("reset", S_IWUSR, phydir, ah, &fops_reset);
	debugfs_create_file("antenna", S_IWUSR | S_IRUSR, phydir, ah,
			    &fops_antenna);
	debugfs_create_file("misc", S_IRUSR, phydir, ah, &fops_misc);
	debugfs_create_file("frameerrors", S_IWUSR | S_IRUSR, phydir, ah,
			    &fops_frameerrors);
	debugfs_create_file("ani", S_IWUSR | S_IRUSR, phydir, ah, &fops_ani);
	debugfs_create_file("queue", S_IWUSR | S_IRUSR, phydir, ah,
			    &fops_queue);
	debugfs_create_bool("32khz_clock", S_IWUSR | S_IRUSR, phydir,
			    &ah->ah_use_32khz_clock);
}

/* functions used in other places */

/* dump all supported bands, channels and bitrates to the kernel log when
 * the "dumpbands" debug level is active */
void ath5k_debug_dump_bands(struct ath5k_hw *ah)
{
	unsigned int b, i;

	if (likely(!(ah->debug.level & ATH5K_DEBUG_DUMPBANDS)))
		return;

	BUG_ON(!ah->sbands);

	for (b = 0; b < IEEE80211_NUM_BANDS; b++) {
		struct ieee80211_supported_band *band = &ah->sbands[b];
		char bname[6];
		switch (band->band) {
		case IEEE80211_BAND_2GHZ:
			strcpy(bname, "2 GHz");
			break;
		case IEEE80211_BAND_5GHZ:
			strcpy(bname, "5 GHz");
			break;
		default:
			printk(KERN_DEBUG "Band not supported: %d\n",
				band->band);
			return;
		}
		printk(KERN_DEBUG "Band %s: channels %d, rates %d\n", bname,
				band->n_channels, band->n_bitrates);
		printk(KERN_DEBUG " channels:\n");
		for (i = 0; i < band->n_channels; i++)
			printk(KERN_DEBUG " %3d %d %.4x %.4x\n",
					ieee80211_frequency_to_channel(
						band->channels[i].center_freq),
					band->channels[i].center_freq,
					band->channels[i].hw_value,
					band->channels[i].flags);
		printk(KERN_DEBUG " rates:\n");
		for (i = 0; i < band->n_bitrates; i++)
			printk(KERN_DEBUG " %4d %.4x %.4x %.4x\n",
					band->bitrates[i].bitrate,
					band->bitrates[i].hw_value,
					band->bitrates[i].flags,
					band->bitrates[i].hw_value_short);
	}
}

/* log one RX descriptor; trailing char: ' ' pending, '*' ok, '!' error */
static inline void
ath5k_debug_printrxbuf(struct ath5k_buf *bf, int done,
		       struct ath5k_rx_status *rs)
{
	struct ath5k_desc *ds = bf->desc;
	struct ath5k_hw_all_rx_desc *rd = &ds->ud.ds_rx;

	printk(KERN_DEBUG "R (%p %llx) %08x %08x %08x %08x %08x %08x %c\n",
		ds, (unsigned long long)bf->daddr,
		ds->ds_link, ds->ds_data,
		rd->rx_ctl.rx_control_0, rd->rx_ctl.rx_control_1,
		rd->rx_stat.rx_status_0, rd->rx_stat.rx_status_1,
		!done ? ' ' : (rs->rs_status == 0) ? '*' : '!');
}

/* dump every buffer on the RX list when the "desc" debug level is active */
void ath5k_debug_printrxbuffs(struct ath5k_hw *ah)
{
	struct ath5k_desc *ds;
	struct ath5k_buf *bf;
	struct ath5k_rx_status rs = {};
	int status;

	if (likely(!(ah->debug.level & ATH5K_DEBUG_DESC)))
		return;

	printk(KERN_DEBUG "rxdp %x, rxlink %p\n",
		ath5k_hw_get_rxdp(ah), ah->rxlink);

	spin_lock_bh(&ah->rxbuflock);
	list_for_each_entry(bf, &ah->rxbuf, list) {
		ds = bf->desc;
		status = ah->ah_proc_rx_desc(ah, ds, &rs);
		/* NOTE(review): only descriptors the hardware has completed
		 * (status == 0) are printed here — confirm that matches the
		 * intent, since in-progress entries are skipped entirely */
		if (!status)
			ath5k_debug_printrxbuf(bf, status == 0, &rs);
	}
	spin_unlock_bh(&ah->rxbuflock);
}

/* log one TX descriptor when the "desc" debug level is active */
void ath5k_debug_printtxbuf(struct ath5k_hw *ah, struct ath5k_buf *bf)
{
	struct ath5k_desc *ds = bf->desc;
	struct ath5k_hw_5212_tx_desc *td = &ds->ud.ds_tx5212;
	struct ath5k_tx_status ts = {};
	int done;

	if (likely(!(ah->debug.level & ATH5K_DEBUG_DESC)))
		return;

	done = ah->ah_proc_tx_desc(ah, bf->desc, &ts);

	printk(KERN_DEBUG "T (%p %llx) %08x %08x %08x %08x %08x %08x %08x "
		"%08x %c\n", ds, (unsigned long long)bf->daddr, ds->ds_link,
		ds->ds_data, td->tx_ctl.tx_control_0, td->tx_ctl.tx_control_1,
		td->tx_ctl.tx_control_2, td->tx_ctl.tx_control_3,
		td->tx_stat.tx_status_0, td->tx_stat.tx_status_1,
		done ? ' ' : (ts.ts_status == 0) ? '*' : '!');
}
gpl-2.0