repo_name
string
path
string
copies
string
size
string
content
string
license
string
hyuh/kernel-k2
arch/blackfin/kernel/stacktrace.c
13548
1119
/* * Blackfin stacktrace code (mostly copied from avr32) * * Copyright 2009 Analog Devices Inc. * Licensed under the GPL-2 or later. */ #include <linux/sched.h> #include <linux/stacktrace.h> #include <linux/thread_info.h> #include <linux/module.h> register unsigned long current_frame_pointer asm("FP"); struct stackframe { unsigned long fp; unsigned long rets; }; /* * Save stack-backtrace addresses into a stack_trace buffer. */ void save_stack_trace(struct stack_trace *trace) { unsigned long low, high; unsigned long fp; struct stackframe *frame; int skip = trace->skip; low = (unsigned long)task_stack_page(current); high = low + THREAD_SIZE; fp = current_frame_pointer; while (fp >= low && fp <= (high - sizeof(*frame))) { frame = (struct stackframe *)fp; if (skip) { skip--; } else { trace->entries[trace->nr_entries++] = frame->rets; if (trace->nr_entries >= trace->max_entries) break; } /* * The next frame must be at a higher address than the * current frame. */ low = fp + sizeof(*frame); fp = frame->fp; } } EXPORT_SYMBOL_GPL(save_stack_trace);
gpl-2.0
nychitman1/android_kernel_google_pixel
drivers/misc/cxl/native.c
237
18534
/*
 * Copyright 2014 IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <asm/synch.h>
#include <misc/cxl.h>

#include "cxl.h"

/*
 * Write @command into the AFU control register and spin (under
 * afu_cntl_lock) until the masked status bits reach @result, or the
 * CXL_TIMEOUT expires (-EBUSY).  On success afu->enabled is updated
 * to @enabled.
 */
static int afu_control(struct cxl_afu *afu, u64 command,
		       u64 result, u64 mask, bool enabled)
{
	u64 AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
	unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);

	spin_lock(&afu->afu_cntl_lock);
	pr_devel("AFU command starting: %llx\n", command);

	cxl_p2n_write(afu, CXL_AFU_Cntl_An, AFU_Cntl | command);

	AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
	while ((AFU_Cntl & mask) != result) {
		if (time_after_eq(jiffies, timeout)) {
			dev_warn(&afu->dev, "WARNING: AFU control timed out!\n");
			spin_unlock(&afu->afu_cntl_lock);
			return -EBUSY;
		}
		pr_devel_ratelimited("AFU control... (0x%.16llx)\n",
				     AFU_Cntl | command);
		cpu_relax();
		AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
	};
	pr_devel("AFU command complete: %llx\n", command);
	afu->enabled = enabled;
	spin_unlock(&afu->afu_cntl_lock);

	return 0;
}

/* Enable the AFU and wait for it to report the enabled state. */
static int afu_enable(struct cxl_afu *afu)
{
	pr_devel("AFU enable request\n");

	return afu_control(afu, CXL_AFU_Cntl_An_E,
			   CXL_AFU_Cntl_An_ES_Enabled,
			   CXL_AFU_Cntl_An_ES_MASK, true);
}

/* Disable the AFU (no command bit set) and wait for the disabled state. */
int cxl_afu_disable(struct cxl_afu *afu)
{
	pr_devel("AFU disable request\n");

	return afu_control(afu, 0, CXL_AFU_Cntl_An_ES_Disabled,
			   CXL_AFU_Cntl_An_ES_MASK, false);
}

/* This will disable as well as reset */
int cxl_afu_reset(struct cxl_afu *afu)
{
	pr_devel("AFU reset request\n");

	return afu_control(afu, CXL_AFU_Cntl_An_RA,
			   CXL_AFU_Cntl_An_RS_Complete | CXL_AFU_Cntl_An_ES_Disabled,
			   CXL_AFU_Cntl_An_RS_MASK | CXL_AFU_Cntl_An_ES_MASK,
			   false);
}

/* Enable the AFU if it is not already marked enabled. */
static int afu_check_and_enable(struct cxl_afu *afu)
{
	if (afu->enabled)
		return 0;
	return afu_enable(afu);
}

/*
 * Purge outstanding PSL transactions for this AFU.  The AFU is
 * expected to be disabled first (we warn and force-disable if not).
 * While the purge is pending, any translation faults it raises are
 * terminated/acknowledged via the TFC register so the purge can
 * complete.  Returns -EBUSY on timeout.
 */
int cxl_psl_purge(struct cxl_afu *afu)
{
	u64 PSL_CNTL = cxl_p1n_read(afu, CXL_PSL_SCNTL_An);
	u64 AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
	u64 dsisr, dar;
	u64 start, end;
	unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);

	pr_devel("PSL purge request\n");

	if ((AFU_Cntl & CXL_AFU_Cntl_An_ES_MASK) != CXL_AFU_Cntl_An_ES_Disabled) {
		WARN(1, "psl_purge request while AFU not disabled!\n");
		cxl_afu_disable(afu);
	}

	cxl_p1n_write(afu, CXL_PSL_SCNTL_An,
		       PSL_CNTL | CXL_PSL_SCNTL_An_Pc);
	start = local_clock();
	PSL_CNTL = cxl_p1n_read(afu, CXL_PSL_SCNTL_An);
	while ((PSL_CNTL & CXL_PSL_SCNTL_An_Ps_MASK)
			== CXL_PSL_SCNTL_An_Ps_Pending) {
		if (time_after_eq(jiffies, timeout)) {
			dev_warn(&afu->dev, "WARNING: PSL Purge timed out!\n");
			return -EBUSY;
		}
		dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
		pr_devel_ratelimited("PSL purging... PSL_CNTL: 0x%.16llx  PSL_DSISR: 0x%.16llx\n", PSL_CNTL, dsisr);
		if (dsisr & CXL_PSL_DSISR_TRANS) {
			dar = cxl_p2n_read(afu, CXL_PSL_DAR_An);
			dev_notice(&afu->dev, "PSL purge terminating pending translation, DSISR: 0x%.16llx, DAR: 0x%.16llx\n", dsisr, dar);
			cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_AE);
		} else if (dsisr) {
			dev_notice(&afu->dev, "PSL purge acknowledging pending non-translation fault, DSISR: 0x%.16llx\n", dsisr);
			cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_A);
		} else {
			cpu_relax();
		}
		PSL_CNTL = cxl_p1n_read(afu, CXL_PSL_SCNTL_An);
	};
	end = local_clock();
	pr_devel("PSL purged in %lld ns\n", end - start);

	cxl_p1n_write(afu, CXL_PSL_SCNTL_An,
		       PSL_CNTL & ~CXL_PSL_SCNTL_An_Pc);
	return 0;
}

/* Maximum number of processes a SPA of @spa_size bytes can describe. */
static int spa_max_procs(int spa_size)
{
	/*
	 * From the CAIA:
	 *    end_of_SPA_area = SPA_Base + ((n+4) * 128) + (( ((n*8) + 127) >> 7) * 128) + 255
	 * Most of that junk is really just an overly-complicated way of saying
	 * the last 256 bytes are __aligned(128), so it's really:
	 *    end_of_SPA_area = end_of_PSL_queue_area + __aligned(128) 255
	 * and
	 *    end_of_PSL_queue_area = SPA_Base + ((n+4) * 128) + (n*8) - 1
	 * so
	 *    sizeof(SPA) = ((n+4) * 128) + (n*8) + __aligned(128) 256
	 * Ignore the alignment (which is safe in this case as long as we are
	 * careful with our rounding) and solve for n:
	 */
	return ((spa_size / 8) - 96) / 17;
}

/*
 * Allocate the Scheduled Process Area (power-of-two pages, smallest
 * size whose process capacity covers afu->num_procs), compute the
 * sw_command_status location inside it, and program CXL_PSL_SPAP_An
 * with the physical address, encoded size and valid bit.
 */
static int alloc_spa(struct cxl_afu *afu)
{
	u64 spap;

	/* Work out how many pages to allocate */
	afu->spa_order = 0;
	do {
		afu->spa_order++;
		afu->spa_size = (1 << afu->spa_order) * PAGE_SIZE;
		afu->spa_max_procs = spa_max_procs(afu->spa_size);
	} while (afu->spa_max_procs < afu->num_procs);

	WARN_ON(afu->spa_size > 0x100000); /* Max size supported by the hardware */

	if (!(afu->spa = (struct cxl_process_element *)
	      __get_free_pages(GFP_KERNEL | __GFP_ZERO, afu->spa_order))) {
		pr_err("cxl_alloc_spa: Unable to allocate scheduled process area\n");
		return -ENOMEM;
	}
	pr_devel("spa pages: %i afu->spa_max_procs: %i   afu->num_procs: %i\n",
		 1<<afu->spa_order, afu->spa_max_procs, afu->num_procs);

	afu->sw_command_status = (__be64 *)((char *)afu->spa +
					    ((afu->spa_max_procs + 3) * 128));

	spap = virt_to_phys(afu->spa) & CXL_PSL_SPAP_Addr;
	spap |= ((afu->spa_size >> (12 - CXL_PSL_SPAP_Size_Shift)) - 1) & CXL_PSL_SPAP_Size;
	spap |= CXL_PSL_SPAP_V;
	pr_devel("cxl: SPA allocated at 0x%p. Max processes: %i, sw_command_status: 0x%p CXL_PSL_SPAP_An=0x%016llx\n", afu->spa, afu->spa_max_procs, afu->sw_command_status, spap);
	cxl_p1n_write(afu, CXL_PSL_SPAP_An, spap);

	return 0;
}

/* Free the pages backing the Scheduled Process Area. */
static void release_spa(struct cxl_afu *afu)
{
	free_pages((unsigned long) afu->spa, afu->spa_order);
}

/*
 * Adapter-wide TLB and SLB invalidation: issue TLBIA then SLBIA for
 * all AFUs and poll each for completion.  -EBUSY on timeout.
 */
int cxl_tlb_slb_invalidate(struct cxl *adapter)
{
	unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);

	pr_devel("CXL adapter wide TLBIA & SLBIA\n");

	cxl_p1_write(adapter, CXL_PSL_AFUSEL, CXL_PSL_AFUSEL_A);

	cxl_p1_write(adapter, CXL_PSL_TLBIA, CXL_TLB_SLB_IQ_ALL);
	while (cxl_p1_read(adapter, CXL_PSL_TLBIA) & CXL_TLB_SLB_P) {
		if (time_after_eq(jiffies, timeout)) {
			dev_warn(&adapter->dev, "WARNING: CXL adapter wide TLBIA timed out!\n");
			return -EBUSY;
		}
		cpu_relax();
	}

	cxl_p1_write(adapter, CXL_PSL_SLBIA, CXL_TLB_SLB_IQ_ALL);
	while (cxl_p1_read(adapter, CXL_PSL_SLBIA) & CXL_TLB_SLB_P) {
		if (time_after_eq(jiffies, timeout)) {
			dev_warn(&adapter->dev, "WARNING: CXL adapter wide SLBIA timed out!\n");
			return -EBUSY;
		}
		cpu_relax();
	}
	return 0;
}

/* Per-AFU SLB invalidation; polls for completion, -EBUSY on timeout. */
int cxl_afu_slbia(struct cxl_afu *afu)
{
	unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);

	pr_devel("cxl_afu_slbia issuing SLBIA command\n");
	cxl_p2n_write(afu, CXL_SLBIA_An, CXL_TLB_SLB_IQ_ALL);
	while (cxl_p2n_read(afu, CXL_SLBIA_An) & CXL_TLB_SLB_P) {
		if (time_after_eq(jiffies, timeout)) {
			dev_warn(&afu->dev, "WARNING: CXL AFU SLBIA timed out!\n");
			return -EBUSY;
		}
		cpu_relax();
	}
	return 0;
}

/*
 * Update the segment table pointer registers following the sequence
 * required by the hardware: invalidate SSTP1[V], flush the SLB, then
 * write SSTP0/SSTP1.
 */
static int cxl_write_sstp(struct cxl_afu *afu, u64 sstp0, u64 sstp1)
{
	int rc;

	/* 1. Disable SSTP by writing 0 to SSTP1[V] */
	cxl_p2n_write(afu, CXL_SSTP1_An, 0);

	/* 2. Invalidate all SLB entries */
	if ((rc = cxl_afu_slbia(afu)))
		return rc;

	/* 3. Set SSTP0_An */
	cxl_p2n_write(afu, CXL_SSTP0_An, sstp0);

	/* 4. Set SSTP1_An */
	cxl_p2n_write(afu, CXL_SSTP1_An, sstp1);

	return 0;
}

/* Using per slice version may improve performance here. (ie. SLBIA_An) */
static void slb_invalid(struct cxl_context *ctx)
{
	struct cxl *adapter = ctx->afu->adapter;
	u64 slbia;

	/* Caller must hold spa_mutex (serialises LBISEL usage). */
	WARN_ON(!mutex_is_locked(&ctx->afu->spa_mutex));

	cxl_p1_write(adapter, CXL_PSL_LBISEL,
			((u64)be32_to_cpu(ctx->elem->common.pid) << 32) |
			be32_to_cpu(ctx->elem->lpid));
	cxl_p1_write(adapter, CXL_PSL_SLBIA, CXL_TLB_SLB_IQ_LPIDPID);

	while (1) {
		slbia = cxl_p1_read(adapter, CXL_PSL_SLBIA);
		if (!(slbia & CXL_TLB_SLB_P))
			break;
		cpu_relax();
	}
}

/*
 * Issue a Linked List CoMmanD (add/terminate/remove a process element)
 * and wait for the PSL to echo completion through sw_command_status.
 * The smp_wmb/smp_mb barriers order the in-memory element update and
 * status write ahead of the MMIO command.  Returns -EBUSY on timeout,
 * -1 if the PSL reports an error (~0 status).
 */
static int do_process_element_cmd(struct cxl_context *ctx,
				  u64 cmd, u64 pe_state)
{
	u64 state;
	unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);

	WARN_ON(!ctx->afu->enabled);

	ctx->elem->software_state = cpu_to_be32(pe_state);
	smp_wmb();
	*(ctx->afu->sw_command_status) = cpu_to_be64(cmd | 0 | ctx->pe);
	smp_mb();
	cxl_p1n_write(ctx->afu, CXL_PSL_LLCMD_An, cmd | ctx->pe);
	while (1) {
		if (time_after_eq(jiffies, timeout)) {
			dev_warn(&ctx->afu->dev, "WARNING: Process Element Command timed out!\n");
			return -EBUSY;
		}
		state = be64_to_cpup(ctx->afu->sw_command_status);
		if (state == ~0ULL) {
			pr_err("cxl: Error adding process element to AFU\n");
			return -1;
		}
		if ((state & (CXL_SPA_SW_CMD_MASK | CXL_SPA_SW_STATE_MASK  | CXL_SPA_SW_LINK_MASK)) ==
		    (cmd | (cmd >> 16) | ctx->pe))
			break;
		/*
		 * The command won't finish in the PSL if there are
		 * outstanding DSIs.  Hence we need to yield here in
		 * case there are outstanding DSIs that we need to
		 * service.  Tuning possibility: we could wait for a
		 * while before sched
		 */
		schedule();
	}
	return 0;
}

/* Link a context's process element into the SPA (AFU-directed mode). */
static int add_process_element(struct cxl_context *ctx)
{
	int rc = 0;

	mutex_lock(&ctx->afu->spa_mutex);
	pr_devel("%s Adding pe: %i started\n", __func__, ctx->pe);
	if (!(rc = do_process_element_cmd(ctx, CXL_SPA_SW_CMD_ADD, CXL_PE_SOFTWARE_STATE_V)))
		ctx->pe_inserted = true;
	pr_devel("%s Adding pe: %i finished\n", __func__, ctx->pe);
	mutex_unlock(&ctx->afu->spa_mutex);
	return rc;
}

/* Mark a process element terminated; no-op if already invalid. */
static int terminate_process_element(struct cxl_context *ctx)
{
	int rc = 0;

	/* fast path terminate if it's already invalid */
	if (!(ctx->elem->software_state & cpu_to_be32(CXL_PE_SOFTWARE_STATE_V)))
		return rc;

	mutex_lock(&ctx->afu->spa_mutex);
	pr_devel("%s Terminate pe: %i started\n", __func__, ctx->pe);
	rc = do_process_element_cmd(ctx, CXL_SPA_SW_CMD_TERMINATE,
				    CXL_PE_SOFTWARE_STATE_V | CXL_PE_SOFTWARE_STATE_T);
	ctx->elem->software_state = 0;	/* Remove Valid bit */
	pr_devel("%s Terminate pe: %i finished\n", __func__, ctx->pe);
	mutex_unlock(&ctx->afu->spa_mutex);
	return rc;
}

/* Unlink a process element from the SPA and invalidate its SLBs. */
static int remove_process_element(struct cxl_context *ctx)
{
	int rc = 0;

	mutex_lock(&ctx->afu->spa_mutex);
	pr_devel("%s Remove pe: %i started\n", __func__, ctx->pe);
	if (!(rc = do_process_element_cmd(ctx, CXL_SPA_SW_CMD_REMOVE, 0)))
		ctx->pe_inserted = false;
	slb_invalid(ctx);
	pr_devel("%s Remove pe: %i finished\n", __func__, ctx->pe);
	mutex_unlock(&ctx->afu->spa_mutex);

	return rc;
}

/*
 * Assign the context's problem state area: master contexts (or AFUs
 * without per-process areas) get the whole AFU PSA; slaves get their
 * pe-indexed slice of the per-process area.
 */
static void assign_psn_space(struct cxl_context *ctx)
{
	if (!ctx->afu->pp_size || ctx->master) {
		ctx->psn_phys = ctx->afu->psn_phys;
		ctx->psn_size = ctx->afu->adapter->ps_size;
	} else {
		ctx->psn_phys = ctx->afu->psn_phys +
			(ctx->afu->pp_offset + ctx->afu->pp_size * ctx->pe);
		ctx->psn_size = ctx->afu->pp_size;
	}
}

/*
 * Switch the AFU into AFU-directed mode: allocate the SPA, program
 * the mode/AMOR/ID registers, and register master/slave char devices
 * and sysfs entries (unwound on failure).
 */
static int activate_afu_directed(struct cxl_afu *afu)
{
	int rc;

	dev_info(&afu->dev, "Activating AFU directed mode\n");

	if (alloc_spa(afu))
		return -ENOMEM;

	cxl_p1n_write(afu, CXL_PSL_SCNTL_An, CXL_PSL_SCNTL_An_PM_AFU);
	cxl_p1n_write(afu, CXL_PSL_AMOR_An, 0xFFFFFFFFFFFFFFFFULL);
	cxl_p1n_write(afu, CXL_PSL_ID_An, CXL_PSL_ID_An_F | CXL_PSL_ID_An_L);

	afu->current_mode = CXL_MODE_DIRECTED;
	afu->num_procs = afu->max_procs_virtualised;

	if ((rc = cxl_chardev_m_afu_add(afu)))
		return rc;

	if ((rc = cxl_sysfs_afu_m_add(afu)))
		goto err;

	if ((rc = cxl_chardev_s_afu_add(afu)))
		goto err1;

	return 0;
err1:
	cxl_sysfs_afu_m_remove(afu);
err:
	cxl_chardev_afu_remove(afu);
	return rc;
}

/* Set or clear the Little Endian bit in a State Register image. */
#ifdef CONFIG_CPU_LITTLE_ENDIAN
#define set_endian(sr) ((sr) |= CXL_PSL_SR_An_LE)
#else
#define set_endian(sr) ((sr) &= ~(CXL_PSL_SR_An_LE))
#endif

/*
 * Populate this context's process element (PSA, MMU state, SR, IVTE
 * ranges, AMR, WED), enable the AFU if needed, and link the element
 * into the SPA.  SR is built for a userspace (HV=0, PR=1) context.
 */
static int attach_afu_directed(struct cxl_context *ctx, u64 wed, u64 amr)
{
	u64 sr;
	int r, result;

	assign_psn_space(ctx);

	ctx->elem->ctxtime = 0; /* disable */
	ctx->elem->lpid = cpu_to_be32(mfspr(SPRN_LPID));
	ctx->elem->haurp = 0; /* disable */
	ctx->elem->sdr = cpu_to_be64(mfspr(SPRN_SDR1));

	sr = 0;
	if (ctx->master)
		sr |= CXL_PSL_SR_An_MP;
	if (mfspr(SPRN_LPCR) & LPCR_TC)
		sr |= CXL_PSL_SR_An_TC;
	/* HV=0, PR=1, R=1 for userspace
	 * For kernel contexts: this would need to change
	 */
	sr |= CXL_PSL_SR_An_PR | CXL_PSL_SR_An_R;
	set_endian(sr);
	sr &= ~(CXL_PSL_SR_An_HV);
	if (!test_tsk_thread_flag(current, TIF_32BIT))
		sr |= CXL_PSL_SR_An_SF;
	ctx->elem->common.pid = cpu_to_be32(current->pid);
	ctx->elem->common.tid = 0;
	ctx->elem->sr = cpu_to_be64(sr);

	ctx->elem->common.csrp = 0; /* disable */
	ctx->elem->common.aurp0 = 0; /* disable */
	ctx->elem->common.aurp1 = 0; /* disable */

	cxl_prefault(ctx, wed);

	ctx->elem->common.sstp0 = cpu_to_be64(ctx->sstp0);
	ctx->elem->common.sstp1 = cpu_to_be64(ctx->sstp1);

	for (r = 0; r < CXL_IRQ_RANGES; r++) {
		ctx->elem->ivte_offsets[r] = cpu_to_be16(ctx->irqs.offset[r]);
		ctx->elem->ivte_ranges[r] = cpu_to_be16(ctx->irqs.range[r]);
	}

	ctx->elem->common.amr = cpu_to_be64(amr);
	ctx->elem->common.wed = cpu_to_be64(wed);

	/* first guy needs to enable */
	if ((result = afu_check_and_enable(ctx->afu)))
		return result;

	add_process_element(ctx);

	return 0;
}

/* Tear down AFU-directed mode: devices, sysfs, reset, purge, SPA. */
static int deactivate_afu_directed(struct cxl_afu *afu)
{
	dev_info(&afu->dev, "Deactivating AFU directed mode\n");

	afu->current_mode = 0;
	afu->num_procs = 0;

	cxl_sysfs_afu_m_remove(afu);
	cxl_chardev_afu_remove(afu);

	cxl_afu_reset(afu);
	cxl_afu_disable(afu);
	cxl_psl_purge(afu);

	release_spa(afu);

	return 0;
}

/*
 * Switch the AFU into dedicated-process mode: single process, no SPA,
 * per-process registers programmed directly.
 */
static int activate_dedicated_process(struct cxl_afu *afu)
{
	dev_info(&afu->dev, "Activating dedicated process mode\n");

	cxl_p1n_write(afu, CXL_PSL_SCNTL_An, CXL_PSL_SCNTL_An_PM_Process);

	cxl_p1n_write(afu, CXL_PSL_CtxTime_An, 0); /* disable */
	cxl_p1n_write(afu, CXL_PSL_SPAP_An, 0);    /* disable */
	cxl_p1n_write(afu, CXL_PSL_AMOR_An, 0xFFFFFFFFFFFFFFFFULL);
	cxl_p1n_write(afu, CXL_PSL_LPID_An, mfspr(SPRN_LPID));
	cxl_p1n_write(afu, CXL_HAURP_An, 0);       /* disable */
	cxl_p1n_write(afu, CXL_PSL_SDR_An, mfspr(SPRN_SDR1));

	cxl_p2n_write(afu, CXL_CSRP_An, 0);        /* disable */
	cxl_p2n_write(afu, CXL_AURP0_An, 0);       /* disable */
	cxl_p2n_write(afu, CXL_AURP1_An, 0);       /* disable */

	afu->current_mode = CXL_MODE_DEDICATED;
	afu->num_procs = 1;

	return cxl_chardev_d_afu_add(afu);
}

/*
 * Attach a context in dedicated-process mode: program SR, PID/TID,
 * segment table, IVTE offset/limit and AMR registers directly, reset
 * the AFU, set the WED and enable.
 */
static int attach_dedicated(struct cxl_context *ctx, u64 wed, u64 amr)
{
	struct cxl_afu *afu = ctx->afu;
	u64 sr;
	int rc;

	sr = 0;
	set_endian(sr);
	if (ctx->master)
		sr |= CXL_PSL_SR_An_MP;
	if (mfspr(SPRN_LPCR) & LPCR_TC)
		sr |= CXL_PSL_SR_An_TC;
	sr |= CXL_PSL_SR_An_PR | CXL_PSL_SR_An_R;
	if (!test_tsk_thread_flag(current, TIF_32BIT))
		sr |= CXL_PSL_SR_An_SF;
	cxl_p2n_write(afu, CXL_PSL_PID_TID_An, (u64)current->pid << 32);
	cxl_p1n_write(afu, CXL_PSL_SR_An, sr);

	if ((rc = cxl_write_sstp(afu, ctx->sstp0, ctx->sstp1)))
		return rc;

	cxl_prefault(ctx, wed);

	cxl_p1n_write(afu, CXL_PSL_IVTE_Offset_An,
		       (((u64)ctx->irqs.offset[0] & 0xffff) << 48) |
		       (((u64)ctx->irqs.offset[1] & 0xffff) << 32) |
		       (((u64)ctx->irqs.offset[2] & 0xffff) << 16) |
			((u64)ctx->irqs.offset[3] & 0xffff));
	cxl_p1n_write(afu, CXL_PSL_IVTE_Limit_An, (u64)
		       (((u64)ctx->irqs.range[0] & 0xffff) << 48) |
		       (((u64)ctx->irqs.range[1] & 0xffff) << 32) |
		       (((u64)ctx->irqs.range[2] & 0xffff) << 16) |
			((u64)ctx->irqs.range[3] & 0xffff));

	cxl_p2n_write(afu, CXL_PSL_AMR_An, amr);

	/* master only context for dedicated */
	assign_psn_space(ctx);

	if ((rc = cxl_afu_reset(afu)))
		return rc;

	cxl_p2n_write(afu, CXL_PSL_WED_An, wed);

	return afu_enable(afu);
}

/* Tear down dedicated-process mode and remove its char device. */
static int deactivate_dedicated_process(struct cxl_afu *afu)
{
	dev_info(&afu->dev, "Deactivating dedicated process mode\n");

	afu->current_mode = 0;
	afu->num_procs = 0;

	cxl_chardev_afu_remove(afu);

	return 0;
}

/* Deactivate the given mode (helper shared with the mode-change path). */
int _cxl_afu_deactivate_mode(struct cxl_afu *afu, int mode)
{
	if (mode == CXL_MODE_DIRECTED)
		return deactivate_afu_directed(afu);
	if (mode == CXL_MODE_DEDICATED)
		return deactivate_dedicated_process(afu);
	return 0;
}

/* Deactivate whatever mode the AFU is currently in. */
int cxl_afu_deactivate_mode(struct cxl_afu *afu)
{
	return _cxl_afu_deactivate_mode(afu, afu->current_mode);
}

/* Activate @mode if the AFU supports it; -EINVAL otherwise. */
int cxl_afu_activate_mode(struct cxl_afu *afu, int mode)
{
	if (!mode)
		return 0;
	if (!(mode & afu->modes_supported))
		return -EINVAL;

	if (mode == CXL_MODE_DIRECTED)
		return activate_afu_directed(afu);
	if (mode == CXL_MODE_DEDICATED)
		return activate_dedicated_process(afu);

	return -EINVAL;
}

/* Dispatch an attach request according to the AFU's current mode. */
int cxl_attach_process(struct cxl_context *ctx, bool kernel, u64 wed, u64 amr)
{
	ctx->kernel = kernel;
	if (ctx->afu->current_mode == CXL_MODE_DIRECTED)
		return attach_afu_directed(ctx, wed, amr);

	if (ctx->afu->current_mode == CXL_MODE_DEDICATED)
		return attach_dedicated(ctx, wed, amr);

	return -EINVAL;
}

/* Dedicated-mode detach: reset, disable and purge the whole AFU. */
static inline int detach_process_native_dedicated(struct cxl_context *ctx)
{
	cxl_afu_reset(ctx->afu);
	cxl_afu_disable(ctx->afu);
	cxl_psl_purge(ctx->afu);
	return 0;
}

/* AFU-directed detach: terminate then remove this context's element. */
static inline int detach_process_native_afu_directed(struct cxl_context *ctx)
{
	if (!ctx->pe_inserted)
		return 0;
	if (terminate_process_element(ctx))
		return -1;
	if (remove_process_element(ctx))
		return -1;

	return 0;
}

/* Dispatch a detach request according to the AFU's current mode. */
int cxl_detach_process(struct cxl_context *ctx)
{
	if (ctx->afu->current_mode == CXL_MODE_DEDICATED)
		return detach_process_native_dedicated(ctx);

	return detach_process_native_afu_directed(ctx);
}

/* Snapshot the fault/error registers for interrupt handling. */
int cxl_get_irq(struct cxl_context *ctx, struct cxl_irq_info *info)
{
	u64 pidtid;

	info->dsisr = cxl_p2n_read(ctx->afu, CXL_PSL_DSISR_An);
	info->dar = cxl_p2n_read(ctx->afu, CXL_PSL_DAR_An);
	info->dsr = cxl_p2n_read(ctx->afu, CXL_PSL_DSR_An);
	pidtid = cxl_p2n_read(ctx->afu, CXL_PSL_PID_TID_An);
	info->pid = pidtid >> 32;
	info->tid = pidtid & 0xffffffff;
	info->afu_err = cxl_p2n_read(ctx->afu, CXL_AFU_ERR_An);
	info->errstat = cxl_p2n_read(ctx->afu, CXL_PSL_ErrStat_An);

	return 0;
}

/* Clear the PE bit in DSISR and write-1-to-clear the error status. */
static void recover_psl_err(struct cxl_afu *afu, u64 errstat)
{
	u64 dsisr;

	pr_devel("RECOVERING FROM PSL ERROR... (0x%.16llx)\n", errstat);

	/* Clear PSL_DSISR[PE] */
	dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
	cxl_p2n_write(afu, CXL_PSL_DSISR_An, dsisr & ~CXL_PSL_DSISR_An_PE);

	/* Write 1s to clear error status bits */
	cxl_p2n_write(afu, CXL_PSL_ErrStat_An, errstat);
}

/* Acknowledge a translation fault and/or recover from a PSL error. */
int cxl_ack_irq(struct cxl_context *ctx, u64 tfc, u64 psl_reset_mask)
{
	if (tfc)
		cxl_p2n_write(ctx->afu, CXL_PSL_TFC_An, tfc);
	if (psl_reset_mask)
		recover_psl_err(ctx->afu, psl_reset_mask);

	return 0;
}

/* All-ones readback of SCNTL indicates the card has gone away (EEH). */
int cxl_check_error(struct cxl_afu *afu)
{
	return (cxl_p1n_read(afu, CXL_PSL_SCNTL_An) == ~0ULL);
}
gpl-2.0
lithid/furnace_kernel_lge_hammerhead
net/ipv4/tcp_cong.c
1517
10667
/*
 * Pluggable TCP congestion control support and newReno
 * congestion control.
 * Based on ideas from I/O scheduler support and Web100.
 *
 * Copyright (C) 2005 Stephen Hemminger <shemminger@osdl.org>
 */

#define pr_fmt(fmt) "TCP: " fmt

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/gfp.h>
#include <net/tcp.h>

int sysctl_tcp_max_ssthresh = 0;

/* Protects modifications to tcp_cong_list; readers use RCU. */
static DEFINE_SPINLOCK(tcp_cong_list_lock);
static LIST_HEAD(tcp_cong_list);

/* Simple linear search, don't expect many entries! */
static struct tcp_congestion_ops *tcp_ca_find(const char *name)
{
	struct tcp_congestion_ops *e;

	list_for_each_entry_rcu(e, &tcp_cong_list, list) {
		if (strcmp(e->name, name) == 0)
			return e;
	}

	return NULL;
}

/*
 * Attach new congestion control algorithm to the list
 * of available options.
 */
int tcp_register_congestion_control(struct tcp_congestion_ops *ca)
{
	int ret = 0;

	/* all algorithms must implement ssthresh and cong_avoid ops */
	if (!ca->ssthresh || !ca->cong_avoid) {
		pr_err("%s does not implement required ops\n", ca->name);
		return -EINVAL;
	}

	spin_lock(&tcp_cong_list_lock);
	if (tcp_ca_find(ca->name)) {
		pr_notice("%s already registered\n", ca->name);
		ret = -EEXIST;
	} else {
		list_add_tail_rcu(&ca->list, &tcp_cong_list);
		pr_info("%s registered\n", ca->name);
	}
	spin_unlock(&tcp_cong_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(tcp_register_congestion_control);

/*
 * Remove congestion control algorithm, called from
 * the module's remove function.  Module ref counts are used
 * to ensure that this can't be done till all sockets using
 * that method are closed.
 */
void tcp_unregister_congestion_control(struct tcp_congestion_ops *ca)
{
	spin_lock(&tcp_cong_list_lock);
	list_del_rcu(&ca->list);
	spin_unlock(&tcp_cong_list_lock);
}
EXPORT_SYMBOL_GPL(tcp_unregister_congestion_control);

/* Assign choice of congestion control. */
void tcp_init_congestion_control(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_congestion_ops *ca;

	/* if no choice made yet assign the current value set as default */
	if (icsk->icsk_ca_ops == &tcp_init_congestion_ops) {
		rcu_read_lock();
		list_for_each_entry_rcu(ca, &tcp_cong_list, list) {
			if (try_module_get(ca->owner)) {
				icsk->icsk_ca_ops = ca;
				break;
			}

			/* fallback to next available */
		}
		rcu_read_unlock();
	}

	if (icsk->icsk_ca_ops->init)
		icsk->icsk_ca_ops->init(sk);
}

/* Manage refcounts on socket close. */
void tcp_cleanup_congestion_control(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ca_ops->release)
		icsk->icsk_ca_ops->release(sk);
	module_put(icsk->icsk_ca_ops->owner);
}

/* Used by sysctl to change default congestion control */
int tcp_set_default_congestion_control(const char *name)
{
	struct tcp_congestion_ops *ca;
	int ret = -ENOENT;

	spin_lock(&tcp_cong_list_lock);
	ca = tcp_ca_find(name);
#ifdef CONFIG_MODULES
	/* Drop the lock around request_module(): it may sleep. */
	if (!ca && capable(CAP_NET_ADMIN)) {
		spin_unlock(&tcp_cong_list_lock);

		request_module("tcp_%s", name);
		spin_lock(&tcp_cong_list_lock);
		ca = tcp_ca_find(name);
	}
#endif

	if (ca) {
		ca->flags |= TCP_CONG_NON_RESTRICTED;	/* default is always allowed */
		list_move(&ca->list, &tcp_cong_list);
		ret = 0;
	}
	spin_unlock(&tcp_cong_list_lock);

	return ret;
}

/* Set default value from kernel configuration at bootup */
static int __init tcp_congestion_default(void)
{
	return tcp_set_default_congestion_control(CONFIG_DEFAULT_TCP_CONG);
}
late_initcall(tcp_congestion_default);

/* Build string with list of available congestion control values */
void tcp_get_available_congestion_control(char *buf, size_t maxlen)
{
	struct tcp_congestion_ops *ca;
	size_t offs = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(ca, &tcp_cong_list, list) {
		offs += snprintf(buf + offs, maxlen - offs,
				 "%s%s",
				 offs == 0 ? "" : " ", ca->name);
	}
	rcu_read_unlock();
}

/* Get current default congestion control */
void tcp_get_default_congestion_control(char *name)
{
	struct tcp_congestion_ops *ca;
	/* We will always have reno... */
	BUG_ON(list_empty(&tcp_cong_list));

	rcu_read_lock();
	ca = list_entry(tcp_cong_list.next, struct tcp_congestion_ops, list);
	strncpy(name, ca->name, TCP_CA_NAME_MAX);
	rcu_read_unlock();
}

/* Built list of non-restricted congestion control values */
void tcp_get_allowed_congestion_control(char *buf, size_t maxlen)
{
	struct tcp_congestion_ops *ca;
	size_t offs = 0;

	*buf = '\0';
	rcu_read_lock();
	list_for_each_entry_rcu(ca, &tcp_cong_list, list) {
		if (!(ca->flags & TCP_CONG_NON_RESTRICTED))
			continue;
		offs += snprintf(buf + offs, maxlen - offs,
				 "%s%s",
				 offs == 0 ? "" : " ", ca->name);
	}
	rcu_read_unlock();
}

/* Change list of non-restricted congestion control */
int tcp_set_allowed_congestion_control(char *val)
{
	struct tcp_congestion_ops *ca;
	char *saved_clone, *clone, *name;
	int ret = 0;

	saved_clone = clone = kstrdup(val, GFP_USER);
	if (!clone)
		return -ENOMEM;

	spin_lock(&tcp_cong_list_lock);
	/* pass 1 check for bad entries */
	while ((name = strsep(&clone, " ")) && *name) {
		ca = tcp_ca_find(name);
		if (!ca) {
			ret = -ENOENT;
			goto out;
		}
	}

	/* pass 2 clear old values */
	list_for_each_entry_rcu(ca, &tcp_cong_list, list)
		ca->flags &= ~TCP_CONG_NON_RESTRICTED;

	/* pass 3 mark as allowed */
	while ((name = strsep(&val, " ")) && *name) {
		ca = tcp_ca_find(name);
		WARN_ON(!ca);
		if (ca)
			ca->flags |= TCP_CONG_NON_RESTRICTED;
	}
out:
	spin_unlock(&tcp_cong_list_lock);
	kfree(saved_clone);

	return ret;
}

/* Change congestion control for socket */
int tcp_set_congestion_control(struct sock *sk, const char *name)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_congestion_ops *ca;
	int err = 0;

	rcu_read_lock();
	ca = tcp_ca_find(name);

	/* no change asking for existing value */
	if (ca == icsk->icsk_ca_ops)
		goto out;

#ifdef CONFIG_MODULES
	/* not found attempt to autoload module */
	if (!ca && capable(CAP_NET_ADMIN)) {
		rcu_read_unlock();
		request_module("tcp_%s", name);
		rcu_read_lock();
		ca = tcp_ca_find(name);
	}
#endif
	if (!ca)
		err = -ENOENT;

	else if (!((ca->flags & TCP_CONG_NON_RESTRICTED) || capable(CAP_NET_ADMIN)))
		err = -EPERM;

	else if (!try_module_get(ca->owner))
		err = -EBUSY;

	else {
		tcp_cleanup_congestion_control(sk);
		icsk->icsk_ca_ops = ca;

		if (sk->sk_state != TCP_CLOSE && icsk->icsk_ca_ops->init)
			icsk->icsk_ca_ops->init(sk);
	}
 out:
	rcu_read_unlock();
	return err;
}

/* RFC2861 Check whether we are limited by application or congestion window
 * This is the inverse of cwnd check in tcp_tso_should_defer
 */
int tcp_is_cwnd_limited(const struct sock *sk, u32 in_flight)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	u32 left;

	if (in_flight >= tp->snd_cwnd)
		return 1;

	left = tp->snd_cwnd - in_flight;
	if (sk_can_gso(sk) &&
	    left * sysctl_tcp_tso_win_divisor < tp->snd_cwnd &&
	    left * tp->mss_cache < sk->sk_gso_max_size &&
	    left < sk->sk_gso_max_segs)
		return 1;
	return left <= tcp_max_tso_deferred_mss(tp);
}
EXPORT_SYMBOL_GPL(tcp_is_cwnd_limited);

/*
 * Slow start is used when congestion window is less than slow start
 * threshold.  This version implements the basic RFC2581 version
 * and optionally supports:
 * 	RFC3742 Limited Slow Start  	  - growth limited to max_ssthresh
 * 	RFC3465 Appropriate Byte Counting - growth limited by bytes acknowledged
 */
void tcp_slow_start(struct tcp_sock *tp)
{
	int cnt; /* increase in packets */

	/* RFC3465: ABC Slow start
	 * Increase only after a full MSS of bytes is acked
	 *
	 * TCP sender SHOULD increase cwnd by the number of
	 * previously unacknowledged bytes ACKed by each incoming
	 * acknowledgment, provided the increase is not more than L
	 */
	if (sysctl_tcp_abc && tp->bytes_acked < tp->mss_cache)
		return;

	if (sysctl_tcp_max_ssthresh > 0 && tp->snd_cwnd > sysctl_tcp_max_ssthresh)
		cnt = sysctl_tcp_max_ssthresh >> 1;	/* limited slow start */
	else
		cnt = tp->snd_cwnd;			/* exponential increase */

	/* RFC3465: ABC
	 * We MAY increase by 2 if discovered delayed ack
	 */
	if (sysctl_tcp_abc > 1 && tp->bytes_acked >= 2*tp->mss_cache)
		cnt <<= 1;
	tp->bytes_acked = 0;

	tp->snd_cwnd_cnt += cnt;
	while (tp->snd_cwnd_cnt >= tp->snd_cwnd) {
		tp->snd_cwnd_cnt -= tp->snd_cwnd;
		if (tp->snd_cwnd < tp->snd_cwnd_clamp)
			tp->snd_cwnd++;
	}
}
EXPORT_SYMBOL_GPL(tcp_slow_start);

/* In theory this is tp->snd_cwnd += 1 / tp->snd_cwnd (or alternative w) */
void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w)
{
	if (tp->snd_cwnd_cnt >= w) {
		if (tp->snd_cwnd < tp->snd_cwnd_clamp)
			tp->snd_cwnd++;
		tp->snd_cwnd_cnt = 0;
	} else {
		tp->snd_cwnd_cnt++;
	}
}
EXPORT_SYMBOL_GPL(tcp_cong_avoid_ai);

/*
 * TCP Reno congestion control
 * This is special case used for fallback as well.
 */
/* This is Jacobson's slow start and congestion avoidance.
 * SIGCOMM '88, p. 328.
 */
void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (!tcp_is_cwnd_limited(sk, in_flight))
		return;

	/* In "safe" area, increase. */
	if (tp->snd_cwnd <= tp->snd_ssthresh)
		tcp_slow_start(tp);

	/* In dangerous area, increase slowly. */
	else if (sysctl_tcp_abc) {
		/* RFC3465: Appropriate Byte Count
		 * increase once for each full cwnd acked
		 */
		if (tp->bytes_acked >= tp->snd_cwnd*tp->mss_cache) {
			tp->bytes_acked -= tp->snd_cwnd*tp->mss_cache;
			if (tp->snd_cwnd < tp->snd_cwnd_clamp)
				tp->snd_cwnd++;
		}
	} else {
		tcp_cong_avoid_ai(tp, tp->snd_cwnd);
	}
}
EXPORT_SYMBOL_GPL(tcp_reno_cong_avoid);

/* Slow start threshold is half the congestion window (min 2) */
u32 tcp_reno_ssthresh(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	return max(tp->snd_cwnd >> 1U, 2U);
}
EXPORT_SYMBOL_GPL(tcp_reno_ssthresh);

/* Lower bound on congestion window with halving. */
u32 tcp_reno_min_cwnd(const struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	return tp->snd_ssthresh/2;
}
EXPORT_SYMBOL_GPL(tcp_reno_min_cwnd);

struct tcp_congestion_ops tcp_reno = {
	.flags		= TCP_CONG_NON_RESTRICTED,
	.name		= "reno",
	.owner		= THIS_MODULE,
	.ssthresh	= tcp_reno_ssthresh,
	.cong_avoid	= tcp_reno_cong_avoid,
	.min_cwnd	= tcp_reno_min_cwnd,
};

/* Initial congestion control used (until SYN)
 * really reno under another name so we can tell difference
 * during tcp_set_default_congestion_control
 */
struct tcp_congestion_ops tcp_init_congestion_ops  = {
	.name		= "",
	.owner		= THIS_MODULE,
	.ssthresh	= tcp_reno_ssthresh,
	.cong_avoid	= tcp_reno_cong_avoid,
	.min_cwnd	= tcp_reno_min_cwnd,
};
EXPORT_SYMBOL_GPL(tcp_init_congestion_ops);
gpl-2.0
CandyDevices/kernel_motorola_msm8226
net/ipv4/tcp_cong.c
1517
10667
/*
 * Pluggable TCP congestion control support and newReno
 * congestion control.
 * Based on ideas from I/O scheduler support and Web100.
 *
 * Copyright (C) 2005 Stephen Hemminger <shemminger@osdl.org>
 */

#define pr_fmt(fmt) "TCP: " fmt

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/gfp.h>
#include <net/tcp.h>

/* RFC3742 limited slow start threshold; 0 disables the limit (see tcp_slow_start). */
int sysctl_tcp_max_ssthresh = 0;

/* Writers (register/unregister/reorder) take this lock; readers walk the list under RCU. */
static DEFINE_SPINLOCK(tcp_cong_list_lock);
static LIST_HEAD(tcp_cong_list);

/* Simple linear search, don't expect many entries! */
static struct tcp_congestion_ops *tcp_ca_find(const char *name)
{
	struct tcp_congestion_ops *e;

	list_for_each_entry_rcu(e, &tcp_cong_list, list) {
		if (strcmp(e->name, name) == 0)
			return e;
	}

	return NULL;
}

/*
 * Attach new congestion control algorithm to the list
 * of available options.
 * Returns 0 on success, -EINVAL if mandatory ops are missing,
 * -EEXIST if an algorithm with the same name is already registered.
 */
int tcp_register_congestion_control(struct tcp_congestion_ops *ca)
{
	int ret = 0;

	/* all algorithms must implement ssthresh and cong_avoid ops */
	if (!ca->ssthresh || !ca->cong_avoid) {
		pr_err("%s does not implement required ops\n", ca->name);
		return -EINVAL;
	}

	spin_lock(&tcp_cong_list_lock);
	if (tcp_ca_find(ca->name)) {
		pr_notice("%s already registered\n", ca->name);
		ret = -EEXIST;
	} else {
		list_add_tail_rcu(&ca->list, &tcp_cong_list);
		pr_info("%s registered\n", ca->name);
	}
	spin_unlock(&tcp_cong_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(tcp_register_congestion_control);

/*
 * Remove congestion control algorithm, called from
 * the module's remove function.  Module ref counts are used
 * to ensure that this can't be done till all sockets using
 * that method are closed.
 */
void tcp_unregister_congestion_control(struct tcp_congestion_ops *ca)
{
	spin_lock(&tcp_cong_list_lock);
	list_del_rcu(&ca->list);
	spin_unlock(&tcp_cong_list_lock);
}
EXPORT_SYMBOL_GPL(tcp_unregister_congestion_control);

/* Assign choice of congestion control. */
void tcp_init_congestion_control(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_congestion_ops *ca;

	/* if no choice made yet assign the current value set as default */
	if (icsk->icsk_ca_ops == &tcp_init_congestion_ops) {
		rcu_read_lock();
		/* first entry is the default; skip any whose module is unloading */
		list_for_each_entry_rcu(ca, &tcp_cong_list, list) {
			if (try_module_get(ca->owner)) {
				icsk->icsk_ca_ops = ca;
				break;
			}

			/* fallback to next available */
		}
		rcu_read_unlock();
	}

	if (icsk->icsk_ca_ops->init)
		icsk->icsk_ca_ops->init(sk);
}

/* Manage refcounts on socket close. */
void tcp_cleanup_congestion_control(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ca_ops->release)
		icsk->icsk_ca_ops->release(sk);
	/* drop the module reference taken when the algorithm was attached */
	module_put(icsk->icsk_ca_ops->owner);
}

/* Used by sysctl to change default congestion control */
int tcp_set_default_congestion_control(const char *name)
{
	struct tcp_congestion_ops *ca;
	int ret = -ENOENT;

	spin_lock(&tcp_cong_list_lock);
	ca = tcp_ca_find(name);
#ifdef CONFIG_MODULES
	if (!ca && capable(CAP_NET_ADMIN)) {
		/* drop the lock across request_module(), which may sleep */
		spin_unlock(&tcp_cong_list_lock);
		request_module("tcp_%s", name);
		spin_lock(&tcp_cong_list_lock);
		ca = tcp_ca_find(name);
	}
#endif

	if (ca) {
		ca->flags |= TCP_CONG_NON_RESTRICTED;	/* default is always allowed */
		/* move to the head so tcp_init_congestion_control() picks it */
		list_move(&ca->list, &tcp_cong_list);
		ret = 0;
	}
	spin_unlock(&tcp_cong_list_lock);

	return ret;
}

/* Set default value from kernel configuration at bootup */
static int __init tcp_congestion_default(void)
{
	return tcp_set_default_congestion_control(CONFIG_DEFAULT_TCP_CONG);
}
late_initcall(tcp_congestion_default);

/* Build string with list of available congestion control values */
void tcp_get_available_congestion_control(char *buf, size_t maxlen)
{
	struct tcp_congestion_ops *ca;
	size_t offs = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(ca, &tcp_cong_list, list) {
		offs += snprintf(buf + offs, maxlen - offs,
				 "%s%s",
				 offs == 0 ? "" : " ", ca->name);
	}
	rcu_read_unlock();
}

/* Get current default congestion control */
void tcp_get_default_congestion_control(char *name)
{
	struct tcp_congestion_ops *ca;
	/* We will always have reno... */
	BUG_ON(list_empty(&tcp_cong_list));

	rcu_read_lock();
	ca = list_entry(tcp_cong_list.next, struct tcp_congestion_ops, list);
	strncpy(name, ca->name, TCP_CA_NAME_MAX);
	rcu_read_unlock();
}

/* Built list of non-restricted congestion control values */
void tcp_get_allowed_congestion_control(char *buf, size_t maxlen)
{
	struct tcp_congestion_ops *ca;
	size_t offs = 0;

	*buf = '\0';
	rcu_read_lock();
	list_for_each_entry_rcu(ca, &tcp_cong_list, list) {
		if (!(ca->flags & TCP_CONG_NON_RESTRICTED))
			continue;
		offs += snprintf(buf + offs, maxlen - offs,
				 "%s%s",
				 offs == 0 ? "" : " ", ca->name);
	}
	rcu_read_unlock();
}

/* Change list of non-restricted congestion control */
int tcp_set_allowed_congestion_control(char *val)
{
	struct tcp_congestion_ops *ca;
	char *saved_clone, *clone, *name;
	int ret = 0;

	/* strsep() consumes the string, so work on a copy for the check pass */
	saved_clone = clone = kstrdup(val, GFP_USER);
	if (!clone)
		return -ENOMEM;

	spin_lock(&tcp_cong_list_lock);
	/* pass 1 check for bad entries */
	while ((name = strsep(&clone, " ")) && *name) {
		ca = tcp_ca_find(name);
		if (!ca) {
			ret = -ENOENT;
			goto out;
		}
	}

	/* pass 2 clear old values */
	list_for_each_entry_rcu(ca, &tcp_cong_list, list)
		ca->flags &= ~TCP_CONG_NON_RESTRICTED;

	/* pass 3 mark as allowed */
	while ((name = strsep(&val, " ")) && *name) {
		ca = tcp_ca_find(name);
		WARN_ON(!ca);
		if (ca)
			ca->flags |= TCP_CONG_NON_RESTRICTED;
	}
out:
	spin_unlock(&tcp_cong_list_lock);
	kfree(saved_clone);

	return ret;
}

/* Change congestion control for socket */
int tcp_set_congestion_control(struct sock *sk, const char *name)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_congestion_ops *ca;
	int err = 0;

	rcu_read_lock();
	ca = tcp_ca_find(name);

	/* no change asking for existing value */
	if (ca == icsk->icsk_ca_ops)
		goto out;

#ifdef CONFIG_MODULES
	/* not found attempt to autoload module */
	if (!ca && capable(CAP_NET_ADMIN)) {
		rcu_read_unlock();
		request_module("tcp_%s", name);
		rcu_read_lock();
		ca = tcp_ca_find(name);
	}
#endif
	if (!ca)
		err = -ENOENT;

	else if (!((ca->flags & TCP_CONG_NON_RESTRICTED) || capable(CAP_NET_ADMIN)))
		err = -EPERM;

	else if (!try_module_get(ca->owner))
		err = -EBUSY;

	else {
		/* releases the old algorithm's module ref before switching */
		tcp_cleanup_congestion_control(sk);
		icsk->icsk_ca_ops = ca;

		if (sk->sk_state != TCP_CLOSE && icsk->icsk_ca_ops->init)
			icsk->icsk_ca_ops->init(sk);
	}
out:
	rcu_read_unlock();
	return err;
}

/* RFC2861 Check whether we are limited by application or congestion window
 * This is the inverse of cwnd check in tcp_tso_should_defer
 */
int tcp_is_cwnd_limited(const struct sock *sk, u32 in_flight)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	u32 left;

	if (in_flight >= tp->snd_cwnd)
		return 1;

	left = tp->snd_cwnd - in_flight;
	if (sk_can_gso(sk) &&
	    left * sysctl_tcp_tso_win_divisor < tp->snd_cwnd &&
	    left * tp->mss_cache < sk->sk_gso_max_size &&
	    left < sk->sk_gso_max_segs)
		return 1;
	return left <= tcp_max_tso_deferred_mss(tp);
}
EXPORT_SYMBOL_GPL(tcp_is_cwnd_limited);

/*
 * Slow start is used when congestion window is less than slow start
 * threshold. This version implements the basic RFC2581 version
 * and optionally supports:
 *	RFC3742 Limited Slow Start	  - growth limited to max_ssthresh
 *	RFC3465 Appropriate Byte Counting - growth limited by bytes acknowledged
 */
void tcp_slow_start(struct tcp_sock *tp)
{
	int cnt;			/* increase in packets */

	/* RFC3465: ABC Slow start
	 * Increase only after a full MSS of bytes is acked
	 *
	 * TCP sender SHOULD increase cwnd by the number of
	 * previously unacknowledged bytes ACKed by each incoming
	 * acknowledgment, provided the increase is not more than L
	 */
	if (sysctl_tcp_abc && tp->bytes_acked < tp->mss_cache)
		return;

	if (sysctl_tcp_max_ssthresh > 0 && tp->snd_cwnd > sysctl_tcp_max_ssthresh)
		cnt = sysctl_tcp_max_ssthresh >> 1;	/* limited slow start */
	else
		cnt = tp->snd_cwnd;			/* exponential increase */

	/* RFC3465: ABC
	 * We MAY increase by 2 if discovered delayed ack
	 */
	if (sysctl_tcp_abc > 1 && tp->bytes_acked >= 2*tp->mss_cache)
		cnt <<= 1;
	tp->bytes_acked = 0;

	tp->snd_cwnd_cnt += cnt;
	/* convert accumulated credit into cwnd increments, bounded by the clamp */
	while (tp->snd_cwnd_cnt >= tp->snd_cwnd) {
		tp->snd_cwnd_cnt -= tp->snd_cwnd;
		if (tp->snd_cwnd < tp->snd_cwnd_clamp)
			tp->snd_cwnd++;
	}
}
EXPORT_SYMBOL_GPL(tcp_slow_start);

/* In theory this is tp->snd_cwnd += 1 / tp->snd_cwnd (or alternative w) */
void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w)
{
	if (tp->snd_cwnd_cnt >= w) {
		if (tp->snd_cwnd < tp->snd_cwnd_clamp)
			tp->snd_cwnd++;
		tp->snd_cwnd_cnt = 0;
	} else {
		tp->snd_cwnd_cnt++;
	}
}
EXPORT_SYMBOL_GPL(tcp_cong_avoid_ai);

/*
 * TCP Reno congestion control
 * This is special case used for fallback as well.
 */
/* This is Jacobson's slow start and congestion avoidance.
 * SIGCOMM '88, p. 328.
 */
void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (!tcp_is_cwnd_limited(sk, in_flight))
		return;

	/* In "safe" area, increase. */
	if (tp->snd_cwnd <= tp->snd_ssthresh)
		tcp_slow_start(tp);

	/* In dangerous area, increase slowly. */
	else if (sysctl_tcp_abc) {
		/* RFC3465: Appropriate Byte Count
		 * increase once for each full cwnd acked
		 */
		if (tp->bytes_acked >= tp->snd_cwnd*tp->mss_cache) {
			tp->bytes_acked -= tp->snd_cwnd*tp->mss_cache;
			if (tp->snd_cwnd < tp->snd_cwnd_clamp)
				tp->snd_cwnd++;
		}
	} else {
		tcp_cong_avoid_ai(tp, tp->snd_cwnd);
	}
}
EXPORT_SYMBOL_GPL(tcp_reno_cong_avoid);

/* Slow start threshold is half the congestion window (min 2) */
u32 tcp_reno_ssthresh(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	return max(tp->snd_cwnd >> 1U, 2U);
}
EXPORT_SYMBOL_GPL(tcp_reno_ssthresh);

/* Lower bound on congestion window with halving. */
u32 tcp_reno_min_cwnd(const struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	return tp->snd_ssthresh/2;
}
EXPORT_SYMBOL_GPL(tcp_reno_min_cwnd);

struct tcp_congestion_ops tcp_reno = {
	.flags		= TCP_CONG_NON_RESTRICTED,
	.name		= "reno",
	.owner		= THIS_MODULE,
	.ssthresh	= tcp_reno_ssthresh,
	.cong_avoid	= tcp_reno_cong_avoid,
	.min_cwnd	= tcp_reno_min_cwnd,
};

/* Initial congestion control used (until SYN)
 * really reno under another name so we can tell difference
 * during tcp_set_default_congestion_control
 */
struct tcp_congestion_ops tcp_init_congestion_ops = {
	.name		= "",
	.owner		= THIS_MODULE,
	.ssthresh	= tcp_reno_ssthresh,
	.cong_avoid	= tcp_reno_cong_avoid,
	.min_cwnd	= tcp_reno_min_cwnd,
};
EXPORT_SYMBOL_GPL(tcp_init_congestion_ops);
gpl-2.0
actnextgendev/android_kernel_samsung_expressatt
kernel/trace/trace_stack.c
1773
7919
/*
 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/stacktrace.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/init.h>
#include <linux/fs.h>

#include "trace.h"

#define STACK_TRACE_ENTRIES 500

/* Deepest trace seen so far; ULONG_MAX marks unused slots. */
static unsigned long stack_dump_trace[STACK_TRACE_ENTRIES+1] =
	 { [0 ... (STACK_TRACE_ENTRIES)] = ULONG_MAX };
/* stack_dump_index[i]: stack depth (bytes from the top) at trace entry i. */
static unsigned stack_dump_index[STACK_TRACE_ENTRIES];

static struct stack_trace max_stack_trace = {
	.max_entries		= STACK_TRACE_ENTRIES,
	.entries		= stack_dump_trace,
};

static unsigned long max_stack_size;
/* Raw arch spinlock: taken with IRQs off and guarded by trace_active
 * to stay safe inside the tracing path itself. */
static arch_spinlock_t max_stack_lock =
	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

static int stack_trace_disabled __read_mostly;
/* Per-cpu recursion guard: non-zero means we are already tracing on this cpu. */
static DEFINE_PER_CPU(int, trace_active);
static DEFINE_MUTEX(stack_sysctl_mutex);

int stack_tracer_enabled;
static int last_stack_tracer_enabled;

/*
 * Measure current stack usage and, on a new maximum, record the full
 * backtrace and per-entry depths. Called from the ftrace callback.
 */
static inline void check_stack(void)
{
	unsigned long this_size, flags;
	unsigned long *p, *top, *start;
	int i;

	/* bytes used on the current thread stack */
	this_size = ((unsigned long)&this_size) & (THREAD_SIZE-1);
	this_size = THREAD_SIZE - this_size;

	if (this_size <= max_stack_size)
		return;

	/* we do not handle interrupt stacks yet */
	if (!object_is_on_stack(&this_size))
		return;

	local_irq_save(flags);
	arch_spin_lock(&max_stack_lock);

	/* a race could have already updated it */
	if (this_size <= max_stack_size)
		goto out;

	max_stack_size = this_size;

	max_stack_trace.nr_entries	= 0;
	max_stack_trace.skip		= 3;

	save_stack_trace(&max_stack_trace);

	/*
	 * Now find where in the stack these are.
	 */
	i = 0;
	start = &this_size;
	top = (unsigned long *)
		(((unsigned long)start & ~(THREAD_SIZE-1)) + THREAD_SIZE);

	/*
	 * Loop through all the entries. One of the entries may
	 * for some reason be missed on the stack, so we may
	 * have to account for them. If they are all there, this
	 * loop will only happen once. This code only takes place
	 * on a new max, so it is far from a fast path.
	 */
	while (i < max_stack_trace.nr_entries) {
		int found = 0;

		stack_dump_index[i] = this_size;
		p = start;

		/* scan the stack for the saved return address of entry i */
		for (; p < top && i < max_stack_trace.nr_entries; p++) {
			if (*p == stack_dump_trace[i]) {
				this_size = stack_dump_index[i++] =
					(top - p) * sizeof(unsigned long);
				found = 1;
				/* Start the search from here */
				start = p + 1;
			}
		}

		if (!found)
			i++;
	}

 out:
	arch_spin_unlock(&max_stack_lock);
	local_irq_restore(flags);
}

/* ftrace callback invoked on every traced function entry. */
static void
stack_trace_call(unsigned long ip, unsigned long parent_ip)
{
	int cpu;

	if (unlikely(!ftrace_enabled || stack_trace_disabled))
		return;

	preempt_disable_notrace();

	cpu = raw_smp_processor_id();
	/* no atomic needed, we only modify this variable by this cpu */
	if (per_cpu(trace_active, cpu)++ != 0)
		goto out;

	check_stack();

 out:
	per_cpu(trace_active, cpu)--;
	/* prevent recursion in schedule */
	preempt_enable_notrace();
}

static struct ftrace_ops trace_ops __read_mostly =
{
	.func = stack_trace_call,
	.flags = FTRACE_OPS_FL_GLOBAL,
};

/* debugfs read: report the recorded maximum stack size as decimal text. */
static ssize_t
stack_max_size_read(struct file *filp, char __user *ubuf,
		    size_t count, loff_t *ppos)
{
	unsigned long *ptr = filp->private_data;
	char buf[64];
	int r;

	r = snprintf(buf, sizeof(buf), "%ld\n", *ptr);
	if (r > sizeof(buf))
		r = sizeof(buf);
	return simple_read_from_buffer(ubuf, count, ppos, buf, r);
}

/* debugfs write: set (typically reset) the recorded maximum stack size. */
static ssize_t
stack_max_size_write(struct file *filp, const char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	long *ptr = filp->private_data;
	unsigned long val, flags;
	char buf[64];
	int ret;
	int cpu;

	if (count >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, count))
		return -EFAULT;

	buf[count] = 0;

	ret = strict_strtoul(buf, 10, &val);
	if (ret < 0)
		return ret;

	local_irq_save(flags);

	/*
	 * In case we trace inside arch_spin_lock() or after (NMI),
	 * we will cause circular lock, so we also need to increase
	 * the percpu trace_active here.
	 */
	cpu = smp_processor_id();
	per_cpu(trace_active, cpu)++;

	arch_spin_lock(&max_stack_lock);
	*ptr = val;
	arch_spin_unlock(&max_stack_lock);

	per_cpu(trace_active, cpu)--;
	local_irq_restore(flags);

	return count;
}

static const struct file_operations stack_max_size_fops = {
	.open		= tracing_open_generic,
	.read		= stack_max_size_read,
	.write		= stack_max_size_write,
	.llseek		= default_llseek,
};

/* seq_file helper: map *pos (1-based) onto a valid trace entry, or NULL at end. */
static void *
__next(struct seq_file *m, loff_t *pos)
{
	long n = *pos - 1;

	if (n >= max_stack_trace.nr_entries || stack_dump_trace[n] == ULONG_MAX)
		return NULL;

	m->private = (void *)n;
	return &m->private;
}

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	(*pos)++;
	return __next(m, pos);
}

/*
 * Start iteration: disable IRQs, bump the per-cpu recursion guard and take
 * max_stack_lock so the snapshot cannot change while we print it.
 * Undone in t_stop().
 */
static void *t_start(struct seq_file *m, loff_t *pos)
{
	int cpu;

	local_irq_disable();

	cpu = smp_processor_id();
	per_cpu(trace_active, cpu)++;

	arch_spin_lock(&max_stack_lock);

	if (*pos == 0)
		return SEQ_START_TOKEN;

	return __next(m, pos);
}

static void t_stop(struct seq_file *m, void *p)
{
	int cpu;

	arch_spin_unlock(&max_stack_lock);

	cpu = smp_processor_id();
	per_cpu(trace_active, cpu)--;

	local_irq_enable();
}

/* Print one trace entry as a resolved symbol name. */
static int trace_lookup_stack(struct seq_file *m, long i)
{
	unsigned long addr = stack_dump_trace[i];

	return seq_printf(m, "%pS\n", (void *)addr);
}

static void print_disabled(struct seq_file *m)
{
	seq_puts(m, "#\n"
		 "# Stack tracer disabled\n"
		 "#\n"
		 "# To enable the stack tracer, either add 'stacktrace' to the\n"
		 "# kernel command line\n"
		 "# or 'echo 1 > /proc/sys/kernel/stack_tracer_enabled'\n"
		 "#\n");
}

/* Emit the header (on SEQ_START_TOKEN) or one depth/size/location row. */
static int t_show(struct seq_file *m, void *v)
{
	long i;
	int size;

	if (v == SEQ_START_TOKEN) {
		seq_printf(m, " Depth Size Location"
			   " (%d entries)\n"
			   " ----- ---- --------\n",
			   max_stack_trace.nr_entries - 1);

		if (!stack_tracer_enabled && !max_stack_size)
			print_disabled(m);

		return 0;
	}

	i = *(long *)v;

	if (i >= max_stack_trace.nr_entries ||
	    stack_dump_trace[i] == ULONG_MAX)
		return 0;

	/* size of this frame = depth at this entry minus depth at the next */
	if (i+1 == max_stack_trace.nr_entries ||
	    stack_dump_trace[i+1] == ULONG_MAX)
		size = stack_dump_index[i];
	else
		size = stack_dump_index[i] - stack_dump_index[i+1];

	seq_printf(m, "%3ld) %8d %5d ", i, stack_dump_index[i], size);

	trace_lookup_stack(m, i);

	return 0;
}

static const struct seq_operations stack_trace_seq_ops = {
	.start		= t_start,
	.next		= t_next,
	.stop		= t_stop,
	.show		= t_show,
};

static int stack_trace_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &stack_trace_seq_ops);
}

static const struct file_operations stack_trace_fops = {
	.open		= stack_trace_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

/*
 * sysctl handler for kernel.stack_tracer_enabled: (un)registers the ftrace
 * callback only when the value actually changes.
 */
int
stack_trace_sysctl(struct ctl_table *table, int write,
		   void __user *buffer, size_t *lenp,
		   loff_t *ppos)
{
	int ret;

	mutex_lock(&stack_sysctl_mutex);

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (ret || !write ||
	    (last_stack_tracer_enabled == !!stack_tracer_enabled))
		goto out;

	last_stack_tracer_enabled = !!stack_tracer_enabled;

	if (stack_tracer_enabled)
		register_ftrace_function(&trace_ops);
	else
		unregister_ftrace_function(&trace_ops);

 out:
	mutex_unlock(&stack_sysctl_mutex);
	return ret;
}

/* Early enable via the "stacktrace" kernel command-line parameter. */
static __init int enable_stacktrace(char *str)
{
	stack_tracer_enabled = 1;
	last_stack_tracer_enabled = 1;
	return 1;
}
__setup("stacktrace", enable_stacktrace);

/* Create the debugfs files and register the tracer if enabled at boot. */
static __init int stack_trace_init(void)
{
	struct dentry *d_tracer;

	d_tracer = tracing_init_dentry();

	trace_create_file("stack_max_size", 0644, d_tracer,
			&max_stack_size, &stack_max_size_fops);

	trace_create_file("stack_trace", 0444, d_tracer,
			NULL, &stack_trace_fops);

	if (stack_tracer_enabled)
		register_ftrace_function(&trace_ops);

	return 0;
}

device_initcall(stack_trace_init);
gpl-2.0
siddhartha100/Kernel
drivers/isdn/hisax/l3ni1.c
1773
77100
/* $Id: l3ni1.c,v 2.8.2.3 2004/01/13 14:31:25 keil Exp $ * * NI1 D-channel protocol * * Author Matt Henderson & Guy Ellis * Copyright by Traverse Technologies Pty Ltd, www.travers.com.au * * This software may be used and distributed according to the terms * of the GNU General Public License, incorporated herein by reference. * * 2000.6.6 Initial implementation of routines for US NI1 * Layer 3 protocol based on the EURO/DSS1 D-channel protocol * driver written by Karsten Keil et al. * NI-1 Hall of Fame - Thanks to.... * Ragnar Paulson - for some handy code fragments * Will Scales - beta tester extraordinaire * Brett Whittacre - beta tester and remote devel system in Vegas * */ #include "hisax.h" #include "isdnl3.h" #include "l3ni1.h" #include <linux/ctype.h> #include <linux/slab.h> extern char *HiSax_getrev(const char *revision); static const char *ni1_revision = "$Revision: 2.8.2.3 $"; #define EXT_BEARER_CAPS 1 #define MsgHead(ptr, cref, mty) \ *ptr++ = 0x8; \ if (cref == -1) { \ *ptr++ = 0x0; \ } else { \ *ptr++ = 0x1; \ *ptr++ = cref^0x80; \ } \ *ptr++ = mty /**********************************************/ /* get a new invoke id for remote operations. 
*/ /* Only a return value != 0 is valid */ /**********************************************/ static unsigned char new_invoke_id(struct PStack *p) { unsigned char retval; int i; i = 32; /* maximum search depth */ retval = p->prot.ni1.last_invoke_id + 1; /* try new id */ while ((i) && (p->prot.ni1.invoke_used[retval >> 3] == 0xFF)) { p->prot.ni1.last_invoke_id = (retval & 0xF8) + 8; i--; } if (i) { while (p->prot.ni1.invoke_used[retval >> 3] & (1 << (retval & 7))) retval++; } else retval = 0; p->prot.ni1.last_invoke_id = retval; p->prot.ni1.invoke_used[retval >> 3] |= (1 << (retval & 7)); return (retval); } /* new_invoke_id */ /*************************/ /* free a used invoke id */ /*************************/ static void free_invoke_id(struct PStack *p, unsigned char id) { if (!id) return; /* 0 = invalid value */ p->prot.ni1.invoke_used[id >> 3] &= ~(1 << (id & 7)); } /* free_invoke_id */ /**********************************************************/ /* create a new l3 process and fill in ni1 specific data */ /**********************************************************/ static struct l3_process *ni1_new_l3_process(struct PStack *st, int cr) { struct l3_process *proc; if (!(proc = new_l3_process(st, cr))) return (NULL); proc->prot.ni1.invoke_id = 0; proc->prot.ni1.remote_operation = 0; proc->prot.ni1.uus1_data[0] = '\0'; return (proc); } /* ni1_new_l3_process */ /************************************************/ /* free a l3 process and all ni1 specific data */ /************************************************/ static void ni1_release_l3_process(struct l3_process *p) { free_invoke_id(p->st, p->prot.ni1.invoke_id); release_l3_process(p); } /* ni1_release_l3_process */ /********************************************************/ /* search a process with invoke id id and dummy callref */ /********************************************************/ static struct l3_process * l3ni1_search_dummy_proc(struct PStack *st, int id) { struct l3_process *pc = st->l3.proc; /* start of 
processes */ if (!id) return (NULL); while (pc) { if ((pc->callref == -1) && (pc->prot.ni1.invoke_id == id)) return (pc); pc = pc->next; } return (NULL); } /* l3ni1_search_dummy_proc */ /*******************************************************************/ /* called when a facility message with a dummy callref is received */ /* and a return result is delivered. id specifies the invoke id. */ /*******************************************************************/ static void l3ni1_dummy_return_result(struct PStack *st, int id, u_char *p, u_char nlen) { isdn_ctrl ic; struct IsdnCardState *cs; struct l3_process *pc = NULL; if ((pc = l3ni1_search_dummy_proc(st, id))) { L3DelTimer(&pc->timer); /* remove timer */ cs = pc->st->l1.hardware; ic.driver = cs->myid; ic.command = ISDN_STAT_PROT; ic.arg = NI1_STAT_INVOKE_RES; ic.parm.ni1_io.hl_id = pc->prot.ni1.invoke_id; ic.parm.ni1_io.ll_id = pc->prot.ni1.ll_id; ic.parm.ni1_io.proc = pc->prot.ni1.proc; ic.parm.ni1_io.timeout = 0; ic.parm.ni1_io.datalen = nlen; ic.parm.ni1_io.data = p; free_invoke_id(pc->st, pc->prot.ni1.invoke_id); pc->prot.ni1.invoke_id = 0; /* reset id */ cs->iif.statcallb(&ic); ni1_release_l3_process(pc); } else l3_debug(st, "dummy return result id=0x%x result len=%d", id, nlen); } /* l3ni1_dummy_return_result */ /*******************************************************************/ /* called when a facility message with a dummy callref is received */ /* and a return error is delivered. id specifies the invoke id. 
*/ /*******************************************************************/ static void l3ni1_dummy_error_return(struct PStack *st, int id, ulong error) { isdn_ctrl ic; struct IsdnCardState *cs; struct l3_process *pc = NULL; if ((pc = l3ni1_search_dummy_proc(st, id))) { L3DelTimer(&pc->timer); /* remove timer */ cs = pc->st->l1.hardware; ic.driver = cs->myid; ic.command = ISDN_STAT_PROT; ic.arg = NI1_STAT_INVOKE_ERR; ic.parm.ni1_io.hl_id = pc->prot.ni1.invoke_id; ic.parm.ni1_io.ll_id = pc->prot.ni1.ll_id; ic.parm.ni1_io.proc = pc->prot.ni1.proc; ic.parm.ni1_io.timeout = error; ic.parm.ni1_io.datalen = 0; ic.parm.ni1_io.data = NULL; free_invoke_id(pc->st, pc->prot.ni1.invoke_id); pc->prot.ni1.invoke_id = 0; /* reset id */ cs->iif.statcallb(&ic); ni1_release_l3_process(pc); } else l3_debug(st, "dummy return error id=0x%x error=0x%lx", id, error); } /* l3ni1_error_return */ /*******************************************************************/ /* called when a facility message with a dummy callref is received */ /* and a invoke is delivered. id specifies the invoke id. */ /*******************************************************************/ static void l3ni1_dummy_invoke(struct PStack *st, int cr, int id, int ident, u_char *p, u_char nlen) { isdn_ctrl ic; struct IsdnCardState *cs; l3_debug(st, "dummy invoke %s id=0x%x ident=0x%x datalen=%d", (cr == -1) ? 
"local" : "broadcast", id, ident, nlen); if (cr >= -1) return; /* ignore local data */ cs = st->l1.hardware; ic.driver = cs->myid; ic.command = ISDN_STAT_PROT; ic.arg = NI1_STAT_INVOKE_BRD; ic.parm.ni1_io.hl_id = id; ic.parm.ni1_io.ll_id = 0; ic.parm.ni1_io.proc = ident; ic.parm.ni1_io.timeout = 0; ic.parm.ni1_io.datalen = nlen; ic.parm.ni1_io.data = p; cs->iif.statcallb(&ic); } /* l3ni1_dummy_invoke */ static void l3ni1_parse_facility(struct PStack *st, struct l3_process *pc, int cr, u_char *p) { int qd_len = 0; unsigned char nlen = 0, ilen, cp_tag; int ident, id; ulong err_ret; if (pc) st = pc->st; /* valid Stack */ else if ((!st) || (cr >= 0)) return; /* neither pc nor st specified */ p++; qd_len = *p++; if (qd_len == 0) { l3_debug(st, "qd_len == 0"); return; } if ((*p & 0x1F) != 0x11) { /* Service discriminator, supplementary service */ l3_debug(st, "supplementary service != 0x11"); return; } while (qd_len > 0 && !(*p & 0x80)) { /* extension ? */ p++; qd_len--; } if (qd_len < 2) { l3_debug(st, "qd_len < 2"); return; } p++; qd_len--; if ((*p & 0xE0) != 0xA0) { /* class and form */ l3_debug(st, "class and form != 0xA0"); return; } cp_tag = *p & 0x1F; /* remember tag value */ p++; qd_len--; if (qd_len < 1) { l3_debug(st, "qd_len < 1"); return; } if (*p & 0x80) { /* length format indefinite or limited */ nlen = *p++ & 0x7F; /* number of len bytes or indefinite */ if ((qd_len-- < ((!nlen) ? 
3 : (1 + nlen))) || (nlen > 1)) { l3_debug(st, "length format error or not implemented"); return; } if (nlen == 1) { nlen = *p++; /* complete length */ qd_len--; } else { qd_len -= 2; /* trailing null bytes */ if ((*(p + qd_len)) || (*(p + qd_len + 1))) { l3_debug(st, "length format indefinite error"); return; } nlen = qd_len; } } else { nlen = *p++; qd_len--; } if (qd_len < nlen) { l3_debug(st, "qd_len < nlen"); return; } qd_len -= nlen; if (nlen < 2) { l3_debug(st, "nlen < 2"); return; } if (*p != 0x02) { /* invoke identifier tag */ l3_debug(st, "invoke identifier tag !=0x02"); return; } p++; nlen--; if (*p & 0x80) { /* length format */ l3_debug(st, "invoke id length format 2"); return; } ilen = *p++; nlen--; if (ilen > nlen || ilen == 0) { l3_debug(st, "ilen > nlen || ilen == 0"); return; } nlen -= ilen; id = 0; while (ilen > 0) { id = (id << 8) | (*p++ & 0xFF); /* invoke identifier */ ilen--; } switch (cp_tag) { /* component tag */ case 1: /* invoke */ if (nlen < 2) { l3_debug(st, "nlen < 2 22"); return; } if (*p != 0x02) { /* operation value */ l3_debug(st, "operation value !=0x02"); return; } p++; nlen--; ilen = *p++; nlen--; if (ilen > nlen || ilen == 0) { l3_debug(st, "ilen > nlen || ilen == 0 22"); return; } nlen -= ilen; ident = 0; while (ilen > 0) { ident = (ident << 8) | (*p++ & 0xFF); ilen--; } if (!pc) { l3ni1_dummy_invoke(st, cr, id, ident, p, nlen); return; } l3_debug(st, "invoke break"); break; case 2: /* return result */ /* if no process available handle separately */ if (!pc) { if (cr == -1) l3ni1_dummy_return_result(st, id, p, nlen); return; } if ((pc->prot.ni1.invoke_id) && (pc->prot.ni1.invoke_id == id)) { /* Diversion successful */ free_invoke_id(st, pc->prot.ni1.invoke_id); pc->prot.ni1.remote_result = 0; /* success */ pc->prot.ni1.invoke_id = 0; pc->redir_result = pc->prot.ni1.remote_result; st->l3.l3l4(st, CC_REDIR | INDICATION, pc); } /* Diversion successful */ else l3_debug(st, "return error unknown identifier"); break; case 3: /* return 
error */ err_ret = 0; if (nlen < 2) { l3_debug(st, "return error nlen < 2"); return; } if (*p != 0x02) { /* result tag */ l3_debug(st, "invoke error tag !=0x02"); return; } p++; nlen--; if (*p > 4) { /* length format */ l3_debug(st, "invoke return errlen > 4 "); return; } ilen = *p++; nlen--; if (ilen > nlen || ilen == 0) { l3_debug(st, "error return ilen > nlen || ilen == 0"); return; } nlen -= ilen; while (ilen > 0) { err_ret = (err_ret << 8) | (*p++ & 0xFF); /* error value */ ilen--; } /* if no process available handle separately */ if (!pc) { if (cr == -1) l3ni1_dummy_error_return(st, id, err_ret); return; } if ((pc->prot.ni1.invoke_id) && (pc->prot.ni1.invoke_id == id)) { /* Deflection error */ free_invoke_id(st, pc->prot.ni1.invoke_id); pc->prot.ni1.remote_result = err_ret; /* result */ pc->prot.ni1.invoke_id = 0; pc->redir_result = pc->prot.ni1.remote_result; st->l3.l3l4(st, CC_REDIR | INDICATION, pc); } /* Deflection error */ else l3_debug(st, "return result unknown identifier"); break; default: l3_debug(st, "facility default break tag=0x%02x", cp_tag); break; } } static void l3ni1_message(struct l3_process *pc, u_char mt) { struct sk_buff *skb; u_char *p; if (!(skb = l3_alloc_skb(4))) return; p = skb_put(skb, 4); MsgHead(p, pc->callref, mt); l3_msg(pc->st, DL_DATA | REQUEST, skb); } static void l3ni1_message_plus_chid(struct l3_process *pc, u_char mt) /* sends an l3 messages plus channel id - added GE 05/09/00 */ { struct sk_buff *skb; u_char tmp[16]; u_char *p = tmp; u_char chid; chid = (u_char)(pc->para.bchannel & 0x03) | 0x88; MsgHead(p, pc->callref, mt); *p++ = IE_CHANNEL_ID; *p++ = 0x01; *p++ = chid; if (!(skb = l3_alloc_skb(7))) return; memcpy(skb_put(skb, 7), tmp, 7); l3_msg(pc->st, DL_DATA | REQUEST, skb); } static void l3ni1_message_cause(struct l3_process *pc, u_char mt, u_char cause) { struct sk_buff *skb; u_char tmp[16]; u_char *p = tmp; int l; MsgHead(p, pc->callref, mt); *p++ = IE_CAUSE; *p++ = 0x2; *p++ = 0x80; *p++ = cause | 0x80; l = p - 
tmp; if (!(skb = l3_alloc_skb(l))) return; memcpy(skb_put(skb, l), tmp, l); l3_msg(pc->st, DL_DATA | REQUEST, skb); } static void l3ni1_status_send(struct l3_process *pc, u_char pr, void *arg) { u_char tmp[16]; u_char *p = tmp; int l; struct sk_buff *skb; MsgHead(p, pc->callref, MT_STATUS); *p++ = IE_CAUSE; *p++ = 0x2; *p++ = 0x80; *p++ = pc->para.cause | 0x80; *p++ = IE_CALL_STATE; *p++ = 0x1; *p++ = pc->state & 0x3f; l = p - tmp; if (!(skb = l3_alloc_skb(l))) return; memcpy(skb_put(skb, l), tmp, l); l3_msg(pc->st, DL_DATA | REQUEST, skb); } static void l3ni1_msg_without_setup(struct l3_process *pc, u_char pr, void *arg) { /* This routine is called if here was no SETUP made (checks in ni1up and in * l3ni1_setup) and a RELEASE_COMPLETE have to be sent with an error code * MT_STATUS_ENQUIRE in the NULL state is handled too */ u_char tmp[16]; u_char *p = tmp; int l; struct sk_buff *skb; switch (pc->para.cause) { case 81: /* invalid callreference */ case 88: /* incomp destination */ case 96: /* mandory IE missing */ case 100: /* invalid IE contents */ case 101: /* incompatible Callstate */ MsgHead(p, pc->callref, MT_RELEASE_COMPLETE); *p++ = IE_CAUSE; *p++ = 0x2; *p++ = 0x80; *p++ = pc->para.cause | 0x80; break; default: printk(KERN_ERR "HiSax l3ni1_msg_without_setup wrong cause %d\n", pc->para.cause); return; } l = p - tmp; if (!(skb = l3_alloc_skb(l))) return; memcpy(skb_put(skb, l), tmp, l); l3_msg(pc->st, DL_DATA | REQUEST, skb); ni1_release_l3_process(pc); } static int ie_ALERTING[] = {IE_BEARER, IE_CHANNEL_ID | IE_MANDATORY_1, IE_FACILITY, IE_PROGRESS, IE_DISPLAY, IE_SIGNAL, IE_HLC, IE_USER_USER, -1}; static int ie_CALL_PROCEEDING[] = {IE_BEARER, IE_CHANNEL_ID | IE_MANDATORY_1, IE_FACILITY, IE_PROGRESS, IE_DISPLAY, IE_HLC, -1}; static int ie_CONNECT[] = {IE_BEARER, IE_CHANNEL_ID | IE_MANDATORY_1, IE_FACILITY, IE_PROGRESS, IE_DISPLAY, IE_DATE, IE_SIGNAL, IE_CONNECT_PN, IE_CONNECT_SUB, IE_LLC, IE_HLC, IE_USER_USER, -1}; static int ie_CONNECT_ACKNOWLEDGE[] = 
{IE_CHANNEL_ID, IE_DISPLAY, IE_SIGNAL, -1}; static int ie_DISCONNECT[] = {IE_CAUSE | IE_MANDATORY, IE_FACILITY, IE_PROGRESS, IE_DISPLAY, IE_SIGNAL, IE_USER_USER, -1}; static int ie_INFORMATION[] = {IE_COMPLETE, IE_DISPLAY, IE_KEYPAD, IE_SIGNAL, IE_CALLED_PN, -1}; static int ie_NOTIFY[] = {IE_BEARER, IE_NOTIFY | IE_MANDATORY, IE_DISPLAY, -1}; static int ie_PROGRESS[] = {IE_BEARER, IE_CAUSE, IE_FACILITY, IE_PROGRESS | IE_MANDATORY, IE_DISPLAY, IE_HLC, IE_USER_USER, -1}; static int ie_RELEASE[] = {IE_CAUSE | IE_MANDATORY_1, IE_FACILITY, IE_DISPLAY, IE_SIGNAL, IE_USER_USER, -1}; /* a RELEASE_COMPLETE with errors don't require special actions static int ie_RELEASE_COMPLETE[] = {IE_CAUSE | IE_MANDATORY_1, IE_DISPLAY, IE_SIGNAL, IE_USER_USER, -1}; */ static int ie_RESUME_ACKNOWLEDGE[] = {IE_CHANNEL_ID | IE_MANDATORY, IE_FACILITY, IE_DISPLAY, -1}; static int ie_RESUME_REJECT[] = {IE_CAUSE | IE_MANDATORY, IE_DISPLAY, -1}; static int ie_SETUP[] = {IE_COMPLETE, IE_BEARER | IE_MANDATORY, IE_CHANNEL_ID | IE_MANDATORY, IE_FACILITY, IE_PROGRESS, IE_NET_FAC, IE_DISPLAY, IE_KEYPAD, IE_SIGNAL, IE_CALLING_PN, IE_CALLING_SUB, IE_CALLED_PN, IE_CALLED_SUB, IE_REDIR_NR, IE_LLC, IE_HLC, IE_USER_USER, -1}; static int ie_SETUP_ACKNOWLEDGE[] = {IE_CHANNEL_ID | IE_MANDATORY, IE_FACILITY, IE_PROGRESS, IE_DISPLAY, IE_SIGNAL, -1}; static int ie_STATUS[] = {IE_CAUSE | IE_MANDATORY, IE_CALL_STATE | IE_MANDATORY, IE_DISPLAY, -1}; static int ie_STATUS_ENQUIRY[] = {IE_DISPLAY, -1}; static int ie_SUSPEND_ACKNOWLEDGE[] = {IE_DISPLAY, IE_FACILITY, -1}; static int ie_SUSPEND_REJECT[] = {IE_CAUSE | IE_MANDATORY, IE_DISPLAY, -1}; /* not used * static int ie_CONGESTION_CONTROL[] = {IE_CONGESTION | IE_MANDATORY, * IE_CAUSE | IE_MANDATORY, IE_DISPLAY, -1}; * static int ie_USER_INFORMATION[] = {IE_MORE_DATA, IE_USER_USER | IE_MANDATORY, -1}; * static int ie_RESTART[] = {IE_CHANNEL_ID, IE_DISPLAY, IE_RESTART_IND | * IE_MANDATORY, -1}; */ static int ie_FACILITY[] = {IE_FACILITY | IE_MANDATORY, IE_DISPLAY, -1}; 
/* IE ids whose presence must be comprehended; an unexpected IE from this
 * set is counted as a comprehension error in check_infoelements(). */
static int comp_required[] = {1, 2, 3, 5, 6, 7, 9, 10, 11, 14, 15, -1};
static int l3_valid_states[] = {0, 1, 2, 3, 4, 6, 7, 8, 9, 10, 11, 12, 15, 17, 19, 25, -1};

struct ie_len {
	int ie;
	int len;
};

/* Maximum allowed length per IE, terminated by {-1, 0}; anything not
 * listed defaults to 255 (see getmax_ie_len()). */
static struct ie_len max_ie_len[] = {
	{IE_SEGMENT, 4},
	{IE_BEARER, 12},
	{IE_CAUSE, 32},
	{IE_CALL_ID, 10},
	{IE_CALL_STATE, 3},
	{IE_CHANNEL_ID, 34},
	{IE_FACILITY, 255},
	{IE_PROGRESS, 4},
	{IE_NET_FAC, 255},
	{IE_NOTIFY, 3},
	{IE_DISPLAY, 82},
	{IE_DATE, 8},
	{IE_KEYPAD, 34},
	{IE_SIGNAL, 3},
	{IE_INFORATE, 6},
	{IE_E2E_TDELAY, 11},
	{IE_TDELAY_SEL, 5},
	{IE_PACK_BINPARA, 3},
	{IE_PACK_WINSIZE, 4},
	{IE_PACK_SIZE, 4},
	{IE_CUG, 7},
	{IE_REV_CHARGE, 3},
	{IE_CALLING_PN, 24},
	{IE_CALLING_SUB, 23},
	{IE_CALLED_PN, 24},
	{IE_CALLED_SUB, 23},
	{IE_REDIR_NR, 255},
	{IE_TRANS_SEL, 255},
	{IE_RESTART_IND, 3},
	{IE_LLC, 18},
	{IE_HLC, 5},
	{IE_USER_USER, 131},
	{-1, 0},
};

/* Look up the maximum length for an IE; unknown IEs get 255. */
static int
getmax_ie_len(u_char ie)
{
	int i = 0;

	while (max_ie_len[i].ie != -1) {
		if (max_ie_len[i].ie == ie)
			return (max_ie_len[i].len);
		i++;
	}
	return (255);
}

/*
 * Return the 1-based position of ie in checklist (negated when the ie
 * has bit 0x80 set), or 0 if the ie is not contained in the list.
 */
static int
ie_in_set(struct l3_process *pc, u_char ie, int *checklist)
{
	int ret = 1;

	while (*checklist != -1) {
		if ((*checklist & 0xff) == ie) {
			if (ie & 0x80)
				return (-ret);
			else
				return (ret);
		}
		ret++;
		checklist++;
	}
	return (0);
}

/*
 * Walk all IEs of a message and verify them against the per-message
 * checklist: sequence order, membership of the comprehension-required
 * set and maximum IE length are checked (codeset 0 only; codeset shift
 * IEs are honoured).  Returns 0 or the most severe ERR_IE_* code.
 */
static int
check_infoelements(struct l3_process *pc, struct sk_buff *skb, int *checklist)
{
	int *cl = checklist;
	u_char mt;
	u_char *p, ie;
	int l, newpos, oldpos;
	int err_seq = 0, err_len = 0, err_compr = 0, err_ureg = 0;
	u_char codeset = 0;
	u_char old_codeset = 0;
	u_char codelock = 1;

	p = skb->data;
	/* skip cr */
	p++;
	l = (*p++) & 0xf;
	p += l;
	mt = *p++;
	oldpos = 0;
	while ((p - skb->data) < skb->len) {
		if ((*p & 0xf0) == 0x90) { /* shift codeset */
			old_codeset = codeset;
			codeset = *p & 7;
			if (*p & 0x08)
				codelock = 0;
			else
				codelock = 1;
			if (pc->debug & L3_DEB_CHECK)
				l3_debug(pc->st, "check IE shift%scodeset %d->%d",
					 codelock ? " locking " : " ", old_codeset, codeset);
			p++;
			continue;
		}
		if (!codeset) { /* only codeset 0 */
			if ((newpos = ie_in_set(pc, *p, cl))) {
				if (newpos > 0) {
					if (newpos < oldpos)
						err_seq++;	/* IE out of sequence */
					else
						oldpos = newpos;
				}
			} else {
				if (ie_in_set(pc, *p, comp_required))
					err_compr++;
				else
					err_ureg++;
			}
		}
		ie = *p++;
		if (ie & 0x80) {
			l = 1;	/* single-octet IE */
		} else {
			l = *p++;
			p += l;
			l += 2;
		}
		if (!codeset && (l > getmax_ie_len(ie)))
			err_len++;
		if (!codelock) {
			/* non-locking shift: fall back after one IE */
			if (pc->debug & L3_DEB_CHECK)
				l3_debug(pc->st, "check IE shift back codeset %d->%d",
					 codeset, old_codeset);
			codeset = old_codeset;
			codelock = 1;
		}
	}
	if (err_compr | err_ureg | err_len | err_seq) {
		if (pc->debug & L3_DEB_CHECK)
			l3_debug(pc->st, "check IE MT(%x) %d/%d/%d/%d",
				 mt, err_compr, err_ureg, err_len, err_seq);
		if (err_compr)
			return (ERR_IE_COMPREHENSION);
		if (err_ureg)
			return (ERR_IE_UNRECOGNIZED);
		if (err_len)
			return (ERR_IE_LENGTH);
		if (err_seq)
			return (ERR_IE_SEQUENCE);
	}
	return (0);
}

/* verify if a message type exists and contains no IE error */
static int
l3ni1_check_messagetype_validity(struct l3_process *pc, int mt, void *arg)
{
	switch (mt) {
	case MT_ALERTING:
	case MT_CALL_PROCEEDING:
	case MT_CONNECT:
	case MT_CONNECT_ACKNOWLEDGE:
	case MT_DISCONNECT:
	case MT_INFORMATION:
	case MT_FACILITY:
	case MT_NOTIFY:
	case MT_PROGRESS:
	case MT_RELEASE:
	case MT_RELEASE_COMPLETE:
	case MT_SETUP:
	case MT_SETUP_ACKNOWLEDGE:
	case MT_RESUME_ACKNOWLEDGE:
	case MT_RESUME_REJECT:
	case MT_SUSPEND_ACKNOWLEDGE:
	case MT_SUSPEND_REJECT:
	case MT_USER_INFORMATION:
	case MT_RESTART:
	case MT_RESTART_ACKNOWLEDGE:
	case MT_CONGESTION_CONTROL:
	case MT_STATUS:
	case MT_STATUS_ENQUIRY:
		if (pc->debug & L3_DEB_CHECK)
			l3_debug(pc->st, "l3ni1_check_messagetype_validity mt(%x) OK", mt);
		break;
	case MT_RESUME:		/* RESUME only in user->net */
	case MT_SUSPEND:	/* SUSPEND only in user->net */
	default:
		if (pc->debug & (L3_DEB_CHECK | L3_DEB_WARN))
			l3_debug(pc->st, "l3ni1_check_messagetype_validity mt(%x) fail", mt);
		pc->para.cause = 97;
		l3ni1_status_send(pc, 0, NULL);
		return (1);
	}
	return (0);
}

/*
 * Map a check_infoelements() result to a cause value and, where needed,
 * answer with a STATUS message.  ERR_IE_SEQUENCE is tolerated silently.
 */
static void
l3ni1_std_ie_err(struct l3_process *pc, int ret)
{
	if (pc->debug & L3_DEB_CHECK)
		l3_debug(pc->st, "check_infoelements ret %d", ret);
	switch (ret) {
	case 0:
		break;
	case ERR_IE_COMPREHENSION:
		pc->para.cause = 96;
		l3ni1_status_send(pc, 0, NULL);
		break;
	case ERR_IE_UNRECOGNIZED:
		pc->para.cause = 99;
		l3ni1_status_send(pc, 0, NULL);
		break;
	case ERR_IE_LENGTH:
		pc->para.cause = 100;
		l3ni1_status_send(pc, 0, NULL);
		break;
	case ERR_IE_SEQUENCE:
	default:
		break;
	}
}

/*
 * Extract the B-channel number from the channel-id IE.
 * Returns the channel (0..3), -1 if the IE is missing, -2 on a bad IE
 * length, -3 if the id is not a basic rate interface channel id.
 */
static int
l3ni1_get_channel_id(struct l3_process *pc, struct sk_buff *skb)
{
	u_char *p;

	p = skb->data;
	if ((p = findie(p, skb->len, IE_CHANNEL_ID, 0))) {
		p++;
		if (*p != 1) { /* len for BRI = 1 */
			if (pc->debug & L3_DEB_WARN)
				l3_debug(pc->st, "wrong chid len %d", *p);
			return (-2);
		}
		p++;
		if (*p & 0x60) { /* only base rate interface */
			if (pc->debug & L3_DEB_WARN)
				l3_debug(pc->st, "wrong chid %x", *p);
			return (-3);
		}
		return (*p & 0x3);
	} else
		return (-1);
}

/*
 * Parse the cause IE into pc->para.cause/loc/diag.
 * Returns 0 on success, -1 if no cause IE is present, and a positive
 * code for the malformed-IE cases (cause defaults to 31).
 */
static int
l3ni1_get_cause(struct l3_process *pc, struct sk_buff *skb)
{
	u_char l, i = 0;
	u_char *p;

	p = skb->data;
	pc->para.cause = 31;
	pc->para.loc = 0;
	if ((p = findie(p, skb->len, IE_CAUSE, 0))) {
		p++;
		l = *p++;
		if (l > 30)
			return (1);
		if (l) {
			pc->para.loc = *p++;
			l--;
		} else {
			return (2);
		}
		if (l && !(pc->para.loc & 0x80)) {
			l--;
			p++; /* skip recommendation */
		}
		if (l) {
			pc->para.cause = *p++;
			l--;
			if (!(pc->para.cause & 0x80))
				return (3);
		} else
			return (4);
		/* up to 6 diagnostic octets */
		while (l && (i < 6)) {
			pc->para.diag[i++] = *p++;
			l--;
		}
	} else
		return (-1);
	return (0);
}

/*
 * Send a message of the given type, appending a user-user IE when
 * uus1_data is pending; the pending data is consumed here.
 */
static void
l3ni1_msg_with_uus(struct l3_process *pc, u_char cmd)
{
	struct sk_buff *skb;
	u_char tmp[16 + 40];
	u_char *p = tmp;
	int l;

	MsgHead(p, pc->callref, cmd);

	if (pc->prot.ni1.uus1_data[0]) {
		*p++ = IE_USER_USER; /* UUS info element */
		*p++ = strlen(pc->prot.ni1.uus1_data) + 1;
		*p++ = 0x04; /* IA5 chars */
		strcpy(p, pc->prot.ni1.uus1_data);
		p += strlen(pc->prot.ni1.uus1_data);
		pc->prot.ni1.uus1_data[0] = '\0';
	}

	l = p - tmp;
	if (!(skb = l3_alloc_skb(l)))
		return;
	memcpy(skb_put(skb, l), tmp, l);
	l3_msg(pc->st, DL_DATA | REQUEST, skb);
} /* l3ni1_msg_with_uus */

/* Send RELEASE (with optional user-user data) and start T308 (state 19). */
static void
l3ni1_release_req(struct l3_process *pc, u_char pr, void *arg)
{
	StopAllL3Timer(pc);
	newl3state(pc, 19);
	if (!pc->prot.ni1.uus1_data[0])
		l3ni1_message(pc, MT_RELEASE);
	else
		l3ni1_msg_with_uus(pc, MT_RELEASE);
	L3AddTimer(&pc->timer, T308, CC_T308_1);
}

/*
 * RELEASE_COMPLETE received: stop all timers, confirm the release to
 * layer 4 and free the l3_process.
 */
static void
l3ni1_release_cmpl(struct l3_process *pc, u_char pr, void *arg)
{
	struct sk_buff *skb = arg;
	int ret;

	if ((ret = l3ni1_get_cause(pc, skb)) > 0) {
		if (pc->debug & L3_DEB_WARN)
			l3_debug(pc->st, "RELCMPL get_cause ret(%d)", ret);
	} else if (ret < 0)
		pc->para.cause = NO_CAUSE;
	StopAllL3Timer(pc);
	newl3state(pc, 0);
	pc->st->l3.l3l4(pc->st, CC_RELEASE | CONFIRM, pc);
	ni1_release_l3_process(pc);
}

#if EXT_BEARER_CAPS

/*
 * Encode async V.110/X.30 rate adaption parameters (3 octets) from the
 * si2 service indicator bits.  Example octets: 7c 06 88 90 21 42 00 bb
 */
static u_char *
EncodeASyncParams(u_char *p, u_char si2)
{
	p[0] = 0;
	p[1] = 0x40;	// Intermediate rate: 16 kbit/s jj 2000.02.19
	p[2] = 0x80;
	if (si2 & 32)	// 7 data bits
		p[2] += 16;
	else		// 8 data bits
		p[2] += 24;

	if (si2 & 16)	// 2 stop bits
		p[2] += 96;
	else		// 1 stop bit
		p[2] += 32;

	if (si2 & 8)	// even parity
		p[2] += 2;
	else		// no parity
		p[2] += 3;

	switch (si2 & 0x07) {
	case 0:
		p[0] = 66;	// 1200 bit/s
		break;
	case 1:
		p[0] = 88;	// 1200/75 bit/s
		break;
	case 2:
		p[0] = 87;	// 75/1200 bit/s
		break;
	case 3:
		p[0] = 67;	// 2400 bit/s
		break;
	case 4:
		p[0] = 69;	// 4800 bit/s
		break;
	case 5:
		p[0] = 72;	// 9600 bit/s
		break;
	case 6:
		p[0] = 73;	// 14400 bit/s
		break;
	case 7:
		p[0] = 75;	// 19200 bit/s
		break;
	}
	return p + 3;
}

/* Map a sync bit-rate selector (low nibble of si2) onto the user-rate
 * octet ai.  Inverse of DecodeSyncParams(). */
static u_char
EncodeSyncParams(u_char si2, u_char ai)
{
	switch (si2) {
	case 0:
		return ai + 2;	// 1200 bit/s
	case 1:
		return ai + 24;	// 1200/75 bit/s
	case 2:
		return ai + 23;	// 75/1200 bit/s
	case 3:
		return ai + 3;	// 2400 bit/s
	case 4:
		return ai + 5;	// 4800 bit/s
	case 5:
		return ai + 8;	// 9600 bit/s
	case 6:
		return ai + 9;	// 14400 bit/s
	case 7:
		return ai + 11;	// 19200 bit/s
	case 8:
		return ai + 14;	// 48000 bit/s
	case 9:
		return ai + 15;	// 56000 bit/s
	case 15:
		return ai + 40;
// negotiate bit/s
	default:
		break;
	}
	return ai;
}

/*
 * Decode async V.110/X.30 parameters from an LLC IE back into an si2
 * service-indicator value (inverse of EncodeASyncParams()).
 */
static u_char
DecodeASyncParams(u_char si2, u_char *p)
{
	u_char info;

	switch (p[5]) {
	case 66:	// 1200 bit/s
		break;	// si2 don't change
	case 88:	// 1200/75 bit/s
		si2 += 1;
		break;
	case 87:	// 75/1200 bit/s
		si2 += 2;
		break;
	case 67:	// 2400 bit/s
		si2 += 3;
		break;
	case 69:	// 4800 bit/s
		si2 += 4;
		break;
	case 72:	// 9600 bit/s
		si2 += 5;
		break;
	case 73:	// 14400 bit/s
		si2 += 6;
		break;
	case 75:	// 19200 bit/s
		si2 += 7;
		break;
	}

	info = p[7] & 0x7f;
	if ((info & 16) && (!(info & 8)))	// 7 data bits
		si2 += 32;			// else 8 data bits
	if ((info & 96) == 96)			// 2 stop bits
		si2 += 16;			// else 1 stop bit
	if ((info & 2) && (!(info & 1)))	// even parity
		si2 += 8;			// else no parity
	return si2;
}

/* Decode a sync user-rate octet back into an si2 offset
 * (inverse of EncodeSyncParams()). */
static u_char
DecodeSyncParams(u_char si2, u_char info)
{
	info &= 0x7f;
	switch (info) {
	case 40:	// bit/s negotiation failed  ai := 165 not 175!
		return si2 + 15;
	case 15:	// 56000 bit/s failed, ai := 0 not 169 !
		return si2 + 9;
	case 14:	// 48000 bit/s
		return si2 + 8;
	case 11:	// 19200 bit/s
		return si2 + 7;
	case 9:		// 14400 bit/s
		return si2 + 6;
	case 8:		// 9600 bit/s
		return si2 + 5;
	case 5:		// 4800 bit/s
		return si2 + 4;
	case 3:		// 2400 bit/s
		return si2 + 3;
	case 23:	// 75/1200 bit/s
		return si2 + 2;
	case 24:	// 1200/75 bit/s
		return si2 + 1;
	default:	// 1200 bit/s
		return si2;
	}
}

/*
 * Derive the si2 service indicator from the LLC IE (0x7c) of a SETUP,
 * distinguishing V.110/X.30 sync/async and V.120 rate adaption.
 */
static u_char
DecodeSI2(struct sk_buff *skb)
{
	u_char *p; //, *pend=skb->data + skb->len;

	if ((p = findie(skb->data, skb->len, 0x7c, 0))) {
		switch (p[4] & 0x0f) {
		case 0x01:
			if (p[1] == 0x04)	// sync. Bitratenadaption
				return DecodeSyncParams(160, p[5]);	// V.110/X.30
			else if (p[1] == 0x06)	// async. Bitratenadaption
				return DecodeASyncParams(192, p);	// V.110/X.30
			break;
		case 0x08:	// if (p[5] == 0x02) // sync. Bitratenadaption
			if (p[1] > 3)
				return DecodeSyncParams(176, p[5]);	// V.120
			break;
		}
	}
	return 0;
}

#endif

/*
 * Build and send a SETUP for an outgoing call: completion indicator,
 * bearer capability mapped from the 1TR6-style si1 value, the dialled
 * digits as a keypad IE and (with EXT_BEARER_CAPS) a low-layer
 * compatibility IE derived from si2.  Starts T303 and enters state 1.
 */
static void
l3ni1_setup_req(struct l3_process *pc, u_char pr, void *arg)
{
	struct sk_buff *skb;
	u_char tmp[128];
	u_char *p = tmp;
	u_char *teln;
	u_char *sub;
	u_char *sp;
	int l;

	MsgHead(p, pc->callref, MT_SETUP);

	teln = pc->para.setup.phone;

	*p++ = 0xa1;		/* complete indicator */
	/*
	 * Set Bearer Capability, Map info from 1TR6-convention to NI1
	 */
	switch (pc->para.setup.si1) {
	case 1:			/* Telephony */
		*p++ = IE_BEARER;
		*p++ = 0x3;	/* Length */
		*p++ = 0x90;	/* 3.1khz Audio */
		*p++ = 0x90;	/* Circuit-Mode 64kbps */
		*p++ = 0xa2;	/* u-Law Audio */
		break;
	case 5:			/* Datatransmission 64k, BTX */
	case 7:			/* Datatransmission 64k */
	default:
		*p++ = IE_BEARER;
		*p++ = 0x2;	/* Length */
		*p++ = 0x88;	/* Coding Std. CCITT, unrestr. dig. Inform. */
		*p++ = 0x90;	/* Circuit-Mode 64kbps */
		break;
	}

	/* split a trailing ".subaddress" off the dialled string */
	sub = NULL;
	sp = teln;
	while (*sp) {
		if ('.' == *sp) {
			sub = sp;
			*sp = 0;
		} else
			sp++;
	}

	*p++ = IE_KEYPAD;
	*p++ = strlen(teln);
	while (*teln)
		*p++ = (*teln++) & 0x7F;

	if (sub)
		*sub++ = '.';	/* restore the separator */

#if EXT_BEARER_CAPS
	if ((pc->para.setup.si2 >= 160) && (pc->para.setup.si2 <= 175)) {
		// sync. Bitratenadaption, V.110/X.30
		*p++ = IE_LLC;
		*p++ = 0x04;
		*p++ = 0x88;
		*p++ = 0x90;
		*p++ = 0x21;
		*p++ = EncodeSyncParams(pc->para.setup.si2 - 160, 0x80);
	} else if ((pc->para.setup.si2 >= 176) && (pc->para.setup.si2 <= 191)) {
		// sync. Bitratenadaption, V.120
		*p++ = IE_LLC;
		*p++ = 0x05;
		*p++ = 0x88;
		*p++ = 0x90;
		*p++ = 0x28;
		*p++ = EncodeSyncParams(pc->para.setup.si2 - 176, 0);
		*p++ = 0x82;
	} else if (pc->para.setup.si2 >= 192) {
		// async. Bitratenadaption, V.110/X.30
		*p++ = IE_LLC;
		*p++ = 0x06;
		*p++ = 0x88;
		*p++ = 0x90;
		*p++ = 0x21;
		p = EncodeASyncParams(p, pc->para.setup.si2 - 192);
	} else {
		switch (pc->para.setup.si1) {
		case 1:		/* Telephony */
			*p++ = IE_LLC;
			*p++ = 0x3;	/* Length */
			*p++ = 0x90;	/* Coding Std. CCITT, 3.1 kHz audio */
			*p++ = 0x90;	/* Circuit-Mode 64kbps */
			*p++ = 0xa2;	/* u-Law Audio */
			break;
		case 5:		/* Datatransmission 64k, BTX */
		case 7:		/* Datatransmission 64k */
		default:
			*p++ = IE_LLC;
			*p++ = 0x2;	/* Length */
			*p++ = 0x88;	/* Coding Std. CCITT, unrestr. dig. Inform. */
			*p++ = 0x90;	/* Circuit-Mode 64kbps */
			break;
		}
	}
#endif
	l = p - tmp;
	if (!(skb = l3_alloc_skb(l))) {
		return;
	}
	memcpy(skb_put(skb, l), tmp, l);
	L3DelTimer(&pc->timer);
	L3AddTimer(&pc->timer, T303, CC_T303);
	newl3state(pc, 1);
	l3_msg(pc->st, DL_DATA | REQUEST, skb);
}

/*
 * CALL PROCEEDING received: validate/record the B-channel id, check the
 * optional IEs, enter state 3, start T310 and indicate CC_PROCEEDING.
 */
static void
l3ni1_call_proc(struct l3_process *pc, u_char pr, void *arg)
{
	struct sk_buff *skb = arg;
	int id, ret;

	if ((id = l3ni1_get_channel_id(pc, skb)) >= 0) {
		if ((0 == id) || ((3 == id) && (0x10 == pc->para.moderate))) {
			if (pc->debug & L3_DEB_WARN)
				l3_debug(pc->st, "setup answer with wrong chid %x", id);
			pc->para.cause = 100;
			l3ni1_status_send(pc, pr, NULL);
			return;
		}
		pc->para.bchannel = id;
	} else if (1 == pc->state) {
		if (pc->debug & L3_DEB_WARN)
			l3_debug(pc->st, "setup answer wrong chid (ret %d)", id);
		if (id == -1)
			pc->para.cause = 96;
		else
			pc->para.cause = 100;
		l3ni1_status_send(pc, pr, NULL);
		return;
	}
	/* Now we are on none mandatory IEs */
	ret = check_infoelements(pc, skb, ie_CALL_PROCEEDING);
	if (ERR_IE_COMPREHENSION == ret) {
		l3ni1_std_ie_err(pc, ret);
		return;
	}
	L3DelTimer(&pc->timer);
	newl3state(pc, 3);
	L3AddTimer(&pc->timer, T310, CC_T310);
	if (ret) /* STATUS for none mandatory IE errors after actions are taken */
		l3ni1_std_ie_err(pc, ret);
	pc->st->l3.l3l4(pc->st, CC_PROCEEDING | INDICATION, pc);
}

/*
 * SETUP ACKNOWLEDGE received: validate/record the B-channel id, check
 * the optional IEs, enter state 2 and start T304 (overlap sending).
 */
static void
l3ni1_setup_ack(struct l3_process *pc, u_char pr, void *arg)
{
	struct sk_buff *skb = arg;
	int id, ret;

	if ((id = l3ni1_get_channel_id(pc, skb)) >= 0) {
		if ((0 == id) || ((3 == id) && (0x10 == pc->para.moderate))) {
			if (pc->debug & L3_DEB_WARN)
				l3_debug(pc->st, "setup answer with wrong chid %x", id);
			pc->para.cause = 100;
			l3ni1_status_send(pc, pr, NULL);
			return;
		}
		pc->para.bchannel = id;
	} else {
		if
(pc->debug & L3_DEB_WARN)
			l3_debug(pc->st, "setup answer wrong chid (ret %d)", id);
		if (id == -1)
			pc->para.cause = 96;
		else
			pc->para.cause = 100;
		l3ni1_status_send(pc, pr, NULL);
		return;
	}
	/* Now we are on none mandatory IEs */
	ret = check_infoelements(pc, skb, ie_SETUP_ACKNOWLEDGE);
	if (ERR_IE_COMPREHENSION == ret) {
		l3ni1_std_ie_err(pc, ret);
		return;
	}
	L3DelTimer(&pc->timer);
	newl3state(pc, 2);
	L3AddTimer(&pc->timer, T304, CC_T304);
	if (ret) /* STATUS for none mandatory IE errors after actions are taken */
		l3ni1_std_ie_err(pc, ret);
	pc->st->l3.l3l4(pc->st, CC_MORE_INFO | INDICATION, pc);
}

/*
 * DISCONNECT received: stop timers, extract the cause, hand any
 * facility IE to the facility parser, validate remaining IEs and either
 * indicate the disconnect upwards or answer with RELEASE (plus T308)
 * when an error cause was determined.
 */
static void
l3ni1_disconnect(struct l3_process *pc, u_char pr, void *arg)
{
	struct sk_buff *skb = arg;
	u_char *p;
	int ret;
	u_char cause = 0;

	StopAllL3Timer(pc);
	if ((ret = l3ni1_get_cause(pc, skb))) {
		if (pc->debug & L3_DEB_WARN)
			l3_debug(pc->st, "DISC get_cause ret(%d)", ret);
		if (ret < 0)
			cause = 96;
		else if (ret > 0)
			cause = 100;
	}
	if ((p = findie(skb->data, skb->len, IE_FACILITY, 0)))
		l3ni1_parse_facility(pc->st, pc, pc->callref, p);
	ret = check_infoelements(pc, skb, ie_DISCONNECT);
	if (ERR_IE_COMPREHENSION == ret)
		cause = 96;
	else if ((!cause) && (ERR_IE_UNRECOGNIZED == ret))
		cause = 99;
	ret = pc->state;	/* remember the state before the transition */
	newl3state(pc, 12);
	if (cause)
		newl3state(pc, 19);
	if (11 != ret)
		pc->st->l3.l3l4(pc->st, CC_DISCONNECT | INDICATION, pc);
	else if (!cause)
		l3ni1_release_req(pc, pr, NULL);
	if (cause) {
		l3ni1_message_cause(pc, MT_RELEASE, cause);
		L3AddTimer(&pc->timer, T308, CC_T308_1);
	}
}

/*
 * CONNECT received: validate IEs, stop T310, enter state 10 and confirm
 * the call setup to layer 4.
 */
static void
l3ni1_connect(struct l3_process *pc, u_char pr, void *arg)
{
	struct sk_buff *skb = arg;
	int ret;

	ret = check_infoelements(pc, skb, ie_CONNECT);
	if (ERR_IE_COMPREHENSION == ret) {
		l3ni1_std_ie_err(pc, ret);
		return;
	}
	L3DelTimer(&pc->timer);	/* T310 */
	newl3state(pc, 10);
	pc->para.chargeinfo = 0;
	/* here should inserted COLP handling KKe */
	if (ret)
		l3ni1_std_ie_err(pc, ret);
	pc->st->l3.l3l4(pc->st, CC_SETUP | CONFIRM, pc);
}

/*
 * ALERTING received: validate IEs, stop T304, enter state 4 and
 * indicate alerting upwards.
 */
static void
l3ni1_alerting(struct l3_process *pc, u_char pr, void *arg)
{
	struct sk_buff *skb = arg;
	int ret;

	ret = check_infoelements(pc, skb, ie_ALERTING);
	if (ERR_IE_COMPREHENSION == ret) {
		l3ni1_std_ie_err(pc, ret);
		return;
	}
	L3DelTimer(&pc->timer);	/* T304 */
	newl3state(pc, 4);
	if (ret)
		l3ni1_std_ie_err(pc, ret);
	pc->st->l3.l3l4(pc->st, CC_ALERTING | INDICATION, pc);
}

/*
 * Incoming SETUP: parse bearer capability (mapping to si1/si2), channel
 * id, called/calling numbers and subaddresses, validate all IEs and
 * indicate the new call to layer 4 (state 6).  Fatal errors answer with
 * RELEASE_COMPLETE via l3ni1_msg_without_setup().
 */
static void
l3ni1_setup(struct l3_process *pc, u_char pr, void *arg)
{
	u_char *p;
	int bcfound = 0;
	char tmp[80];
	struct sk_buff *skb = arg;
	int id;
	int err = 0;

	/*
	 * Bearer Capabilities
	 */
	p = skb->data;
	/* only the first occurrence 'll be detected ! */
	if ((p = findie(p, skb->len, 0x04, 0))) {
		if ((p[1] < 2) || (p[1] > 11))
			err = 1;
		else {
			pc->para.setup.si2 = 0;
			switch (p[2] & 0x7f) {
			case 0x00: /* Speech */
			case 0x10: /* 3.1 Khz audio */
				pc->para.setup.si1 = 1;
				break;
			case 0x08: /* Unrestricted digital information */
				pc->para.setup.si1 = 7;
/* JIM, 05.11.97 I wanna set service indicator 2 */
#if EXT_BEARER_CAPS
				pc->para.setup.si2 = DecodeSI2(skb);
#endif
				break;
			case 0x09: /* Restricted digital information */
				pc->para.setup.si1 = 2;
				break;
			case 0x11:
				/* Unrestr. digital information with
				 * tones/announcements ( or 7 kHz audio */
				pc->para.setup.si1 = 3;
				break;
			case 0x18: /* Video */
				pc->para.setup.si1 = 4;
				break;
			default:
				err = 2;
				break;
			}
			switch (p[3] & 0x7f) {
			case 0x40: /* packed mode */
				pc->para.setup.si1 = 8;
				break;
			case 0x10: /* 64 kbit */
			case 0x11: /* 2*64 kbit */
			case 0x13: /* 384 kbit */
			case 0x15: /* 1536 kbit */
			case 0x17: /* 1920 kbit */
				pc->para.moderate = p[3] & 0x7f;
				break;
			default:
				err = 3;
				break;
			}
		}
		if (pc->debug & L3_DEB_SI)
			l3_debug(pc->st, "SI=%d, AI=%d",
				 pc->para.setup.si1, pc->para.setup.si2);
		if (err) {
			if (pc->debug & L3_DEB_WARN)
				l3_debug(pc->st, "setup with wrong bearer(l=%d:%x,%x)",
					 p[1], p[2], p[3]);
			pc->para.cause = 100;
			l3ni1_msg_without_setup(pc, pr, NULL);
			return;
		}
	} else {
		if (pc->debug & L3_DEB_WARN)
			l3_debug(pc->st, "setup without bearer capabilities");
		/* ETS 300-104 1.3.3 */
		pc->para.cause = 96;
		l3ni1_msg_without_setup(pc, pr, NULL);
		return;
	}
	/*
	 * Channel Identification
	 */
	if ((id = l3ni1_get_channel_id(pc, skb)) >= 0) {
		if ((pc->para.bchannel = id)) {
			if ((3 == id) && (0x10 == pc->para.moderate)) {
				if (pc->debug & L3_DEB_WARN)
					l3_debug(pc->st, "setup with wrong chid %x", id);
				pc->para.cause = 100;
				l3ni1_msg_without_setup(pc, pr, NULL);
				return;
			}
			bcfound++;
		} else {
			if (pc->debug & L3_DEB_WARN)
				l3_debug(pc->st, "setup without bchannel, call waiting");
			bcfound++;
		}
	} else {
		if (pc->debug & L3_DEB_WARN)
			l3_debug(pc->st, "setup with wrong chid ret %d", id);
		if (id == -1)
			pc->para.cause = 96;
		else
			pc->para.cause = 100;
		l3ni1_msg_without_setup(pc, pr, NULL);
		return;
	}
	/* Now we are on none mandatory IEs */
	err = check_infoelements(pc, skb, ie_SETUP);
	if (ERR_IE_COMPREHENSION == err) {
		pc->para.cause = 96;
		l3ni1_msg_without_setup(pc, pr, NULL);
		return;
	}
	p = skb->data;
	if ((p = findie(p, skb->len, 0x70, 0)))
		iecpy(pc->para.setup.eazmsn, p, 1);
	else
		pc->para.setup.eazmsn[0] = 0;

	p = skb->data;
	if ((p = findie(p, skb->len, 0x71, 0))) {
		/* Called party subaddress */
		if ((p[1] >= 2) && (p[2] ==
0x80) && (p[3] == 0x50)) {
			tmp[0] = '.';
			iecpy(&tmp[1], p, 2);
			strcat(pc->para.setup.eazmsn, tmp);
		} else if (pc->debug & L3_DEB_WARN)
			l3_debug(pc->st, "wrong called subaddress");
	}
	p = skb->data;
	if ((p = findie(p, skb->len, 0x6c, 0))) {
		pc->para.setup.plan = p[2];
		if (p[2] & 0x80) {
			iecpy(pc->para.setup.phone, p, 1);
			pc->para.setup.screen = 0;
		} else {
			iecpy(pc->para.setup.phone, p, 2);
			pc->para.setup.screen = p[3];
		}
	} else {
		pc->para.setup.phone[0] = 0;
		pc->para.setup.plan = 0;
		pc->para.setup.screen = 0;
	}
	p = skb->data;
	if ((p = findie(p, skb->len, 0x6d, 0))) {
		/* Calling party subaddress */
		if ((p[1] >= 2) && (p[2] == 0x80) && (p[3] == 0x50)) {
			tmp[0] = '.';
			iecpy(&tmp[1], p, 2);
			strcat(pc->para.setup.phone, tmp);
		} else if (pc->debug & L3_DEB_WARN)
			l3_debug(pc->st, "wrong calling subaddress");
	}
	newl3state(pc, 6);
	if (err) /* STATUS for none mandatory IE errors after actions are taken */
		l3ni1_std_ie_err(pc, err);
	pc->st->l3.l3l4(pc->st, CC_SETUP | INDICATION, pc);
}

/* Drop the call context without any signalling. */
static void
l3ni1_reset(struct l3_process *pc, u_char pr, void *arg)
{
	ni1_release_l3_process(pc);
}

/*
 * Send DISCONNECT with the pending cause (default 16), append pending
 * user-user data, enter state 11 and start T305.
 */
static void
l3ni1_disconnect_req(struct l3_process *pc, u_char pr, void *arg)
{
	struct sk_buff *skb;
	u_char tmp[16 + 40];
	u_char *p = tmp;
	int l;
	u_char cause = 16;

	if (pc->para.cause != NO_CAUSE)
		cause = pc->para.cause;
	StopAllL3Timer(pc);

	MsgHead(p, pc->callref, MT_DISCONNECT);

	*p++ = IE_CAUSE;
	*p++ = 0x2;
	*p++ = 0x80;
	*p++ = cause | 0x80;

	if (pc->prot.ni1.uus1_data[0]) {
		*p++ = IE_USER_USER; /* UUS info element */
		*p++ = strlen(pc->prot.ni1.uus1_data) + 1;
		*p++ = 0x04; /* IA5 chars */
		strcpy(p, pc->prot.ni1.uus1_data);
		p += strlen(pc->prot.ni1.uus1_data);
		pc->prot.ni1.uus1_data[0] = '\0';
	}

	l = p - tmp;
	if (!(skb = l3_alloc_skb(l)))
		return;
	memcpy(skb_put(skb, l), tmp, l);
	newl3state(pc, 11);
	l3_msg(pc->st, DL_DATA | REQUEST, skb);
	L3AddTimer(&pc->timer, T305, CC_T305);
}

/*
 * Layer 4 answers the call: reject a waiting call that has no
 * B-channel, otherwise send CONNECT (with channel id), enter state 8
 * and start T313.
 */
static void
l3ni1_setup_rsp(struct l3_process *pc, u_char pr, void *arg)
{
	if (!pc->para.bchannel) {
		if (pc->debug & L3_DEB_WARN)
			l3_debug(pc->st, "D-chan connect for waiting call");
		l3ni1_disconnect_req(pc, pr, arg);
		return;
	}
	newl3state(pc, 8);
	if (pc->debug & L3_DEB_WARN)
		l3_debug(pc->st, "D-chan connect for waiting call");
	l3ni1_message_plus_chid(pc, MT_CONNECT); /* GE 05/09/00 */
	L3DelTimer(&pc->timer);
	L3AddTimer(&pc->timer, T313, CC_T313);
}

/*
 * CONNECT ACKNOWLEDGE received: validate IEs, enter state 10 and report
 * setup completion upwards.
 */
static void
l3ni1_connect_ack(struct l3_process *pc, u_char pr, void *arg)
{
	struct sk_buff *skb = arg;
	int ret;

	ret = check_infoelements(pc, skb, ie_CONNECT_ACKNOWLEDGE);
	if (ERR_IE_COMPREHENSION == ret) {
		l3ni1_std_ie_err(pc, ret);
		return;
	}
	newl3state(pc, 10);
	L3DelTimer(&pc->timer);
	if (ret)
		l3ni1_std_ie_err(pc, ret);
	pc->st->l3.l3l4(pc->st, CC_SETUP_COMPL | INDICATION, pc);
}

/*
 * Reject an incoming call: send RELEASE_COMPLETE with the pending cause
 * (default 21), report the release and free the process.
 */
static void
l3ni1_reject_req(struct l3_process *pc, u_char pr, void *arg)
{
	struct sk_buff *skb;
	u_char tmp[16];
	u_char *p = tmp;
	int l;
	u_char cause = 21;

	if (pc->para.cause != NO_CAUSE)
		cause = pc->para.cause;

	MsgHead(p, pc->callref, MT_RELEASE_COMPLETE);

	*p++ = IE_CAUSE;
	*p++ = 0x2;
	*p++ = 0x80;
	*p++ = cause | 0x80;

	l = p - tmp;
	if (!(skb = l3_alloc_skb(l)))
		return;
	memcpy(skb_put(skb, l), tmp, l);
	l3_msg(pc->st, DL_DATA | REQUEST, skb);
	pc->st->l3.l3l4(pc->st, CC_RELEASE | INDICATION, pc);
	newl3state(pc, 0);
	ni1_release_l3_process(pc);
}

/*
 * RELEASE received: stop timers, parse cause and facility IEs, validate
 * the message and answer with RELEASE_COMPLETE (carrying an error cause
 * if one was determined) before releasing the process.
 */
static void
l3ni1_release(struct l3_process *pc, u_char pr, void *arg)
{
	struct sk_buff *skb = arg;
	u_char *p;
	int ret, cause = 0;

	StopAllL3Timer(pc);
	if ((ret = l3ni1_get_cause(pc, skb)) > 0) {
		if (pc->debug & L3_DEB_WARN)
			l3_debug(pc->st, "REL get_cause ret(%d)", ret);
	} else if (ret < 0)
		pc->para.cause = NO_CAUSE;
	if ((p = findie(skb->data, skb->len, IE_FACILITY, 0))) {
		l3ni1_parse_facility(pc->st, pc, pc->callref, p);
	}
	if ((ret < 0) && (pc->state != 11))
		cause = 96;
	else if (ret > 0)
		cause = 100;
	ret = check_infoelements(pc, skb, ie_RELEASE);
	if (ERR_IE_COMPREHENSION == ret)
		cause = 96;
	else if ((ERR_IE_UNRECOGNIZED == ret) && (!cause))
		cause = 99;
	if (cause)
		l3ni1_message_cause(pc, MT_RELEASE_COMPLETE, cause);
	else
		l3ni1_message(pc, MT_RELEASE_COMPLETE);
	pc->st->l3.l3l4(pc->st, CC_RELEASE | INDICATION, pc);
	newl3state(pc, 0);
	ni1_release_l3_process(pc);
}

/* Answer an incoming call with ALERTING (state 7); append pending
 * user-user data if present. */
static void
l3ni1_alert_req(struct l3_process *pc, u_char pr, void *arg)
{
	newl3state(pc, 7);
	if (!pc->prot.ni1.uus1_data[0])
		l3ni1_message(pc, MT_ALERTING);
	else
		l3ni1_msg_with_uus(pc, MT_ALERTING);
}

/* Send CALL PROCEEDING (state 9) and report CC_PROCEED_SEND upwards. */
static void
l3ni1_proceed_req(struct l3_process *pc, u_char pr, void *arg)
{
	newl3state(pc, 9);
	l3ni1_message(pc, MT_CALL_PROCEEDING);
	pc->st->l3.l3l4(pc->st, CC_PROCEED_SEND | INDICATION, pc);
}

/* Send SETUP ACKNOWLEDGE (state 25, overlap receiving) and start T302. */
static void
l3ni1_setup_ack_req(struct l3_process *pc, u_char pr, void *arg)
{
	newl3state(pc, 25);
	L3DelTimer(&pc->timer);
	L3AddTimer(&pc->timer, T302, CC_T302);
	l3ni1_message(pc, MT_SETUP_ACKNOWLEDGE);
}

/********************************************/
/* deliver an incoming display message to HL */
/********************************************/
static void
l3ni1_deliver_display(struct l3_process *pc, int pr, u_char *infp)
{
	u_char len;
	isdn_ctrl ic;
	struct IsdnCardState *cs;
	char *p;

	if (*infp++ != IE_DISPLAY)
		return;
	if ((len = *infp++) > 80)
		return; /* total length <= 82 */
	if (!pc->chan)
		return;

	p = ic.parm.display;
	while (len--)
		*p++ = *infp++;
	*p = '\0';
	ic.command = ISDN_STAT_DISPLAY;
	cs = pc->st->l1.hardware;
	ic.driver = cs->myid;
	ic.arg = pc->chan->chan;
	cs->iif.statcallb(&ic);
} /* l3ni1_deliver_display */

/*
 * PROGRESS received: validate the progress IE contents (location and
 * description octets), then the remaining IEs, and indicate progress
 * upwards; on malformed contents a STATUS is sent instead.
 */
static void
l3ni1_progress(struct l3_process *pc, u_char pr, void *arg)
{
	struct sk_buff *skb = arg;
	int err = 0;
	u_char *p;

	if ((p = findie(skb->data, skb->len, IE_PROGRESS, 0))) {
		if (p[1] != 2) {
			err = 1;
			pc->para.cause = 100;
		} else if (!(p[2] & 0x70)) {
			switch (p[2]) {
			case 0x80:
			case 0x81:
			case 0x82:
			case 0x84:
			case 0x85:
			case 0x87:
			case 0x8a:
				switch (p[3]) {
				case 0x81:
				case 0x82:
				case 0x83:
				case 0x84:
				case 0x88:
					break;
				default:
					err = 2;
					pc->para.cause = 100;
					break;
				}
				break;
			default:
				err = 3;
				pc->para.cause = 100;
				break;
			}
		}
	} else {
		pc->para.cause = 96;
		err = 4;
	}
	if (err) {
		if (pc->debug & L3_DEB_WARN)
l3_debug(pc->st, "progress error %d", err);
		l3ni1_status_send(pc, pr, NULL);
		return;
	}
	/* Now we are on none mandatory IEs */
	err = check_infoelements(pc, skb, ie_PROGRESS);
	if (err)
		l3ni1_std_ie_err(pc, err);
	if (ERR_IE_COMPREHENSION != err)
		pc->st->l3.l3l4(pc->st, CC_PROGRESS | INDICATION, pc);
}

/*
 * NOTIFY received: validate the notify IE (one octet, known values
 * only), then the remaining IEs, and pass the notification upwards.
 */
static void
l3ni1_notify(struct l3_process *pc, u_char pr, void *arg)
{
	struct sk_buff *skb = arg;
	int err = 0;
	u_char *p;

	if ((p = findie(skb->data, skb->len, IE_NOTIFY, 0))) {
		if (p[1] != 1) {
			err = 1;
			pc->para.cause = 100;
		} else {
			switch (p[2]) {
			case 0x80:
			case 0x81:
			case 0x82:
				break;
			default:
				pc->para.cause = 100;
				err = 2;
				break;
			}
		}
	} else {
		pc->para.cause = 96;
		err = 3;
	}
	if (err) {
		if (pc->debug & L3_DEB_WARN)
			l3_debug(pc->st, "notify error %d", err);
		l3ni1_status_send(pc, pr, NULL);
		return;
	}
	/* Now we are on none mandatory IEs */
	err = check_infoelements(pc, skb, ie_NOTIFY);
	if (err)
		l3ni1_std_ie_err(pc, err);
	if (ERR_IE_COMPREHENSION != err)
		pc->st->l3.l3l4(pc->st, CC_NOTIFY | INDICATION, pc);
}

/* STATUS ENQUIRY received: answer with STATUS, cause 30. */
static void
l3ni1_status_enq(struct l3_process *pc, u_char pr, void *arg)
{
	int ret;
	struct sk_buff *skb = arg;

	ret = check_infoelements(pc, skb, ie_STATUS_ENQUIRY);
	l3ni1_std_ie_err(pc, ret);
	pc->para.cause = 30; /* response to STATUS_ENQUIRY */
	l3ni1_status_send(pc, pr, NULL);
}

/*
 * INFORMATION received: in overlap receiving (state 25) append the
 * called-party digits to eazmsn, report CC_MORE_INFO and restart T302.
 */
static void
l3ni1_information(struct l3_process *pc, u_char pr, void *arg)
{
	int ret;
	struct sk_buff *skb = arg;
	u_char *p;
	char tmp[32];

	ret = check_infoelements(pc, skb, ie_INFORMATION);
	if (ret)
		l3ni1_std_ie_err(pc, ret);
	if (pc->state == 25) { /* overlap receiving */
		L3DelTimer(&pc->timer);
		p = skb->data;
		if ((p = findie(p, skb->len, 0x70, 0))) {
			iecpy(tmp, p, 1);
			strcat(pc->para.setup.eazmsn, tmp);
			pc->st->l3.l3l4(pc->st, CC_MORE_INFO | INDICATION, pc);
		}
		L3AddTimer(&pc->timer, T302, CC_T302);
	}
}

/******************************/
/* handle deflection requests */
/******************************/
static void
l3ni1_redir_req(struct l3_process *pc, u_char pr, void *arg)
{
	struct sk_buff *skb;
	u_char tmp[128];
	u_char *p = tmp;
	u_char *subp;
	u_char len_phone = 0;
	u_char len_sub = 0;
	int l;

	strcpy(pc->prot.ni1.uus1_data, pc->chan->setup.eazmsn); /* copy uus element if available */
	if (!pc->chan->setup.phone[0]) {
		pc->para.cause = -1;
		l3ni1_disconnect_req(pc, pr, arg); /* disconnect immediately */
		return;
	} /* only uus */

	if (pc->prot.ni1.invoke_id)
		free_invoke_id(pc->st, pc->prot.ni1.invoke_id);

	if (!(pc->prot.ni1.invoke_id = new_invoke_id(pc->st)))
		return;

	MsgHead(p, pc->callref, MT_FACILITY);

	for (subp = pc->chan->setup.phone; (*subp) && (*subp != '.'); subp++)
		len_phone++; /* len of phone number */
	if (*subp++ == '.')
		len_sub = strlen(subp) + 2; /* length including info subaddress element */

	*p++ = 0x1c;					/* Facility info element */
	*p++ = len_phone + len_sub + 2 + 2 + 8 + 3 + 3;	/* length of element */
	*p++ = 0x91;					/* remote operations protocol */
	*p++ = 0xa1;					/* invoke component */

	*p++ = len_phone + len_sub + 2 + 2 + 8 + 3;	/* length of data */
	*p++ = 0x02;			/* invoke id tag, integer */
	*p++ = 0x01;			/* length */
	*p++ = pc->prot.ni1.invoke_id;	/* invoke id */
	*p++ = 0x02;			/* operation value tag, integer */
	*p++ = 0x01;			/* length */
	*p++ = 0x0D;			/* Call Deflect */

	*p++ = 0x30;			/* sequence phone number */
	*p++ = len_phone + 2 + 2 + 3 + len_sub;	/* length */

	*p++ = 0x30;			/* Deflected to UserNumber */
	*p++ = len_phone + 2 + len_sub;	/* length */
	*p++ = 0x80;			/* NumberDigits */
	*p++ = len_phone;		/* length */
	for (l = 0; l < len_phone; l++)
		*p++ = pc->chan->setup.phone[l];

	if (len_sub) {
		*p++ = 0x04;		/* called party subaddress */
		*p++ = len_sub - 2;
		while (*subp)
			*p++ = *subp++;
	}

	*p++ = 0x01;			/* screening identifier */
	*p++ = 0x01;
	*p++ = pc->chan->setup.screen;

	l = p - tmp;
	if (!(skb = l3_alloc_skb(l)))
		return;
	memcpy(skb_put(skb, l), tmp, l);

	l3_msg(pc->st, DL_DATA | REQUEST, skb);
} /* l3ni1_redir_req */

/********************************************/
/* handle deflection request in early state */
/********************************************/
static
void l3ni1_redir_req_early(struct l3_process *pc, u_char pr, void *arg) { l3ni1_proceed_req(pc, pr, arg); l3ni1_redir_req(pc, pr, arg); } /* l3ni1_redir_req_early */ /***********************************************/ /* handle special commands for this protocol. */ /* Examples are call independent services like */ /* remote operations with dummy callref. */ /***********************************************/ static int l3ni1_cmd_global(struct PStack *st, isdn_ctrl *ic) { u_char id; u_char temp[265]; u_char *p = temp; int i, l, proc_len; struct sk_buff *skb; struct l3_process *pc = NULL; switch (ic->arg) { case NI1_CMD_INVOKE: if (ic->parm.ni1_io.datalen < 0) return (-2); /* invalid parameter */ for (proc_len = 1, i = ic->parm.ni1_io.proc >> 8; i; i++) i = i >> 8; /* add one byte */ l = ic->parm.ni1_io.datalen + proc_len + 8; /* length excluding ie header */ if (l > 255) return (-2); /* too long */ if (!(id = new_invoke_id(st))) return (0); /* first get a invoke id -> return if no available */ i = -1; MsgHead(p, i, MT_FACILITY); /* build message head */ *p++ = 0x1C; /* Facility IE */ *p++ = l; /* length of ie */ *p++ = 0x91; /* remote operations */ *p++ = 0xA1; /* invoke */ *p++ = l - 3; /* length of invoke */ *p++ = 0x02; /* invoke id tag */ *p++ = 0x01; /* length is 1 */ *p++ = id; /* invoke id */ *p++ = 0x02; /* operation */ *p++ = proc_len; /* length of operation */ for (i = proc_len; i; i--) *p++ = (ic->parm.ni1_io.proc >> (i - 1)) & 0xFF; memcpy(p, ic->parm.ni1_io.data, ic->parm.ni1_io.datalen); /* copy data */ l = (p - temp) + ic->parm.ni1_io.datalen; /* total length */ if (ic->parm.ni1_io.timeout > 0) { pc = ni1_new_l3_process(st, -1); if (!pc) { free_invoke_id(st, id); return (-2); } /* remember id */ pc->prot.ni1.ll_id = ic->parm.ni1_io.ll_id; /* and procedure */ pc->prot.ni1.proc = ic->parm.ni1_io.proc; } if (!(skb = l3_alloc_skb(l))) { free_invoke_id(st, id); if (pc) ni1_release_l3_process(pc); return (-2); } memcpy(skb_put(skb, l), temp, l); if (pc) { 
pc->prot.ni1.invoke_id = id; /* remember id */ L3AddTimer(&pc->timer, ic->parm.ni1_io.timeout, CC_TNI1_IO | REQUEST); } l3_msg(st, DL_DATA | REQUEST, skb); ic->parm.ni1_io.hl_id = id; /* return id */ return (0); case NI1_CMD_INVOKE_ABORT: if ((pc = l3ni1_search_dummy_proc(st, ic->parm.ni1_io.hl_id))) { L3DelTimer(&pc->timer); /* remove timer */ ni1_release_l3_process(pc); return (0); } else { l3_debug(st, "l3ni1_cmd_global abort unknown id"); return (-2); } break; default: l3_debug(st, "l3ni1_cmd_global unknown cmd 0x%lx", ic->arg); return (-1); } /* switch ic-> arg */ return (-1); } /* l3ni1_cmd_global */ static void l3ni1_io_timer(struct l3_process *pc) { isdn_ctrl ic; struct IsdnCardState *cs = pc->st->l1.hardware; L3DelTimer(&pc->timer); /* remove timer */ ic.driver = cs->myid; ic.command = ISDN_STAT_PROT; ic.arg = NI1_STAT_INVOKE_ERR; ic.parm.ni1_io.hl_id = pc->prot.ni1.invoke_id; ic.parm.ni1_io.ll_id = pc->prot.ni1.ll_id; ic.parm.ni1_io.proc = pc->prot.ni1.proc; ic.parm.ni1_io.timeout = -1; ic.parm.ni1_io.datalen = 0; ic.parm.ni1_io.data = NULL; free_invoke_id(pc->st, pc->prot.ni1.invoke_id); pc->prot.ni1.invoke_id = 0; /* reset id */ cs->iif.statcallb(&ic); ni1_release_l3_process(pc); } /* l3ni1_io_timer */ static void l3ni1_release_ind(struct l3_process *pc, u_char pr, void *arg) { u_char *p; struct sk_buff *skb = arg; int callState = 0; p = skb->data; if ((p = findie(p, skb->len, IE_CALL_STATE, 0))) { p++; if (1 == *p++) callState = *p; } if (callState == 0) { /* ETS 300-104 7.6.1, 8.6.1, 10.6.1... 
and 16.1 * set down layer 3 without sending any message */ pc->st->l3.l3l4(pc->st, CC_RELEASE | INDICATION, pc); newl3state(pc, 0); ni1_release_l3_process(pc); } else { pc->st->l3.l3l4(pc->st, CC_IGNORE | INDICATION, pc); } } static void l3ni1_dummy(struct l3_process *pc, u_char pr, void *arg) { } static void l3ni1_t302(struct l3_process *pc, u_char pr, void *arg) { L3DelTimer(&pc->timer); pc->para.loc = 0; pc->para.cause = 28; /* invalid number */ l3ni1_disconnect_req(pc, pr, NULL); pc->st->l3.l3l4(pc->st, CC_SETUP_ERR, pc); } static void l3ni1_t303(struct l3_process *pc, u_char pr, void *arg) { if (pc->N303 > 0) { pc->N303--; L3DelTimer(&pc->timer); l3ni1_setup_req(pc, pr, arg); } else { L3DelTimer(&pc->timer); l3ni1_message_cause(pc, MT_RELEASE_COMPLETE, 102); pc->st->l3.l3l4(pc->st, CC_NOSETUP_RSP, pc); ni1_release_l3_process(pc); } } static void l3ni1_t304(struct l3_process *pc, u_char pr, void *arg) { L3DelTimer(&pc->timer); pc->para.loc = 0; pc->para.cause = 102; l3ni1_disconnect_req(pc, pr, NULL); pc->st->l3.l3l4(pc->st, CC_SETUP_ERR, pc); } static void l3ni1_t305(struct l3_process *pc, u_char pr, void *arg) { u_char tmp[16]; u_char *p = tmp; int l; struct sk_buff *skb; u_char cause = 16; L3DelTimer(&pc->timer); if (pc->para.cause != NO_CAUSE) cause = pc->para.cause; MsgHead(p, pc->callref, MT_RELEASE); *p++ = IE_CAUSE; *p++ = 0x2; *p++ = 0x80; *p++ = cause | 0x80; l = p - tmp; if (!(skb = l3_alloc_skb(l))) return; memcpy(skb_put(skb, l), tmp, l); newl3state(pc, 19); l3_msg(pc->st, DL_DATA | REQUEST, skb); L3AddTimer(&pc->timer, T308, CC_T308_1); } static void l3ni1_t310(struct l3_process *pc, u_char pr, void *arg) { L3DelTimer(&pc->timer); pc->para.loc = 0; pc->para.cause = 102; l3ni1_disconnect_req(pc, pr, NULL); pc->st->l3.l3l4(pc->st, CC_SETUP_ERR, pc); } static void l3ni1_t313(struct l3_process *pc, u_char pr, void *arg) { L3DelTimer(&pc->timer); pc->para.loc = 0; pc->para.cause = 102; l3ni1_disconnect_req(pc, pr, NULL); pc->st->l3.l3l4(pc->st, 
CC_CONNECT_ERR, pc); } static void l3ni1_t308_1(struct l3_process *pc, u_char pr, void *arg) { newl3state(pc, 19); L3DelTimer(&pc->timer); l3ni1_message(pc, MT_RELEASE); L3AddTimer(&pc->timer, T308, CC_T308_2); } static void l3ni1_t308_2(struct l3_process *pc, u_char pr, void *arg) { L3DelTimer(&pc->timer); pc->st->l3.l3l4(pc->st, CC_RELEASE_ERR, pc); ni1_release_l3_process(pc); } static void l3ni1_t318(struct l3_process *pc, u_char pr, void *arg) { L3DelTimer(&pc->timer); pc->para.cause = 102; /* Timer expiry */ pc->para.loc = 0; /* local */ pc->st->l3.l3l4(pc->st, CC_RESUME_ERR, pc); newl3state(pc, 19); l3ni1_message(pc, MT_RELEASE); L3AddTimer(&pc->timer, T308, CC_T308_1); } static void l3ni1_t319(struct l3_process *pc, u_char pr, void *arg) { L3DelTimer(&pc->timer); pc->para.cause = 102; /* Timer expiry */ pc->para.loc = 0; /* local */ pc->st->l3.l3l4(pc->st, CC_SUSPEND_ERR, pc); newl3state(pc, 10); } static void l3ni1_restart(struct l3_process *pc, u_char pr, void *arg) { L3DelTimer(&pc->timer); pc->st->l3.l3l4(pc->st, CC_RELEASE | INDICATION, pc); ni1_release_l3_process(pc); } static void l3ni1_status(struct l3_process *pc, u_char pr, void *arg) { u_char *p; struct sk_buff *skb = arg; int ret; u_char cause = 0, callState = 0; if ((ret = l3ni1_get_cause(pc, skb))) { if (pc->debug & L3_DEB_WARN) l3_debug(pc->st, "STATUS get_cause ret(%d)", ret); if (ret < 0) cause = 96; else if (ret > 0) cause = 100; } if ((p = findie(skb->data, skb->len, IE_CALL_STATE, 0))) { p++; if (1 == *p++) { callState = *p; if (!ie_in_set(pc, *p, l3_valid_states)) cause = 100; } else cause = 100; } else cause = 96; if (!cause) { /* no error before */ ret = check_infoelements(pc, skb, ie_STATUS); if (ERR_IE_COMPREHENSION == ret) cause = 96; else if (ERR_IE_UNRECOGNIZED == ret) cause = 99; } if (cause) { u_char tmp; if (pc->debug & L3_DEB_WARN) l3_debug(pc->st, "STATUS error(%d/%d)", ret, cause); tmp = pc->para.cause; pc->para.cause = cause; l3ni1_status_send(pc, 0, NULL); if (cause == 99) 
pc->para.cause = tmp; else return; } cause = pc->para.cause; if (((cause & 0x7f) == 111) && (callState == 0)) { /* ETS 300-104 7.6.1, 8.6.1, 10.6.1... * if received MT_STATUS with cause == 111 and call * state == 0, then we must set down layer 3 */ pc->st->l3.l3l4(pc->st, CC_RELEASE | INDICATION, pc); newl3state(pc, 0); ni1_release_l3_process(pc); } } static void l3ni1_facility(struct l3_process *pc, u_char pr, void *arg) { struct sk_buff *skb = arg; int ret; ret = check_infoelements(pc, skb, ie_FACILITY); l3ni1_std_ie_err(pc, ret); { u_char *p; if ((p = findie(skb->data, skb->len, IE_FACILITY, 0))) l3ni1_parse_facility(pc->st, pc, pc->callref, p); } } static void l3ni1_suspend_req(struct l3_process *pc, u_char pr, void *arg) { struct sk_buff *skb; u_char tmp[32]; u_char *p = tmp; u_char i, l; u_char *msg = pc->chan->setup.phone; MsgHead(p, pc->callref, MT_SUSPEND); l = *msg++; if (l && (l <= 10)) { /* Max length 10 octets */ *p++ = IE_CALL_ID; *p++ = l; for (i = 0; i < l; i++) *p++ = *msg++; } else if (l) { l3_debug(pc->st, "SUS wrong CALL_ID len %d", l); return; } l = p - tmp; if (!(skb = l3_alloc_skb(l))) return; memcpy(skb_put(skb, l), tmp, l); l3_msg(pc->st, DL_DATA | REQUEST, skb); newl3state(pc, 15); L3AddTimer(&pc->timer, T319, CC_T319); } static void l3ni1_suspend_ack(struct l3_process *pc, u_char pr, void *arg) { struct sk_buff *skb = arg; int ret; L3DelTimer(&pc->timer); newl3state(pc, 0); pc->para.cause = NO_CAUSE; pc->st->l3.l3l4(pc->st, CC_SUSPEND | CONFIRM, pc); /* We don't handle suspend_ack for IE errors now */ if ((ret = check_infoelements(pc, skb, ie_SUSPEND_ACKNOWLEDGE))) if (pc->debug & L3_DEB_WARN) l3_debug(pc->st, "SUSPACK check ie(%d)", ret); ni1_release_l3_process(pc); } static void l3ni1_suspend_rej(struct l3_process *pc, u_char pr, void *arg) { struct sk_buff *skb = arg; int ret; if ((ret = l3ni1_get_cause(pc, skb))) { if (pc->debug & L3_DEB_WARN) l3_debug(pc->st, "SUSP_REJ get_cause ret(%d)", ret); if (ret < 0) pc->para.cause = 96; else 
pc->para.cause = 100; l3ni1_status_send(pc, pr, NULL); return; } ret = check_infoelements(pc, skb, ie_SUSPEND_REJECT); if (ERR_IE_COMPREHENSION == ret) { l3ni1_std_ie_err(pc, ret); return; } L3DelTimer(&pc->timer); pc->st->l3.l3l4(pc->st, CC_SUSPEND_ERR, pc); newl3state(pc, 10); if (ret) /* STATUS for none mandatory IE errors after actions are taken */ l3ni1_std_ie_err(pc, ret); } static void l3ni1_resume_req(struct l3_process *pc, u_char pr, void *arg) { struct sk_buff *skb; u_char tmp[32]; u_char *p = tmp; u_char i, l; u_char *msg = pc->para.setup.phone; MsgHead(p, pc->callref, MT_RESUME); l = *msg++; if (l && (l <= 10)) { /* Max length 10 octets */ *p++ = IE_CALL_ID; *p++ = l; for (i = 0; i < l; i++) *p++ = *msg++; } else if (l) { l3_debug(pc->st, "RES wrong CALL_ID len %d", l); return; } l = p - tmp; if (!(skb = l3_alloc_skb(l))) return; memcpy(skb_put(skb, l), tmp, l); l3_msg(pc->st, DL_DATA | REQUEST, skb); newl3state(pc, 17); L3AddTimer(&pc->timer, T318, CC_T318); } static void l3ni1_resume_ack(struct l3_process *pc, u_char pr, void *arg) { struct sk_buff *skb = arg; int id, ret; if ((id = l3ni1_get_channel_id(pc, skb)) > 0) { if ((0 == id) || ((3 == id) && (0x10 == pc->para.moderate))) { if (pc->debug & L3_DEB_WARN) l3_debug(pc->st, "resume ack with wrong chid %x", id); pc->para.cause = 100; l3ni1_status_send(pc, pr, NULL); return; } pc->para.bchannel = id; } else if (1 == pc->state) { if (pc->debug & L3_DEB_WARN) l3_debug(pc->st, "resume ack without chid (ret %d)", id); pc->para.cause = 96; l3ni1_status_send(pc, pr, NULL); return; } ret = check_infoelements(pc, skb, ie_RESUME_ACKNOWLEDGE); if (ERR_IE_COMPREHENSION == ret) { l3ni1_std_ie_err(pc, ret); return; } L3DelTimer(&pc->timer); pc->st->l3.l3l4(pc->st, CC_RESUME | CONFIRM, pc); newl3state(pc, 10); if (ret) /* STATUS for none mandatory IE errors after actions are taken */ l3ni1_std_ie_err(pc, ret); } static void l3ni1_resume_rej(struct l3_process *pc, u_char pr, void *arg) { struct sk_buff *skb = arg; 
int ret; if ((ret = l3ni1_get_cause(pc, skb))) { if (pc->debug & L3_DEB_WARN) l3_debug(pc->st, "RES_REJ get_cause ret(%d)", ret); if (ret < 0) pc->para.cause = 96; else pc->para.cause = 100; l3ni1_status_send(pc, pr, NULL); return; } ret = check_infoelements(pc, skb, ie_RESUME_REJECT); if (ERR_IE_COMPREHENSION == ret) { l3ni1_std_ie_err(pc, ret); return; } L3DelTimer(&pc->timer); pc->st->l3.l3l4(pc->st, CC_RESUME_ERR, pc); newl3state(pc, 0); if (ret) /* STATUS for none mandatory IE errors after actions are taken */ l3ni1_std_ie_err(pc, ret); ni1_release_l3_process(pc); } static void l3ni1_global_restart(struct l3_process *pc, u_char pr, void *arg) { u_char tmp[32]; u_char *p; u_char ri, ch = 0, chan = 0; int l; struct sk_buff *skb = arg; struct l3_process *up; newl3state(pc, 2); L3DelTimer(&pc->timer); p = skb->data; if ((p = findie(p, skb->len, IE_RESTART_IND, 0))) { ri = p[2]; l3_debug(pc->st, "Restart %x", ri); } else { l3_debug(pc->st, "Restart without restart IE"); ri = 0x86; } p = skb->data; if ((p = findie(p, skb->len, IE_CHANNEL_ID, 0))) { chan = p[2] & 3; ch = p[2]; if (pc->st->l3.debug) l3_debug(pc->st, "Restart for channel %d", chan); } newl3state(pc, 2); up = pc->st->l3.proc; while (up) { if ((ri & 7) == 7) up->st->lli.l4l3(up->st, CC_RESTART | REQUEST, up); else if (up->para.bchannel == chan) up->st->lli.l4l3(up->st, CC_RESTART | REQUEST, up); up = up->next; } p = tmp; MsgHead(p, pc->callref, MT_RESTART_ACKNOWLEDGE); if (chan) { *p++ = IE_CHANNEL_ID; *p++ = 1; *p++ = ch | 0x80; } *p++ = 0x79; /* RESTART Ind */ *p++ = 1; *p++ = ri; l = p - tmp; if (!(skb = l3_alloc_skb(l))) return; memcpy(skb_put(skb, l), tmp, l); newl3state(pc, 0); l3_msg(pc->st, DL_DATA | REQUEST, skb); } static void l3ni1_dl_reset(struct l3_process *pc, u_char pr, void *arg) { pc->para.cause = 0x29; /* Temporary failure */ pc->para.loc = 0; l3ni1_disconnect_req(pc, pr, NULL); pc->st->l3.l3l4(pc->st, CC_SETUP_ERR, pc); } static void l3ni1_dl_release(struct l3_process *pc, u_char pr, 
void *arg) { newl3state(pc, 0); pc->para.cause = 0x1b; /* Destination out of order */ pc->para.loc = 0; pc->st->l3.l3l4(pc->st, CC_RELEASE | INDICATION, pc); release_l3_process(pc); } static void l3ni1_dl_reestablish(struct l3_process *pc, u_char pr, void *arg) { L3DelTimer(&pc->timer); L3AddTimer(&pc->timer, T309, CC_T309); l3_msg(pc->st, DL_ESTABLISH | REQUEST, NULL); } static void l3ni1_dl_reest_status(struct l3_process *pc, u_char pr, void *arg) { L3DelTimer(&pc->timer); pc->para.cause = 0x1F; /* normal, unspecified */ l3ni1_status_send(pc, 0, NULL); } static void l3ni1_SendSpid(struct l3_process *pc, u_char pr, struct sk_buff *skb, int iNewState) { u_char *p; char *pSPID; struct Channel *pChan = pc->st->lli.userdata; int l; if (skb) dev_kfree_skb(skb); if (!(pSPID = strchr(pChan->setup.eazmsn, ':'))) { printk(KERN_ERR "SPID not supplied in EAZMSN %s\n", pChan->setup.eazmsn); newl3state(pc, 0); pc->st->l3.l3l2(pc->st, DL_RELEASE | REQUEST, NULL); return; } l = strlen(++pSPID); if (!(skb = l3_alloc_skb(5 + l))) { printk(KERN_ERR "HiSax can't get memory to send SPID\n"); return; } p = skb_put(skb, 5); *p++ = PROTO_DIS_EURO; *p++ = 0; *p++ = MT_INFORMATION; *p++ = IE_SPID; *p++ = l; memcpy(skb_put(skb, l), pSPID, l); newl3state(pc, iNewState); L3DelTimer(&pc->timer); L3AddTimer(&pc->timer, TSPID, CC_TSPID); pc->st->l3.l3l2(pc->st, DL_DATA | REQUEST, skb); } static void l3ni1_spid_send(struct l3_process *pc, u_char pr, void *arg) { l3ni1_SendSpid(pc, pr, arg, 20); } static void l3ni1_spid_epid(struct l3_process *pc, u_char pr, void *arg) { struct sk_buff *skb = arg; if (skb->data[1] == 0) if (skb->data[3] == IE_ENDPOINT_ID) { L3DelTimer(&pc->timer); newl3state(pc, 0); l3_msg(pc->st, DL_ESTABLISH | CONFIRM, NULL); } dev_kfree_skb(skb); } static void l3ni1_spid_tout(struct l3_process *pc, u_char pr, void *arg) { if (pc->state < 22) l3ni1_SendSpid(pc, pr, arg, pc->state + 1); else { L3DelTimer(&pc->timer); dev_kfree_skb(arg); printk(KERN_ERR "SPID not accepted\n"); 
newl3state(pc, 0); pc->st->l3.l3l2(pc->st, DL_RELEASE | REQUEST, NULL); } } /* *INDENT-OFF* */ static struct stateentry downstatelist[] = { {SBIT(0), CC_SETUP | REQUEST, l3ni1_setup_req}, {SBIT(0), CC_RESUME | REQUEST, l3ni1_resume_req}, {SBIT(1) | SBIT(2) | SBIT(3) | SBIT(4) | SBIT(6) | SBIT(7) | SBIT(8) | SBIT(9) | SBIT(10) | SBIT(25), CC_DISCONNECT | REQUEST, l3ni1_disconnect_req}, {SBIT(12), CC_RELEASE | REQUEST, l3ni1_release_req}, {ALL_STATES, CC_RESTART | REQUEST, l3ni1_restart}, {SBIT(6) | SBIT(25), CC_IGNORE | REQUEST, l3ni1_reset}, {SBIT(6) | SBIT(25), CC_REJECT | REQUEST, l3ni1_reject_req}, {SBIT(6) | SBIT(25), CC_PROCEED_SEND | REQUEST, l3ni1_proceed_req}, {SBIT(6), CC_MORE_INFO | REQUEST, l3ni1_setup_ack_req}, {SBIT(25), CC_MORE_INFO | REQUEST, l3ni1_dummy}, {SBIT(6) | SBIT(9) | SBIT(25), CC_ALERTING | REQUEST, l3ni1_alert_req}, {SBIT(6) | SBIT(7) | SBIT(9) | SBIT(25), CC_SETUP | RESPONSE, l3ni1_setup_rsp}, {SBIT(10), CC_SUSPEND | REQUEST, l3ni1_suspend_req}, {SBIT(7) | SBIT(9) | SBIT(25), CC_REDIR | REQUEST, l3ni1_redir_req}, {SBIT(6), CC_REDIR | REQUEST, l3ni1_redir_req_early}, {SBIT(9) | SBIT(25), CC_DISCONNECT | REQUEST, l3ni1_disconnect_req}, {SBIT(25), CC_T302, l3ni1_t302}, {SBIT(1), CC_T303, l3ni1_t303}, {SBIT(2), CC_T304, l3ni1_t304}, {SBIT(3), CC_T310, l3ni1_t310}, {SBIT(8), CC_T313, l3ni1_t313}, {SBIT(11), CC_T305, l3ni1_t305}, {SBIT(15), CC_T319, l3ni1_t319}, {SBIT(17), CC_T318, l3ni1_t318}, {SBIT(19), CC_T308_1, l3ni1_t308_1}, {SBIT(19), CC_T308_2, l3ni1_t308_2}, {SBIT(10), CC_T309, l3ni1_dl_release}, { SBIT(20) | SBIT(21) | SBIT(22), CC_TSPID, l3ni1_spid_tout }, }; static struct stateentry datastatelist[] = { {ALL_STATES, MT_STATUS_ENQUIRY, l3ni1_status_enq}, {ALL_STATES, MT_FACILITY, l3ni1_facility}, {SBIT(19), MT_STATUS, l3ni1_release_ind}, {ALL_STATES, MT_STATUS, l3ni1_status}, {SBIT(0), MT_SETUP, l3ni1_setup}, {SBIT(6) | SBIT(7) | SBIT(8) | SBIT(9) | SBIT(10) | SBIT(11) | SBIT(12) | SBIT(15) | SBIT(17) | SBIT(19) | SBIT(25), MT_SETUP, 
l3ni1_dummy}, {SBIT(1) | SBIT(2), MT_CALL_PROCEEDING, l3ni1_call_proc}, {SBIT(1), MT_SETUP_ACKNOWLEDGE, l3ni1_setup_ack}, {SBIT(2) | SBIT(3), MT_ALERTING, l3ni1_alerting}, {SBIT(2) | SBIT(3), MT_PROGRESS, l3ni1_progress}, {SBIT(2) | SBIT(3) | SBIT(4) | SBIT(7) | SBIT(8) | SBIT(9) | SBIT(10) | SBIT(11) | SBIT(12) | SBIT(15) | SBIT(17) | SBIT(19) | SBIT(25), MT_INFORMATION, l3ni1_information}, {SBIT(10) | SBIT(11) | SBIT(15), MT_NOTIFY, l3ni1_notify}, {SBIT(0) | SBIT(1) | SBIT(2) | SBIT(3) | SBIT(4) | SBIT(7) | SBIT(8) | SBIT(10) | SBIT(11) | SBIT(12) | SBIT(15) | SBIT(17) | SBIT(19) | SBIT(25), MT_RELEASE_COMPLETE, l3ni1_release_cmpl}, {SBIT(1) | SBIT(2) | SBIT(3) | SBIT(4) | SBIT(7) | SBIT(8) | SBIT(9) | SBIT(10) | SBIT(11) | SBIT(12) | SBIT(15) | SBIT(17) | SBIT(25), MT_RELEASE, l3ni1_release}, {SBIT(19), MT_RELEASE, l3ni1_release_ind}, {SBIT(1) | SBIT(2) | SBIT(3) | SBIT(4) | SBIT(7) | SBIT(8) | SBIT(9) | SBIT(10) | SBIT(11) | SBIT(15) | SBIT(17) | SBIT(25), MT_DISCONNECT, l3ni1_disconnect}, {SBIT(19), MT_DISCONNECT, l3ni1_dummy}, {SBIT(1) | SBIT(2) | SBIT(3) | SBIT(4), MT_CONNECT, l3ni1_connect}, {SBIT(8), MT_CONNECT_ACKNOWLEDGE, l3ni1_connect_ack}, {SBIT(15), MT_SUSPEND_ACKNOWLEDGE, l3ni1_suspend_ack}, {SBIT(15), MT_SUSPEND_REJECT, l3ni1_suspend_rej}, {SBIT(17), MT_RESUME_ACKNOWLEDGE, l3ni1_resume_ack}, {SBIT(17), MT_RESUME_REJECT, l3ni1_resume_rej}, }; static struct stateentry globalmes_list[] = { {ALL_STATES, MT_STATUS, l3ni1_status}, {SBIT(0), MT_RESTART, l3ni1_global_restart}, /* {SBIT(1), MT_RESTART_ACKNOWLEDGE, l3ni1_restart_ack}, */ { SBIT(0), MT_DL_ESTABLISHED, l3ni1_spid_send }, { SBIT(20) | SBIT(21) | SBIT(22), MT_INFORMATION, l3ni1_spid_epid }, }; static struct stateentry manstatelist[] = { {SBIT(2), DL_ESTABLISH | INDICATION, l3ni1_dl_reset}, {SBIT(10), DL_ESTABLISH | CONFIRM, l3ni1_dl_reest_status}, {SBIT(10), DL_RELEASE | INDICATION, l3ni1_dl_reestablish}, {ALL_STATES, DL_RELEASE | INDICATION, l3ni1_dl_release}, }; /* *INDENT-ON* */ static void 
global_handler(struct PStack *st, int mt, struct sk_buff *skb) { u_char tmp[16]; u_char *p = tmp; int l; int i; struct l3_process *proc = st->l3.global; if (skb) proc->callref = skb->data[2]; /* cr flag */ else proc->callref = 0; for (i = 0; i < ARRAY_SIZE(globalmes_list); i++) if ((mt == globalmes_list[i].primitive) && ((1 << proc->state) & globalmes_list[i].state)) break; if (i == ARRAY_SIZE(globalmes_list)) { if (st->l3.debug & L3_DEB_STATE) { l3_debug(st, "ni1 global state %d mt %x unhandled", proc->state, mt); } MsgHead(p, proc->callref, MT_STATUS); *p++ = IE_CAUSE; *p++ = 0x2; *p++ = 0x80; *p++ = 81 | 0x80; /* invalid cr */ *p++ = 0x14; /* CallState */ *p++ = 0x1; *p++ = proc->state & 0x3f; l = p - tmp; if (!(skb = l3_alloc_skb(l))) return; memcpy(skb_put(skb, l), tmp, l); l3_msg(proc->st, DL_DATA | REQUEST, skb); } else { if (st->l3.debug & L3_DEB_STATE) { l3_debug(st, "ni1 global %d mt %x", proc->state, mt); } globalmes_list[i].rout(proc, mt, skb); } } static void ni1up(struct PStack *st, int pr, void *arg) { int i, mt, cr, callState; char *ptr; u_char *p; struct sk_buff *skb = arg; struct l3_process *proc; switch (pr) { case (DL_DATA | INDICATION): case (DL_UNIT_DATA | INDICATION): break; case (DL_ESTABLISH | INDICATION): case (DL_RELEASE | INDICATION): case (DL_RELEASE | CONFIRM): l3_msg(st, pr, arg); return; break; case (DL_ESTABLISH | CONFIRM): global_handler(st, MT_DL_ESTABLISHED, NULL); return; default: printk(KERN_ERR "HiSax ni1up unknown pr=%04x\n", pr); return; } if (skb->len < 3) { l3_debug(st, "ni1up frame too short(%d)", skb->len); dev_kfree_skb(skb); return; } if (skb->data[0] != PROTO_DIS_EURO) { if (st->l3.debug & L3_DEB_PROTERR) { l3_debug(st, "ni1up%sunexpected discriminator %x message len %d", (pr == (DL_DATA | INDICATION)) ? 
" " : "(broadcast) ", skb->data[0], skb->len); } dev_kfree_skb(skb); return; } cr = getcallref(skb->data); if (skb->len < ((skb->data[1] & 0x0f) + 3)) { l3_debug(st, "ni1up frame too short(%d)", skb->len); dev_kfree_skb(skb); return; } mt = skb->data[skb->data[1] + 2]; if (st->l3.debug & L3_DEB_STATE) l3_debug(st, "ni1up cr %d", cr); if (cr == -2) { /* wrong Callref */ if (st->l3.debug & L3_DEB_WARN) l3_debug(st, "ni1up wrong Callref"); dev_kfree_skb(skb); return; } else if (cr == -1) { /* Dummy Callref */ if (mt == MT_FACILITY) { if ((p = findie(skb->data, skb->len, IE_FACILITY, 0))) { l3ni1_parse_facility(st, NULL, (pr == (DL_DATA | INDICATION)) ? -1 : -2, p); dev_kfree_skb(skb); return; } } else { global_handler(st, mt, skb); return; } if (st->l3.debug & L3_DEB_WARN) l3_debug(st, "ni1up dummy Callref (no facility msg or ie)"); dev_kfree_skb(skb); return; } else if ((((skb->data[1] & 0x0f) == 1) && (0 == (cr & 0x7f))) || (((skb->data[1] & 0x0f) == 2) && (0 == (cr & 0x7fff)))) { /* Global CallRef */ if (st->l3.debug & L3_DEB_STATE) l3_debug(st, "ni1up Global CallRef"); global_handler(st, mt, skb); dev_kfree_skb(skb); return; } else if (!(proc = getl3proc(st, cr))) { /* No transaction process exist, that means no call with * this callreference is active */ if (mt == MT_SETUP) { /* Setup creates a new transaction process */ if (skb->data[2] & 0x80) { /* Setup with wrong CREF flag */ if (st->l3.debug & L3_DEB_STATE) l3_debug(st, "ni1up wrong CRef flag"); dev_kfree_skb(skb); return; } if (!(proc = ni1_new_l3_process(st, cr))) { /* May be to answer with RELEASE_COMPLETE and * CAUSE 0x2f "Resource unavailable", but this * need a new_l3_process too ... 
arghh */ dev_kfree_skb(skb); return; } } else if (mt == MT_STATUS) { if ((ptr = findie(skb->data, skb->len, IE_CAUSE, 0)) != NULL) { ptr++; if (*ptr++ == 2) ptr++; } callState = 0; if ((ptr = findie(skb->data, skb->len, IE_CALL_STATE, 0)) != NULL) { ptr++; if (*ptr++ == 2) ptr++; callState = *ptr; } /* ETS 300-104 part 2.4.1 * if setup has not been made and a message type * MT_STATUS is received with call state == 0, * we must send nothing */ if (callState != 0) { /* ETS 300-104 part 2.4.2 * if setup has not been made and a message type * MT_STATUS is received with call state != 0, * we must send MT_RELEASE_COMPLETE cause 101 */ if ((proc = ni1_new_l3_process(st, cr))) { proc->para.cause = 101; l3ni1_msg_without_setup(proc, 0, NULL); } } dev_kfree_skb(skb); return; } else if (mt == MT_RELEASE_COMPLETE) { dev_kfree_skb(skb); return; } else { /* ETS 300-104 part 2 * if setup has not been made and a message type * (except MT_SETUP and RELEASE_COMPLETE) is received, * we must send MT_RELEASE_COMPLETE cause 81 */ dev_kfree_skb(skb); if ((proc = ni1_new_l3_process(st, cr))) { proc->para.cause = 81; l3ni1_msg_without_setup(proc, 0, NULL); } return; } } if (l3ni1_check_messagetype_validity(proc, mt, skb)) { dev_kfree_skb(skb); return; } if ((p = findie(skb->data, skb->len, IE_DISPLAY, 0)) != NULL) l3ni1_deliver_display(proc, pr, p); /* Display IE included */ for (i = 0; i < ARRAY_SIZE(datastatelist); i++) if ((mt == datastatelist[i].primitive) && ((1 << proc->state) & datastatelist[i].state)) break; if (i == ARRAY_SIZE(datastatelist)) { if (st->l3.debug & L3_DEB_STATE) { l3_debug(st, "ni1up%sstate %d mt %#x unhandled", (pr == (DL_DATA | INDICATION)) ? " " : "(broadcast) ", proc->state, mt); } if ((MT_RELEASE_COMPLETE != mt) && (MT_RELEASE != mt)) { proc->para.cause = 101; l3ni1_status_send(proc, pr, skb); } } else { if (st->l3.debug & L3_DEB_STATE) { l3_debug(st, "ni1up%sstate %d mt %x", (pr == (DL_DATA | INDICATION)) ? 
" " : "(broadcast) ", proc->state, mt); } datastatelist[i].rout(proc, pr, skb); } dev_kfree_skb(skb); return; } static void ni1down(struct PStack *st, int pr, void *arg) { int i, cr; struct l3_process *proc; struct Channel *chan; if ((DL_ESTABLISH | REQUEST) == pr) { l3_msg(st, pr, NULL); return; } else if (((CC_SETUP | REQUEST) == pr) || ((CC_RESUME | REQUEST) == pr)) { chan = arg; cr = newcallref(); cr |= 0x80; if ((proc = ni1_new_l3_process(st, cr))) { proc->chan = chan; chan->proc = proc; memcpy(&proc->para.setup, &chan->setup, sizeof(setup_parm)); proc->callref = cr; } } else { proc = arg; } if (!proc) { printk(KERN_ERR "HiSax ni1down without proc pr=%04x\n", pr); return; } if (pr == (CC_TNI1_IO | REQUEST)) { l3ni1_io_timer(proc); /* timer expires */ return; } for (i = 0; i < ARRAY_SIZE(downstatelist); i++) if ((pr == downstatelist[i].primitive) && ((1 << proc->state) & downstatelist[i].state)) break; if (i == ARRAY_SIZE(downstatelist)) { if (st->l3.debug & L3_DEB_STATE) { l3_debug(st, "ni1down state %d prim %#x unhandled", proc->state, pr); } } else { if (st->l3.debug & L3_DEB_STATE) { l3_debug(st, "ni1down state %d prim %#x", proc->state, pr); } downstatelist[i].rout(proc, pr, arg); } } static void ni1man(struct PStack *st, int pr, void *arg) { int i; struct l3_process *proc = arg; if (!proc) { printk(KERN_ERR "HiSax ni1man without proc pr=%04x\n", pr); return; } for (i = 0; i < ARRAY_SIZE(manstatelist); i++) if ((pr == manstatelist[i].primitive) && ((1 << proc->state) & manstatelist[i].state)) break; if (i == ARRAY_SIZE(manstatelist)) { if (st->l3.debug & L3_DEB_STATE) { l3_debug(st, "cr %d ni1man state %d prim %#x unhandled", proc->callref & 0x7f, proc->state, pr); } } else { if (st->l3.debug & L3_DEB_STATE) { l3_debug(st, "cr %d ni1man state %d prim %#x", proc->callref & 0x7f, proc->state, pr); } manstatelist[i].rout(proc, pr, arg); } } void setstack_ni1(struct PStack *st) { char tmp[64]; int i; st->lli.l4l3 = ni1down; st->lli.l4l3_proto = 
l3ni1_cmd_global; st->l2.l2l3 = ni1up; st->l3.l3ml3 = ni1man; st->l3.N303 = 1; st->prot.ni1.last_invoke_id = 0; st->prot.ni1.invoke_used[0] = 1; /* Bit 0 must always be set to 1 */ i = 1; while (i < 32) st->prot.ni1.invoke_used[i++] = 0; if (!(st->l3.global = kmalloc(sizeof(struct l3_process), GFP_ATOMIC))) { printk(KERN_ERR "HiSax can't get memory for ni1 global CR\n"); } else { st->l3.global->state = 0; st->l3.global->callref = 0; st->l3.global->next = NULL; st->l3.global->debug = L3_DEB_WARN; st->l3.global->st = st; st->l3.global->N303 = 1; st->l3.global->prot.ni1.invoke_id = 0; L3InitTimer(st->l3.global, &st->l3.global->timer); } strcpy(tmp, ni1_revision); printk(KERN_INFO "HiSax: National ISDN-1 Rev. %s\n", HiSax_getrev(tmp)); }
gpl-2.0
CyanogenMod/android_kernel_bn_acclaim
arch/arm/mach-imx/clock-imx35.c
2285
15065
/* * Copyright (C) 2009 by Sascha Hauer, Pengutronix * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, * MA 02110-1301, USA. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/list.h> #include <linux/clk.h> #include <linux/io.h> #include <linux/clkdev.h> #include <mach/clock.h> #include <mach/hardware.h> #include <mach/common.h> #define CCM_BASE MX35_IO_ADDRESS(MX35_CCM_BASE_ADDR) #define CCM_CCMR 0x00 #define CCM_PDR0 0x04 #define CCM_PDR1 0x08 #define CCM_PDR2 0x0C #define CCM_PDR3 0x10 #define CCM_PDR4 0x14 #define CCM_RCSR 0x18 #define CCM_MPCTL 0x1C #define CCM_PPCTL 0x20 #define CCM_ACMR 0x24 #define CCM_COSR 0x28 #define CCM_CGR0 0x2C #define CCM_CGR1 0x30 #define CCM_CGR2 0x34 #define CCM_CGR3 0x38 #ifdef HAVE_SET_RATE_SUPPORT static void calc_dividers(u32 div, u32 *pre, u32 *post, u32 maxpost) { u32 min_pre, temp_pre, old_err, err; min_pre = (div - 1) / maxpost + 1; old_err = 8; for (temp_pre = 8; temp_pre >= min_pre; temp_pre--) { if (div > (temp_pre * maxpost)) break; if (div < (temp_pre * temp_pre)) continue; err = div % temp_pre; if (err == 0) { *pre = temp_pre; break; } err = temp_pre - err; if (err < old_err) { old_err = err; *pre = temp_pre; } } *post = (div + *pre - 1) / *pre; } /* get the best values for a 3-bit divider combined with a 6-bit divider */ static void calc_dividers_3_6(u32 div, u32 *pre, u32 
*post) { if (div >= 512) { *pre = 8; *post = 64; } else if (div >= 64) { calc_dividers(div, pre, post, 64); } else if (div <= 8) { *pre = div; *post = 1; } else { *pre = 1; *post = div; } } /* get the best values for two cascaded 3-bit dividers */ static void calc_dividers_3_3(u32 div, u32 *pre, u32 *post) { if (div >= 64) { *pre = *post = 8; } else if (div > 8) { calc_dividers(div, pre, post, 8); } else { *pre = 1; *post = div; } } #endif static unsigned long get_rate_mpll(void) { ulong mpctl = __raw_readl(CCM_BASE + CCM_MPCTL); return mxc_decode_pll(mpctl, 24000000); } static unsigned long get_rate_ppll(void) { ulong ppctl = __raw_readl(CCM_BASE + CCM_PPCTL); return mxc_decode_pll(ppctl, 24000000); } struct arm_ahb_div { unsigned char arm, ahb, sel; }; static struct arm_ahb_div clk_consumer[] = { { .arm = 1, .ahb = 4, .sel = 0}, { .arm = 1, .ahb = 3, .sel = 1}, { .arm = 2, .ahb = 2, .sel = 0}, { .arm = 0, .ahb = 0, .sel = 0}, { .arm = 0, .ahb = 0, .sel = 0}, { .arm = 0, .ahb = 0, .sel = 0}, { .arm = 4, .ahb = 1, .sel = 0}, { .arm = 1, .ahb = 5, .sel = 0}, { .arm = 1, .ahb = 8, .sel = 0}, { .arm = 1, .ahb = 6, .sel = 1}, { .arm = 2, .ahb = 4, .sel = 0}, { .arm = 0, .ahb = 0, .sel = 0}, { .arm = 0, .ahb = 0, .sel = 0}, { .arm = 0, .ahb = 0, .sel = 0}, { .arm = 4, .ahb = 2, .sel = 0}, { .arm = 0, .ahb = 0, .sel = 0}, }; static unsigned long get_rate_arm(void) { unsigned long pdr0 = __raw_readl(CCM_BASE + CCM_PDR0); struct arm_ahb_div *aad; unsigned long fref = get_rate_mpll(); aad = &clk_consumer[(pdr0 >> 16) & 0xf]; if (aad->sel) fref = fref * 3 / 4; return fref / aad->arm; } static unsigned long get_rate_ahb(struct clk *clk) { unsigned long pdr0 = __raw_readl(CCM_BASE + CCM_PDR0); struct arm_ahb_div *aad; unsigned long fref = get_rate_arm(); aad = &clk_consumer[(pdr0 >> 16) & 0xf]; return fref / aad->ahb; } static unsigned long get_rate_ipg(struct clk *clk) { return get_rate_ahb(NULL) >> 1; } static unsigned long get_rate_uart(struct clk *clk) { unsigned long pdr3 
= __raw_readl(CCM_BASE + CCM_PDR3); unsigned long pdr4 = __raw_readl(CCM_BASE + CCM_PDR4); unsigned long div = ((pdr4 >> 10) & 0x3f) + 1; if (pdr3 & (1 << 14)) return get_rate_arm() / div; else return get_rate_ppll() / div; } static unsigned long get_rate_sdhc(struct clk *clk) { unsigned long pdr3 = __raw_readl(CCM_BASE + CCM_PDR3); unsigned long div, rate; if (pdr3 & (1 << 6)) rate = get_rate_arm(); else rate = get_rate_ppll(); switch (clk->id) { default: case 0: div = pdr3 & 0x3f; break; case 1: div = (pdr3 >> 8) & 0x3f; break; case 2: div = (pdr3 >> 16) & 0x3f; break; } return rate / (div + 1); } static unsigned long get_rate_mshc(struct clk *clk) { unsigned long pdr1 = __raw_readl(CCM_BASE + CCM_PDR1); unsigned long div1, div2, rate; if (pdr1 & (1 << 7)) rate = get_rate_arm(); else rate = get_rate_ppll(); div1 = (pdr1 >> 29) & 0x7; div2 = (pdr1 >> 22) & 0x3f; return rate / ((div1 + 1) * (div2 + 1)); } static unsigned long get_rate_ssi(struct clk *clk) { unsigned long pdr2 = __raw_readl(CCM_BASE + CCM_PDR2); unsigned long div1, div2, rate; if (pdr2 & (1 << 6)) rate = get_rate_arm(); else rate = get_rate_ppll(); switch (clk->id) { default: case 0: div1 = pdr2 & 0x3f; div2 = (pdr2 >> 24) & 0x7; break; case 1: div1 = (pdr2 >> 8) & 0x3f; div2 = (pdr2 >> 27) & 0x7; break; } return rate / ((div1 + 1) * (div2 + 1)); } static unsigned long get_rate_csi(struct clk *clk) { unsigned long pdr2 = __raw_readl(CCM_BASE + CCM_PDR2); unsigned long rate; if (pdr2 & (1 << 7)) rate = get_rate_arm(); else rate = get_rate_ppll(); return rate / (((pdr2 >> 16) & 0x3f) + 1); } static unsigned long get_rate_otg(struct clk *clk) { unsigned long pdr4 = __raw_readl(CCM_BASE + CCM_PDR4); unsigned long rate; if (pdr4 & (1 << 9)) rate = get_rate_arm(); else rate = get_rate_ppll(); return rate / (((pdr4 >> 22) & 0x3f) + 1); } static unsigned long get_rate_ipg_per(struct clk *clk) { unsigned long pdr0 = __raw_readl(CCM_BASE + CCM_PDR0); unsigned long pdr4 = __raw_readl(CCM_BASE + CCM_PDR4); 
unsigned long div; if (pdr0 & (1 << 26)) { div = (pdr4 >> 16) & 0x3f; return get_rate_arm() / (div + 1); } else { div = (pdr0 >> 12) & 0x7; return get_rate_ahb(NULL) / (div + 1); } } static unsigned long get_rate_hsp(struct clk *clk) { unsigned long hsp_podf = (__raw_readl(CCM_BASE + CCM_PDR0) >> 20) & 0x03; unsigned long fref = get_rate_mpll(); if (fref > 400 * 1000 * 1000) { switch (hsp_podf) { case 0: return fref >> 2; case 1: return fref >> 3; case 2: return fref / 3; } } else { switch (hsp_podf) { case 0: case 2: return fref / 3; case 1: return fref / 6; } } return 0; } static int clk_cgr_enable(struct clk *clk) { u32 reg; reg = __raw_readl(clk->enable_reg); reg |= 3 << clk->enable_shift; __raw_writel(reg, clk->enable_reg); return 0; } static void clk_cgr_disable(struct clk *clk) { u32 reg; reg = __raw_readl(clk->enable_reg); reg &= ~(3 << clk->enable_shift); __raw_writel(reg, clk->enable_reg); } #define DEFINE_CLOCK(name, i, er, es, gr, sr) \ static struct clk name = { \ .id = i, \ .enable_reg = CCM_BASE + er, \ .enable_shift = es, \ .get_rate = gr, \ .set_rate = sr, \ .enable = clk_cgr_enable, \ .disable = clk_cgr_disable, \ } DEFINE_CLOCK(asrc_clk, 0, CCM_CGR0, 0, NULL, NULL); DEFINE_CLOCK(ata_clk, 0, CCM_CGR0, 2, get_rate_ipg, NULL); /* DEFINE_CLOCK(audmux_clk, 0, CCM_CGR0, 4, NULL, NULL); */ DEFINE_CLOCK(can1_clk, 0, CCM_CGR0, 6, get_rate_ipg, NULL); DEFINE_CLOCK(can2_clk, 1, CCM_CGR0, 8, get_rate_ipg, NULL); DEFINE_CLOCK(cspi1_clk, 0, CCM_CGR0, 10, get_rate_ipg, NULL); DEFINE_CLOCK(cspi2_clk, 1, CCM_CGR0, 12, get_rate_ipg, NULL); DEFINE_CLOCK(ect_clk, 0, CCM_CGR0, 14, get_rate_ipg, NULL); DEFINE_CLOCK(edio_clk, 0, CCM_CGR0, 16, NULL, NULL); DEFINE_CLOCK(emi_clk, 0, CCM_CGR0, 18, get_rate_ipg, NULL); DEFINE_CLOCK(epit1_clk, 0, CCM_CGR0, 20, get_rate_ipg, NULL); DEFINE_CLOCK(epit2_clk, 1, CCM_CGR0, 22, get_rate_ipg, NULL); DEFINE_CLOCK(esai_clk, 0, CCM_CGR0, 24, NULL, NULL); DEFINE_CLOCK(esdhc1_clk, 0, CCM_CGR0, 26, get_rate_sdhc, NULL); 
DEFINE_CLOCK(esdhc2_clk, 1, CCM_CGR0, 28, get_rate_sdhc, NULL); DEFINE_CLOCK(esdhc3_clk, 2, CCM_CGR0, 30, get_rate_sdhc, NULL); DEFINE_CLOCK(fec_clk, 0, CCM_CGR1, 0, get_rate_ipg, NULL); DEFINE_CLOCK(gpio1_clk, 0, CCM_CGR1, 2, NULL, NULL); DEFINE_CLOCK(gpio2_clk, 1, CCM_CGR1, 4, NULL, NULL); DEFINE_CLOCK(gpio3_clk, 2, CCM_CGR1, 6, NULL, NULL); DEFINE_CLOCK(gpt_clk, 0, CCM_CGR1, 8, get_rate_ipg, NULL); DEFINE_CLOCK(i2c1_clk, 0, CCM_CGR1, 10, get_rate_ipg_per, NULL); DEFINE_CLOCK(i2c2_clk, 1, CCM_CGR1, 12, get_rate_ipg_per, NULL); DEFINE_CLOCK(i2c3_clk, 2, CCM_CGR1, 14, get_rate_ipg_per, NULL); DEFINE_CLOCK(iomuxc_clk, 0, CCM_CGR1, 16, NULL, NULL); DEFINE_CLOCK(ipu_clk, 0, CCM_CGR1, 18, get_rate_hsp, NULL); DEFINE_CLOCK(kpp_clk, 0, CCM_CGR1, 20, get_rate_ipg, NULL); DEFINE_CLOCK(mlb_clk, 0, CCM_CGR1, 22, get_rate_ahb, NULL); DEFINE_CLOCK(mshc_clk, 0, CCM_CGR1, 24, get_rate_mshc, NULL); DEFINE_CLOCK(owire_clk, 0, CCM_CGR1, 26, get_rate_ipg_per, NULL); DEFINE_CLOCK(pwm_clk, 0, CCM_CGR1, 28, get_rate_ipg_per, NULL); DEFINE_CLOCK(rngc_clk, 0, CCM_CGR1, 30, get_rate_ipg, NULL); DEFINE_CLOCK(rtc_clk, 0, CCM_CGR2, 0, get_rate_ipg, NULL); DEFINE_CLOCK(rtic_clk, 0, CCM_CGR2, 2, get_rate_ahb, NULL); DEFINE_CLOCK(scc_clk, 0, CCM_CGR2, 4, get_rate_ipg, NULL); DEFINE_CLOCK(sdma_clk, 0, CCM_CGR2, 6, NULL, NULL); DEFINE_CLOCK(spba_clk, 0, CCM_CGR2, 8, get_rate_ipg, NULL); DEFINE_CLOCK(spdif_clk, 0, CCM_CGR2, 10, NULL, NULL); DEFINE_CLOCK(ssi1_clk, 0, CCM_CGR2, 12, get_rate_ssi, NULL); DEFINE_CLOCK(ssi2_clk, 1, CCM_CGR2, 14, get_rate_ssi, NULL); DEFINE_CLOCK(uart1_clk, 0, CCM_CGR2, 16, get_rate_uart, NULL); DEFINE_CLOCK(uart2_clk, 1, CCM_CGR2, 18, get_rate_uart, NULL); DEFINE_CLOCK(uart3_clk, 2, CCM_CGR2, 20, get_rate_uart, NULL); DEFINE_CLOCK(usbotg_clk, 0, CCM_CGR2, 22, get_rate_otg, NULL); DEFINE_CLOCK(wdog_clk, 0, CCM_CGR2, 24, NULL, NULL); DEFINE_CLOCK(max_clk, 0, CCM_CGR2, 26, NULL, NULL); DEFINE_CLOCK(audmux_clk, 0, CCM_CGR2, 30, NULL, NULL); DEFINE_CLOCK(csi_clk, 0, 
CCM_CGR3, 0, get_rate_csi, NULL); DEFINE_CLOCK(iim_clk, 0, CCM_CGR3, 2, NULL, NULL); DEFINE_CLOCK(gpu2d_clk, 0, CCM_CGR3, 4, NULL, NULL); DEFINE_CLOCK(usbahb_clk, 0, 0, 0, get_rate_ahb, NULL); static int clk_dummy_enable(struct clk *clk) { return 0; } static void clk_dummy_disable(struct clk *clk) { } static unsigned long get_rate_nfc(struct clk *clk) { unsigned long div1; div1 = (__raw_readl(CCM_BASE + CCM_PDR4) >> 28) + 1; return get_rate_ahb(NULL) / div1; } /* NAND Controller: It seems it can't be disabled */ static struct clk nfc_clk = { .id = 0, .enable_reg = 0, .enable_shift = 0, .get_rate = get_rate_nfc, .set_rate = NULL, /* set_rate_nfc, */ .enable = clk_dummy_enable, .disable = clk_dummy_disable }; #define _REGISTER_CLOCK(d, n, c) \ { \ .dev_id = d, \ .con_id = n, \ .clk = &c, \ }, static struct clk_lookup lookups[] = { _REGISTER_CLOCK(NULL, "asrc", asrc_clk) _REGISTER_CLOCK(NULL, "ata", ata_clk) _REGISTER_CLOCK("flexcan.0", NULL, can1_clk) _REGISTER_CLOCK("flexcan.1", NULL, can2_clk) _REGISTER_CLOCK("imx35-cspi.0", NULL, cspi1_clk) _REGISTER_CLOCK("imx35-cspi.1", NULL, cspi2_clk) _REGISTER_CLOCK(NULL, "ect", ect_clk) _REGISTER_CLOCK(NULL, "edio", edio_clk) _REGISTER_CLOCK(NULL, "emi", emi_clk) _REGISTER_CLOCK("imx-epit.0", NULL, epit1_clk) _REGISTER_CLOCK("imx-epit.1", NULL, epit2_clk) _REGISTER_CLOCK(NULL, "esai", esai_clk) _REGISTER_CLOCK("sdhci-esdhc-imx.0", NULL, esdhc1_clk) _REGISTER_CLOCK("sdhci-esdhc-imx.1", NULL, esdhc2_clk) _REGISTER_CLOCK("sdhci-esdhc-imx.2", NULL, esdhc3_clk) _REGISTER_CLOCK("fec.0", NULL, fec_clk) _REGISTER_CLOCK(NULL, "gpio", gpio1_clk) _REGISTER_CLOCK(NULL, "gpio", gpio2_clk) _REGISTER_CLOCK(NULL, "gpio", gpio3_clk) _REGISTER_CLOCK("gpt.0", NULL, gpt_clk) _REGISTER_CLOCK("imx-i2c.0", NULL, i2c1_clk) _REGISTER_CLOCK("imx-i2c.1", NULL, i2c2_clk) _REGISTER_CLOCK("imx-i2c.2", NULL, i2c3_clk) _REGISTER_CLOCK(NULL, "iomuxc", iomuxc_clk) _REGISTER_CLOCK("ipu-core", NULL, ipu_clk) _REGISTER_CLOCK("mx3_sdc_fb", NULL, ipu_clk) 
_REGISTER_CLOCK(NULL, "kpp", kpp_clk) _REGISTER_CLOCK(NULL, "mlb", mlb_clk) _REGISTER_CLOCK(NULL, "mshc", mshc_clk) _REGISTER_CLOCK("mxc_w1", NULL, owire_clk) _REGISTER_CLOCK(NULL, "pwm", pwm_clk) _REGISTER_CLOCK(NULL, "rngc", rngc_clk) _REGISTER_CLOCK(NULL, "rtc", rtc_clk) _REGISTER_CLOCK(NULL, "rtic", rtic_clk) _REGISTER_CLOCK(NULL, "scc", scc_clk) _REGISTER_CLOCK("imx-sdma", NULL, sdma_clk) _REGISTER_CLOCK(NULL, "spba", spba_clk) _REGISTER_CLOCK(NULL, "spdif", spdif_clk) _REGISTER_CLOCK("imx-ssi.0", NULL, ssi1_clk) _REGISTER_CLOCK("imx-ssi.1", NULL, ssi2_clk) _REGISTER_CLOCK("imx-uart.0", NULL, uart1_clk) _REGISTER_CLOCK("imx-uart.1", NULL, uart2_clk) _REGISTER_CLOCK("imx-uart.2", NULL, uart3_clk) _REGISTER_CLOCK("mxc-ehci.0", "usb", usbotg_clk) _REGISTER_CLOCK("mxc-ehci.1", "usb", usbotg_clk) _REGISTER_CLOCK("mxc-ehci.2", "usb", usbotg_clk) _REGISTER_CLOCK("fsl-usb2-udc", "usb", usbotg_clk) _REGISTER_CLOCK("fsl-usb2-udc", "usb_ahb", usbahb_clk) _REGISTER_CLOCK("imx2-wdt.0", NULL, wdog_clk) _REGISTER_CLOCK(NULL, "max", max_clk) _REGISTER_CLOCK(NULL, "audmux", audmux_clk) _REGISTER_CLOCK(NULL, "csi", csi_clk) _REGISTER_CLOCK(NULL, "iim", iim_clk) _REGISTER_CLOCK(NULL, "gpu2d", gpu2d_clk) _REGISTER_CLOCK("mxc_nand.0", NULL, nfc_clk) }; int __init mx35_clocks_init() { unsigned int cgr2 = 3 << 26, cgr3 = 0; #if defined(CONFIG_DEBUG_LL) && !defined(CONFIG_DEBUG_ICEDCC) cgr2 |= 3 << 16; #endif clkdev_add_table(lookups, ARRAY_SIZE(lookups)); /* Turn off all clocks except the ones we need to survive, namely: * EMI, GPIO1/2/3, GPT, IOMUX, MAX and eventually uart */ __raw_writel((3 << 18), CCM_BASE + CCM_CGR0); __raw_writel((3 << 2) | (3 << 4) | (3 << 6) | (3 << 8) | (3 << 16), CCM_BASE + CCM_CGR1); /* * Check if we came up in internal boot mode. If yes, we need some * extra clocks turned on, otherwise the MX35 boot ROM code will * hang after a watchdog reset. 
*/ if (!(__raw_readl(CCM_BASE + CCM_RCSR) & (3 << 10))) { /* Additionally turn on UART1, SCC, and IIM clocks */ cgr2 |= 3 << 16 | 3 << 4; cgr3 |= 3 << 2; } __raw_writel(cgr2, CCM_BASE + CCM_CGR2); __raw_writel(cgr3, CCM_BASE + CCM_CGR3); clk_enable(&iim_clk); mx35_read_cpu_rev(); #ifdef CONFIG_MXC_USE_EPIT epit_timer_init(&epit1_clk, MX35_IO_ADDRESS(MX35_EPIT1_BASE_ADDR), MX35_INT_EPIT1); #else mxc_timer_init(&gpt_clk, MX35_IO_ADDRESS(MX35_GPT1_BASE_ADDR), MX35_INT_GPT); #endif return 0; }
gpl-2.0
aatjitra/PR26
arch/arm/mach-s3c2443/clock.c
2541
9133
/* linux/arch/arm/mach-s3c2443/clock.c * * Copyright (c) 2007, 2010 Simtec Electronics * Ben Dooks <ben@simtec.co.uk> * * S3C2443 Clock control support * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/init.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/list.h> #include <linux/errno.h> #include <linux/err.h> #include <linux/sysdev.h> #include <linux/clk.h> #include <linux/mutex.h> #include <linux/serial_core.h> #include <linux/io.h> #include <asm/mach/map.h> #include <mach/hardware.h> #include <mach/regs-s3c2443-clock.h> #include <plat/cpu-freq.h> #include <plat/s3c2443.h> #include <plat/clock.h> #include <plat/clock-clksrc.h> #include <plat/cpu.h> /* We currently have to assume that the system is running * from the XTPll input, and that all ***REFCLKs are being * fed from it, as we cannot read the state of OM[4] from * software. * * It would be possible for each board initialisation to * set the correct muxing at initialisation */ /* clock selections */ static struct clk clk_i2s_ext = { .name = "i2s-ext", .id = -1, }; /* armdiv * * this clock is sourced from msysclk and can have a number of * divider values applied to it to then be fed into armclk. 
*/ /* armdiv divisor table */ static unsigned int armdiv[16] = { [S3C2443_CLKDIV0_ARMDIV_1 >> S3C2443_CLKDIV0_ARMDIV_SHIFT] = 1, [S3C2443_CLKDIV0_ARMDIV_2 >> S3C2443_CLKDIV0_ARMDIV_SHIFT] = 2, [S3C2443_CLKDIV0_ARMDIV_3 >> S3C2443_CLKDIV0_ARMDIV_SHIFT] = 3, [S3C2443_CLKDIV0_ARMDIV_4 >> S3C2443_CLKDIV0_ARMDIV_SHIFT] = 4, [S3C2443_CLKDIV0_ARMDIV_6 >> S3C2443_CLKDIV0_ARMDIV_SHIFT] = 6, [S3C2443_CLKDIV0_ARMDIV_8 >> S3C2443_CLKDIV0_ARMDIV_SHIFT] = 8, [S3C2443_CLKDIV0_ARMDIV_12 >> S3C2443_CLKDIV0_ARMDIV_SHIFT] = 12, [S3C2443_CLKDIV0_ARMDIV_16 >> S3C2443_CLKDIV0_ARMDIV_SHIFT] = 16, }; static inline unsigned int s3c2443_fclk_div(unsigned long clkcon0) { clkcon0 &= S3C2443_CLKDIV0_ARMDIV_MASK; return armdiv[clkcon0 >> S3C2443_CLKDIV0_ARMDIV_SHIFT]; } static unsigned long s3c2443_armclk_roundrate(struct clk *clk, unsigned long rate) { unsigned long parent = clk_get_rate(clk->parent); unsigned long calc; unsigned best = 256; /* bigger than any value */ unsigned div; int ptr; for (ptr = 0; ptr < ARRAY_SIZE(armdiv); ptr++) { div = armdiv[ptr]; calc = parent / div; if (calc <= rate && div < best) best = div; } return parent / best; } static int s3c2443_armclk_setrate(struct clk *clk, unsigned long rate) { unsigned long parent = clk_get_rate(clk->parent); unsigned long calc; unsigned div; unsigned best = 256; /* bigger than any value */ int ptr; int val = -1; for (ptr = 0; ptr < ARRAY_SIZE(armdiv); ptr++) { div = armdiv[ptr]; calc = parent / div; if (calc <= rate && div < best) { best = div; val = ptr; } } if (val >= 0) { unsigned long clkcon0; clkcon0 = __raw_readl(S3C2443_CLKDIV0); clkcon0 &= S3C2443_CLKDIV0_ARMDIV_MASK; clkcon0 |= val << S3C2443_CLKDIV0_ARMDIV_SHIFT; __raw_writel(clkcon0, S3C2443_CLKDIV0); } return (val == -1) ? 
-EINVAL : 0; } static struct clk clk_armdiv = { .name = "armdiv", .id = -1, .parent = &clk_msysclk.clk, .ops = &(struct clk_ops) { .round_rate = s3c2443_armclk_roundrate, .set_rate = s3c2443_armclk_setrate, }, }; /* armclk * * this is the clock fed into the ARM core itself, from armdiv or from hclk. */ static struct clk *clk_arm_sources[] = { [0] = &clk_armdiv, [1] = &clk_h, }; static struct clksrc_clk clk_arm = { .clk = { .name = "armclk", .id = -1, }, .sources = &(struct clksrc_sources) { .sources = clk_arm_sources, .nr_sources = ARRAY_SIZE(clk_arm_sources), }, .reg_src = { .reg = S3C2443_CLKDIV0, .size = 1, .shift = 13 }, }; /* hsspi * * high-speed spi clock, sourced from esysclk */ static struct clksrc_clk clk_hsspi = { .clk = { .name = "hsspi", .id = -1, .parent = &clk_esysclk.clk, .ctrlbit = S3C2443_SCLKCON_HSSPICLK, .enable = s3c2443_clkcon_enable_s, }, .reg_div = { .reg = S3C2443_CLKDIV1, .size = 2, .shift = 4 }, }; /* clk_hsmcc_div * * this clock is sourced from epll, and is fed through a divider, * to a mux controlled by sclkcon where either it or a extclk can * be fed to the hsmmc block */ static struct clksrc_clk clk_hsmmc_div = { .clk = { .name = "hsmmc-div", .id = 1, .parent = &clk_esysclk.clk, }, .reg_div = { .reg = S3C2443_CLKDIV1, .size = 2, .shift = 6 }, }; static int s3c2443_setparent_hsmmc(struct clk *clk, struct clk *parent) { unsigned long clksrc = __raw_readl(S3C2443_SCLKCON); clksrc &= ~(S3C2443_SCLKCON_HSMMCCLK_EXT | S3C2443_SCLKCON_HSMMCCLK_EPLL); if (parent == &clk_epll) clksrc |= S3C2443_SCLKCON_HSMMCCLK_EPLL; else if (parent == &clk_ext) clksrc |= S3C2443_SCLKCON_HSMMCCLK_EXT; else return -EINVAL; if (clk->usage > 0) { __raw_writel(clksrc, S3C2443_SCLKCON); } clk->parent = parent; return 0; } static int s3c2443_enable_hsmmc(struct clk *clk, int enable) { return s3c2443_setparent_hsmmc(clk, clk->parent); } static struct clk clk_hsmmc = { .name = "hsmmc-if", .id = 1, .parent = &clk_hsmmc_div.clk, .enable = s3c2443_enable_hsmmc, .ops = 
&(struct clk_ops) { .set_parent = s3c2443_setparent_hsmmc, }, }; /* i2s_eplldiv * * This clock is the output from the I2S divisor of ESYSCLK, and is separate * from the mux that comes after it (cannot merge into one single clock) */ static struct clksrc_clk clk_i2s_eplldiv = { .clk = { .name = "i2s-eplldiv", .id = -1, .parent = &clk_esysclk.clk, }, .reg_div = { .reg = S3C2443_CLKDIV1, .size = 4, .shift = 12, }, }; /* i2s-ref * * i2s bus reference clock, selectable from external, esysclk or epllref * * Note, this used to be two clocks, but was compressed into one. */ struct clk *clk_i2s_srclist[] = { [0] = &clk_i2s_eplldiv.clk, [1] = &clk_i2s_ext, [2] = &clk_epllref.clk, [3] = &clk_epllref.clk, }; static struct clksrc_clk clk_i2s = { .clk = { .name = "i2s-if", .id = -1, .ctrlbit = S3C2443_SCLKCON_I2SCLK, .enable = s3c2443_clkcon_enable_s, }, .sources = &(struct clksrc_sources) { .sources = clk_i2s_srclist, .nr_sources = ARRAY_SIZE(clk_i2s_srclist), }, .reg_src = { .reg = S3C2443_CLKSRC, .size = 2, .shift = 14 }, }; /* standard clock definitions */ static struct clk init_clocks_off[] = { { .name = "sdi", .id = -1, .parent = &clk_p, .enable = s3c2443_clkcon_enable_p, .ctrlbit = S3C2443_PCLKCON_SDI, }, { .name = "iis", .id = -1, .parent = &clk_p, .enable = s3c2443_clkcon_enable_p, .ctrlbit = S3C2443_PCLKCON_IIS, }, { .name = "spi", .id = 0, .parent = &clk_p, .enable = s3c2443_clkcon_enable_p, .ctrlbit = S3C2443_PCLKCON_SPI0, }, { .name = "spi", .id = 1, .parent = &clk_p, .enable = s3c2443_clkcon_enable_p, .ctrlbit = S3C2443_PCLKCON_SPI1, } }; static struct clk init_clocks[] = { }; /* clocks to add straight away */ static struct clksrc_clk *clksrcs[] __initdata = { &clk_arm, &clk_i2s_eplldiv, &clk_i2s, &clk_hsspi, &clk_hsmmc_div, }; static struct clk *clks[] __initdata = { &clk_hsmmc, &clk_armdiv, }; void __init_or_cpufreq s3c2443_setup_clocks(void) { s3c2443_common_setup_clocks(s3c2443_get_mpll, s3c2443_fclk_div); } void __init s3c2443_init_clocks(int xtal) { unsigned 
long epllcon = __raw_readl(S3C2443_EPLLCON); int ptr; clk_epll.rate = s3c2443_get_epll(epllcon, xtal); clk_epll.parent = &clk_epllref.clk; s3c2443_common_init_clocks(xtal, s3c2443_get_mpll, s3c2443_fclk_div); s3c2443_setup_clocks(); s3c24xx_register_clocks(clks, ARRAY_SIZE(clks)); for (ptr = 0; ptr < ARRAY_SIZE(clksrcs); ptr++) s3c_register_clksrc(clksrcs[ptr], 1); /* register clocks from clock array */ s3c_register_clocks(init_clocks, ARRAY_SIZE(init_clocks)); /* We must be careful disabling the clocks we are not intending to * be using at boot time, as subsystems such as the LCD which do * their own DMA requests to the bus can cause the system to lockup * if they where in the middle of requesting bus access. * * Disabling the LCD clock if the LCD is active is very dangerous, * and therefore the bootloader should be careful to not enable * the LCD clock if it is not needed. */ /* install (and disable) the clocks we do not need immediately */ s3c_register_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off)); s3c_disable_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off)); s3c_pwmclk_init(); }
gpl-2.0
syhost/android_kernel_zte_n918st
net/netfilter/nf_conntrack_helper.c
2541
12908
/* Helper handling for netfilter. */ /* (C) 1999-2001 Paul `Rusty' Russell * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org> * (C) 2003,2004 USAGI/WIDE Project <http://www.linux-ipv6.org> * (C) 2006-2012 Patrick McHardy <kaber@trash.net> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/types.h> #include <linux/netfilter.h> #include <linux/module.h> #include <linux/skbuff.h> #include <linux/vmalloc.h> #include <linux/stddef.h> #include <linux/random.h> #include <linux/err.h> #include <linux/kernel.h> #include <linux/netdevice.h> #include <linux/rculist.h> #include <linux/rtnetlink.h> #include <net/netfilter/nf_conntrack.h> #include <net/netfilter/nf_conntrack_l3proto.h> #include <net/netfilter/nf_conntrack_l4proto.h> #include <net/netfilter/nf_conntrack_helper.h> #include <net/netfilter/nf_conntrack_core.h> #include <net/netfilter/nf_conntrack_extend.h> #include <net/netfilter/nf_log.h> static DEFINE_MUTEX(nf_ct_helper_mutex); struct hlist_head *nf_ct_helper_hash __read_mostly; EXPORT_SYMBOL_GPL(nf_ct_helper_hash); unsigned int nf_ct_helper_hsize __read_mostly; EXPORT_SYMBOL_GPL(nf_ct_helper_hsize); static unsigned int nf_ct_helper_count __read_mostly; static bool nf_ct_auto_assign_helper __read_mostly = true; module_param_named(nf_conntrack_helper, nf_ct_auto_assign_helper, bool, 0644); MODULE_PARM_DESC(nf_conntrack_helper, "Enable automatic conntrack helper assignment (default 1)"); #ifdef CONFIG_SYSCTL static struct ctl_table helper_sysctl_table[] = { { .procname = "nf_conntrack_helper", .data = &init_net.ct.sysctl_auto_assign_helper, .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = proc_dointvec, }, {} }; static int nf_conntrack_helper_init_sysctl(struct net *net) { struct ctl_table *table; table = kmemdup(helper_sysctl_table, sizeof(helper_sysctl_table), GFP_KERNEL); if (!table) goto 
out; table[0].data = &net->ct.sysctl_auto_assign_helper; /* Don't export sysctls to unprivileged users */ if (net->user_ns != &init_user_ns) table[0].procname = NULL; net->ct.helper_sysctl_header = register_net_sysctl(net, "net/netfilter", table); if (!net->ct.helper_sysctl_header) { pr_err("nf_conntrack_helper: can't register to sysctl.\n"); goto out_register; } return 0; out_register: kfree(table); out: return -ENOMEM; } static void nf_conntrack_helper_fini_sysctl(struct net *net) { struct ctl_table *table; table = net->ct.helper_sysctl_header->ctl_table_arg; unregister_net_sysctl_table(net->ct.helper_sysctl_header); kfree(table); } #else static int nf_conntrack_helper_init_sysctl(struct net *net) { return 0; } static void nf_conntrack_helper_fini_sysctl(struct net *net) { } #endif /* CONFIG_SYSCTL */ /* Stupid hash, but collision free for the default registrations of the * helpers currently in the kernel. */ static unsigned int helper_hash(const struct nf_conntrack_tuple *tuple) { return (((tuple->src.l3num << 8) | tuple->dst.protonum) ^ (__force __u16)tuple->src.u.all) % nf_ct_helper_hsize; } static struct nf_conntrack_helper * __nf_ct_helper_find(const struct nf_conntrack_tuple *tuple) { struct nf_conntrack_helper *helper; struct nf_conntrack_tuple_mask mask = { .src.u.all = htons(0xFFFF) }; unsigned int h; if (!nf_ct_helper_count) return NULL; h = helper_hash(tuple); hlist_for_each_entry_rcu(helper, &nf_ct_helper_hash[h], hnode) { if (nf_ct_tuple_src_mask_cmp(tuple, &helper->tuple, &mask)) return helper; } return NULL; } struct nf_conntrack_helper * __nf_conntrack_helper_find(const char *name, u16 l3num, u8 protonum) { struct nf_conntrack_helper *h; unsigned int i; for (i = 0; i < nf_ct_helper_hsize; i++) { hlist_for_each_entry_rcu(h, &nf_ct_helper_hash[i], hnode) { if (!strcmp(h->name, name) && h->tuple.src.l3num == l3num && h->tuple.dst.protonum == protonum) return h; } } return NULL; } EXPORT_SYMBOL_GPL(__nf_conntrack_helper_find); struct 
nf_conntrack_helper * nf_conntrack_helper_try_module_get(const char *name, u16 l3num, u8 protonum) { struct nf_conntrack_helper *h; h = __nf_conntrack_helper_find(name, l3num, protonum); #ifdef CONFIG_MODULES if (h == NULL) { if (request_module("nfct-helper-%s", name) == 0) h = __nf_conntrack_helper_find(name, l3num, protonum); } #endif if (h != NULL && !try_module_get(h->me)) h = NULL; return h; } EXPORT_SYMBOL_GPL(nf_conntrack_helper_try_module_get); struct nf_conn_help * nf_ct_helper_ext_add(struct nf_conn *ct, struct nf_conntrack_helper *helper, gfp_t gfp) { struct nf_conn_help *help; help = nf_ct_ext_add_length(ct, NF_CT_EXT_HELPER, helper->data_len, gfp); if (help) INIT_HLIST_HEAD(&help->expectations); else pr_debug("failed to add helper extension area"); return help; } EXPORT_SYMBOL_GPL(nf_ct_helper_ext_add); int __nf_ct_try_assign_helper(struct nf_conn *ct, struct nf_conn *tmpl, gfp_t flags) { struct nf_conntrack_helper *helper = NULL; struct nf_conn_help *help; struct net *net = nf_ct_net(ct); int ret = 0; /* We already got a helper explicitly attached. The function * nf_conntrack_alter_reply - in case NAT is in use - asks for looking * the helper up again. Since now the user is in full control of * making consistent helper configurations, skip this automatic * re-lookup, otherwise we'll lose the helper. */ if (test_bit(IPS_HELPER_BIT, &ct->status)) return 0; if (tmpl != NULL) { help = nfct_help(tmpl); if (help != NULL) { helper = help->helper; set_bit(IPS_HELPER_BIT, &ct->status); } } help = nfct_help(ct); if (net->ct.sysctl_auto_assign_helper && helper == NULL) { helper = __nf_ct_helper_find(&ct->tuplehash[IP_CT_DIR_REPLY].tuple); if (unlikely(!net->ct.auto_assign_helper_warned && helper)) { pr_info("nf_conntrack: automatic helper " "assignment is deprecated and it will " "be removed soon. 
Use the iptables CT target " "to attach helpers instead.\n"); net->ct.auto_assign_helper_warned = true; } } if (helper == NULL) { if (help) RCU_INIT_POINTER(help->helper, NULL); goto out; } if (help == NULL) { help = nf_ct_helper_ext_add(ct, helper, flags); if (help == NULL) { ret = -ENOMEM; goto out; } } else { /* We only allow helper re-assignment of the same sort since * we cannot reallocate the helper extension area. */ struct nf_conntrack_helper *tmp = rcu_dereference(help->helper); if (tmp && tmp->help != helper->help) { RCU_INIT_POINTER(help->helper, NULL); goto out; } } rcu_assign_pointer(help->helper, helper); out: return ret; } EXPORT_SYMBOL_GPL(__nf_ct_try_assign_helper); static inline int unhelp(struct nf_conntrack_tuple_hash *i, const struct nf_conntrack_helper *me) { struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(i); struct nf_conn_help *help = nfct_help(ct); if (help && rcu_dereference_protected( help->helper, lockdep_is_held(&nf_conntrack_lock) ) == me) { nf_conntrack_event(IPCT_HELPER, ct); RCU_INIT_POINTER(help->helper, NULL); } return 0; } void nf_ct_helper_destroy(struct nf_conn *ct) { struct nf_conn_help *help = nfct_help(ct); struct nf_conntrack_helper *helper; if (help) { rcu_read_lock(); helper = rcu_dereference(help->helper); if (helper && helper->destroy) helper->destroy(ct); rcu_read_unlock(); } } static LIST_HEAD(nf_ct_helper_expectfn_list); void nf_ct_helper_expectfn_register(struct nf_ct_helper_expectfn *n) { spin_lock_bh(&nf_conntrack_lock); list_add_rcu(&n->head, &nf_ct_helper_expectfn_list); spin_unlock_bh(&nf_conntrack_lock); } EXPORT_SYMBOL_GPL(nf_ct_helper_expectfn_register); void nf_ct_helper_expectfn_unregister(struct nf_ct_helper_expectfn *n) { spin_lock_bh(&nf_conntrack_lock); list_del_rcu(&n->head); spin_unlock_bh(&nf_conntrack_lock); } EXPORT_SYMBOL_GPL(nf_ct_helper_expectfn_unregister); struct nf_ct_helper_expectfn * nf_ct_helper_expectfn_find_by_name(const char *name) { struct nf_ct_helper_expectfn *cur; bool found = 
false; rcu_read_lock(); list_for_each_entry_rcu(cur, &nf_ct_helper_expectfn_list, head) { if (!strcmp(cur->name, name)) { found = true; break; } } rcu_read_unlock(); return found ? cur : NULL; } EXPORT_SYMBOL_GPL(nf_ct_helper_expectfn_find_by_name); struct nf_ct_helper_expectfn * nf_ct_helper_expectfn_find_by_symbol(const void *symbol) { struct nf_ct_helper_expectfn *cur; bool found = false; rcu_read_lock(); list_for_each_entry_rcu(cur, &nf_ct_helper_expectfn_list, head) { if (cur->expectfn == symbol) { found = true; break; } } rcu_read_unlock(); return found ? cur : NULL; } EXPORT_SYMBOL_GPL(nf_ct_helper_expectfn_find_by_symbol); __printf(3, 4) void nf_ct_helper_log(struct sk_buff *skb, const struct nf_conn *ct, const char *fmt, ...) { const struct nf_conn_help *help; const struct nf_conntrack_helper *helper; struct va_format vaf; va_list args; va_start(args, fmt); vaf.fmt = fmt; vaf.va = &args; /* Called from the helper function, this call never fails */ help = nfct_help(ct); /* rcu_read_lock()ed by nf_hook_slow */ helper = rcu_dereference(help->helper); nf_log_packet(nf_ct_net(ct), nf_ct_l3num(ct), 0, skb, NULL, NULL, NULL, "nf_ct_%s: dropping packet: %pV ", helper->name, &vaf); va_end(args); } EXPORT_SYMBOL_GPL(nf_ct_helper_log); int nf_conntrack_helper_register(struct nf_conntrack_helper *me) { int ret = 0; struct nf_conntrack_helper *cur; unsigned int h = helper_hash(&me->tuple); BUG_ON(me->expect_policy == NULL); BUG_ON(me->expect_class_max >= NF_CT_MAX_EXPECT_CLASSES); BUG_ON(strlen(me->name) > NF_CT_HELPER_NAME_LEN - 1); mutex_lock(&nf_ct_helper_mutex); hlist_for_each_entry(cur, &nf_ct_helper_hash[h], hnode) { if (strncmp(cur->name, me->name, NF_CT_HELPER_NAME_LEN) == 0 && cur->tuple.src.l3num == me->tuple.src.l3num && cur->tuple.dst.protonum == me->tuple.dst.protonum) { ret = -EEXIST; goto out; } } hlist_add_head_rcu(&me->hnode, &nf_ct_helper_hash[h]); nf_ct_helper_count++; out: mutex_unlock(&nf_ct_helper_mutex); return ret; } 
EXPORT_SYMBOL_GPL(nf_conntrack_helper_register); static void __nf_conntrack_helper_unregister(struct nf_conntrack_helper *me, struct net *net) { struct nf_conntrack_tuple_hash *h; struct nf_conntrack_expect *exp; const struct hlist_node *next; const struct hlist_nulls_node *nn; unsigned int i; /* Get rid of expectations */ for (i = 0; i < nf_ct_expect_hsize; i++) { hlist_for_each_entry_safe(exp, next, &net->ct.expect_hash[i], hnode) { struct nf_conn_help *help = nfct_help(exp->master); if ((rcu_dereference_protected( help->helper, lockdep_is_held(&nf_conntrack_lock) ) == me || exp->helper == me) && del_timer(&exp->timeout)) { nf_ct_unlink_expect(exp); nf_ct_expect_put(exp); } } } /* Get rid of expecteds, set helpers to NULL. */ hlist_nulls_for_each_entry(h, nn, &net->ct.unconfirmed, hnnode) unhelp(h, me); for (i = 0; i < net->ct.htable_size; i++) { hlist_nulls_for_each_entry(h, nn, &net->ct.hash[i], hnnode) unhelp(h, me); } } void nf_conntrack_helper_unregister(struct nf_conntrack_helper *me) { struct net *net; mutex_lock(&nf_ct_helper_mutex); hlist_del_rcu(&me->hnode); nf_ct_helper_count--; mutex_unlock(&nf_ct_helper_mutex); /* Make sure every nothing is still using the helper unless its a * connection in the hash. 
*/ synchronize_rcu(); rtnl_lock(); spin_lock_bh(&nf_conntrack_lock); for_each_net(net) __nf_conntrack_helper_unregister(me, net); spin_unlock_bh(&nf_conntrack_lock); rtnl_unlock(); } EXPORT_SYMBOL_GPL(nf_conntrack_helper_unregister); static struct nf_ct_ext_type helper_extend __read_mostly = { .len = sizeof(struct nf_conn_help), .align = __alignof__(struct nf_conn_help), .id = NF_CT_EXT_HELPER, }; int nf_conntrack_helper_pernet_init(struct net *net) { net->ct.auto_assign_helper_warned = false; net->ct.sysctl_auto_assign_helper = nf_ct_auto_assign_helper; return nf_conntrack_helper_init_sysctl(net); } void nf_conntrack_helper_pernet_fini(struct net *net) { nf_conntrack_helper_fini_sysctl(net); } int nf_conntrack_helper_init(void) { int ret; nf_ct_helper_hsize = 1; /* gets rounded up to use one page */ nf_ct_helper_hash = nf_ct_alloc_hashtable(&nf_ct_helper_hsize, 0); if (!nf_ct_helper_hash) return -ENOMEM; ret = nf_ct_extend_register(&helper_extend); if (ret < 0) { pr_err("nf_ct_helper: Unable to register helper extension.\n"); goto out_extend; } return 0; out_extend: nf_ct_free_hashtable(nf_ct_helper_hash, nf_ct_helper_hsize); return ret; } void nf_conntrack_helper_fini(void) { nf_ct_extend_unregister(&helper_extend); nf_ct_free_hashtable(nf_ct_helper_hash, nf_ct_helper_hsize); }
gpl-2.0
Stane1983/buildroot-linux-kernel-m6
arch/mips/bcm63xx/cpu.c
3053
10274
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr> * Copyright (C) 2009 Florian Fainelli <florian@openwrt.org> */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/cpu.h> #include <asm/cpu.h> #include <asm/cpu-info.h> #include <asm/mipsregs.h> #include <bcm63xx_cpu.h> #include <bcm63xx_regs.h> #include <bcm63xx_io.h> #include <bcm63xx_irq.h> const unsigned long *bcm63xx_regs_base; EXPORT_SYMBOL(bcm63xx_regs_base); const int *bcm63xx_irqs; EXPORT_SYMBOL(bcm63xx_irqs); static u16 bcm63xx_cpu_id; static u16 bcm63xx_cpu_rev; static unsigned int bcm63xx_cpu_freq; static unsigned int bcm63xx_memory_size; /* * 6338 register sets and irqs */ static const unsigned long bcm96338_regs_base[] = { [RSET_DSL_LMEM] = BCM_6338_DSL_LMEM_BASE, [RSET_PERF] = BCM_6338_PERF_BASE, [RSET_TIMER] = BCM_6338_TIMER_BASE, [RSET_WDT] = BCM_6338_WDT_BASE, [RSET_UART0] = BCM_6338_UART0_BASE, [RSET_UART1] = BCM_6338_UART1_BASE, [RSET_GPIO] = BCM_6338_GPIO_BASE, [RSET_SPI] = BCM_6338_SPI_BASE, [RSET_OHCI0] = BCM_6338_OHCI0_BASE, [RSET_OHCI_PRIV] = BCM_6338_OHCI_PRIV_BASE, [RSET_USBH_PRIV] = BCM_6338_USBH_PRIV_BASE, [RSET_UDC0] = BCM_6338_UDC0_BASE, [RSET_MPI] = BCM_6338_MPI_BASE, [RSET_PCMCIA] = BCM_6338_PCMCIA_BASE, [RSET_SDRAM] = BCM_6338_SDRAM_BASE, [RSET_DSL] = BCM_6338_DSL_BASE, [RSET_ENET0] = BCM_6338_ENET0_BASE, [RSET_ENET1] = BCM_6338_ENET1_BASE, [RSET_ENETDMA] = BCM_6338_ENETDMA_BASE, [RSET_MEMC] = BCM_6338_MEMC_BASE, [RSET_DDR] = BCM_6338_DDR_BASE, }; static const int bcm96338_irqs[] = { [IRQ_TIMER] = BCM_6338_TIMER_IRQ, [IRQ_UART0] = BCM_6338_UART0_IRQ, [IRQ_DSL] = BCM_6338_DSL_IRQ, [IRQ_ENET0] = BCM_6338_ENET0_IRQ, [IRQ_ENET_PHY] = BCM_6338_ENET_PHY_IRQ, [IRQ_ENET0_RXDMA] = BCM_6338_ENET0_RXDMA_IRQ, [IRQ_ENET0_TXDMA] = BCM_6338_ENET0_TXDMA_IRQ, }; /* * 6345 register sets and irqs */ static 
const unsigned long bcm96345_regs_base[] = { [RSET_DSL_LMEM] = BCM_6345_DSL_LMEM_BASE, [RSET_PERF] = BCM_6345_PERF_BASE, [RSET_TIMER] = BCM_6345_TIMER_BASE, [RSET_WDT] = BCM_6345_WDT_BASE, [RSET_UART0] = BCM_6345_UART0_BASE, [RSET_UART1] = BCM_6345_UART1_BASE, [RSET_GPIO] = BCM_6345_GPIO_BASE, [RSET_SPI] = BCM_6345_SPI_BASE, [RSET_UDC0] = BCM_6345_UDC0_BASE, [RSET_OHCI0] = BCM_6345_OHCI0_BASE, [RSET_OHCI_PRIV] = BCM_6345_OHCI_PRIV_BASE, [RSET_USBH_PRIV] = BCM_6345_USBH_PRIV_BASE, [RSET_MPI] = BCM_6345_MPI_BASE, [RSET_PCMCIA] = BCM_6345_PCMCIA_BASE, [RSET_DSL] = BCM_6345_DSL_BASE, [RSET_ENET0] = BCM_6345_ENET0_BASE, [RSET_ENET1] = BCM_6345_ENET1_BASE, [RSET_ENETDMA] = BCM_6345_ENETDMA_BASE, [RSET_EHCI0] = BCM_6345_EHCI0_BASE, [RSET_SDRAM] = BCM_6345_SDRAM_BASE, [RSET_MEMC] = BCM_6345_MEMC_BASE, [RSET_DDR] = BCM_6345_DDR_BASE, }; static const int bcm96345_irqs[] = { [IRQ_TIMER] = BCM_6345_TIMER_IRQ, [IRQ_UART0] = BCM_6345_UART0_IRQ, [IRQ_DSL] = BCM_6345_DSL_IRQ, [IRQ_ENET0] = BCM_6345_ENET0_IRQ, [IRQ_ENET_PHY] = BCM_6345_ENET_PHY_IRQ, [IRQ_ENET0_RXDMA] = BCM_6345_ENET0_RXDMA_IRQ, [IRQ_ENET0_TXDMA] = BCM_6345_ENET0_TXDMA_IRQ, }; /* * 6348 register sets and irqs */ static const unsigned long bcm96348_regs_base[] = { [RSET_DSL_LMEM] = BCM_6348_DSL_LMEM_BASE, [RSET_PERF] = BCM_6348_PERF_BASE, [RSET_TIMER] = BCM_6348_TIMER_BASE, [RSET_WDT] = BCM_6348_WDT_BASE, [RSET_UART0] = BCM_6348_UART0_BASE, [RSET_UART1] = BCM_6348_UART1_BASE, [RSET_GPIO] = BCM_6348_GPIO_BASE, [RSET_SPI] = BCM_6348_SPI_BASE, [RSET_OHCI0] = BCM_6348_OHCI0_BASE, [RSET_OHCI_PRIV] = BCM_6348_OHCI_PRIV_BASE, [RSET_USBH_PRIV] = BCM_6348_USBH_PRIV_BASE, [RSET_MPI] = BCM_6348_MPI_BASE, [RSET_PCMCIA] = BCM_6348_PCMCIA_BASE, [RSET_SDRAM] = BCM_6348_SDRAM_BASE, [RSET_DSL] = BCM_6348_DSL_BASE, [RSET_ENET0] = BCM_6348_ENET0_BASE, [RSET_ENET1] = BCM_6348_ENET1_BASE, [RSET_ENETDMA] = BCM_6348_ENETDMA_BASE, [RSET_MEMC] = BCM_6348_MEMC_BASE, [RSET_DDR] = BCM_6348_DDR_BASE, }; static const int bcm96348_irqs[] = { 
[IRQ_TIMER] = BCM_6348_TIMER_IRQ, [IRQ_UART0] = BCM_6348_UART0_IRQ, [IRQ_DSL] = BCM_6348_DSL_IRQ, [IRQ_ENET0] = BCM_6348_ENET0_IRQ, [IRQ_ENET1] = BCM_6348_ENET1_IRQ, [IRQ_ENET_PHY] = BCM_6348_ENET_PHY_IRQ, [IRQ_OHCI0] = BCM_6348_OHCI0_IRQ, [IRQ_PCMCIA] = BCM_6348_PCMCIA_IRQ, [IRQ_ENET0_RXDMA] = BCM_6348_ENET0_RXDMA_IRQ, [IRQ_ENET0_TXDMA] = BCM_6348_ENET0_TXDMA_IRQ, [IRQ_ENET1_RXDMA] = BCM_6348_ENET1_RXDMA_IRQ, [IRQ_ENET1_TXDMA] = BCM_6348_ENET1_TXDMA_IRQ, [IRQ_PCI] = BCM_6348_PCI_IRQ, }; /* * 6358 register sets and irqs */ static const unsigned long bcm96358_regs_base[] = { [RSET_DSL_LMEM] = BCM_6358_DSL_LMEM_BASE, [RSET_PERF] = BCM_6358_PERF_BASE, [RSET_TIMER] = BCM_6358_TIMER_BASE, [RSET_WDT] = BCM_6358_WDT_BASE, [RSET_UART0] = BCM_6358_UART0_BASE, [RSET_UART1] = BCM_6358_UART1_BASE, [RSET_GPIO] = BCM_6358_GPIO_BASE, [RSET_SPI] = BCM_6358_SPI_BASE, [RSET_OHCI0] = BCM_6358_OHCI0_BASE, [RSET_EHCI0] = BCM_6358_EHCI0_BASE, [RSET_OHCI_PRIV] = BCM_6358_OHCI_PRIV_BASE, [RSET_USBH_PRIV] = BCM_6358_USBH_PRIV_BASE, [RSET_MPI] = BCM_6358_MPI_BASE, [RSET_PCMCIA] = BCM_6358_PCMCIA_BASE, [RSET_SDRAM] = BCM_6358_SDRAM_BASE, [RSET_DSL] = BCM_6358_DSL_BASE, [RSET_ENET0] = BCM_6358_ENET0_BASE, [RSET_ENET1] = BCM_6358_ENET1_BASE, [RSET_ENETDMA] = BCM_6358_ENETDMA_BASE, [RSET_MEMC] = BCM_6358_MEMC_BASE, [RSET_DDR] = BCM_6358_DDR_BASE, }; static const int bcm96358_irqs[] = { [IRQ_TIMER] = BCM_6358_TIMER_IRQ, [IRQ_UART0] = BCM_6358_UART0_IRQ, [IRQ_UART1] = BCM_6358_UART1_IRQ, [IRQ_DSL] = BCM_6358_DSL_IRQ, [IRQ_ENET0] = BCM_6358_ENET0_IRQ, [IRQ_ENET1] = BCM_6358_ENET1_IRQ, [IRQ_ENET_PHY] = BCM_6358_ENET_PHY_IRQ, [IRQ_OHCI0] = BCM_6358_OHCI0_IRQ, [IRQ_EHCI0] = BCM_6358_EHCI0_IRQ, [IRQ_PCMCIA] = BCM_6358_PCMCIA_IRQ, [IRQ_ENET0_RXDMA] = BCM_6358_ENET0_RXDMA_IRQ, [IRQ_ENET0_TXDMA] = BCM_6358_ENET0_TXDMA_IRQ, [IRQ_ENET1_RXDMA] = BCM_6358_ENET1_RXDMA_IRQ, [IRQ_ENET1_TXDMA] = BCM_6358_ENET1_TXDMA_IRQ, [IRQ_PCI] = BCM_6358_PCI_IRQ, }; u16 __bcm63xx_get_cpu_id(void) { return bcm63xx_cpu_id; } 
EXPORT_SYMBOL(__bcm63xx_get_cpu_id); u16 bcm63xx_get_cpu_rev(void) { return bcm63xx_cpu_rev; } EXPORT_SYMBOL(bcm63xx_get_cpu_rev); unsigned int bcm63xx_get_cpu_freq(void) { return bcm63xx_cpu_freq; } unsigned int bcm63xx_get_memory_size(void) { return bcm63xx_memory_size; } static unsigned int detect_cpu_clock(void) { unsigned int tmp, n1 = 0, n2 = 0, m1 = 0; /* BCM6338 has a fixed 240 Mhz frequency */ if (BCMCPU_IS_6338()) return 240000000; /* BCM6345 has a fixed 140Mhz frequency */ if (BCMCPU_IS_6345()) return 140000000; /* * frequency depends on PLL configuration: */ if (BCMCPU_IS_6348()) { /* 16MHz * (N1 + 1) * (N2 + 2) / (M1_CPU + 1) */ tmp = bcm_perf_readl(PERF_MIPSPLLCTL_REG); n1 = (tmp & MIPSPLLCTL_N1_MASK) >> MIPSPLLCTL_N1_SHIFT; n2 = (tmp & MIPSPLLCTL_N2_MASK) >> MIPSPLLCTL_N2_SHIFT; m1 = (tmp & MIPSPLLCTL_M1CPU_MASK) >> MIPSPLLCTL_M1CPU_SHIFT; n1 += 1; n2 += 2; m1 += 1; } if (BCMCPU_IS_6358()) { /* 16MHz * N1 * N2 / M1_CPU */ tmp = bcm_ddr_readl(DDR_DMIPSPLLCFG_REG); n1 = (tmp & DMIPSPLLCFG_N1_MASK) >> DMIPSPLLCFG_N1_SHIFT; n2 = (tmp & DMIPSPLLCFG_N2_MASK) >> DMIPSPLLCFG_N2_SHIFT; m1 = (tmp & DMIPSPLLCFG_M1_MASK) >> DMIPSPLLCFG_M1_SHIFT; } return (16 * 1000000 * n1 * n2) / m1; } /* * attempt to detect the amount of memory installed */ static unsigned int detect_memory_size(void) { unsigned int cols = 0, rows = 0, is_32bits = 0, banks = 0; u32 val; if (BCMCPU_IS_6345()) return (8 * 1024 * 1024); if (BCMCPU_IS_6338() || BCMCPU_IS_6348()) { val = bcm_sdram_readl(SDRAM_CFG_REG); rows = (val & SDRAM_CFG_ROW_MASK) >> SDRAM_CFG_ROW_SHIFT; cols = (val & SDRAM_CFG_COL_MASK) >> SDRAM_CFG_COL_SHIFT; is_32bits = (val & SDRAM_CFG_32B_MASK) ? 1 : 0; banks = (val & SDRAM_CFG_BANK_MASK) ? 2 : 1; } if (BCMCPU_IS_6358()) { val = bcm_memc_readl(MEMC_CFG_REG); rows = (val & MEMC_CFG_ROW_MASK) >> MEMC_CFG_ROW_SHIFT; cols = (val & MEMC_CFG_COL_MASK) >> MEMC_CFG_COL_SHIFT; is_32bits = (val & MEMC_CFG_32B_MASK) ? 0 : 1; banks = 2; } /* 0 => 11 address bits ... 
2 => 13 address bits */ rows += 11; /* 0 => 8 address bits ... 2 => 10 address bits */ cols += 8; return 1 << (cols + rows + (is_32bits + 1) + banks); } void __init bcm63xx_cpu_init(void) { unsigned int tmp, expected_cpu_id; struct cpuinfo_mips *c = &current_cpu_data; unsigned int cpu = smp_processor_id(); /* soc registers location depends on cpu type */ expected_cpu_id = 0; switch (c->cputype) { case CPU_BMIPS3300: if ((read_c0_prid() & 0xff00) == PRID_IMP_BMIPS3300_ALT) { expected_cpu_id = BCM6348_CPU_ID; bcm63xx_regs_base = bcm96348_regs_base; bcm63xx_irqs = bcm96348_irqs; } else { __cpu_name[cpu] = "Broadcom BCM6338"; expected_cpu_id = BCM6338_CPU_ID; bcm63xx_regs_base = bcm96338_regs_base; bcm63xx_irqs = bcm96338_irqs; } break; case CPU_BMIPS32: expected_cpu_id = BCM6345_CPU_ID; bcm63xx_regs_base = bcm96345_regs_base; bcm63xx_irqs = bcm96345_irqs; break; case CPU_BMIPS4350: expected_cpu_id = BCM6358_CPU_ID; bcm63xx_regs_base = bcm96358_regs_base; bcm63xx_irqs = bcm96358_irqs; break; } /* * really early to panic, but delaying panic would not help since we * will never get any working console */ if (!expected_cpu_id) panic("unsupported Broadcom CPU"); /* * bcm63xx_regs_base is set, we can access soc registers */ /* double check CPU type */ tmp = bcm_perf_readl(PERF_REV_REG); bcm63xx_cpu_id = (tmp & REV_CHIPID_MASK) >> REV_CHIPID_SHIFT; bcm63xx_cpu_rev = (tmp & REV_REVID_MASK) >> REV_REVID_SHIFT; if (bcm63xx_cpu_id != expected_cpu_id) panic("bcm63xx CPU id mismatch"); bcm63xx_cpu_freq = detect_cpu_clock(); bcm63xx_memory_size = detect_memory_size(); printk(KERN_INFO "Detected Broadcom 0x%04x CPU revision %02x\n", bcm63xx_cpu_id, bcm63xx_cpu_rev); printk(KERN_INFO "CPU frequency is %u MHz\n", bcm63xx_cpu_freq / 1000000); printk(KERN_INFO "%uMB of RAM installed\n", bcm63xx_memory_size >> 20); }
gpl-2.0
MoKee/android_kernel_htc_dlx
drivers/video/msm/lcdc_samsung_oled_pt.c
3309
13897
/* Copyright (c) 2009-2010, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

/*
 * LCDC panel driver for the Samsung AMS367PE02 OLED.  The panel is
 * programmed over a 9-bit SPI-like link, either through the QUP SPI
 * controller (CONFIG_SPI_QUP) or by bit-banging three GPIOs.
 */

#include <linux/delay.h>
#include <linux/pwm.h>
#ifdef CONFIG_SPI_QUP
#include <linux/spi/spi.h>
#else
#include <mach/gpio.h>
#endif
#include "msm_fb.h"

#define DEBUG
/* #define SYSFS_DEBUG_CMD */

#ifdef CONFIG_SPI_QUP
#define LCDC_SAMSUNG_SPI_DEVICE_NAME	"lcdc_samsung_ams367pe02"
static struct spi_device *lcdc_spi_client;
#else
/* bit-bang GPIO numbers, taken from platform data in samsung_spi_init() */
static int spi_cs;
static int spi_sclk;
static int spi_mosi;
#endif

struct samsung_state_type {
	boolean disp_initialized;	/* panel_on sequence has run */
	boolean display_on;		/* display currently lit */
	boolean disp_powered_up;	/* power rails up */
	int brightness;			/* requested luminance in cd/m2 */
};

/* One panel command: register address plus up to 22 parameter bytes. */
struct samsung_spi_data {
	u8 addr;
	u8 len;
	u8 data[22];
};

static struct samsung_spi_data panel_sequence[] = {
	{ .addr = 0xf8, .len = 14, .data = { 0x01, 0x27, 0x27, 0x07, 0x07,
	  0x54, 0x9f, 0x63, 0x86, 0x1a, 0x33, 0x0d, 0x00, 0x00 } },
};

static struct samsung_spi_data display_sequence[] = {
	{ .addr = 0xf2, .len = 5, .data = { 0x02, 0x03, 0x1c, 0x10, 0x10 } },
	{ .addr = 0xf7, .len = 3, .data = { 0x00, 0x00, 0x30 } },
};

/* lum=300 cd/m2 */
static struct samsung_spi_data gamma_sequence_300[] = {
	{ .addr = 0xfa, .len = 22, .data = { 0x02, 0x18, 0x08, 0x24, 0x7d,
	  0x77, 0x5b, 0xbe, 0xc1, 0xb1, 0xb3, 0xb7, 0xa6, 0xc3, 0xc5, 0xb9,
	  0x00, 0xb3, 0x00, 0xaf, 0x00, 0xe8 } },
	{ .addr = 0xFA, .len = 1, .data = { 0x03 } },
};

/* lum = 180 cd/m2*/
static struct samsung_spi_data gamma_sequence_180[] = {
	{ .addr = 0xfa, .len = 22, .data = { 0x02, 0x18, 0x08, 0x24, 0x83,
	  0x78, 0x60, 0xc5, 0xc6, 0xb8, 0xba, 0xbe, 0xad, 0xcb, 0xcd, 0xc2,
	  0x00, 0x92, 0x00, 0x8e, 0x00, 0xbc } },
	{ .addr = 0xFA, .len = 1, .data = { 0x03 } },
};

/* lum = 80 cd/m2*/
static struct samsung_spi_data gamma_sequence_80[] = {
	{ .addr = 0xfa, .len = 22, .data = { 0x02, 0x18, 0x08, 0x24, 0x94,
	  0x73, 0x6c, 0xcb, 0xca, 0xbe, 0xc4, 0xc7, 0xb8, 0xd3, 0xd5, 0xcb,
	  0x00, 0x6d, 0x00, 0x69, 0x00, 0x8b } },
	{ .addr = 0xFA, .len = 1, .data = { 0x03 } },
};

static struct samsung_spi_data etc_sequence[] = {
	{ .addr = 0xF6, .len = 3, .data = { 0x00, 0x8e, 0x07 } },
	{ .addr = 0xB3, .len = 1, .data = { 0x0C } },
};

static struct samsung_state_type samsung_state = { .brightness = 180 };
static struct msm_panel_common_pdata *lcdc_samsung_pdata;

#ifndef CONFIG_SPI_QUP
/*
 * Clock out one 9-bit word: a D/C bit followed by 8 data bits, MSB
 * first.  Data is latched by the panel on the rising clock edge.
 */
static void samsung_spi_write_byte(boolean dc, u8 data)
{
	uint32 bit;
	int bnum;

	gpio_set_value(spi_sclk, 0);
	gpio_set_value(spi_mosi, dc ? 1 : 0);
	udelay(1);			/* at least 20 ns */
	gpio_set_value(spi_sclk, 1);	/* clk high */
	udelay(1);			/* at least 20 ns */

	bnum = 8;			/* 8 data bits */
	bit = 0x80;
	while (bnum--) {
		gpio_set_value(spi_sclk, 0);	/* clk low */
		gpio_set_value(spi_mosi, (data & bit) ? 1 : 0);
		udelay(1);
		gpio_set_value(spi_sclk, 1);	/* clk high */
		udelay(1);
		bit >>= 1;
	}
	gpio_set_value(spi_mosi, 0);
}

/*
 * Send a read command and clock back @num bytes into @data, MSB first,
 * turning the MOSI line around to act as input for the duration.
 */
static void samsung_spi_read_bytes(u8 cmd, u8 *data, int num)
{
	int bnum;

	/* Chip Select - low */
	gpio_set_value(spi_cs, 0);
	udelay(2);

	/* command byte first */
	samsung_spi_write_byte(0, cmd);
	udelay(2);

	gpio_direction_input(spi_mosi);

	if (num > 1) {
		/* extra dummy clock */
		gpio_set_value(spi_sclk, 0);
		udelay(1);
		gpio_set_value(spi_sclk, 1);
		udelay(1);
	}

	/* followed by data bytes */
	bnum = num * 8;		/* number of bits */
	*data = 0;
	while (bnum) {
		gpio_set_value(spi_sclk, 0);	/* clk low */
		udelay(1);
		*data <<= 1;
		*data |= gpio_get_value(spi_mosi) ? 1 : 0;
		gpio_set_value(spi_sclk, 1);	/* clk high */
		udelay(1);
		--bnum;
		if ((bnum % 8) == 0) {
			++data;
			/* fix: zero the next byte before ORing bits into
			 * it; previously bytes after the first were built
			 * on uninitialized memory when num > 1 */
			if (bnum)
				*data = 0;
		}
	}
	gpio_direction_output(spi_mosi, 0);

	/* Chip Select - high */
	udelay(2);
	gpio_set_value(spi_cs, 1);
}
#endif

#ifdef DEBUG
/*
 * Render up to 32 bytes as an ASCII bit string for debug logging.
 * Returns a static buffer, so not reentrant (debug-only use).
 */
static const char *byte_to_binary(const u8 *buf, int len)
{
	static char b[32*8+1];
	char *p = b;
	int i, z;

	/* clamp to buffer capacity to avoid overflowing the static array */
	if (len > 32)
		len = 32;

	for (i = 0; i < len; ++i) {
		u8 val = *buf++;
		for (z = 1 << 7; z > 0; z >>= 1)
			*p++ = (val & z) ? '1' : '0';
	}
	*p = 0;

	return b;
}
#endif

/* Helpers to pack 9-bit words (D/C bit + data byte) into tx_buf; they
 * rely on local variables tx_buf[] and bit_size in the caller. */
#define BIT_OFFSET	(bit_size % 8)
#define ADD_BIT(val) do { \
		tx_buf[bit_size / 8] |= \
			(u8)((val ? 1 : 0) << (7 - BIT_OFFSET)); \
		++bit_size; \
	} while (0)

#define ADD_BYTE(data) do { \
		tx_buf[bit_size / 8] |= (u8)(data >> BIT_OFFSET); \
		bit_size += 8; \
		if (BIT_OFFSET != 0) \
			tx_buf[bit_size / 8] |= (u8)(data << (8 - BIT_OFFSET));\
	} while (0)

/*
 * Send one command (address + parameters) to the panel, via QUP SPI or
 * the bit-bang path.  Returns 0 or a negative errno.
 */
static int samsung_serigo(struct samsung_spi_data data)
{
#ifdef CONFIG_SPI_QUP
	char                tx_buf[32];
	int                 bit_size = 0, i, rc;
	struct spi_message  m;
	struct spi_transfer t;

	if (!lcdc_spi_client) {
		pr_err("%s lcdc_spi_client is NULL\n", __func__);
		return -EINVAL;
	}

	memset(&t, 0, sizeof t);
	memset(tx_buf, 0, sizeof tx_buf);
	t.tx_buf = tx_buf;
	spi_setup(lcdc_spi_client);
	spi_message_init(&m);
	spi_message_add_tail(&t, &m);

	/* D/C=0 for the address word, D/C=1 for each parameter word */
	ADD_BIT(FALSE);
	ADD_BYTE(data.addr);
	for (i = 0; i < data.len; ++i) {
		ADD_BIT(TRUE);
		ADD_BYTE(data.data[i]);
	}

	/* add padding bits so we round to next byte */
	t.len = (bit_size+7) / 8;
	if (t.len <= 4)
		t.bits_per_word = bit_size;

	rc = spi_sync(lcdc_spi_client, &m);
#ifdef DEBUG
	pr_info("%s: addr=0x%02x, #args=%d[%d] [%s], rc=%d\n",
		__func__, data.addr, t.len, t.bits_per_word,
		byte_to_binary(tx_buf, t.len), rc);
#endif
	return rc;
#else
	int i;

	/* Chip Select - low */
	gpio_set_value(spi_cs, 0);
	udelay(2);

	samsung_spi_write_byte(FALSE, data.addr);
	udelay(2);

	for (i = 0; i < data.len; ++i) {
		samsung_spi_write_byte(TRUE, data.data[i]);
		udelay(2);
	}

	/* Chip Select - high */
	gpio_set_value(spi_cs, 1);
#ifdef DEBUG
	pr_info("%s: cmd=0x%02x, #args=%d\n", __func__,
		data.addr, data.len);
#endif
	return 0;
#endif
}

/* Send a single parameterless command byte to the panel. */
static int samsung_write_cmd(u8 cmd)
{
#ifdef CONFIG_SPI_QUP
	char                tx_buf[2];
	int                 bit_size = 0, rc;
	struct spi_message  m;
	struct spi_transfer t;

	if (!lcdc_spi_client) {
		pr_err("%s lcdc_spi_client is NULL\n", __func__);
		return -EINVAL;
	}

	memset(&t, 0, sizeof t);
	memset(tx_buf, 0, sizeof tx_buf);
	t.tx_buf = tx_buf;
	spi_setup(lcdc_spi_client);
	spi_message_init(&m);
	spi_message_add_tail(&t, &m);

	ADD_BIT(FALSE);
	ADD_BYTE(cmd);

	t.len = 2;
	t.bits_per_word = 9;

	rc = spi_sync(lcdc_spi_client, &m);
#ifdef DEBUG
	pr_info("%s: addr=0x%02x, #args=%d[%d] [%s], rc=%d\n",
		__func__, cmd, t.len, t.bits_per_word,
		byte_to_binary(tx_buf, t.len), rc);
#endif
	return rc;
#else
	/* Chip Select - low */
	gpio_set_value(spi_cs, 0);
	udelay(2);

	samsung_spi_write_byte(FALSE, cmd);

	/* Chip Select - high */
	udelay(2);
	gpio_set_value(spi_cs, 1);
#ifdef DEBUG
	pr_info("%s: cmd=0x%02x\n", __func__, cmd);
#endif
	return 0;
#endif
}

/* Send an array of panel commands, pausing 10 ms between each; stop
 * and return the error on the first failure. */
static int samsung_serigo_list(struct samsung_spi_data *data, int count)
{
	int i, rc;

	for (i = 0; i < count; ++i, ++data) {
		rc = samsung_serigo(*data);
		if (rc)
			return rc;
		msleep(10);
	}
	return 0;
}

#ifndef CONFIG_SPI_QUP
/* Fetch the bit-bang GPIO numbers from platform data and park the
 * lines in their idle state. */
static void samsung_spi_init(void)
{
	spi_sclk = *(lcdc_samsung_pdata->gpio_num);
	spi_cs   = *(lcdc_samsung_pdata->gpio_num + 1);
	spi_mosi = *(lcdc_samsung_pdata->gpio_num + 2);

	/* Set the output so that we don't disturb the slave device */
	gpio_set_value(spi_sclk, 1);
	gpio_set_value(spi_mosi, 0);

	/* Set the Chip Select deasserted (active low) */
	gpio_set_value(spi_cs, 1);
}
#endif

static void samsung_disp_powerup(void)
{
	if (!samsung_state.disp_powered_up && !samsung_state.display_on)
		samsung_state.disp_powered_up = TRUE;
}

static struct work_struct disp_on_delayed_work;

/*
 * Full panel bring-up sequence (reset, panel/display/gamma/etc tables,
 * sleep-out, display-on).  Runs from a workqueue because of the long
 * mandatory delays.
 */
static void samsung_disp_on_delayed_work(struct work_struct *work_ptr)
{
	/* 0x01: Software Reset */
	samsung_write_cmd(0x01);
	msleep(120);
	msleep(300);

	samsung_serigo_list(panel_sequence, ARRAY_SIZE(panel_sequence));
	samsung_serigo_list(display_sequence, ARRAY_SIZE(display_sequence));

	switch (samsung_state.brightness) {
	case 300:
		samsung_serigo_list(gamma_sequence_300,
				    ARRAY_SIZE(gamma_sequence_300));
		break;
	case 180:
	default:
		samsung_serigo_list(gamma_sequence_180,
				    ARRAY_SIZE(gamma_sequence_180));
		break;
	case 80:
		samsung_serigo_list(gamma_sequence_80,
				    ARRAY_SIZE(gamma_sequence_80));
		break;
	}

	samsung_serigo_list(etc_sequence, ARRAY_SIZE(etc_sequence));

	/* 0x11: Sleep Out */
	samsung_write_cmd(0x11);
	msleep(120);

	/* 0x13: Normal Mode On */
	samsung_write_cmd(0x13);

#ifndef CONFIG_SPI_QUP
	{
		u8 data;

		msleep(120);
		/* 0x0A: Read Display Power Mode */
		samsung_spi_read_bytes(0x0A, &data, 1);
		pr_info("%s: power=[%s]\n", __func__,
			byte_to_binary(&data, 1));

		msleep(120);
		/* 0x0C: Read Display Pixel Format */
		samsung_spi_read_bytes(0x0C, &data, 1);
		pr_info("%s: pixel-format=[%s]\n", __func__,
			byte_to_binary(&data, 1));
	}
#endif
	msleep(120);

	/* 0x29: Display On */
	samsung_write_cmd(0x29);
}

/* Kick off the (asynchronous) display-on sequence once power is up. */
static void samsung_disp_on(void)
{
	if (samsung_state.disp_powered_up && !samsung_state.display_on) {
		INIT_WORK(&disp_on_delayed_work,
			  samsung_disp_on_delayed_work);
		schedule_work(&disp_on_delayed_work);
		samsung_state.display_on = TRUE;
	}
}

static int lcdc_samsung_panel_on(struct platform_device *pdev)
{
	pr_info("%s\n", __func__);
	if (!samsung_state.disp_initialized) {
#ifndef CONFIG_SPI_QUP
		lcdc_samsung_pdata->panel_config_gpio(1);
		samsung_spi_init();
#endif
		samsung_disp_powerup();
		samsung_disp_on();
		samsung_state.disp_initialized = TRUE;
	}
	return 0;
}

static int lcdc_samsung_panel_off(struct platform_device *pdev)
{
	pr_info("%s\n", __func__);
	if (samsung_state.disp_powered_up && samsung_state.display_on) {
		/* 0x10: Sleep In */
		samsung_write_cmd(0x10);
		msleep(120);
		samsung_state.display_on = FALSE;
		samsung_state.disp_initialized = FALSE;
	}
	return 0;
}

#ifdef SYSFS_DEBUG_CMD
static ssize_t samsung_rda_cmd(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	ssize_t ret = snprintf(buf, PAGE_SIZE, "n/a\n");

	pr_info("%s: 'n/a'\n", __func__);
	return ret;
}

static ssize_t samsung_wta_cmd(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t count)
{
	ssize_t ret = strnlen(buf, PAGE_SIZE);
	uint32 cmd;

	/* fix: reject unparsable input instead of sending an
	 * uninitialized command byte to the panel */
	if (sscanf(buf, "%x", &cmd) != 1)
		return -EINVAL;
	samsung_write_cmd((u8)cmd);

	return ret;
}

/* fix: owner-writable only (S_IWUGO made a debug hook world-writable,
 * which checkpatch/upstream policy forbids) */
static DEVICE_ATTR(cmd, S_IRUGO | S_IWUSR, samsung_rda_cmd,
		   samsung_wta_cmd);

static struct attribute *fs_attrs[] = {
	&dev_attr_cmd.attr,
	NULL,
};

static struct attribute_group fs_attr_group = {
	.attrs = fs_attrs,
};
#endif

static struct msm_fb_panel_data samsung_panel_data = {
	.on = lcdc_samsung_panel_on,
	.off = lcdc_samsung_panel_off,
};

/* Describe the AMS367PE02 timings to msm_fb and register the fb device. */
static int __devinit samsung_probe(struct platform_device *pdev)
{
	struct msm_panel_info *pinfo;
#ifdef SYSFS_DEBUG_CMD
	struct platform_device *fb_dev;
	struct msm_fb_data_type *mfd;
	int rc;
#endif

	pr_info("%s: id=%d\n", __func__, pdev->id);
	lcdc_samsung_pdata = pdev->dev.platform_data;

	pinfo = &samsung_panel_data.panel_info;
	pinfo->xres = 480;
	pinfo->yres = 800;
	pinfo->type = LCDC_PANEL;
	pinfo->pdest = DISPLAY_1;
	pinfo->wait_cycle = 0;
	pinfo->bpp = 24;
	pinfo->fb_num = 2;
	pinfo->clk_rate = 25600000; /* Max 27.77MHz */
	pinfo->bl_max = 15;
	pinfo->bl_min = 1;

	/* AMS367PE02 Operation Manual, Page 7 */
	pinfo->lcdc.h_back_porch = 16-2;	/* HBP-HLW */
	pinfo->lcdc.h_front_porch = 16;
	pinfo->lcdc.h_pulse_width = 2;
	/* AMS367PE02 Operation Manual, Page 6 */
	pinfo->lcdc.v_back_porch = 3-2;		/* VBP-VLW */
	pinfo->lcdc.v_front_porch = 28;
	pinfo->lcdc.v_pulse_width = 2;

	pinfo->lcdc.border_clr = 0;
	pinfo->lcdc.underflow_clr = 0xff;
	pinfo->lcdc.hsync_skew = 0;

	pdev->dev.platform_data = &samsung_panel_data;
#ifndef SYSFS_DEBUG_CMD
	msm_fb_add_device(pdev);
#else
	fb_dev = msm_fb_add_device(pdev);
	mfd = platform_get_drvdata(fb_dev);
	rc = sysfs_create_group(&mfd->fbi->dev->kobj, &fs_attr_group);
	if (rc) {
		pr_err("%s: sysfs group creation failed, rc=%d\n",
		       __func__, rc);
		return rc;
	}
#endif
	return 0;
}

#ifdef CONFIG_SPI_QUP
static int __devinit lcdc_samsung_spi_probe(struct spi_device *spi)
{
	pr_info("%s\n", __func__);
	lcdc_spi_client = spi;
	lcdc_spi_client->bits_per_word = 32;
	return 0;
}

static int __devexit lcdc_samsung_spi_remove(struct spi_device *spi)
{
	lcdc_spi_client = NULL;
	return 0;
}

static struct spi_driver lcdc_samsung_spi_driver = {
	.driver.name	= LCDC_SAMSUNG_SPI_DEVICE_NAME,
	.driver.owner	= THIS_MODULE,
	.probe		= lcdc_samsung_spi_probe,
	.remove		= __devexit_p(lcdc_samsung_spi_remove),
};
#endif

static struct platform_driver this_driver = {
	.probe		= samsung_probe,
	.driver.name	= "lcdc_samsung_oled",
};

static int __init lcdc_samsung_panel_init(void)
{
	int ret;

	/* NOTE(review): returns 0 (success) when detection fails,
	 * matching the original behavior of simply not registering */
	if (msm_fb_detect_client("lcdc_samsung_oled")) {
		pr_err("%s: detect failed\n", __func__);
		return 0;
	}

	ret = platform_driver_register(&this_driver);
	if (ret) {
		pr_err("%s: driver register failed, rc=%d\n", __func__, ret);
		return ret;
	}

#ifdef CONFIG_SPI_QUP
	ret = spi_register_driver(&lcdc_samsung_spi_driver);
	if (ret) {
		pr_err("%s: spi register failed: rc=%d\n", __func__, ret);
		platform_driver_unregister(&this_driver);
	} else
		pr_info("%s: SUCCESS (SPI)\n", __func__);
#else
	pr_info("%s: SUCCESS (BitBang)\n", __func__);
#endif
	return ret;
}

module_init(lcdc_samsung_panel_init);

static void __exit lcdc_samsung_panel_exit(void)
{
	pr_info("%s\n", __func__);
#ifdef CONFIG_SPI_QUP
	spi_unregister_driver(&lcdc_samsung_spi_driver);
#endif
	platform_driver_unregister(&this_driver);
}

module_exit(lcdc_samsung_panel_exit);
gpl-2.0
MojieBuddhist/linux-1
drivers/misc/kgdbts.c
3565
31341
/* * kgdbts is a test suite for kgdb for the sole purpose of validating * that key pieces of the kgdb internals are working properly such as * HW/SW breakpoints, single stepping, and NMI. * * Created by: Jason Wessel <jason.wessel@windriver.com> * * Copyright (c) 2008 Wind River Systems, Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. * See the GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /* Information about the kgdb test suite. * ------------------------------------- * * The kgdb test suite is designed as a KGDB I/O module which * simulates the communications that a debugger would have with kgdb. * The tests are broken up in to a line by line and referenced here as * a "get" which is kgdb requesting input and "put" which is kgdb * sending a response. * * The kgdb suite can be invoked from the kernel command line * arguments system or executed dynamically at run time. The test * suite uses the variable "kgdbts" to obtain the information about * which tests to run and to configure the verbosity level. 
The * following are the various characters you can use with the kgdbts= * line: * * When using the "kgdbts=" you only choose one of the following core * test types: * A = Run all the core tests silently * V1 = Run all the core tests with minimal output * V2 = Run all the core tests in debug mode * * You can also specify optional tests: * N## = Go to sleep with interrupts of for ## seconds * to test the HW NMI watchdog * F## = Break at do_fork for ## iterations * S## = Break at sys_open for ## iterations * I## = Run the single step test ## iterations * * NOTE: that the do_fork and sys_open tests are mutually exclusive. * * To invoke the kgdb test suite from boot you use a kernel start * argument as follows: * kgdbts=V1 kgdbwait * Or if you wanted to perform the NMI test for 6 seconds and do_fork * test for 100 forks, you could use: * kgdbts=V1N6F100 kgdbwait * * The test suite can also be invoked at run time with: * echo kgdbts=V1N6F100 > /sys/module/kgdbts/parameters/kgdbts * Or as another example: * echo kgdbts=V2 > /sys/module/kgdbts/parameters/kgdbts * * When developing a new kgdb arch specific implementation or * using these tests for the purpose of regression testing, * several invocations are required. * * 1) Boot with the test suite enabled by using the kernel arguments * "kgdbts=V1F100 kgdbwait" * ## If kgdb arch specific implementation has NMI use * "kgdbts=V1N6F100 * * 2) After the system boot run the basic test. * echo kgdbts=V1 > /sys/module/kgdbts/parameters/kgdbts * * 3) Run the concurrency tests. It is best to use n+1 * while loops where n is the number of cpus you have * in your system. The example below uses only two * loops. 
* * ## This tests break points on sys_open * while [ 1 ] ; do find / > /dev/null 2>&1 ; done & * while [ 1 ] ; do find / > /dev/null 2>&1 ; done & * echo kgdbts=V1S10000 > /sys/module/kgdbts/parameters/kgdbts * fg # and hit control-c * fg # and hit control-c * ## This tests break points on do_fork * while [ 1 ] ; do date > /dev/null ; done & * while [ 1 ] ; do date > /dev/null ; done & * echo kgdbts=V1F1000 > /sys/module/kgdbts/parameters/kgdbts * fg # and hit control-c * */ #include <linux/kernel.h> #include <linux/kgdb.h> #include <linux/ctype.h> #include <linux/uaccess.h> #include <linux/syscalls.h> #include <linux/nmi.h> #include <linux/delay.h> #include <linux/kthread.h> #include <linux/module.h> #include <asm/sections.h> #define v1printk(a...) do { \ if (verbose) \ printk(KERN_INFO a); \ } while (0) #define v2printk(a...) do { \ if (verbose > 1) \ printk(KERN_INFO a); \ touch_nmi_watchdog(); \ } while (0) #define eprintk(a...) do { \ printk(KERN_ERR a); \ WARN_ON(1); \ } while (0) #define MAX_CONFIG_LEN 40 static struct kgdb_io kgdbts_io_ops; static char get_buf[BUFMAX]; static int get_buf_cnt; static char put_buf[BUFMAX]; static int put_buf_cnt; static char scratch_buf[BUFMAX]; static int verbose; static int repeat_test; static int test_complete; static int send_ack; static int final_ack; static int force_hwbrks; static int hwbreaks_ok; static int hw_break_val; static int hw_break_val2; static int cont_instead_of_sstep; static unsigned long cont_thread_id; static unsigned long sstep_thread_id; #if defined(CONFIG_ARM) || defined(CONFIG_MIPS) || defined(CONFIG_SPARC) static int arch_needs_sstep_emulation = 1; #else static int arch_needs_sstep_emulation; #endif static unsigned long cont_addr; static unsigned long sstep_addr; static int restart_from_top_after_write; static int sstep_state; /* Storage for the registers, in GDB format. 
*/ static unsigned long kgdbts_gdb_regs[(NUMREGBYTES + sizeof(unsigned long) - 1) / sizeof(unsigned long)]; static struct pt_regs kgdbts_regs; /* -1 = init not run yet, 0 = unconfigured, 1 = configured. */ static int configured = -1; #ifdef CONFIG_KGDB_TESTS_BOOT_STRING static char config[MAX_CONFIG_LEN] = CONFIG_KGDB_TESTS_BOOT_STRING; #else static char config[MAX_CONFIG_LEN]; #endif static struct kparam_string kps = { .string = config, .maxlen = MAX_CONFIG_LEN, }; static void fill_get_buf(char *buf); struct test_struct { char *get; char *put; void (*get_handler)(char *); int (*put_handler)(char *, char *); }; struct test_state { char *name; struct test_struct *tst; int idx; int (*run_test) (int, int); int (*validate_put) (char *); }; static struct test_state ts; static int kgdbts_unreg_thread(void *ptr) { /* Wait until the tests are complete and then ungresiter the I/O * driver. */ while (!final_ack) msleep_interruptible(1500); /* Pause for any other threads to exit after final ack. */ msleep_interruptible(1000); if (configured) kgdb_unregister_io_module(&kgdbts_io_ops); configured = 0; return 0; } /* This is noinline such that it can be used for a single location to * place a breakpoint */ static noinline void kgdbts_break_test(void) { v2printk("kgdbts: breakpoint complete\n"); } /* Lookup symbol info in the kernel */ static unsigned long lookup_addr(char *arg) { unsigned long addr = 0; if (!strcmp(arg, "kgdbts_break_test")) addr = (unsigned long)kgdbts_break_test; else if (!strcmp(arg, "sys_open")) addr = (unsigned long)do_sys_open; else if (!strcmp(arg, "do_fork")) addr = (unsigned long)do_fork; else if (!strcmp(arg, "hw_break_val")) addr = (unsigned long)&hw_break_val; addr = (unsigned long) dereference_function_descriptor((void *)addr); return addr; } static void break_helper(char *bp_type, char *arg, unsigned long vaddr) { unsigned long addr; if (arg) addr = lookup_addr(arg); else addr = vaddr; sprintf(scratch_buf, "%s,%lx,%i", bp_type, addr, 
BREAK_INSTR_SIZE); fill_get_buf(scratch_buf); } static void sw_break(char *arg) { break_helper(force_hwbrks ? "Z1" : "Z0", arg, 0); } static void sw_rem_break(char *arg) { break_helper(force_hwbrks ? "z1" : "z0", arg, 0); } static void hw_break(char *arg) { break_helper("Z1", arg, 0); } static void hw_rem_break(char *arg) { break_helper("z1", arg, 0); } static void hw_write_break(char *arg) { break_helper("Z2", arg, 0); } static void hw_rem_write_break(char *arg) { break_helper("z2", arg, 0); } static void hw_access_break(char *arg) { break_helper("Z4", arg, 0); } static void hw_rem_access_break(char *arg) { break_helper("z4", arg, 0); } static void hw_break_val_access(void) { hw_break_val2 = hw_break_val; } static void hw_break_val_write(void) { hw_break_val++; } static int get_thread_id_continue(char *put_str, char *arg) { char *ptr = &put_str[11]; if (put_str[1] != 'T' || put_str[2] != '0') return 1; kgdb_hex2long(&ptr, &cont_thread_id); return 0; } static int check_and_rewind_pc(char *put_str, char *arg) { unsigned long addr = lookup_addr(arg); unsigned long ip; int offset = 0; kgdb_hex2mem(&put_str[1], (char *)kgdbts_gdb_regs, NUMREGBYTES); gdb_regs_to_pt_regs(kgdbts_gdb_regs, &kgdbts_regs); ip = instruction_pointer(&kgdbts_regs); v2printk("Stopped at IP: %lx\n", ip); #ifdef GDB_ADJUSTS_BREAK_OFFSET /* On some arches, a breakpoint stop requires it to be decremented */ if (addr + BREAK_INSTR_SIZE == ip) offset = -BREAK_INSTR_SIZE; #endif if (arch_needs_sstep_emulation && sstep_addr && ip + offset == sstep_addr && ((!strcmp(arg, "sys_open") || !strcmp(arg, "do_fork")))) { /* This is special case for emulated single step */ v2printk("Emul: rewind hit single step bp\n"); restart_from_top_after_write = 1; } else if (strcmp(arg, "silent") && ip + offset != addr) { eprintk("kgdbts: BP mismatch %lx expected %lx\n", ip + offset, addr); return 1; } /* Readjust the instruction pointer if needed */ ip += offset; cont_addr = ip; #ifdef GDB_ADJUSTS_BREAK_OFFSET 
instruction_pointer_set(&kgdbts_regs, ip); #endif return 0; } static int check_single_step(char *put_str, char *arg) { unsigned long addr = lookup_addr(arg); static int matched_id; /* * From an arch indepent point of view the instruction pointer * should be on a different instruction */ kgdb_hex2mem(&put_str[1], (char *)kgdbts_gdb_regs, NUMREGBYTES); gdb_regs_to_pt_regs(kgdbts_gdb_regs, &kgdbts_regs); v2printk("Singlestep stopped at IP: %lx\n", instruction_pointer(&kgdbts_regs)); if (sstep_thread_id != cont_thread_id) { /* * Ensure we stopped in the same thread id as before, else the * debugger should continue until the original thread that was * single stepped is scheduled again, emulating gdb's behavior. */ v2printk("ThrID does not match: %lx\n", cont_thread_id); if (arch_needs_sstep_emulation) { if (matched_id && instruction_pointer(&kgdbts_regs) != addr) goto continue_test; matched_id++; ts.idx -= 2; sstep_state = 0; return 0; } cont_instead_of_sstep = 1; ts.idx -= 4; return 0; } continue_test: matched_id = 0; if (instruction_pointer(&kgdbts_regs) == addr) { eprintk("kgdbts: SingleStep failed at %lx\n", instruction_pointer(&kgdbts_regs)); return 1; } return 0; } static void write_regs(char *arg) { memset(scratch_buf, 0, sizeof(scratch_buf)); scratch_buf[0] = 'G'; pt_regs_to_gdb_regs(kgdbts_gdb_regs, &kgdbts_regs); kgdb_mem2hex((char *)kgdbts_gdb_regs, &scratch_buf[1], NUMREGBYTES); fill_get_buf(scratch_buf); } static void skip_back_repeat_test(char *arg) { int go_back = simple_strtol(arg, NULL, 10); repeat_test--; if (repeat_test <= 0) ts.idx++; else ts.idx -= go_back; fill_get_buf(ts.tst[ts.idx].get); } static int got_break(char *put_str, char *arg) { test_complete = 1; if (!strncmp(put_str+1, arg, 2)) { if (!strncmp(arg, "T0", 2)) test_complete = 2; return 0; } return 1; } static void get_cont_catch(char *arg) { /* Always send detach because the test is completed at this point */ fill_get_buf("D"); } static int put_cont_catch(char *put_str, char *arg) { /* 
This is at the end of the test and we catch any and all input */ v2printk("kgdbts: cleanup task: %lx\n", sstep_thread_id); ts.idx--; return 0; } static int emul_reset(char *put_str, char *arg) { if (strncmp(put_str, "$OK", 3)) return 1; if (restart_from_top_after_write) { restart_from_top_after_write = 0; ts.idx = -1; } return 0; } static void emul_sstep_get(char *arg) { if (!arch_needs_sstep_emulation) { if (cont_instead_of_sstep) { cont_instead_of_sstep = 0; fill_get_buf("c"); } else { fill_get_buf(arg); } return; } switch (sstep_state) { case 0: v2printk("Emulate single step\n"); /* Start by looking at the current PC */ fill_get_buf("g"); break; case 1: /* set breakpoint */ break_helper("Z0", NULL, sstep_addr); break; case 2: /* Continue */ fill_get_buf("c"); break; case 3: /* Clear breakpoint */ break_helper("z0", NULL, sstep_addr); break; default: eprintk("kgdbts: ERROR failed sstep get emulation\n"); } sstep_state++; } static int emul_sstep_put(char *put_str, char *arg) { if (!arch_needs_sstep_emulation) { char *ptr = &put_str[11]; if (put_str[1] != 'T' || put_str[2] != '0') return 1; kgdb_hex2long(&ptr, &sstep_thread_id); return 0; } switch (sstep_state) { case 1: /* validate the "g" packet to get the IP */ kgdb_hex2mem(&put_str[1], (char *)kgdbts_gdb_regs, NUMREGBYTES); gdb_regs_to_pt_regs(kgdbts_gdb_regs, &kgdbts_regs); v2printk("Stopped at IP: %lx\n", instruction_pointer(&kgdbts_regs)); /* Want to stop at IP + break instruction size by default */ sstep_addr = cont_addr + BREAK_INSTR_SIZE; break; case 2: if (strncmp(put_str, "$OK", 3)) { eprintk("kgdbts: failed sstep break set\n"); return 1; } break; case 3: if (strncmp(put_str, "$T0", 3)) { eprintk("kgdbts: failed continue sstep\n"); return 1; } else { char *ptr = &put_str[11]; kgdb_hex2long(&ptr, &sstep_thread_id); } break; case 4: if (strncmp(put_str, "$OK", 3)) { eprintk("kgdbts: failed sstep break unset\n"); return 1; } /* Single step is complete so continue on! 
*/ sstep_state = 0; return 0; default: eprintk("kgdbts: ERROR failed sstep put emulation\n"); } /* Continue on the same test line until emulation is complete */ ts.idx--; return 0; } static int final_ack_set(char *put_str, char *arg) { if (strncmp(put_str+1, arg, 2)) return 1; final_ack = 1; return 0; } /* * Test to plant a breakpoint and detach, which should clear out the * breakpoint and restore the original instruction. */ static struct test_struct plant_and_detach_test[] = { { "?", "S0*" }, /* Clear break points */ { "kgdbts_break_test", "OK", sw_break, }, /* set sw breakpoint */ { "D", "OK" }, /* Detach */ { "", "" }, }; /* * Simple test to write in a software breakpoint, check for the * correct stop location and detach. */ static struct test_struct sw_breakpoint_test[] = { { "?", "S0*" }, /* Clear break points */ { "kgdbts_break_test", "OK", sw_break, }, /* set sw breakpoint */ { "c", "T0*", }, /* Continue */ { "g", "kgdbts_break_test", NULL, check_and_rewind_pc }, { "write", "OK", write_regs }, { "kgdbts_break_test", "OK", sw_rem_break }, /*remove breakpoint */ { "D", "OK" }, /* Detach */ { "D", "OK", NULL, got_break }, /* On success we made it here */ { "", "" }, }; /* * Test a known bad memory read location to test the fault handler and * read bytes 1-8 at the bad address */ static struct test_struct bad_read_test[] = { { "?", "S0*" }, /* Clear break points */ { "m0,1", "E*" }, /* read 1 byte at address 1 */ { "m0,2", "E*" }, /* read 1 byte at address 2 */ { "m0,3", "E*" }, /* read 1 byte at address 3 */ { "m0,4", "E*" }, /* read 1 byte at address 4 */ { "m0,5", "E*" }, /* read 1 byte at address 5 */ { "m0,6", "E*" }, /* read 1 byte at address 6 */ { "m0,7", "E*" }, /* read 1 byte at address 7 */ { "m0,8", "E*" }, /* read 1 byte at address 8 */ { "D", "OK" }, /* Detach which removes all breakpoints and continues */ { "", "" }, }; /* * Test for hitting a breakpoint, remove it, single step, plant it * again and detach. 
*/ static struct test_struct singlestep_break_test[] = { { "?", "S0*" }, /* Clear break points */ { "kgdbts_break_test", "OK", sw_break, }, /* set sw breakpoint */ { "c", "T0*", NULL, get_thread_id_continue }, /* Continue */ { "kgdbts_break_test", "OK", sw_rem_break }, /*remove breakpoint */ { "g", "kgdbts_break_test", NULL, check_and_rewind_pc }, { "write", "OK", write_regs }, /* Write registers */ { "s", "T0*", emul_sstep_get, emul_sstep_put }, /* Single step */ { "g", "kgdbts_break_test", NULL, check_single_step }, { "kgdbts_break_test", "OK", sw_break, }, /* set sw breakpoint */ { "c", "T0*", }, /* Continue */ { "g", "kgdbts_break_test", NULL, check_and_rewind_pc }, { "write", "OK", write_regs }, /* Write registers */ { "D", "OK" }, /* Remove all breakpoints and continues */ { "", "" }, }; /* * Test for hitting a breakpoint at do_fork for what ever the number * of iterations required by the variable repeat_test. */ static struct test_struct do_fork_test[] = { { "?", "S0*" }, /* Clear break points */ { "do_fork", "OK", sw_break, }, /* set sw breakpoint */ { "c", "T0*", NULL, get_thread_id_continue }, /* Continue */ { "do_fork", "OK", sw_rem_break }, /*remove breakpoint */ { "g", "do_fork", NULL, check_and_rewind_pc }, /* check location */ { "write", "OK", write_regs, emul_reset }, /* Write registers */ { "s", "T0*", emul_sstep_get, emul_sstep_put }, /* Single step */ { "g", "do_fork", NULL, check_single_step }, { "do_fork", "OK", sw_break, }, /* set sw breakpoint */ { "7", "T0*", skip_back_repeat_test }, /* Loop based on repeat_test */ { "D", "OK", NULL, final_ack_set }, /* detach and unregister I/O */ { "", "", get_cont_catch, put_cont_catch }, }; /* Test for hitting a breakpoint at sys_open for what ever the number * of iterations required by the variable repeat_test. 
*/ static struct test_struct sys_open_test[] = { { "?", "S0*" }, /* Clear break points */ { "sys_open", "OK", sw_break, }, /* set sw breakpoint */ { "c", "T0*", NULL, get_thread_id_continue }, /* Continue */ { "sys_open", "OK", sw_rem_break }, /*remove breakpoint */ { "g", "sys_open", NULL, check_and_rewind_pc }, /* check location */ { "write", "OK", write_regs, emul_reset }, /* Write registers */ { "s", "T0*", emul_sstep_get, emul_sstep_put }, /* Single step */ { "g", "sys_open", NULL, check_single_step }, { "sys_open", "OK", sw_break, }, /* set sw breakpoint */ { "7", "T0*", skip_back_repeat_test }, /* Loop based on repeat_test */ { "D", "OK", NULL, final_ack_set }, /* detach and unregister I/O */ { "", "", get_cont_catch, put_cont_catch }, }; /* * Test for hitting a simple hw breakpoint */ static struct test_struct hw_breakpoint_test[] = { { "?", "S0*" }, /* Clear break points */ { "kgdbts_break_test", "OK", hw_break, }, /* set hw breakpoint */ { "c", "T0*", }, /* Continue */ { "g", "kgdbts_break_test", NULL, check_and_rewind_pc }, { "write", "OK", write_regs }, { "kgdbts_break_test", "OK", hw_rem_break }, /*remove breakpoint */ { "D", "OK" }, /* Detach */ { "D", "OK", NULL, got_break }, /* On success we made it here */ { "", "" }, }; /* * Test for hitting a hw write breakpoint */ static struct test_struct hw_write_break_test[] = { { "?", "S0*" }, /* Clear break points */ { "hw_break_val", "OK", hw_write_break, }, /* set hw breakpoint */ { "c", "T0*", NULL, got_break }, /* Continue */ { "g", "silent", NULL, check_and_rewind_pc }, { "write", "OK", write_regs }, { "hw_break_val", "OK", hw_rem_write_break }, /*remove breakpoint */ { "D", "OK" }, /* Detach */ { "D", "OK", NULL, got_break }, /* On success we made it here */ { "", "" }, }; /* * Test for hitting a hw access breakpoint */ static struct test_struct hw_access_break_test[] = { { "?", "S0*" }, /* Clear break points */ { "hw_break_val", "OK", hw_access_break, }, /* set hw breakpoint */ { "c", "T0*", NULL, 
got_break }, /* Continue */ { "g", "silent", NULL, check_and_rewind_pc }, { "write", "OK", write_regs }, { "hw_break_val", "OK", hw_rem_access_break }, /*remove breakpoint */ { "D", "OK" }, /* Detach */ { "D", "OK", NULL, got_break }, /* On success we made it here */ { "", "" }, }; /* * Test for hitting a hw access breakpoint */ static struct test_struct nmi_sleep_test[] = { { "?", "S0*" }, /* Clear break points */ { "c", "T0*", NULL, got_break }, /* Continue */ { "D", "OK" }, /* Detach */ { "D", "OK", NULL, got_break }, /* On success we made it here */ { "", "" }, }; static void fill_get_buf(char *buf) { unsigned char checksum = 0; int count = 0; char ch; strcpy(get_buf, "$"); strcat(get_buf, buf); while ((ch = buf[count])) { checksum += ch; count++; } strcat(get_buf, "#"); get_buf[count + 2] = hex_asc_hi(checksum); get_buf[count + 3] = hex_asc_lo(checksum); get_buf[count + 4] = '\0'; v2printk("get%i: %s\n", ts.idx, get_buf); } static int validate_simple_test(char *put_str) { char *chk_str; if (ts.tst[ts.idx].put_handler) return ts.tst[ts.idx].put_handler(put_str, ts.tst[ts.idx].put); chk_str = ts.tst[ts.idx].put; if (*put_str == '$') put_str++; while (*chk_str != '\0' && *put_str != '\0') { /* If someone does a * to match the rest of the string, allow * it, or stop if the received string is complete. */ if (*put_str == '#' || *chk_str == '*') return 0; if (*put_str != *chk_str) return 1; chk_str++; put_str++; } if (*chk_str == '\0' && (*put_str == '\0' || *put_str == '#')) return 0; return 1; } static int run_simple_test(int is_get_char, int chr) { int ret = 0; if (is_get_char) { /* Send an ACK on the get if a prior put completed and set the * send ack variable */ if (send_ack) { send_ack = 0; return '+'; } /* On the first get char, fill the transmit buffer and then * take from the get_string. 
*/ if (get_buf_cnt == 0) { if (ts.tst[ts.idx].get_handler) ts.tst[ts.idx].get_handler(ts.tst[ts.idx].get); else fill_get_buf(ts.tst[ts.idx].get); } if (get_buf[get_buf_cnt] == '\0') { eprintk("kgdbts: ERROR GET: EOB on '%s' at %i\n", ts.name, ts.idx); get_buf_cnt = 0; fill_get_buf("D"); } ret = get_buf[get_buf_cnt]; get_buf_cnt++; return ret; } /* This callback is a put char which is when kgdb sends data to * this I/O module. */ if (ts.tst[ts.idx].get[0] == '\0' && ts.tst[ts.idx].put[0] == '\0' && !ts.tst[ts.idx].get_handler) { eprintk("kgdbts: ERROR: beyond end of test on" " '%s' line %i\n", ts.name, ts.idx); return 0; } if (put_buf_cnt >= BUFMAX) { eprintk("kgdbts: ERROR: put buffer overflow on" " '%s' line %i\n", ts.name, ts.idx); put_buf_cnt = 0; return 0; } /* Ignore everything until the first valid packet start '$' */ if (put_buf_cnt == 0 && chr != '$') return 0; put_buf[put_buf_cnt] = chr; put_buf_cnt++; /* End of packet == #XX so look for the '#' */ if (put_buf_cnt > 3 && put_buf[put_buf_cnt - 3] == '#') { if (put_buf_cnt >= BUFMAX) { eprintk("kgdbts: ERROR: put buffer overflow on" " '%s' line %i\n", ts.name, ts.idx); put_buf_cnt = 0; return 0; } put_buf[put_buf_cnt] = '\0'; v2printk("put%i: %s\n", ts.idx, put_buf); /* Trigger check here */ if (ts.validate_put && ts.validate_put(put_buf)) { eprintk("kgdbts: ERROR PUT: end of test " "buffer on '%s' line %i expected %s got %s\n", ts.name, ts.idx, ts.tst[ts.idx].put, put_buf); } ts.idx++; put_buf_cnt = 0; get_buf_cnt = 0; send_ack = 1; } return 0; } static void init_simple_test(void) { memset(&ts, 0, sizeof(ts)); ts.run_test = run_simple_test; ts.validate_put = validate_simple_test; } static void run_plant_and_detach_test(int is_early) { char before[BREAK_INSTR_SIZE]; char after[BREAK_INSTR_SIZE]; probe_kernel_read(before, (char *)kgdbts_break_test, BREAK_INSTR_SIZE); init_simple_test(); ts.tst = plant_and_detach_test; ts.name = "plant_and_detach_test"; /* Activate test with initial breakpoint */ if 
(!is_early) kgdb_breakpoint(); probe_kernel_read(after, (char *)kgdbts_break_test, BREAK_INSTR_SIZE); if (memcmp(before, after, BREAK_INSTR_SIZE)) { printk(KERN_CRIT "kgdbts: ERROR kgdb corrupted memory\n"); panic("kgdb memory corruption"); } /* complete the detach test */ if (!is_early) kgdbts_break_test(); } static void run_breakpoint_test(int is_hw_breakpoint) { test_complete = 0; init_simple_test(); if (is_hw_breakpoint) { ts.tst = hw_breakpoint_test; ts.name = "hw_breakpoint_test"; } else { ts.tst = sw_breakpoint_test; ts.name = "sw_breakpoint_test"; } /* Activate test with initial breakpoint */ kgdb_breakpoint(); /* run code with the break point in it */ kgdbts_break_test(); kgdb_breakpoint(); if (test_complete) return; eprintk("kgdbts: ERROR %s test failed\n", ts.name); if (is_hw_breakpoint) hwbreaks_ok = 0; } static void run_hw_break_test(int is_write_test) { test_complete = 0; init_simple_test(); if (is_write_test) { ts.tst = hw_write_break_test; ts.name = "hw_write_break_test"; } else { ts.tst = hw_access_break_test; ts.name = "hw_access_break_test"; } /* Activate test with initial breakpoint */ kgdb_breakpoint(); hw_break_val_access(); if (is_write_test) { if (test_complete == 2) { eprintk("kgdbts: ERROR %s broke on access\n", ts.name); hwbreaks_ok = 0; } hw_break_val_write(); } kgdb_breakpoint(); if (test_complete == 1) return; eprintk("kgdbts: ERROR %s test failed\n", ts.name); hwbreaks_ok = 0; } static void run_nmi_sleep_test(int nmi_sleep) { unsigned long flags; init_simple_test(); ts.tst = nmi_sleep_test; ts.name = "nmi_sleep_test"; /* Activate test with initial breakpoint */ kgdb_breakpoint(); local_irq_save(flags); mdelay(nmi_sleep*1000); touch_nmi_watchdog(); local_irq_restore(flags); if (test_complete != 2) eprintk("kgdbts: ERROR nmi_test did not hit nmi\n"); kgdb_breakpoint(); if (test_complete == 1) return; eprintk("kgdbts: ERROR %s test failed\n", ts.name); } static void run_bad_read_test(void) { init_simple_test(); ts.tst = bad_read_test; 
ts.name = "bad_read_test"; /* Activate test with initial breakpoint */ kgdb_breakpoint(); } static void run_do_fork_test(void) { init_simple_test(); ts.tst = do_fork_test; ts.name = "do_fork_test"; /* Activate test with initial breakpoint */ kgdb_breakpoint(); } static void run_sys_open_test(void) { init_simple_test(); ts.tst = sys_open_test; ts.name = "sys_open_test"; /* Activate test with initial breakpoint */ kgdb_breakpoint(); } static void run_singlestep_break_test(void) { init_simple_test(); ts.tst = singlestep_break_test; ts.name = "singlestep_breakpoint_test"; /* Activate test with initial breakpoint */ kgdb_breakpoint(); kgdbts_break_test(); kgdbts_break_test(); } static void kgdbts_run_tests(void) { char *ptr; int fork_test = 0; int do_sys_open_test = 0; int sstep_test = 1000; int nmi_sleep = 0; int i; ptr = strchr(config, 'F'); if (ptr) fork_test = simple_strtol(ptr + 1, NULL, 10); ptr = strchr(config, 'S'); if (ptr) do_sys_open_test = simple_strtol(ptr + 1, NULL, 10); ptr = strchr(config, 'N'); if (ptr) nmi_sleep = simple_strtol(ptr+1, NULL, 10); ptr = strchr(config, 'I'); if (ptr) sstep_test = simple_strtol(ptr+1, NULL, 10); /* All HW break point tests */ if (arch_kgdb_ops.flags & KGDB_HW_BREAKPOINT) { hwbreaks_ok = 1; v1printk("kgdbts:RUN hw breakpoint test\n"); run_breakpoint_test(1); v1printk("kgdbts:RUN hw write breakpoint test\n"); run_hw_break_test(1); v1printk("kgdbts:RUN access write breakpoint test\n"); run_hw_break_test(0); } /* required internal KGDB tests */ v1printk("kgdbts:RUN plant and detach test\n"); run_plant_and_detach_test(0); v1printk("kgdbts:RUN sw breakpoint test\n"); run_breakpoint_test(0); v1printk("kgdbts:RUN bad memory access test\n"); run_bad_read_test(); v1printk("kgdbts:RUN singlestep test %i iterations\n", sstep_test); for (i = 0; i < sstep_test; i++) { run_singlestep_break_test(); if (i % 100 == 0) v1printk("kgdbts:RUN singlestep [%i/%i]\n", i, sstep_test); } /* ===Optional tests=== */ if (nmi_sleep) { 
v1printk("kgdbts:RUN NMI sleep %i seconds test\n", nmi_sleep); run_nmi_sleep_test(nmi_sleep); } /* If the do_fork test is run it will be the last test that is * executed because a kernel thread will be spawned at the very * end to unregister the debug hooks. */ if (fork_test) { repeat_test = fork_test; printk(KERN_INFO "kgdbts:RUN do_fork for %i breakpoints\n", repeat_test); kthread_run(kgdbts_unreg_thread, NULL, "kgdbts_unreg"); run_do_fork_test(); return; } /* If the sys_open test is run it will be the last test that is * executed because a kernel thread will be spawned at the very * end to unregister the debug hooks. */ if (do_sys_open_test) { repeat_test = do_sys_open_test; printk(KERN_INFO "kgdbts:RUN sys_open for %i breakpoints\n", repeat_test); kthread_run(kgdbts_unreg_thread, NULL, "kgdbts_unreg"); run_sys_open_test(); return; } /* Shutdown and unregister */ kgdb_unregister_io_module(&kgdbts_io_ops); configured = 0; } static int kgdbts_option_setup(char *opt) { if (strlen(opt) >= MAX_CONFIG_LEN) { printk(KERN_ERR "kgdbts: config string too long\n"); return -ENOSPC; } strcpy(config, opt); verbose = 0; if (strstr(config, "V1")) verbose = 1; if (strstr(config, "V2")) verbose = 2; return 0; } __setup("kgdbts=", kgdbts_option_setup); static int configure_kgdbts(void) { int err = 0; if (!strlen(config) || isspace(config[0])) goto noconfig; err = kgdbts_option_setup(config); if (err) goto noconfig; final_ack = 0; run_plant_and_detach_test(1); err = kgdb_register_io_module(&kgdbts_io_ops); if (err) { configured = 0; return err; } configured = 1; kgdbts_run_tests(); return err; noconfig: config[0] = 0; configured = 0; return err; } static int __init init_kgdbts(void) { /* Already configured? 
*/ if (configured == 1) return 0; return configure_kgdbts(); } static int kgdbts_get_char(void) { int val = 0; if (ts.run_test) val = ts.run_test(1, 0); return val; } static void kgdbts_put_char(u8 chr) { if (ts.run_test) ts.run_test(0, chr); } static int param_set_kgdbts_var(const char *kmessage, struct kernel_param *kp) { int len = strlen(kmessage); if (len >= MAX_CONFIG_LEN) { printk(KERN_ERR "kgdbts: config string too long\n"); return -ENOSPC; } /* Only copy in the string if the init function has not run yet */ if (configured < 0) { strcpy(config, kmessage); return 0; } if (configured == 1) { printk(KERN_ERR "kgdbts: ERROR: Already configured and running.\n"); return -EBUSY; } strcpy(config, kmessage); /* Chop out \n char as a result of echo */ if (config[len - 1] == '\n') config[len - 1] = '\0'; /* Go and configure with the new params. */ return configure_kgdbts(); } static void kgdbts_pre_exp_handler(void) { /* Increment the module count when the debugger is active */ if (!kgdb_connected) try_module_get(THIS_MODULE); } static void kgdbts_post_exp_handler(void) { /* decrement the module count when the debugger detaches */ if (!kgdb_connected) module_put(THIS_MODULE); } static struct kgdb_io kgdbts_io_ops = { .name = "kgdbts", .read_char = kgdbts_get_char, .write_char = kgdbts_put_char, .pre_exception = kgdbts_pre_exp_handler, .post_exception = kgdbts_post_exp_handler, }; module_init(init_kgdbts); module_param_call(kgdbts, param_set_kgdbts_var, param_get_string, &kps, 0644); MODULE_PARM_DESC(kgdbts, "<A|V1|V2>[F#|S#][N#]"); MODULE_DESCRIPTION("KGDB Test Suite"); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Wind River Systems, Inc.");
gpl-2.0
intervigilium/android_kernel_google_msm
arch/arm/mach-shmobile/platsmp.c
4589
2023
/*
 * SMP support for R-Mobile / SH-Mobile
 *
 * Copyright (C) 2010 Magnus Damm
 * Copyright (C) 2011 Paul Mundt
 *
 * Based on vexpress, Copyright (C) 2002 ARM Ltd, All Rights Reserved
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/smp.h>
#include <linux/io.h>
#include <asm/hardware/gic.h>
#include <asm/mach-types.h>
#include <mach/common.h>

/*
 * SoC detection is board-based: sh73a0 is used on the AG5EVM and KOTA2
 * boards, r8a7779 on Marzen.  Exactly one (or neither) of these is true
 * on a given machine.
 */
#define is_sh73a0() (machine_is_ag5evm() || machine_is_kota2())
#define is_r8a7779() machine_is_marzen()

/*
 * Ask the detected SoC for its CPU core count; fall back to 1 (UP)
 * when the board is not recognized.
 */
static unsigned int __init shmobile_smp_get_core_count(void)
{
	if (is_sh73a0())
		return sh73a0_get_core_count();

	if (is_r8a7779())
		return r8a7779_get_core_count();

	return 1;
}

/* Dispatch SMP bring-up preparation to the SoC-specific implementation. */
static void __init shmobile_smp_prepare_cpus(void)
{
	if (is_sh73a0())
		sh73a0_smp_prepare_cpus();

	if (is_r8a7779())
		r8a7779_smp_prepare_cpus();
}

/*
 * CPU-hotplug kill hook.  Only r8a7779 has a SoC-specific kill path;
 * other SoCs report success (1) unconditionally.
 */
int shmobile_platform_cpu_kill(unsigned int cpu)
{
	if (is_r8a7779())
		return r8a7779_platform_cpu_kill(cpu);

	return 1;
}

/* Per-CPU init run on the secondary core itself, early in its boot. */
void __cpuinit platform_secondary_init(unsigned int cpu)
{
	trace_hardirqs_off();

	if (is_sh73a0())
		sh73a0_secondary_init(cpu);

	if (is_r8a7779())
		r8a7779_secondary_init(cpu);
}

/*
 * Release/boot one secondary CPU via the SoC-specific mechanism.
 * Returns -ENOSYS when the SoC has no boot method (stays UP).
 */
int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	if (is_sh73a0())
		return sh73a0_boot_secondary(cpu);

	if (is_r8a7779())
		return r8a7779_boot_secondary(cpu);

	return -ENOSYS;
}

/*
 * Populate cpu_possible from the SoC core count (clipped to nr_cpu_ids)
 * and register the GIC softirq raiser as the IPI cross-call.
 */
void __init smp_init_cpus(void)
{
	unsigned int ncores = shmobile_smp_get_core_count();
	unsigned int i;

	if (ncores > nr_cpu_ids) {
		pr_warn("SMP: %u cores greater than maximum (%u), clipping\n",
			ncores, nr_cpu_ids);
		ncores = nr_cpu_ids;
	}

	for (i = 0; i < ncores; i++)
		set_cpu_possible(i, true);

	set_smp_cross_call(gic_raise_softirq);
}

void __init platform_smp_prepare_cpus(unsigned int max_cpus)
{
	shmobile_smp_prepare_cpus();
}
gpl-2.0
mrimp/SM-N910T_Kernel
sound/oss/uart6850.c
4589
7241
/*
 * sound/oss/uart6850.c
 *
 *
 * Copyright (C) by Hannu Savolainen 1993-1997
 *
 * OSS/Free for Linux is distributed under the GNU GENERAL PUBLIC LICENSE (GPL)
 * Version 2 (June 1991). See the "COPYING" file distributed with this software
 * for more info.
 * Extended by Alan Cox for Red Hat Software. Now a loadable MIDI driver.
 * 28/4/97 - (C) Copyright Alan Cox. Released under the GPL version 2.
 *
 * Alan Cox: Updated for new modular code. Removed snd_* irq handling. Now
 * uses native linux resources
 * Christoph Hellwig: Adapted to module_init/module_exit
 * Jeff Garzik: Made it work again, in theory
 * FIXME: If the request_irq() succeeds, the probe succeeds. Ug.
 *
 * Status: Testing required (no shit -jgarzik)
 */

#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/spinlock.h>

/* Mon Nov 22 22:38:35 MET 1993 marco@driq.home.usn.nl:
 * added 6850 support, used with COVOX SoundMaster II and custom cards.
 */

#include "sound_config.h"

/* I/O base of the 6850; overwritten from the "io" module parameter at probe */
static int uart6850_base = 0x330;

static int *uart6850_osp;

/* Register map: a data port plus a combined command/status port at base+1 */
#define DATAPORT (uart6850_base)
#define COMDPORT (uart6850_base+1)
#define STATPORT (uart6850_base+1)

/* Read the raw 6850 status register. */
static int uart6850_status(void)
{
	return inb(STATPORT);
}

#define input_avail() (uart6850_status()&INPUT_AVAIL)
#define output_ready() (uart6850_status()&OUTPUT_READY)

/* Write a command byte to the 6850 control register. */
static void uart6850_cmd(unsigned char cmd)
{
	outb(cmd, COMDPORT);
}

/* Read one received MIDI byte from the data register. */
static int uart6850_read(void)
{
	return inb(DATAPORT);
}

/* Write one MIDI byte to the data register. */
static void uart6850_write(unsigned char byte)
{
	outb(byte, DATAPORT);
}

#define OUTPUT_READY	0x02	/* Mask for data ready Bit */
#define INPUT_AVAIL	0x01	/* Mask for Data Send Ready Bit */
#define UART_RESET	0x95
#define UART_MODE_ON	0x03

static int uart6850_opened;	/* OPEN_READ/OPEN_WRITE flags, 0 when closed */
static int uart6850_irq;
static int uart6850_detected;
static int my_dev;		/* OSS midi device number from sound_alloc_mididev() */

static DEFINE_SPINLOCK(lock);

static void (*midi_input_intr) (int dev, unsigned char data);
static void poll_uart6850(unsigned long dummy);

static DEFINE_TIMER(uart6850_timer, poll_uart6850, 0, 0);

/*
 * Drain pending receive bytes and hand them to the registered input
 * callback.  "count" acts as a crude timeout so we never spin forever
 * when no more data arrives.
 */
static void uart6850_input_loop(void)
{
	int count = 10;

	while (count)
	{
		/*
		 * Not timed out
		 */
		if (input_avail())
		{
			unsigned char c = uart6850_read();
			count = 100;	/* refresh the timeout on each byte */
			if (uart6850_opened & OPEN_READ)
				midi_input_intr(my_dev, c);
		}
		else
		{
			while (!input_avail() && count)
				count--;
		}
	}
}

static irqreturn_t m6850intr(int irq, void *dev_id)
{
	if (input_avail())
		uart6850_input_loop();
	return IRQ_HANDLED;
}

/*
 * It looks like there is no input interrupts in the UART mode. Let's try
 * polling.
 */

static void poll_uart6850(unsigned long dummy)
{
	unsigned long flags;

	if (!(uart6850_opened & OPEN_READ))
		return;		/* Device has been closed */

	spin_lock_irqsave(&lock,flags);
	if (input_avail())
		uart6850_input_loop();

	/* re-arm for the next jiffy while the device stays open */
	uart6850_timer.expires = 1 + jiffies;
	add_timer(&uart6850_timer);
	/*
	 * Come back later
	 */
	spin_unlock_irqrestore(&lock,flags);
}

/*
 * Open the MIDI device: reset the UART, flush stale input, register the
 * input callback and start the polling timer.  Returns -EBUSY when the
 * device is already open (single-open device).
 */
static int uart6850_open(int dev, int mode,
	     void            (*input) (int dev, unsigned char data),
	     void            (*output) (int dev)
)
{
	if (uart6850_opened)
	{
/*		printk("Midi6850: Midi busy\n");*/
		return -EBUSY;
	}

	uart6850_cmd(UART_RESET);
	uart6850_input_loop();	/* flush any stale receive data */
	midi_input_intr = input;
	uart6850_opened = mode;
	poll_uart6850(0);	/*
				 * Enable input polling
				 */
	return 0;
}

static void uart6850_close(int dev)
{
	uart6850_cmd(UART_MODE_ON);
	del_timer(&uart6850_timer);
	uart6850_opened = 0;
}

/*
 * Transmit one MIDI byte.  Returns 1 on success, 0 on transmit-ready
 * timeout (byte is dropped in that case).
 */
static int uart6850_out(int dev, unsigned char midi_byte)
{
	int timeout;
	unsigned long flags;

	/*
	 * Test for input since pending input seems to block the output.
	 */

	spin_lock_irqsave(&lock,flags);

	if (input_avail())
		uart6850_input_loop();

	spin_unlock_irqrestore(&lock,flags);

	/*
	 * Sometimes it takes about 13000 loops before the output becomes ready
	 * (After reset). Normally it takes just about 10 loops.
	 */

	for (timeout = 30000; timeout > 0 && !output_ready(); timeout--);	/*
										 * Wait
										 */
	if (!output_ready())
	{
		printk(KERN_WARNING "Midi6850: Timeout\n");
		return 0;
	}
	uart6850_write(midi_byte);
	return 1;
}

/* The 6850 needs no command phase; report "handled". */
static inline int uart6850_command(int dev, unsigned char *midi_byte)
{
	return 1;
}

static inline int uart6850_start_read(int dev)
{
	return 0;
}

static inline int uart6850_end_read(int dev)
{
	return 0;
}

static inline void uart6850_kick(int dev)
{
}

static inline int uart6850_buffer_status(int dev)
{
	return 0;		/*
				 * No data in buffers
				 */
}

#define MIDI_SYNTH_NAME	"6850 UART Midi"
#define MIDI_SYNTH_CAPS	SYNTH_CAP_INPUT
#include "midi_synth.h"

static struct midi_operations uart6850_operations =
{
	.owner		= THIS_MODULE,
	.info		= {"6850 UART", 0, 0, SNDCARD_UART6850},
	.converter	= &std_midi_synth,
	.in_info	= {0},
	.open		= uart6850_open,
	.close		= uart6850_close,
	.outputc	= uart6850_out,
	.start_read	= uart6850_start_read,
	.end_read	= uart6850_end_read,
	.kick		= uart6850_kick,
	.command	= uart6850_command,
	.buffer_status	= uart6850_buffer_status
};

/*
 * Second-stage attach: allocate an OSS midi device number, switch the
 * UART into MIDI mode and register our midi_operations.  Only runs if
 * probe_uart6850() succeeded (uart6850_detected set).
 */
static void __init attach_uart6850(struct address_info *hw_config)
{
	int timeout;
	unsigned long   flags;

	if (!uart6850_detected)
		return;

	if ((my_dev = sound_alloc_mididev()) == -1)
	{
		printk(KERN_INFO "uart6850: Too many midi devices detected\n");
		return;
	}
	uart6850_base = hw_config->io_base;
	uart6850_osp = hw_config->osp;
	uart6850_irq = hw_config->irq;

	spin_lock_irqsave(&lock,flags);

	for (timeout = 30000; timeout > 0 && !output_ready(); timeout--);	/*
										 * Wait
										 */
	uart6850_cmd(UART_MODE_ON);
	spin_unlock_irqrestore(&lock,flags);
	conf_printf("6850 Midi Interface", hw_config);

	std_midi_synth.midi_dev = my_dev;
	hw_config->slots[4] = my_dev;
	midi_devs[my_dev] = &uart6850_operations;
	sequencer_init();
}

/* "Reset" is just a dummy read; always reports success (see FIXME above). */
static inline int reset_uart6850(void)
{
	uart6850_read();
	return 1;		/*
				 * OK
				 */
}

/*
 * Probe: grab the IRQ and "reset" the chip.  Returns nonzero on
 * success, 0 when request_irq() fails.  On success the IRQ stays
 * allocated and uart6850_detected is set for attach_uart6850().
 */
static int __init probe_uart6850(struct address_info *hw_config)
{
	int ok;

	uart6850_osp = hw_config->osp;
	uart6850_base = hw_config->io_base;
	uart6850_irq = hw_config->irq;

	if (request_irq(uart6850_irq, m6850intr, 0, "MIDI6850", NULL) < 0)
		return 0;

	ok = reset_uart6850();
	uart6850_detected = ok;
	return ok;
}

static void __exit unload_uart6850(struct address_info *hw_config)
{
	free_irq(hw_config->irq, NULL);
	sound_unload_mididev(hw_config->slots[4]);
}

static struct address_info cfg_mpu;

static int __initdata io = -1;
static int __initdata irq = -1;

module_param(io, int, 0);
module_param(irq, int, 0);

static int __init init_uart6850(void)
{
	cfg_mpu.io_base = io;
	cfg_mpu.irq = irq;

	if (cfg_mpu.io_base == -1 || cfg_mpu.irq == -1) {
		printk(KERN_INFO "uart6850: irq and io must be set.\n");
		return -EINVAL;
	}

	/*
	 * probe_uart6850() returns nonzero on SUCCESS (and 0 only when
	 * request_irq() fails), so a successful probe must proceed to
	 * attach.  The previous "if (probe_uart6850(...)) return -ENODEV;"
	 * test was inverted: it aborted module load on a successful probe
	 * (leaking the IRQ taken by the probe) and "succeeded" when the
	 * probe failed.
	 */
	if (!probe_uart6850(&cfg_mpu))
		return -ENODEV;
	attach_uart6850(&cfg_mpu);

	return 0;
}

static void __exit cleanup_uart6850(void)
{
	unload_uart6850(&cfg_mpu);
}

module_init(init_uart6850);
module_exit(cleanup_uart6850);

#ifndef MODULE
/* Parse "uart6850=io,irq" from the kernel command line. */
static int __init setup_uart6850(char *str)
{
	/* io, irq */
	int ints[3];

	str = get_options(str, ARRAY_SIZE(ints), ints);

	io = ints[1];
	irq = ints[2];
	return 1;
}
__setup("uart6850=", setup_uart6850);
#endif
MODULE_LICENSE("GPL");
gpl-2.0
ISTweak/android_kernel_sharp_msm7x30-3.0
net/netfilter/xt_cluster.c
11501
5080
/*
 * (C) 2008-2009 Pablo Neira Ayuso <pablo@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/jhash.h>
#include <linux/ip.h>
#include <net/ipv6.h>

#include <linux/netfilter/x_tables.h>
#include <net/netfilter/nf_conntrack.h>
#include <linux/netfilter/xt_cluster.h>

/* IPv4 source address of the conntrack entry's ORIGINAL direction tuple. */
static inline u32 nf_ct_orig_ipv4_src(const struct nf_conn *ct)
{
	return (__force u32)ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.ip;
}

/* IPv6 source address (as a u32 array) of the ORIGINAL direction tuple. */
static inline const u32 *nf_ct_orig_ipv6_src(const struct nf_conn *ct)
{
	return (__force u32 *)ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.ip6;
}

static inline u_int32_t
xt_cluster_hash_ipv4(u_int32_t ip, const struct xt_cluster_match_info *info)
{
	return jhash_1word(ip, info->hash_seed);
}

static inline u_int32_t
xt_cluster_hash_ipv6(const void *ip, const struct xt_cluster_match_info *info)
{
	return jhash2(ip, NF_CT_TUPLE_L3SIZE / sizeof(__u32), info->hash_seed);
}

/*
 * Map a conntrack entry to a node slot in [0, total_nodes) by hashing
 * the original source address with the user-configured seed.
 */
static inline u_int32_t
xt_cluster_hash(const struct nf_conn *ct,
		const struct xt_cluster_match_info *info)
{
	u_int32_t hash = 0;

	switch(nf_ct_l3num(ct)) {
	case AF_INET:
		hash = xt_cluster_hash_ipv4(nf_ct_orig_ipv4_src(ct), info);
		break;
	case AF_INET6:
		hash = xt_cluster_hash_ipv6(nf_ct_orig_ipv6_src(ct), info);
		break;
	default:
		WARN_ON(1);
		break;
	}
	/* scale the 32-bit hash down to [0, total_nodes) without a division */
	return (((u64)hash * info->total_nodes) >> 32);
}

/* IPv6 multicast test: the destination's first byte is 0xFF. */
static inline bool
xt_cluster_ipv6_is_multicast(const struct in6_addr *addr)
{
	__be32 st = addr->s6_addr32[0];
	return ((st & htonl(0xFF000000)) == htonl(0xFF000000));
}

/* Family-dispatched multicast test on the packet's destination address. */
static inline bool
xt_cluster_is_multicast_addr(const struct sk_buff *skb, u_int8_t family)
{
	bool is_multicast = false;

	switch(family) {
	case NFPROTO_IPV4:
		is_multicast = ipv4_is_multicast(ip_hdr(skb)->daddr);
		break;
	case NFPROTO_IPV6:
		is_multicast =
			xt_cluster_ipv6_is_multicast(&ipv6_hdr(skb)->daddr);
		break;
	default:
		WARN_ON(1);
		break;
	}
	return is_multicast;
}

/*
 * Match function: returns true when this packet's connection hashes to
 * one of the node slots in info->node_mask (XOR-inverted by the
 * XT_CLUSTER_F_INV flag).  Untracked or unknown connections never match.
 */
static bool
xt_cluster_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
	struct sk_buff *pskb = (struct sk_buff *)skb;
	const struct xt_cluster_match_info *info = par->matchinfo;
	const struct nf_conn *ct;
	enum ip_conntrack_info ctinfo;
	unsigned long hash;

	/* This match assumes that all nodes see the same packets. This can be
	 * achieved if the switch that connects the cluster nodes support some
	 * sort of 'port mirroring'. However, if your switch does not support
	 * this, your cluster nodes can reply ARP request using a multicast MAC
	 * address. Thus, your switch will flood the same packets to the
	 * cluster nodes with the same multicast MAC address. Using a multicast
	 * link address is a RFC 1812 (section 3.3.2) violation, but this works
	 * fine in practise.
	 *
	 * Unfortunately, if you use the multicast MAC address, the link layer
	 * sets skbuff's pkt_type to PACKET_MULTICAST, which is not accepted
	 * by TCP and others for packets coming to this node. For that reason,
	 * this match mangles skbuff's pkt_type if it detects a packet
	 * addressed to a unicast address but using PACKET_MULTICAST. Yes, I
	 * know, matches should not alter packets, but we are doing this here
	 * because we would need to add a PKTTYPE target for this sole purpose.
	 */
	if (!xt_cluster_is_multicast_addr(skb, par->family) &&
	    skb->pkt_type == PACKET_MULTICAST) {
		pskb->pkt_type = PACKET_HOST;
	}

	ct = nf_ct_get(skb, &ctinfo);
	if (ct == NULL)
		return false;

	if (nf_ct_is_untracked(ct))
		return false;
	/* hash the master entry so related/expected connections land on the
	 * same cluster node as their parent connection */
	if (ct->master)
		hash = xt_cluster_hash(ct->master, info);
	else
		hash = xt_cluster_hash(ct, info);

	return !!((1 << hash) & info->node_mask) ^
	       !!(info->flags & XT_CLUSTER_F_INV);
}

/*
 * Rule-insertion sanity checks: node count within the compiled-in
 * maximum, and node_mask only selects existing node slots.
 */
static int xt_cluster_mt_checkentry(const struct xt_mtchk_param *par)
{
	struct xt_cluster_match_info *info = par->matchinfo;

	if (info->total_nodes > XT_CLUSTER_NODES_MAX) {
		pr_info("you have exceeded the maximum "
			"number of cluster nodes (%u > %u)\n",
			info->total_nodes, XT_CLUSTER_NODES_MAX);
		return -EINVAL;
	}
	if (info->node_mask >= (1ULL << info->total_nodes)) {
		pr_info("this node mask cannot be "
			"higher than the total number of nodes\n");
		return -EDOM;
	}
	return 0;
}

static struct xt_match xt_cluster_match __read_mostly = {
	.name		= "cluster",
	.family		= NFPROTO_UNSPEC,
	.match		= xt_cluster_mt,
	.checkentry	= xt_cluster_mt_checkentry,
	.matchsize	= sizeof(struct xt_cluster_match_info),
	.me		= THIS_MODULE,
};

static int __init xt_cluster_mt_init(void)
{
	return xt_register_match(&xt_cluster_match);
}

static void __exit xt_cluster_mt_fini(void)
{
	xt_unregister_match(&xt_cluster_match);
}

MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Xtables: hash-based cluster match");
MODULE_ALIAS("ipt_cluster");
MODULE_ALIAS("ip6t_cluster");
module_init(xt_cluster_mt_init);
module_exit(xt_cluster_mt_fini);
gpl-2.0
jgcaap/NewKernel
sound/pci/echoaudio/gina20_dsp.c
12525
5668
/**************************************************************************** Copyright Echo Digital Audio Corporation (c) 1998 - 2004 All rights reserved www.echoaudio.com This file is part of Echo Digital Audio's generic driver library. Echo Digital Audio's generic driver library is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. ************************************************************************* Translation from C++ and adaptation for use in ALSA-Driver were made by Giuliano Pochini <pochini@shiny.it> ****************************************************************************/ static int set_professional_spdif(struct echoaudio *chip, char prof); static int update_flags(struct echoaudio *chip); static int init_hw(struct echoaudio *chip, u16 device_id, u16 subdevice_id) { int err; DE_INIT(("init_hw() - Gina20\n")); if (snd_BUG_ON((subdevice_id & 0xfff0) != GINA20)) return -ENODEV; if ((err = init_dsp_comm_page(chip))) { DE_INIT(("init_hw - could not initialize DSP comm page\n")); return err; } chip->device_id = device_id; chip->subdevice_id = subdevice_id; chip->bad_board = TRUE; chip->dsp_code_to_load = FW_GINA20_DSP; chip->spdif_status = GD_SPDIF_STATUS_UNDEF; chip->clock_state = GD_CLOCK_UNDEF; /* Since this card has no ASIC, mark it as loaded so everything works OK */ chip->asic_loaded = TRUE; chip->input_clock_types = ECHO_CLOCK_BIT_INTERNAL | ECHO_CLOCK_BIT_SPDIF; if ((err = load_firmware(chip)) < 0) return err; 
chip->bad_board = FALSE; DE_INIT(("init_hw done\n")); return err; } static int set_mixer_defaults(struct echoaudio *chip) { chip->professional_spdif = FALSE; return init_line_levels(chip); } static u32 detect_input_clocks(const struct echoaudio *chip) { u32 clocks_from_dsp, clock_bits; /* Map the DSP clock detect bits to the generic driver clock detect bits */ clocks_from_dsp = le32_to_cpu(chip->comm_page->status_clocks); clock_bits = ECHO_CLOCK_BIT_INTERNAL; if (clocks_from_dsp & GLDM_CLOCK_DETECT_BIT_SPDIF) clock_bits |= ECHO_CLOCK_BIT_SPDIF; return clock_bits; } /* The Gina20 has no ASIC. Just do nothing */ static int load_asic(struct echoaudio *chip) { return 0; } static int set_sample_rate(struct echoaudio *chip, u32 rate) { u8 clock_state, spdif_status; if (wait_handshake(chip)) return -EIO; switch (rate) { case 44100: clock_state = GD_CLOCK_44; spdif_status = GD_SPDIF_STATUS_44; break; case 48000: clock_state = GD_CLOCK_48; spdif_status = GD_SPDIF_STATUS_48; break; default: clock_state = GD_CLOCK_NOCHANGE; spdif_status = GD_SPDIF_STATUS_NOCHANGE; break; } if (chip->clock_state == clock_state) clock_state = GD_CLOCK_NOCHANGE; if (spdif_status == chip->spdif_status) spdif_status = GD_SPDIF_STATUS_NOCHANGE; chip->comm_page->sample_rate = cpu_to_le32(rate); chip->comm_page->gd_clock_state = clock_state; chip->comm_page->gd_spdif_status = spdif_status; chip->comm_page->gd_resampler_state = 3; /* magic number - should always be 3 */ /* Save the new audio state if it changed */ if (clock_state != GD_CLOCK_NOCHANGE) chip->clock_state = clock_state; if (spdif_status != GD_SPDIF_STATUS_NOCHANGE) chip->spdif_status = spdif_status; chip->sample_rate = rate; clear_handshake(chip); return send_vector(chip, DSP_VC_SET_GD_AUDIO_STATE); } static int set_input_clock(struct echoaudio *chip, u16 clock) { DE_ACT(("set_input_clock:\n")); switch (clock) { case ECHO_CLOCK_INTERNAL: /* Reset the audio state to unknown (just in case) */ chip->clock_state = GD_CLOCK_UNDEF; 
chip->spdif_status = GD_SPDIF_STATUS_UNDEF; set_sample_rate(chip, chip->sample_rate); chip->input_clock = clock; DE_ACT(("Set Gina clock to INTERNAL\n")); break; case ECHO_CLOCK_SPDIF: chip->comm_page->gd_clock_state = GD_CLOCK_SPDIFIN; chip->comm_page->gd_spdif_status = GD_SPDIF_STATUS_NOCHANGE; clear_handshake(chip); send_vector(chip, DSP_VC_SET_GD_AUDIO_STATE); chip->clock_state = GD_CLOCK_SPDIFIN; DE_ACT(("Set Gina20 clock to SPDIF\n")); chip->input_clock = clock; break; default: return -EINVAL; } return 0; } /* Set input bus gain (one unit is 0.5dB !) */ static int set_input_gain(struct echoaudio *chip, u16 input, int gain) { if (snd_BUG_ON(input >= num_busses_in(chip))) return -EINVAL; if (wait_handshake(chip)) return -EIO; chip->input_gain[input] = gain; gain += GL20_INPUT_GAIN_MAGIC_NUMBER; chip->comm_page->line_in_level[input] = gain; return 0; } /* Tell the DSP to reread the flags from the comm page */ static int update_flags(struct echoaudio *chip) { if (wait_handshake(chip)) return -EIO; clear_handshake(chip); return send_vector(chip, DSP_VC_UPDATE_FLAGS); } static int set_professional_spdif(struct echoaudio *chip, char prof) { DE_ACT(("set_professional_spdif %d\n", prof)); if (prof) chip->comm_page->flags |= cpu_to_le32(DSP_FLAG_PROFESSIONAL_SPDIF); else chip->comm_page->flags &= ~cpu_to_le32(DSP_FLAG_PROFESSIONAL_SPDIF); chip->professional_spdif = prof; return update_flags(chip); }
gpl-2.0
Mystic-Mirage/android_kernel_gigabyte_roma_r2_plus
arch/sh/drivers/pci/ops-dreamcast.c
13805
2641
/* * PCI operations for the Sega Dreamcast * * Copyright (C) 2001, 2002 M. R. Brown * Copyright (C) 2002, 2003 Paul Mundt * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ #include <linux/sched.h> #include <linux/kernel.h> #include <linux/param.h> #include <linux/interrupt.h> #include <linux/init.h> #include <linux/irq.h> #include <linux/pci.h> #include <linux/module.h> #include <linux/io.h> #include <mach/pci.h> /* * The !gapspci_config_access case really shouldn't happen, ever, unless * someone implicitly messes around with the last devfn value.. otherwise we * only support a single device anyways, and if we didn't have a BBA, we * wouldn't make it terribly far through the PCI setup anyways. * * Also, we could very easily support both Type 0 and Type 1 configurations * here, but since it doesn't seem that there is any such implementation in * existence, we don't bother. * * I suppose if someone actually gets around to ripping the chip out of * the BBA and hanging some more devices off of it, then this might be * something to take into consideration. However, due to the cost of the BBA, * and the general lack of activity by DC hardware hackers, this doesn't seem * likely to happen anytime soon. */ static int gapspci_config_access(unsigned char bus, unsigned int devfn) { return (bus == 0) && (devfn == 0); } /* * We can also actually read and write in b/w/l sizes! Thankfully this part * was at least done right, and we don't have to do the stupid masking and * shifting that we do on the 7751! Small wonders never cease to amaze. 
*/ static int gapspci_read(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val) { *val = 0xffffffff; if (!gapspci_config_access(bus->number, devfn)) return PCIBIOS_DEVICE_NOT_FOUND; switch (size) { case 1: *val = inb(GAPSPCI_BBA_CONFIG+where); break; case 2: *val = inw(GAPSPCI_BBA_CONFIG+where); break; case 4: *val = inl(GAPSPCI_BBA_CONFIG+where); break; } return PCIBIOS_SUCCESSFUL; } static int gapspci_write(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val) { if (!gapspci_config_access(bus->number, devfn)) return PCIBIOS_DEVICE_NOT_FOUND; switch (size) { case 1: outb(( u8)val, GAPSPCI_BBA_CONFIG+where); break; case 2: outw((u16)val, GAPSPCI_BBA_CONFIG+where); break; case 4: outl((u32)val, GAPSPCI_BBA_CONFIG+where); break; } return PCIBIOS_SUCCESSFUL; } struct pci_ops gapspci_pci_ops = { .read = gapspci_read, .write = gapspci_write, };
gpl-2.0
Asus-T100/kernel
drivers/mfd/lp3943.c
494
4238
/* * TI/National Semiconductor LP3943 MFD Core Driver * * Copyright 2013 Texas Instruments * * Author: Milo Kim <milo.kim@ti.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * Driver structure: * LP3943 is an integrated device capable of driving 16 output channels. * It can be used for a GPIO expander and PWM generators. * * LED control General usage for a device * ___________ ____________________________ * * LP3943 MFD ---- GPIO expander leds-gpio eg) HW enable pin * | * --- PWM generator leds-pwm eg) PWM input * * Internal two PWM channels are used for LED dimming effect. * And each output pin can be used as a GPIO as well. * The LED functionality can work with GPIOs or PWMs. * LEDs can be controlled with legacy leds-gpio(static brightness) or * leds-pwm drivers(dynamic brightness control). * Alternatively, it can be used for generic GPIO and PWM controller. * For example, a GPIO is HW enable pin of a device. * A PWM is input pin of a backlight device. 
*/ #include <linux/err.h> #include <linux/gpio.h> #include <linux/i2c.h> #include <linux/mfd/core.h> #include <linux/mfd/lp3943.h> #include <linux/module.h> #include <linux/of.h> #include <linux/slab.h> #define LP3943_MAX_REGISTERS 0x09 /* Register configuration for pin MUX */ static const struct lp3943_reg_cfg lp3943_mux_cfg[] = { /* address, mask, shift */ { LP3943_REG_MUX0, 0x03, 0 }, { LP3943_REG_MUX0, 0x0C, 2 }, { LP3943_REG_MUX0, 0x30, 4 }, { LP3943_REG_MUX0, 0xC0, 6 }, { LP3943_REG_MUX1, 0x03, 0 }, { LP3943_REG_MUX1, 0x0C, 2 }, { LP3943_REG_MUX1, 0x30, 4 }, { LP3943_REG_MUX1, 0xC0, 6 }, { LP3943_REG_MUX2, 0x03, 0 }, { LP3943_REG_MUX2, 0x0C, 2 }, { LP3943_REG_MUX2, 0x30, 4 }, { LP3943_REG_MUX2, 0xC0, 6 }, { LP3943_REG_MUX3, 0x03, 0 }, { LP3943_REG_MUX3, 0x0C, 2 }, { LP3943_REG_MUX3, 0x30, 4 }, { LP3943_REG_MUX3, 0xC0, 6 }, }; static const struct mfd_cell lp3943_devs[] = { { .name = "lp3943-pwm", .of_compatible = "ti,lp3943-pwm", }, { .name = "lp3943-gpio", .of_compatible = "ti,lp3943-gpio", }, }; int lp3943_read_byte(struct lp3943 *lp3943, u8 reg, u8 *read) { int ret; unsigned int val; ret = regmap_read(lp3943->regmap, reg, &val); if (ret < 0) return ret; *read = (u8)val; return 0; } EXPORT_SYMBOL_GPL(lp3943_read_byte); int lp3943_write_byte(struct lp3943 *lp3943, u8 reg, u8 data) { return regmap_write(lp3943->regmap, reg, data); } EXPORT_SYMBOL_GPL(lp3943_write_byte); int lp3943_update_bits(struct lp3943 *lp3943, u8 reg, u8 mask, u8 data) { return regmap_update_bits(lp3943->regmap, reg, mask, data); } EXPORT_SYMBOL_GPL(lp3943_update_bits); static const struct regmap_config lp3943_regmap_config = { .reg_bits = 8, .val_bits = 8, .max_register = LP3943_MAX_REGISTERS, }; static int lp3943_probe(struct i2c_client *cl, const struct i2c_device_id *id) { struct lp3943 *lp3943; struct device *dev = &cl->dev; lp3943 = devm_kzalloc(dev, sizeof(*lp3943), GFP_KERNEL); if (!lp3943) return -ENOMEM; lp3943->regmap = devm_regmap_init_i2c(cl, &lp3943_regmap_config); if 
(IS_ERR(lp3943->regmap)) return PTR_ERR(lp3943->regmap); lp3943->pdata = dev_get_platdata(dev); lp3943->dev = dev; lp3943->mux_cfg = lp3943_mux_cfg; i2c_set_clientdata(cl, lp3943); return devm_mfd_add_devices(dev, -1, lp3943_devs, ARRAY_SIZE(lp3943_devs), NULL, 0, NULL); } static const struct i2c_device_id lp3943_ids[] = { { "lp3943", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, lp3943_ids); #ifdef CONFIG_OF static const struct of_device_id lp3943_of_match[] = { { .compatible = "ti,lp3943", }, { } }; MODULE_DEVICE_TABLE(of, lp3943_of_match); #endif static struct i2c_driver lp3943_driver = { .probe = lp3943_probe, .driver = { .name = "lp3943", .of_match_table = of_match_ptr(lp3943_of_match), }, .id_table = lp3943_ids, }; module_i2c_driver(lp3943_driver); MODULE_DESCRIPTION("LP3943 MFD Core Driver"); MODULE_AUTHOR("Milo Kim"); MODULE_LICENSE("GPL");
gpl-2.0
pacificIT/linux-2.6.36
arch/frv/kernel/time.c
1006
3509
/* time.c: FRV arch-specific time handling * * Copyright (C) 2003-5 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * - Derived from arch/m68k/kernel/time.c * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/module.h> #include <linux/errno.h> #include <linux/sched.h> #include <linux/kernel.h> #include <linux/param.h> #include <linux/string.h> #include <linux/interrupt.h> #include <linux/profile.h> #include <linux/irq.h> #include <linux/mm.h> #include <asm/io.h> #include <asm/timer-regs.h> #include <asm/mb-regs.h> #include <asm/mb86943a.h> #include <linux/timex.h> #define TICK_SIZE (tick_nsec / 1000) unsigned long __nongprelbss __clkin_clock_speed_HZ; unsigned long __nongprelbss __ext_bus_clock_speed_HZ; unsigned long __nongprelbss __res_bus_clock_speed_HZ; unsigned long __nongprelbss __sdram_clock_speed_HZ; unsigned long __nongprelbss __core_bus_clock_speed_HZ; unsigned long __nongprelbss __core_clock_speed_HZ; unsigned long __nongprelbss __dsu_clock_speed_HZ; unsigned long __nongprelbss __serial_clock_speed_HZ; unsigned long __delay_loops_MHz; static irqreturn_t timer_interrupt(int irq, void *dummy); static struct irqaction timer_irq = { .handler = timer_interrupt, .flags = IRQF_DISABLED, .name = "timer", }; /* * timer_interrupt() needs to keep up the real-time clock, * as well as call the "do_timer()" routine every clocktick */ static irqreturn_t timer_interrupt(int irq, void *dummy) { profile_tick(CPU_PROFILING); /* * Here we are in the timer irq handler. We just have irqs locally * disabled but we don't know if the timer_bh is running on the other * CPU. We need to avoid to SMP race with it. NOTE: we don't need * the irq version of write_lock because as just said we have irq * locally disabled. 
-arca */ write_seqlock(&xtime_lock); do_timer(1); #ifdef CONFIG_HEARTBEAT static unsigned short n; n++; __set_LEDS(n); #endif /* CONFIG_HEARTBEAT */ write_sequnlock(&xtime_lock); update_process_times(user_mode(get_irq_regs())); return IRQ_HANDLED; } void time_divisor_init(void) { unsigned short base, pre, prediv; /* set the scheduling timer going */ pre = 1; prediv = 4; base = __res_bus_clock_speed_HZ / pre / HZ / (1 << prediv); __set_TPRV(pre); __set_TxCKSL_DATA(0, prediv); __set_TCTR(TCTR_SC_CTR0 | TCTR_RL_RW_LH8 | TCTR_MODE_2); __set_TCSR_DATA(0, base & 0xff); __set_TCSR_DATA(0, base >> 8); } void read_persistent_clock(struct timespec *ts) { unsigned int year, mon, day, hour, min, sec; extern void arch_gettod(int *year, int *mon, int *day, int *hour, int *min, int *sec); /* FIX by dqg : Set to zero for platforms that don't have tod */ /* without this time is undefined and can overflow time_t, causing */ /* very strange errors */ year = 1980; mon = day = 1; hour = min = sec = 0; arch_gettod (&year, &mon, &day, &hour, &min, &sec); if ((year += 1900) < 1970) year += 100; ts->tv_sec = mktime(year, mon, day, hour, min, sec); ts->tv_nsec = 0; } void time_init(void) { /* install scheduling interrupt handler */ setup_irq(IRQ_CPU_TIMER0, &timer_irq); time_divisor_init(); } /* * Scheduler clock - returns current time in nanosec units. */ unsigned long long sched_clock(void) { return jiffies_64 * (1000000000 / HZ); }
gpl-2.0
supertoast/kernel-2.6.35-series-U8650-U8510-M865-Gingerbread
fs/ncpfs/ncplib_kernel.c
1518
33057
/* * ncplib_kernel.c * * Copyright (C) 1995, 1996 by Volker Lendecke * Modified for big endian by J.F. Chadima and David S. Miller * Modified 1997 Peter Waltenberg, Bill Hawes, David Woodhouse for 2.1 dcache * Modified 1999 Wolfram Pienkoss for NLS * Modified 2000 Ben Harris, University of Cambridge for NFS NS meta-info * */ #include "ncplib_kernel.h" static inline void assert_server_locked(struct ncp_server *server) { if (server->lock == 0) { DPRINTK("ncpfs: server not locked!\n"); } } static void ncp_add_byte(struct ncp_server *server, __u8 x) { assert_server_locked(server); *(__u8 *) (&(server->packet[server->current_size])) = x; server->current_size += 1; return; } static void ncp_add_word(struct ncp_server *server, __le16 x) { assert_server_locked(server); put_unaligned(x, (__le16 *) (&(server->packet[server->current_size]))); server->current_size += 2; return; } static void ncp_add_be16(struct ncp_server *server, __u16 x) { assert_server_locked(server); put_unaligned(cpu_to_be16(x), (__be16 *) (&(server->packet[server->current_size]))); server->current_size += 2; } static void ncp_add_dword(struct ncp_server *server, __le32 x) { assert_server_locked(server); put_unaligned(x, (__le32 *) (&(server->packet[server->current_size]))); server->current_size += 4; return; } static void ncp_add_be32(struct ncp_server *server, __u32 x) { assert_server_locked(server); put_unaligned(cpu_to_be32(x), (__be32 *)(&(server->packet[server->current_size]))); server->current_size += 4; } static inline void ncp_add_dword_lh(struct ncp_server *server, __u32 x) { ncp_add_dword(server, cpu_to_le32(x)); } static void ncp_add_mem(struct ncp_server *server, const void *source, int size) { assert_server_locked(server); memcpy(&(server->packet[server->current_size]), source, size); server->current_size += size; return; } static void ncp_add_pstring(struct ncp_server *server, const char *s) { int len = strlen(s); assert_server_locked(server); if (len > 255) { DPRINTK("ncpfs: string too 
long: %s\n", s); len = 255; } ncp_add_byte(server, len); ncp_add_mem(server, s, len); return; } static inline void ncp_init_request(struct ncp_server *server) { ncp_lock_server(server); server->current_size = sizeof(struct ncp_request_header); server->has_subfunction = 0; } static inline void ncp_init_request_s(struct ncp_server *server, int subfunction) { ncp_lock_server(server); server->current_size = sizeof(struct ncp_request_header) + 2; ncp_add_byte(server, subfunction); server->has_subfunction = 1; } static inline char * ncp_reply_data(struct ncp_server *server, int offset) { return &(server->packet[sizeof(struct ncp_reply_header) + offset]); } static inline u8 BVAL(void *data) { return *(u8 *)data; } static u8 ncp_reply_byte(struct ncp_server *server, int offset) { return *(u8 *)ncp_reply_data(server, offset); } static inline u16 WVAL_LH(void *data) { return get_unaligned_le16(data); } static u16 ncp_reply_le16(struct ncp_server *server, int offset) { return get_unaligned_le16(ncp_reply_data(server, offset)); } static u16 ncp_reply_be16(struct ncp_server *server, int offset) { return get_unaligned_be16(ncp_reply_data(server, offset)); } static inline u32 DVAL_LH(void *data) { return get_unaligned_le32(data); } static __le32 ncp_reply_dword(struct ncp_server *server, int offset) { return get_unaligned((__le32 *)ncp_reply_data(server, offset)); } static inline __u32 ncp_reply_dword_lh(struct ncp_server* server, int offset) { return le32_to_cpu(ncp_reply_dword(server, offset)); } int ncp_negotiate_buffersize(struct ncp_server *server, int size, int *target) { int result; ncp_init_request(server); ncp_add_be16(server, size); if ((result = ncp_request(server, 33)) != 0) { ncp_unlock_server(server); return result; } *target = min_t(unsigned int, ncp_reply_be16(server, 0), size); ncp_unlock_server(server); return 0; } /* options: * bit 0 ipx checksum * bit 1 packet signing */ int ncp_negotiate_size_and_options(struct ncp_server *server, int size, int options, int 
*ret_size, int *ret_options) { int result; /* there is minimum */ if (size < NCP_BLOCK_SIZE) size = NCP_BLOCK_SIZE; ncp_init_request(server); ncp_add_be16(server, size); ncp_add_byte(server, options); if ((result = ncp_request(server, 0x61)) != 0) { ncp_unlock_server(server); return result; } /* NCP over UDP returns 0 (!!!) */ result = ncp_reply_be16(server, 0); if (result >= NCP_BLOCK_SIZE) size = min(result, size); *ret_size = size; *ret_options = ncp_reply_byte(server, 4); ncp_unlock_server(server); return 0; } int ncp_get_volume_info_with_number(struct ncp_server* server, int n, struct ncp_volume_info* target) { int result; int len; ncp_init_request_s(server, 44); ncp_add_byte(server, n); if ((result = ncp_request(server, 22)) != 0) { goto out; } target->total_blocks = ncp_reply_dword_lh(server, 0); target->free_blocks = ncp_reply_dword_lh(server, 4); target->purgeable_blocks = ncp_reply_dword_lh(server, 8); target->not_yet_purgeable_blocks = ncp_reply_dword_lh(server, 12); target->total_dir_entries = ncp_reply_dword_lh(server, 16); target->available_dir_entries = ncp_reply_dword_lh(server, 20); target->sectors_per_block = ncp_reply_byte(server, 28); memset(&(target->volume_name), 0, sizeof(target->volume_name)); result = -EIO; len = ncp_reply_byte(server, 29); if (len > NCP_VOLNAME_LEN) { DPRINTK("ncpfs: volume name too long: %d\n", len); goto out; } memcpy(&(target->volume_name), ncp_reply_data(server, 30), len); result = 0; out: ncp_unlock_server(server); return result; } int ncp_get_directory_info(struct ncp_server* server, __u8 n, struct ncp_volume_info* target) { int result; int len; ncp_init_request_s(server, 45); ncp_add_byte(server, n); if ((result = ncp_request(server, 22)) != 0) { goto out; } target->total_blocks = ncp_reply_dword_lh(server, 0); target->free_blocks = ncp_reply_dword_lh(server, 4); target->purgeable_blocks = 0; target->not_yet_purgeable_blocks = 0; target->total_dir_entries = ncp_reply_dword_lh(server, 8); 
target->available_dir_entries = ncp_reply_dword_lh(server, 12); target->sectors_per_block = ncp_reply_byte(server, 20); memset(&(target->volume_name), 0, sizeof(target->volume_name)); result = -EIO; len = ncp_reply_byte(server, 21); if (len > NCP_VOLNAME_LEN) { DPRINTK("ncpfs: volume name too long: %d\n", len); goto out; } memcpy(&(target->volume_name), ncp_reply_data(server, 22), len); result = 0; out: ncp_unlock_server(server); return result; } int ncp_close_file(struct ncp_server *server, const char *file_id) { int result; ncp_init_request(server); ncp_add_byte(server, 0); ncp_add_mem(server, file_id, 6); result = ncp_request(server, 66); ncp_unlock_server(server); return result; } int ncp_make_closed(struct inode *inode) { int err; err = 0; mutex_lock(&NCP_FINFO(inode)->open_mutex); if (atomic_read(&NCP_FINFO(inode)->opened) == 1) { atomic_set(&NCP_FINFO(inode)->opened, 0); err = ncp_close_file(NCP_SERVER(inode), NCP_FINFO(inode)->file_handle); if (!err) PPRINTK("ncp_make_closed: volnum=%d, dirent=%u, error=%d\n", NCP_FINFO(inode)->volNumber, NCP_FINFO(inode)->dirEntNum, err); } mutex_unlock(&NCP_FINFO(inode)->open_mutex); return err; } static void ncp_add_handle_path(struct ncp_server *server, __u8 vol_num, __le32 dir_base, int have_dir_base, const char *path) { ncp_add_byte(server, vol_num); ncp_add_dword(server, dir_base); if (have_dir_base != 0) { ncp_add_byte(server, 1); /* dir_base */ } else { ncp_add_byte(server, 0xff); /* no handle */ } if (path != NULL) { ncp_add_byte(server, 1); /* 1 component */ ncp_add_pstring(server, path); } else { ncp_add_byte(server, 0); } } int ncp_dirhandle_alloc(struct ncp_server* server, __u8 volnum, __le32 dirent, __u8* dirhandle) { int result; ncp_init_request(server); ncp_add_byte(server, 12); /* subfunction */ ncp_add_byte(server, NW_NS_DOS); ncp_add_byte(server, 0); ncp_add_word(server, 0); ncp_add_handle_path(server, volnum, dirent, 1, NULL); if ((result = ncp_request(server, 87)) == 0) { *dirhandle = 
ncp_reply_byte(server, 0); } ncp_unlock_server(server); return result; } int ncp_dirhandle_free(struct ncp_server* server, __u8 dirhandle) { int result; ncp_init_request_s(server, 20); ncp_add_byte(server, dirhandle); result = ncp_request(server, 22); ncp_unlock_server(server); return result; } void ncp_extract_file_info(void *structure, struct nw_info_struct *target) { __u8 *name_len; const int info_struct_size = offsetof(struct nw_info_struct, nameLen); memcpy(target, structure, info_struct_size); name_len = structure + info_struct_size; target->nameLen = *name_len; memcpy(target->entryName, name_len + 1, *name_len); target->entryName[*name_len] = '\0'; target->volNumber = le32_to_cpu(target->volNumber); return; } #ifdef CONFIG_NCPFS_NFS_NS static inline void ncp_extract_nfs_info(unsigned char *structure, struct nw_nfs_info *target) { target->mode = DVAL_LH(structure); target->rdev = DVAL_LH(structure + 8); } #endif int ncp_obtain_nfs_info(struct ncp_server *server, struct nw_info_struct *target) { int result = 0; #ifdef CONFIG_NCPFS_NFS_NS __u32 volnum = target->volNumber; if (ncp_is_nfs_extras(server, volnum)) { ncp_init_request(server); ncp_add_byte(server, 19); /* subfunction */ ncp_add_byte(server, server->name_space[volnum]); ncp_add_byte(server, NW_NS_NFS); ncp_add_byte(server, 0); ncp_add_byte(server, volnum); ncp_add_dword(server, target->dirEntNum); /* We must retrieve both nlinks and rdev, otherwise some server versions report zeroes instead of valid data */ ncp_add_dword_lh(server, NSIBM_NFS_MODE | NSIBM_NFS_NLINKS | NSIBM_NFS_RDEV); if ((result = ncp_request(server, 87)) == 0) { ncp_extract_nfs_info(ncp_reply_data(server, 0), &target->nfs); DPRINTK(KERN_DEBUG "ncp_obtain_nfs_info: (%s) mode=0%o, rdev=0x%x\n", target->entryName, target->nfs.mode, target->nfs.rdev); } else { target->nfs.mode = 0; target->nfs.rdev = 0; } ncp_unlock_server(server); } else #endif { target->nfs.mode = 0; target->nfs.rdev = 0; } return result; } /* * Returns information for 
a (one-component) name relative to * the specified directory. */ int ncp_obtain_info(struct ncp_server *server, struct inode *dir, char *path, struct nw_info_struct *target) { __u8 volnum = NCP_FINFO(dir)->volNumber; __le32 dirent = NCP_FINFO(dir)->dirEntNum; int result; if (target == NULL) { printk(KERN_ERR "ncp_obtain_info: invalid call\n"); return -EINVAL; } ncp_init_request(server); ncp_add_byte(server, 6); /* subfunction */ ncp_add_byte(server, server->name_space[volnum]); ncp_add_byte(server, server->name_space[volnum]); /* N.B. twice ?? */ ncp_add_word(server, cpu_to_le16(0x8006)); /* get all */ ncp_add_dword(server, RIM_ALL); ncp_add_handle_path(server, volnum, dirent, 1, path); if ((result = ncp_request(server, 87)) != 0) goto out; ncp_extract_file_info(ncp_reply_data(server, 0), target); ncp_unlock_server(server); result = ncp_obtain_nfs_info(server, target); return result; out: ncp_unlock_server(server); return result; } #ifdef CONFIG_NCPFS_NFS_NS static int ncp_obtain_DOS_dir_base(struct ncp_server *server, __u8 volnum, __le32 dirent, char *path, /* At most 1 component */ __le32 *DOS_dir_base) { int result; ncp_init_request(server); ncp_add_byte(server, 6); /* subfunction */ ncp_add_byte(server, server->name_space[volnum]); ncp_add_byte(server, server->name_space[volnum]); ncp_add_word(server, cpu_to_le16(0x8006)); /* get all */ ncp_add_dword(server, RIM_DIRECTORY); ncp_add_handle_path(server, volnum, dirent, 1, path); if ((result = ncp_request(server, 87)) == 0) { if (DOS_dir_base) *DOS_dir_base=ncp_reply_dword(server, 0x34); } ncp_unlock_server(server); return result; } #endif /* CONFIG_NCPFS_NFS_NS */ static inline int ncp_get_known_namespace(struct ncp_server *server, __u8 volume) { #if defined(CONFIG_NCPFS_OS2_NS) || defined(CONFIG_NCPFS_NFS_NS) int result; __u8 *namespace; __u16 no_namespaces; ncp_init_request(server); ncp_add_byte(server, 24); /* Subfunction: Get Name Spaces Loaded */ ncp_add_word(server, 0); ncp_add_byte(server, volume); if 
((result = ncp_request(server, 87)) != 0) { ncp_unlock_server(server); return NW_NS_DOS; /* not result ?? */ } result = NW_NS_DOS; no_namespaces = ncp_reply_le16(server, 0); namespace = ncp_reply_data(server, 2); while (no_namespaces > 0) { DPRINTK("get_namespaces: found %d on %d\n", *namespace, volume); #ifdef CONFIG_NCPFS_NFS_NS if ((*namespace == NW_NS_NFS) && !(server->m.flags&NCP_MOUNT_NO_NFS)) { result = NW_NS_NFS; break; } #endif /* CONFIG_NCPFS_NFS_NS */ #ifdef CONFIG_NCPFS_OS2_NS if ((*namespace == NW_NS_OS2) && !(server->m.flags&NCP_MOUNT_NO_OS2)) { result = NW_NS_OS2; } #endif /* CONFIG_NCPFS_OS2_NS */ namespace += 1; no_namespaces -= 1; } ncp_unlock_server(server); return result; #else /* neither OS2 nor NFS - only DOS */ return NW_NS_DOS; #endif /* defined(CONFIG_NCPFS_OS2_NS) || defined(CONFIG_NCPFS_NFS_NS) */ } static int ncp_ObtainSpecificDirBase(struct ncp_server *server, __u8 nsSrc, __u8 nsDst, __u8 vol_num, __le32 dir_base, char *path, /* At most 1 component */ __le32 *dirEntNum, __le32 *DosDirNum) { int result; ncp_init_request(server); ncp_add_byte(server, 6); /* subfunction */ ncp_add_byte(server, nsSrc); ncp_add_byte(server, nsDst); ncp_add_word(server, cpu_to_le16(0x8006)); /* get all */ ncp_add_dword(server, RIM_ALL); ncp_add_handle_path(server, vol_num, dir_base, 1, path); if ((result = ncp_request(server, 87)) != 0) { ncp_unlock_server(server); return result; } if (dirEntNum) *dirEntNum = ncp_reply_dword(server, 0x30); if (DosDirNum) *DosDirNum = ncp_reply_dword(server, 0x34); ncp_unlock_server(server); return 0; } int ncp_mount_subdir(struct ncp_server *server, __u8 volNumber, __u8 srcNS, __le32 dirEntNum, __u32* volume, __le32* newDirEnt, __le32* newDosEnt) { int dstNS; int result; dstNS = ncp_get_known_namespace(server, volNumber); if ((result = ncp_ObtainSpecificDirBase(server, srcNS, dstNS, volNumber, dirEntNum, NULL, newDirEnt, newDosEnt)) != 0) { return result; } server->name_space[volNumber] = dstNS; *volume = volNumber; 
server->m.mounted_vol[1] = 0; server->m.mounted_vol[0] = 'X'; return 0; } int ncp_get_volume_root(struct ncp_server *server, const char *volname, __u32* volume, __le32* dirent, __le32* dosdirent) { int result; __u8 volnum; DPRINTK("ncp_get_volume_root: looking up vol %s\n", volname); ncp_init_request(server); ncp_add_byte(server, 22); /* Subfunction: Generate dir handle */ ncp_add_byte(server, 0); /* DOS namespace */ ncp_add_byte(server, 0); /* reserved */ ncp_add_byte(server, 0); /* reserved */ ncp_add_byte(server, 0); /* reserved */ ncp_add_byte(server, 0); /* faked volume number */ ncp_add_dword(server, 0); /* faked dir_base */ ncp_add_byte(server, 0xff); /* Don't have a dir_base */ ncp_add_byte(server, 1); /* 1 path component */ ncp_add_pstring(server, volname); if ((result = ncp_request(server, 87)) != 0) { ncp_unlock_server(server); return result; } *dirent = *dosdirent = ncp_reply_dword(server, 4); volnum = ncp_reply_byte(server, 8); ncp_unlock_server(server); *volume = volnum; server->name_space[volnum] = ncp_get_known_namespace(server, volnum); DPRINTK("lookup_vol: namespace[%d] = %d\n", volnum, server->name_space[volnum]); return 0; } int ncp_lookup_volume(struct ncp_server *server, const char *volname, struct nw_info_struct *target) { int result; memset(target, 0, sizeof(*target)); result = ncp_get_volume_root(server, volname, &target->volNumber, &target->dirEntNum, &target->DosDirNum); if (result) { return result; } target->nameLen = strlen(volname); memcpy(target->entryName, volname, target->nameLen+1); target->attributes = aDIR; /* set dates to Jan 1, 1986 00:00 */ target->creationTime = target->modifyTime = cpu_to_le16(0x0000); target->creationDate = target->modifyDate = target->lastAccessDate = cpu_to_le16(0x0C21); target->nfs.mode = 0; return 0; } int ncp_modify_file_or_subdir_dos_info_path(struct ncp_server *server, struct inode *dir, const char *path, __le32 info_mask, const struct nw_modify_dos_info *info) { __u8 volnum = 
NCP_FINFO(dir)->volNumber; __le32 dirent = NCP_FINFO(dir)->dirEntNum; int result; ncp_init_request(server); ncp_add_byte(server, 7); /* subfunction */ ncp_add_byte(server, server->name_space[volnum]); ncp_add_byte(server, 0); /* reserved */ ncp_add_word(server, cpu_to_le16(0x8006)); /* search attribs: all */ ncp_add_dword(server, info_mask); ncp_add_mem(server, info, sizeof(*info)); ncp_add_handle_path(server, volnum, dirent, 1, path); result = ncp_request(server, 87); ncp_unlock_server(server); return result; } int ncp_modify_file_or_subdir_dos_info(struct ncp_server *server, struct inode *dir, __le32 info_mask, const struct nw_modify_dos_info *info) { return ncp_modify_file_or_subdir_dos_info_path(server, dir, NULL, info_mask, info); } #ifdef CONFIG_NCPFS_NFS_NS int ncp_modify_nfs_info(struct ncp_server *server, __u8 volnum, __le32 dirent, __u32 mode, __u32 rdev) { int result = 0; if (server->name_space[volnum] == NW_NS_NFS) { ncp_init_request(server); ncp_add_byte(server, 25); /* subfunction */ ncp_add_byte(server, server->name_space[volnum]); ncp_add_byte(server, NW_NS_NFS); ncp_add_byte(server, volnum); ncp_add_dword(server, dirent); /* we must always operate on both nlinks and rdev, otherwise rdev is not set */ ncp_add_dword_lh(server, NSIBM_NFS_MODE | NSIBM_NFS_NLINKS | NSIBM_NFS_RDEV); ncp_add_dword_lh(server, mode); ncp_add_dword_lh(server, 1); /* nlinks */ ncp_add_dword_lh(server, rdev); result = ncp_request(server, 87); ncp_unlock_server(server); } return result; } #endif static int ncp_DeleteNSEntry(struct ncp_server *server, __u8 have_dir_base, __u8 volnum, __le32 dirent, char* name, __u8 ns, __le16 attr) { int result; ncp_init_request(server); ncp_add_byte(server, 8); /* subfunction */ ncp_add_byte(server, ns); ncp_add_byte(server, 0); /* reserved */ ncp_add_word(server, attr); /* search attribs: all */ ncp_add_handle_path(server, volnum, dirent, have_dir_base, name); result = ncp_request(server, 87); ncp_unlock_server(server); return result; } int 
ncp_del_file_or_subdir2(struct ncp_server *server,
			struct dentry *dentry)
{
	struct inode *inode = dentry->d_inode;
	__u8 volnum;
	__le32 dirent;

	if (!inode) {
		return 0xFF;	/* Any error */
	}
	/* Delete by DOS dir number in the DOS namespace. */
	volnum = NCP_FINFO(inode)->volNumber;
	dirent = NCP_FINFO(inode)->DosDirNum;
	return ncp_DeleteNSEntry(server, 1, volnum, dirent, NULL,
				 NW_NS_DOS, cpu_to_le16(0x8006));
}

/*
 * Delete entry @name inside directory @dir.  On NFS-namespace volumes the
 * name is first translated to its DOS dir base so the delete can be done
 * in the DOS namespace; otherwise the name is deleted directly in the
 * volume's cached namespace.
 */
int
ncp_del_file_or_subdir(struct ncp_server *server,
		       struct inode *dir, char *name)
{
	__u8 volnum = NCP_FINFO(dir)->volNumber;
	__le32 dirent = NCP_FINFO(dir)->dirEntNum;

#ifdef CONFIG_NCPFS_NFS_NS
	if (server->name_space[volnum] == NW_NS_NFS)
	{
		int result;

		result = ncp_obtain_DOS_dir_base(server, volnum, dirent,
						 name, &dirent);
		if (result) return result;
		return ncp_DeleteNSEntry(server, 1, volnum, dirent, NULL,
					 NW_NS_DOS, cpu_to_le16(0x8006));
	}
	else
#endif	/* CONFIG_NCPFS_NFS_NS */
		return ncp_DeleteNSEntry(server, 1, volnum, dirent, name,
					 server->name_space[volnum],
					 cpu_to_le16(0x8006));
}

/*
 * Pack two 16-bit words into the 6-byte little-endian NetWare file handle
 * layout.  Note dest[0] is written as v0 + 1 — presumably a quirk of the
 * server's handle encoding; TODO(review): confirm against NCP handle docs.
 */
static inline void ConvertToNWfromDWORD(__u16 v0, __u16 v1, __u8 ret[6])
{
	__le16 *dest = (__le16 *) ret;
	dest[1] = cpu_to_le16(v0);
	dest[2] = cpu_to_le16(v1);
	dest[0] = cpu_to_le16(v0 + 1);
	return;
}

/* If both dir and name are NULL, then in target there's already a
   looked-up entry that wants to be opened. */
/*
 * Open or create a file/subdirectory (call 87 subfunction 1).  On success
 * fills @target with the extracted file info, the volume number and the
 * 6-byte NetWare file handle, and marks it opened unless a directory was
 * created.  NFS info is fetched best-effort afterwards (result ignored).
 */
int ncp_open_create_file_or_subdir(struct ncp_server *server,
				   struct inode *dir, char *name,
				   int open_create_mode,
				   __le32 create_attributes,
				   __le16 desired_acc_rights,
				   struct ncp_entry_info *target)
{
	__le16 search_attribs = cpu_to_le16(0x0006);
	__u8 volnum;
	__le32 dirent;
	int result;

	volnum = NCP_FINFO(dir)->volNumber;
	dirent = NCP_FINFO(dir)->dirEntNum;

	if ((create_attributes & aDIR) != 0) {
		search_attribs |= cpu_to_le16(0x8000);
	}
	ncp_init_request(server);
	ncp_add_byte(server, 1);	/* subfunction */
	ncp_add_byte(server, server->name_space[volnum]);
	ncp_add_byte(server, open_create_mode);
	ncp_add_word(server, search_attribs);
	ncp_add_dword(server, RIM_ALL);
	ncp_add_dword(server, create_attributes);
	/* The desired acc rights seem to be the inherited rights mask
	   for directories */
	ncp_add_word(server, desired_acc_rights);
	ncp_add_handle_path(server, volnum, dirent, 1, name);

	if ((result = ncp_request(server, 87)) != 0)
		goto out;
	if (!(create_attributes & aDIR))
		target->opened = 1;

	/* in target there's a new finfo to fill */
	ncp_extract_file_info(ncp_reply_data(server, 6), &(target->i));
	target->volume = target->i.volNumber;
	ConvertToNWfromDWORD(ncp_reply_le16(server, 0),
			     ncp_reply_le16(server, 2),
			     target->file_handle);

	ncp_unlock_server(server);

	/* Best effort only; errors deliberately ignored. */
	(void)ncp_obtain_nfs_info(server, &(target->i));
	return 0;

out:
	ncp_unlock_server(server);
	return result;
}

/*
 * Begin a directory enumeration of @dir (call 87 subfunction 2) and copy
 * the server-returned search sequence cookie into @target for use by
 * subsequent search calls.  Returns the NCP result code.
 */
int
ncp_initialize_search(struct ncp_server *server, struct inode *dir,
		      struct nw_search_sequence *target)
{
	__u8 volnum = NCP_FINFO(dir)->volNumber;
	__le32 dirent = NCP_FINFO(dir)->dirEntNum;
	int result;

	ncp_init_request(server);
	ncp_add_byte(server, 2);	/* subfunction */
	ncp_add_byte(server, server->name_space[volnum]);
	ncp_add_byte(server, 0);	/* reserved */
	ncp_add_handle_path(server, volnum, dirent, 1, NULL);

	result = ncp_request(server, 87);
	if (result)
		goto out;
	memcpy(target, ncp_reply_data(server, 0), sizeof(*target));

out:
	ncp_unlock_server(server);
	return result;
}

int ncp_search_for_fileset(struct
ncp_server *server,
			   struct nw_search_sequence *seq,
			   int* more,
			   int* cnt,
			   char* buffer,
			   size_t bufsize,
			   char** rbuf,
			   size_t* rsize)
{
	/*
	 * Fetch a batch of directory entries (call 87 subfunction 20) using
	 * the 9-byte search cookie in @seq, which is updated in place from
	 * the reply.  On success *rbuf/*rsize describe the raw entry data
	 * inside @buffer, *cnt the entry count and *more the continuation
	 * flag.  Returns 0, an NCP error, or 0xFF for a short reply.
	 */
	int result;

	ncp_init_request(server);
	ncp_add_byte(server, 20);
	ncp_add_byte(server, server->name_space[seq->volNumber]);
	ncp_add_byte(server, 0);		/* datastream */
	ncp_add_word(server, cpu_to_le16(0x8006));
	ncp_add_dword(server, RIM_ALL);
	ncp_add_word(server, cpu_to_le16(32767));	/* max returned items */
	ncp_add_mem(server, seq, 9);
#ifdef CONFIG_NCPFS_NFS_NS
	if (server->name_space[seq->volNumber] == NW_NS_NFS) {
		ncp_add_byte(server, 0);	/* 0 byte pattern */
	} else
#endif
	{
		ncp_add_byte(server, 2);	/* 2 byte pattern */
		ncp_add_byte(server, 0xff);	/* following is a wildcard */
		ncp_add_byte(server, '*');
	}
	result = ncp_request2(server, 87, buffer, bufsize);
	if (result) {
		ncp_unlock_server(server);
		return result;
	}
	if (server->ncp_reply_size < 12) {
		ncp_unlock_server(server);
		return 0xFF;
	}
	*rsize = server->ncp_reply_size - 12;
	ncp_unlock_server(server);
	/* Reply payload: 9-byte cookie, more flag at 9, count at 10. */
	buffer = buffer + sizeof(struct ncp_reply_header);
	*rbuf = buffer + 12;
	*cnt = WVAL_LH(buffer + 10);
	*more = BVAL(buffer + 9);
	memcpy(seq, buffer, 9);
	return 0;
}

/*
 * Rename/move an entry within or across directories in one namespace
 * (call 87 subfunction 4).  Returns -EINVAL on NULL arguments, otherwise
 * the NCP result code.
 */
static int
ncp_RenameNSEntry(struct ncp_server *server,
		  struct inode *old_dir, char *old_name, __le16 old_type,
		  struct inode *new_dir, char *new_name)
{
	int result = -EINVAL;

	if ((old_dir == NULL) || (old_name == NULL) ||
	    (new_dir == NULL) || (new_name == NULL))
		goto out;

	ncp_init_request(server);
	ncp_add_byte(server, 4);	/* subfunction */
	ncp_add_byte(server, server->name_space[NCP_FINFO(old_dir)->volNumber]);
	ncp_add_byte(server, 1);	/* rename flag */
	ncp_add_word(server, old_type);	/* search attributes */

	/* source Handle Path */
	ncp_add_byte(server, NCP_FINFO(old_dir)->volNumber);
	ncp_add_dword(server, NCP_FINFO(old_dir)->dirEntNum);
	ncp_add_byte(server, 1);
	ncp_add_byte(server, 1);	/* 1 source component */

	/* dest Handle Path */
	ncp_add_byte(server, NCP_FINFO(new_dir)->volNumber);
	ncp_add_dword(server, NCP_FINFO(new_dir)->dirEntNum);
	ncp_add_byte(server, 1);
	ncp_add_byte(server, 1);	/* 1 destination component */

	/* source path string */
	ncp_add_pstring(server, old_name);
	/* dest path string */
	ncp_add_pstring(server, new_name);

	result = ncp_request(server, 87);
	ncp_unlock_server(server);
out:
	return result;
}

/*
 * Rename with retry ladder: try as a file (type 0x06); on "file not
 * found" (0xFF) retry as a directory (type 0x16); on NO_FILES_RENAMED
 * (0x92) delete the destination and rename once more.  Non-atomic, as
 * the original author's comment admits.
 */
int ncp_ren_or_mov_file_or_subdir(struct ncp_server *server,
				  struct inode *old_dir, char *old_name,
				  struct inode *new_dir, char *new_name)
{
	int result;
	__le16 old_type = cpu_to_le16(0x06);

	/* If somebody can do it atomic, call me... vandrove@vc.cvut.cz */
	result = ncp_RenameNSEntry(server, old_dir, old_name, old_type,
					   new_dir, new_name);
	if (result == 0xFF)	/* File Not Found, try directory */
	{
		old_type = cpu_to_le16(0x16);
		result = ncp_RenameNSEntry(server, old_dir, old_name, old_type,
						   new_dir, new_name);
	}
	if (result != 0x92) return result;	/* All except NO_FILES_RENAMED */
	result = ncp_del_file_or_subdir(server, new_dir, new_name);
	if (result != 0) return -EACCES;
	result = ncp_RenameNSEntry(server, old_dir, old_name, old_type,
					   new_dir, new_name);
	return result;
}

/* We have to transfer to/from user space */

/*
 * Read up to @to_read bytes at @offset from the open file identified by
 * the 6-byte @file_id (NCP call 72) into the kernel buffer @target.
 * *bytes_read is set from the reply; reply data starts at 2 + (offset & 1)
 * to account for the server's word alignment of odd offsets.
 */
int
ncp_read_kernel(struct ncp_server *server, const char *file_id,
		__u32 offset, __u16 to_read, char *target, int *bytes_read)
{
	char *source;
	int result;

	ncp_init_request(server);
	ncp_add_byte(server, 0);
	ncp_add_mem(server, file_id, 6);
	ncp_add_be32(server, offset);
	ncp_add_be16(server, to_read);

	if ((result = ncp_request(server, 72)) != 0) {
		goto out;
	}
	*bytes_read = ncp_reply_be16(server, 0);
	source = ncp_reply_data(server, 2 + (offset & 1));

	memcpy(target, source, *bytes_read);
out:
	ncp_unlock_server(server);
	return result;
}

/* There is a problem... egrep and some other silly tools do:
	x = mmap(NULL, MAP_PRIVATE, PROT_READ|PROT_WRITE, <ncpfs fd>, 32768);
	read(<ncpfs fd>, x, 32768);
   Now copying read result by copy_to_user causes pagefault. This pagefault
   could not be handled because of server was locked due to read.
   So we have to use temporary buffer. So ncp_unlock_server must be done
   before copy_to_user (and for write, copy_from_user must be done before
   ncp_init_request... same applies for send raw packet ioctl).

   Because of file is normally read in bigger chunks, caller provides
   kmalloced (vmalloced) chunk of memory with size >= to_read...
*/
/*
 * Like ncp_read_kernel() but for a userspace @target: the whole reply is
 * received into the caller-supplied @bounce buffer, the server lock is
 * dropped, and only then is the payload copied to user space (avoiding a
 * pagefault while the server is locked — see the comment above).
 * Returns 0, an NCP error, -EIO for an over-long reply, or -EFAULT.
 */
int
ncp_read_bounce(struct ncp_server *server, const char *file_id,
	 __u32 offset, __u16 to_read, char __user *target, int *bytes_read,
	 void* bounce, __u32 bufsize)
{
	int result;

	ncp_init_request(server);
	ncp_add_byte(server, 0);
	ncp_add_mem(server, file_id, 6);
	ncp_add_be32(server, offset);
	ncp_add_be16(server, to_read);
	result = ncp_request2(server, 72, bounce, bufsize);
	ncp_unlock_server(server);
	if (!result) {
		int len = get_unaligned_be16((char *)bounce +
			  sizeof(struct ncp_reply_header));
		result = -EIO;
		if (len <= to_read) {
			char* source;

			/* +2 skips the length word; (offset & 1) matches the
			   server's alignment padding for odd offsets. */
			source = (char*)bounce +
			         sizeof(struct ncp_reply_header) + 2 +
			         (offset & 1);
			*bytes_read = len;
			result = 0;
			if (copy_to_user(target, source, len))
				result = -EFAULT;
		}
	}
	return result;
}

/*
 * Write @to_write bytes from the kernel buffer @source at @offset to the
 * open file @file_id (NCP call 73).  On success *bytes_written is set to
 * @to_write — the protocol reply carries no partial-write count here.
 */
int
ncp_write_kernel(struct ncp_server *server, const char *file_id,
		 __u32 offset, __u16 to_write,
		 const char *source, int *bytes_written)
{
	int result;

	ncp_init_request(server);
	ncp_add_byte(server, 0);
	ncp_add_mem(server, file_id, 6);
	ncp_add_be32(server, offset);
	ncp_add_be16(server, to_write);
	ncp_add_mem(server, source, to_write);

	if ((result = ncp_request(server, 73)) == 0)
		*bytes_written = to_write;
	ncp_unlock_server(server);
	return result;
}

#ifdef CONFIG_NCPFS_IOCTL_LOCKING
/*
 * Lock a byte range of an open file (NCP call 0x1A).  @locktype, @offset,
 * @length and @timeout are passed straight through to the server.
 * Returns 0 on success or the NCP error code.
 */
int
ncp_LogPhysicalRecord(struct ncp_server *server, const char *file_id,
	  __u8 locktype, __u32 offset, __u32 length, __u16 timeout)
{
	int result;

	ncp_init_request(server);
	ncp_add_byte(server, locktype);
	ncp_add_mem(server, file_id, 6);
	ncp_add_be32(server, offset);
	ncp_add_be32(server, length);
	ncp_add_be16(server, timeout);

	if ((result = ncp_request(server, 0x1A)) != 0) {
		ncp_unlock_server(server);
		return result;
	}
	ncp_unlock_server(server);
	return 0;
}

/*
 * Release a byte-range lock previously taken with
 * ncp_LogPhysicalRecord() (NCP call 0x1E).
 */
int
ncp_ClearPhysicalRecord(struct ncp_server *server, const char *file_id,
	  __u32 offset, __u32 length)
{
	int result;

	ncp_init_request(server);
	ncp_add_byte(server, 0);	/* who knows... lanalyzer says that */
	ncp_add_mem(server, file_id, 6);
	ncp_add_be32(server, offset);
	ncp_add_be32(server, length);

	if ((result = ncp_request(server, 0x1E)) != 0) {
		ncp_unlock_server(server);
		return result;
	}
	ncp_unlock_server(server);
	return 0;
}
#endif	/* CONFIG_NCPFS_IOCTL_LOCKING */

#ifdef CONFIG_NCPFS_NLS
/* This are the NLS conversion routines with inspirations and code parts
   from the vfat file system and hints from Petr Vandrovec. */

/*
 * Convert an I/O-charset name @iname (length @ilen) to the volume charset
 * into @vname.  *vlen holds the output capacity on entry and the produced
 * length on return.  Handles UTF-8 volumes, the NCP_ESC + 4-hex-digit
 * escape for otherwise unrepresentable characters, and optional
 * uppercasing when @cc is set.  Returns 0 or a negative errno.
 */
int
ncp__io2vol(struct ncp_server *server, unsigned char *vname, unsigned int *vlen,
		const unsigned char *iname, unsigned int ilen, int cc)
{
	struct nls_table *in = server->nls_io;
	struct nls_table *out = server->nls_vol;
	unsigned char *vname_start;
	unsigned char *vname_end;
	const unsigned char *iname_end;

	iname_end = iname + ilen;
	vname_start = vname;
	vname_end = vname + *vlen - 1;	/* reserve room for the NUL */

	while (iname < iname_end) {
		int chl;
		wchar_t ec;

		if (NCP_IS_FLAG(server, NCP_FLAG_UTF8)) {
			int k;
			unicode_t u;

			k = utf8_to_utf32(iname, iname_end - iname, &u);
			if (k < 0 || u > MAX_WCHAR_T)
				return -EINVAL;
			iname += k;
			ec = u;
		} else {
			if (*iname == NCP_ESC) {
				int k;

				/* NCP_ESC is followed by 4 hex digits
				   encoding the unicode value. */
				if (iname_end - iname < 5)
					goto nospec;

				ec = 0;
				for (k = 1; k < 5; k++) {
					unsigned char nc;

					nc = iname[k] - '0';
					if (nc >= 10) {
						nc -= 'A' - '0' - 10;
						if ((nc < 10) || (nc > 15)) {
							goto nospec;
						}
					}
					ec = (ec << 4) | nc;
				}
				iname += 5;
			} else {
nospec:;
				if ( (chl = in->char2uni(iname, iname_end - iname, &ec)) < 0)
					return chl;
				iname += chl;
			}
		}

		/* unitoupper should be here! */

		chl = out->uni2char(ec, vname, vname_end - vname);
		if (chl < 0)
			return chl;

		/* this is wrong...
 */
		/* Uppercase byte-by-byte — the original author flags this
		   as wrong for multibyte output characters. */
		if (cc) {
			int chi;

			for (chi = 0; chi < chl; chi++){
				vname[chi] = ncp_toupper(out, vname[chi]);
			}
		}
		vname += chl;
	}

	*vname = 0;
	*vlen = vname - vname_start;
	return 0;
}

/*
 * Convert a volume-charset name @vname (length @vlen) to the I/O charset
 * into @iname; *ilen is capacity in / produced length out.  When @cc is
 * set the input is first lowercased into a temporary copy (flagged as
 * wrong for multibyte charsets by the original author).  Characters that
 * cannot be represented are emitted as NCP_ESC + 4 hex digits.  Returns
 * 0 or a negative errno (-ENOMEM, -ENAMETOOLONG, or a charset error).
 */
int
ncp__vol2io(struct ncp_server *server, unsigned char *iname, unsigned int *ilen,
		const unsigned char *vname, unsigned int vlen, int cc)
{
	struct nls_table *in = server->nls_vol;
	struct nls_table *out = server->nls_io;
	const unsigned char *vname_end;
	unsigned char *iname_start;
	unsigned char *iname_end;
	unsigned char *vname_cc;
	int err;

	vname_cc = NULL;

	if (cc) {
		int i;

		/* this is wrong! */
		vname_cc = kmalloc(vlen, GFP_KERNEL);
		if (!vname_cc)
			return -ENOMEM;
		for (i = 0; i < vlen; i++)
			vname_cc[i] = ncp_tolower(in, vname[i]);
		vname = vname_cc;
	}

	iname_start = iname;
	iname_end = iname + *ilen - 1;	/* reserve room for the NUL */
	vname_end = vname + vlen;

	while (vname < vname_end) {
		wchar_t ec;
		int chl;

		if ( (chl = in->char2uni(vname, vname_end - vname, &ec)) < 0) {
			err = chl;
			goto quit;
		}
		vname += chl;

		/* unitolower should be here! */

		if (NCP_IS_FLAG(server, NCP_FLAG_UTF8)) {
			int k;

			k = utf32_to_utf8(ec, iname, iname_end - iname);
			if (k < 0) {
				err = -ENAMETOOLONG;
				goto quit;
			}
			iname += k;
		} else {
			if ( (chl = out->uni2char(ec, iname, iname_end - iname)) >= 0) {
				iname += chl;
			} else {
				int k;

				/* Not representable: emit NCP_ESC plus the
				   unicode value as 4 hex digits. */
				if (iname_end - iname < 5) {
					err = -ENAMETOOLONG;
					goto quit;
				}
				*iname = NCP_ESC;
				for (k = 4; k > 0; k--) {
					unsigned char v;

					v = (ec & 0xF) + '0';
					if (v > '9') {
						v += 'A' - '9' - 1;
					}
					iname[k] = v;
					ec >>= 4;
				}
				iname += 5;
			}
		}
	}

	*iname = 0;
	*ilen = iname - iname_start;
	err = 0;
quit:;
	if (cc)
		kfree(vname_cc);
	return err;
}

#else

/* Non-NLS fallback: byte-wise copy with optional ASCII case folding. */
int
ncp__io2vol(unsigned char *vname, unsigned int *vlen,
		const unsigned char *iname, unsigned int ilen, int cc)
{
	int i;

	if (*vlen <= ilen)
		return -ENAMETOOLONG;

	if (cc)
		for (i = 0; i < ilen; i++) {
			*vname = toupper(*iname);
			vname++;
			iname++;
		}
	else {
		memmove(vname, iname, ilen);
		vname += ilen;
	}

	*vlen = ilen;
	*vname = 0;
	return 0;
}

/* Non-NLS fallback: byte-wise copy with optional ASCII case folding. */
int
ncp__vol2io(unsigned char *iname, unsigned int *ilen,
		const unsigned char *vname, unsigned int vlen, int cc)
{
	int i;

	if (*ilen <= vlen)
		return -ENAMETOOLONG;

	if (cc)
		for (i = 0; i < vlen; i++) {
			*iname = tolower(*vname);
			iname++;
			vname++;
		}
	else {
		memmove(iname, vname, vlen);
		iname += vlen;
	}

	*ilen = vlen;
	*iname = 0;
	return 0;
}

#endif
gpl-2.0
beidl/phablet_kernel_samsung_tuna
arch/arm/mach-shmobile/setup-sh7372.c
2286
19036
/*
 * sh7372 processor support
 *
 * Copyright (C) 2010  Magnus Damm
 * Copyright (C) 2008  Yoshihiro Shimoda
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/platform_device.h>
#include <linux/uio_driver.h>
#include <linux/delay.h>
#include <linux/input.h>
#include <linux/io.h>
#include <linux/serial_sci.h>
#include <linux/sh_dma.h>
#include <linux/sh_intc.h>
#include <linux/sh_timer.h>
#include <mach/hardware.h>
#include <mach/sh7372.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>

/*
 * Static platform-device descriptions for the sh7372 on-chip peripherals:
 * SCIFA0-5 + SCIFB serial ports, CMT/TMU timers, two I2C controllers,
 * three DMA engines, and the multimedia IP blocks (VPU/VEU/JPU/SPU)
 * exposed to userspace via uio_pdrv_genirq.  All addresses and event
 * codes are hardware constants — do not "clean up" the values.
 */

/* SCIFA0 */
static struct plat_sci_port scif0_platform_data = {
	.mapbase	= 0xe6c40000,
	.flags		= UPF_BOOT_AUTOCONF,
	.scscr		= SCSCR_RE | SCSCR_TE,
	.scbrr_algo_id	= SCBRR_ALGO_4,
	.type		= PORT_SCIFA,
	.irqs		= { evt2irq(0x0c00), evt2irq(0x0c00),
			    evt2irq(0x0c00), evt2irq(0x0c00) },
};

static struct platform_device scif0_device = {
	.name		= "sh-sci",
	.id		= 0,
	.dev		= {
		.platform_data	= &scif0_platform_data,
	},
};

/* SCIFA1 */
static struct plat_sci_port scif1_platform_data = {
	.mapbase	= 0xe6c50000,
	.flags		= UPF_BOOT_AUTOCONF,
	.scscr		= SCSCR_RE | SCSCR_TE,
	.scbrr_algo_id	= SCBRR_ALGO_4,
	.type		= PORT_SCIFA,
	.irqs		= { evt2irq(0x0c20), evt2irq(0x0c20),
			    evt2irq(0x0c20), evt2irq(0x0c20) },
};

static struct platform_device scif1_device = {
	.name		= "sh-sci",
	.id		= 1,
	.dev		= {
		.platform_data	= &scif1_platform_data,
	},
};

/* SCIFA2 */
static struct plat_sci_port scif2_platform_data = {
	.mapbase	= 0xe6c60000,
	.flags		= UPF_BOOT_AUTOCONF,
	.scscr		= SCSCR_RE | SCSCR_TE,
	.scbrr_algo_id	= SCBRR_ALGO_4,
	.type		= PORT_SCIFA,
	.irqs		= { evt2irq(0x0c40), evt2irq(0x0c40),
			    evt2irq(0x0c40), evt2irq(0x0c40) },
};

static struct platform_device scif2_device = {
	.name		= "sh-sci",
	.id		= 2,
	.dev		= {
		.platform_data	= &scif2_platform_data,
	},
};

/* SCIFA3 */
static struct plat_sci_port scif3_platform_data = {
	.mapbase	= 0xe6c70000,
	.flags		= UPF_BOOT_AUTOCONF,
	.scscr		= SCSCR_RE | SCSCR_TE,
	.scbrr_algo_id	= SCBRR_ALGO_4,
	.type		= PORT_SCIFA,
	.irqs		= { evt2irq(0x0c60), evt2irq(0x0c60),
			    evt2irq(0x0c60), evt2irq(0x0c60) },
};

static struct platform_device scif3_device = {
	.name		= "sh-sci",
	.id		= 3,
	.dev		= {
		.platform_data	= &scif3_platform_data,
	},
};

/* SCIFA4 */
static struct plat_sci_port scif4_platform_data = {
	.mapbase	= 0xe6c80000,
	.flags		= UPF_BOOT_AUTOCONF,
	.scscr		= SCSCR_RE | SCSCR_TE,
	.scbrr_algo_id	= SCBRR_ALGO_4,
	.type		= PORT_SCIFA,
	.irqs		= { evt2irq(0x0d20), evt2irq(0x0d20),
			    evt2irq(0x0d20), evt2irq(0x0d20) },
};

static struct platform_device scif4_device = {
	.name		= "sh-sci",
	.id		= 4,
	.dev		= {
		.platform_data	= &scif4_platform_data,
	},
};

/* SCIFA5 */
static struct plat_sci_port scif5_platform_data = {
	.mapbase	= 0xe6cb0000,
	.flags		= UPF_BOOT_AUTOCONF,
	.scscr		= SCSCR_RE | SCSCR_TE,
	.scbrr_algo_id	= SCBRR_ALGO_4,
	.type		= PORT_SCIFA,
	.irqs		= { evt2irq(0x0d40), evt2irq(0x0d40),
			    evt2irq(0x0d40), evt2irq(0x0d40) },
};

static struct platform_device scif5_device = {
	.name		= "sh-sci",
	.id		= 5,
	.dev		= {
		.platform_data	= &scif5_platform_data,
	},
};

/* SCIFB */
static struct plat_sci_port scif6_platform_data = {
	.mapbase	= 0xe6c30000,
	.flags		= UPF_BOOT_AUTOCONF,
	.scscr		= SCSCR_RE | SCSCR_TE,
	.scbrr_algo_id	= SCBRR_ALGO_4,
	.type		= PORT_SCIFB,
	.irqs		= { evt2irq(0x0d60), evt2irq(0x0d60),
			    evt2irq(0x0d60), evt2irq(0x0d60) },
};

static struct platform_device scif6_device = {
	.name		= "sh-sci",
	.id		= 6,
	.dev		= {
		.platform_data	= &scif6_platform_data,
	},
};

/* CMT */
static struct sh_timer_config cmt10_platform_data = {
	.name = "CMT10",
	.channel_offset = 0x10,
	.timer_bit = 0,
	.clockevent_rating = 125,
	.clocksource_rating = 125,
};

static struct resource cmt10_resources[] = {
	[0] = {
		.name	= "CMT10",
		.start	= 0xe6138010,
		.end	= 0xe613801b,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= evt2irq(0x0b00), /* CMT1_CMT10 */
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device cmt10_device = {
	.name		= "sh_cmt",
	.id		= 10,
	.dev = {
		.platform_data	= &cmt10_platform_data,
	},
	.resource	= cmt10_resources,
	.num_resources	= ARRAY_SIZE(cmt10_resources),
};

/* TMU */
static struct sh_timer_config tmu00_platform_data = {
	.name = "TMU00",
	.channel_offset = 0x4,
	.timer_bit = 0,
	.clockevent_rating = 200,
};

static struct resource tmu00_resources[] = {
	[0] = {
		.name	= "TMU00",
		.start	= 0xfff60008,
		.end	= 0xfff60013,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= intcs_evt2irq(0xe80), /* TMU_TUNI0 */
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device tmu00_device = {
	.name		= "sh_tmu",
	.id		= 0,
	.dev = {
		.platform_data	= &tmu00_platform_data,
	},
	.resource	= tmu00_resources,
	.num_resources	= ARRAY_SIZE(tmu00_resources),
};

static struct sh_timer_config tmu01_platform_data = {
	.name = "TMU01",
	.channel_offset = 0x10,
	.timer_bit = 1,
	.clocksource_rating = 200,
};

static struct resource tmu01_resources[] = {
	[0] = {
		.name	= "TMU01",
		.start	= 0xfff60014,
		.end	= 0xfff6001f,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= intcs_evt2irq(0xea0), /* TMU_TUNI1 */
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device tmu01_device = {
	.name		= "sh_tmu",
	.id		= 1,
	.dev = {
		.platform_data	= &tmu01_platform_data,
	},
	.resource	= tmu01_resources,
	.num_resources	= ARRAY_SIZE(tmu01_resources),
};

/* I2C */
static struct resource iic0_resources[] = {
	[0] = {
		.name	= "IIC0",
		.start	= 0xFFF20000,
		.end	= 0xFFF20425 - 1,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= intcs_evt2irq(0xe00), /* IIC0_ALI0 */
		.end	= intcs_evt2irq(0xe60), /* IIC0_DTEI0 */
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device iic0_device = {
	.name		= "i2c-sh_mobile",
	.id		= 0, /* "i2c0" clock */
	.num_resources	= ARRAY_SIZE(iic0_resources),
	.resource	= iic0_resources,
};

static struct resource iic1_resources[] = {
	[0] = {
		.name	= "IIC1",
		.start	= 0xE6C20000,
		.end	= 0xE6C20425 - 1,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= evt2irq(0x780), /* IIC1_ALI1 */
		.end	= evt2irq(0x7e0), /* IIC1_DTEI1 */
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device iic1_device = {
	.name		= "i2c-sh_mobile",
	.id		= 1, /* "i2c1" clock */
	.num_resources	= ARRAY_SIZE(iic1_resources),
	.resource	= iic1_resources,
};

/* DMA */
/* Transmit sizes and respective CHCR register values */
enum {
	XMIT_SZ_8BIT		= 0,
	XMIT_SZ_16BIT		= 1,
	XMIT_SZ_32BIT		= 2,
	XMIT_SZ_64BIT		= 7,
	XMIT_SZ_128BIT		= 3,
	XMIT_SZ_256BIT		= 4,
	XMIT_SZ_512BIT		= 5,
};

/* log2(size / 8) - used to calculate number of transfers */
#define TS_SHIFT {			\
	[XMIT_SZ_8BIT]		= 0,	\
	[XMIT_SZ_16BIT]		= 1,	\
	[XMIT_SZ_32BIT]		= 2,	\
	[XMIT_SZ_64BIT]		= 3,	\
	[XMIT_SZ_128BIT]	= 4,	\
	[XMIT_SZ_256BIT]	= 5,	\
	[XMIT_SZ_512BIT]	= 6,	\
}

/* Packs the 4-bit transmit-size index into the split CHCR TS fields. */
#define TS_INDEX2VAL(i) ((((i) & 3) << 3) | \
			 (((i) & 0xc) << (20 - 2)))

static const struct sh_dmae_slave_config sh7372_dmae_slaves[] = {
	{
		.slave_id	= SHDMA_SLAVE_SCIF0_TX,
		.addr		= 0xe6c40020,
		.chcr		= DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT),
		.mid_rid	= 0x21,
	}, {
		.slave_id	= SHDMA_SLAVE_SCIF0_RX,
		.addr		= 0xe6c40024,
		.chcr		= DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT),
		.mid_rid	= 0x22,
	}, {
		.slave_id	= SHDMA_SLAVE_SCIF1_TX,
		.addr		= 0xe6c50020,
		.chcr		= DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT),
		.mid_rid	= 0x25,
	}, {
		.slave_id	= SHDMA_SLAVE_SCIF1_RX,
		.addr		= 0xe6c50024,
		.chcr		= DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT),
		.mid_rid	= 0x26,
	}, {
		.slave_id	= SHDMA_SLAVE_SCIF2_TX,
		.addr		= 0xe6c60020,
		.chcr		= DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT),
		.mid_rid	= 0x29,
	}, {
		.slave_id	= SHDMA_SLAVE_SCIF2_RX,
		.addr		= 0xe6c60024,
		.chcr		= DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT),
		.mid_rid	= 0x2a,
	}, {
		.slave_id	= SHDMA_SLAVE_SCIF3_TX,
		.addr		= 0xe6c70020,
		.chcr		= DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT),
		.mid_rid	= 0x2d,
	}, {
		.slave_id	= SHDMA_SLAVE_SCIF3_RX,
		.addr		= 0xe6c70024,
		.chcr		= DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT),
		.mid_rid	= 0x2e,
	}, {
		.slave_id	= SHDMA_SLAVE_SCIF4_TX,
		.addr		= 0xe6c80020,
		.chcr		= DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT),
		.mid_rid	= 0x39,
	}, {
		.slave_id	= SHDMA_SLAVE_SCIF4_RX,
		.addr		= 0xe6c80024,
		.chcr		= DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT),
		.mid_rid	= 0x3a,
	}, {
		.slave_id	= SHDMA_SLAVE_SCIF5_TX,
		.addr		= 0xe6cb0020,
		.chcr		= DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT),
		.mid_rid	= 0x35,
	}, {
		.slave_id	= SHDMA_SLAVE_SCIF5_RX,
		.addr		= 0xe6cb0024,
		.chcr		= DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT),
		.mid_rid	= 0x36,
	}, {
		.slave_id	= SHDMA_SLAVE_SCIF6_TX,
		.addr		= 0xe6c30040,
		.chcr		= DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT),
		.mid_rid	= 0x3d,
	}, {
		.slave_id	= SHDMA_SLAVE_SCIF6_RX,
		.addr		= 0xe6c30060,
		.chcr		= DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT),
		.mid_rid	= 0x3e,
	}, {
		.slave_id	= SHDMA_SLAVE_SDHI0_TX,
		.addr		= 0xe6850030,
		.chcr		= DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_16BIT),
		.mid_rid	= 0xc1,
	}, {
		.slave_id	= SHDMA_SLAVE_SDHI0_RX,
		.addr		= 0xe6850030,
		.chcr		= DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_16BIT),
		.mid_rid	= 0xc2,
	}, {
		.slave_id	= SHDMA_SLAVE_SDHI1_TX,
		.addr		= 0xe6860030,
		.chcr		= DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_16BIT),
		.mid_rid	= 0xc9,
	}, {
		.slave_id	= SHDMA_SLAVE_SDHI1_RX,
		.addr		= 0xe6860030,
		.chcr		= DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_16BIT),
		.mid_rid	= 0xca,
	}, {
		.slave_id	= SHDMA_SLAVE_SDHI2_TX,
		.addr		= 0xe6870030,
		.chcr		= DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_16BIT),
		.mid_rid	= 0xcd,
	}, {
		.slave_id	= SHDMA_SLAVE_SDHI2_RX,
		.addr		= 0xe6870030,
		.chcr		= DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_16BIT),
		.mid_rid	= 0xce,
	}, {
		.slave_id	= SHDMA_SLAVE_MMCIF_TX,
		.addr		= 0xe6bd0034,
		.chcr		= DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_32BIT),
		.mid_rid	= 0xd1,
	}, {
		.slave_id	= SHDMA_SLAVE_MMCIF_RX,
		.addr		= 0xe6bd0034,
		.chcr		= DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_32BIT),
		.mid_rid	= 0xd2,
	},
};

static const struct sh_dmae_channel sh7372_dmae_channels[] = {
	{
		.offset = 0,
		.dmars = 0,
		.dmars_bit = 0,
	}, {
		.offset = 0x10,
		.dmars = 0,
		.dmars_bit = 8,
	}, {
		.offset = 0x20,
		.dmars = 4,
		.dmars_bit = 0,
	}, {
		.offset = 0x30,
		.dmars = 4,
		.dmars_bit = 8,
	}, {
		.offset = 0x50,
		.dmars = 8,
		.dmars_bit = 0,
	}, {
		.offset = 0x60,
		.dmars = 8,
		.dmars_bit = 8,
	}
};

static const unsigned int ts_shift[] = TS_SHIFT;

/* Shared by all three DMA engine instances below. */
static struct sh_dmae_pdata dma_platform_data = {
	.slave		= sh7372_dmae_slaves,
	.slave_num	= ARRAY_SIZE(sh7372_dmae_slaves),
	.channel	= sh7372_dmae_channels,
	.channel_num	= ARRAY_SIZE(sh7372_dmae_channels),
	.ts_low_shift	= 3,
	.ts_low_mask	= 0x18,
	.ts_high_shift	= (20 - 2),	/* 2 bits for shifted low TS */
	.ts_high_mask	= 0x00300000,
	.ts_shift	= ts_shift,
	.ts_shift_num	= ARRAY_SIZE(ts_shift),
	.dmaor_init	= DMAOR_DME,
};

/* Resource order important! */
static struct resource sh7372_dmae0_resources[] = {
	{
		/* Channel registers and DMAOR */
		.start	= 0xfe008020,
		.end	= 0xfe00808f,
		.flags	= IORESOURCE_MEM,
	},
	{
		/* DMARSx */
		.start	= 0xfe009000,
		.end	= 0xfe00900b,
		.flags	= IORESOURCE_MEM,
	},
	{
		/* DMA error IRQ */
		.start	= evt2irq(0x20c0),
		.end	= evt2irq(0x20c0),
		.flags	= IORESOURCE_IRQ,
	},
	{
		/* IRQ for channels 0-5 */
		.start	= evt2irq(0x2000),
		.end	= evt2irq(0x20a0),
		.flags	= IORESOURCE_IRQ,
	},
};

/* Resource order important! */
static struct resource sh7372_dmae1_resources[] = {
	{
		/* Channel registers and DMAOR */
		.start	= 0xfe018020,
		.end	= 0xfe01808f,
		.flags	= IORESOURCE_MEM,
	},
	{
		/* DMARSx */
		.start	= 0xfe019000,
		.end	= 0xfe01900b,
		.flags	= IORESOURCE_MEM,
	},
	{
		/* DMA error IRQ */
		.start	= evt2irq(0x21c0),
		.end	= evt2irq(0x21c0),
		.flags	= IORESOURCE_IRQ,
	},
	{
		/* IRQ for channels 0-5 */
		.start	= evt2irq(0x2100),
		.end	= evt2irq(0x21a0),
		.flags	= IORESOURCE_IRQ,
	},
};

/* Resource order important! */
static struct resource sh7372_dmae2_resources[] = {
	{
		/* Channel registers and DMAOR */
		.start	= 0xfe028020,
		.end	= 0xfe02808f,
		.flags	= IORESOURCE_MEM,
	},
	{
		/* DMARSx */
		.start	= 0xfe029000,
		.end	= 0xfe02900b,
		.flags	= IORESOURCE_MEM,
	},
	{
		/* DMA error IRQ */
		.start	= evt2irq(0x22c0),
		.end	= evt2irq(0x22c0),
		.flags	= IORESOURCE_IRQ,
	},
	{
		/* IRQ for channels 0-5 */
		.start	= evt2irq(0x2200),
		.end	= evt2irq(0x22a0),
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device dma0_device = {
	.name		= "sh-dma-engine",
	.id		= 0,
	.resource	= sh7372_dmae0_resources,
	.num_resources	= ARRAY_SIZE(sh7372_dmae0_resources),
	.dev		= {
		.platform_data	= &dma_platform_data,
	},
};

static struct platform_device dma1_device = {
	.name		= "sh-dma-engine",
	.id		= 1,
	.resource	= sh7372_dmae1_resources,
	.num_resources	= ARRAY_SIZE(sh7372_dmae1_resources),
	.dev		= {
		.platform_data	= &dma_platform_data,
	},
};

static struct platform_device dma2_device = {
	.name		= "sh-dma-engine",
	.id		= 2,
	.resource	= sh7372_dmae2_resources,
	.num_resources	= ARRAY_SIZE(sh7372_dmae2_resources),
	.dev		= {
		.platform_data	= &dma_platform_data,
	},
};

/* VPU */
static struct uio_info vpu_platform_data = {
	.name = "VPU5HG",
	.version = "0",
	.irq = intcs_evt2irq(0x980),
};

static struct resource vpu_resources[] = {
	[0] = {
		.name	= "VPU",
		.start	= 0xfe900000,
		.end	= 0xfe900157,
		.flags	= IORESOURCE_MEM,
	},
};

static struct platform_device vpu_device = {
	.name		= "uio_pdrv_genirq",
	.id		= 0,
	.dev = {
		.platform_data	= &vpu_platform_data,
	},
	.resource	= vpu_resources,
	.num_resources	= ARRAY_SIZE(vpu_resources),
};

/* VEU0 */
static struct uio_info veu0_platform_data = {
	.name = "VEU0",
	.version = "0",
	.irq = intcs_evt2irq(0x700),
};

static struct resource veu0_resources[] = {
	[0] = {
		.name	= "VEU0",
		.start	= 0xfe920000,
		.end	= 0xfe9200cb,
		.flags	= IORESOURCE_MEM,
	},
};

static struct platform_device veu0_device = {
	.name		= "uio_pdrv_genirq",
	.id		= 1,
	.dev = {
		.platform_data	= &veu0_platform_data,
	},
	.resource	= veu0_resources,
	.num_resources	= ARRAY_SIZE(veu0_resources),
};

/* VEU1 */
static struct uio_info veu1_platform_data = {
	.name = "VEU1",
	.version = "0",
	.irq = intcs_evt2irq(0x720),
};

static struct resource veu1_resources[] = {
	[0] = {
		.name	= "VEU1",
		.start	= 0xfe924000,
		.end	= 0xfe9240cb,
		.flags	= IORESOURCE_MEM,
	},
};

static struct platform_device veu1_device = {
	.name		= "uio_pdrv_genirq",
	.id		= 2,
	.dev = {
		.platform_data	= &veu1_platform_data,
	},
	.resource	= veu1_resources,
	.num_resources	= ARRAY_SIZE(veu1_resources),
};

/* VEU2 */
static struct uio_info veu2_platform_data = {
	.name = "VEU2",
	.version = "0",
	.irq = intcs_evt2irq(0x740),
};

static struct resource veu2_resources[] = {
	[0] = {
		.name	= "VEU2",
		.start	= 0xfe928000,
		.end	= 0xfe928307,
		.flags	= IORESOURCE_MEM,
	},
};

static struct platform_device veu2_device = {
	.name		= "uio_pdrv_genirq",
	.id		= 3,
	.dev = {
		.platform_data	= &veu2_platform_data,
	},
	.resource	= veu2_resources,
	.num_resources	= ARRAY_SIZE(veu2_resources),
};

/* VEU3 */
static struct uio_info veu3_platform_data = {
	.name = "VEU3",
	.version = "0",
	.irq = intcs_evt2irq(0x760),
};

static struct resource veu3_resources[] = {
	[0] = {
		.name	= "VEU3",
		.start	= 0xfe92c000,
		.end	= 0xfe92c307,
		.flags	= IORESOURCE_MEM,
	},
};

static struct platform_device veu3_device = {
	.name		= "uio_pdrv_genirq",
	.id		= 4,
	.dev = {
		.platform_data	= &veu3_platform_data,
	},
	.resource	= veu3_resources,
	.num_resources	= ARRAY_SIZE(veu3_resources),
};

/* JPU */
static struct uio_info jpu_platform_data = {
	.name = "JPU",
	.version = "0",
	.irq = intcs_evt2irq(0x560),
};

static struct resource jpu_resources[] = {
	[0] = {
		.name	= "JPU",
		.start	= 0xfe980000,
		.end	= 0xfe9902d3,
		.flags	= IORESOURCE_MEM,
	},
};

static struct platform_device jpu_device = {
	.name		= "uio_pdrv_genirq",
	.id		= 5,
	.dev = {
		.platform_data	= &jpu_platform_data,
	},
	.resource	= jpu_resources,
	.num_resources	= ARRAY_SIZE(jpu_resources),
};

/* SPU2DSP0 */
static struct uio_info spu0_platform_data = {
	.name = "SPU2DSP0",
	.version = "0",
	.irq = evt2irq(0x1800),
};

static struct resource spu0_resources[] = {
	[0] = {
		.name	= "SPU2DSP0",
		.start	= 0xfe200000,
		.end	= 0xfe2fffff,
		.flags	= IORESOURCE_MEM,
	},
};

static struct platform_device spu0_device = {
	.name		= "uio_pdrv_genirq",
	.id		= 6,
	.dev = {
		.platform_data	= &spu0_platform_data,
	},
	.resource	= spu0_resources,
	.num_resources	= ARRAY_SIZE(spu0_resources),
};

/* SPU2DSP1 */
static struct uio_info spu1_platform_data = {
	.name = "SPU2DSP1",
	.version = "0",
	.irq = evt2irq(0x1820),
};

static struct resource spu1_resources[] = {
	[0] = {
		.name	= "SPU2DSP1",
		.start	= 0xfe300000,
		.end	= 0xfe3fffff,
		.flags	= IORESOURCE_MEM,
	},
};

static struct platform_device spu1_device = {
	.name		= "uio_pdrv_genirq",
	.id		= 7,
	.dev = {
		.platform_data	= &spu1_platform_data,
	},
	.resource	= spu1_resources,
	.num_resources	= ARRAY_SIZE(spu1_resources),
};

/* Devices usable before normal driver init (serial console, timers). */
static struct platform_device *sh7372_early_devices[] __initdata = {
	&scif0_device,
	&scif1_device,
	&scif2_device,
	&scif3_device,
	&scif4_device,
	&scif5_device,
	&scif6_device,
	&cmt10_device,
	&tmu00_device,
	&tmu01_device,
};

static struct platform_device *sh7372_late_devices[] __initdata = {
	&iic0_device,
	&iic1_device,
	&dma0_device,
	&dma1_device,
	&dma2_device,
	&vpu_device,
	&veu0_device,
	&veu1_device,
	&veu2_device,
	&veu3_device,
	&jpu_device,
	&spu0_device,
	&spu1_device,
};

/* Register every sh7372 on-chip platform device. */
void __init sh7372_add_standard_devices(void)
{
	platform_add_devices(sh7372_early_devices,
			    ARRAY_SIZE(sh7372_early_devices));

	platform_add_devices(sh7372_late_devices,
			    ARRAY_SIZE(sh7372_late_devices));
}

/* Register only the early subset for use before full platform init. */
void __init sh7372_add_early_devices(void)
{
	early_platform_add_devices(sh7372_early_devices,
				   ARRAY_SIZE(sh7372_early_devices));
}
gpl-2.0
balika011/android_kernel_xiaomi_kenzo
drivers/gpu/drm/shmobile/shmob_drm_plane.c
2286
7296
/*
 * shmob_drm_plane.c -- SH Mobile DRM Planes
 *
 * Copyright (C) 2012 Renesas Corporation
 *
 * Laurent Pinchart (laurent.pinchart@ideasonboard.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_gem_cma_helper.h>

#include <video/sh_mobile_meram.h>

#include "shmob_drm_drv.h"
#include "shmob_drm_kms.h"
#include "shmob_drm_plane.h"
#include "shmob_drm_regs.h"

/*
 * Per-plane software state.  Caches the geometry and framebuffer addresses
 * programmed by the last update so that shmob_drm_plane_setup() can
 * reprogram the hardware without being handed the parameters again.
 */
struct shmob_drm_plane {
	struct drm_plane plane;		/* base DRM plane, must be first for to_shmob_plane() */
	unsigned int index;		/* hardware plane index, selects the LDBnB* register set */
	unsigned int alpha;		/* alpha/priority value shifted into LDBBSIFR_LAY (0-255) */

	const struct shmob_drm_format_info *format;	/* NULL when the plane is disabled */
	unsigned long dma[2];		/* DMA addresses: [0] = Y/packed plane, [1] = chroma plane (YUV only) */

	unsigned int src_x;		/* source rectangle origin in the framebuffer (pixels) */
	unsigned int src_y;
	unsigned int crtc_x;		/* destination rectangle on the CRTC (pixels) */
	unsigned int crtc_y;
	unsigned int crtc_w;
	unsigned int crtc_h;
};

#define to_shmob_plane(p)	container_of(p, struct shmob_drm_plane, plane)

/*
 * Compute the DMA start address(es) for the given framebuffer and source
 * origin (x, y) and store them in splane->dma[].
 *
 * For YUV formats the luma plane is always 8 bpp (hence bpp = 8 for the
 * first plane) and a second, separate chroma plane is fetched from GEM
 * object 1; for packed RGB only dma[0] is used.
 */
static void shmob_drm_plane_compute_base(struct shmob_drm_plane *splane,
					 struct drm_framebuffer *fb,
					 int x, int y)
{
	struct drm_gem_cma_object *gem;
	unsigned int bpp;

	bpp = splane->format->yuv ? 8 : splane->format->bpp;
	gem = drm_fb_cma_get_gem_obj(fb, 0);
	splane->dma[0] = gem->paddr + fb->offsets[0]
		       + y * fb->pitches[0] + x * bpp / 8;

	if (splane->format->yuv) {
		/*
		 * Chroma plane: bpp here is the chroma bits per pixel
		 * (total bpp minus the 8-bit luma).  4:2:0 (bpp == 4)
		 * halves the vertical resolution; 4:2:2/4:4:4 chroma is
		 * stored with 1 or 2 bytes per pixel horizontally.
		 */
		bpp = splane->format->bpp - 8;
		gem = drm_fb_cma_get_gem_obj(fb, 1);
		splane->dma[1] = gem->paddr + fb->offsets[1]
			       + y / (bpp == 4 ? 2 : 1) * fb->pitches[1]
			       + x * (bpp == 16 ? 2 : 1);
	}
}

/*
 * Program the hardware registers of the plane from the cached software
 * state and the given framebuffer.  Writes go to the "side B" register
 * set (LDBnB*) and are latched atomically via LDBCR at the end.
 */
static void __shmob_drm_plane_setup(struct shmob_drm_plane *splane,
				    struct drm_framebuffer *fb)
{
	struct shmob_drm_device *sdev = splane->plane.dev->dev_private;
	u32 format;

	/* TODO: Support ROP3 mode */
	format = LDBBSIFR_EN | (splane->alpha << LDBBSIFR_LAY_SHIFT);

	/* First pass: byte/word swap configuration per pixel format. */
	switch (splane->format->fourcc) {
	case DRM_FORMAT_RGB565:
	case DRM_FORMAT_NV21:
	case DRM_FORMAT_NV61:
	case DRM_FORMAT_NV42:
		format |= LDBBSIFR_SWPL | LDBBSIFR_SWPW;
		break;
	case DRM_FORMAT_RGB888:
	case DRM_FORMAT_NV12:
	case DRM_FORMAT_NV16:
	case DRM_FORMAT_NV24:
		format |= LDBBSIFR_SWPL | LDBBSIFR_SWPW | LDBBSIFR_SWPB;
		break;
	case DRM_FORMAT_ARGB8888:
	default:
		format |= LDBBSIFR_SWPL;
		break;
	}

	/* Second pass: alpha mode, packing format and chroma subsampling. */
	switch (splane->format->fourcc) {
	case DRM_FORMAT_RGB565:
		format |= LDBBSIFR_AL_1 | LDBBSIFR_RY | LDBBSIFR_RPKF_RGB16;
		break;
	case DRM_FORMAT_RGB888:
		format |= LDBBSIFR_AL_1 | LDBBSIFR_RY | LDBBSIFR_RPKF_RGB24;
		break;
	case DRM_FORMAT_ARGB8888:
		/*
		 * NOTE(review): uses the LDDFR_PKF_ARGB32 constant rather
		 * than an LDBBSIFR_RPKF_* one -- presumably the bitfields
		 * are compatible; confirm against shmob_drm_regs.h.
		 */
		format |= LDBBSIFR_AL_PK | LDBBSIFR_RY | LDDFR_PKF_ARGB32;
		break;
	case DRM_FORMAT_NV12:
	case DRM_FORMAT_NV21:
		format |= LDBBSIFR_AL_1 | LDBBSIFR_CHRR_420;
		break;
	case DRM_FORMAT_NV16:
	case DRM_FORMAT_NV61:
		format |= LDBBSIFR_AL_1 | LDBBSIFR_CHRR_422;
		break;
	case DRM_FORMAT_NV24:
	case DRM_FORMAT_NV42:
		format |= LDBBSIFR_AL_1 | LDBBSIFR_CHRR_444;
		break;
	}

/* Debug helper: dump both the side-A and side-B copy of a plane register. */
#define plane_reg_dump(sdev, splane, reg) \
	dev_dbg(sdev->ddev->dev, "%s(%u): %s 0x%08x 0x%08x\n", __func__, \
		splane->index, #reg, \
		lcdc_read(sdev, reg(splane->index)), \
		lcdc_read(sdev, reg(splane->index) + LCDC_SIDE_B_OFFSET))

	plane_reg_dump(sdev, splane, LDBnBSIFR);
	plane_reg_dump(sdev, splane, LDBnBSSZR);
	plane_reg_dump(sdev, splane, LDBnBLOCR);
	plane_reg_dump(sdev, splane, LDBnBSMWR);
	plane_reg_dump(sdev, splane, LDBnBSAYR);
	plane_reg_dump(sdev, splane, LDBnBSACR);

	/* Open the side-B registers of this plane for update. */
	lcdc_write(sdev, LDBCR, LDBCR_UPC(splane->index));
	dev_dbg(sdev->ddev->dev, "%s(%u): %s 0x%08x\n", __func__, splane->index,
		"LDBCR", lcdc_read(sdev, LDBCR));

	lcdc_write(sdev, LDBnBSIFR(splane->index), format);

	/* Destination size and position on the CRTC. */
	lcdc_write(sdev, LDBnBSSZR(splane->index),
		   (splane->crtc_h << LDBBSSZR_BVSS_SHIFT) |
		   (splane->crtc_w << LDBBSSZR_BHSS_SHIFT));
	lcdc_write(sdev, LDBnBLOCR(splane->index),
		   (splane->crtc_y << LDBBLOCR_CVLC_SHIFT) |
		   (splane->crtc_x << LDBBLOCR_CHLC_SHIFT));

	/* Line stride, then the computed DMA base address(es). */
	lcdc_write(sdev, LDBnBSMWR(splane->index),
		   fb->pitches[0] << LDBBSMWR_BSMW_SHIFT);

	shmob_drm_plane_compute_base(splane, fb, splane->src_x, splane->src_y);

	lcdc_write(sdev, LDBnBSAYR(splane->index), splane->dma[0]);
	if (splane->format->yuv)
		lcdc_write(sdev, LDBnBSACR(splane->index), splane->dma[1]);

	/* Latch the new side-B settings into the hardware. */
	lcdc_write(sdev, LDBCR,
		   LDBCR_UPF(splane->index) | LDBCR_UPD(splane->index));
	dev_dbg(sdev->ddev->dev, "%s(%u): %s 0x%08x\n", __func__, splane->index,
		"LDBCR", lcdc_read(sdev, LDBCR));

	plane_reg_dump(sdev, splane, LDBnBSIFR);
	plane_reg_dump(sdev, splane, LDBnBSSZR);
	plane_reg_dump(sdev, splane, LDBnBLOCR);
	plane_reg_dump(sdev, splane, LDBnBSMWR);
	plane_reg_dump(sdev, splane, LDBnBSAYR);
	plane_reg_dump(sdev, splane, LDBnBSACR);
}

/*
 * Reprogram the plane from its cached state, e.g. after a mode set.
 * No-op when the plane has no framebuffer or is disabled.
 */
void shmob_drm_plane_setup(struct drm_plane *plane)
{
	struct shmob_drm_plane *splane = to_shmob_plane(plane);

	if (plane->fb == NULL || !plane->enabled)
		return;

	__shmob_drm_plane_setup(splane, plane->fb);
}

/*
 * drm_plane_funcs.update_plane implementation.
 *
 * src_* are 16.16 fixed point as per the DRM plane API.  The hardware
 * cannot scale, so the source and destination sizes must match exactly.
 * Returns 0 on success or -EINVAL for unsupported formats or scaling.
 */
static int
shmob_drm_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
		       struct drm_framebuffer *fb, int crtc_x, int crtc_y,
		       unsigned int crtc_w, unsigned int crtc_h,
		       uint32_t src_x, uint32_t src_y,
		       uint32_t src_w, uint32_t src_h)
{
	struct shmob_drm_plane *splane = to_shmob_plane(plane);
	struct shmob_drm_device *sdev = plane->dev->dev_private;
	const struct shmob_drm_format_info *format;

	format = shmob_drm_format_info(fb->pixel_format);
	if (format == NULL) {
		dev_dbg(sdev->dev, "update_plane: unsupported format %08x\n",
			fb->pixel_format);
		return -EINVAL;
	}

	if (src_w >> 16 != crtc_w || src_h >> 16 != crtc_h) {
		dev_dbg(sdev->dev, "%s: scaling not supported\n", __func__);
		return -EINVAL;
	}

	/* Cache the new configuration, then program the hardware. */
	splane->format = format;

	splane->src_x = src_x >> 16;
	splane->src_y = src_y >> 16;
	splane->crtc_x = crtc_x;
	splane->crtc_y = crtc_y;
	splane->crtc_w = crtc_w;
	splane->crtc_h = crtc_h;

	__shmob_drm_plane_setup(splane, fb);

	return 0;
}

/* drm_plane_funcs.disable_plane: clear the enable bit in LDBnBSIFR. */
static int shmob_drm_plane_disable(struct drm_plane *plane)
{
	struct shmob_drm_plane *splane = to_shmob_plane(plane);
	struct shmob_drm_device *sdev = plane->dev->dev_private;

	splane->format = NULL;

	lcdc_write(sdev, LDBnBSIFR(splane->index), 0);
	return 0;
}

/* drm_plane_funcs.destroy: disable the hardware and free the plane. */
static void shmob_drm_plane_destroy(struct drm_plane *plane)
{
	struct shmob_drm_plane *splane = to_shmob_plane(plane);

	shmob_drm_plane_disable(plane);
	drm_plane_cleanup(plane);
	kfree(splane);
}

static const struct drm_plane_funcs shmob_drm_plane_funcs = {
	.update_plane = shmob_drm_plane_update,
	.disable_plane = shmob_drm_plane_disable,
	.destroy = shmob_drm_plane_destroy,
};

/* Pixel formats accepted by shmob_drm_plane_update(). */
static const uint32_t formats[] = {
	DRM_FORMAT_RGB565,
	DRM_FORMAT_RGB888,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_NV12,
	DRM_FORMAT_NV21,
	DRM_FORMAT_NV16,
	DRM_FORMAT_NV61,
	DRM_FORMAT_NV24,
	DRM_FORMAT_NV42,
};

/*
 * Allocate and register one overlay plane with the given hardware index.
 * The plane starts fully opaque (alpha = 255).  Frees the allocation on
 * registration failure; returns 0 on success or a negative error code.
 */
int shmob_drm_plane_create(struct shmob_drm_device *sdev, unsigned int index)
{
	struct shmob_drm_plane *splane;
	int ret;

	splane = kzalloc(sizeof(*splane), GFP_KERNEL);
	if (splane == NULL)
		return -ENOMEM;

	splane->index = index;
	splane->alpha = 255;

	ret = drm_plane_init(sdev->ddev, &splane->plane, 1,
			     &shmob_drm_plane_funcs, formats,
			     ARRAY_SIZE(formats), false);
	if (ret < 0)
		kfree(splane);

	return ret;
}
gpl-2.0
poondog/KANGAROO-kernel
tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
2542
7019
/* * x86_energy_perf_policy -- set the energy versus performance * policy preference bias on recent X86 processors. */ /* * Copyright (c) 2010, Intel Corporation. * Len Brown <len.brown@intel.com> * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. */ #include <stdio.h> #include <unistd.h> #include <sys/types.h> #include <sys/stat.h> #include <sys/resource.h> #include <fcntl.h> #include <signal.h> #include <sys/time.h> #include <stdlib.h> #include <string.h> unsigned int verbose; /* set with -v */ unsigned int read_only; /* set with -r */ char *progname; unsigned long long new_bias; int cpu = -1; /* * Usage: * * -c cpu: limit action to a single CPU (default is all CPUs) * -v: verbose output (can invoke more than once) * -r: read-only, don't change any settings * * performance * Performance is paramount. * Unwilling to sacrifice any performance * for the sake of energy saving. (hardware default) * * normal * Can tolerate minor performance compromise * for potentially significant energy savings. * (reasonable default for most desktops and servers) * * powersave * Can tolerate significant performance hit * to maximize energy savings. * * n * a numerical value to write to the underlying MSR. 
*/ void usage(void) { printf("%s: [-c cpu] [-v] " "(-r | 'performance' | 'normal' | 'powersave' | n)\n", progname); exit(1); } #define MSR_IA32_ENERGY_PERF_BIAS 0x000001b0 #define BIAS_PERFORMANCE 0 #define BIAS_BALANCE 6 #define BIAS_POWERSAVE 15 void cmdline(int argc, char **argv) { int opt; progname = argv[0]; while ((opt = getopt(argc, argv, "+rvc:")) != -1) { switch (opt) { case 'c': cpu = atoi(optarg); break; case 'r': read_only = 1; break; case 'v': verbose++; break; default: usage(); } } /* if -r, then should be no additional optind */ if (read_only && (argc > optind)) usage(); /* * if no -r , then must be one additional optind */ if (!read_only) { if (argc != optind + 1) { printf("must supply -r or policy param\n"); usage(); } if (!strcmp("performance", argv[optind])) { new_bias = BIAS_PERFORMANCE; } else if (!strcmp("normal", argv[optind])) { new_bias = BIAS_BALANCE; } else if (!strcmp("powersave", argv[optind])) { new_bias = BIAS_POWERSAVE; } else { char *endptr; new_bias = strtoull(argv[optind], &endptr, 0); if (endptr == argv[optind] || new_bias > BIAS_POWERSAVE) { fprintf(stderr, "invalid value: %s\n", argv[optind]); usage(); } } } } /* * validate_cpuid() * returns on success, quietly exits on failure (make verbose with -v) */ void validate_cpuid(void) { unsigned int eax, ebx, ecx, edx, max_level; char brand[16]; unsigned int fms, family, model, stepping; eax = ebx = ecx = edx = 0; asm("cpuid" : "=a" (max_level), "=b" (ebx), "=c" (ecx), "=d" (edx) : "a" (0)); if (ebx != 0x756e6547 || edx != 0x49656e69 || ecx != 0x6c65746e) { if (verbose) fprintf(stderr, "%.4s%.4s%.4s != GenuineIntel", (char *)&ebx, (char *)&edx, (char *)&ecx); exit(1); } asm("cpuid" : "=a" (fms), "=c" (ecx), "=d" (edx) : "a" (1) : "ebx"); family = (fms >> 8) & 0xf; model = (fms >> 4) & 0xf; stepping = fms & 0xf; if (family == 6 || family == 0xf) model += ((fms >> 16) & 0xf) << 4; if (verbose > 1) printf("CPUID %s %d levels family:model:stepping " "0x%x:%x:%x (%d:%d:%d)\n", brand, 
max_level, family, model, stepping, family, model, stepping); if (!(edx & (1 << 5))) { if (verbose) printf("CPUID: no MSR\n"); exit(1); } /* * Support for MSR_IA32_ENERGY_PERF_BIAS * is indicated by CPUID.06H.ECX.bit3 */ asm("cpuid" : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx) : "a" (6)); if (verbose) printf("CPUID.06H.ECX: 0x%x\n", ecx); if (!(ecx & (1 << 3))) { if (verbose) printf("CPUID: No MSR_IA32_ENERGY_PERF_BIAS\n"); exit(1); } return; /* success */ } unsigned long long get_msr(int cpu, int offset) { unsigned long long msr; char msr_path[32]; int retval; int fd; sprintf(msr_path, "/dev/cpu/%d/msr", cpu); fd = open(msr_path, O_RDONLY); if (fd < 0) { printf("Try \"# modprobe msr\"\n"); perror(msr_path); exit(1); } retval = pread(fd, &msr, sizeof msr, offset); if (retval != sizeof msr) { printf("pread cpu%d 0x%x = %d\n", cpu, offset, retval); exit(-2); } close(fd); return msr; } unsigned long long put_msr(int cpu, unsigned long long new_msr, int offset) { unsigned long long old_msr; char msr_path[32]; int retval; int fd; sprintf(msr_path, "/dev/cpu/%d/msr", cpu); fd = open(msr_path, O_RDWR); if (fd < 0) { perror(msr_path); exit(1); } retval = pread(fd, &old_msr, sizeof old_msr, offset); if (retval != sizeof old_msr) { perror("pwrite"); printf("pread cpu%d 0x%x = %d\n", cpu, offset, retval); exit(-2); } retval = pwrite(fd, &new_msr, sizeof new_msr, offset); if (retval != sizeof new_msr) { perror("pwrite"); printf("pwrite cpu%d 0x%x = %d\n", cpu, offset, retval); exit(-2); } close(fd); return old_msr; } void print_msr(int cpu) { printf("cpu%d: 0x%016llx\n", cpu, get_msr(cpu, MSR_IA32_ENERGY_PERF_BIAS)); } void update_msr(int cpu) { unsigned long long previous_msr; previous_msr = put_msr(cpu, new_bias, MSR_IA32_ENERGY_PERF_BIAS); if (verbose) printf("cpu%d msr0x%x 0x%016llx -> 0x%016llx\n", cpu, MSR_IA32_ENERGY_PERF_BIAS, previous_msr, new_bias); return; } char *proc_stat = "/proc/stat"; /* * run func() on every cpu in /dev/cpu */ void for_every_cpu(void 
(func)(int)) { FILE *fp; int retval; fp = fopen(proc_stat, "r"); if (fp == NULL) { perror(proc_stat); exit(1); } retval = fscanf(fp, "cpu %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d\n"); if (retval != 0) { perror("/proc/stat format"); exit(1); } while (1) { int cpu; retval = fscanf(fp, "cpu%u %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d\n", &cpu); if (retval != 1) return; func(cpu); } fclose(fp); } int main(int argc, char **argv) { cmdline(argc, argv); if (verbose > 1) printf("x86_energy_perf_policy Nov 24, 2010" " - Len Brown <lenb@kernel.org>\n"); if (verbose > 1 && !read_only) printf("new_bias %lld\n", new_bias); validate_cpuid(); if (cpu != -1) { if (read_only) print_msr(cpu); else update_msr(cpu); } else { if (read_only) for_every_cpu(print_msr); else for_every_cpu(update_msr); } return 0; }
gpl-2.0
Electrex/Electroactive-N5
drivers/mfd/pm8921-core.c
2542
31519
/* * Copyright (c) 2011-2012, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #define pr_fmt(fmt) "%s: " fmt, __func__ #include <linux/kernel.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/string.h> #include <linux/err.h> #include <linux/msm_ssbi.h> #include <linux/mfd/core.h> #include <linux/mfd/pm8xxx/pm8921.h> #include <linux/mfd/pm8xxx/core.h> #include <linux/mfd/pm8xxx/regulator.h> #include <linux/leds-pm8xxx.h> #define REG_HWREV 0x002 /* PMIC4 revision */ #define REG_HWREV_2 0x0E8 /* PMIC4 revision 2 */ #define REG_MPP_BASE 0x050 #define REG_IRQ_BASE 0x1BB #define REG_TEMP_ALARM_CTRL 0x1B #define REG_TEMP_ALARM_PWM 0x9B #define REG_BATT_ALARM_THRESH 0x023 #define REG_BATT_ALARM_CTRL1 0x024 #define REG_BATT_ALARM_CTRL2 0x021 #define REG_BATT_ALARM_PWM_CTRL 0x020 #define PM8921_VERSION_MASK 0xFFF0 #define PM8921_VERSION_VALUE 0x06F0 #define PM8922_VERSION_VALUE 0x0AF0 #define PM8917_VERSION_VALUE 0x0CF0 #define PM8921_REVISION_MASK 0x000F #define REG_PM8921_PON_CNTRL_3 0x01D #define PM8921_RESTART_REASON_MASK 0x07 #define SINGLE_IRQ_RESOURCE(_name, _irq) \ { \ .name = _name, \ .start = _irq, \ .end = _irq, \ .flags = IORESOURCE_IRQ, \ } struct pm8921 { struct device *dev; struct pm_irq_chip *irq_chip; struct mfd_cell *mfd_regulators; struct pm8xxx_regulator_core_platform_data *regulator_cdata; u32 rev_registers; u8 restart_reason; }; static int pm8921_readb(const struct device *dev, u16 addr, u8 *val) { const struct pm8xxx_drvdata *pm8921_drvdata = 
dev_get_drvdata(dev); const struct pm8921 *pmic = pm8921_drvdata->pm_chip_data; return msm_ssbi_read(pmic->dev->parent, addr, val, 1); } static int pm8921_writeb(const struct device *dev, u16 addr, u8 val) { const struct pm8xxx_drvdata *pm8921_drvdata = dev_get_drvdata(dev); const struct pm8921 *pmic = pm8921_drvdata->pm_chip_data; return msm_ssbi_write(pmic->dev->parent, addr, &val, 1); } static int pm8921_read_buf(const struct device *dev, u16 addr, u8 *buf, int cnt) { const struct pm8xxx_drvdata *pm8921_drvdata = dev_get_drvdata(dev); const struct pm8921 *pmic = pm8921_drvdata->pm_chip_data; return msm_ssbi_read(pmic->dev->parent, addr, buf, cnt); } static int pm8921_write_buf(const struct device *dev, u16 addr, u8 *buf, int cnt) { const struct pm8xxx_drvdata *pm8921_drvdata = dev_get_drvdata(dev); const struct pm8921 *pmic = pm8921_drvdata->pm_chip_data; return msm_ssbi_write(pmic->dev->parent, addr, buf, cnt); } static int pm8921_read_irq_stat(const struct device *dev, int irq) { const struct pm8xxx_drvdata *pm8921_drvdata = dev_get_drvdata(dev); const struct pm8921 *pmic = pm8921_drvdata->pm_chip_data; return pm8xxx_get_irq_stat(pmic->irq_chip, irq); } static enum pm8xxx_version pm8921_get_version(const struct device *dev) { const struct pm8xxx_drvdata *pm8921_drvdata = dev_get_drvdata(dev); const struct pm8921 *pmic = pm8921_drvdata->pm_chip_data; enum pm8xxx_version version = -ENODEV; if ((pmic->rev_registers & PM8921_VERSION_MASK) == PM8921_VERSION_VALUE) version = PM8XXX_VERSION_8921; else if ((pmic->rev_registers & PM8921_VERSION_MASK) == PM8922_VERSION_VALUE) version = PM8XXX_VERSION_8922; else if ((pmic->rev_registers & PM8921_VERSION_MASK) == PM8917_VERSION_VALUE) version = PM8XXX_VERSION_8917; return version; } static int pm8921_get_revision(const struct device *dev) { const struct pm8xxx_drvdata *pm8921_drvdata = dev_get_drvdata(dev); const struct pm8921 *pmic = pm8921_drvdata->pm_chip_data; return pmic->rev_registers & PM8921_REVISION_MASK; } 
static u8 pm8921_restart_reason(const struct device *dev) { const struct pm8xxx_drvdata *pm8921_drvdata = dev_get_drvdata(dev); const struct pm8921 *pmic = pm8921_drvdata->pm_chip_data; return pmic->restart_reason; } static struct pm8xxx_drvdata pm8921_drvdata = { .pmic_readb = pm8921_readb, .pmic_writeb = pm8921_writeb, .pmic_read_buf = pm8921_read_buf, .pmic_write_buf = pm8921_write_buf, .pmic_read_irq_stat = pm8921_read_irq_stat, .pmic_get_version = pm8921_get_version, .pmic_get_revision = pm8921_get_revision, .pmic_restart_reason = pm8921_restart_reason, }; static struct resource gpio_cell_resources[] = { [0] = { .start = PM8921_IRQ_BLOCK_BIT(PM8921_GPIO_BLOCK_START, 0), .end = PM8921_IRQ_BLOCK_BIT(PM8921_GPIO_BLOCK_START, 0) + PM8921_NR_GPIOS - 1, .flags = IORESOURCE_IRQ, }, }; static struct mfd_cell gpio_cell __devinitdata = { .name = PM8XXX_GPIO_DEV_NAME, .id = -1, .resources = gpio_cell_resources, .num_resources = ARRAY_SIZE(gpio_cell_resources), }; static const struct resource adc_cell_resources[] __devinitconst = { SINGLE_IRQ_RESOURCE(NULL, PM8921_ADC_EOC_USR_IRQ), SINGLE_IRQ_RESOURCE(NULL, PM8921_ADC_BATT_TEMP_WARM_IRQ), SINGLE_IRQ_RESOURCE(NULL, PM8921_ADC_BATT_TEMP_COLD_IRQ), }; static struct mfd_cell adc_cell __devinitdata = { .name = PM8XXX_ADC_DEV_NAME, .id = -1, .resources = adc_cell_resources, .num_resources = ARRAY_SIZE(adc_cell_resources), }; static struct resource mpp_cell_resources[] = { { .start = PM8921_IRQ_BLOCK_BIT(PM8921_MPP_BLOCK_START, 0), .end = PM8921_IRQ_BLOCK_BIT(PM8921_MPP_BLOCK_START, 0) + PM8921_NR_MPPS - 1, .flags = IORESOURCE_IRQ, }, }; static struct mfd_cell mpp_cell __devinitdata = { .name = PM8XXX_MPP_DEV_NAME, .id = 0, .resources = mpp_cell_resources, .num_resources = ARRAY_SIZE(mpp_cell_resources), }; static const struct resource rtc_cell_resources[] __devinitconst = { [0] = SINGLE_IRQ_RESOURCE(NULL, PM8921_RTC_ALARM_IRQ), [1] = { .name = "pmic_rtc_base", .start = PM8921_RTC_BASE, .end = PM8921_RTC_BASE, .flags = 
IORESOURCE_IO, }, }; static struct mfd_cell rtc_cell __devinitdata = { .name = PM8XXX_RTC_DEV_NAME, .id = -1, .resources = rtc_cell_resources, .num_resources = ARRAY_SIZE(rtc_cell_resources), }; static const struct resource resources_pwrkey[] __devinitconst = { SINGLE_IRQ_RESOURCE(NULL, PM8921_PWRKEY_REL_IRQ), SINGLE_IRQ_RESOURCE(NULL, PM8921_PWRKEY_PRESS_IRQ), }; static struct mfd_cell pwrkey_cell __devinitdata = { .name = PM8XXX_PWRKEY_DEV_NAME, .id = -1, .num_resources = ARRAY_SIZE(resources_pwrkey), .resources = resources_pwrkey, }; static const struct resource resources_keypad[] = { SINGLE_IRQ_RESOURCE(NULL, PM8921_KEYPAD_IRQ), SINGLE_IRQ_RESOURCE(NULL, PM8921_KEYSTUCK_IRQ), }; static struct mfd_cell keypad_cell __devinitdata = { .name = PM8XXX_KEYPAD_DEV_NAME, .id = -1, .num_resources = ARRAY_SIZE(resources_keypad), .resources = resources_keypad, }; static struct mfd_cell debugfs_cell __devinitdata = { .name = "pm8xxx-debug", .id = 0, .platform_data = "pm8921-dbg", .pdata_size = sizeof("pm8921-dbg"), }; static struct mfd_cell pwm_cell __devinitdata = { .name = PM8XXX_PWM_DEV_NAME, .id = -1, }; static const struct resource charger_cell_resources[] __devinitconst = { SINGLE_IRQ_RESOURCE("USBIN_VALID_IRQ", PM8921_USBIN_VALID_IRQ), SINGLE_IRQ_RESOURCE("USBIN_OV_IRQ", PM8921_USBIN_OV_IRQ), SINGLE_IRQ_RESOURCE("BATT_INSERTED_IRQ", PM8921_BATT_INSERTED_IRQ), SINGLE_IRQ_RESOURCE("VBATDET_LOW_IRQ", PM8921_VBATDET_LOW_IRQ), SINGLE_IRQ_RESOURCE("USBIN_UV_IRQ", PM8921_USBIN_UV_IRQ), SINGLE_IRQ_RESOURCE("VBAT_OV_IRQ", PM8921_VBAT_OV_IRQ), SINGLE_IRQ_RESOURCE("CHGWDOG_IRQ", PM8921_CHGWDOG_IRQ), SINGLE_IRQ_RESOURCE("VCP_IRQ", PM8921_VCP_IRQ), SINGLE_IRQ_RESOURCE("ATCDONE_IRQ", PM8921_ATCDONE_IRQ), SINGLE_IRQ_RESOURCE("ATCFAIL_IRQ", PM8921_ATCFAIL_IRQ), SINGLE_IRQ_RESOURCE("CHGDONE_IRQ", PM8921_CHGDONE_IRQ), SINGLE_IRQ_RESOURCE("CHGFAIL_IRQ", PM8921_CHGFAIL_IRQ), SINGLE_IRQ_RESOURCE("CHGSTATE_IRQ", PM8921_CHGSTATE_IRQ), SINGLE_IRQ_RESOURCE("LOOP_CHANGE_IRQ", 
PM8921_LOOP_CHANGE_IRQ), SINGLE_IRQ_RESOURCE("FASTCHG_IRQ", PM8921_FASTCHG_IRQ), SINGLE_IRQ_RESOURCE("TRKLCHG_IRQ", PM8921_TRKLCHG_IRQ), SINGLE_IRQ_RESOURCE("BATT_REMOVED_IRQ", PM8921_BATT_REMOVED_IRQ), SINGLE_IRQ_RESOURCE("BATTTEMP_HOT_IRQ", PM8921_BATTTEMP_HOT_IRQ), SINGLE_IRQ_RESOURCE("CHGHOT_IRQ", PM8921_CHGHOT_IRQ), SINGLE_IRQ_RESOURCE("BATTTEMP_COLD_IRQ", PM8921_BATTTEMP_COLD_IRQ), SINGLE_IRQ_RESOURCE("CHG_GONE_IRQ", PM8921_CHG_GONE_IRQ), SINGLE_IRQ_RESOURCE("BAT_TEMP_OK_IRQ", PM8921_BAT_TEMP_OK_IRQ), SINGLE_IRQ_RESOURCE("COARSE_DET_LOW_IRQ", PM8921_COARSE_DET_LOW_IRQ), SINGLE_IRQ_RESOURCE("VDD_LOOP_IRQ", PM8921_VDD_LOOP_IRQ), SINGLE_IRQ_RESOURCE("VREG_OV_IRQ", PM8921_VREG_OV_IRQ), SINGLE_IRQ_RESOURCE("VBATDET_IRQ", PM8921_VBATDET_IRQ), SINGLE_IRQ_RESOURCE("BATFET_IRQ", PM8921_BATFET_IRQ), SINGLE_IRQ_RESOURCE("PSI_IRQ", PM8921_PSI_IRQ), SINGLE_IRQ_RESOURCE("DCIN_VALID_IRQ", PM8921_DCIN_VALID_IRQ), SINGLE_IRQ_RESOURCE("DCIN_OV_IRQ", PM8921_DCIN_OV_IRQ), SINGLE_IRQ_RESOURCE("DCIN_UV_IRQ", PM8921_DCIN_UV_IRQ), }; static const struct resource bms_cell_resources[] __devinitconst = { SINGLE_IRQ_RESOURCE("PM8921_BMS_SBI_WRITE_OK", PM8921_BMS_SBI_WRITE_OK), SINGLE_IRQ_RESOURCE("PM8921_BMS_CC_THR", PM8921_BMS_CC_THR), SINGLE_IRQ_RESOURCE("PM8921_BMS_VSENSE_THR", PM8921_BMS_VSENSE_THR), SINGLE_IRQ_RESOURCE("PM8921_BMS_VSENSE_FOR_R", PM8921_BMS_VSENSE_FOR_R), SINGLE_IRQ_RESOURCE("PM8921_BMS_OCV_FOR_R", PM8921_BMS_OCV_FOR_R), SINGLE_IRQ_RESOURCE("PM8921_BMS_GOOD_OCV", PM8921_BMS_GOOD_OCV), SINGLE_IRQ_RESOURCE("PM8921_BMS_VSENSE_AVG", PM8921_BMS_VSENSE_AVG), }; static struct mfd_cell charger_cell __devinitdata = { .name = PM8921_CHARGER_DEV_NAME, .id = -1, .resources = charger_cell_resources, .num_resources = ARRAY_SIZE(charger_cell_resources), }; static struct mfd_cell bms_cell __devinitdata = { .name = PM8921_BMS_DEV_NAME, .id = -1, .resources = bms_cell_resources, .num_resources = ARRAY_SIZE(bms_cell_resources), }; static struct mfd_cell misc_cell __devinitdata = { 
.name = PM8XXX_MISC_DEV_NAME, .id = -1, }; static struct mfd_cell leds_cell __devinitdata = { .name = PM8XXX_LEDS_DEV_NAME, .id = -1, }; static const struct resource thermal_alarm_cell_resources[] __devinitconst = { SINGLE_IRQ_RESOURCE("pm8921_tempstat_irq", PM8921_TEMPSTAT_IRQ), SINGLE_IRQ_RESOURCE("pm8921_overtemp_irq", PM8921_OVERTEMP_IRQ), }; static struct pm8xxx_tm_core_data thermal_alarm_cdata = { .adc_channel = CHANNEL_DIE_TEMP, .adc_type = PM8XXX_TM_ADC_PM8XXX_ADC, .reg_addr_temp_alarm_ctrl = REG_TEMP_ALARM_CTRL, .reg_addr_temp_alarm_pwm = REG_TEMP_ALARM_PWM, .tm_name = "pm8921_tz", .irq_name_temp_stat = "pm8921_tempstat_irq", .irq_name_over_temp = "pm8921_overtemp_irq", }; static struct mfd_cell thermal_alarm_cell __devinitdata = { .name = PM8XXX_TM_DEV_NAME, .id = -1, .resources = thermal_alarm_cell_resources, .num_resources = ARRAY_SIZE(thermal_alarm_cell_resources), .platform_data = &thermal_alarm_cdata, .pdata_size = sizeof(struct pm8xxx_tm_core_data), }; static const struct resource batt_alarm_cell_resources[] __devinitconst = { SINGLE_IRQ_RESOURCE("pm8921_batt_alarm_irq", PM8921_BATT_ALARM_IRQ), }; static struct pm8xxx_batt_alarm_core_data batt_alarm_cdata = { .irq_name = "pm8921_batt_alarm_irq", .reg_addr_threshold = REG_BATT_ALARM_THRESH, .reg_addr_ctrl1 = REG_BATT_ALARM_CTRL1, .reg_addr_ctrl2 = REG_BATT_ALARM_CTRL2, .reg_addr_pwm_ctrl = REG_BATT_ALARM_PWM_CTRL, }; static struct mfd_cell batt_alarm_cell __devinitdata = { .name = PM8XXX_BATT_ALARM_DEV_NAME, .id = -1, .resources = batt_alarm_cell_resources, .num_resources = ARRAY_SIZE(batt_alarm_cell_resources), .platform_data = &batt_alarm_cdata, .pdata_size = sizeof(struct pm8xxx_batt_alarm_core_data), }; static const struct resource ccadc_cell_resources[] __devinitconst = { SINGLE_IRQ_RESOURCE("PM8921_BMS_CCADC_EOC", PM8921_BMS_CCADC_EOC), }; static struct mfd_cell ccadc_cell __devinitdata = { .name = PM8XXX_CCADC_DEV_NAME, .id = -1, .resources = ccadc_cell_resources, .num_resources = 
ARRAY_SIZE(ccadc_cell_resources), }; static struct mfd_cell vibrator_cell __devinitdata = { .name = PM8XXX_VIBRATOR_DEV_NAME, .id = -1, }; static struct pm8xxx_vreg regulator_data[] = { /* name pc_name ctrl test hpm_min */ NLDO("8921_l1", "8921_l1_pc", 0x0AE, 0x0AF, LDO_150), NLDO("8921_l2", "8921_l2_pc", 0x0B0, 0x0B1, LDO_150), PLDO("8921_l3", "8921_l3_pc", 0x0B2, 0x0B3, LDO_150), PLDO("8921_l4", "8921_l4_pc", 0x0B4, 0x0B5, LDO_50), PLDO("8921_l5", "8921_l5_pc", 0x0B6, 0x0B7, LDO_300), PLDO("8921_l6", "8921_l6_pc", 0x0B8, 0x0B9, LDO_600), PLDO("8921_l7", "8921_l7_pc", 0x0BA, 0x0BB, LDO_150), PLDO("8921_l8", "8921_l8_pc", 0x0BC, 0x0BD, LDO_300), PLDO("8921_l9", "8921_l9_pc", 0x0BE, 0x0BF, LDO_300), PLDO("8921_l10", "8921_l10_pc", 0x0C0, 0x0C1, LDO_600), PLDO("8921_l11", "8921_l11_pc", 0x0C2, 0x0C3, LDO_150), NLDO("8921_l12", "8921_l12_pc", 0x0C4, 0x0C5, LDO_150), PLDO("8921_l14", "8921_l14_pc", 0x0C8, 0x0C9, LDO_50), PLDO("8921_l15", "8921_l15_pc", 0x0CA, 0x0CB, LDO_150), PLDO("8921_l16", "8921_l16_pc", 0x0CC, 0x0CD, LDO_300), PLDO("8921_l17", "8921_l17_pc", 0x0CE, 0x0CF, LDO_150), NLDO("8921_l18", "8921_l18_pc", 0x0D0, 0x0D1, LDO_150), PLDO("8921_l21", "8921_l21_pc", 0x0D6, 0x0D7, LDO_150), PLDO("8921_l22", "8921_l22_pc", 0x0D8, 0x0D9, LDO_150), PLDO("8921_l23", "8921_l23_pc", 0x0DA, 0x0DB, LDO_150), NLDO1200("8921_l24", 0x0DC, 0x0DD, LDO_1200), NLDO1200("8921_l25", 0x0DE, 0x0DF, LDO_1200), NLDO1200("8921_l26", 0x0E0, 0x0E1, LDO_1200), NLDO1200("8921_l27", 0x0E2, 0x0E3, LDO_1200), NLDO1200("8921_l28", 0x0E4, 0x0E5, LDO_1200), PLDO("8921_l29", "8921_l29_pc", 0x0E6, 0x0E7, LDO_150), /* name pc_name ctrl test2 clk sleep hpm_min */ SMPS("8921_s1", "8921_s1_pc", 0x1D0, 0x1D5, 0x009, 0x1D2, SMPS_1500), SMPS("8921_s2", "8921_s2_pc", 0x1D8, 0x1DD, 0x00A, 0x1DA, SMPS_1500), SMPS("8921_s3", "8921_s3_pc", 0x1E0, 0x1E5, 0x00B, 0x1E2, SMPS_1500), SMPS("8921_s4", "8921_s4_pc", 0x1E8, 0x1ED, 0x011, 0x1EA, SMPS_1500), /* name ctrl fts_cnfg1 pfm pwr_cnfg hpm_min */ 
FTSMPS("8921_s5", 0x025, 0x02E, 0x026, 0x032, SMPS_2000), FTSMPS("8921_s6", 0x036, 0x03F, 0x037, 0x043, SMPS_2000), /* name pc_name ctrl test2 clk sleep hpm_min */ SMPS("8921_s7", "8921_s7_pc", 0x1F0, 0x1F5, 0x012, 0x1F2, SMPS_1500), SMPS("8921_s8", "8921_s8_pc", 0x1F8, 0x1FD, 0x013, 0x1FA, SMPS_1500), /* name pc_name ctrl test */ VS("8921_lvs1", "8921_lvs1_pc", 0x060, 0x061), VS300("8921_lvs2", 0x062, 0x063), VS("8921_lvs3", "8921_lvs3_pc", 0x064, 0x065), VS("8921_lvs4", "8921_lvs4_pc", 0x066, 0x067), VS("8921_lvs5", "8921_lvs5_pc", 0x068, 0x069), VS("8921_lvs6", "8921_lvs6_pc", 0x06A, 0x06B), VS("8921_lvs7", "8921_lvs7_pc", 0x06C, 0x06D), VS300("8921_usb_otg", 0x06E, 0x06F), VS300("8921_hdmi_mvs", 0x070, 0x071), /* name ctrl */ NCP("8921_ncp", 0x090), }; /* * PM8917 adds 6 LDOs and a boost regulator beyond those available on PM8921. * It also replaces SMPS 3 with FTSMPS 3. PM8917 does not have an NCP. */ static struct pm8xxx_vreg pm8917_regulator_data[] = { /* name pc_name ctrl test hpm_min */ PLDO("8917_l30", "8917_l30_pc", 0x0A3, 0x0A4, LDO_150), PLDO("8917_l31", "8917_l31_pc", 0x0A5, 0x0A6, LDO_150), PLDO("8917_l32", "8917_l32_pc", 0x0A7, 0x0A8, LDO_150), PLDO("8917_l33", "8917_l33_pc", 0x0C6, 0x0C7, LDO_150), PLDO("8917_l34", "8917_l34_pc", 0x0D2, 0x0D3, LDO_150), PLDO("8917_l35", "8917_l35_pc", 0x0D4, 0x0D5, LDO_300), PLDO("8917_l36", "8917_l36_pc", 0x0A9, 0x0AA, LDO_50), /* name ctrl */ BOOST("8917_boost", 0x04B), }; #define MAX_NAME_COMPARISON_LEN 32 static int __devinit match_regulator(enum pm8xxx_version version, struct pm8xxx_regulator_core_platform_data *core_data, const char *name) { int found = 0; int i; for (i = 0; i < ARRAY_SIZE(regulator_data); i++) { if (regulator_data[i].rdesc.name && strncmp(regulator_data[i].rdesc.name, name, MAX_NAME_COMPARISON_LEN) == 0) { core_data->is_pin_controlled = false; core_data->vreg = &regulator_data[i]; found = 1; break; } else if (regulator_data[i].rdesc_pc.name && strncmp(regulator_data[i].rdesc_pc.name, name, 
MAX_NAME_COMPARISON_LEN) == 0) { core_data->is_pin_controlled = true; core_data->vreg = &regulator_data[i]; found = 1; break; } } if (version == PM8XXX_VERSION_8917) { for (i = 0; i < ARRAY_SIZE(pm8917_regulator_data); i++) { if (pm8917_regulator_data[i].rdesc.name && strncmp(pm8917_regulator_data[i].rdesc.name, name, MAX_NAME_COMPARISON_LEN) == 0) { core_data->is_pin_controlled = false; core_data->vreg = &pm8917_regulator_data[i]; found = 1; break; } else if (pm8917_regulator_data[i].rdesc_pc.name && strncmp(pm8917_regulator_data[i].rdesc_pc.name, name, MAX_NAME_COMPARISON_LEN) == 0) { core_data->is_pin_controlled = true; core_data->vreg = &pm8917_regulator_data[i]; found = 1; break; } } } if (!found) pr_err("could not find a match for regulator: %s\n", name); return found; } static int __devinit pm8921_add_regulators(const struct pm8921_platform_data *pdata, struct pm8921 *pmic, int irq_base) { int ret = 0; struct mfd_cell *mfd_regulators; struct pm8xxx_regulator_core_platform_data *cdata; enum pm8xxx_version version; int i; version = pm8xxx_get_version(pmic->dev); /* Add one device for each regulator used by the board. 
*/ mfd_regulators = kzalloc(sizeof(struct mfd_cell) * (pdata->num_regulators), GFP_KERNEL); if (!mfd_regulators) { pr_err("Cannot allocate %d bytes for pm8921 regulator " "mfd cells\n", sizeof(struct mfd_cell) * (pdata->num_regulators)); return -ENOMEM; } cdata = kzalloc(sizeof(struct pm8xxx_regulator_core_platform_data) * pdata->num_regulators, GFP_KERNEL); if (!cdata) { pr_err("Cannot allocate %d bytes for pm8921 regulator " "core data\n", pdata->num_regulators * sizeof(struct pm8xxx_regulator_core_platform_data)); kfree(mfd_regulators); return -ENOMEM; } for (i = 0; i < ARRAY_SIZE(regulator_data); i++) mutex_init(&regulator_data[i].pc_lock); for (i = 0; i < ARRAY_SIZE(pm8917_regulator_data); i++) mutex_init(&pm8917_regulator_data[i].pc_lock); for (i = 0; i < pdata->num_regulators; i++) { if (!pdata->regulator_pdatas[i].init_data.constraints.name) { pr_err("name missing for regulator %d\n", i); ret = -EINVAL; goto bail; } if (!match_regulator(version, &cdata[i], pdata->regulator_pdatas[i].init_data.constraints.name)) { ret = -ENODEV; goto bail; } cdata[i].pdata = &(pdata->regulator_pdatas[i]); mfd_regulators[i].name = PM8XXX_REGULATOR_DEV_NAME; mfd_regulators[i].id = cdata[i].pdata->id; mfd_regulators[i].platform_data = &cdata[i]; mfd_regulators[i].pdata_size = sizeof(struct pm8xxx_regulator_core_platform_data); } ret = mfd_add_devices(pmic->dev, 0, mfd_regulators, pdata->num_regulators, NULL, irq_base); if (ret) goto bail; pmic->mfd_regulators = mfd_regulators; pmic->regulator_cdata = cdata; return ret; bail: for (i = 0; i < ARRAY_SIZE(regulator_data); i++) mutex_destroy(&regulator_data[i].pc_lock); for (i = 0; i < ARRAY_SIZE(pm8917_regulator_data); i++) mutex_destroy(&pm8917_regulator_data[i].pc_lock); kfree(mfd_regulators); kfree(cdata); return ret; } static int __devinit pm8921_add_subdevices(const struct pm8921_platform_data *pdata, struct pm8921 *pmic) { int ret = 0, irq_base = 0; struct pm_irq_chip *irq_chip; enum pm8xxx_version version; version = 
pm8xxx_get_version(pmic->dev); if (pdata->irq_pdata) { pdata->irq_pdata->irq_cdata.nirqs = PM8921_NR_IRQS; pdata->irq_pdata->irq_cdata.base_addr = REG_IRQ_BASE; irq_base = pdata->irq_pdata->irq_base; irq_chip = pm8xxx_irq_init(pmic->dev, pdata->irq_pdata); if (IS_ERR(irq_chip)) { pr_err("Failed to init interrupts ret=%ld\n", PTR_ERR(irq_chip)); return PTR_ERR(irq_chip); } pmic->irq_chip = irq_chip; } if (pdata->gpio_pdata) { if (version == PM8XXX_VERSION_8917) { gpio_cell_resources[0].end = gpio_cell_resources[0].end + PM8917_NR_GPIOS - PM8921_NR_GPIOS; pdata->gpio_pdata->gpio_cdata.ngpios = PM8917_NR_GPIOS; } else { pdata->gpio_pdata->gpio_cdata.ngpios = PM8921_NR_GPIOS; } gpio_cell.platform_data = pdata->gpio_pdata; gpio_cell.pdata_size = sizeof(struct pm8xxx_gpio_platform_data); ret = mfd_add_devices(pmic->dev, 0, &gpio_cell, 1, NULL, irq_base); if (ret) { pr_err("Failed to add gpio subdevice ret=%d\n", ret); goto bail; } } if (pdata->mpp_pdata) { if (version == PM8XXX_VERSION_8917) { mpp_cell_resources[0].end = mpp_cell_resources[0].end + PM8917_NR_MPPS - PM8921_NR_MPPS; pdata->mpp_pdata->core_data.nmpps = PM8917_NR_MPPS; } else { pdata->mpp_pdata->core_data.nmpps = PM8921_NR_MPPS; } pdata->mpp_pdata->core_data.base_addr = REG_MPP_BASE; mpp_cell.platform_data = pdata->mpp_pdata; mpp_cell.pdata_size = sizeof(struct pm8xxx_mpp_platform_data); ret = mfd_add_devices(pmic->dev, 0, &mpp_cell, 1, NULL, irq_base); if (ret) { pr_err("Failed to add mpp subdevice ret=%d\n", ret); goto bail; } } if (pdata->rtc_pdata) { rtc_cell.platform_data = pdata->rtc_pdata; rtc_cell.pdata_size = sizeof(struct pm8xxx_rtc_platform_data); ret = mfd_add_devices(pmic->dev, 0, &rtc_cell, 1, NULL, irq_base); if (ret) { pr_err("Failed to add rtc subdevice ret=%d\n", ret); goto bail; } } if (pdata->pwrkey_pdata) { pwrkey_cell.platform_data = pdata->pwrkey_pdata; pwrkey_cell.pdata_size = sizeof(struct pm8xxx_pwrkey_platform_data); ret = mfd_add_devices(pmic->dev, 0, &pwrkey_cell, 1, NULL, 
irq_base); if (ret) { pr_err("Failed to add pwrkey subdevice ret=%d\n", ret); goto bail; } } if (pdata->keypad_pdata) { keypad_cell.platform_data = pdata->keypad_pdata; keypad_cell.pdata_size = sizeof(struct pm8xxx_keypad_platform_data); ret = mfd_add_devices(pmic->dev, 0, &keypad_cell, 1, NULL, irq_base); if (ret) { pr_err("Failed to add keypad subdevice ret=%d\n", ret); goto bail; } } if (pdata->charger_pdata) { pdata->charger_pdata->charger_cdata.vbat_channel = CHANNEL_VBAT; pdata->charger_pdata->charger_cdata.batt_temp_channel = CHANNEL_BATT_THERM; pdata->charger_pdata->charger_cdata.batt_id_channel = CHANNEL_BATT_ID; charger_cell.platform_data = pdata->charger_pdata; charger_cell.pdata_size = sizeof(struct pm8921_charger_platform_data); ret = mfd_add_devices(pmic->dev, 0, &charger_cell, 1, NULL, irq_base); if (ret) { pr_err("Failed to add charger subdevice ret=%d\n", ret); goto bail; } } if (pdata->adc_pdata) { adc_cell.platform_data = pdata->adc_pdata; adc_cell.pdata_size = sizeof(struct pm8xxx_adc_platform_data); ret = mfd_add_devices(pmic->dev, 0, &adc_cell, 1, NULL, irq_base); if (ret) { pr_err("Failed to add regulator subdevices ret=%d\n", ret); } } if (pdata->bms_pdata) { pdata->bms_pdata->bms_cdata.batt_temp_channel = CHANNEL_BATT_THERM; pdata->bms_pdata->bms_cdata.vbat_channel = CHANNEL_VBAT; pdata->bms_pdata->bms_cdata.ref625mv_channel = CHANNEL_625MV; pdata->bms_pdata->bms_cdata.ref1p25v_channel = CHANNEL_125V; pdata->bms_pdata->bms_cdata.batt_id_channel = CHANNEL_BATT_ID; bms_cell.platform_data = pdata->bms_pdata; bms_cell.pdata_size = sizeof(struct pm8921_bms_platform_data); ret = mfd_add_devices(pmic->dev, 0, &bms_cell, 1, NULL, irq_base); if (ret) { pr_err("Failed to add bms subdevice ret=%d\n", ret); goto bail; } } if (pdata->num_regulators > 0 && pdata->regulator_pdatas) { ret = pm8921_add_regulators(pdata, pmic, irq_base); if (ret) { pr_err("Failed to add regulator subdevices ret=%d\n", ret); goto bail; } } ret = mfd_add_devices(pmic->dev, 0, 
&debugfs_cell, 1, NULL, irq_base); if (ret) { pr_err("Failed to add debugfs subdevice ret=%d\n", ret); goto bail; } if (pdata->misc_pdata) { misc_cell.platform_data = pdata->misc_pdata; misc_cell.pdata_size = sizeof(struct pm8xxx_misc_platform_data); ret = mfd_add_devices(pmic->dev, 0, &misc_cell, 1, NULL, irq_base); if (ret) { pr_err("Failed to add misc subdevice ret=%d\n", ret); goto bail; } } ret = mfd_add_devices(pmic->dev, 0, &thermal_alarm_cell, 1, NULL, irq_base); if (ret) { pr_err("Failed to add thermal alarm subdevice ret=%d\n", ret); goto bail; } ret = mfd_add_devices(pmic->dev, 0, &batt_alarm_cell, 1, NULL, irq_base); if (ret) { pr_err("Failed to add battery alarm subdevice ret=%d\n", ret); goto bail; } if (version != PM8XXX_VERSION_8917) { if (pdata->pwm_pdata) { pwm_cell.platform_data = pdata->pwm_pdata; pwm_cell.pdata_size = sizeof(struct pm8xxx_pwm_platform_data); } ret = mfd_add_devices(pmic->dev, 0, &pwm_cell, 1, NULL, 0); if (ret) { pr_err("Failed to add pwm subdevice ret=%d\n", ret); goto bail; } if (pdata->leds_pdata) { leds_cell.platform_data = pdata->leds_pdata; leds_cell.pdata_size = sizeof(struct pm8xxx_led_platform_data); ret = mfd_add_devices(pmic->dev, 0, &leds_cell, 1, NULL, 0); if (ret) { pr_err("Failed to add leds subdevice ret=%d\n", ret); goto bail; } } if (pdata->vibrator_pdata) { vibrator_cell.platform_data = pdata->vibrator_pdata; vibrator_cell.pdata_size = sizeof(struct pm8xxx_vibrator_platform_data); ret = mfd_add_devices(pmic->dev, 0, &vibrator_cell, 1, NULL, 0); if (ret) { pr_err("Failed to add vibrator ret=%d\n", ret); goto bail; } } } if (pdata->ccadc_pdata) { pdata->ccadc_pdata->ccadc_cdata.batt_temp_channel = CHANNEL_BATT_THERM; ccadc_cell.platform_data = pdata->ccadc_pdata; ccadc_cell.pdata_size = sizeof(struct pm8xxx_ccadc_platform_data); ret = mfd_add_devices(pmic->dev, 0, &ccadc_cell, 1, NULL, irq_base); if (ret) { pr_err("Failed to add ccadc subdevice ret=%d\n", ret); goto bail; } } return 0; bail: if (pmic->irq_chip) 
{ pm8xxx_irq_exit(pmic->irq_chip); pmic->irq_chip = NULL; } return ret; } static const char * const pm8921_rev_names[] = { [PM8XXX_REVISION_8921_TEST] = "test", [PM8XXX_REVISION_8921_1p0] = "1.0", [PM8XXX_REVISION_8921_1p1] = "1.1", [PM8XXX_REVISION_8921_2p0] = "2.0", [PM8XXX_REVISION_8921_3p0] = "3.0", [PM8XXX_REVISION_8921_3p1] = "3.1", }; static const char * const pm8922_rev_names[] = { [PM8XXX_REVISION_8922_TEST] = "test", [PM8XXX_REVISION_8922_1p0] = "1.0", [PM8XXX_REVISION_8922_1p1] = "1.1", [PM8XXX_REVISION_8922_2p0] = "2.0", }; static const char * const pm8917_rev_names[] = { [PM8XXX_REVISION_8917_TEST] = "test", [PM8XXX_REVISION_8917_1p0] = "1.0", }; static int __devinit pm8921_probe(struct platform_device *pdev) { const struct pm8921_platform_data *pdata = pdev->dev.platform_data; const char *revision_name = "unknown"; struct pm8921 *pmic; enum pm8xxx_version version; int revision; int rc; u8 val; if (!pdata) { pr_err("missing platform data\n"); return -EINVAL; } pmic = kzalloc(sizeof(struct pm8921), GFP_KERNEL); if (!pmic) { pr_err("Cannot alloc pm8921 struct\n"); return -ENOMEM; } /* Read PMIC chip revision */ rc = msm_ssbi_read(pdev->dev.parent, REG_HWREV, &val, sizeof(val)); if (rc) { pr_err("Failed to read hw rev reg %d:rc=%d\n", REG_HWREV, rc); goto err_read_rev; } pr_info("PMIC revision 1: %02X\n", val); pmic->rev_registers = val; /* Read PMIC chip revision 2 */ rc = msm_ssbi_read(pdev->dev.parent, REG_HWREV_2, &val, sizeof(val)); if (rc) { pr_err("Failed to read hw rev 2 reg %d:rc=%d\n", REG_HWREV_2, rc); goto err_read_rev; } pr_info("PMIC revision 2: %02X\n", val); pmic->rev_registers |= val << BITS_PER_BYTE; pmic->dev = &pdev->dev; pm8921_drvdata.pm_chip_data = pmic; platform_set_drvdata(pdev, &pm8921_drvdata); /* Print out human readable version and revision names. 
*/ version = pm8xxx_get_version(pmic->dev); revision = pm8xxx_get_revision(pmic->dev); if (version == PM8XXX_VERSION_8921) { if (revision >= 0 && revision < ARRAY_SIZE(pm8921_rev_names)) revision_name = pm8921_rev_names[revision]; pr_info("PMIC version: PM8921 rev %s\n", revision_name); } else if (version == PM8XXX_VERSION_8922) { if (revision >= 0 && revision < ARRAY_SIZE(pm8922_rev_names)) revision_name = pm8922_rev_names[revision]; pr_info("PMIC version: PM8922 rev %s\n", revision_name); } else if (version == PM8XXX_VERSION_8917) { if (revision >= 0 && revision < ARRAY_SIZE(pm8917_rev_names)) revision_name = pm8917_rev_names[revision]; pr_info("PMIC version: PM8917 rev %s\n", revision_name); } else { WARN_ON(version != PM8XXX_VERSION_8921 && version != PM8XXX_VERSION_8922 && version != PM8XXX_VERSION_8917); } /* Log human readable restart reason */ rc = msm_ssbi_read(pdev->dev.parent, REG_PM8921_PON_CNTRL_3, &val, 1); if (rc) { pr_err("Cannot read restart reason rc=%d\n", rc); goto err_read_rev; } val &= PM8XXX_RESTART_REASON_MASK; pr_info("PMIC Restart Reason: %s\n", pm8xxx_restart_reason_str[val]); pmic->restart_reason = val; rc = pm8921_add_subdevices(pdata, pmic); if (rc) { pr_err("Cannot add subdevices rc=%d\n", rc); goto err; } /* gpio might not work if no irq device is found */ WARN_ON(pmic->irq_chip == NULL); return 0; err: mfd_remove_devices(pmic->dev); platform_set_drvdata(pdev, NULL); kfree(pmic->mfd_regulators); kfree(pmic->regulator_cdata); err_read_rev: kfree(pmic); return rc; } static int __devexit pm8921_remove(struct platform_device *pdev) { struct pm8xxx_drvdata *drvdata; struct pm8921 *pmic = NULL; int i; drvdata = platform_get_drvdata(pdev); if (drvdata) pmic = drvdata->pm_chip_data; if (pmic) { if (pmic->dev) mfd_remove_devices(pmic->dev); if (pmic->irq_chip) pm8xxx_irq_exit(pmic->irq_chip); if (pmic->mfd_regulators) { for (i = 0; i < ARRAY_SIZE(regulator_data); i++) mutex_destroy(&regulator_data[i].pc_lock); for (i = 0; i < 
ARRAY_SIZE(pm8917_regulator_data); i++) mutex_destroy( &pm8917_regulator_data[i].pc_lock); } kfree(pmic->mfd_regulators); kfree(pmic->regulator_cdata); kfree(pmic); } platform_set_drvdata(pdev, NULL); return 0; } static struct platform_driver pm8921_driver = { .probe = pm8921_probe, .remove = __devexit_p(pm8921_remove), .driver = { .name = "pm8921-core", .owner = THIS_MODULE, }, }; static int __init pm8921_init(void) { return platform_driver_register(&pm8921_driver); } postcore_initcall(pm8921_init); static void __exit pm8921_exit(void) { platform_driver_unregister(&pm8921_driver); } module_exit(pm8921_exit); MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("PMIC 8921 core driver"); MODULE_VERSION("1.0"); MODULE_ALIAS("platform:pm8921-core");
gpl-2.0
MikePach/stock_kernel_lge_l2s
drivers/mtd/mtdblock.c
2798
10196
/* * Direct MTD block device access * * Copyright © 1999-2010 David Woodhouse <dwmw2@infradead.org> * Copyright © 2000-2003 Nicolas Pitre <nico@fluxnic.net> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * */ #include <linux/fs.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/types.h> #include <linux/vmalloc.h> #include <linux/mtd/mtd.h> #include <linux/mtd/blktrans.h> #include <linux/mutex.h> struct mtdblk_dev { struct mtd_blktrans_dev mbd; int count; struct mutex cache_mutex; unsigned char *cache_data; unsigned long cache_offset; unsigned int cache_size; enum { STATE_EMPTY, STATE_CLEAN, STATE_DIRTY } cache_state; }; static struct mutex mtdblks_lock; /* * Cache stuff... * * Since typical flash erasable sectors are much larger than what Linux's * buffer cache can handle, we must implement read-modify-write on flash * sectors for each block write requests. To avoid over-erasing flash sectors * and to speed things up, we locally cache a whole flash sector while it is * being written to until a different sector is required. 
*/ static void erase_callback(struct erase_info *done) { wait_queue_head_t *wait_q = (wait_queue_head_t *)done->priv; wake_up(wait_q); } static int erase_write (struct mtd_info *mtd, unsigned long pos, int len, const char *buf) { struct erase_info erase; DECLARE_WAITQUEUE(wait, current); wait_queue_head_t wait_q; size_t retlen; int ret; /* * First, let's erase the flash block. */ init_waitqueue_head(&wait_q); erase.mtd = mtd; erase.callback = erase_callback; erase.addr = pos; erase.len = len; erase.priv = (u_long)&wait_q; set_current_state(TASK_INTERRUPTIBLE); add_wait_queue(&wait_q, &wait); ret = mtd->erase(mtd, &erase); if (ret) { set_current_state(TASK_RUNNING); remove_wait_queue(&wait_q, &wait); printk (KERN_WARNING "mtdblock: erase of region [0x%lx, 0x%x] " "on \"%s\" failed\n", pos, len, mtd->name); return ret; } schedule(); /* Wait for erase to finish. */ remove_wait_queue(&wait_q, &wait); /* * Next, write the data to flash. */ ret = mtd->write(mtd, pos, len, &retlen, buf); if (ret) return ret; if (retlen != len) return -EIO; return 0; } static int write_cached_data (struct mtdblk_dev *mtdblk) { struct mtd_info *mtd = mtdblk->mbd.mtd; int ret; if (mtdblk->cache_state != STATE_DIRTY) return 0; DEBUG(MTD_DEBUG_LEVEL2, "mtdblock: writing cached data for \"%s\" " "at 0x%lx, size 0x%x\n", mtd->name, mtdblk->cache_offset, mtdblk->cache_size); ret = erase_write (mtd, mtdblk->cache_offset, mtdblk->cache_size, mtdblk->cache_data); if (ret) return ret; /* * Here we could arguably set the cache state to STATE_CLEAN. * However this could lead to inconsistency since we will not * be notified if this content is altered on the flash by other * means. Let's declare it empty and leave buffering tasks to * the buffer cache instead. 
*/ mtdblk->cache_state = STATE_EMPTY; return 0; } static int do_cached_write (struct mtdblk_dev *mtdblk, unsigned long pos, int len, const char *buf) { struct mtd_info *mtd = mtdblk->mbd.mtd; unsigned int sect_size = mtdblk->cache_size; size_t retlen; int ret; DEBUG(MTD_DEBUG_LEVEL2, "mtdblock: write on \"%s\" at 0x%lx, size 0x%x\n", mtd->name, pos, len); if (!sect_size) return mtd->write(mtd, pos, len, &retlen, buf); while (len > 0) { unsigned long sect_start = (pos/sect_size)*sect_size; unsigned int offset = pos - sect_start; unsigned int size = sect_size - offset; if( size > len ) size = len; if (size == sect_size) { /* * We are covering a whole sector. Thus there is no * need to bother with the cache while it may still be * useful for other partial writes. */ ret = erase_write (mtd, pos, size, buf); if (ret) return ret; } else { /* Partial sector: need to use the cache */ if (mtdblk->cache_state == STATE_DIRTY && mtdblk->cache_offset != sect_start) { ret = write_cached_data(mtdblk); if (ret) return ret; } if (mtdblk->cache_state == STATE_EMPTY || mtdblk->cache_offset != sect_start) { /* fill the cache with the current sector */ mtdblk->cache_state = STATE_EMPTY; ret = mtd->read(mtd, sect_start, sect_size, &retlen, mtdblk->cache_data); if (ret) return ret; if (retlen != sect_size) return -EIO; mtdblk->cache_offset = sect_start; mtdblk->cache_size = sect_size; mtdblk->cache_state = STATE_CLEAN; } /* write data to our local cache */ memcpy (mtdblk->cache_data + offset, buf, size); mtdblk->cache_state = STATE_DIRTY; } buf += size; pos += size; len -= size; } return 0; } static int do_cached_read (struct mtdblk_dev *mtdblk, unsigned long pos, int len, char *buf) { struct mtd_info *mtd = mtdblk->mbd.mtd; unsigned int sect_size = mtdblk->cache_size; size_t retlen; int ret; DEBUG(MTD_DEBUG_LEVEL2, "mtdblock: read on \"%s\" at 0x%lx, size 0x%x\n", mtd->name, pos, len); if (!sect_size) return mtd->read(mtd, pos, len, &retlen, buf); while (len > 0) { unsigned long 
sect_start = (pos/sect_size)*sect_size; unsigned int offset = pos - sect_start; unsigned int size = sect_size - offset; if (size > len) size = len; /* * Check if the requested data is already cached * Read the requested amount of data from our internal cache if it * contains what we want, otherwise we read the data directly * from flash. */ if (mtdblk->cache_state != STATE_EMPTY && mtdblk->cache_offset == sect_start) { memcpy (buf, mtdblk->cache_data + offset, size); } else { ret = mtd->read(mtd, pos, size, &retlen, buf); if (ret) return ret; if (retlen != size) return -EIO; } buf += size; pos += size; len -= size; } return 0; } static int mtdblock_readsect(struct mtd_blktrans_dev *dev, unsigned long block, char *buf) { struct mtdblk_dev *mtdblk = container_of(dev, struct mtdblk_dev, mbd); return do_cached_read(mtdblk, block<<9, 512, buf); } static int mtdblock_writesect(struct mtd_blktrans_dev *dev, unsigned long block, char *buf) { struct mtdblk_dev *mtdblk = container_of(dev, struct mtdblk_dev, mbd); if (unlikely(!mtdblk->cache_data && mtdblk->cache_size)) { mtdblk->cache_data = vmalloc(mtdblk->mbd.mtd->erasesize); if (!mtdblk->cache_data) return -EINTR; /* -EINTR is not really correct, but it is the best match * documented in man 2 write for all cases. We could also * return -EAGAIN sometimes, but why bother? */ } return do_cached_write(mtdblk, block<<9, 512, buf); } static int mtdblock_open(struct mtd_blktrans_dev *mbd) { struct mtdblk_dev *mtdblk = container_of(mbd, struct mtdblk_dev, mbd); DEBUG(MTD_DEBUG_LEVEL1,"mtdblock_open\n"); mutex_lock(&mtdblks_lock); if (mtdblk->count) { mtdblk->count++; mutex_unlock(&mtdblks_lock); return 0; } /* OK, it's not open. 
Create cache info for it */ mtdblk->count = 1; mutex_init(&mtdblk->cache_mutex); mtdblk->cache_state = STATE_EMPTY; if (!(mbd->mtd->flags & MTD_NO_ERASE) && mbd->mtd->erasesize) { mtdblk->cache_size = mbd->mtd->erasesize; mtdblk->cache_data = NULL; } mutex_unlock(&mtdblks_lock); DEBUG(MTD_DEBUG_LEVEL1, "ok\n"); return 0; } static int mtdblock_release(struct mtd_blktrans_dev *mbd) { struct mtdblk_dev *mtdblk = container_of(mbd, struct mtdblk_dev, mbd); DEBUG(MTD_DEBUG_LEVEL1, "mtdblock_release\n"); mutex_lock(&mtdblks_lock); mutex_lock(&mtdblk->cache_mutex); write_cached_data(mtdblk); mutex_unlock(&mtdblk->cache_mutex); if (!--mtdblk->count) { /* It was the last usage. Free the cache */ if (mbd->mtd->sync) mbd->mtd->sync(mbd->mtd); vfree(mtdblk->cache_data); } mutex_unlock(&mtdblks_lock); DEBUG(MTD_DEBUG_LEVEL1, "ok\n"); return 0; } static int mtdblock_flush(struct mtd_blktrans_dev *dev) { struct mtdblk_dev *mtdblk = container_of(dev, struct mtdblk_dev, mbd); mutex_lock(&mtdblk->cache_mutex); write_cached_data(mtdblk); mutex_unlock(&mtdblk->cache_mutex); if (dev->mtd->sync) dev->mtd->sync(dev->mtd); return 0; } static void mtdblock_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd) { struct mtdblk_dev *dev = kzalloc(sizeof(*dev), GFP_KERNEL); if (!dev) return; dev->mbd.mtd = mtd; dev->mbd.devnum = mtd->index; dev->mbd.size = mtd->size >> 9; dev->mbd.tr = tr; if (!(mtd->flags & MTD_WRITEABLE)) dev->mbd.readonly = 1; if (add_mtd_blktrans_dev(&dev->mbd)) kfree(dev); } static void mtdblock_remove_dev(struct mtd_blktrans_dev *dev) { del_mtd_blktrans_dev(dev); } static struct mtd_blktrans_ops mtdblock_tr = { .name = "mtdblock", .major = 31, .part_bits = 0, .blksize = 512, .open = mtdblock_open, .flush = mtdblock_flush, .release = mtdblock_release, .readsect = mtdblock_readsect, .writesect = mtdblock_writesect, .add_mtd = mtdblock_add_mtd, .remove_dev = mtdblock_remove_dev, .owner = THIS_MODULE, }; static int __init init_mtdblock(void) { mutex_init(&mtdblks_lock); 
return register_mtd_blktrans(&mtdblock_tr); } static void __exit cleanup_mtdblock(void) { deregister_mtd_blktrans(&mtdblock_tr); } module_init(init_mtdblock); module_exit(cleanup_mtdblock); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Nicolas Pitre <nico@fluxnic.net> et al."); MODULE_DESCRIPTION("Caching read/erase/writeback block device emulation access to MTD devices");
gpl-2.0
mdxy2010/forlinux-ok6410
kernel/fs/fcntl.c
3054
19167
/* * linux/fs/fcntl.c * * Copyright (C) 1991, 1992 Linus Torvalds */ #include <linux/syscalls.h> #include <linux/init.h> #include <linux/mm.h> #include <linux/fs.h> #include <linux/file.h> #include <linux/fdtable.h> #include <linux/capability.h> #include <linux/dnotify.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/pipe_fs_i.h> #include <linux/security.h> #include <linux/ptrace.h> #include <linux/signal.h> #include <linux/rcupdate.h> #include <linux/pid_namespace.h> #include <asm/poll.h> #include <asm/siginfo.h> #include <asm/uaccess.h> void set_close_on_exec(unsigned int fd, int flag) { struct files_struct *files = current->files; struct fdtable *fdt; spin_lock(&files->file_lock); fdt = files_fdtable(files); if (flag) FD_SET(fd, fdt->close_on_exec); else FD_CLR(fd, fdt->close_on_exec); spin_unlock(&files->file_lock); } static int get_close_on_exec(unsigned int fd) { struct files_struct *files = current->files; struct fdtable *fdt; int res; rcu_read_lock(); fdt = files_fdtable(files); res = FD_ISSET(fd, fdt->close_on_exec); rcu_read_unlock(); return res; } SYSCALL_DEFINE3(dup3, unsigned int, oldfd, unsigned int, newfd, int, flags) { int err = -EBADF; struct file * file, *tofree; struct files_struct * files = current->files; struct fdtable *fdt; if ((flags & ~O_CLOEXEC) != 0) return -EINVAL; if (unlikely(oldfd == newfd)) return -EINVAL; spin_lock(&files->file_lock); err = expand_files(files, newfd); file = fcheck(oldfd); if (unlikely(!file)) goto Ebadf; if (unlikely(err < 0)) { if (err == -EMFILE) goto Ebadf; goto out_unlock; } /* * We need to detect attempts to do dup2() over allocated but still * not finished descriptor. NB: OpenBSD avoids that at the price of * extra work in their equivalent of fget() - they insert struct * file immediately after grabbing descriptor, mark it larval if * more work (e.g. actual opening) is needed and make sure that * fget() treats larval files as absent. 
Potentially interesting, * but while extra work in fget() is trivial, locking implications * and amount of surgery on open()-related paths in VFS are not. * FreeBSD fails with -EBADF in the same situation, NetBSD "solution" * deadlocks in rather amusing ways, AFAICS. All of that is out of * scope of POSIX or SUS, since neither considers shared descriptor * tables and this condition does not arise without those. */ err = -EBUSY; fdt = files_fdtable(files); tofree = fdt->fd[newfd]; if (!tofree && FD_ISSET(newfd, fdt->open_fds)) goto out_unlock; get_file(file); rcu_assign_pointer(fdt->fd[newfd], file); FD_SET(newfd, fdt->open_fds); if (flags & O_CLOEXEC) FD_SET(newfd, fdt->close_on_exec); else FD_CLR(newfd, fdt->close_on_exec); spin_unlock(&files->file_lock); if (tofree) filp_close(tofree, files); return newfd; Ebadf: err = -EBADF; out_unlock: spin_unlock(&files->file_lock); return err; } SYSCALL_DEFINE2(dup2, unsigned int, oldfd, unsigned int, newfd) { if (unlikely(newfd == oldfd)) { /* corner case */ struct files_struct *files = current->files; int retval = oldfd; rcu_read_lock(); if (!fcheck_files(files, oldfd)) retval = -EBADF; rcu_read_unlock(); return retval; } return sys_dup3(oldfd, newfd, 0); } SYSCALL_DEFINE1(dup, unsigned int, fildes) { int ret = -EBADF; struct file *file = fget_raw(fildes); if (file) { ret = get_unused_fd(); if (ret >= 0) fd_install(ret, file); else fput(file); } return ret; } #define SETFL_MASK (O_APPEND | O_NONBLOCK | O_NDELAY | O_DIRECT | O_NOATIME) static int setfl(int fd, struct file * filp, unsigned long arg) { struct inode * inode = filp->f_path.dentry->d_inode; int error = 0; /* * O_APPEND cannot be cleared if the file is marked as append-only * and the file is open for write. 
*/ if (((arg ^ filp->f_flags) & O_APPEND) && IS_APPEND(inode)) return -EPERM; /* O_NOATIME can only be set by the owner or superuser */ if ((arg & O_NOATIME) && !(filp->f_flags & O_NOATIME)) if (!inode_owner_or_capable(inode)) return -EPERM; /* required for strict SunOS emulation */ if (O_NONBLOCK != O_NDELAY) if (arg & O_NDELAY) arg |= O_NONBLOCK; if (arg & O_DIRECT) { if (!filp->f_mapping || !filp->f_mapping->a_ops || !filp->f_mapping->a_ops->direct_IO) return -EINVAL; } if (filp->f_op && filp->f_op->check_flags) error = filp->f_op->check_flags(arg); if (error) return error; /* * ->fasync() is responsible for setting the FASYNC bit. */ if (((arg ^ filp->f_flags) & FASYNC) && filp->f_op && filp->f_op->fasync) { error = filp->f_op->fasync(fd, filp, (arg & FASYNC) != 0); if (error < 0) goto out; if (error > 0) error = 0; } spin_lock(&filp->f_lock); filp->f_flags = (arg & SETFL_MASK) | (filp->f_flags & ~SETFL_MASK); spin_unlock(&filp->f_lock); out: return error; } static void f_modown(struct file *filp, struct pid *pid, enum pid_type type, int force) { write_lock_irq(&filp->f_owner.lock); if (force || !filp->f_owner.pid) { put_pid(filp->f_owner.pid); filp->f_owner.pid = get_pid(pid); filp->f_owner.pid_type = type; if (pid) { const struct cred *cred = current_cred(); filp->f_owner.uid = cred->uid; filp->f_owner.euid = cred->euid; } } write_unlock_irq(&filp->f_owner.lock); } int __f_setown(struct file *filp, struct pid *pid, enum pid_type type, int force) { int err; err = security_file_set_fowner(filp); if (err) return err; f_modown(filp, pid, type, force); return 0; } EXPORT_SYMBOL(__f_setown); int f_setown(struct file *filp, unsigned long arg, int force) { enum pid_type type; struct pid *pid; int who = arg; int result; type = PIDTYPE_PID; if (who < 0) { type = PIDTYPE_PGID; who = -who; } rcu_read_lock(); pid = find_vpid(who); result = __f_setown(filp, pid, type, force); rcu_read_unlock(); return result; } EXPORT_SYMBOL(f_setown); void f_delown(struct file *filp) { 
f_modown(filp, NULL, PIDTYPE_PID, 1); } pid_t f_getown(struct file *filp) { pid_t pid; read_lock(&filp->f_owner.lock); pid = pid_vnr(filp->f_owner.pid); if (filp->f_owner.pid_type == PIDTYPE_PGID) pid = -pid; read_unlock(&filp->f_owner.lock); return pid; } static int f_setown_ex(struct file *filp, unsigned long arg) { struct f_owner_ex * __user owner_p = (void * __user)arg; struct f_owner_ex owner; struct pid *pid; int type; int ret; ret = copy_from_user(&owner, owner_p, sizeof(owner)); if (ret) return -EFAULT; switch (owner.type) { case F_OWNER_TID: type = PIDTYPE_MAX; break; case F_OWNER_PID: type = PIDTYPE_PID; break; case F_OWNER_PGRP: type = PIDTYPE_PGID; break; default: return -EINVAL; } rcu_read_lock(); pid = find_vpid(owner.pid); if (owner.pid && !pid) ret = -ESRCH; else ret = __f_setown(filp, pid, type, 1); rcu_read_unlock(); return ret; } static int f_getown_ex(struct file *filp, unsigned long arg) { struct f_owner_ex * __user owner_p = (void * __user)arg; struct f_owner_ex owner; int ret = 0; read_lock(&filp->f_owner.lock); owner.pid = pid_vnr(filp->f_owner.pid); switch (filp->f_owner.pid_type) { case PIDTYPE_MAX: owner.type = F_OWNER_TID; break; case PIDTYPE_PID: owner.type = F_OWNER_PID; break; case PIDTYPE_PGID: owner.type = F_OWNER_PGRP; break; default: WARN_ON(1); ret = -EINVAL; break; } read_unlock(&filp->f_owner.lock); if (!ret) { ret = copy_to_user(owner_p, &owner, sizeof(owner)); if (ret) ret = -EFAULT; } return ret; } static long do_fcntl(int fd, unsigned int cmd, unsigned long arg, struct file *filp) { long err = -EINVAL; switch (cmd) { case F_DUPFD: case F_DUPFD_CLOEXEC: if (arg >= rlimit(RLIMIT_NOFILE)) break; err = alloc_fd(arg, cmd == F_DUPFD_CLOEXEC ? O_CLOEXEC : 0); if (err >= 0) { get_file(filp); fd_install(err, filp); } break; case F_GETFD: err = get_close_on_exec(fd) ? 
FD_CLOEXEC : 0; break; case F_SETFD: err = 0; set_close_on_exec(fd, arg & FD_CLOEXEC); break; case F_GETFL: err = filp->f_flags; break; case F_SETFL: err = setfl(fd, filp, arg); break; case F_GETLK: err = fcntl_getlk(filp, (struct flock __user *) arg); break; case F_SETLK: case F_SETLKW: err = fcntl_setlk(fd, filp, cmd, (struct flock __user *) arg); break; case F_GETOWN: /* * XXX If f_owner is a process group, the * negative return value will get converted * into an error. Oops. If we keep the * current syscall conventions, the only way * to fix this will be in libc. */ err = f_getown(filp); force_successful_syscall_return(); break; case F_SETOWN: err = f_setown(filp, arg, 1); break; case F_GETOWN_EX: err = f_getown_ex(filp, arg); break; case F_SETOWN_EX: err = f_setown_ex(filp, arg); break; case F_GETSIG: err = filp->f_owner.signum; break; case F_SETSIG: /* arg == 0 restores default behaviour. */ if (!valid_signal(arg)) { break; } err = 0; filp->f_owner.signum = arg; break; case F_GETLEASE: err = fcntl_getlease(filp); break; case F_SETLEASE: err = fcntl_setlease(fd, filp, arg); break; case F_NOTIFY: err = fcntl_dirnotify(fd, filp, arg); break; case F_SETPIPE_SZ: case F_GETPIPE_SZ: err = pipe_fcntl(filp, cmd, arg); break; default: break; } return err; } static int check_fcntl_cmd(unsigned cmd) { switch (cmd) { case F_DUPFD: case F_DUPFD_CLOEXEC: case F_GETFD: case F_SETFD: case F_GETFL: return 1; } return 0; } SYSCALL_DEFINE3(fcntl, unsigned int, fd, unsigned int, cmd, unsigned long, arg) { struct file *filp; long err = -EBADF; filp = fget_raw(fd); if (!filp) goto out; if (unlikely(filp->f_mode & FMODE_PATH)) { if (!check_fcntl_cmd(cmd)) { fput(filp); goto out; } } err = security_file_fcntl(filp, cmd, arg); if (err) { fput(filp); return err; } err = do_fcntl(fd, cmd, arg, filp); fput(filp); out: return err; } #if BITS_PER_LONG == 32 SYSCALL_DEFINE3(fcntl64, unsigned int, fd, unsigned int, cmd, unsigned long, arg) { struct file * filp; long err; err = -EBADF; filp = 
fget_raw(fd); if (!filp) goto out; if (unlikely(filp->f_mode & FMODE_PATH)) { if (!check_fcntl_cmd(cmd)) { fput(filp); goto out; } } err = security_file_fcntl(filp, cmd, arg); if (err) { fput(filp); return err; } err = -EBADF; switch (cmd) { case F_GETLK64: err = fcntl_getlk64(filp, (struct flock64 __user *) arg); break; case F_SETLK64: case F_SETLKW64: err = fcntl_setlk64(fd, filp, cmd, (struct flock64 __user *) arg); break; default: err = do_fcntl(fd, cmd, arg, filp); break; } fput(filp); out: return err; } #endif /* Table to convert sigio signal codes into poll band bitmaps */ static const long band_table[NSIGPOLL] = { POLLIN | POLLRDNORM, /* POLL_IN */ POLLOUT | POLLWRNORM | POLLWRBAND, /* POLL_OUT */ POLLIN | POLLRDNORM | POLLMSG, /* POLL_MSG */ POLLERR, /* POLL_ERR */ POLLPRI | POLLRDBAND, /* POLL_PRI */ POLLHUP | POLLERR /* POLL_HUP */ }; static inline int sigio_perm(struct task_struct *p, struct fown_struct *fown, int sig) { const struct cred *cred; int ret; rcu_read_lock(); cred = __task_cred(p); ret = ((fown->euid == 0 || fown->euid == cred->suid || fown->euid == cred->uid || fown->uid == cred->suid || fown->uid == cred->uid) && !security_file_send_sigiotask(p, fown, sig)); rcu_read_unlock(); return ret; } static void send_sigio_to_task(struct task_struct *p, struct fown_struct *fown, int fd, int reason, int group) { /* * F_SETSIG can change ->signum lockless in parallel, make * sure we read it once and use the same value throughout. */ int signum = ACCESS_ONCE(fown->signum); if (!sigio_perm(p, fown, signum)) return; switch (signum) { siginfo_t si; default: /* Queue a rt signal with the appropriate fd as its value. We use SI_SIGIO as the source, not SI_KERNEL, since kernel signals always get delivered even if we can't queue. Failure to queue in this case _should_ be reported; we fall back to SIGIO in that case. 
--sct */ si.si_signo = signum; si.si_errno = 0; si.si_code = reason; /* Make sure we are called with one of the POLL_* reasons, otherwise we could leak kernel stack into userspace. */ BUG_ON((reason & __SI_MASK) != __SI_POLL); if (reason - POLL_IN >= NSIGPOLL) si.si_band = ~0L; else si.si_band = band_table[reason - POLL_IN]; si.si_fd = fd; if (!do_send_sig_info(signum, &si, p, group)) break; /* fall-through: fall back on the old plain SIGIO signal */ case 0: do_send_sig_info(SIGIO, SEND_SIG_PRIV, p, group); } } void send_sigio(struct fown_struct *fown, int fd, int band) { struct task_struct *p; enum pid_type type; struct pid *pid; int group = 1; read_lock(&fown->lock); type = fown->pid_type; if (type == PIDTYPE_MAX) { group = 0; type = PIDTYPE_PID; } pid = fown->pid; if (!pid) goto out_unlock_fown; read_lock(&tasklist_lock); do_each_pid_task(pid, type, p) { send_sigio_to_task(p, fown, fd, band, group); } while_each_pid_task(pid, type, p); read_unlock(&tasklist_lock); out_unlock_fown: read_unlock(&fown->lock); } static void send_sigurg_to_task(struct task_struct *p, struct fown_struct *fown, int group) { if (sigio_perm(p, fown, SIGURG)) do_send_sig_info(SIGURG, SEND_SIG_PRIV, p, group); } int send_sigurg(struct fown_struct *fown) { struct task_struct *p; enum pid_type type; struct pid *pid; int group = 1; int ret = 0; read_lock(&fown->lock); type = fown->pid_type; if (type == PIDTYPE_MAX) { group = 0; type = PIDTYPE_PID; } pid = fown->pid; if (!pid) goto out_unlock_fown; ret = 1; read_lock(&tasklist_lock); do_each_pid_task(pid, type, p) { send_sigurg_to_task(p, fown, group); } while_each_pid_task(pid, type, p); read_unlock(&tasklist_lock); out_unlock_fown: read_unlock(&fown->lock); return ret; } static DEFINE_SPINLOCK(fasync_lock); static struct kmem_cache *fasync_cache __read_mostly; static void fasync_free_rcu(struct rcu_head *head) { kmem_cache_free(fasync_cache, container_of(head, struct fasync_struct, fa_rcu)); } /* * Remove a fasync entry. 
If successfully removed, return * positive and clear the FASYNC flag. If no entry exists, * do nothing and return 0. * * NOTE! It is very important that the FASYNC flag always * match the state "is the filp on a fasync list". * */ int fasync_remove_entry(struct file *filp, struct fasync_struct **fapp) { struct fasync_struct *fa, **fp; int result = 0; spin_lock(&filp->f_lock); spin_lock(&fasync_lock); for (fp = fapp; (fa = *fp) != NULL; fp = &fa->fa_next) { if (fa->fa_file != filp) continue; spin_lock_irq(&fa->fa_lock); fa->fa_file = NULL; spin_unlock_irq(&fa->fa_lock); *fp = fa->fa_next; call_rcu(&fa->fa_rcu, fasync_free_rcu); filp->f_flags &= ~FASYNC; result = 1; break; } spin_unlock(&fasync_lock); spin_unlock(&filp->f_lock); return result; } struct fasync_struct *fasync_alloc(void) { return kmem_cache_alloc(fasync_cache, GFP_KERNEL); } /* * NOTE! This can be used only for unused fasync entries: * entries that actually got inserted on the fasync list * need to be released by rcu - see fasync_remove_entry. */ void fasync_free(struct fasync_struct *new) { kmem_cache_free(fasync_cache, new); } /* * Insert a new entry into the fasync list. Return the pointer to the * old one if we didn't use the new one. * * NOTE! It is very important that the FASYNC flag always * match the state "is the filp on a fasync list". */ struct fasync_struct *fasync_insert_entry(int fd, struct file *filp, struct fasync_struct **fapp, struct fasync_struct *new) { struct fasync_struct *fa, **fp; spin_lock(&filp->f_lock); spin_lock(&fasync_lock); for (fp = fapp; (fa = *fp) != NULL; fp = &fa->fa_next) { if (fa->fa_file != filp) continue; spin_lock_irq(&fa->fa_lock); fa->fa_fd = fd; spin_unlock_irq(&fa->fa_lock); goto out; } spin_lock_init(&new->fa_lock); new->magic = FASYNC_MAGIC; new->fa_file = filp; new->fa_fd = fd; new->fa_next = *fapp; rcu_assign_pointer(*fapp, new); filp->f_flags |= FASYNC; out: spin_unlock(&fasync_lock); spin_unlock(&filp->f_lock); return fa; } /* * Add a fasync entry. 
Return negative on error, positive if * added, and zero if did nothing but change an existing one. */ static int fasync_add_entry(int fd, struct file *filp, struct fasync_struct **fapp) { struct fasync_struct *new; new = fasync_alloc(); if (!new) return -ENOMEM; /* * fasync_insert_entry() returns the old (update) entry if * it existed. * * So free the (unused) new entry and return 0 to let the * caller know that we didn't add any new fasync entries. */ if (fasync_insert_entry(fd, filp, fapp, new)) { fasync_free(new); return 0; } return 1; } /* * fasync_helper() is used by almost all character device drivers * to set up the fasync queue, and for regular files by the file * lease code. It returns negative on error, 0 if it did no changes * and positive if it added/deleted the entry. */ int fasync_helper(int fd, struct file * filp, int on, struct fasync_struct **fapp) { if (!on) return fasync_remove_entry(filp, fapp); return fasync_add_entry(fd, filp, fapp); } EXPORT_SYMBOL(fasync_helper); /* * rcu_read_lock() is held */ static void kill_fasync_rcu(struct fasync_struct *fa, int sig, int band) { while (fa) { struct fown_struct *fown; unsigned long flags; if (fa->magic != FASYNC_MAGIC) { printk(KERN_ERR "kill_fasync: bad magic number in " "fasync_struct!\n"); return; } spin_lock_irqsave(&fa->fa_lock, flags); if (fa->fa_file) { fown = &fa->fa_file->f_owner; /* Don't send SIGURG to processes which have not set a queued signum: SIGURG has its own default signalling mechanism. */ if (!(sig == SIGURG && fown->signum == 0)) send_sigio(fown, fa->fa_fd, band); } spin_unlock_irqrestore(&fa->fa_lock, flags); fa = rcu_dereference(fa->fa_next); } } void kill_fasync(struct fasync_struct **fp, int sig, int band) { /* First a quick test without locking: usually * the list is empty. 
*/ if (*fp) { rcu_read_lock(); kill_fasync_rcu(rcu_dereference(*fp), sig, band); rcu_read_unlock(); } } EXPORT_SYMBOL(kill_fasync); static int __init fcntl_init(void) { /* * Please add new bits here to ensure allocation uniqueness. * Exceptions: O_NONBLOCK is a two bit define on parisc; O_NDELAY * is defined as O_NONBLOCK on some platforms and not on others. */ BUILD_BUG_ON(19 - 1 /* for O_RDONLY being 0 */ != HWEIGHT32( O_RDONLY | O_WRONLY | O_RDWR | O_CREAT | O_EXCL | O_NOCTTY | O_TRUNC | O_APPEND | /* O_NONBLOCK | */ __O_SYNC | O_DSYNC | FASYNC | O_DIRECT | O_LARGEFILE | O_DIRECTORY | O_NOFOLLOW | O_NOATIME | O_CLOEXEC | __FMODE_EXEC | O_PATH )); fasync_cache = kmem_cache_create("fasync_cache", sizeof(struct fasync_struct), 0, SLAB_PANIC, NULL); return 0; } module_init(fcntl_init)
gpl-2.0
airk000/kernel_htc_7x30
arch/mips/kernel/cevt-bcm1480.c
3054
4480
/* * Copyright (C) 2000,2001,2004 Broadcom Corporation * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #include <linux/clockchips.h> #include <linux/interrupt.h> #include <linux/percpu.h> #include <linux/smp.h> #include <linux/irq.h> #include <asm/addrspace.h> #include <asm/io.h> #include <asm/time.h> #include <asm/sibyte/bcm1480_regs.h> #include <asm/sibyte/sb1250_regs.h> #include <asm/sibyte/bcm1480_int.h> #include <asm/sibyte/bcm1480_scd.h> #include <asm/sibyte/sb1250.h> #define IMR_IP2_VAL K_BCM1480_INT_MAP_I0 #define IMR_IP3_VAL K_BCM1480_INT_MAP_I1 #define IMR_IP4_VAL K_BCM1480_INT_MAP_I2 /* * The general purpose timer ticks at 1MHz independent if * the rest of the system */ static void sibyte_set_mode(enum clock_event_mode mode, struct clock_event_device *evt) { unsigned int cpu = smp_processor_id(); void __iomem *cfg, *init; cfg = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_CFG)); init = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_INIT)); switch (mode) { case CLOCK_EVT_MODE_PERIODIC: __raw_writeq(0, cfg); __raw_writeq((V_SCD_TIMER_FREQ / HZ) - 1, init); __raw_writeq(M_SCD_TIMER_ENABLE | M_SCD_TIMER_MODE_CONTINUOUS, cfg); break; case CLOCK_EVT_MODE_ONESHOT: /* Stop the timer until we actually program a shot */ case CLOCK_EVT_MODE_SHUTDOWN: __raw_writeq(0, cfg); break; case CLOCK_EVT_MODE_UNUSED: /* shuddup gcc 
*/ case CLOCK_EVT_MODE_RESUME: ; } } static int sibyte_next_event(unsigned long delta, struct clock_event_device *cd) { unsigned int cpu = smp_processor_id(); void __iomem *cfg, *init; cfg = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_CFG)); init = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_INIT)); __raw_writeq(0, cfg); __raw_writeq(delta - 1, init); __raw_writeq(M_SCD_TIMER_ENABLE, cfg); return 0; } static irqreturn_t sibyte_counter_handler(int irq, void *dev_id) { unsigned int cpu = smp_processor_id(); struct clock_event_device *cd = dev_id; void __iomem *cfg; unsigned long tmode; if (cd->mode == CLOCK_EVT_MODE_PERIODIC) tmode = M_SCD_TIMER_ENABLE | M_SCD_TIMER_MODE_CONTINUOUS; else tmode = 0; /* ACK interrupt */ cfg = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_CFG)); ____raw_writeq(tmode, cfg); cd->event_handler(cd); return IRQ_HANDLED; } static DEFINE_PER_CPU(struct clock_event_device, sibyte_hpt_clockevent); static DEFINE_PER_CPU(struct irqaction, sibyte_hpt_irqaction); static DEFINE_PER_CPU(char [18], sibyte_hpt_name); void __cpuinit sb1480_clockevent_init(void) { unsigned int cpu = smp_processor_id(); unsigned int irq = K_BCM1480_INT_TIMER_0 + cpu; struct irqaction *action = &per_cpu(sibyte_hpt_irqaction, cpu); struct clock_event_device *cd = &per_cpu(sibyte_hpt_clockevent, cpu); unsigned char *name = per_cpu(sibyte_hpt_name, cpu); BUG_ON(cpu > 3); /* Only have 4 general purpose timers */ sprintf(name, "bcm1480-counter-%d", cpu); cd->name = name; cd->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT; clockevent_set_clock(cd, V_SCD_TIMER_FREQ); cd->max_delta_ns = clockevent_delta2ns(0x7fffff, cd); cd->min_delta_ns = clockevent_delta2ns(2, cd); cd->rating = 200; cd->irq = irq; cd->cpumask = cpumask_of(cpu); cd->set_next_event = sibyte_next_event; cd->set_mode = sibyte_set_mode; clockevents_register_device(cd); bcm1480_mask_irq(cpu, irq); /* * Map the timer interrupt to IP[4] of this cpu */ __raw_writeq(IMR_IP4_VAL, 
IOADDR(A_BCM1480_IMR_REGISTER(cpu, R_BCM1480_IMR_INTERRUPT_MAP_BASE_H) + (irq << 3))); bcm1480_unmask_irq(cpu, irq); action->handler = sibyte_counter_handler; action->flags = IRQF_DISABLED | IRQF_PERCPU | IRQF_TIMER; action->name = name; action->dev_id = cd; irq_set_affinity(irq, cpumask_of(cpu)); setup_irq(irq, action); }
gpl-2.0
trevd/android_kernel_ti_archos
drivers/net/wireless/b43legacy/pio.c
3566
17975
/* Broadcom B43legacy wireless driver PIO Transmission Copyright (c) 2005 Michael Buesch <mb@bu3sch.de> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; see the file COPYING. If not, write to the Free Software Foundation, Inc., 51 Franklin Steet, Fifth Floor, Boston, MA 02110-1301, USA. */ #include "b43legacy.h" #include "pio.h" #include "main.h" #include "xmit.h" #include <linux/delay.h> #include <linux/slab.h> static void tx_start(struct b43legacy_pioqueue *queue) { b43legacy_pio_write(queue, B43legacy_PIO_TXCTL, B43legacy_PIO_TXCTL_INIT); } static void tx_octet(struct b43legacy_pioqueue *queue, u8 octet) { if (queue->need_workarounds) { b43legacy_pio_write(queue, B43legacy_PIO_TXDATA, octet); b43legacy_pio_write(queue, B43legacy_PIO_TXCTL, B43legacy_PIO_TXCTL_WRITELO); } else { b43legacy_pio_write(queue, B43legacy_PIO_TXCTL, B43legacy_PIO_TXCTL_WRITELO); b43legacy_pio_write(queue, B43legacy_PIO_TXDATA, octet); } } static u16 tx_get_next_word(const u8 *txhdr, const u8 *packet, size_t txhdr_size, unsigned int *pos) { const u8 *source; unsigned int i = *pos; u16 ret; if (i < txhdr_size) source = txhdr; else { source = packet; i -= txhdr_size; } ret = le16_to_cpu(*((__le16 *)(source + i))); *pos += 2; return ret; } static void tx_data(struct b43legacy_pioqueue *queue, u8 *txhdr, const u8 *packet, unsigned int octets) { u16 data; unsigned int i = 0; if (queue->need_workarounds) { data = tx_get_next_word(txhdr, packet, sizeof(struct b43legacy_txhdr_fw3), &i); 
b43legacy_pio_write(queue, B43legacy_PIO_TXDATA, data); } b43legacy_pio_write(queue, B43legacy_PIO_TXCTL, B43legacy_PIO_TXCTL_WRITELO | B43legacy_PIO_TXCTL_WRITEHI); while (i < octets - 1) { data = tx_get_next_word(txhdr, packet, sizeof(struct b43legacy_txhdr_fw3), &i); b43legacy_pio_write(queue, B43legacy_PIO_TXDATA, data); } if (octets % 2) tx_octet(queue, packet[octets - sizeof(struct b43legacy_txhdr_fw3) - 1]); } static void tx_complete(struct b43legacy_pioqueue *queue, struct sk_buff *skb) { if (queue->need_workarounds) { b43legacy_pio_write(queue, B43legacy_PIO_TXDATA, skb->data[skb->len - 1]); b43legacy_pio_write(queue, B43legacy_PIO_TXCTL, B43legacy_PIO_TXCTL_WRITELO | B43legacy_PIO_TXCTL_COMPLETE); } else b43legacy_pio_write(queue, B43legacy_PIO_TXCTL, B43legacy_PIO_TXCTL_COMPLETE); } static u16 generate_cookie(struct b43legacy_pioqueue *queue, struct b43legacy_pio_txpacket *packet) { u16 cookie = 0x0000; int packetindex; /* We use the upper 4 bits for the PIO * controller ID and the lower 12 bits * for the packet index (in the cache). 
*/ switch (queue->mmio_base) { case B43legacy_MMIO_PIO1_BASE: break; case B43legacy_MMIO_PIO2_BASE: cookie = 0x1000; break; case B43legacy_MMIO_PIO3_BASE: cookie = 0x2000; break; case B43legacy_MMIO_PIO4_BASE: cookie = 0x3000; break; default: B43legacy_WARN_ON(1); } packetindex = pio_txpacket_getindex(packet); B43legacy_WARN_ON(!(((u16)packetindex & 0xF000) == 0x0000)); cookie |= (u16)packetindex; return cookie; } static struct b43legacy_pioqueue *parse_cookie(struct b43legacy_wldev *dev, u16 cookie, struct b43legacy_pio_txpacket **packet) { struct b43legacy_pio *pio = &dev->pio; struct b43legacy_pioqueue *queue = NULL; int packetindex; switch (cookie & 0xF000) { case 0x0000: queue = pio->queue0; break; case 0x1000: queue = pio->queue1; break; case 0x2000: queue = pio->queue2; break; case 0x3000: queue = pio->queue3; break; default: B43legacy_WARN_ON(1); } packetindex = (cookie & 0x0FFF); B43legacy_WARN_ON(!(packetindex >= 0 && packetindex < B43legacy_PIO_MAXTXPACKETS)); *packet = &(queue->tx_packets_cache[packetindex]); return queue; } union txhdr_union { struct b43legacy_txhdr_fw3 txhdr_fw3; }; static int pio_tx_write_fragment(struct b43legacy_pioqueue *queue, struct sk_buff *skb, struct b43legacy_pio_txpacket *packet, size_t txhdr_size) { union txhdr_union txhdr_data; u8 *txhdr = NULL; unsigned int octets; int err; txhdr = (u8 *)(&txhdr_data.txhdr_fw3); B43legacy_WARN_ON(skb_shinfo(skb)->nr_frags != 0); err = b43legacy_generate_txhdr(queue->dev, txhdr, skb->data, skb->len, IEEE80211_SKB_CB(skb), generate_cookie(queue, packet)); if (err) return err; tx_start(queue); octets = skb->len + txhdr_size; if (queue->need_workarounds) octets--; tx_data(queue, txhdr, (u8 *)skb->data, octets); tx_complete(queue, skb); return 0; } static void free_txpacket(struct b43legacy_pio_txpacket *packet, int irq_context) { struct b43legacy_pioqueue *queue = packet->queue; if (packet->skb) { if (irq_context) dev_kfree_skb_irq(packet->skb); else dev_kfree_skb(packet->skb); } 
list_move(&packet->list, &queue->txfree); queue->nr_txfree++; } static int pio_tx_packet(struct b43legacy_pio_txpacket *packet) { struct b43legacy_pioqueue *queue = packet->queue; struct sk_buff *skb = packet->skb; u16 octets; int err; octets = (u16)skb->len + sizeof(struct b43legacy_txhdr_fw3); if (queue->tx_devq_size < octets) { b43legacywarn(queue->dev->wl, "PIO queue too small. " "Dropping packet.\n"); /* Drop it silently (return success) */ free_txpacket(packet, 1); return 0; } B43legacy_WARN_ON(queue->tx_devq_packets > B43legacy_PIO_MAXTXDEVQPACKETS); B43legacy_WARN_ON(queue->tx_devq_used > queue->tx_devq_size); /* Check if there is sufficient free space on the device * TX queue. If not, return and let the TX tasklet * retry later. */ if (queue->tx_devq_packets == B43legacy_PIO_MAXTXDEVQPACKETS) return -EBUSY; if (queue->tx_devq_used + octets > queue->tx_devq_size) return -EBUSY; /* Now poke the device. */ err = pio_tx_write_fragment(queue, skb, packet, sizeof(struct b43legacy_txhdr_fw3)); if (unlikely(err == -ENOKEY)) { /* Drop this packet, as we don't have the encryption key * anymore and must not transmit it unencrypted. */ free_txpacket(packet, 1); return 0; } /* Account for the packet size. * (We must not overflow the device TX queue) */ queue->tx_devq_packets++; queue->tx_devq_used += octets; /* Transmission started, everything ok, move the * packet to the txrunning list. 
*/ list_move_tail(&packet->list, &queue->txrunning); return 0; } static void tx_tasklet(unsigned long d) { struct b43legacy_pioqueue *queue = (struct b43legacy_pioqueue *)d; struct b43legacy_wldev *dev = queue->dev; unsigned long flags; struct b43legacy_pio_txpacket *packet, *tmp_packet; int err; u16 txctl; spin_lock_irqsave(&dev->wl->irq_lock, flags); if (queue->tx_frozen) goto out_unlock; txctl = b43legacy_pio_read(queue, B43legacy_PIO_TXCTL); if (txctl & B43legacy_PIO_TXCTL_SUSPEND) goto out_unlock; list_for_each_entry_safe(packet, tmp_packet, &queue->txqueue, list) { /* Try to transmit the packet. This can fail, if * the device queue is full. In case of failure, the * packet is left in the txqueue. * If transmission succeed, the packet is moved to txrunning. * If it is impossible to transmit the packet, it * is dropped. */ err = pio_tx_packet(packet); if (err) break; } out_unlock: spin_unlock_irqrestore(&dev->wl->irq_lock, flags); } static void setup_txqueues(struct b43legacy_pioqueue *queue) { struct b43legacy_pio_txpacket *packet; int i; queue->nr_txfree = B43legacy_PIO_MAXTXPACKETS; for (i = 0; i < B43legacy_PIO_MAXTXPACKETS; i++) { packet = &(queue->tx_packets_cache[i]); packet->queue = queue; INIT_LIST_HEAD(&packet->list); list_add(&packet->list, &queue->txfree); } } static struct b43legacy_pioqueue *b43legacy_setup_pioqueue(struct b43legacy_wldev *dev, u16 pio_mmio_base) { struct b43legacy_pioqueue *queue; u32 value; u16 qsize; queue = kzalloc(sizeof(*queue), GFP_KERNEL); if (!queue) goto out; queue->dev = dev; queue->mmio_base = pio_mmio_base; queue->need_workarounds = (dev->dev->id.revision < 3); INIT_LIST_HEAD(&queue->txfree); INIT_LIST_HEAD(&queue->txqueue); INIT_LIST_HEAD(&queue->txrunning); tasklet_init(&queue->txtask, tx_tasklet, (unsigned long)queue); value = b43legacy_read32(dev, B43legacy_MMIO_MACCTL); value &= ~B43legacy_MACCTL_BE; b43legacy_write32(dev, B43legacy_MMIO_MACCTL, value); qsize = b43legacy_read16(dev, queue->mmio_base + 
B43legacy_PIO_TXQBUFSIZE); if (qsize == 0) { b43legacyerr(dev->wl, "This card does not support PIO " "operation mode. Please use DMA mode " "(module parameter pio=0).\n"); goto err_freequeue; } if (qsize <= B43legacy_PIO_TXQADJUST) { b43legacyerr(dev->wl, "PIO tx device-queue too small (%u)\n", qsize); goto err_freequeue; } qsize -= B43legacy_PIO_TXQADJUST; queue->tx_devq_size = qsize; setup_txqueues(queue); out: return queue; err_freequeue: kfree(queue); queue = NULL; goto out; } static void cancel_transfers(struct b43legacy_pioqueue *queue) { struct b43legacy_pio_txpacket *packet, *tmp_packet; tasklet_disable(&queue->txtask); list_for_each_entry_safe(packet, tmp_packet, &queue->txrunning, list) free_txpacket(packet, 0); list_for_each_entry_safe(packet, tmp_packet, &queue->txqueue, list) free_txpacket(packet, 0); } static void b43legacy_destroy_pioqueue(struct b43legacy_pioqueue *queue) { if (!queue) return; cancel_transfers(queue); kfree(queue); } void b43legacy_pio_free(struct b43legacy_wldev *dev) { struct b43legacy_pio *pio; if (!b43legacy_using_pio(dev)) return; pio = &dev->pio; b43legacy_destroy_pioqueue(pio->queue3); pio->queue3 = NULL; b43legacy_destroy_pioqueue(pio->queue2); pio->queue2 = NULL; b43legacy_destroy_pioqueue(pio->queue1); pio->queue1 = NULL; b43legacy_destroy_pioqueue(pio->queue0); pio->queue0 = NULL; } int b43legacy_pio_init(struct b43legacy_wldev *dev) { struct b43legacy_pio *pio = &dev->pio; struct b43legacy_pioqueue *queue; int err = -ENOMEM; queue = b43legacy_setup_pioqueue(dev, B43legacy_MMIO_PIO1_BASE); if (!queue) goto out; pio->queue0 = queue; queue = b43legacy_setup_pioqueue(dev, B43legacy_MMIO_PIO2_BASE); if (!queue) goto err_destroy0; pio->queue1 = queue; queue = b43legacy_setup_pioqueue(dev, B43legacy_MMIO_PIO3_BASE); if (!queue) goto err_destroy1; pio->queue2 = queue; queue = b43legacy_setup_pioqueue(dev, B43legacy_MMIO_PIO4_BASE); if (!queue) goto err_destroy2; pio->queue3 = queue; if (dev->dev->id.revision < 3) dev->irq_mask 
|= B43legacy_IRQ_PIO_WORKAROUND; b43legacydbg(dev->wl, "PIO initialized\n"); err = 0; out: return err; err_destroy2: b43legacy_destroy_pioqueue(pio->queue2); pio->queue2 = NULL; err_destroy1: b43legacy_destroy_pioqueue(pio->queue1); pio->queue1 = NULL; err_destroy0: b43legacy_destroy_pioqueue(pio->queue0); pio->queue0 = NULL; goto out; } int b43legacy_pio_tx(struct b43legacy_wldev *dev, struct sk_buff *skb) { struct b43legacy_pioqueue *queue = dev->pio.queue1; struct b43legacy_pio_txpacket *packet; B43legacy_WARN_ON(queue->tx_suspended); B43legacy_WARN_ON(list_empty(&queue->txfree)); packet = list_entry(queue->txfree.next, struct b43legacy_pio_txpacket, list); packet->skb = skb; list_move_tail(&packet->list, &queue->txqueue); queue->nr_txfree--; B43legacy_WARN_ON(queue->nr_txfree >= B43legacy_PIO_MAXTXPACKETS); tasklet_schedule(&queue->txtask); return 0; } void b43legacy_pio_handle_txstatus(struct b43legacy_wldev *dev, const struct b43legacy_txstatus *status) { struct b43legacy_pioqueue *queue; struct b43legacy_pio_txpacket *packet; struct ieee80211_tx_info *info; int retry_limit; queue = parse_cookie(dev, status->cookie, &packet); B43legacy_WARN_ON(!queue); if (!packet->skb) return; queue->tx_devq_packets--; queue->tx_devq_used -= (packet->skb->len + sizeof(struct b43legacy_txhdr_fw3)); info = IEEE80211_SKB_CB(packet->skb); /* preserve the confiured retry limit before clearing the status * The xmit function has overwritten the rc's value with the actual * retry limit done by the hardware */ retry_limit = info->status.rates[0].count; ieee80211_tx_info_clear_status(info); if (status->acked) info->flags |= IEEE80211_TX_STAT_ACK; if (status->rts_count > dev->wl->hw->conf.short_frame_max_tx_count) { /* * If the short retries (RTS, not data frame) have exceeded * the limit, the hw will not have tried the selected rate, * but will have used the fallback rate instead. 
* Don't let the rate control count attempts for the selected * rate in this case, otherwise the statistics will be off. */ info->status.rates[0].count = 0; info->status.rates[1].count = status->frame_count; } else { if (status->frame_count > retry_limit) { info->status.rates[0].count = retry_limit; info->status.rates[1].count = status->frame_count - retry_limit; } else { info->status.rates[0].count = status->frame_count; info->status.rates[1].idx = -1; } } ieee80211_tx_status_irqsafe(dev->wl->hw, packet->skb); packet->skb = NULL; free_txpacket(packet, 1); /* If there are packets on the txqueue, poke the tasklet * to transmit them. */ if (!list_empty(&queue->txqueue)) tasklet_schedule(&queue->txtask); } static void pio_rx_error(struct b43legacy_pioqueue *queue, int clear_buffers, const char *error) { int i; b43legacyerr(queue->dev->wl, "PIO RX error: %s\n", error); b43legacy_pio_write(queue, B43legacy_PIO_RXCTL, B43legacy_PIO_RXCTL_READY); if (clear_buffers) { B43legacy_WARN_ON(queue->mmio_base != B43legacy_MMIO_PIO1_BASE); for (i = 0; i < 15; i++) { /* Dummy read. 
*/ b43legacy_pio_read(queue, B43legacy_PIO_RXDATA); } } } void b43legacy_pio_rx(struct b43legacy_pioqueue *queue) { __le16 preamble[21] = { 0 }; struct b43legacy_rxhdr_fw3 *rxhdr; u16 tmp; u16 len; u16 macstat; int i; int preamble_readwords; struct sk_buff *skb; tmp = b43legacy_pio_read(queue, B43legacy_PIO_RXCTL); if (!(tmp & B43legacy_PIO_RXCTL_DATAAVAILABLE)) return; b43legacy_pio_write(queue, B43legacy_PIO_RXCTL, B43legacy_PIO_RXCTL_DATAAVAILABLE); for (i = 0; i < 10; i++) { tmp = b43legacy_pio_read(queue, B43legacy_PIO_RXCTL); if (tmp & B43legacy_PIO_RXCTL_READY) goto data_ready; udelay(10); } b43legacydbg(queue->dev->wl, "PIO RX timed out\n"); return; data_ready: len = b43legacy_pio_read(queue, B43legacy_PIO_RXDATA); if (unlikely(len > 0x700)) { pio_rx_error(queue, 0, "len > 0x700"); return; } if (unlikely(len == 0 && queue->mmio_base != B43legacy_MMIO_PIO4_BASE)) { pio_rx_error(queue, 0, "len == 0"); return; } preamble[0] = cpu_to_le16(len); if (queue->mmio_base == B43legacy_MMIO_PIO4_BASE) preamble_readwords = 14 / sizeof(u16); else preamble_readwords = 18 / sizeof(u16); for (i = 0; i < preamble_readwords; i++) { tmp = b43legacy_pio_read(queue, B43legacy_PIO_RXDATA); preamble[i + 1] = cpu_to_le16(tmp); } rxhdr = (struct b43legacy_rxhdr_fw3 *)preamble; macstat = le16_to_cpu(rxhdr->mac_status); if (macstat & B43legacy_RX_MAC_FCSERR) { pio_rx_error(queue, (queue->mmio_base == B43legacy_MMIO_PIO1_BASE), "Frame FCS error"); return; } if (queue->mmio_base == B43legacy_MMIO_PIO4_BASE) { /* We received an xmit status. 
*/ struct b43legacy_hwtxstatus *hw; hw = (struct b43legacy_hwtxstatus *)(preamble + 1); b43legacy_handle_hwtxstatus(queue->dev, hw); return; } skb = dev_alloc_skb(len); if (unlikely(!skb)) { pio_rx_error(queue, 1, "OOM"); return; } skb_put(skb, len); for (i = 0; i < len - 1; i += 2) { tmp = b43legacy_pio_read(queue, B43legacy_PIO_RXDATA); *((__le16 *)(skb->data + i)) = cpu_to_le16(tmp); } if (len % 2) { tmp = b43legacy_pio_read(queue, B43legacy_PIO_RXDATA); skb->data[len - 1] = (tmp & 0x00FF); } b43legacy_rx(queue->dev, skb, rxhdr); } void b43legacy_pio_tx_suspend(struct b43legacy_pioqueue *queue) { b43legacy_power_saving_ctl_bits(queue->dev, -1, 1); b43legacy_pio_write(queue, B43legacy_PIO_TXCTL, b43legacy_pio_read(queue, B43legacy_PIO_TXCTL) | B43legacy_PIO_TXCTL_SUSPEND); } void b43legacy_pio_tx_resume(struct b43legacy_pioqueue *queue) { b43legacy_pio_write(queue, B43legacy_PIO_TXCTL, b43legacy_pio_read(queue, B43legacy_PIO_TXCTL) & ~B43legacy_PIO_TXCTL_SUSPEND); b43legacy_power_saving_ctl_bits(queue->dev, -1, -1); tasklet_schedule(&queue->txtask); } void b43legacy_pio_freeze_txqueues(struct b43legacy_wldev *dev) { struct b43legacy_pio *pio; B43legacy_WARN_ON(!b43legacy_using_pio(dev)); pio = &dev->pio; pio->queue0->tx_frozen = 1; pio->queue1->tx_frozen = 1; pio->queue2->tx_frozen = 1; pio->queue3->tx_frozen = 1; } void b43legacy_pio_thaw_txqueues(struct b43legacy_wldev *dev) { struct b43legacy_pio *pio; B43legacy_WARN_ON(!b43legacy_using_pio(dev)); pio = &dev->pio; pio->queue0->tx_frozen = 0; pio->queue1->tx_frozen = 0; pio->queue2->tx_frozen = 0; pio->queue3->tx_frozen = 0; if (!list_empty(&pio->queue0->txqueue)) tasklet_schedule(&pio->queue0->txtask); if (!list_empty(&pio->queue1->txqueue)) tasklet_schedule(&pio->queue1->txtask); if (!list_empty(&pio->queue2->txqueue)) tasklet_schedule(&pio->queue2->txtask); if (!list_empty(&pio->queue3->txqueue)) tasklet_schedule(&pio->queue3->txtask); }
gpl-2.0
keks2293/kernel_zte
drivers/media/common/b2c2/flexcop-fe-tuner.c
3566
16155
/* * Linux driver for digital TV devices equipped with B2C2 FlexcopII(b)/III * flexcop-fe-tuner.c - methods for frontend attachment and DiSEqC controlling * see flexcop.c for copyright information */ #include <media/tuner.h> #include "flexcop.h" #include "mt312.h" #include "stv0299.h" #include "s5h1420.h" #include "itd1000.h" #include "cx24113.h" #include "cx24123.h" #include "isl6421.h" #include "mt352.h" #include "bcm3510.h" #include "nxt200x.h" #include "dvb-pll.h" #include "lgdt330x.h" #include "tuner-simple.h" #include "stv0297.h" /* Can we use the specified front-end? Remember that if we are compiled * into the kernel we can't call code that's in modules. */ #define FE_SUPPORTED(fe) (defined(CONFIG_DVB_##fe) || \ (defined(CONFIG_DVB_##fe##_MODULE) && defined(MODULE))) /* lnb control */ #if FE_SUPPORTED(MT312) || FE_SUPPORTED(STV0299) static int flexcop_set_voltage(struct dvb_frontend *fe, fe_sec_voltage_t voltage) { struct flexcop_device *fc = fe->dvb->priv; flexcop_ibi_value v; deb_tuner("polarity/voltage = %u\n", voltage); v = fc->read_ibi_reg(fc, misc_204); switch (voltage) { case SEC_VOLTAGE_OFF: v.misc_204.ACPI1_sig = 1; break; case SEC_VOLTAGE_13: v.misc_204.ACPI1_sig = 0; v.misc_204.LNB_L_H_sig = 0; break; case SEC_VOLTAGE_18: v.misc_204.ACPI1_sig = 0; v.misc_204.LNB_L_H_sig = 1; break; default: err("unknown SEC_VOLTAGE value"); return -EINVAL; } return fc->write_ibi_reg(fc, misc_204, v); } #endif #if FE_SUPPORTED(S5H1420) || FE_SUPPORTED(STV0299) || FE_SUPPORTED(MT312) static int flexcop_sleep(struct dvb_frontend* fe) { struct flexcop_device *fc = fe->dvb->priv; if (fc->fe_sleep) return fc->fe_sleep(fe); return 0; } #endif /* SkyStar2 DVB-S rev 2.3 */ #if FE_SUPPORTED(MT312) && FE_SUPPORTED(PLL) static int flexcop_set_tone(struct dvb_frontend *fe, fe_sec_tone_mode_t tone) { /* u16 wz_half_period_for_45_mhz[] = { 0x01ff, 0x0154, 0x00ff, 0x00cc }; */ struct flexcop_device *fc = fe->dvb->priv; flexcop_ibi_value v; u16 ax; v.raw = 0; deb_tuner("tone = 
%u\n",tone); switch (tone) { case SEC_TONE_ON: ax = 0x01ff; break; case SEC_TONE_OFF: ax = 0; break; default: err("unknown SEC_TONE value"); return -EINVAL; } v.lnb_switch_freq_200.LNB_CTLPrescaler_sig = 1; /* divide by 2 */ v.lnb_switch_freq_200.LNB_CTLHighCount_sig = ax; v.lnb_switch_freq_200.LNB_CTLLowCount_sig = ax == 0 ? 0x1ff : ax; return fc->write_ibi_reg(fc,lnb_switch_freq_200,v); } static void flexcop_diseqc_send_bit(struct dvb_frontend* fe, int data) { flexcop_set_tone(fe, SEC_TONE_ON); udelay(data ? 500 : 1000); flexcop_set_tone(fe, SEC_TONE_OFF); udelay(data ? 1000 : 500); } static void flexcop_diseqc_send_byte(struct dvb_frontend* fe, int data) { int i, par = 1, d; for (i = 7; i >= 0; i--) { d = (data >> i) & 1; par ^= d; flexcop_diseqc_send_bit(fe, d); } flexcop_diseqc_send_bit(fe, par); } static int flexcop_send_diseqc_msg(struct dvb_frontend *fe, int len, u8 *msg, unsigned long burst) { int i; flexcop_set_tone(fe, SEC_TONE_OFF); mdelay(16); for (i = 0; i < len; i++) flexcop_diseqc_send_byte(fe,msg[i]); mdelay(16); if (burst != -1) { if (burst) flexcop_diseqc_send_byte(fe, 0xff); else { flexcop_set_tone(fe, SEC_TONE_ON); mdelay(12); udelay(500); flexcop_set_tone(fe, SEC_TONE_OFF); } msleep(20); } return 0; } static int flexcop_diseqc_send_master_cmd(struct dvb_frontend *fe, struct dvb_diseqc_master_cmd *cmd) { return flexcop_send_diseqc_msg(fe, cmd->msg_len, cmd->msg, 0); } static int flexcop_diseqc_send_burst(struct dvb_frontend *fe, fe_sec_mini_cmd_t minicmd) { return flexcop_send_diseqc_msg(fe, 0, NULL, minicmd); } static struct mt312_config skystar23_samsung_tbdu18132_config = { .demod_address = 0x0e, }; static int skystar2_rev23_attach(struct flexcop_device *fc, struct i2c_adapter *i2c) { struct dvb_frontend_ops *ops; fc->fe = dvb_attach(mt312_attach, &skystar23_samsung_tbdu18132_config, i2c); if (!fc->fe) return 0; if (!dvb_attach(dvb_pll_attach, fc->fe, 0x61, i2c, DVB_PLL_SAMSUNG_TBDU18132)) return 0; ops = &fc->fe->ops; 
ops->diseqc_send_master_cmd = flexcop_diseqc_send_master_cmd; ops->diseqc_send_burst = flexcop_diseqc_send_burst; ops->set_tone = flexcop_set_tone; ops->set_voltage = flexcop_set_voltage; fc->fe_sleep = ops->sleep; ops->sleep = flexcop_sleep; return 1; } #else #define skystar2_rev23_attach NULL #endif /* SkyStar2 DVB-S rev 2.6 */ #if FE_SUPPORTED(STV0299) && FE_SUPPORTED(PLL) static int samsung_tbmu24112_set_symbol_rate(struct dvb_frontend *fe, u32 srate, u32 ratio) { u8 aclk = 0; u8 bclk = 0; if (srate < 1500000) { aclk = 0xb7; bclk = 0x47; } else if (srate < 3000000) { aclk = 0xb7; bclk = 0x4b; } else if (srate < 7000000) { aclk = 0xb7; bclk = 0x4f; } else if (srate < 14000000) { aclk = 0xb7; bclk = 0x53; } else if (srate < 30000000) { aclk = 0xb6; bclk = 0x53; } else if (srate < 45000000) { aclk = 0xb4; bclk = 0x51; } stv0299_writereg(fe, 0x13, aclk); stv0299_writereg(fe, 0x14, bclk); stv0299_writereg(fe, 0x1f, (ratio >> 16) & 0xff); stv0299_writereg(fe, 0x20, (ratio >> 8) & 0xff); stv0299_writereg(fe, 0x21, ratio & 0xf0); return 0; } static u8 samsung_tbmu24112_inittab[] = { 0x01, 0x15, 0x02, 0x30, 0x03, 0x00, 0x04, 0x7D, 0x05, 0x35, 0x06, 0x02, 0x07, 0x00, 0x08, 0xC3, 0x0C, 0x00, 0x0D, 0x81, 0x0E, 0x23, 0x0F, 0x12, 0x10, 0x7E, 0x11, 0x84, 0x12, 0xB9, 0x13, 0x88, 0x14, 0x89, 0x15, 0xC9, 0x16, 0x00, 0x17, 0x5C, 0x18, 0x00, 0x19, 0x00, 0x1A, 0x00, 0x1C, 0x00, 0x1D, 0x00, 0x1E, 0x00, 0x1F, 0x3A, 0x20, 0x2E, 0x21, 0x80, 0x22, 0xFF, 0x23, 0xC1, 0x28, 0x00, 0x29, 0x1E, 0x2A, 0x14, 0x2B, 0x0F, 0x2C, 0x09, 0x2D, 0x05, 0x31, 0x1F, 0x32, 0x19, 0x33, 0xFE, 0x34, 0x93, 0xff, 0xff, }; static struct stv0299_config samsung_tbmu24112_config = { .demod_address = 0x68, .inittab = samsung_tbmu24112_inittab, .mclk = 88000000UL, .invert = 0, .skip_reinit = 0, .lock_output = STV0299_LOCKOUTPUT_LK, .volt13_op0_op1 = STV0299_VOLT13_OP1, .min_delay_ms = 100, .set_symbol_rate = samsung_tbmu24112_set_symbol_rate, }; static int skystar2_rev26_attach(struct flexcop_device *fc, struct 
i2c_adapter *i2c) { fc->fe = dvb_attach(stv0299_attach, &samsung_tbmu24112_config, i2c); if (!fc->fe) return 0; if (!dvb_attach(dvb_pll_attach, fc->fe, 0x61, i2c, DVB_PLL_SAMSUNG_TBMU24112)) return 0; fc->fe->ops.set_voltage = flexcop_set_voltage; fc->fe_sleep = fc->fe->ops.sleep; fc->fe->ops.sleep = flexcop_sleep; return 1; } #else #define skystar2_rev26_attach NULL #endif /* SkyStar2 DVB-S rev 2.7 */ #if FE_SUPPORTED(S5H1420) && FE_SUPPORTED(ISL6421) && FE_SUPPORTED(TUNER_ITD1000) static struct s5h1420_config skystar2_rev2_7_s5h1420_config = { .demod_address = 0x53, .invert = 1, .repeated_start_workaround = 1, .serial_mpeg = 1, }; static struct itd1000_config skystar2_rev2_7_itd1000_config = { .i2c_address = 0x61, }; static int skystar2_rev27_attach(struct flexcop_device *fc, struct i2c_adapter *i2c) { flexcop_ibi_value r108; struct i2c_adapter *i2c_tuner; /* enable no_base_addr - no repeated start when reading */ fc->fc_i2c_adap[0].no_base_addr = 1; fc->fe = dvb_attach(s5h1420_attach, &skystar2_rev2_7_s5h1420_config, i2c); if (!fc->fe) goto fail; i2c_tuner = s5h1420_get_tuner_i2c_adapter(fc->fe); if (!i2c_tuner) goto fail; fc->fe_sleep = fc->fe->ops.sleep; fc->fe->ops.sleep = flexcop_sleep; /* enable no_base_addr - no repeated start when reading */ fc->fc_i2c_adap[2].no_base_addr = 1; if (!dvb_attach(isl6421_attach, fc->fe, &fc->fc_i2c_adap[2].i2c_adap, 0x08, 1, 1, false)) { err("ISL6421 could NOT be attached"); goto fail_isl; } info("ISL6421 successfully attached"); /* the ITD1000 requires a lower i2c clock - is it a problem ? */ r108.raw = 0x00000506; fc->write_ibi_reg(fc, tw_sm_c_108, r108); if (!dvb_attach(itd1000_attach, fc->fe, i2c_tuner, &skystar2_rev2_7_itd1000_config)) { err("ITD1000 could NOT be attached"); /* Should i2c clock be restored? 
*/ goto fail_isl; } info("ITD1000 successfully attached"); return 1; fail_isl: fc->fc_i2c_adap[2].no_base_addr = 0; fail: /* for the next devices we need it again */ fc->fc_i2c_adap[0].no_base_addr = 0; return 0; } #else #define skystar2_rev27_attach NULL #endif /* SkyStar2 rev 2.8 */ #if FE_SUPPORTED(CX24123) && FE_SUPPORTED(ISL6421) && FE_SUPPORTED(TUNER_CX24113) static struct cx24123_config skystar2_rev2_8_cx24123_config = { .demod_address = 0x55, .dont_use_pll = 1, .agc_callback = cx24113_agc_callback, }; static const struct cx24113_config skystar2_rev2_8_cx24113_config = { .i2c_addr = 0x54, .xtal_khz = 10111, }; static int skystar2_rev28_attach(struct flexcop_device *fc, struct i2c_adapter *i2c) { struct i2c_adapter *i2c_tuner; fc->fe = dvb_attach(cx24123_attach, &skystar2_rev2_8_cx24123_config, i2c); if (!fc->fe) return 0; i2c_tuner = cx24123_get_tuner_i2c_adapter(fc->fe); if (!i2c_tuner) return 0; if (!dvb_attach(cx24113_attach, fc->fe, &skystar2_rev2_8_cx24113_config, i2c_tuner)) { err("CX24113 could NOT be attached"); return 0; } info("CX24113 successfully attached"); fc->fc_i2c_adap[2].no_base_addr = 1; if (!dvb_attach(isl6421_attach, fc->fe, &fc->fc_i2c_adap[2].i2c_adap, 0x08, 0, 0, false)) { err("ISL6421 could NOT be attached"); fc->fc_i2c_adap[2].no_base_addr = 0; return 0; } info("ISL6421 successfully attached"); /* TODO on i2c_adap[1] addr 0x11 (EEPROM) there seems to be an * IR-receiver (PIC16F818) - but the card has no input for that ??? 
*/ return 1; } #else #define skystar2_rev28_attach NULL #endif /* AirStar DVB-T */ #if FE_SUPPORTED(MT352) && FE_SUPPORTED(PLL) static int samsung_tdtc9251dh0_demod_init(struct dvb_frontend *fe) { static u8 mt352_clock_config[] = { 0x89, 0x18, 0x2d }; static u8 mt352_reset[] = { 0x50, 0x80 }; static u8 mt352_adc_ctl_1_cfg[] = { 0x8E, 0x40 }; static u8 mt352_agc_cfg[] = { 0x67, 0x28, 0xa1 }; static u8 mt352_capt_range_cfg[] = { 0x75, 0x32 }; mt352_write(fe, mt352_clock_config, sizeof(mt352_clock_config)); udelay(2000); mt352_write(fe, mt352_reset, sizeof(mt352_reset)); mt352_write(fe, mt352_adc_ctl_1_cfg, sizeof(mt352_adc_ctl_1_cfg)); mt352_write(fe, mt352_agc_cfg, sizeof(mt352_agc_cfg)); mt352_write(fe, mt352_capt_range_cfg, sizeof(mt352_capt_range_cfg)); return 0; } static struct mt352_config samsung_tdtc9251dh0_config = { .demod_address = 0x0f, .demod_init = samsung_tdtc9251dh0_demod_init, }; static int airstar_dvbt_attach(struct flexcop_device *fc, struct i2c_adapter *i2c) { fc->fe = dvb_attach(mt352_attach, &samsung_tdtc9251dh0_config, i2c); if (!fc->fe) return 0; return !!dvb_attach(dvb_pll_attach, fc->fe, 0x61, NULL, DVB_PLL_SAMSUNG_TDTC9251DH0); } #else #define airstar_dvbt_attach NULL #endif /* AirStar ATSC 1st generation */ #if FE_SUPPORTED(BCM3510) static int flexcop_fe_request_firmware(struct dvb_frontend *fe, const struct firmware **fw, char* name) { struct flexcop_device *fc = fe->dvb->priv; return request_firmware(fw, name, fc->dev); } static struct bcm3510_config air2pc_atsc_first_gen_config = { .demod_address = 0x0f, .request_firmware = flexcop_fe_request_firmware, }; static int airstar_atsc1_attach(struct flexcop_device *fc, struct i2c_adapter *i2c) { fc->fe = dvb_attach(bcm3510_attach, &air2pc_atsc_first_gen_config, i2c); return fc->fe != NULL; } #else #define airstar_atsc1_attach NULL #endif /* AirStar ATSC 2nd generation */ #if FE_SUPPORTED(NXT200X) && FE_SUPPORTED(PLL) static struct nxt200x_config samsung_tbmv_config = { .demod_address = 0x0a, 
}; static int airstar_atsc2_attach(struct flexcop_device *fc, struct i2c_adapter *i2c) { fc->fe = dvb_attach(nxt200x_attach, &samsung_tbmv_config, i2c); if (!fc->fe) return 0; return !!dvb_attach(dvb_pll_attach, fc->fe, 0x61, NULL, DVB_PLL_SAMSUNG_TBMV); } #else #define airstar_atsc2_attach NULL #endif /* AirStar ATSC 3rd generation */ #if FE_SUPPORTED(LGDT330X) static struct lgdt330x_config air2pc_atsc_hd5000_config = { .demod_address = 0x59, .demod_chip = LGDT3303, .serial_mpeg = 0x04, .clock_polarity_flip = 1, }; static int airstar_atsc3_attach(struct flexcop_device *fc, struct i2c_adapter *i2c) { fc->fe = dvb_attach(lgdt330x_attach, &air2pc_atsc_hd5000_config, i2c); if (!fc->fe) return 0; return !!dvb_attach(simple_tuner_attach, fc->fe, i2c, 0x61, TUNER_LG_TDVS_H06XF); } #else #define airstar_atsc3_attach NULL #endif /* CableStar2 DVB-C */ #if FE_SUPPORTED(STV0297) && FE_SUPPORTED(PLL) static u8 alps_tdee4_stv0297_inittab[] = { 0x80, 0x01, 0x80, 0x00, 0x81, 0x01, 0x81, 0x00, 0x00, 0x48, 0x01, 0x58, 0x03, 0x00, 0x04, 0x00, 0x07, 0x00, 0x08, 0x00, 0x30, 0xff, 0x31, 0x9d, 0x32, 0xff, 0x33, 0x00, 0x34, 0x29, 0x35, 0x55, 0x36, 0x80, 0x37, 0x6e, 0x38, 0x9c, 0x40, 0x1a, 0x41, 0xfe, 0x42, 0x33, 0x43, 0x00, 0x44, 0xff, 0x45, 0x00, 0x46, 0x00, 0x49, 0x04, 0x4a, 0x51, 0x4b, 0xf8, 0x52, 0x30, 0x53, 0x06, 0x59, 0x06, 0x5a, 0x5e, 0x5b, 0x04, 0x61, 0x49, 0x62, 0x0a, 0x70, 0xff, 0x71, 0x04, 0x72, 0x00, 0x73, 0x00, 0x74, 0x0c, 0x80, 0x20, 0x81, 0x00, 0x82, 0x30, 0x83, 0x00, 0x84, 0x04, 0x85, 0x22, 0x86, 0x08, 0x87, 0x1b, 0x88, 0x00, 0x89, 0x00, 0x90, 0x00, 0x91, 0x04, 0xa0, 0x86, 0xa1, 0x00, 0xa2, 0x00, 0xb0, 0x91, 0xb1, 0x0b, 0xc0, 0x5b, 0xc1, 0x10, 0xc2, 0x12, 0xd0, 0x02, 0xd1, 0x00, 0xd2, 0x00, 0xd3, 0x00, 0xd4, 0x02, 0xd5, 0x00, 0xde, 0x00, 0xdf, 0x01, 0xff, 0xff, }; static struct stv0297_config alps_tdee4_stv0297_config = { .demod_address = 0x1c, .inittab = alps_tdee4_stv0297_inittab, }; static int cablestar2_attach(struct flexcop_device *fc, struct i2c_adapter *i2c) { 
fc->fc_i2c_adap[0].no_base_addr = 1; fc->fe = dvb_attach(stv0297_attach, &alps_tdee4_stv0297_config, i2c); if (!fc->fe) goto fail; /* This tuner doesn't use the stv0297's I2C gate, but instead the * tuner is connected to a different flexcop I2C adapter. */ if (fc->fe->ops.i2c_gate_ctrl) fc->fe->ops.i2c_gate_ctrl(fc->fe, 0); fc->fe->ops.i2c_gate_ctrl = NULL; if (!dvb_attach(dvb_pll_attach, fc->fe, 0x61, &fc->fc_i2c_adap[2].i2c_adap, DVB_PLL_TDEE4)) goto fail; return 1; fail: /* Reset for next frontend to try */ fc->fc_i2c_adap[0].no_base_addr = 0; return 0; } #else #define cablestar2_attach NULL #endif static struct { flexcop_device_type_t type; int (*attach)(struct flexcop_device *, struct i2c_adapter *); } flexcop_frontends[] = { { FC_SKY_REV27, skystar2_rev27_attach }, { FC_SKY_REV28, skystar2_rev28_attach }, { FC_SKY_REV26, skystar2_rev26_attach }, { FC_AIR_DVBT, airstar_dvbt_attach }, { FC_AIR_ATSC2, airstar_atsc2_attach }, { FC_AIR_ATSC3, airstar_atsc3_attach }, { FC_AIR_ATSC1, airstar_atsc1_attach }, { FC_CABLE, cablestar2_attach }, { FC_SKY_REV23, skystar2_rev23_attach }, }; /* try to figure out the frontend */ int flexcop_frontend_init(struct flexcop_device *fc) { int i; for (i = 0; i < ARRAY_SIZE(flexcop_frontends); i++) { if (!flexcop_frontends[i].attach) continue; /* type needs to be set before, because of some workarounds * done based on the probed card type */ fc->dev_type = flexcop_frontends[i].type; if (flexcop_frontends[i].attach(fc, &fc->fc_i2c_adap[0].i2c_adap)) goto fe_found; /* Clean up partially attached frontend */ if (fc->fe) { dvb_frontend_detach(fc->fe); fc->fe = NULL; } } fc->dev_type = FC_UNK; err("no frontend driver found for this B2C2/FlexCop adapter"); return -ENODEV; fe_found: info("found '%s' .", fc->fe->ops.info.name); if (dvb_register_frontend(&fc->dvb_adapter, fc->fe)) { err("frontend registration failed!"); dvb_frontend_detach(fc->fe); fc->fe = NULL; return -EINVAL; } fc->init_state |= FC_STATE_FE_INIT; return 0; } void 
flexcop_frontend_exit(struct flexcop_device *fc)
{
	/* Undo flexcop_frontend_init(): unregister and detach the frontend
	 * only if it was brought up successfully. */
	if (fc->init_state & FC_STATE_FE_INIT) {
		dvb_unregister_frontend(fc->fe);
		dvb_frontend_detach(fc->fe);
	}
	/* Clear the flag unconditionally so a later re-init starts clean. */
	fc->init_state &= ~FC_STATE_FE_INIT;
}
gpl-2.0
ztotherad/nd7
drivers/media/common/tuners/tda827x.c
4078
26582
/* * * (c) 2005 Hartmut Hackmann * (c) 2007 Michael Krufky * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/module.h> #include <linux/slab.h> #include <asm/types.h> #include <linux/dvb/frontend.h> #include <linux/videodev2.h> #include "tda827x.h" static int debug; module_param(debug, int, 0644); MODULE_PARM_DESC(debug, "Turn on/off frontend debugging (default:off)."); #define dprintk(args...) 
\ do { \ if (debug) printk(KERN_DEBUG "tda827x: " args); \ } while (0) struct tda827x_priv { int i2c_addr; struct i2c_adapter *i2c_adap; struct tda827x_config *cfg; unsigned int sgIF; unsigned char lpsel; u32 frequency; u32 bandwidth; }; static void tda827x_set_std(struct dvb_frontend *fe, struct analog_parameters *params) { struct tda827x_priv *priv = fe->tuner_priv; char *mode; priv->lpsel = 0; if (params->std & V4L2_STD_MN) { priv->sgIF = 92; priv->lpsel = 1; mode = "MN"; } else if (params->std & V4L2_STD_B) { priv->sgIF = 108; mode = "B"; } else if (params->std & V4L2_STD_GH) { priv->sgIF = 124; mode = "GH"; } else if (params->std & V4L2_STD_PAL_I) { priv->sgIF = 124; mode = "I"; } else if (params->std & V4L2_STD_DK) { priv->sgIF = 124; mode = "DK"; } else if (params->std & V4L2_STD_SECAM_L) { priv->sgIF = 124; mode = "L"; } else if (params->std & V4L2_STD_SECAM_LC) { priv->sgIF = 20; mode = "LC"; } else { priv->sgIF = 124; mode = "xx"; } if (params->mode == V4L2_TUNER_RADIO) { priv->sgIF = 88; /* if frequency is 5.5 MHz */ dprintk("setting tda827x to radio FM\n"); } else dprintk("setting tda827x to system %s\n", mode); } /* ------------------------------------------------------------------ */ struct tda827x_data { u32 lomax; u8 spd; u8 bs; u8 bp; u8 cp; u8 gc3; u8 div1p5; }; static const struct tda827x_data tda827x_table[] = { { .lomax = 62000000, .spd = 3, .bs = 2, .bp = 0, .cp = 0, .gc3 = 3, .div1p5 = 1}, { .lomax = 66000000, .spd = 3, .bs = 3, .bp = 0, .cp = 0, .gc3 = 3, .div1p5 = 1}, { .lomax = 76000000, .spd = 3, .bs = 1, .bp = 0, .cp = 0, .gc3 = 3, .div1p5 = 0}, { .lomax = 84000000, .spd = 3, .bs = 2, .bp = 0, .cp = 0, .gc3 = 3, .div1p5 = 0}, { .lomax = 93000000, .spd = 3, .bs = 2, .bp = 0, .cp = 0, .gc3 = 1, .div1p5 = 0}, { .lomax = 98000000, .spd = 3, .bs = 3, .bp = 0, .cp = 0, .gc3 = 1, .div1p5 = 0}, { .lomax = 109000000, .spd = 3, .bs = 3, .bp = 1, .cp = 0, .gc3 = 1, .div1p5 = 0}, { .lomax = 123000000, .spd = 2, .bs = 2, .bp = 1, .cp = 0, .gc3 = 1, 
.div1p5 = 1}, { .lomax = 133000000, .spd = 2, .bs = 3, .bp = 1, .cp = 0, .gc3 = 1, .div1p5 = 1}, { .lomax = 151000000, .spd = 2, .bs = 1, .bp = 1, .cp = 0, .gc3 = 1, .div1p5 = 0}, { .lomax = 154000000, .spd = 2, .bs = 2, .bp = 1, .cp = 0, .gc3 = 1, .div1p5 = 0}, { .lomax = 181000000, .spd = 2, .bs = 2, .bp = 1, .cp = 0, .gc3 = 0, .div1p5 = 0}, { .lomax = 185000000, .spd = 2, .bs = 2, .bp = 2, .cp = 0, .gc3 = 1, .div1p5 = 0}, { .lomax = 217000000, .spd = 2, .bs = 3, .bp = 2, .cp = 0, .gc3 = 1, .div1p5 = 0}, { .lomax = 244000000, .spd = 1, .bs = 2, .bp = 2, .cp = 0, .gc3 = 1, .div1p5 = 1}, { .lomax = 265000000, .spd = 1, .bs = 3, .bp = 2, .cp = 0, .gc3 = 1, .div1p5 = 1}, { .lomax = 302000000, .spd = 1, .bs = 1, .bp = 2, .cp = 0, .gc3 = 1, .div1p5 = 0}, { .lomax = 324000000, .spd = 1, .bs = 2, .bp = 2, .cp = 0, .gc3 = 1, .div1p5 = 0}, { .lomax = 370000000, .spd = 1, .bs = 2, .bp = 3, .cp = 0, .gc3 = 1, .div1p5 = 0}, { .lomax = 454000000, .spd = 1, .bs = 3, .bp = 3, .cp = 0, .gc3 = 1, .div1p5 = 0}, { .lomax = 493000000, .spd = 0, .bs = 2, .bp = 3, .cp = 0, .gc3 = 1, .div1p5 = 1}, { .lomax = 530000000, .spd = 0, .bs = 3, .bp = 3, .cp = 0, .gc3 = 1, .div1p5 = 1}, { .lomax = 554000000, .spd = 0, .bs = 1, .bp = 3, .cp = 0, .gc3 = 1, .div1p5 = 0}, { .lomax = 604000000, .spd = 0, .bs = 1, .bp = 4, .cp = 0, .gc3 = 0, .div1p5 = 0}, { .lomax = 696000000, .spd = 0, .bs = 2, .bp = 4, .cp = 0, .gc3 = 0, .div1p5 = 0}, { .lomax = 740000000, .spd = 0, .bs = 2, .bp = 4, .cp = 1, .gc3 = 0, .div1p5 = 0}, { .lomax = 820000000, .spd = 0, .bs = 3, .bp = 4, .cp = 0, .gc3 = 0, .div1p5 = 0}, { .lomax = 865000000, .spd = 0, .bs = 3, .bp = 4, .cp = 1, .gc3 = 0, .div1p5 = 0}, { .lomax = 0, .spd = 0, .bs = 0, .bp = 0, .cp = 0, .gc3 = 0, .div1p5 = 0} }; static int tuner_transfer(struct dvb_frontend *fe, struct i2c_msg *msg, const int size) { int rc; struct tda827x_priv *priv = fe->tuner_priv; if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 1); rc = i2c_transfer(priv->i2c_adap, msg, size); if 
(fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 0); if (rc >= 0 && rc != size) return -EIO; return rc; } static int tda827xo_set_params(struct dvb_frontend *fe, struct dvb_frontend_parameters *params) { struct tda827x_priv *priv = fe->tuner_priv; u8 buf[14]; int rc; struct i2c_msg msg = { .addr = priv->i2c_addr, .flags = 0, .buf = buf, .len = sizeof(buf) }; int i, tuner_freq, if_freq; u32 N; dprintk("%s:\n", __func__); switch (params->u.ofdm.bandwidth) { case BANDWIDTH_6_MHZ: if_freq = 4000000; break; case BANDWIDTH_7_MHZ: if_freq = 4500000; break; default: /* 8 MHz or Auto */ if_freq = 5000000; break; } tuner_freq = params->frequency + if_freq; i = 0; while (tda827x_table[i].lomax < tuner_freq) { if (tda827x_table[i + 1].lomax == 0) break; i++; } N = ((tuner_freq + 125000) / 250000) << (tda827x_table[i].spd + 2); buf[0] = 0; buf[1] = (N>>8) | 0x40; buf[2] = N & 0xff; buf[3] = 0; buf[4] = 0x52; buf[5] = (tda827x_table[i].spd << 6) + (tda827x_table[i].div1p5 << 5) + (tda827x_table[i].bs << 3) + tda827x_table[i].bp; buf[6] = (tda827x_table[i].gc3 << 4) + 0x8f; buf[7] = 0xbf; buf[8] = 0x2a; buf[9] = 0x05; buf[10] = 0xff; buf[11] = 0x00; buf[12] = 0x00; buf[13] = 0x40; msg.len = 14; rc = tuner_transfer(fe, &msg, 1); if (rc < 0) goto err; msleep(500); /* correct CP value */ buf[0] = 0x30; buf[1] = 0x50 + tda827x_table[i].cp; msg.len = 2; rc = tuner_transfer(fe, &msg, 1); if (rc < 0) goto err; priv->frequency = params->frequency; priv->bandwidth = (fe->ops.info.type == FE_OFDM) ? 
params->u.ofdm.bandwidth : 0; return 0; err: printk(KERN_ERR "%s: could not write to tuner at addr: 0x%02x\n", __func__, priv->i2c_addr << 1); return rc; } static int tda827xo_sleep(struct dvb_frontend *fe) { struct tda827x_priv *priv = fe->tuner_priv; static u8 buf[] = { 0x30, 0xd0 }; struct i2c_msg msg = { .addr = priv->i2c_addr, .flags = 0, .buf = buf, .len = sizeof(buf) }; dprintk("%s:\n", __func__); tuner_transfer(fe, &msg, 1); if (priv->cfg && priv->cfg->sleep) priv->cfg->sleep(fe); return 0; } /* ------------------------------------------------------------------ */ static int tda827xo_set_analog_params(struct dvb_frontend *fe, struct analog_parameters *params) { unsigned char tuner_reg[8]; unsigned char reg2[2]; u32 N; int i; struct tda827x_priv *priv = fe->tuner_priv; struct i2c_msg msg = { .addr = priv->i2c_addr, .flags = 0 }; unsigned int freq = params->frequency; tda827x_set_std(fe, params); if (params->mode == V4L2_TUNER_RADIO) freq = freq / 1000; N = freq + priv->sgIF; i = 0; while (tda827x_table[i].lomax < N * 62500) { if (tda827x_table[i + 1].lomax == 0) break; i++; } N = N << tda827x_table[i].spd; tuner_reg[0] = 0; tuner_reg[1] = (unsigned char)(N>>8); tuner_reg[2] = (unsigned char) N; tuner_reg[3] = 0x40; tuner_reg[4] = 0x52 + (priv->lpsel << 5); tuner_reg[5] = (tda827x_table[i].spd << 6) + (tda827x_table[i].div1p5 << 5) + (tda827x_table[i].bs << 3) + tda827x_table[i].bp; tuner_reg[6] = 0x8f + (tda827x_table[i].gc3 << 4); tuner_reg[7] = 0x8f; msg.buf = tuner_reg; msg.len = 8; tuner_transfer(fe, &msg, 1); msg.buf = reg2; msg.len = 2; reg2[0] = 0x80; reg2[1] = 0; tuner_transfer(fe, &msg, 1); reg2[0] = 0x60; reg2[1] = 0xbf; tuner_transfer(fe, &msg, 1); reg2[0] = 0x30; reg2[1] = tuner_reg[4] + 0x80; tuner_transfer(fe, &msg, 1); msleep(1); reg2[0] = 0x30; reg2[1] = tuner_reg[4] + 4; tuner_transfer(fe, &msg, 1); msleep(1); reg2[0] = 0x30; reg2[1] = tuner_reg[4]; tuner_transfer(fe, &msg, 1); msleep(550); reg2[0] = 0x30; reg2[1] = (tuner_reg[4] & 0xfc) + 
tda827x_table[i].cp; tuner_transfer(fe, &msg, 1); reg2[0] = 0x60; reg2[1] = 0x3f; tuner_transfer(fe, &msg, 1); reg2[0] = 0x80; reg2[1] = 0x08; /* Vsync en */ tuner_transfer(fe, &msg, 1); priv->frequency = params->frequency; return 0; } static void tda827xo_agcf(struct dvb_frontend *fe) { struct tda827x_priv *priv = fe->tuner_priv; unsigned char data[] = { 0x80, 0x0c }; struct i2c_msg msg = { .addr = priv->i2c_addr, .flags = 0, .buf = data, .len = 2}; tuner_transfer(fe, &msg, 1); } /* ------------------------------------------------------------------ */ struct tda827xa_data { u32 lomax; u8 svco; u8 spd; u8 scr; u8 sbs; u8 gc3; }; static struct tda827xa_data tda827xa_dvbt[] = { { .lomax = 56875000, .svco = 3, .spd = 4, .scr = 0, .sbs = 0, .gc3 = 1}, { .lomax = 67250000, .svco = 0, .spd = 3, .scr = 0, .sbs = 0, .gc3 = 1}, { .lomax = 81250000, .svco = 1, .spd = 3, .scr = 0, .sbs = 0, .gc3 = 1}, { .lomax = 97500000, .svco = 2, .spd = 3, .scr = 0, .sbs = 0, .gc3 = 1}, { .lomax = 113750000, .svco = 3, .spd = 3, .scr = 0, .sbs = 1, .gc3 = 1}, { .lomax = 134500000, .svco = 0, .spd = 2, .scr = 0, .sbs = 1, .gc3 = 1}, { .lomax = 154000000, .svco = 1, .spd = 2, .scr = 0, .sbs = 1, .gc3 = 1}, { .lomax = 162500000, .svco = 1, .spd = 2, .scr = 0, .sbs = 1, .gc3 = 1}, { .lomax = 183000000, .svco = 2, .spd = 2, .scr = 0, .sbs = 1, .gc3 = 1}, { .lomax = 195000000, .svco = 2, .spd = 2, .scr = 0, .sbs = 2, .gc3 = 1}, { .lomax = 227500000, .svco = 3, .spd = 2, .scr = 0, .sbs = 2, .gc3 = 1}, { .lomax = 269000000, .svco = 0, .spd = 1, .scr = 0, .sbs = 2, .gc3 = 1}, { .lomax = 290000000, .svco = 1, .spd = 1, .scr = 0, .sbs = 2, .gc3 = 1}, { .lomax = 325000000, .svco = 1, .spd = 1, .scr = 0, .sbs = 3, .gc3 = 1}, { .lomax = 390000000, .svco = 2, .spd = 1, .scr = 0, .sbs = 3, .gc3 = 1}, { .lomax = 455000000, .svco = 3, .spd = 1, .scr = 0, .sbs = 3, .gc3 = 1}, { .lomax = 520000000, .svco = 0, .spd = 0, .scr = 0, .sbs = 3, .gc3 = 1}, { .lomax = 538000000, .svco = 0, .spd = 0, .scr = 1, .sbs = 
3, .gc3 = 1}, { .lomax = 550000000, .svco = 1, .spd = 0, .scr = 0, .sbs = 3, .gc3 = 1}, { .lomax = 620000000, .svco = 1, .spd = 0, .scr = 0, .sbs = 4, .gc3 = 0}, { .lomax = 650000000, .svco = 1, .spd = 0, .scr = 1, .sbs = 4, .gc3 = 0}, { .lomax = 700000000, .svco = 2, .spd = 0, .scr = 0, .sbs = 4, .gc3 = 0}, { .lomax = 780000000, .svco = 2, .spd = 0, .scr = 1, .sbs = 4, .gc3 = 0}, { .lomax = 820000000, .svco = 3, .spd = 0, .scr = 0, .sbs = 4, .gc3 = 0}, { .lomax = 870000000, .svco = 3, .spd = 0, .scr = 1, .sbs = 4, .gc3 = 0}, { .lomax = 911000000, .svco = 3, .spd = 0, .scr = 2, .sbs = 4, .gc3 = 0}, { .lomax = 0, .svco = 0, .spd = 0, .scr = 0, .sbs = 0, .gc3 = 0} }; static struct tda827xa_data tda827xa_dvbc[] = { { .lomax = 50125000, .svco = 2, .spd = 4, .scr = 2, .sbs = 0, .gc3 = 3}, { .lomax = 58500000, .svco = 3, .spd = 4, .scr = 2, .sbs = 0, .gc3 = 3}, { .lomax = 69250000, .svco = 0, .spd = 3, .scr = 2, .sbs = 0, .gc3 = 3}, { .lomax = 83625000, .svco = 1, .spd = 3, .scr = 2, .sbs = 0, .gc3 = 3}, { .lomax = 97500000, .svco = 2, .spd = 3, .scr = 2, .sbs = 0, .gc3 = 3}, { .lomax = 100250000, .svco = 2, .spd = 3, .scr = 2, .sbs = 1, .gc3 = 1}, { .lomax = 117000000, .svco = 3, .spd = 3, .scr = 2, .sbs = 1, .gc3 = 1}, { .lomax = 138500000, .svco = 0, .spd = 2, .scr = 2, .sbs = 1, .gc3 = 1}, { .lomax = 167250000, .svco = 1, .spd = 2, .scr = 2, .sbs = 1, .gc3 = 1}, { .lomax = 187000000, .svco = 2, .spd = 2, .scr = 2, .sbs = 1, .gc3 = 1}, { .lomax = 200500000, .svco = 2, .spd = 2, .scr = 2, .sbs = 2, .gc3 = 1}, { .lomax = 234000000, .svco = 3, .spd = 2, .scr = 2, .sbs = 2, .gc3 = 3}, { .lomax = 277000000, .svco = 0, .spd = 1, .scr = 2, .sbs = 2, .gc3 = 3}, { .lomax = 325000000, .svco = 1, .spd = 1, .scr = 2, .sbs = 2, .gc3 = 1}, { .lomax = 334500000, .svco = 1, .spd = 1, .scr = 2, .sbs = 3, .gc3 = 3}, { .lomax = 401000000, .svco = 2, .spd = 1, .scr = 2, .sbs = 3, .gc3 = 3}, { .lomax = 468000000, .svco = 3, .spd = 1, .scr = 2, .sbs = 3, .gc3 = 1}, { .lomax = 535000000, 
.svco = 0, .spd = 0, .scr = 1, .sbs = 3, .gc3 = 1}, { .lomax = 554000000, .svco = 0, .spd = 0, .scr = 2, .sbs = 3, .gc3 = 1}, { .lomax = 638000000, .svco = 1, .spd = 0, .scr = 1, .sbs = 4, .gc3 = 1}, { .lomax = 669000000, .svco = 1, .spd = 0, .scr = 2, .sbs = 4, .gc3 = 1}, { .lomax = 720000000, .svco = 2, .spd = 0, .scr = 1, .sbs = 4, .gc3 = 1}, { .lomax = 802000000, .svco = 2, .spd = 0, .scr = 2, .sbs = 4, .gc3 = 1}, { .lomax = 835000000, .svco = 3, .spd = 0, .scr = 1, .sbs = 4, .gc3 = 1}, { .lomax = 885000000, .svco = 3, .spd = 0, .scr = 1, .sbs = 4, .gc3 = 1}, { .lomax = 911000000, .svco = 3, .spd = 0, .scr = 2, .sbs = 4, .gc3 = 1}, { .lomax = 0, .svco = 0, .spd = 0, .scr = 0, .sbs = 0, .gc3 = 0} }; static struct tda827xa_data tda827xa_analog[] = { { .lomax = 56875000, .svco = 3, .spd = 4, .scr = 0, .sbs = 0, .gc3 = 3}, { .lomax = 67250000, .svco = 0, .spd = 3, .scr = 0, .sbs = 0, .gc3 = 3}, { .lomax = 81250000, .svco = 1, .spd = 3, .scr = 0, .sbs = 0, .gc3 = 3}, { .lomax = 97500000, .svco = 2, .spd = 3, .scr = 0, .sbs = 0, .gc3 = 3}, { .lomax = 113750000, .svco = 3, .spd = 3, .scr = 0, .sbs = 1, .gc3 = 1}, { .lomax = 134500000, .svco = 0, .spd = 2, .scr = 0, .sbs = 1, .gc3 = 1}, { .lomax = 154000000, .svco = 1, .spd = 2, .scr = 0, .sbs = 1, .gc3 = 1}, { .lomax = 162500000, .svco = 1, .spd = 2, .scr = 0, .sbs = 1, .gc3 = 1}, { .lomax = 183000000, .svco = 2, .spd = 2, .scr = 0, .sbs = 1, .gc3 = 1}, { .lomax = 195000000, .svco = 2, .spd = 2, .scr = 0, .sbs = 2, .gc3 = 1}, { .lomax = 227500000, .svco = 3, .spd = 2, .scr = 0, .sbs = 2, .gc3 = 3}, { .lomax = 269000000, .svco = 0, .spd = 1, .scr = 0, .sbs = 2, .gc3 = 3}, { .lomax = 325000000, .svco = 1, .spd = 1, .scr = 0, .sbs = 2, .gc3 = 1}, { .lomax = 390000000, .svco = 2, .spd = 1, .scr = 0, .sbs = 3, .gc3 = 3}, { .lomax = 455000000, .svco = 3, .spd = 1, .scr = 0, .sbs = 3, .gc3 = 3}, { .lomax = 520000000, .svco = 0, .spd = 0, .scr = 0, .sbs = 3, .gc3 = 1}, { .lomax = 538000000, .svco = 0, .spd = 0, .scr = 1, .sbs 
= 3, .gc3 = 1}, { .lomax = 554000000, .svco = 1, .spd = 0, .scr = 0, .sbs = 3, .gc3 = 1}, { .lomax = 620000000, .svco = 1, .spd = 0, .scr = 0, .sbs = 4, .gc3 = 0}, { .lomax = 650000000, .svco = 1, .spd = 0, .scr = 1, .sbs = 4, .gc3 = 0}, { .lomax = 700000000, .svco = 2, .spd = 0, .scr = 0, .sbs = 4, .gc3 = 0}, { .lomax = 780000000, .svco = 2, .spd = 0, .scr = 1, .sbs = 4, .gc3 = 0}, { .lomax = 820000000, .svco = 3, .spd = 0, .scr = 0, .sbs = 4, .gc3 = 0}, { .lomax = 870000000, .svco = 3, .spd = 0, .scr = 1, .sbs = 4, .gc3 = 0}, { .lomax = 911000000, .svco = 3, .spd = 0, .scr = 2, .sbs = 4, .gc3 = 0}, { .lomax = 0, .svco = 0, .spd = 0, .scr = 0, .sbs = 0, .gc3 = 0} }; static int tda827xa_sleep(struct dvb_frontend *fe) { struct tda827x_priv *priv = fe->tuner_priv; static u8 buf[] = { 0x30, 0x90 }; struct i2c_msg msg = { .addr = priv->i2c_addr, .flags = 0, .buf = buf, .len = sizeof(buf) }; dprintk("%s:\n", __func__); tuner_transfer(fe, &msg, 1); if (priv->cfg && priv->cfg->sleep) priv->cfg->sleep(fe); return 0; } static void tda827xa_lna_gain(struct dvb_frontend *fe, int high, struct analog_parameters *params) { struct tda827x_priv *priv = fe->tuner_priv; unsigned char buf[] = {0x22, 0x01}; int arg; int gp_func; struct i2c_msg msg = { .flags = 0, .buf = buf, .len = sizeof(buf) }; if (NULL == priv->cfg) { dprintk("tda827x_config not defined, cannot set LNA gain!\n"); return; } msg.addr = priv->cfg->switch_addr; if (priv->cfg->config) { if (high) dprintk("setting LNA to high gain\n"); else dprintk("setting LNA to low gain\n"); } switch (priv->cfg->config) { case 0: /* no LNA */ break; case 1: /* switch is GPIO 0 of tda8290 */ case 2: if (params == NULL) { gp_func = 0; arg = 0; } else { /* turn Vsync on */ gp_func = 1; if (params->std & V4L2_STD_MN) arg = 1; else arg = 0; } if (fe->callback) fe->callback(priv->i2c_adap->algo_data, DVB_FRONTEND_COMPONENT_TUNER, gp_func, arg); buf[1] = high ? 0 : 1; if (priv->cfg->config == 2) buf[1] = high ? 
1 : 0; tuner_transfer(fe, &msg, 1); break; case 3: /* switch with GPIO of saa713x */ if (fe->callback) fe->callback(priv->i2c_adap->algo_data, DVB_FRONTEND_COMPONENT_TUNER, 0, high); break; } } static int tda827xa_set_params(struct dvb_frontend *fe, struct dvb_frontend_parameters *params) { struct tda827x_priv *priv = fe->tuner_priv; struct tda827xa_data *frequency_map = tda827xa_dvbt; u8 buf[11]; struct i2c_msg msg = { .addr = priv->i2c_addr, .flags = 0, .buf = buf, .len = sizeof(buf) }; int i, tuner_freq, if_freq, rc; u32 N; dprintk("%s:\n", __func__); tda827xa_lna_gain(fe, 1, NULL); msleep(20); switch (params->u.ofdm.bandwidth) { case BANDWIDTH_6_MHZ: if_freq = 4000000; break; case BANDWIDTH_7_MHZ: if_freq = 4500000; break; default: /* 8 MHz or Auto */ if_freq = 5000000; break; } tuner_freq = params->frequency + if_freq; if (fe->ops.info.type == FE_QAM) { dprintk("%s select tda827xa_dvbc\n", __func__); frequency_map = tda827xa_dvbc; } i = 0; while (frequency_map[i].lomax < tuner_freq) { if (frequency_map[i + 1].lomax == 0) break; i++; } N = ((tuner_freq + 31250) / 62500) << frequency_map[i].spd; buf[0] = 0; // subaddress buf[1] = N >> 8; buf[2] = N & 0xff; buf[3] = 0; buf[4] = 0x16; buf[5] = (frequency_map[i].spd << 5) + (frequency_map[i].svco << 3) + frequency_map[i].sbs; buf[6] = 0x4b + (frequency_map[i].gc3 << 4); buf[7] = 0x1c; buf[8] = 0x06; buf[9] = 0x24; buf[10] = 0x00; msg.len = 11; rc = tuner_transfer(fe, &msg, 1); if (rc < 0) goto err; buf[0] = 0x90; buf[1] = 0xff; buf[2] = 0x60; buf[3] = 0x00; buf[4] = 0x59; // lpsel, for 6MHz + 2 msg.len = 5; rc = tuner_transfer(fe, &msg, 1); if (rc < 0) goto err; buf[0] = 0xa0; buf[1] = 0x40; msg.len = 2; rc = tuner_transfer(fe, &msg, 1); if (rc < 0) goto err; msleep(11); msg.flags = I2C_M_RD; rc = tuner_transfer(fe, &msg, 1); if (rc < 0) goto err; msg.flags = 0; buf[1] >>= 4; dprintk("tda8275a AGC2 gain is: %d\n", buf[1]); if ((buf[1]) < 2) { tda827xa_lna_gain(fe, 0, NULL); buf[0] = 0x60; buf[1] = 0x0c; rc = 
tuner_transfer(fe, &msg, 1); if (rc < 0) goto err; } buf[0] = 0xc0; buf[1] = 0x99; // lpsel, for 6MHz + 2 rc = tuner_transfer(fe, &msg, 1); if (rc < 0) goto err; buf[0] = 0x60; buf[1] = 0x3c; rc = tuner_transfer(fe, &msg, 1); if (rc < 0) goto err; /* correct CP value */ buf[0] = 0x30; buf[1] = 0x10 + frequency_map[i].scr; rc = tuner_transfer(fe, &msg, 1); if (rc < 0) goto err; msleep(163); buf[0] = 0xc0; buf[1] = 0x39; // lpsel, for 6MHz + 2 rc = tuner_transfer(fe, &msg, 1); if (rc < 0) goto err; msleep(3); /* freeze AGC1 */ buf[0] = 0x50; buf[1] = 0x4f + (frequency_map[i].gc3 << 4); rc = tuner_transfer(fe, &msg, 1); if (rc < 0) goto err; priv->frequency = params->frequency; priv->bandwidth = (fe->ops.info.type == FE_OFDM) ? params->u.ofdm.bandwidth : 0; return 0; err: printk(KERN_ERR "%s: could not write to tuner at addr: 0x%02x\n", __func__, priv->i2c_addr << 1); return rc; } static int tda827xa_set_analog_params(struct dvb_frontend *fe, struct analog_parameters *params) { unsigned char tuner_reg[11]; u32 N; int i; struct tda827x_priv *priv = fe->tuner_priv; struct i2c_msg msg = { .addr = priv->i2c_addr, .flags = 0, .buf = tuner_reg, .len = sizeof(tuner_reg) }; unsigned int freq = params->frequency; tda827x_set_std(fe, params); tda827xa_lna_gain(fe, 1, params); msleep(10); if (params->mode == V4L2_TUNER_RADIO) freq = freq / 1000; N = freq + priv->sgIF; i = 0; while (tda827xa_analog[i].lomax < N * 62500) { if (tda827xa_analog[i + 1].lomax == 0) break; i++; } N = N << tda827xa_analog[i].spd; tuner_reg[0] = 0; tuner_reg[1] = (unsigned char)(N>>8); tuner_reg[2] = (unsigned char) N; tuner_reg[3] = 0; tuner_reg[4] = 0x16; tuner_reg[5] = (tda827xa_analog[i].spd << 5) + (tda827xa_analog[i].svco << 3) + tda827xa_analog[i].sbs; tuner_reg[6] = 0x8b + (tda827xa_analog[i].gc3 << 4); tuner_reg[7] = 0x1c; tuner_reg[8] = 4; tuner_reg[9] = 0x20; tuner_reg[10] = 0x00; msg.len = 11; tuner_transfer(fe, &msg, 1); tuner_reg[0] = 0x90; tuner_reg[1] = 0xff; tuner_reg[2] = 0xe0; 
tuner_reg[3] = 0; tuner_reg[4] = 0x99 + (priv->lpsel << 1); msg.len = 5; tuner_transfer(fe, &msg, 1); tuner_reg[0] = 0xa0; tuner_reg[1] = 0xc0; msg.len = 2; tuner_transfer(fe, &msg, 1); tuner_reg[0] = 0x30; tuner_reg[1] = 0x10 + tda827xa_analog[i].scr; tuner_transfer(fe, &msg, 1); msg.flags = I2C_M_RD; tuner_transfer(fe, &msg, 1); msg.flags = 0; tuner_reg[1] >>= 4; dprintk("AGC2 gain is: %d\n", tuner_reg[1]); if (tuner_reg[1] < 1) tda827xa_lna_gain(fe, 0, params); msleep(100); tuner_reg[0] = 0x60; tuner_reg[1] = 0x3c; tuner_transfer(fe, &msg, 1); msleep(163); tuner_reg[0] = 0x50; tuner_reg[1] = 0x8f + (tda827xa_analog[i].gc3 << 4); tuner_transfer(fe, &msg, 1); tuner_reg[0] = 0x80; tuner_reg[1] = 0x28; tuner_transfer(fe, &msg, 1); tuner_reg[0] = 0xb0; tuner_reg[1] = 0x01; tuner_transfer(fe, &msg, 1); tuner_reg[0] = 0xc0; tuner_reg[1] = 0x19 + (priv->lpsel << 1); tuner_transfer(fe, &msg, 1); priv->frequency = params->frequency; return 0; } static void tda827xa_agcf(struct dvb_frontend *fe) { struct tda827x_priv *priv = fe->tuner_priv; unsigned char data[] = {0x80, 0x2c}; struct i2c_msg msg = {.addr = priv->i2c_addr, .flags = 0, .buf = data, .len = 2}; tuner_transfer(fe, &msg, 1); } /* ------------------------------------------------------------------ */ static int tda827x_release(struct dvb_frontend *fe) { kfree(fe->tuner_priv); fe->tuner_priv = NULL; return 0; } static int tda827x_get_frequency(struct dvb_frontend *fe, u32 *frequency) { struct tda827x_priv *priv = fe->tuner_priv; *frequency = priv->frequency; return 0; } static int tda827x_get_bandwidth(struct dvb_frontend *fe, u32 *bandwidth) { struct tda827x_priv *priv = fe->tuner_priv; *bandwidth = priv->bandwidth; return 0; } static int tda827x_init(struct dvb_frontend *fe) { struct tda827x_priv *priv = fe->tuner_priv; dprintk("%s:\n", __func__); if (priv->cfg && priv->cfg->init) priv->cfg->init(fe); return 0; } static int tda827x_probe_version(struct dvb_frontend *fe); static int tda827x_initial_init(struct 
dvb_frontend *fe) { int ret; ret = tda827x_probe_version(fe); if (ret) return ret; return fe->ops.tuner_ops.init(fe); } static int tda827x_initial_sleep(struct dvb_frontend *fe) { int ret; ret = tda827x_probe_version(fe); if (ret) return ret; return fe->ops.tuner_ops.sleep(fe); } static struct dvb_tuner_ops tda827xo_tuner_ops = { .info = { .name = "Philips TDA827X", .frequency_min = 55000000, .frequency_max = 860000000, .frequency_step = 250000 }, .release = tda827x_release, .init = tda827x_initial_init, .sleep = tda827x_initial_sleep, .set_params = tda827xo_set_params, .set_analog_params = tda827xo_set_analog_params, .get_frequency = tda827x_get_frequency, .get_bandwidth = tda827x_get_bandwidth, }; static struct dvb_tuner_ops tda827xa_tuner_ops = { .info = { .name = "Philips TDA827XA", .frequency_min = 44000000, .frequency_max = 906000000, .frequency_step = 62500 }, .release = tda827x_release, .init = tda827x_init, .sleep = tda827xa_sleep, .set_params = tda827xa_set_params, .set_analog_params = tda827xa_set_analog_params, .get_frequency = tda827x_get_frequency, .get_bandwidth = tda827x_get_bandwidth, }; static int tda827x_probe_version(struct dvb_frontend *fe) { u8 data; int rc; struct tda827x_priv *priv = fe->tuner_priv; struct i2c_msg msg = { .addr = priv->i2c_addr, .flags = I2C_M_RD, .buf = &data, .len = 1 }; rc = tuner_transfer(fe, &msg, 1); if (rc < 0) { printk("%s: could not read from tuner at addr: 0x%02x\n", __func__, msg.addr << 1); return rc; } if ((data & 0x3c) == 0) { dprintk("tda827x tuner found\n"); fe->ops.tuner_ops.init = tda827x_init; fe->ops.tuner_ops.sleep = tda827xo_sleep; if (priv->cfg) priv->cfg->agcf = tda827xo_agcf; } else { dprintk("tda827xa tuner found\n"); memcpy(&fe->ops.tuner_ops, &tda827xa_tuner_ops, sizeof(struct dvb_tuner_ops)); if (priv->cfg) priv->cfg->agcf = tda827xa_agcf; } return 0; } struct dvb_frontend *tda827x_attach(struct dvb_frontend *fe, int addr, struct i2c_adapter *i2c, struct tda827x_config *cfg) { struct tda827x_priv 
*priv = NULL; dprintk("%s:\n", __func__); priv = kzalloc(sizeof(struct tda827x_priv), GFP_KERNEL); if (priv == NULL) return NULL; priv->i2c_addr = addr; priv->i2c_adap = i2c; priv->cfg = cfg; memcpy(&fe->ops.tuner_ops, &tda827xo_tuner_ops, sizeof(struct dvb_tuner_ops)); fe->tuner_priv = priv; dprintk("type set to %s\n", fe->ops.tuner_ops.info.name); return fe; } EXPORT_SYMBOL_GPL(tda827x_attach); MODULE_DESCRIPTION("DVB TDA827x driver"); MODULE_AUTHOR("Hartmut Hackmann <hartmut.hackmann@t-online.de>"); MODULE_AUTHOR("Michael Krufky <mkrufky@linuxtv.org>"); MODULE_LICENSE("GPL"); /* * Overrides for Emacs so that we follow Linus's tabbing style. * --------------------------------------------------------------------------- * Local variables: * c-basic-offset: 8 * End: */
gpl-2.0
omegamoon/rockchip-rk30xx-mk808
drivers/s390/cio/crw.c
4334
4121
/*
 *  Channel report handling code
 *
 *    Copyright IBM Corp. 2000,2009
 *    Author(s): Ingo Adlung <adlung@de.ibm.com>,
 *		 Martin Schwidefsky <schwidefsky@de.ibm.com>,
 *		 Cornelia Huck <cornelia.huck@de.ibm.com>,
 *		 Heiko Carstens <heiko.carstens@de.ibm.com>,
 */

#include <linux/mutex.h>
#include <linux/kthread.h>
#include <linux/init.h>
#include <linux/wait.h>
#include <asm/crw.h>

/* Serializes all access to the crw_handlers[] table below. */
static DEFINE_MUTEX(crw_handler_mutex);
/* One handler slot per reporting source code (RSC). */
static crw_handler_t crw_handlers[NR_RSCS];
/* Count of outstanding "collect CRWs" requests; drives the kmcheck thread. */
static atomic_t crw_nr_req = ATOMIC_INIT(0);
/* Woken both to start collection and when crw_nr_req drops to zero. */
static DECLARE_WAIT_QUEUE_HEAD(crw_handler_wait_q);

/**
 * crw_register_handler() - register a channel report word handler
 * @rsc: reporting source code to handle
 * @handler: handler to be registered
 *
 * Returns %0 on success and a negative error value otherwise.
 */
int crw_register_handler(int rsc, crw_handler_t handler)
{
	int rc = 0;

	if ((rsc < 0) || (rsc >= NR_RSCS))
		return -EINVAL;
	mutex_lock(&crw_handler_mutex);
	if (crw_handlers[rsc])
		rc = -EBUSY;	/* slot taken; never overwrite a handler */
	else
		crw_handlers[rsc] = handler;
	mutex_unlock(&crw_handler_mutex);
	return rc;
}

/**
 * crw_unregister_handler() - unregister a channel report word handler
 * @rsc: reporting source code to handle
 */
void crw_unregister_handler(int rsc)
{
	if ((rsc < 0) || (rsc >= NR_RSCS))
		return;
	mutex_lock(&crw_handler_mutex);
	crw_handlers[rsc] = NULL;
	mutex_unlock(&crw_handler_mutex);
}

/*
 * Retrieve CRWs and call function to handle event.
 *
 * Runs as the "kmcheck" kernel thread: sleeps until crw_handle_channel_report()
 * raises crw_nr_req, then drains CRWs via stcrw() and dispatches each (possibly
 * chained pair) to the handler registered for its reporting source code.
 */
static int crw_collect_info(void *unused)
{
	struct crw crw[2];
	int ccode, signal;
	unsigned int chain;

repeat:
	signal = wait_event_interruptible(crw_handler_wait_q,
					  atomic_read(&crw_nr_req) > 0);
	/*
	 * Interrupted sleep: bump the request count so the unconditional
	 * atomic_dec_and_test() after the drain loop below stays balanced
	 * even though no new request arrived.
	 */
	if (unlikely(signal))
		atomic_inc(&crw_nr_req);
	chain = 0;
	while (1) {
		crw_handler_t handler;

		if (unlikely(chain > 1)) {
			/* More than two chained CRWs: log and discard them. */
			struct crw tmp_crw;

			printk(KERN_WARNING"%s: Code does not support more "
			       "than two chained crws; please report to "
			       "linux390@de.ibm.com!\n", __func__);
			ccode = stcrw(&tmp_crw);
			printk(KERN_WARNING"%s: crw reports slct=%d, oflw=%d, "
			       "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
			       __func__, tmp_crw.slct, tmp_crw.oflw,
			       tmp_crw.chn, tmp_crw.rsc, tmp_crw.anc,
			       tmp_crw.erc, tmp_crw.rsid);
			printk(KERN_WARNING"%s: This was crw number %x in the "
			       "chain\n", __func__, chain);
			if (ccode != 0)
				break;	/* no more CRWs pending */
			chain = tmp_crw.chn ? chain + 1 : 0;
			continue;
		}
		ccode = stcrw(&crw[chain]);
		if (ccode != 0)
			break;	/* nothing (more) to retrieve */
		printk(KERN_DEBUG "crw_info : CRW reports slct=%d, oflw=%d, "
		       "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
		       crw[chain].slct, crw[chain].oflw, crw[chain].chn,
		       crw[chain].rsc, crw[chain].anc, crw[chain].erc,
		       crw[chain].rsid);
		/* Check for overflows. */
		if (crw[chain].oflw) {
			int i;

			pr_debug("%s: crw overflow detected!\n", __func__);
			/*
			 * CRWs were lost: notify every registered handler
			 * with the overflow flag set so it can resync.
			 */
			mutex_lock(&crw_handler_mutex);
			for (i = 0; i < NR_RSCS; i++) {
				if (crw_handlers[i])
					crw_handlers[i](NULL, NULL, 1);
			}
			mutex_unlock(&crw_handler_mutex);
			chain = 0;
			continue;
		}
		/* First CRW of a chain: fetch its companion before dispatch. */
		if (crw[0].chn && !chain) {
			chain++;
			continue;
		}
		mutex_lock(&crw_handler_mutex);
		handler = crw_handlers[crw[chain].rsc];
		if (handler)
			handler(&crw[0], chain ? &crw[1] : NULL, 0);
		mutex_unlock(&crw_handler_mutex);
		/* chain is always 0 or 1 here. */
		chain = crw[chain].chn ? chain + 1 : 0;
	}
	/* Last outstanding request done: wake crw_wait_for_channel_report(). */
	if (atomic_dec_and_test(&crw_nr_req))
		wake_up(&crw_handler_wait_q);
	goto repeat;	/* kthread loops forever; return below is unreachable */
	return 0;
}

/* Request an asynchronous CRW collection pass from the kmcheck thread. */
void crw_handle_channel_report(void)
{
	atomic_inc(&crw_nr_req);
	wake_up(&crw_handler_wait_q);
}

/* Trigger a collection pass and block until all pending passes finished. */
void crw_wait_for_channel_report(void)
{
	crw_handle_channel_report();
	wait_event(crw_handler_wait_q, atomic_read(&crw_nr_req) == 0);
}

/*
 * Machine checks for the channel subsystem must be enabled
 * after the channel subsystem is initialized
 */
static int __init crw_machine_check_init(void)
{
	struct task_struct *task;

	task = kthread_run(crw_collect_info, NULL, "kmcheck");
	if (IS_ERR(task))
		return PTR_ERR(task);
	ctl_set_bit(14, 28);	/* enable channel report MCH */
	return 0;
}
device_initcall(crw_machine_check_init);
gpl-2.0
tilaksidduram/android_kernel_samsung_smdk4412
drivers/hwmon/thmc50.c
4334
14381
/*
   thmc50.c - Part of lm_sensors, Linux kernel modules for hardware
	      monitoring
   Copyright (C) 2007 Krzysztof Helt <krzysztof.h1@wp.pl>
   Based on 2.4 driver by Frodo Looijaard <frodol@dds.nl>
   and Philip Edelbrock <phil@netroedge.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/err.h>
#include <linux/mutex.h>

MODULE_LICENSE("GPL");

/* Addresses to scan */
static const unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, I2C_CLIENT_END };

/* Insmod parameters */
enum chips { thmc50, adm1022 };

static unsigned short adm1022_temp3[16];
static unsigned int adm1022_temp3_num;
module_param_array(adm1022_temp3, ushort, &adm1022_temp3_num, 0);
MODULE_PARM_DESC(adm1022_temp3, "List of adapter,address pairs "
			"to enable 3rd temperature (ADM1022 only)");

/* Many THMC50 constants specified below */

/* The THMC50 registers */
#define THMC50_REG_CONF				0x40
#define THMC50_REG_COMPANY_ID			0x3E
#define THMC50_REG_DIE_CODE			0x3F
#define THMC50_REG_ANALOG_OUT			0x19
/*
 * The mirror status register cannot be used as
 * reading it does not clear alarms.
 */
#define THMC50_REG_INTR				0x41

/* Per-channel register addresses, indexed 0..2 (local, remote1, remote2). */
static const u8 THMC50_REG_TEMP[] = { 0x27, 0x26, 0x20 };
static const u8 THMC50_REG_TEMP_MIN[] = { 0x3A, 0x38, 0x2C };
static const u8 THMC50_REG_TEMP_MAX[] = { 0x39, 0x37, 0x2B };
static const u8 THMC50_REG_TEMP_CRITICAL[] = { 0x13, 0x14, 0x14 };
static const u8 THMC50_REG_TEMP_DEFAULT[] = { 0x17, 0x18, 0x18 };

#define THMC50_REG_CONF_nFANOFF			0x20
#define THMC50_REG_CONF_PROGRAMMED		0x08

/* Each client has this additional data */
struct thmc50_data {
	struct device *hwmon_dev;	/* registered hwmon class device */

	struct mutex update_lock;	/* protects register cache below */
	enum chips type;
	unsigned long last_updated;	/* In jiffies */
	char has_temp3;		/* !=0 if it is ADM1022 in temp3 mode */
	char valid;		/* !=0 if following fields are valid */

	/* Register values (cached; refreshed by thmc50_update_device) */
	s8 temp_input[3];
	s8 temp_max[3];
	s8 temp_min[3];
	s8 temp_critical[3];
	u8 analog_out;
	u8 alarms;
};

static int thmc50_detect(struct i2c_client *client,
			 struct i2c_board_info *info);
static int thmc50_probe(struct i2c_client *client,
			const struct i2c_device_id *id);
static int thmc50_remove(struct i2c_client *client);
static void thmc50_init_client(struct i2c_client *client);
static struct thmc50_data *thmc50_update_device(struct device *dev);

static const struct i2c_device_id thmc50_id[] = {
	{ "adm1022", adm1022 },
	{ "thmc50", thmc50 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, thmc50_id);

static struct i2c_driver thmc50_driver = {
	.class = I2C_CLASS_HWMON,
	.driver = {
		.name = "thmc50",
	},
	.probe = thmc50_probe,
	.remove = thmc50_remove,
	.id_table = thmc50_id,
	.detect = thmc50_detect,
	.address_list = normal_i2c,
};

/* sysfs: read the cached analog output (fan speed DAC) value, 0-255. */
static ssize_t show_analog_out(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct thmc50_data *data = thmc50_update_device(dev);
	return sprintf(buf, "%d\n", data->analog_out);
}

/*
 * sysfs: write the analog output value; also toggles the nFANOFF config
 * bit so a value of 0 actually switches the fan off.
 */
static ssize_t set_analog_out(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t count)
{
	struct i2c_client *client = to_i2c_client(dev);
	struct thmc50_data *data = i2c_get_clientdata(client);
	int tmp = simple_strtoul(buf, NULL, 10);
	int config;

	mutex_lock(&data->update_lock);
	data->analog_out = SENSORS_LIMIT(tmp, 0, 255);
	i2c_smbus_write_byte_data(client, THMC50_REG_ANALOG_OUT,
				  data->analog_out);

	config = i2c_smbus_read_byte_data(client, THMC50_REG_CONF);
	if (data->analog_out == 0)
		config &= ~THMC50_REG_CONF_nFANOFF;
	else
		config |= THMC50_REG_CONF_nFANOFF;
	i2c_smbus_write_byte_data(client, THMC50_REG_CONF, config);

	mutex_unlock(&data->update_lock);
	return count;
}

/* There is only one PWM mode = DC */
static ssize_t show_pwm_mode(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "0\n");
}

/* Temperatures */

/* sysfs: report temperature channel <index> in millidegrees Celsius. */
static ssize_t show_temp(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	int nr = to_sensor_dev_attr(attr)->index;
	struct thmc50_data *data = thmc50_update_device(dev);
	return sprintf(buf, "%d\n", data->temp_input[nr] * 1000);
}

static ssize_t show_temp_min(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	int nr = to_sensor_dev_attr(attr)->index;
	struct thmc50_data *data = thmc50_update_device(dev);
	return sprintf(buf, "%d\n", data->temp_min[nr] * 1000);
}

/* sysfs: set the min limit; value is clamped to the chip's s8 range. */
static ssize_t set_temp_min(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	int nr = to_sensor_dev_attr(attr)->index;
	struct i2c_client *client = to_i2c_client(dev);
	struct thmc50_data *data = i2c_get_clientdata(client);
	int val = simple_strtol(buf, NULL, 10);

	mutex_lock(&data->update_lock);
	data->temp_min[nr] = SENSORS_LIMIT(val / 1000, -128, 127);
	i2c_smbus_write_byte_data(client, THMC50_REG_TEMP_MIN[nr],
				  data->temp_min[nr]);
	mutex_unlock(&data->update_lock);
	return count;
}

static ssize_t show_temp_max(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	int nr = to_sensor_dev_attr(attr)->index;
	struct thmc50_data *data = thmc50_update_device(dev);
	return sprintf(buf, "%d\n", data->temp_max[nr] * 1000);
}

/* sysfs: set the max limit; value is clamped to the chip's s8 range. */
static ssize_t set_temp_max(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	int nr = to_sensor_dev_attr(attr)->index;
	struct i2c_client *client = to_i2c_client(dev);
	struct thmc50_data *data = i2c_get_clientdata(client);
	int val = simple_strtol(buf, NULL, 10);

	mutex_lock(&data->update_lock);
	data->temp_max[nr] = SENSORS_LIMIT(val / 1000, -128, 127);
	i2c_smbus_write_byte_data(client, THMC50_REG_TEMP_MAX[nr],
				  data->temp_max[nr]);
	mutex_unlock(&data->update_lock);
	return count;
}

/* sysfs: critical limit is read-only (factory/BIOS programmed). */
static ssize_t show_temp_critical(struct device *dev,
				  struct device_attribute *attr,
				  char *buf)
{
	int nr = to_sensor_dev_attr(attr)->index;
	struct thmc50_data *data = thmc50_update_device(dev);
	return sprintf(buf, "%d\n", data->temp_critical[nr] * 1000);
}

/* sysfs: report a single bit of the interrupt/alarm status register. */
static ssize_t show_alarm(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	int index = to_sensor_dev_attr(attr)->index;
	struct thmc50_data *data = thmc50_update_device(dev);

	return sprintf(buf, "%u\n", (data->alarms >> index) & 1);
}

/* Generates the four standard attributes for one temperature channel. */
#define temp_reg(offset)						\
static SENSOR_DEVICE_ATTR(temp##offset##_input, S_IRUGO, show_temp,	\
			NULL, offset - 1);				\
static SENSOR_DEVICE_ATTR(temp##offset##_min, S_IRUGO | S_IWUSR,	\
			show_temp_min, set_temp_min, offset - 1);	\
static SENSOR_DEVICE_ATTR(temp##offset##_max, S_IRUGO | S_IWUSR,	\
			show_temp_max, set_temp_max, offset - 1);	\
static SENSOR_DEVICE_ATTR(temp##offset##_crit, S_IRUGO,			\
			show_temp_critical, NULL, offset - 1);

temp_reg(1);
temp_reg(2);
temp_reg(3);

/* Alarm/fault bit positions in THMC50_REG_INTR differ per channel. */
static SENSOR_DEVICE_ATTR(temp1_alarm, S_IRUGO, show_alarm, NULL, 0);
static SENSOR_DEVICE_ATTR(temp2_alarm, S_IRUGO, show_alarm, NULL, 5);
static SENSOR_DEVICE_ATTR(temp3_alarm, S_IRUGO, show_alarm, NULL, 1);
static SENSOR_DEVICE_ATTR(temp2_fault, S_IRUGO, show_alarm, NULL, 7);
static SENSOR_DEVICE_ATTR(temp3_fault, S_IRUGO, show_alarm, NULL, 2);

static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR, show_analog_out,
			  set_analog_out, 0);
static SENSOR_DEVICE_ATTR(pwm1_mode, S_IRUGO, show_pwm_mode, NULL, 0);

static struct attribute *thmc50_attributes[] = {
	&sensor_dev_attr_temp1_max.dev_attr.attr,
	&sensor_dev_attr_temp1_min.dev_attr.attr,
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	&sensor_dev_attr_temp1_crit.dev_attr.attr,
	&sensor_dev_attr_temp1_alarm.dev_attr.attr,
	&sensor_dev_attr_temp2_max.dev_attr.attr,
	&sensor_dev_attr_temp2_min.dev_attr.attr,
	&sensor_dev_attr_temp2_input.dev_attr.attr,
	&sensor_dev_attr_temp2_crit.dev_attr.attr,
	&sensor_dev_attr_temp2_alarm.dev_attr.attr,
	&sensor_dev_attr_temp2_fault.dev_attr.attr,
	&sensor_dev_attr_pwm1.dev_attr.attr,
	&sensor_dev_attr_pwm1_mode.dev_attr.attr,
	NULL
};

static const struct attribute_group thmc50_group = {
	.attrs = thmc50_attributes,
};

/* for ADM1022 3rd temperature mode */
static struct attribute *temp3_attributes[] = {
	&sensor_dev_attr_temp3_max.dev_attr.attr,
	&sensor_dev_attr_temp3_min.dev_attr.attr,
	&sensor_dev_attr_temp3_input.dev_attr.attr,
	&sensor_dev_attr_temp3_crit.dev_attr.attr,
	&sensor_dev_attr_temp3_alarm.dev_attr.attr,
	&sensor_dev_attr_temp3_fault.dev_attr.attr,
	NULL
};

static const struct attribute_group temp3_group = {
	.attrs = temp3_attributes,
};

/* Return 0 if detection is successful, -ENODEV otherwise */
static int thmc50_detect(struct i2c_client *client,
			 struct i2c_board_info *info)
{
	unsigned company;
	unsigned revision;
	unsigned config;
	struct i2c_adapter *adapter = client->adapter;
	const char *type_name;

	if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) {
		pr_debug("thmc50: detect failed, "
			 "smbus byte data not supported!\n");
		return -ENODEV;
	}

	pr_debug("thmc50: Probing for THMC50 at 0x%2X on bus %d\n",
		 client->addr, i2c_adapter_id(client->adapter));

	company = i2c_smbus_read_byte_data(client, THMC50_REG_COMPANY_ID);
	revision = i2c_smbus_read_byte_data(client, THMC50_REG_DIE_CODE);
	config = i2c_smbus_read_byte_data(client, THMC50_REG_CONF);
	/* Die codes below 0xc0 or config bit 4 set mean "not this chip". */
	if (revision < 0xc0 || (config & 0x10))
		return -ENODEV;

	if (company == 0x41) {
		/* ADM1022; check the module parameter list for adapters on
		 * which the 3rd (2nd remote) temperature should be enabled. */
		int id = i2c_adapter_id(client->adapter);
		int i;

		type_name = "adm1022";
		for (i = 0; i + 1 < adm1022_temp3_num; i += 2)
			if (adm1022_temp3[i] == id &&
			    adm1022_temp3[i + 1] == client->addr) {
				/* enable 2nd remote temp */
				config |= (1 << 7);
				i2c_smbus_write_byte_data(client,
							  THMC50_REG_CONF,
							  config);
				break;
			}
	} else if (company == 0x49) {
		type_name = "thmc50";
	} else {
		pr_debug("thmc50: Detection of THMC50/ADM1022 failed\n");
		return -ENODEV;
	}
	pr_debug("thmc50: Detected %s (version %x, revision %x)\n",
		 type_name, (revision >> 4) - 0xc, revision & 0xf);

	strlcpy(info->type, type_name, I2C_NAME_SIZE);

	return 0;
}

/* Bind the driver: allocate state, init the chip, create sysfs files. */
static int thmc50_probe(struct i2c_client *client,
			const struct i2c_device_id *id)
{
	struct thmc50_data *data;
	int err;

	data = kzalloc(sizeof(struct thmc50_data), GFP_KERNEL);
	if (!data) {
		pr_debug("thmc50: detect failed, kzalloc failed!\n");
		err = -ENOMEM;
		goto exit;
	}

	i2c_set_clientdata(client, data);
	data->type = id->driver_data;
	mutex_init(&data->update_lock);

	thmc50_init_client(client);

	/* Register sysfs hooks */
	if ((err = sysfs_create_group(&client->dev.kobj, &thmc50_group)))
		goto exit_free;

	/* Register ADM1022 sysfs hooks */
	if (data->has_temp3)
		if ((err = sysfs_create_group(&client->dev.kobj,
					      &temp3_group)))
			goto exit_remove_sysfs_thmc50;

	/* Register a new directory entry with module sensors */
	data->hwmon_dev = hwmon_device_register(&client->dev);
	if (IS_ERR(data->hwmon_dev)) {
		err = PTR_ERR(data->hwmon_dev);
		goto exit_remove_sysfs;
	}

	return 0;

exit_remove_sysfs:
	if (data->has_temp3)
		sysfs_remove_group(&client->dev.kobj, &temp3_group);
exit_remove_sysfs_thmc50:
	sysfs_remove_group(&client->dev.kobj, &thmc50_group);
exit_free:
	kfree(data);
exit:
	return err;
}

/* Unbind: tear down in reverse order of probe. */
static int thmc50_remove(struct i2c_client *client)
{
	struct thmc50_data *data = i2c_get_clientdata(client);

	hwmon_device_unregister(data->hwmon_dev);
	sysfs_remove_group(&client->dev.kobj, &thmc50_group);
	if (data->has_temp3)
		sysfs_remove_group(&client->dev.kobj, &temp3_group);

	kfree(data);

	return 0;
}

/* One-time chip setup: ensure a non-zero analog out and start monitoring. */
static void thmc50_init_client(struct i2c_client *client)
{
	struct thmc50_data *data = i2c_get_clientdata(client);
	int config;

	data->analog_out = i2c_smbus_read_byte_data(client,
						    THMC50_REG_ANALOG_OUT);
	/* set up to at least 1 */
	if (data->analog_out == 0) {
		data->analog_out = 1;
		i2c_smbus_write_byte_data(client, THMC50_REG_ANALOG_OUT,
					  data->analog_out);
	}
	config = i2c_smbus_read_byte_data(client, THMC50_REG_CONF);
	config |= 0x1;	/* start the chip if it is in standby mode */
	if (data->type == adm1022 && (config & (1 << 7)))
		data->has_temp3 = 1;	/* 3rd temp already enabled in HW */
	i2c_smbus_write_byte_data(client, THMC50_REG_CONF, config);
}

/*
 * Refresh the register cache if it is stale (rate limited) and return it.
 * Callers read the returned struct without the lock, relying on the
 * single-writer update pattern used throughout this driver.
 */
static struct thmc50_data *thmc50_update_device(struct device *dev)
{
	struct i2c_client *client = to_i2c_client(dev);
	struct thmc50_data *data = i2c_get_clientdata(client);
	int timeout = HZ / 5 + (data->type == thmc50 ? HZ : 0);

	mutex_lock(&data->update_lock);

	if (time_after(jiffies, data->last_updated + timeout)
	    || !data->valid) {

		int temps = data->has_temp3 ? 3 : 2;
		int i;
		int prog = i2c_smbus_read_byte_data(client, THMC50_REG_CONF);

		prog &= THMC50_REG_CONF_PROGRAMMED;

		for (i = 0; i < temps; i++) {
			data->temp_input[i] = i2c_smbus_read_byte_data(client,
						THMC50_REG_TEMP[i]);
			data->temp_max[i] = i2c_smbus_read_byte_data(client,
						THMC50_REG_TEMP_MAX[i]);
			data->temp_min[i] = i2c_smbus_read_byte_data(client,
						THMC50_REG_TEMP_MIN[i]);
			/* Use factory defaults unless limits were programmed */
			data->temp_critical[i] =
				i2c_smbus_read_byte_data(client,
					prog ? THMC50_REG_TEMP_CRITICAL[i]
					     : THMC50_REG_TEMP_DEFAULT[i]);
		}
		data->analog_out = i2c_smbus_read_byte_data(client,
						THMC50_REG_ANALOG_OUT);
		data->alarms = i2c_smbus_read_byte_data(client,
						THMC50_REG_INTR);
		data->last_updated = jiffies;
		data->valid = 1;
	}

	mutex_unlock(&data->update_lock);

	return data;
}

static int __init sm_thmc50_init(void)
{
	return i2c_add_driver(&thmc50_driver);
}

static void __exit sm_thmc50_exit(void)
{
	i2c_del_driver(&thmc50_driver);
}

MODULE_AUTHOR("Krzysztof Helt <krzysztof.h1@wp.pl>");
MODULE_DESCRIPTION("THMC50 driver");

module_init(sm_thmc50_init);
module_exit(sm_thmc50_exit);
gpl-2.0
uileyar/fastsocket
kernel/arch/m68knommu/platform/5407/config.c
4590
3287
/***************************************************************************/

/*
 *	linux/arch/m68knommu/platform/5407/config.c
 *
 *	Copyright (C) 1999-2002, Greg Ungerer (gerg@snapgear.com)
 *	Copyright (C) 2000, Lineo (www.lineo.com)
 */

/***************************************************************************/

#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/init.h>
#include <linux/io.h>
#include <asm/machdep.h>
#include <asm/coldfire.h>
#include <asm/mcfsim.h>
#include <asm/mcfuart.h>

/***************************************************************************/

/* The two on-chip UARTs of the ColdFire 5407 and their (fixed) IRQs. */
static struct mcf_platform_uart m5407_uart_platform[] = {
	{
		.mapbase	= MCF_MBAR + MCFUART_BASE1,
		.irq		= 73,
	},
	{
		.mapbase	= MCF_MBAR + MCFUART_BASE2,
		.irq		= 74,
	},
	{ },	/* end-of-table sentinel */
};

static struct platform_device m5407_uart = {
	.name			= "mcfuart",
	.id			= 0,
	.dev.platform_data	= m5407_uart_platform,
};

static struct platform_device *m5407_devices[] __initdata = {
	&m5407_uart,
};

/***************************************************************************/

/*
 * Program the SIM interrupt control register, the UART interrupt vector
 * register and the interrupt mask mapping for one UART line.
 */
static void __init m5407_uart_init_line(int line, int irq)
{
	if (line == 0) {
		writeb(MCFSIM_ICR_LEVEL6 | MCFSIM_ICR_PRI1,
			MCF_MBAR + MCFSIM_UART1ICR);
		writeb(irq, MCF_MBAR + MCFUART_BASE1 + MCFUART_UIVR);
		mcf_mapirq2imr(irq, MCFINTC_UART0);
	} else if (line == 1) {
		writeb(MCFSIM_ICR_LEVEL6 | MCFSIM_ICR_PRI2,
			MCF_MBAR + MCFSIM_UART2ICR);
		writeb(irq, MCF_MBAR + MCFUART_BASE2 + MCFUART_UIVR);
		mcf_mapirq2imr(irq, MCFINTC_UART1);
	}
}

/* Set up interrupt routing for every UART in the platform table. */
static void __init m5407_uarts_init(void)
{
	const int nrlines = ARRAY_SIZE(m5407_uart_platform);
	int line;

	for (line = 0; (line < nrlines); line++)
		m5407_uart_init_line(line, m5407_uart_platform[line].irq);
}

/***************************************************************************/

static void __init m5407_timers_init(void)
{
	/* Timer1 is always used as system timer */
	writeb(MCFSIM_ICR_AUTOVEC | MCFSIM_ICR_LEVEL6 | MCFSIM_ICR_PRI3,
		MCF_MBAR + MCFSIM_TIMER1ICR);
	mcf_mapirq2imr(MCF_IRQ_TIMER, MCFINTC_TIMER1);

#ifdef CONFIG_HIGHPROFILE
	/* Timer2 is to be used as a high speed profile timer  */
	writeb(MCFSIM_ICR_AUTOVEC | MCFSIM_ICR_LEVEL7 | MCFSIM_ICR_PRI3,
		MCF_MBAR + MCFSIM_TIMER2ICR);
	mcf_mapirq2imr(MCF_IRQ_PROFILER, MCFINTC_TIMER2);
#endif
}

/***************************************************************************/

/* Reset the CPU by arming the watchdog for an immediate soft reset. */
void m5407_cpu_reset(void)
{
	local_irq_disable();
	/* set watchdog to soft reset, and enabled */
	__raw_writeb(0xc0, MCF_MBAR + MCFSIM_SYPCR);
	for (;;)
		/* wait for watchdog to timeout */;
}

/***************************************************************************/

/* Board setup entry point, called early by the m68knommu boot code. */
void __init config_BSP(char *commandp, int size)
{
	mach_reset = m5407_cpu_reset;
	m5407_timers_init();
	m5407_uarts_init();

	/* Only support the external interrupts on their primary level */
	mcf_mapirq2imr(25, MCFINTC_EINT1);
	mcf_mapirq2imr(27, MCFINTC_EINT3);
	mcf_mapirq2imr(29, MCFINTC_EINT5);
	mcf_mapirq2imr(31, MCFINTC_EINT7);
}

/***************************************************************************/

static int __init init_BSP(void)
{
	platform_add_devices(m5407_devices, ARRAY_SIZE(m5407_devices));
	return 0;
}

arch_initcall(init_BSP);

/***************************************************************************/
gpl-2.0
SuperHanss/android_kernel_sony_apq8064
drivers/parisc/lba_pci.c
4846
47324
/* ** ** PCI Lower Bus Adapter (LBA) manager ** ** (c) Copyright 1999,2000 Grant Grundler ** (c) Copyright 1999,2000 Hewlett-Packard Company ** ** This program is free software; you can redistribute it and/or modify ** it under the terms of the GNU General Public License as published by ** the Free Software Foundation; either version 2 of the License, or ** (at your option) any later version. ** ** ** This module primarily provides access to PCI bus (config/IOport ** spaces) on platforms with an SBA/LBA chipset. A/B/C/J/L/N-class ** with 4 digit model numbers - eg C3000 (and A400...sigh). ** ** LBA driver isn't as simple as the Dino driver because: ** (a) this chip has substantial bug fixes between revisions ** (Only one Dino bug has a software workaround :^( ) ** (b) has more options which we don't (yet) support (DMA hints, OLARD) ** (c) IRQ support lives in the I/O SAPIC driver (not with PCI driver) ** (d) play nicely with both PAT and "Legacy" PA-RISC firmware (PDC). ** (dino only deals with "Legacy" PDC) ** ** LBA driver passes the I/O SAPIC HPA to the I/O SAPIC driver. ** (I/O SAPIC is integratd in the LBA chip). ** ** FIXME: Add support to SBA and LBA drivers for DMA hint sets ** FIXME: Add support for PCI card hot-plug (OLARD). 
*/ #include <linux/delay.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/spinlock.h> #include <linux/init.h> /* for __init and __devinit */ #include <linux/pci.h> #include <linux/ioport.h> #include <linux/slab.h> #include <asm/byteorder.h> #include <asm/pdc.h> #include <asm/pdcpat.h> #include <asm/page.h> #include <asm/ropes.h> #include <asm/hardware.h> /* for register_parisc_driver() stuff */ #include <asm/parisc-device.h> #include <asm/io.h> /* read/write stuff */ #undef DEBUG_LBA /* general stuff */ #undef DEBUG_LBA_PORT /* debug I/O Port access */ #undef DEBUG_LBA_CFG /* debug Config Space Access (ie PCI Bus walk) */ #undef DEBUG_LBA_PAT /* debug PCI Resource Mgt code - PDC PAT only */ #undef FBB_SUPPORT /* Fast Back-Back xfers - NOT READY YET */ #ifdef DEBUG_LBA #define DBG(x...) printk(x) #else #define DBG(x...) #endif #ifdef DEBUG_LBA_PORT #define DBG_PORT(x...) printk(x) #else #define DBG_PORT(x...) #endif #ifdef DEBUG_LBA_CFG #define DBG_CFG(x...) printk(x) #else #define DBG_CFG(x...) #endif #ifdef DEBUG_LBA_PAT #define DBG_PAT(x...) printk(x) #else #define DBG_PAT(x...) #endif /* ** Config accessor functions only pass in the 8-bit bus number and not ** the 8-bit "PCI Segment" number. Each LBA will be assigned a PCI bus ** number based on what firmware wrote into the scratch register. ** ** The "secondary" bus number is set to this before calling ** pci_register_ops(). If any PPB's are present, the scan will ** discover them and update the "secondary" and "subordinate" ** fields in the pci_bus structure. ** ** Changes in the configuration *may* result in a different ** bus number for each LBA depending on what firmware does. 
*/ #define MODULE_NAME "LBA" /* non-postable I/O port space, densely packed */ #define LBA_PORT_BASE (PCI_F_EXTEND | 0xfee00000UL) static void __iomem *astro_iop_base __read_mostly; static u32 lba_t32; /* lba flags */ #define LBA_FLAG_SKIP_PROBE 0x10 #define LBA_SKIP_PROBE(d) ((d)->flags & LBA_FLAG_SKIP_PROBE) /* Looks nice and keeps the compiler happy */ #define LBA_DEV(d) ((struct lba_device *) (d)) /* ** Only allow 8 subsidiary busses per LBA ** Problem is the PCI bus numbering is globally shared. */ #define LBA_MAX_NUM_BUSES 8 /************************************ * LBA register read and write support * * BE WARNED: register writes are posted. * (ie follow writes which must reach HW with a read) */ #define READ_U8(addr) __raw_readb(addr) #define READ_U16(addr) __raw_readw(addr) #define READ_U32(addr) __raw_readl(addr) #define WRITE_U8(value, addr) __raw_writeb(value, addr) #define WRITE_U16(value, addr) __raw_writew(value, addr) #define WRITE_U32(value, addr) __raw_writel(value, addr) #define READ_REG8(addr) readb(addr) #define READ_REG16(addr) readw(addr) #define READ_REG32(addr) readl(addr) #define READ_REG64(addr) readq(addr) #define WRITE_REG8(value, addr) writeb(value, addr) #define WRITE_REG16(value, addr) writew(value, addr) #define WRITE_REG32(value, addr) writel(value, addr) #define LBA_CFG_TOK(bus,dfn) ((u32) ((bus)<<16 | (dfn)<<8)) #define LBA_CFG_BUS(tok) ((u8) ((tok)>>16)) #define LBA_CFG_DEV(tok) ((u8) ((tok)>>11) & 0x1f) #define LBA_CFG_FUNC(tok) ((u8) ((tok)>>8 ) & 0x7) /* ** Extract LBA (Rope) number from HPA ** REVISIT: 16 ropes for Stretch/Ike? 
*/ #define ROPES_PER_IOC 8 #define LBA_NUM(x) ((((unsigned long) x) >> 13) & (ROPES_PER_IOC-1)) static void lba_dump_res(struct resource *r, int d) { int i; if (NULL == r) return; printk(KERN_DEBUG "(%p)", r->parent); for (i = d; i ; --i) printk(" "); printk(KERN_DEBUG "%p [%lx,%lx]/%lx\n", r, (long)r->start, (long)r->end, r->flags); lba_dump_res(r->child, d+2); lba_dump_res(r->sibling, d); } /* ** LBA rev 2.0, 2.1, 2.2, and 3.0 bus walks require a complex ** workaround for cfg cycles: ** -- preserve LBA state ** -- prevent any DMA from occurring ** -- turn on smart mode ** -- probe with config writes before doing config reads ** -- check ERROR_STATUS ** -- clear ERROR_STATUS ** -- restore LBA state ** ** The workaround is only used for device discovery. */ static int lba_device_present(u8 bus, u8 dfn, struct lba_device *d) { u8 first_bus = d->hba.hba_bus->secondary; u8 last_sub_bus = d->hba.hba_bus->subordinate; if ((bus < first_bus) || (bus > last_sub_bus) || ((bus - first_bus) >= LBA_MAX_NUM_BUSES)) { return 0; } return 1; } #define LBA_CFG_SETUP(d, tok) { \ /* Save contents of error config register. */ \ error_config = READ_REG32(d->hba.base_addr + LBA_ERROR_CONFIG); \ \ /* Save contents of status control register. */ \ status_control = READ_REG32(d->hba.base_addr + LBA_STAT_CTL); \ \ /* For LBA rev 2.0, 2.1, 2.2, and 3.0, we must disable DMA \ ** arbitration for full bus walks. \ */ \ /* Save contents of arb mask register. */ \ arb_mask = READ_REG32(d->hba.base_addr + LBA_ARB_MASK); \ \ /* \ * Turn off all device arbitration bits (i.e. everything \ * except arbitration enable bit). \ */ \ WRITE_REG32(0x1, d->hba.base_addr + LBA_ARB_MASK); \ \ /* \ * Set the smart mode bit so that master aborts don't cause \ * LBA to go into PCI fatal mode (required). 
\ */ \ WRITE_REG32(error_config | LBA_SMART_MODE, d->hba.base_addr + LBA_ERROR_CONFIG); \ } #define LBA_CFG_PROBE(d, tok) { \ /* \ * Setup Vendor ID write and read back the address register \ * to make sure that LBA is the bus master. \ */ \ WRITE_REG32(tok | PCI_VENDOR_ID, (d)->hba.base_addr + LBA_PCI_CFG_ADDR);\ /* \ * Read address register to ensure that LBA is the bus master, \ * which implies that DMA traffic has stopped when DMA arb is off. \ */ \ lba_t32 = READ_REG32((d)->hba.base_addr + LBA_PCI_CFG_ADDR); \ /* \ * Generate a cfg write cycle (will have no affect on \ * Vendor ID register since read-only). \ */ \ WRITE_REG32(~0, (d)->hba.base_addr + LBA_PCI_CFG_DATA); \ /* \ * Make sure write has completed before proceeding further, \ * i.e. before setting clear enable. \ */ \ lba_t32 = READ_REG32((d)->hba.base_addr + LBA_PCI_CFG_ADDR); \ } /* * HPREVISIT: * -- Can't tell if config cycle got the error. * * OV bit is broken until rev 4.0, so can't use OV bit and * LBA_ERROR_LOG_ADDR to tell if error belongs to config cycle. * * As of rev 4.0, no longer need the error check. * * -- Even if we could tell, we still want to return -1 * for **ANY** error (not just master abort). * * -- Only clear non-fatal errors (we don't want to bring * LBA out of pci-fatal mode). * * Actually, there is still a race in which * we could be clearing a fatal error. We will * live with this during our initial bus walk * until rev 4.0 (no driver activity during * initial bus walk). The initial bus walk * has race conditions concerning the use of * smart mode as well. */ #define LBA_MASTER_ABORT_ERROR 0xc #define LBA_FATAL_ERROR 0x10 #define LBA_CFG_MASTER_ABORT_CHECK(d, base, tok, error) { \ u32 error_status = 0; \ /* \ * Set clear enable (CE) bit. Unset by HW when new \ * errors are logged -- LBA HW ERS section 14.3.3). 
\ */ \ WRITE_REG32(status_control | CLEAR_ERRLOG_ENABLE, base + LBA_STAT_CTL); \ error_status = READ_REG32(base + LBA_ERROR_STATUS); \ if ((error_status & 0x1f) != 0) { \ /* \ * Fail the config read request. \ */ \ error = 1; \ if ((error_status & LBA_FATAL_ERROR) == 0) { \ /* \ * Clear error status (if fatal bit not set) by setting \ * clear error log bit (CL). \ */ \ WRITE_REG32(status_control | CLEAR_ERRLOG, base + LBA_STAT_CTL); \ } \ } \ } #define LBA_CFG_TR4_ADDR_SETUP(d, addr) \ WRITE_REG32(((addr) & ~3), (d)->hba.base_addr + LBA_PCI_CFG_ADDR); #define LBA_CFG_ADDR_SETUP(d, addr) { \ WRITE_REG32(((addr) & ~3), (d)->hba.base_addr + LBA_PCI_CFG_ADDR); \ /* \ * Read address register to ensure that LBA is the bus master, \ * which implies that DMA traffic has stopped when DMA arb is off. \ */ \ lba_t32 = READ_REG32((d)->hba.base_addr + LBA_PCI_CFG_ADDR); \ } #define LBA_CFG_RESTORE(d, base) { \ /* \ * Restore status control register (turn off clear enable). \ */ \ WRITE_REG32(status_control, base + LBA_STAT_CTL); \ /* \ * Restore error config register (turn off smart mode). \ */ \ WRITE_REG32(error_config, base + LBA_ERROR_CONFIG); \ /* \ * Restore arb mask register (reenables DMA arbitration). 
\ */ \ WRITE_REG32(arb_mask, base + LBA_ARB_MASK); \ } static unsigned int lba_rd_cfg(struct lba_device *d, u32 tok, u8 reg, u32 size) { u32 data = ~0U; int error = 0; u32 arb_mask = 0; /* used by LBA_CFG_SETUP/RESTORE */ u32 error_config = 0; /* used by LBA_CFG_SETUP/RESTORE */ u32 status_control = 0; /* used by LBA_CFG_SETUP/RESTORE */ LBA_CFG_SETUP(d, tok); LBA_CFG_PROBE(d, tok); LBA_CFG_MASTER_ABORT_CHECK(d, d->hba.base_addr, tok, error); if (!error) { void __iomem *data_reg = d->hba.base_addr + LBA_PCI_CFG_DATA; LBA_CFG_ADDR_SETUP(d, tok | reg); switch (size) { case 1: data = (u32) READ_REG8(data_reg + (reg & 3)); break; case 2: data = (u32) READ_REG16(data_reg+ (reg & 2)); break; case 4: data = READ_REG32(data_reg); break; } } LBA_CFG_RESTORE(d, d->hba.base_addr); return(data); } static int elroy_cfg_read(struct pci_bus *bus, unsigned int devfn, int pos, int size, u32 *data) { struct lba_device *d = LBA_DEV(parisc_walk_tree(bus->bridge)); u32 local_bus = (bus->parent == NULL) ? 0 : bus->secondary; u32 tok = LBA_CFG_TOK(local_bus, devfn); void __iomem *data_reg = d->hba.base_addr + LBA_PCI_CFG_DATA; if ((pos > 255) || (devfn > 255)) return -EINVAL; /* FIXME: B2K/C3600 workaround is always use old method... */ /* if (!LBA_SKIP_PROBE(d)) */ { /* original - Generate config cycle on broken elroy with risk we will miss PCI bus errors. */ *data = lba_rd_cfg(d, tok, pos, size); DBG_CFG("%s(%x+%2x) -> 0x%x (a)\n", __func__, tok, pos, *data); return 0; } if (LBA_SKIP_PROBE(d) && !lba_device_present(bus->secondary, devfn, d)) { DBG_CFG("%s(%x+%2x) -> -1 (b)\n", __func__, tok, pos); /* either don't want to look or know device isn't present. */ *data = ~0U; return(0); } /* Basic Algorithm ** Should only get here on fully working LBA rev. ** This is how simple the code should have been. 
*/ LBA_CFG_ADDR_SETUP(d, tok | pos); switch(size) { case 1: *data = READ_REG8 (data_reg + (pos & 3)); break; case 2: *data = READ_REG16(data_reg + (pos & 2)); break; case 4: *data = READ_REG32(data_reg); break; } DBG_CFG("%s(%x+%2x) -> 0x%x (c)\n", __func__, tok, pos, *data); return 0; } static void lba_wr_cfg(struct lba_device *d, u32 tok, u8 reg, u32 data, u32 size) { int error = 0; u32 arb_mask = 0; u32 error_config = 0; u32 status_control = 0; void __iomem *data_reg = d->hba.base_addr + LBA_PCI_CFG_DATA; LBA_CFG_SETUP(d, tok); LBA_CFG_ADDR_SETUP(d, tok | reg); switch (size) { case 1: WRITE_REG8 (data, data_reg + (reg & 3)); break; case 2: WRITE_REG16(data, data_reg + (reg & 2)); break; case 4: WRITE_REG32(data, data_reg); break; } LBA_CFG_MASTER_ABORT_CHECK(d, d->hba.base_addr, tok, error); LBA_CFG_RESTORE(d, d->hba.base_addr); } /* * LBA 4.0 config write code implements non-postable semantics * by doing a read of CONFIG ADDR after the write. */ static int elroy_cfg_write(struct pci_bus *bus, unsigned int devfn, int pos, int size, u32 data) { struct lba_device *d = LBA_DEV(parisc_walk_tree(bus->bridge)); u32 local_bus = (bus->parent == NULL) ? 
0 : bus->secondary; u32 tok = LBA_CFG_TOK(local_bus,devfn); if ((pos > 255) || (devfn > 255)) return -EINVAL; if (!LBA_SKIP_PROBE(d)) { /* Original Workaround */ lba_wr_cfg(d, tok, pos, (u32) data, size); DBG_CFG("%s(%x+%2x) = 0x%x (a)\n", __func__, tok, pos,data); return 0; } if (LBA_SKIP_PROBE(d) && (!lba_device_present(bus->secondary, devfn, d))) { DBG_CFG("%s(%x+%2x) = 0x%x (b)\n", __func__, tok, pos,data); return 1; /* New Workaround */ } DBG_CFG("%s(%x+%2x) = 0x%x (c)\n", __func__, tok, pos, data); /* Basic Algorithm */ LBA_CFG_ADDR_SETUP(d, tok | pos); switch(size) { case 1: WRITE_REG8 (data, d->hba.base_addr + LBA_PCI_CFG_DATA + (pos & 3)); break; case 2: WRITE_REG16(data, d->hba.base_addr + LBA_PCI_CFG_DATA + (pos & 2)); break; case 4: WRITE_REG32(data, d->hba.base_addr + LBA_PCI_CFG_DATA); break; } /* flush posted write */ lba_t32 = READ_REG32(d->hba.base_addr + LBA_PCI_CFG_ADDR); return 0; } static struct pci_ops elroy_cfg_ops = { .read = elroy_cfg_read, .write = elroy_cfg_write, }; /* * The mercury_cfg_ops are slightly misnamed; they're also used for Elroy * TR4.0 as no additional bugs were found in this areea between Elroy and * Mercury */ static int mercury_cfg_read(struct pci_bus *bus, unsigned int devfn, int pos, int size, u32 *data) { struct lba_device *d = LBA_DEV(parisc_walk_tree(bus->bridge)); u32 local_bus = (bus->parent == NULL) ? 0 : bus->secondary; u32 tok = LBA_CFG_TOK(local_bus, devfn); void __iomem *data_reg = d->hba.base_addr + LBA_PCI_CFG_DATA; if ((pos > 255) || (devfn > 255)) return -EINVAL; LBA_CFG_TR4_ADDR_SETUP(d, tok | pos); switch(size) { case 1: *data = READ_REG8(data_reg + (pos & 3)); break; case 2: *data = READ_REG16(data_reg + (pos & 2)); break; case 4: *data = READ_REG32(data_reg); break; break; } DBG_CFG("mercury_cfg_read(%x+%2x) -> 0x%x\n", tok, pos, *data); return 0; } /* * LBA 4.0 config write code implements non-postable semantics * by doing a read of CONFIG ADDR after the write. 
 */
static int mercury_cfg_write(struct pci_bus *bus, unsigned int devfn, int pos, int size, u32 data)
{
	struct lba_device *d = LBA_DEV(parisc_walk_tree(bus->bridge));
	void __iomem *data_reg = d->hba.base_addr + LBA_PCI_CFG_DATA;
	u32 local_bus = (bus->parent == NULL) ? 0 : bus->secondary;
	u32 tok = LBA_CFG_TOK(local_bus,devfn);

	/* Config space is 256 bytes per function on conventional PCI. */
	if ((pos > 255) || (devfn > 255))
		return -EINVAL;

	DBG_CFG("%s(%x+%2x) <- 0x%x (c)\n", __func__, tok, pos, data);

	LBA_CFG_TR4_ADDR_SETUP(d, tok | pos);
	switch(size) {
	case 1: WRITE_REG8 (data, data_reg + (pos & 3)); break;
	case 2: WRITE_REG16(data, data_reg + (pos & 2)); break;
	case 4: WRITE_REG32(data, data_reg); break;
	}
	/* flush posted write */
	lba_t32 = READ_U32(d->hba.base_addr + LBA_PCI_CFG_ADDR);
	return 0;
}

static struct pci_ops mercury_cfg_ops = {
	.read =		mercury_cfg_read,
	.write =	mercury_cfg_write,
};


/* No BIOS-time initialization needed on parisc; hook only logs. */
static void
lba_bios_init(void)
{
	DBG(MODULE_NAME ": lba_bios_init\n");
}

#ifdef CONFIG_64BIT

/*
 * truncate_pat_collision:  Deal with overlaps or outright collisions
 *			between PAT PDC reported ranges.
 *
 *   Broken PA8800 firmware will report lmmio range that
 *   overlaps with CPU HPA. Just truncate the lmmio range.
 *
 *   BEWARE: conflicts with this lmmio range may be an
 *   elmmio range which is pointing down another rope.
 *
 *  FIXME: only deals with one collision per range...theoretically we
 *  could have several. Supporting more than one collision will get messy.
 */
static unsigned long
truncate_pat_collision(struct resource *root, struct resource *new)
{
	unsigned long start = new->start;
	unsigned long end = new->end;
	struct resource *tmp = root->child;

	/* Degenerate range, below the root, or empty tree: nothing to do. */
	if (end <= start || start < root->start || !tmp)
		return 0;

	/* find first overlap */
	while (tmp && tmp->end < start)
		tmp = tmp->sibling;

	/* no entries overlap */
	if (!tmp)  return 0;

	/* found one that starts behind the new one
	** Don't need to do anything.
*/ if (tmp->start >= end) return 0; if (tmp->start <= start) { /* "front" of new one overlaps */ new->start = tmp->end + 1; if (tmp->end >= end) { /* AACCKK! totally overlaps! drop this range. */ return 1; } } if (tmp->end < end ) { /* "end" of new one overlaps */ new->end = tmp->start - 1; } printk(KERN_WARNING "LBA: Truncating lmmio_space [%lx/%lx] " "to [%lx,%lx]\n", start, end, (long)new->start, (long)new->end ); return 0; /* truncation successful */ } #else #define truncate_pat_collision(r,n) (0) #endif /* ** The algorithm is generic code. ** But it needs to access local data structures to get the IRQ base. ** Could make this a "pci_fixup_irq(bus, region)" but not sure ** it's worth it. ** ** Called by do_pci_scan_bus() immediately after each PCI bus is walked. ** Resources aren't allocated until recursive buswalk below HBA is completed. */ static void lba_fixup_bus(struct pci_bus *bus) { struct list_head *ln; #ifdef FBB_SUPPORT u16 status; #endif struct lba_device *ldev = LBA_DEV(parisc_walk_tree(bus->bridge)); DBG("lba_fixup_bus(0x%p) bus %d platform_data 0x%p\n", bus, bus->secondary, bus->bridge->platform_data); /* ** Properly Setup MMIO resources for this bus. ** pci_alloc_primary_bus() mangles this. 
*/ if (bus->parent) { int i; /* PCI-PCI Bridge */ pci_read_bridge_bases(bus); for (i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++) { pci_claim_resource(bus->self, i); } } else { /* Host-PCI Bridge */ int err; DBG("lba_fixup_bus() %s [%lx/%lx]/%lx\n", ldev->hba.io_space.name, ldev->hba.io_space.start, ldev->hba.io_space.end, ldev->hba.io_space.flags); DBG("lba_fixup_bus() %s [%lx/%lx]/%lx\n", ldev->hba.lmmio_space.name, ldev->hba.lmmio_space.start, ldev->hba.lmmio_space.end, ldev->hba.lmmio_space.flags); err = request_resource(&ioport_resource, &(ldev->hba.io_space)); if (err < 0) { lba_dump_res(&ioport_resource, 2); BUG(); } if (ldev->hba.elmmio_space.start) { err = request_resource(&iomem_resource, &(ldev->hba.elmmio_space)); if (err < 0) { printk("FAILED: lba_fixup_bus() request for " "elmmio_space [%lx/%lx]\n", (long)ldev->hba.elmmio_space.start, (long)ldev->hba.elmmio_space.end); /* lba_dump_res(&iomem_resource, 2); */ /* BUG(); */ } } if (ldev->hba.lmmio_space.flags) { err = request_resource(&iomem_resource, &(ldev->hba.lmmio_space)); if (err < 0) { printk(KERN_ERR "FAILED: lba_fixup_bus() request for " "lmmio_space [%lx/%lx]\n", (long)ldev->hba.lmmio_space.start, (long)ldev->hba.lmmio_space.end); } } #ifdef CONFIG_64BIT /* GMMIO is distributed range. Every LBA/Rope gets part it. */ if (ldev->hba.gmmio_space.flags) { err = request_resource(&iomem_resource, &(ldev->hba.gmmio_space)); if (err < 0) { printk("FAILED: lba_fixup_bus() request for " "gmmio_space [%lx/%lx]\n", (long)ldev->hba.gmmio_space.start, (long)ldev->hba.gmmio_space.end); lba_dump_res(&iomem_resource, 2); BUG(); } } #endif } list_for_each(ln, &bus->devices) { int i; struct pci_dev *dev = pci_dev_b(ln); DBG("lba_fixup_bus() %s\n", pci_name(dev)); /* Virtualize Device/Bridge Resources. 
*/ for (i = 0; i < PCI_BRIDGE_RESOURCES; i++) { struct resource *res = &dev->resource[i]; /* If resource not allocated - skip it */ if (!res->start) continue; /* ** FIXME: this will result in whinging for devices ** that share expansion ROMs (think quad tulip), but ** isn't harmful. */ pci_claim_resource(dev, i); } #ifdef FBB_SUPPORT /* ** If one device does not support FBB transfers, ** No one on the bus can be allowed to use them. */ (void) pci_read_config_word(dev, PCI_STATUS, &status); bus->bridge_ctl &= ~(status & PCI_STATUS_FAST_BACK); #endif /* ** P2PB's have no IRQs. ignore them. */ if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) continue; /* Adjust INTERRUPT_LINE for this dev */ iosapic_fixup_irq(ldev->iosapic_obj, dev); } #ifdef FBB_SUPPORT /* FIXME/REVISIT - finish figuring out to set FBB on both ** pci_setup_bridge() clobbers PCI_BRIDGE_CONTROL. ** Can't fixup here anyway....garr... */ if (fbb_enable) { if (bus->parent) { u8 control; /* enable on PPB */ (void) pci_read_config_byte(bus->self, PCI_BRIDGE_CONTROL, &control); (void) pci_write_config_byte(bus->self, PCI_BRIDGE_CONTROL, control | PCI_STATUS_FAST_BACK); } else { /* enable on LBA */ } fbb_enable = PCI_COMMAND_FAST_BACK; } /* Lastly enable FBB/PERR/SERR on all devices too */ list_for_each(ln, &bus->devices) { (void) pci_read_config_word(dev, PCI_COMMAND, &status); status |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR | fbb_enable; (void) pci_write_config_word(dev, PCI_COMMAND, status); } #endif } static struct pci_bios_ops lba_bios_ops = { .init = lba_bios_init, .fixup_bus = lba_fixup_bus, }; /******************************************************* ** ** LBA Sprockets "I/O Port" Space Accessor Functions ** ** This set of accessor functions is intended for use with ** "legacy firmware" (ie Sprockets on Allegro/Forte boxes). ** ** Many PCI devices don't require use of I/O port space (eg Tulip, ** NCR720) since they export the same registers to both MMIO and ** I/O port space. 
In general I/O port space is slower than
** MMIO since drivers are designed so PIO writes can be posted.
**
********************************************************/

/* Generates lba_astro_in8/in16/in32: PIO reads through the legacy
** Sprockets I/O-port aperture (astro_iop_base).
*/
#define LBA_PORT_IN(size, mask) \
static u##size lba_astro_in##size (struct pci_hba_data *d, u16 addr) \
{ \
	u##size t; \
	t = READ_REG##size(astro_iop_base + addr); \
	DBG_PORT(" 0x%x\n", t); \
	return (t); \
}

LBA_PORT_IN( 8, 3)
LBA_PORT_IN(16, 2)
LBA_PORT_IN(32, 0)



/*
** BUG X4107:  Ordering broken - DMA RD return can bypass PIO WR
**
** Fixed in Elroy 2.2. The READ_U32(..., LBA_FUNC_ID) below is
** guarantee non-postable completion semantics - not avoid X4107.
** The READ_U32 only guarantees the write data gets to elroy but
** out to the PCI bus. We can't read stuff from I/O port space
** since we don't know what has side-effects. Attempting to read
** from configuration space would be suicidal given the number of
** bugs in that elroy functionality.
**
**      Description:
**          DMA read results can improperly pass PIO writes (X4107).  The
**          result of this bug is that if a processor modifies a location in
**          memory after having issued PIO writes, the PIO writes are not
**          guaranteed to be completed before a PCI device is allowed to see
**          the modified data in a DMA read.
**
**          Note that IKE bug X3719 in TR1 IKEs will result in the same
**          symptom.
**
**      Workaround:
**          The workaround for this bug is to always follow a PIO write with
**          a PIO read to the same bus before starting DMA on that PCI bus.
**
*/

/* Generates lba_astro_out8/out16/out32: PIO writes, with an extra
** flushing read on pre-3.0 hardware as the X4107 workaround above.
*/
#define LBA_PORT_OUT(size, mask) \
static void lba_astro_out##size (struct pci_hba_data *d, u16 addr, u##size val) \
{ \
	DBG_PORT("%s(0x%p, 0x%x, 0x%x)\n", __func__, d, addr, val); \
	WRITE_REG##size(val, astro_iop_base + addr); \
	if (LBA_DEV(d)->hw_rev < 3) \
		lba_t32 = READ_U32(d->base_addr + LBA_FUNC_ID); \
}

LBA_PORT_OUT( 8, 3)
LBA_PORT_OUT(16, 2)
LBA_PORT_OUT(32, 0)


static struct pci_port_ops lba_astro_port_ops = {
	.inb =	lba_astro_in8,
	.inw =	lba_astro_in16,
	.inl =	lba_astro_in32,
	.outb =	lba_astro_out8,
	.outw =	lba_astro_out16,
	.outl =	lba_astro_out32
};


#ifdef CONFIG_64BIT
/* Map a 16-bit I/O-port address into the per-rope PIOP window inside
** GMMIO space (see layout description in the comment block below).
*/
#define PIOP_TO_GMMIO(lba, addr) \
	((lba)->iop_base + (((addr)&0xFFFC)<<10) + ((addr)&3))

/*******************************************************
**
** LBA PAT "I/O Port" Space Accessor Functions
**
** This set of accessor functions is intended for use with
** "PAT PDC" firmware (ie Prelude/Rhapsody/Piranha boxes).
**
** This uses the PIOP space located in the first 64MB of GMMIO.
** Each rope gets a full 64*KB* (ie 4 bytes per page) this way.
** bits 1:0 stay the same.  bits 15:2 become 25:12.
** Then add the base and we can generate an I/O Port cycle.
********************************************************/

/* PAT-firmware variants of the port accessors: route the PIO cycle
** through the per-rope PIOP window instead of astro_iop_base.
*/
#undef LBA_PORT_IN
#define LBA_PORT_IN(size, mask) \
static u##size lba_pat_in##size (struct pci_hba_data *l, u16 addr) \
{ \
	u##size t; \
	DBG_PORT("%s(0x%p, 0x%x) ->", __func__, l, addr); \
	t = READ_REG##size(PIOP_TO_GMMIO(LBA_DEV(l), addr)); \
	DBG_PORT(" 0x%x\n", t); \
	return (t); \
}

LBA_PORT_IN( 8, 3)
LBA_PORT_IN(16, 2)
LBA_PORT_IN(32, 0)


#undef LBA_PORT_OUT
#define LBA_PORT_OUT(size, mask) \
static void lba_pat_out##size (struct pci_hba_data *l, u16 addr, u##size val) \
{ \
	void __iomem *where = PIOP_TO_GMMIO(LBA_DEV(l), addr); \
	DBG_PORT("%s(0x%p, 0x%x, 0x%x)\n", __func__, l, addr, val); \
	WRITE_REG##size(val, where); \
	/* flush the I/O down to the elroy at least */ \
	lba_t32 = READ_U32(l->base_addr + LBA_FUNC_ID); \
}

LBA_PORT_OUT( 8, 3)
LBA_PORT_OUT(16, 2)
LBA_PORT_OUT(32, 0)


static struct pci_port_ops lba_pat_port_ops = {
	.inb =	lba_pat_in8,
	.inw =	lba_pat_in16,
	.inl =	lba_pat_in32,
	.outb =	lba_pat_out8,
	.outw =	lba_pat_out16,
	.outl =	lba_pat_out32
};



/*
** make range information from PDC available to PCI subsystem.
** We make the PDC call here in order to get the PCI bus range
** numbers. The rest will get forwarded in pcibios_fixup_bus().
** We don't have a struct pci_bus assigned to us yet.
 */
static void
lba_pat_resources(struct parisc_device *pa_dev, struct lba_device *lba_dev)
{
	unsigned long bytecnt;
	long io_count;
	long status;	/* PDC return status */
	long pa_count;
	pdc_pat_cell_mod_maddr_block_t *pa_pdc_cell;	/* PA_VIEW */
	pdc_pat_cell_mod_maddr_block_t *io_pdc_cell;	/* IO_VIEW */
	int i;

	/* Both PDC reply blocks are heap-allocated; freed at the end. */
	pa_pdc_cell = kzalloc(sizeof(pdc_pat_cell_mod_maddr_block_t), GFP_KERNEL);
	if (!pa_pdc_cell)
		return;

	io_pdc_cell = kzalloc(sizeof(pdc_pat_cell_mod_maddr_block_t), GFP_KERNEL);
	if (!io_pdc_cell) {
		kfree(pa_pdc_cell);
		return;
	}

	/* return cell module (IO view) */
	status = pdc_pat_cell_module(&bytecnt, pa_dev->pcell_loc, pa_dev->mod_index,
				PA_VIEW, pa_pdc_cell);
	pa_count = pa_pdc_cell->mod[1];

	status |= pdc_pat_cell_module(&bytecnt, pa_dev->pcell_loc, pa_dev->mod_index,
				IO_VIEW, io_pdc_cell);
	io_count = io_pdc_cell->mod[1];

	/* We've already done this once for device discovery...*/
	if (status != PDC_OK) {
		panic("pdc_pat_cell_module() call failed for LBA!\n");
	}

	if (PAT_GET_ENTITY(pa_pdc_cell->mod_info) != PAT_ENTITY_LBA) {
		panic("pdc_pat_cell_module() entity returned != PAT_ENTITY_LBA!\n");
	}

	/*
	** Inspect the resources PAT tells us about
	*/
	for (i = 0; i < pa_count; i++) {
		/* Each range is a (type, start, end) triple inside mod[]. */
		struct {
			unsigned long type;
			unsigned long start;
			unsigned long end;	/* aka finish */
		} *p, *io;
		struct resource *r;

		p = (void *) &(pa_pdc_cell->mod[2+i*3]);
		io = (void *) &(io_pdc_cell->mod[2+i*3]);

		/* Convert the PAT range data to PCI "struct resource" */
		switch(p->type & 0xff) {
		case PAT_PBNUM:
			lba_dev->hba.bus_num.start = p->start;
			lba_dev->hba.bus_num.end   = p->end;
			break;

		case PAT_LMMIO:
			/* used to fix up pre-initialized MEM BARs */
			if (!lba_dev->hba.lmmio_space.start) {
				sprintf(lba_dev->hba.lmmio_name,
						"PCI%02x LMMIO",
						(int)lba_dev->hba.bus_num.start);
				/* CPU-physical minus IO-view gives the
				** address translation offset for this rope.
				*/
				lba_dev->hba.lmmio_space_offset = p->start -
					io->start;
				r = &lba_dev->hba.lmmio_space;
				r->name = lba_dev->hba.lmmio_name;
			} else if (!lba_dev->hba.elmmio_space.start) {
				sprintf(lba_dev->hba.elmmio_name,
						"PCI%02x ELMMIO",
						(int)lba_dev->hba.bus_num.start);
				r = &lba_dev->hba.elmmio_space;
				r->name = lba_dev->hba.elmmio_name;
			} else {
				printk(KERN_WARNING MODULE_NAME
					" only supports 2 LMMIO resources!\n");
				break;
			}

			r->start  = p->start;
			r->end    = p->end;
			r->flags  = IORESOURCE_MEM;
			r->parent = r->sibling = r->child = NULL;
			break;

		case PAT_GMMIO:
			/* MMIO space > 4GB phys addr; for 64-bit BAR */
			sprintf(lba_dev->hba.gmmio_name, "PCI%02x GMMIO",
					(int)lba_dev->hba.bus_num.start);
			r = &lba_dev->hba.gmmio_space;
			r->name  = lba_dev->hba.gmmio_name;
			r->start  = p->start;
			r->end    = p->end;
			r->flags  = IORESOURCE_MEM;
			r->parent = r->sibling = r->child = NULL;
			break;

		case PAT_NPIOP:
			printk(KERN_WARNING MODULE_NAME
				" range[%d] : ignoring NPIOP (0x%lx)\n",
				i, p->start);
			break;

		case PAT_PIOP:
			/*
			** Postable I/O port space is per PCI host adapter.
			** base of 64MB PIOP region
			*/
			lba_dev->iop_base = ioremap_nocache(p->start, 64 * 1024 * 1024);

			sprintf(lba_dev->hba.io_name, "PCI%02x Ports",
					(int)lba_dev->hba.bus_num.start);
			r = &lba_dev->hba.io_space;
			r->name  = lba_dev->hba.io_name;
			r->start  = HBA_PORT_BASE(lba_dev->hba.hba_num);
			r->end    = r->start + HBA_PORT_SPACE_SIZE - 1;
			r->flags  = IORESOURCE_IO;
			r->parent = r->sibling = r->child = NULL;
			break;

		default:
			printk(KERN_WARNING MODULE_NAME
				" range[%d] : unknown pat range type (0x%lx)\n",
				i, p->type & 0xff);
			break;
		}
	}

	kfree(pa_pdc_cell);
	kfree(io_pdc_cell);
}
#else
/* keep compiler from complaining about missing declarations */
#define lba_pat_port_ops lba_astro_port_ops
#define lba_pat_resources(pa_dev, lba_dev)
#endif	/* CONFIG_64BIT */


extern void sba_distributed_lmmio(struct parisc_device *, struct resource *);
extern void sba_directed_lmmio(struct parisc_device *, struct resource *);

/* Legacy (Sprockets) counterpart of lba_pat_resources(): derive bus
** numbers and MMIO/IO windows from LBA registers and the SBA driver.
*/
static void
lba_legacy_resources(struct parisc_device *pa_dev, struct lba_device *lba_dev)
{
	struct resource *r;
	int lba_num;

	lba_dev->hba.lmmio_space_offset = PCI_F_EXTEND;

	/*
	** With "legacy" firmware, the lowest byte of FW_SCRATCH
	** represents bus->secondary and the second
byte represents
	** bus->subsidiary (i.e. highest PPB programmed by firmware).
	** PCI bus walk *should* end up with the same result.
	** FIXME: But we don't have sanity checks in PCI or LBA.
	*/
	lba_num = READ_REG32(lba_dev->hba.base_addr + LBA_FW_SCRATCH);
	r = &(lba_dev->hba.bus_num);
	r->name = "LBA PCI Busses";
	r->start = lba_num & 0xff;
	r->end = (lba_num>>8) & 0xff;

	/* Set up local PCI Bus resources - we don't need them for
	** Legacy boxes but it's nice to see in /proc/iomem.
	*/
	r = &(lba_dev->hba.lmmio_space);
	sprintf(lba_dev->hba.lmmio_name, "PCI%02x LMMIO",
					(int)lba_dev->hba.bus_num.start);
	r->name  = lba_dev->hba.lmmio_name;

#if 1
	/* We want the CPU -> IO routing of addresses.
	 * The SBA BASE/MASK registers control CPU -> IO routing.
	 * Ask SBA what is routed to this rope/LBA.
	 */
	sba_distributed_lmmio(pa_dev, r);
#else
	/*
	 * The LBA BASE/MASK registers control IO -> System routing.
	 *
	 * The following code works but doesn't get us what we want.
	 * Well, only because firmware (v5.0) on C3000 doesn't program
	 * the LBA BASE/MASE registers to be the exact inverse of
	 * the corresponding SBA registers. Other Astro/Pluto
	 * based platform firmware may do it right.
	 *
	 * Should someone want to mess with MSI, they may need to
	 * reprogram LBA BASE/MASK registers. Thus preserve the code
	 * below until MSI is known to work on C3000/A500/N4000/RP3440.
	 *
	 * Using the code below, /proc/iomem shows:
	 * ...
	 * f0000000-f0ffffff : PCI00 LMMIO
	 *   f05d0000-f05d0000 : lcd_data
	 *   f05d0008-f05d0008 : lcd_cmd
	 * f1000000-f1ffffff : PCI01 LMMIO
	 * f4000000-f4ffffff : PCI02 LMMIO
	 *   f4000000-f4001fff : sym53c8xx
	 *   f4002000-f4003fff : sym53c8xx
	 *   f4004000-f40043ff : sym53c8xx
	 *   f4005000-f40053ff : sym53c8xx
	 *   f4007000-f4007fff : ohci_hcd
	 *   f4008000-f40083ff : tulip
	 * f6000000-f6ffffff : PCI03 LMMIO
	 * f8000000-fbffffff : PCI00 ELMMIO
	 *   fa100000-fa4fffff : stifb mmio
	 *   fb000000-fb1fffff : stifb fb
	 *
	 * But everything listed under PCI02 actually lives under PCI00.
	 * This is clearly wrong.
	 *
	 * Asking SBA how things are routed tells the correct story:
	 * LMMIO_BASE/MASK/ROUTE f4000001 fc000000 00000000
	 * DIR0_BASE/MASK/ROUTE fa000001 fe000000 00000006
	 * DIR1_BASE/MASK/ROUTE f9000001 ff000000 00000004
	 * DIR2_BASE/MASK/ROUTE f0000000 fc000000 00000000
	 * DIR3_BASE/MASK/ROUTE f0000000 fc000000 00000000
	 *
	 * Which looks like this in /proc/iomem:
	 * f4000000-f47fffff : PCI00 LMMIO
	 *   f4000000-f4001fff : sym53c8xx
	 *   ...[deteled core devices - same as above]...
	 *   f4008000-f40083ff : tulip
	 * f4800000-f4ffffff : PCI01 LMMIO
	 * f6000000-f67fffff : PCI02 LMMIO
	 * f7000000-f77fffff : PCI03 LMMIO
	 * f9000000-f9ffffff : PCI02 ELMMIO
	 * fa000000-fbffffff : PCI03 ELMMIO
	 *   fa100000-fa4fffff : stifb mmio
	 *   fb000000-fb1fffff : stifb fb
	 *
	 * ie all Built-in core are under now correctly under PCI00.
	 * The "PCI02 ELMMIO" directed range is for:
	 *  +-[02]---03.0  3Dfx Interactive, Inc. Voodoo 2
	 *
	 * All is well now.
	 */
	r->start = READ_REG32(lba_dev->hba.base_addr + LBA_LMMIO_BASE);
	if (r->start & 1) {
		unsigned long rsize;

		r->flags = IORESOURCE_MEM;
		/* mmio_mask also clears Enable bit */
		r->start &= mmio_mask;
		r->start = PCI_HOST_ADDR(HBA_DATA(lba_dev), r->start);
		rsize = ~ READ_REG32(lba_dev->hba.base_addr + LBA_LMMIO_MASK);

		/*
		** Each rope only gets part of the distributed range.
		** Adjust "window" for this rope.
		*/
		rsize /= ROPES_PER_IOC;
		r->start += (rsize + 1) * LBA_NUM(pa_dev->hpa.start);
		r->end = r->start + rsize;
	} else {
		r->end = r->start = 0;	/* Not enabled. */
	}
#endif

	/*
	** "Directed" ranges are used when the "distributed range" isn't
	** sufficient for all devices below a given LBA.  Typically devices
	** like graphics cards or X25 may need a directed range when the
	** bus has multiple slots (ie multiple devices) or the device
	** needs more than the typical 4 or 8MB a distributed range offers.
	**
	** The main reason for ignoring it now frigging complications.
	** Directed ranges may overlap (and have precedence) over
	** distributed ranges. Or a distributed range assigned to a unused
	** rope may be used by a directed range on a different rope.
	** Support for graphics devices may require fixing this
	** since they may be assigned a directed range which overlaps
	** an existing (but unused portion of) distributed range.
	*/
	r = &(lba_dev->hba.elmmio_space);
	sprintf(lba_dev->hba.elmmio_name, "PCI%02x ELMMIO",
					(int)lba_dev->hba.bus_num.start);
	r->name  = lba_dev->hba.elmmio_name;

#if 1
	/* See comment which precedes call to sba_directed_lmmio() */
	sba_directed_lmmio(pa_dev, r);
#else
	r->start = READ_REG32(lba_dev->hba.base_addr + LBA_ELMMIO_BASE);

	if (r->start & 1) {
		unsigned long rsize;
		r->flags = IORESOURCE_MEM;
		/* mmio_mask also clears Enable bit */
		r->start &= mmio_mask;
		r->start = PCI_HOST_ADDR(HBA_DATA(lba_dev), r->start);
		rsize = READ_REG32(lba_dev->hba.base_addr + LBA_ELMMIO_MASK);
		r->end = r->start + ~rsize;
	}
#endif

	r = &(lba_dev->hba.io_space);
	sprintf(lba_dev->hba.io_name, "PCI%02x Ports",
					(int)lba_dev->hba.bus_num.start);
	r->name  = lba_dev->hba.io_name;
	r->flags = IORESOURCE_IO;
	r->start = READ_REG32(lba_dev->hba.base_addr + LBA_IOS_BASE) & ~1L;
	r->end   = r->start + (READ_REG32(lba_dev->hba.base_addr + LBA_IOS_MASK) ^ (HBA_PORT_SPACE_SIZE - 1));

	/* Virtualize the I/O Port space ranges */
	lba_num = HBA_PORT_BASE(lba_dev->hba.hba_num);
	r->start |= lba_num;
	r->end   |= lba_num;
}


/**************************************************************************
**
**   LBA initialization code (HW and SW)
**
**   o identify LBA chip itself
**   o initialize LBA chip modes (HardFail)
**   o FIXME: initialize DMA hints for reasonable defaults
**   o enable configuration functions
**   o call pci_register_ops() to discover devs (fixup/fixup_bus get invoked)
**
**************************************************************************/

static int __init
lba_hw_init(struct lba_device *d)
{
	u32 stat;
	u32 bus_reset;	/* PDC_PAT_BUG */

#if 0
	printk(KERN_DEBUG "LBA %lx  STAT_CTL %Lx  ERROR_CFG %Lx  STATUS %Lx  DMA_CTL %Lx\n",
		d->hba.base_addr,
		READ_REG64(d->hba.base_addr + LBA_STAT_CTL),
		READ_REG64(d->hba.base_addr + LBA_ERROR_CONFIG),
		READ_REG64(d->hba.base_addr + LBA_ERROR_STATUS),
		READ_REG64(d->hba.base_addr + LBA_DMA_CTL) );
	printk(KERN_DEBUG "	ARB mask %Lx  pri %Lx  mode %Lx  mtlt %Lx\n",
		READ_REG64(d->hba.base_addr + LBA_ARB_MASK),
		READ_REG64(d->hba.base_addr + LBA_ARB_PRI),
		READ_REG64(d->hba.base_addr + LBA_ARB_MODE),
		READ_REG64(d->hba.base_addr + LBA_ARB_MTLT) );
	printk(KERN_DEBUG "	HINT cfg 0x%Lx\n",
		READ_REG64(d->hba.base_addr + LBA_HINT_CFG));
	printk(KERN_DEBUG "	HINT reg ");
	{ int i;
	for (i=LBA_HINT_BASE; i< (14*8 + LBA_HINT_BASE); i+=8)
		printk(" %Lx", READ_REG64(d->hba.base_addr + i));
	}
	printk("\n");
#endif	/* DEBUG_LBA_PAT */

#ifdef CONFIG_64BIT
/*
 * FIXME add support for PDC_PAT_IO "Get slot status" - OLAR support
 * Only N-Class and up can really make use of Get slot status.
 * maybe L-class too but I've never played with it there.
 */
#endif

	/* PDC_PAT_BUG: exhibited in rev 40.48  on L2000 */
	bus_reset = READ_REG32(d->hba.base_addr + LBA_STAT_CTL + 4) & 1;
	if (bus_reset) {
		printk(KERN_DEBUG "NOTICE: PCI bus reset still asserted! (clearing)\n");
	}

	stat = READ_REG32(d->hba.base_addr + LBA_ERROR_CONFIG);
	if (stat & LBA_SMART_MODE) {
		printk(KERN_DEBUG "NOTICE: LBA in SMART mode! (cleared)\n");
		stat &= ~LBA_SMART_MODE;
		WRITE_REG32(stat, d->hba.base_addr + LBA_ERROR_CONFIG);
	}

	/* Set HF mode as the default (vs. -1 mode). */
	stat = READ_REG32(d->hba.base_addr + LBA_STAT_CTL);
	WRITE_REG32(stat | HF_ENABLE, d->hba.base_addr + LBA_STAT_CTL);

	/*
	** Writing a zero to STAT_CTL.rf (bit 0) will clear reset signal
	** if it's not already set. If we just cleared the PCI Bus Reset
	** signal, wait a bit for the PCI devices to recover and setup.
	*/
	if (bus_reset)
		mdelay(pci_post_reset_delay);

	if (0 == READ_REG32(d->hba.base_addr + LBA_ARB_MASK)) {
		/*
		** PDC_PAT_BUG: PDC rev 40.48 on L2000.
		** B2000/C3600/J6000 also have this problem?
		**
		** Elroys with hot pluggable slots don't get configured
		** correctly if the slot is empty.  ARB_MASK is set to 0
		** and we can't master transactions on the bus if it's
		** not at least one. 0x3 enables elroy and first slot.
		*/
		printk(KERN_DEBUG "NOTICE: Enabling PCI Arbitration\n");
		WRITE_REG32(0x3, d->hba.base_addr + LBA_ARB_MASK);
	}

	/*
	** FIXME: Hint registers are programmed with default hint
	** values by firmware. Hints should be sane even if we
	** can't reprogram them the way drivers want.
	*/
	return 0;
}

/*
 * Unfortunately, when firmware numbers busses, it doesn't take into account
 * Cardbus bridges.  So we have to renumber the busses to suit ourselves.
 * Elroy/Mercury don't actually know what bus number they're attached to;
 * we use bus 0 to indicate the directly attached bus and any other bus
 * number will be taken care of by the PCI-PCI bridge.
 */
static unsigned int lba_next_bus = 0;

/*
 * Determine if lba should claim this chip (return 0) or not (return 1).
 * If so, initialize the chip and tell other partners in crime they
 * have work to do.
 */
static int __init
lba_driver_probe(struct parisc_device *dev)
{
	struct lba_device *lba_dev;
	LIST_HEAD(resources);
	struct pci_bus *lba_bus;
	struct pci_ops *cfg_ops;
	u32 func_class;
	void *tmp_obj;
	char *version;
	void __iomem *addr = ioremap_nocache(dev->hpa.start, 4096);

	/* Read HW Rev First */
	func_class = READ_REG32(addr + LBA_FCLASS);

	if (IS_ELROY(dev)) {
		/* Low nibble of the function class encodes the Elroy rev. */
		func_class &= 0xf;
		switch (func_class) {
		case 0:	version = "TR1.0"; break;
		case 1:	version = "TR2.0"; break;
		case 2:	version = "TR2.1"; break;
		case 3:	version = "TR2.2"; break;
		case 4:	version = "TR3.0"; break;
		case 5:	version = "TR4.0"; break;
		default: version = "TR4+";
		}

		printk(KERN_INFO "Elroy version %s (0x%x) found at 0x%lx\n",
		       version, func_class & 0xf, (long)dev->hpa.start);

		if (func_class < 2) {
			printk(KERN_WARNING "Can't support LBA older than "
				"TR2.1 - continuing under adversity.\n");
		}

#if 0
/* Elroy TR4.0 should work with simple algorithm.
   But it doesn't.  Still missing something. *sigh*
*/
		if (func_class > 4) {
			cfg_ops = &mercury_cfg_ops;
		} else
#endif
		{
			cfg_ops = &elroy_cfg_ops;
		}

	} else if (IS_MERCURY(dev) || IS_QUICKSILVER(dev)) {
		int major, minor;

		func_class &= 0xff;
		major = func_class >> 4, minor = func_class & 0xf;

		/* We could use one printk for both Elroy and Mercury,
		 * but for the mask for func_class.
		 */
		printk(KERN_INFO "%s version TR%d.%d (0x%x) found at 0x%lx\n",
		       IS_MERCURY(dev) ? "Mercury" : "Quicksilver", major,
		       minor, func_class, (long)dev->hpa.start);

		cfg_ops = &mercury_cfg_ops;
	} else {
		printk(KERN_ERR "Unknown LBA found at 0x%lx\n",
			(long)dev->hpa.start);
		return -ENODEV;
	}

	/* Tell I/O SAPIC driver we have a IRQ handler/region. */
	tmp_obj = iosapic_register(dev->hpa.start + LBA_IOSAPIC_BASE);

	/* NOTE: PCI devices (e.g. 103c:1005 graphics card) which don't
	**	have an IRT entry will get NULL back from iosapic code.
	*/

	lba_dev = kzalloc(sizeof(struct lba_device), GFP_KERNEL);
	if (!lba_dev) {
		printk(KERN_ERR "lba_init_chip - couldn't alloc lba_device\n");
		/* NOTE(review): 'addr' mapping is not unwound on this
		** error path; presumably acceptable at boot - confirm.
		*/
		return(1);
	}


	/* ---------- First : initialize data we already have --------- */

	lba_dev->hw_rev = func_class;
	lba_dev->hba.base_addr = addr;
	lba_dev->hba.dev = dev;
	lba_dev->iosapic_obj = tmp_obj;  /* save interrupt handle */
	lba_dev->hba.iommu = sba_get_iommu(dev);  /* get iommu data */
	parisc_set_drvdata(dev, lba_dev);

	/* ------------ Second : initialize common stuff ---------- */
	pci_bios = &lba_bios_ops;
	pcibios_register_hba(HBA_DATA(lba_dev));
	spin_lock_init(&lba_dev->lba_lock);

	if (lba_hw_init(lba_dev))
		return(1);

	/* ---------- Third : setup I/O Port and MMIO resources  --------- */

	if (is_pdc_pat()) {
		/* PDC PAT firmware uses PIOP region of GMMIO space. */
		pci_port = &lba_pat_port_ops;
		/* Go ask PDC PAT what resources this LBA has */
		lba_pat_resources(dev, lba_dev);
	} else {
		if (!astro_iop_base) {
			/* Sprockets PDC uses NPIOP region */
			astro_iop_base = ioremap_nocache(LBA_PORT_BASE, 64 * 1024);
			pci_port = &lba_astro_port_ops;
		}

		/* Poke the chip a bit for /proc output */
		lba_legacy_resources(dev, lba_dev);
	}

	if (lba_dev->hba.bus_num.start < lba_next_bus)
		lba_dev->hba.bus_num.start = lba_next_bus;

	/*   Overlaps with elmmio can (and should) fail here.
	 *   We will prune (or ignore) the distributed range.
	 *
	 *   FIXME: SBA code should register all elmmio ranges first.
	 *      that would take care of elmmio ranges routed
	 *	to a different rope (already discovered) from
	 *	getting registered *after* LBA code has already
	 *	registered it's distributed lmmio range.
	 */
	if (truncate_pat_collision(&iomem_resource,
				   &(lba_dev->hba.lmmio_space))) {
		printk(KERN_WARNING "LBA: lmmio_space [%lx/%lx] duplicate!\n",
				(long)lba_dev->hba.lmmio_space.start,
				(long)lba_dev->hba.lmmio_space.end);
		lba_dev->hba.lmmio_space.flags = 0;
	}

	pci_add_resource_offset(&resources, &lba_dev->hba.io_space,
				HBA_PORT_BASE(lba_dev->hba.hba_num));
	if (lba_dev->hba.elmmio_space.start)
		pci_add_resource_offset(&resources, &lba_dev->hba.elmmio_space,
					lba_dev->hba.lmmio_space_offset);
	if (lba_dev->hba.lmmio_space.flags)
		pci_add_resource_offset(&resources, &lba_dev->hba.lmmio_space,
					lba_dev->hba.lmmio_space_offset);
	if (lba_dev->hba.gmmio_space.flags)
		pci_add_resource(&resources, &lba_dev->hba.gmmio_space);

	dev->dev.platform_data = lba_dev;
	lba_bus = lba_dev->hba.hba_bus =
		pci_create_root_bus(&dev->dev, lba_dev->hba.bus_num.start,
				    cfg_ops, NULL, &resources);
	if (!lba_bus) {
		pci_free_resource_list(&resources);
		return 0;
	}

	lba_bus->subordinate = pci_scan_child_bus(lba_bus);

	/* This is in lieu of calling pci_assign_unassigned_resources() */
	if (is_pdc_pat()) {
		/* assign resources to un-initialized devices */

		DBG_PAT("LBA pci_bus_size_bridges()\n");
		pci_bus_size_bridges(lba_bus);

		DBG_PAT("LBA pci_bus_assign_resources()\n");
		pci_bus_assign_resources(lba_bus);

#ifdef DEBUG_LBA_PAT
		DBG_PAT("\nLBA PIOP resource tree\n");
		lba_dump_res(&lba_dev->hba.io_space, 2);
		DBG_PAT("\nLBA LMMIO resource tree\n");
		lba_dump_res(&lba_dev->hba.lmmio_space, 2);
#endif
	}
	pci_enable_bridges(lba_bus);

	/*
	** Once PCI register ops has walked the bus, access to config
	** space is restricted. Avoids master aborts on config cycles.
	** Early LBA revs go fatal on *any* master abort.
	*/
	if (cfg_ops == &elroy_cfg_ops) {
		lba_dev->flags |= LBA_FLAG_SKIP_PROBE;
	}

	lba_next_bus = lba_bus->subordinate + 1;
	pci_bus_add_devices(lba_bus);

	/* Whew! Finally done! Tell services we got this one covered. */
	return 0;
}

/* Hardware IDs this driver binds to: Elroy, Mercury, Quicksilver bridges. */
static struct parisc_device_id lba_tbl[] = {
	{ HPHW_BRIDGE, HVERSION_REV_ANY_ID, ELROY_HVERS, 0xa },
	{ HPHW_BRIDGE, HVERSION_REV_ANY_ID, MERCURY_HVERS, 0xa },
	{ HPHW_BRIDGE, HVERSION_REV_ANY_ID, QUICKSILVER_HVERS, 0xa },
	{ 0, }
};

static struct parisc_driver lba_driver = {
	.name =		MODULE_NAME,
	.id_table =	lba_tbl,
	.probe =	lba_driver_probe,
};

/*
** One time initialization to let the world know the LBA was found.
** Must be called exactly once before pci_init().
*/
void __init lba_init(void)
{
	register_parisc_driver(&lba_driver);
}

/*
** Initialize the IBASE/IMASK registers for LBA (Elroy).
** Only called from sba_iommu.c in order to route ranges (MMIO vs DMA).
** sba_iommu is responsible for locking (none needed at init time).
*/
void lba_set_iregs(struct parisc_device *lba, u32 ibase, u32 imask)
{
	void __iomem * base_addr = ioremap_nocache(lba->hpa.start, 4096);

	imask <<= 2;	/* adjust for hints - 2 more bits */

	/* Make sure we aren't trying to set bits that aren't writeable. */
	WARN_ON((ibase & 0x001fffff) != 0);
	WARN_ON((imask & 0x001fffff) != 0);

	DBG("%s() ibase 0x%x imask 0x%x\n", __func__, ibase, imask);
	WRITE_REG32( imask, base_addr + LBA_IMASK);
	WRITE_REG32( ibase, base_addr + LBA_IBASE);
	iounmap(base_addr);
}
gpl-2.0
AOKP/kernel_asus_flo
arch/arm/mach-omap1/dma.c
4846
8656
/*
 * OMAP1/OMAP7xx - specific DMA driver
 *
 * Copyright (C) 2003 - 2008 Nokia Corporation
 * Author: Juha Yrjölä <juha.yrjola@nokia.com>
 * DMA channel linking for 1610 by Samuel Ortiz <samuel.ortiz@nokia.com>
 * Graphics DMA and LCD DMA graphics tranformations
 * by Imre Deak <imre.deak@nokia.com>
 * OMAP2/3 support Copyright (C) 2004-2007 Texas Instruments, Inc.
 * Some functions based on earlier dma-omap.c Copyright (C) 2001 RidgeRun, Inc.
 *
 * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com/
 * Converted DMA library into platform driver
 *	- G, Manjunath Kondaiah <manjugk@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/io.h>

#include <plat/dma.h>
#include <plat/tc.h>
#include <plat/irqs.h>

#define OMAP1_DMA_BASE			(0xfffed800)
#define OMAP1_LOGICAL_DMA_CH_COUNT	17
#define OMAP1_DMA_STRIDE		0x40

/* Erratum flags computed at init and honored by the DMA core. */
static u32 errata;
static u32 enable_1510_mode;
/* Byte stride between per-channel register banks. */
static u8 dma_stride;
/* First/last register offsets that are per-channel (stride applies). */
static enum omap_reg_offsets dma_common_ch_start, dma_common_ch_end;

/* Register offset table, indexed by the omap_reg_offsets enum.
 * Global registers live at fixed offsets; entries from CSDP on are
 * per-channel and get (dma_stride * lch) added in dma_read/dma_write.
 */
static u16 reg_map[] = {
	[GCR]		= 0x400,
	[GSCR]		= 0x404,
	[GRST1]		= 0x408,
	[HW_ID]		= 0x442,
	[PCH2_ID]	= 0x444,
	[PCH0_ID]	= 0x446,
	[PCH1_ID]	= 0x448,
	[PCHG_ID]	= 0x44a,
	[PCHD_ID]	= 0x44c,
	[CAPS_0]	= 0x44e,
	[CAPS_1]	= 0x452,
	[CAPS_2]	= 0x456,
	[CAPS_3]	= 0x458,
	[CAPS_4]	= 0x45a,
	[PCH2_SR]	= 0x460,
	[PCH0_SR]	= 0x480,
	[PCH1_SR]	= 0x482,
	[PCHD_SR]	= 0x4c0,

	/* Common Registers */
	[CSDP]		= 0x00,
	[CCR]		= 0x02,
	[CICR]		= 0x04,
	[CSR]		= 0x06,
	[CEN]		= 0x10,
	[CFN]		= 0x12,
	[CSFI]		= 0x14,
	[CSEI]		= 0x16,
	[CPC]		= 0x18,	/* 15xx only */
	[CSAC]		= 0x18,
	[CDAC]		= 0x1a,
	[CDEI]		= 0x1c,
	[CDFI]		= 0x1e,
	[CLNK_CTRL]	= 0x28,

	/* Channel specific register offsets */
	[CSSA]		= 0x08,
	[CDSA]		= 0x0c,
	[COLOR]		= 0x20,
	[CCR2]		= 0x24,
	[LCH_CTRL]	= 0x2a,
};

static struct resource res[]
__initdata = { [0] = { .start = OMAP1_DMA_BASE, .end = OMAP1_DMA_BASE + SZ_2K - 1, .flags = IORESOURCE_MEM, }, [1] = { .name = "0", .start = INT_DMA_CH0_6, .flags = IORESOURCE_IRQ, }, [2] = { .name = "1", .start = INT_DMA_CH1_7, .flags = IORESOURCE_IRQ, }, [3] = { .name = "2", .start = INT_DMA_CH2_8, .flags = IORESOURCE_IRQ, }, [4] = { .name = "3", .start = INT_DMA_CH3, .flags = IORESOURCE_IRQ, }, [5] = { .name = "4", .start = INT_DMA_CH4, .flags = IORESOURCE_IRQ, }, [6] = { .name = "5", .start = INT_DMA_CH5, .flags = IORESOURCE_IRQ, }, /* Handled in lcd_dma.c */ [7] = { .name = "6", .start = INT_1610_DMA_CH6, .flags = IORESOURCE_IRQ, }, /* irq's for omap16xx and omap7xx */ [8] = { .name = "7", .start = INT_1610_DMA_CH7, .flags = IORESOURCE_IRQ, }, [9] = { .name = "8", .start = INT_1610_DMA_CH8, .flags = IORESOURCE_IRQ, }, [10] = { .name = "9", .start = INT_1610_DMA_CH9, .flags = IORESOURCE_IRQ, }, [11] = { .name = "10", .start = INT_1610_DMA_CH10, .flags = IORESOURCE_IRQ, }, [12] = { .name = "11", .start = INT_1610_DMA_CH11, .flags = IORESOURCE_IRQ, }, [13] = { .name = "12", .start = INT_1610_DMA_CH12, .flags = IORESOURCE_IRQ, }, [14] = { .name = "13", .start = INT_1610_DMA_CH13, .flags = IORESOURCE_IRQ, }, [15] = { .name = "14", .start = INT_1610_DMA_CH14, .flags = IORESOURCE_IRQ, }, [16] = { .name = "15", .start = INT_1610_DMA_CH15, .flags = IORESOURCE_IRQ, }, [17] = { .name = "16", .start = INT_DMA_LCD, .flags = IORESOURCE_IRQ, }, }; static void __iomem *dma_base; static inline void dma_write(u32 val, int reg, int lch) { u8 stride; u32 offset; stride = (reg >= dma_common_ch_start) ? dma_stride : 0; offset = reg_map[reg] + (stride * lch); __raw_writew(val, dma_base + offset); if ((reg > CLNK_CTRL && reg < CCEN) || (reg > PCHD_ID && reg < CAPS_2)) { u32 offset2 = reg_map[reg] + 2 + (stride * lch); __raw_writew(val >> 16, dma_base + offset2); } } static inline u32 dma_read(int reg, int lch) { u8 stride; u32 offset, val; stride = (reg >= dma_common_ch_start) ? 
dma_stride : 0; offset = reg_map[reg] + (stride * lch); val = __raw_readw(dma_base + offset); if ((reg > CLNK_CTRL && reg < CCEN) || (reg > PCHD_ID && reg < CAPS_2)) { u16 upper; u32 offset2 = reg_map[reg] + 2 + (stride * lch); upper = __raw_readw(dma_base + offset2); val |= (upper << 16); } return val; } static void omap1_clear_lch_regs(int lch) { int i = dma_common_ch_start; for (; i <= dma_common_ch_end; i += 1) dma_write(0, i, lch); } static void omap1_clear_dma(int lch) { u32 l; l = dma_read(CCR, lch); l &= ~OMAP_DMA_CCR_EN; dma_write(l, CCR, lch); /* Clear pending interrupts */ l = dma_read(CSR, lch); } static void omap1_show_dma_caps(void) { if (enable_1510_mode) { printk(KERN_INFO "DMA support for OMAP15xx initialized\n"); } else { u16 w; printk(KERN_INFO "OMAP DMA hardware version %d\n", dma_read(HW_ID, 0)); printk(KERN_INFO "DMA capabilities: %08x:%08x:%04x:%04x:%04x\n", dma_read(CAPS_0, 0), dma_read(CAPS_1, 0), dma_read(CAPS_2, 0), dma_read(CAPS_3, 0), dma_read(CAPS_4, 0)); /* Disable OMAP 3.0/3.1 compatibility mode. */ w = dma_read(GSCR, 0); w |= 1 << 3; dma_write(w, GSCR, 0); } return; } static u32 configure_dma_errata(void) { /* * Erratum 3.2/3.3: sometimes 0 is returned if CSAC/CDAC is * read before the DMA controller finished disabling the channel. 
*/ if (!cpu_is_omap15xx()) SET_DMA_ERRATA(DMA_ERRATA_3_3); return errata; } static int __init omap1_system_dma_init(void) { struct omap_system_dma_plat_info *p; struct omap_dma_dev_attr *d; struct platform_device *pdev; int ret; pdev = platform_device_alloc("omap_dma_system", 0); if (!pdev) { pr_err("%s: Unable to device alloc for dma\n", __func__); return -ENOMEM; } dma_base = ioremap(res[0].start, resource_size(&res[0])); if (!dma_base) { pr_err("%s: Unable to ioremap\n", __func__); ret = -ENODEV; goto exit_device_put; } ret = platform_device_add_resources(pdev, res, ARRAY_SIZE(res)); if (ret) { dev_err(&pdev->dev, "%s: Unable to add resources for %s%d\n", __func__, pdev->name, pdev->id); goto exit_device_put; } p = kzalloc(sizeof(struct omap_system_dma_plat_info), GFP_KERNEL); if (!p) { dev_err(&pdev->dev, "%s: Unable to allocate 'p' for %s\n", __func__, pdev->name); ret = -ENOMEM; goto exit_device_del; } d = kzalloc(sizeof(struct omap_dma_dev_attr), GFP_KERNEL); if (!d) { dev_err(&pdev->dev, "%s: Unable to allocate 'd' for %s\n", __func__, pdev->name); ret = -ENOMEM; goto exit_release_p; } d->lch_count = OMAP1_LOGICAL_DMA_CH_COUNT; /* Valid attributes for omap1 plus processors */ if (cpu_is_omap15xx()) d->dev_caps = ENABLE_1510_MODE; enable_1510_mode = d->dev_caps & ENABLE_1510_MODE; d->dev_caps |= SRC_PORT; d->dev_caps |= DST_PORT; d->dev_caps |= SRC_INDEX; d->dev_caps |= DST_INDEX; d->dev_caps |= IS_BURST_ONLY4; d->dev_caps |= CLEAR_CSR_ON_READ; d->dev_caps |= IS_WORD_16; d->chan = kzalloc(sizeof(struct omap_dma_lch) * (d->lch_count), GFP_KERNEL); if (!d->chan) { dev_err(&pdev->dev, "%s: Memory allocation failed" "for d->chan!!!\n", __func__); goto exit_release_d; } if (cpu_is_omap15xx()) d->chan_count = 9; else if (cpu_is_omap16xx() || cpu_is_omap7xx()) { if (!(d->dev_caps & ENABLE_1510_MODE)) d->chan_count = 16; else d->chan_count = 9; } p->dma_attr = d; p->show_dma_caps = omap1_show_dma_caps; p->clear_lch_regs = omap1_clear_lch_regs; p->clear_dma = 
omap1_clear_dma; p->dma_write = dma_write; p->dma_read = dma_read; p->disable_irq_lch = NULL; p->errata = configure_dma_errata(); ret = platform_device_add_data(pdev, p, sizeof(*p)); if (ret) { dev_err(&pdev->dev, "%s: Unable to add resources for %s%d\n", __func__, pdev->name, pdev->id); goto exit_release_chan; } ret = platform_device_add(pdev); if (ret) { dev_err(&pdev->dev, "%s: Unable to add resources for %s%d\n", __func__, pdev->name, pdev->id); goto exit_release_chan; } dma_stride = OMAP1_DMA_STRIDE; dma_common_ch_start = CPC; dma_common_ch_end = COLOR; return ret; exit_release_chan: kfree(d->chan); exit_release_d: kfree(d); exit_release_p: kfree(p); exit_device_del: platform_device_del(pdev); exit_device_put: platform_device_put(pdev); return ret; } arch_initcall(omap1_system_dma_init);
gpl-2.0
kostoulhs/android_kernel_samsung_loganrelte
arch/arm/mach-s3c24xx/dma-s3c2412.c
5102
4685
/* linux/arch/arm/mach-s3c2412/dma.c * * Copyright (c) 2006 Simtec Electronics * Ben Dooks <ben@simtec.co.uk> * * S3C2412 DMA selection * * http://armlinux.simtec.co.uk/ * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/device.h> #include <linux/serial_core.h> #include <linux/io.h> #include <mach/dma.h> #include <plat/dma-s3c24xx.h> #include <plat/cpu.h> #include <plat/regs-serial.h> #include <mach/regs-gpio.h> #include <plat/regs-ac97.h> #include <plat/regs-dma.h> #include <mach/regs-mem.h> #include <mach/regs-lcd.h> #include <mach/regs-sdi.h> #include <plat/regs-iis.h> #include <plat/regs-spi.h> #define MAP(x) { (x)| DMA_CH_VALID, (x)| DMA_CH_VALID, (x)| DMA_CH_VALID, (x)| DMA_CH_VALID } static struct s3c24xx_dma_map __initdata s3c2412_dma_mappings[] = { [DMACH_XD0] = { .name = "xdreq0", .channels = MAP(S3C2412_DMAREQSEL_XDREQ0), .channels_rx = MAP(S3C2412_DMAREQSEL_XDREQ0), }, [DMACH_XD1] = { .name = "xdreq1", .channels = MAP(S3C2412_DMAREQSEL_XDREQ1), .channels_rx = MAP(S3C2412_DMAREQSEL_XDREQ1), }, [DMACH_SDI] = { .name = "sdi", .channels = MAP(S3C2412_DMAREQSEL_SDI), .channels_rx = MAP(S3C2412_DMAREQSEL_SDI), }, [DMACH_SPI0] = { .name = "spi0", .channels = MAP(S3C2412_DMAREQSEL_SPI0TX), .channels_rx = MAP(S3C2412_DMAREQSEL_SPI0RX), }, [DMACH_SPI1] = { .name = "spi1", .channels = MAP(S3C2412_DMAREQSEL_SPI1TX), .channels_rx = MAP(S3C2412_DMAREQSEL_SPI1RX), }, [DMACH_UART0] = { .name = "uart0", .channels = MAP(S3C2412_DMAREQSEL_UART0_0), .channels_rx = MAP(S3C2412_DMAREQSEL_UART0_0), }, [DMACH_UART1] = { .name = "uart1", .channels = MAP(S3C2412_DMAREQSEL_UART1_0), .channels_rx = MAP(S3C2412_DMAREQSEL_UART1_0), }, [DMACH_UART2] = { .name = "uart2", .channels = MAP(S3C2412_DMAREQSEL_UART2_0), .channels_rx = MAP(S3C2412_DMAREQSEL_UART2_0), }, [DMACH_UART0_SRC2] = 
{ .name = "uart0", .channels = MAP(S3C2412_DMAREQSEL_UART0_1), .channels_rx = MAP(S3C2412_DMAREQSEL_UART0_1), }, [DMACH_UART1_SRC2] = { .name = "uart1", .channels = MAP(S3C2412_DMAREQSEL_UART1_1), .channels_rx = MAP(S3C2412_DMAREQSEL_UART1_1), }, [DMACH_UART2_SRC2] = { .name = "uart2", .channels = MAP(S3C2412_DMAREQSEL_UART2_1), .channels_rx = MAP(S3C2412_DMAREQSEL_UART2_1), }, [DMACH_TIMER] = { .name = "timer", .channels = MAP(S3C2412_DMAREQSEL_TIMER), .channels_rx = MAP(S3C2412_DMAREQSEL_TIMER), }, [DMACH_I2S_IN] = { .name = "i2s-sdi", .channels = MAP(S3C2412_DMAREQSEL_I2SRX), .channels_rx = MAP(S3C2412_DMAREQSEL_I2SRX), }, [DMACH_I2S_OUT] = { .name = "i2s-sdo", .channels = MAP(S3C2412_DMAREQSEL_I2STX), .channels_rx = MAP(S3C2412_DMAREQSEL_I2STX), }, [DMACH_USB_EP1] = { .name = "usb-ep1", .channels = MAP(S3C2412_DMAREQSEL_USBEP1), .channels_rx = MAP(S3C2412_DMAREQSEL_USBEP1), }, [DMACH_USB_EP2] = { .name = "usb-ep2", .channels = MAP(S3C2412_DMAREQSEL_USBEP2), .channels_rx = MAP(S3C2412_DMAREQSEL_USBEP2), }, [DMACH_USB_EP3] = { .name = "usb-ep3", .channels = MAP(S3C2412_DMAREQSEL_USBEP3), .channels_rx = MAP(S3C2412_DMAREQSEL_USBEP3), }, [DMACH_USB_EP4] = { .name = "usb-ep4", .channels = MAP(S3C2412_DMAREQSEL_USBEP4), .channels_rx = MAP(S3C2412_DMAREQSEL_USBEP4), }, }; static void s3c2412_dma_direction(struct s3c2410_dma_chan *chan, struct s3c24xx_dma_map *map, enum dma_data_direction dir) { unsigned long chsel; if (dir == DMA_FROM_DEVICE) chsel = map->channels_rx[0]; else chsel = map->channels[0]; chsel &= ~DMA_CH_VALID; chsel |= S3C2412_DMAREQSEL_HW; writel(chsel, chan->regs + S3C2412_DMA_DMAREQSEL); } static void s3c2412_dma_select(struct s3c2410_dma_chan *chan, struct s3c24xx_dma_map *map) { s3c2412_dma_direction(chan, map, chan->source); } static struct s3c24xx_dma_selection __initdata s3c2412_dma_sel = { .select = s3c2412_dma_select, .direction = s3c2412_dma_direction, .dcon_mask = 0, .map = s3c2412_dma_mappings, .map_size = ARRAY_SIZE(s3c2412_dma_mappings), 
}; static int __init s3c2412_dma_add(struct device *dev, struct subsys_interface *sif) { s3c2410_dma_init(); return s3c24xx_dma_init_map(&s3c2412_dma_sel); } static struct subsys_interface s3c2412_dma_interface = { .name = "s3c2412_dma", .subsys = &s3c2412_subsys, .add_dev = s3c2412_dma_add, }; static int __init s3c2412_dma_init(void) { return subsys_interface_register(&s3c2412_dma_interface); } arch_initcall(s3c2412_dma_init);
gpl-2.0
roguesyko/the_reaper_mako
sound/pci/cs46xx/cs46xx_lib.c
5102
107332
/* * Copyright (c) by Jaroslav Kysela <perex@perex.cz> * Abramo Bagnara <abramo@alsa-project.org> * Cirrus Logic, Inc. * Routines for control of Cirrus Logic CS461x chips * * KNOWN BUGS: * - Sometimes the SPDIF input DSP tasks get's unsynchronized * and the SPDIF get somewhat "distorcionated", or/and left right channel * are swapped. To get around this problem when it happens, mute and unmute * the SPDIF input mixer control. * - On the Hercules Game Theater XP the amplifier are sometimes turned * off on inadecuate moments which causes distorcions on sound. * * TODO: * - Secondary CODEC on some soundcards * - SPDIF input support for other sample rates then 48khz * - Posibility to mix the SPDIF output with analog sources. * - PCM channels for Center and LFE on secondary codec * * NOTE: with CONFIG_SND_CS46XX_NEW_DSP unset uses old DSP image (which * is default configuration), no SPDIF, no secondary codec, no * multi channel PCM. But known to work. * * FINALLY: A credit to the developers Tom and Jordan * at Cirrus for have helping me out with the DSP, however we * still don't have sufficient documentation and technical * references to be able to implement all fancy feutures * supported by the cs46xx DSP's. * Benny <benny@hostmobility.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
* * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/delay.h> #include <linux/pci.h> #include <linux/pm.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/slab.h> #include <linux/gameport.h> #include <linux/mutex.h> #include <linux/export.h> #include <sound/core.h> #include <sound/control.h> #include <sound/info.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include <sound/cs46xx.h> #include <asm/io.h> #include "cs46xx_lib.h" #include "dsp_spos.h" static void amp_voyetra(struct snd_cs46xx *chip, int change); #ifdef CONFIG_SND_CS46XX_NEW_DSP static struct snd_pcm_ops snd_cs46xx_playback_rear_ops; static struct snd_pcm_ops snd_cs46xx_playback_indirect_rear_ops; static struct snd_pcm_ops snd_cs46xx_playback_clfe_ops; static struct snd_pcm_ops snd_cs46xx_playback_indirect_clfe_ops; static struct snd_pcm_ops snd_cs46xx_playback_iec958_ops; static struct snd_pcm_ops snd_cs46xx_playback_indirect_iec958_ops; #endif static struct snd_pcm_ops snd_cs46xx_playback_ops; static struct snd_pcm_ops snd_cs46xx_playback_indirect_ops; static struct snd_pcm_ops snd_cs46xx_capture_ops; static struct snd_pcm_ops snd_cs46xx_capture_indirect_ops; static unsigned short snd_cs46xx_codec_read(struct snd_cs46xx *chip, unsigned short reg, int codec_index) { int count; unsigned short result,tmp; u32 offset = 0; if (snd_BUG_ON(codec_index != CS46XX_PRIMARY_CODEC_INDEX && codec_index != CS46XX_SECONDARY_CODEC_INDEX)) return -EINVAL; chip->active_ctrl(chip, 1); if (codec_index == CS46XX_SECONDARY_CODEC_INDEX) offset = CS46XX_SECONDARY_CODEC_OFFSET; /* * 1. Write ACCAD = Command Address Register = 46Ch for AC97 register address * 2. Write ACCDA = Command Data Register = 470h for data to write to AC97 * 3. Write ACCTL = Control Register = 460h for initiating the write7---55 * 4. 
Read ACCTL = 460h, DCV should be reset by now and 460h = 17h * 5. if DCV not cleared, break and return error * 6. Read ACSTS = Status Register = 464h, check VSTS bit */ snd_cs46xx_peekBA0(chip, BA0_ACSDA + offset); tmp = snd_cs46xx_peekBA0(chip, BA0_ACCTL); if ((tmp & ACCTL_VFRM) == 0) { snd_printk(KERN_WARNING "cs46xx: ACCTL_VFRM not set 0x%x\n",tmp); snd_cs46xx_pokeBA0(chip, BA0_ACCTL, (tmp & (~ACCTL_ESYN)) | ACCTL_VFRM ); msleep(50); tmp = snd_cs46xx_peekBA0(chip, BA0_ACCTL + offset); snd_cs46xx_pokeBA0(chip, BA0_ACCTL, tmp | ACCTL_ESYN | ACCTL_VFRM ); } /* * Setup the AC97 control registers on the CS461x to send the * appropriate command to the AC97 to perform the read. * ACCAD = Command Address Register = 46Ch * ACCDA = Command Data Register = 470h * ACCTL = Control Register = 460h * set DCV - will clear when process completed * set CRW - Read command * set VFRM - valid frame enabled * set ESYN - ASYNC generation enabled * set RSTN - ARST# inactive, AC97 codec not reset */ snd_cs46xx_pokeBA0(chip, BA0_ACCAD, reg); snd_cs46xx_pokeBA0(chip, BA0_ACCDA, 0); if (codec_index == CS46XX_PRIMARY_CODEC_INDEX) { snd_cs46xx_pokeBA0(chip, BA0_ACCTL,/* clear ACCTL_DCV */ ACCTL_CRW | ACCTL_VFRM | ACCTL_ESYN | ACCTL_RSTN); snd_cs46xx_pokeBA0(chip, BA0_ACCTL, ACCTL_DCV | ACCTL_CRW | ACCTL_VFRM | ACCTL_ESYN | ACCTL_RSTN); } else { snd_cs46xx_pokeBA0(chip, BA0_ACCTL, ACCTL_DCV | ACCTL_TC | ACCTL_CRW | ACCTL_VFRM | ACCTL_ESYN | ACCTL_RSTN); } /* * Wait for the read to occur. */ for (count = 0; count < 1000; count++) { /* * First, we want to wait for a short time. */ udelay(10); /* * Now, check to see if the read has completed. * ACCTL = 460h, DCV should be reset by now and 460h = 17h */ if (!(snd_cs46xx_peekBA0(chip, BA0_ACCTL) & ACCTL_DCV)) goto ok1; } snd_printk(KERN_ERR "AC'97 read problem (ACCTL_DCV), reg = 0x%x\n", reg); result = 0xffff; goto end; ok1: /* * Wait for the valid status bit to go active. 
*/ for (count = 0; count < 100; count++) { /* * Read the AC97 status register. * ACSTS = Status Register = 464h * VSTS - Valid Status */ if (snd_cs46xx_peekBA0(chip, BA0_ACSTS + offset) & ACSTS_VSTS) goto ok2; udelay(10); } snd_printk(KERN_ERR "AC'97 read problem (ACSTS_VSTS), codec_index %d, reg = 0x%x\n", codec_index, reg); result = 0xffff; goto end; ok2: /* * Read the data returned from the AC97 register. * ACSDA = Status Data Register = 474h */ #if 0 printk(KERN_DEBUG "e) reg = 0x%x, val = 0x%x, BA0_ACCAD = 0x%x\n", reg, snd_cs46xx_peekBA0(chip, BA0_ACSDA), snd_cs46xx_peekBA0(chip, BA0_ACCAD)); #endif //snd_cs46xx_peekBA0(chip, BA0_ACCAD); result = snd_cs46xx_peekBA0(chip, BA0_ACSDA + offset); end: chip->active_ctrl(chip, -1); return result; } static unsigned short snd_cs46xx_ac97_read(struct snd_ac97 * ac97, unsigned short reg) { struct snd_cs46xx *chip = ac97->private_data; unsigned short val; int codec_index = ac97->num; if (snd_BUG_ON(codec_index != CS46XX_PRIMARY_CODEC_INDEX && codec_index != CS46XX_SECONDARY_CODEC_INDEX)) return 0xffff; val = snd_cs46xx_codec_read(chip, reg, codec_index); return val; } static void snd_cs46xx_codec_write(struct snd_cs46xx *chip, unsigned short reg, unsigned short val, int codec_index) { int count; if (snd_BUG_ON(codec_index != CS46XX_PRIMARY_CODEC_INDEX && codec_index != CS46XX_SECONDARY_CODEC_INDEX)) return; chip->active_ctrl(chip, 1); /* * 1. Write ACCAD = Command Address Register = 46Ch for AC97 register address * 2. Write ACCDA = Command Data Register = 470h for data to write to AC97 * 3. Write ACCTL = Control Register = 460h for initiating the write * 4. Read ACCTL = 460h, DCV should be reset by now and 460h = 07h * 5. if DCV not cleared, break and return error */ /* * Setup the AC97 control registers on the CS461x to send the * appropriate command to the AC97 to perform the read. 
* ACCAD = Command Address Register = 46Ch * ACCDA = Command Data Register = 470h * ACCTL = Control Register = 460h * set DCV - will clear when process completed * reset CRW - Write command * set VFRM - valid frame enabled * set ESYN - ASYNC generation enabled * set RSTN - ARST# inactive, AC97 codec not reset */ snd_cs46xx_pokeBA0(chip, BA0_ACCAD , reg); snd_cs46xx_pokeBA0(chip, BA0_ACCDA , val); snd_cs46xx_peekBA0(chip, BA0_ACCTL); if (codec_index == CS46XX_PRIMARY_CODEC_INDEX) { snd_cs46xx_pokeBA0(chip, BA0_ACCTL, /* clear ACCTL_DCV */ ACCTL_VFRM | ACCTL_ESYN | ACCTL_RSTN); snd_cs46xx_pokeBA0(chip, BA0_ACCTL, ACCTL_DCV | ACCTL_VFRM | ACCTL_ESYN | ACCTL_RSTN); } else { snd_cs46xx_pokeBA0(chip, BA0_ACCTL, ACCTL_DCV | ACCTL_TC | ACCTL_VFRM | ACCTL_ESYN | ACCTL_RSTN); } for (count = 0; count < 4000; count++) { /* * First, we want to wait for a short time. */ udelay(10); /* * Now, check to see if the write has completed. * ACCTL = 460h, DCV should be reset by now and 460h = 07h */ if (!(snd_cs46xx_peekBA0(chip, BA0_ACCTL) & ACCTL_DCV)) { goto end; } } snd_printk(KERN_ERR "AC'97 write problem, codec_index = %d, reg = 0x%x, val = 0x%x\n", codec_index, reg, val); end: chip->active_ctrl(chip, -1); } static void snd_cs46xx_ac97_write(struct snd_ac97 *ac97, unsigned short reg, unsigned short val) { struct snd_cs46xx *chip = ac97->private_data; int codec_index = ac97->num; if (snd_BUG_ON(codec_index != CS46XX_PRIMARY_CODEC_INDEX && codec_index != CS46XX_SECONDARY_CODEC_INDEX)) return; snd_cs46xx_codec_write(chip, reg, val, codec_index); } /* * Chip initialization */ int snd_cs46xx_download(struct snd_cs46xx *chip, u32 *src, unsigned long offset, unsigned long len) { void __iomem *dst; unsigned int bank = offset >> 16; offset = offset & 0xffff; if (snd_BUG_ON((offset & 3) || (len & 3))) return -EINVAL; dst = chip->region.idx[bank+1].remap_addr + offset; len /= sizeof(u32); /* writel already converts 32-bit value to right endianess */ while (len-- > 0) { writel(*src++, dst); 
dst += sizeof(u32); } return 0; } #ifdef CONFIG_SND_CS46XX_NEW_DSP #include "imgs/cwc4630.h" #include "imgs/cwcasync.h" #include "imgs/cwcsnoop.h" #include "imgs/cwcbinhack.h" #include "imgs/cwcdma.h" int snd_cs46xx_clear_BA1(struct snd_cs46xx *chip, unsigned long offset, unsigned long len) { void __iomem *dst; unsigned int bank = offset >> 16; offset = offset & 0xffff; if (snd_BUG_ON((offset & 3) || (len & 3))) return -EINVAL; dst = chip->region.idx[bank+1].remap_addr + offset; len /= sizeof(u32); /* writel already converts 32-bit value to right endianess */ while (len-- > 0) { writel(0, dst); dst += sizeof(u32); } return 0; } #else /* old DSP image */ #include "cs46xx_image.h" int snd_cs46xx_download_image(struct snd_cs46xx *chip) { int idx, err; unsigned long offset = 0; for (idx = 0; idx < BA1_MEMORY_COUNT; idx++) { if ((err = snd_cs46xx_download(chip, &BA1Struct.map[offset], BA1Struct.memory[idx].offset, BA1Struct.memory[idx].size)) < 0) return err; offset += BA1Struct.memory[idx].size >> 2; } return 0; } #endif /* CONFIG_SND_CS46XX_NEW_DSP */ /* * Chip reset */ static void snd_cs46xx_reset(struct snd_cs46xx *chip) { int idx; /* * Write the reset bit of the SP control register. */ snd_cs46xx_poke(chip, BA1_SPCR, SPCR_RSTSP); /* * Write the control register. */ snd_cs46xx_poke(chip, BA1_SPCR, SPCR_DRQEN); /* * Clear the trap registers. */ for (idx = 0; idx < 8; idx++) { snd_cs46xx_poke(chip, BA1_DREG, DREG_REGID_TRAP_SELECT + idx); snd_cs46xx_poke(chip, BA1_TWPR, 0xFFFF); } snd_cs46xx_poke(chip, BA1_DREG, 0); /* * Set the frame timer to reflect the number of cycles per frame. */ snd_cs46xx_poke(chip, BA1_FRMT, 0xadf); } static int cs46xx_wait_for_fifo(struct snd_cs46xx * chip,int retry_timeout) { u32 i, status = 0; /* * Make sure the previous FIFO write operation has completed. 
*/ for(i = 0; i < 50; i++){ status = snd_cs46xx_peekBA0(chip, BA0_SERBST); if( !(status & SERBST_WBSY) ) break; mdelay(retry_timeout); } if(status & SERBST_WBSY) { snd_printk(KERN_ERR "cs46xx: failure waiting for " "FIFO command to complete\n"); return -EINVAL; } return 0; } static void snd_cs46xx_clear_serial_FIFOs(struct snd_cs46xx *chip) { int idx, powerdown = 0; unsigned int tmp; /* * See if the devices are powered down. If so, we must power them up first * or they will not respond. */ tmp = snd_cs46xx_peekBA0(chip, BA0_CLKCR1); if (!(tmp & CLKCR1_SWCE)) { snd_cs46xx_pokeBA0(chip, BA0_CLKCR1, tmp | CLKCR1_SWCE); powerdown = 1; } /* * We want to clear out the serial port FIFOs so we don't end up playing * whatever random garbage happens to be in them. We fill the sample FIFOS * with zero (silence). */ snd_cs46xx_pokeBA0(chip, BA0_SERBWP, 0); /* * Fill all 256 sample FIFO locations. */ for (idx = 0; idx < 0xFF; idx++) { /* * Make sure the previous FIFO write operation has completed. */ if (cs46xx_wait_for_fifo(chip,1)) { snd_printdd ("failed waiting for FIFO at addr (%02X)\n",idx); if (powerdown) snd_cs46xx_pokeBA0(chip, BA0_CLKCR1, tmp); break; } /* * Write the serial port FIFO index. */ snd_cs46xx_pokeBA0(chip, BA0_SERBAD, idx); /* * Tell the serial port to load the new value into the FIFO location. */ snd_cs46xx_pokeBA0(chip, BA0_SERBCM, SERBCM_WRC); } /* * Now, if we powered up the devices, then power them back down again. * This is kinda ugly, but should never happen. */ if (powerdown) snd_cs46xx_pokeBA0(chip, BA0_CLKCR1, tmp); } static void snd_cs46xx_proc_start(struct snd_cs46xx *chip) { int cnt; /* * Set the frame timer to reflect the number of cycles per frame. */ snd_cs46xx_poke(chip, BA1_FRMT, 0xadf); /* * Turn on the run, run at frame, and DMA enable bits in the local copy of * the SP control register. 
*/ snd_cs46xx_poke(chip, BA1_SPCR, SPCR_RUN | SPCR_RUNFR | SPCR_DRQEN); /* * Wait until the run at frame bit resets itself in the SP control * register. */ for (cnt = 0; cnt < 25; cnt++) { udelay(50); if (!(snd_cs46xx_peek(chip, BA1_SPCR) & SPCR_RUNFR)) break; } if (snd_cs46xx_peek(chip, BA1_SPCR) & SPCR_RUNFR) snd_printk(KERN_ERR "SPCR_RUNFR never reset\n"); } static void snd_cs46xx_proc_stop(struct snd_cs46xx *chip) { /* * Turn off the run, run at frame, and DMA enable bits in the local copy of * the SP control register. */ snd_cs46xx_poke(chip, BA1_SPCR, 0); } /* * Sample rate routines */ #define GOF_PER_SEC 200 static void snd_cs46xx_set_play_sample_rate(struct snd_cs46xx *chip, unsigned int rate) { unsigned long flags; unsigned int tmp1, tmp2; unsigned int phiIncr; unsigned int correctionPerGOF, correctionPerSec; /* * Compute the values used to drive the actual sample rate conversion. * The following formulas are being computed, using inline assembly * since we need to use 64 bit arithmetic to compute the values: * * phiIncr = floor((Fs,in * 2^26) / Fs,out) * correctionPerGOF = floor((Fs,in * 2^26 - Fs,out * phiIncr) / * GOF_PER_SEC) * ulCorrectionPerSec = Fs,in * 2^26 - Fs,out * phiIncr -M * GOF_PER_SEC * correctionPerGOF * * i.e. * * phiIncr:other = dividend:remainder((Fs,in * 2^26) / Fs,out) * correctionPerGOF:correctionPerSec = * dividend:remainder(ulOther / GOF_PER_SEC) */ tmp1 = rate << 16; phiIncr = tmp1 / 48000; tmp1 -= phiIncr * 48000; tmp1 <<= 10; phiIncr <<= 10; tmp2 = tmp1 / 48000; phiIncr += tmp2; tmp1 -= tmp2 * 48000; correctionPerGOF = tmp1 / GOF_PER_SEC; tmp1 -= correctionPerGOF * GOF_PER_SEC; correctionPerSec = tmp1; /* * Fill in the SampleRateConverter control block. 
*/ spin_lock_irqsave(&chip->reg_lock, flags); snd_cs46xx_poke(chip, BA1_PSRC, ((correctionPerSec << 16) & 0xFFFF0000) | (correctionPerGOF & 0xFFFF)); snd_cs46xx_poke(chip, BA1_PPI, phiIncr); spin_unlock_irqrestore(&chip->reg_lock, flags); } static void snd_cs46xx_set_capture_sample_rate(struct snd_cs46xx *chip, unsigned int rate) { unsigned long flags; unsigned int phiIncr, coeffIncr, tmp1, tmp2; unsigned int correctionPerGOF, correctionPerSec, initialDelay; unsigned int frameGroupLength, cnt; /* * We can only decimate by up to a factor of 1/9th the hardware rate. * Correct the value if an attempt is made to stray outside that limit. */ if ((rate * 9) < 48000) rate = 48000 / 9; /* * We can not capture at at rate greater than the Input Rate (48000). * Return an error if an attempt is made to stray outside that limit. */ if (rate > 48000) rate = 48000; /* * Compute the values used to drive the actual sample rate conversion. * The following formulas are being computed, using inline assembly * since we need to use 64 bit arithmetic to compute the values: * * coeffIncr = -floor((Fs,out * 2^23) / Fs,in) * phiIncr = floor((Fs,in * 2^26) / Fs,out) * correctionPerGOF = floor((Fs,in * 2^26 - Fs,out * phiIncr) / * GOF_PER_SEC) * correctionPerSec = Fs,in * 2^26 - Fs,out * phiIncr - * GOF_PER_SEC * correctionPerGOF * initialDelay = ceil((24 * Fs,in) / Fs,out) * * i.e. 
* * coeffIncr = neg(dividend((Fs,out * 2^23) / Fs,in)) * phiIncr:ulOther = dividend:remainder((Fs,in * 2^26) / Fs,out) * correctionPerGOF:correctionPerSec = * dividend:remainder(ulOther / GOF_PER_SEC) * initialDelay = dividend(((24 * Fs,in) + Fs,out - 1) / Fs,out) */ tmp1 = rate << 16; coeffIncr = tmp1 / 48000; tmp1 -= coeffIncr * 48000; tmp1 <<= 7; coeffIncr <<= 7; coeffIncr += tmp1 / 48000; coeffIncr ^= 0xFFFFFFFF; coeffIncr++; tmp1 = 48000 << 16; phiIncr = tmp1 / rate; tmp1 -= phiIncr * rate; tmp1 <<= 10; phiIncr <<= 10; tmp2 = tmp1 / rate; phiIncr += tmp2; tmp1 -= tmp2 * rate; correctionPerGOF = tmp1 / GOF_PER_SEC; tmp1 -= correctionPerGOF * GOF_PER_SEC; correctionPerSec = tmp1; initialDelay = ((48000 * 24) + rate - 1) / rate; /* * Fill in the VariDecimate control block. */ spin_lock_irqsave(&chip->reg_lock, flags); snd_cs46xx_poke(chip, BA1_CSRC, ((correctionPerSec << 16) & 0xFFFF0000) | (correctionPerGOF & 0xFFFF)); snd_cs46xx_poke(chip, BA1_CCI, coeffIncr); snd_cs46xx_poke(chip, BA1_CD, (((BA1_VARIDEC_BUF_1 + (initialDelay << 2)) << 16) & 0xFFFF0000) | 0x80); snd_cs46xx_poke(chip, BA1_CPI, phiIncr); spin_unlock_irqrestore(&chip->reg_lock, flags); /* * Figure out the frame group length for the write back task. Basically, * this is just the factors of 24000 (2^6*3*5^3) that are not present in * the output sample rate. */ frameGroupLength = 1; for (cnt = 2; cnt <= 64; cnt *= 2) { if (((rate / cnt) * cnt) != rate) frameGroupLength *= 2; } if (((rate / 3) * 3) != rate) { frameGroupLength *= 3; } for (cnt = 5; cnt <= 125; cnt *= 5) { if (((rate / cnt) * cnt) != rate) frameGroupLength *= 5; } /* * Fill in the WriteBack control block. 
*/ spin_lock_irqsave(&chip->reg_lock, flags); snd_cs46xx_poke(chip, BA1_CFG1, frameGroupLength); snd_cs46xx_poke(chip, BA1_CFG2, (0x00800000 | frameGroupLength)); snd_cs46xx_poke(chip, BA1_CCST, 0x0000FFFF); snd_cs46xx_poke(chip, BA1_CSPB, ((65536 * rate) / 24000)); snd_cs46xx_poke(chip, (BA1_CSPB + 4), 0x0000FFFF); spin_unlock_irqrestore(&chip->reg_lock, flags); } /* * PCM part */ static void snd_cs46xx_pb_trans_copy(struct snd_pcm_substream *substream, struct snd_pcm_indirect *rec, size_t bytes) { struct snd_pcm_runtime *runtime = substream->runtime; struct snd_cs46xx_pcm * cpcm = runtime->private_data; memcpy(cpcm->hw_buf.area + rec->hw_data, runtime->dma_area + rec->sw_data, bytes); } static int snd_cs46xx_playback_transfer(struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime = substream->runtime; struct snd_cs46xx_pcm * cpcm = runtime->private_data; snd_pcm_indirect_playback_transfer(substream, &cpcm->pcm_rec, snd_cs46xx_pb_trans_copy); return 0; } static void snd_cs46xx_cp_trans_copy(struct snd_pcm_substream *substream, struct snd_pcm_indirect *rec, size_t bytes) { struct snd_cs46xx *chip = snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = substream->runtime; memcpy(runtime->dma_area + rec->sw_data, chip->capt.hw_buf.area + rec->hw_data, bytes); } static int snd_cs46xx_capture_transfer(struct snd_pcm_substream *substream) { struct snd_cs46xx *chip = snd_pcm_substream_chip(substream); snd_pcm_indirect_capture_transfer(substream, &chip->capt.pcm_rec, snd_cs46xx_cp_trans_copy); return 0; } static snd_pcm_uframes_t snd_cs46xx_playback_direct_pointer(struct snd_pcm_substream *substream) { struct snd_cs46xx *chip = snd_pcm_substream_chip(substream); size_t ptr; struct snd_cs46xx_pcm *cpcm = substream->runtime->private_data; if (snd_BUG_ON(!cpcm->pcm_channel)) return -ENXIO; #ifdef CONFIG_SND_CS46XX_NEW_DSP ptr = snd_cs46xx_peek(chip, (cpcm->pcm_channel->pcm_reader_scb->address + 2) << 2); #else ptr = snd_cs46xx_peek(chip, 
BA1_PBA); #endif ptr -= cpcm->hw_buf.addr; return ptr >> cpcm->shift; } static snd_pcm_uframes_t snd_cs46xx_playback_indirect_pointer(struct snd_pcm_substream *substream) { struct snd_cs46xx *chip = snd_pcm_substream_chip(substream); size_t ptr; struct snd_cs46xx_pcm *cpcm = substream->runtime->private_data; #ifdef CONFIG_SND_CS46XX_NEW_DSP if (snd_BUG_ON(!cpcm->pcm_channel)) return -ENXIO; ptr = snd_cs46xx_peek(chip, (cpcm->pcm_channel->pcm_reader_scb->address + 2) << 2); #else ptr = snd_cs46xx_peek(chip, BA1_PBA); #endif ptr -= cpcm->hw_buf.addr; return snd_pcm_indirect_playback_pointer(substream, &cpcm->pcm_rec, ptr); } static snd_pcm_uframes_t snd_cs46xx_capture_direct_pointer(struct snd_pcm_substream *substream) { struct snd_cs46xx *chip = snd_pcm_substream_chip(substream); size_t ptr = snd_cs46xx_peek(chip, BA1_CBA) - chip->capt.hw_buf.addr; return ptr >> chip->capt.shift; } static snd_pcm_uframes_t snd_cs46xx_capture_indirect_pointer(struct snd_pcm_substream *substream) { struct snd_cs46xx *chip = snd_pcm_substream_chip(substream); size_t ptr = snd_cs46xx_peek(chip, BA1_CBA) - chip->capt.hw_buf.addr; return snd_pcm_indirect_capture_pointer(substream, &chip->capt.pcm_rec, ptr); } static int snd_cs46xx_playback_trigger(struct snd_pcm_substream *substream, int cmd) { struct snd_cs46xx *chip = snd_pcm_substream_chip(substream); /*struct snd_pcm_runtime *runtime = substream->runtime;*/ int result = 0; #ifdef CONFIG_SND_CS46XX_NEW_DSP struct snd_cs46xx_pcm *cpcm = substream->runtime->private_data; if (! 
cpcm->pcm_channel) {
		return -ENXIO;
	}
#endif
	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
	case SNDRV_PCM_TRIGGER_RESUME:
#ifdef CONFIG_SND_CS46XX_NEW_DSP
		/* magic value to unmute PCM stream playback volume */
		snd_cs46xx_poke(chip, (cpcm->pcm_channel->pcm_reader_scb->address +
				       SCBVolumeCtrl) << 2, 0x80008000);

		if (cpcm->pcm_channel->unlinked)
			cs46xx_dsp_pcm_link(chip,cpcm->pcm_channel);

		/* in indirect mode, prefill the hardware buffer before start */
		if (substream->runtime->periods != CS46XX_FRAGS)
			snd_cs46xx_playback_transfer(substream);
#else
		spin_lock(&chip->reg_lock);
		if (substream->runtime->periods != CS46XX_FRAGS)
			snd_cs46xx_playback_transfer(substream);
		{ unsigned int tmp;
		tmp = snd_cs46xx_peek(chip, BA1_PCTL);
		tmp &= 0x0000ffff;
		/* restore the play-control bits (upper half) to start the DMA */
		snd_cs46xx_poke(chip, BA1_PCTL, chip->play_ctl | tmp);
		}
		spin_unlock(&chip->reg_lock);
#endif
		break;
	case SNDRV_PCM_TRIGGER_STOP:
	case SNDRV_PCM_TRIGGER_SUSPEND:
#ifdef CONFIG_SND_CS46XX_NEW_DSP
		/* magic mute channel */
		snd_cs46xx_poke(chip, (cpcm->pcm_channel->pcm_reader_scb->address +
				       SCBVolumeCtrl) << 2, 0xffffffff);

		if (!cpcm->pcm_channel->unlinked)
			cs46xx_dsp_pcm_unlink(chip,cpcm->pcm_channel);
#else
		spin_lock(&chip->reg_lock);
		{ unsigned int tmp;
		tmp = snd_cs46xx_peek(chip, BA1_PCTL);
		tmp &= 0x0000ffff;
		/* clearing the upper half of BA1_PCTL stops playback */
		snd_cs46xx_poke(chip, BA1_PCTL, tmp);
		}
		spin_unlock(&chip->reg_lock);
#endif
		break;
	default:
		result = -EINVAL;
		break;
	}

	return result;
}

/* Start/stop capture by setting/clearing the capture-control bits in the
 * lower half of BA1_CCTL (upper half is preserved). */
static int snd_cs46xx_capture_trigger(struct snd_pcm_substream *substream,
				      int cmd)
{
	struct snd_cs46xx *chip = snd_pcm_substream_chip(substream);
	unsigned int tmp;
	int result = 0;

	spin_lock(&chip->reg_lock);
	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
	case SNDRV_PCM_TRIGGER_RESUME:
		tmp = snd_cs46xx_peek(chip, BA1_CCTL);
		tmp &= 0xffff0000;
		snd_cs46xx_poke(chip, BA1_CCTL, chip->capt.ctl | tmp);
		break;
	case SNDRV_PCM_TRIGGER_STOP:
	case SNDRV_PCM_TRIGGER_SUSPEND:
		tmp = snd_cs46xx_peek(chip, BA1_CCTL);
		tmp &= 0xffff0000;
		snd_cs46xx_poke(chip, BA1_CCTL, tmp);
		break;
	default:
		result = -EINVAL;
		break;
	}
	spin_unlock(&chip->reg_lock);

	return result;
}

#ifdef CONFIG_SND_CS46XX_NEW_DSP
/* Ensure cpcm has a virtual DSP PCM channel running at sample_rate:
 * create it on first use, or destroy and re-create it (preserving link
 * state) when the rate changes.  Caller holds chip->spos_mutex. */
static int _cs46xx_adjust_sample_rate (struct snd_cs46xx *chip, struct snd_cs46xx_pcm *cpcm,
				       int sample_rate)
{
	/* If PCMReaderSCB and SrcTaskSCB not created yet ... */
	if ( cpcm->pcm_channel == NULL) {
		cpcm->pcm_channel = cs46xx_dsp_create_pcm_channel (chip, sample_rate,
								   cpcm, cpcm->hw_buf.addr,cpcm->pcm_channel_id);
		if (cpcm->pcm_channel == NULL) {
			snd_printk(KERN_ERR "cs46xx: failed to create virtual PCM channel\n");
			return -ENOMEM;
		}
		cpcm->pcm_channel->sample_rate = sample_rate;
	} else
	/* if sample rate is changed */
	if ((int)cpcm->pcm_channel->sample_rate != sample_rate) {
		int unlinked = cpcm->pcm_channel->unlinked;
		cs46xx_dsp_destroy_pcm_channel (chip,cpcm->pcm_channel);

		if ( (cpcm->pcm_channel = cs46xx_dsp_create_pcm_channel (chip, sample_rate, cpcm,
									 cpcm->hw_buf.addr,
									 cpcm->pcm_channel_id)) == NULL) {
			snd_printk(KERN_ERR "cs46xx: failed to re-create virtual PCM channel\n");
			return -ENOMEM;
		}

		/* keep the previous linked/unlinked state across re-creation */
		if (!unlinked)
			cs46xx_dsp_pcm_link (chip,cpcm->pcm_channel);
		cpcm->pcm_channel->sample_rate = sample_rate;
	}

	return 0;
}
#endif

/* hw_params for playback: pick the DSP channel/rate (new DSP), then choose
 * direct mode (DMA straight from the on-card buffer when periods ==
 * CS46XX_FRAGS) or indirect mode (intermediate buffer + copy), and install
 * the matching ops table for the channel id. */
static int snd_cs46xx_playback_hw_params(struct snd_pcm_substream *substream,
					 struct snd_pcm_hw_params *hw_params)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct snd_cs46xx_pcm *cpcm;
	int err;
#ifdef CONFIG_SND_CS46XX_NEW_DSP
	struct snd_cs46xx *chip = snd_pcm_substream_chip(substream);
	int sample_rate = params_rate(hw_params);
	int period_size = params_period_bytes(hw_params);
#endif
	cpcm = runtime->private_data;

#ifdef CONFIG_SND_CS46XX_NEW_DSP
	if (snd_BUG_ON(!sample_rate))
		return -ENXIO;

	mutex_lock(&chip->spos_mutex);

	if (_cs46xx_adjust_sample_rate (chip,cpcm,sample_rate)) {
		mutex_unlock(&chip->spos_mutex);
		return -ENXIO;
	}

	snd_BUG_ON(!cpcm->pcm_channel);
	if (!cpcm->pcm_channel) {
		mutex_unlock(&chip->spos_mutex);
		return -ENXIO;
	}

	if (cs46xx_dsp_pcm_channel_set_period (chip,cpcm->pcm_channel,period_size)) {
		mutex_unlock(&chip->spos_mutex);
		return -EINVAL;
	}

	snd_printdd ("period_size (%d), periods (%d) buffer_size(%d)\n",
		     period_size, params_periods(hw_params),
		     params_buffer_bytes(hw_params));
#endif

	if (params_periods(hw_params) == CS46XX_FRAGS) {
		/* direct mode: the runtime DMA area IS the on-card buffer */
		if (runtime->dma_area != cpcm->hw_buf.area)
			snd_pcm_lib_free_pages(substream);
		runtime->dma_area = cpcm->hw_buf.area;
		runtime->dma_addr = cpcm->hw_buf.addr;
		runtime->dma_bytes = cpcm->hw_buf.bytes;

#ifdef CONFIG_SND_CS46XX_NEW_DSP
		if (cpcm->pcm_channel_id == DSP_PCM_MAIN_CHANNEL) {
			substream->ops = &snd_cs46xx_playback_ops;
		} else if (cpcm->pcm_channel_id == DSP_PCM_REAR_CHANNEL) {
			substream->ops = &snd_cs46xx_playback_rear_ops;
		} else if (cpcm->pcm_channel_id == DSP_PCM_CENTER_LFE_CHANNEL) {
			substream->ops = &snd_cs46xx_playback_clfe_ops;
		} else if (cpcm->pcm_channel_id == DSP_IEC958_CHANNEL) {
			substream->ops = &snd_cs46xx_playback_iec958_ops;
		} else {
			snd_BUG();
		}
#else
		substream->ops = &snd_cs46xx_playback_ops;
#endif

	} else {
		/* indirect mode: allocate a separate sw buffer and copy */
		if (runtime->dma_area == cpcm->hw_buf.area) {
			runtime->dma_area = NULL;
			runtime->dma_addr = 0;
			runtime->dma_bytes = 0;
		}
		if ((err = snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(hw_params))) < 0) {
#ifdef CONFIG_SND_CS46XX_NEW_DSP
			mutex_unlock(&chip->spos_mutex);
#endif
			return err;
		}

#ifdef CONFIG_SND_CS46XX_NEW_DSP
		if (cpcm->pcm_channel_id == DSP_PCM_MAIN_CHANNEL) {
			substream->ops = &snd_cs46xx_playback_indirect_ops;
		} else if (cpcm->pcm_channel_id == DSP_PCM_REAR_CHANNEL) {
			substream->ops = &snd_cs46xx_playback_indirect_rear_ops;
		} else if (cpcm->pcm_channel_id == DSP_PCM_CENTER_LFE_CHANNEL) {
			substream->ops = &snd_cs46xx_playback_indirect_clfe_ops;
		} else if (cpcm->pcm_channel_id == DSP_IEC958_CHANNEL) {
			substream->ops = &snd_cs46xx_playback_indirect_iec958_ops;
		} else {
			snd_BUG();
		}
#else
		substream->ops = &snd_cs46xx_playback_indirect_ops;
#endif

	}

#ifdef CONFIG_SND_CS46XX_NEW_DSP
	mutex_unlock(&chip->spos_mutex);
#endif

	return 0;
}

/* hw_free for playback: release the sw buffer unless the runtime is still
 * pointing at the on-card buffer (direct mode), then detach the DMA area. */
static int snd_cs46xx_playback_hw_free(struct snd_pcm_substream *substream)
{
	/*struct snd_cs46xx *chip = snd_pcm_substream_chip(substream);*/
	struct
snd_pcm_runtime *runtime = substream->runtime;
	struct snd_cs46xx_pcm *cpcm;

	cpcm = runtime->private_data;

	/* if play_back open fails, then this function
	   is called and cpcm can actually be NULL here */
	if (!cpcm) return -ENXIO;

	if (runtime->dma_area != cpcm->hw_buf.area)
		snd_pcm_lib_free_pages(substream);

	runtime->dma_area = NULL;
	runtime->dma_addr = 0;
	runtime->dma_bytes = 0;

	return 0;
}

/* Prepare playback: build the PFIE format/interrupt word from the stream
 * format (mono/8-bit/unsigned/endian conversions), compute the frame shift,
 * reset the indirect bookkeeping, and program the transaction count and
 * format registers (SCB words on the new DSP, BA1_* on the old one). */
static int snd_cs46xx_playback_prepare(struct snd_pcm_substream *substream)
{
	unsigned int tmp;
	unsigned int pfie;
	struct snd_cs46xx *chip = snd_pcm_substream_chip(substream);
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct snd_cs46xx_pcm *cpcm;

	cpcm = runtime->private_data;

#ifdef CONFIG_SND_CS46XX_NEW_DSP
	if (snd_BUG_ON(!cpcm->pcm_channel))
		return -ENXIO;

	pfie = snd_cs46xx_peek(chip, (cpcm->pcm_channel->pcm_reader_scb->address + 1) << 2 );
	pfie &= ~0x0000f03f;
#else
	/* old dsp */
	pfie = snd_cs46xx_peek(chip, BA1_PFIE);
	pfie &= ~0x0000f03f;
#endif

	/* shift converts bytes to frames: 2 for stereo 16-bit, reduced below */
	cpcm->shift = 2;
	/* if to convert from stereo to mono */
	if (runtime->channels == 1) {
		cpcm->shift--;
		pfie |= 0x00002000;
	}
	/* if to convert from 8 bit to 16 bit */
	if (snd_pcm_format_width(runtime->format) == 8) {
		cpcm->shift--;
		pfie |= 0x00001000;
	}
	/* if to convert to unsigned */
	if (snd_pcm_format_unsigned(runtime->format))
		pfie |= 0x00008000;

	/* Never convert byte order when sample stream is 8 bit */
	if (snd_pcm_format_width(runtime->format) != 8) {
		/* convert from big endian to little endian */
		if (snd_pcm_format_big_endian(runtime->format))
			pfie |= 0x00004000;
	}

	memset(&cpcm->pcm_rec, 0, sizeof(cpcm->pcm_rec));
	cpcm->pcm_rec.sw_buffer_size = snd_pcm_lib_buffer_bytes(substream);
	cpcm->pcm_rec.hw_buffer_size = runtime->period_size * CS46XX_FRAGS << cpcm->shift;

#ifdef CONFIG_SND_CS46XX_NEW_DSP
	tmp = snd_cs46xx_peek(chip, (cpcm->pcm_channel->pcm_reader_scb->address) << 2);
	tmp &= ~0x000003ff;
	tmp |= (4 << cpcm->shift) - 1;
	/* playback transaction count register */
	snd_cs46xx_poke(chip, (cpcm->pcm_channel->pcm_reader_scb->address) << 2, tmp);

	/* playback format && interrupt enable */
	snd_cs46xx_poke(chip, (cpcm->pcm_channel->pcm_reader_scb->address + 1) << 2,
			pfie | cpcm->pcm_channel->pcm_slot);
#else
	snd_cs46xx_poke(chip, BA1_PBA, cpcm->hw_buf.addr);
	tmp = snd_cs46xx_peek(chip, BA1_PDTC);
	tmp &= ~0x000003ff;
	tmp |= (4 << cpcm->shift) - 1;
	snd_cs46xx_poke(chip, BA1_PDTC, tmp);
	snd_cs46xx_poke(chip, BA1_PFIE, pfie);
	snd_cs46xx_set_play_sample_rate(chip, runtime->rate);
#endif

	return 0;
}

/* hw_params for capture: mirror of the playback path — direct mode when
 * periods == CS46XX_FRAGS, otherwise allocate a sw buffer and go indirect. */
static int snd_cs46xx_capture_hw_params(struct snd_pcm_substream *substream,
					struct snd_pcm_hw_params *hw_params)
{
	struct snd_cs46xx *chip = snd_pcm_substream_chip(substream);
	struct snd_pcm_runtime *runtime = substream->runtime;
	int err;

#ifdef CONFIG_SND_CS46XX_NEW_DSP
	cs46xx_dsp_pcm_ostream_set_period (chip, params_period_bytes(hw_params));
#endif
	if (runtime->periods == CS46XX_FRAGS) {
		if (runtime->dma_area != chip->capt.hw_buf.area)
			snd_pcm_lib_free_pages(substream);
		runtime->dma_area = chip->capt.hw_buf.area;
		runtime->dma_addr = chip->capt.hw_buf.addr;
		runtime->dma_bytes = chip->capt.hw_buf.bytes;
		substream->ops = &snd_cs46xx_capture_ops;
	} else {
		if (runtime->dma_area == chip->capt.hw_buf.area) {
			runtime->dma_area = NULL;
			runtime->dma_addr = 0;
			runtime->dma_bytes = 0;
		}
		if ((err = snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(hw_params))) < 0)
			return err;
		substream->ops = &snd_cs46xx_capture_indirect_ops;
	}

	return 0;
}

/* hw_free for capture: drop the sw buffer (if any) and detach the DMA area. */
static int snd_cs46xx_capture_hw_free(struct snd_pcm_substream *substream)
{
	struct snd_cs46xx *chip = snd_pcm_substream_chip(substream);
	struct snd_pcm_runtime *runtime = substream->runtime;

	if (runtime->dma_area != chip->capt.hw_buf.area)
		snd_pcm_lib_free_pages(substream);
	runtime->dma_area = NULL;
	runtime->dma_addr = 0;
	runtime->dma_bytes = 0;

	return 0;
}

/* Prepare capture: point BA1_CBA at the on-card buffer, reset the indirect
 * bookkeeping (capture is always stereo 16-bit, shift = 2), set the rate. */
static int snd_cs46xx_capture_prepare(struct snd_pcm_substream *substream)
{
	struct snd_cs46xx *chip = snd_pcm_substream_chip(substream);
	struct snd_pcm_runtime *runtime = substream->runtime;

	snd_cs46xx_poke(chip, BA1_CBA, chip->capt.hw_buf.addr);
	chip->capt.shift = 2;
	memset(&chip->capt.pcm_rec, 0, sizeof(chip->capt.pcm_rec));
	chip->capt.pcm_rec.sw_buffer_size = snd_pcm_lib_buffer_bytes(substream);
	chip->capt.pcm_rec.hw_buffer_size = runtime->period_size * CS46XX_FRAGS << 2;
	snd_cs46xx_set_capture_sample_rate(chip, runtime->rate);

	return 0;
}

/* Interrupt handler: dispatch period-elapsed events for every active PCM
 * channel (HISR covers channels 0-15, HSR0 covers 16+ on the new DSP),
 * service the MIDI UART FIFOs, then write the EOI to re-enable interrupts. */
static irqreturn_t snd_cs46xx_interrupt(int irq, void *dev_id)
{
	struct snd_cs46xx *chip = dev_id;
	u32 status1;
#ifdef CONFIG_SND_CS46XX_NEW_DSP
	struct dsp_spos_instance * ins = chip->dsp_spos_instance;
	u32 status2;
	int i;
	struct snd_cs46xx_pcm *cpcm = NULL;
#endif

	/*
	 *  Read the Interrupt Status Register to clear the interrupt
	 */
	status1 = snd_cs46xx_peekBA0(chip, BA0_HISR);
	if ((status1 & 0x7fffffff) == 0) {
		/* not ours — still issue the EOI so the chip re-arms */
		snd_cs46xx_pokeBA0(chip, BA0_HICR, HICR_CHGM | HICR_IEV);
		return IRQ_NONE;
	}

#ifdef CONFIG_SND_CS46XX_NEW_DSP
	status2 = snd_cs46xx_peekBA0(chip, BA0_HSR0);

	for (i = 0; i < DSP_MAX_PCM_CHANNELS; ++i) {
		if (i <= 15) {
			if ( status1 & (1 << i) ) {
				if (i == CS46XX_DSP_CAPTURE_CHANNEL) {
					if (chip->capt.substream)
						snd_pcm_period_elapsed(chip->capt.substream);
				} else {
					if (ins->pcm_channels[i].active &&
					    ins->pcm_channels[i].private_data &&
					    !ins->pcm_channels[i].unlinked) {
						cpcm = ins->pcm_channels[i].private_data;
						snd_pcm_period_elapsed(cpcm->substream);
					}
				}
			}
		} else {
			if ( status2 & (1 << (i - 16))) {
				if (ins->pcm_channels[i].active &&
				    ins->pcm_channels[i].private_data &&
				    !ins->pcm_channels[i].unlinked) {
					cpcm = ins->pcm_channels[i].private_data;
					snd_pcm_period_elapsed(cpcm->substream);
				}
			}
		}
	}

#else
	/* old dsp */
	if ((status1 & HISR_VC0) && chip->playback_pcm) {
		if (chip->playback_pcm->substream)
			snd_pcm_period_elapsed(chip->playback_pcm->substream);
	}
	if ((status1 & HISR_VC1) && chip->pcm) {
		if (chip->capt.substream)
			snd_pcm_period_elapsed(chip->capt.substream);
	}
#endif

	if ((status1 & HISR_MIDI) && chip->rmidi) {
		unsigned char c;

		spin_lock(&chip->reg_lock);
		/* drain the receive FIFO while data is available (RBE clear) */
		while ((snd_cs46xx_peekBA0(chip, BA0_MIDSR) & MIDSR_RBE) == 0) {
			c = snd_cs46xx_peekBA0(chip, BA0_MIDRP);
			/* byte must be read to clear the FIFO even if RX irq is off */
			if ((chip->midcr & MIDCR_RIE) == 0)
				continue;
			snd_rawmidi_receive(chip->midi_input, &c, 1);
		}
		/* refill the transmit FIFO while there is room (TBF clear) */
		while ((snd_cs46xx_peekBA0(chip, BA0_MIDSR) & MIDSR_TBF) == 0) {
			if ((chip->midcr & MIDCR_TIE) == 0)
				break;
			if (snd_rawmidi_transmit(chip->midi_output, &c, 1) != 1) {
				/* nothing more to send — disable the TX interrupt */
				chip->midcr &= ~MIDCR_TIE;
				snd_cs46xx_pokeBA0(chip, BA0_MIDCR, chip->midcr);
				break;
			}
			snd_cs46xx_pokeBA0(chip, BA0_MIDWP, c);
		}
		spin_unlock(&chip->reg_lock);
	}
	/*
	 *  EOI to the PCI part....reenables interrupts
	 */
	snd_cs46xx_pokeBA0(chip, BA0_HICR, HICR_CHGM | HICR_IEV);

	return IRQ_HANDLED;
}

/* Capability descriptor handed to the PCM core for playback substreams. */
static struct snd_pcm_hardware snd_cs46xx_playback =
{
	.info =			(SNDRV_PCM_INFO_MMAP |
				 SNDRV_PCM_INFO_INTERLEAVED |
				 SNDRV_PCM_INFO_BLOCK_TRANSFER /*|*/
				 /*SNDRV_PCM_INFO_RESUME*/),
	.formats =		(SNDRV_PCM_FMTBIT_S8 | SNDRV_PCM_FMTBIT_U8 |
				 SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S16_BE |
				 SNDRV_PCM_FMTBIT_U16_LE | SNDRV_PCM_FMTBIT_U16_BE),
	.rates =		SNDRV_PCM_RATE_CONTINUOUS | SNDRV_PCM_RATE_8000_48000,
	.rate_min =		5500,
	.rate_max =		48000,
	.channels_min =		1,
	.channels_max =		2,
	.buffer_bytes_max =	(256 * 1024),
	.period_bytes_min =	CS46XX_MIN_PERIOD_SIZE,
	.period_bytes_max =	CS46XX_MAX_PERIOD_SIZE,
	.periods_min =		CS46XX_FRAGS,
	.periods_max =		1024,
	.fifo_size =		0,
};

/* Capability descriptor for capture: stereo 16-bit LE only. */
static struct snd_pcm_hardware snd_cs46xx_capture =
{
	.info =			(SNDRV_PCM_INFO_MMAP |
				 SNDRV_PCM_INFO_INTERLEAVED |
				 SNDRV_PCM_INFO_BLOCK_TRANSFER /*|*/
				 /*SNDRV_PCM_INFO_RESUME*/),
	.formats =		SNDRV_PCM_FMTBIT_S16_LE,
	.rates =		SNDRV_PCM_RATE_CONTINUOUS | SNDRV_PCM_RATE_8000_48000,
	.rate_min =		5500,
	.rate_max =		48000,
	.channels_min =		2,
	.channels_max =		2,
	.buffer_bytes_max =	(256 * 1024),
	.period_bytes_min =	CS46XX_MIN_PERIOD_SIZE,
	.period_bytes_max =	CS46XX_MAX_PERIOD_SIZE,
	.periods_min =		CS46XX_FRAGS,
	.periods_max =		1024,
	.fifo_size =		0,
};

#ifdef CONFIG_SND_CS46XX_NEW_DSP

/* Period sizes (bytes) the new DSP firmware can handle. */
static unsigned int period_sizes[] = { 32, 64, 128, 256, 512, 1024, 2048 };

static struct snd_pcm_hw_constraint_list
hw_constraints_period_sizes = {
	.count = ARRAY_SIZE(period_sizes),
	.list = period_sizes,
	.mask = 0
};

#endif

/* private_free callback: release the per-substream snd_cs46xx_pcm. */
static void snd_cs46xx_pcm_free_substream(struct snd_pcm_runtime *runtime)
{
	kfree(runtime->private_data);
}

/* Common open path for all playback substreams: allocate the per-stream
 * state and its one-page on-card DMA buffer, install hw caps and the
 * period-size constraint (new DSP), and bump the activity counter. */
static int _cs46xx_playback_open_channel (struct snd_pcm_substream *substream,int pcm_channel_id)
{
	struct snd_cs46xx *chip = snd_pcm_substream_chip(substream);
	struct snd_cs46xx_pcm * cpcm;
	struct snd_pcm_runtime *runtime = substream->runtime;

	cpcm = kzalloc(sizeof(*cpcm), GFP_KERNEL);
	if (cpcm == NULL)
		return -ENOMEM;
	if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(chip->pci),
				PAGE_SIZE, &cpcm->hw_buf) < 0) {
		kfree(cpcm);
		return -ENOMEM;
	}

	runtime->hw = snd_cs46xx_playback;
	runtime->private_data = cpcm;
	runtime->private_free = snd_cs46xx_pcm_free_substream;

	cpcm->substream = substream;
#ifdef CONFIG_SND_CS46XX_NEW_DSP
	mutex_lock(&chip->spos_mutex);
	/* the DSP channel is created lazily in hw_params, once the rate is known */
	cpcm->pcm_channel = NULL;
	cpcm->pcm_channel_id = pcm_channel_id;

	snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_BYTES,
				   &hw_constraints_period_sizes);
	mutex_unlock(&chip->spos_mutex);
#else
	chip->playback_pcm = cpcm; /* HACK */
#endif

	if (chip->accept_valid)
		substream->runtime->hw.info |= SNDRV_PCM_INFO_MMAP_VALID;
	chip->active_ctrl(chip, 1);

	return 0;
}

/* Open the front (main) playback channel. */
static int snd_cs46xx_playback_open(struct snd_pcm_substream *substream)
{
	snd_printdd("open front channel\n");
	return _cs46xx_playback_open_channel(substream,DSP_PCM_MAIN_CHANNEL);
}

#ifdef CONFIG_SND_CS46XX_NEW_DSP
/* Open the rear playback channel. */
static int snd_cs46xx_playback_open_rear(struct snd_pcm_substream *substream)
{
	snd_printdd("open rear channel\n");
	return _cs46xx_playback_open_channel(substream,DSP_PCM_REAR_CHANNEL);
}

/* Open the center/LFE playback channel. */
static int snd_cs46xx_playback_open_clfe(struct snd_pcm_substream *substream)
{
	snd_printdd("open center - LFE channel\n");
	return _cs46xx_playback_open_channel(substream,DSP_PCM_CENTER_LFE_CHANNEL);
}

/* Open the raw IEC958 (S/PDIF) channel; the DSP routing must be switched
 * before the generic open runs. */
static int snd_cs46xx_playback_open_iec958(struct snd_pcm_substream *substream)
{
	struct snd_cs46xx *chip = snd_pcm_substream_chip(substream);

	snd_printdd("open raw iec958 channel\n");

	mutex_lock(&chip->spos_mutex);
	cs46xx_iec958_pre_open (chip);
	mutex_unlock(&chip->spos_mutex);

	return _cs46xx_playback_open_channel(substream,DSP_IEC958_CHANNEL);
}

/* forward declaration — shared by the iec958 close path below */
static int snd_cs46xx_playback_close(struct snd_pcm_substream *substream);

/* Close the raw IEC958 channel and restore the normal DSP routing. */
static int snd_cs46xx_playback_close_iec958(struct snd_pcm_substream *substream)
{
	int err;
	struct snd_cs46xx *chip = snd_pcm_substream_chip(substream);

	snd_printdd("close raw iec958 channel\n");

	err = snd_cs46xx_playback_close(substream);

	mutex_lock(&chip->spos_mutex);
	cs46xx_iec958_post_close (chip);
	mutex_unlock(&chip->spos_mutex);

	return err;
}
#endif

/* Open the capture substream: allocate the shared on-card capture buffer
 * and install the capture hardware description. */
static int snd_cs46xx_capture_open(struct snd_pcm_substream *substream)
{
	struct snd_cs46xx *chip = snd_pcm_substream_chip(substream);

	if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(chip->pci),
				PAGE_SIZE, &chip->capt.hw_buf) < 0)
		return -ENOMEM;
	chip->capt.substream = substream;
	substream->runtime->hw = snd_cs46xx_capture;

	if (chip->accept_valid)
		substream->runtime->hw.info |= SNDRV_PCM_INFO_MMAP_VALID;

	chip->active_ctrl(chip, 1);

#ifdef CONFIG_SND_CS46XX_NEW_DSP
	snd_pcm_hw_constraint_list(substream->runtime, 0,
				   SNDRV_PCM_HW_PARAM_PERIOD_BYTES,
				   &hw_constraints_period_sizes);
#endif
	return 0;
}

/* Close a playback substream: tear down the DSP channel (new DSP), release
 * the on-card buffer, and drop the activity count. */
static int snd_cs46xx_playback_close(struct snd_pcm_substream *substream)
{
	struct snd_cs46xx *chip = snd_pcm_substream_chip(substream);
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct snd_cs46xx_pcm * cpcm;

	cpcm = runtime->private_data;

	/* when playback_open fails, then cpcm can be NULL */
	if (!cpcm) return -ENXIO;

#ifdef CONFIG_SND_CS46XX_NEW_DSP
	mutex_lock(&chip->spos_mutex);
	if (cpcm->pcm_channel) {
		cs46xx_dsp_destroy_pcm_channel(chip,cpcm->pcm_channel);
		cpcm->pcm_channel = NULL;
	}
	mutex_unlock(&chip->spos_mutex);
#else
	chip->playback_pcm = NULL;
#endif

	cpcm->substream = NULL;
	snd_dma_free_pages(&cpcm->hw_buf);
	chip->active_ctrl(chip, -1);

	return 0;
}

/* Close the capture substream: free the shared capture buffer. */
static int
snd_cs46xx_capture_close(struct snd_pcm_substream *substream)
{
	struct snd_cs46xx *chip = snd_pcm_substream_chip(substream);

	chip->capt.substream = NULL;
	snd_dma_free_pages(&chip->capt.hw_buf);
	chip->active_ctrl(chip, -1);

	return 0;
}

#ifdef CONFIG_SND_CS46XX_NEW_DSP
/* Per-channel ops tables: "direct" variants DMA straight from the on-card
 * buffer; "indirect" variants add the pcm_indirect copy (.ack callback). */
static struct snd_pcm_ops snd_cs46xx_playback_rear_ops = {
	.open =			snd_cs46xx_playback_open_rear,
	.close =		snd_cs46xx_playback_close,
	.ioctl =		snd_pcm_lib_ioctl,
	.hw_params =		snd_cs46xx_playback_hw_params,
	.hw_free =		snd_cs46xx_playback_hw_free,
	.prepare =		snd_cs46xx_playback_prepare,
	.trigger =		snd_cs46xx_playback_trigger,
	.pointer =		snd_cs46xx_playback_direct_pointer,
};

static struct snd_pcm_ops snd_cs46xx_playback_indirect_rear_ops = {
	.open =			snd_cs46xx_playback_open_rear,
	.close =		snd_cs46xx_playback_close,
	.ioctl =		snd_pcm_lib_ioctl,
	.hw_params =		snd_cs46xx_playback_hw_params,
	.hw_free =		snd_cs46xx_playback_hw_free,
	.prepare =		snd_cs46xx_playback_prepare,
	.trigger =		snd_cs46xx_playback_trigger,
	.pointer =		snd_cs46xx_playback_indirect_pointer,
	.ack =			snd_cs46xx_playback_transfer,
};

static struct snd_pcm_ops snd_cs46xx_playback_clfe_ops = {
	.open =			snd_cs46xx_playback_open_clfe,
	.close =		snd_cs46xx_playback_close,
	.ioctl =		snd_pcm_lib_ioctl,
	.hw_params =		snd_cs46xx_playback_hw_params,
	.hw_free =		snd_cs46xx_playback_hw_free,
	.prepare =		snd_cs46xx_playback_prepare,
	.trigger =		snd_cs46xx_playback_trigger,
	.pointer =		snd_cs46xx_playback_direct_pointer,
};

static struct snd_pcm_ops snd_cs46xx_playback_indirect_clfe_ops = {
	.open =			snd_cs46xx_playback_open_clfe,
	.close =		snd_cs46xx_playback_close,
	.ioctl =		snd_pcm_lib_ioctl,
	.hw_params =		snd_cs46xx_playback_hw_params,
	.hw_free =		snd_cs46xx_playback_hw_free,
	.prepare =		snd_cs46xx_playback_prepare,
	.trigger =		snd_cs46xx_playback_trigger,
	.pointer =		snd_cs46xx_playback_indirect_pointer,
	.ack =			snd_cs46xx_playback_transfer,
};

static struct snd_pcm_ops snd_cs46xx_playback_iec958_ops = {
	.open =			snd_cs46xx_playback_open_iec958,
	.close =
snd_pcm_lib_ioctl, .hw_params = snd_cs46xx_capture_hw_params, .hw_free = snd_cs46xx_capture_hw_free, .prepare = snd_cs46xx_capture_prepare, .trigger = snd_cs46xx_capture_trigger, .pointer = snd_cs46xx_capture_indirect_pointer, .ack = snd_cs46xx_capture_transfer, }; #ifdef CONFIG_SND_CS46XX_NEW_DSP #define MAX_PLAYBACK_CHANNELS (DSP_MAX_PCM_CHANNELS - 1) #else #define MAX_PLAYBACK_CHANNELS 1 #endif int __devinit snd_cs46xx_pcm(struct snd_cs46xx *chip, int device, struct snd_pcm ** rpcm) { struct snd_pcm *pcm; int err; if (rpcm) *rpcm = NULL; if ((err = snd_pcm_new(chip->card, "CS46xx", device, MAX_PLAYBACK_CHANNELS, 1, &pcm)) < 0) return err; pcm->private_data = chip; snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &snd_cs46xx_playback_ops); snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &snd_cs46xx_capture_ops); /* global setup */ pcm->info_flags = 0; strcpy(pcm->name, "CS46xx"); chip->pcm = pcm; snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(chip->pci), 64*1024, 256*1024); if (rpcm) *rpcm = pcm; return 0; } #ifdef CONFIG_SND_CS46XX_NEW_DSP int __devinit snd_cs46xx_pcm_rear(struct snd_cs46xx *chip, int device, struct snd_pcm ** rpcm) { struct snd_pcm *pcm; int err; if (rpcm) *rpcm = NULL; if ((err = snd_pcm_new(chip->card, "CS46xx - Rear", device, MAX_PLAYBACK_CHANNELS, 0, &pcm)) < 0) return err; pcm->private_data = chip; snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &snd_cs46xx_playback_rear_ops); /* global setup */ pcm->info_flags = 0; strcpy(pcm->name, "CS46xx - Rear"); chip->pcm_rear = pcm; snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(chip->pci), 64*1024, 256*1024); if (rpcm) *rpcm = pcm; return 0; } int __devinit snd_cs46xx_pcm_center_lfe(struct snd_cs46xx *chip, int device, struct snd_pcm ** rpcm) { struct snd_pcm *pcm; int err; if (rpcm) *rpcm = NULL; if ((err = snd_pcm_new(chip->card, "CS46xx - Center LFE", device, MAX_PLAYBACK_CHANNELS, 0, &pcm)) < 0) return err; pcm->private_data = 
chip; snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &snd_cs46xx_playback_clfe_ops); /* global setup */ pcm->info_flags = 0; strcpy(pcm->name, "CS46xx - Center LFE"); chip->pcm_center_lfe = pcm; snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(chip->pci), 64*1024, 256*1024); if (rpcm) *rpcm = pcm; return 0; } int __devinit snd_cs46xx_pcm_iec958(struct snd_cs46xx *chip, int device, struct snd_pcm ** rpcm) { struct snd_pcm *pcm; int err; if (rpcm) *rpcm = NULL; if ((err = snd_pcm_new(chip->card, "CS46xx - IEC958", device, 1, 0, &pcm)) < 0) return err; pcm->private_data = chip; snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &snd_cs46xx_playback_iec958_ops); /* global setup */ pcm->info_flags = 0; strcpy(pcm->name, "CS46xx - IEC958"); chip->pcm_rear = pcm; snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(chip->pci), 64*1024, 256*1024); if (rpcm) *rpcm = pcm; return 0; } #endif /* * Mixer routines */ static void snd_cs46xx_mixer_free_ac97_bus(struct snd_ac97_bus *bus) { struct snd_cs46xx *chip = bus->private_data; chip->ac97_bus = NULL; } static void snd_cs46xx_mixer_free_ac97(struct snd_ac97 *ac97) { struct snd_cs46xx *chip = ac97->private_data; if (snd_BUG_ON(ac97 != chip->ac97[CS46XX_PRIMARY_CODEC_INDEX] && ac97 != chip->ac97[CS46XX_SECONDARY_CODEC_INDEX])) return; if (ac97 == chip->ac97[CS46XX_PRIMARY_CODEC_INDEX]) { chip->ac97[CS46XX_PRIMARY_CODEC_INDEX] = NULL; chip->eapd_switch = NULL; } else chip->ac97[CS46XX_SECONDARY_CODEC_INDEX] = NULL; } static int snd_cs46xx_vol_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER; uinfo->count = 2; uinfo->value.integer.min = 0; uinfo->value.integer.max = 0x7fff; return 0; } static int snd_cs46xx_vol_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_cs46xx *chip = snd_kcontrol_chip(kcontrol); int reg = kcontrol->private_value; unsigned int val = snd_cs46xx_peek(chip, 
reg); ucontrol->value.integer.value[0] = 0xffff - (val >> 16); ucontrol->value.integer.value[1] = 0xffff - (val & 0xffff); return 0; } static int snd_cs46xx_vol_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_cs46xx *chip = snd_kcontrol_chip(kcontrol); int reg = kcontrol->private_value; unsigned int val = ((0xffff - ucontrol->value.integer.value[0]) << 16 | (0xffff - ucontrol->value.integer.value[1])); unsigned int old = snd_cs46xx_peek(chip, reg); int change = (old != val); if (change) { snd_cs46xx_poke(chip, reg, val); } return change; } #ifdef CONFIG_SND_CS46XX_NEW_DSP static int snd_cs46xx_vol_dac_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_cs46xx *chip = snd_kcontrol_chip(kcontrol); ucontrol->value.integer.value[0] = chip->dsp_spos_instance->dac_volume_left; ucontrol->value.integer.value[1] = chip->dsp_spos_instance->dac_volume_right; return 0; } static int snd_cs46xx_vol_dac_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_cs46xx *chip = snd_kcontrol_chip(kcontrol); int change = 0; if (chip->dsp_spos_instance->dac_volume_right != ucontrol->value.integer.value[0] || chip->dsp_spos_instance->dac_volume_left != ucontrol->value.integer.value[1]) { cs46xx_dsp_set_dac_volume(chip, ucontrol->value.integer.value[0], ucontrol->value.integer.value[1]); change = 1; } return change; } #if 0 static int snd_cs46xx_vol_iec958_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_cs46xx *chip = snd_kcontrol_chip(kcontrol); ucontrol->value.integer.value[0] = chip->dsp_spos_instance->spdif_input_volume_left; ucontrol->value.integer.value[1] = chip->dsp_spos_instance->spdif_input_volume_right; return 0; } static int snd_cs46xx_vol_iec958_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_cs46xx *chip = snd_kcontrol_chip(kcontrol); int change = 0; if (chip->dsp_spos_instance->spdif_input_volume_left != 
ucontrol->value.integer.value[0] || chip->dsp_spos_instance->spdif_input_volume_right!= ucontrol->value.integer.value[1]) { cs46xx_dsp_set_iec958_volume (chip, ucontrol->value.integer.value[0], ucontrol->value.integer.value[1]); change = 1; } return change; } #endif #define snd_mixer_boolean_info snd_ctl_boolean_mono_info static int snd_cs46xx_iec958_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_cs46xx *chip = snd_kcontrol_chip(kcontrol); int reg = kcontrol->private_value; if (reg == CS46XX_MIXER_SPDIF_OUTPUT_ELEMENT) ucontrol->value.integer.value[0] = (chip->dsp_spos_instance->spdif_status_out & DSP_SPDIF_STATUS_OUTPUT_ENABLED); else ucontrol->value.integer.value[0] = chip->dsp_spos_instance->spdif_status_in; return 0; } static int snd_cs46xx_iec958_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_cs46xx *chip = snd_kcontrol_chip(kcontrol); int change, res; switch (kcontrol->private_value) { case CS46XX_MIXER_SPDIF_OUTPUT_ELEMENT: mutex_lock(&chip->spos_mutex); change = (chip->dsp_spos_instance->spdif_status_out & DSP_SPDIF_STATUS_OUTPUT_ENABLED); if (ucontrol->value.integer.value[0] && !change) cs46xx_dsp_enable_spdif_out(chip); else if (change && !ucontrol->value.integer.value[0]) cs46xx_dsp_disable_spdif_out(chip); res = (change != (chip->dsp_spos_instance->spdif_status_out & DSP_SPDIF_STATUS_OUTPUT_ENABLED)); mutex_unlock(&chip->spos_mutex); break; case CS46XX_MIXER_SPDIF_INPUT_ELEMENT: change = chip->dsp_spos_instance->spdif_status_in; if (ucontrol->value.integer.value[0] && !change) { cs46xx_dsp_enable_spdif_in(chip); /* restore volume */ } else if (change && !ucontrol->value.integer.value[0]) cs46xx_dsp_disable_spdif_in(chip); res = (change != chip->dsp_spos_instance->spdif_status_in); break; default: res = -EINVAL; snd_BUG(); /* should never happen ... 
*/
	}
	return res;
}

/* Mixer "get": 1 if the DSP SPOS ADC capture input is currently wired up. */
static int snd_cs46xx_adc_capture_get(struct snd_kcontrol *kcontrol,
                                      struct snd_ctl_elem_value *ucontrol)
{
	struct snd_cs46xx *chip = snd_kcontrol_chip(kcontrol);
	struct dsp_spos_instance * ins = chip->dsp_spos_instance;

	if (ins->adc_input != NULL)
		ucontrol->value.integer.value[0] = 1;
	else
		ucontrol->value.integer.value[0] = 0;
	return 0;
}

/* Mixer "put": enable/disable ADC capture via the DSP; returns 1 on change. */
static int snd_cs46xx_adc_capture_put(struct snd_kcontrol *kcontrol,
                                      struct snd_ctl_elem_value *ucontrol)
{
	struct snd_cs46xx *chip = snd_kcontrol_chip(kcontrol);
	struct dsp_spos_instance * ins = chip->dsp_spos_instance;
	int change = 0;

	if (ucontrol->value.integer.value[0] && !ins->adc_input) {
		cs46xx_dsp_enable_adc_capture(chip);
		change = 1;
	} else if (!ucontrol->value.integer.value[0] && ins->adc_input) {
		cs46xx_dsp_disable_adc_capture(chip);
		change = 1;
	}
	return change;
}

/* Mixer "get": 1 if the DSP PCM (playback loopback) capture input is wired up. */
static int snd_cs46xx_pcm_capture_get(struct snd_kcontrol *kcontrol,
                                      struct snd_ctl_elem_value *ucontrol)
{
	struct snd_cs46xx *chip = snd_kcontrol_chip(kcontrol);
	struct dsp_spos_instance * ins = chip->dsp_spos_instance;

	if (ins->pcm_input != NULL)
		ucontrol->value.integer.value[0] = 1;
	else
		ucontrol->value.integer.value[0] = 0;
	return 0;
}

/* Mixer "put": enable/disable PCM capture via the DSP; returns 1 on change. */
static int snd_cs46xx_pcm_capture_put(struct snd_kcontrol *kcontrol,
                                      struct snd_ctl_elem_value *ucontrol)
{
	struct snd_cs46xx *chip = snd_kcontrol_chip(kcontrol);
	struct dsp_spos_instance * ins = chip->dsp_spos_instance;
	int change = 0;

	if (ucontrol->value.integer.value[0] && !ins->pcm_input) {
		cs46xx_dsp_enable_pcm_capture(chip);
		change = 1;
	} else if (!ucontrol->value.integer.value[0] && ins->pcm_input) {
		cs46xx_dsp_disable_pcm_capture(chip);
		change = 1;
	}
	return change;
}

/* Mixer "get": report the EGPIO0 direction bit as the optical/coaxial state. */
static int snd_herc_spdif_select_get(struct snd_kcontrol *kcontrol,
                                     struct snd_ctl_elem_value *ucontrol)
{
	struct snd_cs46xx *chip = snd_kcontrol_chip(kcontrol);
	int val1 = snd_cs46xx_peekBA0(chip, BA0_EGPIODR);

	if (val1 & EGPIODR_GPOE0)
		ucontrol->value.integer.value[0] = 1;
	else
		ucontrol->value.integer.value[0] = 0;
	return 0;
}

/*
 * Game Theatre XP card - EGPIO[0] is used to select SPDIF input optical or
 * coaxial.
 */
static int snd_herc_spdif_select_put(struct snd_kcontrol *kcontrol,
                                     struct snd_ctl_elem_value *ucontrol)
{
	struct snd_cs46xx *chip = snd_kcontrol_chip(kcontrol);
	int val1 = snd_cs46xx_peekBA0(chip, BA0_EGPIODR);
	int val2 = snd_cs46xx_peekBA0(chip, BA0_EGPIOPTR);

	if (ucontrol->value.integer.value[0]) {
		/* optical is default */
		snd_cs46xx_pokeBA0(chip, BA0_EGPIODR,
				   EGPIODR_GPOE0 | val1);  /* enable EGPIO0 output */
		snd_cs46xx_pokeBA0(chip, BA0_EGPIOPTR,
				   EGPIOPTR_GPPT0 | val2); /* open-drain on output */
	} else {
		/* coaxial */
		snd_cs46xx_pokeBA0(chip, BA0_EGPIODR,  val1 & ~EGPIODR_GPOE0); /* disable */
		snd_cs46xx_pokeBA0(chip, BA0_EGPIOPTR, val2 & ~EGPIOPTR_GPPT0); /* disable */
	}

	/* checking diff from the EGPIO direction register
	   should be enough */
	return (val1 != (int)snd_cs46xx_peekBA0(chip, BA0_EGPIODR));
}

/* IEC958 controls carry a single iec958-typed element. */
static int snd_cs46xx_spdif_info(struct snd_kcontrol *kcontrol,
                                 struct snd_ctl_elem_info *uinfo)
{
	uinfo->type = SNDRV_CTL_ELEM_TYPE_IEC958;
	uinfo->count = 1;
	return 0;
}

/* Report the default SPDIF channel-status bytes (bit-reversed per byte). */
static int snd_cs46xx_spdif_default_get(struct snd_kcontrol *kcontrol,
                                        struct snd_ctl_elem_value *ucontrol)
{
	struct snd_cs46xx *chip = snd_kcontrol_chip(kcontrol);
	struct dsp_spos_instance * ins = chip->dsp_spos_instance;

	mutex_lock(&chip->spos_mutex);
	ucontrol->value.iec958.status[0] = _wrap_all_bits((ins->spdif_csuv_default >> 24) & 0xff);
	ucontrol->value.iec958.status[1] = _wrap_all_bits((ins->spdif_csuv_default >> 16) & 0xff);
	ucontrol->value.iec958.status[2] = 0;
	ucontrol->value.iec958.status[3] = _wrap_all_bits((ins->spdif_csuv_default) & 0xff);
	mutex_unlock(&chip->spos_mutex);

	return 0;
}

/* Set the default SPDIF channel-status; applied immediately unless a
 * playback stream currently owns the SPDIF output. */
static int snd_cs46xx_spdif_default_put(struct snd_kcontrol *kcontrol,
                                        struct snd_ctl_elem_value *ucontrol)
{
	struct snd_cs46xx * chip = snd_kcontrol_chip(kcontrol);
	struct dsp_spos_instance * ins = chip->dsp_spos_instance;
	unsigned int val;
	int change;

	mutex_lock(&chip->spos_mutex);
	/* NOTE(review): bits 16-23 are taken from status[2] here, but the
	 * "get" above reports them in status[1] (and always zeroes status[2]);
	 * the stream variant below uses status[1].  Looks inconsistent - a
	 * read-modify-write through this control would drop byte 1.  Verify. */
	val = ((unsigned int)_wrap_all_bits(ucontrol->value.iec958.status[0]) << 24) |
	      ((unsigned int)_wrap_all_bits(ucontrol->value.iec958.status[2]) << 16) |
	      ((unsigned int)_wrap_all_bits(ucontrol->value.iec958.status[3])) |
	      /* left and right validity bit */
	      (1 << 13) | (1 << 12);
	change = (unsigned int)ins->spdif_csuv_default != val;
	ins->spdif_csuv_default = val;

	if ( !(ins->spdif_status_out & DSP_SPDIF_STATUS_PLAYBACK_OPEN) )
		cs46xx_poke_via_dsp (chip,SP_SPDOUT_CSUV,val);
	mutex_unlock(&chip->spos_mutex);

	return change;
}

/* Writable-bits mask for the SPDIF controls: byte 2 is not writable. */
static int snd_cs46xx_spdif_mask_get(struct snd_kcontrol *kcontrol,
                                     struct snd_ctl_elem_value *ucontrol)
{
	ucontrol->value.iec958.status[0] = 0xff;
	ucontrol->value.iec958.status[1] = 0xff;
	ucontrol->value.iec958.status[2] = 0x00;
	ucontrol->value.iec958.status[3] = 0xff;
	return 0;
}

/* Report the per-stream SPDIF channel-status bytes. */
static int snd_cs46xx_spdif_stream_get(struct snd_kcontrol *kcontrol,
                                       struct snd_ctl_elem_value *ucontrol)
{
	struct snd_cs46xx *chip = snd_kcontrol_chip(kcontrol);
	struct dsp_spos_instance * ins = chip->dsp_spos_instance;

	mutex_lock(&chip->spos_mutex);
	ucontrol->value.iec958.status[0] = _wrap_all_bits((ins->spdif_csuv_stream >> 24) & 0xff);
	ucontrol->value.iec958.status[1] = _wrap_all_bits((ins->spdif_csuv_stream >> 16) & 0xff);
	ucontrol->value.iec958.status[2] = 0;
	ucontrol->value.iec958.status[3] = _wrap_all_bits((ins->spdif_csuv_stream) & 0xff);
	mutex_unlock(&chip->spos_mutex);

	return 0;
}

/* Set the per-stream SPDIF channel-status; applied immediately while a
 * playback stream owns the SPDIF output. */
static int snd_cs46xx_spdif_stream_put(struct snd_kcontrol *kcontrol,
                                       struct snd_ctl_elem_value *ucontrol)
{
	struct snd_cs46xx * chip = snd_kcontrol_chip(kcontrol);
	struct dsp_spos_instance * ins = chip->dsp_spos_instance;
	unsigned int val;
	int change;

	mutex_lock(&chip->spos_mutex);
	val = ((unsigned int)_wrap_all_bits(ucontrol->value.iec958.status[0]) << 24) |
	      ((unsigned int)_wrap_all_bits(ucontrol->value.iec958.status[1]) << 16) |
	      ((unsigned int)_wrap_all_bits(ucontrol->value.iec958.status[3])) |
	      /* left and right validity bit */
	      (1 << 13) | (1 << 12);
	change = ins->spdif_csuv_stream != val;
	ins->spdif_csuv_stream = val;

	if ( ins->spdif_status_out & DSP_SPDIF_STATUS_PLAYBACK_OPEN )
		cs46xx_poke_via_dsp (chip,SP_SPDOUT_CSUV,val);
	mutex_unlock(&chip->spos_mutex);

	return change;
}

#endif /* CONFIG_SND_CS46XX_NEW_DSP */

/* Static mixer controls registered by snd_cs46xx_mixer(); the NEW_DSP
 * configuration swaps in DSP-based volume handlers and adds the capture
 * switches and IEC958 controls. */
static struct snd_kcontrol_new snd_cs46xx_controls[] __devinitdata = {
{
	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
	.name = "DAC Volume",
	.info = snd_cs46xx_vol_info,
#ifndef CONFIG_SND_CS46XX_NEW_DSP
	.get = snd_cs46xx_vol_get,
	.put = snd_cs46xx_vol_put,
	.private_value = BA1_PVOL,
#else
	.get = snd_cs46xx_vol_dac_get,
	.put = snd_cs46xx_vol_dac_put,
#endif
},
{
	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
	.name = "ADC Volume",
	.info = snd_cs46xx_vol_info,
	.get = snd_cs46xx_vol_get,
	.put = snd_cs46xx_vol_put,
#ifndef CONFIG_SND_CS46XX_NEW_DSP
	.private_value = BA1_CVOL,
#else
	.private_value = (VARIDECIMATE_SCB_ADDR + 0xE) << 2,
#endif
},
#ifdef CONFIG_SND_CS46XX_NEW_DSP
{
	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
	.name = "ADC Capture Switch",
	.info = snd_mixer_boolean_info,
	.get = snd_cs46xx_adc_capture_get,
	.put = snd_cs46xx_adc_capture_put
},
{
	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
	.name = "DAC Capture Switch",
	.info = snd_mixer_boolean_info,
	.get = snd_cs46xx_pcm_capture_get,
	.put = snd_cs46xx_pcm_capture_put
},
{
	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
	.name = SNDRV_CTL_NAME_IEC958("Output ",NONE,SWITCH),
	.info = snd_mixer_boolean_info,
	.get = snd_cs46xx_iec958_get,
	.put = snd_cs46xx_iec958_put,
	.private_value = CS46XX_MIXER_SPDIF_OUTPUT_ELEMENT,
},
{
	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
	.name = SNDRV_CTL_NAME_IEC958("Input ",NONE,SWITCH),
	.info = snd_mixer_boolean_info,
	.get = snd_cs46xx_iec958_get,
	.put = snd_cs46xx_iec958_put,
	.private_value = CS46XX_MIXER_SPDIF_INPUT_ELEMENT,
},
#if 0
/* Input IEC958 volume does not work for the moment. (Benny) */
{
	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
	.name = SNDRV_CTL_NAME_IEC958("Input ",NONE,VOLUME),
	.info = snd_cs46xx_vol_info,
	.get = snd_cs46xx_vol_iec958_get,
	.put = snd_cs46xx_vol_iec958_put,
	.private_value = (ASYNCRX_SCB_ADDR + 0xE) << 2,
},
#endif
{
	.iface = SNDRV_CTL_ELEM_IFACE_PCM,
	.name = SNDRV_CTL_NAME_IEC958("",PLAYBACK,DEFAULT),
	.info = snd_cs46xx_spdif_info,
	.get = snd_cs46xx_spdif_default_get,
	.put = snd_cs46xx_spdif_default_put,
},
{
	.iface = SNDRV_CTL_ELEM_IFACE_PCM,
	.name = SNDRV_CTL_NAME_IEC958("",PLAYBACK,MASK),
	.info = snd_cs46xx_spdif_info,
	.get = snd_cs46xx_spdif_mask_get,
	.access = SNDRV_CTL_ELEM_ACCESS_READ
},
{
	.iface = SNDRV_CTL_ELEM_IFACE_PCM,
	.name = SNDRV_CTL_NAME_IEC958("",PLAYBACK,PCM_STREAM),
	.info = snd_cs46xx_spdif_info,
	.get = snd_cs46xx_spdif_stream_get,
	.put = snd_cs46xx_spdif_stream_put
},
#endif
};

#ifdef CONFIG_SND_CS46XX_NEW_DSP
/* set primary cs4294 codec into Extended Audio Mode */
/* Mixer "get": bit 0x200 clear in AC97_CSR_ACMODE means "duplicate front". */
static int snd_cs46xx_front_dup_get(struct snd_kcontrol *kcontrol,
                                    struct snd_ctl_elem_value *ucontrol)
{
	struct snd_cs46xx *chip = snd_kcontrol_chip(kcontrol);
	unsigned short val;

	val = snd_ac97_read(chip->ac97[CS46XX_PRIMARY_CODEC_INDEX],
			    AC97_CSR_ACMODE);
	ucontrol->value.integer.value[0] = (val & 0x200) ? 0 : 1;
	return 0;
}

/* Mixer "put": clear/set the 0x200 mode bit (inverted polarity vs. control). */
static int snd_cs46xx_front_dup_put(struct snd_kcontrol *kcontrol,
                                    struct snd_ctl_elem_value *ucontrol)
{
	struct snd_cs46xx *chip = snd_kcontrol_chip(kcontrol);

	return snd_ac97_update_bits(chip->ac97[CS46XX_PRIMARY_CODEC_INDEX],
				    AC97_CSR_ACMODE, 0x200,
				    ucontrol->value.integer.value[0] ? 0 : 0x200);
}

static struct snd_kcontrol_new snd_cs46xx_front_dup_ctl = {
	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
	.name = "Duplicate Front",
	.info = snd_mixer_boolean_info,
	.get = snd_cs46xx_front_dup_get,
	.put = snd_cs46xx_front_dup_put,
};
#endif

#ifdef CONFIG_SND_CS46XX_NEW_DSP
/* Only available on the Hercules Game Theater XP soundcard */
static struct snd_kcontrol_new snd_hercules_controls[] = {
{
	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
	.name = "Optical/Coaxial SPDIF Input Switch",
	.info = snd_mixer_boolean_info,
	.get = snd_herc_spdif_select_get,
	.put = snd_herc_spdif_select_put,
},
};

/* AC'97 bus "reset" hook: reset the codec, select its operating mode and
 * poll (up to 1s) until its registers respond after the reset. */
static void snd_cs46xx_codec_reset (struct snd_ac97 * ac97)
{
	unsigned long end_time;
	int err;

	/* reset to defaults */
	snd_ac97_write(ac97, AC97_RESET, 0);

	/* set the desired CODEC mode */
	if (ac97->num == CS46XX_PRIMARY_CODEC_INDEX) {
		snd_printdd("cs46xx: CODEC1 mode %04x\n", 0x0);
		snd_cs46xx_ac97_write(ac97, AC97_CSR_ACMODE, 0x0);
	} else if (ac97->num == CS46XX_SECONDARY_CODEC_INDEX) {
		snd_printdd("cs46xx: CODEC2 mode %04x\n", 0x3);
		snd_cs46xx_ac97_write(ac97, AC97_CSR_ACMODE, 0x3);
	} else {
		snd_BUG(); /* should never happen ... */
	}

	udelay(50);

	/* it's necessary to wait awhile until registers are accessible after RESET */
	/* because the PCM or MASTER volume registers can be modified, */
	/* the REC_GAIN register is used for tests */
	end_time = jiffies + HZ;
	do {
		unsigned short ext_mid;

		/* use preliminary reads to settle the communication */
		snd_ac97_read(ac97, AC97_RESET);
		snd_ac97_read(ac97, AC97_VENDOR_ID1);
		snd_ac97_read(ac97, AC97_VENDOR_ID2);
		/* modem?
		 */
		ext_mid = snd_ac97_read(ac97, AC97_EXTENDED_MID);
		if (ext_mid != 0xffff && (ext_mid & 1) != 0)
			return;

		/* test if we can write to the record gain volume register */
		snd_ac97_write(ac97, AC97_REC_GAIN, 0x8a05);
		if ((err = snd_ac97_read(ac97, AC97_REC_GAIN)) == 0x8a05)
			return;

		msleep(10);
	} while (time_after_eq(end_time, jiffies));

	snd_printk(KERN_ERR "CS46xx secondary codec doesn't respond!\n");
}
#endif

/* Probe for the codec with the given index and, if present, attach an
 * AC'97 mixer for it.  Returns 0 on success, -ENXIO if not detected. */
static int __devinit cs46xx_detect_codec(struct snd_cs46xx *chip, int codec)
{
	int idx, err;
	struct snd_ac97_template ac97;

	memset(&ac97, 0, sizeof(ac97));
	ac97.private_data = chip;
	ac97.private_free = snd_cs46xx_mixer_free_ac97;
	ac97.num = codec;

	/* Voyetra boards have an inverted EAPD polarity. */
	if (chip->amplifier_ctrl == amp_voyetra)
		ac97.scaps = AC97_SCAP_INV_EAPD;

	if (codec == CS46XX_SECONDARY_CODEC_INDEX) {
		snd_cs46xx_codec_write(chip, AC97_RESET, 0, codec);
		udelay(10);
		if (snd_cs46xx_codec_read(chip, AC97_RESET, codec) & 0x8000) {
			snd_printdd("snd_cs46xx: seconadry codec not present\n");
			return -ENXIO;
		}
	}

	/* Poll (up to ~1s) for the codec echoing back a master-volume write. */
	snd_cs46xx_codec_write(chip, AC97_MASTER, 0x8000, codec);
	for (idx = 0; idx < 100; ++idx) {
		if (snd_cs46xx_codec_read(chip, AC97_MASTER, codec) == 0x8000) {
			err = snd_ac97_mixer(chip->ac97_bus, &ac97, &chip->ac97[codec]);
			return err;
		}
		msleep(10);
	}
	snd_printdd("snd_cs46xx: codec %d detection timeout\n", codec);
	return -ENXIO;
}

/* Create the AC'97 bus, detect the codec(s) and register all mixer
 * controls.  spdif_device is the control device number assigned to the
 * PCM-interface IEC958 controls. */
int __devinit snd_cs46xx_mixer(struct snd_cs46xx *chip, int spdif_device)
{
	struct snd_card *card = chip->card;
	struct snd_ctl_elem_id id;
	int err;
	unsigned int idx;
	static struct snd_ac97_bus_ops ops = {
#ifdef CONFIG_SND_CS46XX_NEW_DSP
		.reset = snd_cs46xx_codec_reset,
#endif
		.write = snd_cs46xx_ac97_write,
		.read = snd_cs46xx_ac97_read,
	};

	/* detect primary codec */
	chip->nr_ac97_codecs = 0;
	snd_printdd("snd_cs46xx: detecting primary codec\n");
	if ((err = snd_ac97_bus(card, 0, &ops, chip, &chip->ac97_bus)) < 0)
		return err;
	chip->ac97_bus->private_free = snd_cs46xx_mixer_free_ac97_bus;

	if (cs46xx_detect_codec(chip, CS46XX_PRIMARY_CODEC_INDEX) < 0)
		return -ENXIO;
	chip->nr_ac97_codecs = 1;

#ifdef CONFIG_SND_CS46XX_NEW_DSP
	snd_printdd("snd_cs46xx: detecting seconadry codec\n");
	/* try detect a secondary codec */
	if (! cs46xx_detect_codec(chip, CS46XX_SECONDARY_CODEC_INDEX))
		chip->nr_ac97_codecs = 2;
#endif /* CONFIG_SND_CS46XX_NEW_DSP */

	/* add cs4630 mixer controls */
	for (idx = 0; idx < ARRAY_SIZE(snd_cs46xx_controls); idx++) {
		struct snd_kcontrol *kctl;
		kctl = snd_ctl_new1(&snd_cs46xx_controls[idx], chip);
		if (kctl && kctl->id.iface == SNDRV_CTL_ELEM_IFACE_PCM)
			kctl->id.device = spdif_device;
		if ((err = snd_ctl_add(card, kctl)) < 0)
			return err;
	}

	/* get EAPD mixer switch (for voyetra hack) */
	memset(&id, 0, sizeof(id));
	id.iface = SNDRV_CTL_ELEM_IFACE_MIXER;
	strcpy(id.name, "External Amplifier");
	chip->eapd_switch = snd_ctl_find_id(chip->card, &id);

#ifdef CONFIG_SND_CS46XX_NEW_DSP
	if (chip->nr_ac97_codecs == 1) {
		unsigned int id2 = chip->ac97[CS46XX_PRIMARY_CODEC_INDEX]->id & 0xffff;
		/* 0x592b/0x592d identify codecs with the "Duplicate Front" mode. */
		if (id2 == 0x592b || id2 == 0x592d) {
			err = snd_ctl_add(card, snd_ctl_new1(&snd_cs46xx_front_dup_ctl, chip));
			if (err < 0)
				return err;
			snd_ac97_write_cache(chip->ac97[CS46XX_PRIMARY_CODEC_INDEX],
					     AC97_CSR_ACMODE, 0x200);
		}
	}

	/* do soundcard specific mixer setup */
	if (chip->mixer_init) {
		snd_printdd ("calling chip->mixer_init(chip);\n");
		chip->mixer_init(chip);
	}
#endif

	/* turn on amplifier */
	chip->amplifier_ctrl(chip, 1);

	return 0;
}

/*
 *  RawMIDI interface
 */

/* Pulse the MIDI reset bit, then restore the cached control value. */
static void snd_cs46xx_midi_reset(struct snd_cs46xx *chip)
{
	snd_cs46xx_pokeBA0(chip, BA0_MIDCR, MIDCR_MRST);
	udelay(100);
	snd_cs46xx_pokeBA0(chip, BA0_MIDCR, chip->midcr);
}

/* Open MIDI input: enable RX; full UART reset only if output isn't open. */
static int snd_cs46xx_midi_input_open(struct snd_rawmidi_substream *substream)
{
	struct snd_cs46xx *chip = substream->rmidi->private_data;

	chip->active_ctrl(chip, 1);
	spin_lock_irq(&chip->reg_lock);
	chip->uartm |= CS46XX_MODE_INPUT;
	chip->midcr |= MIDCR_RXE;
	chip->midi_input = substream;
	if (!(chip->uartm & CS46XX_MODE_OUTPUT)) {
		snd_cs46xx_midi_reset(chip);
	} else {
		snd_cs46xx_pokeBA0(chip, BA0_MIDCR, chip->midcr);
	}
	spin_unlock_irq(&chip->reg_lock);
	return 0;
}

/* Close MIDI input: disable RX and its interrupt; reset unless output open. */
static int snd_cs46xx_midi_input_close(struct snd_rawmidi_substream *substream)
{
	struct snd_cs46xx *chip = substream->rmidi->private_data;

	spin_lock_irq(&chip->reg_lock);
	chip->midcr &= ~(MIDCR_RXE | MIDCR_RIE);
	chip->midi_input = NULL;
	if (!(chip->uartm & CS46XX_MODE_OUTPUT)) {
		snd_cs46xx_midi_reset(chip);
	} else {
		snd_cs46xx_pokeBA0(chip, BA0_MIDCR, chip->midcr);
	}
	chip->uartm &= ~CS46XX_MODE_INPUT;
	spin_unlock_irq(&chip->reg_lock);
	chip->active_ctrl(chip, -1);
	return 0;
}

/* Open MIDI output: enable TX; full UART reset only if input isn't open. */
static int snd_cs46xx_midi_output_open(struct snd_rawmidi_substream *substream)
{
	struct snd_cs46xx *chip = substream->rmidi->private_data;

	chip->active_ctrl(chip, 1);
	spin_lock_irq(&chip->reg_lock);
	chip->uartm |= CS46XX_MODE_OUTPUT;
	chip->midcr |= MIDCR_TXE;
	chip->midi_output = substream;
	if (!(chip->uartm & CS46XX_MODE_INPUT)) {
		snd_cs46xx_midi_reset(chip);
	} else {
		snd_cs46xx_pokeBA0(chip, BA0_MIDCR, chip->midcr);
	}
	spin_unlock_irq(&chip->reg_lock);
	return 0;
}

/* Close MIDI output: disable TX and its interrupt; reset unless input open. */
static int snd_cs46xx_midi_output_close(struct snd_rawmidi_substream *substream)
{
	struct snd_cs46xx *chip = substream->rmidi->private_data;

	spin_lock_irq(&chip->reg_lock);
	chip->midcr &= ~(MIDCR_TXE | MIDCR_TIE);
	chip->midi_output = NULL;
	if (!(chip->uartm & CS46XX_MODE_INPUT)) {
		snd_cs46xx_midi_reset(chip);
	} else {
		snd_cs46xx_pokeBA0(chip, BA0_MIDCR, chip->midcr);
	}
	chip->uartm &= ~CS46XX_MODE_OUTPUT;
	spin_unlock_irq(&chip->reg_lock);
	chip->active_ctrl(chip, -1);
	return 0;
}

/* Trigger MIDI input: toggle the RX interrupt enable bit. */
static void snd_cs46xx_midi_input_trigger(struct snd_rawmidi_substream *substream, int up)
{
	unsigned long flags;
	struct snd_cs46xx *chip = substream->rmidi->private_data;

	spin_lock_irqsave(&chip->reg_lock, flags);
	if (up) {
		if ((chip->midcr & MIDCR_RIE) == 0) {
			chip->midcr |= MIDCR_RIE;
			snd_cs46xx_pokeBA0(chip, BA0_MIDCR, chip->midcr);
		}
	} else {
		if (chip->midcr & MIDCR_RIE) {
			chip->midcr &= ~MIDCR_RIE;
			snd_cs46xx_pokeBA0(chip, BA0_MIDCR, chip->midcr);
		}
	}
	spin_unlock_irqrestore(&chip->reg_lock,
			       flags);
}

/* Trigger MIDI output: on "up", prime the UART TX FIFO from the rawmidi
 * buffer and only leave the TX interrupt enabled if data remains. */
static void snd_cs46xx_midi_output_trigger(struct snd_rawmidi_substream *substream, int up)
{
	unsigned long flags;
	struct snd_cs46xx *chip = substream->rmidi->private_data;
	unsigned char byte;

	spin_lock_irqsave(&chip->reg_lock, flags);
	if (up) {
		if ((chip->midcr & MIDCR_TIE) == 0) {
			chip->midcr |= MIDCR_TIE;
			/* fill UART FIFO buffer at first, and turn Tx interrupts only if necessary */
			while ((chip->midcr & MIDCR_TIE) &&
			       (snd_cs46xx_peekBA0(chip, BA0_MIDSR) & MIDSR_TBF) == 0) {
				if (snd_rawmidi_transmit(substream, &byte, 1) != 1) {
					chip->midcr &= ~MIDCR_TIE;
				} else {
					snd_cs46xx_pokeBA0(chip, BA0_MIDWP, byte);
				}
			}
			snd_cs46xx_pokeBA0(chip, BA0_MIDCR, chip->midcr);
		}
	} else {
		if (chip->midcr & MIDCR_TIE) {
			chip->midcr &= ~MIDCR_TIE;
			snd_cs46xx_pokeBA0(chip, BA0_MIDCR, chip->midcr);
		}
	}
	spin_unlock_irqrestore(&chip->reg_lock, flags);
}

static struct snd_rawmidi_ops snd_cs46xx_midi_output = {
	.open = snd_cs46xx_midi_output_open,
	.close = snd_cs46xx_midi_output_close,
	.trigger = snd_cs46xx_midi_output_trigger,
};

static struct snd_rawmidi_ops snd_cs46xx_midi_input = {
	.open = snd_cs46xx_midi_input_open,
	.close = snd_cs46xx_midi_input_close,
	.trigger = snd_cs46xx_midi_input_trigger,
};

/* Create and register the duplex rawmidi device for this chip. */
int __devinit snd_cs46xx_midi(struct snd_cs46xx *chip, int device, struct snd_rawmidi **rrawmidi)
{
	struct snd_rawmidi *rmidi;
	int err;

	if (rrawmidi)
		*rrawmidi = NULL;
	if ((err = snd_rawmidi_new(chip->card, "CS46XX", device, 1, 1, &rmidi)) < 0)
		return err;
	strcpy(rmidi->name, "CS46XX");
	snd_rawmidi_set_ops(rmidi, SNDRV_RAWMIDI_STREAM_OUTPUT, &snd_cs46xx_midi_output);
	snd_rawmidi_set_ops(rmidi, SNDRV_RAWMIDI_STREAM_INPUT, &snd_cs46xx_midi_input);
	rmidi->info_flags |= SNDRV_RAWMIDI_INFO_OUTPUT | SNDRV_RAWMIDI_INFO_INPUT | SNDRV_RAWMIDI_INFO_DUPLEX;
	rmidi->private_data = chip;
	chip->rmidi = rmidi;
	/* NOTE(review): this stores NULL into *rrawmidi on success, so the
	 * caller never receives the created device (harmless only if all
	 * callers pass NULL or use chip->rmidi).  Looks like it was meant to
	 * be "*rrawmidi = rmidi;" - verify against callers. */
	if (rrawmidi)
		*rrawmidi = NULL;
	return 0;
}

/*
 * gameport interface
 */

#if defined(CONFIG_GAMEPORT) || (defined(MODULE) && defined(CONFIG_GAMEPORT_MODULE))
/* Start a joystick position measurement by writing the trigger port. */
static void snd_cs46xx_gameport_trigger(struct gameport *gameport)
{
	struct snd_cs46xx *chip = gameport_get_port_data(gameport);

	if (snd_BUG_ON(!chip))
		return;
	snd_cs46xx_pokeBA0(chip, BA0_JSPT, 0xFF);  //outb(gameport->io, 0xFF);
}

/* Raw read of the joystick button/timer port. */
static unsigned char snd_cs46xx_gameport_read(struct gameport *gameport)
{
	struct snd_cs46xx *chip = gameport_get_port_data(gameport);

	if (snd_BUG_ON(!chip))
		return 0;
	return snd_cs46xx_peekBA0(chip, BA0_JSPT); //inb(gameport->io);
}

/* Cooked read: decode axes from the JSC1/JSC2 coordinate registers and
 * buttons from JSPT; 0xFFFF axis values are reported as "absent" (-1). */
static int snd_cs46xx_gameport_cooked_read(struct gameport *gameport, int *axes, int *buttons)
{
	struct snd_cs46xx *chip = gameport_get_port_data(gameport);
	unsigned js1, js2, jst;

	if (snd_BUG_ON(!chip))
		return 0;

	js1 = snd_cs46xx_peekBA0(chip, BA0_JSC1);
	js2 = snd_cs46xx_peekBA0(chip, BA0_JSC2);
	jst = snd_cs46xx_peekBA0(chip, BA0_JSPT);

	*buttons = (~jst >> 4) & 0x0F;

	axes[0] = ((js1 & JSC1_Y1V_MASK) >> JSC1_Y1V_SHIFT) & 0xFFFF;
	axes[1] = ((js1 & JSC1_X1V_MASK) >> JSC1_X1V_SHIFT) & 0xFFFF;
	axes[2] = ((js2 & JSC2_Y2V_MASK) >> JSC2_Y2V_SHIFT) & 0xFFFF;
	axes[3] = ((js2 & JSC2_X2V_MASK) >> JSC2_X2V_SHIFT) & 0xFFFF;

	for(jst=0;jst<4;++jst)
		if(axes[jst]==0xFFFF) axes[jst] = -1;
	return 0;
}

/* Both raw and cooked modes are supported; anything else is rejected. */
static int snd_cs46xx_gameport_open(struct gameport *gameport, int mode)
{
	switch (mode) {
	case GAMEPORT_MODE_COOKED:
		return 0;
	case GAMEPORT_MODE_RAW:
		return 0;
	default:
		return -1;
	}
	return 0;
}

/* Allocate, configure and register the gameport for this chip. */
int __devinit snd_cs46xx_gameport(struct snd_cs46xx *chip)
{
	struct gameport *gp;

	chip->gameport = gp = gameport_allocate_port();
	if (!gp) {
		printk(KERN_ERR "cs46xx: cannot allocate memory for gameport\n");
		return -ENOMEM;
	}

	gameport_set_name(gp, "CS46xx Gameport");
	gameport_set_phys(gp, "pci%s/gameport0", pci_name(chip->pci));
	gameport_set_dev_parent(gp, &chip->pci->dev);
	gameport_set_port_data(gp, chip);

	gp->open = snd_cs46xx_gameport_open;
	gp->read = snd_cs46xx_gameport_read;
	gp->trigger = snd_cs46xx_gameport_trigger;
	gp->cooked_read = snd_cs46xx_gameport_cooked_read;

	snd_cs46xx_pokeBA0(chip, BA0_JSIO, 0xFF); // ?
	snd_cs46xx_pokeBA0(chip, BA0_JSCTL, JSCTL_SP_MEDIUM_SLOW);

	gameport_register_port(gp);

	return 0;
}

static inline void snd_cs46xx_remove_gameport(struct snd_cs46xx *chip)
{
	if (chip->gameport) {
		gameport_unregister_port(chip->gameport);
		chip->gameport = NULL;
	}
}
#else
int __devinit snd_cs46xx_gameport(struct snd_cs46xx *chip) { return -ENOSYS; }
static inline void snd_cs46xx_remove_gameport(struct snd_cs46xx *chip) { }
#endif /* CONFIG_GAMEPORT */

#ifdef CONFIG_PROC_FS
/*
 *  proc interface
 */

/* proc read handler: copy from the region's MMIO mapping at offset "pos".
 * NOTE(review): this casts entry->private_data to a region pointer, but
 * snd_cs46xx_proc_init() below stores the chip pointer there - verify
 * which side is wrong before relying on this interface. */
static ssize_t snd_cs46xx_io_read(struct snd_info_entry *entry,
				  void *file_private_data,
				  struct file *file, char __user *buf,
				  size_t count, loff_t pos)
{
	struct snd_cs46xx_region *region = entry->private_data;

	if (copy_to_user_fromio(buf, region->remap_addr + pos, count))
		return -EFAULT;
	return count;
}

static struct snd_info_entry_ops snd_cs46xx_proc_io_ops = {
	.read = snd_cs46xx_io_read,
};

/* Create one data proc entry per BA0/BA1 memory region (5 total). */
static int __devinit snd_cs46xx_proc_init(struct snd_card *card, struct snd_cs46xx *chip)
{
	struct snd_info_entry *entry;
	int idx;

	for (idx = 0; idx < 5; idx++) {
		struct snd_cs46xx_region *region = &chip->region.idx[idx];
		if (!
		    snd_card_proc_new(card, region->name, &entry)) {
			entry->content = SNDRV_INFO_CONTENT_DATA;
			/* NOTE(review): stores the chip pointer, while the read
			 * op casts private_data to struct snd_cs46xx_region * -
			 * looks mismatched; verify. */
			entry->private_data = chip;
			entry->c.ops = &snd_cs46xx_proc_io_ops;
			entry->size = region->size;
			entry->mode = S_IFREG | S_IRUSR;
		}
	}
#ifdef CONFIG_SND_CS46XX_NEW_DSP
	cs46xx_dsp_proc_init(card, chip);
#endif
	return 0;
}

/* Tear down the DSP proc entries (region entries are freed with the card). */
static int snd_cs46xx_proc_done(struct snd_cs46xx *chip)
{
#ifdef CONFIG_SND_CS46XX_NEW_DSP
	cs46xx_dsp_proc_done(chip);
#endif
	return 0;
}
#else /* !CONFIG_PROC_FS */
#define snd_cs46xx_proc_init(card, chip)
#define snd_cs46xx_proc_done(chip)
#endif

/*
 * stop the h/w
 */
static void snd_cs46xx_hw_stop(struct snd_cs46xx *chip)
{
	unsigned int tmp;

	tmp = snd_cs46xx_peek(chip, BA1_PFIE);
	tmp &= ~0x0000f03f;
	tmp |= 0x00000010;
	snd_cs46xx_poke(chip, BA1_PFIE, tmp);	/* playback interrupt disable */

	tmp = snd_cs46xx_peek(chip, BA1_CIE);
	tmp &= ~0x0000003f;
	tmp |= 0x00000011;
	snd_cs46xx_poke(chip, BA1_CIE, tmp);	/* capture interrupt disable */

	/*
	 *  Stop playback DMA.
	 */
	tmp = snd_cs46xx_peek(chip, BA1_PCTL);
	snd_cs46xx_poke(chip, BA1_PCTL, tmp & 0x0000ffff);

	/*
	 *  Stop capture DMA.
	 */
	tmp = snd_cs46xx_peek(chip, BA1_CCTL);
	snd_cs46xx_poke(chip, BA1_CCTL, tmp & 0xffff0000);

	/*
	 *  Reset the processor.
	 */
	snd_cs46xx_reset(chip);

	snd_cs46xx_proc_stop(chip);

	/*
	 *  Power down the PLL.
	 */
	snd_cs46xx_pokeBA0(chip, BA0_CLKCR1, 0);

	/*
	 *  Turn off the Processor by turning off the software clock enable flag in
	 *  the clock control register.
	 */
	tmp = snd_cs46xx_peekBA0(chip, BA0_CLKCR1) & ~CLKCR1_SWCE;
	snd_cs46xx_pokeBA0(chip, BA0_CLKCR1, tmp);
}

/* Release every resource owned by the chip instance: gameport, amplifier,
 * proc entries, hardware state, IRQ, memory regions, DSP image and the
 * chip structure itself. */
static int snd_cs46xx_free(struct snd_cs46xx *chip)
{
	int idx;

	if (snd_BUG_ON(!chip))
		return -EINVAL;

	if (chip->active_ctrl)
		chip->active_ctrl(chip, 1);

	snd_cs46xx_remove_gameport(chip);

	if (chip->amplifier_ctrl)
		chip->amplifier_ctrl(chip, -chip->amplifier); /* force to off */

	snd_cs46xx_proc_done(chip);

	/* Only stop the hardware if the first region was actually claimed. */
	if (chip->region.idx[0].resource)
		snd_cs46xx_hw_stop(chip);

	if (chip->irq >= 0)
		free_irq(chip->irq, chip);

	if (chip->active_ctrl)
		chip->active_ctrl(chip, -chip->amplifier);

	for (idx = 0; idx < 5; idx++) {
		struct snd_cs46xx_region *region = &chip->region.idx[idx];
		if (region->remap_addr)
			iounmap(region->remap_addr);
		release_and_free_resource(region->resource);
	}

#ifdef CONFIG_SND_CS46XX_NEW_DSP
	if (chip->dsp_spos_instance) {
		cs46xx_dsp_spos_destroy(chip);
		chip->dsp_spos_instance = NULL;
	}
#endif

#ifdef CONFIG_PM
	kfree(chip->saved_regs);
#endif

	pci_disable_device(chip->pci);
	kfree(chip);
	return 0;
}

/* snd_device dtor hook: forwards to snd_cs46xx_free(). */
static int snd_cs46xx_dev_free(struct snd_device *device)
{
	struct snd_cs46xx *chip = device->device_data;
	return snd_cs46xx_free(chip);
}

/*
 *  initialize chip
 */
static int snd_cs46xx_chip_init(struct snd_cs46xx *chip)
{
	int timeout;

	/*
	 *  First, blast the clock control register to zero so that the PLL starts
	 *  out in a known state, and blast the master serial port control register
	 *  to zero so that the serial ports also start out in a known state.
	 */
	snd_cs46xx_pokeBA0(chip, BA0_CLKCR1, 0);
	snd_cs46xx_pokeBA0(chip, BA0_SERMC1, 0);

	/*
	 *  If we are in AC97 mode, then we must set the part to a host controlled
	 *  AC-link.  Otherwise, we won't be able to bring up the link.
	 */
#ifdef CONFIG_SND_CS46XX_NEW_DSP
	snd_cs46xx_pokeBA0(chip, BA0_SERACC, SERACC_HSP | SERACC_CHIP_TYPE_2_0 |
			   SERACC_TWO_CODECS);	/* 2.00 dual codecs */
	/* snd_cs46xx_pokeBA0(chip, BA0_SERACC, SERACC_HSP | SERACC_CHIP_TYPE_2_0); */ /* 2.00 codec */
#else
	snd_cs46xx_pokeBA0(chip, BA0_SERACC, SERACC_HSP | SERACC_CHIP_TYPE_1_03); /* 1.03 codec */
#endif

	/*
	 *  Drive the ARST# pin low for a minimum of 1uS (as defined in the AC97
	 *  spec) and then drive it high.  This is done for non AC97 modes since
	 *  there might be logic external to the CS461x that uses the ARST# line
	 *  for a reset.
	 */
	snd_cs46xx_pokeBA0(chip, BA0_ACCTL, 0);
#ifdef CONFIG_SND_CS46XX_NEW_DSP
	snd_cs46xx_pokeBA0(chip, BA0_ACCTL2, 0);
#endif
	udelay(50);
	snd_cs46xx_pokeBA0(chip, BA0_ACCTL, ACCTL_RSTN);
#ifdef CONFIG_SND_CS46XX_NEW_DSP
	snd_cs46xx_pokeBA0(chip, BA0_ACCTL2, ACCTL_RSTN);
#endif

	/*
	 *  The first thing we do here is to enable sync generation.  As soon
	 *  as we start receiving bit clock, we'll start producing the SYNC
	 *  signal.
	 */
	snd_cs46xx_pokeBA0(chip, BA0_ACCTL, ACCTL_ESYN | ACCTL_RSTN);
#ifdef CONFIG_SND_CS46XX_NEW_DSP
	snd_cs46xx_pokeBA0(chip, BA0_ACCTL2, ACCTL_ESYN | ACCTL_RSTN);
#endif

	/*
	 *  Now wait for a short while to allow the AC97 part to start
	 *  generating bit clock (so we don't try to start the PLL without an
	 *  input clock).
	 */
	mdelay(10);

	/*
	 *  Set the serial port timing configuration, so that
	 *  the clock control circuit gets its clock from the correct place.
	 */
	snd_cs46xx_pokeBA0(chip, BA0_SERMC1, SERMC1_PTC_AC97);

	/*
	 *  Write the selected clock control setup to the hardware.  Do not turn on
	 *  SWCE yet (if requested), so that the devices clocked by the output of
	 *  PLL are not clocked until the PLL is stable.
	 */
	snd_cs46xx_pokeBA0(chip, BA0_PLLCC, PLLCC_LPF_1050_2780_KHZ | PLLCC_CDR_73_104_MHZ);
	snd_cs46xx_pokeBA0(chip, BA0_PLLM, 0x3a);
	snd_cs46xx_pokeBA0(chip, BA0_CLKCR2, CLKCR2_PDIVS_8);

	/*
	 *  Power up the PLL.
	 */
	snd_cs46xx_pokeBA0(chip, BA0_CLKCR1, CLKCR1_PLLP);

	/*
	 *  Wait until the PLL has stabilized.
	 */
	msleep(100);

	/*
	 *  Turn on clocking of the core so that we can setup the serial ports.
	 */
	snd_cs46xx_pokeBA0(chip, BA0_CLKCR1, CLKCR1_PLLP | CLKCR1_SWCE);

	/*
	 * Enable FIFO Host Bypass
	 */
	snd_cs46xx_pokeBA0(chip, BA0_SERBCF, SERBCF_HBP);

	/*
	 *  Fill the serial port FIFOs with silence.
	 */
	snd_cs46xx_clear_serial_FIFOs(chip);

	/*
	 *  Set the serial port FIFO pointer to the first sample in the FIFO.
	 */
	/* snd_cs46xx_pokeBA0(chip, BA0_SERBSP, 0); */

	/*
	 *  Write the serial port configuration to the part.  The master
	 *  enable bit is not set until all other values have been written.
	 */
	snd_cs46xx_pokeBA0(chip, BA0_SERC1, SERC1_SO1F_AC97 | SERC1_SO1EN);
	/* NOTE(review): the serial *input* register SERC2 is enabled with the
	 * SERC1 output-enable constant; correct only if the SERC2 input-enable
	 * bit has the same value - verify against the register definitions. */
	snd_cs46xx_pokeBA0(chip, BA0_SERC2, SERC2_SI1F_AC97 | SERC1_SO1EN);
	snd_cs46xx_pokeBA0(chip, BA0_SERMC1, SERMC1_PTC_AC97 | SERMC1_MSPE);

#ifdef CONFIG_SND_CS46XX_NEW_DSP
	snd_cs46xx_pokeBA0(chip, BA0_SERC7, SERC7_ASDI2EN);
	snd_cs46xx_pokeBA0(chip, BA0_SERC3, 0);
	snd_cs46xx_pokeBA0(chip, BA0_SERC4, 0);
	snd_cs46xx_pokeBA0(chip, BA0_SERC5, 0);
	snd_cs46xx_pokeBA0(chip, BA0_SERC6, 1);
#endif

	mdelay(5);

	/*
	 * Wait for the codec ready signal from the AC97 codec.
	 */
	timeout = 150;
	while (timeout-- > 0) {
		/*
		 *  Read the AC97 status register to see if we've seen a CODEC READY
		 *  signal from the AC97 codec.
		 */
		if (snd_cs46xx_peekBA0(chip, BA0_ACSTS) & ACSTS_CRDY)
			goto ok1;
		msleep(10);
	}

	snd_printk(KERN_ERR "create - never read codec ready from AC'97\n");
	snd_printk(KERN_ERR "it is not probably bug, try to use CS4236 driver\n");
	return -EIO;
 ok1:
#ifdef CONFIG_SND_CS46XX_NEW_DSP
	{
		int count;
		for (count = 0; count < 150; count++) {
			/* First, we want to wait for a short time. */
			udelay(25);

			if (snd_cs46xx_peekBA0(chip, BA0_ACSTS2) & ACSTS_CRDY)
				break;
		}

		/*
		 *  Make sure CODEC is READY.
		 */
		if (!(snd_cs46xx_peekBA0(chip, BA0_ACSTS2) & ACSTS_CRDY))
			snd_printdd("cs46xx: never read card ready from secondary AC'97\n");
	}
#endif

	/*
	 *  Assert the vaid frame signal so that we can start sending commands
	 *  to the AC97 codec.
	 */
	snd_cs46xx_pokeBA0(chip, BA0_ACCTL, ACCTL_VFRM | ACCTL_ESYN | ACCTL_RSTN);
#ifdef CONFIG_SND_CS46XX_NEW_DSP
	snd_cs46xx_pokeBA0(chip, BA0_ACCTL2, ACCTL_VFRM | ACCTL_ESYN | ACCTL_RSTN);
#endif

	/*
	 *  Wait until we've sampled input slots 3 and 4 as valid, meaning that
	 *  the codec is pumping ADC data across the AC-link.
	 */
	timeout = 150;
	while (timeout-- > 0) {
		/*
		 *  Read the input slot valid register and see if input slots 3 and
		 *  4 are valid yet.
		 */
		if ((snd_cs46xx_peekBA0(chip, BA0_ACISV) & (ACISV_ISV3 | ACISV_ISV4)) == (ACISV_ISV3 | ACISV_ISV4))
			goto ok2;
		msleep(10);
	}

#ifndef CONFIG_SND_CS46XX_NEW_DSP
	snd_printk(KERN_ERR "create - never read ISV3 & ISV4 from AC'97\n");
	return -EIO;
#else
	/* This may happen on a cold boot with a Terratec SiXPack 5.1.
	   Reloading the driver may help, if there's other soundcards
	   with the same problem I would like to know. (Benny) */

	snd_printk(KERN_ERR "ERROR: snd-cs46xx: never read ISV3 & ISV4 from AC'97\n");
	snd_printk(KERN_ERR " Try reloading the ALSA driver, if you find something\n");
	snd_printk(KERN_ERR " broken or not working on your soundcard upon\n");
	snd_printk(KERN_ERR " this message please report to alsa-devel@alsa-project.org\n");

	return -EIO;
#endif
 ok2:

	/*
	 *  Now, assert valid frame and the slot 3 and 4 valid bits.  This will
	 *  commense the transfer of digital audio data to the AC97 codec.
	 */
	snd_cs46xx_pokeBA0(chip, BA0_ACOSV, ACOSV_SLV3 | ACOSV_SLV4);

	/*
	 *  Power down the DAC and ADC.  We will power them up (if) when we need
	 *  them.
	 */
	/* snd_cs46xx_pokeBA0(chip, BA0_AC97_POWERDOWN, 0x300); */

	/*
	 *  Turn off the Processor by turning off the software clock enable flag in
	 *  the clock control register.
	 */
	/* tmp = snd_cs46xx_peekBA0(chip, BA0_CLKCR1) & ~CLKCR1_SWCE; */
	/* snd_cs46xx_pokeBA0(chip, BA0_CLKCR1, tmp); */

	return 0;
}

/*
 *  start and load DSP
 */

/* Enable host interrupts plus the playback/capture stream interrupts. */
static void cs46xx_enable_stream_irqs(struct snd_cs46xx *chip)
{
	unsigned int tmp;

	snd_cs46xx_pokeBA0(chip, BA0_HICR, HICR_IEV | HICR_CHGM);

	tmp = snd_cs46xx_peek(chip, BA1_PFIE);
	tmp &= ~0x0000f03f;
	snd_cs46xx_poke(chip, BA1_PFIE, tmp);	/* playback interrupt enable */

	tmp = snd_cs46xx_peek(chip, BA1_CIE);
	tmp &= ~0x0000003f;
	tmp |= 0x00000001;
	snd_cs46xx_poke(chip, BA1_CIE, tmp);	/* capture interrupt enable */
}

/* Reset the DSP, download the firmware image(s) and bring the streams to
 * a known stopped state. */
int __devinit snd_cs46xx_start_dsp(struct snd_cs46xx *chip)
{
	unsigned int tmp;
	/*
	 *  Reset the processor.
	 */
	snd_cs46xx_reset(chip);
	/*
	 *  Download the image to the processor.
	 */
#ifdef CONFIG_SND_CS46XX_NEW_DSP
#if 0
	if (cs46xx_dsp_load_module(chip, &cwcemb80_module) < 0) {
		snd_printk(KERN_ERR "image download error\n");
		return -EIO;
	}
#endif

	if (cs46xx_dsp_load_module(chip, &cwc4630_module) < 0) {
		snd_printk(KERN_ERR "image download error [cwc4630]\n");
		return -EIO;
	}

	if (cs46xx_dsp_load_module(chip, &cwcasync_module) < 0) {
		snd_printk(KERN_ERR "image download error [cwcasync]\n");
		return -EIO;
	}

	if (cs46xx_dsp_load_module(chip, &cwcsnoop_module) < 0) {
		snd_printk(KERN_ERR "image download error [cwcsnoop]\n");
		return -EIO;
	}

	if (cs46xx_dsp_load_module(chip, &cwcbinhack_module) < 0) {
		snd_printk(KERN_ERR "image download error [cwcbinhack]\n");
		return -EIO;
	}

	if (cs46xx_dsp_load_module(chip, &cwcdma_module) < 0) {
		snd_printk(KERN_ERR "image download error [cwcdma]\n");
		return -EIO;
	}

	if (cs46xx_dsp_scb_and_task_init(chip) < 0)
		return -EIO;
#else
	/* old image */
	if (snd_cs46xx_download_image(chip) < 0) {
		snd_printk(KERN_ERR "image download error\n");
		return -EIO;
	}

	/*
	 *  Stop playback DMA.
	 */
	tmp = snd_cs46xx_peek(chip, BA1_PCTL);
	chip->play_ctl = tmp & 0xffff0000;
	snd_cs46xx_poke(chip, BA1_PCTL, tmp & 0x0000ffff);
#endif

	/*
	 *  Stop capture DMA.
	 */
	tmp = snd_cs46xx_peek(chip, BA1_CCTL);
	chip->capt.ctl = tmp & 0x0000ffff;
	snd_cs46xx_poke(chip, BA1_CCTL, tmp & 0xffff0000);

	mdelay(5);

	snd_cs46xx_set_play_sample_rate(chip, 8000);
	snd_cs46xx_set_capture_sample_rate(chip, 8000);

	snd_cs46xx_proc_start(chip);

	cs46xx_enable_stream_irqs(chip);

#ifndef CONFIG_SND_CS46XX_NEW_DSP
	/* set the attenuation to 0dB */
	snd_cs46xx_poke(chip, BA1_PVOL, 0x80008000);
	snd_cs46xx_poke(chip, BA1_CVOL, 0x80008000);
#endif

	return 0;
}

/*
 *	AMP control - null AMP
 */
static void amp_none(struct snd_cs46xx *chip, int change)
{
}

#ifdef CONFIG_SND_CS46XX_NEW_DSP
/* Route the secondary codec's GPIO pins 7/8 (used for EAPD on Voyetra
 * boards) by programming AC-link slot 12 through the serial-port FIFO.
 * Temporarily powers the core clock up if it was off. */
static int voyetra_setup_eapd_slot(struct snd_cs46xx *chip)
{
	u32 idx, valid_slots,tmp,powerdown = 0;
	u16 modem_power,pin_config,logic_type;

	snd_printdd ("cs46xx: cs46xx_setup_eapd_slot()+\n");

	/*
	 *  See if the devices are powered down.  If so, we must power them up first
	 *  or they will not respond.
	 */
	tmp = snd_cs46xx_peekBA0(chip, BA0_CLKCR1);
	if (!(tmp & CLKCR1_SWCE)) {
		snd_cs46xx_pokeBA0(chip, BA0_CLKCR1, tmp | CLKCR1_SWCE);
		powerdown = 1;
	}

	/*
	 * Clear PRA.  The Bonzo chip will be used for GPIO not for modem
	 * stuff.
	 */
	if(chip->nr_ac97_codecs != 2) {
		snd_printk (KERN_ERR "cs46xx: cs46xx_setup_eapd_slot() - no secondary codec configured\n");
		return -EINVAL;
	}

	modem_power = snd_cs46xx_codec_read (chip,
					     AC97_EXTENDED_MSTATUS,
					     CS46XX_SECONDARY_CODEC_INDEX);
	modem_power &=0xFEFF;

	snd_cs46xx_codec_write(chip,
			       AC97_EXTENDED_MSTATUS, modem_power,
			       CS46XX_SECONDARY_CODEC_INDEX);

	/*
	 * Set GPIO pin's 7 and 8 so that they are configured for output.
	 */
	pin_config = snd_cs46xx_codec_read (chip,
					    AC97_GPIO_CFG,
					    CS46XX_SECONDARY_CODEC_INDEX);
	pin_config &=0x27F;

	snd_cs46xx_codec_write(chip,
			       AC97_GPIO_CFG, pin_config,
			       CS46XX_SECONDARY_CODEC_INDEX);

	/*
	 * Set GPIO pin's 7 and 8 so that they are compatible with CMOS logic.
	 */
	logic_type = snd_cs46xx_codec_read(chip, AC97_GPIO_POLARITY,
					   CS46XX_SECONDARY_CODEC_INDEX);
	logic_type &=0x27F;

	snd_cs46xx_codec_write (chip, AC97_GPIO_POLARITY, logic_type,
				CS46XX_SECONDARY_CODEC_INDEX);

	valid_slots = snd_cs46xx_peekBA0(chip, BA0_ACOSV);
	valid_slots |= 0x200;
	snd_cs46xx_pokeBA0(chip, BA0_ACOSV, valid_slots);

	if ( cs46xx_wait_for_fifo(chip,1) ) {
		snd_printdd("FIFO is busy\n");
		return -EINVAL;
	}

	/*
	 * Fill slots 12 with the correct value for the GPIO pins.
	 */
	for(idx = 0x90; idx <= 0x9F; idx++) {
		/*
		 * Initialize the fifo so that bits 7 and 8 are on.
		 *
		 * Remember that the GPIO pins in bonzo are shifted by 4 bits to
		 * the left.  0x1800 corresponds to bits 7 and 8.
		 */
		snd_cs46xx_pokeBA0(chip, BA0_SERBWP, 0x1800);

		/*
		 * Wait for command to complete
		 */
		if ( cs46xx_wait_for_fifo(chip,200) ) {
			snd_printdd("failed waiting for FIFO at addr (%02X)\n",idx);
			return -EINVAL;
		}

		/*
		 * Write the serial port FIFO index.
		 */
		snd_cs46xx_pokeBA0(chip, BA0_SERBAD, idx);

		/*
		 * Tell the serial port to load the new value into the FIFO location.
		 */
		snd_cs46xx_pokeBA0(chip, BA0_SERBCM, SERBCM_WRC);
	}

	/* wait for last command to complete */
	cs46xx_wait_for_fifo(chip,200);

	/*
	 *  Now, if we powered up the devices, then power them back down again.
	 *  This is kinda ugly, but should never happen.
	 */
	if (powerdown)
		snd_cs46xx_pokeBA0(chip, BA0_CLKCR1, tmp);

	return 0;
}
#endif

/*
 *	Crystal EAPD mode
 */
/* Track the amplifier refcount and drive the EAPD bit in the primary
 * codec's POWERDOWN register accordingly; on the first power-up under
 * NEW_DSP, also program the EAPD GPIO slot. */
static void amp_voyetra(struct snd_cs46xx *chip, int change)
{
	/* Manage the EAPD bit on the Crystal 4297
	   and the Analog AD1885 */

#ifdef CONFIG_SND_CS46XX_NEW_DSP
	int old = chip->amplifier;
#endif
	int oval, val;

	chip->amplifier += change;
	oval = snd_cs46xx_codec_read(chip, AC97_POWERDOWN,
				     CS46XX_PRIMARY_CODEC_INDEX);
	val = oval;
	if (chip->amplifier) {
		/* Turn the EAPD amp on */
		val |= 0x8000;
	} else {
		/* Turn the EAPD amp off */
		val &= ~0x8000;
	}
	if (val != oval) {
		snd_cs46xx_codec_write(chip, AC97_POWERDOWN, val,
				       CS46XX_PRIMARY_CODEC_INDEX);
		if (chip->eapd_switch)
			snd_ctl_notify(chip->card, SNDRV_CTL_EVENT_MASK_VALUE,
				       &chip->eapd_switch->id);
	}

#ifdef CONFIG_SND_CS46XX_NEW_DSP
	if (chip->amplifier && !old) {
		voyetra_setup_eapd_slot(chip);
	}
#endif
}

static void hercules_init(struct snd_cs46xx *chip)
{
	/* default: AMP off, and SPDIF input optical */
	snd_cs46xx_pokeBA0(chip, BA0_EGPIODR, EGPIODR_GPOE0);
	/* NOTE(review): the EGPIOPTR register is written with an EGPIODR_
	 * constant; correct only if EGPIOPTR_GPPT0 has the same bit value -
	 * verify (the select_put handler above uses EGPIOPTR_GPPT0). */
	snd_cs46xx_pokeBA0(chip, BA0_EGPIOPTR, EGPIODR_GPOE0);
}

/*
 * Game Theatre XP card - EGPIO[2] is used to enable the external amp.
*/ static void amp_hercules(struct snd_cs46xx *chip, int change) { int old = chip->amplifier; int val1 = snd_cs46xx_peekBA0(chip, BA0_EGPIODR); int val2 = snd_cs46xx_peekBA0(chip, BA0_EGPIOPTR); chip->amplifier += change; if (chip->amplifier && !old) { snd_printdd ("Hercules amplifier ON\n"); snd_cs46xx_pokeBA0(chip, BA0_EGPIODR, EGPIODR_GPOE2 | val1); /* enable EGPIO2 output */ snd_cs46xx_pokeBA0(chip, BA0_EGPIOPTR, EGPIOPTR_GPPT2 | val2); /* open-drain on output */ } else if (old && !chip->amplifier) { snd_printdd ("Hercules amplifier OFF\n"); snd_cs46xx_pokeBA0(chip, BA0_EGPIODR, val1 & ~EGPIODR_GPOE2); /* disable */ snd_cs46xx_pokeBA0(chip, BA0_EGPIOPTR, val2 & ~EGPIOPTR_GPPT2); /* disable */ } } static void voyetra_mixer_init (struct snd_cs46xx *chip) { snd_printdd ("initializing Voyetra mixer\n"); /* Enable SPDIF out */ snd_cs46xx_pokeBA0(chip, BA0_EGPIODR, EGPIODR_GPOE0); snd_cs46xx_pokeBA0(chip, BA0_EGPIOPTR, EGPIODR_GPOE0); } static void hercules_mixer_init (struct snd_cs46xx *chip) { #ifdef CONFIG_SND_CS46XX_NEW_DSP unsigned int idx; int err; struct snd_card *card = chip->card; #endif /* set EGPIO to default */ hercules_init(chip); snd_printdd ("initializing Hercules mixer\n"); #ifdef CONFIG_SND_CS46XX_NEW_DSP if (chip->in_suspend) return; for (idx = 0 ; idx < ARRAY_SIZE(snd_hercules_controls); idx++) { struct snd_kcontrol *kctl; kctl = snd_ctl_new1(&snd_hercules_controls[idx], chip); if ((err = snd_ctl_add(card, kctl)) < 0) { printk (KERN_ERR "cs46xx: failed to initialize Hercules mixer (%d)\n",err); break; } } #endif } #if 0 /* * Untested */ static void amp_voyetra_4294(struct snd_cs46xx *chip, int change) { chip->amplifier += change; if (chip->amplifier) { /* Switch the GPIO pins 7 and 8 to open drain */ snd_cs46xx_codec_write(chip, 0x4C, snd_cs46xx_codec_read(chip, 0x4C) & 0xFE7F); snd_cs46xx_codec_write(chip, 0x4E, snd_cs46xx_codec_read(chip, 0x4E) | 0x0180); /* Now wake the AMP (this might be backwards) */ snd_cs46xx_codec_write(chip, 0x54, 
snd_cs46xx_codec_read(chip, 0x54) & ~0x0180); } else { snd_cs46xx_codec_write(chip, 0x54, snd_cs46xx_codec_read(chip, 0x54) | 0x0180); } } #endif /* * Handle the CLKRUN on a thinkpad. We must disable CLKRUN support * whenever we need to beat on the chip. * * The original idea and code for this hack comes from David Kaiser at * Linuxcare. Perhaps one day Crystal will document their chips well * enough to make them useful. */ static void clkrun_hack(struct snd_cs46xx *chip, int change) { u16 control, nval; if (!chip->acpi_port) return; chip->amplifier += change; /* Read ACPI port */ nval = control = inw(chip->acpi_port + 0x10); /* Flip CLKRUN off while running */ if (! chip->amplifier) nval |= 0x2000; else nval &= ~0x2000; if (nval != control) outw(nval, chip->acpi_port + 0x10); } /* * detect intel piix4 */ static void clkrun_init(struct snd_cs46xx *chip) { struct pci_dev *pdev; u8 pp; chip->acpi_port = 0; pdev = pci_get_device(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371AB_3, NULL); if (pdev == NULL) return; /* Not a thinkpad thats for sure */ /* Find the control port */ pci_read_config_byte(pdev, 0x41, &pp); chip->acpi_port = pp << 8; pci_dev_put(pdev); } /* * Card subid table */ struct cs_card_type { u16 vendor; u16 id; char *name; void (*init)(struct snd_cs46xx *); void (*amp)(struct snd_cs46xx *, int); void (*active)(struct snd_cs46xx *, int); void (*mixer_init)(struct snd_cs46xx *); }; static struct cs_card_type __devinitdata cards[] = { { .vendor = 0x1489, .id = 0x7001, .name = "Genius Soundmaker 128 value", /* nothing special */ }, { .vendor = 0x5053, .id = 0x3357, .name = "Voyetra", .amp = amp_voyetra, .mixer_init = voyetra_mixer_init, }, { .vendor = 0x1071, .id = 0x6003, .name = "Mitac MI6020/21", .amp = amp_voyetra, }, /* Hercules Game Theatre XP */ { .vendor = 0x14af, /* Guillemot Corporation */ .id = 0x0050, .name = "Hercules Game Theatre XP", .amp = amp_hercules, .mixer_init = hercules_mixer_init, }, { .vendor = 0x1681, .id = 0x0050, .name = "Hercules 
Game Theatre XP", .amp = amp_hercules, .mixer_init = hercules_mixer_init, }, { .vendor = 0x1681, .id = 0x0051, .name = "Hercules Game Theatre XP", .amp = amp_hercules, .mixer_init = hercules_mixer_init, }, { .vendor = 0x1681, .id = 0x0052, .name = "Hercules Game Theatre XP", .amp = amp_hercules, .mixer_init = hercules_mixer_init, }, { .vendor = 0x1681, .id = 0x0053, .name = "Hercules Game Theatre XP", .amp = amp_hercules, .mixer_init = hercules_mixer_init, }, { .vendor = 0x1681, .id = 0x0054, .name = "Hercules Game Theatre XP", .amp = amp_hercules, .mixer_init = hercules_mixer_init, }, /* Herculess Fortissimo */ { .vendor = 0x1681, .id = 0xa010, .name = "Hercules Gamesurround Fortissimo II", }, { .vendor = 0x1681, .id = 0xa011, .name = "Hercules Gamesurround Fortissimo III 7.1", }, /* Teratec */ { .vendor = 0x153b, .id = 0x112e, .name = "Terratec DMX XFire 1024", }, { .vendor = 0x153b, .id = 0x1136, .name = "Terratec SiXPack 5.1", }, /* Not sure if the 570 needs the clkrun hack */ { .vendor = PCI_VENDOR_ID_IBM, .id = 0x0132, .name = "Thinkpad 570", .init = clkrun_init, .active = clkrun_hack, }, { .vendor = PCI_VENDOR_ID_IBM, .id = 0x0153, .name = "Thinkpad 600X/A20/T20", .init = clkrun_init, .active = clkrun_hack, }, { .vendor = PCI_VENDOR_ID_IBM, .id = 0x1010, .name = "Thinkpad 600E (unsupported)", }, {} /* terminator */ }; /* * APM support */ #ifdef CONFIG_PM static unsigned int saved_regs[] = { BA0_ACOSV, /*BA0_ASER_FADDR,*/ BA0_ASER_MASTER, BA1_PVOL, BA1_CVOL, }; int snd_cs46xx_suspend(struct pci_dev *pci, pm_message_t state) { struct snd_card *card = pci_get_drvdata(pci); struct snd_cs46xx *chip = card->private_data; int i, amp_saved; snd_power_change_state(card, SNDRV_CTL_POWER_D3hot); chip->in_suspend = 1; snd_pcm_suspend_all(chip->pcm); // chip->ac97_powerdown = snd_cs46xx_codec_read(chip, AC97_POWER_CONTROL); // chip->ac97_general_purpose = snd_cs46xx_codec_read(chip, BA0_AC97_GENERAL_PURPOSE); snd_ac97_suspend(chip->ac97[CS46XX_PRIMARY_CODEC_INDEX]); 
snd_ac97_suspend(chip->ac97[CS46XX_SECONDARY_CODEC_INDEX]); /* save some registers */ for (i = 0; i < ARRAY_SIZE(saved_regs); i++) chip->saved_regs[i] = snd_cs46xx_peekBA0(chip, saved_regs[i]); amp_saved = chip->amplifier; /* turn off amp */ chip->amplifier_ctrl(chip, -chip->amplifier); snd_cs46xx_hw_stop(chip); /* disable CLKRUN */ chip->active_ctrl(chip, -chip->amplifier); chip->amplifier = amp_saved; /* restore the status */ pci_disable_device(pci); pci_save_state(pci); pci_set_power_state(pci, pci_choose_state(pci, state)); return 0; } int snd_cs46xx_resume(struct pci_dev *pci) { struct snd_card *card = pci_get_drvdata(pci); struct snd_cs46xx *chip = card->private_data; int amp_saved; #ifdef CONFIG_SND_CS46XX_NEW_DSP int i; #endif unsigned int tmp; pci_set_power_state(pci, PCI_D0); pci_restore_state(pci); if (pci_enable_device(pci) < 0) { printk(KERN_ERR "cs46xx: pci_enable_device failed, " "disabling device\n"); snd_card_disconnect(card); return -EIO; } pci_set_master(pci); amp_saved = chip->amplifier; chip->amplifier = 0; chip->active_ctrl(chip, 1); /* force to on */ snd_cs46xx_chip_init(chip); snd_cs46xx_reset(chip); #ifdef CONFIG_SND_CS46XX_NEW_DSP cs46xx_dsp_resume(chip); /* restore some registers */ for (i = 0; i < ARRAY_SIZE(saved_regs); i++) snd_cs46xx_pokeBA0(chip, saved_regs[i], chip->saved_regs[i]); #else snd_cs46xx_download_image(chip); #endif #if 0 snd_cs46xx_codec_write(chip, BA0_AC97_GENERAL_PURPOSE, chip->ac97_general_purpose); snd_cs46xx_codec_write(chip, AC97_POWER_CONTROL, chip->ac97_powerdown); mdelay(10); snd_cs46xx_codec_write(chip, BA0_AC97_POWERDOWN, chip->ac97_powerdown); mdelay(5); #endif snd_ac97_resume(chip->ac97[CS46XX_PRIMARY_CODEC_INDEX]); snd_ac97_resume(chip->ac97[CS46XX_SECONDARY_CODEC_INDEX]); /* * Stop capture DMA. 
*/ tmp = snd_cs46xx_peek(chip, BA1_CCTL); chip->capt.ctl = tmp & 0x0000ffff; snd_cs46xx_poke(chip, BA1_CCTL, tmp & 0xffff0000); mdelay(5); /* reset playback/capture */ snd_cs46xx_set_play_sample_rate(chip, 8000); snd_cs46xx_set_capture_sample_rate(chip, 8000); snd_cs46xx_proc_start(chip); cs46xx_enable_stream_irqs(chip); if (amp_saved) chip->amplifier_ctrl(chip, 1); /* turn amp on */ else chip->active_ctrl(chip, -1); /* disable CLKRUN */ chip->amplifier = amp_saved; chip->in_suspend = 0; snd_power_change_state(card, SNDRV_CTL_POWER_D0); return 0; } #endif /* CONFIG_PM */ /* */ int __devinit snd_cs46xx_create(struct snd_card *card, struct pci_dev * pci, int external_amp, int thinkpad, struct snd_cs46xx ** rchip) { struct snd_cs46xx *chip; int err, idx; struct snd_cs46xx_region *region; struct cs_card_type *cp; u16 ss_card, ss_vendor; static struct snd_device_ops ops = { .dev_free = snd_cs46xx_dev_free, }; *rchip = NULL; /* enable PCI device */ if ((err = pci_enable_device(pci)) < 0) return err; chip = kzalloc(sizeof(*chip), GFP_KERNEL); if (chip == NULL) { pci_disable_device(pci); return -ENOMEM; } spin_lock_init(&chip->reg_lock); #ifdef CONFIG_SND_CS46XX_NEW_DSP mutex_init(&chip->spos_mutex); #endif chip->card = card; chip->pci = pci; chip->irq = -1; chip->ba0_addr = pci_resource_start(pci, 0); chip->ba1_addr = pci_resource_start(pci, 1); if (chip->ba0_addr == 0 || chip->ba0_addr == (unsigned long)~0 || chip->ba1_addr == 0 || chip->ba1_addr == (unsigned long)~0) { snd_printk(KERN_ERR "wrong address(es) - ba0 = 0x%lx, ba1 = 0x%lx\n", chip->ba0_addr, chip->ba1_addr); snd_cs46xx_free(chip); return -ENOMEM; } region = &chip->region.name.ba0; strcpy(region->name, "CS46xx_BA0"); region->base = chip->ba0_addr; region->size = CS46XX_BA0_SIZE; region = &chip->region.name.data0; strcpy(region->name, "CS46xx_BA1_data0"); region->base = chip->ba1_addr + BA1_SP_DMEM0; region->size = CS46XX_BA1_DATA0_SIZE; region = &chip->region.name.data1; strcpy(region->name, 
"CS46xx_BA1_data1"); region->base = chip->ba1_addr + BA1_SP_DMEM1; region->size = CS46XX_BA1_DATA1_SIZE; region = &chip->region.name.pmem; strcpy(region->name, "CS46xx_BA1_pmem"); region->base = chip->ba1_addr + BA1_SP_PMEM; region->size = CS46XX_BA1_PRG_SIZE; region = &chip->region.name.reg; strcpy(region->name, "CS46xx_BA1_reg"); region->base = chip->ba1_addr + BA1_SP_REG; region->size = CS46XX_BA1_REG_SIZE; /* set up amp and clkrun hack */ pci_read_config_word(pci, PCI_SUBSYSTEM_VENDOR_ID, &ss_vendor); pci_read_config_word(pci, PCI_SUBSYSTEM_ID, &ss_card); for (cp = &cards[0]; cp->name; cp++) { if (cp->vendor == ss_vendor && cp->id == ss_card) { snd_printdd ("hack for %s enabled\n", cp->name); chip->amplifier_ctrl = cp->amp; chip->active_ctrl = cp->active; chip->mixer_init = cp->mixer_init; if (cp->init) cp->init(chip); break; } } if (external_amp) { snd_printk(KERN_INFO "Crystal EAPD support forced on.\n"); chip->amplifier_ctrl = amp_voyetra; } if (thinkpad) { snd_printk(KERN_INFO "Activating CLKRUN hack for Thinkpad.\n"); chip->active_ctrl = clkrun_hack; clkrun_init(chip); } if (chip->amplifier_ctrl == NULL) chip->amplifier_ctrl = amp_none; if (chip->active_ctrl == NULL) chip->active_ctrl = amp_none; chip->active_ctrl(chip, 1); /* enable CLKRUN */ pci_set_master(pci); for (idx = 0; idx < 5; idx++) { region = &chip->region.idx[idx]; if ((region->resource = request_mem_region(region->base, region->size, region->name)) == NULL) { snd_printk(KERN_ERR "unable to request memory region 0x%lx-0x%lx\n", region->base, region->base + region->size - 1); snd_cs46xx_free(chip); return -EBUSY; } region->remap_addr = ioremap_nocache(region->base, region->size); if (region->remap_addr == NULL) { snd_printk(KERN_ERR "%s ioremap problem\n", region->name); snd_cs46xx_free(chip); return -ENOMEM; } } if (request_irq(pci->irq, snd_cs46xx_interrupt, IRQF_SHARED, KBUILD_MODNAME, chip)) { snd_printk(KERN_ERR "unable to grab IRQ %d\n", pci->irq); snd_cs46xx_free(chip); return -EBUSY; } 
chip->irq = pci->irq; #ifdef CONFIG_SND_CS46XX_NEW_DSP chip->dsp_spos_instance = cs46xx_dsp_spos_create(chip); if (chip->dsp_spos_instance == NULL) { snd_cs46xx_free(chip); return -ENOMEM; } #endif err = snd_cs46xx_chip_init(chip); if (err < 0) { snd_cs46xx_free(chip); return err; } if ((err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops)) < 0) { snd_cs46xx_free(chip); return err; } snd_cs46xx_proc_init(card, chip); #ifdef CONFIG_PM chip->saved_regs = kmalloc(sizeof(*chip->saved_regs) * ARRAY_SIZE(saved_regs), GFP_KERNEL); if (!chip->saved_regs) { snd_cs46xx_free(chip); return -ENOMEM; } #endif chip->active_ctrl(chip, -1); /* disable CLKRUN */ snd_card_set_dev(card, &pci->dev); *rchip = chip; return 0; }
gpl-2.0
CyanogenMod/android_kernel_samsung_exynos5410
sound/i2c/other/ak4xxx-adda.c
6638
27244
/* * ALSA driver for AK4524 / AK4528 / AK4529 / AK4355 / AK4358 / AK4381 * AD and DA converters * * Copyright (c) 2000-2004 Jaroslav Kysela <perex@perex.cz>, * Takashi Iwai <tiwai@suse.de> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <asm/io.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/init.h> #include <linux/module.h> #include <sound/core.h> #include <sound/control.h> #include <sound/tlv.h> #include <sound/ak4xxx-adda.h> #include <sound/info.h> MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>, Takashi Iwai <tiwai@suse.de>"); MODULE_DESCRIPTION("Routines for control of AK452x / AK43xx AD/DA converters"); MODULE_LICENSE("GPL"); /* write the given register and save the data to the cache */ void snd_akm4xxx_write(struct snd_akm4xxx *ak, int chip, unsigned char reg, unsigned char val) { ak->ops.lock(ak, chip); ak->ops.write(ak, chip, reg, val); /* save the data */ snd_akm4xxx_set(ak, chip, reg, val); ak->ops.unlock(ak, chip); } EXPORT_SYMBOL(snd_akm4xxx_write); /* reset procedure for AK4524 and AK4528 */ static void ak4524_reset(struct snd_akm4xxx *ak, int state) { unsigned int chip; unsigned char reg; for (chip = 0; chip < ak->num_dacs/2; chip++) { snd_akm4xxx_write(ak, chip, 0x01, state ? 
0x00 : 0x03); if (state) continue; /* DAC volumes */ for (reg = 0x04; reg < ak->total_regs; reg++) snd_akm4xxx_write(ak, chip, reg, snd_akm4xxx_get(ak, chip, reg)); } } /* reset procedure for AK4355 and AK4358 */ static void ak435X_reset(struct snd_akm4xxx *ak, int state) { unsigned char reg; if (state) { snd_akm4xxx_write(ak, 0, 0x01, 0x02); /* reset and soft-mute */ return; } for (reg = 0x00; reg < ak->total_regs; reg++) if (reg != 0x01) snd_akm4xxx_write(ak, 0, reg, snd_akm4xxx_get(ak, 0, reg)); snd_akm4xxx_write(ak, 0, 0x01, 0x01); /* un-reset, unmute */ } /* reset procedure for AK4381 */ static void ak4381_reset(struct snd_akm4xxx *ak, int state) { unsigned int chip; unsigned char reg; for (chip = 0; chip < ak->num_dacs/2; chip++) { snd_akm4xxx_write(ak, chip, 0x00, state ? 0x0c : 0x0f); if (state) continue; for (reg = 0x01; reg < ak->total_regs; reg++) snd_akm4xxx_write(ak, chip, reg, snd_akm4xxx_get(ak, chip, reg)); } } /* * reset the AKM codecs * @state: 1 = reset codec, 0 = restore the registers * * assert the reset operation and restores the register values to the chips. */ void snd_akm4xxx_reset(struct snd_akm4xxx *ak, int state) { switch (ak->type) { case SND_AK4524: case SND_AK4528: case SND_AK4620: ak4524_reset(ak, state); break; case SND_AK4529: /* FIXME: needed for ak4529? 
*/ break; case SND_AK4355: ak435X_reset(ak, state); break; case SND_AK4358: ak435X_reset(ak, state); break; case SND_AK4381: ak4381_reset(ak, state); break; default: break; } } EXPORT_SYMBOL(snd_akm4xxx_reset); /* * Volume conversion table for non-linear volumes * from -63.5dB (mute) to 0dB step 0.5dB * * Used for AK4524/AK4620 input/ouput attenuation, AK4528, and * AK5365 input attenuation */ static const unsigned char vol_cvt_datt[128] = { 0x00, 0x01, 0x01, 0x02, 0x02, 0x03, 0x03, 0x04, 0x04, 0x04, 0x04, 0x05, 0x05, 0x05, 0x06, 0x06, 0x06, 0x07, 0x07, 0x08, 0x08, 0x08, 0x09, 0x0a, 0x0a, 0x0b, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x0f, 0x10, 0x10, 0x11, 0x12, 0x12, 0x13, 0x13, 0x14, 0x15, 0x16, 0x17, 0x17, 0x18, 0x19, 0x1a, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, 0x21, 0x22, 0x23, 0x23, 0x24, 0x25, 0x26, 0x28, 0x29, 0x2a, 0x2b, 0x2d, 0x2e, 0x30, 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x37, 0x38, 0x39, 0x3b, 0x3c, 0x3e, 0x3f, 0x40, 0x41, 0x42, 0x43, 0x44, 0x46, 0x47, 0x48, 0x4a, 0x4b, 0x4d, 0x4e, 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x58, 0x59, 0x5b, 0x5c, 0x5e, 0x5f, 0x60, 0x61, 0x62, 0x64, 0x65, 0x66, 0x67, 0x69, 0x6a, 0x6c, 0x6d, 0x6f, 0x70, 0x71, 0x72, 0x73, 0x75, 0x76, 0x77, 0x79, 0x7a, 0x7c, 0x7d, 0x7f, }; /* * dB tables */ static const DECLARE_TLV_DB_SCALE(db_scale_vol_datt, -6350, 50, 1); static const DECLARE_TLV_DB_SCALE(db_scale_8bit, -12750, 50, 1); static const DECLARE_TLV_DB_SCALE(db_scale_7bit, -6350, 50, 1); static const DECLARE_TLV_DB_LINEAR(db_scale_linear, TLV_DB_GAIN_MUTE, 0); /* * initialize all the ak4xxx chips */ void snd_akm4xxx_init(struct snd_akm4xxx *ak) { static const unsigned char inits_ak4524[] = { 0x00, 0x07, /* 0: all power up */ 0x01, 0x00, /* 1: ADC/DAC reset */ 0x02, 0x60, /* 2: 24bit I2S */ 0x03, 0x19, /* 3: deemphasis off */ 0x01, 0x03, /* 1: ADC/DAC enable */ 0x04, 0x00, /* 4: ADC left muted */ 0x05, 0x00, /* 5: ADC right muted */ 0x06, 0x00, /* 6: DAC left muted */ 0x07, 0x00, /* 7: DAC right muted */ 0xff, 0xff }; static const unsigned char 
inits_ak4528[] = { 0x00, 0x07, /* 0: all power up */ 0x01, 0x00, /* 1: ADC/DAC reset */ 0x02, 0x60, /* 2: 24bit I2S */ 0x03, 0x0d, /* 3: deemphasis off, turn LR highpass filters on */ 0x01, 0x03, /* 1: ADC/DAC enable */ 0x04, 0x00, /* 4: ADC left muted */ 0x05, 0x00, /* 5: ADC right muted */ 0xff, 0xff }; static const unsigned char inits_ak4529[] = { 0x09, 0x01, /* 9: ATS=0, RSTN=1 */ 0x0a, 0x3f, /* A: all power up, no zero/overflow detection */ 0x00, 0x0c, /* 0: TDM=0, 24bit I2S, SMUTE=0 */ 0x01, 0x00, /* 1: ACKS=0, ADC, loop off */ 0x02, 0xff, /* 2: LOUT1 muted */ 0x03, 0xff, /* 3: ROUT1 muted */ 0x04, 0xff, /* 4: LOUT2 muted */ 0x05, 0xff, /* 5: ROUT2 muted */ 0x06, 0xff, /* 6: LOUT3 muted */ 0x07, 0xff, /* 7: ROUT3 muted */ 0x0b, 0xff, /* B: LOUT4 muted */ 0x0c, 0xff, /* C: ROUT4 muted */ 0x08, 0x55, /* 8: deemphasis all off */ 0xff, 0xff }; static const unsigned char inits_ak4355[] = { 0x01, 0x02, /* 1: reset and soft-mute */ 0x00, 0x06, /* 0: mode3(i2s), disable auto-clock detect, * disable DZF, sharp roll-off, RSTN#=0 */ 0x02, 0x0e, /* 2: DA's power up, normal speed, RSTN#=0 */ // 0x02, 0x2e, /* quad speed */ 0x03, 0x01, /* 3: de-emphasis off */ 0x04, 0x00, /* 4: LOUT1 volume muted */ 0x05, 0x00, /* 5: ROUT1 volume muted */ 0x06, 0x00, /* 6: LOUT2 volume muted */ 0x07, 0x00, /* 7: ROUT2 volume muted */ 0x08, 0x00, /* 8: LOUT3 volume muted */ 0x09, 0x00, /* 9: ROUT3 volume muted */ 0x0a, 0x00, /* a: DATT speed=0, ignore DZF */ 0x01, 0x01, /* 1: un-reset, unmute */ 0xff, 0xff }; static const unsigned char inits_ak4358[] = { 0x01, 0x02, /* 1: reset and soft-mute */ 0x00, 0x06, /* 0: mode3(i2s), disable auto-clock detect, * disable DZF, sharp roll-off, RSTN#=0 */ 0x02, 0x4e, /* 2: DA's power up, normal speed, RSTN#=0 */ /* 0x02, 0x6e,*/ /* quad speed */ 0x03, 0x01, /* 3: de-emphasis off */ 0x04, 0x00, /* 4: LOUT1 volume muted */ 0x05, 0x00, /* 5: ROUT1 volume muted */ 0x06, 0x00, /* 6: LOUT2 volume muted */ 0x07, 0x00, /* 7: ROUT2 volume muted */ 0x08, 0x00, /* 
8: LOUT3 volume muted */ 0x09, 0x00, /* 9: ROUT3 volume muted */ 0x0b, 0x00, /* b: LOUT4 volume muted */ 0x0c, 0x00, /* c: ROUT4 volume muted */ 0x0a, 0x00, /* a: DATT speed=0, ignore DZF */ 0x01, 0x01, /* 1: un-reset, unmute */ 0xff, 0xff }; static const unsigned char inits_ak4381[] = { 0x00, 0x0c, /* 0: mode3(i2s), disable auto-clock detect */ 0x01, 0x02, /* 1: de-emphasis off, normal speed, * sharp roll-off, DZF off */ // 0x01, 0x12, /* quad speed */ 0x02, 0x00, /* 2: DZF disabled */ 0x03, 0x00, /* 3: LATT 0 */ 0x04, 0x00, /* 4: RATT 0 */ 0x00, 0x0f, /* 0: power-up, un-reset */ 0xff, 0xff }; static const unsigned char inits_ak4620[] = { 0x00, 0x07, /* 0: normal */ 0x01, 0x00, /* 0: reset */ 0x01, 0x02, /* 1: RSTAD */ 0x01, 0x03, /* 1: RSTDA */ 0x01, 0x0f, /* 1: normal */ 0x02, 0x60, /* 2: 24bit I2S */ 0x03, 0x01, /* 3: deemphasis off */ 0x04, 0x00, /* 4: LIN muted */ 0x05, 0x00, /* 5: RIN muted */ 0x06, 0x00, /* 6: LOUT muted */ 0x07, 0x00, /* 7: ROUT muted */ 0xff, 0xff }; int chip; const unsigned char *ptr, *inits; unsigned char reg, data; memset(ak->images, 0, sizeof(ak->images)); memset(ak->volumes, 0, sizeof(ak->volumes)); switch (ak->type) { case SND_AK4524: inits = inits_ak4524; ak->num_chips = ak->num_dacs / 2; ak->name = "ak4524"; ak->total_regs = 0x08; break; case SND_AK4528: inits = inits_ak4528; ak->num_chips = ak->num_dacs / 2; ak->name = "ak4528"; ak->total_regs = 0x06; break; case SND_AK4529: inits = inits_ak4529; ak->num_chips = 1; ak->name = "ak4529"; ak->total_regs = 0x0d; break; case SND_AK4355: inits = inits_ak4355; ak->num_chips = 1; ak->name = "ak4355"; ak->total_regs = 0x0b; break; case SND_AK4358: inits = inits_ak4358; ak->num_chips = 1; ak->name = "ak4358"; ak->total_regs = 0x10; break; case SND_AK4381: inits = inits_ak4381; ak->num_chips = ak->num_dacs / 2; ak->name = "ak4381"; ak->total_regs = 0x05; break; case SND_AK5365: /* FIXME: any init sequence? 
*/ ak->num_chips = 1; ak->name = "ak5365"; ak->total_regs = 0x08; return; case SND_AK4620: inits = inits_ak4620; ak->num_chips = ak->num_dacs / 2; ak->name = "ak4620"; ak->total_regs = 0x08; break; default: snd_BUG(); return; } for (chip = 0; chip < ak->num_chips; chip++) { ptr = inits; while (*ptr != 0xff) { reg = *ptr++; data = *ptr++; snd_akm4xxx_write(ak, chip, reg, data); udelay(10); } } } EXPORT_SYMBOL(snd_akm4xxx_init); /* * Mixer callbacks */ #define AK_IPGA (1<<20) /* including IPGA */ #define AK_VOL_CVT (1<<21) /* need dB conversion */ #define AK_NEEDSMSB (1<<22) /* need MSB update bit */ #define AK_INVERT (1<<23) /* data is inverted */ #define AK_GET_CHIP(val) (((val) >> 8) & 0xff) #define AK_GET_ADDR(val) ((val) & 0xff) #define AK_GET_SHIFT(val) (((val) >> 16) & 0x0f) #define AK_GET_VOL_CVT(val) (((val) >> 21) & 1) #define AK_GET_IPGA(val) (((val) >> 20) & 1) #define AK_GET_NEEDSMSB(val) (((val) >> 22) & 1) #define AK_GET_INVERT(val) (((val) >> 23) & 1) #define AK_GET_MASK(val) (((val) >> 24) & 0xff) #define AK_COMPOSE(chip,addr,shift,mask) \ (((chip) << 8) | (addr) | ((shift) << 16) | ((mask) << 24)) static int snd_akm4xxx_volume_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { unsigned int mask = AK_GET_MASK(kcontrol->private_value); uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER; uinfo->count = 1; uinfo->value.integer.min = 0; uinfo->value.integer.max = mask; return 0; } static int snd_akm4xxx_volume_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_akm4xxx *ak = snd_kcontrol_chip(kcontrol); int chip = AK_GET_CHIP(kcontrol->private_value); int addr = AK_GET_ADDR(kcontrol->private_value); ucontrol->value.integer.value[0] = snd_akm4xxx_get_vol(ak, chip, addr); return 0; } static int put_ak_reg(struct snd_kcontrol *kcontrol, int addr, unsigned char nval) { struct snd_akm4xxx *ak = snd_kcontrol_chip(kcontrol); unsigned int mask = AK_GET_MASK(kcontrol->private_value); int chip = 
AK_GET_CHIP(kcontrol->private_value); if (snd_akm4xxx_get_vol(ak, chip, addr) == nval) return 0; snd_akm4xxx_set_vol(ak, chip, addr, nval); if (AK_GET_VOL_CVT(kcontrol->private_value) && nval < 128) nval = vol_cvt_datt[nval]; if (AK_GET_IPGA(kcontrol->private_value) && nval >= 128) nval++; /* need to correct + 1 since both 127 and 128 are 0dB */ if (AK_GET_INVERT(kcontrol->private_value)) nval = mask - nval; if (AK_GET_NEEDSMSB(kcontrol->private_value)) nval |= 0x80; /* printk(KERN_DEBUG "DEBUG - AK writing reg: chip %x addr %x, nval %x\n", chip, addr, nval); */ snd_akm4xxx_write(ak, chip, addr, nval); return 1; } static int snd_akm4xxx_volume_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { unsigned int mask = AK_GET_MASK(kcontrol->private_value); unsigned int val = ucontrol->value.integer.value[0]; if (val > mask) return -EINVAL; return put_ak_reg(kcontrol, AK_GET_ADDR(kcontrol->private_value), val); } static int snd_akm4xxx_stereo_volume_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { unsigned int mask = AK_GET_MASK(kcontrol->private_value); uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER; uinfo->count = 2; uinfo->value.integer.min = 0; uinfo->value.integer.max = mask; return 0; } static int snd_akm4xxx_stereo_volume_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_akm4xxx *ak = snd_kcontrol_chip(kcontrol); int chip = AK_GET_CHIP(kcontrol->private_value); int addr = AK_GET_ADDR(kcontrol->private_value); ucontrol->value.integer.value[0] = snd_akm4xxx_get_vol(ak, chip, addr); ucontrol->value.integer.value[1] = snd_akm4xxx_get_vol(ak, chip, addr+1); return 0; } static int snd_akm4xxx_stereo_volume_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { int addr = AK_GET_ADDR(kcontrol->private_value); unsigned int mask = AK_GET_MASK(kcontrol->private_value); unsigned int val[2]; int change; val[0] = ucontrol->value.integer.value[0]; val[1] = 
ucontrol->value.integer.value[1]; if (val[0] > mask || val[1] > mask) return -EINVAL; change = put_ak_reg(kcontrol, addr, val[0]); change |= put_ak_reg(kcontrol, addr + 1, val[1]); return change; } static int snd_akm4xxx_deemphasis_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { static char *texts[4] = { "44.1kHz", "Off", "48kHz", "32kHz", }; uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED; uinfo->count = 1; uinfo->value.enumerated.items = 4; if (uinfo->value.enumerated.item >= 4) uinfo->value.enumerated.item = 3; strcpy(uinfo->value.enumerated.name, texts[uinfo->value.enumerated.item]); return 0; } static int snd_akm4xxx_deemphasis_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_akm4xxx *ak = snd_kcontrol_chip(kcontrol); int chip = AK_GET_CHIP(kcontrol->private_value); int addr = AK_GET_ADDR(kcontrol->private_value); int shift = AK_GET_SHIFT(kcontrol->private_value); ucontrol->value.enumerated.item[0] = (snd_akm4xxx_get(ak, chip, addr) >> shift) & 3; return 0; } static int snd_akm4xxx_deemphasis_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_akm4xxx *ak = snd_kcontrol_chip(kcontrol); int chip = AK_GET_CHIP(kcontrol->private_value); int addr = AK_GET_ADDR(kcontrol->private_value); int shift = AK_GET_SHIFT(kcontrol->private_value); unsigned char nval = ucontrol->value.enumerated.item[0] & 3; int change; nval = (nval << shift) | (snd_akm4xxx_get(ak, chip, addr) & ~(3 << shift)); change = snd_akm4xxx_get(ak, chip, addr) != nval; if (change) snd_akm4xxx_write(ak, chip, addr, nval); return change; } #define ak4xxx_switch_info snd_ctl_boolean_mono_info static int ak4xxx_switch_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_akm4xxx *ak = snd_kcontrol_chip(kcontrol); int chip = AK_GET_CHIP(kcontrol->private_value); int addr = AK_GET_ADDR(kcontrol->private_value); int shift = AK_GET_SHIFT(kcontrol->private_value); int invert = 
AK_GET_INVERT(kcontrol->private_value); /* we observe the (1<<shift) bit only */ unsigned char val = snd_akm4xxx_get(ak, chip, addr) & (1<<shift); if (invert) val = ! val; ucontrol->value.integer.value[0] = (val & (1<<shift)) != 0; return 0; } static int ak4xxx_switch_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_akm4xxx *ak = snd_kcontrol_chip(kcontrol); int chip = AK_GET_CHIP(kcontrol->private_value); int addr = AK_GET_ADDR(kcontrol->private_value); int shift = AK_GET_SHIFT(kcontrol->private_value); int invert = AK_GET_INVERT(kcontrol->private_value); long flag = ucontrol->value.integer.value[0]; unsigned char val, oval; int change; if (invert) flag = ! flag; oval = snd_akm4xxx_get(ak, chip, addr); if (flag) val = oval | (1<<shift); else val = oval & ~(1<<shift); change = (oval != val); if (change) snd_akm4xxx_write(ak, chip, addr, val); return change; } #define AK5365_NUM_INPUTS 5 static int ak4xxx_capture_num_inputs(struct snd_akm4xxx *ak, int mixer_ch) { int num_names; const char **input_names; input_names = ak->adc_info[mixer_ch].input_names; num_names = 0; while (num_names < AK5365_NUM_INPUTS && input_names[num_names]) ++num_names; return num_names; } static int ak4xxx_capture_source_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { struct snd_akm4xxx *ak = snd_kcontrol_chip(kcontrol); int mixer_ch = AK_GET_SHIFT(kcontrol->private_value); const char **input_names; int num_names, idx; num_names = ak4xxx_capture_num_inputs(ak, mixer_ch); if (!num_names) return -EINVAL; uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED; uinfo->count = 1; uinfo->value.enumerated.items = num_names; idx = uinfo->value.enumerated.item; if (idx >= num_names) return -EINVAL; input_names = ak->adc_info[mixer_ch].input_names; strncpy(uinfo->value.enumerated.name, input_names[idx], sizeof(uinfo->value.enumerated.name)); return 0; } static int ak4xxx_capture_source_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value 
*ucontrol) { struct snd_akm4xxx *ak = snd_kcontrol_chip(kcontrol); int chip = AK_GET_CHIP(kcontrol->private_value); int addr = AK_GET_ADDR(kcontrol->private_value); int mask = AK_GET_MASK(kcontrol->private_value); unsigned char val; val = snd_akm4xxx_get(ak, chip, addr) & mask; ucontrol->value.enumerated.item[0] = val; return 0; } static int ak4xxx_capture_source_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_akm4xxx *ak = snd_kcontrol_chip(kcontrol); int mixer_ch = AK_GET_SHIFT(kcontrol->private_value); int chip = AK_GET_CHIP(kcontrol->private_value); int addr = AK_GET_ADDR(kcontrol->private_value); int mask = AK_GET_MASK(kcontrol->private_value); unsigned char oval, val; int num_names = ak4xxx_capture_num_inputs(ak, mixer_ch); if (ucontrol->value.enumerated.item[0] >= num_names) return -EINVAL; oval = snd_akm4xxx_get(ak, chip, addr); val = oval & ~mask; val |= ucontrol->value.enumerated.item[0] & mask; if (val != oval) { snd_akm4xxx_write(ak, chip, addr, val); return 1; } return 0; } /* * build AK4xxx controls */ static int build_dac_controls(struct snd_akm4xxx *ak) { int idx, err, mixer_ch, num_stereo; struct snd_kcontrol_new knew; mixer_ch = 0; for (idx = 0; idx < ak->num_dacs; ) { /* mute control for Revolution 7.1 - AK4381 */ if (ak->type == SND_AK4381 && ak->dac_info[mixer_ch].switch_name) { memset(&knew, 0, sizeof(knew)); knew.iface = SNDRV_CTL_ELEM_IFACE_MIXER; knew.count = 1; knew.access = SNDRV_CTL_ELEM_ACCESS_READWRITE; knew.name = ak->dac_info[mixer_ch].switch_name; knew.info = ak4xxx_switch_info; knew.get = ak4xxx_switch_get; knew.put = ak4xxx_switch_put; knew.access = 0; /* register 1, bit 0 (SMUTE): 0 = normal operation, 1 = mute */ knew.private_value = AK_COMPOSE(idx/2, 1, 0, 0) | AK_INVERT; err = snd_ctl_add(ak->card, snd_ctl_new1(&knew, ak)); if (err < 0) return err; } memset(&knew, 0, sizeof(knew)); if (! ak->dac_info || ! 
ak->dac_info[mixer_ch].name) { knew.name = "DAC Volume"; knew.index = mixer_ch + ak->idx_offset * 2; num_stereo = 1; } else { knew.name = ak->dac_info[mixer_ch].name; num_stereo = ak->dac_info[mixer_ch].num_channels; } knew.iface = SNDRV_CTL_ELEM_IFACE_MIXER; knew.count = 1; knew.access = SNDRV_CTL_ELEM_ACCESS_READWRITE | SNDRV_CTL_ELEM_ACCESS_TLV_READ; if (num_stereo == 2) { knew.info = snd_akm4xxx_stereo_volume_info; knew.get = snd_akm4xxx_stereo_volume_get; knew.put = snd_akm4xxx_stereo_volume_put; } else { knew.info = snd_akm4xxx_volume_info; knew.get = snd_akm4xxx_volume_get; knew.put = snd_akm4xxx_volume_put; } switch (ak->type) { case SND_AK4524: /* register 6 & 7 */ knew.private_value = AK_COMPOSE(idx/2, (idx%2) + 6, 0, 127) | AK_VOL_CVT; knew.tlv.p = db_scale_vol_datt; break; case SND_AK4528: /* register 4 & 5 */ knew.private_value = AK_COMPOSE(idx/2, (idx%2) + 4, 0, 127) | AK_VOL_CVT; knew.tlv.p = db_scale_vol_datt; break; case SND_AK4529: { /* registers 2-7 and b,c */ int val = idx < 6 ? idx + 2 : (idx - 6) + 0xb; knew.private_value = AK_COMPOSE(0, val, 0, 255) | AK_INVERT; knew.tlv.p = db_scale_8bit; break; } case SND_AK4355: /* register 4-9, chip #0 only */ knew.private_value = AK_COMPOSE(0, idx + 4, 0, 255); knew.tlv.p = db_scale_8bit; break; case SND_AK4358: { /* register 4-9 and 11-12, chip #0 only */ int addr = idx < 6 ? 
idx + 4 : idx + 5; knew.private_value = AK_COMPOSE(0, addr, 0, 127) | AK_NEEDSMSB; knew.tlv.p = db_scale_7bit; break; } case SND_AK4381: /* register 3 & 4 */ knew.private_value = AK_COMPOSE(idx/2, (idx%2) + 3, 0, 255); knew.tlv.p = db_scale_linear; break; case SND_AK4620: /* register 6 & 7 */ knew.private_value = AK_COMPOSE(idx/2, (idx%2) + 6, 0, 255); knew.tlv.p = db_scale_linear; break; default: return -EINVAL; } err = snd_ctl_add(ak->card, snd_ctl_new1(&knew, ak)); if (err < 0) return err; idx += num_stereo; mixer_ch++; } return 0; } static int build_adc_controls(struct snd_akm4xxx *ak) { int idx, err, mixer_ch, num_stereo, max_steps; struct snd_kcontrol_new knew; mixer_ch = 0; if (ak->type == SND_AK4528) return 0; /* no controls */ for (idx = 0; idx < ak->num_adcs;) { memset(&knew, 0, sizeof(knew)); if (! ak->adc_info || ! ak->adc_info[mixer_ch].name) { knew.name = "ADC Volume"; knew.index = mixer_ch + ak->idx_offset * 2; num_stereo = 1; } else { knew.name = ak->adc_info[mixer_ch].name; num_stereo = ak->adc_info[mixer_ch].num_channels; } knew.iface = SNDRV_CTL_ELEM_IFACE_MIXER; knew.count = 1; knew.access = SNDRV_CTL_ELEM_ACCESS_READWRITE | SNDRV_CTL_ELEM_ACCESS_TLV_READ; if (num_stereo == 2) { knew.info = snd_akm4xxx_stereo_volume_info; knew.get = snd_akm4xxx_stereo_volume_get; knew.put = snd_akm4xxx_stereo_volume_put; } else { knew.info = snd_akm4xxx_volume_info; knew.get = snd_akm4xxx_volume_get; knew.put = snd_akm4xxx_volume_put; } /* register 4 & 5 */ if (ak->type == SND_AK5365) max_steps = 152; else max_steps = 164; knew.private_value = AK_COMPOSE(idx/2, (idx%2) + 4, 0, max_steps) | AK_VOL_CVT | AK_IPGA; knew.tlv.p = db_scale_vol_datt; err = snd_ctl_add(ak->card, snd_ctl_new1(&knew, ak)); if (err < 0) return err; if (ak->type == SND_AK5365 && (idx % 2) == 0) { if (! ak->adc_info || ! 
ak->adc_info[mixer_ch].switch_name) { knew.name = "Capture Switch"; knew.index = mixer_ch + ak->idx_offset * 2; } else knew.name = ak->adc_info[mixer_ch].switch_name; knew.info = ak4xxx_switch_info; knew.get = ak4xxx_switch_get; knew.put = ak4xxx_switch_put; knew.access = 0; /* register 2, bit 0 (SMUTE): 0 = normal operation, 1 = mute */ knew.private_value = AK_COMPOSE(idx/2, 2, 0, 0) | AK_INVERT; err = snd_ctl_add(ak->card, snd_ctl_new1(&knew, ak)); if (err < 0) return err; memset(&knew, 0, sizeof(knew)); knew.name = ak->adc_info[mixer_ch].selector_name; if (!knew.name) { knew.name = "Capture Channel"; knew.index = mixer_ch + ak->idx_offset * 2; } knew.iface = SNDRV_CTL_ELEM_IFACE_MIXER; knew.info = ak4xxx_capture_source_info; knew.get = ak4xxx_capture_source_get; knew.put = ak4xxx_capture_source_put; knew.access = 0; /* input selector control: reg. 1, bits 0-2. * mis-use 'shift' to pass mixer_ch */ knew.private_value = AK_COMPOSE(idx/2, 1, mixer_ch, 0x07); err = snd_ctl_add(ak->card, snd_ctl_new1(&knew, ak)); if (err < 0) return err; } idx += num_stereo; mixer_ch++; } return 0; } static int build_deemphasis(struct snd_akm4xxx *ak, int num_emphs) { int idx, err; struct snd_kcontrol_new knew; for (idx = 0; idx < num_emphs; idx++) { memset(&knew, 0, sizeof(knew)); knew.name = "Deemphasis"; knew.index = idx + ak->idx_offset; knew.iface = SNDRV_CTL_ELEM_IFACE_MIXER; knew.count = 1; knew.info = snd_akm4xxx_deemphasis_info; knew.get = snd_akm4xxx_deemphasis_get; knew.put = snd_akm4xxx_deemphasis_put; switch (ak->type) { case SND_AK4524: case SND_AK4528: case SND_AK4620: /* register 3 */ knew.private_value = AK_COMPOSE(idx, 3, 0, 0); break; case SND_AK4529: { int shift = idx == 3 ? 
6 : (2 - idx) * 2; /* register 8 with shift */ knew.private_value = AK_COMPOSE(0, 8, shift, 0); break; } case SND_AK4355: case SND_AK4358: knew.private_value = AK_COMPOSE(idx, 3, 0, 0); break; case SND_AK4381: knew.private_value = AK_COMPOSE(idx, 1, 1, 0); break; default: return -EINVAL; } err = snd_ctl_add(ak->card, snd_ctl_new1(&knew, ak)); if (err < 0) return err; } return 0; } #ifdef CONFIG_PROC_FS static void proc_regs_read(struct snd_info_entry *entry, struct snd_info_buffer *buffer) { struct snd_akm4xxx *ak = entry->private_data; int reg, val, chip; for (chip = 0; chip < ak->num_chips; chip++) { for (reg = 0; reg < ak->total_regs; reg++) { val = snd_akm4xxx_get(ak, chip, reg); snd_iprintf(buffer, "chip %d: 0x%02x = 0x%02x\n", chip, reg, val); } } } static int proc_init(struct snd_akm4xxx *ak) { struct snd_info_entry *entry; int err; err = snd_card_proc_new(ak->card, ak->name, &entry); if (err < 0) return err; snd_info_set_text_ops(entry, ak, proc_regs_read); return 0; } #else /* !CONFIG_PROC_FS */ static int proc_init(struct snd_akm4xxx *ak) { return 0; } #endif int snd_akm4xxx_build_controls(struct snd_akm4xxx *ak) { int err, num_emphs; err = build_dac_controls(ak); if (err < 0) return err; err = build_adc_controls(ak); if (err < 0) return err; if (ak->type == SND_AK4355 || ak->type == SND_AK4358) num_emphs = 1; else if (ak->type == SND_AK4620) num_emphs = 0; else num_emphs = ak->num_dacs / 2; err = build_deemphasis(ak, num_emphs); if (err < 0) return err; err = proc_init(ak); if (err < 0) return err; return 0; } EXPORT_SYMBOL(snd_akm4xxx_build_controls); static int __init alsa_akm4xxx_module_init(void) { return 0; } static void __exit alsa_akm4xxx_module_exit(void) { } module_init(alsa_akm4xxx_module_init) module_exit(alsa_akm4xxx_module_exit)
gpl-2.0
pershoot/android_kernel_google_msm
arch/blackfin/mach-bf537/boards/pnav10.c
7150
12391
/* * Copyright 2004-2009 Analog Devices Inc. * 2005 National ICT Australia (NICTA) * Aidan Williams <aidan@nicta.com.au> * * Licensed under the GPL-2 or later. */ #include <linux/device.h> #include <linux/etherdevice.h> #include <linux/export.h> #include <linux/platform_device.h> #include <linux/mtd/mtd.h> #include <linux/mtd/partitions.h> #include <linux/spi/spi.h> #include <linux/spi/flash.h> #include <linux/irq.h> #include <asm/dma.h> #include <asm/bfin5xx_spi.h> #include <asm/portmux.h> #include <linux/spi/ad7877.h> /* * Name the Board for the /proc/cpuinfo */ const char bfin_board_name[] = "ADI PNAV-1.0"; /* * Driver needs to know address, irq and flag pin. */ #if defined(CONFIG_BFIN_CFPCMCIA) || defined(CONFIG_BFIN_CFPCMCIA_MODULE) static struct resource bfin_pcmcia_cf_resources[] = { { .start = 0x20310000, /* IO PORT */ .end = 0x20312000, .flags = IORESOURCE_MEM, }, { .start = 0x20311000, /* Attribute Memory */ .end = 0x20311FFF, .flags = IORESOURCE_MEM, }, { .start = IRQ_PF4, .end = IRQ_PF4, .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWLEVEL, }, { .start = 6, /* Card Detect PF6 */ .end = 6, .flags = IORESOURCE_IRQ, }, }; static struct platform_device bfin_pcmcia_cf_device = { .name = "bfin_cf_pcmcia", .id = -1, .num_resources = ARRAY_SIZE(bfin_pcmcia_cf_resources), .resource = bfin_pcmcia_cf_resources, }; #endif #if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE) static struct platform_device rtc_device = { .name = "rtc-bfin", .id = -1, }; #endif #if defined(CONFIG_SMC91X) || defined(CONFIG_SMC91X_MODULE) #include <linux/smc91x.h> static struct smc91x_platdata smc91x_info = { .flags = SMC91X_USE_16BIT | SMC91X_NOWAIT, .leda = RPC_LED_100_10, .ledb = RPC_LED_TX_RX, }; static struct resource smc91x_resources[] = { { .name = "smc91x-regs", .start = 0x20300300, .end = 0x20300300 + 16, .flags = IORESOURCE_MEM, }, { .start = IRQ_PF7, .end = IRQ_PF7, .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL, }, }; static struct platform_device 
smc91x_device = { .name = "smc91x", .id = 0, .num_resources = ARRAY_SIZE(smc91x_resources), .resource = smc91x_resources, .dev = { .platform_data = &smc91x_info, }, }; #endif #if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE) #include <linux/bfin_mac.h> static const unsigned short bfin_mac_peripherals[] = P_RMII0; static struct bfin_phydev_platform_data bfin_phydev_data[] = { { .addr = 1, .irq = IRQ_MAC_PHYINT, }, }; static struct bfin_mii_bus_platform_data bfin_mii_bus_data = { .phydev_number = 1, .phydev_data = bfin_phydev_data, .phy_mode = PHY_INTERFACE_MODE_RMII, .mac_peripherals = bfin_mac_peripherals, }; static struct platform_device bfin_mii_bus = { .name = "bfin_mii_bus", .dev = { .platform_data = &bfin_mii_bus_data, } }; static struct platform_device bfin_mac_device = { .name = "bfin_mac", .dev = { .platform_data = &bfin_mii_bus, } }; #endif #if defined(CONFIG_USB_NET2272) || defined(CONFIG_USB_NET2272_MODULE) static struct resource net2272_bfin_resources[] = { { .start = 0x20300000, .end = 0x20300000 + 0x100, .flags = IORESOURCE_MEM, }, { .start = IRQ_PF7, .end = IRQ_PF7, .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL, }, }; static struct platform_device net2272_bfin_device = { .name = "net2272", .id = -1, .num_resources = ARRAY_SIZE(net2272_bfin_resources), .resource = net2272_bfin_resources, }; #endif #if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE) /* all SPI peripherals info goes here */ #if defined(CONFIG_MTD_M25P80) \ || defined(CONFIG_MTD_M25P80_MODULE) static struct mtd_partition bfin_spi_flash_partitions[] = { { .name = "bootloader(spi)", .size = 0x00020000, .offset = 0, .mask_flags = MTD_CAP_ROM }, { .name = "linux kernel(spi)", .size = 0xe0000, .offset = 0x20000 }, { .name = "file system(spi)", .size = 0x700000, .offset = 0x00100000, } }; static struct flash_platform_data bfin_spi_flash_data = { .name = "m25p80", .parts = bfin_spi_flash_partitions, .nr_parts = ARRAY_SIZE(bfin_spi_flash_partitions), .type = 
"m25p64", }; /* SPI flash chip (m25p64) */ static struct bfin5xx_spi_chip spi_flash_chip_info = { .enable_dma = 0, /* use dma transfer with this chip*/ }; #endif #if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE) static struct bfin5xx_spi_chip mmc_spi_chip_info = { .enable_dma = 0, }; #endif #if defined(CONFIG_TOUCHSCREEN_AD7877) || defined(CONFIG_TOUCHSCREEN_AD7877_MODULE) static const struct ad7877_platform_data bfin_ad7877_ts_info = { .model = 7877, .vref_delay_usecs = 50, /* internal, no capacitor */ .x_plate_ohms = 419, .y_plate_ohms = 486, .pressure_max = 1000, .pressure_min = 0, .stopacq_polarity = 1, .first_conversion_delay = 3, .acquisition_time = 1, .averaging = 1, .pen_down_acc_interval = 1, }; #endif static struct spi_board_info bfin_spi_board_info[] __initdata = { #if defined(CONFIG_MTD_M25P80) \ || defined(CONFIG_MTD_M25P80_MODULE) { /* the modalias must be the same as spi device driver name */ .modalias = "m25p80", /* Name of spi_driver for this device */ .max_speed_hz = 25000000, /* max spi clock (SCK) speed in HZ */ .bus_num = 0, /* Framework bus number */ .chip_select = 1, /* Framework chip select. 
On STAMP537 it is SPISSEL1*/ .platform_data = &bfin_spi_flash_data, .controller_data = &spi_flash_chip_info, .mode = SPI_MODE_3, }, #endif #if defined(CONFIG_SND_BF5XX_SOC_AD183X) \ || defined(CONFIG_SND_BF5XX_SOC_AD183X_MODULE) { .modalias = "ad183x", .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */ .bus_num = 0, .chip_select = 4, }, #endif #if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE) { .modalias = "mmc_spi", .max_speed_hz = 25000000, /* max spi clock (SCK) speed in HZ */ .bus_num = 0, .chip_select = 5, .controller_data = &mmc_spi_chip_info, .mode = SPI_MODE_3, }, #endif #if defined(CONFIG_TOUCHSCREEN_AD7877) || defined(CONFIG_TOUCHSCREEN_AD7877_MODULE) { .modalias = "ad7877", .platform_data = &bfin_ad7877_ts_info, .irq = IRQ_PF2, .max_speed_hz = 12500000, /* max spi clock (SCK) speed in HZ */ .bus_num = 0, .chip_select = 5, }, #endif }; /* SPI (0) */ static struct resource bfin_spi0_resource[] = { [0] = { .start = SPI0_REGBASE, .end = SPI0_REGBASE + 0xFF, .flags = IORESOURCE_MEM, }, [1] = { .start = CH_SPI, .end = CH_SPI, .flags = IORESOURCE_DMA, }, [2] = { .start = IRQ_SPI, .end = IRQ_SPI, .flags = IORESOURCE_IRQ, }, }; /* SPI controller data */ static struct bfin5xx_spi_master bfin_spi0_info = { .num_chipselect = 8, .enable_dma = 1, /* master has the ability to do dma transfer */ .pin_req = {P_SPI0_SCK, P_SPI0_MISO, P_SPI0_MOSI, 0}, }; static struct platform_device bfin_spi0_device = { .name = "bfin-spi", .id = 0, /* Bus number */ .num_resources = ARRAY_SIZE(bfin_spi0_resource), .resource = bfin_spi0_resource, .dev = { .platform_data = &bfin_spi0_info, /* Passed to driver */ }, }; #endif /* spi master and devices */ #if defined(CONFIG_FB_BF537_LQ035) || defined(CONFIG_FB_BF537_LQ035_MODULE) static struct platform_device bfin_fb_device = { .name = "bf537-lq035", }; #endif #if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) #ifdef CONFIG_SERIAL_BFIN_UART0 static struct resource bfin_uart0_resources[] = { { .start = 
UART0_THR, .end = UART0_GCTL+2, .flags = IORESOURCE_MEM, }, { .start = IRQ_UART0_TX, .end = IRQ_UART0_TX, .flags = IORESOURCE_IRQ, }, { .start = IRQ_UART0_RX, .end = IRQ_UART0_RX, .flags = IORESOURCE_IRQ, }, { .start = IRQ_UART0_ERROR, .end = IRQ_UART0_ERROR, .flags = IORESOURCE_IRQ, }, { .start = CH_UART0_TX, .end = CH_UART0_TX, .flags = IORESOURCE_DMA, }, { .start = CH_UART0_RX, .end = CH_UART0_RX, .flags = IORESOURCE_DMA, }, }; static unsigned short bfin_uart0_peripherals[] = { P_UART0_TX, P_UART0_RX, 0 }; static struct platform_device bfin_uart0_device = { .name = "bfin-uart", .id = 0, .num_resources = ARRAY_SIZE(bfin_uart0_resources), .resource = bfin_uart0_resources, .dev = { .platform_data = &bfin_uart0_peripherals, /* Passed to driver */ }, }; #endif #ifdef CONFIG_SERIAL_BFIN_UART1 static struct resource bfin_uart1_resources[] = { { .start = UART1_THR, .end = UART1_GCTL+2, .flags = IORESOURCE_MEM, }, { .start = IRQ_UART1_TX, .end = IRQ_UART1_TX, .flags = IORESOURCE_IRQ, }, { .start = IRQ_UART1_RX, .end = IRQ_UART1_RX, .flags = IORESOURCE_IRQ, }, { .start = IRQ_UART1_ERROR, .end = IRQ_UART1_ERROR, .flags = IORESOURCE_IRQ, }, { .start = CH_UART1_TX, .end = CH_UART1_TX, .flags = IORESOURCE_DMA, }, { .start = CH_UART1_RX, .end = CH_UART1_RX, .flags = IORESOURCE_DMA, }, }; static unsigned short bfin_uart1_peripherals[] = { P_UART1_TX, P_UART1_RX, 0 }; static struct platform_device bfin_uart1_device = { .name = "bfin-uart", .id = 1, .num_resources = ARRAY_SIZE(bfin_uart1_resources), .resource = bfin_uart1_resources, .dev = { .platform_data = &bfin_uart1_peripherals, /* Passed to driver */ }, }; #endif #endif #if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE) #ifdef CONFIG_BFIN_SIR0 static struct resource bfin_sir0_resources[] = { { .start = 0xFFC00400, .end = 0xFFC004FF, .flags = IORESOURCE_MEM, }, { .start = IRQ_UART0_RX, .end = IRQ_UART0_RX+1, .flags = IORESOURCE_IRQ, }, { .start = CH_UART0_RX, .end = CH_UART0_RX+1, .flags = IORESOURCE_DMA, }, }; 
static struct platform_device bfin_sir0_device = { .name = "bfin_sir", .id = 0, .num_resources = ARRAY_SIZE(bfin_sir0_resources), .resource = bfin_sir0_resources, }; #endif #ifdef CONFIG_BFIN_SIR1 static struct resource bfin_sir1_resources[] = { { .start = 0xFFC02000, .end = 0xFFC020FF, .flags = IORESOURCE_MEM, }, { .start = IRQ_UART1_RX, .end = IRQ_UART1_RX+1, .flags = IORESOURCE_IRQ, }, { .start = CH_UART1_RX, .end = CH_UART1_RX+1, .flags = IORESOURCE_DMA, }, }; static struct platform_device bfin_sir1_device = { .name = "bfin_sir", .id = 1, .num_resources = ARRAY_SIZE(bfin_sir1_resources), .resource = bfin_sir1_resources, }; #endif #endif static struct platform_device *stamp_devices[] __initdata = { #if defined(CONFIG_BFIN_CFPCMCIA) || defined(CONFIG_BFIN_CFPCMCIA_MODULE) &bfin_pcmcia_cf_device, #endif #if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE) &rtc_device, #endif #if defined(CONFIG_SMC91X) || defined(CONFIG_SMC91X_MODULE) &smc91x_device, #endif #if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE) &bfin_mii_bus, &bfin_mac_device, #endif #if defined(CONFIG_USB_NET2272) || defined(CONFIG_USB_NET2272_MODULE) &net2272_bfin_device, #endif #if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE) &bfin_spi0_device, #endif #if defined(CONFIG_FB_BF537_LQ035) || defined(CONFIG_FB_BF537_LQ035_MODULE) &bfin_fb_device, #endif #if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) #ifdef CONFIG_SERIAL_BFIN_UART0 &bfin_uart0_device, #endif #ifdef CONFIG_SERIAL_BFIN_UART1 &bfin_uart1_device, #endif #endif #if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE) #ifdef CONFIG_BFIN_SIR0 &bfin_sir0_device, #endif #ifdef CONFIG_BFIN_SIR1 &bfin_sir1_device, #endif #endif }; static int __init pnav_init(void) { printk(KERN_INFO "%s(): registering device resources\n", __func__); platform_add_devices(stamp_devices, ARRAY_SIZE(stamp_devices)); #if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE) 
spi_register_board_info(bfin_spi_board_info, ARRAY_SIZE(bfin_spi_board_info)); #endif return 0; } arch_initcall(pnav_init); static struct platform_device *stamp_early_devices[] __initdata = { #if defined(CONFIG_SERIAL_BFIN_CONSOLE) || defined(CONFIG_EARLY_PRINTK) #ifdef CONFIG_SERIAL_BFIN_UART0 &bfin_uart0_device, #endif #ifdef CONFIG_SERIAL_BFIN_UART1 &bfin_uart1_device, #endif #endif }; void __init native_machine_early_platform_add_devices(void) { printk(KERN_INFO "register early platform devices\n"); early_platform_add_devices(stamp_early_devices, ARRAY_SIZE(stamp_early_devices)); } int bfin_get_ether_addr(char *addr) { return 1; } EXPORT_SYMBOL(bfin_get_ether_addr);
gpl-2.0
sandeep1027/kernel-3.10
arch/mips/txx9/rbtx4938/irq.c
12526
4127
/*
 * Toshiba RBTX4938 specific interrupt handlers
 * Copyright (C) 2000-2001 Toshiba Corporation
 *
 * 2003-2005 (c) MontaVista Software, Inc. This file is licensed under the
 * terms of the GNU General Public License version 2. This program is
 * licensed "as is" without any warranty of any kind, whether express
 * or implied.
 *
 * Support for TX4938 in 2.6 - Manish Lachwani (mlachwani@mvista.com)
 */

/*
 * IRQ number layout on this board:
 *
 * MIPS_CPU_IRQ_BASE+00 Software 0
 * MIPS_CPU_IRQ_BASE+01 Software 1
 * MIPS_CPU_IRQ_BASE+02 Cascade TX4938-CP0
 * MIPS_CPU_IRQ_BASE+03 Multiplexed -- do not use
 * MIPS_CPU_IRQ_BASE+04 Multiplexed -- do not use
 * MIPS_CPU_IRQ_BASE+05 Multiplexed -- do not use
 * MIPS_CPU_IRQ_BASE+06 Multiplexed -- do not use
 * MIPS_CPU_IRQ_BASE+07 CPU TIMER
 *
 * TXX9_IRQ_BASE+00
 * TXX9_IRQ_BASE+01
 * TXX9_IRQ_BASE+02 Cascade RBTX4938-IOC
 * TXX9_IRQ_BASE+03 RBTX4938 RTL-8019AS Ethernet
 * TXX9_IRQ_BASE+04
 * TXX9_IRQ_BASE+05 TX4938 ETH1
 * TXX9_IRQ_BASE+06 TX4938 ETH0
 * TXX9_IRQ_BASE+07
 * TXX9_IRQ_BASE+08 TX4938 SIO 0
 * TXX9_IRQ_BASE+09 TX4938 SIO 1
 * TXX9_IRQ_BASE+10 TX4938 DMA0
 * TXX9_IRQ_BASE+11 TX4938 DMA1
 * TXX9_IRQ_BASE+12 TX4938 DMA2
 * TXX9_IRQ_BASE+13 TX4938 DMA3
 * TXX9_IRQ_BASE+14
 * TXX9_IRQ_BASE+15
 * TXX9_IRQ_BASE+16 TX4938 PCIC
 * TXX9_IRQ_BASE+17 TX4938 TMR0
 * TXX9_IRQ_BASE+18 TX4938 TMR1
 * TXX9_IRQ_BASE+19 TX4938 TMR2
 * TXX9_IRQ_BASE+20
 * TXX9_IRQ_BASE+21
 * TXX9_IRQ_BASE+22 TX4938 PCIERR
 * TXX9_IRQ_BASE+23
 * TXX9_IRQ_BASE+24
 * TXX9_IRQ_BASE+25
 * TXX9_IRQ_BASE+26
 * TXX9_IRQ_BASE+27
 * TXX9_IRQ_BASE+28
 * TXX9_IRQ_BASE+29
 * TXX9_IRQ_BASE+30
 * TXX9_IRQ_BASE+31 TX4938 SPI
 *
 * RBTX4938_IRQ_IOC+00 PCI-D
 * RBTX4938_IRQ_IOC+01 PCI-C
 * RBTX4938_IRQ_IOC+02 PCI-B
 * RBTX4938_IRQ_IOC+03 PCI-A
 * RBTX4938_IRQ_IOC+04 RTC
 * RBTX4938_IRQ_IOC+05 ATA
 * RBTX4938_IRQ_IOC+06 MODEM
 * RBTX4938_IRQ_IOC+07 SWINT
 */
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <asm/mipsregs.h>
#include <asm/txx9/generic.h>
#include <asm/txx9/rbtx4938.h>

/*
 * Decode a cascaded IOC interrupt: read the IOC interrupt-status
 * register and map the highest pending bit to its Linux IRQ number.
 * Returns -1 when nothing is pending (spurious cascade).
 * The sw_irq argument (the cascade IRQ) is not used.
 */
static int
toshiba_rbtx4938_irq_nested(int sw_irq)
{
	u8 level3;

	level3 = readb(rbtx4938_imstat_addr);
	if (unlikely(!level3))
		return -1;
	/* must use fls so onboard ATA has priority */
	return RBTX4938_IRQ_IOC + __fls8(level3);
}

/* Unmask one IOC interrupt: set its bit in the IOC interrupt-mask register. */
static void toshiba_rbtx4938_irq_ioc_enable(struct irq_data *d)
{
	unsigned char v;

	v = readb(rbtx4938_imask_addr);
	v |= (1 << (d->irq - RBTX4938_IRQ_IOC));
	writeb(v, rbtx4938_imask_addr);
	mmiowb();	/* order the mask write before any later MMIO */
}

/* Mask one IOC interrupt: clear its bit in the IOC interrupt-mask register. */
static void toshiba_rbtx4938_irq_ioc_disable(struct irq_data *d)
{
	unsigned char v;

	v = readb(rbtx4938_imask_addr);
	v &= ~(1 << (d->irq - RBTX4938_IRQ_IOC));
	writeb(v, rbtx4938_imask_addr);
	mmiowb();	/* order the mask write before any later MMIO */
}

#define TOSHIBA_RBTX4938_IOC_NAME "RBTX4938-IOC"

/* irq_chip for the board IOC: mask/unmask via the IOC mask register above. */
static struct irq_chip toshiba_rbtx4938_irq_ioc_type = {
	.name = TOSHIBA_RBTX4938_IOC_NAME,
	.irq_mask = toshiba_rbtx4938_irq_ioc_disable,
	.irq_unmask = toshiba_rbtx4938_irq_ioc_enable,
};

/*
 * Top-level IRQ dispatch, installed as txx9_irq_dispatch.
 * Priority order: CPU timer (IP7), then the TX4938 IRC cascade (IP2,
 * further decoding IOC cascades), then software interrupts (IP1/IP0).
 * Returns the Linux IRQ number, or -1 if nothing is pending.
 */
static int rbtx4938_irq_dispatch(int pending)
{
	int irq;

	if (pending & STATUSF_IP7)
		irq = MIPS_CPU_IRQ_BASE + 7;
	else if (pending & STATUSF_IP2) {
		irq = txx9_irq();
		if (irq == RBTX4938_IRQ_IOCINT)
			irq = toshiba_rbtx4938_irq_nested(irq);
	} else if (pending & STATUSF_IP1)
		irq = MIPS_CPU_IRQ_BASE + 0;
	else if (pending & STATUSF_IP0)
		irq = MIPS_CPU_IRQ_BASE + 1;
	else
		irq = -1;
	return irq;
}

/*
 * Register the irq_chip for every IOC interrupt line and hook the
 * cascade entry so IOC interrupts are decoded via the dispatcher.
 */
static void __init toshiba_rbtx4938_irq_ioc_init(void)
{
	int i;

	for (i = RBTX4938_IRQ_IOC;
	     i < RBTX4938_IRQ_IOC + RBTX4938_NR_IRQ_IOC; i++)
		irq_set_chip_and_handler(i, &toshiba_rbtx4938_irq_ioc_type,
					 handle_level_irq);
	irq_set_chained_handler(RBTX4938_IRQ_IOCINT, handle_simple_irq);
}

/* Board IRQ setup entry point, called once at boot. */
void __init rbtx4938_irq_setup(void)
{
	txx9_irq_dispatch = rbtx4938_irq_dispatch;
	/* Now, interrupt control disabled, */
	/* all IRC interrupts are masked, */
	/* all IRC interrupt mode are Low Active. */

	/* mask all IOC interrupts */
	writeb(0, rbtx4938_imask_addr);

	/* clear SoftInt interrupts */
	writeb(0, rbtx4938_softint_addr);
	tx4938_irq_init();
	toshiba_rbtx4938_irq_ioc_init();
	/* Onboard 10M Ether: High Active */
	irq_set_irq_type(RBTX4938_IRQ_ETHER, IRQF_TRIGGER_HIGH);
}
gpl-2.0
nikhil16242/stock-golfu-kenrel
arch/powerpc/boot/cuboot-83xx.c
14062
1525
/*
 * Old U-boot compatibility for 83xx
 *
 * Author: Scott Wood <scottwood@freescale.com>
 *
 * Copyright (c) 2007 Freescale Semiconductor, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 */

#include "ops.h"
#include "stdio.h"
#include "cuboot.h"

#define TARGET_83xx
#include "ppcboot.h"

/* Board-information record handed over by the old U-Boot (CUBOOT_INIT). */
static bd_t bd;

/*
 * Patch the flattened device tree with the memory layout, MAC addresses
 * and clock frequencies the old U-Boot passed in its bd_t record.
 */
static void platform_fixups(void)
{
	void *soc_node;
	void *serial_node = NULL;

	dt_fixup_memory(bd.bi_memstart, bd.bi_memsize);
	dt_fixup_mac_address_by_alias("ethernet0", bd.bi_enetaddr);
	dt_fixup_mac_address_by_alias("ethernet1", bd.bi_enet1addr);
	dt_fixup_cpu_clocks(bd.bi_intfreq, bd.bi_busfreq / 4, bd.bi_busfreq);

	/* Unfortunately, the specific model number is encoded in the
	 * soc node name in existing dts files -- once that is fixed,
	 * this can do a simple path lookup.
	 */
	soc_node = find_node_by_devtype(NULL, "soc");
	if (!soc_node)
		return;

	setprop(soc_node, "bus-frequency", &bd.bi_busfreq,
		sizeof(bd.bi_busfreq));

	/* Fix the clock of every serial port that sits directly on the SOC. */
	while ((serial_node = find_node_by_devtype(serial_node, "serial"))) {
		if (get_parent(serial_node) != soc_node)
			continue;
		setprop(serial_node, "clock-frequency", &bd.bi_busfreq,
			sizeof(bd.bi_busfreq));
	}
}

/*
 * Boot wrapper entry point: capture the U-Boot board info, initialize
 * the device tree and console, and register the fixup callback.
 */
void platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
		   unsigned long r6, unsigned long r7)
{
	CUBOOT_INIT();
	fdt_init(_dtb_start);
	serial_console_init();
	platform_ops.fixups = platform_fixups;
}
gpl-2.0
luckasfb/kernel-novathor-ux500
arch/powerpc/boot/cuboot-85xx-cpm2.c
14062
1764
/*
 * Old U-boot compatibility for 85xx
 *
 * Author: Scott Wood <scottwood@freescale.com>
 *
 * Copyright (c) 2007 Freescale Semiconductor, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 */

#include "ops.h"
#include "stdio.h"
#include "cuboot.h"

#define TARGET_85xx
#define TARGET_CPM2
#include "ppcboot.h"

/* Board-information record handed over by the old U-Boot (CUBOOT_INIT). */
static bd_t bd;

/*
 * Patch the flattened device tree with the memory layout, MAC addresses
 * and clock frequencies the old U-Boot passed in its bd_t record.
 */
static void platform_fixups(void)
{
	void *node;

	dt_fixup_memory(bd.bi_memstart, bd.bi_memsize);
	dt_fixup_mac_address_by_alias("ethernet0", bd.bi_enetaddr);
	dt_fixup_mac_address_by_alias("ethernet1", bd.bi_enet1addr);
	dt_fixup_mac_address_by_alias("ethernet2", bd.bi_enet2addr);
	dt_fixup_cpu_clocks(bd.bi_intfreq, bd.bi_busfreq / 8, bd.bi_busfreq);

	/* Unfortunately, the specific model number is encoded in the
	 * soc node name in existing dts files -- once that is fixed,
	 * this can do a simple path lookup.
	 */
	node = find_node_by_devtype(NULL, "soc");
	if (node) {
		void *uart = NULL;

		setprop(node, "bus-frequency", &bd.bi_busfreq,
			sizeof(bd.bi_busfreq));

		/* Fix the clock of every serial port directly on the SOC. */
		while ((uart = find_node_by_devtype(uart, "serial"))) {
			if (get_parent(uart) != node)
				continue;
			setprop(uart, "clock-frequency", &bd.bi_busfreq,
				sizeof(bd.bi_busfreq));
		}
	}

	/* CPM2 baud-rate generator gets its own clock from the bd_t. */
	node = find_node_by_compatible(NULL, "fsl,cpm2-brg");
	if (node)
		setprop(node, "clock-frequency", &bd.bi_brgfreq,
			sizeof(bd.bi_brgfreq));
}

/*
 * Boot wrapper entry point: capture the U-Boot board info, initialize
 * the device tree and console, and register the fixup callback.
 */
void platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
		   unsigned long r6, unsigned long r7)
{
	CUBOOT_INIT();
	fdt_init(_dtb_start);
	serial_console_init();
	platform_ops.fixups = platform_fixups;
}
gpl-2.0
chinaopenx/linux
drivers/scsi/qla2xxx/qla_nx.c
239
118619
/* * QLogic Fibre Channel HBA Driver * Copyright (c) 2003-2014 QLogic Corporation * * See LICENSE.qla2xxx for copyright and licensing details. */ #include "qla_def.h" #include <linux/delay.h> #include <linux/pci.h> #include <linux/ratelimit.h> #include <linux/vmalloc.h> #include <scsi/scsi_tcq.h> #define MASK(n) ((1ULL<<(n))-1) #define MN_WIN(addr) (((addr & 0x1fc0000) >> 1) | \ ((addr >> 25) & 0x3ff)) #define OCM_WIN(addr) (((addr & 0x1ff0000) >> 1) | \ ((addr >> 25) & 0x3ff)) #define MS_WIN(addr) (addr & 0x0ffc0000) #define QLA82XX_PCI_MN_2M (0) #define QLA82XX_PCI_MS_2M (0x80000) #define QLA82XX_PCI_OCM0_2M (0xc0000) #define VALID_OCM_ADDR(addr) (((addr) & 0x3f800) != 0x3f800) #define GET_MEM_OFFS_2M(addr) (addr & MASK(18)) #define BLOCK_PROTECT_BITS 0x0F /* CRB window related */ #define CRB_BLK(off) ((off >> 20) & 0x3f) #define CRB_SUBBLK(off) ((off >> 16) & 0xf) #define CRB_WINDOW_2M (0x130060) #define QLA82XX_PCI_CAMQM_2M_END (0x04800800UL) #define CRB_HI(off) ((qla82xx_crb_hub_agt[CRB_BLK(off)] << 20) | \ ((off) & 0xf0000)) #define QLA82XX_PCI_CAMQM_2M_BASE (0x000ff800UL) #define CRB_INDIRECT_2M (0x1e0000UL) #define MAX_CRB_XFORM 60 static unsigned long crb_addr_xform[MAX_CRB_XFORM]; static int qla82xx_crb_table_initialized; #define qla82xx_crb_addr_transform(name) \ (crb_addr_xform[QLA82XX_HW_PX_MAP_CRB_##name] = \ QLA82XX_HW_CRB_HUB_AGT_ADR_##name << 20) static void qla82xx_crb_addr_transform_setup(void) { qla82xx_crb_addr_transform(XDMA); qla82xx_crb_addr_transform(TIMR); qla82xx_crb_addr_transform(SRE); qla82xx_crb_addr_transform(SQN3); qla82xx_crb_addr_transform(SQN2); qla82xx_crb_addr_transform(SQN1); qla82xx_crb_addr_transform(SQN0); qla82xx_crb_addr_transform(SQS3); qla82xx_crb_addr_transform(SQS2); qla82xx_crb_addr_transform(SQS1); qla82xx_crb_addr_transform(SQS0); qla82xx_crb_addr_transform(RPMX7); qla82xx_crb_addr_transform(RPMX6); qla82xx_crb_addr_transform(RPMX5); qla82xx_crb_addr_transform(RPMX4); qla82xx_crb_addr_transform(RPMX3); 
qla82xx_crb_addr_transform(RPMX2); qla82xx_crb_addr_transform(RPMX1); qla82xx_crb_addr_transform(RPMX0); qla82xx_crb_addr_transform(ROMUSB); qla82xx_crb_addr_transform(SN); qla82xx_crb_addr_transform(QMN); qla82xx_crb_addr_transform(QMS); qla82xx_crb_addr_transform(PGNI); qla82xx_crb_addr_transform(PGND); qla82xx_crb_addr_transform(PGN3); qla82xx_crb_addr_transform(PGN2); qla82xx_crb_addr_transform(PGN1); qla82xx_crb_addr_transform(PGN0); qla82xx_crb_addr_transform(PGSI); qla82xx_crb_addr_transform(PGSD); qla82xx_crb_addr_transform(PGS3); qla82xx_crb_addr_transform(PGS2); qla82xx_crb_addr_transform(PGS1); qla82xx_crb_addr_transform(PGS0); qla82xx_crb_addr_transform(PS); qla82xx_crb_addr_transform(PH); qla82xx_crb_addr_transform(NIU); qla82xx_crb_addr_transform(I2Q); qla82xx_crb_addr_transform(EG); qla82xx_crb_addr_transform(MN); qla82xx_crb_addr_transform(MS); qla82xx_crb_addr_transform(CAS2); qla82xx_crb_addr_transform(CAS1); qla82xx_crb_addr_transform(CAS0); qla82xx_crb_addr_transform(CAM); qla82xx_crb_addr_transform(C2C1); qla82xx_crb_addr_transform(C2C0); qla82xx_crb_addr_transform(SMB); qla82xx_crb_addr_transform(OCM0); /* * Used only in P3 just define it for P2 also. 
*/ qla82xx_crb_addr_transform(I2C0); qla82xx_crb_table_initialized = 1; } static struct crb_128M_2M_block_map crb_128M_2M_map[64] = { {{{0, 0, 0, 0} } }, {{{1, 0x0100000, 0x0102000, 0x120000}, {1, 0x0110000, 0x0120000, 0x130000}, {1, 0x0120000, 0x0122000, 0x124000}, {1, 0x0130000, 0x0132000, 0x126000}, {1, 0x0140000, 0x0142000, 0x128000}, {1, 0x0150000, 0x0152000, 0x12a000}, {1, 0x0160000, 0x0170000, 0x110000}, {1, 0x0170000, 0x0172000, 0x12e000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {1, 0x01e0000, 0x01e0800, 0x122000}, {0, 0x0000000, 0x0000000, 0x000000} } } , {{{1, 0x0200000, 0x0210000, 0x180000} } }, {{{0, 0, 0, 0} } }, {{{1, 0x0400000, 0x0401000, 0x169000} } }, {{{1, 0x0500000, 0x0510000, 0x140000} } }, {{{1, 0x0600000, 0x0610000, 0x1c0000} } }, {{{1, 0x0700000, 0x0704000, 0x1b8000} } }, {{{1, 0x0800000, 0x0802000, 0x170000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {1, 0x08f0000, 0x08f2000, 0x172000} } }, {{{1, 0x0900000, 0x0902000, 0x174000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 
0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {1, 0x09f0000, 0x09f2000, 0x176000} } }, {{{0, 0x0a00000, 0x0a02000, 0x178000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {1, 0x0af0000, 0x0af2000, 0x17a000} } }, {{{0, 0x0b00000, 0x0b02000, 0x17c000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {1, 0x0bf0000, 0x0bf2000, 0x17e000} } }, {{{1, 0x0c00000, 0x0c04000, 0x1d4000} } }, {{{1, 0x0d00000, 0x0d04000, 0x1a4000} } }, {{{1, 0x0e00000, 0x0e04000, 0x1a0000} } }, {{{1, 0x0f00000, 0x0f01000, 0x164000} } }, {{{0, 0x1000000, 0x1004000, 0x1a8000} } }, {{{1, 0x1100000, 0x1101000, 0x160000} } }, {{{1, 0x1200000, 0x1201000, 0x161000} } }, {{{1, 0x1300000, 0x1301000, 0x162000} } }, {{{1, 0x1400000, 0x1401000, 0x163000} } }, {{{1, 0x1500000, 0x1501000, 0x165000} } }, {{{1, 0x1600000, 0x1601000, 0x166000} } }, {{{0, 0, 0, 0} } }, {{{0, 0, 0, 0} } }, {{{0, 0, 0, 0} } }, {{{0, 0, 0, 0} } }, {{{0, 0, 0, 0} } }, {{{0, 0, 0, 0} } }, {{{1, 0x1d00000, 
0x1d10000, 0x190000} } }, {{{1, 0x1e00000, 0x1e01000, 0x16a000} } }, {{{1, 0x1f00000, 0x1f10000, 0x150000} } }, {{{0} } }, {{{1, 0x2100000, 0x2102000, 0x120000}, {1, 0x2110000, 0x2120000, 0x130000}, {1, 0x2120000, 0x2122000, 0x124000}, {1, 0x2130000, 0x2132000, 0x126000}, {1, 0x2140000, 0x2142000, 0x128000}, {1, 0x2150000, 0x2152000, 0x12a000}, {1, 0x2160000, 0x2170000, 0x110000}, {1, 0x2170000, 0x2172000, 0x12e000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000} } }, {{{1, 0x2200000, 0x2204000, 0x1b0000} } }, {{{0} } }, {{{0} } }, {{{0} } }, {{{0} } }, {{{0} } }, {{{1, 0x2800000, 0x2804000, 0x1a4000} } }, {{{1, 0x2900000, 0x2901000, 0x16b000} } }, {{{1, 0x2a00000, 0x2a00400, 0x1ac400} } }, {{{1, 0x2b00000, 0x2b00400, 0x1ac800} } }, {{{1, 0x2c00000, 0x2c00400, 0x1acc00} } }, {{{1, 0x2d00000, 0x2d00400, 0x1ad000} } }, {{{1, 0x2e00000, 0x2e00400, 0x1ad400} } }, {{{1, 0x2f00000, 0x2f00400, 0x1ad800} } }, {{{1, 0x3000000, 0x3000400, 0x1adc00} } }, {{{0, 0x3100000, 0x3104000, 0x1a8000} } }, {{{1, 0x3200000, 0x3204000, 0x1d4000} } }, {{{1, 0x3300000, 0x3304000, 0x1a0000} } }, {{{0} } }, {{{1, 0x3500000, 0x3500400, 0x1ac000} } }, {{{1, 0x3600000, 0x3600400, 0x1ae000} } }, {{{1, 0x3700000, 0x3700400, 0x1ae400} } }, {{{1, 0x3800000, 0x3804000, 0x1d0000} } }, {{{1, 0x3900000, 0x3904000, 0x1b4000} } }, {{{1, 0x3a00000, 0x3a04000, 0x1d8000} } }, {{{0} } }, {{{0} } }, {{{1, 0x3d00000, 0x3d04000, 0x1dc000} } }, {{{1, 0x3e00000, 0x3e01000, 0x167000} } }, {{{1, 0x3f00000, 0x3f01000, 0x168000} } } }; /* * top 12 bits of crb internal address (hub, agent) */ static unsigned qla82xx_crb_hub_agt[64] = { 0, QLA82XX_HW_CRB_HUB_AGT_ADR_PS, QLA82XX_HW_CRB_HUB_AGT_ADR_MN, QLA82XX_HW_CRB_HUB_AGT_ADR_MS, 0, QLA82XX_HW_CRB_HUB_AGT_ADR_SRE, 
QLA82XX_HW_CRB_HUB_AGT_ADR_NIU, QLA82XX_HW_CRB_HUB_AGT_ADR_QMN, QLA82XX_HW_CRB_HUB_AGT_ADR_SQN0, QLA82XX_HW_CRB_HUB_AGT_ADR_SQN1, QLA82XX_HW_CRB_HUB_AGT_ADR_SQN2, QLA82XX_HW_CRB_HUB_AGT_ADR_SQN3, QLA82XX_HW_CRB_HUB_AGT_ADR_I2Q, QLA82XX_HW_CRB_HUB_AGT_ADR_TIMR, QLA82XX_HW_CRB_HUB_AGT_ADR_ROMUSB, QLA82XX_HW_CRB_HUB_AGT_ADR_PGN4, QLA82XX_HW_CRB_HUB_AGT_ADR_XDMA, QLA82XX_HW_CRB_HUB_AGT_ADR_PGN0, QLA82XX_HW_CRB_HUB_AGT_ADR_PGN1, QLA82XX_HW_CRB_HUB_AGT_ADR_PGN2, QLA82XX_HW_CRB_HUB_AGT_ADR_PGN3, QLA82XX_HW_CRB_HUB_AGT_ADR_PGND, QLA82XX_HW_CRB_HUB_AGT_ADR_PGNI, QLA82XX_HW_CRB_HUB_AGT_ADR_PGS0, QLA82XX_HW_CRB_HUB_AGT_ADR_PGS1, QLA82XX_HW_CRB_HUB_AGT_ADR_PGS2, QLA82XX_HW_CRB_HUB_AGT_ADR_PGS3, 0, QLA82XX_HW_CRB_HUB_AGT_ADR_PGSI, QLA82XX_HW_CRB_HUB_AGT_ADR_SN, 0, QLA82XX_HW_CRB_HUB_AGT_ADR_EG, 0, QLA82XX_HW_CRB_HUB_AGT_ADR_PS, QLA82XX_HW_CRB_HUB_AGT_ADR_CAM, 0, 0, 0, 0, 0, QLA82XX_HW_CRB_HUB_AGT_ADR_TIMR, 0, QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX1, QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX2, QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX3, QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX4, QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX5, QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX6, QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX7, QLA82XX_HW_CRB_HUB_AGT_ADR_XDMA, QLA82XX_HW_CRB_HUB_AGT_ADR_I2Q, QLA82XX_HW_CRB_HUB_AGT_ADR_ROMUSB, 0, QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX0, QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX8, QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX9, QLA82XX_HW_CRB_HUB_AGT_ADR_OCM0, 0, QLA82XX_HW_CRB_HUB_AGT_ADR_SMB, QLA82XX_HW_CRB_HUB_AGT_ADR_I2C0, QLA82XX_HW_CRB_HUB_AGT_ADR_I2C1, 0, QLA82XX_HW_CRB_HUB_AGT_ADR_PGNC, 0, }; /* Device states */ static char *q_dev_state[] = { "Unknown", "Cold", "Initializing", "Ready", "Need Reset", "Need Quiescent", "Failed", "Quiescent", }; char *qdev_state(uint32_t dev_state) { return q_dev_state[dev_state]; } /* * In: 'off_in' is offset from CRB space in 128M pci map * Out: 'off_out' is 2M pci map addr * side effect: lock crb window */ static void qla82xx_pci_set_crbwindow_2M(struct qla_hw_data *ha, ulong off_in, void __iomem **off_out) { u32 
win_read;
	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);

	ha->crb_win = CRB_HI(off_in);
	writel(ha->crb_win, CRB_WINDOW_2M + ha->nx_pcibase);

	/* Read back value to make sure write has gone through before trying
	 * to use it.
	 */
	win_read = RD_REG_DWORD(CRB_WINDOW_2M + ha->nx_pcibase);
	if (win_read != ha->crb_win) {
		/* Window did not latch - log it; caller proceeds regardless */
		ql_dbg(ql_dbg_p3p, vha, 0xb000,
		    "%s: Written crbwin (0x%x) "
		    "!= Read crbwin (0x%x), off=0x%lx.\n",
		    __func__, ha->crb_win, win_read, off_in);
	}
	*off_out = (off_in & MASK(16)) + CRB_INDIRECT_2M + ha->nx_pcibase;
}

/*
 * qla82xx_pci_set_crbwindow() - Translate a CRB offset for the current
 * 128M-map window.
 * @ha: HA context
 * @off: CRB offset to translate
 *
 * Returns @off unchanged for window-independent regions, rebased into the
 * first window for second-window addresses, and logs a warning for
 * addresses outside any known range (the offset is still returned).
 */
static inline unsigned long
qla82xx_pci_set_crbwindow(struct qla_hw_data *ha, u64 off)
{
	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);

	/* See if we are currently pointing to the region we want to use next */
	if ((off >= QLA82XX_CRB_PCIX_HOST) && (off < QLA82XX_CRB_DDR_NET)) {
		/* No need to change window. PCIX and PCIE regs are
		 * accessible from both windows.
		 */
		return off;
	}

	if ((off >= QLA82XX_CRB_PCIX_HOST) && (off < QLA82XX_CRB_PCIX_HOST2)) {
		/* We are in first CRB window */
		if (ha->curr_window != 0)
			WARN_ON(1);
		return off;
	}

	if ((off > QLA82XX_CRB_PCIX_HOST2) && (off < QLA82XX_CRB_MAX)) {
		/* We are in second CRB window */
		off = off - QLA82XX_CRB_PCIX_HOST2 + QLA82XX_CRB_PCIX_HOST;

		if (ha->curr_window != 1)
			return off;

		/* We are in the QM or direct access
		 * register region - do nothing
		 */
		if ((off >= QLA82XX_PCI_DIRECT_CRB) &&
			(off < QLA82XX_PCI_CAMQM_MAX))
			return off;
	}
	/* strange address given */
	ql_dbg(ql_dbg_p3p, vha, 0xb001,
	    "%s: Warning: unm_nic_pci_set_crbwindow "
	    "called with an unknown address(%llx).\n",
	    QLA2XXX_DRIVER_NAME, off);
	return off;
}

/*
 * qla82xx_pci_get_crb_addr_2M() - Resolve a CRB offset to a 2M-map ioaddr.
 * @ha: HA context
 * @off_in: CRB offset in the 128M pci map
 * @off_out: resulting __iomem address (or partial offset when windowing
 *	is still required)
 *
 * Returns 0 when the address was fully resolved (direct map or CAMQM),
 * -1 for an invalid offset, and a positive value when the caller must
 * program the CRB window first.
 */
static int
qla82xx_pci_get_crb_addr_2M(struct qla_hw_data *ha, ulong off_in,
	void __iomem **off_out)
{
	struct crb_128M_2M_sub_block_map *m;

	if (off_in >= QLA82XX_CRB_MAX)
		return -1;

	if (off_in >= QLA82XX_PCI_CAMQM && off_in < QLA82XX_PCI_CAMQM_2M_END) {
		*off_out = (off_in - QLA82XX_PCI_CAMQM) +
		    QLA82XX_PCI_CAMQM_2M_BASE + ha->nx_pcibase;
		return 0;
	}

	if (off_in < QLA82XX_PCI_CRBSPACE)
return -1;

	*off_out = (void __iomem *)(off_in - QLA82XX_PCI_CRBSPACE);

	/* Try direct map */
	m = &crb_128M_2M_map[CRB_BLK(off_in)].sub_block[CRB_SUBBLK(off_in)];

	if (m->valid && (m->start_128M <= off_in) && (m->end_128M > off_in)) {
		*off_out = off_in + m->start_2M - m->start_128M + ha->nx_pcibase;
		return 0;
	}
	/* Not in direct map, use crb window */
	return 1;
}

#define CRB_WIN_LOCK_TIMEOUT 100000000

/*
 * qla82xx_crb_win_lock() - Acquire the inter-function CRB window lock.
 * @ha: HA context
 *
 * Busy-waits on PCIe hardware semaphore 7; on success records our port
 * number as the lock holder.  Returns 0 on success, -1 on timeout.
 */
static int qla82xx_crb_win_lock(struct qla_hw_data *ha)
{
	int done = 0, timeout = 0;

	while (!done) {
		/* acquire semaphore3 from PCI HW block */
		done = qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM7_LOCK));
		if (done == 1)
			break;
		if (timeout >= CRB_WIN_LOCK_TIMEOUT)
			return -1;
		timeout++;
	}
	qla82xx_wr_32(ha, QLA82XX_CRB_WIN_LOCK_ID, ha->portnum);
	return 0;
}

/*
 * qla82xx_wr_32() - Write a dword to a CRB register.
 * @ha: HA context
 * @off_in: CRB offset in the 128M pci map
 * @data: value to write
 *
 * Directly mapped offsets are written straight through; otherwise the
 * hw_lock and the CRB window semaphore are taken around the windowed
 * access.  BUG()s on an invalid offset.
 */
int
qla82xx_wr_32(struct qla_hw_data *ha, ulong off_in, u32 data)
{
	void __iomem *off;
	unsigned long flags = 0;
	int rv;

	rv = qla82xx_pci_get_crb_addr_2M(ha, off_in, &off);

	BUG_ON(rv == -1);

	if (rv == 1) {
		/* sparse cannot follow the conditional lock, hence the guard */
#ifndef __CHECKER__
		write_lock_irqsave(&ha->hw_lock, flags);
#endif
		qla82xx_crb_win_lock(ha);
		qla82xx_pci_set_crbwindow_2M(ha, off_in, &off);
	}

	writel(data, (void __iomem *)off);

	if (rv == 1) {
		qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM7_UNLOCK));
#ifndef __CHECKER__
		write_unlock_irqrestore(&ha->hw_lock, flags);
#endif
	}
	return 0;
}

/*
 * qla82xx_rd_32() - Read a dword from a CRB register.
 * @ha: HA context
 * @off_in: CRB offset in the 128M pci map
 *
 * Same locking/windowing scheme as qla82xx_wr_32().  Returns the value
 * read; BUG()s on an invalid offset.
 */
int
qla82xx_rd_32(struct qla_hw_data *ha, ulong off_in)
{
	void __iomem *off;
	unsigned long flags = 0;
	int rv;
	u32 data;

	rv = qla82xx_pci_get_crb_addr_2M(ha, off_in, &off);

	BUG_ON(rv == -1);

	if (rv == 1) {
#ifndef __CHECKER__
		write_lock_irqsave(&ha->hw_lock, flags);
#endif
		qla82xx_crb_win_lock(ha);
		qla82xx_pci_set_crbwindow_2M(ha, off_in, &off);
	}
	data = RD_REG_DWORD(off);

	if (rv == 1) {
		qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM7_UNLOCK));
#ifndef __CHECKER__
		write_unlock_irqrestore(&ha->hw_lock, flags);
#endif
	}
	return data;
}

#define IDC_LOCK_TIMEOUT 100000000

/*
 * qla82xx_idc_lock() - Acquire the IDC hardware semaphore (SEM5).
 * Returns 0 on success, -1 on timeout; yields the CPU between polls
 * when not in interrupt context.
 */
int qla82xx_idc_lock(struct qla_hw_data *ha)
{
	int i;
	int done = 0, timeout = 0;

	while (!done) {
		/* acquire semaphore5 from PCI HW
block */ done = qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM5_LOCK)); if (done == 1) break; if (timeout >= IDC_LOCK_TIMEOUT) return -1; timeout++; /* Yield CPU */ if (!in_interrupt()) schedule(); else { for (i = 0; i < 20; i++) cpu_relax(); } } return 0; } void qla82xx_idc_unlock(struct qla_hw_data *ha) { qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM5_UNLOCK)); } /* * check memory access boundary. * used by test agent. support ddr access only for now */ static unsigned long qla82xx_pci_mem_bound_check(struct qla_hw_data *ha, unsigned long long addr, int size) { if (!addr_in_range(addr, QLA82XX_ADDR_DDR_NET, QLA82XX_ADDR_DDR_NET_MAX) || !addr_in_range(addr + size - 1, QLA82XX_ADDR_DDR_NET, QLA82XX_ADDR_DDR_NET_MAX) || ((size != 1) && (size != 2) && (size != 4) && (size != 8))) return 0; else return 1; } static int qla82xx_pci_set_window_warning_count; static unsigned long qla82xx_pci_set_window(struct qla_hw_data *ha, unsigned long long addr) { int window; u32 win_read; scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); if (addr_in_range(addr, QLA82XX_ADDR_DDR_NET, QLA82XX_ADDR_DDR_NET_MAX)) { /* DDR network side */ window = MN_WIN(addr); ha->ddr_mn_window = window; qla82xx_wr_32(ha, ha->mn_win_crb | QLA82XX_PCI_CRBSPACE, window); win_read = qla82xx_rd_32(ha, ha->mn_win_crb | QLA82XX_PCI_CRBSPACE); if ((win_read << 17) != window) { ql_dbg(ql_dbg_p3p, vha, 0xb003, "%s: Written MNwin (0x%x) != Read MNwin (0x%x).\n", __func__, window, win_read); } addr = GET_MEM_OFFS_2M(addr) + QLA82XX_PCI_DDR_NET; } else if (addr_in_range(addr, QLA82XX_ADDR_OCM0, QLA82XX_ADDR_OCM0_MAX)) { unsigned int temp1; if ((addr & 0x00ff800) == 0xff800) { ql_log(ql_log_warn, vha, 0xb004, "%s: QM access not handled.\n", __func__); addr = -1UL; } window = OCM_WIN(addr); ha->ddr_mn_window = window; qla82xx_wr_32(ha, ha->mn_win_crb | QLA82XX_PCI_CRBSPACE, window); win_read = qla82xx_rd_32(ha, ha->mn_win_crb | QLA82XX_PCI_CRBSPACE); temp1 = ((window & 0x1FF) << 7) | ((window & 0x0FFFE0000) >> 17); if 
(win_read != temp1) { ql_log(ql_log_warn, vha, 0xb005, "%s: Written OCMwin (0x%x) != Read OCMwin (0x%x).\n", __func__, temp1, win_read); } addr = GET_MEM_OFFS_2M(addr) + QLA82XX_PCI_OCM0_2M; } else if (addr_in_range(addr, QLA82XX_ADDR_QDR_NET, QLA82XX_P3_ADDR_QDR_NET_MAX)) { /* QDR network side */ window = MS_WIN(addr); ha->qdr_sn_window = window; qla82xx_wr_32(ha, ha->ms_win_crb | QLA82XX_PCI_CRBSPACE, window); win_read = qla82xx_rd_32(ha, ha->ms_win_crb | QLA82XX_PCI_CRBSPACE); if (win_read != window) { ql_log(ql_log_warn, vha, 0xb006, "%s: Written MSwin (0x%x) != Read MSwin (0x%x).\n", __func__, window, win_read); } addr = GET_MEM_OFFS_2M(addr) + QLA82XX_PCI_QDR_NET; } else { /* * peg gdb frequently accesses memory that doesn't exist, * this limits the chit chat so debugging isn't slowed down. */ if ((qla82xx_pci_set_window_warning_count++ < 8) || (qla82xx_pci_set_window_warning_count%64 == 0)) { ql_log(ql_log_warn, vha, 0xb007, "%s: Warning:%s Unknown address range!.\n", __func__, QLA2XXX_DRIVER_NAME); } addr = -1UL; } return addr; } /* check if address is in the same windows as the previous access */ static int qla82xx_pci_is_same_window(struct qla_hw_data *ha, unsigned long long addr) { int window; unsigned long long qdr_max; qdr_max = QLA82XX_P3_ADDR_QDR_NET_MAX; /* DDR network side */ if (addr_in_range(addr, QLA82XX_ADDR_DDR_NET, QLA82XX_ADDR_DDR_NET_MAX)) BUG(); else if (addr_in_range(addr, QLA82XX_ADDR_OCM0, QLA82XX_ADDR_OCM0_MAX)) return 1; else if (addr_in_range(addr, QLA82XX_ADDR_OCM1, QLA82XX_ADDR_OCM1_MAX)) return 1; else if (addr_in_range(addr, QLA82XX_ADDR_QDR_NET, qdr_max)) { /* QDR network side */ window = ((addr - QLA82XX_ADDR_QDR_NET) >> 22) & 0x3f; if (ha->qdr_sn_window == window) return 1; } return 0; } static int qla82xx_pci_mem_read_direct(struct qla_hw_data *ha, u64 off, void *data, int size) { unsigned long flags; void __iomem *addr = NULL; int ret = 0; u64 start; uint8_t __iomem *mem_ptr = NULL; unsigned long mem_base; unsigned long 
mem_page; scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); write_lock_irqsave(&ha->hw_lock, flags); /* * If attempting to access unknown address or straddle hw windows, * do not access. */ start = qla82xx_pci_set_window(ha, off); if ((start == -1UL) || (qla82xx_pci_is_same_window(ha, off + size - 1) == 0)) { write_unlock_irqrestore(&ha->hw_lock, flags); ql_log(ql_log_fatal, vha, 0xb008, "%s out of bound pci memory " "access, offset is 0x%llx.\n", QLA2XXX_DRIVER_NAME, off); return -1; } write_unlock_irqrestore(&ha->hw_lock, flags); mem_base = pci_resource_start(ha->pdev, 0); mem_page = start & PAGE_MASK; /* Map two pages whenever user tries to access addresses in two * consecutive pages. */ if (mem_page != ((start + size - 1) & PAGE_MASK)) mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE * 2); else mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE); if (mem_ptr == NULL) { *(u8 *)data = 0; return -1; } addr = mem_ptr; addr += start & (PAGE_SIZE - 1); write_lock_irqsave(&ha->hw_lock, flags); switch (size) { case 1: *(u8 *)data = readb(addr); break; case 2: *(u16 *)data = readw(addr); break; case 4: *(u32 *)data = readl(addr); break; case 8: *(u64 *)data = readq(addr); break; default: ret = -1; break; } write_unlock_irqrestore(&ha->hw_lock, flags); if (mem_ptr) iounmap(mem_ptr); return ret; } static int qla82xx_pci_mem_write_direct(struct qla_hw_data *ha, u64 off, void *data, int size) { unsigned long flags; void __iomem *addr = NULL; int ret = 0; u64 start; uint8_t __iomem *mem_ptr = NULL; unsigned long mem_base; unsigned long mem_page; scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); write_lock_irqsave(&ha->hw_lock, flags); /* * If attempting to access unknown address or straddle hw windows, * do not access. 
*/ start = qla82xx_pci_set_window(ha, off); if ((start == -1UL) || (qla82xx_pci_is_same_window(ha, off + size - 1) == 0)) { write_unlock_irqrestore(&ha->hw_lock, flags); ql_log(ql_log_fatal, vha, 0xb009, "%s out of bount memory " "access, offset is 0x%llx.\n", QLA2XXX_DRIVER_NAME, off); return -1; } write_unlock_irqrestore(&ha->hw_lock, flags); mem_base = pci_resource_start(ha->pdev, 0); mem_page = start & PAGE_MASK; /* Map two pages whenever user tries to access addresses in two * consecutive pages. */ if (mem_page != ((start + size - 1) & PAGE_MASK)) mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE*2); else mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE); if (mem_ptr == NULL) return -1; addr = mem_ptr; addr += start & (PAGE_SIZE - 1); write_lock_irqsave(&ha->hw_lock, flags); switch (size) { case 1: writeb(*(u8 *)data, addr); break; case 2: writew(*(u16 *)data, addr); break; case 4: writel(*(u32 *)data, addr); break; case 8: writeq(*(u64 *)data, addr); break; default: ret = -1; break; } write_unlock_irqrestore(&ha->hw_lock, flags); if (mem_ptr) iounmap(mem_ptr); return ret; } #define MTU_FUDGE_FACTOR 100 static unsigned long qla82xx_decode_crb_addr(unsigned long addr) { int i; unsigned long base_addr, offset, pci_base; if (!qla82xx_crb_table_initialized) qla82xx_crb_addr_transform_setup(); pci_base = ADDR_ERROR; base_addr = addr & 0xfff00000; offset = addr & 0x000fffff; for (i = 0; i < MAX_CRB_XFORM; i++) { if (crb_addr_xform[i] == base_addr) { pci_base = i << 20; break; } } if (pci_base == ADDR_ERROR) return pci_base; return pci_base + offset; } static long rom_max_timeout = 100; static long qla82xx_rom_lock_timeout = 100; static int qla82xx_rom_lock(struct qla_hw_data *ha) { int done = 0, timeout = 0; uint32_t lock_owner = 0; scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); while (!done) { /* acquire semaphore2 from PCI HW block */ done = qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_LOCK)); if (done == 1) break; if (timeout >= qla82xx_rom_lock_timeout) { 
lock_owner = qla82xx_rd_32(ha, QLA82XX_ROM_LOCK_ID); ql_dbg(ql_dbg_p3p, vha, 0xb157, "%s: Simultaneous flash access by following ports, active port = %d: accessing port = %d", __func__, ha->portnum, lock_owner); return -1; } timeout++; } qla82xx_wr_32(ha, QLA82XX_ROM_LOCK_ID, ha->portnum); return 0; } static void qla82xx_rom_unlock(struct qla_hw_data *ha) { qla82xx_wr_32(ha, QLA82XX_ROM_LOCK_ID, 0xffffffff); qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_UNLOCK)); } static int qla82xx_wait_rom_busy(struct qla_hw_data *ha) { long timeout = 0; long done = 0 ; scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); while (done == 0) { done = qla82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_STATUS); done &= 4; timeout++; if (timeout >= rom_max_timeout) { ql_dbg(ql_dbg_p3p, vha, 0xb00a, "%s: Timeout reached waiting for rom busy.\n", QLA2XXX_DRIVER_NAME); return -1; } } return 0; } static int qla82xx_wait_rom_done(struct qla_hw_data *ha) { long timeout = 0; long done = 0 ; scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); while (done == 0) { done = qla82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_STATUS); done &= 2; timeout++; if (timeout >= rom_max_timeout) { ql_dbg(ql_dbg_p3p, vha, 0xb00b, "%s: Timeout reached waiting for rom done.\n", QLA2XXX_DRIVER_NAME); return -1; } } return 0; } static int qla82xx_md_rw_32(struct qla_hw_data *ha, uint32_t off, u32 data, uint8_t flag) { uint32_t off_value, rval = 0; WRT_REG_DWORD(CRB_WINDOW_2M + ha->nx_pcibase, off & 0xFFFF0000); /* Read back value to make sure write has gone through */ RD_REG_DWORD(CRB_WINDOW_2M + ha->nx_pcibase); off_value = (off & 0x0000FFFF); if (flag) WRT_REG_DWORD(off_value + CRB_INDIRECT_2M + ha->nx_pcibase, data); else rval = RD_REG_DWORD(off_value + CRB_INDIRECT_2M + ha->nx_pcibase); return rval; } static int qla82xx_do_rom_fast_read(struct qla_hw_data *ha, int addr, int *valp) { /* Dword reads to flash. 
*/ qla82xx_md_rw_32(ha, MD_DIRECT_ROM_WINDOW, (addr & 0xFFFF0000), 1); *valp = qla82xx_md_rw_32(ha, MD_DIRECT_ROM_READ_BASE + (addr & 0x0000FFFF), 0, 0); return 0; } static int qla82xx_rom_fast_read(struct qla_hw_data *ha, int addr, int *valp) { int ret, loops = 0; uint32_t lock_owner = 0; scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); while ((qla82xx_rom_lock(ha) != 0) && (loops < 50000)) { udelay(100); schedule(); loops++; } if (loops >= 50000) { lock_owner = qla82xx_rd_32(ha, QLA82XX_ROM_LOCK_ID); ql_log(ql_log_fatal, vha, 0x00b9, "Failed to acquire SEM2 lock, Lock Owner %u.\n", lock_owner); return -1; } ret = qla82xx_do_rom_fast_read(ha, addr, valp); qla82xx_rom_unlock(ha); return ret; } static int qla82xx_read_status_reg(struct qla_hw_data *ha, uint32_t *val) { scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_RDSR); qla82xx_wait_rom_busy(ha); if (qla82xx_wait_rom_done(ha)) { ql_log(ql_log_warn, vha, 0xb00c, "Error waiting for rom done.\n"); return -1; } *val = qla82xx_rd_32(ha, QLA82XX_ROMUSB_ROM_RDATA); return 0; } static int qla82xx_flash_wait_write_finish(struct qla_hw_data *ha) { long timeout = 0; uint32_t done = 1 ; uint32_t val; int ret = 0; scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 0); while ((done != 0) && (ret == 0)) { ret = qla82xx_read_status_reg(ha, &val); done = val & 1; timeout++; udelay(10); cond_resched(); if (timeout >= 50000) { ql_log(ql_log_warn, vha, 0xb00d, "Timeout reached waiting for write finish.\n"); return -1; } } return ret; } static int qla82xx_flash_set_write_enable(struct qla_hw_data *ha) { uint32_t val; qla82xx_wait_rom_busy(ha); qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 0); qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_WREN); qla82xx_wait_rom_busy(ha); if (qla82xx_wait_rom_done(ha)) return -1; if (qla82xx_read_status_reg(ha, &val) != 0) return -1; if ((val & 2) != 2) return -1; return 0; 
} static int qla82xx_write_status_reg(struct qla_hw_data *ha, uint32_t val) { scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); if (qla82xx_flash_set_write_enable(ha)) return -1; qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_WDATA, val); qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, 0x1); if (qla82xx_wait_rom_done(ha)) { ql_log(ql_log_warn, vha, 0xb00e, "Error waiting for rom done.\n"); return -1; } return qla82xx_flash_wait_write_finish(ha); } static int qla82xx_write_disable_flash(struct qla_hw_data *ha) { scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_WRDI); if (qla82xx_wait_rom_done(ha)) { ql_log(ql_log_warn, vha, 0xb00f, "Error waiting for rom done.\n"); return -1; } return 0; } static int ql82xx_rom_lock_d(struct qla_hw_data *ha) { int loops = 0; uint32_t lock_owner = 0; scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); while ((qla82xx_rom_lock(ha) != 0) && (loops < 50000)) { udelay(100); cond_resched(); loops++; } if (loops >= 50000) { lock_owner = qla82xx_rd_32(ha, QLA82XX_ROM_LOCK_ID); ql_log(ql_log_warn, vha, 0xb010, "ROM lock failed, Lock Owner %u.\n", lock_owner); return -1; } return 0; } static int qla82xx_write_flash_dword(struct qla_hw_data *ha, uint32_t flashaddr, uint32_t data) { int ret = 0; scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); ret = ql82xx_rom_lock_d(ha); if (ret < 0) { ql_log(ql_log_warn, vha, 0xb011, "ROM lock failed.\n"); return ret; } if (qla82xx_flash_set_write_enable(ha)) goto done_write; qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_WDATA, data); qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ADDRESS, flashaddr); qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 3); qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_PP); qla82xx_wait_rom_busy(ha); if (qla82xx_wait_rom_done(ha)) { ql_log(ql_log_warn, vha, 0xb012, "Error waiting for rom done.\n"); ret = -1; goto done_write; } ret = qla82xx_flash_wait_write_finish(ha); done_write: qla82xx_rom_unlock(ha); return ret; } /* This 
routine does CRB initialize sequence * to put the ISP into operational state */ static int qla82xx_pinit_from_rom(scsi_qla_host_t *vha) { int addr, val; int i ; struct crb_addr_pair *buf; unsigned long off; unsigned offset, n; struct qla_hw_data *ha = vha->hw; struct crb_addr_pair { long addr; long data; }; /* Halt all the individual PEGs and other blocks of the ISP */ qla82xx_rom_lock(ha); /* disable all I2Q */ qla82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x10, 0x0); qla82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x14, 0x0); qla82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x18, 0x0); qla82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x1c, 0x0); qla82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x20, 0x0); qla82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x24, 0x0); /* disable all niu interrupts */ qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x40, 0xff); /* disable xge rx/tx */ qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x70000, 0x00); /* disable xg1 rx/tx */ qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x80000, 0x00); /* disable sideband mac */ qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x90000, 0x00); /* disable ap0 mac */ qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0xa0000, 0x00); /* disable ap1 mac */ qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0xb0000, 0x00); /* halt sre */ val = qla82xx_rd_32(ha, QLA82XX_CRB_SRE + 0x1000); qla82xx_wr_32(ha, QLA82XX_CRB_SRE + 0x1000, val & (~(0x1))); /* halt epg */ qla82xx_wr_32(ha, QLA82XX_CRB_EPG + 0x1300, 0x1); /* halt timers */ qla82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x0, 0x0); qla82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x8, 0x0); qla82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x10, 0x0); qla82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x18, 0x0); qla82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x100, 0x0); qla82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x200, 0x0); /* halt pegs */ qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0 + 0x3c, 1); qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_1 + 0x3c, 1); qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_2 + 0x3c, 1); qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_3 + 0x3c, 1); qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_4 + 0x3c, 1); msleep(20); /* big hammer */ if 
(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) /* don't reset CAM block on reset */ qla82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0xfeffffff); else qla82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0xffffffff); qla82xx_rom_unlock(ha); /* Read the signature value from the flash. * Offset 0: Contain signature (0xcafecafe) * Offset 4: Offset and number of addr/value pairs * that present in CRB initialize sequence */ if (qla82xx_rom_fast_read(ha, 0, &n) != 0 || n != 0xcafecafeUL || qla82xx_rom_fast_read(ha, 4, &n) != 0) { ql_log(ql_log_fatal, vha, 0x006e, "Error Reading crb_init area: n: %08x.\n", n); return -1; } /* Offset in flash = lower 16 bits * Number of entries = upper 16 bits */ offset = n & 0xffffU; n = (n >> 16) & 0xffffU; /* number of addr/value pair should not exceed 1024 entries */ if (n >= 1024) { ql_log(ql_log_fatal, vha, 0x0071, "Card flash not initialized:n=0x%x.\n", n); return -1; } ql_log(ql_log_info, vha, 0x0072, "%d CRB init values found in ROM.\n", n); buf = kmalloc(n * sizeof(struct crb_addr_pair), GFP_KERNEL); if (buf == NULL) { ql_log(ql_log_fatal, vha, 0x010c, "Unable to allocate memory.\n"); return -1; } for (i = 0; i < n; i++) { if (qla82xx_rom_fast_read(ha, 8*i + 4*offset, &val) != 0 || qla82xx_rom_fast_read(ha, 8*i + 4*offset + 4, &addr) != 0) { kfree(buf); return -1; } buf[i].addr = addr; buf[i].data = val; } for (i = 0; i < n; i++) { /* Translate internal CRB initialization * address to PCI bus address */ off = qla82xx_decode_crb_addr((unsigned long)buf[i].addr) + QLA82XX_PCI_CRBSPACE; /* Not all CRB addr/value pair to be written, * some of them are skipped */ /* skipping cold reboot MAGIC */ if (off == QLA82XX_CAM_RAM(0x1fc)) continue; /* do not reset PCI */ if (off == (ROMUSB_GLB + 0xbc)) continue; /* skip core clock, so that firmware can increase the clock */ if (off == (ROMUSB_GLB + 0xc8)) continue; /* skip the function enable register */ if (off == QLA82XX_PCIE_REG(PCIE_SETUP_FUNCTION)) continue; if (off == 
QLA82XX_PCIE_REG(PCIE_SETUP_FUNCTION2)) continue; if ((off & 0x0ff00000) == QLA82XX_CRB_SMB) continue; if ((off & 0x0ff00000) == QLA82XX_CRB_DDR_NET) continue; if (off == ADDR_ERROR) { ql_log(ql_log_fatal, vha, 0x0116, "Unknown addr: 0x%08lx.\n", buf[i].addr); continue; } qla82xx_wr_32(ha, off, buf[i].data); /* ISP requires much bigger delay to settle down, * else crb_window returns 0xffffffff */ if (off == QLA82XX_ROMUSB_GLB_SW_RESET) msleep(1000); /* ISP requires millisec delay between * successive CRB register updation */ msleep(1); } kfree(buf); /* Resetting the data and instruction cache */ qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_D+0xec, 0x1e); qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_D+0x4c, 8); qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_I+0x4c, 8); /* Clear all protocol processing engines */ qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0+0x8, 0); qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0+0xc, 0); qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_1+0x8, 0); qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_1+0xc, 0); qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_2+0x8, 0); qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_2+0xc, 0); qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_3+0x8, 0); qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_3+0xc, 0); return 0; } static int qla82xx_pci_mem_write_2M(struct qla_hw_data *ha, u64 off, void *data, int size) { int i, j, ret = 0, loop, sz[2], off0; int scale, shift_amount, startword; uint32_t temp; uint64_t off8, mem_crb, tmpw, word[2] = {0, 0}; /* * If not MN, go check for MS or invalid. */ if (off >= QLA82XX_ADDR_QDR_NET && off <= QLA82XX_P3_ADDR_QDR_NET_MAX) mem_crb = QLA82XX_CRB_QDR_NET; else { mem_crb = QLA82XX_CRB_DDR_NET; if (qla82xx_pci_mem_bound_check(ha, off, size) == 0) return qla82xx_pci_mem_write_direct(ha, off, data, size); } off0 = off & 0x7; sz[0] = (size < (8 - off0)) ? 
size : (8 - off0); sz[1] = size - sz[0]; off8 = off & 0xfffffff0; loop = (((off & 0xf) + size - 1) >> 4) + 1; shift_amount = 4; scale = 2; startword = (off & 0xf)/8; for (i = 0; i < loop; i++) { if (qla82xx_pci_mem_read_2M(ha, off8 + (i << shift_amount), &word[i * scale], 8)) return -1; } switch (size) { case 1: tmpw = *((uint8_t *)data); break; case 2: tmpw = *((uint16_t *)data); break; case 4: tmpw = *((uint32_t *)data); break; case 8: default: tmpw = *((uint64_t *)data); break; } if (sz[0] == 8) { word[startword] = tmpw; } else { word[startword] &= ~((~(~0ULL << (sz[0] * 8))) << (off0 * 8)); word[startword] |= tmpw << (off0 * 8); } if (sz[1] != 0) { word[startword+1] &= ~(~0ULL << (sz[1] * 8)); word[startword+1] |= tmpw >> (sz[0] * 8); } for (i = 0; i < loop; i++) { temp = off8 + (i << shift_amount); qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_ADDR_LO, temp); temp = 0; qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_ADDR_HI, temp); temp = word[i * scale] & 0xffffffff; qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_WRDATA_LO, temp); temp = (word[i * scale] >> 32) & 0xffffffff; qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_WRDATA_HI, temp); temp = word[i*scale + 1] & 0xffffffff; qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_WRDATA_UPPER_LO, temp); temp = (word[i*scale + 1] >> 32) & 0xffffffff; qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_WRDATA_UPPER_HI, temp); temp = MIU_TA_CTL_ENABLE | MIU_TA_CTL_WRITE; qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_CTRL, temp); temp = MIU_TA_CTL_START | MIU_TA_CTL_ENABLE | MIU_TA_CTL_WRITE; qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_CTRL, temp); for (j = 0; j < MAX_CTL_CHECK; j++) { temp = qla82xx_rd_32(ha, mem_crb + MIU_TEST_AGT_CTRL); if ((temp & MIU_TA_CTL_BUSY) == 0) break; } if (j >= MAX_CTL_CHECK) { if (printk_ratelimit()) dev_err(&ha->pdev->dev, "failed to write through agent.\n"); ret = -1; break; } } return ret; } static int qla82xx_fw_load_from_flash(struct qla_hw_data *ha) { int i; long size = 0; long flashaddr = ha->flt_region_bootload << 2; long memaddr = 
BOOTLD_START; u64 data; u32 high, low; size = (IMAGE_START - BOOTLD_START) / 8; for (i = 0; i < size; i++) { if ((qla82xx_rom_fast_read(ha, flashaddr, (int *)&low)) || (qla82xx_rom_fast_read(ha, flashaddr + 4, (int *)&high))) { return -1; } data = ((u64)high << 32) | low ; qla82xx_pci_mem_write_2M(ha, memaddr, &data, 8); flashaddr += 8; memaddr += 8; if (i % 0x1000 == 0) msleep(1); } udelay(100); read_lock(&ha->hw_lock); qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0 + 0x18, 0x1020); qla82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0x80001e); read_unlock(&ha->hw_lock); return 0; } int qla82xx_pci_mem_read_2M(struct qla_hw_data *ha, u64 off, void *data, int size) { int i, j = 0, k, start, end, loop, sz[2], off0[2]; int shift_amount; uint32_t temp; uint64_t off8, val, mem_crb, word[2] = {0, 0}; /* * If not MN, go check for MS or invalid. */ if (off >= QLA82XX_ADDR_QDR_NET && off <= QLA82XX_P3_ADDR_QDR_NET_MAX) mem_crb = QLA82XX_CRB_QDR_NET; else { mem_crb = QLA82XX_CRB_DDR_NET; if (qla82xx_pci_mem_bound_check(ha, off, size) == 0) return qla82xx_pci_mem_read_direct(ha, off, data, size); } off8 = off & 0xfffffff0; off0[0] = off & 0xf; sz[0] = (size < (16 - off0[0])) ? 
size : (16 - off0[0]); shift_amount = 4; loop = ((off0[0] + size - 1) >> shift_amount) + 1; off0[1] = 0; sz[1] = size - sz[0]; for (i = 0; i < loop; i++) { temp = off8 + (i << shift_amount); qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_ADDR_LO, temp); temp = 0; qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_ADDR_HI, temp); temp = MIU_TA_CTL_ENABLE; qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_CTRL, temp); temp = MIU_TA_CTL_START | MIU_TA_CTL_ENABLE; qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_CTRL, temp); for (j = 0; j < MAX_CTL_CHECK; j++) { temp = qla82xx_rd_32(ha, mem_crb + MIU_TEST_AGT_CTRL); if ((temp & MIU_TA_CTL_BUSY) == 0) break; } if (j >= MAX_CTL_CHECK) { if (printk_ratelimit()) dev_err(&ha->pdev->dev, "failed to read through agent.\n"); break; } start = off0[i] >> 2; end = (off0[i] + sz[i] - 1) >> 2; for (k = start; k <= end; k++) { temp = qla82xx_rd_32(ha, mem_crb + MIU_TEST_AGT_RDDATA(k)); word[i] |= ((uint64_t)temp << (32 * (k & 1))); } } if (j >= MAX_CTL_CHECK) return -1; if ((off0[0] & 7) == 0) { val = word[0]; } else { val = ((word[0] >> (off0[0] * 8)) & (~(~0ULL << (sz[0] * 8)))) | ((word[1] & (~(~0ULL << (sz[1] * 8)))) << (sz[0] * 8)); } switch (size) { case 1: *(uint8_t *)data = val; break; case 2: *(uint16_t *)data = val; break; case 4: *(uint32_t *)data = val; break; case 8: *(uint64_t *)data = val; break; } return 0; } static struct qla82xx_uri_table_desc * qla82xx_get_table_desc(const u8 *unirom, int section) { uint32_t i; struct qla82xx_uri_table_desc *directory = (struct qla82xx_uri_table_desc *)&unirom[0]; __le32 offset; __le32 tab_type; __le32 entries = cpu_to_le32(directory->num_entries); for (i = 0; i < entries; i++) { offset = cpu_to_le32(directory->findex) + (i * cpu_to_le32(directory->entry_size)); tab_type = cpu_to_le32(*((u32 *)&unirom[offset] + 8)); if (tab_type == section) return (struct qla82xx_uri_table_desc *)&unirom[offset]; } return NULL; } static struct qla82xx_uri_data_desc * qla82xx_get_data_desc(struct qla_hw_data *ha, u32 section, u32 
idx_offset) { const u8 *unirom = ha->hablob->fw->data; int idx = cpu_to_le32(*((int *)&unirom[ha->file_prd_off] + idx_offset)); struct qla82xx_uri_table_desc *tab_desc = NULL; __le32 offset; tab_desc = qla82xx_get_table_desc(unirom, section); if (!tab_desc) return NULL; offset = cpu_to_le32(tab_desc->findex) + (cpu_to_le32(tab_desc->entry_size) * idx); return (struct qla82xx_uri_data_desc *)&unirom[offset]; } static u8 * qla82xx_get_bootld_offset(struct qla_hw_data *ha) { u32 offset = BOOTLD_START; struct qla82xx_uri_data_desc *uri_desc = NULL; if (ha->fw_type == QLA82XX_UNIFIED_ROMIMAGE) { uri_desc = qla82xx_get_data_desc(ha, QLA82XX_URI_DIR_SECT_BOOTLD, QLA82XX_URI_BOOTLD_IDX_OFF); if (uri_desc) offset = cpu_to_le32(uri_desc->findex); } return (u8 *)&ha->hablob->fw->data[offset]; } static __le32 qla82xx_get_fw_size(struct qla_hw_data *ha) { struct qla82xx_uri_data_desc *uri_desc = NULL; if (ha->fw_type == QLA82XX_UNIFIED_ROMIMAGE) { uri_desc = qla82xx_get_data_desc(ha, QLA82XX_URI_DIR_SECT_FW, QLA82XX_URI_FIRMWARE_IDX_OFF); if (uri_desc) return cpu_to_le32(uri_desc->size); } return cpu_to_le32(*(u32 *)&ha->hablob->fw->data[FW_SIZE_OFFSET]); } static u8 * qla82xx_get_fw_offs(struct qla_hw_data *ha) { u32 offset = IMAGE_START; struct qla82xx_uri_data_desc *uri_desc = NULL; if (ha->fw_type == QLA82XX_UNIFIED_ROMIMAGE) { uri_desc = qla82xx_get_data_desc(ha, QLA82XX_URI_DIR_SECT_FW, QLA82XX_URI_FIRMWARE_IDX_OFF); if (uri_desc) offset = cpu_to_le32(uri_desc->findex); } return (u8 *)&ha->hablob->fw->data[offset]; } /* PCI related functions */ int qla82xx_pci_region_offset(struct pci_dev *pdev, int region) { unsigned long val = 0; u32 control; switch (region) { case 0: val = 0; break; case 1: pci_read_config_dword(pdev, QLA82XX_PCI_REG_MSIX_TBL, &control); val = control + QLA82XX_MSIX_TBL_SPACE; break; } return val; } int qla82xx_iospace_config(struct qla_hw_data *ha) { uint32_t len = 0; if (pci_request_regions(ha->pdev, QLA2XXX_DRIVER_NAME)) { ql_log_pci(ql_log_fatal, 
ha->pdev, 0x000c, "Failed to reserver selected regions.\n"); goto iospace_error_exit; } /* Use MMIO operations for all accesses. */ if (!(pci_resource_flags(ha->pdev, 0) & IORESOURCE_MEM)) { ql_log_pci(ql_log_fatal, ha->pdev, 0x000d, "Region #0 not an MMIO resource, aborting.\n"); goto iospace_error_exit; } len = pci_resource_len(ha->pdev, 0); ha->nx_pcibase = ioremap(pci_resource_start(ha->pdev, 0), len); if (!ha->nx_pcibase) { ql_log_pci(ql_log_fatal, ha->pdev, 0x000e, "Cannot remap pcibase MMIO, aborting.\n"); goto iospace_error_exit; } /* Mapping of IO base pointer */ if (IS_QLA8044(ha)) { ha->iobase = ha->nx_pcibase; } else if (IS_QLA82XX(ha)) { ha->iobase = ha->nx_pcibase + 0xbc000 + (ha->pdev->devfn << 11); } if (!ql2xdbwr) { ha->nxdb_wr_ptr = ioremap((pci_resource_start(ha->pdev, 4) + (ha->pdev->devfn << 12)), 4); if (!ha->nxdb_wr_ptr) { ql_log_pci(ql_log_fatal, ha->pdev, 0x000f, "Cannot remap MMIO, aborting.\n"); goto iospace_error_exit; } /* Mapping of IO base pointer, * door bell read and write pointer */ ha->nxdb_rd_ptr = ha->nx_pcibase + (512 * 1024) + (ha->pdev->devfn * 8); } else { ha->nxdb_wr_ptr = (void __iomem *)(ha->pdev->devfn == 6 ? QLA82XX_CAMRAM_DB1 : QLA82XX_CAMRAM_DB2); } ha->max_req_queues = ha->max_rsp_queues = 1; ha->msix_count = ha->max_rsp_queues + 1; ql_dbg_pci(ql_dbg_multiq, ha->pdev, 0xc006, "nx_pci_base=%p iobase=%p " "max_req_queues=%d msix_count=%d.\n", ha->nx_pcibase, ha->iobase, ha->max_req_queues, ha->msix_count); ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0010, "nx_pci_base=%p iobase=%p " "max_req_queues=%d msix_count=%d.\n", ha->nx_pcibase, ha->iobase, ha->max_req_queues, ha->msix_count); return 0; iospace_error_exit: return -ENOMEM; } /* GS related functions */ /* Initialization related functions */ /** * qla82xx_pci_config() - Setup ISP82xx PCI configuration registers. * @ha: HA context * * Returns 0 on success. 
*/ int qla82xx_pci_config(scsi_qla_host_t *vha) { struct qla_hw_data *ha = vha->hw; int ret; pci_set_master(ha->pdev); ret = pci_set_mwi(ha->pdev); ha->chip_revision = ha->pdev->revision; ql_dbg(ql_dbg_init, vha, 0x0043, "Chip revision:%d; pci_set_mwi() returned %d.\n", ha->chip_revision, ret); return 0; } /** * qla82xx_reset_chip() - Setup ISP82xx PCI configuration registers. * @ha: HA context * * Returns 0 on success. */ void qla82xx_reset_chip(scsi_qla_host_t *vha) { struct qla_hw_data *ha = vha->hw; ha->isp_ops->disable_intrs(ha); } void qla82xx_config_rings(struct scsi_qla_host *vha) { struct qla_hw_data *ha = vha->hw; struct device_reg_82xx __iomem *reg = &ha->iobase->isp82; struct init_cb_81xx *icb; struct req_que *req = ha->req_q_map[0]; struct rsp_que *rsp = ha->rsp_q_map[0]; /* Setup ring parameters in initialization control block. */ icb = (struct init_cb_81xx *)ha->init_cb; icb->request_q_outpointer = cpu_to_le16(0); icb->response_q_inpointer = cpu_to_le16(0); icb->request_q_length = cpu_to_le16(req->length); icb->response_q_length = cpu_to_le16(rsp->length); icb->request_q_address[0] = cpu_to_le32(LSD(req->dma)); icb->request_q_address[1] = cpu_to_le32(MSD(req->dma)); icb->response_q_address[0] = cpu_to_le32(LSD(rsp->dma)); icb->response_q_address[1] = cpu_to_le32(MSD(rsp->dma)); WRT_REG_DWORD(&reg->req_q_out[0], 0); WRT_REG_DWORD(&reg->rsp_q_in[0], 0); WRT_REG_DWORD(&reg->rsp_q_out[0], 0); } static int qla82xx_fw_load_from_blob(struct qla_hw_data *ha) { u64 *ptr64; u32 i, flashaddr, size; __le64 data; size = (IMAGE_START - BOOTLD_START) / 8; ptr64 = (u64 *)qla82xx_get_bootld_offset(ha); flashaddr = BOOTLD_START; for (i = 0; i < size; i++) { data = cpu_to_le64(ptr64[i]); if (qla82xx_pci_mem_write_2M(ha, flashaddr, &data, 8)) return -EIO; flashaddr += 8; } flashaddr = FLASH_ADDR_START; size = (__force u32)qla82xx_get_fw_size(ha) / 8; ptr64 = (u64 *)qla82xx_get_fw_offs(ha); for (i = 0; i < size; i++) { data = cpu_to_le64(ptr64[i]); if 
(qla82xx_pci_mem_write_2M(ha, flashaddr, &data, 8)) return -EIO; flashaddr += 8; } udelay(100); /* Write a magic value to CAMRAM register * at a specified offset to indicate * that all data is written and * ready for firmware to initialize. */ qla82xx_wr_32(ha, QLA82XX_CAM_RAM(0x1fc), QLA82XX_BDINFO_MAGIC); read_lock(&ha->hw_lock); qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0 + 0x18, 0x1020); qla82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0x80001e); read_unlock(&ha->hw_lock); return 0; } static int qla82xx_set_product_offset(struct qla_hw_data *ha) { struct qla82xx_uri_table_desc *ptab_desc = NULL; const uint8_t *unirom = ha->hablob->fw->data; uint32_t i; __le32 entries; __le32 flags, file_chiprev, offset; uint8_t chiprev = ha->chip_revision; /* Hardcoding mn_present flag for P3P */ int mn_present = 0; uint32_t flagbit; ptab_desc = qla82xx_get_table_desc(unirom, QLA82XX_URI_DIR_SECT_PRODUCT_TBL); if (!ptab_desc) return -1; entries = cpu_to_le32(ptab_desc->num_entries); for (i = 0; i < entries; i++) { offset = cpu_to_le32(ptab_desc->findex) + (i * cpu_to_le32(ptab_desc->entry_size)); flags = cpu_to_le32(*((int *)&unirom[offset] + QLA82XX_URI_FLAGS_OFF)); file_chiprev = cpu_to_le32(*((int *)&unirom[offset] + QLA82XX_URI_CHIP_REV_OFF)); flagbit = mn_present ? 
1 : 2; if ((chiprev == file_chiprev) && ((1ULL << flagbit) & flags)) { ha->file_prd_off = offset; return 0; } } return -1; } static int qla82xx_validate_firmware_blob(scsi_qla_host_t *vha, uint8_t fw_type) { __le32 val; uint32_t min_size; struct qla_hw_data *ha = vha->hw; const struct firmware *fw = ha->hablob->fw; ha->fw_type = fw_type; if (fw_type == QLA82XX_UNIFIED_ROMIMAGE) { if (qla82xx_set_product_offset(ha)) return -EINVAL; min_size = QLA82XX_URI_FW_MIN_SIZE; } else { val = cpu_to_le32(*(u32 *)&fw->data[QLA82XX_FW_MAGIC_OFFSET]); if ((__force u32)val != QLA82XX_BDINFO_MAGIC) return -EINVAL; min_size = QLA82XX_FW_MIN_SIZE; } if (fw->size < min_size) return -EINVAL; return 0; } static int qla82xx_check_cmdpeg_state(struct qla_hw_data *ha) { u32 val = 0; int retries = 60; scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); do { read_lock(&ha->hw_lock); val = qla82xx_rd_32(ha, CRB_CMDPEG_STATE); read_unlock(&ha->hw_lock); switch (val) { case PHAN_INITIALIZE_COMPLETE: case PHAN_INITIALIZE_ACK: return QLA_SUCCESS; case PHAN_INITIALIZE_FAILED: break; default: break; } ql_log(ql_log_info, vha, 0x00a8, "CRB_CMDPEG_STATE: 0x%x and retries:0x%x.\n", val, retries); msleep(500); } while (--retries); ql_log(ql_log_fatal, vha, 0x00a9, "Cmd Peg initialization failed: 0x%x.\n", val); val = qla82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_PEGTUNE_DONE); read_lock(&ha->hw_lock); qla82xx_wr_32(ha, CRB_CMDPEG_STATE, PHAN_INITIALIZE_FAILED); read_unlock(&ha->hw_lock); return QLA_FUNCTION_FAILED; } static int qla82xx_check_rcvpeg_state(struct qla_hw_data *ha) { u32 val = 0; int retries = 60; scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); do { read_lock(&ha->hw_lock); val = qla82xx_rd_32(ha, CRB_RCVPEG_STATE); read_unlock(&ha->hw_lock); switch (val) { case PHAN_INITIALIZE_COMPLETE: case PHAN_INITIALIZE_ACK: return QLA_SUCCESS; case PHAN_INITIALIZE_FAILED: break; default: break; } ql_log(ql_log_info, vha, 0x00ab, "CRB_RCVPEG_STATE: 0x%x and retries: 0x%x.\n", val, retries); msleep(500); } 
while (--retries); ql_log(ql_log_fatal, vha, 0x00ac, "Rcv Peg initializatin failed: 0x%x.\n", val); read_lock(&ha->hw_lock); qla82xx_wr_32(ha, CRB_RCVPEG_STATE, PHAN_INITIALIZE_FAILED); read_unlock(&ha->hw_lock); return QLA_FUNCTION_FAILED; } /* ISR related functions */ static struct qla82xx_legacy_intr_set legacy_intr[] = \ QLA82XX_LEGACY_INTR_CONFIG; /* * qla82xx_mbx_completion() - Process mailbox command completions. * @ha: SCSI driver HA context * @mb0: Mailbox0 register */ void qla82xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0) { uint16_t cnt; uint16_t __iomem *wptr; struct qla_hw_data *ha = vha->hw; struct device_reg_82xx __iomem *reg = &ha->iobase->isp82; wptr = (uint16_t __iomem *)&reg->mailbox_out[1]; /* Load return mailbox registers. */ ha->flags.mbox_int = 1; ha->mailbox_out[0] = mb0; for (cnt = 1; cnt < ha->mbx_count; cnt++) { ha->mailbox_out[cnt] = RD_REG_WORD(wptr); wptr++; } if (!ha->mcp) ql_dbg(ql_dbg_async, vha, 0x5053, "MBX pointer ERROR.\n"); } /* * qla82xx_intr_handler() - Process interrupts for the ISP23xx and ISP63xx. * @irq: * @dev_id: SCSI driver HA context * @regs: * * Called by system whenever the host adapter generates an interrupt. * * Returns handled flag. 
*/ irqreturn_t qla82xx_intr_handler(int irq, void *dev_id) { scsi_qla_host_t *vha; struct qla_hw_data *ha; struct rsp_que *rsp; struct device_reg_82xx __iomem *reg; int status = 0, status1 = 0; unsigned long flags; unsigned long iter; uint32_t stat = 0; uint16_t mb[4]; rsp = (struct rsp_que *) dev_id; if (!rsp) { ql_log(ql_log_info, NULL, 0xb053, "%s: NULL response queue pointer.\n", __func__); return IRQ_NONE; } ha = rsp->hw; if (!ha->flags.msi_enabled) { status = qla82xx_rd_32(ha, ISR_INT_VECTOR); if (!(status & ha->nx_legacy_intr.int_vec_bit)) return IRQ_NONE; status1 = qla82xx_rd_32(ha, ISR_INT_STATE_REG); if (!ISR_IS_LEGACY_INTR_TRIGGERED(status1)) return IRQ_NONE; } /* clear the interrupt */ qla82xx_wr_32(ha, ha->nx_legacy_intr.tgt_status_reg, 0xffffffff); /* read twice to ensure write is flushed */ qla82xx_rd_32(ha, ISR_INT_VECTOR); qla82xx_rd_32(ha, ISR_INT_VECTOR); reg = &ha->iobase->isp82; spin_lock_irqsave(&ha->hardware_lock, flags); vha = pci_get_drvdata(ha->pdev); for (iter = 1; iter--; ) { if (RD_REG_DWORD(&reg->host_int)) { stat = RD_REG_DWORD(&reg->host_status); switch (stat & 0xff) { case 0x1: case 0x2: case 0x10: case 0x11: qla82xx_mbx_completion(vha, MSW(stat)); status |= MBX_INTERRUPT; break; case 0x12: mb[0] = MSW(stat); mb[1] = RD_REG_WORD(&reg->mailbox_out[1]); mb[2] = RD_REG_WORD(&reg->mailbox_out[2]); mb[3] = RD_REG_WORD(&reg->mailbox_out[3]); qla2x00_async_event(vha, rsp, mb); break; case 0x13: qla24xx_process_response_queue(vha, rsp); break; default: ql_dbg(ql_dbg_async, vha, 0x5054, "Unrecognized interrupt type (%d).\n", stat & 0xff); break; } } WRT_REG_DWORD(&reg->host_int, 0); } qla2x00_handle_mbx_completion(ha, status); spin_unlock_irqrestore(&ha->hardware_lock, flags); if (!ha->flags.msi_enabled) qla82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0xfbff); return IRQ_HANDLED; } irqreturn_t qla82xx_msix_default(int irq, void *dev_id) { scsi_qla_host_t *vha; struct qla_hw_data *ha; struct rsp_que *rsp; struct device_reg_82xx __iomem 
*reg; int status = 0; unsigned long flags; uint32_t stat = 0; uint32_t host_int = 0; uint16_t mb[4]; rsp = (struct rsp_que *) dev_id; if (!rsp) { printk(KERN_INFO "%s(): NULL response queue pointer.\n", __func__); return IRQ_NONE; } ha = rsp->hw; reg = &ha->iobase->isp82; spin_lock_irqsave(&ha->hardware_lock, flags); vha = pci_get_drvdata(ha->pdev); do { host_int = RD_REG_DWORD(&reg->host_int); if (qla2x00_check_reg32_for_disconnect(vha, host_int)) break; if (host_int) { stat = RD_REG_DWORD(&reg->host_status); switch (stat & 0xff) { case 0x1: case 0x2: case 0x10: case 0x11: qla82xx_mbx_completion(vha, MSW(stat)); status |= MBX_INTERRUPT; break; case 0x12: mb[0] = MSW(stat); mb[1] = RD_REG_WORD(&reg->mailbox_out[1]); mb[2] = RD_REG_WORD(&reg->mailbox_out[2]); mb[3] = RD_REG_WORD(&reg->mailbox_out[3]); qla2x00_async_event(vha, rsp, mb); break; case 0x13: qla24xx_process_response_queue(vha, rsp); break; default: ql_dbg(ql_dbg_async, vha, 0x5041, "Unrecognized interrupt type (%d).\n", stat & 0xff); break; } } WRT_REG_DWORD(&reg->host_int, 0); } while (0); qla2x00_handle_mbx_completion(ha, status); spin_unlock_irqrestore(&ha->hardware_lock, flags); return IRQ_HANDLED; } irqreturn_t qla82xx_msix_rsp_q(int irq, void *dev_id) { scsi_qla_host_t *vha; struct qla_hw_data *ha; struct rsp_que *rsp; struct device_reg_82xx __iomem *reg; unsigned long flags; uint32_t host_int = 0; rsp = (struct rsp_que *) dev_id; if (!rsp) { printk(KERN_INFO "%s(): NULL response queue pointer.\n", __func__); return IRQ_NONE; } ha = rsp->hw; reg = &ha->iobase->isp82; spin_lock_irqsave(&ha->hardware_lock, flags); vha = pci_get_drvdata(ha->pdev); host_int = RD_REG_DWORD(&reg->host_int); if (qla2x00_check_reg32_for_disconnect(vha, host_int)) goto out; qla24xx_process_response_queue(vha, rsp); WRT_REG_DWORD(&reg->host_int, 0); out: spin_unlock_irqrestore(&ha->hardware_lock, flags); return IRQ_HANDLED; } void qla82xx_poll(int irq, void *dev_id) { scsi_qla_host_t *vha; struct qla_hw_data *ha; struct 
rsp_que *rsp; struct device_reg_82xx __iomem *reg; int status = 0; uint32_t stat; uint32_t host_int = 0; uint16_t mb[4]; unsigned long flags; rsp = (struct rsp_que *) dev_id; if (!rsp) { printk(KERN_INFO "%s(): NULL response queue pointer.\n", __func__); return; } ha = rsp->hw; reg = &ha->iobase->isp82; spin_lock_irqsave(&ha->hardware_lock, flags); vha = pci_get_drvdata(ha->pdev); host_int = RD_REG_DWORD(&reg->host_int); if (qla2x00_check_reg32_for_disconnect(vha, host_int)) goto out; if (host_int) { stat = RD_REG_DWORD(&reg->host_status); switch (stat & 0xff) { case 0x1: case 0x2: case 0x10: case 0x11: qla82xx_mbx_completion(vha, MSW(stat)); status |= MBX_INTERRUPT; break; case 0x12: mb[0] = MSW(stat); mb[1] = RD_REG_WORD(&reg->mailbox_out[1]); mb[2] = RD_REG_WORD(&reg->mailbox_out[2]); mb[3] = RD_REG_WORD(&reg->mailbox_out[3]); qla2x00_async_event(vha, rsp, mb); break; case 0x13: qla24xx_process_response_queue(vha, rsp); break; default: ql_dbg(ql_dbg_p3p, vha, 0xb013, "Unrecognized interrupt type (%d).\n", stat * 0xff); break; } WRT_REG_DWORD(&reg->host_int, 0); } out: spin_unlock_irqrestore(&ha->hardware_lock, flags); } void qla82xx_enable_intrs(struct qla_hw_data *ha) { scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); qla82xx_mbx_intr_enable(vha); spin_lock_irq(&ha->hardware_lock); if (IS_QLA8044(ha)) qla8044_wr_reg(ha, LEG_INTR_MASK_OFFSET, 0); else qla82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0xfbff); spin_unlock_irq(&ha->hardware_lock); ha->interrupts_on = 1; } void qla82xx_disable_intrs(struct qla_hw_data *ha) { scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); qla82xx_mbx_intr_disable(vha); spin_lock_irq(&ha->hardware_lock); if (IS_QLA8044(ha)) qla8044_wr_reg(ha, LEG_INTR_MASK_OFFSET, 1); else qla82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0x0400); spin_unlock_irq(&ha->hardware_lock); ha->interrupts_on = 0; } void qla82xx_init_flags(struct qla_hw_data *ha) { struct qla82xx_legacy_intr_set *nx_legacy_intr; /* ISP 8021 initializations */ 
rwlock_init(&ha->hw_lock); ha->qdr_sn_window = -1; ha->ddr_mn_window = -1; ha->curr_window = 255; ha->portnum = PCI_FUNC(ha->pdev->devfn); nx_legacy_intr = &legacy_intr[ha->portnum]; ha->nx_legacy_intr.int_vec_bit = nx_legacy_intr->int_vec_bit; ha->nx_legacy_intr.tgt_status_reg = nx_legacy_intr->tgt_status_reg; ha->nx_legacy_intr.tgt_mask_reg = nx_legacy_intr->tgt_mask_reg; ha->nx_legacy_intr.pci_int_reg = nx_legacy_intr->pci_int_reg; } static inline void qla82xx_set_idc_version(scsi_qla_host_t *vha) { int idc_ver; uint32_t drv_active; struct qla_hw_data *ha = vha->hw; drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE); if (drv_active == (QLA82XX_DRV_ACTIVE << (ha->portnum * 4))) { qla82xx_wr_32(ha, QLA82XX_CRB_DRV_IDC_VERSION, QLA82XX_IDC_VERSION); ql_log(ql_log_info, vha, 0xb082, "IDC version updated to %d\n", QLA82XX_IDC_VERSION); } else { idc_ver = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_IDC_VERSION); if (idc_ver != QLA82XX_IDC_VERSION) ql_log(ql_log_info, vha, 0xb083, "qla2xxx driver IDC version %d is not compatible " "with IDC version %d of the other drivers\n", QLA82XX_IDC_VERSION, idc_ver); } } inline void qla82xx_set_drv_active(scsi_qla_host_t *vha) { uint32_t drv_active; struct qla_hw_data *ha = vha->hw; drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE); /* If reset value is all FF's, initialize DRV_ACTIVE */ if (drv_active == 0xffffffff) { qla82xx_wr_32(ha, QLA82XX_CRB_DRV_ACTIVE, QLA82XX_DRV_NOT_ACTIVE); drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE); } drv_active |= (QLA82XX_DRV_ACTIVE << (ha->portnum * 4)); qla82xx_wr_32(ha, QLA82XX_CRB_DRV_ACTIVE, drv_active); } inline void qla82xx_clear_drv_active(struct qla_hw_data *ha) { uint32_t drv_active; drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE); drv_active &= ~(QLA82XX_DRV_ACTIVE << (ha->portnum * 4)); qla82xx_wr_32(ha, QLA82XX_CRB_DRV_ACTIVE, drv_active); } static inline int qla82xx_need_reset(struct qla_hw_data *ha) { uint32_t drv_state; int rval; if (ha->flags.nic_core_reset_owner) 
return 1; else { drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE); rval = drv_state & (QLA82XX_DRVST_RST_RDY << (ha->portnum * 4)); return rval; } } static inline void qla82xx_set_rst_ready(struct qla_hw_data *ha) { uint32_t drv_state; scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE); /* If reset value is all FF's, initialize DRV_STATE */ if (drv_state == 0xffffffff) { qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, QLA82XX_DRVST_NOT_RDY); drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE); } drv_state |= (QLA82XX_DRVST_RST_RDY << (ha->portnum * 4)); ql_dbg(ql_dbg_init, vha, 0x00bb, "drv_state = 0x%08x.\n", drv_state); qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, drv_state); } static inline void qla82xx_clear_rst_ready(struct qla_hw_data *ha) { uint32_t drv_state; drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE); drv_state &= ~(QLA82XX_DRVST_RST_RDY << (ha->portnum * 4)); qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, drv_state); } static inline void qla82xx_set_qsnt_ready(struct qla_hw_data *ha) { uint32_t qsnt_state; qsnt_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE); qsnt_state |= (QLA82XX_DRVST_QSNT_RDY << (ha->portnum * 4)); qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, qsnt_state); } void qla82xx_clear_qsnt_ready(scsi_qla_host_t *vha) { struct qla_hw_data *ha = vha->hw; uint32_t qsnt_state; qsnt_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE); qsnt_state &= ~(QLA82XX_DRVST_QSNT_RDY << (ha->portnum * 4)); qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, qsnt_state); } static int qla82xx_load_fw(scsi_qla_host_t *vha) { int rst; struct fw_blob *blob; struct qla_hw_data *ha = vha->hw; if (qla82xx_pinit_from_rom(vha) != QLA_SUCCESS) { ql_log(ql_log_fatal, vha, 0x009f, "Error during CRB initialization.\n"); return QLA_FUNCTION_FAILED; } udelay(500); /* Bring QM and CAMRAM out of reset */ rst = qla82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET); rst &= ~((1 << 28) | (1 << 24)); qla82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, rst); /* 
* FW Load priority: * 1) Operational firmware residing in flash. * 2) Firmware via request-firmware interface (.bin file). */ if (ql2xfwloadbin == 2) goto try_blob_fw; ql_log(ql_log_info, vha, 0x00a0, "Attempting to load firmware from flash.\n"); if (qla82xx_fw_load_from_flash(ha) == QLA_SUCCESS) { ql_log(ql_log_info, vha, 0x00a1, "Firmware loaded successfully from flash.\n"); return QLA_SUCCESS; } else { ql_log(ql_log_warn, vha, 0x0108, "Firmware load from flash failed.\n"); } try_blob_fw: ql_log(ql_log_info, vha, 0x00a2, "Attempting to load firmware from blob.\n"); /* Load firmware blob. */ blob = ha->hablob = qla2x00_request_firmware(vha); if (!blob) { ql_log(ql_log_fatal, vha, 0x00a3, "Firmware image not present.\n"); goto fw_load_failed; } /* Validating firmware blob */ if (qla82xx_validate_firmware_blob(vha, QLA82XX_FLASH_ROMIMAGE)) { /* Fallback to URI format */ if (qla82xx_validate_firmware_blob(vha, QLA82XX_UNIFIED_ROMIMAGE)) { ql_log(ql_log_fatal, vha, 0x00a4, "No valid firmware image found.\n"); return QLA_FUNCTION_FAILED; } } if (qla82xx_fw_load_from_blob(ha) == QLA_SUCCESS) { ql_log(ql_log_info, vha, 0x00a5, "Firmware loaded successfully from binary blob.\n"); return QLA_SUCCESS; } ql_log(ql_log_fatal, vha, 0x00a6, "Firmware load failed for binary blob.\n"); blob->fw = NULL; blob = NULL; fw_load_failed: return QLA_FUNCTION_FAILED; } int qla82xx_start_firmware(scsi_qla_host_t *vha) { uint16_t lnk; struct qla_hw_data *ha = vha->hw; /* scrub dma mask expansion register */ qla82xx_wr_32(ha, CRB_DMA_SHIFT, QLA82XX_DMA_SHIFT_VALUE); /* Put both the PEG CMD and RCV PEG to default state * of 0 before resetting the hardware */ qla82xx_wr_32(ha, CRB_CMDPEG_STATE, 0); qla82xx_wr_32(ha, CRB_RCVPEG_STATE, 0); /* Overwrite stale initialization register values */ qla82xx_wr_32(ha, QLA82XX_PEG_HALT_STATUS1, 0); qla82xx_wr_32(ha, QLA82XX_PEG_HALT_STATUS2, 0); if (qla82xx_load_fw(vha) != QLA_SUCCESS) { ql_log(ql_log_fatal, vha, 0x00a7, "Error trying to start fw.\n"); 
return QLA_FUNCTION_FAILED; } /* Handshake with the card before we register the devices. */ if (qla82xx_check_cmdpeg_state(ha) != QLA_SUCCESS) { ql_log(ql_log_fatal, vha, 0x00aa, "Error during card handshake.\n"); return QLA_FUNCTION_FAILED; } /* Negotiated Link width */ pcie_capability_read_word(ha->pdev, PCI_EXP_LNKSTA, &lnk); ha->link_width = (lnk >> 4) & 0x3f; /* Synchronize with Receive peg */ return qla82xx_check_rcvpeg_state(ha); } static uint32_t * qla82xx_read_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr, uint32_t length) { uint32_t i; uint32_t val; struct qla_hw_data *ha = vha->hw; /* Dword reads to flash. */ for (i = 0; i < length/4; i++, faddr += 4) { if (qla82xx_rom_fast_read(ha, faddr, &val)) { ql_log(ql_log_warn, vha, 0x0106, "Do ROM fast read failed.\n"); goto done_read; } dwptr[i] = cpu_to_le32(val); } done_read: return dwptr; } static int qla82xx_unprotect_flash(struct qla_hw_data *ha) { int ret; uint32_t val; scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); ret = ql82xx_rom_lock_d(ha); if (ret < 0) { ql_log(ql_log_warn, vha, 0xb014, "ROM Lock failed.\n"); return ret; } ret = qla82xx_read_status_reg(ha, &val); if (ret < 0) goto done_unprotect; val &= ~(BLOCK_PROTECT_BITS << 2); ret = qla82xx_write_status_reg(ha, val); if (ret < 0) { val |= (BLOCK_PROTECT_BITS << 2); qla82xx_write_status_reg(ha, val); } if (qla82xx_write_disable_flash(ha) != 0) ql_log(ql_log_warn, vha, 0xb015, "Write disable failed.\n"); done_unprotect: qla82xx_rom_unlock(ha); return ret; } static int qla82xx_protect_flash(struct qla_hw_data *ha) { int ret; uint32_t val; scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); ret = ql82xx_rom_lock_d(ha); if (ret < 0) { ql_log(ql_log_warn, vha, 0xb016, "ROM Lock failed.\n"); return ret; } ret = qla82xx_read_status_reg(ha, &val); if (ret < 0) goto done_protect; val |= (BLOCK_PROTECT_BITS << 2); /* LOCK all sectors */ ret = qla82xx_write_status_reg(ha, val); if (ret < 0) ql_log(ql_log_warn, vha, 0xb017, "Write status 
register failed.\n"); if (qla82xx_write_disable_flash(ha) != 0) ql_log(ql_log_warn, vha, 0xb018, "Write disable failed.\n"); done_protect: qla82xx_rom_unlock(ha); return ret; } static int qla82xx_erase_sector(struct qla_hw_data *ha, int addr) { int ret = 0; scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); ret = ql82xx_rom_lock_d(ha); if (ret < 0) { ql_log(ql_log_warn, vha, 0xb019, "ROM Lock failed.\n"); return ret; } qla82xx_flash_set_write_enable(ha); qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ADDRESS, addr); qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 3); qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_SE); if (qla82xx_wait_rom_done(ha)) { ql_log(ql_log_warn, vha, 0xb01a, "Error waiting for rom done.\n"); ret = -1; goto done; } ret = qla82xx_flash_wait_write_finish(ha); done: qla82xx_rom_unlock(ha); return ret; } /* * Address and length are byte address */ uint8_t * qla82xx_read_optrom_data(struct scsi_qla_host *vha, uint8_t *buf, uint32_t offset, uint32_t length) { scsi_block_requests(vha->host); qla82xx_read_flash_data(vha, (uint32_t *)buf, offset, length); scsi_unblock_requests(vha->host); return buf; } static int qla82xx_write_flash_data(struct scsi_qla_host *vha, uint32_t *dwptr, uint32_t faddr, uint32_t dwords) { int ret; uint32_t liter; uint32_t rest_addr; dma_addr_t optrom_dma; void *optrom = NULL; int page_mode = 0; struct qla_hw_data *ha = vha->hw; ret = -1; /* Prepare burst-capable write on supported ISPs. 
*/ if (page_mode && !(faddr & 0xfff) && dwords > OPTROM_BURST_DWORDS) { optrom = dma_alloc_coherent(&ha->pdev->dev, OPTROM_BURST_SIZE, &optrom_dma, GFP_KERNEL); if (!optrom) { ql_log(ql_log_warn, vha, 0xb01b, "Unable to allocate memory " "for optrom burst write (%x KB).\n", OPTROM_BURST_SIZE / 1024); } } rest_addr = ha->fdt_block_size - 1; ret = qla82xx_unprotect_flash(ha); if (ret) { ql_log(ql_log_warn, vha, 0xb01c, "Unable to unprotect flash for update.\n"); goto write_done; } for (liter = 0; liter < dwords; liter++, faddr += 4, dwptr++) { /* Are we at the beginning of a sector? */ if ((faddr & rest_addr) == 0) { ret = qla82xx_erase_sector(ha, faddr); if (ret) { ql_log(ql_log_warn, vha, 0xb01d, "Unable to erase sector: address=%x.\n", faddr); break; } } /* Go with burst-write. */ if (optrom && (liter + OPTROM_BURST_DWORDS) <= dwords) { /* Copy data to DMA'ble buffer. */ memcpy(optrom, dwptr, OPTROM_BURST_SIZE); ret = qla2x00_load_ram(vha, optrom_dma, (ha->flash_data_off | faddr), OPTROM_BURST_DWORDS); if (ret != QLA_SUCCESS) { ql_log(ql_log_warn, vha, 0xb01e, "Unable to burst-write optrom segment " "(%x/%x/%llx).\n", ret, (ha->flash_data_off | faddr), (unsigned long long)optrom_dma); ql_log(ql_log_warn, vha, 0xb01f, "Reverting to slow-write.\n"); dma_free_coherent(&ha->pdev->dev, OPTROM_BURST_SIZE, optrom, optrom_dma); optrom = NULL; } else { liter += OPTROM_BURST_DWORDS - 1; faddr += OPTROM_BURST_DWORDS - 1; dwptr += OPTROM_BURST_DWORDS - 1; continue; } } ret = qla82xx_write_flash_dword(ha, faddr, cpu_to_le32(*dwptr)); if (ret) { ql_dbg(ql_dbg_p3p, vha, 0xb020, "Unable to program flash address=%x data=%x.\n", faddr, *dwptr); break; } } ret = qla82xx_protect_flash(ha); if (ret) ql_log(ql_log_warn, vha, 0xb021, "Unable to protect flash after update.\n"); write_done: if (optrom) dma_free_coherent(&ha->pdev->dev, OPTROM_BURST_SIZE, optrom, optrom_dma); return ret; } int qla82xx_write_optrom_data(struct scsi_qla_host *vha, uint8_t *buf, uint32_t offset, uint32_t 
length) { int rval; /* Suspend HBA. */ scsi_block_requests(vha->host); rval = qla82xx_write_flash_data(vha, (uint32_t *)buf, offset, length >> 2); scsi_unblock_requests(vha->host); /* Convert return ISP82xx to generic */ if (rval) rval = QLA_FUNCTION_FAILED; else rval = QLA_SUCCESS; return rval; } void qla82xx_start_iocbs(scsi_qla_host_t *vha) { struct qla_hw_data *ha = vha->hw; struct req_que *req = ha->req_q_map[0]; uint32_t dbval; /* Adjust ring index. */ req->ring_index++; if (req->ring_index == req->length) { req->ring_index = 0; req->ring_ptr = req->ring; } else req->ring_ptr++; dbval = 0x04 | (ha->portnum << 5); dbval = dbval | (req->id << 8) | (req->ring_index << 16); if (ql2xdbwr) qla82xx_wr_32(ha, (unsigned long)ha->nxdb_wr_ptr, dbval); else { WRT_REG_DWORD(ha->nxdb_wr_ptr, dbval); wmb(); while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) { WRT_REG_DWORD(ha->nxdb_wr_ptr, dbval); wmb(); } } } static void qla82xx_rom_lock_recovery(struct qla_hw_data *ha) { scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); uint32_t lock_owner = 0; if (qla82xx_rom_lock(ha)) { lock_owner = qla82xx_rd_32(ha, QLA82XX_ROM_LOCK_ID); /* Someone else is holding the lock. */ ql_log(ql_log_info, vha, 0xb022, "Resetting rom_lock, Lock Owner %u.\n", lock_owner); } /* * Either we got the lock, or someone * else died while holding it. * In either case, unlock. */ qla82xx_rom_unlock(ha); } /* * qla82xx_device_bootstrap * Initialize device, set DEV_READY, start fw * * Note: * IDC lock must be held upon entry * * Return: * Success : 0 * Failed : 1 */ static int qla82xx_device_bootstrap(scsi_qla_host_t *vha) { int rval = QLA_SUCCESS; int i; uint32_t old_count, count; struct qla_hw_data *ha = vha->hw; int need_reset = 0; need_reset = qla82xx_need_reset(ha); if (need_reset) { /* We are trying to perform a recovery here. 
*/ if (ha->flags.isp82xx_fw_hung) qla82xx_rom_lock_recovery(ha); } else { old_count = qla82xx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER); for (i = 0; i < 10; i++) { msleep(200); count = qla82xx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER); if (count != old_count) { rval = QLA_SUCCESS; goto dev_ready; } } qla82xx_rom_lock_recovery(ha); } /* set to DEV_INITIALIZING */ ql_log(ql_log_info, vha, 0x009e, "HW State: INITIALIZING.\n"); qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA8XXX_DEV_INITIALIZING); qla82xx_idc_unlock(ha); rval = qla82xx_start_firmware(vha); qla82xx_idc_lock(ha); if (rval != QLA_SUCCESS) { ql_log(ql_log_fatal, vha, 0x00ad, "HW State: FAILED.\n"); qla82xx_clear_drv_active(ha); qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA8XXX_DEV_FAILED); return rval; } dev_ready: ql_log(ql_log_info, vha, 0x00ae, "HW State: READY.\n"); qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA8XXX_DEV_READY); return QLA_SUCCESS; } /* * qla82xx_need_qsnt_handler * Code to start quiescence sequence * * Note: * IDC lock must be held upon entry * * Return: void */ static void qla82xx_need_qsnt_handler(scsi_qla_host_t *vha) { struct qla_hw_data *ha = vha->hw; uint32_t dev_state, drv_state, drv_active; unsigned long reset_timeout; if (vha->flags.online) { /*Block any further I/O and wait for pending cmnds to complete*/ qla2x00_quiesce_io(vha); } /* Set the quiescence ready bit */ qla82xx_set_qsnt_ready(ha); /*wait for 30 secs for other functions to ack */ reset_timeout = jiffies + (30 * HZ); drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE); drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE); /* Its 2 that is written when qsnt is acked, moving one bit */ drv_active = drv_active << 0x01; while (drv_state != drv_active) { if (time_after_eq(jiffies, reset_timeout)) { /* quiescence timeout, other functions didn't ack * changing the state to DEV_READY */ ql_log(ql_log_info, vha, 0xb023, "%s : QUIESCENT TIMEOUT DRV_ACTIVE:%d " "DRV_STATE:%d.\n", QLA2XXX_DRIVER_NAME, drv_active, drv_state); 
qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA8XXX_DEV_READY); ql_log(ql_log_info, vha, 0xb025, "HW State: DEV_READY.\n"); qla82xx_idc_unlock(ha); qla2x00_perform_loop_resync(vha); qla82xx_idc_lock(ha); qla82xx_clear_qsnt_ready(vha); return; } qla82xx_idc_unlock(ha); msleep(1000); qla82xx_idc_lock(ha); drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE); drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE); drv_active = drv_active << 0x01; } dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE); /* everyone acked so set the state to DEV_QUIESCENCE */ if (dev_state == QLA8XXX_DEV_NEED_QUIESCENT) { ql_log(ql_log_info, vha, 0xb026, "HW State: DEV_QUIESCENT.\n"); qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA8XXX_DEV_QUIESCENT); } } /* * qla82xx_wait_for_state_change * Wait for device state to change from given current state * * Note: * IDC lock must not be held upon entry * * Return: * Changed device state. */ uint32_t qla82xx_wait_for_state_change(scsi_qla_host_t *vha, uint32_t curr_state) { struct qla_hw_data *ha = vha->hw; uint32_t dev_state; do { msleep(1000); qla82xx_idc_lock(ha); dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE); qla82xx_idc_unlock(ha); } while (dev_state == curr_state); return dev_state; } void qla8xxx_dev_failed_handler(scsi_qla_host_t *vha) { struct qla_hw_data *ha = vha->hw; /* Disable the board */ ql_log(ql_log_fatal, vha, 0x00b8, "Disabling the board.\n"); if (IS_QLA82XX(ha)) { qla82xx_clear_drv_active(ha); qla82xx_idc_unlock(ha); } else if (IS_QLA8044(ha)) { qla8044_clear_drv_active(ha); qla8044_idc_unlock(ha); } /* Set DEV_FAILED flag to disable timer */ vha->device_flags |= DFLG_DEV_FAILED; qla2x00_abort_all_cmds(vha, DID_NO_CONNECT << 16); qla2x00_mark_all_devices_lost(vha, 0); vha->flags.online = 0; vha->flags.init_done = 0; } /* * qla82xx_need_reset_handler * Code to start reset sequence * * Note: * IDC lock must be held upon entry * * Return: * Success : 0 * Failed : 1 */ static void qla82xx_need_reset_handler(scsi_qla_host_t *vha) 
{ uint32_t dev_state, drv_state, drv_active; uint32_t active_mask = 0; unsigned long reset_timeout; struct qla_hw_data *ha = vha->hw; struct req_que *req = ha->req_q_map[0]; if (vha->flags.online) { qla82xx_idc_unlock(ha); qla2x00_abort_isp_cleanup(vha); ha->isp_ops->get_flash_version(vha, req->ring); ha->isp_ops->nvram_config(vha); qla82xx_idc_lock(ha); } drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE); if (!ha->flags.nic_core_reset_owner) { ql_dbg(ql_dbg_p3p, vha, 0xb028, "reset_acknowledged by 0x%x\n", ha->portnum); qla82xx_set_rst_ready(ha); } else { active_mask = ~(QLA82XX_DRV_ACTIVE << (ha->portnum * 4)); drv_active &= active_mask; ql_dbg(ql_dbg_p3p, vha, 0xb029, "active_mask: 0x%08x\n", active_mask); } /* wait for 10 seconds for reset ack from all functions */ reset_timeout = jiffies + (ha->fcoe_reset_timeout * HZ); drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE); drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE); dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE); ql_dbg(ql_dbg_p3p, vha, 0xb02a, "drv_state: 0x%08x, drv_active: 0x%08x, " "dev_state: 0x%08x, active_mask: 0x%08x\n", drv_state, drv_active, dev_state, active_mask); while (drv_state != drv_active && dev_state != QLA8XXX_DEV_INITIALIZING) { if (time_after_eq(jiffies, reset_timeout)) { ql_log(ql_log_warn, vha, 0x00b5, "Reset timeout.\n"); break; } qla82xx_idc_unlock(ha); msleep(1000); qla82xx_idc_lock(ha); drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE); drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE); if (ha->flags.nic_core_reset_owner) drv_active &= active_mask; dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE); } ql_dbg(ql_dbg_p3p, vha, 0xb02b, "drv_state: 0x%08x, drv_active: 0x%08x, " "dev_state: 0x%08x, active_mask: 0x%08x\n", drv_state, drv_active, dev_state, active_mask); ql_log(ql_log_info, vha, 0x00b6, "Device state is 0x%x = %s.\n", dev_state, dev_state < MAX_STATES ? 
qdev_state(dev_state) : "Unknown"); /* Force to DEV_COLD unless someone else is starting a reset */ if (dev_state != QLA8XXX_DEV_INITIALIZING && dev_state != QLA8XXX_DEV_COLD) { ql_log(ql_log_info, vha, 0x00b7, "HW State: COLD/RE-INIT.\n"); qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA8XXX_DEV_COLD); qla82xx_set_rst_ready(ha); if (ql2xmdenable) { if (qla82xx_md_collect(vha)) ql_log(ql_log_warn, vha, 0xb02c, "Minidump not collected.\n"); } else ql_log(ql_log_warn, vha, 0xb04f, "Minidump disabled.\n"); } } int qla82xx_check_md_needed(scsi_qla_host_t *vha) { struct qla_hw_data *ha = vha->hw; uint16_t fw_major_version, fw_minor_version, fw_subminor_version; int rval = QLA_SUCCESS; fw_major_version = ha->fw_major_version; fw_minor_version = ha->fw_minor_version; fw_subminor_version = ha->fw_subminor_version; rval = qla2x00_get_fw_version(vha); if (rval != QLA_SUCCESS) return rval; if (ql2xmdenable) { if (!ha->fw_dumped) { if ((fw_major_version != ha->fw_major_version || fw_minor_version != ha->fw_minor_version || fw_subminor_version != ha->fw_subminor_version) || (ha->prev_minidump_failed)) { ql_dbg(ql_dbg_p3p, vha, 0xb02d, "Firmware version differs Previous version: %d:%d:%d - New version: %d:%d:%d, prev_minidump_failed: %d.\n", fw_major_version, fw_minor_version, fw_subminor_version, ha->fw_major_version, ha->fw_minor_version, ha->fw_subminor_version, ha->prev_minidump_failed); /* Release MiniDump resources */ qla82xx_md_free(vha); /* ALlocate MiniDump resources */ qla82xx_md_prep(vha); } } else ql_log(ql_log_info, vha, 0xb02e, "Firmware dump available to retrieve\n"); } return rval; } static int qla82xx_check_fw_alive(scsi_qla_host_t *vha) { uint32_t fw_heartbeat_counter; int status = 0; fw_heartbeat_counter = qla82xx_rd_32(vha->hw, QLA82XX_PEG_ALIVE_COUNTER); /* all 0xff, assume AER/EEH in progress, ignore */ if (fw_heartbeat_counter == 0xffffffff) { ql_dbg(ql_dbg_timer, vha, 0x6003, "FW heartbeat counter is 0xffffffff, " "returning status=%d.\n", status); return 
status; } if (vha->fw_heartbeat_counter == fw_heartbeat_counter) { vha->seconds_since_last_heartbeat++; /* FW not alive after 2 seconds */ if (vha->seconds_since_last_heartbeat == 2) { vha->seconds_since_last_heartbeat = 0; status = 1; } } else vha->seconds_since_last_heartbeat = 0; vha->fw_heartbeat_counter = fw_heartbeat_counter; if (status) ql_dbg(ql_dbg_timer, vha, 0x6004, "Returning status=%d.\n", status); return status; } /* * qla82xx_device_state_handler * Main state handler * * Note: * IDC lock must be held upon entry * * Return: * Success : 0 * Failed : 1 */ int qla82xx_device_state_handler(scsi_qla_host_t *vha) { uint32_t dev_state; uint32_t old_dev_state; int rval = QLA_SUCCESS; unsigned long dev_init_timeout; struct qla_hw_data *ha = vha->hw; int loopcount = 0; qla82xx_idc_lock(ha); if (!vha->flags.init_done) { qla82xx_set_drv_active(vha); qla82xx_set_idc_version(vha); } dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE); old_dev_state = dev_state; ql_log(ql_log_info, vha, 0x009b, "Device state is 0x%x = %s.\n", dev_state, dev_state < MAX_STATES ? qdev_state(dev_state) : "Unknown"); /* wait for 30 seconds for device to go ready */ dev_init_timeout = jiffies + (ha->fcoe_dev_init_timeout * HZ); while (1) { if (time_after_eq(jiffies, dev_init_timeout)) { ql_log(ql_log_fatal, vha, 0x009c, "Device init failed.\n"); rval = QLA_FUNCTION_FAILED; break; } dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE); if (old_dev_state != dev_state) { loopcount = 0; old_dev_state = dev_state; } if (loopcount < 5) { ql_log(ql_log_info, vha, 0x009d, "Device state is 0x%x = %s.\n", dev_state, dev_state < MAX_STATES ? 
qdev_state(dev_state) : "Unknown"); } switch (dev_state) { case QLA8XXX_DEV_READY: ha->flags.nic_core_reset_owner = 0; goto rel_lock; case QLA8XXX_DEV_COLD: rval = qla82xx_device_bootstrap(vha); break; case QLA8XXX_DEV_INITIALIZING: qla82xx_idc_unlock(ha); msleep(1000); qla82xx_idc_lock(ha); break; case QLA8XXX_DEV_NEED_RESET: if (!ql2xdontresethba) qla82xx_need_reset_handler(vha); else { qla82xx_idc_unlock(ha); msleep(1000); qla82xx_idc_lock(ha); } dev_init_timeout = jiffies + (ha->fcoe_dev_init_timeout * HZ); break; case QLA8XXX_DEV_NEED_QUIESCENT: qla82xx_need_qsnt_handler(vha); /* Reset timeout value after quiescence handler */ dev_init_timeout = jiffies + (ha->fcoe_dev_init_timeout\ * HZ); break; case QLA8XXX_DEV_QUIESCENT: /* Owner will exit and other will wait for the state * to get changed */ if (ha->flags.quiesce_owner) goto rel_lock; qla82xx_idc_unlock(ha); msleep(1000); qla82xx_idc_lock(ha); /* Reset timeout value after quiescence handler */ dev_init_timeout = jiffies + (ha->fcoe_dev_init_timeout\ * HZ); break; case QLA8XXX_DEV_FAILED: qla8xxx_dev_failed_handler(vha); rval = QLA_FUNCTION_FAILED; goto exit; default: qla82xx_idc_unlock(ha); msleep(1000); qla82xx_idc_lock(ha); } loopcount++; } rel_lock: qla82xx_idc_unlock(ha); exit: return rval; } static int qla82xx_check_temp(scsi_qla_host_t *vha) { uint32_t temp, temp_state, temp_val; struct qla_hw_data *ha = vha->hw; temp = qla82xx_rd_32(ha, CRB_TEMP_STATE); temp_state = qla82xx_get_temp_state(temp); temp_val = qla82xx_get_temp_val(temp); if (temp_state == QLA82XX_TEMP_PANIC) { ql_log(ql_log_warn, vha, 0x600e, "Device temperature %d degrees C exceeds " " maximum allowed. Hardware has been shut down.\n", temp_val); return 1; } else if (temp_state == QLA82XX_TEMP_WARN) { ql_log(ql_log_warn, vha, 0x600f, "Device temperature %d degrees C exceeds " "operating range. 
Immediate action needed.\n", temp_val); } return 0; } int qla82xx_read_temperature(scsi_qla_host_t *vha) { uint32_t temp; temp = qla82xx_rd_32(vha->hw, CRB_TEMP_STATE); return qla82xx_get_temp_val(temp); } void qla82xx_clear_pending_mbx(scsi_qla_host_t *vha) { struct qla_hw_data *ha = vha->hw; if (ha->flags.mbox_busy) { ha->flags.mbox_int = 1; ha->flags.mbox_busy = 0; ql_log(ql_log_warn, vha, 0x6010, "Doing premature completion of mbx command.\n"); if (test_and_clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags)) complete(&ha->mbx_intr_comp); } } void qla82xx_watchdog(scsi_qla_host_t *vha) { uint32_t dev_state, halt_status; struct qla_hw_data *ha = vha->hw; /* don't poll if reset is going on */ if (!ha->flags.nic_core_reset_hdlr_active) { dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE); if (qla82xx_check_temp(vha)) { set_bit(ISP_UNRECOVERABLE, &vha->dpc_flags); ha->flags.isp82xx_fw_hung = 1; qla82xx_clear_pending_mbx(vha); } else if (dev_state == QLA8XXX_DEV_NEED_RESET && !test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags)) { ql_log(ql_log_warn, vha, 0x6001, "Adapter reset needed.\n"); set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); } else if (dev_state == QLA8XXX_DEV_NEED_QUIESCENT && !test_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags)) { ql_log(ql_log_warn, vha, 0x6002, "Quiescent needed.\n"); set_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags); } else if (dev_state == QLA8XXX_DEV_FAILED && !test_bit(ISP_UNRECOVERABLE, &vha->dpc_flags) && vha->flags.online == 1) { ql_log(ql_log_warn, vha, 0xb055, "Adapter state is failed. 
Offlining.\n"); set_bit(ISP_UNRECOVERABLE, &vha->dpc_flags); ha->flags.isp82xx_fw_hung = 1; qla82xx_clear_pending_mbx(vha); } else { if (qla82xx_check_fw_alive(vha)) { ql_dbg(ql_dbg_timer, vha, 0x6011, "disabling pause transmit on port 0 & 1.\n"); qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x98, CRB_NIU_XG_PAUSE_CTL_P0|CRB_NIU_XG_PAUSE_CTL_P1); halt_status = qla82xx_rd_32(ha, QLA82XX_PEG_HALT_STATUS1); ql_log(ql_log_info, vha, 0x6005, "dumping hw/fw registers:.\n " " PEG_HALT_STATUS1: 0x%x, PEG_HALT_STATUS2: 0x%x,.\n " " PEG_NET_0_PC: 0x%x, PEG_NET_1_PC: 0x%x,.\n " " PEG_NET_2_PC: 0x%x, PEG_NET_3_PC: 0x%x,.\n " " PEG_NET_4_PC: 0x%x.\n", halt_status, qla82xx_rd_32(ha, QLA82XX_PEG_HALT_STATUS2), qla82xx_rd_32(ha, QLA82XX_CRB_PEG_NET_0 + 0x3c), qla82xx_rd_32(ha, QLA82XX_CRB_PEG_NET_1 + 0x3c), qla82xx_rd_32(ha, QLA82XX_CRB_PEG_NET_2 + 0x3c), qla82xx_rd_32(ha, QLA82XX_CRB_PEG_NET_3 + 0x3c), qla82xx_rd_32(ha, QLA82XX_CRB_PEG_NET_4 + 0x3c)); if (((halt_status & 0x1fffff00) >> 8) == 0x67) ql_log(ql_log_warn, vha, 0xb052, "Firmware aborted with " "error code 0x00006700. 
Device is " "being reset.\n"); if (halt_status & HALT_STATUS_UNRECOVERABLE) { set_bit(ISP_UNRECOVERABLE, &vha->dpc_flags); } else { ql_log(ql_log_info, vha, 0x6006, "Detect abort needed.\n"); set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); } ha->flags.isp82xx_fw_hung = 1; ql_log(ql_log_warn, vha, 0x6007, "Firmware hung.\n"); qla82xx_clear_pending_mbx(vha); } } } } int qla82xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr) { int rval = -1; struct qla_hw_data *ha = vha->hw; if (IS_QLA82XX(ha)) rval = qla82xx_device_state_handler(vha); else if (IS_QLA8044(ha)) { qla8044_idc_lock(ha); /* Decide the reset ownership */ qla83xx_reset_ownership(vha); qla8044_idc_unlock(ha); rval = qla8044_device_state_handler(vha); } return rval; } void qla82xx_set_reset_owner(scsi_qla_host_t *vha) { struct qla_hw_data *ha = vha->hw; uint32_t dev_state = 0; if (IS_QLA82XX(ha)) dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE); else if (IS_QLA8044(ha)) dev_state = qla8044_rd_direct(vha, QLA8044_CRB_DEV_STATE_INDEX); if (dev_state == QLA8XXX_DEV_READY) { ql_log(ql_log_info, vha, 0xb02f, "HW State: NEED RESET\n"); if (IS_QLA82XX(ha)) { qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA8XXX_DEV_NEED_RESET); ha->flags.nic_core_reset_owner = 1; ql_dbg(ql_dbg_p3p, vha, 0xb030, "reset_owner is 0x%x\n", ha->portnum); } else if (IS_QLA8044(ha)) qla8044_wr_direct(vha, QLA8044_CRB_DEV_STATE_INDEX, QLA8XXX_DEV_NEED_RESET); } else ql_log(ql_log_info, vha, 0xb031, "Device state is 0x%x = %s.\n", dev_state, dev_state < MAX_STATES ? qdev_state(dev_state) : "Unknown"); } /* * qla82xx_abort_isp * Resets ISP and aborts all outstanding commands. * * Input: * ha = adapter block pointer. 
* * Returns: * 0 = success */ int qla82xx_abort_isp(scsi_qla_host_t *vha) { int rval = -1; struct qla_hw_data *ha = vha->hw; if (vha->device_flags & DFLG_DEV_FAILED) { ql_log(ql_log_warn, vha, 0x8024, "Device in failed state, exiting.\n"); return QLA_SUCCESS; } ha->flags.nic_core_reset_hdlr_active = 1; qla82xx_idc_lock(ha); qla82xx_set_reset_owner(vha); qla82xx_idc_unlock(ha); if (IS_QLA82XX(ha)) rval = qla82xx_device_state_handler(vha); else if (IS_QLA8044(ha)) { qla8044_idc_lock(ha); /* Decide the reset ownership */ qla83xx_reset_ownership(vha); qla8044_idc_unlock(ha); rval = qla8044_device_state_handler(vha); } qla82xx_idc_lock(ha); qla82xx_clear_rst_ready(ha); qla82xx_idc_unlock(ha); if (rval == QLA_SUCCESS) { ha->flags.isp82xx_fw_hung = 0; ha->flags.nic_core_reset_hdlr_active = 0; qla82xx_restart_isp(vha); } if (rval) { vha->flags.online = 1; if (test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) { if (ha->isp_abort_cnt == 0) { ql_log(ql_log_warn, vha, 0x8027, "ISP error recover failed - board " "disabled.\n"); /* * The next call disables the board * completely. */ ha->isp_ops->reset_adapter(vha); vha->flags.online = 0; clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags); rval = QLA_SUCCESS; } else { /* schedule another ISP abort */ ha->isp_abort_cnt--; ql_log(ql_log_warn, vha, 0x8036, "ISP abort - retry remaining %d.\n", ha->isp_abort_cnt); rval = QLA_FUNCTION_FAILED; } } else { ha->isp_abort_cnt = MAX_RETRIES_OF_ISP_ABORT; ql_dbg(ql_dbg_taskm, vha, 0x8029, "ISP error recovery - retrying (%d) more times.\n", ha->isp_abort_cnt); set_bit(ISP_ABORT_RETRY, &vha->dpc_flags); rval = QLA_FUNCTION_FAILED; } } return rval; } /* * qla82xx_fcoe_ctx_reset * Perform a quick reset and aborts all outstanding commands. * This will only perform an FCoE context reset and avoids a full blown * chip reset. * * Input: * ha = adapter block pointer. * is_reset_path = flag for identifying the reset path. 
* * Returns: * 0 = success */ int qla82xx_fcoe_ctx_reset(scsi_qla_host_t *vha) { int rval = QLA_FUNCTION_FAILED; if (vha->flags.online) { /* Abort all outstanding commands, so as to be requeued later */ qla2x00_abort_isp_cleanup(vha); } /* Stop currently executing firmware. * This will destroy existing FCoE context at the F/W end. */ qla2x00_try_to_stop_firmware(vha); /* Restart. Creates a new FCoE context on INIT_FIRMWARE. */ rval = qla82xx_restart_isp(vha); return rval; } /* * qla2x00_wait_for_fcoe_ctx_reset * Wait till the FCoE context is reset. * * Note: * Does context switching here. * Release SPIN_LOCK (if any) before calling this routine. * * Return: * Success (fcoe_ctx reset is done) : 0 * Failed (fcoe_ctx reset not completed within max loop timout ) : 1 */ int qla2x00_wait_for_fcoe_ctx_reset(scsi_qla_host_t *vha) { int status = QLA_FUNCTION_FAILED; unsigned long wait_reset; wait_reset = jiffies + (MAX_LOOP_TIMEOUT * HZ); while ((test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags) || test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) && time_before(jiffies, wait_reset)) { set_current_state(TASK_UNINTERRUPTIBLE); schedule_timeout(HZ); if (!test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags) && !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) { status = QLA_SUCCESS; break; } } ql_dbg(ql_dbg_p3p, vha, 0xb027, "%s: status=%d.\n", __func__, status); return status; } void qla82xx_chip_reset_cleanup(scsi_qla_host_t *vha) { int i, fw_state = 0; unsigned long flags; struct qla_hw_data *ha = vha->hw; /* Check if 82XX firmware is alive or not * We may have arrived here from NEED_RESET * detection only */ if (!ha->flags.isp82xx_fw_hung) { for (i = 0; i < 2; i++) { msleep(1000); if (IS_QLA82XX(ha)) fw_state = qla82xx_check_fw_alive(vha); else if (IS_QLA8044(ha)) fw_state = qla8044_check_fw_alive(vha); if (fw_state) { ha->flags.isp82xx_fw_hung = 1; qla82xx_clear_pending_mbx(vha); break; } } } ql_dbg(ql_dbg_init, vha, 0x00b0, "Entered %s fw_hung=%d.\n", __func__, 
ha->flags.isp82xx_fw_hung); /* Abort all commands gracefully if fw NOT hung */ if (!ha->flags.isp82xx_fw_hung) { int cnt, que; srb_t *sp; struct req_que *req; spin_lock_irqsave(&ha->hardware_lock, flags); for (que = 0; que < ha->max_req_queues; que++) { req = ha->req_q_map[que]; if (!req) continue; for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) { sp = req->outstanding_cmds[cnt]; if (sp) { if ((!sp->u.scmd.ctx || (sp->flags & SRB_FCP_CMND_DMA_VALID)) && !ha->flags.isp82xx_fw_hung) { spin_unlock_irqrestore( &ha->hardware_lock, flags); if (ha->isp_ops->abort_command(sp)) { ql_log(ql_log_info, vha, 0x00b1, "mbx abort failed.\n"); } else { ql_log(ql_log_info, vha, 0x00b2, "mbx abort success.\n"); } spin_lock_irqsave(&ha->hardware_lock, flags); } } } } spin_unlock_irqrestore(&ha->hardware_lock, flags); /* Wait for pending cmds (physical and virtual) to complete */ if (!qla2x00_eh_wait_for_pending_commands(vha, 0, 0, WAIT_HOST) == QLA_SUCCESS) { ql_dbg(ql_dbg_init, vha, 0x00b3, "Done wait for " "pending commands.\n"); } } } /* Minidump related functions */ static int qla82xx_minidump_process_control(scsi_qla_host_t *vha, qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr) { struct qla_hw_data *ha = vha->hw; struct qla82xx_md_entry_crb *crb_entry; uint32_t read_value, opcode, poll_time; uint32_t addr, index, crb_addr; unsigned long wtime; struct qla82xx_md_template_hdr *tmplt_hdr; uint32_t rval = QLA_SUCCESS; int i; tmplt_hdr = (struct qla82xx_md_template_hdr *)ha->md_tmplt_hdr; crb_entry = (struct qla82xx_md_entry_crb *)entry_hdr; crb_addr = crb_entry->addr; for (i = 0; i < crb_entry->op_count; i++) { opcode = crb_entry->crb_ctrl.opcode; if (opcode & QLA82XX_DBG_OPCODE_WR) { qla82xx_md_rw_32(ha, crb_addr, crb_entry->value_1, 1); opcode &= ~QLA82XX_DBG_OPCODE_WR; } if (opcode & QLA82XX_DBG_OPCODE_RW) { read_value = qla82xx_md_rw_32(ha, crb_addr, 0, 0); qla82xx_md_rw_32(ha, crb_addr, read_value, 1); opcode &= ~QLA82XX_DBG_OPCODE_RW; } if (opcode & 
QLA82XX_DBG_OPCODE_AND) { read_value = qla82xx_md_rw_32(ha, crb_addr, 0, 0); read_value &= crb_entry->value_2; opcode &= ~QLA82XX_DBG_OPCODE_AND; if (opcode & QLA82XX_DBG_OPCODE_OR) { read_value |= crb_entry->value_3; opcode &= ~QLA82XX_DBG_OPCODE_OR; } qla82xx_md_rw_32(ha, crb_addr, read_value, 1); } if (opcode & QLA82XX_DBG_OPCODE_OR) { read_value = qla82xx_md_rw_32(ha, crb_addr, 0, 0); read_value |= crb_entry->value_3; qla82xx_md_rw_32(ha, crb_addr, read_value, 1); opcode &= ~QLA82XX_DBG_OPCODE_OR; } if (opcode & QLA82XX_DBG_OPCODE_POLL) { poll_time = crb_entry->crb_strd.poll_timeout; wtime = jiffies + poll_time; read_value = qla82xx_md_rw_32(ha, crb_addr, 0, 0); do { if ((read_value & crb_entry->value_2) == crb_entry->value_1) break; else if (time_after_eq(jiffies, wtime)) { /* capturing dump failed */ rval = QLA_FUNCTION_FAILED; break; } else read_value = qla82xx_md_rw_32(ha, crb_addr, 0, 0); } while (1); opcode &= ~QLA82XX_DBG_OPCODE_POLL; } if (opcode & QLA82XX_DBG_OPCODE_RDSTATE) { if (crb_entry->crb_strd.state_index_a) { index = crb_entry->crb_strd.state_index_a; addr = tmplt_hdr->saved_state_array[index]; } else addr = crb_addr; read_value = qla82xx_md_rw_32(ha, addr, 0, 0); index = crb_entry->crb_ctrl.state_index_v; tmplt_hdr->saved_state_array[index] = read_value; opcode &= ~QLA82XX_DBG_OPCODE_RDSTATE; } if (opcode & QLA82XX_DBG_OPCODE_WRSTATE) { if (crb_entry->crb_strd.state_index_a) { index = crb_entry->crb_strd.state_index_a; addr = tmplt_hdr->saved_state_array[index]; } else addr = crb_addr; if (crb_entry->crb_ctrl.state_index_v) { index = crb_entry->crb_ctrl.state_index_v; read_value = tmplt_hdr->saved_state_array[index]; } else read_value = crb_entry->value_1; qla82xx_md_rw_32(ha, addr, read_value, 1); opcode &= ~QLA82XX_DBG_OPCODE_WRSTATE; } if (opcode & QLA82XX_DBG_OPCODE_MDSTATE) { index = crb_entry->crb_ctrl.state_index_v; read_value = tmplt_hdr->saved_state_array[index]; read_value <<= crb_entry->crb_ctrl.shl; read_value >>= 
crb_entry->crb_ctrl.shr; if (crb_entry->value_2) read_value &= crb_entry->value_2; read_value |= crb_entry->value_3; read_value += crb_entry->value_1; tmplt_hdr->saved_state_array[index] = read_value; opcode &= ~QLA82XX_DBG_OPCODE_MDSTATE; } crb_addr += crb_entry->crb_strd.addr_stride; } return rval; } static void qla82xx_minidump_process_rdocm(scsi_qla_host_t *vha, qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr) { struct qla_hw_data *ha = vha->hw; uint32_t r_addr, r_stride, loop_cnt, i, r_value; struct qla82xx_md_entry_rdocm *ocm_hdr; uint32_t *data_ptr = *d_ptr; ocm_hdr = (struct qla82xx_md_entry_rdocm *)entry_hdr; r_addr = ocm_hdr->read_addr; r_stride = ocm_hdr->read_addr_stride; loop_cnt = ocm_hdr->op_count; for (i = 0; i < loop_cnt; i++) { r_value = RD_REG_DWORD(r_addr + ha->nx_pcibase); *data_ptr++ = cpu_to_le32(r_value); r_addr += r_stride; } *d_ptr = data_ptr; } static void qla82xx_minidump_process_rdmux(scsi_qla_host_t *vha, qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr) { struct qla_hw_data *ha = vha->hw; uint32_t r_addr, s_stride, s_addr, s_value, loop_cnt, i, r_value; struct qla82xx_md_entry_mux *mux_hdr; uint32_t *data_ptr = *d_ptr; mux_hdr = (struct qla82xx_md_entry_mux *)entry_hdr; r_addr = mux_hdr->read_addr; s_addr = mux_hdr->select_addr; s_stride = mux_hdr->select_value_stride; s_value = mux_hdr->select_value; loop_cnt = mux_hdr->op_count; for (i = 0; i < loop_cnt; i++) { qla82xx_md_rw_32(ha, s_addr, s_value, 1); r_value = qla82xx_md_rw_32(ha, r_addr, 0, 0); *data_ptr++ = cpu_to_le32(s_value); *data_ptr++ = cpu_to_le32(r_value); s_value += s_stride; } *d_ptr = data_ptr; } static void qla82xx_minidump_process_rdcrb(scsi_qla_host_t *vha, qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr) { struct qla_hw_data *ha = vha->hw; uint32_t r_addr, r_stride, loop_cnt, i, r_value; struct qla82xx_md_entry_crb *crb_hdr; uint32_t *data_ptr = *d_ptr; crb_hdr = (struct qla82xx_md_entry_crb *)entry_hdr; r_addr = crb_hdr->addr; r_stride = 
crb_hdr->crb_strd.addr_stride; loop_cnt = crb_hdr->op_count; for (i = 0; i < loop_cnt; i++) { r_value = qla82xx_md_rw_32(ha, r_addr, 0, 0); *data_ptr++ = cpu_to_le32(r_addr); *data_ptr++ = cpu_to_le32(r_value); r_addr += r_stride; } *d_ptr = data_ptr; } static int qla82xx_minidump_process_l2tag(scsi_qla_host_t *vha, qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr) { struct qla_hw_data *ha = vha->hw; uint32_t addr, r_addr, c_addr, t_r_addr; uint32_t i, k, loop_count, t_value, r_cnt, r_value; unsigned long p_wait, w_time, p_mask; uint32_t c_value_w, c_value_r; struct qla82xx_md_entry_cache *cache_hdr; int rval = QLA_FUNCTION_FAILED; uint32_t *data_ptr = *d_ptr; cache_hdr = (struct qla82xx_md_entry_cache *)entry_hdr; loop_count = cache_hdr->op_count; r_addr = cache_hdr->read_addr; c_addr = cache_hdr->control_addr; c_value_w = cache_hdr->cache_ctrl.write_value; t_r_addr = cache_hdr->tag_reg_addr; t_value = cache_hdr->addr_ctrl.init_tag_value; r_cnt = cache_hdr->read_ctrl.read_addr_cnt; p_wait = cache_hdr->cache_ctrl.poll_wait; p_mask = cache_hdr->cache_ctrl.poll_mask; for (i = 0; i < loop_count; i++) { qla82xx_md_rw_32(ha, t_r_addr, t_value, 1); if (c_value_w) qla82xx_md_rw_32(ha, c_addr, c_value_w, 1); if (p_mask) { w_time = jiffies + p_wait; do { c_value_r = qla82xx_md_rw_32(ha, c_addr, 0, 0); if ((c_value_r & p_mask) == 0) break; else if (time_after_eq(jiffies, w_time)) { /* capturing dump failed */ ql_dbg(ql_dbg_p3p, vha, 0xb032, "c_value_r: 0x%x, poll_mask: 0x%lx, " "w_time: 0x%lx\n", c_value_r, p_mask, w_time); return rval; } } while (1); } addr = r_addr; for (k = 0; k < r_cnt; k++) { r_value = qla82xx_md_rw_32(ha, addr, 0, 0); *data_ptr++ = cpu_to_le32(r_value); addr += cache_hdr->read_ctrl.read_addr_stride; } t_value += cache_hdr->addr_ctrl.tag_value_stride; } *d_ptr = data_ptr; return QLA_SUCCESS; } static void qla82xx_minidump_process_l1cache(scsi_qla_host_t *vha, qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr) { struct qla_hw_data *ha = vha->hw; 
uint32_t addr, r_addr, c_addr, t_r_addr; uint32_t i, k, loop_count, t_value, r_cnt, r_value; uint32_t c_value_w; struct qla82xx_md_entry_cache *cache_hdr; uint32_t *data_ptr = *d_ptr; cache_hdr = (struct qla82xx_md_entry_cache *)entry_hdr; loop_count = cache_hdr->op_count; r_addr = cache_hdr->read_addr; c_addr = cache_hdr->control_addr; c_value_w = cache_hdr->cache_ctrl.write_value; t_r_addr = cache_hdr->tag_reg_addr; t_value = cache_hdr->addr_ctrl.init_tag_value; r_cnt = cache_hdr->read_ctrl.read_addr_cnt; for (i = 0; i < loop_count; i++) { qla82xx_md_rw_32(ha, t_r_addr, t_value, 1); qla82xx_md_rw_32(ha, c_addr, c_value_w, 1); addr = r_addr; for (k = 0; k < r_cnt; k++) { r_value = qla82xx_md_rw_32(ha, addr, 0, 0); *data_ptr++ = cpu_to_le32(r_value); addr += cache_hdr->read_ctrl.read_addr_stride; } t_value += cache_hdr->addr_ctrl.tag_value_stride; } *d_ptr = data_ptr; } static void qla82xx_minidump_process_queue(scsi_qla_host_t *vha, qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr) { struct qla_hw_data *ha = vha->hw; uint32_t s_addr, r_addr; uint32_t r_stride, r_value, r_cnt, qid = 0; uint32_t i, k, loop_cnt; struct qla82xx_md_entry_queue *q_hdr; uint32_t *data_ptr = *d_ptr; q_hdr = (struct qla82xx_md_entry_queue *)entry_hdr; s_addr = q_hdr->select_addr; r_cnt = q_hdr->rd_strd.read_addr_cnt; r_stride = q_hdr->rd_strd.read_addr_stride; loop_cnt = q_hdr->op_count; for (i = 0; i < loop_cnt; i++) { qla82xx_md_rw_32(ha, s_addr, qid, 1); r_addr = q_hdr->read_addr; for (k = 0; k < r_cnt; k++) { r_value = qla82xx_md_rw_32(ha, r_addr, 0, 0); *data_ptr++ = cpu_to_le32(r_value); r_addr += r_stride; } qid += q_hdr->q_strd.queue_id_stride; } *d_ptr = data_ptr; } static void qla82xx_minidump_process_rdrom(scsi_qla_host_t *vha, qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr) { struct qla_hw_data *ha = vha->hw; uint32_t r_addr, r_value; uint32_t i, loop_cnt; struct qla82xx_md_entry_rdrom *rom_hdr; uint32_t *data_ptr = *d_ptr; rom_hdr = (struct qla82xx_md_entry_rdrom 
*)entry_hdr; r_addr = rom_hdr->read_addr; loop_cnt = rom_hdr->read_data_size/sizeof(uint32_t); for (i = 0; i < loop_cnt; i++) { qla82xx_md_rw_32(ha, MD_DIRECT_ROM_WINDOW, (r_addr & 0xFFFF0000), 1); r_value = qla82xx_md_rw_32(ha, MD_DIRECT_ROM_READ_BASE + (r_addr & 0x0000FFFF), 0, 0); *data_ptr++ = cpu_to_le32(r_value); r_addr += sizeof(uint32_t); } *d_ptr = data_ptr; } static int qla82xx_minidump_process_rdmem(scsi_qla_host_t *vha, qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr) { struct qla_hw_data *ha = vha->hw; uint32_t r_addr, r_value, r_data; uint32_t i, j, loop_cnt; struct qla82xx_md_entry_rdmem *m_hdr; unsigned long flags; int rval = QLA_FUNCTION_FAILED; uint32_t *data_ptr = *d_ptr; m_hdr = (struct qla82xx_md_entry_rdmem *)entry_hdr; r_addr = m_hdr->read_addr; loop_cnt = m_hdr->read_data_size/16; if (r_addr & 0xf) { ql_log(ql_log_warn, vha, 0xb033, "Read addr 0x%x not 16 bytes aligned\n", r_addr); return rval; } if (m_hdr->read_data_size % 16) { ql_log(ql_log_warn, vha, 0xb034, "Read data[0x%x] not multiple of 16 bytes\n", m_hdr->read_data_size); return rval; } ql_dbg(ql_dbg_p3p, vha, 0xb035, "[%s]: rdmem_addr: 0x%x, read_data_size: 0x%x, loop_cnt: 0x%x\n", __func__, r_addr, m_hdr->read_data_size, loop_cnt); write_lock_irqsave(&ha->hw_lock, flags); for (i = 0; i < loop_cnt; i++) { qla82xx_md_rw_32(ha, MD_MIU_TEST_AGT_ADDR_LO, r_addr, 1); r_value = 0; qla82xx_md_rw_32(ha, MD_MIU_TEST_AGT_ADDR_HI, r_value, 1); r_value = MIU_TA_CTL_ENABLE; qla82xx_md_rw_32(ha, MD_MIU_TEST_AGT_CTRL, r_value, 1); r_value = MIU_TA_CTL_START | MIU_TA_CTL_ENABLE; qla82xx_md_rw_32(ha, MD_MIU_TEST_AGT_CTRL, r_value, 1); for (j = 0; j < MAX_CTL_CHECK; j++) { r_value = qla82xx_md_rw_32(ha, MD_MIU_TEST_AGT_CTRL, 0, 0); if ((r_value & MIU_TA_CTL_BUSY) == 0) break; } if (j >= MAX_CTL_CHECK) { printk_ratelimited(KERN_ERR "failed to read through agent\n"); write_unlock_irqrestore(&ha->hw_lock, flags); return rval; } for (j = 0; j < 4; j++) { r_data = qla82xx_md_rw_32(ha, 
MD_MIU_TEST_AGT_RDDATA[j], 0, 0); *data_ptr++ = cpu_to_le32(r_data); } r_addr += 16; } write_unlock_irqrestore(&ha->hw_lock, flags); *d_ptr = data_ptr; return QLA_SUCCESS; } int qla82xx_validate_template_chksum(scsi_qla_host_t *vha) { struct qla_hw_data *ha = vha->hw; uint64_t chksum = 0; uint32_t *d_ptr = (uint32_t *)ha->md_tmplt_hdr; int count = ha->md_template_size/sizeof(uint32_t); while (count-- > 0) chksum += *d_ptr++; while (chksum >> 32) chksum = (chksum & 0xFFFFFFFF) + (chksum >> 32); return ~chksum; } static void qla82xx_mark_entry_skipped(scsi_qla_host_t *vha, qla82xx_md_entry_hdr_t *entry_hdr, int index) { entry_hdr->d_ctrl.driver_flags |= QLA82XX_DBG_SKIPPED_FLAG; ql_dbg(ql_dbg_p3p, vha, 0xb036, "Skipping entry[%d]: " "ETYPE[0x%x]-ELEVEL[0x%x]\n", index, entry_hdr->entry_type, entry_hdr->d_ctrl.entry_capture_mask); } int qla82xx_md_collect(scsi_qla_host_t *vha) { struct qla_hw_data *ha = vha->hw; int no_entry_hdr = 0; qla82xx_md_entry_hdr_t *entry_hdr; struct qla82xx_md_template_hdr *tmplt_hdr; uint32_t *data_ptr; uint32_t total_data_size = 0, f_capture_mask, data_collected = 0; int i = 0, rval = QLA_FUNCTION_FAILED; tmplt_hdr = (struct qla82xx_md_template_hdr *)ha->md_tmplt_hdr; data_ptr = (uint32_t *)ha->md_dump; if (ha->fw_dumped) { ql_log(ql_log_warn, vha, 0xb037, "Firmware has been previously dumped (%p) " "-- ignoring request.\n", ha->fw_dump); goto md_failed; } ha->fw_dumped = 0; if (!ha->md_tmplt_hdr || !ha->md_dump) { ql_log(ql_log_warn, vha, 0xb038, "Memory not allocated for minidump capture\n"); goto md_failed; } if (ha->flags.isp82xx_no_md_cap) { ql_log(ql_log_warn, vha, 0xb054, "Forced reset from application, " "ignore minidump capture\n"); ha->flags.isp82xx_no_md_cap = 0; goto md_failed; } if (qla82xx_validate_template_chksum(vha)) { ql_log(ql_log_info, vha, 0xb039, "Template checksum validation error\n"); goto md_failed; } no_entry_hdr = tmplt_hdr->num_of_entries; ql_dbg(ql_dbg_p3p, vha, 0xb03a, "No of entry headers in Template: 0x%x\n", 
no_entry_hdr); ql_dbg(ql_dbg_p3p, vha, 0xb03b, "Capture Mask obtained: 0x%x\n", tmplt_hdr->capture_debug_level); f_capture_mask = tmplt_hdr->capture_debug_level & 0xFF; /* Validate whether required debug level is set */ if ((f_capture_mask & 0x3) != 0x3) { ql_log(ql_log_warn, vha, 0xb03c, "Minimum required capture mask[0x%x] level not set\n", f_capture_mask); goto md_failed; } tmplt_hdr->driver_capture_mask = ql2xmdcapmask; tmplt_hdr->driver_info[0] = vha->host_no; tmplt_hdr->driver_info[1] = (QLA_DRIVER_MAJOR_VER << 24) | (QLA_DRIVER_MINOR_VER << 16) | (QLA_DRIVER_PATCH_VER << 8) | QLA_DRIVER_BETA_VER; total_data_size = ha->md_dump_size; ql_dbg(ql_dbg_p3p, vha, 0xb03d, "Total minidump data_size 0x%x to be captured\n", total_data_size); /* Check whether template obtained is valid */ if (tmplt_hdr->entry_type != QLA82XX_TLHDR) { ql_log(ql_log_warn, vha, 0xb04e, "Bad template header entry type: 0x%x obtained\n", tmplt_hdr->entry_type); goto md_failed; } entry_hdr = (qla82xx_md_entry_hdr_t *) \ (((uint8_t *)ha->md_tmplt_hdr) + tmplt_hdr->first_entry_offset); /* Walk through the entry headers */ for (i = 0; i < no_entry_hdr; i++) { if (data_collected > total_data_size) { ql_log(ql_log_warn, vha, 0xb03e, "More MiniDump data collected: [0x%x]\n", data_collected); goto md_failed; } if (!(entry_hdr->d_ctrl.entry_capture_mask & ql2xmdcapmask)) { entry_hdr->d_ctrl.driver_flags |= QLA82XX_DBG_SKIPPED_FLAG; ql_dbg(ql_dbg_p3p, vha, 0xb03f, "Skipping entry[%d]: " "ETYPE[0x%x]-ELEVEL[0x%x]\n", i, entry_hdr->entry_type, entry_hdr->d_ctrl.entry_capture_mask); goto skip_nxt_entry; } ql_dbg(ql_dbg_p3p, vha, 0xb040, "[%s]: data ptr[%d]: %p, entry_hdr: %p\n" "entry_type: 0x%x, captrue_mask: 0x%x\n", __func__, i, data_ptr, entry_hdr, entry_hdr->entry_type, entry_hdr->d_ctrl.entry_capture_mask); ql_dbg(ql_dbg_p3p, vha, 0xb041, "Data collected: [0x%x], Dump size left:[0x%x]\n", data_collected, (ha->md_dump_size - data_collected)); /* Decode the entry type and take * required action to 
capture debug data */ switch (entry_hdr->entry_type) { case QLA82XX_RDEND: qla82xx_mark_entry_skipped(vha, entry_hdr, i); break; case QLA82XX_CNTRL: rval = qla82xx_minidump_process_control(vha, entry_hdr, &data_ptr); if (rval != QLA_SUCCESS) { qla82xx_mark_entry_skipped(vha, entry_hdr, i); goto md_failed; } break; case QLA82XX_RDCRB: qla82xx_minidump_process_rdcrb(vha, entry_hdr, &data_ptr); break; case QLA82XX_RDMEM: rval = qla82xx_minidump_process_rdmem(vha, entry_hdr, &data_ptr); if (rval != QLA_SUCCESS) { qla82xx_mark_entry_skipped(vha, entry_hdr, i); goto md_failed; } break; case QLA82XX_BOARD: case QLA82XX_RDROM: qla82xx_minidump_process_rdrom(vha, entry_hdr, &data_ptr); break; case QLA82XX_L2DTG: case QLA82XX_L2ITG: case QLA82XX_L2DAT: case QLA82XX_L2INS: rval = qla82xx_minidump_process_l2tag(vha, entry_hdr, &data_ptr); if (rval != QLA_SUCCESS) { qla82xx_mark_entry_skipped(vha, entry_hdr, i); goto md_failed; } break; case QLA82XX_L1DAT: case QLA82XX_L1INS: qla82xx_minidump_process_l1cache(vha, entry_hdr, &data_ptr); break; case QLA82XX_RDOCM: qla82xx_minidump_process_rdocm(vha, entry_hdr, &data_ptr); break; case QLA82XX_RDMUX: qla82xx_minidump_process_rdmux(vha, entry_hdr, &data_ptr); break; case QLA82XX_QUEUE: qla82xx_minidump_process_queue(vha, entry_hdr, &data_ptr); break; case QLA82XX_RDNOP: default: qla82xx_mark_entry_skipped(vha, entry_hdr, i); break; } ql_dbg(ql_dbg_p3p, vha, 0xb042, "[%s]: data ptr[%d]: %p\n", __func__, i, data_ptr); data_collected = (uint8_t *)data_ptr - (uint8_t *)ha->md_dump; skip_nxt_entry: entry_hdr = (qla82xx_md_entry_hdr_t *) \ (((uint8_t *)entry_hdr) + entry_hdr->entry_size); } if (data_collected != total_data_size) { ql_dbg(ql_dbg_p3p, vha, 0xb043, "MiniDump data mismatch: Data collected: [0x%x]," "total_data_size:[0x%x]\n", data_collected, total_data_size); goto md_failed; } ql_log(ql_log_info, vha, 0xb044, "Firmware dump saved to temp buffer (%ld/%p %ld/%p).\n", vha->host_no, ha->md_tmplt_hdr, vha->host_no, ha->md_dump); 
ha->fw_dumped = 1; qla2x00_post_uevent_work(vha, QLA_UEVENT_CODE_FW_DUMP); md_failed: return rval; } int qla82xx_md_alloc(scsi_qla_host_t *vha) { struct qla_hw_data *ha = vha->hw; int i, k; struct qla82xx_md_template_hdr *tmplt_hdr; tmplt_hdr = (struct qla82xx_md_template_hdr *)ha->md_tmplt_hdr; if (ql2xmdcapmask < 0x3 || ql2xmdcapmask > 0x7F) { ql2xmdcapmask = tmplt_hdr->capture_debug_level & 0xFF; ql_log(ql_log_info, vha, 0xb045, "Forcing driver capture mask to firmware default capture mask: 0x%x.\n", ql2xmdcapmask); } for (i = 0x2, k = 1; (i & QLA82XX_DEFAULT_CAP_MASK); i <<= 1, k++) { if (i & ql2xmdcapmask) ha->md_dump_size += tmplt_hdr->capture_size_array[k]; } if (ha->md_dump) { ql_log(ql_log_warn, vha, 0xb046, "Firmware dump previously allocated.\n"); return 1; } ha->md_dump = vmalloc(ha->md_dump_size); if (ha->md_dump == NULL) { ql_log(ql_log_warn, vha, 0xb047, "Unable to allocate memory for Minidump size " "(0x%x).\n", ha->md_dump_size); return 1; } return 0; } void qla82xx_md_free(scsi_qla_host_t *vha) { struct qla_hw_data *ha = vha->hw; /* Release the template header allocated */ if (ha->md_tmplt_hdr) { ql_log(ql_log_info, vha, 0xb048, "Free MiniDump template: %p, size (%d KB)\n", ha->md_tmplt_hdr, ha->md_template_size / 1024); dma_free_coherent(&ha->pdev->dev, ha->md_template_size, ha->md_tmplt_hdr, ha->md_tmplt_hdr_dma); ha->md_tmplt_hdr = NULL; } /* Release the template data buffer allocated */ if (ha->md_dump) { ql_log(ql_log_info, vha, 0xb049, "Free MiniDump memory: %p, size (%d KB)\n", ha->md_dump, ha->md_dump_size / 1024); vfree(ha->md_dump); ha->md_dump_size = 0; ha->md_dump = NULL; } } void qla82xx_md_prep(scsi_qla_host_t *vha) { struct qla_hw_data *ha = vha->hw; int rval; /* Get Minidump template size */ rval = qla82xx_md_get_template_size(vha); if (rval == QLA_SUCCESS) { ql_log(ql_log_info, vha, 0xb04a, "MiniDump Template size obtained (%d KB)\n", ha->md_template_size / 1024); /* Get Minidump template */ if (IS_QLA8044(ha)) rval = 
qla8044_md_get_template(vha); else rval = qla82xx_md_get_template(vha); if (rval == QLA_SUCCESS) { ql_dbg(ql_dbg_p3p, vha, 0xb04b, "MiniDump Template obtained\n"); /* Allocate memory for minidump */ rval = qla82xx_md_alloc(vha); if (rval == QLA_SUCCESS) ql_log(ql_log_info, vha, 0xb04c, "MiniDump memory allocated (%d KB)\n", ha->md_dump_size / 1024); else { ql_log(ql_log_info, vha, 0xb04d, "Free MiniDump template: %p, size: (%d KB)\n", ha->md_tmplt_hdr, ha->md_template_size / 1024); dma_free_coherent(&ha->pdev->dev, ha->md_template_size, ha->md_tmplt_hdr, ha->md_tmplt_hdr_dma); ha->md_tmplt_hdr = NULL; } } } } int qla82xx_beacon_on(struct scsi_qla_host *vha) { int rval; struct qla_hw_data *ha = vha->hw; qla82xx_idc_lock(ha); rval = qla82xx_mbx_beacon_ctl(vha, 1); if (rval) { ql_log(ql_log_warn, vha, 0xb050, "mbx set led config failed in %s\n", __func__); goto exit; } ha->beacon_blink_led = 1; exit: qla82xx_idc_unlock(ha); return rval; } int qla82xx_beacon_off(struct scsi_qla_host *vha) { int rval; struct qla_hw_data *ha = vha->hw; qla82xx_idc_lock(ha); rval = qla82xx_mbx_beacon_ctl(vha, 0); if (rval) { ql_log(ql_log_warn, vha, 0xb051, "mbx set led config failed in %s\n", __func__); goto exit; } ha->beacon_blink_led = 0; exit: qla82xx_idc_unlock(ha); return rval; } void qla82xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked) { struct qla_hw_data *ha = vha->hw; if (!ha->allow_cna_fw_dump) return; scsi_block_requests(vha->host); ha->flags.isp82xx_no_md_cap = 1; qla82xx_idc_lock(ha); qla82xx_set_reset_owner(vha); qla82xx_idc_unlock(ha); qla2x00_wait_for_chip_reset(vha); scsi_unblock_requests(vha->host); }
gpl-2.0
aznrice/l-preview
drivers/media/platform/msm/camera_v2/isp/msm_isp.c
239
4937
/* Copyright (c) 2013, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/delay.h> #include <linux/clk.h> #include <linux/io.h> #include <linux/module.h> #include <linux/proc_fs.h> #include <linux/debugfs.h> #include <linux/videodev2.h> #include <media/v4l2-subdev.h> #include <media/v4l2-device.h> #include <mach/board.h> #include <mach/vreg.h> #include <mach/iommu.h> #include "msm_isp.h" #include "msm_isp_util.h" #include "msm_isp_axi_util.h" #include "msm_isp_stats_util.h" #include "msm_sd.h" #include "msm_isp40.h" #include "msm_isp32.h" static struct msm_sd_req_vb2_q vfe_vb2_ops; static const struct of_device_id msm_vfe_dt_match[] = { { .compatible = "qcom,vfe40", .data = &vfe40_hw_info, }, { .compatible = "qcom,vfe32", .data = &vfe32_hw_info, }, {} }; MODULE_DEVICE_TABLE(of, msm_vfe_dt_match); static const struct platform_device_id msm_vfe_dev_id[] = { {"msm_vfe32", (kernel_ulong_t) &vfe32_hw_info}, {} }; static struct msm_isp_buf_mgr vfe_buf_mgr; static int __devinit vfe_probe(struct platform_device *pdev) { struct vfe_device *vfe_dev; /*struct msm_cam_subdev_info sd_info;*/ const struct of_device_id *match_dev; int rc = 0; struct msm_iova_partition vfe_partition = { .start = SZ_128K, .size = SZ_2G - SZ_128K, }; struct msm_iova_layout vfe_layout = { .partitions = &vfe_partition, .npartitions = 1, .client_name = "vfe", .domain_flags = 0, }; vfe_dev = kzalloc(sizeof(struct vfe_device), GFP_KERNEL); if (!vfe_dev) { pr_err("%s: no enough memory\n", __func__); return -ENOMEM; } if (pdev->dev.of_node) { 
of_property_read_u32((&pdev->dev)->of_node, "cell-index", &pdev->id); match_dev = of_match_device(msm_vfe_dt_match, &pdev->dev); vfe_dev->hw_info = (struct msm_vfe_hardware_info *) match_dev->data; } else { vfe_dev->hw_info = (struct msm_vfe_hardware_info *) platform_get_device_id(pdev)->driver_data; } if (!vfe_dev->hw_info) { pr_err("%s: No vfe hardware info\n", __func__); return -EINVAL; } ISP_DBG("%s: device id = %d\n", __func__, pdev->id); vfe_dev->pdev = pdev; rc = vfe_dev->hw_info->vfe_ops.core_ops.get_platform_data(vfe_dev); if (rc < 0) { pr_err("%s: failed to get platform resources\n", __func__); kfree(vfe_dev); return -ENOMEM; } INIT_LIST_HEAD(&vfe_dev->tasklet_q); tasklet_init(&vfe_dev->vfe_tasklet, msm_isp_do_tasklet, (unsigned long)vfe_dev); v4l2_subdev_init(&vfe_dev->subdev.sd, vfe_dev->hw_info->subdev_ops); vfe_dev->subdev.sd.internal_ops = vfe_dev->hw_info->subdev_internal_ops; snprintf(vfe_dev->subdev.sd.name, ARRAY_SIZE(vfe_dev->subdev.sd.name), "vfe"); vfe_dev->subdev.sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE; vfe_dev->subdev.sd.flags |= V4L2_SUBDEV_FL_HAS_EVENTS; v4l2_set_subdevdata(&vfe_dev->subdev.sd, vfe_dev); platform_set_drvdata(pdev, &vfe_dev->subdev.sd); mutex_init(&vfe_dev->realtime_mutex); mutex_init(&vfe_dev->core_mutex); spin_lock_init(&vfe_dev->tasklet_lock); spin_lock_init(&vfe_dev->shared_data_lock); spin_lock_init(&vfe_dev->cfg_flag_lock); media_entity_init(&vfe_dev->subdev.sd.entity, 0, NULL, 0); vfe_dev->subdev.sd.entity.type = MEDIA_ENT_T_V4L2_SUBDEV; vfe_dev->subdev.sd.entity.group_id = MSM_CAMERA_SUBDEV_VFE; vfe_dev->subdev.sd.entity.name = pdev->name; vfe_dev->subdev.close_seq = MSM_SD_CLOSE_1ST_CATEGORY | 0x2; rc = msm_sd_register(&vfe_dev->subdev); if (rc != 0) { pr_err("%s: msm_sd_register error = %d\n", __func__, rc); kfree(vfe_dev); goto end; } vfe_dev->buf_mgr = &vfe_buf_mgr; v4l2_subdev_notify(&vfe_dev->subdev.sd, MSM_SD_NOTIFY_REQ_CB, &vfe_vb2_ops); rc = msm_isp_create_isp_buf_mgr(vfe_dev->buf_mgr, &vfe_vb2_ops, 
&vfe_layout); if (rc < 0) { pr_err("%s: Unable to create buffer manager\n", __func__); kfree(vfe_dev); return -EINVAL; } vfe_dev->buf_mgr->ops->register_ctx(vfe_dev->buf_mgr, &vfe_dev->iommu_ctx[0], vfe_dev->hw_info->num_iommu_ctx); vfe_dev->vfe_open_cnt = 0; end: return rc; } static struct platform_driver vfe_driver = { .probe = vfe_probe, .driver = { .name = "msm_vfe", .owner = THIS_MODULE, .of_match_table = msm_vfe_dt_match, }, .id_table = msm_vfe_dev_id, }; static int __init msm_vfe_init_module(void) { return platform_driver_register(&vfe_driver); } static void __exit msm_vfe_exit_module(void) { platform_driver_unregister(&vfe_driver); } module_init(msm_vfe_init_module); module_exit(msm_vfe_exit_module); MODULE_DESCRIPTION("MSM VFE driver"); MODULE_LICENSE("GPL v2");
gpl-2.0
felipito/linux-stable
drivers/staging/comedi/drivers/aio_iiro_16.c
495
2914
/* comedi/drivers/aio_iiro_16.c Driver for Access I/O Products PC-104 AIO-IIRO-16 Digital I/O board Copyright (C) 2006 C&C Technologies, Inc. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. */ /* Driver: aio_iiro_16 Description: Access I/O Products PC-104 IIRO16 Relay And Isolated Input Board Author: Zachary Ware <zach.ware@cctechnol.com> Devices: [Access I/O] PC-104 AIO12-8 Status: experimental Configuration Options: [0] - I/O port base address */ #include <linux/module.h> #include "../comedidev.h" #define AIO_IIRO_16_RELAY_0_7 0x00 #define AIO_IIRO_16_INPUT_0_7 0x01 #define AIO_IIRO_16_IRQ 0x02 #define AIO_IIRO_16_RELAY_8_15 0x04 #define AIO_IIRO_16_INPUT_8_15 0x05 static int aio_iiro_16_dio_insn_bits_write(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { if (comedi_dio_update_state(s, data)) { outb(s->state & 0xff, dev->iobase + AIO_IIRO_16_RELAY_0_7); outb((s->state >> 8) & 0xff, dev->iobase + AIO_IIRO_16_RELAY_8_15); } data[1] = s->state; return insn->n; } static int aio_iiro_16_dio_insn_bits_read(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { data[1] = 0; data[1] |= inb(dev->iobase + AIO_IIRO_16_INPUT_0_7); data[1] |= inb(dev->iobase + AIO_IIRO_16_INPUT_8_15) << 8; return insn->n; } static int aio_iiro_16_attach(struct comedi_device *dev, struct comedi_devconfig *it) { struct comedi_subdevice *s; int ret; ret = comedi_request_region(dev, it->options[0], 0x8); if (ret) return ret; ret = comedi_alloc_subdevices(dev, 2); if (ret) return 
ret; s = &dev->subdevices[0]; s->type = COMEDI_SUBD_DIO; s->subdev_flags = SDF_WRITABLE; s->n_chan = 16; s->maxdata = 1; s->range_table = &range_digital; s->insn_bits = aio_iiro_16_dio_insn_bits_write; s = &dev->subdevices[1]; s->type = COMEDI_SUBD_DIO; s->subdev_flags = SDF_READABLE; s->n_chan = 16; s->maxdata = 1; s->range_table = &range_digital; s->insn_bits = aio_iiro_16_dio_insn_bits_read; return 0; } static struct comedi_driver aio_iiro_16_driver = { .driver_name = "aio_iiro_16", .module = THIS_MODULE, .attach = aio_iiro_16_attach, .detach = comedi_legacy_detach, }; module_comedi_driver(aio_iiro_16_driver); MODULE_AUTHOR("Comedi http://www.comedi.org"); MODULE_DESCRIPTION("Comedi low-level driver"); MODULE_LICENSE("GPL");
gpl-2.0
ma34s/so03c_kernel
drivers/media/video/cx231xx/cx231xx-input.c
495
6423
/* handle cx231xx IR remotes via linux kernel input layer. Copyright (C) 2008 <srinivasa.deevi at conexant dot com> Based on em28xx driver < This is a place holder for IR now.> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/module.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/input.h> #include <linux/usb.h> #include "cx231xx.h" static unsigned int ir_debug; module_param(ir_debug, int, 0644); MODULE_PARM_DESC(ir_debug, "enable debug messages [IR]"); #define i2cdprintk(fmt, arg...) \ if (ir_debug) { \ printk(KERN_DEBUG "%s/ir: " fmt, ir->name , ## arg); \ } #define dprintk(fmt, arg...) 
\ if (ir_debug) { \ printk(KERN_DEBUG "%s/ir: " fmt, ir->name , ## arg); \ } /********************************************************** Polling structure used by cx231xx IR's **********************************************************/ struct cx231xx_ir_poll_result { unsigned int toggle_bit:1; unsigned int read_count:7; u8 rc_address; u8 rc_data[4]; }; struct cx231xx_IR { struct cx231xx *dev; struct input_dev *input; struct ir_input_state ir; char name[32]; char phys[32]; /* poll external decoder */ int polling; struct work_struct work; struct timer_list timer; unsigned int last_toggle:1; unsigned int last_readcount; unsigned int repeat_interval; int (*get_key) (struct cx231xx_IR *, struct cx231xx_ir_poll_result *); }; /********************************************************** Polling code for cx231xx **********************************************************/ static void cx231xx_ir_handle_key(struct cx231xx_IR *ir) { int result; int do_sendkey = 0; struct cx231xx_ir_poll_result poll_result; /* read the registers containing the IR status */ result = ir->get_key(ir, &poll_result); if (result < 0) { dprintk("ir->get_key() failed %d\n", result); return; } dprintk("ir->get_key result tb=%02x rc=%02x lr=%02x data=%02x\n", poll_result.toggle_bit, poll_result.read_count, ir->last_readcount, poll_result.rc_data[0]); if (ir->dev->chip_id == CHIP_ID_EM2874) { /* The em2874 clears the readcount field every time the register is read. The em2860/2880 datasheet says that it is supposed to clear the readcount, but it doesn't. 
So with the em2874, we are looking for a non-zero read count as opposed to a readcount that is incrementing */ ir->last_readcount = 0; } if (poll_result.read_count == 0) { /* The button has not been pressed since the last read */ } else if (ir->last_toggle != poll_result.toggle_bit) { /* A button has been pressed */ dprintk("button has been pressed\n"); ir->last_toggle = poll_result.toggle_bit; ir->repeat_interval = 0; do_sendkey = 1; } else if (poll_result.toggle_bit == ir->last_toggle && poll_result.read_count > 0 && poll_result.read_count != ir->last_readcount) { /* The button is still being held down */ dprintk("button being held down\n"); /* Debouncer for first keypress */ if (ir->repeat_interval++ > 9) { /* Start repeating after 1 second */ do_sendkey = 1; } } if (do_sendkey) { dprintk("sending keypress\n"); ir_input_keydown(ir->input, &ir->ir, poll_result.rc_data[0], poll_result.rc_data[0]); ir_input_nokey(ir->input, &ir->ir); } ir->last_readcount = poll_result.read_count; return; } static void ir_timer(unsigned long data) { struct cx231xx_IR *ir = (struct cx231xx_IR *)data; schedule_work(&ir->work); } static void cx231xx_ir_work(struct work_struct *work) { struct cx231xx_IR *ir = container_of(work, struct cx231xx_IR, work); cx231xx_ir_handle_key(ir); mod_timer(&ir->timer, jiffies + msecs_to_jiffies(ir->polling)); } void cx231xx_ir_start(struct cx231xx_IR *ir) { setup_timer(&ir->timer, ir_timer, (unsigned long)ir); INIT_WORK(&ir->work, cx231xx_ir_work); schedule_work(&ir->work); } static void cx231xx_ir_stop(struct cx231xx_IR *ir) { del_timer_sync(&ir->timer); flush_scheduled_work(); } int cx231xx_ir_init(struct cx231xx *dev) { struct cx231xx_IR *ir; struct input_dev *input_dev; u8 ir_config; int err = -ENOMEM; if (dev->board.ir_codes == NULL) { /* No remote control support */ return 0; } ir = kzalloc(sizeof(*ir), GFP_KERNEL); input_dev = input_allocate_device(); if (!ir || !input_dev) goto err_out_free; ir->input = input_dev; /* Setup the proper handler 
based on the chip */ switch (dev->chip_id) { default: printk("Unrecognized cx231xx chip id: IR not supported\n"); goto err_out_free; } /* This is how often we ask the chip for IR information */ ir->polling = 100; /* ms */ /* init input device */ snprintf(ir->name, sizeof(ir->name), "cx231xx IR (%s)", dev->name); usb_make_path(dev->udev, ir->phys, sizeof(ir->phys)); strlcat(ir->phys, "/input0", sizeof(ir->phys)); ir_input_init(input_dev, &ir->ir, IR_TYPE_OTHER, dev->board.ir_codes); input_dev->name = ir->name; input_dev->phys = ir->phys; input_dev->id.bustype = BUS_USB; input_dev->id.version = 1; input_dev->id.vendor = le16_to_cpu(dev->udev->descriptor.idVendor); input_dev->id.product = le16_to_cpu(dev->udev->descriptor.idProduct); input_dev->dev.parent = &dev->udev->dev; /* record handles to ourself */ ir->dev = dev; dev->ir = ir; cx231xx_ir_start(ir); /* all done */ err = input_register_device(ir->input); if (err) goto err_out_stop; return 0; err_out_stop: cx231xx_ir_stop(ir); dev->ir = NULL; err_out_free: input_free_device(input_dev); kfree(ir); return err; } int cx231xx_ir_fini(struct cx231xx *dev) { struct cx231xx_IR *ir = dev->ir; /* skip detach on non attached boards */ if (!ir) return 0; cx231xx_ir_stop(ir); input_unregister_device(ir->input); kfree(ir); /* done */ dev->ir = NULL; return 0; }
gpl-2.0
fengshao0907/fastsocket
kernel/drivers/serial/s3c2410.c
495
3040
/* linux/drivers/serial/s3c2410.c * * Driver for Samsung S3C2410 SoC onboard UARTs. * * Ben Dooks, Copyright (c) 2003-2005,2008 Simtec Electronics * http://armlinux.simtec.co.uk/ * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/module.h> #include <linux/ioport.h> #include <linux/io.h> #include <linux/platform_device.h> #include <linux/init.h> #include <linux/serial_core.h> #include <linux/serial.h> #include <asm/irq.h> #include <mach/hardware.h> #include <plat/regs-serial.h> #include <mach/regs-gpio.h> #include "samsung.h" static int s3c2410_serial_setsource(struct uart_port *port, struct s3c24xx_uart_clksrc *clk) { unsigned long ucon = rd_regl(port, S3C2410_UCON); if (strcmp(clk->name, "uclk") == 0) ucon |= S3C2410_UCON_UCLK; else ucon &= ~S3C2410_UCON_UCLK; wr_regl(port, S3C2410_UCON, ucon); return 0; } static int s3c2410_serial_getsource(struct uart_port *port, struct s3c24xx_uart_clksrc *clk) { unsigned long ucon = rd_regl(port, S3C2410_UCON); clk->divisor = 1; clk->name = (ucon & S3C2410_UCON_UCLK) ? 
"uclk" : "pclk"; return 0; } static int s3c2410_serial_resetport(struct uart_port *port, struct s3c2410_uartcfg *cfg) { dbg("s3c2410_serial_resetport: port=%p (%08lx), cfg=%p\n", port, port->mapbase, cfg); wr_regl(port, S3C2410_UCON, cfg->ucon); wr_regl(port, S3C2410_ULCON, cfg->ulcon); /* reset both fifos */ wr_regl(port, S3C2410_UFCON, cfg->ufcon | S3C2410_UFCON_RESETBOTH); wr_regl(port, S3C2410_UFCON, cfg->ufcon); return 0; } static struct s3c24xx_uart_info s3c2410_uart_inf = { .name = "Samsung S3C2410 UART", .type = PORT_S3C2410, .fifosize = 16, .rx_fifomask = S3C2410_UFSTAT_RXMASK, .rx_fifoshift = S3C2410_UFSTAT_RXSHIFT, .rx_fifofull = S3C2410_UFSTAT_RXFULL, .tx_fifofull = S3C2410_UFSTAT_TXFULL, .tx_fifomask = S3C2410_UFSTAT_TXMASK, .tx_fifoshift = S3C2410_UFSTAT_TXSHIFT, .get_clksrc = s3c2410_serial_getsource, .set_clksrc = s3c2410_serial_setsource, .reset_port = s3c2410_serial_resetport, }; static int s3c2410_serial_probe(struct platform_device *dev) { return s3c24xx_serial_probe(dev, &s3c2410_uart_inf); } static struct platform_driver s3c2410_serial_driver = { .probe = s3c2410_serial_probe, .remove = __devexit_p(s3c24xx_serial_remove), .driver = { .name = "s3c2410-uart", .owner = THIS_MODULE, }, }; s3c24xx_console_init(&s3c2410_serial_driver, &s3c2410_uart_inf); static int __init s3c2410_serial_init(void) { return s3c24xx_serial_init(&s3c2410_serial_driver, &s3c2410_uart_inf); } static void __exit s3c2410_serial_exit(void) { platform_driver_unregister(&s3c2410_serial_driver); } module_init(s3c2410_serial_init); module_exit(s3c2410_serial_exit); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>"); MODULE_DESCRIPTION("Samsung S3C2410 SoC Serial port driver"); MODULE_ALIAS("platform:s3c2410-uart");
gpl-2.0
peacetank200/android_kernel_samsung_m180l
drivers/char/nozomi.c
751
50250
/* * nozomi.c -- HSDPA driver Broadband Wireless Data Card - Globe Trotter * * Written by: Ulf Jakobsson, * Jan Åkerfeldt, * Stefan Thomasson, * * Maintained by: Paul Hardwick (p.hardwick@option.com) * * Patches: * Locking code changes for Vodafone by Sphere Systems Ltd, * Andrew Bird (ajb@spheresystems.co.uk ) * & Phil Sanderson * * Source has been ported from an implementation made by Filip Aben @ Option * * -------------------------------------------------------------------------- * * Copyright (c) 2005,2006 Option Wireless Sweden AB * Copyright (c) 2006 Sphere Systems Ltd * Copyright (c) 2006 Option Wireless n/v * All rights Reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
* * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * * -------------------------------------------------------------------------- */ /* Enable this to have a lot of debug printouts */ #define DEBUG #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/ioport.h> #include <linux/tty.h> #include <linux/tty_driver.h> #include <linux/tty_flip.h> #include <linux/sched.h> #include <linux/serial.h> #include <linux/interrupt.h> #include <linux/kmod.h> #include <linux/init.h> #include <linux/kfifo.h> #include <linux/uaccess.h> #include <linux/slab.h> #include <asm/byteorder.h> #include <linux/delay.h> #define VERSION_STRING DRIVER_DESC " 2.1d (build date: " \ __DATE__ " " __TIME__ ")" /* Macros definitions */ /* Default debug printout level */ #define NOZOMI_DEBUG_LEVEL 0x00 #define P_BUF_SIZE 128 #define NFO(_err_flag_, args...) \ do { \ char tmp[P_BUF_SIZE]; \ snprintf(tmp, sizeof(tmp), ##args); \ printk(_err_flag_ "[%d] %s(): %s\n", __LINE__, \ __func__, tmp); \ } while (0) #define DBG1(args...) D_(0x01, ##args) #define DBG2(args...) D_(0x02, ##args) #define DBG3(args...) D_(0x04, ##args) #define DBG4(args...) D_(0x08, ##args) #define DBG5(args...) D_(0x10, ##args) #define DBG6(args...) D_(0x20, ##args) #define DBG7(args...) D_(0x40, ##args) #define DBG8(args...) D_(0x80, ##args) #ifdef DEBUG /* Do we need this settable at runtime? */ static int debug = NOZOMI_DEBUG_LEVEL; #define D(lvl, args...) do \ {if (lvl & debug) NFO(KERN_DEBUG, ##args); } \ while (0) #define D_(lvl, args...) D(lvl, ##args) /* These printouts are always printed */ #else static int debug; #define D_(lvl, args...) #endif /* TODO: rewrite to optimize macros... */ #define TMP_BUF_MAX 256 #define DUMP(buf__,len__) \ do { \ char tbuf[TMP_BUF_MAX] = {0};\ if (len__ > 1) {\ snprintf(tbuf, len__ > TMP_BUF_MAX ? 
TMP_BUF_MAX : len__, "%s", buf__);\ if (tbuf[len__-2] == '\r') {\ tbuf[len__-2] = 'r';\ } \ DBG1("SENDING: '%s' (%d+n)", tbuf, len__);\ } else {\ DBG1("SENDING: '%s' (%d)", tbuf, len__);\ } \ } while (0) /* Defines */ #define NOZOMI_NAME "nozomi" #define NOZOMI_NAME_TTY "nozomi_tty" #define DRIVER_DESC "Nozomi driver" #define NTTY_TTY_MAXMINORS 256 #define NTTY_FIFO_BUFFER_SIZE 8192 /* Must be power of 2 */ #define FIFO_BUFFER_SIZE_UL 8192 /* Size of tmp send buffer to card */ #define SEND_BUF_MAX 1024 #define RECEIVE_BUF_MAX 4 #define R_IIR 0x0000 /* Interrupt Identity Register */ #define R_FCR 0x0000 /* Flow Control Register */ #define R_IER 0x0004 /* Interrupt Enable Register */ #define CONFIG_MAGIC 0xEFEFFEFE #define TOGGLE_VALID 0x0000 /* Definition of interrupt tokens */ #define MDM_DL1 0x0001 #define MDM_UL1 0x0002 #define MDM_DL2 0x0004 #define MDM_UL2 0x0008 #define DIAG_DL1 0x0010 #define DIAG_DL2 0x0020 #define DIAG_UL 0x0040 #define APP1_DL 0x0080 #define APP1_UL 0x0100 #define APP2_DL 0x0200 #define APP2_UL 0x0400 #define CTRL_DL 0x0800 #define CTRL_UL 0x1000 #define RESET 0x8000 #define MDM_DL (MDM_DL1 | MDM_DL2) #define MDM_UL (MDM_UL1 | MDM_UL2) #define DIAG_DL (DIAG_DL1 | DIAG_DL2) /* modem signal definition */ #define CTRL_DSR 0x0001 #define CTRL_DCD 0x0002 #define CTRL_RI 0x0004 #define CTRL_CTS 0x0008 #define CTRL_DTR 0x0001 #define CTRL_RTS 0x0002 #define MAX_PORT 4 #define NOZOMI_MAX_PORTS 5 #define NOZOMI_MAX_CARDS (NTTY_TTY_MAXMINORS / MAX_PORT) /* Type definitions */ /* * There are two types of nozomi cards, * one with 2048 memory and with 8192 memory */ enum card_type { F32_2 = 2048, /* 512 bytes downlink + uplink * 2 -> 2048 */ F32_8 = 8192, /* 3072 bytes downl. 
+ 1024 bytes uplink * 2 -> 8192 */ }; /* Initialization states a card can be in */ enum card_state { NOZOMI_STATE_UKNOWN = 0, NOZOMI_STATE_ENABLED = 1, /* pci device enabled */ NOZOMI_STATE_ALLOCATED = 2, /* config setup done */ NOZOMI_STATE_READY = 3, /* flowcontrols received */ }; /* Two different toggle channels exist */ enum channel_type { CH_A = 0, CH_B = 1, }; /* Port definition for the card regarding flow control */ enum ctrl_port_type { CTRL_CMD = 0, CTRL_MDM = 1, CTRL_DIAG = 2, CTRL_APP1 = 3, CTRL_APP2 = 4, CTRL_ERROR = -1, }; /* Ports that the nozomi has */ enum port_type { PORT_MDM = 0, PORT_DIAG = 1, PORT_APP1 = 2, PORT_APP2 = 3, PORT_CTRL = 4, PORT_ERROR = -1, }; #ifdef __BIG_ENDIAN /* Big endian */ struct toggles { unsigned int enabled:5; /* * Toggle fields are valid if enabled is 0, * else A-channels must always be used. */ unsigned int diag_dl:1; unsigned int mdm_dl:1; unsigned int mdm_ul:1; } __attribute__ ((packed)); /* Configuration table to read at startup of card */ /* Is for now only needed during initialization phase */ struct config_table { u32 signature; u16 product_information; u16 version; u8 pad3[3]; struct toggles toggle; u8 pad1[4]; u16 dl_mdm_len1; /* * If this is 64, it can hold * 60 bytes + 4 that is length field */ u16 dl_start; u16 dl_diag_len1; u16 dl_mdm_len2; /* * If this is 64, it can hold * 60 bytes + 4 that is length field */ u16 dl_app1_len; u16 dl_diag_len2; u16 dl_ctrl_len; u16 dl_app2_len; u8 pad2[16]; u16 ul_mdm_len1; u16 ul_start; u16 ul_diag_len; u16 ul_mdm_len2; u16 ul_app1_len; u16 ul_app2_len; u16 ul_ctrl_len; } __attribute__ ((packed)); /* This stores all control downlink flags */ struct ctrl_dl { u8 port; unsigned int reserved:4; unsigned int CTS:1; unsigned int RI:1; unsigned int DCD:1; unsigned int DSR:1; } __attribute__ ((packed)); /* This stores all control uplink flags */ struct ctrl_ul { u8 port; unsigned int reserved:6; unsigned int RTS:1; unsigned int DTR:1; } __attribute__ ((packed)); #else /* Little 
endian */ /* This represents the toggle information */ struct toggles { unsigned int mdm_ul:1; unsigned int mdm_dl:1; unsigned int diag_dl:1; unsigned int enabled:5; /* * Toggle fields are valid if enabled is 0, * else A-channels must always be used. */ } __attribute__ ((packed)); /* Configuration table to read at startup of card */ struct config_table { u32 signature; u16 version; u16 product_information; struct toggles toggle; u8 pad1[7]; u16 dl_start; u16 dl_mdm_len1; /* * If this is 64, it can hold * 60 bytes + 4 that is length field */ u16 dl_mdm_len2; u16 dl_diag_len1; u16 dl_diag_len2; u16 dl_app1_len; u16 dl_app2_len; u16 dl_ctrl_len; u8 pad2[16]; u16 ul_start; u16 ul_mdm_len2; u16 ul_mdm_len1; u16 ul_diag_len; u16 ul_app1_len; u16 ul_app2_len; u16 ul_ctrl_len; } __attribute__ ((packed)); /* This stores all control downlink flags */ struct ctrl_dl { unsigned int DSR:1; unsigned int DCD:1; unsigned int RI:1; unsigned int CTS:1; unsigned int reserverd:4; u8 port; } __attribute__ ((packed)); /* This stores all control uplink flags */ struct ctrl_ul { unsigned int DTR:1; unsigned int RTS:1; unsigned int reserved:6; u8 port; } __attribute__ ((packed)); #endif /* This holds all information that is needed regarding a port */ struct port { struct tty_port port; u8 update_flow_control; struct ctrl_ul ctrl_ul; struct ctrl_dl ctrl_dl; struct kfifo fifo_ul; void __iomem *dl_addr[2]; u32 dl_size[2]; u8 toggle_dl; void __iomem *ul_addr[2]; u32 ul_size[2]; u8 toggle_ul; u16 token_dl; /* mutex to ensure one access patch to this port */ struct mutex tty_sem; wait_queue_head_t tty_wait; struct async_icount tty_icount; struct nozomi *dc; }; /* Private data one for each card in the system */ struct nozomi { void __iomem *base_addr; unsigned long flip; /* Pointers to registers */ void __iomem *reg_iir; void __iomem *reg_fcr; void __iomem *reg_ier; u16 last_ier; enum card_type card_type; struct config_table config_table; /* Configuration table */ struct pci_dev *pdev; struct 
port port[NOZOMI_MAX_PORTS]; u8 *send_buf; spinlock_t spin_mutex; /* secures access to registers and tty */ unsigned int index_start; enum card_state state; u32 open_ttys; }; /* This is a data packet that is read or written to/from card */ struct buffer { u32 size; /* size is the length of the data buffer */ u8 *data; } __attribute__ ((packed)); /* Global variables */ static const struct pci_device_id nozomi_pci_tbl[] __devinitconst = { {PCI_DEVICE(0x1931, 0x000c)}, /* Nozomi HSDPA */ {}, }; MODULE_DEVICE_TABLE(pci, nozomi_pci_tbl); static struct nozomi *ndevs[NOZOMI_MAX_CARDS]; static struct tty_driver *ntty_driver; static const struct tty_port_operations noz_tty_port_ops; /* * find card by tty_index */ static inline struct nozomi *get_dc_by_tty(const struct tty_struct *tty) { return tty ? ndevs[tty->index / MAX_PORT] : NULL; } static inline struct port *get_port_by_tty(const struct tty_struct *tty) { struct nozomi *ndev = get_dc_by_tty(tty); return ndev ? &ndev->port[tty->index % MAX_PORT] : NULL; } /* * TODO: * -Optimize * -Rewrite cleaner */ static void read_mem32(u32 *buf, const void __iomem *mem_addr_start, u32 size_bytes) { u32 i = 0; const u32 __iomem *ptr = mem_addr_start; u16 *buf16; if (unlikely(!ptr || !buf)) goto out; /* shortcut for extremely often used cases */ switch (size_bytes) { case 2: /* 2 bytes */ buf16 = (u16 *) buf; *buf16 = __le16_to_cpu(readw(ptr)); goto out; break; case 4: /* 4 bytes */ *(buf) = __le32_to_cpu(readl(ptr)); goto out; break; } while (i < size_bytes) { if (size_bytes - i == 2) { /* Handle 2 bytes in the end */ buf16 = (u16 *) buf; *(buf16) = __le16_to_cpu(readw(ptr)); i += 2; } else { /* Read 4 bytes */ *(buf) = __le32_to_cpu(readl(ptr)); i += 4; } buf++; ptr++; } out: return; } /* * TODO: * -Optimize * -Rewrite cleaner */ static u32 write_mem32(void __iomem *mem_addr_start, const u32 *buf, u32 size_bytes) { u32 i = 0; u32 __iomem *ptr = mem_addr_start; const u16 *buf16; if (unlikely(!ptr || !buf)) return 0; /* shortcut for 
extremely often used cases */ switch (size_bytes) { case 2: /* 2 bytes */ buf16 = (const u16 *)buf; writew(__cpu_to_le16(*buf16), ptr); return 2; break; case 1: /* * also needs to write 4 bytes in this case * so falling through.. */ case 4: /* 4 bytes */ writel(__cpu_to_le32(*buf), ptr); return 4; break; } while (i < size_bytes) { if (size_bytes - i == 2) { /* 2 bytes */ buf16 = (const u16 *)buf; writew(__cpu_to_le16(*buf16), ptr); i += 2; } else { /* 4 bytes */ writel(__cpu_to_le32(*buf), ptr); i += 4; } buf++; ptr++; } return i; } /* Setup pointers to different channels and also setup buffer sizes. */ static void setup_memory(struct nozomi *dc) { void __iomem *offset = dc->base_addr + dc->config_table.dl_start; /* The length reported is including the length field of 4 bytes, * hence subtract with 4. */ const u16 buff_offset = 4; /* Modem port dl configuration */ dc->port[PORT_MDM].dl_addr[CH_A] = offset; dc->port[PORT_MDM].dl_addr[CH_B] = (offset += dc->config_table.dl_mdm_len1); dc->port[PORT_MDM].dl_size[CH_A] = dc->config_table.dl_mdm_len1 - buff_offset; dc->port[PORT_MDM].dl_size[CH_B] = dc->config_table.dl_mdm_len2 - buff_offset; /* Diag port dl configuration */ dc->port[PORT_DIAG].dl_addr[CH_A] = (offset += dc->config_table.dl_mdm_len2); dc->port[PORT_DIAG].dl_size[CH_A] = dc->config_table.dl_diag_len1 - buff_offset; dc->port[PORT_DIAG].dl_addr[CH_B] = (offset += dc->config_table.dl_diag_len1); dc->port[PORT_DIAG].dl_size[CH_B] = dc->config_table.dl_diag_len2 - buff_offset; /* App1 port dl configuration */ dc->port[PORT_APP1].dl_addr[CH_A] = (offset += dc->config_table.dl_diag_len2); dc->port[PORT_APP1].dl_size[CH_A] = dc->config_table.dl_app1_len - buff_offset; /* App2 port dl configuration */ dc->port[PORT_APP2].dl_addr[CH_A] = (offset += dc->config_table.dl_app1_len); dc->port[PORT_APP2].dl_size[CH_A] = dc->config_table.dl_app2_len - buff_offset; /* Ctrl dl configuration */ dc->port[PORT_CTRL].dl_addr[CH_A] = (offset += dc->config_table.dl_app2_len); 
dc->port[PORT_CTRL].dl_size[CH_A] = dc->config_table.dl_ctrl_len - buff_offset; offset = dc->base_addr + dc->config_table.ul_start; /* Modem Port ul configuration */ dc->port[PORT_MDM].ul_addr[CH_A] = offset; dc->port[PORT_MDM].ul_size[CH_A] = dc->config_table.ul_mdm_len1 - buff_offset; dc->port[PORT_MDM].ul_addr[CH_B] = (offset += dc->config_table.ul_mdm_len1); dc->port[PORT_MDM].ul_size[CH_B] = dc->config_table.ul_mdm_len2 - buff_offset; /* Diag port ul configuration */ dc->port[PORT_DIAG].ul_addr[CH_A] = (offset += dc->config_table.ul_mdm_len2); dc->port[PORT_DIAG].ul_size[CH_A] = dc->config_table.ul_diag_len - buff_offset; /* App1 port ul configuration */ dc->port[PORT_APP1].ul_addr[CH_A] = (offset += dc->config_table.ul_diag_len); dc->port[PORT_APP1].ul_size[CH_A] = dc->config_table.ul_app1_len - buff_offset; /* App2 port ul configuration */ dc->port[PORT_APP2].ul_addr[CH_A] = (offset += dc->config_table.ul_app1_len); dc->port[PORT_APP2].ul_size[CH_A] = dc->config_table.ul_app2_len - buff_offset; /* Ctrl ul configuration */ dc->port[PORT_CTRL].ul_addr[CH_A] = (offset += dc->config_table.ul_app2_len); dc->port[PORT_CTRL].ul_size[CH_A] = dc->config_table.ul_ctrl_len - buff_offset; } /* Dump config table under initalization phase */ #ifdef DEBUG static void dump_table(const struct nozomi *dc) { DBG3("signature: 0x%08X", dc->config_table.signature); DBG3("version: 0x%04X", dc->config_table.version); DBG3("product_information: 0x%04X", \ dc->config_table.product_information); DBG3("toggle enabled: %d", dc->config_table.toggle.enabled); DBG3("toggle up_mdm: %d", dc->config_table.toggle.mdm_ul); DBG3("toggle dl_mdm: %d", dc->config_table.toggle.mdm_dl); DBG3("toggle dl_dbg: %d", dc->config_table.toggle.diag_dl); DBG3("dl_start: 0x%04X", dc->config_table.dl_start); DBG3("dl_mdm_len0: 0x%04X, %d", dc->config_table.dl_mdm_len1, dc->config_table.dl_mdm_len1); DBG3("dl_mdm_len1: 0x%04X, %d", dc->config_table.dl_mdm_len2, dc->config_table.dl_mdm_len2); DBG3("dl_diag_len0: 
0x%04X, %d", dc->config_table.dl_diag_len1, dc->config_table.dl_diag_len1); DBG3("dl_diag_len1: 0x%04X, %d", dc->config_table.dl_diag_len2, dc->config_table.dl_diag_len2); DBG3("dl_app1_len: 0x%04X, %d", dc->config_table.dl_app1_len, dc->config_table.dl_app1_len); DBG3("dl_app2_len: 0x%04X, %d", dc->config_table.dl_app2_len, dc->config_table.dl_app2_len); DBG3("dl_ctrl_len: 0x%04X, %d", dc->config_table.dl_ctrl_len, dc->config_table.dl_ctrl_len); DBG3("ul_start: 0x%04X, %d", dc->config_table.ul_start, dc->config_table.ul_start); DBG3("ul_mdm_len[0]: 0x%04X, %d", dc->config_table.ul_mdm_len1, dc->config_table.ul_mdm_len1); DBG3("ul_mdm_len[1]: 0x%04X, %d", dc->config_table.ul_mdm_len2, dc->config_table.ul_mdm_len2); DBG3("ul_diag_len: 0x%04X, %d", dc->config_table.ul_diag_len, dc->config_table.ul_diag_len); DBG3("ul_app1_len: 0x%04X, %d", dc->config_table.ul_app1_len, dc->config_table.ul_app1_len); DBG3("ul_app2_len: 0x%04X, %d", dc->config_table.ul_app2_len, dc->config_table.ul_app2_len); DBG3("ul_ctrl_len: 0x%04X, %d", dc->config_table.ul_ctrl_len, dc->config_table.ul_ctrl_len); } #else static inline void dump_table(const struct nozomi *dc) { } #endif /* * Read configuration table from card under intalization phase * Returns 1 if ok, else 0 */ static int nozomi_read_config_table(struct nozomi *dc) { read_mem32((u32 *) &dc->config_table, dc->base_addr + 0, sizeof(struct config_table)); if (dc->config_table.signature != CONFIG_MAGIC) { dev_err(&dc->pdev->dev, "ConfigTable Bad! 
0x%08X != 0x%08X\n", dc->config_table.signature, CONFIG_MAGIC); return 0; } if ((dc->config_table.version == 0) || (dc->config_table.toggle.enabled == TOGGLE_VALID)) { int i; DBG1("Second phase, configuring card"); setup_memory(dc); dc->port[PORT_MDM].toggle_ul = dc->config_table.toggle.mdm_ul; dc->port[PORT_MDM].toggle_dl = dc->config_table.toggle.mdm_dl; dc->port[PORT_DIAG].toggle_dl = dc->config_table.toggle.diag_dl; DBG1("toggle ports: MDM UL:%d MDM DL:%d, DIAG DL:%d", dc->port[PORT_MDM].toggle_ul, dc->port[PORT_MDM].toggle_dl, dc->port[PORT_DIAG].toggle_dl); dump_table(dc); for (i = PORT_MDM; i < MAX_PORT; i++) { memset(&dc->port[i].ctrl_dl, 0, sizeof(struct ctrl_dl)); memset(&dc->port[i].ctrl_ul, 0, sizeof(struct ctrl_ul)); } /* Enable control channel */ dc->last_ier = dc->last_ier | CTRL_DL; writew(dc->last_ier, dc->reg_ier); dc->state = NOZOMI_STATE_ALLOCATED; dev_info(&dc->pdev->dev, "Initialization OK!\n"); return 1; } if ((dc->config_table.version > 0) && (dc->config_table.toggle.enabled != TOGGLE_VALID)) { u32 offset = 0; DBG1("First phase: pushing upload buffers, clearing download"); dev_info(&dc->pdev->dev, "Version of card: %d\n", dc->config_table.version); /* Here we should disable all I/O over F32. 
*/
		setup_memory(dc);

		/*
		 * We should send ALL channel pair tokens back along
		 * with reset token
		 */

		/* push upload modem buffers */
		write_mem32(dc->port[PORT_MDM].ul_addr[CH_A],
						(u32 *) &offset, 4);
		write_mem32(dc->port[PORT_MDM].ul_addr[CH_B],
						(u32 *) &offset, 4);

		/* hand the download tokens back to the card in one go */
		writew(MDM_UL | DIAG_DL | MDM_DL, dc->reg_fcr);

		DBG1("First phase done");
	}

	return 1;
}

/*
 * Enable uplink interrupts for one port by setting its bit(s) in the
 * cached IER copy and writing the whole mask back to the card.
 * Callers are expected to hold dc->spin_mutex (not verified here —
 * NOTE(review): confirm against call sites outside this chunk).
 */
/* Enable uplink interrupts  */
static void enable_transmit_ul(enum port_type port, struct nozomi *dc)
{
	/* one IER bit per port, indexed by enum port_type */
	static const u16 mask[] = {MDM_UL, DIAG_UL, APP1_UL, APP2_UL, CTRL_UL};

	if (port < NOZOMI_MAX_PORTS) {
		dc->last_ier |= mask[port];
		writew(dc->last_ier, dc->reg_ier);
	} else {
		dev_err(&dc->pdev->dev, "Called with wrong port?\n");
	}
}

/*
 * Clear a port's uplink interrupt bit(s) in the cached IER and write the
 * result back to the card.
 */
/* Disable uplink interrupts  */
static void disable_transmit_ul(enum port_type port, struct nozomi *dc)
{
	/* complement masks: &= clears only this port's bit */
	static const u16 mask[] =
		{~MDM_UL, ~DIAG_UL, ~APP1_UL, ~APP2_UL, ~CTRL_UL};

	if (port < NOZOMI_MAX_PORTS) {
		dc->last_ier &= mask[port];
		writew(dc->last_ier, dc->reg_ier);
	} else {
		dev_err(&dc->pdev->dev, "Called with wrong port?\n");
	}
}

/*
 * Enable downlink interrupts for one port (same scheme as the uplink
 * variant, different IER bits).
 */
/* Enable downlink interrupts */
static void enable_transmit_dl(enum port_type port, struct nozomi *dc)
{
	static const u16 mask[] = {MDM_DL, DIAG_DL, APP1_DL, APP2_DL, CTRL_DL};

	if (port < NOZOMI_MAX_PORTS) {
		dc->last_ier |= mask[port];
		writew(dc->last_ier, dc->reg_ier);
	} else {
		dev_err(&dc->pdev->dev, "Called with wrong port?\n");
	}
}

/*
 * Disable downlink interrupts for one port.
 */
/* Disable downlink interrupts */
static void disable_transmit_dl(enum port_type port, struct nozomi *dc)
{
	static const u16 mask[] =
		{~MDM_DL, ~DIAG_DL, ~APP1_DL, ~APP2_DL, ~CTRL_DL};

	if (port < NOZOMI_MAX_PORTS) {
		dc->last_ier &= mask[port];
		writew(dc->last_ier, dc->reg_ier);
	} else {
		dev_err(&dc->pdev->dev, "Called with wrong port?\n");
	}
}

/*
 * Return 1 - send buffer to card and ack.
 * Return 0 - don't ack, don't send buffer to card.
 */
/*
 * Drain up to one uplink buffer's worth of data from the port's kfifo
 * and push it to the card at the current toggle's uplink address.
 * Returns 1 if data was written (caller should ack), 0 if the fifo was
 * empty (caller should leave the interrupt un-acked / disabled).
 */
static int send_data(enum port_type index, struct nozomi *dc)
{
	u32 size = 0;
	struct port *port = &dc->port[index];
	const u8 toggle = port->toggle_ul;
	void __iomem *addr = port->ul_addr[toggle];
	const u32 ul_size = port->ul_size[toggle];
	struct tty_struct *tty = tty_port_tty_get(&port->port);

	/* Get data from tty and place in buf for now */
	size = kfifo_out(&port->fifo_ul, dc->send_buf,
			   ul_size < SEND_BUF_MAX ? ul_size : SEND_BUF_MAX);

	if (size == 0) {
		DBG4("No more data to send, disable link:");
		tty_kref_put(tty);
		return 0;
	}

	/* DUMP(buf, size); */

	/* Write length + data: card expects a 4-byte length word first */
	write_mem32(addr, (u32 *) &size, 4);
	write_mem32(addr + 4, (u32 *) dc->send_buf, size);

	/* fifo space was freed; wake writers blocked on the tty */
	if (tty)
		tty_wakeup(tty);

	tty_kref_put(tty);
	return 1;
}

/*
 * Pull one downlink buffer from the card into the tty flip buffer.
 * If all data has been read, return 1, else 0
 */
static int receive_data(enum port_type index, struct nozomi *dc)
{
	u8 buf[RECEIVE_BUF_MAX] = { 0 };
	int size;
	u32 offset = 4;	/* payload follows the 4-byte length word */
	struct port *port = &dc->port[index];
	void __iomem *addr = port->dl_addr[port->toggle_dl];
	struct tty_struct *tty = tty_port_tty_get(&port->port);
	int i, ret;

	if (unlikely(!tty)) {
		/* no reader: report "all read" so the interrupt is acked */
		DBG1("tty not open for port: %d?", index);
		return 1;
	}

	/* first 4 bytes in the buffer hold the payload length */
	read_mem32((u32 *) &size, addr, 4);
	/*  DBG1( "%d bytes port: %d", size, index); */

	if (test_bit(TTY_THROTTLED, &tty->flags)) {
		DBG1("No room in tty, don't read data, don't ack interrupt, "
			"disable interrupt");

		/* disable interrupt in downlink... 
*/ disable_transmit_dl(index, dc); ret = 0; goto put; } if (unlikely(size == 0)) { dev_err(&dc->pdev->dev, "size == 0?\n"); ret = 1; goto put; } while (size > 0) { read_mem32((u32 *) buf, addr + offset, RECEIVE_BUF_MAX); if (size == 1) { tty_insert_flip_char(tty, buf[0], TTY_NORMAL); size = 0; } else if (size < RECEIVE_BUF_MAX) { size -= tty_insert_flip_string(tty, (char *) buf, size); } else { i = tty_insert_flip_string(tty, \ (char *) buf, RECEIVE_BUF_MAX); size -= i; offset += i; } } set_bit(index, &dc->flip); ret = 1; put: tty_kref_put(tty); return ret; } /* Debug for interrupts */ #ifdef DEBUG static char *interrupt2str(u16 interrupt) { static char buf[TMP_BUF_MAX]; char *p = buf; interrupt & MDM_DL1 ? p += snprintf(p, TMP_BUF_MAX, "MDM_DL1 ") : NULL; interrupt & MDM_DL2 ? p += snprintf(p, TMP_BUF_MAX - (p - buf), "MDM_DL2 ") : NULL; interrupt & MDM_UL1 ? p += snprintf(p, TMP_BUF_MAX - (p - buf), "MDM_UL1 ") : NULL; interrupt & MDM_UL2 ? p += snprintf(p, TMP_BUF_MAX - (p - buf), "MDM_UL2 ") : NULL; interrupt & DIAG_DL1 ? p += snprintf(p, TMP_BUF_MAX - (p - buf), "DIAG_DL1 ") : NULL; interrupt & DIAG_DL2 ? p += snprintf(p, TMP_BUF_MAX - (p - buf), "DIAG_DL2 ") : NULL; interrupt & DIAG_UL ? p += snprintf(p, TMP_BUF_MAX - (p - buf), "DIAG_UL ") : NULL; interrupt & APP1_DL ? p += snprintf(p, TMP_BUF_MAX - (p - buf), "APP1_DL ") : NULL; interrupt & APP2_DL ? p += snprintf(p, TMP_BUF_MAX - (p - buf), "APP2_DL ") : NULL; interrupt & APP1_UL ? p += snprintf(p, TMP_BUF_MAX - (p - buf), "APP1_UL ") : NULL; interrupt & APP2_UL ? p += snprintf(p, TMP_BUF_MAX - (p - buf), "APP2_UL ") : NULL; interrupt & CTRL_DL ? p += snprintf(p, TMP_BUF_MAX - (p - buf), "CTRL_DL ") : NULL; interrupt & CTRL_UL ? p += snprintf(p, TMP_BUF_MAX - (p - buf), "CTRL_UL ") : NULL; interrupt & RESET ? 
p += snprintf(p, TMP_BUF_MAX - (p - buf), "RESET ") : NULL; return buf; } #endif /* * Receive flow control * Return 1 - If ok, else 0 */ static int receive_flow_control(struct nozomi *dc) { enum port_type port = PORT_MDM; struct ctrl_dl ctrl_dl; struct ctrl_dl old_ctrl; u16 enable_ier = 0; read_mem32((u32 *) &ctrl_dl, dc->port[PORT_CTRL].dl_addr[CH_A], 2); switch (ctrl_dl.port) { case CTRL_CMD: DBG1("The Base Band sends this value as a response to a " "request for IMSI detach sent over the control " "channel uplink (see section 7.6.1)."); break; case CTRL_MDM: port = PORT_MDM; enable_ier = MDM_DL; break; case CTRL_DIAG: port = PORT_DIAG; enable_ier = DIAG_DL; break; case CTRL_APP1: port = PORT_APP1; enable_ier = APP1_DL; break; case CTRL_APP2: port = PORT_APP2; enable_ier = APP2_DL; if (dc->state == NOZOMI_STATE_ALLOCATED) { /* * After card initialization the flow control * received for APP2 is always the last */ dc->state = NOZOMI_STATE_READY; dev_info(&dc->pdev->dev, "Device READY!\n"); } break; default: dev_err(&dc->pdev->dev, "ERROR: flow control received for non-existing port\n"); return 0; }; DBG1("0x%04X->0x%04X", *((u16 *)&dc->port[port].ctrl_dl), *((u16 *)&ctrl_dl)); old_ctrl = dc->port[port].ctrl_dl; dc->port[port].ctrl_dl = ctrl_dl; if (old_ctrl.CTS == 1 && ctrl_dl.CTS == 0) { DBG1("Disable interrupt (0x%04X) on port: %d", enable_ier, port); disable_transmit_ul(port, dc); } else if (old_ctrl.CTS == 0 && ctrl_dl.CTS == 1) { if (kfifo_len(&dc->port[port].fifo_ul)) { DBG1("Enable interrupt (0x%04X) on port: %d", enable_ier, port); DBG1("Data in buffer [%d], enable transmit! 
", kfifo_len(&dc->port[port].fifo_ul)); enable_transmit_ul(port, dc); } else { DBG1("No data in buffer..."); } } if (*(u16 *)&old_ctrl == *(u16 *)&ctrl_dl) { DBG1(" No change in mctrl"); return 1; } /* Update statistics */ if (old_ctrl.CTS != ctrl_dl.CTS) dc->port[port].tty_icount.cts++; if (old_ctrl.DSR != ctrl_dl.DSR) dc->port[port].tty_icount.dsr++; if (old_ctrl.RI != ctrl_dl.RI) dc->port[port].tty_icount.rng++; if (old_ctrl.DCD != ctrl_dl.DCD) dc->port[port].tty_icount.dcd++; wake_up_interruptible(&dc->port[port].tty_wait); DBG1("port: %d DCD(%d), CTS(%d), RI(%d), DSR(%d)", port, dc->port[port].tty_icount.dcd, dc->port[port].tty_icount.cts, dc->port[port].tty_icount.rng, dc->port[port].tty_icount.dsr); return 1; } static enum ctrl_port_type port2ctrl(enum port_type port, const struct nozomi *dc) { switch (port) { case PORT_MDM: return CTRL_MDM; case PORT_DIAG: return CTRL_DIAG; case PORT_APP1: return CTRL_APP1; case PORT_APP2: return CTRL_APP2; default: dev_err(&dc->pdev->dev, "ERROR: send flow control " \ "received for non-existing port\n"); }; return CTRL_ERROR; } /* * Send flow control, can only update one channel at a time * Return 0 - If we have updated all flow control * Return 1 - If we need to update more flow control, ack current enable more */ static int send_flow_control(struct nozomi *dc) { u32 i, more_flow_control_to_be_updated = 0; u16 *ctrl; for (i = PORT_MDM; i < MAX_PORT; i++) { if (dc->port[i].update_flow_control) { if (more_flow_control_to_be_updated) { /* We have more flow control to be updated */ return 1; } dc->port[i].ctrl_ul.port = port2ctrl(i, dc); ctrl = (u16 *)&dc->port[i].ctrl_ul; write_mem32(dc->port[PORT_CTRL].ul_addr[0], \ (u32 *) ctrl, 2); dc->port[i].update_flow_control = 0; more_flow_control_to_be_updated = 1; } } return 0; } /* * Handle downlink data, ports that are handled are modem and diagnostics * Return 1 - ok * Return 0 - toggle fields are out of sync */ static int handle_data_dl(struct nozomi *dc, enum port_type port, 
u8 *toggle, u16 read_iir, u16 mask1, u16 mask2) { if (*toggle == 0 && read_iir & mask1) { if (receive_data(port, dc)) { writew(mask1, dc->reg_fcr); *toggle = !(*toggle); } if (read_iir & mask2) { if (receive_data(port, dc)) { writew(mask2, dc->reg_fcr); *toggle = !(*toggle); } } } else if (*toggle == 1 && read_iir & mask2) { if (receive_data(port, dc)) { writew(mask2, dc->reg_fcr); *toggle = !(*toggle); } if (read_iir & mask1) { if (receive_data(port, dc)) { writew(mask1, dc->reg_fcr); *toggle = !(*toggle); } } } else { dev_err(&dc->pdev->dev, "port out of sync!, toggle:%d\n", *toggle); return 0; } return 1; } /* * Handle uplink data, this is currently for the modem port * Return 1 - ok * Return 0 - toggle field are out of sync */ static int handle_data_ul(struct nozomi *dc, enum port_type port, u16 read_iir) { u8 *toggle = &(dc->port[port].toggle_ul); if (*toggle == 0 && read_iir & MDM_UL1) { dc->last_ier &= ~MDM_UL; writew(dc->last_ier, dc->reg_ier); if (send_data(port, dc)) { writew(MDM_UL1, dc->reg_fcr); dc->last_ier = dc->last_ier | MDM_UL; writew(dc->last_ier, dc->reg_ier); *toggle = !*toggle; } if (read_iir & MDM_UL2) { dc->last_ier &= ~MDM_UL; writew(dc->last_ier, dc->reg_ier); if (send_data(port, dc)) { writew(MDM_UL2, dc->reg_fcr); dc->last_ier = dc->last_ier | MDM_UL; writew(dc->last_ier, dc->reg_ier); *toggle = !*toggle; } } } else if (*toggle == 1 && read_iir & MDM_UL2) { dc->last_ier &= ~MDM_UL; writew(dc->last_ier, dc->reg_ier); if (send_data(port, dc)) { writew(MDM_UL2, dc->reg_fcr); dc->last_ier = dc->last_ier | MDM_UL; writew(dc->last_ier, dc->reg_ier); *toggle = !*toggle; } if (read_iir & MDM_UL1) { dc->last_ier &= ~MDM_UL; writew(dc->last_ier, dc->reg_ier); if (send_data(port, dc)) { writew(MDM_UL1, dc->reg_fcr); dc->last_ier = dc->last_ier | MDM_UL; writew(dc->last_ier, dc->reg_ier); *toggle = !*toggle; } } } else { writew(read_iir & MDM_UL, dc->reg_fcr); dev_err(&dc->pdev->dev, "port out of sync!\n"); return 0; } return 1; } static irqreturn_t 
interrupt_handler(int irq, void *dev_id) { struct nozomi *dc = dev_id; unsigned int a; u16 read_iir; if (!dc) return IRQ_NONE; spin_lock(&dc->spin_mutex); read_iir = readw(dc->reg_iir); /* Card removed */ if (read_iir == (u16)-1) goto none; /* * Just handle interrupt enabled in IER * (by masking with dc->last_ier) */ read_iir &= dc->last_ier; if (read_iir == 0) goto none; DBG4("%s irq:0x%04X, prev:0x%04X", interrupt2str(read_iir), read_iir, dc->last_ier); if (read_iir & RESET) { if (unlikely(!nozomi_read_config_table(dc))) { dc->last_ier = 0x0; writew(dc->last_ier, dc->reg_ier); dev_err(&dc->pdev->dev, "Could not read status from " "card, we should disable interface\n"); } else { writew(RESET, dc->reg_fcr); } /* No more useful info if this was the reset interrupt. */ goto exit_handler; } if (read_iir & CTRL_UL) { DBG1("CTRL_UL"); dc->last_ier &= ~CTRL_UL; writew(dc->last_ier, dc->reg_ier); if (send_flow_control(dc)) { writew(CTRL_UL, dc->reg_fcr); dc->last_ier = dc->last_ier | CTRL_UL; writew(dc->last_ier, dc->reg_ier); } } if (read_iir & CTRL_DL) { receive_flow_control(dc); writew(CTRL_DL, dc->reg_fcr); } if (read_iir & MDM_DL) { if (!handle_data_dl(dc, PORT_MDM, &(dc->port[PORT_MDM].toggle_dl), read_iir, MDM_DL1, MDM_DL2)) { dev_err(&dc->pdev->dev, "MDM_DL out of sync!\n"); goto exit_handler; } } if (read_iir & MDM_UL) { if (!handle_data_ul(dc, PORT_MDM, read_iir)) { dev_err(&dc->pdev->dev, "MDM_UL out of sync!\n"); goto exit_handler; } } if (read_iir & DIAG_DL) { if (!handle_data_dl(dc, PORT_DIAG, &(dc->port[PORT_DIAG].toggle_dl), read_iir, DIAG_DL1, DIAG_DL2)) { dev_err(&dc->pdev->dev, "DIAG_DL out of sync!\n"); goto exit_handler; } } if (read_iir & DIAG_UL) { dc->last_ier &= ~DIAG_UL; writew(dc->last_ier, dc->reg_ier); if (send_data(PORT_DIAG, dc)) { writew(DIAG_UL, dc->reg_fcr); dc->last_ier = dc->last_ier | DIAG_UL; writew(dc->last_ier, dc->reg_ier); } } if (read_iir & APP1_DL) { if (receive_data(PORT_APP1, dc)) writew(APP1_DL, dc->reg_fcr); } if (read_iir & 
APP1_UL) { dc->last_ier &= ~APP1_UL; writew(dc->last_ier, dc->reg_ier); if (send_data(PORT_APP1, dc)) { writew(APP1_UL, dc->reg_fcr); dc->last_ier = dc->last_ier | APP1_UL; writew(dc->last_ier, dc->reg_ier); } } if (read_iir & APP2_DL) { if (receive_data(PORT_APP2, dc)) writew(APP2_DL, dc->reg_fcr); } if (read_iir & APP2_UL) { dc->last_ier &= ~APP2_UL; writew(dc->last_ier, dc->reg_ier); if (send_data(PORT_APP2, dc)) { writew(APP2_UL, dc->reg_fcr); dc->last_ier = dc->last_ier | APP2_UL; writew(dc->last_ier, dc->reg_ier); } } exit_handler: spin_unlock(&dc->spin_mutex); for (a = 0; a < NOZOMI_MAX_PORTS; a++) { struct tty_struct *tty; if (test_and_clear_bit(a, &dc->flip)) { tty = tty_port_tty_get(&dc->port[a].port); if (tty) tty_flip_buffer_push(tty); tty_kref_put(tty); } } return IRQ_HANDLED; none: spin_unlock(&dc->spin_mutex); return IRQ_NONE; } static void nozomi_get_card_type(struct nozomi *dc) { int i; u32 size = 0; for (i = 0; i < 6; i++) size += pci_resource_len(dc->pdev, i); /* Assume card type F32_8 if no match */ dc->card_type = size == 2048 ? 
F32_2 : F32_8;

	dev_info(&dc->pdev->dev, "Card type is: %d\n", dc->card_type);
}

/*
 * Derive the register window addresses and per-port tokens from the
 * detected card type and reset the cached interrupt state.
 */
/* Setup pci, getting addresses and "setup" struct nozomi */
static void nozomi_setup_private_data(struct nozomi *dc)
{
	/* register block sits halfway through the mapped window —
	 * NOTE(review): presumably per the F32 memory map; confirm */
	void __iomem *offset = dc->base_addr + dc->card_type / 2;
	unsigned int i;

	dc->reg_fcr = (void __iomem *)(offset + R_FCR);
	dc->reg_iir = (void __iomem *)(offset + R_IIR);
	dc->reg_ier = (void __iomem *)(offset + R_IER);
	dc->last_ier = 0;
	dc->flip = 0;

	/* IER bit each port must enable to receive downlink data */
	dc->port[PORT_MDM].token_dl = MDM_DL;
	dc->port[PORT_DIAG].token_dl = DIAG_DL;
	dc->port[PORT_APP1].token_dl = APP1_DL;
	dc->port[PORT_APP2].token_dl = APP2_DL;

	for (i = 0; i < MAX_PORT; i++)
		init_waitqueue_head(&dc->port[i].tty_wait);
}

/* sysfs: expose the detected card type (read-only) */
static ssize_t card_type_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	const struct nozomi *dc = pci_get_drvdata(to_pci_dev(dev));

	return sprintf(buf, "%d\n", dc->card_type);
}
static DEVICE_ATTR(card_type, S_IRUGO, card_type_show, NULL);

/* sysfs: expose the current count of open ttys (read-only) */
static ssize_t open_ttys_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	const struct nozomi *dc = pci_get_drvdata(to_pci_dev(dev));

	return sprintf(buf, "%u\n", dc->open_ttys);
}
static DEVICE_ATTR(open_ttys, S_IRUGO, open_ttys_show, NULL);

/* Create both sysfs files; failures are logged but non-fatal. */
static void make_sysfs_files(struct nozomi *dc)
{
	if (device_create_file(&dc->pdev->dev, &dev_attr_card_type))
		dev_err(&dc->pdev->dev,
			"Could not create sysfs file for card_type\n");
	if (device_create_file(&dc->pdev->dev, &dev_attr_open_ttys))
		dev_err(&dc->pdev->dev,
			"Could not create sysfs file for open_ttys\n");
}

/* Remove the sysfs files created by make_sysfs_files(). */
static void remove_sysfs_files(struct nozomi *dc)
{
	device_remove_file(&dc->pdev->dev, &dev_attr_card_type);
	device_remove_file(&dc->pdev->dev, &dev_attr_open_ttys);
}

/* Allocate memory for one device */
static int __devinit nozomi_card_init(struct pci_dev *pdev,
				      const struct pci_device_id *ent)
{
	resource_size_t start;
	int ret;
	struct nozomi *dc = NULL;
	int ndev_idx;
	int i;

	dev_dbg(&pdev->dev, "Init, new card found\n");

	/* find a free slot in the global device table */
	for (ndev_idx = 0; ndev_idx < ARRAY_SIZE(ndevs); ndev_idx++)
		if (!ndevs[ndev_idx])
			break;

	if (ndev_idx >=
ARRAY_SIZE(ndevs)) { dev_err(&pdev->dev, "no free tty range for this card left\n"); ret = -EIO; goto err; } dc = kzalloc(sizeof(struct nozomi), GFP_KERNEL); if (unlikely(!dc)) { dev_err(&pdev->dev, "Could not allocate memory\n"); ret = -ENOMEM; goto err_free; } dc->pdev = pdev; ret = pci_enable_device(dc->pdev); if (ret) { dev_err(&pdev->dev, "Failed to enable PCI Device\n"); goto err_free; } ret = pci_request_regions(dc->pdev, NOZOMI_NAME); if (ret) { dev_err(&pdev->dev, "I/O address 0x%04x already in use\n", (int) /* nozomi_private.io_addr */ 0); goto err_disable_device; } start = pci_resource_start(dc->pdev, 0); if (start == 0) { dev_err(&pdev->dev, "No I/O address for card detected\n"); ret = -ENODEV; goto err_rel_regs; } /* Find out what card type it is */ nozomi_get_card_type(dc); dc->base_addr = ioremap_nocache(start, dc->card_type); if (!dc->base_addr) { dev_err(&pdev->dev, "Unable to map card MMIO\n"); ret = -ENODEV; goto err_rel_regs; } dc->send_buf = kmalloc(SEND_BUF_MAX, GFP_KERNEL); if (!dc->send_buf) { dev_err(&pdev->dev, "Could not allocate send buffer?\n"); ret = -ENOMEM; goto err_free_sbuf; } for (i = PORT_MDM; i < MAX_PORT; i++) { if (kfifo_alloc(&dc->port[i].fifo_ul, FIFO_BUFFER_SIZE_UL, GFP_ATOMIC)) { dev_err(&pdev->dev, "Could not allocate kfifo buffer\n"); ret = -ENOMEM; goto err_free_kfifo; } } spin_lock_init(&dc->spin_mutex); nozomi_setup_private_data(dc); /* Disable all interrupts */ dc->last_ier = 0; writew(dc->last_ier, dc->reg_ier); ret = request_irq(pdev->irq, &interrupt_handler, IRQF_SHARED, NOZOMI_NAME, dc); if (unlikely(ret)) { dev_err(&pdev->dev, "can't request irq %d\n", pdev->irq); goto err_free_kfifo; } DBG1("base_addr: %p", dc->base_addr); make_sysfs_files(dc); dc->index_start = ndev_idx * MAX_PORT; ndevs[ndev_idx] = dc; pci_set_drvdata(pdev, dc); /* Enable RESET interrupt */ dc->last_ier = RESET; iowrite16(dc->last_ier, dc->reg_ier); dc->state = NOZOMI_STATE_ENABLED; for (i = 0; i < MAX_PORT; i++) { struct device *tty_dev; 
struct port *port = &dc->port[i]; port->dc = dc; mutex_init(&port->tty_sem); tty_port_init(&port->port); port->port.ops = &noz_tty_port_ops; tty_dev = tty_register_device(ntty_driver, dc->index_start + i, &pdev->dev); if (IS_ERR(tty_dev)) { ret = PTR_ERR(tty_dev); dev_err(&pdev->dev, "Could not allocate tty?\n"); goto err_free_tty; } } return 0; err_free_tty: for (i = dc->index_start; i < dc->index_start + MAX_PORT; ++i) tty_unregister_device(ntty_driver, i); err_free_kfifo: for (i = 0; i < MAX_PORT; i++) kfifo_free(&dc->port[i].fifo_ul); err_free_sbuf: kfree(dc->send_buf); iounmap(dc->base_addr); err_rel_regs: pci_release_regions(pdev); err_disable_device: pci_disable_device(pdev); err_free: kfree(dc); err: return ret; } static void __devexit tty_exit(struct nozomi *dc) { unsigned int i; DBG1(" "); flush_scheduled_work(); for (i = 0; i < MAX_PORT; ++i) { struct tty_struct *tty = tty_port_tty_get(&dc->port[i].port); if (tty && list_empty(&tty->hangup_work.entry)) tty_hangup(tty); tty_kref_put(tty); } /* Racy below - surely should wait for scheduled work to be done or complete off a hangup method ? */ while (dc->open_ttys) msleep(1); for (i = dc->index_start; i < dc->index_start + MAX_PORT; ++i) tty_unregister_device(ntty_driver, i); } /* Deallocate memory for one device */ static void __devexit nozomi_card_exit(struct pci_dev *pdev) { int i; struct ctrl_ul ctrl; struct nozomi *dc = pci_get_drvdata(pdev); /* Disable all interrupts */ dc->last_ier = 0; writew(dc->last_ier, dc->reg_ier); tty_exit(dc); /* Send 0x0001, command card to resend the reset token. */ /* This is to get the reset when the module is reloaded. */ ctrl.port = 0x00; ctrl.reserved = 0; ctrl.RTS = 0; ctrl.DTR = 1; DBG1("sending flow control 0x%04X", *((u16 *)&ctrl)); /* Setup dc->reg addresses to we can use defines here */ write_mem32(dc->port[PORT_CTRL].ul_addr[0], (u32 *)&ctrl, 2); writew(CTRL_UL, dc->reg_fcr); /* push the token to the card. 
*/ remove_sysfs_files(dc); free_irq(pdev->irq, dc); for (i = 0; i < MAX_PORT; i++) kfifo_free(&dc->port[i].fifo_ul); kfree(dc->send_buf); iounmap(dc->base_addr); pci_release_regions(pdev); pci_disable_device(pdev); ndevs[dc->index_start / MAX_PORT] = NULL; kfree(dc); } static void set_rts(const struct tty_struct *tty, int rts) { struct port *port = get_port_by_tty(tty); port->ctrl_ul.RTS = rts; port->update_flow_control = 1; enable_transmit_ul(PORT_CTRL, get_dc_by_tty(tty)); } static void set_dtr(const struct tty_struct *tty, int dtr) { struct port *port = get_port_by_tty(tty); DBG1("SETTING DTR index: %d, dtr: %d", tty->index, dtr); port->ctrl_ul.DTR = dtr; port->update_flow_control = 1; enable_transmit_ul(PORT_CTRL, get_dc_by_tty(tty)); } /* * ---------------------------------------------------------------------------- * TTY code * ---------------------------------------------------------------------------- */ static int ntty_install(struct tty_driver *driver, struct tty_struct *tty) { struct port *port = get_port_by_tty(tty); struct nozomi *dc = get_dc_by_tty(tty); int ret; if (!port || !dc || dc->state != NOZOMI_STATE_READY) return -ENODEV; ret = tty_init_termios(tty); if (ret == 0) { tty_driver_kref_get(driver); tty->count++; tty->driver_data = port; driver->ttys[tty->index] = tty; } return ret; } static void ntty_cleanup(struct tty_struct *tty) { tty->driver_data = NULL; } static int ntty_activate(struct tty_port *tport, struct tty_struct *tty) { struct port *port = container_of(tport, struct port, port); struct nozomi *dc = port->dc; unsigned long flags; DBG1("open: %d", port->token_dl); spin_lock_irqsave(&dc->spin_mutex, flags); dc->last_ier = dc->last_ier | port->token_dl; writew(dc->last_ier, dc->reg_ier); dc->open_ttys++; spin_unlock_irqrestore(&dc->spin_mutex, flags); printk("noz: activated %d: %p\n", tty->index, tport); return 0; } static int ntty_open(struct tty_struct *tty, struct file *filp) { struct port *port = tty->driver_data; return 
tty_port_open(&port->port, tty, filp); } static void ntty_shutdown(struct tty_port *tport) { struct port *port = container_of(tport, struct port, port); struct nozomi *dc = port->dc; unsigned long flags; DBG1("close: %d", port->token_dl); spin_lock_irqsave(&dc->spin_mutex, flags); dc->last_ier &= ~(port->token_dl); writew(dc->last_ier, dc->reg_ier); dc->open_ttys--; spin_unlock_irqrestore(&dc->spin_mutex, flags); printk("noz: shutdown %p\n", tport); } static void ntty_close(struct tty_struct *tty, struct file *filp) { struct port *port = tty->driver_data; if (port) tty_port_close(&port->port, tty, filp); } static void ntty_hangup(struct tty_struct *tty) { struct port *port = tty->driver_data; tty_port_hangup(&port->port); } /* * called when the userspace process writes to the tty (/dev/noz*). * Data is inserted into a fifo, which is then read and transfered to the modem. */ static int ntty_write(struct tty_struct *tty, const unsigned char *buffer, int count) { int rval = -EINVAL; struct nozomi *dc = get_dc_by_tty(tty); struct port *port = tty->driver_data; unsigned long flags; /* DBG1( "WRITEx: %d, index = %d", count, index); */ if (!dc || !port) return -ENODEV; mutex_lock(&port->tty_sem); if (unlikely(!port->port.count)) { DBG1(" "); goto exit; } rval = kfifo_in(&port->fifo_ul, (unsigned char *)buffer, count); /* notify card */ if (unlikely(dc == NULL)) { DBG1("No device context?"); goto exit; } spin_lock_irqsave(&dc->spin_mutex, flags); /* CTS is only valid on the modem channel */ if (port == &(dc->port[PORT_MDM])) { if (port->ctrl_dl.CTS) { DBG4("Enable interrupt"); enable_transmit_ul(tty->index % MAX_PORT, dc); } else { dev_err(&dc->pdev->dev, "CTS not active on modem port?\n"); } } else { enable_transmit_ul(tty->index % MAX_PORT, dc); } spin_unlock_irqrestore(&dc->spin_mutex, flags); exit: mutex_unlock(&port->tty_sem); return rval; } /* * Calculate how much is left in device * This method is called by the upper tty layer. 
* #according to sources N_TTY.c it expects a value >= 0 and * does not check for negative values. * * If the port is unplugged report lots of room and let the bits * dribble away so we don't block anything. */ static int ntty_write_room(struct tty_struct *tty) { struct port *port = tty->driver_data; int room = 4096; const struct nozomi *dc = get_dc_by_tty(tty); if (dc) { mutex_lock(&port->tty_sem); if (port->port.count) room = port->fifo_ul.size - kfifo_len(&port->fifo_ul); mutex_unlock(&port->tty_sem); } return room; } /* Gets io control parameters */ static int ntty_tiocmget(struct tty_struct *tty, struct file *file) { const struct port *port = tty->driver_data; const struct ctrl_dl *ctrl_dl = &port->ctrl_dl; const struct ctrl_ul *ctrl_ul = &port->ctrl_ul; /* Note: these could change under us but it is not clear this matters if so */ return (ctrl_ul->RTS ? TIOCM_RTS : 0) | (ctrl_ul->DTR ? TIOCM_DTR : 0) | (ctrl_dl->DCD ? TIOCM_CAR : 0) | (ctrl_dl->RI ? TIOCM_RNG : 0) | (ctrl_dl->DSR ? TIOCM_DSR : 0) | (ctrl_dl->CTS ? 
TIOCM_CTS : 0); } /* Sets io controls parameters */ static int ntty_tiocmset(struct tty_struct *tty, struct file *file, unsigned int set, unsigned int clear) { struct nozomi *dc = get_dc_by_tty(tty); unsigned long flags; spin_lock_irqsave(&dc->spin_mutex, flags); if (set & TIOCM_RTS) set_rts(tty, 1); else if (clear & TIOCM_RTS) set_rts(tty, 0); if (set & TIOCM_DTR) set_dtr(tty, 1); else if (clear & TIOCM_DTR) set_dtr(tty, 0); spin_unlock_irqrestore(&dc->spin_mutex, flags); return 0; } static int ntty_cflags_changed(struct port *port, unsigned long flags, struct async_icount *cprev) { const struct async_icount cnow = port->tty_icount; int ret; ret = ((flags & TIOCM_RNG) && (cnow.rng != cprev->rng)) || ((flags & TIOCM_DSR) && (cnow.dsr != cprev->dsr)) || ((flags & TIOCM_CD) && (cnow.dcd != cprev->dcd)) || ((flags & TIOCM_CTS) && (cnow.cts != cprev->cts)); *cprev = cnow; return ret; } static int ntty_ioctl_tiocgicount(struct port *port, void __user *argp) { const struct async_icount cnow = port->tty_icount; struct serial_icounter_struct icount; icount.cts = cnow.cts; icount.dsr = cnow.dsr; icount.rng = cnow.rng; icount.dcd = cnow.dcd; icount.rx = cnow.rx; icount.tx = cnow.tx; icount.frame = cnow.frame; icount.overrun = cnow.overrun; icount.parity = cnow.parity; icount.brk = cnow.brk; icount.buf_overrun = cnow.buf_overrun; return copy_to_user(argp, &icount, sizeof(icount)) ? 
-EFAULT : 0; } static int ntty_ioctl(struct tty_struct *tty, struct file *file, unsigned int cmd, unsigned long arg) { struct port *port = tty->driver_data; void __user *argp = (void __user *)arg; int rval = -ENOIOCTLCMD; DBG1("******** IOCTL, cmd: %d", cmd); switch (cmd) { case TIOCMIWAIT: { struct async_icount cprev = port->tty_icount; rval = wait_event_interruptible(port->tty_wait, ntty_cflags_changed(port, arg, &cprev)); break; } case TIOCGICOUNT: rval = ntty_ioctl_tiocgicount(port, argp); break; default: DBG1("ERR: 0x%08X, %d", cmd, cmd); break; }; return rval; } /* * Called by the upper tty layer when tty buffers are ready * to receive data again after a call to throttle. */ static void ntty_unthrottle(struct tty_struct *tty) { struct nozomi *dc = get_dc_by_tty(tty); unsigned long flags; DBG1("UNTHROTTLE"); spin_lock_irqsave(&dc->spin_mutex, flags); enable_transmit_dl(tty->index % MAX_PORT, dc); set_rts(tty, 1); spin_unlock_irqrestore(&dc->spin_mutex, flags); } /* * Called by the upper tty layer when the tty buffers are almost full. * The driver should stop send more data. 
 */
static void ntty_throttle(struct tty_struct *tty)
{
	struct nozomi *dc = get_dc_by_tty(tty);
	unsigned long flags;

	DBG1("THROTTLE");
	spin_lock_irqsave(&dc->spin_mutex, flags);
	/* drop RTS so the modem stops sending */
	set_rts(tty, 0);
	spin_unlock_irqrestore(&dc->spin_mutex, flags);
}

/* Returns number of chars in buffer, called by tty layer */
static s32 ntty_chars_in_buffer(struct tty_struct *tty)
{
	struct port *port = tty->driver_data;
	struct nozomi *dc = get_dc_by_tty(tty);
	s32 rval = 0;

	if (unlikely(!dc || !port)) {
		goto exit_in_buffer;
	}

	if (unlikely(!port->port.count)) {
		dev_err(&dc->pdev->dev, "No tty open?\n");
		goto exit_in_buffer;
	}

	/* bytes queued for uplink, still waiting to go to the card */
	rval = kfifo_len(&port->fifo_ul);

exit_in_buffer:
	return rval;
}

/* tty_port callbacks: first open / last close per port */
static const struct tty_port_operations noz_tty_port_ops = {
	.activate	= ntty_activate,
	.shutdown	= ntty_shutdown,
};

/* tty-layer entry points for the /dev/noz* devices */
static const struct tty_operations tty_ops = {
	.ioctl = ntty_ioctl,
	.open = ntty_open,
	.close = ntty_close,
	.hangup = ntty_hangup,
	.write = ntty_write,
	.write_room = ntty_write_room,
	.unthrottle = ntty_unthrottle,
	.throttle = ntty_throttle,
	.chars_in_buffer = ntty_chars_in_buffer,
	.tiocmget = ntty_tiocmget,
	.tiocmset = ntty_tiocmset,
	.install = ntty_install,
	.cleanup = ntty_cleanup,
};

/* Module initialization */
static struct pci_driver nozomi_driver = {
	.name = NOZOMI_NAME,
	.id_table = nozomi_pci_tbl,
	.probe = nozomi_card_init,
	.remove = __devexit_p(nozomi_card_exit),
};

/*
 * Module entry point: allocate and register the tty driver, then the
 * PCI driver.  Unwinds in reverse order on failure.
 */
static __init int nozomi_init(void)
{
	int ret;

	printk(KERN_INFO "Initializing %s\n", VERSION_STRING);

	ntty_driver = alloc_tty_driver(NTTY_TTY_MAXMINORS);
	if (!ntty_driver)
		return -ENOMEM;

	ntty_driver->owner = THIS_MODULE;
	ntty_driver->driver_name = NOZOMI_NAME_TTY;
	ntty_driver->name = "noz";
	ntty_driver->major = 0;		/* dynamic major */
	ntty_driver->type = TTY_DRIVER_TYPE_SERIAL;
	ntty_driver->subtype = SERIAL_TYPE_NORMAL;
	ntty_driver->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV;
	ntty_driver->init_termios = tty_std_termios;
	ntty_driver->init_termios.c_cflag = B115200 | CS8 | CREAD |
						HUPCL | CLOCAL;
	ntty_driver->init_termios.c_ispeed = 115200;
	ntty_driver->init_termios.c_ospeed = 115200;
	tty_set_operations(ntty_driver, &tty_ops);

	ret = tty_register_driver(ntty_driver);
	if (ret) {
		printk(KERN_ERR "Nozomi: failed to register ntty driver\n");
		goto free_tty;
	}

	ret = pci_register_driver(&nozomi_driver);
	if (ret) {
		printk(KERN_ERR "Nozomi: can't register pci driver\n");
		goto unr_tty;
	}

	return 0;

unr_tty:
	tty_unregister_driver(ntty_driver);
free_tty:
	put_tty_driver(ntty_driver);
	return ret;
}

/* Module exit: unregister in reverse order of nozomi_init(). */
static __exit void nozomi_exit(void)
{
	printk(KERN_INFO "Unloading %s\n", DRIVER_DESC);
	pci_unregister_driver(&nozomi_driver);
	tty_unregister_driver(ntty_driver);
	put_tty_driver(ntty_driver);
}

module_init(nozomi_init);
module_exit(nozomi_exit);

module_param(debug, int, S_IRUGO | S_IWUSR);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION(DRIVER_DESC);
gpl-2.0
brunomars/semc-kernel-haida
block/noop-iosched.c
751
2656
/*
 * elevator noop
 */
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/init.h>

/* Per-queue private state: a single FIFO list of pending requests. */
struct noop_data {
	struct list_head queue;
};

/*
 * The block core merged @next into @rq; @next's sectors now live in
 * @rq, so all we have to do is drop @next from our FIFO.
 */
static void noop_merged_requests(struct request_queue *q, struct request *rq,
				 struct request *next)
{
	list_del_init(&next->queuelist);
}

/*
 * Hand the oldest pending request (if any) to the device's dispatch
 * queue. Returns 1 when a request was dispatched, 0 when idle.
 */
static int noop_dispatch(struct request_queue *q, int force)
{
	struct noop_data *data = q->elevator->elevator_data;
	struct request *oldest;

	if (list_empty(&data->queue))
		return 0;

	oldest = list_entry(data->queue.next, struct request, queuelist);
	list_del_init(&oldest->queuelist);
	elv_dispatch_sort(q, oldest);
	return 1;
}

/* Queue a new request at the tail: strict FIFO, no reordering at all. */
static void noop_add_request(struct request_queue *q, struct request *rq)
{
	struct noop_data *data = q->elevator->elevator_data;

	list_add_tail(&rq->queuelist, &data->queue);
}

/* Nonzero when no requests are pending in the scheduler. */
static int noop_queue_empty(struct request_queue *q)
{
	struct noop_data *data = q->elevator->elevator_data;

	return list_empty(&data->queue);
}

/* Request queued immediately before @rq, or NULL if @rq is at the head. */
static struct request *
noop_former_request(struct request_queue *q, struct request *rq)
{
	struct noop_data *data = q->elevator->elevator_data;
	struct list_head *prev = rq->queuelist.prev;

	return (prev == &data->queue) ? NULL :
		list_entry(prev, struct request, queuelist);
}

/* Request queued immediately after @rq, or NULL if @rq is at the tail. */
static struct request *
noop_latter_request(struct request_queue *q, struct request *rq)
{
	struct noop_data *data = q->elevator->elevator_data;
	struct list_head *next = rq->queuelist.next;

	return (next == &data->queue) ? NULL :
		list_entry(next, struct request, queuelist);
}

/* Allocate the per-queue FIFO on the queue's home NUMA node. */
static void *noop_init_queue(struct request_queue *q)
{
	struct noop_data *data;

	data = kmalloc_node(sizeof(*data), GFP_KERNEL, q->node);
	if (!data)
		return NULL;
	INIT_LIST_HEAD(&data->queue);
	return data;
}

/* Tear down per-queue state; the FIFO must already have been drained. */
static void noop_exit_queue(struct elevator_queue *e)
{
	struct noop_data *data = e->elevator_data;

	BUG_ON(!list_empty(&data->queue));
	kfree(data);
}

static struct elevator_type elevator_noop = {
	.ops = {
		.elevator_merge_req_fn		= noop_merged_requests,
		.elevator_dispatch_fn		= noop_dispatch,
		.elevator_add_req_fn		= noop_add_request,
		.elevator_queue_empty_fn	= noop_queue_empty,
		.elevator_former_req_fn		= noop_former_request,
		.elevator_latter_req_fn		= noop_latter_request,
		.elevator_init_fn		= noop_init_queue,
		.elevator_exit_fn		= noop_exit_queue,
	},
	.elevator_name = "noop",
	.elevator_owner = THIS_MODULE,
};

static int __init noop_init(void)
{
	elv_register(&elevator_noop);

	return 0;
}

static void __exit noop_exit(void)
{
	elv_unregister(&elevator_noop);
}

module_init(noop_init);
module_exit(noop_exit);

MODULE_AUTHOR("Jens Axboe");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("No-op IO scheduler");
gpl-2.0
Dabug123/owlCore64
drivers/pci/hotplug/cpqphp_ctrl.c
2287
76931
/* * Compaq Hot Plug Controller Driver * * Copyright (C) 1995,2001 Compaq Computer Corporation * Copyright (C) 2001 Greg Kroah-Hartman (greg@kroah.com) * Copyright (C) 2001 IBM Corp. * * All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or (at * your option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or * NON INFRINGEMENT. See the GNU General Public License for more * details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * * Send feedback to <greg@kroah.com> * */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/slab.h> #include <linux/workqueue.h> #include <linux/interrupt.h> #include <linux/delay.h> #include <linux/wait.h> #include <linux/pci.h> #include <linux/pci_hotplug.h> #include <linux/kthread.h> #include "cpqphp.h" static u32 configure_new_device(struct controller* ctrl, struct pci_func *func, u8 behind_bridge, struct resource_lists *resources); static int configure_new_function(struct controller* ctrl, struct pci_func *func, u8 behind_bridge, struct resource_lists *resources); static void interrupt_event_handler(struct controller *ctrl); static struct task_struct *cpqhp_event_thread; static unsigned long pushbutton_pending; /* = 0 */ /* delay is in jiffies to wait for */ static void long_delay(int delay) { /* * XXX(hch): if someone is bored please convert all callers * to call msleep_interruptible directly. They really want * to specify timeouts in natural units and spend a lot of * effort converting them to jiffies.. 
*/ msleep_interruptible(jiffies_to_msecs(delay)); } /* FIXME: The following line needs to be somewhere else... */ #define WRONG_BUS_FREQUENCY 0x07 static u8 handle_switch_change(u8 change, struct controller * ctrl) { int hp_slot; u8 rc = 0; u16 temp_word; struct pci_func *func; struct event_info *taskInfo; if (!change) return 0; /* Switch Change */ dbg("cpqsbd: Switch interrupt received.\n"); for (hp_slot = 0; hp_slot < 6; hp_slot++) { if (change & (0x1L << hp_slot)) { /* * this one changed. */ func = cpqhp_slot_find(ctrl->bus, (hp_slot + ctrl->slot_device_offset), 0); /* this is the structure that tells the worker thread * what to do */ taskInfo = &(ctrl->event_queue[ctrl->next_event]); ctrl->next_event = (ctrl->next_event + 1) % 10; taskInfo->hp_slot = hp_slot; rc++; temp_word = ctrl->ctrl_int_comp >> 16; func->presence_save = (temp_word >> hp_slot) & 0x01; func->presence_save |= (temp_word >> (hp_slot + 7)) & 0x02; if (ctrl->ctrl_int_comp & (0x1L << hp_slot)) { /* * Switch opened */ func->switch_save = 0; taskInfo->event_type = INT_SWITCH_OPEN; } else { /* * Switch closed */ func->switch_save = 0x10; taskInfo->event_type = INT_SWITCH_CLOSE; } } } return rc; } /** * cpqhp_find_slot - find the struct slot of given device * @ctrl: scan lots of this controller * @device: the device id to find */ static struct slot *cpqhp_find_slot(struct controller *ctrl, u8 device) { struct slot *slot = ctrl->slot; while (slot && (slot->device != device)) slot = slot->next; return slot; } static u8 handle_presence_change(u16 change, struct controller * ctrl) { int hp_slot; u8 rc = 0; u8 temp_byte; u16 temp_word; struct pci_func *func; struct event_info *taskInfo; struct slot *p_slot; if (!change) return 0; /* * Presence Change */ dbg("cpqsbd: Presence/Notify input change.\n"); dbg(" Changed bits are 0x%4.4x\n", change ); for (hp_slot = 0; hp_slot < 6; hp_slot++) { if (change & (0x0101 << hp_slot)) { /* * this one changed. 
*/ func = cpqhp_slot_find(ctrl->bus, (hp_slot + ctrl->slot_device_offset), 0); taskInfo = &(ctrl->event_queue[ctrl->next_event]); ctrl->next_event = (ctrl->next_event + 1) % 10; taskInfo->hp_slot = hp_slot; rc++; p_slot = cpqhp_find_slot(ctrl, hp_slot + (readb(ctrl->hpc_reg + SLOT_MASK) >> 4)); if (!p_slot) return 0; /* If the switch closed, must be a button * If not in button mode, nevermind */ if (func->switch_save && (ctrl->push_button == 1)) { temp_word = ctrl->ctrl_int_comp >> 16; temp_byte = (temp_word >> hp_slot) & 0x01; temp_byte |= (temp_word >> (hp_slot + 7)) & 0x02; if (temp_byte != func->presence_save) { /* * button Pressed (doesn't do anything) */ dbg("hp_slot %d button pressed\n", hp_slot); taskInfo->event_type = INT_BUTTON_PRESS; } else { /* * button Released - TAKE ACTION!!!! */ dbg("hp_slot %d button released\n", hp_slot); taskInfo->event_type = INT_BUTTON_RELEASE; /* Cancel if we are still blinking */ if ((p_slot->state == BLINKINGON_STATE) || (p_slot->state == BLINKINGOFF_STATE)) { taskInfo->event_type = INT_BUTTON_CANCEL; dbg("hp_slot %d button cancel\n", hp_slot); } else if ((p_slot->state == POWERON_STATE) || (p_slot->state == POWEROFF_STATE)) { /* info(msg_button_ignore, p_slot->number); */ taskInfo->event_type = INT_BUTTON_IGNORE; dbg("hp_slot %d button ignore\n", hp_slot); } } } else { /* Switch is open, assume a presence change * Save the presence state */ temp_word = ctrl->ctrl_int_comp >> 16; func->presence_save = (temp_word >> hp_slot) & 0x01; func->presence_save |= (temp_word >> (hp_slot + 7)) & 0x02; if ((!(ctrl->ctrl_int_comp & (0x010000 << hp_slot))) || (!(ctrl->ctrl_int_comp & (0x01000000 << hp_slot)))) { /* Present */ taskInfo->event_type = INT_PRESENCE_ON; } else { /* Not Present */ taskInfo->event_type = INT_PRESENCE_OFF; } } } } return rc; } static u8 handle_power_fault(u8 change, struct controller * ctrl) { int hp_slot; u8 rc = 0; struct pci_func *func; struct event_info *taskInfo; if (!change) return 0; /* * power fault */ 
info("power fault interrupt\n"); for (hp_slot = 0; hp_slot < 6; hp_slot++) { if (change & (0x01 << hp_slot)) { /* * this one changed. */ func = cpqhp_slot_find(ctrl->bus, (hp_slot + ctrl->slot_device_offset), 0); taskInfo = &(ctrl->event_queue[ctrl->next_event]); ctrl->next_event = (ctrl->next_event + 1) % 10; taskInfo->hp_slot = hp_slot; rc++; if (ctrl->ctrl_int_comp & (0x00000100 << hp_slot)) { /* * power fault Cleared */ func->status = 0x00; taskInfo->event_type = INT_POWER_FAULT_CLEAR; } else { /* * power fault */ taskInfo->event_type = INT_POWER_FAULT; if (ctrl->rev < 4) { amber_LED_on (ctrl, hp_slot); green_LED_off (ctrl, hp_slot); set_SOGO (ctrl); /* this is a fatal condition, we want * to crash the machine to protect from * data corruption. simulated_NMI * shouldn't ever return */ /* FIXME simulated_NMI(hp_slot, ctrl); */ /* The following code causes a software * crash just in case simulated_NMI did * return */ /*FIXME panic(msg_power_fault); */ } else { /* set power fault status for this board */ func->status = 0xFF; info("power fault bit %x set\n", hp_slot); } } } } return rc; } /** * sort_by_size - sort nodes on the list by their length, smallest first. 
* @head: list to sort */ static int sort_by_size(struct pci_resource **head) { struct pci_resource *current_res; struct pci_resource *next_res; int out_of_order = 1; if (!(*head)) return 1; if (!((*head)->next)) return 0; while (out_of_order) { out_of_order = 0; /* Special case for swapping list head */ if (((*head)->next) && ((*head)->length > (*head)->next->length)) { out_of_order++; current_res = *head; *head = (*head)->next; current_res->next = (*head)->next; (*head)->next = current_res; } current_res = *head; while (current_res->next && current_res->next->next) { if (current_res->next->length > current_res->next->next->length) { out_of_order++; next_res = current_res->next; current_res->next = current_res->next->next; current_res = current_res->next; next_res->next = current_res->next; current_res->next = next_res; } else current_res = current_res->next; } } /* End of out_of_order loop */ return 0; } /** * sort_by_max_size - sort nodes on the list by their length, largest first. * @head: list to sort */ static int sort_by_max_size(struct pci_resource **head) { struct pci_resource *current_res; struct pci_resource *next_res; int out_of_order = 1; if (!(*head)) return 1; if (!((*head)->next)) return 0; while (out_of_order) { out_of_order = 0; /* Special case for swapping list head */ if (((*head)->next) && ((*head)->length < (*head)->next->length)) { out_of_order++; current_res = *head; *head = (*head)->next; current_res->next = (*head)->next; (*head)->next = current_res; } current_res = *head; while (current_res->next && current_res->next->next) { if (current_res->next->length < current_res->next->next->length) { out_of_order++; next_res = current_res->next; current_res->next = current_res->next->next; current_res = current_res->next; next_res->next = current_res->next; current_res->next = next_res; } else current_res = current_res->next; } } /* End of out_of_order loop */ return 0; } /** * do_pre_bridge_resource_split - find node of resources that are unused * 
@head: new list head * @orig_head: original list head * @alignment: max node size (?) */ static struct pci_resource *do_pre_bridge_resource_split(struct pci_resource **head, struct pci_resource **orig_head, u32 alignment) { struct pci_resource *prevnode = NULL; struct pci_resource *node; struct pci_resource *split_node; u32 rc; u32 temp_dword; dbg("do_pre_bridge_resource_split\n"); if (!(*head) || !(*orig_head)) return NULL; rc = cpqhp_resource_sort_and_combine(head); if (rc) return NULL; if ((*head)->base != (*orig_head)->base) return NULL; if ((*head)->length == (*orig_head)->length) return NULL; /* If we got here, there the bridge requires some of the resource, but * we may be able to split some off of the front */ node = *head; if (node->length & (alignment -1)) { /* this one isn't an aligned length, so we'll make a new entry * and split it up. */ split_node = kmalloc(sizeof(*split_node), GFP_KERNEL); if (!split_node) return NULL; temp_dword = (node->length | (alignment-1)) + 1 - alignment; split_node->base = node->base; split_node->length = temp_dword; node->length -= temp_dword; node->base += split_node->length; /* Put it in the list */ *head = split_node; split_node->next = node; } if (node->length < alignment) return NULL; /* Now unlink it */ if (*head == node) { *head = node->next; } else { prevnode = *head; while (prevnode->next != node) prevnode = prevnode->next; prevnode->next = node->next; } node->next = NULL; return node; } /** * do_bridge_resource_split - find one node of resources that aren't in use * @head: list head * @alignment: max node size (?) 
*/ static struct pci_resource *do_bridge_resource_split(struct pci_resource **head, u32 alignment) { struct pci_resource *prevnode = NULL; struct pci_resource *node; u32 rc; u32 temp_dword; rc = cpqhp_resource_sort_and_combine(head); if (rc) return NULL; node = *head; while (node->next) { prevnode = node; node = node->next; kfree(prevnode); } if (node->length < alignment) goto error; if (node->base & (alignment - 1)) { /* Short circuit if adjusted size is too small */ temp_dword = (node->base | (alignment-1)) + 1; if ((node->length - (temp_dword - node->base)) < alignment) goto error; node->length -= (temp_dword - node->base); node->base = temp_dword; } if (node->length & (alignment - 1)) /* There's stuff in use after this node */ goto error; return node; error: kfree(node); return NULL; } /** * get_io_resource - find first node of given size not in ISA aliasing window. * @head: list to search * @size: size of node to find, must be a power of two. * * Description: This function sorts the resource list by size and then returns * returns the first node of "size" length that is not in the ISA aliasing * window. If it finds a node larger than "size" it will split it up. 
*/ static struct pci_resource *get_io_resource(struct pci_resource **head, u32 size) { struct pci_resource *prevnode; struct pci_resource *node; struct pci_resource *split_node; u32 temp_dword; if (!(*head)) return NULL; if (cpqhp_resource_sort_and_combine(head)) return NULL; if (sort_by_size(head)) return NULL; for (node = *head; node; node = node->next) { if (node->length < size) continue; if (node->base & (size - 1)) { /* this one isn't base aligned properly * so we'll make a new entry and split it up */ temp_dword = (node->base | (size-1)) + 1; /* Short circuit if adjusted size is too small */ if ((node->length - (temp_dword - node->base)) < size) continue; split_node = kmalloc(sizeof(*split_node), GFP_KERNEL); if (!split_node) return NULL; split_node->base = node->base; split_node->length = temp_dword - node->base; node->base = temp_dword; node->length -= split_node->length; /* Put it in the list */ split_node->next = node->next; node->next = split_node; } /* End of non-aligned base */ /* Don't need to check if too small since we already did */ if (node->length > size) { /* this one is longer than we need * so we'll make a new entry and split it up */ split_node = kmalloc(sizeof(*split_node), GFP_KERNEL); if (!split_node) return NULL; split_node->base = node->base + size; split_node->length = node->length - size; node->length = size; /* Put it in the list */ split_node->next = node->next; node->next = split_node; } /* End of too big on top end */ /* For IO make sure it's not in the ISA aliasing space */ if (node->base & 0x300L) continue; /* If we got here, then it is the right size * Now take it out of the list and break */ if (*head == node) { *head = node->next; } else { prevnode = *head; while (prevnode->next != node) prevnode = prevnode->next; prevnode->next = node->next; } node->next = NULL; break; } return node; } /** * get_max_resource - get largest node which has at least the given size. 
* @head: the list to search the node in * @size: the minimum size of the node to find * * Description: Gets the largest node that is at least "size" big from the * list pointed to by head. It aligns the node on top and bottom * to "size" alignment before returning it. */ static struct pci_resource *get_max_resource(struct pci_resource **head, u32 size) { struct pci_resource *max; struct pci_resource *temp; struct pci_resource *split_node; u32 temp_dword; if (cpqhp_resource_sort_and_combine(head)) return NULL; if (sort_by_max_size(head)) return NULL; for (max = *head; max; max = max->next) { /* If not big enough we could probably just bail, * instead we'll continue to the next. */ if (max->length < size) continue; if (max->base & (size - 1)) { /* this one isn't base aligned properly * so we'll make a new entry and split it up */ temp_dword = (max->base | (size-1)) + 1; /* Short circuit if adjusted size is too small */ if ((max->length - (temp_dword - max->base)) < size) continue; split_node = kmalloc(sizeof(*split_node), GFP_KERNEL); if (!split_node) return NULL; split_node->base = max->base; split_node->length = temp_dword - max->base; max->base = temp_dword; max->length -= split_node->length; split_node->next = max->next; max->next = split_node; } if ((max->base + max->length) & (size - 1)) { /* this one isn't end aligned properly at the top * so we'll make a new entry and split it up */ split_node = kmalloc(sizeof(*split_node), GFP_KERNEL); if (!split_node) return NULL; temp_dword = ((max->base + max->length) & ~(size - 1)); split_node->base = temp_dword; split_node->length = max->length + max->base - split_node->base; max->length -= split_node->length; split_node->next = max->next; max->next = split_node; } /* Make sure it didn't shrink too much when we aligned it */ if (max->length < size) continue; /* Now take it out of the list */ temp = *head; if (temp == max) { *head = max->next; } else { while (temp && temp->next != max) { temp = temp->next; } temp->next = 
max->next; } max->next = NULL; break; } return max; } /** * get_resource - find resource of given size and split up larger ones. * @head: the list to search for resources * @size: the size limit to use * * Description: This function sorts the resource list by size and then * returns the first node of "size" length. If it finds a node * larger than "size" it will split it up. * * size must be a power of two. */ static struct pci_resource *get_resource(struct pci_resource **head, u32 size) { struct pci_resource *prevnode; struct pci_resource *node; struct pci_resource *split_node; u32 temp_dword; if (cpqhp_resource_sort_and_combine(head)) return NULL; if (sort_by_size(head)) return NULL; for (node = *head; node; node = node->next) { dbg("%s: req_size =%x node=%p, base=%x, length=%x\n", __func__, size, node, node->base, node->length); if (node->length < size) continue; if (node->base & (size - 1)) { dbg("%s: not aligned\n", __func__); /* this one isn't base aligned properly * so we'll make a new entry and split it up */ temp_dword = (node->base | (size-1)) + 1; /* Short circuit if adjusted size is too small */ if ((node->length - (temp_dword - node->base)) < size) continue; split_node = kmalloc(sizeof(*split_node), GFP_KERNEL); if (!split_node) return NULL; split_node->base = node->base; split_node->length = temp_dword - node->base; node->base = temp_dword; node->length -= split_node->length; split_node->next = node->next; node->next = split_node; } /* End of non-aligned base */ /* Don't need to check if too small since we already did */ if (node->length > size) { dbg("%s: too big\n", __func__); /* this one is longer than we need * so we'll make a new entry and split it up */ split_node = kmalloc(sizeof(*split_node), GFP_KERNEL); if (!split_node) return NULL; split_node->base = node->base + size; split_node->length = node->length - size; node->length = size; /* Put it in the list */ split_node->next = node->next; node->next = split_node; } /* End of too big on top end 
*/ dbg("%s: got one!!!\n", __func__); /* If we got here, then it is the right size * Now take it out of the list */ if (*head == node) { *head = node->next; } else { prevnode = *head; while (prevnode->next != node) prevnode = prevnode->next; prevnode->next = node->next; } node->next = NULL; break; } return node; } /** * cpqhp_resource_sort_and_combine - sort nodes by base addresses and clean up * @head: the list to sort and clean up * * Description: Sorts all of the nodes in the list in ascending order by * their base addresses. Also does garbage collection by * combining adjacent nodes. * * Returns %0 if success. */ int cpqhp_resource_sort_and_combine(struct pci_resource **head) { struct pci_resource *node1; struct pci_resource *node2; int out_of_order = 1; dbg("%s: head = %p, *head = %p\n", __func__, head, *head); if (!(*head)) return 1; dbg("*head->next = %p\n",(*head)->next); if (!(*head)->next) return 0; /* only one item on the list, already sorted! */ dbg("*head->base = 0x%x\n",(*head)->base); dbg("*head->next->base = 0x%x\n",(*head)->next->base); while (out_of_order) { out_of_order = 0; /* Special case for swapping list head */ if (((*head)->next) && ((*head)->base > (*head)->next->base)) { node1 = *head; (*head) = (*head)->next; node1->next = (*head)->next; (*head)->next = node1; out_of_order++; } node1 = (*head); while (node1->next && node1->next->next) { if (node1->next->base > node1->next->next->base) { out_of_order++; node2 = node1->next; node1->next = node1->next->next; node1 = node1->next; node2->next = node1->next; node1->next = node2; } else node1 = node1->next; } } /* End of out_of_order loop */ node1 = *head; while (node1 && node1->next) { if ((node1->base + node1->length) == node1->next->base) { /* Combine */ dbg("8..\n"); node1->length += node1->next->length; node2 = node1->next; node1->next = node1->next->next; kfree(node2); } else node1 = node1->next; } return 0; } irqreturn_t cpqhp_ctrl_intr(int IRQ, void *data) { struct controller *ctrl = 
data; u8 schedule_flag = 0; u8 reset; u16 misc; u32 Diff; u32 temp_dword; misc = readw(ctrl->hpc_reg + MISC); /* * Check to see if it was our interrupt */ if (!(misc & 0x000C)) { return IRQ_NONE; } if (misc & 0x0004) { /* * Serial Output interrupt Pending */ /* Clear the interrupt */ misc |= 0x0004; writew(misc, ctrl->hpc_reg + MISC); /* Read to clear posted writes */ misc = readw(ctrl->hpc_reg + MISC); dbg ("%s - waking up\n", __func__); wake_up_interruptible(&ctrl->queue); } if (misc & 0x0008) { /* General-interrupt-input interrupt Pending */ Diff = readl(ctrl->hpc_reg + INT_INPUT_CLEAR) ^ ctrl->ctrl_int_comp; ctrl->ctrl_int_comp = readl(ctrl->hpc_reg + INT_INPUT_CLEAR); /* Clear the interrupt */ writel(Diff, ctrl->hpc_reg + INT_INPUT_CLEAR); /* Read it back to clear any posted writes */ temp_dword = readl(ctrl->hpc_reg + INT_INPUT_CLEAR); if (!Diff) /* Clear all interrupts */ writel(0xFFFFFFFF, ctrl->hpc_reg + INT_INPUT_CLEAR); schedule_flag += handle_switch_change((u8)(Diff & 0xFFL), ctrl); schedule_flag += handle_presence_change((u16)((Diff & 0xFFFF0000L) >> 16), ctrl); schedule_flag += handle_power_fault((u8)((Diff & 0xFF00L) >> 8), ctrl); } reset = readb(ctrl->hpc_reg + RESET_FREQ_MODE); if (reset & 0x40) { /* Bus reset has completed */ reset &= 0xCF; writeb(reset, ctrl->hpc_reg + RESET_FREQ_MODE); reset = readb(ctrl->hpc_reg + RESET_FREQ_MODE); wake_up_interruptible(&ctrl->queue); } if (schedule_flag) { wake_up_process(cpqhp_event_thread); dbg("Waking even thread"); } return IRQ_HANDLED; } /** * cpqhp_slot_create - Creates a node and adds it to the proper bus. * @busnumber: bus where new node is to be located * * Returns pointer to the new node or %NULL if unsuccessful. 
*/ struct pci_func *cpqhp_slot_create(u8 busnumber) { struct pci_func *new_slot; struct pci_func *next; new_slot = kzalloc(sizeof(*new_slot), GFP_KERNEL); if (new_slot == NULL) return new_slot; new_slot->next = NULL; new_slot->configured = 1; if (cpqhp_slot_list[busnumber] == NULL) { cpqhp_slot_list[busnumber] = new_slot; } else { next = cpqhp_slot_list[busnumber]; while (next->next != NULL) next = next->next; next->next = new_slot; } return new_slot; } /** * slot_remove - Removes a node from the linked list of slots. * @old_slot: slot to remove * * Returns %0 if successful, !0 otherwise. */ static int slot_remove(struct pci_func * old_slot) { struct pci_func *next; if (old_slot == NULL) return 1; next = cpqhp_slot_list[old_slot->bus]; if (next == NULL) return 1; if (next == old_slot) { cpqhp_slot_list[old_slot->bus] = old_slot->next; cpqhp_destroy_board_resources(old_slot); kfree(old_slot); return 0; } while ((next->next != old_slot) && (next->next != NULL)) next = next->next; if (next->next == old_slot) { next->next = old_slot->next; cpqhp_destroy_board_resources(old_slot); kfree(old_slot); return 0; } else return 2; } /** * bridge_slot_remove - Removes a node from the linked list of slots. * @bridge: bridge to remove * * Returns %0 if successful, !0 otherwise. 
*/ static int bridge_slot_remove(struct pci_func *bridge) { u8 subordinateBus, secondaryBus; u8 tempBus; struct pci_func *next; secondaryBus = (bridge->config_space[0x06] >> 8) & 0xFF; subordinateBus = (bridge->config_space[0x06] >> 16) & 0xFF; for (tempBus = secondaryBus; tempBus <= subordinateBus; tempBus++) { next = cpqhp_slot_list[tempBus]; while (!slot_remove(next)) next = cpqhp_slot_list[tempBus]; } next = cpqhp_slot_list[bridge->bus]; if (next == NULL) return 1; if (next == bridge) { cpqhp_slot_list[bridge->bus] = bridge->next; goto out; } while ((next->next != bridge) && (next->next != NULL)) next = next->next; if (next->next != bridge) return 2; next->next = bridge->next; out: kfree(bridge); return 0; } /** * cpqhp_slot_find - Looks for a node by bus, and device, multiple functions accessed * @bus: bus to find * @device: device to find * @index: is %0 for first function found, %1 for the second... * * Returns pointer to the node if successful, %NULL otherwise. */ struct pci_func *cpqhp_slot_find(u8 bus, u8 device, u8 index) { int found = -1; struct pci_func *func; func = cpqhp_slot_list[bus]; if ((func == NULL) || ((func->device == device) && (index == 0))) return func; if (func->device == device) found++; while (func->next != NULL) { func = func->next; if (func->device == device) found++; if (found == index) return func; } return NULL; } /* DJZ: I don't think is_bridge will work as is. * FIXME */ static int is_bridge(struct pci_func * func) { /* Check the header type */ if (((func->config_space[0x03] >> 16) & 0xFF) == 0x01) return 1; else return 0; } /** * set_controller_speed - set the frequency and/or mode of a specific controller segment. * @ctrl: controller to change frequency/mode for. * @adapter_speed: the speed of the adapter we want to match. * @hp_slot: the slot number where the adapter is installed. * * Returns %0 if we successfully change frequency and/or mode to match the * adapter speed. 
*/ static u8 set_controller_speed(struct controller *ctrl, u8 adapter_speed, u8 hp_slot) { struct slot *slot; struct pci_bus *bus = ctrl->pci_bus; u8 reg; u8 slot_power = readb(ctrl->hpc_reg + SLOT_POWER); u16 reg16; u32 leds = readl(ctrl->hpc_reg + LED_CONTROL); if (bus->cur_bus_speed == adapter_speed) return 0; /* We don't allow freq/mode changes if we find another adapter running * in another slot on this controller */ for(slot = ctrl->slot; slot; slot = slot->next) { if (slot->device == (hp_slot + ctrl->slot_device_offset)) continue; if (!slot->hotplug_slot || !slot->hotplug_slot->info) continue; if (slot->hotplug_slot->info->adapter_status == 0) continue; /* If another adapter is running on the same segment but at a * lower speed/mode, we allow the new adapter to function at * this rate if supported */ if (bus->cur_bus_speed < adapter_speed) return 0; return 1; } /* If the controller doesn't support freq/mode changes and the * controller is running at a higher mode, we bail */ if ((bus->cur_bus_speed > adapter_speed) && (!ctrl->pcix_speed_capability)) return 1; /* But we allow the adapter to run at a lower rate if possible */ if ((bus->cur_bus_speed < adapter_speed) && (!ctrl->pcix_speed_capability)) return 0; /* We try to set the max speed supported by both the adapter and * controller */ if (bus->max_bus_speed < adapter_speed) { if (bus->cur_bus_speed == bus->max_bus_speed) return 0; adapter_speed = bus->max_bus_speed; } writel(0x0L, ctrl->hpc_reg + LED_CONTROL); writeb(0x00, ctrl->hpc_reg + SLOT_ENABLE); set_SOGO(ctrl); wait_for_ctrl_irq(ctrl); if (adapter_speed != PCI_SPEED_133MHz_PCIX) reg = 0xF5; else reg = 0xF4; pci_write_config_byte(ctrl->pci_dev, 0x41, reg); reg16 = readw(ctrl->hpc_reg + NEXT_CURR_FREQ); reg16 &= ~0x000F; switch(adapter_speed) { case(PCI_SPEED_133MHz_PCIX): reg = 0x75; reg16 |= 0xB; break; case(PCI_SPEED_100MHz_PCIX): reg = 0x74; reg16 |= 0xA; break; case(PCI_SPEED_66MHz_PCIX): reg = 0x73; reg16 |= 0x9; break; case(PCI_SPEED_66MHz): 
	/*
	 * NOTE(review): this is the tail of the bus speed/mode change routine
	 * (set_controller_speed) whose head lies above this chunk; only
	 * comments were added here.
	 */
		reg = 0x73;
		reg16 |= 0x1;
		break;
	default:			/* 33MHz PCI 2.2 */
		reg = 0x71;
		break;
	}
	reg16 |= 0xB << 12;
	writew(reg16, ctrl->hpc_reg + NEXT_CURR_FREQ);

	mdelay(5);

	/* Reenable interrupts */
	writel(0, ctrl->hpc_reg + INT_MASK);

	pci_write_config_byte(ctrl->pci_dev, 0x41, reg);

	/* Restart state machine */
	/* NOTE(review): this value is immediately overwritten by the config
	 * read below; looks like a leftover — confirm against HW errata. */
	reg = ~0xF;
	pci_read_config_byte(ctrl->pci_dev, 0x43, &reg);
	pci_write_config_byte(ctrl->pci_dev, 0x43, reg);

	/* Only if mode change...*/
	if (((bus->cur_bus_speed == PCI_SPEED_66MHz) && (adapter_speed == PCI_SPEED_66MHz_PCIX)) ||
	    ((bus->cur_bus_speed == PCI_SPEED_66MHz_PCIX) && (adapter_speed == PCI_SPEED_66MHz)))
		set_SOGO(ctrl);

	wait_for_ctrl_irq(ctrl);
	mdelay(1100);

	/* Restore LED/Slot state */
	writel(leds, ctrl->hpc_reg + LED_CONTROL);
	writeb(slot_power, ctrl->hpc_reg + SLOT_ENABLE);

	set_SOGO(ctrl);
	wait_for_ctrl_irq(ctrl);

	bus->cur_bus_speed = adapter_speed;
	/* NOTE(review): slot is dereferenced without a NULL check below;
	 * presumably cpqhp_find_slot() cannot fail here — verify. */
	slot = cpqhp_find_slot(ctrl, hp_slot + ctrl->slot_device_offset);

	info("Successfully changed frequency/mode for adapter in slot %d\n",
	     slot->number);
	return 0;
}

/* the following routines constitute the bulk of the
 * hotplug controller logic
 */

/**
 * board_replaced - Called after a board has been replaced in the system.
 * @func: PCI device/function information
 * @ctrl: hotplug controller
 *
 * This is only used if we don't have resources for hot add.
 * Turns power on for the board.
 * Checks to see if board is the same.
 * If board is same, reconfigures it.
 * If board isn't same, turns it back off.
 */
static u32 board_replaced(struct pci_func *func, struct controller *ctrl)
{
	struct pci_bus *bus = ctrl->pci_bus;
	u8 hp_slot;
	u8 temp_byte;
	u8 adapter_speed;
	u32 rc = 0;

	hp_slot = func->device - ctrl->slot_device_offset;

	/*
	 * The switch is open.
	 */
	if (readl(ctrl->hpc_reg + INT_INPUT_CLEAR) & (0x01L << hp_slot))
		rc = INTERLOCK_OPEN;
	/*
	 * The board is already on
	 */
	else if (is_slot_enabled(ctrl, hp_slot))
		rc = CARD_FUNCTIONING;
	else {
		mutex_lock(&ctrl->crit_sect);

		/* turn on board without attaching to the bus */
		enable_slot_power(ctrl, hp_slot);

		set_SOGO(ctrl);

		/* Wait for SOBS to be unset */
		wait_for_ctrl_irq(ctrl);

		/* Change bits in slot power register to force another shift out
		 * NOTE: this is to work around the timer bug */
		temp_byte = readb(ctrl->hpc_reg + SLOT_POWER);
		writeb(0x00, ctrl->hpc_reg + SLOT_POWER);
		writeb(temp_byte, ctrl->hpc_reg + SLOT_POWER);

		set_SOGO(ctrl);

		/* Wait for SOBS to be unset */
		wait_for_ctrl_irq(ctrl);

		/* Match the controller bus speed to the adapter's speed
		 * before actually attaching it to the bus. */
		adapter_speed = get_adapter_speed(ctrl, hp_slot);
		if (bus->cur_bus_speed != adapter_speed)
			if (set_controller_speed(ctrl, adapter_speed, hp_slot))
				rc = WRONG_BUS_FREQUENCY;

		/* turn off board without attaching to the bus */
		disable_slot_power(ctrl, hp_slot);

		set_SOGO(ctrl);

		/* Wait for SOBS to be unset */
		wait_for_ctrl_irq(ctrl);

		mutex_unlock(&ctrl->crit_sect);

		if (rc)
			return rc;

		mutex_lock(&ctrl->crit_sect);

		slot_enable(ctrl, hp_slot);
		green_LED_blink(ctrl, hp_slot);

		amber_LED_off(ctrl, hp_slot);

		set_SOGO(ctrl);

		/* Wait for SOBS to be unset */
		wait_for_ctrl_irq(ctrl);

		mutex_unlock(&ctrl->crit_sect);

		/* Wait for ~1 second because of hot plug spec */
		long_delay(1*HZ);

		/* Check for a power fault */
		if (func->status == 0xFF) {
			/* power fault occurred, but it was benign */
			rc = POWER_FAILURE;
			func->status = 0;
		} else
			rc = cpqhp_valid_replace(ctrl, func);

		if (!rc) {
			/* It must be the same board */

			rc = cpqhp_configure_board(ctrl, func);

			/* If configuration fails, turn it off
			 * Get slot won't work for devices behind
			 * bridges, but in this case it will always be
			 * called for the "base" bus/dev/func of an
			 * adapter.
			 */

			/* NOTE(review): this LED/disable sequence runs even
			 * when rc == 0 (success path returns 1 below); this
			 * mirrors upstream behavior — do not "fix" blindly. */
			mutex_lock(&ctrl->crit_sect);

			amber_LED_on(ctrl, hp_slot);
			green_LED_off(ctrl, hp_slot);
			slot_disable(ctrl, hp_slot);

			set_SOGO(ctrl);

			/* Wait for SOBS to be unset */
			wait_for_ctrl_irq(ctrl);

			mutex_unlock(&ctrl->crit_sect);

			if (rc)
				return rc;
			else
				return 1;
		} else {
			/* Something is wrong

			 * Get slot won't work for devices behind bridges, but
			 * in this case it will always be called for the "base"
			 * bus/dev/func of an adapter.
			 */

			mutex_lock(&ctrl->crit_sect);

			amber_LED_on(ctrl, hp_slot);
			green_LED_off(ctrl, hp_slot);
			slot_disable(ctrl, hp_slot);

			set_SOGO(ctrl);

			/* Wait for SOBS to be unset */
			wait_for_ctrl_irq(ctrl);

			mutex_unlock(&ctrl->crit_sect);
		}
	}
	return rc;
}

/**
 * board_added - Called after a board has been added to the system.
 * @func: PCI device/function info
 * @ctrl: hotplug controller
 *
 * Turns power on for the board.
 * Configures board.
 */
static u32 board_added(struct pci_func *func, struct controller *ctrl)
{
	u8 hp_slot;
	u8 temp_byte;
	u8 adapter_speed;
	int index;
	u32 temp_register = 0xFFFFFFFF;
	u32 rc = 0;
	struct pci_func *new_slot = NULL;
	struct pci_bus *bus = ctrl->pci_bus;
	struct slot *p_slot;
	struct resource_lists res_lists;

	hp_slot = func->device - ctrl->slot_device_offset;
	dbg("%s: func->device, slot_offset, hp_slot = %d, %d ,%d\n",
	    __func__, func->device, ctrl->slot_device_offset, hp_slot);

	mutex_lock(&ctrl->crit_sect);

	/* turn on board without attaching to the bus */
	enable_slot_power(ctrl, hp_slot);

	set_SOGO(ctrl);

	/* Wait for SOBS to be unset */
	wait_for_ctrl_irq(ctrl);

	/* Change bits in slot power register to force another shift out
	 * NOTE: this is to work around the timer bug */
	temp_byte = readb(ctrl->hpc_reg + SLOT_POWER);
	writeb(0x00, ctrl->hpc_reg + SLOT_POWER);
	writeb(temp_byte, ctrl->hpc_reg + SLOT_POWER);

	set_SOGO(ctrl);

	/* Wait for SOBS to be unset */
	wait_for_ctrl_irq(ctrl);

	adapter_speed = get_adapter_speed(ctrl, hp_slot);
	if (bus->cur_bus_speed != adapter_speed)
		if (set_controller_speed(ctrl, adapter_speed, hp_slot))
			rc
				= WRONG_BUS_FREQUENCY;

	/* turn off board without attaching to the bus */
	disable_slot_power(ctrl, hp_slot);

	set_SOGO(ctrl);

	/* Wait for SOBS to be unset */
	wait_for_ctrl_irq(ctrl);

	mutex_unlock(&ctrl->crit_sect);

	if (rc)
		return rc;

	p_slot = cpqhp_find_slot(ctrl, hp_slot + ctrl->slot_device_offset);

	/* turn on board and blink green LED */

	dbg("%s: before down\n", __func__);
	mutex_lock(&ctrl->crit_sect);
	dbg("%s: after down\n", __func__);

	dbg("%s: before slot_enable\n", __func__);
	slot_enable(ctrl, hp_slot);

	dbg("%s: before green_LED_blink\n", __func__);
	green_LED_blink(ctrl, hp_slot);

	dbg("%s: before amber_LED_blink\n", __func__);
	amber_LED_off(ctrl, hp_slot);

	dbg("%s: before set_SOGO\n", __func__);
	set_SOGO(ctrl);

	/* Wait for SOBS to be unset */
	dbg("%s: before wait_for_ctrl_irq\n", __func__);
	wait_for_ctrl_irq(ctrl);
	dbg("%s: after wait_for_ctrl_irq\n", __func__);

	dbg("%s: before up\n", __func__);
	mutex_unlock(&ctrl->crit_sect);
	dbg("%s: after up\n", __func__);

	/* Wait for ~1 second because of hot plug spec */
	dbg("%s: before long_delay\n", __func__);
	long_delay(1*HZ);
	dbg("%s: after long_delay\n", __func__);

	dbg("%s: func status = %x\n", __func__, func->status);

	/* Check for a power fault */
	if (func->status == 0xFF) {
		/* power fault occurred, but it was benign */
		temp_register = 0xFFFFFFFF;
		dbg("%s: temp register set to %x by power fault\n", __func__, temp_register);
		rc = POWER_FAILURE;
		func->status = 0;
	} else {
		/* Get vendor/device ID u32 */
		ctrl->pci_bus->number = func->bus;
		rc = pci_bus_read_config_dword(ctrl->pci_bus, PCI_DEVFN(func->device, func->function), PCI_VENDOR_ID, &temp_register);
		dbg("%s: pci_read_config_dword returns %d\n", __func__, rc);
		dbg("%s: temp_register is %x\n", __func__, temp_register);

		if (rc != 0) {
			/* Something's wrong here */
			temp_register = 0xFFFFFFFF;
			dbg("%s: temp register set to %x by error\n", __func__, temp_register);
		}
		/* Preset return code.  It will be changed later if things go okay. */
		rc = NO_ADAPTER_PRESENT;
	}

	/* All F's is an empty slot or an invalid board */
	if (temp_register != 0xFFFFFFFF) {
		/* Hand the controller's free-resource lists to the configure
		 * pass; it consumes from them and hands back the remainder. */
		res_lists.io_head = ctrl->io_head;
		res_lists.mem_head = ctrl->mem_head;
		res_lists.p_mem_head = ctrl->p_mem_head;
		res_lists.bus_head = ctrl->bus_head;
		res_lists.irqs = NULL;

		rc = configure_new_device(ctrl, func, 0, &res_lists);
		dbg("%s: back from configure_new_device\n", __func__);

		ctrl->io_head = res_lists.io_head;
		ctrl->mem_head = res_lists.mem_head;
		ctrl->p_mem_head = res_lists.p_mem_head;
		ctrl->bus_head = res_lists.bus_head;

		cpqhp_resource_sort_and_combine(&(ctrl->mem_head));
		cpqhp_resource_sort_and_combine(&(ctrl->p_mem_head));
		cpqhp_resource_sort_and_combine(&(ctrl->io_head));
		cpqhp_resource_sort_and_combine(&(ctrl->bus_head));

		if (rc) {
			/* Configuration failed: amber on, green off, power off. */
			mutex_lock(&ctrl->crit_sect);

			amber_LED_on(ctrl, hp_slot);
			green_LED_off(ctrl, hp_slot);
			slot_disable(ctrl, hp_slot);

			set_SOGO(ctrl);

			/* Wait for SOBS to be unset */
			wait_for_ctrl_irq(ctrl);

			mutex_unlock(&ctrl->crit_sect);
			return rc;
		} else {
			cpqhp_save_slot_config(ctrl, func);
		}

		func->status = 0;
		func->switch_save = 0x10;
		func->is_a_board = 0x01;

		/* next, we will instantiate the linux pci_dev structures (with
		 * appropriate driver notification, if already present) */
		dbg("%s: configure linux pci_dev structure\n", __func__);
		index = 0;
		do {
			new_slot = cpqhp_slot_find(ctrl->bus, func->device, index++);
			if (new_slot && !new_slot->pci_dev)
				cpqhp_configure_device(ctrl, new_slot);
		} while (new_slot);

		mutex_lock(&ctrl->crit_sect);

		green_LED_on(ctrl, hp_slot);

		set_SOGO(ctrl);

		/* Wait for SOBS to be unset */
		wait_for_ctrl_irq(ctrl);

		mutex_unlock(&ctrl->crit_sect);
	} else {
		/* Empty slot or unreadable board: indicate failure. */
		mutex_lock(&ctrl->crit_sect);

		amber_LED_on(ctrl, hp_slot);
		green_LED_off(ctrl, hp_slot);
		slot_disable(ctrl, hp_slot);

		set_SOGO(ctrl);

		/* Wait for SOBS to be unset */
		wait_for_ctrl_irq(ctrl);

		mutex_unlock(&ctrl->crit_sect);

		return rc;
	}
	return 0;
}

/**
 * remove_board - Turns off slot and LEDs
 * @func: PCI device/function info
 * @replace_flag:
 *		whether replacing or adding a new device
 * @ctrl: target controller
 */
static u32 remove_board(struct pci_func *func, u32 replace_flag, struct controller *ctrl)
{
	int index;
	u8 skip = 0;
	u8 device;
	u8 hp_slot;
	u8 temp_byte;
	/* NOTE(review): rc is only assigned on some paths and its value is
	 * never read afterwards — confirm whether it should feed the return. */
	u32 rc;
	struct resource_lists res_lists;
	struct pci_func *temp_func;

	if (cpqhp_unconfigure_device(func))
		return 1;

	device = func->device;

	hp_slot = func->device - ctrl->slot_device_offset;
	dbg("In %s, hp_slot = %d\n", __func__, hp_slot);

	/* When we get here, it is safe to change base address registers.
	 * We will attempt to save the base address register lengths */
	if (replace_flag || !ctrl->add_support)
		rc = cpqhp_save_base_addr_length(ctrl, func);
	else if (!func->bus_head && !func->mem_head &&
		 !func->p_mem_head && !func->io_head) {
		/* Here we check to see if we've saved any of the board's
		 * resources already.  If so, we'll skip the attempt to
		 * determine what's being used. */
		index = 0;
		temp_func = cpqhp_slot_find(func->bus, func->device, index++);
		while (temp_func) {
			if (temp_func->bus_head || temp_func->mem_head
			    || temp_func->p_mem_head || temp_func->io_head) {
				skip = 1;
				break;
			}
			temp_func = cpqhp_slot_find(temp_func->bus, temp_func->device, index++);
		}

		if (!skip)
			rc = cpqhp_save_used_resources(ctrl, func);
	}
	/* Change status to shutdown */
	if (func->is_a_board)
		func->status = 0x01;
	func->configured = 0;

	mutex_lock(&ctrl->crit_sect);

	green_LED_off(ctrl, hp_slot);
	slot_disable(ctrl, hp_slot);

	set_SOGO(ctrl);

	/* turn off SERR for slot */
	temp_byte = readb(ctrl->hpc_reg + SLOT_SERR);
	temp_byte &= ~(0x01 << hp_slot);
	writeb(temp_byte, ctrl->hpc_reg + SLOT_SERR);

	/* Wait for SOBS to be unset */
	wait_for_ctrl_irq(ctrl);

	mutex_unlock(&ctrl->crit_sect);

	if (!replace_flag && ctrl->add_support) {
		/* Return every function's resources to the controller's free
		 * lists, then tear the pci_func entries down one by one. */
		while (func) {
			res_lists.io_head = ctrl->io_head;
			res_lists.mem_head = ctrl->mem_head;
			res_lists.p_mem_head = ctrl->p_mem_head;
			res_lists.bus_head = ctrl->bus_head;

			cpqhp_return_board_resources(func, &res_lists);

			ctrl->io_head = res_lists.io_head;
			ctrl->mem_head = res_lists.mem_head;
			ctrl->p_mem_head = res_lists.p_mem_head;
			ctrl->bus_head = res_lists.bus_head;

			cpqhp_resource_sort_and_combine(&(ctrl->mem_head));
			cpqhp_resource_sort_and_combine(&(ctrl->p_mem_head));
			cpqhp_resource_sort_and_combine(&(ctrl->io_head));
			cpqhp_resource_sort_and_combine(&(ctrl->bus_head));

			if (is_bridge(func)) {
				bridge_slot_remove(func);
			} else
				slot_remove(func);

			func = cpqhp_slot_find(ctrl->bus, device, 0);
		}

		/* Setup slot structure with entry for empty slot */
		func = cpqhp_slot_create(ctrl->bus);

		if (func == NULL)
			return 1;

		func->bus = ctrl->bus;
		func->device = device;
		func->function = 0;
		func->configured = 0;
		func->switch_save = 0x10;
		func->is_a_board = 0;
		func->p_task_event = NULL;
	}

	return 0;
}

/* Timer callback: latch which slot's pushbutton fired and wake the worker. */
static void pushbutton_helper_thread(unsigned long data)
{
	pushbutton_pending = data;

	wake_up_process(cpqhp_event_thread);
}

/* this is the main worker thread */
static int event_thread(void *data)
{
	struct controller *ctrl;

	while (1) {
		dbg("!!!!event_thread sleeping\n");
		set_current_state(TASK_INTERRUPTIBLE);
		schedule();

		if (kthread_should_stop())
			break;
		/* Do stuff here */
		if (pushbutton_pending)
			cpqhp_pushbutton_thread(pushbutton_pending);
		else
			for (ctrl = cpqhp_ctrl_list; ctrl; ctrl = ctrl->next)
				interrupt_event_handler(ctrl);
	}
	dbg("event_thread signals exit\n");
	return 0;
}

/* Spawn the single event worker thread; returns 0 or the PTR_ERR code. */
int cpqhp_event_start_thread(void)
{
	cpqhp_event_thread = kthread_run(event_thread, NULL, "phpd_event");
	if (IS_ERR(cpqhp_event_thread)) {
		err("Can't start up our event thread\n");
		return PTR_ERR(cpqhp_event_thread);
	}

	return 0;
}

/* Stop the event worker thread started by cpqhp_event_start_thread(). */
void cpqhp_event_stop_thread(void)
{
	kthread_stop(cpqhp_event_thread);
}

/* Push the slot's current hardware state to the PCI hotplug core. */
static int update_slot_info(struct controller *ctrl, struct slot *slot)
{
	struct hotplug_slot_info *info;
	int result;

	info = kmalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	info->power_status = get_slot_enabled(ctrl, slot);
	info->attention_status = cpq_get_attention_status(ctrl, slot);
	info->latch_status = cpq_get_latch_status(ctrl, slot);
	info->adapter_status = get_presence_status(ctrl, slot);
	result = pci_hp_change_slot_info(slot->hotplug_slot, info);
	kfree(info);
	return result;
}

/* Drain the controller's event queue, translating hardware events
 * (button press/release/cancel, power fault) into LED changes, slot
 * state transitions, and the 5-second pushbutton timer. */
static void interrupt_event_handler(struct controller *ctrl)
{
	int loop = 0;
	int change = 1;
	struct pci_func *func;
	u8 hp_slot;
	struct slot *p_slot;

	while (change) {
		change = 0;

		for (loop = 0; loop < 10; loop++) {
			/* dbg("loop %d\n", loop); */
			if (ctrl->event_queue[loop].event_type != 0) {
				hp_slot = ctrl->event_queue[loop].hp_slot;

				func = cpqhp_slot_find(ctrl->bus, (hp_slot + ctrl->slot_device_offset), 0);
				if (!func)
					return;

				p_slot = cpqhp_find_slot(ctrl, hp_slot + ctrl->slot_device_offset);
				if (!p_slot)
					return;

				dbg("hp_slot %d, func %p, p_slot %p\n",
				    hp_slot, func, p_slot);

				if (ctrl->event_queue[loop].event_type == INT_BUTTON_PRESS) {
					dbg("button pressed\n");
				} else if (ctrl->event_queue[loop].event_type ==
					   INT_BUTTON_CANCEL) {
					dbg("button cancel\n");
					del_timer(&p_slot->task_event);

					mutex_lock(&ctrl->crit_sect);

					if (p_slot->state == BLINKINGOFF_STATE) {
						/* slot is on */
						dbg("turn on green LED\n");
						green_LED_on(ctrl, hp_slot);
					} else if (p_slot->state == BLINKINGON_STATE) {
						/* slot is off */
						dbg("turn off green LED\n");
						green_LED_off(ctrl, hp_slot);
					}

					info(msg_button_cancel, p_slot->number);

					p_slot->state = STATIC_STATE;

					amber_LED_off(ctrl, hp_slot);

					set_SOGO(ctrl);

					/* Wait for SOBS to be unset */
					wait_for_ctrl_irq(ctrl);

					mutex_unlock(&ctrl->crit_sect);
				}
				/*** button Released (No action on press...) */
				else if (ctrl->event_queue[loop].event_type == INT_BUTTON_RELEASE) {
					dbg("button release\n");

					if (is_slot_enabled(ctrl, hp_slot)) {
						dbg("slot is on\n");
						p_slot->state = BLINKINGOFF_STATE;
						info(msg_button_off, p_slot->number);
					} else {
						dbg("slot is off\n");
						p_slot->state = BLINKINGON_STATE;
						info(msg_button_on, p_slot->number);
					}
					mutex_lock(&ctrl->crit_sect);

					dbg("blink green LED and turn off amber\n");

					amber_LED_off(ctrl, hp_slot);
					green_LED_blink(ctrl, hp_slot);

					set_SOGO(ctrl);

					/* Wait for SOBS to be unset */
					wait_for_ctrl_irq(ctrl);

					mutex_unlock(&ctrl->crit_sect);
					init_timer(&p_slot->task_event);
					p_slot->hp_slot = hp_slot;
					p_slot->ctrl = ctrl;
/*					p_slot->physical_slot = physical_slot; */
					p_slot->task_event.expires = jiffies + 5 * HZ;	/* 5 second delay */
					p_slot->task_event.function = pushbutton_helper_thread;
					/* NOTE(review): casting a pointer to u32
					 * truncates on 64-bit; upstream later uses
					 * unsigned long here — confirm for this tree. */
					p_slot->task_event.data = (u32) p_slot;

					dbg("add_timer p_slot = %p\n", p_slot);
					add_timer(&p_slot->task_event);
				}
				/***********POWER FAULT */
				else if (ctrl->event_queue[loop].event_type == INT_POWER_FAULT) {
					dbg("power fault\n");
				} else {
					/* refresh notification */
					update_slot_info(ctrl, p_slot);
				}

				ctrl->event_queue[loop].event_type = 0;

				change = 1;
			}
		}		/* End of FOR loop */
	}

	return;
}

/**
 * cpqhp_pushbutton_thread - handle pushbutton events
 * @slot: target slot (struct)
 *
 * Scheduled procedure to handle blocking stuff for the pushbuttons.
 * Handles all pending events and exits.
 */
void cpqhp_pushbutton_thread(unsigned long slot)
{
	u8 hp_slot;
	u8 device;
	struct pci_func *func;
	struct slot *p_slot = (struct slot *) slot;
	struct controller *ctrl = (struct controller *) p_slot->ctrl;

	pushbutton_pending = 0;
	hp_slot = p_slot->hp_slot;

	device = p_slot->device;

	if (is_slot_enabled(ctrl, hp_slot)) {
		p_slot->state = POWEROFF_STATE;
		/* power Down board */
		func = cpqhp_slot_find(p_slot->bus, p_slot->device, 0);
		dbg("In power_down_board, func = %p, ctrl = %p\n", func, ctrl);
		if (!func) {
			dbg("Error! func NULL in %s\n", __func__);
			return;
		}

		if (cpqhp_process_SS(ctrl, func) != 0) {
			amber_LED_on(ctrl, hp_slot);
			green_LED_on(ctrl, hp_slot);

			set_SOGO(ctrl);

			/* Wait for SOBS to be unset */
			wait_for_ctrl_irq(ctrl);
		}

		p_slot->state = STATIC_STATE;
	} else {
		p_slot->state = POWERON_STATE;
		/* slot is off */
		func = cpqhp_slot_find(p_slot->bus, p_slot->device, 0);
		dbg("In add_board, func = %p, ctrl = %p\n", func, ctrl);
		if (!func) {
			dbg("Error! func NULL in %s\n", __func__);
			return;
		}

		if (ctrl != NULL) {
			if (cpqhp_process_SI(ctrl, func) != 0) {
				amber_LED_on(ctrl, hp_slot);
				green_LED_off(ctrl, hp_slot);

				set_SOGO(ctrl);

				/* Wait for SOBS to be unset */
				wait_for_ctrl_irq(ctrl);
			}
		}

		p_slot->state = STATIC_STATE;
	}

	return;
}

/* Software Insert: power up and configure the board in @func's slot.
 * Returns 0 on success, non-zero on failure (interlock open, OOM, ...). */
int cpqhp_process_SI(struct controller *ctrl, struct pci_func *func)
{
	u8 device, hp_slot;
	u16 temp_word;
	u32 tempdword;
	int rc;
	struct slot *p_slot;
	int physical_slot = 0;

	tempdword = 0;

	device = func->device;
	hp_slot = device - ctrl->slot_device_offset;
	p_slot = cpqhp_find_slot(ctrl, device);
	if (p_slot)
		physical_slot = p_slot->number;

	/* Check to see if the interlock is closed */
	tempdword = readl(ctrl->hpc_reg + INT_INPUT_CLEAR);

	if (tempdword & (0x01 << hp_slot)) {
		return 1;
	}

	if (func->is_a_board) {
		rc = board_replaced(func, ctrl);
	} else {
		/* add board */
		slot_remove(func);

		func = cpqhp_slot_create(ctrl->bus);
		if (func == NULL)
			return 1;

		func->bus = ctrl->bus;
		func->device = device;
		func->function = 0;
		func->configured = 0;
		func->is_a_board = 1;

		/* We have to save the presence info for these slots */
		temp_word = ctrl->ctrl_int_comp >> 16;
		func->presence_save = (temp_word >> hp_slot) & 0x01;
		func->presence_save |= (temp_word >> (hp_slot + 7)) & 0x02;

		if (ctrl->ctrl_int_comp & (0x1L << hp_slot)) {
			func->switch_save = 0;
		} else {
			func->switch_save = 0x10;
		}

		rc = board_added(func, ctrl);
		if (rc) {
			if (is_bridge(func)) {
				bridge_slot_remove(func);
			} else
				slot_remove(func);

			/* Setup slot structure with entry for empty slot */
			func =
			       cpqhp_slot_create(ctrl->bus);

			if (func == NULL)
				return 1;

			func->bus = ctrl->bus;
			func->device = device;
			func->function = 0;
			func->configured = 0;
			func->is_a_board = 0;

			/* We have to save the presence info for these slots */
			temp_word = ctrl->ctrl_int_comp >> 16;
			func->presence_save = (temp_word >> hp_slot) & 0x01;
			func->presence_save |= (temp_word >> (hp_slot + 7)) & 0x02;

			if (ctrl->ctrl_int_comp & (0x1L << hp_slot)) {
				func->switch_save = 0;
			} else {
				func->switch_save = 0x10;
			}
		}
	}

	if (rc) {
		dbg("%s: rc = %d\n", __func__, rc);
	}

	if (p_slot)
		update_slot_info(ctrl, p_slot);

	return rc;
}

/* Software Shutdown: verify nothing in the slot is a display adapter or a
 * VGA-routing bridge, then power the board down via remove_board(). */
int cpqhp_process_SS(struct controller *ctrl, struct pci_func *func)
{
	u8 device, class_code, header_type, BCR;
	u8 index = 0;
	u8 replace_flag;
	u32 rc = 0;
	unsigned int devfn;
	struct slot *p_slot;
	struct pci_bus *pci_bus = ctrl->pci_bus;
	int physical_slot = 0;

	device = func->device;
	func = cpqhp_slot_find(ctrl->bus, device, index++);
	p_slot = cpqhp_find_slot(ctrl, device);
	if (p_slot) {
		physical_slot = p_slot->number;
	}

	/* Make sure there are no video controllers here */
	while (func && !rc) {
		pci_bus->number = func->bus;
		devfn = PCI_DEVFN(func->device, func->function);

		/* Check the Class Code */
		rc = pci_bus_read_config_byte(pci_bus, devfn, 0x0B, &class_code);
		if (rc)
			return rc;

		if (class_code == PCI_BASE_CLASS_DISPLAY) {
			/* Display/Video adapter (not supported) */
			rc = REMOVE_NOT_SUPPORTED;
		} else {
			/* See if it's a bridge */
			rc = pci_bus_read_config_byte(pci_bus, devfn, PCI_HEADER_TYPE, &header_type);
			if (rc)
				return rc;

			/* If it's a bridge, check the VGA Enable bit */
			if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) {
				rc = pci_bus_read_config_byte(pci_bus, devfn, PCI_BRIDGE_CONTROL, &BCR);
				if (rc)
					return rc;

				/* If the VGA Enable bit is set, remove isn't
				 * supported */
				if (BCR & PCI_BRIDGE_CTL_VGA)
					rc = REMOVE_NOT_SUPPORTED;
			}
		}

		func = cpqhp_slot_find(ctrl->bus, device, index++);
	}

	func = cpqhp_slot_find(ctrl->bus, device, 0);
	if ((func != NULL) && !rc) {
		/* FIXME: Replace flag should be passed into process_SS */
		replace_flag = !(ctrl->add_support);
		rc = remove_board(func, replace_flag, ctrl);
	} else if (!rc) {
		rc = 1;
	}

	if (p_slot)
		update_slot_info(ctrl, p_slot);

	return rc;
}

/**
 * switch_leds - switch the leds, go from one site to the other.
 * @ctrl: controller to use
 * @num_of_slots: number of slots to use
 * @work_LED: LED control value
 * @direction: 1 to start from the left side, 0 to start right.
 */
static void switch_leds(struct controller *ctrl, const int num_of_slots,
			u32 *work_LED, const int direction)
{
	int loop;

	for (loop = 0; loop < num_of_slots; loop++) {
		/* Shift the pattern one slot position per iteration. */
		if (direction)
			*work_LED = *work_LED >> 1;
		else
			*work_LED = *work_LED << 1;
		writel(*work_LED, ctrl->hpc_reg + LED_CONTROL);

		set_SOGO(ctrl);

		/* Wait for SOGO interrupt */
		wait_for_ctrl_irq(ctrl);

		/* Get ready for next iteration */
		long_delay((2*HZ)/10);
	}
}

/**
 * cpqhp_hardware_test - runs hardware tests
 * @ctrl: target controller
 * @test_num: the number written to the "test" file in sysfs.
 *
 * For hot plug ctrl folks to play with.
 */
int cpqhp_hardware_test(struct controller *ctrl, int test_num)
{
	u32 save_LED;
	u32 work_LED;
	int loop;
	int num_of_slots;

	num_of_slots = readb(ctrl->hpc_reg + SLOT_MASK) & 0x0f;

	switch (test_num) {
	case 1:
		/* Do stuff here!
		 */
		/* Do that funky LED thing */
		/* so we can restore them later */
		save_LED = readl(ctrl->hpc_reg + LED_CONTROL);
		work_LED = 0x01010101;
		switch_leds(ctrl, num_of_slots, &work_LED, 0);
		switch_leds(ctrl, num_of_slots, &work_LED, 1);
		switch_leds(ctrl, num_of_slots, &work_LED, 0);
		switch_leds(ctrl, num_of_slots, &work_LED, 1);

		work_LED = 0x01010000;
		writel(work_LED, ctrl->hpc_reg + LED_CONTROL);
		switch_leds(ctrl, num_of_slots, &work_LED, 0);
		switch_leds(ctrl, num_of_slots, &work_LED, 1);
		work_LED = 0x00000101;
		writel(work_LED, ctrl->hpc_reg + LED_CONTROL);
		switch_leds(ctrl, num_of_slots, &work_LED, 0);
		switch_leds(ctrl, num_of_slots, &work_LED, 1);

		work_LED = 0x01010000;
		writel(work_LED, ctrl->hpc_reg + LED_CONTROL);
		for (loop = 0; loop < num_of_slots; loop++) {
			set_SOGO(ctrl);

			/* Wait for SOGO interrupt */
			wait_for_ctrl_irq(ctrl);

			/* Get ready for next iteration */
			long_delay((3*HZ)/10);
			work_LED = work_LED >> 16;
			writel(work_LED, ctrl->hpc_reg + LED_CONTROL);

			set_SOGO(ctrl);

			/* Wait for SOGO interrupt */
			wait_for_ctrl_irq(ctrl);

			/* Get ready for next iteration */
			long_delay((3*HZ)/10);
			work_LED = work_LED << 16;
			writel(work_LED, ctrl->hpc_reg + LED_CONTROL);
			work_LED = work_LED << 1;
			writel(work_LED, ctrl->hpc_reg + LED_CONTROL);
		}

		/* put it back the way it was */
		writel(save_LED, ctrl->hpc_reg + LED_CONTROL);

		set_SOGO(ctrl);

		/* Wait for SOBS to be unset */
		wait_for_ctrl_irq(ctrl);
		break;
	case 2:
		/* Do other stuff here! */
		break;
	case 3:
		/* and more... */
		break;
	}
	return 0;
}

/**
 * configure_new_device - Configures the PCI header information of one board.
 * @ctrl: pointer to controller structure
 * @func: pointer to function structure
 * @behind_bridge: 1 if this is a recursive call, 0 if not
 * @resources: pointer to set of resource lists
 *
 * Returns 0 if success.
 */
static u32 configure_new_device(struct controller *ctrl, struct pci_func *func,
				u8 behind_bridge, struct resource_lists *resources)
{
	u8 temp_byte, function, max_functions, stop_it;
	int rc;
	u32 ID;
	struct pci_func *new_slot;
	int index;

	new_slot = func;

	dbg("%s\n", __func__);
	/* Check for Multi-function device */
	ctrl->pci_bus->number = func->bus;
	rc = pci_bus_read_config_byte(ctrl->pci_bus, PCI_DEVFN(func->device, func->function), 0x0E, &temp_byte);
	if (rc) {
		dbg("%s: rc = %d\n", __func__, rc);
		return rc;
	}

	if (temp_byte & 0x80)	/* Multi-function device */
		max_functions = 8;
	else
		max_functions = 1;

	function = 0;

	do {
		rc = configure_new_function(ctrl, new_slot, behind_bridge, resources);

		if (rc) {
			dbg("configure_new_function failed %d\n", rc);
			/* On failure, give back whatever the already-configured
			 * functions of this board had taken. */
			index = 0;

			while (new_slot) {
				new_slot = cpqhp_slot_find(new_slot->bus, new_slot->device, index++);

				if (new_slot)
					cpqhp_return_board_resources(new_slot, resources);
			}

			return rc;
		}

		function++;

		stop_it = 0;

		/* The following loop skips to the next present function
		 * and creates a board structure */

		while ((function < max_functions) && (!stop_it)) {
			pci_bus_read_config_dword(ctrl->pci_bus, PCI_DEVFN(func->device, function), 0x00, &ID);

			if (ID == 0xFFFFFFFF) {
				/* absent function: try the next one */
				function++;
			} else {
				/* Setup slot structure. */
				new_slot = cpqhp_slot_create(func->bus);

				if (new_slot == NULL)
					return 1;

				new_slot->bus = func->bus;
				new_slot->device = func->device;
				new_slot->function = function;
				new_slot->is_a_board = 1;
				new_slot->status = 0;

				stop_it++;
			}
		}

	} while (function < max_functions);
	dbg("returning from configure_new_device\n");

	return 0;
}

/*
 * Configuration logic that involves the hotplug data structures and
 * their bookkeeping
 */

/**
 * configure_new_function - Configures the PCI header information of one device
 * @ctrl: pointer to controller structure
 * @func: pointer to function structure
 * @behind_bridge: 1 if this is a recursive call, 0 if not
 * @resources: pointer to set of resource lists
 *
 * Calls itself recursively for bridged devices.
 * Returns 0 if success.
 */
static int configure_new_function(struct controller *ctrl, struct pci_func *func,
				  u8 behind_bridge,
				  struct resource_lists *resources)
{
	int cloop;
	u8 IRQ = 0;
	u8 temp_byte;
	u8 device;
	u8 class_code;
	u16 command;
	u16 temp_word;
	u32 temp_dword;
	u32 rc;
	u32 temp_register;
	u32 base;
	u32 ID;
	unsigned int devfn;
	struct pci_resource *mem_node;
	struct pci_resource *p_mem_node;
	struct pci_resource *io_node;
	struct pci_resource *bus_node;
	struct pci_resource *hold_mem_node;
	struct pci_resource *hold_p_mem_node;
	struct pci_resource *hold_IO_node;
	struct pci_resource *hold_bus_node;
	struct irq_mapping irqs;
	struct pci_func *new_slot;
	struct pci_bus *pci_bus;
	struct resource_lists temp_resources;

	pci_bus = ctrl->pci_bus;
	pci_bus->number = func->bus;
	devfn = PCI_DEVFN(func->device, func->function);

	/* Check for Bridge */
	rc = pci_bus_read_config_byte(pci_bus, devfn, PCI_HEADER_TYPE, &temp_byte);
	if (rc)
		return rc;

	if ((temp_byte & 0x7F) == PCI_HEADER_TYPE_BRIDGE) {
		/* set Primary bus */
		dbg("set Primary bus = %d\n", func->bus);
		rc = pci_bus_write_config_byte(pci_bus, devfn, PCI_PRIMARY_BUS, func->bus);
		if (rc)
			return rc;

		/* find range of busses to use */
		dbg("find ranges of buses to use\n");
		bus_node =
get_max_resource(&(resources->bus_head), 1); /* If we don't have any busses to allocate, we can't continue */ if (!bus_node) return -ENOMEM; /* set Secondary bus */ temp_byte = bus_node->base; dbg("set Secondary bus = %d\n", bus_node->base); rc = pci_bus_write_config_byte(pci_bus, devfn, PCI_SECONDARY_BUS, temp_byte); if (rc) return rc; /* set subordinate bus */ temp_byte = bus_node->base + bus_node->length - 1; dbg("set subordinate bus = %d\n", bus_node->base + bus_node->length - 1); rc = pci_bus_write_config_byte(pci_bus, devfn, PCI_SUBORDINATE_BUS, temp_byte); if (rc) return rc; /* set subordinate Latency Timer and base Latency Timer */ temp_byte = 0x40; rc = pci_bus_write_config_byte(pci_bus, devfn, PCI_SEC_LATENCY_TIMER, temp_byte); if (rc) return rc; rc = pci_bus_write_config_byte(pci_bus, devfn, PCI_LATENCY_TIMER, temp_byte); if (rc) return rc; /* set Cache Line size */ temp_byte = 0x08; rc = pci_bus_write_config_byte(pci_bus, devfn, PCI_CACHE_LINE_SIZE, temp_byte); if (rc) return rc; /* Setup the IO, memory, and prefetchable windows */ io_node = get_max_resource(&(resources->io_head), 0x1000); if (!io_node) return -ENOMEM; mem_node = get_max_resource(&(resources->mem_head), 0x100000); if (!mem_node) return -ENOMEM; p_mem_node = get_max_resource(&(resources->p_mem_head), 0x100000); if (!p_mem_node) return -ENOMEM; dbg("Setup the IO, memory, and prefetchable windows\n"); dbg("io_node\n"); dbg("(base, len, next) (%x, %x, %p)\n", io_node->base, io_node->length, io_node->next); dbg("mem_node\n"); dbg("(base, len, next) (%x, %x, %p)\n", mem_node->base, mem_node->length, mem_node->next); dbg("p_mem_node\n"); dbg("(base, len, next) (%x, %x, %p)\n", p_mem_node->base, p_mem_node->length, p_mem_node->next); /* set up the IRQ info */ if (!resources->irqs) { irqs.barber_pole = 0; irqs.interrupt[0] = 0; irqs.interrupt[1] = 0; irqs.interrupt[2] = 0; irqs.interrupt[3] = 0; irqs.valid_INT = 0; } else { irqs.barber_pole = resources->irqs->barber_pole; irqs.interrupt[0] = 
resources->irqs->interrupt[0]; irqs.interrupt[1] = resources->irqs->interrupt[1]; irqs.interrupt[2] = resources->irqs->interrupt[2]; irqs.interrupt[3] = resources->irqs->interrupt[3]; irqs.valid_INT = resources->irqs->valid_INT; } /* set up resource lists that are now aligned on top and bottom * for anything behind the bridge. */ temp_resources.bus_head = bus_node; temp_resources.io_head = io_node; temp_resources.mem_head = mem_node; temp_resources.p_mem_head = p_mem_node; temp_resources.irqs = &irqs; /* Make copies of the nodes we are going to pass down so that * if there is a problem,we can just use these to free resources */ hold_bus_node = kmalloc(sizeof(*hold_bus_node), GFP_KERNEL); hold_IO_node = kmalloc(sizeof(*hold_IO_node), GFP_KERNEL); hold_mem_node = kmalloc(sizeof(*hold_mem_node), GFP_KERNEL); hold_p_mem_node = kmalloc(sizeof(*hold_p_mem_node), GFP_KERNEL); if (!hold_bus_node || !hold_IO_node || !hold_mem_node || !hold_p_mem_node) { kfree(hold_bus_node); kfree(hold_IO_node); kfree(hold_mem_node); kfree(hold_p_mem_node); return 1; } memcpy(hold_bus_node, bus_node, sizeof(struct pci_resource)); bus_node->base += 1; bus_node->length -= 1; bus_node->next = NULL; /* If we have IO resources copy them and fill in the bridge's * IO range registers */ memcpy(hold_IO_node, io_node, sizeof(struct pci_resource)); io_node->next = NULL; /* set IO base and Limit registers */ temp_byte = io_node->base >> 8; rc = pci_bus_write_config_byte(pci_bus, devfn, PCI_IO_BASE, temp_byte); temp_byte = (io_node->base + io_node->length - 1) >> 8; rc = pci_bus_write_config_byte(pci_bus, devfn, PCI_IO_LIMIT, temp_byte); /* Copy the memory resources and fill in the bridge's memory * range registers. 
*/ memcpy(hold_mem_node, mem_node, sizeof(struct pci_resource)); mem_node->next = NULL; /* set Mem base and Limit registers */ temp_word = mem_node->base >> 16; rc = pci_bus_write_config_word(pci_bus, devfn, PCI_MEMORY_BASE, temp_word); temp_word = (mem_node->base + mem_node->length - 1) >> 16; rc = pci_bus_write_config_word(pci_bus, devfn, PCI_MEMORY_LIMIT, temp_word); memcpy(hold_p_mem_node, p_mem_node, sizeof(struct pci_resource)); p_mem_node->next = NULL; /* set Pre Mem base and Limit registers */ temp_word = p_mem_node->base >> 16; rc = pci_bus_write_config_word (pci_bus, devfn, PCI_PREF_MEMORY_BASE, temp_word); temp_word = (p_mem_node->base + p_mem_node->length - 1) >> 16; rc = pci_bus_write_config_word (pci_bus, devfn, PCI_PREF_MEMORY_LIMIT, temp_word); /* Adjust this to compensate for extra adjustment in first loop */ irqs.barber_pole--; rc = 0; /* Here we actually find the devices and configure them */ for (device = 0; (device <= 0x1F) && !rc; device++) { irqs.barber_pole = (irqs.barber_pole + 1) & 0x03; ID = 0xFFFFFFFF; pci_bus->number = hold_bus_node->base; pci_bus_read_config_dword (pci_bus, PCI_DEVFN(device, 0), 0x00, &ID); pci_bus->number = func->bus; if (ID != 0xFFFFFFFF) { /* device present */ /* Setup slot structure. */ new_slot = cpqhp_slot_create(hold_bus_node->base); if (new_slot == NULL) { rc = -ENOMEM; continue; } new_slot->bus = hold_bus_node->base; new_slot->device = device; new_slot->function = 0; new_slot->is_a_board = 1; new_slot->status = 0; rc = configure_new_device(ctrl, new_slot, 1, &temp_resources); dbg("configure_new_device rc=0x%x\n",rc); } /* End of IF (device in slot?) 
*/ } /* End of FOR loop */ if (rc) goto free_and_out; /* save the interrupt routing information */ if (resources->irqs) { resources->irqs->interrupt[0] = irqs.interrupt[0]; resources->irqs->interrupt[1] = irqs.interrupt[1]; resources->irqs->interrupt[2] = irqs.interrupt[2]; resources->irqs->interrupt[3] = irqs.interrupt[3]; resources->irqs->valid_INT = irqs.valid_INT; } else if (!behind_bridge) { /* We need to hook up the interrupts here */ for (cloop = 0; cloop < 4; cloop++) { if (irqs.valid_INT & (0x01 << cloop)) { rc = cpqhp_set_irq(func->bus, func->device, cloop + 1, irqs.interrupt[cloop]); if (rc) goto free_and_out; } } /* end of for loop */ } /* Return unused bus resources * First use the temporary node to store information for * the board */ if (bus_node && temp_resources.bus_head) { hold_bus_node->length = bus_node->base - hold_bus_node->base; hold_bus_node->next = func->bus_head; func->bus_head = hold_bus_node; temp_byte = temp_resources.bus_head->base - 1; /* set subordinate bus */ rc = pci_bus_write_config_byte (pci_bus, devfn, PCI_SUBORDINATE_BUS, temp_byte); if (temp_resources.bus_head->length == 0) { kfree(temp_resources.bus_head); temp_resources.bus_head = NULL; } else { return_resource(&(resources->bus_head), temp_resources.bus_head); } } /* If we have IO space available and there is some left, * return the unused portion */ if (hold_IO_node && temp_resources.io_head) { io_node = do_pre_bridge_resource_split(&(temp_resources.io_head), &hold_IO_node, 0x1000); /* Check if we were able to split something off */ if (io_node) { hold_IO_node->base = io_node->base + io_node->length; temp_byte = (hold_IO_node->base) >> 8; rc = pci_bus_write_config_word (pci_bus, devfn, PCI_IO_BASE, temp_byte); return_resource(&(resources->io_head), io_node); } io_node = do_bridge_resource_split(&(temp_resources.io_head), 0x1000); /* Check if we were able to split something off */ if (io_node) { /* First use the temporary node to store * information for the board */ 
hold_IO_node->length = io_node->base - hold_IO_node->base; /* If we used any, add it to the board's list */ if (hold_IO_node->length) { hold_IO_node->next = func->io_head; func->io_head = hold_IO_node; temp_byte = (io_node->base - 1) >> 8; rc = pci_bus_write_config_byte (pci_bus, devfn, PCI_IO_LIMIT, temp_byte); return_resource(&(resources->io_head), io_node); } else { /* it doesn't need any IO */ temp_word = 0x0000; rc = pci_bus_write_config_word (pci_bus, devfn, PCI_IO_LIMIT, temp_word); return_resource(&(resources->io_head), io_node); kfree(hold_IO_node); } } else { /* it used most of the range */ hold_IO_node->next = func->io_head; func->io_head = hold_IO_node; } } else if (hold_IO_node) { /* it used the whole range */ hold_IO_node->next = func->io_head; func->io_head = hold_IO_node; } /* If we have memory space available and there is some left, * return the unused portion */ if (hold_mem_node && temp_resources.mem_head) { mem_node = do_pre_bridge_resource_split(&(temp_resources. mem_head), &hold_mem_node, 0x100000); /* Check if we were able to split something off */ if (mem_node) { hold_mem_node->base = mem_node->base + mem_node->length; temp_word = (hold_mem_node->base) >> 16; rc = pci_bus_write_config_word (pci_bus, devfn, PCI_MEMORY_BASE, temp_word); return_resource(&(resources->mem_head), mem_node); } mem_node = do_bridge_resource_split(&(temp_resources.mem_head), 0x100000); /* Check if we were able to split something off */ if (mem_node) { /* First use the temporary node to store * information for the board */ hold_mem_node->length = mem_node->base - hold_mem_node->base; if (hold_mem_node->length) { hold_mem_node->next = func->mem_head; func->mem_head = hold_mem_node; /* configure end address */ temp_word = (mem_node->base - 1) >> 16; rc = pci_bus_write_config_word (pci_bus, devfn, PCI_MEMORY_LIMIT, temp_word); /* Return unused resources to the pool */ return_resource(&(resources->mem_head), mem_node); } else { /* it doesn't need any Mem */ temp_word = 
0x0000; rc = pci_bus_write_config_word (pci_bus, devfn, PCI_MEMORY_LIMIT, temp_word); return_resource(&(resources->mem_head), mem_node); kfree(hold_mem_node); } } else { /* it used most of the range */ hold_mem_node->next = func->mem_head; func->mem_head = hold_mem_node; } } else if (hold_mem_node) { /* it used the whole range */ hold_mem_node->next = func->mem_head; func->mem_head = hold_mem_node; } /* If we have prefetchable memory space available and there * is some left at the end, return the unused portion */ if (temp_resources.p_mem_head) { p_mem_node = do_pre_bridge_resource_split(&(temp_resources.p_mem_head), &hold_p_mem_node, 0x100000); /* Check if we were able to split something off */ if (p_mem_node) { hold_p_mem_node->base = p_mem_node->base + p_mem_node->length; temp_word = (hold_p_mem_node->base) >> 16; rc = pci_bus_write_config_word (pci_bus, devfn, PCI_PREF_MEMORY_BASE, temp_word); return_resource(&(resources->p_mem_head), p_mem_node); } p_mem_node = do_bridge_resource_split(&(temp_resources.p_mem_head), 0x100000); /* Check if we were able to split something off */ if (p_mem_node) { /* First use the temporary node to store * information for the board */ hold_p_mem_node->length = p_mem_node->base - hold_p_mem_node->base; /* If we used any, add it to the board's list */ if (hold_p_mem_node->length) { hold_p_mem_node->next = func->p_mem_head; func->p_mem_head = hold_p_mem_node; temp_word = (p_mem_node->base - 1) >> 16; rc = pci_bus_write_config_word (pci_bus, devfn, PCI_PREF_MEMORY_LIMIT, temp_word); return_resource(&(resources->p_mem_head), p_mem_node); } else { /* it doesn't need any PMem */ temp_word = 0x0000; rc = pci_bus_write_config_word (pci_bus, devfn, PCI_PREF_MEMORY_LIMIT, temp_word); return_resource(&(resources->p_mem_head), p_mem_node); kfree(hold_p_mem_node); } } else { /* it used the most of the range */ hold_p_mem_node->next = func->p_mem_head; func->p_mem_head = hold_p_mem_node; } } else if (hold_p_mem_node) { /* it used the whole range 
*/ hold_p_mem_node->next = func->p_mem_head; func->p_mem_head = hold_p_mem_node; } /* We should be configuring an IRQ and the bridge's base address * registers if it needs them. Although we have never seen such * a device */ /* enable card */ command = 0x0157; /* = PCI_COMMAND_IO | * PCI_COMMAND_MEMORY | * PCI_COMMAND_MASTER | * PCI_COMMAND_INVALIDATE | * PCI_COMMAND_PARITY | * PCI_COMMAND_SERR */ rc = pci_bus_write_config_word (pci_bus, devfn, PCI_COMMAND, command); /* set Bridge Control Register */ command = 0x07; /* = PCI_BRIDGE_CTL_PARITY | * PCI_BRIDGE_CTL_SERR | * PCI_BRIDGE_CTL_NO_ISA */ rc = pci_bus_write_config_word (pci_bus, devfn, PCI_BRIDGE_CONTROL, command); } else if ((temp_byte & 0x7F) == PCI_HEADER_TYPE_NORMAL) { /* Standard device */ rc = pci_bus_read_config_byte (pci_bus, devfn, 0x0B, &class_code); if (class_code == PCI_BASE_CLASS_DISPLAY) { /* Display (video) adapter (not supported) */ return DEVICE_TYPE_NOT_SUPPORTED; } /* Figure out IO and memory needs */ for (cloop = 0x10; cloop <= 0x24; cloop += 4) { temp_register = 0xFFFFFFFF; dbg("CND: bus=%d, devfn=%d, offset=%d\n", pci_bus->number, devfn, cloop); rc = pci_bus_write_config_dword (pci_bus, devfn, cloop, temp_register); rc = pci_bus_read_config_dword (pci_bus, devfn, cloop, &temp_register); dbg("CND: base = 0x%x\n", temp_register); if (temp_register) { /* If this register is implemented */ if ((temp_register & 0x03L) == 0x01) { /* Map IO */ /* set base = amount of IO space */ base = temp_register & 0xFFFFFFFC; base = ~base + 1; dbg("CND: length = 0x%x\n", base); io_node = get_io_resource(&(resources->io_head), base); dbg("Got io_node start = %8.8x, length = %8.8x next (%p)\n", io_node->base, io_node->length, io_node->next); dbg("func (%p) io_head (%p)\n", func, func->io_head); /* allocate the resource to the board */ if (io_node) { base = io_node->base; io_node->next = func->io_head; func->io_head = io_node; } else return -ENOMEM; } else if ((temp_register & 0x0BL) == 0x08) { /* Map 
prefetchable memory */ base = temp_register & 0xFFFFFFF0; base = ~base + 1; dbg("CND: length = 0x%x\n", base); p_mem_node = get_resource(&(resources->p_mem_head), base); /* allocate the resource to the board */ if (p_mem_node) { base = p_mem_node->base; p_mem_node->next = func->p_mem_head; func->p_mem_head = p_mem_node; } else return -ENOMEM; } else if ((temp_register & 0x0BL) == 0x00) { /* Map memory */ base = temp_register & 0xFFFFFFF0; base = ~base + 1; dbg("CND: length = 0x%x\n", base); mem_node = get_resource(&(resources->mem_head), base); /* allocate the resource to the board */ if (mem_node) { base = mem_node->base; mem_node->next = func->mem_head; func->mem_head = mem_node; } else return -ENOMEM; } else { /* Reserved bits or requesting space below 1M */ return NOT_ENOUGH_RESOURCES; } rc = pci_bus_write_config_dword(pci_bus, devfn, cloop, base); /* Check for 64-bit base */ if ((temp_register & 0x07L) == 0x04) { cloop += 4; /* Upper 32 bits of address always zero * on today's systems */ /* FIXME this is probably not true on * Alpha and ia64??? 
*/ base = 0; rc = pci_bus_write_config_dword(pci_bus, devfn, cloop, base); } } } /* End of base register loop */ if (cpqhp_legacy_mode) { /* Figure out which interrupt pin this function uses */ rc = pci_bus_read_config_byte (pci_bus, devfn, PCI_INTERRUPT_PIN, &temp_byte); /* If this function needs an interrupt and we are behind * a bridge and the pin is tied to something that's * alread mapped, set this one the same */ if (temp_byte && resources->irqs && (resources->irqs->valid_INT & (0x01 << ((temp_byte + resources->irqs->barber_pole - 1) & 0x03)))) { /* We have to share with something already set up */ IRQ = resources->irqs->interrupt[(temp_byte + resources->irqs->barber_pole - 1) & 0x03]; } else { /* Program IRQ based on card type */ rc = pci_bus_read_config_byte (pci_bus, devfn, 0x0B, &class_code); if (class_code == PCI_BASE_CLASS_STORAGE) IRQ = cpqhp_disk_irq; else IRQ = cpqhp_nic_irq; } /* IRQ Line */ rc = pci_bus_write_config_byte (pci_bus, devfn, PCI_INTERRUPT_LINE, IRQ); } if (!behind_bridge) { rc = cpqhp_set_irq(func->bus, func->device, temp_byte, IRQ); if (rc) return 1; } else { /* TBD - this code may also belong in the other clause * of this If statement */ resources->irqs->interrupt[(temp_byte + resources->irqs->barber_pole - 1) & 0x03] = IRQ; resources->irqs->valid_INT |= 0x01 << (temp_byte + resources->irqs->barber_pole - 1) & 0x03; } /* Latency Timer */ temp_byte = 0x40; rc = pci_bus_write_config_byte(pci_bus, devfn, PCI_LATENCY_TIMER, temp_byte); /* Cache Line size */ temp_byte = 0x08; rc = pci_bus_write_config_byte(pci_bus, devfn, PCI_CACHE_LINE_SIZE, temp_byte); /* disable ROM base Address */ temp_dword = 0x00L; rc = pci_bus_write_config_word(pci_bus, devfn, PCI_ROM_ADDRESS, temp_dword); /* enable card */ temp_word = 0x0157; /* = PCI_COMMAND_IO | * PCI_COMMAND_MEMORY | * PCI_COMMAND_MASTER | * PCI_COMMAND_INVALIDATE | * PCI_COMMAND_PARITY | * PCI_COMMAND_SERR */ rc = pci_bus_write_config_word (pci_bus, devfn, PCI_COMMAND, temp_word); } else { /* 
End of Not-A-Bridge else */ /* It's some strange type of PCI adapter (Cardbus?) */ return DEVICE_TYPE_NOT_SUPPORTED; } func->configured = 1; return 0; free_and_out: cpqhp_destroy_resource_list (&temp_resources); return_resource(&(resources-> bus_head), hold_bus_node); return_resource(&(resources-> io_head), hold_IO_node); return_resource(&(resources-> mem_head), hold_mem_node); return_resource(&(resources-> p_mem_head), hold_p_mem_node); return rc; }
gpl-2.0
adeen-s/android_kernel_cyanogen_msm8916
drivers/mtd/mtdsuper.c
2543
5567
/* MTD-based superblock management
 *
 * Copyright © 2001-2007 Red Hat, Inc. All Rights Reserved.
 * Copyright © 2001-2010 David Woodhouse <dwmw2@infradead.org>
 *
 * Written by: David Howells <dhowells@redhat.com>
 *             David Woodhouse <dwmw2@infradead.org>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/mtd/super.h>
#include <linux/namei.h>
#include <linux/export.h>
#include <linux/ctype.h>
#include <linux/slab.h>

/*
 * compare superblocks to see if they're equivalent
 * - they are if the underlying MTD device is the same
 *
 * sget() callback: returns 1 when an existing superblock is already backed
 * by the MTD device passed in @_mtd, 0 otherwise.
 */
static int get_sb_mtd_compare(struct super_block *sb, void *_mtd)
{
	struct mtd_info *mtd = _mtd;

	if (sb->s_mtd == mtd) {
		pr_debug("MTDSB: Match on device %d (\"%s\")\n",
			 mtd->index, mtd->name);
		return 1;
	}

	pr_debug("MTDSB: No match, device %d (\"%s\"), device %d (\"%s\")\n",
		 sb->s_mtd->index, sb->s_mtd->name, mtd->index, mtd->name);
	return 0;
}

/*
 * mark the superblock by the MTD device it is using
 * - set the device number to be the correct MTD block device for persistence
 *   of NFS exports
 *
 * sget() callback: records the backing MTD device on a freshly allocated
 * superblock and borrows the MTD's backing_dev_info for writeback.
 */
static int get_sb_mtd_set(struct super_block *sb, void *_mtd)
{
	struct mtd_info *mtd = _mtd;

	sb->s_mtd = mtd;
	sb->s_dev = MKDEV(MTD_BLOCK_MAJOR, mtd->index);
	sb->s_bdi = mtd->backing_dev_info;
	return 0;
}

/*
 * get a superblock on an MTD-backed filesystem
 *
 * Takes ownership of the reference on @mtd: on every exit path the reference
 * is either kept by the new superblock or dropped via put_mtd_device().
 */
static struct dentry *mount_mtd_aux(struct file_system_type *fs_type, int flags,
			  const char *dev_name, void *data,
			  struct mtd_info *mtd,
			  int (*fill_super)(struct super_block *, void *, int))
{
	struct super_block *sb;
	int ret;

	/* find an existing superblock for this MTD or allocate a new one */
	sb = sget(fs_type, get_sb_mtd_compare, get_sb_mtd_set, flags, mtd);
	if (IS_ERR(sb))
		goto out_error;

	if (sb->s_root)
		goto already_mounted;

	/* fresh new superblock */
	pr_debug("MTDSB: New superblock for device %d (\"%s\")\n",
		 mtd->index, mtd->name);

	ret = fill_super(sb, data, flags & MS_SILENT ? 1 : 0);
	if (ret < 0) {
		/* tears down the superblock; this also releases the MTD ref
		 * taken over by get_sb_mtd_set() */
		deactivate_locked_super(sb);
		return ERR_PTR(ret);
	}

	/* go */
	sb->s_flags |= MS_ACTIVE;
	return dget(sb->s_root);

	/* new mountpoint for an already mounted superblock */
already_mounted:
	pr_debug("MTDSB: Device %d (\"%s\") is already mounted\n",
		 mtd->index, mtd->name);
	/* the existing superblock already holds its own reference */
	put_mtd_device(mtd);
	return dget(sb->s_root);

out_error:
	put_mtd_device(mtd);
	return ERR_CAST(sb);
}

/*
 * get a superblock on an MTD-backed filesystem by MTD device number
 */
static struct dentry *mount_mtd_nr(struct file_system_type *fs_type, int flags,
			 const char *dev_name, void *data, int mtdnr,
			 int (*fill_super)(struct super_block *, void *, int))
{
	struct mtd_info *mtd;

	mtd = get_mtd_device(NULL, mtdnr);
	if (IS_ERR(mtd)) {
		pr_debug("MTDSB: Device #%u doesn't appear to exist\n", mtdnr);
		return ERR_CAST(mtd);
	}

	/* mount_mtd_aux() consumes the reference obtained above */
	return mount_mtd_aux(fs_type, flags, dev_name, data, mtd, fill_super);
}

/*
 * set up an MTD-based superblock
 *
 * Accepts device names of the forms "mtd:<name>", "mtd<N>", and (when
 * CONFIG_BLOCK is set) "/dev/mtdblockN"-style block device paths whose
 * major is MTD_BLOCK_MAJOR.  Returns the root dentry or an ERR_PTR.
 */
struct dentry *mount_mtd(struct file_system_type *fs_type, int flags,
	       const char *dev_name, void *data,
	       int (*fill_super)(struct super_block *, void *, int))
{
#ifdef CONFIG_BLOCK
	struct block_device *bdev;
	int ret, major;
#endif
	int mtdnr;

	if (!dev_name)
		return ERR_PTR(-EINVAL);

	pr_debug("MTDSB: dev_name \"%s\"\n", dev_name);

	/* the preferred way of mounting in future; especially when
	 * CONFIG_BLOCK=n - we specify the underlying MTD device by number or
	 * by name, so that we don't require block device support to be present
	 * in the kernel.
	 */
	if (dev_name[0] == 'm' && dev_name[1] == 't' && dev_name[2] == 'd') {
		if (dev_name[3] == ':') {
			struct mtd_info *mtd;

			/* mount by MTD device name */
			pr_debug("MTDSB: mtd:%%s, name \"%s\"\n",
				 dev_name + 4);

			mtd = get_mtd_device_nm(dev_name + 4);
			if (!IS_ERR(mtd))
				return mount_mtd_aux(
					fs_type, flags,
					dev_name, data, mtd,
					fill_super);

			printk(KERN_NOTICE "MTD:"
			       " MTD device with name \"%s\" not found.\n",
			       dev_name + 4);

		} else if (isdigit(dev_name[3])) {
			/* mount by MTD device number name */
			char *endptr;

			mtdnr = simple_strtoul(dev_name + 3, &endptr, 0);
			if (!*endptr) {
				/* It was a valid number */
				pr_debug("MTDSB: mtd%%d, mtdnr %d\n", mtdnr);
				return mount_mtd_nr(fs_type, flags,
						     dev_name, data,
						     mtdnr, fill_super);
			}
		}
	}

#ifdef CONFIG_BLOCK
	/* try the old way - the hack where we allowed users to mount
	 * /dev/mtdblock$(n) but didn't actually _use_ the blockdev
	 */
	bdev = lookup_bdev(dev_name);
	if (IS_ERR(bdev)) {
		ret = PTR_ERR(bdev);
		pr_debug("MTDSB: lookup_bdev() returned %d\n", ret);
		return ERR_PTR(ret);
	}
	pr_debug("MTDSB: lookup_bdev() returned 0\n");

	ret = -EINVAL;

	/* only the device numbers are used; the blockdev itself is dropped */
	major = MAJOR(bdev->bd_dev);
	mtdnr = MINOR(bdev->bd_dev);
	bdput(bdev);

	if (major != MTD_BLOCK_MAJOR)
		goto not_an_MTD_device;

	return mount_mtd_nr(fs_type, flags, dev_name, data, mtdnr, fill_super);

not_an_MTD_device:
#endif /* CONFIG_BLOCK */

	if (!(flags & MS_SILENT))
		printk(KERN_NOTICE "MTD: Attempt to mount non-MTD device \"%s\"\n", dev_name);
	return ERR_PTR(-EINVAL);
}

EXPORT_SYMBOL_GPL(mount_mtd);

/*
 * destroy an MTD-based superblock
 *
 * Counterpart of mount_mtd(): shuts the superblock down and releases the
 * MTD device reference recorded by get_sb_mtd_set().
 */
void kill_mtd_super(struct super_block *sb)
{
	generic_shutdown_super(sb);
	put_mtd_device(sb->s_mtd);
	sb->s_mtd = NULL;
}

EXPORT_SYMBOL_GPL(kill_mtd_super);
gpl-2.0
mint2g/stock-kernel
net/sched/act_skbedit.c
2543
5837
/*
 * Copyright (c) 2008, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Author: Alexander Duyck <alexander.h.duyck@intel.com>
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>

#include <linux/tc_act/tc_skbedit.h>
#include <net/tc_act/tc_skbedit.h>

/* hash-table mask for the action index table (table size is MASK + 1) */
#define SKBEDIT_TAB_MASK     15
static struct tcf_common *tcf_skbedit_ht[SKBEDIT_TAB_MASK + 1];
static u32 skbedit_idx_gen;
static DEFINE_RWLOCK(skbedit_lock);

/* shared bookkeeping used by the generic tcf_hash_* helpers */
static struct tcf_hashinfo skbedit_hash_info = {
	.htab	=	tcf_skbedit_ht,
	.hmask	=	SKBEDIT_TAB_MASK,
	.lock	=	&skbedit_lock,
};

/*
 * Per-packet action: rewrite skb->priority, the TX queue mapping and/or
 * skb->mark according to which SKBEDIT_F_* flags were configured.
 * Returns the configured tc action verdict (d->tcf_action).
 */
static int tcf_skbedit(struct sk_buff *skb, struct tc_action *a,
		       struct tcf_result *res)
{
	struct tcf_skbedit *d = a->priv;

	spin_lock(&d->tcf_lock);
	d->tcf_tm.lastuse = jiffies;
	bstats_update(&d->tcf_bstats, skb);

	if (d->flags & SKBEDIT_F_PRIORITY)
		skb->priority = d->priority;
	/* only redirect the queue if the requested queue actually exists */
	if (d->flags & SKBEDIT_F_QUEUE_MAPPING &&
	    skb->dev->real_num_tx_queues > d->queue_mapping)
		skb_set_queue_mapping(skb, d->queue_mapping);
	if (d->flags & SKBEDIT_F_MARK)
		skb->mark = d->mark;

	spin_unlock(&d->tcf_lock);
	return d->tcf_action;
}

/* netlink attribute validation policy for TCA_SKBEDIT_* attributes */
static const struct nla_policy skbedit_policy[TCA_SKBEDIT_MAX + 1] = {
	[TCA_SKBEDIT_PARMS]		= { .len = sizeof(struct tc_skbedit) },
	[TCA_SKBEDIT_PRIORITY]		= { .len = sizeof(u32) },
	[TCA_SKBEDIT_QUEUE_MAPPING]	= { .len = sizeof(u16) },
	[TCA_SKBEDIT_MARK]		= { .len = sizeof(u32) },
};

/*
 * Create a new skbedit action instance or update an existing one from a
 * netlink request.  At least one of priority / queue_mapping / mark must be
 * supplied.  Returns ACT_P_CREATED for a new instance, 0 on update, or a
 * negative errno (-EEXIST when the index exists and replace wasn't allowed).
 */
static int tcf_skbedit_init(struct nlattr *nla, struct nlattr *est,
			    struct tc_action *a, int ovr, int bind)
{
	struct nlattr *tb[TCA_SKBEDIT_MAX + 1];
	struct tc_skbedit *parm;
	struct tcf_skbedit *d;
	struct tcf_common *pc;
	u32 flags = 0, *priority = NULL, *mark = NULL;
	u16 *queue_mapping = NULL;
	int ret = 0, err;

	if (nla == NULL)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_SKBEDIT_MAX, nla, skbedit_policy);
	if (err < 0)
		return err;

	if (tb[TCA_SKBEDIT_PARMS] == NULL)
		return -EINVAL;

	/* collect pointers into the attribute payloads; flags records which
	 * fields the request actually carries */
	if (tb[TCA_SKBEDIT_PRIORITY] != NULL) {
		flags |= SKBEDIT_F_PRIORITY;
		priority = nla_data(tb[TCA_SKBEDIT_PRIORITY]);
	}

	if (tb[TCA_SKBEDIT_QUEUE_MAPPING] != NULL) {
		flags |= SKBEDIT_F_QUEUE_MAPPING;
		queue_mapping = nla_data(tb[TCA_SKBEDIT_QUEUE_MAPPING]);
	}

	if (tb[TCA_SKBEDIT_MARK] != NULL) {
		flags |= SKBEDIT_F_MARK;
		mark = nla_data(tb[TCA_SKBEDIT_MARK]);
	}

	if (!flags)
		return -EINVAL;

	parm = nla_data(tb[TCA_SKBEDIT_PARMS]);

	pc = tcf_hash_check(parm->index, a, bind, &skbedit_hash_info);
	if (!pc) {
		/* no action with this index yet: allocate and register one */
		pc = tcf_hash_create(parm->index, est, a, sizeof(*d), bind,
				     &skbedit_idx_gen, &skbedit_hash_info);
		if (IS_ERR(pc))
			return PTR_ERR(pc);

		d = to_skbedit(pc);
		ret = ACT_P_CREATED;
	} else {
		d = to_skbedit(pc);
		if (!ovr) {
			/* existing index and replace not requested */
			tcf_hash_release(pc, bind, &skbedit_hash_info);
			return -EEXIST;
		}
	}

	/* publish the new configuration atomically w.r.t. tcf_skbedit() */
	spin_lock_bh(&d->tcf_lock);

	d->flags = flags;
	if (flags & SKBEDIT_F_PRIORITY)
		d->priority = *priority;
	if (flags & SKBEDIT_F_QUEUE_MAPPING)
		d->queue_mapping = *queue_mapping;
	if (flags & SKBEDIT_F_MARK)
		d->mark = *mark;

	d->tcf_action = parm->action;

	spin_unlock_bh(&d->tcf_lock);

	if (ret == ACT_P_CREATED)
		tcf_hash_insert(pc, &skbedit_hash_info);
	return ret;
}

/* drop a (bind) reference; frees the action when the refcount hits zero */
static int tcf_skbedit_cleanup(struct tc_action *a, int bind)
{
	struct tcf_skbedit *d = a->priv;

	if (d)
		return tcf_hash_release(&d->common, bind, &skbedit_hash_info);
	return 0;
}

/*
 * Dump the action's configuration and timing info into a netlink message.
 * Returns the new message length, or -1 (after trimming the partial output)
 * if the skb ran out of room — NLA_PUT() jumps to nla_put_failure on error.
 */
static int tcf_skbedit_dump(struct sk_buff *skb, struct tc_action *a,
			    int bind, int ref)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_skbedit *d = a->priv;
	struct tc_skbedit opt = {
		.index   = d->tcf_index,
		.refcnt  = d->tcf_refcnt - ref,
		.bindcnt = d->tcf_bindcnt - bind,
		.action  = d->tcf_action,
	};
	struct tcf_t t;

	NLA_PUT(skb, TCA_SKBEDIT_PARMS, sizeof(opt), &opt);
	if (d->flags & SKBEDIT_F_PRIORITY)
		NLA_PUT(skb, TCA_SKBEDIT_PRIORITY, sizeof(d->priority),
			&d->priority);
	if (d->flags & SKBEDIT_F_QUEUE_MAPPING)
		NLA_PUT(skb, TCA_SKBEDIT_QUEUE_MAPPING,
			sizeof(d->queue_mapping), &d->queue_mapping);
	if (d->flags & SKBEDIT_F_MARK)
		NLA_PUT(skb, TCA_SKBEDIT_MARK, sizeof(d->mark),
			&d->mark);
	/* report times relative to now, converted to clock ticks */
	t.install = jiffies_to_clock_t(jiffies - d->tcf_tm.install);
	t.lastuse = jiffies_to_clock_t(jiffies - d->tcf_tm.lastuse);
	t.expires = jiffies_to_clock_t(d->tcf_tm.expires);
	NLA_PUT(skb, TCA_SKBEDIT_TM, sizeof(t), &t);
	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

/* tc action operations table registered with the act API */
static struct tc_action_ops act_skbedit_ops = {
	.kind		=	"skbedit",
	.hinfo		=	&skbedit_hash_info,
	.type		=	TCA_ACT_SKBEDIT,
	.capab		=	TCA_CAP_NONE,
	.owner		=	THIS_MODULE,
	.act		=	tcf_skbedit,
	.dump		=	tcf_skbedit_dump,
	.cleanup	=	tcf_skbedit_cleanup,
	.init		=	tcf_skbedit_init,
	.walk		=	tcf_generic_walker,
};

MODULE_AUTHOR("Alexander Duyck, <alexander.h.duyck@intel.com>");
MODULE_DESCRIPTION("SKB Editing");
MODULE_LICENSE("GPL");

/* register the action kind with the traffic-control core */
static int __init skbedit_init_module(void)
{
	return tcf_register_action(&act_skbedit_ops);
}

static void __exit skbedit_cleanup_module(void)
{
	tcf_unregister_action(&act_skbedit_ops);
}

module_init(skbedit_init_module);
module_exit(skbedit_cleanup_module);
gpl-2.0
MaxiCM/android_kernel_motorola_msm8916
drivers/media/usb/em28xx/em28xx-audio.c
2543
19451
/* * Empiatech em28x1 audio extension * * Copyright (C) 2006 Markus Rechberger <mrechberger@gmail.com> * * Copyright (C) 2007-2011 Mauro Carvalho Chehab <mchehab@redhat.com> * - Port to work with the in-kernel driver * - Cleanups, fixes, alsa-controls, etc. * * This driver is based on my previous au600 usb pstn audio driver * and inherits all the copyrights * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/kernel.h> #include <linux/usb.h> #include <linux/init.h> #include <linux/sound.h> #include <linux/spinlock.h> #include <linux/soundcard.h> #include <linux/slab.h> #include <linux/vmalloc.h> #include <linux/proc_fs.h> #include <linux/module.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include <sound/info.h> #include <sound/initval.h> #include <sound/control.h> #include <sound/tlv.h> #include <sound/ac97_codec.h> #include <media/v4l2-common.h> #include "em28xx.h" static int debug; module_param(debug, int, 0644); MODULE_PARM_DESC(debug, "activates debug info"); #define dprintk(fmt, arg...) 
do { \ if (debug) \ printk(KERN_INFO "em28xx-audio %s: " fmt, \ __func__, ##arg); \ } while (0) static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; static int em28xx_deinit_isoc_audio(struct em28xx *dev) { int i; dprintk("Stopping isoc\n"); for (i = 0; i < EM28XX_AUDIO_BUFS; i++) { if (!irqs_disabled()) usb_kill_urb(dev->adev.urb[i]); else usb_unlink_urb(dev->adev.urb[i]); usb_free_urb(dev->adev.urb[i]); dev->adev.urb[i] = NULL; kfree(dev->adev.transfer_buffer[i]); dev->adev.transfer_buffer[i] = NULL; } return 0; } static void em28xx_audio_isocirq(struct urb *urb) { struct em28xx *dev = urb->context; int i; unsigned int oldptr; int period_elapsed = 0; int status; unsigned char *cp; unsigned int stride; struct snd_pcm_substream *substream; struct snd_pcm_runtime *runtime; switch (urb->status) { case 0: /* success */ case -ETIMEDOUT: /* NAK */ break; case -ECONNRESET: /* kill */ case -ENOENT: case -ESHUTDOWN: return; default: /* error */ dprintk("urb completition error %d.\n", urb->status); break; } if (atomic_read(&dev->stream_started) == 0) return; if (dev->adev.capture_pcm_substream) { substream = dev->adev.capture_pcm_substream; runtime = substream->runtime; stride = runtime->frame_bits >> 3; for (i = 0; i < urb->number_of_packets; i++) { int length = urb->iso_frame_desc[i].actual_length / stride; cp = (unsigned char *)urb->transfer_buffer + urb->iso_frame_desc[i].offset; if (!length) continue; oldptr = dev->adev.hwptr_done_capture; if (oldptr + length >= runtime->buffer_size) { unsigned int cnt = runtime->buffer_size - oldptr; memcpy(runtime->dma_area + oldptr * stride, cp, cnt * stride); memcpy(runtime->dma_area, cp + cnt * stride, length * stride - cnt * stride); } else { memcpy(runtime->dma_area + oldptr * stride, cp, length * stride); } snd_pcm_stream_lock(substream); dev->adev.hwptr_done_capture += length; if (dev->adev.hwptr_done_capture >= runtime->buffer_size) dev->adev.hwptr_done_capture -= runtime->buffer_size; dev->adev.capture_transfer_done += length; 
if (dev->adev.capture_transfer_done >= runtime->period_size) { dev->adev.capture_transfer_done -= runtime->period_size; period_elapsed = 1; } snd_pcm_stream_unlock(substream); } if (period_elapsed) snd_pcm_period_elapsed(substream); } urb->status = 0; status = usb_submit_urb(urb, GFP_ATOMIC); if (status < 0) { em28xx_errdev("resubmit of audio urb failed (error=%i)\n", status); } return; } static int em28xx_init_audio_isoc(struct em28xx *dev) { int i, errCode; const int sb_size = EM28XX_NUM_AUDIO_PACKETS * EM28XX_AUDIO_MAX_PACKET_SIZE; dprintk("Starting isoc transfers\n"); for (i = 0; i < EM28XX_AUDIO_BUFS; i++) { struct urb *urb; int j, k; dev->adev.transfer_buffer[i] = kmalloc(sb_size, GFP_ATOMIC); if (!dev->adev.transfer_buffer[i]) return -ENOMEM; memset(dev->adev.transfer_buffer[i], 0x80, sb_size); urb = usb_alloc_urb(EM28XX_NUM_AUDIO_PACKETS, GFP_ATOMIC); if (!urb) { em28xx_errdev("usb_alloc_urb failed!\n"); for (j = 0; j < i; j++) { usb_free_urb(dev->adev.urb[j]); kfree(dev->adev.transfer_buffer[j]); } return -ENOMEM; } urb->dev = dev->udev; urb->context = dev; urb->pipe = usb_rcvisocpipe(dev->udev, EM28XX_EP_AUDIO); urb->transfer_flags = URB_ISO_ASAP; urb->transfer_buffer = dev->adev.transfer_buffer[i]; urb->interval = 1; urb->complete = em28xx_audio_isocirq; urb->number_of_packets = EM28XX_NUM_AUDIO_PACKETS; urb->transfer_buffer_length = sb_size; for (j = k = 0; j < EM28XX_NUM_AUDIO_PACKETS; j++, k += EM28XX_AUDIO_MAX_PACKET_SIZE) { urb->iso_frame_desc[j].offset = k; urb->iso_frame_desc[j].length = EM28XX_AUDIO_MAX_PACKET_SIZE; } dev->adev.urb[i] = urb; } for (i = 0; i < EM28XX_AUDIO_BUFS; i++) { errCode = usb_submit_urb(dev->adev.urb[i], GFP_ATOMIC); if (errCode) { em28xx_errdev("submit of audio urb failed\n"); em28xx_deinit_isoc_audio(dev); atomic_set(&dev->stream_started, 0); return errCode; } } return 0; } static int snd_pcm_alloc_vmalloc_buffer(struct snd_pcm_substream *subs, size_t size) { struct snd_pcm_runtime *runtime = subs->runtime; 
dprintk("Allocating vbuffer\n"); if (runtime->dma_area) { if (runtime->dma_bytes > size) return 0; vfree(runtime->dma_area); } runtime->dma_area = vmalloc(size); if (!runtime->dma_area) return -ENOMEM; runtime->dma_bytes = size; return 0; } static struct snd_pcm_hardware snd_em28xx_hw_capture = { .info = SNDRV_PCM_INFO_BLOCK_TRANSFER | SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BATCH | SNDRV_PCM_INFO_MMAP_VALID, .formats = SNDRV_PCM_FMTBIT_S16_LE, .rates = SNDRV_PCM_RATE_CONTINUOUS | SNDRV_PCM_RATE_KNOT, .rate_min = 48000, .rate_max = 48000, .channels_min = 2, .channels_max = 2, .buffer_bytes_max = 62720 * 8, /* just about the value in usbaudio.c */ .period_bytes_min = 64, /* 12544/2, */ .period_bytes_max = 12544, .periods_min = 2, .periods_max = 98, /* 12544, */ }; static int snd_em28xx_capture_open(struct snd_pcm_substream *substream) { struct em28xx *dev = snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = substream->runtime; int ret = 0; dprintk("opening device and trying to acquire exclusive lock\n"); if (!dev) { em28xx_err("BUG: em28xx can't find device struct." 
" Can't proceed with open\n"); return -ENODEV; } runtime->hw = snd_em28xx_hw_capture; if ((dev->alt == 0 || dev->audio_ifnum) && dev->adev.users == 0) { if (dev->audio_ifnum) dev->alt = 1; else dev->alt = 7; dprintk("changing alternate number on interface %d to %d\n", dev->audio_ifnum, dev->alt); usb_set_interface(dev->udev, dev->audio_ifnum, dev->alt); /* Sets volume, mute, etc */ dev->mute = 0; mutex_lock(&dev->lock); ret = em28xx_audio_analog_set(dev); if (ret < 0) goto err; dev->adev.users++; mutex_unlock(&dev->lock); } snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS); dev->adev.capture_pcm_substream = substream; return 0; err: mutex_unlock(&dev->lock); em28xx_err("Error while configuring em28xx mixer\n"); return ret; } static int snd_em28xx_pcm_close(struct snd_pcm_substream *substream) { struct em28xx *dev = snd_pcm_substream_chip(substream); dprintk("closing device\n"); dev->mute = 1; mutex_lock(&dev->lock); dev->adev.users--; if (atomic_read(&dev->stream_started) > 0) { atomic_set(&dev->stream_started, 0); schedule_work(&dev->wq_trigger); } em28xx_audio_analog_set(dev); if (substream->runtime->dma_area) { dprintk("freeing\n"); vfree(substream->runtime->dma_area); substream->runtime->dma_area = NULL; } mutex_unlock(&dev->lock); return 0; } static int snd_em28xx_hw_capture_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *hw_params) { int ret; dprintk("Setting capture parameters\n"); ret = snd_pcm_alloc_vmalloc_buffer(substream, params_buffer_bytes(hw_params)); if (ret < 0) return ret; #if 0 /* TODO: set up em28xx audio chip to deliver the correct audio format, current default is 48000hz multiplexed => 96000hz mono which shouldn't matter since analogue TV only supports mono */ unsigned int channels, rate, format; format = params_format(hw_params); rate = params_rate(hw_params); channels = params_channels(hw_params); #endif return 0; } static int snd_em28xx_hw_capture_free(struct snd_pcm_substream *substream) { struct 
em28xx *dev = snd_pcm_substream_chip(substream); dprintk("Stop capture, if needed\n"); if (atomic_read(&dev->stream_started) > 0) { atomic_set(&dev->stream_started, 0); schedule_work(&dev->wq_trigger); } return 0; } static int snd_em28xx_prepare(struct snd_pcm_substream *substream) { struct em28xx *dev = snd_pcm_substream_chip(substream); dev->adev.hwptr_done_capture = 0; dev->adev.capture_transfer_done = 0; return 0; } static void audio_trigger(struct work_struct *work) { struct em28xx *dev = container_of(work, struct em28xx, wq_trigger); if (atomic_read(&dev->stream_started)) { dprintk("starting capture"); em28xx_init_audio_isoc(dev); } else { dprintk("stopping capture"); em28xx_deinit_isoc_audio(dev); } } static int snd_em28xx_capture_trigger(struct snd_pcm_substream *substream, int cmd) { struct em28xx *dev = snd_pcm_substream_chip(substream); int retval = 0; switch (cmd) { case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: /* fall through */ case SNDRV_PCM_TRIGGER_RESUME: /* fall through */ case SNDRV_PCM_TRIGGER_START: atomic_set(&dev->stream_started, 1); break; case SNDRV_PCM_TRIGGER_PAUSE_PUSH: /* fall through */ case SNDRV_PCM_TRIGGER_SUSPEND: /* fall through */ case SNDRV_PCM_TRIGGER_STOP: atomic_set(&dev->stream_started, 0); break; default: retval = -EINVAL; } schedule_work(&dev->wq_trigger); return retval; } static snd_pcm_uframes_t snd_em28xx_capture_pointer(struct snd_pcm_substream *substream) { unsigned long flags; struct em28xx *dev; snd_pcm_uframes_t hwptr_done; dev = snd_pcm_substream_chip(substream); spin_lock_irqsave(&dev->adev.slock, flags); hwptr_done = dev->adev.hwptr_done_capture; spin_unlock_irqrestore(&dev->adev.slock, flags); return hwptr_done; } static struct page *snd_pcm_get_vmalloc_page(struct snd_pcm_substream *subs, unsigned long offset) { void *pageptr = subs->runtime->dma_area + offset; return vmalloc_to_page(pageptr); } /* * AC97 volume control support */ static int em28xx_vol_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info 
*info) { info->type = SNDRV_CTL_ELEM_TYPE_INTEGER; info->count = 2; info->value.integer.min = 0; info->value.integer.max = 0x1f; return 0; } static int em28xx_vol_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *value) { struct em28xx *dev = snd_kcontrol_chip(kcontrol); u16 val = (0x1f - (value->value.integer.value[0] & 0x1f)) | (0x1f - (value->value.integer.value[1] & 0x1f)) << 8; int rc; mutex_lock(&dev->lock); rc = em28xx_read_ac97(dev, kcontrol->private_value); if (rc < 0) goto err; val |= rc & 0x8000; /* Preserve the mute flag */ rc = em28xx_write_ac97(dev, kcontrol->private_value, val); if (rc < 0) goto err; dprintk("%sleft vol %d, right vol %d (0x%04x) to ac97 volume control 0x%04x\n", (val & 0x8000) ? "muted " : "", 0x1f - ((val >> 8) & 0x1f), 0x1f - (val & 0x1f), val, (int)kcontrol->private_value); err: mutex_unlock(&dev->lock); return rc; } static int em28xx_vol_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *value) { struct em28xx *dev = snd_kcontrol_chip(kcontrol); int val; mutex_lock(&dev->lock); val = em28xx_read_ac97(dev, kcontrol->private_value); mutex_unlock(&dev->lock); if (val < 0) return val; dprintk("%sleft vol %d, right vol %d (0x%04x) from ac97 volume control 0x%04x\n", (val & 0x8000) ? "muted " : "", 0x1f - ((val >> 8) & 0x1f), 0x1f - (val & 0x1f), val, (int)kcontrol->private_value); value->value.integer.value[0] = 0x1f - (val & 0x1f); value->value.integer.value[1] = 0x1f - ((val << 8) & 0x1f); return 0; } static int em28xx_vol_put_mute(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *value) { struct em28xx *dev = snd_kcontrol_chip(kcontrol); u16 val = value->value.integer.value[0]; int rc; mutex_lock(&dev->lock); rc = em28xx_read_ac97(dev, kcontrol->private_value); if (rc < 0) goto err; if (val) rc &= 0x1f1f; else rc |= 0x8000; rc = em28xx_write_ac97(dev, kcontrol->private_value, rc); if (rc < 0) goto err; dprintk("%sleft vol %d, right vol %d (0x%04x) to ac97 volume control 0x%04x\n", (val & 0x8000) ? 
"muted " : "", 0x1f - ((val >> 8) & 0x1f), 0x1f - (val & 0x1f), val, (int)kcontrol->private_value); err: mutex_unlock(&dev->lock); return rc; } static int em28xx_vol_get_mute(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *value) { struct em28xx *dev = snd_kcontrol_chip(kcontrol); int val; mutex_lock(&dev->lock); val = em28xx_read_ac97(dev, kcontrol->private_value); mutex_unlock(&dev->lock); if (val < 0) return val; if (val & 0x8000) value->value.integer.value[0] = 0; else value->value.integer.value[0] = 1; dprintk("%sleft vol %d, right vol %d (0x%04x) from ac97 volume control 0x%04x\n", (val & 0x8000) ? "muted " : "", 0x1f - ((val >> 8) & 0x1f), 0x1f - (val & 0x1f), val, (int)kcontrol->private_value); return 0; } static const DECLARE_TLV_DB_SCALE(em28xx_db_scale, -3450, 150, 0); static int em28xx_cvol_new(struct snd_card *card, struct em28xx *dev, char *name, int id) { int err; char ctl_name[44]; struct snd_kcontrol *kctl; struct snd_kcontrol_new tmp; memset (&tmp, 0, sizeof(tmp)); tmp.iface = SNDRV_CTL_ELEM_IFACE_MIXER, tmp.private_value = id, tmp.name = ctl_name, /* Add Mute Control */ sprintf(ctl_name, "%s Switch", name); tmp.get = em28xx_vol_get_mute; tmp.put = em28xx_vol_put_mute; tmp.info = snd_ctl_boolean_mono_info; kctl = snd_ctl_new1(&tmp, dev); err = snd_ctl_add(card, kctl); if (err < 0) return err; dprintk("Added control %s for ac97 volume control 0x%04x\n", ctl_name, id); memset (&tmp, 0, sizeof(tmp)); tmp.iface = SNDRV_CTL_ELEM_IFACE_MIXER, tmp.private_value = id, tmp.name = ctl_name, /* Add Volume Control */ sprintf(ctl_name, "%s Volume", name); tmp.get = em28xx_vol_get; tmp.put = em28xx_vol_put; tmp.info = em28xx_vol_info; tmp.tlv.p = em28xx_db_scale, kctl = snd_ctl_new1(&tmp, dev); err = snd_ctl_add(card, kctl); if (err < 0) return err; dprintk("Added control %s for ac97 volume control 0x%04x\n", ctl_name, id); return 0; } /* * register/unregister code and data */ static struct snd_pcm_ops snd_em28xx_pcm_capture = { .open = 
snd_em28xx_capture_open, .close = snd_em28xx_pcm_close, .ioctl = snd_pcm_lib_ioctl, .hw_params = snd_em28xx_hw_capture_params, .hw_free = snd_em28xx_hw_capture_free, .prepare = snd_em28xx_prepare, .trigger = snd_em28xx_capture_trigger, .pointer = snd_em28xx_capture_pointer, .page = snd_pcm_get_vmalloc_page, }; static int em28xx_audio_init(struct em28xx *dev) { struct em28xx_audio *adev = &dev->adev; struct snd_pcm *pcm; struct snd_card *card; static int devnr; int err; if (!dev->has_alsa_audio || dev->audio_ifnum < 0) { /* This device does not support the extension (in this case the device is expecting the snd-usb-audio module or doesn't have analog audio support at all) */ return 0; } printk(KERN_INFO "em28xx-audio.c: probing for em28xx Audio Vendor Class\n"); printk(KERN_INFO "em28xx-audio.c: Copyright (C) 2006 Markus " "Rechberger\n"); printk(KERN_INFO "em28xx-audio.c: Copyright (C) 2007-2011 Mauro Carvalho Chehab\n"); err = snd_card_create(index[devnr], "Em28xx Audio", THIS_MODULE, 0, &card); if (err < 0) return err; spin_lock_init(&adev->slock); err = snd_pcm_new(card, "Em28xx Audio", 0, 0, 1, &pcm); if (err < 0) { snd_card_free(card); return err; } snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &snd_em28xx_pcm_capture); pcm->info_flags = 0; pcm->private_data = dev; strcpy(pcm->name, "Empia 28xx Capture"); snd_card_set_dev(card, &dev->udev->dev); strcpy(card->driver, "Em28xx-Audio"); strcpy(card->shortname, "Em28xx Audio"); strcpy(card->longname, "Empia Em28xx Audio"); INIT_WORK(&dev->wq_trigger, audio_trigger); if (dev->audio_mode.ac97 != EM28XX_NO_AC97) { em28xx_cvol_new(card, dev, "Video", AC97_VIDEO); em28xx_cvol_new(card, dev, "Line In", AC97_LINE); em28xx_cvol_new(card, dev, "Phone", AC97_PHONE); em28xx_cvol_new(card, dev, "Microphone", AC97_MIC); em28xx_cvol_new(card, dev, "CD", AC97_CD); em28xx_cvol_new(card, dev, "AUX", AC97_AUX); em28xx_cvol_new(card, dev, "PCM", AC97_PCM); em28xx_cvol_new(card, dev, "Master", AC97_MASTER); em28xx_cvol_new(card, dev, 
"Line", AC97_HEADPHONE); em28xx_cvol_new(card, dev, "Mono", AC97_MASTER_MONO); em28xx_cvol_new(card, dev, "LFE", AC97_CENTER_LFE_MASTER); em28xx_cvol_new(card, dev, "Surround", AC97_SURROUND_MASTER); } err = snd_card_register(card); if (err < 0) { snd_card_free(card); return err; } adev->sndcard = card; adev->udev = dev->udev; return 0; } static int em28xx_audio_fini(struct em28xx *dev) { if (dev == NULL) return 0; if (dev->has_alsa_audio != 1) { /* This device does not support the extension (in this case the device is expecting the snd-usb-audio module or doesn't have analog audio support at all) */ return 0; } if (dev->adev.sndcard) { snd_card_free(dev->adev.sndcard); dev->adev.sndcard = NULL; } return 0; } static struct em28xx_ops audio_ops = { .id = EM28XX_AUDIO, .name = "Em28xx Audio Extension", .init = em28xx_audio_init, .fini = em28xx_audio_fini, }; static int __init em28xx_alsa_register(void) { return em28xx_register_extension(&audio_ops); } static void __exit em28xx_alsa_unregister(void) { em28xx_unregister_extension(&audio_ops); } MODULE_LICENSE("GPL"); MODULE_AUTHOR("Markus Rechberger <mrechberger@gmail.com>"); MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>"); MODULE_DESCRIPTION("Em28xx Audio driver"); module_init(em28xx_alsa_register); module_exit(em28xx_alsa_unregister);
gpl-2.0
eagle0422/eagle-huanyi-linux3.3.0
arch/arm/mach-imx/devices/platform-sdhci-esdhc-imx.c
2799
3107
/*
 * Copyright (C) 2010 Pengutronix, Wolfram Sang <w.sang@pengutronix.de>
 *
 * This program is free software; you can redistribute it and/or modify it under
 * the terms of the GNU General Public License version 2 as published by the
 * Free Software Foundation.
 */
#include <linux/platform_data/mmc-esdhc-imx.h>
#include "../hardware.h"
#include "devices-common.h"

/*
 * Build one imx_sdhci_esdhc_imx_data initializer: token-paste the SoC
 * prefix with the hardware instance number to pick up the per-SoC
 * ESDHC base address and interrupt constants.
 * NOTE(review): _id is the 0-based platform device id, hwid the 1-based
 * hardware instance as named in the SoC headers.
 */
#define imx_sdhci_esdhc_imx_data_entry_single(soc, _devid, _id, hwid) \
	{ \
		.devid = _devid, \
		.id = _id, \
		.iobase = soc ## _ESDHC ## hwid ## _BASE_ADDR, \
		.irq = soc ## _INT_ESDHC ## hwid, \
	}

#define imx_sdhci_esdhc_imx_data_entry(soc, devid, id, hwid)	\
	[id] = imx_sdhci_esdhc_imx_data_entry_single(soc, devid, id, hwid)

#ifdef CONFIG_SOC_IMX25
/* i.MX25: two ESDHC instances */
const struct imx_sdhci_esdhc_imx_data
imx25_sdhci_esdhc_imx_data[] __initconst = {
#define imx25_sdhci_esdhc_imx_data_entry(_id, _hwid)	\
	imx_sdhci_esdhc_imx_data_entry(MX25, "sdhci-esdhc-imx25", _id, _hwid)
	imx25_sdhci_esdhc_imx_data_entry(0, 1),
	imx25_sdhci_esdhc_imx_data_entry(1, 2),
};
#endif /* ifdef CONFIG_SOC_IMX25 */

#ifdef CONFIG_SOC_IMX35
/* i.MX35: three ESDHC instances */
const struct imx_sdhci_esdhc_imx_data
imx35_sdhci_esdhc_imx_data[] __initconst = {
#define imx35_sdhci_esdhc_imx_data_entry(_id, _hwid)	\
	imx_sdhci_esdhc_imx_data_entry(MX35, "sdhci-esdhc-imx35", _id, _hwid)
	imx35_sdhci_esdhc_imx_data_entry(0, 1),
	imx35_sdhci_esdhc_imx_data_entry(1, 2),
	imx35_sdhci_esdhc_imx_data_entry(2, 3),
};
#endif /* ifdef CONFIG_SOC_IMX35 */

#ifdef CONFIG_SOC_IMX51
/* i.MX51: four ESDHC instances */
const struct imx_sdhci_esdhc_imx_data
imx51_sdhci_esdhc_imx_data[] __initconst = {
#define imx51_sdhci_esdhc_imx_data_entry(_id, _hwid)	\
	imx_sdhci_esdhc_imx_data_entry(MX51, "sdhci-esdhc-imx51", _id, _hwid)
	imx51_sdhci_esdhc_imx_data_entry(0, 1),
	imx51_sdhci_esdhc_imx_data_entry(1, 2),
	imx51_sdhci_esdhc_imx_data_entry(2, 3),
	imx51_sdhci_esdhc_imx_data_entry(3, 4),
};
#endif /* ifdef CONFIG_SOC_IMX51 */

#ifdef CONFIG_SOC_IMX53
/* i.MX53: four ESDHC instances */
const struct imx_sdhci_esdhc_imx_data
imx53_sdhci_esdhc_imx_data[] __initconst = {
#define imx53_sdhci_esdhc_imx_data_entry(_id, _hwid)	\
	imx_sdhci_esdhc_imx_data_entry(MX53, "sdhci-esdhc-imx53", _id, _hwid)
	imx53_sdhci_esdhc_imx_data_entry(0, 1),
	imx53_sdhci_esdhc_imx_data_entry(1, 2),
	imx53_sdhci_esdhc_imx_data_entry(2, 3),
	imx53_sdhci_esdhc_imx_data_entry(3, 4),
};
#endif /* ifdef CONFIG_SOC_IMX53 */

/* Fallback platform data: neither write-protect nor card-detect wired up */
static const struct esdhc_platform_data default_esdhc_pdata __initconst = {
	.wp_type = ESDHC_WP_NONE,
	.cd_type = ESDHC_CD_NONE,
};

/*
 * Register one sdhci-esdhc-imx platform device described by @data,
 * with MMIO and IRQ resources taken from the per-SoC table above.
 * @pdata may be NULL, in which case the no-WP/no-CD defaults are used.
 * Returns the result of imx_add_platform_device().
 */
struct platform_device *__init imx_add_sdhci_esdhc_imx(
		const struct imx_sdhci_esdhc_imx_data *data,
		const struct esdhc_platform_data *pdata)
{
	struct resource res[] = {
		{
			.start = data->iobase,
			.end = data->iobase + SZ_16K - 1,
			.flags = IORESOURCE_MEM,
		}, {
			.start = data->irq,
			.end = data->irq,
			.flags = IORESOURCE_IRQ,
		},
	};

	/*
	 * If machine does not provide pdata, use the default one
	 * which means no WP/CD support
	 */
	if (!pdata)
		pdata = &default_esdhc_pdata;

	return imx_add_platform_device(data->devid, data->id, res,
			ARRAY_SIZE(res), pdata, sizeof(*pdata));
}
gpl-2.0
cryptotronix/linux
arch/metag/lib/delay.c
4335
1307
/* * Precise Delay Loops for Meta * * Copyright (C) 1993 Linus Torvalds * Copyright (C) 1997 Martin Mares <mj@atrey.karlin.mff.cuni.cz> * Copyright (C) 2007,2009 Imagination Technologies Ltd. * */ #include <linux/export.h> #include <linux/sched.h> #include <linux/delay.h> #include <asm/core_reg.h> #include <asm/processor.h> /* * TXTACTCYC is only 24 bits, so on chips with fast clocks it will wrap * many times per-second. If it does wrap __delay will return prematurely, * but this is only likely with large delay values. * * We also can't implement read_current_timer() with TXTACTCYC due to * this wrapping behaviour. */ #define rdtimer(t) t = __core_reg_get(TXTACTCYC) void __delay(unsigned long loops) { unsigned long bclock, now; rdtimer(bclock); do { asm("NOP"); rdtimer(now); } while ((now-bclock) < loops); } EXPORT_SYMBOL(__delay); inline void __const_udelay(unsigned long xloops) { u64 loops = (u64)xloops * (u64)loops_per_jiffy * HZ; __delay(loops >> 32); } EXPORT_SYMBOL(__const_udelay); void __udelay(unsigned long usecs) { __const_udelay(usecs * 0x000010c7); /* 2**32 / 1000000 (rounded up) */ } EXPORT_SYMBOL(__udelay); void __ndelay(unsigned long nsecs) { __const_udelay(nsecs * 0x00005); /* 2**32 / 1000000000 (rounded up) */ } EXPORT_SYMBOL(__ndelay);
gpl-2.0
StanTRC/lge-kernel-e430
arch/arm/plat-samsung/pwm.c
4847
8853
/* arch/arm/plat-s3c/pwm.c
 *
 * Copyright (c) 2007 Ben Dooks
 * Copyright (c) 2008 Simtec Electronics
 *	Ben Dooks <ben@simtec.co.uk>, <ben-linux@fluff.org>
 *
 * S3C series PWM device core
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License.
 */

#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/pwm.h>

#include <mach/map.h>

#include <plat/regs-timer.h>

/* Per-timer state; one instance per probed platform device. */
struct pwm_device {
	struct list_head	 list;		/* entry on pwm_list */
	struct platform_device	*pdev;

	struct clk		*clk_div;	/* "pwm-tdiv" divided clock */
	struct clk		*clk;		/* "pwm-tin" timer input clock */
	const char		*label;		/* owner label from pwm_request() */

	unsigned int		 period_ns;	/* cached last-configured period */
	unsigned int		 duty_ns;	/* cached last-configured duty */

	unsigned char		 tcon_base;	/* bit offset of this timer in TCON */
	unsigned char		 running;
	unsigned char		 use_count;
	unsigned char		 pwm_id;
};

#define pwm_dbg(_pwm, msg...) dev_dbg(&(_pwm)->pdev->dev, msg)

static struct clk *clk_scaler[2];

/* True when the timer input is routed through the tdiv divider clock. */
static inline int pwm_is_tdiv(struct pwm_device *pwm)
{
	return clk_get_parent(pwm->clk) == pwm->clk_div;
}

/* Protects pwm_list and the use_count/label fields of its members. */
static DEFINE_MUTEX(pwm_lock);
static LIST_HEAD(pwm_list);

/*
 * Claim the PWM channel @pwm_id for exclusive use.
 * Returns the device, ERR_PTR(-EBUSY) if already claimed, or
 * ERR_PTR(-ENOENT) if no such channel has been probed.
 */
struct pwm_device *pwm_request(int pwm_id, const char *label)
{
	struct pwm_device *pwm;
	int found = 0;

	mutex_lock(&pwm_lock);

	list_for_each_entry(pwm, &pwm_list, list) {
		if (pwm->pwm_id == pwm_id) {
			found = 1;
			break;
		}
	}

	if (found) {
		if (pwm->use_count == 0) {
			pwm->use_count = 1;
			pwm->label = label;
		} else
			pwm = ERR_PTR(-EBUSY);
	} else
		pwm = ERR_PTR(-ENOENT);

	mutex_unlock(&pwm_lock);
	return pwm;
}
EXPORT_SYMBOL(pwm_request);

/* Release a channel previously claimed with pwm_request(). */
void pwm_free(struct pwm_device *pwm)
{
	mutex_lock(&pwm_lock);

	if (pwm->use_count) {
		pwm->use_count--;
		pwm->label = NULL;
	} else
		printk(KERN_ERR "PWM%d device already freed\n", pwm->pwm_id);

	mutex_unlock(&pwm_lock);
}
EXPORT_SYMBOL(pwm_free);

/* Control bits for this timer within the shared TCON register. */
#define pwm_tcon_start(pwm) (1 << (pwm->tcon_base + 0))
#define pwm_tcon_invert(pwm) (1 << (pwm->tcon_base + 2))
#define pwm_tcon_autoreload(pwm) (1 << (pwm->tcon_base + 3))
#define pwm_tcon_manulupdate(pwm) (1 << (pwm->tcon_base + 1))

/* Start the timer by setting its start bit in TCON. */
int pwm_enable(struct pwm_device *pwm)
{
	unsigned long flags;
	unsigned long tcon;

	/* TCON is shared between timers, so modify it with IRQs off */
	local_irq_save(flags);

	tcon = __raw_readl(S3C2410_TCON);
	tcon |= pwm_tcon_start(pwm);
	__raw_writel(tcon, S3C2410_TCON);

	local_irq_restore(flags);

	pwm->running = 1;
	return 0;
}
EXPORT_SYMBOL(pwm_enable);

/* Stop the timer by clearing its start bit in TCON. */
void pwm_disable(struct pwm_device *pwm)
{
	unsigned long flags;
	unsigned long tcon;

	local_irq_save(flags);

	tcon = __raw_readl(S3C2410_TCON);
	tcon &= ~pwm_tcon_start(pwm);
	__raw_writel(tcon, S3C2410_TCON);

	local_irq_restore(flags);

	pwm->running = 0;
}
EXPORT_SYMBOL(pwm_disable);

/*
 * Pick the smallest tdiv divider (2..16, powers of two) whose 16-bit
 * counter range still covers @freq, and return the resulting tin rate.
 */
static unsigned long pwm_calc_tin(struct pwm_device *pwm, unsigned long freq)
{
	unsigned long tin_parent_rate;
	unsigned int div;

	tin_parent_rate = clk_get_rate(clk_get_parent(pwm->clk_div));
	pwm_dbg(pwm, "tin parent at %lu\n", tin_parent_rate);

	for (div = 2; div <= 16; div *= 2) {
		if ((tin_parent_rate / (div << 16)) < freq)
			return tin_parent_rate / div;
	}

	return tin_parent_rate / 16;
}

#define NS_IN_HZ (1000000000UL)

/*
 * Program period and duty cycle (both in ns) into TCNTB/TCMPB.
 * Returns 0 on success, -ERANGE for periods slower than 1 Hz or
 * -EINVAL when duty exceeds period.
 */
int pwm_config(struct pwm_device *pwm, int duty_ns, int period_ns)
{
	unsigned long tin_rate;
	unsigned long tin_ns;
	unsigned long period;
	unsigned long flags;
	unsigned long tcon;
	unsigned long tcnt;
	long tcmp;

	/* We currently avoid using 64bit arithmetic by using the
	 * fact that anything faster than 1Hz is easily representable
	 * by 32bits. */

	if (period_ns > NS_IN_HZ || duty_ns > NS_IN_HZ)
		return -ERANGE;

	if (duty_ns > period_ns)
		return -EINVAL;

	if (period_ns == pwm->period_ns &&
	    duty_ns == pwm->duty_ns)
		return 0;

	/* The TCMP and TCNT can be read without a lock, they're not
	 * shared between the timers. */

	tcmp = __raw_readl(S3C2410_TCMPB(pwm->pwm_id));
	tcnt = __raw_readl(S3C2410_TCNTB(pwm->pwm_id));

	period = NS_IN_HZ / period_ns;

	pwm_dbg(pwm, "duty_ns=%d, period_ns=%d (%lu)\n",
		duty_ns, period_ns, period);

	/* Check to see if we are changing the clock rate of the PWM */

	if (pwm->period_ns != period_ns) {
		if (pwm_is_tdiv(pwm)) {
			tin_rate = pwm_calc_tin(pwm, period);
			clk_set_rate(pwm->clk_div, tin_rate);
		} else
			tin_rate = clk_get_rate(pwm->clk);

		pwm->period_ns = period_ns;

		pwm_dbg(pwm, "tin_rate=%lu\n", tin_rate);

		tin_ns = NS_IN_HZ / tin_rate;
		tcnt = period_ns / tin_ns;
	} else
		tin_ns = NS_IN_HZ / clk_get_rate(pwm->clk);

	/* Note, counters count down */

	tcmp = duty_ns / tin_ns;
	tcmp = tcnt - tcmp;
	/* the pwm hw only checks the compare register after a decrement,
	   so the pin never toggles if tcmp = tcnt */
	if (tcmp == tcnt)
		tcmp--;

	pwm_dbg(pwm, "tin_ns=%lu, tcmp=%ld/%lu\n", tin_ns, tcmp, tcnt);

	if (tcmp < 0)
		tcmp = 0;

	/* Update the PWM register block. */

	local_irq_save(flags);

	__raw_writel(tcmp, S3C2410_TCMPB(pwm->pwm_id));
	__raw_writel(tcnt, S3C2410_TCNTB(pwm->pwm_id));

	tcon = __raw_readl(S3C2410_TCON);
	tcon |= pwm_tcon_manulupdate(pwm);
	tcon |= pwm_tcon_autoreload(pwm);
	__raw_writel(tcon, S3C2410_TCON);

	tcon &= ~pwm_tcon_manulupdate(pwm);
	__raw_writel(tcon, S3C2410_TCON);

	local_irq_restore(flags);

	return 0;
}
EXPORT_SYMBOL(pwm_config);

/* Add a probed channel to the global list used by pwm_request(). */
static int pwm_register(struct pwm_device *pwm)
{
	/* -1 forces the first pwm_config() to program the hardware */
	pwm->duty_ns = -1;
	pwm->period_ns = -1;

	mutex_lock(&pwm_lock);
	list_add_tail(&pwm->list, &pwm_list);
	mutex_unlock(&pwm_lock);

	return 0;
}

/*
 * Bind one hardware timer (platform device id 0..3; timer 4 has no
 * output pin and is rejected), claim its clocks, set output inversion
 * and register it for pwm_request().
 */
static int s3c_pwm_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct pwm_device *pwm;
	unsigned long flags;
	unsigned long tcon;
	unsigned int id = pdev->id;
	int ret;

	if (id == 4) {
		dev_err(dev, "TIMER4 is currently not supported\n");
		return -ENXIO;
	}

	pwm = kzalloc(sizeof(struct pwm_device), GFP_KERNEL);
	if (pwm == NULL) {
		dev_err(dev, "failed to allocate pwm_device\n");
		return -ENOMEM;
	}

	pwm->pdev = pdev;
	pwm->pwm_id = id;

	/* calculate base of control bits in TCON */
	pwm->tcon_base = id == 0 ? 0 : (id * 4) + 4;

	pwm->clk = clk_get(dev, "pwm-tin");
	if (IS_ERR(pwm->clk)) {
		dev_err(dev, "failed to get pwm tin clk\n");
		ret = PTR_ERR(pwm->clk);
		goto err_alloc;
	}

	pwm->clk_div = clk_get(dev, "pwm-tdiv");
	if (IS_ERR(pwm->clk_div)) {
		dev_err(dev, "failed to get pwm tdiv clk\n");
		ret = PTR_ERR(pwm->clk_div);
		goto err_clk_tin;
	}

	clk_enable(pwm->clk);
	clk_enable(pwm->clk_div);

	local_irq_save(flags);

	tcon = __raw_readl(S3C2410_TCON);
	tcon |= pwm_tcon_invert(pwm);
	__raw_writel(tcon, S3C2410_TCON);

	local_irq_restore(flags);

	ret = pwm_register(pwm);
	if (ret) {
		dev_err(dev, "failed to register pwm\n");
		goto err_clk_tdiv;
	}

	pwm_dbg(pwm, "config bits %02x\n",
		(__raw_readl(S3C2410_TCON) >> pwm->tcon_base) & 0x0f);

	dev_info(dev, "tin at %lu, tdiv at %lu, tin=%sclk, base %d\n",
		 clk_get_rate(pwm->clk),
		 clk_get_rate(pwm->clk_div),
		 pwm_is_tdiv(pwm) ? "div" : "ext", pwm->tcon_base);

	platform_set_drvdata(pdev, pwm);
	return 0;

 err_clk_tdiv:
	clk_disable(pwm->clk_div);
	clk_disable(pwm->clk);
	clk_put(pwm->clk_div);

 err_clk_tin:
	clk_put(pwm->clk);

 err_alloc:
	kfree(pwm);
	return ret;
}

static int __devexit s3c_pwm_remove(struct platform_device *pdev)
{
	struct pwm_device *pwm = platform_get_drvdata(pdev);

	/*
	 * Bug fix: the device used to be freed while still linked on
	 * pwm_list, so a later pwm_request() could walk and return
	 * freed memory.  Unlink it under the same lock pwm_register()
	 * used to add it before releasing its resources.
	 */
	mutex_lock(&pwm_lock);
	list_del(&pwm->list);
	mutex_unlock(&pwm_lock);

	clk_disable(pwm->clk_div);
	clk_disable(pwm->clk);

	clk_put(pwm->clk_div);
	clk_put(pwm->clk);

	kfree(pwm);

	return 0;
}

#ifdef CONFIG_PM
static int s3c_pwm_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct pwm_device *pwm = platform_get_drvdata(pdev);

	/* No one preserve these values during suspend so reset them
	 * Otherwise driver leaves PWM unconfigured if same values
	 * passed to pwm_config */
	pwm->period_ns = 0;
	pwm->duty_ns = 0;

	return 0;
}

static int s3c_pwm_resume(struct platform_device *pdev)
{
	struct pwm_device *pwm = platform_get_drvdata(pdev);
	unsigned long tcon;

	/* Restore invertion */
	tcon = __raw_readl(S3C2410_TCON);
	tcon |= pwm_tcon_invert(pwm);
	__raw_writel(tcon, S3C2410_TCON);

	return 0;
}
#else
#define s3c_pwm_suspend NULL
#define s3c_pwm_resume NULL
#endif

static struct platform_driver s3c_pwm_driver = {
	.driver		= {
		.name	= "s3c24xx-pwm",
		.owner	= THIS_MODULE,
	},
	.probe		= s3c_pwm_probe,
	.remove		= __devexit_p(s3c_pwm_remove),
	.suspend	= s3c_pwm_suspend,
	.resume		= s3c_pwm_resume,
};

/* Grab the shared scaler clocks, then register the platform driver. */
static int __init pwm_init(void)
{
	int ret;

	clk_scaler[0] = clk_get(NULL, "pwm-scaler0");
	clk_scaler[1] = clk_get(NULL, "pwm-scaler1");

	if (IS_ERR(clk_scaler[0]) || IS_ERR(clk_scaler[1])) {
		printk(KERN_ERR "%s: failed to get scaler clocks\n", __func__);
		return -EINVAL;
	}

	ret = platform_driver_register(&s3c_pwm_driver);
	if (ret)
		printk(KERN_ERR "%s: failed to add pwm driver\n", __func__);

	return ret;
}

arch_initcall(pwm_init);
gpl-2.0
vwmofo/SebastianFM-kernel
arch/arm/mach-shmobile/pfc-sh7377.c
5103
67219
/* * sh7377 processor support - PFC hardware block * * Copyright (C) 2010 NISHIMOTO Hiroki * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; version 2 of the * License. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include <linux/init.h> #include <linux/kernel.h> #include <linux/gpio.h> #include <mach/sh7377.h> #define CPU_ALL_PORT(fn, pfx, sfx) \ PORT_10(fn, pfx, sfx), PORT_90(fn, pfx, sfx), \ PORT_10(fn, pfx##10, sfx), \ PORT_1(fn, pfx##110, sfx), PORT_1(fn, pfx##111, sfx), \ PORT_1(fn, pfx##112, sfx), PORT_1(fn, pfx##113, sfx), \ PORT_1(fn, pfx##114, sfx), PORT_1(fn, pfx##115, sfx), \ PORT_1(fn, pfx##116, sfx), PORT_1(fn, pfx##117, sfx), \ PORT_1(fn, pfx##118, sfx), \ PORT_1(fn, pfx##128, sfx), PORT_1(fn, pfx##129, sfx), \ PORT_10(fn, pfx##13, sfx), PORT_10(fn, pfx##14, sfx), \ PORT_10(fn, pfx##15, sfx), \ PORT_1(fn, pfx##160, sfx), PORT_1(fn, pfx##161, sfx), \ PORT_1(fn, pfx##162, sfx), PORT_1(fn, pfx##163, sfx), \ PORT_1(fn, pfx##164, sfx), \ PORT_1(fn, pfx##192, sfx), PORT_1(fn, pfx##193, sfx), \ PORT_1(fn, pfx##194, sfx), PORT_1(fn, pfx##195, sfx), \ PORT_1(fn, pfx##196, sfx), PORT_1(fn, pfx##197, sfx), \ PORT_1(fn, pfx##198, sfx), PORT_1(fn, pfx##199, sfx), \ PORT_10(fn, pfx##20, sfx), PORT_10(fn, pfx##21, sfx), \ PORT_10(fn, pfx##22, sfx), PORT_10(fn, pfx##23, sfx), \ PORT_10(fn, pfx##24, sfx), PORT_10(fn, pfx##25, sfx), \ PORT_1(fn, pfx##260, sfx), PORT_1(fn, pfx##261, sfx), \ PORT_1(fn, pfx##262, sfx), PORT_1(fn, pfx##263, sfx), \ 
PORT_1(fn, pfx##264, sfx) enum { PINMUX_RESERVED = 0, PINMUX_DATA_BEGIN, PORT_ALL(DATA), /* PORT0_DATA -> PORT264_DATA */ PINMUX_DATA_END, PINMUX_INPUT_BEGIN, PORT_ALL(IN), /* PORT0_IN -> PORT264_IN */ PINMUX_INPUT_END, PINMUX_INPUT_PULLUP_BEGIN, PORT_ALL(IN_PU), /* PORT0_IN_PU -> PORT264_IN_PU */ PINMUX_INPUT_PULLUP_END, PINMUX_INPUT_PULLDOWN_BEGIN, PORT_ALL(IN_PD), /* PORT0_IN_PD -> PORT264_IN_PD */ PINMUX_INPUT_PULLDOWN_END, PINMUX_OUTPUT_BEGIN, PORT_ALL(OUT), /* PORT0_OUT -> PORT264_OUT */ PINMUX_OUTPUT_END, PINMUX_FUNCTION_BEGIN, PORT_ALL(FN_IN), /* PORT0_FN_IN -> PORT264_FN_IN */ PORT_ALL(FN_OUT), /* PORT0_FN_OUT -> PORT264_FN_OUT */ PORT_ALL(FN0), /* PORT0_FN0 -> PORT264_FN0 */ PORT_ALL(FN1), /* PORT0_FN1 -> PORT264_FN1 */ PORT_ALL(FN2), /* PORT0_FN2 -> PORT264_FN2 */ PORT_ALL(FN3), /* PORT0_FN3 -> PORT264_FN3 */ PORT_ALL(FN4), /* PORT0_FN4 -> PORT264_FN4 */ PORT_ALL(FN5), /* PORT0_FN5 -> PORT264_FN5 */ PORT_ALL(FN6), /* PORT0_FN6 -> PORT264_FN6 */ PORT_ALL(FN7), /* PORT0_FN7 -> PORT264_FN7 */ MSELBCR_MSEL17_1, MSELBCR_MSEL17_0, MSELBCR_MSEL16_1, MSELBCR_MSEL16_0, PINMUX_FUNCTION_END, PINMUX_MARK_BEGIN, /* Special Pull-up / Pull-down Functions */ PORT66_KEYIN0_PU_MARK, PORT67_KEYIN1_PU_MARK, PORT68_KEYIN2_PU_MARK, PORT69_KEYIN3_PU_MARK, PORT70_KEYIN4_PU_MARK, PORT71_KEYIN5_PU_MARK, PORT72_KEYIN6_PU_MARK, /* 55-1 */ VBUS_0_MARK, CPORT0_MARK, CPORT1_MARK, CPORT2_MARK, CPORT3_MARK, CPORT4_MARK, CPORT5_MARK, CPORT6_MARK, CPORT7_MARK, CPORT8_MARK, CPORT9_MARK, CPORT10_MARK, CPORT11_MARK, SIN2_MARK, CPORT12_MARK, XCTS2_MARK, CPORT13_MARK, RFSPO4_MARK, CPORT14_MARK, RFSPO5_MARK, CPORT15_MARK, SCIFA0_SCK_MARK, GPS_AGC2_MARK, CPORT16_MARK, SCIFA0_TXD_MARK, GPS_AGC3_MARK, CPORT17_IC_OE_MARK, SOUT2_MARK, CPORT18_MARK, XRTS2_MARK, PORT19_VIO_CKO2_MARK, CPORT19_MPORT1_MARK, CPORT20_MARK, RFSPO6_MARK, CPORT21_MARK, STATUS0_MARK, CPORT22_MARK, STATUS1_MARK, CPORT23_MARK, STATUS2_MARK, RFSPO7_MARK, B_SYNLD1_MARK, B_SYNLD2_MARK, SYSENMSK_MARK, XMAINPS_MARK, XDIVPS_MARK, 
XIDRST_MARK, IDCLK_MARK, IC_DP_MARK, IDIO_MARK, IC_DM_MARK, SOUT1_MARK, SCIFA4_TXD_MARK, M02_BERDAT_MARK, SIN1_MARK, SCIFA4_RXD_MARK, XWUP_MARK, XRTS1_MARK, SCIFA4_RTS_MARK, M03_BERCLK_MARK, XCTS1_MARK, SCIFA4_CTS_MARK, PCMCLKO_MARK, SYNC8KO_MARK, /* 55-2 */ DNPCM_A_MARK, UPPCM_A_MARK, VACK_MARK, XTALB1L_MARK, GPS_AGC1_MARK, SCIFA0_RTS_MARK, GPS_AGC4_MARK, SCIFA0_RXD_MARK, GPS_PWRDOWN_MARK, SCIFA0_CTS_MARK, GPS_IM_MARK, GPS_IS_MARK, GPS_QM_MARK, GPS_QS_MARK, FMSOCK_MARK, PORT49_IRDA_OUT_MARK, PORT49_IROUT_MARK, FMSOOLR_MARK, BBIF2_TSYNC2_MARK, TPU2TO2_MARK, IPORT3_MARK, FMSIOLR_MARK, FMSOOBT_MARK, BBIF2_TSCK2_MARK, TPU2TO3_MARK, OPORT1_MARK, FMSIOBT_MARK, FMSOSLD_MARK, BBIF2_TXD2_MARK, OPORT2_MARK, FMSOILR_MARK, PORT53_IRDA_IN_MARK, TPU3TO3_MARK, OPORT3_MARK, FMSIILR_MARK, FMSOIBT_MARK, PORT54_IRDA_FIRSEL_MARK, TPU3TO2_MARK, FMSIIBT_MARK, FMSISLD_MARK, MFG0_OUT1_MARK, TPU0TO0_MARK, A0_EA0_MARK, BS_MARK, A12_EA12_MARK, PORT58_VIO_CKOR_MARK, TPU4TO2_MARK, A13_EA13_MARK, PORT59_IROUT_MARK, MFG0_OUT2_MARK, TPU0TO1_MARK, A14_EA14_MARK, PORT60_KEYOUT5_MARK, A15_EA15_MARK, PORT61_KEYOUT4_MARK, A16_EA16_MARK, PORT62_KEYOUT3_MARK, MSIOF0_SS1_MARK, A17_EA17_MARK, PORT63_KEYOUT2_MARK, MSIOF0_TSYNC_MARK, A18_EA18_MARK, PORT64_KEYOUT1_MARK, MSIOF0_TSCK_MARK, A19_EA19_MARK, PORT65_KEYOUT0_MARK, MSIOF0_TXD_MARK, A20_EA20_MARK, PORT66_KEYIN0_MARK, MSIOF0_RSCK_MARK, A21_EA21_MARK, PORT67_KEYIN1_MARK, MSIOF0_RSYNC_MARK, A22_EA22_MARK, PORT68_KEYIN2_MARK, MSIOF0_MCK0_MARK, A23_EA23_MARK, PORT69_KEYIN3_MARK, MSIOF0_MCK1_MARK, A24_EA24_MARK, PORT70_KEYIN4_MARK, MSIOF0_RXD_MARK, A25_EA25_MARK, PORT71_KEYIN5_MARK, MSIOF0_SS2_MARK, A26_MARK, PORT72_KEYIN6_MARK, D0_ED0_NAF0_MARK, D1_ED1_NAF1_MARK, D2_ED2_NAF2_MARK, D3_ED3_NAF3_MARK, D4_ED4_NAF4_MARK, D5_ED5_NAF5_MARK, D6_ED6_NAF6_MARK, D7_ED7_NAF7_MARK, D8_ED8_NAF8_MARK, D9_ED9_NAF9_MARK, D10_ED10_NAF10_MARK, D11_ED11_NAF11_MARK, D12_ED12_NAF12_MARK, D13_ED13_NAF13_MARK, D14_ED14_NAF14_MARK, D15_ED15_NAF15_MARK, CS4_MARK, CS5A_MARK, 
FMSICK_MARK, CS5B_MARK, FCE1_MARK, /* 55-3 */ CS6B_MARK, XCS2_MARK, CS6A_MARK, DACK0_MARK, FCE0_MARK, WAIT_MARK, DREQ0_MARK, RD_XRD_MARK, WE0_XWR0_FWE_MARK, WE1_XWR1_MARK, FRB_MARK, CKO_MARK, NBRSTOUT_MARK, NBRST_MARK, GPS_EPPSIN_MARK, LATCHPULSE_MARK, LTESIGNAL_MARK, LEGACYSTATE_MARK, TCKON_MARK, VIO_VD_MARK, PORT128_KEYOUT0_MARK, IPORT0_MARK, VIO_HD_MARK, PORT129_KEYOUT1_MARK, IPORT1_MARK, VIO_D0_MARK, PORT130_KEYOUT2_MARK, PORT130_MSIOF2_RXD_MARK, VIO_D1_MARK, PORT131_KEYOUT3_MARK, PORT131_MSIOF2_SS1_MARK, VIO_D2_MARK, PORT132_KEYOUT4_MARK, PORT132_MSIOF2_SS2_MARK, VIO_D3_MARK, PORT133_KEYOUT5_MARK, PORT133_MSIOF2_TSYNC_MARK, VIO_D4_MARK, PORT134_KEYIN0_MARK, PORT134_MSIOF2_TXD_MARK, VIO_D5_MARK, PORT135_KEYIN1_MARK, PORT135_MSIOF2_TSCK_MARK, VIO_D6_MARK, PORT136_KEYIN2_MARK, VIO_D7_MARK, PORT137_KEYIN3_MARK, VIO_D8_MARK, M9_SLCD_A01_MARK, PORT138_FSIAOMC_MARK, VIO_D9_MARK, M10_SLCD_CK1_MARK, PORT139_FSIAOLR_MARK, VIO_D10_MARK, M11_SLCD_SO1_MARK, TPU0TO2_MARK, PORT140_FSIAOBT_MARK, VIO_D11_MARK, M12_SLCD_CE1_MARK, TPU0TO3_MARK, PORT141_FSIAOSLD_MARK, VIO_D12_MARK, M13_BSW_MARK, PORT142_FSIACK_MARK, VIO_D13_MARK, M14_GSW_MARK, PORT143_FSIAILR_MARK, VIO_D14_MARK, M15_RSW_MARK, PORT144_FSIAIBT_MARK, VIO_D15_MARK, TPU1TO3_MARK, PORT145_FSIAISLD_MARK, VIO_CLK_MARK, PORT146_KEYIN4_MARK, IPORT2_MARK, VIO_FIELD_MARK, PORT147_KEYIN5_MARK, VIO_CKO_MARK, PORT148_KEYIN6_MARK, A27_MARK, RDWR_XWE_MARK, MFG0_IN1_MARK, MFG0_IN2_MARK, TS_SPSYNC3_MARK, MSIOF2_RSCK_MARK, TS_SDAT3_MARK, MSIOF2_RSYNC_MARK, TPU1TO2_MARK, TS_SDEN3_MARK, PORT153_MSIOF2_SS1_MARK, SOUT3_MARK, SCIFA2_TXD1_MARK, MSIOF2_MCK0_MARK, SIN3_MARK, SCIFA2_RXD1_MARK, MSIOF2_MCK1_MARK, XRTS3_MARK, SCIFA2_RTS1_MARK, PORT156_MSIOF2_SS2_MARK, XCTS3_MARK, SCIFA2_CTS1_MARK, PORT157_MSIOF2_RXD_MARK, /* 55-4 */ DINT_MARK, SCIFA2_SCK1_MARK, TS_SCK3_MARK, PORT159_SCIFB_SCK_MARK, PORT159_SCIFA5_SCK_MARK, NMI_MARK, PORT160_SCIFB_TXD_MARK, PORT160_SCIFA5_TXD_MARK, SOUT0_MARK, PORT161_SCIFB_CTS_MARK, PORT161_SCIFA5_CTS_MARK, 
XCTS0_MARK, MFG3_IN2_MARK, PORT162_SCIFB_RXD_MARK, PORT162_SCIFA5_RXD_MARK, SIN0_MARK, MFG3_IN1_MARK, PORT163_SCIFB_RTS_MARK, PORT163_SCIFA5_RTS_MARK, XRTS0_MARK, MFG3_OUT1_MARK, TPU3TO0_MARK, LCDD0_MARK, PORT192_KEYOUT0_MARK, EXT_CKI_MARK, LCDD1_MARK, PORT193_KEYOUT1_MARK, PORT193_SCIFA5_CTS_MARK, BBIF2_TSYNC1_MARK, LCDD2_MARK, PORT194_KEYOUT2_MARK, PORT194_SCIFA5_RTS_MARK, BBIF2_TSCK1_MARK, LCDD3_MARK, PORT195_KEYOUT3_MARK, PORT195_SCIFA5_RXD_MARK, BBIF2_TXD1_MARK, LCDD4_MARK, PORT196_KEYOUT4_MARK, PORT196_SCIFA5_TXD_MARK, LCDD5_MARK, PORT197_KEYOUT5_MARK, PORT197_SCIFA5_SCK_MARK, MFG2_OUT2_MARK, TPU2TO1_MARK, LCDD6_MARK, XWR2_MARK, LCDD7_MARK, TPU4TO1_MARK, MFG4_OUT2_MARK, XWR3_MARK, LCDD8_MARK, PORT200_KEYIN0_MARK, VIO_DR0_MARK, D16_MARK, ED16_MARK, LCDD9_MARK, PORT201_KEYIN1_MARK, VIO_DR1_MARK, D17_MARK, ED17_MARK, LCDD10_MARK, PORT202_KEYIN2_MARK, VIO_DR2_MARK, D18_MARK, ED18_MARK, LCDD11_MARK, PORT203_KEYIN3_MARK, VIO_DR3_MARK, D19_MARK, ED19_MARK, LCDD12_MARK, PORT204_KEYIN4_MARK, VIO_DR4_MARK, D20_MARK, ED20_MARK, LCDD13_MARK, PORT205_KEYIN5_MARK, VIO_DR5_MARK, D21_MARK, ED21_MARK, LCDD14_MARK, PORT206_KEYIN6_MARK, VIO_DR6_MARK, D22_MARK, ED22_MARK, LCDD15_MARK, PORT207_MSIOF0L_SS1_MARK, PORT207_KEYOUT0_MARK, VIO_DR7_MARK, D23_MARK, ED23_MARK, LCDD16_MARK, PORT208_MSIOF0L_SS2_MARK, PORT208_KEYOUT1_MARK, VIO_VDR_MARK, D24_MARK, ED24_MARK, LCDD17_MARK, PORT209_KEYOUT2_MARK, VIO_HDR_MARK, D25_MARK, ED25_MARK, LCDD18_MARK, DREQ2_MARK, PORT210_MSIOF0L_SS1_MARK, D26_MARK, ED26_MARK, LCDD19_MARK, PORT211_MSIOF0L_SS2_MARK, D27_MARK, ED27_MARK, LCDD20_MARK, TS_SPSYNC1_MARK, MSIOF0L_MCK0_MARK, D28_MARK, ED28_MARK, LCDD21_MARK, TS_SDAT1_MARK, MSIOF0L_MCK1_MARK, D29_MARK, ED29_MARK, LCDD22_MARK, TS_SDEN1_MARK, MSIOF0L_RSCK_MARK, D30_MARK, ED30_MARK, LCDD23_MARK, TS_SCK1_MARK, MSIOF0L_RSYNC_MARK, D31_MARK, ED31_MARK, LCDDCK_MARK, LCDWR_MARK, PORT216_KEYOUT3_MARK, VIO_CLKR_MARK, LCDRD_MARK, DACK2_MARK, MSIOF0L_TSYNC_MARK, LCDHSYN_MARK, LCDCS_MARK, LCDCS2_MARK, 
DACK3_MARK, PORT218_VIO_CKOR_MARK, PORT218_KEYOUT4_MARK, LCDDISP_MARK, LCDRS_MARK, DREQ3_MARK, MSIOF0L_TSCK_MARK, LCDVSYN_MARK, LCDVSYN2_MARK, PORT220_KEYOUT5_MARK, LCDLCLK_MARK, DREQ1_MARK, PWEN_MARK, MSIOF0L_RXD_MARK, LCDDON_MARK, LCDDON2_MARK, DACK1_MARK, OVCN_MARK, MSIOF0L_TXD_MARK, SCIFA1_TXD_MARK, OVCN2_MARK, EXTLP_MARK, SCIFA1_SCK_MARK, USBTERM_MARK, PORT226_VIO_CKO2_MARK, SCIFA1_RTS_MARK, IDIN_MARK, SCIFA1_RXD_MARK, SCIFA1_CTS_MARK, MFG1_IN1_MARK, MSIOF1_TXD_MARK, SCIFA2_TXD2_MARK, PORT230_FSIAOMC_MARK, MSIOF1_TSYNC_MARK, SCIFA2_CTS2_MARK, PORT231_FSIAOLR_MARK, MSIOF1_TSCK_MARK, SCIFA2_SCK2_MARK, PORT232_FSIAOBT_MARK, MSIOF1_RXD_MARK, SCIFA2_RXD2_MARK, GPS_VCOTRIG_MARK, PORT233_FSIACK_MARK, MSIOF1_RSCK_MARK, SCIFA2_RTS2_MARK, PORT234_FSIAOSLD_MARK, MSIOF1_RSYNC_MARK, OPORT0_MARK, MFG1_IN2_MARK, PORT235_FSIAILR_MARK, MSIOF1_MCK0_MARK, I2C_SDA2_MARK, PORT236_FSIAIBT_MARK, MSIOF1_MCK1_MARK, I2C_SCL2_MARK, PORT237_FSIAISLD_MARK, MSIOF1_SS1_MARK, EDBGREQ3_MARK, /* 55-5 */ MSIOF1_SS2_MARK, SCIFA6_TXD_MARK, PORT241_IRDA_OUT_MARK, PORT241_IROUT_MARK, MFG4_OUT1_MARK, TPU4TO0_MARK, PORT242_IRDA_IN_MARK, MFG4_IN2_MARK, PORT243_IRDA_FIRSEL_MARK, PORT243_VIO_CKO2_MARK, PORT244_SCIFA5_CTS_MARK, MFG2_IN1_MARK, PORT244_SCIFB_CTS_MARK, PORT244_MSIOF2_RXD_MARK, PORT245_SCIFA5_RTS_MARK, MFG2_IN2_MARK, PORT245_SCIFB_RTS_MARK, PORT245_MSIOF2_TXD_MARK, PORT246_SCIFA5_RXD_MARK, MFG1_OUT1_MARK, PORT246_SCIFB_RXD_MARK, TPU1TO0_MARK, PORT247_SCIFA5_TXD_MARK, MFG3_OUT2_MARK, PORT247_SCIFB_TXD_MARK, TPU3TO1_MARK, PORT248_SCIFA5_SCK_MARK, MFG2_OUT1_MARK, PORT248_SCIFB_SCK_MARK, TPU2TO0_MARK, PORT248_MSIOF2_TSCK_MARK, PORT249_IROUT_MARK, MFG4_IN1_MARK, PORT249_MSIOF2_TSYNC_MARK, SDHICLK0_MARK, TCK2_SWCLK_MC0_MARK, SDHICD0_MARK, SDHID0_0_MARK, TMS2_SWDIO_MC0_MARK, SDHID0_1_MARK, TDO2_SWO0_MC0_MARK, SDHID0_2_MARK, TDI2_MARK, SDHID0_3_MARK, RTCK2_SWO1_MC0_MARK, SDHICMD0_MARK, TRST2_MARK, SDHIWP0_MARK, EDBGREQ2_MARK, SDHICLK1_MARK, TCK3_SWCLK_MC1_MARK, SDHID1_0_MARK, M11_SLCD_SO2_MARK, 
TS_SPSYNC2_MARK, TMS3_SWDIO_MC1_MARK, SDHID1_1_MARK, M9_SLCD_A02_MARK, TS_SDAT2_MARK, TDO3_SWO0_MC1_MARK, SDHID1_2_MARK, M10_SLCD_CK2_MARK, TS_SDEN2_MARK, TDI3_MARK, SDHID1_3_MARK, M12_SLCD_CE2_MARK, TS_SCK2_MARK, RTCK3_SWO1_MC1_MARK, SDHICMD1_MARK, TRST3_MARK, RESETOUTS_MARK, PINMUX_MARK_END, };
/*
 * pinmux_data[]: flat hardware-description table for the SH pinmux core.
 * Each PINMUX_DATA(<signal>_MARK, ...) entry pairs a signal mark with the
 * selector values (PORTn_FNx function selects, and for the special pull
 * variants also MSELBCR_* mode-select bits and PORTn_IN_PU pull states)
 * that must be programmed to route that signal onto its pin.  The leading
 * PORT_DATA_*() entries declare the valid GPIO-mode states per port
 * (I = input, O = output, IO = both; _PU/_PD = pull-up/pull-down
 * capability).  Section comments ("55-1" .. "55-5") follow the datasheet's
 * pin-list chapters.  This is transcribed register data - do not reorder
 * or edit entries without the SoC hardware manual at hand.
 */
static pinmux_enum_t pinmux_data[] = { /* specify valid pin states for each pin in GPIO mode */ /* 55-1 (GPIO) */ PORT_DATA_I_PD(0), PORT_DATA_I_PU(1), PORT_DATA_I_PU(2), PORT_DATA_I_PU(3), PORT_DATA_I_PU(4), PORT_DATA_I_PU(5), PORT_DATA_I_PU(6), PORT_DATA_I_PU(7), PORT_DATA_I_PU(8), PORT_DATA_I_PU(9), PORT_DATA_I_PU(10), PORT_DATA_I_PU(11), PORT_DATA_IO_PU(12), PORT_DATA_IO_PU(13), PORT_DATA_IO_PU_PD(14), PORT_DATA_IO_PU_PD(15), PORT_DATA_O(16), PORT_DATA_IO(17), PORT_DATA_O(18), PORT_DATA_O(19), PORT_DATA_O(20), PORT_DATA_O(21), PORT_DATA_O(22), PORT_DATA_O(23), PORT_DATA_O(24), PORT_DATA_I_PD(25), PORT_DATA_I_PD(26), PORT_DATA_O(27), PORT_DATA_O(28), PORT_DATA_O(29), PORT_DATA_IO(30), PORT_DATA_IO_PU(31), PORT_DATA_IO_PD(32), PORT_DATA_I_PU(33), PORT_DATA_IO_PD(34), PORT_DATA_I_PU_PD(35), PORT_DATA_O(36), PORT_DATA_IO(37), /* 55-2 (GPIO) */ PORT_DATA_O(38), PORT_DATA_I_PU(39), PORT_DATA_I_PU_PD(40), PORT_DATA_O(41), PORT_DATA_IO_PD(42), PORT_DATA_IO_PD(43), PORT_DATA_IO_PD(44), PORT_DATA_I_PD(45), PORT_DATA_I_PD(46), PORT_DATA_I_PD(47), PORT_DATA_I_PD(48), PORT_DATA_IO_PU_PD(49), PORT_DATA_IO_PD(50), PORT_DATA_IO_PD(51), PORT_DATA_O(52), PORT_DATA_IO_PU_PD(53), PORT_DATA_IO_PU_PD(54), PORT_DATA_IO_PD(55), PORT_DATA_I_PU_PD(56), PORT_DATA_IO(57), PORT_DATA_IO(58), PORT_DATA_IO(59), PORT_DATA_IO(60), PORT_DATA_IO(61), PORT_DATA_IO_PD(62), PORT_DATA_IO_PD(63), PORT_DATA_IO_PD(64), PORT_DATA_IO_PD(65), PORT_DATA_IO_PU_PD(66), PORT_DATA_IO_PU_PD(67), PORT_DATA_IO_PU_PD(68), PORT_DATA_IO_PU_PD(69), PORT_DATA_IO_PU_PD(70), PORT_DATA_IO_PU_PD(71), PORT_DATA_IO_PU_PD(72), PORT_DATA_I_PU_PD(73), PORT_DATA_IO_PU(74), PORT_DATA_IO_PU(75), PORT_DATA_IO_PU(76), PORT_DATA_IO_PU(77), 
PORT_DATA_IO_PU(78), PORT_DATA_IO_PU(79), PORT_DATA_IO_PU(80), PORT_DATA_IO_PU(81), PORT_DATA_IO_PU(82), PORT_DATA_IO_PU(83), PORT_DATA_IO_PU(84), PORT_DATA_IO_PU(85), PORT_DATA_IO_PU(86), PORT_DATA_IO_PU(87), PORT_DATA_IO_PU(88), PORT_DATA_IO_PU(89), PORT_DATA_O(90), PORT_DATA_IO_PU(91), PORT_DATA_O(92), /* 55-3 (GPIO) */ PORT_DATA_IO_PU(93), PORT_DATA_O(94), PORT_DATA_I_PU_PD(95), PORT_DATA_IO(96), PORT_DATA_IO(97), PORT_DATA_IO(98), PORT_DATA_I_PU(99), PORT_DATA_O(100), PORT_DATA_O(101), PORT_DATA_I_PU(102), PORT_DATA_IO_PD(103), PORT_DATA_I_PD(104), PORT_DATA_I_PD(105), PORT_DATA_I_PD(106), PORT_DATA_I_PD(107), PORT_DATA_I_PD(108), PORT_DATA_IO_PD(109), PORT_DATA_IO_PD(110), PORT_DATA_I_PD(111), PORT_DATA_IO_PD(112), PORT_DATA_IO_PD(113), PORT_DATA_IO_PD(114), PORT_DATA_I_PD(115), PORT_DATA_I_PD(116), PORT_DATA_IO_PD(117), PORT_DATA_I_PD(118), PORT_DATA_IO_PD(128), PORT_DATA_IO_PD(129), PORT_DATA_IO_PD(130), PORT_DATA_IO_PD(131), PORT_DATA_IO_PD(132), PORT_DATA_IO_PD(133), PORT_DATA_IO_PU_PD(134), PORT_DATA_IO_PU_PD(135), PORT_DATA_IO_PU_PD(136), PORT_DATA_IO_PU_PD(137), PORT_DATA_IO_PD(138), PORT_DATA_IO_PD(139), PORT_DATA_IO_PD(140), PORT_DATA_IO_PD(141), PORT_DATA_IO_PD(142), PORT_DATA_IO_PD(143), PORT_DATA_IO_PU_PD(144), PORT_DATA_IO_PD(145), PORT_DATA_IO_PU_PD(146), PORT_DATA_IO_PU_PD(147), PORT_DATA_IO_PU_PD(148), PORT_DATA_IO_PU_PD(149), PORT_DATA_I_PD(150), PORT_DATA_IO_PU_PD(151), PORT_DATA_IO_PD(152), PORT_DATA_IO_PD(153), PORT_DATA_IO_PD(154), PORT_DATA_I_PD(155), PORT_DATA_IO_PU_PD(156), PORT_DATA_I_PD(157), PORT_DATA_IO_PD(158), /* 55-4 (GPIO) */ PORT_DATA_IO_PU_PD(159), PORT_DATA_IO_PU_PD(160), PORT_DATA_I_PU_PD(161), PORT_DATA_I_PU_PD(162), PORT_DATA_IO_PU_PD(163), PORT_DATA_I_PU_PD(164), PORT_DATA_IO_PD(192), PORT_DATA_IO_PD(193), PORT_DATA_IO_PD(194), PORT_DATA_IO_PD(195), PORT_DATA_IO_PD(196), PORT_DATA_IO_PD(197), PORT_DATA_IO_PD(198), PORT_DATA_IO_PD(199), PORT_DATA_IO_PU_PD(200), PORT_DATA_IO_PU_PD(201), PORT_DATA_IO_PU_PD(202), 
PORT_DATA_IO_PU_PD(203), PORT_DATA_IO_PU_PD(204), PORT_DATA_IO_PU_PD(205), PORT_DATA_IO_PU_PD(206), PORT_DATA_IO_PD(207), PORT_DATA_IO_PD(208), PORT_DATA_IO_PD(209), PORT_DATA_IO_PD(210), PORT_DATA_IO_PD(211), PORT_DATA_IO_PD(212), PORT_DATA_IO_PD(213), PORT_DATA_IO_PD(214), PORT_DATA_IO_PD(215), PORT_DATA_IO_PD(216), PORT_DATA_IO_PD(217), PORT_DATA_O(218), PORT_DATA_IO_PD(219), PORT_DATA_IO_PD(220), PORT_DATA_IO_PD(221), PORT_DATA_IO_PU_PD(222), PORT_DATA_I_PU_PD(223), PORT_DATA_I_PU_PD(224), PORT_DATA_IO_PU_PD(225), PORT_DATA_O(226), PORT_DATA_IO_PU_PD(227), PORT_DATA_I_PD(228), PORT_DATA_I_PD(229), PORT_DATA_IO(230), PORT_DATA_IO_PD(231), PORT_DATA_IO_PU_PD(232), PORT_DATA_I_PD(233), PORT_DATA_IO_PU_PD(234), PORT_DATA_IO_PU_PD(235), PORT_DATA_IO_PU_PD(236), PORT_DATA_IO_PD(237), PORT_DATA_IO_PU_PD(238), /* 55-5 (GPIO) */ PORT_DATA_IO_PU_PD(239), PORT_DATA_IO_PU_PD(240), PORT_DATA_O(241), PORT_DATA_I_PD(242), PORT_DATA_IO_PU_PD(243), PORT_DATA_IO_PU_PD(244), PORT_DATA_IO_PU_PD(245), PORT_DATA_IO_PU_PD(246), PORT_DATA_IO_PU_PD(247), PORT_DATA_IO_PU_PD(248), PORT_DATA_IO_PU_PD(249), PORT_DATA_IO_PD(250), PORT_DATA_IO_PU_PD(251), PORT_DATA_IO_PU_PD(252), PORT_DATA_IO_PU_PD(253), PORT_DATA_IO_PU_PD(254), PORT_DATA_IO_PU_PD(255), PORT_DATA_IO_PU_PD(256), PORT_DATA_IO_PU_PD(257), PORT_DATA_IO_PD(258), PORT_DATA_IO_PU_PD(259), PORT_DATA_IO_PU_PD(260), PORT_DATA_IO_PU_PD(261), PORT_DATA_IO_PU_PD(262), PORT_DATA_IO_PU_PD(263), /* Special Pull-up / Pull-down Functions */ PINMUX_DATA(PORT66_KEYIN0_PU_MARK, MSELBCR_MSEL17_0, MSELBCR_MSEL16_0, PORT66_FN2, PORT66_IN_PU), PINMUX_DATA(PORT67_KEYIN1_PU_MARK, MSELBCR_MSEL17_0, MSELBCR_MSEL16_0, PORT67_FN2, PORT67_IN_PU), PINMUX_DATA(PORT68_KEYIN2_PU_MARK, MSELBCR_MSEL17_0, MSELBCR_MSEL16_0, PORT68_FN2, PORT68_IN_PU), PINMUX_DATA(PORT69_KEYIN3_PU_MARK, MSELBCR_MSEL17_0, MSELBCR_MSEL16_0, PORT69_FN2, PORT69_IN_PU), PINMUX_DATA(PORT70_KEYIN4_PU_MARK, MSELBCR_MSEL17_0, MSELBCR_MSEL16_0, PORT70_FN2, PORT70_IN_PU), 
PINMUX_DATA(PORT71_KEYIN5_PU_MARK, MSELBCR_MSEL17_0, MSELBCR_MSEL16_0, PORT71_FN2, PORT71_IN_PU), PINMUX_DATA(PORT72_KEYIN6_PU_MARK, MSELBCR_MSEL17_0, MSELBCR_MSEL16_0, PORT72_FN2, PORT72_IN_PU), /* 55-1 (FN) */ PINMUX_DATA(VBUS_0_MARK, PORT0_FN1), PINMUX_DATA(CPORT0_MARK, PORT1_FN1), PINMUX_DATA(CPORT1_MARK, PORT2_FN1), PINMUX_DATA(CPORT2_MARK, PORT3_FN1), PINMUX_DATA(CPORT3_MARK, PORT4_FN1), PINMUX_DATA(CPORT4_MARK, PORT5_FN1), PINMUX_DATA(CPORT5_MARK, PORT6_FN1), PINMUX_DATA(CPORT6_MARK, PORT7_FN1), PINMUX_DATA(CPORT7_MARK, PORT8_FN1), PINMUX_DATA(CPORT8_MARK, PORT9_FN1), PINMUX_DATA(CPORT9_MARK, PORT10_FN1), PINMUX_DATA(CPORT10_MARK, PORT11_FN1), PINMUX_DATA(CPORT11_MARK, PORT12_FN1), PINMUX_DATA(SIN2_MARK, PORT12_FN2), PINMUX_DATA(CPORT12_MARK, PORT13_FN1), PINMUX_DATA(XCTS2_MARK, PORT13_FN2), PINMUX_DATA(CPORT13_MARK, PORT14_FN1), PINMUX_DATA(RFSPO4_MARK, PORT14_FN2), PINMUX_DATA(CPORT14_MARK, PORT15_FN1), PINMUX_DATA(RFSPO5_MARK, PORT15_FN2), PINMUX_DATA(CPORT15_MARK, PORT16_FN1), PINMUX_DATA(SCIFA0_SCK_MARK, PORT16_FN2), PINMUX_DATA(GPS_AGC2_MARK, PORT16_FN3), PINMUX_DATA(CPORT16_MARK, PORT17_FN1), PINMUX_DATA(SCIFA0_TXD_MARK, PORT17_FN2), PINMUX_DATA(GPS_AGC3_MARK, PORT17_FN3), PINMUX_DATA(CPORT17_IC_OE_MARK, PORT18_FN1), PINMUX_DATA(SOUT2_MARK, PORT18_FN2), PINMUX_DATA(CPORT18_MARK, PORT19_FN1), PINMUX_DATA(XRTS2_MARK, PORT19_FN2), PINMUX_DATA(PORT19_VIO_CKO2_MARK, PORT19_FN3), PINMUX_DATA(CPORT19_MPORT1_MARK, PORT20_FN1), PINMUX_DATA(CPORT20_MARK, PORT21_FN1), PINMUX_DATA(RFSPO6_MARK, PORT21_FN2), PINMUX_DATA(CPORT21_MARK, PORT22_FN1), PINMUX_DATA(STATUS0_MARK, PORT22_FN2), PINMUX_DATA(CPORT22_MARK, PORT23_FN1), PINMUX_DATA(STATUS1_MARK, PORT23_FN2), PINMUX_DATA(CPORT23_MARK, PORT24_FN1), PINMUX_DATA(STATUS2_MARK, PORT24_FN2), PINMUX_DATA(RFSPO7_MARK, PORT24_FN3), PINMUX_DATA(B_SYNLD1_MARK, PORT25_FN1), PINMUX_DATA(B_SYNLD2_MARK, PORT26_FN1), PINMUX_DATA(SYSENMSK_MARK, PORT26_FN2), PINMUX_DATA(XMAINPS_MARK, PORT27_FN1), PINMUX_DATA(XDIVPS_MARK, 
PORT28_FN1), PINMUX_DATA(XIDRST_MARK, PORT29_FN1), PINMUX_DATA(IDCLK_MARK, PORT30_FN1), PINMUX_DATA(IC_DP_MARK, PORT30_FN2), PINMUX_DATA(IDIO_MARK, PORT31_FN1), PINMUX_DATA(IC_DM_MARK, PORT31_FN2), PINMUX_DATA(SOUT1_MARK, PORT32_FN1), PINMUX_DATA(SCIFA4_TXD_MARK, PORT32_FN2), PINMUX_DATA(M02_BERDAT_MARK, PORT32_FN3), PINMUX_DATA(SIN1_MARK, PORT33_FN1), PINMUX_DATA(SCIFA4_RXD_MARK, PORT33_FN2), PINMUX_DATA(XWUP_MARK, PORT33_FN3), PINMUX_DATA(XRTS1_MARK, PORT34_FN1), PINMUX_DATA(SCIFA4_RTS_MARK, PORT34_FN2), PINMUX_DATA(M03_BERCLK_MARK, PORT34_FN3), PINMUX_DATA(XCTS1_MARK, PORT35_FN1), PINMUX_DATA(SCIFA4_CTS_MARK, PORT35_FN2), PINMUX_DATA(PCMCLKO_MARK, PORT36_FN1), PINMUX_DATA(SYNC8KO_MARK, PORT37_FN1), /* 55-2 (FN) */ PINMUX_DATA(DNPCM_A_MARK, PORT38_FN1), PINMUX_DATA(UPPCM_A_MARK, PORT39_FN1), PINMUX_DATA(VACK_MARK, PORT40_FN1), PINMUX_DATA(XTALB1L_MARK, PORT41_FN1), PINMUX_DATA(GPS_AGC1_MARK, PORT42_FN1), PINMUX_DATA(SCIFA0_RTS_MARK, PORT42_FN2), PINMUX_DATA(GPS_AGC4_MARK, PORT43_FN1), PINMUX_DATA(SCIFA0_RXD_MARK, PORT43_FN2), PINMUX_DATA(GPS_PWRDOWN_MARK, PORT44_FN1), PINMUX_DATA(SCIFA0_CTS_MARK, PORT44_FN2), PINMUX_DATA(GPS_IM_MARK, PORT45_FN1), PINMUX_DATA(GPS_IS_MARK, PORT46_FN1), PINMUX_DATA(GPS_QM_MARK, PORT47_FN1), PINMUX_DATA(GPS_QS_MARK, PORT48_FN1), PINMUX_DATA(FMSOCK_MARK, PORT49_FN1), PINMUX_DATA(PORT49_IRDA_OUT_MARK, PORT49_FN2), PINMUX_DATA(PORT49_IROUT_MARK, PORT49_FN3), PINMUX_DATA(FMSOOLR_MARK, PORT50_FN1), PINMUX_DATA(BBIF2_TSYNC2_MARK, PORT50_FN2), PINMUX_DATA(TPU2TO2_MARK, PORT50_FN3), PINMUX_DATA(IPORT3_MARK, PORT50_FN4), PINMUX_DATA(FMSIOLR_MARK, PORT50_FN5), PINMUX_DATA(FMSOOBT_MARK, PORT51_FN1), PINMUX_DATA(BBIF2_TSCK2_MARK, PORT51_FN2), PINMUX_DATA(TPU2TO3_MARK, PORT51_FN3), PINMUX_DATA(OPORT1_MARK, PORT51_FN4), PINMUX_DATA(FMSIOBT_MARK, PORT51_FN5), PINMUX_DATA(FMSOSLD_MARK, PORT52_FN1), PINMUX_DATA(BBIF2_TXD2_MARK, PORT52_FN2), PINMUX_DATA(OPORT2_MARK, PORT52_FN3), PINMUX_DATA(FMSOILR_MARK, PORT53_FN1), PINMUX_DATA(PORT53_IRDA_IN_MARK, 
PORT53_FN2), PINMUX_DATA(TPU3TO3_MARK, PORT53_FN3), PINMUX_DATA(OPORT3_MARK, PORT53_FN4), PINMUX_DATA(FMSIILR_MARK, PORT53_FN5), PINMUX_DATA(FMSOIBT_MARK, PORT54_FN1), PINMUX_DATA(PORT54_IRDA_FIRSEL_MARK, PORT54_FN2), PINMUX_DATA(TPU3TO2_MARK, PORT54_FN3), PINMUX_DATA(FMSIIBT_MARK, PORT54_FN4), PINMUX_DATA(FMSISLD_MARK, PORT55_FN1), PINMUX_DATA(MFG0_OUT1_MARK, PORT55_FN2), PINMUX_DATA(TPU0TO0_MARK, PORT55_FN3), PINMUX_DATA(A0_EA0_MARK, PORT57_FN1), PINMUX_DATA(BS_MARK, PORT57_FN2), PINMUX_DATA(A12_EA12_MARK, PORT58_FN1), PINMUX_DATA(PORT58_VIO_CKOR_MARK, PORT58_FN2), PINMUX_DATA(TPU4TO2_MARK, PORT58_FN3), PINMUX_DATA(A13_EA13_MARK, PORT59_FN1), PINMUX_DATA(PORT59_IROUT_MARK, PORT59_FN2), PINMUX_DATA(MFG0_OUT2_MARK, PORT59_FN3), PINMUX_DATA(TPU0TO1_MARK, PORT59_FN4), PINMUX_DATA(A14_EA14_MARK, PORT60_FN1), PINMUX_DATA(PORT60_KEYOUT5_MARK, PORT60_FN2), PINMUX_DATA(A15_EA15_MARK, PORT61_FN1), PINMUX_DATA(PORT61_KEYOUT4_MARK, PORT61_FN2), PINMUX_DATA(A16_EA16_MARK, PORT62_FN1), PINMUX_DATA(PORT62_KEYOUT3_MARK, PORT62_FN2), PINMUX_DATA(MSIOF0_SS1_MARK, PORT62_FN3), PINMUX_DATA(A17_EA17_MARK, PORT63_FN1), PINMUX_DATA(PORT63_KEYOUT2_MARK, PORT63_FN2), PINMUX_DATA(MSIOF0_TSYNC_MARK, PORT63_FN3), PINMUX_DATA(A18_EA18_MARK, PORT64_FN1), PINMUX_DATA(PORT64_KEYOUT1_MARK, PORT64_FN2), PINMUX_DATA(MSIOF0_TSCK_MARK, PORT64_FN3), PINMUX_DATA(A19_EA19_MARK, PORT65_FN1), PINMUX_DATA(PORT65_KEYOUT0_MARK, PORT65_FN2), PINMUX_DATA(MSIOF0_TXD_MARK, PORT65_FN3), PINMUX_DATA(A20_EA20_MARK, PORT66_FN1), PINMUX_DATA(PORT66_KEYIN0_MARK, PORT66_FN2), PINMUX_DATA(MSIOF0_RSCK_MARK, PORT66_FN3), PINMUX_DATA(A21_EA21_MARK, PORT67_FN1), PINMUX_DATA(PORT67_KEYIN1_MARK, PORT67_FN2), PINMUX_DATA(MSIOF0_RSYNC_MARK, PORT67_FN3), PINMUX_DATA(A22_EA22_MARK, PORT68_FN1), PINMUX_DATA(PORT68_KEYIN2_MARK, PORT68_FN2), PINMUX_DATA(MSIOF0_MCK0_MARK, PORT68_FN3), PINMUX_DATA(A23_EA23_MARK, PORT69_FN1), PINMUX_DATA(PORT69_KEYIN3_MARK, PORT69_FN2), PINMUX_DATA(MSIOF0_MCK1_MARK, PORT69_FN3), 
PINMUX_DATA(A24_EA24_MARK, PORT70_FN1), PINMUX_DATA(PORT70_KEYIN4_MARK, PORT70_FN2), PINMUX_DATA(MSIOF0_RXD_MARK, PORT70_FN3), PINMUX_DATA(A25_EA25_MARK, PORT71_FN1), PINMUX_DATA(PORT71_KEYIN5_MARK, PORT71_FN2), PINMUX_DATA(MSIOF0_SS2_MARK, PORT71_FN3), PINMUX_DATA(A26_MARK, PORT72_FN1), PINMUX_DATA(PORT72_KEYIN6_MARK, PORT72_FN2), PINMUX_DATA(D0_ED0_NAF0_MARK, PORT74_FN1), PINMUX_DATA(D1_ED1_NAF1_MARK, PORT75_FN1), PINMUX_DATA(D2_ED2_NAF2_MARK, PORT76_FN1), PINMUX_DATA(D3_ED3_NAF3_MARK, PORT77_FN1), PINMUX_DATA(D4_ED4_NAF4_MARK, PORT78_FN1), PINMUX_DATA(D5_ED5_NAF5_MARK, PORT79_FN1), PINMUX_DATA(D6_ED6_NAF6_MARK, PORT80_FN1), PINMUX_DATA(D7_ED7_NAF7_MARK, PORT81_FN1), PINMUX_DATA(D8_ED8_NAF8_MARK, PORT82_FN1), PINMUX_DATA(D9_ED9_NAF9_MARK, PORT83_FN1), PINMUX_DATA(D10_ED10_NAF10_MARK, PORT84_FN1), PINMUX_DATA(D11_ED11_NAF11_MARK, PORT85_FN1), PINMUX_DATA(D12_ED12_NAF12_MARK, PORT86_FN1), PINMUX_DATA(D13_ED13_NAF13_MARK, PORT87_FN1), PINMUX_DATA(D14_ED14_NAF14_MARK, PORT88_FN1), PINMUX_DATA(D15_ED15_NAF15_MARK, PORT89_FN1), PINMUX_DATA(CS4_MARK, PORT90_FN1), PINMUX_DATA(CS5A_MARK, PORT91_FN1), PINMUX_DATA(FMSICK_MARK, PORT91_FN2), PINMUX_DATA(CS5B_MARK, PORT92_FN1), PINMUX_DATA(FCE1_MARK, PORT92_FN2), /* 55-3 (FN) */ PINMUX_DATA(CS6B_MARK, PORT93_FN1), PINMUX_DATA(XCS2_MARK, PORT93_FN2), PINMUX_DATA(CS6A_MARK, PORT93_FN3), PINMUX_DATA(DACK0_MARK, PORT93_FN4), PINMUX_DATA(FCE0_MARK, PORT94_FN1), PINMUX_DATA(WAIT_MARK, PORT95_FN1), PINMUX_DATA(DREQ0_MARK, PORT95_FN2), PINMUX_DATA(RD_XRD_MARK, PORT96_FN1), PINMUX_DATA(WE0_XWR0_FWE_MARK, PORT97_FN1), PINMUX_DATA(WE1_XWR1_MARK, PORT98_FN1), PINMUX_DATA(FRB_MARK, PORT99_FN1), PINMUX_DATA(CKO_MARK, PORT100_FN1), PINMUX_DATA(NBRSTOUT_MARK, PORT101_FN1), PINMUX_DATA(NBRST_MARK, PORT102_FN1), PINMUX_DATA(GPS_EPPSIN_MARK, PORT106_FN1), PINMUX_DATA(LATCHPULSE_MARK, PORT110_FN1), PINMUX_DATA(LTESIGNAL_MARK, PORT111_FN1), PINMUX_DATA(LEGACYSTATE_MARK, PORT112_FN1), PINMUX_DATA(TCKON_MARK, PORT118_FN1), PINMUX_DATA(VIO_VD_MARK, 
PORT128_FN1), PINMUX_DATA(PORT128_KEYOUT0_MARK, PORT128_FN2), PINMUX_DATA(IPORT0_MARK, PORT128_FN3), PINMUX_DATA(VIO_HD_MARK, PORT129_FN1), PINMUX_DATA(PORT129_KEYOUT1_MARK, PORT129_FN2), PINMUX_DATA(IPORT1_MARK, PORT129_FN3), PINMUX_DATA(VIO_D0_MARK, PORT130_FN1), PINMUX_DATA(PORT130_KEYOUT2_MARK, PORT130_FN2), PINMUX_DATA(PORT130_MSIOF2_RXD_MARK, PORT130_FN3), PINMUX_DATA(VIO_D1_MARK, PORT131_FN1), PINMUX_DATA(PORT131_KEYOUT3_MARK, PORT131_FN2), PINMUX_DATA(PORT131_MSIOF2_SS1_MARK, PORT131_FN3), PINMUX_DATA(VIO_D2_MARK, PORT132_FN1), PINMUX_DATA(PORT132_KEYOUT4_MARK, PORT132_FN2), PINMUX_DATA(PORT132_MSIOF2_SS2_MARK, PORT132_FN3), PINMUX_DATA(VIO_D3_MARK, PORT133_FN1), PINMUX_DATA(PORT133_KEYOUT5_MARK, PORT133_FN2), PINMUX_DATA(PORT133_MSIOF2_TSYNC_MARK, PORT133_FN3), PINMUX_DATA(VIO_D4_MARK, PORT134_FN1), PINMUX_DATA(PORT134_KEYIN0_MARK, PORT134_FN2), PINMUX_DATA(PORT134_MSIOF2_TXD_MARK, PORT134_FN3), PINMUX_DATA(VIO_D5_MARK, PORT135_FN1), PINMUX_DATA(PORT135_KEYIN1_MARK, PORT135_FN2), PINMUX_DATA(PORT135_MSIOF2_TSCK_MARK, PORT135_FN3), PINMUX_DATA(VIO_D6_MARK, PORT136_FN1), PINMUX_DATA(PORT136_KEYIN2_MARK, PORT136_FN2), PINMUX_DATA(VIO_D7_MARK, PORT137_FN1), PINMUX_DATA(PORT137_KEYIN3_MARK, PORT137_FN2), PINMUX_DATA(VIO_D8_MARK, PORT138_FN1), PINMUX_DATA(M9_SLCD_A01_MARK, PORT138_FN2), PINMUX_DATA(PORT138_FSIAOMC_MARK, PORT138_FN3), PINMUX_DATA(VIO_D9_MARK, PORT139_FN1), PINMUX_DATA(M10_SLCD_CK1_MARK, PORT139_FN2), PINMUX_DATA(PORT139_FSIAOLR_MARK, PORT139_FN3), PINMUX_DATA(VIO_D10_MARK, PORT140_FN1), PINMUX_DATA(M11_SLCD_SO1_MARK, PORT140_FN2), PINMUX_DATA(TPU0TO2_MARK, PORT140_FN3), PINMUX_DATA(PORT140_FSIAOBT_MARK, PORT140_FN4), PINMUX_DATA(VIO_D11_MARK, PORT141_FN1), PINMUX_DATA(M12_SLCD_CE1_MARK, PORT141_FN2), PINMUX_DATA(TPU0TO3_MARK, PORT141_FN3), PINMUX_DATA(PORT141_FSIAOSLD_MARK, PORT141_FN4), PINMUX_DATA(VIO_D12_MARK, PORT142_FN1), PINMUX_DATA(M13_BSW_MARK, PORT142_FN2), PINMUX_DATA(PORT142_FSIACK_MARK, PORT142_FN3), PINMUX_DATA(VIO_D13_MARK, 
PORT143_FN1), PINMUX_DATA(M14_GSW_MARK, PORT143_FN2), PINMUX_DATA(PORT143_FSIAILR_MARK, PORT143_FN3), PINMUX_DATA(VIO_D14_MARK, PORT144_FN1), PINMUX_DATA(M15_RSW_MARK, PORT144_FN2), PINMUX_DATA(PORT144_FSIAIBT_MARK, PORT144_FN3), PINMUX_DATA(VIO_D15_MARK, PORT145_FN1), PINMUX_DATA(TPU1TO3_MARK, PORT145_FN2), PINMUX_DATA(PORT145_FSIAISLD_MARK, PORT145_FN3), PINMUX_DATA(VIO_CLK_MARK, PORT146_FN1), PINMUX_DATA(PORT146_KEYIN4_MARK, PORT146_FN2), PINMUX_DATA(IPORT2_MARK, PORT146_FN3), PINMUX_DATA(VIO_FIELD_MARK, PORT147_FN1), PINMUX_DATA(PORT147_KEYIN5_MARK, PORT147_FN2), PINMUX_DATA(VIO_CKO_MARK, PORT148_FN1), PINMUX_DATA(PORT148_KEYIN6_MARK, PORT148_FN2), PINMUX_DATA(A27_MARK, PORT149_FN1), PINMUX_DATA(RDWR_XWE_MARK, PORT149_FN2), PINMUX_DATA(MFG0_IN1_MARK, PORT149_FN3), PINMUX_DATA(MFG0_IN2_MARK, PORT150_FN1), PINMUX_DATA(TS_SPSYNC3_MARK, PORT151_FN1), PINMUX_DATA(MSIOF2_RSCK_MARK, PORT151_FN2), PINMUX_DATA(TS_SDAT3_MARK, PORT152_FN1), PINMUX_DATA(MSIOF2_RSYNC_MARK, PORT152_FN2), PINMUX_DATA(TPU1TO2_MARK, PORT153_FN1), PINMUX_DATA(TS_SDEN3_MARK, PORT153_FN2), PINMUX_DATA(PORT153_MSIOF2_SS1_MARK, PORT153_FN3), PINMUX_DATA(SOUT3_MARK, PORT154_FN1), PINMUX_DATA(SCIFA2_TXD1_MARK, PORT154_FN2), PINMUX_DATA(MSIOF2_MCK0_MARK, PORT154_FN3), PINMUX_DATA(SIN3_MARK, PORT155_FN1), PINMUX_DATA(SCIFA2_RXD1_MARK, PORT155_FN2), PINMUX_DATA(MSIOF2_MCK1_MARK, PORT155_FN3), PINMUX_DATA(XRTS3_MARK, PORT156_FN1), PINMUX_DATA(SCIFA2_RTS1_MARK, PORT156_FN2), PINMUX_DATA(PORT156_MSIOF2_SS2_MARK, PORT156_FN3), PINMUX_DATA(XCTS3_MARK, PORT157_FN1), PINMUX_DATA(SCIFA2_CTS1_MARK, PORT157_FN2), PINMUX_DATA(PORT157_MSIOF2_RXD_MARK, PORT157_FN3), /* 55-4 (FN) */ PINMUX_DATA(DINT_MARK, PORT158_FN1), PINMUX_DATA(SCIFA2_SCK1_MARK, PORT158_FN2), PINMUX_DATA(TS_SCK3_MARK, PORT158_FN3), PINMUX_DATA(PORT159_SCIFB_SCK_MARK, PORT159_FN1), PINMUX_DATA(PORT159_SCIFA5_SCK_MARK, PORT159_FN2), PINMUX_DATA(NMI_MARK, PORT159_FN3), PINMUX_DATA(PORT160_SCIFB_TXD_MARK, PORT160_FN1), 
PINMUX_DATA(PORT160_SCIFA5_TXD_MARK, PORT160_FN2), PINMUX_DATA(SOUT0_MARK, PORT160_FN3), PINMUX_DATA(PORT161_SCIFB_CTS_MARK, PORT161_FN1), PINMUX_DATA(PORT161_SCIFA5_CTS_MARK, PORT161_FN2), PINMUX_DATA(XCTS0_MARK, PORT161_FN3), PINMUX_DATA(MFG3_IN2_MARK, PORT161_FN4), PINMUX_DATA(PORT162_SCIFB_RXD_MARK, PORT162_FN1), PINMUX_DATA(PORT162_SCIFA5_RXD_MARK, PORT162_FN2), PINMUX_DATA(SIN0_MARK, PORT162_FN3), PINMUX_DATA(MFG3_IN1_MARK, PORT162_FN4), PINMUX_DATA(PORT163_SCIFB_RTS_MARK, PORT163_FN1), PINMUX_DATA(PORT163_SCIFA5_RTS_MARK, PORT163_FN2), PINMUX_DATA(XRTS0_MARK, PORT163_FN3), PINMUX_DATA(MFG3_OUT1_MARK, PORT163_FN4), PINMUX_DATA(TPU3TO0_MARK, PORT163_FN5), PINMUX_DATA(LCDD0_MARK, PORT192_FN1), PINMUX_DATA(PORT192_KEYOUT0_MARK, PORT192_FN2), PINMUX_DATA(EXT_CKI_MARK, PORT192_FN3), PINMUX_DATA(LCDD1_MARK, PORT193_FN1), PINMUX_DATA(PORT193_KEYOUT1_MARK, PORT193_FN2), PINMUX_DATA(PORT193_SCIFA5_CTS_MARK, PORT193_FN3), PINMUX_DATA(BBIF2_TSYNC1_MARK, PORT193_FN4), PINMUX_DATA(LCDD2_MARK, PORT194_FN1), PINMUX_DATA(PORT194_KEYOUT2_MARK, PORT194_FN2), PINMUX_DATA(PORT194_SCIFA5_RTS_MARK, PORT194_FN3), PINMUX_DATA(BBIF2_TSCK1_MARK, PORT194_FN4), PINMUX_DATA(LCDD3_MARK, PORT195_FN1), PINMUX_DATA(PORT195_KEYOUT3_MARK, PORT195_FN2), PINMUX_DATA(PORT195_SCIFA5_RXD_MARK, PORT195_FN3), PINMUX_DATA(BBIF2_TXD1_MARK, PORT195_FN4), PINMUX_DATA(LCDD4_MARK, PORT196_FN1), PINMUX_DATA(PORT196_KEYOUT4_MARK, PORT196_FN2), PINMUX_DATA(PORT196_SCIFA5_TXD_MARK, PORT196_FN3), PINMUX_DATA(LCDD5_MARK, PORT197_FN1), PINMUX_DATA(PORT197_KEYOUT5_MARK, PORT197_FN2), PINMUX_DATA(PORT197_SCIFA5_SCK_MARK, PORT197_FN3), PINMUX_DATA(MFG2_OUT2_MARK, PORT197_FN4), PINMUX_DATA(LCDD6_MARK, PORT198_FN1), PINMUX_DATA(LCDD7_MARK, PORT199_FN1), PINMUX_DATA(TPU4TO1_MARK, PORT199_FN2), PINMUX_DATA(MFG4_OUT2_MARK, PORT199_FN3), PINMUX_DATA(LCDD8_MARK, PORT200_FN1), PINMUX_DATA(PORT200_KEYIN0_MARK, PORT200_FN2), PINMUX_DATA(VIO_DR0_MARK, PORT200_FN3), PINMUX_DATA(D16_MARK, PORT200_FN4), PINMUX_DATA(LCDD9_MARK, 
PORT201_FN1), PINMUX_DATA(PORT201_KEYIN1_MARK, PORT201_FN2), PINMUX_DATA(VIO_DR1_MARK, PORT201_FN3), PINMUX_DATA(D17_MARK, PORT201_FN4), PINMUX_DATA(LCDD10_MARK, PORT202_FN1), PINMUX_DATA(PORT202_KEYIN2_MARK, PORT202_FN2), PINMUX_DATA(VIO_DR2_MARK, PORT202_FN3), PINMUX_DATA(D18_MARK, PORT202_FN4), PINMUX_DATA(LCDD11_MARK, PORT203_FN1), PINMUX_DATA(PORT203_KEYIN3_MARK, PORT203_FN2), PINMUX_DATA(VIO_DR3_MARK, PORT203_FN3), PINMUX_DATA(D19_MARK, PORT203_FN4), PINMUX_DATA(LCDD12_MARK, PORT204_FN1), PINMUX_DATA(PORT204_KEYIN4_MARK, PORT204_FN2), PINMUX_DATA(VIO_DR4_MARK, PORT204_FN3), PINMUX_DATA(D20_MARK, PORT204_FN4), PINMUX_DATA(LCDD13_MARK, PORT205_FN1), PINMUX_DATA(PORT205_KEYIN5_MARK, PORT205_FN2), PINMUX_DATA(VIO_DR5_MARK, PORT205_FN3), PINMUX_DATA(D21_MARK, PORT205_FN4), PINMUX_DATA(LCDD14_MARK, PORT206_FN1), PINMUX_DATA(PORT206_KEYIN6_MARK, PORT206_FN2), PINMUX_DATA(VIO_DR6_MARK, PORT206_FN3), PINMUX_DATA(D22_MARK, PORT206_FN4), PINMUX_DATA(LCDD15_MARK, PORT207_FN1), PINMUX_DATA(PORT207_MSIOF0L_SS1_MARK, PORT207_FN2), PINMUX_DATA(PORT207_KEYOUT0_MARK, PORT207_FN3), PINMUX_DATA(VIO_DR7_MARK, PORT207_FN4), PINMUX_DATA(D23_MARK, PORT207_FN5), PINMUX_DATA(LCDD16_MARK, PORT208_FN1), PINMUX_DATA(PORT208_MSIOF0L_SS2_MARK, PORT208_FN2), PINMUX_DATA(PORT208_KEYOUT1_MARK, PORT208_FN3), PINMUX_DATA(VIO_VDR_MARK, PORT208_FN4), PINMUX_DATA(D24_MARK, PORT208_FN5), PINMUX_DATA(LCDD17_MARK, PORT209_FN1), PINMUX_DATA(PORT209_KEYOUT2_MARK, PORT209_FN2), PINMUX_DATA(VIO_HDR_MARK, PORT209_FN3), PINMUX_DATA(D25_MARK, PORT209_FN4), PINMUX_DATA(LCDD18_MARK, PORT210_FN1), PINMUX_DATA(DREQ2_MARK, PORT210_FN2), PINMUX_DATA(PORT210_MSIOF0L_SS1_MARK, PORT210_FN3), PINMUX_DATA(D26_MARK, PORT210_FN4), PINMUX_DATA(LCDD19_MARK, PORT211_FN1), PINMUX_DATA(PORT211_MSIOF0L_SS2_MARK, PORT211_FN2), PINMUX_DATA(D27_MARK, PORT211_FN3), PINMUX_DATA(LCDD20_MARK, PORT212_FN1), PINMUX_DATA(TS_SPSYNC1_MARK, PORT212_FN2), PINMUX_DATA(MSIOF0L_MCK0_MARK, PORT212_FN3), PINMUX_DATA(D28_MARK, PORT212_FN4), 
PINMUX_DATA(LCDD21_MARK, PORT213_FN1), PINMUX_DATA(TS_SDAT1_MARK, PORT213_FN2), PINMUX_DATA(MSIOF0L_MCK1_MARK, PORT213_FN3), PINMUX_DATA(D29_MARK, PORT213_FN4), PINMUX_DATA(LCDD22_MARK, PORT214_FN1), PINMUX_DATA(TS_SDEN1_MARK, PORT214_FN2), PINMUX_DATA(MSIOF0L_RSCK_MARK, PORT214_FN3), PINMUX_DATA(D30_MARK, PORT214_FN4), PINMUX_DATA(LCDD23_MARK, PORT215_FN1), PINMUX_DATA(TS_SCK1_MARK, PORT215_FN2), PINMUX_DATA(MSIOF0L_RSYNC_MARK, PORT215_FN3), PINMUX_DATA(D31_MARK, PORT215_FN4), PINMUX_DATA(LCDDCK_MARK, PORT216_FN1), PINMUX_DATA(LCDWR_MARK, PORT216_FN2), PINMUX_DATA(PORT216_KEYOUT3_MARK, PORT216_FN3), PINMUX_DATA(VIO_CLKR_MARK, PORT216_FN4), PINMUX_DATA(LCDRD_MARK, PORT217_FN1), PINMUX_DATA(DACK2_MARK, PORT217_FN2), PINMUX_DATA(MSIOF0L_TSYNC_MARK, PORT217_FN3), PINMUX_DATA(LCDHSYN_MARK, PORT218_FN1), PINMUX_DATA(LCDCS_MARK, PORT218_FN2), PINMUX_DATA(LCDCS2_MARK, PORT218_FN3), PINMUX_DATA(DACK3_MARK, PORT218_FN4), PINMUX_DATA(PORT218_VIO_CKOR_MARK, PORT218_FN5), PINMUX_DATA(PORT218_KEYOUT4_MARK, PORT218_FN6), PINMUX_DATA(LCDDISP_MARK, PORT219_FN1), PINMUX_DATA(LCDRS_MARK, PORT219_FN2), PINMUX_DATA(DREQ3_MARK, PORT219_FN3), PINMUX_DATA(MSIOF0L_TSCK_MARK, PORT219_FN4), PINMUX_DATA(LCDVSYN_MARK, PORT220_FN1), PINMUX_DATA(LCDVSYN2_MARK, PORT220_FN2), PINMUX_DATA(PORT220_KEYOUT5_MARK, PORT220_FN3), PINMUX_DATA(LCDLCLK_MARK, PORT221_FN1), PINMUX_DATA(DREQ1_MARK, PORT221_FN2), PINMUX_DATA(PWEN_MARK, PORT221_FN3), PINMUX_DATA(MSIOF0L_RXD_MARK, PORT221_FN4), PINMUX_DATA(LCDDON_MARK, PORT222_FN1), PINMUX_DATA(LCDDON2_MARK, PORT222_FN2), PINMUX_DATA(DACK1_MARK, PORT222_FN3), PINMUX_DATA(OVCN_MARK, PORT222_FN4), PINMUX_DATA(MSIOF0L_TXD_MARK, PORT222_FN5), PINMUX_DATA(SCIFA1_TXD_MARK, PORT225_FN1), PINMUX_DATA(OVCN2_MARK, PORT225_FN2), PINMUX_DATA(EXTLP_MARK, PORT226_FN1), PINMUX_DATA(SCIFA1_SCK_MARK, PORT226_FN2), PINMUX_DATA(USBTERM_MARK, PORT226_FN3), PINMUX_DATA(PORT226_VIO_CKO2_MARK, PORT226_FN4), PINMUX_DATA(SCIFA1_RTS_MARK, PORT227_FN1), PINMUX_DATA(IDIN_MARK, 
PORT227_FN2), PINMUX_DATA(SCIFA1_RXD_MARK, PORT228_FN1), PINMUX_DATA(SCIFA1_CTS_MARK, PORT229_FN1), PINMUX_DATA(MFG1_IN1_MARK, PORT229_FN2), PINMUX_DATA(MSIOF1_TXD_MARK, PORT230_FN1), PINMUX_DATA(SCIFA2_TXD2_MARK, PORT230_FN2), PINMUX_DATA(PORT230_FSIAOMC_MARK, PORT230_FN3), PINMUX_DATA(MSIOF1_TSYNC_MARK, PORT231_FN1), PINMUX_DATA(SCIFA2_CTS2_MARK, PORT231_FN2), PINMUX_DATA(PORT231_FSIAOLR_MARK, PORT231_FN3), PINMUX_DATA(MSIOF1_TSCK_MARK, PORT232_FN1), PINMUX_DATA(SCIFA2_SCK2_MARK, PORT232_FN2), PINMUX_DATA(PORT232_FSIAOBT_MARK, PORT232_FN3), PINMUX_DATA(MSIOF1_RXD_MARK, PORT233_FN1), PINMUX_DATA(SCIFA2_RXD2_MARK, PORT233_FN2), PINMUX_DATA(GPS_VCOTRIG_MARK, PORT233_FN3), PINMUX_DATA(PORT233_FSIACK_MARK, PORT233_FN4), PINMUX_DATA(MSIOF1_RSCK_MARK, PORT234_FN1), PINMUX_DATA(SCIFA2_RTS2_MARK, PORT234_FN2), PINMUX_DATA(PORT234_FSIAOSLD_MARK, PORT234_FN3), PINMUX_DATA(MSIOF1_RSYNC_MARK, PORT235_FN1), PINMUX_DATA(OPORT0_MARK, PORT235_FN2), PINMUX_DATA(MFG1_IN2_MARK, PORT235_FN3), PINMUX_DATA(PORT235_FSIAILR_MARK, PORT235_FN4), PINMUX_DATA(MSIOF1_MCK0_MARK, PORT236_FN1), PINMUX_DATA(I2C_SDA2_MARK, PORT236_FN2), PINMUX_DATA(PORT236_FSIAIBT_MARK, PORT236_FN3), PINMUX_DATA(MSIOF1_MCK1_MARK, PORT237_FN1), PINMUX_DATA(I2C_SCL2_MARK, PORT237_FN2), PINMUX_DATA(PORT237_FSIAISLD_MARK, PORT237_FN3), PINMUX_DATA(MSIOF1_SS1_MARK, PORT238_FN1), PINMUX_DATA(EDBGREQ3_MARK, PORT238_FN2), /* 55-5 (FN) */ PINMUX_DATA(MSIOF1_SS2_MARK, PORT239_FN1), PINMUX_DATA(SCIFA6_TXD_MARK, PORT240_FN1), PINMUX_DATA(PORT241_IRDA_OUT_MARK, PORT241_FN1), PINMUX_DATA(PORT241_IROUT_MARK, PORT241_FN2), PINMUX_DATA(MFG4_OUT1_MARK, PORT241_FN3), PINMUX_DATA(TPU4TO0_MARK, PORT241_FN4), PINMUX_DATA(PORT242_IRDA_IN_MARK, PORT242_FN1), PINMUX_DATA(MFG4_IN2_MARK, PORT242_FN2), PINMUX_DATA(PORT243_IRDA_FIRSEL_MARK, PORT243_FN1), PINMUX_DATA(PORT243_VIO_CKO2_MARK, PORT243_FN2), PINMUX_DATA(PORT244_SCIFA5_CTS_MARK, PORT244_FN1), PINMUX_DATA(MFG2_IN1_MARK, PORT244_FN2), PINMUX_DATA(PORT244_SCIFB_CTS_MARK, PORT244_FN3), 
PINMUX_DATA(PORT245_SCIFA5_RTS_MARK, PORT245_FN1), PINMUX_DATA(MFG2_IN2_MARK, PORT245_FN2), PINMUX_DATA(PORT245_SCIFB_RTS_MARK, PORT245_FN3), PINMUX_DATA(PORT246_SCIFA5_RXD_MARK, PORT246_FN1), PINMUX_DATA(MFG1_OUT1_MARK, PORT246_FN2), PINMUX_DATA(PORT246_SCIFB_RXD_MARK, PORT246_FN3), PINMUX_DATA(TPU1TO0_MARK, PORT246_FN4), PINMUX_DATA(PORT247_SCIFA5_TXD_MARK, PORT247_FN1), PINMUX_DATA(MFG3_OUT2_MARK, PORT247_FN2), PINMUX_DATA(PORT247_SCIFB_TXD_MARK, PORT247_FN3), PINMUX_DATA(TPU3TO1_MARK, PORT247_FN4), PINMUX_DATA(PORT248_SCIFA5_SCK_MARK, PORT248_FN1), PINMUX_DATA(MFG2_OUT1_MARK, PORT248_FN2), PINMUX_DATA(PORT248_SCIFB_SCK_MARK, PORT248_FN3), PINMUX_DATA(TPU2TO0_MARK, PORT248_FN4), PINMUX_DATA(PORT249_IROUT_MARK, PORT249_FN1), PINMUX_DATA(MFG4_IN1_MARK, PORT249_FN2), PINMUX_DATA(SDHICLK0_MARK, PORT250_FN1), PINMUX_DATA(TCK2_SWCLK_MC0_MARK, PORT250_FN2), PINMUX_DATA(SDHICD0_MARK, PORT251_FN1), PINMUX_DATA(SDHID0_0_MARK, PORT252_FN1), PINMUX_DATA(TMS2_SWDIO_MC0_MARK, PORT252_FN2), PINMUX_DATA(SDHID0_1_MARK, PORT253_FN1), PINMUX_DATA(TDO2_SWO0_MC0_MARK, PORT253_FN2), PINMUX_DATA(SDHID0_2_MARK, PORT254_FN1), PINMUX_DATA(TDI2_MARK, PORT254_FN2), PINMUX_DATA(SDHID0_3_MARK, PORT255_FN1), PINMUX_DATA(RTCK2_SWO1_MC0_MARK, PORT255_FN2), PINMUX_DATA(SDHICMD0_MARK, PORT256_FN1), PINMUX_DATA(TRST2_MARK, PORT256_FN2), PINMUX_DATA(SDHIWP0_MARK, PORT257_FN1), PINMUX_DATA(EDBGREQ2_MARK, PORT257_FN2), PINMUX_DATA(SDHICLK1_MARK, PORT258_FN1), PINMUX_DATA(TCK3_SWCLK_MC1_MARK, PORT258_FN2), PINMUX_DATA(SDHID1_0_MARK, PORT259_FN1), PINMUX_DATA(M11_SLCD_SO2_MARK, PORT259_FN2), PINMUX_DATA(TS_SPSYNC2_MARK, PORT259_FN3), PINMUX_DATA(TMS3_SWDIO_MC1_MARK, PORT259_FN4), PINMUX_DATA(SDHID1_1_MARK, PORT260_FN1), PINMUX_DATA(M9_SLCD_A02_MARK, PORT260_FN2), PINMUX_DATA(TS_SDAT2_MARK, PORT260_FN3), PINMUX_DATA(TDO3_SWO0_MC1_MARK, PORT260_FN4), PINMUX_DATA(SDHID1_2_MARK, PORT261_FN1), PINMUX_DATA(M10_SLCD_CK2_MARK, PORT261_FN2), PINMUX_DATA(TS_SDEN2_MARK, PORT261_FN3), PINMUX_DATA(TDI3_MARK, 
PORT261_FN4), PINMUX_DATA(SDHID1_3_MARK, PORT262_FN1), PINMUX_DATA(M12_SLCD_CE2_MARK, PORT262_FN2), PINMUX_DATA(TS_SCK2_MARK, PORT262_FN3), PINMUX_DATA(RTCK3_SWO1_MC1_MARK, PORT262_FN4), PINMUX_DATA(SDHICMD1_MARK, PORT263_FN1), PINMUX_DATA(TRST3_MARK, PORT263_FN2), PINMUX_DATA(RESETOUTS_MARK, PORT264_FN1), };
/*
 * pinmux_gpios[]: GPIO descriptor table.  GPIO_PORT_ALL() expands to one
 * entry per physical port; the GPIO_FN() entries that follow expose each
 * named pin function, mirroring the *_MARK identifiers used in
 * pinmux_data[] above (same "55-1" .. "55-5" datasheet sections).
 */
static struct pinmux_gpio pinmux_gpios[] = { /* 55-1 -> 55-5 (GPIO) */ GPIO_PORT_ALL(), /* Special Pull-up / Pull-down Functions */ GPIO_FN(PORT66_KEYIN0_PU), GPIO_FN(PORT67_KEYIN1_PU), GPIO_FN(PORT68_KEYIN2_PU), GPIO_FN(PORT69_KEYIN3_PU), GPIO_FN(PORT70_KEYIN4_PU), GPIO_FN(PORT71_KEYIN5_PU), GPIO_FN(PORT72_KEYIN6_PU), /* 55-1 (FN) */ GPIO_FN(VBUS_0), GPIO_FN(CPORT0), GPIO_FN(CPORT1), GPIO_FN(CPORT2), GPIO_FN(CPORT3), GPIO_FN(CPORT4), GPIO_FN(CPORT5), GPIO_FN(CPORT6), GPIO_FN(CPORT7), GPIO_FN(CPORT8), GPIO_FN(CPORT9), GPIO_FN(CPORT10), GPIO_FN(CPORT11), GPIO_FN(SIN2), GPIO_FN(CPORT12), GPIO_FN(XCTS2), GPIO_FN(CPORT13), GPIO_FN(RFSPO4), GPIO_FN(CPORT14), GPIO_FN(RFSPO5), GPIO_FN(CPORT15), GPIO_FN(SCIFA0_SCK), GPIO_FN(GPS_AGC2), GPIO_FN(CPORT16), GPIO_FN(SCIFA0_TXD), GPIO_FN(GPS_AGC3), GPIO_FN(CPORT17_IC_OE), GPIO_FN(SOUT2), GPIO_FN(CPORT18), GPIO_FN(XRTS2), GPIO_FN(PORT19_VIO_CKO2), GPIO_FN(CPORT19_MPORT1), GPIO_FN(CPORT20), GPIO_FN(RFSPO6), GPIO_FN(CPORT21), GPIO_FN(STATUS0), GPIO_FN(CPORT22), GPIO_FN(STATUS1), GPIO_FN(CPORT23), GPIO_FN(STATUS2), GPIO_FN(RFSPO7), GPIO_FN(B_SYNLD1), GPIO_FN(B_SYNLD2), GPIO_FN(SYSENMSK), GPIO_FN(XMAINPS), GPIO_FN(XDIVPS), GPIO_FN(XIDRST), GPIO_FN(IDCLK), GPIO_FN(IC_DP), GPIO_FN(IDIO), GPIO_FN(IC_DM), GPIO_FN(SOUT1), GPIO_FN(SCIFA4_TXD), GPIO_FN(M02_BERDAT), GPIO_FN(SIN1), GPIO_FN(SCIFA4_RXD), GPIO_FN(XWUP), GPIO_FN(XRTS1), GPIO_FN(SCIFA4_RTS), GPIO_FN(M03_BERCLK), GPIO_FN(XCTS1), GPIO_FN(SCIFA4_CTS), GPIO_FN(PCMCLKO), GPIO_FN(SYNC8KO), /* 55-2 (FN) */ GPIO_FN(DNPCM_A), GPIO_FN(UPPCM_A), GPIO_FN(VACK), GPIO_FN(XTALB1L), GPIO_FN(GPS_AGC1), GPIO_FN(SCIFA0_RTS), GPIO_FN(GPS_AGC4), GPIO_FN(SCIFA0_RXD), GPIO_FN(GPS_PWRDOWN), 
GPIO_FN(SCIFA0_CTS), GPIO_FN(GPS_IM), GPIO_FN(GPS_IS), GPIO_FN(GPS_QM), GPIO_FN(GPS_QS), GPIO_FN(FMSOCK), GPIO_FN(PORT49_IRDA_OUT), GPIO_FN(PORT49_IROUT), GPIO_FN(FMSOOLR), GPIO_FN(BBIF2_TSYNC2), GPIO_FN(TPU2TO2), GPIO_FN(IPORT3), GPIO_FN(FMSIOLR), GPIO_FN(FMSOOBT), GPIO_FN(BBIF2_TSCK2), GPIO_FN(TPU2TO3), GPIO_FN(OPORT1), GPIO_FN(FMSIOBT), GPIO_FN(FMSOSLD), GPIO_FN(BBIF2_TXD2), GPIO_FN(OPORT2), GPIO_FN(FMSOILR), GPIO_FN(PORT53_IRDA_IN), GPIO_FN(TPU3TO3), GPIO_FN(OPORT3), GPIO_FN(FMSIILR), GPIO_FN(FMSOIBT), GPIO_FN(PORT54_IRDA_FIRSEL), GPIO_FN(TPU3TO2), GPIO_FN(FMSIIBT), GPIO_FN(FMSISLD), GPIO_FN(MFG0_OUT1), GPIO_FN(TPU0TO0), GPIO_FN(A0_EA0), GPIO_FN(BS), GPIO_FN(A12_EA12), GPIO_FN(PORT58_VIO_CKOR), GPIO_FN(TPU4TO2), GPIO_FN(A13_EA13), GPIO_FN(PORT59_IROUT), GPIO_FN(MFG0_OUT2), GPIO_FN(TPU0TO1), GPIO_FN(A14_EA14), GPIO_FN(PORT60_KEYOUT5), GPIO_FN(A15_EA15), GPIO_FN(PORT61_KEYOUT4), GPIO_FN(A16_EA16), GPIO_FN(PORT62_KEYOUT3), GPIO_FN(MSIOF0_SS1), GPIO_FN(A17_EA17), GPIO_FN(PORT63_KEYOUT2), GPIO_FN(MSIOF0_TSYNC), GPIO_FN(A18_EA18), GPIO_FN(PORT64_KEYOUT1), GPIO_FN(MSIOF0_TSCK), GPIO_FN(A19_EA19), GPIO_FN(PORT65_KEYOUT0), GPIO_FN(MSIOF0_TXD), GPIO_FN(A20_EA20), GPIO_FN(PORT66_KEYIN0), GPIO_FN(MSIOF0_RSCK), GPIO_FN(A21_EA21), GPIO_FN(PORT67_KEYIN1), GPIO_FN(MSIOF0_RSYNC), GPIO_FN(A22_EA22), GPIO_FN(PORT68_KEYIN2), GPIO_FN(MSIOF0_MCK0), GPIO_FN(A23_EA23), GPIO_FN(PORT69_KEYIN3), GPIO_FN(MSIOF0_MCK1), GPIO_FN(A24_EA24), GPIO_FN(PORT70_KEYIN4), GPIO_FN(MSIOF0_RXD), GPIO_FN(A25_EA25), GPIO_FN(PORT71_KEYIN5), GPIO_FN(MSIOF0_SS2), GPIO_FN(A26), GPIO_FN(PORT72_KEYIN6), GPIO_FN(D0_ED0_NAF0), GPIO_FN(D1_ED1_NAF1), GPIO_FN(D2_ED2_NAF2), GPIO_FN(D3_ED3_NAF3), GPIO_FN(D4_ED4_NAF4), GPIO_FN(D5_ED5_NAF5), GPIO_FN(D6_ED6_NAF6), GPIO_FN(D7_ED7_NAF7), GPIO_FN(D8_ED8_NAF8), GPIO_FN(D9_ED9_NAF9), GPIO_FN(D10_ED10_NAF10), GPIO_FN(D11_ED11_NAF11), GPIO_FN(D12_ED12_NAF12), GPIO_FN(D13_ED13_NAF13), GPIO_FN(D14_ED14_NAF14), GPIO_FN(D15_ED15_NAF15), GPIO_FN(CS4), GPIO_FN(CS5A), GPIO_FN(FMSICK), 
/* 55-3 (FN) */ GPIO_FN(CS5B), GPIO_FN(FCE1), GPIO_FN(CS6B), GPIO_FN(XCS2), GPIO_FN(CS6A), GPIO_FN(DACK0), GPIO_FN(FCE0), GPIO_FN(WAIT), GPIO_FN(DREQ0), GPIO_FN(RD_XRD), GPIO_FN(WE0_XWR0_FWE), GPIO_FN(WE1_XWR1), GPIO_FN(FRB), GPIO_FN(CKO), GPIO_FN(NBRSTOUT), GPIO_FN(NBRST), GPIO_FN(GPS_EPPSIN), GPIO_FN(LATCHPULSE), GPIO_FN(LTESIGNAL), GPIO_FN(LEGACYSTATE), GPIO_FN(TCKON), GPIO_FN(VIO_VD), GPIO_FN(PORT128_KEYOUT0), GPIO_FN(IPORT0), GPIO_FN(VIO_HD), GPIO_FN(PORT129_KEYOUT1), GPIO_FN(IPORT1), GPIO_FN(VIO_D0), GPIO_FN(PORT130_KEYOUT2), GPIO_FN(PORT130_MSIOF2_RXD), GPIO_FN(VIO_D1), GPIO_FN(PORT131_KEYOUT3), GPIO_FN(PORT131_MSIOF2_SS1), GPIO_FN(VIO_D2), GPIO_FN(PORT132_KEYOUT4), GPIO_FN(PORT132_MSIOF2_SS2), GPIO_FN(VIO_D3), GPIO_FN(PORT133_KEYOUT5), GPIO_FN(PORT133_MSIOF2_TSYNC), GPIO_FN(VIO_D4), GPIO_FN(PORT134_KEYIN0), GPIO_FN(PORT134_MSIOF2_TXD), GPIO_FN(VIO_D5), GPIO_FN(PORT135_KEYIN1), GPIO_FN(PORT135_MSIOF2_TSCK), GPIO_FN(VIO_D6), GPIO_FN(PORT136_KEYIN2), GPIO_FN(VIO_D7), GPIO_FN(PORT137_KEYIN3), GPIO_FN(VIO_D8), GPIO_FN(M9_SLCD_A01), GPIO_FN(PORT138_FSIAOMC), GPIO_FN(VIO_D9), GPIO_FN(M10_SLCD_CK1), GPIO_FN(PORT139_FSIAOLR), GPIO_FN(VIO_D10), GPIO_FN(M11_SLCD_SO1), GPIO_FN(TPU0TO2), GPIO_FN(PORT140_FSIAOBT), GPIO_FN(VIO_D11), GPIO_FN(M12_SLCD_CE1), GPIO_FN(TPU0TO3), GPIO_FN(PORT141_FSIAOSLD), GPIO_FN(VIO_D12), GPIO_FN(M13_BSW), GPIO_FN(PORT142_FSIACK), GPIO_FN(VIO_D13), GPIO_FN(M14_GSW), GPIO_FN(PORT143_FSIAILR), GPIO_FN(VIO_D14), GPIO_FN(M15_RSW), GPIO_FN(PORT144_FSIAIBT), GPIO_FN(VIO_D15), GPIO_FN(TPU1TO3), GPIO_FN(PORT145_FSIAISLD), GPIO_FN(VIO_CLK), GPIO_FN(PORT146_KEYIN4), GPIO_FN(IPORT2), GPIO_FN(VIO_FIELD), GPIO_FN(PORT147_KEYIN5), GPIO_FN(VIO_CKO), GPIO_FN(PORT148_KEYIN6), GPIO_FN(A27), GPIO_FN(RDWR_XWE), GPIO_FN(MFG0_IN1), GPIO_FN(MFG0_IN2), GPIO_FN(TS_SPSYNC3), GPIO_FN(MSIOF2_RSCK), GPIO_FN(TS_SDAT3), GPIO_FN(MSIOF2_RSYNC), GPIO_FN(TPU1TO2), GPIO_FN(TS_SDEN3), GPIO_FN(PORT153_MSIOF2_SS1), GPIO_FN(SOUT3), GPIO_FN(SCIFA2_TXD1), GPIO_FN(MSIOF2_MCK0), 
GPIO_FN(SIN3), GPIO_FN(SCIFA2_RXD1), GPIO_FN(MSIOF2_MCK1), GPIO_FN(XRTS3), GPIO_FN(SCIFA2_RTS1), GPIO_FN(PORT156_MSIOF2_SS2), GPIO_FN(XCTS3), GPIO_FN(SCIFA2_CTS1), GPIO_FN(PORT157_MSIOF2_RXD), /* 55-4 (FN) */ GPIO_FN(DINT), GPIO_FN(SCIFA2_SCK1), GPIO_FN(TS_SCK3), GPIO_FN(PORT159_SCIFB_SCK), GPIO_FN(PORT159_SCIFA5_SCK), GPIO_FN(NMI), GPIO_FN(PORT160_SCIFB_TXD), GPIO_FN(PORT160_SCIFA5_TXD), GPIO_FN(SOUT0), GPIO_FN(PORT161_SCIFB_CTS), GPIO_FN(PORT161_SCIFA5_CTS), GPIO_FN(XCTS0), GPIO_FN(MFG3_IN2), GPIO_FN(PORT162_SCIFB_RXD), GPIO_FN(PORT162_SCIFA5_RXD), GPIO_FN(SIN0), GPIO_FN(MFG3_IN1), GPIO_FN(PORT163_SCIFB_RTS), GPIO_FN(PORT163_SCIFA5_RTS), GPIO_FN(XRTS0), GPIO_FN(MFG3_OUT1), GPIO_FN(TPU3TO0), GPIO_FN(LCDD0), GPIO_FN(PORT192_KEYOUT0), GPIO_FN(EXT_CKI), GPIO_FN(LCDD1), GPIO_FN(PORT193_KEYOUT1), GPIO_FN(PORT193_SCIFA5_CTS), GPIO_FN(BBIF2_TSYNC1), GPIO_FN(LCDD2), GPIO_FN(PORT194_KEYOUT2), GPIO_FN(PORT194_SCIFA5_RTS), GPIO_FN(BBIF2_TSCK1), GPIO_FN(LCDD3), GPIO_FN(PORT195_KEYOUT3), GPIO_FN(PORT195_SCIFA5_RXD), GPIO_FN(BBIF2_TXD1), GPIO_FN(LCDD4), GPIO_FN(PORT196_KEYOUT4), GPIO_FN(PORT196_SCIFA5_TXD), GPIO_FN(LCDD5), GPIO_FN(PORT197_KEYOUT5), GPIO_FN(PORT197_SCIFA5_SCK), GPIO_FN(MFG2_OUT2), GPIO_FN(LCDD6), GPIO_FN(LCDD7), GPIO_FN(TPU4TO1), GPIO_FN(MFG4_OUT2), GPIO_FN(LCDD8), GPIO_FN(PORT200_KEYIN0), GPIO_FN(VIO_DR0), GPIO_FN(D16), GPIO_FN(LCDD9), GPIO_FN(PORT201_KEYIN1), GPIO_FN(VIO_DR1), GPIO_FN(D17), GPIO_FN(LCDD10), GPIO_FN(PORT202_KEYIN2), GPIO_FN(VIO_DR2), GPIO_FN(D18), GPIO_FN(LCDD11), GPIO_FN(PORT203_KEYIN3), GPIO_FN(VIO_DR3), GPIO_FN(D19), GPIO_FN(LCDD12), GPIO_FN(PORT204_KEYIN4), GPIO_FN(VIO_DR4), GPIO_FN(D20), GPIO_FN(LCDD13), GPIO_FN(PORT205_KEYIN5), GPIO_FN(VIO_DR5), GPIO_FN(D21), GPIO_FN(LCDD14), GPIO_FN(PORT206_KEYIN6), GPIO_FN(VIO_DR6), GPIO_FN(D22), GPIO_FN(LCDD15), GPIO_FN(PORT207_MSIOF0L_SS1), GPIO_FN(PORT207_KEYOUT0), GPIO_FN(VIO_DR7), GPIO_FN(D23), GPIO_FN(LCDD16), GPIO_FN(PORT208_MSIOF0L_SS2), GPIO_FN(PORT208_KEYOUT1), GPIO_FN(VIO_VDR), GPIO_FN(D24), 
GPIO_FN(LCDD17), GPIO_FN(PORT209_KEYOUT2), GPIO_FN(VIO_HDR), GPIO_FN(D25), GPIO_FN(LCDD18), GPIO_FN(DREQ2), GPIO_FN(PORT210_MSIOF0L_SS1), GPIO_FN(D26), GPIO_FN(LCDD19), GPIO_FN(PORT211_MSIOF0L_SS2), GPIO_FN(D27), GPIO_FN(LCDD20), GPIO_FN(TS_SPSYNC1), GPIO_FN(MSIOF0L_MCK0), GPIO_FN(D28), GPIO_FN(LCDD21), GPIO_FN(TS_SDAT1), GPIO_FN(MSIOF0L_MCK1), GPIO_FN(D29), GPIO_FN(LCDD22), GPIO_FN(TS_SDEN1), GPIO_FN(MSIOF0L_RSCK), GPIO_FN(D30), GPIO_FN(LCDD23), GPIO_FN(TS_SCK1), GPIO_FN(MSIOF0L_RSYNC), GPIO_FN(D31), GPIO_FN(LCDDCK), GPIO_FN(LCDWR), GPIO_FN(PORT216_KEYOUT3), GPIO_FN(VIO_CLKR), GPIO_FN(LCDRD), GPIO_FN(DACK2), GPIO_FN(MSIOF0L_TSYNC), GPIO_FN(LCDHSYN), GPIO_FN(LCDCS), GPIO_FN(LCDCS2), GPIO_FN(DACK3), GPIO_FN(PORT218_VIO_CKOR), GPIO_FN(PORT218_KEYOUT4), GPIO_FN(LCDDISP), GPIO_FN(LCDRS), GPIO_FN(DREQ3), GPIO_FN(MSIOF0L_TSCK), GPIO_FN(LCDVSYN), GPIO_FN(LCDVSYN2), GPIO_FN(PORT220_KEYOUT5), GPIO_FN(LCDLCLK), GPIO_FN(DREQ1), GPIO_FN(PWEN), GPIO_FN(MSIOF0L_RXD), GPIO_FN(LCDDON), GPIO_FN(LCDDON2), GPIO_FN(DACK1), GPIO_FN(OVCN), GPIO_FN(MSIOF0L_TXD), GPIO_FN(SCIFA1_TXD), GPIO_FN(OVCN2), GPIO_FN(EXTLP), GPIO_FN(SCIFA1_SCK), GPIO_FN(USBTERM), GPIO_FN(PORT226_VIO_CKO2), GPIO_FN(SCIFA1_RTS), GPIO_FN(IDIN), GPIO_FN(SCIFA1_RXD), GPIO_FN(SCIFA1_CTS), GPIO_FN(MFG1_IN1), GPIO_FN(MSIOF1_TXD), GPIO_FN(SCIFA2_TXD2), GPIO_FN(PORT230_FSIAOMC), GPIO_FN(MSIOF1_TSYNC), GPIO_FN(SCIFA2_CTS2), GPIO_FN(PORT231_FSIAOLR), GPIO_FN(MSIOF1_TSCK), GPIO_FN(SCIFA2_SCK2), GPIO_FN(PORT232_FSIAOBT), GPIO_FN(MSIOF1_RXD), GPIO_FN(SCIFA2_RXD2), GPIO_FN(GPS_VCOTRIG), GPIO_FN(PORT233_FSIACK), GPIO_FN(MSIOF1_RSCK), GPIO_FN(SCIFA2_RTS2), GPIO_FN(PORT234_FSIAOSLD), GPIO_FN(MSIOF1_RSYNC), GPIO_FN(OPORT0), GPIO_FN(MFG1_IN2), GPIO_FN(PORT235_FSIAILR), GPIO_FN(MSIOF1_MCK0), GPIO_FN(I2C_SDA2), GPIO_FN(PORT236_FSIAIBT), GPIO_FN(MSIOF1_MCK1), GPIO_FN(I2C_SCL2), GPIO_FN(PORT237_FSIAISLD), GPIO_FN(MSIOF1_SS1), GPIO_FN(EDBGREQ3), /* 55-5 (FN) */ GPIO_FN(MSIOF1_SS2), GPIO_FN(SCIFA6_TXD), GPIO_FN(PORT241_IRDA_OUT), 
GPIO_FN(PORT241_IROUT), GPIO_FN(MFG4_OUT1), GPIO_FN(TPU4TO0), GPIO_FN(PORT242_IRDA_IN), GPIO_FN(MFG4_IN2), GPIO_FN(PORT243_IRDA_FIRSEL), GPIO_FN(PORT243_VIO_CKO2), GPIO_FN(PORT244_SCIFA5_CTS), GPIO_FN(MFG2_IN1), GPIO_FN(PORT244_SCIFB_CTS), GPIO_FN(PORT245_SCIFA5_RTS), GPIO_FN(MFG2_IN2), GPIO_FN(PORT245_SCIFB_RTS), GPIO_FN(PORT246_SCIFA5_RXD), GPIO_FN(MFG1_OUT1), GPIO_FN(PORT246_SCIFB_RXD), GPIO_FN(TPU1TO0), GPIO_FN(PORT247_SCIFA5_TXD), GPIO_FN(MFG3_OUT2), GPIO_FN(PORT247_SCIFB_TXD), GPIO_FN(TPU3TO1), GPIO_FN(PORT248_SCIFA5_SCK), GPIO_FN(MFG2_OUT1), GPIO_FN(PORT248_SCIFB_SCK), GPIO_FN(TPU2TO0), GPIO_FN(PORT249_IROUT), GPIO_FN(MFG4_IN1), GPIO_FN(SDHICLK0), GPIO_FN(TCK2_SWCLK_MC0), GPIO_FN(SDHICD0), GPIO_FN(SDHID0_0), GPIO_FN(TMS2_SWDIO_MC0), GPIO_FN(SDHID0_1), GPIO_FN(TDO2_SWO0_MC0), GPIO_FN(SDHID0_2), GPIO_FN(TDI2), GPIO_FN(SDHID0_3), GPIO_FN(RTCK2_SWO1_MC0), GPIO_FN(SDHICMD0), GPIO_FN(TRST2), GPIO_FN(SDHIWP0), GPIO_FN(EDBGREQ2), GPIO_FN(SDHICLK1), GPIO_FN(TCK3_SWCLK_MC1), GPIO_FN(SDHID1_0), GPIO_FN(M11_SLCD_SO2), GPIO_FN(TS_SPSYNC2), GPIO_FN(TMS3_SWDIO_MC1), GPIO_FN(SDHID1_1), GPIO_FN(M9_SLCD_A02), GPIO_FN(TS_SDAT2), GPIO_FN(TDO3_SWO0_MC1), GPIO_FN(SDHID1_2), GPIO_FN(M10_SLCD_CK2), GPIO_FN(TS_SDEN2), GPIO_FN(TDI3), GPIO_FN(SDHID1_3), GPIO_FN(M12_SLCD_CE2), GPIO_FN(TS_SCK2), GPIO_FN(RTCK3_SWO1_MC1), GPIO_FN(SDHICMD1), GPIO_FN(TRST3), GPIO_FN(RESETOUTS), };
/*
 * pinmux_config_regs[]: per-port control registers.  PORTCR(n, addr)
 * describes PORTnCR; addresses ascend byte-wise from 0xe6050000, so
 * PORTnCR lives at 0xe6050000 + n for the ports listed here.
 * NOTE(review): this table continues beyond the end of this chunk.
 */
static struct pinmux_cfg_reg pinmux_config_regs[] = { PORTCR(0, 0xe6050000), /* PORT0CR */ PORTCR(1, 0xe6050001), /* PORT1CR */ PORTCR(2, 0xe6050002), /* PORT2CR */ PORTCR(3, 0xe6050003), /* PORT3CR */ PORTCR(4, 0xe6050004), /* PORT4CR */ PORTCR(5, 0xe6050005), /* PORT5CR */ PORTCR(6, 0xe6050006), /* PORT6CR */ PORTCR(7, 0xe6050007), /* PORT7CR */ PORTCR(8, 0xe6050008), /* PORT8CR */ PORTCR(9, 0xe6050009), /* PORT9CR */ PORTCR(10, 0xe605000a), /* PORT10CR */ PORTCR(11, 0xe605000b), /* PORT11CR */ PORTCR(12, 0xe605000c), /* PORT12CR */ PORTCR(13, 0xe605000d), /* PORT13CR */ PORTCR(14, 0xe605000e), /* PORT14CR */ 
PORTCR(15, 0xe605000f), /* PORT15CR */ PORTCR(16, 0xe6050010), /* PORT16CR */ PORTCR(17, 0xe6050011), /* PORT17CR */ PORTCR(18, 0xe6050012), /* PORT18CR */ PORTCR(19, 0xe6050013), /* PORT19CR */ PORTCR(20, 0xe6050014), /* PORT20CR */ PORTCR(21, 0xe6050015), /* PORT21CR */ PORTCR(22, 0xe6050016), /* PORT22CR */ PORTCR(23, 0xe6050017), /* PORT23CR */ PORTCR(24, 0xe6050018), /* PORT24CR */ PORTCR(25, 0xe6050019), /* PORT25CR */ PORTCR(26, 0xe605001a), /* PORT26CR */ PORTCR(27, 0xe605001b), /* PORT27CR */ PORTCR(28, 0xe605001c), /* PORT28CR */ PORTCR(29, 0xe605001d), /* PORT29CR */ PORTCR(30, 0xe605001e), /* PORT30CR */ PORTCR(31, 0xe605001f), /* PORT31CR */ PORTCR(32, 0xe6050020), /* PORT32CR */ PORTCR(33, 0xe6050021), /* PORT33CR */ PORTCR(34, 0xe6050022), /* PORT34CR */ PORTCR(35, 0xe6050023), /* PORT35CR */ PORTCR(36, 0xe6050024), /* PORT36CR */ PORTCR(37, 0xe6050025), /* PORT37CR */ PORTCR(38, 0xe6050026), /* PORT38CR */ PORTCR(39, 0xe6050027), /* PORT39CR */ PORTCR(40, 0xe6050028), /* PORT40CR */ PORTCR(41, 0xe6050029), /* PORT41CR */ PORTCR(42, 0xe605002a), /* PORT42CR */ PORTCR(43, 0xe605002b), /* PORT43CR */ PORTCR(44, 0xe605002c), /* PORT44CR */ PORTCR(45, 0xe605002d), /* PORT45CR */ PORTCR(46, 0xe605002e), /* PORT46CR */ PORTCR(47, 0xe605002f), /* PORT47CR */ PORTCR(48, 0xe6050030), /* PORT48CR */ PORTCR(49, 0xe6050031), /* PORT49CR */ PORTCR(50, 0xe6050032), /* PORT50CR */ PORTCR(51, 0xe6050033), /* PORT51CR */ PORTCR(52, 0xe6050034), /* PORT52CR */ PORTCR(53, 0xe6050035), /* PORT53CR */ PORTCR(54, 0xe6050036), /* PORT54CR */ PORTCR(55, 0xe6050037), /* PORT55CR */ PORTCR(56, 0xe6050038), /* PORT56CR */ PORTCR(57, 0xe6050039), /* PORT57CR */ PORTCR(58, 0xe605003a), /* PORT58CR */ PORTCR(59, 0xe605003b), /* PORT59CR */ PORTCR(60, 0xe605003c), /* PORT60CR */ PORTCR(61, 0xe605003d), /* PORT61CR */ PORTCR(62, 0xe605003e), /* PORT62CR */ PORTCR(63, 0xe605003f), /* PORT63CR */ PORTCR(64, 0xe6050040), /* PORT64CR */ PORTCR(65, 0xe6050041), /* PORT65CR */ PORTCR(66, 
0xe6050042), /* PORT66CR */ PORTCR(67, 0xe6050043), /* PORT67CR */ PORTCR(68, 0xe6050044), /* PORT68CR */ PORTCR(69, 0xe6050045), /* PORT69CR */ PORTCR(70, 0xe6050046), /* PORT70CR */ PORTCR(71, 0xe6050047), /* PORT71CR */ PORTCR(72, 0xe6050048), /* PORT72CR */ PORTCR(73, 0xe6050049), /* PORT73CR */ PORTCR(74, 0xe605004a), /* PORT74CR */ PORTCR(75, 0xe605004b), /* PORT75CR */ PORTCR(76, 0xe605004c), /* PORT76CR */ PORTCR(77, 0xe605004d), /* PORT77CR */ PORTCR(78, 0xe605004e), /* PORT78CR */ PORTCR(79, 0xe605004f), /* PORT79CR */ PORTCR(80, 0xe6050050), /* PORT80CR */ PORTCR(81, 0xe6050051), /* PORT81CR */ PORTCR(82, 0xe6050052), /* PORT82CR */ PORTCR(83, 0xe6050053), /* PORT83CR */ PORTCR(84, 0xe6050054), /* PORT84CR */ PORTCR(85, 0xe6050055), /* PORT85CR */ PORTCR(86, 0xe6050056), /* PORT86CR */ PORTCR(87, 0xe6050057), /* PORT87CR */ PORTCR(88, 0xe6050058), /* PORT88CR */ PORTCR(89, 0xe6050059), /* PORT89CR */ PORTCR(90, 0xe605005a), /* PORT90CR */ PORTCR(91, 0xe605005b), /* PORT91CR */ PORTCR(92, 0xe605005c), /* PORT92CR */ PORTCR(93, 0xe605005d), /* PORT93CR */ PORTCR(94, 0xe605005e), /* PORT94CR */ PORTCR(95, 0xe605005f), /* PORT95CR */ PORTCR(96, 0xe6050060), /* PORT96CR */ PORTCR(97, 0xe6050061), /* PORT97CR */ PORTCR(98, 0xe6050062), /* PORT98CR */ PORTCR(99, 0xe6050063), /* PORT99CR */ PORTCR(100, 0xe6050064), /* PORT100CR */ PORTCR(101, 0xe6050065), /* PORT101CR */ PORTCR(102, 0xe6050066), /* PORT102CR */ PORTCR(103, 0xe6050067), /* PORT103CR */ PORTCR(104, 0xe6050068), /* PORT104CR */ PORTCR(105, 0xe6050069), /* PORT105CR */ PORTCR(106, 0xe605006a), /* PORT106CR */ PORTCR(107, 0xe605006b), /* PORT107CR */ PORTCR(108, 0xe605006c), /* PORT108CR */ PORTCR(109, 0xe605006d), /* PORT109CR */ PORTCR(110, 0xe605006e), /* PORT110CR */ PORTCR(111, 0xe605006f), /* PORT111CR */ PORTCR(112, 0xe6050070), /* PORT112CR */ PORTCR(113, 0xe6050071), /* PORT113CR */ PORTCR(114, 0xe6050072), /* PORT114CR */ PORTCR(115, 0xe6050073), /* PORT115CR */ PORTCR(116, 0xe6050074), /* 
PORT116CR */ PORTCR(117, 0xe6050075), /* PORT117CR */ PORTCR(118, 0xe6050076), /* PORT118CR */ PORTCR(128, 0xe6051080), /* PORT128CR */ PORTCR(129, 0xe6051081), /* PORT129CR */ PORTCR(130, 0xe6051082), /* PORT130CR */ PORTCR(131, 0xe6051083), /* PORT131CR */ PORTCR(132, 0xe6051084), /* PORT132CR */ PORTCR(133, 0xe6051085), /* PORT133CR */ PORTCR(134, 0xe6051086), /* PORT134CR */ PORTCR(135, 0xe6051087), /* PORT135CR */ PORTCR(136, 0xe6051088), /* PORT136CR */ PORTCR(137, 0xe6051089), /* PORT137CR */ PORTCR(138, 0xe605108a), /* PORT138CR */ PORTCR(139, 0xe605108b), /* PORT139CR */ PORTCR(140, 0xe605108c), /* PORT140CR */ PORTCR(141, 0xe605108d), /* PORT141CR */ PORTCR(142, 0xe605108e), /* PORT142CR */ PORTCR(143, 0xe605108f), /* PORT143CR */ PORTCR(144, 0xe6051090), /* PORT144CR */ PORTCR(145, 0xe6051091), /* PORT145CR */ PORTCR(146, 0xe6051092), /* PORT146CR */ PORTCR(147, 0xe6051093), /* PORT147CR */ PORTCR(148, 0xe6051094), /* PORT148CR */ PORTCR(149, 0xe6051095), /* PORT149CR */ PORTCR(150, 0xe6051096), /* PORT150CR */ PORTCR(151, 0xe6051097), /* PORT151CR */ PORTCR(152, 0xe6051098), /* PORT152CR */ PORTCR(153, 0xe6051099), /* PORT153CR */ PORTCR(154, 0xe605109a), /* PORT154CR */ PORTCR(155, 0xe605109b), /* PORT155CR */ PORTCR(156, 0xe605109c), /* PORT156CR */ PORTCR(157, 0xe605109d), /* PORT157CR */ PORTCR(158, 0xe605109e), /* PORT158CR */ PORTCR(159, 0xe605109f), /* PORT159CR */ PORTCR(160, 0xe60510a0), /* PORT160CR */ PORTCR(161, 0xe60510a1), /* PORT161CR */ PORTCR(162, 0xe60510a2), /* PORT162CR */ PORTCR(163, 0xe60510a3), /* PORT163CR */ PORTCR(164, 0xe60510a4), /* PORT164CR */ PORTCR(192, 0xe60520c0), /* PORT192CR */ PORTCR(193, 0xe60520c1), /* PORT193CR */ PORTCR(194, 0xe60520c2), /* PORT194CR */ PORTCR(195, 0xe60520c3), /* PORT195CR */ PORTCR(196, 0xe60520c4), /* PORT196CR */ PORTCR(197, 0xe60520c5), /* PORT197CR */ PORTCR(198, 0xe60520c6), /* PORT198CR */ PORTCR(199, 0xe60520c7), /* PORT199CR */ PORTCR(200, 0xe60520c8), /* PORT200CR */ PORTCR(201, 
0xe60520c9), /* PORT201CR */ PORTCR(202, 0xe60520ca), /* PORT202CR */ PORTCR(203, 0xe60520cb), /* PORT203CR */ PORTCR(204, 0xe60520cc), /* PORT204CR */ PORTCR(205, 0xe60520cd), /* PORT205CR */ PORTCR(206, 0xe60520ce), /* PORT206CR */ PORTCR(207, 0xe60520cf), /* PORT207CR */ PORTCR(208, 0xe60520d0), /* PORT208CR */ PORTCR(209, 0xe60520d1), /* PORT209CR */ PORTCR(210, 0xe60520d2), /* PORT210CR */ PORTCR(211, 0xe60520d3), /* PORT211CR */ PORTCR(212, 0xe60520d4), /* PORT212CR */ PORTCR(213, 0xe60520d5), /* PORT213CR */ PORTCR(214, 0xe60520d6), /* PORT214CR */ PORTCR(215, 0xe60520d7), /* PORT215CR */ PORTCR(216, 0xe60520d8), /* PORT216CR */ PORTCR(217, 0xe60520d9), /* PORT217CR */ PORTCR(218, 0xe60520da), /* PORT218CR */ PORTCR(219, 0xe60520db), /* PORT219CR */ PORTCR(220, 0xe60520dc), /* PORT220CR */ PORTCR(221, 0xe60520dd), /* PORT221CR */ PORTCR(222, 0xe60520de), /* PORT222CR */ PORTCR(223, 0xe60520df), /* PORT223CR */ PORTCR(224, 0xe60520e0), /* PORT224CR */ PORTCR(225, 0xe60520e1), /* PORT225CR */ PORTCR(226, 0xe60520e2), /* PORT226CR */ PORTCR(227, 0xe60520e3), /* PORT227CR */ PORTCR(228, 0xe60520e4), /* PORT228CR */ PORTCR(229, 0xe60520e5), /* PORT229CR */ PORTCR(230, 0xe60520e6), /* PORT230CR */ PORTCR(231, 0xe60520e7), /* PORT231CR */ PORTCR(232, 0xe60520e8), /* PORT232CR */ PORTCR(233, 0xe60520e9), /* PORT233CR */ PORTCR(234, 0xe60520ea), /* PORT234CR */ PORTCR(235, 0xe60520eb), /* PORT235CR */ PORTCR(236, 0xe60520ec), /* PORT236CR */ PORTCR(237, 0xe60520ed), /* PORT237CR */ PORTCR(238, 0xe60520ee), /* PORT238CR */ PORTCR(239, 0xe60520ef), /* PORT239CR */ PORTCR(240, 0xe60520f0), /* PORT240CR */ PORTCR(241, 0xe60520f1), /* PORT241CR */ PORTCR(242, 0xe60520f2), /* PORT242CR */ PORTCR(243, 0xe60520f3), /* PORT243CR */ PORTCR(244, 0xe60520f4), /* PORT244CR */ PORTCR(245, 0xe60520f5), /* PORT245CR */ PORTCR(246, 0xe60520f6), /* PORT246CR */ PORTCR(247, 0xe60520f7), /* PORT247CR */ PORTCR(248, 0xe60520f8), /* PORT248CR */ PORTCR(249, 0xe60520f9), /* PORT249CR */ 
PORTCR(250, 0xe60520fa), /* PORT250CR */ PORTCR(251, 0xe60520fb), /* PORT251CR */ PORTCR(252, 0xe60520fc), /* PORT252CR */ PORTCR(253, 0xe60520fd), /* PORT253CR */ PORTCR(254, 0xe60520fe), /* PORT254CR */ PORTCR(255, 0xe60520ff), /* PORT255CR */ PORTCR(256, 0xe6052100), /* PORT256CR */ PORTCR(257, 0xe6052101), /* PORT257CR */ PORTCR(258, 0xe6052102), /* PORT258CR */ PORTCR(259, 0xe6052103), /* PORT259CR */ PORTCR(260, 0xe6052104), /* PORT260CR */ PORTCR(261, 0xe6052105), /* PORT261CR */ PORTCR(262, 0xe6052106), /* PORT262CR */ PORTCR(263, 0xe6052107), /* PORT263CR */ PORTCR(264, 0xe6052108), /* PORT264CR */ { PINMUX_CFG_REG("MSELBCR", 0xe6058024, 32, 1) { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, MSELBCR_MSEL17_0, MSELBCR_MSEL17_1, MSELBCR_MSEL16_0, MSELBCR_MSEL16_1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { }, }; static struct pinmux_data_reg pinmux_data_regs[] = { { PINMUX_DATA_REG("PORTL031_000DR", 0xe6054000, 32) { PORT31_DATA, PORT30_DATA, PORT29_DATA, PORT28_DATA, PORT27_DATA, PORT26_DATA, PORT25_DATA, PORT24_DATA, PORT23_DATA, PORT22_DATA, PORT21_DATA, PORT20_DATA, PORT19_DATA, PORT18_DATA, PORT17_DATA, PORT16_DATA, PORT15_DATA, PORT14_DATA, PORT13_DATA, PORT12_DATA, PORT11_DATA, PORT10_DATA, PORT9_DATA, PORT8_DATA, PORT7_DATA, PORT6_DATA, PORT5_DATA, PORT4_DATA, PORT3_DATA, PORT2_DATA, PORT1_DATA, PORT0_DATA } }, { PINMUX_DATA_REG("PORTL063_032DR", 0xe6054004, 32) { PORT63_DATA, PORT62_DATA, PORT61_DATA, PORT60_DATA, PORT59_DATA, PORT58_DATA, PORT57_DATA, PORT56_DATA, PORT55_DATA, PORT54_DATA, PORT53_DATA, PORT52_DATA, PORT51_DATA, PORT50_DATA, PORT49_DATA, PORT48_DATA, PORT47_DATA, PORT46_DATA, PORT45_DATA, PORT44_DATA, PORT43_DATA, PORT42_DATA, PORT41_DATA, PORT40_DATA, PORT39_DATA, PORT38_DATA, PORT37_DATA, PORT36_DATA, PORT35_DATA, PORT34_DATA, PORT33_DATA, PORT32_DATA } }, { PINMUX_DATA_REG("PORTL095_064DR", 0xe6054008, 32) { PORT95_DATA, PORT94_DATA, 
PORT93_DATA, PORT92_DATA, PORT91_DATA, PORT90_DATA, PORT89_DATA, PORT88_DATA, PORT87_DATA, PORT86_DATA, PORT85_DATA, PORT84_DATA, PORT83_DATA, PORT82_DATA, PORT81_DATA, PORT80_DATA, PORT79_DATA, PORT78_DATA, PORT77_DATA, PORT76_DATA, PORT75_DATA, PORT74_DATA, PORT73_DATA, PORT72_DATA, PORT71_DATA, PORT70_DATA, PORT69_DATA, PORT68_DATA, PORT67_DATA, PORT66_DATA, PORT65_DATA, PORT64_DATA } }, { PINMUX_DATA_REG("PORTD127_096DR", 0xe605400C, 32) { 0, 0, 0, 0, 0, 0, 0, 0, 0, PORT118_DATA, PORT117_DATA, PORT116_DATA, PORT115_DATA, PORT114_DATA, PORT113_DATA, PORT112_DATA, PORT111_DATA, PORT110_DATA, PORT109_DATA, PORT108_DATA, PORT107_DATA, PORT106_DATA, PORT105_DATA, PORT104_DATA, PORT103_DATA, PORT102_DATA, PORT101_DATA, PORT100_DATA, PORT99_DATA, PORT98_DATA, PORT97_DATA, PORT96_DATA } }, { PINMUX_DATA_REG("PORTD159_128DR", 0xe6055000, 32) { PORT159_DATA, PORT158_DATA, PORT157_DATA, PORT156_DATA, PORT155_DATA, PORT154_DATA, PORT153_DATA, PORT152_DATA, PORT151_DATA, PORT150_DATA, PORT149_DATA, PORT148_DATA, PORT147_DATA, PORT146_DATA, PORT145_DATA, PORT144_DATA, PORT143_DATA, PORT142_DATA, PORT141_DATA, PORT140_DATA, PORT139_DATA, PORT138_DATA, PORT137_DATA, PORT136_DATA, PORT135_DATA, PORT134_DATA, PORT133_DATA, PORT132_DATA, PORT131_DATA, PORT130_DATA, PORT129_DATA, PORT128_DATA } }, { PINMUX_DATA_REG("PORTR191_160DR", 0xe6055004, 32) { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PORT164_DATA, PORT163_DATA, PORT162_DATA, PORT161_DATA, PORT160_DATA } }, { PINMUX_DATA_REG("PORTR223_192DR", 0xe6056000, 32) { PORT223_DATA, PORT222_DATA, PORT221_DATA, PORT220_DATA, PORT219_DATA, PORT218_DATA, PORT217_DATA, PORT216_DATA, PORT215_DATA, PORT214_DATA, PORT213_DATA, PORT212_DATA, PORT211_DATA, PORT210_DATA, PORT209_DATA, PORT208_DATA, PORT207_DATA, PORT206_DATA, PORT205_DATA, PORT204_DATA, PORT203_DATA, PORT202_DATA, PORT201_DATA, PORT200_DATA, PORT199_DATA, PORT198_DATA, PORT197_DATA, PORT196_DATA, PORT195_DATA, PORT194_DATA, PORT193_DATA, 
PORT192_DATA } }, { PINMUX_DATA_REG("PORTU255_224DR", 0xe6056004, 32) { PORT255_DATA, PORT254_DATA, PORT253_DATA, PORT252_DATA, PORT251_DATA, PORT250_DATA, PORT249_DATA, PORT248_DATA, PORT247_DATA, PORT246_DATA, PORT245_DATA, PORT244_DATA, PORT243_DATA, PORT242_DATA, PORT241_DATA, PORT240_DATA, PORT239_DATA, PORT238_DATA, PORT237_DATA, PORT236_DATA, PORT235_DATA, PORT234_DATA, PORT233_DATA, PORT232_DATA, PORT231_DATA, PORT230_DATA, PORT229_DATA, PORT228_DATA, PORT227_DATA, PORT226_DATA, PORT225_DATA, PORT224_DATA } }, { PINMUX_DATA_REG("PORTU287_256DR", 0xe6056008, 32) { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PORT264_DATA, PORT263_DATA, PORT262_DATA, PORT261_DATA, PORT260_DATA, PORT259_DATA, PORT258_DATA, PORT257_DATA, PORT256_DATA } }, { }, }; static struct pinmux_info sh7377_pinmux_info = { .name = "sh7377_pfc", .reserved_id = PINMUX_RESERVED, .data = { PINMUX_DATA_BEGIN, PINMUX_DATA_END }, .input = { PINMUX_INPUT_BEGIN, PINMUX_INPUT_END }, .input_pu = { PINMUX_INPUT_PULLUP_BEGIN, PINMUX_INPUT_PULLUP_END }, .input_pd = { PINMUX_INPUT_PULLDOWN_BEGIN, PINMUX_INPUT_PULLDOWN_END }, .output = { PINMUX_OUTPUT_BEGIN, PINMUX_OUTPUT_END }, .mark = { PINMUX_MARK_BEGIN, PINMUX_MARK_END }, .function = { PINMUX_FUNCTION_BEGIN, PINMUX_FUNCTION_END }, .first_gpio = GPIO_PORT0, .last_gpio = GPIO_FN_RESETOUTS, .gpios = pinmux_gpios, .cfg_regs = pinmux_config_regs, .data_regs = pinmux_data_regs, .gpio_data = pinmux_data, .gpio_data_size = ARRAY_SIZE(pinmux_data), }; void sh7377_pinmux_init(void) { register_pinmux(&sh7377_pinmux_info); }
gpl-2.0
barome/HD_mako
arch/unicore32/kernel/clock.c
10223
10455
/* * linux/arch/unicore32/kernel/clock.c * * Code specific to PKUnity SoC and UniCore ISA * * Maintained by GUAN Xue-tao <gxt@mprc.pku.edu.cn> * Copyright (C) 2001-2010 Guan Xuetao * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/device.h> #include <linux/list.h> #include <linux/errno.h> #include <linux/err.h> #include <linux/string.h> #include <linux/clk.h> #include <linux/mutex.h> #include <linux/delay.h> #include <linux/io.h> #include <mach/hardware.h> /* * Very simple clock implementation */ struct clk { struct list_head node; unsigned long rate; const char *name; }; static struct clk clk_ost_clk = { .name = "OST_CLK", .rate = CLOCK_TICK_RATE, }; static struct clk clk_mclk_clk = { .name = "MAIN_CLK", }; static struct clk clk_bclk32_clk = { .name = "BUS32_CLK", }; static struct clk clk_ddr_clk = { .name = "DDR_CLK", }; static struct clk clk_vga_clk = { .name = "VGA_CLK", }; static LIST_HEAD(clocks); static DEFINE_MUTEX(clocks_mutex); struct clk *clk_get(struct device *dev, const char *id) { struct clk *p, *clk = ERR_PTR(-ENOENT); mutex_lock(&clocks_mutex); list_for_each_entry(p, &clocks, node) { if (strcmp(id, p->name) == 0) { clk = p; break; } } mutex_unlock(&clocks_mutex); return clk; } EXPORT_SYMBOL(clk_get); void clk_put(struct clk *clk) { } EXPORT_SYMBOL(clk_put); int clk_enable(struct clk *clk) { return 0; } EXPORT_SYMBOL(clk_enable); void clk_disable(struct clk *clk) { } EXPORT_SYMBOL(clk_disable); unsigned long clk_get_rate(struct clk *clk) { return clk->rate; } EXPORT_SYMBOL(clk_get_rate); struct { unsigned long rate; unsigned long cfg; unsigned long div; } vga_clk_table[] = { {.rate = 25175000, .cfg = 0x00002001, .div = 0x9}, {.rate = 31500000, .cfg = 0x00002001, .div = 0x7}, {.rate = 40000000, .cfg = 0x00003801, .div = 0x9}, {.rate = 49500000, 
.cfg = 0x00003801, .div = 0x7}, {.rate = 65000000, .cfg = 0x00002c01, .div = 0x4}, {.rate = 78750000, .cfg = 0x00002400, .div = 0x7}, {.rate = 108000000, .cfg = 0x00002c01, .div = 0x2}, {.rate = 106500000, .cfg = 0x00003c01, .div = 0x3}, {.rate = 50650000, .cfg = 0x00106400, .div = 0x9}, {.rate = 61500000, .cfg = 0x00106400, .div = 0xa}, {.rate = 85500000, .cfg = 0x00002800, .div = 0x6}, }; struct { unsigned long mrate; unsigned long prate; } mclk_clk_table[] = { {.mrate = 500000000, .prate = 0x00109801}, {.mrate = 525000000, .prate = 0x00104C00}, {.mrate = 550000000, .prate = 0x00105000}, {.mrate = 575000000, .prate = 0x00105400}, {.mrate = 600000000, .prate = 0x00105800}, {.mrate = 625000000, .prate = 0x00105C00}, {.mrate = 650000000, .prate = 0x00106000}, {.mrate = 675000000, .prate = 0x00106400}, {.mrate = 700000000, .prate = 0x00106800}, {.mrate = 725000000, .prate = 0x00106C00}, {.mrate = 750000000, .prate = 0x00107000}, {.mrate = 775000000, .prate = 0x00107400}, {.mrate = 800000000, .prate = 0x00107800}, }; int clk_set_rate(struct clk *clk, unsigned long rate) { if (clk == &clk_vga_clk) { unsigned long pll_vgacfg, pll_vgadiv; int ret, i; /* lookup vga_clk_table */ ret = -EINVAL; for (i = 0; i < ARRAY_SIZE(vga_clk_table); i++) { if (rate == vga_clk_table[i].rate) { pll_vgacfg = vga_clk_table[i].cfg; pll_vgadiv = vga_clk_table[i].div; ret = 0; break; } } if (ret) return ret; if (readl(PM_PLLVGACFG) == pll_vgacfg) return 0; /* set pll vga cfg reg. */ writel(pll_vgacfg, PM_PLLVGACFG); writel(PM_PMCR_CFBVGA, PM_PMCR); while ((readl(PM_PLLDFCDONE) & PM_PLLDFCDONE_VGADFC) != PM_PLLDFCDONE_VGADFC) udelay(100); /* about 1ms */ /* set div cfg reg. 
*/ writel(readl(PM_PCGR) | PM_PCGR_VGACLK, PM_PCGR); writel((readl(PM_DIVCFG) & ~PM_DIVCFG_VGACLK_MASK) | PM_DIVCFG_VGACLK(pll_vgadiv), PM_DIVCFG); writel(readl(PM_SWRESET) | PM_SWRESET_VGADIV, PM_SWRESET); while ((readl(PM_SWRESET) & PM_SWRESET_VGADIV) == PM_SWRESET_VGADIV) udelay(100); /* 65536 bclk32, about 320us */ writel(readl(PM_PCGR) & ~PM_PCGR_VGACLK, PM_PCGR); } #ifdef CONFIG_CPU_FREQ if (clk == &clk_mclk_clk) { u32 pll_rate, divstatus = PM_DIVSTATUS; int ret, i; /* lookup mclk_clk_table */ ret = -EINVAL; for (i = 0; i < ARRAY_SIZE(mclk_clk_table); i++) { if (rate == mclk_clk_table[i].mrate) { pll_rate = mclk_clk_table[i].prate; clk_mclk_clk.rate = mclk_clk_table[i].mrate; ret = 0; break; } } if (ret) return ret; if (clk_mclk_clk.rate) clk_bclk32_clk.rate = clk_mclk_clk.rate / (((divstatus & 0x0000f000) >> 12) + 1); /* set pll sys cfg reg. */ PM_PLLSYSCFG = pll_rate; PM_PMCR = PM_PMCR_CFBSYS; while ((PM_PLLDFCDONE & PM_PLLDFCDONE_SYSDFC) != PM_PLLDFCDONE_SYSDFC) udelay(100); /* about 1ms */ } #endif return 0; } EXPORT_SYMBOL(clk_set_rate); int clk_register(struct clk *clk) { mutex_lock(&clocks_mutex); list_add(&clk->node, &clocks); mutex_unlock(&clocks_mutex); printk(KERN_DEFAULT "PKUnity PM: %s %lu.%02luM\n", clk->name, (clk->rate)/1000000, (clk->rate)/10000 % 100); return 0; } EXPORT_SYMBOL(clk_register); void clk_unregister(struct clk *clk) { mutex_lock(&clocks_mutex); list_del(&clk->node); mutex_unlock(&clocks_mutex); } EXPORT_SYMBOL(clk_unregister); struct { unsigned long prate; unsigned long rate; } pllrate_table[] = { {.prate = 0x00002001, .rate = 250000000}, {.prate = 0x00104801, .rate = 250000000}, {.prate = 0x00104C01, .rate = 262500000}, {.prate = 0x00002401, .rate = 275000000}, {.prate = 0x00105001, .rate = 275000000}, {.prate = 0x00105401, .rate = 287500000}, {.prate = 0x00002801, .rate = 300000000}, {.prate = 0x00105801, .rate = 300000000}, {.prate = 0x00105C01, .rate = 312500000}, {.prate = 0x00002C01, .rate = 325000000}, {.prate = 
0x00106001, .rate = 325000000}, {.prate = 0x00106401, .rate = 337500000}, {.prate = 0x00003001, .rate = 350000000}, {.prate = 0x00106801, .rate = 350000000}, {.prate = 0x00106C01, .rate = 362500000}, {.prate = 0x00003401, .rate = 375000000}, {.prate = 0x00107001, .rate = 375000000}, {.prate = 0x00107401, .rate = 387500000}, {.prate = 0x00003801, .rate = 400000000}, {.prate = 0x00107801, .rate = 400000000}, {.prate = 0x00107C01, .rate = 412500000}, {.prate = 0x00003C01, .rate = 425000000}, {.prate = 0x00108001, .rate = 425000000}, {.prate = 0x00108401, .rate = 437500000}, {.prate = 0x00004001, .rate = 450000000}, {.prate = 0x00108801, .rate = 450000000}, {.prate = 0x00108C01, .rate = 462500000}, {.prate = 0x00004401, .rate = 475000000}, {.prate = 0x00109001, .rate = 475000000}, {.prate = 0x00109401, .rate = 487500000}, {.prate = 0x00004801, .rate = 500000000}, {.prate = 0x00109801, .rate = 500000000}, {.prate = 0x00104C00, .rate = 525000000}, {.prate = 0x00002400, .rate = 550000000}, {.prate = 0x00105000, .rate = 550000000}, {.prate = 0x00105400, .rate = 575000000}, {.prate = 0x00002800, .rate = 600000000}, {.prate = 0x00105800, .rate = 600000000}, {.prate = 0x00105C00, .rate = 625000000}, {.prate = 0x00002C00, .rate = 650000000}, {.prate = 0x00106000, .rate = 650000000}, {.prate = 0x00106400, .rate = 675000000}, {.prate = 0x00003000, .rate = 700000000}, {.prate = 0x00106800, .rate = 700000000}, {.prate = 0x00106C00, .rate = 725000000}, {.prate = 0x00003400, .rate = 750000000}, {.prate = 0x00107000, .rate = 750000000}, {.prate = 0x00107400, .rate = 775000000}, {.prate = 0x00003800, .rate = 800000000}, {.prate = 0x00107800, .rate = 800000000}, {.prate = 0x00107C00, .rate = 825000000}, {.prate = 0x00003C00, .rate = 850000000}, {.prate = 0x00108000, .rate = 850000000}, {.prate = 0x00108400, .rate = 875000000}, {.prate = 0x00004000, .rate = 900000000}, {.prate = 0x00108800, .rate = 900000000}, {.prate = 0x00108C00, .rate = 925000000}, {.prate = 0x00004400, .rate = 
950000000}, {.prate = 0x00109000, .rate = 950000000}, {.prate = 0x00109400, .rate = 975000000}, {.prate = 0x00004800, .rate = 1000000000}, {.prate = 0x00109800, .rate = 1000000000}, }; struct { unsigned long prate; unsigned long drate; } pddr_table[] = { {.prate = 0x00100800, .drate = 44236800}, {.prate = 0x00100C00, .drate = 66355200}, {.prate = 0x00101000, .drate = 88473600}, {.prate = 0x00101400, .drate = 110592000}, {.prate = 0x00101800, .drate = 132710400}, {.prate = 0x00101C01, .drate = 154828800}, {.prate = 0x00102001, .drate = 176947200}, {.prate = 0x00102401, .drate = 199065600}, {.prate = 0x00102801, .drate = 221184000}, {.prate = 0x00102C01, .drate = 243302400}, {.prate = 0x00103001, .drate = 265420800}, {.prate = 0x00103401, .drate = 287539200}, {.prate = 0x00103801, .drate = 309657600}, {.prate = 0x00103C01, .drate = 331776000}, {.prate = 0x00104001, .drate = 353894400}, }; static int __init clk_init(void) { #ifdef CONFIG_PUV3_PM u32 pllrate, divstatus = readl(PM_DIVSTATUS); u32 pcgr_val = readl(PM_PCGR); int i; pcgr_val |= PM_PCGR_BCLKMME | PM_PCGR_BCLKH264E | PM_PCGR_BCLKH264D | PM_PCGR_HECLK | PM_PCGR_HDCLK; writel(pcgr_val, PM_PCGR); pllrate = readl(PM_PLLSYSSTATUS); /* lookup pmclk_table */ clk_mclk_clk.rate = 0; for (i = 0; i < ARRAY_SIZE(pllrate_table); i++) { if (pllrate == pllrate_table[i].prate) { clk_mclk_clk.rate = pllrate_table[i].rate; break; } } if (clk_mclk_clk.rate) clk_bclk32_clk.rate = clk_mclk_clk.rate / (((divstatus & 0x0000f000) >> 12) + 1); pllrate = readl(PM_PLLDDRSTATUS); /* lookup pddr_table */ clk_ddr_clk.rate = 0; for (i = 0; i < ARRAY_SIZE(pddr_table); i++) { if (pllrate == pddr_table[i].prate) { clk_ddr_clk.rate = pddr_table[i].drate; break; } } pllrate = readl(PM_PLLVGASTATUS); /* lookup pvga_table */ clk_vga_clk.rate = 0; for (i = 0; i < ARRAY_SIZE(pllrate_table); i++) { if (pllrate == pllrate_table[i].prate) { clk_vga_clk.rate = pllrate_table[i].rate; break; } } if (clk_vga_clk.rate) clk_vga_clk.rate = clk_vga_clk.rate 
/ (((divstatus & 0x00f00000) >> 20) + 1); clk_register(&clk_vga_clk); #endif #ifdef CONFIG_ARCH_FPGA clk_ddr_clk.rate = 33000000; clk_mclk_clk.rate = 33000000; clk_bclk32_clk.rate = 33000000; #endif clk_register(&clk_ddr_clk); clk_register(&clk_mclk_clk); clk_register(&clk_bclk32_clk); clk_register(&clk_ost_clk); return 0; } core_initcall(clk_init);
gpl-2.0
ajopanoor/hydra
drivers/net/wireless/p54/p54pci.c
496
16980
/* * Linux device driver for PCI based Prism54 * * Copyright (c) 2006, Michael Wu <flamingice@sourmilk.net> * Copyright (c) 2008, Christian Lamparter <chunkeey@web.de> * * Based on the islsm (softmac prism54) driver, which is: * Copyright 2004-2006 Jean-Baptiste Note <jean-baptiste.note@m4x.org>, et al. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/pci.h> #include <linux/slab.h> #include <linux/firmware.h> #include <linux/etherdevice.h> #include <linux/delay.h> #include <linux/completion.h> #include <linux/module.h> #include <net/mac80211.h> #include "p54.h" #include "lmac.h" #include "p54pci.h" MODULE_AUTHOR("Michael Wu <flamingice@sourmilk.net>"); MODULE_DESCRIPTION("Prism54 PCI wireless driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS("prism54pci"); MODULE_FIRMWARE("isl3886pci"); static const struct pci_device_id p54p_table[] = { /* Intersil PRISM Duette/Prism GT Wireless LAN adapter */ { PCI_DEVICE(0x1260, 0x3890) }, /* 3COM 3CRWE154G72 Wireless LAN adapter */ { PCI_DEVICE(0x10b7, 0x6001) }, /* Intersil PRISM Indigo Wireless LAN adapter */ { PCI_DEVICE(0x1260, 0x3877) }, /* Intersil PRISM Javelin/Xbow Wireless LAN adapter */ { PCI_DEVICE(0x1260, 0x3886) }, /* Intersil PRISM Xbow Wireless LAN adapter (Symbol AP-300) */ { PCI_DEVICE(0x1260, 0xffff) }, { }, }; MODULE_DEVICE_TABLE(pci, p54p_table); static int p54p_upload_firmware(struct ieee80211_hw *dev) { struct p54p_priv *priv = dev->priv; __le32 reg; int err; __le32 *data; u32 remains, left, device_addr; P54P_WRITE(int_enable, cpu_to_le32(0)); P54P_READ(int_enable); udelay(10); reg = P54P_READ(ctrl_stat); reg &= cpu_to_le32(~ISL38XX_CTRL_STAT_RESET); reg &= cpu_to_le32(~ISL38XX_CTRL_STAT_RAMBOOT); P54P_WRITE(ctrl_stat, reg); P54P_READ(ctrl_stat); udelay(10); reg |= cpu_to_le32(ISL38XX_CTRL_STAT_RESET); P54P_WRITE(ctrl_stat, reg); wmb(); udelay(10); reg &= 
cpu_to_le32(~ISL38XX_CTRL_STAT_RESET); P54P_WRITE(ctrl_stat, reg); wmb(); /* wait for the firmware to reset properly */ mdelay(10); err = p54_parse_firmware(dev, priv->firmware); if (err) return err; if (priv->common.fw_interface != FW_LM86) { dev_err(&priv->pdev->dev, "wrong firmware, " "please get a LM86(PCI) firmware a try again.\n"); return -EINVAL; } data = (__le32 *) priv->firmware->data; remains = priv->firmware->size; device_addr = ISL38XX_DEV_FIRMWARE_ADDR; while (remains) { u32 i = 0; left = min((u32)0x1000, remains); P54P_WRITE(direct_mem_base, cpu_to_le32(device_addr)); P54P_READ(int_enable); device_addr += 0x1000; while (i < left) { P54P_WRITE(direct_mem_win[i], *data++); i += sizeof(u32); } remains -= left; P54P_READ(int_enable); } reg = P54P_READ(ctrl_stat); reg &= cpu_to_le32(~ISL38XX_CTRL_STAT_CLKRUN); reg &= cpu_to_le32(~ISL38XX_CTRL_STAT_RESET); reg |= cpu_to_le32(ISL38XX_CTRL_STAT_RAMBOOT); P54P_WRITE(ctrl_stat, reg); P54P_READ(ctrl_stat); udelay(10); reg |= cpu_to_le32(ISL38XX_CTRL_STAT_RESET); P54P_WRITE(ctrl_stat, reg); wmb(); udelay(10); reg &= cpu_to_le32(~ISL38XX_CTRL_STAT_RESET); P54P_WRITE(ctrl_stat, reg); wmb(); udelay(10); /* wait for the firmware to boot properly */ mdelay(100); return 0; } static void p54p_refill_rx_ring(struct ieee80211_hw *dev, int ring_index, struct p54p_desc *ring, u32 ring_limit, struct sk_buff **rx_buf, u32 index) { struct p54p_priv *priv = dev->priv; struct p54p_ring_control *ring_control = priv->ring_control; u32 limit, idx, i; idx = le32_to_cpu(ring_control->host_idx[ring_index]); limit = idx; limit -= index; limit = ring_limit - limit; i = idx % ring_limit; while (limit-- > 1) { struct p54p_desc *desc = &ring[i]; if (!desc->host_addr) { struct sk_buff *skb; dma_addr_t mapping; skb = dev_alloc_skb(priv->common.rx_mtu + 32); if (!skb) break; mapping = pci_map_single(priv->pdev, skb_tail_pointer(skb), priv->common.rx_mtu + 32, PCI_DMA_FROMDEVICE); if (pci_dma_mapping_error(priv->pdev, mapping)) { 
dev_kfree_skb_any(skb); dev_err(&priv->pdev->dev, "RX DMA Mapping error\n"); break; } desc->host_addr = cpu_to_le32(mapping); desc->device_addr = 0; // FIXME: necessary? desc->len = cpu_to_le16(priv->common.rx_mtu + 32); desc->flags = 0; rx_buf[i] = skb; } i++; idx++; i %= ring_limit; } wmb(); ring_control->host_idx[ring_index] = cpu_to_le32(idx); } static void p54p_check_rx_ring(struct ieee80211_hw *dev, u32 *index, int ring_index, struct p54p_desc *ring, u32 ring_limit, struct sk_buff **rx_buf) { struct p54p_priv *priv = dev->priv; struct p54p_ring_control *ring_control = priv->ring_control; struct p54p_desc *desc; u32 idx, i; i = (*index) % ring_limit; (*index) = idx = le32_to_cpu(ring_control->device_idx[ring_index]); idx %= ring_limit; while (i != idx) { u16 len; struct sk_buff *skb; dma_addr_t dma_addr; desc = &ring[i]; len = le16_to_cpu(desc->len); skb = rx_buf[i]; if (!skb) { i++; i %= ring_limit; continue; } if (unlikely(len > priv->common.rx_mtu)) { if (net_ratelimit()) dev_err(&priv->pdev->dev, "rx'd frame size " "exceeds length threshold.\n"); len = priv->common.rx_mtu; } dma_addr = le32_to_cpu(desc->host_addr); pci_dma_sync_single_for_cpu(priv->pdev, dma_addr, priv->common.rx_mtu + 32, PCI_DMA_FROMDEVICE); skb_put(skb, len); if (p54_rx(dev, skb)) { pci_unmap_single(priv->pdev, dma_addr, priv->common.rx_mtu + 32, PCI_DMA_FROMDEVICE); rx_buf[i] = NULL; desc->host_addr = cpu_to_le32(0); } else { skb_trim(skb, 0); pci_dma_sync_single_for_device(priv->pdev, dma_addr, priv->common.rx_mtu + 32, PCI_DMA_FROMDEVICE); desc->len = cpu_to_le16(priv->common.rx_mtu + 32); } i++; i %= ring_limit; } p54p_refill_rx_ring(dev, ring_index, ring, ring_limit, rx_buf, *index); } static void p54p_check_tx_ring(struct ieee80211_hw *dev, u32 *index, int ring_index, struct p54p_desc *ring, u32 ring_limit, struct sk_buff **tx_buf) { struct p54p_priv *priv = dev->priv; struct p54p_ring_control *ring_control = priv->ring_control; struct p54p_desc *desc; struct sk_buff *skb; u32 
idx, i; i = (*index) % ring_limit; (*index) = idx = le32_to_cpu(ring_control->device_idx[ring_index]); idx %= ring_limit; while (i != idx) { desc = &ring[i]; skb = tx_buf[i]; tx_buf[i] = NULL; pci_unmap_single(priv->pdev, le32_to_cpu(desc->host_addr), le16_to_cpu(desc->len), PCI_DMA_TODEVICE); desc->host_addr = 0; desc->device_addr = 0; desc->len = 0; desc->flags = 0; if (skb && FREE_AFTER_TX(skb)) p54_free_skb(dev, skb); i++; i %= ring_limit; } } static void p54p_tasklet(unsigned long dev_id) { struct ieee80211_hw *dev = (struct ieee80211_hw *)dev_id; struct p54p_priv *priv = dev->priv; struct p54p_ring_control *ring_control = priv->ring_control; p54p_check_tx_ring(dev, &priv->tx_idx_mgmt, 3, ring_control->tx_mgmt, ARRAY_SIZE(ring_control->tx_mgmt), priv->tx_buf_mgmt); p54p_check_tx_ring(dev, &priv->tx_idx_data, 1, ring_control->tx_data, ARRAY_SIZE(ring_control->tx_data), priv->tx_buf_data); p54p_check_rx_ring(dev, &priv->rx_idx_mgmt, 2, ring_control->rx_mgmt, ARRAY_SIZE(ring_control->rx_mgmt), priv->rx_buf_mgmt); p54p_check_rx_ring(dev, &priv->rx_idx_data, 0, ring_control->rx_data, ARRAY_SIZE(ring_control->rx_data), priv->rx_buf_data); wmb(); P54P_WRITE(dev_int, cpu_to_le32(ISL38XX_DEV_INT_UPDATE)); } static irqreturn_t p54p_interrupt(int irq, void *dev_id) { struct ieee80211_hw *dev = dev_id; struct p54p_priv *priv = dev->priv; __le32 reg; reg = P54P_READ(int_ident); if (unlikely(reg == cpu_to_le32(0xFFFFFFFF))) { goto out; } P54P_WRITE(int_ack, reg); reg &= P54P_READ(int_enable); if (reg & cpu_to_le32(ISL38XX_INT_IDENT_UPDATE)) tasklet_schedule(&priv->tasklet); else if (reg & cpu_to_le32(ISL38XX_INT_IDENT_INIT)) complete(&priv->boot_comp); out: return reg ? 
IRQ_HANDLED : IRQ_NONE;
}

/*
 * Queue one frame on the data TX ring (ring 1) and ring the doorbell.
 * Called from the p54 common code via priv->common.tx.
 */
static void p54p_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
{
	unsigned long flags;
	struct p54p_priv *priv = dev->priv;
	struct p54p_ring_control *ring_control = priv->ring_control;
	struct p54p_desc *desc;
	dma_addr_t mapping;
	u32 idx, i;

	spin_lock_irqsave(&priv->lock, flags);
	idx = le32_to_cpu(ring_control->host_idx[1]);
	i = idx % ARRAY_SIZE(ring_control->tx_data);

	mapping = pci_map_single(priv->pdev, skb->data, skb->len,
				 PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(priv->pdev, mapping)) {
		/* Cannot DMA this frame: drop it. */
		spin_unlock_irqrestore(&priv->lock, flags);
		p54_free_skb(dev, skb);
		dev_err(&priv->pdev->dev, "TX DMA mapping error\n");
		return;
	}
	priv->tx_buf_data[i] = skb;

	desc = &ring_control->tx_data[i];
	desc->host_addr = cpu_to_le32(mapping);
	desc->device_addr = ((struct p54_hdr *)skb->data)->req_id;
	desc->len = cpu_to_le16(skb->len);
	desc->flags = 0;

	/* Descriptor must be globally visible before the index bump. */
	wmb();
	ring_control->host_idx[1] = cpu_to_le32(idx + 1);
	spin_unlock_irqrestore(&priv->lock, flags);

	P54P_WRITE(dev_int, cpu_to_le32(ISL38XX_DEV_INT_UPDATE));
	/* Posted-write flush. */
	P54P_READ(dev_int);
}

/*
 * Shut the device down: mask interrupts, detach the IRQ and tasklet,
 * reset the device, then unmap and free every buffer still sitting in
 * the four rings.
 */
static void p54p_stop(struct ieee80211_hw *dev)
{
	struct p54p_priv *priv = dev->priv;
	struct p54p_ring_control *ring_control = priv->ring_control;
	unsigned int i;
	struct p54p_desc *desc;

	P54P_WRITE(int_enable, cpu_to_le32(0));
	P54P_READ(int_enable);
	udelay(10);

	free_irq(priv->pdev->irq, dev);

	tasklet_kill(&priv->tasklet);

	P54P_WRITE(dev_int, cpu_to_le32(ISL38XX_DEV_INT_RESET));

	/* Release leftover RX buffers (data ring). */
	for (i = 0; i < ARRAY_SIZE(priv->rx_buf_data); i++) {
		desc = &ring_control->rx_data[i];
		if (desc->host_addr)
			pci_unmap_single(priv->pdev,
					 le32_to_cpu(desc->host_addr),
					 priv->common.rx_mtu + 32,
					 PCI_DMA_FROMDEVICE);
		kfree_skb(priv->rx_buf_data[i]);
		priv->rx_buf_data[i] = NULL;
	}

	/* Release leftover RX buffers (management ring). */
	for (i = 0; i < ARRAY_SIZE(priv->rx_buf_mgmt); i++) {
		desc = &ring_control->rx_mgmt[i];
		if (desc->host_addr)
			pci_unmap_single(priv->pdev,
					 le32_to_cpu(desc->host_addr),
					 priv->common.rx_mtu + 32,
					 PCI_DMA_FROMDEVICE);
		kfree_skb(priv->rx_buf_mgmt[i]);
		priv->rx_buf_mgmt[i] = NULL;
	}

	for (i =
0; i < ARRAY_SIZE(priv->tx_buf_data); i++) {
		/* Release in-flight TX buffers (data ring) —
		 * continuation of p54p_stop(). */
		desc = &ring_control->tx_data[i];
		if (desc->host_addr)
			pci_unmap_single(priv->pdev,
					 le32_to_cpu(desc->host_addr),
					 le16_to_cpu(desc->len),
					 PCI_DMA_TODEVICE);
		p54_free_skb(dev, priv->tx_buf_data[i]);
		priv->tx_buf_data[i] = NULL;
	}

	/* Release in-flight TX buffers (management ring). */
	for (i = 0; i < ARRAY_SIZE(priv->tx_buf_mgmt); i++) {
		desc = &ring_control->tx_mgmt[i];
		if (desc->host_addr)
			pci_unmap_single(priv->pdev,
					 le32_to_cpu(desc->host_addr),
					 le16_to_cpu(desc->len),
					 PCI_DMA_TODEVICE);
		p54_free_skb(dev, priv->tx_buf_mgmt[i]);
		priv->tx_buf_mgmt[i] = NULL;
	}

	memset(ring_control, 0, sizeof(*ring_control));
}

/*
 * Bring the device up: install the IRQ handler, upload firmware,
 * prime both RX rings, point the device at the shared ring-control
 * area and wait (up to 1s) for the firmware boot interrupt.
 * Returns 0 on success or a negative errno.
 */
static int p54p_open(struct ieee80211_hw *dev)
{
	struct p54p_priv *priv = dev->priv;
	int err;

	init_completion(&priv->boot_comp);
	err = request_irq(priv->pdev->irq, p54p_interrupt,
			  IRQF_SHARED, "p54pci", dev);
	if (err) {
		dev_err(&priv->pdev->dev, "failed to register IRQ handler\n");
		return err;
	}

	memset(priv->ring_control, 0, sizeof(*priv->ring_control));
	err = p54p_upload_firmware(dev);
	if (err) {
		free_irq(priv->pdev->irq, dev);
		return err;
	}
	priv->rx_idx_data = priv->tx_idx_data = 0;
	priv->rx_idx_mgmt = priv->tx_idx_mgmt = 0;

	p54p_refill_rx_ring(dev, 0, priv->ring_control->rx_data,
		ARRAY_SIZE(priv->ring_control->rx_data), priv->rx_buf_data, 0);

	p54p_refill_rx_ring(dev, 2, priv->ring_control->rx_mgmt,
		ARRAY_SIZE(priv->ring_control->rx_mgmt), priv->rx_buf_mgmt, 0);

	/* Register write sequence with flush-readbacks and delays;
	 * ordering here is required by the hardware. */
	P54P_WRITE(ring_control_base, cpu_to_le32(priv->ring_control_dma));
	P54P_READ(ring_control_base);
	wmb();
	udelay(10);

	P54P_WRITE(int_enable, cpu_to_le32(ISL38XX_INT_IDENT_INIT));
	P54P_READ(int_enable);
	wmb();
	udelay(10);

	P54P_WRITE(dev_int, cpu_to_le32(ISL38XX_DEV_INT_RESET));
	P54P_READ(dev_int);

	if (!wait_for_completion_interruptible_timeout(&priv->boot_comp, HZ)) {
		wiphy_err(dev->wiphy, "Cannot boot firmware!\n");
		p54p_stop(dev);
		return -ETIMEDOUT;
	}

	/* Firmware is up: switch to normal update interrupts. */
	P54P_WRITE(int_enable, cpu_to_le32(ISL38XX_INT_IDENT_UPDATE));
	P54P_READ(int_enable);
	wmb();
	udelay(10);

	P54P_WRITE(dev_int,
cpu_to_le32(ISL38XX_DEV_INT_UPDATE));
	P54P_READ(dev_int);
	wmb();
	udelay(10);

	return 0;
}

/*
 * Async firmware-load callback (second probe stage).  Boots the device
 * once to read the EEPROM, then registers with mac80211.  On any
 * failure the driver is detached from the device via
 * device_release_driver(), which triggers p54p_remove() for cleanup.
 */
static void p54p_firmware_step2(const struct firmware *fw, void *context)
{
	struct p54p_priv *priv = context;
	struct ieee80211_hw *dev = priv->common.hw;
	struct pci_dev *pdev = priv->pdev;
	int err;

	if (!fw) {
		dev_err(&pdev->dev, "Cannot find firmware (isl3886pci)\n");
		err = -ENOENT;
		goto out;
	}

	priv->firmware = fw;

	err = p54p_open(dev);
	if (err)
		goto out;
	/* Read the EEPROM with the device running, then stop it again;
	 * the error is checked only after the stop. */
	err = p54_read_eeprom(dev);
	p54p_stop(dev);
	if (err)
		goto out;

	err = p54_register_common(dev, &pdev->dev);
	if (err)
		goto out;

out:
	/* Unblock p54p_remove(), which waits on fw_loaded. */
	complete(&priv->fw_loaded);

	if (err) {
		struct device *parent = pdev->dev.parent;

		if (parent)
			device_lock(parent);

		/*
		 * This will indirectly result in a call to p54p_remove.
		 * Hence, we don't need to bother with freeing any
		 * allocated resources at all.
		 */
		device_release_driver(&pdev->dev);

		if (parent)
			device_unlock(parent);
	}

	/* Drop the reference taken in p54p_probe(). */
	pci_dev_put(pdev);
}

/*
 * First probe stage: enable the PCI device, claim BAR 0, set up 32-bit
 * DMA masks and allocate the ieee80211/priv structures.  Completes
 * asynchronously in p54p_firmware_step2() after the firmware loads.
 */
static int p54p_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct p54p_priv *priv;
	struct ieee80211_hw *dev;
	unsigned long mem_addr, mem_len;
	int err;

	/* Held until p54p_firmware_step2() finishes. */
	pci_dev_get(pdev);
	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable new PCI device\n");
		return err;
	}

	mem_addr = pci_resource_start(pdev, 0);
	mem_len = pci_resource_len(pdev, 0);
	if (mem_len < sizeof(struct p54p_csr)) {
		dev_err(&pdev->dev, "Too short PCI resources\n");
		err = -ENODEV;
		goto err_disable_dev;
	}

	err = pci_request_regions(pdev, "p54pci");
	if (err) {
		dev_err(&pdev->dev, "Cannot obtain PCI resources\n");
		goto err_disable_dev;
	}

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (!err)
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	if (err) {
		dev_err(&pdev->dev, "No suitable DMA available\n");
		goto err_free_reg;
	}

	pci_set_master(pdev);
	pci_try_set_mwi(pdev);

	/* Clear PCI config registers 0x40/0x41 — presumably a
	 * device-specific quirk; purpose not documented here. */
	pci_write_config_byte(pdev, 0x40, 0);
	pci_write_config_byte(pdev, 0x41, 0);

	dev = p54_init_common(sizeof(*priv));
	if (!dev) {
		dev_err(&pdev->dev, "ieee80211 alloc failed\n");
		err =
-ENOMEM;
		goto err_free_reg;
	}

	priv = dev->priv;
	priv->pdev = pdev;

	init_completion(&priv->fw_loaded);
	SET_IEEE80211_DEV(dev, &pdev->dev);
	pci_set_drvdata(pdev, dev);

	priv->map = ioremap(mem_addr, mem_len);
	if (!priv->map) {
		dev_err(&pdev->dev, "Cannot map device memory\n");
		err = -ENOMEM;
		goto err_free_dev;
	}

	/* Coherent DMA area shared with the device for all four rings. */
	priv->ring_control = pci_alloc_consistent(pdev,
		sizeof(*priv->ring_control), &priv->ring_control_dma);
	if (!priv->ring_control) {
		dev_err(&pdev->dev, "Cannot allocate rings\n");
		err = -ENOMEM;
		goto err_iounmap;
	}
	priv->common.open = p54p_open;
	priv->common.stop = p54p_stop;
	priv->common.tx = p54p_tx;

	spin_lock_init(&priv->lock);
	tasklet_init(&priv->tasklet, p54p_tasklet, (unsigned long)dev);

	/* Probe completes asynchronously in p54p_firmware_step2(). */
	err = request_firmware_nowait(THIS_MODULE, 1, "isl3886pci",
				      &priv->pdev->dev, GFP_KERNEL,
				      priv, p54p_firmware_step2);
	if (!err)
		return 0;

	pci_free_consistent(pdev, sizeof(*priv->ring_control),
			    priv->ring_control, priv->ring_control_dma);

err_iounmap:
	iounmap(priv->map);

err_free_dev:
	p54_free_common(dev);

err_free_reg:
	pci_release_regions(pdev);

err_disable_dev:
	pci_disable_device(pdev);
	pci_dev_put(pdev);
	return err;
}

/*
 * Device teardown.  Waits for the async firmware stage to finish
 * before unwinding everything p54p_probe() set up.
 */
static void p54p_remove(struct pci_dev *pdev)
{
	struct ieee80211_hw *dev = pci_get_drvdata(pdev);
	struct p54p_priv *priv;

	if (!dev)
		return;

	priv = dev->priv;
	/* Don't race the firmware callback. */
	wait_for_completion(&priv->fw_loaded);
	p54_unregister_common(dev);
	release_firmware(priv->firmware);
	pci_free_consistent(pdev, sizeof(*priv->ring_control),
			    priv->ring_control, priv->ring_control_dma);
	iounmap(priv->map);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	p54_free_common(dev);
}

#ifdef CONFIG_PM_SLEEP
/* Suspend: save PCI config state and power the device down to D3hot. */
static int p54p_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);

	pci_save_state(pdev);
	pci_set_power_state(pdev, PCI_D3hot);
	pci_disable_device(pdev);
	return 0;
}

/* Resume: re-enable the device and return it to full power (D0). */
static int p54p_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	int err;

	err = pci_reenable_device(pdev);
	if (err)
		return err;
	return pci_set_power_state(pdev, PCI_D0);
}
/* PM callbacks only exist when sleep support is configured;
 * otherwise the driver advertises no PM ops. */
static SIMPLE_DEV_PM_OPS(p54pci_pm_ops, p54p_suspend, p54p_resume);
#define P54P_PM_OPS (&p54pci_pm_ops)
#else
#define P54P_PM_OPS (NULL)
#endif /* CONFIG_PM_SLEEP */

/* PCI driver glue: device table, probe/remove and PM hooks. */
static struct pci_driver p54p_driver = {
	.name		= "p54pci",
	.id_table	= p54p_table,
	.probe		= p54p_probe,
	.remove		= p54p_remove,
	.driver.pm	= P54P_PM_OPS,
};

module_pci_driver(p54p_driver);
gpl-2.0
AICP/kernel_htc_msm8960-old
drivers/mtd/nand/pxa3xx_nand.c
752
33813
/* * drivers/mtd/nand/pxa3xx_nand.c * * Copyright © 2005 Intel Corporation * Copyright © 2006 Marvell International Ltd. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/interrupt.h> #include <linux/platform_device.h> #include <linux/dma-mapping.h> #include <linux/delay.h> #include <linux/clk.h> #include <linux/mtd/mtd.h> #include <linux/mtd/nand.h> #include <linux/mtd/partitions.h> #include <linux/io.h> #include <linux/irq.h> #include <linux/slab.h> #include <mach/dma.h> #include <plat/pxa3xx_nand.h> #define CHIP_DELAY_TIMEOUT (2 * HZ/10) #define NAND_STOP_DELAY (2 * HZ/50) #define PAGE_CHUNK_SIZE (2048) /* registers and bit definitions */ #define NDCR (0x00) /* Control register */ #define NDTR0CS0 (0x04) /* Timing Parameter 0 for CS0 */ #define NDTR1CS0 (0x0C) /* Timing Parameter 1 for CS0 */ #define NDSR (0x14) /* Status Register */ #define NDPCR (0x18) /* Page Count Register */ #define NDBDR0 (0x1C) /* Bad Block Register 0 */ #define NDBDR1 (0x20) /* Bad Block Register 1 */ #define NDDB (0x40) /* Data Buffer */ #define NDCB0 (0x48) /* Command Buffer0 */ #define NDCB1 (0x4C) /* Command Buffer1 */ #define NDCB2 (0x50) /* Command Buffer2 */ #define NDCR_SPARE_EN (0x1 << 31) #define NDCR_ECC_EN (0x1 << 30) #define NDCR_DMA_EN (0x1 << 29) #define NDCR_ND_RUN (0x1 << 28) #define NDCR_DWIDTH_C (0x1 << 27) #define NDCR_DWIDTH_M (0x1 << 26) #define NDCR_PAGE_SZ (0x1 << 24) #define NDCR_NCSX (0x1 << 23) #define NDCR_ND_MODE (0x3 << 21) #define NDCR_NAND_MODE (0x0) #define NDCR_CLR_PG_CNT (0x1 << 20) #define NDCR_STOP_ON_UNCOR (0x1 << 19) #define NDCR_RD_ID_CNT_MASK (0x7 << 16) #define NDCR_RD_ID_CNT(x) (((x) << 16) & NDCR_RD_ID_CNT_MASK) #define NDCR_RA_START (0x1 << 15) #define NDCR_PG_PER_BLK (0x1 << 14) #define NDCR_ND_ARB_EN (0x1 << 12) #define NDCR_INT_MASK 
(0xFFF) #define NDSR_MASK (0xfff) #define NDSR_RDY (0x1 << 12) #define NDSR_FLASH_RDY (0x1 << 11) #define NDSR_CS0_PAGED (0x1 << 10) #define NDSR_CS1_PAGED (0x1 << 9) #define NDSR_CS0_CMDD (0x1 << 8) #define NDSR_CS1_CMDD (0x1 << 7) #define NDSR_CS0_BBD (0x1 << 6) #define NDSR_CS1_BBD (0x1 << 5) #define NDSR_DBERR (0x1 << 4) #define NDSR_SBERR (0x1 << 3) #define NDSR_WRDREQ (0x1 << 2) #define NDSR_RDDREQ (0x1 << 1) #define NDSR_WRCMDREQ (0x1) #define NDCB0_ST_ROW_EN (0x1 << 26) #define NDCB0_AUTO_RS (0x1 << 25) #define NDCB0_CSEL (0x1 << 24) #define NDCB0_CMD_TYPE_MASK (0x7 << 21) #define NDCB0_CMD_TYPE(x) (((x) << 21) & NDCB0_CMD_TYPE_MASK) #define NDCB0_NC (0x1 << 20) #define NDCB0_DBC (0x1 << 19) #define NDCB0_ADDR_CYC_MASK (0x7 << 16) #define NDCB0_ADDR_CYC(x) (((x) << 16) & NDCB0_ADDR_CYC_MASK) #define NDCB0_CMD2_MASK (0xff << 8) #define NDCB0_CMD1_MASK (0xff) #define NDCB0_ADDR_CYC_SHIFT (16) /* macros for registers read/write */ #define nand_writel(info, off, val) \ writel_relaxed((val), (info)->mmio_base + (off)) #define nand_readl(info, off) \ readl_relaxed((info)->mmio_base + (off)) /* error code and state */ enum { ERR_NONE = 0, ERR_DMABUSERR = -1, ERR_SENDCMD = -2, ERR_DBERR = -3, ERR_BBERR = -4, ERR_SBERR = -5, }; enum { STATE_IDLE = 0, STATE_PREPARED, STATE_CMD_HANDLE, STATE_DMA_READING, STATE_DMA_WRITING, STATE_DMA_DONE, STATE_PIO_READING, STATE_PIO_WRITING, STATE_CMD_DONE, STATE_READY, }; struct pxa3xx_nand_host { struct nand_chip chip; struct pxa3xx_nand_cmdset *cmdset; struct mtd_info *mtd; void *info_data; /* page size of attached chip */ unsigned int page_size; int use_ecc; int cs; /* calculated from pxa3xx_nand_flash data */ unsigned int col_addr_cycles; unsigned int row_addr_cycles; size_t read_id_bytes; /* cached register value */ uint32_t reg_ndcr; uint32_t ndtr0cs0; uint32_t ndtr1cs0; }; struct pxa3xx_nand_info { struct nand_hw_control controller; struct platform_device *pdev; struct clk *clk; void __iomem *mmio_base; unsigned long 
mmio_phys; struct completion cmd_complete; unsigned int buf_start; unsigned int buf_count; /* DMA information */ int drcmr_dat; int drcmr_cmd; unsigned char *data_buff; unsigned char *oob_buff; dma_addr_t data_buff_phys; int data_dma_ch; struct pxa_dma_desc *data_desc; dma_addr_t data_desc_addr; struct pxa3xx_nand_host *host[NUM_CHIP_SELECT]; unsigned int state; int cs; int use_ecc; /* use HW ECC ? */ int use_dma; /* use DMA ? */ int is_ready; unsigned int page_size; /* page size of attached chip */ unsigned int data_size; /* data size in FIFO */ unsigned int oob_size; int retcode; /* generated NDCBx register values */ uint32_t ndcb0; uint32_t ndcb1; uint32_t ndcb2; }; static bool use_dma = 1; module_param(use_dma, bool, 0444); MODULE_PARM_DESC(use_dma, "enable DMA for data transferring to/from NAND HW"); /* * Default NAND flash controller configuration setup by the * bootloader. This configuration is used only when pdata->keep_config is set */ static struct pxa3xx_nand_cmdset default_cmdset = { .read1 = 0x3000, .read2 = 0x0050, .program = 0x1080, .read_status = 0x0070, .read_id = 0x0090, .erase = 0xD060, .reset = 0x00FF, .lock = 0x002A, .unlock = 0x2423, .lock_status = 0x007A, }; static struct pxa3xx_nand_timing timing[] = { { 40, 80, 60, 100, 80, 100, 90000, 400, 40, }, { 10, 0, 20, 40, 30, 40, 11123, 110, 10, }, { 10, 25, 15, 25, 15, 30, 25000, 60, 10, }, { 10, 35, 15, 25, 15, 25, 25000, 60, 10, }, }; static struct pxa3xx_nand_flash builtin_flash_types[] = { { "DEFAULT FLASH", 0, 0, 2048, 8, 8, 0, &timing[0] }, { "64MiB 16-bit", 0x46ec, 32, 512, 16, 16, 4096, &timing[1] }, { "256MiB 8-bit", 0xdaec, 64, 2048, 8, 8, 2048, &timing[1] }, { "4GiB 8-bit", 0xd7ec, 128, 4096, 8, 8, 8192, &timing[1] }, { "128MiB 8-bit", 0xa12c, 64, 2048, 8, 8, 1024, &timing[2] }, { "128MiB 16-bit", 0xb12c, 64, 2048, 16, 16, 1024, &timing[2] }, { "512MiB 8-bit", 0xdc2c, 64, 2048, 8, 8, 4096, &timing[2] }, { "512MiB 16-bit", 0xcc2c, 64, 2048, 16, 16, 4096, &timing[2] }, { "256MiB 16-bit", 
0xba20, 64, 2048, 16, 16, 2048, &timing[3] }, }; /* Define a default flash type setting serve as flash detecting only */ #define DEFAULT_FLASH_TYPE (&builtin_flash_types[0]) const char *mtd_names[] = {"pxa3xx_nand-0", "pxa3xx_nand-1", NULL}; #define NDTR0_tCH(c) (min((c), 7) << 19) #define NDTR0_tCS(c) (min((c), 7) << 16) #define NDTR0_tWH(c) (min((c), 7) << 11) #define NDTR0_tWP(c) (min((c), 7) << 8) #define NDTR0_tRH(c) (min((c), 7) << 3) #define NDTR0_tRP(c) (min((c), 7) << 0) #define NDTR1_tR(c) (min((c), 65535) << 16) #define NDTR1_tWHR(c) (min((c), 15) << 4) #define NDTR1_tAR(c) (min((c), 15) << 0) /* convert nano-seconds to nand flash controller clock cycles */ #define ns2cycle(ns, clk) (int)((ns) * (clk / 1000000) / 1000) static void pxa3xx_nand_set_timing(struct pxa3xx_nand_host *host, const struct pxa3xx_nand_timing *t) { struct pxa3xx_nand_info *info = host->info_data; unsigned long nand_clk = clk_get_rate(info->clk); uint32_t ndtr0, ndtr1; ndtr0 = NDTR0_tCH(ns2cycle(t->tCH, nand_clk)) | NDTR0_tCS(ns2cycle(t->tCS, nand_clk)) | NDTR0_tWH(ns2cycle(t->tWH, nand_clk)) | NDTR0_tWP(ns2cycle(t->tWP, nand_clk)) | NDTR0_tRH(ns2cycle(t->tRH, nand_clk)) | NDTR0_tRP(ns2cycle(t->tRP, nand_clk)); ndtr1 = NDTR1_tR(ns2cycle(t->tR, nand_clk)) | NDTR1_tWHR(ns2cycle(t->tWHR, nand_clk)) | NDTR1_tAR(ns2cycle(t->tAR, nand_clk)); host->ndtr0cs0 = ndtr0; host->ndtr1cs0 = ndtr1; nand_writel(info, NDTR0CS0, ndtr0); nand_writel(info, NDTR1CS0, ndtr1); } static void pxa3xx_set_datasize(struct pxa3xx_nand_info *info) { struct pxa3xx_nand_host *host = info->host[info->cs]; int oob_enable = host->reg_ndcr & NDCR_SPARE_EN; info->data_size = host->page_size; if (!oob_enable) { info->oob_size = 0; return; } switch (host->page_size) { case 2048: info->oob_size = (info->use_ecc) ? 40 : 64; break; case 512: info->oob_size = (info->use_ecc) ? 8 : 16; break; } } /** * NOTE: it is a must to set ND_RUN firstly, then write * command buffer, otherwise, it does not work. 
 * We enable all the interrupt at the same time, and
 * let pxa3xx_nand_irq to handle all logic.
 */
static void pxa3xx_nand_start(struct pxa3xx_nand_info *info)
{
	struct pxa3xx_nand_host *host = info->host[info->cs];
	uint32_t ndcr;

	/* Base control value cached per chip-select, plus the optional
	 * ECC/DMA engines for this operation. */
	ndcr = host->reg_ndcr;
	ndcr |= info->use_ecc ? NDCR_ECC_EN : 0;
	ndcr |= info->use_dma ? NDCR_DMA_EN : 0;
	ndcr |= NDCR_ND_RUN;

	/* clear status bits and run */
	nand_writel(info, NDCR, 0);
	nand_writel(info, NDSR, NDSR_MASK);
	nand_writel(info, NDCR, ndcr);
}

/*
 * Stop the controller: poll (up to NAND_STOP_DELAY) for ND_RUN to
 * clear, force it clear on timeout, then ack all status bits.
 */
static void pxa3xx_nand_stop(struct pxa3xx_nand_info *info)
{
	uint32_t ndcr;
	int timeout = NAND_STOP_DELAY;

	/* wait RUN bit in NDCR become 0 */
	ndcr = nand_readl(info, NDCR);
	while ((ndcr & NDCR_ND_RUN) && (timeout-- > 0)) {
		ndcr = nand_readl(info, NDCR);
		udelay(1);
	}

	if (timeout <= 0) {
		ndcr &= ~NDCR_ND_RUN;
		nand_writel(info, NDCR, ndcr);
	}
	/* clear status bits */
	nand_writel(info, NDSR, NDSR_MASK);
}

/* NDCR interrupt bits are masks: clearing a bit enables the source. */
static void enable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
{
	uint32_t ndcr;

	ndcr = nand_readl(info, NDCR);
	nand_writel(info, NDCR, ndcr & ~int_mask);
}

/* Setting a mask bit in NDCR disables the corresponding source. */
static void disable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
{
	uint32_t ndcr;

	ndcr = nand_readl(info, NDCR);
	nand_writel(info, NDCR, ndcr | int_mask);
}

/*
 * PIO transfer of the current data (and optional OOB) chunk through the
 * NDDB FIFO, in 32-bit words.  Direction is taken from info->state;
 * any other state is a driver bug (BUG()).
 */
static void handle_data_pio(struct pxa3xx_nand_info *info)
{
	switch (info->state) {
	case STATE_PIO_WRITING:
		__raw_writesl(info->mmio_base + NDDB, info->data_buff,
				DIV_ROUND_UP(info->data_size, 4));
		if (info->oob_size > 0)
			__raw_writesl(info->mmio_base + NDDB, info->oob_buff,
					DIV_ROUND_UP(info->oob_size, 4));
		break;
	case STATE_PIO_READING:
		__raw_readsl(info->mmio_base + NDDB, info->data_buff,
				DIV_ROUND_UP(info->data_size, 4));
		if (info->oob_size > 0)
			__raw_readsl(info->mmio_base + NDDB, info->oob_buff,
					DIV_ROUND_UP(info->oob_size, 4));
		break;
	default:
		dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
				info->state);
		BUG();
	}
}

/*
 * Program and kick one PXA DMA descriptor for the current transfer
 * (continues on the next chunk line).
 */
static void start_data_dma(struct pxa3xx_nand_info *info)
{
	struct pxa_dma_desc *desc = info->data_desc;
	int dma_len =
ALIGN(info->data_size + info->oob_size, 32);

	/* Single stop descriptor; 32-bit wide, 32-byte bursts. */
	desc->ddadr = DDADR_STOP;
	desc->dcmd = DCMD_ENDIRQEN | DCMD_WIDTH4 | DCMD_BURST32 | dma_len;

	switch (info->state) {
	case STATE_DMA_WRITING:
		/* memory -> NDDB FIFO */
		desc->dsadr = info->data_buff_phys;
		desc->dtadr = info->mmio_phys + NDDB;
		desc->dcmd |= DCMD_INCSRCADDR | DCMD_FLOWTRG;
		break;
	case STATE_DMA_READING:
		/* NDDB FIFO -> memory */
		desc->dtadr = info->data_buff_phys;
		desc->dsadr = info->mmio_phys + NDDB;
		desc->dcmd |= DCMD_INCTRGADDR | DCMD_FLOWSRC;
		break;
	default:
		dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
				info->state);
		BUG();
	}

	/* Map the request line to our channel and start it. */
	DRCMR(info->drcmr_dat) = DRCMR_MAPVLD | info->data_dma_ch;
	DDADR(info->data_dma_ch) = info->data_desc_addr;
	DCSR(info->data_dma_ch) |= DCSR_RUN;
}

/*
 * DMA completion callback: ack channel status, record bus errors,
 * re-enable controller interrupts and ack the data-request bits.
 */
static void pxa3xx_nand_data_dma_irq(int channel, void *data)
{
	struct pxa3xx_nand_info *info = data;
	uint32_t dcsr;

	dcsr = DCSR(channel);
	DCSR(channel) = dcsr;

	if (dcsr & DCSR_BUSERR) {
		info->retcode = ERR_DMABUSERR;
	}

	info->state = STATE_DMA_DONE;
	enable_int(info, NDCR_INT_MASK);
	nand_writel(info, NDSR, NDSR_WRDREQ | NDSR_RDDREQ);
}

/*
 * Controller interrupt handler.  Ready/command-done bits differ per
 * chip-select; records ECC errors, starts DMA or performs PIO for
 * data requests, and writes the queued command words on WRCMDREQ.
 */
static irqreturn_t pxa3xx_nand_irq(int irq, void *devid)
{
	struct pxa3xx_nand_info *info = devid;
	unsigned int status, is_completed = 0;
	unsigned int ready, cmd_done;

	if (info->cs == 0) {
		ready = NDSR_FLASH_RDY;
		cmd_done = NDSR_CS0_CMDD;
	} else {
		ready = NDSR_RDY;
		cmd_done = NDSR_CS1_CMDD;
	}

	status = nand_readl(info, NDSR);

	if (status & NDSR_DBERR)
		info->retcode = ERR_DBERR;
	if (status & NDSR_SBERR)
		info->retcode = ERR_SBERR;
	if (status & (NDSR_RDDREQ | NDSR_WRDREQ)) {
		/* whether use dma to transfer data */
		if (info->use_dma) {
			disable_int(info, NDCR_INT_MASK);
			info->state = (status & NDSR_RDDREQ) ?
				      STATE_DMA_READING : STATE_DMA_WRITING;
			start_data_dma(info);
			goto NORMAL_IRQ_EXIT;
		} else {
			info->state = (status & NDSR_RDDREQ) ?
STATE_PIO_READING : STATE_PIO_WRITING; handle_data_pio(info); } } if (status & cmd_done) { info->state = STATE_CMD_DONE; is_completed = 1; } if (status & ready) { info->is_ready = 1; info->state = STATE_READY; } if (status & NDSR_WRCMDREQ) { nand_writel(info, NDSR, NDSR_WRCMDREQ); status &= ~NDSR_WRCMDREQ; info->state = STATE_CMD_HANDLE; nand_writel(info, NDCB0, info->ndcb0); nand_writel(info, NDCB0, info->ndcb1); nand_writel(info, NDCB0, info->ndcb2); } /* clear NDSR to let the controller exit the IRQ */ nand_writel(info, NDSR, status); if (is_completed) complete(&info->cmd_complete); NORMAL_IRQ_EXIT: return IRQ_HANDLED; } static inline int is_buf_blank(uint8_t *buf, size_t len) { for (; len > 0; len--) if (*buf++ != 0xff) return 0; return 1; } static int prepare_command_pool(struct pxa3xx_nand_info *info, int command, uint16_t column, int page_addr) { uint16_t cmd; int addr_cycle, exec_cmd; struct pxa3xx_nand_host *host; struct mtd_info *mtd; host = info->host[info->cs]; mtd = host->mtd; addr_cycle = 0; exec_cmd = 1; /* reset data and oob column point to handle data */ info->buf_start = 0; info->buf_count = 0; info->oob_size = 0; info->use_ecc = 0; info->is_ready = 0; info->retcode = ERR_NONE; if (info->cs != 0) info->ndcb0 = NDCB0_CSEL; else info->ndcb0 = 0; switch (command) { case NAND_CMD_READ0: case NAND_CMD_PAGEPROG: info->use_ecc = 1; case NAND_CMD_READOOB: pxa3xx_set_datasize(info); break; case NAND_CMD_SEQIN: exec_cmd = 0; break; default: info->ndcb1 = 0; info->ndcb2 = 0; break; } addr_cycle = NDCB0_ADDR_CYC(host->row_addr_cycles + host->col_addr_cycles); switch (command) { case NAND_CMD_READOOB: case NAND_CMD_READ0: cmd = host->cmdset->read1; if (command == NAND_CMD_READOOB) info->buf_start = mtd->writesize + column; else info->buf_start = column; if (unlikely(host->page_size < PAGE_CHUNK_SIZE)) info->ndcb0 |= NDCB0_CMD_TYPE(0) | addr_cycle | (cmd & NDCB0_CMD1_MASK); else info->ndcb0 |= NDCB0_CMD_TYPE(0) | NDCB0_DBC | addr_cycle | cmd; case 
NAND_CMD_SEQIN: /* small page addr setting */ if (unlikely(host->page_size < PAGE_CHUNK_SIZE)) { info->ndcb1 = ((page_addr & 0xFFFFFF) << 8) | (column & 0xFF); info->ndcb2 = 0; } else { info->ndcb1 = ((page_addr & 0xFFFF) << 16) | (column & 0xFFFF); if (page_addr & 0xFF0000) info->ndcb2 = (page_addr & 0xFF0000) >> 16; else info->ndcb2 = 0; } info->buf_count = mtd->writesize + mtd->oobsize; memset(info->data_buff, 0xFF, info->buf_count); break; case NAND_CMD_PAGEPROG: if (is_buf_blank(info->data_buff, (mtd->writesize + mtd->oobsize))) { exec_cmd = 0; break; } cmd = host->cmdset->program; info->ndcb0 |= NDCB0_CMD_TYPE(0x1) | NDCB0_AUTO_RS | NDCB0_ST_ROW_EN | NDCB0_DBC | cmd | addr_cycle; break; case NAND_CMD_READID: cmd = host->cmdset->read_id; info->buf_count = host->read_id_bytes; info->ndcb0 |= NDCB0_CMD_TYPE(3) | NDCB0_ADDR_CYC(1) | cmd; info->data_size = 8; break; case NAND_CMD_STATUS: cmd = host->cmdset->read_status; info->buf_count = 1; info->ndcb0 |= NDCB0_CMD_TYPE(4) | NDCB0_ADDR_CYC(1) | cmd; info->data_size = 8; break; case NAND_CMD_ERASE1: cmd = host->cmdset->erase; info->ndcb0 |= NDCB0_CMD_TYPE(2) | NDCB0_AUTO_RS | NDCB0_ADDR_CYC(3) | NDCB0_DBC | cmd; info->ndcb1 = page_addr; info->ndcb2 = 0; break; case NAND_CMD_RESET: cmd = host->cmdset->reset; info->ndcb0 |= NDCB0_CMD_TYPE(5) | cmd; break; case NAND_CMD_ERASE2: exec_cmd = 0; break; default: exec_cmd = 0; dev_err(&info->pdev->dev, "non-supported command %x\n", command); break; } return exec_cmd; } static void pxa3xx_nand_cmdfunc(struct mtd_info *mtd, unsigned command, int column, int page_addr) { struct pxa3xx_nand_host *host = mtd->priv; struct pxa3xx_nand_info *info = host->info_data; int ret, exec_cmd; /* * if this is a x16 device ,then convert the input * "byte" address into a "word" address appropriate * for indexing a word-oriented device */ if (host->reg_ndcr & NDCR_DWIDTH_M) column /= 2; /* * There may be different NAND chip hooked to * different chip select, so check whether * chip select has 
been changed, if yes, reset the timing */ if (info->cs != host->cs) { info->cs = host->cs; nand_writel(info, NDTR0CS0, host->ndtr0cs0); nand_writel(info, NDTR1CS0, host->ndtr1cs0); } info->state = STATE_PREPARED; exec_cmd = prepare_command_pool(info, command, column, page_addr); if (exec_cmd) { init_completion(&info->cmd_complete); pxa3xx_nand_start(info); ret = wait_for_completion_timeout(&info->cmd_complete, CHIP_DELAY_TIMEOUT); if (!ret) { dev_err(&info->pdev->dev, "Wait time out!!!\n"); /* Stop State Machine for next command cycle */ pxa3xx_nand_stop(info); } } info->state = STATE_IDLE; } static void pxa3xx_nand_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip, const uint8_t *buf) { chip->write_buf(mtd, buf, mtd->writesize); chip->write_buf(mtd, chip->oob_poi, mtd->oobsize); } static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip, uint8_t *buf, int page) { struct pxa3xx_nand_host *host = mtd->priv; struct pxa3xx_nand_info *info = host->info_data; chip->read_buf(mtd, buf, mtd->writesize); chip->read_buf(mtd, chip->oob_poi, mtd->oobsize); if (info->retcode == ERR_SBERR) { switch (info->use_ecc) { case 1: mtd->ecc_stats.corrected++; break; case 0: default: break; } } else if (info->retcode == ERR_DBERR) { /* * for blank page (all 0xff), HW will calculate its ECC as * 0, which is different from the ECC information within * OOB, ignore such double bit errors */ if (is_buf_blank(buf, mtd->writesize)) info->retcode = ERR_NONE; else mtd->ecc_stats.failed++; } return 0; } static uint8_t pxa3xx_nand_read_byte(struct mtd_info *mtd) { struct pxa3xx_nand_host *host = mtd->priv; struct pxa3xx_nand_info *info = host->info_data; char retval = 0xFF; if (info->buf_start < info->buf_count) /* Has just send a new command? 
*/ retval = info->data_buff[info->buf_start++]; return retval; } static u16 pxa3xx_nand_read_word(struct mtd_info *mtd) { struct pxa3xx_nand_host *host = mtd->priv; struct pxa3xx_nand_info *info = host->info_data; u16 retval = 0xFFFF; if (!(info->buf_start & 0x01) && info->buf_start < info->buf_count) { retval = *((u16 *)(info->data_buff+info->buf_start)); info->buf_start += 2; } return retval; } static void pxa3xx_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len) { struct pxa3xx_nand_host *host = mtd->priv; struct pxa3xx_nand_info *info = host->info_data; int real_len = min_t(size_t, len, info->buf_count - info->buf_start); memcpy(buf, info->data_buff + info->buf_start, real_len); info->buf_start += real_len; } static void pxa3xx_nand_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len) { struct pxa3xx_nand_host *host = mtd->priv; struct pxa3xx_nand_info *info = host->info_data; int real_len = min_t(size_t, len, info->buf_count - info->buf_start); memcpy(info->data_buff + info->buf_start, buf, real_len); info->buf_start += real_len; } static int pxa3xx_nand_verify_buf(struct mtd_info *mtd, const uint8_t *buf, int len) { return 0; } static void pxa3xx_nand_select_chip(struct mtd_info *mtd, int chip) { return; } static int pxa3xx_nand_waitfunc(struct mtd_info *mtd, struct nand_chip *this) { struct pxa3xx_nand_host *host = mtd->priv; struct pxa3xx_nand_info *info = host->info_data; /* pxa3xx_nand_send_command has waited for command complete */ if (this->state == FL_WRITING || this->state == FL_ERASING) { if (info->retcode == ERR_NONE) return 0; else { /* * any error make it return 0x01 which will tell * the caller the erase and write fail */ return 0x01; } } return 0; } static int pxa3xx_nand_config_flash(struct pxa3xx_nand_info *info, const struct pxa3xx_nand_flash *f) { struct platform_device *pdev = info->pdev; struct pxa3xx_nand_platform_data *pdata = pdev->dev.platform_data; struct pxa3xx_nand_host *host = info->host[info->cs]; uint32_t ndcr = 
0x0; /* enable all interrupts */ if (f->page_size != 2048 && f->page_size != 512) { dev_err(&pdev->dev, "Current only support 2048 and 512 size\n"); return -EINVAL; } if (f->flash_width != 16 && f->flash_width != 8) { dev_err(&pdev->dev, "Only support 8bit and 16 bit!\n"); return -EINVAL; } /* calculate flash information */ host->cmdset = &default_cmdset; host->page_size = f->page_size; host->read_id_bytes = (f->page_size == 2048) ? 4 : 2; /* calculate addressing information */ host->col_addr_cycles = (f->page_size == 2048) ? 2 : 1; if (f->num_blocks * f->page_per_block > 65536) host->row_addr_cycles = 3; else host->row_addr_cycles = 2; ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0; ndcr |= (host->col_addr_cycles == 2) ? NDCR_RA_START : 0; ndcr |= (f->page_per_block == 64) ? NDCR_PG_PER_BLK : 0; ndcr |= (f->page_size == 2048) ? NDCR_PAGE_SZ : 0; ndcr |= (f->flash_width == 16) ? NDCR_DWIDTH_M : 0; ndcr |= (f->dfc_width == 16) ? NDCR_DWIDTH_C : 0; ndcr |= NDCR_RD_ID_CNT(host->read_id_bytes); ndcr |= NDCR_SPARE_EN; /* enable spare by default */ host->reg_ndcr = ndcr; pxa3xx_nand_set_timing(host, f->timing); return 0; } static int pxa3xx_nand_detect_config(struct pxa3xx_nand_info *info) { /* * We set 0 by hard coding here, for we don't support keep_config * when there is more than one chip attached to the controller */ struct pxa3xx_nand_host *host = info->host[0]; uint32_t ndcr = nand_readl(info, NDCR); if (ndcr & NDCR_PAGE_SZ) { host->page_size = 2048; host->read_id_bytes = 4; } else { host->page_size = 512; host->read_id_bytes = 2; } host->reg_ndcr = ndcr & ~NDCR_INT_MASK; host->cmdset = &default_cmdset; host->ndtr0cs0 = nand_readl(info, NDTR0CS0); host->ndtr1cs0 = nand_readl(info, NDTR1CS0); return 0; } /* the maximum possible buffer size for large page with OOB data * is: 2048 + 64 = 2112 bytes, allocate a page here for both the * data buffer and the DMA descriptor */ #define MAX_BUFF_SIZE PAGE_SIZE static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info 
*info) { struct platform_device *pdev = info->pdev; int data_desc_offset = MAX_BUFF_SIZE - sizeof(struct pxa_dma_desc); if (use_dma == 0) { info->data_buff = kmalloc(MAX_BUFF_SIZE, GFP_KERNEL); if (info->data_buff == NULL) return -ENOMEM; return 0; } info->data_buff = dma_alloc_coherent(&pdev->dev, MAX_BUFF_SIZE, &info->data_buff_phys, GFP_KERNEL); if (info->data_buff == NULL) { dev_err(&pdev->dev, "failed to allocate dma buffer\n"); return -ENOMEM; } info->data_desc = (void *)info->data_buff + data_desc_offset; info->data_desc_addr = info->data_buff_phys + data_desc_offset; info->data_dma_ch = pxa_request_dma("nand-data", DMA_PRIO_LOW, pxa3xx_nand_data_dma_irq, info); if (info->data_dma_ch < 0) { dev_err(&pdev->dev, "failed to request data dma\n"); dma_free_coherent(&pdev->dev, MAX_BUFF_SIZE, info->data_buff, info->data_buff_phys); return info->data_dma_ch; } return 0; } static int pxa3xx_nand_sensing(struct pxa3xx_nand_info *info) { struct mtd_info *mtd; int ret; mtd = info->host[info->cs]->mtd; /* use the common timing to make a try */ ret = pxa3xx_nand_config_flash(info, &builtin_flash_types[0]); if (ret) return ret; pxa3xx_nand_cmdfunc(mtd, NAND_CMD_RESET, 0, 0); if (info->is_ready) return 0; return -ENODEV; } static int pxa3xx_nand_scan(struct mtd_info *mtd) { struct pxa3xx_nand_host *host = mtd->priv; struct pxa3xx_nand_info *info = host->info_data; struct platform_device *pdev = info->pdev; struct pxa3xx_nand_platform_data *pdata = pdev->dev.platform_data; struct nand_flash_dev pxa3xx_flash_ids[2], *def = NULL; const struct pxa3xx_nand_flash *f = NULL; struct nand_chip *chip = mtd->priv; uint32_t id = -1; uint64_t chipsize; int i, ret, num; if (pdata->keep_config && !pxa3xx_nand_detect_config(info)) goto KEEP_CONFIG; ret = pxa3xx_nand_sensing(info); if (ret) { dev_info(&info->pdev->dev, "There is no chip on cs %d!\n", info->cs); return ret; } chip->cmdfunc(mtd, NAND_CMD_READID, 0, 0); id = *((uint16_t *)(info->data_buff)); if (id != 0) 
dev_info(&info->pdev->dev, "Detect a flash id %x\n", id); else { dev_warn(&info->pdev->dev, "Read out ID 0, potential timing set wrong!!\n"); return -EINVAL; } num = ARRAY_SIZE(builtin_flash_types) + pdata->num_flash - 1; for (i = 0; i < num; i++) { if (i < pdata->num_flash) f = pdata->flash + i; else f = &builtin_flash_types[i - pdata->num_flash + 1]; /* find the chip in default list */ if (f->chip_id == id) break; } if (i >= (ARRAY_SIZE(builtin_flash_types) + pdata->num_flash - 1)) { dev_err(&info->pdev->dev, "ERROR!! flash not defined!!!\n"); return -EINVAL; } ret = pxa3xx_nand_config_flash(info, f); if (ret) { dev_err(&info->pdev->dev, "ERROR! Configure failed\n"); return ret; } pxa3xx_flash_ids[0].name = f->name; pxa3xx_flash_ids[0].id = (f->chip_id >> 8) & 0xffff; pxa3xx_flash_ids[0].pagesize = f->page_size; chipsize = (uint64_t)f->num_blocks * f->page_per_block * f->page_size; pxa3xx_flash_ids[0].chipsize = chipsize >> 20; pxa3xx_flash_ids[0].erasesize = f->page_size * f->page_per_block; if (f->flash_width == 16) pxa3xx_flash_ids[0].options = NAND_BUSWIDTH_16; pxa3xx_flash_ids[1].name = NULL; def = pxa3xx_flash_ids; KEEP_CONFIG: chip->ecc.mode = NAND_ECC_HW; chip->ecc.size = host->page_size; chip->ecc.strength = 1; chip->options = NAND_NO_AUTOINCR; chip->options |= NAND_NO_READRDY; if (host->reg_ndcr & NDCR_DWIDTH_M) chip->options |= NAND_BUSWIDTH_16; if (nand_scan_ident(mtd, 1, def)) return -ENODEV; /* calculate addressing information */ if (mtd->writesize >= 2048) host->col_addr_cycles = 2; else host->col_addr_cycles = 1; info->oob_buff = info->data_buff + mtd->writesize; if ((mtd->size >> chip->page_shift) > 65536) host->row_addr_cycles = 3; else host->row_addr_cycles = 2; mtd->name = mtd_names[0]; return nand_scan_tail(mtd); } static int alloc_nand_resource(struct platform_device *pdev) { struct pxa3xx_nand_platform_data *pdata; struct pxa3xx_nand_info *info; struct pxa3xx_nand_host *host; struct nand_chip *chip; struct mtd_info *mtd; struct resource *r; 
int ret, irq, cs; pdata = pdev->dev.platform_data; info = kzalloc(sizeof(*info) + (sizeof(*mtd) + sizeof(*host)) * pdata->num_cs, GFP_KERNEL); if (!info) { dev_err(&pdev->dev, "failed to allocate memory\n"); return -ENOMEM; } info->pdev = pdev; for (cs = 0; cs < pdata->num_cs; cs++) { mtd = (struct mtd_info *)((unsigned int)&info[1] + (sizeof(*mtd) + sizeof(*host)) * cs); chip = (struct nand_chip *)(&mtd[1]); host = (struct pxa3xx_nand_host *)chip; info->host[cs] = host; host->mtd = mtd; host->cs = cs; host->info_data = info; mtd->priv = host; mtd->owner = THIS_MODULE; chip->ecc.read_page = pxa3xx_nand_read_page_hwecc; chip->ecc.write_page = pxa3xx_nand_write_page_hwecc; chip->controller = &info->controller; chip->waitfunc = pxa3xx_nand_waitfunc; chip->select_chip = pxa3xx_nand_select_chip; chip->cmdfunc = pxa3xx_nand_cmdfunc; chip->read_word = pxa3xx_nand_read_word; chip->read_byte = pxa3xx_nand_read_byte; chip->read_buf = pxa3xx_nand_read_buf; chip->write_buf = pxa3xx_nand_write_buf; chip->verify_buf = pxa3xx_nand_verify_buf; } spin_lock_init(&chip->controller->lock); init_waitqueue_head(&chip->controller->wq); info->clk = clk_get(&pdev->dev, NULL); if (IS_ERR(info->clk)) { dev_err(&pdev->dev, "failed to get nand clock\n"); ret = PTR_ERR(info->clk); goto fail_free_mtd; } clk_enable(info->clk); r = platform_get_resource(pdev, IORESOURCE_DMA, 0); if (r == NULL) { dev_err(&pdev->dev, "no resource defined for data DMA\n"); ret = -ENXIO; goto fail_put_clk; } info->drcmr_dat = r->start; r = platform_get_resource(pdev, IORESOURCE_DMA, 1); if (r == NULL) { dev_err(&pdev->dev, "no resource defined for command DMA\n"); ret = -ENXIO; goto fail_put_clk; } info->drcmr_cmd = r->start; irq = platform_get_irq(pdev, 0); if (irq < 0) { dev_err(&pdev->dev, "no IRQ resource defined\n"); ret = -ENXIO; goto fail_put_clk; } r = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (r == NULL) { dev_err(&pdev->dev, "no IO memory resource defined\n"); ret = -ENODEV; goto fail_put_clk; } r = 
request_mem_region(r->start, resource_size(r), pdev->name); if (r == NULL) { dev_err(&pdev->dev, "failed to request memory resource\n"); ret = -EBUSY; goto fail_put_clk; } info->mmio_base = ioremap(r->start, resource_size(r)); if (info->mmio_base == NULL) { dev_err(&pdev->dev, "ioremap() failed\n"); ret = -ENODEV; goto fail_free_res; } info->mmio_phys = r->start; ret = pxa3xx_nand_init_buff(info); if (ret) goto fail_free_io; /* initialize all interrupts to be disabled */ disable_int(info, NDSR_MASK); ret = request_irq(irq, pxa3xx_nand_irq, IRQF_DISABLED, pdev->name, info); if (ret < 0) { dev_err(&pdev->dev, "failed to request IRQ\n"); goto fail_free_buf; } platform_set_drvdata(pdev, info); return 0; fail_free_buf: free_irq(irq, info); if (use_dma) { pxa_free_dma(info->data_dma_ch); dma_free_coherent(&pdev->dev, MAX_BUFF_SIZE, info->data_buff, info->data_buff_phys); } else kfree(info->data_buff); fail_free_io: iounmap(info->mmio_base); fail_free_res: release_mem_region(r->start, resource_size(r)); fail_put_clk: clk_disable(info->clk); clk_put(info->clk); fail_free_mtd: kfree(info); return ret; } static int pxa3xx_nand_remove(struct platform_device *pdev) { struct pxa3xx_nand_info *info = platform_get_drvdata(pdev); struct pxa3xx_nand_platform_data *pdata; struct resource *r; int irq, cs; if (!info) return 0; pdata = pdev->dev.platform_data; platform_set_drvdata(pdev, NULL); irq = platform_get_irq(pdev, 0); if (irq >= 0) free_irq(irq, info); if (use_dma) { pxa_free_dma(info->data_dma_ch); dma_free_writecombine(&pdev->dev, MAX_BUFF_SIZE, info->data_buff, info->data_buff_phys); } else kfree(info->data_buff); iounmap(info->mmio_base); r = platform_get_resource(pdev, IORESOURCE_MEM, 0); release_mem_region(r->start, resource_size(r)); clk_disable(info->clk); clk_put(info->clk); for (cs = 0; cs < pdata->num_cs; cs++) nand_release(info->host[cs]->mtd); kfree(info); return 0; } static int pxa3xx_nand_probe(struct platform_device *pdev) { struct pxa3xx_nand_platform_data 
*pdata; struct pxa3xx_nand_info *info; int ret, cs, probe_success; pdata = pdev->dev.platform_data; if (!pdata) { dev_err(&pdev->dev, "no platform data defined\n"); return -ENODEV; } ret = alloc_nand_resource(pdev); if (ret) { dev_err(&pdev->dev, "alloc nand resource failed\n"); return ret; } info = platform_get_drvdata(pdev); probe_success = 0; for (cs = 0; cs < pdata->num_cs; cs++) { info->cs = cs; ret = pxa3xx_nand_scan(info->host[cs]->mtd); if (ret) { dev_warn(&pdev->dev, "failed to scan nand at cs %d\n", cs); continue; } ret = mtd_device_parse_register(info->host[cs]->mtd, NULL, NULL, pdata->parts[cs], pdata->nr_parts[cs]); if (!ret) probe_success = 1; } if (!probe_success) { pxa3xx_nand_remove(pdev); return -ENODEV; } return 0; } #ifdef CONFIG_PM static int pxa3xx_nand_suspend(struct platform_device *pdev, pm_message_t state) { struct pxa3xx_nand_info *info = platform_get_drvdata(pdev); struct pxa3xx_nand_platform_data *pdata; struct mtd_info *mtd; int cs; pdata = pdev->dev.platform_data; if (info->state) { dev_err(&pdev->dev, "driver busy, state = %d\n", info->state); return -EAGAIN; } for (cs = 0; cs < pdata->num_cs; cs++) { mtd = info->host[cs]->mtd; mtd_suspend(mtd); } return 0; } static int pxa3xx_nand_resume(struct platform_device *pdev) { struct pxa3xx_nand_info *info = platform_get_drvdata(pdev); struct pxa3xx_nand_platform_data *pdata; struct mtd_info *mtd; int cs; pdata = pdev->dev.platform_data; /* We don't want to handle interrupt without calling mtd routine */ disable_int(info, NDCR_INT_MASK); /* * Directly set the chip select to a invalid value, * then the driver would reset the timing according * to current chip select at the beginning of cmdfunc */ info->cs = 0xff; /* * As the spec says, the NDSR would be updated to 0x1800 when * doing the nand_clk disable/enable. 
* To prevent it damaging state machine of the driver, clear * all status before resume */ nand_writel(info, NDSR, NDSR_MASK); for (cs = 0; cs < pdata->num_cs; cs++) { mtd = info->host[cs]->mtd; mtd_resume(mtd); } return 0; } #else #define pxa3xx_nand_suspend NULL #define pxa3xx_nand_resume NULL #endif static struct platform_driver pxa3xx_nand_driver = { .driver = { .name = "pxa3xx-nand", }, .probe = pxa3xx_nand_probe, .remove = pxa3xx_nand_remove, .suspend = pxa3xx_nand_suspend, .resume = pxa3xx_nand_resume, }; module_platform_driver(pxa3xx_nand_driver); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("PXA3xx NAND controller driver");
gpl-2.0
gianmarcorev/rpi_linux
kernel/torture.c
752
20332
/* * Common functions for in-kernel torture tests. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, you can access it online at * http://www.gnu.org/licenses/gpl-2.0.html. * * Copyright (C) IBM Corporation, 2014 * * Author: Paul E. McKenney <paulmck@us.ibm.com> * Based on kernel/rcu/torture.c. */ #include <linux/types.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/module.h> #include <linux/kthread.h> #include <linux/err.h> #include <linux/spinlock.h> #include <linux/smp.h> #include <linux/interrupt.h> #include <linux/sched.h> #include <linux/atomic.h> #include <linux/bitops.h> #include <linux/completion.h> #include <linux/moduleparam.h> #include <linux/percpu.h> #include <linux/notifier.h> #include <linux/reboot.h> #include <linux/freezer.h> #include <linux/cpu.h> #include <linux/delay.h> #include <linux/stat.h> #include <linux/slab.h> #include <linux/trace_clock.h> #include <asm/byteorder.h> #include <linux/torture.h> MODULE_LICENSE("GPL"); MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com>"); static char *torture_type; static bool verbose; /* Mediate rmmod and system shutdown. Concurrent rmmod & shutdown illegal! */ #define FULLSTOP_DONTSTOP 0 /* Normal operation. */ #define FULLSTOP_SHUTDOWN 1 /* System shutdown with torture running. */ #define FULLSTOP_RMMOD 2 /* Normal rmmod of torture. 
*/ static int fullstop = FULLSTOP_RMMOD; static DEFINE_MUTEX(fullstop_mutex); static int *torture_runnable; #ifdef CONFIG_HOTPLUG_CPU /* * Variables for online-offline handling. Only present if CPU hotplug * is enabled, otherwise does nothing. */ static struct task_struct *onoff_task; static long onoff_holdoff; static long onoff_interval; static long n_offline_attempts; static long n_offline_successes; static unsigned long sum_offline; static int min_offline = -1; static int max_offline; static long n_online_attempts; static long n_online_successes; static unsigned long sum_online; static int min_online = -1; static int max_online; /* * Execute random CPU-hotplug operations at the interval specified * by the onoff_interval. */ static int torture_onoff(void *arg) { int cpu; unsigned long delta; int maxcpu = -1; DEFINE_TORTURE_RANDOM(rand); int ret; unsigned long starttime; VERBOSE_TOROUT_STRING("torture_onoff task started"); for_each_online_cpu(cpu) maxcpu = cpu; WARN_ON(maxcpu < 0); if (onoff_holdoff > 0) { VERBOSE_TOROUT_STRING("torture_onoff begin holdoff"); schedule_timeout_interruptible(onoff_holdoff); VERBOSE_TOROUT_STRING("torture_onoff end holdoff"); } while (!torture_must_stop()) { cpu = (torture_random(&rand) >> 4) % (maxcpu + 1); if (cpu_online(cpu) && cpu_is_hotpluggable(cpu)) { if (verbose) pr_alert("%s" TORTURE_FLAG "torture_onoff task: offlining %d\n", torture_type, cpu); starttime = jiffies; n_offline_attempts++; ret = cpu_down(cpu); if (ret) { if (verbose) pr_alert("%s" TORTURE_FLAG "torture_onoff task: offline %d failed: errno %d\n", torture_type, cpu, ret); } else { if (verbose) pr_alert("%s" TORTURE_FLAG "torture_onoff task: offlined %d\n", torture_type, cpu); n_offline_successes++; delta = jiffies - starttime; sum_offline += delta; if (min_offline < 0) { min_offline = delta; max_offline = delta; } if (min_offline > delta) min_offline = delta; if (max_offline < delta) max_offline = delta; } } else if (cpu_is_hotpluggable(cpu)) { if (verbose) 
pr_alert("%s" TORTURE_FLAG "torture_onoff task: onlining %d\n", torture_type, cpu); starttime = jiffies; n_online_attempts++; ret = cpu_up(cpu); if (ret) { if (verbose) pr_alert("%s" TORTURE_FLAG "torture_onoff task: online %d failed: errno %d\n", torture_type, cpu, ret); } else { if (verbose) pr_alert("%s" TORTURE_FLAG "torture_onoff task: onlined %d\n", torture_type, cpu); n_online_successes++; delta = jiffies - starttime; sum_online += delta; if (min_online < 0) { min_online = delta; max_online = delta; } if (min_online > delta) min_online = delta; if (max_online < delta) max_online = delta; } } schedule_timeout_interruptible(onoff_interval); } torture_kthread_stopping("torture_onoff"); return 0; } #endif /* #ifdef CONFIG_HOTPLUG_CPU */ /* * Initiate online-offline handling. */ int torture_onoff_init(long ooholdoff, long oointerval) { int ret = 0; #ifdef CONFIG_HOTPLUG_CPU onoff_holdoff = ooholdoff; onoff_interval = oointerval; if (onoff_interval <= 0) return 0; ret = torture_create_kthread(torture_onoff, NULL, onoff_task); #endif /* #ifdef CONFIG_HOTPLUG_CPU */ return ret; } EXPORT_SYMBOL_GPL(torture_onoff_init); /* * Clean up after online/offline testing. */ static void torture_onoff_cleanup(void) { #ifdef CONFIG_HOTPLUG_CPU if (onoff_task == NULL) return; VERBOSE_TOROUT_STRING("Stopping torture_onoff task"); kthread_stop(onoff_task); onoff_task = NULL; #endif /* #ifdef CONFIG_HOTPLUG_CPU */ } EXPORT_SYMBOL_GPL(torture_onoff_cleanup); /* * Print online/offline testing statistics. */ void torture_onoff_stats(void) { #ifdef CONFIG_HOTPLUG_CPU pr_cont("onoff: %ld/%ld:%ld/%ld %d,%d:%d,%d %lu:%lu (HZ=%d) ", n_online_successes, n_online_attempts, n_offline_successes, n_offline_attempts, min_online, max_online, min_offline, max_offline, sum_online, sum_offline, HZ); #endif /* #ifdef CONFIG_HOTPLUG_CPU */ } EXPORT_SYMBOL_GPL(torture_onoff_stats); /* * Were all the online/offline operations successful? 
*/ bool torture_onoff_failures(void) { #ifdef CONFIG_HOTPLUG_CPU return n_online_successes != n_online_attempts || n_offline_successes != n_offline_attempts; #else /* #ifdef CONFIG_HOTPLUG_CPU */ return false; #endif /* #else #ifdef CONFIG_HOTPLUG_CPU */ } EXPORT_SYMBOL_GPL(torture_onoff_failures); #define TORTURE_RANDOM_MULT 39916801 /* prime */ #define TORTURE_RANDOM_ADD 479001701 /* prime */ #define TORTURE_RANDOM_REFRESH 10000 /* * Crude but fast random-number generator. Uses a linear congruential * generator, with occasional help from cpu_clock(). */ unsigned long torture_random(struct torture_random_state *trsp) { if (--trsp->trs_count < 0) { trsp->trs_state += (unsigned long)local_clock(); trsp->trs_count = TORTURE_RANDOM_REFRESH; } trsp->trs_state = trsp->trs_state * TORTURE_RANDOM_MULT + TORTURE_RANDOM_ADD; return swahw32(trsp->trs_state); } EXPORT_SYMBOL_GPL(torture_random); /* * Variables for shuffling. The idea is to ensure that each CPU stays * idle for an extended period to test interactions with dyntick idle, * as well as interactions with any per-CPU varibles. */ struct shuffle_task { struct list_head st_l; struct task_struct *st_t; }; static long shuffle_interval; /* In jiffies. */ static struct task_struct *shuffler_task; static cpumask_var_t shuffle_tmp_mask; static int shuffle_idle_cpu; /* Force all torture tasks off this CPU */ static struct list_head shuffle_task_list = LIST_HEAD_INIT(shuffle_task_list); static DEFINE_MUTEX(shuffle_task_mutex); /* * Register a task to be shuffled. If there is no memory, just splat * and don't bother registering. 
*/ void torture_shuffle_task_register(struct task_struct *tp) { struct shuffle_task *stp; if (WARN_ON_ONCE(tp == NULL)) return; stp = kmalloc(sizeof(*stp), GFP_KERNEL); if (WARN_ON_ONCE(stp == NULL)) return; stp->st_t = tp; mutex_lock(&shuffle_task_mutex); list_add(&stp->st_l, &shuffle_task_list); mutex_unlock(&shuffle_task_mutex); } EXPORT_SYMBOL_GPL(torture_shuffle_task_register); /* * Unregister all tasks, for example, at the end of the torture run. */ static void torture_shuffle_task_unregister_all(void) { struct shuffle_task *stp; struct shuffle_task *p; mutex_lock(&shuffle_task_mutex); list_for_each_entry_safe(stp, p, &shuffle_task_list, st_l) { list_del(&stp->st_l); kfree(stp); } mutex_unlock(&shuffle_task_mutex); } /* Shuffle tasks such that we allow shuffle_idle_cpu to become idle. * A special case is when shuffle_idle_cpu = -1, in which case we allow * the tasks to run on all CPUs. */ static void torture_shuffle_tasks(void) { struct shuffle_task *stp; cpumask_setall(shuffle_tmp_mask); get_online_cpus(); /* No point in shuffling if there is only one online CPU (ex: UP) */ if (num_online_cpus() == 1) { put_online_cpus(); return; } /* Advance to the next CPU. Upon overflow, don't idle any CPUs. */ shuffle_idle_cpu = cpumask_next(shuffle_idle_cpu, shuffle_tmp_mask); if (shuffle_idle_cpu >= nr_cpu_ids) shuffle_idle_cpu = -1; else cpumask_clear_cpu(shuffle_idle_cpu, shuffle_tmp_mask); mutex_lock(&shuffle_task_mutex); list_for_each_entry(stp, &shuffle_task_list, st_l) set_cpus_allowed_ptr(stp->st_t, shuffle_tmp_mask); mutex_unlock(&shuffle_task_mutex); put_online_cpus(); } /* Shuffle tasks across CPUs, with the intent of allowing each CPU in the * system to become idle at a time and cut off its timer ticks. This is meant * to test the support for such tickless idle CPU in RCU. 
*/ static int torture_shuffle(void *arg) { VERBOSE_TOROUT_STRING("torture_shuffle task started"); do { schedule_timeout_interruptible(shuffle_interval); torture_shuffle_tasks(); torture_shutdown_absorb("torture_shuffle"); } while (!torture_must_stop()); torture_kthread_stopping("torture_shuffle"); return 0; } /* * Start the shuffler, with shuffint in jiffies. */ int torture_shuffle_init(long shuffint) { shuffle_interval = shuffint; shuffle_idle_cpu = -1; if (!alloc_cpumask_var(&shuffle_tmp_mask, GFP_KERNEL)) { VERBOSE_TOROUT_ERRSTRING("Failed to alloc mask"); return -ENOMEM; } /* Create the shuffler thread */ return torture_create_kthread(torture_shuffle, NULL, shuffler_task); } EXPORT_SYMBOL_GPL(torture_shuffle_init); /* * Stop the shuffling. */ static void torture_shuffle_cleanup(void) { torture_shuffle_task_unregister_all(); if (shuffler_task) { VERBOSE_TOROUT_STRING("Stopping torture_shuffle task"); kthread_stop(shuffler_task); free_cpumask_var(shuffle_tmp_mask); } shuffler_task = NULL; } EXPORT_SYMBOL_GPL(torture_shuffle_cleanup); /* * Variables for auto-shutdown. This allows "lights out" torture runs * to be fully scripted. */ static int shutdown_secs; /* desired test duration in seconds. */ static struct task_struct *shutdown_task; static unsigned long shutdown_time; /* jiffies to system shutdown. */ static void (*torture_shutdown_hook)(void); /* * Absorb kthreads into a kernel function that won't return, so that * they won't ever access module text or data again. */ void torture_shutdown_absorb(const char *title) { while (ACCESS_ONCE(fullstop) == FULLSTOP_SHUTDOWN) { pr_notice("torture thread %s parking due to system shutdown\n", title); schedule_timeout_uninterruptible(MAX_SCHEDULE_TIMEOUT); } } EXPORT_SYMBOL_GPL(torture_shutdown_absorb); /* * Cause the torture test to shutdown the system after the test has * run for the time specified by the shutdown_secs parameter. 
*/ static int torture_shutdown(void *arg) { long delta; unsigned long jiffies_snap; VERBOSE_TOROUT_STRING("torture_shutdown task started"); jiffies_snap = jiffies; while (ULONG_CMP_LT(jiffies_snap, shutdown_time) && !torture_must_stop()) { delta = shutdown_time - jiffies_snap; if (verbose) pr_alert("%s" TORTURE_FLAG "torture_shutdown task: %lu jiffies remaining\n", torture_type, delta); schedule_timeout_interruptible(delta); jiffies_snap = jiffies; } if (torture_must_stop()) { torture_kthread_stopping("torture_shutdown"); return 0; } /* OK, shut down the system. */ VERBOSE_TOROUT_STRING("torture_shutdown task shutting down system"); shutdown_task = NULL; /* Avoid self-kill deadlock. */ if (torture_shutdown_hook) torture_shutdown_hook(); else VERBOSE_TOROUT_STRING("No torture_shutdown_hook(), skipping."); kernel_power_off(); /* Shut down the system. */ return 0; } /* * Start up the shutdown task. */ int torture_shutdown_init(int ssecs, void (*cleanup)(void)) { int ret = 0; shutdown_secs = ssecs; torture_shutdown_hook = cleanup; if (shutdown_secs > 0) { shutdown_time = jiffies + shutdown_secs * HZ; ret = torture_create_kthread(torture_shutdown, NULL, shutdown_task); } return ret; } EXPORT_SYMBOL_GPL(torture_shutdown_init); /* * Detect and respond to a system shutdown. */ static int torture_shutdown_notify(struct notifier_block *unused1, unsigned long unused2, void *unused3) { mutex_lock(&fullstop_mutex); if (ACCESS_ONCE(fullstop) == FULLSTOP_DONTSTOP) { VERBOSE_TOROUT_STRING("Unscheduled system shutdown detected"); ACCESS_ONCE(fullstop) = FULLSTOP_SHUTDOWN; } else { pr_warn("Concurrent rmmod and shutdown illegal!\n"); } mutex_unlock(&fullstop_mutex); return NOTIFY_DONE; } static struct notifier_block torture_shutdown_nb = { .notifier_call = torture_shutdown_notify, }; /* * Shut down the shutdown task. Say what??? Heh! This can happen if * the torture module gets an rmmod before the shutdown time arrives. 
;-) */ static void torture_shutdown_cleanup(void) { unregister_reboot_notifier(&torture_shutdown_nb); if (shutdown_task != NULL) { VERBOSE_TOROUT_STRING("Stopping torture_shutdown task"); kthread_stop(shutdown_task); } shutdown_task = NULL; } /* * Variables for stuttering, which means to periodically pause and * restart testing in order to catch bugs that appear when load is * suddenly applied to or removed from the system. */ static struct task_struct *stutter_task; static int stutter_pause_test; static int stutter; /* * Block until the stutter interval ends. This must be called periodically * by all running kthreads that need to be subject to stuttering. */ void stutter_wait(const char *title) { while (ACCESS_ONCE(stutter_pause_test) || (torture_runnable && !ACCESS_ONCE(*torture_runnable))) { if (stutter_pause_test) if (ACCESS_ONCE(stutter_pause_test) == 1) schedule_timeout_interruptible(1); else while (ACCESS_ONCE(stutter_pause_test)) cond_resched(); else schedule_timeout_interruptible(round_jiffies_relative(HZ)); torture_shutdown_absorb(title); } } EXPORT_SYMBOL_GPL(stutter_wait); /* * Cause the torture test to "stutter", starting and stopping all * threads periodically. */ static int torture_stutter(void *arg) { VERBOSE_TOROUT_STRING("torture_stutter task started"); do { if (!torture_must_stop()) { if (stutter > 1) { schedule_timeout_interruptible(stutter - 1); ACCESS_ONCE(stutter_pause_test) = 2; } schedule_timeout_interruptible(1); ACCESS_ONCE(stutter_pause_test) = 1; } if (!torture_must_stop()) schedule_timeout_interruptible(stutter); ACCESS_ONCE(stutter_pause_test) = 0; torture_shutdown_absorb("torture_stutter"); } while (!torture_must_stop()); torture_kthread_stopping("torture_stutter"); return 0; } /* * Initialize and kick off the torture_stutter kthread. 
*/ int torture_stutter_init(int s) { int ret; stutter = s; ret = torture_create_kthread(torture_stutter, NULL, stutter_task); return ret; } EXPORT_SYMBOL_GPL(torture_stutter_init); /* * Cleanup after the torture_stutter kthread. */ static void torture_stutter_cleanup(void) { if (!stutter_task) return; VERBOSE_TOROUT_STRING("Stopping torture_stutter task"); kthread_stop(stutter_task); stutter_task = NULL; } /* * Initialize torture module. Please note that this is -not- invoked via * the usual module_init() mechanism, but rather by an explicit call from * the client torture module. This call must be paired with a later * torture_init_end(). * * The runnable parameter points to a flag that controls whether or not * the test is currently runnable. If there is no such flag, pass in NULL. */ bool torture_init_begin(char *ttype, bool v, int *runnable) { mutex_lock(&fullstop_mutex); if (torture_type != NULL) { pr_alert("torture_init_begin: refusing %s init: %s running", ttype, torture_type); mutex_unlock(&fullstop_mutex); return false; } torture_type = ttype; verbose = v; torture_runnable = runnable; fullstop = FULLSTOP_DONTSTOP; return true; } EXPORT_SYMBOL_GPL(torture_init_begin); /* * Tell the torture module that initialization is complete. */ void torture_init_end(void) { mutex_unlock(&fullstop_mutex); register_reboot_notifier(&torture_shutdown_nb); } EXPORT_SYMBOL_GPL(torture_init_end); /* * Clean up torture module. Please note that this is -not- invoked via * the usual module_exit() mechanism, but rather by an explicit call from * the client torture module. Returns true if a race with system shutdown * is detected, otherwise, all kthreads started by functions in this file * will be shut down. * * This must be called before the caller starts shutting down its own * kthreads. * * Both torture_cleanup_begin() and torture_cleanup_end() must be paired, * in order to correctly perform the cleanup. 
They are separated because * threads can still need to reference the torture_type type, thus nullify * only after completing all other relevant calls. */ bool torture_cleanup_begin(void) { mutex_lock(&fullstop_mutex); if (ACCESS_ONCE(fullstop) == FULLSTOP_SHUTDOWN) { pr_warn("Concurrent rmmod and shutdown illegal!\n"); mutex_unlock(&fullstop_mutex); schedule_timeout_uninterruptible(10); return true; } ACCESS_ONCE(fullstop) = FULLSTOP_RMMOD; mutex_unlock(&fullstop_mutex); torture_shutdown_cleanup(); torture_shuffle_cleanup(); torture_stutter_cleanup(); torture_onoff_cleanup(); return false; } EXPORT_SYMBOL_GPL(torture_cleanup_begin); void torture_cleanup_end(void) { mutex_lock(&fullstop_mutex); torture_type = NULL; mutex_unlock(&fullstop_mutex); } EXPORT_SYMBOL_GPL(torture_cleanup_end); /* * Is it time for the current torture test to stop? */ bool torture_must_stop(void) { return torture_must_stop_irq() || kthread_should_stop(); } EXPORT_SYMBOL_GPL(torture_must_stop); /* * Is it time for the current torture test to stop? This is the irq-safe * version, hence no check for kthread_should_stop(). */ bool torture_must_stop_irq(void) { return ACCESS_ONCE(fullstop) != FULLSTOP_DONTSTOP; } EXPORT_SYMBOL_GPL(torture_must_stop_irq); /* * Each kthread must wait for kthread_should_stop() before returning from * its top-level function, otherwise segfaults ensue. This function * prints a "stopping" message and waits for kthread_should_stop(), and * should be called from all torture kthreads immediately prior to * returning. */ void torture_kthread_stopping(char *title) { char buf[128]; snprintf(buf, sizeof(buf), "Stopping %s", title); VERBOSE_TOROUT_STRING(buf); while (!kthread_should_stop()) { torture_shutdown_absorb(title); schedule_timeout_uninterruptible(1); } } EXPORT_SYMBOL_GPL(torture_kthread_stopping); /* * Create a generic torture kthread that is immediately runnable. 
If you
 * need the kthread to be stopped so that you can do something to it before
 * it starts, you will need to open-code your own.
 */
int _torture_create_kthread(int (*fn)(void *arg), void *arg, char *s, char *m,
			    char *f, struct task_struct **tp)
{
	int ret = 0;

	VERBOSE_TOROUT_STRING(m);
	*tp = kthread_run(fn, arg, "%s", s);
	if (IS_ERR(*tp)) {
		ret = PTR_ERR(*tp);
		VERBOSE_TOROUT_ERRSTRING(f);
		*tp = NULL;
		/*
		 * Return early: registering a NULL task below would fire
		 * torture_shuffle_task_register()'s WARN_ON_ONCE() on every
		 * ordinary kthread-creation failure.
		 */
		return ret;
	}
	torture_shuffle_task_register(*tp);
	return ret;
}
EXPORT_SYMBOL_GPL(_torture_create_kthread);

/*
 * Stop a generic kthread, emitting a message.  A NULL *tp (never started
 * or already stopped) is a no-op; *tp is NULLed after the stop.
 */
void _torture_stop_kthread(char *m, struct task_struct **tp)
{
	if (*tp == NULL)
		return;
	VERBOSE_TOROUT_STRING(m);
	kthread_stop(*tp);
	*tp = NULL;
}
EXPORT_SYMBOL_GPL(_torture_stop_kthread);
gpl-2.0
SanziShi/KVMGT-kernel
drivers/s390/net/qeth_l3_sys.c
1008
27372
/* * Copyright IBM Corp. 2007 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>, * Frank Pavlic <fpavlic@de.ibm.com>, * Thomas Spatzier <tspat@de.ibm.com>, * Frank Blaschka <frank.blaschka@de.ibm.com> */ #include <linux/slab.h> #include <asm/ebcdic.h> #include "qeth_l3.h" #define QETH_DEVICE_ATTR(_id, _name, _mode, _show, _store) \ struct device_attribute dev_attr_##_id = __ATTR(_name, _mode, _show, _store) static ssize_t qeth_l3_dev_route_show(struct qeth_card *card, struct qeth_routing_info *route, char *buf) { switch (route->type) { case PRIMARY_ROUTER: return sprintf(buf, "%s\n", "primary router"); case SECONDARY_ROUTER: return sprintf(buf, "%s\n", "secondary router"); case MULTICAST_ROUTER: if (card->info.broadcast_capable == QETH_BROADCAST_WITHOUT_ECHO) return sprintf(buf, "%s\n", "multicast router+"); else return sprintf(buf, "%s\n", "multicast router"); case PRIMARY_CONNECTOR: if (card->info.broadcast_capable == QETH_BROADCAST_WITHOUT_ECHO) return sprintf(buf, "%s\n", "primary connector+"); else return sprintf(buf, "%s\n", "primary connector"); case SECONDARY_CONNECTOR: if (card->info.broadcast_capable == QETH_BROADCAST_WITHOUT_ECHO) return sprintf(buf, "%s\n", "secondary connector+"); else return sprintf(buf, "%s\n", "secondary connector"); default: return sprintf(buf, "%s\n", "no"); } } static ssize_t qeth_l3_dev_route4_show(struct device *dev, struct device_attribute *attr, char *buf) { struct qeth_card *card = dev_get_drvdata(dev); if (!card) return -EINVAL; return qeth_l3_dev_route_show(card, &card->options.route4, buf); } static ssize_t qeth_l3_dev_route_store(struct qeth_card *card, struct qeth_routing_info *route, enum qeth_prot_versions prot, const char *buf, size_t count) { enum qeth_routing_types old_route_type = route->type; char *tmp; int rc = 0; tmp = strsep((char **) &buf, "\n"); mutex_lock(&card->conf_mutex); if (!strcmp(tmp, "no_router")) { route->type = NO_ROUTER; } else if (!strcmp(tmp, "primary_connector")) { route->type = 
PRIMARY_CONNECTOR; } else if (!strcmp(tmp, "secondary_connector")) { route->type = SECONDARY_CONNECTOR; } else if (!strcmp(tmp, "primary_router")) { route->type = PRIMARY_ROUTER; } else if (!strcmp(tmp, "secondary_router")) { route->type = SECONDARY_ROUTER; } else if (!strcmp(tmp, "multicast_router")) { route->type = MULTICAST_ROUTER; } else { rc = -EINVAL; goto out; } if (((card->state == CARD_STATE_SOFTSETUP) || (card->state == CARD_STATE_UP)) && (old_route_type != route->type)) { if (prot == QETH_PROT_IPV4) rc = qeth_l3_setrouting_v4(card); else if (prot == QETH_PROT_IPV6) rc = qeth_l3_setrouting_v6(card); } out: if (rc) route->type = old_route_type; mutex_unlock(&card->conf_mutex); return rc ? rc : count; } static ssize_t qeth_l3_dev_route4_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct qeth_card *card = dev_get_drvdata(dev); if (!card) return -EINVAL; return qeth_l3_dev_route_store(card, &card->options.route4, QETH_PROT_IPV4, buf, count); } static DEVICE_ATTR(route4, 0644, qeth_l3_dev_route4_show, qeth_l3_dev_route4_store); static ssize_t qeth_l3_dev_route6_show(struct device *dev, struct device_attribute *attr, char *buf) { struct qeth_card *card = dev_get_drvdata(dev); if (!card) return -EINVAL; return qeth_l3_dev_route_show(card, &card->options.route6, buf); } static ssize_t qeth_l3_dev_route6_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct qeth_card *card = dev_get_drvdata(dev); if (!card) return -EINVAL; return qeth_l3_dev_route_store(card, &card->options.route6, QETH_PROT_IPV6, buf, count); } static DEVICE_ATTR(route6, 0644, qeth_l3_dev_route6_show, qeth_l3_dev_route6_store); static ssize_t qeth_l3_dev_fake_broadcast_show(struct device *dev, struct device_attribute *attr, char *buf) { struct qeth_card *card = dev_get_drvdata(dev); if (!card) return -EINVAL; return sprintf(buf, "%i\n", card->options.fake_broadcast? 
1:0); } static ssize_t qeth_l3_dev_fake_broadcast_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct qeth_card *card = dev_get_drvdata(dev); char *tmp; int i, rc = 0; if (!card) return -EINVAL; mutex_lock(&card->conf_mutex); if ((card->state != CARD_STATE_DOWN) && (card->state != CARD_STATE_RECOVER)) { rc = -EPERM; goto out; } i = simple_strtoul(buf, &tmp, 16); if ((i == 0) || (i == 1)) card->options.fake_broadcast = i; else rc = -EINVAL; out: mutex_unlock(&card->conf_mutex); return rc ? rc : count; } static DEVICE_ATTR(fake_broadcast, 0644, qeth_l3_dev_fake_broadcast_show, qeth_l3_dev_fake_broadcast_store); static ssize_t qeth_l3_dev_sniffer_show(struct device *dev, struct device_attribute *attr, char *buf) { struct qeth_card *card = dev_get_drvdata(dev); if (!card) return -EINVAL; return sprintf(buf, "%i\n", card->options.sniffer ? 1 : 0); } static ssize_t qeth_l3_dev_sniffer_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct qeth_card *card = dev_get_drvdata(dev); int rc = 0; unsigned long i; if (!card) return -EINVAL; if (card->info.type != QETH_CARD_TYPE_IQD) return -EPERM; if (card->options.cq == QETH_CQ_ENABLED) return -EPERM; mutex_lock(&card->conf_mutex); if ((card->state != CARD_STATE_DOWN) && (card->state != CARD_STATE_RECOVER)) { rc = -EPERM; goto out; } rc = kstrtoul(buf, 16, &i); if (rc) { rc = -EINVAL; goto out; } switch (i) { case 0: card->options.sniffer = i; break; case 1: qdio_get_ssqd_desc(CARD_DDEV(card), &card->ssqd); if (card->ssqd.qdioac2 & QETH_SNIFF_AVAIL) { card->options.sniffer = i; if (card->qdio.init_pool.buf_count != QETH_IN_BUF_COUNT_MAX) qeth_realloc_buffer_pool(card, QETH_IN_BUF_COUNT_MAX); } else rc = -EPERM; break; default: rc = -EINVAL; } out: mutex_unlock(&card->conf_mutex); return rc ? 
rc : count; } static DEVICE_ATTR(sniffer, 0644, qeth_l3_dev_sniffer_show, qeth_l3_dev_sniffer_store); static ssize_t qeth_l3_dev_hsuid_show(struct device *dev, struct device_attribute *attr, char *buf) { struct qeth_card *card = dev_get_drvdata(dev); char tmp_hsuid[9]; if (!card) return -EINVAL; if (card->info.type != QETH_CARD_TYPE_IQD) return -EPERM; if (card->state == CARD_STATE_DOWN) return -EPERM; memcpy(tmp_hsuid, card->options.hsuid, sizeof(tmp_hsuid)); EBCASC(tmp_hsuid, 8); return sprintf(buf, "%s\n", tmp_hsuid); } static ssize_t qeth_l3_dev_hsuid_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct qeth_card *card = dev_get_drvdata(dev); struct qeth_ipaddr *addr; char *tmp; int i; if (!card) return -EINVAL; if (card->info.type != QETH_CARD_TYPE_IQD) return -EPERM; if (card->state != CARD_STATE_DOWN && card->state != CARD_STATE_RECOVER) return -EPERM; if (card->options.sniffer) return -EPERM; if (card->options.cq == QETH_CQ_NOTAVAILABLE) return -EPERM; tmp = strsep((char **)&buf, "\n"); if (strlen(tmp) > 8) return -EINVAL; if (card->options.hsuid[0]) { /* delete old ip address */ addr = qeth_l3_get_addr_buffer(QETH_PROT_IPV6); if (addr != NULL) { addr->u.a6.addr.s6_addr32[0] = 0xfe800000; addr->u.a6.addr.s6_addr32[1] = 0x00000000; for (i = 8; i < 16; i++) addr->u.a6.addr.s6_addr[i] = card->options.hsuid[i - 8]; addr->u.a6.pfxlen = 0; addr->type = QETH_IP_TYPE_NORMAL; } else return -ENOMEM; if (!qeth_l3_delete_ip(card, addr)) kfree(addr); qeth_l3_set_ip_addr_list(card); } if (strlen(tmp) == 0) { /* delete ip address only */ card->options.hsuid[0] = '\0'; if (card->dev) memcpy(card->dev->perm_addr, card->options.hsuid, 9); qeth_configure_cq(card, QETH_CQ_DISABLED); return count; } if (qeth_configure_cq(card, QETH_CQ_ENABLED)) return -EPERM; snprintf(card->options.hsuid, sizeof(card->options.hsuid), "%-8s", tmp); ASCEBC(card->options.hsuid, 8); if (card->dev) memcpy(card->dev->perm_addr, card->options.hsuid, 9); addr 
= qeth_l3_get_addr_buffer(QETH_PROT_IPV6); if (addr != NULL) { addr->u.a6.addr.s6_addr32[0] = 0xfe800000; addr->u.a6.addr.s6_addr32[1] = 0x00000000; for (i = 8; i < 16; i++) addr->u.a6.addr.s6_addr[i] = card->options.hsuid[i - 8]; addr->u.a6.pfxlen = 0; addr->type = QETH_IP_TYPE_NORMAL; } else return -ENOMEM; if (!qeth_l3_add_ip(card, addr)) kfree(addr); qeth_l3_set_ip_addr_list(card); return count; } static DEVICE_ATTR(hsuid, 0644, qeth_l3_dev_hsuid_show, qeth_l3_dev_hsuid_store); static struct attribute *qeth_l3_device_attrs[] = { &dev_attr_route4.attr, &dev_attr_route6.attr, &dev_attr_fake_broadcast.attr, &dev_attr_sniffer.attr, &dev_attr_hsuid.attr, NULL, }; static struct attribute_group qeth_l3_device_attr_group = { .attrs = qeth_l3_device_attrs, }; static ssize_t qeth_l3_dev_ipato_enable_show(struct device *dev, struct device_attribute *attr, char *buf) { struct qeth_card *card = dev_get_drvdata(dev); if (!card) return -EINVAL; return sprintf(buf, "%i\n", card->ipato.enabled? 1:0); } static ssize_t qeth_l3_dev_ipato_enable_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct qeth_card *card = dev_get_drvdata(dev); struct qeth_ipaddr *tmpipa, *t; char *tmp; int rc = 0; if (!card) return -EINVAL; mutex_lock(&card->conf_mutex); if ((card->state != CARD_STATE_DOWN) && (card->state != CARD_STATE_RECOVER)) { rc = -EPERM; goto out; } tmp = strsep((char **) &buf, "\n"); if (!strcmp(tmp, "toggle")) { card->ipato.enabled = (card->ipato.enabled)? 
0 : 1; } else if (!strcmp(tmp, "1")) { card->ipato.enabled = 1; list_for_each_entry_safe(tmpipa, t, card->ip_tbd_list, entry) { if ((tmpipa->type == QETH_IP_TYPE_NORMAL) && qeth_l3_is_addr_covered_by_ipato(card, tmpipa)) tmpipa->set_flags |= QETH_IPA_SETIP_TAKEOVER_FLAG; } } else if (!strcmp(tmp, "0")) { card->ipato.enabled = 0; list_for_each_entry_safe(tmpipa, t, card->ip_tbd_list, entry) { if (tmpipa->set_flags & QETH_IPA_SETIP_TAKEOVER_FLAG) tmpipa->set_flags &= ~QETH_IPA_SETIP_TAKEOVER_FLAG; } } else rc = -EINVAL; out: mutex_unlock(&card->conf_mutex); return rc ? rc : count; } static QETH_DEVICE_ATTR(ipato_enable, enable, 0644, qeth_l3_dev_ipato_enable_show, qeth_l3_dev_ipato_enable_store); static ssize_t qeth_l3_dev_ipato_invert4_show(struct device *dev, struct device_attribute *attr, char *buf) { struct qeth_card *card = dev_get_drvdata(dev); if (!card) return -EINVAL; return sprintf(buf, "%i\n", card->ipato.invert4? 1:0); } static ssize_t qeth_l3_dev_ipato_invert4_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct qeth_card *card = dev_get_drvdata(dev); char *tmp; int rc = 0; if (!card) return -EINVAL; mutex_lock(&card->conf_mutex); tmp = strsep((char **) &buf, "\n"); if (!strcmp(tmp, "toggle")) { card->ipato.invert4 = (card->ipato.invert4)? 0 : 1; } else if (!strcmp(tmp, "1")) { card->ipato.invert4 = 1; } else if (!strcmp(tmp, "0")) { card->ipato.invert4 = 0; } else rc = -EINVAL; mutex_unlock(&card->conf_mutex); return rc ? rc : count; } static QETH_DEVICE_ATTR(ipato_invert4, invert4, 0644, qeth_l3_dev_ipato_invert4_show, qeth_l3_dev_ipato_invert4_store); static ssize_t qeth_l3_dev_ipato_add_show(char *buf, struct qeth_card *card, enum qeth_prot_versions proto) { struct qeth_ipato_entry *ipatoe; unsigned long flags; char addr_str[40]; int entry_len; /* length of 1 entry string, differs between v4 and v6 */ int i = 0; entry_len = (proto == QETH_PROT_IPV4)? 
12 : 40; /* add strlen for "/<mask>\n" */ entry_len += (proto == QETH_PROT_IPV4)? 5 : 6; spin_lock_irqsave(&card->ip_lock, flags); list_for_each_entry(ipatoe, &card->ipato.entries, entry) { if (ipatoe->proto != proto) continue; /* String must not be longer than PAGE_SIZE. So we check if * string length gets near PAGE_SIZE. Then we can savely display * the next IPv6 address (worst case, compared to IPv4) */ if ((PAGE_SIZE - i) <= entry_len) break; qeth_l3_ipaddr_to_string(proto, ipatoe->addr, addr_str); i += snprintf(buf + i, PAGE_SIZE - i, "%s/%i\n", addr_str, ipatoe->mask_bits); } spin_unlock_irqrestore(&card->ip_lock, flags); i += snprintf(buf + i, PAGE_SIZE - i, "\n"); return i; } static ssize_t qeth_l3_dev_ipato_add4_show(struct device *dev, struct device_attribute *attr, char *buf) { struct qeth_card *card = dev_get_drvdata(dev); if (!card) return -EINVAL; return qeth_l3_dev_ipato_add_show(buf, card, QETH_PROT_IPV4); } static int qeth_l3_parse_ipatoe(const char *buf, enum qeth_prot_versions proto, u8 *addr, int *mask_bits) { const char *start, *end; char *tmp; char buffer[40] = {0, }; start = buf; /* get address string */ end = strchr(start, '/'); if (!end || (end - start >= 40)) { return -EINVAL; } strncpy(buffer, start, end - start); if (qeth_l3_string_to_ipaddr(buffer, proto, addr)) { return -EINVAL; } start = end + 1; *mask_bits = simple_strtoul(start, &tmp, 10); if (!strlen(start) || (tmp == start) || (*mask_bits > ((proto == QETH_PROT_IPV4) ? 
32 : 128))) { return -EINVAL; } return 0; } static ssize_t qeth_l3_dev_ipato_add_store(const char *buf, size_t count, struct qeth_card *card, enum qeth_prot_versions proto) { struct qeth_ipato_entry *ipatoe; u8 addr[16]; int mask_bits; int rc = 0; mutex_lock(&card->conf_mutex); rc = qeth_l3_parse_ipatoe(buf, proto, addr, &mask_bits); if (rc) goto out; ipatoe = kzalloc(sizeof(struct qeth_ipato_entry), GFP_KERNEL); if (!ipatoe) { rc = -ENOMEM; goto out; } ipatoe->proto = proto; memcpy(ipatoe->addr, addr, (proto == QETH_PROT_IPV4)? 4:16); ipatoe->mask_bits = mask_bits; rc = qeth_l3_add_ipato_entry(card, ipatoe); if (rc) kfree(ipatoe); out: mutex_unlock(&card->conf_mutex); return rc ? rc : count; } static ssize_t qeth_l3_dev_ipato_add4_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct qeth_card *card = dev_get_drvdata(dev); if (!card) return -EINVAL; return qeth_l3_dev_ipato_add_store(buf, count, card, QETH_PROT_IPV4); } static QETH_DEVICE_ATTR(ipato_add4, add4, 0644, qeth_l3_dev_ipato_add4_show, qeth_l3_dev_ipato_add4_store); static ssize_t qeth_l3_dev_ipato_del_store(const char *buf, size_t count, struct qeth_card *card, enum qeth_prot_versions proto) { u8 addr[16]; int mask_bits; int rc = 0; mutex_lock(&card->conf_mutex); rc = qeth_l3_parse_ipatoe(buf, proto, addr, &mask_bits); if (!rc) qeth_l3_del_ipato_entry(card, proto, addr, mask_bits); mutex_unlock(&card->conf_mutex); return rc ? 
rc : count; } static ssize_t qeth_l3_dev_ipato_del4_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct qeth_card *card = dev_get_drvdata(dev); if (!card) return -EINVAL; return qeth_l3_dev_ipato_del_store(buf, count, card, QETH_PROT_IPV4); } static QETH_DEVICE_ATTR(ipato_del4, del4, 0200, NULL, qeth_l3_dev_ipato_del4_store); static ssize_t qeth_l3_dev_ipato_invert6_show(struct device *dev, struct device_attribute *attr, char *buf) { struct qeth_card *card = dev_get_drvdata(dev); if (!card) return -EINVAL; return sprintf(buf, "%i\n", card->ipato.invert6? 1:0); } static ssize_t qeth_l3_dev_ipato_invert6_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct qeth_card *card = dev_get_drvdata(dev); char *tmp; int rc = 0; if (!card) return -EINVAL; mutex_lock(&card->conf_mutex); tmp = strsep((char **) &buf, "\n"); if (!strcmp(tmp, "toggle")) { card->ipato.invert6 = (card->ipato.invert6)? 0 : 1; } else if (!strcmp(tmp, "1")) { card->ipato.invert6 = 1; } else if (!strcmp(tmp, "0")) { card->ipato.invert6 = 0; } else rc = -EINVAL; mutex_unlock(&card->conf_mutex); return rc ? 
rc : count; } static QETH_DEVICE_ATTR(ipato_invert6, invert6, 0644, qeth_l3_dev_ipato_invert6_show, qeth_l3_dev_ipato_invert6_store); static ssize_t qeth_l3_dev_ipato_add6_show(struct device *dev, struct device_attribute *attr, char *buf) { struct qeth_card *card = dev_get_drvdata(dev); if (!card) return -EINVAL; return qeth_l3_dev_ipato_add_show(buf, card, QETH_PROT_IPV6); } static ssize_t qeth_l3_dev_ipato_add6_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct qeth_card *card = dev_get_drvdata(dev); if (!card) return -EINVAL; return qeth_l3_dev_ipato_add_store(buf, count, card, QETH_PROT_IPV6); } static QETH_DEVICE_ATTR(ipato_add6, add6, 0644, qeth_l3_dev_ipato_add6_show, qeth_l3_dev_ipato_add6_store); static ssize_t qeth_l3_dev_ipato_del6_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct qeth_card *card = dev_get_drvdata(dev); if (!card) return -EINVAL; return qeth_l3_dev_ipato_del_store(buf, count, card, QETH_PROT_IPV6); } static QETH_DEVICE_ATTR(ipato_del6, del6, 0200, NULL, qeth_l3_dev_ipato_del6_store); static struct attribute *qeth_ipato_device_attrs[] = { &dev_attr_ipato_enable.attr, &dev_attr_ipato_invert4.attr, &dev_attr_ipato_add4.attr, &dev_attr_ipato_del4.attr, &dev_attr_ipato_invert6.attr, &dev_attr_ipato_add6.attr, &dev_attr_ipato_del6.attr, NULL, }; static struct attribute_group qeth_device_ipato_group = { .name = "ipa_takeover", .attrs = qeth_ipato_device_attrs, }; static ssize_t qeth_l3_dev_vipa_add_show(char *buf, struct qeth_card *card, enum qeth_prot_versions proto) { struct qeth_ipaddr *ipaddr; char addr_str[40]; int entry_len; /* length of 1 entry string, differs between v4 and v6 */ unsigned long flags; int i = 0; entry_len = (proto == QETH_PROT_IPV4)? 
12 : 40; entry_len += 2; /* \n + terminator */ spin_lock_irqsave(&card->ip_lock, flags); list_for_each_entry(ipaddr, &card->ip_list, entry) { if (ipaddr->proto != proto) continue; if (ipaddr->type != QETH_IP_TYPE_VIPA) continue; /* String must not be longer than PAGE_SIZE. So we check if * string length gets near PAGE_SIZE. Then we can savely display * the next IPv6 address (worst case, compared to IPv4) */ if ((PAGE_SIZE - i) <= entry_len) break; qeth_l3_ipaddr_to_string(proto, (const u8 *)&ipaddr->u, addr_str); i += snprintf(buf + i, PAGE_SIZE - i, "%s\n", addr_str); } spin_unlock_irqrestore(&card->ip_lock, flags); i += snprintf(buf + i, PAGE_SIZE - i, "\n"); return i; } static ssize_t qeth_l3_dev_vipa_add4_show(struct device *dev, struct device_attribute *attr, char *buf) { struct qeth_card *card = dev_get_drvdata(dev); if (!card) return -EINVAL; return qeth_l3_dev_vipa_add_show(buf, card, QETH_PROT_IPV4); } static int qeth_l3_parse_vipae(const char *buf, enum qeth_prot_versions proto, u8 *addr) { if (qeth_l3_string_to_ipaddr(buf, proto, addr)) { return -EINVAL; } return 0; } static ssize_t qeth_l3_dev_vipa_add_store(const char *buf, size_t count, struct qeth_card *card, enum qeth_prot_versions proto) { u8 addr[16] = {0, }; int rc; mutex_lock(&card->conf_mutex); rc = qeth_l3_parse_vipae(buf, proto, addr); if (!rc) rc = qeth_l3_add_vipa(card, proto, addr); mutex_unlock(&card->conf_mutex); return rc ? 
rc : count; } static ssize_t qeth_l3_dev_vipa_add4_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct qeth_card *card = dev_get_drvdata(dev); if (!card) return -EINVAL; return qeth_l3_dev_vipa_add_store(buf, count, card, QETH_PROT_IPV4); } static QETH_DEVICE_ATTR(vipa_add4, add4, 0644, qeth_l3_dev_vipa_add4_show, qeth_l3_dev_vipa_add4_store); static ssize_t qeth_l3_dev_vipa_del_store(const char *buf, size_t count, struct qeth_card *card, enum qeth_prot_versions proto) { u8 addr[16]; int rc; mutex_lock(&card->conf_mutex); rc = qeth_l3_parse_vipae(buf, proto, addr); if (!rc) qeth_l3_del_vipa(card, proto, addr); mutex_unlock(&card->conf_mutex); return rc ? rc : count; } static ssize_t qeth_l3_dev_vipa_del4_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct qeth_card *card = dev_get_drvdata(dev); if (!card) return -EINVAL; return qeth_l3_dev_vipa_del_store(buf, count, card, QETH_PROT_IPV4); } static QETH_DEVICE_ATTR(vipa_del4, del4, 0200, NULL, qeth_l3_dev_vipa_del4_store); static ssize_t qeth_l3_dev_vipa_add6_show(struct device *dev, struct device_attribute *attr, char *buf) { struct qeth_card *card = dev_get_drvdata(dev); if (!card) return -EINVAL; return qeth_l3_dev_vipa_add_show(buf, card, QETH_PROT_IPV6); } static ssize_t qeth_l3_dev_vipa_add6_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct qeth_card *card = dev_get_drvdata(dev); if (!card) return -EINVAL; return qeth_l3_dev_vipa_add_store(buf, count, card, QETH_PROT_IPV6); } static QETH_DEVICE_ATTR(vipa_add6, add6, 0644, qeth_l3_dev_vipa_add6_show, qeth_l3_dev_vipa_add6_store); static ssize_t qeth_l3_dev_vipa_del6_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct qeth_card *card = dev_get_drvdata(dev); if (!card) return -EINVAL; return qeth_l3_dev_vipa_del_store(buf, count, card, QETH_PROT_IPV6); } static 
QETH_DEVICE_ATTR(vipa_del6, del6, 0200, NULL, qeth_l3_dev_vipa_del6_store); static struct attribute *qeth_vipa_device_attrs[] = { &dev_attr_vipa_add4.attr, &dev_attr_vipa_del4.attr, &dev_attr_vipa_add6.attr, &dev_attr_vipa_del6.attr, NULL, }; static struct attribute_group qeth_device_vipa_group = { .name = "vipa", .attrs = qeth_vipa_device_attrs, }; static ssize_t qeth_l3_dev_rxip_add_show(char *buf, struct qeth_card *card, enum qeth_prot_versions proto) { struct qeth_ipaddr *ipaddr; char addr_str[40]; int entry_len; /* length of 1 entry string, differs between v4 and v6 */ unsigned long flags; int i = 0; entry_len = (proto == QETH_PROT_IPV4)? 12 : 40; entry_len += 2; /* \n + terminator */ spin_lock_irqsave(&card->ip_lock, flags); list_for_each_entry(ipaddr, &card->ip_list, entry) { if (ipaddr->proto != proto) continue; if (ipaddr->type != QETH_IP_TYPE_RXIP) continue; /* String must not be longer than PAGE_SIZE. So we check if * string length gets near PAGE_SIZE. Then we can savely display * the next IPv6 address (worst case, compared to IPv4) */ if ((PAGE_SIZE - i) <= entry_len) break; qeth_l3_ipaddr_to_string(proto, (const u8 *)&ipaddr->u, addr_str); i += snprintf(buf + i, PAGE_SIZE - i, "%s\n", addr_str); } spin_unlock_irqrestore(&card->ip_lock, flags); i += snprintf(buf + i, PAGE_SIZE - i, "\n"); return i; } static ssize_t qeth_l3_dev_rxip_add4_show(struct device *dev, struct device_attribute *attr, char *buf) { struct qeth_card *card = dev_get_drvdata(dev); if (!card) return -EINVAL; return qeth_l3_dev_rxip_add_show(buf, card, QETH_PROT_IPV4); } static int qeth_l3_parse_rxipe(const char *buf, enum qeth_prot_versions proto, u8 *addr) { if (qeth_l3_string_to_ipaddr(buf, proto, addr)) { return -EINVAL; } return 0; } static ssize_t qeth_l3_dev_rxip_add_store(const char *buf, size_t count, struct qeth_card *card, enum qeth_prot_versions proto) { u8 addr[16] = {0, }; int rc; mutex_lock(&card->conf_mutex); rc = qeth_l3_parse_rxipe(buf, proto, addr); if (!rc) rc = 
qeth_l3_add_rxip(card, proto, addr); mutex_unlock(&card->conf_mutex); return rc ? rc : count; } static ssize_t qeth_l3_dev_rxip_add4_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct qeth_card *card = dev_get_drvdata(dev); if (!card) return -EINVAL; return qeth_l3_dev_rxip_add_store(buf, count, card, QETH_PROT_IPV4); } static QETH_DEVICE_ATTR(rxip_add4, add4, 0644, qeth_l3_dev_rxip_add4_show, qeth_l3_dev_rxip_add4_store); static ssize_t qeth_l3_dev_rxip_del_store(const char *buf, size_t count, struct qeth_card *card, enum qeth_prot_versions proto) { u8 addr[16]; int rc; mutex_lock(&card->conf_mutex); rc = qeth_l3_parse_rxipe(buf, proto, addr); if (!rc) qeth_l3_del_rxip(card, proto, addr); mutex_unlock(&card->conf_mutex); return rc ? rc : count; } static ssize_t qeth_l3_dev_rxip_del4_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct qeth_card *card = dev_get_drvdata(dev); if (!card) return -EINVAL; return qeth_l3_dev_rxip_del_store(buf, count, card, QETH_PROT_IPV4); } static QETH_DEVICE_ATTR(rxip_del4, del4, 0200, NULL, qeth_l3_dev_rxip_del4_store); static ssize_t qeth_l3_dev_rxip_add6_show(struct device *dev, struct device_attribute *attr, char *buf) { struct qeth_card *card = dev_get_drvdata(dev); if (!card) return -EINVAL; return qeth_l3_dev_rxip_add_show(buf, card, QETH_PROT_IPV6); } static ssize_t qeth_l3_dev_rxip_add6_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct qeth_card *card = dev_get_drvdata(dev); if (!card) return -EINVAL; return qeth_l3_dev_rxip_add_store(buf, count, card, QETH_PROT_IPV6); } static QETH_DEVICE_ATTR(rxip_add6, add6, 0644, qeth_l3_dev_rxip_add6_show, qeth_l3_dev_rxip_add6_store); static ssize_t qeth_l3_dev_rxip_del6_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct qeth_card *card = dev_get_drvdata(dev); if (!card) return -EINVAL; return 
qeth_l3_dev_rxip_del_store(buf, count, card, QETH_PROT_IPV6); } static QETH_DEVICE_ATTR(rxip_del6, del6, 0200, NULL, qeth_l3_dev_rxip_del6_store); static struct attribute *qeth_rxip_device_attrs[] = { &dev_attr_rxip_add4.attr, &dev_attr_rxip_del4.attr, &dev_attr_rxip_add6.attr, &dev_attr_rxip_del6.attr, NULL, }; static struct attribute_group qeth_device_rxip_group = { .name = "rxip", .attrs = qeth_rxip_device_attrs, }; int qeth_l3_create_device_attributes(struct device *dev) { int ret; ret = sysfs_create_group(&dev->kobj, &qeth_l3_device_attr_group); if (ret) return ret; ret = sysfs_create_group(&dev->kobj, &qeth_device_ipato_group); if (ret) { sysfs_remove_group(&dev->kobj, &qeth_l3_device_attr_group); return ret; } ret = sysfs_create_group(&dev->kobj, &qeth_device_vipa_group); if (ret) { sysfs_remove_group(&dev->kobj, &qeth_l3_device_attr_group); sysfs_remove_group(&dev->kobj, &qeth_device_ipato_group); return ret; } ret = sysfs_create_group(&dev->kobj, &qeth_device_rxip_group); if (ret) { sysfs_remove_group(&dev->kobj, &qeth_l3_device_attr_group); sysfs_remove_group(&dev->kobj, &qeth_device_ipato_group); sysfs_remove_group(&dev->kobj, &qeth_device_vipa_group); return ret; } return 0; } void qeth_l3_remove_device_attributes(struct device *dev) { sysfs_remove_group(&dev->kobj, &qeth_l3_device_attr_group); sysfs_remove_group(&dev->kobj, &qeth_device_ipato_group); sysfs_remove_group(&dev->kobj, &qeth_device_vipa_group); sysfs_remove_group(&dev->kobj, &qeth_device_rxip_group); }
gpl-2.0
adhi1419/MSM7627A
drivers/cpuidle/sysfs.c
1008
9871
/* * sysfs.c - sysfs support * * (C) 2006-2007 Shaohua Li <shaohua.li@intel.com> * * This code is licenced under the GPL. */ #include <linux/kernel.h> #include <linux/cpuidle.h> #include <linux/sysfs.h> #include <linux/slab.h> #include <linux/cpu.h> #include "cpuidle.h" static unsigned int sysfs_switch; static int __init cpuidle_sysfs_setup(char *unused) { sysfs_switch = 1; return 1; } __setup("cpuidle_sysfs_switch", cpuidle_sysfs_setup); static ssize_t show_available_governors(struct sysdev_class *class, struct sysdev_class_attribute *attr, char *buf) { ssize_t i = 0; struct cpuidle_governor *tmp; mutex_lock(&cpuidle_lock); list_for_each_entry(tmp, &cpuidle_governors, governor_list) { if (i >= (ssize_t) ((PAGE_SIZE/sizeof(char)) - CPUIDLE_NAME_LEN - 2)) goto out; i += scnprintf(&buf[i], CPUIDLE_NAME_LEN, "%s ", tmp->name); } out: i+= sprintf(&buf[i], "\n"); mutex_unlock(&cpuidle_lock); return i; } static ssize_t show_current_driver(struct sysdev_class *class, struct sysdev_class_attribute *attr, char *buf) { ssize_t ret; struct cpuidle_driver *cpuidle_driver = cpuidle_get_driver(); spin_lock(&cpuidle_driver_lock); if (cpuidle_driver) ret = sprintf(buf, "%s\n", cpuidle_driver->name); else ret = sprintf(buf, "none\n"); spin_unlock(&cpuidle_driver_lock); return ret; } static ssize_t show_current_governor(struct sysdev_class *class, struct sysdev_class_attribute *attr, char *buf) { ssize_t ret; mutex_lock(&cpuidle_lock); if (cpuidle_curr_governor) ret = sprintf(buf, "%s\n", cpuidle_curr_governor->name); else ret = sprintf(buf, "none\n"); mutex_unlock(&cpuidle_lock); return ret; } static ssize_t store_current_governor(struct sysdev_class *class, struct sysdev_class_attribute *attr, const char *buf, size_t count) { char gov_name[CPUIDLE_NAME_LEN]; int ret = -EINVAL; size_t len = count; struct cpuidle_governor *gov; if (!len || len >= sizeof(gov_name)) return -EINVAL; memcpy(gov_name, buf, len); gov_name[len] = '\0'; if (gov_name[len - 1] == '\n') gov_name[--len] = '\0'; 
mutex_lock(&cpuidle_lock); list_for_each_entry(gov, &cpuidle_governors, governor_list) { if (strlen(gov->name) == len && !strcmp(gov->name, gov_name)) { ret = cpuidle_switch_governor(gov); break; } } mutex_unlock(&cpuidle_lock); if (ret) return ret; else return count; } static SYSDEV_CLASS_ATTR(current_driver, 0444, show_current_driver, NULL); static SYSDEV_CLASS_ATTR(current_governor_ro, 0444, show_current_governor, NULL); static struct attribute *cpuclass_default_attrs[] = { &attr_current_driver.attr, &attr_current_governor_ro.attr, NULL }; static SYSDEV_CLASS_ATTR(available_governors, 0444, show_available_governors, NULL); static SYSDEV_CLASS_ATTR(current_governor, 0644, show_current_governor, store_current_governor); static struct attribute *cpuclass_switch_attrs[] = { &attr_available_governors.attr, &attr_current_driver.attr, &attr_current_governor.attr, NULL }; static struct attribute_group cpuclass_attr_group = { .attrs = cpuclass_default_attrs, .name = "cpuidle", }; /** * cpuidle_add_class_sysfs - add CPU global sysfs attributes */ int cpuidle_add_class_sysfs(struct sysdev_class *cls) { if (sysfs_switch) cpuclass_attr_group.attrs = cpuclass_switch_attrs; return sysfs_create_group(&cls->kset.kobj, &cpuclass_attr_group); } /** * cpuidle_remove_class_sysfs - remove CPU global sysfs attributes */ void cpuidle_remove_class_sysfs(struct sysdev_class *cls) { sysfs_remove_group(&cls->kset.kobj, &cpuclass_attr_group); } struct cpuidle_attr { struct attribute attr; ssize_t (*show)(struct cpuidle_device *, char *); ssize_t (*store)(struct cpuidle_device *, const char *, size_t count); }; #define define_one_ro(_name, show) \ static struct cpuidle_attr attr_##_name = __ATTR(_name, 0444, show, NULL) #define define_one_rw(_name, show, store) \ static struct cpuidle_attr attr_##_name = __ATTR(_name, 0644, show, store) #define kobj_to_cpuidledev(k) container_of(k, struct cpuidle_device, kobj) #define attr_to_cpuidleattr(a) container_of(a, struct cpuidle_attr, attr) static 
ssize_t cpuidle_show(struct kobject * kobj, struct attribute * attr ,char * buf) { int ret = -EIO; struct cpuidle_device *dev = kobj_to_cpuidledev(kobj); struct cpuidle_attr * cattr = attr_to_cpuidleattr(attr); if (cattr->show) { mutex_lock(&cpuidle_lock); ret = cattr->show(dev, buf); mutex_unlock(&cpuidle_lock); } return ret; } static ssize_t cpuidle_store(struct kobject * kobj, struct attribute * attr, const char * buf, size_t count) { int ret = -EIO; struct cpuidle_device *dev = kobj_to_cpuidledev(kobj); struct cpuidle_attr * cattr = attr_to_cpuidleattr(attr); if (cattr->store) { mutex_lock(&cpuidle_lock); ret = cattr->store(dev, buf, count); mutex_unlock(&cpuidle_lock); } return ret; } static const struct sysfs_ops cpuidle_sysfs_ops = { .show = cpuidle_show, .store = cpuidle_store, }; static void cpuidle_sysfs_release(struct kobject *kobj) { struct cpuidle_device *dev = kobj_to_cpuidledev(kobj); complete(&dev->kobj_unregister); } static struct kobj_type ktype_cpuidle = { .sysfs_ops = &cpuidle_sysfs_ops, .release = cpuidle_sysfs_release, }; struct cpuidle_state_attr { struct attribute attr; ssize_t (*show)(struct cpuidle_state *, char *); ssize_t (*store)(struct cpuidle_state *, const char *, size_t); }; #define define_one_state_ro(_name, show) \ static struct cpuidle_state_attr attr_##_name = __ATTR(_name, 0444, show, NULL) #define define_show_state_function(_name) \ static ssize_t show_state_##_name(struct cpuidle_state *state, char *buf) \ { \ return sprintf(buf, "%u\n", state->_name);\ } #define define_show_state_ull_function(_name) \ static ssize_t show_state_##_name(struct cpuidle_state *state, char *buf) \ { \ return sprintf(buf, "%llu\n", state->_name);\ } #define define_show_state_str_function(_name) \ static ssize_t show_state_##_name(struct cpuidle_state *state, char *buf) \ { \ if (state->_name[0] == '\0')\ return sprintf(buf, "<null>\n");\ return sprintf(buf, "%s\n", state->_name);\ } define_show_state_function(exit_latency) 
define_show_state_function(power_usage) define_show_state_ull_function(usage) define_show_state_ull_function(time) define_show_state_str_function(name) define_show_state_str_function(desc) define_one_state_ro(name, show_state_name); define_one_state_ro(desc, show_state_desc); define_one_state_ro(latency, show_state_exit_latency); define_one_state_ro(power, show_state_power_usage); define_one_state_ro(usage, show_state_usage); define_one_state_ro(time, show_state_time); static struct attribute *cpuidle_state_default_attrs[] = { &attr_name.attr, &attr_desc.attr, &attr_latency.attr, &attr_power.attr, &attr_usage.attr, &attr_time.attr, NULL }; #define kobj_to_state_obj(k) container_of(k, struct cpuidle_state_kobj, kobj) #define kobj_to_state(k) (kobj_to_state_obj(k)->state) #define attr_to_stateattr(a) container_of(a, struct cpuidle_state_attr, attr) static ssize_t cpuidle_state_show(struct kobject * kobj, struct attribute * attr ,char * buf) { int ret = -EIO; struct cpuidle_state *state = kobj_to_state(kobj); struct cpuidle_state_attr * cattr = attr_to_stateattr(attr); if (cattr->show) ret = cattr->show(state, buf); return ret; } static const struct sysfs_ops cpuidle_state_sysfs_ops = { .show = cpuidle_state_show, }; static void cpuidle_state_sysfs_release(struct kobject *kobj) { struct cpuidle_state_kobj *state_obj = kobj_to_state_obj(kobj); complete(&state_obj->kobj_unregister); } static struct kobj_type ktype_state_cpuidle = { .sysfs_ops = &cpuidle_state_sysfs_ops, .default_attrs = cpuidle_state_default_attrs, .release = cpuidle_state_sysfs_release, }; static void inline cpuidle_free_state_kobj(struct cpuidle_device *device, int i) { kobject_put(&device->kobjs[i]->kobj); wait_for_completion(&device->kobjs[i]->kobj_unregister); kfree(device->kobjs[i]); device->kobjs[i] = NULL; } /** * cpuidle_add_driver_sysfs - adds driver-specific sysfs attributes * @device: the target device */ int cpuidle_add_state_sysfs(struct cpuidle_device *device) { int i, ret = -ENOMEM; 
struct cpuidle_state_kobj *kobj; /* state statistics */ for (i = 0; i < device->state_count; i++) { kobj = kzalloc(sizeof(struct cpuidle_state_kobj), GFP_KERNEL); if (!kobj) goto error_state; kobj->state = &device->states[i]; init_completion(&kobj->kobj_unregister); ret = kobject_init_and_add(&kobj->kobj, &ktype_state_cpuidle, &device->kobj, "state%d", i); if (ret) { kfree(kobj); goto error_state; } kobject_uevent(&kobj->kobj, KOBJ_ADD); device->kobjs[i] = kobj; } return 0; error_state: for (i = i - 1; i >= 0; i--) cpuidle_free_state_kobj(device, i); return ret; } /** * cpuidle_remove_driver_sysfs - removes driver-specific sysfs attributes * @device: the target device */ void cpuidle_remove_state_sysfs(struct cpuidle_device *device) { int i; for (i = 0; i < device->state_count; i++) cpuidle_free_state_kobj(device, i); } /** * cpuidle_add_sysfs - creates a sysfs instance for the target device * @sysdev: the target device */ int cpuidle_add_sysfs(struct sys_device *sysdev) { int cpu = sysdev->id; struct cpuidle_device *dev; int error; dev = per_cpu(cpuidle_devices, cpu); error = kobject_init_and_add(&dev->kobj, &ktype_cpuidle, &sysdev->kobj, "cpuidle"); if (!error) kobject_uevent(&dev->kobj, KOBJ_ADD); return error; } /** * cpuidle_remove_sysfs - deletes a sysfs instance on the target device * @sysdev: the target device */ void cpuidle_remove_sysfs(struct sys_device *sysdev) { int cpu = sysdev->id; struct cpuidle_device *dev; dev = per_cpu(cpuidle_devices, cpu); kobject_put(&dev->kobj); }
gpl-2.0
Austinpb/linux
mm/debug-pagealloc.c
1008
2711
#include <linux/kernel.h> #include <linux/string.h> #include <linux/mm.h> #include <linux/highmem.h> #include <linux/page_ext.h> #include <linux/poison.h> #include <linux/ratelimit.h> static bool page_poisoning_enabled __read_mostly; static bool need_page_poisoning(void) { if (!debug_pagealloc_enabled()) return false; return true; } static void init_page_poisoning(void) { if (!debug_pagealloc_enabled()) return; page_poisoning_enabled = true; } struct page_ext_operations page_poisoning_ops = { .need = need_page_poisoning, .init = init_page_poisoning, }; static inline void set_page_poison(struct page *page) { struct page_ext *page_ext; page_ext = lookup_page_ext(page); __set_bit(PAGE_EXT_DEBUG_POISON, &page_ext->flags); } static inline void clear_page_poison(struct page *page) { struct page_ext *page_ext; page_ext = lookup_page_ext(page); __clear_bit(PAGE_EXT_DEBUG_POISON, &page_ext->flags); } static inline bool page_poison(struct page *page) { struct page_ext *page_ext; page_ext = lookup_page_ext(page); return test_bit(PAGE_EXT_DEBUG_POISON, &page_ext->flags); } static void poison_page(struct page *page) { void *addr = kmap_atomic(page); set_page_poison(page); memset(addr, PAGE_POISON, PAGE_SIZE); kunmap_atomic(addr); } static void poison_pages(struct page *page, int n) { int i; for (i = 0; i < n; i++) poison_page(page + i); } static bool single_bit_flip(unsigned char a, unsigned char b) { unsigned char error = a ^ b; return error && !(error & (error - 1)); } static void check_poison_mem(unsigned char *mem, size_t bytes) { static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 10); unsigned char *start; unsigned char *end; start = memchr_inv(mem, PAGE_POISON, bytes); if (!start) return; for (end = mem + bytes - 1; end > start; end--) { if (*end != PAGE_POISON) break; } if (!__ratelimit(&ratelimit)) return; else if (start == end && single_bit_flip(*start, PAGE_POISON)) printk(KERN_ERR "pagealloc: single bit error\n"); else printk(KERN_ERR "pagealloc: memory corruption\n"); 
print_hex_dump(KERN_ERR, "", DUMP_PREFIX_ADDRESS, 16, 1, start, end - start + 1, 1); dump_stack(); } static void unpoison_page(struct page *page) { void *addr; if (!page_poison(page)) return; addr = kmap_atomic(page); check_poison_mem(addr, PAGE_SIZE); clear_page_poison(page); kunmap_atomic(addr); } static void unpoison_pages(struct page *page, int n) { int i; for (i = 0; i < n; i++) unpoison_page(page + i); } void __kernel_map_pages(struct page *page, int numpages, int enable) { if (!page_poisoning_enabled) return; if (enable) unpoison_pages(page, numpages); else poison_pages(page, numpages); }
gpl-2.0
partner-seco/linux_SBC
kernel/sched_idletask.c
2800
2135
/*
 * idle-task scheduling class.
 *
 * (NOTE: these are not related to SCHED_IDLE tasks which are
 *  handled in sched_fair.c)
 */

#ifdef CONFIG_SMP
static int select_task_rq_idle(struct task_struct *p, int sd_flag, int flags)
{
	return task_cpu(p); /* IDLE tasks are never migrated */
}
#endif /* CONFIG_SMP */

/*
 * Idle tasks are unconditionally rescheduled:
 */
static void check_preempt_curr_idle(struct rq *rq, struct task_struct *p, int flags)
{
	resched_task(rq->idle);
}

/* Hand the CPU to the per-rq idle task and account the idle period. */
static struct task_struct *pick_next_task_idle(struct rq *rq)
{
	schedstat_inc(rq, sched_goidle);
	calc_load_account_idle(rq);
	return rq->idle;
}

/*
 * It is not legal to sleep in the idle task - print a warning
 * message if some code attempts to do it:
 */
static void dequeue_task_idle(struct rq *rq, struct task_struct *p, int flags)
{
	/* Drop rq->lock around printk/dump_stack so they may sleep/log safely. */
	raw_spin_unlock_irq(&rq->lock);
	printk(KERN_ERR "bad: scheduling from the idle thread!\n");
	dump_stack();
	raw_spin_lock_irq(&rq->lock);
}

/* Nothing to save when the idle task is switched out. */
static void put_prev_task_idle(struct rq *rq, struct task_struct *prev)
{
}

/* No timeslice accounting for the idle task. */
static void task_tick_idle(struct rq *rq, struct task_struct *curr, int queued)
{
}

static void set_curr_task_idle(struct rq *rq)
{
}

/* Tasks are never switched *to* the idle class at runtime. */
static void switched_to_idle(struct rq *rq, struct task_struct *p)
{
	BUG();
}

/* The idle task has no meaningful priority to change. */
static void prio_changed_idle(struct rq *rq, struct task_struct *p, int oldprio)
{
	BUG();
}

/* The idle task has no round-robin interval. */
static unsigned int get_rr_interval_idle(struct rq *rq, struct task_struct *task)
{
	return 0;
}

/*
 * Simple, special scheduling class for the per-CPU idle tasks:
 */
static const struct sched_class idle_sched_class = {
	/* .next is NULL */
	/* no enqueue/yield_task for idle tasks */

	/* dequeue is not valid, we print a debug message there: */
	.dequeue_task		= dequeue_task_idle,

	.check_preempt_curr	= check_preempt_curr_idle,

	.pick_next_task		= pick_next_task_idle,
	.put_prev_task		= put_prev_task_idle,

#ifdef CONFIG_SMP
	.select_task_rq		= select_task_rq_idle,
#endif

	.set_curr_task          = set_curr_task_idle,
	.task_tick		= task_tick_idle,

	.get_rr_interval	= get_rr_interval_idle,

	.prio_changed		= prio_changed_idle,
	.switched_to		= switched_to_idle,
};
gpl-2.0
sudosurootdev/kernel_lge_msm8974
drivers/usb/serial/io_ti.c
3312
77510
/* * Edgeport USB Serial Converter driver * * Copyright (C) 2000-2002 Inside Out Networks, All rights reserved. * Copyright (C) 2001-2002 Greg Kroah-Hartman <greg@kroah.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * Supports the following devices: * EP/1 EP/2 EP/4 EP/21 EP/22 EP/221 EP/42 EP/421 WATCHPORT * * For questions or problems with this driver, contact Inside Out * Networks technical support, or Peter Berger <pberger@brimson.com>, * or Al Borchers <alborchers@steinerpoint.com>. */ #include <linux/kernel.h> #include <linux/jiffies.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/tty.h> #include <linux/tty_driver.h> #include <linux/tty_flip.h> #include <linux/module.h> #include <linux/spinlock.h> #include <linux/mutex.h> #include <linux/serial.h> #include <linux/kfifo.h> #include <linux/ioctl.h> #include <linux/firmware.h> #include <linux/uaccess.h> #include <linux/usb.h> #include <linux/usb/serial.h> #include "io_16654.h" #include "io_usbvend.h" #include "io_ti.h" /* * Version Information */ #define DRIVER_VERSION "v0.7mode043006" #define DRIVER_AUTHOR "Greg Kroah-Hartman <greg@kroah.com> and David Iacovelli" #define DRIVER_DESC "Edgeport USB Serial Driver" #define EPROM_PAGE_SIZE 64 /* different hardware types */ #define HARDWARE_TYPE_930 0 #define HARDWARE_TYPE_TIUMP 1 /* IOCTL_PRIVATE_TI_GET_MODE Definitions */ #define TI_MODE_CONFIGURING 0 /* Device has not entered start device */ #define TI_MODE_BOOT 1 /* Staying in boot mode */ #define TI_MODE_DOWNLOAD 2 /* Made it to download mode */ #define TI_MODE_TRANSITIONING 3 /* Currently in boot mode but transitioning to download mode */ /* read urb state */ #define EDGE_READ_URB_RUNNING 0 #define EDGE_READ_URB_STOPPING 1 #define EDGE_READ_URB_STOPPED 2 #define 
EDGE_CLOSING_WAIT 4000 /* in .01 sec */ #define EDGE_OUT_BUF_SIZE 1024 /* Product information read from the Edgeport */ struct product_info { int TiMode; /* Current TI Mode */ __u8 hardware_type; /* Type of hardware */ } __attribute__((packed)); struct edgeport_port { __u16 uart_base; __u16 dma_address; __u8 shadow_msr; __u8 shadow_mcr; __u8 shadow_lsr; __u8 lsr_mask; __u32 ump_read_timeout; /* Number of milliseconds the UMP will wait without data before completing a read short */ int baud_rate; int close_pending; int lsr_event; struct async_icount icount; wait_queue_head_t delta_msr_wait; /* for handling sleeping while waiting for msr change to happen */ struct edgeport_serial *edge_serial; struct usb_serial_port *port; __u8 bUartMode; /* Port type, 0: RS232, etc. */ spinlock_t ep_lock; int ep_read_urb_state; int ep_write_urb_in_use; struct kfifo write_fifo; }; struct edgeport_serial { struct product_info product_info; u8 TI_I2C_Type; /* Type of I2C in UMP */ u8 TiReadI2C; /* Set to TRUE if we have read the I2c in Boot Mode */ struct mutex es_lock; int num_ports_open; struct usb_serial *serial; }; /* Devices that this driver supports */ static const struct usb_device_id edgeport_1port_id_table[] = { { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_1) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_TI3410_EDGEPORT_1) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_TI3410_EDGEPORT_1I) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_WP_PROXIMITY) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_WP_MOTION) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_WP_MOISTURE) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_WP_TEMPERATURE) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_WP_HUMIDITY) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_WP_POWER) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_WP_LIGHT) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_WP_RADIATION) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_WP_DISTANCE) }, { 
USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_WP_ACCELERATION) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_WP_PROX_DIST) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_PLUS_PWR_HP4CD) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_PLUS_PWR_PCI) }, { } }; static const struct usb_device_id edgeport_2port_id_table[] = { { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_2) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_2C) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_2I) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_421) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_21) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_42) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_4) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_4I) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_22I) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_221C) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_22C) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_21C) }, /* The 4, 8 and 16 port devices show up as multiple 2 port devices */ { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_4S) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_8) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_8S) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_416) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_416B) }, { } }; /* Devices that this driver supports */ static const struct usb_device_id id_table_combined[] = { { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_1) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_TI3410_EDGEPORT_1) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_TI3410_EDGEPORT_1I) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_WP_PROXIMITY) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_WP_MOTION) }, { USB_DEVICE(USB_VENDOR_ID_ION, 
ION_DEVICE_ID_WP_MOISTURE) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_WP_TEMPERATURE) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_WP_HUMIDITY) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_WP_POWER) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_WP_LIGHT) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_WP_RADIATION) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_WP_DISTANCE) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_WP_ACCELERATION) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_WP_PROX_DIST) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_PLUS_PWR_HP4CD) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_PLUS_PWR_PCI) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_2) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_2C) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_2I) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_421) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_21) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_42) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_4) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_4I) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_22I) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_221C) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_22C) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_21C) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_4S) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_8) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_8S) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_416) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_416B) }, { } }; MODULE_DEVICE_TABLE(usb, id_table_combined); static struct usb_driver io_driver = { .name = "io_ti", .probe = usb_serial_probe, .disconnect = usb_serial_disconnect, .id_table = id_table_combined, }; static unsigned char 
OperationalMajorVersion; static unsigned char OperationalMinorVersion; static unsigned short OperationalBuildNumber; static bool debug; static int closing_wait = EDGE_CLOSING_WAIT; static bool ignore_cpu_rev; static int default_uart_mode; /* RS232 */ static void edge_tty_recv(struct device *dev, struct tty_struct *tty, unsigned char *data, int length); static void stop_read(struct edgeport_port *edge_port); static int restart_read(struct edgeport_port *edge_port); static void edge_set_termios(struct tty_struct *tty, struct usb_serial_port *port, struct ktermios *old_termios); static void edge_send(struct tty_struct *tty); /* sysfs attributes */ static int edge_create_sysfs_attrs(struct usb_serial_port *port); static int edge_remove_sysfs_attrs(struct usb_serial_port *port); static int ti_vread_sync(struct usb_device *dev, __u8 request, __u16 value, __u16 index, u8 *data, int size) { int status; status = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), request, (USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN), value, index, data, size, 1000); if (status < 0) return status; if (status != size) { dbg("%s - wanted to write %d, but only wrote %d", __func__, size, status); return -ECOMM; } return 0; } static int ti_vsend_sync(struct usb_device *dev, __u8 request, __u16 value, __u16 index, u8 *data, int size) { int status; status = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), request, (USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT), value, index, data, size, 1000); if (status < 0) return status; if (status != size) { dbg("%s - wanted to write %d, but only wrote %d", __func__, size, status); return -ECOMM; } return 0; } static int send_cmd(struct usb_device *dev, __u8 command, __u8 moduleid, __u16 value, u8 *data, int size) { return ti_vsend_sync(dev, command, value, moduleid, data, size); } /* clear tx/rx buffers and fifo in TI UMP */ static int purge_port(struct usb_serial_port *port, __u16 mask) { int port_number = port->number - port->serial->minor; dbg("%s - port 
%d, mask %x", __func__, port_number, mask); return send_cmd(port->serial->dev, UMPC_PURGE_PORT, (__u8)(UMPM_UART1_PORT + port_number), mask, NULL, 0); } /** * read_download_mem - Read edgeport memory from TI chip * @dev: usb device pointer * @start_address: Device CPU address at which to read * @length: Length of above data * @address_type: Can read both XDATA and I2C * @buffer: pointer to input data buffer */ static int read_download_mem(struct usb_device *dev, int start_address, int length, __u8 address_type, __u8 *buffer) { int status = 0; __u8 read_length; __be16 be_start_address; dbg("%s - @ %x for %d", __func__, start_address, length); /* Read in blocks of 64 bytes * (TI firmware can't handle more than 64 byte reads) */ while (length) { if (length > 64) read_length = 64; else read_length = (__u8)length; if (read_length > 1) { dbg("%s - @ %x for %d", __func__, start_address, read_length); } be_start_address = cpu_to_be16(start_address); status = ti_vread_sync(dev, UMPC_MEMORY_READ, (__u16)address_type, (__force __u16)be_start_address, buffer, read_length); if (status) { dbg("%s - ERROR %x", __func__, status); return status; } if (read_length > 1) usb_serial_debug_data(debug, &dev->dev, __func__, read_length, buffer); /* Update pointers/length */ start_address += read_length; buffer += read_length; length -= read_length; } return status; } static int read_ram(struct usb_device *dev, int start_address, int length, __u8 *buffer) { return read_download_mem(dev, start_address, length, DTK_ADDR_SPACE_XDATA, buffer); } /* Read edgeport memory to a given block */ static int read_boot_mem(struct edgeport_serial *serial, int start_address, int length, __u8 *buffer) { int status = 0; int i; for (i = 0; i < length; i++) { status = ti_vread_sync(serial->serial->dev, UMPC_MEMORY_READ, serial->TI_I2C_Type, (__u16)(start_address+i), &buffer[i], 0x01); if (status) { dbg("%s - ERROR %x", __func__, status); return status; } } dbg("%s - start_address = %x, length = %d", __func__, 
start_address, length); usb_serial_debug_data(debug, &serial->serial->dev->dev, __func__, length, buffer); serial->TiReadI2C = 1; return status; } /* Write given block to TI EPROM memory */ static int write_boot_mem(struct edgeport_serial *serial, int start_address, int length, __u8 *buffer) { int status = 0; int i; u8 *temp; /* Must do a read before write */ if (!serial->TiReadI2C) { temp = kmalloc(1, GFP_KERNEL); if (!temp) { dev_err(&serial->serial->dev->dev, "%s - out of memory\n", __func__); return -ENOMEM; } status = read_boot_mem(serial, 0, 1, temp); kfree(temp); if (status) return status; } for (i = 0; i < length; ++i) { status = ti_vsend_sync(serial->serial->dev, UMPC_MEMORY_WRITE, buffer[i], (__u16)(i + start_address), NULL, 0); if (status) return status; } dbg("%s - start_sddr = %x, length = %d", __func__, start_address, length); usb_serial_debug_data(debug, &serial->serial->dev->dev, __func__, length, buffer); return status; } /* Write edgeport I2C memory to TI chip */ static int write_i2c_mem(struct edgeport_serial *serial, int start_address, int length, __u8 address_type, __u8 *buffer) { int status = 0; int write_length; __be16 be_start_address; /* We can only send a maximum of 1 aligned byte page at a time */ /* calculate the number of bytes left in the first page */ write_length = EPROM_PAGE_SIZE - (start_address & (EPROM_PAGE_SIZE - 1)); if (write_length > length) write_length = length; dbg("%s - BytesInFirstPage Addr = %x, length = %d", __func__, start_address, write_length); usb_serial_debug_data(debug, &serial->serial->dev->dev, __func__, write_length, buffer); /* Write first page */ be_start_address = cpu_to_be16(start_address); status = ti_vsend_sync(serial->serial->dev, UMPC_MEMORY_WRITE, (__u16)address_type, (__force __u16)be_start_address, buffer, write_length); if (status) { dbg("%s - ERROR %d", __func__, status); return status; } length -= write_length; start_address += write_length; buffer += write_length; /* We should be aligned now -- 
can write max page size bytes at a time */ while (length) { if (length > EPROM_PAGE_SIZE) write_length = EPROM_PAGE_SIZE; else write_length = length; dbg("%s - Page Write Addr = %x, length = %d", __func__, start_address, write_length); usb_serial_debug_data(debug, &serial->serial->dev->dev, __func__, write_length, buffer); /* Write next page */ be_start_address = cpu_to_be16(start_address); status = ti_vsend_sync(serial->serial->dev, UMPC_MEMORY_WRITE, (__u16)address_type, (__force __u16)be_start_address, buffer, write_length); if (status) { dev_err(&serial->serial->dev->dev, "%s - ERROR %d\n", __func__, status); return status; } length -= write_length; start_address += write_length; buffer += write_length; } return status; } /* Examine the UMP DMA registers and LSR * * Check the MSBit of the X and Y DMA byte count registers. * A zero in this bit indicates that the TX DMA buffers are empty * then check the TX Empty bit in the UART. */ static int tx_active(struct edgeport_port *port) { int status; struct out_endpoint_desc_block *oedb; __u8 *lsr; int bytes_left = 0; oedb = kmalloc(sizeof(*oedb), GFP_KERNEL); if (!oedb) { dev_err(&port->port->dev, "%s - out of memory\n", __func__); return -ENOMEM; } lsr = kmalloc(1, GFP_KERNEL); /* Sigh, that's right, just one byte, as not all platforms can do DMA from stack */ if (!lsr) { kfree(oedb); return -ENOMEM; } /* Read the DMA Count Registers */ status = read_ram(port->port->serial->dev, port->dma_address, sizeof(*oedb), (void *)oedb); if (status) goto exit_is_tx_active; dbg("%s - XByteCount 0x%X", __func__, oedb->XByteCount); /* and the LSR */ status = read_ram(port->port->serial->dev, port->uart_base + UMPMEM_OFFS_UART_LSR, 1, lsr); if (status) goto exit_is_tx_active; dbg("%s - LSR = 0x%X", __func__, *lsr); /* If either buffer has data or we are transmitting then return TRUE */ if ((oedb->XByteCount & 0x80) != 0) bytes_left += 64; if ((*lsr & UMP_UART_LSR_TX_MASK) == 0) bytes_left += 1; /* We return Not Active if we get any 
kind of error */ exit_is_tx_active: dbg("%s - return %d", __func__, bytes_left); kfree(lsr); kfree(oedb); return bytes_left; } static void chase_port(struct edgeport_port *port, unsigned long timeout, int flush) { int baud_rate; struct tty_struct *tty = tty_port_tty_get(&port->port->port); wait_queue_t wait; unsigned long flags; if (!timeout) timeout = (HZ * EDGE_CLOSING_WAIT)/100; /* wait for data to drain from the buffer */ spin_lock_irqsave(&port->ep_lock, flags); init_waitqueue_entry(&wait, current); add_wait_queue(&tty->write_wait, &wait); for (;;) { set_current_state(TASK_INTERRUPTIBLE); if (kfifo_len(&port->write_fifo) == 0 || timeout == 0 || signal_pending(current) || !usb_get_intfdata(port->port->serial->interface)) /* disconnect */ break; spin_unlock_irqrestore(&port->ep_lock, flags); timeout = schedule_timeout(timeout); spin_lock_irqsave(&port->ep_lock, flags); } set_current_state(TASK_RUNNING); remove_wait_queue(&tty->write_wait, &wait); if (flush) kfifo_reset_out(&port->write_fifo); spin_unlock_irqrestore(&port->ep_lock, flags); tty_kref_put(tty); /* wait for data to drain from the device */ timeout += jiffies; while ((long)(jiffies - timeout) < 0 && !signal_pending(current) && usb_get_intfdata(port->port->serial->interface)) { /* not disconnected */ if (!tx_active(port)) break; msleep(10); } /* disconnected */ if (!usb_get_intfdata(port->port->serial->interface)) return; /* wait one more character time, based on baud rate */ /* (tx_active doesn't seem to wait for the last byte) */ baud_rate = port->baud_rate; if (baud_rate == 0) baud_rate = 50; msleep(max(1, DIV_ROUND_UP(10000, baud_rate))); } static int choose_config(struct usb_device *dev) { /* * There may be multiple configurations on this device, in which case * we would need to read and parse all of them to find out which one * we want. However, we just support one config at this point, * configuration # 1, which is Config Descriptor 0. 
*/ dbg("%s - Number of Interfaces = %d", __func__, dev->config->desc.bNumInterfaces); dbg("%s - MAX Power = %d", __func__, dev->config->desc.bMaxPower * 2); if (dev->config->desc.bNumInterfaces != 1) { dev_err(&dev->dev, "%s - bNumInterfaces is not 1, ERROR!\n", __func__); return -ENODEV; } return 0; } static int read_rom(struct edgeport_serial *serial, int start_address, int length, __u8 *buffer) { int status; if (serial->product_info.TiMode == TI_MODE_DOWNLOAD) { status = read_download_mem(serial->serial->dev, start_address, length, serial->TI_I2C_Type, buffer); } else { status = read_boot_mem(serial, start_address, length, buffer); } return status; } static int write_rom(struct edgeport_serial *serial, int start_address, int length, __u8 *buffer) { if (serial->product_info.TiMode == TI_MODE_BOOT) return write_boot_mem(serial, start_address, length, buffer); if (serial->product_info.TiMode == TI_MODE_DOWNLOAD) return write_i2c_mem(serial, start_address, length, serial->TI_I2C_Type, buffer); return -EINVAL; } /* Read a descriptor header from I2C based on type */ static int get_descriptor_addr(struct edgeport_serial *serial, int desc_type, struct ti_i2c_desc *rom_desc) { int start_address; int status; /* Search for requested descriptor in I2C */ start_address = 2; do { status = read_rom(serial, start_address, sizeof(struct ti_i2c_desc), (__u8 *)rom_desc); if (status) return 0; if (rom_desc->Type == desc_type) return start_address; start_address = start_address + sizeof(struct ti_i2c_desc) + rom_desc->Size; } while ((start_address < TI_MAX_I2C_SIZE) && rom_desc->Type); return 0; } /* Validate descriptor checksum */ static int valid_csum(struct ti_i2c_desc *rom_desc, __u8 *buffer) { __u16 i; __u8 cs = 0; for (i = 0; i < rom_desc->Size; i++) cs = (__u8)(cs + buffer[i]); if (cs != rom_desc->CheckSum) { dbg("%s - Mismatch %x - %x", __func__, rom_desc->CheckSum, cs); return -EINVAL; } return 0; } /* Make sure that the I2C image is good */ static int 
check_i2c_image(struct edgeport_serial *serial) { struct device *dev = &serial->serial->dev->dev; int status = 0; struct ti_i2c_desc *rom_desc; int start_address = 2; __u8 *buffer; __u16 ttype; rom_desc = kmalloc(sizeof(*rom_desc), GFP_KERNEL); if (!rom_desc) { dev_err(dev, "%s - out of memory\n", __func__); return -ENOMEM; } buffer = kmalloc(TI_MAX_I2C_SIZE, GFP_KERNEL); if (!buffer) { dev_err(dev, "%s - out of memory when allocating buffer\n", __func__); kfree(rom_desc); return -ENOMEM; } /* Read the first byte (Signature0) must be 0x52 or 0x10 */ status = read_rom(serial, 0, 1, buffer); if (status) goto out; if (*buffer != UMP5152 && *buffer != UMP3410) { dev_err(dev, "%s - invalid buffer signature\n", __func__); status = -ENODEV; goto out; } do { /* Validate the I2C */ status = read_rom(serial, start_address, sizeof(struct ti_i2c_desc), (__u8 *)rom_desc); if (status) break; if ((start_address + sizeof(struct ti_i2c_desc) + rom_desc->Size) > TI_MAX_I2C_SIZE) { status = -ENODEV; dbg("%s - structure too big, erroring out.", __func__); break; } dbg("%s Type = 0x%x", __func__, rom_desc->Type); /* Skip type 2 record */ ttype = rom_desc->Type & 0x0f; if (ttype != I2C_DESC_TYPE_FIRMWARE_BASIC && ttype != I2C_DESC_TYPE_FIRMWARE_AUTO) { /* Read the descriptor data */ status = read_rom(serial, start_address + sizeof(struct ti_i2c_desc), rom_desc->Size, buffer); if (status) break; status = valid_csum(rom_desc, buffer); if (status) break; } start_address = start_address + sizeof(struct ti_i2c_desc) + rom_desc->Size; } while ((rom_desc->Type != I2C_DESC_TYPE_ION) && (start_address < TI_MAX_I2C_SIZE)); if ((rom_desc->Type != I2C_DESC_TYPE_ION) || (start_address > TI_MAX_I2C_SIZE)) status = -ENODEV; out: kfree(buffer); kfree(rom_desc); return status; } static int get_manuf_info(struct edgeport_serial *serial, __u8 *buffer) { int status; int start_address; struct ti_i2c_desc *rom_desc; struct edge_ti_manuf_descriptor *desc; rom_desc = kmalloc(sizeof(*rom_desc), GFP_KERNEL); if 
(!rom_desc) { dev_err(&serial->serial->dev->dev, "%s - out of memory\n", __func__); return -ENOMEM; } start_address = get_descriptor_addr(serial, I2C_DESC_TYPE_ION, rom_desc); if (!start_address) { dbg("%s - Edge Descriptor not found in I2C", __func__); status = -ENODEV; goto exit; } /* Read the descriptor data */ status = read_rom(serial, start_address+sizeof(struct ti_i2c_desc), rom_desc->Size, buffer); if (status) goto exit; status = valid_csum(rom_desc, buffer); desc = (struct edge_ti_manuf_descriptor *)buffer; dbg("%s - IonConfig 0x%x", __func__, desc->IonConfig); dbg("%s - Version %d", __func__, desc->Version); dbg("%s - Cpu/Board 0x%x", __func__, desc->CpuRev_BoardRev); dbg("%s - NumPorts %d", __func__, desc->NumPorts); dbg("%s - NumVirtualPorts %d", __func__, desc->NumVirtualPorts); dbg("%s - TotalPorts %d", __func__, desc->TotalPorts); exit: kfree(rom_desc); return status; } /* Build firmware header used for firmware update */ static int build_i2c_fw_hdr(__u8 *header, struct device *dev) { __u8 *buffer; int buffer_size; int i; int err; __u8 cs = 0; struct ti_i2c_desc *i2c_header; struct ti_i2c_image_header *img_header; struct ti_i2c_firmware_rec *firmware_rec; const struct firmware *fw; const char *fw_name = "edgeport/down3.bin"; /* In order to update the I2C firmware we must change the type 2 record * to type 0xF2. This will force the UMP to come up in Boot Mode. * Then while in boot mode, the driver will download the latest * firmware (padded to 15.5k) into the UMP ram. And finally when the * device comes back up in download mode the driver will cause the new * firmware to be copied from the UMP Ram to I2C and the firmware will * update the record type from 0xf2 to 0x02. 
*/ /* Allocate a 15.5k buffer + 2 bytes for version number * (Firmware Record) */ buffer_size = (((1024 * 16) - 512 ) + sizeof(struct ti_i2c_firmware_rec)); buffer = kmalloc(buffer_size, GFP_KERNEL); if (!buffer) { dev_err(dev, "%s - out of memory\n", __func__); return -ENOMEM; } // Set entire image of 0xffs memset(buffer, 0xff, buffer_size); err = request_firmware(&fw, fw_name, dev); if (err) { printk(KERN_ERR "Failed to load image \"%s\" err %d\n", fw_name, err); kfree(buffer); return err; } /* Save Download Version Number */ OperationalMajorVersion = fw->data[0]; OperationalMinorVersion = fw->data[1]; OperationalBuildNumber = fw->data[2] | (fw->data[3] << 8); /* Copy version number into firmware record */ firmware_rec = (struct ti_i2c_firmware_rec *)buffer; firmware_rec->Ver_Major = OperationalMajorVersion; firmware_rec->Ver_Minor = OperationalMinorVersion; /* Pointer to fw_down memory image */ img_header = (struct ti_i2c_image_header *)&fw->data[4]; memcpy(buffer + sizeof(struct ti_i2c_firmware_rec), &fw->data[4 + sizeof(struct ti_i2c_image_header)], le16_to_cpu(img_header->Length)); release_firmware(fw); for (i=0; i < buffer_size; i++) { cs = (__u8)(cs + buffer[i]); } kfree(buffer); /* Build new header */ i2c_header = (struct ti_i2c_desc *)header; firmware_rec = (struct ti_i2c_firmware_rec*)i2c_header->Data; i2c_header->Type = I2C_DESC_TYPE_FIRMWARE_BLANK; i2c_header->Size = (__u16)buffer_size; i2c_header->CheckSum = cs; firmware_rec->Ver_Major = OperationalMajorVersion; firmware_rec->Ver_Minor = OperationalMinorVersion; return 0; } /* Try to figure out what type of I2c we have */ static int i2c_type_bootmode(struct edgeport_serial *serial) { int status; u8 *data; data = kmalloc(1, GFP_KERNEL); if (!data) { dev_err(&serial->serial->dev->dev, "%s - out of memory\n", __func__); return -ENOMEM; } /* Try to read type 2 */ status = ti_vread_sync(serial->serial->dev, UMPC_MEMORY_READ, DTK_ADDR_SPACE_I2C_TYPE_II, 0, data, 0x01); if (status) dbg("%s - read 2 status 
error = %d", __func__, status); else dbg("%s - read 2 data = 0x%x", __func__, *data); if ((!status) && (*data == UMP5152 || *data == UMP3410)) { dbg("%s - ROM_TYPE_II", __func__); serial->TI_I2C_Type = DTK_ADDR_SPACE_I2C_TYPE_II; goto out; } /* Try to read type 3 */ status = ti_vread_sync(serial->serial->dev, UMPC_MEMORY_READ, DTK_ADDR_SPACE_I2C_TYPE_III, 0, data, 0x01); if (status) dbg("%s - read 3 status error = %d", __func__, status); else dbg("%s - read 2 data = 0x%x", __func__, *data); if ((!status) && (*data == UMP5152 || *data == UMP3410)) { dbg("%s - ROM_TYPE_III", __func__); serial->TI_I2C_Type = DTK_ADDR_SPACE_I2C_TYPE_III; goto out; } dbg("%s - Unknown", __func__); serial->TI_I2C_Type = DTK_ADDR_SPACE_I2C_TYPE_II; status = -ENODEV; out: kfree(data); return status; } static int bulk_xfer(struct usb_serial *serial, void *buffer, int length, int *num_sent) { int status; status = usb_bulk_msg(serial->dev, usb_sndbulkpipe(serial->dev, serial->port[0]->bulk_out_endpointAddress), buffer, length, num_sent, 1000); return status; } /* Download given firmware image to the device (IN BOOT MODE) */ static int download_code(struct edgeport_serial *serial, __u8 *image, int image_length) { int status = 0; int pos; int transfer; int done; /* Transfer firmware image */ for (pos = 0; pos < image_length; ) { /* Read the next buffer from file */ transfer = image_length - pos; if (transfer > EDGE_FW_BULK_MAX_PACKET_SIZE) transfer = EDGE_FW_BULK_MAX_PACKET_SIZE; /* Transfer data */ status = bulk_xfer(serial->serial, &image[pos], transfer, &done); if (status) break; /* Advance buffer pointer */ pos += done; } return status; } /* FIXME!!! 
*/ static int config_boot_dev(struct usb_device *dev) { return 0; } static int ti_cpu_rev(struct edge_ti_manuf_descriptor *desc) { return TI_GET_CPU_REVISION(desc->CpuRev_BoardRev); } /** * DownloadTIFirmware - Download run-time operating firmware to the TI5052 * * This routine downloads the main operating code into the TI5052, using the * boot code already burned into E2PROM or ROM. */ static int download_fw(struct edgeport_serial *serial) { struct device *dev = &serial->serial->dev->dev; int status = 0; int start_address; struct edge_ti_manuf_descriptor *ti_manuf_desc; struct usb_interface_descriptor *interface; int download_cur_ver; int download_new_ver; /* This routine is entered by both the BOOT mode and the Download mode * We can determine which code is running by the reading the config * descriptor and if we have only one bulk pipe it is in boot mode */ serial->product_info.hardware_type = HARDWARE_TYPE_TIUMP; /* Default to type 2 i2c */ serial->TI_I2C_Type = DTK_ADDR_SPACE_I2C_TYPE_II; status = choose_config(serial->serial->dev); if (status) return status; interface = &serial->serial->interface->cur_altsetting->desc; if (!interface) { dev_err(dev, "%s - no interface set, error!\n", __func__); return -ENODEV; } /* * Setup initial mode -- the default mode 0 is TI_MODE_CONFIGURING * if we have more than one endpoint we are definitely in download * mode */ if (interface->bNumEndpoints > 1) serial->product_info.TiMode = TI_MODE_DOWNLOAD; else /* Otherwise we will remain in configuring mode */ serial->product_info.TiMode = TI_MODE_CONFIGURING; /********************************************************************/ /* Download Mode */ /********************************************************************/ if (serial->product_info.TiMode == TI_MODE_DOWNLOAD) { struct ti_i2c_desc *rom_desc; dbg("%s - RUNNING IN DOWNLOAD MODE", __func__); status = check_i2c_image(serial); if (status) { dbg("%s - DOWNLOAD MODE -- BAD I2C", __func__); return status; } /* Validate 
Hardware version number * Read Manufacturing Descriptor from TI Based Edgeport */ ti_manuf_desc = kmalloc(sizeof(*ti_manuf_desc), GFP_KERNEL); if (!ti_manuf_desc) { dev_err(dev, "%s - out of memory.\n", __func__); return -ENOMEM; } status = get_manuf_info(serial, (__u8 *)ti_manuf_desc); if (status) { kfree(ti_manuf_desc); return status; } /* Check version number of ION descriptor */ if (!ignore_cpu_rev && ti_cpu_rev(ti_manuf_desc) < 2) { dbg("%s - Wrong CPU Rev %d (Must be 2)", __func__, ti_cpu_rev(ti_manuf_desc)); kfree(ti_manuf_desc); return -EINVAL; } rom_desc = kmalloc(sizeof(*rom_desc), GFP_KERNEL); if (!rom_desc) { dev_err(dev, "%s - out of memory.\n", __func__); kfree(ti_manuf_desc); return -ENOMEM; } /* Search for type 2 record (firmware record) */ start_address = get_descriptor_addr(serial, I2C_DESC_TYPE_FIRMWARE_BASIC, rom_desc); if (start_address != 0) { struct ti_i2c_firmware_rec *firmware_version; u8 *record; dbg("%s - Found Type FIRMWARE (Type 2) record", __func__); firmware_version = kmalloc(sizeof(*firmware_version), GFP_KERNEL); if (!firmware_version) { dev_err(dev, "%s - out of memory.\n", __func__); kfree(rom_desc); kfree(ti_manuf_desc); return -ENOMEM; } /* Validate version number * Read the descriptor data */ status = read_rom(serial, start_address + sizeof(struct ti_i2c_desc), sizeof(struct ti_i2c_firmware_rec), (__u8 *)firmware_version); if (status) { kfree(firmware_version); kfree(rom_desc); kfree(ti_manuf_desc); return status; } /* Check version number of download with current version in I2c */ download_cur_ver = (firmware_version->Ver_Major << 8) + (firmware_version->Ver_Minor); download_new_ver = (OperationalMajorVersion << 8) + (OperationalMinorVersion); dbg("%s - >> FW Versions Device %d.%d Driver %d.%d", __func__, firmware_version->Ver_Major, firmware_version->Ver_Minor, OperationalMajorVersion, OperationalMinorVersion); /* Check if we have an old version in the I2C and update if necessary */ if (download_cur_ver < download_new_ver) { 
dbg("%s - Update I2C dld from %d.%d to %d.%d", __func__, firmware_version->Ver_Major, firmware_version->Ver_Minor, OperationalMajorVersion, OperationalMinorVersion); record = kmalloc(1, GFP_KERNEL); if (!record) { dev_err(dev, "%s - out of memory.\n", __func__); kfree(firmware_version); kfree(rom_desc); kfree(ti_manuf_desc); return -ENOMEM; } /* In order to update the I2C firmware we must * change the type 2 record to type 0xF2. This * will force the UMP to come up in Boot Mode. * Then while in boot mode, the driver will * download the latest firmware (padded to * 15.5k) into the UMP ram. Finally when the * device comes back up in download mode the * driver will cause the new firmware to be * copied from the UMP Ram to I2C and the * firmware will update the record type from * 0xf2 to 0x02. */ *record = I2C_DESC_TYPE_FIRMWARE_BLANK; /* Change the I2C Firmware record type to 0xf2 to trigger an update */ status = write_rom(serial, start_address, sizeof(*record), record); if (status) { kfree(record); kfree(firmware_version); kfree(rom_desc); kfree(ti_manuf_desc); return status; } /* verify the write -- must do this in order * for write to complete before we do the * hardware reset */ status = read_rom(serial, start_address, sizeof(*record), record); if (status) { kfree(record); kfree(firmware_version); kfree(rom_desc); kfree(ti_manuf_desc); return status; } if (*record != I2C_DESC_TYPE_FIRMWARE_BLANK) { dev_err(dev, "%s - error resetting device\n", __func__); kfree(record); kfree(firmware_version); kfree(rom_desc); kfree(ti_manuf_desc); return -ENODEV; } dbg("%s - HARDWARE RESET", __func__); /* Reset UMP -- Back to BOOT MODE */ status = ti_vsend_sync(serial->serial->dev, UMPC_HARDWARE_RESET, 0, 0, NULL, 0); dbg("%s - HARDWARE RESET return %d", __func__, status); /* return an error on purpose. 
*/ kfree(record); kfree(firmware_version); kfree(rom_desc); kfree(ti_manuf_desc); return -ENODEV; } kfree(firmware_version); } /* Search for type 0xF2 record (firmware blank record) */ else if ((start_address = get_descriptor_addr(serial, I2C_DESC_TYPE_FIRMWARE_BLANK, rom_desc)) != 0) { #define HEADER_SIZE (sizeof(struct ti_i2c_desc) + \ sizeof(struct ti_i2c_firmware_rec)) __u8 *header; __u8 *vheader; header = kmalloc(HEADER_SIZE, GFP_KERNEL); if (!header) { dev_err(dev, "%s - out of memory.\n", __func__); kfree(rom_desc); kfree(ti_manuf_desc); return -ENOMEM; } vheader = kmalloc(HEADER_SIZE, GFP_KERNEL); if (!vheader) { dev_err(dev, "%s - out of memory.\n", __func__); kfree(header); kfree(rom_desc); kfree(ti_manuf_desc); return -ENOMEM; } dbg("%s - Found Type BLANK FIRMWARE (Type F2) record", __func__); /* * In order to update the I2C firmware we must change * the type 2 record to type 0xF2. This will force the * UMP to come up in Boot Mode. Then while in boot * mode, the driver will download the latest firmware * (padded to 15.5k) into the UMP ram. Finally when the * device comes back up in download mode the driver * will cause the new firmware to be copied from the * UMP Ram to I2C and the firmware will update the * record type from 0xf2 to 0x02. 
*/ status = build_i2c_fw_hdr(header, dev); if (status) { kfree(vheader); kfree(header); kfree(rom_desc); kfree(ti_manuf_desc); return -EINVAL; } /* Update I2C with type 0xf2 record with correct size and checksum */ status = write_rom(serial, start_address, HEADER_SIZE, header); if (status) { kfree(vheader); kfree(header); kfree(rom_desc); kfree(ti_manuf_desc); return -EINVAL; } /* verify the write -- must do this in order for write to complete before we do the hardware reset */ status = read_rom(serial, start_address, HEADER_SIZE, vheader); if (status) { dbg("%s - can't read header back", __func__); kfree(vheader); kfree(header); kfree(rom_desc); kfree(ti_manuf_desc); return status; } if (memcmp(vheader, header, HEADER_SIZE)) { dbg("%s - write download record failed", __func__); kfree(vheader); kfree(header); kfree(rom_desc); kfree(ti_manuf_desc); return -EINVAL; } kfree(vheader); kfree(header); dbg("%s - Start firmware update", __func__); /* Tell firmware to copy download image into I2C */ status = ti_vsend_sync(serial->serial->dev, UMPC_COPY_DNLD_TO_I2C, 0, 0, NULL, 0); dbg("%s - Update complete 0x%x", __func__, status); if (status) { dev_err(dev, "%s - UMPC_COPY_DNLD_TO_I2C failed\n", __func__); kfree(rom_desc); kfree(ti_manuf_desc); return status; } } // The device is running the download code kfree(rom_desc); kfree(ti_manuf_desc); return 0; } /********************************************************************/ /* Boot Mode */ /********************************************************************/ dbg("%s - RUNNING IN BOOT MODE", __func__); /* Configure the TI device so we can use the BULK pipes for download */ status = config_boot_dev(serial->serial->dev); if (status) return status; if (le16_to_cpu(serial->serial->dev->descriptor.idVendor) != USB_VENDOR_ID_ION) { dbg("%s - VID = 0x%x", __func__, le16_to_cpu(serial->serial->dev->descriptor.idVendor)); serial->TI_I2C_Type = DTK_ADDR_SPACE_I2C_TYPE_II; goto stayinbootmode; } /* We have an ION device (I2c Must be 
programmed) Determine I2C image type */ if (i2c_type_bootmode(serial)) goto stayinbootmode; /* Check for ION Vendor ID and that the I2C is valid */ if (!check_i2c_image(serial)) { struct ti_i2c_image_header *header; int i; __u8 cs = 0; __u8 *buffer; int buffer_size; int err; const struct firmware *fw; const char *fw_name = "edgeport/down3.bin"; /* Validate Hardware version number * Read Manufacturing Descriptor from TI Based Edgeport */ ti_manuf_desc = kmalloc(sizeof(*ti_manuf_desc), GFP_KERNEL); if (!ti_manuf_desc) { dev_err(dev, "%s - out of memory.\n", __func__); return -ENOMEM; } status = get_manuf_info(serial, (__u8 *)ti_manuf_desc); if (status) { kfree(ti_manuf_desc); goto stayinbootmode; } /* Check for version 2 */ if (!ignore_cpu_rev && ti_cpu_rev(ti_manuf_desc) < 2) { dbg("%s - Wrong CPU Rev %d (Must be 2)", __func__, ti_cpu_rev(ti_manuf_desc)); kfree(ti_manuf_desc); goto stayinbootmode; } kfree(ti_manuf_desc); /* * In order to update the I2C firmware we must change the type * 2 record to type 0xF2. This will force the UMP to come up * in Boot Mode. Then while in boot mode, the driver will * download the latest firmware (padded to 15.5k) into the * UMP ram. Finally when the device comes back up in download * mode the driver will cause the new firmware to be copied * from the UMP Ram to I2C and the firmware will update the * record type from 0xf2 to 0x02. * * Do we really have to copy the whole firmware image, * or could we do this in place! 
*/ /* Allocate a 15.5k buffer + 3 byte header */ buffer_size = (((1024 * 16) - 512) + sizeof(struct ti_i2c_image_header)); buffer = kmalloc(buffer_size, GFP_KERNEL); if (!buffer) { dev_err(dev, "%s - out of memory\n", __func__); return -ENOMEM; } /* Initialize the buffer to 0xff (pad the buffer) */ memset(buffer, 0xff, buffer_size); err = request_firmware(&fw, fw_name, dev); if (err) { printk(KERN_ERR "Failed to load image \"%s\" err %d\n", fw_name, err); kfree(buffer); return err; } memcpy(buffer, &fw->data[4], fw->size - 4); release_firmware(fw); for (i = sizeof(struct ti_i2c_image_header); i < buffer_size; i++) { cs = (__u8)(cs + buffer[i]); } header = (struct ti_i2c_image_header *)buffer; /* update length and checksum after padding */ header->Length = cpu_to_le16((__u16)(buffer_size - sizeof(struct ti_i2c_image_header))); header->CheckSum = cs; /* Download the operational code */ dbg("%s - Downloading operational code image (TI UMP)", __func__); status = download_code(serial, buffer, buffer_size); kfree(buffer); if (status) { dbg("%s - Error downloading operational code image", __func__); return status; } /* Device will reboot */ serial->product_info.TiMode = TI_MODE_TRANSITIONING; dbg("%s - Download successful -- Device rebooting...", __func__); /* return an error on purpose */ return -ENODEV; } stayinbootmode: /* Eprom is invalid or blank stay in boot mode */ dbg("%s - STAYING IN BOOT MODE", __func__); serial->product_info.TiMode = TI_MODE_BOOT; return 0; } static int ti_do_config(struct edgeport_port *port, int feature, int on) { int port_number = port->port->number - port->port->serial->minor; on = !!on; /* 1 or 0 not bitmask */ return send_cmd(port->port->serial->dev, feature, (__u8)(UMPM_UART1_PORT + port_number), on, NULL, 0); } static int restore_mcr(struct edgeport_port *port, __u8 mcr) { int status = 0; dbg("%s - %x", __func__, mcr); status = ti_do_config(port, UMPC_SET_CLR_DTR, mcr & MCR_DTR); if (status) return status; status = ti_do_config(port, 
UMPC_SET_CLR_RTS, mcr & MCR_RTS); if (status) return status; return ti_do_config(port, UMPC_SET_CLR_LOOPBACK, mcr & MCR_LOOPBACK); } /* Convert TI LSR to standard UART flags */ static __u8 map_line_status(__u8 ti_lsr) { __u8 lsr = 0; #define MAP_FLAG(flagUmp, flagUart) \ if (ti_lsr & flagUmp) \ lsr |= flagUart; MAP_FLAG(UMP_UART_LSR_OV_MASK, LSR_OVER_ERR) /* overrun */ MAP_FLAG(UMP_UART_LSR_PE_MASK, LSR_PAR_ERR) /* parity error */ MAP_FLAG(UMP_UART_LSR_FE_MASK, LSR_FRM_ERR) /* framing error */ MAP_FLAG(UMP_UART_LSR_BR_MASK, LSR_BREAK) /* break detected */ MAP_FLAG(UMP_UART_LSR_RX_MASK, LSR_RX_AVAIL) /* rx data available */ MAP_FLAG(UMP_UART_LSR_TX_MASK, LSR_TX_EMPTY) /* tx hold reg empty */ #undef MAP_FLAG return lsr; } static void handle_new_msr(struct edgeport_port *edge_port, __u8 msr) { struct async_icount *icount; struct tty_struct *tty; dbg("%s - %02x", __func__, msr); if (msr & (EDGEPORT_MSR_DELTA_CTS | EDGEPORT_MSR_DELTA_DSR | EDGEPORT_MSR_DELTA_RI | EDGEPORT_MSR_DELTA_CD)) { icount = &edge_port->icount; /* update input line counters */ if (msr & EDGEPORT_MSR_DELTA_CTS) icount->cts++; if (msr & EDGEPORT_MSR_DELTA_DSR) icount->dsr++; if (msr & EDGEPORT_MSR_DELTA_CD) icount->dcd++; if (msr & EDGEPORT_MSR_DELTA_RI) icount->rng++; wake_up_interruptible(&edge_port->delta_msr_wait); } /* Save the new modem status */ edge_port->shadow_msr = msr & 0xf0; tty = tty_port_tty_get(&edge_port->port->port); /* handle CTS flow control */ if (tty && C_CRTSCTS(tty)) { if (msr & EDGEPORT_MSR_CTS) { tty->hw_stopped = 0; tty_wakeup(tty); } else { tty->hw_stopped = 1; } } tty_kref_put(tty); } static void handle_new_lsr(struct edgeport_port *edge_port, int lsr_data, __u8 lsr, __u8 data) { struct async_icount *icount; __u8 new_lsr = (__u8)(lsr & (__u8)(LSR_OVER_ERR | LSR_PAR_ERR | LSR_FRM_ERR | LSR_BREAK)); struct tty_struct *tty; dbg("%s - %02x", __func__, new_lsr); edge_port->shadow_lsr = lsr; if (new_lsr & LSR_BREAK) /* * Parity and Framing errors only count if they * occur 
exclusive of a break being received. */ new_lsr &= (__u8)(LSR_OVER_ERR | LSR_BREAK); /* Place LSR data byte into Rx buffer */ if (lsr_data) { tty = tty_port_tty_get(&edge_port->port->port); if (tty) { edge_tty_recv(&edge_port->port->dev, tty, &data, 1); tty_kref_put(tty); } } /* update input line counters */ icount = &edge_port->icount; if (new_lsr & LSR_BREAK) icount->brk++; if (new_lsr & LSR_OVER_ERR) icount->overrun++; if (new_lsr & LSR_PAR_ERR) icount->parity++; if (new_lsr & LSR_FRM_ERR) icount->frame++; } static void edge_interrupt_callback(struct urb *urb) { struct edgeport_serial *edge_serial = urb->context; struct usb_serial_port *port; struct edgeport_port *edge_port; unsigned char *data = urb->transfer_buffer; int length = urb->actual_length; int port_number; int function; int retval; __u8 lsr; __u8 msr; int status = urb->status; dbg("%s", __func__); switch (status) { case 0: /* success */ break; case -ECONNRESET: case -ENOENT: case -ESHUTDOWN: /* this urb is terminated, clean up */ dbg("%s - urb shutting down with status: %d", __func__, status); return; default: dev_err(&urb->dev->dev, "%s - nonzero urb status received: " "%d\n", __func__, status); goto exit; } if (!length) { dbg("%s - no data in urb", __func__); goto exit; } usb_serial_debug_data(debug, &edge_serial->serial->dev->dev, __func__, length, data); if (length != 2) { dbg("%s - expecting packet of size 2, got %d", __func__, length); goto exit; } port_number = TIUMP_GET_PORT_FROM_CODE(data[0]); function = TIUMP_GET_FUNC_FROM_CODE(data[0]); dbg("%s - port_number %d, function %d, info 0x%x", __func__, port_number, function, data[1]); port = edge_serial->serial->port[port_number]; edge_port = usb_get_serial_port_data(port); if (!edge_port) { dbg("%s - edge_port not found", __func__); return; } switch (function) { case TIUMP_INTERRUPT_CODE_LSR: lsr = map_line_status(data[1]); if (lsr & UMP_UART_LSR_DATA_MASK) { /* Save the LSR event for bulk read completion routine */ dbg("%s - LSR Event Port %u 
LSR Status = %02x", __func__, port_number, lsr); edge_port->lsr_event = 1; edge_port->lsr_mask = lsr; } else { dbg("%s - ===== Port %d LSR Status = %02x ======", __func__, port_number, lsr); handle_new_lsr(edge_port, 0, lsr, 0); } break; case TIUMP_INTERRUPT_CODE_MSR: /* MSR */ /* Copy MSR from UMP */ msr = data[1]; dbg("%s - ===== Port %u MSR Status = %02x ======", __func__, port_number, msr); handle_new_msr(edge_port, msr); break; default: dev_err(&urb->dev->dev, "%s - Unknown Interrupt code from UMP %x\n", __func__, data[1]); break; } exit: retval = usb_submit_urb(urb, GFP_ATOMIC); if (retval) dev_err(&urb->dev->dev, "%s - usb_submit_urb failed with result %d\n", __func__, retval); } static void edge_bulk_in_callback(struct urb *urb) { struct edgeport_port *edge_port = urb->context; unsigned char *data = urb->transfer_buffer; struct tty_struct *tty; int retval = 0; int port_number; int status = urb->status; dbg("%s", __func__); switch (status) { case 0: /* success */ break; case -ECONNRESET: case -ENOENT: case -ESHUTDOWN: /* this urb is terminated, clean up */ dbg("%s - urb shutting down with status: %d", __func__, status); return; default: dev_err(&urb->dev->dev, "%s - nonzero read bulk status received: %d\n", __func__, status); } if (status == -EPIPE) goto exit; if (status) { dev_err(&urb->dev->dev, "%s - stopping read!\n", __func__); return; } port_number = edge_port->port->number - edge_port->port->serial->minor; if (edge_port->lsr_event) { edge_port->lsr_event = 0; dbg("%s ===== Port %u LSR Status = %02x, Data = %02x ======", __func__, port_number, edge_port->lsr_mask, *data); handle_new_lsr(edge_port, 1, edge_port->lsr_mask, *data); /* Adjust buffer length/pointer */ --urb->actual_length; ++data; } tty = tty_port_tty_get(&edge_port->port->port); if (tty && urb->actual_length) { usb_serial_debug_data(debug, &edge_port->port->dev, __func__, urb->actual_length, data); if (edge_port->close_pending) dbg("%s - close pending, dropping data on the floor", 
__func__); else edge_tty_recv(&edge_port->port->dev, tty, data, urb->actual_length); edge_port->icount.rx += urb->actual_length; } tty_kref_put(tty); exit: /* continue read unless stopped */ spin_lock(&edge_port->ep_lock); if (edge_port->ep_read_urb_state == EDGE_READ_URB_RUNNING) retval = usb_submit_urb(urb, GFP_ATOMIC); else if (edge_port->ep_read_urb_state == EDGE_READ_URB_STOPPING) edge_port->ep_read_urb_state = EDGE_READ_URB_STOPPED; spin_unlock(&edge_port->ep_lock); if (retval) dev_err(&urb->dev->dev, "%s - usb_submit_urb failed with result %d\n", __func__, retval); } static void edge_tty_recv(struct device *dev, struct tty_struct *tty, unsigned char *data, int length) { int queued; queued = tty_insert_flip_string(tty, data, length); if (queued < length) dev_err(dev, "%s - dropping data, %d bytes lost\n", __func__, length - queued); tty_flip_buffer_push(tty); } static void edge_bulk_out_callback(struct urb *urb) { struct usb_serial_port *port = urb->context; struct edgeport_port *edge_port = usb_get_serial_port_data(port); int status = urb->status; struct tty_struct *tty; dbg("%s - port %d", __func__, port->number); edge_port->ep_write_urb_in_use = 0; switch (status) { case 0: /* success */ break; case -ECONNRESET: case -ENOENT: case -ESHUTDOWN: /* this urb is terminated, clean up */ dbg("%s - urb shutting down with status: %d", __func__, status); return; default: dev_err_console(port, "%s - nonzero write bulk status " "received: %d\n", __func__, status); } /* send any buffered data */ tty = tty_port_tty_get(&port->port); edge_send(tty); tty_kref_put(tty); } static int edge_open(struct tty_struct *tty, struct usb_serial_port *port) { struct edgeport_port *edge_port = usb_get_serial_port_data(port); struct edgeport_serial *edge_serial; struct usb_device *dev; struct urb *urb; int port_number; int status; u16 open_settings; u8 transaction_timeout; dbg("%s - port %d", __func__, port->number); if (edge_port == NULL) return -ENODEV; port_number = port->number - 
port->serial->minor; switch (port_number) { case 0: edge_port->uart_base = UMPMEM_BASE_UART1; edge_port->dma_address = UMPD_OEDB1_ADDRESS; break; case 1: edge_port->uart_base = UMPMEM_BASE_UART2; edge_port->dma_address = UMPD_OEDB2_ADDRESS; break; default: dev_err(&port->dev, "Unknown port number!!!\n"); return -ENODEV; } dbg("%s - port_number = %d, uart_base = %04x, dma_address = %04x", __func__, port_number, edge_port->uart_base, edge_port->dma_address); dev = port->serial->dev; memset(&(edge_port->icount), 0x00, sizeof(edge_port->icount)); init_waitqueue_head(&edge_port->delta_msr_wait); /* turn off loopback */ status = ti_do_config(edge_port, UMPC_SET_CLR_LOOPBACK, 0); if (status) { dev_err(&port->dev, "%s - cannot send clear loopback command, %d\n", __func__, status); return status; } /* set up the port settings */ if (tty) edge_set_termios(tty, port, tty->termios); /* open up the port */ /* milliseconds to timeout for DMA transfer */ transaction_timeout = 2; edge_port->ump_read_timeout = max(20, ((transaction_timeout * 3) / 2)); /* milliseconds to timeout for DMA transfer */ open_settings = (u8)(UMP_DMA_MODE_CONTINOUS | UMP_PIPE_TRANS_TIMEOUT_ENA | (transaction_timeout << 2)); dbg("%s - Sending UMPC_OPEN_PORT", __func__); /* Tell TI to open and start the port */ status = send_cmd(dev, UMPC_OPEN_PORT, (u8)(UMPM_UART1_PORT + port_number), open_settings, NULL, 0); if (status) { dev_err(&port->dev, "%s - cannot send open command, %d\n", __func__, status); return status; } /* Start the DMA? 
*/ status = send_cmd(dev, UMPC_START_PORT, (u8)(UMPM_UART1_PORT + port_number), 0, NULL, 0); if (status) { dev_err(&port->dev, "%s - cannot send start DMA command, %d\n", __func__, status); return status; } /* Clear TX and RX buffers in UMP */ status = purge_port(port, UMP_PORT_DIR_OUT | UMP_PORT_DIR_IN); if (status) { dev_err(&port->dev, "%s - cannot send clear buffers command, %d\n", __func__, status); return status; } /* Read Initial MSR */ status = ti_vread_sync(dev, UMPC_READ_MSR, 0, (__u16)(UMPM_UART1_PORT + port_number), &edge_port->shadow_msr, 1); if (status) { dev_err(&port->dev, "%s - cannot send read MSR command, %d\n", __func__, status); return status; } dbg("ShadowMSR 0x%X", edge_port->shadow_msr); /* Set Initial MCR */ edge_port->shadow_mcr = MCR_RTS | MCR_DTR; dbg("ShadowMCR 0x%X", edge_port->shadow_mcr); edge_serial = edge_port->edge_serial; if (mutex_lock_interruptible(&edge_serial->es_lock)) return -ERESTARTSYS; if (edge_serial->num_ports_open == 0) { /* we are the first port to open, post the interrupt urb */ urb = edge_serial->serial->port[0]->interrupt_in_urb; if (!urb) { dev_err(&port->dev, "%s - no interrupt urb present, exiting\n", __func__); status = -EINVAL; goto release_es_lock; } urb->context = edge_serial; status = usb_submit_urb(urb, GFP_KERNEL); if (status) { dev_err(&port->dev, "%s - usb_submit_urb failed with value %d\n", __func__, status); goto release_es_lock; } } /* * reset the data toggle on the bulk endpoints to work around bug in * host controllers where things get out of sync some times */ usb_clear_halt(dev, port->write_urb->pipe); usb_clear_halt(dev, port->read_urb->pipe); /* start up our bulk read urb */ urb = port->read_urb; if (!urb) { dev_err(&port->dev, "%s - no read urb present, exiting\n", __func__); status = -EINVAL; goto unlink_int_urb; } edge_port->ep_read_urb_state = EDGE_READ_URB_RUNNING; urb->context = edge_port; status = usb_submit_urb(urb, GFP_KERNEL); if (status) { dev_err(&port->dev, "%s - read bulk 
usb_submit_urb failed with value %d\n", __func__, status); goto unlink_int_urb; } ++edge_serial->num_ports_open; dbg("%s - exited", __func__); goto release_es_lock; unlink_int_urb: if (edge_port->edge_serial->num_ports_open == 0) usb_kill_urb(port->serial->port[0]->interrupt_in_urb); release_es_lock: mutex_unlock(&edge_serial->es_lock); return status; } static void edge_close(struct usb_serial_port *port) { struct edgeport_serial *edge_serial; struct edgeport_port *edge_port; int port_number; int status; dbg("%s - port %d", __func__, port->number); edge_serial = usb_get_serial_data(port->serial); edge_port = usb_get_serial_port_data(port); if (edge_serial == NULL || edge_port == NULL) return; /* The bulkreadcompletion routine will check * this flag and dump add read data */ edge_port->close_pending = 1; /* chase the port close and flush */ chase_port(edge_port, (HZ * closing_wait) / 100, 1); usb_kill_urb(port->read_urb); usb_kill_urb(port->write_urb); edge_port->ep_write_urb_in_use = 0; /* assuming we can still talk to the device, * send a close port command to it */ dbg("%s - send umpc_close_port", __func__); port_number = port->number - port->serial->minor; status = send_cmd(port->serial->dev, UMPC_CLOSE_PORT, (__u8)(UMPM_UART1_PORT + port_number), 0, NULL, 0); mutex_lock(&edge_serial->es_lock); --edge_port->edge_serial->num_ports_open; if (edge_port->edge_serial->num_ports_open <= 0) { /* last port is now closed, let's shut down our interrupt urb */ usb_kill_urb(port->serial->port[0]->interrupt_in_urb); edge_port->edge_serial->num_ports_open = 0; } mutex_unlock(&edge_serial->es_lock); edge_port->close_pending = 0; dbg("%s - exited", __func__); } static int edge_write(struct tty_struct *tty, struct usb_serial_port *port, const unsigned char *data, int count) { struct edgeport_port *edge_port = usb_get_serial_port_data(port); dbg("%s - port %d", __func__, port->number); if (count == 0) { dbg("%s - write request of 0 bytes", __func__); return 0; } if (edge_port == 
NULL) return -ENODEV; if (edge_port->close_pending == 1) return -ENODEV; count = kfifo_in_locked(&edge_port->write_fifo, data, count, &edge_port->ep_lock); edge_send(tty); return count; } static void edge_send(struct tty_struct *tty) { struct usb_serial_port *port = tty->driver_data; int count, result; struct edgeport_port *edge_port = usb_get_serial_port_data(port); unsigned long flags; dbg("%s - port %d", __func__, port->number); spin_lock_irqsave(&edge_port->ep_lock, flags); if (edge_port->ep_write_urb_in_use) { spin_unlock_irqrestore(&edge_port->ep_lock, flags); return; } count = kfifo_out(&edge_port->write_fifo, port->write_urb->transfer_buffer, port->bulk_out_size); if (count == 0) { spin_unlock_irqrestore(&edge_port->ep_lock, flags); return; } edge_port->ep_write_urb_in_use = 1; spin_unlock_irqrestore(&edge_port->ep_lock, flags); usb_serial_debug_data(debug, &port->dev, __func__, count, port->write_urb->transfer_buffer); /* set up our urb */ port->write_urb->transfer_buffer_length = count; /* send the data out the bulk port */ result = usb_submit_urb(port->write_urb, GFP_ATOMIC); if (result) { dev_err_console(port, "%s - failed submitting write urb, error %d\n", __func__, result); edge_port->ep_write_urb_in_use = 0; /* TODO: reschedule edge_send */ } else edge_port->icount.tx += count; /* wakeup any process waiting for writes to complete */ /* there is now more room in the buffer for new writes */ if (tty) tty_wakeup(tty); } static int edge_write_room(struct tty_struct *tty) { struct usb_serial_port *port = tty->driver_data; struct edgeport_port *edge_port = usb_get_serial_port_data(port); int room = 0; unsigned long flags; dbg("%s - port %d", __func__, port->number); if (edge_port == NULL) return 0; if (edge_port->close_pending == 1) return 0; spin_lock_irqsave(&edge_port->ep_lock, flags); room = kfifo_avail(&edge_port->write_fifo); spin_unlock_irqrestore(&edge_port->ep_lock, flags); dbg("%s - returns %d", __func__, room); return room; } static int 
edge_chars_in_buffer(struct tty_struct *tty) { struct usb_serial_port *port = tty->driver_data; struct edgeport_port *edge_port = usb_get_serial_port_data(port); int chars = 0; unsigned long flags; dbg("%s - port %d", __func__, port->number); if (edge_port == NULL) return 0; if (edge_port->close_pending == 1) return 0; spin_lock_irqsave(&edge_port->ep_lock, flags); chars = kfifo_len(&edge_port->write_fifo); spin_unlock_irqrestore(&edge_port->ep_lock, flags); dbg("%s - returns %d", __func__, chars); return chars; } static void edge_throttle(struct tty_struct *tty) { struct usb_serial_port *port = tty->driver_data; struct edgeport_port *edge_port = usb_get_serial_port_data(port); int status; dbg("%s - port %d", __func__, port->number); if (edge_port == NULL) return; /* if we are implementing XON/XOFF, send the stop character */ if (I_IXOFF(tty)) { unsigned char stop_char = STOP_CHAR(tty); status = edge_write(tty, port, &stop_char, 1); if (status <= 0) { dev_err(&port->dev, "%s - failed to write stop character, %d\n", __func__, status); } } /* if we are implementing RTS/CTS, stop reads */ /* and the Edgeport will clear the RTS line */ if (C_CRTSCTS(tty)) stop_read(edge_port); } static void edge_unthrottle(struct tty_struct *tty) { struct usb_serial_port *port = tty->driver_data; struct edgeport_port *edge_port = usb_get_serial_port_data(port); int status; dbg("%s - port %d", __func__, port->number); if (edge_port == NULL) return; /* if we are implementing XON/XOFF, send the start character */ if (I_IXOFF(tty)) { unsigned char start_char = START_CHAR(tty); status = edge_write(tty, port, &start_char, 1); if (status <= 0) { dev_err(&port->dev, "%s - failed to write start character, %d\n", __func__, status); } } /* if we are implementing RTS/CTS, restart reads */ /* are the Edgeport will assert the RTS line */ if (C_CRTSCTS(tty)) { status = restart_read(edge_port); if (status) dev_err(&port->dev, "%s - read bulk usb_submit_urb failed: %d\n", __func__, status); } } static 
void stop_read(struct edgeport_port *edge_port) { unsigned long flags; spin_lock_irqsave(&edge_port->ep_lock, flags); if (edge_port->ep_read_urb_state == EDGE_READ_URB_RUNNING) edge_port->ep_read_urb_state = EDGE_READ_URB_STOPPING; edge_port->shadow_mcr &= ~MCR_RTS; spin_unlock_irqrestore(&edge_port->ep_lock, flags); } static int restart_read(struct edgeport_port *edge_port) { struct urb *urb; int status = 0; unsigned long flags; spin_lock_irqsave(&edge_port->ep_lock, flags); if (edge_port->ep_read_urb_state == EDGE_READ_URB_STOPPED) { urb = edge_port->port->read_urb; status = usb_submit_urb(urb, GFP_ATOMIC); } edge_port->ep_read_urb_state = EDGE_READ_URB_RUNNING; edge_port->shadow_mcr |= MCR_RTS; spin_unlock_irqrestore(&edge_port->ep_lock, flags); return status; } static void change_port_settings(struct tty_struct *tty, struct edgeport_port *edge_port, struct ktermios *old_termios) { struct ump_uart_config *config; int baud; unsigned cflag; int status; int port_number = edge_port->port->number - edge_port->port->serial->minor; dbg("%s - port %d", __func__, edge_port->port->number); config = kmalloc (sizeof (*config), GFP_KERNEL); if (!config) { *tty->termios = *old_termios; dev_err(&edge_port->port->dev, "%s - out of memory\n", __func__); return; } cflag = tty->termios->c_cflag; config->wFlags = 0; /* These flags must be set */ config->wFlags |= UMP_MASK_UART_FLAGS_RECEIVE_MS_INT; config->wFlags |= UMP_MASK_UART_FLAGS_AUTO_START_ON_ERR; config->bUartMode = (__u8)(edge_port->bUartMode); switch (cflag & CSIZE) { case CS5: config->bDataBits = UMP_UART_CHAR5BITS; dbg("%s - data bits = 5", __func__); break; case CS6: config->bDataBits = UMP_UART_CHAR6BITS; dbg("%s - data bits = 6", __func__); break; case CS7: config->bDataBits = UMP_UART_CHAR7BITS; dbg("%s - data bits = 7", __func__); break; default: case CS8: config->bDataBits = UMP_UART_CHAR8BITS; dbg("%s - data bits = 8", __func__); break; } if (cflag & PARENB) { if (cflag & PARODD) { config->wFlags |= 
UMP_MASK_UART_FLAGS_PARITY; config->bParity = UMP_UART_ODDPARITY; dbg("%s - parity = odd", __func__); } else { config->wFlags |= UMP_MASK_UART_FLAGS_PARITY; config->bParity = UMP_UART_EVENPARITY; dbg("%s - parity = even", __func__); } } else { config->bParity = UMP_UART_NOPARITY; dbg("%s - parity = none", __func__); } if (cflag & CSTOPB) { config->bStopBits = UMP_UART_STOPBIT2; dbg("%s - stop bits = 2", __func__); } else { config->bStopBits = UMP_UART_STOPBIT1; dbg("%s - stop bits = 1", __func__); } /* figure out the flow control settings */ if (cflag & CRTSCTS) { config->wFlags |= UMP_MASK_UART_FLAGS_OUT_X_CTS_FLOW; config->wFlags |= UMP_MASK_UART_FLAGS_RTS_FLOW; dbg("%s - RTS/CTS is enabled", __func__); } else { dbg("%s - RTS/CTS is disabled", __func__); tty->hw_stopped = 0; restart_read(edge_port); } /* if we are implementing XON/XOFF, set the start and stop character in the device */ config->cXon = START_CHAR(tty); config->cXoff = STOP_CHAR(tty); /* if we are implementing INBOUND XON/XOFF */ if (I_IXOFF(tty)) { config->wFlags |= UMP_MASK_UART_FLAGS_IN_X; dbg("%s - INBOUND XON/XOFF is enabled, XON = %2x, XOFF = %2x", __func__, config->cXon, config->cXoff); } else dbg("%s - INBOUND XON/XOFF is disabled", __func__); /* if we are implementing OUTBOUND XON/XOFF */ if (I_IXON(tty)) { config->wFlags |= UMP_MASK_UART_FLAGS_OUT_X; dbg("%s - OUTBOUND XON/XOFF is enabled, XON = %2x, XOFF = %2x", __func__, config->cXon, config->cXoff); } else dbg("%s - OUTBOUND XON/XOFF is disabled", __func__); tty->termios->c_cflag &= ~CMSPAR; /* Round the baud rate */ baud = tty_get_baud_rate(tty); if (!baud) { /* pick a default, any default... 
*/ baud = 9600; } else tty_encode_baud_rate(tty, baud, baud); edge_port->baud_rate = baud; config->wBaudRate = (__u16)((461550L + baud/2) / baud); /* FIXME: Recompute actual baud from divisor here */ dbg("%s - baud rate = %d, wBaudRate = %d", __func__, baud, config->wBaudRate); dbg("wBaudRate: %d", (int)(461550L / config->wBaudRate)); dbg("wFlags: 0x%x", config->wFlags); dbg("bDataBits: %d", config->bDataBits); dbg("bParity: %d", config->bParity); dbg("bStopBits: %d", config->bStopBits); dbg("cXon: %d", config->cXon); dbg("cXoff: %d", config->cXoff); dbg("bUartMode: %d", config->bUartMode); /* move the word values into big endian mode */ cpu_to_be16s(&config->wFlags); cpu_to_be16s(&config->wBaudRate); status = send_cmd(edge_port->port->serial->dev, UMPC_SET_CONFIG, (__u8)(UMPM_UART1_PORT + port_number), 0, (__u8 *)config, sizeof(*config)); if (status) dbg("%s - error %d when trying to write config to device", __func__, status); kfree(config); } static void edge_set_termios(struct tty_struct *tty, struct usb_serial_port *port, struct ktermios *old_termios) { struct edgeport_port *edge_port = usb_get_serial_port_data(port); unsigned int cflag; cflag = tty->termios->c_cflag; dbg("%s - clfag %08x iflag %08x", __func__, tty->termios->c_cflag, tty->termios->c_iflag); dbg("%s - old clfag %08x old iflag %08x", __func__, old_termios->c_cflag, old_termios->c_iflag); dbg("%s - port %d", __func__, port->number); if (edge_port == NULL) return; /* change the port settings to the new ones specified */ change_port_settings(tty, edge_port, old_termios); } static int edge_tiocmset(struct tty_struct *tty, unsigned int set, unsigned int clear) { struct usb_serial_port *port = tty->driver_data; struct edgeport_port *edge_port = usb_get_serial_port_data(port); unsigned int mcr; unsigned long flags; dbg("%s - port %d", __func__, port->number); spin_lock_irqsave(&edge_port->ep_lock, flags); mcr = edge_port->shadow_mcr; if (set & TIOCM_RTS) mcr |= MCR_RTS; if (set & TIOCM_DTR) mcr |= 
MCR_DTR; if (set & TIOCM_LOOP) mcr |= MCR_LOOPBACK; if (clear & TIOCM_RTS) mcr &= ~MCR_RTS; if (clear & TIOCM_DTR) mcr &= ~MCR_DTR; if (clear & TIOCM_LOOP) mcr &= ~MCR_LOOPBACK; edge_port->shadow_mcr = mcr; spin_unlock_irqrestore(&edge_port->ep_lock, flags); restore_mcr(edge_port, mcr); return 0; } static int edge_tiocmget(struct tty_struct *tty) { struct usb_serial_port *port = tty->driver_data; struct edgeport_port *edge_port = usb_get_serial_port_data(port); unsigned int result = 0; unsigned int msr; unsigned int mcr; unsigned long flags; dbg("%s - port %d", __func__, port->number); spin_lock_irqsave(&edge_port->ep_lock, flags); msr = edge_port->shadow_msr; mcr = edge_port->shadow_mcr; result = ((mcr & MCR_DTR) ? TIOCM_DTR: 0) /* 0x002 */ | ((mcr & MCR_RTS) ? TIOCM_RTS: 0) /* 0x004 */ | ((msr & EDGEPORT_MSR_CTS) ? TIOCM_CTS: 0) /* 0x020 */ | ((msr & EDGEPORT_MSR_CD) ? TIOCM_CAR: 0) /* 0x040 */ | ((msr & EDGEPORT_MSR_RI) ? TIOCM_RI: 0) /* 0x080 */ | ((msr & EDGEPORT_MSR_DSR) ? TIOCM_DSR: 0); /* 0x100 */ dbg("%s -- %x", __func__, result); spin_unlock_irqrestore(&edge_port->ep_lock, flags); return result; } static int edge_get_icount(struct tty_struct *tty, struct serial_icounter_struct *icount) { struct usb_serial_port *port = tty->driver_data; struct edgeport_port *edge_port = usb_get_serial_port_data(port); struct async_icount *ic = &edge_port->icount; icount->cts = ic->cts; icount->dsr = ic->dsr; icount->rng = ic->rng; icount->dcd = ic->dcd; icount->tx = ic->tx; icount->rx = ic->rx; icount->frame = ic->frame; icount->parity = ic->parity; icount->overrun = ic->overrun; icount->brk = ic->brk; icount->buf_overrun = ic->buf_overrun; return 0; } static int get_serial_info(struct edgeport_port *edge_port, struct serial_struct __user *retinfo) { struct serial_struct tmp; if (!retinfo) return -EFAULT; memset(&tmp, 0, sizeof(tmp)); tmp.type = PORT_16550A; tmp.line = edge_port->port->serial->minor; tmp.port = edge_port->port->number; tmp.irq = 0; tmp.flags = 
ASYNC_SKIP_TEST | ASYNC_AUTO_IRQ; tmp.xmit_fifo_size = edge_port->port->bulk_out_size; tmp.baud_base = 9600; tmp.close_delay = 5*HZ; tmp.closing_wait = closing_wait; if (copy_to_user(retinfo, &tmp, sizeof(*retinfo))) return -EFAULT; return 0; } static int edge_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg) { struct usb_serial_port *port = tty->driver_data; struct edgeport_port *edge_port = usb_get_serial_port_data(port); struct async_icount cnow; struct async_icount cprev; dbg("%s - port %d, cmd = 0x%x", __func__, port->number, cmd); switch (cmd) { case TIOCGSERIAL: dbg("%s - (%d) TIOCGSERIAL", __func__, port->number); return get_serial_info(edge_port, (struct serial_struct __user *) arg); case TIOCMIWAIT: dbg("%s - (%d) TIOCMIWAIT", __func__, port->number); cprev = edge_port->icount; while (1) { interruptible_sleep_on(&edge_port->delta_msr_wait); /* see if a signal did it */ if (signal_pending(current)) return -ERESTARTSYS; cnow = edge_port->icount; if (cnow.rng == cprev.rng && cnow.dsr == cprev.dsr && cnow.dcd == cprev.dcd && cnow.cts == cprev.cts) return -EIO; /* no change => error */ if (((arg & TIOCM_RNG) && (cnow.rng != cprev.rng)) || ((arg & TIOCM_DSR) && (cnow.dsr != cprev.dsr)) || ((arg & TIOCM_CD) && (cnow.dcd != cprev.dcd)) || ((arg & TIOCM_CTS) && (cnow.cts != cprev.cts))) { return 0; } cprev = cnow; } /* not reached */ break; } return -ENOIOCTLCMD; } static void edge_break(struct tty_struct *tty, int break_state) { struct usb_serial_port *port = tty->driver_data; struct edgeport_port *edge_port = usb_get_serial_port_data(port); int status; int bv = 0; /* Off */ dbg("%s - state = %d", __func__, break_state); /* chase the port close */ chase_port(edge_port, 0, 0); if (break_state == -1) bv = 1; /* On */ status = ti_do_config(edge_port, UMPC_SET_CLR_BREAK, bv); if (status) dbg("%s - error %d sending break set/clear command.", __func__, status); } static int edge_startup(struct usb_serial *serial) { struct edgeport_serial *edge_serial; 
struct edgeport_port *edge_port; struct usb_device *dev; int status; int i; dev = serial->dev; /* create our private serial structure */ edge_serial = kzalloc(sizeof(struct edgeport_serial), GFP_KERNEL); if (edge_serial == NULL) { dev_err(&serial->dev->dev, "%s - Out of memory\n", __func__); return -ENOMEM; } mutex_init(&edge_serial->es_lock); edge_serial->serial = serial; usb_set_serial_data(serial, edge_serial); status = download_fw(edge_serial); if (status) { kfree(edge_serial); return status; } /* set up our port private structures */ for (i = 0; i < serial->num_ports; ++i) { edge_port = kzalloc(sizeof(struct edgeport_port), GFP_KERNEL); if (edge_port == NULL) { dev_err(&serial->dev->dev, "%s - Out of memory\n", __func__); goto cleanup; } spin_lock_init(&edge_port->ep_lock); if (kfifo_alloc(&edge_port->write_fifo, EDGE_OUT_BUF_SIZE, GFP_KERNEL)) { dev_err(&serial->dev->dev, "%s - Out of memory\n", __func__); kfree(edge_port); goto cleanup; } edge_port->port = serial->port[i]; edge_port->edge_serial = edge_serial; usb_set_serial_port_data(serial->port[i], edge_port); edge_port->bUartMode = default_uart_mode; } return 0; cleanup: for (--i; i >= 0; --i) { edge_port = usb_get_serial_port_data(serial->port[i]); kfifo_free(&edge_port->write_fifo); kfree(edge_port); usb_set_serial_port_data(serial->port[i], NULL); } kfree(edge_serial); usb_set_serial_data(serial, NULL); return -ENOMEM; } static void edge_disconnect(struct usb_serial *serial) { dbg("%s", __func__); } static void edge_release(struct usb_serial *serial) { int i; struct edgeport_port *edge_port; dbg("%s", __func__); for (i = 0; i < serial->num_ports; ++i) { edge_port = usb_get_serial_port_data(serial->port[i]); kfifo_free(&edge_port->write_fifo); kfree(edge_port); } kfree(usb_get_serial_data(serial)); } /* Sysfs Attributes */ static ssize_t show_uart_mode(struct device *dev, struct device_attribute *attr, char *buf) { struct usb_serial_port *port = to_usb_serial_port(dev); struct edgeport_port *edge_port 
= usb_get_serial_port_data(port); return sprintf(buf, "%d\n", edge_port->bUartMode); } static ssize_t store_uart_mode(struct device *dev, struct device_attribute *attr, const char *valbuf, size_t count) { struct usb_serial_port *port = to_usb_serial_port(dev); struct edgeport_port *edge_port = usb_get_serial_port_data(port); unsigned int v = simple_strtoul(valbuf, NULL, 0); dbg("%s: setting uart_mode = %d", __func__, v); if (v < 256) edge_port->bUartMode = v; else dev_err(dev, "%s - uart_mode %d is invalid\n", __func__, v); return count; } static DEVICE_ATTR(uart_mode, S_IWUSR | S_IRUGO, show_uart_mode, store_uart_mode); static int edge_create_sysfs_attrs(struct usb_serial_port *port) { return device_create_file(&port->dev, &dev_attr_uart_mode); } static int edge_remove_sysfs_attrs(struct usb_serial_port *port) { device_remove_file(&port->dev, &dev_attr_uart_mode); return 0; } static struct usb_serial_driver edgeport_1port_device = { .driver = { .owner = THIS_MODULE, .name = "edgeport_ti_1", }, .description = "Edgeport TI 1 port adapter", .id_table = edgeport_1port_id_table, .num_ports = 1, .open = edge_open, .close = edge_close, .throttle = edge_throttle, .unthrottle = edge_unthrottle, .attach = edge_startup, .disconnect = edge_disconnect, .release = edge_release, .port_probe = edge_create_sysfs_attrs, .port_remove = edge_remove_sysfs_attrs, .ioctl = edge_ioctl, .set_termios = edge_set_termios, .tiocmget = edge_tiocmget, .tiocmset = edge_tiocmset, .get_icount = edge_get_icount, .write = edge_write, .write_room = edge_write_room, .chars_in_buffer = edge_chars_in_buffer, .break_ctl = edge_break, .read_int_callback = edge_interrupt_callback, .read_bulk_callback = edge_bulk_in_callback, .write_bulk_callback = edge_bulk_out_callback, }; static struct usb_serial_driver edgeport_2port_device = { .driver = { .owner = THIS_MODULE, .name = "edgeport_ti_2", }, .description = "Edgeport TI 2 port adapter", .id_table = edgeport_2port_id_table, .num_ports = 2, .open = edge_open, 
.close = edge_close, .throttle = edge_throttle, .unthrottle = edge_unthrottle, .attach = edge_startup, .disconnect = edge_disconnect, .release = edge_release, .port_probe = edge_create_sysfs_attrs, .port_remove = edge_remove_sysfs_attrs, .ioctl = edge_ioctl, .set_termios = edge_set_termios, .tiocmget = edge_tiocmget, .tiocmset = edge_tiocmset, .write = edge_write, .write_room = edge_write_room, .chars_in_buffer = edge_chars_in_buffer, .break_ctl = edge_break, .read_int_callback = edge_interrupt_callback, .read_bulk_callback = edge_bulk_in_callback, .write_bulk_callback = edge_bulk_out_callback, }; static struct usb_serial_driver * const serial_drivers[] = { &edgeport_1port_device, &edgeport_2port_device, NULL }; module_usb_serial_driver(io_driver, serial_drivers); MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL"); MODULE_FIRMWARE("edgeport/down3.bin"); module_param(debug, bool, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(debug, "Debug enabled or not"); module_param(closing_wait, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(closing_wait, "Maximum wait for data to drain, in .01 secs"); module_param(ignore_cpu_rev, bool, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(ignore_cpu_rev, "Ignore the cpu revision when connecting to a device"); module_param(default_uart_mode, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(default_uart_mode, "Default uart_mode, 0=RS232, ...");
gpl-2.0
SlimRoms/kernel_lge_msm7x27a-common
arch/alpha/kernel/osf_sys.c
4336
30504
/* * linux/arch/alpha/kernel/osf_sys.c * * Copyright (C) 1995 Linus Torvalds */ /* * This file handles some of the stranger OSF/1 system call interfaces. * Some of the system calls expect a non-C calling standard, others have * special parameter blocks.. */ #include <linux/errno.h> #include <linux/sched.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/smp.h> #include <linux/stddef.h> #include <linux/syscalls.h> #include <linux/unistd.h> #include <linux/ptrace.h> #include <linux/user.h> #include <linux/utsname.h> #include <linux/time.h> #include <linux/timex.h> #include <linux/major.h> #include <linux/stat.h> #include <linux/mman.h> #include <linux/shm.h> #include <linux/poll.h> #include <linux/file.h> #include <linux/types.h> #include <linux/ipc.h> #include <linux/namei.h> #include <linux/uio.h> #include <linux/vfs.h> #include <linux/rcupdate.h> #include <linux/slab.h> #include <asm/fpu.h> #include <asm/io.h> #include <asm/uaccess.h> #include <asm/sysinfo.h> #include <asm/thread_info.h> #include <asm/hwrpb.h> #include <asm/processor.h> /* * Brk needs to return an error. Still support Linux's brk(0) query idiom, * which OSF programs just shouldn't be doing. We're still not quite * identical to OSF as we don't return 0 on success, but doing otherwise * would require changes to libc. Hopefully this is good enough. */ SYSCALL_DEFINE1(osf_brk, unsigned long, brk) { unsigned long retval = sys_brk(brk); if (brk && brk != retval) retval = -ENOMEM; return retval; } /* * This is pure guess-work.. */ SYSCALL_DEFINE4(osf_set_program_attributes, unsigned long, text_start, unsigned long, text_len, unsigned long, bss_start, unsigned long, bss_len) { struct mm_struct *mm; mm = current->mm; mm->end_code = bss_start + bss_len; mm->start_brk = bss_start + bss_len; mm->brk = bss_start + bss_len; #if 0 printk("set_program_attributes(%lx %lx %lx %lx)\n", text_start, text_len, bss_start, bss_len); #endif return 0; } /* * OSF/1 directory handling functions... 
* * The "getdents()" interface is much more sane: the "basep" stuff is * braindamage (it can't really handle filesystems where the directory * offset differences aren't the same as "d_reclen"). */ #define NAME_OFFSET offsetof (struct osf_dirent, d_name) struct osf_dirent { unsigned int d_ino; unsigned short d_reclen; unsigned short d_namlen; char d_name[1]; }; struct osf_dirent_callback { struct osf_dirent __user *dirent; long __user *basep; unsigned int count; int error; }; static int osf_filldir(void *__buf, const char *name, int namlen, loff_t offset, u64 ino, unsigned int d_type) { struct osf_dirent __user *dirent; struct osf_dirent_callback *buf = (struct osf_dirent_callback *) __buf; unsigned int reclen = ALIGN(NAME_OFFSET + namlen + 1, sizeof(u32)); unsigned int d_ino; buf->error = -EINVAL; /* only used if we fail */ if (reclen > buf->count) return -EINVAL; d_ino = ino; if (sizeof(d_ino) < sizeof(ino) && d_ino != ino) { buf->error = -EOVERFLOW; return -EOVERFLOW; } if (buf->basep) { if (put_user(offset, buf->basep)) goto Efault; buf->basep = NULL; } dirent = buf->dirent; if (put_user(d_ino, &dirent->d_ino) || put_user(namlen, &dirent->d_namlen) || put_user(reclen, &dirent->d_reclen) || copy_to_user(dirent->d_name, name, namlen) || put_user(0, dirent->d_name + namlen)) goto Efault; dirent = (void __user *)dirent + reclen; buf->dirent = dirent; buf->count -= reclen; return 0; Efault: buf->error = -EFAULT; return -EFAULT; } SYSCALL_DEFINE4(osf_getdirentries, unsigned int, fd, struct osf_dirent __user *, dirent, unsigned int, count, long __user *, basep) { int error; struct file *file; struct osf_dirent_callback buf; error = -EBADF; file = fget(fd); if (!file) goto out; buf.dirent = dirent; buf.basep = basep; buf.count = count; buf.error = 0; error = vfs_readdir(file, osf_filldir, &buf); if (error >= 0) error = buf.error; if (count != buf.count) error = count - buf.count; fput(file); out: return error; } #undef NAME_OFFSET SYSCALL_DEFINE6(osf_mmap, unsigned 
long, addr, unsigned long, len, unsigned long, prot, unsigned long, flags, unsigned long, fd, unsigned long, off) { unsigned long ret = -EINVAL; #if 0 if (flags & (_MAP_HASSEMAPHORE | _MAP_INHERIT | _MAP_UNALIGNED)) printk("%s: unimplemented OSF mmap flags %04lx\n", current->comm, flags); #endif if ((off + PAGE_ALIGN(len)) < off) goto out; if (off & ~PAGE_MASK) goto out; ret = sys_mmap_pgoff(addr, len, prot, flags, fd, off >> PAGE_SHIFT); out: return ret; } /* * The OSF/1 statfs structure is much larger, but this should * match the beginning, at least. */ struct osf_statfs { short f_type; short f_flags; int f_fsize; int f_bsize; int f_blocks; int f_bfree; int f_bavail; int f_files; int f_ffree; __kernel_fsid_t f_fsid; }; static int linux_to_osf_statfs(struct kstatfs *linux_stat, struct osf_statfs __user *osf_stat, unsigned long bufsiz) { struct osf_statfs tmp_stat; tmp_stat.f_type = linux_stat->f_type; tmp_stat.f_flags = 0; /* mount flags */ tmp_stat.f_fsize = linux_stat->f_frsize; tmp_stat.f_bsize = linux_stat->f_bsize; tmp_stat.f_blocks = linux_stat->f_blocks; tmp_stat.f_bfree = linux_stat->f_bfree; tmp_stat.f_bavail = linux_stat->f_bavail; tmp_stat.f_files = linux_stat->f_files; tmp_stat.f_ffree = linux_stat->f_ffree; tmp_stat.f_fsid = linux_stat->f_fsid; if (bufsiz > sizeof(tmp_stat)) bufsiz = sizeof(tmp_stat); return copy_to_user(osf_stat, &tmp_stat, bufsiz) ? -EFAULT : 0; } SYSCALL_DEFINE3(osf_statfs, const char __user *, pathname, struct osf_statfs __user *, buffer, unsigned long, bufsiz) { struct kstatfs linux_stat; int error = user_statfs(pathname, &linux_stat); if (!error) error = linux_to_osf_statfs(&linux_stat, buffer, bufsiz); return error; } SYSCALL_DEFINE3(osf_fstatfs, unsigned long, fd, struct osf_statfs __user *, buffer, unsigned long, bufsiz) { struct kstatfs linux_stat; int error = fd_statfs(fd, &linux_stat); if (!error) error = linux_to_osf_statfs(&linux_stat, buffer, bufsiz); return error; } /* * Uhh.. 
OSF/1 mount parameters aren't exactly obvious.. * * Although to be frank, neither are the native Linux/i386 ones.. */ struct ufs_args { char __user *devname; int flags; uid_t exroot; }; struct cdfs_args { char __user *devname; int flags; uid_t exroot; /* This has lots more here, which Linux handles with the option block but I'm too lazy to do the translation into ASCII. */ }; struct procfs_args { char __user *devname; int flags; uid_t exroot; }; /* * We can't actually handle ufs yet, so we translate UFS mounts to * ext2fs mounts. I wouldn't mind a UFS filesystem, but the UFS * layout is so braindead it's a major headache doing it. * * Just how long ago was it written? OTOH our UFS driver may be still * unhappy with OSF UFS. [CHECKME] */ static int osf_ufs_mount(char *dirname, struct ufs_args __user *args, int flags) { int retval; struct cdfs_args tmp; char *devname; retval = -EFAULT; if (copy_from_user(&tmp, args, sizeof(tmp))) goto out; devname = getname(tmp.devname); retval = PTR_ERR(devname); if (IS_ERR(devname)) goto out; retval = do_mount(devname, dirname, "ext2", flags, NULL); putname(devname); out: return retval; } static int osf_cdfs_mount(char *dirname, struct cdfs_args __user *args, int flags) { int retval; struct cdfs_args tmp; char *devname; retval = -EFAULT; if (copy_from_user(&tmp, args, sizeof(tmp))) goto out; devname = getname(tmp.devname); retval = PTR_ERR(devname); if (IS_ERR(devname)) goto out; retval = do_mount(devname, dirname, "iso9660", flags, NULL); putname(devname); out: return retval; } static int osf_procfs_mount(char *dirname, struct procfs_args __user *args, int flags) { struct procfs_args tmp; if (copy_from_user(&tmp, args, sizeof(tmp))) return -EFAULT; return do_mount("", dirname, "proc", flags, NULL); } SYSCALL_DEFINE4(osf_mount, unsigned long, typenr, const char __user *, path, int, flag, void __user *, data) { int retval; char *name; name = getname(path); retval = PTR_ERR(name); if (IS_ERR(name)) goto out; switch (typenr) { case 1: 
retval = osf_ufs_mount(name, data, flag); break; case 6: retval = osf_cdfs_mount(name, data, flag); break; case 9: retval = osf_procfs_mount(name, data, flag); break; default: retval = -EINVAL; printk("osf_mount(%ld, %x)\n", typenr, flag); } putname(name); out: return retval; } SYSCALL_DEFINE1(osf_utsname, char __user *, name) { int error; down_read(&uts_sem); error = -EFAULT; if (copy_to_user(name + 0, utsname()->sysname, 32)) goto out; if (copy_to_user(name + 32, utsname()->nodename, 32)) goto out; if (copy_to_user(name + 64, utsname()->release, 32)) goto out; if (copy_to_user(name + 96, utsname()->version, 32)) goto out; if (copy_to_user(name + 128, utsname()->machine, 32)) goto out; error = 0; out: up_read(&uts_sem); return error; } SYSCALL_DEFINE0(getpagesize) { return PAGE_SIZE; } SYSCALL_DEFINE0(getdtablesize) { return sysctl_nr_open; } /* * For compatibility with OSF/1 only. Use utsname(2) instead. */ SYSCALL_DEFINE2(osf_getdomainname, char __user *, name, int, namelen) { unsigned len; int i; if (!access_ok(VERIFY_WRITE, name, namelen)) return -EFAULT; len = namelen; if (len > 32) len = 32; down_read(&uts_sem); for (i = 0; i < len; ++i) { __put_user(utsname()->domainname[i], name + i); if (utsname()->domainname[i] == '\0') break; } up_read(&uts_sem); return 0; } /* * The following stuff should move into a header file should it ever * be labeled "officially supported." Right now, there is just enough * support to avoid applications (such as tar) printing error * messages. The attributes are not really implemented. 
*/ /* * Values for Property list entry flag */ #define PLE_PROPAGATE_ON_COPY 0x1 /* cp(1) will copy entry by default */ #define PLE_FLAG_MASK 0x1 /* Valid flag values */ #define PLE_FLAG_ALL -1 /* All flag value */ struct proplistname_args { unsigned int pl_mask; unsigned int pl_numnames; char **pl_names; }; union pl_args { struct setargs { char __user *path; long follow; long nbytes; char __user *buf; } set; struct fsetargs { long fd; long nbytes; char __user *buf; } fset; struct getargs { char __user *path; long follow; struct proplistname_args __user *name_args; long nbytes; char __user *buf; int __user *min_buf_size; } get; struct fgetargs { long fd; struct proplistname_args __user *name_args; long nbytes; char __user *buf; int __user *min_buf_size; } fget; struct delargs { char __user *path; long follow; struct proplistname_args __user *name_args; } del; struct fdelargs { long fd; struct proplistname_args __user *name_args; } fdel; }; enum pl_code { PL_SET = 1, PL_FSET = 2, PL_GET = 3, PL_FGET = 4, PL_DEL = 5, PL_FDEL = 6 }; SYSCALL_DEFINE2(osf_proplist_syscall, enum pl_code, code, union pl_args __user *, args) { long error; int __user *min_buf_size_ptr; switch (code) { case PL_SET: if (get_user(error, &args->set.nbytes)) error = -EFAULT; break; case PL_FSET: if (get_user(error, &args->fset.nbytes)) error = -EFAULT; break; case PL_GET: error = get_user(min_buf_size_ptr, &args->get.min_buf_size); if (error) break; error = put_user(0, min_buf_size_ptr); break; case PL_FGET: error = get_user(min_buf_size_ptr, &args->fget.min_buf_size); if (error) break; error = put_user(0, min_buf_size_ptr); break; case PL_DEL: case PL_FDEL: error = 0; break; default: error = -EOPNOTSUPP; break; }; return error; } SYSCALL_DEFINE2(osf_sigstack, struct sigstack __user *, uss, struct sigstack __user *, uoss) { unsigned long usp = rdusp(); unsigned long oss_sp = current->sas_ss_sp + current->sas_ss_size; unsigned long oss_os = on_sig_stack(usp); int error; if (uss) { void __user 
*ss_sp; error = -EFAULT; if (get_user(ss_sp, &uss->ss_sp)) goto out; /* If the current stack was set with sigaltstack, don't swap stacks while we are on it. */ error = -EPERM; if (current->sas_ss_sp && on_sig_stack(usp)) goto out; /* Since we don't know the extent of the stack, and we don't track onstack-ness, but rather calculate it, we must presume a size. Ho hum this interface is lossy. */ current->sas_ss_sp = (unsigned long)ss_sp - SIGSTKSZ; current->sas_ss_size = SIGSTKSZ; } if (uoss) { error = -EFAULT; if (! access_ok(VERIFY_WRITE, uoss, sizeof(*uoss)) || __put_user(oss_sp, &uoss->ss_sp) || __put_user(oss_os, &uoss->ss_onstack)) goto out; } error = 0; out: return error; } SYSCALL_DEFINE3(osf_sysinfo, int, command, char __user *, buf, long, count) { const char *sysinfo_table[] = { utsname()->sysname, utsname()->nodename, utsname()->release, utsname()->version, utsname()->machine, "alpha", /* instruction set architecture */ "dummy", /* hardware serial number */ "dummy", /* hardware manufacturer */ "dummy", /* secure RPC domain */ }; unsigned long offset; const char *res; long len, err = -EINVAL; offset = command-1; if (offset >= ARRAY_SIZE(sysinfo_table)) { /* Digital UNIX has a few unpublished interfaces here */ printk("sysinfo(%d)", command); goto out; } down_read(&uts_sem); res = sysinfo_table[offset]; len = strlen(res)+1; if ((unsigned long)len > (unsigned long)count) len = count; if (copy_to_user(buf, res, len)) err = -EFAULT; else err = 0; up_read(&uts_sem); out: return err; } SYSCALL_DEFINE5(osf_getsysinfo, unsigned long, op, void __user *, buffer, unsigned long, nbytes, int __user *, start, void __user *, arg) { unsigned long w; struct percpu_struct *cpu; switch (op) { case GSI_IEEE_FP_CONTROL: /* Return current software fp control & status bits. */ /* Note that DU doesn't verify available space here. 
*/ w = current_thread_info()->ieee_state & IEEE_SW_MASK; w = swcr_update_status(w, rdfpcr()); if (put_user(w, (unsigned long __user *) buffer)) return -EFAULT; return 0; case GSI_IEEE_STATE_AT_SIGNAL: /* * Not sure anybody will ever use this weird stuff. These * ops can be used (under OSF/1) to set the fpcr that should * be used when a signal handler starts executing. */ break; case GSI_UACPROC: if (nbytes < sizeof(unsigned int)) return -EINVAL; w = (current_thread_info()->flags >> ALPHA_UAC_SHIFT) & UAC_BITMASK; if (put_user(w, (unsigned int __user *)buffer)) return -EFAULT; return 1; case GSI_PROC_TYPE: if (nbytes < sizeof(unsigned long)) return -EINVAL; cpu = (struct percpu_struct*) ((char*)hwrpb + hwrpb->processor_offset); w = cpu->type; if (put_user(w, (unsigned long __user*)buffer)) return -EFAULT; return 1; case GSI_GET_HWRPB: if (nbytes > sizeof(*hwrpb)) return -EINVAL; if (copy_to_user(buffer, hwrpb, nbytes) != 0) return -EFAULT; return 1; default: break; } return -EOPNOTSUPP; } SYSCALL_DEFINE5(osf_setsysinfo, unsigned long, op, void __user *, buffer, unsigned long, nbytes, int __user *, start, void __user *, arg) { switch (op) { case SSI_IEEE_FP_CONTROL: { unsigned long swcr, fpcr; unsigned int *state; /* * Alpha Architecture Handbook 4.7.7.3: * To be fully IEEE compiant, we must track the current IEEE * exception state in software, because spurious bits can be * set in the trap shadow of a software-complete insn. */ if (get_user(swcr, (unsigned long __user *)buffer)) return -EFAULT; state = &current_thread_info()->ieee_state; /* Update softare trap enable bits. */ *state = (*state & ~IEEE_SW_MASK) | (swcr & IEEE_SW_MASK); /* Update the real fpcr. 
*/ fpcr = rdfpcr() & FPCR_DYN_MASK; fpcr |= ieee_swcr_to_fpcr(swcr); wrfpcr(fpcr); return 0; } case SSI_IEEE_RAISE_EXCEPTION: { unsigned long exc, swcr, fpcr, fex; unsigned int *state; if (get_user(exc, (unsigned long __user *)buffer)) return -EFAULT; state = &current_thread_info()->ieee_state; exc &= IEEE_STATUS_MASK; /* Update softare trap enable bits. */ swcr = (*state & IEEE_SW_MASK) | exc; *state |= exc; /* Update the real fpcr. */ fpcr = rdfpcr(); fpcr |= ieee_swcr_to_fpcr(swcr); wrfpcr(fpcr); /* If any exceptions set by this call, and are unmasked, send a signal. Old exceptions are not signaled. */ fex = (exc >> IEEE_STATUS_TO_EXCSUM_SHIFT) & swcr; if (fex) { siginfo_t info; int si_code = 0; if (fex & IEEE_TRAP_ENABLE_DNO) si_code = FPE_FLTUND; if (fex & IEEE_TRAP_ENABLE_INE) si_code = FPE_FLTRES; if (fex & IEEE_TRAP_ENABLE_UNF) si_code = FPE_FLTUND; if (fex & IEEE_TRAP_ENABLE_OVF) si_code = FPE_FLTOVF; if (fex & IEEE_TRAP_ENABLE_DZE) si_code = FPE_FLTDIV; if (fex & IEEE_TRAP_ENABLE_INV) si_code = FPE_FLTINV; info.si_signo = SIGFPE; info.si_errno = 0; info.si_code = si_code; info.si_addr = NULL; /* FIXME */ send_sig_info(SIGFPE, &info, current); } return 0; } case SSI_IEEE_STATE_AT_SIGNAL: case SSI_IEEE_IGNORE_STATE_AT_SIGNAL: /* * Not sure anybody will ever use this weird stuff. These * ops can be used (under OSF/1) to set the fpcr that should * be used when a signal handler starts executing. 
*/ break; case SSI_NVPAIRS: { unsigned long v, w, i; unsigned int old, new; for (i = 0; i < nbytes; ++i) { if (get_user(v, 2*i + (unsigned int __user *)buffer)) return -EFAULT; if (get_user(w, 2*i + 1 + (unsigned int __user *)buffer)) return -EFAULT; switch (v) { case SSIN_UACPROC: again: old = current_thread_info()->flags; new = old & ~(UAC_BITMASK << ALPHA_UAC_SHIFT); new = new | (w & UAC_BITMASK) << ALPHA_UAC_SHIFT; if (cmpxchg(&current_thread_info()->flags, old, new) != old) goto again; break; default: return -EOPNOTSUPP; } } return 0; } default: break; } return -EOPNOTSUPP; } /* Translations due to the fact that OSF's time_t is an int. Which affects all sorts of things, like timeval and itimerval. */ extern struct timezone sys_tz; struct timeval32 { int tv_sec, tv_usec; }; struct itimerval32 { struct timeval32 it_interval; struct timeval32 it_value; }; static inline long get_tv32(struct timeval *o, struct timeval32 __user *i) { return (!access_ok(VERIFY_READ, i, sizeof(*i)) || (__get_user(o->tv_sec, &i->tv_sec) | __get_user(o->tv_usec, &i->tv_usec))); } static inline long put_tv32(struct timeval32 __user *o, struct timeval *i) { return (!access_ok(VERIFY_WRITE, o, sizeof(*o)) || (__put_user(i->tv_sec, &o->tv_sec) | __put_user(i->tv_usec, &o->tv_usec))); } static inline long get_it32(struct itimerval *o, struct itimerval32 __user *i) { return (!access_ok(VERIFY_READ, i, sizeof(*i)) || (__get_user(o->it_interval.tv_sec, &i->it_interval.tv_sec) | __get_user(o->it_interval.tv_usec, &i->it_interval.tv_usec) | __get_user(o->it_value.tv_sec, &i->it_value.tv_sec) | __get_user(o->it_value.tv_usec, &i->it_value.tv_usec))); } static inline long put_it32(struct itimerval32 __user *o, struct itimerval *i) { return (!access_ok(VERIFY_WRITE, o, sizeof(*o)) || (__put_user(i->it_interval.tv_sec, &o->it_interval.tv_sec) | __put_user(i->it_interval.tv_usec, &o->it_interval.tv_usec) | __put_user(i->it_value.tv_sec, &o->it_value.tv_sec) | __put_user(i->it_value.tv_usec, 
&o->it_value.tv_usec))); } static inline void jiffies_to_timeval32(unsigned long jiffies, struct timeval32 *value) { value->tv_usec = (jiffies % HZ) * (1000000L / HZ); value->tv_sec = jiffies / HZ; } SYSCALL_DEFINE2(osf_gettimeofday, struct timeval32 __user *, tv, struct timezone __user *, tz) { if (tv) { struct timeval ktv; do_gettimeofday(&ktv); if (put_tv32(tv, &ktv)) return -EFAULT; } if (tz) { if (copy_to_user(tz, &sys_tz, sizeof(sys_tz))) return -EFAULT; } return 0; } SYSCALL_DEFINE2(osf_settimeofday, struct timeval32 __user *, tv, struct timezone __user *, tz) { struct timespec kts; struct timezone ktz; if (tv) { if (get_tv32((struct timeval *)&kts, tv)) return -EFAULT; } if (tz) { if (copy_from_user(&ktz, tz, sizeof(*tz))) return -EFAULT; } kts.tv_nsec *= 1000; return do_sys_settimeofday(tv ? &kts : NULL, tz ? &ktz : NULL); } SYSCALL_DEFINE2(osf_getitimer, int, which, struct itimerval32 __user *, it) { struct itimerval kit; int error; error = do_getitimer(which, &kit); if (!error && put_it32(it, &kit)) error = -EFAULT; return error; } SYSCALL_DEFINE3(osf_setitimer, int, which, struct itimerval32 __user *, in, struct itimerval32 __user *, out) { struct itimerval kin, kout; int error; if (in) { if (get_it32(&kin, in)) return -EFAULT; } else memset(&kin, 0, sizeof(kin)); error = do_setitimer(which, &kin, out ? &kout : NULL); if (error || !out) return error; if (put_it32(out, &kout)) return -EFAULT; return 0; } SYSCALL_DEFINE2(osf_utimes, const char __user *, filename, struct timeval32 __user *, tvs) { struct timespec tv[2]; if (tvs) { struct timeval ktvs[2]; if (get_tv32(&ktvs[0], &tvs[0]) || get_tv32(&ktvs[1], &tvs[1])) return -EFAULT; if (ktvs[0].tv_usec < 0 || ktvs[0].tv_usec >= 1000000 || ktvs[1].tv_usec < 0 || ktvs[1].tv_usec >= 1000000) return -EINVAL; tv[0].tv_sec = ktvs[0].tv_sec; tv[0].tv_nsec = 1000 * ktvs[0].tv_usec; tv[1].tv_sec = ktvs[1].tv_sec; tv[1].tv_nsec = 1000 * ktvs[1].tv_usec; } return do_utimes(AT_FDCWD, filename, tvs ? 
tv : NULL, 0); } SYSCALL_DEFINE5(osf_select, int, n, fd_set __user *, inp, fd_set __user *, outp, fd_set __user *, exp, struct timeval32 __user *, tvp) { struct timespec end_time, *to = NULL; if (tvp) { time_t sec, usec; to = &end_time; if (!access_ok(VERIFY_READ, tvp, sizeof(*tvp)) || __get_user(sec, &tvp->tv_sec) || __get_user(usec, &tvp->tv_usec)) { return -EFAULT; } if (sec < 0 || usec < 0) return -EINVAL; if (poll_select_set_timeout(to, sec, usec * NSEC_PER_USEC)) return -EINVAL; } /* OSF does not copy back the remaining time. */ return core_sys_select(n, inp, outp, exp, to); } struct rusage32 { struct timeval32 ru_utime; /* user time used */ struct timeval32 ru_stime; /* system time used */ long ru_maxrss; /* maximum resident set size */ long ru_ixrss; /* integral shared memory size */ long ru_idrss; /* integral unshared data size */ long ru_isrss; /* integral unshared stack size */ long ru_minflt; /* page reclaims */ long ru_majflt; /* page faults */ long ru_nswap; /* swaps */ long ru_inblock; /* block input operations */ long ru_oublock; /* block output operations */ long ru_msgsnd; /* messages sent */ long ru_msgrcv; /* messages received */ long ru_nsignals; /* signals received */ long ru_nvcsw; /* voluntary context switches */ long ru_nivcsw; /* involuntary " */ }; SYSCALL_DEFINE2(osf_getrusage, int, who, struct rusage32 __user *, ru) { struct rusage32 r; if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN) return -EINVAL; memset(&r, 0, sizeof(r)); switch (who) { case RUSAGE_SELF: jiffies_to_timeval32(current->utime, &r.ru_utime); jiffies_to_timeval32(current->stime, &r.ru_stime); r.ru_minflt = current->min_flt; r.ru_majflt = current->maj_flt; break; case RUSAGE_CHILDREN: jiffies_to_timeval32(current->signal->cutime, &r.ru_utime); jiffies_to_timeval32(current->signal->cstime, &r.ru_stime); r.ru_minflt = current->signal->cmin_flt; r.ru_majflt = current->signal->cmaj_flt; break; } return copy_to_user(ru, &r, sizeof(r)) ? 
-EFAULT : 0; } SYSCALL_DEFINE4(osf_wait4, pid_t, pid, int __user *, ustatus, int, options, struct rusage32 __user *, ur) { struct rusage r; long ret, err; unsigned int status = 0; mm_segment_t old_fs; if (!ur) return sys_wait4(pid, ustatus, options, NULL); old_fs = get_fs(); set_fs (KERNEL_DS); ret = sys_wait4(pid, (unsigned int __user *) &status, options, (struct rusage __user *) &r); set_fs (old_fs); if (!access_ok(VERIFY_WRITE, ur, sizeof(*ur))) return -EFAULT; err = 0; err |= put_user(status, ustatus); err |= __put_user(r.ru_utime.tv_sec, &ur->ru_utime.tv_sec); err |= __put_user(r.ru_utime.tv_usec, &ur->ru_utime.tv_usec); err |= __put_user(r.ru_stime.tv_sec, &ur->ru_stime.tv_sec); err |= __put_user(r.ru_stime.tv_usec, &ur->ru_stime.tv_usec); err |= __put_user(r.ru_maxrss, &ur->ru_maxrss); err |= __put_user(r.ru_ixrss, &ur->ru_ixrss); err |= __put_user(r.ru_idrss, &ur->ru_idrss); err |= __put_user(r.ru_isrss, &ur->ru_isrss); err |= __put_user(r.ru_minflt, &ur->ru_minflt); err |= __put_user(r.ru_majflt, &ur->ru_majflt); err |= __put_user(r.ru_nswap, &ur->ru_nswap); err |= __put_user(r.ru_inblock, &ur->ru_inblock); err |= __put_user(r.ru_oublock, &ur->ru_oublock); err |= __put_user(r.ru_msgsnd, &ur->ru_msgsnd); err |= __put_user(r.ru_msgrcv, &ur->ru_msgrcv); err |= __put_user(r.ru_nsignals, &ur->ru_nsignals); err |= __put_user(r.ru_nvcsw, &ur->ru_nvcsw); err |= __put_user(r.ru_nivcsw, &ur->ru_nivcsw); return err ? err : ret; } /* * I don't know what the parameters are: the first one * seems to be a timeval pointer, and I suspect the second * one is the time remaining.. Ho humm.. No documentation. 
*/ SYSCALL_DEFINE2(osf_usleep_thread, struct timeval32 __user *, sleep, struct timeval32 __user *, remain) { struct timeval tmp; unsigned long ticks; if (get_tv32(&tmp, sleep)) goto fault; ticks = timeval_to_jiffies(&tmp); ticks = schedule_timeout_interruptible(ticks); if (remain) { jiffies_to_timeval(ticks, &tmp); if (put_tv32(remain, &tmp)) goto fault; } return 0; fault: return -EFAULT; } struct timex32 { unsigned int modes; /* mode selector */ long offset; /* time offset (usec) */ long freq; /* frequency offset (scaled ppm) */ long maxerror; /* maximum error (usec) */ long esterror; /* estimated error (usec) */ int status; /* clock command/status */ long constant; /* pll time constant */ long precision; /* clock precision (usec) (read only) */ long tolerance; /* clock frequency tolerance (ppm) * (read only) */ struct timeval32 time; /* (read only) */ long tick; /* (modified) usecs between clock ticks */ long ppsfreq; /* pps frequency (scaled ppm) (ro) */ long jitter; /* pps jitter (us) (ro) */ int shift; /* interval duration (s) (shift) (ro) */ long stabil; /* pps stability (scaled ppm) (ro) */ long jitcnt; /* jitter limit exceeded (ro) */ long calcnt; /* calibration intervals (ro) */ long errcnt; /* calibration errors (ro) */ long stbcnt; /* stability limit exceeded (ro) */ int :32; int :32; int :32; int :32; int :32; int :32; int :32; int :32; int :32; int :32; int :32; int :32; }; SYSCALL_DEFINE1(old_adjtimex, struct timex32 __user *, txc_p) { struct timex txc; int ret; /* copy relevant bits of struct timex. 
*/ if (copy_from_user(&txc, txc_p, offsetof(struct timex32, time)) || copy_from_user(&txc.tick, &txc_p->tick, sizeof(struct timex32) - offsetof(struct timex32, time))) return -EFAULT; ret = do_adjtimex(&txc); if (ret < 0) return ret; /* copy back to timex32 */ if (copy_to_user(txc_p, &txc, offsetof(struct timex32, time)) || (copy_to_user(&txc_p->tick, &txc.tick, sizeof(struct timex32) - offsetof(struct timex32, tick))) || (put_tv32(&txc_p->time, &txc.time))) return -EFAULT; return ret; } /* Get an address range which is currently unmapped. Similar to the generic version except that we know how to honor ADDR_LIMIT_32BIT. */ static unsigned long arch_get_unmapped_area_1(unsigned long addr, unsigned long len, unsigned long limit) { struct vm_area_struct *vma = find_vma(current->mm, addr); while (1) { /* At this point: (!vma || addr < vma->vm_end). */ if (limit - len < addr) return -ENOMEM; if (!vma || addr + len <= vma->vm_start) return addr; addr = vma->vm_end; vma = vma->vm_next; } } unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags) { unsigned long limit; /* "32 bit" actually means 31 bit, since pointers sign extend. */ if (current->personality & ADDR_LIMIT_32BIT) limit = 0x80000000; else limit = TASK_SIZE; if (len > limit) return -ENOMEM; if (flags & MAP_FIXED) return addr; /* First, see if the given suggestion fits. The OSF/1 loader (/sbin/loader) relies on us returning an address larger than the requested if one exists, which is a terribly broken way to program. That said, I can see the use in being able to suggest not merely specific addresses, but regions of memory -- perhaps this feature should be incorporated into all ports? */ if (addr) { addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit); if (addr != (unsigned long) -ENOMEM) return addr; } /* Next, try allocating at TASK_UNMAPPED_BASE. 
*/ addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE), len, limit); if (addr != (unsigned long) -ENOMEM) return addr; /* Finally, try allocating in low memory. */ addr = arch_get_unmapped_area_1 (PAGE_SIZE, len, limit); return addr; } #ifdef CONFIG_OSF4_COMPAT /* Clear top 32 bits of iov_len in the user's buffer for compatibility with old versions of OSF/1 where iov_len was defined as int. */ static int osf_fix_iov_len(const struct iovec __user *iov, unsigned long count) { unsigned long i; for (i = 0 ; i < count ; i++) { int __user *iov_len_high = (int __user *)&iov[i].iov_len + 1; if (put_user(0, iov_len_high)) return -EFAULT; } return 0; } SYSCALL_DEFINE3(osf_readv, unsigned long, fd, const struct iovec __user *, vector, unsigned long, count) { if (unlikely(personality(current->personality) == PER_OSF4)) if (osf_fix_iov_len(vector, count)) return -EFAULT; return sys_readv(fd, vector, count); } SYSCALL_DEFINE3(osf_writev, unsigned long, fd, const struct iovec __user *, vector, unsigned long, count) { if (unlikely(personality(current->personality) == PER_OSF4)) if (osf_fix_iov_len(vector, count)) return -EFAULT; return sys_writev(fd, vector, count); } #endif
gpl-2.0
mkey-mi/android_kernel_xiaomi_ferrari
arch/metag/kernel/tcm.c
4336
3354
/*
 * Copyright (C) 2010 Imagination Technologies Ltd.
 *
 * Tightly Coupled Memory (TCM) allocator: each physical TCM region is
 * registered as a tagged gen_pool, and callers allocate/free from a
 * region by tag.
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/stddef.h>
#include <linux/genalloc.h>
#include <linux/string.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <asm/page.h>
#include <asm/tcm.h>

struct tcm_pool {
	struct list_head list;	/* link on pool_list */
	unsigned int tag;	/* identifier of this TCM region */
	unsigned long start;	/* first address covered by the region */
	unsigned long end;	/* last address covered by the region */
	struct gen_pool *pool;	/* allocator backing the region */
};

static LIST_HEAD(pool_list);

/* Return the pool registered with @tag, or NULL if none matches. */
static struct tcm_pool *find_pool(unsigned int tag)
{
	struct list_head *lh;
	struct tcm_pool *pool;

	list_for_each(lh, &pool_list) {
		pool = list_entry(lh, struct tcm_pool, list);
		if (pool->tag == tag)
			return pool;
	}

	return NULL;
}

/**
 * tcm_alloc - allocate memory from a TCM pool
 * @tag: tag of the pool to allocate memory from
 * @len: number of bytes to be allocated
 *
 * Allocate the requested number of bytes from the pool matching
 * the specified tag. Returns the address of the allocated memory
 * or zero on failure.
 */
unsigned long tcm_alloc(unsigned int tag, size_t len)
{
	unsigned long vaddr;
	struct tcm_pool *pool;

	pool = find_pool(tag);
	if (!pool)
		return 0;

	vaddr = gen_pool_alloc(pool->pool, len);
	if (!vaddr)
		return 0;

	return vaddr;
}

/**
 * tcm_free - free a block of memory to a TCM pool
 * @tag: tag of the pool to free memory to
 * @addr: address of the memory to be freed
 * @len: number of bytes to be freed
 *
 * Free the requested number of bytes at a specific address to the
 * pool matching the specified tag.
 */
void tcm_free(unsigned int tag, unsigned long addr, size_t len)
{
	struct tcm_pool *pool;

	pool = find_pool(tag);
	if (!pool)
		return;
	gen_pool_free(pool->pool, addr, len);
}

/**
 * tcm_lookup_tag - find the tag matching an address
 * @p: memory address to lookup the tag for
 *
 * Find the tag of the tcm memory region that contains the
 * specified address. Returns %TCM_INVALID_TAG if no such
 * memory region could be found.
 */
unsigned int tcm_lookup_tag(unsigned long p)
{
	struct list_head *lh;
	struct tcm_pool *pool;
	unsigned long addr = (unsigned long) p;

	list_for_each(lh, &pool_list) {
		pool = list_entry(lh, struct tcm_pool, list);
		/* pool->end is the last byte, hence the strict '<' on
		 * the half-open-looking comparison below would skip it;
		 * the original bounds check is preserved as-is. */
		if (addr >= pool->start && addr < pool->end)
			return pool->tag;
	}

	return TCM_INVALID_TAG;
}

/**
 * tcm_add_region - add a memory region to TCM pool list
 * @reg: descriptor of region to be added
 *
 * Add a region of memory to the TCM pool list. Returns 0 on success.
 */
int __init tcm_add_region(struct tcm_region *reg)
{
	struct tcm_pool *pool;

	pool = kmalloc(sizeof(*pool), GFP_KERNEL);
	if (!pool) {
		pr_err("Failed to alloc memory for TCM pool!\n");
		return -ENOMEM;
	}

	pool->tag = reg->tag;
	pool->start = reg->res.start;
	pool->end = reg->res.end;

	/*
	 * 2^3 = 8 bytes granularity to allow for 64bit access alignment.
	 * -1 = NUMA node specifier.
	 */
	pool->pool = gen_pool_create(3, -1);

	if (!pool->pool) {
		pr_err("Failed to create TCM pool!\n");
		kfree(pool);
		return -ENOMEM;
	}

	if (gen_pool_add(pool->pool, reg->res.start,
			 reg->res.end - reg->res.start + 1, -1)) {
		pr_err("Failed to add memory to TCM pool!\n");
		/*
		 * BUG FIX: previously this path returned without
		 * releasing either the gen_pool or the tcm_pool struct,
		 * leaking both on failure.
		 */
		gen_pool_destroy(pool->pool);
		kfree(pool);
		return -ENOMEM;
	}
	pr_info("Added %s TCM pool (%08x bytes @ %08x)\n",
		reg->res.name, reg->res.end - reg->res.start + 1,
		reg->res.start);

	list_add_tail(&pool->list, &pool_list);

	return 0;
}
gpl-2.0
Divaksh/Speedy-Kernel-u8500-old
drivers/net/wireless/b43legacy/ilt.c
4336
10742
/* Broadcom B43legacy wireless driver Copyright (c) 2005 Martin Langer <martin-langer@gmx.de>, Stefano Brivio <stefano.brivio@polimi.it> Michael Buesch <mbuesch@freenet.de> Danny van Dyk <kugelfang@gentoo.org> Andreas Jaggi <andreas.jaggi@waterwave.ch> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; see the file COPYING. If not, write to the Free Software Foundation, Inc., 51 Franklin Steet, Fifth Floor, Boston, MA 02110-1301, USA. */ #include "b43legacy.h" #include "ilt.h" #include "phy.h" /**** Initial Internal Lookup Tables ****/ const u32 b43legacy_ilt_rotor[B43legacy_ILT_ROTOR_SIZE] = { 0xFEB93FFD, 0xFEC63FFD, /* 0 */ 0xFED23FFD, 0xFEDF3FFD, 0xFEEC3FFE, 0xFEF83FFE, 0xFF053FFE, 0xFF113FFE, 0xFF1E3FFE, 0xFF2A3FFF, /* 8 */ 0xFF373FFF, 0xFF443FFF, 0xFF503FFF, 0xFF5D3FFF, 0xFF693FFF, 0xFF763FFF, 0xFF824000, 0xFF8F4000, /* 16 */ 0xFF9B4000, 0xFFA84000, 0xFFB54000, 0xFFC14000, 0xFFCE4000, 0xFFDA4000, 0xFFE74000, 0xFFF34000, /* 24 */ 0x00004000, 0x000D4000, 0x00194000, 0x00264000, 0x00324000, 0x003F4000, 0x004B4000, 0x00584000, /* 32 */ 0x00654000, 0x00714000, 0x007E4000, 0x008A3FFF, 0x00973FFF, 0x00A33FFF, 0x00B03FFF, 0x00BC3FFF, /* 40 */ 0x00C93FFF, 0x00D63FFF, 0x00E23FFE, 0x00EF3FFE, 0x00FB3FFE, 0x01083FFE, 0x01143FFE, 0x01213FFD, /* 48 */ 0x012E3FFD, 0x013A3FFD, 0x01473FFD, }; const u32 b43legacy_ilt_retard[B43legacy_ILT_RETARD_SIZE] = { 0xDB93CB87, 0xD666CF64, /* 0 */ 0xD1FDD358, 0xCDA6D826, 0xCA38DD9F, 0xC729E2B4, 0xC469E88E, 0xC26AEE2B, 0xC0DEF46C, 
0xC073FA62, /* 8 */ 0xC01D00D5, 0xC0760743, 0xC1560D1E, 0xC2E51369, 0xC4ED18FF, 0xC7AC1ED7, 0xCB2823B2, 0xCEFA28D9, /* 16 */ 0xD2F62D3F, 0xD7BB3197, 0xDCE53568, 0xE1FE3875, 0xE7D13B35, 0xED663D35, 0xF39B3EC4, 0xF98E3FA7, /* 24 */ 0x00004000, 0x06723FA7, 0x0C653EC4, 0x129A3D35, 0x182F3B35, 0x1E023875, 0x231B3568, 0x28453197, /* 32 */ 0x2D0A2D3F, 0x310628D9, 0x34D823B2, 0x38541ED7, 0x3B1318FF, 0x3D1B1369, 0x3EAA0D1E, 0x3F8A0743, /* 40 */ 0x3FE300D5, 0x3F8DFA62, 0x3F22F46C, 0x3D96EE2B, 0x3B97E88E, 0x38D7E2B4, 0x35C8DD9F, 0x325AD826, /* 48 */ 0x2E03D358, 0x299ACF64, 0x246DCB87, }; const u16 b43legacy_ilt_finefreqa[B43legacy_ILT_FINEFREQA_SIZE] = { 0x0082, 0x0082, 0x0102, 0x0182, /* 0 */ 0x0202, 0x0282, 0x0302, 0x0382, 0x0402, 0x0482, 0x0502, 0x0582, 0x05E2, 0x0662, 0x06E2, 0x0762, 0x07E2, 0x0842, 0x08C2, 0x0942, /* 16 */ 0x09C2, 0x0A22, 0x0AA2, 0x0B02, 0x0B82, 0x0BE2, 0x0C62, 0x0CC2, 0x0D42, 0x0DA2, 0x0E02, 0x0E62, 0x0EE2, 0x0F42, 0x0FA2, 0x1002, /* 32 */ 0x1062, 0x10C2, 0x1122, 0x1182, 0x11E2, 0x1242, 0x12A2, 0x12E2, 0x1342, 0x13A2, 0x1402, 0x1442, 0x14A2, 0x14E2, 0x1542, 0x1582, /* 48 */ 0x15E2, 0x1622, 0x1662, 0x16C1, 0x1701, 0x1741, 0x1781, 0x17E1, 0x1821, 0x1861, 0x18A1, 0x18E1, 0x1921, 0x1961, 0x19A1, 0x19E1, /* 64 */ 0x1A21, 0x1A61, 0x1AA1, 0x1AC1, 0x1B01, 0x1B41, 0x1B81, 0x1BA1, 0x1BE1, 0x1C21, 0x1C41, 0x1C81, 0x1CA1, 0x1CE1, 0x1D01, 0x1D41, /* 80 */ 0x1D61, 0x1DA1, 0x1DC1, 0x1E01, 0x1E21, 0x1E61, 0x1E81, 0x1EA1, 0x1EE1, 0x1F01, 0x1F21, 0x1F41, 0x1F81, 0x1FA1, 0x1FC1, 0x1FE1, /* 96 */ 0x2001, 0x2041, 0x2061, 0x2081, 0x20A1, 0x20C1, 0x20E1, 0x2101, 0x2121, 0x2141, 0x2161, 0x2181, 0x21A1, 0x21C1, 0x21E1, 0x2201, /* 112 */ 0x2221, 0x2241, 0x2261, 0x2281, 0x22A1, 0x22C1, 0x22C1, 0x22E1, 0x2301, 0x2321, 0x2341, 0x2361, 0x2361, 0x2381, 0x23A1, 0x23C1, /* 128 */ 0x23E1, 0x23E1, 0x2401, 0x2421, 0x2441, 0x2441, 0x2461, 0x2481, 0x2481, 0x24A1, 0x24C1, 0x24C1, 0x24E1, 0x2501, 0x2501, 0x2521, /* 144 */ 0x2541, 0x2541, 0x2561, 0x2561, 0x2581, 0x25A1, 0x25A1, 0x25C1, 0x25C1, 
0x25E1, 0x2601, 0x2601, 0x2621, 0x2621, 0x2641, 0x2641, /* 160 */ 0x2661, 0x2661, 0x2681, 0x2681, 0x26A1, 0x26A1, 0x26C1, 0x26C1, 0x26E1, 0x26E1, 0x2701, 0x2701, 0x2721, 0x2721, 0x2740, 0x2740, /* 176 */ 0x2760, 0x2760, 0x2780, 0x2780, 0x2780, 0x27A0, 0x27A0, 0x27C0, 0x27C0, 0x27E0, 0x27E0, 0x27E0, 0x2800, 0x2800, 0x2820, 0x2820, /* 192 */ 0x2820, 0x2840, 0x2840, 0x2840, 0x2860, 0x2860, 0x2880, 0x2880, 0x2880, 0x28A0, 0x28A0, 0x28A0, 0x28C0, 0x28C0, 0x28C0, 0x28E0, /* 208 */ 0x28E0, 0x28E0, 0x2900, 0x2900, 0x2900, 0x2920, 0x2920, 0x2920, 0x2940, 0x2940, 0x2940, 0x2960, 0x2960, 0x2960, 0x2960, 0x2980, /* 224 */ 0x2980, 0x2980, 0x29A0, 0x29A0, 0x29A0, 0x29A0, 0x29C0, 0x29C0, 0x29C0, 0x29E0, 0x29E0, 0x29E0, 0x29E0, 0x2A00, 0x2A00, 0x2A00, /* 240 */ 0x2A00, 0x2A20, 0x2A20, 0x2A20, 0x2A20, 0x2A40, 0x2A40, 0x2A40, 0x2A40, 0x2A60, 0x2A60, 0x2A60, }; const u16 b43legacy_ilt_finefreqg[B43legacy_ILT_FINEFREQG_SIZE] = { 0x0089, 0x02E9, 0x0409, 0x04E9, /* 0 */ 0x05A9, 0x0669, 0x0709, 0x0789, 0x0829, 0x08A9, 0x0929, 0x0989, 0x0A09, 0x0A69, 0x0AC9, 0x0B29, 0x0BA9, 0x0BE9, 0x0C49, 0x0CA9, /* 16 */ 0x0D09, 0x0D69, 0x0DA9, 0x0E09, 0x0E69, 0x0EA9, 0x0F09, 0x0F49, 0x0FA9, 0x0FE9, 0x1029, 0x1089, 0x10C9, 0x1109, 0x1169, 0x11A9, /* 32 */ 0x11E9, 0x1229, 0x1289, 0x12C9, 0x1309, 0x1349, 0x1389, 0x13C9, 0x1409, 0x1449, 0x14A9, 0x14E9, 0x1529, 0x1569, 0x15A9, 0x15E9, /* 48 */ 0x1629, 0x1669, 0x16A9, 0x16E8, 0x1728, 0x1768, 0x17A8, 0x17E8, 0x1828, 0x1868, 0x18A8, 0x18E8, 0x1928, 0x1968, 0x19A8, 0x19E8, /* 64 */ 0x1A28, 0x1A68, 0x1AA8, 0x1AE8, 0x1B28, 0x1B68, 0x1BA8, 0x1BE8, 0x1C28, 0x1C68, 0x1CA8, 0x1CE8, 0x1D28, 0x1D68, 0x1DC8, 0x1E08, /* 80 */ 0x1E48, 0x1E88, 0x1EC8, 0x1F08, 0x1F48, 0x1F88, 0x1FE8, 0x2028, 0x2068, 0x20A8, 0x2108, 0x2148, 0x2188, 0x21C8, 0x2228, 0x2268, /* 96 */ 0x22C8, 0x2308, 0x2348, 0x23A8, 0x23E8, 0x2448, 0x24A8, 0x24E8, 0x2548, 0x25A8, 0x2608, 0x2668, 0x26C8, 0x2728, 0x2787, 0x27E7, /* 112 */ 0x2847, 0x28C7, 0x2947, 0x29A7, 0x2A27, 0x2AC7, 0x2B47, 0x2BE7, 0x2CA7, 
0x2D67, 0x2E47, 0x2F67, 0x3247, 0x3526, 0x3646, 0x3726, /* 128 */ 0x3806, 0x38A6, 0x3946, 0x39E6, 0x3A66, 0x3AE6, 0x3B66, 0x3BC6, 0x3C45, 0x3CA5, 0x3D05, 0x3D85, 0x3DE5, 0x3E45, 0x3EA5, 0x3EE5, /* 144 */ 0x3F45, 0x3FA5, 0x4005, 0x4045, 0x40A5, 0x40E5, 0x4145, 0x4185, 0x41E5, 0x4225, 0x4265, 0x42C5, 0x4305, 0x4345, 0x43A5, 0x43E5, /* 160 */ 0x4424, 0x4464, 0x44C4, 0x4504, 0x4544, 0x4584, 0x45C4, 0x4604, 0x4644, 0x46A4, 0x46E4, 0x4724, 0x4764, 0x47A4, 0x47E4, 0x4824, /* 176 */ 0x4864, 0x48A4, 0x48E4, 0x4924, 0x4964, 0x49A4, 0x49E4, 0x4A24, 0x4A64, 0x4AA4, 0x4AE4, 0x4B23, 0x4B63, 0x4BA3, 0x4BE3, 0x4C23, /* 192 */ 0x4C63, 0x4CA3, 0x4CE3, 0x4D23, 0x4D63, 0x4DA3, 0x4DE3, 0x4E23, 0x4E63, 0x4EA3, 0x4EE3, 0x4F23, 0x4F63, 0x4FC3, 0x5003, 0x5043, /* 208 */ 0x5083, 0x50C3, 0x5103, 0x5143, 0x5183, 0x51E2, 0x5222, 0x5262, 0x52A2, 0x52E2, 0x5342, 0x5382, 0x53C2, 0x5402, 0x5462, 0x54A2, /* 224 */ 0x5502, 0x5542, 0x55A2, 0x55E2, 0x5642, 0x5682, 0x56E2, 0x5722, 0x5782, 0x57E1, 0x5841, 0x58A1, 0x5901, 0x5961, 0x59C1, 0x5A21, /* 240 */ 0x5AA1, 0x5B01, 0x5B81, 0x5BE1, 0x5C61, 0x5D01, 0x5D80, 0x5E20, 0x5EE0, 0x5FA0, 0x6080, 0x61C0, }; const u16 b43legacy_ilt_noisea2[B43legacy_ILT_NOISEA2_SIZE] = { 0x0001, 0x0001, 0x0001, 0xFFFE, 0xFFFE, 0x3FFF, 0x1000, 0x0393, }; const u16 b43legacy_ilt_noisea3[B43legacy_ILT_NOISEA3_SIZE] = { 0x4C4C, 0x4C4C, 0x4C4C, 0x2D36, 0x4C4C, 0x4C4C, 0x4C4C, 0x2D36, }; const u16 b43legacy_ilt_noiseg1[B43legacy_ILT_NOISEG1_SIZE] = { 0x013C, 0x01F5, 0x031A, 0x0631, 0x0001, 0x0001, 0x0001, 0x0001, }; const u16 b43legacy_ilt_noiseg2[B43legacy_ILT_NOISEG2_SIZE] = { 0x5484, 0x3C40, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, }; const u16 b43legacy_ilt_noisescaleg1[B43legacy_ILT_NOISESCALEG_SIZE] = { 0x6C77, 0x5162, 0x3B40, 0x3335, /* 0 */ 0x2F2D, 0x2A2A, 0x2527, 0x1F21, 0x1A1D, 0x1719, 0x1616, 0x1414, 0x1414, 0x1400, 0x1414, 0x1614, 0x1716, 0x1A19, 0x1F1D, 0x2521, /* 16 */ 0x2A27, 0x2F2A, 0x332D, 0x3B35, 0x5140, 0x6C62, 0x0077, }; const u16 
b43legacy_ilt_noisescaleg2[B43legacy_ILT_NOISESCALEG_SIZE] = { 0xD8DD, 0xCBD4, 0xBCC0, 0XB6B7, /* 0 */ 0xB2B0, 0xADAD, 0xA7A9, 0x9FA1, 0x969B, 0x9195, 0x8F8F, 0x8A8A, 0x8A8A, 0x8A00, 0x8A8A, 0x8F8A, 0x918F, 0x9695, 0x9F9B, 0xA7A1, /* 16 */ 0xADA9, 0xB2AD, 0xB6B0, 0xBCB7, 0xCBC0, 0xD8D4, 0x00DD, }; const u16 b43legacy_ilt_noisescaleg3[B43legacy_ILT_NOISESCALEG_SIZE] = { 0xA4A4, 0xA4A4, 0xA4A4, 0xA4A4, /* 0 */ 0xA4A4, 0xA4A4, 0xA4A4, 0xA4A4, 0xA4A4, 0xA4A4, 0xA4A4, 0xA4A4, 0xA4A4, 0xA400, 0xA4A4, 0xA4A4, 0xA4A4, 0xA4A4, 0xA4A4, 0xA4A4, /* 16 */ 0xA4A4, 0xA4A4, 0xA4A4, 0xA4A4, 0xA4A4, 0xA4A4, 0x00A4, }; const u16 b43legacy_ilt_sigmasqr1[B43legacy_ILT_SIGMASQR_SIZE] = { 0x007A, 0x0075, 0x0071, 0x006C, /* 0 */ 0x0067, 0x0063, 0x005E, 0x0059, 0x0054, 0x0050, 0x004B, 0x0046, 0x0042, 0x003D, 0x003D, 0x003D, 0x003D, 0x003D, 0x003D, 0x003D, /* 16 */ 0x003D, 0x003D, 0x003D, 0x003D, 0x003D, 0x003D, 0x0000, 0x003D, 0x003D, 0x003D, 0x003D, 0x003D, 0x003D, 0x003D, 0x003D, 0x003D, /* 32 */ 0x003D, 0x003D, 0x003D, 0x003D, 0x0042, 0x0046, 0x004B, 0x0050, 0x0054, 0x0059, 0x005E, 0x0063, 0x0067, 0x006C, 0x0071, 0x0075, /* 48 */ 0x007A, }; const u16 b43legacy_ilt_sigmasqr2[B43legacy_ILT_SIGMASQR_SIZE] = { 0x00DE, 0x00DC, 0x00DA, 0x00D8, /* 0 */ 0x00D6, 0x00D4, 0x00D2, 0x00CF, 0x00CD, 0x00CA, 0x00C7, 0x00C4, 0x00C1, 0x00BE, 0x00BE, 0x00BE, 0x00BE, 0x00BE, 0x00BE, 0x00BE, /* 16 */ 0x00BE, 0x00BE, 0x00BE, 0x00BE, 0x00BE, 0x00BE, 0x0000, 0x00BE, 0x00BE, 0x00BE, 0x00BE, 0x00BE, 0x00BE, 0x00BE, 0x00BE, 0x00BE, /* 32 */ 0x00BE, 0x00BE, 0x00BE, 0x00BE, 0x00C1, 0x00C4, 0x00C7, 0x00CA, 0x00CD, 0x00CF, 0x00D2, 0x00D4, 0x00D6, 0x00D8, 0x00DA, 0x00DC, /* 48 */ 0x00DE, }; /**** Helper functions to access the device Internal Lookup Tables ****/ void b43legacy_ilt_write(struct b43legacy_wldev *dev, u16 offset, u16 val) { b43legacy_phy_write(dev, B43legacy_PHY_ILT_G_CTRL, offset); mmiowb(); b43legacy_phy_write(dev, B43legacy_PHY_ILT_G_DATA1, val); } void b43legacy_ilt_write32(struct b43legacy_wldev 
*dev, u16 offset, u32 val) { b43legacy_phy_write(dev, B43legacy_PHY_ILT_G_CTRL, offset); mmiowb(); b43legacy_phy_write(dev, B43legacy_PHY_ILT_G_DATA2, (val & 0xFFFF0000) >> 16); b43legacy_phy_write(dev, B43legacy_PHY_ILT_G_DATA1, val & 0x0000FFFF); } u16 b43legacy_ilt_read(struct b43legacy_wldev *dev, u16 offset) { b43legacy_phy_write(dev, B43legacy_PHY_ILT_G_CTRL, offset); return b43legacy_phy_read(dev, B43legacy_PHY_ILT_G_DATA1); }
gpl-2.0
sirmordred/samsung-kernel-msm7x30-2
arch/arm/mach-pxa/devices.c
4848
22640
#include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/dma-mapping.h> #include <linux/spi/pxa2xx_spi.h> #include <linux/i2c/pxa-i2c.h> #include <asm/pmu.h> #include <mach/udc.h> #include <mach/pxa3xx-u2d.h> #include <mach/pxafb.h> #include <mach/mmc.h> #include <mach/irda.h> #include <mach/irqs.h> #include <mach/ohci.h> #include <plat/pxa27x_keypad.h> #include <mach/camera.h> #include <mach/audio.h> #include <mach/hardware.h> #include <plat/pxa3xx_nand.h> #include "devices.h" #include "generic.h" void __init pxa_register_device(struct platform_device *dev, void *data) { int ret; dev->dev.platform_data = data; ret = platform_device_register(dev); if (ret) dev_err(&dev->dev, "unable to register device: %d\n", ret); } static struct resource pxa_resource_pmu = { .start = IRQ_PMU, .end = IRQ_PMU, .flags = IORESOURCE_IRQ, }; struct platform_device pxa_device_pmu = { .name = "arm-pmu", .id = ARM_PMU_DEVICE_CPU, .resource = &pxa_resource_pmu, .num_resources = 1, }; static struct resource pxamci_resources[] = { [0] = { .start = 0x41100000, .end = 0x41100fff, .flags = IORESOURCE_MEM, }, [1] = { .start = IRQ_MMC, .end = IRQ_MMC, .flags = IORESOURCE_IRQ, }, [2] = { .start = 21, .end = 21, .flags = IORESOURCE_DMA, }, [3] = { .start = 22, .end = 22, .flags = IORESOURCE_DMA, }, }; static u64 pxamci_dmamask = 0xffffffffUL; struct platform_device pxa_device_mci = { .name = "pxa2xx-mci", .id = 0, .dev = { .dma_mask = &pxamci_dmamask, .coherent_dma_mask = 0xffffffff, }, .num_resources = ARRAY_SIZE(pxamci_resources), .resource = pxamci_resources, }; void __init pxa_set_mci_info(struct pxamci_platform_data *info) { pxa_register_device(&pxa_device_mci, info); } static struct pxa2xx_udc_mach_info pxa_udc_info = { .gpio_pullup = -1, }; void __init pxa_set_udc_info(struct pxa2xx_udc_mach_info *info) { memcpy(&pxa_udc_info, info, sizeof *info); } static struct resource pxa2xx_udc_resources[] = { [0] = { .start = 
0x40600000, .end = 0x4060ffff, .flags = IORESOURCE_MEM, }, [1] = { .start = IRQ_USB, .end = IRQ_USB, .flags = IORESOURCE_IRQ, }, }; static u64 udc_dma_mask = ~(u32)0; struct platform_device pxa25x_device_udc = { .name = "pxa25x-udc", .id = -1, .resource = pxa2xx_udc_resources, .num_resources = ARRAY_SIZE(pxa2xx_udc_resources), .dev = { .platform_data = &pxa_udc_info, .dma_mask = &udc_dma_mask, } }; struct platform_device pxa27x_device_udc = { .name = "pxa27x-udc", .id = -1, .resource = pxa2xx_udc_resources, .num_resources = ARRAY_SIZE(pxa2xx_udc_resources), .dev = { .platform_data = &pxa_udc_info, .dma_mask = &udc_dma_mask, } }; #ifdef CONFIG_PXA3xx static struct resource pxa3xx_u2d_resources[] = { [0] = { .start = 0x54100000, .end = 0x54100fff, .flags = IORESOURCE_MEM, }, [1] = { .start = IRQ_USB2, .end = IRQ_USB2, .flags = IORESOURCE_IRQ, }, }; struct platform_device pxa3xx_device_u2d = { .name = "pxa3xx-u2d", .id = -1, .resource = pxa3xx_u2d_resources, .num_resources = ARRAY_SIZE(pxa3xx_u2d_resources), }; void __init pxa3xx_set_u2d_info(struct pxa3xx_u2d_platform_data *info) { pxa_register_device(&pxa3xx_device_u2d, info); } #endif /* CONFIG_PXA3xx */ static struct resource pxafb_resources[] = { [0] = { .start = 0x44000000, .end = 0x4400ffff, .flags = IORESOURCE_MEM, }, [1] = { .start = IRQ_LCD, .end = IRQ_LCD, .flags = IORESOURCE_IRQ, }, }; static u64 fb_dma_mask = ~(u64)0; struct platform_device pxa_device_fb = { .name = "pxa2xx-fb", .id = -1, .dev = { .dma_mask = &fb_dma_mask, .coherent_dma_mask = 0xffffffff, }, .num_resources = ARRAY_SIZE(pxafb_resources), .resource = pxafb_resources, }; void __init pxa_set_fb_info(struct device *parent, struct pxafb_mach_info *info) { pxa_device_fb.dev.parent = parent; pxa_register_device(&pxa_device_fb, info); } static struct resource pxa_resource_ffuart[] = { { .start = 0x40100000, .end = 0x40100023, .flags = IORESOURCE_MEM, }, { .start = IRQ_FFUART, .end = IRQ_FFUART, .flags = IORESOURCE_IRQ, } }; struct platform_device 
pxa_device_ffuart = { .name = "pxa2xx-uart", .id = 0, .resource = pxa_resource_ffuart, .num_resources = ARRAY_SIZE(pxa_resource_ffuart), }; void __init pxa_set_ffuart_info(void *info) { pxa_register_device(&pxa_device_ffuart, info); } static struct resource pxa_resource_btuart[] = { { .start = 0x40200000, .end = 0x40200023, .flags = IORESOURCE_MEM, }, { .start = IRQ_BTUART, .end = IRQ_BTUART, .flags = IORESOURCE_IRQ, } }; struct platform_device pxa_device_btuart = { .name = "pxa2xx-uart", .id = 1, .resource = pxa_resource_btuart, .num_resources = ARRAY_SIZE(pxa_resource_btuart), }; void __init pxa_set_btuart_info(void *info) { pxa_register_device(&pxa_device_btuart, info); } static struct resource pxa_resource_stuart[] = { { .start = 0x40700000, .end = 0x40700023, .flags = IORESOURCE_MEM, }, { .start = IRQ_STUART, .end = IRQ_STUART, .flags = IORESOURCE_IRQ, } }; struct platform_device pxa_device_stuart = { .name = "pxa2xx-uart", .id = 2, .resource = pxa_resource_stuart, .num_resources = ARRAY_SIZE(pxa_resource_stuart), }; void __init pxa_set_stuart_info(void *info) { pxa_register_device(&pxa_device_stuart, info); } static struct resource pxa_resource_hwuart[] = { { .start = 0x41600000, .end = 0x4160002F, .flags = IORESOURCE_MEM, }, { .start = IRQ_HWUART, .end = IRQ_HWUART, .flags = IORESOURCE_IRQ, } }; struct platform_device pxa_device_hwuart = { .name = "pxa2xx-uart", .id = 3, .resource = pxa_resource_hwuart, .num_resources = ARRAY_SIZE(pxa_resource_hwuart), }; void __init pxa_set_hwuart_info(void *info) { if (cpu_is_pxa255()) pxa_register_device(&pxa_device_hwuart, info); else pr_info("UART: Ignoring attempt to register HWUART on non-PXA255 hardware"); } static struct resource pxai2c_resources[] = { { .start = 0x40301680, .end = 0x403016a3, .flags = IORESOURCE_MEM, }, { .start = IRQ_I2C, .end = IRQ_I2C, .flags = IORESOURCE_IRQ, }, }; struct platform_device pxa_device_i2c = { .name = "pxa2xx-i2c", .id = 0, .resource = pxai2c_resources, .num_resources = 
ARRAY_SIZE(pxai2c_resources), }; void __init pxa_set_i2c_info(struct i2c_pxa_platform_data *info) { pxa_register_device(&pxa_device_i2c, info); } #ifdef CONFIG_PXA27x static struct resource pxa27x_resources_i2c_power[] = { { .start = 0x40f00180, .end = 0x40f001a3, .flags = IORESOURCE_MEM, }, { .start = IRQ_PWRI2C, .end = IRQ_PWRI2C, .flags = IORESOURCE_IRQ, }, }; struct platform_device pxa27x_device_i2c_power = { .name = "pxa2xx-i2c", .id = 1, .resource = pxa27x_resources_i2c_power, .num_resources = ARRAY_SIZE(pxa27x_resources_i2c_power), }; #endif static struct resource pxai2s_resources[] = { { .start = 0x40400000, .end = 0x40400083, .flags = IORESOURCE_MEM, }, { .start = IRQ_I2S, .end = IRQ_I2S, .flags = IORESOURCE_IRQ, }, }; struct platform_device pxa_device_i2s = { .name = "pxa2xx-i2s", .id = -1, .resource = pxai2s_resources, .num_resources = ARRAY_SIZE(pxai2s_resources), }; struct platform_device pxa_device_asoc_ssp1 = { .name = "pxa-ssp-dai", .id = 0, }; struct platform_device pxa_device_asoc_ssp2= { .name = "pxa-ssp-dai", .id = 1, }; struct platform_device pxa_device_asoc_ssp3 = { .name = "pxa-ssp-dai", .id = 2, }; struct platform_device pxa_device_asoc_ssp4 = { .name = "pxa-ssp-dai", .id = 3, }; struct platform_device pxa_device_asoc_platform = { .name = "pxa-pcm-audio", .id = -1, }; static u64 pxaficp_dmamask = ~(u32)0; struct platform_device pxa_device_ficp = { .name = "pxa2xx-ir", .id = -1, .dev = { .dma_mask = &pxaficp_dmamask, .coherent_dma_mask = 0xffffffff, }, }; void __init pxa_set_ficp_info(struct pxaficp_platform_data *info) { pxa_register_device(&pxa_device_ficp, info); } static struct resource pxa_rtc_resources[] = { [0] = { .start = 0x40900000, .end = 0x40900000 + 0x3b, .flags = IORESOURCE_MEM, }, [1] = { .start = IRQ_RTC1Hz, .end = IRQ_RTC1Hz, .name = "rtc 1Hz", .flags = IORESOURCE_IRQ, }, [2] = { .start = IRQ_RTCAlrm, .end = IRQ_RTCAlrm, .name = "rtc alarm", .flags = IORESOURCE_IRQ, }, }; struct platform_device pxa_device_rtc = { .name = 
"pxa-rtc", .id = -1, .num_resources = ARRAY_SIZE(pxa_rtc_resources), .resource = pxa_rtc_resources, }; static struct resource sa1100_rtc_resources[] = { { .start = IRQ_RTC1Hz, .end = IRQ_RTC1Hz, .name = "rtc 1Hz", .flags = IORESOURCE_IRQ, }, { .start = IRQ_RTCAlrm, .end = IRQ_RTCAlrm, .name = "rtc alarm", .flags = IORESOURCE_IRQ, }, }; struct platform_device sa1100_device_rtc = { .name = "sa1100-rtc", .id = -1, .num_resources = ARRAY_SIZE(sa1100_rtc_resources), .resource = sa1100_rtc_resources, }; static struct resource pxa_ac97_resources[] = { [0] = { .start = 0x40500000, .end = 0x40500000 + 0xfff, .flags = IORESOURCE_MEM, }, [1] = { .start = IRQ_AC97, .end = IRQ_AC97, .flags = IORESOURCE_IRQ, }, }; static u64 pxa_ac97_dmamask = 0xffffffffUL; struct platform_device pxa_device_ac97 = { .name = "pxa2xx-ac97", .id = -1, .dev = { .dma_mask = &pxa_ac97_dmamask, .coherent_dma_mask = 0xffffffff, }, .num_resources = ARRAY_SIZE(pxa_ac97_resources), .resource = pxa_ac97_resources, }; void __init pxa_set_ac97_info(pxa2xx_audio_ops_t *ops) { pxa_register_device(&pxa_device_ac97, ops); } #ifdef CONFIG_PXA25x static struct resource pxa25x_resource_pwm0[] = { [0] = { .start = 0x40b00000, .end = 0x40b0000f, .flags = IORESOURCE_MEM, }, }; struct platform_device pxa25x_device_pwm0 = { .name = "pxa25x-pwm", .id = 0, .resource = pxa25x_resource_pwm0, .num_resources = ARRAY_SIZE(pxa25x_resource_pwm0), }; static struct resource pxa25x_resource_pwm1[] = { [0] = { .start = 0x40c00000, .end = 0x40c0000f, .flags = IORESOURCE_MEM, }, }; struct platform_device pxa25x_device_pwm1 = { .name = "pxa25x-pwm", .id = 1, .resource = pxa25x_resource_pwm1, .num_resources = ARRAY_SIZE(pxa25x_resource_pwm1), }; static u64 pxa25x_ssp_dma_mask = DMA_BIT_MASK(32); static struct resource pxa25x_resource_ssp[] = { [0] = { .start = 0x41000000, .end = 0x4100001f, .flags = IORESOURCE_MEM, }, [1] = { .start = IRQ_SSP, .end = IRQ_SSP, .flags = IORESOURCE_IRQ, }, [2] = { /* DRCMR for RX */ .start = 13, .end = 13, 
.flags = IORESOURCE_DMA, }, [3] = { /* DRCMR for TX */ .start = 14, .end = 14, .flags = IORESOURCE_DMA, }, }; struct platform_device pxa25x_device_ssp = { .name = "pxa25x-ssp", .id = 0, .dev = { .dma_mask = &pxa25x_ssp_dma_mask, .coherent_dma_mask = DMA_BIT_MASK(32), }, .resource = pxa25x_resource_ssp, .num_resources = ARRAY_SIZE(pxa25x_resource_ssp), }; static u64 pxa25x_nssp_dma_mask = DMA_BIT_MASK(32); static struct resource pxa25x_resource_nssp[] = { [0] = { .start = 0x41400000, .end = 0x4140002f, .flags = IORESOURCE_MEM, }, [1] = { .start = IRQ_NSSP, .end = IRQ_NSSP, .flags = IORESOURCE_IRQ, }, [2] = { /* DRCMR for RX */ .start = 15, .end = 15, .flags = IORESOURCE_DMA, }, [3] = { /* DRCMR for TX */ .start = 16, .end = 16, .flags = IORESOURCE_DMA, }, }; struct platform_device pxa25x_device_nssp = { .name = "pxa25x-nssp", .id = 1, .dev = { .dma_mask = &pxa25x_nssp_dma_mask, .coherent_dma_mask = DMA_BIT_MASK(32), }, .resource = pxa25x_resource_nssp, .num_resources = ARRAY_SIZE(pxa25x_resource_nssp), }; static u64 pxa25x_assp_dma_mask = DMA_BIT_MASK(32); static struct resource pxa25x_resource_assp[] = { [0] = { .start = 0x41500000, .end = 0x4150002f, .flags = IORESOURCE_MEM, }, [1] = { .start = IRQ_ASSP, .end = IRQ_ASSP, .flags = IORESOURCE_IRQ, }, [2] = { /* DRCMR for RX */ .start = 23, .end = 23, .flags = IORESOURCE_DMA, }, [3] = { /* DRCMR for TX */ .start = 24, .end = 24, .flags = IORESOURCE_DMA, }, }; struct platform_device pxa25x_device_assp = { /* ASSP is basically equivalent to NSSP */ .name = "pxa25x-nssp", .id = 2, .dev = { .dma_mask = &pxa25x_assp_dma_mask, .coherent_dma_mask = DMA_BIT_MASK(32), }, .resource = pxa25x_resource_assp, .num_resources = ARRAY_SIZE(pxa25x_resource_assp), }; #endif /* CONFIG_PXA25x */ #if defined(CONFIG_PXA27x) || defined(CONFIG_PXA3xx) static struct resource pxa27x_resource_camera[] = { [0] = { .start = 0x50000000, .end = 0x50000fff, .flags = IORESOURCE_MEM, }, [1] = { .start = IRQ_CAMERA, .end = IRQ_CAMERA, .flags = 
IORESOURCE_IRQ, }, }; static u64 pxa27x_dma_mask_camera = DMA_BIT_MASK(32); static struct platform_device pxa27x_device_camera = { .name = "pxa27x-camera", .id = 0, /* This is used to put cameras on this interface */ .dev = { .dma_mask = &pxa27x_dma_mask_camera, .coherent_dma_mask = 0xffffffff, }, .num_resources = ARRAY_SIZE(pxa27x_resource_camera), .resource = pxa27x_resource_camera, }; void __init pxa_set_camera_info(struct pxacamera_platform_data *info) { pxa_register_device(&pxa27x_device_camera, info); } static u64 pxa27x_ohci_dma_mask = DMA_BIT_MASK(32); static struct resource pxa27x_resource_ohci[] = { [0] = { .start = 0x4C000000, .end = 0x4C00ff6f, .flags = IORESOURCE_MEM, }, [1] = { .start = IRQ_USBH1, .end = IRQ_USBH1, .flags = IORESOURCE_IRQ, }, }; struct platform_device pxa27x_device_ohci = { .name = "pxa27x-ohci", .id = -1, .dev = { .dma_mask = &pxa27x_ohci_dma_mask, .coherent_dma_mask = DMA_BIT_MASK(32), }, .num_resources = ARRAY_SIZE(pxa27x_resource_ohci), .resource = pxa27x_resource_ohci, }; void __init pxa_set_ohci_info(struct pxaohci_platform_data *info) { pxa_register_device(&pxa27x_device_ohci, info); } #endif /* CONFIG_PXA27x || CONFIG_PXA3xx */ #if defined(CONFIG_PXA27x) || defined(CONFIG_PXA3xx) || defined(CONFIG_PXA95x) static struct resource pxa27x_resource_keypad[] = { [0] = { .start = 0x41500000, .end = 0x4150004c, .flags = IORESOURCE_MEM, }, [1] = { .start = IRQ_KEYPAD, .end = IRQ_KEYPAD, .flags = IORESOURCE_IRQ, }, }; struct platform_device pxa27x_device_keypad = { .name = "pxa27x-keypad", .id = -1, .resource = pxa27x_resource_keypad, .num_resources = ARRAY_SIZE(pxa27x_resource_keypad), }; void __init pxa_set_keypad_info(struct pxa27x_keypad_platform_data *info) { pxa_register_device(&pxa27x_device_keypad, info); } static u64 pxa27x_ssp1_dma_mask = DMA_BIT_MASK(32); static struct resource pxa27x_resource_ssp1[] = { [0] = { .start = 0x41000000, .end = 0x4100003f, .flags = IORESOURCE_MEM, }, [1] = { .start = IRQ_SSP, .end = IRQ_SSP, 
.flags = IORESOURCE_IRQ, }, [2] = { /* DRCMR for RX */ .start = 13, .end = 13, .flags = IORESOURCE_DMA, }, [3] = { /* DRCMR for TX */ .start = 14, .end = 14, .flags = IORESOURCE_DMA, }, }; struct platform_device pxa27x_device_ssp1 = { .name = "pxa27x-ssp", .id = 0, .dev = { .dma_mask = &pxa27x_ssp1_dma_mask, .coherent_dma_mask = DMA_BIT_MASK(32), }, .resource = pxa27x_resource_ssp1, .num_resources = ARRAY_SIZE(pxa27x_resource_ssp1), }; static u64 pxa27x_ssp2_dma_mask = DMA_BIT_MASK(32); static struct resource pxa27x_resource_ssp2[] = { [0] = { .start = 0x41700000, .end = 0x4170003f, .flags = IORESOURCE_MEM, }, [1] = { .start = IRQ_SSP2, .end = IRQ_SSP2, .flags = IORESOURCE_IRQ, }, [2] = { /* DRCMR for RX */ .start = 15, .end = 15, .flags = IORESOURCE_DMA, }, [3] = { /* DRCMR for TX */ .start = 16, .end = 16, .flags = IORESOURCE_DMA, }, }; struct platform_device pxa27x_device_ssp2 = { .name = "pxa27x-ssp", .id = 1, .dev = { .dma_mask = &pxa27x_ssp2_dma_mask, .coherent_dma_mask = DMA_BIT_MASK(32), }, .resource = pxa27x_resource_ssp2, .num_resources = ARRAY_SIZE(pxa27x_resource_ssp2), }; static u64 pxa27x_ssp3_dma_mask = DMA_BIT_MASK(32); static struct resource pxa27x_resource_ssp3[] = { [0] = { .start = 0x41900000, .end = 0x4190003f, .flags = IORESOURCE_MEM, }, [1] = { .start = IRQ_SSP3, .end = IRQ_SSP3, .flags = IORESOURCE_IRQ, }, [2] = { /* DRCMR for RX */ .start = 66, .end = 66, .flags = IORESOURCE_DMA, }, [3] = { /* DRCMR for TX */ .start = 67, .end = 67, .flags = IORESOURCE_DMA, }, }; struct platform_device pxa27x_device_ssp3 = { .name = "pxa27x-ssp", .id = 2, .dev = { .dma_mask = &pxa27x_ssp3_dma_mask, .coherent_dma_mask = DMA_BIT_MASK(32), }, .resource = pxa27x_resource_ssp3, .num_resources = ARRAY_SIZE(pxa27x_resource_ssp3), }; static struct resource pxa27x_resource_pwm0[] = { [0] = { .start = 0x40b00000, .end = 0x40b0001f, .flags = IORESOURCE_MEM, }, }; struct platform_device pxa27x_device_pwm0 = { .name = "pxa27x-pwm", .id = 0, .resource = 
pxa27x_resource_pwm0, .num_resources = ARRAY_SIZE(pxa27x_resource_pwm0), }; static struct resource pxa27x_resource_pwm1[] = { [0] = { .start = 0x40c00000, .end = 0x40c0001f, .flags = IORESOURCE_MEM, }, }; struct platform_device pxa27x_device_pwm1 = { .name = "pxa27x-pwm", .id = 1, .resource = pxa27x_resource_pwm1, .num_resources = ARRAY_SIZE(pxa27x_resource_pwm1), }; #endif /* CONFIG_PXA27x || CONFIG_PXA3xx || CONFIG_PXA95x*/ #ifdef CONFIG_PXA3xx static struct resource pxa3xx_resources_mci2[] = { [0] = { .start = 0x42000000, .end = 0x42000fff, .flags = IORESOURCE_MEM, }, [1] = { .start = IRQ_MMC2, .end = IRQ_MMC2, .flags = IORESOURCE_IRQ, }, [2] = { .start = 93, .end = 93, .flags = IORESOURCE_DMA, }, [3] = { .start = 94, .end = 94, .flags = IORESOURCE_DMA, }, }; struct platform_device pxa3xx_device_mci2 = { .name = "pxa2xx-mci", .id = 1, .dev = { .dma_mask = &pxamci_dmamask, .coherent_dma_mask = 0xffffffff, }, .num_resources = ARRAY_SIZE(pxa3xx_resources_mci2), .resource = pxa3xx_resources_mci2, }; void __init pxa3xx_set_mci2_info(struct pxamci_platform_data *info) { pxa_register_device(&pxa3xx_device_mci2, info); } static struct resource pxa3xx_resources_mci3[] = { [0] = { .start = 0x42500000, .end = 0x42500fff, .flags = IORESOURCE_MEM, }, [1] = { .start = IRQ_MMC3, .end = IRQ_MMC3, .flags = IORESOURCE_IRQ, }, [2] = { .start = 100, .end = 100, .flags = IORESOURCE_DMA, }, [3] = { .start = 101, .end = 101, .flags = IORESOURCE_DMA, }, }; struct platform_device pxa3xx_device_mci3 = { .name = "pxa2xx-mci", .id = 2, .dev = { .dma_mask = &pxamci_dmamask, .coherent_dma_mask = 0xffffffff, }, .num_resources = ARRAY_SIZE(pxa3xx_resources_mci3), .resource = pxa3xx_resources_mci3, }; void __init pxa3xx_set_mci3_info(struct pxamci_platform_data *info) { pxa_register_device(&pxa3xx_device_mci3, info); } static struct resource pxa3xx_resources_gcu[] = { { .start = 0x54000000, .end = 0x54000fff, .flags = IORESOURCE_MEM, }, { .start = IRQ_GCU, .end = IRQ_GCU, .flags = 
IORESOURCE_IRQ, }, }; static u64 pxa3xx_gcu_dmamask = DMA_BIT_MASK(32); struct platform_device pxa3xx_device_gcu = { .name = "pxa3xx-gcu", .id = -1, .num_resources = ARRAY_SIZE(pxa3xx_resources_gcu), .resource = pxa3xx_resources_gcu, .dev = { .dma_mask = &pxa3xx_gcu_dmamask, .coherent_dma_mask = 0xffffffff, }, }; #endif /* CONFIG_PXA3xx */ #if defined(CONFIG_PXA3xx) || defined(CONFIG_PXA95x) static struct resource pxa3xx_resources_i2c_power[] = { { .start = 0x40f500c0, .end = 0x40f500d3, .flags = IORESOURCE_MEM, }, { .start = IRQ_PWRI2C, .end = IRQ_PWRI2C, .flags = IORESOURCE_IRQ, }, }; struct platform_device pxa3xx_device_i2c_power = { .name = "pxa3xx-pwri2c", .id = 1, .resource = pxa3xx_resources_i2c_power, .num_resources = ARRAY_SIZE(pxa3xx_resources_i2c_power), }; static struct resource pxa3xx_resources_nand[] = { [0] = { .start = 0x43100000, .end = 0x43100053, .flags = IORESOURCE_MEM, }, [1] = { .start = IRQ_NAND, .end = IRQ_NAND, .flags = IORESOURCE_IRQ, }, [2] = { /* DRCMR for Data DMA */ .start = 97, .end = 97, .flags = IORESOURCE_DMA, }, [3] = { /* DRCMR for Command DMA */ .start = 99, .end = 99, .flags = IORESOURCE_DMA, }, }; static u64 pxa3xx_nand_dma_mask = DMA_BIT_MASK(32); struct platform_device pxa3xx_device_nand = { .name = "pxa3xx-nand", .id = -1, .dev = { .dma_mask = &pxa3xx_nand_dma_mask, .coherent_dma_mask = DMA_BIT_MASK(32), }, .num_resources = ARRAY_SIZE(pxa3xx_resources_nand), .resource = pxa3xx_resources_nand, }; void __init pxa3xx_set_nand_info(struct pxa3xx_nand_platform_data *info) { pxa_register_device(&pxa3xx_device_nand, info); } static u64 pxa3xx_ssp4_dma_mask = DMA_BIT_MASK(32); static struct resource pxa3xx_resource_ssp4[] = { [0] = { .start = 0x41a00000, .end = 0x41a0003f, .flags = IORESOURCE_MEM, }, [1] = { .start = IRQ_SSP4, .end = IRQ_SSP4, .flags = IORESOURCE_IRQ, }, [2] = { /* DRCMR for RX */ .start = 2, .end = 2, .flags = IORESOURCE_DMA, }, [3] = { /* DRCMR for TX */ .start = 3, .end = 3, .flags = IORESOURCE_DMA, }, }; struct 
platform_device pxa3xx_device_ssp4 = { /* PXA3xx SSP is basically equivalent to PXA27x */ .name = "pxa27x-ssp", .id = 3, .dev = { .dma_mask = &pxa3xx_ssp4_dma_mask, .coherent_dma_mask = DMA_BIT_MASK(32), }, .resource = pxa3xx_resource_ssp4, .num_resources = ARRAY_SIZE(pxa3xx_resource_ssp4), }; #endif /* CONFIG_PXA3xx || CONFIG_PXA95x */ struct resource pxa_resource_gpio[] = { { .start = 0x40e00000, .end = 0x40e0ffff, .flags = IORESOURCE_MEM, }, { .start = IRQ_GPIO0, .end = IRQ_GPIO0, .name = "gpio0", .flags = IORESOURCE_IRQ, }, { .start = IRQ_GPIO1, .end = IRQ_GPIO1, .name = "gpio1", .flags = IORESOURCE_IRQ, }, { .start = IRQ_GPIO_2_x, .end = IRQ_GPIO_2_x, .name = "gpio_mux", .flags = IORESOURCE_IRQ, }, }; struct platform_device pxa_device_gpio = { .name = "pxa-gpio", .id = -1, .num_resources = ARRAY_SIZE(pxa_resource_gpio), .resource = pxa_resource_gpio, }; /* pxa2xx-spi platform-device ID equals respective SSP platform-device ID + 1. * See comment in arch/arm/mach-pxa/ssp.c::ssp_probe() */ void __init pxa2xx_set_spi_info(unsigned id, struct pxa2xx_spi_master *info) { struct platform_device *pd; pd = platform_device_alloc("pxa2xx-spi", id); if (pd == NULL) { printk(KERN_ERR "pxa2xx-spi: failed to allocate device id %d\n", id); return; } pd->dev.platform_data = info; platform_device_add(pd); }
gpl-2.0
Ronfante/android_kernel_xiaomi_cancro
arch/s390/kernel/mem_detect.c
4848
3802
/* * Copyright IBM Corp. 2008, 2009 * * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com> */ #include <linux/kernel.h> #include <linux/module.h> #include <asm/ipl.h> #include <asm/sclp.h> #include <asm/setup.h> #define ADDR2G (1ULL << 31) static void find_memory_chunks(struct mem_chunk chunk[]) { unsigned long long memsize, rnmax, rzm; unsigned long addr = 0, size; int i = 0, type; rzm = sclp_get_rzm(); rnmax = sclp_get_rnmax(); memsize = rzm * rnmax; if (!rzm) rzm = 1ULL << 17; if (sizeof(long) == 4) { rzm = min(ADDR2G, rzm); memsize = memsize ? min(ADDR2G, memsize) : ADDR2G; } do { size = 0; type = tprot(addr); do { size += rzm; if (memsize && addr + size >= memsize) break; } while (type == tprot(addr + size)); if (type == CHUNK_READ_WRITE || type == CHUNK_READ_ONLY) { chunk[i].addr = addr; chunk[i].size = size; chunk[i].type = type; i++; } addr += size; } while (addr < memsize && i < MEMORY_CHUNKS); } void detect_memory_layout(struct mem_chunk chunk[]) { unsigned long flags, cr0; memset(chunk, 0, MEMORY_CHUNKS * sizeof(struct mem_chunk)); /* Disable IRQs, DAT and low address protection so tprot does the * right thing and we don't get scheduled away with low address * protection disabled. 
*/ flags = __arch_local_irq_stnsm(0xf8); __ctl_store(cr0, 0, 0); __ctl_clear_bit(0, 28); find_memory_chunks(chunk); __ctl_load(cr0, 0, 0); arch_local_irq_restore(flags); } EXPORT_SYMBOL(detect_memory_layout); /* * Move memory chunks array from index "from" to index "to" */ static void mem_chunk_move(struct mem_chunk chunk[], int to, int from) { int cnt = MEMORY_CHUNKS - to; memmove(&chunk[to], &chunk[from], cnt * sizeof(struct mem_chunk)); } /* * Initialize memory chunk */ static void mem_chunk_init(struct mem_chunk *chunk, unsigned long addr, unsigned long size, int type) { chunk->type = type; chunk->addr = addr; chunk->size = size; } /* * Create memory hole with given address, size, and type */ void create_mem_hole(struct mem_chunk chunk[], unsigned long addr, unsigned long size, int type) { unsigned long lh_start, lh_end, lh_size, ch_start, ch_end, ch_size; int i, ch_type; for (i = 0; i < MEMORY_CHUNKS; i++) { if (chunk[i].size == 0) continue; /* Define chunk properties */ ch_start = chunk[i].addr; ch_size = chunk[i].size; ch_end = ch_start + ch_size - 1; ch_type = chunk[i].type; /* Is memory chunk hit by memory hole? 
*/ if (addr + size <= ch_start) continue; /* No: memory hole in front of chunk */ if (addr > ch_end) continue; /* No: memory hole after chunk */ /* Yes: Define local hole properties */ lh_start = max(addr, chunk[i].addr); lh_end = min(addr + size - 1, ch_end); lh_size = lh_end - lh_start + 1; if (lh_start == ch_start && lh_end == ch_end) { /* Hole covers complete memory chunk */ mem_chunk_init(&chunk[i], lh_start, lh_size, type); } else if (lh_end == ch_end) { /* Hole starts in memory chunk and convers chunk end */ mem_chunk_move(chunk, i + 1, i); mem_chunk_init(&chunk[i], ch_start, ch_size - lh_size, ch_type); mem_chunk_init(&chunk[i + 1], lh_start, lh_size, type); i += 1; } else if (lh_start == ch_start) { /* Hole ends in memory chunk */ mem_chunk_move(chunk, i + 1, i); mem_chunk_init(&chunk[i], lh_start, lh_size, type); mem_chunk_init(&chunk[i + 1], lh_end + 1, ch_size - lh_size, ch_type); break; } else { /* Hole splits memory chunk */ mem_chunk_move(chunk, i + 2, i); mem_chunk_init(&chunk[i], ch_start, lh_start - ch_start, ch_type); mem_chunk_init(&chunk[i + 1], lh_start, lh_size, type); mem_chunk_init(&chunk[i + 2], lh_end + 1, ch_end - lh_end, ch_type); break; } } }
gpl-2.0
klquicksall/Galaxy-Nexus-JB
drivers/misc/isl29003.c
4848
11896
/* * isl29003.c - Linux kernel module for * Intersil ISL29003 ambient light sensor * * See file:Documentation/misc-devices/isl29003 * * Copyright (c) 2009 Daniel Mack <daniel@caiaq.de> * * Based on code written by * Rodolfo Giometti <giometti@linux.it> * Eurotech S.p.A. <info@eurotech.it> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/i2c.h> #include <linux/mutex.h> #include <linux/delay.h> #define ISL29003_DRV_NAME "isl29003" #define DRIVER_VERSION "1.0" #define ISL29003_REG_COMMAND 0x00 #define ISL29003_ADC_ENABLED (1 << 7) #define ISL29003_ADC_PD (1 << 6) #define ISL29003_TIMING_INT (1 << 5) #define ISL29003_MODE_SHIFT (2) #define ISL29003_MODE_MASK (0x3 << ISL29003_MODE_SHIFT) #define ISL29003_RES_SHIFT (0) #define ISL29003_RES_MASK (0x3 << ISL29003_RES_SHIFT) #define ISL29003_REG_CONTROL 0x01 #define ISL29003_INT_FLG (1 << 5) #define ISL29003_RANGE_SHIFT (2) #define ISL29003_RANGE_MASK (0x3 << ISL29003_RANGE_SHIFT) #define ISL29003_INT_PERSISTS_SHIFT (0) #define ISL29003_INT_PERSISTS_MASK (0xf << ISL29003_INT_PERSISTS_SHIFT) #define ISL29003_REG_IRQ_THRESH_HI 0x02 #define ISL29003_REG_IRQ_THRESH_LO 0x03 #define ISL29003_REG_LSB_SENSOR 0x04 #define ISL29003_REG_MSB_SENSOR 0x05 #define ISL29003_REG_LSB_TIMER 0x06 #define ISL29003_REG_MSB_TIMER 
0x07 #define ISL29003_NUM_CACHABLE_REGS 4 struct isl29003_data { struct i2c_client *client; struct mutex lock; u8 reg_cache[ISL29003_NUM_CACHABLE_REGS]; u8 power_state_before_suspend; }; static int gain_range[] = { 1000, 4000, 16000, 64000 }; /* * register access helpers */ static int __isl29003_read_reg(struct i2c_client *client, u32 reg, u8 mask, u8 shift) { struct isl29003_data *data = i2c_get_clientdata(client); return (data->reg_cache[reg] & mask) >> shift; } static int __isl29003_write_reg(struct i2c_client *client, u32 reg, u8 mask, u8 shift, u8 val) { struct isl29003_data *data = i2c_get_clientdata(client); int ret = 0; u8 tmp; if (reg >= ISL29003_NUM_CACHABLE_REGS) return -EINVAL; mutex_lock(&data->lock); tmp = data->reg_cache[reg]; tmp &= ~mask; tmp |= val << shift; ret = i2c_smbus_write_byte_data(client, reg, tmp); if (!ret) data->reg_cache[reg] = tmp; mutex_unlock(&data->lock); return ret; } /* * internally used functions */ /* range */ static int isl29003_get_range(struct i2c_client *client) { return __isl29003_read_reg(client, ISL29003_REG_CONTROL, ISL29003_RANGE_MASK, ISL29003_RANGE_SHIFT); } static int isl29003_set_range(struct i2c_client *client, int range) { return __isl29003_write_reg(client, ISL29003_REG_CONTROL, ISL29003_RANGE_MASK, ISL29003_RANGE_SHIFT, range); } /* resolution */ static int isl29003_get_resolution(struct i2c_client *client) { return __isl29003_read_reg(client, ISL29003_REG_COMMAND, ISL29003_RES_MASK, ISL29003_RES_SHIFT); } static int isl29003_set_resolution(struct i2c_client *client, int res) { return __isl29003_write_reg(client, ISL29003_REG_COMMAND, ISL29003_RES_MASK, ISL29003_RES_SHIFT, res); } /* mode */ static int isl29003_get_mode(struct i2c_client *client) { return __isl29003_read_reg(client, ISL29003_REG_COMMAND, ISL29003_RES_MASK, ISL29003_RES_SHIFT); } static int isl29003_set_mode(struct i2c_client *client, int mode) { return __isl29003_write_reg(client, ISL29003_REG_COMMAND, ISL29003_RES_MASK, ISL29003_RES_SHIFT, 
mode); } /* power_state */ static int isl29003_set_power_state(struct i2c_client *client, int state) { return __isl29003_write_reg(client, ISL29003_REG_COMMAND, ISL29003_ADC_ENABLED | ISL29003_ADC_PD, 0, state ? ISL29003_ADC_ENABLED : ISL29003_ADC_PD); } static int isl29003_get_power_state(struct i2c_client *client) { struct isl29003_data *data = i2c_get_clientdata(client); u8 cmdreg = data->reg_cache[ISL29003_REG_COMMAND]; return ~cmdreg & ISL29003_ADC_PD; } static int isl29003_get_adc_value(struct i2c_client *client) { struct isl29003_data *data = i2c_get_clientdata(client); int lsb, msb, range, bitdepth; mutex_lock(&data->lock); lsb = i2c_smbus_read_byte_data(client, ISL29003_REG_LSB_SENSOR); if (lsb < 0) { mutex_unlock(&data->lock); return lsb; } msb = i2c_smbus_read_byte_data(client, ISL29003_REG_MSB_SENSOR); mutex_unlock(&data->lock); if (msb < 0) return msb; range = isl29003_get_range(client); bitdepth = (4 - isl29003_get_resolution(client)) * 4; return (((msb << 8) | lsb) * gain_range[range]) >> bitdepth; } /* * sysfs layer */ /* range */ static ssize_t isl29003_show_range(struct device *dev, struct device_attribute *attr, char *buf) { struct i2c_client *client = to_i2c_client(dev); return sprintf(buf, "%i\n", isl29003_get_range(client)); } static ssize_t isl29003_store_range(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct i2c_client *client = to_i2c_client(dev); unsigned long val; int ret; if ((strict_strtoul(buf, 10, &val) < 0) || (val > 3)) return -EINVAL; ret = isl29003_set_range(client, val); if (ret < 0) return ret; return count; } static DEVICE_ATTR(range, S_IWUSR | S_IRUGO, isl29003_show_range, isl29003_store_range); /* resolution */ static ssize_t isl29003_show_resolution(struct device *dev, struct device_attribute *attr, char *buf) { struct i2c_client *client = to_i2c_client(dev); return sprintf(buf, "%d\n", isl29003_get_resolution(client)); } static ssize_t isl29003_store_resolution(struct device *dev, 
struct device_attribute *attr, const char *buf, size_t count) { struct i2c_client *client = to_i2c_client(dev); unsigned long val; int ret; if ((strict_strtoul(buf, 10, &val) < 0) || (val > 3)) return -EINVAL; ret = isl29003_set_resolution(client, val); if (ret < 0) return ret; return count; } static DEVICE_ATTR(resolution, S_IWUSR | S_IRUGO, isl29003_show_resolution, isl29003_store_resolution); /* mode */ static ssize_t isl29003_show_mode(struct device *dev, struct device_attribute *attr, char *buf) { struct i2c_client *client = to_i2c_client(dev); return sprintf(buf, "%d\n", isl29003_get_mode(client)); } static ssize_t isl29003_store_mode(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct i2c_client *client = to_i2c_client(dev); unsigned long val; int ret; if ((strict_strtoul(buf, 10, &val) < 0) || (val > 2)) return -EINVAL; ret = isl29003_set_mode(client, val); if (ret < 0) return ret; return count; } static DEVICE_ATTR(mode, S_IWUSR | S_IRUGO, isl29003_show_mode, isl29003_store_mode); /* power state */ static ssize_t isl29003_show_power_state(struct device *dev, struct device_attribute *attr, char *buf) { struct i2c_client *client = to_i2c_client(dev); return sprintf(buf, "%d\n", isl29003_get_power_state(client)); } static ssize_t isl29003_store_power_state(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct i2c_client *client = to_i2c_client(dev); unsigned long val; int ret; if ((strict_strtoul(buf, 10, &val) < 0) || (val > 1)) return -EINVAL; ret = isl29003_set_power_state(client, val); return ret ? 
ret : count; } static DEVICE_ATTR(power_state, S_IWUSR | S_IRUGO, isl29003_show_power_state, isl29003_store_power_state); /* lux */ static ssize_t isl29003_show_lux(struct device *dev, struct device_attribute *attr, char *buf) { struct i2c_client *client = to_i2c_client(dev); /* No LUX data if not operational */ if (!isl29003_get_power_state(client)) return -EBUSY; return sprintf(buf, "%d\n", isl29003_get_adc_value(client)); } static DEVICE_ATTR(lux, S_IRUGO, isl29003_show_lux, NULL); static struct attribute *isl29003_attributes[] = { &dev_attr_range.attr, &dev_attr_resolution.attr, &dev_attr_mode.attr, &dev_attr_power_state.attr, &dev_attr_lux.attr, NULL }; static const struct attribute_group isl29003_attr_group = { .attrs = isl29003_attributes, }; static int isl29003_init_client(struct i2c_client *client) { struct isl29003_data *data = i2c_get_clientdata(client); int i; /* read all the registers once to fill the cache. * if one of the reads fails, we consider the init failed */ for (i = 0; i < ARRAY_SIZE(data->reg_cache); i++) { int v = i2c_smbus_read_byte_data(client, i); if (v < 0) return -ENODEV; data->reg_cache[i] = v; } /* set defaults */ isl29003_set_range(client, 0); isl29003_set_resolution(client, 0); isl29003_set_mode(client, 0); isl29003_set_power_state(client, 0); return 0; } /* * I2C layer */ static int __devinit isl29003_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent); struct isl29003_data *data; int err = 0; if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE)) return -EIO; data = kzalloc(sizeof(struct isl29003_data), GFP_KERNEL); if (!data) return -ENOMEM; data->client = client; i2c_set_clientdata(client, data); mutex_init(&data->lock); /* initialize the ISL29003 chip */ err = isl29003_init_client(client); if (err) goto exit_kfree; /* register sysfs hooks */ err = sysfs_create_group(&client->dev.kobj, &isl29003_attr_group); if (err) goto exit_kfree; 
dev_info(&client->dev, "driver version %s enabled\n", DRIVER_VERSION); return 0; exit_kfree: kfree(data); return err; } static int __devexit isl29003_remove(struct i2c_client *client) { sysfs_remove_group(&client->dev.kobj, &isl29003_attr_group); isl29003_set_power_state(client, 0); kfree(i2c_get_clientdata(client)); return 0; } #ifdef CONFIG_PM static int isl29003_suspend(struct i2c_client *client, pm_message_t mesg) { struct isl29003_data *data = i2c_get_clientdata(client); data->power_state_before_suspend = isl29003_get_power_state(client); return isl29003_set_power_state(client, 0); } static int isl29003_resume(struct i2c_client *client) { int i; struct isl29003_data *data = i2c_get_clientdata(client); /* restore registers from cache */ for (i = 0; i < ARRAY_SIZE(data->reg_cache); i++) if (i2c_smbus_write_byte_data(client, i, data->reg_cache[i])) return -EIO; return isl29003_set_power_state(client, data->power_state_before_suspend); } #else #define isl29003_suspend NULL #define isl29003_resume NULL #endif /* CONFIG_PM */ static const struct i2c_device_id isl29003_id[] = { { "isl29003", 0 }, {} }; MODULE_DEVICE_TABLE(i2c, isl29003_id); static struct i2c_driver isl29003_driver = { .driver = { .name = ISL29003_DRV_NAME, .owner = THIS_MODULE, }, .suspend = isl29003_suspend, .resume = isl29003_resume, .probe = isl29003_probe, .remove = __devexit_p(isl29003_remove), .id_table = isl29003_id, }; static int __init isl29003_init(void) { return i2c_add_driver(&isl29003_driver); } static void __exit isl29003_exit(void) { i2c_del_driver(&isl29003_driver); } MODULE_AUTHOR("Daniel Mack <daniel@caiaq.de>"); MODULE_DESCRIPTION("ISL29003 ambient light sensor driver"); MODULE_LICENSE("GPL v2"); MODULE_VERSION(DRIVER_VERSION); module_init(isl29003_init); module_exit(isl29003_exit);
gpl-2.0
flar2/m7wl-Bulletproof
arch/x86/kernel/cpu/mcheck/mce_intel.c
5104
5606
/*
 * Intel specific MCE features.
 * Copyright 2004 Zwane Mwaikambo <zwane@linuxpower.ca>
 * Copyright (C) 2008, 2009 Intel Corporation
 * Author: Andi Kleen
 */

#include <linux/gfp.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <asm/apic.h>
#include <asm/processor.h>
#include <asm/msr.h>
#include <asm/mce.h>

/*
 * Support for Intel Correct Machine Check Interrupts. This allows
 * the CPU to raise an interrupt when a corrected machine check happened.
 * Normally we pick those up using a regular polling timer.
 * Also supports reliable discovery of shared banks.
 */

/* Bitmap of MCA banks this CPU owns for CMCI purposes. */
static DEFINE_PER_CPU(mce_banks_t, mce_banks_owned);

/*
 * cmci_discover_lock protects against parallel discovery attempts
 * which could race against each other.
 */
static DEFINE_RAW_SPINLOCK(cmci_discover_lock);

/* Raise the CMCI interrupt after this many corrected errors. */
#define CMCI_THRESHOLD 1

/*
 * Check whether CMCI is usable on this CPU; on success store the
 * number of MCA banks (capped at MAX_NR_BANKS) through *banks.
 * Returns non-zero iff the MCG_CMCI_P capability bit is set.
 */
static int cmci_supported(int *banks)
{
	u64 cap;

	if (mce_cmci_disabled || mce_ignore_ce)
		return 0;

	/*
	 * Vendor check is not strictly needed, but the initial
	 * initialization is vendor keyed and this
	 * makes sure none of the backdoors are entered otherwise.
	 */
	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
		return 0;
	if (!cpu_has_apic || lapic_get_maxlvt() < 6)
		return 0;
	rdmsrl(MSR_IA32_MCG_CAP, cap);
	*banks = min_t(unsigned, MAX_NR_BANKS, cap & 0xff);
	return !!(cap & MCG_CMCI_P);
}

/*
 * The interrupt handler. This is called on every event.
 * Just call the poller directly to log any events.
 * This could in theory increase the threshold under high load,
 * but doesn't for now.
 */
static void intel_threshold_interrupt(void)
{
	machine_check_poll(MCP_TIMESTAMP, &__get_cpu_var(mce_banks_owned));
	mce_notify_irq();
}

/* Print one "TYPE:bank" item, emitting the per-CPU header on first use. */
static void print_update(char *type, int *hdr, int num)
{
	if (*hdr == 0)
		printk(KERN_INFO "CPU %d MCA banks", smp_processor_id());
	*hdr = 1;
	printk(KERN_CONT " %s:%d", type, num);
}

/*
 * Enable CMCI (Corrected Machine Check Interrupt) for available MCE banks
 * on this CPU. Use the algorithm recommended in the SDM to discover shared
 * banks.  A bank whose CMCI_EN bit is already set is owned by another CPU
 * ("SHD"); a bank where our CMCI_EN write sticks is claimed ("CMCI");
 * a bank where it doesn't stick does not support CMCI and stays polled.
 */
static void cmci_discover(int banks, int boot)
{
	unsigned long *owned = (void *)&__get_cpu_var(mce_banks_owned);
	unsigned long flags;
	int hdr = 0;
	int i;

	raw_spin_lock_irqsave(&cmci_discover_lock, flags);
	for (i = 0; i < banks; i++) {
		u64 val;

		if (test_bit(i, owned))
			continue;

		rdmsrl(MSR_IA32_MCx_CTL2(i), val);

		/* Already owned by someone else? */
		if (val & MCI_CTL2_CMCI_EN) {
			if (test_and_clear_bit(i, owned) && !boot)
				print_update("SHD", &hdr, i);
			__clear_bit(i, __get_cpu_var(mce_poll_banks));
			continue;
		}

		val &= ~MCI_CTL2_CMCI_THRESHOLD_MASK;
		val |= MCI_CTL2_CMCI_EN | CMCI_THRESHOLD;
		wrmsrl(MSR_IA32_MCx_CTL2(i), val);
		rdmsrl(MSR_IA32_MCx_CTL2(i), val);

		/* Did the enable bit stick? -- the bank supports CMCI */
		if (val & MCI_CTL2_CMCI_EN) {
			if (!test_and_set_bit(i, owned) && !boot)
				print_update("CMCI", &hdr, i);
			__clear_bit(i, __get_cpu_var(mce_poll_banks));
		} else {
			WARN_ON(!test_bit(i, __get_cpu_var(mce_poll_banks)));
		}
	}
	raw_spin_unlock_irqrestore(&cmci_discover_lock, flags);
	if (hdr)
		printk(KERN_CONT "\n");
}

/*
 * Just in case we missed an event during initialization check
 * all the CMCI owned banks.
 */
void cmci_recheck(void)
{
	unsigned long flags;
	int banks;

	if (!mce_available(__this_cpu_ptr(&cpu_info)) || !cmci_supported(&banks))
		return;
	/* IRQs off so the poll cannot race with the CMCI handler. */
	local_irq_save(flags);
	machine_check_poll(MCP_TIMESTAMP, &__get_cpu_var(mce_banks_owned));
	local_irq_restore(flags);
}

/*
 * Disable CMCI on this CPU for all banks it owns when it goes down.
 * This allows other CPUs to claim the banks on rediscovery.
 */
void cmci_clear(void)
{
	unsigned long flags;
	int i;
	int banks;
	u64 val;

	if (!cmci_supported(&banks))
		return;
	raw_spin_lock_irqsave(&cmci_discover_lock, flags);
	for (i = 0; i < banks; i++) {
		if (!test_bit(i, __get_cpu_var(mce_banks_owned)))
			continue;
		/* Disable CMCI */
		rdmsrl(MSR_IA32_MCx_CTL2(i), val);
		val &= ~(MCI_CTL2_CMCI_EN|MCI_CTL2_CMCI_THRESHOLD_MASK);
		wrmsrl(MSR_IA32_MCx_CTL2(i), val);
		__clear_bit(i, __get_cpu_var(mce_banks_owned));
	}
	raw_spin_unlock_irqrestore(&cmci_discover_lock, flags);
}

/*
 * After a CPU went down cycle through all the others and rediscover
 * Must run in process context (it migrates the current task from CPU
 * to CPU via set_cpus_allowed_ptr to run discovery locally on each).
 */
void cmci_rediscover(int dying)
{
	int banks;
	int cpu;
	cpumask_var_t old;

	if (!cmci_supported(&banks))
		return;
	if (!alloc_cpumask_var(&old, GFP_KERNEL))
		return;
	cpumask_copy(old, &current->cpus_allowed);

	for_each_online_cpu(cpu) {
		if (cpu == dying)
			continue;
		if (set_cpus_allowed_ptr(current, cpumask_of(cpu)))
			continue;
		/* Recheck banks in case CPUs don't all have the same */
		if (cmci_supported(&banks))
			cmci_discover(banks, 0);
	}

	/* Restore the original CPU affinity of the caller. */
	set_cpus_allowed_ptr(current, old);
	free_cpumask_var(old);
}

/*
 * Reenable CMCI on this CPU in case a CPU down failed.
 */
void cmci_reenable(void)
{
	int banks;
	if (cmci_supported(&banks))
		cmci_discover(banks, 0);
}

/* Hook up the CMCI interrupt vector and claim banks for this CPU. */
static void intel_init_cmci(void)
{
	int banks;

	if (!cmci_supported(&banks))
		return;

	mce_threshold_vector = intel_threshold_interrupt;
	cmci_discover(banks, 1);
	/*
	 * For CPU #0 this runs with still disabled APIC, but that's
	 * ok because only the vector is set up. We still do another
	 * check for the banks later for CPU #0 just to make sure
	 * to not miss any events.
	 */
	apic_write(APIC_LVTCMCI, THRESHOLD_APIC_VECTOR|APIC_DM_FIXED);
	cmci_recheck();
}

/* Entry point: enable Intel-specific MCE features (thermal + CMCI). */
void mce_intel_feature_init(struct cpuinfo_x86 *c)
{
	intel_init_thermal(c);
	intel_init_cmci();
}
gpl-2.0
crpalmer/dna-kernel
drivers/media/video/soc_mediabus.c
5104
10639
/*
 * soc-camera media bus helper routines
 *
 * Copyright (C) 2009, Guennadi Liakhovetski <g.liakhovetski@gmx.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/module.h>

#include <media/v4l2-device.h>
#include <media/v4l2-mediabus.h>
#include <media/soc_mediabus.h>

/*
 * Table mapping media-bus pixel codes to their memory-format description
 * (fourcc, bits per sample, sample packing, byte order).
 */
static const struct soc_mbus_lookup mbus_fmt[] = {
	{ .code = V4L2_MBUS_FMT_YUYV8_2X8, .fmt = { .fourcc = V4L2_PIX_FMT_YUYV, .name = "YUYV", .bits_per_sample = 8, .packing = SOC_MBUS_PACKING_2X8_PADHI, .order = SOC_MBUS_ORDER_LE, }, },
	{ .code = V4L2_MBUS_FMT_YVYU8_2X8, .fmt = { .fourcc = V4L2_PIX_FMT_YVYU, .name = "YVYU", .bits_per_sample = 8, .packing = SOC_MBUS_PACKING_2X8_PADHI, .order = SOC_MBUS_ORDER_LE, }, },
	{ .code = V4L2_MBUS_FMT_UYVY8_2X8, .fmt = { .fourcc = V4L2_PIX_FMT_UYVY, .name = "UYVY", .bits_per_sample = 8, .packing = SOC_MBUS_PACKING_2X8_PADHI, .order = SOC_MBUS_ORDER_LE, }, },
	{ .code = V4L2_MBUS_FMT_VYUY8_2X8, .fmt = { .fourcc = V4L2_PIX_FMT_VYUY, .name = "VYUY", .bits_per_sample = 8, .packing = SOC_MBUS_PACKING_2X8_PADHI, .order = SOC_MBUS_ORDER_LE, }, },
	{ .code = V4L2_MBUS_FMT_RGB555_2X8_PADHI_LE, .fmt = { .fourcc = V4L2_PIX_FMT_RGB555, .name = "RGB555", .bits_per_sample = 8, .packing = SOC_MBUS_PACKING_2X8_PADHI, .order = SOC_MBUS_ORDER_LE, }, },
	{ .code = V4L2_MBUS_FMT_RGB555_2X8_PADHI_BE, .fmt = { .fourcc = V4L2_PIX_FMT_RGB555X, .name = "RGB555X", .bits_per_sample = 8, .packing = SOC_MBUS_PACKING_2X8_PADHI, .order = SOC_MBUS_ORDER_LE, }, },
	{ .code = V4L2_MBUS_FMT_RGB565_2X8_LE, .fmt = { .fourcc = V4L2_PIX_FMT_RGB565, .name = "RGB565", .bits_per_sample = 8, .packing = SOC_MBUS_PACKING_2X8_PADHI, .order = SOC_MBUS_ORDER_LE, }, },
	{ .code = V4L2_MBUS_FMT_RGB565_2X8_BE, .fmt = { .fourcc = V4L2_PIX_FMT_RGB565X, .name = "RGB565X", .bits_per_sample = 8, .packing = SOC_MBUS_PACKING_2X8_PADHI, .order = SOC_MBUS_ORDER_LE, }, },
	{ .code = V4L2_MBUS_FMT_SBGGR8_1X8, .fmt = { .fourcc = V4L2_PIX_FMT_SBGGR8, .name = "Bayer 8 BGGR", .bits_per_sample = 8, .packing = SOC_MBUS_PACKING_NONE, .order = SOC_MBUS_ORDER_LE, }, },
	{ .code = V4L2_MBUS_FMT_SBGGR10_1X10, .fmt = { .fourcc = V4L2_PIX_FMT_SBGGR10, .name = "Bayer 10 BGGR", .bits_per_sample = 10, .packing = SOC_MBUS_PACKING_EXTEND16, .order = SOC_MBUS_ORDER_LE, }, },
	{ .code = V4L2_MBUS_FMT_Y8_1X8, .fmt = { .fourcc = V4L2_PIX_FMT_GREY, .name = "Grey", .bits_per_sample = 8, .packing = SOC_MBUS_PACKING_NONE, .order = SOC_MBUS_ORDER_LE, }, },
	{ .code = V4L2_MBUS_FMT_Y10_1X10, .fmt = { .fourcc = V4L2_PIX_FMT_Y10, .name = "Grey 10bit", .bits_per_sample = 10, .packing = SOC_MBUS_PACKING_EXTEND16, .order = SOC_MBUS_ORDER_LE, }, },
	{ .code = V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_LE, .fmt = { .fourcc = V4L2_PIX_FMT_SBGGR10, .name = "Bayer 10 BGGR", .bits_per_sample = 8, .packing = SOC_MBUS_PACKING_2X8_PADHI, .order = SOC_MBUS_ORDER_LE, }, },
	{ .code = V4L2_MBUS_FMT_SBGGR10_2X8_PADLO_LE, .fmt = { .fourcc = V4L2_PIX_FMT_SBGGR10, .name = "Bayer 10 BGGR", .bits_per_sample = 8, .packing = SOC_MBUS_PACKING_2X8_PADLO, .order = SOC_MBUS_ORDER_LE, }, },
	{ .code = V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_BE, .fmt = { .fourcc = V4L2_PIX_FMT_SBGGR10, .name = "Bayer 10 BGGR", .bits_per_sample = 8, .packing = SOC_MBUS_PACKING_2X8_PADHI, .order = SOC_MBUS_ORDER_BE, }, },
	{ .code = V4L2_MBUS_FMT_SBGGR10_2X8_PADLO_BE, .fmt = { .fourcc = V4L2_PIX_FMT_SBGGR10, .name = "Bayer 10 BGGR", .bits_per_sample = 8, .packing = SOC_MBUS_PACKING_2X8_PADLO, .order = SOC_MBUS_ORDER_BE, }, },
	{ .code = V4L2_MBUS_FMT_JPEG_1X8, .fmt = { .fourcc = V4L2_PIX_FMT_JPEG, .name = "JPEG", .bits_per_sample = 8, .packing = SOC_MBUS_PACKING_VARIABLE, .order = SOC_MBUS_ORDER_LE, }, },
	{ .code = V4L2_MBUS_FMT_RGB444_2X8_PADHI_BE, .fmt = { .fourcc = V4L2_PIX_FMT_RGB444, .name = "RGB444", .bits_per_sample = 8, .packing = SOC_MBUS_PACKING_2X8_PADHI, .order = SOC_MBUS_ORDER_BE, }, },
	{ .code = V4L2_MBUS_FMT_YUYV8_1_5X8, .fmt = { .fourcc = V4L2_PIX_FMT_YUV420, .name = "YUYV 4:2:0", .bits_per_sample = 8, .packing = SOC_MBUS_PACKING_1_5X8, .order = SOC_MBUS_ORDER_LE, }, },
	{ .code = V4L2_MBUS_FMT_YVYU8_1_5X8, .fmt = { .fourcc = V4L2_PIX_FMT_YVU420, .name = "YVYU 4:2:0", .bits_per_sample = 8, .packing = SOC_MBUS_PACKING_1_5X8, .order = SOC_MBUS_ORDER_LE, }, },
	{ .code = V4L2_MBUS_FMT_UYVY8_1X16, .fmt = { .fourcc = V4L2_PIX_FMT_UYVY, .name = "UYVY 16bit", .bits_per_sample = 16, .packing = SOC_MBUS_PACKING_EXTEND16, .order = SOC_MBUS_ORDER_LE, }, },
	{ .code = V4L2_MBUS_FMT_VYUY8_1X16, .fmt = { .fourcc = V4L2_PIX_FMT_VYUY, .name = "VYUY 16bit", .bits_per_sample = 16, .packing = SOC_MBUS_PACKING_EXTEND16, .order = SOC_MBUS_ORDER_LE, }, },
	{ .code = V4L2_MBUS_FMT_YUYV8_1X16, .fmt = { .fourcc = V4L2_PIX_FMT_YUYV, .name = "YUYV 16bit", .bits_per_sample = 16, .packing = SOC_MBUS_PACKING_EXTEND16, .order = SOC_MBUS_ORDER_LE, }, },
	{ .code = V4L2_MBUS_FMT_YVYU8_1X16, .fmt = { .fourcc = V4L2_PIX_FMT_YVYU, .name = "YVYU 16bit", .bits_per_sample = 16, .packing = SOC_MBUS_PACKING_EXTEND16, .order = SOC_MBUS_ORDER_LE, }, },
	{ .code = V4L2_MBUS_FMT_SGRBG8_1X8, .fmt = { .fourcc = V4L2_PIX_FMT_SGRBG8, .name = "Bayer 8 GRBG", .bits_per_sample = 8, .packing = SOC_MBUS_PACKING_NONE, .order = SOC_MBUS_ORDER_LE, }, },
	{ .code = V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8, .fmt = { .fourcc = V4L2_PIX_FMT_SGRBG10DPCM8, .name = "Bayer 10 BGGR DPCM 8", .bits_per_sample = 8, .packing = SOC_MBUS_PACKING_NONE, .order = SOC_MBUS_ORDER_LE, }, },
	{ .code = V4L2_MBUS_FMT_SGBRG10_1X10, .fmt = { .fourcc = V4L2_PIX_FMT_SGBRG10, .name = "Bayer 10 GBRG", .bits_per_sample = 10, .packing = SOC_MBUS_PACKING_EXTEND16, .order = SOC_MBUS_ORDER_LE, }, },
	{ .code = V4L2_MBUS_FMT_SGRBG10_1X10, .fmt = { .fourcc = V4L2_PIX_FMT_SGRBG10, .name = "Bayer 10 GRBG", .bits_per_sample = 10, .packing = SOC_MBUS_PACKING_EXTEND16, .order = SOC_MBUS_ORDER_LE, }, },
	{ .code = V4L2_MBUS_FMT_SRGGB10_1X10, .fmt = { .fourcc = V4L2_PIX_FMT_SRGGB10, .name = "Bayer 10 RGGB", .bits_per_sample = 10, .packing = SOC_MBUS_PACKING_EXTEND16, .order = SOC_MBUS_ORDER_LE, }, },
	{ .code = V4L2_MBUS_FMT_SBGGR12_1X12, .fmt = { .fourcc = V4L2_PIX_FMT_SBGGR12, .name = "Bayer 12 BGGR", .bits_per_sample = 12, .packing = SOC_MBUS_PACKING_EXTEND16, .order = SOC_MBUS_ORDER_LE, }, },
	{ .code = V4L2_MBUS_FMT_SGBRG12_1X12, .fmt = { .fourcc = V4L2_PIX_FMT_SGBRG12, .name = "Bayer 12 GBRG", .bits_per_sample = 12, .packing = SOC_MBUS_PACKING_EXTEND16, .order = SOC_MBUS_ORDER_LE, }, },
	{ .code = V4L2_MBUS_FMT_SGRBG12_1X12, .fmt = { .fourcc = V4L2_PIX_FMT_SGRBG12, .name = "Bayer 12 GRBG", .bits_per_sample = 12, .packing = SOC_MBUS_PACKING_EXTEND16, .order = SOC_MBUS_ORDER_LE, }, },
	{ .code = V4L2_MBUS_FMT_SRGGB12_1X12, .fmt = { .fourcc = V4L2_PIX_FMT_SRGGB12, .name = "Bayer 12 RGGB", .bits_per_sample = 12, .packing = SOC_MBUS_PACKING_EXTEND16, .order = SOC_MBUS_ORDER_LE, }, },
};

/*
 * Return the number of bus samples per pixel as the fraction
 * *numerator / *denominator for the given packing.  VARIABLE packing
 * (e.g. JPEG) yields 0/1.  Returns 0 on success, -EINVAL for an
 * unknown packing.
 */
int soc_mbus_samples_per_pixel(const struct soc_mbus_pixelfmt *mf,
			unsigned int *numerator, unsigned int *denominator)
{
	switch (mf->packing) {
	case SOC_MBUS_PACKING_NONE:
	case SOC_MBUS_PACKING_EXTEND16:
		*numerator = 1;
		*denominator = 1;
		return 0;
	case SOC_MBUS_PACKING_2X8_PADHI:
	case SOC_MBUS_PACKING_2X8_PADLO:
		*numerator = 2;
		*denominator = 1;
		return 0;
	case SOC_MBUS_PACKING_1_5X8:
		*numerator = 3;
		*denominator = 2;
		return 0;
	case SOC_MBUS_PACKING_VARIABLE:
		*numerator = 0;
		*denominator = 1;
		return 0;
	}
	return -EINVAL;
}
EXPORT_SYMBOL(soc_mbus_samples_per_pixel);

/*
 * Bytes of memory needed per line of 'width' pixels in this format;
 * 0 for variable-length formats, -EINVAL for an unknown packing.
 */
s32 soc_mbus_bytes_per_line(u32 width, const struct soc_mbus_pixelfmt *mf)
{
	switch (mf->packing) {
	case SOC_MBUS_PACKING_NONE:
		return width * mf->bits_per_sample / 8;
	case SOC_MBUS_PACKING_2X8_PADHI:
	case SOC_MBUS_PACKING_2X8_PADLO:
	case SOC_MBUS_PACKING_EXTEND16:
		return width * 2;
	case SOC_MBUS_PACKING_1_5X8:
		return width * 3 / 2;
	case SOC_MBUS_PACKING_VARIABLE:
		return 0;
	}
	return -EINVAL;
}
EXPORT_SYMBOL(soc_mbus_bytes_per_line);

/* Linear search of a lookup table for the given media-bus code. */
const struct soc_mbus_pixelfmt *soc_mbus_find_fmtdesc(
	enum v4l2_mbus_pixelcode code,
	const struct soc_mbus_lookup *lookup,
	int n)
{
	int i;

	for (i = 0; i < n; i++)
		if (lookup[i].code == code)
			return &lookup[i].fmt;

	return NULL;
}
EXPORT_SYMBOL(soc_mbus_find_fmtdesc);

/* Look up 'code' in the built-in mbus_fmt table above. */
const struct soc_mbus_pixelfmt *soc_mbus_get_fmtdesc(
	enum v4l2_mbus_pixelcode code)
{
	return soc_mbus_find_fmtdesc(code, mbus_fmt, ARRAY_SIZE(mbus_fmt));
}
EXPORT_SYMBOL(soc_mbus_get_fmtdesc);

/*
 * Intersect the bus configuration flags offered by a device (cfg->flags)
 * with those supported by the host (flags).  Returns the common flags if
 * every mandatory signal group has at least one common option, else 0.
 */
unsigned int soc_mbus_config_compatible(const struct v4l2_mbus_config *cfg,
					unsigned int flags)
{
	unsigned long common_flags;
	bool hsync = true, vsync = true, pclk, data, mode;
	bool mipi_lanes, mipi_clock;

	common_flags = cfg->flags & flags;

	switch (cfg->type) {
	case V4L2_MBUS_PARALLEL:
		hsync = common_flags & (V4L2_MBUS_HSYNC_ACTIVE_HIGH |
					V4L2_MBUS_HSYNC_ACTIVE_LOW);
		vsync = common_flags & (V4L2_MBUS_VSYNC_ACTIVE_HIGH |
					V4L2_MBUS_VSYNC_ACTIVE_LOW);
		/* fall through - BT.656 shares the pclk/data/mode checks;
		 * hsync/vsync stay true for it (embedded syncs). */
	case V4L2_MBUS_BT656:
		pclk = common_flags & (V4L2_MBUS_PCLK_SAMPLE_RISING |
				       V4L2_MBUS_PCLK_SAMPLE_FALLING);
		data = common_flags & (V4L2_MBUS_DATA_ACTIVE_HIGH |
				       V4L2_MBUS_DATA_ACTIVE_LOW);
		mode = common_flags & (V4L2_MBUS_MASTER | V4L2_MBUS_SLAVE);
		return (!hsync || !vsync || !pclk || !data || !mode) ?
			0 : common_flags;
	case V4L2_MBUS_CSI2:
		mipi_lanes = common_flags & V4L2_MBUS_CSI2_LANES;
		mipi_clock = common_flags & (V4L2_MBUS_CSI2_NONCONTINUOUS_CLOCK |
					     V4L2_MBUS_CSI2_CONTINUOUS_CLOCK);
		return (!mipi_lanes || !mipi_clock) ? 0 : common_flags;
	}
	return 0;
}
EXPORT_SYMBOL(soc_mbus_config_compatible);

static int __init soc_mbus_init(void)
{
	return 0;
}

static void __exit soc_mbus_exit(void)
{
}

module_init(soc_mbus_init);
module_exit(soc_mbus_exit);

MODULE_DESCRIPTION("soc-camera media bus interface");
MODULE_AUTHOR("Guennadi Liakhovetski <g.liakhovetski@gmx.de>");
MODULE_LICENSE("GPL v2");
gpl-2.0
humberos/android_kernel_samsung_n80xx
drivers/char/nsc_gpio.c
10224
3578
/* linux/drivers/char/nsc_gpio.c

   National Semiconductor common GPIO device-file/VFS methods.
   Allows a user space process to control the GPIO pins.

   Copyright (c) 2001,2002 Christer Weinigel <wingel@nano-system.com>
   Copyright (c) 2005 Jim Cromie <jim.cromie@gmail.com>
*/

#include <linux/fs.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/nsc_gpio.h>
#include <linux/platform_device.h>
#include <asm/uaccess.h>
#include <asm/io.h>

#define NAME "nsc_gpio"

/*
 * Log one pin's configuration word and its in/out levels at INFO level.
 * The config is read back non-destructively (mask ~0, new bits 0).
 */
void nsc_gpio_dump(struct nsc_gpio_ops *amp, unsigned index)
{
	/* retrieve current config w/o changing it */
	u32 config = amp->gpio_config(index, ~0, 0);

	/* user requested via 'v' command, so its INFO */
	dev_info(amp->dev, "io%02u: 0x%04x %s %s %s %s %s %s %s\tio:%d/%d\n",
		 index, config,
		 (config & 1) ? "OE" : "TS",	/* output-enabled/tristate */
		 (config & 2) ? "PP" : "OD",	/* push pull / open drain */
		 (config & 4) ? "PUE" : "PUD",	/* pull up enabled/disabled */
		 (config & 8) ? "LOCKED" : "",	/* locked / unlocked */
		 (config & 16) ? "LEVEL" : "EDGE",/* level/edge input */
		 (config & 32) ? "HI" : "LO",	/* trigger on rise/fall edge */
		 (config & 64) ? "DEBOUNCE" : "",	/* debounce */
		 amp->gpio_get(index), amp->gpio_current(index));
}

/*
 * write(2) handler: each character in the user buffer is a one-letter
 * command applied to the pin selected by the device minor number.
 * Unrecognized characters are logged and counted; if any were seen the
 * call returns -EINVAL, otherwise the full length is consumed.
 */
ssize_t nsc_gpio_write(struct file *file, const char __user *data,
		       size_t len, loff_t *ppos)
{
	unsigned m = iminor(file->f_path.dentry->d_inode);
	struct nsc_gpio_ops *amp = file->private_data;
	struct device *dev = amp->dev;
	size_t i;
	int err = 0;

	for (i = 0; i < len; ++i) {
		char c;
		if (get_user(c, data + i))
			return -EFAULT;
		switch (c) {
		case '0':
			amp->gpio_set(m, 0);
			break;
		case '1':
			amp->gpio_set(m, 1);
			break;
		case 'O':
			dev_dbg(dev, "GPIO%d output enabled\n", m);
			amp->gpio_config(m, ~1, 1);
			break;
		case 'o':
			dev_dbg(dev, "GPIO%d output disabled\n", m);
			amp->gpio_config(m, ~1, 0);
			break;
		case 'T':
			dev_dbg(dev, "GPIO%d output is push pull\n", m);
			amp->gpio_config(m, ~2, 2);
			break;
		case 't':
			dev_dbg(dev, "GPIO%d output is open drain\n", m);
			amp->gpio_config(m, ~2, 0);
			break;
		case 'P':
			dev_dbg(dev, "GPIO%d pull up enabled\n", m);
			amp->gpio_config(m, ~4, 4);
			break;
		case 'p':
			dev_dbg(dev, "GPIO%d pull up disabled\n", m);
			amp->gpio_config(m, ~4, 0);
			break;
		case 'v':
			/* View Current pin settings */
			amp->gpio_dump(amp, m);
			break;
		case '\n':
			/* end of settings string, do nothing */
			break;
		default:
			dev_err(dev, "io%2d bad setting: chr<0x%2x>\n",
				m, (int)c);
			err++;
		}
	}
	if (err)
		return -EINVAL;	/* full string handled, report error */

	return len;
}

/*
 * read(2) handler: returns one byte, '1' or '0', reflecting the current
 * level of the pin selected by the device minor number.
 */
ssize_t nsc_gpio_read(struct file *file, char __user * buf,
		      size_t len, loff_t * ppos)
{
	unsigned m = iminor(file->f_path.dentry->d_inode);
	int value;
	struct nsc_gpio_ops *amp = file->private_data;

	value = amp->gpio_get(m);
	if (put_user(value ? '1' : '0', buf))
		return -EFAULT;

	return 1;
}

/* common file-ops routines for both scx200_gpio and pc87360_gpio */
EXPORT_SYMBOL(nsc_gpio_write);
EXPORT_SYMBOL(nsc_gpio_read);
EXPORT_SYMBOL(nsc_gpio_dump);

static int __init nsc_gpio_init(void)
{
	printk(KERN_DEBUG NAME " initializing\n");
	return 0;
}

static void __exit nsc_gpio_cleanup(void)
{
	printk(KERN_DEBUG NAME " cleanup\n");
}

module_init(nsc_gpio_init);
module_exit(nsc_gpio_cleanup);

MODULE_AUTHOR("Jim Cromie <jim.cromie@gmail.com>");
MODULE_DESCRIPTION("NatSemi GPIO Common Methods");
MODULE_LICENSE("GPL");
gpl-2.0
khanfrd/android_kernel_xiaomi_kenzo
mm/percpu-km.c
11248
2852
/*
 * mm/percpu-km.c - kernel memory based chunk allocation
 *
 * Copyright (C) 2010		SUSE Linux Products GmbH
 * Copyright (C) 2010		Tejun Heo <tj@kernel.org>
 *
 * This file is released under the GPLv2.
 *
 * Chunks are allocated as a contiguous kernel memory using gfp
 * allocation.  This is to be used on nommu architectures.
 *
 * To use percpu-km,
 *
 * - define CONFIG_NEED_PER_CPU_KM from the arch Kconfig.
 *
 * - CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK must not be defined.  It's
 *   not compatible with PER_CPU_KM.  EMBED_FIRST_CHUNK should work
 *   fine.
 *
 * - NUMA is not supported.  When setting up the first chunk,
 *   @cpu_distance_fn should be NULL or report all CPUs to be nearer
 *   than or at LOCAL_DISTANCE.
 *
 * - It's best if the chunk size is power of two multiple of
 *   PAGE_SIZE.  Because each chunk is allocated as a contiguous
 *   kernel memory block using alloc_pages(), memory will be wasted if
 *   chunk size is not aligned.  percpu-km code will whine about it.
 */

#if defined(CONFIG_SMP) && defined(CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK)
#error "contiguous percpu allocation is incompatible with paged first chunk"
#endif

#include <linux/log2.h>

/*
 * Pages already exist for the whole chunk, so "populating" a range is
 * just zeroing it in every possible CPU's unit.  Always succeeds.
 */
static int pcpu_populate_chunk(struct pcpu_chunk *chunk, int off, int size)
{
	unsigned int cpu;

	for_each_possible_cpu(cpu)
		memset((void *)pcpu_chunk_addr(chunk, cpu, 0) + off, 0, size);

	return 0;
}

/* Nothing to release per-range; the backing pages stay with the chunk. */
static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, int off, int size)
{
	/* nada */
}

/*
 * Allocate a new chunk backed by one contiguous power-of-two page
 * allocation covering the single group, and tag each page with its
 * owning chunk.  Returns NULL on allocation failure.
 */
static struct pcpu_chunk *pcpu_create_chunk(void)
{
	const int nr_pages = pcpu_group_sizes[0] >> PAGE_SHIFT;
	struct pcpu_chunk *chunk;
	struct page *pages;
	int i;

	chunk = pcpu_alloc_chunk();
	if (!chunk)
		return NULL;

	pages = alloc_pages(GFP_KERNEL, order_base_2(nr_pages));
	if (!pages) {
		pcpu_free_chunk(chunk);
		return NULL;
	}

	for (i = 0; i < nr_pages; i++)
		pcpu_set_page_chunk(nth_page(pages, i), chunk);

	chunk->data = pages;
	chunk->base_addr = page_address(pages) - pcpu_group_offsets[0];
	return chunk;
}

/* Free the contiguous page block (if any) and the chunk descriptor. */
static void pcpu_destroy_chunk(struct pcpu_chunk *chunk)
{
	const int nr_pages = pcpu_group_sizes[0] >> PAGE_SHIFT;

	if (chunk && chunk->data)
		__free_pages(chunk->data, order_base_2(nr_pages));
	pcpu_free_chunk(chunk);
}

/* Chunk memory is ordinary kernel memory, so virt_to_page() suffices. */
static struct page *pcpu_addr_to_page(void *addr)
{
	return virt_to_page(addr);
}

/*
 * Reject configurations this backend cannot handle (more than one
 * group) and warn when rounding the chunk up to a power of two of
 * pages would waste memory.
 */
static int __init pcpu_verify_alloc_info(const struct pcpu_alloc_info *ai)
{
	size_t nr_pages, alloc_pages;

	/* all units must be in a single group */
	if (ai->nr_groups != 1) {
		printk(KERN_CRIT "percpu: can't handle more than one groups\n");
		return -EINVAL;
	}

	nr_pages = (ai->groups[0].nr_units * ai->unit_size) >> PAGE_SHIFT;
	alloc_pages = roundup_pow_of_two(nr_pages);

	if (alloc_pages > nr_pages)
		printk(KERN_WARNING "percpu: wasting %zu pages per chunk\n",
		       alloc_pages - nr_pages);

	return 0;
}
gpl-2.0
techomancer/kernel-galaxytab-gb
sound/pci/echoaudio/indigodjx_dsp.c
12528
2197
/************************************************************************

This file is part of Echo Digital Audio's generic driver library.
Copyright Echo Digital Audio Corporation (c) 1998 - 2005
All rights reserved
www.echoaudio.com

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
Lesser General Public License for more details.

You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307
USA

*************************************************************************

 Translation from C++ and adaptation for use in ALSA-Driver
 were made by Giuliano Pochini <pochini@shiny.it>

*************************************************************************/

static int update_vmixer_level(struct echoaudio *chip);
static int set_vmixer_gain(struct echoaudio *chip, u16 output, u16 pipe,
			   int gain);


/*
 * Bring the Indigo DJx to an operational state: verify the subdevice
 * id, initialize the DSP communication page, describe the card to the
 * rest of the driver, and download the DSP firmware.  Returns a
 * negative errno on failure, load_firmware()'s non-negative result on
 * success.
 */
static int init_hw(struct echoaudio *chip, u16 device_id, u16 subdevice_id)
{
	int ret;

	DE_INIT(("init_hw() - Indigo DJx\n"));
	if (snd_BUG_ON((subdevice_id & 0xfff0) != INDIGO_DJX))
		return -ENODEV;

	ret = init_dsp_comm_page(chip);
	if (ret < 0) {
		DE_INIT(("init_hw - could not initialize DSP comm page\n"));
		return ret;
	}

	chip->subdevice_id = subdevice_id;
	chip->device_id = device_id;
	chip->bad_board = TRUE;
	chip->dsp_code_to_load = FW_INDIGO_DJX_DSP;
	chip->input_clock_types = ECHO_CLOCK_BIT_INTERNAL;
	/* This card has no ASIC; pretending it is already loaded keeps
	 * the generic code paths happy. */
	chip->asic_loaded = TRUE;

	ret = load_firmware(chip);
	if (ret < 0)
		return ret;
	chip->bad_board = FALSE;

	DE_INIT(("init_hw done\n"));
	return ret;
}



/* On this card the mixer defaults are just the line levels. */
static int set_mixer_defaults(struct echoaudio *chip)
{
	return init_line_levels(chip);
}
gpl-2.0
EloYGomeZ/kernel_huawei_msm8610
sound/pci/echoaudio/indigoiox_dsp.c
12528
2197
/************************************************************************

This file is part of Echo Digital Audio's generic driver library.
Copyright Echo Digital Audio Corporation (c) 1998 - 2005
All rights reserved
www.echoaudio.com

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
Lesser General Public License for more details.

You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307
USA

*************************************************************************

 Translation from C++ and adaptation for use in ALSA-Driver
 were made by Giuliano Pochini <pochini@shiny.it>

*************************************************************************/

static int update_vmixer_level(struct echoaudio *chip);
static int set_vmixer_gain(struct echoaudio *chip, u16 output, u16 pipe,
			   int gain);


/*
 * Bring the Indigo IOx to an operational state: verify the subdevice
 * id, initialize the DSP communication page, describe the card to the
 * rest of the driver, and download the DSP firmware.  Returns a
 * negative errno on failure, load_firmware()'s non-negative result on
 * success.
 */
static int init_hw(struct echoaudio *chip, u16 device_id, u16 subdevice_id)
{
	int ret;

	DE_INIT(("init_hw() - Indigo IOx\n"));
	if (snd_BUG_ON((subdevice_id & 0xfff0) != INDIGO_IOX))
		return -ENODEV;

	ret = init_dsp_comm_page(chip);
	if (ret < 0) {
		DE_INIT(("init_hw - could not initialize DSP comm page\n"));
		return ret;
	}

	chip->subdevice_id = subdevice_id;
	chip->device_id = device_id;
	chip->bad_board = TRUE;
	chip->dsp_code_to_load = FW_INDIGO_IOX_DSP;
	chip->input_clock_types = ECHO_CLOCK_BIT_INTERNAL;
	/* This card has no ASIC; pretending it is already loaded keeps
	 * the generic code paths happy. */
	chip->asic_loaded = TRUE;

	ret = load_firmware(chip);
	if (ret < 0)
		return ret;
	chip->bad_board = FALSE;

	DE_INIT(("init_hw done\n"));
	return ret;
}



/* On this card the mixer defaults are just the line levels. */
static int set_mixer_defaults(struct echoaudio *chip)
{
	return init_line_levels(chip);
}
gpl-2.0
multirom-htc/kernel_htc_m8gpe
sound/aoa/core/core.c
14832
3387
/*
 * Apple Onboard Audio driver core
 *
 * Copyright 2006 Johannes Berg <johannes@sipsolutions.net>
 *
 * GPL v2, can be found in COPYING.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/list.h>
#include "../aoa.h"
#include "alsa.h"

MODULE_DESCRIPTION("Apple Onboard Audio Sound Driver");
MODULE_AUTHOR("Johannes Berg <johannes@sipsolutions.net>");
MODULE_LICENSE("GPL");

/* We allow only one fabric. This simplifies things,
 * and more don't really make that much sense */
static struct aoa_fabric *fabric;
static LIST_HEAD(codec_list);

/*
 * Hand one codec to the (single) fabric: take a module reference on
 * the codec, let the fabric claim it, run the codec's init hook, then
 * notify the fabric.  On any failure the module reference is dropped
 * and the codec is left unattached.
 */
static int attach_codec_to_fabric(struct aoa_codec *c)
{
	int err;

	if (!try_module_get(c->owner))
		return -EBUSY;
	/* found_codec has to be assigned */
	err = -ENOENT;
	if (fabric->found_codec)
		err = fabric->found_codec(c);
	if (err) {
		module_put(c->owner);
		printk(KERN_ERR "snd-aoa: fabric didn't like codec %s\n",
		       c->name);
		return err;
	}
	c->fabric = fabric;

	err = 0;
	if (c->init)
		err = c->init(c);
	if (err) {
		printk(KERN_ERR "snd-aoa: codec %s didn't init\n", c->name);
		c->fabric = NULL;
		if (fabric->remove_codec)
			fabric->remove_codec(c);
		module_put(c->owner);
		return err;
	}
	if (fabric->attached_codec)
		fabric->attached_codec(c);
	return 0;
}

/*
 * Register a codec.  If a fabric already exists, try attaching right
 * away and propagate any error; otherwise attachment happens later,
 * when a fabric registers.  The codec is queued on success.
 */
int aoa_codec_register(struct aoa_codec *codec)
{
	int err = 0;

	/* if there's a fabric already, we can tell if we
	 * will want to have this codec, so propagate error
	 * through. Otherwise, this will happen later... */
	if (fabric)
		err = attach_codec_to_fabric(codec);
	if (!err)
		list_add(&codec->list, &codec_list);
	return err;
}
EXPORT_SYMBOL_GPL(aoa_codec_register);

/* Unregister a codec, detaching it from the fabric if attached. */
void aoa_codec_unregister(struct aoa_codec *codec)
{
	list_del(&codec->list);
	if (codec->fabric && codec->exit)
		codec->exit(codec);
	if (fabric && fabric->remove_codec)
		fabric->remove_codec(codec);
	codec->fabric = NULL;
	module_put(codec->owner);
}
EXPORT_SYMBOL_GPL(aoa_codec_unregister);

/*
 * Register the (single) fabric.  Re-registering the same fabric only
 * re-attempts codec attachment (-EALREADY); a different fabric while
 * one exists is rejected with -EEXIST.  On first registration the ALSA
 * layer is initialized before any codecs are attached.
 */
int aoa_fabric_register(struct aoa_fabric *new_fabric, struct device *dev)
{
	struct aoa_codec *c;
	int err;

	/* allow querying for presence of fabric
	 * (i.e. do this test first!) */
	if (new_fabric == fabric) {
		err = -EALREADY;
		goto attach;
	}
	if (fabric)
		return -EEXIST;
	if (!new_fabric)
		return -EINVAL;

	err = aoa_alsa_init(new_fabric->name, new_fabric->owner, dev);
	if (err)
		return err;

	fabric = new_fabric;

 attach:
	list_for_each_entry(c, &codec_list, list) {
		if (c->fabric != fabric)
			attach_codec_to_fabric(c);
	}
	return err;
}
EXPORT_SYMBOL_GPL(aoa_fabric_register);

/*
 * Unregister the fabric: unlink every attached codec, tear down the
 * ALSA layer, and clear the fabric pointer.  A mismatched fabric is
 * silently ignored.
 */
void aoa_fabric_unregister(struct aoa_fabric *old_fabric)
{
	struct aoa_codec *c;

	if (fabric != old_fabric)
		return;

	list_for_each_entry(c, &codec_list, list) {
		if (c->fabric)
			aoa_fabric_unlink_codec(c);
	}

	aoa_alsa_cleanup();

	fabric = NULL;
}
EXPORT_SYMBOL_GPL(aoa_fabric_unregister);

/*
 * Detach one codec from its fabric (exit hook, fabric callback, module
 * reference drop).  Complains loudly if the codec was not attached.
 */
void aoa_fabric_unlink_codec(struct aoa_codec *codec)
{
	if (!codec->fabric) {
		printk(KERN_ERR "snd-aoa: fabric unassigned "
				"in aoa_fabric_unlink_codec\n");
		dump_stack();
		return;
	}
	if (codec->exit)
		codec->exit(codec);
	if (codec->fabric->remove_codec)
		codec->fabric->remove_codec(codec);
	codec->fabric = NULL;
	module_put(codec->owner);
}
EXPORT_SYMBOL_GPL(aoa_fabric_unlink_codec);

static int __init aoa_init(void)
{
	return 0;
}

static void __exit aoa_exit(void)
{
	aoa_alsa_cleanup();
}

module_init(aoa_init);
module_exit(aoa_exit);
gpl-2.0
minipli/linux-grsec
drivers/video/fbdev/kyro/STG4000Ramdac.c
15600
3931
/* * linux/drivers/video/kyro/STG4000Ramdac.c * * Copyright (C) 2002 STMicroelectronics * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of this archive * for more details. */ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/types.h> #include <video/kyro.h> #include "STG4000Reg.h" #include "STG4000Interface.h" static u32 STG_PIXEL_BUS_WIDTH = 128; /* 128 bit bus width */ static u32 REF_CLOCK = 14318; int InitialiseRamdac(volatile STG4000REG __iomem * pSTGReg, u32 displayDepth, u32 displayWidth, u32 displayHeight, s32 HSyncPolarity, s32 VSyncPolarity, u32 * pixelClock) { u32 tmp = 0; u32 F = 0, R = 0, P = 0; u32 stride = 0; u32 ulPdiv = 0; u32 physicalPixelDepth = 0; /* Make sure DAC is in Reset */ tmp = STG_READ_REG(SoftwareReset); if (tmp & 0x1) { CLEAR_BIT(1); STG_WRITE_REG(SoftwareReset, tmp); } /* Set Pixel Format */ tmp = STG_READ_REG(DACPixelFormat); CLEAR_BITS_FRM_TO(0, 2); /* Set LUT not used from 16bpp to 32 bpp ??? 
*/ CLEAR_BITS_FRM_TO(8, 9); switch (displayDepth) { case 16: { physicalPixelDepth = 16; tmp |= _16BPP; break; } case 32: { /* Set for 32 bits per pixel */ physicalPixelDepth = 32; tmp |= _32BPP; break; } default: return -EINVAL; } STG_WRITE_REG(DACPixelFormat, tmp); /* Workout Bus transfer bandwidth according to pixel format */ ulPdiv = STG_PIXEL_BUS_WIDTH / physicalPixelDepth; /* Get Screen Stride in pixels */ stride = displayWidth; /* Set Primary size info */ tmp = STG_READ_REG(DACPrimSize); CLEAR_BITS_FRM_TO(0, 10); CLEAR_BITS_FRM_TO(12, 31); tmp |= ((((displayHeight - 1) << 12) | (((displayWidth / ulPdiv) - 1) << 23)) | (stride / ulPdiv)); STG_WRITE_REG(DACPrimSize, tmp); /* Set Pixel Clock */ *pixelClock = ProgramClock(REF_CLOCK, *pixelClock, &F, &R, &P); /* Set DAC PLL Mode */ tmp = STG_READ_REG(DACPLLMode); CLEAR_BITS_FRM_TO(0, 15); /* tmp |= ((P-1) | ((F-2) << 2) | ((R-2) << 11)); */ tmp |= ((P) | ((F - 2) << 2) | ((R - 2) << 11)); STG_WRITE_REG(DACPLLMode, tmp); /* Set Prim Address */ tmp = STG_READ_REG(DACPrimAddress); CLEAR_BITS_FRM_TO(0, 20); CLEAR_BITS_FRM_TO(20, 31); STG_WRITE_REG(DACPrimAddress, tmp); /* Set Cursor details with HW Cursor disabled */ tmp = STG_READ_REG(DACCursorCtrl); tmp &= ~SET_BIT(31); STG_WRITE_REG(DACCursorCtrl, tmp); tmp = STG_READ_REG(DACCursorAddr); CLEAR_BITS_FRM_TO(0, 20); STG_WRITE_REG(DACCursorAddr, tmp); /* Set Video Window */ tmp = STG_READ_REG(DACVidWinStart); CLEAR_BITS_FRM_TO(0, 10); CLEAR_BITS_FRM_TO(16, 26); STG_WRITE_REG(DACVidWinStart, tmp); tmp = STG_READ_REG(DACVidWinEnd); CLEAR_BITS_FRM_TO(0, 10); CLEAR_BITS_FRM_TO(16, 26); STG_WRITE_REG(DACVidWinEnd, tmp); /* Set DAC Border Color to default */ tmp = STG_READ_REG(DACBorderColor); CLEAR_BITS_FRM_TO(0, 23); STG_WRITE_REG(DACBorderColor, tmp); /* Set Graphics and Overlay Burst Control */ STG_WRITE_REG(DACBurstCtrl, 0x0404); /* Set CRC Trigger to default */ tmp = STG_READ_REG(DACCrcTrigger); CLEAR_BIT(0); STG_WRITE_REG(DACCrcTrigger, tmp); /* Set Video Port Control 
to default */ tmp = STG_READ_REG(DigVidPortCtrl); CLEAR_BIT(8); CLEAR_BITS_FRM_TO(16, 27); CLEAR_BITS_FRM_TO(1, 3); CLEAR_BITS_FRM_TO(10, 11); STG_WRITE_REG(DigVidPortCtrl, tmp); return 0; } /* Ramdac control, turning output to the screen on and off */ void DisableRamdacOutput(volatile STG4000REG __iomem * pSTGReg) { u32 tmp; /* Disable DAC for Graphics Stream Control */ tmp = (STG_READ_REG(DACStreamCtrl)) & ~SET_BIT(0); STG_WRITE_REG(DACStreamCtrl, tmp); } void EnableRamdacOutput(volatile STG4000REG __iomem * pSTGReg) { u32 tmp; /* Enable DAC for Graphics Stream Control */ tmp = (STG_READ_REG(DACStreamCtrl)) | SET_BIT(0); STG_WRITE_REG(DACStreamCtrl, tmp); }
gpl-2.0
riverzhou/kernel-c8500
arch/sh/kernel/traps.c
497
2175
#include <linux/bug.h>
#include <linux/io.h>
#include <linux/types.h>
#include <linux/kdebug.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>
#include <asm/unwinder.h>
#include <asm/system.h>

#ifdef CONFIG_BUG
/*
 * Handle a BUG()/WARN() trap at regs->pc.  WARN-class entries report
 * and resume past the trap instruction; everything else (including an
 * address that is not a valid bug site) is fatal via die().
 */
void handle_BUG(struct pt_regs *regs)
{
	const struct bug_entry *bug;
	unsigned long bugaddr = regs->pc;
	enum bug_trap_type tt;

	if (!is_valid_bugaddr(bugaddr))
		goto invalid;

	bug = find_bug(bugaddr);

	/* Switch unwinders when unwind_stack() is called */
	if (bug->flags & BUGFLAG_UNWINDER)
		unwinder_faulted = 1;

	tt = report_bug(bugaddr, regs);
	if (tt == BUG_TRAP_TYPE_WARN) {
		regs->pc += instruction_size(bugaddr);
		return;
	}

invalid:
	die("Kernel BUG", regs, TRAPA_BUG_OPCODE & 0xff);
}

/*
 * A bug address is valid when it lies in kernel space, is safely
 * readable, and contains the TRAPA_BUG_OPCODE instruction.
 */
int is_valid_bugaddr(unsigned long addr)
{
	insn_size_t opcode;

	if (addr < PAGE_OFFSET)
		return 0;
	if (probe_kernel_address((insn_size_t *)addr, opcode))
		return 0;
	if (opcode == TRAPA_BUG_OPCODE)
		return 1;

	return 0;
}
#endif

/*
 * Generic trap handler.
 */
BUILD_TRAP_HANDLER(debug)
{
	TRAP_HANDLER_DECL;

	/* Rewind */
	regs->pc -= instruction_size(ctrl_inw(regs->pc - 4));

	if (notify_die(DIE_TRAP, "debug trap", regs, 0, vec & 0xff,
		       SIGTRAP) == NOTIFY_STOP)
		return;

	force_sig(SIGTRAP, current);
}

/*
 * Special handler for BUG() traps.
 */
BUILD_TRAP_HANDLER(bug)
{
	TRAP_HANDLER_DECL;

	/* Rewind */
	regs->pc -= instruction_size(ctrl_inw(regs->pc - 4));

	if (notify_die(DIE_TRAP, "bug trap", regs, 0, TRAPA_BUG_OPCODE & 0xff,
		       SIGTRAP) == NOTIFY_STOP)
		return;

#ifdef CONFIG_BUG
	if (__kernel_text_address(instruction_pointer(regs))) {
		insn_size_t insn = *(insn_size_t *)instruction_pointer(regs);
		if (insn == TRAPA_BUG_OPCODE)
			handle_BUG(regs);
		return;
	}
#endif

	force_sig(SIGTRAP, current);
}

/* NMI dispatch: run die-chain notifiers inside nmi_enter()/nmi_exit(). */
BUILD_TRAP_HANDLER(nmi)
{
	TRAP_HANDLER_DECL;

	nmi_enter();

	switch (notify_die(DIE_NMI, "NMI", regs, 0, vec & 0xff, SIGINT)) {
	case NOTIFY_OK:
	case NOTIFY_STOP:
		break;
	case NOTIFY_BAD:
		die("Fatal Non-Maskable Interrupt", regs, SIGINT);
	default:
		printk(KERN_ALERT "Got NMI, but nobody cared. Ignoring...\n");
		break;
	}

	nmi_exit();
}
gpl-2.0
eltair/htc-kernel-evo
arch/powerpc/platforms/85xx/mpc85xx_rdb.c
497
3047
/*
 * MPC85xx RDB Board Setup
 *
 * Copyright 2009 Freescale Semiconductor Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under  the terms of the GNU General  Public License as published by the
 * Free Software Foundation;  either version 2 of the  License, or (at your
 * option) any later version.
 */

#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/kdev_t.h>
#include <linux/delay.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <linux/of_platform.h>

#include <asm/system.h>
#include <asm/time.h>
#include <asm/machdep.h>
#include <asm/pci-bridge.h>
#include <mm/mmu_decl.h>
#include <asm/prom.h>
#include <asm/udbg.h>
#include <asm/mpic.h>

#include <sysdev/fsl_soc.h>
#include <sysdev/fsl_pci.h>

#undef DEBUG

#ifdef DEBUG
#define DBG(fmt, args...) printk(KERN_ERR "%s: " fmt, __func__, ## args)
#else
#define DBG(fmt, args...)
#endif

/*
 * Locate the open-pic device-tree node, map its register resource and
 * bring up the MPIC interrupt controller.  Errors are logged and the
 * function returns without initializing the PIC.
 */
void __init mpc85xx_rdb_pic_init(void)
{
	struct mpic *mpic;
	struct resource r;
	struct device_node *np;

	np = of_find_node_by_type(NULL, "open-pic");
	if (np == NULL) {
		printk(KERN_ERR "Could not find open-pic node\n");
		return;
	}

	if (of_address_to_resource(np, 0, &r)) {
		printk(KERN_ERR "Failed to map mpic register space\n");
		of_node_put(np);
		return;
	}

	mpic = mpic_alloc(np, r.start,
	  MPIC_PRIMARY | MPIC_WANTS_RESET |
	  MPIC_BIG_ENDIAN | MPIC_BROKEN_FRR_NIRQS |
	  MPIC_SINGLE_DEST_CPU,
	  0, 256, " OpenPIC ");

	BUG_ON(mpic == NULL);
	of_node_put(np);

	mpic_init(mpic);
}

/*
 * Setup the architecture
 */
#ifdef CONFIG_SMP
extern void __init mpc85xx_smp_init(void);
#endif

/* Board setup: register PCIe bridges (if any) and bring up SMP. */
static void __init mpc85xx_rdb_setup_arch(void)
{
#ifdef CONFIG_PCI
	struct device_node *np;
#endif

	if (ppc_md.progress)
		ppc_md.progress("mpc85xx_rdb_setup_arch()", 0);

#ifdef CONFIG_PCI
	for_each_node_by_type(np, "pci") {
		if (of_device_is_compatible(np, "fsl,mpc8548-pcie"))
			fsl_add_bridge(np, 0);
	}

#endif

#ifdef CONFIG_SMP
	mpc85xx_smp_init();
#endif

	printk(KERN_INFO "MPC85xx RDB board from Freescale Semiconductor\n");
}

/* Bus types whose children become platform devices at init time. */
static struct of_device_id __initdata mpc85xxrdb_ids[] = {
	{ .type = "soc", },
	{ .compatible = "soc", },
	{ .compatible = "simple-bus", },
	{ .compatible = "gianfar", },
	{},
};

static int __init mpc85xxrdb_publish_devices(void)
{
	return of_platform_bus_probe(NULL, mpc85xxrdb_ids, NULL);
}
machine_device_initcall(p2020_rdb, mpc85xxrdb_publish_devices);

/*
 * Called very early, device-tree isn't unflattened
 */
static int __init p2020_rdb_probe(void)
{
	unsigned long root = of_get_flat_dt_root();

	if (of_flat_dt_is_compatible(root, "fsl,P2020RDB"))
		return 1;
	return 0;
}

define_machine(p2020_rdb) {
	.name			= "P2020 RDB",
	.probe			= p2020_rdb_probe,
	.setup_arch		= mpc85xx_rdb_setup_arch,
	.init_IRQ		= mpc85xx_rdb_pic_init,
#ifdef CONFIG_PCI
	.pcibios_fixup_bus	= fsl_pcibios_fixup_bus,
#endif
	.get_irq		= mpic_get_irq,
	.restart		= fsl_rstcr_restart,
	.calibrate_decr		= generic_calibrate_decr,
	.progress		= udbg_progress,
};
gpl-2.0
penhoi/linux-3.13.11.lbrpmu
drivers/virtio/virtio.c
497
6808
#include <linux/virtio.h>
#include <linux/spinlock.h>
#include <linux/virtio_config.h>
#include <linux/module.h>
#include <linux/idr.h>

/* Unique numbering for virtio devices. */
static DEFINE_IDA(virtio_index_ida);

/* sysfs attribute: device id of the virtio device. */
static ssize_t device_show(struct device *_d,
			   struct device_attribute *attr, char *buf)
{
	struct virtio_device *dev = dev_to_virtio(_d);
	return sprintf(buf, "0x%04x\n", dev->id.device);
}
static DEVICE_ATTR_RO(device);

/* sysfs attribute: vendor id of the virtio device. */
static ssize_t vendor_show(struct device *_d,
			   struct device_attribute *attr, char *buf)
{
	struct virtio_device *dev = dev_to_virtio(_d);
	return sprintf(buf, "0x%04x\n", dev->id.vendor);
}
static DEVICE_ATTR_RO(vendor);

/* sysfs attribute: current status word read from the transport. */
static ssize_t status_show(struct device *_d,
			   struct device_attribute *attr, char *buf)
{
	struct virtio_device *dev = dev_to_virtio(_d);
	return sprintf(buf, "0x%08x\n", dev->config->get_status(dev));
}
static DEVICE_ATTR_RO(status);

/* sysfs attribute: modalias string used for module autoloading. */
static ssize_t modalias_show(struct device *_d,
			     struct device_attribute *attr, char *buf)
{
	struct virtio_device *dev = dev_to_virtio(_d);
	return sprintf(buf, "virtio:d%08Xv%08X\n",
		       dev->id.device, dev->id.vendor);
}
static DEVICE_ATTR_RO(modalias);

/* sysfs attribute: negotiated feature bits rendered as a bitstring. */
static ssize_t features_show(struct device *_d,
			     struct device_attribute *attr, char *buf)
{
	struct virtio_device *dev = dev_to_virtio(_d);
	unsigned int i;
	ssize_t len = 0;

	/* We actually represent this as a bitstring, as it could be
	 * arbitrary length in future. */
	for (i = 0; i < ARRAY_SIZE(dev->features)*BITS_PER_LONG; i++)
		len += sprintf(buf+len, "%c",
			       test_bit(i, dev->features) ? '1' : '0');
	len += sprintf(buf+len, "\n");
	return len;
}
static DEVICE_ATTR_RO(features);

static struct attribute *virtio_dev_attrs[] = {
	&dev_attr_device.attr,
	&dev_attr_vendor.attr,
	&dev_attr_status.attr,
	&dev_attr_modalias.attr,
	&dev_attr_features.attr,
	NULL,
};
ATTRIBUTE_GROUPS(virtio_dev);

/* One id-table entry matches when device and vendor both match
 * (VIRTIO_DEV_ANY_ID acts as a wildcard for either field). */
static inline int virtio_id_match(const struct virtio_device *dev,
				  const struct virtio_device_id *id)
{
	if (id->device != dev->id.device && id->device != VIRTIO_DEV_ANY_ID)
		return 0;

	return id->vendor == VIRTIO_DEV_ANY_ID || id->vendor == dev->id.vendor;
}

/* This looks through all the IDs a driver claims to support.  If any of them
 * match, we return 1 and the kernel will call virtio_dev_probe(). */
static int virtio_dev_match(struct device *_dv, struct device_driver *_dr)
{
	unsigned int i;
	struct virtio_device *dev = dev_to_virtio(_dv);
	const struct virtio_device_id *ids;

	ids = drv_to_virtio(_dr)->id_table;
	for (i = 0; ids[i].device; i++)
		if (virtio_id_match(dev, &ids[i]))
			return 1;
	return 0;
}

/* Emit MODALIAS= so udev can autoload the matching driver module. */
static int virtio_uevent(struct device *_dv, struct kobj_uevent_env *env)
{
	struct virtio_device *dev = dev_to_virtio(_dv);

	return add_uevent_var(env, "MODALIAS=virtio:d%08Xv%08X",
			      dev->id.device, dev->id.vendor);
}

/* OR extra bits into the device's transport status word. */
static void add_status(struct virtio_device *dev, unsigned status)
{
	dev->config->set_status(dev, dev->config->get_status(dev) | status);
}

/*
 * BUG if the bound driver's feature table does not mention @fbit --
 * catches drivers querying features they never declared.
 */
void virtio_check_driver_offered_feature(const struct virtio_device *vdev,
					 unsigned int fbit)
{
	unsigned int i;
	struct virtio_driver *drv = drv_to_virtio(vdev->dev.driver);

	for (i = 0; i < drv->feature_table_size; i++)
		if (drv->feature_table[i] == fbit)
			return;
	BUG();
}
EXPORT_SYMBOL_GPL(virtio_check_driver_offered_feature);

/*
 * Bus probe: negotiate features (intersection of device and driver,
 * plus transport feature bits), then call the driver's probe and, on
 * success, mark DRIVER_OK and run the optional scan hook.  FAILED is
 * set in the status word if probe errors out.
 */
static int virtio_dev_probe(struct device *_d)
{
	int err, i;
	struct virtio_device *dev = dev_to_virtio(_d);
	struct virtio_driver *drv = drv_to_virtio(dev->dev.driver);
	u32 device_features;

	/* We have a driver! */
	add_status(dev, VIRTIO_CONFIG_S_DRIVER);

	/* Figure out what features the device supports. */
	device_features = dev->config->get_features(dev);

	/* Features supported by both device and driver into dev->features. */
	memset(dev->features, 0, sizeof(dev->features));
	for (i = 0; i < drv->feature_table_size; i++) {
		unsigned int f = drv->feature_table[i];
		BUG_ON(f >= 32);
		if (device_features & (1 << f))
			set_bit(f, dev->features);
	}

	/* Transport features always preserved to pass to finalize_features. */
	for (i = VIRTIO_TRANSPORT_F_START; i < VIRTIO_TRANSPORT_F_END; i++)
		if (device_features & (1 << i))
			set_bit(i, dev->features);

	dev->config->finalize_features(dev);

	err = drv->probe(dev);
	if (err)
		add_status(dev, VIRTIO_CONFIG_S_FAILED);
	else {
		add_status(dev, VIRTIO_CONFIG_S_DRIVER_OK);
		if (drv->scan)
			drv->scan(dev);
	}

	return err;
}

/* Bus remove: driver teardown, then re-ACKNOWLEDGE the device. */
static int virtio_dev_remove(struct device *_d)
{
	struct virtio_device *dev = dev_to_virtio(_d);
	struct virtio_driver *drv = drv_to_virtio(dev->dev.driver);

	drv->remove(dev);

	/* Driver should have reset device. */
	WARN_ON_ONCE(dev->config->get_status(dev));

	/* Acknowledge the device's existence again. */
	add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE);
	return 0;
}

static struct bus_type virtio_bus = {
	.name  = "virtio",
	.match = virtio_dev_match,
	.dev_groups = virtio_dev_groups,
	.uevent = virtio_uevent,
	.probe = virtio_dev_probe,
	.remove = virtio_dev_remove,
};

/* Hook a virtio driver onto the virtio bus. */
int register_virtio_driver(struct virtio_driver *driver)
{
	/* Catch this early. */
	BUG_ON(driver->feature_table_size && !driver->feature_table);
	driver->driver.bus = &virtio_bus;
	return driver_register(&driver->driver);
}
EXPORT_SYMBOL_GPL(register_virtio_driver);

void unregister_virtio_driver(struct virtio_driver *driver)
{
	driver_unregister(&driver->driver);
}
EXPORT_SYMBOL_GPL(unregister_virtio_driver);

/*
 * Register a virtio device: allocate a unique index/name, reset the
 * device, ACKNOWLEDGE it, then let the driver core find a driver.
 * FAILED is recorded in the status word on any error.
 */
int register_virtio_device(struct virtio_device *dev)
{
	int err;

	dev->dev.bus = &virtio_bus;

	/* Assign a unique device index and hence name. */
	err = ida_simple_get(&virtio_index_ida, 0, 0, GFP_KERNEL);
	if (err < 0)
		goto out;

	dev->index = err;
	dev_set_name(&dev->dev, "virtio%u", dev->index);

	/* We always start by resetting the device, in case a previous
	 * driver messed it up.  This also tests that code path a little. */
	dev->config->reset(dev);

	/* Acknowledge that we've seen the device. */
	add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE);

	INIT_LIST_HEAD(&dev->vqs);

	/* device_register() causes the bus infrastructure to look for a
	 * matching driver. */
	err = device_register(&dev->dev);
out:
	if (err)
		add_status(dev, VIRTIO_CONFIG_S_FAILED);
	return err;
}
EXPORT_SYMBOL_GPL(register_virtio_device);

void unregister_virtio_device(struct virtio_device *dev)
{
	int index = dev->index; /* save for after device release */

	device_unregister(&dev->dev);
	ida_simple_remove(&virtio_index_ida, index);
}
EXPORT_SYMBOL_GPL(unregister_virtio_device);

static int virtio_init(void)
{
	if (bus_register(&virtio_bus) != 0)
		panic("virtio bus registration failed");
	return 0;
}

static void __exit virtio_exit(void)
{
	bus_unregister(&virtio_bus);
}
core_initcall(virtio_init);
module_exit(virtio_exit);

MODULE_LICENSE("GPL");
gpl-2.0
lostpuppy/m180s-kernel-stock
fs/sysv/inode.c
753
9551
/*
 *  linux/fs/sysv/inode.c
 *
 *  minix/inode.c
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  xenix/inode.c
 *  Copyright (C) 1992  Doug Evans
 *
 *  coh/inode.c
 *  Copyright (C) 1993  Pascal Haible, Bruno Haible
 *
 *  sysv/inode.c
 *  Copyright (C) 1993  Paul B. Monday
 *
 *  sysv/inode.c
 *  Copyright (C) 1993  Bruno Haible
 *  Copyright (C) 1997, 1998  Krzysztof G. Baranowski
 *
 *  This file contains code for allocating/freeing inodes and for read/writing
 *  the superblock.
 */

#include <linux/highuid.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/buffer_head.h>
#include <linux/vfs.h>
#include <linux/writeback.h>
#include <linux/namei.h>
#include <asm/byteorder.h>
#include "sysv.h"

/*
 * Write the on-disk superblock timestamp/state back out.
 *
 * Only FSTYPE_SYSV4 keeps a clean/dirty state word; it is encoded as
 * (0x7c269d38 - sb_time), so when the state still matches the old time
 * it is re-encoded against the new time to keep the filesystem "clean".
 */
static int sysv_sync_fs(struct super_block *sb, int wait)
{
	struct sysv_sb_info *sbi = SYSV_SB(sb);
	unsigned long time = get_seconds(), old_time;

	lock_super(sb);

	/*
	 * If we are going to write out the super block,
	 * then attach current time stamp.
	 * But if the filesystem was marked clean, keep it clean.
	 */
	sb->s_dirt = 0;
	old_time = fs32_to_cpu(sbi, *sbi->s_sb_time);
	if (sbi->s_type == FSTYPE_SYSV4) {
		if (*sbi->s_sb_state == cpu_to_fs32(sbi, 0x7c269d38 - old_time))
			*sbi->s_sb_state = cpu_to_fs32(sbi, 0x7c269d38 - time);
		*sbi->s_sb_time = cpu_to_fs32(sbi, time);
		mark_buffer_dirty(sbi->s_bh2);
	}

	unlock_super(sb);

	return 0;
}

/* Flush the superblock if the filesystem is writable, else just clear s_dirt. */
static void sysv_write_super(struct super_block *sb)
{
	if (!(sb->s_flags & MS_RDONLY))
		sysv_sync_fs(sb, 1);
	else
		sb->s_dirt = 0;
}

/*
 * Remount: a filesystem that was force-marked read-only (s_forced_ro)
 * stays read-only; a rw remount marks the superblock dirty so it gets
 * written out again.
 */
static int sysv_remount(struct super_block *sb, int *flags, char *data)
{
	struct sysv_sb_info *sbi = SYSV_SB(sb);

	lock_super(sb);
	if (sbi->s_forced_ro)
		*flags |= MS_RDONLY;
	if (!(*flags & MS_RDONLY))
		sb->s_dirt = 1;
	unlock_super(sb);
	return 0;
}

/* Tear down at unmount: flush, dirty the sb buffers if rw, release them. */
static void sysv_put_super(struct super_block *sb)
{
	struct sysv_sb_info *sbi = SYSV_SB(sb);

	if (sb->s_dirt)
		sysv_write_super(sb);

	if (!(sb->s_flags & MS_RDONLY)) {
		/* XXX ext2 also updates the state here */
		mark_buffer_dirty(sbi->s_bh1);
		if (sbi->s_bh1 != sbi->s_bh2)
			mark_buffer_dirty(sbi->s_bh2);
	}

	brelse(sbi->s_bh1);
	if (sbi->s_bh1 != sbi->s_bh2)
		brelse(sbi->s_bh2);

	kfree(sbi);
}

/* Fill in statfs(2) data from the cached superblock counters. */
static int sysv_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct super_block *sb = dentry->d_sb;
	struct sysv_sb_info *sbi = SYSV_SB(sb);
	u64 id = huge_encode_dev(sb->s_bdev->bd_dev);

	buf->f_type = sb->s_magic;
	buf->f_bsize = sb->s_blocksize;
	buf->f_blocks = sbi->s_ndatazones;
	buf->f_bavail = buf->f_bfree = sysv_count_free_blocks(sb);
	buf->f_files = sbi->s_ninodes;
	buf->f_ffree = sysv_count_free_inodes(sb);
	buf->f_namelen = SYSV_NAMELEN;
	buf->f_fsid.val[0] = (u32)id;
	buf->f_fsid.val[1] = (u32)(id >> 32);
	return 0;
}

/*
 * Expand a 3-byte on-disk block number into a 4-byte in-core one.
 * NXI <-> N0XI for PDP, XIN <-> XIN0 for le32, NIX <-> 0NIX for be32
 */
static inline void read3byte(struct sysv_sb_info *sbi,
	unsigned char * from, unsigned char * to)
{
	if (sbi->s_bytesex == BYTESEX_PDP) {
		to[0] = from[0];
		to[1] = 0;
		to[2] = from[1];
		to[3] = from[2];
	} else if (sbi->s_bytesex == BYTESEX_LE) {
		to[0] = from[0];
		to[1] = from[1];
		to[2] = from[2];
		to[3] = 0;
	} else {
		to[0] = 0;
		to[1] = from[0];
		to[2] = from[1];
		to[3] = from[2];
	}
}

/* Inverse of read3byte(): pack a 4-byte in-core value into 3 on-disk bytes. */
static inline void write3byte(struct sysv_sb_info *sbi,
	unsigned char * from, unsigned char * to)
{
	if (sbi->s_bytesex == BYTESEX_PDP) {
		to[0] = from[0];
		to[1] = from[2];
		to[2] = from[3];
	} else if (sbi->s_bytesex == BYTESEX_LE) {
		to[0] = from[0];
		to[1] = from[1];
		to[2] = from[2];
	} else {
		to[0] = from[1];
		to[1] = from[2];
		to[2] = from[3];
	}
}

static const struct inode_operations sysv_symlink_inode_operations = {
	.readlink	= generic_readlink,
	.follow_link	= page_follow_link_light,
	.put_link	= page_put_link,
	.getattr	= sysv_getattr,
};

/*
 * Wire up the i_op/i_fop/a_ops vectors for an inode based on its mode.
 * A symlink with no allocated blocks is a "fast" symlink stored inline
 * in i_data, which must be NUL-terminated for the VFS link walker.
 */
void sysv_set_inode(struct inode *inode, dev_t rdev)
{
	if (S_ISREG(inode->i_mode)) {
		inode->i_op = &sysv_file_inode_operations;
		inode->i_fop = &sysv_file_operations;
		inode->i_mapping->a_ops = &sysv_aops;
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = &sysv_dir_inode_operations;
		inode->i_fop = &sysv_dir_operations;
		inode->i_mapping->a_ops = &sysv_aops;
	} else if (S_ISLNK(inode->i_mode)) {
		if (inode->i_blocks) {
			inode->i_op = &sysv_symlink_inode_operations;
			inode->i_mapping->a_ops = &sysv_aops;
		} else {
			inode->i_op = &sysv_fast_symlink_inode_operations;
			nd_terminate_link(SYSV_I(inode)->i_data, inode->i_size,
				sizeof(SYSV_I(inode)->i_data) - 1);
		}
	} else
		init_special_inode(inode, inode->i_mode, rdev);
}

/*
 * Look up (or read from disk) the inode with number @ino on @sb.
 * Returns the inode, or ERR_PTR(-EIO/-ENOMEM) on failure.
 */
struct inode *sysv_iget(struct super_block *sb, unsigned int ino)
{
	struct sysv_sb_info * sbi = SYSV_SB(sb);
	struct buffer_head * bh;
	struct sysv_inode * raw_inode;
	struct sysv_inode_info * si;
	struct inode *inode;
	unsigned int block;

	if (!ino || ino > sbi->s_ninodes) {
		printk("Bad inode number on dev %s: %d is out of range\n",
		       sb->s_id, ino);
		return ERR_PTR(-EIO);
	}

	inode = iget_locked(sb, ino);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;

	raw_inode = sysv_raw_inode(sb, ino, &bh);
	if (!raw_inode) {
		printk("Major problem: unable to read inode from dev %s\n",
		       inode->i_sb->s_id);
		goto bad_inode;
	}
	/* SystemV FS: kludge permissions if ino==SYSV_ROOT_INO ?? */
	inode->i_mode = fs16_to_cpu(sbi, raw_inode->i_mode);
	inode->i_uid = (uid_t)fs16_to_cpu(sbi, raw_inode->i_uid);
	inode->i_gid = (gid_t)fs16_to_cpu(sbi, raw_inode->i_gid);
	inode->i_nlink = fs16_to_cpu(sbi, raw_inode->i_nlink);
	inode->i_size = fs32_to_cpu(sbi, raw_inode->i_size);
	inode->i_atime.tv_sec = fs32_to_cpu(sbi, raw_inode->i_atime);
	inode->i_mtime.tv_sec = fs32_to_cpu(sbi, raw_inode->i_mtime);
	inode->i_ctime.tv_sec = fs32_to_cpu(sbi, raw_inode->i_ctime);
	inode->i_ctime.tv_nsec = 0;
	inode->i_atime.tv_nsec = 0;
	inode->i_mtime.tv_nsec = 0;
	inode->i_blocks = 0;

	si = SYSV_I(inode);
	/* 10 direct + 1 indirect + 1 double-indirect + 1 triple-indirect */
	for (block = 0; block < 10+1+1+1; block++)
		read3byte(sbi, &raw_inode->i_data[3*block],
				(u8 *)&si->i_data[block]);
	brelse(bh);
	si->i_dir_start_lookup = 0;
	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
		sysv_set_inode(inode,
			       old_decode_dev(fs32_to_cpu(sbi, si->i_data[0])));
	else
		sysv_set_inode(inode, 0);
	unlock_new_inode(inode);
	return inode;

bad_inode:
	iget_failed(inode);
	return ERR_PTR(-EIO);
}

/*
 * Write an in-core inode back to its on-disk slot.
 * If @wait, the buffer is synced and a write failure is reported.
 *
 * Fix vs. original: the function used to end with "return 0;", which
 * threw away the -EIO recorded when sync_dirty_buffer() failed, so
 * fsync()/sync paths never saw the error.  Return @err instead.
 */
static int __sysv_write_inode(struct inode *inode, int wait)
{
	struct super_block * sb = inode->i_sb;
	struct sysv_sb_info * sbi = SYSV_SB(sb);
	struct buffer_head * bh;
	struct sysv_inode * raw_inode;
	struct sysv_inode_info * si;
	unsigned int ino, block;
	int err = 0;

	ino = inode->i_ino;
	if (!ino || ino > sbi->s_ninodes) {
		printk("Bad inode number on dev %s: %d is out of range\n",
		       inode->i_sb->s_id, ino);
		return -EIO;
	}
	raw_inode = sysv_raw_inode(sb, ino, &bh);
	if (!raw_inode) {
		printk("unable to read i-node block\n");
		return -EIO;
	}

	raw_inode->i_mode = cpu_to_fs16(sbi, inode->i_mode);
	raw_inode->i_uid = cpu_to_fs16(sbi, fs_high2lowuid(inode->i_uid));
	raw_inode->i_gid = cpu_to_fs16(sbi, fs_high2lowgid(inode->i_gid));
	raw_inode->i_nlink = cpu_to_fs16(sbi, inode->i_nlink);
	raw_inode->i_size = cpu_to_fs32(sbi, inode->i_size);
	raw_inode->i_atime = cpu_to_fs32(sbi, inode->i_atime.tv_sec);
	raw_inode->i_mtime = cpu_to_fs32(sbi, inode->i_mtime.tv_sec);
	raw_inode->i_ctime = cpu_to_fs32(sbi, inode->i_ctime.tv_sec);

	si = SYSV_I(inode);
	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
		si->i_data[0] = cpu_to_fs32(sbi, old_encode_dev(inode->i_rdev));
	for (block = 0; block < 10+1+1+1; block++)
		write3byte(sbi, (u8 *)&si->i_data[block],
			&raw_inode->i_data[3*block]);
	mark_buffer_dirty(bh);
	if (wait) {
		sync_dirty_buffer(bh);
		if (buffer_req(bh) && !buffer_uptodate(bh)) {
			printk ("IO error syncing sysv inode [%s:%08x]\n",
				sb->s_id, ino);
			err = -EIO;
		}
	}
	brelse(bh);
	return err;
}

int sysv_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	return __sysv_write_inode(inode, wbc->sync_mode == WB_SYNC_ALL);
}

/* Synchronous variant used internally: always waits for the write. */
int sysv_sync_inode(struct inode *inode)
{
	return __sysv_write_inode(inode, 1);
}

/* Final unlink: drop page cache, truncate data blocks, free the inode slot. */
static void sysv_delete_inode(struct inode *inode)
{
	truncate_inode_pages(&inode->i_data, 0);
	inode->i_size = 0;
	sysv_truncate(inode);
	sysv_free_inode(inode);
}

static struct kmem_cache *sysv_inode_cachep;

static struct inode *sysv_alloc_inode(struct super_block *sb)
{
	struct sysv_inode_info *si;

	si = kmem_cache_alloc(sysv_inode_cachep, GFP_KERNEL);
	if (!si)
		return NULL;
	return &si->vfs_inode;
}

static void sysv_destroy_inode(struct inode *inode)
{
	kmem_cache_free(sysv_inode_cachep, SYSV_I(inode));
}

/* Slab constructor: runs once per object, so only VFS-invariant init here. */
static void init_once(void *p)
{
	struct sysv_inode_info *si = (struct sysv_inode_info *)p;

	inode_init_once(&si->vfs_inode);
}

const struct super_operations sysv_sops = {
	.alloc_inode	= sysv_alloc_inode,
	.destroy_inode	= sysv_destroy_inode,
	.write_inode	= sysv_write_inode,
	.delete_inode	= sysv_delete_inode,
	.put_super	= sysv_put_super,
	.write_super	= sysv_write_super,
	.sync_fs	= sysv_sync_fs,
	.remount_fs	= sysv_remount,
	.statfs		= sysv_statfs,
};

int __init sysv_init_icache(void)
{
	sysv_inode_cachep = kmem_cache_create("sysv_inode_cache",
			sizeof(struct sysv_inode_info), 0,
			SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
			init_once);
	if (!sysv_inode_cachep)
		return -ENOMEM;
	return 0;
}

void sysv_destroy_icache(void)
{
	kmem_cache_destroy(sysv_inode_cachep);
}
gpl-2.0
Fusion-Devices/android_kernel_mediatek_sprout
kernel/irq_work.c
1521
4576
/*
 * Copyright (C) 2010 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *
 * Provides a framework for enqueueing and running callbacks from hardirq
 * context. The enqueueing is NMI-safe.
 */

#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/irq_work.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/irqflags.h>
#include <linux/sched.h>
#include <linux/tick.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <asm/processor.h>

/* Per-CPU list of pending work items, plus a flag recording whether the
 * arch IRQ has already been raised for this CPU (avoids duplicate IPIs). */
static DEFINE_PER_CPU(struct llist_head, irq_work_list);
static DEFINE_PER_CPU(int, irq_work_raised);

/*
 * Claim the entry so that no one else will poke at it.
 */
static bool irq_work_claim(struct irq_work *work)
{
	unsigned long flags, oflags, nflags;

	/*
	 * Start with our best wish as a premise but only trust any
	 * flag value after cmpxchg() result.
	 */
	flags = work->flags & ~IRQ_WORK_PENDING;
	for (;;) {
		nflags = flags | IRQ_WORK_FLAGS;
		/* cmpxchg loop: retry until we set PENDING|BUSY atomically,
		 * or observe that someone else already holds PENDING. */
		oflags = cmpxchg(&work->flags, flags, nflags);
		if (oflags == flags)
			break;
		if (oflags & IRQ_WORK_PENDING)
			return false;
		flags = oflags;
		cpu_relax();
	}

	return true;
}

void __weak arch_irq_work_raise(void)
{
	/*
	 * Lame architectures will get the timer tick callback
	 */
}

/*
 * Enqueue the irq_work @entry unless it's already pending
 * somewhere.
 *
 * Can be re-enqueued while the callback is still in progress.
 */
void irq_work_queue(struct irq_work *work)
{
	/* Only queue if not already pending */
	if (!irq_work_claim(work))
		return;

	/* Queue the entry and raise the IPI if needed. */
	preempt_disable();

	llist_add(&work->llnode, &__get_cpu_var(irq_work_list));

	/*
	 * If the work is not "lazy" or the tick is stopped, raise the irq
	 * work interrupt (if supported by the arch), otherwise, just wait
	 * for the next tick.
	 */
	if (!(work->flags & IRQ_WORK_LAZY) || tick_nohz_tick_stopped()) {
		/* The cmpxchg on irq_work_raised makes sure only one CPU path
		 * actually raises the arch interrupt per pending batch. */
		if (!this_cpu_cmpxchg(irq_work_raised, 0, 1))
			arch_irq_work_raise();
	}

	preempt_enable();
}
EXPORT_SYMBOL_GPL(irq_work_queue);

/* Returns true when this CPU still has queued irq_work and thus must not
 * stop its tick (caller is the nohz code); warns if called on an offline
 * CPU with work remaining. */
bool irq_work_needs_cpu(void)
{
	struct llist_head *this_list;

	this_list = &__get_cpu_var(irq_work_list);
	if (llist_empty(this_list))
		return false;

	/* All work should have been flushed before going offline */
	WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));

	return true;
}

/* Drain and execute every queued item on this CPU.  Must run with IRQs
 * disabled; shared by the hardirq path and the CPU_DYING notifier. */
static void __irq_work_run(void)
{
	unsigned long flags;
	struct irq_work *work;
	struct llist_head *this_list;
	struct llist_node *llnode;

	/*
	 * Reset the "raised" state right before we check the list because
	 * an NMI may enqueue after we find the list empty from the runner.
	 */
	__this_cpu_write(irq_work_raised, 0);
	barrier();

	this_list = &__get_cpu_var(irq_work_list);
	if (llist_empty(this_list))
		return;

	BUG_ON(!irqs_disabled());

	llnode = llist_del_all(this_list);
	while (llnode != NULL) {
		work = llist_entry(llnode, struct irq_work, llnode);

		llnode = llist_next(llnode);

		/*
		 * Clear the PENDING bit, after this point the @work
		 * can be re-used.
		 * Make it immediately visible so that other CPUs trying
		 * to claim that work don't rely on us to handle their data
		 * while we are in the middle of the func.
		 */
		flags = work->flags & ~IRQ_WORK_PENDING;
		xchg(&work->flags, flags);

		work->func(work);
		/*
		 * Clear the BUSY bit and return to the free state if
		 * no-one else claimed it meanwhile.
		 */
		(void)cmpxchg(&work->flags, flags, flags & ~IRQ_WORK_BUSY);
	}
}

/*
 * Run the irq_work entries on this cpu. Requires to be ran from hardirq
 * context with local IRQs disabled.
 */
void irq_work_run(void)
{
	BUG_ON(!in_irq());
	__irq_work_run();
}
EXPORT_SYMBOL_GPL(irq_work_run);

/*
 * Synchronize against the irq_work @entry, ensures the entry is not
 * currently in use.
 */
void irq_work_sync(struct irq_work *work)
{
	WARN_ON_ONCE(irqs_disabled());

	/* Busy-wait for the callback to finish on whatever CPU runs it. */
	while (work->flags & IRQ_WORK_BUSY)
		cpu_relax();
}
EXPORT_SYMBOL_GPL(irq_work_sync);

#ifdef CONFIG_HOTPLUG_CPU
/* Hotplug callback: flush this CPU's pending irq_work while it is dying
 * (runs from stop_machine, so it executes on the dying CPU itself). */
static int irq_work_cpu_notify(struct notifier_block *self,
			       unsigned long action, void *hcpu)
{
	long cpu = (long)hcpu;

	switch (action) {
	case CPU_DYING:
		/* Called from stop_machine */
		if (WARN_ON_ONCE(cpu != smp_processor_id()))
			break;
		__irq_work_run();
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block cpu_notify;

static __init int irq_work_init_cpu_notifier(void)
{
	cpu_notify.notifier_call = irq_work_cpu_notify;
	cpu_notify.priority = 0;
	register_cpu_notifier(&cpu_notify);
	return 0;
}
device_initcall(irq_work_init_cpu_notifier);

#endif /* CONFIG_HOTPLUG_CPU */
gpl-2.0
weritos666/kernel_L7_II_KK_P715
arch/powerpc/platforms/wsp/wsp_pci.c
4593
33509
/*
 * Copyright 2010 Ben Herrenschmidt, IBM Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define DEBUG

#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/debugfs.h>

#include <asm/sections.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/ppc-pci.h>
#include <asm/iommu.h>
#include <asm/io-workarounds.h>
#include <asm/debug.h>

#include "wsp.h"
#include "wsp_pci.h"
#include "msi.h"


/* Max number of TVTs for one table. Only 32-bit tables can use
 * multiple TVTs and so the max currently supported is thus 8
 * since only 2G of DMA space is supported
 */
#define MAX_TABLE_TVT_COUNT		8

/* One DMA window: an iommu_table backed by up to MAX_TABLE_TVT_COUNT
 * TCE pages, linked into the owning PHB's dma_tables list. */
struct wsp_dma_table {
	struct list_head	link;
	struct iommu_table	table;
	struct wsp_phb	*phb;
	struct page		*tces[MAX_TABLE_TVT_COUNT];
};

/* We support DMA regions from 0...2G in 32bit space (no support for
 * 64-bit DMA just yet). Each device gets a separate TCE table (TVT
 * entry) with validation enabled (though not supported by SimiCS
 * just yet).
 *
 * To simplify things, we divide this 2G space into N regions based
 * on the constant below which could be turned into a tunable eventually
 *
 * We then assign dynamically those regions to devices as they show up.
 *
 * We use a bitmap as an allocator for these.
 *
 * Tables are allocated/created dynamically as devices are discovered,
 * multiple TVT entries are used if needed
 *
 * When 64-bit DMA support is added we should simply use a separate set
 * of larger regions (the HW supports 64 TVT entries). We can
 * additionally create a bypass region in 64-bit space for performances
 * though that would have a cost in term of security.
 *
 * If you set NUM_DMA32_REGIONS to 1, then a single table is shared
 * for all devices and bus/dev/fn validation is disabled
 *
 * Note that a DMA32 region cannot be smaller than 256M so the max
 * supported here for now is 8. We don't yet support sharing regions
 * between multiple devices so the max number of devices supported
 * is MAX_TABLE_TVT_COUNT.
 */
#define NUM_DMA32_REGIONS	1

/* Per host-bridge state for a WSP PCIe PHB. */
struct wsp_phb {
	struct pci_controller	*hose;

	/* Lock controlling access to the list of dma tables.
	 * It does -not- protect against dma_* operations on
	 * those tables, those should be stopped before an entry
	 * is removed from the list.
	 *
	 * The lock is also used for error handling operations
	 */
	spinlock_t		lock;
	struct list_head	dma_tables;
	unsigned long		dma32_map;
	unsigned long		dma32_base;
	unsigned int		dma32_num_regions;
	unsigned long		dma32_region_size;

	/* Debugfs stuff */
	struct dentry		*ddir;

	struct list_head	all;
};
static LIST_HEAD(wsp_phbs);

//#define cfg_debug(fmt...)	pr_debug(fmt)
#define cfg_debug(fmt...)

/* Config-space read: builds a CA (config address) value, writes it to the
 * PHB's CONFIG_ADDRESS register and reads the (little-endian) data register,
 * shifting/masking out the requested 1/2/4-byte lane. */
static int wsp_pcie_read_config(struct pci_bus *bus, unsigned int devfn,
				int offset, int len, u32 *val)
{
	struct pci_controller *hose;
	int suboff;
	u64 addr;

	hose = pci_bus_to_host(bus);
	if (hose == NULL)
		return PCIBIOS_DEVICE_NOT_FOUND;
	if (offset >= 0x1000)
		return  PCIBIOS_BAD_REGISTER_NUMBER;
	addr = PCIE_REG_CA_ENABLE |
		((u64)bus->number) << PCIE_REG_CA_BUS_SHIFT |
		((u64)devfn) << PCIE_REG_CA_FUNC_SHIFT |
		((u64)offset & ~3) << PCIE_REG_CA_REG_SHIFT;
	suboff = offset & 3;

	/*
	 * Note: the caller has already checked that offset is
	 * suitably aligned and that len is 1, 2 or 4.
	 */

	switch (len) {
	case 1:
		/* Byte-enable mask selects the single requested byte lane */
		addr |= (0x8ul >> suboff) << PCIE_REG_CA_BE_SHIFT;
		out_be64(hose->cfg_data + PCIE_REG_CONFIG_ADDRESS, addr);
		*val = (in_le32(hose->cfg_data + PCIE_REG_CONFIG_DATA)
			>> (suboff << 3)) & 0xff;
		cfg_debug("read 1 %02x:%02x:%02x + %02x/%x addr=0x%llx val=%02x\n",
			  bus->number, devfn >> 3, devfn & 7,
			  offset, suboff, addr, *val);
		break;
	case 2:
		addr |= (0xcul >> suboff) << PCIE_REG_CA_BE_SHIFT;
		out_be64(hose->cfg_data + PCIE_REG_CONFIG_ADDRESS, addr);
		*val = (in_le32(hose->cfg_data + PCIE_REG_CONFIG_DATA)
			>> (suboff << 3)) & 0xffff;
		cfg_debug("read 2 %02x:%02x:%02x + %02x/%x addr=0x%llx val=%04x\n",
			  bus->number, devfn >> 3, devfn & 7,
			  offset, suboff, addr, *val);
		break;
	default:
		addr |= 0xful << PCIE_REG_CA_BE_SHIFT;
		out_be64(hose->cfg_data + PCIE_REG_CONFIG_ADDRESS, addr);
		*val = in_le32(hose->cfg_data + PCIE_REG_CONFIG_DATA);
		cfg_debug("read 4 %02x:%02x:%02x + %02x/%x addr=0x%llx val=%08x\n",
			  bus->number, devfn >> 3, devfn & 7,
			  offset, suboff, addr, *val);
		break;
	}
	return PCIBIOS_SUCCESSFUL;
}

/* Config-space write: mirror of wsp_pcie_read_config(), shifting the value
 * into the correct byte lane before writing the data register. */
static int wsp_pcie_write_config(struct pci_bus *bus, unsigned int devfn,
				 int offset, int len, u32 val)
{
	struct pci_controller *hose;
	int suboff;
	u64 addr;

	hose = pci_bus_to_host(bus);
	if (hose == NULL)
		return PCIBIOS_DEVICE_NOT_FOUND;
	if (offset >= 0x1000)
		return  PCIBIOS_BAD_REGISTER_NUMBER;
	addr = PCIE_REG_CA_ENABLE |
		((u64)bus->number) << PCIE_REG_CA_BUS_SHIFT |
		((u64)devfn) << PCIE_REG_CA_FUNC_SHIFT |
		((u64)offset & ~3) << PCIE_REG_CA_REG_SHIFT;
	suboff = offset & 3;

	/*
	 * Note: the caller has already checked that offset is
	 * suitably aligned and that len is 1, 2 or 4.
	 */
	switch (len) {
	case 1:
		addr |= (0x8ul >> suboff) << PCIE_REG_CA_BE_SHIFT;
		val <<= suboff << 3;
		out_be64(hose->cfg_data + PCIE_REG_CONFIG_ADDRESS, addr);
		out_le32(hose->cfg_data + PCIE_REG_CONFIG_DATA, val);
		cfg_debug("write 1 %02x:%02x:%02x + %02x/%x addr=0x%llx val=%02x\n",
			  bus->number, devfn >> 3, devfn & 7,
			  offset, suboff, addr, val);
		break;
	case 2:
		addr |= (0xcul >> suboff) << PCIE_REG_CA_BE_SHIFT;
		val <<= suboff << 3;
		out_be64(hose->cfg_data + PCIE_REG_CONFIG_ADDRESS, addr);
		out_le32(hose->cfg_data + PCIE_REG_CONFIG_DATA, val);
		cfg_debug("write 2 %02x:%02x:%02x + %02x/%x addr=0x%llx val=%04x\n",
			  bus->number, devfn >> 3, devfn & 7,
			  offset, suboff, addr, val);
		break;
	default:
		addr |= 0xful << PCIE_REG_CA_BE_SHIFT;
		out_be64(hose->cfg_data + PCIE_REG_CONFIG_ADDRESS, addr);
		out_le32(hose->cfg_data + PCIE_REG_CONFIG_DATA, val);
		cfg_debug("write 4 %02x:%02x:%02x + %02x/%x addr=0x%llx val=%08x\n",
			  bus->number, devfn >> 3, devfn & 7,
			  offset, suboff, addr, val);
		break;
	}
	return PCIBIOS_SUCCESSFUL;
}

static struct pci_ops wsp_pcie_pci_ops =
{
	.read = wsp_pcie_read_config,
	.write = wsp_pcie_write_config,
};

#define TCE_SHIFT		12
#define TCE_PAGE_SIZE		(1 << TCE_SHIFT)
#define TCE_PCI_WRITE		0x2		 /* write from PCI allowed */
#define TCE_PCI_READ		0x1	 	 /* read from PCI allowed */
#define TCE_RPN_MASK		0x3fffffffffful  /* 42-bit RPN (4K pages) */
#define TCE_RPN_SHIFT		12

//#define dma_debug(fmt...)	pr_debug(fmt)
#define dma_debug(fmt...)

/* iommu tce_build hook: fill @npages TCE entries starting at @index with
 * the real page numbers of @uaddr, with R/W permission bits per direction
 * (DD1 workaround forces write-enable unconditionally). */
static int tce_build_wsp(struct iommu_table *tbl, long index, long npages,
			   unsigned long uaddr, enum dma_data_direction direction,
			   struct dma_attrs *attrs)
{
	struct wsp_dma_table *ptbl = container_of(tbl,
						    struct wsp_dma_table,
						    table);
	u64 proto_tce;
	u64 *tcep;
	u64 rpn;

	proto_tce = TCE_PCI_READ;
#ifdef CONFIG_WSP_DD1_WORKAROUND_DD1_TCE_BUGS
	proto_tce |= TCE_PCI_WRITE;
#else
	if (direction != DMA_TO_DEVICE)
		proto_tce |= TCE_PCI_WRITE;
#endif

	/* XXX Make this faster by factoring out the page address for
	 * within a TCE table
	 */
	while (npages--) {
		/* We don't use it->base as the table can be scattered */
		tcep = (u64 *)page_address(ptbl->tces[index >> 16]);
		tcep += (index & 0xffff);

		/* can't move this out since we might cross LMB boundary */
		rpn = __pa(uaddr) >> TCE_SHIFT;
		*tcep = proto_tce | (rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT;

		dma_debug("[DMA] TCE %p set to 0x%016llx (dma addr: 0x%lx)\n",
			  tcep, *tcep, (tbl->it_offset + index) <<
			  IOMMU_PAGE_SHIFT);

		uaddr += TCE_PAGE_SIZE;
		index++;
	}
	return 0;
}

/* iommu tce_free hook: zero @npages entries and, except under the DD1
 * workaround, kill the matching TCE cache line in hardware. */
static void tce_free_wsp(struct iommu_table *tbl, long index, long npages)
{
	struct wsp_dma_table *ptbl = container_of(tbl,
						    struct wsp_dma_table,
						    table);
#ifndef CONFIG_WSP_DD1_WORKAROUND_DD1_TCE_BUGS
	struct pci_controller *hose = ptbl->phb->hose;
#endif
	u64 *tcep;

	/* XXX Make this faster by factoring out the page address for
	 * within a TCE table. Also use line-kill option to kill multiple
	 * TCEs at once
	 */
	while (npages--) {
		/* We don't use it->base as the table can be scattered */
		tcep = (u64 *)page_address(ptbl->tces[index >> 16]);
		tcep += (index & 0xffff);
		dma_debug("[DMA] TCE %p cleared\n", tcep);
		*tcep = 0;
#ifndef CONFIG_WSP_DD1_WORKAROUND_DD1_TCE_BUGS
		/* Don't write there since it would pollute other MMIO accesses */
		out_be64(hose->cfg_data + PCIE_REG_TCE_KILL,
			 PCIE_REG_TCEKILL_SINGLE | PCIE_REG_TCEKILL_PS_4K |
			 (__pa(tcep) & PCIE_REG_TCEKILL_ADDR_MASK));
#endif
		index++;
	}
}

/* Allocate and program one 32-bit DMA window (@region) of a PHB: allocates
 * TCE pages on the PHB's NUMA node, writes the TVT entries (optionally with
 * bus/dev/fn validation against @validate) and registers the iommu table. */
static struct wsp_dma_table *wsp_pci_create_dma32_table(struct wsp_phb *phb,
							unsigned int region,
							struct pci_dev *validate)
{
	struct pci_controller *hose = phb->hose;
	unsigned long size = phb->dma32_region_size;
	unsigned long addr = phb->dma32_region_size * region + phb->dma32_base;
	struct wsp_dma_table *tbl;
	int tvts_per_table, i, tvt, nid;
	unsigned long flags;

	nid = of_node_to_nid(phb->hose->dn);

	/* Calculate how many TVTs are needed */
	tvts_per_table = size / 0x10000000;
	if (tvts_per_table == 0)
		tvts_per_table = 1;

	/* Calculate the base TVT index. We know all tables have the same
	 * size so we just do a simple multiply here
	 */
	tvt = region * tvts_per_table;

	pr_debug("         Region : %d\n", region);
	pr_debug("      DMA range : 0x%08lx..0x%08lx\n", addr, addr + size - 1);
	pr_debug(" Number of TVTs : %d\n", tvts_per_table);
	pr_debug("       Base TVT : %d\n", tvt);
	pr_debug("         Node   : %d\n", nid);

	tbl = kzalloc_node(sizeof(struct wsp_dma_table), GFP_KERNEL, nid);
	if (!tbl)
		return ERR_PTR(-ENOMEM);
	tbl->phb = phb;

	/* Create as many TVTs as needed, each represents 256M at most */
	for (i = 0; i < tvts_per_table; i++) {
		u64 tvt_data1, tvt_data0;

		/* Allocate table. We use a 4K TCE size for now always so
		 * one table is always 8 * (258M / 4K) == 512K
		 */
		tbl->tces[i] = alloc_pages_node(nid, GFP_KERNEL, get_order(0x80000));
		if (tbl->tces[i] == NULL)
			goto fail;
		memset(page_address(tbl->tces[i]), 0, 0x80000);

		pr_debug(" TCE table %d at : %p\n", i, page_address(tbl->tces[i]));

		/* Table size. We currently set it to be the whole 256M region */
		tvt_data0 = 2ull << IODA_TVT0_TCE_TABLE_SIZE_SHIFT;
		/* IO page size set to 4K */
		tvt_data1 = 1ull << IODA_TVT1_IO_PAGE_SIZE_SHIFT;
		/* Shift in the address */
		tvt_data0 |= __pa(page_address(tbl->tces[i])) << IODA_TVT0_TTA_SHIFT;

		/* Validation stuff. We only validate fully bus/dev/fn for now
		 * one day maybe we can group devices but that isn't the case
		 * at the moment
		 */
		if (validate) {
			tvt_data0 |= IODA_TVT0_BUSNUM_VALID_MASK;
			tvt_data0 |= validate->bus->number;
			tvt_data1 |= IODA_TVT1_DEVNUM_VALID;
			tvt_data1 |= ((u64)PCI_SLOT(validate->devfn))
				<< IODA_TVT1_DEVNUM_VALUE_SHIFT;
			tvt_data1 |= IODA_TVT1_FUNCNUM_VALID;
			tvt_data1 |= ((u64)PCI_FUNC(validate->devfn))
				<< IODA_TVT1_FUNCNUM_VALUE_SHIFT;
		}

		/* XX PE number is always 0 for now */

		/* Program the values using the PHB lock */
		spin_lock_irqsave(&phb->lock, flags);
		out_be64(hose->cfg_data + PCIE_REG_IODA_ADDR,
			 (tvt + i) | PCIE_REG_IODA_AD_TBL_TVT);
		out_be64(hose->cfg_data + PCIE_REG_IODA_DATA1, tvt_data1);
		out_be64(hose->cfg_data + PCIE_REG_IODA_DATA0, tvt_data0);
		spin_unlock_irqrestore(&phb->lock, flags);
	}

	/* Init bits and pieces */
	tbl->table.it_blocksize = 16;
	tbl->table.it_offset = addr >> IOMMU_PAGE_SHIFT;
	tbl->table.it_size = size >> IOMMU_PAGE_SHIFT;

	/*
	 * It's already blank but we clear it anyway.
	 * Consider an aditiona interface that makes cleaing optional
	 */
	iommu_init_table(&tbl->table, nid);

	list_add(&tbl->link, &phb->dma_tables);
	return tbl;

 fail:
	pr_debug(" Failed to allocate a 256M TCE table !\n");
	for (i = 0; i < tvts_per_table; i++)
		if (tbl->tces[i])
			__free_pages(tbl->tces[i], get_order(0x80000));
	kfree(tbl);
	return ERR_PTR(-ENOMEM);
}

/* ppc_md hook called for each discovered PCI device: assign it a DMA
 * window (shared when dma32_num_regions == 1, otherwise a free per-device
 * region from the bitmap) and point archdata at the resulting iommu table. */
static void __devinit wsp_pci_dma_dev_setup(struct pci_dev *pdev)
{
	struct dev_archdata *archdata = &pdev->dev.archdata;
	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
	struct wsp_phb *phb = hose->private_data;
	struct wsp_dma_table *table = NULL;
	unsigned long flags;
	int i;

	/* Don't assign an iommu table to a bridge */
	if (pdev->hdr_type == PCI_HEADER_TYPE_BRIDGE)
		return;

	pr_debug("%s: Setting up DMA...\n", pci_name(pdev));

	spin_lock_irqsave(&phb->lock, flags);

	/* If only one region, check if it already exist */
	if (phb->dma32_num_regions == 1) {
		spin_unlock_irqrestore(&phb->lock, flags);
		if (list_empty(&phb->dma_tables))
			table = wsp_pci_create_dma32_table(phb, 0, NULL);
		else
			table = list_first_entry(&phb->dma_tables,
						 struct wsp_dma_table,
						 link);
	} else {
		/* else find a free region */
		for (i = 0; i < phb->dma32_num_regions && !table; i++) {
			if (__test_and_set_bit(i, &phb->dma32_map))
				continue;
			spin_unlock_irqrestore(&phb->lock, flags);
			table = wsp_pci_create_dma32_table(phb, i, pdev);
		}
	}

	/* Check if we got an error */
	if (IS_ERR(table)) {
		pr_err("%s: Failed to create DMA table, err %ld !\n",
		       pci_name(pdev), PTR_ERR(table));
		return;
	}

	/* Or a valid table */
	if (table) {
		pr_info("%s: Setup iommu: 32-bit DMA region 0x%08lx..0x%08lx\n",
			pci_name(pdev),
			table->table.it_offset << IOMMU_PAGE_SHIFT,
			(table->table.it_offset << IOMMU_PAGE_SHIFT)
			+ phb->dma32_region_size - 1);
		archdata->dma_data.iommu_table_base = &table->table;
		return;
	}

	/* Or no room */
	spin_unlock_irqrestore(&phb->lock, flags);
	pr_err("%s: Out of DMA space !\n", pci_name(pdev));
}

/* One-time hardware bring-up of a PHB: fix the root-port class code,
 * program IO/M32A windows, clear all TVT entries, enable MSIs and error
 * reporting, and mask errors known to fire during config-space probing. */
static void __init wsp_pcie_configure_hw(struct pci_controller *hose)
{
	u64 val;
	int i;

#define DUMP_REG(x) \
	pr_debug("%-30s : 0x%016llx\n", #x, in_be64(hose->cfg_data + x))

	/*
	 * Some WSP variants  has a bogus class code by default in the PCI-E
	 * root complex's built-in P2P bridge
	 */
	val = in_be64(hose->cfg_data + PCIE_REG_SYS_CFG1);
	pr_debug("PCI-E SYS_CFG1 : 0x%llx\n", val);
	out_be64(hose->cfg_data + PCIE_REG_SYS_CFG1,
		 (val & ~PCIE_REG_SYS_CFG1_CLASS_CODE) | (PCI_CLASS_BRIDGE_PCI << 8));
	pr_debug("PCI-E SYS_CFG1 : 0x%llx\n", in_be64(hose->cfg_data + PCIE_REG_SYS_CFG1));

#ifdef CONFIG_WSP_DD1_WORKAROUND_DD1_TCE_BUGS
	/* XXX Disable TCE caching, it doesn't work on DD1 */
	out_be64(hose->cfg_data + 0xe50,
		 in_be64(hose->cfg_data + 0xe50) | (3ull << 62));
	printk("PCI-E DEBUG CONTROL 5 = 0x%llx\n", in_be64(hose->cfg_data + 0xe50));
#endif

	/* Configure M32A and IO. IO is hard wired to be 1M for now */
	out_be64(hose->cfg_data + PCIE_REG_IO_BASE_ADDR, hose->io_base_phys);
	out_be64(hose->cfg_data + PCIE_REG_IO_BASE_MASK,
		 (~(hose->io_resource.end - hose->io_resource.start)) &
		 0x3fffffff000ul);
	out_be64(hose->cfg_data + PCIE_REG_IO_START_ADDR, 0 | 1);

	out_be64(hose->cfg_data + PCIE_REG_M32A_BASE_ADDR,
		 hose->mem_resources[0].start);
	printk("Want to write to M32A_BASE_MASK : 0x%llx\n",
	       (~(hose->mem_resources[0].end -
		  hose->mem_resources[0].start)) & 0x3ffffff0000ul);
	out_be64(hose->cfg_data + PCIE_REG_M32A_BASE_MASK,
		 (~(hose->mem_resources[0].end -
		    hose->mem_resources[0].start)) & 0x3ffffff0000ul);
	out_be64(hose->cfg_data + PCIE_REG_M32A_START_ADDR,
		 (hose->mem_resources[0].start - hose->pci_mem_offset) | 1);

	/* Clear all TVT entries
	 *
	 * XX Might get TVT count from device-tree
	 */
	for (i = 0; i < IODA_TVT_COUNT; i++) {
		out_be64(hose->cfg_data + PCIE_REG_IODA_ADDR,
			 PCIE_REG_IODA_AD_TBL_TVT | i);
		out_be64(hose->cfg_data + PCIE_REG_IODA_DATA1, 0);
		out_be64(hose->cfg_data + PCIE_REG_IODA_DATA0, 0);
	}

	/* Kill the TCE cache */
	out_be64(hose->cfg_data + PCIE_REG_PHB_CONFIG,
		 in_be64(hose->cfg_data + PCIE_REG_PHB_CONFIG) |
		 PCIE_REG_PHBC_64B_TCE_EN);

	/* Enable 32 & 64-bit MSIs, IO space and M32A */
	val = PCIE_REG_PHBC_32BIT_MSI_EN |
	      PCIE_REG_PHBC_IO_EN |
	      PCIE_REG_PHBC_64BIT_MSI_EN |
	      PCIE_REG_PHBC_M32A_EN;
	if (iommu_is_off)
		val |= PCIE_REG_PHBC_DMA_XLATE_BYPASS;
	pr_debug("Will write config: 0x%llx\n", val);
	out_be64(hose->cfg_data + PCIE_REG_PHB_CONFIG, val);

	/* Enable error reporting */
	out_be64(hose->cfg_data + 0xe00,
		 in_be64(hose->cfg_data + 0xe00) | 0x0008000000000000ull);

	/* Mask an error that's generated when doing config space probe
	 *
	 * XXX Maybe we should only mask it around config space cycles... that or
	 * ignore it when we know we had a config space cycle recently ?
	 */
	out_be64(hose->cfg_data + PCIE_REG_DMA_ERR_STATUS_MASK, 0x8000000000000000ull);
	out_be64(hose->cfg_data + PCIE_REG_DMA_ERR1_STATUS_MASK, 0x8000000000000000ull);

	/* Enable UTL errors, for now, all of them got to UTL irq 1
	 *
	 * We similarily mask one UTL error caused apparently during normal
	 * probing. We also mask the link up error
	 */
	out_be64(hose->cfg_data + PCIE_UTL_SYS_BUS_AGENT_ERR_SEV, 0);
	out_be64(hose->cfg_data + PCIE_UTL_RC_ERR_SEVERITY, 0);
	out_be64(hose->cfg_data + PCIE_UTL_PCIE_PORT_ERROR_SEV, 0);
	out_be64(hose->cfg_data + PCIE_UTL_SYS_BUS_AGENT_IRQ_EN, 0xffffffff00000000ull);
	out_be64(hose->cfg_data + PCIE_UTL_PCIE_PORT_IRQ_EN, 0xff5fffff00000000ull);
	out_be64(hose->cfg_data + PCIE_UTL_EP_ERR_IRQ_EN, 0xffffffff00000000ull);

	DUMP_REG(PCIE_REG_IO_BASE_ADDR);
	DUMP_REG(PCIE_REG_IO_BASE_MASK);
	DUMP_REG(PCIE_REG_IO_START_ADDR);
	DUMP_REG(PCIE_REG_M32A_BASE_ADDR);
	DUMP_REG(PCIE_REG_M32A_BASE_MASK);
	DUMP_REG(PCIE_REG_M32A_START_ADDR);
	DUMP_REG(PCIE_REG_M32B_BASE_ADDR);
	DUMP_REG(PCIE_REG_M32B_BASE_MASK);
	DUMP_REG(PCIE_REG_M32B_START_ADDR);
	DUMP_REG(PCIE_REG_M64_BASE_ADDR);
	DUMP_REG(PCIE_REG_M64_BASE_MASK);
	DUMP_REG(PCIE_REG_M64_START_ADDR);
	DUMP_REG(PCIE_REG_PHB_CONFIG);
}

/* Poll (up to ~10ms) for the PHB's outstanding-IO indication to clear
 * before issuing a PIO access; warns on timeout but does not fail. */
static void wsp_pci_wait_io_idle(struct wsp_phb *phb, unsigned long port)
{
	u64 val;
	int i;

	for (i = 0; i < 10000; i++) {
		val = in_be64(phb->hose->cfg_data + 0xe08);
		if ((val & 0x1900000000000000ull) == 0x0100000000000000ull)
			return;
		udelay(1);
	}
	pr_warning("PCI IO timeout on domain %d port 0x%lx\n",
		   phb->hose->global_number, port);
}

/* Template for PIO accessors with a return value: serialize against the
 * PHB lock and wait for IO idle before performing the raw access. */
#define DEF_PCI_AC_RET_pio(name, ret, at, al, aa)	\
static ret wsp_pci_##name at				\
{							\
	struct iowa_bus *bus;				\
	struct wsp_phb *phb;				\
	unsigned long flags;				\
	ret rval;					\
	bus = iowa_pio_find_bus(aa);			\
	WARN_ON(!bus);					\
	phb = bus->private;				\
	spin_lock_irqsave(&phb->lock, flags);		\
	wsp_pci_wait_io_idle(phb, aa);			\
	rval = __do_##name al;				\
	spin_unlock_irqrestore(&phb->lock, flags);	\
	return rval;					\
}

/* Same template for void PIO accessors. */
#define DEF_PCI_AC_NORET_pio(name, at, al, aa)		\
static void wsp_pci_##name at				\
{							\
	struct iowa_bus *bus;				\
	struct wsp_phb *phb;				\
	unsigned long flags;				\
	bus = iowa_pio_find_bus(aa);			\
	WARN_ON(!bus);					\
	phb = bus->private;				\
	spin_lock_irqsave(&phb->lock, flags);		\
	wsp_pci_wait_io_idle(phb, aa);			\
	__do_##name al;					\
	spin_unlock_irqrestore(&phb->lock, flags);	\
}

/* MMIO accessors need no workaround: expand to nothing. */
#define DEF_PCI_AC_RET_mem(name, ret, at, al, aa)
#define DEF_PCI_AC_NORET_mem(name, at, al, aa)

#define DEF_PCI_AC_RET(name, ret, at, al, space, aa)	\
	DEF_PCI_AC_RET_##space(name, ret, at, al, aa)

#define DEF_PCI_AC_NORET(name, at, al, space, aa)	\
	DEF_PCI_AC_NORET_##space(name, at, al, aa)	\

#include <asm/io-defs.h>

#undef DEF_PCI_AC_RET
#undef DEF_PCI_AC_NORET

static struct ppc_pci_io wsp_pci_iops = {
	.inb = wsp_pci_inb,
	.inw = wsp_pci_inw,
	.inl = wsp_pci_inl,
	.outb = wsp_pci_outb,
	.outw = wsp_pci_outw,
	.outl = wsp_pci_outl,
	.insb = wsp_pci_insb,
	.insw = wsp_pci_insw,
	.insl = wsp_pci_insl,
	.outsb = wsp_pci_outsb,
	.outsw = wsp_pci_outsw,
	.outsl = wsp_pci_outsl,
};

/* Create and initialize one PHB from its device-tree node: allocate the
 * wsp_phb/pci_controller pair, map registers, size the DMA regions,
 * configure the hardware and register the PIO workarounds and MSIs. */
static int __init wsp_setup_one_phb(struct device_node *np)
{
	struct pci_controller *hose;
	struct wsp_phb *phb;

	pr_info("PCI: Setting up PCIe host bridge 0x%s\n", np->full_name);

	phb = zalloc_maybe_bootmem(sizeof(struct wsp_phb), GFP_KERNEL);
	if (!phb)
		return -ENOMEM;
	hose = pcibios_alloc_controller(np);
	if (!hose) {
		/* Can't really free the phb */
		return -ENOMEM;
	}
	hose->private_data = phb;
	phb->hose = hose;

	INIT_LIST_HEAD(&phb->dma_tables);
	spin_lock_init(&phb->lock);

	/* XXX Use bus-range property ? */
	hose->first_busno = 0;
	hose->last_busno = 0xff;

	/* We use cfg_data as the address for the whole bridge MMIO space
	 */
	hose->cfg_data = of_iomap(hose->dn, 0);

	pr_debug("PCIe registers mapped at 0x%p\n", hose->cfg_data);

	/* Get the ranges of the device-tree */
	pci_process_bridge_OF_ranges(hose, np, 0);

	/* XXX Force re-assigning of everything for now */
	pci_add_flags(PCI_REASSIGN_ALL_BUS | PCI_REASSIGN_ALL_RSRC |
		      PCI_ENABLE_PROC_DOMAINS);

	/* Calculate how the TCE space is divided */
	phb->dma32_base		= 0;
	phb->dma32_num_regions	= NUM_DMA32_REGIONS;
	if (phb->dma32_num_regions > MAX_TABLE_TVT_COUNT) {
		pr_warning("IOMMU: Clamped to %d DMA32 regions\n",
			   MAX_TABLE_TVT_COUNT);
		phb->dma32_num_regions = MAX_TABLE_TVT_COUNT;
	}
	phb->dma32_region_size	= 0x80000000 / phb->dma32_num_regions;

	BUG_ON(!is_power_of_2(phb->dma32_region_size));

	/* Setup config ops */
	hose->ops = &wsp_pcie_pci_ops;

	/* Configure the HW */
	wsp_pcie_configure_hw(hose);

	/* Instanciate IO workarounds */
	iowa_register_bus(hose, &wsp_pci_iops, NULL, phb);
#ifdef CONFIG_PCI_MSI
	wsp_setup_phb_msi(hose);
#endif

	/* Add to global list */
	list_add(&phb->all, &wsp_phbs);

	return 0;
}

/* Platform entry point: find all PCIe bridges in the device tree, set up
 * each PHB, then select either direct DMA (iommu off) or TCE-based DMA ops. */
void __init wsp_setup_pci(void)
{
	struct device_node *np;
	int rc;

	/* Find host bridges */
	for_each_compatible_node(np, "pciex", PCIE_COMPATIBLE) {
		rc = wsp_setup_one_phb(np);
		if (rc)
			pr_err("Failed to setup PCIe bridge %s, rc=%d\n",
			       np->full_name, rc);
	}

	/* Establish device-tree linkage */
	pci_devs_phb_init();

	/* Set DMA ops to use TCEs */
	if (iommu_is_off) {
		pr_info("PCI-E: Disabled TCEs, using direct DMA\n");
		set_pci_dma_ops(&dma_direct_ops);
	} else {
		ppc_md.pci_dma_dev_setup = wsp_pci_dma_dev_setup;
		ppc_md.tce_build = tce_build_wsp;
		ppc_md.tce_free = tce_free_wsp;
		set_pci_dma_ops(&dma_iommu_ops);
	}
}

#define err_debug(fmt...)	pr_debug(fmt)
//#define err_debug(fmt...)
static int __init wsp_pci_get_err_irq_no_dt(struct device_node *np) { const u32 *prop; int hw_irq; /* Ok, no interrupts property, let's try to find our child P2P */ np = of_get_next_child(np, NULL); if (np == NULL) return 0; /* Grab it's interrupt map */ prop = of_get_property(np, "interrupt-map", NULL); if (prop == NULL) return 0; /* Grab one of the interrupts in there, keep the low 4 bits */ hw_irq = prop[5] & 0xf; /* 0..4 for PHB 0 and 5..9 for PHB 1 */ if (hw_irq < 5) hw_irq = 4; else hw_irq = 9; hw_irq |= prop[5] & ~0xf; err_debug("PCI: Using 0x%x as error IRQ for %s\n", hw_irq, np->parent->full_name); return irq_create_mapping(NULL, hw_irq); } static const struct { u32 offset; const char *name; } wsp_pci_regs[] = { #define DREG(x) { PCIE_REG_##x, #x } #define DUTL(x) { PCIE_UTL_##x, "UTL_" #x } /* Architected registers except CONFIG_ and IODA * to avoid side effects */ DREG(DMA_CHAN_STATUS), DREG(CPU_LOADSTORE_STATUS), DREG(LOCK0), DREG(LOCK1), DREG(PHB_CONFIG), DREG(IO_BASE_ADDR), DREG(IO_BASE_MASK), DREG(IO_START_ADDR), DREG(M32A_BASE_ADDR), DREG(M32A_BASE_MASK), DREG(M32A_START_ADDR), DREG(M32B_BASE_ADDR), DREG(M32B_BASE_MASK), DREG(M32B_START_ADDR), DREG(M64_BASE_ADDR), DREG(M64_BASE_MASK), DREG(M64_START_ADDR), DREG(TCE_KILL), DREG(LOCK2), DREG(PHB_GEN_CAP), DREG(PHB_TCE_CAP), DREG(PHB_IRQ_CAP), DREG(PHB_EEH_CAP), DREG(PAPR_ERR_INJ_CONTROL), DREG(PAPR_ERR_INJ_ADDR), DREG(PAPR_ERR_INJ_MASK), /* UTL core regs */ DUTL(SYS_BUS_CONTROL), DUTL(STATUS), DUTL(SYS_BUS_AGENT_STATUS), DUTL(SYS_BUS_AGENT_ERR_SEV), DUTL(SYS_BUS_AGENT_IRQ_EN), DUTL(SYS_BUS_BURST_SZ_CONF), DUTL(REVISION_ID), DUTL(OUT_POST_HDR_BUF_ALLOC), DUTL(OUT_POST_DAT_BUF_ALLOC), DUTL(IN_POST_HDR_BUF_ALLOC), DUTL(IN_POST_DAT_BUF_ALLOC), DUTL(OUT_NP_BUF_ALLOC), DUTL(IN_NP_BUF_ALLOC), DUTL(PCIE_TAGS_ALLOC), DUTL(GBIF_READ_TAGS_ALLOC), DUTL(PCIE_PORT_CONTROL), DUTL(PCIE_PORT_STATUS), DUTL(PCIE_PORT_ERROR_SEV), DUTL(PCIE_PORT_IRQ_EN), DUTL(RC_STATUS), DUTL(RC_ERR_SEVERITY), DUTL(RC_IRQ_EN), 
DUTL(EP_STATUS), DUTL(EP_ERR_SEVERITY), DUTL(EP_ERR_IRQ_EN), DUTL(PCI_PM_CTRL1), DUTL(PCI_PM_CTRL2), /* PCIe stack regs */ DREG(SYSTEM_CONFIG1), DREG(SYSTEM_CONFIG2), DREG(EP_SYSTEM_CONFIG), DREG(EP_FLR), DREG(EP_BAR_CONFIG), DREG(LINK_CONFIG), DREG(PM_CONFIG), DREG(DLP_CONTROL), DREG(DLP_STATUS), DREG(ERR_REPORT_CONTROL), DREG(SLOT_CONTROL1), DREG(SLOT_CONTROL2), DREG(UTL_CONFIG), DREG(BUFFERS_CONFIG), DREG(ERROR_INJECT), DREG(SRIOV_CONFIG), DREG(PF0_SRIOV_STATUS), DREG(PF1_SRIOV_STATUS), DREG(PORT_NUMBER), DREG(POR_SYSTEM_CONFIG), /* Internal logic regs */ DREG(PHB_VERSION), DREG(RESET), DREG(PHB_CONTROL), DREG(PHB_TIMEOUT_CONTROL1), DREG(PHB_QUIESCE_DMA), DREG(PHB_DMA_READ_TAG_ACTV), DREG(PHB_TCE_READ_TAG_ACTV), /* FIR registers */ DREG(LEM_FIR_ACCUM), DREG(LEM_FIR_AND_MASK), DREG(LEM_FIR_OR_MASK), DREG(LEM_ACTION0), DREG(LEM_ACTION1), DREG(LEM_ERROR_MASK), DREG(LEM_ERROR_AND_MASK), DREG(LEM_ERROR_OR_MASK), /* Error traps registers */ DREG(PHB_ERR_STATUS), DREG(PHB_ERR_STATUS), DREG(PHB_ERR1_STATUS), DREG(PHB_ERR_INJECT), DREG(PHB_ERR_LEM_ENABLE), DREG(PHB_ERR_IRQ_ENABLE), DREG(PHB_ERR_FREEZE_ENABLE), DREG(PHB_ERR_SIDE_ENABLE), DREG(PHB_ERR_LOG_0), DREG(PHB_ERR_LOG_1), DREG(PHB_ERR_STATUS_MASK), DREG(PHB_ERR1_STATUS_MASK), DREG(MMIO_ERR_STATUS), DREG(MMIO_ERR1_STATUS), DREG(MMIO_ERR_INJECT), DREG(MMIO_ERR_LEM_ENABLE), DREG(MMIO_ERR_IRQ_ENABLE), DREG(MMIO_ERR_FREEZE_ENABLE), DREG(MMIO_ERR_SIDE_ENABLE), DREG(MMIO_ERR_LOG_0), DREG(MMIO_ERR_LOG_1), DREG(MMIO_ERR_STATUS_MASK), DREG(MMIO_ERR1_STATUS_MASK), DREG(DMA_ERR_STATUS), DREG(DMA_ERR1_STATUS), DREG(DMA_ERR_INJECT), DREG(DMA_ERR_LEM_ENABLE), DREG(DMA_ERR_IRQ_ENABLE), DREG(DMA_ERR_FREEZE_ENABLE), DREG(DMA_ERR_SIDE_ENABLE), DREG(DMA_ERR_LOG_0), DREG(DMA_ERR_LOG_1), DREG(DMA_ERR_STATUS_MASK), DREG(DMA_ERR1_STATUS_MASK), /* Debug and Trace registers */ DREG(PHB_DEBUG_CONTROL0), DREG(PHB_DEBUG_STATUS0), DREG(PHB_DEBUG_CONTROL1), DREG(PHB_DEBUG_STATUS1), DREG(PHB_DEBUG_CONTROL2), DREG(PHB_DEBUG_STATUS2), 
DREG(PHB_DEBUG_CONTROL3), DREG(PHB_DEBUG_STATUS3), DREG(PHB_DEBUG_CONTROL4), DREG(PHB_DEBUG_STATUS4), DREG(PHB_DEBUG_CONTROL5), DREG(PHB_DEBUG_STATUS5), /* Don't seem to exist ... DREG(PHB_DEBUG_CONTROL6), DREG(PHB_DEBUG_STATUS6), */ }; static int wsp_pci_regs_show(struct seq_file *m, void *private) { struct wsp_phb *phb = m->private; struct pci_controller *hose = phb->hose; int i; for (i = 0; i < ARRAY_SIZE(wsp_pci_regs); i++) { /* Skip write-only regs */ if (wsp_pci_regs[i].offset == 0xc08 || wsp_pci_regs[i].offset == 0xc10 || wsp_pci_regs[i].offset == 0xc38 || wsp_pci_regs[i].offset == 0xc40) continue; seq_printf(m, "0x%03x: 0x%016llx %s\n", wsp_pci_regs[i].offset, in_be64(hose->cfg_data + wsp_pci_regs[i].offset), wsp_pci_regs[i].name); } return 0; } static int wsp_pci_regs_open(struct inode *inode, struct file *file) { return single_open(file, wsp_pci_regs_show, inode->i_private); } static const struct file_operations wsp_pci_regs_fops = { .open = wsp_pci_regs_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static int wsp_pci_reg_set(void *data, u64 val) { out_be64((void __iomem *)data, val); return 0; } static int wsp_pci_reg_get(void *data, u64 *val) { *val = in_be64((void __iomem *)data); return 0; } DEFINE_SIMPLE_ATTRIBUTE(wsp_pci_reg_fops, wsp_pci_reg_get, wsp_pci_reg_set, "0x%llx\n"); static irqreturn_t wsp_pci_err_irq(int irq, void *dev_id) { struct wsp_phb *phb = dev_id; struct pci_controller *hose = phb->hose; irqreturn_t handled = IRQ_NONE; struct wsp_pcie_err_log_data ed; pr_err("PCI: Error interrupt on %s (PHB %d)\n", hose->dn->full_name, hose->global_number); again: memset(&ed, 0, sizeof(ed)); /* Read and clear UTL errors */ ed.utl_sys_err = in_be64(hose->cfg_data + PCIE_UTL_SYS_BUS_AGENT_STATUS); if (ed.utl_sys_err) out_be64(hose->cfg_data + PCIE_UTL_SYS_BUS_AGENT_STATUS, ed.utl_sys_err); ed.utl_port_err = in_be64(hose->cfg_data + PCIE_UTL_PCIE_PORT_STATUS); if (ed.utl_port_err) out_be64(hose->cfg_data + 
PCIE_UTL_PCIE_PORT_STATUS, ed.utl_port_err); ed.utl_rc_err = in_be64(hose->cfg_data + PCIE_UTL_RC_STATUS); if (ed.utl_rc_err) out_be64(hose->cfg_data + PCIE_UTL_RC_STATUS, ed.utl_rc_err); /* Read and clear main trap errors */ ed.phb_err = in_be64(hose->cfg_data + PCIE_REG_PHB_ERR_STATUS); if (ed.phb_err) { ed.phb_err1 = in_be64(hose->cfg_data + PCIE_REG_PHB_ERR1_STATUS); ed.phb_log0 = in_be64(hose->cfg_data + PCIE_REG_PHB_ERR_LOG_0); ed.phb_log1 = in_be64(hose->cfg_data + PCIE_REG_PHB_ERR_LOG_1); out_be64(hose->cfg_data + PCIE_REG_PHB_ERR1_STATUS, 0); out_be64(hose->cfg_data + PCIE_REG_PHB_ERR_STATUS, 0); } ed.mmio_err = in_be64(hose->cfg_data + PCIE_REG_MMIO_ERR_STATUS); if (ed.mmio_err) { ed.mmio_err1 = in_be64(hose->cfg_data + PCIE_REG_MMIO_ERR1_STATUS); ed.mmio_log0 = in_be64(hose->cfg_data + PCIE_REG_MMIO_ERR_LOG_0); ed.mmio_log1 = in_be64(hose->cfg_data + PCIE_REG_MMIO_ERR_LOG_1); out_be64(hose->cfg_data + PCIE_REG_MMIO_ERR1_STATUS, 0); out_be64(hose->cfg_data + PCIE_REG_MMIO_ERR_STATUS, 0); } ed.dma_err = in_be64(hose->cfg_data + PCIE_REG_DMA_ERR_STATUS); if (ed.dma_err) { ed.dma_err1 = in_be64(hose->cfg_data + PCIE_REG_DMA_ERR1_STATUS); ed.dma_log0 = in_be64(hose->cfg_data + PCIE_REG_DMA_ERR_LOG_0); ed.dma_log1 = in_be64(hose->cfg_data + PCIE_REG_DMA_ERR_LOG_1); out_be64(hose->cfg_data + PCIE_REG_DMA_ERR1_STATUS, 0); out_be64(hose->cfg_data + PCIE_REG_DMA_ERR_STATUS, 0); } /* Now print things out */ if (ed.phb_err) { pr_err(" PHB Error Status : 0x%016llx\n", ed.phb_err); pr_err(" PHB First Error Status: 0x%016llx\n", ed.phb_err1); pr_err(" PHB Error Log 0 : 0x%016llx\n", ed.phb_log0); pr_err(" PHB Error Log 1 : 0x%016llx\n", ed.phb_log1); } if (ed.mmio_err) { pr_err(" MMIO Error Status : 0x%016llx\n", ed.mmio_err); pr_err(" MMIO First Error Status: 0x%016llx\n", ed.mmio_err1); pr_err(" MMIO Error Log 0 : 0x%016llx\n", ed.mmio_log0); pr_err(" MMIO Error Log 1 : 0x%016llx\n", ed.mmio_log1); } if (ed.dma_err) { pr_err(" DMA Error Status : 0x%016llx\n", 
ed.dma_err); pr_err(" DMA First Error Status: 0x%016llx\n", ed.dma_err1); pr_err(" DMA Error Log 0 : 0x%016llx\n", ed.dma_log0); pr_err(" DMA Error Log 1 : 0x%016llx\n", ed.dma_log1); } if (ed.utl_sys_err) pr_err(" UTL Sys Error Status : 0x%016llx\n", ed.utl_sys_err); if (ed.utl_port_err) pr_err(" UTL Port Error Status : 0x%016llx\n", ed.utl_port_err); if (ed.utl_rc_err) pr_err(" UTL RC Error Status : 0x%016llx\n", ed.utl_rc_err); /* Interrupts are caused by the error traps. If we had any error there * we loop again in case the UTL buffered some new stuff between * going there and going to the traps */ if (ed.dma_err || ed.mmio_err || ed.phb_err) { handled = IRQ_HANDLED; goto again; } return handled; } static void __init wsp_setup_pci_err_reporting(struct wsp_phb *phb) { struct pci_controller *hose = phb->hose; int err_irq, i, rc; char fname[16]; /* Create a debugfs file for that PHB */ sprintf(fname, "phb%d", phb->hose->global_number); phb->ddir = debugfs_create_dir(fname, powerpc_debugfs_root); /* Some useful debug output */ if (phb->ddir) { struct dentry *d = debugfs_create_dir("regs", phb->ddir); char tmp[64]; for (i = 0; i < ARRAY_SIZE(wsp_pci_regs); i++) { sprintf(tmp, "%03x_%s", wsp_pci_regs[i].offset, wsp_pci_regs[i].name); debugfs_create_file(tmp, 0600, d, hose->cfg_data + wsp_pci_regs[i].offset, &wsp_pci_reg_fops); } debugfs_create_file("all_regs", 0600, phb->ddir, phb, &wsp_pci_regs_fops); } /* Find the IRQ number for that PHB */ err_irq = irq_of_parse_and_map(hose->dn, 0); if (err_irq == 0) /* XXX Error IRQ lacking from device-tree */ err_irq = wsp_pci_get_err_irq_no_dt(hose->dn); if (err_irq == 0) { pr_err("PCI: Failed to fetch error interrupt for %s\n", hose->dn->full_name); return; } /* Request it */ rc = request_irq(err_irq, wsp_pci_err_irq, 0, "wsp_pci error", phb); if (rc) { pr_err("PCI: Failed to request interrupt for %s\n", hose->dn->full_name); } /* Enable interrupts for all errors for now */ out_be64(hose->cfg_data + 
PCIE_REG_PHB_ERR_IRQ_ENABLE, 0xffffffffffffffffull); out_be64(hose->cfg_data + PCIE_REG_MMIO_ERR_IRQ_ENABLE, 0xffffffffffffffffull); out_be64(hose->cfg_data + PCIE_REG_DMA_ERR_IRQ_ENABLE, 0xffffffffffffffffull); } /* * This is called later to hookup with the error interrupt */ static int __init wsp_setup_pci_late(void) { struct wsp_phb *phb; list_for_each_entry(phb, &wsp_phbs, all) wsp_setup_pci_err_reporting(phb); return 0; } arch_initcall(wsp_setup_pci_late);
gpl-2.0
tommytarts/QuantumKernelM8-GPe
kernel/events/callchain.c
4593
4021
/* * Performance events callchain code, extracted from core.c: * * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de> * Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar * Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com> * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com> * * For licensing details see kernel-base/COPYING */ #include <linux/perf_event.h> #include <linux/slab.h> #include "internal.h" struct callchain_cpus_entries { struct rcu_head rcu_head; struct perf_callchain_entry *cpu_entries[0]; }; static DEFINE_PER_CPU(int, callchain_recursion[PERF_NR_CONTEXTS]); static atomic_t nr_callchain_events; static DEFINE_MUTEX(callchain_mutex); static struct callchain_cpus_entries *callchain_cpus_entries; __weak void perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs) { } __weak void perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs) { } static void release_callchain_buffers_rcu(struct rcu_head *head) { struct callchain_cpus_entries *entries; int cpu; entries = container_of(head, struct callchain_cpus_entries, rcu_head); for_each_possible_cpu(cpu) kfree(entries->cpu_entries[cpu]); kfree(entries); } static void release_callchain_buffers(void) { struct callchain_cpus_entries *entries; entries = callchain_cpus_entries; rcu_assign_pointer(callchain_cpus_entries, NULL); call_rcu(&entries->rcu_head, release_callchain_buffers_rcu); } static int alloc_callchain_buffers(void) { int cpu; int size; struct callchain_cpus_entries *entries; /* * We can't use the percpu allocation API for data that can be * accessed from NMI. Use a temporary manual per cpu allocation * until that gets sorted out. 
*/ size = offsetof(struct callchain_cpus_entries, cpu_entries[nr_cpu_ids]); entries = kzalloc(size, GFP_KERNEL); if (!entries) return -ENOMEM; size = sizeof(struct perf_callchain_entry) * PERF_NR_CONTEXTS; for_each_possible_cpu(cpu) { entries->cpu_entries[cpu] = kmalloc_node(size, GFP_KERNEL, cpu_to_node(cpu)); if (!entries->cpu_entries[cpu]) goto fail; } rcu_assign_pointer(callchain_cpus_entries, entries); return 0; fail: for_each_possible_cpu(cpu) kfree(entries->cpu_entries[cpu]); kfree(entries); return -ENOMEM; } int get_callchain_buffers(void) { int err = 0; int count; mutex_lock(&callchain_mutex); count = atomic_inc_return(&nr_callchain_events); if (WARN_ON_ONCE(count < 1)) { err = -EINVAL; goto exit; } if (count > 1) { /* If the allocation failed, give up */ if (!callchain_cpus_entries) err = -ENOMEM; goto exit; } err = alloc_callchain_buffers(); exit: mutex_unlock(&callchain_mutex); return err; } void put_callchain_buffers(void) { if (atomic_dec_and_mutex_lock(&nr_callchain_events, &callchain_mutex)) { release_callchain_buffers(); mutex_unlock(&callchain_mutex); } } static struct perf_callchain_entry *get_callchain_entry(int *rctx) { int cpu; struct callchain_cpus_entries *entries; *rctx = get_recursion_context(__get_cpu_var(callchain_recursion)); if (*rctx == -1) return NULL; entries = rcu_dereference(callchain_cpus_entries); if (!entries) return NULL; cpu = smp_processor_id(); return &entries->cpu_entries[cpu][*rctx]; } static void put_callchain_entry(int rctx) { put_recursion_context(__get_cpu_var(callchain_recursion), rctx); } struct perf_callchain_entry *perf_callchain(struct pt_regs *regs) { int rctx; struct perf_callchain_entry *entry; entry = get_callchain_entry(&rctx); if (rctx == -1) return NULL; if (!entry) goto exit_put; entry->nr = 0; if (!user_mode(regs)) { perf_callchain_store(entry, PERF_CONTEXT_KERNEL); perf_callchain_kernel(entry, regs); if (current->mm) regs = task_pt_regs(current); else regs = NULL; } if (regs) { 
perf_callchain_store(entry, PERF_CONTEXT_USER); perf_callchain_user(entry, regs); } exit_put: put_callchain_entry(rctx); return entry; }
gpl-2.0
philozheng/kernel-msm
drivers/regulator/88pm8607.c
4849
15379
/* * Regulators driver for Marvell 88PM8607 * * Copyright (C) 2009 Marvell International Ltd. * Haojian Zhuang <haojian.zhuang@marvell.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/err.h> #include <linux/i2c.h> #include <linux/platform_device.h> #include <linux/regulator/driver.h> #include <linux/regulator/machine.h> #include <linux/mfd/88pm860x.h> #include <linux/module.h> struct pm8607_regulator_info { struct regulator_desc desc; struct pm860x_chip *chip; struct regulator_dev *regulator; struct i2c_client *i2c; unsigned int *vol_table; unsigned int *vol_suspend; int vol_reg; int vol_shift; int vol_nbits; int update_reg; int update_bit; int enable_reg; int enable_bit; int slope_double; }; static const unsigned int BUCK1_table[] = { 725000, 750000, 775000, 800000, 825000, 850000, 875000, 900000, 925000, 950000, 975000, 1000000, 1025000, 1050000, 1075000, 1100000, 1125000, 1150000, 1175000, 1200000, 1225000, 1250000, 1275000, 1300000, 1325000, 1350000, 1375000, 1400000, 1425000, 1450000, 1475000, 1500000, 0, 25000, 50000, 75000, 100000, 125000, 150000, 175000, 200000, 225000, 250000, 275000, 300000, 325000, 350000, 375000, 400000, 425000, 450000, 475000, 500000, 525000, 550000, 575000, 600000, 625000, 650000, 675000, 700000, 725000, 750000, 775000, }; static const unsigned int BUCK1_suspend_table[] = { 0, 25000, 50000, 75000, 100000, 125000, 150000, 175000, 200000, 225000, 250000, 275000, 300000, 325000, 350000, 375000, 400000, 425000, 450000, 475000, 500000, 525000, 550000, 575000, 600000, 625000, 650000, 675000, 700000, 725000, 750000, 775000, 800000, 825000, 850000, 875000, 900000, 925000, 950000, 975000, 1000000, 1025000, 1050000, 1075000, 1100000, 1125000, 1150000, 1175000, 1200000, 1225000, 1250000, 1275000, 1300000, 1325000, 1350000, 1375000, 
1400000, 1425000, 1450000, 1475000, 1500000, 1500000, 1500000, 1500000, }; static const unsigned int BUCK2_table[] = { 0, 50000, 100000, 150000, 200000, 250000, 300000, 350000, 400000, 450000, 500000, 550000, 600000, 650000, 700000, 750000, 800000, 850000, 900000, 950000, 1000000, 1050000, 1100000, 1150000, 1200000, 1250000, 1300000, 1350000, 1400000, 1450000, 1500000, 1550000, 1600000, 1650000, 1700000, 1750000, 1800000, 1850000, 1900000, 1950000, 2000000, 2050000, 2100000, 2150000, 2200000, 2250000, 2300000, 2350000, 2400000, 2450000, 2500000, 2550000, 2600000, 2650000, 2700000, 2750000, 2800000, 2850000, 2900000, 2950000, 3000000, 3000000, 3000000, 3000000, }; static const unsigned int BUCK2_suspend_table[] = { 0, 50000, 100000, 150000, 200000, 250000, 300000, 350000, 400000, 450000, 500000, 550000, 600000, 650000, 700000, 750000, 800000, 850000, 900000, 950000, 1000000, 1050000, 1100000, 1150000, 1200000, 1250000, 1300000, 1350000, 1400000, 1450000, 1500000, 1550000, 1600000, 1650000, 1700000, 1750000, 1800000, 1850000, 1900000, 1950000, 2000000, 2050000, 2100000, 2150000, 2200000, 2250000, 2300000, 2350000, 2400000, 2450000, 2500000, 2550000, 2600000, 2650000, 2700000, 2750000, 2800000, 2850000, 2900000, 2950000, 3000000, 3000000, 3000000, 3000000, }; static const unsigned int BUCK3_table[] = { 0, 25000, 50000, 75000, 100000, 125000, 150000, 175000, 200000, 225000, 250000, 275000, 300000, 325000, 350000, 375000, 400000, 425000, 450000, 475000, 500000, 525000, 550000, 575000, 600000, 625000, 650000, 675000, 700000, 725000, 750000, 775000, 800000, 825000, 850000, 875000, 900000, 925000, 950000, 975000, 1000000, 1025000, 1050000, 1075000, 1100000, 1125000, 1150000, 1175000, 1200000, 1225000, 1250000, 1275000, 1300000, 1325000, 1350000, 1375000, 1400000, 1425000, 1450000, 1475000, 1500000, 1500000, 1500000, 1500000, }; static const unsigned int BUCK3_suspend_table[] = { 0, 25000, 50000, 75000, 100000, 125000, 150000, 175000, 200000, 225000, 250000, 275000, 300000, 
325000, 350000, 375000, 400000, 425000, 450000, 475000, 500000, 525000, 550000, 575000, 600000, 625000, 650000, 675000, 700000, 725000, 750000, 775000, 800000, 825000, 850000, 875000, 900000, 925000, 950000, 975000, 1000000, 1025000, 1050000, 1075000, 1100000, 1125000, 1150000, 1175000, 1200000, 1225000, 1250000, 1275000, 1300000, 1325000, 1350000, 1375000, 1400000, 1425000, 1450000, 1475000, 1500000, 1500000, 1500000, 1500000, }; static const unsigned int LDO1_table[] = { 1800000, 1200000, 2800000, 0, }; static const unsigned int LDO1_suspend_table[] = { 1800000, 1200000, 0, 0, }; static const unsigned int LDO2_table[] = { 1800000, 1850000, 1900000, 2700000, 2750000, 2800000, 2850000, 3300000, }; static const unsigned int LDO2_suspend_table[] = { 1800000, 1850000, 1900000, 2700000, 2750000, 2800000, 2850000, 2900000, }; static const unsigned int LDO3_table[] = { 1800000, 1850000, 1900000, 2700000, 2750000, 2800000, 2850000, 3300000, }; static const unsigned int LDO3_suspend_table[] = { 1800000, 1850000, 1900000, 2700000, 2750000, 2800000, 2850000, 2900000, }; static const unsigned int LDO4_table[] = { 1800000, 1850000, 1900000, 2700000, 2750000, 2800000, 2900000, 3300000, }; static const unsigned int LDO4_suspend_table[] = { 1800000, 1850000, 1900000, 2700000, 2750000, 2800000, 2900000, 2900000, }; static const unsigned int LDO5_table[] = { 2900000, 3000000, 3100000, 3300000, }; static const unsigned int LDO5_suspend_table[] = { 2900000, 0, 0, 0, }; static const unsigned int LDO6_table[] = { 1800000, 1850000, 2600000, 2650000, 2700000, 2750000, 2800000, 3300000, }; static const unsigned int LDO6_suspend_table[] = { 1800000, 1850000, 2600000, 2650000, 2700000, 2750000, 2800000, 2900000, }; static const unsigned int LDO7_table[] = { 1800000, 1850000, 1900000, 2700000, 2750000, 2800000, 2850000, 2900000, }; static const unsigned int LDO7_suspend_table[] = { 1800000, 1850000, 1900000, 2700000, 2750000, 2800000, 2850000, 2900000, }; static const unsigned int 
LDO8_table[] = { 1800000, 1850000, 1900000, 2700000, 2750000, 2800000, 2850000, 2900000, }; static const unsigned int LDO8_suspend_table[] = { 1800000, 1850000, 1900000, 2700000, 2750000, 2800000, 2850000, 2900000, }; static const unsigned int LDO9_table[] = { 1800000, 1850000, 1900000, 2700000, 2750000, 2800000, 2850000, 3300000, }; static const unsigned int LDO9_suspend_table[] = { 1800000, 1850000, 1900000, 2700000, 2750000, 2800000, 2850000, 2900000, }; static const unsigned int LDO10_table[] = { 1800000, 1850000, 1900000, 2700000, 2750000, 2800000, 2850000, 3300000, 1200000, 1200000, 1200000, 1200000, 1200000, 1200000, 1200000, 1200000, }; static const unsigned int LDO10_suspend_table[] = { 1800000, 1850000, 1900000, 2700000, 2750000, 2800000, 2850000, 2900000, 1200000, 1200000, 1200000, 1200000, 1200000, 1200000, 1200000, 1200000, }; static const unsigned int LDO12_table[] = { 1800000, 1900000, 2700000, 2800000, 2900000, 3000000, 3100000, 3300000, 1200000, 1200000, 1200000, 1200000, 1200000, 1200000, 1200000, 1200000, }; static const unsigned int LDO12_suspend_table[] = { 1800000, 1900000, 2700000, 2800000, 2900000, 2900000, 2900000, 2900000, 1200000, 1200000, 1200000, 1200000, 1200000, 1200000, 1200000, 1200000, }; static const unsigned int LDO13_table[] = { 1200000, 1300000, 1800000, 2000000, 2500000, 2800000, 3000000, 0, }; static const unsigned int LDO13_suspend_table[] = { 0, }; static const unsigned int LDO14_table[] = { 1800000, 1850000, 2700000, 2750000, 2800000, 2850000, 2900000, 3300000, }; static const unsigned int LDO14_suspend_table[] = { 1800000, 1850000, 2700000, 2750000, 2800000, 2850000, 2900000, 2900000, }; static int pm8607_list_voltage(struct regulator_dev *rdev, unsigned index) { struct pm8607_regulator_info *info = rdev_get_drvdata(rdev); int ret = -EINVAL; if (info->vol_table && (index < (1 << info->vol_nbits))) { ret = info->vol_table[index]; if (info->slope_double) ret <<= 1; } return ret; } static int choose_voltage(struct 
regulator_dev *rdev, int min_uV, int max_uV) { struct pm8607_regulator_info *info = rdev_get_drvdata(rdev); int i, ret = -ENOENT; if (info->slope_double) { min_uV = min_uV >> 1; max_uV = max_uV >> 1; } if (info->vol_table) { for (i = 0; i < (1 << info->vol_nbits); i++) { if (!info->vol_table[i]) break; if ((min_uV <= info->vol_table[i]) && (max_uV >= info->vol_table[i])) { ret = i; break; } } } if (ret < 0) pr_err("invalid voltage range (%d %d) uV\n", min_uV, max_uV); return ret; } static int pm8607_set_voltage(struct regulator_dev *rdev, int min_uV, int max_uV, unsigned *selector) { struct pm8607_regulator_info *info = rdev_get_drvdata(rdev); uint8_t val, mask; int ret; if (min_uV > max_uV) { pr_err("invalid voltage range (%d, %d) uV\n", min_uV, max_uV); return -EINVAL; } ret = choose_voltage(rdev, min_uV, max_uV); if (ret < 0) return -EINVAL; *selector = ret; val = (uint8_t)(ret << info->vol_shift); mask = ((1 << info->vol_nbits) - 1) << info->vol_shift; ret = pm860x_set_bits(info->i2c, info->vol_reg, mask, val); if (ret) return ret; switch (info->desc.id) { case PM8607_ID_BUCK1: case PM8607_ID_BUCK3: ret = pm860x_set_bits(info->i2c, info->update_reg, 1 << info->update_bit, 1 << info->update_bit); break; } return ret; } static int pm8607_get_voltage(struct regulator_dev *rdev) { struct pm8607_regulator_info *info = rdev_get_drvdata(rdev); uint8_t val, mask; int ret; ret = pm860x_reg_read(info->i2c, info->vol_reg); if (ret < 0) return ret; mask = ((1 << info->vol_nbits) - 1) << info->vol_shift; val = ((unsigned char)ret & mask) >> info->vol_shift; return pm8607_list_voltage(rdev, val); } static int pm8607_enable(struct regulator_dev *rdev) { struct pm8607_regulator_info *info = rdev_get_drvdata(rdev); return pm860x_set_bits(info->i2c, info->enable_reg, 1 << info->enable_bit, 1 << info->enable_bit); } static int pm8607_disable(struct regulator_dev *rdev) { struct pm8607_regulator_info *info = rdev_get_drvdata(rdev); return pm860x_set_bits(info->i2c, 
info->enable_reg, 1 << info->enable_bit, 0); } static int pm8607_is_enabled(struct regulator_dev *rdev) { struct pm8607_regulator_info *info = rdev_get_drvdata(rdev); int ret; ret = pm860x_reg_read(info->i2c, info->enable_reg); if (ret < 0) return ret; return !!((unsigned char)ret & (1 << info->enable_bit)); } static struct regulator_ops pm8607_regulator_ops = { .set_voltage = pm8607_set_voltage, .get_voltage = pm8607_get_voltage, .enable = pm8607_enable, .disable = pm8607_disable, .is_enabled = pm8607_is_enabled, }; #define PM8607_DVC(vreg, nbits, ureg, ubit, ereg, ebit) \ { \ .desc = { \ .name = #vreg, \ .ops = &pm8607_regulator_ops, \ .type = REGULATOR_VOLTAGE, \ .id = PM8607_ID_##vreg, \ .owner = THIS_MODULE, \ }, \ .vol_reg = PM8607_##vreg, \ .vol_shift = (0), \ .vol_nbits = (nbits), \ .update_reg = PM8607_##ureg, \ .update_bit = (ubit), \ .enable_reg = PM8607_##ereg, \ .enable_bit = (ebit), \ .slope_double = (0), \ .vol_table = (unsigned int *)&vreg##_table, \ .vol_suspend = (unsigned int *)&vreg##_suspend_table, \ } #define PM8607_LDO(_id, vreg, shift, nbits, ereg, ebit) \ { \ .desc = { \ .name = "LDO" #_id, \ .ops = &pm8607_regulator_ops, \ .type = REGULATOR_VOLTAGE, \ .id = PM8607_ID_LDO##_id, \ .owner = THIS_MODULE, \ }, \ .vol_reg = PM8607_##vreg, \ .vol_shift = (shift), \ .vol_nbits = (nbits), \ .enable_reg = PM8607_##ereg, \ .enable_bit = (ebit), \ .slope_double = (0), \ .vol_table = (unsigned int *)&LDO##_id##_table, \ .vol_suspend = (unsigned int *)&LDO##_id##_suspend_table, \ } static struct pm8607_regulator_info pm8607_regulator_info[] = { PM8607_DVC(BUCK1, 6, GO, 0, SUPPLIES_EN11, 0), PM8607_DVC(BUCK2, 6, GO, 1, SUPPLIES_EN11, 1), PM8607_DVC(BUCK3, 6, GO, 2, SUPPLIES_EN11, 2), PM8607_LDO( 1, LDO1, 0, 2, SUPPLIES_EN11, 3), PM8607_LDO( 2, LDO2, 0, 3, SUPPLIES_EN11, 4), PM8607_LDO( 3, LDO3, 0, 3, SUPPLIES_EN11, 5), PM8607_LDO( 4, LDO4, 0, 3, SUPPLIES_EN11, 6), PM8607_LDO( 5, LDO5, 0, 2, SUPPLIES_EN11, 7), PM8607_LDO( 6, LDO6, 0, 3, SUPPLIES_EN12, 0), 
PM8607_LDO( 7, LDO7, 0, 3, SUPPLIES_EN12, 1), PM8607_LDO( 8, LDO8, 0, 3, SUPPLIES_EN12, 2), PM8607_LDO( 9, LDO9, 0, 3, SUPPLIES_EN12, 3), PM8607_LDO(10, LDO10, 0, 4, SUPPLIES_EN12, 4), PM8607_LDO(12, LDO12, 0, 4, SUPPLIES_EN12, 5), PM8607_LDO(13, VIBRATOR_SET, 1, 3, VIBRATOR_SET, 0), PM8607_LDO(14, LDO14, 0, 3, SUPPLIES_EN12, 6), }; static int __devinit pm8607_regulator_probe(struct platform_device *pdev) { struct pm860x_chip *chip = dev_get_drvdata(pdev->dev.parent); struct pm8607_regulator_info *info = NULL; struct regulator_init_data *pdata = pdev->dev.platform_data; struct resource *res; int i; res = platform_get_resource(pdev, IORESOURCE_IO, 0); if (res == NULL) { dev_err(&pdev->dev, "No I/O resource!\n"); return -EINVAL; } for (i = 0; i < ARRAY_SIZE(pm8607_regulator_info); i++) { info = &pm8607_regulator_info[i]; if (info->desc.id == res->start) break; } if (i == ARRAY_SIZE(pm8607_regulator_info)) { dev_err(&pdev->dev, "Failed to find regulator %llu\n", (unsigned long long)res->start); return -EINVAL; } info->i2c = (chip->id == CHIP_PM8607) ? 
chip->client : chip->companion; info->chip = chip; /* check DVC ramp slope double */ if ((i == PM8607_ID_BUCK3) && info->chip->buck3_double) info->slope_double = 1; /* replace driver_data with info */ info->regulator = regulator_register(&info->desc, &pdev->dev, pdata, info, NULL); if (IS_ERR(info->regulator)) { dev_err(&pdev->dev, "failed to register regulator %s\n", info->desc.name); return PTR_ERR(info->regulator); } platform_set_drvdata(pdev, info); return 0; } static int __devexit pm8607_regulator_remove(struct platform_device *pdev) { struct pm8607_regulator_info *info = platform_get_drvdata(pdev); platform_set_drvdata(pdev, NULL); regulator_unregister(info->regulator); return 0; } static struct platform_driver pm8607_regulator_driver = { .driver = { .name = "88pm860x-regulator", .owner = THIS_MODULE, }, .probe = pm8607_regulator_probe, .remove = __devexit_p(pm8607_regulator_remove), }; static int __init pm8607_regulator_init(void) { return platform_driver_register(&pm8607_regulator_driver); } subsys_initcall(pm8607_regulator_init); static void __exit pm8607_regulator_exit(void) { platform_driver_unregister(&pm8607_regulator_driver); } module_exit(pm8607_regulator_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Haojian Zhuang <haojian.zhuang@marvell.com>"); MODULE_DESCRIPTION("Regulator Driver for Marvell 88PM8607 PMIC"); MODULE_ALIAS("platform:88pm8607-regulator");
gpl-2.0
n3ocort3x/android_kernel_htc_m7
drivers/acpi/bgrt.c
4849
3920
/* * Copyright 2012 Red Hat, Inc <mjg@redhat.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/device.h> #include <linux/sysfs.h> #include <acpi/acpi.h> #include <acpi/acpi_bus.h> static struct acpi_table_bgrt *bgrt_tab; static struct kobject *bgrt_kobj; struct bmp_header { u16 id; u32 size; } __attribute ((packed)); static struct bmp_header bmp_header; static ssize_t show_version(struct device *dev, struct device_attribute *attr, char *buf) { return snprintf(buf, PAGE_SIZE, "%d\n", bgrt_tab->version); } static DEVICE_ATTR(version, S_IRUGO, show_version, NULL); static ssize_t show_status(struct device *dev, struct device_attribute *attr, char *buf) { return snprintf(buf, PAGE_SIZE, "%d\n", bgrt_tab->status); } static DEVICE_ATTR(status, S_IRUGO, show_status, NULL); static ssize_t show_type(struct device *dev, struct device_attribute *attr, char *buf) { return snprintf(buf, PAGE_SIZE, "%d\n", bgrt_tab->image_type); } static DEVICE_ATTR(type, S_IRUGO, show_type, NULL); static ssize_t show_xoffset(struct device *dev, struct device_attribute *attr, char *buf) { return snprintf(buf, PAGE_SIZE, "%d\n", bgrt_tab->image_offset_x); } static DEVICE_ATTR(xoffset, S_IRUGO, show_xoffset, NULL); static ssize_t show_yoffset(struct device *dev, struct device_attribute *attr, char *buf) { return snprintf(buf, PAGE_SIZE, "%d\n", bgrt_tab->image_offset_y); } static DEVICE_ATTR(yoffset, S_IRUGO, show_yoffset, NULL); static ssize_t show_image(struct file *file, struct kobject *kobj, struct bin_attribute *attr, char *buf, loff_t off, size_t count) { int size = attr->size; void __iomem *image = attr->private; if (off >= size) { count = 0; } else { if (off + count > size) count = size - off; memcpy_fromio(buf, image+off, count); } return count; } static 
struct bin_attribute image_attr = { .attr = { .name = "image", .mode = S_IRUGO, }, .read = show_image, }; static struct attribute *bgrt_attributes[] = { &dev_attr_version.attr, &dev_attr_status.attr, &dev_attr_type.attr, &dev_attr_xoffset.attr, &dev_attr_yoffset.attr, NULL, }; static struct attribute_group bgrt_attribute_group = { .attrs = bgrt_attributes, }; static int __init bgrt_init(void) { acpi_status status; int ret; void __iomem *bgrt; if (acpi_disabled) return -ENODEV; status = acpi_get_table("BGRT", 0, (struct acpi_table_header **)&bgrt_tab); if (ACPI_FAILURE(status)) return -ENODEV; sysfs_bin_attr_init(&image_attr); bgrt = ioremap(bgrt_tab->image_address, sizeof(struct bmp_header)); if (!bgrt) { ret = -EINVAL; goto out_err; } memcpy_fromio(&bmp_header, bgrt, sizeof(bmp_header)); image_attr.size = bmp_header.size; iounmap(bgrt); image_attr.private = ioremap(bgrt_tab->image_address, image_attr.size); if (!image_attr.private) { ret = -EINVAL; goto out_err; } bgrt_kobj = kobject_create_and_add("bgrt", acpi_kobj); if (!bgrt_kobj) { ret = -EINVAL; goto out_iounmap; } ret = sysfs_create_group(bgrt_kobj, &bgrt_attribute_group); if (ret) goto out_kobject; ret = sysfs_create_bin_file(bgrt_kobj, &image_attr); if (ret) goto out_group; return 0; out_group: sysfs_remove_group(bgrt_kobj, &bgrt_attribute_group); out_kobject: kobject_put(bgrt_kobj); out_iounmap: iounmap(image_attr.private); out_err: return ret; } static void __exit bgrt_exit(void) { iounmap(image_attr.private); sysfs_remove_group(bgrt_kobj, &bgrt_attribute_group); sysfs_remove_bin_file(bgrt_kobj, &image_attr); } module_init(bgrt_init); module_exit(bgrt_exit); MODULE_AUTHOR("Matthew Garrett"); MODULE_DESCRIPTION("BGRT boot graphic support"); MODULE_LICENSE("GPL");
gpl-2.0
kbc-developers/android_kernel_samsung_hlte
net/ceph/debugfs.c
6641
6656
#include <linux/ceph/ceph_debug.h> #include <linux/device.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/ctype.h> #include <linux/debugfs.h> #include <linux/seq_file.h> #include <linux/ceph/libceph.h> #include <linux/ceph/mon_client.h> #include <linux/ceph/auth.h> #include <linux/ceph/debugfs.h> #ifdef CONFIG_DEBUG_FS /* * Implement /sys/kernel/debug/ceph fun * * /sys/kernel/debug/ceph/client* - an instance of the ceph client * .../osdmap - current osdmap * .../monmap - current monmap * .../osdc - active osd requests * .../monc - mon client state * .../dentry_lru - dump contents of dentry lru * .../caps - expose cap (reservation) stats * .../bdi - symlink to ../../bdi/something */ static struct dentry *ceph_debugfs_dir; static int monmap_show(struct seq_file *s, void *p) { int i; struct ceph_client *client = s->private; if (client->monc.monmap == NULL) return 0; seq_printf(s, "epoch %d\n", client->monc.monmap->epoch); for (i = 0; i < client->monc.monmap->num_mon; i++) { struct ceph_entity_inst *inst = &client->monc.monmap->mon_inst[i]; seq_printf(s, "\t%s%lld\t%s\n", ENTITY_NAME(inst->name), ceph_pr_addr(&inst->addr.in_addr)); } return 0; } static int osdmap_show(struct seq_file *s, void *p) { int i; struct ceph_client *client = s->private; struct rb_node *n; if (client->osdc.osdmap == NULL) return 0; seq_printf(s, "epoch %d\n", client->osdc.osdmap->epoch); seq_printf(s, "flags%s%s\n", (client->osdc.osdmap->flags & CEPH_OSDMAP_NEARFULL) ? " NEARFULL" : "", (client->osdc.osdmap->flags & CEPH_OSDMAP_FULL) ? 
" FULL" : ""); for (n = rb_first(&client->osdc.osdmap->pg_pools); n; n = rb_next(n)) { struct ceph_pg_pool_info *pool = rb_entry(n, struct ceph_pg_pool_info, node); seq_printf(s, "pg_pool %d pg_num %d / %d, lpg_num %d / %d\n", pool->id, pool->v.pg_num, pool->pg_num_mask, pool->v.lpg_num, pool->lpg_num_mask); } for (i = 0; i < client->osdc.osdmap->max_osd; i++) { struct ceph_entity_addr *addr = &client->osdc.osdmap->osd_addr[i]; int state = client->osdc.osdmap->osd_state[i]; char sb[64]; seq_printf(s, "\tosd%d\t%s\t%3d%%\t(%s)\n", i, ceph_pr_addr(&addr->in_addr), ((client->osdc.osdmap->osd_weight[i]*100) >> 16), ceph_osdmap_state_str(sb, sizeof(sb), state)); } return 0; } static int monc_show(struct seq_file *s, void *p) { struct ceph_client *client = s->private; struct ceph_mon_generic_request *req; struct ceph_mon_client *monc = &client->monc; struct rb_node *rp; mutex_lock(&monc->mutex); if (monc->have_mdsmap) seq_printf(s, "have mdsmap %u\n", (unsigned)monc->have_mdsmap); if (monc->have_osdmap) seq_printf(s, "have osdmap %u\n", (unsigned)monc->have_osdmap); if (monc->want_next_osdmap) seq_printf(s, "want next osdmap\n"); for (rp = rb_first(&monc->generic_request_tree); rp; rp = rb_next(rp)) { __u16 op; req = rb_entry(rp, struct ceph_mon_generic_request, node); op = le16_to_cpu(req->request->hdr.type); if (op == CEPH_MSG_STATFS) seq_printf(s, "%lld statfs\n", req->tid); else seq_printf(s, "%lld unknown\n", req->tid); } mutex_unlock(&monc->mutex); return 0; } static int osdc_show(struct seq_file *s, void *pp) { struct ceph_client *client = s->private; struct ceph_osd_client *osdc = &client->osdc; struct rb_node *p; mutex_lock(&osdc->request_mutex); for (p = rb_first(&osdc->requests); p; p = rb_next(p)) { struct ceph_osd_request *req; struct ceph_osd_request_head *head; struct ceph_osd_op *op; int num_ops; int opcode, olen; int i; req = rb_entry(p, struct ceph_osd_request, r_node); seq_printf(s, "%lld\tosd%d\t%d.%x\t", req->r_tid, req->r_osd ? 
req->r_osd->o_osd : -1, le32_to_cpu(req->r_pgid.pool), le16_to_cpu(req->r_pgid.ps)); head = req->r_request->front.iov_base; op = (void *)(head + 1); num_ops = le16_to_cpu(head->num_ops); olen = le32_to_cpu(head->object_len); seq_printf(s, "%.*s", olen, (const char *)(head->ops + num_ops)); if (req->r_reassert_version.epoch) seq_printf(s, "\t%u'%llu", (unsigned)le32_to_cpu(req->r_reassert_version.epoch), le64_to_cpu(req->r_reassert_version.version)); else seq_printf(s, "\t"); for (i = 0; i < num_ops; i++) { opcode = le16_to_cpu(op->op); seq_printf(s, "\t%s", ceph_osd_op_name(opcode)); op++; } seq_printf(s, "\n"); } mutex_unlock(&osdc->request_mutex); return 0; } CEPH_DEFINE_SHOW_FUNC(monmap_show) CEPH_DEFINE_SHOW_FUNC(osdmap_show) CEPH_DEFINE_SHOW_FUNC(monc_show) CEPH_DEFINE_SHOW_FUNC(osdc_show) int ceph_debugfs_init(void) { ceph_debugfs_dir = debugfs_create_dir("ceph", NULL); if (!ceph_debugfs_dir) return -ENOMEM; return 0; } void ceph_debugfs_cleanup(void) { debugfs_remove(ceph_debugfs_dir); } int ceph_debugfs_client_init(struct ceph_client *client) { int ret = -ENOMEM; char name[80]; snprintf(name, sizeof(name), "%pU.client%lld", &client->fsid, client->monc.auth->global_id); client->debugfs_dir = debugfs_create_dir(name, ceph_debugfs_dir); if (!client->debugfs_dir) goto out; client->monc.debugfs_file = debugfs_create_file("monc", 0600, client->debugfs_dir, client, &monc_show_fops); if (!client->monc.debugfs_file) goto out; client->osdc.debugfs_file = debugfs_create_file("osdc", 0600, client->debugfs_dir, client, &osdc_show_fops); if (!client->osdc.debugfs_file) goto out; client->debugfs_monmap = debugfs_create_file("monmap", 0600, client->debugfs_dir, client, &monmap_show_fops); if (!client->debugfs_monmap) goto out; client->debugfs_osdmap = debugfs_create_file("osdmap", 0600, client->debugfs_dir, client, &osdmap_show_fops); if (!client->debugfs_osdmap) goto out; return 0; out: ceph_debugfs_client_cleanup(client); return ret; } void 
ceph_debugfs_client_cleanup(struct ceph_client *client) { debugfs_remove(client->debugfs_osdmap); debugfs_remove(client->debugfs_monmap); debugfs_remove(client->osdc.debugfs_file); debugfs_remove(client->monc.debugfs_file); debugfs_remove(client->debugfs_dir); } #else /* CONFIG_DEBUG_FS */ int ceph_debugfs_init(void) { return 0; } void ceph_debugfs_cleanup(void) { } int ceph_debugfs_client_init(struct ceph_client *client) { return 0; } void ceph_debugfs_client_cleanup(struct ceph_client *client) { } #endif /* CONFIG_DEBUG_FS */ EXPORT_SYMBOL(ceph_debugfs_init); EXPORT_SYMBOL(ceph_debugfs_cleanup);
gpl-2.0
vitek999/android_kernel_oukitel_orange
security/apparmor/match.c
7153
10875
/* * AppArmor security module * * This file contains AppArmor dfa based regular expression matching engine * * Copyright (C) 1998-2008 Novell/SUSE * Copyright 2009-2010 Canonical Ltd. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation, version 2 of the * License. */ #include <linux/errno.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/slab.h> #include <linux/vmalloc.h> #include <linux/err.h> #include <linux/kref.h> #include "include/apparmor.h" #include "include/match.h" /** * unpack_table - unpack a dfa table (one of accept, default, base, next check) * @blob: data to unpack (NOT NULL) * @bsize: size of blob * * Returns: pointer to table else NULL on failure * * NOTE: must be freed by kvfree (not kmalloc) */ static struct table_header *unpack_table(char *blob, size_t bsize) { struct table_header *table = NULL; struct table_header th; size_t tsize; if (bsize < sizeof(struct table_header)) goto out; /* loaded td_id's start at 1, subtract 1 now to avoid doing * it every time we use td_id as an index */ th.td_id = be16_to_cpu(*(u16 *) (blob)) - 1; th.td_flags = be16_to_cpu(*(u16 *) (blob + 2)); th.td_lolen = be32_to_cpu(*(u32 *) (blob + 8)); blob += sizeof(struct table_header); if (!(th.td_flags == YYTD_DATA16 || th.td_flags == YYTD_DATA32 || th.td_flags == YYTD_DATA8)) goto out; tsize = table_size(th.td_lolen, th.td_flags); if (bsize < tsize) goto out; table = kvmalloc(tsize); if (table) { *table = th; if (th.td_flags == YYTD_DATA8) UNPACK_ARRAY(table->td_data, blob, th.td_lolen, u8, byte_to_byte); else if (th.td_flags == YYTD_DATA16) UNPACK_ARRAY(table->td_data, blob, th.td_lolen, u16, be16_to_cpu); else if (th.td_flags == YYTD_DATA32) UNPACK_ARRAY(table->td_data, blob, th.td_lolen, u32, be32_to_cpu); else goto fail; } out: /* if table was vmalloced make sure the page tables are synced * before it is used, as it goes live to 
all cpus. */ if (is_vmalloc_addr(table)) vm_unmap_aliases(); return table; fail: kvfree(table); return NULL; } /** * verify_dfa - verify that transitions and states in the tables are in bounds. * @dfa: dfa to test (NOT NULL) * @flags: flags controlling what type of accept table are acceptable * * Assumes dfa has gone through the first pass verification done by unpacking * NOTE: this does not valid accept table values * * Returns: %0 else error code on failure to verify */ static int verify_dfa(struct aa_dfa *dfa, int flags) { size_t i, state_count, trans_count; int error = -EPROTO; /* check that required tables exist */ if (!(dfa->tables[YYTD_ID_DEF] && dfa->tables[YYTD_ID_BASE] && dfa->tables[YYTD_ID_NXT] && dfa->tables[YYTD_ID_CHK])) goto out; /* accept.size == default.size == base.size */ state_count = dfa->tables[YYTD_ID_BASE]->td_lolen; if (ACCEPT1_FLAGS(flags)) { if (!dfa->tables[YYTD_ID_ACCEPT]) goto out; if (state_count != dfa->tables[YYTD_ID_ACCEPT]->td_lolen) goto out; } if (ACCEPT2_FLAGS(flags)) { if (!dfa->tables[YYTD_ID_ACCEPT2]) goto out; if (state_count != dfa->tables[YYTD_ID_ACCEPT2]->td_lolen) goto out; } if (state_count != dfa->tables[YYTD_ID_DEF]->td_lolen) goto out; /* next.size == chk.size */ trans_count = dfa->tables[YYTD_ID_NXT]->td_lolen; if (trans_count != dfa->tables[YYTD_ID_CHK]->td_lolen) goto out; /* if equivalence classes then its table size must be 256 */ if (dfa->tables[YYTD_ID_EC] && dfa->tables[YYTD_ID_EC]->td_lolen != 256) goto out; if (flags & DFA_FLAG_VERIFY_STATES) { for (i = 0; i < state_count; i++) { if (DEFAULT_TABLE(dfa)[i] >= state_count) goto out; /* TODO: do check that DEF state recursion terminates */ if (BASE_TABLE(dfa)[i] + 255 >= trans_count) { printk(KERN_ERR "AppArmor DFA next/check upper " "bounds error\n"); goto out; } } for (i = 0; i < trans_count; i++) { if (NEXT_TABLE(dfa)[i] >= state_count) goto out; if (CHECK_TABLE(dfa)[i] >= state_count) goto out; } } error = 0; out: return error; } /** * dfa_free - free a 
dfa allocated by aa_dfa_unpack * @dfa: the dfa to free (MAYBE NULL) * * Requires: reference count to dfa == 0 */ static void dfa_free(struct aa_dfa *dfa) { if (dfa) { int i; for (i = 0; i < ARRAY_SIZE(dfa->tables); i++) { kvfree(dfa->tables[i]); dfa->tables[i] = NULL; } kfree(dfa); } } /** * aa_dfa_free_kref - free aa_dfa by kref (called by aa_put_dfa) * @kr: kref callback for freeing of a dfa (NOT NULL) */ void aa_dfa_free_kref(struct kref *kref) { struct aa_dfa *dfa = container_of(kref, struct aa_dfa, count); dfa_free(dfa); } /** * aa_dfa_unpack - unpack the binary tables of a serialized dfa * @blob: aligned serialized stream of data to unpack (NOT NULL) * @size: size of data to unpack * @flags: flags controlling what type of accept tables are acceptable * * Unpack a dfa that has been serialized. To find information on the dfa * format look in Documentation/security/apparmor.txt * Assumes the dfa @blob stream has been aligned on a 8 byte boundary * * Returns: an unpacked dfa ready for matching or ERR_PTR on failure */ struct aa_dfa *aa_dfa_unpack(void *blob, size_t size, int flags) { int hsize; int error = -ENOMEM; char *data = blob; struct table_header *table = NULL; struct aa_dfa *dfa = kzalloc(sizeof(struct aa_dfa), GFP_KERNEL); if (!dfa) goto fail; kref_init(&dfa->count); error = -EPROTO; /* get dfa table set header */ if (size < sizeof(struct table_set_header)) goto fail; if (ntohl(*(u32 *) data) != YYTH_MAGIC) goto fail; hsize = ntohl(*(u32 *) (data + 4)); if (size < hsize) goto fail; dfa->flags = ntohs(*(u16 *) (data + 12)); data += hsize; size -= hsize; while (size > 0) { table = unpack_table(data, size); if (!table) goto fail; switch (table->td_id) { case YYTD_ID_ACCEPT: if (!(table->td_flags & ACCEPT1_FLAGS(flags))) goto fail; break; case YYTD_ID_ACCEPT2: if (!(table->td_flags & ACCEPT2_FLAGS(flags))) goto fail; break; case YYTD_ID_BASE: if (table->td_flags != YYTD_DATA32) goto fail; break; case YYTD_ID_DEF: case YYTD_ID_NXT: case YYTD_ID_CHK: if 
(table->td_flags != YYTD_DATA16) goto fail; break; case YYTD_ID_EC: if (table->td_flags != YYTD_DATA8) goto fail; break; default: goto fail; } /* check for duplicate table entry */ if (dfa->tables[table->td_id]) goto fail; dfa->tables[table->td_id] = table; data += table_size(table->td_lolen, table->td_flags); size -= table_size(table->td_lolen, table->td_flags); table = NULL; } error = verify_dfa(dfa, flags); if (error) goto fail; return dfa; fail: kvfree(table); dfa_free(dfa); return ERR_PTR(error); } /** * aa_dfa_match_len - traverse @dfa to find state @str stops at * @dfa: the dfa to match @str against (NOT NULL) * @start: the state of the dfa to start matching in * @str: the string of bytes to match against the dfa (NOT NULL) * @len: length of the string of bytes to match * * aa_dfa_match_len will match @str against the dfa and return the state it * finished matching in. The final state can be used to look up the accepting * label, or as the start state of a continuing match. * * This function will happily match again the 0 byte and only finishes * when @len input is consumed. 
* * Returns: final state reached after input is consumed */ unsigned int aa_dfa_match_len(struct aa_dfa *dfa, unsigned int start, const char *str, int len) { u16 *def = DEFAULT_TABLE(dfa); u32 *base = BASE_TABLE(dfa); u16 *next = NEXT_TABLE(dfa); u16 *check = CHECK_TABLE(dfa); unsigned int state = start, pos; if (state == 0) return 0; /* current state is <state>, matching character *str */ if (dfa->tables[YYTD_ID_EC]) { /* Equivalence class table defined */ u8 *equiv = EQUIV_TABLE(dfa); /* default is direct to next state */ for (; len; len--) { pos = base[state] + equiv[(u8) *str++]; if (check[pos] == state) state = next[pos]; else state = def[state]; } } else { /* default is direct to next state */ for (; len; len--) { pos = base[state] + (u8) *str++; if (check[pos] == state) state = next[pos]; else state = def[state]; } } return state; } /** * aa_dfa_match - traverse @dfa to find state @str stops at * @dfa: the dfa to match @str against (NOT NULL) * @start: the state of the dfa to start matching in * @str: the null terminated string of bytes to match against the dfa (NOT NULL) * * aa_dfa_match will match @str against the dfa and return the state it * finished matching in. The final state can be used to look up the accepting * label, or as the start state of a continuing match. 
* * Returns: final state reached after input is consumed */ unsigned int aa_dfa_match(struct aa_dfa *dfa, unsigned int start, const char *str) { u16 *def = DEFAULT_TABLE(dfa); u32 *base = BASE_TABLE(dfa); u16 *next = NEXT_TABLE(dfa); u16 *check = CHECK_TABLE(dfa); unsigned int state = start, pos; if (state == 0) return 0; /* current state is <state>, matching character *str */ if (dfa->tables[YYTD_ID_EC]) { /* Equivalence class table defined */ u8 *equiv = EQUIV_TABLE(dfa); /* default is direct to next state */ while (*str) { pos = base[state] + equiv[(u8) *str++]; if (check[pos] == state) state = next[pos]; else state = def[state]; } } else { /* default is direct to next state */ while (*str) { pos = base[state] + (u8) *str++; if (check[pos] == state) state = next[pos]; else state = def[state]; } } return state; } /** * aa_dfa_next - step one character to the next state in the dfa * @dfa: the dfa to tranverse (NOT NULL) * @state: the state to start in * @c: the input character to transition on * * aa_dfa_match will step through the dfa by one input character @c * * Returns: state reach after input @c */ unsigned int aa_dfa_next(struct aa_dfa *dfa, unsigned int state, const char c) { u16 *def = DEFAULT_TABLE(dfa); u32 *base = BASE_TABLE(dfa); u16 *next = NEXT_TABLE(dfa); u16 *check = CHECK_TABLE(dfa); unsigned int pos; /* current state is <state>, matching character *str */ if (dfa->tables[YYTD_ID_EC]) { /* Equivalence class table defined */ u8 *equiv = EQUIV_TABLE(dfa); /* default is direct to next state */ pos = base[state] + equiv[(u8) c]; if (check[pos] == state) state = next[pos]; else state = def[state]; } else { /* default is direct to next state */ pos = base[state] + (u8) c; if (check[pos] == state) state = next[pos]; else state = def[state]; } return state; }
gpl-2.0
charles1018/kernel_msm
arch/sparc/kernel/sigutil_32.c
8689
3264
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/thread_info.h>
#include <linux/uaccess.h>
#include <linux/sched.h>

#include <asm/sigcontext.h>
#include <asm/fpumacro.h>
#include <asm/ptrace.h>
#include <asm/switch_to.h>

#include "sigutil.h"

/*
 * Copy the current task's FPU state (32 float regs, FSR, FP queue) to
 * the user-space signal frame at @fpu, flushing live FPU contents to
 * the thread struct first and disabling the FPU (PSR_EF cleared) for
 * the signal handler. Returns 0 or a non-zero __copy_to_user/__put_user
 * error accumulation.
 */
int save_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
{
	int err = 0;
#ifdef CONFIG_SMP
	/* On SMP, FPU ownership is tracked per-task via TIF_USEDFPU. */
	if (test_tsk_thread_flag(current, TIF_USEDFPU)) {
		/* Enable the FPU so fpsave() can dump the live registers. */
		put_psr(get_psr() | PSR_EF);
		fpsave(&current->thread.float_regs[0], &current->thread.fsr,
		       &current->thread.fpqueue[0], &current->thread.fpqdepth);
		regs->psr &= ~(PSR_EF);
		clear_tsk_thread_flag(current, TIF_USEDFPU);
	}
#else
	/* On UP, lazy FPU switching tracks the single owner task. */
	if (current == last_task_used_math) {
		put_psr(get_psr() | PSR_EF);
		fpsave(&current->thread.float_regs[0], &current->thread.fsr,
		       &current->thread.fpqueue[0], &current->thread.fpqdepth);
		last_task_used_math = NULL;
		regs->psr &= ~(PSR_EF);
	}
#endif
	/* Errors from the individual copies are OR-ed together. */
	err |= __copy_to_user(&fpu->si_float_regs[0],
			      &current->thread.float_regs[0],
			      (sizeof(unsigned long) * 32));
	err |= __put_user(current->thread.fsr, &fpu->si_fsr);
	err |= __put_user(current->thread.fpqdepth, &fpu->si_fpqdepth);
	/* The FP queue is only copied when non-empty. */
	if (current->thread.fpqdepth != 0)
		err |= __copy_to_user(&fpu->si_fpqueue[0],
				      &current->thread.fpqueue[0],
				      ((sizeof(unsigned long) +
				      (sizeof(unsigned long *)))*16));
	clear_used_math();
	return err;
}

/*
 * Restore the task's FPU state from the user-space signal frame @fpu
 * into the thread struct (it will be reloaded lazily; PSR_EF is cleared
 * so the next FPU use traps). Returns 0, -EFAULT on a bad user pointer,
 * or accumulated copy errors.
 */
int restore_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
{
	int err;
#ifdef CONFIG_SMP
	if (test_tsk_thread_flag(current, TIF_USEDFPU))
		regs->psr &= ~PSR_EF;
#else
	if (current == last_task_used_math) {
		last_task_used_math = NULL;
		regs->psr &= ~PSR_EF;
	}
#endif
	set_used_math();
	clear_tsk_thread_flag(current, TIF_USEDFPU);

	if (!access_ok(VERIFY_READ, fpu, sizeof(*fpu)))
		return -EFAULT;

	err = __copy_from_user(&current->thread.float_regs[0], &fpu->si_float_regs[0],
			       (sizeof(unsigned long) * 32));
	err |= __get_user(current->thread.fsr, &fpu->si_fsr);
	err |= __get_user(current->thread.fpqdepth, &fpu->si_fpqdepth);
	if (current->thread.fpqdepth != 0)
		err |= __copy_from_user(&current->thread.fpqueue[0],
					&fpu->si_fpqueue[0],
					((sizeof(unsigned long) +
					(sizeof(unsigned long *)))*16));
	return err;
}

/*
 * Copy @wsaved spilled register windows (and their stack pointers) from
 * the thread_info spill buffers to the user signal frame @rwin.
 * Returns 0 or accumulated copy/put errors.
 */
int save_rwin_state(int wsaved, __siginfo_rwin_t __user *rwin)
{
	int i, err = __put_user(wsaved, &rwin->wsaved);

	for (i = 0; i < wsaved; i++) {
		struct reg_window32 *rp;
		unsigned long fp;

		rp = &current_thread_info()->reg_window[i];
		fp = current_thread_info()->rwbuf_stkptrs[i];
		err |= copy_to_user(&rwin->reg_window[i], rp,
				    sizeof(struct reg_window32));
		err |= __put_user(fp, &rwin->rwbuf_stkptrs[i]);
	}
	return err;
}

/*
 * Restore spilled register windows from the user signal frame @rp into
 * the thread_info spill buffers, then force them out to the user stack
 * via synchronize_user_stack(). Returns 0 on success; -EFAULT when the
 * user-supplied window count is out of range or the windows could not
 * be pushed back to the user stack.
 */
int restore_rwin_state(__siginfo_rwin_t __user *rp)
{
	struct thread_info *t = current_thread_info();
	int i, wsaved, err;

	/*
	 * wsaved comes from user space; bound it against the spill buffer
	 * size before using it as a loop limit.
	 */
	__get_user(wsaved, &rp->wsaved);
	if (wsaved > NSWINS)
		return -EFAULT;

	err = 0;
	for (i = 0; i < wsaved; i++) {
		err |= copy_from_user(&t->reg_window[i],
				      &rp->reg_window[i],
				      sizeof(struct reg_window32));
		err |= __get_user(t->rwbuf_stkptrs[i],
				  &rp->rwbuf_stkptrs[i]);
	}
	if (err)
		return err;

	t->w_saved = wsaved;
	synchronize_user_stack();
	/* If any window is still pending, the user stack was unwritable. */
	if (t->w_saved)
		return -EFAULT;
	return 0;
}
gpl-2.0
hi35xx/hi3518e-buildroot
linux/linux-3.0.y/drivers/net/irda/litelink-sir.c
12529
5436
/********************************************************************* * * Filename: litelink.c * Version: 1.1 * Description: Driver for the Parallax LiteLink dongle * Status: Stable * Author: Dag Brattli <dagb@cs.uit.no> * Created at: Fri May 7 12:50:33 1999 * Modified at: Fri Dec 17 09:14:23 1999 * Modified by: Dag Brattli <dagb@cs.uit.no> * * Copyright (c) 1999 Dag Brattli, All Rights Reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of * the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, * MA 02111-1307 USA * ********************************************************************/ /* * Modified at: Thu Jan 15 2003 * Modified by: Eugene Crosser <crosser@average.org> * * Convert to "new" IRDA infrastructure for kernel 2.6 */ #include <linux/module.h> #include <linux/delay.h> #include <linux/init.h> #include <net/irda/irda.h> #include "sir-dev.h" #define MIN_DELAY 25 /* 15 us, but wait a little more to be sure */ #define MAX_DELAY 10000 /* 1 ms */ static int litelink_open(struct sir_dev *dev); static int litelink_close(struct sir_dev *dev); static int litelink_change_speed(struct sir_dev *dev, unsigned speed); static int litelink_reset(struct sir_dev *dev); /* These are the baudrates supported - 9600 must be last one! 
*/ static unsigned baud_rates[] = { 115200, 57600, 38400, 19200, 9600 }; static struct dongle_driver litelink = { .owner = THIS_MODULE, .driver_name = "Parallax LiteLink", .type = IRDA_LITELINK_DONGLE, .open = litelink_open, .close = litelink_close, .reset = litelink_reset, .set_speed = litelink_change_speed, }; static int __init litelink_sir_init(void) { return irda_register_dongle(&litelink); } static void __exit litelink_sir_cleanup(void) { irda_unregister_dongle(&litelink); } static int litelink_open(struct sir_dev *dev) { struct qos_info *qos = &dev->qos; IRDA_DEBUG(2, "%s()\n", __func__); /* Power up dongle */ sirdev_set_dtr_rts(dev, TRUE, TRUE); /* Set the speeds we can accept */ qos->baud_rate.bits &= IR_115200|IR_57600|IR_38400|IR_19200|IR_9600; qos->min_turn_time.bits = 0x7f; /* Needs 0.01 ms */ irda_qos_bits_to_value(qos); /* irda thread waits 50 msec for power settling */ return 0; } static int litelink_close(struct sir_dev *dev) { IRDA_DEBUG(2, "%s()\n", __func__); /* Power off dongle */ sirdev_set_dtr_rts(dev, FALSE, FALSE); return 0; } /* * Function litelink_change_speed (task) * * Change speed of the Litelink dongle. To cycle through the available * baud rates, pulse RTS low for a few ms. */ static int litelink_change_speed(struct sir_dev *dev, unsigned speed) { int i; IRDA_DEBUG(2, "%s()\n", __func__); /* dongle already reset by irda-thread - current speed (dongle and * port) is the default speed (115200 for litelink!) 
*/ /* Cycle through avaiable baudrates until we reach the correct one */ for (i = 0; baud_rates[i] != speed; i++) { /* end-of-list reached due to invalid speed request */ if (baud_rates[i] == 9600) break; /* Set DTR, clear RTS */ sirdev_set_dtr_rts(dev, FALSE, TRUE); /* Sleep a minimum of 15 us */ udelay(MIN_DELAY); /* Set DTR, Set RTS */ sirdev_set_dtr_rts(dev, TRUE, TRUE); /* Sleep a minimum of 15 us */ udelay(MIN_DELAY); } dev->speed = baud_rates[i]; /* invalid baudrate should not happen - but if, we return -EINVAL and * the dongle configured for 9600 so the stack has a chance to recover */ return (dev->speed == speed) ? 0 : -EINVAL; } /* * Function litelink_reset (task) * * Reset the Litelink type dongle. * */ static int litelink_reset(struct sir_dev *dev) { IRDA_DEBUG(2, "%s()\n", __func__); /* probably the power-up can be dropped here, but with only * 15 usec delay it's not worth the risk unless somebody with * the hardware confirms it doesn't break anything... */ /* Power on dongle */ sirdev_set_dtr_rts(dev, TRUE, TRUE); /* Sleep a minimum of 15 us */ udelay(MIN_DELAY); /* Clear RTS to reset dongle */ sirdev_set_dtr_rts(dev, TRUE, FALSE); /* Sleep a minimum of 15 us */ udelay(MIN_DELAY); /* Go back to normal mode */ sirdev_set_dtr_rts(dev, TRUE, TRUE); /* Sleep a minimum of 15 us */ udelay(MIN_DELAY); /* This dongles speed defaults to 115200 bps */ dev->speed = 115200; return 0; } MODULE_AUTHOR("Dag Brattli <dagb@cs.uit.no>"); MODULE_DESCRIPTION("Parallax Litelink dongle driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS("irda-dongle-5"); /* IRDA_LITELINK_DONGLE */ /* * Function init_module (void) * * Initialize Litelink module * */ module_init(litelink_sir_init); /* * Function cleanup_module (void) * * Cleanup Litelink module * */ module_exit(litelink_sir_cleanup);
gpl-2.0
Backspace-Dev/htx21
arch/ia64/mm/extable.c
13297
3019
/*
 * Kernel exception handling table support.  Derived from arch/alpha/mm/extable.c.
 *
 * Copyright (C) 1998, 1999, 2001-2002, 2004 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */

#include <linux/sort.h>

#include <asm/uaccess.h>
#include <linux/module.h>

/*
 * sort() comparator: order entries by the absolute fault address they
 * cover.  ia64 extable entries store *location-relative* offsets, so
 * the absolute address is the address of the field plus its value.
 */
static int cmp_ex(const void *a, const void *b)
{
	const struct exception_table_entry *l = a, *r = b;
	u64 lip = (u64) &l->addr + l->addr;
	u64 rip = (u64) &r->addr + r->addr;

	/* avoid overflow */
	if (lip > rip)
		return 1;
	if (lip < rip)
		return -1;
	return 0;
}

/*
 * sort() swap callback: swap two entries while adjusting their
 * location-relative offsets by the distance between the slots, so
 * each entry still refers to the same absolute addresses after the
 * move.  A plain memcpy swap would silently corrupt the offsets.
 */
static void swap_ex(void *a, void *b, int size)
{
	struct exception_table_entry *l = a, *r = b, tmp;
	u64 delta = (u64) r - (u64) l;

	tmp = *l;
	l->addr = r->addr + delta;
	l->cont = r->cont + delta;
	r->addr = tmp.addr - delta;
	r->cont = tmp.cont - delta;
}

/*
 * Sort the exception table. It's usually already sorted, but there
 * may be unordered entries due to multiple text sections (such as the
 * .init text section). Note that the exception-table-entries contain
 * location-relative addresses, which requires a bit of care during
 * sorting to avoid overflows in the offset members (e.g., it would
 * not be safe to make a temporary copy of an exception-table entry on
 * the stack, because the stack may be more than 2GB away from the
 * exception-table).
 */
void sort_extable (struct exception_table_entry *start,
		   struct exception_table_entry *finish)
{
	sort(start, finish - start, sizeof(struct exception_table_entry),
	     cmp_ex, swap_ex);
}

/* Decode an entry's location-relative addr field to an absolute address. */
static inline unsigned long ex_to_addr(const struct exception_table_entry *x)
{
	return (unsigned long)&x->addr + x->addr;
}

#ifdef CONFIG_MODULES
/*
 * Any entry referring to the module init will be at the beginning or
 * the end.
 */
void trim_init_extable(struct module *m)
{
	/*trim the beginning*/
	while (m->num_exentries &&
	       within_module_init(ex_to_addr(&m->extable[0]), m)) {
		m->extable++;
		m->num_exentries--;
	}
	/*trim the end*/
	while (m->num_exentries &&
	       within_module_init(ex_to_addr(&m->extable[m->num_exentries-1]),
				  m))
		m->num_exentries--;
}
#endif /* CONFIG_MODULES */

/*
 * Binary-search a (sorted) exception table for the entry covering the
 * faulting instruction address @ip.  Returns the entry or NULL when
 * @ip has no fixup.
 */
const struct exception_table_entry *
search_extable (const struct exception_table_entry *first,
		const struct exception_table_entry *last,
		unsigned long ip)
{
	const struct exception_table_entry *mid;
	unsigned long mid_ip;
	long diff;

	while (first <= last) {
		mid = &first[(last - first)/2];
		/* decode the location-relative address (see ex_to_addr) */
		mid_ip = (u64) &mid->addr + mid->addr;
		diff = mid_ip - ip;
		if (diff == 0)
			return mid;
		else if (diff < 0)
			first = mid + 1;
		else
			last = mid - 1;
	}
	return NULL;
}

/*
 * Apply the fixup described by extable entry @e: set r8 to -EFAULT
 * (and optionally zero r9, flagged by bit 2 of the continuation
 * offset), then redirect execution to the continuation point.  The low
 * 2 bits of the fixup address encode the ia64 bundle slot number.
 */
void
ia64_handle_exception (struct pt_regs *regs, const struct exception_table_entry *e)
{
	long fix = (u64) &e->cont + e->cont;

	regs->r8 = -EFAULT;
	if (fix & 4)
		regs->r9 = 0;
	regs->cr_iip = fix & ~0xf;
	ia64_psr(regs)->ri = fix & 0x3;		/* set continuation slot number */
}
gpl-2.0
martyj/LGP999_V10c_Kernel
drivers/video/matrox/matroxfb_misc.c
14577
25093
/* * * Hardware accelerated Matrox Millennium I, II, Mystique, G100, G200 and G400 * * (c) 1998-2002 Petr Vandrovec <vandrove@vc.cvut.cz> * * Portions Copyright (c) 2001 Matrox Graphics Inc. * * Version: 1.65 2002/08/14 * * MTRR stuff: 1998 Tom Rini <trini@kernel.crashing.org> * * Contributors: "menion?" <menion@mindless.com> * Betatesting, fixes, ideas * * "Kurt Garloff" <garloff@suse.de> * Betatesting, fixes, ideas, videomodes, videomodes timmings * * "Tom Rini" <trini@kernel.crashing.org> * MTRR stuff, PPC cleanups, betatesting, fixes, ideas * * "Bibek Sahu" <scorpio@dodds.net> * Access device through readb|w|l and write b|w|l * Extensive debugging stuff * * "Daniel Haun" <haund@usa.net> * Testing, hardware cursor fixes * * "Scott Wood" <sawst46+@pitt.edu> * Fixes * * "Gerd Knorr" <kraxel@goldbach.isdn.cs.tu-berlin.de> * Betatesting * * "Kelly French" <targon@hazmat.com> * "Fernando Herrera" <fherrera@eurielec.etsit.upm.es> * Betatesting, bug reporting * * "Pablo Bianucci" <pbian@pccp.com.ar> * Fixes, ideas, betatesting * * "Inaky Perez Gonzalez" <inaky@peloncho.fis.ucm.es> * Fixes, enhandcements, ideas, betatesting * * "Ryuichi Oikawa" <roikawa@rr.iiij4u.or.jp> * PPC betatesting, PPC support, backward compatibility * * "Paul Womar" <Paul@pwomar.demon.co.uk> * "Owen Waller" <O.Waller@ee.qub.ac.uk> * PPC betatesting * * "Thomas Pornin" <pornin@bolet.ens.fr> * Alpha betatesting * * "Pieter van Leuven" <pvl@iae.nl> * "Ulf Jaenicke-Roessler" <ujr@physik.phy.tu-dresden.de> * G100 testing * * "H. Peter Arvin" <hpa@transmeta.com> * Ideas * * "Cort Dougan" <cort@cs.nmt.edu> * CHRP fixes and PReP cleanup * * "Mark Vojkovich" <mvojkovi@ucsd.edu> * G400 support * * "David C. Hansen" <haveblue@us.ibm.com> * Fixes * * "Ian Romanick" <idr@us.ibm.com> * Find PInS data in BIOS on PowerPC systems. 
 *
 * (following author is not in any relation with this code, but his code
 * is included in this driver)
 *
 * Based on framebuffer driver for VBE 2.0 compliant graphic boards
 * (c) 1998 Gerd Knorr <kraxel@cs.tu-berlin.de>
 *
 * (following author is not in any relation with this code, but his ideas
 * were used when writing this driver)
 *
 * FreeVBE/AF (Matrox), "Shawn Hargreaves" <shawn@talula.demon.co.uk>
 *
 */

#include "matroxfb_misc.h"
#include <linux/interrupt.h>
#include <linux/matroxfb.h>

/* Write @val into the indirect RAMDAC register @reg via the
 * index/data register pair. */
void matroxfb_DAC_out(const struct matrox_fb_info *minfo, int reg, int val)
{
	DBG_REG(__func__)
	mga_outb(M_RAMDAC_BASE+M_X_INDEX, reg);
	mga_outb(M_RAMDAC_BASE+M_X_DATAREG, val);
}

/* Read and return the indirect RAMDAC register @reg. */
int matroxfb_DAC_in(const struct matrox_fb_info *minfo, int reg)
{
	DBG_REG(__func__)
	mga_outb(M_RAMDAC_BASE+M_X_INDEX, reg);
	return mga_inb(M_RAMDAC_BASE+M_X_DATAREG);
}

/* Convert fbdev relative timings (margins / sync lengths in @var) into
 * the absolute start/end positions stored in @mt. */
void matroxfb_var2my(struct fb_var_screeninfo* var, struct my_timming* mt)
{
	unsigned int pixclock = var->pixclock;

	DBG(__func__)

	if (!pixclock) pixclock = 10000;	/* 10ns = 100MHz */
	/* pixclock is a period in ps; mt->pixclock becomes a frequency */
	mt->pixclock = 1000000000 / pixclock;
	if (mt->pixclock < 1) mt->pixclock = 1;
	mt->mnp = -1;
	mt->dblscan = var->vmode & FB_VMODE_DOUBLE;
	mt->interlaced = var->vmode & FB_VMODE_INTERLACED;
	mt->HDisplay = var->xres;
	mt->HSyncStart = mt->HDisplay + var->right_margin;
	mt->HSyncEnd = mt->HSyncStart + var->hsync_len;
	mt->HTotal = mt->HSyncEnd + var->left_margin;
	mt->VDisplay = var->yres;
	mt->VSyncStart = mt->VDisplay + var->lower_margin;
	mt->VSyncEnd = mt->VSyncStart + var->vsync_len;
	mt->VTotal = mt->VSyncEnd + var->upper_margin;
	mt->sync = var->sync;
}

/* Search PLL input divider (*in), feedback divider (*feed) and post
 * divider shift (*post) giving the closest achievable clock to @freq
 * without exceeding @fmax; returns the best VCO frequency found.
 * (Frequencies appear to be in kHz, matching ref_freq 14318 — confirm.) */
int matroxfb_PLL_calcclock(const struct matrox_pll_features* pll, unsigned int freq, unsigned int fmax,
		unsigned int* in, unsigned int* feed, unsigned int* post)
{
	unsigned int bestdiff = ~0;
	unsigned int bestvco = 0;
	unsigned int fxtal = pll->ref_freq;
	unsigned int fwant;
	unsigned int p;

	DBG(__func__)

	fwant = freq;

#ifdef DEBUG
	printk(KERN_ERR "post_shift_max: %d\n", pll->post_shift_max);
	printk(KERN_ERR "ref_freq: %d\n", pll->ref_freq);
	printk(KERN_ERR "freq: %d\n", freq);
	printk(KERN_ERR "vco_freq_min: %d\n", pll->vco_freq_min);
	printk(KERN_ERR "in_div_min: %d\n", pll->in_div_min);
	printk(KERN_ERR "in_div_max: %d\n", pll->in_div_max);
	printk(KERN_ERR "feed_div_min: %d\n", pll->feed_div_min);
	printk(KERN_ERR "feed_div_max: %d\n", pll->feed_div_max);
	printk(KERN_ERR "fmax: %d\n", fmax);
#endif
	/* Run the VCO as high as possible: keep doubling the target while
	 * it still fits under fmax, counting post-divider steps in p. */
	for (p = 1; p <= pll->post_shift_max; p++) {
		if (fwant * 2 > fmax)
			break;
		fwant *= 2;
	}
	if (fwant < pll->vco_freq_min) fwant = pll->vco_freq_min;
	if (fwant > fmax) fwant = fmax;
	/* Walk the post divider back down; bestdiff is halved alongside
	 * fwant so comparisons stay on the same scale. */
	for (; p-- > 0; fwant >>= 1, bestdiff >>= 1) {
		unsigned int m;

		if (fwant < pll->vco_freq_min) break;
		for (m = pll->in_div_min; m <= pll->in_div_max; m++) {
			unsigned int diff, fvco;
			unsigned int n;

			/* Rounded feedback divider for this input divider */
			n = (fwant * (m + 1) + (fxtal >> 1)) / fxtal - 1;
			if (n > pll->feed_div_max)
				break;
			if (n < pll->feed_div_min)
				n = pll->feed_div_min;
			fvco = (fxtal * (n + 1)) / (m + 1);
			if (fvco < fwant)
				diff = fwant - fvco;
			else
				diff = fvco - fwant;
			if (diff < bestdiff) {
				bestdiff = diff;
				*post = p;
				*in = m;
				*feed = n;
				bestvco = fvco;
			}
		}
	}
	dprintk(KERN_ERR "clk: %02X %02X %02X %d %d %d\n", *in, *feed, *post, fxtal, bestvco, fwant);
	return bestvco;
}

/* Fill minfo->hw with VGA SEQ/GCTL/ATTR/CRTC/CRTCEXT register values for
 * timing @m; nothing is written to hardware here (see vgaHWrestore). */
int matroxfb_vgaHWinit(struct matrox_fb_info *minfo, struct my_timming *m)
{
	unsigned int hd, hs, he, hbe, ht;
	unsigned int vd, vs, ve, vt, lc;
	unsigned int wd;
	unsigned int divider;
	int i;
	struct matrox_hw_state * const hw = &minfo->hw;

	DBG(__func__)

	hw->SEQ[0] = 0x00;
	hw->SEQ[1] = 0x01;	/* or 0x09 */
	hw->SEQ[2] = 0x0F;	/* bitplanes */
	hw->SEQ[3] = 0x00;
	hw->SEQ[4] = 0x0E;
	/* CRTC 0..7, 9, 16..19, 21, 22 are reprogrammed by Matrox Millennium code... 
	   Hope that by MGA1064 too */
	/* Double-scan doubles all vertical positions; interlace halves them. */
	if (m->dblscan) {
		m->VTotal <<= 1;
		m->VDisplay <<= 1;
		m->VSyncStart <<= 1;
		m->VSyncEnd <<= 1;
	}
	if (m->interlaced) {
		m->VTotal >>= 1;
		m->VDisplay >>= 1;
		m->VSyncStart >>= 1;
		m->VSyncEnd >>= 1;
	}
	/* GCTL is ignored when not using 0xA0000 aperture */
	hw->GCTL[0] = 0x00;
	hw->GCTL[1] = 0x00;
	hw->GCTL[2] = 0x00;
	hw->GCTL[3] = 0x00;
	hw->GCTL[4] = 0x00;
	hw->GCTL[5] = 0x40;
	hw->GCTL[6] = 0x05;
	hw->GCTL[7] = 0x0F;
	hw->GCTL[8] = 0xFF;

	/* Whole ATTR is ignored in PowerGraphics mode */
	for (i = 0; i < 16; i++)
		hw->ATTR[i] = i;
	hw->ATTR[16] = 0x41;
	hw->ATTR[17] = 0xFF;
	hw->ATTR[18] = 0x0F;
	hw->ATTR[19] = 0x00;
	hw->ATTR[20] = 0x00;

	/* Horizontal values are programmed in character-clock units. */
	hd = m->HDisplay >> 3;
	hs = m->HSyncStart >> 3;
	he = m->HSyncEnd >> 3;
	ht = m->HTotal >> 3;
	/* standard timmings are in 8pixels, but for interleaved we cannot */
	/* do it for 4bpp (because of (4bpp >> 1(interleaved))/4 == 0) */
	/* using 16 or more pixels per unit can save us */
	divider = minfo->curr.final_bppShift;
	while (divider & 3) {
		hd >>= 1;
		hs >>= 1;
		he >>= 1;
		ht >>= 1;
		divider <<= 1;
	}
	divider = divider / 4;
	/* divider can be from 1 to 8 */
	while (divider > 8) {
		hd <<= 1;
		hs <<= 1;
		he <<= 1;
		ht <<= 1;
		divider >>= 1;
	}
	/* VGA registers hold value-minus-one (minus-two for VTotal). */
	hd = hd - 1;
	hs = hs - 1;
	he = he - 1;
	ht = ht - 1;
	vd = m->VDisplay - 1;
	vs = m->VSyncStart - 1;
	ve = m->VSyncEnd - 1;
	vt = m->VTotal - 2;
	lc = vd;
	/* G200 cannot work with (ht & 7) == 6 */
	if (((ht & 0x07) == 0x06) || ((ht & 0x0F) == 0x04))
		ht++;
	hbe = ht;
	/* Display pitch in 64-bit units. */
	wd = minfo->fbcon.var.xres_virtual * minfo->curr.final_bppShift / 64;

	hw->CRTCEXT[0] = 0;
	hw->CRTCEXT[5] = 0;
	if (m->interlaced) {
		hw->CRTCEXT[0] = 0x80;
		hw->CRTCEXT[5] = (hs + he - ht) >> 1;
		if (!m->dblscan)
			wd <<= 1;
		vt &= ~1;
	}
	hw->CRTCEXT[0] |= (wd & 0x300) >> 4;
	/* High bits of the horizontal values that do not fit in CRTC 0..5. */
	hw->CRTCEXT[1] = (((ht - 4) & 0x100) >> 8) |
			  ((hd      & 0x100) >> 7) | /* blanking */
			  ((hs      & 0x100) >> 6) | /* sync start */
			   (hbe     & 0x040);	     /* end hor. 
			 i, hw->SEQ[i]);
	/* CRTC 17 bit 7 is the write-protect bit; clear it first so the
	 * protected registers 0..7 can be reprogrammed. */
	mga_setr(M_CRTC_INDEX, 17, hw->CRTC[17] & 0x7F);
	for (i = 0; i < 25; i++)
		mga_setr(M_CRTC_INDEX, i, hw->CRTC[i]);
	for (i = 0; i < 9; i++)
		mga_setr(M_GRAPHICS_INDEX, i, hw->GCTL[i]);
	/* ATTR has a shared index/data port; reading M_ATTR_RESET puts the
	 * flip-flop back into index state before each write pair. */
	for (i = 0; i < 21; i++) {
		mga_inb(M_ATTR_RESET);
		mga_outb(M_ATTR_INDEX, i);
		mga_outb(M_ATTR_INDEX, hw->ATTR[i]);
	}
	/* Reload the full 256-entry (768-byte) palette. */
	mga_outb(M_PALETTE_MASK, 0xFF);
	mga_outb(M_DAC_REG, 0x00);
	for (i = 0; i < 768; i++)
		mga_outb(M_DAC_VAL, hw->DACpal[i]);
	mga_inb(M_ATTR_RESET);
	mga_outb(M_ATTR_INDEX, 0x20);

	CRITEND
}

/* Copy a PInS block found at @pins in the BIOS image into bd->pins.
 * Supports the checksummed 0x2E 0x41 variable-length format and the
 * fixed 64-byte 0x40 0x00 format; on bad length or checksum bd is
 * left untouched. */
static void get_pins(unsigned char __iomem* pins, struct matrox_bios* bd)
{
	unsigned int b0 = readb(pins);

	if (b0 == 0x2E && readb(pins+1) == 0x41) {
		unsigned int pins_len = readb(pins+2);
		unsigned int i;
		unsigned char cksum;
		unsigned char* dst = bd->pins;

		if (pins_len < 3 || pins_len > 128) {
			return;
		}
		*dst++ = 0x2E;
		*dst++ = 0x41;
		*dst++ = pins_len;
		cksum = 0x2E + 0x41 + pins_len;
		for (i = 3; i < pins_len; i++) {
			cksum += *dst++ = readb(pins+i);
		}
		/* Bytes including the stored checksum must sum to zero. */
		if (cksum) {
			return;
		}
		bd->pins_len = pins_len;
	} else if (b0 == 0x40 && readb(pins+1) == 0x00) {
		unsigned int i;
		unsigned char* dst = bd->pins;

		*dst++ = 0x40;
		*dst++ = 0;
		for (i = 2; i < 0x40; i++) {
			*dst++ = readb(pins+i);
		}
		bd->pins_len = 0x40;
	}
}

/* Extract the BIOS version: prefer the PCI Data Structure ("PCIR")
 * revision fields if the pointer at offset 24/25 is sane, otherwise
 * fall back to the byte at offset 5. */
static void get_bios_version(unsigned char __iomem * vbios, struct matrox_bios* bd)
{
	unsigned int pcir_offset;

	pcir_offset = readb(vbios + 24) | (readb(vbios + 25) << 8);
	if (pcir_offset >= 26 && pcir_offset < 0xFFE0 &&
	    readb(vbios + pcir_offset    ) == 'P' &&
	    readb(vbios + pcir_offset + 1) == 'C' &&
	    readb(vbios + pcir_offset + 2) == 'I' &&
	    readb(vbios + pcir_offset + 3) == 'R') {
		unsigned char h;

		h = readb(vbios + pcir_offset + 0x12);
		bd->version.vMaj = (h >> 4) & 0xF;
		bd->version.vMin = h & 0xF;
		bd->version.vRev = readb(vbios + pcir_offset + 0x13);
	} else {
		unsigned char h;

		h = readb(vbios + 5);
		bd->version.vMaj = (h >> 4) & 0xF;
		bd->version.vMin = h & 0xF;
		bd->version.vRev = 0;
	}
}

/* Read the output-routing byte at BIOS offset 0x7FF1; 0xFF is treated
 * as "not present" and stored as 0. */
static void get_bios_output(unsigned char __iomem* vbios, struct matrox_bios* bd)
{
	unsigned char b;

	b = readb(vbios + 0x7FF1);
	if (b == 0xFF) {
		b = 0;
	}
	bd->output.state = b;
}

/* Set bd->output.tvout if the BIOS identifies itself as a TV-out build. */
static void get_bios_tvout(unsigned char __iomem* vbios, struct matrox_bios* bd)
{
	unsigned int i;

	/* Check for 'IBM .*(V....TVO' string - it means TVO BIOS */
	bd->output.tvout = 0;
	if (readb(vbios + 0x1D) != 'I' ||
	    readb(vbios + 0x1E) != 'B' ||
	    readb(vbios + 0x1F) != 'M' ||
	    readb(vbios + 0x20) != ' ') {
		return;
	}
	for (i = 0x2D; i < 0x2D + 128; i++) {
		unsigned char b = readb(vbios + i);

		if (b == '(' && readb(vbios + i + 1) == 'V') {
			if (readb(vbios + i + 6) == 'T' &&
			    readb(vbios + i + 7) == 'V' &&
			    readb(vbios + i + 8) == 'O') {
				bd->output.tvout = 1;
			}
			return;
		}
		if (b == 0)
			break;
	}
}

/* Validate the 0x55AA ROM signature at @vbios, then collect version,
 * output routing, TV-out flag and the PInS data block into @bd. */
static void parse_bios(unsigned char __iomem* vbios, struct matrox_bios* bd)
{
	unsigned int pins_offset;

	if (readb(vbios) != 0x55 || readb(vbios + 1) != 0xAA) {
		return;
	}
	bd->bios_valid = 1;
	get_bios_version(vbios, bd);
	get_bios_output(vbios, bd);
	get_bios_tvout(vbios, bd);
#if defined(__powerpc__)
	/* On PowerPC cards, the PInS offset isn't stored at the end of the
	 * BIOS image. Instead, you must search the entire BIOS image for
	 * the magic PInS signature.
	 *
	 * This actually applies to all OpenFirmware base cards. Since these
	 * cards could be put in a MIPS or SPARC system, should the condition
	 * be something different? 
	 */
	for ( pins_offset = 0 ; pins_offset <= 0xFF80 ; pins_offset++ ) {
		unsigned char header[3];

		header[0] = readb(vbios + pins_offset);
		header[1] = readb(vbios + pins_offset + 1);
		header[2] = readb(vbios + pins_offset + 2);
		if ( (header[0] == 0x2E) && (header[1] == 0x41) &&
		     ((header[2] == 0x40) || (header[2] == 0x80)) ) {
			printk(KERN_INFO "PInS data found at offset %u\n", pins_offset);
			get_pins(vbios + pins_offset, bd);
			break;
		}
	}
#else
	/* Non-PowerPC: the 16-bit PInS offset is stored at 0x7FFC/0x7FFD. */
	pins_offset = readb(vbios + 0x7FFC) | (readb(vbios + 0x7FFD) << 8);
	if (pins_offset <= 0xFF80) {
		get_pins(vbios + pins_offset, bd);
	}
#endif
}

/* Apply PInS version 1 (Millennium) limits and clocks from @bd. */
static int parse_pins1(struct matrox_fb_info *minfo, const struct matrox_bios *bd)
{
	unsigned int maxdac;

	switch (bd->pins[22]) {
		case 0:		maxdac = 175000; break;
		case 1:		maxdac = 220000; break;
		default:	maxdac = 240000; break;
	}
	/* An explicit non-zero value at offset 24 overrides the table. */
	if (get_unaligned_le16(bd->pins + 24)) {
		maxdac = get_unaligned_le16(bd->pins + 24) * 10;
	}
	minfo->limits.pixel.vcomax = maxdac;
	minfo->values.pll.system = get_unaligned_le16(bd->pins + 28) ?
			get_unaligned_le16(bd->pins + 28) * 10 : 50000;
	/* ignore 4MB, 8MB, module clocks */
	minfo->features.pll.ref_freq = 14318;
	minfo->values.reg.mctlwtst = 0x00030101;
	return 0;
}

/* Fallback limits when no usable PInS data exists (Millennium). */
static void default_pins1(struct matrox_fb_info *minfo)
{
	/* Millennium */
	minfo->limits.pixel.vcomax = 220000;
	minfo->values.pll.system = 50000;
	minfo->features.pll.ref_freq = 14318;
	minfo->values.reg.mctlwtst = 0x00030101;
}

/* Apply PInS version 2 (Millennium II / Mystique) data; 0xFF fields
 * mean "use default". */
static int parse_pins2(struct matrox_fb_info *minfo, const struct matrox_bios *bd)
{
	minfo->limits.pixel.vcomax =
	minfo->limits.system.vcomax = (bd->pins[41] == 0xFF) ? 230000 : ((bd->pins[41] + 100) * 1000);
	/* Assemble memory wait-state control from the four flag bits. */
	minfo->values.reg.mctlwtst = ((bd->pins[51] & 0x01) ? 0x00000001 : 0) |
				     ((bd->pins[51] & 0x02) ? 0x00000100 : 0) |
				     ((bd->pins[51] & 0x04) ? 0x00010000 : 0) |
				     ((bd->pins[51] & 0x08) ? 0x00020000 : 0);
	minfo->values.pll.system = (bd->pins[43] == 0xFF) ?
			50000 : ((bd->pins[43] + 100) * 1000);
	minfo->features.pll.ref_freq = 14318;
	return 0;
}

/* Fallback limits for Millennium II / Mystique. */
static void default_pins2(struct matrox_fb_info *minfo)
{
	/* Millennium II, Mystique */
	minfo->limits.pixel.vcomax =
	minfo->limits.system.vcomax = 230000;
	minfo->values.reg.mctlwtst = 0x00030101;
	minfo->values.pll.system = 50000;
	minfo->features.pll.ref_freq = 14318;
}

/* Apply PInS version 3 (G100 / G200) data; 0xFF / 0xFFFFFFFF fields
 * mean "use default". */
static int parse_pins3(struct matrox_fb_info *minfo, const struct matrox_bios *bd)
{
	minfo->limits.pixel.vcomax =
	minfo->limits.system.vcomax = (bd->pins[36] == 0xFF) ? 230000 : ((bd->pins[36] + 100) * 1000);
	minfo->values.reg.mctlwtst = get_unaligned_le32(bd->pins + 48) == 0xFFFFFFFF ?
			0x01250A21 : get_unaligned_le32(bd->pins + 48);
	/* memory config */
	minfo->values.reg.memrdbk = ((bd->pins[57] << 21) & 0x1E000000) |
				    ((bd->pins[57] << 22) & 0x00C00000) |
				    ((bd->pins[56] <<  1) & 0x000001E0) |
				    ( bd->pins[56]        & 0x0000000F);
	minfo->values.reg.opt = (bd->pins[54] & 7) << 10;
	minfo->values.reg.opt2 = bd->pins[58] << 12;
	minfo->features.pll.ref_freq = (bd->pins[52] & 0x20) ? 14318 : 27000;
	return 0;
}

/* Fallback limits for G100 / G200. */
static void default_pins3(struct matrox_fb_info *minfo)
{
	/* G100, G200 */
	minfo->limits.pixel.vcomax =
	minfo->limits.system.vcomax = 230000;
	minfo->values.reg.mctlwtst = 0x01250A21;
	minfo->values.reg.memrdbk = 0x00000000;
	minfo->values.reg.opt = 0x00000C00;
	minfo->values.reg.opt2 = 0x00000000;
	minfo->features.pll.ref_freq = 27000;
}

/* Apply PInS version 4 (G400) data; 0xFF fields mean "use default". */
static int parse_pins4(struct matrox_fb_info *minfo, const struct matrox_bios *bd)
{
	minfo->limits.pixel.vcomax = (bd->pins[ 39] == 0xFF) ? 230000 : bd->pins[ 39] * 4000;
	minfo->limits.system.vcomax = (bd->pins[ 38] == 0xFF) ? 
			minfo->limits.pixel.vcomax : bd->pins[ 38] * 4000;
	minfo->values.reg.mctlwtst = get_unaligned_le32(bd->pins + 71);
	/* memory read-back bit packing, same layout as parse_pins3 */
	minfo->values.reg.memrdbk = ((bd->pins[87] << 21) & 0x1E000000) |
				    ((bd->pins[87] << 22) & 0x00C00000) |
				    ((bd->pins[86] <<  1) & 0x000001E0) |
				    ( bd->pins[86]        & 0x0000000F);
	minfo->values.reg.opt = ((bd->pins[53] << 15) & 0x00400000) |
				((bd->pins[53] << 22) & 0x10000000) |
				((bd->pins[53] <<  7) & 0x00001C00);
	minfo->values.reg.opt3 = get_unaligned_le32(bd->pins + 67);
	minfo->values.pll.system = (bd->pins[ 65] == 0xFF) ? 200000 : bd->pins[ 65] * 4000;
	minfo->features.pll.ref_freq = (bd->pins[ 92] & 0x01) ? 14318 : 27000;
	return 0;
}

/* Fallback limits for G400. */
static void default_pins4(struct matrox_fb_info *minfo)
{
	/* G400 */
	minfo->limits.pixel.vcomax =
	minfo->limits.system.vcomax = 252000;
	minfo->values.reg.mctlwtst = 0x04A450A1;
	minfo->values.reg.memrdbk = 0x000000E7;
	minfo->values.reg.opt = 0x10000400;
	minfo->values.reg.opt3 = 0x0190A419;
	minfo->values.pll.system = 200000;
	minfo->features.pll.ref_freq = 27000;
}

/* Apply PInS version 5 (G450 / G550) data.  The byte at offset 4
 * selects the clock multiplier; 0xFF fields fall back to defaults or
 * chain to the previously computed limit. */
static int parse_pins5(struct matrox_fb_info *minfo, const struct matrox_bios *bd)
{
	unsigned int mult;

	mult = bd->pins[4]?8000:6000;

	minfo->limits.pixel.vcomax = (bd->pins[ 38] == 0xFF) ? 600000 : bd->pins[ 38] * mult;
	minfo->limits.system.vcomax = (bd->pins[ 36] == 0xFF) ? minfo->limits.pixel.vcomax : bd->pins[ 36] * mult;
	minfo->limits.video.vcomax = (bd->pins[ 37] == 0xFF) ? minfo->limits.system.vcomax : bd->pins[ 37] * mult;
	minfo->limits.pixel.vcomin = (bd->pins[123] == 0xFF) ? 256000 : bd->pins[123] * mult;
	minfo->limits.system.vcomin = (bd->pins[121] == 0xFF) ? minfo->limits.pixel.vcomin : bd->pins[121] * mult;
	minfo->limits.video.vcomin = (bd->pins[122] == 0xFF) ? minfo->limits.system.vcomin : bd->pins[122] * mult;
	minfo->values.pll.system =
	minfo->values.pll.video = (bd->pins[ 92] == 0xFF) ? 284000 : bd->pins[ 92] * 4000;
	minfo->values.reg.opt = get_unaligned_le32(bd->pins + 48);
	minfo->values.reg.opt2 = get_unaligned_le32(bd->pins + 52);
	minfo->values.reg.opt3 = get_unaligned_le32(bd->pins + 94);
	minfo->values.reg.mctlwtst = get_unaligned_le32(bd->pins + 98);
	minfo->values.reg.memmisc = get_unaligned_le32(bd->pins + 102);
	minfo->values.reg.memrdbk = get_unaligned_le32(bd->pins + 106);
	minfo->features.pll.ref_freq = (bd->pins[110] & 0x01) ? 14318 : 27000;
	minfo->values.memory.ddr = (bd->pins[114] & 0x60) == 0x20;
	minfo->values.memory.dll = (bd->pins[115] & 0x02) != 0;
	minfo->values.memory.emrswen = (bd->pins[115] & 0x01) != 0;
	minfo->values.reg.maccess = minfo->values.memory.emrswen ? 0x00004000 : 0x00000000;
	if (bd->pins[115] & 4) {
		minfo->values.reg.mctlwtst_core = minfo->values.reg.mctlwtst;
	} else {
		/* Remap the low 3 wait-state bits for the core clock. */
		u_int32_t wtst_xlat[] = { 0, 1, 5, 6, 7, 5, 2, 3 };

		minfo->values.reg.mctlwtst_core = (minfo->values.reg.mctlwtst & ~7) |
						  wtst_xlat[minfo->values.reg.mctlwtst & 7];
	}
	minfo->max_pixel_clock_panellink = bd->pins[47] * 4000;
	return 0;
}

/* Fallback limits for G450 / G550 (values taken from the author's
 * own 16MB G450, per the original comment). */
static void default_pins5(struct matrox_fb_info *minfo)
{
	/* Mine 16MB G450 with SDRAM DDR */
	minfo->limits.pixel.vcomax =
	minfo->limits.system.vcomax =
	minfo->limits.video.vcomax = 600000;
	minfo->limits.pixel.vcomin =
	minfo->limits.system.vcomin =
	minfo->limits.video.vcomin = 256000;
	minfo->values.pll.system =
	minfo->values.pll.video = 284000;
	minfo->values.reg.opt = 0x404A1160;
	minfo->values.reg.opt2 = 0x0000AC00;
	minfo->values.reg.opt3 = 0x0090A409;
	minfo->values.reg.mctlwtst_core =
	minfo->values.reg.mctlwtst = 0x0C81462B;
	minfo->values.reg.memmisc = 0x80000004;
	minfo->values.reg.memrdbk = 0x01001103;
	minfo->features.pll.ref_freq = 27000;
	minfo->values.memory.ddr = 1;
	minfo->values.memory.dll = 1;
	minfo->values.memory.emrswen = 1;
	minfo->values.reg.maccess = 0x00004000;
}

/* Install per-chip defaults, then override them with the PInS data in
 * @bd when it is present, the right length and a known version.
 * Returns 0 on success, -1 when no usable PInS data was found. */
static int matroxfb_set_limits(struct matrox_fb_info *minfo, const struct matrox_bios *bd)
{
	unsigned int pins_version;
	static const unsigned int 
	pinslen[] = { 64, 64, 64, 128, 128 };	/* expected length per PInS version */

	switch (minfo->chip) {
		case MGA_2064:	default_pins1(minfo); break;
		case MGA_2164:
		case MGA_1064:
		case MGA_1164:	default_pins2(minfo); break;
		case MGA_G100:
		case MGA_G200:	default_pins3(minfo); break;
		case MGA_G400:	default_pins4(minfo); break;
		case MGA_G450:
		case MGA_G550:	default_pins5(minfo); break;
	}
	if (!bd->bios_valid) {
		printk(KERN_INFO "matroxfb: Your Matrox device does not have BIOS\n");
		return -1;
	}
	if (bd->pins_len < 64) {
		printk(KERN_INFO "matroxfb: BIOS on your Matrox device does not contain powerup info\n");
		return -1;
	}
	/* 0x2E 0x41 header carries an explicit version byte; the old
	 * fixed-length format is implicitly version 1. */
	if (bd->pins[0] == 0x2E && bd->pins[1] == 0x41) {
		pins_version = bd->pins[5];
		if (pins_version < 2 || pins_version > 5) {
			printk(KERN_INFO "matroxfb: Unknown version (%u) of powerup info\n", pins_version);
			return -1;
		}
	} else {
		pins_version = 1;
	}
	if (bd->pins_len != pinslen[pins_version - 1]) {
		printk(KERN_INFO "matroxfb: Invalid powerup info\n");
		return -1;
	}
	switch (pins_version) {
		case 1:
			return parse_pins1(minfo, bd);
		case 2:
			return parse_pins2(minfo, bd);
		case 3:
			return parse_pins3(minfo, bd);
		case 4:
			return parse_pins4(minfo, bd);
		case 5:
			return parse_pins5(minfo, bd);
		default:
			printk(KERN_DEBUG "matroxfb: Powerup info version %u is not yet supported\n", pins_version);
			return -1;
	}
}

/* Temporarily map the card's expansion ROM over the framebuffer BAR,
 * parse the BIOS/PInS data into minfo->bios, restore the original PCI
 * config, and apply the resulting limits.  On x86 also tries the
 * legacy BIOS shadow at 0xC0000 when the ROM had no valid image. */
void matroxfb_read_pins(struct matrox_fb_info *minfo)
{
	u32 opt;
	u32 biosbase;
	u32 fbbase;
	struct pci_dev *pdev = minfo->pcidev;

	memset(&minfo->bios, 0, sizeof(minfo->bios));
	pci_read_config_dword(pdev, PCI_OPTION_REG, &opt);
	pci_write_config_dword(pdev, PCI_OPTION_REG, opt | PCI_OPTION_ENABLE_ROM);
	pci_read_config_dword(pdev, PCI_ROM_ADDRESS, &biosbase);
	pci_read_config_dword(pdev, minfo->devflags.fbResource, &fbbase);
	/* Point the ROM BAR at the framebuffer aperture so the ROM is
	 * readable through the already-mapped video.vbase. */
	pci_write_config_dword(pdev, PCI_ROM_ADDRESS, (fbbase & PCI_ROM_ADDRESS_MASK) | PCI_ROM_ADDRESS_ENABLE);
	parse_bios(vaddr_va(minfo->video.vbase), &minfo->bios);
	pci_write_config_dword(pdev, PCI_ROM_ADDRESS, biosbase);
	pci_write_config_dword(pdev, PCI_OPTION_REG, opt);
#ifdef CONFIG_X86
	if (!minfo->bios.bios_valid) {
		unsigned char __iomem* b;

		b = ioremap(0x000C0000, 65536);
		if (!b) {
			printk(KERN_INFO "matroxfb: Unable to map legacy BIOS\n");
		} else {
			unsigned int ven = readb(b+0x64+0) | (readb(b+0x64+1) << 8);
			unsigned int dev = readb(b+0x64+2) | (readb(b+0x64+3) << 8);

			/* The shadow BIOS belongs to the primary VGA device;
			 * only trust it if its IDs match this card. */
			if (ven != pdev->vendor || dev != pdev->device) {
				printk(KERN_INFO "matroxfb: Legacy BIOS is for %04X:%04X, while this device is %04X:%04X\n", ven, dev, pdev->vendor, pdev->device);
			} else {
				parse_bios(b, &minfo->bios);
			}
			iounmap(b);
		}
	}
#endif
	matroxfb_set_limits(minfo, &minfo->bios);
	printk(KERN_INFO "PInS memtype = %u\n", (minfo->values.reg.opt & 0x1C00) >> 10);
}

EXPORT_SYMBOL(matroxfb_DAC_in);
EXPORT_SYMBOL(matroxfb_DAC_out);
EXPORT_SYMBOL(matroxfb_var2my);
EXPORT_SYMBOL(matroxfb_PLL_calcclock);
EXPORT_SYMBOL(matroxfb_vgaHWinit);		/* DAC1064, Ti3026 */
EXPORT_SYMBOL(matroxfb_vgaHWrestore);		/* DAC1064, Ti3026 */
EXPORT_SYMBOL(matroxfb_read_pins);

MODULE_AUTHOR("(c) 1999-2002 Petr Vandrovec <vandrove@vc.cvut.cz>");
MODULE_DESCRIPTION("Miscellaneous support for Matrox video cards");
MODULE_LICENSE("GPL");
gpl-2.0
hisilicon/linux-x5hd2
drivers/video/matrox/matroxfb_misc.c
14577
25093
/* * * Hardware accelerated Matrox Millennium I, II, Mystique, G100, G200 and G400 * * (c) 1998-2002 Petr Vandrovec <vandrove@vc.cvut.cz> * * Portions Copyright (c) 2001 Matrox Graphics Inc. * * Version: 1.65 2002/08/14 * * MTRR stuff: 1998 Tom Rini <trini@kernel.crashing.org> * * Contributors: "menion?" <menion@mindless.com> * Betatesting, fixes, ideas * * "Kurt Garloff" <garloff@suse.de> * Betatesting, fixes, ideas, videomodes, videomodes timmings * * "Tom Rini" <trini@kernel.crashing.org> * MTRR stuff, PPC cleanups, betatesting, fixes, ideas * * "Bibek Sahu" <scorpio@dodds.net> * Access device through readb|w|l and write b|w|l * Extensive debugging stuff * * "Daniel Haun" <haund@usa.net> * Testing, hardware cursor fixes * * "Scott Wood" <sawst46+@pitt.edu> * Fixes * * "Gerd Knorr" <kraxel@goldbach.isdn.cs.tu-berlin.de> * Betatesting * * "Kelly French" <targon@hazmat.com> * "Fernando Herrera" <fherrera@eurielec.etsit.upm.es> * Betatesting, bug reporting * * "Pablo Bianucci" <pbian@pccp.com.ar> * Fixes, ideas, betatesting * * "Inaky Perez Gonzalez" <inaky@peloncho.fis.ucm.es> * Fixes, enhandcements, ideas, betatesting * * "Ryuichi Oikawa" <roikawa@rr.iiij4u.or.jp> * PPC betatesting, PPC support, backward compatibility * * "Paul Womar" <Paul@pwomar.demon.co.uk> * "Owen Waller" <O.Waller@ee.qub.ac.uk> * PPC betatesting * * "Thomas Pornin" <pornin@bolet.ens.fr> * Alpha betatesting * * "Pieter van Leuven" <pvl@iae.nl> * "Ulf Jaenicke-Roessler" <ujr@physik.phy.tu-dresden.de> * G100 testing * * "H. Peter Arvin" <hpa@transmeta.com> * Ideas * * "Cort Dougan" <cort@cs.nmt.edu> * CHRP fixes and PReP cleanup * * "Mark Vojkovich" <mvojkovi@ucsd.edu> * G400 support * * "David C. Hansen" <haveblue@us.ibm.com> * Fixes * * "Ian Romanick" <idr@us.ibm.com> * Find PInS data in BIOS on PowerPC systems. 
* * (following author is not in any relation with this code, but his code * is included in this driver) * * Based on framebuffer driver for VBE 2.0 compliant graphic boards * (c) 1998 Gerd Knorr <kraxel@cs.tu-berlin.de> * * (following author is not in any relation with this code, but his ideas * were used when writing this driver) * * FreeVBE/AF (Matrox), "Shawn Hargreaves" <shawn@talula.demon.co.uk> * */ #include "matroxfb_misc.h" #include <linux/interrupt.h> #include <linux/matroxfb.h> void matroxfb_DAC_out(const struct matrox_fb_info *minfo, int reg, int val) { DBG_REG(__func__) mga_outb(M_RAMDAC_BASE+M_X_INDEX, reg); mga_outb(M_RAMDAC_BASE+M_X_DATAREG, val); } int matroxfb_DAC_in(const struct matrox_fb_info *minfo, int reg) { DBG_REG(__func__) mga_outb(M_RAMDAC_BASE+M_X_INDEX, reg); return mga_inb(M_RAMDAC_BASE+M_X_DATAREG); } void matroxfb_var2my(struct fb_var_screeninfo* var, struct my_timming* mt) { unsigned int pixclock = var->pixclock; DBG(__func__) if (!pixclock) pixclock = 10000; /* 10ns = 100MHz */ mt->pixclock = 1000000000 / pixclock; if (mt->pixclock < 1) mt->pixclock = 1; mt->mnp = -1; mt->dblscan = var->vmode & FB_VMODE_DOUBLE; mt->interlaced = var->vmode & FB_VMODE_INTERLACED; mt->HDisplay = var->xres; mt->HSyncStart = mt->HDisplay + var->right_margin; mt->HSyncEnd = mt->HSyncStart + var->hsync_len; mt->HTotal = mt->HSyncEnd + var->left_margin; mt->VDisplay = var->yres; mt->VSyncStart = mt->VDisplay + var->lower_margin; mt->VSyncEnd = mt->VSyncStart + var->vsync_len; mt->VTotal = mt->VSyncEnd + var->upper_margin; mt->sync = var->sync; } int matroxfb_PLL_calcclock(const struct matrox_pll_features* pll, unsigned int freq, unsigned int fmax, unsigned int* in, unsigned int* feed, unsigned int* post) { unsigned int bestdiff = ~0; unsigned int bestvco = 0; unsigned int fxtal = pll->ref_freq; unsigned int fwant; unsigned int p; DBG(__func__) fwant = freq; #ifdef DEBUG printk(KERN_ERR "post_shift_max: %d\n", pll->post_shift_max); printk(KERN_ERR "ref_freq: 
%d\n", pll->ref_freq); printk(KERN_ERR "freq: %d\n", freq); printk(KERN_ERR "vco_freq_min: %d\n", pll->vco_freq_min); printk(KERN_ERR "in_div_min: %d\n", pll->in_div_min); printk(KERN_ERR "in_div_max: %d\n", pll->in_div_max); printk(KERN_ERR "feed_div_min: %d\n", pll->feed_div_min); printk(KERN_ERR "feed_div_max: %d\n", pll->feed_div_max); printk(KERN_ERR "fmax: %d\n", fmax); #endif for (p = 1; p <= pll->post_shift_max; p++) { if (fwant * 2 > fmax) break; fwant *= 2; } if (fwant < pll->vco_freq_min) fwant = pll->vco_freq_min; if (fwant > fmax) fwant = fmax; for (; p-- > 0; fwant >>= 1, bestdiff >>= 1) { unsigned int m; if (fwant < pll->vco_freq_min) break; for (m = pll->in_div_min; m <= pll->in_div_max; m++) { unsigned int diff, fvco; unsigned int n; n = (fwant * (m + 1) + (fxtal >> 1)) / fxtal - 1; if (n > pll->feed_div_max) break; if (n < pll->feed_div_min) n = pll->feed_div_min; fvco = (fxtal * (n + 1)) / (m + 1); if (fvco < fwant) diff = fwant - fvco; else diff = fvco - fwant; if (diff < bestdiff) { bestdiff = diff; *post = p; *in = m; *feed = n; bestvco = fvco; } } } dprintk(KERN_ERR "clk: %02X %02X %02X %d %d %d\n", *in, *feed, *post, fxtal, bestvco, fwant); return bestvco; } int matroxfb_vgaHWinit(struct matrox_fb_info *minfo, struct my_timming *m) { unsigned int hd, hs, he, hbe, ht; unsigned int vd, vs, ve, vt, lc; unsigned int wd; unsigned int divider; int i; struct matrox_hw_state * const hw = &minfo->hw; DBG(__func__) hw->SEQ[0] = 0x00; hw->SEQ[1] = 0x01; /* or 0x09 */ hw->SEQ[2] = 0x0F; /* bitplanes */ hw->SEQ[3] = 0x00; hw->SEQ[4] = 0x0E; /* CRTC 0..7, 9, 16..19, 21, 22 are reprogrammed by Matrox Millennium code... 
Hope that by MGA1064 too */ if (m->dblscan) { m->VTotal <<= 1; m->VDisplay <<= 1; m->VSyncStart <<= 1; m->VSyncEnd <<= 1; } if (m->interlaced) { m->VTotal >>= 1; m->VDisplay >>= 1; m->VSyncStart >>= 1; m->VSyncEnd >>= 1; } /* GCTL is ignored when not using 0xA0000 aperture */ hw->GCTL[0] = 0x00; hw->GCTL[1] = 0x00; hw->GCTL[2] = 0x00; hw->GCTL[3] = 0x00; hw->GCTL[4] = 0x00; hw->GCTL[5] = 0x40; hw->GCTL[6] = 0x05; hw->GCTL[7] = 0x0F; hw->GCTL[8] = 0xFF; /* Whole ATTR is ignored in PowerGraphics mode */ for (i = 0; i < 16; i++) hw->ATTR[i] = i; hw->ATTR[16] = 0x41; hw->ATTR[17] = 0xFF; hw->ATTR[18] = 0x0F; hw->ATTR[19] = 0x00; hw->ATTR[20] = 0x00; hd = m->HDisplay >> 3; hs = m->HSyncStart >> 3; he = m->HSyncEnd >> 3; ht = m->HTotal >> 3; /* standard timmings are in 8pixels, but for interleaved we cannot */ /* do it for 4bpp (because of (4bpp >> 1(interleaved))/4 == 0) */ /* using 16 or more pixels per unit can save us */ divider = minfo->curr.final_bppShift; while (divider & 3) { hd >>= 1; hs >>= 1; he >>= 1; ht >>= 1; divider <<= 1; } divider = divider / 4; /* divider can be from 1 to 8 */ while (divider > 8) { hd <<= 1; hs <<= 1; he <<= 1; ht <<= 1; divider >>= 1; } hd = hd - 1; hs = hs - 1; he = he - 1; ht = ht - 1; vd = m->VDisplay - 1; vs = m->VSyncStart - 1; ve = m->VSyncEnd - 1; vt = m->VTotal - 2; lc = vd; /* G200 cannot work with (ht & 7) == 6 */ if (((ht & 0x07) == 0x06) || ((ht & 0x0F) == 0x04)) ht++; hbe = ht; wd = minfo->fbcon.var.xres_virtual * minfo->curr.final_bppShift / 64; hw->CRTCEXT[0] = 0; hw->CRTCEXT[5] = 0; if (m->interlaced) { hw->CRTCEXT[0] = 0x80; hw->CRTCEXT[5] = (hs + he - ht) >> 1; if (!m->dblscan) wd <<= 1; vt &= ~1; } hw->CRTCEXT[0] |= (wd & 0x300) >> 4; hw->CRTCEXT[1] = (((ht - 4) & 0x100) >> 8) | ((hd & 0x100) >> 7) | /* blanking */ ((hs & 0x100) >> 6) | /* sync start */ (hbe & 0x040); /* end hor. 
blanking */ /* FIXME: Enable vidrst only on G400, and only if TV-out is used */ if (minfo->outputs[1].src == MATROXFB_SRC_CRTC1) hw->CRTCEXT[1] |= 0x88; /* enable horizontal and vertical vidrst */ hw->CRTCEXT[2] = ((vt & 0xC00) >> 10) | ((vd & 0x400) >> 8) | /* disp end */ ((vd & 0xC00) >> 7) | /* vblanking start */ ((vs & 0xC00) >> 5) | ((lc & 0x400) >> 3); hw->CRTCEXT[3] = (divider - 1) | 0x80; hw->CRTCEXT[4] = 0; hw->CRTC[0] = ht-4; hw->CRTC[1] = hd; hw->CRTC[2] = hd; hw->CRTC[3] = (hbe & 0x1F) | 0x80; hw->CRTC[4] = hs; hw->CRTC[5] = ((hbe & 0x20) << 2) | (he & 0x1F); hw->CRTC[6] = vt & 0xFF; hw->CRTC[7] = ((vt & 0x100) >> 8) | ((vd & 0x100) >> 7) | ((vs & 0x100) >> 6) | ((vd & 0x100) >> 5) | ((lc & 0x100) >> 4) | ((vt & 0x200) >> 4) | ((vd & 0x200) >> 3) | ((vs & 0x200) >> 2); hw->CRTC[8] = 0x00; hw->CRTC[9] = ((vd & 0x200) >> 4) | ((lc & 0x200) >> 3); if (m->dblscan && !m->interlaced) hw->CRTC[9] |= 0x80; for (i = 10; i < 16; i++) hw->CRTC[i] = 0x00; hw->CRTC[16] = vs /* & 0xFF */; hw->CRTC[17] = (ve & 0x0F) | 0x20; hw->CRTC[18] = vd /* & 0xFF */; hw->CRTC[19] = wd /* & 0xFF */; hw->CRTC[20] = 0x00; hw->CRTC[21] = vd /* & 0xFF */; hw->CRTC[22] = (vt + 1) /* & 0xFF */; hw->CRTC[23] = 0xC3; hw->CRTC[24] = lc; return 0; }; void matroxfb_vgaHWrestore(struct matrox_fb_info *minfo) { int i; struct matrox_hw_state * const hw = &minfo->hw; CRITFLAGS DBG(__func__) dprintk(KERN_INFO "MiscOutReg: %02X\n", hw->MiscOutReg); dprintk(KERN_INFO "SEQ regs: "); for (i = 0; i < 5; i++) dprintk("%02X:", hw->SEQ[i]); dprintk("\n"); dprintk(KERN_INFO "GDC regs: "); for (i = 0; i < 9; i++) dprintk("%02X:", hw->GCTL[i]); dprintk("\n"); dprintk(KERN_INFO "CRTC regs: "); for (i = 0; i < 25; i++) dprintk("%02X:", hw->CRTC[i]); dprintk("\n"); dprintk(KERN_INFO "ATTR regs: "); for (i = 0; i < 21; i++) dprintk("%02X:", hw->ATTR[i]); dprintk("\n"); CRITBEGIN mga_inb(M_ATTR_RESET); mga_outb(M_ATTR_INDEX, 0); mga_outb(M_MISC_REG, hw->MiscOutReg); for (i = 1; i < 5; i++) mga_setr(M_SEQ_INDEX, 
i, hw->SEQ[i]); mga_setr(M_CRTC_INDEX, 17, hw->CRTC[17] & 0x7F); for (i = 0; i < 25; i++) mga_setr(M_CRTC_INDEX, i, hw->CRTC[i]); for (i = 0; i < 9; i++) mga_setr(M_GRAPHICS_INDEX, i, hw->GCTL[i]); for (i = 0; i < 21; i++) { mga_inb(M_ATTR_RESET); mga_outb(M_ATTR_INDEX, i); mga_outb(M_ATTR_INDEX, hw->ATTR[i]); } mga_outb(M_PALETTE_MASK, 0xFF); mga_outb(M_DAC_REG, 0x00); for (i = 0; i < 768; i++) mga_outb(M_DAC_VAL, hw->DACpal[i]); mga_inb(M_ATTR_RESET); mga_outb(M_ATTR_INDEX, 0x20); CRITEND } static void get_pins(unsigned char __iomem* pins, struct matrox_bios* bd) { unsigned int b0 = readb(pins); if (b0 == 0x2E && readb(pins+1) == 0x41) { unsigned int pins_len = readb(pins+2); unsigned int i; unsigned char cksum; unsigned char* dst = bd->pins; if (pins_len < 3 || pins_len > 128) { return; } *dst++ = 0x2E; *dst++ = 0x41; *dst++ = pins_len; cksum = 0x2E + 0x41 + pins_len; for (i = 3; i < pins_len; i++) { cksum += *dst++ = readb(pins+i); } if (cksum) { return; } bd->pins_len = pins_len; } else if (b0 == 0x40 && readb(pins+1) == 0x00) { unsigned int i; unsigned char* dst = bd->pins; *dst++ = 0x40; *dst++ = 0; for (i = 2; i < 0x40; i++) { *dst++ = readb(pins+i); } bd->pins_len = 0x40; } } static void get_bios_version(unsigned char __iomem * vbios, struct matrox_bios* bd) { unsigned int pcir_offset; pcir_offset = readb(vbios + 24) | (readb(vbios + 25) << 8); if (pcir_offset >= 26 && pcir_offset < 0xFFE0 && readb(vbios + pcir_offset ) == 'P' && readb(vbios + pcir_offset + 1) == 'C' && readb(vbios + pcir_offset + 2) == 'I' && readb(vbios + pcir_offset + 3) == 'R') { unsigned char h; h = readb(vbios + pcir_offset + 0x12); bd->version.vMaj = (h >> 4) & 0xF; bd->version.vMin = h & 0xF; bd->version.vRev = readb(vbios + pcir_offset + 0x13); } else { unsigned char h; h = readb(vbios + 5); bd->version.vMaj = (h >> 4) & 0xF; bd->version.vMin = h & 0xF; bd->version.vRev = 0; } } static void get_bios_output(unsigned char __iomem* vbios, struct matrox_bios* bd) { unsigned char b; b 
= readb(vbios + 0x7FF1); if (b == 0xFF) { b = 0; } bd->output.state = b; } static void get_bios_tvout(unsigned char __iomem* vbios, struct matrox_bios* bd) { unsigned int i; /* Check for 'IBM .*(V....TVO' string - it means TVO BIOS */ bd->output.tvout = 0; if (readb(vbios + 0x1D) != 'I' || readb(vbios + 0x1E) != 'B' || readb(vbios + 0x1F) != 'M' || readb(vbios + 0x20) != ' ') { return; } for (i = 0x2D; i < 0x2D + 128; i++) { unsigned char b = readb(vbios + i); if (b == '(' && readb(vbios + i + 1) == 'V') { if (readb(vbios + i + 6) == 'T' && readb(vbios + i + 7) == 'V' && readb(vbios + i + 8) == 'O') { bd->output.tvout = 1; } return; } if (b == 0) break; } } static void parse_bios(unsigned char __iomem* vbios, struct matrox_bios* bd) { unsigned int pins_offset; if (readb(vbios) != 0x55 || readb(vbios + 1) != 0xAA) { return; } bd->bios_valid = 1; get_bios_version(vbios, bd); get_bios_output(vbios, bd); get_bios_tvout(vbios, bd); #if defined(__powerpc__) /* On PowerPC cards, the PInS offset isn't stored at the end of the * BIOS image. Instead, you must search the entire BIOS image for * the magic PInS signature. * * This actually applies to all OpenFirmware base cards. Since these * cards could be put in a MIPS or SPARC system, should the condition * be something different? 
*/ for ( pins_offset = 0 ; pins_offset <= 0xFF80 ; pins_offset++ ) { unsigned char header[3]; header[0] = readb(vbios + pins_offset); header[1] = readb(vbios + pins_offset + 1); header[2] = readb(vbios + pins_offset + 2); if ( (header[0] == 0x2E) && (header[1] == 0x41) && ((header[2] == 0x40) || (header[2] == 0x80)) ) { printk(KERN_INFO "PInS data found at offset %u\n", pins_offset); get_pins(vbios + pins_offset, bd); break; } } #else pins_offset = readb(vbios + 0x7FFC) | (readb(vbios + 0x7FFD) << 8); if (pins_offset <= 0xFF80) { get_pins(vbios + pins_offset, bd); } #endif } static int parse_pins1(struct matrox_fb_info *minfo, const struct matrox_bios *bd) { unsigned int maxdac; switch (bd->pins[22]) { case 0: maxdac = 175000; break; case 1: maxdac = 220000; break; default: maxdac = 240000; break; } if (get_unaligned_le16(bd->pins + 24)) { maxdac = get_unaligned_le16(bd->pins + 24) * 10; } minfo->limits.pixel.vcomax = maxdac; minfo->values.pll.system = get_unaligned_le16(bd->pins + 28) ? get_unaligned_le16(bd->pins + 28) * 10 : 50000; /* ignore 4MB, 8MB, module clocks */ minfo->features.pll.ref_freq = 14318; minfo->values.reg.mctlwtst = 0x00030101; return 0; } static void default_pins1(struct matrox_fb_info *minfo) { /* Millennium */ minfo->limits.pixel.vcomax = 220000; minfo->values.pll.system = 50000; minfo->features.pll.ref_freq = 14318; minfo->values.reg.mctlwtst = 0x00030101; } static int parse_pins2(struct matrox_fb_info *minfo, const struct matrox_bios *bd) { minfo->limits.pixel.vcomax = minfo->limits.system.vcomax = (bd->pins[41] == 0xFF) ? 230000 : ((bd->pins[41] + 100) * 1000); minfo->values.reg.mctlwtst = ((bd->pins[51] & 0x01) ? 0x00000001 : 0) | ((bd->pins[51] & 0x02) ? 0x00000100 : 0) | ((bd->pins[51] & 0x04) ? 0x00010000 : 0) | ((bd->pins[51] & 0x08) ? 0x00020000 : 0); minfo->values.pll.system = (bd->pins[43] == 0xFF) ? 
50000 : ((bd->pins[43] + 100) * 1000); minfo->features.pll.ref_freq = 14318; return 0; } static void default_pins2(struct matrox_fb_info *minfo) { /* Millennium II, Mystique */ minfo->limits.pixel.vcomax = minfo->limits.system.vcomax = 230000; minfo->values.reg.mctlwtst = 0x00030101; minfo->values.pll.system = 50000; minfo->features.pll.ref_freq = 14318; } static int parse_pins3(struct matrox_fb_info *minfo, const struct matrox_bios *bd) { minfo->limits.pixel.vcomax = minfo->limits.system.vcomax = (bd->pins[36] == 0xFF) ? 230000 : ((bd->pins[36] + 100) * 1000); minfo->values.reg.mctlwtst = get_unaligned_le32(bd->pins + 48) == 0xFFFFFFFF ? 0x01250A21 : get_unaligned_le32(bd->pins + 48); /* memory config */ minfo->values.reg.memrdbk = ((bd->pins[57] << 21) & 0x1E000000) | ((bd->pins[57] << 22) & 0x00C00000) | ((bd->pins[56] << 1) & 0x000001E0) | ( bd->pins[56] & 0x0000000F); minfo->values.reg.opt = (bd->pins[54] & 7) << 10; minfo->values.reg.opt2 = bd->pins[58] << 12; minfo->features.pll.ref_freq = (bd->pins[52] & 0x20) ? 14318 : 27000; return 0; } static void default_pins3(struct matrox_fb_info *minfo) { /* G100, G200 */ minfo->limits.pixel.vcomax = minfo->limits.system.vcomax = 230000; minfo->values.reg.mctlwtst = 0x01250A21; minfo->values.reg.memrdbk = 0x00000000; minfo->values.reg.opt = 0x00000C00; minfo->values.reg.opt2 = 0x00000000; minfo->features.pll.ref_freq = 27000; } static int parse_pins4(struct matrox_fb_info *minfo, const struct matrox_bios *bd) { minfo->limits.pixel.vcomax = (bd->pins[ 39] == 0xFF) ? 230000 : bd->pins[ 39] * 4000; minfo->limits.system.vcomax = (bd->pins[ 38] == 0xFF) ? 
minfo->limits.pixel.vcomax : bd->pins[ 38] * 4000; minfo->values.reg.mctlwtst = get_unaligned_le32(bd->pins + 71); minfo->values.reg.memrdbk = ((bd->pins[87] << 21) & 0x1E000000) | ((bd->pins[87] << 22) & 0x00C00000) | ((bd->pins[86] << 1) & 0x000001E0) | ( bd->pins[86] & 0x0000000F); minfo->values.reg.opt = ((bd->pins[53] << 15) & 0x00400000) | ((bd->pins[53] << 22) & 0x10000000) | ((bd->pins[53] << 7) & 0x00001C00); minfo->values.reg.opt3 = get_unaligned_le32(bd->pins + 67); minfo->values.pll.system = (bd->pins[ 65] == 0xFF) ? 200000 : bd->pins[ 65] * 4000; minfo->features.pll.ref_freq = (bd->pins[ 92] & 0x01) ? 14318 : 27000; return 0; } static void default_pins4(struct matrox_fb_info *minfo) { /* G400 */ minfo->limits.pixel.vcomax = minfo->limits.system.vcomax = 252000; minfo->values.reg.mctlwtst = 0x04A450A1; minfo->values.reg.memrdbk = 0x000000E7; minfo->values.reg.opt = 0x10000400; minfo->values.reg.opt3 = 0x0190A419; minfo->values.pll.system = 200000; minfo->features.pll.ref_freq = 27000; } static int parse_pins5(struct matrox_fb_info *minfo, const struct matrox_bios *bd) { unsigned int mult; mult = bd->pins[4]?8000:6000; minfo->limits.pixel.vcomax = (bd->pins[ 38] == 0xFF) ? 600000 : bd->pins[ 38] * mult; minfo->limits.system.vcomax = (bd->pins[ 36] == 0xFF) ? minfo->limits.pixel.vcomax : bd->pins[ 36] * mult; minfo->limits.video.vcomax = (bd->pins[ 37] == 0xFF) ? minfo->limits.system.vcomax : bd->pins[ 37] * mult; minfo->limits.pixel.vcomin = (bd->pins[123] == 0xFF) ? 256000 : bd->pins[123] * mult; minfo->limits.system.vcomin = (bd->pins[121] == 0xFF) ? minfo->limits.pixel.vcomin : bd->pins[121] * mult; minfo->limits.video.vcomin = (bd->pins[122] == 0xFF) ? minfo->limits.system.vcomin : bd->pins[122] * mult; minfo->values.pll.system = minfo->values.pll.video = (bd->pins[ 92] == 0xFF) ? 
284000 : bd->pins[ 92] * 4000; minfo->values.reg.opt = get_unaligned_le32(bd->pins + 48); minfo->values.reg.opt2 = get_unaligned_le32(bd->pins + 52); minfo->values.reg.opt3 = get_unaligned_le32(bd->pins + 94); minfo->values.reg.mctlwtst = get_unaligned_le32(bd->pins + 98); minfo->values.reg.memmisc = get_unaligned_le32(bd->pins + 102); minfo->values.reg.memrdbk = get_unaligned_le32(bd->pins + 106); minfo->features.pll.ref_freq = (bd->pins[110] & 0x01) ? 14318 : 27000; minfo->values.memory.ddr = (bd->pins[114] & 0x60) == 0x20; minfo->values.memory.dll = (bd->pins[115] & 0x02) != 0; minfo->values.memory.emrswen = (bd->pins[115] & 0x01) != 0; minfo->values.reg.maccess = minfo->values.memory.emrswen ? 0x00004000 : 0x00000000; if (bd->pins[115] & 4) { minfo->values.reg.mctlwtst_core = minfo->values.reg.mctlwtst; } else { u_int32_t wtst_xlat[] = { 0, 1, 5, 6, 7, 5, 2, 3 }; minfo->values.reg.mctlwtst_core = (minfo->values.reg.mctlwtst & ~7) | wtst_xlat[minfo->values.reg.mctlwtst & 7]; } minfo->max_pixel_clock_panellink = bd->pins[47] * 4000; return 0; } static void default_pins5(struct matrox_fb_info *minfo) { /* Mine 16MB G450 with SDRAM DDR */ minfo->limits.pixel.vcomax = minfo->limits.system.vcomax = minfo->limits.video.vcomax = 600000; minfo->limits.pixel.vcomin = minfo->limits.system.vcomin = minfo->limits.video.vcomin = 256000; minfo->values.pll.system = minfo->values.pll.video = 284000; minfo->values.reg.opt = 0x404A1160; minfo->values.reg.opt2 = 0x0000AC00; minfo->values.reg.opt3 = 0x0090A409; minfo->values.reg.mctlwtst_core = minfo->values.reg.mctlwtst = 0x0C81462B; minfo->values.reg.memmisc = 0x80000004; minfo->values.reg.memrdbk = 0x01001103; minfo->features.pll.ref_freq = 27000; minfo->values.memory.ddr = 1; minfo->values.memory.dll = 1; minfo->values.memory.emrswen = 1; minfo->values.reg.maccess = 0x00004000; } static int matroxfb_set_limits(struct matrox_fb_info *minfo, const struct matrox_bios *bd) { unsigned int pins_version; static const unsigned int 
pinslen[] = { 64, 64, 64, 128, 128 }; switch (minfo->chip) { case MGA_2064: default_pins1(minfo); break; case MGA_2164: case MGA_1064: case MGA_1164: default_pins2(minfo); break; case MGA_G100: case MGA_G200: default_pins3(minfo); break; case MGA_G400: default_pins4(minfo); break; case MGA_G450: case MGA_G550: default_pins5(minfo); break; } if (!bd->bios_valid) { printk(KERN_INFO "matroxfb: Your Matrox device does not have BIOS\n"); return -1; } if (bd->pins_len < 64) { printk(KERN_INFO "matroxfb: BIOS on your Matrox device does not contain powerup info\n"); return -1; } if (bd->pins[0] == 0x2E && bd->pins[1] == 0x41) { pins_version = bd->pins[5]; if (pins_version < 2 || pins_version > 5) { printk(KERN_INFO "matroxfb: Unknown version (%u) of powerup info\n", pins_version); return -1; } } else { pins_version = 1; } if (bd->pins_len != pinslen[pins_version - 1]) { printk(KERN_INFO "matroxfb: Invalid powerup info\n"); return -1; } switch (pins_version) { case 1: return parse_pins1(minfo, bd); case 2: return parse_pins2(minfo, bd); case 3: return parse_pins3(minfo, bd); case 4: return parse_pins4(minfo, bd); case 5: return parse_pins5(minfo, bd); default: printk(KERN_DEBUG "matroxfb: Powerup info version %u is not yet supported\n", pins_version); return -1; } } void matroxfb_read_pins(struct matrox_fb_info *minfo) { u32 opt; u32 biosbase; u32 fbbase; struct pci_dev *pdev = minfo->pcidev; memset(&minfo->bios, 0, sizeof(minfo->bios)); pci_read_config_dword(pdev, PCI_OPTION_REG, &opt); pci_write_config_dword(pdev, PCI_OPTION_REG, opt | PCI_OPTION_ENABLE_ROM); pci_read_config_dword(pdev, PCI_ROM_ADDRESS, &biosbase); pci_read_config_dword(pdev, minfo->devflags.fbResource, &fbbase); pci_write_config_dword(pdev, PCI_ROM_ADDRESS, (fbbase & PCI_ROM_ADDRESS_MASK) | PCI_ROM_ADDRESS_ENABLE); parse_bios(vaddr_va(minfo->video.vbase), &minfo->bios); pci_write_config_dword(pdev, PCI_ROM_ADDRESS, biosbase); pci_write_config_dword(pdev, PCI_OPTION_REG, opt); #ifdef CONFIG_X86 if 
(!minfo->bios.bios_valid) { unsigned char __iomem* b; b = ioremap(0x000C0000, 65536); if (!b) { printk(KERN_INFO "matroxfb: Unable to map legacy BIOS\n"); } else { unsigned int ven = readb(b+0x64+0) | (readb(b+0x64+1) << 8); unsigned int dev = readb(b+0x64+2) | (readb(b+0x64+3) << 8); if (ven != pdev->vendor || dev != pdev->device) { printk(KERN_INFO "matroxfb: Legacy BIOS is for %04X:%04X, while this device is %04X:%04X\n", ven, dev, pdev->vendor, pdev->device); } else { parse_bios(b, &minfo->bios); } iounmap(b); } } #endif matroxfb_set_limits(minfo, &minfo->bios); printk(KERN_INFO "PInS memtype = %u\n", (minfo->values.reg.opt & 0x1C00) >> 10); } EXPORT_SYMBOL(matroxfb_DAC_in); EXPORT_SYMBOL(matroxfb_DAC_out); EXPORT_SYMBOL(matroxfb_var2my); EXPORT_SYMBOL(matroxfb_PLL_calcclock); EXPORT_SYMBOL(matroxfb_vgaHWinit); /* DAC1064, Ti3026 */ EXPORT_SYMBOL(matroxfb_vgaHWrestore); /* DAC1064, Ti3026 */ EXPORT_SYMBOL(matroxfb_read_pins); MODULE_AUTHOR("(c) 1999-2002 Petr Vandrovec <vandrove@vc.cvut.cz>"); MODULE_DESCRIPTION("Miscellaneous support for Matrox video cards"); MODULE_LICENSE("GPL");
gpl-2.0
miaoxie/linux-btrfs
kernel/resource.c
242
37510
/* * linux/kernel/resource.c * * Copyright (C) 1999 Linus Torvalds * Copyright (C) 1999 Martin Mares <mj@ucw.cz> * * Arbitrary resource management. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/export.h> #include <linux/errno.h> #include <linux/ioport.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/fs.h> #include <linux/proc_fs.h> #include <linux/sched.h> #include <linux/seq_file.h> #include <linux/device.h> #include <linux/pfn.h> #include <linux/mm.h> #include <asm/io.h> struct resource ioport_resource = { .name = "PCI IO", .start = 0, .end = IO_SPACE_LIMIT, .flags = IORESOURCE_IO, }; EXPORT_SYMBOL(ioport_resource); struct resource iomem_resource = { .name = "PCI mem", .start = 0, .end = -1, .flags = IORESOURCE_MEM, }; EXPORT_SYMBOL(iomem_resource); /* constraints to be met while allocating resources */ struct resource_constraint { resource_size_t min, max, align; resource_size_t (*alignf)(void *, const struct resource *, resource_size_t, resource_size_t); void *alignf_data; }; static DEFINE_RWLOCK(resource_lock); /* * For memory hotplug, there is no way to free resource entries allocated * by boot mem after the system is up. So for reusing the resource entry * we need to remember the resource. 
*/ static struct resource *bootmem_resource_free; static DEFINE_SPINLOCK(bootmem_resource_lock); static struct resource *next_resource(struct resource *p, bool sibling_only) { /* Caller wants to traverse through siblings only */ if (sibling_only) return p->sibling; if (p->child) return p->child; while (!p->sibling && p->parent) p = p->parent; return p->sibling; } static void *r_next(struct seq_file *m, void *v, loff_t *pos) { struct resource *p = v; (*pos)++; return (void *)next_resource(p, false); } #ifdef CONFIG_PROC_FS enum { MAX_IORES_LEVEL = 5 }; static void *r_start(struct seq_file *m, loff_t *pos) __acquires(resource_lock) { struct resource *p = m->private; loff_t l = 0; read_lock(&resource_lock); for (p = p->child; p && l < *pos; p = r_next(m, p, &l)) ; return p; } static void r_stop(struct seq_file *m, void *v) __releases(resource_lock) { read_unlock(&resource_lock); } static int r_show(struct seq_file *m, void *v) { struct resource *root = m->private; struct resource *r = v, *p; int width = root->end < 0x10000 ? 4 : 8; int depth; for (depth = 0, p = r; depth < MAX_IORES_LEVEL; depth++, p = p->parent) if (p->parent == root) break; seq_printf(m, "%*s%0*llx-%0*llx : %s\n", depth * 2, "", width, (unsigned long long) r->start, width, (unsigned long long) r->end, r->name ? 
r->name : "<BAD>"); return 0; } static const struct seq_operations resource_op = { .start = r_start, .next = r_next, .stop = r_stop, .show = r_show, }; static int ioports_open(struct inode *inode, struct file *file) { int res = seq_open(file, &resource_op); if (!res) { struct seq_file *m = file->private_data; m->private = &ioport_resource; } return res; } static int iomem_open(struct inode *inode, struct file *file) { int res = seq_open(file, &resource_op); if (!res) { struct seq_file *m = file->private_data; m->private = &iomem_resource; } return res; } static const struct file_operations proc_ioports_operations = { .open = ioports_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release, }; static const struct file_operations proc_iomem_operations = { .open = iomem_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release, }; static int __init ioresources_init(void) { proc_create("ioports", 0, NULL, &proc_ioports_operations); proc_create("iomem", 0, NULL, &proc_iomem_operations); return 0; } __initcall(ioresources_init); #endif /* CONFIG_PROC_FS */ static void free_resource(struct resource *res) { if (!res) return; if (!PageSlab(virt_to_head_page(res))) { spin_lock(&bootmem_resource_lock); res->sibling = bootmem_resource_free; bootmem_resource_free = res; spin_unlock(&bootmem_resource_lock); } else { kfree(res); } } static struct resource *alloc_resource(gfp_t flags) { struct resource *res = NULL; spin_lock(&bootmem_resource_lock); if (bootmem_resource_free) { res = bootmem_resource_free; bootmem_resource_free = res->sibling; } spin_unlock(&bootmem_resource_lock); if (res) memset(res, 0, sizeof(struct resource)); else res = kzalloc(sizeof(struct resource), flags); return res; } /* Return the conflict entry if you can't request it */ static struct resource * __request_resource(struct resource *root, struct resource *new) { resource_size_t start = new->start; resource_size_t end = new->end; struct resource *tmp, **p; if (end < start) return 
root; if (start < root->start) return root; if (end > root->end) return root; p = &root->child; for (;;) { tmp = *p; if (!tmp || tmp->start > end) { new->sibling = tmp; *p = new; new->parent = root; return NULL; } p = &tmp->sibling; if (tmp->end < start) continue; return tmp; } } static int __release_resource(struct resource *old) { struct resource *tmp, **p; p = &old->parent->child; for (;;) { tmp = *p; if (!tmp) break; if (tmp == old) { *p = tmp->sibling; old->parent = NULL; return 0; } p = &tmp->sibling; } return -EINVAL; } static void __release_child_resources(struct resource *r) { struct resource *tmp, *p; resource_size_t size; p = r->child; r->child = NULL; while (p) { tmp = p; p = p->sibling; tmp->parent = NULL; tmp->sibling = NULL; __release_child_resources(tmp); printk(KERN_DEBUG "release child resource %pR\n", tmp); /* need to restore size, and keep flags */ size = resource_size(tmp); tmp->start = 0; tmp->end = size - 1; } } void release_child_resources(struct resource *r) { write_lock(&resource_lock); __release_child_resources(r); write_unlock(&resource_lock); } /** * request_resource_conflict - request and reserve an I/O or memory resource * @root: root resource descriptor * @new: resource descriptor desired by caller * * Returns 0 for success, conflict resource on error. */ struct resource *request_resource_conflict(struct resource *root, struct resource *new) { struct resource *conflict; write_lock(&resource_lock); conflict = __request_resource(root, new); write_unlock(&resource_lock); return conflict; } /** * request_resource - request and reserve an I/O or memory resource * @root: root resource descriptor * @new: resource descriptor desired by caller * * Returns 0 for success, negative error code on error. */ int request_resource(struct resource *root, struct resource *new) { struct resource *conflict; conflict = request_resource_conflict(root, new); return conflict ? 
-EBUSY : 0; } EXPORT_SYMBOL(request_resource); /** * release_resource - release a previously reserved resource * @old: resource pointer */ int release_resource(struct resource *old) { int retval; write_lock(&resource_lock); retval = __release_resource(old); write_unlock(&resource_lock); return retval; } EXPORT_SYMBOL(release_resource); /* * Finds the lowest iomem reosurce exists with-in [res->start.res->end) * the caller must specify res->start, res->end, res->flags and "name". * If found, returns 0, res is overwritten, if not found, returns -1. * This walks through whole tree and not just first level children * until and unless first_level_children_only is true. */ static int find_next_iomem_res(struct resource *res, char *name, bool first_level_children_only) { resource_size_t start, end; struct resource *p; bool sibling_only = false; BUG_ON(!res); start = res->start; end = res->end; BUG_ON(start >= end); if (first_level_children_only) sibling_only = true; read_lock(&resource_lock); for (p = iomem_resource.child; p; p = next_resource(p, sibling_only)) { if (p->flags != res->flags) continue; if (name && strcmp(p->name, name)) continue; if (p->start > end) { p = NULL; break; } if ((p->end >= start) && (p->start < end)) break; } read_unlock(&resource_lock); if (!p) return -1; /* copy data */ if (res->start < p->start) res->start = p->start; if (res->end > p->end) res->end = p->end; return 0; } /* * Walks through iomem resources and calls func() with matching resource * ranges. This walks through whole tree and not just first level children. * All the memory ranges which overlap start,end and also match flags and * name are valid candidates. 
* * @name: name of resource * @flags: resource flags * @start: start addr * @end: end addr */ int walk_iomem_res(char *name, unsigned long flags, u64 start, u64 end, void *arg, int (*func)(u64, u64, void *)) { struct resource res; u64 orig_end; int ret = -1; res.start = start; res.end = end; res.flags = flags; orig_end = res.end; while ((res.start < res.end) && (!find_next_iomem_res(&res, name, false))) { ret = (*func)(res.start, res.end, arg); if (ret) break; res.start = res.end + 1; res.end = orig_end; } return ret; } /* * This function calls callback against all memory range of "System RAM" * which are marked as IORESOURCE_MEM and IORESOUCE_BUSY. * Now, this function is only for "System RAM". This function deals with * full ranges and not pfn. If resources are not pfn aligned, dealing * with pfn can truncate ranges. */ int walk_system_ram_res(u64 start, u64 end, void *arg, int (*func)(u64, u64, void *)) { struct resource res; u64 orig_end; int ret = -1; res.start = start; res.end = end; res.flags = IORESOURCE_MEM | IORESOURCE_BUSY; orig_end = res.end; while ((res.start < res.end) && (!find_next_iomem_res(&res, "System RAM", true))) { ret = (*func)(res.start, res.end, arg); if (ret) break; res.start = res.end + 1; res.end = orig_end; } return ret; } #if !defined(CONFIG_ARCH_HAS_WALK_MEMORY) /* * This function calls callback against all memory range of "System RAM" * which are marked as IORESOURCE_MEM and IORESOUCE_BUSY. * Now, this function is only for "System RAM". 
*/ int walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages, void *arg, int (*func)(unsigned long, unsigned long, void *)) { struct resource res; unsigned long pfn, end_pfn; u64 orig_end; int ret = -1; res.start = (u64) start_pfn << PAGE_SHIFT; res.end = ((u64)(start_pfn + nr_pages) << PAGE_SHIFT) - 1; res.flags = IORESOURCE_MEM | IORESOURCE_BUSY; orig_end = res.end; while ((res.start < res.end) && (find_next_iomem_res(&res, "System RAM", true) >= 0)) { pfn = (res.start + PAGE_SIZE - 1) >> PAGE_SHIFT; end_pfn = (res.end + 1) >> PAGE_SHIFT; if (end_pfn > pfn) ret = (*func)(pfn, end_pfn - pfn, arg); if (ret) break; res.start = res.end + 1; res.end = orig_end; } return ret; } #endif static int __is_ram(unsigned long pfn, unsigned long nr_pages, void *arg) { return 1; } /* * This generic page_is_ram() returns true if specified address is * registered as "System RAM" in iomem_resource list. */ int __weak page_is_ram(unsigned long pfn) { return walk_system_ram_range(pfn, 1, NULL, __is_ram) == 1; } EXPORT_SYMBOL_GPL(page_is_ram); /* * Search for a resouce entry that fully contains the specified region. * If found, return 1 if it is RAM, 0 if not. * If not found, or region is not fully contained, return -1 * * Used by the ioremap functions to ensure the user is not remapping RAM and is * a vast speed up over walking through the resource table page by page. 
*/ int region_is_ram(resource_size_t start, unsigned long size) { struct resource *p; resource_size_t end = start + size - 1; int flags = IORESOURCE_MEM | IORESOURCE_BUSY; const char *name = "System RAM"; int ret = -1; read_lock(&resource_lock); for (p = iomem_resource.child; p ; p = p->sibling) { if (end < p->start) continue; if (p->start <= start && end <= p->end) { /* resource fully contains region */ if ((p->flags != flags) || strcmp(p->name, name)) ret = 0; else ret = 1; break; } if (p->end < start) break; /* not found */ } read_unlock(&resource_lock); return ret; } void __weak arch_remove_reservations(struct resource *avail) { } static resource_size_t simple_align_resource(void *data, const struct resource *avail, resource_size_t size, resource_size_t align) { return avail->start; } static void resource_clip(struct resource *res, resource_size_t min, resource_size_t max) { if (res->start < min) res->start = min; if (res->end > max) res->end = max; } /* * Find empty slot in the resource tree with the given range and * alignment constraints */ static int __find_resource(struct resource *root, struct resource *old, struct resource *new, resource_size_t size, struct resource_constraint *constraint) { struct resource *this = root->child; struct resource tmp = *new, avail, alloc; tmp.start = root->start; /* * Skip past an allocated resource that starts at 0, since the assignment * of this->start - 1 to tmp->end below would cause an underflow. */ if (this && this->start == root->start) { tmp.start = (this == old) ? old->start : this->end + 1; this = this->sibling; } for(;;) { if (this) tmp.end = (this == old) ? 
this->end : this->start - 1; else tmp.end = root->end; if (tmp.end < tmp.start) goto next; resource_clip(&tmp, constraint->min, constraint->max); arch_remove_reservations(&tmp); /* Check for overflow after ALIGN() */ avail.start = ALIGN(tmp.start, constraint->align); avail.end = tmp.end; avail.flags = new->flags & ~IORESOURCE_UNSET; if (avail.start >= tmp.start) { alloc.flags = avail.flags; alloc.start = constraint->alignf(constraint->alignf_data, &avail, size, constraint->align); alloc.end = alloc.start + size - 1; if (resource_contains(&avail, &alloc)) { new->start = alloc.start; new->end = alloc.end; return 0; } } next: if (!this || this->end == root->end) break; if (this != old) tmp.start = this->end + 1; this = this->sibling; } return -EBUSY; } /* * Find empty slot in the resource tree given range and alignment. */ static int find_resource(struct resource *root, struct resource *new, resource_size_t size, struct resource_constraint *constraint) { return __find_resource(root, NULL, new, size, constraint); } /** * reallocate_resource - allocate a slot in the resource tree given range & alignment. * The resource will be relocated if the new size cannot be reallocated in the * current location. * * @root: root resource descriptor * @old: resource descriptor desired by caller * @newsize: new size of the resource descriptor * @constraint: the size and alignment constraints to be met. 
*/ static int reallocate_resource(struct resource *root, struct resource *old, resource_size_t newsize, struct resource_constraint *constraint) { int err=0; struct resource new = *old; struct resource *conflict; write_lock(&resource_lock); if ((err = __find_resource(root, old, &new, newsize, constraint))) goto out; if (resource_contains(&new, old)) { old->start = new.start; old->end = new.end; goto out; } if (old->child) { err = -EBUSY; goto out; } if (resource_contains(old, &new)) { old->start = new.start; old->end = new.end; } else { __release_resource(old); *old = new; conflict = __request_resource(root, old); BUG_ON(conflict); } out: write_unlock(&resource_lock); return err; } /** * allocate_resource - allocate empty slot in the resource tree given range & alignment. * The resource will be reallocated with a new size if it was already allocated * @root: root resource descriptor * @new: resource descriptor desired by caller * @size: requested resource region size * @min: minimum boundary to allocate * @max: maximum boundary to allocate * @align: alignment requested, in bytes * @alignf: alignment function, optional, called if not NULL * @alignf_data: arbitrary data to pass to the @alignf function */ int allocate_resource(struct resource *root, struct resource *new, resource_size_t size, resource_size_t min, resource_size_t max, resource_size_t align, resource_size_t (*alignf)(void *, const struct resource *, resource_size_t, resource_size_t), void *alignf_data) { int err; struct resource_constraint constraint; if (!alignf) alignf = simple_align_resource; constraint.min = min; constraint.max = max; constraint.align = align; constraint.alignf = alignf; constraint.alignf_data = alignf_data; if ( new->parent ) { /* resource is already allocated, try reallocating with the new constraints */ return reallocate_resource(root, new, size, &constraint); } write_lock(&resource_lock); err = find_resource(root, new, size, &constraint); if (err >= 0 && __request_resource(root, 
new)) err = -EBUSY; write_unlock(&resource_lock); return err; } EXPORT_SYMBOL(allocate_resource); /** * lookup_resource - find an existing resource by a resource start address * @root: root resource descriptor * @start: resource start address * * Returns a pointer to the resource if found, NULL otherwise */ struct resource *lookup_resource(struct resource *root, resource_size_t start) { struct resource *res; read_lock(&resource_lock); for (res = root->child; res; res = res->sibling) { if (res->start == start) break; } read_unlock(&resource_lock); return res; } /* * Insert a resource into the resource tree. If successful, return NULL, * otherwise return the conflicting resource (compare to __request_resource()) */ static struct resource * __insert_resource(struct resource *parent, struct resource *new) { struct resource *first, *next; for (;; parent = first) { first = __request_resource(parent, new); if (!first) return first; if (first == parent) return first; if (WARN_ON(first == new)) /* duplicated insertion */ return first; if ((first->start > new->start) || (first->end < new->end)) break; if ((first->start == new->start) && (first->end == new->end)) break; } for (next = first; ; next = next->sibling) { /* Partial overlap? Bad, and unfixable */ if (next->start < new->start || next->end > new->end) return next; if (!next->sibling) break; if (next->sibling->start > new->end) break; } new->parent = parent; new->sibling = next->sibling; new->child = first; next->sibling = NULL; for (next = first; next; next = next->sibling) next->parent = new; if (parent->child == first) { parent->child = new; } else { next = parent->child; while (next->sibling != first) next = next->sibling; next->sibling = new; } return NULL; } /** * insert_resource_conflict - Inserts resource in the resource tree * @parent: parent of the new resource * @new: new resource to insert * * Returns 0 on success, conflict resource if the resource can't be inserted. 
* * This function is equivalent to request_resource_conflict when no conflict * happens. If a conflict happens, and the conflicting resources * entirely fit within the range of the new resource, then the new * resource is inserted and the conflicting resources become children of * the new resource. */ struct resource *insert_resource_conflict(struct resource *parent, struct resource *new) { struct resource *conflict; write_lock(&resource_lock); conflict = __insert_resource(parent, new); write_unlock(&resource_lock); return conflict; } /** * insert_resource - Inserts a resource in the resource tree * @parent: parent of the new resource * @new: new resource to insert * * Returns 0 on success, -EBUSY if the resource can't be inserted. */ int insert_resource(struct resource *parent, struct resource *new) { struct resource *conflict; conflict = insert_resource_conflict(parent, new); return conflict ? -EBUSY : 0; } /** * insert_resource_expand_to_fit - Insert a resource into the resource tree * @root: root resource descriptor * @new: new resource to insert * * Insert a resource into the resource tree, possibly expanding it in order * to make it encompass any conflicting resources. */ void insert_resource_expand_to_fit(struct resource *root, struct resource *new) { if (new->parent) return; write_lock(&resource_lock); for (;;) { struct resource *conflict; conflict = __insert_resource(root, new); if (!conflict) break; if (conflict == root) break; /* Ok, expand resource to cover the conflict, then try again .. 
*/ if (conflict->start < new->start) new->start = conflict->start; if (conflict->end > new->end) new->end = conflict->end; printk("Expanded resource %s due to conflict with %s\n", new->name, conflict->name); } write_unlock(&resource_lock); } static int __adjust_resource(struct resource *res, resource_size_t start, resource_size_t size) { struct resource *tmp, *parent = res->parent; resource_size_t end = start + size - 1; int result = -EBUSY; if (!parent) goto skip; if ((start < parent->start) || (end > parent->end)) goto out; if (res->sibling && (res->sibling->start <= end)) goto out; tmp = parent->child; if (tmp != res) { while (tmp->sibling != res) tmp = tmp->sibling; if (start <= tmp->end) goto out; } skip: for (tmp = res->child; tmp; tmp = tmp->sibling) if ((tmp->start < start) || (tmp->end > end)) goto out; res->start = start; res->end = end; result = 0; out: return result; } /** * adjust_resource - modify a resource's start and size * @res: resource to modify * @start: new start value * @size: new size * * Given an existing resource, change its start and size to match the * arguments. Returns 0 on success, -EBUSY if it can't fit. * Existing children of the resource are assumed to be immutable. 
*/
int adjust_resource(struct resource *res, resource_size_t start,
			resource_size_t size)
{
	int result;

	/* Serialize against all other resource-tree readers/writers. */
	write_lock(&resource_lock);
	result = __adjust_resource(res, start, size);
	write_unlock(&resource_lock);
	return result;
}
EXPORT_SYMBOL(adjust_resource);

/*
 * Insert [start, end] into @root as a busy region.  Where the request
 * collides with an existing sibling the request is split around the
 * conflict and only the uncovered gaps are claimed.  Allocations use
 * GFP_ATOMIC because this runs early and under resource_lock.
 */
static void __init __reserve_region_with_split(struct resource *root,
		resource_size_t start, resource_size_t end,
		const char *name)
{
	struct resource *parent = root;
	struct resource *conflict;
	struct resource *res = alloc_resource(GFP_ATOMIC);
	/* second half of a split request, pending insertion */
	struct resource *next_res = NULL;

	if (!res)
		return;

	res->name = name;
	res->start = start;
	res->end = end;
	res->flags = IORESOURCE_BUSY;

	while (1) {

		conflict = __request_resource(parent, res);
		if (!conflict) {
			/* inserted; continue with the pending half, if any */
			if (!next_res)
				break;
			res = next_res;
			next_res = NULL;
			continue;
		}

		/* conflict covered whole area */
		if (conflict->start <= res->start &&
				conflict->end >= res->end) {
			free_resource(res);
			WARN_ON(next_res);
			break;
		}

		/* failed, split and try again */
		if (conflict->start > res->start) {
			/* keep the part below the conflict ... */
			end = res->end;
			res->end = conflict->start - 1;
			/* ... and queue the part above it, if any */
			if (conflict->end < end) {
				next_res = alloc_resource(GFP_ATOMIC);
				if (!next_res) {
					free_resource(res);
					break;
				}
				next_res->name = name;
				next_res->start = conflict->end + 1;
				next_res->end = end;
				next_res->flags = IORESOURCE_BUSY;
			}
		} else {
			/* conflict covers our low end; retry above it */
			res->start = conflict->end + 1;
		}
	}

}

/*
 * Reserve [start, end] inside @root, clipping the request to @root's
 * bounds (or aborting with a stack dump when it lies entirely outside).
 */
void __init reserve_region_with_split(struct resource *root,
		resource_size_t start, resource_size_t end,
		const char *name)
{
	int abort = 0;

	write_lock(&resource_lock);
	if (root->start > start || root->end < end) {
		pr_err("requested range [0x%llx-0x%llx] not in root %pr\n",
		       (unsigned long long)start, (unsigned long long)end,
		       root);
		if (start > root->end || end < root->start)
			abort = 1;
		else {
			/* partial overlap: clip the request to the root */
			if (end > root->end)
				end = root->end;
			if (start < root->start)
				start = root->start;
			pr_err("fixing request to [0x%llx-0x%llx]\n",
			       (unsigned long long)start,
			       (unsigned long long)end);
		}
		dump_stack();
	}
	if (!abort)
		__reserve_region_with_split(root, start, end, name);
	write_unlock(&resource_lock);
}

/**
 * resource_alignment - calculate resource's alignment
 * @res: resource pointer
 *
 * Returns alignment on success, 0 (invalid alignment) on failure.
 */
resource_size_t resource_alignment(struct resource *res)
{
	switch (res->flags & (IORESOURCE_SIZEALIGN | IORESOURCE_STARTALIGN)) {
	case IORESOURCE_SIZEALIGN:
		return resource_size(res);
	case IORESOURCE_STARTALIGN:
		return res->start;
	default:
		return 0;
	}
}

/*
 * This is compatibility stuff for IO resources.
 *
 * Note how this, unlike the above, knows about
 * the IO flag meanings (busy etc).
 *
 * request_region creates a new busy region.
 *
 * check_region returns non-zero if the area is already busy.
 *
 * release_region releases a matching busy region.
 */

/* waiters blocked on a busy IORESOURCE_MUXED region (see below) */
static DECLARE_WAIT_QUEUE_HEAD(muxed_resource_wait);

/**
 * __request_region - create a new busy resource region
 * @parent: parent resource descriptor
 * @start: resource start address
 * @n: resource region size
 * @name: reserving caller's ID string
 * @flags: IO resource flags
 *
 * Returns the new busy resource on success, NULL on allocation failure
 * or conflict.  For IORESOURCE_MUXED regions the caller sleeps
 * (uninterruptibly) until the current holder releases the region.
 */
struct resource * __request_region(struct resource *parent,
				   resource_size_t start, resource_size_t n,
				   const char *name, int flags)
{
	DECLARE_WAITQUEUE(wait, current);
	struct resource *res = alloc_resource(GFP_KERNEL);

	if (!res)
		return NULL;

	res->name = name;
	res->start = start;
	res->end = start + n - 1;
	res->flags = resource_type(parent);
	res->flags |= IORESOURCE_BUSY | flags;

	write_lock(&resource_lock);

	for (;;) {
		struct resource *conflict;

		conflict = __request_resource(parent, res);
		if (!conflict)
			break;
		if (conflict != parent) {
			/* descend into the (non-busy) conflicting node */
			parent = conflict;
			if (!(conflict->flags & IORESOURCE_BUSY))
				continue;
		}
		if (conflict->flags & flags & IORESOURCE_MUXED) {
			/*
			 * Both sides are muxed: wait for the holder to
			 * release, then retry.  The lock is dropped while
			 * sleeping and re-taken before the retry.
			 */
			add_wait_queue(&muxed_resource_wait, &wait);
			write_unlock(&resource_lock);
			set_current_state(TASK_UNINTERRUPTIBLE);
			schedule();
			remove_wait_queue(&muxed_resource_wait, &wait);
			write_lock(&resource_lock);
			continue;
		}
		/* Uhhuh, that didn't work out..
 */
		free_resource(res);
		res = NULL;
		break;
	}
	write_unlock(&resource_lock);
	return res;
}
EXPORT_SYMBOL(__request_region);

/**
 * __check_region - check if a resource region is busy or free
 * @parent: parent resource descriptor
 * @start: resource start address
 * @n: resource region size
 *
 * Returns 0 if the region is free at the moment it is checked,
 * returns %-EBUSY if the region is busy.
 *
 * NOTE:
 * This function is deprecated because its use is racy.
 * Even if it returns 0, a subsequent call to request_region()
 * may fail because another driver etc. just allocated the region.
 * Do NOT use it.  It will be removed from the kernel.
 */
int __check_region(struct resource *parent, resource_size_t start,
			resource_size_t n)
{
	struct resource * res;

	/* probe by transiently requesting, then releasing, the region */
	res = __request_region(parent, start, n, "check-region", 0);
	if (!res)
		return -EBUSY;

	release_resource(res);
	free_resource(res);
	return 0;
}
EXPORT_SYMBOL(__check_region);

/**
 * __release_region - release a previously reserved resource region
 * @parent: parent resource descriptor
 * @start: resource start address
 * @n: resource region size
 *
 * The described resource region must match a currently busy region.
 */
void __release_region(struct resource *parent, resource_size_t start,
			resource_size_t n)
{
	struct resource **p;
	resource_size_t end;

	p = &parent->child;
	end = start + n - 1;

	write_lock(&resource_lock);

	for (;;) {
		struct resource *res = *p;

		if (!res)
			break;
		if (res->start <= start && res->end >= end) {
			/* non-busy nodes are containers: descend into them */
			if (!(res->flags & IORESOURCE_BUSY)) {
				p = &res->child;
				continue;
			}
			/* a busy region must match the request exactly */
			if (res->start != start || res->end != end)
				break;
			/* unlink, wake any muxed waiters, and free */
			*p = res->sibling;
			write_unlock(&resource_lock);
			if (res->flags & IORESOURCE_MUXED)
				wake_up(&muxed_resource_wait);
			free_resource(res);
			return;
		}
		p = &res->sibling;
	}

	write_unlock(&resource_lock);

	printk(KERN_WARNING "Trying to free nonexistent resource "
		"<%016llx-%016llx>\n", (unsigned long long)start,
		(unsigned long long)end);
}
EXPORT_SYMBOL(__release_region);

#ifdef CONFIG_MEMORY_HOTREMOVE
/**
 * release_mem_region_adjustable - release a previously reserved memory region
 * @parent: parent resource descriptor
 * @start: resource start address
 * @size: resource region size
 *
 * This interface is intended for memory hot-delete.  The requested region
 * is released from a currently busy memory resource.  The requested region
 * must either match exactly or fit into a single busy resource entry.  In
 * the latter case, the remaining resource is adjusted accordingly.
 * Existing children of the busy memory resource must be immutable in the
 * request.
 *
 * Note:
 * - Additional release conditions, such as overlapping region, can be
 *   supported after they are confirmed as valid cases.
 * - When a busy memory resource gets split into two entries, the code
 *   assumes that all children remain in the lower address entry for
 *   simplicity.  Enhance this logic when necessary.
 */
int release_mem_region_adjustable(struct resource *parent,
			resource_size_t start, resource_size_t size)
{
	struct resource **p;
	struct resource *res;
	struct resource *new_res;	/* spare entry for the split case */
	resource_size_t end;
	int ret = -EINVAL;

	end = start + size - 1;
	if ((start < parent->start) || (end > parent->end))
		return ret;

	/* The alloc_resource() result gets checked later */
	new_res = alloc_resource(GFP_KERNEL);

	p = &parent->child;
	write_lock(&resource_lock);

	while ((res = *p)) {
		/* siblings are address-ordered: nothing further can match */
		if (res->start >= end)
			break;

		/* look for the next resource if it does not fit into */
		if (res->start > start || res->end < end)	{
			p = &res->sibling;
			continue;
		}

		if (!(res->flags & IORESOURCE_MEM))
			break;

		if (!(res->flags & IORESOURCE_BUSY)) {
			p = &res->child;
			continue;
		}

		/* found the target resource; let's adjust accordingly */
		if (res->start == start && res->end == end) {
			/* free the whole entry */
			*p = res->sibling;
			free_resource(res);
			ret = 0;
		} else if (res->start == start && res->end != end) {
			/* adjust the start */
			ret = __adjust_resource(res, end + 1,
						res->end - end);
		} else if (res->start != start && res->end == end) {
			/* adjust the end */
			ret = __adjust_resource(res, res->start,
						start - res->start);
		} else {
			/* split into two entries */
			if (!new_res) {
				ret = -ENOMEM;
				break;
			}
			new_res->name = res->name;
			new_res->start = end + 1;
			new_res->end = res->end;
			new_res->flags = res->flags;
			new_res->parent = res->parent;
			new_res->sibling = res->sibling;
			new_res->child = NULL;

			/* shrink the original before linking the upper half */
			ret = __adjust_resource(res, res->start,
						start - res->start);
			if (ret)
				break;
			res->sibling = new_res;
			new_res = NULL;	/* ownership transferred to tree */
		}

		break;
	}

	write_unlock(&resource_lock);
	/* free_resource(NULL) is a no-op when the spare was consumed */
	free_resource(new_res);
	return ret;
}
#endif	/* CONFIG_MEMORY_HOTREMOVE */

/*
 * Managed region resource
 */

/* devres destructor: release the resource recorded by devm_request_resource() */
static void devm_resource_release(struct device *dev, void *ptr)
{
	struct resource **r = ptr;

	release_resource(*r);
}

/**
 * devm_request_resource() - request and reserve an I/O or memory resource
 * @dev: device for which to request the resource
 * @root: root of the resource
tree from which to request the resource
 * @new: descriptor of the resource to request
 *
 * This is a device-managed version of request_resource(). There is usually
 * no need to release resources requested by this function explicitly since
 * that will be taken care of when the device is unbound from its driver.
 * If for some reason the resource needs to be released explicitly, because
 * of ordering issues for example, drivers must call devm_release_resource()
 * rather than the regular release_resource().
 *
 * When a conflict is detected between any existing resources and the newly
 * requested resource, an error message will be printed.
 *
 * Returns 0 on success or a negative error code on failure.
 */
int devm_request_resource(struct device *dev, struct resource *root,
			  struct resource *new)
{
	struct resource *conflict, **ptr;

	/* devres node stores only a pointer; @new is owned by the caller */
	ptr = devres_alloc(devm_resource_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	*ptr = new;

	conflict = request_resource_conflict(root, new);
	if (conflict) {
		dev_err(dev, "resource collision: %pR conflicts with %s %pR\n",
			new, conflict->name, conflict);
		devres_free(ptr);
		return -EBUSY;
	}

	devres_add(dev, ptr);
	return 0;
}
EXPORT_SYMBOL(devm_request_resource);

/* devres match: does this node track the given resource pointer? */
static int devm_resource_match(struct device *dev, void *res, void *data)
{
	struct resource **ptr = res;

	return *ptr == data;
}

/**
 * devm_release_resource() - release a previously requested resource
 * @dev: device for which to release the resource
 * @new: descriptor of the resource to release
 *
 * Releases a resource previously requested using devm_request_resource().
 */
void devm_release_resource(struct device *dev, struct resource *new)
{
	WARN_ON(devres_release(dev, devm_resource_release, devm_resource_match,
			       new));
}
EXPORT_SYMBOL(devm_release_resource);

/* bookkeeping for a devm-managed busy region */
struct region_devres {
	struct resource *parent;
	resource_size_t start;
	resource_size_t n;
};

/* devres destructor: release the region requested by __devm_request_region() */
static void devm_region_release(struct device *dev, void *res)
{
	struct region_devres *this = res;

	__release_region(this->parent, this->start, this->n);
}

/* devres match: same parent tree, start and length */
static int devm_region_match(struct device *dev, void *res, void *match_data)
{
	struct region_devres *this = res, *match = match_data;

	return this->parent == match->parent &&
		this->start == match->start && this->n == match->n;
}

/*
 * Device-managed __request_region(): the busy region is automatically
 * released when @dev is unbound from its driver.  Returns the new
 * resource, or NULL on allocation failure or conflict.
 */
struct resource * __devm_request_region(struct device *dev,
				struct resource *parent, resource_size_t start,
				resource_size_t n, const char *name)
{
	struct region_devres *dr = NULL;
	struct resource *res;

	dr = devres_alloc(devm_region_release, sizeof(struct region_devres),
			  GFP_KERNEL);
	if (!dr)
		return NULL;

	dr->parent = parent;
	dr->start = start;
	dr->n = n;

	res = __request_region(parent, start, n, name, 0);
	if (res)
		devres_add(dev, dr);
	else
		devres_free(dr);

	return res;
}
EXPORT_SYMBOL(__devm_request_region);

/*
 * Device-managed __release_region(): releases the region and drops the
 * matching devres node so the destructor does not run a second time.
 */
void __devm_release_region(struct device *dev, struct resource *parent,
			   resource_size_t start, resource_size_t n)
{
	struct region_devres match_data = { parent, start, n };

	__release_region(parent, start, n);
	WARN_ON(devres_destroy(dev, devm_region_release, devm_region_match,
			       &match_data));
}
EXPORT_SYMBOL(__devm_release_region);

/*
 * Called from init/main.c to reserve IO ports.
 */
#define MAXRESERVE 4
/*
 * Parse the "reserve=<start>,<len>[,<start>,<len>...]" boot parameter and
 * mark up to MAXRESERVE regions busy.  Start addresses >= 0x10000 go into
 * the iomem tree, smaller ones into the ioport tree.
 */
static int __init reserve_setup(char *str)
{
	static int reserved;
	static struct resource reserve[MAXRESERVE];

	for (;;) {
		unsigned int io_start, io_num;
		int x = reserved;

		/* need a "<start>," (2 == value parsed, more follows) ... */
		if (get_option (&str, &io_start) != 2)
			break;
		/* ... followed by a "<len>" */
		if (get_option (&str, &io_num) == 0)
			break;
		if (x < MAXRESERVE) {
			struct resource *res = reserve + x;
			res->name = "reserved";
			res->start = io_start;
			res->end = io_start + io_num - 1;
			res->flags = IORESOURCE_BUSY;
			res->child = NULL;
			if (request_resource(res->start >= 0x10000 ? &iomem_resource : &ioport_resource, res) == 0)
				reserved = x+1;
		}
	}
	return 1;
}

__setup("reserve=", reserve_setup);

/*
 * Check if the requested addr and size spans more than any slot in the
 * iomem resource tree.
 */
int iomem_map_sanity_check(resource_size_t addr, unsigned long size)
{
	struct resource *p = &iomem_resource;
	int err = 0;
	loff_t l;

	read_lock(&resource_lock);
	for (p = p->child; p ; p = r_next(NULL, p, &l)) {
		/*
		 * We can probably skip the resources without
		 * IORESOURCE_IO attribute?
		 */
		if (p->start >= addr + size)
			continue;
		if (p->end < addr)
			continue;
		/* overlapping slot fully covers the request (page granular): ok */
		if (PFN_DOWN(p->start) <= PFN_DOWN(addr) &&
		    PFN_DOWN(p->end) >= PFN_DOWN(addr + size - 1))
			continue;
		/*
		 * if a resource is "BUSY", it's not a hardware resource
		 * but a driver mapping of such a resource; we don't want
		 * to warn for those; some drivers legitimately map only
		 * partial hardware resources. (example: vesafb)
		 */
		if (p->flags & IORESOURCE_BUSY)
			continue;

		printk(KERN_WARNING "resource sanity check: requesting [mem %#010llx-%#010llx], which spans more than %s %pR\n",
		       (unsigned long long)addr,
		       (unsigned long long)(addr + size - 1),
		       p->name, p);
		err = -1;
		break;
	}
	read_unlock(&resource_lock);

	return err;
}

#ifdef CONFIG_STRICT_DEVMEM
static int strict_iomem_checks = 1;
#else
static int strict_iomem_checks;
#endif

/*
 * check if an address is reserved in the iomem resource tree
 * returns 1 if reserved, 0 if not reserved.
 */
int iomem_is_exclusive(u64 addr)
{
	struct resource *p = &iomem_resource;
	int err = 0;
	loff_t l;
	int size = PAGE_SIZE;

	if (!strict_iomem_checks)
		return 0;

	/* check the whole page containing @addr */
	addr = addr & PAGE_MASK;

	read_lock(&resource_lock);
	for (p = p->child; p ; p = r_next(NULL, p, &l)) {
		/*
		 * We can probably skip the resources without
		 * IORESOURCE_IO attribute?
		 */
		if (p->start >= addr + size)
			break;
		if (p->end < addr)
			continue;
		if (p->flags & IORESOURCE_BUSY &&
		     p->flags & IORESOURCE_EXCLUSIVE) {
			err = 1;
			break;
		}
	}
	read_unlock(&resource_lock);

	return err;
}

/* "iomem=relaxed" / "iomem=strict" boot parameter toggles the check above */
static int __init strict_iomem(char *str)
{
	if (strstr(str, "relaxed"))
		strict_iomem_checks = 0;
	if (strstr(str, "strict"))
		strict_iomem_checks = 1;
	return 1;
}

__setup("iomem=", strict_iomem);
gpl-2.0
mdalexca/OP3
drivers/gpu/drm/radeon/radeon_connectors.c
242
79283
/*
 * Copyright 2007-8 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 */
#include <drm/drmP.h>
#include <drm/drm_edid.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/radeon_drm.h>
#include "radeon.h"
#include "atom.h"

#include <linux/pm_runtime.h>

/*
 * Hotplug-interrupt handler for a connector: re-arm HPD polarity and,
 * for DP sinks, re-check the sink type and retrain or turn off the link
 * as needed.
 */
void radeon_connector_hotplug(struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_connector *radeon_connector = to_radeon_connector(connector);

	/* bail if the connector does not have hpd pin, e.g.,
	 * VGA, TV, etc.
	 */
	if (radeon_connector->hpd.hpd == RADEON_HPD_NONE)
		return;

	radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);

	/* if the connector is already off, don't turn it back on */
	/* FIXME: This access isn't protected by any locks.
	 */
	if (connector->dpms != DRM_MODE_DPMS_ON)
		return;

	/* just deal with DP (not eDP) here. */
	if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) {
		struct radeon_connector_atom_dig *dig_connector =
			radeon_connector->con_priv;

		/* if existing sink type was not DP no need to retrain */
		if (dig_connector->dp_sink_type != CONNECTOR_OBJECT_ID_DISPLAYPORT)
			return;

		/* first get sink type as it may be reset after (un)plug */
		dig_connector->dp_sink_type = radeon_dp_getsinktype(radeon_connector);
		/* don't do anything if sink is not display port, i.e.,
		 * passive dp->(dvi|hdmi) adaptor
		 */
		if (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) {
			int saved_dpms = connector->dpms;
			/* Only turn off the display if it's physically disconnected */
			if (!radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) {
				drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
			} else if (radeon_dp_needs_link_train(radeon_connector)) {
				/* set it to OFF so that drm_helper_connector_dpms()
				 * won't return immediately since the current state
				 * is ON at this point.
				 */
				connector->dpms = DRM_MODE_DPMS_OFF;
				drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
			}
			connector->dpms = saved_dpms;
		}
	}
}

/* Force a full modeset on the CRTC driving @encoder so a changed
 * property takes effect immediately. */
static void radeon_property_change_mode(struct drm_encoder *encoder)
{
	struct drm_crtc *crtc = encoder->crtc;

	if (crtc && crtc->enabled) {
		drm_crtc_helper_set_mode(crtc, &crtc->mode,
					 crtc->x, crtc->y, crtc->primary->fb);
	}
}

/*
 * Determine the bits-per-color to use for @connector: start from the
 * EDID-reported bpc (default 8), then clamp according to HDMI deep-color
 * support, the sink's max TMDS clock, and the radeon_deep_color module
 * parameter.
 */
int radeon_get_monitor_bpc(struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
	struct radeon_connector_atom_dig *dig_connector;
	int bpc = 8;
	int mode_clock, max_tmds_clock;

	switch (connector->connector_type) {
	case DRM_MODE_CONNECTOR_DVII:
	case DRM_MODE_CONNECTOR_HDMIB:
		if (radeon_connector->use_digital) {
			if (drm_detect_hdmi_monitor(radeon_connector_edid(connector))) {
				if (connector->display_info.bpc)
					bpc = connector->display_info.bpc;
			}
		}
		break;
	case DRM_MODE_CONNECTOR_DVID:
	case DRM_MODE_CONNECTOR_HDMIA:
		if (drm_detect_hdmi_monitor(radeon_connector_edid(connector))) {
			if (connector->display_info.bpc)
				bpc = connector->display_info.bpc;
		}
		break;
	case DRM_MODE_CONNECTOR_DisplayPort:
		dig_connector = radeon_connector->con_priv;
		if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
		    (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP) ||
		    drm_detect_hdmi_monitor(radeon_connector_edid(connector))) {
			if (connector->display_info.bpc)
				bpc = connector->display_info.bpc;
		}
		break;
	case DRM_MODE_CONNECTOR_eDP:
	case DRM_MODE_CONNECTOR_LVDS:
		if (connector->display_info.bpc)
			bpc = connector->display_info.bpc;
		else if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE5(rdev)) {
			/* no EDID bpc: fall back to the panel info from the vbios */
			struct drm_connector_helper_funcs *connector_funcs =
				connector->helper_private;
			struct drm_encoder *encoder = connector_funcs->best_encoder(connector);
			struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
			struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;

			if (dig->lcd_misc & ATOM_PANEL_MISC_V13_6BIT_PER_COLOR)
				bpc = 6;
			else if (dig->lcd_misc & ATOM_PANEL_MISC_V13_8BIT_PER_COLOR)
				bpc = 8;
		}
		break;
	}

	if (drm_detect_hdmi_monitor(radeon_connector_edid(connector))) {
		/* hdmi deep color only implemented on DCE4+ */
		if ((bpc > 8) && !ASIC_IS_DCE4(rdev)) {
			DRM_DEBUG("%s: HDMI deep color %d bpc unsupported. Using 8 bpc.\n",
				  connector->name, bpc);
			bpc = 8;
		}

		/*
		 * Pre DCE-8 hw can't handle > 12 bpc, and more than 12 bpc doesn't make
		 * much sense without support for > 12 bpc framebuffers. RGB 4:4:4 at
		 * 12 bpc is always supported on hdmi deep color sinks, as this is
		 * required by the HDMI-1.3 spec. Clamp to a safe 12 bpc maximum.
		 */
		if (bpc > 12) {
			DRM_DEBUG("%s: HDMI deep color %d bpc unsupported. Using 12 bpc.\n",
				  connector->name, bpc);
			bpc = 12;
		}

		/* Any defined maximum tmds clock limit we must not exceed? */
		if (connector->max_tmds_clock > 0) {
			/* mode_clock is clock in kHz for mode to be modeset on this connector */
			mode_clock = radeon_connector->pixelclock_for_modeset;

			/* Maximum allowable input clock in kHz */
			max_tmds_clock = connector->max_tmds_clock * 1000;

			DRM_DEBUG("%s: hdmi mode dotclock %d kHz, max tmds input clock %d kHz.\n",
				  connector->name, mode_clock, max_tmds_clock);

			/* Check if bpc is within clock limit. Try to degrade gracefully otherwise */
			if ((bpc == 12) && (mode_clock * 3/2 > max_tmds_clock)) {
				if ((connector->display_info.edid_hdmi_dc_modes & DRM_EDID_HDMI_DC_30) &&
				    (mode_clock * 5/4 <= max_tmds_clock))
					bpc = 10;
				else
					bpc = 8;
				DRM_DEBUG("%s: HDMI deep color 12 bpc exceeds max tmds clock. Using %d bpc.\n",
					  connector->name, bpc);
			}

			if ((bpc == 10) && (mode_clock * 5/4 > max_tmds_clock)) {
				bpc = 8;
				DRM_DEBUG("%s: HDMI deep color 10 bpc exceeds max tmds clock. Using %d bpc.\n",
					  connector->name, bpc);
			}
		} else if (bpc > 8) {
			/* max_tmds_clock missing, but hdmi spec mandates it for deep color. */
			DRM_DEBUG("%s: Required max tmds clock for HDMI deep color missing. Using 8 bpc.\n",
				  connector->name);
			bpc = 8;
		}
	}

	if ((radeon_deep_color == 0) && (bpc > 8)) {
		DRM_DEBUG("%s: Deep color disabled. Set radeon module param deep_color=1 to enable.\n",
			  connector->name);
		bpc = 8;
	}

	DRM_DEBUG("%s: Display bpc=%d, returned bpc=%d\n",
		  connector->name, connector->display_info.bpc, bpc);

	return bpc;
}

/*
 * Mirror @status into the BIOS scratch registers for every encoder
 * attached to @connector; only the best encoder is marked connected.
 */
static void
radeon_connector_update_scratch_regs(struct drm_connector *connector,
				     enum drm_connector_status status)
{
	struct drm_device *dev = connector->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct drm_encoder *best_encoder = NULL;
	struct drm_encoder *encoder = NULL;
	struct drm_connector_helper_funcs *connector_funcs = connector->helper_private;
	bool connected;
	int i;

	best_encoder = connector_funcs->best_encoder(connector);

	for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
		if (connector->encoder_ids[i] == 0)
			break;

		encoder = drm_encoder_find(connector->dev,
					   connector->encoder_ids[i]);
		if (!encoder)
			continue;

		if ((encoder == best_encoder) && (status == connector_status_connected))
			connected = true;
		else
			connected = false;

		if (rdev->is_atom_bios)
			radeon_atombios_connected_scratch_regs(connector, encoder, connected);
		else
			radeon_combios_connected_scratch_regs(connector, encoder, connected);
	}
}

/* Return the first encoder of @encoder_type attached to @connector,
 * or NULL if none is attached. */
static struct drm_encoder *radeon_find_encoder(struct drm_connector *connector, int encoder_type)
{
	struct drm_encoder *encoder;
	int i;

	for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
		if (connector->encoder_ids[i] == 0)
			break;

		encoder = drm_encoder_find(connector->dev, connector->encoder_ids[i]);
		if (!encoder)
			continue;

		if (encoder->encoder_type == encoder_type)
			return encoder;
	}
	return NULL;
}

/*
 * Return the cached EDID for @connector, lazily copying it out of the
 * connector's EDID property blob if the cache is empty.  May return NULL.
 */
struct edid *radeon_connector_edid(struct drm_connector *connector)
{
	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
	struct drm_property_blob *edid_blob = connector->edid_blob_ptr;

	if (radeon_connector->edid) {
		return radeon_connector->edid;
	} else if (edid_blob) {
		struct edid *edid = kmemdup(edid_blob->data,
					    edid_blob->length, GFP_KERNEL);
		if (edid)
			radeon_connector->edid = edid;
	}
	return radeon_connector->edid;
}

/*
 * Populate radeon_connector->edid: try DP-bridge aux, DP/eDP aux, then
 * plain DDC, and finally fall back to a vbios-hardcoded EDID (except
 * when runtime PM on a PX laptop requires reporting disconnected).
 */
static void radeon_connector_get_edid(struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_connector *radeon_connector = to_radeon_connector(connector);

	/* already cached */
	if (radeon_connector->edid)
		return;

	/* on hw with routers, select right port */
	if (radeon_connector->router.ddc_valid)
		radeon_router_select_ddc_port(radeon_connector);

	if ((radeon_connector_encoder_get_dp_bridge_encoder_id(connector) !=
	     ENCODER_OBJECT_ID_NONE) &&
	    radeon_connector->ddc_bus->has_aux) {
		radeon_connector->edid = drm_get_edid(connector,
						      &radeon_connector->ddc_bus->aux.ddc);
	} else if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
		   (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
		struct radeon_connector_atom_dig *dig = radeon_connector->con_priv;

		if ((dig->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT ||
		     dig->dp_sink_type == CONNECTOR_OBJECT_ID_eDP) &&
		    radeon_connector->ddc_bus->has_aux)
			radeon_connector->edid = drm_get_edid(&radeon_connector->base,
							      &radeon_connector->ddc_bus->aux.ddc);
		else if (radeon_connector->ddc_bus)
			radeon_connector->edid = drm_get_edid(&radeon_connector->base,
							      &radeon_connector->ddc_bus->adapter);
	} else if (radeon_connector->ddc_bus) {
		radeon_connector->edid = drm_get_edid(&radeon_connector->base,
						      &radeon_connector->ddc_bus->adapter);
	}

	if (!radeon_connector->edid) {
		/* don't fetch the edid from the vbios if ddc fails and runpm is
		 * enabled so we report disconnected.
		 */
		if ((rdev->flags & RADEON_IS_PX) && (radeon_runtime_pm != 0))
			return;

		if (rdev->is_atom_bios) {
			/* some laptops provide a hardcoded edid in rom for LCDs */
			if (((connector->connector_type == DRM_MODE_CONNECTOR_LVDS) ||
			     (connector->connector_type == DRM_MODE_CONNECTOR_eDP)))
				radeon_connector->edid = radeon_bios_get_hardcoded_edid(rdev);
		} else {
			/* some servers provide a hardcoded edid in rom for KVMs */
			radeon_connector->edid = radeon_bios_get_hardcoded_edid(rdev);
		}
	}
}

/* Drop the cached EDID so the next probe re-reads it. */
static void radeon_connector_free_edid(struct drm_connector *connector)
{
	struct radeon_connector *radeon_connector = to_radeon_connector(connector);

	if (radeon_connector->edid) {
		kfree(radeon_connector->edid);
		radeon_connector->edid = NULL;
	}
}

/*
 * Publish the cached EDID as the connector's EDID property and add its
 * modes (plus ELD audio data).  Returns the number of modes added, 0 if
 * no EDID is cached.
 */
static int radeon_ddc_get_modes(struct drm_connector *connector)
{
	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
	int ret;

	if (radeon_connector->edid) {
		drm_mode_connector_update_edid_property(connector, radeon_connector->edid);
		ret = drm_add_edid_modes(connector, radeon_connector->edid);
		drm_edid_to_eld(connector, radeon_connector->edid);
		return ret;
	}
	drm_mode_connector_update_edid_property(connector, NULL);
	return 0;
}

/* For single-encoder connectors: return that encoder (or NULL). */
static struct drm_encoder *radeon_best_single_encoder(struct drm_connector *connector)
{
	int enc_id = connector->encoder_ids[0];
	/* pick the encoder ids */
	if (enc_id)
		return drm_encoder_find(connector->dev, enc_id);
	return NULL;
}

/*
 * Record the connector's first probed (preferred) mode as the encoder's
 * native panel mode; clear it (clock = 0) when no modes were probed.
 */
static void radeon_get_native_mode(struct drm_connector *connector)
{
	struct drm_encoder *encoder = radeon_best_single_encoder(connector);
	struct radeon_encoder *radeon_encoder;

	if (encoder == NULL)
		return;

	radeon_encoder = to_radeon_encoder(encoder);

	if (!list_empty(&connector->probed_modes)) {
		struct drm_display_mode *preferred_mode =
			list_first_entry(&connector->probed_modes,
					 struct drm_display_mode, head);

		radeon_encoder->native_mode = *preferred_mode;
	} else {
		radeon_encoder->native_mode.clock = 0;
	}
}

/*
 * radeon_connector_analog_encoder_conflict_solve
 * - search for other connectors sharing this encoder
 *   if priority is true, then set them disconnected if this is connected
 *   if priority is false, set us disconnected if they are connected
 */
static enum drm_connector_status
radeon_connector_analog_encoder_conflict_solve(struct drm_connector *connector,
					       struct drm_encoder *encoder,
					       enum drm_connector_status current_status,
					       bool priority)
{
	struct drm_device *dev = connector->dev;
	struct drm_connector *conflict;
	struct radeon_connector *radeon_conflict;
	int i;

	list_for_each_entry(conflict, &dev->mode_config.connector_list, head) {
		if (conflict == connector)
			continue;

		radeon_conflict = to_radeon_connector(conflict);
		for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
			if (conflict->encoder_ids[i] == 0)
				break;

			/* if the IDs match */
			if (conflict->encoder_ids[i] == encoder->base.id) {
				if (conflict->status != connector_status_connected)
					continue;

				/* digital connectors don't contend for the analog encoder */
				if (radeon_conflict->use_digital)
					continue;

				if (priority == true) {
					DRM_DEBUG_KMS("1: conflicting encoders switching off %s\n",
						      conflict->name);
					DRM_DEBUG_KMS("in favor of %s\n",
						      connector->name);
					conflict->status = connector_status_disconnected;
					radeon_connector_update_scratch_regs(conflict, connector_status_disconnected);
				} else {
					DRM_DEBUG_KMS("2: conflicting encoders switching off %s\n",
						      connector->name);
					DRM_DEBUG_KMS("in favor of %s\n",
						      conflict->name);
					current_status = connector_status_disconnected;
				}
				break;
			}
		}
	}
	return current_status;

}

/*
 * Build a display mode for a flat panel from the encoder's native mode,
 * or a CVT approximation when only the resolution is known.  Returns a
 * newly allocated mode, or NULL when no native geometry is recorded.
 */
static struct drm_display_mode *radeon_fp_native_mode(struct drm_encoder *encoder)
{
	struct drm_device *dev = encoder->dev;
	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
	struct drm_display_mode *mode = NULL;
	struct drm_display_mode *native_mode = &radeon_encoder->native_mode;

	if (native_mode->hdisplay != 0 &&
	    native_mode->vdisplay != 0 &&
	    native_mode->clock != 0) {
		mode = drm_mode_duplicate(dev, native_mode);
		mode->type = DRM_MODE_TYPE_PREFERRED | DRM_MODE_TYPE_DRIVER;
		drm_mode_set_name(mode);

		DRM_DEBUG_KMS("Adding native panel mode %s\n", mode->name);
	} else if (native_mode->hdisplay != 0 &&
		   native_mode->vdisplay != 0) {
		/* mac laptops without an edid */
		/* Note that this is not necessarily the exact panel mode,
		 * but an approximation based on the cvt formula.  For these
		 * systems we should ideally read the mode info out of the
		 * registers or add a mode table, but this works and is much
		 * simpler.
		 */
		mode = drm_cvt_mode(dev, native_mode->hdisplay, native_mode->vdisplay, 60, true, false, false);
		mode->type = DRM_MODE_TYPE_PREFERRED | DRM_MODE_TYPE_DRIVER;
		DRM_DEBUG_KMS("Adding cvt approximation of native panel mode %s\n", mode->name);
	}
	return mode;
}

/*
 * Add a standard set of CVT modes to @connector, filtered by device
 * type: TV outputs are capped at 1024x768, and LCDs only get modes that
 * fit (strictly) inside the native panel mode for scaling.
 */
static void radeon_add_common_modes(struct drm_encoder *encoder, struct drm_connector *connector)
{
	struct drm_device *dev = encoder->dev;
	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
	struct drm_display_mode *mode = NULL;
	struct drm_display_mode *native_mode = &radeon_encoder->native_mode;
	int i;
	struct mode_size {
		int w;
		int h;
	} common_modes[17] = {
		{ 640,  480},
		{ 720,  480},
		{ 800,  600},
		{ 848,  480},
		{1024,  768},
		{1152,  768},
		{1280,  720},
		{1280,  800},
		{1280,  854},
		{1280,  960},
		{1280, 1024},
		{1440,  900},
		{1400, 1050},
		{1680, 1050},
		{1600, 1200},
		{1920, 1080},
		{1920, 1200}
	};

	for (i = 0; i < 17; i++) {
		if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT)) {
			if (common_modes[i].w > 1024 ||
			    common_modes[i].h > 768)
				continue;
		}
		if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
			if (common_modes[i].w > native_mode->hdisplay ||
			    common_modes[i].h > native_mode->vdisplay ||
			    (common_modes[i].w == native_mode->hdisplay &&
			     common_modes[i].h == native_mode->vdisplay))
				continue;
		}
		if (common_modes[i].w < 320 || common_modes[i].h < 200)
			continue;

		mode = drm_cvt_mode(dev, common_modes[i].w, common_modes[i].h, 60, false, false, false);
		drm_mode_probed_add(connector, mode);
	}
}

/*
 * drm connector .set_property callback: route the changed property to
 * the right encoder state and trigger a modeset when needed.  Each
 * property is handled by its own if-block; only the matching one acts.
 */
static int radeon_connector_set_property(struct drm_connector *connector, struct drm_property *property,
				  uint64_t val)
{
	struct drm_device *dev =
connector->dev; struct radeon_device *rdev = dev->dev_private; struct drm_encoder *encoder; struct radeon_encoder *radeon_encoder; if (property == rdev->mode_info.coherent_mode_property) { struct radeon_encoder_atom_dig *dig; bool new_coherent_mode; /* need to find digital encoder on connector */ encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_TMDS); if (!encoder) return 0; radeon_encoder = to_radeon_encoder(encoder); if (!radeon_encoder->enc_priv) return 0; dig = radeon_encoder->enc_priv; new_coherent_mode = val ? true : false; if (dig->coherent_mode != new_coherent_mode) { dig->coherent_mode = new_coherent_mode; radeon_property_change_mode(&radeon_encoder->base); } } if (property == rdev->mode_info.audio_property) { struct radeon_connector *radeon_connector = to_radeon_connector(connector); /* need to find digital encoder on connector */ encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_TMDS); if (!encoder) return 0; radeon_encoder = to_radeon_encoder(encoder); if (radeon_connector->audio != val) { radeon_connector->audio = val; radeon_property_change_mode(&radeon_encoder->base); } } if (property == rdev->mode_info.dither_property) { struct radeon_connector *radeon_connector = to_radeon_connector(connector); /* need to find digital encoder on connector */ encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_TMDS); if (!encoder) return 0; radeon_encoder = to_radeon_encoder(encoder); if (radeon_connector->dither != val) { radeon_connector->dither = val; radeon_property_change_mode(&radeon_encoder->base); } } if (property == rdev->mode_info.underscan_property) { /* need to find digital encoder on connector */ encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_TMDS); if (!encoder) return 0; radeon_encoder = to_radeon_encoder(encoder); if (radeon_encoder->underscan_type != val) { radeon_encoder->underscan_type = val; radeon_property_change_mode(&radeon_encoder->base); } } if (property == rdev->mode_info.underscan_hborder_property) { /* 
need to find digital encoder on connector */ encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_TMDS); if (!encoder) return 0; radeon_encoder = to_radeon_encoder(encoder); if (radeon_encoder->underscan_hborder != val) { radeon_encoder->underscan_hborder = val; radeon_property_change_mode(&radeon_encoder->base); } } if (property == rdev->mode_info.underscan_vborder_property) { /* need to find digital encoder on connector */ encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_TMDS); if (!encoder) return 0; radeon_encoder = to_radeon_encoder(encoder); if (radeon_encoder->underscan_vborder != val) { radeon_encoder->underscan_vborder = val; radeon_property_change_mode(&radeon_encoder->base); } } if (property == rdev->mode_info.tv_std_property) { encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_TVDAC); if (!encoder) { encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_DAC); } if (!encoder) return 0; radeon_encoder = to_radeon_encoder(encoder); if (!radeon_encoder->enc_priv) return 0; if (ASIC_IS_AVIVO(rdev) || radeon_r4xx_atom) { struct radeon_encoder_atom_dac *dac_int; dac_int = radeon_encoder->enc_priv; dac_int->tv_std = val; } else { struct radeon_encoder_tv_dac *dac_int; dac_int = radeon_encoder->enc_priv; dac_int->tv_std = val; } radeon_property_change_mode(&radeon_encoder->base); } if (property == rdev->mode_info.load_detect_property) { struct radeon_connector *radeon_connector = to_radeon_connector(connector); if (val == 0) radeon_connector->dac_load_detect = false; else radeon_connector->dac_load_detect = true; } if (property == rdev->mode_info.tmds_pll_property) { struct radeon_encoder_int_tmds *tmds = NULL; bool ret = false; /* need to find digital encoder on connector */ encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_TMDS); if (!encoder) return 0; radeon_encoder = to_radeon_encoder(encoder); tmds = radeon_encoder->enc_priv; if (!tmds) return 0; if (val == 0) { if (rdev->is_atom_bios) ret = 
radeon_atombios_get_tmds_info(radeon_encoder, tmds); else ret = radeon_legacy_get_tmds_info_from_combios(radeon_encoder, tmds); } if (val == 1 || ret == false) { radeon_legacy_get_tmds_info_from_table(radeon_encoder, tmds); } radeon_property_change_mode(&radeon_encoder->base); } if (property == dev->mode_config.scaling_mode_property) { enum radeon_rmx_type rmx_type; if (connector->encoder) radeon_encoder = to_radeon_encoder(connector->encoder); else { struct drm_connector_helper_funcs *connector_funcs = connector->helper_private; radeon_encoder = to_radeon_encoder(connector_funcs->best_encoder(connector)); } switch (val) { default: case DRM_MODE_SCALE_NONE: rmx_type = RMX_OFF; break; case DRM_MODE_SCALE_CENTER: rmx_type = RMX_CENTER; break; case DRM_MODE_SCALE_ASPECT: rmx_type = RMX_ASPECT; break; case DRM_MODE_SCALE_FULLSCREEN: rmx_type = RMX_FULL; break; } if (radeon_encoder->rmx_type == rmx_type) return 0; if ((rmx_type != DRM_MODE_SCALE_NONE) && (radeon_encoder->native_mode.clock == 0)) return 0; radeon_encoder->rmx_type = rmx_type; radeon_property_change_mode(&radeon_encoder->base); } return 0; } static void radeon_fixup_lvds_native_mode(struct drm_encoder *encoder, struct drm_connector *connector) { struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); struct drm_display_mode *native_mode = &radeon_encoder->native_mode; struct drm_display_mode *t, *mode; /* If the EDID preferred mode doesn't match the native mode, use it */ list_for_each_entry_safe(mode, t, &connector->probed_modes, head) { if (mode->type & DRM_MODE_TYPE_PREFERRED) { if (mode->hdisplay != native_mode->hdisplay || mode->vdisplay != native_mode->vdisplay) memcpy(native_mode, mode, sizeof(*mode)); } } /* Try to get native mode details from EDID if necessary */ if (!native_mode->clock) { list_for_each_entry_safe(mode, t, &connector->probed_modes, head) { if (mode->hdisplay == native_mode->hdisplay && mode->vdisplay == native_mode->vdisplay) { *native_mode = *mode; 
				/* Derive timing details (sync, blanking) for the
				 * matched mode before adopting it as native. */
				drm_mode_set_crtcinfo(native_mode, CRTC_INTERLACE_HALVE_V);
				DRM_DEBUG_KMS("Determined LVDS native mode details from EDID\n");
				break;
			}
		}
	}
	/* Still no usable native mode: scaling (RMX) cannot work, turn it off. */
	if (!native_mode->clock) {
		DRM_DEBUG_KMS("No LVDS native mode details, disabling RMX\n");
		radeon_encoder->rmx_type = RMX_OFF;
	}
}

/*
 * Build the LVDS mode list for @connector.
 *
 * Prefers EDID-provided modes (fixing up the native panel mode from them);
 * when no EDID modes are available, falls back to the panel's native mode
 * from the vbios tables. Returns the number of modes added.
 */
static int radeon_lvds_get_modes(struct drm_connector *connector)
{
	struct drm_encoder *encoder;
	int ret = 0;
	struct drm_display_mode *mode;

	radeon_connector_get_edid(connector);
	ret = radeon_ddc_get_modes(connector);
	if (ret > 0) {
		encoder = radeon_best_single_encoder(connector);
		if (encoder) {
			radeon_fixup_lvds_native_mode(encoder, connector);
			/* add scaled modes */
			radeon_add_common_modes(encoder, connector);
		}
		return ret;
	}

	encoder = radeon_best_single_encoder(connector);
	if (!encoder)
		return 0;

	/* we have no EDID modes */
	mode = radeon_fp_native_mode(encoder);
	if (mode) {
		ret = 1;
		drm_mode_probed_add(connector, mode);
		/* add the width/height from vbios tables if available */
		connector->display_info.width_mm = mode->width_mm;
		connector->display_info.height_mm = mode->height_mm;
		/* add scaled modes */
		radeon_add_common_modes(encoder, connector);
	}

	return ret;
}

/*
 * Validate @mode against the LVDS panel.
 *
 * Rejects modes smaller than 320x240, larger than the panel's native
 * resolution, and (when scaling is disabled) anything non-native.
 */
static int radeon_lvds_mode_valid(struct drm_connector *connector,
				  struct drm_display_mode *mode)
{
	struct drm_encoder *encoder = radeon_best_single_encoder(connector);

	if ((mode->hdisplay < 320) || (mode->vdisplay < 240))
		return MODE_PANEL;

	if (encoder) {
		struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
		struct drm_display_mode *native_mode = &radeon_encoder->native_mode;

		/* AVIVO hardware supports downscaling modes larger than the panel
		 * to the panel size, but I'm not sure this is desirable.
		 */
		if ((mode->hdisplay > native_mode->hdisplay) ||
		    (mode->vdisplay > native_mode->vdisplay))
			return MODE_PANEL;

		/* if scaling is disabled, block non-native modes */
		if (radeon_encoder->rmx_type == RMX_OFF) {
			if ((mode->hdisplay != native_mode->hdisplay) ||
			    (mode->vdisplay != native_mode->vdisplay))
				return MODE_PANEL;
		}
	}

	return MODE_OK;
}

/*
 * Detect whether an LVDS panel is present.
 *
 * Considers the panel connected when the encoder reports a plausible
 * native mode (>= 320x240) or an EDID is available.  On PX (hybrid
 * graphics) systems with runtime PM enabled, a failed native-mode check
 * is reported as disconnected rather than trusting the vbios.
 * Takes a pm_runtime reference for the duration of the probe.
 */
static enum drm_connector_status
radeon_lvds_detect(struct drm_connector *connector, bool force)
{
	struct drm_device *dev = connector->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
	struct drm_encoder *encoder = radeon_best_single_encoder(connector);
	enum drm_connector_status ret = connector_status_disconnected;
	int r;

	r = pm_runtime_get_sync(connector->dev->dev);
	if (r < 0)
		return connector_status_disconnected;

	if (encoder) {
		struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
		struct drm_display_mode *native_mode = &radeon_encoder->native_mode;

		/* check if panel is valid */
		if (native_mode->hdisplay >= 320 && native_mode->vdisplay >= 240)
			ret = connector_status_connected;
		/* don't fetch the edid from the vbios if ddc fails and runpm is
		 * enabled so we report disconnected.
		 */
		if ((rdev->flags & RADEON_IS_PX) && (radeon_runtime_pm != 0))
			ret = connector_status_disconnected;
	}

	/* check for edid as well */
	radeon_connector_get_edid(connector);
	if (radeon_connector->edid)
		ret = connector_status_connected;
	/* check acpi lid status ???
	 */

	radeon_connector_update_scratch_regs(connector, ret);
	pm_runtime_mark_last_busy(connector->dev->dev);
	pm_runtime_put_autosuspend(connector->dev->dev);
	return ret;
}

/*
 * Tear down a radeon connector: release the cached EDID, the
 * connector-private data, and the DRM connector object itself.
 */
static void radeon_connector_destroy(struct drm_connector *connector)
{
	struct radeon_connector *radeon_connector = to_radeon_connector(connector);

	radeon_connector_free_edid(connector);
	kfree(radeon_connector->con_priv);
	drm_connector_unregister(connector);
	drm_connector_cleanup(connector);
	kfree(connector);
}

/*
 * Handle property changes on LVDS connectors.
 *
 * Only the scaling-mode property is honoured; it is translated to the
 * encoder's RMX type and, when changed, the current mode is re-set so the
 * new scaling takes effect.  Always returns 0.
 */
static int radeon_lvds_set_property(struct drm_connector *connector,
				    struct drm_property *property,
				    uint64_t value)
{
	struct drm_device *dev = connector->dev;
	struct radeon_encoder *radeon_encoder;
	enum radeon_rmx_type rmx_type;

	DRM_DEBUG_KMS("\n");
	if (property != dev->mode_config.scaling_mode_property)
		return 0;

	if (connector->encoder)
		radeon_encoder = to_radeon_encoder(connector->encoder);
	else {
		struct drm_connector_helper_funcs *connector_funcs = connector->helper_private;
		radeon_encoder = to_radeon_encoder(connector_funcs->best_encoder(connector));
	}

	switch (value) {
	case DRM_MODE_SCALE_NONE:
		rmx_type = RMX_OFF;
		break;
	case DRM_MODE_SCALE_CENTER:
		rmx_type = RMX_CENTER;
		break;
	case DRM_MODE_SCALE_ASPECT:
		rmx_type = RMX_ASPECT;
		break;
	default:
	case DRM_MODE_SCALE_FULLSCREEN:
		rmx_type = RMX_FULL;
		break;
	}
	if (radeon_encoder->rmx_type == rmx_type)
		return 0;

	radeon_encoder->rmx_type = rmx_type;

	radeon_property_change_mode(&radeon_encoder->base);
	return 0;
}

/* Helper vtable for LVDS connectors (mode probing / validation). */
static const struct drm_connector_helper_funcs radeon_lvds_connector_helper_funcs = {
	.get_modes = radeon_lvds_get_modes,
	.mode_valid = radeon_lvds_mode_valid,
	.best_encoder = radeon_best_single_encoder,
};

/* Core vtable for LVDS connectors (detect / properties / teardown). */
static const struct drm_connector_funcs radeon_lvds_connector_funcs = {
	.dpms = drm_helper_connector_dpms,
	.detect = radeon_lvds_detect,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.destroy = radeon_connector_destroy,
	.set_property = radeon_lvds_set_property,
};

static int radeon_vga_get_modes(struct drm_connector *connector)
{
	int ret;

	/* Body of radeon_vga_get_modes(): pull modes over DDC and cache
	 * the native mode for scaling decisions. */
	radeon_connector_get_edid(connector);
	ret = radeon_ddc_get_modes(connector);

	radeon_get_native_mode(connector);

	return ret;
}

/*
 * Validate @mode for a VGA connector: only the pixel clock is checked
 * against the ASIC's maximum (mode->clock is in kHz, max_pixel_clock in
 * 10 kHz units, hence the /10).
 */
static int radeon_vga_mode_valid(struct drm_connector *connector,
				 struct drm_display_mode *mode)
{
	struct drm_device *dev = connector->dev;
	struct radeon_device *rdev = dev->dev_private;

	/* XXX check mode bandwidth */
	if ((mode->clock / 10) > rdev->clock.max_pixel_clock)
		return MODE_CLOCK_HIGH;

	return MODE_OK;
}

/*
 * Detect a monitor on a VGA connector.
 *
 * First tries a DDC probe; if DDC responds, the EDID decides (with a
 * workaround for boards sharing one DDC line between an analog and a
 * digital connector).  Without DDC, falls back to destructive DAC load
 * detection — but only when @force is set; otherwise the previously
 * load-detected status is reused.  Finally, a vbios-hardcoded EDID (used
 * on RN50/RV100 server boards for KVMs) forces "connected".
 * Holds a pm_runtime reference across the probe.
 */
static enum drm_connector_status
radeon_vga_detect(struct drm_connector *connector, bool force)
{
	struct drm_device *dev = connector->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
	struct drm_encoder *encoder;
	struct drm_encoder_helper_funcs *encoder_funcs;
	bool dret = false;
	enum drm_connector_status ret = connector_status_disconnected;
	int r;

	r = pm_runtime_get_sync(connector->dev->dev);
	if (r < 0)
		return connector_status_disconnected;

	encoder = radeon_best_single_encoder(connector);
	if (!encoder)
		ret = connector_status_disconnected;

	if (radeon_connector->ddc_bus)
		dret = radeon_ddc_probe(radeon_connector, false);
	if (dret) {
		radeon_connector->detected_by_load = false;
		radeon_connector_free_edid(connector);
		radeon_connector_get_edid(connector);

		if (!radeon_connector->edid) {
			DRM_ERROR("%s: probed a monitor but no|invalid EDID\n",
					connector->name);
			ret = connector_status_connected;
		} else {
			radeon_connector->use_digital = !!(radeon_connector->edid->input & DRM_EDID_INPUT_DIGITAL);

			/* some oems have boards with separate digital and analog connectors
			 * with a shared ddc line (often vga + hdmi)
			 */
			if (radeon_connector->use_digital && radeon_connector->shared_ddc) {
				radeon_connector_free_edid(connector);
				ret = connector_status_disconnected;
			} else {
				ret = connector_status_connected;
			}
		}
	} else {

		/* if we aren't forcing don't do destructive polling */
		if (!force) {
			/* only return the previous status if we last
			 * detected a monitor via load.
			 */
			if (radeon_connector->detected_by_load)
				ret = connector->status;
			goto out;
		}

		if (radeon_connector->dac_load_detect && encoder) {
			encoder_funcs = encoder->helper_private;
			ret = encoder_funcs->detect(encoder, connector);
			if (ret != connector_status_disconnected)
				radeon_connector->detected_by_load = true;
		}
	}

	if (ret == connector_status_connected)
		ret = radeon_connector_analog_encoder_conflict_solve(connector, encoder, ret, true);

	/* RN50 and some RV100 asics in servers often have a hardcoded EDID in the
	 * vbios to deal with KVMs. If we have one and are not able to detect a monitor
	 * by other means, assume the CRT is connected and use that EDID.
	 */
	if ((!rdev->is_atom_bios) &&
	    (ret == connector_status_disconnected) &&
	    rdev->mode_info.bios_hardcoded_edid_size) {
		ret = connector_status_connected;
	}

	radeon_connector_update_scratch_regs(connector, ret);

out:
	pm_runtime_mark_last_busy(connector->dev->dev);
	pm_runtime_put_autosuspend(connector->dev->dev);

	return ret;
}

/* Helper vtable for VGA connectors (mode probing / validation). */
static const struct drm_connector_helper_funcs radeon_vga_connector_helper_funcs = {
	.get_modes = radeon_vga_get_modes,
	.mode_valid = radeon_vga_mode_valid,
	.best_encoder = radeon_best_single_encoder,
};

/* Core vtable for VGA connectors. */
static const struct drm_connector_funcs radeon_vga_connector_funcs = {
	.dpms = drm_helper_connector_dpms,
	.detect = radeon_vga_detect,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.destroy = radeon_connector_destroy,
	.set_property = radeon_connector_set_property,
};

/*
 * Build the TV-out mode list.
 *
 * AVIVO-class chips (>= RS600) can scale arbitrary modes, so the common
 * scaled mode list is added; older chips get a single 800x600@60 CVT
 * mode.  Always reports one (or more, via common modes) mode added.
 */
static int radeon_tv_get_modes(struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct drm_display_mode *tv_mode;
	struct drm_encoder *encoder;

	encoder = radeon_best_single_encoder(connector);
	if (!encoder)
		return 0;

	/* avivo chips can scale any mode */
	if (rdev->family >= CHIP_RS600)
		/* add scaled modes */
		radeon_add_common_modes(encoder, connector);
	else {
		/* only 800x600 is supported right now on pre-avivo chips */
		tv_mode = drm_cvt_mode(dev, 800, 600, 60, false, false, false);
		tv_mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
		drm_mode_probed_add(connector, tv_mode);
	}
	return 1;
}

/* TV-out can drive at most 1024x768. */
static int radeon_tv_mode_valid(struct drm_connector *connector,
				struct drm_display_mode *mode)
{
	if ((mode->hdisplay > 1024) || (mode->vdisplay > 768))
		return MODE_CLOCK_RANGE;
	return MODE_OK;
}

/*
 * Detect a TV via DAC load detection (the only available method; no DDC
 * on TV connectors).  Bails out immediately when load detection is
 * disabled for this connector.  Holds a pm_runtime reference across the
 * probe.
 */
static enum drm_connector_status
radeon_tv_detect(struct drm_connector *connector, bool force)
{
	struct drm_encoder *encoder;
	struct drm_encoder_helper_funcs *encoder_funcs;
	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
	enum drm_connector_status ret = connector_status_disconnected;
	int r;

	if (!radeon_connector->dac_load_detect)
		return ret;

	r = pm_runtime_get_sync(connector->dev->dev);
	if (r < 0)
		return connector_status_disconnected;

	encoder = radeon_best_single_encoder(connector);
	if (!encoder)
		ret = connector_status_disconnected;
	else {
		encoder_funcs = encoder->helper_private;
		ret = encoder_funcs->detect(encoder, connector);
	}
	if (ret == connector_status_connected)
		ret = radeon_connector_analog_encoder_conflict_solve(connector, encoder, ret, false);
	radeon_connector_update_scratch_regs(connector, ret);
	pm_runtime_mark_last_busy(connector->dev->dev);
	pm_runtime_put_autosuspend(connector->dev->dev);
	return ret;
}

/* Helper vtable for TV connectors (mode probing / validation). */
static const struct drm_connector_helper_funcs radeon_tv_connector_helper_funcs = {
	.get_modes = radeon_tv_get_modes,
	.mode_valid = radeon_tv_mode_valid,
	.best_encoder = radeon_best_single_encoder,
};

/* Core vtable for TV connectors. */
static const struct drm_connector_funcs radeon_tv_connector_funcs = {
	.dpms = drm_helper_connector_dpms,
	.detect = radeon_tv_detect,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.destroy = radeon_connector_destroy,
	.set_property = radeon_connector_set_property,
};

static bool radeon_check_hpd_status_unchanged(struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_connector *radeon_connector =
to_radeon_connector(connector); enum drm_connector_status status; /* We only trust HPD on R600 and newer ASICS. */ if (rdev->family >= CHIP_R600 && radeon_connector->hpd.hpd != RADEON_HPD_NONE) { if (radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) status = connector_status_connected; else status = connector_status_disconnected; if (connector->status == status) return true; } return false; } /* * DVI is complicated * Do a DDC probe, if DDC probe passes, get the full EDID so * we can do analog/digital monitor detection at this point. * If the monitor is an analog monitor or we got no DDC, * we need to find the DAC encoder object for this connector. * If we got no DDC, we do load detection on the DAC encoder object. * If we got analog DDC or load detection passes on the DAC encoder * we have to check if this analog encoder is shared with anyone else (TV) * if its shared we have to set the other connector to disconnected. */ static enum drm_connector_status radeon_dvi_detect(struct drm_connector *connector, bool force) { struct drm_device *dev = connector->dev; struct radeon_device *rdev = dev->dev_private; struct radeon_connector *radeon_connector = to_radeon_connector(connector); struct drm_encoder *encoder = NULL; struct drm_encoder_helper_funcs *encoder_funcs; int i, r; enum drm_connector_status ret = connector_status_disconnected; bool dret = false, broken_edid = false; r = pm_runtime_get_sync(connector->dev->dev); if (r < 0) return connector_status_disconnected; if (!force && radeon_check_hpd_status_unchanged(connector)) { ret = connector->status; goto exit; } if (radeon_connector->ddc_bus) dret = radeon_ddc_probe(radeon_connector, false); if (dret) { radeon_connector->detected_by_load = false; radeon_connector_free_edid(connector); radeon_connector_get_edid(connector); if (!radeon_connector->edid) { DRM_ERROR("%s: probed a monitor but no|invalid EDID\n", connector->name); /* rs690 seems to have a problem with connectors not existing and always * return a 
block of 0's. If we see this just stop polling on this output */ if ((rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) && radeon_connector->base.null_edid_counter) { ret = connector_status_disconnected; DRM_ERROR("%s: detected RS690 floating bus bug, stopping ddc detect\n", connector->name); radeon_connector->ddc_bus = NULL; } else { ret = connector_status_connected; broken_edid = true; /* defer use_digital to later */ } } else { radeon_connector->use_digital = !!(radeon_connector->edid->input & DRM_EDID_INPUT_DIGITAL); /* some oems have boards with separate digital and analog connectors * with a shared ddc line (often vga + hdmi) */ if ((!radeon_connector->use_digital) && radeon_connector->shared_ddc) { radeon_connector_free_edid(connector); ret = connector_status_disconnected; } else { ret = connector_status_connected; } /* This gets complicated. We have boards with VGA + HDMI with a * shared DDC line and we have boards with DVI-D + HDMI with a shared * DDC line. The latter is more complex because with DVI<->HDMI adapters * you don't really know what's connected to which port as both are digital. 
*/ if (radeon_connector->shared_ddc && (ret == connector_status_connected)) { struct drm_connector *list_connector; struct radeon_connector *list_radeon_connector; list_for_each_entry(list_connector, &dev->mode_config.connector_list, head) { if (connector == list_connector) continue; list_radeon_connector = to_radeon_connector(list_connector); if (list_radeon_connector->shared_ddc && (list_radeon_connector->ddc_bus->rec.i2c_id == radeon_connector->ddc_bus->rec.i2c_id)) { /* cases where both connectors are digital */ if (list_connector->connector_type != DRM_MODE_CONNECTOR_VGA) { /* hpd is our only option in this case */ if (!radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) { radeon_connector_free_edid(connector); ret = connector_status_disconnected; } } } } } } } if ((ret == connector_status_connected) && (radeon_connector->use_digital == true)) goto out; /* DVI-D and HDMI-A are digital only */ if ((connector->connector_type == DRM_MODE_CONNECTOR_DVID) || (connector->connector_type == DRM_MODE_CONNECTOR_HDMIA)) goto out; /* if we aren't forcing don't do destructive polling */ if (!force) { /* only return the previous status if we last * detected a monitor via load. 
*/ if (radeon_connector->detected_by_load) ret = connector->status; goto out; } /* find analog encoder */ if (radeon_connector->dac_load_detect) { for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) { if (connector->encoder_ids[i] == 0) break; encoder = drm_encoder_find(connector->dev, connector->encoder_ids[i]); if (!encoder) continue; if (encoder->encoder_type != DRM_MODE_ENCODER_DAC && encoder->encoder_type != DRM_MODE_ENCODER_TVDAC) continue; encoder_funcs = encoder->helper_private; if (encoder_funcs->detect) { if (!broken_edid) { if (ret != connector_status_connected) { /* deal with analog monitors without DDC */ ret = encoder_funcs->detect(encoder, connector); if (ret == connector_status_connected) { radeon_connector->use_digital = false; } if (ret != connector_status_disconnected) radeon_connector->detected_by_load = true; } } else { enum drm_connector_status lret; /* assume digital unless load detected otherwise */ radeon_connector->use_digital = true; lret = encoder_funcs->detect(encoder, connector); DRM_DEBUG_KMS("load_detect %x returned: %x\n",encoder->encoder_type,lret); if (lret == connector_status_connected) radeon_connector->use_digital = false; } break; } } } if ((ret == connector_status_connected) && (radeon_connector->use_digital == false) && encoder) { ret = radeon_connector_analog_encoder_conflict_solve(connector, encoder, ret, true); } /* RN50 and some RV100 asics in servers often have a hardcoded EDID in the * vbios to deal with KVMs. If we have one and are not able to detect a monitor * by other means, assume the DFP is connected and use that EDID. In most * cases the DVI port is actually a virtual KVM port connected to the service * processor. 
*/ out: if ((!rdev->is_atom_bios) && (ret == connector_status_disconnected) && rdev->mode_info.bios_hardcoded_edid_size) { radeon_connector->use_digital = true; ret = connector_status_connected; } /* updated in get modes as well since we need to know if it's analog or digital */ radeon_connector_update_scratch_regs(connector, ret); exit: pm_runtime_mark_last_busy(connector->dev->dev); pm_runtime_put_autosuspend(connector->dev->dev); return ret; } /* okay need to be smart in here about which encoder to pick */ static struct drm_encoder *radeon_dvi_encoder(struct drm_connector *connector) { int enc_id = connector->encoder_ids[0]; struct radeon_connector *radeon_connector = to_radeon_connector(connector); struct drm_encoder *encoder; int i; for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) { if (connector->encoder_ids[i] == 0) break; encoder = drm_encoder_find(connector->dev, connector->encoder_ids[i]); if (!encoder) continue; if (radeon_connector->use_digital == true) { if (encoder->encoder_type == DRM_MODE_ENCODER_TMDS) return encoder; } else { if (encoder->encoder_type == DRM_MODE_ENCODER_DAC || encoder->encoder_type == DRM_MODE_ENCODER_TVDAC) return encoder; } } /* see if we have a default encoder TODO */ /* then check use digitial */ /* pick the first one */ if (enc_id) return drm_encoder_find(connector->dev, enc_id); return NULL; } static void radeon_dvi_force(struct drm_connector *connector) { struct radeon_connector *radeon_connector = to_radeon_connector(connector); if (connector->force == DRM_FORCE_ON) radeon_connector->use_digital = false; if (connector->force == DRM_FORCE_ON_DIGITAL) radeon_connector->use_digital = true; } static int radeon_dvi_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { struct drm_device *dev = connector->dev; struct radeon_device *rdev = dev->dev_private; struct radeon_connector *radeon_connector = to_radeon_connector(connector); /* XXX check mode bandwidth */ /* clocks over 135 MHz have heat issues with 
DVI on RV100 */ if (radeon_connector->use_digital && (rdev->family == CHIP_RV100) && (mode->clock > 135000)) return MODE_CLOCK_HIGH; if (radeon_connector->use_digital && (mode->clock > 165000)) { if ((radeon_connector->connector_object_id == CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I) || (radeon_connector->connector_object_id == CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D) || (radeon_connector->connector_object_id == CONNECTOR_OBJECT_ID_HDMI_TYPE_B)) return MODE_OK; else if (ASIC_IS_DCE6(rdev) && drm_detect_hdmi_monitor(radeon_connector_edid(connector))) { /* HDMI 1.3+ supports max clock of 340 Mhz */ if (mode->clock > 340000) return MODE_CLOCK_HIGH; else return MODE_OK; } else { return MODE_CLOCK_HIGH; } } /* check against the max pixel clock */ if ((mode->clock / 10) > rdev->clock.max_pixel_clock) return MODE_CLOCK_HIGH; return MODE_OK; } static const struct drm_connector_helper_funcs radeon_dvi_connector_helper_funcs = { .get_modes = radeon_vga_get_modes, .mode_valid = radeon_dvi_mode_valid, .best_encoder = radeon_dvi_encoder, }; static const struct drm_connector_funcs radeon_dvi_connector_funcs = { .dpms = drm_helper_connector_dpms, .detect = radeon_dvi_detect, .fill_modes = drm_helper_probe_single_connector_modes, .set_property = radeon_connector_set_property, .destroy = radeon_connector_destroy, .force = radeon_dvi_force, }; static int radeon_dp_get_modes(struct drm_connector *connector) { struct radeon_connector *radeon_connector = to_radeon_connector(connector); struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv; struct drm_encoder *encoder = radeon_best_single_encoder(connector); int ret; if ((connector->connector_type == DRM_MODE_CONNECTOR_eDP) || (connector->connector_type == DRM_MODE_CONNECTOR_LVDS)) { struct drm_display_mode *mode; if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) { if (!radeon_dig_connector->edp_on) atombios_set_edp_panel_power(connector, ATOM_TRANSMITTER_ACTION_POWER_ON); radeon_connector_get_edid(connector); 
ret = radeon_ddc_get_modes(connector); if (!radeon_dig_connector->edp_on) atombios_set_edp_panel_power(connector, ATOM_TRANSMITTER_ACTION_POWER_OFF); } else { /* need to setup ddc on the bridge */ if (radeon_connector_encoder_get_dp_bridge_encoder_id(connector) != ENCODER_OBJECT_ID_NONE) { if (encoder) radeon_atom_ext_encoder_setup_ddc(encoder); } radeon_connector_get_edid(connector); ret = radeon_ddc_get_modes(connector); } if (ret > 0) { if (encoder) { radeon_fixup_lvds_native_mode(encoder, connector); /* add scaled modes */ radeon_add_common_modes(encoder, connector); } return ret; } if (!encoder) return 0; /* we have no EDID modes */ mode = radeon_fp_native_mode(encoder); if (mode) { ret = 1; drm_mode_probed_add(connector, mode); /* add the width/height from vbios tables if available */ connector->display_info.width_mm = mode->width_mm; connector->display_info.height_mm = mode->height_mm; /* add scaled modes */ radeon_add_common_modes(encoder, connector); } } else { /* need to setup ddc on the bridge */ if (radeon_connector_encoder_get_dp_bridge_encoder_id(connector) != ENCODER_OBJECT_ID_NONE) { if (encoder) radeon_atom_ext_encoder_setup_ddc(encoder); } radeon_connector_get_edid(connector); ret = radeon_ddc_get_modes(connector); radeon_get_native_mode(connector); } return ret; } u16 radeon_connector_encoder_get_dp_bridge_encoder_id(struct drm_connector *connector) { struct drm_encoder *encoder; struct radeon_encoder *radeon_encoder; int i; for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) { if (connector->encoder_ids[i] == 0) break; encoder = drm_encoder_find(connector->dev, connector->encoder_ids[i]); if (!encoder) continue; radeon_encoder = to_radeon_encoder(encoder); switch (radeon_encoder->encoder_id) { case ENCODER_OBJECT_ID_TRAVIS: case ENCODER_OBJECT_ID_NUTMEG: return radeon_encoder->encoder_id; default: break; } } return ENCODER_OBJECT_ID_NONE; } static bool radeon_connector_encoder_is_hbr2(struct drm_connector *connector) { struct drm_encoder *encoder; 
struct radeon_encoder *radeon_encoder; int i; bool found = false; for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) { if (connector->encoder_ids[i] == 0) break; encoder = drm_encoder_find(connector->dev, connector->encoder_ids[i]); if (!encoder) continue; radeon_encoder = to_radeon_encoder(encoder); if (radeon_encoder->caps & ATOM_ENCODER_CAP_RECORD_HBR2) found = true; } return found; } bool radeon_connector_is_dp12_capable(struct drm_connector *connector) { struct drm_device *dev = connector->dev; struct radeon_device *rdev = dev->dev_private; if (ASIC_IS_DCE5(rdev) && (rdev->clock.default_dispclk >= 53900) && radeon_connector_encoder_is_hbr2(connector)) { return true; } return false; } static enum drm_connector_status radeon_dp_detect(struct drm_connector *connector, bool force) { struct drm_device *dev = connector->dev; struct radeon_device *rdev = dev->dev_private; struct radeon_connector *radeon_connector = to_radeon_connector(connector); enum drm_connector_status ret = connector_status_disconnected; struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv; struct drm_encoder *encoder = radeon_best_single_encoder(connector); int r; r = pm_runtime_get_sync(connector->dev->dev); if (r < 0) return connector_status_disconnected; if (!force && radeon_check_hpd_status_unchanged(connector)) { ret = connector->status; goto out; } radeon_connector_free_edid(connector); if ((connector->connector_type == DRM_MODE_CONNECTOR_eDP) || (connector->connector_type == DRM_MODE_CONNECTOR_LVDS)) { if (encoder) { struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); struct drm_display_mode *native_mode = &radeon_encoder->native_mode; /* check if panel is valid */ if (native_mode->hdisplay >= 320 && native_mode->vdisplay >= 240) ret = connector_status_connected; /* don't fetch the edid from the vbios if ddc fails and runpm is * enabled so we report disconnected. 
*/ if ((rdev->flags & RADEON_IS_PX) && (radeon_runtime_pm != 0)) ret = connector_status_disconnected; } /* eDP is always DP */ radeon_dig_connector->dp_sink_type = CONNECTOR_OBJECT_ID_DISPLAYPORT; if (!radeon_dig_connector->edp_on) atombios_set_edp_panel_power(connector, ATOM_TRANSMITTER_ACTION_POWER_ON); if (radeon_dp_getdpcd(radeon_connector)) ret = connector_status_connected; if (!radeon_dig_connector->edp_on) atombios_set_edp_panel_power(connector, ATOM_TRANSMITTER_ACTION_POWER_OFF); } else if (radeon_connector_encoder_get_dp_bridge_encoder_id(connector) != ENCODER_OBJECT_ID_NONE) { /* DP bridges are always DP */ radeon_dig_connector->dp_sink_type = CONNECTOR_OBJECT_ID_DISPLAYPORT; /* get the DPCD from the bridge */ radeon_dp_getdpcd(radeon_connector); if (encoder) { /* setup ddc on the bridge */ radeon_atom_ext_encoder_setup_ddc(encoder); /* bridge chips are always aux */ if (radeon_ddc_probe(radeon_connector, true)) /* try DDC */ ret = connector_status_connected; else if (radeon_connector->dac_load_detect) { /* try load detection */ struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private; ret = encoder_funcs->detect(encoder, connector); } } } else { radeon_dig_connector->dp_sink_type = radeon_dp_getsinktype(radeon_connector); if (radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) { ret = connector_status_connected; if (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) radeon_dp_getdpcd(radeon_connector); } else { if (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) { if (radeon_dp_getdpcd(radeon_connector)) ret = connector_status_connected; } else { /* try non-aux ddc (DP to DVI/HDMI/etc. 
adapter) */ if (radeon_ddc_probe(radeon_connector, false)) ret = connector_status_connected; } } } radeon_connector_update_scratch_regs(connector, ret); out: pm_runtime_mark_last_busy(connector->dev->dev); pm_runtime_put_autosuspend(connector->dev->dev); return ret; } static int radeon_dp_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { struct drm_device *dev = connector->dev; struct radeon_device *rdev = dev->dev_private; struct radeon_connector *radeon_connector = to_radeon_connector(connector); struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv; /* XXX check mode bandwidth */ if ((connector->connector_type == DRM_MODE_CONNECTOR_eDP) || (connector->connector_type == DRM_MODE_CONNECTOR_LVDS)) { struct drm_encoder *encoder = radeon_best_single_encoder(connector); if ((mode->hdisplay < 320) || (mode->vdisplay < 240)) return MODE_PANEL; if (encoder) { struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); struct drm_display_mode *native_mode = &radeon_encoder->native_mode; /* AVIVO hardware supports downscaling modes larger than the panel * to the panel size, but I'm not sure this is desirable. 
*/ if ((mode->hdisplay > native_mode->hdisplay) || (mode->vdisplay > native_mode->vdisplay)) return MODE_PANEL; /* if scaling is disabled, block non-native modes */ if (radeon_encoder->rmx_type == RMX_OFF) { if ((mode->hdisplay != native_mode->hdisplay) || (mode->vdisplay != native_mode->vdisplay)) return MODE_PANEL; } } } else { if ((radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) || (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) { return radeon_dp_mode_valid_helper(connector, mode); } else { if (ASIC_IS_DCE6(rdev) && drm_detect_hdmi_monitor(radeon_connector_edid(connector))) { /* HDMI 1.3+ supports max clock of 340 Mhz */ if (mode->clock > 340000) return MODE_CLOCK_HIGH; } else { if (mode->clock > 165000) return MODE_CLOCK_HIGH; } } } return MODE_OK; } static const struct drm_connector_helper_funcs radeon_dp_connector_helper_funcs = { .get_modes = radeon_dp_get_modes, .mode_valid = radeon_dp_mode_valid, .best_encoder = radeon_dvi_encoder, }; static const struct drm_connector_funcs radeon_dp_connector_funcs = { .dpms = drm_helper_connector_dpms, .detect = radeon_dp_detect, .fill_modes = drm_helper_probe_single_connector_modes, .set_property = radeon_connector_set_property, .destroy = radeon_connector_destroy, .force = radeon_dvi_force, }; static const struct drm_connector_funcs radeon_edp_connector_funcs = { .dpms = drm_helper_connector_dpms, .detect = radeon_dp_detect, .fill_modes = drm_helper_probe_single_connector_modes, .set_property = radeon_lvds_set_property, .destroy = radeon_connector_destroy, .force = radeon_dvi_force, }; static const struct drm_connector_funcs radeon_lvds_bridge_connector_funcs = { .dpms = drm_helper_connector_dpms, .detect = radeon_dp_detect, .fill_modes = drm_helper_probe_single_connector_modes, .set_property = radeon_lvds_set_property, .destroy = radeon_connector_destroy, .force = radeon_dvi_force, }; void radeon_add_atom_connector(struct drm_device *dev, uint32_t connector_id, uint32_t 
supported_device, int connector_type, struct radeon_i2c_bus_rec *i2c_bus, uint32_t igp_lane_info, uint16_t connector_object_id, struct radeon_hpd *hpd, struct radeon_router *router) { struct radeon_device *rdev = dev->dev_private; struct drm_connector *connector; struct radeon_connector *radeon_connector; struct radeon_connector_atom_dig *radeon_dig_connector; struct drm_encoder *encoder; struct radeon_encoder *radeon_encoder; uint32_t subpixel_order = SubPixelNone; bool shared_ddc = false; bool is_dp_bridge = false; bool has_aux = false; if (connector_type == DRM_MODE_CONNECTOR_Unknown) return; /* if the user selected tv=0 don't try and add the connector */ if (((connector_type == DRM_MODE_CONNECTOR_SVIDEO) || (connector_type == DRM_MODE_CONNECTOR_Composite) || (connector_type == DRM_MODE_CONNECTOR_9PinDIN)) && (radeon_tv == 0)) return; /* see if we already added it */ list_for_each_entry(connector, &dev->mode_config.connector_list, head) { radeon_connector = to_radeon_connector(connector); if (radeon_connector->connector_id == connector_id) { radeon_connector->devices |= supported_device; return; } if (radeon_connector->ddc_bus && i2c_bus->valid) { if (radeon_connector->ddc_bus->rec.i2c_id == i2c_bus->i2c_id) { radeon_connector->shared_ddc = true; shared_ddc = true; } if (radeon_connector->router_bus && router->ddc_valid && (radeon_connector->router.router_id == router->router_id)) { radeon_connector->shared_ddc = false; shared_ddc = false; } } } /* check if it's a dp bridge */ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { radeon_encoder = to_radeon_encoder(encoder); if (radeon_encoder->devices & supported_device) { switch (radeon_encoder->encoder_id) { case ENCODER_OBJECT_ID_TRAVIS: case ENCODER_OBJECT_ID_NUTMEG: is_dp_bridge = true; break; default: break; } } } radeon_connector = kzalloc(sizeof(struct radeon_connector), GFP_KERNEL); if (!radeon_connector) return; connector = &radeon_connector->base; radeon_connector->connector_id = 
connector_id; radeon_connector->devices = supported_device; radeon_connector->shared_ddc = shared_ddc; radeon_connector->connector_object_id = connector_object_id; radeon_connector->hpd = *hpd; radeon_connector->router = *router; if (router->ddc_valid || router->cd_valid) { radeon_connector->router_bus = radeon_i2c_lookup(rdev, &router->i2c_info); if (!radeon_connector->router_bus) DRM_ERROR("Failed to assign router i2c bus! Check dmesg for i2c errors.\n"); } if (is_dp_bridge) { radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL); if (!radeon_dig_connector) goto failed; radeon_dig_connector->igp_lane_info = igp_lane_info; radeon_connector->con_priv = radeon_dig_connector; if (i2c_bus->valid) { radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus); if (radeon_connector->ddc_bus) has_aux = true; else DRM_ERROR("DP: Failed to assign ddc bus! Check dmesg for i2c errors.\n"); } switch (connector_type) { case DRM_MODE_CONNECTOR_VGA: case DRM_MODE_CONNECTOR_DVIA: default: drm_connector_init(dev, &radeon_connector->base, &radeon_dp_connector_funcs, connector_type); drm_connector_helper_add(&radeon_connector->base, &radeon_dp_connector_helper_funcs); connector->interlace_allowed = true; connector->doublescan_allowed = true; radeon_connector->dac_load_detect = true; drm_object_attach_property(&radeon_connector->base.base, rdev->mode_info.load_detect_property, 1); drm_object_attach_property(&radeon_connector->base.base, dev->mode_config.scaling_mode_property, DRM_MODE_SCALE_NONE); break; case DRM_MODE_CONNECTOR_DVII: case DRM_MODE_CONNECTOR_DVID: case DRM_MODE_CONNECTOR_HDMIA: case DRM_MODE_CONNECTOR_HDMIB: case DRM_MODE_CONNECTOR_DisplayPort: drm_connector_init(dev, &radeon_connector->base, &radeon_dp_connector_funcs, connector_type); drm_connector_helper_add(&radeon_connector->base, &radeon_dp_connector_helper_funcs); drm_object_attach_property(&radeon_connector->base.base, rdev->mode_info.underscan_property, UNDERSCAN_OFF); 
drm_object_attach_property(&radeon_connector->base.base, rdev->mode_info.underscan_hborder_property, 0); drm_object_attach_property(&radeon_connector->base.base, rdev->mode_info.underscan_vborder_property, 0); drm_object_attach_property(&radeon_connector->base.base, dev->mode_config.scaling_mode_property, DRM_MODE_SCALE_NONE); drm_object_attach_property(&radeon_connector->base.base, rdev->mode_info.dither_property, RADEON_FMT_DITHER_DISABLE); if (radeon_audio != 0) drm_object_attach_property(&radeon_connector->base.base, rdev->mode_info.audio_property, RADEON_AUDIO_AUTO); subpixel_order = SubPixelHorizontalRGB; connector->interlace_allowed = true; if (connector_type == DRM_MODE_CONNECTOR_HDMIB) connector->doublescan_allowed = true; else connector->doublescan_allowed = false; if (connector_type == DRM_MODE_CONNECTOR_DVII) { radeon_connector->dac_load_detect = true; drm_object_attach_property(&radeon_connector->base.base, rdev->mode_info.load_detect_property, 1); } break; case DRM_MODE_CONNECTOR_LVDS: case DRM_MODE_CONNECTOR_eDP: drm_connector_init(dev, &radeon_connector->base, &radeon_lvds_bridge_connector_funcs, connector_type); drm_connector_helper_add(&radeon_connector->base, &radeon_dp_connector_helper_funcs); drm_object_attach_property(&radeon_connector->base.base, dev->mode_config.scaling_mode_property, DRM_MODE_SCALE_FULLSCREEN); subpixel_order = SubPixelHorizontalRGB; connector->interlace_allowed = false; connector->doublescan_allowed = false; break; } } else { switch (connector_type) { case DRM_MODE_CONNECTOR_VGA: drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type); drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs); if (i2c_bus->valid) { radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus); if (!radeon_connector->ddc_bus) DRM_ERROR("VGA: Failed to assign ddc bus! 
Check dmesg for i2c errors.\n"); } radeon_connector->dac_load_detect = true; drm_object_attach_property(&radeon_connector->base.base, rdev->mode_info.load_detect_property, 1); if (ASIC_IS_AVIVO(rdev)) drm_object_attach_property(&radeon_connector->base.base, dev->mode_config.scaling_mode_property, DRM_MODE_SCALE_NONE); /* no HPD on analog connectors */ radeon_connector->hpd.hpd = RADEON_HPD_NONE; connector->polled = DRM_CONNECTOR_POLL_CONNECT; connector->interlace_allowed = true; connector->doublescan_allowed = true; break; case DRM_MODE_CONNECTOR_DVIA: drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type); drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs); if (i2c_bus->valid) { radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus); if (!radeon_connector->ddc_bus) DRM_ERROR("DVIA: Failed to assign ddc bus! Check dmesg for i2c errors.\n"); } radeon_connector->dac_load_detect = true; drm_object_attach_property(&radeon_connector->base.base, rdev->mode_info.load_detect_property, 1); if (ASIC_IS_AVIVO(rdev)) drm_object_attach_property(&radeon_connector->base.base, dev->mode_config.scaling_mode_property, DRM_MODE_SCALE_NONE); /* no HPD on analog connectors */ radeon_connector->hpd.hpd = RADEON_HPD_NONE; connector->interlace_allowed = true; connector->doublescan_allowed = true; break; case DRM_MODE_CONNECTOR_DVII: case DRM_MODE_CONNECTOR_DVID: radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL); if (!radeon_dig_connector) goto failed; radeon_dig_connector->igp_lane_info = igp_lane_info; radeon_connector->con_priv = radeon_dig_connector; drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type); drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs); if (i2c_bus->valid) { radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus); if (!radeon_connector->ddc_bus) DRM_ERROR("DVI: Failed to 
assign ddc bus! Check dmesg for i2c errors.\n"); } subpixel_order = SubPixelHorizontalRGB; drm_object_attach_property(&radeon_connector->base.base, rdev->mode_info.coherent_mode_property, 1); if (ASIC_IS_AVIVO(rdev)) { drm_object_attach_property(&radeon_connector->base.base, rdev->mode_info.underscan_property, UNDERSCAN_OFF); drm_object_attach_property(&radeon_connector->base.base, rdev->mode_info.underscan_hborder_property, 0); drm_object_attach_property(&radeon_connector->base.base, rdev->mode_info.underscan_vborder_property, 0); drm_object_attach_property(&radeon_connector->base.base, rdev->mode_info.dither_property, RADEON_FMT_DITHER_DISABLE); drm_object_attach_property(&radeon_connector->base.base, dev->mode_config.scaling_mode_property, DRM_MODE_SCALE_NONE); } if (ASIC_IS_DCE2(rdev) && (radeon_audio != 0)) { drm_object_attach_property(&radeon_connector->base.base, rdev->mode_info.audio_property, RADEON_AUDIO_AUTO); } if (connector_type == DRM_MODE_CONNECTOR_DVII) { radeon_connector->dac_load_detect = true; drm_object_attach_property(&radeon_connector->base.base, rdev->mode_info.load_detect_property, 1); } connector->interlace_allowed = true; if (connector_type == DRM_MODE_CONNECTOR_DVII) connector->doublescan_allowed = true; else connector->doublescan_allowed = false; break; case DRM_MODE_CONNECTOR_HDMIA: case DRM_MODE_CONNECTOR_HDMIB: radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL); if (!radeon_dig_connector) goto failed; radeon_dig_connector->igp_lane_info = igp_lane_info; radeon_connector->con_priv = radeon_dig_connector; drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type); drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs); if (i2c_bus->valid) { radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus); if (!radeon_connector->ddc_bus) DRM_ERROR("HDMI: Failed to assign ddc bus! 
Check dmesg for i2c errors.\n"); } drm_object_attach_property(&radeon_connector->base.base, rdev->mode_info.coherent_mode_property, 1); if (ASIC_IS_AVIVO(rdev)) { drm_object_attach_property(&radeon_connector->base.base, rdev->mode_info.underscan_property, UNDERSCAN_OFF); drm_object_attach_property(&radeon_connector->base.base, rdev->mode_info.underscan_hborder_property, 0); drm_object_attach_property(&radeon_connector->base.base, rdev->mode_info.underscan_vborder_property, 0); drm_object_attach_property(&radeon_connector->base.base, rdev->mode_info.dither_property, RADEON_FMT_DITHER_DISABLE); drm_object_attach_property(&radeon_connector->base.base, dev->mode_config.scaling_mode_property, DRM_MODE_SCALE_NONE); } if (ASIC_IS_DCE2(rdev) && (radeon_audio != 0)) { drm_object_attach_property(&radeon_connector->base.base, rdev->mode_info.audio_property, RADEON_AUDIO_AUTO); } subpixel_order = SubPixelHorizontalRGB; connector->interlace_allowed = true; if (connector_type == DRM_MODE_CONNECTOR_HDMIB) connector->doublescan_allowed = true; else connector->doublescan_allowed = false; break; case DRM_MODE_CONNECTOR_DisplayPort: radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL); if (!radeon_dig_connector) goto failed; radeon_dig_connector->igp_lane_info = igp_lane_info; radeon_connector->con_priv = radeon_dig_connector; drm_connector_init(dev, &radeon_connector->base, &radeon_dp_connector_funcs, connector_type); drm_connector_helper_add(&radeon_connector->base, &radeon_dp_connector_helper_funcs); if (i2c_bus->valid) { radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus); if (radeon_connector->ddc_bus) has_aux = true; else DRM_ERROR("DP: Failed to assign ddc bus! 
Check dmesg for i2c errors.\n"); } subpixel_order = SubPixelHorizontalRGB; drm_object_attach_property(&radeon_connector->base.base, rdev->mode_info.coherent_mode_property, 1); if (ASIC_IS_AVIVO(rdev)) { drm_object_attach_property(&radeon_connector->base.base, rdev->mode_info.underscan_property, UNDERSCAN_OFF); drm_object_attach_property(&radeon_connector->base.base, rdev->mode_info.underscan_hborder_property, 0); drm_object_attach_property(&radeon_connector->base.base, rdev->mode_info.underscan_vborder_property, 0); drm_object_attach_property(&radeon_connector->base.base, rdev->mode_info.dither_property, RADEON_FMT_DITHER_DISABLE); drm_object_attach_property(&radeon_connector->base.base, dev->mode_config.scaling_mode_property, DRM_MODE_SCALE_NONE); } if (ASIC_IS_DCE2(rdev) && (radeon_audio != 0)) { drm_object_attach_property(&radeon_connector->base.base, rdev->mode_info.audio_property, RADEON_AUDIO_AUTO); } connector->interlace_allowed = true; /* in theory with a DP to VGA converter... */ connector->doublescan_allowed = false; break; case DRM_MODE_CONNECTOR_eDP: radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL); if (!radeon_dig_connector) goto failed; radeon_dig_connector->igp_lane_info = igp_lane_info; radeon_connector->con_priv = radeon_dig_connector; drm_connector_init(dev, &radeon_connector->base, &radeon_edp_connector_funcs, connector_type); drm_connector_helper_add(&radeon_connector->base, &radeon_dp_connector_helper_funcs); if (i2c_bus->valid) { radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus); if (radeon_connector->ddc_bus) has_aux = true; else DRM_ERROR("DP: Failed to assign ddc bus! 
Check dmesg for i2c errors.\n"); } drm_object_attach_property(&radeon_connector->base.base, dev->mode_config.scaling_mode_property, DRM_MODE_SCALE_FULLSCREEN); subpixel_order = SubPixelHorizontalRGB; connector->interlace_allowed = false; connector->doublescan_allowed = false; break; case DRM_MODE_CONNECTOR_SVIDEO: case DRM_MODE_CONNECTOR_Composite: case DRM_MODE_CONNECTOR_9PinDIN: drm_connector_init(dev, &radeon_connector->base, &radeon_tv_connector_funcs, connector_type); drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs); radeon_connector->dac_load_detect = true; drm_object_attach_property(&radeon_connector->base.base, rdev->mode_info.load_detect_property, 1); drm_object_attach_property(&radeon_connector->base.base, rdev->mode_info.tv_std_property, radeon_atombios_get_tv_info(rdev)); /* no HPD on analog connectors */ radeon_connector->hpd.hpd = RADEON_HPD_NONE; connector->interlace_allowed = false; connector->doublescan_allowed = false; break; case DRM_MODE_CONNECTOR_LVDS: radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL); if (!radeon_dig_connector) goto failed; radeon_dig_connector->igp_lane_info = igp_lane_info; radeon_connector->con_priv = radeon_dig_connector; drm_connector_init(dev, &radeon_connector->base, &radeon_lvds_connector_funcs, connector_type); drm_connector_helper_add(&radeon_connector->base, &radeon_lvds_connector_helper_funcs); if (i2c_bus->valid) { radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus); if (!radeon_connector->ddc_bus) DRM_ERROR("LVDS: Failed to assign ddc bus! 
Check dmesg for i2c errors.\n"); } drm_object_attach_property(&radeon_connector->base.base, dev->mode_config.scaling_mode_property, DRM_MODE_SCALE_FULLSCREEN); subpixel_order = SubPixelHorizontalRGB; connector->interlace_allowed = false; connector->doublescan_allowed = false; break; } } if (radeon_connector->hpd.hpd == RADEON_HPD_NONE) { if (i2c_bus->valid) connector->polled = DRM_CONNECTOR_POLL_CONNECT; } else connector->polled = DRM_CONNECTOR_POLL_HPD; connector->display_info.subpixel_order = subpixel_order; drm_connector_register(connector); if (has_aux) radeon_dp_aux_init(radeon_connector); return; failed: drm_connector_cleanup(connector); kfree(connector); } void radeon_add_legacy_connector(struct drm_device *dev, uint32_t connector_id, uint32_t supported_device, int connector_type, struct radeon_i2c_bus_rec *i2c_bus, uint16_t connector_object_id, struct radeon_hpd *hpd) { struct radeon_device *rdev = dev->dev_private; struct drm_connector *connector; struct radeon_connector *radeon_connector; uint32_t subpixel_order = SubPixelNone; if (connector_type == DRM_MODE_CONNECTOR_Unknown) return; /* if the user selected tv=0 don't try and add the connector */ if (((connector_type == DRM_MODE_CONNECTOR_SVIDEO) || (connector_type == DRM_MODE_CONNECTOR_Composite) || (connector_type == DRM_MODE_CONNECTOR_9PinDIN)) && (radeon_tv == 0)) return; /* see if we already added it */ list_for_each_entry(connector, &dev->mode_config.connector_list, head) { radeon_connector = to_radeon_connector(connector); if (radeon_connector->connector_id == connector_id) { radeon_connector->devices |= supported_device; return; } } radeon_connector = kzalloc(sizeof(struct radeon_connector), GFP_KERNEL); if (!radeon_connector) return; connector = &radeon_connector->base; radeon_connector->connector_id = connector_id; radeon_connector->devices = supported_device; radeon_connector->connector_object_id = connector_object_id; radeon_connector->hpd = *hpd; switch (connector_type) { case 
DRM_MODE_CONNECTOR_VGA: drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type); drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs); if (i2c_bus->valid) { radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus); if (!radeon_connector->ddc_bus) DRM_ERROR("VGA: Failed to assign ddc bus! Check dmesg for i2c errors.\n"); } radeon_connector->dac_load_detect = true; drm_object_attach_property(&radeon_connector->base.base, rdev->mode_info.load_detect_property, 1); /* no HPD on analog connectors */ radeon_connector->hpd.hpd = RADEON_HPD_NONE; connector->polled = DRM_CONNECTOR_POLL_CONNECT; connector->interlace_allowed = true; connector->doublescan_allowed = true; break; case DRM_MODE_CONNECTOR_DVIA: drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type); drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs); if (i2c_bus->valid) { radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus); if (!radeon_connector->ddc_bus) DRM_ERROR("DVIA: Failed to assign ddc bus! Check dmesg for i2c errors.\n"); } radeon_connector->dac_load_detect = true; drm_object_attach_property(&radeon_connector->base.base, rdev->mode_info.load_detect_property, 1); /* no HPD on analog connectors */ radeon_connector->hpd.hpd = RADEON_HPD_NONE; connector->interlace_allowed = true; connector->doublescan_allowed = true; break; case DRM_MODE_CONNECTOR_DVII: case DRM_MODE_CONNECTOR_DVID: drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type); drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs); if (i2c_bus->valid) { radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus); if (!radeon_connector->ddc_bus) DRM_ERROR("DVI: Failed to assign ddc bus! 
Check dmesg for i2c errors.\n"); } if (connector_type == DRM_MODE_CONNECTOR_DVII) { radeon_connector->dac_load_detect = true; drm_object_attach_property(&radeon_connector->base.base, rdev->mode_info.load_detect_property, 1); } subpixel_order = SubPixelHorizontalRGB; connector->interlace_allowed = true; if (connector_type == DRM_MODE_CONNECTOR_DVII) connector->doublescan_allowed = true; else connector->doublescan_allowed = false; break; case DRM_MODE_CONNECTOR_SVIDEO: case DRM_MODE_CONNECTOR_Composite: case DRM_MODE_CONNECTOR_9PinDIN: drm_connector_init(dev, &radeon_connector->base, &radeon_tv_connector_funcs, connector_type); drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs); radeon_connector->dac_load_detect = true; /* RS400,RC410,RS480 chipset seems to report a lot * of false positive on load detect, we haven't yet * found a way to make load detect reliable on those * chipset, thus just disable it for TV. */ if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480) radeon_connector->dac_load_detect = false; drm_object_attach_property(&radeon_connector->base.base, rdev->mode_info.load_detect_property, radeon_connector->dac_load_detect); drm_object_attach_property(&radeon_connector->base.base, rdev->mode_info.tv_std_property, radeon_combios_get_tv_info(rdev)); /* no HPD on analog connectors */ radeon_connector->hpd.hpd = RADEON_HPD_NONE; connector->interlace_allowed = false; connector->doublescan_allowed = false; break; case DRM_MODE_CONNECTOR_LVDS: drm_connector_init(dev, &radeon_connector->base, &radeon_lvds_connector_funcs, connector_type); drm_connector_helper_add(&radeon_connector->base, &radeon_lvds_connector_helper_funcs); if (i2c_bus->valid) { radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus); if (!radeon_connector->ddc_bus) DRM_ERROR("LVDS: Failed to assign ddc bus! 
Check dmesg for i2c errors.\n"); } drm_object_attach_property(&radeon_connector->base.base, dev->mode_config.scaling_mode_property, DRM_MODE_SCALE_FULLSCREEN); subpixel_order = SubPixelHorizontalRGB; connector->interlace_allowed = false; connector->doublescan_allowed = false; break; } if (radeon_connector->hpd.hpd == RADEON_HPD_NONE) { if (i2c_bus->valid) connector->polled = DRM_CONNECTOR_POLL_CONNECT; } else connector->polled = DRM_CONNECTOR_POLL_HPD; connector->display_info.subpixel_order = subpixel_order; drm_connector_register(connector); }
gpl-2.0
curbthepain/revkernel_ubers5
fs/sdcardfs/packagelist.c
498
12948
/* * fs/sdcardfs/packagelist.c * * Copyright (c) 2013 Samsung Electronics Co. Ltd * Authors: Daeho Jeong, Woojoong Lee, Seunghwan Hyun, * Sunghwan Yun, Sungjong Seo * * This program has been developed as a stackable file system based on * the WrapFS which written by * * Copyright (c) 1998-2011 Erez Zadok * Copyright (c) 2009 Shrikar Archak * Copyright (c) 2003-2011 Stony Brook University * Copyright (c) 2003-2011 The Research Foundation of SUNY * * This file is dual licensed. It may be redistributed and/or modified * under the terms of the Apache 2.0 License OR version 2 of the GNU * General Public License. */ #include "sdcardfs.h" #include "strtok.h" #include "hashtable.h" #include <linux/syscalls.h> #include <linux/kthread.h> #include <linux/inotify.h> #include <linux/delay.h> #define STRING_BUF_SIZE (512) struct hashtable_entry { struct hlist_node hlist; void *key; int value; }; struct packagelist_data { DECLARE_HASHTABLE(package_to_appid,8); DECLARE_HASHTABLE(appid_with_rw,7); struct mutex hashtable_lock; struct task_struct *thread_id; gid_t write_gid; char *strtok_last; char read_buf[STRING_BUF_SIZE]; char event_buf[STRING_BUF_SIZE]; char app_name_buf[STRING_BUF_SIZE]; char gids_buf[STRING_BUF_SIZE]; }; static struct kmem_cache *hashtable_entry_cachep; /* Path to system-provided mapping of package name to appIds */ static const char* const kpackageslist_file = "/data/system/packages.list"; /* Supplementary groups to execute with */ static const gid_t kgroups[1] = { AID_PACKAGE_INFO }; static unsigned int str_hash(void *key) { int i; unsigned int h = strlen(key); char *data = (char *)key; for (i = 0; i < strlen(key); i++) { h = h * 31 + *data; data++; } return h; } static int contain_appid_key(struct packagelist_data *pkgl_dat, void *appid) { struct hashtable_entry *hash_cur; struct hlist_node *h_n; hash_for_each_possible(pkgl_dat->appid_with_rw, hash_cur, hlist, (unsigned int)appid, h_n) if (appid == hash_cur->key) return 1; return 0; } /* Return if the 
calling UID holds sdcard_rw. */ int get_caller_has_rw_locked(void *pkgl_id, derive_t derive) { struct packagelist_data *pkgl_dat = (struct packagelist_data *)pkgl_id; appid_t appid; int ret; /* No additional permissions enforcement */ if (derive == DERIVE_NONE) { return 1; } appid = multiuser_get_app_id(current_fsuid()); mutex_lock(&pkgl_dat->hashtable_lock); ret = contain_appid_key(pkgl_dat, (void *)appid); mutex_unlock(&pkgl_dat->hashtable_lock); return ret; } appid_t get_appid(void *pkgl_id, const char *app_name) { struct packagelist_data *pkgl_dat = (struct packagelist_data *)pkgl_id; struct hashtable_entry *hash_cur; struct hlist_node *h_n; unsigned int hash = str_hash((void *)app_name); appid_t ret_id; //printk(KERN_INFO "sdcardfs: %s: %s, %u\n", __func__, (char *)app_name, hash); mutex_lock(&pkgl_dat->hashtable_lock); hash_for_each_possible(pkgl_dat->package_to_appid, hash_cur, hlist, hash, h_n) { //printk(KERN_INFO "sdcardfs: %s: %s\n", __func__, (char *)hash_cur->key); if (!strcasecmp(app_name, hash_cur->key)) { ret_id = (appid_t)hash_cur->value; mutex_unlock(&pkgl_dat->hashtable_lock); //printk(KERN_INFO "=> app_id: %d\n", (int)ret_id); return ret_id; } } mutex_unlock(&pkgl_dat->hashtable_lock); //printk(KERN_INFO "=> app_id: %d\n", 0); return 0; } /* Kernel has already enforced everything we returned through * derive_permissions_locked(), so this is used to lock down access * even further, such as enforcing that apps hold sdcard_rw. 
*/ int check_caller_access_to_name(struct inode *parent_node, const char* name, derive_t derive, int w_ok, int has_rw) { /* Always block security-sensitive files at root */ if (parent_node && SDCARDFS_I(parent_node)->perm == PERM_ROOT) { if (!strcasecmp(name, "autorun.inf") || !strcasecmp(name, ".android_secure") || !strcasecmp(name, "android_secure")) { return 0; } } /* No additional permissions enforcement */ if (derive == DERIVE_NONE) { return 1; } /* Root always has access; access for any other UIDs should always * be controlled through packages.list. */ if (current_fsuid() == 0) { return 1; } /* If asking to write, verify that caller either owns the * parent or holds sdcard_rw. */ if (w_ok) { if (parent_node && (current_fsuid() == SDCARDFS_I(parent_node)->d_uid)) { return 1; } return has_rw; } /* No extra permissions to enforce */ return 1; } /* This function is used when file opening. The open flags must be * checked before calling check_caller_access_to_name() */ int open_flags_to_access_mode(int open_flags) { if((open_flags & O_ACCMODE) == O_RDONLY) { return 0; /* R_OK */ } else if ((open_flags & O_ACCMODE) == O_WRONLY) { return 1; /* W_OK */ } else { /* Probably O_RDRW, but treat as default to be safe */ return 1; /* R_OK | W_OK */ } } static int insert_str_to_int(struct packagelist_data *pkgl_dat, void *key, int value) { struct hashtable_entry *hash_cur; struct hashtable_entry *new_entry; struct hlist_node *h_n; unsigned int hash = str_hash(key); //printk(KERN_INFO "sdcardfs: %s: %s: %d, %u\n", __func__, (char *)key, value, hash); hash_for_each_possible(pkgl_dat->package_to_appid, hash_cur, hlist, hash, h_n) { if (!strcasecmp(key, hash_cur->key)) { hash_cur->value = value; return 0; } } new_entry = kmem_cache_alloc(hashtable_entry_cachep, GFP_KERNEL); if (!new_entry) return -ENOMEM; new_entry->key = kstrdup(key, GFP_KERNEL); new_entry->value = value; hash_add(pkgl_dat->package_to_appid, &new_entry->hlist, hash); return 0; } static void 
remove_str_to_int(struct hashtable_entry *h_entry) { //printk(KERN_INFO "sdcardfs: %s: %s: %d\n", __func__, (char *)h_entry->key, h_entry->value); kfree(h_entry->key); kmem_cache_free(hashtable_entry_cachep, h_entry); } static int insert_int_to_null(struct packagelist_data *pkgl_dat, void *key, int value) { struct hashtable_entry *hash_cur; struct hashtable_entry *new_entry; struct hlist_node *h_n; //printk(KERN_INFO "sdcardfs: %s: %d: %d\n", __func__, (int)key, value); hash_for_each_possible(pkgl_dat->appid_with_rw, hash_cur, hlist, (unsigned int)key, h_n) { if (key == hash_cur->key) { hash_cur->value = value; return 0; } } new_entry = kmem_cache_alloc(hashtable_entry_cachep, GFP_KERNEL); if (!new_entry) return -ENOMEM; new_entry->key = key; new_entry->value = value; hash_add(pkgl_dat->appid_with_rw, &new_entry->hlist, (unsigned int)new_entry->key); return 0; } static void remove_int_to_null(struct hashtable_entry *h_entry) { //printk(KERN_INFO "sdcardfs: %s: %d: %d\n", __func__, (int)h_entry->key, h_entry->value); kmem_cache_free(hashtable_entry_cachep, h_entry); } static void remove_all_hashentrys(struct packagelist_data *pkgl_dat) { struct hashtable_entry *hash_cur; struct hlist_node *h_n; struct hlist_node *h_t; int i; hash_for_each_safe(pkgl_dat->package_to_appid, i, h_t, hash_cur, hlist, h_n) remove_str_to_int(hash_cur); hash_for_each_safe(pkgl_dat->appid_with_rw, i, h_t, hash_cur, hlist, h_n) remove_int_to_null(hash_cur); hash_init(pkgl_dat->package_to_appid); hash_init(pkgl_dat->appid_with_rw); } static int read_package_list(struct packagelist_data *pkgl_dat) { int ret; int fd; int read_amount; printk(KERN_INFO "sdcardfs: read_package_list\n"); mutex_lock(&pkgl_dat->hashtable_lock); remove_all_hashentrys(pkgl_dat); fd = sys_open(kpackageslist_file, O_RDONLY, 0); if (fd < 0) { printk(KERN_ERR "sdcardfs: failed to open package list\n"); mutex_unlock(&pkgl_dat->hashtable_lock); return fd; } while ((read_amount = sys_read(fd, pkgl_dat->read_buf, 
sizeof(pkgl_dat->read_buf))) > 0) { int appid; char *token; int one_line_len = 0; int additional_read; unsigned long ret_gid; while (one_line_len < read_amount) { if (pkgl_dat->read_buf[one_line_len] == '\n') { one_line_len++; break; } one_line_len++; } additional_read = read_amount - one_line_len; if (additional_read > 0) sys_lseek(fd, -additional_read, SEEK_CUR); if (sscanf(pkgl_dat->read_buf, "%s %d %*d %*s %*s %s", pkgl_dat->app_name_buf, &appid, pkgl_dat->gids_buf) == 3) { ret = insert_str_to_int(pkgl_dat, pkgl_dat->app_name_buf, appid); if (ret) { sys_close(fd); mutex_unlock(&pkgl_dat->hashtable_lock); return ret; } token = strtok_r(pkgl_dat->gids_buf, ",", &pkgl_dat->strtok_last); while (token != NULL) { if (!kstrtoul(token, 10, &ret_gid) && (ret_gid == pkgl_dat->write_gid)) { ret = insert_int_to_null(pkgl_dat, (void *)appid, 1); if (ret) { sys_close(fd); mutex_unlock(&pkgl_dat->hashtable_lock); return ret; } break; } token = strtok_r(NULL, ",", &pkgl_dat->strtok_last); } } } sys_close(fd); mutex_unlock(&pkgl_dat->hashtable_lock); return 0; } static int packagelist_reader(void *thread_data) { struct packagelist_data *pkgl_dat = (struct packagelist_data *)thread_data; struct inotify_event *event; bool active = false; int event_pos; int event_size; int res = 0; int nfd; allow_signal(SIGINT); nfd = sys_inotify_init(); if (nfd < 0) { printk(KERN_ERR "sdcardfs: inotify_init failed: %d\n", nfd); return nfd; } while (!kthread_should_stop()) { if (signal_pending(current)) { ssleep(1); continue; } if (!active) { res = sys_inotify_add_watch(nfd, kpackageslist_file, IN_DELETE_SELF); if (res < 0) { if (res == -ENOENT || res == -EACCES) { /* Framework may not have created yet, sleep and retry */ printk(KERN_ERR "sdcardfs: missing packages.list; retrying\n"); ssleep(2); printk(KERN_ERR "sdcardfs: missing packages.list_end; retrying\n"); continue; } else { printk(KERN_ERR "sdcardfs: inotify_add_watch failed: %d\n", res); goto interruptable_sleep; } } /* Watch above will 
tell us about any future changes, so * read the current state. */ res = read_package_list(pkgl_dat); if (res) { printk(KERN_ERR "sdcardfs: read_package_list failed: %d\n", res); goto interruptable_sleep; } active = true; } event_pos = 0; res = sys_read(nfd, pkgl_dat->event_buf, sizeof(pkgl_dat->event_buf)); if (res < (int) sizeof(*event)) { if (res == -EINTR) continue; printk(KERN_ERR "sdcardfs: failed to read inotify event: %d\n", res); goto interruptable_sleep; } while (res >= (int) sizeof(*event)) { event = (struct inotify_event *) (pkgl_dat->event_buf + event_pos); printk(KERN_INFO "sdcardfs: inotify event: %08x\n", event->mask); if ((event->mask & IN_IGNORED) == IN_IGNORED) { /* Previously watched file was deleted, probably due to move * that swapped in new data; re-arm the watch and read. */ active = false; } event_size = sizeof(*event) + event->len; res -= event_size; event_pos += event_size; } continue; interruptable_sleep: set_current_state(TASK_INTERRUPTIBLE); schedule(); } flush_signals(current); sys_close(nfd); return res; } void * packagelist_create(gid_t write_gid) { struct packagelist_data *pkgl_dat; struct task_struct *packagelist_thread; pkgl_dat = kmalloc(sizeof(*pkgl_dat), GFP_KERNEL | __GFP_ZERO); if (!pkgl_dat) { printk(KERN_ERR "sdcardfs: creating kthread failed\n"); return ERR_PTR(-ENOMEM); } mutex_init(&pkgl_dat->hashtable_lock); hash_init(pkgl_dat->package_to_appid); hash_init(pkgl_dat->appid_with_rw); pkgl_dat->write_gid = write_gid; packagelist_thread = kthread_run(packagelist_reader, (void *)pkgl_dat, "pkgld"); if (IS_ERR(packagelist_thread)) { printk(KERN_ERR "sdcardfs: creating kthread failed\n"); kfree(pkgl_dat); return packagelist_thread; } pkgl_dat->thread_id = packagelist_thread; printk(KERN_INFO "sdcardfs: created packagelist pkgld/%d\n", (int)pkgl_dat->thread_id->pid); return (void *)pkgl_dat; } void packagelist_destroy(void *pkgl_id) { struct packagelist_data *pkgl_dat = (struct packagelist_data *)pkgl_id; pid_t pkgl_pid = 
pkgl_dat->thread_id->pid; force_sig_info(SIGINT, SEND_SIG_PRIV, pkgl_dat->thread_id); kthread_stop(pkgl_dat->thread_id); remove_all_hashentrys(pkgl_dat); printk(KERN_INFO "sdcardfs: destroyed packagelist pkgld/%d\n", (int)pkgl_pid); kfree(pkgl_dat); } int packagelist_init(void) { hashtable_entry_cachep = kmem_cache_create("packagelist_hashtable_entry", sizeof(struct hashtable_entry), 0, 0, NULL); if (!hashtable_entry_cachep) { printk(KERN_ERR "sdcardfs: failed creating pkgl_hashtable entry slab cache\n"); return -ENOMEM; } return 0; } void packagelist_exit(void) { if (hashtable_entry_cachep) kmem_cache_destroy(hashtable_entry_cachep); }
gpl-2.0
vidoardes/Vivo-2.6.35
fs/squashfs/export.c
1266
4489
/* * Squashfs - a compressed read only filesystem for Linux * * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008 * Phillip Lougher <phillip@lougher.demon.co.uk> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2, * or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. * * export.c */ /* * This file implements code to make Squashfs filesystems exportable (NFS etc.) * * The export code uses an inode lookup table to map inode numbers passed in * filehandles to an inode location on disk. This table is stored compressed * into metadata blocks. A second index table is used to locate these. This * second index table for speed of access (and because it is small) is read at * mount time and cached in memory. * * The inode lookup table is used only by the export code, inode disk * locations are directly encoded in directories, enabling direct access * without an intermediate lookup for all operations except the export ops. */ #include <linux/fs.h> #include <linux/vfs.h> #include <linux/dcache.h> #include <linux/exportfs.h> #include <linux/slab.h> #include "squashfs_fs.h" #include "squashfs_fs_sb.h" #include "squashfs_fs_i.h" #include "squashfs.h" /* * Look-up inode number (ino) in table, returning the inode location. 
*/ static long long squashfs_inode_lookup(struct super_block *sb, int ino_num) { struct squashfs_sb_info *msblk = sb->s_fs_info; int blk = SQUASHFS_LOOKUP_BLOCK(ino_num - 1); int offset = SQUASHFS_LOOKUP_BLOCK_OFFSET(ino_num - 1); u64 start = le64_to_cpu(msblk->inode_lookup_table[blk]); __le64 ino; int err; TRACE("Entered squashfs_inode_lookup, inode_number = %d\n", ino_num); err = squashfs_read_metadata(sb, &ino, &start, &offset, sizeof(ino)); if (err < 0) return err; TRACE("squashfs_inode_lookup, inode = 0x%llx\n", (u64) le64_to_cpu(ino)); return le64_to_cpu(ino); } static struct dentry *squashfs_export_iget(struct super_block *sb, unsigned int ino_num) { long long ino; struct dentry *dentry = ERR_PTR(-ENOENT); TRACE("Entered squashfs_export_iget\n"); ino = squashfs_inode_lookup(sb, ino_num); if (ino >= 0) dentry = d_obtain_alias(squashfs_iget(sb, ino, ino_num)); return dentry; } static struct dentry *squashfs_fh_to_dentry(struct super_block *sb, struct fid *fid, int fh_len, int fh_type) { if ((fh_type != FILEID_INO32_GEN && fh_type != FILEID_INO32_GEN_PARENT) || fh_len < 2) return NULL; return squashfs_export_iget(sb, fid->i32.ino); } static struct dentry *squashfs_fh_to_parent(struct super_block *sb, struct fid *fid, int fh_len, int fh_type) { if (fh_type != FILEID_INO32_GEN_PARENT || fh_len < 4) return NULL; return squashfs_export_iget(sb, fid->i32.parent_ino); } static struct dentry *squashfs_get_parent(struct dentry *child) { struct inode *inode = child->d_inode; unsigned int parent_ino = squashfs_i(inode)->parent; return squashfs_export_iget(inode->i_sb, parent_ino); } /* * Read uncompressed inode lookup table indexes off disk into memory */ __le64 *squashfs_read_inode_lookup_table(struct super_block *sb, u64 lookup_table_start, unsigned int inodes) { unsigned int length = SQUASHFS_LOOKUP_BLOCK_BYTES(inodes); __le64 *inode_lookup_table; int err; TRACE("In read_inode_lookup_table, length %d\n", length); /* Allocate inode lookup table indexes */ 
inode_lookup_table = kmalloc(length, GFP_KERNEL); if (inode_lookup_table == NULL) { ERROR("Failed to allocate inode lookup table\n"); return ERR_PTR(-ENOMEM); } err = squashfs_read_table(sb, inode_lookup_table, lookup_table_start, length); if (err < 0) { ERROR("unable to read inode lookup table\n"); kfree(inode_lookup_table); return ERR_PTR(err); } return inode_lookup_table; } const struct export_operations squashfs_export_ops = { .fh_to_dentry = squashfs_fh_to_dentry, .fh_to_parent = squashfs_fh_to_parent, .get_parent = squashfs_get_parent };
gpl-2.0