repo_name
string
path
string
copies
string
size
string
content
string
license
string
xplodwild/packaged-linux-linaro-3.2-ci
drivers/net/wireless/iwmc3200wifi/netdev.c
8056
4466
/* * Intel Wireless Multicomm 3200 WiFi driver * * Copyright (C) 2009 Intel Corporation <ilw@linux.intel.com> * Samuel Ortiz <samuel.ortiz@intel.com> * Zhu Yi <yi.zhu@intel.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version * 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA. * */ /* * This is the netdev related hooks for iwm. * * Some interesting code paths: * * iwm_open() (Called at netdev interface bringup time) * -> iwm_up() (main.c) * -> iwm_bus_enable() * -> if_sdio_enable() (In case of an SDIO bus) * -> sdio_enable_func() * -> iwm_notif_wait(BARKER_REBOOT) (wait for reboot barker) * -> iwm_notif_wait(ACK_BARKER) (wait for ACK barker) * -> iwm_load_fw() (fw.c) * -> iwm_load_umac() * -> iwm_load_lmac() (Calibration LMAC) * -> iwm_load_lmac() (Operational LMAC) * -> iwm_send_umac_config() * * iwm_stop() (Called at netdev interface bringdown time) * -> iwm_down() * -> iwm_bus_disable() * -> if_sdio_disable() (In case of an SDIO bus) * -> sdio_disable_func() */ #include <linux/netdevice.h> #include <linux/slab.h> #include "iwm.h" #include "commands.h" #include "cfg80211.h" #include "debug.h" static int iwm_open(struct net_device *ndev) { struct iwm_priv *iwm = ndev_to_iwm(ndev); return iwm_up(iwm); } static int iwm_stop(struct net_device *ndev) { struct iwm_priv *iwm = ndev_to_iwm(ndev); return iwm_down(iwm); } /* * iwm AC to queue mapping * * AC_VO -> queue 3 * AC_VI -> queue 2 * AC_BE -> queue 1 * AC_BK -> queue 0 */ static 
const u16 iwm_1d_to_queue[8] = { 1, 0, 0, 1, 2, 2, 3, 3 }; int iwm_tid_to_queue(u16 tid) { if (tid > IWM_UMAC_TID_NR - 2) return -EINVAL; return iwm_1d_to_queue[tid]; } static u16 iwm_select_queue(struct net_device *dev, struct sk_buff *skb) { skb->priority = cfg80211_classify8021d(skb); return iwm_1d_to_queue[skb->priority]; } static const struct net_device_ops iwm_netdev_ops = { .ndo_open = iwm_open, .ndo_stop = iwm_stop, .ndo_start_xmit = iwm_xmit_frame, .ndo_select_queue = iwm_select_queue, }; void *iwm_if_alloc(int sizeof_bus, struct device *dev, struct iwm_if_ops *if_ops) { struct net_device *ndev; struct wireless_dev *wdev; struct iwm_priv *iwm; int ret = 0; wdev = iwm_wdev_alloc(sizeof_bus, dev); if (IS_ERR(wdev)) return wdev; iwm = wdev_to_iwm(wdev); iwm->bus_ops = if_ops; iwm->wdev = wdev; ret = iwm_priv_init(iwm); if (ret) { dev_err(dev, "failed to init iwm_priv\n"); goto out_wdev; } wdev->iftype = iwm_mode_to_nl80211_iftype(iwm->conf.mode); ndev = alloc_netdev_mq(0, "wlan%d", ether_setup, IWM_TX_QUEUES); if (!ndev) { dev_err(dev, "no memory for network device instance\n"); ret = -ENOMEM; goto out_priv; } ndev->netdev_ops = &iwm_netdev_ops; ndev->ieee80211_ptr = wdev; SET_NETDEV_DEV(ndev, wiphy_dev(wdev->wiphy)); wdev->netdev = ndev; iwm->umac_profile = kmalloc(sizeof(struct iwm_umac_profile), GFP_KERNEL); if (!iwm->umac_profile) { dev_err(dev, "Couldn't alloc memory for profile\n"); ret = -ENOMEM; goto out_profile; } iwm_init_default_profile(iwm, iwm->umac_profile); return iwm; out_profile: free_netdev(ndev); out_priv: iwm_priv_deinit(iwm); out_wdev: iwm_wdev_free(iwm); return ERR_PTR(ret); } void iwm_if_free(struct iwm_priv *iwm) { if (!iwm_to_ndev(iwm)) return; cancel_delayed_work_sync(&iwm->ct_kill_delay); free_netdev(iwm_to_ndev(iwm)); iwm_priv_deinit(iwm); kfree(iwm->umac_profile); iwm->umac_profile = NULL; iwm_wdev_free(iwm); } int iwm_if_add(struct iwm_priv *iwm) { struct net_device *ndev = iwm_to_ndev(iwm); int ret; ret = register_netdev(ndev); 
if (ret < 0) { dev_err(&ndev->dev, "Failed to register netdev: %d\n", ret); return ret; } return 0; } void iwm_if_remove(struct iwm_priv *iwm) { unregister_netdev(iwm_to_ndev(iwm)); }
gpl-2.0
devil1437/GalaxyNexusKernel
net/netfilter/xt_u32.c
13688
2755
/* * xt_u32 - kernel module to match u32 packet content * * Original author: Don Cohen <don@isis.cs3-inc.com> * (C) CC Computer Consultants GmbH, 2007 */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/spinlock.h> #include <linux/skbuff.h> #include <linux/types.h> #include <linux/netfilter/x_tables.h> #include <linux/netfilter/xt_u32.h> static bool u32_match_it(const struct xt_u32 *data, const struct sk_buff *skb) { const struct xt_u32_test *ct; unsigned int testind; unsigned int nnums; unsigned int nvals; unsigned int i; __be32 n; u_int32_t pos; u_int32_t val; u_int32_t at; /* * Small example: "0 >> 28 == 4 && 8 & 0xFF0000 >> 16 = 6, 17" * (=IPv4 and (TCP or UDP)). Outer loop runs over the "&&" operands. */ for (testind = 0; testind < data->ntests; ++testind) { ct = &data->tests[testind]; at = 0; pos = ct->location[0].number; if (skb->len < 4 || pos > skb->len - 4) return false; if (skb_copy_bits(skb, pos, &n, sizeof(n)) < 0) BUG(); val = ntohl(n); nnums = ct->nnums; /* Inner loop runs over "&", "<<", ">>" and "@" operands */ for (i = 1; i < nnums; ++i) { u_int32_t number = ct->location[i].number; switch (ct->location[i].nextop) { case XT_U32_AND: val &= number; break; case XT_U32_LEFTSH: val <<= number; break; case XT_U32_RIGHTSH: val >>= number; break; case XT_U32_AT: if (at + val < at) return false; at += val; pos = number; if (at + 4 < at || skb->len < at + 4 || pos > skb->len - at - 4) return false; if (skb_copy_bits(skb, at + pos, &n, sizeof(n)) < 0) BUG(); val = ntohl(n); break; } } /* Run over the "," and ":" operands */ nvals = ct->nvalues; for (i = 0; i < nvals; ++i) if (ct->value[i].min <= val && val <= ct->value[i].max) break; if (i >= ct->nvalues) return false; } return true; } static bool u32_mt(const struct sk_buff *skb, struct xt_action_param *par) { const struct xt_u32 *data = par->matchinfo; bool ret; ret = u32_match_it(data, skb); return ret ^ data->invert; } static struct xt_match xt_u32_mt_reg __read_mostly = { .name = 
"u32", .revision = 0, .family = NFPROTO_UNSPEC, .match = u32_mt, .matchsize = sizeof(struct xt_u32), .me = THIS_MODULE, }; static int __init u32_mt_init(void) { return xt_register_match(&xt_u32_mt_reg); } static void __exit u32_mt_exit(void) { xt_unregister_match(&xt_u32_mt_reg); } module_init(u32_mt_init); module_exit(u32_mt_exit); MODULE_AUTHOR("Jan Engelhardt <jengelh@medozas.de>"); MODULE_DESCRIPTION("Xtables: arbitrary byte matching"); MODULE_LICENSE("GPL"); MODULE_ALIAS("ipt_u32"); MODULE_ALIAS("ip6t_u32");
gpl-2.0
ltangvald/mysql
storage/ndb/src/common/debugger/signaldata/NdbfsContinueB.cpp
121
1381
/* Copyright (C) 2003-2006 MySQL AB All rights reserved. Use is subject to license terms. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include <signaldata/NdbfsContinueB.hpp> bool printCONTINUEB_NDBFS(FILE * output, const Uint32 * theData, Uint32 len, Uint16 not_used){ (void)not_used; switch (theData[0]) { case NdbfsContinueB::ZSCAN_MEMORYCHANNEL_10MS_DELAY: fprintf(output, " Scanning the memory channel every 10ms\n"); return true; break; case NdbfsContinueB::ZSCAN_MEMORYCHANNEL_NO_DELAY: fprintf(output, " Scanning the memory channel again with no delay\n"); return true; break; default: fprintf(output, " Default system error lab...\n"); return false; break; }//switch return false; }
gpl-2.0
CSE3320/kernel-code
.backup_do_not_remove/arch/s390/mm/maccess.c
121
5956
// SPDX-License-Identifier: GPL-2.0 /* * Access kernel memory without faulting -- s390 specific implementation. * * Copyright IBM Corp. 2009, 2015 * * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>, * */ #include <linux/uaccess.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/errno.h> #include <linux/gfp.h> #include <linux/cpu.h> #include <asm/ctl_reg.h> #include <asm/io.h> #include <asm/stacktrace.h> static notrace long s390_kernel_write_odd(void *dst, const void *src, size_t size) { unsigned long aligned, offset, count; char tmp[8]; aligned = (unsigned long) dst & ~7UL; offset = (unsigned long) dst & 7UL; size = min(8UL - offset, size); count = size - 1; asm volatile( " bras 1,0f\n" " mvc 0(1,%4),0(%5)\n" "0: mvc 0(8,%3),0(%0)\n" " ex %1,0(1)\n" " lg %1,0(%3)\n" " lra %0,0(%0)\n" " sturg %1,%0\n" : "+&a" (aligned), "+&a" (count), "=m" (tmp) : "a" (&tmp), "a" (&tmp[offset]), "a" (src) : "cc", "memory", "1"); return size; } /* * s390_kernel_write - write to kernel memory bypassing DAT * @dst: destination address * @src: source address * @size: number of bytes to copy * * This function writes to kernel memory bypassing DAT and possible page table * write protection. It writes to the destination using the sturg instruction. * Therefore we have a read-modify-write sequence: the function reads eight * bytes from destination at an eight byte boundary, modifies the bytes * requested and writes the result back in a loop. 
*/ static DEFINE_SPINLOCK(s390_kernel_write_lock); notrace void *s390_kernel_write(void *dst, const void *src, size_t size) { void *tmp = dst; unsigned long flags; long copied; spin_lock_irqsave(&s390_kernel_write_lock, flags); if (!(flags & PSW_MASK_DAT)) { memcpy(dst, src, size); } else { while (size) { copied = s390_kernel_write_odd(tmp, src, size); tmp += copied; src += copied; size -= copied; } } spin_unlock_irqrestore(&s390_kernel_write_lock, flags); return dst; } static int __no_sanitize_address __memcpy_real(void *dest, void *src, size_t count) { register unsigned long _dest asm("2") = (unsigned long) dest; register unsigned long _len1 asm("3") = (unsigned long) count; register unsigned long _src asm("4") = (unsigned long) src; register unsigned long _len2 asm("5") = (unsigned long) count; int rc = -EFAULT; asm volatile ( "0: mvcle %1,%2,0x0\n" "1: jo 0b\n" " lhi %0,0x0\n" "2:\n" EX_TABLE(1b,2b) : "+d" (rc), "+d" (_dest), "+d" (_src), "+d" (_len1), "+d" (_len2), "=m" (*((long *) dest)) : "m" (*((long *) src)) : "cc", "memory"); return rc; } static unsigned long __no_sanitize_address _memcpy_real(unsigned long dest, unsigned long src, unsigned long count) { int irqs_disabled, rc; unsigned long flags; if (!count) return 0; flags = arch_local_irq_save(); irqs_disabled = arch_irqs_disabled_flags(flags); if (!irqs_disabled) trace_hardirqs_off(); __arch_local_irq_stnsm(0xf8); // disable DAT rc = __memcpy_real((void *) dest, (void *) src, (size_t) count); if (flags & PSW_MASK_DAT) __arch_local_irq_stosm(0x04); // enable DAT if (!irqs_disabled) trace_hardirqs_on(); __arch_local_irq_ssm(flags); return rc; } /* * Copy memory in real mode (kernel to kernel) */ int memcpy_real(void *dest, void *src, size_t count) { int rc; if (S390_lowcore.nodat_stack != 0) { preempt_disable(); rc = CALL_ON_STACK(_memcpy_real, S390_lowcore.nodat_stack, 3, dest, src, count); preempt_enable(); return rc; } /* * This is a really early memcpy_real call, the stacks are * not set up yet. 
Just call _memcpy_real on the early boot * stack */ return _memcpy_real((unsigned long) dest,(unsigned long) src, (unsigned long) count); } /* * Copy memory in absolute mode (kernel to kernel) */ void memcpy_absolute(void *dest, void *src, size_t count) { unsigned long cr0, flags, prefix; flags = arch_local_irq_save(); __ctl_store(cr0, 0, 0); __ctl_clear_bit(0, 28); /* disable lowcore protection */ prefix = store_prefix(); if (prefix) { local_mcck_disable(); set_prefix(0); memcpy(dest, src, count); set_prefix(prefix); local_mcck_enable(); } else { memcpy(dest, src, count); } __ctl_load(cr0, 0, 0); arch_local_irq_restore(flags); } /* * Copy memory from kernel (real) to user (virtual) */ int copy_to_user_real(void __user *dest, void *src, unsigned long count) { int offs = 0, size, rc; char *buf; buf = (char *) __get_free_page(GFP_KERNEL); if (!buf) return -ENOMEM; rc = -EFAULT; while (offs < count) { size = min(PAGE_SIZE, count - offs); if (memcpy_real(buf, src + offs, size)) goto out; if (copy_to_user(dest + offs, buf, size)) goto out; offs += size; } rc = 0; out: free_page((unsigned long) buf); return rc; } /* * Check if physical address is within prefix or zero page */ static int is_swapped(unsigned long addr) { unsigned long lc; int cpu; if (addr < sizeof(struct lowcore)) return 1; for_each_online_cpu(cpu) { lc = (unsigned long) lowcore_ptr[cpu]; if (addr > lc + sizeof(struct lowcore) - 1 || addr < lc) continue; return 1; } return 0; } /* * Convert a physical pointer for /dev/mem access * * For swapped prefix pages a new buffer is returned that contains a copy of * the absolute memory. The buffer size is maximum one page large. 
*/ void *xlate_dev_mem_ptr(phys_addr_t addr) { void *bounce = (void *) addr; unsigned long size; get_online_cpus(); preempt_disable(); if (is_swapped(addr)) { size = PAGE_SIZE - (addr & ~PAGE_MASK); bounce = (void *) __get_free_page(GFP_ATOMIC); if (bounce) memcpy_absolute(bounce, (void *) addr, size); } preempt_enable(); put_online_cpus(); return bounce; } /* * Free converted buffer for /dev/mem access (if necessary) */ void unxlate_dev_mem_ptr(phys_addr_t addr, void *buf) { if ((void *) addr != buf) free_page((unsigned long) buf); }
gpl-2.0
jeppeter/mysql56
storage/ndb/src/common/debugger/signaldata/StartRec.cpp
121
2552
/* Copyright (C) 2003, 2005-2007 MySQL AB, 2008 Sun Microsystems, Inc. All rights reserved. Use is subject to license terms. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include <RefConvert.hpp> #include <signaldata/StartRec.hpp> #include <signaldata/StartFragReq.hpp> bool printSTART_REC_REQ(FILE * output, const Uint32 * theData, Uint32 len, Uint16 recBlockNo){ StartRecReq * sig = (StartRecReq *) theData; if (len != StartRecReq::SignalLength) return false; fprintf(output, " receivingNodeId: %d senderRef: (%d, %d)\n", sig->receivingNodeId, refToNode(sig->senderRef), refToBlock(sig->senderRef)); fprintf(output, " keepGci: %d lastCompletedGci: %d newestGci: %d senderData: %x\n", sig->keepGci, sig->lastCompletedGci, sig->newestGci, sig->senderData); NdbNodeBitmask mask; mask.assign(NdbNodeBitmask::Size, sig->sr_nodes); char buf[100]; fprintf(output, " sr_nodes: %s\n", mask.getText(buf)); return true; } bool printSTART_REC_CONF(FILE * output, const Uint32 * theData, Uint32 len, Uint16 recBlockNo){ StartRecConf * sig = (StartRecConf *) theData; if (len != StartRecConf::SignalLength) return false; fprintf(output, " startingNodeId: %d senderData: %u\n", sig->startingNodeId, sig->senderData); return true; } bool printSTART_FRAG_REQ(FILE * output, const Uint32 * theData, Uint32 len, Uint16 recBlockNo) { StartFragReq* sig = (StartFragReq*)theData; fprintf(output, " table: %d frag: %d lcpId: %d lcpNo: 
%d #nodes: %d \n", sig->tableId, sig->fragId, sig->lcpId, sig->lcpNo, sig->noOfLogNodes); for(Uint32 i = 0; i<sig->noOfLogNodes; i++) { fprintf(output, " (node: %d startGci: %d lastGci: %d)", sig->lqhLogNode[i], sig->startGci[i], sig->lastGci[i]); } fprintf(output, "\n"); return true; }
gpl-2.0
jcowgill/dolphin
Externals/wxWidgets3/src/common/wincmn.cpp
121
120305
///////////////////////////////////////////////////////////////////////////// // Name: src/common/wincmn.cpp // Purpose: common (to all ports) wxWindow functions // Author: Julian Smart, Vadim Zeitlin // Modified by: // Created: 13/07/98 // Copyright: (c) wxWidgets team // Licence: wxWindows licence ///////////////////////////////////////////////////////////////////////////// // ============================================================================ // declarations // ============================================================================ // ---------------------------------------------------------------------------- // headers // ---------------------------------------------------------------------------- // For compilers that support precompilation, includes "wx.h". #include "wx/wxprec.h" #ifdef __BORLANDC__ #pragma hdrstop #endif #ifndef WX_PRECOMP #include "wx/string.h" #include "wx/log.h" #include "wx/intl.h" #include "wx/frame.h" #include "wx/window.h" #include "wx/control.h" #include "wx/checkbox.h" #include "wx/radiobut.h" #include "wx/statbox.h" #include "wx/textctrl.h" #include "wx/settings.h" #include "wx/dialog.h" #include "wx/msgdlg.h" #include "wx/msgout.h" #include "wx/statusbr.h" #include "wx/toolbar.h" #include "wx/dcclient.h" #include "wx/scrolbar.h" #include "wx/layout.h" #include "wx/sizer.h" #include "wx/menu.h" #endif //WX_PRECOMP #if wxUSE_DRAG_AND_DROP #include "wx/dnd.h" #endif // wxUSE_DRAG_AND_DROP #if wxUSE_ACCESSIBILITY #include "wx/access.h" #endif #if wxUSE_HELP #include "wx/cshelp.h" #endif // wxUSE_HELP #if wxUSE_TOOLTIPS #include "wx/tooltip.h" #endif // wxUSE_TOOLTIPS #if wxUSE_CARET #include "wx/caret.h" #endif // wxUSE_CARET #if wxUSE_SYSTEM_OPTIONS #include "wx/sysopt.h" #endif #include "wx/platinfo.h" #include "wx/recguard.h" #include "wx/private/window.h" #ifdef __WINDOWS__ #include "wx/msw/wrapwin.h" #endif // Windows List WXDLLIMPEXP_DATA_CORE(wxWindowList) wxTopLevelWindows; // globals #if wxUSE_MENUS wxMenu 
*wxCurrentPopupMenu = NULL; #endif // wxUSE_MENUS extern WXDLLEXPORT_DATA(const char) wxPanelNameStr[] = "panel"; namespace wxMouseCapture { // Check if the given window is in the capture stack. bool IsInCaptureStack(wxWindowBase* win); } // wxMouseCapture // ---------------------------------------------------------------------------- // static data // ---------------------------------------------------------------------------- IMPLEMENT_ABSTRACT_CLASS(wxWindowBase, wxEvtHandler) // ---------------------------------------------------------------------------- // event table // ---------------------------------------------------------------------------- BEGIN_EVENT_TABLE(wxWindowBase, wxEvtHandler) EVT_SYS_COLOUR_CHANGED(wxWindowBase::OnSysColourChanged) EVT_INIT_DIALOG(wxWindowBase::OnInitDialog) EVT_MIDDLE_DOWN(wxWindowBase::OnMiddleClick) #if wxUSE_HELP EVT_HELP(wxID_ANY, wxWindowBase::OnHelp) #endif // wxUSE_HELP EVT_SIZE(wxWindowBase::InternalOnSize) END_EVENT_TABLE() // ============================================================================ // implementation of the common functionality of the wxWindow class // ============================================================================ // ---------------------------------------------------------------------------- // XTI // ---------------------------------------------------------------------------- #if wxUSE_EXTENDED_RTTI // windows that are created from a parent window during its Create method, // eg. spin controls in a calendar controls must never been streamed out // separately otherwise chaos occurs. 
Right now easiest is to test for negative ids, // as windows with negative ids never can be recreated anyway bool wxWindowStreamingCallback( const wxObject *object, wxObjectWriter *, wxObjectWriterCallback *, const wxStringToAnyHashMap & ) { const wxWindow * win = wx_dynamic_cast(const wxWindow*, object); if ( win && win->GetId() < 0 ) return false; return true; } wxIMPLEMENT_DYNAMIC_CLASS_XTI_CALLBACK(wxWindow, wxWindowBase, "wx/window.h", \ wxWindowStreamingCallback) // make wxWindowList known before the property is used wxCOLLECTION_TYPE_INFO( wxWindow*, wxWindowList ); template<> void wxCollectionToVariantArray( wxWindowList const &theList, wxAnyList &value) { wxListCollectionToAnyList<wxWindowList::compatibility_iterator>( theList, value ); } wxDEFINE_FLAGS( wxWindowStyle ) wxBEGIN_FLAGS( wxWindowStyle ) // new style border flags, we put them first to // use them for streaming out wxFLAGS_MEMBER(wxBORDER_SIMPLE) wxFLAGS_MEMBER(wxBORDER_SUNKEN) wxFLAGS_MEMBER(wxBORDER_DOUBLE) wxFLAGS_MEMBER(wxBORDER_RAISED) wxFLAGS_MEMBER(wxBORDER_STATIC) wxFLAGS_MEMBER(wxBORDER_NONE) // old style border flags wxFLAGS_MEMBER(wxSIMPLE_BORDER) wxFLAGS_MEMBER(wxSUNKEN_BORDER) wxFLAGS_MEMBER(wxDOUBLE_BORDER) wxFLAGS_MEMBER(wxRAISED_BORDER) wxFLAGS_MEMBER(wxSTATIC_BORDER) wxFLAGS_MEMBER(wxBORDER) // standard window styles wxFLAGS_MEMBER(wxTAB_TRAVERSAL) wxFLAGS_MEMBER(wxCLIP_CHILDREN) wxFLAGS_MEMBER(wxTRANSPARENT_WINDOW) wxFLAGS_MEMBER(wxWANTS_CHARS) wxFLAGS_MEMBER(wxFULL_REPAINT_ON_RESIZE) wxFLAGS_MEMBER(wxALWAYS_SHOW_SB ) wxFLAGS_MEMBER(wxVSCROLL) wxFLAGS_MEMBER(wxHSCROLL) wxEND_FLAGS( wxWindowStyle ) wxBEGIN_PROPERTIES_TABLE(wxWindow) wxEVENT_PROPERTY( Close, wxEVT_CLOSE_WINDOW, wxCloseEvent) wxEVENT_PROPERTY( Create, wxEVT_CREATE, wxWindowCreateEvent ) wxEVENT_PROPERTY( Destroy, wxEVT_DESTROY, wxWindowDestroyEvent ) // Always constructor Properties first wxREADONLY_PROPERTY( Parent,wxWindow*, GetParent, wxEMPTY_PARAMETER_VALUE, \ 0 /*flags*/, wxT("Helpstring"), wxT("group")) 
wxPROPERTY( Id,wxWindowID, SetId, GetId, -1 /*wxID_ANY*/, 0 /*flags*/, \ wxT("Helpstring"), wxT("group") ) wxPROPERTY( Position,wxPoint, SetPosition, GetPosition, wxDefaultPosition, \ 0 /*flags*/, wxT("Helpstring"), wxT("group")) // pos wxPROPERTY( Size,wxSize, SetSize, GetSize, wxDefaultSize, 0 /*flags*/, \ wxT("Helpstring"), wxT("group")) // size wxPROPERTY( WindowStyle, long, SetWindowStyleFlag, GetWindowStyleFlag, \ wxEMPTY_PARAMETER_VALUE, 0 /*flags*/, wxT("Helpstring"), wxT("group")) // style wxPROPERTY( Name,wxString, SetName, GetName, wxEmptyString, 0 /*flags*/, \ wxT("Helpstring"), wxT("group") ) // Then all relations of the object graph wxREADONLY_PROPERTY_COLLECTION( Children, wxWindowList, wxWindowBase*, \ GetWindowChildren, wxPROP_OBJECT_GRAPH /*flags*/, \ wxT("Helpstring"), wxT("group")) // and finally all other properties wxPROPERTY( ExtraStyle, long, SetExtraStyle, GetExtraStyle, wxEMPTY_PARAMETER_VALUE, \ 0 /*flags*/, wxT("Helpstring"), wxT("group")) // extstyle wxPROPERTY( BackgroundColour, wxColour, SetBackgroundColour, GetBackgroundColour, \ wxEMPTY_PARAMETER_VALUE, 0 /*flags*/, wxT("Helpstring"), wxT("group")) // bg wxPROPERTY( ForegroundColour, wxColour, SetForegroundColour, GetForegroundColour, \ wxEMPTY_PARAMETER_VALUE, 0 /*flags*/, wxT("Helpstring"), wxT("group")) // fg wxPROPERTY( Enabled, bool, Enable, IsEnabled, wxAny((bool)true), 0 /*flags*/, \ wxT("Helpstring"), wxT("group")) wxPROPERTY( Shown, bool, Show, IsShown, wxAny((bool)true), 0 /*flags*/, \ wxT("Helpstring"), wxT("group")) #if 0 // possible property candidates (not in xrc) or not valid in all subclasses wxPROPERTY( Title,wxString, SetTitle, GetTitle, wxEmptyString ) wxPROPERTY( Font, wxFont, SetFont, GetWindowFont , ) wxPROPERTY( Label,wxString, SetLabel, GetLabel, wxEmptyString ) // MaxHeight, Width, MinHeight, Width // TODO switch label to control and title to toplevels wxPROPERTY( ThemeEnabled, bool, SetThemeEnabled, GetThemeEnabled, ) //wxPROPERTY( Cursor, wxCursor, 
SetCursor, GetCursor, ) // wxPROPERTY( ToolTip, wxString, SetToolTip, GetToolTipText, ) wxPROPERTY( AutoLayout, bool, SetAutoLayout, GetAutoLayout, ) #endif wxEND_PROPERTIES_TABLE() wxEMPTY_HANDLERS_TABLE(wxWindow) wxCONSTRUCTOR_DUMMY(wxWindow) #else #ifndef __WXUNIVERSAL__ IMPLEMENT_DYNAMIC_CLASS(wxWindow, wxWindowBase) #endif #endif // ---------------------------------------------------------------------------- // initialization // ---------------------------------------------------------------------------- // the default initialization wxWindowBase::wxWindowBase() { // no window yet, no parent nor children m_parent = NULL; m_windowId = wxID_ANY; // no constraints on the minimal window size m_minWidth = m_maxWidth = wxDefaultCoord; m_minHeight = m_maxHeight = wxDefaultCoord; // invalidiated cache value m_bestSizeCache = wxDefaultSize; // window are created enabled and visible by default m_isShown = m_isEnabled = true; // the default event handler is just this window m_eventHandler = this; #if wxUSE_VALIDATORS // no validator m_windowValidator = NULL; #endif // wxUSE_VALIDATORS // the colours/fonts are default for now, so leave m_font, // m_backgroundColour and m_foregroundColour uninitialized and set those m_hasBgCol = m_hasFgCol = m_hasFont = false; m_inheritBgCol = m_inheritFgCol = m_inheritFont = false; // no style bits m_exStyle = m_windowStyle = 0; m_backgroundStyle = wxBG_STYLE_ERASE; #if wxUSE_CONSTRAINTS // no constraints whatsoever m_constraints = NULL; m_constraintsInvolvedIn = NULL; #endif // wxUSE_CONSTRAINTS m_windowSizer = NULL; m_containingSizer = NULL; m_autoLayout = false; #if wxUSE_DRAG_AND_DROP m_dropTarget = NULL; #endif // wxUSE_DRAG_AND_DROP #if wxUSE_TOOLTIPS m_tooltip = NULL; #endif // wxUSE_TOOLTIPS #if wxUSE_CARET m_caret = NULL; #endif // wxUSE_CARET #if wxUSE_PALETTE m_hasCustomPalette = false; #endif // wxUSE_PALETTE #if wxUSE_ACCESSIBILITY m_accessible = NULL; #endif m_virtualSize = wxDefaultSize; m_scrollHelper = NULL; 
m_windowVariant = wxWINDOW_VARIANT_NORMAL; #if wxUSE_SYSTEM_OPTIONS if ( wxSystemOptions::HasOption(wxWINDOW_DEFAULT_VARIANT) ) { m_windowVariant = (wxWindowVariant) wxSystemOptions::GetOptionInt( wxWINDOW_DEFAULT_VARIANT ) ; } #endif // Whether we're using the current theme for this window (wxGTK only for now) m_themeEnabled = false; // This is set to true by SendDestroyEvent() which should be called by the // most derived class to ensure that the destruction event is sent as soon // as possible to allow its handlers to still see the undestroyed window m_isBeingDeleted = false; m_freezeCount = 0; } // common part of window creation process bool wxWindowBase::CreateBase(wxWindowBase *parent, wxWindowID id, const wxPoint& WXUNUSED(pos), const wxSize& size, long style, const wxString& name) { // ids are limited to 16 bits under MSW so if you care about portability, // it's not a good idea to use ids out of this range (and negative ids are // reserved for wxWidgets own usage) wxASSERT_MSG( id == wxID_ANY || (id >= 0 && id < 32767) || (id >= wxID_AUTO_LOWEST && id <= wxID_AUTO_HIGHEST), wxT("invalid id value") ); // generate a new id if the user doesn't care about it if ( id == wxID_ANY ) { m_windowId = NewControlId(); } else // valid id specified { m_windowId = id; } // don't use SetWindowStyleFlag() here, this function should only be called // to change the flag after creation as it tries to reflect the changes in // flags by updating the window dynamically and we don't need this here m_windowStyle = style; // assume the user doesn't want this window to shrink beneath its initial // size, this worked like this in wxWidgets 2.8 and before and generally // often makes sense for child windows (for top level ones it definitely // does not as the user should be able to resize the window) // // note that we can't use IsTopLevel() from ctor if ( size != wxDefaultSize && !wxTopLevelWindows.Find((wxWindow *)this) ) SetMinSize(size); SetName(name); SetParent(parent); return 
true; } bool wxWindowBase::CreateBase(wxWindowBase *parent, wxWindowID id, const wxPoint& pos, const wxSize& size, long style, const wxValidator& wxVALIDATOR_PARAM(validator), const wxString& name) { if ( !CreateBase(parent, id, pos, size, style, name) ) return false; #if wxUSE_VALIDATORS SetValidator(validator); #endif // wxUSE_VALIDATORS // if the parent window has wxWS_EX_VALIDATE_RECURSIVELY set, we want to // have it too - like this it's possible to set it only in the top level // dialog/frame and all children will inherit it by defult if ( parent && (parent->GetExtraStyle() & wxWS_EX_VALIDATE_RECURSIVELY) ) { SetExtraStyle(GetExtraStyle() | wxWS_EX_VALIDATE_RECURSIVELY); } return true; } bool wxWindowBase::ToggleWindowStyle(int flag) { wxASSERT_MSG( flag, wxT("flags with 0 value can't be toggled") ); bool rc; long style = GetWindowStyleFlag(); if ( style & flag ) { style &= ~flag; rc = false; } else // currently off { style |= flag; rc = true; } SetWindowStyleFlag(style); return rc; } // ---------------------------------------------------------------------------- // destruction // ---------------------------------------------------------------------------- // common clean up wxWindowBase::~wxWindowBase() { wxASSERT_MSG( !wxMouseCapture::IsInCaptureStack(this), "Destroying window before releasing mouse capture: this " "will result in a crash later." ); // FIXME if these 2 cases result from programming errors in the user code // we should probably assert here instead of silently fixing them // Just in case the window has been Closed, but we're then deleting // immediately: don't leave dangling pointers. 
wxPendingDelete.DeleteObject(this); // Just in case we've loaded a top-level window via LoadNativeDialog but // we weren't a dialog class wxTopLevelWindows.DeleteObject((wxWindow*)this); // Any additional event handlers should be popped before the window is // deleted as otherwise the last handler will be left with a dangling // pointer to this window result in a difficult to diagnose crash later on. wxASSERT_MSG( GetEventHandler() == this, wxT("any pushed event handlers must have been removed") ); #if wxUSE_MENUS // The associated popup menu can still be alive, disassociate from it in // this case if ( wxCurrentPopupMenu && wxCurrentPopupMenu->GetInvokingWindow() == this ) wxCurrentPopupMenu->SetInvokingWindow(NULL); #endif // wxUSE_MENUS wxASSERT_MSG( GetChildren().GetCount() == 0, wxT("children not destroyed") ); // notify the parent about this window destruction if ( m_parent ) m_parent->RemoveChild(this); #if wxUSE_CARET delete m_caret; #endif // wxUSE_CARET #if wxUSE_VALIDATORS delete m_windowValidator; #endif // wxUSE_VALIDATORS #if wxUSE_CONSTRAINTS // Have to delete constraints/sizer FIRST otherwise sizers may try to look // at deleted windows as they delete themselves. DeleteRelatedConstraints(); if ( m_constraints ) { // This removes any dangling pointers to this window in other windows' // constraintsInvolvedIn lists. 
UnsetConstraints(m_constraints); wxDELETE(m_constraints); } #endif // wxUSE_CONSTRAINTS if ( m_containingSizer ) m_containingSizer->Detach( (wxWindow*)this ); delete m_windowSizer; #if wxUSE_DRAG_AND_DROP delete m_dropTarget; #endif // wxUSE_DRAG_AND_DROP #if wxUSE_TOOLTIPS delete m_tooltip; #endif // wxUSE_TOOLTIPS #if wxUSE_ACCESSIBILITY delete m_accessible; #endif #if wxUSE_HELP // NB: this has to be called unconditionally, because we don't know // whether this window has associated help text or not wxHelpProvider *helpProvider = wxHelpProvider::Get(); if ( helpProvider ) helpProvider->RemoveHelp(this); #endif } bool wxWindowBase::IsBeingDeleted() const { return m_isBeingDeleted || (!IsTopLevel() && m_parent && m_parent->IsBeingDeleted()); } void wxWindowBase::SendDestroyEvent() { if ( m_isBeingDeleted ) { // we could have been already called from a more derived class dtor, // e.g. ~wxTLW calls us and so does ~wxWindow and the latter call // should be simply ignored return; } m_isBeingDeleted = true; wxWindowDestroyEvent event; event.SetEventObject(this); event.SetId(GetId()); GetEventHandler()->ProcessEvent(event); } bool wxWindowBase::Destroy() { // If our handle is invalid, it means that this window has never been // created, either because creating it failed or, more typically, because // this wxWindow object was default-constructed and its Create() method had // never been called. As we didn't send wxWindowCreateEvent in this case // (which is sent after successful creation), don't send the matching // wxWindowDestroyEvent neither. 
if ( GetHandle() ) SendDestroyEvent(); delete this; return true; } bool wxWindowBase::Close(bool force) { wxCloseEvent event(wxEVT_CLOSE_WINDOW, m_windowId); event.SetEventObject(this); event.SetCanVeto(!force); // return false if window wasn't closed because the application vetoed the // close event return HandleWindowEvent(event) && !event.GetVeto(); } bool wxWindowBase::DestroyChildren() { wxWindowList::compatibility_iterator node; for ( ;; ) { // we iterate until the list becomes empty node = GetChildren().GetFirst(); if ( !node ) break; wxWindow *child = node->GetData(); // note that we really want to delete it immediately so don't call the // possible overridden Destroy() version which might not delete the // child immediately resulting in problems with our (top level) child // outliving its parent child->wxWindowBase::Destroy(); wxASSERT_MSG( !GetChildren().Find(child), wxT("child didn't remove itself using RemoveChild()") ); } return true; } // ---------------------------------------------------------------------------- // size/position related methods // ---------------------------------------------------------------------------- // centre the window with respect to its parent in either (or both) directions void wxWindowBase::DoCentre(int dir) { wxCHECK_RET( !(dir & wxCENTRE_ON_SCREEN) && GetParent(), wxT("this method only implements centering child windows") ); SetSize(GetRect().CentreIn(GetParent()->GetClientSize(), dir)); } // fits the window around the children void wxWindowBase::Fit() { SetSize(GetBestSize()); } // fits virtual size (ie. scrolled area etc.) around children void wxWindowBase::FitInside() { SetVirtualSize( GetBestVirtualSize() ); } // On Mac, scrollbars are explicitly children. 
#if defined( __WXMAC__ ) && !defined(__WXUNIVERSAL__)
// Return true if the window has at least one shown, non-top-level child that
// is not a scrollbar (on Mac scrollbars are real children and must not count
// towards the best-size computation).
static bool wxHasRealChildren(const wxWindowBase* win)
{
    int realChildCount = 0;

    for ( wxWindowList::compatibility_iterator
            node = win->GetChildren().GetFirst();
          node;
          node = node->GetNext() )
    {
        wxWindow *win = node->GetData();
        if ( !win->IsTopLevel() && win->IsShown()
#if wxUSE_SCROLLBAR
            && !wxDynamicCast(win, wxScrollBar)
#endif
           )
            realChildCount ++;
    }
    return (realChildCount > 0);
}
#endif

// Drop the cached best size (and the parents' caches that depend on it).
void wxWindowBase::InvalidateBestSize()
{
    m_bestSizeCache = wxDefaultSize;

    // parent's best size calculation may depend on its children's
    // as long as child window we are in is not top level window itself
    // (because the TLW size is never resized automatically)
    // so let's invalidate it as well to be safe:
    if (m_parent && !IsTopLevel())
        m_parent->InvalidateBestSize();
}

// return the size best suited for the current window
wxSize wxWindowBase::DoGetBestSize() const
{
    wxSize best;

    if ( m_windowSizer )
    {
        // a sizer knows best
        best = m_windowSizer->GetMinSize();
    }
#if wxUSE_CONSTRAINTS
    else if ( m_constraints )
    {
        wxConstCast(this, wxWindowBase)->SatisfyConstraints();

        // our minimal acceptable size is such that all our windows fit inside
        int maxX = 0,
            maxY = 0;

        for ( wxWindowList::compatibility_iterator
                node = GetChildren().GetFirst();
              node;
              node = node->GetNext() )
        {
            wxLayoutConstraints *c = node->GetData()->GetConstraints();
            if ( !c )
            {
                // it's not normal that we have an unconstrained child, but
                // what can we do about it?
                continue;
            }

            int x = c->right.GetValue(),
                y = c->bottom.GetValue();

            if ( x > maxX )
                maxX = x;

            if ( y > maxY )
                maxY = y;

            // TODO: we must calculate the overlaps somehow, otherwise we
            //       will never return a size bigger than the current one :-(
        }

        best = wxSize(maxX, maxY);
    }
#endif // wxUSE_CONSTRAINTS
    else if ( !GetChildren().empty()
#if defined( __WXMAC__ ) && !defined(__WXUNIVERSAL__)
              && wxHasRealChildren(this)
#endif
            )
    {
        // our minimal acceptable size is such that all our visible child
        // windows fit inside
        int maxX = 0,
            maxY = 0;

        for ( wxWindowList::compatibility_iterator
                node = GetChildren().GetFirst();
              node;
              node = node->GetNext() )
        {
            wxWindow *win = node->GetData();
            if ( win->IsTopLevel()
                    || !win->IsShown()
#if wxUSE_STATUSBAR
                    || wxDynamicCast(win, wxStatusBar)
#endif // wxUSE_STATUSBAR
               )
            {
                // dialogs and frames lie in different top level windows -
                // don't deal with them here; as for the status bars, they
                // don't lie in the client area at all
                continue;
            }

            int wx, wy, ww, wh;
            win->GetPosition(&wx, &wy);

            // if the window hadn't been positioned yet, assume that it is in
            // the origin
            if ( wx == wxDefaultCoord )
                wx = 0;
            if ( wy == wxDefaultCoord )
                wy = 0;

            win->GetSize(&ww, &wh);
            if ( wx + ww > maxX )
                maxX = wx + ww;
            if ( wy + wh > maxY )
                maxY = wy + wh;
        }

        best = wxSize(maxX, maxY);
    }
    else // ! has children
    {
        wxSize size = GetMinSize();
        if ( !size.IsFullySpecified() )
        {
            // if the window doesn't define its best size we assume that it can
            // be arbitrarily small -- usually this is not the case, of course,
            // but we have no way to know what the limit is, it should really
            // override DoGetBestClientSize() itself to tell us
            size.SetDefaults(wxSize(1, 1));
        }

        // return as-is, unadjusted by the client size difference.
        return size;
    }

    // Add any difference between size and client size
    wxSize diff = GetSize() - GetClientSize();
    best.x += wxMax(0, diff.x);
    best.y += wxMax(0, diff.y);

    return best;
}

// helper of GetWindowBorderSize(): as many ports don't implement support for
// wxSYS_BORDER/EDGE_X/Y metrics in their wxSystemSettings, use hard coded
// fallbacks in this case
static int wxGetMetricOrDefault(wxSystemMetric what, const wxWindowBase* win)
{
    int rc = wxSystemSettings::GetMetric(
        what, static_cast<wxWindow*>(const_cast<wxWindowBase*>(win)));
    if ( rc == -1 )
    {
        switch ( what )
        {
            case wxSYS_BORDER_X:
            case wxSYS_BORDER_Y:
                // 2D border is by default 1 pixel wide
                rc = 1;
                break;

            case wxSYS_EDGE_X:
            case wxSYS_EDGE_Y:
                // 3D borders are by default 2 pixels
                rc = 2;
                break;

            default:
                wxFAIL_MSG( wxT("unexpected wxGetMetricOrDefault() argument") );
                rc = 0;
        }
    }

    return rc;
}

// Total size taken by the window decorations, i.e. borders on both sides,
// depending on the border style currently in effect.
wxSize wxWindowBase::GetWindowBorderSize() const
{
    wxSize size;

    switch ( GetBorder() )
    {
        case wxBORDER_NONE:
            // nothing to do, size is already (0, 0)
            break;

        case wxBORDER_SIMPLE:
        case wxBORDER_STATIC:
            size.x = wxGetMetricOrDefault(wxSYS_BORDER_X, this);
            size.y = wxGetMetricOrDefault(wxSYS_BORDER_Y, this);
            break;

        case wxBORDER_SUNKEN:
        case wxBORDER_RAISED:
            size.x = wxMax(wxGetMetricOrDefault(wxSYS_EDGE_X, this),
                           wxGetMetricOrDefault(wxSYS_BORDER_X, this));
            size.y = wxMax(wxGetMetricOrDefault(wxSYS_EDGE_Y, this),
                           wxGetMetricOrDefault(wxSYS_BORDER_Y, this));
            break;

        case wxBORDER_DOUBLE:
            size.x = wxGetMetricOrDefault(wxSYS_EDGE_X, this) +
                        wxGetMetricOrDefault(wxSYS_BORDER_X, this);
            size.y = wxGetMetricOrDefault(wxSYS_EDGE_Y, this) +
                        wxGetMetricOrDefault(wxSYS_BORDER_Y, this);
            break;

        default:
            wxFAIL_MSG(wxT("Unknown border style."));
            break;
    }

    // we have borders on both sides
    return size*2;
}

// Forward the "first direction is known" hint to the sizer, if any; returns
// false when there is no sizer or the sizer doesn't use the hint.
bool
wxWindowBase::InformFirstDirection(int direction,
                                   int size,
                                   int availableOtherDir)
{
    return GetSizer() &&
           GetSizer()->InformFirstDirection(direction,
                                            size,
                                            availableOtherDir);
}

wxSize wxWindowBase::GetEffectiveMinSize() const
{
// merge the best size with the min size, giving priority to the min size
    wxSize min = GetMinSize();

    if (min.x == wxDefaultCoord || min.y == wxDefaultCoord)
    {
        // fill in whichever component was left unspecified from the best size
        wxSize best = GetBestSize();
        if (min.x == wxDefaultCoord)
            min.x = best.x;
        if (min.y == wxDefaultCoord)
            min.y = best.y;
    }

    return min;
}

wxSize wxWindowBase::DoGetBorderSize() const
{
    // there is one case in which we can implement it for all ports easily
    if ( GetBorder() == wxBORDER_NONE )
        return wxSize(0, 0);

    // otherwise use the difference between the real size and the client size
    // as a fallback: notice that this is incorrect in general as client size
    // also doesn't take the scrollbars into account
    return GetSize() - GetClientSize();
}

// Return the (cached) best size clamped between min and max sizes.
wxSize wxWindowBase::GetBestSize() const
{
    // the cache is only used when there is no sizer (the sizer's own min size
    // may change behind our back)
    if ( !m_windowSizer && m_bestSizeCache.IsFullySpecified() )
        return m_bestSizeCache;

    // call DoGetBestClientSize() first, if a derived class overrides it wants
    // it to be used
    wxSize size = DoGetBestClientSize();
    if ( size != wxDefaultSize )
        size += DoGetBorderSize();
    else
        size = DoGetBestSize();

    // Ensure that the best size is at least as large as min size.
    size.IncTo(GetMinSize());

    // And not larger than max size.
    size.DecToIfSpecified(GetMaxSize());

    // Finally cache result and return.
    CacheBestSize(size);
    return size;
}

// Best height for the given width; falls back to GetBestSize().y when the
// derived class doesn't implement DoGetBestClientHeight().
int wxWindowBase::GetBestHeight(int width) const
{
    const int height = DoGetBestClientHeight(width);

    return height == wxDefaultCoord
            ? GetBestSize().y
            : height + DoGetBorderSize().y;
}

// Best width for the given height; symmetric to GetBestHeight().
int wxWindowBase::GetBestWidth(int height) const
{
    const int width = DoGetBestClientWidth(height);

    return width == wxDefaultCoord
            ? GetBestSize().x
            : width + DoGetBorderSize().x;
}

void wxWindowBase::SetMinSize(const wxSize& minSize)
{
    m_minWidth = minSize.x;
    m_minHeight = minSize.y;

    // the cached best size depends on the min size, see GetBestSize()
    InvalidateBestSize();
}

void wxWindowBase::SetMaxSize(const wxSize& maxSize)
{
    m_maxWidth = maxSize.x;
    m_maxHeight = maxSize.y;

    InvalidateBestSize();
}

void wxWindowBase::SetInitialSize(const wxSize& size)
{
    // Set the min size to the size passed in.  This will usually either be
    // wxDefaultSize or the size passed to this window's ctor/Create function.
    SetMinSize(size);

    // Merge the size with the best size if needed
    wxSize best = GetEffectiveMinSize();

    // If the current size doesn't match then change it
    if (GetSize() != best)
        SetSize(best);
}

// by default the origin is not shifted
wxPoint wxWindowBase::GetClientAreaOrigin() const
{
    return wxPoint(0,0);
}

// Convert a client size to the corresponding full window size; -1 components
// are passed through unchanged (meaning "unspecified").
wxSize wxWindowBase::ClientToWindowSize(const wxSize& size) const
{
    const wxSize diff(GetSize() - GetClientSize());

    return wxSize(size.x == -1 ? -1 : size.x + diff.x,
                  size.y == -1 ? -1 : size.y + diff.y);
}

// Inverse of ClientToWindowSize(), with the same -1 pass-through convention.
wxSize wxWindowBase::WindowToClientSize(const wxSize& size) const
{
    const wxSize diff(GetSize() - GetClientSize());

    return wxSize(size.x == -1 ? -1 : size.x - diff.x,
                  size.y == -1 ? -1 : size.y - diff.y);
}

void wxWindowBase::SetWindowVariant( wxWindowVariant variant )
{
    if ( m_windowVariant != variant )
    {
        m_windowVariant = variant;

        DoSetWindowVariant(variant);
    }
}

void wxWindowBase::DoSetWindowVariant( wxWindowVariant variant )
{
    // adjust the font height to correspond to our new variant (notice that
    // we're only called if something really changed)
    wxFont font = GetFont();
    int size = font.GetPointSize();
    switch ( variant )
    {
        case wxWINDOW_VARIANT_NORMAL:
            break;

        case wxWINDOW_VARIANT_SMALL:
            size = wxRound(size * 3.0 / 4.0);
            break;

        case wxWINDOW_VARIANT_MINI:
            size = wxRound(size * 2.0 / 3.0);
            break;

        case wxWINDOW_VARIANT_LARGE:
            size = wxRound(size * 5.0 / 4.0);
            break;

        default:
            wxFAIL_MSG(wxT("unexpected window variant"));
            break;
    }

    font.SetPointSize(size);
    SetFont(font);
}

void wxWindowBase::DoSetSizeHints( int minW, int minH,
                                   int maxW, int maxH,
                                   int WXUNUSED(incW), int WXUNUSED(incH) )
{
    wxCHECK_RET(
        (minW == wxDefaultCoord || maxW == wxDefaultCoord || minW <= maxW) &&
        (minH == wxDefaultCoord || maxH == wxDefaultCoord || minH <= maxH),
        wxT("min width/height must be less than max width/height!") );

    m_minWidth = minW;
    m_maxWidth = maxW;
    m_minHeight = minH;
    m_maxHeight = maxH;
}

#if WXWIN_COMPATIBILITY_2_8
void wxWindowBase::SetVirtualSizeHints(int WXUNUSED(minW), int WXUNUSED(minH),
                                       int WXUNUSED(maxW), int WXUNUSED(maxH))
{
}

void wxWindowBase::SetVirtualSizeHints(const wxSize& WXUNUSED(minsize),
                                       const wxSize& WXUNUSED(maxsize))
{
}
#endif // WXWIN_COMPATIBILITY_2_8

void wxWindowBase::DoSetVirtualSize( int x, int y )
{
    m_virtualSize = wxSize(x, y);
}

wxSize wxWindowBase::DoGetVirtualSize() const
{
    // we should use the entire client area so if it is greater than our
    // virtual size, expand it to fit (otherwise if the window is big enough we
    // wouldn't be using parts of it)
    wxSize size = GetClientSize();
    if ( m_virtualSize.x > size.x )
        size.x = m_virtualSize.x;

    // NOTE(review): y uses >= while x uses > above -- the two comparisons
    // give the same result when equal, so presumably harmless, but confirm
    // against upstream before "fixing" the asymmetry.
    if ( m_virtualSize.y >= size.y )
        size.y = m_virtualSize.y;

    return size;
}

void wxWindowBase::DoGetScreenPosition(int *x, int *y) const
{
    // screen position is the same as (0, 0) in client coords for non TLWs (and
    // TLWs override this method)
    if ( x )
        *x = 0;
    if ( y )
        *y = 0;

    ClientToScreen(x, y);
}

// Send (or post, with wxSEND_EVENT_POST) a wxSizeEvent to this window.
void wxWindowBase::SendSizeEvent(int flags)
{
    wxSizeEvent event(GetSize(), GetId());
    event.SetEventObject(this);
    if ( flags & wxSEND_EVENT_POST )
        wxPostEvent(GetEventHandler(), event);
    else
        HandleWindowEvent(event);
}

void wxWindowBase::SendSizeEventToParent(int flags)
{
    wxWindow * const parent = GetParent();
    if ( parent && !parent->IsBeingDeleted() )
        parent->SendSizeEvent(flags);
}

// True if the window style allows scrolling in the given direction at all.
bool wxWindowBase::CanScroll(int orient) const
{
    return (m_windowStyle &
            (orient == wxHORIZONTAL ? wxHSCROLL : wxVSCROLL)) != 0;
}

// True if the scrollbar in the given direction is (or should be) shown,
// i.e. scrolling is enabled and the virtual size exceeds the client size.
bool wxWindowBase::HasScrollbar(int orient) const
{
    // if scrolling in the given direction is disabled, we can't have the
    // corresponding scrollbar no matter what
    if ( !CanScroll(orient) )
        return false;

    const wxSize sizeVirt = GetVirtualSize();
    const wxSize sizeClient = GetClientSize();

    return orient == wxHORIZONTAL ?
sizeVirt.x > sizeClient.x
            : sizeVirt.y > sizeClient.y;
}

// ----------------------------------------------------------------------------
// show/hide/enable/disable the window
// ----------------------------------------------------------------------------

// Base implementation only toggles the flag; ports do the actual showing.
// Returns true iff the shown state really changed.
bool wxWindowBase::Show(bool show)
{
    if ( show != m_isShown )
    {
        m_isShown = show;

        return true;
    }
    else
    {
        return false;
    }
}

// A window is enabled only if it and all its ancestors up to the first TLW
// are enabled.
bool wxWindowBase::IsEnabled() const
{
    return IsThisEnabled() &&
            (IsTopLevel() || !GetParent() || GetParent()->IsEnabled());
}

void wxWindowBase::NotifyWindowOnEnableChange(bool enabled)
{
    // Under some platforms there is no need to update the window state
    // explicitly, it will become disabled when its parent is. On other ones we
    // do need to disable all windows recursively though.
#ifndef wxHAS_NATIVE_ENABLED_MANAGEMENT
    DoEnable(enabled);
#endif // !defined(wxHAS_NATIVE_ENABLED_MANAGEMENT)

    // Disabling a top level window is typically done when showing a modal
    // dialog and we don't need to disable its children in this case, they will
    // be logically disabled anyhow (i.e. their IsEnabled() will return false)
    // and the TLW won't accept any input for them. Moreover, explicitly
    // disabling them would look ugly as the entire TLW would be greyed out
    // whenever a modal dialog is shown and no native applications under any
    // platform behave like this.
    if ( IsTopLevel() && !enabled )
        return;

    // When disabling (or enabling back) a non-TLW window we need to
    // recursively propagate the change of the state to its children, otherwise
    // they would still show as enabled even though they wouldn't actually
    // accept any input (at least under MSW where children don't accept input
    // if any of the windows in their parent chain is enabled).
#ifndef wxHAS_NATIVE_ENABLED_MANAGEMENT
    for ( wxWindowList::compatibility_iterator
            node = GetChildren().GetFirst();
          node;
          node = node->GetNext() )
    {
        wxWindowBase * const child = node->GetData();
        if ( !child->IsTopLevel() && child->IsThisEnabled() )
            child->NotifyWindowOnEnableChange(enabled);
    }
#endif // !defined(wxHAS_NATIVE_ENABLED_MANAGEMENT)
}

// Enable or disable the window (and, via NotifyWindowOnEnableChange(), its
// children where required). Returns true iff the state really changed.
bool wxWindowBase::Enable(bool enable)
{
    if ( enable == IsThisEnabled() )
        return false;

    m_isEnabled = enable;

    // If we call DoEnable() from NotifyWindowOnEnableChange(), we don't need
    // to do it from here.
#ifdef wxHAS_NATIVE_ENABLED_MANAGEMENT
    DoEnable(enable);
#endif // wxHAS_NATIVE_ENABLED_MANAGEMENT

    NotifyWindowOnEnableChange(enable);

    return true;
}

bool wxWindowBase::IsShownOnScreen() const
{
    // A window is shown on screen if it itself is shown and so are all its
    // parents. But if a window is toplevel one, then its always visible on
    // screen if IsShown() returns true, even if it has a hidden parent.
    return IsShown() &&
           (IsTopLevel() ||
                GetParent() == NULL || GetParent()->IsShownOnScreen());
}

// ----------------------------------------------------------------------------
// RTTI
// ----------------------------------------------------------------------------

bool wxWindowBase::IsTopLevel() const
{
    return false;
}

// ----------------------------------------------------------------------------
// Freeze/Thaw
// ----------------------------------------------------------------------------

// Freeze() calls nest: only the outermost call physically freezes.
void wxWindowBase::Freeze()
{
    if ( !m_freezeCount++ )
    {
        // physically freeze this window:
        DoFreeze();

        // and recursively freeze all children:
        for ( wxWindowList::iterator i = GetChildren().begin();
              i != GetChildren().end();
              ++i )
        {
            wxWindow *child = *i;
            if ( child->IsTopLevel() )
                continue;

            child->Freeze();
        }
    }
}

// Matching un-nesting counterpart of Freeze().
void wxWindowBase::Thaw()
{
    wxASSERT_MSG( m_freezeCount, "Thaw() without matching Freeze()" );

    if ( !--m_freezeCount )
    {
        // recursively thaw all children:
        for ( wxWindowList::iterator i = GetChildren().begin();
              i != GetChildren().end();
              ++i )
        {
            wxWindow *child = *i;
            if ( child->IsTopLevel() )
                continue;

            child->Thaw();
        }

        // physically thaw this window:
        DoThaw();
    }
}

// ----------------------------------------------------------------------------
// Dealing with parents and children.
// ----------------------------------------------------------------------------

bool wxWindowBase::IsDescendant(wxWindowBase* win) const
{
    // Iterate until we find this window in the parent chain or exhaust it.
    while ( win )
    {
        if ( win == this )
            return true;

        // Stop iterating on reaching the top level window boundary.
        if ( win->IsTopLevel() )
            break;

        win = win->GetParent();
    }

    return false;
}

void wxWindowBase::AddChild(wxWindowBase *child)
{
    wxCHECK_RET( child, wxT("can't add a NULL child") );

    // this should never happen and it will lead to a crash later if it does
    // because RemoveChild() will remove only one node from the children list
    // and the other(s) one(s) will be left with dangling pointers in them
    wxASSERT_MSG( !GetChildren().Find((wxWindow*)child),
                  wxT("AddChild() called twice") );

    GetChildren().Append((wxWindow*)child);
    child->SetParent(this);

    // adding a child while frozen will assert when thawed, so freeze it as if
    // it had been already present when we were frozen
    if ( IsFrozen() && !child->IsTopLevel() )
        child->Freeze();
}

void wxWindowBase::RemoveChild(wxWindowBase *child)
{
    wxCHECK_RET( child, wxT("can't remove a NULL child") );

    // removing a child while frozen may result in permanently frozen window
    // if used e.g. from Reparent(), so thaw it
// NB: IsTopLevel() doesn't return true any more when a TLW child is being
    //     removed from its ~wxWindowBase, so check for IsBeingDeleted() too
    if ( IsFrozen() && !child->IsBeingDeleted() && !child->IsTopLevel() )
        child->Thaw();

    GetChildren().DeleteObject((wxWindow *)child);
    child->SetParent(NULL);
}

void wxWindowBase::SetParent(wxWindowBase *parent)
{
    // This assert catches typos which may result in using "this" instead of
    // "parent" when creating the window. This doesn't happen often but when it
    // does the results are unpleasant because the program typically just
    // crashes when due to a stack overflow or something similar and this
    // assert doesn't cost much (OTOH doing a more general check that the
    // parent is not one of our children would be more expensive and probably
    // not worth it).
    wxASSERT_MSG( parent != this, wxS("Can't use window as its own parent") );

    m_parent = (wxWindow *)parent;
}

// Move this window under a new parent (or make it top-level when newParent
// is NULL). Returns false if nothing was done.
bool wxWindowBase::Reparent(wxWindowBase *newParent)
{
    wxWindow *oldParent = GetParent();
    if ( newParent == oldParent )
    {
        // nothing done
        return false;
    }

    const bool oldEnabledState = IsEnabled();

    // unlink this window from the existing parent.
    if ( oldParent )
    {
        oldParent->RemoveChild(this);
    }
    else
    {
        wxTopLevelWindows.DeleteObject((wxWindow *)this);
    }

    // add it to the new one
    if ( newParent )
    {
        newParent->AddChild(this);
    }
    else
    {
        wxTopLevelWindows.Append((wxWindow *)this);
    }

    // We need to notify window (and its subwindows) if by changing the parent
    // we also change our enabled/disabled status.
    const bool newEnabledState = IsEnabled();
    if ( newEnabledState != oldEnabledState )
    {
        NotifyWindowOnEnableChange(newEnabledState);
    }

    return true;
}

// ----------------------------------------------------------------------------
// event handler stuff
// ----------------------------------------------------------------------------

void wxWindowBase::SetEventHandler(wxEvtHandler *handler)
{
    wxCHECK_RET(handler != NULL, "SetEventHandler(NULL) called");

    m_eventHandler = handler;
}

void wxWindowBase::SetNextHandler(wxEvtHandler *WXUNUSED(handler))
{
    // disable wxEvtHandler chain mechanism for wxWindows:
    // wxWindow uses its own stack mechanism which doesn't mix well with
    // wxEvtHandler's one
    wxFAIL_MSG("wxWindow cannot be part of a wxEvtHandler chain");
}

void wxWindowBase::SetPreviousHandler(wxEvtHandler *WXUNUSED(handler))
{
    // we can't simply wxFAIL here as in SetNextHandler: in fact the last
    // handler of our stack when is destroyed will be Unlink()ed and thus
    // will call this function to update the pointer of this window...
    //wxFAIL_MSG("wxWindow cannot be part of a wxEvtHandler chain");
}

// Push a handler on top of this window's event handler stack; events will be
// offered to it before any previously pushed handlers and the window itself.
void wxWindowBase::PushEventHandler(wxEvtHandler *handlerToPush)
{
    wxCHECK_RET( handlerToPush != NULL, "PushEventHandler(NULL) called" );

    // the new handler is going to be part of the wxWindow stack of event
    // handlers: it can't be part also of an event handler double-linked chain:
    wxASSERT_MSG(handlerToPush->IsUnlinked(),
        "The handler being pushed in the wxWindow stack shouldn't be part of "
        "a wxEvtHandler chain; call Unlink() on it first");

    wxEvtHandler *handlerOld = GetEventHandler();
    wxCHECK_RET( handlerOld, "an old event handler is NULL?" );

    // now use wxEvtHandler double-linked list to implement a stack:
    handlerToPush->SetNextHandler(handlerOld);

    if (handlerOld != this)
        handlerOld->SetPreviousHandler(handlerToPush);

    SetEventHandler(handlerToPush);

#if wxDEBUG_LEVEL
    // final checks of the operations done above:
    wxASSERT_MSG( handlerToPush->GetPreviousHandler() == NULL,
        "the first handler of the wxWindow stack should "
        "have no previous handlers set" );
    wxASSERT_MSG( handlerToPush->GetNextHandler() != NULL,
        "the first handler of the wxWindow stack should "
        "have non-NULL next handler" );

    wxEvtHandler* pLast = handlerToPush;
    while ( pLast && pLast != this )
        pLast = pLast->GetNextHandler();
    wxASSERT_MSG( pLast->GetNextHandler() == NULL,
        "the last handler of the wxWindow stack should "
        "have this window as next handler" );
#endif // wxDEBUG_LEVEL
}

// Pop (and optionally delete) the topmost pushed handler; returns it, or
// NULL on error. Never pops the window itself.
wxEvtHandler *wxWindowBase::PopEventHandler(bool deleteHandler)
{
    // we need to pop the wxWindow stack, i.e. we need to remove the first
    // handler
    wxEvtHandler *firstHandler = GetEventHandler();
    wxCHECK_MSG( firstHandler != NULL, NULL,
                 "wxWindow cannot have a NULL event handler" );
    wxCHECK_MSG( firstHandler != this, NULL,
                 "cannot pop the wxWindow itself" );
    wxCHECK_MSG( firstHandler->GetPreviousHandler() == NULL, NULL,
        "the first handler of the wxWindow stack should have no previous handlers set" );

    wxEvtHandler *secondHandler = firstHandler->GetNextHandler();
    wxCHECK_MSG( secondHandler != NULL, NULL,
        "the first handler of the wxWindow stack should have non-NULL next handler" );

    firstHandler->SetNextHandler(NULL);

    // It is harmless but useless to unset the previous handler of the window
    // itself as it's always NULL anyhow, so don't do this.
if ( secondHandler != this )
        secondHandler->SetPreviousHandler(NULL);

    // now firstHandler is completely unlinked; set secondHandler as the new
    // window event handler
    SetEventHandler(secondHandler);

    if ( deleteHandler )
    {
        wxDELETE(firstHandler);
    }

    return firstHandler;
}

// Unlink the given pushed handler from anywhere in this window's handler
// stack; returns false (with an assert) if it wasn't found there.
bool wxWindowBase::RemoveEventHandler(wxEvtHandler *handlerToRemove)
{
    wxCHECK_MSG( handlerToRemove != NULL, false,
                 "RemoveEventHandler(NULL) called" );
    wxCHECK_MSG( handlerToRemove != this, false,
                 "Cannot remove the window itself" );

    if (handlerToRemove == GetEventHandler())
    {
        // removing the first event handler is equivalent to "popping" the
        // stack
        PopEventHandler(false);
        return true;
    }

    // NOTE: the wxWindow event handler list is always terminated with "this"
    //       handler
    wxEvtHandler *handlerCur = GetEventHandler()->GetNextHandler();
    while ( handlerCur != this && handlerCur )
    {
        wxEvtHandler *handlerNext = handlerCur->GetNextHandler();

        if ( handlerCur == handlerToRemove )
        {
            handlerCur->Unlink();

            wxASSERT_MSG( handlerCur != GetEventHandler(),
                        "the case Remove == Pop should was already handled" );
            return true;
        }

        handlerCur = handlerNext;
    }

    wxFAIL_MSG( wxT("where has the event handler gone?") );

    return false;
}

bool wxWindowBase::HandleWindowEvent(wxEvent& event) const
{
    // SafelyProcessEvent() will handle exceptions nicely
    return GetEventHandler()->SafelyProcessEvent(event);
}

// ----------------------------------------------------------------------------
// colours, fonts &c
// ----------------------------------------------------------------------------

// Copy explicitly-set font/colour attributes from the parent to this window.
void wxWindowBase::InheritAttributes()
{
    const wxWindowBase * const parent = GetParent();
    if ( !parent )
        return;

    // we only inherit attributes which had been explicitly set for the parent
    // which ensures that this only happens if the user really wants it and
    // not by default which wouldn't make any sense in modern GUIs where the
    // controls don't all use the same fonts (nor colours)
    if ( parent->m_inheritFont && !m_hasFont )
        SetFont(parent->GetFont());

    // in addition, there is a possibility to explicitly forbid inheriting
    // colours at each class level by overriding ShouldInheritColours()
    if ( ShouldInheritColours() )
    {
        if ( parent->m_inheritFgCol && !m_hasFgCol )
            SetForegroundColour(parent->GetForegroundColour());

        // inheriting (solid) background colour is wrong as it totally breaks
        // any kind of themed backgrounds
        //
        // instead, the controls should use the same background as their parent
        // (ideally by not drawing it at all)
#if 0
        if ( parent->m_inheritBgCol && !m_hasBgCol )
            SetBackgroundColour(parent->GetBackgroundColour());
#endif // 0
    }
}

/* static */ wxVisualAttributes
wxWindowBase::GetClassDefaultAttributes(wxWindowVariant WXUNUSED(variant))
{
    // it is important to return valid values for all attributes from here,
    // GetXXX() below rely on this
    wxVisualAttributes attrs;
    attrs.font = wxSystemSettings::GetFont(wxSYS_DEFAULT_GUI_FONT);
    attrs.colFg = wxSystemSettings::GetColour(wxSYS_COLOUR_WINDOWTEXT);

    // On Smartphone/PocketPC, wxSYS_COLOUR_WINDOW is a better reflection of
    // the usual background colour than wxSYS_COLOUR_BTNFACE.
    // It's a pity that wxSYS_COLOUR_WINDOW isn't always a suitable background
    // colour on other platforms.
#if defined(__WXWINCE__) && (defined(__SMARTPHONE__) || defined(__POCKETPC__))
    attrs.colBg = wxSystemSettings::GetColour(wxSYS_COLOUR_WINDOW);
#else
    attrs.colBg = wxSystemSettings::GetColour(wxSYS_COLOUR_BTNFACE);
#endif

    return attrs;
}

// Returns the explicitly set background colour or a suitable default;
// always returns *some* valid colour for compatibility with older apps.
wxColour wxWindowBase::GetBackgroundColour() const
{
    if ( !m_backgroundColour.IsOk() )
    {
        wxASSERT_MSG( !m_hasBgCol, wxT("we have invalid explicit bg colour?") );

        // get our default background colour
        wxColour colBg = GetDefaultAttributes().colBg;

        // we must return some valid colour to avoid redoing this every time
        // and also to avoid surprising the applications written for older
        // wxWidgets versions where GetBackgroundColour() always returned
        // something -- so give them something even if it doesn't make sense
        // for this window (e.g. it has a themed background)
        if ( !colBg.IsOk() )
            colBg = GetClassDefaultAttributes().colBg;

        return colBg;
    }
    else
        return m_backgroundColour;
}

wxColour wxWindowBase::GetForegroundColour() const
{
    // logic is the same as above
    if ( !m_hasFgCol && !m_foregroundColour.IsOk() )
    {
        wxColour colFg = GetDefaultAttributes().colFg;

        if ( !colFg.IsOk() )
            colFg = GetClassDefaultAttributes().colFg;

        return colFg;
    }
    else
        return m_foregroundColour;
}

bool wxWindowBase::SetBackgroundStyle(wxBackgroundStyle style)
{
    // The checks below shouldn't be triggered if we're not really changing the
    // style.
    if ( style == m_backgroundStyle )
        return true;

    // Transparent background style can be only set before creation because of
    // wxGTK limitation.
    wxCHECK_MSG( (style != wxBG_STYLE_TRANSPARENT) || !GetHandle(),
                 false,
                 "wxBG_STYLE_TRANSPARENT style can only be set before "
                 "Create()-ing the window." );

    // And once it is set, wxBG_STYLE_TRANSPARENT can't be unset.
    wxCHECK_MSG( (m_backgroundStyle != wxBG_STYLE_TRANSPARENT) ||
                 (style == wxBG_STYLE_TRANSPARENT),
                 false,
                 "wxBG_STYLE_TRANSPARENT can't be unset once it was set."
);

    m_backgroundStyle = style;

    return true;
}

bool wxWindowBase::IsTransparentBackgroundSupported(wxString *reason) const
{
    if ( reason )
        *reason = _("This platform does not support background transparency.");

    return false;
}

// Sets (or, with an invalid colour, resets) the background colour and the
// related "explicitly set"/"inherit" flags; returns false if unchanged.
bool wxWindowBase::SetBackgroundColour( const wxColour &colour )
{
    if ( colour == m_backgroundColour )
        return false;

    m_hasBgCol = colour.IsOk();

    m_inheritBgCol = m_hasBgCol;
    m_backgroundColour = colour;
    // native theming is only kept while neither colour is customised
    SetThemeEnabled( !m_hasBgCol && !m_foregroundColour.IsOk() );
    return true;
}

// Symmetric counterpart of SetBackgroundColour() for the foreground.
bool wxWindowBase::SetForegroundColour( const wxColour &colour )
{
    if (colour == m_foregroundColour )
        return false;

    m_hasFgCol = colour.IsOk();

    m_inheritFgCol = m_hasFgCol;
    m_foregroundColour = colour;
    SetThemeEnabled( !m_hasFgCol && !m_backgroundColour.IsOk() );
    return true;
}

bool wxWindowBase::SetCursor(const wxCursor& cursor)
{
    // setting an invalid cursor is ok, it means that we don't have any special
    // cursor
    if ( m_cursor.IsSameAs(cursor) )
    {
        // no change
        return false;
    }

    m_cursor = cursor;

    return true;
}

wxFont wxWindowBase::GetFont() const
{
    // logic is the same as in GetBackgroundColour()
    if ( !m_font.IsOk() )
    {
        wxASSERT_MSG( !m_hasFont, wxT("we have invalid explicit font?") );

        wxFont font = GetDefaultAttributes().font;
        if ( !font.IsOk() )
            font = GetClassDefaultAttributes().font;

        return font;
    }
    else
        return m_font;
}

bool wxWindowBase::SetFont(const wxFont& font)
{
    if ( font == m_font )
    {
        // no change
        return false;
    }

    m_font = font;
    m_hasFont = font.IsOk();
    m_inheritFont = m_hasFont;

    // the font affects the best size, see GetBestSize()
    InvalidateBestSize();

    return true;
}

#if wxUSE_PALETTE

void wxWindowBase::SetPalette(const wxPalette& pal)
{
    m_hasCustomPalette = true;
    m_palette = pal;

    // VZ: can anyone explain me what do we do here?
    wxWindowDC d((wxWindow *) this);
    d.SetPalette(pal);
}

// Walk up the parent chain looking for the first window (possibly this one)
// with a custom palette; returns NULL if there is none.
wxWindow *wxWindowBase::GetAncestorWithCustomPalette() const
{
    wxWindow *win = (wxWindow *)this;
    while ( win && !win->HasCustomPalette() )
    {
        win = win->GetParent();
    }

    return win;
}

#endif // wxUSE_PALETTE

#if wxUSE_CARET
// Takes ownership of the caret: any previous caret is deleted.
void wxWindowBase::SetCaret(wxCaret *caret)
{
    if ( m_caret )
    {
        delete m_caret;
    }

    m_caret = caret;

    if ( m_caret )
    {
        wxASSERT_MSG( m_caret->GetWindow() == this,
                      wxT("caret should be created associated to this window") );
    }
}
#endif // wxUSE_CARET

#if wxUSE_VALIDATORS
// ----------------------------------------------------------------------------
// validators
// ----------------------------------------------------------------------------

// Stores a clone of the given validator (the window owns the clone).
void wxWindowBase::SetValidator(const wxValidator& validator)
{
    if ( m_windowValidator )
        delete m_windowValidator;

    m_windowValidator = (wxValidator *)validator.Clone();

    if ( m_windowValidator )
        m_windowValidator->SetWindow(this);
}
#endif // wxUSE_VALIDATORS

// ----------------------------------------------------------------------------
// update region stuff
// ----------------------------------------------------------------------------

// Bounding box of the update region clipped to the client area, expressed in
// client coordinates.
wxRect wxWindowBase::GetUpdateClientRect() const
{
    wxRegion rgnUpdate = GetUpdateRegion();
    rgnUpdate.Intersect(GetClientRect());
    wxRect rectUpdate = rgnUpdate.GetBox();
    wxPoint ptOrigin = GetClientAreaOrigin();
    rectUpdate.x -= ptOrigin.x;
    rectUpdate.y -= ptOrigin.y;

    return rectUpdate;
}

bool wxWindowBase::DoIsExposed(int x, int y) const
{
    return m_updateRegion.Contains(x, y) != wxOutRegion;
}

bool wxWindowBase::DoIsExposed(int x, int y, int w, int h) const
{
    return m_updateRegion.Contains(x, y, w, h) != wxOutRegion;
}

void wxWindowBase::ClearBackground()
{
    // wxGTK uses its own version, no need to add never used code
#ifndef __WXGTK__
    wxClientDC dc((wxWindow *)this);
    wxBrush brush(GetBackgroundColour(), wxBRUSHSTYLE_SOLID);
    dc.SetBackground(brush);
    dc.Clear();
#endif // __WXGTK__
}

// ----------------------------------------------------------------------------
// find child window by id or name
// ----------------------------------------------------------------------------

wxWindow *wxWindowBase::FindWindow(long id) const
{
    if ( id == m_windowId )
        return (wxWindow *)this;

    wxWindowBase *res = NULL;
    wxWindowList::compatibility_iterator node;
    for ( node = m_children.GetFirst(); node && !res; node = node->GetNext() )
    {
        wxWindowBase *child = node->GetData();

        // As usual, don't recurse into child dialogs, finding a button in a
        // child dialog when looking in this window would be unexpected.
        if ( child->IsTopLevel() )
            continue;

        res = child->FindWindow( id );
    }

    return (wxWindow *)res;
}

wxWindow *wxWindowBase::FindWindow(const wxString& name) const
{
    if ( name == m_windowName )
        return (wxWindow *)this;

    wxWindowBase *res = NULL;
    wxWindowList::compatibility_iterator node;
    for ( node = m_children.GetFirst(); node && !res; node = node->GetNext() )
    {
        wxWindow *child = node->GetData();

        // As in FindWindow() overload above, never recurse into child dialogs.
        if ( child->IsTopLevel() )
            continue;

        res = child->FindWindow(name);
    }

    return (wxWindow *)res;
}

// find any window by id or name or label: If parent is non-NULL, look through
// children for a label or title matching the specified string. If NULL, look
// through all top-level windows.
//
// to avoid duplicating code we reuse the same helper function but with
// different comparators

// Common predicate signature for the comparators below: each comparator
// inspects only one of "label"/"id" and ignores the other.
typedef bool (*wxFindWindowCmp)(const wxWindow *win,
                                const wxString& label, long id);

// comparator matching a window by its label
static bool wxFindWindowCmpLabels(const wxWindow *win, const wxString& label,
                                  long WXUNUSED(id))
{
    return win->GetLabel() == label;
}

// comparator matching a window by its name
static bool wxFindWindowCmpNames(const wxWindow *win, const wxString& label,
                                 long WXUNUSED(id))
{
    return win->GetName() == label;
}

// comparator matching a window by its numeric id
static bool wxFindWindowCmpIds(const wxWindow *win,
                               const wxString& WXUNUSED(label), long id)
{
    return win->GetId() == id;
}

// recursive helper for the FindWindowByXXX() functions: depth-first search of
// "parent" and its children, returning the first window satisfying "cmp"
static wxWindow *wxFindWindowRecursively(const wxWindow *parent,
                                         const wxString& label,
                                         long id,
                                         wxFindWindowCmp cmp)
{
    if ( parent )
    {
        // see if this is the one we're looking for
        if ( (*cmp)(parent, label, id) )
            return (wxWindow *)parent;

        // It wasn't, so check all its children
        for ( wxWindowList::compatibility_iterator node = parent->GetChildren().GetFirst();
              node;
              node = node->GetNext() )
        {
            // recursively check each child
            wxWindow *win = (wxWindow *)node->GetData();
            wxWindow *retwin = wxFindWindowRecursively(win, label, id, cmp);
            if (retwin)
                return retwin;
        }
    }

    // Not found
    return NULL;
}

// helper for FindWindowByXXX(): searches under "parent" if non-NULL,
// otherwise under every top level window in the application
static wxWindow *wxFindWindowHelper(const wxWindow *parent,
                                    const wxString& label,
                                    long id,
                                    wxFindWindowCmp cmp)
{
    if ( parent )
    {
        // just check parent and all its children
        return wxFindWindowRecursively(parent, label, id, cmp);
    }

    // start at very top of wx's windows
    for ( wxWindowList::compatibility_iterator node = wxTopLevelWindows.GetFirst();
          node;
          node = node->GetNext() )
    {
        // recursively check each window & its children
        wxWindow *win = node->GetData();
        wxWindow *retwin = wxFindWindowRecursively(win, label, id, cmp);
        if (retwin)
            return retwin;
    }

    return NULL;
}

/* static */
wxWindow *
wxWindowBase::FindWindowByLabel(const wxString& title, const wxWindow *parent)
{
    return wxFindWindowHelper(parent, title, 0, wxFindWindowCmpLabels);
}

/* static */
wxWindow *
wxWindowBase::FindWindowByName(const wxString& title, const wxWindow *parent)
{
    wxWindow *win = wxFindWindowHelper(parent, title, 0, wxFindWindowCmpNames);

    if ( !win )
    {
        // fall back to the label
        win = FindWindowByLabel(title, parent);
    }

    return win;
}

/* static */
wxWindow *
wxWindowBase::FindWindowById( long id, const wxWindow* parent )
{
    // the label argument is unused by wxFindWindowCmpIds, so pass an empty one
    return wxFindWindowHelper(parent, wxEmptyString, id, wxFindWindowCmpIds);
}

// ----------------------------------------------------------------------------
// dialog oriented functions
// ----------------------------------------------------------------------------

#if WXWIN_COMPATIBILITY_2_8
// Deprecated compatibility helper: (un)disables every other top level window
// to emulate modality for this one. Does nothing for non-top-level windows.
void wxWindowBase::MakeModal(bool modal)
{
    // Disable all other windows
    if ( IsTopLevel() )
    {
        wxWindowList::compatibility_iterator node = wxTopLevelWindows.GetFirst();
        while (node)
        {
            wxWindow *win = node->GetData();
            if (win != this)
                win->Enable(!modal);

            node = node->GetNext();
        }
    }
}
#endif // WXWIN_COMPATIBILITY_2_8

#if wxUSE_VALIDATORS

namespace
{

// This class encapsulates possibly recursive iteration on window children done
// by Validate() and TransferData{To,From}Window() and allows to avoid code
// duplication in all three functions.
class ValidationTraverserBase
{
public:
    wxEXPLICIT ValidationTraverserBase(wxWindowBase* win)
        : m_win(static_cast<wxWindow*>(win))
    {
    }

    // Traverse all the direct children calling OnDo() on them and also all
    // grandchildren if wxWS_EX_VALIDATE_RECURSIVELY is used, calling
    // OnRecurse() for them.
    //
    // Returns false as soon as any OnDo()/OnRecurse() call fails.
    bool DoForAllChildren()
    {
        const bool recurse = m_win->HasExtraStyle(wxWS_EX_VALIDATE_RECURSIVELY);

        wxWindowList& children = m_win->GetChildren();
        for ( wxWindowList::iterator i = children.begin();
              i != children.end();
              ++i )
        {
            wxWindow* const child = static_cast<wxWindow*>(*i);
            wxValidator* const validator = child->GetValidator();
            if ( validator && !OnDo(validator) )
            {
                return false;
            }

            // Notice that validation should never recurse into top level
            // children, e.g. some other dialog which might happen to be
            // currently shown.
            if ( recurse && !child->IsTopLevel() && !OnRecurse(child) )
            {
                return false;
            }
        }

        return true;
    }

    // Give it a virtual dtor just to suppress gcc warnings about a class with
    // virtual methods but non-virtual dtor -- even if this is completely safe
    // here as we never use the objects of this class polymorphically.
    virtual ~ValidationTraverserBase() { }

protected:
    // Called for each child, validator is guaranteed to be non-NULL.
    virtual bool OnDo(wxValidator* validator) = 0;

    // Called for each child if we need to recurse into its children.
    virtual bool OnRecurse(wxWindow* child) = 0;

    // The window whose children we're traversing.
    wxWindow* const m_win;

    wxDECLARE_NO_COPY_CLASS(ValidationTraverserBase);
};

} // anonymous namespace

#endif // wxUSE_VALIDATORS

// Run the validators of all children (and, with wxWS_EX_VALIDATE_RECURSIVELY,
// grandchildren); returns false if any of them fails.
bool wxWindowBase::Validate()
{
#if wxUSE_VALIDATORS
    class ValidateTraverser : public ValidationTraverserBase
    {
    public:
        wxEXPLICIT ValidateTraverser(wxWindowBase* win)
            : ValidationTraverserBase(win)
        {
        }

        virtual bool OnDo(wxValidator* validator)
        {
            return validator->Validate(m_win);
        }

        virtual bool OnRecurse(wxWindow* child)
        {
            return child->Validate();
        }
    };

    return ValidateTraverser(this).DoForAllChildren();
#else // !wxUSE_VALIDATORS
    return true;
#endif // wxUSE_VALIDATORS/!wxUSE_VALIDATORS
}

// Transfer data to all child windows via their validators; logs a warning
// (and flushes the active log target) on the first failure.
bool wxWindowBase::TransferDataToWindow()
{
#if wxUSE_VALIDATORS
    class DataToWindowTraverser : public ValidationTraverserBase
    {
    public:
        wxEXPLICIT DataToWindowTraverser(wxWindowBase* win)
            : ValidationTraverserBase(win)
        {
        }

        virtual bool OnDo(wxValidator* validator)
        {
            if ( !validator->TransferToWindow() )
            {
                wxLogWarning(_("Could not transfer data to window"));
#if wxUSE_LOG
                wxLog::FlushActive();
#endif // wxUSE_LOG

                return false;
            }

            return true;
        }

        virtual bool OnRecurse(wxWindow* child)
        {
            return child->TransferDataToWindow();
        }
    };

    return DataToWindowTraverser(this).DoForAllChildren();
#else // !wxUSE_VALIDATORS
    return true;
#endif // wxUSE_VALIDATORS/!wxUSE_VALIDATORS
}

// Transfer data from all child windows via their validators; unlike the
// To-direction, failures here are not logged, just reported to the caller.
bool wxWindowBase::TransferDataFromWindow()
{
#if wxUSE_VALIDATORS
    class DataFromWindowTraverser : public ValidationTraverserBase
    {
    public:
        DataFromWindowTraverser(wxWindowBase* win)
            : ValidationTraverserBase(win)
        {
        }

        virtual bool OnDo(wxValidator* validator)
        {
            return validator->TransferFromWindow();
        }

        virtual bool OnRecurse(wxWindow* child)
        {
            return child->TransferDataFromWindow();
        }
    };

    return DataFromWindowTraverser(this).DoForAllChildren();
#else // !wxUSE_VALIDATORS
    return true;
#endif // wxUSE_VALIDATORS/!wxUSE_VALIDATORS
}

// Send a wxEVT_INIT_DIALOG event to this window's event handler.
void wxWindowBase::InitDialog()
{
    wxInitDialogEvent event(GetId());
    event.SetEventObject( this );
    GetEventHandler()->ProcessEvent(event);
}

// ----------------------------------------------------------------------------
// context-sensitive help support
// ----------------------------------------------------------------------------

#if wxUSE_HELP

// associate this help text with this window
void wxWindowBase::SetHelpText(const wxString& text)
{
    wxHelpProvider *helpProvider = wxHelpProvider::Get();
    if ( helpProvider )
    {
        helpProvider->AddHelp(this, text);
    }
}

#if WXWIN_COMPATIBILITY_2_8
// associate this help text with all windows with the same id as this
// one
void wxWindowBase::SetHelpTextForId(const wxString& text)
{
    wxHelpProvider *helpProvider = wxHelpProvider::Get();
    if ( helpProvider )
    {
        helpProvider->AddHelp(GetId(), text);
    }
}
#endif // WXWIN_COMPATIBILITY_2_8

// get the help string associated with this window (may be empty)
// default implementation forwards calls to the help provider
wxString
wxWindowBase::GetHelpTextAtPoint(const wxPoint & WXUNUSED(pt),
                                 wxHelpEvent::Origin WXUNUSED(origin)) const
{
    wxString text;
    wxHelpProvider *helpProvider = wxHelpProvider::Get();
    if ( helpProvider )
    {
        text = helpProvider->GetHelp(this);
    }

    return text;
}

// show help for this window
void wxWindowBase::OnHelp(wxHelpEvent& event)
{
    wxHelpProvider *helpProvider = wxHelpProvider::Get();
    if ( helpProvider )
    {
        wxPoint pos = event.GetPosition();
        const wxHelpEvent::Origin origin = event.GetOrigin();
        if ( origin == wxHelpEvent::Origin_Keyboard )
        {
            // if the help event was generated from keyboard it shouldn't
            // appear at the mouse position (which is still the only position
            // associated with help event) if the mouse is far away, although
            // we still do use the mouse position if it's over the window
            // because we suppose the user looks approximately at the mouse
            // already and so it would be more convenient than showing tooltip
            // at some arbitrary position which can be quite far from it
            const wxRect rectClient = GetClientRect();
            if ( !rectClient.Contains(ScreenToClient(pos)) )
            {
                // position help slightly under and to the right of this window
                pos = ClientToScreen(wxPoint(
                        2*GetCharWidth(),
                        rectClient.height + GetCharHeight()
                      ));
            }
        }

        if ( helpProvider->ShowHelpAtPoint(this, pos, origin) )
        {
            // skip the event.Skip() below
            return;
        }
    }

    event.Skip();
}

#endif // wxUSE_HELP

// ----------------------------------------------------------------------------
// tooltips
// ----------------------------------------------------------------------------

#if wxUSE_TOOLTIPS

// Returns the tooltip text, or an empty string if no tooltip is set.
wxString wxWindowBase::GetToolTipText() const
{
    return m_tooltip ? m_tooltip->GetTip() : wxString();
}

void wxWindowBase::SetToolTip( const wxString &tip )
{
    // don't create the new tooltip if we already have one
    if ( m_tooltip )
    {
        m_tooltip->SetTip( tip );
    }
    else
    {
        SetToolTip( new wxToolTip( tip ) );
    }

    // setting empty tooltip text does not remove the tooltip any more - use
    // SetToolTip(NULL) for this
}

// Replace the current tooltip object with the given one; takes ownership of
// "tooltip" and deletes the previous one (NULL removes the tooltip).
void wxWindowBase::DoSetToolTip(wxToolTip *tooltip)
{
    if ( m_tooltip != tooltip )
    {
        if ( m_tooltip )
            delete m_tooltip;

        m_tooltip = tooltip;
    }
}

// Copy another window's tooltip (by value) to this one; returns true if
// "tip" was non-NULL, i.e. if there was anything to copy.
bool wxWindowBase::CopyToolTip(wxToolTip *tip)
{
    SetToolTip(tip ? new wxToolTip(tip->GetTip()) : NULL);

    return tip != NULL;
}

#endif // wxUSE_TOOLTIPS

// ----------------------------------------------------------------------------
// constraints and sizers
// ----------------------------------------------------------------------------

#if wxUSE_CONSTRAINTS

// Install a new constraints object (taking ownership of it) and register this
// window with every other window the constraints refer to.
void wxWindowBase::SetConstraints( wxLayoutConstraints *constraints )
{
    if ( m_constraints )
    {
        UnsetConstraints(m_constraints);
        delete m_constraints;
    }
    m_constraints = constraints;
    if ( m_constraints )
    {
        // Make sure other windows know they're part of a 'meaningful relationship'
        if ( m_constraints->left.GetOtherWindow() && (m_constraints->left.GetOtherWindow() != this) )
            m_constraints->left.GetOtherWindow()->AddConstraintReference(this);
        if ( m_constraints->top.GetOtherWindow() && (m_constraints->top.GetOtherWindow() != this) )
            m_constraints->top.GetOtherWindow()->AddConstraintReference(this);
        if ( m_constraints->right.GetOtherWindow() && (m_constraints->right.GetOtherWindow() != this) )
            m_constraints->right.GetOtherWindow()->AddConstraintReference(this);
        if ( m_constraints->bottom.GetOtherWindow() && (m_constraints->bottom.GetOtherWindow() != this) )
            m_constraints->bottom.GetOtherWindow()->AddConstraintReference(this);
        if ( m_constraints->width.GetOtherWindow() && (m_constraints->width.GetOtherWindow() != this) )
            m_constraints->width.GetOtherWindow()->AddConstraintReference(this);
        if ( m_constraints->height.GetOtherWindow() && (m_constraints->height.GetOtherWindow() != this) )
            m_constraints->height.GetOtherWindow()->AddConstraintReference(this);
        if ( m_constraints->centreX.GetOtherWindow() && (m_constraints->centreX.GetOtherWindow() != this) )
            m_constraints->centreX.GetOtherWindow()->AddConstraintReference(this);
        if ( m_constraints->centreY.GetOtherWindow() && (m_constraints->centreY.GetOtherWindow() != this) )
            m_constraints->centreY.GetOtherWindow()->AddConstraintReference(this);
    }
}

// This removes any dangling pointers to this window in other windows'
// constraintsInvolvedIn lists.
void wxWindowBase::UnsetConstraints(wxLayoutConstraints *c)
{
    if ( c )
    {
        if ( c->left.GetOtherWindow() && (c->left.GetOtherWindow() != this) )
            c->left.GetOtherWindow()->RemoveConstraintReference(this);
        if ( c->top.GetOtherWindow() && (c->top.GetOtherWindow() != this) )
            c->top.GetOtherWindow()->RemoveConstraintReference(this);
        if ( c->right.GetOtherWindow() && (c->right.GetOtherWindow() != this) )
            c->right.GetOtherWindow()->RemoveConstraintReference(this);
        if ( c->bottom.GetOtherWindow() && (c->bottom.GetOtherWindow() != this) )
            c->bottom.GetOtherWindow()->RemoveConstraintReference(this);
        if ( c->width.GetOtherWindow() && (c->width.GetOtherWindow() != this) )
            c->width.GetOtherWindow()->RemoveConstraintReference(this);
        if ( c->height.GetOtherWindow() && (c->height.GetOtherWindow() != this) )
            c->height.GetOtherWindow()->RemoveConstraintReference(this);
        if ( c->centreX.GetOtherWindow() && (c->centreX.GetOtherWindow() != this) )
            c->centreX.GetOtherWindow()->RemoveConstraintReference(this);
        if ( c->centreY.GetOtherWindow() && (c->centreY.GetOtherWindow() != this) )
            c->centreY.GetOtherWindow()->RemoveConstraintReference(this);
    }
}

// Back-pointer to other windows we're involved with, so if we delete this
// window, we must delete any constraints we're involved with.
void wxWindowBase::AddConstraintReference(wxWindowBase *otherWin)
{
    if ( !m_constraintsInvolvedIn )
        m_constraintsInvolvedIn = new wxWindowList;
    if ( !m_constraintsInvolvedIn->Find((wxWindow *)otherWin) )
        m_constraintsInvolvedIn->Append((wxWindow *)otherWin);
}

// REMOVE back-pointer to other windows we're involved with.
void wxWindowBase::RemoveConstraintReference(wxWindowBase *otherWin)
{
    if ( m_constraintsInvolvedIn )
        m_constraintsInvolvedIn->DeleteObject((wxWindow *)otherWin);
}

// Reset any constraints that mention this window
void wxWindowBase::DeleteRelatedConstraints()
{
    if ( m_constraintsInvolvedIn )
    {
        wxWindowList::compatibility_iterator node = m_constraintsInvolvedIn->GetFirst();
        while (node)
        {
            wxWindow *win = node->GetData();
            wxLayoutConstraints *constr = win->GetConstraints();

            // Reset any constraints involving this window
            if ( constr )
            {
                constr->left.ResetIfWin(this);
                constr->top.ResetIfWin(this);
                constr->right.ResetIfWin(this);
                constr->bottom.ResetIfWin(this);
                constr->width.ResetIfWin(this);
                constr->height.ResetIfWin(this);
                constr->centreX.ResetIfWin(this);
                constr->centreY.ResetIfWin(this);
            }

            // erase while iterating: grab the next node before Erase()
            wxWindowList::compatibility_iterator next = node->GetNext();
            m_constraintsInvolvedIn->Erase(node);
            node = next;
        }

        wxDELETE(m_constraintsInvolvedIn);
    }
}

#endif // wxUSE_CONSTRAINTS

// Attach a sizer to this window (taking ownership of it unless the old one is
// kept via deleteOld == false); enables auto layout iff a sizer is set.
void wxWindowBase::SetSizer(wxSizer *sizer, bool deleteOld)
{
    if ( sizer == m_windowSizer)
        return;

    if ( m_windowSizer )
    {
        m_windowSizer->SetContainingWindow(NULL);

        if ( deleteOld )
            delete m_windowSizer;
    }

    m_windowSizer = sizer;
    if ( m_windowSizer )
    {
        m_windowSizer->SetContainingWindow((wxWindow *)this);
    }

    SetAutoLayout(m_windowSizer != NULL);
}

// As SetSizer(), but also uses the sizer to set this window's initial size
// and size hints. NB: "sizer" is dereferenced, so it must not be NULL here.
void wxWindowBase::SetSizerAndFit(wxSizer *sizer, bool deleteOld)
{
    SetSizer( sizer, deleteOld );
    sizer->SetSizeHints( (wxWindow*) this );
}

void wxWindowBase::SetContainingSizer(wxSizer* sizer)
{
    // adding a window to a sizer twice is going to result in fatal and
    // hard to debug problems later because when deleting the second
    // associated wxSizerItem we're going to dereference a dangling
    // pointer; so try to detect this as early as possible
    wxASSERT_MSG( !sizer || m_containingSizer != sizer,
                  wxT("Adding a window to the same sizer twice?") );

    m_containingSizer = sizer;
}

#if wxUSE_CONSTRAINTS

// Evaluate this window's constraints until a fixed point is reached (phase 1)
// and then lay out the children (phase 2).
void wxWindowBase::SatisfyConstraints()
{
    wxLayoutConstraints *constr = GetConstraints();
    bool wasOk = constr && constr->AreSatisfied();

    ResetConstraints();   // Mark all constraints as unevaluated

    int noChanges = 1;

    // if we're a top level panel (i.e. our parent is frame/dialog), our
    // own constraints will never be satisfied any more unless we do it
    // here
    if ( wasOk )
    {
        while ( noChanges > 0 )
        {
            LayoutPhase1(&noChanges);
        }
    }

    LayoutPhase2(&noChanges);
}

#endif // wxUSE_CONSTRAINTS

bool wxWindowBase::Layout()
{
    // If there is a sizer, use it instead of the constraints
    if ( GetSizer() )
    {
        int w = 0, h = 0;
        GetVirtualSize(&w, &h);
        GetSizer()->SetDimension( 0, 0, w, h );
    }
#if wxUSE_CONSTRAINTS
    else
    {
        SatisfyConstraints(); // Find the right constraints values
        SetConstraintSizes(); // Recursively set the real window sizes
    }
#endif

    return true;
}

// Default wxEVT_SIZE handler: re-run layout when auto layout is enabled.
void wxWindowBase::InternalOnSize(wxSizeEvent& event)
{
    if ( GetAutoLayout() )
        Layout();

    event.Skip();
}

#if wxUSE_CONSTRAINTS

// first phase of the constraints evaluation: set our own constraints
bool wxWindowBase::LayoutPhase1(int *noChanges)
{
    wxLayoutConstraints *constr = GetConstraints();

    return !constr || constr->SatisfyConstraints(this, noChanges);
}

// second phase: set the constraints for our children
bool wxWindowBase::LayoutPhase2(int *noChanges)
{
    *noChanges = 0;

    // Layout children
    DoPhase(1);

    // Layout grand children
    DoPhase(2);

    return true;
}

// Do a phase of evaluating child constraints
bool wxWindowBase::DoPhase(int phase)
{
    // the list containing the children for which the constraints are already
    // set correctly
    wxWindowList succeeded;

    // the max number of iterations we loop before concluding that we can't set
    // the constraints
    static const int maxIterations = 500;

    for ( int noIterations = 0; noIterations < maxIterations; noIterations++ )
    {
        int noChanges = 0;

        // loop over all children setting their constraints
        for ( wxWindowList::compatibility_iterator node = GetChildren().GetFirst();
              node;
              node = node->GetNext() )
        {
            wxWindow *child = node->GetData();
            if ( child->IsTopLevel() )
            {
                // top level children are not inside our client area
                continue;
            }

            if ( !child->GetConstraints() || succeeded.Find(child) )
            {
                // this one is either already ok or nothing we can do about it
                continue;
            }

            int tempNoChanges = 0;
            bool success = phase == 1
                               ? child->LayoutPhase1(&tempNoChanges)
                               : child->LayoutPhase2(&tempNoChanges);
            noChanges += tempNoChanges;

            if ( success )
            {
                succeeded.Append(child);
            }
        }

        if ( !noChanges )
        {
            // constraints are set
            break;
        }
    }

    return true;
}

// Mark our own constraints and (recursively) those of all non-top-level
// children as not yet evaluated.
void wxWindowBase::ResetConstraints()
{
    wxLayoutConstraints *constr = GetConstraints();
    if ( constr )
    {
        constr->left.SetDone(false);
        constr->top.SetDone(false);
        constr->right.SetDone(false);
        constr->bottom.SetDone(false);
        constr->width.SetDone(false);
        constr->height.SetDone(false);
        constr->centreX.SetDone(false);
        constr->centreY.SetDone(false);
    }

    wxWindowList::compatibility_iterator node = GetChildren().GetFirst();
    while (node)
    {
        wxWindow *win = node->GetData();
        if ( !win->IsTopLevel() )
            win->ResetConstraints();
        node = node->GetNext();
    }
}

// Need to distinguish between setting the 'fake' size for windows and sizers,
// and setting the real values.
void wxWindowBase::SetConstraintSizes(bool recurse)
{
    wxLayoutConstraints *constr = GetConstraints();
    if ( constr && constr->AreSatisfied() )
    {
        ChildrenRepositioningGuard repositionGuard(this);

        int x = constr->left.GetValue();
        int y = constr->top.GetValue();
        int w = constr->width.GetValue();
        int h = constr->height.GetValue();

        if ( (constr->width.GetRelationship() != wxAsIs ) ||
             (constr->height.GetRelationship() != wxAsIs) )
        {
            // We really shouldn't set negative sizes for the windows so make
            // them at least of 1*1 size
            SetSize(x, y, w > 0 ? w : 1, h > 0 ? h : 1);
        }
        else
        {
            // If we don't want to resize this window, just move it...
            Move(x, y);
        }
    }
    else if ( constr )
    {
        wxLogDebug(wxT("Constraints not satisfied for %s named '%s'."),
                   GetClassInfo()->GetClassName(),
                   GetName().c_str());
    }

    if ( recurse )
    {
        wxWindowList::compatibility_iterator node = GetChildren().GetFirst();
        while (node)
        {
            wxWindow *win = node->GetData();
            if ( !win->IsTopLevel() && win->GetConstraints() )
                win->SetConstraintSizes();
            node = node->GetNext();
        }
    }
}

// Only set the size/position of the constraint (if any)
void wxWindowBase::SetSizeConstraint(int x, int y, int w, int h)
{
    wxLayoutConstraints *constr = GetConstraints();
    if ( constr )
    {
        // wxDefaultCoord means "leave this component of the constraint alone"
        if ( x != wxDefaultCoord )
        {
            constr->left.SetValue(x);
            constr->left.SetDone(true);
        }
        if ( y != wxDefaultCoord )
        {
            constr->top.SetValue(y);
            constr->top.SetDone(true);
        }
        if ( w != wxDefaultCoord )
        {
            constr->width.SetValue(w);
            constr->width.SetDone(true);
        }
        if ( h != wxDefaultCoord )
        {
            constr->height.SetValue(h);
            constr->height.SetDone(true);
        }
    }
}

// As SetSizeConstraint() but for the position components only.
void wxWindowBase::MoveConstraint(int x, int y)
{
    wxLayoutConstraints *constr = GetConstraints();
    if ( constr )
    {
        if ( x != wxDefaultCoord )
        {
            constr->left.SetValue(x);
            constr->left.SetDone(true);
        }
        if ( y != wxDefaultCoord )
        {
            constr->top.SetValue(y);
            constr->top.SetDone(true);
        }
    }
}

// Return the constrained size if constraints are set, the real size otherwise.
void wxWindowBase::GetSizeConstraint(int *w, int *h) const
{
    wxLayoutConstraints *constr = GetConstraints();
    if ( constr )
    {
        *w = constr->width.GetValue();
        *h = constr->height.GetValue();
    }
    else
        GetSize(w, h);
}

// Return the constrained size if constraints are set, the real client size
// otherwise. NB: the constraint stores a single width/height pair.
void wxWindowBase::GetClientSizeConstraint(int *w, int *h) const
{
    wxLayoutConstraints *constr = GetConstraints();
    if ( constr )
    {
        *w = constr->width.GetValue();
        *h = constr->height.GetValue();
    }
    else
        GetClientSize(w, h);
}

// Return the constrained position if constraints are set, the real position
// otherwise.
void wxWindowBase::GetPositionConstraint(int *x, int *y) const
{
    wxLayoutConstraints *constr = GetConstraints();
    if ( constr )
    {
        *x = constr->left.GetValue();
        *y = constr->top.GetValue();
    }
    else
        GetPosition(x, y);
}

#endif // wxUSE_CONSTRAINTS

// Translate (x, y) from parent client coordinates unless the caller asked for
// no adjustments or we have no parent.
void wxWindowBase::AdjustForParentClientOrigin(int& x, int& y, int sizeFlags) const
{
    wxWindow *parent = GetParent();
    if ( !(sizeFlags & wxSIZE_NO_ADJUSTMENTS) && parent )
    {
        wxPoint pt(parent->GetClientAreaOrigin());
        x += pt.x;
        y += pt.y;
    }
}

// ----------------------------------------------------------------------------
// Update UI processing
// ----------------------------------------------------------------------------

// Send a wxUpdateUIEvent for this window (applying the result via
// DoUpdateWindowUI()) and, with wxUPDATE_UI_RECURSE, for all children too.
void wxWindowBase::UpdateWindowUI(long flags)
{
    wxUpdateUIEvent event(GetId());
    event.SetEventObject(this);

    if ( GetEventHandler()->ProcessEvent(event) )
    {
        DoUpdateWindowUI(event);
    }

    if (flags & wxUPDATE_UI_RECURSE)
    {
        wxWindowList::compatibility_iterator node = GetChildren().GetFirst();
        while (node)
        {
            wxWindow* child = (wxWindow*) node->GetData();
            child->UpdateWindowUI(flags);
            node = node->GetNext();
        }
    }
}

// do the window-specific processing after processing the update event
void wxWindowBase::DoUpdateWindowUI(wxUpdateUIEvent& event)
{
    if ( event.GetSetEnabled() )
        Enable(event.GetEnabled());

    if ( event.GetSetShown() )
        Show(event.GetShown());
}

// ----------------------------------------------------------------------------
// Idle processing
// ----------------------------------------------------------------------------

// Send idle event to window and all subwindows
bool wxWindowBase::SendIdleEvents(wxIdleEvent& event)
{
    // true if any handler requested more idle events
    bool needMore = false;

    OnInternalIdle();

    // should we send idle event to this window?
    if (wxIdleEvent::GetMode() == wxIDLE_PROCESS_ALL ||
        HasExtraStyle(wxWS_EX_PROCESS_IDLE))
    {
        event.SetEventObject(this);
        HandleWindowEvent(event);

        if (event.MoreRequested())
            needMore = true;
    }
    wxWindowList::compatibility_iterator node = GetChildren().GetFirst();
    for (; node; node = node->GetNext())
    {
        wxWindow* child = node->GetData();
        if (child->SendIdleEvents(event))
            needMore = true;
    }

    return needMore;
}

// Per-idle housekeeping: run update UI processing when permitted by the
// current wxUpdateUIEvent update interval.
void wxWindowBase::OnInternalIdle()
{
    if ( wxUpdateUIEvent::CanUpdate(this) )
        UpdateWindowUI(wxUPDATE_UI_FROMIDLE);
}

// ----------------------------------------------------------------------------
// dialog units translations
// ----------------------------------------------------------------------------

// Windows' computes dialog units using average character width over upper-
// and lower-case ASCII alphabet and not using the average character width
// metadata stored in the font; see
// http://support.microsoft.com/default.aspx/kb/145994 for detailed discussion.
// It's important that we perform the conversion in identical way, because
// dialog units natively exist only on Windows and Windows HIG is expressed
// using them.
wxSize wxWindowBase::GetDlgUnitBase() const
{
    const wxWindowBase * const parent = wxGetTopLevelParent((wxWindow*)this);

    wxCHECK_MSG( parent, wxDefaultSize, wxS("Must have TLW parent") );

    if ( !parent->m_font.IsOk() )
    {
        // Default GUI font is used. This is the most common case, so
        // cache the results.
        static wxSize s_defFontSize;
        if ( s_defFontSize.x == 0 )
            s_defFontSize = wxPrivate::GetAverageASCIILetterSize(*parent);
        return s_defFontSize;
    }
    else
    {
        // Custom font, we always need to compute the result
        return wxPrivate::GetAverageASCIILetterSize(*parent);
    }
}

// Convert pixels to dialog units (4 horizontal / 8 vertical units per average
// character cell); wxDefaultCoord components are passed through untouched.
wxPoint wxWindowBase::ConvertPixelsToDialog(const wxPoint& pt) const
{
    const wxSize base = GetDlgUnitBase();

    // NB: wxMulDivInt32() is used, because it correctly rounds the result

    wxPoint pt2 = wxDefaultPosition;
    if (pt.x != wxDefaultCoord)
        pt2.x = wxMulDivInt32(pt.x, 4, base.x);
    if (pt.y != wxDefaultCoord)
        pt2.y = wxMulDivInt32(pt.y, 8, base.y);

    return pt2;
}

// Inverse of ConvertPixelsToDialog().
wxPoint wxWindowBase::ConvertDialogToPixels(const wxPoint& pt) const
{
    const wxSize base = GetDlgUnitBase();

    wxPoint pt2 = wxDefaultPosition;
    if (pt.x != wxDefaultCoord)
        pt2.x = wxMulDivInt32(pt.x, base.x, 4);
    if (pt.y != wxDefaultCoord)
        pt2.y = wxMulDivInt32(pt.y, base.y, 8);

    return pt2;
}

// ----------------------------------------------------------------------------
// event handlers
// ----------------------------------------------------------------------------

// propagate the colour change event to the subwindows
void wxWindowBase::OnSysColourChanged(wxSysColourChangedEvent& WXUNUSED(event))
{
    wxWindowList::compatibility_iterator node = GetChildren().GetFirst();
    while ( node )
    {
        // Only propagate to non-top-level windows
        wxWindow *win = node->GetData();
        if ( !win->IsTopLevel() )
        {
            wxSysColourChangedEvent event2;
            event2.SetEventObject(win);
            win->GetEventHandler()->ProcessEvent(event2);
        }

        node = node->GetNext();
    }

    Refresh();
}

// the default action is to populate dialog with data when it's created,
// and nudge the UI into displaying itself correctly in case
// we've turned the wxUpdateUIEvents frequency down low.
void wxWindowBase::OnInitDialog( wxInitDialogEvent &WXUNUSED(event) )
{
    TransferDataToWindow();

    // Update the UI at this point
    UpdateWindowUI(wxUPDATE_UI_RECURSE);
}

// ----------------------------------------------------------------------------
// menu-related functions
// ----------------------------------------------------------------------------

#if wxUSE_MENUS

// Show the given menu as a popup at (x, y); returns DoPopupMenu()'s result.
// wxCurrentPopupMenu is set for the duration of the call.
bool wxWindowBase::PopupMenu(wxMenu *menu, int x, int y)
{
    wxCHECK_MSG( menu, false, "can't popup NULL menu" );

    wxMenuInvokingWindowSetter
        setInvokingWin(*menu, static_cast<wxWindow *>(this));

    wxCurrentPopupMenu = menu;
    const bool rc = DoPopupMenu(menu, x, y);
    wxCurrentPopupMenu = NULL;

    return rc;
}

// this is used to pass the id of the selected item from the menu event handler
// to the main function itself
//
// it's ok to use a global here as there can be at most one popup menu shown at
// any time
static int gs_popupMenuSelection = wxID_NONE;

void wxWindowBase::InternalOnPopupMenu(wxCommandEvent& event)
{
    // store the id in a global variable where we'll retrieve it from later
    gs_popupMenuSelection = event.GetId();
}

void wxWindowBase::InternalOnPopupMenuUpdate(wxUpdateUIEvent& WXUNUSED(event))
{
    // nothing to do but do not skip it
}

// Show the menu modally and return the id of the selected item, or wxID_NONE
// if the menu was dismissed without selection.
int wxWindowBase::DoGetPopupMenuSelectionFromUser(wxMenu& menu, int x, int y)
{
    gs_popupMenuSelection = wxID_NONE;

    Connect(wxEVT_MENU,
            wxCommandEventHandler(wxWindowBase::InternalOnPopupMenu),
            NULL,
            this);

    // it is common to construct the menu passed to this function dynamically
    // using some fixed range of ids which could clash with the ids used
    // elsewhere in the program, which could result in some menu items being
    // unintentionally disabled or otherwise modified by update UI handlers
    // elsewhere in the program code and this is difficult to avoid in the
    // program itself, so instead we just temporarily suspend UI updating while
    // this menu is shown
    Connect(wxEVT_UPDATE_UI,
            wxUpdateUIEventHandler(wxWindowBase::InternalOnPopupMenuUpdate),
            NULL,
            this);

    PopupMenu(&menu, x, y);

    Disconnect(wxEVT_UPDATE_UI,
               wxUpdateUIEventHandler(wxWindowBase::InternalOnPopupMenuUpdate),
               NULL,
               this);
    Disconnect(wxEVT_MENU,
               wxCommandEventHandler(wxWindowBase::InternalOnPopupMenu),
               NULL,
               this);

    return gs_popupMenuSelection;
}

#endif // wxUSE_MENUS

// methods for drawing the sizers in a visible way: this is currently only
// enabled for "full debug" builds with wxDEBUG_LEVEL==2 as it doesn't work
// that well and also because we don't want to leave it enabled in default
// builds used for production
#if wxDEBUG_LEVEL > 1

static void DrawSizers(wxWindowBase *win);

// Draw a rectangle outline (or hatched fill) with the given pen on the window.
static void DrawBorder(wxWindowBase *win, const wxRect& rect, bool fill, const wxPen* pen)
{
    wxClientDC dc((wxWindow *)win);
    dc.SetPen(*pen);
    dc.SetBrush(fill ? wxBrush(pen->GetColour(), wxBRUSHSTYLE_CROSSDIAG_HATCH) :
                       *wxTRANSPARENT_BRUSH);
    dc.DrawRectangle(rect.Deflate(1, 1));
}

// Recursively outline all items of the sizer: nested sizers in red, spacers
// in blue (hatched), and windows via DrawSizers().
static void DrawSizer(wxWindowBase *win, wxSizer *sizer)
{
    const wxSizerItemList& items = sizer->GetChildren();
    for ( wxSizerItemList::const_iterator i = items.begin(),
                                          end = items.end();
          i != end;
          ++i )
    {
        wxSizerItem *item = *i;
        if ( item->IsSizer() )
        {
            DrawBorder(win, item->GetRect().Deflate(2), false, wxRED_PEN);
            DrawSizer(win, item->GetSizer());
        }
        else if ( item->IsSpacer() )
        {
            DrawBorder(win, item->GetRect().Deflate(2), true, wxBLUE_PEN);
        }
        else if ( item->IsWindow() )
        {
            DrawSizers(item->GetWindow());
        }
        else
            wxFAIL_MSG("inconsistent wxSizerItem status!");
    }
}

// Entry point of the debug visualisation: outlines the window's client area
// in green, then either its sizer hierarchy or (without a sizer) its children.
static void DrawSizers(wxWindowBase *win)
{
    DrawBorder(win, win->GetClientSize(), false, wxGREEN_PEN);

    wxSizer *sizer = win->GetSizer();
    if ( sizer )
    {
        DrawSizer(win, sizer);
    }
    else // no sizer, still recurse into the children
    {
        const wxWindowList& children = win->GetChildren();
        for ( wxWindowList::const_iterator i = children.begin(),
                                           end = children.end();
              i != end;
              ++i )
        {
            DrawSizers(*i);
        }

        // show all kind of sizes of this window; see the "window sizing" topic
        // overview for more info about the various differences:
        wxSize fullSz = win->GetSize();
        wxSize clientSz = win->GetClientSize();
        wxSize bestSz = win->GetBestSize();
        wxSize minSz = win->GetMinSize();
        wxSize maxSz = win->GetMaxSize();
        wxSize virtualSz = win->GetVirtualSize();

        wxMessageOutputDebug dbgout;
        dbgout.Printf(
            "%-10s => fullsz=%4d;%-4d clientsz=%4d;%-4d bestsz=%4d;%-4d minsz=%4d;%-4d maxsz=%4d;%-4d virtualsz=%4d;%-4d\n",
            win->GetName(),
            fullSz.x, fullSz.y,
            clientSz.x, clientSz.y,
            bestSz.x, bestSz.y,
            minSz.x, minSz.y,
            maxSz.x, maxSz.y,
            virtualSz.x, virtualSz.y);
    }
}

#endif // wxDEBUG_LEVEL

// process special middle clicks
void wxWindowBase::OnMiddleClick( wxMouseEvent& event )
{
    if ( event.ControlDown() && event.AltDown() )
    {
#if wxDEBUG_LEVEL > 1
        // Ctrl-Alt-Shift-mclick makes the sizers visible in debug builds
        if ( event.ShiftDown() )
        {
            DrawSizers(this);
        }
        else
#endif // wxDEBUG_LEVEL > 1
        {
#if wxUSE_MSGDLG
            // just Ctrl-Alt-middle click shows information about wx version
            ::wxInfoMessageBox((wxWindow*)this);
#endif // wxUSE_MSGDLG
        }
    }
    else
    {
        event.Skip();
    }
}

// ----------------------------------------------------------------------------
// accessibility
// ----------------------------------------------------------------------------

#if wxUSE_ACCESSIBILITY
// Install a new accessible object (taking ownership); deletes any previously
// installed, different one.
void wxWindowBase::SetAccessible(wxAccessible* accessible)
{
    if (m_accessible && (accessible != m_accessible))
        delete m_accessible;
    m_accessible = accessible;
    if (m_accessible)
        m_accessible->SetWindow((wxWindow*) this);
}

// Returns the accessible object, creating if necessary.
wxAccessible* wxWindowBase::GetOrCreateAccessible()
{
    if (!m_accessible)
        m_accessible = CreateAccessible();
    return m_accessible;
}

// Override to create a specific accessible object.
wxAccessible* wxWindowBase::CreateAccessible() { return new wxWindowAccessible((wxWindow*) this); } #endif // ---------------------------------------------------------------------------- // list classes implementation // ---------------------------------------------------------------------------- #if wxUSE_STD_CONTAINERS #include "wx/listimpl.cpp" WX_DEFINE_LIST(wxWindowList) #else // !wxUSE_STD_CONTAINERS void wxWindowListNode::DeleteData() { delete (wxWindow *)GetData(); } #endif // wxUSE_STD_CONTAINERS/!wxUSE_STD_CONTAINERS // ---------------------------------------------------------------------------- // borders // ---------------------------------------------------------------------------- wxBorder wxWindowBase::GetBorder(long flags) const { wxBorder border = (wxBorder)(flags & wxBORDER_MASK); if ( border == wxBORDER_DEFAULT ) { border = GetDefaultBorder(); } else if ( border == wxBORDER_THEME ) { border = GetDefaultBorderForControl(); } return border; } wxBorder wxWindowBase::GetDefaultBorder() const { return wxBORDER_NONE; } // ---------------------------------------------------------------------------- // hit testing // ---------------------------------------------------------------------------- wxHitTest wxWindowBase::DoHitTest(wxCoord x, wxCoord y) const { // here we just check if the point is inside the window or not // check the top and left border first bool outside = x < 0 || y < 0; if ( !outside ) { // check the right and bottom borders too wxSize size = GetSize(); outside = x >= size.x || y >= size.y; } return outside ? wxHT_WINDOW_OUTSIDE : wxHT_WINDOW_INSIDE; } // ---------------------------------------------------------------------------- // mouse capture // ---------------------------------------------------------------------------- // Private data used for mouse capture tracking. namespace wxMouseCapture { // Stack of the windows which previously had the capture, the top most element // is the window that has the mouse capture now. 
//
// NB: We use wxVector and not wxStack to be able to examine all of the stack
//     elements for debug checks, but only the stack operations should be
//     performed with this vector.
wxVector<wxWindow*> stack;

// Flag preventing reentrancy in {Capture,Release}Mouse().
wxRecursionGuardFlag changing;

// Returns true if the given window appears anywhere in the capture stack.
bool IsInCaptureStack(wxWindowBase* win)
{
    for ( wxVector<wxWindow*>::const_iterator it = stack.begin();
          it != stack.end();
          ++it )
    {
        if ( *it == win )
            return true;
    }

    return false;
}

} // wxMouseCapture

// Grab the mouse for this window, releasing it first from any previous owner
// (which is remembered on the capture stack and restored by ReleaseMouse()).
void wxWindowBase::CaptureMouse()
{
    wxLogTrace(wxT("mousecapture"), wxT("CaptureMouse(%p)"),
               static_cast<void*>(this));

    wxRecursionGuard guard(wxMouseCapture::changing);
    wxASSERT_MSG( !guard.IsInside(), wxT("recursive CaptureMouse call?") );

    wxASSERT_MSG( !wxMouseCapture::IsInCaptureStack(this),
                  "Recapturing the mouse in the same window?" );

    // only one window may hold the native capture at any time
    wxWindow *winOld = GetCapture();
    if ( winOld )
        ((wxWindowBase*) winOld)->DoReleaseMouse();

    DoCaptureMouse();

    wxMouseCapture::stack.push_back(static_cast<wxWindow*>(this));
}

// Release the mouse capture held by this window and give it back to the
// previous owner on the capture stack, if any.
void wxWindowBase::ReleaseMouse()
{
    wxLogTrace(wxT("mousecapture"), wxT("ReleaseMouse(%p)"),
               static_cast<void*>(this));

    wxRecursionGuard guard(wxMouseCapture::changing);
    wxASSERT_MSG( !guard.IsInside(), wxT("recursive ReleaseMouse call?") );

#if wxDEBUG_LEVEL
    // sanity check: this window must currently hold the capture
    wxWindow* const winCapture = GetCapture();
    if ( !winCapture )
    {
        wxFAIL_MSG
        (
          wxString::Format
          (
            "Releasing mouse in %p(%s) but it is not captured",
            this, GetClassInfo()->GetClassName()
          )
        );
    }
    else if ( winCapture != this )
    {
        wxFAIL_MSG
        (
          wxString::Format
          (
            "Releasing mouse in %p(%s) but it is captured by %p(%s)",
            this, GetClassInfo()->GetClassName(),
            winCapture, winCapture->GetClassInfo()->GetClassName()
          )
        );
    }
#endif // wxDEBUG_LEVEL

    DoReleaseMouse();

    wxCHECK_RET( !wxMouseCapture::stack.empty(),
                 "Releasing mouse capture but capture stack empty?" );
    wxCHECK_RET( wxMouseCapture::stack.back() == this,
                 "Window releasing mouse capture not top of capture stack?" );

    wxMouseCapture::stack.pop_back();

    // Restore the capture to the previous window, if any.
    if ( !wxMouseCapture::stack.empty() )
    {
        ((wxWindowBase*)wxMouseCapture::stack.back())->DoCaptureMouse();
    }

    wxLogTrace(wxT("mousecapture"),
               wxT("After ReleaseMouse() mouse is captured by %p"),
               static_cast<void*>(GetCapture()));
}

// Deliver wxEVT_MOUSE_CAPTURE_LOST to the given window and assert if the
// window fails to handle it (handling it is mandatory).
static void DoNotifyWindowAboutCaptureLost(wxWindow *win)
{
    wxMouseCaptureLostEvent event(win->GetId());
    event.SetEventObject(win);
    if ( !win->GetEventHandler()->ProcessEvent(event) )
    {
        // windows must handle this event, otherwise the app wouldn't behave
        // correctly if it loses capture unexpectedly; see the discussion here:
        // http://sourceforge.net/tracker/index.php?func=detail&aid=1153662&group_id=9863&atid=109863
        // http://article.gmane.org/gmane.comp.lib.wxwidgets.devel/82376
        wxFAIL_MSG( wxT("window that captured the mouse didn't process wxEVT_MOUSE_CAPTURE_LOST") );
    }
}

/* static */
void wxWindowBase::NotifyCaptureLost()
{
    // don't do anything if capture lost was expected, i.e. resulted from
    // a wx call to ReleaseMouse or CaptureMouse:
    wxRecursionGuard guard(wxMouseCapture::changing);
    if ( guard.IsInside() )
        return;

    // if the capture was lost unexpectedly, notify every window that has
    // capture (on stack or current) about it and clear the stack:
    while ( !wxMouseCapture::stack.empty() )
    {
        DoNotifyWindowAboutCaptureLost(wxMouseCapture::stack.back());

        wxMouseCapture::stack.pop_back();
    }
}

#if wxUSE_HOTKEY

bool wxWindowBase::RegisterHotKey(int WXUNUSED(hotkeyId),
                                  int WXUNUSED(modifiers),
                                  int WXUNUSED(keycode))
{
    // not implemented
    return false;
}

bool wxWindowBase::UnregisterHotKey(int WXUNUSED(hotkeyId))
{
    // not implemented
    return false;
}

#endif // wxUSE_HOTKEY

// ----------------------------------------------------------------------------
// event processing
// ----------------------------------------------------------------------------

bool wxWindowBase::TryBefore(wxEvent& event)
{
#if wxUSE_VALIDATORS
    // Can only use the validator of the window which
    // is receiving the event
    if ( event.GetEventObject() == this )
    {
        wxValidator * const validator = GetValidator();
        if ( validator && validator->ProcessEventLocally(event) )
        {
            return true;
        }
    }
#endif // wxUSE_VALIDATORS

    return wxEvtHandler::TryBefore(event);
}

bool wxWindowBase::TryAfter(wxEvent& event)
{
    // carry on up the parent-child hierarchy if the propagation count hasn't
    // reached zero yet
    if ( event.ShouldPropagate() )
    {
        // honour the requests to stop propagation at this window: this is
        // used by the dialogs, for example, to prevent processing the events
        // from the dialog controls in the parent frame which rarely, if ever,
        // makes sense
        if ( !(GetExtraStyle() & wxWS_EX_BLOCK_EVENTS) )
        {
            wxWindow *parent = GetParent();
            if ( parent && !parent->IsBeingDeleted() )
            {
                wxPropagateOnce propagateOnce(event, this);

                return parent->GetEventHandler()->ProcessEvent(event);
            }
        }
    }

    return wxEvtHandler::TryAfter(event);
}

// ----------------------------------------------------------------------------
// window relationships
// ----------------------------------------------------------------------------

// Returns the previous (OrderBefore) or next (OrderAfter) sibling, or NULL.
wxWindow *wxWindowBase::DoGetSibling(WindowOrder order) const
{
    wxCHECK_MSG( GetParent(), NULL,
                 wxT("GetPrev/NextSibling() don't work for TLWs!") );

    wxWindowList& siblings = GetParent()->GetChildren();
    wxWindowList::compatibility_iterator i = siblings.Find((wxWindow *)this);
    wxCHECK_MSG( i, NULL, wxT("window not a child of its parent?") );

    if ( order == OrderBefore )
        i = i->GetPrevious();
    else // OrderAfter
        i = i->GetNext();

    return i ? i->GetData() : NULL;
}

// ----------------------------------------------------------------------------
// keyboard navigation
// ----------------------------------------------------------------------------

// Navigates in the specified direction inside this window
bool wxWindowBase::DoNavigateIn(int flags)
{
#ifdef wxHAS_NATIVE_TAB_TRAVERSAL
    // native code doesn't process our wxNavigationKeyEvents anyhow
    wxUnusedVar(flags);

    return false;
#else // !wxHAS_NATIVE_TAB_TRAVERSAL
    wxNavigationKeyEvent eventNav;
    wxWindow *focused = FindFocus();
    eventNav.SetCurrentFocus(focused);
    eventNav.SetEventObject(focused);
    eventNav.SetFlags(flags);
    return GetEventHandler()->ProcessEvent(eventNav);
#endif // wxHAS_NATIVE_TAB_TRAVERSAL/!wxHAS_NATIVE_TAB_TRAVERSAL
}

// Translate a TAB key event into the corresponding navigation request;
// returns false (and does nothing) for any other key.
bool wxWindowBase::HandleAsNavigationKey(const wxKeyEvent& event)
{
    if ( event.GetKeyCode() != WXK_TAB )
        return false;

    int flags = wxNavigationKeyEvent::FromTab;

    if ( event.ShiftDown() )
        flags |= wxNavigationKeyEvent::IsBackward;
    else
        flags |= wxNavigationKeyEvent::IsForward;

    if ( event.ControlDown() )
        flags |= wxNavigationKeyEvent::WinChange;

    Navigate(flags);

    return true;
}

// Move this window before (OrderBefore) or after (OrderAfter) the given
// sibling in the parent's child list, which defines the tab order.
void wxWindowBase::DoMoveInTabOrder(wxWindow *win, WindowOrder move)
{
    // check that we're not a top level window
    wxCHECK_RET( GetParent(),
                 wxT("MoveBefore/AfterInTabOrder() don't work for TLWs!") );

    // detect the special case when we have nothing to do anyhow and when the
    // code below wouldn't work
    if ( win == this )
        return;

    // find the target window in the siblings list
    wxWindowList& siblings = GetParent()->GetChildren();
    wxWindowList::compatibility_iterator i = siblings.Find(win);
    wxCHECK_RET( i, wxT("MoveBefore/AfterInTabOrder(): win is not a sibling") );

    // unfortunately, when wxUSE_STD_CONTAINERS == 1 DetachNode() is not
    // implemented so we can't just move the node around
    wxWindow *self = (wxWindow *)this;
    siblings.DeleteObject(self);
    if ( move == OrderAfter )
    {
        i = i->GetNext();
    }

    if ( i )
    {
        siblings.Insert(i, self);
    }
    else // OrderAfter and win was the last sibling
    {
        siblings.Append(self);
    }
}

// ----------------------------------------------------------------------------
// focus handling
// ----------------------------------------------------------------------------

/*static*/ wxWindow* wxWindowBase::FindFocus()
{
    wxWindowBase *win = DoFindFocus();
    return win ? win->GetMainWindowOfCompositeControl() : NULL;
}

bool wxWindowBase::HasFocus() const
{
    wxWindowBase* const win = DoFindFocus();
    return win &&
            (this == win || this == win->GetMainWindowOfCompositeControl());
}

// ----------------------------------------------------------------------------
// drag and drop
// ----------------------------------------------------------------------------

#if wxUSE_DRAG_AND_DROP && !defined(__WXMSW__)

namespace
{

// Adaptor turning wxFileDropTarget callbacks into wxEVT_DROP_FILES events.
class DragAcceptFilesTarget : public wxFileDropTarget
{
public:
    DragAcceptFilesTarget(wxWindowBase *win) : m_win(win) {}

    virtual bool OnDropFiles(wxCoord x, wxCoord y,
                             const wxArrayString& filenames)
    {
        wxDropFilesEvent event(wxEVT_DROP_FILES,
                               filenames.size(),
                               wxCArrayString(filenames).Release());
        event.SetEventObject(m_win);
        event.m_pos.x = x;
        event.m_pos.y = y;

        return m_win->HandleWindowEvent(event);
    }

private:
    wxWindowBase * const m_win;

    wxDECLARE_NO_COPY_CLASS(DragAcceptFilesTarget);
};

} // anonymous namespace

// Generic version of DragAcceptFiles(). It works by installing a simple
// wxFileDropTarget-to-EVT_DROP_FILES adaptor and therefore cannot be used
// together with explicit SetDropTarget() calls.
void wxWindowBase::DragAcceptFiles(bool accept)
{
    if ( accept )
    {
        wxASSERT_MSG( !GetDropTarget(),
                      "cannot use DragAcceptFiles() and SetDropTarget() together" );
        SetDropTarget(new DragAcceptFilesTarget(this));
    }
    else
    {
        SetDropTarget(NULL);
    }
}

#endif // wxUSE_DRAG_AND_DROP && !defined(__WXMSW__)

// ----------------------------------------------------------------------------
// global functions
// ----------------------------------------------------------------------------

// Walk up the parent chain until a top level window (or NULL) is reached.
wxWindow* wxGetTopLevelParent(wxWindow *win)
{
    while ( win && !win->IsTopLevel() )
         win = win->GetParent();

    return win;
}

#if wxUSE_ACCESSIBILITY
// ----------------------------------------------------------------------------
// accessible object for windows
// ----------------------------------------------------------------------------

// Can return either a child object, or an integer
// representing the child element, starting from 1.
wxAccStatus wxWindowAccessible::HitTest(const wxPoint& WXUNUSED(pt),
                                        int* WXUNUSED(childId),
                                        wxAccessible** WXUNUSED(childObject))
{
    wxASSERT( GetWindow() != NULL );
    if (!GetWindow())
        return wxACC_FAIL;

    return wxACC_NOT_IMPLEMENTED;
}

// Returns the rectangle for this object (id = 0) or a child element (id > 0).
wxAccStatus wxWindowAccessible::GetLocation(wxRect& rect, int elementId)
{
    wxASSERT( GetWindow() != NULL );
    if (!GetWindow())
        return wxACC_FAIL;

    wxWindow* win = NULL;
    if (elementId == 0)
    {
        win = GetWindow();
    }
    else
    {
        // child elements are numbered from 1
        if (elementId <= (int) GetWindow()->GetChildren().GetCount())
        {
            win = GetWindow()->GetChildren().Item(elementId-1)->GetData();
        }
        else
            return wxACC_FAIL;
    }
    if (win)
    {
        // convert the window rectangle to screen coordinates, except for a
        // top level window whose rectangle already is in screen coordinates
        rect = win->GetRect();
        if (win->GetParent() && !wxDynamicCast(win, wxTopLevelWindow))
            rect.SetPosition(win->GetParent()->ClientToScreen(rect.GetPosition()));
        return wxACC_OK;
    }

    return wxACC_NOT_IMPLEMENTED;
}

// Navigates from fromId to toId/toObject.
wxAccStatus wxWindowAccessible::Navigate(wxNavDir navDir, int fromId,
                                         int* WXUNUSED(toId),
                                         wxAccessible** toObject)
{
    wxASSERT( GetWindow() != NULL );
    if (!GetWindow())
        return wxACC_FAIL;

    switch (navDir)
    {
    case wxNAVDIR_FIRSTCHILD:
        {
            if (GetWindow()->GetChildren().GetCount() == 0)
                return wxACC_FALSE;
            wxWindow* childWindow = (wxWindow*) GetWindow()->GetChildren().GetFirst()->GetData();
            *toObject = childWindow->GetOrCreateAccessible();

            return wxACC_OK;
        }
    case wxNAVDIR_LASTCHILD:
        {
            if (GetWindow()->GetChildren().GetCount() == 0)
                return wxACC_FALSE;
            wxWindow* childWindow = (wxWindow*) GetWindow()->GetChildren().GetLast()->GetData();
            *toObject = childWindow->GetOrCreateAccessible();

            return wxACC_OK;
        }
    case wxNAVDIR_RIGHT:
    case wxNAVDIR_DOWN:
    case wxNAVDIR_NEXT:
        {
            // fromId == 0 means "next sibling of this window", fromId > 0
            // means "sibling of the fromId-th child".
            wxWindowList::compatibility_iterator node =
                wxWindowList::compatibility_iterator();
            if (fromId == 0)
            {
                // Can't navigate to sibling of this window
                // if we're a top-level window.
                if (!GetWindow()->GetParent())
                    return wxACC_NOT_IMPLEMENTED;

                node = GetWindow()->GetParent()->GetChildren().Find(GetWindow());
            }
            else
            {
                if (fromId <= (int) GetWindow()->GetChildren().GetCount())
                    node = GetWindow()->GetChildren().Item(fromId-1);
            }

            if (node && node->GetNext())
            {
                wxWindow* nextWindow = node->GetNext()->GetData();
                *toObject = nextWindow->GetOrCreateAccessible();

                return wxACC_OK;
            }
            else
                return wxACC_FALSE;
        }
    case wxNAVDIR_LEFT:
    case wxNAVDIR_UP:
    case wxNAVDIR_PREVIOUS:
        {
            wxWindowList::compatibility_iterator node =
                wxWindowList::compatibility_iterator();
            if (fromId == 0)
            {
                // Can't navigate to sibling of this window
                // if we're a top-level window.
                if (!GetWindow()->GetParent())
                    return wxACC_NOT_IMPLEMENTED;

                node = GetWindow()->GetParent()->GetChildren().Find(GetWindow());
            }
            else
            {
                if (fromId <= (int) GetWindow()->GetChildren().GetCount())
                    node = GetWindow()->GetChildren().Item(fromId-1);
            }

            if (node && node->GetPrevious())
            {
                wxWindow* previousWindow = node->GetPrevious()->GetData();
                *toObject = previousWindow->GetOrCreateAccessible();

                return wxACC_OK;
            }
            else
                return wxACC_FALSE;
        }
    }

    return wxACC_NOT_IMPLEMENTED;
}

// Gets the name of the specified object.
wxAccStatus wxWindowAccessible::GetName(int childId, wxString* name)
{
    wxASSERT( GetWindow() != NULL );
    if (!GetWindow())
        return wxACC_FAIL;

    wxString title;

    // If a child, leave wxWidgets to call the function on the actual
    // child object.
    if (childId > 0)
        return wxACC_NOT_IMPLEMENTED;

    // This will eventually be replaced by specialised
    // accessible classes, one for each kind of wxWidgets
    // control or window.
#if wxUSE_BUTTON
    if (wxDynamicCast(GetWindow(), wxButton))
        title = ((wxButton*) GetWindow())->GetLabel();
    else
#endif
        title = GetWindow()->GetName();

    if (!title.empty())
    {
        *name = title;
        return wxACC_OK;
    }
    else
        return wxACC_NOT_IMPLEMENTED;
}

// Gets the number of children.
wxAccStatus wxWindowAccessible::GetChildCount(int* childId)
{
    wxASSERT( GetWindow() != NULL );
    if (!GetWindow())
        return wxACC_FAIL;

    *childId = (int) GetWindow()->GetChildren().GetCount();
    return wxACC_OK;
}

// Gets the specified child (starting from 1).
// If *child is NULL and return value is wxACC_OK,
// this means that the child is a simple element and
// not an accessible object.
wxAccStatus wxWindowAccessible::GetChild(int childId, wxAccessible** child)
{
    wxASSERT( GetWindow() != NULL );
    if (!GetWindow())
        return wxACC_FAIL;

    if (childId == 0)
    {
        // id 0 denotes this object itself
        *child = this;
        return wxACC_OK;
    }

    if (childId > (int) GetWindow()->GetChildren().GetCount())
        return wxACC_FAIL;

    wxWindow* childWindow = GetWindow()->GetChildren().Item(childId-1)->GetData();
    *child = childWindow->GetOrCreateAccessible();
    if (*child)
        return wxACC_OK;
    else
        return wxACC_FAIL;
}

// Gets the parent, or NULL.
wxAccStatus wxWindowAccessible::GetParent(wxAccessible** parent)
{
    wxASSERT( GetWindow() != NULL );
    if (!GetWindow())
        return wxACC_FAIL;

    wxWindow* parentWindow = GetWindow()->GetParent();
    if (!parentWindow)
    {
        *parent = NULL;
        return wxACC_OK;
    }
    else
    {
        *parent = parentWindow->GetOrCreateAccessible();
        if (*parent)
            return wxACC_OK;
        else
            return wxACC_FAIL;
    }
}

// Performs the default action. childId is 0 (the action for this object)
// or > 0 (the action for a child).
// Return wxACC_NOT_SUPPORTED if there is no default action for this
// window (e.g. an edit control).
wxAccStatus wxWindowAccessible::DoDefaultAction(int WXUNUSED(childId))
{
    wxASSERT( GetWindow() != NULL );
    if (!GetWindow())
        return wxACC_FAIL;

    return wxACC_NOT_IMPLEMENTED;
}

// Gets the default action for this object (0) or > 0 (the action for a child).
// Return wxACC_OK even if there is no action. actionName is the action, or the empty
// string if there is no action.
// The retrieved string describes the action that is performed on an object,
// not what the object does as a result. For example, a toolbar button that prints
// a document has a default action of "Press" rather than "Prints the current document."
wxAccStatus wxWindowAccessible::GetDefaultAction(int WXUNUSED(childId),
                                                 wxString* WXUNUSED(actionName))
{
    wxASSERT( GetWindow() != NULL );
    if (!GetWindow())
        return wxACC_FAIL;

    return wxACC_NOT_IMPLEMENTED;
}

// Returns the description for this object or a child.
wxAccStatus wxWindowAccessible::GetDescription(int WXUNUSED(childId),
                                               wxString* description)
{
    wxASSERT( GetWindow() != NULL );
    if (!GetWindow())
        return wxACC_FAIL;

    // the context-sensitive help text doubles as the description here
    wxString ht(GetWindow()->GetHelpTextAtPoint(wxDefaultPosition,
                                                wxHelpEvent::Origin_Keyboard));

    if (!ht.empty())
    {
        *description = ht;
        return wxACC_OK;
    }
    return wxACC_NOT_IMPLEMENTED;
}

// Returns help text for this object or a child, similar to tooltip text.
wxAccStatus wxWindowAccessible::GetHelpText(int WXUNUSED(childId),
                                            wxString* helpText)
{
    wxASSERT( GetWindow() != NULL );
    if (!GetWindow())
        return wxACC_FAIL;

    wxString ht(GetWindow()->GetHelpTextAtPoint(wxDefaultPosition,
                                                wxHelpEvent::Origin_Keyboard));

    if (!ht.empty())
    {
        *helpText = ht;
        return wxACC_OK;
    }
    return wxACC_NOT_IMPLEMENTED;
}

// Returns the keyboard shortcut for this object or child.
// Return e.g. ALT+K
wxAccStatus wxWindowAccessible::GetKeyboardShortcut(int WXUNUSED(childId),
                                                    wxString* WXUNUSED(shortcut))
{
    wxASSERT( GetWindow() != NULL );
    if (!GetWindow())
        return wxACC_FAIL;

    return wxACC_NOT_IMPLEMENTED;
}

// Returns a role constant.
wxAccStatus wxWindowAccessible::GetRole(int childId, wxAccRole* role)
{
    wxASSERT( GetWindow() != NULL );
    if (!GetWindow())
        return wxACC_FAIL;

    // If a child, leave wxWidgets to call the function on the actual
    // child object.
    if (childId > 0)
        return wxACC_NOT_IMPLEMENTED;

    // controls, status bars and toolbars are expected to be handled by
    // their own (not yet existing) specialized accessible classes
    if (wxDynamicCast(GetWindow(), wxControl))
        return wxACC_NOT_IMPLEMENTED;
#if wxUSE_STATUSBAR
    if (wxDynamicCast(GetWindow(), wxStatusBar))
        return wxACC_NOT_IMPLEMENTED;
#endif
#if wxUSE_TOOLBAR
    if (wxDynamicCast(GetWindow(), wxToolBar))
        return wxACC_NOT_IMPLEMENTED;
#endif

    //*role = wxROLE_SYSTEM_CLIENT;
    *role = wxROLE_SYSTEM_CLIENT;
    return wxACC_OK;

    // NOTE(review): unreachable, kept for reference only
#if 0
    return wxACC_NOT_IMPLEMENTED;
#endif
}

// Returns a state constant.
wxAccStatus wxWindowAccessible::GetState(int childId, long* state)
{
    wxASSERT( GetWindow() != NULL );
    if (!GetWindow())
        return wxACC_FAIL;

    // If a child, leave wxWidgets to call the function on the actual
    // child object.
    if (childId > 0)
        return wxACC_NOT_IMPLEMENTED;

    if (wxDynamicCast(GetWindow(), wxControl))
        return wxACC_NOT_IMPLEMENTED;

#if wxUSE_STATUSBAR
    if (wxDynamicCast(GetWindow(), wxStatusBar))
        return wxACC_NOT_IMPLEMENTED;
#endif
#if wxUSE_TOOLBAR
    if (wxDynamicCast(GetWindow(), wxToolBar))
        return wxACC_NOT_IMPLEMENTED;
#endif

    *state = 0;
    return wxACC_OK;

    // NOTE(review): unreachable, kept for reference only
#if 0
    return wxACC_NOT_IMPLEMENTED;
#endif
}

// Returns a localized string representing the value for the object
// or child.
wxAccStatus wxWindowAccessible::GetValue(int WXUNUSED(childId),
                                         wxString* WXUNUSED(strValue))
{
    wxASSERT( GetWindow() != NULL );
    if (!GetWindow())
        return wxACC_FAIL;

    return wxACC_NOT_IMPLEMENTED;
}

// Selects the object or child.
wxAccStatus wxWindowAccessible::Select(int WXUNUSED(childId),
                                       wxAccSelectionFlags WXUNUSED(selectFlags))
{
    wxASSERT( GetWindow() != NULL );
    if (!GetWindow())
        return wxACC_FAIL;

    return wxACC_NOT_IMPLEMENTED;
}

// Gets the window with the keyboard focus.
// If childId is 0 and child is NULL, no object in
// this subhierarchy has the focus.
// If this object has the focus, child should be 'this'.
wxAccStatus wxWindowAccessible::GetFocus(int* WXUNUSED(childId), wxAccessible** WXUNUSED(child)) { wxASSERT( GetWindow() != NULL ); if (!GetWindow()) return wxACC_FAIL; return wxACC_NOT_IMPLEMENTED; } #if wxUSE_VARIANT // Gets a variant representing the selected children // of this object. // Acceptable values: // - a null variant (IsNull() returns true) // - a list variant (GetType() == wxT("list") // - an integer representing the selected child element, // or 0 if this object is selected (GetType() == wxT("long") // - a "void*" pointer to a wxAccessible child object wxAccStatus wxWindowAccessible::GetSelections(wxVariant* WXUNUSED(selections)) { wxASSERT( GetWindow() != NULL ); if (!GetWindow()) return wxACC_FAIL; return wxACC_NOT_IMPLEMENTED; } #endif // wxUSE_VARIANT #endif // wxUSE_ACCESSIBILITY // ---------------------------------------------------------------------------- // RTL support // ---------------------------------------------------------------------------- wxCoord wxWindowBase::AdjustForLayoutDirection(wxCoord x, wxCoord width, wxCoord widthTotal) const { if ( GetLayoutDirection() == wxLayout_RightToLeft ) { x = widthTotal - x - width; } return x; }
gpl-2.0
nullpo-head/linux
drivers/misc/bh1780gli.c
633
6164
/*
 * bh1780gli.c
 * ROHM Ambient Light Sensor Driver
 *
 * Copyright (C) 2010 Texas Instruments
 * Author: Hemanth V <hemanthv@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/of.h>

/* Sensor register addresses (SMBus command byte). */
#define BH1780_REG_CONTROL	0x80
#define BH1780_REG_PARTID	0x8A
#define BH1780_REG_MANFID	0x8B
#define BH1780_REG_DLOW	0x8C
#define BH1780_REG_DHIGH	0x8D

#define BH1780_REVMASK		(0xf)
#define BH1780_POWMASK		(0x3)
#define BH1780_POFF		(0x0)
#define BH1780_PON		(0x3)

/* power on settling time in ms */
#define BH1780_PON_DELAY	2

/* Per-device state. */
struct bh1780_data {
	struct i2c_client *client;
	int power_state;	/* last power mode written to CONTROL */
	/* lock for sysfs operations */
	struct mutex lock;
};

/*
 * Write one byte to register @reg; @msg names the register for the error
 * message.  Returns 0 or a negative errno from the SMBus transfer.
 */
static int bh1780_write(struct bh1780_data *ddata, u8 reg, u8 val, char *msg)
{
	int ret = i2c_smbus_write_byte_data(ddata->client, reg, val);
	if (ret < 0)
		dev_err(&ddata->client->dev,
			"i2c_smbus_write_byte_data failed error %d Register (%s)\n",
			ret, msg);
	return ret;
}

/*
 * Read one byte from register @reg; @msg names the register for the error
 * message.  Returns the byte value or a negative errno.
 */
static int bh1780_read(struct bh1780_data *ddata, u8 reg, char *msg)
{
	int ret = i2c_smbus_read_byte_data(ddata->client, reg);
	if (ret < 0)
		dev_err(&ddata->client->dev,
			"i2c_smbus_read_byte_data failed error %d Register (%s)\n",
			ret, msg);
	return ret;
}

/* sysfs "lux" show: combine the two data registers into one 16-bit value. */
static ssize_t bh1780_show_lux(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct bh1780_data *ddata = platform_get_drvdata(pdev);
	int lsb, msb;

	lsb = bh1780_read(ddata, BH1780_REG_DLOW, "DLOW");
	if (lsb < 0)
		return lsb;

	msb = bh1780_read(ddata, BH1780_REG_DHIGH, "DHIGH");
	if (msb < 0)
		return msb;

	return sprintf(buf, "%d\n", (msb << 8) | lsb);
}

/* sysfs "power_state" show: report the power bits of the CONTROL register. */
static ssize_t bh1780_show_power_state(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct bh1780_data *ddata = platform_get_drvdata(pdev);
	int state;

	state = bh1780_read(ddata, BH1780_REG_CONTROL, "CONTROL");
	if (state < 0)
		return state;

	return sprintf(buf, "%d\n", state & BH1780_POWMASK);
}

/* sysfs "power_state" store: switch the sensor between off and on. */
static ssize_t bh1780_store_power_state(struct device *dev,
					struct device_attribute *attr,
					const char *buf, size_t count)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct bh1780_data *ddata = platform_get_drvdata(pdev);
	unsigned long val;
	int error;

	error = kstrtoul(buf, 0, &val);
	if (error)
		return error;

	/*
	 * NOTE(review): val is unsigned and BH1780_POFF is 0, so the lower
	 * bound check is always false; only the upper bound is effective.
	 */
	if (val < BH1780_POFF || val > BH1780_PON)
		return -EINVAL;

	mutex_lock(&ddata->lock);

	error = bh1780_write(ddata, BH1780_REG_CONTROL, val, "CONTROL");
	if (error < 0) {
		mutex_unlock(&ddata->lock);
		return error;
	}

	/* give the part time to settle after powering on */
	msleep(BH1780_PON_DELAY);
	ddata->power_state = val;
	mutex_unlock(&ddata->lock);

	return count;
}

static DEVICE_ATTR(lux, S_IRUGO, bh1780_show_lux, NULL);

static DEVICE_ATTR(power_state, S_IWUSR | S_IRUGO,
		bh1780_show_power_state, bh1780_store_power_state);

static struct attribute *bh1780_attributes[] = {
	&dev_attr_power_state.attr,
	&dev_attr_lux.attr,
	NULL
};

static const struct attribute_group bh1780_attr_group = {
	.attrs = bh1780_attributes,
};

/*
 * Probe: verify SMBus byte access, read the part ID as a presence check and
 * publish the sysfs attributes.  devm_kzalloc ties the allocation lifetime
 * to the device, so no explicit free is needed.
 */
static int bh1780_probe(struct i2c_client *client,
			const struct i2c_device_id *id)
{
	int ret;
	struct bh1780_data *ddata;
	struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);

	if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE))
		return -EIO;

	ddata = devm_kzalloc(&client->dev, sizeof(struct bh1780_data),
			     GFP_KERNEL);
	if (ddata == NULL)
		return -ENOMEM;

	ddata->client = client;
	i2c_set_clientdata(client, ddata);

	ret = bh1780_read(ddata, BH1780_REG_PARTID, "PART ID");
	if (ret < 0)
		return ret;

	dev_info(&client->dev, "Ambient Light Sensor, Rev : %d\n",
			(ret & BH1780_REVMASK));

	mutex_init(&ddata->lock);

	return sysfs_create_group(&client->dev.kobj, &bh1780_attr_group);
}

static int bh1780_remove(struct i2c_client *client)
{
	sysfs_remove_group(&client->dev.kobj, &bh1780_attr_group);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
/* Suspend: remember the current power mode, then power the sensor off. */
static int bh1780_suspend(struct device *dev)
{
	struct bh1780_data *ddata;
	int state, ret;
	struct i2c_client *client = to_i2c_client(dev);

	ddata = i2c_get_clientdata(client);
	state = bh1780_read(ddata, BH1780_REG_CONTROL, "CONTROL");
	if (state < 0)
		return state;

	ddata->power_state = state & BH1780_POWMASK;

	ret = bh1780_write(ddata, BH1780_REG_CONTROL, BH1780_POFF,
				"CONTROL");
	if (ret < 0)
		return ret;

	return 0;
}

/* Resume: restore the power mode saved by bh1780_suspend(). */
static int bh1780_resume(struct device *dev)
{
	struct bh1780_data *ddata;
	int state, ret;
	struct i2c_client *client = to_i2c_client(dev);

	ddata = i2c_get_clientdata(client);
	state = ddata->power_state;
	ret = bh1780_write(ddata, BH1780_REG_CONTROL, state,
				"CONTROL");
	if (ret < 0)
		return ret;

	return 0;
}
#endif /* CONFIG_PM_SLEEP */

static SIMPLE_DEV_PM_OPS(bh1780_pm, bh1780_suspend, bh1780_resume);

static const struct i2c_device_id bh1780_id[] = {
	{ "bh1780", 0 },
	{ },
};

#ifdef CONFIG_OF
static const struct of_device_id of_bh1780_match[] = {
	{ .compatible = "rohm,bh1780gli", },
	{},
};

MODULE_DEVICE_TABLE(of, of_bh1780_match);
#endif

static struct i2c_driver bh1780_driver = {
	.probe		= bh1780_probe,
	.remove		= bh1780_remove,
	.id_table	= bh1780_id,
	.driver = {
		.name = "bh1780",
		.pm	= &bh1780_pm,
		.of_match_table = of_match_ptr(of_bh1780_match),
	},
};

module_i2c_driver(bh1780_driver);

MODULE_DESCRIPTION("BH1780GLI Ambient Light Sensor Driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Hemanth V <hemanthv@ti.com>");
gpl-2.0
linhphi9x94/zte-kernel-msm7x27
drivers/net/phy/national.c
889
4156
/*
 * drivers/net/phy/national.c
 *
 * Driver for National Semiconductor PHYs
 *
 * Author: Stuart Menefy <stuart.menefy@st.com>
 * Maintainer: Giuseppe Cavallaro <peppe.cavallaro@st.com>
 *
 * Copyright (c) 2008 STMicroelectronics Limited
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/phy.h>
#include <linux/netdevice.h>

/* DP83865 phy identifier values */
#define DP83865_PHY_ID	0x20005c7a

#define DP83865_INT_MASK_REG 0x15
#define DP83865_INT_MASK_STATUS 0x14

#define DP83865_INT_REMOTE_FAULT 0x0008
#define DP83865_INT_ANE_COMPLETED 0x0010
#define DP83865_INT_LINK_CHANGE 0xe000
#define DP83865_INT_MASK_DEFAULT (DP83865_INT_REMOTE_FAULT | \
				DP83865_INT_ANE_COMPLETED | \
				DP83865_INT_LINK_CHANGE)

/* Advanced proprietary configuration */
#define NS_EXP_MEM_CTL	0x16
#define NS_EXP_MEM_DATA	0x1d
#define NS_EXP_MEM_ADD	0x1e

#define LED_CTRL_REG 0x13
#define AN_FALLBACK_AN 0x0001
#define AN_FALLBACK_CRC 0x0002
#define AN_FALLBACK_IE 0x0004
#define ALL_FALLBACK_ON (AN_FALLBACK_AN |  AN_FALLBACK_CRC | AN_FALLBACK_IE)

enum hdx_loopback {
	hdx_loopback_on = 0,
	hdx_loopback_off = 1,
};

/*
 * Read a byte from the PHY's expanded memory at @reg.
 * NOTE(review): phy_read() may return a negative error code which is
 * silently truncated to u8 here; callers cannot detect MDIO failures.
 */
static u8 ns_exp_read(struct phy_device *phydev, u16 reg)
{
	phy_write(phydev, NS_EXP_MEM_ADD, reg);
	return phy_read(phydev, NS_EXP_MEM_DATA);
}

/* Write a byte to the PHY's expanded memory at @reg. */
static void ns_exp_write(struct phy_device *phydev, u16 reg, u8 data)
{
	phy_write(phydev, NS_EXP_MEM_ADD, reg);
	phy_write(phydev, NS_EXP_MEM_DATA, data);
}

/* Enable or disable the default interrupt sources. */
static int ns_config_intr(struct phy_device *phydev)
{
	int err;

	if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
		err = phy_write(phydev, DP83865_INT_MASK_REG,
				DP83865_INT_MASK_DEFAULT);
	else
		err = phy_write(phydev, DP83865_INT_MASK_REG, 0);

	return err;
}

/* Reading the status register clears pending interrupts. */
static int ns_ack_interrupt(struct phy_device *phydev)
{
	int ret = phy_read(phydev, DP83865_INT_MASK_STATUS);
	if (ret < 0)
		return ret;

	return 0;
}

/*
 * Configure the gigabit speed fallback behaviour.  The expanded memory
 * register may only be touched while the PHY is powered down, hence the
 * BMCR_PDOWN dance around the access.
 */
static void ns_giga_speed_fallback(struct phy_device *phydev, int mode)
{
	int bmcr = phy_read(phydev, MII_BMCR);

	phy_write(phydev, MII_BMCR, (bmcr | BMCR_PDOWN));

	/* Enable 8 bit expended memory read/write (no auto increment) */
	phy_write(phydev, NS_EXP_MEM_CTL, 0);
	phy_write(phydev, NS_EXP_MEM_ADD, 0x1C0);
	phy_write(phydev, NS_EXP_MEM_DATA, 0x0008);
	phy_write(phydev, MII_BMCR, (bmcr & ~BMCR_PDOWN));
	phy_write(phydev, LED_CTRL_REG, mode);
}

/*
 * Toggle the 10BASE-T half-duplex loopback via bit 0 of expanded memory
 * register 0x1c0 (bit set = loopback off).
 */
static void ns_10_base_t_hdx_loopack(struct phy_device *phydev, int disable)
{
	if (disable)
		ns_exp_write(phydev, 0x1c0, ns_exp_read(phydev, 0x1c0) | 1);
	else
		ns_exp_write(phydev, 0x1c0,
			     ns_exp_read(phydev, 0x1c0) & 0xfffe);

	printk(KERN_DEBUG "DP83865 PHY: 10BASE-T HDX loopback %s\n",
	       (ns_exp_read(phydev, 0x1c0) & 0x0001) ? "off" : "on");
}

static int ns_config_init(struct phy_device *phydev)
{
	ns_giga_speed_fallback(phydev, ALL_FALLBACK_ON);
	/* In the latest MAC or switches design, the 10 Mbps loopback
	   is desired to be turned off. */
	ns_10_base_t_hdx_loopack(phydev, hdx_loopback_off);
	return ns_ack_interrupt(phydev);
}

static struct phy_driver dp83865_driver = {
	.phy_id = DP83865_PHY_ID,
	.phy_id_mask = 0xfffffff0,
	.name = "NatSemi DP83865",
	.features = PHY_GBIT_FEATURES | SUPPORTED_Pause | SUPPORTED_Asym_Pause,
	.flags = PHY_HAS_INTERRUPT,
	.config_init = ns_config_init,
	.config_aneg = genphy_config_aneg,
	.read_status = genphy_read_status,
	.ack_interrupt = ns_ack_interrupt,
	.config_intr = ns_config_intr,
	.driver = {.owner = THIS_MODULE,}
};

static int __init ns_init(void)
{
	return phy_driver_register(&dp83865_driver);
}

static void __exit ns_exit(void)
{
	phy_driver_unregister(&dp83865_driver);
}

MODULE_DESCRIPTION("NatSemi PHY driver");
MODULE_AUTHOR("Stuart Menefy");
MODULE_LICENSE("GPL");

module_init(ns_init);
module_exit(ns_exit);

static struct mdio_device_id ns_tbl[] = {
	{ DP83865_PHY_ID, 0xfffffff0 },
	{ }
};

MODULE_DEVICE_TABLE(mdio, ns_tbl);
gpl-2.0
h4ck3rm1k3/linux-android-kernel-qemu
drivers/char/generic_nvram.c
889
3407
/*
 * Generic /dev/nvram driver for architectures providing some
 * "generic" hooks, that is :
 *
 * nvram_read_byte, nvram_write_byte, nvram_sync, nvram_get_size
 *
 * Note that an additional hook is supported for PowerMac only
 * for getting the nvram "partition" informations
 *
 */

#define NVRAM_VERSION "1.1"

#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/smp_lock.h>
#include <asm/uaccess.h>
#include <asm/nvram.h>
#ifdef CONFIG_PPC_PMAC
#include <asm/machdep.h>
#endif

/* Fallback device size used when nvram_get_size() cannot report one. */
#define NVRAM_SIZE 8192

/* Usable NVRAM length in bytes; set once in nvram_init(). */
static ssize_t nvram_len;

/*
 * Seek within the NVRAM device.  Only validates that the resulting
 * offset is non-negative; offsets beyond nvram_len are caught by the
 * read/write paths, which return 0 (EOF) there.
 */
static loff_t nvram_llseek(struct file *file, loff_t offset, int origin)
{
	switch (origin) {
	case 1:		/* SEEK_CUR: relative to current file position */
		offset += file->f_pos;
		break;
	case 2:		/* SEEK_END: relative to device size */
		offset += nvram_len;
		break;
	}
	if (offset < 0)
		return -EINVAL;
	file->f_pos = offset;
	return file->f_pos;
}

/*
 * Read from NVRAM one byte at a time through the platform's
 * nvram_read_byte() hook.  Returns the number of bytes copied to
 * user space, 0 at/past end of device, or -EFAULT.
 */
static ssize_t read_nvram(struct file *file, char __user *buf,
			  size_t count, loff_t *ppos)
{
	unsigned int i;
	char __user *p = buf;

	/* Validate the whole user range once, then use __put_user below. */
	if (!access_ok(VERIFY_WRITE, buf, count))
		return -EFAULT;
	if (*ppos >= nvram_len)
		return 0;
	for (i = *ppos; count > 0 && i < nvram_len; ++i, ++p, --count)
		if (__put_user(nvram_read_byte(i), p))
			return -EFAULT;
	*ppos = i;
	return p - buf;
}

/*
 * Write to NVRAM one byte at a time through the platform's
 * nvram_write_byte() hook.  Mirrors read_nvram(): returns bytes
 * consumed, 0 at/past end of device, or -EFAULT.
 */
static ssize_t write_nvram(struct file *file, const char __user *buf,
			   size_t count, loff_t *ppos)
{
	unsigned int i;
	const char __user *p = buf;
	char c;

	if (!access_ok(VERIFY_READ, buf, count))
		return -EFAULT;
	if (*ppos >= nvram_len)
		return 0;
	for (i = *ppos; count > 0 && i < nvram_len; ++i, ++p, --count) {
		if (__get_user(c, p))
			return -EFAULT;
		nvram_write_byte(c, i);
	}
	*ppos = i;
	return p - buf;
}

/*
 * ioctl backend (called with the BKL held, see nvram_unlocked_ioctl).
 * Supports querying a PowerMac NVRAM partition offset and forcing a
 * sync to the backing store.
 */
static int nvram_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	switch(cmd) {
#ifdef CONFIG_PPC_PMAC
	case OBSOLETE_PMAC_NVRAM_GET_OFFSET:
		printk(KERN_WARNING "nvram: Using obsolete PMAC_NVRAM_GET_OFFSET ioctl\n");
		/* fall through - handled identically to the new ioctl */
	case IOC_NVRAM_GET_OFFSET: {
		int part, offset;

		if (!machine_is(powermac))
			return -EINVAL;
		if (copy_from_user(&part, (void __user*)arg,
				   sizeof(part)) != 0)
			return -EFAULT;
		if (part < pmac_nvram_OF || part > pmac_nvram_NR)
			return -EINVAL;
		offset = pmac_get_partition(part);
		/* Result is written back through the same user pointer. */
		if (copy_to_user((void __user*)arg,
				 &offset, sizeof(offset)) != 0)
			return -EFAULT;
		break;
	}
#endif /* CONFIG_PPC_PMAC */
	case IOC_NVRAM_SYNC:
		nvram_sync();
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

/*
 * unlocked_ioctl entry point: serializes nvram_ioctl() with the Big
 * Kernel Lock, preserving the locking the old .ioctl hook provided.
 */
static long nvram_unlocked_ioctl(struct file *file, unsigned int cmd,
				 unsigned long arg)
{
	int ret;

	lock_kernel();
	ret = nvram_ioctl(file, cmd, arg);
	unlock_kernel();

	return ret;
}

const struct file_operations nvram_fops = {
	.owner		= THIS_MODULE,
	.llseek		= nvram_llseek,
	.read		= read_nvram,
	.write		= write_nvram,
	.unlocked_ioctl	= nvram_unlocked_ioctl,
};

/* Registered as a misc character device at the fixed NVRAM_MINOR. */
static struct miscdevice nvram_dev = {
	NVRAM_MINOR,
	"nvram",
	&nvram_fops
};

/*
 * Register the misc device and discover the NVRAM size via the
 * platform hook, falling back to NVRAM_SIZE on error.
 */
int __init nvram_init(void)
{
	int ret = 0;

	printk(KERN_INFO "Generic non-volatile memory driver v%s\n",
		NVRAM_VERSION);
	ret = misc_register(&nvram_dev);
	if (ret != 0)
		goto out;

	nvram_len = nvram_get_size();
	if (nvram_len < 0)
		nvram_len = NVRAM_SIZE;

 out:
	return ret;
}

void __exit nvram_cleanup(void)
{
        misc_deregister( &nvram_dev );
}

module_init(nvram_init);
module_exit(nvram_cleanup);
MODULE_LICENSE("GPL");
gpl-2.0
kannu1994/crespo_kernel
arch/arm/mach-pxa/spitz.c
2425
25415
/* * Support for Sharp SL-Cxx00 Series of PDAs * Models: SL-C3000 (Spitz), SL-C1000 (Akita) and SL-C3100 (Borzoi) * * Copyright (c) 2005 Richard Purdie * * Based on Sharp's 2.4 kernel patches/lubbock.c * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * */ #include <linux/kernel.h> #include <linux/platform_device.h> #include <linux/delay.h> #include <linux/gpio_keys.h> #include <linux/gpio.h> #include <linux/leds.h> #include <linux/i2c.h> #include <linux/i2c/pxa-i2c.h> #include <linux/i2c/pca953x.h> #include <linux/spi/spi.h> #include <linux/spi/ads7846.h> #include <linux/spi/corgi_lcd.h> #include <linux/spi/pxa2xx_spi.h> #include <linux/mtd/sharpsl.h> #include <linux/mtd/physmap.h> #include <linux/input/matrix_keypad.h> #include <linux/regulator/machine.h> #include <linux/io.h> #include <asm/setup.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> #include <asm/mach/sharpsl_param.h> #include <asm/hardware/scoop.h> #include <mach/pxa27x.h> #include <mach/pxa27x-udc.h> #include <mach/reset.h> #include <mach/irda.h> #include <mach/mmc.h> #include <mach/ohci.h> #include <mach/pxafb.h> #include <mach/spitz.h> #include <mach/sharpsl_pm.h> #include <mach/smemc.h> #include "generic.h" #include "devices.h" /****************************************************************************** * Pin configuration ******************************************************************************/ static unsigned long spitz_pin_config[] __initdata = { /* Chip Selects */ GPIO78_nCS_2, /* SCOOP #2 */ GPIO79_nCS_3, /* NAND */ GPIO80_nCS_4, /* SCOOP #1 */ /* LCD - 16bpp Active TFT */ GPIOxx_LCD_TFT_16BPP, /* PC Card */ GPIO48_nPOE, GPIO49_nPWE, GPIO50_nPIOR, GPIO51_nPIOW, GPIO85_nPCE_1, GPIO54_nPCE_2, GPIO55_nPREG, GPIO56_nPWAIT, GPIO57_nIOIS16, GPIO104_PSKTSEL, /* I2S */ GPIO28_I2S_BITCLK_OUT, GPIO29_I2S_SDATA_IN, GPIO30_I2S_SDATA_OUT, 
GPIO31_I2S_SYNC, /* MMC */ GPIO32_MMC_CLK, GPIO112_MMC_CMD, GPIO92_MMC_DAT_0, GPIO109_MMC_DAT_1, GPIO110_MMC_DAT_2, GPIO111_MMC_DAT_3, /* GPIOs */ GPIO9_GPIO, /* SPITZ_GPIO_nSD_DETECT */ GPIO16_GPIO, /* SPITZ_GPIO_SYNC */ GPIO81_GPIO, /* SPITZ_GPIO_nSD_WP */ GPIO41_GPIO, /* SPITZ_GPIO_USB_CONNECT */ GPIO37_GPIO, /* SPITZ_GPIO_USB_HOST */ GPIO35_GPIO, /* SPITZ_GPIO_USB_DEVICE */ GPIO22_GPIO, /* SPITZ_GPIO_HSYNC */ GPIO94_GPIO, /* SPITZ_GPIO_CF_CD */ GPIO105_GPIO, /* SPITZ_GPIO_CF_IRQ */ GPIO106_GPIO, /* SPITZ_GPIO_CF2_IRQ */ /* GPIO matrix keypad */ GPIO88_GPIO, /* column 0 */ GPIO23_GPIO, /* column 1 */ GPIO24_GPIO, /* column 2 */ GPIO25_GPIO, /* column 3 */ GPIO26_GPIO, /* column 4 */ GPIO27_GPIO, /* column 5 */ GPIO52_GPIO, /* column 6 */ GPIO103_GPIO, /* column 7 */ GPIO107_GPIO, /* column 8 */ GPIO108_GPIO, /* column 9 */ GPIO114_GPIO, /* column 10 */ GPIO12_GPIO, /* row 0 */ GPIO17_GPIO, /* row 1 */ GPIO91_GPIO, /* row 2 */ GPIO34_GPIO, /* row 3 */ GPIO36_GPIO, /* row 4 */ GPIO38_GPIO, /* row 5 */ GPIO39_GPIO, /* row 6 */ /* I2C */ GPIO117_I2C_SCL, GPIO118_I2C_SDA, GPIO0_GPIO | WAKEUP_ON_EDGE_RISE, /* SPITZ_GPIO_KEY_INT */ GPIO1_GPIO | WAKEUP_ON_EDGE_FALL, /* SPITZ_GPIO_RESET */ }; /****************************************************************************** * Scoop GPIO expander ******************************************************************************/ #if defined(CONFIG_SHARP_SCOOP) || defined(CONFIG_SHARP_SCOOP_MODULE) /* SCOOP Device #1 */ static struct resource spitz_scoop_1_resources[] = { [0] = { .start = 0x10800000, .end = 0x10800fff, .flags = IORESOURCE_MEM, }, }; static struct scoop_config spitz_scoop_1_setup = { .io_dir = SPITZ_SCP_IO_DIR, .io_out = SPITZ_SCP_IO_OUT, .suspend_clr = SPITZ_SCP_SUS_CLR, .suspend_set = SPITZ_SCP_SUS_SET, .gpio_base = SPITZ_SCP_GPIO_BASE, }; struct platform_device spitz_scoop_1_device = { .name = "sharp-scoop", .id = 0, .dev = { .platform_data = &spitz_scoop_1_setup, }, .num_resources = 
ARRAY_SIZE(spitz_scoop_1_resources), .resource = spitz_scoop_1_resources, }; /* SCOOP Device #2 */ static struct resource spitz_scoop_2_resources[] = { [0] = { .start = 0x08800040, .end = 0x08800fff, .flags = IORESOURCE_MEM, }, }; static struct scoop_config spitz_scoop_2_setup = { .io_dir = SPITZ_SCP2_IO_DIR, .io_out = SPITZ_SCP2_IO_OUT, .suspend_clr = SPITZ_SCP2_SUS_CLR, .suspend_set = SPITZ_SCP2_SUS_SET, .gpio_base = SPITZ_SCP2_GPIO_BASE, }; struct platform_device spitz_scoop_2_device = { .name = "sharp-scoop", .id = 1, .dev = { .platform_data = &spitz_scoop_2_setup, }, .num_resources = ARRAY_SIZE(spitz_scoop_2_resources), .resource = spitz_scoop_2_resources, }; static void __init spitz_scoop_init(void) { platform_device_register(&spitz_scoop_1_device); /* Akita doesn't have the second SCOOP chip */ if (!machine_is_akita()) platform_device_register(&spitz_scoop_2_device); } /* Power control is shared with between one of the CF slots and SD */ static void spitz_card_pwr_ctrl(uint8_t enable, uint8_t new_cpr) { unsigned short cpr; unsigned long flags; if (new_cpr & 0x7) { gpio_set_value(SPITZ_GPIO_CF_POWER, 1); mdelay(5); } local_irq_save(flags); cpr = read_scoop_reg(&spitz_scoop_1_device.dev, SCOOP_CPR); if (enable & new_cpr) cpr |= new_cpr; else cpr &= ~enable; write_scoop_reg(&spitz_scoop_1_device.dev, SCOOP_CPR, cpr); local_irq_restore(flags); if (!(cpr & 0x7)) { mdelay(1); gpio_set_value(SPITZ_GPIO_CF_POWER, 0); } } #else static inline void spitz_scoop_init(void) {} static inline void spitz_card_pwr_ctrl(uint8_t enable, uint8_t new_cpr) {} #endif /****************************************************************************** * PCMCIA ******************************************************************************/ #if defined(CONFIG_PCMCIA_PXA2XX) || defined(CONFIG_PCMCIA_PXA2XX_MODULE) static void spitz_pcmcia_pwr(struct device *scoop, uint16_t cpr, int nr) { /* Only need to override behaviour for slot 0 */ if (nr == 0) spitz_card_pwr_ctrl( cpr & 
(SCOOP_CPR_CF_3V | SCOOP_CPR_CF_XV), cpr); else write_scoop_reg(scoop, SCOOP_CPR, cpr); } static struct scoop_pcmcia_dev spitz_pcmcia_scoop[] = { { .dev = &spitz_scoop_1_device.dev, .irq = SPITZ_IRQ_GPIO_CF_IRQ, .cd_irq = SPITZ_IRQ_GPIO_CF_CD, .cd_irq_str = "PCMCIA0 CD", }, { .dev = &spitz_scoop_2_device.dev, .irq = SPITZ_IRQ_GPIO_CF2_IRQ, .cd_irq = -1, }, }; static struct scoop_pcmcia_config spitz_pcmcia_config = { .devs = &spitz_pcmcia_scoop[0], .num_devs = 2, .power_ctrl = spitz_pcmcia_pwr, }; static void __init spitz_pcmcia_init(void) { /* Akita has only one PCMCIA slot used */ if (machine_is_akita()) spitz_pcmcia_config.num_devs = 1; platform_scoop_config = &spitz_pcmcia_config; } #else static inline void spitz_pcmcia_init(void) {} #endif /****************************************************************************** * GPIO keyboard ******************************************************************************/ #if defined(CONFIG_KEYBOARD_MATRIX) || defined(CONFIG_KEYBOARD_MATRIX_MODULE) #define SPITZ_KEY_CALENDAR KEY_F1 #define SPITZ_KEY_ADDRESS KEY_F2 #define SPITZ_KEY_FN KEY_F3 #define SPITZ_KEY_CANCEL KEY_F4 #define SPITZ_KEY_EXOK KEY_F5 #define SPITZ_KEY_EXCANCEL KEY_F6 #define SPITZ_KEY_EXJOGDOWN KEY_F7 #define SPITZ_KEY_EXJOGUP KEY_F8 #define SPITZ_KEY_JAP1 KEY_LEFTALT #define SPITZ_KEY_JAP2 KEY_RIGHTCTRL #define SPITZ_KEY_SYNC KEY_F9 #define SPITZ_KEY_MAIL KEY_F10 #define SPITZ_KEY_OK KEY_F11 #define SPITZ_KEY_MENU KEY_F12 static const uint32_t spitz_keymap[] = { KEY(0, 0, KEY_LEFTCTRL), KEY(0, 1, KEY_1), KEY(0, 2, KEY_3), KEY(0, 3, KEY_5), KEY(0, 4, KEY_6), KEY(0, 5, KEY_7), KEY(0, 6, KEY_9), KEY(0, 7, KEY_0), KEY(0, 8, KEY_BACKSPACE), KEY(0, 9, SPITZ_KEY_EXOK), /* EXOK */ KEY(0, 10, SPITZ_KEY_EXCANCEL), /* EXCANCEL */ KEY(1, 1, KEY_2), KEY(1, 2, KEY_4), KEY(1, 3, KEY_R), KEY(1, 4, KEY_Y), KEY(1, 5, KEY_8), KEY(1, 6, KEY_I), KEY(1, 7, KEY_O), KEY(1, 8, KEY_P), KEY(1, 9, SPITZ_KEY_EXJOGDOWN), /* EXJOGDOWN */ KEY(1, 10, SPITZ_KEY_EXJOGUP), /* EXJOGUP */ 
KEY(2, 0, KEY_TAB), KEY(2, 1, KEY_Q), KEY(2, 2, KEY_E), KEY(2, 3, KEY_T), KEY(2, 4, KEY_G), KEY(2, 5, KEY_U), KEY(2, 6, KEY_J), KEY(2, 7, KEY_K), KEY(3, 0, SPITZ_KEY_ADDRESS), /* ADDRESS */ KEY(3, 1, KEY_W), KEY(3, 2, KEY_S), KEY(3, 3, KEY_F), KEY(3, 4, KEY_V), KEY(3, 5, KEY_H), KEY(3, 6, KEY_M), KEY(3, 7, KEY_L), KEY(3, 9, KEY_RIGHTSHIFT), KEY(4, 0, SPITZ_KEY_CALENDAR), /* CALENDAR */ KEY(4, 1, KEY_A), KEY(4, 2, KEY_D), KEY(4, 3, KEY_C), KEY(4, 4, KEY_B), KEY(4, 5, KEY_N), KEY(4, 6, KEY_DOT), KEY(4, 8, KEY_ENTER), KEY(4, 9, KEY_LEFTSHIFT), KEY(5, 0, SPITZ_KEY_MAIL), /* MAIL */ KEY(5, 1, KEY_Z), KEY(5, 2, KEY_X), KEY(5, 3, KEY_MINUS), KEY(5, 4, KEY_SPACE), KEY(5, 5, KEY_COMMA), KEY(5, 7, KEY_UP), KEY(5, 10, SPITZ_KEY_FN), /* FN */ KEY(6, 0, KEY_SYSRQ), KEY(6, 1, SPITZ_KEY_JAP1), /* JAP1 */ KEY(6, 2, SPITZ_KEY_JAP2), /* JAP2 */ KEY(6, 3, SPITZ_KEY_CANCEL), /* CANCEL */ KEY(6, 4, SPITZ_KEY_OK), /* OK */ KEY(6, 5, SPITZ_KEY_MENU), /* MENU */ KEY(6, 6, KEY_LEFT), KEY(6, 7, KEY_DOWN), KEY(6, 8, KEY_RIGHT), }; static const struct matrix_keymap_data spitz_keymap_data = { .keymap = spitz_keymap, .keymap_size = ARRAY_SIZE(spitz_keymap), }; static const uint32_t spitz_row_gpios[] = { 12, 17, 91, 34, 36, 38, 39 }; static const uint32_t spitz_col_gpios[] = { 88, 23, 24, 25, 26, 27, 52, 103, 107, 108, 114 }; static struct matrix_keypad_platform_data spitz_mkp_pdata = { .keymap_data = &spitz_keymap_data, .row_gpios = spitz_row_gpios, .col_gpios = spitz_col_gpios, .num_row_gpios = ARRAY_SIZE(spitz_row_gpios), .num_col_gpios = ARRAY_SIZE(spitz_col_gpios), .col_scan_delay_us = 10, .debounce_ms = 10, .wakeup = 1, }; static struct platform_device spitz_mkp_device = { .name = "matrix-keypad", .id = -1, .dev = { .platform_data = &spitz_mkp_pdata, }, }; static void __init spitz_mkp_init(void) { platform_device_register(&spitz_mkp_device); } #else static inline void spitz_mkp_init(void) {} #endif /****************************************************************************** * GPIO keys 
******************************************************************************/ #if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE) static struct gpio_keys_button spitz_gpio_keys[] = { { .type = EV_PWR, .code = KEY_SUSPEND, .gpio = SPITZ_GPIO_ON_KEY, .desc = "On Off", .wakeup = 1, }, /* Two buttons detecting the lid state */ { .type = EV_SW, .code = 0, .gpio = SPITZ_GPIO_SWA, .desc = "Display Down", }, { .type = EV_SW, .code = 1, .gpio = SPITZ_GPIO_SWB, .desc = "Lid Closed", }, }; static struct gpio_keys_platform_data spitz_gpio_keys_platform_data = { .buttons = spitz_gpio_keys, .nbuttons = ARRAY_SIZE(spitz_gpio_keys), }; static struct platform_device spitz_gpio_keys_device = { .name = "gpio-keys", .id = -1, .dev = { .platform_data = &spitz_gpio_keys_platform_data, }, }; static void __init spitz_keys_init(void) { platform_device_register(&spitz_gpio_keys_device); } #else static inline void spitz_keys_init(void) {} #endif /****************************************************************************** * LEDs ******************************************************************************/ #if defined(CONFIG_LEDS_GPIO) || defined(CONFIG_LEDS_GPIO_MODULE) static struct gpio_led spitz_gpio_leds[] = { { .name = "spitz:amber:charge", .default_trigger = "sharpsl-charge", .gpio = SPITZ_GPIO_LED_ORANGE, }, { .name = "spitz:green:hddactivity", .default_trigger = "ide-disk", .gpio = SPITZ_GPIO_LED_GREEN, }, }; static struct gpio_led_platform_data spitz_gpio_leds_info = { .leds = spitz_gpio_leds, .num_leds = ARRAY_SIZE(spitz_gpio_leds), }; static struct platform_device spitz_led_device = { .name = "leds-gpio", .id = -1, .dev = { .platform_data = &spitz_gpio_leds_info, }, }; static void __init spitz_leds_init(void) { platform_device_register(&spitz_led_device); } #else static inline void spitz_leds_init(void) {} #endif /****************************************************************************** * SSP Devices 
******************************************************************************/ #if defined(CONFIG_SPI_PXA2XX) || defined(CONFIG_SPI_PXA2XX_MODULE) static void spitz_ads7846_wait_for_hsync(void) { while (gpio_get_value(SPITZ_GPIO_HSYNC)) cpu_relax(); while (!gpio_get_value(SPITZ_GPIO_HSYNC)) cpu_relax(); } static struct ads7846_platform_data spitz_ads7846_info = { .model = 7846, .vref_delay_usecs = 100, .x_plate_ohms = 419, .y_plate_ohms = 486, .pressure_max = 1024, .gpio_pendown = SPITZ_GPIO_TP_INT, .wait_for_sync = spitz_ads7846_wait_for_hsync, }; static struct pxa2xx_spi_chip spitz_ads7846_chip = { .gpio_cs = SPITZ_GPIO_ADS7846_CS, }; static void spitz_bl_kick_battery(void) { void (*kick_batt)(void); kick_batt = symbol_get(sharpsl_battery_kick); if (kick_batt) { kick_batt(); symbol_put(sharpsl_battery_kick); } } static struct corgi_lcd_platform_data spitz_lcdcon_info = { .init_mode = CORGI_LCD_MODE_VGA, .max_intensity = 0x2f, .default_intensity = 0x1f, .limit_mask = 0x0b, .gpio_backlight_cont = SPITZ_GPIO_BACKLIGHT_CONT, .gpio_backlight_on = SPITZ_GPIO_BACKLIGHT_ON, .kick_battery = spitz_bl_kick_battery, }; static struct pxa2xx_spi_chip spitz_lcdcon_chip = { .gpio_cs = SPITZ_GPIO_LCDCON_CS, }; static struct pxa2xx_spi_chip spitz_max1111_chip = { .gpio_cs = SPITZ_GPIO_MAX1111_CS, }; static struct spi_board_info spitz_spi_devices[] = { { .modalias = "ads7846", .max_speed_hz = 1200000, .bus_num = 2, .chip_select = 0, .platform_data = &spitz_ads7846_info, .controller_data = &spitz_ads7846_chip, .irq = gpio_to_irq(SPITZ_GPIO_TP_INT), }, { .modalias = "corgi-lcd", .max_speed_hz = 50000, .bus_num = 2, .chip_select = 1, .platform_data = &spitz_lcdcon_info, .controller_data = &spitz_lcdcon_chip, }, { .modalias = "max1111", .max_speed_hz = 450000, .bus_num = 2, .chip_select = 2, .controller_data = &spitz_max1111_chip, }, }; static struct pxa2xx_spi_master spitz_spi_info = { .num_chipselect = 3, }; static void __init spitz_spi_init(void) { struct corgi_lcd_platform_data 
*lcd_data = &spitz_lcdcon_info; if (machine_is_akita()) { lcd_data->gpio_backlight_cont = AKITA_GPIO_BACKLIGHT_CONT; lcd_data->gpio_backlight_on = AKITA_GPIO_BACKLIGHT_ON; } pxa2xx_set_spi_info(2, &spitz_spi_info); spi_register_board_info(ARRAY_AND_SIZE(spitz_spi_devices)); } #else static inline void spitz_spi_init(void) {} #endif /****************************************************************************** * SD/MMC card controller ******************************************************************************/ #if defined(CONFIG_MMC_PXA) || defined(CONFIG_MMC_PXA_MODULE) /* * NOTE: The card detect interrupt isn't debounced so we delay it by 250ms to * give the card a chance to fully insert/eject. */ static void spitz_mci_setpower(struct device *dev, unsigned int vdd) { struct pxamci_platform_data* p_d = dev->platform_data; if ((1 << vdd) & p_d->ocr_mask) spitz_card_pwr_ctrl(SCOOP_CPR_SD_3V, SCOOP_CPR_SD_3V); else spitz_card_pwr_ctrl(SCOOP_CPR_SD_3V, 0x0); } static struct pxamci_platform_data spitz_mci_platform_data = { .detect_delay_ms = 250, .ocr_mask = MMC_VDD_32_33|MMC_VDD_33_34, .setpower = spitz_mci_setpower, .gpio_card_detect = SPITZ_GPIO_nSD_DETECT, .gpio_card_ro = SPITZ_GPIO_nSD_WP, .gpio_power = -1, }; static void __init spitz_mmc_init(void) { pxa_set_mci_info(&spitz_mci_platform_data); } #else static inline void spitz_mmc_init(void) {} #endif /****************************************************************************** * USB Host ******************************************************************************/ #if defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE) static int spitz_ohci_init(struct device *dev) { int err; err = gpio_request(SPITZ_GPIO_USB_HOST, "USB_HOST"); if (err) return err; /* Only Port 2 is connected, setup USB Port 2 Output Control Register */ UP2OCR = UP2OCR_HXS | UP2OCR_HXOE | UP2OCR_DPPDE | UP2OCR_DMPDE; return gpio_direction_output(SPITZ_GPIO_USB_HOST, 1); } static void spitz_ohci_exit(struct device *dev) { 
gpio_free(SPITZ_GPIO_USB_HOST); } static struct pxaohci_platform_data spitz_ohci_platform_data = { .port_mode = PMM_NPS_MODE, .init = spitz_ohci_init, .exit = spitz_ohci_exit, .flags = ENABLE_PORT_ALL | NO_OC_PROTECTION, .power_budget = 150, }; static void __init spitz_uhc_init(void) { pxa_set_ohci_info(&spitz_ohci_platform_data); } #else static inline void spitz_uhc_init(void) {} #endif /****************************************************************************** * IrDA ******************************************************************************/ #if defined(CONFIG_PXA_FICP) || defined(CONFIG_PXA_FICP_MODULE) static struct pxaficp_platform_data spitz_ficp_platform_data = { .transceiver_cap = IR_SIRMODE | IR_OFF, }; static void __init spitz_irda_init(void) { if (machine_is_akita()) spitz_ficp_platform_data.gpio_pwdown = AKITA_GPIO_IR_ON; else spitz_ficp_platform_data.gpio_pwdown = SPITZ_GPIO_IR_ON; pxa_set_ficp_info(&spitz_ficp_platform_data); } #else static inline void spitz_irda_init(void) {} #endif /****************************************************************************** * Framebuffer ******************************************************************************/ #if defined(CONFIG_FB_PXA) || defined(CONFIG_FB_PXA_MODULE) static struct pxafb_mode_info spitz_pxafb_modes[] = { { .pixclock = 19231, .xres = 480, .yres = 640, .bpp = 16, .hsync_len = 40, .left_margin = 46, .right_margin = 125, .vsync_len = 3, .upper_margin = 1, .lower_margin = 0, .sync = 0, }, { .pixclock = 134617, .xres = 240, .yres = 320, .bpp = 16, .hsync_len = 20, .left_margin = 20, .right_margin = 46, .vsync_len = 2, .upper_margin = 1, .lower_margin = 0, .sync = 0, }, }; static struct pxafb_mach_info spitz_pxafb_info = { .modes = spitz_pxafb_modes, .num_modes = ARRAY_SIZE(spitz_pxafb_modes), .fixed_modes = 1, .lcd_conn = LCD_COLOR_TFT_16BPP | LCD_ALTERNATE_MAPPING, }; static void __init spitz_lcd_init(void) { pxa_set_fb_info(NULL, &spitz_pxafb_info); } #else static inline void 
spitz_lcd_init(void) {} #endif /****************************************************************************** * Framebuffer ******************************************************************************/ #if defined(CONFIG_MTD_NAND_SHARPSL) || defined(CONFIG_MTD_NAND_SHARPSL_MODULE) static struct mtd_partition spitz_nand_partitions[] = { { .name = "System Area", .offset = 0, .size = 7 * 1024 * 1024, }, { .name = "Root Filesystem", .offset = 7 * 1024 * 1024, }, { .name = "Home Filesystem", .offset = MTDPART_OFS_APPEND, .size = MTDPART_SIZ_FULL, }, }; static uint8_t scan_ff_pattern[] = { 0xff, 0xff }; static struct nand_bbt_descr spitz_nand_bbt = { .options = 0, .offs = 4, .len = 2, .pattern = scan_ff_pattern }; static struct nand_ecclayout akita_oobinfo = { .oobfree = { {0x08, 0x09} }, .eccbytes = 24, .eccpos = { 0x05, 0x01, 0x02, 0x03, 0x06, 0x07, 0x15, 0x11, 0x12, 0x13, 0x16, 0x17, 0x25, 0x21, 0x22, 0x23, 0x26, 0x27, 0x35, 0x31, 0x32, 0x33, 0x36, 0x37, }, }; static struct sharpsl_nand_platform_data spitz_nand_pdata = { .badblock_pattern = &spitz_nand_bbt, .partitions = spitz_nand_partitions, .nr_partitions = ARRAY_SIZE(spitz_nand_partitions), }; static struct resource spitz_nand_resources[] = { { .start = PXA_CS3_PHYS, .end = PXA_CS3_PHYS + SZ_4K - 1, .flags = IORESOURCE_MEM, }, }; static struct platform_device spitz_nand_device = { .name = "sharpsl-nand", .id = -1, .resource = spitz_nand_resources, .num_resources = ARRAY_SIZE(spitz_nand_resources), .dev = { .platform_data = &spitz_nand_pdata, } }; static void __init spitz_nand_init(void) { if (machine_is_spitz()) { spitz_nand_partitions[1].size = 5 * 1024 * 1024; } else if (machine_is_akita()) { spitz_nand_partitions[1].size = 58 * 1024 * 1024; spitz_nand_bbt.len = 1; spitz_nand_pdata.ecc_layout = &akita_oobinfo; } else if (machine_is_borzoi()) { spitz_nand_partitions[1].size = 32 * 1024 * 1024; spitz_nand_bbt.len = 1; spitz_nand_pdata.ecc_layout = &akita_oobinfo; } platform_device_register(&spitz_nand_device); 
} #else static inline void spitz_nand_init(void) {} #endif /****************************************************************************** * NOR Flash ******************************************************************************/ #if defined(CONFIG_MTD_PHYSMAP) || defined(CONFIG_MTD_PHYSMAP_MODULE) static struct mtd_partition spitz_rom_parts[] = { { .name ="Boot PROM Filesystem", .offset = 0x00140000, .size = MTDPART_SIZ_FULL, }, }; static struct physmap_flash_data spitz_rom_data = { .width = 2, .nr_parts = ARRAY_SIZE(spitz_rom_parts), .parts = spitz_rom_parts, }; static struct resource spitz_rom_resources[] = { { .start = PXA_CS0_PHYS, .end = PXA_CS0_PHYS + SZ_8M - 1, .flags = IORESOURCE_MEM, }, }; static struct platform_device spitz_rom_device = { .name = "physmap-flash", .id = -1, .resource = spitz_rom_resources, .num_resources = ARRAY_SIZE(spitz_rom_resources), .dev = { .platform_data = &spitz_rom_data, }, }; static void __init spitz_nor_init(void) { platform_device_register(&spitz_rom_device); } #else static inline void spitz_nor_init(void) {} #endif /****************************************************************************** * GPIO expander ******************************************************************************/ #if defined(CONFIG_I2C_PXA) || defined(CONFIG_I2C_PXA_MODULE) static struct pca953x_platform_data akita_pca953x_pdata = { .gpio_base = AKITA_IOEXP_GPIO_BASE, }; static struct i2c_board_info spitz_i2c_devs[] = { { .type = "wm8750", .addr = 0x1b, }, { .type = "max7310", .addr = 0x18, .platform_data = &akita_pca953x_pdata, }, }; static struct regulator_consumer_supply isl6271a_consumers[] = { { .supply = "vcc_core", } }; static struct regulator_init_data isl6271a_info[] = { { .constraints = { .name = "vcc_core range", .min_uV = 850000, .max_uV = 1600000, .always_on = 1, .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE, }, .consumer_supplies = isl6271a_consumers, .num_consumer_supplies = ARRAY_SIZE(isl6271a_consumers), } }; static struct 
i2c_board_info spitz_pi2c_devs[] = { { .type = "isl6271a", .addr = 0x0c, .platform_data = &isl6271a_info, }, }; static void __init spitz_i2c_init(void) { int size = ARRAY_SIZE(spitz_i2c_devs); /* Only Akita has the max7310 chip */ if (!machine_is_akita()) size--; pxa_set_i2c_info(NULL); pxa27x_set_i2c_power_info(NULL); i2c_register_board_info(0, spitz_i2c_devs, size); i2c_register_board_info(1, ARRAY_AND_SIZE(spitz_pi2c_devs)); } #else static inline void spitz_i2c_init(void) {} #endif /****************************************************************************** * Machine init ******************************************************************************/ static void spitz_poweroff(void) { arm_machine_restart('g', NULL); } static void spitz_restart(char mode, const char *cmd) { uint32_t msc0 = __raw_readl(MSC0); /* Bootloader magic for a reboot */ if ((msc0 & 0xffff0000) == 0x7ff00000) __raw_writel((msc0 & 0xffff) | 0x7ee00000, MSC0); spitz_poweroff(); } static void __init spitz_init(void) { init_gpio_reset(SPITZ_GPIO_ON_RESET, 1, 0); pm_power_off = spitz_poweroff; arm_pm_restart = spitz_restart; PMCR = 0x00; /* Stop 3.6MHz and drive HIGH to PCMCIA and CS */ PCFR |= PCFR_OPDE; pxa2xx_mfp_config(ARRAY_AND_SIZE(spitz_pin_config)); pxa_set_ffuart_info(NULL); pxa_set_btuart_info(NULL); pxa_set_stuart_info(NULL); spitz_spi_init(); spitz_scoop_init(); spitz_mkp_init(); spitz_keys_init(); spitz_leds_init(); spitz_mmc_init(); spitz_pcmcia_init(); spitz_irda_init(); spitz_uhc_init(); spitz_lcd_init(); spitz_nor_init(); spitz_nand_init(); spitz_i2c_init(); } static void __init spitz_fixup(struct machine_desc *desc, struct tag *tags, char **cmdline, struct meminfo *mi) { sharpsl_save_param(); mi->nr_banks = 1; mi->bank[0].start = 0xa0000000; mi->bank[0].size = (64*1024*1024); } #ifdef CONFIG_MACH_SPITZ MACHINE_START(SPITZ, "SHARP Spitz") .fixup = spitz_fixup, .map_io = pxa27x_map_io, .init_irq = pxa27x_init_irq, .init_machine = spitz_init, .timer = &pxa_timer, MACHINE_END 
#endif #ifdef CONFIG_MACH_BORZOI MACHINE_START(BORZOI, "SHARP Borzoi") .fixup = spitz_fixup, .map_io = pxa27x_map_io, .init_irq = pxa27x_init_irq, .init_machine = spitz_init, .timer = &pxa_timer, MACHINE_END #endif #ifdef CONFIG_MACH_AKITA MACHINE_START(AKITA, "SHARP Akita") .fixup = spitz_fixup, .map_io = pxa27x_map_io, .init_irq = pxa27x_init_irq, .init_machine = spitz_init, .timer = &pxa_timer, MACHINE_END #endif
gpl-2.0
pakohan/syso-kernel
linux-3.4.68/arch/alpha/mm/fault.c
3961
5797
/*
 *  linux/arch/alpha/mm/fault.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <asm/io.h>

#define __EXTERN_INLINE inline
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#undef  __EXTERN_INLINE

#include <linux/signal.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/module.h>

#include <asm/uaccess.h>

extern void die_if_kernel(char *,struct pt_regs *,long, unsigned long *);


/*
 * Force a new ASN for a task.
 */

#ifndef CONFIG_SMP
unsigned long last_asn = ASN_FIRST_VERSION;
#endif

/*
 * Install a fresh mm context (new ASN + page-table base) for this CPU
 * and reload the thread's PCB so the hardware picks it up.
 */
void
__load_new_mm_context(struct mm_struct *next_mm)
{
	unsigned long mmc;
	struct pcb_struct *pcb;

	mmc = __get_new_mm_context(next_mm, smp_processor_id());
	next_mm->context[smp_processor_id()] = mmc;

	pcb = &current_thread_info()->pcb;
	pcb->asn = mmc & HARDWARE_ASN_MASK;
	/* ptbr is the physical page frame number of the top-level pgd. */
	pcb->ptbr = ((unsigned long) next_mm->pgd - IDENT_ADDR) >> PAGE_SHIFT;

	__reload_thread(pcb);
}


/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to handle_mm_fault().
 *
 * mmcsr:
 *	0 = translation not valid
 *	1 = access violation
 *	2 = fault-on-read
 *	3 = fault-on-execute
 *	4 = fault-on-write
 *
 * cause:
 *	-1 = instruction fetch
 *	0 = load
 *	1 = store
 *
 * Registers $9 through $15 are saved in a block just prior to `regs' and
 * are saved and restored around the call to allow exception code to
 * modify them.
 */

/* Macro for exception fixup code to access integer registers.  */
#define dpf_reg(r)							\
	(((unsigned long *)regs)[(r) <= 8 ? (r) : (r) <= 15 ? (r)-16 :	\
				 (r) <= 18 ? (r)+8 : (r)-10])

asmlinkage void
do_page_fault(unsigned long address, unsigned long mmcsr,
	      long cause, struct pt_regs *regs)
{
	struct vm_area_struct * vma;
	struct mm_struct *mm = current->mm;
	const struct exception_table_entry *fixup;
	int fault, si_code = SEGV_MAPERR;
	siginfo_t info;

	/* As of EV6, a load into $31/$f31 is a prefetch, and never faults
	   (or is suppressed by the PALcode).  Support that for older CPUs
	   by ignoring such an instruction.  */
	if (cause == 0) {
		unsigned int insn;
		/* Return value deliberately ignored: a failed fetch just
		   means the prefetch test below won't match.  */
		__get_user(insn, (unsigned int __user *)regs->pc);
		if ((insn >> 21 & 0x1f) == 0x1f &&
		    /* ldq ldl ldt lds ldg ldf ldwu ldbu */
		    (1ul << (insn >> 26) & 0x30f00001400ul)) {
			regs->pc += 4;
			return;
		}
	}

	/* If we're in an interrupt context, or have no user context,
	   we must not take the fault.  */
	if (!mm || in_atomic())
		goto no_context;

#ifdef CONFIG_ALPHA_LARGE_VMALLOC
	if (address >= TASK_SIZE)
		goto vmalloc_fault;
#endif

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	/* Address below the vma: only valid if this is a growable stack
	   that can be expanded down to cover it.  */
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (expand_stack(vma, address))
		goto bad_area;

	/* Ok, we have a good vm_area for this memory access, so
	   we can handle it.  */
 good_area:
	si_code = SEGV_ACCERR;
	if (cause < 0) {
		/* Instruction fetch from a non-executable mapping.  */
		if (!(vma->vm_flags & VM_EXEC))
			goto bad_area;
	} else if (!cause) {
		/* Allow reads even for write-only mappings */
		if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
			goto bad_area;
	} else {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
	}

	/* If for any reason at all we couldn't handle the fault,
	   make sure we exit gracefully rather than endlessly redo
	   the fault.  */
	fault = handle_mm_fault(mm, vma, address, cause > 0 ? FAULT_FLAG_WRITE : 0);
	up_read(&mm->mmap_sem);
	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}
	if (fault & VM_FAULT_MAJOR)
		current->maj_flt++;
	else
		current->min_flt++;
	return;

	/* Something tried to access memory that isn't in our memory map.
	   Fix it, but check if it's kernel or user first.  */
 bad_area:
	up_read(&mm->mmap_sem);

	if (user_mode(regs))
		goto do_sigsegv;

 no_context:
	/* Are we prepared to handle this fault as an exception?  */
	if ((fixup = search_exception_tables(regs->pc)) != 0) {
		unsigned long newpc;
		newpc = fixup_exception(dpf_reg, fixup, regs->pc);
		regs->pc = newpc;
		return;
	}

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	printk(KERN_ALERT "Unable to handle kernel paging request at "
	       "virtual address %016lx\n", address);
	die_if_kernel("Oops", regs, cause, (unsigned long*)regs - 16);
	do_exit(SIGKILL);

	/* We ran out of memory, or some other thing happened to us that
	   made us unable to handle the page fault gracefully.  */
 out_of_memory:
	if (!user_mode(regs))
		goto no_context;
	pagefault_out_of_memory();
	return;

 do_sigbus:
	/* Send a sigbus, regardless of whether we were in kernel
	   or user mode.  */
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = BUS_ADRERR;
	info.si_addr = (void __user *) address;
	force_sig_info(SIGBUS, &info, current);
	if (!user_mode(regs))
		goto no_context;
	return;

 do_sigsegv:
	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code = si_code;
	info.si_addr = (void __user *) address;
	force_sig_info(SIGSEGV, &info, current);
	return;

#ifdef CONFIG_ALPHA_LARGE_VMALLOC
 vmalloc_fault:
	if (user_mode(regs))
		goto do_sigsegv;
	else {
		/* Synchronize this task's top level page-table
		   with the "reference" page table from init.  */
		long index = pgd_index(address);
		pgd_t *pgd, *pgd_k;

		pgd = current->active_mm->pgd + index;
		pgd_k = swapper_pg_dir + index;
		if (!pgd_present(*pgd) && pgd_present(*pgd_k)) {
			pgd_val(*pgd) = pgd_val(*pgd_k);
			return;
		}
		goto no_context;
	}
#endif
}
gpl-2.0
bigzz/linux-btrfs
arch/arm/mach-sa1100/shannon.c
4473
2486
/*
 * linux/arch/arm/mach-sa1100/shannon.c
 */

#include <linux/init.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/platform_data/sa11x0-serial.h>
#include <linux/tty.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>

#include <video/sa1100fb.h>

#include <mach/hardware.h>
#include <asm/mach-types.h>
#include <asm/setup.h>

#include <asm/mach/arch.h>
#include <asm/mach/flash.h>
#include <asm/mach/map.h>
#include <linux/platform_data/mfd-mcp-sa11x0.h>
#include <mach/shannon.h>
#include <mach/irqs.h>

#include "generic.h"

/* Fixed CFI-flash layout: bootloader, kernel, then initrd fills the rest. */
static struct mtd_partition shannon_partitions[] = {
	{
		.name		= "BLOB boot loader",
		.offset		= 0,
		.size		= 0x20000
	},
	{
		.name		= "kernel",
		.offset		= MTDPART_OFS_APPEND,
		.size		= 0xe0000
	},
	{
		.name		= "initrd",
		.offset		= MTDPART_OFS_APPEND,
		.size		= MTDPART_SIZ_FULL
	}
};

static struct flash_platform_data shannon_flash_data = {
	.map_name	= "cfi_probe",
	.parts		= shannon_partitions,
	.nr_parts	= ARRAY_SIZE(shannon_partitions),
};

/* 4 MiB of flash mapped at static chip select 0. */
static struct resource shannon_flash_resource =
	DEFINE_RES_MEM(SA1100_CS0_PHYS, SZ_4M);

static struct mcp_plat_data shannon_mcp_data = {
	.mccr0		= MCCR0_ADM,
	.sclk_rate	= 11981000,
};

/* 640x480 8bpp dual-scan passive panel timings for the Shannon LCD. */
static struct sa1100fb_mach_info shannon_lcd_info = {
	.pixclock	= 152500,	.bpp		= 8,
	.xres		= 640,		.yres		= 480,

	.hsync_len	= 4,		.vsync_len	= 3,
	.left_margin	= 2,		.upper_margin	= 0,
	.right_margin	= 1,		.lower_margin	= 0,

	.sync		= FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,

	.lccr0		= LCCR0_Color | LCCR0_Dual | LCCR0_Pas,
	.lccr3		= LCCR3_ACBsDiv(512),
};

/* Register the board's LCD, flash and MCP (codec bus) devices. */
static void __init shannon_init(void)
{
	sa11x0_ppc_configure_mcp();
	sa11x0_register_lcd(&shannon_lcd_info);
	sa11x0_register_mtd(&shannon_flash_data, &shannon_flash_resource, 1);
	sa11x0_register_mcp(&shannon_mcp_data);
}

/*
 * Map the SA1100 I/O space, wire up the UARTs and set up the GPIO
 * directions/alternate functions the board needs, then pulse the codec
 * reset line.
 */
static void __init shannon_map_io(void)
{
	sa1100_map_io();

	sa1100_register_uart(0, 3);
	sa1100_register_uart(1, 1);

	Ser1SDCR0 |= SDCR0_SUS;
	GAFR |= (GPIO_UART_TXD | GPIO_UART_RXD);
	GPDR |= GPIO_UART_TXD | SHANNON_GPIO_CODEC_RESET;
	GPDR &= ~GPIO_UART_RXD;
	PPAR |= PPAR_UPR;

	/* reset the codec: drive the line low (GPCR) then high (GPSR) */
	GPCR = SHANNON_GPIO_CODEC_RESET;
	GPSR = SHANNON_GPIO_CODEC_RESET;
}

MACHINE_START(SHANNON, "Shannon (AKA: Tuxscreen)")
	.atag_offset	= 0x100,
	.map_io		= shannon_map_io,
	.nr_irqs	= SA1100_NR_IRQS,
	.init_irq	= sa1100_init_irq,
	.init_time	= sa1100_timer_init,
	.init_machine	= shannon_init,
	.init_late	= sa11x0_init_late,
	.restart	= sa11x0_restart,
MACHINE_END
gpl-2.0
AndroPlus-org/android_kernel_sony_msm8974ac
arch/arm/mach-omap2/board-omap3beagle.c
4729
14649
/*
 * linux/arch/arm/mach-omap2/board-omap3beagle.c
 *
 * Copyright (C) 2008 Texas Instruments
 *
 * Modified from mach-omap2/board-3430sdp.c
 *
 * Initial code: Syed Mohammed Khasim
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/leds.h>
#include <linux/gpio.h>
#include <linux/input.h>
#include <linux/gpio_keys.h>
#include <linux/opp.h>

#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/nand.h>
#include <linux/mmc/host.h>

#include <linux/regulator/machine.h>
#include <linux/i2c/twl.h>

#include <mach/hardware.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include <asm/mach/flash.h>

#include <plat/board.h>
#include "common.h"
#include <video/omapdss.h>
#include <video/omap-panel-dvi.h>
#include <plat/gpmc.h>
#include <plat/nand.h>
#include <plat/usb.h>
#include <plat/omap_device.h>

#include "mux.h"
#include "hsmmc.h"
#include "pm.h"
#include "common-board-devices.h"

/*
 * OMAP3 Beagle revision
 * Run time detection of Beagle revision is done by reading GPIO.
 * GPIO ID -
 *	AXBX	= GPIO173, GPIO172, GPIO171: 1 1 1
 *	C1_3	= GPIO173, GPIO172, GPIO171: 1 1 0
 *	C4	= GPIO173, GPIO172, GPIO171: 1 0 1
 *	XMA/XMB = GPIO173, GPIO172, GPIO171: 0 0 0
 *	XMC	= GPIO173, GPIO172, GPIO171: 0 1 0
 */
enum {
	OMAP3BEAGLE_BOARD_UNKN = 0,
	OMAP3BEAGLE_BOARD_AXBX,
	OMAP3BEAGLE_BOARD_C1_3,
	OMAP3BEAGLE_BOARD_C4,
	OMAP3BEAGLE_BOARD_XM,
	OMAP3BEAGLE_BOARD_XMC,
};

/* Detected board revision; set once by omap3_beagle_init_rev(). */
static u8 omap3_beagle_version;

/*
 * Board-specific configuration
 * Defaults to BeagleBoard-xMC
 */
static struct {
	int mmc1_gpio_wp;	/* MMC1 write-protect GPIO, -EINVAL if none */
	int usb_pwr_level;	/* initial level of the nEN_USB_PWR pin */
	int reset_gpio;		/* DVI reset GPIO */
	int usr_button_gpio;	/* user button GPIO */
} beagle_config = {
	.mmc1_gpio_wp = -EINVAL,
	.usb_pwr_level = GPIOF_OUT_INIT_LOW,
	.reset_gpio = 129,
	.usr_button_gpio = 4,
};

static struct gpio omap3_beagle_rev_gpios[] __initdata = {
	{ 171, GPIOF_IN, "rev_id_0" },
	{ 172, GPIOF_IN, "rev_id_1" },
	{ 173, GPIOF_IN, "rev_id_2" },
};

/*
 * Read GPIO171..173, decode the board revision (see table above) and
 * fill in omap3_beagle_version and the revision-dependent fields of
 * beagle_config.  The detection GPIOs are released afterwards.
 */
static void __init omap3_beagle_init_rev(void)
{
	int ret;
	u16 beagle_rev = 0;

	omap_mux_init_gpio(171, OMAP_PIN_INPUT_PULLUP);
	omap_mux_init_gpio(172, OMAP_PIN_INPUT_PULLUP);
	omap_mux_init_gpio(173, OMAP_PIN_INPUT_PULLUP);

	ret = gpio_request_array(omap3_beagle_rev_gpios,
				 ARRAY_SIZE(omap3_beagle_rev_gpios));
	if (ret < 0) {
		printk(KERN_ERR "Unable to get revision detection GPIO pins\n");
		omap3_beagle_version = OMAP3BEAGLE_BOARD_UNKN;
		return;
	}

	beagle_rev = gpio_get_value(171) | (gpio_get_value(172) << 1)
			| (gpio_get_value(173) << 2);

	gpio_free_array(omap3_beagle_rev_gpios,
			ARRAY_SIZE(omap3_beagle_rev_gpios));

	switch (beagle_rev) {
	case 7:
		printk(KERN_INFO "OMAP3 Beagle Rev: Ax/Bx\n");
		omap3_beagle_version = OMAP3BEAGLE_BOARD_AXBX;
		beagle_config.mmc1_gpio_wp = 29;
		beagle_config.reset_gpio = 170;
		beagle_config.usr_button_gpio = 7;
		break;
	case 6:
		printk(KERN_INFO "OMAP3 Beagle Rev: C1/C2/C3\n");
		omap3_beagle_version = OMAP3BEAGLE_BOARD_C1_3;
		beagle_config.mmc1_gpio_wp = 23;
		beagle_config.reset_gpio = 170;
		beagle_config.usr_button_gpio = 7;
		break;
	case 5:
		printk(KERN_INFO "OMAP3 Beagle Rev: C4\n");
		omap3_beagle_version = OMAP3BEAGLE_BOARD_C4;
		beagle_config.mmc1_gpio_wp = 23;
		beagle_config.reset_gpio = 170;
		beagle_config.usr_button_gpio = 7;
		break;
	case 0:
		printk(KERN_INFO "OMAP3 Beagle Rev: xM Ax/Bx\n");
		omap3_beagle_version = OMAP3BEAGLE_BOARD_XM;
		beagle_config.usb_pwr_level = GPIOF_OUT_INIT_HIGH;
		break;
	case 2:
		printk(KERN_INFO "OMAP3 Beagle Rev: xM C\n");
		omap3_beagle_version = OMAP3BEAGLE_BOARD_XMC;
		break;
	default:
		printk(KERN_INFO "OMAP3 Beagle Rev: unknown %hd\n", beagle_rev);
		omap3_beagle_version = OMAP3BEAGLE_BOARD_UNKN;
	}
}

static struct mtd_partition omap3beagle_nand_partitions[] = {
	/* All the partition sizes are listed in terms of NAND block size */
	{
		.name		= "X-Loader",
		.offset		= 0,
		.size		= 4 * NAND_BLOCK_SIZE,
		.mask_flags	= MTD_WRITEABLE,	/* force read-only */
	},
	{
		.name		= "U-Boot",
		.offset		= MTDPART_OFS_APPEND,	/* Offset = 0x80000 */
		.size		= 15 * NAND_BLOCK_SIZE,
		.mask_flags	= MTD_WRITEABLE,	/* force read-only */
	},
	{
		.name		= "U-Boot Env",
		.offset		= MTDPART_OFS_APPEND,	/* Offset = 0x260000 */
		.size		= 1 * NAND_BLOCK_SIZE,
	},
	{
		.name		= "Kernel",
		.offset		= MTDPART_OFS_APPEND,	/* Offset = 0x280000 */
		.size		= 32 * NAND_BLOCK_SIZE,
	},
	{
		.name		= "File System",
		.offset		= MTDPART_OFS_APPEND,	/* Offset = 0x680000 */
		.size		= MTDPART_SIZ_FULL,
	},
};

/* DSS */

static int beagle_enable_dvi(struct omap_dss_device *dssdev)
{
	if (gpio_is_valid(dssdev->reset_gpio))
		gpio_set_value(dssdev->reset_gpio, 1);

	return 0;
}

static void beagle_disable_dvi(struct omap_dss_device *dssdev)
{
	if (gpio_is_valid(dssdev->reset_gpio))
		gpio_set_value(dssdev->reset_gpio, 0);
}

static struct panel_dvi_platform_data dvi_panel = {
	.platform_enable = beagle_enable_dvi,
	.platform_disable = beagle_disable_dvi,
	.i2c_bus_num = 3,
};

static struct omap_dss_device beagle_dvi_device = {
	.type = OMAP_DISPLAY_TYPE_DPI,
	.name = "dvi",
	.driver_name = "dvi",
	.data = &dvi_panel,
	.phy.dpi.data_lines = 24,
	/* replaced with beagle_config.reset_gpio in beagle_twl_gpio_setup() */
	.reset_gpio = -EINVAL,
};

static struct omap_dss_device beagle_tv_device = {
	.name = "tv",
	.driver_name = "venc",
	.type = OMAP_DISPLAY_TYPE_VENC,
	.phy.venc.type = OMAP_DSS_VENC_TYPE_SVIDEO,
};

static struct omap_dss_device *beagle_dss_devices[] = {
	&beagle_dvi_device,
	&beagle_tv_device,
};

static struct omap_dss_board_info beagle_dss_data = {
	.num_devices = ARRAY_SIZE(beagle_dss_devices),
	.devices = beagle_dss_devices,
	.default_device = &beagle_dvi_device,
};

static void __init beagle_display_init(void)
{
	int r;

	r = gpio_request_one(beagle_dvi_device.reset_gpio, GPIOF_OUT_INIT_LOW,
			     "DVI reset");
	if (r < 0)
		printk(KERN_ERR "Unable to get DVI reset GPIO\n");
}

#include "sdram-micron-mt46h32m32lf-6.h"

static struct omap2_hsmmc_info mmc[] = {
	{
		.mmc		= 1,
		.caps		= MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA,
		.gpio_wp	= -EINVAL,
		.deferred	= true,
	},
	{}	/* Terminator */
};

static struct regulator_consumer_supply beagle_vmmc1_supply[] = {
	REGULATOR_SUPPLY("vmmc", "omap_hsmmc.0"),
};

static struct regulator_consumer_supply beagle_vsim_supply[] = {
	REGULATOR_SUPPLY("vmmc_aux", "omap_hsmmc.0"),
};

/* defined below; the PMU_STAT LED gpio is patched in at TWL setup time */
static struct gpio_led gpio_leds[];

/*
 * Callback run once the TWL4030 GPIO block is up: finish MMC setup,
 * claim the revision-dependent DVI power/enable GPIOs, and patch the
 * PMU_STAT LED's gpio number now that the TWL gpio base is known.
 */
static int beagle_twl_gpio_setup(struct device *dev,
		unsigned gpio, unsigned ngpio)
{
	int r;

	mmc[0].gpio_wp = beagle_config.mmc1_gpio_wp;
	/* gpio + 0 is "mmc0_cd" (input/IRQ) */
	mmc[0].gpio_cd = gpio + 0;
	omap_hsmmc_late_init(mmc);

	/*
	 * TWL4030_GPIO_MAX + 0 == ledA, EHCI nEN_USB_PWR (out, XM active
	 * high / others active low)
	 * DVI reset GPIO is different between beagle revisions
	 */
	/* Valid for all -xM revisions */
	if (cpu_is_omap3630()) {
		/*
		 * gpio + 1 on Xm controls the TFP410's enable line (active low)
		 * gpio + 2 control varies depending on the board rev as below:
		 * P7/P8 revisions(prototype): Camera EN
		 * A2+ revisions (production): LDO (DVI, serial, led blocks)
		 */
		r = gpio_request_one(gpio + 1, GPIOF_OUT_INIT_LOW,
				     "nDVI_PWR_EN");
		if (r)
			pr_err("%s: unable to configure nDVI_PWR_EN\n",
				__func__);
		r = gpio_request_one(gpio + 2, GPIOF_OUT_INIT_HIGH,
				     "DVI_LDO_EN");
		if (r)
			pr_err("%s: unable to configure DVI_LDO_EN\n",
				__func__);
	} else {
		/*
		 * REVISIT: need ehci-omap hooks for external VBUS
		 * power switch and overcurrent detect
		 */
		if (gpio_request_one(gpio + 1, GPIOF_IN, "EHCI_nOC"))
			pr_err("%s: unable to configure EHCI_nOC\n", __func__);
	}

	beagle_dvi_device.reset_gpio = beagle_config.reset_gpio;

	gpio_request_one(gpio + TWL4030_GPIO_MAX, beagle_config.usb_pwr_level,
			 "nEN_USB_PWR");

	/* TWL4030_GPIO_MAX + 1 == ledB, PMU_STAT (out, active low LED) */
	gpio_leds[2].gpio = gpio + TWL4030_GPIO_MAX + 1;

	return 0;
}

static struct twl4030_gpio_platform_data beagle_gpio_data = {
	.gpio_base	= OMAP_MAX_GPIO_LINES,
	.irq_base	= TWL4030_GPIO_IRQ_BASE,
	.irq_end	= TWL4030_GPIO_IRQ_END,
	.use_leds	= true,
	.pullups	= BIT(1),
	.pulldowns	= BIT(2) | BIT(6) | BIT(7) | BIT(8) | BIT(13)
				| BIT(15) | BIT(16) | BIT(17),
	.setup		= beagle_twl_gpio_setup,
};

/* VMMC1 for MMC1 pins CMD, CLK, DAT0..DAT3 (20 mA, plus card == max 220 mA) */
static struct regulator_init_data beagle_vmmc1 = {
	.constraints = {
		.min_uV			= 1850000,
		.max_uV			= 3150000,
		.valid_modes_mask	= REGULATOR_MODE_NORMAL
					| REGULATOR_MODE_STANDBY,
		.valid_ops_mask		= REGULATOR_CHANGE_VOLTAGE
					| REGULATOR_CHANGE_MODE
					| REGULATOR_CHANGE_STATUS,
	},
	.num_consumer_supplies	= ARRAY_SIZE(beagle_vmmc1_supply),
	.consumer_supplies	= beagle_vmmc1_supply,
};

/* VSIM for MMC1 pins DAT4..DAT7 (2 mA, plus card == max 50 mA) */
static struct regulator_init_data beagle_vsim = {
	.constraints = {
		.min_uV			= 1800000,
		.max_uV			= 3000000,
		.valid_modes_mask	= REGULATOR_MODE_NORMAL
					| REGULATOR_MODE_STANDBY,
		.valid_ops_mask		= REGULATOR_CHANGE_VOLTAGE
					| REGULATOR_CHANGE_MODE
					| REGULATOR_CHANGE_STATUS,
	},
	.num_consumer_supplies	= ARRAY_SIZE(beagle_vsim_supply),
	.consumer_supplies	= beagle_vsim_supply,
};

static struct twl4030_platform_data beagle_twldata = {
	/* platform_data for children goes here */
	.gpio		= &beagle_gpio_data,
	.vmmc1		= &beagle_vmmc1,
	.vsim		= &beagle_vsim,
};

static struct i2c_board_info __initdata beagle_i2c_eeprom[] = {
	{
		I2C_BOARD_INFO("eeprom", 0x50),
	},
};

static int __init omap3_beagle_i2c_init(void)
{
	omap3_pmic_get_config(&beagle_twldata,
			TWL_COMMON_PDATA_USB | TWL_COMMON_PDATA_MADC |
			TWL_COMMON_PDATA_AUDIO,
			TWL_COMMON_REGULATOR_VDAC | TWL_COMMON_REGULATOR_VPLL2);

	beagle_twldata.vpll2->constraints.name = "VDVI";

	omap3_pmic_init("twl4030", &beagle_twldata);
	/* Bus 3 is attached to the DVI port where devices like the pico DLP
	 * projector don't work reliably with 400kHz */
	omap_register_i2c_bus(3, 100, beagle_i2c_eeprom,
			ARRAY_SIZE(beagle_i2c_eeprom));
	return 0;
}

static struct gpio_led gpio_leds[] = {
	{
		.name			= "beagleboard::usr0",
		.default_trigger	= "heartbeat",
		.gpio			= 150,
	},
	{
		.name			= "beagleboard::usr1",
		.default_trigger	= "mmc0",
		.gpio			= 149,
	},
	{
		.name			= "beagleboard::pmu_stat",
		.gpio			= -EINVAL,	/* gets replaced */
		.active_low		= true,
	},
};

static struct gpio_led_platform_data gpio_led_info = {
	.leds		= gpio_leds,
	.num_leds	= ARRAY_SIZE(gpio_leds),
};

static struct platform_device leds_gpio = {
	.name	= "leds-gpio",
	.id	= -1,
	.dev	= {
		.platform_data	= &gpio_led_info,
	},
};

static struct gpio_keys_button gpio_buttons[] = {
	{
		.code			= BTN_EXTRA,
		/* Dynamically assigned depending on board */
		.gpio			= -EINVAL,
		.desc			= "user",
		.wakeup			= 1,
	},
};

static struct gpio_keys_platform_data gpio_key_info = {
	.buttons	= gpio_buttons,
	.nbuttons	= ARRAY_SIZE(gpio_buttons),
};

static struct platform_device keys_gpio = {
	.name	= "gpio-keys",
	.id	= -1,
	.dev	= {
		.platform_data	= &gpio_key_info,
	},
};

static struct platform_device madc_hwmon = {
	.name	= "twl4030_madc_hwmon",
	.id	= -1,
};

static struct platform_device *omap3_beagle_devices[] __initdata = {
	&leds_gpio,
	&keys_gpio,
	&madc_hwmon,
};

static const struct usbhs_omap_board_data usbhs_bdata __initconst = {
	.port_mode[0] = OMAP_EHCI_PORT_MODE_PHY,
	.port_mode[1] = OMAP_EHCI_PORT_MODE_PHY,
	.port_mode[2] = OMAP_USBHS_PORT_MODE_UNUSED,

	.phy_reset  = true,
	.reset_gpio_port[0]  = -EINVAL,
	.reset_gpio_port[1]  = 147,
	.reset_gpio_port[2]  = -EINVAL
};

#ifdef CONFIG_OMAP_MUX
static struct omap_board_mux board_mux[] __initdata = {
	{ .reg_offset = OMAP_MUX_TERMINATOR },
};
#endif

/*
 * Populate the default OPP table and, on xM (omap3630) boards, enable
 * the higher MPU/IVA operating points.  Failures are logged and the
 * extra OPPs rolled back; boot continues either way.
 */
static void __init beagle_opp_init(void)
{
	int r = 0;

	/* Initialize the omap3 opp table */
	if (omap3_opp_init()) {
		pr_err("%s: opp default init failed\n", __func__);
		return;
	}

	/* Custom OPP enabled for all xM versions */
	if (cpu_is_omap3630()) {
		struct device *mpu_dev, *iva_dev;

		mpu_dev = omap_device_get_by_hwmod_name("mpu");
		iva_dev = omap_device_get_by_hwmod_name("iva");

		if (!mpu_dev || !iva_dev) {
			pr_err("%s: Aiee.. no mpu/dsp devices? %p %p\n",
				__func__, mpu_dev, iva_dev);
			return;
		}
		/* Enable MPU 1GHz and lower opps */
		r = opp_enable(mpu_dev, 800000000);
		/* TODO: MPU 1GHz needs SR and ABB */

		/* Enable IVA 800MHz and lower opps */
		r |= opp_enable(iva_dev, 660000000);
		/* TODO: DSP 800MHz needs SR and ABB */
		if (r) {
			pr_err("%s: failed to enable higher opp %d\n",
				__func__, r);
			/*
			 * Cleanup - disable the higher freqs - we dont care
			 * about the results
			 */
			opp_disable(mpu_dev, 800000000);
			opp_disable(iva_dev, 660000000);
		}
	}
	return;
}

/* Main machine init: mux, revision detect, then register everything. */
static void __init omap3_beagle_init(void)
{
	omap3_mux_init(board_mux, OMAP_PACKAGE_CBB);
	omap3_beagle_init_rev();

	if (beagle_config.mmc1_gpio_wp != -EINVAL)
		omap_mux_init_gpio(beagle_config.mmc1_gpio_wp, OMAP_PIN_INPUT);
	omap_hsmmc_init(mmc);

	omap3_beagle_i2c_init();

	gpio_buttons[0].gpio = beagle_config.usr_button_gpio;

	platform_add_devices(omap3_beagle_devices,
			ARRAY_SIZE(omap3_beagle_devices));
	omap_display_init(&beagle_dss_data);
	omap_serial_init();
	omap_sdrc_init(mt46h32m32lf6_sdrc_params,
				  mt46h32m32lf6_sdrc_params);

	omap_mux_init_gpio(170, OMAP_PIN_INPUT);
	/* REVISIT leave DVI powered down until it's needed ... */
	gpio_request_one(170, GPIOF_OUT_INIT_HIGH, "DVI_nPD");

	usb_musb_init(NULL);
	usbhs_init(&usbhs_bdata);
	omap_nand_flash_init(NAND_BUSWIDTH_16, omap3beagle_nand_partitions,
			     ARRAY_SIZE(omap3beagle_nand_partitions));

	/* Ensure msecure is mux'd to be able to set the RTC. */
	omap_mux_init_signal("sys_drm_msecure", OMAP_PIN_OFF_OUTPUT_HIGH);

	/* Ensure SDRC pins are mux'd for self-refresh */
	omap_mux_init_signal("sdrc_cke0", OMAP_PIN_OUTPUT);
	omap_mux_init_signal("sdrc_cke1", OMAP_PIN_OUTPUT);

	beagle_display_init();
	beagle_opp_init();
}

MACHINE_START(OMAP3_BEAGLE, "OMAP3 Beagle Board")
	/* Maintainer: Syed Mohammed Khasim - http://beagleboard.org */
	.atag_offset	= 0x100,
	.reserve	= omap_reserve,
	.map_io		= omap3_map_io,
	.init_early	= omap3_init_early,
	.init_irq	= omap3_init_irq,
	.handle_irq	= omap3_intc_handle_irq,
	.init_machine	= omap3_beagle_init,
	.timer		= &omap3_secure_timer,
	.restart	= omap_prcm_restart,
MACHINE_END
gpl-2.0
CaptainThrowback/kernel_htc_m8_LolliSense
drivers/input/misc/ad714x-spi.c
4985
3048
/*
 * AD714X CapTouch Programmable Controller driver (SPI bus)
 *
 * Copyright 2009-2011 Analog Devices Inc.
 *
 * Licensed under the GPL-2 or later.
 */

#include <linux/input.h>	/* BUS_SPI */
#include <linux/module.h>
#include <linux/spi/spi.h>
#include <linux/pm.h>
#include <linux/types.h>
#include "ad714x.h"

#define AD714x_SPI_CMD_PREFIX      0xE000   /* bits 15:11 */
#define AD714x_SPI_READ            BIT(10)

#ifdef CONFIG_PM
static int ad714x_spi_suspend(struct device *dev)
{
	return ad714x_disable(spi_get_drvdata(to_spi_device(dev)));
}

static int ad714x_spi_resume(struct device *dev)
{
	return ad714x_enable(spi_get_drvdata(to_spi_device(dev)));
}
#endif

static SIMPLE_DEV_PM_OPS(ad714x_spi_pm, ad714x_spi_suspend, ad714x_spi_resume);

/*
 * Read 'len' consecutive 16-bit registers starting at 'reg'.
 *
 * A single SPI message with two transfers is used: the first clocks out
 * the big-endian command word (prefix | read bit | reg) from
 * chip->xfer_buf[0], the second clocks 'len' big-endian data words into
 * chip->xfer_buf[1..len], which are then byte-swapped into 'data'.
 * Returns 0 on success or the negative spi_sync() error.
 */
static int ad714x_spi_read(struct ad714x_chip *chip,
			   unsigned short reg, unsigned short *data,
			   size_t len)
{
	struct spi_device *spi = to_spi_device(chip->dev);
	struct spi_message message;
	struct spi_transfer xfer[2];
	int i;
	int error;

	spi_message_init(&message);
	memset(xfer, 0, sizeof(xfer));

	chip->xfer_buf[0] = cpu_to_be16(AD714x_SPI_CMD_PREFIX |
					AD714x_SPI_READ | reg);
	xfer[0].tx_buf = &chip->xfer_buf[0];
	xfer[0].len = sizeof(chip->xfer_buf[0]);
	spi_message_add_tail(&xfer[0], &message);

	xfer[1].rx_buf = &chip->xfer_buf[1];
	xfer[1].len = sizeof(chip->xfer_buf[1]) * len;
	spi_message_add_tail(&xfer[1], &message);

	error = spi_sync(spi, &message);
	if (unlikely(error)) {
		dev_err(chip->dev, "SPI read error: %d\n", error);
		return error;
	}

	for (i = 0; i < len; i++)
		data[i] = be16_to_cpu(chip->xfer_buf[i + 1]);

	return 0;
}

/*
 * Write one 16-bit register: big-endian command word followed by the
 * big-endian value, sent from chip->xfer_buf via a plain spi_write().
 */
static int ad714x_spi_write(struct ad714x_chip *chip,
			    unsigned short reg, unsigned short data)
{
	struct spi_device *spi = to_spi_device(chip->dev);
	int error;

	chip->xfer_buf[0] = cpu_to_be16(AD714x_SPI_CMD_PREFIX | reg);
	chip->xfer_buf[1] = cpu_to_be16(data);

	error = spi_write(spi, (u8 *)chip->xfer_buf,
			  2 * sizeof(*chip->xfer_buf));
	if (unlikely(error)) {
		dev_err(chip->dev, "SPI write error: %d\n", error);
		return error;
	}

	return 0;
}

/* Configure the SPI link, then hand off to the bus-agnostic core probe. */
static int __devinit ad714x_spi_probe(struct spi_device *spi)
{
	struct ad714x_chip *chip;
	int err;

	spi->bits_per_word = 8;
	err = spi_setup(spi);
	if (err < 0)
		return err;

	chip = ad714x_probe(&spi->dev, BUS_SPI, spi->irq,
			    ad714x_spi_read, ad714x_spi_write);
	if (IS_ERR(chip))
		return PTR_ERR(chip);

	spi_set_drvdata(spi, chip);

	return 0;
}

static int __devexit ad714x_spi_remove(struct spi_device *spi)
{
	struct ad714x_chip *chip = spi_get_drvdata(spi);

	ad714x_remove(chip);
	spi_set_drvdata(spi, NULL);

	return 0;
}

static struct spi_driver ad714x_spi_driver = {
	.driver = {
		.name	= "ad714x_captouch",
		.owner	= THIS_MODULE,
		.pm	= &ad714x_spi_pm,
	},
	.probe		= ad714x_spi_probe,
	.remove		= __devexit_p(ad714x_spi_remove),
};

module_spi_driver(ad714x_spi_driver);

MODULE_DESCRIPTION("Analog Devices AD714X Capacitance Touch Sensor SPI Bus Driver");
MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
MODULE_LICENSE("GPL");
gpl-2.0
TheTypoMaster/android_kernel_samsung_smdk4412
drivers/gpu/drm/nouveau/nv84_bsp.c
5497
2365
/*
 * Copyright 2011 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */

#include "drmP.h"
#include "nouveau_drv.h"
#include "nouveau_util.h"
#include "nouveau_vm.h"
#include "nouveau_ramht.h"

/*XXX: This stub is currently used on NV98+ also, as soon as this becomes
 *     more than just an enable/disable stub this needs to be split out to
 *     nv98_bsp.c...
 */
struct nv84_bsp_engine {
	struct nouveau_exec_engine base;
};

/* Disable the engine via register 0x200 bit 15, if it is currently set. */
static int
nv84_bsp_fini(struct drm_device *dev, int engine, bool suspend)
{
	if (nv_rd32(dev, 0x000200) & 0x00008000)
		nv_mask(dev, 0x000200, 0x00008000, 0x00000000);

	return 0;
}

/* Toggle register 0x200 bit 15 off and back on to (re)enable the engine. */
static int
nv84_bsp_init(struct drm_device *dev, int engine)
{
	nv_mask(dev, 0x000200, 0x00008000, 0x00000000);
	nv_mask(dev, 0x000200, 0x00008000, 0x00008000);
	return 0;
}

static void
nv84_bsp_destroy(struct drm_device *dev, int engine)
{
	struct nv84_bsp_engine *bsp = nv_engine(dev, engine);

	NVOBJ_ENGINE_DEL(dev, BSP);

	kfree(bsp);
}

/* Allocate the BSP engine stub and register its init/fini/destroy hooks. */
int
nv84_bsp_create(struct drm_device *dev)
{
	struct nv84_bsp_engine *bsp = kzalloc(sizeof(*bsp), GFP_KERNEL);

	if (bsp == NULL)
		return -ENOMEM;

	bsp->base.destroy = nv84_bsp_destroy;
	bsp->base.init    = nv84_bsp_init;
	bsp->base.fini    = nv84_bsp_fini;

	NVOBJ_ENGINE_ADD(dev, BSP, &bsp->base);
	return 0;
}
gpl-2.0
Kayant/android_kernel_motorola_msm8226
arch/unicore32/mm/alignment.c
5753
13291
/*
 * linux/arch/unicore32/mm/alignment.c
 *
 * Code specific to PKUnity SoC and UniCore ISA
 *
 * Copyright (C) 2001-2010 GUAN Xue-tao
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
/*
 * TODO:
 *  FPU ldm/stm not handling
 */
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/uaccess.h>

#include <asm/tlbflush.h>
#include <asm/unaligned.h>

#include "mm.h"

/* Instruction-decoding helpers for the UniCore load/store encodings. */
#define CODING_BITS(i)	(i & 0xe0000120)

#define LDST_P_BIT(i)	(i & (1 << 28))	/* Preindex */
#define LDST_U_BIT(i)	(i & (1 << 27))	/* Add offset */
#define LDST_W_BIT(i)	(i & (1 << 25))	/* Writeback */
#define LDST_L_BIT(i)	(i & (1 << 24))	/* Load */

#define LDST_P_EQ_U(i)	((((i) ^ ((i) >> 1)) & (1 << 27)) == 0)

#define LDSTH_I_BIT(i)	(i & (1 << 26))	/* half-word immed */
#define LDM_S_BIT(i)	(i & (1 << 26))	/* write ASR from BSR */
#define LDM_H_BIT(i)	(i & (1 << 6))	/* select r0-r15 or r16-r31 */

#define RN_BITS(i)	((i >> 19) & 31)	/* Rn */
#define RD_BITS(i)	((i >> 14) & 31)	/* Rd */
#define RM_BITS(i)	(i & 31)	/* Rm */

#define REGMASK_BITS(i)	(((i & 0x7fe00) >> 3) | (i & 0x3f))
#define OFFSET_BITS(i)	(i & 0x03fff)

#define SHIFT_BITS(i)	((i >> 9) & 0x1f)
#define SHIFT_TYPE(i)	(i & 0xc0)
#define SHIFT_LSL	0x00
#define SHIFT_LSR	0x40
#define SHIFT_ASR	0x80
#define SHIFT_RORRRX	0xc0

/* Offset viewed either unsigned (un) or signed (sn), for ASR shifts. */
union offset_union {
	unsigned long un;
	signed long sn;
};

#define TYPE_ERROR	0
#define TYPE_FAULT	1
#define TYPE_LDST	2
#define TYPE_DONE	3
#define TYPE_SWAP	4
#define TYPE_COLS	5	/* Coprocessor load/store */

/*
 * Byte-at-a-time accessors with exception-table fixups: on a faulting
 * access the fixup sets 'err' to 1 and resumes after the load/store.
 */
#define get8_unaligned_check(val, addr, err)	\
	__asm__(				\
	"1:	ldb.u	%1, [%2], #1\n"		\
	"2:\n"					\
	"	.pushsection .fixup,\"ax\"\n"	\
	"	.align	2\n"			\
	"3:	mov	%0, #1\n"		\
	"	b	2b\n"			\
	"	.popsection\n"			\
	"	.pushsection __ex_table,\"a\"\n"	\
	"	.align	3\n"			\
	"	.long	1b, 3b\n"		\
	"	.popsection\n"			\
	: "=r" (err), "=&r" (val), "=r" (addr)	\
	: "0" (err), "2" (addr))

#define get8t_unaligned_check(val, addr, err)	\
	__asm__(				\
	"1:	ldb.u	%1, [%2], #1\n"		\
	"2:\n"					\
	"	.pushsection .fixup,\"ax\"\n"	\
	"	.align	2\n"			\
	"3:	mov	%0, #1\n"		\
	"	b	2b\n"			\
	"	.popsection\n"			\
	"	.pushsection __ex_table,\"a\"\n"	\
	"	.align	3\n"			\
	"	.long	1b, 3b\n"		\
	"	.popsection\n"			\
	: "=r" (err), "=&r" (val), "=r" (addr)	\
	: "0" (err), "2" (addr))

/* Assemble a 16-bit value from two byte loads, little-endian. */
#define get16_unaligned_check(val, addr)		\
	do {						\
		unsigned int err = 0, v, a = addr;	\
		get8_unaligned_check(val, a, err);	\
		get8_unaligned_check(v, a, err);	\
		val |= v << 8;				\
		if (err)				\
			goto fault;			\
	} while (0)

#define put16_unaligned_check(val, addr)		\
	do {						\
		unsigned int err = 0, v = val, a = addr;	\
		__asm__(				\
		"1:	stb.u	%1, [%2], #1\n"		\
		"	mov	%1, %1 >> #8\n"		\
		"2:	stb.u	%1, [%2]\n"		\
		"3:\n"					\
		"	.pushsection .fixup,\"ax\"\n"	\
		"	.align	2\n"			\
		"4:	mov	%0, #1\n"		\
		"	b	3b\n"			\
		"	.popsection\n"			\
		"	.pushsection __ex_table,\"a\"\n"	\
		"	.align	3\n"			\
		"	.long	1b, 4b\n"		\
		"	.long	2b, 4b\n"		\
		"	.popsection\n"			\
		: "=r" (err), "=&r" (v), "=&r" (a)	\
		: "0" (err), "1" (v), "2" (a));		\
		if (err)				\
			goto fault;			\
	} while (0)

/* Store a 32-bit value one byte at a time using store instruction 'ins'. */
#define __put32_unaligned_check(ins, val, addr)		\
	do {						\
		unsigned int err = 0, v = val, a = addr;	\
		__asm__(				\
		"1:	"ins"	%1, [%2], #1\n"		\
		"	mov	%1, %1 >> #8\n"		\
		"2:	"ins"	%1, [%2], #1\n"		\
		"	mov	%1, %1 >> #8\n"		\
		"3:	"ins"	%1, [%2], #1\n"		\
		"	mov	%1, %1 >> #8\n"		\
		"4:	"ins"	%1, [%2]\n"		\
		"5:\n"					\
		"	.pushsection .fixup,\"ax\"\n"	\
		"	.align	2\n"			\
		"6:	mov	%0, #1\n"		\
		"	b	5b\n"			\
		"	.popsection\n"			\
		"	.pushsection __ex_table,\"a\"\n"	\
		"	.align	3\n"			\
		"	.long	1b, 6b\n"		\
		"	.long	2b, 6b\n"		\
		"	.long	3b, 6b\n"		\
		"	.long	4b, 6b\n"		\
		"	.popsection\n"			\
		: "=r" (err), "=&r" (v), "=&r" (a)	\
		: "0" (err), "1" (v), "2" (a));		\
		if (err)				\
			goto fault;			\
	} while (0)

#define get32_unaligned_check(val, addr)		\
	do {						\
		unsigned int err = 0, v, a = addr;	\
		get8_unaligned_check(val, a, err);	\
		get8_unaligned_check(v, a, err);	\
		val |= v << 8;				\
		get8_unaligned_check(v, a, err);	\
		val |= v << 16;				\
		get8_unaligned_check(v, a, err);	\
		val |= v << 24;				\
		if (err)				\
			goto fault;			\
	} while (0)

#define put32_unaligned_check(val, addr)		\
	__put32_unaligned_check("stb.u", val, addr)

#define get32t_unaligned_check(val, addr)		\
	do {						\
		unsigned int err = 0, v, a = addr;	\
		get8t_unaligned_check(val, a, err);	\
		get8t_unaligned_check(v, a, err);	\
		val |= v << 8;				\
		get8t_unaligned_check(v, a, err);	\
		val |= v << 16;				\
		get8t_unaligned_check(v, a, err);	\
		val |= v << 24;				\
		if (err)				\
			goto fault;			\
	} while (0)

#define put32t_unaligned_check(val, addr)		\
	__put32_unaligned_check("stb.u", val, addr)

/*
 * Apply the addressing-mode side effect of a fixed-up load/store:
 * negate the offset for "subtract" forms, apply post-indexing, and
 * write the updated base register back when writeback is requested.
 */
static void do_alignment_finish_ldst(unsigned long addr, unsigned long instr,
				     struct pt_regs *regs,
				     union offset_union offset)
{
	if (!LDST_U_BIT(instr))
		offset.un = -offset.un;

	if (!LDST_P_BIT(instr))
		addr += offset.un;

	if (!LDST_P_BIT(instr) || LDST_W_BIT(instr))
		regs->uregs[RN_BITS(instr)] = addr;
}

/* Emulate an unaligned half-word load/store (or a word swap). */
static int do_alignment_ldrhstrh(unsigned long addr, unsigned long instr,
				 struct pt_regs *regs)
{
	unsigned int rd = RD_BITS(instr);

	/* old value 0x40002120, can't judge swap instr correctly */
	if ((instr & 0x4b003fe0) == 0x40000120)
		goto swp;

	if (LDST_L_BIT(instr)) {
		unsigned long val;
		get16_unaligned_check(val, addr);

		/* signed half-word? */
		if (instr & 0x80)
			val = (signed long)((signed short)val);

		regs->uregs[rd] = val;
	} else
		put16_unaligned_check(regs->uregs[rd], addr);

	return TYPE_LDST;

swp:
	/* only handle swap word
	 * for swap byte should not active this alignment exception */
	get32_unaligned_check(regs->uregs[RD_BITS(instr)], addr);
	put32_unaligned_check(regs->uregs[RM_BITS(instr)], addr);
	return TYPE_SWAP;

fault:
	return TYPE_FAULT;
}

/* Emulate an unaligned word load/store, including the user-mode forms. */
static int do_alignment_ldrstr(unsigned long addr, unsigned long instr,
			       struct pt_regs *regs)
{
	unsigned int rd = RD_BITS(instr);

	if (!LDST_P_BIT(instr) && LDST_W_BIT(instr))
		goto trans;

	if (LDST_L_BIT(instr))
		get32_unaligned_check(regs->uregs[rd], addr);
	else
		put32_unaligned_check(regs->uregs[rd], addr);
	return TYPE_LDST;

trans:
	if (LDST_L_BIT(instr))
		get32t_unaligned_check(regs->uregs[rd], addr);
	else
		put32t_unaligned_check(regs->uregs[rd], addr);
	return TYPE_LDST;

fault:
	return TYPE_FAULT;
}

/*
 * LDM/STM alignment handler.
 *
 * There are 4 variants of this instruction:
 *
 * B = rn pointer before instruction, A = rn pointer after instruction
 *              ------ increasing address ----->
 *	        |    | r0 | r1 | ... | rx |    |
 * PU = 01             B                    A
 * PU = 11        B                    A
 * PU = 00        A                    B
 * PU = 10             A                    B
 */
static int do_alignment_ldmstm(unsigned long addr, unsigned long instr,
			       struct pt_regs *regs)
{
	unsigned int rd, rn, pc_correction, reg_correction, nr_regs, regbits;
	unsigned long eaddr, newaddr;

	if (LDM_S_BIT(instr))
		goto bad;

	pc_correction = 4;	/* processor implementation defined */

	/* count the number of registers in the mask to be transferred */
	nr_regs = hweight16(REGMASK_BITS(instr)) * 4;

	rn = RN_BITS(instr);
	newaddr = eaddr = regs->uregs[rn];

	if (!LDST_U_BIT(instr))
		nr_regs = -nr_regs;
	newaddr += nr_regs;
	if (!LDST_U_BIT(instr))
		eaddr = newaddr;

	if (LDST_P_EQ_U(instr))	/* U = P */
		eaddr += 4;

	/*
	 * This is a "hint" - we already have eaddr worked out by the
	 * processor for us.
	 */
	if (addr != eaddr) {
		printk(KERN_ERR "LDMSTM: PC = %08lx, instr = %08lx, "
			"addr = %08lx, eaddr = %08lx\n",
			instruction_pointer(regs), instr, addr, eaddr);
		show_regs(regs);
	}

	if (LDM_H_BIT(instr))
		reg_correction = 0x10;
	else
		reg_correction = 0x00;

	for (regbits = REGMASK_BITS(instr), rd = 0; regbits;
	     regbits >>= 1, rd += 1)
		if (regbits & 1) {
			if (LDST_L_BIT(instr))
				get32_unaligned_check(regs->
					uregs[rd + reg_correction], eaddr);
			else
				put32_unaligned_check(regs->
					uregs[rd + reg_correction], eaddr);
			eaddr += 4;
		}

	if (LDST_W_BIT(instr))
		regs->uregs[rn] = newaddr;
	return TYPE_DONE;

fault:
	/* undo the PC advance done in do_alignment() before re-faulting */
	regs->UCreg_pc -= pc_correction;
	return TYPE_FAULT;

bad:
	printk(KERN_ERR "Alignment trap: not handling ldm with s-bit set\n");
	return TYPE_ERROR;
}

/*
 * Alignment fault entry point: fetch and decode the faulting
 * instruction, dispatch to the matching emulation handler above, then
 * finish the addressing-mode writeback.  FPU coprocessor load/stores
 * are emulated inline in the 'bad' path when CONFIG_UNICORE_FPU_F64.
 */
static int do_alignment(unsigned long addr, unsigned int error_code,
			struct pt_regs *regs)
{
	union offset_union offset;
	unsigned long instr, instrptr;
	int (*handler) (unsigned long addr, unsigned long instr,
			struct pt_regs *regs);
	unsigned int type;

	instrptr = instruction_pointer(regs);
	if (instrptr >= PAGE_OFFSET)
		instr = *(unsigned long *)instrptr;
	else {
		/* user-space PC: fetch the instruction with a user load */
		__asm__ __volatile__(
				"ldw.u	%0, [%1]\n"
				: "=&r"(instr)
				: "r"(instrptr));
	}

	regs->UCreg_pc += 4;

	switch (CODING_BITS(instr)) {
	case 0x40000120:	/* ldrh or strh */
		if (LDSTH_I_BIT(instr))
			offset.un = (instr & 0x3e00) >> 4 | (instr & 31);
		else
			offset.un = regs->uregs[RM_BITS(instr)];
		handler = do_alignment_ldrhstrh;
		break;

	case 0x60000000:	/* ldr or str immediate */
	case 0x60000100:	/* ldr or str immediate */
	case 0x60000020:	/* ldr or str immediate */
	case 0x60000120:	/* ldr or str immediate */
		offset.un = OFFSET_BITS(instr);
		handler = do_alignment_ldrstr;
		break;

	case 0x40000000:	/* ldr or str register */
		offset.un = regs->uregs[RM_BITS(instr)];
		{
			unsigned int shiftval = SHIFT_BITS(instr);

			switch (SHIFT_TYPE(instr)) {
			case SHIFT_LSL:
				offset.un <<= shiftval;
				break;

			case SHIFT_LSR:
				offset.un >>= shiftval;
				break;

			case SHIFT_ASR:
				offset.sn >>= shiftval;
				break;

			case SHIFT_RORRRX:
				/* shiftval == 0 encodes RRX: rotate through
				 * the carry flag */
				if (shiftval == 0) {
					offset.un >>= 1;
					if (regs->UCreg_asr & PSR_C_BIT)
						offset.un |= 1 << 31;
				} else
					offset.un = offset.un >> shiftval |
					    offset.un << (32 - shiftval);
				break;
			}
		}
		handler = do_alignment_ldrstr;
		break;

	case 0x80000000:	/* ldm or stm */
	case 0x80000020:	/* ldm or stm */
		handler = do_alignment_ldmstm;
		break;

	default:
		goto bad;
	}

	type = handler(addr, instr, regs);

	if (type == TYPE_ERROR || type == TYPE_FAULT)
		goto bad_or_fault;

	if (type == TYPE_LDST)
		do_alignment_finish_ldst(addr, instr, regs, offset);

	return 0;

bad_or_fault:
	if (type == TYPE_ERROR)
		goto bad;
	regs->UCreg_pc -= 4;
	/*
	 * We got a fault - fix it up, or die.
	 */
	do_bad_area(addr, error_code, regs);
	return 0;

bad:
	/*
	 * Oops, we didn't handle the instruction.
	 * However, we must handle fpu instr firstly.
	 */
#ifdef CONFIG_UNICORE_FPU_F64
	/* handle co.load/store */
#define CODING_COLS		0xc0000000
#define COLS_OFFSET_BITS(i)	(i & 0x1FF)
#define COLS_L_BITS(i)		(i & (1<<24))
#define COLS_FN_BITS(i)		((i>>14) & 31)
	if ((instr & 0xe0000000) == CODING_COLS) {
		unsigned int fn = COLS_FN_BITS(instr);
		unsigned long val = 0;
		if (COLS_L_BITS(instr)) {
			/* coprocessor load: memory -> FPU register F<fn> */
			get32t_unaligned_check(val, addr);
			switch (fn) {
#define ASM_MTF(n)	case n:						\
			__asm__ __volatile__("MTF %0, F" __stringify(n)	\
				: : "r"(val));				\
			break;
			ASM_MTF(0); ASM_MTF(1); ASM_MTF(2); ASM_MTF(3);
			ASM_MTF(4); ASM_MTF(5); ASM_MTF(6); ASM_MTF(7);
			ASM_MTF(8); ASM_MTF(9); ASM_MTF(10); ASM_MTF(11);
			ASM_MTF(12); ASM_MTF(13); ASM_MTF(14); ASM_MTF(15);
			ASM_MTF(16); ASM_MTF(17); ASM_MTF(18); ASM_MTF(19);
			ASM_MTF(20); ASM_MTF(21); ASM_MTF(22); ASM_MTF(23);
			ASM_MTF(24); ASM_MTF(25); ASM_MTF(26); ASM_MTF(27);
			ASM_MTF(28); ASM_MTF(29); ASM_MTF(30); ASM_MTF(31);
#undef ASM_MTF
			}
		} else {
			/* coprocessor store: FPU register F<fn> -> memory */
			/*
			 * NOTE(review): MFF moves F<n> into %0, yet 'val' is
			 * listed only as an input operand ("r"); an output
			 * constraint ("=r"(val)) looks intended, otherwise
			 * the stored value stays 0 -- verify against the
			 * UniCore inline-asm constraint documentation.
			 */
			switch (fn) {
#define ASM_MFF(n)	case n:						\
			__asm__ __volatile__("MFF %0, F" __stringify(n)	\
				: : "r"(val));				\
			break;
			ASM_MFF(0); ASM_MFF(1); ASM_MFF(2); ASM_MFF(3);
			ASM_MFF(4); ASM_MFF(5); ASM_MFF(6); ASM_MFF(7);
			ASM_MFF(8); ASM_MFF(9); ASM_MFF(10); ASM_MFF(11);
			ASM_MFF(12); ASM_MFF(13); ASM_MFF(14); ASM_MFF(15);
			ASM_MFF(16); ASM_MFF(17); ASM_MFF(18); ASM_MFF(19);
			ASM_MFF(20); ASM_MFF(21); ASM_MFF(22); ASM_MFF(23);
			ASM_MFF(24); ASM_MFF(25); ASM_MFF(26); ASM_MFF(27);
			ASM_MFF(28); ASM_MFF(29); ASM_MFF(30); ASM_MFF(31);
#undef ASM_MFF
			}
			put32t_unaligned_check(val, addr);
		}
		return TYPE_COLS;
	}
fault:
	return TYPE_FAULT;
#endif
	printk(KERN_ERR "Alignment trap: not handling instruction "
	       "%08lx at [<%08lx>]\n", instr, instrptr);
	return 1;
}

/*
 * This needs to be done after sysctl_init, otherwise sys/ will be
 * overwritten.  Actually, this shouldn't be in sys/ at all since
 * it isn't a sysctl, and it doesn't contain sysctl information.
 */
static int __init alignment_init(void)
{
	hook_fault_code(1, do_alignment, SIGBUS, BUS_ADRALN,
			"alignment exception");

	return 0;
}

fs_initcall(alignment_init);
gpl-2.0
SolidRun/linux-imx6-3.14
arch/blackfin/mach-bf538/ext-gpio.c
9337
4282
/* * GPIOLIB interface for BF538/9 PORT C, D, and E GPIOs * * Copyright 2009-2011 Analog Devices Inc. * * Licensed under the GPL-2 or later. */ #include <linux/module.h> #include <linux/err.h> #include <asm/blackfin.h> #include <asm/gpio.h> #include <asm/portmux.h> #define DEFINE_REG(reg, off) \ static inline u16 read_##reg(void __iomem *port) \ { return bfin_read16(port + off); } \ static inline void write_##reg(void __iomem *port, u16 v) \ { bfin_write16(port + off, v); } DEFINE_REG(PORTIO, 0x00) DEFINE_REG(PORTIO_CLEAR, 0x10) DEFINE_REG(PORTIO_SET, 0x20) DEFINE_REG(PORTIO_DIR, 0x40) DEFINE_REG(PORTIO_INEN, 0x50) static void __iomem *gpio_chip_to_mmr(struct gpio_chip *chip) { switch (chip->base) { default: /* not really needed, but keeps gcc happy */ case GPIO_PC0: return (void __iomem *)PORTCIO; case GPIO_PD0: return (void __iomem *)PORTDIO; case GPIO_PE0: return (void __iomem *)PORTEIO; } } static int bf538_gpio_get_value(struct gpio_chip *chip, unsigned gpio) { void __iomem *port = gpio_chip_to_mmr(chip); return !!(read_PORTIO(port) & (1u << gpio)); } static void bf538_gpio_set_value(struct gpio_chip *chip, unsigned gpio, int value) { void __iomem *port = gpio_chip_to_mmr(chip); if (value) write_PORTIO_SET(port, (1u << gpio)); else write_PORTIO_CLEAR(port, (1u << gpio)); } static int bf538_gpio_direction_input(struct gpio_chip *chip, unsigned gpio) { void __iomem *port = gpio_chip_to_mmr(chip); write_PORTIO_DIR(port, read_PORTIO_DIR(port) & ~(1u << gpio)); write_PORTIO_INEN(port, read_PORTIO_INEN(port) | (1u << gpio)); return 0; } static int bf538_gpio_direction_output(struct gpio_chip *chip, unsigned gpio, int value) { void __iomem *port = gpio_chip_to_mmr(chip); write_PORTIO_INEN(port, read_PORTIO_INEN(port) & ~(1u << gpio)); bf538_gpio_set_value(port, gpio, value); write_PORTIO_DIR(port, read_PORTIO_DIR(port) | (1u << gpio)); return 0; } static int bf538_gpio_request(struct gpio_chip *chip, unsigned gpio) { return bfin_special_gpio_request(chip->base + 
gpio, chip->label); } static void bf538_gpio_free(struct gpio_chip *chip, unsigned gpio) { return bfin_special_gpio_free(chip->base + gpio); } /* We don't set the irq fields as these banks cannot generate interrupts */ static struct gpio_chip bf538_portc_chip = { .label = "GPIO-PC", .direction_input = bf538_gpio_direction_input, .get = bf538_gpio_get_value, .direction_output = bf538_gpio_direction_output, .set = bf538_gpio_set_value, .request = bf538_gpio_request, .free = bf538_gpio_free, .base = GPIO_PC0, .ngpio = GPIO_PC9 - GPIO_PC0 + 1, }; static struct gpio_chip bf538_portd_chip = { .label = "GPIO-PD", .direction_input = bf538_gpio_direction_input, .get = bf538_gpio_get_value, .direction_output = bf538_gpio_direction_output, .set = bf538_gpio_set_value, .request = bf538_gpio_request, .free = bf538_gpio_free, .base = GPIO_PD0, .ngpio = GPIO_PD13 - GPIO_PD0 + 1, }; static struct gpio_chip bf538_porte_chip = { .label = "GPIO-PE", .direction_input = bf538_gpio_direction_input, .get = bf538_gpio_get_value, .direction_output = bf538_gpio_direction_output, .set = bf538_gpio_set_value, .request = bf538_gpio_request, .free = bf538_gpio_free, .base = GPIO_PE0, .ngpio = GPIO_PE15 - GPIO_PE0 + 1, }; static int __init bf538_extgpio_setup(void) { return gpiochip_add(&bf538_portc_chip) | gpiochip_add(&bf538_portd_chip) | gpiochip_add(&bf538_porte_chip); } arch_initcall(bf538_extgpio_setup); #ifdef CONFIG_PM static struct { u16 data, dir, inen; } gpio_bank_saved[3]; static void __iomem * const port_bases[3] = { (void *)PORTCIO, (void *)PORTDIO, (void *)PORTEIO, }; void bfin_special_gpio_pm_hibernate_suspend(void) { int i; for (i = 0; i < ARRAY_SIZE(port_bases); ++i) { gpio_bank_saved[i].data = read_PORTIO(port_bases[i]); gpio_bank_saved[i].inen = read_PORTIO_INEN(port_bases[i]); gpio_bank_saved[i].dir = read_PORTIO_DIR(port_bases[i]); } } void bfin_special_gpio_pm_hibernate_restore(void) { int i; for (i = 0; i < ARRAY_SIZE(port_bases); ++i) { write_PORTIO_INEN(port_bases[i], 
gpio_bank_saved[i].inen); write_PORTIO_SET(port_bases[i], gpio_bank_saved[i].data & gpio_bank_saved[i].dir); write_PORTIO_DIR(port_bases[i], gpio_bank_saved[i].dir); } } #endif
gpl-2.0
cr1exe/android_kernel_sony_taoshan
arch/avr32/oprofile/backtrace.c
13689
2074
/*
 * AVR32 specific backtracing code for oprofile
 *
 * Copyright 2008 Weinmann GmbH
 *
 * Author: Nikolaus Voss <n.voss@weinmann.de>
 *
 * Based on i386 oprofile backtrace code by John Levon and David Smith
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#include <linux/oprofile.h>
#include <linux/sched.h>
#include <linux/uaccess.h>

/*
 * When built with frame pointers, every stack frame starts with the
 * saved link register followed by the saved frame pointer.
 */
struct frame_head {
	unsigned long lr;
	struct frame_head *fp;
};

/* copied from arch/avr32/kernel/process.c */
static inline int valid_stack_ptr(struct thread_info *tinfo, unsigned long p)
{
	unsigned long stack_base = (unsigned long)tinfo;

	return p > stack_base && p < stack_base + THREAD_SIZE - 3;
}

/* copied from arch/x86/oprofile/backtrace.c */
static struct frame_head *dump_user_backtrace(struct frame_head *head)
{
	struct frame_head frames[2];

	/* Also check accessibility of one struct frame_head beyond */
	if (!access_ok(VERIFY_READ, head, sizeof(frames)))
		return NULL;
	if (__copy_from_user_inatomic(frames, head, sizeof(frames)))
		return NULL;

	oprofile_add_trace(frames[0].lr);

	/*
	 * Saved frame pointers must make strict progress back up the
	 * stack (towards higher addresses); bail out otherwise so a
	 * corrupt chain cannot loop forever.
	 */
	if (frames[0].fp <= head)
		return NULL;

	return frames[0].fp;
}

void avr32_backtrace(struct pt_regs * const regs, unsigned int depth)
{
	/* r7 holds the frame pointer of the sampled context */
	struct frame_head *head = (struct frame_head *)(regs->r7);

	if (user_mode(regs)) {
		/* Assume the user process was built with frame pointers */
		while (depth-- && head)
			head = dump_user_backtrace(head);
		return;
	}

#ifdef CONFIG_FRAME_POINTER
	/*
	 * Walk the kernel stack frame by frame, recording up to "depth"
	 * return addresses, stopping at the first invalid or
	 * non-advancing frame pointer.
	 */
	while (depth-- && valid_stack_ptr(task_thread_info(current),
					  (unsigned long)head)) {
		oprofile_add_trace(head->lr);
		if (head->fp <= head)
			break;
		head = head->fp;
	}
#endif
}
gpl-2.0
tpmullan/android_kernel_asus_tf700
fs/namei.c
122
86339
/* * linux/fs/namei.c * * Copyright (C) 1991, 1992 Linus Torvalds */ /* * Some corrections by tytso. */ /* [Feb 1997 T. Schoebel-Theuer] Complete rewrite of the pathname * lookup logic. */ /* [Feb-Apr 2000, AV] Rewrite to the new namespace architecture. */ #include <linux/init.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/fs.h> #include <linux/namei.h> #include <linux/pagemap.h> #include <linux/fsnotify.h> #include <linux/personality.h> #include <linux/security.h> #include <linux/ima.h> #include <linux/syscalls.h> #include <linux/mount.h> #include <linux/audit.h> #include <linux/capability.h> #include <linux/file.h> #include <linux/fcntl.h> #include <linux/device_cgroup.h> #include <linux/fs_struct.h> #include <asm/uaccess.h> #include "internal.h" /* [Feb-1997 T. Schoebel-Theuer] * Fundamental changes in the pathname lookup mechanisms (namei) * were necessary because of omirr. The reason is that omirr needs * to know the _real_ pathname, not the user-supplied one, in case * of symlinks (and also when transname replacements occur). * * The new code replaces the old recursive symlink resolution with * an iterative one (in case of non-nested symlink chains). It does * this with calls to <fs>_follow_link(). * As a side effect, dir_namei(), _namei() and follow_link() are now * replaced with a single function lookup_dentry() that can handle all * the special cases of the former code. * * With the new dcache, the pathname is stored at each inode, at least as * long as the refcount of the inode is positive. As a side effect, the * size of the dcache depends on the inode cache and thus is dynamic. * * [29-Apr-1998 C. Scott Ananian] Updated above description of symlink * resolution to correspond with current state of the code. * * Note that the symlink resolution is not *completely* iterative. * There is still a significant amount of tail- and mid- recursion in * the algorithm. 
Also, note that <fs>_readlink() is not used in * lookup_dentry(): lookup_dentry() on the result of <fs>_readlink() * may return different results than <fs>_follow_link(). Many virtual * filesystems (including /proc) exhibit this behavior. */ /* [24-Feb-97 T. Schoebel-Theuer] Side effects caused by new implementation: * New symlink semantics: when open() is called with flags O_CREAT | O_EXCL * and the name already exists in form of a symlink, try to create the new * name indicated by the symlink. The old code always complained that the * name already exists, due to not following the symlink even if its target * is nonexistent. The new semantics affects also mknod() and link() when * the name is a symlink pointing to a non-existent name. * * I don't know which semantics is the right one, since I have no access * to standards. But I found by trial that HP-UX 9.0 has the full "new" * semantics implemented, while SunOS 4.1.1 and Solaris (SunOS 5.4) have the * "old" one. Personally, I think the new semantics is much more logical. * Note that "ln old new" where "new" is a symlink pointing to a non-existing * file does succeed in both HP-UX and SunOs, but not in Solaris * and in the old Linux semantics. */ /* [16-Dec-97 Kevin Buhr] For security reasons, we change some symlink * semantics. See the comments in "open_namei" and "do_link" below. * * [10-Sep-98 Alan Modra] Another symlink change. */ /* [Feb-Apr 2000 AV] Complete rewrite. Rules for symlinks: * inside the path - always follow. * in the last component in creation/removal/renaming - never follow. * if LOOKUP_FOLLOW passed - follow. * if the pathname has trailing slashes - follow. * otherwise - don't follow. * (applied in that order). * * [Jun 2000 AV] Inconsistent behaviour of open() in case if flags==O_CREAT * restored for 2.4. This is the last surviving part of old 4.2BSD bug. * During the 2.4 we need to fix the userland stuff depending on it - * hopefully we will be able to get rid of that wart in 2.5. 
So far only * XEmacs seems to be relying on it... */ /* * [Sep 2001 AV] Single-semaphore locking scheme (kudos to David Holland) * implemented. Let's see if raised priority of ->s_vfs_rename_mutex gives * any extra contention... */ /* In order to reduce some races, while at the same time doing additional * checking and hopefully speeding things up, we copy filenames to the * kernel data space before using them.. * * POSIX.1 2.4: an empty pathname is invalid (ENOENT). * PATH_MAX includes the nul terminator --RR. */ static int do_getname(const char __user *filename, char *page) { int retval; unsigned long len = PATH_MAX; if (!segment_eq(get_fs(), KERNEL_DS)) { if ((unsigned long) filename >= TASK_SIZE) return -EFAULT; if (TASK_SIZE - (unsigned long) filename < PATH_MAX) len = TASK_SIZE - (unsigned long) filename; } retval = strncpy_from_user(page, filename, len); if (retval > 0) { if (retval < len) return 0; return -ENAMETOOLONG; } else if (!retval) retval = -ENOENT; return retval; } static char *getname_flags(const char __user * filename, int flags) { char *tmp, *result; result = ERR_PTR(-ENOMEM); tmp = __getname(); if (tmp) { int retval = do_getname(filename, tmp); result = tmp; if (retval < 0) { if (retval != -ENOENT || !(flags & LOOKUP_EMPTY)) { __putname(tmp); result = ERR_PTR(retval); } } } audit_getname(result); return result; } char *getname(const char __user * filename) { return getname_flags(filename, 0); } #ifdef CONFIG_AUDITSYSCALL void putname(const char *name) { if (unlikely(!audit_dummy_context())) audit_putname(name); else __putname(name); } EXPORT_SYMBOL(putname); #endif /* * This does basic POSIX ACL permission checking */ static int acl_permission_check(struct inode *inode, int mask, unsigned int flags, int (*check_acl)(struct inode *inode, int mask, unsigned int flags)) { unsigned int mode = inode->i_mode; mask &= MAY_READ | MAY_WRITE | MAY_EXEC; if (current_user_ns() != inode_userns(inode)) goto other_perms; if (current_fsuid() == inode->i_uid) 
mode >>= 6; else { if (IS_POSIXACL(inode) && (mode & S_IRWXG) && check_acl) { int error = check_acl(inode, mask, flags); if (error != -EAGAIN) return error; } if (in_group_p(inode->i_gid)) mode >>= 3; } other_perms: /* * If the DACs are ok we don't need any capability check. */ if ((mask & ~mode) == 0) return 0; return -EACCES; } /** * generic_permission - check for access rights on a Posix-like filesystem * @inode: inode to check access rights for * @mask: right to check for (%MAY_READ, %MAY_WRITE, %MAY_EXEC) * @check_acl: optional callback to check for Posix ACLs * @flags: IPERM_FLAG_ flags. * * Used to check for read/write/execute permissions on a file. * We use "fsuid" for this, letting us set arbitrary permissions * for filesystem access without changing the "normal" uids which * are used for other things. * * generic_permission is rcu-walk aware. It returns -ECHILD in case an rcu-walk * request cannot be satisfied (eg. requires blocking or too much complexity). * It would then be called again in ref-walk mode. */ int generic_permission(struct inode *inode, int mask, unsigned int flags, int (*check_acl)(struct inode *inode, int mask, unsigned int flags)) { int ret; /* * Do the basic POSIX ACL permission checks. */ ret = acl_permission_check(inode, mask, flags, check_acl); if (ret != -EACCES) return ret; /* * Read/write DACs are always overridable. * Executable DACs are overridable if at least one exec bit is set. */ if (!(mask & MAY_EXEC) || execute_ok(inode)) if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE)) return 0; /* * Searching includes executable on directories, else just read. 
*/ mask &= MAY_READ | MAY_WRITE | MAY_EXEC; if (mask == MAY_READ || (S_ISDIR(inode->i_mode) && !(mask & MAY_WRITE))) if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH)) return 0; return -EACCES; } /** * inode_permission - check for access rights to a given inode * @inode: inode to check permission on * @mask: right to check for (%MAY_READ, %MAY_WRITE, %MAY_EXEC) * * Used to check for read/write/execute permissions on an inode. * We use "fsuid" for this, letting us set arbitrary permissions * for filesystem access without changing the "normal" uids which * are used for other things. */ int inode_permission(struct inode *inode, int mask) { int retval; if (mask & MAY_WRITE) { umode_t mode = inode->i_mode; /* * Nobody gets write access to a read-only fs. */ if (IS_RDONLY(inode) && (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))) return -EROFS; /* * Nobody gets write access to an immutable file. */ if (IS_IMMUTABLE(inode)) return -EACCES; } if (inode->i_op->permission) retval = inode->i_op->permission(inode, mask, 0); else retval = generic_permission(inode, mask, 0, inode->i_op->check_acl); if (retval) return retval; retval = devcgroup_inode_permission(inode, mask); if (retval) return retval; return security_inode_permission(inode, mask); } /** * file_permission - check for additional access rights to a given file * @file: file to check access rights for * @mask: right to check for (%MAY_READ, %MAY_WRITE, %MAY_EXEC) * * Used to check for read/write/execute permissions on an already opened * file. * * Note: * Do not use this function in new code. All access checks should * be done using inode_permission(). */ int file_permission(struct file *file, int mask) { return inode_permission(file->f_path.dentry->d_inode, mask); } /* * get_write_access() gets write permission for a file. * put_write_access() releases this write permission. * This is used for regular files. 
* We cannot support write (and maybe mmap read-write shared) accesses and * MAP_DENYWRITE mmappings simultaneously. The i_writecount field of an inode * can have the following values: * 0: no writers, no VM_DENYWRITE mappings * < 0: (-i_writecount) vm_area_structs with VM_DENYWRITE set exist * > 0: (i_writecount) users are writing to the file. * * Normally we operate on that counter with atomic_{inc,dec} and it's safe * except for the cases where we don't hold i_writecount yet. Then we need to * use {get,deny}_write_access() - these functions check the sign and refuse * to do the change if sign is wrong. Exclusion between them is provided by * the inode->i_lock spinlock. */ int get_write_access(struct inode * inode) { spin_lock(&inode->i_lock); if (atomic_read(&inode->i_writecount) < 0) { spin_unlock(&inode->i_lock); return -ETXTBSY; } atomic_inc(&inode->i_writecount); spin_unlock(&inode->i_lock); return 0; } int deny_write_access(struct file * file) { struct inode *inode = file->f_path.dentry->d_inode; spin_lock(&inode->i_lock); if (atomic_read(&inode->i_writecount) > 0) { spin_unlock(&inode->i_lock); return -ETXTBSY; } atomic_dec(&inode->i_writecount); spin_unlock(&inode->i_lock); return 0; } /** * path_get - get a reference to a path * @path: path to get the reference to * * Given a path increment the reference count to the dentry and the vfsmount. */ void path_get(struct path *path) { mntget(path->mnt); dget(path->dentry); } EXPORT_SYMBOL(path_get); /** * path_put - put a reference to a path * @path: path to put the reference to * * Given a path decrement the reference count to the dentry and the vfsmount. */ void path_put(struct path *path) { dput(path->dentry); mntput(path->mnt); } EXPORT_SYMBOL(path_put); /** * nameidata_drop_rcu - drop this nameidata out of rcu-walk * @nd: nameidata pathwalk data to drop * Returns: 0 on success, -ECHILD on failure * * Path walking has 2 modes, rcu-walk and ref-walk (see * Documentation/filesystems/path-lookup.txt). 
__drop_rcu* functions attempt * to drop out of rcu-walk mode and take normal reference counts on dentries * and vfsmounts to transition to rcu-walk mode. __drop_rcu* functions take * refcounts at the last known good point before rcu-walk got stuck, so * ref-walk may continue from there. If this is not successful (eg. a seqcount * has changed), then failure is returned and path walk restarts from the * beginning in ref-walk mode. * * nameidata_drop_rcu attempts to drop the current nd->path and nd->root into * ref-walk. Must be called from rcu-walk context. */ static int nameidata_drop_rcu(struct nameidata *nd) { struct fs_struct *fs = current->fs; struct dentry *dentry = nd->path.dentry; int want_root = 0; BUG_ON(!(nd->flags & LOOKUP_RCU)); if (nd->root.mnt && !(nd->flags & LOOKUP_ROOT)) { want_root = 1; spin_lock(&fs->lock); if (nd->root.mnt != fs->root.mnt || nd->root.dentry != fs->root.dentry) goto err_root; } spin_lock(&dentry->d_lock); if (!__d_rcu_to_refcount(dentry, nd->seq)) goto err; BUG_ON(nd->inode != dentry->d_inode); spin_unlock(&dentry->d_lock); if (want_root) { path_get(&nd->root); spin_unlock(&fs->lock); } mntget(nd->path.mnt); rcu_read_unlock(); br_read_unlock(vfsmount_lock); nd->flags &= ~LOOKUP_RCU; return 0; err: spin_unlock(&dentry->d_lock); err_root: if (want_root) spin_unlock(&fs->lock); return -ECHILD; } /* Try to drop out of rcu-walk mode if we were in it, otherwise do nothing. */ static inline int nameidata_drop_rcu_maybe(struct nameidata *nd) { if (nd->flags & LOOKUP_RCU) return nameidata_drop_rcu(nd); return 0; } /** * nameidata_dentry_drop_rcu - drop nameidata and dentry out of rcu-walk * @nd: nameidata pathwalk data to drop * @dentry: dentry to drop * Returns: 0 on success, -ECHILD on failure * * nameidata_dentry_drop_rcu attempts to drop the current nd->path and nd->root, * and dentry into ref-walk. @dentry must be a path found by a do_lookup call on * @nd. Must be called from rcu-walk context. 
*/ static int nameidata_dentry_drop_rcu(struct nameidata *nd, struct dentry *dentry) { struct fs_struct *fs = current->fs; struct dentry *parent = nd->path.dentry; int want_root = 0; BUG_ON(!(nd->flags & LOOKUP_RCU)); if (nd->root.mnt && !(nd->flags & LOOKUP_ROOT)) { want_root = 1; spin_lock(&fs->lock); if (nd->root.mnt != fs->root.mnt || nd->root.dentry != fs->root.dentry) goto err_root; } spin_lock(&parent->d_lock); spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED); if (!__d_rcu_to_refcount(dentry, nd->seq)) goto err; /* * If the sequence check on the child dentry passed, then the child has * not been removed from its parent. This means the parent dentry must * be valid and able to take a reference at this point. */ BUG_ON(!IS_ROOT(dentry) && dentry->d_parent != parent); BUG_ON(!parent->d_count); parent->d_count++; spin_unlock(&dentry->d_lock); spin_unlock(&parent->d_lock); if (want_root) { path_get(&nd->root); spin_unlock(&fs->lock); } mntget(nd->path.mnt); rcu_read_unlock(); br_read_unlock(vfsmount_lock); nd->flags &= ~LOOKUP_RCU; return 0; err: spin_unlock(&dentry->d_lock); spin_unlock(&parent->d_lock); err_root: if (want_root) spin_unlock(&fs->lock); return -ECHILD; } /* Try to drop out of rcu-walk mode if we were in it, otherwise do nothing. */ static inline int nameidata_dentry_drop_rcu_maybe(struct nameidata *nd, struct dentry *dentry) { if (nd->flags & LOOKUP_RCU) { if (unlikely(nameidata_dentry_drop_rcu(nd, dentry))) { nd->flags &= ~LOOKUP_RCU; if (!(nd->flags & LOOKUP_ROOT)) nd->root.mnt = NULL; rcu_read_unlock(); br_read_unlock(vfsmount_lock); return -ECHILD; } } return 0; } /** * nameidata_drop_rcu_last - drop nameidata ending path walk out of rcu-walk * @nd: nameidata pathwalk data to drop * Returns: 0 on success, -ECHILD on failure * * nameidata_drop_rcu_last attempts to drop the current nd->path into ref-walk. * nd->path should be the final element of the lookup, so nd->root is discarded. * Must be called from rcu-walk context. 
*/ static int nameidata_drop_rcu_last(struct nameidata *nd) { struct dentry *dentry = nd->path.dentry; BUG_ON(!(nd->flags & LOOKUP_RCU)); nd->flags &= ~LOOKUP_RCU; if (!(nd->flags & LOOKUP_ROOT)) nd->root.mnt = NULL; spin_lock(&dentry->d_lock); if (!__d_rcu_to_refcount(dentry, nd->seq)) goto err_unlock; BUG_ON(nd->inode != dentry->d_inode); spin_unlock(&dentry->d_lock); mntget(nd->path.mnt); rcu_read_unlock(); br_read_unlock(vfsmount_lock); return 0; err_unlock: spin_unlock(&dentry->d_lock); rcu_read_unlock(); br_read_unlock(vfsmount_lock); return -ECHILD; } /** * release_open_intent - free up open intent resources * @nd: pointer to nameidata */ void release_open_intent(struct nameidata *nd) { struct file *file = nd->intent.open.file; if (file && !IS_ERR(file)) { if (file->f_path.dentry == NULL) put_filp(file); else fput(file); } } static inline int d_revalidate(struct dentry *dentry, struct nameidata *nd) { return dentry->d_op->d_revalidate(dentry, nd); } static struct dentry * do_revalidate(struct dentry *dentry, struct nameidata *nd) { int status = d_revalidate(dentry, nd); if (unlikely(status <= 0)) { /* * The dentry failed validation. * If d_revalidate returned 0 attempt to invalidate * the dentry otherwise d_revalidate is asking us * to return a fail status. */ if (status < 0) { dput(dentry); dentry = ERR_PTR(status); } else if (!d_invalidate(dentry)) { dput(dentry); dentry = NULL; } } return dentry; } /* * handle_reval_path - force revalidation of a dentry * * In some situations the path walking code will trust dentries without * revalidating them. This causes problems for filesystems that depend on * d_revalidate to handle file opens (e.g. NFSv4). When FS_REVAL_DOT is set * (which indicates that it's possible for the dentry to go stale), force * a d_revalidate call before proceeding. * * Returns 0 if the revalidation was successful. 
If the revalidation fails, * either return the error returned by d_revalidate or -ESTALE if the * revalidation it just returned 0. If d_revalidate returns 0, we attempt to * invalidate the dentry. It's up to the caller to handle putting references * to the path if necessary. */ static inline int handle_reval_path(struct nameidata *nd) { struct dentry *dentry = nd->path.dentry; int status; if (likely(!(nd->flags & LOOKUP_JUMPED))) return 0; if (likely(!(dentry->d_flags & DCACHE_OP_REVALIDATE))) return 0; if (likely(!(dentry->d_sb->s_type->fs_flags & FS_REVAL_DOT))) return 0; /* Note: we do not d_invalidate() */ status = d_revalidate(dentry, nd); if (status > 0) return 0; if (!status) status = -ESTALE; return status; } /* * Short-cut version of permission(), for calling on directories * during pathname resolution. Combines parts of permission() * and generic_permission(), and tests ONLY for MAY_EXEC permission. * * If appropriate, check DAC only. If not appropriate, or * short-cut DAC fails, then call ->permission() to do more * complete permission check. 
*/ static inline int exec_permission(struct inode *inode, unsigned int flags) { int ret; struct user_namespace *ns = inode_userns(inode); if (inode->i_op->permission) { ret = inode->i_op->permission(inode, MAY_EXEC, flags); } else { ret = acl_permission_check(inode, MAY_EXEC, flags, inode->i_op->check_acl); } if (likely(!ret)) goto ok; if (ret == -ECHILD) return ret; if (ns_capable(ns, CAP_DAC_OVERRIDE) || ns_capable(ns, CAP_DAC_READ_SEARCH)) goto ok; return ret; ok: return security_inode_exec_permission(inode, flags); } static __always_inline void set_root(struct nameidata *nd) { if (!nd->root.mnt) get_fs_root(current->fs, &nd->root); } static int link_path_walk(const char *, struct nameidata *); static __always_inline void set_root_rcu(struct nameidata *nd) { if (!nd->root.mnt) { struct fs_struct *fs = current->fs; unsigned seq; do { seq = read_seqcount_begin(&fs->seq); nd->root = fs->root; nd->seq = __read_seqcount_begin(&nd->root.dentry->d_seq); } while (read_seqcount_retry(&fs->seq, seq)); } } static __always_inline int __vfs_follow_link(struct nameidata *nd, const char *link) { int ret; if (IS_ERR(link)) goto fail; if (*link == '/') { set_root(nd); path_put(&nd->path); nd->path = nd->root; path_get(&nd->root); nd->flags |= LOOKUP_JUMPED; } nd->inode = nd->path.dentry->d_inode; ret = link_path_walk(link, nd); return ret; fail: path_put(&nd->path); return PTR_ERR(link); } static void path_put_conditional(struct path *path, struct nameidata *nd) { dput(path->dentry); if (path->mnt != nd->path.mnt) mntput(path->mnt); } static inline void path_to_nameidata(const struct path *path, struct nameidata *nd) { if (!(nd->flags & LOOKUP_RCU)) { dput(nd->path.dentry); if (nd->path.mnt != path->mnt) mntput(nd->path.mnt); } nd->path.mnt = path->mnt; nd->path.dentry = path->dentry; } static inline void put_link(struct nameidata *nd, struct path *link, void *cookie) { struct inode *inode = link->dentry->d_inode; if (!IS_ERR(cookie) && inode->i_op->put_link) 
inode->i_op->put_link(link->dentry, nd, cookie); path_put(link); } static __always_inline int follow_link(struct path *link, struct nameidata *nd, void **p) { int error; struct dentry *dentry = link->dentry; BUG_ON(nd->flags & LOOKUP_RCU); if (link->mnt == nd->path.mnt) mntget(link->mnt); if (unlikely(current->total_link_count >= 40)) { *p = ERR_PTR(-ELOOP); /* no ->put_link(), please */ path_put(&nd->path); return -ELOOP; } cond_resched(); current->total_link_count++; touch_atime(link->mnt, dentry); nd_set_link(nd, NULL); error = security_inode_follow_link(link->dentry, nd); if (error) { *p = ERR_PTR(error); /* no ->put_link(), please */ path_put(&nd->path); return error; } nd->last_type = LAST_BIND; *p = dentry->d_inode->i_op->follow_link(dentry, nd); error = PTR_ERR(*p); if (!IS_ERR(*p)) { char *s = nd_get_link(nd); error = 0; if (s) error = __vfs_follow_link(nd, s); else if (nd->last_type == LAST_BIND) { nd->flags |= LOOKUP_JUMPED; nd->inode = nd->path.dentry->d_inode; if (nd->inode->i_op->follow_link) { /* stepped on a _really_ weird one */ path_put(&nd->path); error = -ELOOP; } } } return error; } static int follow_up_rcu(struct path *path) { struct vfsmount *parent; struct dentry *mountpoint; parent = path->mnt->mnt_parent; if (parent == path->mnt) return 0; mountpoint = path->mnt->mnt_mountpoint; path->dentry = mountpoint; path->mnt = parent; return 1; } int follow_up(struct path *path) { struct vfsmount *parent; struct dentry *mountpoint; br_read_lock(vfsmount_lock); parent = path->mnt->mnt_parent; if (parent == path->mnt) { br_read_unlock(vfsmount_lock); return 0; } mntget(parent); mountpoint = dget(path->mnt->mnt_mountpoint); br_read_unlock(vfsmount_lock); dput(path->dentry); path->dentry = mountpoint; mntput(path->mnt); path->mnt = parent; return 1; } /* * Perform an automount * - return -EISDIR to tell follow_managed() to stop and return the path we * were called with. 
*/ static int follow_automount(struct path *path, unsigned flags, bool *need_mntput) { struct vfsmount *mnt; int err; if (!path->dentry->d_op || !path->dentry->d_op->d_automount) return -EREMOTE; /* We don't want to mount if someone supplied AT_NO_AUTOMOUNT * and this is the terminal part of the path. */ if ((flags & LOOKUP_NO_AUTOMOUNT) && !(flags & LOOKUP_CONTINUE)) return -EISDIR; /* we actually want to stop here */ /* We want to mount if someone is trying to open/create a file of any * type under the mountpoint, wants to traverse through the mountpoint * or wants to open the mounted directory. * * We don't want to mount if someone's just doing a stat and they've * set AT_SYMLINK_NOFOLLOW - unless they're stat'ing a directory and * appended a '/' to the name. */ if (!(flags & LOOKUP_FOLLOW) && !(flags & (LOOKUP_CONTINUE | LOOKUP_DIRECTORY | LOOKUP_OPEN | LOOKUP_CREATE))) return -EISDIR; current->total_link_count++; if (current->total_link_count >= 40) return -ELOOP; mnt = path->dentry->d_op->d_automount(path); if (IS_ERR(mnt)) { /* * The filesystem is allowed to return -EISDIR here to indicate * it doesn't want to automount. For instance, autofs would do * this so that its userspace daemon can mount on this dentry. * * However, we can only permit this if it's a terminal point in * the path being looked up; if it wasn't then the remainder of * the path is inaccessible and we should say so. */ if (PTR_ERR(mnt) == -EISDIR && (flags & LOOKUP_CONTINUE)) return -EREMOTE; return PTR_ERR(mnt); } if (!mnt) /* mount collision */ return 0; err = finish_automount(mnt, path); switch (err) { case -EBUSY: /* Someone else made a mount here whilst we were busy */ return 0; case 0: dput(path->dentry); if (*need_mntput) mntput(path->mnt); path->mnt = mnt; path->dentry = dget(mnt->mnt_root); *need_mntput = true; return 0; default: return err; } } /* * Handle a dentry that is managed in some way. 
* - Flagged for transit management (autofs) * - Flagged as mountpoint * - Flagged as automount point * * This may only be called in refwalk mode. * * Serialization is taken care of in namespace.c */ static int follow_managed(struct path *path, unsigned flags) { unsigned managed; bool need_mntput = false; int ret; /* Given that we're not holding a lock here, we retain the value in a * local variable for each dentry as we look at it so that we don't see * the components of that value change under us */ while (managed = ACCESS_ONCE(path->dentry->d_flags), managed &= DCACHE_MANAGED_DENTRY, unlikely(managed != 0)) { /* Allow the filesystem to manage the transit without i_mutex * being held. */ if (managed & DCACHE_MANAGE_TRANSIT) { BUG_ON(!path->dentry->d_op); BUG_ON(!path->dentry->d_op->d_manage); ret = path->dentry->d_op->d_manage(path->dentry, false); if (ret < 0) return ret == -EISDIR ? 0 : ret; } /* Transit to a mounted filesystem. */ if (managed & DCACHE_MOUNTED) { struct vfsmount *mounted = lookup_mnt(path); if (mounted) { dput(path->dentry); if (need_mntput) mntput(path->mnt); path->mnt = mounted; path->dentry = dget(mounted->mnt_root); need_mntput = true; continue; } /* Something is mounted on this dentry in another * namespace and/or whatever was mounted there in this * namespace got unmounted before we managed to get the * vfsmount_lock */ } /* Handle an automount point */ if (managed & DCACHE_NEED_AUTOMOUNT) { ret = follow_automount(path, flags, &need_mntput); if (ret < 0) return ret == -EISDIR ? 
				0 : ret;
			continue;
		}

		/* We didn't change the current path point */
		break;
	}
	return 0;
}

/*
 * NOTE(review): despite the file metadata, this chunk is VFS path-walk
 * code (fs/namei.c lineage), not wireless driver code.
 *
 * follow_down_one - transit exactly one mount stacked on this path point.
 * Swaps *path for the covering mount's root (taking new refs, dropping
 * the old ones).  Returns 1 if a mount was crossed, 0 if nothing was
 * mounted here.
 */
int follow_down_one(struct path *path)
{
	struct vfsmount *mounted;

	mounted = lookup_mnt(path);
	if (mounted) {
		dput(path->dentry);
		mntput(path->mnt);
		path->mnt = mounted;
		path->dentry = dget(mounted->mnt_root);
		return 1;
	}
	return 0;
}

/*
 * True if this dentry has a ->d_manage() op that asks us to block transit
 * (d_manage(dentry, true) < 0 in rcu-walk mode).
 */
static inline bool managed_dentry_might_block(struct dentry *dentry)
{
	return (dentry->d_flags & DCACHE_MANAGE_TRANSIT &&
		dentry->d_op->d_manage(dentry, true) < 0);
}

/*
 * Skip to top of mountpoint pile in rcuwalk mode. We abort the rcu-walk if we
 * meet a managed dentry and we're not walking to "..". True is returned to
 * continue, false to abort.
 */
static bool __follow_mount_rcu(struct nameidata *nd, struct path *path,
			       struct inode **inode, bool reverse_transit)
{
	for (;;) {
		struct vfsmount *mounted;
		/*
		 * Don't forget we might have a non-mountpoint managed dentry
		 * that wants to block transit.
		 */
		if (!reverse_transit &&
		     unlikely(managed_dentry_might_block(path->dentry)))
			return false;

		if (!d_mountpoint(path->dentry))
			break;

		mounted = __lookup_mnt(path->mnt, path->dentry, 1);
		if (!mounted)
			break;
		path->mnt = mounted;
		path->dentry = mounted->mnt_root;
		nd->seq = read_seqcount_begin(&path->dentry->d_seq);
		/*
		 * Update the inode too. We don't need to re-check the
		 * dentry sequence number here after this d_inode read,
		 * because a mount-point is always pinned.
		 */
		*inode = path->dentry->d_inode;
	}

	if (unlikely(path->dentry->d_flags & DCACHE_NEED_AUTOMOUNT))
		return reverse_transit;
	return true;
}

/*
 * Handle ".." in rcu-walk mode: step to the parent (or follow up through a
 * mount root), re-validating the child's seqcount against nd->seq before
 * trusting the parent.  On seq mismatch we bail out of rcu-walk entirely
 * (-ECHILD) after dropping the RCU/lock state ourselves.
 */
static int follow_dotdot_rcu(struct nameidata *nd)
{
	struct inode *inode = nd->inode;

	set_root_rcu(nd);

	while (1) {
		if (nd->path.dentry == nd->root.dentry &&
		    nd->path.mnt == nd->root.mnt) {
			break;
		}
		if (nd->path.dentry != nd->path.mnt->mnt_root) {
			struct dentry *old = nd->path.dentry;
			struct dentry *parent = old->d_parent;
			unsigned seq;

			seq = read_seqcount_begin(&parent->d_seq);
			if (read_seqcount_retry(&old->d_seq, nd->seq))
				goto failed;
			inode = parent->d_inode;
			nd->path.dentry = parent;
			nd->seq = seq;
			break;
		}
		if (!follow_up_rcu(&nd->path))
			break;
		nd->seq = read_seqcount_begin(&nd->path.dentry->d_seq);
		inode = nd->path.dentry->d_inode;
	}
	__follow_mount_rcu(nd, &nd->path, &inode, true);
	nd->inode = inode;
	return 0;

failed:
	/* leave rcu-walk mode: drop RCU state, caller must restart ref-walk */
	nd->flags &= ~LOOKUP_RCU;
	if (!(nd->flags & LOOKUP_ROOT))
		nd->root.mnt = NULL;
	rcu_read_unlock();
	br_read_unlock(vfsmount_lock);
	return -ECHILD;
}

/*
 * Follow down to the covering mount currently visible to userspace. At each
 * point, the filesystem owning that dentry may be queried as to whether the
 * caller is permitted to proceed or not.
 *
 * Care must be taken as namespace_sem may be held (indicated by mounting_here
 * being true).
 */
int follow_down(struct path *path)
{
	unsigned managed;
	int ret;

	while (managed = ACCESS_ONCE(path->dentry->d_flags),
	       unlikely(managed & DCACHE_MANAGED_DENTRY)) {
		/* Allow the filesystem to manage the transit without i_mutex
		 * being held.
		 *
		 * We indicate to the filesystem if someone is trying to mount
		 * something here. This gives autofs the chance to deny anyone
		 * other than its daemon the right to mount on its
		 * superstructure.
		 *
		 * The filesystem may sleep at this point.
		 */
		if (managed & DCACHE_MANAGE_TRANSIT) {
			BUG_ON(!path->dentry->d_op);
			BUG_ON(!path->dentry->d_op->d_manage);
			ret = path->dentry->d_op->d_manage(
				path->dentry, false);
			if (ret < 0)
				return ret == -EISDIR ? 0 : ret;
		}

		/* Transit to a mounted filesystem. */
		if (managed & DCACHE_MOUNTED) {
			struct vfsmount *mounted = lookup_mnt(path);
			if (!mounted)
				break;
			dput(path->dentry);
			mntput(path->mnt);
			path->mnt = mounted;
			path->dentry = dget(mounted->mnt_root);
			continue;
		}

		/* Don't handle automount points here */
		break;
	}
	return 0;
}

/*
 * Skip to top of mountpoint pile in refwalk mode for follow_dotdot()
 */
static void follow_mount(struct path *path)
{
	while (d_mountpoint(path->dentry)) {
		struct vfsmount *mounted = lookup_mnt(path);
		if (!mounted)
			break;
		dput(path->dentry);
		mntput(path->mnt);
		path->mnt = mounted;
		path->dentry = dget(mounted->mnt_root);
	}
}

/*
 * Handle ".." in ref-walk mode: move nd->path to the parent, stopping at
 * the walk's root, crossing mount boundaries with follow_up().
 */
static void follow_dotdot(struct nameidata *nd)
{
	set_root(nd);

	while(1) {
		struct dentry *old = nd->path.dentry;

		if (nd->path.dentry == nd->root.dentry &&
		    nd->path.mnt == nd->root.mnt) {
			break;
		}
		if (nd->path.dentry != nd->path.mnt->mnt_root) {
			/* rare case of legitimate dget_parent()... */
			nd->path.dentry = dget_parent(nd->path.dentry);
			dput(old);
			break;
		}
		if (!follow_up(&nd->path))
			break;
	}
	follow_mount(&nd->path);
	nd->inode = nd->path.dentry->d_inode;
}

/*
 * Allocate a dentry with name and parent, and perform a parent
 * directory ->lookup on it. Returns the new dentry, or ERR_PTR
 * on error. parent->d_inode->i_mutex must be held. d_lookup must
 * have verified that no child exists while under i_mutex.
 */
static struct dentry *d_alloc_and_lookup(struct dentry *parent,
				struct qstr *name, struct nameidata *nd)
{
	struct inode *inode = parent->d_inode;
	struct dentry *dentry;
	struct dentry *old;

	/* Don't create child dentry for a dead directory. */
	if (unlikely(IS_DEADDIR(inode)))
		return ERR_PTR(-ENOENT);

	dentry = d_alloc(parent, name);
	if (unlikely(!dentry))
		return ERR_PTR(-ENOMEM);

	old = inode->i_op->lookup(inode, dentry, nd);
	if (unlikely(old)) {
		/* filesystem returned its own dentry; use it instead */
		dput(dentry);
		dentry = old;
	}
	return dentry;
}

/*
 * It's more convoluted than I'd like it to be, but... it's still fairly
 * small and for now I'd prefer to have fast path as straight as possible.
 * It _is_ time-critical.
 *
 * Look up one component (name) under nd->path, trying the rcu-walk dcache
 * fast path first and falling back ("unlazy") to ref-walk, then to a real
 * ->lookup under i_mutex.  On success fills *path and *inode.
 */
static int do_lookup(struct nameidata *nd, struct qstr *name,
			struct path *path, struct inode **inode)
{
	struct vfsmount *mnt = nd->path.mnt;
	struct dentry *dentry, *parent = nd->path.dentry;
	int need_reval = 1;
	int status = 1;
	int err;

	/*
	 * Rename seqlock is not required here because in the off chance
	 * of a false negative due to a concurrent rename, we're going to
	 * do the non-racy lookup, below.
	 */
	if (nd->flags & LOOKUP_RCU) {
		unsigned seq;
		*inode = nd->inode;
		dentry = __d_lookup_rcu(parent, name, &seq, inode);
		if (!dentry)
			goto unlazy;

		/* Memory barrier in read_seqcount_begin of child is enough */
		if (__read_seqcount_retry(&parent->d_seq, nd->seq))
			return -ECHILD;
		nd->seq = seq;

		if (unlikely(dentry->d_flags & DCACHE_OP_REVALIDATE)) {
			status = d_revalidate(dentry, nd);
			if (unlikely(status <= 0)) {
				if (status != -ECHILD)
					need_reval = 0;
				goto unlazy;
			}
		}
		path->mnt = mnt;
		path->dentry = dentry;
		if (likely(__follow_mount_rcu(nd, path, inode, false)))
			return 0;
unlazy:
		/* drop out of rcu-walk, keeping refs on what we hold */
		if (dentry) {
			if (nameidata_dentry_drop_rcu(nd, dentry))
				return -ECHILD;
		} else {
			if (nameidata_drop_rcu(nd))
				return -ECHILD;
		}
	} else {
		dentry = __d_lookup(parent, name);
	}

retry:
	if (unlikely(!dentry)) {
		struct inode *dir = parent->d_inode;
		BUG_ON(nd->inode != dir);

		mutex_lock(&dir->i_mutex);
		dentry = d_lookup(parent, name);
		if (likely(!dentry)) {
			dentry = d_alloc_and_lookup(parent, name, nd);
			if (IS_ERR(dentry)) {
				mutex_unlock(&dir->i_mutex);
				return PTR_ERR(dentry);
			}
			/* known good */
			need_reval = 0;
			status = 1;
		}
		mutex_unlock(&dir->i_mutex);
	}
	if (unlikely(dentry->d_flags & DCACHE_OP_REVALIDATE) && need_reval)
		status = d_revalidate(dentry, nd);
	if (unlikely(status <= 0)) {
		if (status < 0) {
			dput(dentry);
			return status;
		}
		if (!d_invalidate(dentry)) {
			/* stale dentry dropped; redo the lookup */
			dput(dentry);
			dentry = NULL;
			need_reval = 1;
			goto retry;
		}
	}

	path->mnt = mnt;
	path->dentry = dentry;
	err = follow_managed(path, nd->flags);
	if (unlikely(err < 0)) {
		path_put_conditional(path, nd);
		return err;
	}
	*inode = path->dentry->d_inode;
	return 0;
}

/*
 * Check exec ("search") permission on the current directory, handling the
 * rcu-walk case (and falling back to ref-walk if the RCU check can't
 * decide).
 */
static inline int may_lookup(struct nameidata *nd)
{
	if (nd->flags & LOOKUP_RCU) {
		int err = exec_permission(nd->inode, IPERM_FLAG_RCU);
		if (err != -ECHILD)
			return err;
		if (nameidata_drop_rcu(nd))
			return -ECHILD;
	}
	return exec_permission(nd->inode, 0);
}

/* Dispatch "." (no-op) and ".." components to the right dotdot handler. */
static inline int handle_dots(struct nameidata *nd, int type)
{
	if (type == LAST_DOTDOT) {
		if (nd->flags & LOOKUP_RCU) {
			if (follow_dotdot_rcu(nd))
				return -ECHILD;
		} else
			follow_dotdot(nd);
	}
	return 0;
}

/*
 * Abandon the walk, releasing whatever state the current mode holds:
 * path refs in ref-walk, RCU + vfsmount lock in rcu-walk.
 */
static void terminate_walk(struct nameidata *nd)
{
	if (!(nd->flags & LOOKUP_RCU)) {
		path_put(&nd->path);
	} else {
		nd->flags &= ~LOOKUP_RCU;
		if (!(nd->flags & LOOKUP_ROOT))
			nd->root.mnt = NULL;
		rcu_read_unlock();
		br_read_unlock(vfsmount_lock);
	}
}

/*
 * Walk one component: returns 0 on success (nd advanced), 1 if a symlink
 * was found and 'follow' requested it, negative errno on failure (walk
 * already terminated).
 */
static inline int walk_component(struct nameidata *nd, struct path *path,
		struct qstr *name, int type, int follow)
{
	struct inode *inode;
	int err;
	/*
	 * "." and ".." are special - ".." especially so because it has
	 * to be able to know about the current root directory and
	 * parent relationships.
	 */
	if (unlikely(type != LAST_NORM))
		return handle_dots(nd, type);
	err = do_lookup(nd, name, path, &inode);
	if (unlikely(err)) {
		terminate_walk(nd);
		return err;
	}
	if (!inode) {
		/* negative dentry: no such entry */
		path_to_nameidata(path, nd);
		terminate_walk(nd);
		return -ENOENT;
	}
	if (unlikely(inode->i_op->follow_link) && follow) {
		if (nameidata_dentry_drop_rcu_maybe(nd, path->dentry))
			return -ECHILD;
		BUG_ON(inode != path->dentry->d_inode);
		return 1;
	}
	path_to_nameidata(path, nd);
	nd->inode = inode;
	return 0;
}

/*
 * This limits recursive symlink follows to 8, while
 * limiting consecutive symlinks to 40.
 *
 * Without that kind of total limit, nasty chains of consecutive
 * symlinks can cause almost arbitrarily long lookups.
 */
static inline int nested_symlink(struct path *path, struct nameidata *nd)
{
	int res;

	if (unlikely(current->link_count >= MAX_NESTED_LINKS)) {
		path_put_conditional(path, nd);
		path_put(&nd->path);
		return -ELOOP;
	}
	BUG_ON(nd->depth >= MAX_NESTED_LINKS);

	nd->depth++;
	current->link_count++;

	do {
		struct path link = *path;
		void *cookie;

		res = follow_link(&link, nd, &cookie);
		if (!res)
			res = walk_component(nd, path, &nd->last,
					     nd->last_type, LOOKUP_FOLLOW);
		put_link(nd, &link, cookie);
	} while (res > 0);	/* >0 means another symlink to follow */

	current->link_count--;
	nd->depth--;
	return res;
}

/*
 * Name resolution.
 * This is the basic name resolution function, turning a pathname into
 * the final dentry. We expect 'base' to be positive and a directory.
 *
 * Returns 0 and nd will have valid dentry and mnt on success.
 * Returns error and drops reference to input namei data on failure.
 */
static int link_path_walk(const char *name, struct nameidata *nd)
{
	struct path next;
	int err;
	unsigned int lookup_flags = nd->flags;

	while (*name=='/')
		name++;
	if (!*name)
		return 0;

	/* At this point we know we have a real path component. */
	for(;;) {
		unsigned long hash;
		struct qstr this;
		unsigned int c;
		int type;

		nd->flags |= LOOKUP_CONTINUE;

		err = may_lookup(nd);
		if (err)
			break;

		/* hash the component name up to the next '/' or NUL */
		this.name = name;
		c = *(const unsigned char *)name;

		hash = init_name_hash();
		do {
			name++;
			hash = partial_name_hash(c, hash);
			c = *(const unsigned char *)name;
		} while (c && (c != '/'));
		this.len = name - (const char *) this.name;
		this.hash = end_name_hash(hash);

		type = LAST_NORM;
		if (this.name[0] == '.') switch (this.len) {
			case 2:
				if (this.name[1] == '.') {
					type = LAST_DOTDOT;
					nd->flags |= LOOKUP_JUMPED;
				}
				break;
			case 1:
				type = LAST_DOT;
		}
		if (likely(type == LAST_NORM)) {
			struct dentry *parent = nd->path.dentry;
			nd->flags &= ~LOOKUP_JUMPED;
			if (unlikely(parent->d_flags & DCACHE_OP_HASH)) {
				/* filesystem-specific name hashing */
				err = parent->d_op->d_hash(parent, nd->inode,
							   &this);
				if (err < 0)
					break;
			}
		}

		/* remove trailing slashes? */
		if (!c)
			goto last_component;
		while (*++name == '/');
		if (!*name)
			goto last_component;

		err = walk_component(nd, &next, &this, type, LOOKUP_FOLLOW);
		if (err < 0)
			return err;

		if (err) {
			err = nested_symlink(&next, nd);
			if (err)
				return err;
		}
		err = -ENOTDIR;
		if (!nd->inode->i_op->lookup)
			break;
		continue;
		/* here ends the main loop */

last_component:
		/* Clear LOOKUP_CONTINUE iff it was previously unset */
		nd->flags &= lookup_flags | ~LOOKUP_CONTINUE;
		nd->last = this;
		nd->last_type = type;
		return 0;
	}
	terminate_walk(nd);
	return err;
}

/*
 * Set up nd for a walk: pick the starting point (LOOKUP_ROOT base,
 * filesystem root, cwd, or the dfd directory) and take either RCU state
 * or real references depending on LOOKUP_RCU.  *fp may be set to a file
 * whose reference the rcu-walk caller must later drop.
 */
static int path_init(int dfd, const char *name, unsigned int flags,
		     struct nameidata *nd, struct file **fp)
{
	int retval = 0;
	int fput_needed;
	struct file *file;

	nd->last_type = LAST_ROOT; /* if there are only slashes... */
	nd->flags = flags | LOOKUP_JUMPED;
	nd->depth = 0;
	if (flags & LOOKUP_ROOT) {
		struct inode *inode = nd->root.dentry->d_inode;
		if (*name) {
			if (!inode->i_op->lookup)
				return -ENOTDIR;
			retval = inode_permission(inode, MAY_EXEC);
			if (retval)
				return retval;
		}
		nd->path = nd->root;
		nd->inode = inode;
		if (flags & LOOKUP_RCU) {
			br_read_lock(vfsmount_lock);
			rcu_read_lock();
			nd->seq = __read_seqcount_begin(&nd->path.dentry->d_seq);
		} else {
			path_get(&nd->path);
		}
		return 0;
	}

	nd->root.mnt = NULL;

	if (*name=='/') {
		/* absolute path: start from the process root */
		if (flags & LOOKUP_RCU) {
			br_read_lock(vfsmount_lock);
			rcu_read_lock();
			set_root_rcu(nd);
		} else {
			set_root(nd);
			path_get(&nd->root);
		}
		nd->path = nd->root;
	} else if (dfd == AT_FDCWD) {
		/* relative to current working directory */
		if (flags & LOOKUP_RCU) {
			struct fs_struct *fs = current->fs;
			unsigned seq;

			br_read_lock(vfsmount_lock);
			rcu_read_lock();

			do {
				seq = read_seqcount_begin(&fs->seq);
				nd->path = fs->pwd;
				nd->seq = __read_seqcount_begin(&nd->path.dentry->d_seq);
			} while (read_seqcount_retry(&fs->seq, seq));
		} else {
			get_fs_pwd(current->fs, &nd->path);
		}
	} else {
		/* relative to the directory given by dfd */
		struct dentry *dentry;

		file = fget_raw_light(dfd, &fput_needed);
		retval = -EBADF;
		if (!file)
			goto out_fail;

		dentry = file->f_path.dentry;

		if (*name) {
			retval = -ENOTDIR;
			if (!S_ISDIR(dentry->d_inode->i_mode))
				goto fput_fail;

			retval = file_permission(file, MAY_EXEC);
			if (retval)
				goto fput_fail;
		}

		nd->path = file->f_path;
		if (flags & LOOKUP_RCU) {
			if (fput_needed)
				*fp = file;
			nd->seq = __read_seqcount_begin(&nd->path.dentry->d_seq);
			br_read_lock(vfsmount_lock);
			rcu_read_lock();
		} else {
			path_get(&file->f_path);
			fput_light(file, fput_needed);
		}
	}

	nd->inode = nd->path.dentry->d_inode;
	return 0;

fput_fail:
	fput_light(file, fput_needed);
out_fail:
	return retval;
}

/*
 * Walk the already-parsed last component.  A trailing slash after a
 * LAST_NORM component forces directory + follow semantics.
 */
static inline int lookup_last(struct nameidata *nd, struct path *path)
{
	if (nd->last_type == LAST_NORM && nd->last.name[nd->last.len])
		nd->flags |= LOOKUP_FOLLOW | LOOKUP_DIRECTORY;

	nd->flags &= ~LOOKUP_PARENT;
	return walk_component(nd, path, &nd->last, nd->last_type,
					nd->flags & LOOKUP_FOLLOW);
}

/* Returns 0 and nd will be valid on success; Retuns error, otherwise. */
static int path_lookupat(int dfd, const char *name,
				unsigned int flags, struct nameidata *nd)
{
	struct file *base = NULL;
	struct path path;
	int err;

	/*
	 * Path walking is largely split up into 2 different synchronisation
	 * schemes, rcu-walk and ref-walk (explained in
	 * Documentation/filesystems/path-lookup.txt). These share much of the
	 * path walk code, but some things particularly setup, cleanup, and
	 * following mounts are sufficiently divergent that functions are
	 * duplicated. Typically there is a function foo(), and its RCU
	 * analogue, foo_rcu().
	 *
	 * -ECHILD is the error number of choice (just to avoid clashes) that
	 * is returned if some aspect of an rcu-walk fails. Such an error must
	 * be handled by restarting a traditional ref-walk (which will always
	 * be able to complete).
	 */
	err = path_init(dfd, name, flags | LOOKUP_PARENT, nd, &base);

	if (unlikely(err))
		return err;

	current->total_link_count = 0;
	err = link_path_walk(name, nd);

	if (!err && !(flags & LOOKUP_PARENT)) {
		err = lookup_last(nd, &path);
		while (err > 0) {
			/* trailing symlink(s): follow and retry last step */
			void *cookie;
			struct path link = path;
			nd->flags |= LOOKUP_PARENT;
			err = follow_link(&link, nd, &cookie);
			if (!err)
				err = lookup_last(nd, &path);
			put_link(nd, &link, cookie);
		}
	}

	if (nd->flags & LOOKUP_RCU) {
		/* went all way through without dropping RCU */
		BUG_ON(err);
		if (nameidata_drop_rcu_last(nd))
			err = -ECHILD;
	}

	if (!err) {
		err = handle_reval_path(nd);
		if (err)
			path_put(&nd->path);
	}

	if (!err && nd->flags & LOOKUP_DIRECTORY) {
		if (!nd->inode->i_op->lookup) {
			path_put(&nd->path);
			err = -ENOTDIR;
		}
	}

	if (base)
		fput(base);

	if (nd->root.mnt && !(nd->flags & LOOKUP_ROOT)) {
		path_put(&nd->root);
		nd->root.mnt = NULL;
	}
	return err;
}

/*
 * Top-level lookup driver: try rcu-walk, fall back to ref-walk on
 * -ECHILD, and retry with LOOKUP_REVAL on -ESTALE.
 */
static int do_path_lookup(int dfd, const char *name,
				unsigned int flags, struct nameidata *nd)
{
	int retval = path_lookupat(dfd, name, flags | LOOKUP_RCU, nd);
	if (unlikely(retval == -ECHILD))
		retval = path_lookupat(dfd, name, flags, nd);
	if (unlikely(retval == -ESTALE))
		retval = path_lookupat(dfd, name, flags | LOOKUP_REVAL, nd);

	if (likely(!retval)) {
		if (unlikely(!audit_dummy_context())) {
			if (nd->path.dentry && nd->inode)
				audit_inode(name, nd->path.dentry);
		}
	}
	return retval;
}

/* Resolve the parent directory of 'name' (LOOKUP_PARENT), cwd-relative. */
int kern_path_parent(const char *name, struct nameidata *nd)
{
	return do_path_lookup(AT_FDCWD, name, LOOKUP_PARENT, nd);
}

/* Kernel-internal path resolution; on success *path holds a reference. */
int kern_path(const char *name, unsigned int flags, struct path *path)
{
	struct nameidata nd;
	int res = do_path_lookup(AT_FDCWD, name, flags, &nd);
	if (!res)
		*path = nd.path;
	return res;
}

/**
 * vfs_path_lookup - lookup a file path relative to a dentry-vfsmount pair
 * @dentry:  pointer to dentry of the base directory
 * @mnt: pointer to vfs mount of the base directory
 * @name: pointer to file name
 * @flags: lookup flags
 * @nd: pointer to nameidata
 */
int vfs_path_lookup(struct dentry *dentry, struct vfsmount *mnt,
		    const char *name, unsigned int flags,
		    struct nameidata *nd)
{
	nd->root.dentry = dentry;
	nd->root.mnt = mnt;
	/* the first argument of do_path_lookup() is ignored with LOOKUP_ROOT */
	return do_path_lookup(AT_FDCWD, name, flags | LOOKUP_ROOT, nd);
}

/*
 * Single-component lookup under 'base' for create/unlink style callers.
 * Assumes the qstr is already hashed; base's i_mutex must be held by the
 * caller (see lookup_hash/lookup_one_len call sites).
 */
static struct dentry *__lookup_hash(struct qstr *name,
		struct dentry *base, struct nameidata *nd)
{
	struct inode *inode = base->d_inode;
	struct dentry *dentry;
	int err;

	err = exec_permission(inode, 0);
	if (err)
		return ERR_PTR(err);

	/*
	 * Don't bother with __d_lookup: callers are for creat as
	 * well as unlink, so a lot of the time it would cost
	 * a double lookup.
	 */
	dentry = d_lookup(base, name);

	if (dentry && (dentry->d_flags & DCACHE_OP_REVALIDATE))
		dentry = do_revalidate(dentry, nd);

	if (!dentry)
		dentry = d_alloc_and_lookup(base, name, nd);

	return dentry;
}

/*
 * Restricted form of lookup. Doesn't follow links, single-component only,
 * needs parent already locked. Doesn't follow mounts.
 * SMP-safe.
 */
static struct dentry *lookup_hash(struct nameidata *nd)
{
	return __lookup_hash(&nd->last, nd->path.dentry, nd);
}

/**
 * lookup_one_len - filesystem helper to lookup single pathname component
 * @name:	pathname component to lookup
 * @base:	base directory to lookup from
 * @len:	maximum length @len should be interpreted to
 *
 * Note that this routine is purely a helper for filesystem usage and should
 * not be called by generic code.  Also note that by using this function the
 * nameidata argument is passed to the filesystem methods and a filesystem
 * using this helper needs to be prepared for that.
 */
struct dentry *lookup_one_len(const char *name, struct dentry *base, int len)
{
	struct qstr this;
	unsigned long hash;
	unsigned int c;

	WARN_ON_ONCE(!mutex_is_locked(&base->d_inode->i_mutex));

	this.name = name;
	this.len = len;
	if (!len)
		return ERR_PTR(-EACCES);

	hash = init_name_hash();
	while (len--) {
		c = *(const unsigned char *)name++;
		if (c == '/' || c == '\0')
			return ERR_PTR(-EACCES);
		hash = partial_name_hash(c, hash);
	}
	this.hash = end_name_hash(hash);
	/*
	 * See if the low-level filesystem might want
	 * to use its own hash..
	 */
	if (base->d_flags & DCACHE_OP_HASH) {
		int err = base->d_op->d_hash(base, base->d_inode, &this);
		if (err < 0)
			return ERR_PTR(err);
	}

	return __lookup_hash(&this, base, NULL);
}

/*
 * Resolve a userspace pathname relative to dfd; on success *path holds a
 * reference the caller must path_put().
 */
int user_path_at(int dfd, const char __user *name, unsigned flags,
		 struct path *path)
{
	struct nameidata nd;
	char *tmp = getname_flags(name, flags);
	int err = PTR_ERR(tmp);
	if (!IS_ERR(tmp)) {

		BUG_ON(flags & LOOKUP_PARENT);

		err = do_path_lookup(dfd, tmp, flags, &nd);
		putname(tmp);
		if (!err)
			*path = nd.path;
	}
	return err;
}

/*
 * Resolve the parent of a userspace pathname; on success *name holds the
 * kernel copy of the string (caller must putname() it).
 */
static int user_path_parent(int dfd, const char __user *path,
			struct nameidata *nd, char **name)
{
	char *s = getname(path);
	int error;

	if (IS_ERR(s))
		return PTR_ERR(s);

	error = do_path_lookup(dfd, s, LOOKUP_PARENT, nd);
	if (error)
		putname(s);
	else
		*name = s;

	return error;
}

/*
 * It's inline, so penalty for filesystems that don't use sticky bit is
 * minimal.
 */
static inline int check_sticky(struct inode *dir, struct inode *inode)
{
	uid_t fsuid = current_fsuid();

	if (!(dir->i_mode & S_ISVTX))
		return 0;
	if (current_user_ns() != inode_userns(inode))
		goto other_userns;
	if (inode->i_uid == fsuid)
		return 0;
	if (dir->i_uid == fsuid)
		return 0;

other_userns:
	return !ns_capable(inode_userns(inode), CAP_FOWNER);
}

/*
 *	Check whether we can remove a link victim from directory dir, check
 *  whether the type of victim is right.
 *  1. We can't do it if dir is read-only (done in permission())
 *  2. We should have write and exec permissions on dir
 *  3. We can't remove anything from append-only dir
 *  4. We can't do anything with immutable dir (done in permission())
 *  5. If the sticky bit on dir is set we should either
 *	a. be owner of dir, or
 *	b. be owner of victim, or
 *	c. have CAP_FOWNER capability
 *  6. If the victim is append-only or immutable we can't do antyhing with
 *     links pointing to it.
 *  7. If we were asked to remove a directory and victim isn't one - ENOTDIR.
 *  8. If we were asked to remove a non-directory and victim isn't one - EISDIR.
 *  9. We can't remove a root or mountpoint.
 * 10. We don't allow removal of NFS sillyrenamed files; it's handled by
 *     nfs_async_unlink().
 */
static int may_delete(struct inode *dir,struct dentry *victim,int isdir)
{
	int error;

	if (!victim->d_inode)
		return -ENOENT;

	BUG_ON(victim->d_parent->d_inode != dir);
	audit_inode_child(victim, dir);

	error = inode_permission(dir, MAY_WRITE | MAY_EXEC);
	if (error)
		return error;
	if (IS_APPEND(dir))
		return -EPERM;
	if (check_sticky(dir, victim->d_inode)||IS_APPEND(victim->d_inode)||
	    IS_IMMUTABLE(victim->d_inode) || IS_SWAPFILE(victim->d_inode))
		return -EPERM;
	if (isdir) {
		if (!S_ISDIR(victim->d_inode->i_mode))
			return -ENOTDIR;
		if (IS_ROOT(victim))
			return -EBUSY;
	} else if (S_ISDIR(victim->d_inode->i_mode))
		return -EISDIR;
	if (IS_DEADDIR(dir))
		return -ENOENT;
	if (victim->d_flags & DCACHE_NFSFS_RENAMED)
		return -EBUSY;
	return 0;
}

/*	Check whether we can create an object with dentry child in directory
 *  dir.
 *  1. We can't do it if child already exists (open has special treatment for
 *     this case, but since we are inlined it's OK)
 *  2. We can't do it if dir is read-only (done in permission())
 *  3. We should have write and exec permissions on dir
 *  4. We can't do it if dir is immutable (done in permission())
 */
static inline int may_create(struct inode *dir, struct dentry *child)
{
	if (child->d_inode)
		return -EEXIST;
	if (IS_DEADDIR(dir))
		return -ENOENT;
	return inode_permission(dir, MAY_WRITE | MAY_EXEC);
}

/*
 * p1 and p2 should be directories on the same fs.
 * Locks both directories' i_mutex in a deadlock-safe order (ancestor
 * first); returns the ancestor dentry if one is an ancestor of the other,
 * NULL otherwise.
 */
struct dentry *lock_rename(struct dentry *p1, struct dentry *p2)
{
	struct dentry *p;

	if (p1 == p2) {
		mutex_lock_nested(&p1->d_inode->i_mutex, I_MUTEX_PARENT);
		return NULL;
	}

	mutex_lock(&p1->d_inode->i_sb->s_vfs_rename_mutex);

	p = d_ancestor(p2, p1);
	if (p) {
		mutex_lock_nested(&p2->d_inode->i_mutex, I_MUTEX_PARENT);
		mutex_lock_nested(&p1->d_inode->i_mutex, I_MUTEX_CHILD);
		return p;
	}

	p = d_ancestor(p1, p2);
	if (p) {
		mutex_lock_nested(&p1->d_inode->i_mutex, I_MUTEX_PARENT);
		mutex_lock_nested(&p2->d_inode->i_mutex, I_MUTEX_CHILD);
		return p;
	}

	mutex_lock_nested(&p1->d_inode->i_mutex, I_MUTEX_PARENT);
	mutex_lock_nested(&p2->d_inode->i_mutex, I_MUTEX_CHILD);
	return NULL;
}

/* Undo lock_rename(): release both i_mutexes and the rename mutex. */
void unlock_rename(struct dentry *p1, struct dentry *p2)
{
	mutex_unlock(&p1->d_inode->i_mutex);
	if (p1 != p2) {
		mutex_unlock(&p2->d_inode->i_mutex);
		mutex_unlock(&p1->d_inode->i_sb->s_vfs_rename_mutex);
	}
}

/*
 * Create a regular file in dir: permission and security checks, then the
 * filesystem's ->create(), with an fsnotify event on success.
 */
int vfs_create(struct inode *dir, struct dentry *dentry, int mode,
		struct nameidata *nd)
{
	int error = may_create(dir, dentry);

	if (error)
		return error;

	if (!dir->i_op->create)
		return -EACCES;	/* shouldn't it be ENOSYS? */
	mode &= S_IALLUGO;
	mode |= S_IFREG;
	error = security_inode_create(dir, dentry, mode);
	if (error)
		return error;
	error = dir->i_op->create(dir, dentry, mode, nd);
	if (!error)
		fsnotify_create(dir, dentry);
	return error;
}

/*
 * Check whether the object at *path may be opened with acc_mode/flag:
 * file-type restrictions, inode permission, append-only and O_NOATIME
 * rules, and lease conflicts.
 */
static int may_open(struct path *path, int acc_mode, int flag)
{
	struct dentry *dentry = path->dentry;
	struct inode *inode = dentry->d_inode;
	int error;

	/* O_PATH? */
	if (!acc_mode)
		return 0;

	if (!inode)
		return -ENOENT;

	switch (inode->i_mode & S_IFMT) {
	case S_IFLNK:
		return -ELOOP;
	case S_IFDIR:
		if (acc_mode & MAY_WRITE)
			return -EISDIR;
		break;
	case S_IFBLK:
	case S_IFCHR:
		if (path->mnt->mnt_flags & MNT_NODEV)
			return -EACCES;
		/*FALLTHRU*/
	case S_IFIFO:
	case S_IFSOCK:
		flag &= ~O_TRUNC;
		break;
	}

	error = inode_permission(inode, acc_mode);
	if (error)
		return error;

	/*
	 * An append-only file must be opened in append mode for writing.
	 */
	if (IS_APPEND(inode)) {
		if  ((flag & O_ACCMODE) != O_RDONLY && !(flag & O_APPEND))
			return -EPERM;
		if (flag & O_TRUNC)
			return -EPERM;
	}

	/* O_NOATIME can only be set by the owner or superuser */
	if (flag & O_NOATIME && !inode_owner_or_capable(inode))
		return -EPERM;

	/*
	 * Ensure there are no outstanding leases on the file.
	 */
	return break_lease(inode, flag);
}

/*
 * Truncate the just-opened file to zero length (O_TRUNC), after lock and
 * security checks.
 */
static int handle_truncate(struct file *filp)
{
	struct path *path = &filp->f_path;
	struct inode *inode = path->dentry->d_inode;
	int error = get_write_access(inode);
	if (error)
		return error;
	/*
	 * Refuse to truncate files with mandatory locks held on them.
	 */
	error = locks_verify_locked(inode);
	if (!error)
		error = security_path_truncate(path);
	if (!error) {
		error = do_truncate(path->dentry, 0,
				    ATTR_MTIME|ATTR_CTIME|ATTR_OPEN,
				    filp);
	}
	put_write_access(inode);
	return error;
}

/*
 * Note that while the flag value (low two bits) for sys_open means:
 *	00 - read-only
 *	01 - write-only
 *	10 - read-write
 *	11 - special
 * it is changed into
 *	00 - no permissions needed
 *	01 - read-permission
 *	10 - write-permission
 *	11 - read-write
 * for the internal routines (ie open_namei()/follow_link() etc)
 * This is more logical, and also allows the 00 "no perm needed"
 * to be used for symlinks (where the permissions are checked
 * later).
 *
*/
static inline int open_to_namei_flags(int flag)
{
	if ((flag+1) & O_ACCMODE)
		flag++;
	return flag;
}

/*
 * Handle the last step of open()
 *
 * Returns the opened file, an ERR_PTR, or NULL when a trailing symlink
 * must be followed by the caller (path_openat).
 */
static struct file *do_last(struct nameidata *nd, struct path *path,
			    const struct open_flags *op, const char *pathname)
{
	struct dentry *dir = nd->path.dentry;
	struct dentry *dentry;
	int open_flag = op->open_flag;
	int will_truncate = open_flag & O_TRUNC;
	int want_write = 0;
	int acc_mode = op->acc_mode;
	struct file *filp;
	int error;

	nd->flags &= ~LOOKUP_PARENT;
	nd->flags |= op->intent;

	switch (nd->last_type) {
	case LAST_DOTDOT:
	case LAST_DOT:
		error = handle_dots(nd, nd->last_type);
		if (error)
			return ERR_PTR(error);
		/* fallthrough */
	case LAST_ROOT:
		if (nd->flags & LOOKUP_RCU) {
			if (nameidata_drop_rcu_last(nd))
				return ERR_PTR(-ECHILD);
		}
		error = handle_reval_path(nd);
		if (error)
			goto exit;
		audit_inode(pathname, nd->path.dentry);
		if (open_flag & O_CREAT) {
			error = -EISDIR;
			goto exit;
		}
		goto ok;
	case LAST_BIND:
		/* can't be RCU mode here */
		error = handle_reval_path(nd);
		if (error)
			goto exit;
		audit_inode(pathname, dir);
		goto ok;
	}

	if (!(open_flag & O_CREAT)) {
		/* plain open (no create) path */
		int symlink_ok = 0;
		if (nd->last.name[nd->last.len])
			nd->flags |= LOOKUP_FOLLOW | LOOKUP_DIRECTORY;
		if (open_flag & O_PATH && !(nd->flags & LOOKUP_FOLLOW))
			symlink_ok = 1;
		/* we _can_ be in RCU mode here */
		error = walk_component(nd, path, &nd->last, LAST_NORM,
					!symlink_ok);
		if (error < 0)
			return ERR_PTR(error);
		if (error) /* symlink */
			return NULL;
		/* sayonara */
		if (nd->flags & LOOKUP_RCU) {
			if (nameidata_drop_rcu_last(nd))
				return ERR_PTR(-ECHILD);
		}

		error = -ENOTDIR;
		if (nd->flags & LOOKUP_DIRECTORY) {
			if (!nd->inode->i_op->lookup)
				goto exit;
		}
		audit_inode(pathname, nd->path.dentry);
		goto ok;
	}

	/* create side of things */
	if (nd->flags & LOOKUP_RCU) {
		if (nameidata_drop_rcu_last(nd))
			return ERR_PTR(-ECHILD);
	}

	audit_inode(pathname, dir);
	error = -EISDIR;
	/* trailing slashes? */
	if (nd->last.name[nd->last.len])
		goto exit;

	mutex_lock(&dir->d_inode->i_mutex);

	dentry = lookup_hash(nd);
	error = PTR_ERR(dentry);
	if (IS_ERR(dentry)) {
		mutex_unlock(&dir->d_inode->i_mutex);
		goto exit;
	}

	path->dentry = dentry;
	path->mnt = nd->path.mnt;

	/* Negative dentry, just create the file */
	if (!dentry->d_inode) {
		int mode = op->mode;
		if (!IS_POSIXACL(dir->d_inode))
			mode &= ~current_umask();
		/*
		 * This write is needed to ensure that a
		 * rw->ro transition does not occur between
		 * the time when the file is created and when
		 * a permanent write count is taken through
		 * the 'struct file' in nameidata_to_filp().
		 */
		error = mnt_want_write(nd->path.mnt);
		if (error)
			goto exit_mutex_unlock;
		want_write = 1;
		/* Don't check for write permission, don't truncate */
		open_flag &= ~O_TRUNC;
		will_truncate = 0;
		acc_mode = MAY_OPEN;
		error = security_path_mknod(&nd->path, dentry, mode, 0);
		if (error)
			goto exit_mutex_unlock;
		error = vfs_create(dir->d_inode, dentry, mode, nd);
		if (error)
			goto exit_mutex_unlock;
		mutex_unlock(&dir->d_inode->i_mutex);
		dput(nd->path.dentry);
		nd->path.dentry = dentry;
		goto common;
	}

	/*
	 * It already exists.
	 */
	mutex_unlock(&dir->d_inode->i_mutex);
	audit_inode(pathname, path->dentry);

	error = -EEXIST;
	if (open_flag & O_EXCL)
		goto exit_dput;

	error = follow_managed(path, nd->flags);
	if (error < 0)
		goto exit_dput;

	error = -ENOENT;
	if (!path->dentry->d_inode)
		goto exit_dput;

	if (path->dentry->d_inode->i_op->follow_link)
		return NULL;

	path_to_nameidata(path, nd);
	nd->inode = path->dentry->d_inode;
	error = -EISDIR;
	if (S_ISDIR(nd->inode->i_mode))
		goto exit;
ok:
	if (!S_ISREG(nd->inode->i_mode))
		will_truncate = 0;

	if (will_truncate) {
		error = mnt_want_write(nd->path.mnt);
		if (error)
			goto exit;
		want_write = 1;
	}
common:
	error = may_open(&nd->path, acc_mode, open_flag);
	if (error)
		goto exit;
	filp = nameidata_to_filp(nd);
	if (!IS_ERR(filp)) {
		error = ima_file_check(filp, op->acc_mode);
		if (error) {
			fput(filp);
			filp = ERR_PTR(error);
		}
	}
	if (!IS_ERR(filp)) {
		if (will_truncate) {
			error = handle_truncate(filp);
			if (error) {
				fput(filp);
				filp = ERR_PTR(error);
			}
		}
	}
out:
	if (want_write)
		mnt_drop_write(nd->path.mnt);
	path_put(&nd->path);
	return filp;

exit_mutex_unlock:
	mutex_unlock(&dir->d_inode->i_mutex);
exit_dput:
	path_put_conditional(path, nd);
exit:
	filp = ERR_PTR(error);
	goto out;
}

/*
 * Core of open(): walk the path, call do_last(), and loop following any
 * trailing symlinks (do_last() returns NULL for those).
 */
static struct file *path_openat(int dfd, const char *pathname,
		struct nameidata *nd, const struct open_flags *op, int flags)
{
	struct file *base = NULL;
	struct file *filp;
	struct path path;
	int error;

	filp = get_empty_filp();
	if (!filp)
		return ERR_PTR(-ENFILE);

	filp->f_flags = op->open_flag;
	nd->intent.open.file = filp;
	nd->intent.open.flags = open_to_namei_flags(op->open_flag);
	nd->intent.open.create_mode = op->mode;

	error = path_init(dfd, pathname, flags | LOOKUP_PARENT, nd, &base);
	if (unlikely(error))
		goto out_filp;

	current->total_link_count = 0;
	error = link_path_walk(pathname, nd);
	if (unlikely(error))
		goto out_filp;

	filp = do_last(nd, &path, op, pathname);
	while (unlikely(!filp)) { /* trailing symlink */
		struct path link = path;
		void *cookie;
		if (!(nd->flags & LOOKUP_FOLLOW)) {
			path_put_conditional(&path, nd);
			path_put(&nd->path);
			filp = ERR_PTR(-ELOOP);
			break;
		}
		nd->flags |= LOOKUP_PARENT;
		nd->flags &= ~(LOOKUP_OPEN|LOOKUP_CREATE|LOOKUP_EXCL);
		error = follow_link(&link, nd, &cookie);
		if (unlikely(error))
			filp = ERR_PTR(error);
		else
			filp = do_last(nd, &path, op, pathname);
		put_link(nd, &link, cookie);
	}
out:
	if (nd->root.mnt && !(nd->flags & LOOKUP_ROOT))
		path_put(&nd->root);
	if (base)
		fput(base);
	release_open_intent(nd);
	return filp;

out_filp:
	filp = ERR_PTR(error);
	goto out;
}

/*
 * Open with the standard rcu-walk -> ref-walk -> LOOKUP_REVAL fallback
 * sequence (same pattern as do_path_lookup()).
 */
struct file *do_filp_open(int dfd, const char *pathname,
		const struct open_flags *op, int flags)
{
	struct nameidata nd;
	struct file *filp;

	filp = path_openat(dfd, pathname, &nd, op, flags | LOOKUP_RCU);
	if (unlikely(filp == ERR_PTR(-ECHILD)))
		filp = path_openat(dfd, pathname, &nd, op, flags);
	if (unlikely(filp == ERR_PTR(-ESTALE)))
		filp = path_openat(dfd, pathname, &nd, op, flags | LOOKUP_REVAL);
	return filp;
}

/*
 * Like do_filp_open(), but resolves 'name' relative to an explicit
 * dentry/vfsmount root (LOOKUP_ROOT).  Refuses a symlink base for opens.
 */
struct file *do_file_open_root(struct dentry *dentry, struct vfsmount *mnt,
		const char *name, const struct open_flags *op, int flags)
{
	struct nameidata nd;
	struct file *file;

	nd.root.mnt = mnt;
	nd.root.dentry = dentry;

	flags |= LOOKUP_ROOT;

	if (dentry->d_inode->i_op->follow_link && op->intent & LOOKUP_OPEN)
		return ERR_PTR(-ELOOP);

	file = path_openat(-1, name, &nd, op, flags | LOOKUP_RCU);
	if (unlikely(file == ERR_PTR(-ECHILD)))
		file = path_openat(-1, name, &nd, op, flags);
	if (unlikely(file == ERR_PTR(-ESTALE)))
		file = path_openat(-1, name, &nd, op, flags | LOOKUP_REVAL);
	return file;
}

/**
 * lookup_create - lookup a dentry, creating it if it doesn't exist
 * @nd: nameidata info
 * @is_dir: directory flag
 *
 * Simple function to lookup and return a dentry and create it
 * if it doesn't exist.  Is SMP-safe.
 *
 * Returns with nd->path.dentry->d_inode->i_mutex locked.
 */
struct dentry *lookup_create(struct nameidata *nd, int is_dir)
{
	struct dentry *dentry = ERR_PTR(-EEXIST);

	mutex_lock_nested(&nd->path.dentry->d_inode->i_mutex, I_MUTEX_PARENT);
	/*
	 * Yucky last component or no last component at all?
	 * (foo/., foo/.., /////)
	 */
	if (nd->last_type != LAST_NORM)
		goto fail;
	nd->flags &= ~LOOKUP_PARENT;
	nd->flags |= LOOKUP_CREATE | LOOKUP_EXCL;
	nd->intent.open.flags = O_EXCL;

	/*
	 * Do the final lookup.
	 */
	dentry = lookup_hash(nd);
	if (IS_ERR(dentry))
		goto fail;

	if (dentry->d_inode)
		goto eexist;
	/*
	 * Special case - lookup gave negative, but... we had foo/bar/
	 * From the vfs_mknod() POV we just have a negative dentry -
	 * all is fine. Let's be bastards - you had / on the end, you've
	 * been asking for (non-existent) directory. -ENOENT for you.
	 */
	if (unlikely(!is_dir && nd->last.name[nd->last.len])) {
		dput(dentry);
		dentry = ERR_PTR(-ENOENT);
	}
	return dentry;
eexist:
	dput(dentry);
	dentry = ERR_PTR(-EEXIST);
fail:
	return dentry;
}
EXPORT_SYMBOL_GPL(lookup_create);

/*
 * Create a special/regular node in dir: capability (CAP_MKNOD for device
 * nodes), devcgroup and LSM checks, then the filesystem's ->mknod().
 */
int vfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
{
	int error = may_create(dir, dentry);

	if (error)
		return error;

	if ((S_ISCHR(mode) || S_ISBLK(mode)) &&
	    !ns_capable(inode_userns(dir), CAP_MKNOD))
		return -EPERM;

	if (!dir->i_op->mknod)
		return -EPERM;

	error = devcgroup_inode_mknod(mode, dev);
	if (error)
		return error;

	error = security_inode_mknod(dir, dentry, mode, dev);
	if (error)
		return error;

	error = dir->i_op->mknod(dir, dentry, mode, dev);
	if (!error)
		fsnotify_create(dir, dentry);
	return error;
}

/* Validate the file-type bits allowed for mknod(2). */
static int may_mknod(mode_t mode)
{
	switch (mode & S_IFMT) {
	case S_IFREG:
	case S_IFCHR:
	case S_IFBLK:
	case S_IFIFO:
	case S_IFSOCK:
	case 0: /* zero mode translates to S_IFREG */
		return 0;
	case S_IFDIR:
		return -EPERM;
	default:
		return -EINVAL;
	}
}

SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, int, mode,
		unsigned, dev)
{
	int error;
	char *tmp;
	struct dentry *dentry;
	struct nameidata nd;

	if (S_ISDIR(mode))
		return -EPERM;

	error = user_path_parent(dfd, filename, &nd, &tmp);
	if (error)
		return error;

	dentry = lookup_create(&nd, 0);
	if (IS_ERR(dentry)) {
		error = PTR_ERR(dentry);
		goto out_unlock;
	}
	if (!IS_POSIXACL(nd.path.dentry->d_inode))
		mode &= ~current_umask();
	error = may_mknod(mode);
	if (error)
		goto out_dput;
	error = mnt_want_write(nd.path.mnt);
	if (error)
		goto out_dput;
	error = security_path_mknod(&nd.path, dentry, mode, dev);
	if (error)
		goto out_drop_write;
	switch (mode & S_IFMT) {
		case 0: case S_IFREG:
			error = vfs_create(nd.path.dentry->d_inode,dentry,mode,&nd);
			break;
		case S_IFCHR: case S_IFBLK:
			error = vfs_mknod(nd.path.dentry->d_inode,dentry,mode,
					new_decode_dev(dev));
			break;
		case S_IFIFO: case S_IFSOCK:
			error = vfs_mknod(nd.path.dentry->d_inode,dentry,mode,0);
			break;
	}
out_drop_write:
	mnt_drop_write(nd.path.mnt);
out_dput:
	dput(dentry);
out_unlock:
	mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
	path_put(&nd.path);
	putname(tmp);

	return error;
}

SYSCALL_DEFINE3(mknod, const char __user *, filename, int, mode, unsigned, dev)
{
	return sys_mknodat(AT_FDCWD, filename, mode, dev);
}

/*
 * Create a directory in dir: permission and LSM checks, then the
 * filesystem's ->mkdir(), with an fsnotify event on success.
 */
int vfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
{
	int error = may_create(dir, dentry);

	if (error)
		return error;

	if (!dir->i_op->mkdir)
		return -EPERM;

	mode &= (S_IRWXUGO|S_ISVTX);
	error = security_inode_mkdir(dir, dentry, mode);
	if (error)
		return error;

	error = dir->i_op->mkdir(dir, dentry, mode);
	if (!error)
		fsnotify_mkdir(dir, dentry);
	return error;
}

SYSCALL_DEFINE3(mkdirat, int, dfd, const char __user *, pathname, int, mode)
{
	int error = 0;
	char * tmp;
	struct dentry *dentry;
	struct nameidata nd;

	error = user_path_parent(dfd, pathname, &nd, &tmp);
	if (error)
		goto out_err;

	dentry = lookup_create(&nd, 1);
	error = PTR_ERR(dentry);
	if (IS_ERR(dentry))
		goto out_unlock;

	if (!IS_POSIXACL(nd.path.dentry->d_inode))
		mode &= ~current_umask();
	error = mnt_want_write(nd.path.mnt);
	if (error)
		goto out_dput;
	error = security_path_mkdir(&nd.path, dentry, mode);
	if (error)
		goto out_drop_write;
	error = vfs_mkdir(nd.path.dentry->d_inode, dentry, mode);
out_drop_write:
	mnt_drop_write(nd.path.mnt);
out_dput:
	dput(dentry);
out_unlock:
	mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
	path_put(&nd.path);
	putname(tmp);
out_err:
	return error;
}

SYSCALL_DEFINE2(mkdir, const char __user *, pathname, int, mode)
{
	return sys_mkdirat(AT_FDCWD, pathname, mode);
}

/*
 * We try to drop the dentry early: we should have
 * a usage count of 2 if we're the only user of this
 * dentry, and if that is true (possibly after pruning
 * the dcache), then we drop the dentry now.
 *
 * A low-level filesystem can, if it choses, legally
 * do a
 *
 *	if (!d_unhashed(dentry))
 *		return -EBUSY;
 *
 * if it cannot handle the case of removing a directory
 * that is still in use by something else..
 */
void dentry_unhash(struct dentry *dentry)
{
	dget(dentry);
	shrink_dcache_parent(dentry);
	spin_lock(&dentry->d_lock);
	if (dentry->d_count == 2)
		__d_drop(dentry);
	spin_unlock(&dentry->d_lock);
}

/*
 * Remove directory dentry from dir: may_delete() checks, then the
 * filesystem's ->rmdir() under the victim's i_mutex, refusing busy
 * mountpoints.
 */
int vfs_rmdir(struct inode *dir, struct dentry *dentry)
{
	int error = may_delete(dir, dentry, 1);

	if (error)
		return error;

	if (!dir->i_op->rmdir)
		return -EPERM;

	mutex_lock(&dentry->d_inode->i_mutex);
	dentry_unhash(dentry);
	if (d_mountpoint(dentry))
		error = -EBUSY;
	else {
		error = security_inode_rmdir(dir, dentry);
		if (!error) {
			error = dir->i_op->rmdir(dir, dentry);
			if (!error) {
				dentry->d_inode->i_flags |= S_DEAD;
				dont_mount(dentry);
			}
		}
	}
	mutex_unlock(&dentry->d_inode->i_mutex);
	if (!error) {
		d_delete(dentry);
	}
	dput(dentry);

	return error;
}

/*
 * rmdir(2) worker: resolve the parent, reject ".", ".." and root last
 * components, then lock the parent and call vfs_rmdir().
 */
static long do_rmdir(int dfd, const char __user *pathname)
{
	int error = 0;
	char * name;
	struct dentry *dentry;
	struct nameidata nd;

	error = user_path_parent(dfd, pathname, &nd, &name);
	if (error)
		return error;

	switch(nd.last_type) {
	case LAST_DOTDOT:
		error = -ENOTEMPTY;
		goto exit1;
	case LAST_DOT:
		error = -EINVAL;
		goto exit1;
	case LAST_ROOT:
		error = -EBUSY;
		goto exit1;
	}

	nd.flags &= ~LOOKUP_PARENT;

	mutex_lock_nested(&nd.path.dentry->d_inode->i_mutex, I_MUTEX_PARENT);
	dentry = lookup_hash(&nd);
	error = PTR_ERR(dentry);
	if (IS_ERR(dentry))
		goto exit2;
	error = mnt_want_write(nd.path.mnt);
	if (error)
		goto exit3;
	error = security_path_rmdir(&nd.path, dentry);
	if (error)
		goto exit4;
	error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
exit4:
	mnt_drop_write(nd.path.mnt);
exit3:
	dput(dentry);
exit2:
	mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
exit1:
	path_put(&nd.path);
	putname(name);
	return error;
}

SYSCALL_DEFINE1(rmdir, const char __user *, pathname)
{
	return do_rmdir(AT_FDCWD, pathname);
}

/*
 * Unlink a non-directory from dir: may_delete() checks, then the
 * filesystem's ->unlink() under the victim's i_mutex, refusing busy
 * mountpoints.
 */
int vfs_unlink(struct inode *dir, struct dentry *dentry)
{
	int error = may_delete(dir, dentry, 0);

	if (error)
		return error;

	if (!dir->i_op->unlink)
		return -EPERM;

	mutex_lock(&dentry->d_inode->i_mutex);
	if (d_mountpoint(dentry))
		error = -EBUSY;
	else {
		error = security_inode_unlink(dir, dentry);
		if (!error) {
			error = dir->i_op->unlink(dir, dentry);
			if (!error)
				dont_mount(dentry);
		}
	}
	mutex_unlock(&dentry->d_inode->i_mutex);

	/* We don't d_delete() NFS sillyrenamed files--they still exist. */
	if (!error && !(dentry->d_flags & DCACHE_NFSFS_RENAMED)) {
		fsnotify_link_count(dentry->d_inode);
		d_delete(dentry);
	}

	return error;
}

/*
 * Make sure that the actual truncation of the file will occur outside its
 * directory's i_mutex.  Truncate can take a long time if there is a lot of
 * writeout happening, and we don't want to prevent access to the directory
 * while waiting on the I/O.
 */
static long do_unlinkat(int dfd, const char __user *pathname)
{
	int error;
	char *name;
	struct dentry *dentry;
	struct nameidata nd;
	struct inode *inode = NULL;

	error = user_path_parent(dfd, pathname, &nd, &name);
	if (error)
		return error;

	error = -EISDIR;
	if (nd.last_type != LAST_NORM)
		goto exit1;

	nd.flags &= ~LOOKUP_PARENT;

	mutex_lock_nested(&nd.path.dentry->d_inode->i_mutex, I_MUTEX_PARENT);
	dentry = lookup_hash(&nd);
	error = PTR_ERR(dentry);
	if (!IS_ERR(dentry)) {
		/* Why not before?
Because we want correct error value */ if (nd.last.name[nd.last.len]) goto slashes; inode = dentry->d_inode; if (inode) ihold(inode); error = mnt_want_write(nd.path.mnt); if (error) goto exit2; error = security_path_unlink(&nd.path, dentry); if (error) goto exit3; error = vfs_unlink(nd.path.dentry->d_inode, dentry); exit3: mnt_drop_write(nd.path.mnt); exit2: dput(dentry); } mutex_unlock(&nd.path.dentry->d_inode->i_mutex); if (inode) iput(inode); /* truncate the inode here */ exit1: path_put(&nd.path); putname(name); return error; slashes: error = !dentry->d_inode ? -ENOENT : S_ISDIR(dentry->d_inode->i_mode) ? -EISDIR : -ENOTDIR; goto exit2; } SYSCALL_DEFINE3(unlinkat, int, dfd, const char __user *, pathname, int, flag) { if ((flag & ~AT_REMOVEDIR) != 0) return -EINVAL; if (flag & AT_REMOVEDIR) return do_rmdir(dfd, pathname); return do_unlinkat(dfd, pathname); } SYSCALL_DEFINE1(unlink, const char __user *, pathname) { return do_unlinkat(AT_FDCWD, pathname); } int vfs_symlink(struct inode *dir, struct dentry *dentry, const char *oldname) { int error = may_create(dir, dentry); if (error) return error; if (!dir->i_op->symlink) return -EPERM; error = security_inode_symlink(dir, dentry, oldname); if (error) return error; error = dir->i_op->symlink(dir, dentry, oldname); if (!error) fsnotify_create(dir, dentry); return error; } SYSCALL_DEFINE3(symlinkat, const char __user *, oldname, int, newdfd, const char __user *, newname) { int error; char *from; char *to; struct dentry *dentry; struct nameidata nd; from = getname(oldname); if (IS_ERR(from)) return PTR_ERR(from); error = user_path_parent(newdfd, newname, &nd, &to); if (error) goto out_putname; dentry = lookup_create(&nd, 0); error = PTR_ERR(dentry); if (IS_ERR(dentry)) goto out_unlock; error = mnt_want_write(nd.path.mnt); if (error) goto out_dput; error = security_path_symlink(&nd.path, dentry, from); if (error) goto out_drop_write; error = vfs_symlink(nd.path.dentry->d_inode, dentry, from); out_drop_write: 
mnt_drop_write(nd.path.mnt); out_dput: dput(dentry); out_unlock: mutex_unlock(&nd.path.dentry->d_inode->i_mutex); path_put(&nd.path); putname(to); out_putname: putname(from); return error; } SYSCALL_DEFINE2(symlink, const char __user *, oldname, const char __user *, newname) { return sys_symlinkat(oldname, AT_FDCWD, newname); } int vfs_link(struct dentry *old_dentry, struct inode *dir, struct dentry *new_dentry) { struct inode *inode = old_dentry->d_inode; int error; if (!inode) return -ENOENT; error = may_create(dir, new_dentry); if (error) return error; if (dir->i_sb != inode->i_sb) return -EXDEV; /* * A link to an append-only or immutable file cannot be created. */ if (IS_APPEND(inode) || IS_IMMUTABLE(inode)) return -EPERM; if (!dir->i_op->link) return -EPERM; if (S_ISDIR(inode->i_mode)) return -EPERM; error = security_inode_link(old_dentry, dir, new_dentry); if (error) return error; mutex_lock(&inode->i_mutex); /* Make sure we don't allow creating hardlink to an unlinked file */ if (inode->i_nlink == 0) error = -ENOENT; else error = dir->i_op->link(old_dentry, dir, new_dentry); mutex_unlock(&inode->i_mutex); if (!error) fsnotify_link(dir, inode, new_dentry); return error; } /* * Hardlinks are often used in delicate situations. We avoid * security-related surprises by not following symlinks on the * newname. --KAB * * We don't follow them on the oldname either to be compatible * with linux 2.0, and to avoid hard-linking to directories * and other special files. --ADM */ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname, int, newdfd, const char __user *, newname, int, flags) { struct dentry *new_dentry; struct nameidata nd; struct path old_path; int how = 0; int error; char *to; if ((flags & ~(AT_SYMLINK_FOLLOW | AT_EMPTY_PATH)) != 0) return -EINVAL; /* * To use null names we require CAP_DAC_READ_SEARCH * This ensures that not everyone will be able to create * handlink using the passed filedescriptor. 
*/ if (flags & AT_EMPTY_PATH) { if (!capable(CAP_DAC_READ_SEARCH)) return -ENOENT; how = LOOKUP_EMPTY; } if (flags & AT_SYMLINK_FOLLOW) how |= LOOKUP_FOLLOW; error = user_path_at(olddfd, oldname, how, &old_path); if (error) return error; error = user_path_parent(newdfd, newname, &nd, &to); if (error) goto out; error = -EXDEV; if (old_path.mnt != nd.path.mnt) goto out_release; new_dentry = lookup_create(&nd, 0); error = PTR_ERR(new_dentry); if (IS_ERR(new_dentry)) goto out_unlock; error = mnt_want_write(nd.path.mnt); if (error) goto out_dput; error = security_path_link(old_path.dentry, &nd.path, new_dentry); if (error) goto out_drop_write; error = vfs_link(old_path.dentry, nd.path.dentry->d_inode, new_dentry); out_drop_write: mnt_drop_write(nd.path.mnt); out_dput: dput(new_dentry); out_unlock: mutex_unlock(&nd.path.dentry->d_inode->i_mutex); out_release: path_put(&nd.path); putname(to); out: path_put(&old_path); return error; } SYSCALL_DEFINE2(link, const char __user *, oldname, const char __user *, newname) { return sys_linkat(AT_FDCWD, oldname, AT_FDCWD, newname, 0); } /* * The worst of all namespace operations - renaming directory. "Perverted" * doesn't even start to describe it. Somebody in UCB had a heck of a trip... * Problems: * a) we can get into loop creation. Check is done in is_subdir(). * b) race potential - two innocent renames can create a loop together. * That's where 4.4 screws up. Current fix: serialization on * sb->s_vfs_rename_mutex. We might be more accurate, but that's another * story. * c) we have to lock _three_ objects - parents and victim (if it exists). * And that - after we got ->i_mutex on parents (until then we don't know * whether the target exists). Solution: try to be smart with locking * order for inodes. We rely on the fact that tree topology may change * only under ->s_vfs_rename_mutex _and_ that parent of the object we * move will be locked. 
Thus we can rank directories by the tree * (ancestors first) and rank all non-directories after them. * That works since everybody except rename does "lock parent, lookup, * lock child" and rename is under ->s_vfs_rename_mutex. * HOWEVER, it relies on the assumption that any object with ->lookup() * has no more than 1 dentry. If "hybrid" objects will ever appear, * we'd better make sure that there's no link(2) for them. * d) some filesystems don't support opened-but-unlinked directories, * either because of layout or because they are not ready to deal with * all cases correctly. The latter will be fixed (taking this sort of * stuff into VFS), but the former is not going away. Solution: the same * trick as in rmdir(). * e) conversion from fhandle to dentry may come in the wrong moment - when * we are removing the target. Solution: we will have to grab ->i_mutex * in the fhandle_to_dentry code. [FIXME - current nfsfh.c relies on * ->i_mutex on parents, which works but leads to some truly excessive * locking]. */ static int vfs_rename_dir(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry) { int error = 0; struct inode *target; /* * If we are going to change the parent - check write permissions, * we'll need to flip '..'. 
*/ if (new_dir != old_dir) { error = inode_permission(old_dentry->d_inode, MAY_WRITE); if (error) return error; } error = security_inode_rename(old_dir, old_dentry, new_dir, new_dentry); if (error) return error; target = new_dentry->d_inode; if (target) mutex_lock(&target->i_mutex); if (d_mountpoint(old_dentry)||d_mountpoint(new_dentry)) error = -EBUSY; else { if (target) dentry_unhash(new_dentry); error = old_dir->i_op->rename(old_dir, old_dentry, new_dir, new_dentry); } if (target) { if (!error) { target->i_flags |= S_DEAD; dont_mount(new_dentry); } mutex_unlock(&target->i_mutex); if (d_unhashed(new_dentry)) d_rehash(new_dentry); dput(new_dentry); } if (!error) if (!(old_dir->i_sb->s_type->fs_flags & FS_RENAME_DOES_D_MOVE)) d_move(old_dentry,new_dentry); return error; } static int vfs_rename_other(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry) { struct inode *target; int error; error = security_inode_rename(old_dir, old_dentry, new_dir, new_dentry); if (error) return error; dget(new_dentry); target = new_dentry->d_inode; if (target) mutex_lock(&target->i_mutex); if (d_mountpoint(old_dentry)||d_mountpoint(new_dentry)) error = -EBUSY; else error = old_dir->i_op->rename(old_dir, old_dentry, new_dir, new_dentry); if (!error) { if (target) dont_mount(new_dentry); if (!(old_dir->i_sb->s_type->fs_flags & FS_RENAME_DOES_D_MOVE)) d_move(old_dentry, new_dentry); } if (target) mutex_unlock(&target->i_mutex); dput(new_dentry); return error; } int vfs_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry) { int error; int is_dir = S_ISDIR(old_dentry->d_inode->i_mode); const unsigned char *old_name; if (old_dentry->d_inode == new_dentry->d_inode) return 0; error = may_delete(old_dir, old_dentry, is_dir); if (error) return error; if (!new_dentry->d_inode) error = may_create(new_dir, new_dentry); else error = may_delete(new_dir, new_dentry, is_dir); if (error) return error; 
if (!old_dir->i_op->rename) return -EPERM; old_name = fsnotify_oldname_init(old_dentry->d_name.name); if (is_dir) error = vfs_rename_dir(old_dir,old_dentry,new_dir,new_dentry); else error = vfs_rename_other(old_dir,old_dentry,new_dir,new_dentry); if (!error) fsnotify_move(old_dir, new_dir, old_name, is_dir, new_dentry->d_inode, old_dentry); fsnotify_oldname_free(old_name); return error; } SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname, int, newdfd, const char __user *, newname) { struct dentry *old_dir, *new_dir; struct dentry *old_dentry, *new_dentry; struct dentry *trap; struct nameidata oldnd, newnd; char *from; char *to; int error; error = user_path_parent(olddfd, oldname, &oldnd, &from); if (error) goto exit; error = user_path_parent(newdfd, newname, &newnd, &to); if (error) goto exit1; error = -EXDEV; if (oldnd.path.mnt != newnd.path.mnt) goto exit2; old_dir = oldnd.path.dentry; error = -EBUSY; if (oldnd.last_type != LAST_NORM) goto exit2; new_dir = newnd.path.dentry; if (newnd.last_type != LAST_NORM) goto exit2; oldnd.flags &= ~LOOKUP_PARENT; newnd.flags &= ~LOOKUP_PARENT; newnd.flags |= LOOKUP_RENAME_TARGET; trap = lock_rename(new_dir, old_dir); old_dentry = lookup_hash(&oldnd); error = PTR_ERR(old_dentry); if (IS_ERR(old_dentry)) goto exit3; /* source must exist */ error = -ENOENT; if (!old_dentry->d_inode) goto exit4; /* unless the source is a directory trailing slashes give -ENOTDIR */ if (!S_ISDIR(old_dentry->d_inode->i_mode)) { error = -ENOTDIR; if (oldnd.last.name[oldnd.last.len]) goto exit4; if (newnd.last.name[newnd.last.len]) goto exit4; } /* source should not be ancestor of target */ error = -EINVAL; if (old_dentry == trap) goto exit4; new_dentry = lookup_hash(&newnd); error = PTR_ERR(new_dentry); if (IS_ERR(new_dentry)) goto exit4; /* target should not be an ancestor of source */ error = -ENOTEMPTY; if (new_dentry == trap) goto exit5; error = mnt_want_write(oldnd.path.mnt); if (error) goto exit5; error = 
security_path_rename(&oldnd.path, old_dentry, &newnd.path, new_dentry); if (error) goto exit6; error = vfs_rename(old_dir->d_inode, old_dentry, new_dir->d_inode, new_dentry); exit6: mnt_drop_write(oldnd.path.mnt); exit5: dput(new_dentry); exit4: dput(old_dentry); exit3: unlock_rename(new_dir, old_dir); exit2: path_put(&newnd.path); putname(to); exit1: path_put(&oldnd.path); putname(from); exit: return error; } SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newname) { return sys_renameat(AT_FDCWD, oldname, AT_FDCWD, newname); } int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link) { int len; len = PTR_ERR(link); if (IS_ERR(link)) goto out; len = strlen(link); if (len > (unsigned) buflen) len = buflen; if (copy_to_user(buffer, link, len)) len = -EFAULT; out: return len; } /* * A helper for ->readlink(). This should be used *ONLY* for symlinks that * have ->follow_link() touching nd only in nd_set_link(). Using (or not * using) it for any given inode is up to filesystem. 
*/ int generic_readlink(struct dentry *dentry, char __user *buffer, int buflen) { struct nameidata nd; void *cookie; int res; nd.depth = 0; cookie = dentry->d_inode->i_op->follow_link(dentry, &nd); if (IS_ERR(cookie)) return PTR_ERR(cookie); res = vfs_readlink(dentry, buffer, buflen, nd_get_link(&nd)); if (dentry->d_inode->i_op->put_link) dentry->d_inode->i_op->put_link(dentry, &nd, cookie); return res; } int vfs_follow_link(struct nameidata *nd, const char *link) { return __vfs_follow_link(nd, link); } /* get the link contents into pagecache */ static char *page_getlink(struct dentry * dentry, struct page **ppage) { char *kaddr; struct page *page; struct address_space *mapping = dentry->d_inode->i_mapping; page = read_mapping_page(mapping, 0, NULL); if (IS_ERR(page)) return (char*)page; *ppage = page; kaddr = kmap(page); nd_terminate_link(kaddr, dentry->d_inode->i_size, PAGE_SIZE - 1); return kaddr; } int page_readlink(struct dentry *dentry, char __user *buffer, int buflen) { struct page *page = NULL; char *s = page_getlink(dentry, &page); int res = vfs_readlink(dentry,buffer,buflen,s); if (page) { kunmap(page); page_cache_release(page); } return res; } void *page_follow_link_light(struct dentry *dentry, struct nameidata *nd) { struct page *page = NULL; nd_set_link(nd, page_getlink(dentry, &page)); return page; } void page_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie) { struct page *page = cookie; if (page) { kunmap(page); page_cache_release(page); } } /* * The nofs argument instructs pagecache_write_begin to pass AOP_FLAG_NOFS */ int __page_symlink(struct inode *inode, const char *symname, int len, int nofs) { struct address_space *mapping = inode->i_mapping; struct page *page; void *fsdata; int err; char *kaddr; unsigned int flags = AOP_FLAG_UNINTERRUPTIBLE; if (nofs) flags |= AOP_FLAG_NOFS; retry: err = pagecache_write_begin(NULL, mapping, 0, len-1, flags, &page, &fsdata); if (err) goto fail; kaddr = kmap_atomic(page, KM_USER0); 
memcpy(kaddr, symname, len-1); kunmap_atomic(kaddr, KM_USER0); err = pagecache_write_end(NULL, mapping, 0, len-1, len-1, page, fsdata); if (err < 0) goto fail; if (err < len-1) goto retry; mark_inode_dirty(inode); return 0; fail: return err; } int page_symlink(struct inode *inode, const char *symname, int len) { return __page_symlink(inode, symname, len, !(mapping_gfp_mask(inode->i_mapping) & __GFP_FS)); } const struct inode_operations page_symlink_inode_operations = { .readlink = generic_readlink, .follow_link = page_follow_link_light, .put_link = page_put_link, }; EXPORT_SYMBOL(user_path_at); EXPORT_SYMBOL(follow_down_one); EXPORT_SYMBOL(follow_down); EXPORT_SYMBOL(follow_up); EXPORT_SYMBOL(get_write_access); /* binfmt_aout */ EXPORT_SYMBOL(getname); EXPORT_SYMBOL(lock_rename); EXPORT_SYMBOL(lookup_one_len); EXPORT_SYMBOL(page_follow_link_light); EXPORT_SYMBOL(page_put_link); EXPORT_SYMBOL(page_readlink); EXPORT_SYMBOL(__page_symlink); EXPORT_SYMBOL(page_symlink); EXPORT_SYMBOL(page_symlink_inode_operations); EXPORT_SYMBOL(kern_path_parent); EXPORT_SYMBOL(kern_path); EXPORT_SYMBOL(vfs_path_lookup); EXPORT_SYMBOL(inode_permission); EXPORT_SYMBOL(file_permission); EXPORT_SYMBOL(unlock_rename); EXPORT_SYMBOL(vfs_create); EXPORT_SYMBOL(vfs_follow_link); EXPORT_SYMBOL(vfs_link); EXPORT_SYMBOL(vfs_mkdir); EXPORT_SYMBOL(vfs_mknod); EXPORT_SYMBOL(generic_permission); EXPORT_SYMBOL(vfs_readlink); EXPORT_SYMBOL(vfs_rename); EXPORT_SYMBOL(vfs_rmdir); EXPORT_SYMBOL(vfs_symlink); EXPORT_SYMBOL(vfs_unlink); EXPORT_SYMBOL(dentry_unhash); EXPORT_SYMBOL(generic_readlink);
gpl-2.0
libcg/android_kernel_samsung_exynos4
drivers/usb/gadget/u_serial.c
122
34279
/* * u_serial.c - utilities for USB gadget "serial port"/TTY support * * Copyright (C) 2003 Al Borchers (alborchers@steinerpoint.com) * Copyright (C) 2008 David Brownell * Copyright (C) 2008 by Nokia Corporation * * This code also borrows from usbserial.c, which is * Copyright (C) 1999 - 2002 Greg Kroah-Hartman (greg@kroah.com) * Copyright (C) 2000 Peter Berger (pberger@brimson.com) * Copyright (C) 2000 Al Borchers (alborchers@steinerpoint.com) * * This software is distributed under the terms of the GNU General * Public License ("GPL") as published by the Free Software Foundation, * either version 2 of that License or (at your option) any later version. */ /* #define VERBOSE_DEBUG */ #include <linux/kernel.h> #include <linux/sched.h> #include <linux/interrupt.h> #include <linux/device.h> #include <linux/delay.h> #include <linux/tty.h> #include <linux/tty_flip.h> #include <linux/slab.h> #include <linux/export.h> #include "u_serial.h" /* * This component encapsulates the TTY layer glue needed to provide basic * "serial port" functionality through the USB gadget stack. Each such * port is exposed through a /dev/ttyGS* node. * * After initialization (gserial_setup), these TTY port devices stay * available until they are removed (gserial_cleanup). Each one may be * connected to a USB function (gserial_connect), or disconnected (with * gserial_disconnect) when the USB host issues a config change event. * Data can only flow when the port is connected to the host. * * A given TTY port can be made available in multiple configurations. * For example, each one might expose a ttyGS0 node which provides a * login application. In one case that might use CDC ACM interface 0, * while another configuration might use interface 3 for that. The * work to handle that (including descriptor management) is not part * of this component. * * Configurations may expose more than one TTY port. 
For example, if * ttyGS0 provides login service, then ttyGS1 might provide dialer access * for a telephone or fax link. And ttyGS2 might be something that just * needs a simple byte stream interface for some messaging protocol that * is managed in userspace ... OBEX, PTP, and MTP have been mentioned. */ #define PREFIX "ttyGS" /* * gserial is the lifecycle interface, used by USB functions * gs_port is the I/O nexus, used by the tty driver * tty_struct links to the tty/filesystem framework * * gserial <---> gs_port ... links will be null when the USB link is * inactive; managed by gserial_{connect,disconnect}(). each gserial * instance can wrap its own USB control protocol. * gserial->ioport == usb_ep->driver_data ... gs_port * gs_port->port_usb ... gserial * * gs_port <---> tty_struct ... links will be null when the TTY file * isn't opened; managed by gs_open()/gs_close() * gserial->port_tty ... tty_struct * tty_struct->driver_data ... gserial */ /* RX and TX queues can buffer QUEUE_SIZE packets before they hit the * next layer of buffering. For TX that's a circular buffer; for RX * consider it a NOP. A third layer is provided by the TTY code. */ #define QUEUE_SIZE 16 #define WRITE_BUF_SIZE 8192 /* TX only */ /* circular buffer */ struct gs_buf { unsigned buf_size; char *buf_buf; char *buf_get; char *buf_put; }; /* * The port structure holds info for each port, one for each minor number * (and thus for each /dev/ node). */ struct gs_port { struct tty_port port; spinlock_t port_lock; /* guard port_* access */ struct gserial *port_usb; bool openclose; /* open/close in progress */ u8 port_num; struct list_head read_pool; int read_started; int read_allocated; struct list_head read_queue; unsigned n_read; struct tasklet_struct push; struct list_head write_pool; int write_started; int write_allocated; struct gs_buf port_write_buf; wait_queue_head_t drain_wait; /* wait while writes drain */ /* REVISIT this state ... 
*/ struct usb_cdc_line_coding port_line_coding; /* 8-N-1 etc */ }; /* increase N_PORTS if you need more */ #define N_PORTS 4 static struct portmaster { struct mutex lock; /* protect open/close */ struct gs_port *port; } ports[N_PORTS]; static unsigned n_ports; #define GS_CLOSE_TIMEOUT 15 /* seconds */ #ifdef VERBOSE_DEBUG #define pr_vdebug(fmt, arg...) \ pr_debug(fmt, ##arg) #else #define pr_vdebug(fmt, arg...) \ ({ if (0) pr_debug(fmt, ##arg); }) #endif /*-------------------------------------------------------------------------*/ /* Circular Buffer */ /* * gs_buf_alloc * * Allocate a circular buffer and all associated memory. */ static int gs_buf_alloc(struct gs_buf *gb, unsigned size) { gb->buf_buf = kmalloc(size, GFP_KERNEL); if (gb->buf_buf == NULL) return -ENOMEM; gb->buf_size = size; gb->buf_put = gb->buf_buf; gb->buf_get = gb->buf_buf; return 0; } /* * gs_buf_free * * Free the buffer and all associated memory. */ static void gs_buf_free(struct gs_buf *gb) { kfree(gb->buf_buf); gb->buf_buf = NULL; } /* * gs_buf_clear * * Clear out all data in the circular buffer. */ static void gs_buf_clear(struct gs_buf *gb) { gb->buf_get = gb->buf_put; /* equivalent to a get of all data available */ } /* * gs_buf_data_avail * * Return the number of bytes of data written into the circular * buffer. */ static unsigned gs_buf_data_avail(struct gs_buf *gb) { return (gb->buf_size + gb->buf_put - gb->buf_get) % gb->buf_size; } /* * gs_buf_space_avail * * Return the number of bytes of space available in the circular * buffer. */ static unsigned gs_buf_space_avail(struct gs_buf *gb) { return (gb->buf_size + gb->buf_get - gb->buf_put - 1) % gb->buf_size; } /* * gs_buf_put * * Copy data data from a user buffer and put it into the circular buffer. * Restrict to the amount of space available. * * Return the number of bytes copied. 
*/ static unsigned gs_buf_put(struct gs_buf *gb, const char *buf, unsigned count) { unsigned len; len = gs_buf_space_avail(gb); if (count > len) count = len; if (count == 0) return 0; len = gb->buf_buf + gb->buf_size - gb->buf_put; if (count > len) { memcpy(gb->buf_put, buf, len); memcpy(gb->buf_buf, buf+len, count - len); gb->buf_put = gb->buf_buf + count - len; } else { memcpy(gb->buf_put, buf, count); if (count < len) gb->buf_put += count; else /* count == len */ gb->buf_put = gb->buf_buf; } return count; } /* * gs_buf_get * * Get data from the circular buffer and copy to the given buffer. * Restrict to the amount of data available. * * Return the number of bytes copied. */ static unsigned gs_buf_get(struct gs_buf *gb, char *buf, unsigned count) { unsigned len; len = gs_buf_data_avail(gb); if (count > len) count = len; if (count == 0) return 0; len = gb->buf_buf + gb->buf_size - gb->buf_get; if (count > len) { memcpy(buf, gb->buf_get, len); memcpy(buf+len, gb->buf_buf, count - len); gb->buf_get = gb->buf_buf + count - len; } else { memcpy(buf, gb->buf_get, count); if (count < len) gb->buf_get += count; else /* count == len */ gb->buf_get = gb->buf_buf; } return count; } /*-------------------------------------------------------------------------*/ /* I/O glue between TTY (upper) and USB function (lower) driver layers */ /* * gs_alloc_req * * Allocate a usb_request and its buffer. Returns a pointer to the * usb_request or NULL if there is an error. */ struct usb_request * gs_alloc_req(struct usb_ep *ep, unsigned len, gfp_t kmalloc_flags) { struct usb_request *req; req = usb_ep_alloc_request(ep, kmalloc_flags); if (req != NULL) { req->length = len; req->buf = kmalloc(len, kmalloc_flags); if (req->buf == NULL) { usb_ep_free_request(ep, req); return NULL; } } return req; } /* * gs_free_req * * Free a usb_request and its buffer. 
*/ void gs_free_req(struct usb_ep *ep, struct usb_request *req) { kfree(req->buf); usb_ep_free_request(ep, req); } /* * gs_send_packet * * If there is data to send, a packet is built in the given * buffer and the size is returned. If there is no data to * send, 0 is returned. * * Called with port_lock held. */ static unsigned gs_send_packet(struct gs_port *port, char *packet, unsigned size) { unsigned len; len = gs_buf_data_avail(&port->port_write_buf); if (len < size) size = len; if (size != 0) size = gs_buf_get(&port->port_write_buf, packet, size); return size; } /* * gs_start_tx * * This function finds available write requests, calls * gs_send_packet to fill these packets with data, and * continues until either there are no more write requests * available or no more data to send. This function is * run whenever data arrives or write requests are available. * * Context: caller owns port_lock; port_usb is non-null. */ static int gs_start_tx(struct gs_port *port) /* __releases(&port->port_lock) __acquires(&port->port_lock) */ { struct list_head *pool = &port->write_pool; struct usb_ep *in = port->port_usb->in; int status = 0; bool do_tty_wake = false; while (!list_empty(pool)) { struct usb_request *req; int len; if (port->write_started >= QUEUE_SIZE) break; req = list_entry(pool->next, struct usb_request, list); len = gs_send_packet(port, req->buf, in->maxpacket); if (len == 0) { wake_up_interruptible(&port->drain_wait); break; } do_tty_wake = true; req->length = len; list_del(&req->list); req->zero = (gs_buf_data_avail(&port->port_write_buf) == 0); pr_vdebug(PREFIX "%d: tx len=%d, 0x%02x 0x%02x 0x%02x ...\n", port->port_num, len, *((u8 *)req->buf), *((u8 *)req->buf+1), *((u8 *)req->buf+2)); /* Drop lock while we call out of driver; completions * could be issued while we do so. Disconnection may * happen too; maybe immediately before we queue this! * * NOTE that we may keep sending data for a while after * the TTY closed (dev->ioport->port_tty is NULL). 
*/ spin_unlock(&port->port_lock); status = usb_ep_queue(in, req, GFP_ATOMIC); spin_lock(&port->port_lock); if (status) { pr_debug("%s: %s %s err %d\n", __func__, "queue", in->name, status); list_add(&req->list, pool); break; } port->write_started++; /* abort immediately after disconnect */ if (!port->port_usb) break; } if (do_tty_wake && port->port.tty) tty_wakeup(port->port.tty); return status; } /* * Context: caller owns port_lock, and port_usb is set */ static unsigned gs_start_rx(struct gs_port *port) /* __releases(&port->port_lock) __acquires(&port->port_lock) */ { struct list_head *pool = &port->read_pool; struct usb_ep *out = port->port_usb->out; while (!list_empty(pool)) { struct usb_request *req; int status; struct tty_struct *tty; /* no more rx if closed */ tty = port->port.tty; if (!tty) break; if (port->read_started >= QUEUE_SIZE) break; req = list_entry(pool->next, struct usb_request, list); list_del(&req->list); req->length = out->maxpacket; /* drop lock while we call out; the controller driver * may need to call us back (e.g. for disconnect) */ spin_unlock(&port->port_lock); status = usb_ep_queue(out, req, GFP_ATOMIC); spin_lock(&port->port_lock); if (status) { pr_debug("%s: %s %s err %d\n", __func__, "queue", out->name, status); list_add(&req->list, pool); break; } port->read_started++; /* abort immediately after disconnect */ if (!port->port_usb) break; } return port->read_started; } /* * RX tasklet takes data out of the RX queue and hands it up to the TTY * layer until it refuses to take any more data (or is throttled back). * Then it issues reads for any further data. * * If the RX queue becomes full enough that no usb_request is queued, * the OUT endpoint may begin NAKing as soon as its FIFO fills up. * So QUEUE_SIZE packets plus however many the FIFO holds (usually two) * can be buffered before the TTY layer's buffers (currently 64 KB). 
*/ static void gs_rx_push(unsigned long _port) { struct gs_port *port = (void *)_port; struct tty_struct *tty; struct list_head *queue = &port->read_queue; bool disconnect = false; bool do_push = false; /* hand any queued data to the tty */ spin_lock_irq(&port->port_lock); tty = port->port.tty; while (!list_empty(queue)) { struct usb_request *req; req = list_first_entry(queue, struct usb_request, list); /* discard data if tty was closed */ if (!tty) goto recycle; /* leave data queued if tty was rx throttled */ if (test_bit(TTY_THROTTLED, &tty->flags)) break; switch (req->status) { case -ESHUTDOWN: disconnect = true; pr_vdebug(PREFIX "%d: shutdown\n", port->port_num); break; default: /* presumably a transient fault */ pr_warning(PREFIX "%d: unexpected RX status %d\n", port->port_num, req->status); /* FALLTHROUGH */ case 0: /* normal completion */ break; } /* push data to (open) tty */ if (req->actual) { char *packet = req->buf; unsigned size = req->actual; unsigned n; int count; /* we may have pushed part of this packet already... */ n = port->n_read; if (n) { packet += n; size -= n; } count = tty_insert_flip_string(tty, packet, size); if (count) do_push = true; if (count != size) { /* stop pushing; TTY layer can't handle more */ port->n_read += count; pr_vdebug(PREFIX "%d: rx block %d/%d\n", port->port_num, count, req->actual); break; } port->n_read = 0; } recycle: list_move(&req->list, &port->read_pool); port->read_started--; } /* Push from tty to ldisc; without low_latency set this is handled by * a workqueue, so we won't get callbacks and can hold port_lock */ if (tty && do_push) tty_flip_buffer_push(tty); /* We want our data queue to become empty ASAP, keeping data * in the tty and ldisc (not here). If we couldn't push any * this time around, there may be trouble unless there's an * implicit tty_unthrottle() call on its way... * * REVISIT we should probably add a timer to keep the tasklet * from starving ... but it's not clear that case ever happens. 
*/ if (!list_empty(queue) && tty) { if (!test_bit(TTY_THROTTLED, &tty->flags)) { if (do_push) tasklet_schedule(&port->push); else pr_warning(PREFIX "%d: RX not scheduled?\n", port->port_num); } } /* If we're still connected, refill the USB RX queue. */ if (!disconnect && port->port_usb) gs_start_rx(port); spin_unlock_irq(&port->port_lock); } static void gs_read_complete(struct usb_ep *ep, struct usb_request *req) { struct gs_port *port = ep->driver_data; /* Queue all received data until the tty layer is ready for it. */ spin_lock(&port->port_lock); list_add_tail(&req->list, &port->read_queue); tasklet_schedule(&port->push); spin_unlock(&port->port_lock); } static void gs_write_complete(struct usb_ep *ep, struct usb_request *req) { struct gs_port *port = ep->driver_data; spin_lock(&port->port_lock); list_add(&req->list, &port->write_pool); port->write_started--; switch (req->status) { default: /* presumably a transient fault */ pr_warning("%s: unexpected %s status %d\n", __func__, ep->name, req->status); /* FALL THROUGH */ case 0: /* normal completion */ gs_start_tx(port); break; case -ESHUTDOWN: /* disconnect */ pr_vdebug("%s: %s shutdown\n", __func__, ep->name); break; } spin_unlock(&port->port_lock); } static void gs_free_requests(struct usb_ep *ep, struct list_head *head, int *allocated) { struct usb_request *req; while (!list_empty(head)) { req = list_entry(head->next, struct usb_request, list); list_del(&req->list); gs_free_req(ep, req); if (allocated) (*allocated)--; } } static int gs_alloc_requests(struct usb_ep *ep, struct list_head *head, void (*fn)(struct usb_ep *, struct usb_request *), int *allocated) { int i; struct usb_request *req; int n = allocated ? QUEUE_SIZE - *allocated : QUEUE_SIZE; /* Pre-allocate up to QUEUE_SIZE transfers, but if we can't * do quite that many this time, don't fail ... we just won't * be as speedy as we might otherwise be. 
*/ for (i = 0; i < n; i++) { req = gs_alloc_req(ep, ep->maxpacket, GFP_ATOMIC); if (!req) return list_empty(head) ? -ENOMEM : 0; req->complete = fn; list_add_tail(&req->list, head); if (allocated) (*allocated)++; } return 0; } /** * gs_start_io - start USB I/O streams * @dev: encapsulates endpoints to use * Context: holding port_lock; port_tty and port_usb are non-null * * We only start I/O when something is connected to both sides of * this port. If nothing is listening on the host side, we may * be pointlessly filling up our TX buffers and FIFO. */ static int gs_start_io(struct gs_port *port) { struct list_head *head = &port->read_pool; struct usb_ep *ep = port->port_usb->out; int status; unsigned started; /* Allocate RX and TX I/O buffers. We can't easily do this much * earlier (with GFP_KERNEL) because the requests are coupled to * endpoints, as are the packet sizes we'll be using. Different * configurations may use different endpoints with a given port; * and high speed vs full speed changes packet sizes too. */ status = gs_alloc_requests(ep, head, gs_read_complete, &port->read_allocated); if (status) return status; status = gs_alloc_requests(port->port_usb->in, &port->write_pool, gs_write_complete, &port->write_allocated); if (status) { gs_free_requests(ep, head, &port->read_allocated); return status; } /* queue read requests */ port->n_read = 0; started = gs_start_rx(port); /* unblock any pending writes into our circular buffer */ if (started) { tty_wakeup(port->port.tty); } else { gs_free_requests(ep, head, &port->read_allocated); gs_free_requests(port->port_usb->in, &port->write_pool, &port->write_allocated); status = -EIO; } return status; } /*-------------------------------------------------------------------------*/ /* TTY Driver */ /* * gs_open sets up the link between a gs_port and its associated TTY. * That link is broken *only* by TTY close(), and all driver methods * know that. 
*/ static int gs_open(struct tty_struct *tty, struct file *file) { int port_num = tty->index; struct gs_port *port; int status; do { mutex_lock(&ports[port_num].lock); port = ports[port_num].port; if (!port) status = -ENODEV; else { spin_lock_irq(&port->port_lock); /* already open? Great. */ if (port->port.count) { status = 0; port->port.count++; /* currently opening/closing? wait ... */ } else if (port->openclose) { status = -EBUSY; /* ... else we do the work */ } else { status = -EAGAIN; port->openclose = true; } spin_unlock_irq(&port->port_lock); } mutex_unlock(&ports[port_num].lock); switch (status) { default: /* fully handled */ return status; case -EAGAIN: /* must do the work */ break; case -EBUSY: /* wait for EAGAIN task to finish */ msleep(1); /* REVISIT could have a waitchannel here, if * concurrent open performance is important */ break; } } while (status != -EAGAIN); /* Do the "real open" */ spin_lock_irq(&port->port_lock); /* allocate circular buffer on first open */ if (port->port_write_buf.buf_buf == NULL) { spin_unlock_irq(&port->port_lock); status = gs_buf_alloc(&port->port_write_buf, WRITE_BUF_SIZE); spin_lock_irq(&port->port_lock); if (status) { pr_debug("gs_open: ttyGS%d (%p,%p) no buffer\n", port->port_num, tty, file); port->openclose = false; goto exit_unlock_port; } } /* REVISIT if REMOVED (ports[].port NULL), abort the open * to let rmmod work faster (but this way isn't wrong). 
*/ /* REVISIT maybe wait for "carrier detect" */ tty->driver_data = port; port->port.tty = tty; port->port.count = 1; port->openclose = false; /* if connected, start the I/O stream */ if (port->port_usb) { struct gserial *gser = port->port_usb; pr_debug("gs_open: start ttyGS%d\n", port->port_num); gs_start_io(port); if (gser->connect) gser->connect(gser); } pr_debug("gs_open: ttyGS%d (%p,%p)\n", port->port_num, tty, file); status = 0; exit_unlock_port: spin_unlock_irq(&port->port_lock); return status; } static int gs_writes_finished(struct gs_port *p) { int cond; /* return true on disconnect or empty buffer */ spin_lock_irq(&p->port_lock); cond = (p->port_usb == NULL) || !gs_buf_data_avail(&p->port_write_buf); spin_unlock_irq(&p->port_lock); return cond; } static void gs_close(struct tty_struct *tty, struct file *file) { struct gs_port *port = tty->driver_data; struct gserial *gser; spin_lock_irq(&port->port_lock); if (port->port.count != 1) { if (port->port.count == 0) WARN_ON(1); else --port->port.count; goto exit; } pr_debug("gs_close: ttyGS%d (%p,%p) ...\n", port->port_num, tty, file); /* mark port as closing but in use; we can drop port lock * and sleep if necessary */ port->openclose = true; port->port.count = 0; gser = port->port_usb; if (gser && gser->disconnect) gser->disconnect(gser); /* wait for circular write buffer to drain, disconnect, or at * most GS_CLOSE_TIMEOUT seconds; then discard the rest */ if (gs_buf_data_avail(&port->port_write_buf) > 0 && gser) { spin_unlock_irq(&port->port_lock); wait_event_interruptible_timeout(port->drain_wait, gs_writes_finished(port), GS_CLOSE_TIMEOUT * HZ); spin_lock_irq(&port->port_lock); gser = port->port_usb; } /* Iff we're disconnected, there can be no I/O in flight so it's * ok to free the circular buffer; else just scrub it. And don't * let the push tasklet fire again until we're re-opened. 
*/ if (gser == NULL) gs_buf_free(&port->port_write_buf); else gs_buf_clear(&port->port_write_buf); tty->driver_data = NULL; port->port.tty = NULL; port->openclose = false; pr_debug("gs_close: ttyGS%d (%p,%p) done!\n", port->port_num, tty, file); wake_up_interruptible(&port->port.close_wait); exit: spin_unlock_irq(&port->port_lock); } static int gs_write(struct tty_struct *tty, const unsigned char *buf, int count) { struct gs_port *port = tty->driver_data; unsigned long flags; int status; pr_vdebug("gs_write: ttyGS%d (%p) writing %d bytes\n", port->port_num, tty, count); spin_lock_irqsave(&port->port_lock, flags); if (count) count = gs_buf_put(&port->port_write_buf, buf, count); /* treat count == 0 as flush_chars() */ if (port->port_usb) status = gs_start_tx(port); spin_unlock_irqrestore(&port->port_lock, flags); return count; } static int gs_put_char(struct tty_struct *tty, unsigned char ch) { struct gs_port *port = tty->driver_data; unsigned long flags; int status; pr_vdebug("gs_put_char: (%d,%p) char=0x%x, called from %pf\n", port->port_num, tty, ch, __builtin_return_address(0)); spin_lock_irqsave(&port->port_lock, flags); status = gs_buf_put(&port->port_write_buf, &ch, 1); spin_unlock_irqrestore(&port->port_lock, flags); return status; } static void gs_flush_chars(struct tty_struct *tty) { struct gs_port *port = tty->driver_data; unsigned long flags; pr_vdebug("gs_flush_chars: (%d,%p)\n", port->port_num, tty); spin_lock_irqsave(&port->port_lock, flags); if (port->port_usb) gs_start_tx(port); spin_unlock_irqrestore(&port->port_lock, flags); } static int gs_write_room(struct tty_struct *tty) { struct gs_port *port = tty->driver_data; unsigned long flags; int room = 0; spin_lock_irqsave(&port->port_lock, flags); if (port->port_usb) room = gs_buf_space_avail(&port->port_write_buf); spin_unlock_irqrestore(&port->port_lock, flags); pr_vdebug("gs_write_room: (%d,%p) room=%d\n", port->port_num, tty, room); return room; } static int gs_chars_in_buffer(struct tty_struct 
*tty) { struct gs_port *port = tty->driver_data; unsigned long flags; int chars = 0; spin_lock_irqsave(&port->port_lock, flags); chars = gs_buf_data_avail(&port->port_write_buf); spin_unlock_irqrestore(&port->port_lock, flags); pr_vdebug("gs_chars_in_buffer: (%d,%p) chars=%d\n", port->port_num, tty, chars); return chars; } /* undo side effects of setting TTY_THROTTLED */ static void gs_unthrottle(struct tty_struct *tty) { struct gs_port *port = tty->driver_data; unsigned long flags; spin_lock_irqsave(&port->port_lock, flags); if (port->port_usb) { /* Kickstart read queue processing. We don't do xon/xoff, * rts/cts, or other handshaking with the host, but if the * read queue backs up enough we'll be NAKing OUT packets. */ tasklet_schedule(&port->push); pr_vdebug(PREFIX "%d: unthrottle\n", port->port_num); } spin_unlock_irqrestore(&port->port_lock, flags); } static int gs_break_ctl(struct tty_struct *tty, int duration) { struct gs_port *port = tty->driver_data; int status = 0; struct gserial *gser; pr_vdebug("gs_break_ctl: ttyGS%d, send break (%d) \n", port->port_num, duration); spin_lock_irq(&port->port_lock); gser = port->port_usb; if (gser && gser->send_break) status = gser->send_break(gser, duration); spin_unlock_irq(&port->port_lock); return status; } static const struct tty_operations gs_tty_ops = { .open = gs_open, .close = gs_close, .write = gs_write, .put_char = gs_put_char, .flush_chars = gs_flush_chars, .write_room = gs_write_room, .chars_in_buffer = gs_chars_in_buffer, .unthrottle = gs_unthrottle, .break_ctl = gs_break_ctl, }; /*-------------------------------------------------------------------------*/ static struct tty_driver *gs_tty_driver; static int gs_port_alloc(unsigned port_num, struct usb_cdc_line_coding *coding) { struct gs_port *port; port = kzalloc(sizeof(struct gs_port), GFP_KERNEL); if (port == NULL) return -ENOMEM; tty_port_init(&port->port); spin_lock_init(&port->port_lock); init_waitqueue_head(&port->drain_wait); tasklet_init(&port->push, 
gs_rx_push, (unsigned long) port); INIT_LIST_HEAD(&port->read_pool); INIT_LIST_HEAD(&port->read_queue); INIT_LIST_HEAD(&port->write_pool); port->port_num = port_num; port->port_line_coding = *coding; ports[port_num].port = port; return 0; } /** * gserial_setup - initialize TTY driver for one or more ports * @g: gadget to associate with these ports * @count: how many ports to support * Context: may sleep * * The TTY stack needs to know in advance how many devices it should * plan to manage. Use this call to set up the ports you will be * exporting through USB. Later, connect them to functions based * on what configuration is activated by the USB host; and disconnect * them as appropriate. * * An example would be a two-configuration device in which both * configurations expose port 0, but through different functions. * One configuration could even expose port 1 while the other * one doesn't. * * Returns negative errno or zero. */ int gserial_setup(struct usb_gadget *g, unsigned count) { unsigned i; struct usb_cdc_line_coding coding; int status; if (count == 0 || count > N_PORTS) return -EINVAL; gs_tty_driver = alloc_tty_driver(count); if (!gs_tty_driver) return -ENOMEM; gs_tty_driver->driver_name = "g_serial"; gs_tty_driver->name = PREFIX; /* uses dynamically assigned dev_t values */ gs_tty_driver->type = TTY_DRIVER_TYPE_SERIAL; gs_tty_driver->subtype = SERIAL_TYPE_NORMAL; gs_tty_driver->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV; gs_tty_driver->init_termios = tty_std_termios; /* 9600-8-N-1 ... matches defaults expected by "usbser.sys" on * MS-Windows. Otherwise, most of these flags shouldn't affect * anything unless we were to actually hook up to a serial line. 
*/ gs_tty_driver->init_termios.c_cflag = B9600 | CS8 | CREAD | HUPCL | CLOCAL; gs_tty_driver->init_termios.c_ispeed = 9600; gs_tty_driver->init_termios.c_ospeed = 9600; coding.dwDTERate = cpu_to_le32(9600); coding.bCharFormat = 8; coding.bParityType = USB_CDC_NO_PARITY; coding.bDataBits = USB_CDC_1_STOP_BITS; tty_set_operations(gs_tty_driver, &gs_tty_ops); /* make devices be openable */ for (i = 0; i < count; i++) { mutex_init(&ports[i].lock); status = gs_port_alloc(i, &coding); if (status) { count = i; goto fail; } } n_ports = count; /* export the driver ... */ status = tty_register_driver(gs_tty_driver); if (status) { pr_err("%s: cannot register, err %d\n", __func__, status); goto fail; } /* ... and sysfs class devices, so mdev/udev make /dev/ttyGS* */ for (i = 0; i < count; i++) { struct device *tty_dev; tty_dev = tty_register_device(gs_tty_driver, i, &g->dev); if (IS_ERR(tty_dev)) pr_warning("%s: no classdev for port %d, err %ld\n", __func__, i, PTR_ERR(tty_dev)); } pr_debug("%s: registered %d ttyGS* device%s\n", __func__, count, (count == 1) ? "" : "s"); return status; fail: while (count--) kfree(ports[count].port); put_tty_driver(gs_tty_driver); gs_tty_driver = NULL; return status; } static int gs_closed(struct gs_port *port) { int cond; spin_lock_irq(&port->port_lock); cond = (port->port.count == 0) && !port->openclose; spin_unlock_irq(&port->port_lock); return cond; } /** * gserial_cleanup - remove TTY-over-USB driver and devices * Context: may sleep * * This is called to free all resources allocated by @gserial_setup(). * Accordingly, it may need to wait until some open /dev/ files have * closed. * * The caller must have issued @gserial_disconnect() for any ports * that had previously been connected, so that there is never any * I/O pending when it's called. 
*/ void gserial_cleanup(void) { unsigned i; struct gs_port *port; if (!gs_tty_driver) return; /* start sysfs and /dev/ttyGS* node removal */ for (i = 0; i < n_ports; i++) tty_unregister_device(gs_tty_driver, i); for (i = 0; i < n_ports; i++) { /* prevent new opens */ mutex_lock(&ports[i].lock); port = ports[i].port; ports[i].port = NULL; mutex_unlock(&ports[i].lock); tasklet_kill(&port->push); /* wait for old opens to finish */ wait_event(port->port.close_wait, gs_closed(port)); WARN_ON(port->port_usb != NULL); kfree(port); } n_ports = 0; tty_unregister_driver(gs_tty_driver); put_tty_driver(gs_tty_driver); gs_tty_driver = NULL; pr_debug("%s: cleaned up ttyGS* support\n", __func__); } /** * gserial_connect - notify TTY I/O glue that USB link is active * @gser: the function, set up with endpoints and descriptors * @port_num: which port is active * Context: any (usually from irq) * * This is called activate endpoints and let the TTY layer know that * the connection is active ... not unlike "carrier detect". It won't * necessarily start I/O queues; unless the TTY is held open by any * task, there would be no point. However, the endpoints will be * activated so the USB host can perform I/O, subject to basic USB * hardware flow control. * * Caller needs to have set up the endpoints and USB function in @dev * before calling this, as well as the appropriate (speed-specific) * endpoint descriptors, and also have set up the TTY driver by calling * @gserial_setup(). * * Returns negative errno or zero. * On success, ep->driver_data will be overwritten. 
*/ int gserial_connect(struct gserial *gser, u8 port_num) { struct gs_port *port; unsigned long flags; int status; if (!gs_tty_driver || port_num >= n_ports) return -ENXIO; /* we "know" gserial_cleanup() hasn't been called */ port = ports[port_num].port; /* activate the endpoints */ status = usb_ep_enable(gser->in); if (status < 0) return status; gser->in->driver_data = port; status = usb_ep_enable(gser->out); if (status < 0) goto fail_out; gser->out->driver_data = port; /* then tell the tty glue that I/O can work */ spin_lock_irqsave(&port->port_lock, flags); gser->ioport = port; port->port_usb = gser; /* REVISIT unclear how best to handle this state... * we don't really couple it with the Linux TTY. */ gser->port_line_coding = port->port_line_coding; /* REVISIT if waiting on "carrier detect", signal. */ /* if it's already open, start I/O ... and notify the serial * protocol about open/close status (connect/disconnect). */ if (port->port.count) { pr_debug("gserial_connect: start ttyGS%d\n", port->port_num); gs_start_io(port); if (gser->connect) gser->connect(gser); } else { if (gser->disconnect) gser->disconnect(gser); } spin_unlock_irqrestore(&port->port_lock, flags); return status; fail_out: usb_ep_disable(gser->in); gser->in->driver_data = NULL; return status; } /** * gserial_disconnect - notify TTY I/O glue that USB link is inactive * @gser: the function, on which gserial_connect() was called * Context: any (usually from irq) * * This is called to deactivate endpoints and let the TTY layer know * that the connection went inactive ... not unlike "hangup". * * On return, the state is as if gserial_connect() had never been called; * there is no active USB I/O on these endpoints. */ void gserial_disconnect(struct gserial *gser) { struct gs_port *port = gser->ioport; unsigned long flags; if (!port) return; /* tell the TTY glue not to do I/O here any more */ spin_lock_irqsave(&port->port_lock, flags); /* REVISIT as above: how best to track this? 
*/ port->port_line_coding = gser->port_line_coding; port->port_usb = NULL; gser->ioport = NULL; if (port->port.count > 0 || port->openclose) { wake_up_interruptible(&port->drain_wait); if (port->port.tty) tty_hangup(port->port.tty); } spin_unlock_irqrestore(&port->port_lock, flags); /* disable endpoints, aborting down any active I/O */ usb_ep_disable(gser->out); gser->out->driver_data = NULL; usb_ep_disable(gser->in); gser->in->driver_data = NULL; /* finally, free any unused/unusable I/O buffers */ spin_lock_irqsave(&port->port_lock, flags); if (port->port.count == 0 && !port->openclose) gs_buf_free(&port->port_write_buf); gs_free_requests(gser->out, &port->read_pool, NULL); gs_free_requests(gser->out, &port->read_queue, NULL); gs_free_requests(gser->in, &port->write_pool, NULL); port->read_allocated = port->read_started = port->write_allocated = port->write_started = 0; spin_unlock_irqrestore(&port->port_lock, flags); }
gpl-2.0
110440/fastsocket
kernel/drivers/media/video/upd64031a.c
122
7832
/* * upd64031A - NEC Electronics Ghost Reduction for NTSC in Japan * * 2003 by T.Adachi <tadachi@tadachi-net.com> * 2003 by Takeru KOMORIYA <komoriya@paken.org> * 2006 by Hans Verkuil <hverkuil@xs4all.nl> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/i2c.h> #include <linux/videodev2.h> #include <linux/slab.h> #include <media/v4l2-device.h> #include <media/v4l2-chip-ident.h> #include <media/upd64031a.h> /* --------------------- read registers functions define -------------------- */ /* bit masks */ #define GR_MODE_MASK 0xc0 #define DIRECT_3DYCS_CONNECT_MASK 0xc0 #define SYNC_CIRCUIT_MASK 0xa0 /* -------------------------------------------------------------------------- */ MODULE_DESCRIPTION("uPD64031A driver"); MODULE_AUTHOR("T. 
Adachi, Takeru KOMORIYA, Hans Verkuil"); MODULE_LICENSE("GPL"); static int debug; module_param(debug, int, 0644); MODULE_PARM_DESC(debug, "Debug level (0-1)"); enum { R00 = 0, R01, R02, R03, R04, R05, R06, R07, R08, R09, R0A, R0B, R0C, R0D, R0E, R0F, /* unused registers R10, R11, R12, R13, R14, R15, R16, R17, */ TOT_REGS }; struct upd64031a_state { struct v4l2_subdev sd; u8 regs[TOT_REGS]; u8 gr_mode; u8 direct_3dycs_connect; u8 ext_comp_sync; u8 ext_vert_sync; }; static inline struct upd64031a_state *to_state(struct v4l2_subdev *sd) { return container_of(sd, struct upd64031a_state, sd); } static u8 upd64031a_init[] = { 0x00, 0xb8, 0x48, 0xd2, 0xe6, 0x03, 0x10, 0x0b, 0xaf, 0x7f, 0x00, 0x00, 0x1d, 0x5e, 0x00, 0xd0 }; /* ------------------------------------------------------------------------ */ static u8 upd64031a_read(struct v4l2_subdev *sd, u8 reg) { struct i2c_client *client = v4l2_get_subdevdata(sd); u8 buf[2]; if (reg >= sizeof(buf)) return 0xff; i2c_master_recv(client, buf, 2); return buf[reg]; } /* ------------------------------------------------------------------------ */ static void upd64031a_write(struct v4l2_subdev *sd, u8 reg, u8 val) { struct i2c_client *client = v4l2_get_subdevdata(sd); u8 buf[2]; buf[0] = reg; buf[1] = val; v4l2_dbg(1, debug, sd, "write reg: %02X val: %02X\n", reg, val); if (i2c_master_send(client, buf, 2) != 2) v4l2_err(sd, "I/O error write 0x%02x/0x%02x\n", reg, val); } /* ------------------------------------------------------------------------ */ /* The input changed due to new input or channel changed */ static int upd64031a_s_frequency(struct v4l2_subdev *sd, struct v4l2_frequency *freq) { struct upd64031a_state *state = to_state(sd); u8 reg = state->regs[R00]; v4l2_dbg(1, debug, sd, "changed input or channel\n"); upd64031a_write(sd, R00, reg | 0x10); upd64031a_write(sd, R00, reg & ~0x10); return 0; } /* ------------------------------------------------------------------------ */ static int upd64031a_s_routing(struct v4l2_subdev 
*sd, u32 input, u32 output, u32 config) { struct upd64031a_state *state = to_state(sd); u8 r00, r05, r08; state->gr_mode = (input & 3) << 6; state->direct_3dycs_connect = (input & 0xc) << 4; state->ext_comp_sync = (input & UPD64031A_COMPOSITE_EXTERNAL) << 1; state->ext_vert_sync = (input & UPD64031A_VERTICAL_EXTERNAL) << 2; r00 = (state->regs[R00] & ~GR_MODE_MASK) | state->gr_mode; r05 = (state->regs[R00] & ~SYNC_CIRCUIT_MASK) | state->ext_comp_sync | state->ext_vert_sync; r08 = (state->regs[R08] & ~DIRECT_3DYCS_CONNECT_MASK) | state->direct_3dycs_connect; upd64031a_write(sd, R00, r00); upd64031a_write(sd, R05, r05); upd64031a_write(sd, R08, r08); return upd64031a_s_frequency(sd, NULL); } static int upd64031a_g_chip_ident(struct v4l2_subdev *sd, struct v4l2_dbg_chip_ident *chip) { struct i2c_client *client = v4l2_get_subdevdata(sd); return v4l2_chip_ident_i2c_client(client, chip, V4L2_IDENT_UPD64031A, 0); } static int upd64031a_log_status(struct v4l2_subdev *sd) { v4l2_info(sd, "Status: SA00=0x%02x SA01=0x%02x\n", upd64031a_read(sd, 0), upd64031a_read(sd, 1)); return 0; } #ifdef CONFIG_VIDEO_ADV_DEBUG static int upd64031a_g_register(struct v4l2_subdev *sd, struct v4l2_dbg_register *reg) { struct i2c_client *client = v4l2_get_subdevdata(sd); if (!v4l2_chip_match_i2c_client(client, &reg->match)) return -EINVAL; if (!capable(CAP_SYS_ADMIN)) return -EPERM; reg->val = upd64031a_read(sd, reg->reg & 0xff); reg->size = 1; return 0; } static int upd64031a_s_register(struct v4l2_subdev *sd, struct v4l2_dbg_register *reg) { struct i2c_client *client = v4l2_get_subdevdata(sd); if (!v4l2_chip_match_i2c_client(client, &reg->match)) return -EINVAL; if (!capable(CAP_SYS_ADMIN)) return -EPERM; upd64031a_write(sd, reg->reg & 0xff, reg->val & 0xff); return 0; } #endif /* ----------------------------------------------------------------------- */ static const struct v4l2_subdev_core_ops upd64031a_core_ops = { .log_status = upd64031a_log_status, .g_chip_ident = upd64031a_g_chip_ident, 
#ifdef CONFIG_VIDEO_ADV_DEBUG .g_register = upd64031a_g_register, .s_register = upd64031a_s_register, #endif }; static const struct v4l2_subdev_tuner_ops upd64031a_tuner_ops = { .s_frequency = upd64031a_s_frequency, }; static const struct v4l2_subdev_video_ops upd64031a_video_ops = { .s_routing = upd64031a_s_routing, }; static const struct v4l2_subdev_ops upd64031a_ops = { .core = &upd64031a_core_ops, .tuner = &upd64031a_tuner_ops, .video = &upd64031a_video_ops, }; /* ------------------------------------------------------------------------ */ /* i2c implementation */ static int upd64031a_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct upd64031a_state *state; struct v4l2_subdev *sd; int i; if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA)) return -EIO; v4l_info(client, "chip found @ 0x%x (%s)\n", client->addr << 1, client->adapter->name); state = kmalloc(sizeof(struct upd64031a_state), GFP_KERNEL); if (state == NULL) return -ENOMEM; sd = &state->sd; v4l2_i2c_subdev_init(sd, client, &upd64031a_ops); memcpy(state->regs, upd64031a_init, sizeof(state->regs)); state->gr_mode = UPD64031A_GR_ON << 6; state->direct_3dycs_connect = UPD64031A_3DYCS_COMPOSITE << 4; state->ext_comp_sync = state->ext_vert_sync = 0; for (i = 0; i < TOT_REGS; i++) upd64031a_write(sd, i, state->regs[i]); return 0; } static int upd64031a_remove(struct i2c_client *client) { struct v4l2_subdev *sd = i2c_get_clientdata(client); v4l2_device_unregister_subdev(sd); kfree(to_state(sd)); return 0; } /* ----------------------------------------------------------------------- */ static const struct i2c_device_id upd64031a_id[] = { { "upd64031a", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, upd64031a_id); static struct i2c_driver upd64031a_driver = { .driver = { .owner = THIS_MODULE, .name = "upd64031a", }, .probe = upd64031a_probe, .remove = upd64031a_remove, .id_table = upd64031a_id, }; static __init int init_upd64031a(void) { return i2c_add_driver(&upd64031a_driver); } 
static __exit void exit_upd64031a(void) { i2c_del_driver(&upd64031a_driver); } module_init(init_upd64031a); module_exit(exit_upd64031a);
gpl-2.0
Bdaman80/BD-Ace
net/sched/sch_multiq.c
890
9518
/*
 * Copyright (c) 2008, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Author: Alexander Duyck <alexander.h.duyck@intel.com>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>

/*
 * Multiqueue scheduler: one child qdisc ("band") per hardware TX subqueue.
 * Dequeue round-robins over the bands, skipping bands whose hardware
 * subqueue is currently stopped, to avoid head-of-line blocking.
 */
struct multiq_sched_data {
	u16 bands;		/* number of active bands (= real_num_tx_queues) */
	u16 max_bands;		/* allocated band slots (= num_tx_queues) */
	u16 curband;		/* round-robin cursor for dequeue */
	struct tcf_proto *filter_list;
	struct Qdisc **queues;	/* per-band child qdiscs; unused slots hold &noop_qdisc */
};

/*
 * Map an skb to its band's child qdisc.  Classification result is only
 * used for its action verdict; the band itself comes from the skb's
 * queue mapping.  Returns NULL (with *qerr set) when the packet was
 * stolen/queued/shot by an action.
 */
static struct Qdisc *
multiq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	u32 band;
	struct tcf_result res;
	int err;

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	err = tc_classify(skb, q->filter_list, &res);
#ifdef CONFIG_NET_CLS_ACT
	switch (err) {
	case TC_ACT_STOLEN:
	case TC_ACT_QUEUED:
		*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
		/* fall through: stolen/queued packets also return NULL */
	case TC_ACT_SHOT:
		return NULL;
	}
#endif
	band = skb_get_queue_mapping(skb);
	/* out-of-range mapping falls back to band 0 */
	if (band >= q->bands)
		return q->queues[0];

	return q->queues[band];
}

/* Enqueue on the selected band's child qdisc; keep sch stats in sync. */
static int
multiq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct Qdisc *qdisc;
	int ret;

	qdisc = multiq_classify(skb, sch, &ret);
#ifdef CONFIG_NET_CLS_ACT
	if (qdisc == NULL) {

		if (ret & __NET_XMIT_BYPASS)
			sch->qstats.drops++;
		kfree_skb(skb);
		return ret;
	}
#endif

	ret = qdisc_enqueue(skb, qdisc);
	if (ret == NET_XMIT_SUCCESS) {
		sch->bstats.bytes += qdisc_pkt_len(skb);
		sch->bstats.packets++;
		sch->q.qlen++;
		return NET_XMIT_SUCCESS;
	}
	/* only count real drops, not e.g. __NET_XMIT_STOLEN from the child */
	if (net_xmit_drop_count(ret))
		sch->qstats.drops++;
	return ret;
}

/* Round-robin dequeue over bands whose hardware subqueue is running. */
static struct sk_buff *multiq_dequeue(struct Qdisc *sch)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	struct Qdisc *qdisc;
	struct sk_buff *skb;
	int band;

	for (band = 0; band < q->bands; band++) {
		/* cycle through bands to ensure fairness */
		q->curband++;
		if (q->curband >= q->bands)
			q->curband = 0;

		/* Check that target subqueue is available before
		 * pulling an skb to avoid head-of-line blocking.
		 */
		if (!__netif_subqueue_stopped(qdisc_dev(sch), q->curband)) {
			qdisc = q->queues[q->curband];
			skb = qdisc->dequeue(qdisc);
			if (skb) {
				sch->q.qlen--;
				return skb;
			}
		}
	}
	return NULL;

}

/* Same walk as dequeue, but on a local cursor so state is not advanced. */
static struct sk_buff *multiq_peek(struct Qdisc *sch)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	unsigned int curband = q->curband;
	struct Qdisc *qdisc;
	struct sk_buff *skb;
	int band;

	for (band = 0; band < q->bands; band++) {
		/* cycle through bands to ensure fairness */
		curband++;
		if (curband >= q->bands)
			curband = 0;

		/* Check that target subqueue is available before
		 * pulling an skb to avoid head-of-line blocking.
		 */
		if (!__netif_subqueue_stopped(qdisc_dev(sch), curband)) {
			qdisc = q->queues[curband];
			skb = qdisc->ops->peek(qdisc);
			if (skb)
				return skb;
		}
	}
	return NULL;

}

/* Drop one packet, preferring the highest-numbered band. */
static unsigned int multiq_drop(struct Qdisc *sch)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	int band;
	unsigned int len;
	struct Qdisc *qdisc;

	for (band = q->bands-1; band >= 0; band--) {
		qdisc = q->queues[band];
		if (qdisc->ops->drop) {
			len = qdisc->ops->drop(qdisc);
			if (len != 0) {
				sch->q.qlen--;
				return len;
			}
		}
	}
	return 0;
}

/* Reset every band and the round-robin cursor. */
static void
multiq_reset(struct Qdisc *sch)
{
	u16 band;
	struct multiq_sched_data *q = qdisc_priv(sch);

	for (band = 0; band < q->bands; band++)
		qdisc_reset(q->queues[band]);
	sch->q.qlen = 0;
	q->curband = 0;
}

/* Tear down filters, child qdiscs and the band array. */
static void
multiq_destroy(struct Qdisc *sch)
{
	int band;
	struct multiq_sched_data *q = qdisc_priv(sch);

	tcf_destroy_chain(&q->filter_list);
	for (band = 0; band < q->bands; band++)
		qdisc_destroy(q->queues[band]);

	kfree(q->queues);
}

/*
 * (Re)configure the band count to match the device's real TX queue count.
 * Surplus bands are replaced by &noop_qdisc and destroyed; missing bands
 * get a default pfifo child.  Note the per-band sch_tree_lock windows:
 * qdisc_create_dflt may sleep, so it runs unlocked.
 */
static int multiq_tune(struct Qdisc *sch, struct nlattr *opt)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	struct tc_multiq_qopt *qopt;
	int i;

	if (!netif_is_multiqueue(qdisc_dev(sch)))
		return -EOPNOTSUPP;
	if (nla_len(opt) < sizeof(*qopt))
		return -EINVAL;

	qopt = nla_data(opt);

	/* band count is dictated by the device, not by the caller */
	qopt->bands = qdisc_dev(sch)->real_num_tx_queues;

	sch_tree_lock(sch);
	q->bands = qopt->bands;
	for (i = q->bands; i < q->max_bands; i++) {
		if (q->queues[i] != &noop_qdisc) {
			struct Qdisc *child = q->queues[i];
			q->queues[i] = &noop_qdisc;
			qdisc_tree_decrease_qlen(child, child->q.qlen);
			qdisc_destroy(child);
		}
	}

	sch_tree_unlock(sch);

	for (i = 0; i < q->bands; i++) {
		if (q->queues[i] == &noop_qdisc) {
			struct Qdisc *child, *old;
			child = qdisc_create_dflt(qdisc_dev(sch),
						  sch->dev_queue,
						  &pfifo_qdisc_ops,
						  TC_H_MAKE(sch->handle,
							    i + 1));
			if (child) {
				sch_tree_lock(sch);
				old = q->queues[i];
				q->queues[i] = child;

				if (old != &noop_qdisc) {
					qdisc_tree_decrease_qlen(old,
								 old->q.qlen);
					qdisc_destroy(old);
				}
				sch_tree_unlock(sch);
			}
		}
	}
	return 0;
}

/* Allocate the band array (all slots noop) and delegate setup to tune. */
static int multiq_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	int i, err;

	q->queues = NULL;

	if (opt == NULL)
		return -EINVAL;

	q->max_bands = qdisc_dev(sch)->num_tx_queues;

	q->queues = kcalloc(q->max_bands, sizeof(struct Qdisc *), GFP_KERNEL);
	if (!q->queues)
		return -ENOBUFS;
	for (i = 0; i < q->max_bands; i++)
		q->queues[i] = &noop_qdisc;

	err = multiq_tune(sch, opt);

	if (err)
		kfree(q->queues);

	return err;
}

/* Dump current band configuration as a tc_multiq_qopt netlink attr. */
static int multiq_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	unsigned char *b = skb_tail_pointer(skb);
	struct tc_multiq_qopt opt;

	opt.bands = q->bands;
	opt.max_bands = q->max_bands;

	NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);

	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

/* Replace band (arg-1)'s child qdisc; NULL new means noop. */
static int multiq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
			struct Qdisc **old)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	unsigned long band = arg - 1;

	if (new == NULL)
		new = &noop_qdisc;

	sch_tree_lock(sch);
	*old = q->queues[band];
	q->queues[band] = new;
	qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
	qdisc_reset(*old);
	sch_tree_unlock(sch);

	return 0;
}

static struct Qdisc *
multiq_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	unsigned long band = arg - 1;

	return q->queues[band];
}

/* Class ids are 1-based band numbers; 0 means "no such class". */
static unsigned long multiq_get(struct Qdisc *sch, u32 classid)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	unsigned long band = TC_H_MIN(classid);

	if (band - 1 >= q->bands)
		return 0;
	return band;
}

static unsigned long multiq_bind(struct Qdisc *sch, unsigned long parent,
				 u32 classid)
{
	return multiq_get(sch, classid);
}


/* Classes are not refcounted, so put is a no-op. */
static void multiq_put(struct Qdisc *q, unsigned long cl)
{
}

static int multiq_dump_class(struct Qdisc *sch, unsigned long cl,
			     struct sk_buff *skb, struct tcmsg *tcm)
{
	struct multiq_sched_data *q = qdisc_priv(sch);

	tcm->tcm_handle |= TC_H_MIN(cl);
	tcm->tcm_info = q->queues[cl-1]->handle;
	return 0;
}

static int multiq_dump_class_stats(struct Qdisc *sch, unsigned long cl,
				 struct gnet_dump *d)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	struct Qdisc *cl_q;

	cl_q = q->queues[cl - 1];
	cl_q->qstats.qlen = cl_q->q.qlen;
	if (gnet_stats_copy_basic(d, &cl_q->bstats) < 0 ||
	    gnet_stats_copy_queue(d, &cl_q->qstats) < 0)
		return -1;

	return 0;
}

/* Iterate over all bands for class dumps, honoring skip/count/stop. */
static void multiq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	int band;

	if (arg->stop)
		return;

	for (band = 0; band < q->bands; band++) {
		if (arg->count < arg->skip) {
			arg->count++;
			continue;
		}
		if (arg->fn(sch, band+1, arg) < 0) {
			arg->stop = 1;
			break;
		}
		arg->count++;
	}
}

/* Filters attach only at the qdisc level (cl == 0), not per band. */
static struct tcf_proto **multiq_find_tcf(struct Qdisc *sch, unsigned long cl)
{
	struct multiq_sched_data *q = qdisc_priv(sch);

	if (cl)
		return NULL;
	return &q->filter_list;
}

static const struct Qdisc_class_ops multiq_class_ops = {
	.graft		=	multiq_graft,
	.leaf		=	multiq_leaf,
	.get		=	multiq_get,
	.put		=	multiq_put,
	.walk		=	multiq_walk,
	.tcf_chain	=	multiq_find_tcf,
	.bind_tcf	=	multiq_bind,
	.unbind_tcf	=	multiq_put,
	.dump		=	multiq_dump_class,
	.dump_stats	=	multiq_dump_class_stats,
};

static struct Qdisc_ops multiq_qdisc_ops __read_mostly = {
	.next		=	NULL,
	.cl_ops		=	&multiq_class_ops,
	.id		=	"multiq",
	.priv_size	=	sizeof(struct multiq_sched_data),
	.enqueue	=	multiq_enqueue,
	.dequeue	=	multiq_dequeue,
	.peek		=	multiq_peek,
	.drop		=	multiq_drop,
	.init		=	multiq_init,
	.reset		=	multiq_reset,
	.destroy	=	multiq_destroy,
	.change		=	multiq_tune,
	.dump		=	multiq_dump,
	.owner		=	THIS_MODULE,
};

static int __init multiq_module_init(void)
{
	return register_qdisc(&multiq_qdisc_ops);
}

static void __exit multiq_module_exit(void)
{
	unregister_qdisc(&multiq_qdisc_ops);
}

module_init(multiq_module_init)
module_exit(multiq_module_exit)

MODULE_LICENSE("GPL");
gpl-2.0
OESF/Linaro-Android_LinaroSprint2011Q1
drivers/parisc/gsc.c
890
6032
/*
 * Interrupt management for most GSC and related devices.
 *
 * (c) Copyright 1999 Alex deVries for The Puffin Group
 * (c) Copyright 1999 Grant Grundler for Hewlett-Packard
 * (c) Copyright 1999 Matthew Wilcox
 * (c) Copyright 2000 Helge Deller
 * (c) Copyright 2001 Matthew Wilcox for Hewlett-Packard
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/types.h>
#include <asm/hardware.h>
#include <asm/io.h>

#include "gsc.h"

#undef DEBUG

#ifdef DEBUG
#define DEBPRINTK printk
#else
#define DEBPRINTK(x,...)
#endif

/*
 * Allocate a fresh CPU interrupt and fill in the transaction address/data
 * the GSC device must use to trigger it.  Returns the irq number, or a
 * negative value from txn_alloc_irq() on failure.
 */
int gsc_alloc_irq(struct gsc_irq *i)
{
	int irq = txn_alloc_irq(GSC_EIM_WIDTH);

	if (irq < 0) {
		printk("cannot get irq\n");
		return irq;
	}

	i->txn_addr = txn_alloc_addr(irq);
	i->txn_data = txn_alloc_data(irq);
	i->irq = irq;

	return irq;
}

/*
 * Claim a specific (already-known) CPU interrupt line for a GSC device.
 * @irq is a raw line number; it is offset by CPU_IRQ_BASE before being
 * claimed.  Returns the virtual irq number, or a negative value on failure.
 */
int gsc_claim_irq(struct gsc_irq *i, int irq)
{
	int c = irq;

	irq += CPU_IRQ_BASE; /* virtualize the IRQ first */

	irq = txn_claim_irq(irq);
	if (irq < 0) {
		printk("cannot claim irq %d\n", c);
		return irq;
	}

	i->txn_addr = txn_alloc_addr(irq);
	i->txn_data = txn_alloc_data(irq);
	i->irq = irq;

	return irq;
}

EXPORT_SYMBOL(gsc_alloc_irq);
EXPORT_SYMBOL(gsc_claim_irq);

/* Common interrupt demultiplexer used by Asp, Lasi & Wax. */
irqreturn_t gsc_asic_intr(int gsc_asic_irq, void *dev)
{
	unsigned long irr;
	struct gsc_asic *gsc_asic = dev;

	irr = gsc_readl(gsc_asic->hpa + OFFSET_IRR);
	if (irr == 0)
		return IRQ_NONE;

	DEBPRINTK("%s intr, mask=0x%x\n", gsc_asic->name, irr);

	/* Dispatch every pending local interrupt, lowest bit first. */
	do {
		int local_irq = __ffs(irr);
		unsigned int irq = gsc_asic->global_irq[local_irq];
		__do_IRQ(irq);
		irr &= ~(1 << local_irq);
	} while (irr);

	return IRQ_HANDLED;
}

/*
 * Reverse-map a global irq number to the local bit position within an
 * ASIC's global_irq[] table.  Returns NO_IRQ if the irq is not found
 * within the first @limit entries.
 */
int gsc_find_local_irq(unsigned int irq, int *global_irqs, int limit)
{
	int local_irq;

	for (local_irq = 0; local_irq < limit; local_irq++) {
		if (global_irqs[local_irq] == irq)
			return local_irq;
	}

	return NO_IRQ;
}

static void gsc_asic_disable_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct gsc_asic *irq_dev = desc->chip_data;
	int local_irq = gsc_find_local_irq(irq, irq_dev->global_irq, 32);
	u32 imr;

	/* Disable the IRQ line by clearing the bit in the IMR */
	imr = gsc_readl(irq_dev->hpa + OFFSET_IMR);
	imr &= ~(1 << local_irq);
	gsc_writel(imr, irq_dev->hpa + OFFSET_IMR);

	/*
	 * BUGFIX: the debug print used to run before 'imr' was read,
	 * passing an uninitialized value (UB when DEBUG is defined).
	 * Log the value actually written instead.
	 */
	DEBPRINTK(KERN_DEBUG "%s(%d) %s: IMR 0x%x\n", __func__, irq,
			irq_dev->name, imr);
}

static void gsc_asic_enable_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct gsc_asic *irq_dev = desc->chip_data;
	int local_irq = gsc_find_local_irq(irq, irq_dev->global_irq, 32);
	u32 imr;

	/* Enable the IRQ line by setting the bit in the IMR */
	imr = gsc_readl(irq_dev->hpa + OFFSET_IMR);
	imr |= 1 << local_irq;
	gsc_writel(imr, irq_dev->hpa + OFFSET_IMR);

	/*
	 * BUGFIX: as in gsc_asic_disable_irq(), print 'imr' only after it
	 * has been read and updated; it used to be printed uninitialized.
	 */
	DEBPRINTK(KERN_DEBUG "%s(%d) %s: IMR 0x%x\n", __func__, irq,
			irq_dev->name, imr);

	/*
	 * FIXME: read IPR to make sure the IRQ isn't already pending.
	 *   If so, we need to read IRR and manually call do_irq().
	 */
}

static unsigned int gsc_asic_startup_irq(unsigned int irq)
{
	gsc_asic_enable_irq(irq);
	return 0;
}

static struct irq_chip gsc_asic_interrupt_type = {
	.name		=	"GSC-ASIC",
	.startup	=	gsc_asic_startup_irq,
	.shutdown	=	gsc_asic_disable_irq,
	.enable		=	gsc_asic_enable_irq,
	.disable	=	gsc_asic_disable_irq,
	.ack		=	no_ack_irq,
	.end		=	no_end_irq,
};

/*
 * Hand out the next free irq number from the GSC range and bind it to the
 * given chip/chip_data.  Returns NO_IRQ once the range is exhausted.
 * NOTE(review): the static counter means this is not safe against
 * concurrent callers — presumably only called from single-threaded
 * bus-walk/init context; confirm before reusing elsewhere.
 */
int gsc_assign_irq(struct irq_chip *type, void *data)
{
	static int irq = GSC_IRQ_BASE;
	struct irq_desc *desc;

	if (irq > GSC_IRQ_MAX)
		return NO_IRQ;

	desc = irq_to_desc(irq);
	desc->chip = type;
	desc->chip_data = data;
	return irq++;
}

/*
 * Ensure a local ASIC interrupt has a global irq assigned, allocating one
 * on first use, and report it through *irqp.  On allocation failure *irqp
 * is left untouched.
 */
void gsc_asic_assign_irq(struct gsc_asic *asic, int local_irq, int *irqp)
{
	int irq = asic->global_irq[local_irq];

	if (irq <= 0) {
		irq = gsc_assign_irq(&gsc_asic_interrupt_type, asic);
		if (irq == NO_IRQ)
			return;

		asic->global_irq[local_irq] = irq;
	}
	*irqp = irq;
}

struct gsc_fixup_struct {
	void (*choose_irq)(struct parisc_device *, void *);
	void *ctrl;
};

static int gsc_fixup_irqs_callback(struct device *dev, void *data)
{
	struct parisc_device *padev = to_parisc_device(dev);
	struct gsc_fixup_struct *gf = data;

	/* work-around for 715/64 and others which have parent
	   at path [5] and children at path [5/0/x] */
	if (padev->id.hw_type == HPHW_FAULTY)
		gsc_fixup_irqs(padev, gf->ctrl, gf->choose_irq);
	gf->choose_irq(padev, gf->ctrl);

	return 0;
}

/*
 * Walk all children of @parent and let @choose_irq pick an irq for each,
 * recursing through HPHW_FAULTY pseudo-parents (see callback above).
 */
void gsc_fixup_irqs(struct parisc_device *parent, void *ctrl,
			void (*choose_irq)(struct parisc_device *, void *))
{
	struct gsc_fixup_struct data = {
		.choose_irq	= choose_irq,
		.ctrl		= ctrl,
	};

	device_for_each_child(&parent->dev, &data, gsc_fixup_irqs_callback);
}

/*
 * Shared setup for Asp/Lasi/Wax: record the parent, clear the local->global
 * irq map, and reserve the ASIC's MMIO region.  Always returns 0; a failed
 * request_mem_region() is tolerated (region simply stays unclaimed).
 */
int gsc_common_setup(struct parisc_device *parent, struct gsc_asic *gsc_asic)
{
	struct resource *res;
	int i;

	gsc_asic->gsc = parent;

	/* Initialise local irq -> global irq mapping */
	for (i = 0; i < 32; i++) {
		gsc_asic->global_irq[i] = NO_IRQ;
	}

	/* allocate resource region */
	res = request_mem_region(gsc_asic->hpa, 0x100000, gsc_asic->name);
	if (res) {
		res->flags = IORESOURCE_MEM; 	/* do not mark it busy ! */
	}

#if 0
	printk(KERN_WARNING "%s IRQ %d EIM 0x%x", gsc_asic->name,
			parent->irq, gsc_asic->eim);
	if (gsc_readl(gsc_asic->hpa + OFFSET_IMR))
		printk("  IMR is non-zero! (0x%x)",
				gsc_readl(gsc_asic->hpa + OFFSET_IMR));
	printk("\n");
#endif

	return 0;
}

extern struct parisc_driver lasi_driver;
extern struct parisc_driver asp_driver;
extern struct parisc_driver wax_driver;

void __init gsc_init(void)
{
#ifdef CONFIG_GSC_LASI
	register_parisc_driver(&lasi_driver);
	register_parisc_driver(&asp_driver);
#endif
#ifdef CONFIG_GSC_WAX
	register_parisc_driver(&wax_driver);
#endif
}
gpl-2.0
fortunave3gxx/android_kernel_samsung_fortuna-common
drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
1914
68866
/****************************************************************************** * * Copyright(c) 2009-2012 Realtek Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * * Contact Information: * wlanfae <wlanfae@realtek.com> * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, * Hsinchu 300, Taiwan. 
* * Larry Finger <Larry.Finger@lwfinger.net> * *****************************************************************************/ #include "../wifi.h" #include "../efuse.h" #include "../base.h" #include "../regd.h" #include "../cam.h" #include "../ps.h" #include "../pci.h" #include "reg.h" #include "def.h" #include "phy.h" #include "../rtl8192c/fw_common.h" #include "dm.h" #include "led.h" #include "hw.h" #define LLT_CONFIG 5 static void _rtl92ce_set_bcn_ctrl_reg(struct ieee80211_hw *hw, u8 set_bits, u8 clear_bits) { struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); struct rtl_priv *rtlpriv = rtl_priv(hw); rtlpci->reg_bcn_ctrl_val |= set_bits; rtlpci->reg_bcn_ctrl_val &= ~clear_bits; rtl_write_byte(rtlpriv, REG_BCN_CTRL, (u8) rtlpci->reg_bcn_ctrl_val); } static void _rtl92ce_stop_tx_beacon(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); u8 tmp1byte; tmp1byte = rtl_read_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2); rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2, tmp1byte & (~BIT(6))); rtl_write_byte(rtlpriv, REG_TBTT_PROHIBIT + 1, 0x64); tmp1byte = rtl_read_byte(rtlpriv, REG_TBTT_PROHIBIT + 2); tmp1byte &= ~(BIT(0)); rtl_write_byte(rtlpriv, REG_TBTT_PROHIBIT + 2, tmp1byte); } static void _rtl92ce_resume_tx_beacon(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); u8 tmp1byte; tmp1byte = rtl_read_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2); rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2, tmp1byte | BIT(6)); rtl_write_byte(rtlpriv, REG_TBTT_PROHIBIT + 1, 0xff); tmp1byte = rtl_read_byte(rtlpriv, REG_TBTT_PROHIBIT + 2); tmp1byte |= BIT(0); rtl_write_byte(rtlpriv, REG_TBTT_PROHIBIT + 2, tmp1byte); } static void _rtl92ce_enable_bcn_sub_func(struct ieee80211_hw *hw) { _rtl92ce_set_bcn_ctrl_reg(hw, 0, BIT(1)); } static void _rtl92ce_disable_bcn_sub_func(struct ieee80211_hw *hw) { _rtl92ce_set_bcn_ctrl_reg(hw, BIT(1), 0); } void rtl92ce_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_ps_ctl 
*ppsc = rtl_psc(rtl_priv(hw)); struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); switch (variable) { case HW_VAR_RCR: *((u32 *) (val)) = rtlpci->receive_config; break; case HW_VAR_RF_STATE: *((enum rf_pwrstate *)(val)) = ppsc->rfpwr_state; break; case HW_VAR_FWLPS_RF_ON:{ enum rf_pwrstate rfState; u32 val_rcr; rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_RF_STATE, (u8 *) (&rfState)); if (rfState == ERFOFF) { *((bool *) (val)) = true; } else { val_rcr = rtl_read_dword(rtlpriv, REG_RCR); val_rcr &= 0x00070000; if (val_rcr) *((bool *) (val)) = false; else *((bool *) (val)) = true; } break; } case HW_VAR_FW_PSMODE_STATUS: *((bool *) (val)) = ppsc->fw_current_inpsmode; break; case HW_VAR_CORRECT_TSF:{ u64 tsf; u32 *ptsf_low = (u32 *)&tsf; u32 *ptsf_high = ((u32 *)&tsf) + 1; *ptsf_high = rtl_read_dword(rtlpriv, (REG_TSFTR + 4)); *ptsf_low = rtl_read_dword(rtlpriv, REG_TSFTR); *((u64 *) (val)) = tsf; break; } default: RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "switch case not processed\n"); break; } } void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw); struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); u8 idx; switch (variable) { case HW_VAR_ETHER_ADDR:{ for (idx = 0; idx < ETH_ALEN; idx++) { rtl_write_byte(rtlpriv, (REG_MACID + idx), val[idx]); } break; } case HW_VAR_BASIC_RATE:{ u16 rate_cfg = ((u16 *) val)[0]; u8 rate_index = 0; rate_cfg &= 0x15f; rate_cfg |= 0x01; rtl_write_byte(rtlpriv, REG_RRSR, rate_cfg & 0xff); rtl_write_byte(rtlpriv, REG_RRSR + 1, (rate_cfg >> 8) & 0xff); while (rate_cfg > 0x1) { rate_cfg = (rate_cfg >> 1); rate_index++; } rtl_write_byte(rtlpriv, REG_INIRTS_RATE_SEL, rate_index); break; } case HW_VAR_BSSID:{ for (idx = 0; idx < ETH_ALEN; 
idx++) { rtl_write_byte(rtlpriv, (REG_BSSID + idx), val[idx]); } break; } case HW_VAR_SIFS:{ rtl_write_byte(rtlpriv, REG_SIFS_CTX + 1, val[0]); rtl_write_byte(rtlpriv, REG_SIFS_TRX + 1, val[1]); rtl_write_byte(rtlpriv, REG_SPEC_SIFS + 1, val[0]); rtl_write_byte(rtlpriv, REG_MAC_SPEC_SIFS + 1, val[0]); if (!mac->ht_enable) rtl_write_word(rtlpriv, REG_RESP_SIFS_OFDM, 0x0e0e); else rtl_write_word(rtlpriv, REG_RESP_SIFS_OFDM, *((u16 *) val)); break; } case HW_VAR_SLOT_TIME:{ u8 e_aci; RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD, "HW_VAR_SLOT_TIME %x\n", val[0]); rtl_write_byte(rtlpriv, REG_SLOT, val[0]); for (e_aci = 0; e_aci < AC_MAX; e_aci++) { rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AC_PARAM, &e_aci); } break; } case HW_VAR_ACK_PREAMBLE:{ u8 reg_tmp; u8 short_preamble = (bool)*val; reg_tmp = (mac->cur_40_prime_sc) << 5; if (short_preamble) reg_tmp |= 0x80; rtl_write_byte(rtlpriv, REG_RRSR + 2, reg_tmp); break; } case HW_VAR_AMPDU_MIN_SPACE:{ u8 min_spacing_to_set; u8 sec_min_space; min_spacing_to_set = *val; if (min_spacing_to_set <= 7) { sec_min_space = 0; if (min_spacing_to_set < sec_min_space) min_spacing_to_set = sec_min_space; mac->min_space_cfg = ((mac->min_space_cfg & 0xf8) | min_spacing_to_set); *val = min_spacing_to_set; RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD, "Set HW_VAR_AMPDU_MIN_SPACE: %#x\n", mac->min_space_cfg); rtl_write_byte(rtlpriv, REG_AMPDU_MIN_SPACE, mac->min_space_cfg); } break; } case HW_VAR_SHORTGI_DENSITY:{ u8 density_to_set; density_to_set = *val; mac->min_space_cfg |= (density_to_set << 3); RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD, "Set HW_VAR_SHORTGI_DENSITY: %#x\n", mac->min_space_cfg); rtl_write_byte(rtlpriv, REG_AMPDU_MIN_SPACE, mac->min_space_cfg); break; } case HW_VAR_AMPDU_FACTOR:{ u8 regtoset_normal[4] = {0x41, 0xa8, 0x72, 0xb9}; u8 regtoset_bt[4] = {0x31, 0x74, 0x42, 0x97}; u8 factor_toset; u8 *p_regtoset = NULL; u8 index = 0; if ((rtlpcipriv->bt_coexist.bt_coexistence) && (rtlpcipriv->bt_coexist.bt_coexist_type == BT_CSR_BC4)) p_regtoset = 
regtoset_bt; else p_regtoset = regtoset_normal; factor_toset = *(val); if (factor_toset <= 3) { factor_toset = (1 << (factor_toset + 2)); if (factor_toset > 0xf) factor_toset = 0xf; for (index = 0; index < 4; index++) { if ((p_regtoset[index] & 0xf0) > (factor_toset << 4)) p_regtoset[index] = (p_regtoset[index] & 0x0f) | (factor_toset << 4); if ((p_regtoset[index] & 0x0f) > factor_toset) p_regtoset[index] = (p_regtoset[index] & 0xf0) | (factor_toset); rtl_write_byte(rtlpriv, (REG_AGGLEN_LMT + index), p_regtoset[index]); } RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD, "Set HW_VAR_AMPDU_FACTOR: %#x\n", factor_toset); } break; } case HW_VAR_AC_PARAM:{ u8 e_aci = *(val); rtl92c_dm_init_edca_turbo(hw); if (rtlpci->acm_method != eAcmWay2_SW) rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_ACM_CTRL, (&e_aci)); break; } case HW_VAR_ACM_CTRL:{ u8 e_aci = *(val); union aci_aifsn *p_aci_aifsn = (union aci_aifsn *)(&(mac->ac[0].aifs)); u8 acm = p_aci_aifsn->f.acm; u8 acm_ctrl = rtl_read_byte(rtlpriv, REG_ACMHWCTRL); acm_ctrl = acm_ctrl | ((rtlpci->acm_method == 2) ? 
0x0 : 0x1); if (acm) { switch (e_aci) { case AC0_BE: acm_ctrl |= AcmHw_BeqEn; break; case AC2_VI: acm_ctrl |= AcmHw_ViqEn; break; case AC3_VO: acm_ctrl |= AcmHw_VoqEn; break; default: RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, "HW_VAR_ACM_CTRL acm set failed: eACI is %d\n", acm); break; } } else { switch (e_aci) { case AC0_BE: acm_ctrl &= (~AcmHw_BeqEn); break; case AC2_VI: acm_ctrl &= (~AcmHw_ViqEn); break; case AC3_VO: acm_ctrl &= (~AcmHw_BeqEn); break; default: RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "switch case not processed\n"); break; } } RT_TRACE(rtlpriv, COMP_QOS, DBG_TRACE, "SetHwReg8190pci(): [HW_VAR_ACM_CTRL] Write 0x%X\n", acm_ctrl); rtl_write_byte(rtlpriv, REG_ACMHWCTRL, acm_ctrl); break; } case HW_VAR_RCR:{ rtl_write_dword(rtlpriv, REG_RCR, ((u32 *) (val))[0]); rtlpci->receive_config = ((u32 *) (val))[0]; break; } case HW_VAR_RETRY_LIMIT:{ u8 retry_limit = val[0]; rtl_write_word(rtlpriv, REG_RL, retry_limit << RETRY_LIMIT_SHORT_SHIFT | retry_limit << RETRY_LIMIT_LONG_SHIFT); break; } case HW_VAR_DUAL_TSF_RST: rtl_write_byte(rtlpriv, REG_DUAL_TSF_RST, (BIT(0) | BIT(1))); break; case HW_VAR_EFUSE_BYTES: rtlefuse->efuse_usedbytes = *((u16 *) val); break; case HW_VAR_EFUSE_USAGE: rtlefuse->efuse_usedpercentage = *val; break; case HW_VAR_IO_CMD: rtl92c_phy_set_io_cmd(hw, (*(enum io_type *)val)); break; case HW_VAR_WPA_CONFIG: rtl_write_byte(rtlpriv, REG_SECCFG, *val); break; case HW_VAR_SET_RPWM:{ u8 rpwm_val; rpwm_val = rtl_read_byte(rtlpriv, REG_PCIE_HRPWM); udelay(1); if (rpwm_val & BIT(7)) { rtl_write_byte(rtlpriv, REG_PCIE_HRPWM, *val); } else { rtl_write_byte(rtlpriv, REG_PCIE_HRPWM, *val | BIT(7)); } break; } case HW_VAR_H2C_FW_PWRMODE:{ u8 psmode = *val; if ((psmode != FW_PS_ACTIVE_MODE) && (!IS_92C_SERIAL(rtlhal->version))) { rtl92c_dm_rf_saving(hw, true); } rtl92c_set_fw_pwrmode_cmd(hw, *val); break; } case HW_VAR_FW_PSMODE_STATUS: ppsc->fw_current_inpsmode = *((bool *) val); break; case HW_VAR_H2C_FW_JOINBSSRPT:{ u8 mstatus = *val; u8 tmp_regcr, 
tmp_reg422; bool recover = false; if (mstatus == RT_MEDIA_CONNECT) { rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AID, NULL); tmp_regcr = rtl_read_byte(rtlpriv, REG_CR + 1); rtl_write_byte(rtlpriv, REG_CR + 1, (tmp_regcr | BIT(0))); _rtl92ce_set_bcn_ctrl_reg(hw, 0, BIT(3)); _rtl92ce_set_bcn_ctrl_reg(hw, BIT(4), 0); tmp_reg422 = rtl_read_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2); if (tmp_reg422 & BIT(6)) recover = true; rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2, tmp_reg422 & (~BIT(6))); rtl92c_set_fw_rsvdpagepkt(hw, 0); _rtl92ce_set_bcn_ctrl_reg(hw, BIT(3), 0); _rtl92ce_set_bcn_ctrl_reg(hw, 0, BIT(4)); if (recover) { rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2, tmp_reg422); } rtl_write_byte(rtlpriv, REG_CR + 1, (tmp_regcr & ~(BIT(0)))); } rtl92c_set_fw_joinbss_report_cmd(hw, *val); break; } case HW_VAR_H2C_FW_P2P_PS_OFFLOAD: rtl92c_set_p2p_ps_offload_cmd(hw, (*(u8 *)val)); break; case HW_VAR_AID:{ u16 u2btmp; u2btmp = rtl_read_word(rtlpriv, REG_BCN_PSR_RPT); u2btmp &= 0xC000; rtl_write_word(rtlpriv, REG_BCN_PSR_RPT, (u2btmp | mac->assoc_id)); break; } case HW_VAR_CORRECT_TSF:{ u8 btype_ibss = val[0]; if (btype_ibss) _rtl92ce_stop_tx_beacon(hw); _rtl92ce_set_bcn_ctrl_reg(hw, 0, BIT(3)); rtl_write_dword(rtlpriv, REG_TSFTR, (u32) (mac->tsf & 0xffffffff)); rtl_write_dword(rtlpriv, REG_TSFTR + 4, (u32) ((mac->tsf >> 32) & 0xffffffff)); _rtl92ce_set_bcn_ctrl_reg(hw, BIT(3), 0); if (btype_ibss) _rtl92ce_resume_tx_beacon(hw); break; } case HW_VAR_FW_LPS_ACTION: { bool enter_fwlps = *((bool *)val); u8 rpwm_val, fw_pwrmode; bool fw_current_inps; if (enter_fwlps) { rpwm_val = 0x02; /* RF off */ fw_current_inps = true; rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_FW_PSMODE_STATUS, (u8 *)(&fw_current_inps)); rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_H2C_FW_PWRMODE, (u8 *)(&ppsc->fwctrl_psmode)); rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SET_RPWM, (u8 *)(&rpwm_val)); } else { rpwm_val = 0x0C; /* RF on */ fw_pwrmode = FW_PS_ACTIVE_MODE; fw_current_inps = false; 
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SET_RPWM, (u8 *)(&rpwm_val)); rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_H2C_FW_PWRMODE, (u8 *)(&fw_pwrmode)); rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_FW_PSMODE_STATUS, (u8 *)(&fw_current_inps)); } break; } default: RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "switch case not processed\n"); break; } } static bool _rtl92ce_llt_write(struct ieee80211_hw *hw, u32 address, u32 data) { struct rtl_priv *rtlpriv = rtl_priv(hw); bool status = true; long count = 0; u32 value = _LLT_INIT_ADDR(address) | _LLT_INIT_DATA(data) | _LLT_OP(_LLT_WRITE_ACCESS); rtl_write_dword(rtlpriv, REG_LLT_INIT, value); do { value = rtl_read_dword(rtlpriv, REG_LLT_INIT); if (_LLT_NO_ACTIVE == _LLT_OP_VALUE(value)) break; if (count > POLLING_LLT_THRESHOLD) { RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Failed to polling write LLT done at address %d!\n", address); status = false; break; } } while (++count); return status; } static bool _rtl92ce_llt_table_init(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); unsigned short i; u8 txpktbuf_bndy; u8 maxPage; bool status; #if LLT_CONFIG == 1 maxPage = 255; txpktbuf_bndy = 252; #elif LLT_CONFIG == 2 maxPage = 127; txpktbuf_bndy = 124; #elif LLT_CONFIG == 3 maxPage = 255; txpktbuf_bndy = 174; #elif LLT_CONFIG == 4 maxPage = 255; txpktbuf_bndy = 246; #elif LLT_CONFIG == 5 maxPage = 255; txpktbuf_bndy = 246; #endif #if LLT_CONFIG == 1 rtl_write_byte(rtlpriv, REG_RQPN_NPQ, 0x1c); rtl_write_dword(rtlpriv, REG_RQPN, 0x80a71c1c); #elif LLT_CONFIG == 2 rtl_write_dword(rtlpriv, REG_RQPN, 0x845B1010); #elif LLT_CONFIG == 3 rtl_write_dword(rtlpriv, REG_RQPN, 0x84838484); #elif LLT_CONFIG == 4 rtl_write_dword(rtlpriv, REG_RQPN, 0x80bd1c1c); #elif LLT_CONFIG == 5 rtl_write_word(rtlpriv, REG_RQPN_NPQ, 0x0000); rtl_write_dword(rtlpriv, REG_RQPN, 0x80b01c29); #endif rtl_write_dword(rtlpriv, REG_TRXFF_BNDY, (0x27FF0000 | txpktbuf_bndy)); rtl_write_byte(rtlpriv, REG_TDECTRL + 1, txpktbuf_bndy); rtl_write_byte(rtlpriv, 
REG_TXPKTBUF_BCNQ_BDNY, txpktbuf_bndy); rtl_write_byte(rtlpriv, REG_TXPKTBUF_MGQ_BDNY, txpktbuf_bndy); rtl_write_byte(rtlpriv, 0x45D, txpktbuf_bndy); rtl_write_byte(rtlpriv, REG_PBP, 0x11); rtl_write_byte(rtlpriv, REG_RX_DRVINFO_SZ, 0x4); for (i = 0; i < (txpktbuf_bndy - 1); i++) { status = _rtl92ce_llt_write(hw, i, i + 1); if (true != status) return status; } status = _rtl92ce_llt_write(hw, (txpktbuf_bndy - 1), 0xFF); if (true != status) return status; for (i = txpktbuf_bndy; i < maxPage; i++) { status = _rtl92ce_llt_write(hw, i, (i + 1)); if (true != status) return status; } status = _rtl92ce_llt_write(hw, maxPage, txpktbuf_bndy); if (true != status) return status; return true; } static void _rtl92ce_gen_refresh_led_state(struct ieee80211_hw *hw) { struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw); struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); struct rtl_led *pLed0 = &(pcipriv->ledctl.sw_led0); if (rtlpci->up_first_time) return; if (ppsc->rfoff_reason == RF_CHANGE_BY_IPS) rtl92ce_sw_led_on(hw, pLed0); else if (ppsc->rfoff_reason == RF_CHANGE_BY_INIT) rtl92ce_sw_led_on(hw, pLed0); else rtl92ce_sw_led_off(hw, pLed0); } static bool _rtl92ce_init_mac(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw); struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); unsigned char bytetmp; unsigned short wordtmp; u16 retry; rtl_write_byte(rtlpriv, REG_RSV_CTRL, 0x00); if (rtlpcipriv->bt_coexist.bt_coexistence) { u32 value32; value32 = rtl_read_dword(rtlpriv, REG_APS_FSMCO); value32 |= (SOP_ABG | SOP_AMB | XOP_BTCK); rtl_write_dword(rtlpriv, REG_APS_FSMCO, value32); } rtl_write_byte(rtlpriv, REG_SPS0_CTRL, 0x2b); rtl_write_byte(rtlpriv, REG_AFE_XTAL_CTRL, 0x0F); if (rtlpcipriv->bt_coexist.bt_coexistence) { u32 u4b_tmp = rtl_read_dword(rtlpriv, REG_AFE_XTAL_CTRL); u4b_tmp &= (~0x00024800); rtl_write_dword(rtlpriv, 
REG_AFE_XTAL_CTRL, u4b_tmp); } bytetmp = rtl_read_byte(rtlpriv, REG_APS_FSMCO + 1) | BIT(0); udelay(2); rtl_write_byte(rtlpriv, REG_APS_FSMCO + 1, bytetmp); udelay(2); bytetmp = rtl_read_byte(rtlpriv, REG_APS_FSMCO + 1); udelay(2); retry = 0; RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "reg0xec:%x:%x\n", rtl_read_dword(rtlpriv, 0xEC), bytetmp); while ((bytetmp & BIT(0)) && retry < 1000) { retry++; udelay(50); bytetmp = rtl_read_byte(rtlpriv, REG_APS_FSMCO + 1); RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "reg0xec:%x:%x\n", rtl_read_dword(rtlpriv, 0xEC), bytetmp); udelay(50); } rtl_write_word(rtlpriv, REG_APS_FSMCO, 0x1012); rtl_write_byte(rtlpriv, REG_SYS_ISO_CTRL + 1, 0x82); udelay(2); if (rtlpcipriv->bt_coexist.bt_coexistence) { bytetmp = rtl_read_byte(rtlpriv, REG_AFE_XTAL_CTRL+2) & 0xfd; rtl_write_byte(rtlpriv, REG_AFE_XTAL_CTRL+2, bytetmp); } rtl_write_word(rtlpriv, REG_CR, 0x2ff); if (!_rtl92ce_llt_table_init(hw)) return false; rtl_write_dword(rtlpriv, REG_HISR, 0xffffffff); rtl_write_byte(rtlpriv, REG_HISRE, 0xff); rtl_write_word(rtlpriv, REG_TRXFF_BNDY + 2, 0x27ff); wordtmp = rtl_read_word(rtlpriv, REG_TRXDMA_CTRL); wordtmp &= 0xf; wordtmp |= 0xF771; rtl_write_word(rtlpriv, REG_TRXDMA_CTRL, wordtmp); rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 1, 0x1F); rtl_write_dword(rtlpriv, REG_RCR, rtlpci->receive_config); rtl_write_dword(rtlpriv, REG_TCR, rtlpci->transmit_config); rtl_write_byte(rtlpriv, 0x4d0, 0x0); rtl_write_dword(rtlpriv, REG_BCNQ_DESA, ((u64) rtlpci->tx_ring[BEACON_QUEUE].dma) & DMA_BIT_MASK(32)); rtl_write_dword(rtlpriv, REG_MGQ_DESA, (u64) rtlpci->tx_ring[MGNT_QUEUE].dma & DMA_BIT_MASK(32)); rtl_write_dword(rtlpriv, REG_VOQ_DESA, (u64) rtlpci->tx_ring[VO_QUEUE].dma & DMA_BIT_MASK(32)); rtl_write_dword(rtlpriv, REG_VIQ_DESA, (u64) rtlpci->tx_ring[VI_QUEUE].dma & DMA_BIT_MASK(32)); rtl_write_dword(rtlpriv, REG_BEQ_DESA, (u64) rtlpci->tx_ring[BE_QUEUE].dma & DMA_BIT_MASK(32)); rtl_write_dword(rtlpriv, REG_BKQ_DESA, (u64) rtlpci->tx_ring[BK_QUEUE].dma & 
DMA_BIT_MASK(32)); rtl_write_dword(rtlpriv, REG_HQ_DESA, (u64) rtlpci->tx_ring[HIGH_QUEUE].dma & DMA_BIT_MASK(32)); rtl_write_dword(rtlpriv, REG_RX_DESA, (u64) rtlpci->rx_ring[RX_MPDU_QUEUE].dma & DMA_BIT_MASK(32)); if (IS_92C_SERIAL(rtlhal->version)) rtl_write_byte(rtlpriv, REG_PCIE_CTRL_REG + 3, 0x77); else rtl_write_byte(rtlpriv, REG_PCIE_CTRL_REG + 3, 0x22); rtl_write_dword(rtlpriv, REG_INT_MIG, 0); bytetmp = rtl_read_byte(rtlpriv, REG_APSD_CTRL); rtl_write_byte(rtlpriv, REG_APSD_CTRL, bytetmp & ~BIT(6)); do { retry++; bytetmp = rtl_read_byte(rtlpriv, REG_APSD_CTRL); } while ((retry < 200) && (bytetmp & BIT(7))); _rtl92ce_gen_refresh_led_state(hw); rtl_write_dword(rtlpriv, REG_MCUTST_1, 0x0); return true; } static void _rtl92ce_hw_configure(struct ieee80211_hw *hw) { struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw); u8 reg_bw_opmode; u32 reg_prsr; reg_bw_opmode = BW_OPMODE_20MHZ; reg_prsr = RATE_ALL_CCK | RATE_ALL_OFDM_AG; rtl_write_byte(rtlpriv, REG_INIRTS_RATE_SEL, 0x8); rtl_write_byte(rtlpriv, REG_BWOPMODE, reg_bw_opmode); rtl_write_dword(rtlpriv, REG_RRSR, reg_prsr); rtl_write_byte(rtlpriv, REG_SLOT, 0x09); rtl_write_byte(rtlpriv, REG_AMPDU_MIN_SPACE, 0x0); rtl_write_word(rtlpriv, REG_FWHW_TXQ_CTRL, 0x1F80); rtl_write_word(rtlpriv, REG_RL, 0x0707); rtl_write_dword(rtlpriv, REG_BAR_MODE_CTRL, 0x02012802); rtl_write_byte(rtlpriv, REG_HWSEQ_CTRL, 0xFF); rtl_write_dword(rtlpriv, REG_DARFRC, 0x01000000); rtl_write_dword(rtlpriv, REG_DARFRC + 4, 0x07060504); rtl_write_dword(rtlpriv, REG_RARFRC, 0x01000000); rtl_write_dword(rtlpriv, REG_RARFRC + 4, 0x07060504); if ((rtlpcipriv->bt_coexist.bt_coexistence) && (rtlpcipriv->bt_coexist.bt_coexist_type == BT_CSR_BC4)) rtl_write_dword(rtlpriv, REG_AGGLEN_LMT, 0x97427431); else rtl_write_dword(rtlpriv, REG_AGGLEN_LMT, 0xb972a841); rtl_write_byte(rtlpriv, REG_ATIMWND, 0x2); rtl_write_byte(rtlpriv, REG_BCN_MAX_ERR, 0xff); 
rtlpci->reg_bcn_ctrl_val = 0x1f; rtl_write_byte(rtlpriv, REG_BCN_CTRL, rtlpci->reg_bcn_ctrl_val); rtl_write_byte(rtlpriv, REG_TBTT_PROHIBIT + 1, 0xff); rtl_write_byte(rtlpriv, REG_TBTT_PROHIBIT + 1, 0xff); rtl_write_byte(rtlpriv, REG_PIFS, 0x1C); rtl_write_byte(rtlpriv, REG_AGGR_BREAK_TIME, 0x16); if ((rtlpcipriv->bt_coexist.bt_coexistence) && (rtlpcipriv->bt_coexist.bt_coexist_type == BT_CSR_BC4)) { rtl_write_word(rtlpriv, REG_NAV_PROT_LEN, 0x0020); rtl_write_word(rtlpriv, REG_PROT_MODE_CTRL, 0x0402); } else { rtl_write_word(rtlpriv, REG_NAV_PROT_LEN, 0x0020); rtl_write_word(rtlpriv, REG_NAV_PROT_LEN, 0x0020); } if ((rtlpcipriv->bt_coexist.bt_coexistence) && (rtlpcipriv->bt_coexist.bt_coexist_type == BT_CSR_BC4)) rtl_write_dword(rtlpriv, REG_FAST_EDCA_CTRL, 0x03086666); else rtl_write_dword(rtlpriv, REG_FAST_EDCA_CTRL, 0x086666); rtl_write_byte(rtlpriv, REG_ACKTO, 0x40); rtl_write_word(rtlpriv, REG_SPEC_SIFS, 0x1010); rtl_write_word(rtlpriv, REG_MAC_SPEC_SIFS, 0x1010); rtl_write_word(rtlpriv, REG_SIFS_CTX, 0x1010); rtl_write_word(rtlpriv, REG_SIFS_TRX, 0x1010); rtl_write_dword(rtlpriv, REG_MAR, 0xffffffff); rtl_write_dword(rtlpriv, REG_MAR + 4, 0xffffffff); } static void _rtl92ce_enable_aspm_back_door(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); rtl_write_byte(rtlpriv, 0x34b, 0x93); rtl_write_word(rtlpriv, 0x350, 0x870c); rtl_write_byte(rtlpriv, 0x352, 0x1); if (ppsc->support_backdoor) rtl_write_byte(rtlpriv, 0x349, 0x1b); else rtl_write_byte(rtlpriv, 0x349, 0x03); rtl_write_word(rtlpriv, 0x350, 0x2718); rtl_write_byte(rtlpriv, 0x352, 0x1); } void rtl92ce_enable_hw_security_config(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); u8 sec_reg_value; RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "PairwiseEncAlgorithm = %d GroupEncAlgorithm = %d\n", rtlpriv->sec.pairwise_enc_algorithm, rtlpriv->sec.group_enc_algorithm); if (rtlpriv->cfg->mod_params->sw_crypto || rtlpriv->sec.use_sw_sec) 
{
        /* HW encryption not enabled: leave the security engine untouched. */
        RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, "not open hw encryption\n");
        return;
}

/* Turn on TX encryption and RX decryption in the CAM security engine. */
sec_reg_value = SCR_TxEncEnable | SCR_RxDecEnable;

if (rtlpriv->sec.use_defaultkey) {
        sec_reg_value |= SCR_TxUseDK;
        sec_reg_value |= SCR_RxUseDK;
}

/* Broadcast frames always use the default key. */
sec_reg_value |= (SCR_RXBCUSEDK | SCR_TXBCUSEDK);

rtl_write_byte(rtlpriv, REG_CR + 1, 0x02);

RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
         "The SECR-value %x\n", sec_reg_value);

rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_WPA_CONFIG, &sec_reg_value);
}

/*
 * rtl92ce_hw_init - full bring-up of the RTL8192CE: MAC init, firmware
 * download, MAC/BB/RF configuration, CAM reset, security config, BT
 * coexistence setup and initial calibration.
 *
 * Returns 0 on success, 1 when MAC init or firmware download fails.
 */
int rtl92ce_hw_init(struct ieee80211_hw *hw)
{
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
        struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
        struct rtl_phy *rtlphy = &(rtlpriv->phy);
        struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
        struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
        bool rtstatus = true;
        bool is92c;
        int err;
        u8 tmp_u1b;
        unsigned long flags;

        rtlpci->being_init_adapter = true;
        /* Since this function can take a very long time (up to 350 ms)
         * and can be called with irqs disabled, reenable the irqs
         * to let the other devices continue being serviced.
         *
         * It is safe doing so since our own interrupts will only be enabled
         * in a subsequent step.
         */
        local_save_flags(flags);
        local_irq_enable();

        rtlpriv->intf_ops->disable_aspm(hw);
        rtstatus = _rtl92ce_init_mac(hw);
        if (!rtstatus) {
                RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Init MAC failed\n");
                err = 1;
                goto exit;
        }

        err = rtl92c_download_fw(hw);
        if (err) {
                RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
                         "Failed to download FW. Init HW without FW now..\n");
                err = 1;
                goto exit;
        }

        rtlhal->last_hmeboxnum = 0;
        rtl92c_phy_mac_config(hw);
        /* because last function modify RCR, so we update
         * rcr var here, or TP will unstable for receive_config
         * is wrong, RX RCR_ACRC32 will cause TP unstabel & Rx
         * RCR_APP_ICV will cause mac80211 unassoc for cisco 1252*/
        rtlpci->receive_config = rtl_read_dword(rtlpriv, REG_RCR);
        rtlpci->receive_config &= ~(RCR_ACRC32 | RCR_AICV);
        rtl_write_dword(rtlpriv, REG_RCR, rtlpci->receive_config);

        rtl92c_phy_bb_config(hw);
        rtlphy->rf_mode = RF_OP_BY_SW_3WIRE;
        rtl92c_phy_rf_config(hw);

        /* Chip-cut specific RF register fixups (values per vendor code). */
        if (IS_VENDOR_UMC_A_CUT(rtlhal->version) &&
            !IS_92C_SERIAL(rtlhal->version)) {
                rtl_set_rfreg(hw, RF90_PATH_A, RF_RX_G1, MASKDWORD, 0x30255);
                rtl_set_rfreg(hw, RF90_PATH_A, RF_RX_G2, MASKDWORD, 0x50a00);
        } else if (IS_81xxC_VENDOR_UMC_B_CUT(rtlhal->version)) {
                rtl_set_rfreg(hw, RF90_PATH_A, 0x0C, MASKDWORD, 0x894AE);
                rtl_set_rfreg(hw, RF90_PATH_A, 0x0A, MASKDWORD, 0x1AF31);
                rtl_set_rfreg(hw, RF90_PATH_A, RF_IPA, MASKDWORD, 0x8F425);
                rtl_set_rfreg(hw, RF90_PATH_A, RF_SYN_G2, MASKDWORD, 0x4F200);
                rtl_set_rfreg(hw, RF90_PATH_A, RF_RCK1, MASKDWORD, 0x44053);
                rtl_set_rfreg(hw, RF90_PATH_A, RF_RCK2, MASKDWORD, 0x80201);
        }

        /* Cache the channel/bandwidth RF register of both paths. */
        rtlphy->rfreg_chnlval[0] = rtl_get_rfreg(hw, (enum radio_path)0,
                                                 RF_CHNLBW, RFREG_OFFSET_MASK);
        rtlphy->rfreg_chnlval[1] = rtl_get_rfreg(hw, (enum radio_path)1,
                                                 RF_CHNLBW, RFREG_OFFSET_MASK);

        rtl_set_bbreg(hw, RFPGA0_RFMOD, BCCKEN, 0x1);
        rtl_set_bbreg(hw, RFPGA0_RFMOD, BOFDMEN, 0x1);
        rtl_set_bbreg(hw, RFPGA0_ANALOGPARAMETER2, BIT(10), 1);
        _rtl92ce_hw_configure(hw);
        rtl_cam_reset_all_entry(hw);
        rtl92ce_enable_hw_security_config(hw);

        ppsc->rfpwr_state = ERFON;

        rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_ETHER_ADDR, mac->mac_addr);
        _rtl92ce_enable_aspm_back_door(hw);
        rtlpriv->intf_ops->enable_aspm(hw);

        rtl8192ce_bt_hw_init(hw);

        if (ppsc->rfpwr_state == ERFON) {
                rtl92c_phy_set_rfpath_switch(hw, 1);
                /* Full IQ calibration only once; afterwards reload results. */
                if (rtlphy->iqk_initialized) {
                        rtl92c_phy_iq_calibrate(hw, true);
                } else {
                        rtl92c_phy_iq_calibrate(hw, false);
                        rtlphy->iqk_initialized = true;
                }

                rtl92c_dm_check_txpower_tracking(hw);
                rtl92c_phy_lc_calibrate(hw);
        }

        /* PA bias calibration, driven by efuse byte 0x1FA bits 0/1/4. */
        is92c = IS_92C_SERIAL(rtlhal->version);
        tmp_u1b = efuse_read_1byte(hw, 0x1FA);
        if (!(tmp_u1b & BIT(0))) {
                rtl_set_rfreg(hw, RF90_PATH_A, 0x15, 0x0F, 0x05);
                RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "PA BIAS path A\n");
        }

        if (!(tmp_u1b & BIT(1)) && is92c) {
                rtl_set_rfreg(hw, RF90_PATH_B, 0x15, 0x0F, 0x05);
                RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "PA BIAS path B\n");
        }

        if (!(tmp_u1b & BIT(4))) {
                tmp_u1b = rtl_read_byte(rtlpriv, 0x16);
                tmp_u1b &= 0x0F;
                rtl_write_byte(rtlpriv, 0x16, tmp_u1b | 0x80);
                udelay(10);
                rtl_write_byte(rtlpriv, 0x16, tmp_u1b | 0x90);
                RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "under 1.5V\n");
        }
        rtl92c_dm_init(hw);
exit:
        local_irq_restore(flags);
        rtlpci->being_init_adapter = false;
        return err;
}

/*
 * _rtl92ce_read_chip_version - decode the chip version (88C/92C, A/B cut,
 * TSMC/UMC vendor, 1T2R bonding) from REG_SYS_CFG and set rtlphy->rf_type
 * accordingly.
 */
static enum version_8192c _rtl92ce_read_chip_version(struct ieee80211_hw *hw)
{
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        struct rtl_phy *rtlphy = &(rtlpriv->phy);
        enum version_8192c version = VERSION_UNKNOWN;
        u32 value32;
        const char *versionid;

        value32 = rtl_read_dword(rtlpriv, REG_SYS_CFG);
        if (value32 & TRP_VAUX_EN) {
                /* Test chip (A cut). */
                version = (value32 & TYPE_ID) ? VERSION_A_CHIP_92C :
                           VERSION_A_CHIP_88C;
        } else {
                /* Normal chip (B cut or later); fold in vendor bits. */
                version = (enum version_8192c) (CHIP_VER_B |
                                ((value32 & TYPE_ID) ? CHIP_92C_BITMASK : 0) |
                                ((value32 & VENDOR_ID) ?
                                CHIP_VENDOR_UMC : 0));
                if ((!IS_CHIP_VENDOR_UMC(version)) && (value32 &
                     CHIP_VER_RTL_MASK)) {
                        version = (enum version_8192c)(version |
                                   ((((value32 & CHIP_VER_RTL_MASK) == BIT(12))
                                   ? CHIP_VENDOR_UMC_B_CUT : CHIP_UNKNOWN) |
                                   CHIP_VENDOR_UMC));
                }
                if (IS_92C_SERIAL(version)) {
                        value32 = rtl_read_dword(rtlpriv, REG_HPON_FSM);
                        version = (enum version_8192c)(version |
                                   ((CHIP_BONDING_IDENTIFIER(value32) ==
                                   CHIP_BONDING_92C_1T2R) ?
                                   RF_TYPE_1T2R : 0));
                }
        }

        /* Human-readable label for the detected version. */
        switch (version) {
        case VERSION_B_CHIP_92C:
                versionid = "B_CHIP_92C";
                break;
        case VERSION_B_CHIP_88C:
                versionid = "B_CHIP_88C";
                break;
        case VERSION_A_CHIP_92C:
                versionid = "A_CHIP_92C";
                break;
        case VERSION_A_CHIP_88C:
                versionid = "A_CHIP_88C";
                break;
        case VERSION_NORMAL_UMC_CHIP_92C_1T2R_A_CUT:
                versionid = "A_CUT_92C_1T2R";
                break;
        case VERSION_NORMAL_UMC_CHIP_92C_A_CUT:
                versionid = "A_CUT_92C";
                break;
        case VERSION_NORMAL_UMC_CHIP_88C_A_CUT:
                versionid = "A_CUT_88C";
                break;
        case VERSION_NORMAL_UMC_CHIP_92C_1T2R_B_CUT:
                versionid = "B_CUT_92C_1T2R";
                break;
        case VERSION_NORMAL_UMC_CHIP_92C_B_CUT:
                versionid = "B_CUT_92C";
                break;
        case VERSION_NORMAL_UMC_CHIP_88C_B_CUT:
                versionid = "B_CUT_88C";
                break;
        default:
                versionid = "Unknown. Bug?";
                break;
        }

        RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG,
                 "Chip Version ID: %s\n", versionid);

        /* Low two bits select the antenna configuration. */
        switch (version & 0x3) {
        case CHIP_88C:
                rtlphy->rf_type = RF_1T1R;
                break;
        case CHIP_92C:
                rtlphy->rf_type = RF_2T2R;
                break;
        case CHIP_92C_1T2R:
                rtlphy->rf_type = RF_1T2R;
                break;
        default:
                rtlphy->rf_type = RF_1T1R;
                RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
                         "ERROR RF_Type is set!!\n");
                break;
        }

        RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
                 "Chip RF Type: %s\n", rtlphy->rf_type == RF_2T2R ?
                 "RF_2T2R" : "RF_1T1R");

        return version;
}

/*
 * _rtl92ce_set_media_status - program the MSR (media status register) and
 * beacon-related functions for the given interface type.
 *
 * Returns 0 on success, 1 for an unsupported interface type.
 */
static int _rtl92ce_set_media_status(struct ieee80211_hw *hw,
                                     enum nl80211_iftype type)
{
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        u8 bt_msr = rtl_read_byte(rtlpriv, MSR);
        enum led_ctl_mode ledaction = LED_CTL_NO_LINK;

        /* Clear the two network-type bits, keep the rest of MSR. */
        bt_msr &= 0xfc;

        if (type == NL80211_IFTYPE_UNSPECIFIED ||
            type == NL80211_IFTYPE_STATION) {
                _rtl92ce_stop_tx_beacon(hw);
                _rtl92ce_enable_bcn_sub_func(hw);
        } else if (type == NL80211_IFTYPE_ADHOC || type == NL80211_IFTYPE_AP ||
                   type == NL80211_IFTYPE_MESH_POINT) {
                _rtl92ce_resume_tx_beacon(hw);
                _rtl92ce_disable_bcn_sub_func(hw);
        } else {
                RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
                         "Set HW_VAR_MEDIA_STATUS: No such media status(%x)\n",
                         type);
        }

        switch (type) {
        case NL80211_IFTYPE_UNSPECIFIED:
                bt_msr |= MSR_NOLINK;
                ledaction = LED_CTL_LINK;
                RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
                         "Set Network type to NO LINK!\n");
                break;
        case NL80211_IFTYPE_ADHOC:
                bt_msr |= MSR_ADHOC;
                RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
                         "Set Network type to Ad Hoc!\n");
                break;
        case NL80211_IFTYPE_STATION:
                bt_msr |= MSR_INFRA;
                ledaction = LED_CTL_LINK;
                RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
                         "Set Network type to STA!\n");
                break;
        case NL80211_IFTYPE_AP:
                bt_msr |= MSR_AP;
                RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
                         "Set Network type to AP!\n");
                break;
        case NL80211_IFTYPE_MESH_POINT:
                /* Mesh uses the ad-hoc MSR setting on this hardware. */
                bt_msr |= MSR_ADHOC;
                RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
                         "Set Network type to Mesh Point!\n");
                break;
        default:
                RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
                         "Network type %d not supported!\n", type);
                return 1;
                break;  /* unreachable after return; kept for symmetry */
        }

        rtl_write_byte(rtlpriv, (MSR), bt_msr);
        rtlpriv->cfg->ops->led_control(hw, ledaction);

        /* AP mode polls for beacon content; others use the 0x66 default. */
        if ((bt_msr & 0xfc) == MSR_AP)
                rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x00);
        else
                rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x66);

        return 0;
}

/*
 * rtl92ce_set_check_bssid - enable/disable BSSID filtering of data and
 * beacon frames in the RCR, with the matching beacon-control bit.
 */
void rtl92ce_set_check_bssid(struct ieee80211_hw *hw, bool check_bssid)
{
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        u32 reg_rcr = rtl_read_dword(rtlpriv, REG_RCR);

        if (rtlpriv->psc.rfpwr_state != ERFON)
                return;

        if (check_bssid) {
reg_rcr |= (RCR_CBSSID_DATA | RCR_CBSSID_BCN); rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR, (u8 *) (&reg_rcr)); _rtl92ce_set_bcn_ctrl_reg(hw, 0, BIT(4)); } else if (!check_bssid) { reg_rcr &= (~(RCR_CBSSID_DATA | RCR_CBSSID_BCN)); _rtl92ce_set_bcn_ctrl_reg(hw, BIT(4), 0); rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR, (u8 *) (&reg_rcr)); } } int rtl92ce_set_network_type(struct ieee80211_hw *hw, enum nl80211_iftype type) { struct rtl_priv *rtlpriv = rtl_priv(hw); if (_rtl92ce_set_media_status(hw, type)) return -EOPNOTSUPP; if (rtlpriv->mac80211.link_state == MAC80211_LINKED) { if (type != NL80211_IFTYPE_AP && type != NL80211_IFTYPE_MESH_POINT) rtl92ce_set_check_bssid(hw, true); } else { rtl92ce_set_check_bssid(hw, false); } return 0; } /* don't set REG_EDCA_BE_PARAM here because mac80211 will send pkt when scan */ void rtl92ce_set_qos(struct ieee80211_hw *hw, int aci) { struct rtl_priv *rtlpriv = rtl_priv(hw); rtl92c_dm_init_edca_turbo(hw); switch (aci) { case AC1_BK: rtl_write_dword(rtlpriv, REG_EDCA_BK_PARAM, 0xa44f); break; case AC0_BE: /* rtl_write_dword(rtlpriv, REG_EDCA_BE_PARAM, u4b_ac_param); */ break; case AC2_VI: rtl_write_dword(rtlpriv, REG_EDCA_VI_PARAM, 0x5e4322); break; case AC3_VO: rtl_write_dword(rtlpriv, REG_EDCA_VO_PARAM, 0x2f3222); break; default: RT_ASSERT(false, "invalid aci: %d !\n", aci); break; } } void rtl92ce_enable_interrupt(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); rtl_write_dword(rtlpriv, REG_HIMR, rtlpci->irq_mask[0] & 0xFFFFFFFF); rtl_write_dword(rtlpriv, REG_HIMRE, rtlpci->irq_mask[1] & 0xFFFFFFFF); } void rtl92ce_disable_interrupt(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); rtl_write_dword(rtlpriv, REG_HIMR, IMR8190_DISABLED); rtl_write_dword(rtlpriv, REG_HIMRE, IMR8190_DISABLED); synchronize_irq(rtlpci->pdev->irq); } static void _rtl92ce_poweroff_adapter(struct ieee80211_hw 
                                      *hw)
{
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
        struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
        u8 u1b_tmp;
        u32 u4b_tmp;

        /* Power-down sequence: pause TX, shut RF, reset MCU, then pull the
         * analog blocks down.  The write order below matters. */
        rtlpriv->intf_ops->enable_aspm(hw);
        rtl_write_byte(rtlpriv, REG_TXPAUSE, 0xFF);
        rtl_set_rfreg(hw, RF90_PATH_A, 0x00, RFREG_OFFSET_MASK, 0x00);
        rtl_write_byte(rtlpriv, REG_RF_CTRL, 0x00);
        rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x40);
        rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE2);
        rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE0);

        /* Reset the 8051 only if firmware was actually downloaded. */
        if (rtl_read_byte(rtlpriv, REG_MCUFWDL) & BIT(7))
                rtl92c_firmware_selfreset(hw);

        rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN + 1, 0x51);
        rtl_write_byte(rtlpriv, REG_MCUFWDL, 0x00);
        rtl_write_dword(rtlpriv, REG_GPIO_PIN_CTRL, 0x00000000);
        u1b_tmp = rtl_read_byte(rtlpriv, REG_GPIO_PIN_CTRL);

        /* With CSR BT coexistence, keep the BT GPIO lines out of the mask. */
        if ((rtlpcipriv->bt_coexist.bt_coexistence) &&
            ((rtlpcipriv->bt_coexist.bt_coexist_type == BT_CSR_BC4) ||
             (rtlpcipriv->bt_coexist.bt_coexist_type == BT_CSR_BC8))) {
                rtl_write_dword(rtlpriv, REG_GPIO_PIN_CTRL, 0x00F30000 |
                                (u1b_tmp << 8));
        } else {
                rtl_write_dword(rtlpriv, REG_GPIO_PIN_CTRL, 0x00FF0000 |
                                (u1b_tmp << 8));
        }

        rtl_write_word(rtlpriv, REG_GPIO_IO_SEL, 0x0790);
        rtl_write_word(rtlpriv, REG_LEDCFG0, 0x8080);
        rtl_write_byte(rtlpriv, REG_AFE_PLL_CTRL, 0x80);

        if (!IS_81xxC_VENDOR_UMC_B_CUT(rtlhal->version))
                rtl_write_byte(rtlpriv, REG_SPS0_CTRL, 0x23);

        if (rtlpcipriv->bt_coexist.bt_coexistence) {
                u4b_tmp = rtl_read_dword(rtlpriv, REG_AFE_XTAL_CTRL);
                u4b_tmp |= 0x03824800;
                rtl_write_dword(rtlpriv, REG_AFE_XTAL_CTRL, u4b_tmp);
        } else {
                rtl_write_dword(rtlpriv, REG_AFE_XTAL_CTRL, 0x0e);
        }

        rtl_write_byte(rtlpriv, REG_RSV_CTRL, 0x0e);
        rtl_write_byte(rtlpriv, REG_APS_FSMCO + 1, 0x10);
}

/*
 * rtl92ce_card_disable - take the interface down: drop the link, power off
 * the adapter, and force a fresh IQ calibration on the next bring-up.
 */
void rtl92ce_card_disable(struct ieee80211_hw *hw)
{
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
        struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
        struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
        enum nl80211_iftype opmode;

        mac->link_state = MAC80211_NOLINK;
        opmode = NL80211_IFTYPE_UNSPECIFIED;
        _rtl92ce_set_media_status(hw, opmode);

        if (rtlpci->driver_is_goingto_unload ||
            ppsc->rfoff_reason > RF_CHANGE_BY_PS)
                rtlpriv->cfg->ops->led_control(hw, LED_CTL_POWER_OFF);

        RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);
        _rtl92ce_poweroff_adapter(hw);

        /* after power off we should do iqk again */
        rtlpriv->phy.iqk_initialized = false;
}

/*
 * rtl92ce_interrupt_recognized - read and acknowledge pending interrupt
 * status, masked by the configured irq mask.  The secondary status register
 * (HISRE) is intentionally not used on this chip.
 */
void rtl92ce_interrupt_recognized(struct ieee80211_hw *hw,
                                  u32 *p_inta, u32 *p_intb)
{
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));

        *p_inta = rtl_read_dword(rtlpriv, ISR) & rtlpci->irq_mask[0];
        rtl_write_dword(rtlpriv, ISR, *p_inta);

        /*
         * *p_intb = rtl_read_dword(rtlpriv, REG_HISRE) &
         *           rtlpci->irq_mask[1];
         * rtl_write_dword(rtlpriv, ISR + 4, *p_intb);
         */
}

/*
 * rtl92ce_set_beacon_related_registers - program beacon interval, ATIM
 * window, TSF offsets and beacon timing, with interrupts briefly masked.
 */
void rtl92ce_set_beacon_related_registers(struct ieee80211_hw *hw)
{
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
        u16 bcn_interval, atim_window;

        bcn_interval = mac->beacon_interval;
        atim_window = 2;        /*FIX MERGE */
        rtl92ce_disable_interrupt(hw);
        rtl_write_word(rtlpriv, REG_ATIMWND, atim_window);
        rtl_write_word(rtlpriv, REG_BCN_INTERVAL, bcn_interval);
        rtl_write_word(rtlpriv, REG_BCNTCFG, 0x660f);
        rtl_write_byte(rtlpriv, REG_RXTSF_OFFSET_CCK, 0x18);
        rtl_write_byte(rtlpriv, REG_RXTSF_OFFSET_OFDM, 0x18);
        rtl_write_byte(rtlpriv, 0x606, 0x30);
        rtl92ce_enable_interrupt(hw);
}

/* Update only the beacon interval register (interrupts masked meanwhile). */
void rtl92ce_set_beacon_interval(struct ieee80211_hw *hw)
{
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
        u16 bcn_interval = mac->beacon_interval;

        RT_TRACE(rtlpriv, COMP_BEACON, DBG_DMESG,
                 "beacon_interval:%d\n", bcn_interval);
        rtl92ce_disable_interrupt(hw);
        rtl_write_word(rtlpriv, REG_BCN_INTERVAL, bcn_interval);
        rtl92ce_enable_interrupt(hw);
}

/*
 * rtl92ce_update_interrupt_mask - add/remove bits in irq_mask[0] and
 * re-program the hardware interrupt mask registers.
 */
void rtl92ce_update_interrupt_mask(struct ieee80211_hw *hw,
                                   u32 add_msr, u32 rm_msr)
{
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));

        RT_TRACE(rtlpriv, COMP_INTR, DBG_LOUD,
                 "add_msr:%x, rm_msr:%x\n", add_msr, rm_msr);

        if (add_msr)
                rtlpci->irq_mask[0] |= add_msr;
        if (rm_msr)
                rtlpci->irq_mask[0] &= (~rm_msr);
        /* Re-program HIMR/HIMRE by toggling the interrupt enables. */
        rtl92ce_disable_interrupt(hw);
        rtl92ce_enable_interrupt(hw);
}

/*
 * _rtl92ce_read_txpower_info_from_hwpg - parse per-channel-group TX power
 * calibration (CCK, HT40 1S/2S, HT20/legacy diffs, power limits, TSSI,
 * thermal meter) from the efuse/EEPROM shadow map into rtlefuse.
 *
 * @autoload_fail: when true, every field falls back to its EEPROM_DEFAULT_*
 *                 value instead of reading @hwinfo.
 */
static void _rtl92ce_read_txpower_info_from_hwpg(struct ieee80211_hw *hw,
                                                 bool autoload_fail,
                                                 u8 *hwinfo)
{
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
        u8 rf_path, index, tempval;
        u16 i;

        /* Raw per-group CCK and HT40-1S power levels for both RF paths. */
        for (rf_path = 0; rf_path < 2; rf_path++) {
                for (i = 0; i < 3; i++) {
                        if (!autoload_fail) {
                                rtlefuse->
                                    eeprom_chnlarea_txpwr_cck[rf_path][i] =
                                    hwinfo[EEPROM_TXPOWERCCK +
                                           rf_path * 3 + i];
                                rtlefuse->
                                    eeprom_chnlarea_txpwr_ht40_1s[rf_path][i] =
                                    hwinfo[EEPROM_TXPOWERHT40_1S +
                                           rf_path * 3 + i];
                        } else {
                                rtlefuse->
                                    eeprom_chnlarea_txpwr_cck[rf_path][i] =
                                    EEPROM_DEFAULT_TXPOWERLEVEL;
                                rtlefuse->
                                    eeprom_chnlarea_txpwr_ht40_1s[rf_path][i] =
                                    EEPROM_DEFAULT_TXPOWERLEVEL;
                        }
                }
        }

        /* HT40 2S diff nibbles: low nibble = path A, high nibble = path B. */
        for (i = 0; i < 3; i++) {
                if (!autoload_fail)
                        tempval = hwinfo[EEPROM_TXPOWERHT40_2SDIFF + i];
                else
                        tempval = EEPROM_DEFAULT_HT40_2SDIFF;
                rtlefuse->eprom_chnl_txpwr_ht40_2sdf[RF90_PATH_A][i] =
                    (tempval & 0xf);
                rtlefuse->eprom_chnl_txpwr_ht40_2sdf[RF90_PATH_B][i] =
                    ((tempval & 0xf0) >> 4);
        }

        for (rf_path = 0; rf_path < 2; rf_path++)
                for (i = 0; i < 3; i++)
                        RTPRINT(rtlpriv, FINIT, INIT_EEPROM,
                                "RF(%d) EEPROM CCK Area(%d) = 0x%x\n",
                                rf_path, i,
                                rtlefuse->
                                eeprom_chnlarea_txpwr_cck[rf_path][i]);
        for (rf_path = 0; rf_path < 2; rf_path++)
                for (i = 0; i < 3; i++)
                        RTPRINT(rtlpriv, FINIT, INIT_EEPROM,
                                "RF(%d) EEPROM HT40 1S Area(%d) = 0x%x\n",
                                rf_path, i,
                                rtlefuse->
                                eeprom_chnlarea_txpwr_ht40_1s[rf_path][i]);
        for (rf_path = 0; rf_path < 2; rf_path++)
                for (i = 0; i < 3; i++)
                        RTPRINT(rtlpriv, FINIT, INIT_EEPROM,
                                "RF(%d) EEPROM HT40 2S Diff Area(%d) = 0x%x\n",
                                rf_path, i,
                                rtlefuse->
                                eprom_chnl_txpwr_ht40_2sdf[rf_path][i]);

        /* Expand the 3 channel groups into per-channel (14) power levels;
         * HT40 2S = HT40 1S minus the 2S diff, clamped at 0. */
        for (rf_path = 0; rf_path < 2; rf_path++) {
                for (i = 0; i < 14; i++) {
                        index = _rtl92c_get_chnl_group((u8) i);

                        rtlefuse->txpwrlevel_cck[rf_path][i] =
                            rtlefuse->
                            eeprom_chnlarea_txpwr_cck[rf_path][index];
                        rtlefuse->txpwrlevel_ht40_1s[rf_path][i] =
                            rtlefuse->
                            eeprom_chnlarea_txpwr_ht40_1s[rf_path][index];

                        if ((rtlefuse->
                             eeprom_chnlarea_txpwr_ht40_1s[rf_path][index] -
                             rtlefuse->
                             eprom_chnl_txpwr_ht40_2sdf[rf_path][index])
                            > 0) {
                                rtlefuse->txpwrlevel_ht40_2s[rf_path][i] =
                                    rtlefuse->
                                    eeprom_chnlarea_txpwr_ht40_1s[rf_path]
                                    [index] -
                                    rtlefuse->
                                    eprom_chnl_txpwr_ht40_2sdf[rf_path]
                                    [index];
                        } else {
                                rtlefuse->txpwrlevel_ht40_2s[rf_path][i] = 0;
                        }
                }

                for (i = 0; i < 14; i++) {
                        RTPRINT(rtlpriv, FINIT, INIT_TXPOWER,
                                "RF(%d)-Ch(%d) [CCK / HT40_1S / HT40_2S] = [0x%x / 0x%x / 0x%x]\n",
                                rf_path, i,
                                rtlefuse->txpwrlevel_cck[rf_path][i],
                                rtlefuse->txpwrlevel_ht40_1s[rf_path][i],
                                rtlefuse->txpwrlevel_ht40_2s[rf_path][i]);
                }
        }

        /* Per-group HT40/HT20 power limits. */
        for (i = 0; i < 3; i++) {
                if (!autoload_fail) {
                        rtlefuse->eeprom_pwrlimit_ht40[i] =
                            hwinfo[EEPROM_TXPWR_GROUP + i];
                        rtlefuse->eeprom_pwrlimit_ht20[i] =
                            hwinfo[EEPROM_TXPWR_GROUP + 3 + i];
                } else {
                        rtlefuse->eeprom_pwrlimit_ht40[i] = 0;
                        rtlefuse->eeprom_pwrlimit_ht20[i] = 0;
                }
        }

        /* Split limit nibbles per path: low = path A, high = path B. */
        for (rf_path = 0; rf_path < 2; rf_path++) {
                for (i = 0; i < 14; i++) {
                        index = _rtl92c_get_chnl_group((u8) i);

                        if (rf_path == RF90_PATH_A) {
                                rtlefuse->pwrgroup_ht20[rf_path][i] =
                                    (rtlefuse->eeprom_pwrlimit_ht20[index]
                                     & 0xf);
                                rtlefuse->pwrgroup_ht40[rf_path][i] =
                                    (rtlefuse->eeprom_pwrlimit_ht40[index]
                                     & 0xf);
                        } else if (rf_path == RF90_PATH_B) {
                                rtlefuse->pwrgroup_ht20[rf_path][i] =
                                    ((rtlefuse->eeprom_pwrlimit_ht20[index]
                                      & 0xf0) >> 4);
                                rtlefuse->pwrgroup_ht40[rf_path][i] =
                                    ((rtlefuse->eeprom_pwrlimit_ht40[index]
                                      & 0xf0) >> 4);
                        }

                        RTPRINT(rtlpriv, FINIT, INIT_TXPOWER,
                                "RF-%d pwrgroup_ht20[%d] = 0x%x\n",
                                rf_path, i,
                                rtlefuse->pwrgroup_ht20[rf_path][i]);
                        RTPRINT(rtlpriv, FINIT, INIT_TXPOWER,
                                "RF-%d pwrgroup_ht40[%d] = 0x%x\n",
                                rf_path, i,
                                rtlefuse->pwrgroup_ht40[rf_path][i]);
                }
        }

        /* HT20 and legacy-OFDM diffs; HT20 diff is sign-extended from 4
         * bits (BIT(3) is the sign). */
        for (i = 0; i < 14; i++) {
                index = _rtl92c_get_chnl_group((u8) i);

                if (!autoload_fail)
                        tempval = hwinfo[EEPROM_TXPOWERHT20DIFF + index];
                else
                        tempval = EEPROM_DEFAULT_HT20_DIFF;

                rtlefuse->txpwr_ht20diff[RF90_PATH_A][i] = (tempval & 0xF);
                rtlefuse->txpwr_ht20diff[RF90_PATH_B][i] =
                    ((tempval >> 4) & 0xF);

                if (rtlefuse->txpwr_ht20diff[RF90_PATH_A][i] & BIT(3))
                        rtlefuse->txpwr_ht20diff[RF90_PATH_A][i] |= 0xF0;

                if (rtlefuse->txpwr_ht20diff[RF90_PATH_B][i] & BIT(3))
                        rtlefuse->txpwr_ht20diff[RF90_PATH_B][i] |= 0xF0;

                index = _rtl92c_get_chnl_group((u8) i);

                if (!autoload_fail)
                        tempval = hwinfo[EEPROM_TXPOWER_OFDMDIFF + index];
                else
                        tempval = EEPROM_DEFAULT_LEGACYHTTXPOWERDIFF;

                rtlefuse->txpwr_legacyhtdiff[RF90_PATH_A][i] =
                    (tempval & 0xF);
                rtlefuse->txpwr_legacyhtdiff[RF90_PATH_B][i] =
                    ((tempval >> 4) & 0xF);
        }

        rtlefuse->legacy_ht_txpowerdiff =
            rtlefuse->txpwr_legacyhtdiff[RF90_PATH_A][7];

        for (i = 0; i < 14; i++)
                RTPRINT(rtlpriv, FINIT, INIT_TXPOWER,
                        "RF-A Ht20 to HT40 Diff[%d] = 0x%x\n",
                        i, rtlefuse->txpwr_ht20diff[RF90_PATH_A][i]);
        for (i = 0; i < 14; i++)
                RTPRINT(rtlpriv, FINIT, INIT_TXPOWER,
                        "RF-A Legacy to Ht40 Diff[%d] = 0x%x\n",
                        i, rtlefuse->txpwr_legacyhtdiff[RF90_PATH_A][i]);
        for (i = 0; i < 14; i++)
                RTPRINT(rtlpriv, FINIT, INIT_TXPOWER,
                        "RF-B Ht20 to HT40 Diff[%d] = 0x%x\n",
                        i, rtlefuse->txpwr_ht20diff[RF90_PATH_B][i]);
        for (i = 0; i < 14; i++)
                RTPRINT(rtlpriv, FINIT, INIT_TXPOWER,
                        "RF-B Legacy to HT40 Diff[%d] = 0x%x\n",
                        i, rtlefuse->txpwr_legacyhtdiff[RF90_PATH_B][i]);

        if (!autoload_fail)
                rtlefuse->eeprom_regulatory = (hwinfo[RF_OPTION1] & 0x7);
        else
                rtlefuse->eeprom_regulatory = 0;
        RTPRINT(rtlpriv, FINIT, INIT_TXPOWER,
                "eeprom_regulatory = 0x%x\n", rtlefuse->eeprom_regulatory);

        if (!autoload_fail) {
                rtlefuse->eeprom_tssi[RF90_PATH_A] = hwinfo[EEPROM_TSSI_A];
                rtlefuse->eeprom_tssi[RF90_PATH_B] = hwinfo[EEPROM_TSSI_B];
        } else {
                rtlefuse->eeprom_tssi[RF90_PATH_A] = EEPROM_DEFAULT_TSSI;
                rtlefuse->eeprom_tssi[RF90_PATH_B] = EEPROM_DEFAULT_TSSI;
        }
        RTPRINT(rtlpriv, FINIT, INIT_TXPOWER,
                "TSSI_A = 0x%x, TSSI_B = 0x%x\n",
                rtlefuse->eeprom_tssi[RF90_PATH_A],
                rtlefuse->eeprom_tssi[RF90_PATH_B]);

        if (!autoload_fail)
                tempval = hwinfo[EEPROM_THERMAL_METER];
        else
                tempval = EEPROM_DEFAULT_THERMALMETER;
        rtlefuse->eeprom_thermalmeter = (tempval & 0x1f);

        /* 0x1f means "not calibrated": skip APK thermal compensation. */
        if (rtlefuse->eeprom_thermalmeter == 0x1f || autoload_fail)
                rtlefuse->apk_thermalmeterignore = true;

        rtlefuse->thermalmeter[0] = rtlefuse->eeprom_thermalmeter;
        RTPRINT(rtlpriv, FINIT, INIT_TXPOWER,
                "thermalmeter = 0x%x\n", rtlefuse->eeprom_thermalmeter);
}

/*
 * _rtl92ce_read_adapter_info - load the efuse/EEPROM shadow map, validate
 * the EEPROM ID, and populate VID/DID, MAC address, TX power calibration,
 * BT coexistence options and OEM customer ID.
 */
static void _rtl92ce_read_adapter_info(struct ieee80211_hw *hw)
{
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
        struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
        u16 i, usvalue;
        u8 hwinfo[HWSET_MAX_SIZE];
        u16 eeprom_id;

        if (rtlefuse->epromtype == EEPROM_BOOT_EFUSE) {
                rtl_efuse_shadow_map_update(hw);

                memcpy((void *)hwinfo,
                       (void *)&rtlefuse->efuse_map[EFUSE_INIT_MAP][0],
                       HWSET_MAX_SIZE);
        } else if (rtlefuse->epromtype == EEPROM_93C46) {
                RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
                         "RTL819X Not boot from eeprom, check it !!");
        }

        RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_DMESG, "MAP",
                      hwinfo, HWSET_MAX_SIZE);

        eeprom_id = *((u16 *)&hwinfo[0]);
        if (eeprom_id != RTL8190_EEPROM_ID) {
                RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
                         "EEPROM ID(%#x) is invalid!!\n", eeprom_id);
                rtlefuse->autoload_failflag = true;
        } else {
                RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Autoload OK\n");
                rtlefuse->autoload_failflag = false;
        }

        if (rtlefuse->autoload_failflag)
                return;

        rtlefuse->eeprom_vid = *(u16 *)&hwinfo[EEPROM_VID];
        rtlefuse->eeprom_did = *(u16 *)&hwinfo[EEPROM_DID];
        rtlefuse->eeprom_svid = *(u16 *)&hwinfo[EEPROM_SVID];
        rtlefuse->eeprom_smid = *(u16 *)&hwinfo[EEPROM_SMID];
        RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
                 "EEPROMId = 0x%4x\n", eeprom_id);
        RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
                 "EEPROM VID = 0x%4x\n", rtlefuse->eeprom_vid);
        RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
                 "EEPROM DID = 0x%4x\n", rtlefuse->eeprom_did);
        RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
                 "EEPROM SVID = 0x%4x\n", rtlefuse->eeprom_svid);
        RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
                 "EEPROM SMID = 0x%4x\n",
                 rtlefuse->eeprom_smid);

        /* MAC address is stored little-endian, copied 16 bits at a time. */
        for (i = 0; i < 6; i += 2) {
                usvalue = *(u16 *)&hwinfo[EEPROM_MAC_ADDR + i];
                *((u16 *) (&rtlefuse->dev_addr[i])) = usvalue;
        }

        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "%pM\n", rtlefuse->dev_addr);

        _rtl92ce_read_txpower_info_from_hwpg(hw,
                                             rtlefuse->autoload_failflag,
                                             hwinfo);

        rtl8192ce_read_bt_coexist_info_from_hwpg(hw,
                                                 rtlefuse->autoload_failflag,
                                                 hwinfo);

        /* NOTE(review): the "*&" below is a redundant deref-of-address. */
        rtlefuse->eeprom_channelplan = *&hwinfo[EEPROM_CHANNELPLAN];
        rtlefuse->eeprom_version = *(u16 *)&hwinfo[EEPROM_VERSION];
        rtlefuse->txpwr_fromeprom = true;
        rtlefuse->eeprom_oemid = *&hwinfo[EEPROM_CUSTOMER_ID];

        RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
                 "EEPROM Customer ID: 0x%2x\n", rtlefuse->eeprom_oemid);

        /* set channel paln to world wide 13 */
        rtlefuse->channel_plan = COUNTRY_CODE_WORLD_WIDE_13;

        /* Map the EEPROM customer ID to a driver OEM ID (HP detected via
         * PCI subsystem 103C:1629 on device 0x8176). */
        if (rtlhal->oem_id == RT_CID_DEFAULT) {
                switch (rtlefuse->eeprom_oemid) {
                case EEPROM_CID_DEFAULT:
                        if (rtlefuse->eeprom_did == 0x8176) {
                                if ((rtlefuse->eeprom_svid == 0x103C &&
                                     rtlefuse->eeprom_smid == 0x1629))
                                        rtlhal->oem_id = RT_CID_819x_HP;
                                else
                                        rtlhal->oem_id = RT_CID_DEFAULT;
                        } else {
                                rtlhal->oem_id = RT_CID_DEFAULT;
                        }
                        break;
                case EEPROM_CID_TOSHIBA:
                        rtlhal->oem_id = RT_CID_TOSHIBA;
                        break;
                case EEPROM_CID_QMI:
                        rtlhal->oem_id = RT_CID_819x_QMI;
                        break;
                case EEPROM_CID_WHQL:
                default:
                        rtlhal->oem_id = RT_CID_DEFAULT;
                        break;
                }
        }
}

/* Apply OEM-specific quirks; currently only HP needs open-drain LEDs. */
static void _rtl92ce_hal_customized_behavior(struct ieee80211_hw *hw)
{
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
        struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));

        switch (rtlhal->oem_id) {
        case RT_CID_819x_HP:
                pcipriv->ledctl.led_opendrain = true;
                break;
        case RT_CID_819x_Lenovo:
        case RT_CID_DEFAULT:
        case RT_CID_TOSHIBA:
        case RT_CID_CCX:
        case RT_CID_819x_Acer:
        case RT_CID_WHQL:
        default:
                break;
        }
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
                 "RT Customized ID: 0x%02X\n", rtlhal->oem_id);
}

/*
 * rtl92ce_read_eeprom_info - detect chip version and boot source
 * (EEPROM vs efuse), then read the adapter info if autoload succeeded.
 */
void rtl92ce_read_eeprom_info(struct ieee80211_hw *hw)
{
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
        struct rtl_phy *rtlphy = &(rtlpriv->phy);
        struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
        u8 tmp_u1b;

        rtlhal->version = _rtl92ce_read_chip_version(hw);
        if (get_rf_type(rtlphy) == RF_1T1R)
                rtlpriv->dm.rfpath_rxenable[0] = true;
        else
                rtlpriv->dm.rfpath_rxenable[0] =
                    rtlpriv->dm.rfpath_rxenable[1] = true;
        RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "VersionID = 0x%4x\n",
                 rtlhal->version);

        /* REG_9346CR bit 4: boot source; bit 5: autoload result. */
        tmp_u1b = rtl_read_byte(rtlpriv, REG_9346CR);
        if (tmp_u1b & BIT(4)) {
                RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "Boot from EEPROM\n");
                rtlefuse->epromtype = EEPROM_93C46;
        } else {
                RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "Boot from EFUSE\n");
                rtlefuse->epromtype = EEPROM_BOOT_EFUSE;
        }
        if (tmp_u1b & BIT(5)) {
                RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Autoload OK\n");
                rtlefuse->autoload_failflag = false;
                _rtl92ce_read_adapter_info(hw);
        } else {
                RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Autoload ERR!!\n");
        }
        _rtl92ce_hal_customized_behavior(hw);
}

/*
 * rtl92ce_update_hal_rate_table - legacy (non-RA-mask) rate adaptation:
 * build a rate bitmap for the station and write it to REG_ARFR0.
 */
static void rtl92ce_update_hal_rate_table(struct ieee80211_hw *hw,
                                          struct ieee80211_sta *sta)
{
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
        struct rtl_phy *rtlphy = &(rtlpriv->phy);
        struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
        struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
        u32 ratr_value;
        u8 ratr_index = 0;
        u8 nmode = mac->ht_enable;
        u8 mimo_ps = IEEE80211_SMPS_OFF;
        u16 shortgi_rate;
        u32 tmp_ratr_value;
        u8 curtxbw_40mhz = mac->bw_40;
        u8 curshortgi_40mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ?
                               1 : 0;
        u8 curshortgi_20mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ?
                               1 : 0;
        enum wireless_mode wirelessmode = mac->mode;

        /* Legacy supported rates; 5 GHz rates start at bit 4. */
        if (rtlhal->current_bandtype == BAND_ON_5G)
                ratr_value = sta->supp_rates[1] << 4;
        else
                ratr_value = sta->supp_rates[0];
        if (mac->opmode == NL80211_IFTYPE_ADHOC)
                ratr_value = 0xfff;
        /* MCS 0-15 occupy bits 12-27 of the rate bitmap. */
        ratr_value |= (sta->ht_cap.mcs.rx_mask[1] << 20 |
                       sta->ht_cap.mcs.rx_mask[0] << 12);
        switch (wirelessmode) {
        case WIRELESS_MODE_B:
                if (ratr_value & 0x0000000c)
                        ratr_value &= 0x0000000d;
                else
                        ratr_value &= 0x0000000f;
                break;
        case WIRELESS_MODE_G:
                ratr_value &= 0x00000FF5;
                break;
        case WIRELESS_MODE_N_24G:
        case WIRELESS_MODE_N_5G:
                nmode = 1;
                if (mimo_ps == IEEE80211_SMPS_STATIC) {
                        ratr_value &= 0x0007F005;
                } else {
                        u32 ratr_mask;

                        if (get_rf_type(rtlphy) == RF_1T2R ||
                            get_rf_type(rtlphy) == RF_1T1R)
                                ratr_mask = 0x000ff005;
                        else
                                ratr_mask = 0x0f0ff005;

                        ratr_value &= ratr_mask;
                }
                break;
        default:
                if (rtlphy->rf_type == RF_1T2R)
                        ratr_value &= 0x000ff0ff;
                else
                        ratr_value &= 0x0f0ff0ff;
                break;
        }

        /* Restrict rates while BT SCO/busy traffic shares the antenna. */
        if ((rtlpcipriv->bt_coexist.bt_coexistence) &&
            (rtlpcipriv->bt_coexist.bt_coexist_type == BT_CSR_BC4) &&
            (rtlpcipriv->bt_coexist.bt_cur_state) &&
            (rtlpcipriv->bt_coexist.bt_ant_isolation) &&
            ((rtlpcipriv->bt_coexist.bt_service == BT_SCO) ||
             (rtlpcipriv->bt_coexist.bt_service == BT_BUSY)))
                ratr_value &= 0x0fffcfc0;
        else
                ratr_value &= 0x0FFFFFFF;

        /* Short GI: set bit 28 and replicate the highest MCS index. */
        if (nmode && ((curtxbw_40mhz && curshortgi_40mhz) ||
                      (!curtxbw_40mhz && curshortgi_20mhz))) {
                ratr_value |= 0x10000000;
                tmp_ratr_value = (ratr_value >> 12);

                for (shortgi_rate = 15; shortgi_rate > 0; shortgi_rate--) {
                        if ((1 << shortgi_rate) & tmp_ratr_value)
                                break;
                }

                shortgi_rate = (shortgi_rate << 12) | (shortgi_rate << 8) |
                    (shortgi_rate << 4) | (shortgi_rate);
        }

        rtl_write_dword(rtlpriv, REG_ARFR0 + ratr_index * 4, ratr_value);

        RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG, "%x\n",
                 rtl_read_dword(rtlpriv, REG_ARFR0));
}

/*
 * rtl92ce_update_hal_rate_mask - firmware rate adaptation: build a
 * per-station RA mask, filtered by RSSI level, and push it via H2C.
 */
static void rtl92ce_update_hal_rate_mask(struct ieee80211_hw *hw,
                                         struct ieee80211_sta *sta,
                                         u8 rssi_level)
{
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        struct rtl_phy *rtlphy = &(rtlpriv->phy);
        struct
        rtl_mac *mac = rtl_mac(rtl_priv(hw));
        struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
        struct rtl_sta_info *sta_entry = NULL;
        u32 ratr_bitmap;
        u8 ratr_index;
        u8 curtxbw_40mhz = (sta->bandwidth >= IEEE80211_STA_RX_BW_40) ? 1 : 0;
        u8 curshortgi_40mhz = curtxbw_40mhz &&
                              (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ?
                                1 : 0;
        u8 curshortgi_20mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ?
                                1 : 0;
        enum wireless_mode wirelessmode = 0;
        bool shortgi = false;
        u8 rate_mask[5];
        u8 macid = 0;
        u8 mimo_ps = IEEE80211_SMPS_OFF;

        sta_entry = (struct rtl_sta_info *) sta->drv_priv;
        wirelessmode = sta_entry->wireless_mode;
        if (mac->opmode == NL80211_IFTYPE_STATION ||
            mac->opmode == NL80211_IFTYPE_MESH_POINT)
                curtxbw_40mhz = mac->bw_40;
        else if (mac->opmode == NL80211_IFTYPE_AP ||
                 mac->opmode == NL80211_IFTYPE_ADHOC)
                macid = sta->aid + 1;

        /* Legacy supported rates; 5 GHz rates start at bit 4. */
        if (rtlhal->current_bandtype == BAND_ON_5G)
                ratr_bitmap = sta->supp_rates[1] << 4;
        else
                ratr_bitmap = sta->supp_rates[0];
        if (mac->opmode == NL80211_IFTYPE_ADHOC)
                ratr_bitmap = 0xfff;
        /* MCS 0-15 occupy bits 12-27. */
        ratr_bitmap |= (sta->ht_cap.mcs.rx_mask[1] << 20 |
                        sta->ht_cap.mcs.rx_mask[0] << 12);

        /* Narrow the bitmap per wireless mode and RSSI level (higher
         * rssi_level keeps only the faster rates). */
        switch (wirelessmode) {
        case WIRELESS_MODE_B:
                ratr_index = RATR_INX_WIRELESS_B;
                if (ratr_bitmap & 0x0000000c)
                        ratr_bitmap &= 0x0000000d;
                else
                        ratr_bitmap &= 0x0000000f;
                break;
        case WIRELESS_MODE_G:
                ratr_index = RATR_INX_WIRELESS_GB;

                if (rssi_level == 1)
                        ratr_bitmap &= 0x00000f00;
                else if (rssi_level == 2)
                        ratr_bitmap &= 0x00000ff0;
                else
                        ratr_bitmap &= 0x00000ff5;
                break;
        case WIRELESS_MODE_A:
                ratr_index = RATR_INX_WIRELESS_A;
                ratr_bitmap &= 0x00000ff0;
                break;
        case WIRELESS_MODE_N_24G:
        case WIRELESS_MODE_N_5G:
                ratr_index = RATR_INX_WIRELESS_NGB;

                if (mimo_ps == IEEE80211_SMPS_STATIC) {
                        if (rssi_level == 1)
                                ratr_bitmap &= 0x00070000;
                        else if (rssi_level == 2)
                                ratr_bitmap &= 0x0007f000;
                        else
                                ratr_bitmap &= 0x0007f005;
                } else {
                        if (rtlphy->rf_type == RF_1T2R ||
                            rtlphy->rf_type == RF_1T1R) {
                                if (curtxbw_40mhz) {
                                        if (rssi_level == 1)
                                                ratr_bitmap &= 0x000f0000;
                                        else if (rssi_level == 2)
                                                ratr_bitmap &= 0x000ff000;
                                        else
                                                ratr_bitmap &= 0x000ff015;
                                } else {
                                        if (rssi_level == 1)
                                                ratr_bitmap &= 0x000f0000;
                                        else if (rssi_level == 2)
                                                ratr_bitmap &= 0x000ff000;
                                        else
                                                ratr_bitmap &= 0x000ff005;
                                }
                        } else {
                                if (curtxbw_40mhz) {
                                        if (rssi_level == 1)
                                                ratr_bitmap &= 0x0f0f0000;
                                        else if (rssi_level == 2)
                                                ratr_bitmap &= 0x0f0ff000;
                                        else
                                                ratr_bitmap &= 0x0f0ff015;
                                } else {
                                        if (rssi_level == 1)
                                                ratr_bitmap &= 0x0f0f0000;
                                        else if (rssi_level == 2)
                                                ratr_bitmap &= 0x0f0ff000;
                                        else
                                                ratr_bitmap &= 0x0f0ff005;
                                }
                        }
                }

                if ((curtxbw_40mhz && curshortgi_40mhz) ||
                    (!curtxbw_40mhz && curshortgi_20mhz)) {
                        if (macid == 0)
                                shortgi = true;
                        else if (macid == 1)
                                shortgi = false;
                }
                break;
        default:
                ratr_index = RATR_INX_WIRELESS_NGB;

                if (rtlphy->rf_type == RF_1T2R)
                        ratr_bitmap &= 0x000ff0ff;
                else
                        ratr_bitmap &= 0x0f0ff0ff;
                break;
        }
        sta_entry->ratr_index = ratr_index;

        RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG,
                 "ratr_bitmap :%x\n", ratr_bitmap);
        /* Pack bitmap + index into the first 4 bytes of the H2C payload;
         * byte 4 carries macid, short-GI flag (0x20) and enable bit 0x80. */
        *(u32 *)&rate_mask = (ratr_bitmap & 0x0fffffff) |
                             (ratr_index << 28);
        rate_mask[4] = macid | (shortgi ? 0x20 : 0x00) | 0x80;
        RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG,
                 "Rate_index:%x, ratr_val:%x, %5phC\n",
                 ratr_index, ratr_bitmap, rate_mask);
        rtl92c_fill_h2c_cmd(hw, H2C_RA_MASK, 5, rate_mask);

        if (macid != 0)
                sta_entry->ratr_index = ratr_index;
}

/* Dispatch to RA-mask (firmware) or rate-table (legacy) rate adaptation. */
void rtl92ce_update_hal_rate_tbl(struct ieee80211_hw *hw,
                struct ieee80211_sta *sta, u8 rssi_level)
{
        struct rtl_priv *rtlpriv = rtl_priv(hw);

        if (rtlpriv->dm.useramask)
                rtl92ce_update_hal_rate_mask(hw, sta, rssi_level);
        else
                rtl92ce_update_hal_rate_table(hw, sta);
}

/* Update slot time and SIFS; SIFS differs between HT and non-HT. */
void rtl92ce_update_channel_access_setting(struct ieee80211_hw *hw)
{
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
        u16 sifs_timer;

        rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SLOT_TIME,
                                      &mac->slot_time);
        if (!mac->ht_enable)
                sifs_timer = 0x0a0a;
        else
                sifs_timer = 0x1010;
        rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SIFS, (u8 *)&sifs_timer);
}

/*
 * rtl92ce_gpio_radio_on_off_checking - poll the RF-kill GPIO and update
 * the software radio state.
 *
 * Returns the new radio state (true = radio on); *valid is set to 1 when
 * the reading is usable.
 */
bool rtl92ce_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 *valid)
{
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
        struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
        enum rf_pwrstate e_rfpowerstate_toset;
        u8 u1tmp;
        bool actuallyset = false;
        unsigned long flag;

        if (rtlpci->being_init_adapter)
                return false;

        if (ppsc->swrf_processing)
                return false;

        /* Serialize against concurrent RF state changes. */
        spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flag);
        if (ppsc->rfchange_inprogress) {
                spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flag);
                return false;
        } else {
                ppsc->rfchange_inprogress = true;
                spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flag);
        }

        /* Route pin 3 to GPIO so the RF-kill switch can be sampled. */
        rtl_write_byte(rtlpriv, REG_MAC_PINMUX_CFG,
                       rtl_read_byte(rtlpriv, REG_MAC_PINMUX_CFG)&~(BIT(3)));

        u1tmp = rtl_read_byte(rtlpriv, REG_GPIO_IO_SEL);
        e_rfpowerstate_toset = (u1tmp & BIT(3)) ?
                                ERFON : ERFOFF;

        if ((ppsc->hwradiooff) && (e_rfpowerstate_toset == ERFON)) {
                RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
                         "GPIOChangeRF - HW Radio ON, RF ON\n");

                e_rfpowerstate_toset = ERFON;
                ppsc->hwradiooff = false;
                actuallyset = true;
        } else if (!ppsc->hwradiooff && (e_rfpowerstate_toset == ERFOFF)) {
                RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
                         "GPIOChangeRF - HW Radio OFF, RF OFF\n");

                e_rfpowerstate_toset = ERFOFF;
                ppsc->hwradiooff = true;
                actuallyset = true;
        }

        if (actuallyset) {
                spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flag);
                ppsc->rfchange_inprogress = false;
                spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flag);
        } else {
                if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_HALT_NIC)
                        RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);

                spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flag);
                ppsc->rfchange_inprogress = false;
                spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flag);
        }

        *valid = 1;
        return !ppsc->hwradiooff;
}

/*
 * rtl92ce_set_key - add or remove a hardware CAM security entry.
 *
 * @clear_all:  wipe the first 5 CAM entries and cached keys instead of
 *              programming a single key.
 * @is_wepkey / use_defaultkey: use the fixed default-key CAM addresses.
 * A zero-length cached key for @key_index means "delete this entry".
 */
void rtl92ce_set_key(struct ieee80211_hw *hw, u32 key_index,
                     u8 *p_macaddr, bool is_group, u8 enc_algo,
                     bool is_wepkey, bool clear_all)
{
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
        struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
        u8 *macaddr = p_macaddr;
        u32 entry_id = 0;
        bool is_pairwise = false;

        static u8 cam_const_addr[4][6] = {
                {0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
                {0x00, 0x00, 0x00, 0x00, 0x00, 0x01},
                {0x00, 0x00, 0x00, 0x00, 0x00, 0x02},
                {0x00, 0x00, 0x00, 0x00, 0x00, 0x03}
        };
        static u8 cam_const_broad[] = {
                0xff, 0xff, 0xff, 0xff, 0xff, 0xff
        };

        if (clear_all) {
                u8 idx = 0;
                u8 cam_offset = 0;
                u8 clear_number = 5;

                RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, "clear_all\n");

                for (idx = 0; idx < clear_number; idx++) {
                        rtl_cam_mark_invalid(hw, cam_offset + idx);
                        rtl_cam_empty_entry(hw, cam_offset + idx);

                        if (idx < 5) {
                                memset(rtlpriv->sec.key_buf[idx], 0,
                                       MAX_KEY_LEN);
                                rtlpriv->sec.key_len[idx] = 0;
                        }
                }
        } else {
                /* Translate the driver cipher ID into the CAM cipher ID. */
                switch (enc_algo) {
                case WEP40_ENCRYPTION:
                        enc_algo = CAM_WEP40;
                        break;
                case WEP104_ENCRYPTION:
                        enc_algo = CAM_WEP104;
                        break;
                case TKIP_ENCRYPTION:
                        enc_algo = CAM_TKIP;
                        break;
                case AESCCMP_ENCRYPTION:
                        enc_algo = CAM_AES;
                        break;
                default:
                        RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
                                 "switch case not processed\n");
                        enc_algo = CAM_TKIP;
                        break;
                }

                if (is_wepkey || rtlpriv->sec.use_defaultkey) {
                        macaddr = cam_const_addr[key_index];
                        entry_id = key_index;
                } else {
                        if (is_group) {
                                macaddr = cam_const_broad;
                                entry_id = key_index;
                        } else {
                                /* Pairwise key: AP/mesh allocates a free CAM
                                 * slot per peer; STA uses a fixed position. */
                                if (mac->opmode == NL80211_IFTYPE_AP ||
                                    mac->opmode == NL80211_IFTYPE_MESH_POINT) {
                                        entry_id = rtl_cam_get_free_entry(hw,
                                                                 p_macaddr);
                                        if (entry_id >=  TOTAL_CAM_ENTRY) {
                                                RT_TRACE(rtlpriv, COMP_SEC,
                                                         DBG_EMERG,
                                                         "Can not find free hw security cam entry\n");
                                                return;
                                        }
                                } else {
                                        entry_id = CAM_PAIRWISE_KEY_POSITION;
                                }

                                key_index = PAIRWISE_KEYIDX;
                                is_pairwise = true;
                        }
                }

                if (rtlpriv->sec.key_len[key_index] == 0) {
                        RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
                                 "delete one entry, entry_id is %d\n",
                                 entry_id);
                        if (mac->opmode == NL80211_IFTYPE_AP ||
                            mac->opmode == NL80211_IFTYPE_MESH_POINT)
                                rtl_cam_del_entry(hw, p_macaddr);
                        rtl_cam_delete_one_entry(hw, p_macaddr, entry_id);
                } else {
                        RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
                                 "The insert KEY length is %d\n",
                                 rtlpriv->sec.key_len[PAIRWISE_KEYIDX]);
                        RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
                                 "The insert KEY is %x %x\n",
                                 rtlpriv->sec.key_buf[0][0],
                                 rtlpriv->sec.key_buf[0][1]);
                        RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
                                 "add one entry\n");
                        if (is_pairwise) {
                                RT_PRINT_DATA(rtlpriv, COMP_SEC, DBG_LOUD,
                                              "Pairwise Key content",
                                              rtlpriv->sec.pairwise_key,
                                              rtlpriv->sec.
                                              key_len[PAIRWISE_KEYIDX]);

                                RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
                                         "set Pairwise key\n");

                                rtl_cam_add_one_entry(hw, macaddr, key_index,
                                                      entry_id, enc_algo,
                                                      CAM_CONFIG_NO_USEDK,
                                                      rtlpriv->sec.
                                                      key_buf[key_index]);
                        } else {
                                RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
                                         "set group key\n");

                                /* Ad-hoc also installs the group key as the
                                 * pairwise key for our own address. */
                                if (mac->opmode == NL80211_IFTYPE_ADHOC) {
                                        rtl_cam_add_one_entry(hw,
                                                rtlefuse->dev_addr,
                                                PAIRWISE_KEYIDX,
                                                CAM_PAIRWISE_KEY_POSITION,
                                                enc_algo,
                                                CAM_CONFIG_NO_USEDK,
                                                rtlpriv->sec.key_buf
                                                [entry_id]);
                                }

                                rtl_cam_add_one_entry(hw, macaddr, key_index,
                                                      entry_id, enc_algo,
                                                      CAM_CONFIG_NO_USEDK,
                                                      rtlpriv->sec.key_buf[entry_id]);
                        }
                }
        }
}

/*
 * rtl8192ce_bt_var_init - derive the runtime BT coexistence settings from
 * the efuse values and module parameters read earlier.
 */
static void rtl8192ce_bt_var_init(struct ieee80211_hw *hw)
{
        struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);

        rtlpcipriv->bt_coexist.bt_coexistence =
                        rtlpcipriv->bt_coexist.eeprom_bt_coexist;
        rtlpcipriv->bt_coexist.bt_ant_num =
                        rtlpcipriv->bt_coexist.eeprom_bt_ant_num;
        rtlpcipriv->bt_coexist.bt_coexist_type =
                        rtlpcipriv->bt_coexist.eeprom_bt_type;

        /* reg_bt_iso == 2 means "take isolation from efuse". */
        if (rtlpcipriv->bt_coexist.reg_bt_iso == 2)
                rtlpcipriv->bt_coexist.bt_ant_isolation =
                        rtlpcipriv->bt_coexist.eeprom_bt_ant_isol;
        else
                rtlpcipriv->bt_coexist.bt_ant_isolation =
                        rtlpcipriv->bt_coexist.reg_bt_iso;

        rtlpcipriv->bt_coexist.bt_radio_shared_type =
                        rtlpcipriv->bt_coexist.eeprom_bt_radio_shared;

        if (rtlpcipriv->bt_coexist.bt_coexistence) {
                if (rtlpcipriv->bt_coexist.reg_bt_sco == 1)
                        rtlpcipriv->bt_coexist.bt_service = BT_OTHER_ACTION;
                else if (rtlpcipriv->bt_coexist.reg_bt_sco == 2)
                        rtlpcipriv->bt_coexist.bt_service = BT_SCO;
                else if (rtlpcipriv->bt_coexist.reg_bt_sco == 4)
                        rtlpcipriv->bt_coexist.bt_service = BT_BUSY;
                else if (rtlpcipriv->bt_coexist.reg_bt_sco == 5)
                        rtlpcipriv->bt_coexist.bt_service = BT_OTHERBUSY;
                else
                        rtlpcipriv->bt_coexist.bt_service = BT_IDLE;

                rtlpcipriv->bt_coexist.bt_edca_ul = 0;
                rtlpcipriv->bt_coexist.bt_edca_dl = 0;
                rtlpcipriv->bt_coexist.bt_rssi_state = 0xff;
        }
}

/*
 * rtl8192ce_read_bt_coexist_info_from_hwpg - read the BT coexistence
 * options from the efuse map (RF_OPTION1/RF_OPTION4), falling back to
 * safe defaults when autoload failed.
 */
void rtl8192ce_read_bt_coexist_info_from_hwpg(struct ieee80211_hw *hw,
                                              bool auto_load_fail, u8 *hwinfo)
{
        struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
        u8 val;

        if (!auto_load_fail) {
                rtlpcipriv->bt_coexist.eeprom_bt_coexist =
                                        ((hwinfo[RF_OPTION1] & 0xe0) >> 5);
                val = hwinfo[RF_OPTION4];
rtlpcipriv->bt_coexist.eeprom_bt_type = ((val & 0xe) >> 1); rtlpcipriv->bt_coexist.eeprom_bt_ant_num = (val & 0x1); rtlpcipriv->bt_coexist.eeprom_bt_ant_isol = ((val & 0x10) >> 4); rtlpcipriv->bt_coexist.eeprom_bt_radio_shared = ((val & 0x20) >> 5); } else { rtlpcipriv->bt_coexist.eeprom_bt_coexist = 0; rtlpcipriv->bt_coexist.eeprom_bt_type = BT_2WIRE; rtlpcipriv->bt_coexist.eeprom_bt_ant_num = ANT_X2; rtlpcipriv->bt_coexist.eeprom_bt_ant_isol = 0; rtlpcipriv->bt_coexist.eeprom_bt_radio_shared = BT_RADIO_SHARED; } rtl8192ce_bt_var_init(hw); } void rtl8192ce_bt_reg_init(struct ieee80211_hw *hw) { struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw); /* 0:Low, 1:High, 2:From Efuse. */ rtlpcipriv->bt_coexist.reg_bt_iso = 2; /* 0:Idle, 1:None-SCO, 2:SCO, 3:From Counter. */ rtlpcipriv->bt_coexist.reg_bt_sco = 3; /* 0:Disable BT control A-MPDU, 1:Enable BT control A-MPDU. */ rtlpcipriv->bt_coexist.reg_bt_sco = 0; } void rtl8192ce_bt_hw_init(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_phy *rtlphy = &(rtlpriv->phy); struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw); u8 u1_tmp; if (rtlpcipriv->bt_coexist.bt_coexistence && ((rtlpcipriv->bt_coexist.bt_coexist_type == BT_CSR_BC4) || rtlpcipriv->bt_coexist.bt_coexist_type == BT_CSR_BC8)) { if (rtlpcipriv->bt_coexist.bt_ant_isolation) rtl_write_byte(rtlpriv, REG_GPIO_MUXCFG, 0xa0); u1_tmp = rtl_read_byte(rtlpriv, 0x4fd) & BIT_OFFSET_LEN_MASK_32(0, 1); u1_tmp = u1_tmp | ((rtlpcipriv->bt_coexist.bt_ant_isolation == 1) ? 0 : BIT_OFFSET_LEN_MASK_32(1, 1)) | ((rtlpcipriv->bt_coexist.bt_service == BT_SCO) ? 0 : BIT_OFFSET_LEN_MASK_32(2, 1)); rtl_write_byte(rtlpriv, 0x4fd, u1_tmp); rtl_write_dword(rtlpriv, REG_BT_COEX_TABLE+4, 0xaaaa9aaa); rtl_write_dword(rtlpriv, REG_BT_COEX_TABLE+8, 0xffbd0040); rtl_write_dword(rtlpriv, REG_BT_COEX_TABLE+0xc, 0x40000010); /* Config to 1T1R. 
*/ if (rtlphy->rf_type == RF_1T1R) { u1_tmp = rtl_read_byte(rtlpriv, ROFDM0_TRXPATHENABLE); u1_tmp &= ~(BIT_OFFSET_LEN_MASK_32(1, 1)); rtl_write_byte(rtlpriv, ROFDM0_TRXPATHENABLE, u1_tmp); u1_tmp = rtl_read_byte(rtlpriv, ROFDM1_TRXPATHENABLE); u1_tmp &= ~(BIT_OFFSET_LEN_MASK_32(1, 1)); rtl_write_byte(rtlpriv, ROFDM1_TRXPATHENABLE, u1_tmp); } } } void rtl92ce_suspend(struct ieee80211_hw *hw) { } void rtl92ce_resume(struct ieee80211_hw *hw) { } /* Turn on AAP (RCR:bit 0) for promicuous mode. */ void rtl92ce_allow_all_destaddr(struct ieee80211_hw *hw, bool allow_all_da, bool write_into_reg) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); if (allow_all_da) {/* Set BIT0 */ rtlpci->receive_config |= RCR_AAP; } else {/* Clear BIT0 */ rtlpci->receive_config &= ~RCR_AAP; } if (write_into_reg) rtl_write_dword(rtlpriv, REG_RCR, rtlpci->receive_config); RT_TRACE(rtlpriv, COMP_TURBO | COMP_INIT, DBG_LOUD, "receive_config=0x%08X, write_into_reg=%d\n", rtlpci->receive_config, write_into_reg); }
gpl-2.0
ptmr3/smdk4412
arch/arm/mach-u300/core.c
2426
54714
/*
 *
 * arch/arm/mach-u300/core.c
 *
 *
 * Copyright (C) 2007-2010 ST-Ericsson SA
 * License terms: GNU General Public License (GPL) version 2
 * Core platform support, IRQ handling and device definitions.
 * Author: Linus Walleij <linus.walleij@stericsson.com>
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/bitops.h>
#include <linux/device.h>
#include <linux/mm.h>
#include <linux/termios.h>
#include <linux/dmaengine.h>
#include <linux/amba/bus.h>
#include <linux/amba/serial.h>
#include <linux/platform_device.h>
#include <linux/gpio.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/fsmc.h>

#include <asm/types.h>
#include <asm/setup.h>
#include <asm/memory.h>
#include <asm/hardware/vic.h>
#include <asm/mach/map.h>
#include <asm/mach/irq.h>

#include <mach/coh901318.h>
#include <mach/hardware.h>
#include <mach/syscon.h>
#include <mach/dma_channels.h>

#include "clock.h"
#include "mmc.h"
#include "spi.h"
#include "i2c.h"

/*
 * Static I/O mappings that are needed for booting the U300 platforms. The
 * only things we need are the areas where we find the timer, syscon and
 * intcon, since the remaining device drivers will map their own memory
 * physical to virtual as the need arise.
 */
static struct map_desc u300_io_desc[] __initdata = {
	{
		.virtual	= U300_SLOW_PER_VIRT_BASE,
		.pfn		= __phys_to_pfn(U300_SLOW_PER_PHYS_BASE),
		.length		= SZ_64K,
		.type		= MT_DEVICE,
	},
	{
		.virtual	= U300_AHB_PER_VIRT_BASE,
		.pfn		= __phys_to_pfn(U300_AHB_PER_PHYS_BASE),
		.length		= SZ_32K,
		.type		= MT_DEVICE,
	},
	{
		.virtual	= U300_FAST_PER_VIRT_BASE,
		.pfn		= __phys_to_pfn(U300_FAST_PER_PHYS_BASE),
		.length		= SZ_32K,
		.type		= MT_DEVICE,
	},
	{
		.virtual	= 0xffff2000, /* TCM memory */
		.pfn		= __phys_to_pfn(0xffff2000),
		.length		= SZ_16K,
		.type		= MT_DEVICE,
	},

	/*
	 * This overlaps with the IRQ vectors etc at 0xffff0000, so these
	 * may have to be moved to 0x00000000 in order to use the ROM.
	 */
	/*
	{
		.virtual	= U300_BOOTROM_VIRT_BASE,
		.pfn		= __phys_to_pfn(U300_BOOTROM_PHYS_BASE),
		.length		= SZ_64K,
		.type		= MT_ROM,
	},
	*/
};

/* Install the static boot-time I/O mappings declared above. */
void __init u300_map_io(void)
{
	iotable_init(u300_io_desc, ARRAY_SIZE(u300_io_desc));
}

/*
 * Declaration of devices found on the U300 board and
 * their respective memory locations.
 */

static struct amba_pl011_data uart0_plat_data = {
#ifdef CONFIG_COH901318
	.dma_filter = coh901318_filter_id,
	.dma_rx_param = (void *) U300_DMA_UART0_RX,
	.dma_tx_param = (void *) U300_DMA_UART0_TX,
#endif
};

static struct amba_device uart0_device = {
	.dev = {
		.coherent_dma_mask = ~0,
		.init_name = "uart0", /* Slow device at 0x3000 offset */
		.platform_data = &uart0_plat_data,
	},
	.res = {
		.start = U300_UART0_BASE,
		.end   = U300_UART0_BASE + SZ_4K - 1,
		.flags = IORESOURCE_MEM,
	},
	.irq = { IRQ_U300_UART0, NO_IRQ },
};

/* The U335 have an additional UART1 on the APP CPU */
#ifdef CONFIG_MACH_U300_BS335
static struct amba_pl011_data uart1_plat_data = {
#ifdef CONFIG_COH901318
	.dma_filter = coh901318_filter_id,
	.dma_rx_param = (void *) U300_DMA_UART1_RX,
	.dma_tx_param = (void *) U300_DMA_UART1_TX,
#endif
};

static struct amba_device uart1_device = {
	.dev = {
		.coherent_dma_mask = ~0,
		.init_name = "uart1", /* Fast device at 0x7000 offset */
		.platform_data = &uart1_plat_data,
	},
	.res = {
		.start = U300_UART1_BASE,
		.end   = U300_UART1_BASE + SZ_4K - 1,
		.flags = IORESOURCE_MEM,
	},
	.irq = { IRQ_U300_UART1, NO_IRQ },
};
#endif

/* External memory interface (EMIF) configuration block. */
static struct amba_device pl172_device = {
	.dev = {
		.init_name = "pl172", /* AHB device at 0x4000 offset */
		.platform_data = NULL,
	},
	.res = {
		.start = U300_EMIF_CFG_BASE,
		.end   = U300_EMIF_CFG_BASE + SZ_4K - 1,
		.flags = IORESOURCE_MEM,
	},
};


/*
 * Everything within this next ifdef deals with external devices connected to
 * the APP SPI bus.
 */
static struct amba_device pl022_device = {
	.dev = {
		.coherent_dma_mask = ~0,
		.init_name = "pl022", /* Fast device at 0x6000 offset */
	},
	.res = {
		.start = U300_SPI_BASE,
		.end   = U300_SPI_BASE + SZ_4K - 1,
		.flags = IORESOURCE_MEM,
	},
	.irq = {IRQ_U300_SPI, NO_IRQ },
	/*
	 * This device has a DMA channel but the Linux driver does not use
	 * it currently.
	 */
};

static struct amba_device mmcsd_device = {
	.dev = {
		.init_name = "mmci", /* Fast device at 0x1000 offset */
		.platform_data = NULL, /* Added later */
	},
	.res = {
		.start = U300_MMCSD_BASE,
		.end   = U300_MMCSD_BASE + SZ_4K - 1,
		.flags = IORESOURCE_MEM,
	},
	.irq = {IRQ_U300_MMCSD_MCIINTR0, IRQ_U300_MMCSD_MCIINTR1 },
	/*
	 * This device has a DMA channel but the Linux driver does not use
	 * it currently.
	 */
};

/*
 * The order of device declaration may be important, since some devices
 * have dependencies on other devices being initialized first.
 */
static struct amba_device *amba_devs[] __initdata = {
	&uart0_device,
#ifdef CONFIG_MACH_U300_BS335
	&uart1_device,
#endif
	&pl022_device,
	&pl172_device,
	&mmcsd_device,
};

/* Here follows a list of all hw resources that the platform devices
 * allocate. Note, clock dependencies are not included
 */
static struct resource gpio_resources[] = {
	{
		.start = U300_GPIO_BASE,
		.end   = (U300_GPIO_BASE + SZ_4K - 1),
		.flags = IORESOURCE_MEM,
	},
	{
		.name  = "gpio0",
		.start = IRQ_U300_GPIO_PORT0,
		.end   = IRQ_U300_GPIO_PORT0,
		.flags = IORESOURCE_IRQ,
	},
	{
		.name  = "gpio1",
		.start = IRQ_U300_GPIO_PORT1,
		.end   = IRQ_U300_GPIO_PORT1,
		.flags = IORESOURCE_IRQ,
	},
	{
		.name  = "gpio2",
		.start = IRQ_U300_GPIO_PORT2,
		.end   = IRQ_U300_GPIO_PORT2,
		.flags = IORESOURCE_IRQ,
	},
#ifdef U300_COH901571_3
	{
		.name  = "gpio3",
		.start = IRQ_U300_GPIO_PORT3,
		.end   = IRQ_U300_GPIO_PORT3,
		.flags = IORESOURCE_IRQ,
	},
	{
		.name  = "gpio4",
		.start = IRQ_U300_GPIO_PORT4,
		.end   = IRQ_U300_GPIO_PORT4,
		.flags = IORESOURCE_IRQ,
	},
#ifdef CONFIG_MACH_U300_BS335
	{
		.name  = "gpio5",
		.start = IRQ_U300_GPIO_PORT5,
		.end   = IRQ_U300_GPIO_PORT5,
		.flags = IORESOURCE_IRQ,
	},
	{
		.name  = "gpio6",
		.start = IRQ_U300_GPIO_PORT6,
		.end   = IRQ_U300_GPIO_PORT6,
		.flags = IORESOURCE_IRQ,
	},
#endif /* CONFIG_MACH_U300_BS335 */
#endif /* U300_COH901571_3 */
};

static struct resource keypad_resources[] = {
	{
		.start = U300_KEYPAD_BASE,
		.end   = U300_KEYPAD_BASE + SZ_4K - 1,
		.flags = IORESOURCE_MEM,
	},
	{
		.name  = "coh901461-press",
		.start = IRQ_U300_KEYPAD_KEYBF,
		.end   = IRQ_U300_KEYPAD_KEYBF,
		.flags = IORESOURCE_IRQ,
	},
	{
		.name  = "coh901461-release",
		.start = IRQ_U300_KEYPAD_KEYBR,
		.end   = IRQ_U300_KEYPAD_KEYBR,
		.flags = IORESOURCE_IRQ,
	},
};

static struct resource rtc_resources[] = {
	{
		.start = U300_RTC_BASE,
		.end   = U300_RTC_BASE + SZ_4K - 1,
		.flags = IORESOURCE_MEM,
	},
	{
		.start = IRQ_U300_RTC,
		.end   = IRQ_U300_RTC,
		.flags = IORESOURCE_IRQ,
	},
};

/*
 * Fsmc does have IRQs: #43 and #44 (NFIF and NFIF2)
 * but these are not yet used by the driver.
 */
static struct resource fsmc_resources[] = {
	{
		.name  = "nand_data",
		.start = U300_NAND_CS0_PHYS_BASE,
		.end   = U300_NAND_CS0_PHYS_BASE + SZ_16K - 1,
		.flags = IORESOURCE_MEM,
	},
	{
		.name  = "fsmc_regs",
		.start = U300_NAND_IF_PHYS_BASE,
		.end   = U300_NAND_IF_PHYS_BASE + SZ_4K - 1,
		.flags = IORESOURCE_MEM,
	},
};

static struct resource i2c0_resources[] = {
	{
		.start = U300_I2C0_BASE,
		.end   = U300_I2C0_BASE + SZ_4K - 1,
		.flags = IORESOURCE_MEM,
	},
	{
		.start = IRQ_U300_I2C0,
		.end   = IRQ_U300_I2C0,
		.flags = IORESOURCE_IRQ,
	},
};

static struct resource i2c1_resources[] = {
	{
		.start = U300_I2C1_BASE,
		.end   = U300_I2C1_BASE + SZ_4K - 1,
		.flags = IORESOURCE_MEM,
	},
	{
		.start = IRQ_U300_I2C1,
		.end   = IRQ_U300_I2C1,
		.flags = IORESOURCE_IRQ,
	},
};

static struct resource wdog_resources[] = {
	{
		.start = U300_WDOG_BASE,
		.end   = U300_WDOG_BASE + SZ_4K - 1,
		.flags = IORESOURCE_MEM,
	},
	{
		.start = IRQ_U300_WDOG,
		.end   = IRQ_U300_WDOG,
		.flags = IORESOURCE_IRQ,
	}
};

/* TODO: These should be protected by suitable #ifdef's */
static struct resource ave_resources[] = {
	{
		.name  = "AVE3e I/O Area",
		.start = U300_VIDEOENC_BASE,
		.end   = U300_VIDEOENC_BASE + SZ_512K - 1,
		.flags = IORESOURCE_MEM,
	},
	{
		.name  = "AVE3e IRQ0",
		.start = IRQ_U300_VIDEO_ENC_0,
		.end   = IRQ_U300_VIDEO_ENC_0,
		.flags = IORESOURCE_IRQ,
	},
	{
		.name  = "AVE3e IRQ1",
		.start = IRQ_U300_VIDEO_ENC_1,
		.end   = IRQ_U300_VIDEO_ENC_1,
		.flags = IORESOURCE_IRQ,
	},
	{
		.name  = "AVE3e Physmem Area",
		.start = 0, /* 0 will be remapped to reserved memory */
		.end   = SZ_1M - 1,
		.flags = IORESOURCE_MEM,
	},
	/*
	 * The AVE3e requires two regions of 256MB that it considers
	 * "invisible". The hardware will not be able to access these
	 * addresses, so they should never point to system RAM.
*/ { .name = "AVE3e Reserved 0", .start = 0xd0000000, .end = 0xd0000000 + SZ_256M - 1, .flags = IORESOURCE_MEM, }, { .name = "AVE3e Reserved 1", .start = 0xe0000000, .end = 0xe0000000 + SZ_256M - 1, .flags = IORESOURCE_MEM, }, }; static struct resource dma_resource[] = { { .start = U300_DMAC_BASE, .end = U300_DMAC_BASE + PAGE_SIZE - 1, .flags = IORESOURCE_MEM, }, { .start = IRQ_U300_DMA, .end = IRQ_U300_DMA, .flags = IORESOURCE_IRQ, } }; #ifdef CONFIG_MACH_U300_BS335 /* points out all dma slave channels. * Syntax is [A1, B1, A2, B2, .... ,-1,-1] * Select all channels from A to B, end of list is marked with -1,-1 */ static int dma_slave_channels[] = { U300_DMA_MSL_TX_0, U300_DMA_SPI_RX, U300_DMA_UART1_TX, U300_DMA_UART1_RX, -1, -1}; /* points out all dma memcpy channels. */ static int dma_memcpy_channels[] = { U300_DMA_GENERAL_PURPOSE_0, U300_DMA_GENERAL_PURPOSE_8, -1, -1}; #else /* CONFIG_MACH_U300_BS335 */ static int dma_slave_channels[] = {U300_DMA_MSL_TX_0, U300_DMA_SPI_RX, -1, -1}; static int dma_memcpy_channels[] = { U300_DMA_GENERAL_PURPOSE_0, U300_DMA_GENERAL_PURPOSE_10, -1, -1}; #endif /** register dma for memory access * * active 1 means dma intends to access memory * 0 means dma wont access memory */ static void coh901318_access_memory_state(struct device *dev, bool active) { } #define flags_memcpy_config (COH901318_CX_CFG_CH_DISABLE | \ COH901318_CX_CFG_RM_MEMORY_TO_MEMORY | \ COH901318_CX_CFG_LCR_DISABLE | \ COH901318_CX_CFG_TC_IRQ_ENABLE | \ COH901318_CX_CFG_BE_IRQ_ENABLE) #define flags_memcpy_lli_chained (COH901318_CX_CTRL_TC_ENABLE | \ COH901318_CX_CTRL_BURST_COUNT_32_BYTES | \ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS | \ COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE | \ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS | \ COH901318_CX_CTRL_DST_ADDR_INC_ENABLE | \ COH901318_CX_CTRL_MASTER_MODE_M1RW | \ COH901318_CX_CTRL_TCP_DISABLE | \ COH901318_CX_CTRL_TC_IRQ_DISABLE | \ COH901318_CX_CTRL_HSP_DISABLE | \ COH901318_CX_CTRL_HSS_DISABLE | \ COH901318_CX_CTRL_DDMA_LEGACY | 
\ COH901318_CX_CTRL_PRDD_SOURCE) #define flags_memcpy_lli (COH901318_CX_CTRL_TC_ENABLE | \ COH901318_CX_CTRL_BURST_COUNT_32_BYTES | \ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS | \ COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE | \ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS | \ COH901318_CX_CTRL_DST_ADDR_INC_ENABLE | \ COH901318_CX_CTRL_MASTER_MODE_M1RW | \ COH901318_CX_CTRL_TCP_DISABLE | \ COH901318_CX_CTRL_TC_IRQ_DISABLE | \ COH901318_CX_CTRL_HSP_DISABLE | \ COH901318_CX_CTRL_HSS_DISABLE | \ COH901318_CX_CTRL_DDMA_LEGACY | \ COH901318_CX_CTRL_PRDD_SOURCE) #define flags_memcpy_lli_last (COH901318_CX_CTRL_TC_ENABLE | \ COH901318_CX_CTRL_BURST_COUNT_32_BYTES | \ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS | \ COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE | \ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS | \ COH901318_CX_CTRL_DST_ADDR_INC_ENABLE | \ COH901318_CX_CTRL_MASTER_MODE_M1RW | \ COH901318_CX_CTRL_TCP_DISABLE | \ COH901318_CX_CTRL_TC_IRQ_ENABLE | \ COH901318_CX_CTRL_HSP_DISABLE | \ COH901318_CX_CTRL_HSS_DISABLE | \ COH901318_CX_CTRL_DDMA_LEGACY | \ COH901318_CX_CTRL_PRDD_SOURCE) const struct coh_dma_channel chan_config[U300_DMA_CHANNELS] = { { .number = U300_DMA_MSL_TX_0, .name = "MSL TX 0", .priority_high = 0, .dev_addr = U300_MSL_BASE + 0 * 0x40 + 0x20, }, { .number = U300_DMA_MSL_TX_1, .name = "MSL TX 1", .priority_high = 0, .dev_addr = U300_MSL_BASE + 1 * 0x40 + 0x20, .param.config = COH901318_CX_CFG_CH_DISABLE | COH901318_CX_CFG_LCR_DISABLE | COH901318_CX_CFG_TC_IRQ_ENABLE | COH901318_CX_CFG_BE_IRQ_ENABLE, .param.ctrl_lli_chained = 0 | COH901318_CX_CTRL_TC_ENABLE | COH901318_CX_CTRL_BURST_COUNT_32_BYTES | COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS | COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE | COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS | COH901318_CX_CTRL_DST_ADDR_INC_DISABLE | COH901318_CX_CTRL_MASTER_MODE_M1R_M2W | COH901318_CX_CTRL_TCP_DISABLE | COH901318_CX_CTRL_TC_IRQ_DISABLE | COH901318_CX_CTRL_HSP_ENABLE | COH901318_CX_CTRL_HSS_DISABLE | COH901318_CX_CTRL_DDMA_LEGACY | 
COH901318_CX_CTRL_PRDD_SOURCE, .param.ctrl_lli = 0 | COH901318_CX_CTRL_TC_ENABLE | COH901318_CX_CTRL_BURST_COUNT_32_BYTES | COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS | COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE | COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS | COH901318_CX_CTRL_DST_ADDR_INC_DISABLE | COH901318_CX_CTRL_MASTER_MODE_M1R_M2W | COH901318_CX_CTRL_TCP_ENABLE | COH901318_CX_CTRL_TC_IRQ_DISABLE | COH901318_CX_CTRL_HSP_ENABLE | COH901318_CX_CTRL_HSS_DISABLE | COH901318_CX_CTRL_DDMA_LEGACY | COH901318_CX_CTRL_PRDD_SOURCE, .param.ctrl_lli_last = 0 | COH901318_CX_CTRL_TC_ENABLE | COH901318_CX_CTRL_BURST_COUNT_32_BYTES | COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS | COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE | COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS | COH901318_CX_CTRL_DST_ADDR_INC_DISABLE | COH901318_CX_CTRL_MASTER_MODE_M1R_M2W | COH901318_CX_CTRL_TCP_ENABLE | COH901318_CX_CTRL_TC_IRQ_ENABLE | COH901318_CX_CTRL_HSP_ENABLE | COH901318_CX_CTRL_HSS_DISABLE | COH901318_CX_CTRL_DDMA_LEGACY | COH901318_CX_CTRL_PRDD_SOURCE, }, { .number = U300_DMA_MSL_TX_2, .name = "MSL TX 2", .priority_high = 0, .dev_addr = U300_MSL_BASE + 2 * 0x40 + 0x20, .param.config = COH901318_CX_CFG_CH_DISABLE | COH901318_CX_CFG_LCR_DISABLE | COH901318_CX_CFG_TC_IRQ_ENABLE | COH901318_CX_CFG_BE_IRQ_ENABLE, .param.ctrl_lli_chained = 0 | COH901318_CX_CTRL_TC_ENABLE | COH901318_CX_CTRL_BURST_COUNT_32_BYTES | COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS | COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE | COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS | COH901318_CX_CTRL_DST_ADDR_INC_DISABLE | COH901318_CX_CTRL_MASTER_MODE_M1R_M2W | COH901318_CX_CTRL_TCP_DISABLE | COH901318_CX_CTRL_TC_IRQ_DISABLE | COH901318_CX_CTRL_HSP_ENABLE | COH901318_CX_CTRL_HSS_DISABLE | COH901318_CX_CTRL_DDMA_LEGACY | COH901318_CX_CTRL_PRDD_SOURCE, .param.ctrl_lli = 0 | COH901318_CX_CTRL_TC_ENABLE | COH901318_CX_CTRL_BURST_COUNT_32_BYTES | COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS | COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE | COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS | 
COH901318_CX_CTRL_DST_ADDR_INC_DISABLE | COH901318_CX_CTRL_MASTER_MODE_M1R_M2W | COH901318_CX_CTRL_TCP_ENABLE | COH901318_CX_CTRL_TC_IRQ_DISABLE | COH901318_CX_CTRL_HSP_ENABLE | COH901318_CX_CTRL_HSS_DISABLE | COH901318_CX_CTRL_DDMA_LEGACY | COH901318_CX_CTRL_PRDD_SOURCE, .param.ctrl_lli_last = 0 | COH901318_CX_CTRL_TC_ENABLE | COH901318_CX_CTRL_BURST_COUNT_32_BYTES | COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS | COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE | COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS | COH901318_CX_CTRL_DST_ADDR_INC_DISABLE | COH901318_CX_CTRL_MASTER_MODE_M1R_M2W | COH901318_CX_CTRL_TCP_ENABLE | COH901318_CX_CTRL_TC_IRQ_ENABLE | COH901318_CX_CTRL_HSP_ENABLE | COH901318_CX_CTRL_HSS_DISABLE | COH901318_CX_CTRL_DDMA_LEGACY | COH901318_CX_CTRL_PRDD_SOURCE, .desc_nbr_max = 10, }, { .number = U300_DMA_MSL_TX_3, .name = "MSL TX 3", .priority_high = 0, .dev_addr = U300_MSL_BASE + 3 * 0x40 + 0x20, .param.config = COH901318_CX_CFG_CH_DISABLE | COH901318_CX_CFG_LCR_DISABLE | COH901318_CX_CFG_TC_IRQ_ENABLE | COH901318_CX_CFG_BE_IRQ_ENABLE, .param.ctrl_lli_chained = 0 | COH901318_CX_CTRL_TC_ENABLE | COH901318_CX_CTRL_BURST_COUNT_32_BYTES | COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS | COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE | COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS | COH901318_CX_CTRL_DST_ADDR_INC_DISABLE | COH901318_CX_CTRL_MASTER_MODE_M1R_M2W | COH901318_CX_CTRL_TCP_DISABLE | COH901318_CX_CTRL_TC_IRQ_DISABLE | COH901318_CX_CTRL_HSP_ENABLE | COH901318_CX_CTRL_HSS_DISABLE | COH901318_CX_CTRL_DDMA_LEGACY | COH901318_CX_CTRL_PRDD_SOURCE, .param.ctrl_lli = 0 | COH901318_CX_CTRL_TC_ENABLE | COH901318_CX_CTRL_BURST_COUNT_32_BYTES | COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS | COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE | COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS | COH901318_CX_CTRL_DST_ADDR_INC_DISABLE | COH901318_CX_CTRL_MASTER_MODE_M1R_M2W | COH901318_CX_CTRL_TCP_ENABLE | COH901318_CX_CTRL_TC_IRQ_DISABLE | COH901318_CX_CTRL_HSP_ENABLE | COH901318_CX_CTRL_HSS_DISABLE | COH901318_CX_CTRL_DDMA_LEGACY | 
COH901318_CX_CTRL_PRDD_SOURCE, .param.ctrl_lli_last = 0 | COH901318_CX_CTRL_TC_ENABLE | COH901318_CX_CTRL_BURST_COUNT_32_BYTES | COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS | COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE | COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS | COH901318_CX_CTRL_DST_ADDR_INC_DISABLE | COH901318_CX_CTRL_MASTER_MODE_M1R_M2W | COH901318_CX_CTRL_TCP_ENABLE | COH901318_CX_CTRL_TC_IRQ_ENABLE | COH901318_CX_CTRL_HSP_ENABLE | COH901318_CX_CTRL_HSS_DISABLE | COH901318_CX_CTRL_DDMA_LEGACY | COH901318_CX_CTRL_PRDD_SOURCE, }, { .number = U300_DMA_MSL_TX_4, .name = "MSL TX 4", .priority_high = 0, .dev_addr = U300_MSL_BASE + 4 * 0x40 + 0x20, .param.config = COH901318_CX_CFG_CH_DISABLE | COH901318_CX_CFG_LCR_DISABLE | COH901318_CX_CFG_TC_IRQ_ENABLE | COH901318_CX_CFG_BE_IRQ_ENABLE, .param.ctrl_lli_chained = 0 | COH901318_CX_CTRL_TC_ENABLE | COH901318_CX_CTRL_BURST_COUNT_32_BYTES | COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS | COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE | COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS | COH901318_CX_CTRL_DST_ADDR_INC_DISABLE | COH901318_CX_CTRL_MASTER_MODE_M1R_M2W | COH901318_CX_CTRL_TCP_DISABLE | COH901318_CX_CTRL_TC_IRQ_DISABLE | COH901318_CX_CTRL_HSP_ENABLE | COH901318_CX_CTRL_HSS_DISABLE | COH901318_CX_CTRL_DDMA_LEGACY | COH901318_CX_CTRL_PRDD_SOURCE, .param.ctrl_lli = 0 | COH901318_CX_CTRL_TC_ENABLE | COH901318_CX_CTRL_BURST_COUNT_32_BYTES | COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS | COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE | COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS | COH901318_CX_CTRL_DST_ADDR_INC_DISABLE | COH901318_CX_CTRL_MASTER_MODE_M1R_M2W | COH901318_CX_CTRL_TCP_ENABLE | COH901318_CX_CTRL_TC_IRQ_DISABLE | COH901318_CX_CTRL_HSP_ENABLE | COH901318_CX_CTRL_HSS_DISABLE | COH901318_CX_CTRL_DDMA_LEGACY | COH901318_CX_CTRL_PRDD_SOURCE, .param.ctrl_lli_last = 0 | COH901318_CX_CTRL_TC_ENABLE | COH901318_CX_CTRL_BURST_COUNT_32_BYTES | COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS | COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE | COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS | 
COH901318_CX_CTRL_DST_ADDR_INC_DISABLE | COH901318_CX_CTRL_MASTER_MODE_M1R_M2W | COH901318_CX_CTRL_TCP_ENABLE | COH901318_CX_CTRL_TC_IRQ_ENABLE | COH901318_CX_CTRL_HSP_ENABLE | COH901318_CX_CTRL_HSS_DISABLE | COH901318_CX_CTRL_DDMA_LEGACY | COH901318_CX_CTRL_PRDD_SOURCE, }, { .number = U300_DMA_MSL_TX_5, .name = "MSL TX 5", .priority_high = 0, .dev_addr = U300_MSL_BASE + 5 * 0x40 + 0x20, }, { .number = U300_DMA_MSL_TX_6, .name = "MSL TX 6", .priority_high = 0, .dev_addr = U300_MSL_BASE + 6 * 0x40 + 0x20, }, { .number = U300_DMA_MSL_RX_0, .name = "MSL RX 0", .priority_high = 0, .dev_addr = U300_MSL_BASE + 0 * 0x40 + 0x220, }, { .number = U300_DMA_MSL_RX_1, .name = "MSL RX 1", .priority_high = 0, .dev_addr = U300_MSL_BASE + 1 * 0x40 + 0x220, .param.config = COH901318_CX_CFG_CH_DISABLE | COH901318_CX_CFG_LCR_DISABLE | COH901318_CX_CFG_TC_IRQ_ENABLE | COH901318_CX_CFG_BE_IRQ_ENABLE, .param.ctrl_lli_chained = 0 | COH901318_CX_CTRL_TC_ENABLE | COH901318_CX_CTRL_BURST_COUNT_32_BYTES | COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS | COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE | COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS | COH901318_CX_CTRL_DST_ADDR_INC_ENABLE | COH901318_CX_CTRL_MASTER_MODE_M2R_M1W | COH901318_CX_CTRL_TCP_DISABLE | COH901318_CX_CTRL_TC_IRQ_DISABLE | COH901318_CX_CTRL_HSP_ENABLE | COH901318_CX_CTRL_HSS_DISABLE | COH901318_CX_CTRL_DDMA_DEMAND_DMA1 | COH901318_CX_CTRL_PRDD_DEST, .param.ctrl_lli = 0, .param.ctrl_lli_last = 0 | COH901318_CX_CTRL_TC_ENABLE | COH901318_CX_CTRL_BURST_COUNT_32_BYTES | COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS | COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE | COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS | COH901318_CX_CTRL_DST_ADDR_INC_ENABLE | COH901318_CX_CTRL_MASTER_MODE_M2R_M1W | COH901318_CX_CTRL_TCP_DISABLE | COH901318_CX_CTRL_TC_IRQ_ENABLE | COH901318_CX_CTRL_HSP_ENABLE | COH901318_CX_CTRL_HSS_DISABLE | COH901318_CX_CTRL_DDMA_DEMAND_DMA1 | COH901318_CX_CTRL_PRDD_DEST, }, { .number = U300_DMA_MSL_RX_2, .name = "MSL RX 2", .priority_high = 0, .dev_addr = 
U300_MSL_BASE + 2 * 0x40 + 0x220, .param.config = COH901318_CX_CFG_CH_DISABLE | COH901318_CX_CFG_LCR_DISABLE | COH901318_CX_CFG_TC_IRQ_ENABLE | COH901318_CX_CFG_BE_IRQ_ENABLE, .param.ctrl_lli_chained = 0 | COH901318_CX_CTRL_TC_ENABLE | COH901318_CX_CTRL_BURST_COUNT_32_BYTES | COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS | COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE | COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS | COH901318_CX_CTRL_DST_ADDR_INC_ENABLE | COH901318_CX_CTRL_MASTER_MODE_M2R_M1W | COH901318_CX_CTRL_TCP_DISABLE | COH901318_CX_CTRL_TC_IRQ_DISABLE | COH901318_CX_CTRL_HSP_ENABLE | COH901318_CX_CTRL_HSS_DISABLE | COH901318_CX_CTRL_DDMA_DEMAND_DMA1 | COH901318_CX_CTRL_PRDD_DEST, .param.ctrl_lli = 0 | COH901318_CX_CTRL_TC_ENABLE | COH901318_CX_CTRL_BURST_COUNT_32_BYTES | COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS | COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE | COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS | COH901318_CX_CTRL_DST_ADDR_INC_ENABLE | COH901318_CX_CTRL_MASTER_MODE_M2R_M1W | COH901318_CX_CTRL_TCP_DISABLE | COH901318_CX_CTRL_TC_IRQ_ENABLE | COH901318_CX_CTRL_HSP_ENABLE | COH901318_CX_CTRL_HSS_DISABLE | COH901318_CX_CTRL_DDMA_DEMAND_DMA1 | COH901318_CX_CTRL_PRDD_DEST, .param.ctrl_lli_last = 0 | COH901318_CX_CTRL_TC_ENABLE | COH901318_CX_CTRL_BURST_COUNT_32_BYTES | COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS | COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE | COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS | COH901318_CX_CTRL_DST_ADDR_INC_ENABLE | COH901318_CX_CTRL_MASTER_MODE_M2R_M1W | COH901318_CX_CTRL_TCP_DISABLE | COH901318_CX_CTRL_TC_IRQ_ENABLE | COH901318_CX_CTRL_HSP_ENABLE | COH901318_CX_CTRL_HSS_DISABLE | COH901318_CX_CTRL_DDMA_DEMAND_DMA1 | COH901318_CX_CTRL_PRDD_DEST, }, { .number = U300_DMA_MSL_RX_3, .name = "MSL RX 3", .priority_high = 0, .dev_addr = U300_MSL_BASE + 3 * 0x40 + 0x220, .param.config = COH901318_CX_CFG_CH_DISABLE | COH901318_CX_CFG_LCR_DISABLE | COH901318_CX_CFG_TC_IRQ_ENABLE | COH901318_CX_CFG_BE_IRQ_ENABLE, .param.ctrl_lli_chained = 0 | COH901318_CX_CTRL_TC_ENABLE | 
COH901318_CX_CTRL_BURST_COUNT_32_BYTES | COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS | COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE | COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS | COH901318_CX_CTRL_DST_ADDR_INC_ENABLE | COH901318_CX_CTRL_MASTER_MODE_M2R_M1W | COH901318_CX_CTRL_TCP_DISABLE | COH901318_CX_CTRL_TC_IRQ_DISABLE | COH901318_CX_CTRL_HSP_ENABLE | COH901318_CX_CTRL_HSS_DISABLE | COH901318_CX_CTRL_DDMA_DEMAND_DMA1 | COH901318_CX_CTRL_PRDD_DEST, .param.ctrl_lli = 0 | COH901318_CX_CTRL_TC_ENABLE | COH901318_CX_CTRL_BURST_COUNT_32_BYTES | COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS | COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE | COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS | COH901318_CX_CTRL_DST_ADDR_INC_ENABLE | COH901318_CX_CTRL_MASTER_MODE_M2R_M1W | COH901318_CX_CTRL_TCP_DISABLE | COH901318_CX_CTRL_TC_IRQ_ENABLE | COH901318_CX_CTRL_HSP_ENABLE | COH901318_CX_CTRL_HSS_DISABLE | COH901318_CX_CTRL_DDMA_DEMAND_DMA1 | COH901318_CX_CTRL_PRDD_DEST, .param.ctrl_lli_last = 0 | COH901318_CX_CTRL_TC_ENABLE | COH901318_CX_CTRL_BURST_COUNT_32_BYTES | COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS | COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE | COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS | COH901318_CX_CTRL_DST_ADDR_INC_ENABLE | COH901318_CX_CTRL_MASTER_MODE_M2R_M1W | COH901318_CX_CTRL_TCP_DISABLE | COH901318_CX_CTRL_TC_IRQ_ENABLE | COH901318_CX_CTRL_HSP_ENABLE | COH901318_CX_CTRL_HSS_DISABLE | COH901318_CX_CTRL_DDMA_DEMAND_DMA1 | COH901318_CX_CTRL_PRDD_DEST, }, { .number = U300_DMA_MSL_RX_4, .name = "MSL RX 4", .priority_high = 0, .dev_addr = U300_MSL_BASE + 4 * 0x40 + 0x220, .param.config = COH901318_CX_CFG_CH_DISABLE | COH901318_CX_CFG_LCR_DISABLE | COH901318_CX_CFG_TC_IRQ_ENABLE | COH901318_CX_CFG_BE_IRQ_ENABLE, .param.ctrl_lli_chained = 0 | COH901318_CX_CTRL_TC_ENABLE | COH901318_CX_CTRL_BURST_COUNT_32_BYTES | COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS | COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE | COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS | COH901318_CX_CTRL_DST_ADDR_INC_ENABLE | COH901318_CX_CTRL_MASTER_MODE_M2R_M1W | 
COH901318_CX_CTRL_TCP_DISABLE | COH901318_CX_CTRL_TC_IRQ_DISABLE | COH901318_CX_CTRL_HSP_ENABLE | COH901318_CX_CTRL_HSS_DISABLE | COH901318_CX_CTRL_DDMA_DEMAND_DMA1 | COH901318_CX_CTRL_PRDD_DEST, .param.ctrl_lli = 0 | COH901318_CX_CTRL_TC_ENABLE | COH901318_CX_CTRL_BURST_COUNT_32_BYTES | COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS | COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE | COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS | COH901318_CX_CTRL_DST_ADDR_INC_ENABLE | COH901318_CX_CTRL_MASTER_MODE_M2R_M1W | COH901318_CX_CTRL_TCP_DISABLE | COH901318_CX_CTRL_TC_IRQ_ENABLE | COH901318_CX_CTRL_HSP_ENABLE | COH901318_CX_CTRL_HSS_DISABLE | COH901318_CX_CTRL_DDMA_DEMAND_DMA1 | COH901318_CX_CTRL_PRDD_DEST, .param.ctrl_lli_last = 0 | COH901318_CX_CTRL_TC_ENABLE | COH901318_CX_CTRL_BURST_COUNT_32_BYTES | COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS | COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE | COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS | COH901318_CX_CTRL_DST_ADDR_INC_ENABLE | COH901318_CX_CTRL_MASTER_MODE_M2R_M1W | COH901318_CX_CTRL_TCP_DISABLE | COH901318_CX_CTRL_TC_IRQ_ENABLE | COH901318_CX_CTRL_HSP_ENABLE | COH901318_CX_CTRL_HSS_DISABLE | COH901318_CX_CTRL_DDMA_DEMAND_DMA1 | COH901318_CX_CTRL_PRDD_DEST, }, { .number = U300_DMA_MSL_RX_5, .name = "MSL RX 5", .priority_high = 0, .dev_addr = U300_MSL_BASE + 5 * 0x40 + 0x220, .param.config = COH901318_CX_CFG_CH_DISABLE | COH901318_CX_CFG_LCR_DISABLE | COH901318_CX_CFG_TC_IRQ_ENABLE | COH901318_CX_CFG_BE_IRQ_ENABLE, .param.ctrl_lli_chained = 0 | COH901318_CX_CTRL_TC_ENABLE | COH901318_CX_CTRL_BURST_COUNT_32_BYTES | COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS | COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE | COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS | COH901318_CX_CTRL_DST_ADDR_INC_ENABLE | COH901318_CX_CTRL_MASTER_MODE_M2R_M1W | COH901318_CX_CTRL_TCP_DISABLE | COH901318_CX_CTRL_TC_IRQ_DISABLE | COH901318_CX_CTRL_HSP_ENABLE | COH901318_CX_CTRL_HSS_DISABLE | COH901318_CX_CTRL_DDMA_DEMAND_DMA1 | COH901318_CX_CTRL_PRDD_DEST, .param.ctrl_lli = 0 | COH901318_CX_CTRL_TC_ENABLE | 
COH901318_CX_CTRL_BURST_COUNT_32_BYTES | COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS | COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE | COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS | COH901318_CX_CTRL_DST_ADDR_INC_ENABLE | COH901318_CX_CTRL_MASTER_MODE_M2R_M1W | COH901318_CX_CTRL_TCP_DISABLE | COH901318_CX_CTRL_TC_IRQ_ENABLE | COH901318_CX_CTRL_HSP_ENABLE | COH901318_CX_CTRL_HSS_DISABLE | COH901318_CX_CTRL_DDMA_DEMAND_DMA1 | COH901318_CX_CTRL_PRDD_DEST, .param.ctrl_lli_last = 0 | COH901318_CX_CTRL_TC_ENABLE | COH901318_CX_CTRL_BURST_COUNT_32_BYTES | COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS | COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE | COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS | COH901318_CX_CTRL_DST_ADDR_INC_ENABLE | COH901318_CX_CTRL_MASTER_MODE_M2R_M1W | COH901318_CX_CTRL_TCP_DISABLE | COH901318_CX_CTRL_TC_IRQ_ENABLE | COH901318_CX_CTRL_HSP_ENABLE | COH901318_CX_CTRL_HSS_DISABLE | COH901318_CX_CTRL_DDMA_DEMAND_DMA1 | COH901318_CX_CTRL_PRDD_DEST, }, { .number = U300_DMA_MSL_RX_6, .name = "MSL RX 6", .priority_high = 0, .dev_addr = U300_MSL_BASE + 6 * 0x40 + 0x220, }, /* * Don't set up device address, burst count or size of src * or dst bus for this peripheral - handled by PrimeCell * DMA extension. 
*/ { .number = U300_DMA_MMCSD_RX_TX, .name = "MMCSD RX TX", .priority_high = 0, .param.config = COH901318_CX_CFG_CH_DISABLE | COH901318_CX_CFG_LCR_DISABLE | COH901318_CX_CFG_TC_IRQ_ENABLE | COH901318_CX_CFG_BE_IRQ_ENABLE, .param.ctrl_lli_chained = 0 | COH901318_CX_CTRL_TC_ENABLE | COH901318_CX_CTRL_MASTER_MODE_M1RW | COH901318_CX_CTRL_TCP_ENABLE | COH901318_CX_CTRL_TC_IRQ_DISABLE | COH901318_CX_CTRL_HSP_ENABLE | COH901318_CX_CTRL_HSS_DISABLE | COH901318_CX_CTRL_DDMA_LEGACY, .param.ctrl_lli = 0 | COH901318_CX_CTRL_TC_ENABLE | COH901318_CX_CTRL_MASTER_MODE_M1RW | COH901318_CX_CTRL_TCP_ENABLE | COH901318_CX_CTRL_TC_IRQ_DISABLE | COH901318_CX_CTRL_HSP_ENABLE | COH901318_CX_CTRL_HSS_DISABLE | COH901318_CX_CTRL_DDMA_LEGACY, .param.ctrl_lli_last = 0 | COH901318_CX_CTRL_TC_ENABLE | COH901318_CX_CTRL_MASTER_MODE_M1RW | COH901318_CX_CTRL_TCP_DISABLE | COH901318_CX_CTRL_TC_IRQ_ENABLE | COH901318_CX_CTRL_HSP_ENABLE | COH901318_CX_CTRL_HSS_DISABLE | COH901318_CX_CTRL_DDMA_LEGACY, }, { .number = U300_DMA_MSPRO_TX, .name = "MSPRO TX", .priority_high = 0, }, { .number = U300_DMA_MSPRO_RX, .name = "MSPRO RX", .priority_high = 0, }, /* * Don't set up device address, burst count or size of src * or dst bus for this peripheral - handled by PrimeCell * DMA extension. 
*/ { .number = U300_DMA_UART0_TX, .name = "UART0 TX", .priority_high = 0, .param.config = COH901318_CX_CFG_CH_DISABLE | COH901318_CX_CFG_LCR_DISABLE | COH901318_CX_CFG_TC_IRQ_ENABLE | COH901318_CX_CFG_BE_IRQ_ENABLE, .param.ctrl_lli_chained = 0 | COH901318_CX_CTRL_TC_ENABLE | COH901318_CX_CTRL_MASTER_MODE_M1RW | COH901318_CX_CTRL_TCP_ENABLE | COH901318_CX_CTRL_TC_IRQ_DISABLE | COH901318_CX_CTRL_HSP_ENABLE | COH901318_CX_CTRL_HSS_DISABLE | COH901318_CX_CTRL_DDMA_LEGACY, .param.ctrl_lli = 0 | COH901318_CX_CTRL_TC_ENABLE | COH901318_CX_CTRL_MASTER_MODE_M1RW | COH901318_CX_CTRL_TCP_ENABLE | COH901318_CX_CTRL_TC_IRQ_ENABLE | COH901318_CX_CTRL_HSP_ENABLE | COH901318_CX_CTRL_HSS_DISABLE | COH901318_CX_CTRL_DDMA_LEGACY, .param.ctrl_lli_last = 0 | COH901318_CX_CTRL_TC_ENABLE | COH901318_CX_CTRL_MASTER_MODE_M1RW | COH901318_CX_CTRL_TCP_ENABLE | COH901318_CX_CTRL_TC_IRQ_ENABLE | COH901318_CX_CTRL_HSP_ENABLE | COH901318_CX_CTRL_HSS_DISABLE | COH901318_CX_CTRL_DDMA_LEGACY, }, { .number = U300_DMA_UART0_RX, .name = "UART0 RX", .priority_high = 0, .param.config = COH901318_CX_CFG_CH_DISABLE | COH901318_CX_CFG_LCR_DISABLE | COH901318_CX_CFG_TC_IRQ_ENABLE | COH901318_CX_CFG_BE_IRQ_ENABLE, .param.ctrl_lli_chained = 0 | COH901318_CX_CTRL_TC_ENABLE | COH901318_CX_CTRL_MASTER_MODE_M1RW | COH901318_CX_CTRL_TCP_ENABLE | COH901318_CX_CTRL_TC_IRQ_DISABLE | COH901318_CX_CTRL_HSP_ENABLE | COH901318_CX_CTRL_HSS_DISABLE | COH901318_CX_CTRL_DDMA_LEGACY, .param.ctrl_lli = 0 | COH901318_CX_CTRL_TC_ENABLE | COH901318_CX_CTRL_MASTER_MODE_M1RW | COH901318_CX_CTRL_TCP_ENABLE | COH901318_CX_CTRL_TC_IRQ_ENABLE | COH901318_CX_CTRL_HSP_ENABLE | COH901318_CX_CTRL_HSS_DISABLE | COH901318_CX_CTRL_DDMA_LEGACY, .param.ctrl_lli_last = 0 | COH901318_CX_CTRL_TC_ENABLE | COH901318_CX_CTRL_MASTER_MODE_M1RW | COH901318_CX_CTRL_TCP_ENABLE | COH901318_CX_CTRL_TC_IRQ_ENABLE | COH901318_CX_CTRL_HSP_ENABLE | COH901318_CX_CTRL_HSS_DISABLE | COH901318_CX_CTRL_DDMA_LEGACY, }, { .number = U300_DMA_APEX_TX, .name = "APEX TX", 
.priority_high = 0, }, { .number = U300_DMA_APEX_RX, .name = "APEX RX", .priority_high = 0, }, { .number = U300_DMA_PCM_I2S0_TX, .name = "PCM I2S0 TX", .priority_high = 1, .dev_addr = U300_PCM_I2S0_BASE + 0x14, .param.config = COH901318_CX_CFG_CH_DISABLE | COH901318_CX_CFG_LCR_DISABLE | COH901318_CX_CFG_TC_IRQ_ENABLE | COH901318_CX_CFG_BE_IRQ_ENABLE, .param.ctrl_lli_chained = 0 | COH901318_CX_CTRL_TC_ENABLE | COH901318_CX_CTRL_BURST_COUNT_16_BYTES | COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS | COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE | COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS | COH901318_CX_CTRL_DST_ADDR_INC_DISABLE | COH901318_CX_CTRL_MASTER_MODE_M1RW | COH901318_CX_CTRL_TCP_DISABLE | COH901318_CX_CTRL_TC_IRQ_DISABLE | COH901318_CX_CTRL_HSP_ENABLE | COH901318_CX_CTRL_HSS_DISABLE | COH901318_CX_CTRL_DDMA_LEGACY | COH901318_CX_CTRL_PRDD_SOURCE, .param.ctrl_lli = 0 | COH901318_CX_CTRL_TC_ENABLE | COH901318_CX_CTRL_BURST_COUNT_16_BYTES | COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS | COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE | COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS | COH901318_CX_CTRL_DST_ADDR_INC_DISABLE | COH901318_CX_CTRL_MASTER_MODE_M1RW | COH901318_CX_CTRL_TCP_ENABLE | COH901318_CX_CTRL_TC_IRQ_DISABLE | COH901318_CX_CTRL_HSP_ENABLE | COH901318_CX_CTRL_HSS_DISABLE | COH901318_CX_CTRL_DDMA_LEGACY | COH901318_CX_CTRL_PRDD_SOURCE, .param.ctrl_lli_last = 0 | COH901318_CX_CTRL_TC_ENABLE | COH901318_CX_CTRL_BURST_COUNT_16_BYTES | COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS | COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE | COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS | COH901318_CX_CTRL_DST_ADDR_INC_DISABLE | COH901318_CX_CTRL_MASTER_MODE_M1RW | COH901318_CX_CTRL_TCP_ENABLE | COH901318_CX_CTRL_TC_IRQ_DISABLE | COH901318_CX_CTRL_HSP_ENABLE | COH901318_CX_CTRL_HSS_DISABLE | COH901318_CX_CTRL_DDMA_LEGACY | COH901318_CX_CTRL_PRDD_SOURCE, }, { .number = U300_DMA_PCM_I2S0_RX, .name = "PCM I2S0 RX", .priority_high = 1, .dev_addr = U300_PCM_I2S0_BASE + 0x10, .param.config = COH901318_CX_CFG_CH_DISABLE | 
COH901318_CX_CFG_LCR_DISABLE | COH901318_CX_CFG_TC_IRQ_ENABLE | COH901318_CX_CFG_BE_IRQ_ENABLE, .param.ctrl_lli_chained = 0 | COH901318_CX_CTRL_TC_ENABLE | COH901318_CX_CTRL_BURST_COUNT_16_BYTES | COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS | COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE | COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS | COH901318_CX_CTRL_DST_ADDR_INC_ENABLE | COH901318_CX_CTRL_MASTER_MODE_M1RW | COH901318_CX_CTRL_TCP_DISABLE | COH901318_CX_CTRL_TC_IRQ_DISABLE | COH901318_CX_CTRL_HSP_ENABLE | COH901318_CX_CTRL_HSS_DISABLE | COH901318_CX_CTRL_DDMA_LEGACY | COH901318_CX_CTRL_PRDD_DEST, .param.ctrl_lli = 0 | COH901318_CX_CTRL_TC_ENABLE | COH901318_CX_CTRL_BURST_COUNT_16_BYTES | COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS | COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE | COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS | COH901318_CX_CTRL_DST_ADDR_INC_ENABLE | COH901318_CX_CTRL_MASTER_MODE_M1RW | COH901318_CX_CTRL_TCP_ENABLE | COH901318_CX_CTRL_TC_IRQ_DISABLE | COH901318_CX_CTRL_HSP_ENABLE | COH901318_CX_CTRL_HSS_DISABLE | COH901318_CX_CTRL_DDMA_LEGACY | COH901318_CX_CTRL_PRDD_DEST, .param.ctrl_lli_last = 0 | COH901318_CX_CTRL_TC_ENABLE | COH901318_CX_CTRL_BURST_COUNT_16_BYTES | COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS | COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE | COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS | COH901318_CX_CTRL_DST_ADDR_INC_ENABLE | COH901318_CX_CTRL_MASTER_MODE_M1RW | COH901318_CX_CTRL_TCP_ENABLE | COH901318_CX_CTRL_TC_IRQ_ENABLE | COH901318_CX_CTRL_HSP_ENABLE | COH901318_CX_CTRL_HSS_DISABLE | COH901318_CX_CTRL_DDMA_LEGACY | COH901318_CX_CTRL_PRDD_DEST, }, { .number = U300_DMA_PCM_I2S1_TX, .name = "PCM I2S1 TX", .priority_high = 1, .dev_addr = U300_PCM_I2S1_BASE + 0x14, .param.config = COH901318_CX_CFG_CH_DISABLE | COH901318_CX_CFG_LCR_DISABLE | COH901318_CX_CFG_TC_IRQ_ENABLE | COH901318_CX_CFG_BE_IRQ_ENABLE, .param.ctrl_lli_chained = 0 | COH901318_CX_CTRL_TC_ENABLE | COH901318_CX_CTRL_BURST_COUNT_16_BYTES | COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS | COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE 
| COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS | COH901318_CX_CTRL_DST_ADDR_INC_DISABLE | COH901318_CX_CTRL_MASTER_MODE_M1RW | COH901318_CX_CTRL_TCP_DISABLE | COH901318_CX_CTRL_TC_IRQ_DISABLE | COH901318_CX_CTRL_HSP_ENABLE | COH901318_CX_CTRL_HSS_DISABLE | COH901318_CX_CTRL_DDMA_LEGACY | COH901318_CX_CTRL_PRDD_SOURCE, .param.ctrl_lli = 0 | COH901318_CX_CTRL_TC_ENABLE | COH901318_CX_CTRL_BURST_COUNT_16_BYTES | COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS | COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE | COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS | COH901318_CX_CTRL_DST_ADDR_INC_DISABLE | COH901318_CX_CTRL_MASTER_MODE_M1RW | COH901318_CX_CTRL_TCP_ENABLE | COH901318_CX_CTRL_TC_IRQ_DISABLE | COH901318_CX_CTRL_HSP_ENABLE | COH901318_CX_CTRL_HSS_DISABLE | COH901318_CX_CTRL_DDMA_LEGACY | COH901318_CX_CTRL_PRDD_SOURCE, .param.ctrl_lli_last = 0 | COH901318_CX_CTRL_TC_ENABLE | COH901318_CX_CTRL_BURST_COUNT_16_BYTES | COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS | COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE | COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS | COH901318_CX_CTRL_DST_ADDR_INC_DISABLE | COH901318_CX_CTRL_MASTER_MODE_M1RW | COH901318_CX_CTRL_TCP_ENABLE | COH901318_CX_CTRL_TC_IRQ_ENABLE | COH901318_CX_CTRL_HSP_ENABLE | COH901318_CX_CTRL_HSS_DISABLE | COH901318_CX_CTRL_DDMA_LEGACY | COH901318_CX_CTRL_PRDD_SOURCE, }, { .number = U300_DMA_PCM_I2S1_RX, .name = "PCM I2S1 RX", .priority_high = 1, .dev_addr = U300_PCM_I2S1_BASE + 0x10, .param.config = COH901318_CX_CFG_CH_DISABLE | COH901318_CX_CFG_LCR_DISABLE | COH901318_CX_CFG_TC_IRQ_ENABLE | COH901318_CX_CFG_BE_IRQ_ENABLE, .param.ctrl_lli_chained = 0 | COH901318_CX_CTRL_TC_ENABLE | COH901318_CX_CTRL_BURST_COUNT_16_BYTES | COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS | COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE | COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS | COH901318_CX_CTRL_DST_ADDR_INC_ENABLE | COH901318_CX_CTRL_MASTER_MODE_M1RW | COH901318_CX_CTRL_TCP_DISABLE | COH901318_CX_CTRL_TC_IRQ_DISABLE | COH901318_CX_CTRL_HSP_ENABLE | COH901318_CX_CTRL_HSS_DISABLE | 
COH901318_CX_CTRL_DDMA_LEGACY | COH901318_CX_CTRL_PRDD_DEST, .param.ctrl_lli = 0 | COH901318_CX_CTRL_TC_ENABLE | COH901318_CX_CTRL_BURST_COUNT_16_BYTES | COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS | COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE | COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS | COH901318_CX_CTRL_DST_ADDR_INC_ENABLE | COH901318_CX_CTRL_MASTER_MODE_M1RW | COH901318_CX_CTRL_TCP_ENABLE | COH901318_CX_CTRL_TC_IRQ_DISABLE | COH901318_CX_CTRL_HSP_ENABLE | COH901318_CX_CTRL_HSS_DISABLE | COH901318_CX_CTRL_DDMA_LEGACY | COH901318_CX_CTRL_PRDD_DEST, .param.ctrl_lli_last = 0 | COH901318_CX_CTRL_TC_ENABLE | COH901318_CX_CTRL_BURST_COUNT_16_BYTES | COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS | COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE | COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS | COH901318_CX_CTRL_DST_ADDR_INC_ENABLE | COH901318_CX_CTRL_MASTER_MODE_M1RW | COH901318_CX_CTRL_TCP_ENABLE | COH901318_CX_CTRL_TC_IRQ_ENABLE | COH901318_CX_CTRL_HSP_ENABLE | COH901318_CX_CTRL_HSS_DISABLE | COH901318_CX_CTRL_DDMA_LEGACY | COH901318_CX_CTRL_PRDD_DEST, }, { .number = U300_DMA_XGAM_CDI, .name = "XGAM CDI", .priority_high = 0, }, { .number = U300_DMA_XGAM_PDI, .name = "XGAM PDI", .priority_high = 0, }, /* * Don't set up device address, burst count or size of src * or dst bus for this peripheral - handled by PrimeCell * DMA extension. 
*/ { .number = U300_DMA_SPI_TX, .name = "SPI TX", .priority_high = 0, .param.config = COH901318_CX_CFG_CH_DISABLE | COH901318_CX_CFG_LCR_DISABLE | COH901318_CX_CFG_TC_IRQ_ENABLE | COH901318_CX_CFG_BE_IRQ_ENABLE, .param.ctrl_lli_chained = 0 | COH901318_CX_CTRL_TC_ENABLE | COH901318_CX_CTRL_MASTER_MODE_M1RW | COH901318_CX_CTRL_TCP_DISABLE | COH901318_CX_CTRL_TC_IRQ_DISABLE | COH901318_CX_CTRL_HSP_ENABLE | COH901318_CX_CTRL_HSS_DISABLE | COH901318_CX_CTRL_DDMA_LEGACY, .param.ctrl_lli = 0 | COH901318_CX_CTRL_TC_ENABLE | COH901318_CX_CTRL_MASTER_MODE_M1RW | COH901318_CX_CTRL_TCP_DISABLE | COH901318_CX_CTRL_TC_IRQ_ENABLE | COH901318_CX_CTRL_HSP_ENABLE | COH901318_CX_CTRL_HSS_DISABLE | COH901318_CX_CTRL_DDMA_LEGACY, .param.ctrl_lli_last = 0 | COH901318_CX_CTRL_TC_ENABLE | COH901318_CX_CTRL_MASTER_MODE_M1RW | COH901318_CX_CTRL_TCP_DISABLE | COH901318_CX_CTRL_TC_IRQ_ENABLE | COH901318_CX_CTRL_HSP_ENABLE | COH901318_CX_CTRL_HSS_DISABLE | COH901318_CX_CTRL_DDMA_LEGACY, }, { .number = U300_DMA_SPI_RX, .name = "SPI RX", .priority_high = 0, .param.config = COH901318_CX_CFG_CH_DISABLE | COH901318_CX_CFG_LCR_DISABLE | COH901318_CX_CFG_TC_IRQ_ENABLE | COH901318_CX_CFG_BE_IRQ_ENABLE, .param.ctrl_lli_chained = 0 | COH901318_CX_CTRL_TC_ENABLE | COH901318_CX_CTRL_MASTER_MODE_M1RW | COH901318_CX_CTRL_TCP_DISABLE | COH901318_CX_CTRL_TC_IRQ_DISABLE | COH901318_CX_CTRL_HSP_ENABLE | COH901318_CX_CTRL_HSS_DISABLE | COH901318_CX_CTRL_DDMA_LEGACY, .param.ctrl_lli = 0 | COH901318_CX_CTRL_TC_ENABLE | COH901318_CX_CTRL_MASTER_MODE_M1RW | COH901318_CX_CTRL_TCP_DISABLE | COH901318_CX_CTRL_TC_IRQ_ENABLE | COH901318_CX_CTRL_HSP_ENABLE | COH901318_CX_CTRL_HSS_DISABLE | COH901318_CX_CTRL_DDMA_LEGACY, .param.ctrl_lli_last = 0 | COH901318_CX_CTRL_TC_ENABLE | COH901318_CX_CTRL_MASTER_MODE_M1RW | COH901318_CX_CTRL_TCP_DISABLE | COH901318_CX_CTRL_TC_IRQ_ENABLE | COH901318_CX_CTRL_HSP_ENABLE | COH901318_CX_CTRL_HSS_DISABLE | COH901318_CX_CTRL_DDMA_LEGACY, }, { .number = U300_DMA_GENERAL_PURPOSE_0, .name = 
"GENERAL 00", .priority_high = 0, .param.config = flags_memcpy_config, .param.ctrl_lli_chained = flags_memcpy_lli_chained, .param.ctrl_lli = flags_memcpy_lli, .param.ctrl_lli_last = flags_memcpy_lli_last, }, { .number = U300_DMA_GENERAL_PURPOSE_1, .name = "GENERAL 01", .priority_high = 0, .param.config = flags_memcpy_config, .param.ctrl_lli_chained = flags_memcpy_lli_chained, .param.ctrl_lli = flags_memcpy_lli, .param.ctrl_lli_last = flags_memcpy_lli_last, }, { .number = U300_DMA_GENERAL_PURPOSE_2, .name = "GENERAL 02", .priority_high = 0, .param.config = flags_memcpy_config, .param.ctrl_lli_chained = flags_memcpy_lli_chained, .param.ctrl_lli = flags_memcpy_lli, .param.ctrl_lli_last = flags_memcpy_lli_last, }, { .number = U300_DMA_GENERAL_PURPOSE_3, .name = "GENERAL 03", .priority_high = 0, .param.config = flags_memcpy_config, .param.ctrl_lli_chained = flags_memcpy_lli_chained, .param.ctrl_lli = flags_memcpy_lli, .param.ctrl_lli_last = flags_memcpy_lli_last, }, { .number = U300_DMA_GENERAL_PURPOSE_4, .name = "GENERAL 04", .priority_high = 0, .param.config = flags_memcpy_config, .param.ctrl_lli_chained = flags_memcpy_lli_chained, .param.ctrl_lli = flags_memcpy_lli, .param.ctrl_lli_last = flags_memcpy_lli_last, }, { .number = U300_DMA_GENERAL_PURPOSE_5, .name = "GENERAL 05", .priority_high = 0, .param.config = flags_memcpy_config, .param.ctrl_lli_chained = flags_memcpy_lli_chained, .param.ctrl_lli = flags_memcpy_lli, .param.ctrl_lli_last = flags_memcpy_lli_last, }, { .number = U300_DMA_GENERAL_PURPOSE_6, .name = "GENERAL 06", .priority_high = 0, .param.config = flags_memcpy_config, .param.ctrl_lli_chained = flags_memcpy_lli_chained, .param.ctrl_lli = flags_memcpy_lli, .param.ctrl_lli_last = flags_memcpy_lli_last, }, { .number = U300_DMA_GENERAL_PURPOSE_7, .name = "GENERAL 07", .priority_high = 0, .param.config = flags_memcpy_config, .param.ctrl_lli_chained = flags_memcpy_lli_chained, .param.ctrl_lli = flags_memcpy_lli, .param.ctrl_lli_last = flags_memcpy_lli_last, }, 
	{
		/* Memcpy channel: all transfer parameters come from the
		 * shared flags_memcpy_* templates defined earlier in this
		 * file. */
		.number = U300_DMA_GENERAL_PURPOSE_8,
		.name = "GENERAL 08",
		.priority_high = 0,
		.param.config = flags_memcpy_config,
		.param.ctrl_lli_chained = flags_memcpy_lli_chained,
		.param.ctrl_lli = flags_memcpy_lli,
		.param.ctrl_lli_last = flags_memcpy_lli_last,
	},
#ifdef CONFIG_MACH_U300_BS335
	/*
	 * On BS335 the last two DMA channels are claimed by UART1.  No
	 * device address or transfer parameters are set up here;
	 * presumably they are supplied by the PrimeCell DMA extension as
	 * for the other UART/SPI channels above -- TODO confirm.
	 */
	{
		.number = U300_DMA_UART1_TX,
		.name = "UART1 TX",
		.priority_high = 0,
	},
	{
		.number = U300_DMA_UART1_RX,
		.name = "UART1 RX",
		.priority_high = 0,
	}
#else
	/* Without UART1 these channels are two more memcpy channels */
	{
		.number = U300_DMA_GENERAL_PURPOSE_9,
		.name = "GENERAL 09",
		.priority_high = 0,
		.param.config = flags_memcpy_config,
		.param.ctrl_lli_chained = flags_memcpy_lli_chained,
		.param.ctrl_lli = flags_memcpy_lli,
		.param.ctrl_lli_last = flags_memcpy_lli_last,
	},
	{
		.number = U300_DMA_GENERAL_PURPOSE_10,
		.name = "GENERAL 10",
		.priority_high = 0,
		.param.config = flags_memcpy_config,
		.param.ctrl_lli_chained = flags_memcpy_lli_chained,
		.param.ctrl_lli = flags_memcpy_lli,
		.param.ctrl_lli_last = flags_memcpy_lli_last,
	}
#endif
};

/*
 * Platform data for the COH 901 318 DMA controller driver; handed to the
 * driver through dma_device.dev.platform_data below.
 */
static struct coh901318_platform coh901318_platform = {
	.chans_slave = dma_slave_channels,
	.chans_memcpy = dma_memcpy_channels,
	.access_memory_state = coh901318_access_memory_state,
	.chan_conf = chan_config,
	.max_channels = U300_DMA_CHANNELS,
};

/* Watchdog (COH 901 327) */
static struct platform_device wdog_device = {
	.name = "coh901327_wdog",
	.id = -1,
	.num_resources = ARRAY_SIZE(wdog_resources),
	.resource = wdog_resources,
};

/* Two instances of the ST U300 I2C controller ("stu300"), buses 0 and 1 */
static struct platform_device i2c0_device = {
	.name = "stu300",
	.id = 0,
	.num_resources = ARRAY_SIZE(i2c0_resources),
	.resource = i2c0_resources,
};

static struct platform_device i2c1_device = {
	.name = "stu300",
	.id = 1,
	.num_resources = ARRAY_SIZE(i2c1_resources),
	.resource = i2c1_resources,
};

/* GPIO block */
static struct platform_device gpio_device = {
	.name = "u300-gpio",
	.id = -1,
	.num_resources = ARRAY_SIZE(gpio_resources),
	.resource = gpio_resources,
};

/* Keypad controller */
static struct platform_device keypad_device = {
	.name = "keypad",
	.id = -1,
	.num_resources = ARRAY_SIZE(keypad_resources),
	.resource = keypad_resources,
};

/* RTC (COH 901 331) */
static struct platform_device rtc_device = {
	.name = "rtc-coh901331",
	.id = -1,
	.num_resources = ARRAY_SIZE(rtc_resources),
	.resource = rtc_resources,
};

/*
 * MTD partition layout for the NAND flash: boot records, a "free" area
 * and the large "platform" partition.  Offsets/sizes are fixed here;
 * total geometry is presumably board-specific -- verify against the
 * actual flash part.
 */
static struct mtd_partition u300_partitions[] = {
	{
		.name = "bootrecords",
		.offset = 0,
		.size = SZ_128K,
	},
	{
		.name = "free",
		.offset = SZ_128K,
		.size = 8064 * SZ_1K,
	},
	{
		.name = "platform",
		.offset = 8192 * SZ_1K,
		.size = 253952 * SZ_1K,
	},
};

/* 8-bit bus width, skip bad-block-table scan at probe time */
static struct fsmc_nand_platform_data nand_platform_data = {
	.partitions = u300_partitions,
	.nr_partitions = ARRAY_SIZE(u300_partitions),
	.options = NAND_SKIP_BBTSCAN,
	.width = FSMC_NAND_BW8,
};

static struct platform_device nand_device = {
	.name = "fsmc-nand",
	.id = -1,
	.resource = fsmc_resources,
	.num_resources = ARRAY_SIZE(fsmc_resources),
	.dev = {
		.platform_data = &nand_platform_data,
	},
};

/* Video encoder; its MEM resources get addresses in u300_assign_physmem() */
static struct platform_device ave_device = {
	.name = "video_enc",
	.id = -1,
	.num_resources = ARRAY_SIZE(ave_resources),
	.resource = ave_resources,
};

/* COH 901 318 DMA controller, carrying the channel config above */
static struct platform_device dma_device = {
	.name = "coh901318",
	.id = -1,
	.resource = dma_resource,
	.num_resources = ARRAY_SIZE(dma_resource),
	.dev = {
		.platform_data = &coh901318_platform,
		/* No DMA addressing restrictions */
		.coherent_dma_mask = ~0,
	},
};

/*
 * Notice that AMBA devices are initialized before platform devices.
 * This array is also walked by u300_assign_physmem() to hand out
 * reserved RAM to resources whose start address is still 0.
 */
static struct platform_device *platform_devs[] __initdata = {
	&dma_device,
	&i2c0_device,
	&i2c1_device,
	&keypad_device,
	&rtc_device,
	&gpio_device,
	&nand_device,
	&wdog_device,
	&ave_device
};

/*
 * Interrupts: the U300 platforms have two pl190 ARM PrimeCells connected
 * together so some interrupts are connected to the first one and some
 * to the second one.
 */
/*
 * Set up the two VIC (pl190) interrupt controllers.  Clocking must be
 * brought up first because the INTCON itself is clocked.
 */
void __init u300_init_irq(void)
{
	u32 mask[2] = {0, 0};
	struct clk *clk;
	int i;

	/* initialize clocking early, we want to clock the INTCON */
	u300_clock_init();

	/* Clock the interrupt controller */
	clk = clk_get_sys("intcon", NULL);
	BUG_ON(IS_ERR(clk));
	clk_enable(clk);

	/*
	 * Enable every IRQ: set_bit() treats &mask[0] as one long bitmap,
	 * so on 32-bit indices >= 32 land in mask[1].
	 */
	for (i = 0; i < NR_IRQS; i++)
		set_bit(i, (unsigned long *) &mask[0]);
	/* First VIC handles IRQs 0..31, second one 32 and up */
	vic_init((void __iomem *) U300_INTCON0_VBASE, 0, mask[0], mask[0]);
	vic_init((void __iomem *) U300_INTCON1_VBASE, 32, mask[1], mask[1]);
}


/*
 * U300 platforms peripheral handling
 */
struct db_chip {
	u16 chipid;	/* high byte of the (byte-swapped) CIDR value */
	const char *name;
};

/*
 * This is a list of the Digital Baseband chips used in the U300 platform.
 * Terminated by a chipid of 0.
 */
static struct db_chip db_chips[] __initdata = {
	{
		.chipid = 0xb800,
		.name = "DB3000",
	},
	{
		.chipid = 0xc000,
		.name = "DB3100",
	},
	{
		.chipid = 0xc800,
		.name = "DB3150",
	},
	{
		.chipid = 0xd800,
		.name = "DB3200",
	},
	{
		.chipid = 0xe000,
		.name = "DB3250",
	},
	{
		.chipid = 0xe800,
		.name = "DB3210",
	},
	{
		.chipid = 0xf000,
		.name = "DB3350 P1x",
	},
	{
		.chipid = 0xf100,
		.name = "DB3350 P2x",
	},
	{
		.chipid = 0x0000, /* List terminator */
		.name = NULL,
	}
};

/*
 * Read the chip ID register, look the chip up in db_chips[] and print
 * what we found.  Also warn if the detected chip does not match the
 * chip the kernel was configured for (BS330/BS335/BS365).
 */
static void __init u300_init_check_chip(void)
{

	u16 val;
	struct db_chip *chip;
	const char *chipname;
	const char unknown[] = "UNKNOWN";

	/* Read out and print chip ID */
	val = readw(U300_SYSCON_VBASE + U300_SYSCON_CIDR);
	/* This is in funky bigendian order... swap the two bytes */
	val = (val & 0xFFU) << 8 | (val >> 8);
	chip = db_chips;
	chipname = unknown;

	/* Only the high byte identifies the chip family */
	for ( ; chip->chipid; chip++) {
		if (chip->chipid == (val & 0xFF00U)) {
			chipname = chip->name;
			break;
		}
	}
	printk(KERN_INFO "Initializing U300 system on %s baseband chip " \
	       "(chip ID 0x%04x)\n", chipname, val);

#ifdef CONFIG_MACH_U300_BS330
	if ((val & 0xFF00U) != 0xd800) {
		printk(KERN_ERR "Platform configured for BS330 " \
		       "with DB3200 but %s detected, expect problems!",
		       chipname);
	}
#endif
#ifdef CONFIG_MACH_U300_BS335
	if ((val & 0xFF00U) != 0xf000 && (val & 0xFF00U) != 0xf100) {
		/* NOTE(review): the concatenated string below yields a
		 * double space ("BS335  with") -- cosmetic only */
		printk(KERN_ERR "Platform configured for BS335 " \
		       " with DB3350 but %s detected, expect problems!",
		       chipname);
	}
#endif
#ifdef CONFIG_MACH_U300_BS365
	if ((val & 0xFF00U) != 0xe800) {
		printk(KERN_ERR "Platform configured for BS365 " \
		       "with DB3210 but %s detected, expect problems!",
		       chipname);
	}
#endif


}

/*
 * Some devices and their resources require reserved physical memory from
 * the end of the available RAM. This function traverses the list of devices
 * and assigns actual addresses to these.
 */
static void __init u300_assign_physmem(void)
{
	/* Carve reserved memory from the first address past kernel RAM */
	unsigned long curr_start = __pa(high_memory);
	int i, j;

	for (i = 0; i < ARRAY_SIZE(platform_devs); i++) {
		for (j = 0; j < platform_devs[i]->num_resources; j++) {
			struct resource *const res =
				&platform_devs[i]->resource[j];

			/*
			 * Only MEM resources that were left with start == 0
			 * get an address; res->end holds the size-1 before
			 * this adjustment.
			 */
			if (IORESOURCE_MEM == res->flags &&
			    0 == res->start) {
				res->start = curr_start;
				res->end += curr_start;
				curr_start += (res->end - res->start + 1);

				/* NOTE(review): %#x with resource_size_t
				 * may mismatch on LPAE -- verify */
				printk(KERN_INFO "core.c: Mapping RAM " \
				       "%#x-%#x to device %s:%s\n",
				       res->start, res->end,
				       platform_devs[i]->name, res->name);
			}
		}
	}
}

/*
 * Register all AMBA and platform devices for the U300, after checking
 * which baseband chip we run on and switching to full performance.
 */
void __init u300_init_devices(void)
{
	int i;
	u16 val;

	/* Check what platform we run and print some status information */
	u300_init_check_chip();

	/* Set system to run at PLL208, max performance, a known state. */
	val = readw(U300_SYSCON_VBASE + U300_SYSCON_CCR);
	val &= ~U300_SYSCON_CCR_CLKING_PERFORMANCE_MASK;
	writew(val, U300_SYSCON_VBASE + U300_SYSCON_CCR);
	/* Wait for the PLL208 to lock if not locked in yet */
	/* (busy-wait with no timeout -- hangs forever if lock never comes) */
	while (!(readw(U300_SYSCON_VBASE + U300_SYSCON_CSR) &
		 U300_SYSCON_CSR_PLL208_LOCK_IND));

	/* Initialize SPI device with some board specifics */
	u300_spi_init(&pl022_device);

	/* Register the AMBA devices in the AMBA bus abstraction layer */
	for (i = 0; i < ARRAY_SIZE(amba_devs); i++) {
		struct amba_device *d = amba_devs[i];
		amba_device_register(d, &iomem_resource);
	}

	/* Hand out reserved RAM before the platform devices probe */
	u300_assign_physmem();

	/* Register subdevices on the I2C buses */
	u300_i2c_register_board_devices();

	/* Register the platform devices */
	platform_add_devices(platform_devs, ARRAY_SIZE(platform_devs));

	/* Register subdevices on the SPI bus */
	u300_spi_register_board_devices();

#ifndef CONFIG_MACH_U300_SEMI_IS_SHARED
	/*
	 * Enable SEMI self refresh. Self-refresh of the SDRAM is entered when
	 * both subsystems are requesting this mode.
	 * If we not share the Acc SDRAM, this is never the case. Therefore
	 * enable it here from the App side.
	 */
	val = readw(U300_SYSCON_VBASE + U300_SYSCON_SMCR) |
		U300_SYSCON_SMCR_SEMI_SREFREQ_ENABLE;
	writew(val, U300_SYSCON_VBASE + U300_SYSCON_SMCR);
#endif /* CONFIG_MACH_U300_SEMI_IS_SHARED */
}

/* Deferred MMC/SD registration, see comment inside */
static int core_module_init(void)
{
	/*
	 * This needs to be initialized later: it needs the input framework
	 * to be initialized first.
	 */
	return mmc_init(&mmcsd_device);
}
module_init(core_module_init);
gpl-2.0
sudosurootdev/kernel_lge_lgl24
arch/x86/kernel/cpu/mcheck/mce-severity.c
2682
7034
/*
 * MCE grading rules.
 * Copyright 2008, 2009 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 *
 * Author: Andi Kleen
 */
#include <linux/kernel.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/debugfs.h>
#include <asm/mce.h>

#include "mce-internal.h"

/*
 * Grade an mce by severity. In general the most severe ones are processed
 * first. Since there are quite a lot of combinations test the bits in a
 * table-driven way. The rules are simply processed in order, first
 * match wins.
 *
 * Note this is only used for machine check exceptions, the corrected
 * errors use much simpler rules. The exceptions still check for the corrected
 * errors, but only to leave them alone for the CMCI handler (except for
 * panic situations)
 */

/* Execution context of the faulting instruction */
enum context { IN_KERNEL = 1, IN_USER = 2 };
/* Whether the rule applies only with/without software error recovery (SER) */
enum ser { SER_REQUIRED = 1, NO_SER = 2 };

/*
 * One grading rule.  A rule matches when (status & mask) == result and
 * (mcgstatus & mcgmask) == mcgres, subject to the ser/context filters
 * (a zero field means "don't care").  'covered' records whether the
 * rule ever matched, for the debugfs coverage file below.
 */
static struct severity {
	u64 mask;
	u64 result;
	unsigned char sev;
	unsigned char mcgmask;
	unsigned char mcgres;
	unsigned char ser;
	unsigned char context;
	unsigned char covered;
	char *msg;
} severities[] = {
/* Helper macros to keep the table readable */
#define MCESEV(s, m, c...) { .sev = MCE_ ## s ## _SEVERITY, .msg = m, ## c }
#define KERNEL		.context = IN_KERNEL
#define USER		.context = IN_USER
#define SER		.ser = SER_REQUIRED
#define NOSER		.ser = NO_SER
#define BITCLR(x)	.mask = x, .result = 0
#define BITSET(x)	.mask = x, .result = x
#define MCGMASK(x, y)	.mcgmask = x, .mcgres = y
#define MASK(x, y)	.mask = x, .result = y
#define MCI_UC_S (MCI_STATUS_UC|MCI_STATUS_S)
#define MCI_UC_SAR (MCI_STATUS_UC|MCI_STATUS_S|MCI_STATUS_AR)
#define	MCI_ADDR (MCI_STATUS_ADDRV|MCI_STATUS_MISCV)
#define MCACOD 0xffff	/* Architecturally defined codes from SDM Vol. 3B Chapter 15 */
#define MCACOD_SCRUB	0x00C0	/* 0xC0-0xCF Memory Scrubbing */
#define MCACOD_SCRUBMSK	0xfff0
#define MCACOD_L3WB	0x017A	/* L3 Explicit Writeback */
#define MCACOD_DATA	0x0134	/* Data Load */
#define MCACOD_INSTR	0x0150	/* Instruction Fetch */

	/* Order matters: first match wins */
	MCESEV(
		NO, "Invalid",
		BITCLR(MCI_STATUS_VAL)
		),
	MCESEV(
		NO, "Not enabled",
		BITCLR(MCI_STATUS_EN)
		),
	MCESEV(
		PANIC, "Processor context corrupt",
		BITSET(MCI_STATUS_PCC)
		),
	/* When MCIP is not set something is very confused */
	MCESEV(
		PANIC, "MCIP not set in MCA handler",
		MCGMASK(MCG_STATUS_MCIP, 0)
		),
	/* Neither return nor error IP -- no chance to recover -> PANIC */
	MCESEV(
		PANIC, "Neither restart nor error IP",
		MCGMASK(MCG_STATUS_RIPV|MCG_STATUS_EIPV, 0)
		),
	MCESEV(
		PANIC, "In kernel and no restart IP",
		KERNEL, MCGMASK(MCG_STATUS_RIPV, 0)
		),
	MCESEV(
		KEEP, "Corrected error",
		NOSER, BITCLR(MCI_STATUS_UC)
		),

	/* ignore OVER for UCNA */
	MCESEV(
		KEEP, "Uncorrected no action required",
		SER, MASK(MCI_UC_SAR, MCI_STATUS_UC)
		),
	MCESEV(
		PANIC, "Illegal combination (UCNA with AR=1)",
		SER,
		MASK(MCI_STATUS_OVER|MCI_UC_SAR, MCI_STATUS_UC|MCI_STATUS_AR)
		),
	MCESEV(
		KEEP, "Non signalled machine check",
		SER, BITCLR(MCI_STATUS_S)
		),
	MCESEV(
		PANIC, "Action required with lost events",
		SER, BITSET(MCI_STATUS_OVER|MCI_UC_SAR)
		),

	/* known AR MCACODs: */
#ifdef	CONFIG_MEMORY_FAILURE
	MCESEV(
		KEEP, "HT thread notices Action required: data load error",
		SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCI_ADDR|MCACOD, MCI_UC_SAR|MCI_ADDR|MCACOD_DATA),
		MCGMASK(MCG_STATUS_EIPV, 0)
		),
	MCESEV(
		AR, "Action required: data load error",
		SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCI_ADDR|MCACOD, MCI_UC_SAR|MCI_ADDR|MCACOD_DATA),
		USER
		),
#endif
	MCESEV(
		PANIC, "Action required: unknown MCACOD",
		SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR, MCI_UC_SAR)
		),

	/* known AO MCACODs: */
	MCESEV(
		AO, "Action optional: memory scrubbing error",
		SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCACOD_SCRUBMSK, MCI_UC_S|MCACOD_SCRUB)
		),
	MCESEV(
		AO, "Action optional: last level cache writeback error",
		SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCACOD, MCI_UC_S|MCACOD_L3WB)
		),
	MCESEV(
		SOME, "Action optional: unknown MCACOD",
		SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR, MCI_UC_S)
		),
	MCESEV(
		SOME, "Action optional with lost events",
		SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR, MCI_STATUS_OVER|MCI_UC_S)
		),

	MCESEV(
		PANIC, "Overflowed uncorrected",
		BITSET(MCI_STATUS_OVER|MCI_STATUS_UC)
		),
	MCESEV(
		UC, "Uncorrected",
		BITSET(MCI_STATUS_UC)
		),
	MCESEV(
		SOME, "No match",
		BITSET(0)
		)	/* always matches. keep at end */
};

/*
 * If the EIPV bit is set, it means the saved IP is the
 * instruction which caused the MCE.
 */
static int error_context(struct mce *m)
{
	if (m->mcgstatus & MCG_STATUS_EIPV)
		/* CPL 3 (lowest two bits of CS) means user mode */
		return (m->ip && (m->cs & 3) == 3) ? IN_USER : IN_KERNEL;
	/* Unknown, assume kernel */
	return IN_KERNEL;
}

/*
 * Grade one machine check: walk severities[] in order and return the
 * severity of the first rule that fully matches, optionally handing
 * back the rule's message via *msg.  Uncorrected errors hit in kernel
 * context are escalated to PANIC when panic_on_oops is set or
 * tolerant < 1.  The loop always terminates: the final "No match"
 * entry matches everything.
 */
int mce_severity(struct mce *m, int tolerant, char **msg)
{
	enum context ctx = error_context(m);
	struct severity *s;

	for (s = severities;; s++) {
		if ((m->status & s->mask) != s->result)
			continue;
		if ((m->mcgstatus & s->mcgmask) != s->mcgres)
			continue;
		/* Filter by SER capability of this CPU (mce_ser) */
		if (s->ser == SER_REQUIRED && !mce_ser)
			continue;
		if (s->ser == NO_SER && mce_ser)
			continue;
		/* s->context == 0 means the rule applies in any context */
		if (s->context && ctx != s->context)
			continue;
		if (msg)
			*msg = s->msg;
		s->covered = 1;
		if (s->sev >= MCE_UC_SEVERITY && ctx == IN_KERNEL) {
			if (panic_on_oops || tolerant < 1)
				return MCE_PANIC_SEVERITY;
		}
		return s->sev;
	}
}

#ifdef CONFIG_DEBUG_FS
/*
 * seq_file iterator over severities[]; each record is one grading rule,
 * shown by s_show() as "<covered>\t<msg>".
 */
static void *s_start(struct seq_file *f, loff_t *pos)
{
	if (*pos >= ARRAY_SIZE(severities))
		return NULL;
	return &severities[*pos];
}

static void *s_next(struct seq_file *f, void *data, loff_t *pos)
{
	if (++(*pos) >= ARRAY_SIZE(severities))
		return NULL;
	return &severities[*pos];
}

static void s_stop(struct seq_file *f, void *data)
{
}

static int s_show(struct seq_file *f, void *data)
{
	struct severity *ser = data;
	seq_printf(f, "%d\t%s\n", ser->covered, ser->msg);
	return 0;
}

static const struct seq_operations severities_seq_ops = {
	.start	= s_start,
	.next	= s_next,
	.stop	= s_stop,
	.show	= s_show,
};

static int severities_coverage_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &severities_seq_ops);
}

/* Any write (regardless of content) resets all coverage counters */
static ssize_t severities_coverage_write(struct file *file,
					 const char __user *ubuf,
					 size_t count, loff_t *ppos)
{
	int i;
	for (i = 0; i < ARRAY_SIZE(severities); i++)
		severities[i].covered = 0;
	/* Claim the whole write was consumed */
	return count;
}

static const struct file_operations severities_coverage_fops = {
	.open		= severities_coverage_open,
	.release	= seq_release,
	.read		= seq_read,
	.write		= severities_coverage_write,
	.llseek		= seq_lseek,
};

/*
 * Create <mce debugfs dir>/severities-coverage.
 * NOTE(review): the file is created mode 0444 although the fops has a
 * .write handler -- confirm whether writes are intended to be root-only
 * via other means.
 */
static int __init severities_debugfs_init(void)
{
	struct dentry *dmce, *fsev;

	dmce = mce_get_debugfs_dir();
	if (!dmce)
		goto err_out;

	fsev = debugfs_create_file("severities-coverage", 0444, dmce, NULL,
				   &severities_coverage_fops);
	if (!fsev)
		goto err_out;

	return 0;

err_out:
	return -ENOMEM;
}
late_initcall(severities_debugfs_init);
#endif /* CONFIG_DEBUG_FS */
gpl-2.0
agat63/N861_ZTE_kernel
tools/perf/util/ui/browsers/hists.c
2938
28463
#define _GNU_SOURCE #include <stdio.h> #undef _GNU_SOURCE #include "../libslang.h" #include <stdlib.h> #include <string.h> #include <newt.h> #include <linux/rbtree.h> #include "../../evsel.h" #include "../../evlist.h" #include "../../hist.h" #include "../../pstack.h" #include "../../sort.h" #include "../../util.h" #include "../browser.h" #include "../helpline.h" #include "../util.h" #include "map.h" struct hist_browser { struct ui_browser b; struct hists *hists; struct hist_entry *he_selection; struct map_symbol *selection; }; static void hist_browser__refresh_dimensions(struct hist_browser *self) { /* 3 == +/- toggle symbol before actual hist_entry rendering */ self->b.width = 3 + (hists__sort_list_width(self->hists) + sizeof("[k]")); } static void hist_browser__reset(struct hist_browser *self) { self->b.nr_entries = self->hists->nr_entries; hist_browser__refresh_dimensions(self); ui_browser__reset_index(&self->b); } static char tree__folded_sign(bool unfolded) { return unfolded ? '-' : '+'; } static char map_symbol__folded(const struct map_symbol *self) { return self->has_children ? tree__folded_sign(self->unfolded) : ' '; } static char hist_entry__folded(const struct hist_entry *self) { return map_symbol__folded(&self->ms); } static char callchain_list__folded(const struct callchain_list *self) { return map_symbol__folded(&self->ms); } static void map_symbol__set_folding(struct map_symbol *self, bool unfold) { self->unfolded = unfold ? 
self->has_children : false; } static int callchain_node__count_rows_rb_tree(struct callchain_node *self) { int n = 0; struct rb_node *nd; for (nd = rb_first(&self->rb_root); nd; nd = rb_next(nd)) { struct callchain_node *child = rb_entry(nd, struct callchain_node, rb_node); struct callchain_list *chain; char folded_sign = ' '; /* No children */ list_for_each_entry(chain, &child->val, list) { ++n; /* We need this because we may not have children */ folded_sign = callchain_list__folded(chain); if (folded_sign == '+') break; } if (folded_sign == '-') /* Have children and they're unfolded */ n += callchain_node__count_rows_rb_tree(child); } return n; } static int callchain_node__count_rows(struct callchain_node *node) { struct callchain_list *chain; bool unfolded = false; int n = 0; list_for_each_entry(chain, &node->val, list) { ++n; unfolded = chain->ms.unfolded; } if (unfolded) n += callchain_node__count_rows_rb_tree(node); return n; } static int callchain__count_rows(struct rb_root *chain) { struct rb_node *nd; int n = 0; for (nd = rb_first(chain); nd; nd = rb_next(nd)) { struct callchain_node *node = rb_entry(nd, struct callchain_node, rb_node); n += callchain_node__count_rows(node); } return n; } static bool map_symbol__toggle_fold(struct map_symbol *self) { if (!self->has_children) return false; self->unfolded = !self->unfolded; return true; } static void callchain_node__init_have_children_rb_tree(struct callchain_node *self) { struct rb_node *nd = rb_first(&self->rb_root); for (nd = rb_first(&self->rb_root); nd; nd = rb_next(nd)) { struct callchain_node *child = rb_entry(nd, struct callchain_node, rb_node); struct callchain_list *chain; bool first = true; list_for_each_entry(chain, &child->val, list) { if (first) { first = false; chain->ms.has_children = chain->list.next != &child->val || !RB_EMPTY_ROOT(&child->rb_root); } else chain->ms.has_children = chain->list.next == &child->val && !RB_EMPTY_ROOT(&child->rb_root); } 
callchain_node__init_have_children_rb_tree(child); } } static void callchain_node__init_have_children(struct callchain_node *self) { struct callchain_list *chain; list_for_each_entry(chain, &self->val, list) chain->ms.has_children = !RB_EMPTY_ROOT(&self->rb_root); callchain_node__init_have_children_rb_tree(self); } static void callchain__init_have_children(struct rb_root *self) { struct rb_node *nd; for (nd = rb_first(self); nd; nd = rb_next(nd)) { struct callchain_node *node = rb_entry(nd, struct callchain_node, rb_node); callchain_node__init_have_children(node); } } static void hist_entry__init_have_children(struct hist_entry *self) { if (!self->init_have_children) { self->ms.has_children = !RB_EMPTY_ROOT(&self->sorted_chain); callchain__init_have_children(&self->sorted_chain); self->init_have_children = true; } } static bool hist_browser__toggle_fold(struct hist_browser *self) { if (map_symbol__toggle_fold(self->selection)) { struct hist_entry *he = self->he_selection; hist_entry__init_have_children(he); self->hists->nr_entries -= he->nr_rows; if (he->ms.unfolded) he->nr_rows = callchain__count_rows(&he->sorted_chain); else he->nr_rows = 0; self->hists->nr_entries += he->nr_rows; self->b.nr_entries = self->hists->nr_entries; return true; } /* If it doesn't have children, no toggling performed */ return false; } static int callchain_node__set_folding_rb_tree(struct callchain_node *self, bool unfold) { int n = 0; struct rb_node *nd; for (nd = rb_first(&self->rb_root); nd; nd = rb_next(nd)) { struct callchain_node *child = rb_entry(nd, struct callchain_node, rb_node); struct callchain_list *chain; bool has_children = false; list_for_each_entry(chain, &child->val, list) { ++n; map_symbol__set_folding(&chain->ms, unfold); has_children = chain->ms.has_children; } if (has_children) n += callchain_node__set_folding_rb_tree(child, unfold); } return n; } static int callchain_node__set_folding(struct callchain_node *node, bool unfold) { struct callchain_list *chain; bool 
has_children = false; int n = 0; list_for_each_entry(chain, &node->val, list) { ++n; map_symbol__set_folding(&chain->ms, unfold); has_children = chain->ms.has_children; } if (has_children) n += callchain_node__set_folding_rb_tree(node, unfold); return n; } static int callchain__set_folding(struct rb_root *chain, bool unfold) { struct rb_node *nd; int n = 0; for (nd = rb_first(chain); nd; nd = rb_next(nd)) { struct callchain_node *node = rb_entry(nd, struct callchain_node, rb_node); n += callchain_node__set_folding(node, unfold); } return n; } static void hist_entry__set_folding(struct hist_entry *self, bool unfold) { hist_entry__init_have_children(self); map_symbol__set_folding(&self->ms, unfold); if (self->ms.has_children) { int n = callchain__set_folding(&self->sorted_chain, unfold); self->nr_rows = unfold ? n : 0; } else self->nr_rows = 0; } static void hists__set_folding(struct hists *self, bool unfold) { struct rb_node *nd; self->nr_entries = 0; for (nd = rb_first(&self->entries); nd; nd = rb_next(nd)) { struct hist_entry *he = rb_entry(nd, struct hist_entry, rb_node); hist_entry__set_folding(he, unfold); self->nr_entries += 1 + he->nr_rows; } } static void hist_browser__set_folding(struct hist_browser *self, bool unfold) { hists__set_folding(self->hists, unfold); self->b.nr_entries = self->hists->nr_entries; /* Go to the start, we may be way after valid entries after a collapse */ ui_browser__reset_index(&self->b); } static int hist_browser__run(struct hist_browser *self, const char *title) { int key; int exit_keys[] = { 'a', '?', 'h', 'C', 'd', 'D', 'E', 't', NEWT_KEY_ENTER, NEWT_KEY_RIGHT, NEWT_KEY_LEFT, NEWT_KEY_TAB, NEWT_KEY_UNTAB, 0, }; self->b.entries = &self->hists->entries; self->b.nr_entries = self->hists->nr_entries; hist_browser__refresh_dimensions(self); if (ui_browser__show(&self->b, title, "Press '?' 
for help on key bindings") < 0) return -1; ui_browser__add_exit_keys(&self->b, exit_keys); while (1) { key = ui_browser__run(&self->b); switch (key) { case 'D': { /* Debug */ static int seq; struct hist_entry *h = rb_entry(self->b.top, struct hist_entry, rb_node); ui_helpline__pop(); ui_helpline__fpush("%d: nr_ent=(%d,%d), height=%d, idx=%d, fve: idx=%d, row_off=%d, nrows=%d", seq++, self->b.nr_entries, self->hists->nr_entries, self->b.height, self->b.index, self->b.top_idx, h->row_offset, h->nr_rows); } break; case 'C': /* Collapse the whole world. */ hist_browser__set_folding(self, false); break; case 'E': /* Expand the whole world. */ hist_browser__set_folding(self, true); break; case NEWT_KEY_ENTER: if (hist_browser__toggle_fold(self)) break; /* fall thru */ default: goto out; } } out: ui_browser__hide(&self->b); return key; } static char *callchain_list__sym_name(struct callchain_list *self, char *bf, size_t bfsize) { if (self->ms.sym) return self->ms.sym->name; snprintf(bf, bfsize, "%#" PRIx64, self->ip); return bf; } #define LEVEL_OFFSET_STEP 3 static int hist_browser__show_callchain_node_rb_tree(struct hist_browser *self, struct callchain_node *chain_node, u64 total, int level, unsigned short row, off_t *row_offset, bool *is_current_entry) { struct rb_node *node; int first_row = row, width, offset = level * LEVEL_OFFSET_STEP; u64 new_total, remaining; if (callchain_param.mode == CHAIN_GRAPH_REL) new_total = chain_node->children_hit; else new_total = total; remaining = new_total; node = rb_first(&chain_node->rb_root); while (node) { struct callchain_node *child = rb_entry(node, struct callchain_node, rb_node); struct rb_node *next = rb_next(node); u64 cumul = callchain_cumul_hits(child); struct callchain_list *chain; char folded_sign = ' '; int first = true; int extra_offset = 0; remaining -= cumul; list_for_each_entry(chain, &child->val, list) { char ipstr[BITS_PER_LONG / 4 + 1], *alloc_str; const char *str; int color; bool was_first = first; if (first) 
first = false; else extra_offset = LEVEL_OFFSET_STEP; folded_sign = callchain_list__folded(chain); if (*row_offset != 0) { --*row_offset; goto do_next; } alloc_str = NULL; str = callchain_list__sym_name(chain, ipstr, sizeof(ipstr)); if (was_first) { double percent = cumul * 100.0 / new_total; if (asprintf(&alloc_str, "%2.2f%% %s", percent, str) < 0) str = "Not enough memory!"; else str = alloc_str; } color = HE_COLORSET_NORMAL; width = self->b.width - (offset + extra_offset + 2); if (ui_browser__is_current_entry(&self->b, row)) { self->selection = &chain->ms; color = HE_COLORSET_SELECTED; *is_current_entry = true; } ui_browser__set_color(&self->b, color); ui_browser__gotorc(&self->b, row, 0); slsmg_write_nstring(" ", offset + extra_offset); slsmg_printf("%c ", folded_sign); slsmg_write_nstring(str, width); free(alloc_str); if (++row == self->b.height) goto out; do_next: if (folded_sign == '+') break; } if (folded_sign == '-') { const int new_level = level + (extra_offset ? 2 : 1); row += hist_browser__show_callchain_node_rb_tree(self, child, new_total, new_level, row, row_offset, is_current_entry); } if (row == self->b.height) goto out; node = next; } out: return row - first_row; } static int hist_browser__show_callchain_node(struct hist_browser *self, struct callchain_node *node, int level, unsigned short row, off_t *row_offset, bool *is_current_entry) { struct callchain_list *chain; int first_row = row, offset = level * LEVEL_OFFSET_STEP, width = self->b.width - offset; char folded_sign = ' '; list_for_each_entry(chain, &node->val, list) { char ipstr[BITS_PER_LONG / 4 + 1], *s; int color; folded_sign = callchain_list__folded(chain); if (*row_offset != 0) { --*row_offset; continue; } color = HE_COLORSET_NORMAL; if (ui_browser__is_current_entry(&self->b, row)) { self->selection = &chain->ms; color = HE_COLORSET_SELECTED; *is_current_entry = true; } s = callchain_list__sym_name(chain, ipstr, sizeof(ipstr)); ui_browser__gotorc(&self->b, row, 0); 
ui_browser__set_color(&self->b, color); slsmg_write_nstring(" ", offset); slsmg_printf("%c ", folded_sign); slsmg_write_nstring(s, width - 2); if (++row == self->b.height) goto out; } if (folded_sign == '-') row += hist_browser__show_callchain_node_rb_tree(self, node, self->hists->stats.total_period, level + 1, row, row_offset, is_current_entry); out: return row - first_row; } static int hist_browser__show_callchain(struct hist_browser *self, struct rb_root *chain, int level, unsigned short row, off_t *row_offset, bool *is_current_entry) { struct rb_node *nd; int first_row = row; for (nd = rb_first(chain); nd; nd = rb_next(nd)) { struct callchain_node *node = rb_entry(nd, struct callchain_node, rb_node); row += hist_browser__show_callchain_node(self, node, level, row, row_offset, is_current_entry); if (row == self->b.height) break; } return row - first_row; } static int hist_browser__show_entry(struct hist_browser *self, struct hist_entry *entry, unsigned short row) { char s[256]; double percent; int printed = 0; int color, width = self->b.width; char folded_sign = ' '; bool current_entry = ui_browser__is_current_entry(&self->b, row); off_t row_offset = entry->row_offset; if (current_entry) { self->he_selection = entry; self->selection = &entry->ms; } if (symbol_conf.use_callchain) { hist_entry__init_have_children(entry); folded_sign = hist_entry__folded(entry); } if (row_offset == 0) { hist_entry__snprintf(entry, s, sizeof(s), self->hists, NULL, false, 0, false, self->hists->stats.total_period); percent = (entry->period * 100.0) / self->hists->stats.total_period; color = HE_COLORSET_SELECTED; if (!current_entry) { if (percent >= MIN_RED) color = HE_COLORSET_TOP; else if (percent >= MIN_GREEN) color = HE_COLORSET_MEDIUM; else color = HE_COLORSET_NORMAL; } ui_browser__set_color(&self->b, color); ui_browser__gotorc(&self->b, row, 0); if (symbol_conf.use_callchain) { slsmg_printf("%c ", folded_sign); width -= 2; } slsmg_write_nstring(s, width); ++row; ++printed; } 
else --row_offset; if (folded_sign == '-' && row != self->b.height) { printed += hist_browser__show_callchain(self, &entry->sorted_chain, 1, row, &row_offset, &current_entry); if (current_entry) self->he_selection = entry; } return printed; } static unsigned int hist_browser__refresh(struct ui_browser *self) { unsigned row = 0; struct rb_node *nd; struct hist_browser *hb = container_of(self, struct hist_browser, b); if (self->top == NULL) self->top = rb_first(&hb->hists->entries); for (nd = self->top; nd; nd = rb_next(nd)) { struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node); if (h->filtered) continue; row += hist_browser__show_entry(hb, h, row); if (row == self->height) break; } return row; } static struct rb_node *hists__filter_entries(struct rb_node *nd) { while (nd != NULL) { struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node); if (!h->filtered) return nd; nd = rb_next(nd); } return NULL; } static struct rb_node *hists__filter_prev_entries(struct rb_node *nd) { while (nd != NULL) { struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node); if (!h->filtered) return nd; nd = rb_prev(nd); } return NULL; } static void ui_browser__hists_seek(struct ui_browser *self, off_t offset, int whence) { struct hist_entry *h; struct rb_node *nd; bool first = true; if (self->nr_entries == 0) return; switch (whence) { case SEEK_SET: nd = hists__filter_entries(rb_first(self->entries)); break; case SEEK_CUR: nd = self->top; goto do_offset; case SEEK_END: nd = hists__filter_prev_entries(rb_last(self->entries)); first = false; break; default: return; } /* * Moves not relative to the first visible entry invalidates its * row_offset: */ h = rb_entry(self->top, struct hist_entry, rb_node); h->row_offset = 0; /* * Here we have to check if nd is expanded (+), if it is we can't go * the next top level hist_entry, instead we must compute an offset of * what _not_ to show and not change the first visible entry. 
* * This offset increments when we are going from top to bottom and * decreases when we're going from bottom to top. * * As we don't have backpointers to the top level in the callchains * structure, we need to always print the whole hist_entry callchain, * skipping the first ones that are before the first visible entry * and stop when we printed enough lines to fill the screen. */ do_offset: if (offset > 0) { do { h = rb_entry(nd, struct hist_entry, rb_node); if (h->ms.unfolded) { u16 remaining = h->nr_rows - h->row_offset; if (offset > remaining) { offset -= remaining; h->row_offset = 0; } else { h->row_offset += offset; offset = 0; self->top = nd; break; } } nd = hists__filter_entries(rb_next(nd)); if (nd == NULL) break; --offset; self->top = nd; } while (offset != 0); } else if (offset < 0) { while (1) { h = rb_entry(nd, struct hist_entry, rb_node); if (h->ms.unfolded) { if (first) { if (-offset > h->row_offset) { offset += h->row_offset; h->row_offset = 0; } else { h->row_offset += offset; offset = 0; self->top = nd; break; } } else { if (-offset > h->nr_rows) { offset += h->nr_rows; h->row_offset = 0; } else { h->row_offset = h->nr_rows + offset; offset = 0; self->top = nd; break; } } } nd = hists__filter_prev_entries(rb_prev(nd)); if (nd == NULL) break; ++offset; self->top = nd; if (offset == 0) { /* * Last unfiltered hist_entry, check if it is * unfolded, if it is then we should have * row_offset at its last entry. 
*/ h = rb_entry(nd, struct hist_entry, rb_node); if (h->ms.unfolded) h->row_offset = h->nr_rows; break; } first = false; } } else { self->top = nd; h = rb_entry(nd, struct hist_entry, rb_node); h->row_offset = 0; } } static struct hist_browser *hist_browser__new(struct hists *hists) { struct hist_browser *self = zalloc(sizeof(*self)); if (self) { self->hists = hists; self->b.refresh = hist_browser__refresh; self->b.seek = ui_browser__hists_seek; } return self; } static void hist_browser__delete(struct hist_browser *self) { free(self); } static struct hist_entry *hist_browser__selected_entry(struct hist_browser *self) { return self->he_selection; } static struct thread *hist_browser__selected_thread(struct hist_browser *self) { return self->he_selection->thread; } static int hists__browser_title(struct hists *self, char *bf, size_t size, const char *ev_name, const struct dso *dso, const struct thread *thread) { char unit; int printed; unsigned long nr_events = self->stats.nr_events[PERF_RECORD_SAMPLE]; nr_events = convert_unit(nr_events, &unit); printed = snprintf(bf, size, "Events: %lu%c %s", nr_events, unit, ev_name); if (thread) printed += snprintf(bf + printed, size - printed, ", Thread: %s(%d)", (thread->comm_set ? 
thread->comm : ""), thread->pid); if (dso) printed += snprintf(bf + printed, size - printed, ", DSO: %s", dso->short_name); return printed; } static int perf_evsel__hists_browse(struct perf_evsel *evsel, const char *helpline, const char *ev_name, bool left_exits) { struct hists *self = &evsel->hists; struct hist_browser *browser = hist_browser__new(self); struct pstack *fstack; const struct thread *thread_filter = NULL; const struct dso *dso_filter = NULL; char msg[160]; int key = -1; if (browser == NULL) return -1; fstack = pstack__new(2); if (fstack == NULL) goto out; ui_helpline__push(helpline); hists__browser_title(self, msg, sizeof(msg), ev_name, dso_filter, thread_filter); while (1) { const struct thread *thread = NULL; const struct dso *dso = NULL; char *options[16]; int nr_options = 0, choice = 0, i, annotate = -2, zoom_dso = -2, zoom_thread = -2, browse_map = -2; key = hist_browser__run(browser, msg); if (browser->he_selection != NULL) { thread = hist_browser__selected_thread(browser); dso = browser->selection->map ? 
browser->selection->map->dso : NULL; } switch (key) { case NEWT_KEY_TAB: case NEWT_KEY_UNTAB: /* * Exit the browser, let hists__browser_tree * go to the next or previous */ goto out_free_stack; case 'a': if (browser->selection == NULL || browser->selection->sym == NULL || browser->selection->map->dso->annotate_warned) continue; goto do_annotate; case 'd': goto zoom_dso; case 't': goto zoom_thread; case NEWT_KEY_F1: case 'h': case '?': ui__help_window("-> Zoom into DSO/Threads & Annotate current symbol\n" "<- Zoom out\n" "a Annotate current symbol\n" "h/?/F1 Show this window\n" "C Collapse all callchains\n" "E Expand all callchains\n" "d Zoom into current DSO\n" "t Zoom into current Thread\n" "TAB/UNTAB Switch events\n" "q/CTRL+C Exit browser"); continue; case NEWT_KEY_ENTER: case NEWT_KEY_RIGHT: /* menu */ break; case NEWT_KEY_LEFT: { const void *top; if (pstack__empty(fstack)) { /* * Go back to the perf_evsel_menu__run or other user */ if (left_exits) goto out_free_stack; continue; } top = pstack__pop(fstack); if (top == &dso_filter) goto zoom_out_dso; if (top == &thread_filter) goto zoom_out_thread; continue; } case NEWT_KEY_ESCAPE: if (!left_exits && !ui__dialog_yesno("Do you really want to exit?")) continue; /* Fall thru */ default: goto out_free_stack; } if (browser->selection != NULL && browser->selection->sym != NULL && !browser->selection->map->dso->annotate_warned && asprintf(&options[nr_options], "Annotate %s", browser->selection->sym->name) > 0) annotate = nr_options++; if (thread != NULL && asprintf(&options[nr_options], "Zoom %s %s(%d) thread", (thread_filter ? "out of" : "into"), (thread->comm_set ? thread->comm : ""), thread->pid) > 0) zoom_thread = nr_options++; if (dso != NULL && asprintf(&options[nr_options], "Zoom %s %s DSO", (dso_filter ? "out of" : "into"), (dso->kernel ? 
"the Kernel" : dso->short_name)) > 0) zoom_dso = nr_options++; if (browser->selection != NULL && browser->selection->map != NULL && asprintf(&options[nr_options], "Browse map details") > 0) browse_map = nr_options++; options[nr_options++] = (char *)"Exit"; choice = ui__popup_menu(nr_options, options); for (i = 0; i < nr_options - 1; ++i) free(options[i]); if (choice == nr_options - 1) break; if (choice == -1) continue; if (choice == annotate) { struct hist_entry *he; do_annotate: he = hist_browser__selected_entry(browser); if (he == NULL) continue; hist_entry__tui_annotate(he, evsel->idx); } else if (choice == browse_map) map__browse(browser->selection->map); else if (choice == zoom_dso) { zoom_dso: if (dso_filter) { pstack__remove(fstack, &dso_filter); zoom_out_dso: ui_helpline__pop(); dso_filter = NULL; } else { if (dso == NULL) continue; ui_helpline__fpush("To zoom out press <- or -> + \"Zoom out of %s DSO\"", dso->kernel ? "the Kernel" : dso->short_name); dso_filter = dso; pstack__push(fstack, &dso_filter); } hists__filter_by_dso(self, dso_filter); hists__browser_title(self, msg, sizeof(msg), ev_name, dso_filter, thread_filter); hist_browser__reset(browser); } else if (choice == zoom_thread) { zoom_thread: if (thread_filter) { pstack__remove(fstack, &thread_filter); zoom_out_thread: ui_helpline__pop(); thread_filter = NULL; } else { ui_helpline__fpush("To zoom out press <- or -> + \"Zoom out of %s(%d) thread\"", thread->comm_set ? 
thread->comm : "", thread->pid); thread_filter = thread; pstack__push(fstack, &thread_filter); } hists__filter_by_thread(self, thread_filter); hists__browser_title(self, msg, sizeof(msg), ev_name, dso_filter, thread_filter); hist_browser__reset(browser); } } out_free_stack: pstack__delete(fstack); out: hist_browser__delete(browser); return key; } struct perf_evsel_menu { struct ui_browser b; struct perf_evsel *selection; }; static void perf_evsel_menu__write(struct ui_browser *browser, void *entry, int row) { struct perf_evsel_menu *menu = container_of(browser, struct perf_evsel_menu, b); struct perf_evsel *evsel = list_entry(entry, struct perf_evsel, node); bool current_entry = ui_browser__is_current_entry(browser, row); unsigned long nr_events = evsel->hists.stats.nr_events[PERF_RECORD_SAMPLE]; const char *ev_name = event_name(evsel); char bf[256], unit; ui_browser__set_color(browser, current_entry ? HE_COLORSET_SELECTED : HE_COLORSET_NORMAL); nr_events = convert_unit(nr_events, &unit); snprintf(bf, sizeof(bf), "%lu%c%s%s", nr_events, unit, unit == ' ' ? 
"" : " ", ev_name); slsmg_write_nstring(bf, browser->width); if (current_entry) menu->selection = evsel; } static int perf_evsel_menu__run(struct perf_evsel_menu *menu, const char *help) { int exit_keys[] = { NEWT_KEY_ENTER, NEWT_KEY_RIGHT, 0, }; struct perf_evlist *evlist = menu->b.priv; struct perf_evsel *pos; const char *ev_name, *title = "Available samples"; int key; if (ui_browser__show(&menu->b, title, "ESC: exit, ENTER|->: Browse histograms") < 0) return -1; ui_browser__add_exit_keys(&menu->b, exit_keys); while (1) { key = ui_browser__run(&menu->b); switch (key) { case NEWT_KEY_RIGHT: case NEWT_KEY_ENTER: if (!menu->selection) continue; pos = menu->selection; browse_hists: ev_name = event_name(pos); key = perf_evsel__hists_browse(pos, help, ev_name, true); ui_browser__show_title(&menu->b, title); break; case NEWT_KEY_LEFT: continue; case NEWT_KEY_ESCAPE: if (!ui__dialog_yesno("Do you really want to exit?")) continue; /* Fall thru */ default: goto out; } switch (key) { case NEWT_KEY_TAB: if (pos->node.next == &evlist->entries) pos = list_entry(evlist->entries.next, struct perf_evsel, node); else pos = list_entry(pos->node.next, struct perf_evsel, node); goto browse_hists; case NEWT_KEY_UNTAB: if (pos->node.prev == &evlist->entries) pos = list_entry(evlist->entries.prev, struct perf_evsel, node); else pos = list_entry(pos->node.prev, struct perf_evsel, node); goto browse_hists; case 'q': case CTRL('c'): goto out; default: break; } } out: ui_browser__hide(&menu->b); return key; } static int __perf_evlist__tui_browse_hists(struct perf_evlist *evlist, const char *help) { struct perf_evsel *pos; struct perf_evsel_menu menu = { .b = { .entries = &evlist->entries, .refresh = ui_browser__list_head_refresh, .seek = ui_browser__list_head_seek, .write = perf_evsel_menu__write, .nr_entries = evlist->nr_entries, .priv = evlist, }, }; ui_helpline__push("Press ESC to exit"); list_for_each_entry(pos, &evlist->entries, node) { const char *ev_name = event_name(pos); size_t 
line_len = strlen(ev_name) + 7; if (menu.b.width < line_len) menu.b.width = line_len; /* * Cache the evsel name, tracepoints have a _high_ cost per * event_name() call. */ if (pos->name == NULL) pos->name = strdup(ev_name); } return perf_evsel_menu__run(&menu, help); } int perf_evlist__tui_browse_hists(struct perf_evlist *evlist, const char *help) { if (evlist->nr_entries == 1) { struct perf_evsel *first = list_entry(evlist->entries.next, struct perf_evsel, node); const char *ev_name = event_name(first); return perf_evsel__hists_browse(first, help, ev_name, false); } return __perf_evlist__tui_browse_hists(evlist, help); }
gpl-2.0
dukie/sun4i-kernel
tools/perf/util/ui/browsers/hists.c
2938
28463
#define _GNU_SOURCE #include <stdio.h> #undef _GNU_SOURCE #include "../libslang.h" #include <stdlib.h> #include <string.h> #include <newt.h> #include <linux/rbtree.h> #include "../../evsel.h" #include "../../evlist.h" #include "../../hist.h" #include "../../pstack.h" #include "../../sort.h" #include "../../util.h" #include "../browser.h" #include "../helpline.h" #include "../util.h" #include "map.h" struct hist_browser { struct ui_browser b; struct hists *hists; struct hist_entry *he_selection; struct map_symbol *selection; }; static void hist_browser__refresh_dimensions(struct hist_browser *self) { /* 3 == +/- toggle symbol before actual hist_entry rendering */ self->b.width = 3 + (hists__sort_list_width(self->hists) + sizeof("[k]")); } static void hist_browser__reset(struct hist_browser *self) { self->b.nr_entries = self->hists->nr_entries; hist_browser__refresh_dimensions(self); ui_browser__reset_index(&self->b); } static char tree__folded_sign(bool unfolded) { return unfolded ? '-' : '+'; } static char map_symbol__folded(const struct map_symbol *self) { return self->has_children ? tree__folded_sign(self->unfolded) : ' '; } static char hist_entry__folded(const struct hist_entry *self) { return map_symbol__folded(&self->ms); } static char callchain_list__folded(const struct callchain_list *self) { return map_symbol__folded(&self->ms); } static void map_symbol__set_folding(struct map_symbol *self, bool unfold) { self->unfolded = unfold ? 
self->has_children : false; } static int callchain_node__count_rows_rb_tree(struct callchain_node *self) { int n = 0; struct rb_node *nd; for (nd = rb_first(&self->rb_root); nd; nd = rb_next(nd)) { struct callchain_node *child = rb_entry(nd, struct callchain_node, rb_node); struct callchain_list *chain; char folded_sign = ' '; /* No children */ list_for_each_entry(chain, &child->val, list) { ++n; /* We need this because we may not have children */ folded_sign = callchain_list__folded(chain); if (folded_sign == '+') break; } if (folded_sign == '-') /* Have children and they're unfolded */ n += callchain_node__count_rows_rb_tree(child); } return n; } static int callchain_node__count_rows(struct callchain_node *node) { struct callchain_list *chain; bool unfolded = false; int n = 0; list_for_each_entry(chain, &node->val, list) { ++n; unfolded = chain->ms.unfolded; } if (unfolded) n += callchain_node__count_rows_rb_tree(node); return n; } static int callchain__count_rows(struct rb_root *chain) { struct rb_node *nd; int n = 0; for (nd = rb_first(chain); nd; nd = rb_next(nd)) { struct callchain_node *node = rb_entry(nd, struct callchain_node, rb_node); n += callchain_node__count_rows(node); } return n; } static bool map_symbol__toggle_fold(struct map_symbol *self) { if (!self->has_children) return false; self->unfolded = !self->unfolded; return true; } static void callchain_node__init_have_children_rb_tree(struct callchain_node *self) { struct rb_node *nd = rb_first(&self->rb_root); for (nd = rb_first(&self->rb_root); nd; nd = rb_next(nd)) { struct callchain_node *child = rb_entry(nd, struct callchain_node, rb_node); struct callchain_list *chain; bool first = true; list_for_each_entry(chain, &child->val, list) { if (first) { first = false; chain->ms.has_children = chain->list.next != &child->val || !RB_EMPTY_ROOT(&child->rb_root); } else chain->ms.has_children = chain->list.next == &child->val && !RB_EMPTY_ROOT(&child->rb_root); } 
callchain_node__init_have_children_rb_tree(child); } } static void callchain_node__init_have_children(struct callchain_node *self) { struct callchain_list *chain; list_for_each_entry(chain, &self->val, list) chain->ms.has_children = !RB_EMPTY_ROOT(&self->rb_root); callchain_node__init_have_children_rb_tree(self); } static void callchain__init_have_children(struct rb_root *self) { struct rb_node *nd; for (nd = rb_first(self); nd; nd = rb_next(nd)) { struct callchain_node *node = rb_entry(nd, struct callchain_node, rb_node); callchain_node__init_have_children(node); } } static void hist_entry__init_have_children(struct hist_entry *self) { if (!self->init_have_children) { self->ms.has_children = !RB_EMPTY_ROOT(&self->sorted_chain); callchain__init_have_children(&self->sorted_chain); self->init_have_children = true; } } static bool hist_browser__toggle_fold(struct hist_browser *self) { if (map_symbol__toggle_fold(self->selection)) { struct hist_entry *he = self->he_selection; hist_entry__init_have_children(he); self->hists->nr_entries -= he->nr_rows; if (he->ms.unfolded) he->nr_rows = callchain__count_rows(&he->sorted_chain); else he->nr_rows = 0; self->hists->nr_entries += he->nr_rows; self->b.nr_entries = self->hists->nr_entries; return true; } /* If it doesn't have children, no toggling performed */ return false; } static int callchain_node__set_folding_rb_tree(struct callchain_node *self, bool unfold) { int n = 0; struct rb_node *nd; for (nd = rb_first(&self->rb_root); nd; nd = rb_next(nd)) { struct callchain_node *child = rb_entry(nd, struct callchain_node, rb_node); struct callchain_list *chain; bool has_children = false; list_for_each_entry(chain, &child->val, list) { ++n; map_symbol__set_folding(&chain->ms, unfold); has_children = chain->ms.has_children; } if (has_children) n += callchain_node__set_folding_rb_tree(child, unfold); } return n; } static int callchain_node__set_folding(struct callchain_node *node, bool unfold) { struct callchain_list *chain; bool 
has_children = false; int n = 0; list_for_each_entry(chain, &node->val, list) { ++n; map_symbol__set_folding(&chain->ms, unfold); has_children = chain->ms.has_children; } if (has_children) n += callchain_node__set_folding_rb_tree(node, unfold); return n; } static int callchain__set_folding(struct rb_root *chain, bool unfold) { struct rb_node *nd; int n = 0; for (nd = rb_first(chain); nd; nd = rb_next(nd)) { struct callchain_node *node = rb_entry(nd, struct callchain_node, rb_node); n += callchain_node__set_folding(node, unfold); } return n; } static void hist_entry__set_folding(struct hist_entry *self, bool unfold) { hist_entry__init_have_children(self); map_symbol__set_folding(&self->ms, unfold); if (self->ms.has_children) { int n = callchain__set_folding(&self->sorted_chain, unfold); self->nr_rows = unfold ? n : 0; } else self->nr_rows = 0; } static void hists__set_folding(struct hists *self, bool unfold) { struct rb_node *nd; self->nr_entries = 0; for (nd = rb_first(&self->entries); nd; nd = rb_next(nd)) { struct hist_entry *he = rb_entry(nd, struct hist_entry, rb_node); hist_entry__set_folding(he, unfold); self->nr_entries += 1 + he->nr_rows; } } static void hist_browser__set_folding(struct hist_browser *self, bool unfold) { hists__set_folding(self->hists, unfold); self->b.nr_entries = self->hists->nr_entries; /* Go to the start, we may be way after valid entries after a collapse */ ui_browser__reset_index(&self->b); } static int hist_browser__run(struct hist_browser *self, const char *title) { int key; int exit_keys[] = { 'a', '?', 'h', 'C', 'd', 'D', 'E', 't', NEWT_KEY_ENTER, NEWT_KEY_RIGHT, NEWT_KEY_LEFT, NEWT_KEY_TAB, NEWT_KEY_UNTAB, 0, }; self->b.entries = &self->hists->entries; self->b.nr_entries = self->hists->nr_entries; hist_browser__refresh_dimensions(self); if (ui_browser__show(&self->b, title, "Press '?' 
for help on key bindings") < 0) return -1; ui_browser__add_exit_keys(&self->b, exit_keys); while (1) { key = ui_browser__run(&self->b); switch (key) { case 'D': { /* Debug */ static int seq; struct hist_entry *h = rb_entry(self->b.top, struct hist_entry, rb_node); ui_helpline__pop(); ui_helpline__fpush("%d: nr_ent=(%d,%d), height=%d, idx=%d, fve: idx=%d, row_off=%d, nrows=%d", seq++, self->b.nr_entries, self->hists->nr_entries, self->b.height, self->b.index, self->b.top_idx, h->row_offset, h->nr_rows); } break; case 'C': /* Collapse the whole world. */ hist_browser__set_folding(self, false); break; case 'E': /* Expand the whole world. */ hist_browser__set_folding(self, true); break; case NEWT_KEY_ENTER: if (hist_browser__toggle_fold(self)) break; /* fall thru */ default: goto out; } } out: ui_browser__hide(&self->b); return key; } static char *callchain_list__sym_name(struct callchain_list *self, char *bf, size_t bfsize) { if (self->ms.sym) return self->ms.sym->name; snprintf(bf, bfsize, "%#" PRIx64, self->ip); return bf; } #define LEVEL_OFFSET_STEP 3 static int hist_browser__show_callchain_node_rb_tree(struct hist_browser *self, struct callchain_node *chain_node, u64 total, int level, unsigned short row, off_t *row_offset, bool *is_current_entry) { struct rb_node *node; int first_row = row, width, offset = level * LEVEL_OFFSET_STEP; u64 new_total, remaining; if (callchain_param.mode == CHAIN_GRAPH_REL) new_total = chain_node->children_hit; else new_total = total; remaining = new_total; node = rb_first(&chain_node->rb_root); while (node) { struct callchain_node *child = rb_entry(node, struct callchain_node, rb_node); struct rb_node *next = rb_next(node); u64 cumul = callchain_cumul_hits(child); struct callchain_list *chain; char folded_sign = ' '; int first = true; int extra_offset = 0; remaining -= cumul; list_for_each_entry(chain, &child->val, list) { char ipstr[BITS_PER_LONG / 4 + 1], *alloc_str; const char *str; int color; bool was_first = first; if (first) 
first = false; else extra_offset = LEVEL_OFFSET_STEP; folded_sign = callchain_list__folded(chain); if (*row_offset != 0) { --*row_offset; goto do_next; } alloc_str = NULL; str = callchain_list__sym_name(chain, ipstr, sizeof(ipstr)); if (was_first) { double percent = cumul * 100.0 / new_total; if (asprintf(&alloc_str, "%2.2f%% %s", percent, str) < 0) str = "Not enough memory!"; else str = alloc_str; } color = HE_COLORSET_NORMAL; width = self->b.width - (offset + extra_offset + 2); if (ui_browser__is_current_entry(&self->b, row)) { self->selection = &chain->ms; color = HE_COLORSET_SELECTED; *is_current_entry = true; } ui_browser__set_color(&self->b, color); ui_browser__gotorc(&self->b, row, 0); slsmg_write_nstring(" ", offset + extra_offset); slsmg_printf("%c ", folded_sign); slsmg_write_nstring(str, width); free(alloc_str); if (++row == self->b.height) goto out; do_next: if (folded_sign == '+') break; } if (folded_sign == '-') { const int new_level = level + (extra_offset ? 2 : 1); row += hist_browser__show_callchain_node_rb_tree(self, child, new_total, new_level, row, row_offset, is_current_entry); } if (row == self->b.height) goto out; node = next; } out: return row - first_row; } static int hist_browser__show_callchain_node(struct hist_browser *self, struct callchain_node *node, int level, unsigned short row, off_t *row_offset, bool *is_current_entry) { struct callchain_list *chain; int first_row = row, offset = level * LEVEL_OFFSET_STEP, width = self->b.width - offset; char folded_sign = ' '; list_for_each_entry(chain, &node->val, list) { char ipstr[BITS_PER_LONG / 4 + 1], *s; int color; folded_sign = callchain_list__folded(chain); if (*row_offset != 0) { --*row_offset; continue; } color = HE_COLORSET_NORMAL; if (ui_browser__is_current_entry(&self->b, row)) { self->selection = &chain->ms; color = HE_COLORSET_SELECTED; *is_current_entry = true; } s = callchain_list__sym_name(chain, ipstr, sizeof(ipstr)); ui_browser__gotorc(&self->b, row, 0); 
ui_browser__set_color(&self->b, color); slsmg_write_nstring(" ", offset); slsmg_printf("%c ", folded_sign); slsmg_write_nstring(s, width - 2); if (++row == self->b.height) goto out; } if (folded_sign == '-') row += hist_browser__show_callchain_node_rb_tree(self, node, self->hists->stats.total_period, level + 1, row, row_offset, is_current_entry); out: return row - first_row; } static int hist_browser__show_callchain(struct hist_browser *self, struct rb_root *chain, int level, unsigned short row, off_t *row_offset, bool *is_current_entry) { struct rb_node *nd; int first_row = row; for (nd = rb_first(chain); nd; nd = rb_next(nd)) { struct callchain_node *node = rb_entry(nd, struct callchain_node, rb_node); row += hist_browser__show_callchain_node(self, node, level, row, row_offset, is_current_entry); if (row == self->b.height) break; } return row - first_row; } static int hist_browser__show_entry(struct hist_browser *self, struct hist_entry *entry, unsigned short row) { char s[256]; double percent; int printed = 0; int color, width = self->b.width; char folded_sign = ' '; bool current_entry = ui_browser__is_current_entry(&self->b, row); off_t row_offset = entry->row_offset; if (current_entry) { self->he_selection = entry; self->selection = &entry->ms; } if (symbol_conf.use_callchain) { hist_entry__init_have_children(entry); folded_sign = hist_entry__folded(entry); } if (row_offset == 0) { hist_entry__snprintf(entry, s, sizeof(s), self->hists, NULL, false, 0, false, self->hists->stats.total_period); percent = (entry->period * 100.0) / self->hists->stats.total_period; color = HE_COLORSET_SELECTED; if (!current_entry) { if (percent >= MIN_RED) color = HE_COLORSET_TOP; else if (percent >= MIN_GREEN) color = HE_COLORSET_MEDIUM; else color = HE_COLORSET_NORMAL; } ui_browser__set_color(&self->b, color); ui_browser__gotorc(&self->b, row, 0); if (symbol_conf.use_callchain) { slsmg_printf("%c ", folded_sign); width -= 2; } slsmg_write_nstring(s, width); ++row; ++printed; } 
else --row_offset; if (folded_sign == '-' && row != self->b.height) { printed += hist_browser__show_callchain(self, &entry->sorted_chain, 1, row, &row_offset, &current_entry); if (current_entry) self->he_selection = entry; } return printed; } static unsigned int hist_browser__refresh(struct ui_browser *self) { unsigned row = 0; struct rb_node *nd; struct hist_browser *hb = container_of(self, struct hist_browser, b); if (self->top == NULL) self->top = rb_first(&hb->hists->entries); for (nd = self->top; nd; nd = rb_next(nd)) { struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node); if (h->filtered) continue; row += hist_browser__show_entry(hb, h, row); if (row == self->height) break; } return row; } static struct rb_node *hists__filter_entries(struct rb_node *nd) { while (nd != NULL) { struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node); if (!h->filtered) return nd; nd = rb_next(nd); } return NULL; } static struct rb_node *hists__filter_prev_entries(struct rb_node *nd) { while (nd != NULL) { struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node); if (!h->filtered) return nd; nd = rb_prev(nd); } return NULL; } static void ui_browser__hists_seek(struct ui_browser *self, off_t offset, int whence) { struct hist_entry *h; struct rb_node *nd; bool first = true; if (self->nr_entries == 0) return; switch (whence) { case SEEK_SET: nd = hists__filter_entries(rb_first(self->entries)); break; case SEEK_CUR: nd = self->top; goto do_offset; case SEEK_END: nd = hists__filter_prev_entries(rb_last(self->entries)); first = false; break; default: return; } /* * Moves not relative to the first visible entry invalidates its * row_offset: */ h = rb_entry(self->top, struct hist_entry, rb_node); h->row_offset = 0; /* * Here we have to check if nd is expanded (+), if it is we can't go * the next top level hist_entry, instead we must compute an offset of * what _not_ to show and not change the first visible entry. 
* * This offset increments when we are going from top to bottom and * decreases when we're going from bottom to top. * * As we don't have backpointers to the top level in the callchains * structure, we need to always print the whole hist_entry callchain, * skipping the first ones that are before the first visible entry * and stop when we printed enough lines to fill the screen. */ do_offset: if (offset > 0) { do { h = rb_entry(nd, struct hist_entry, rb_node); if (h->ms.unfolded) { u16 remaining = h->nr_rows - h->row_offset; if (offset > remaining) { offset -= remaining; h->row_offset = 0; } else { h->row_offset += offset; offset = 0; self->top = nd; break; } } nd = hists__filter_entries(rb_next(nd)); if (nd == NULL) break; --offset; self->top = nd; } while (offset != 0); } else if (offset < 0) { while (1) { h = rb_entry(nd, struct hist_entry, rb_node); if (h->ms.unfolded) { if (first) { if (-offset > h->row_offset) { offset += h->row_offset; h->row_offset = 0; } else { h->row_offset += offset; offset = 0; self->top = nd; break; } } else { if (-offset > h->nr_rows) { offset += h->nr_rows; h->row_offset = 0; } else { h->row_offset = h->nr_rows + offset; offset = 0; self->top = nd; break; } } } nd = hists__filter_prev_entries(rb_prev(nd)); if (nd == NULL) break; ++offset; self->top = nd; if (offset == 0) { /* * Last unfiltered hist_entry, check if it is * unfolded, if it is then we should have * row_offset at its last entry. 
*/ h = rb_entry(nd, struct hist_entry, rb_node); if (h->ms.unfolded) h->row_offset = h->nr_rows; break; } first = false; } } else { self->top = nd; h = rb_entry(nd, struct hist_entry, rb_node); h->row_offset = 0; } } static struct hist_browser *hist_browser__new(struct hists *hists) { struct hist_browser *self = zalloc(sizeof(*self)); if (self) { self->hists = hists; self->b.refresh = hist_browser__refresh; self->b.seek = ui_browser__hists_seek; } return self; } static void hist_browser__delete(struct hist_browser *self) { free(self); } static struct hist_entry *hist_browser__selected_entry(struct hist_browser *self) { return self->he_selection; } static struct thread *hist_browser__selected_thread(struct hist_browser *self) { return self->he_selection->thread; } static int hists__browser_title(struct hists *self, char *bf, size_t size, const char *ev_name, const struct dso *dso, const struct thread *thread) { char unit; int printed; unsigned long nr_events = self->stats.nr_events[PERF_RECORD_SAMPLE]; nr_events = convert_unit(nr_events, &unit); printed = snprintf(bf, size, "Events: %lu%c %s", nr_events, unit, ev_name); if (thread) printed += snprintf(bf + printed, size - printed, ", Thread: %s(%d)", (thread->comm_set ? 
thread->comm : ""), thread->pid); if (dso) printed += snprintf(bf + printed, size - printed, ", DSO: %s", dso->short_name); return printed; } static int perf_evsel__hists_browse(struct perf_evsel *evsel, const char *helpline, const char *ev_name, bool left_exits) { struct hists *self = &evsel->hists; struct hist_browser *browser = hist_browser__new(self); struct pstack *fstack; const struct thread *thread_filter = NULL; const struct dso *dso_filter = NULL; char msg[160]; int key = -1; if (browser == NULL) return -1; fstack = pstack__new(2); if (fstack == NULL) goto out; ui_helpline__push(helpline); hists__browser_title(self, msg, sizeof(msg), ev_name, dso_filter, thread_filter); while (1) { const struct thread *thread = NULL; const struct dso *dso = NULL; char *options[16]; int nr_options = 0, choice = 0, i, annotate = -2, zoom_dso = -2, zoom_thread = -2, browse_map = -2; key = hist_browser__run(browser, msg); if (browser->he_selection != NULL) { thread = hist_browser__selected_thread(browser); dso = browser->selection->map ? 
browser->selection->map->dso : NULL; } switch (key) { case NEWT_KEY_TAB: case NEWT_KEY_UNTAB: /* * Exit the browser, let hists__browser_tree * go to the next or previous */ goto out_free_stack; case 'a': if (browser->selection == NULL || browser->selection->sym == NULL || browser->selection->map->dso->annotate_warned) continue; goto do_annotate; case 'd': goto zoom_dso; case 't': goto zoom_thread; case NEWT_KEY_F1: case 'h': case '?': ui__help_window("-> Zoom into DSO/Threads & Annotate current symbol\n" "<- Zoom out\n" "a Annotate current symbol\n" "h/?/F1 Show this window\n" "C Collapse all callchains\n" "E Expand all callchains\n" "d Zoom into current DSO\n" "t Zoom into current Thread\n" "TAB/UNTAB Switch events\n" "q/CTRL+C Exit browser"); continue; case NEWT_KEY_ENTER: case NEWT_KEY_RIGHT: /* menu */ break; case NEWT_KEY_LEFT: { const void *top; if (pstack__empty(fstack)) { /* * Go back to the perf_evsel_menu__run or other user */ if (left_exits) goto out_free_stack; continue; } top = pstack__pop(fstack); if (top == &dso_filter) goto zoom_out_dso; if (top == &thread_filter) goto zoom_out_thread; continue; } case NEWT_KEY_ESCAPE: if (!left_exits && !ui__dialog_yesno("Do you really want to exit?")) continue; /* Fall thru */ default: goto out_free_stack; } if (browser->selection != NULL && browser->selection->sym != NULL && !browser->selection->map->dso->annotate_warned && asprintf(&options[nr_options], "Annotate %s", browser->selection->sym->name) > 0) annotate = nr_options++; if (thread != NULL && asprintf(&options[nr_options], "Zoom %s %s(%d) thread", (thread_filter ? "out of" : "into"), (thread->comm_set ? thread->comm : ""), thread->pid) > 0) zoom_thread = nr_options++; if (dso != NULL && asprintf(&options[nr_options], "Zoom %s %s DSO", (dso_filter ? "out of" : "into"), (dso->kernel ? 
"the Kernel" : dso->short_name)) > 0) zoom_dso = nr_options++; if (browser->selection != NULL && browser->selection->map != NULL && asprintf(&options[nr_options], "Browse map details") > 0) browse_map = nr_options++; options[nr_options++] = (char *)"Exit"; choice = ui__popup_menu(nr_options, options); for (i = 0; i < nr_options - 1; ++i) free(options[i]); if (choice == nr_options - 1) break; if (choice == -1) continue; if (choice == annotate) { struct hist_entry *he; do_annotate: he = hist_browser__selected_entry(browser); if (he == NULL) continue; hist_entry__tui_annotate(he, evsel->idx); } else if (choice == browse_map) map__browse(browser->selection->map); else if (choice == zoom_dso) { zoom_dso: if (dso_filter) { pstack__remove(fstack, &dso_filter); zoom_out_dso: ui_helpline__pop(); dso_filter = NULL; } else { if (dso == NULL) continue; ui_helpline__fpush("To zoom out press <- or -> + \"Zoom out of %s DSO\"", dso->kernel ? "the Kernel" : dso->short_name); dso_filter = dso; pstack__push(fstack, &dso_filter); } hists__filter_by_dso(self, dso_filter); hists__browser_title(self, msg, sizeof(msg), ev_name, dso_filter, thread_filter); hist_browser__reset(browser); } else if (choice == zoom_thread) { zoom_thread: if (thread_filter) { pstack__remove(fstack, &thread_filter); zoom_out_thread: ui_helpline__pop(); thread_filter = NULL; } else { ui_helpline__fpush("To zoom out press <- or -> + \"Zoom out of %s(%d) thread\"", thread->comm_set ? 
thread->comm : "", thread->pid); thread_filter = thread; pstack__push(fstack, &thread_filter); } hists__filter_by_thread(self, thread_filter); hists__browser_title(self, msg, sizeof(msg), ev_name, dso_filter, thread_filter); hist_browser__reset(browser); } } out_free_stack: pstack__delete(fstack); out: hist_browser__delete(browser); return key; } struct perf_evsel_menu { struct ui_browser b; struct perf_evsel *selection; }; static void perf_evsel_menu__write(struct ui_browser *browser, void *entry, int row) { struct perf_evsel_menu *menu = container_of(browser, struct perf_evsel_menu, b); struct perf_evsel *evsel = list_entry(entry, struct perf_evsel, node); bool current_entry = ui_browser__is_current_entry(browser, row); unsigned long nr_events = evsel->hists.stats.nr_events[PERF_RECORD_SAMPLE]; const char *ev_name = event_name(evsel); char bf[256], unit; ui_browser__set_color(browser, current_entry ? HE_COLORSET_SELECTED : HE_COLORSET_NORMAL); nr_events = convert_unit(nr_events, &unit); snprintf(bf, sizeof(bf), "%lu%c%s%s", nr_events, unit, unit == ' ' ? 
"" : " ", ev_name); slsmg_write_nstring(bf, browser->width); if (current_entry) menu->selection = evsel; } static int perf_evsel_menu__run(struct perf_evsel_menu *menu, const char *help) { int exit_keys[] = { NEWT_KEY_ENTER, NEWT_KEY_RIGHT, 0, }; struct perf_evlist *evlist = menu->b.priv; struct perf_evsel *pos; const char *ev_name, *title = "Available samples"; int key; if (ui_browser__show(&menu->b, title, "ESC: exit, ENTER|->: Browse histograms") < 0) return -1; ui_browser__add_exit_keys(&menu->b, exit_keys); while (1) { key = ui_browser__run(&menu->b); switch (key) { case NEWT_KEY_RIGHT: case NEWT_KEY_ENTER: if (!menu->selection) continue; pos = menu->selection; browse_hists: ev_name = event_name(pos); key = perf_evsel__hists_browse(pos, help, ev_name, true); ui_browser__show_title(&menu->b, title); break; case NEWT_KEY_LEFT: continue; case NEWT_KEY_ESCAPE: if (!ui__dialog_yesno("Do you really want to exit?")) continue; /* Fall thru */ default: goto out; } switch (key) { case NEWT_KEY_TAB: if (pos->node.next == &evlist->entries) pos = list_entry(evlist->entries.next, struct perf_evsel, node); else pos = list_entry(pos->node.next, struct perf_evsel, node); goto browse_hists; case NEWT_KEY_UNTAB: if (pos->node.prev == &evlist->entries) pos = list_entry(evlist->entries.prev, struct perf_evsel, node); else pos = list_entry(pos->node.prev, struct perf_evsel, node); goto browse_hists; case 'q': case CTRL('c'): goto out; default: break; } } out: ui_browser__hide(&menu->b); return key; } static int __perf_evlist__tui_browse_hists(struct perf_evlist *evlist, const char *help) { struct perf_evsel *pos; struct perf_evsel_menu menu = { .b = { .entries = &evlist->entries, .refresh = ui_browser__list_head_refresh, .seek = ui_browser__list_head_seek, .write = perf_evsel_menu__write, .nr_entries = evlist->nr_entries, .priv = evlist, }, }; ui_helpline__push("Press ESC to exit"); list_for_each_entry(pos, &evlist->entries, node) { const char *ev_name = event_name(pos); size_t 
line_len = strlen(ev_name) + 7; if (menu.b.width < line_len) menu.b.width = line_len; /* * Cache the evsel name, tracepoints have a _high_ cost per * event_name() call. */ if (pos->name == NULL) pos->name = strdup(ev_name); } return perf_evsel_menu__run(&menu, help); } int perf_evlist__tui_browse_hists(struct perf_evlist *evlist, const char *help) { if (evlist->nr_entries == 1) { struct perf_evsel *first = list_entry(evlist->entries.next, struct perf_evsel, node); const char *ev_name = event_name(first); return perf_evsel__hists_browse(first, help, ev_name, false); } return __perf_evlist__tui_browse_hists(evlist, help); }
gpl-2.0
manumanfred/kernel_tegra
drivers/scsi/libfc/fc_libfc.c
2938
9000
/* * Copyright(c) 2009 Intel Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. * * Maintained at www.Open-FCoE.org */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/scatterlist.h> #include <linux/crc32.h> #include <scsi/libfc.h> #include <scsi/fc_encode.h> #include "fc_libfc.h" MODULE_AUTHOR("Open-FCoE.org"); MODULE_DESCRIPTION("libfc"); MODULE_LICENSE("GPL v2"); unsigned int fc_debug_logging; module_param_named(debug_logging, fc_debug_logging, int, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(debug_logging, "a bit mask of logging levels"); DEFINE_MUTEX(fc_prov_mutex); static LIST_HEAD(fc_local_ports); struct blocking_notifier_head fc_lport_notifier_head = BLOCKING_NOTIFIER_INIT(fc_lport_notifier_head); EXPORT_SYMBOL(fc_lport_notifier_head); /* * Providers which primarily send requests and PRLIs. */ struct fc4_prov *fc_active_prov[FC_FC4_PROV_SIZE] = { [0] = &fc_rport_t0_prov, [FC_TYPE_FCP] = &fc_rport_fcp_init, }; /* * Providers which receive requests. 
*/ struct fc4_prov *fc_passive_prov[FC_FC4_PROV_SIZE] = { [FC_TYPE_ELS] = &fc_lport_els_prov, }; /** * libfc_init() - Initialize libfc.ko */ static int __init libfc_init(void) { int rc = 0; rc = fc_setup_fcp(); if (rc) return rc; rc = fc_setup_exch_mgr(); if (rc) goto destroy_pkt_cache; rc = fc_setup_rport(); if (rc) goto destroy_em; return rc; destroy_em: fc_destroy_exch_mgr(); destroy_pkt_cache: fc_destroy_fcp(); return rc; } module_init(libfc_init); /** * libfc_exit() - Tear down libfc.ko */ static void __exit libfc_exit(void) { fc_destroy_fcp(); fc_destroy_exch_mgr(); fc_destroy_rport(); } module_exit(libfc_exit); /** * fc_copy_buffer_to_sglist() - This routine copies the data of a buffer * into a scatter-gather list (SG list). * * @buf: pointer to the data buffer. * @len: the byte-length of the data buffer. * @sg: pointer to the pointer of the SG list. * @nents: pointer to the remaining number of entries in the SG list. * @offset: pointer to the current offset in the SG list. * @km_type: dedicated page table slot type for kmap_atomic. * @crc: pointer to the 32-bit crc value. * If crc is NULL, CRC is not calculated. */ u32 fc_copy_buffer_to_sglist(void *buf, size_t len, struct scatterlist *sg, u32 *nents, size_t *offset, enum km_type km_type, u32 *crc) { size_t remaining = len; u32 copy_len = 0; while (remaining > 0 && sg) { size_t off, sg_bytes; void *page_addr; if (*offset >= sg->length) { /* * Check for end and drop resources * from the last iteration. */ if (!(*nents)) break; --(*nents); *offset -= sg->length; sg = sg_next(sg); continue; } sg_bytes = min(remaining, sg->length - *offset); /* * The scatterlist item may be bigger than PAGE_SIZE, * but we are limited to mapping PAGE_SIZE at a time. 
*/ off = *offset + sg->offset; sg_bytes = min(sg_bytes, (size_t)(PAGE_SIZE - (off & ~PAGE_MASK))); page_addr = kmap_atomic(sg_page(sg) + (off >> PAGE_SHIFT), km_type); if (crc) *crc = crc32(*crc, buf, sg_bytes); memcpy((char *)page_addr + (off & ~PAGE_MASK), buf, sg_bytes); kunmap_atomic(page_addr, km_type); buf += sg_bytes; *offset += sg_bytes; remaining -= sg_bytes; copy_len += sg_bytes; } return copy_len; } /** * fc_fill_hdr() - fill FC header fields based on request * @fp: reply frame containing header to be filled in * @in_fp: request frame containing header to use in filling in reply * @r_ctl: R_CTL value for header * @f_ctl: F_CTL value for header, with 0 pad * @seq_cnt: sequence count for the header, ignored if frame has a sequence * @parm_offset: parameter / offset value */ void fc_fill_hdr(struct fc_frame *fp, const struct fc_frame *in_fp, enum fc_rctl r_ctl, u32 f_ctl, u16 seq_cnt, u32 parm_offset) { struct fc_frame_header *fh; struct fc_frame_header *in_fh; struct fc_seq *sp; u32 fill; fh = __fc_frame_header_get(fp); in_fh = __fc_frame_header_get(in_fp); if (f_ctl & FC_FC_END_SEQ) { fill = -fr_len(fp) & 3; if (fill) { /* TODO, this may be a problem with fragmented skb */ memset(skb_put(fp_skb(fp), fill), 0, fill); f_ctl |= fill; } fr_eof(fp) = FC_EOF_T; } else { WARN_ON(fr_len(fp) % 4 != 0); /* no pad to non last frame */ fr_eof(fp) = FC_EOF_N; } fh->fh_r_ctl = r_ctl; memcpy(fh->fh_d_id, in_fh->fh_s_id, sizeof(fh->fh_d_id)); memcpy(fh->fh_s_id, in_fh->fh_d_id, sizeof(fh->fh_s_id)); fh->fh_type = in_fh->fh_type; hton24(fh->fh_f_ctl, f_ctl); fh->fh_ox_id = in_fh->fh_ox_id; fh->fh_rx_id = in_fh->fh_rx_id; fh->fh_cs_ctl = 0; fh->fh_df_ctl = 0; fh->fh_parm_offset = htonl(parm_offset); sp = fr_seq(in_fp); if (sp) { fr_seq(fp) = sp; fh->fh_seq_id = sp->id; seq_cnt = sp->cnt; } else { fh->fh_seq_id = 0; } fh->fh_seq_cnt = ntohs(seq_cnt); fr_sof(fp) = seq_cnt ? 
FC_SOF_N3 : FC_SOF_I3; fr_encaps(fp) = fr_encaps(in_fp); } EXPORT_SYMBOL(fc_fill_hdr); /** * fc_fill_reply_hdr() - fill FC reply header fields based on request * @fp: reply frame containing header to be filled in * @in_fp: request frame containing header to use in filling in reply * @r_ctl: R_CTL value for reply * @parm_offset: parameter / offset value */ void fc_fill_reply_hdr(struct fc_frame *fp, const struct fc_frame *in_fp, enum fc_rctl r_ctl, u32 parm_offset) { struct fc_seq *sp; sp = fr_seq(in_fp); if (sp) fr_seq(fp) = fr_dev(in_fp)->tt.seq_start_next(sp); fc_fill_hdr(fp, in_fp, r_ctl, FC_FCTL_RESP, 0, parm_offset); } EXPORT_SYMBOL(fc_fill_reply_hdr); /** * fc_fc4_conf_lport_params() - Modify "service_params" of specified lport * if there is service provider (target provider) registered with libfc * for specified "fc_ft_type" * @lport: Local port which service_params needs to be modified * @type: FC-4 type, such as FC_TYPE_FCP */ void fc_fc4_conf_lport_params(struct fc_lport *lport, enum fc_fh_type type) { struct fc4_prov *prov_entry; BUG_ON(type >= FC_FC4_PROV_SIZE); BUG_ON(!lport); prov_entry = fc_passive_prov[type]; if (type == FC_TYPE_FCP) { if (prov_entry && prov_entry->recv) lport->service_params |= FCP_SPPF_TARG_FCN; } } void fc_lport_iterate(void (*notify)(struct fc_lport *, void *), void *arg) { struct fc_lport *lport; mutex_lock(&fc_prov_mutex); list_for_each_entry(lport, &fc_local_ports, lport_list) notify(lport, arg); mutex_unlock(&fc_prov_mutex); } EXPORT_SYMBOL(fc_lport_iterate); /** * fc_fc4_register_provider() - register FC-4 upper-level provider. * @type: FC-4 type, such as FC_TYPE_FCP * @prov: structure describing provider including ops vector. * * Returns 0 on success, negative error otherwise. */ int fc_fc4_register_provider(enum fc_fh_type type, struct fc4_prov *prov) { struct fc4_prov **prov_entry; int ret = 0; if (type >= FC_FC4_PROV_SIZE) return -EINVAL; mutex_lock(&fc_prov_mutex); prov_entry = (prov->recv ? 
fc_passive_prov : fc_active_prov) + type; if (*prov_entry) ret = -EBUSY; else *prov_entry = prov; mutex_unlock(&fc_prov_mutex); return ret; } EXPORT_SYMBOL(fc_fc4_register_provider); /** * fc_fc4_deregister_provider() - deregister FC-4 upper-level provider. * @type: FC-4 type, such as FC_TYPE_FCP * @prov: structure describing provider including ops vector. */ void fc_fc4_deregister_provider(enum fc_fh_type type, struct fc4_prov *prov) { BUG_ON(type >= FC_FC4_PROV_SIZE); mutex_lock(&fc_prov_mutex); if (prov->recv) rcu_assign_pointer(fc_passive_prov[type], NULL); else rcu_assign_pointer(fc_active_prov[type], NULL); mutex_unlock(&fc_prov_mutex); synchronize_rcu(); } EXPORT_SYMBOL(fc_fc4_deregister_provider); /** * fc_fc4_add_lport() - add new local port to list and run notifiers. * @lport: The new local port. */ void fc_fc4_add_lport(struct fc_lport *lport) { mutex_lock(&fc_prov_mutex); list_add_tail(&lport->lport_list, &fc_local_ports); blocking_notifier_call_chain(&fc_lport_notifier_head, FC_LPORT_EV_ADD, lport); mutex_unlock(&fc_prov_mutex); } /** * fc_fc4_del_lport() - remove local port from list and run notifiers. * @lport: The new local port. */ void fc_fc4_del_lport(struct fc_lport *lport) { mutex_lock(&fc_prov_mutex); list_del(&lport->lport_list); blocking_notifier_call_chain(&fc_lport_notifier_head, FC_LPORT_EV_DEL, lport); mutex_unlock(&fc_prov_mutex); }
gpl-2.0
eugene373/apexq
drivers/mtd/inftlmount.c
3194
23426
/* * inftlmount.c -- INFTL mount code with extensive checks. * * Author: Greg Ungerer (gerg@snapgear.com) * Copyright © 2002-2003, Greg Ungerer (gerg@snapgear.com) * * Based heavily on the nftlmount.c code which is: * Author: Fabrice Bellard (fabrice.bellard@netgem.com) * Copyright © 2000 Netgem S.A. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/kernel.h> #include <linux/module.h> #include <asm/errno.h> #include <asm/io.h> #include <asm/uaccess.h> #include <linux/delay.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/mtd/mtd.h> #include <linux/mtd/nftl.h> #include <linux/mtd/inftl.h> /* * find_boot_record: Find the INFTL Media Header and its Spare copy which * contains the various device information of the INFTL partition and * Bad Unit Table. Update the PUtable[] table according to the Bad * Unit Table. PUtable[] is used for management of Erase Unit in * other routines in inftlcore.c and inftlmount.c. 
*/ static int find_boot_record(struct INFTLrecord *inftl) { struct inftl_unittail h1; //struct inftl_oob oob; unsigned int i, block; u8 buf[SECTORSIZE]; struct INFTLMediaHeader *mh = &inftl->MediaHdr; struct mtd_info *mtd = inftl->mbd.mtd; struct INFTLPartition *ip; size_t retlen; DEBUG(MTD_DEBUG_LEVEL3, "INFTL: find_boot_record(inftl=%p)\n", inftl); /* * Assume logical EraseSize == physical erasesize for starting the * scan. We'll sort it out later if we find a MediaHeader which says * otherwise. */ inftl->EraseSize = inftl->mbd.mtd->erasesize; inftl->nb_blocks = (u32)inftl->mbd.mtd->size / inftl->EraseSize; inftl->MediaUnit = BLOCK_NIL; /* Search for a valid boot record */ for (block = 0; block < inftl->nb_blocks; block++) { int ret; /* * Check for BNAND header first. Then whinge if it's found * but later checks fail. */ ret = mtd->read(mtd, block * inftl->EraseSize, SECTORSIZE, &retlen, buf); /* We ignore ret in case the ECC of the MediaHeader is invalid (which is apparently acceptable) */ if (retlen != SECTORSIZE) { static int warncount = 5; if (warncount) { printk(KERN_WARNING "INFTL: block read at 0x%x " "of mtd%d failed: %d\n", block * inftl->EraseSize, inftl->mbd.mtd->index, ret); if (!--warncount) printk(KERN_WARNING "INFTL: further " "failures for this block will " "not be printed\n"); } continue; } if (retlen < 6 || memcmp(buf, "BNAND", 6)) { /* BNAND\0 not found. Continue */ continue; } /* To be safer with BIOS, also use erase mark as discriminant */ ret = inftl_read_oob(mtd, block * inftl->EraseSize + SECTORSIZE + 8, 8, &retlen,(char *)&h1); if (ret < 0) { printk(KERN_WARNING "INFTL: ANAND header found at " "0x%x in mtd%d, but OOB data read failed " "(err %d)\n", block * inftl->EraseSize, inftl->mbd.mtd->index, ret); continue; } /* * This is the first we've seen. * Copy the media header structure into place. 
*/ memcpy(mh, buf, sizeof(struct INFTLMediaHeader)); /* Read the spare media header at offset 4096 */ mtd->read(mtd, block * inftl->EraseSize + 4096, SECTORSIZE, &retlen, buf); if (retlen != SECTORSIZE) { printk(KERN_WARNING "INFTL: Unable to read spare " "Media Header\n"); return -1; } /* Check if this one is the same as the first one we found. */ if (memcmp(mh, buf, sizeof(struct INFTLMediaHeader))) { printk(KERN_WARNING "INFTL: Primary and spare Media " "Headers disagree.\n"); return -1; } mh->NoOfBootImageBlocks = le32_to_cpu(mh->NoOfBootImageBlocks); mh->NoOfBinaryPartitions = le32_to_cpu(mh->NoOfBinaryPartitions); mh->NoOfBDTLPartitions = le32_to_cpu(mh->NoOfBDTLPartitions); mh->BlockMultiplierBits = le32_to_cpu(mh->BlockMultiplierBits); mh->FormatFlags = le32_to_cpu(mh->FormatFlags); mh->PercentUsed = le32_to_cpu(mh->PercentUsed); #ifdef CONFIG_MTD_DEBUG_VERBOSE if (CONFIG_MTD_DEBUG_VERBOSE >= 2) { printk("INFTL: Media Header ->\n" " bootRecordID = %s\n" " NoOfBootImageBlocks = %d\n" " NoOfBinaryPartitions = %d\n" " NoOfBDTLPartitions = %d\n" " BlockMultiplerBits = %d\n" " FormatFlgs = %d\n" " OsakVersion = 0x%x\n" " PercentUsed = %d\n", mh->bootRecordID, mh->NoOfBootImageBlocks, mh->NoOfBinaryPartitions, mh->NoOfBDTLPartitions, mh->BlockMultiplierBits, mh->FormatFlags, mh->OsakVersion, mh->PercentUsed); } #endif if (mh->NoOfBDTLPartitions == 0) { printk(KERN_WARNING "INFTL: Media Header sanity check " "failed: NoOfBDTLPartitions (%d) == 0, " "must be at least 1\n", mh->NoOfBDTLPartitions); return -1; } if ((mh->NoOfBDTLPartitions + mh->NoOfBinaryPartitions) > 4) { printk(KERN_WARNING "INFTL: Media Header sanity check " "failed: Total Partitions (%d) > 4, " "BDTL=%d Binary=%d\n", mh->NoOfBDTLPartitions + mh->NoOfBinaryPartitions, mh->NoOfBDTLPartitions, mh->NoOfBinaryPartitions); return -1; } if (mh->BlockMultiplierBits > 1) { printk(KERN_WARNING "INFTL: sorry, we don't support " "UnitSizeFactor 0x%02x\n", mh->BlockMultiplierBits); return -1; } else if 
(mh->BlockMultiplierBits == 1) { printk(KERN_WARNING "INFTL: support for INFTL with " "UnitSizeFactor 0x%02x is experimental\n", mh->BlockMultiplierBits); inftl->EraseSize = inftl->mbd.mtd->erasesize << mh->BlockMultiplierBits; inftl->nb_blocks = (u32)inftl->mbd.mtd->size / inftl->EraseSize; block >>= mh->BlockMultiplierBits; } /* Scan the partitions */ for (i = 0; (i < 4); i++) { ip = &mh->Partitions[i]; ip->virtualUnits = le32_to_cpu(ip->virtualUnits); ip->firstUnit = le32_to_cpu(ip->firstUnit); ip->lastUnit = le32_to_cpu(ip->lastUnit); ip->flags = le32_to_cpu(ip->flags); ip->spareUnits = le32_to_cpu(ip->spareUnits); ip->Reserved0 = le32_to_cpu(ip->Reserved0); #ifdef CONFIG_MTD_DEBUG_VERBOSE if (CONFIG_MTD_DEBUG_VERBOSE >= 2) { printk(" PARTITION[%d] ->\n" " virtualUnits = %d\n" " firstUnit = %d\n" " lastUnit = %d\n" " flags = 0x%x\n" " spareUnits = %d\n", i, ip->virtualUnits, ip->firstUnit, ip->lastUnit, ip->flags, ip->spareUnits); } #endif if (ip->Reserved0 != ip->firstUnit) { struct erase_info *instr = &inftl->instr; instr->mtd = inftl->mbd.mtd; /* * Most likely this is using the * undocumented qiuck mount feature. * We don't support that, we will need * to erase the hidden block for full * compatibility. 
*/ instr->addr = ip->Reserved0 * inftl->EraseSize; instr->len = inftl->EraseSize; mtd->erase(mtd, instr); } if ((ip->lastUnit - ip->firstUnit + 1) < ip->virtualUnits) { printk(KERN_WARNING "INFTL: Media Header " "Partition %d sanity check failed\n" " firstUnit %d : lastUnit %d > " "virtualUnits %d\n", i, ip->lastUnit, ip->firstUnit, ip->Reserved0); return -1; } if (ip->Reserved1 != 0) { printk(KERN_WARNING "INFTL: Media Header " "Partition %d sanity check failed: " "Reserved1 %d != 0\n", i, ip->Reserved1); return -1; } if (ip->flags & INFTL_BDTL) break; } if (i >= 4) { printk(KERN_WARNING "INFTL: Media Header Partition " "sanity check failed:\n No partition " "marked as Disk Partition\n"); return -1; } inftl->nb_boot_blocks = ip->firstUnit; inftl->numvunits = ip->virtualUnits; if (inftl->numvunits > (inftl->nb_blocks - inftl->nb_boot_blocks - 2)) { printk(KERN_WARNING "INFTL: Media Header sanity check " "failed:\n numvunits (%d) > nb_blocks " "(%d) - nb_boot_blocks(%d) - 2\n", inftl->numvunits, inftl->nb_blocks, inftl->nb_boot_blocks); return -1; } inftl->mbd.size = inftl->numvunits * (inftl->EraseSize / SECTORSIZE); /* * Block count is set to last used EUN (we won't need to keep * any meta-data past that point). 
*/ inftl->firstEUN = ip->firstUnit; inftl->lastEUN = ip->lastUnit; inftl->nb_blocks = ip->lastUnit + 1; /* Memory alloc */ inftl->PUtable = kmalloc(inftl->nb_blocks * sizeof(u16), GFP_KERNEL); if (!inftl->PUtable) { printk(KERN_WARNING "INFTL: allocation of PUtable " "failed (%zd bytes)\n", inftl->nb_blocks * sizeof(u16)); return -ENOMEM; } inftl->VUtable = kmalloc(inftl->nb_blocks * sizeof(u16), GFP_KERNEL); if (!inftl->VUtable) { kfree(inftl->PUtable); printk(KERN_WARNING "INFTL: allocation of VUtable " "failed (%zd bytes)\n", inftl->nb_blocks * sizeof(u16)); return -ENOMEM; } /* Mark the blocks before INFTL MediaHeader as reserved */ for (i = 0; i < inftl->nb_boot_blocks; i++) inftl->PUtable[i] = BLOCK_RESERVED; /* Mark all remaining blocks as potentially containing data */ for (; i < inftl->nb_blocks; i++) inftl->PUtable[i] = BLOCK_NOTEXPLORED; /* Mark this boot record (NFTL MediaHeader) block as reserved */ inftl->PUtable[block] = BLOCK_RESERVED; /* Read Bad Erase Unit Table and modify PUtable[] accordingly */ for (i = 0; i < inftl->nb_blocks; i++) { int physblock; /* If any of the physical eraseblocks are bad, don't use the unit. */ for (physblock = 0; physblock < inftl->EraseSize; physblock += inftl->mbd.mtd->erasesize) { if (inftl->mbd.mtd->block_isbad(inftl->mbd.mtd, i * inftl->EraseSize + physblock)) inftl->PUtable[i] = BLOCK_RESERVED; } } inftl->MediaUnit = block; return 0; } /* Not found. */ return -1; } static int memcmpb(void *a, int c, int n) { int i; for (i = 0; i < n; i++) { if (c != ((unsigned char *)a)[i]) return 1; } return 0; } /* * check_free_sector: check if a free sector is actually FREE, * i.e. All 0xff in data and oob area. 
*/ static int check_free_sectors(struct INFTLrecord *inftl, unsigned int address, int len, int check_oob) { u8 buf[SECTORSIZE + inftl->mbd.mtd->oobsize]; struct mtd_info *mtd = inftl->mbd.mtd; size_t retlen; int i; for (i = 0; i < len; i += SECTORSIZE) { if (mtd->read(mtd, address, SECTORSIZE, &retlen, buf)) return -1; if (memcmpb(buf, 0xff, SECTORSIZE) != 0) return -1; if (check_oob) { if(inftl_read_oob(mtd, address, mtd->oobsize, &retlen, &buf[SECTORSIZE]) < 0) return -1; if (memcmpb(buf + SECTORSIZE, 0xff, mtd->oobsize) != 0) return -1; } address += SECTORSIZE; } return 0; } /* * INFTL_format: format a Erase Unit by erasing ALL Erase Zones in the Erase * Unit and Update INFTL metadata. Each erase operation is * checked with check_free_sectors. * * Return: 0 when succeed, -1 on error. * * ToDo: 1. Is it neceressary to check_free_sector after erasing ?? */ int INFTL_formatblock(struct INFTLrecord *inftl, int block) { size_t retlen; struct inftl_unittail uci; struct erase_info *instr = &inftl->instr; struct mtd_info *mtd = inftl->mbd.mtd; int physblock; DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_formatblock(inftl=%p," "block=%d)\n", inftl, block); memset(instr, 0, sizeof(struct erase_info)); /* FIXME: Shouldn't we be setting the 'discarded' flag to zero _first_? */ /* Use async erase interface, test return code */ instr->mtd = inftl->mbd.mtd; instr->addr = block * inftl->EraseSize; instr->len = inftl->mbd.mtd->erasesize; /* Erase one physical eraseblock at a time, even though the NAND api allows us to group them. This way we if we have a failure, we can mark only the failed block in the bbt. */ for (physblock = 0; physblock < inftl->EraseSize; physblock += instr->len, instr->addr += instr->len) { mtd->erase(inftl->mbd.mtd, instr); if (instr->state == MTD_ERASE_FAILED) { printk(KERN_WARNING "INFTL: error while formatting block %d\n", block); goto fail; } /* * Check the "freeness" of Erase Unit before updating metadata. * FixMe: is this check really necessary? 
Since we have check * the return code after the erase operation. */ if (check_free_sectors(inftl, instr->addr, instr->len, 1) != 0) goto fail; } uci.EraseMark = cpu_to_le16(ERASE_MARK); uci.EraseMark1 = cpu_to_le16(ERASE_MARK); uci.Reserved[0] = 0; uci.Reserved[1] = 0; uci.Reserved[2] = 0; uci.Reserved[3] = 0; instr->addr = block * inftl->EraseSize + SECTORSIZE * 2; if (inftl_write_oob(mtd, instr->addr + 8, 8, &retlen, (char *)&uci) < 0) goto fail; return 0; fail: /* could not format, update the bad block table (caller is responsible for setting the PUtable to BLOCK_RESERVED on failure) */ inftl->mbd.mtd->block_markbad(inftl->mbd.mtd, instr->addr); return -1; } /* * format_chain: Format an invalid Virtual Unit chain. It frees all the Erase * Units in a Virtual Unit Chain, i.e. all the units are disconnected. * * Since the chain is invalid then we will have to erase it from its * head (normally for INFTL we go from the oldest). But if it has a * loop then there is no oldest... */ static void format_chain(struct INFTLrecord *inftl, unsigned int first_block) { unsigned int block = first_block, block1; printk(KERN_WARNING "INFTL: formatting chain at block %d\n", first_block); for (;;) { block1 = inftl->PUtable[block]; printk(KERN_WARNING "INFTL: formatting block %d\n", block); if (INFTL_formatblock(inftl, block) < 0) { /* * Cannot format !!!! 
Mark it as Bad Unit, */ inftl->PUtable[block] = BLOCK_RESERVED; } else { inftl->PUtable[block] = BLOCK_FREE; } /* Goto next block on the chain */ block = block1; if (block == BLOCK_NIL || block >= inftl->lastEUN) break; } } void INFTL_dumptables(struct INFTLrecord *s) { int i; printk("-------------------------------------------" "----------------------------------\n"); printk("VUtable[%d] ->", s->nb_blocks); for (i = 0; i < s->nb_blocks; i++) { if ((i % 8) == 0) printk("\n%04x: ", i); printk("%04x ", s->VUtable[i]); } printk("\n-------------------------------------------" "----------------------------------\n"); printk("PUtable[%d-%d=%d] ->", s->firstEUN, s->lastEUN, s->nb_blocks); for (i = 0; i <= s->lastEUN; i++) { if ((i % 8) == 0) printk("\n%04x: ", i); printk("%04x ", s->PUtable[i]); } printk("\n-------------------------------------------" "----------------------------------\n"); printk("INFTL ->\n" " EraseSize = %d\n" " h/s/c = %d/%d/%d\n" " numvunits = %d\n" " firstEUN = %d\n" " lastEUN = %d\n" " numfreeEUNs = %d\n" " LastFreeEUN = %d\n" " nb_blocks = %d\n" " nb_boot_blocks = %d", s->EraseSize, s->heads, s->sectors, s->cylinders, s->numvunits, s->firstEUN, s->lastEUN, s->numfreeEUNs, s->LastFreeEUN, s->nb_blocks, s->nb_boot_blocks); printk("\n-------------------------------------------" "----------------------------------\n"); } void INFTL_dumpVUchains(struct INFTLrecord *s) { int logical, block, i; printk("-------------------------------------------" "----------------------------------\n"); printk("INFTL Virtual Unit Chains:\n"); for (logical = 0; logical < s->nb_blocks; logical++) { block = s->VUtable[logical]; if (block > s->nb_blocks) continue; printk(" LOGICAL %d --> %d ", logical, block); for (i = 0; i < s->nb_blocks; i++) { if (s->PUtable[block] == BLOCK_NIL) break; block = s->PUtable[block]; printk("%d ", block); } printk("\n"); } printk("-------------------------------------------" "----------------------------------\n"); } int INFTL_mount(struct 
INFTLrecord *s) { struct mtd_info *mtd = s->mbd.mtd; unsigned int block, first_block, prev_block, last_block; unsigned int first_logical_block, logical_block, erase_mark; int chain_length, do_format_chain; struct inftl_unithead1 h0; struct inftl_unittail h1; size_t retlen; int i; u8 *ANACtable, ANAC; DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_mount(inftl=%p)\n", s); /* Search for INFTL MediaHeader and Spare INFTL Media Header */ if (find_boot_record(s) < 0) { printk(KERN_WARNING "INFTL: could not find valid boot record?\n"); return -ENXIO; } /* Init the logical to physical table */ for (i = 0; i < s->nb_blocks; i++) s->VUtable[i] = BLOCK_NIL; logical_block = block = BLOCK_NIL; /* Temporary buffer to store ANAC numbers. */ ANACtable = kcalloc(s->nb_blocks, sizeof(u8), GFP_KERNEL); if (!ANACtable) { printk(KERN_WARNING "INFTL: allocation of ANACtable " "failed (%zd bytes)\n", s->nb_blocks * sizeof(u8)); return -ENOMEM; } /* * First pass is to explore each physical unit, and construct the * virtual chains that exist (newest physical unit goes into VUtable). * Any block that is in any way invalid will be left in the * NOTEXPLORED state. Then at the end we will try to format it and * mark it as free. */ DEBUG(MTD_DEBUG_LEVEL3, "INFTL: pass 1, explore each unit\n"); for (first_block = s->firstEUN; first_block <= s->lastEUN; first_block++) { if (s->PUtable[first_block] != BLOCK_NOTEXPLORED) continue; do_format_chain = 0; first_logical_block = BLOCK_NIL; last_block = BLOCK_NIL; block = first_block; for (chain_length = 0; ; chain_length++) { if ((chain_length == 0) && (s->PUtable[block] != BLOCK_NOTEXPLORED)) { /* Nothing to do here, onto next block */ break; } if (inftl_read_oob(mtd, block * s->EraseSize + 8, 8, &retlen, (char *)&h0) < 0 || inftl_read_oob(mtd, block * s->EraseSize + 2 * SECTORSIZE + 8, 8, &retlen, (char *)&h1) < 0) { /* Should never happen? 
*/ do_format_chain++; break; } logical_block = le16_to_cpu(h0.virtualUnitNo); prev_block = le16_to_cpu(h0.prevUnitNo); erase_mark = le16_to_cpu((h1.EraseMark | h1.EraseMark1)); ANACtable[block] = h0.ANAC; /* Previous block is relative to start of Partition */ if (prev_block < s->nb_blocks) prev_block += s->firstEUN; /* Already explored partial chain? */ if (s->PUtable[block] != BLOCK_NOTEXPLORED) { /* Check if chain for this logical */ if (logical_block == first_logical_block) { if (last_block != BLOCK_NIL) s->PUtable[last_block] = block; } break; } /* Check for invalid block */ if (erase_mark != ERASE_MARK) { printk(KERN_WARNING "INFTL: corrupt block %d " "in chain %d, chain length %d, erase " "mark 0x%x?\n", block, first_block, chain_length, erase_mark); /* * Assume end of chain, probably incomplete * fold/erase... */ if (chain_length == 0) do_format_chain++; break; } /* Check for it being free already then... */ if ((logical_block == BLOCK_FREE) || (logical_block == BLOCK_NIL)) { s->PUtable[block] = BLOCK_FREE; break; } /* Sanity checks on block numbers */ if ((logical_block >= s->nb_blocks) || ((prev_block >= s->nb_blocks) && (prev_block != BLOCK_NIL))) { if (chain_length > 0) { printk(KERN_WARNING "INFTL: corrupt " "block %d in chain %d?\n", block, first_block); do_format_chain++; } break; } if (first_logical_block == BLOCK_NIL) { first_logical_block = logical_block; } else { if (first_logical_block != logical_block) { /* Normal for folded chain... */ break; } } /* * Current block is valid, so if we followed a virtual * chain to get here then we can set the previous * block pointer in our PUtable now. Then move onto * the previous block in the chain. */ s->PUtable[block] = BLOCK_NIL; if (last_block != BLOCK_NIL) s->PUtable[last_block] = block; last_block = block; block = prev_block; /* Check for end of chain */ if (block == BLOCK_NIL) break; /* Validate next block before following it... 
*/ if (block > s->lastEUN) { printk(KERN_WARNING "INFTL: invalid previous " "block %d in chain %d?\n", block, first_block); do_format_chain++; break; } } if (do_format_chain) { format_chain(s, first_block); continue; } /* * Looks like a valid chain then. It may not really be the * newest block in the chain, but it is the newest we have * found so far. We might update it in later iterations of * this loop if we find something newer. */ s->VUtable[first_logical_block] = first_block; logical_block = BLOCK_NIL; } #ifdef CONFIG_MTD_DEBUG_VERBOSE if (CONFIG_MTD_DEBUG_VERBOSE >= 2) INFTL_dumptables(s); #endif /* * Second pass, check for infinite loops in chains. These are * possible because we don't update the previous pointers when * we fold chains. No big deal, just fix them up in PUtable. */ DEBUG(MTD_DEBUG_LEVEL3, "INFTL: pass 2, validate virtual chains\n"); for (logical_block = 0; logical_block < s->numvunits; logical_block++) { block = s->VUtable[logical_block]; last_block = BLOCK_NIL; /* Check for free/reserved/nil */ if (block >= BLOCK_RESERVED) continue; ANAC = ANACtable[block]; for (i = 0; i < s->numvunits; i++) { if (s->PUtable[block] == BLOCK_NIL) break; if (s->PUtable[block] > s->lastEUN) { printk(KERN_WARNING "INFTL: invalid prev %d, " "in virtual chain %d\n", s->PUtable[block], logical_block); s->PUtable[block] = BLOCK_NIL; } if (ANACtable[block] != ANAC) { /* * Chain must point back to itself. This is ok, * but we will need adjust the tables with this * newest block and oldest block. */ s->VUtable[logical_block] = block; s->PUtable[last_block] = BLOCK_NIL; break; } ANAC--; last_block = block; block = s->PUtable[block]; } if (i >= s->nb_blocks) { /* * Uhoo, infinite chain with valid ANACS! * Format whole chain... 
*/ format_chain(s, first_block); } } #ifdef CONFIG_MTD_DEBUG_VERBOSE if (CONFIG_MTD_DEBUG_VERBOSE >= 2) INFTL_dumptables(s); if (CONFIG_MTD_DEBUG_VERBOSE >= 2) INFTL_dumpVUchains(s); #endif /* * Third pass, format unreferenced blocks and init free block count. */ s->numfreeEUNs = 0; s->LastFreeEUN = BLOCK_NIL; DEBUG(MTD_DEBUG_LEVEL3, "INFTL: pass 3, format unused blocks\n"); for (block = s->firstEUN; block <= s->lastEUN; block++) { if (s->PUtable[block] == BLOCK_NOTEXPLORED) { printk("INFTL: unreferenced block %d, formatting it\n", block); if (INFTL_formatblock(s, block) < 0) s->PUtable[block] = BLOCK_RESERVED; else s->PUtable[block] = BLOCK_FREE; } if (s->PUtable[block] == BLOCK_FREE) { s->numfreeEUNs++; if (s->LastFreeEUN == BLOCK_NIL) s->LastFreeEUN = block; } } kfree(ANACtable); return 0; }
gpl-2.0
davidmueller13/valexKernel-lt03wifi
arch/openrisc/kernel/init_task.c
4474
1338
/* * OpenRISC init_task.c * * Linux architectural port borrowing liberally from similar works of * others. All original copyrights apply as per the original source * declaration. * * Modifications for the OpenRISC architecture: * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com> * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/init_task.h> #include <linux/mqueue.h> #include <linux/export.h> static struct signal_struct init_signals = INIT_SIGNALS(init_signals); static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); /* * Initial thread structure. * * We need to make sure that this is THREAD_SIZE aligned due to the * way process stacks are handled. This is done by having a special * "init_task" linker map entry.. */ union thread_union init_thread_union __init_task_data = { INIT_THREAD_INFO(init_task) }; /* * Initial task structure. * * All other task structs will be allocated on slabs in fork.c */ struct task_struct init_task = INIT_TASK(init_task); EXPORT_SYMBOL(init_task);
gpl-2.0
MoKee/android_kernel_htc_pyramid
arch/arm/mach-at91/board-sam9rlek.c
4730
7002
/* * Copyright (C) 2005 SAN People * Copyright (C) 2007 Atmel Corporation * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of this archive for * more details. */ #include <linux/types.h> #include <linux/gpio.h> #include <linux/init.h> #include <linux/mm.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/spi/spi.h> #include <linux/fb.h> #include <linux/clk.h> #include <linux/input.h> #include <linux/gpio_keys.h> #include <video/atmel_lcdc.h> #include <asm/setup.h> #include <asm/mach-types.h> #include <asm/irq.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> #include <asm/mach/irq.h> #include <mach/hardware.h> #include <mach/board.h> #include <mach/at91sam9_smc.h> #include <mach/at91_shdwc.h> #include "sam9_smc.h" #include "generic.h" static void __init ek_init_early(void) { /* Initialize processor: 12.000 MHz crystal */ at91_initialize(12000000); /* DBGU on ttyS0. (Rx & Tx only) */ at91_register_uart(0, 0, 0); /* USART0 on ttyS1. 
(Rx, Tx, CTS, RTS) */ at91_register_uart(AT91SAM9RL_ID_US0, 1, ATMEL_UART_CTS | ATMEL_UART_RTS); /* set serial console to ttyS0 (ie, DBGU) */ at91_set_serial_console(0); } /* * USB HS Device port */ static struct usba_platform_data __initdata ek_usba_udc_data = { .vbus_pin = AT91_PIN_PA8, }; /* * MCI (SD/MMC) */ static struct at91_mmc_data __initdata ek_mmc_data = { .wire4 = 1, .det_pin = AT91_PIN_PA15, .wp_pin = -EINVAL, .vcc_pin = -EINVAL, }; /* * NAND flash */ static struct mtd_partition __initdata ek_nand_partition[] = { { .name = "Partition 1", .offset = 0, .size = SZ_256K, }, { .name = "Partition 2", .offset = MTDPART_OFS_NXTBLK, .size = MTDPART_SIZ_FULL, }, }; static struct atmel_nand_data __initdata ek_nand_data = { .ale = 21, .cle = 22, .det_pin = -EINVAL, .rdy_pin = AT91_PIN_PD17, .enable_pin = AT91_PIN_PB6, .ecc_mode = NAND_ECC_SOFT, .on_flash_bbt = 1, .parts = ek_nand_partition, .num_parts = ARRAY_SIZE(ek_nand_partition), }; static struct sam9_smc_config __initdata ek_nand_smc_config = { .ncs_read_setup = 0, .nrd_setup = 1, .ncs_write_setup = 0, .nwe_setup = 1, .ncs_read_pulse = 3, .nrd_pulse = 3, .ncs_write_pulse = 3, .nwe_pulse = 3, .read_cycle = 5, .write_cycle = 5, .mode = AT91_SMC_READMODE | AT91_SMC_WRITEMODE | AT91_SMC_EXNWMODE_DISABLE | AT91_SMC_DBW_8, .tdf_cycles = 2, }; static void __init ek_add_device_nand(void) { /* configure chip-select 3 (NAND) */ sam9_smc_configure(0, 3, &ek_nand_smc_config); at91_add_device_nand(&ek_nand_data); } /* * SPI devices */ static struct spi_board_info ek_spi_devices[] = { { /* DataFlash chip */ .modalias = "mtd_dataflash", .chip_select = 0, .max_speed_hz = 15 * 1000 * 1000, .bus_num = 0, }, }; /* * LCD Controller */ #if defined(CONFIG_FB_ATMEL) || defined(CONFIG_FB_ATMEL_MODULE) static struct fb_videomode at91_tft_vga_modes[] = { { .name = "TX09D50VM1CCA @ 60", .refresh = 60, .xres = 240, .yres = 320, .pixclock = KHZ2PICOS(4965), .left_margin = 1, .right_margin = 33, .upper_margin = 1, .lower_margin = 0, 
.hsync_len = 5, .vsync_len = 1, .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, .vmode = FB_VMODE_NONINTERLACED, }, }; static struct fb_monspecs at91fb_default_monspecs = { .manufacturer = "HIT", .monitor = "TX09D50VM1CCA", .modedb = at91_tft_vga_modes, .modedb_len = ARRAY_SIZE(at91_tft_vga_modes), .hfmin = 15000, .hfmax = 64000, .vfmin = 50, .vfmax = 150, }; #define AT91SAM9RL_DEFAULT_LCDCON2 (ATMEL_LCDC_MEMOR_LITTLE \ | ATMEL_LCDC_DISTYPE_TFT \ | ATMEL_LCDC_CLKMOD_ALWAYSACTIVE) static void at91_lcdc_power_control(int on) { if (on) at91_set_gpio_value(AT91_PIN_PC1, 0); /* power up */ else at91_set_gpio_value(AT91_PIN_PC1, 1); /* power down */ } /* Driver datas */ static struct atmel_lcdfb_info __initdata ek_lcdc_data = { .lcdcon_is_backlight = true, .default_bpp = 16, .default_dmacon = ATMEL_LCDC_DMAEN, .default_lcdcon2 = AT91SAM9RL_DEFAULT_LCDCON2, .default_monspecs = &at91fb_default_monspecs, .atmel_lcdfb_power_control = at91_lcdc_power_control, .guard_time = 1, .lcd_wiring_mode = ATMEL_LCDC_WIRING_RGB, }; #else static struct atmel_lcdfb_info __initdata ek_lcdc_data; #endif /* * AC97 * reset_pin is not connected: NRST */ static struct ac97c_platform_data ek_ac97_data = { .reset_pin = -EINVAL, }; /* * LEDs */ static struct gpio_led ek_leds[] = { { /* "bottom" led, green, userled1 to be defined */ .name = "ds1", .gpio = AT91_PIN_PD15, .active_low = 1, .default_trigger = "none", }, { /* "bottom" led, green, userled2 to be defined */ .name = "ds2", .gpio = AT91_PIN_PD16, .active_low = 1, .default_trigger = "none", }, { /* "power" led, yellow */ .name = "ds3", .gpio = AT91_PIN_PD14, .default_trigger = "heartbeat", } }; /* * Touchscreen */ static struct at91_tsadcc_data ek_tsadcc_data = { .adc_clock = 1000000, .pendet_debounce = 0x0f, .ts_sample_hold_time = 0x03, }; /* * GPIO Buttons */ #if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE) static struct gpio_keys_button ek_buttons[] = { { .gpio = AT91_PIN_PB0, .code = BTN_2, .desc = "Right 
Click", .active_low = 1, .wakeup = 1, }, { .gpio = AT91_PIN_PB1, .code = BTN_1, .desc = "Left Click", .active_low = 1, .wakeup = 1, } }; static struct gpio_keys_platform_data ek_button_data = { .buttons = ek_buttons, .nbuttons = ARRAY_SIZE(ek_buttons), }; static struct platform_device ek_button_device = { .name = "gpio-keys", .id = -1, .num_resources = 0, .dev = { .platform_data = &ek_button_data, } }; static void __init ek_add_device_buttons(void) { at91_set_gpio_input(AT91_PIN_PB1, 1); /* btn1 */ at91_set_deglitch(AT91_PIN_PB1, 1); at91_set_gpio_input(AT91_PIN_PB0, 1); /* btn2 */ at91_set_deglitch(AT91_PIN_PB0, 1); platform_device_register(&ek_button_device); } #else static void __init ek_add_device_buttons(void) {} #endif static void __init ek_board_init(void) { /* Serial */ at91_add_device_serial(); /* USB HS */ at91_add_device_usba(&ek_usba_udc_data); /* I2C */ at91_add_device_i2c(NULL, 0); /* NAND */ ek_add_device_nand(); /* SPI */ at91_add_device_spi(ek_spi_devices, ARRAY_SIZE(ek_spi_devices)); /* MMC */ at91_add_device_mmc(0, &ek_mmc_data); /* LCD Controller */ at91_add_device_lcdc(&ek_lcdc_data); /* AC97 */ at91_add_device_ac97(&ek_ac97_data); /* Touch Screen Controller */ at91_add_device_tsadcc(&ek_tsadcc_data); /* LEDs */ at91_gpio_leds(ek_leds, ARRAY_SIZE(ek_leds)); /* Push Buttons */ ek_add_device_buttons(); } MACHINE_START(AT91SAM9RLEK, "Atmel AT91SAM9RL-EK") /* Maintainer: Atmel */ .timer = &at91sam926x_timer, .map_io = at91_map_io, .init_early = ek_init_early, .init_irq = at91_init_irq_default, .init_machine = ek_board_init, MACHINE_END
gpl-2.0
drewx2/caf-kernel
sound/soc/pxa/corgi.c
4986
9072
/* * corgi.c -- SoC audio for Corgi * * Copyright 2005 Wolfson Microelectronics PLC. * Copyright 2005 Openedhand Ltd. * * Authors: Liam Girdwood <lrg@slimlogic.co.uk> * Richard Purdie <richard@openedhand.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/timer.h> #include <linux/i2c.h> #include <linux/interrupt.h> #include <linux/platform_device.h> #include <linux/gpio.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/soc.h> #include <asm/mach-types.h> #include <mach/corgi.h> #include <mach/audio.h> #include "../codecs/wm8731.h" #include "pxa2xx-i2s.h" #define CORGI_HP 0 #define CORGI_MIC 1 #define CORGI_LINE 2 #define CORGI_HEADSET 3 #define CORGI_HP_OFF 4 #define CORGI_SPK_ON 0 #define CORGI_SPK_OFF 1 /* audio clock in Hz - rounded from 12.235MHz */ #define CORGI_AUDIO_CLOCK 12288000 static int corgi_jack_func; static int corgi_spk_func; static void corgi_ext_control(struct snd_soc_dapm_context *dapm) { /* set up jack connection */ switch (corgi_jack_func) { case CORGI_HP: /* set = unmute headphone */ gpio_set_value(CORGI_GPIO_MUTE_L, 1); gpio_set_value(CORGI_GPIO_MUTE_R, 1); snd_soc_dapm_disable_pin(dapm, "Mic Jack"); snd_soc_dapm_disable_pin(dapm, "Line Jack"); snd_soc_dapm_enable_pin(dapm, "Headphone Jack"); snd_soc_dapm_disable_pin(dapm, "Headset Jack"); break; case CORGI_MIC: /* reset = mute headphone */ gpio_set_value(CORGI_GPIO_MUTE_L, 0); gpio_set_value(CORGI_GPIO_MUTE_R, 0); snd_soc_dapm_enable_pin(dapm, "Mic Jack"); snd_soc_dapm_disable_pin(dapm, "Line Jack"); snd_soc_dapm_disable_pin(dapm, "Headphone Jack"); snd_soc_dapm_disable_pin(dapm, "Headset Jack"); break; case CORGI_LINE: gpio_set_value(CORGI_GPIO_MUTE_L, 0); gpio_set_value(CORGI_GPIO_MUTE_R, 0); 
snd_soc_dapm_disable_pin(dapm, "Mic Jack"); snd_soc_dapm_enable_pin(dapm, "Line Jack"); snd_soc_dapm_disable_pin(dapm, "Headphone Jack"); snd_soc_dapm_disable_pin(dapm, "Headset Jack"); break; case CORGI_HEADSET: gpio_set_value(CORGI_GPIO_MUTE_L, 0); gpio_set_value(CORGI_GPIO_MUTE_R, 1); snd_soc_dapm_enable_pin(dapm, "Mic Jack"); snd_soc_dapm_disable_pin(dapm, "Line Jack"); snd_soc_dapm_disable_pin(dapm, "Headphone Jack"); snd_soc_dapm_enable_pin(dapm, "Headset Jack"); break; } if (corgi_spk_func == CORGI_SPK_ON) snd_soc_dapm_enable_pin(dapm, "Ext Spk"); else snd_soc_dapm_disable_pin(dapm, "Ext Spk"); /* signal a DAPM event */ snd_soc_dapm_sync(dapm); } static int corgi_startup(struct snd_pcm_substream *substream) { struct snd_soc_pcm_runtime *rtd = substream->private_data; struct snd_soc_codec *codec = rtd->codec; mutex_lock(&codec->mutex); /* check the jack status at stream startup */ corgi_ext_control(&codec->dapm); mutex_unlock(&codec->mutex); return 0; } /* we need to unmute the HP at shutdown as the mute burns power on corgi */ static void corgi_shutdown(struct snd_pcm_substream *substream) { /* set = unmute headphone */ gpio_set_value(CORGI_GPIO_MUTE_L, 1); gpio_set_value(CORGI_GPIO_MUTE_R, 1); } static int corgi_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params) { struct snd_soc_pcm_runtime *rtd = substream->private_data; struct snd_soc_dai *codec_dai = rtd->codec_dai; struct snd_soc_dai *cpu_dai = rtd->cpu_dai; unsigned int clk = 0; int ret = 0; switch (params_rate(params)) { case 8000: case 16000: case 48000: case 96000: clk = 12288000; break; case 11025: case 22050: case 44100: clk = 11289600; break; } /* set the codec system clock for DAC and ADC */ ret = snd_soc_dai_set_sysclk(codec_dai, WM8731_SYSCLK_XTAL, clk, SND_SOC_CLOCK_IN); if (ret < 0) return ret; /* set the I2S system clock as input (unused) */ ret = snd_soc_dai_set_sysclk(cpu_dai, PXA2XX_I2S_SYSCLK, 0, SND_SOC_CLOCK_IN); if (ret < 0) return ret; return 0; } 
static struct snd_soc_ops corgi_ops = { .startup = corgi_startup, .hw_params = corgi_hw_params, .shutdown = corgi_shutdown, }; static int corgi_get_jack(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { ucontrol->value.integer.value[0] = corgi_jack_func; return 0; } static int corgi_set_jack(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_soc_card *card = snd_kcontrol_chip(kcontrol); if (corgi_jack_func == ucontrol->value.integer.value[0]) return 0; corgi_jack_func = ucontrol->value.integer.value[0]; corgi_ext_control(&card->dapm); return 1; } static int corgi_get_spk(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { ucontrol->value.integer.value[0] = corgi_spk_func; return 0; } static int corgi_set_spk(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_soc_card *card = snd_kcontrol_chip(kcontrol); if (corgi_spk_func == ucontrol->value.integer.value[0]) return 0; corgi_spk_func = ucontrol->value.integer.value[0]; corgi_ext_control(&card->dapm); return 1; } static int corgi_amp_event(struct snd_soc_dapm_widget *w, struct snd_kcontrol *k, int event) { gpio_set_value(CORGI_GPIO_APM_ON, SND_SOC_DAPM_EVENT_ON(event)); return 0; } static int corgi_mic_event(struct snd_soc_dapm_widget *w, struct snd_kcontrol *k, int event) { gpio_set_value(CORGI_GPIO_MIC_BIAS, SND_SOC_DAPM_EVENT_ON(event)); return 0; } /* corgi machine dapm widgets */ static const struct snd_soc_dapm_widget wm8731_dapm_widgets[] = { SND_SOC_DAPM_HP("Headphone Jack", NULL), SND_SOC_DAPM_MIC("Mic Jack", corgi_mic_event), SND_SOC_DAPM_SPK("Ext Spk", corgi_amp_event), SND_SOC_DAPM_LINE("Line Jack", NULL), SND_SOC_DAPM_HP("Headset Jack", NULL), }; /* Corgi machine audio map (connections to the codec pins) */ static const struct snd_soc_dapm_route corgi_audio_map[] = { /* headset Jack - in = micin, out = LHPOUT*/ {"Headset Jack", NULL, "LHPOUT"}, /* headphone connected to LHPOUT1, RHPOUT1 */ {"Headphone Jack", 
NULL, "LHPOUT"}, {"Headphone Jack", NULL, "RHPOUT"}, /* speaker connected to LOUT, ROUT */ {"Ext Spk", NULL, "ROUT"}, {"Ext Spk", NULL, "LOUT"}, /* mic is connected to MICIN (via right channel of headphone jack) */ {"MICIN", NULL, "Mic Jack"}, /* Same as the above but no mic bias for line signals */ {"MICIN", NULL, "Line Jack"}, }; static const char *jack_function[] = {"Headphone", "Mic", "Line", "Headset", "Off"}; static const char *spk_function[] = {"On", "Off"}; static const struct soc_enum corgi_enum[] = { SOC_ENUM_SINGLE_EXT(5, jack_function), SOC_ENUM_SINGLE_EXT(2, spk_function), }; static const struct snd_kcontrol_new wm8731_corgi_controls[] = { SOC_ENUM_EXT("Jack Function", corgi_enum[0], corgi_get_jack, corgi_set_jack), SOC_ENUM_EXT("Speaker Function", corgi_enum[1], corgi_get_spk, corgi_set_spk), }; /* * Logic for a wm8731 as connected on a Sharp SL-C7x0 Device */ static int corgi_wm8731_init(struct snd_soc_pcm_runtime *rtd) { struct snd_soc_codec *codec = rtd->codec; struct snd_soc_dapm_context *dapm = &codec->dapm; snd_soc_dapm_nc_pin(dapm, "LLINEIN"); snd_soc_dapm_nc_pin(dapm, "RLINEIN"); return 0; } /* corgi digital audio interface glue - connects codec <--> CPU */ static struct snd_soc_dai_link corgi_dai = { .name = "WM8731", .stream_name = "WM8731", .cpu_dai_name = "pxa2xx-i2s", .codec_dai_name = "wm8731-hifi", .platform_name = "pxa-pcm-audio", .codec_name = "wm8731.0-001b", .init = corgi_wm8731_init, .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBS_CFS, .ops = &corgi_ops, }; /* corgi audio machine driver */ static struct snd_soc_card corgi = { .name = "Corgi", .owner = THIS_MODULE, .dai_link = &corgi_dai, .num_links = 1, .controls = wm8731_corgi_controls, .num_controls = ARRAY_SIZE(wm8731_corgi_controls), .dapm_widgets = wm8731_dapm_widgets, .num_dapm_widgets = ARRAY_SIZE(wm8731_dapm_widgets), .dapm_routes = corgi_audio_map, .num_dapm_routes = ARRAY_SIZE(corgi_audio_map), }; static int __devinit corgi_probe(struct 
platform_device *pdev) { struct snd_soc_card *card = &corgi; int ret; card->dev = &pdev->dev; ret = snd_soc_register_card(card); if (ret) dev_err(&pdev->dev, "snd_soc_register_card() failed: %d\n", ret); return ret; } static int __devexit corgi_remove(struct platform_device *pdev) { struct snd_soc_card *card = platform_get_drvdata(pdev); snd_soc_unregister_card(card); return 0; } static struct platform_driver corgi_driver = { .driver = { .name = "corgi-audio", .owner = THIS_MODULE, }, .probe = corgi_probe, .remove = __devexit_p(corgi_remove), }; module_platform_driver(corgi_driver); /* Module information */ MODULE_AUTHOR("Richard Purdie"); MODULE_DESCRIPTION("ALSA SoC Corgi"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:corgi-audio");
gpl-2.0
AICP/kernel_motorola_msm8226
arch/mips/vr41xx/common/giu.c
4986
2825
/* * NEC VR4100 series GIU platform device. * * Copyright (C) 2007 Yoichi Yuasa <yuasa@linux-mips.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include <linux/errno.h> #include <linux/init.h> #include <linux/smp.h> #include <linux/ioport.h> #include <linux/platform_device.h> #include <asm/cpu.h> #include <asm/vr41xx/giu.h> #include <asm/vr41xx/irq.h> static struct resource giu_50pins_pullupdown_resource[] __initdata = { { .start = 0x0b000100, .end = 0x0b00011f, .flags = IORESOURCE_MEM, }, { .start = 0x0b0002e0, .end = 0x0b0002e3, .flags = IORESOURCE_MEM, }, { .start = GIUINT_IRQ, .end = GIUINT_IRQ, .flags = IORESOURCE_IRQ, }, }; static struct resource giu_36pins_resource[] __initdata = { { .start = 0x0f000140, .end = 0x0f00015f, .flags = IORESOURCE_MEM, }, { .start = GIUINT_IRQ, .end = GIUINT_IRQ, .flags = IORESOURCE_IRQ, }, }; static struct resource giu_48pins_resource[] __initdata = { { .start = 0x0f000140, .end = 0x0f000167, .flags = IORESOURCE_MEM, }, { .start = GIUINT_IRQ, .end = GIUINT_IRQ, .flags = IORESOURCE_IRQ, }, }; static int __init vr41xx_giu_add(void) { struct platform_device *pdev; struct resource *res; unsigned int num; int retval; pdev = platform_device_alloc("GIU", -1); if (!pdev) return -ENOMEM; switch (current_cpu_type()) { case CPU_VR4111: case CPU_VR4121: pdev->id = GPIO_50PINS_PULLUPDOWN; 
res = giu_50pins_pullupdown_resource; num = ARRAY_SIZE(giu_50pins_pullupdown_resource); break; case CPU_VR4122: case CPU_VR4131: pdev->id = GPIO_36PINS; res = giu_36pins_resource; num = ARRAY_SIZE(giu_36pins_resource); break; case CPU_VR4133: pdev->id = GPIO_48PINS_EDGE_SELECT; res = giu_48pins_resource; num = ARRAY_SIZE(giu_48pins_resource); break; default: retval = -ENODEV; goto err_free_device; } retval = platform_device_add_resources(pdev, res, num); if (retval) goto err_free_device; retval = platform_device_add(pdev); if (retval) goto err_free_device; return 0; err_free_device: platform_device_put(pdev); return retval; } device_initcall(vr41xx_giu_add);
gpl-2.0
TeamWin/android_kernel_samsung_j2lte
drivers/staging/cxt1e1/comet_tables.c
7802
28568
/*----------------------------------------------------------------------------- * comet_tables.c - waveform tables for the PM4351 'COMET' * * Copyright (C) 2003-2005 SBE, Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * For further information, contact via email: support@sbei.com * SBE, Inc. San Ramon, California U.S.A. *----------------------------------------------------------------------------- */ #include <linux/types.h> /***************************************************************************** * * Array names: * * TWVLongHaul0DB * TWVLongHaul7_5DB * TWVLongHaul15DB * TWVLongHaul22_5DB * TWVShortHaul0 * TWVShortHaul1 * TWVShortHaul2 * TWVShortHaul3 * TWVShortHaul4 * TWVShortHaul5 * TWV_E1_120Ohm * TWV_E1_75Ohm <not supported> * T1_Equalizer * E1_Equalizer * *****************************************************************************/ u_int8_t TWVLongHaul0DB[25][5] =/* T1 Long Haul 0 DB */ { {0x00, 0x44, 0x00, 0x00, 0x00}, /* Sample 0 */ {0x0A, 0x44, 0x00, 0x00, 0x00}, /* Sample 1 */ {0x20, 0x43, 0x00, 0x00, 0x00}, /* Sample 2 */ {0x32, 0x43, 0x00, 0x00, 0x00}, /* Sample 3 */ {0x3E, 0x42, 0x00, 0x00, 0x00}, /* Sample 4 */ {0x3D, 0x42, 0x00, 0x00, 0x00}, /* Sample 5 */ {0x3C, 0x41, 0x00, 0x00, 0x00}, /* Sample 6 */ {0x3B, 0x41, 0x00, 0x00, 0x00}, /* Sample 7 */ {0x3A, 0x00, 0x00, 0x00, 0x00}, /* Sample 8 */ {0x39, 0x00, 0x00, 0x00, 0x00}, /* Sample 9 */ {0x39, 0x00, 0x00, 0x00, 0x00}, /* Sample 10 */ {0x38, 0x00, 0x00, 0x00, 0x00}, /* Sample 11 */ {0x37, 0x00, 0x00, 0x00, 0x00}, /* Sample 12 */ {0x36, 0x00, 0x00, 
0x00, 0x00}, /* Sample 13 */ {0x34, 0x00, 0x00, 0x00, 0x00}, /* Sample 14 */ {0x29, 0x00, 0x00, 0x00, 0x00}, /* Sample 15 */ {0x4F, 0x00, 0x00, 0x00, 0x00}, /* Sample 16 */ {0x4C, 0x00, 0x00, 0x00, 0x00}, /* Sample 17 */ {0x4A, 0x00, 0x00, 0x00, 0x00}, /* Sample 18 */ {0x49, 0x00, 0x00, 0x00, 0x00}, /* Sample 19 */ {0x47, 0x00, 0x00, 0x00, 0x00}, /* Sample 20 */ {0x47, 0x00, 0x00, 0x00, 0x00}, /* Sample 21 */ {0x46, 0x00, 0x00, 0x00, 0x00}, /* Sample 22 */ {0x46, 0x00, 0x00, 0x00, 0x00}, /* Sample 23 */ {0x0C} /* PMC's suggested value */ /* {0x14} Output Amplitude */ }; u_int8_t TWVLongHaul7_5DB[25][5] = /* T1 Long Haul 7.5 DB */ { {0x00, 0x10, 0x00, 0x00, 0x00}, /* Sample 0 */ {0x01, 0x0E, 0x00, 0x00, 0x00}, /* Sample 1 */ {0x02, 0x0C, 0x00, 0x00, 0x00}, /* Sample 2 */ {0x04, 0x0A, 0x00, 0x00, 0x00}, /* Sample 3 */ {0x08, 0x08, 0x00, 0x00, 0x00}, /* Sample 4 */ {0x0C, 0x06, 0x00, 0x00, 0x00}, /* Sample 5 */ {0x10, 0x04, 0x00, 0x00, 0x00}, /* Sample 6 */ {0x16, 0x02, 0x00, 0x00, 0x00}, /* Sample 7 */ {0x1A, 0x01, 0x00, 0x00, 0x00}, /* Sample 8 */ {0x1E, 0x00, 0x00, 0x00, 0x00}, /* Sample 9 */ {0x22, 0x00, 0x00, 0x00, 0x00}, /* Sample 10 */ {0x26, 0x00, 0x00, 0x00, 0x00}, /* Sample 11 */ {0x2A, 0x00, 0x00, 0x00, 0x00}, /* Sample 12 */ {0x2B, 0x00, 0x00, 0x00, 0x00}, /* Sample 13 */ {0x2C, 0x00, 0x00, 0x00, 0x00}, /* Sample 14 */ {0x2D, 0x00, 0x00, 0x00, 0x00}, /* Sample 15 */ {0x2C, 0x00, 0x00, 0x00, 0x00}, /* Sample 16 */ {0x28, 0x00, 0x00, 0x00, 0x00}, /* Sample 17 */ {0x24, 0x00, 0x00, 0x00, 0x00}, /* Sample 18 */ {0x20, 0x00, 0x00, 0x00, 0x00}, /* Sample 19 */ {0x1C, 0x00, 0x00, 0x00, 0x00}, /* Sample 20 */ {0x18, 0x00, 0x00, 0x00, 0x00}, /* Sample 21 */ {0x14, 0x00, 0x00, 0x00, 0x00}, /* Sample 22 */ {0x12, 0x00, 0x00, 0x00, 0x00}, /* Sample 23 */ {0x07} /* PMC's suggested value */ /* { 0x0A } Output Amplitude */ }; u_int8_t TWVLongHaul15DB[25][5] = /* T1 Long Haul 15 DB */ { {0x00, 0x2A, 0x09, 0x01, 0x00}, /* Sample 0 */ {0x00, 0x28, 0x08, 0x01, 0x00}, /* 
Sample 1 */ {0x00, 0x26, 0x08, 0x01, 0x00}, /* Sample 2 */ {0x00, 0x24, 0x07, 0x01, 0x00}, /* Sample 3 */ {0x01, 0x22, 0x07, 0x01, 0x00}, /* Sample 4 */ {0x02, 0x20, 0x06, 0x01, 0x00}, /* Sample 5 */ {0x04, 0x1E, 0x06, 0x01, 0x00}, /* Sample 6 */ {0x07, 0x1C, 0x05, 0x00, 0x00}, /* Sample 7 */ {0x0A, 0x1B, 0x05, 0x00, 0x00}, /* Sample 8 */ {0x0D, 0x19, 0x05, 0x00, 0x00}, /* Sample 9 */ {0x10, 0x18, 0x04, 0x00, 0x00}, /* Sample 10 */ {0x14, 0x16, 0x04, 0x00, 0x00}, /* Sample 11 */ {0x18, 0x15, 0x04, 0x00, 0x00}, /* Sample 12 */ {0x1B, 0x13, 0x03, 0x00, 0x00}, /* Sample 13 */ {0x1E, 0x12, 0x03, 0x00, 0x00}, /* Sample 14 */ {0x21, 0x10, 0x03, 0x00, 0x00}, /* Sample 15 */ {0x24, 0x0F, 0x03, 0x00, 0x00}, /* Sample 16 */ {0x27, 0x0D, 0x03, 0x00, 0x00}, /* Sample 17 */ {0x2A, 0x0D, 0x02, 0x00, 0x00}, /* Sample 18 */ {0x2D, 0x0B, 0x02, 0x00, 0x00}, /* Sample 19 */ {0x30, 0x0B, 0x02, 0x00, 0x00}, /* Sample 20 */ {0x30, 0x0A, 0x02, 0x00, 0x00}, /* Sample 21 */ {0x2E, 0x0A, 0x02, 0x00, 0x00}, /* Sample 22 */ {0x2C, 0x09, 0x02, 0x00, 0x00}, /* Sample 23 */ {0x03} /* Output Amplitude */ }; u_int8_t TWVLongHaul22_5DB[25][5] = /* T1 Long Haul 22.5 DB */ { {0x00, 0x1F, 0x16, 0x06, 0x01}, /* Sample 0 */ {0x00, 0x20, 0x15, 0x05, 0x01}, /* Sample 1 */ {0x00, 0x21, 0x15, 0x05, 0x01}, /* Sample 2 */ {0x00, 0x22, 0x14, 0x05, 0x01}, /* Sample 3 */ {0x00, 0x22, 0x13, 0x04, 0x00}, /* Sample 4 */ {0x00, 0x23, 0x12, 0x04, 0x00}, /* Sample 5 */ {0x01, 0x23, 0x12, 0x04, 0x00}, /* Sample 6 */ {0x01, 0x24, 0x11, 0x03, 0x00}, /* Sample 7 */ {0x01, 0x23, 0x10, 0x03, 0x00}, /* Sample 8 */ {0x02, 0x23, 0x10, 0x03, 0x00}, /* Sample 9 */ {0x03, 0x22, 0x0F, 0x03, 0x00}, /* Sample 10 */ {0x05, 0x22, 0x0E, 0x03, 0x00}, /* Sample 11 */ {0x07, 0x21, 0x0E, 0x02, 0x00}, /* Sample 12 */ {0x09, 0x20, 0x0D, 0x02, 0x00}, /* Sample 13 */ {0x0B, 0x1E, 0x0C, 0x02, 0x00}, /* Sample 14 */ {0x0E, 0x1D, 0x0C, 0x02, 0x00}, /* Sample 15 */ {0x10, 0x1B, 0x0B, 0x02, 0x00}, /* Sample 16 */ {0x13, 0x1B, 0x0A, 0x02, 0x00}, /* 
Sample 17 */ {0x15, 0x1A, 0x0A, 0x02, 0x00}, /* Sample 18 */ {0x17, 0x19, 0x09, 0x01, 0x00}, /* Sample 19 */ {0x19, 0x19, 0x08, 0x01, 0x00}, /* Sample 20 */ {0x1B, 0x18, 0x08, 0x01, 0x00}, /* Sample 21 */ {0x1D, 0x17, 0x07, 0x01, 0x00}, /* Sample 22 */ {0x1E, 0x17, 0x06, 0x01, 0x00}, /* Sample 23 */ {0x02} /* Output Amplitude */ }; u_int8_t TWVShortHaul0[25][5] = /* T1 Short Haul 0 - 110 ft */ { {0x00, 0x45, 0x00, 0x00, 0x00}, /* Sample 0 */ {0x0A, 0x44, 0x00, 0x00, 0x00}, /* Sample 1 */ {0x20, 0x43, 0x00, 0x00, 0x00}, /* Sample 2 */ {0x3F, 0x43, 0x00, 0x00, 0x00}, /* Sample 3 */ {0x3F, 0x42, 0x00, 0x00, 0x00}, /* Sample 4 */ {0x3F, 0x42, 0x00, 0x00, 0x00}, /* Sample 5 */ {0x3C, 0x41, 0x00, 0x00, 0x00}, /* Sample 6 */ {0x3B, 0x41, 0x00, 0x00, 0x00}, /* Sample 7 */ {0x3A, 0x00, 0x00, 0x00, 0x00}, /* Sample 8 */ {0x39, 0x00, 0x00, 0x00, 0x00}, /* Sample 9 */ {0x39, 0x00, 0x00, 0x00, 0x00}, /* Sample 10 */ {0x38, 0x00, 0x00, 0x00, 0x00}, /* Sample 11 */ {0x37, 0x00, 0x00, 0x00, 0x00}, /* Sample 12 */ {0x36, 0x00, 0x00, 0x00, 0x00}, /* Sample 13 */ {0x34, 0x00, 0x00, 0x00, 0x00}, /* Sample 14 */ {0x29, 0x00, 0x00, 0x00, 0x00}, /* Sample 15 */ {0x59, 0x00, 0x00, 0x00, 0x00}, /* Sample 16 */ {0x55, 0x00, 0x00, 0x00, 0x00}, /* Sample 17 */ {0x50, 0x00, 0x00, 0x00, 0x00}, /* Sample 18 */ {0x4D, 0x00, 0x00, 0x00, 0x00}, /* Sample 19 */ {0x4A, 0x00, 0x00, 0x00, 0x00}, /* Sample 20 */ {0x48, 0x00, 0x00, 0x00, 0x00}, /* Sample 21 */ {0x46, 0x00, 0x00, 0x00, 0x00}, /* Sample 22 */ {0x46, 0x00, 0x00, 0x00, 0x00}, /* Sample 23 */ {0x0C} /* Output Amplitude */ }; u_int8_t TWVShortHaul1[25][5] = /* T1 Short Haul 110 - 220 ft */ { {0x00, 0x44, 0x00, 0x00, 0x00}, /* Sample 0 */ {0x0A, 0x44, 0x00, 0x00, 0x00}, /* Sample 1 */ {0x3F, 0x43, 0x00, 0x00, 0x00}, /* Sample 2 */ {0x3F, 0x43, 0x00, 0x00, 0x00}, /* Sample 3 */ {0x36, 0x42, 0x00, 0x00, 0x00}, /* Sample 4 */ {0x34, 0x42, 0x00, 0x00, 0x00}, /* Sample 5 */ {0x30, 0x41, 0x00, 0x00, 0x00}, /* Sample 6 */ {0x2F, 0x41, 0x00, 0x00, 
0x00}, /* Sample 7 */ {0x2E, 0x00, 0x00, 0x00, 0x00}, /* Sample 8 */ {0x2D, 0x00, 0x00, 0x00, 0x00}, /* Sample 9 */ {0x2C, 0x00, 0x00, 0x00, 0x00}, /* Sample 10 */ {0x2B, 0x00, 0x00, 0x00, 0x00}, /* Sample 11 */ {0x2A, 0x00, 0x00, 0x00, 0x00}, /* Sample 12 */ {0x28, 0x00, 0x00, 0x00, 0x00}, /* Sample 13 */ {0x26, 0x00, 0x00, 0x00, 0x00}, /* Sample 14 */ {0x4A, 0x00, 0x00, 0x00, 0x00}, /* Sample 15 */ {0x68, 0x00, 0x00, 0x00, 0x00}, /* Sample 16 */ {0x54, 0x00, 0x00, 0x00, 0x00}, /* Sample 17 */ {0x4F, 0x00, 0x00, 0x00, 0x00}, /* Sample 18 */ {0x4A, 0x00, 0x00, 0x00, 0x00}, /* Sample 19 */ {0x49, 0x00, 0x00, 0x00, 0x00}, /* Sample 20 */ {0x47, 0x00, 0x00, 0x00, 0x00}, /* Sample 21 */ {0x47, 0x00, 0x00, 0x00, 0x00}, /* Sample 22 */ {0x46, 0x00, 0x00, 0x00, 0x00}, /* Sample 23 */ {0x10} /* Output Amplitude */ }; u_int8_t TWVShortHaul2[25][5] = /* T1 Short Haul 220 - 330 ft */ { {0x00, 0x44, 0x00, 0x00, 0x00}, /* Sample 0 */ {0x0A, 0x44, 0x00, 0x00, 0x00}, /* Sample 1 */ {0x3F, 0x43, 0x00, 0x00, 0x00}, /* Sample 2 */ {0x3A, 0x43, 0x00, 0x00, 0x00}, /* Sample 3 */ {0x3A, 0x42, 0x00, 0x00, 0x00}, /* Sample 4 */ {0x38, 0x42, 0x00, 0x00, 0x00}, /* Sample 5 */ {0x30, 0x41, 0x00, 0x00, 0x00}, /* Sample 6 */ {0x2F, 0x41, 0x00, 0x00, 0x00}, /* Sample 7 */ {0x2E, 0x00, 0x00, 0x00, 0x00}, /* Sample 8 */ {0x2D, 0x00, 0x00, 0x00, 0x00}, /* Sample 9 */ {0x2C, 0x00, 0x00, 0x00, 0x00}, /* Sample 10 */ {0x2B, 0x00, 0x00, 0x00, 0x00}, /* Sample 11 */ {0x2A, 0x00, 0x00, 0x00, 0x00}, /* Sample 12 */ {0x29, 0x00, 0x00, 0x00, 0x00}, /* Sample 13 */ {0x23, 0x00, 0x00, 0x00, 0x00}, /* Sample 14 */ {0x4A, 0x00, 0x00, 0x00, 0x00}, /* Sample 15 */ {0x6C, 0x00, 0x00, 0x00, 0x00}, /* Sample 16 */ {0x60, 0x00, 0x00, 0x00, 0x00}, /* Sample 17 */ {0x4F, 0x00, 0x00, 0x00, 0x00}, /* Sample 18 */ {0x4A, 0x00, 0x00, 0x00, 0x00}, /* Sample 19 */ {0x49, 0x00, 0x00, 0x00, 0x00}, /* Sample 20 */ {0x47, 0x00, 0x00, 0x00, 0x00}, /* Sample 21 */ {0x47, 0x00, 0x00, 0x00, 0x00}, /* Sample 22 */ {0x46, 0x00, 
0x00, 0x00, 0x00}, /* Sample 23 */ {0x11} /* Output Amplitude */ }; u_int8_t TWVShortHaul3[25][5] = /* T1 Short Haul 330 - 440 ft */ { {0x00, 0x44, 0x00, 0x00, 0x00}, /* Sample 0 */ {0x0A, 0x44, 0x00, 0x00, 0x00}, /* Sample 1 */ {0x3F, 0x43, 0x00, 0x00, 0x00}, /* Sample 2 */ {0x3F, 0x43, 0x00, 0x00, 0x00}, /* Sample 3 */ {0x3F, 0x42, 0x00, 0x00, 0x00}, /* Sample 4 */ {0x3F, 0x42, 0x00, 0x00, 0x00}, /* Sample 5 */ {0x2F, 0x41, 0x00, 0x00, 0x00}, /* Sample 6 */ {0x2E, 0x41, 0x00, 0x00, 0x00}, /* Sample 7 */ {0x2D, 0x00, 0x00, 0x00, 0x00}, /* Sample 8 */ {0x2C, 0x00, 0x00, 0x00, 0x00}, /* Sample 9 */ {0x2B, 0x00, 0x00, 0x00, 0x00}, /* Sample 10 */ {0x2A, 0x00, 0x00, 0x00, 0x00}, /* Sample 11 */ {0x29, 0x00, 0x00, 0x00, 0x00}, /* Sample 12 */ {0x28, 0x00, 0x00, 0x00, 0x00}, /* Sample 13 */ {0x19, 0x00, 0x00, 0x00, 0x00}, /* Sample 14 */ {0x4A, 0x00, 0x00, 0x00, 0x00}, /* Sample 15 */ {0x7F, 0x00, 0x00, 0x00, 0x00}, /* Sample 16 */ {0x60, 0x00, 0x00, 0x00, 0x00}, /* Sample 17 */ {0x4F, 0x00, 0x00, 0x00, 0x00}, /* Sample 18 */ {0x4A, 0x00, 0x00, 0x00, 0x00}, /* Sample 19 */ {0x49, 0x00, 0x00, 0x00, 0x00}, /* Sample 20 */ {0x47, 0x00, 0x00, 0x00, 0x00}, /* Sample 21 */ {0x47, 0x00, 0x00, 0x00, 0x00}, /* Sample 22 */ {0x46, 0x00, 0x00, 0x00, 0x00}, /* Sample 23 */ {0x12} /* Output Amplitude */ }; u_int8_t TWVShortHaul4[25][5] = /* T1 Short Haul 440 - 550 ft */ { {0x00, 0x44, 0x00, 0x00, 0x00}, /* Sample 0 */ {0x0A, 0x44, 0x00, 0x00, 0x00}, /* Sample 1 */ {0x3F, 0x43, 0x00, 0x00, 0x00}, /* Sample 2 */ {0x3F, 0x43, 0x00, 0x00, 0x00}, /* Sample 3 */ {0x3F, 0x42, 0x00, 0x00, 0x00}, /* Sample 4 */ {0x3F, 0x42, 0x00, 0x00, 0x00}, /* Sample 5 */ {0x30, 0x41, 0x00, 0x00, 0x00}, /* Sample 6 */ {0x2B, 0x41, 0x00, 0x00, 0x00}, /* Sample 7 */ {0x2A, 0x00, 0x00, 0x00, 0x00}, /* Sample 8 */ {0x29, 0x00, 0x00, 0x00, 0x00}, /* Sample 9 */ {0x28, 0x00, 0x00, 0x00, 0x00}, /* Sample 10 */ {0x27, 0x00, 0x00, 0x00, 0x00}, /* Sample 11 */ {0x26, 0x00, 0x00, 0x00, 0x00}, /* Sample 12 */ {0x26, 
0x00, 0x00, 0x00, 0x00}, /* Sample 13 */ {0x24, 0x00, 0x00, 0x00, 0x00}, /* Sample 14 */ {0x4A, 0x00, 0x00, 0x00, 0x00}, /* Sample 15 */ {0x7F, 0x00, 0x00, 0x00, 0x00}, /* Sample 16 */ {0x7F, 0x00, 0x00, 0x00, 0x00}, /* Sample 17 */ {0x4F, 0x00, 0x00, 0x00, 0x00}, /* Sample 18 */ {0x4A, 0x00, 0x00, 0x00, 0x00}, /* Sample 19 */ {0x49, 0x00, 0x00, 0x00, 0x00}, /* Sample 20 */ {0x47, 0x00, 0x00, 0x00, 0x00}, /* Sample 21 */ {0x47, 0x00, 0x00, 0x00, 0x00}, /* Sample 22 */ {0x46, 0x00, 0x00, 0x00, 0x00}, /* Sample 23 */ {0x14} /* Output Amplitude */ }; u_int8_t TWVShortHaul5[25][5] = /* T1 Short Haul 550 - 660 ft */ { {0x00, 0x44, 0x00, 0x00, 0x00}, /* Sample 0 */ {0x0A, 0x44, 0x00, 0x00, 0x00}, /* Sample 1 */ {0x3F, 0x43, 0x00, 0x00, 0x00}, /* Sample 2 */ {0x3F, 0x43, 0x00, 0x00, 0x00}, /* Sample 3 */ {0x3F, 0x42, 0x00, 0x00, 0x00}, /* Sample 4 */ {0x3F, 0x42, 0x00, 0x00, 0x00}, /* Sample 5 */ {0x3F, 0x41, 0x00, 0x00, 0x00}, /* Sample 6 */ {0x30, 0x41, 0x00, 0x00, 0x00}, /* Sample 7 */ {0x2A, 0x00, 0x00, 0x00, 0x00}, /* Sample 8 */ {0x29, 0x00, 0x00, 0x00, 0x00}, /* Sample 9 */ {0x28, 0x00, 0x00, 0x00, 0x00}, /* Sample 10 */ {0x27, 0x00, 0x00, 0x00, 0x00}, /* Sample 11 */ {0x26, 0x00, 0x00, 0x00, 0x00}, /* Sample 12 */ {0x25, 0x00, 0x00, 0x00, 0x00}, /* Sample 13 */ {0x24, 0x00, 0x00, 0x00, 0x00}, /* Sample 14 */ {0x4A, 0x00, 0x00, 0x00, 0x00}, /* Sample 15 */ {0x7F, 0x00, 0x00, 0x00, 0x00}, /* Sample 16 */ {0x7F, 0x00, 0x00, 0x00, 0x00}, /* Sample 17 */ {0x5F, 0x00, 0x00, 0x00, 0x00}, /* Sample 18 */ {0x50, 0x00, 0x00, 0x00, 0x00}, /* Sample 19 */ {0x49, 0x00, 0x00, 0x00, 0x00}, /* Sample 20 */ {0x47, 0x00, 0x00, 0x00, 0x00}, /* Sample 21 */ {0x47, 0x00, 0x00, 0x00, 0x00}, /* Sample 22 */ {0x46, 0x00, 0x00, 0x00, 0x00}, /* Sample 23 */ {0x15} /* Output Amplitude */ }; u_int8_t TWV_E1_120Ohm[25][5] = /* E1 120 Ohm */ { {0x00, 0x00, 0x00, 0x00, 0x00}, /* Sample 0 */ {0x00, 0x00, 0x00, 0x00, 0x00}, /* Sample 1 */ {0x0A, 0x00, 0x00, 0x00, 0x00}, /* Sample 2 */ {0x3F, 
0x00, 0x00, 0x00, 0x00}, /* Sample 3 */ {0x3F, 0x00, 0x00, 0x00, 0x00}, /* Sample 4 */ {0x39, 0x00, 0x00, 0x00, 0x00}, /* Sample 5 */ {0x38, 0x00, 0x00, 0x00, 0x00}, /* Sample 6 */ {0x36, 0x00, 0x00, 0x00, 0x00}, /* Sample 7 */ {0x36, 0x00, 0x00, 0x00, 0x00}, /* Sample 8 */ {0x35, 0x00, 0x00, 0x00, 0x00}, /* Sample 9 */ {0x35, 0x00, 0x00, 0x00, 0x00}, /* Sample 10 */ {0x35, 0x00, 0x00, 0x00, 0x00}, /* Sample 11 */ {0x35, 0x00, 0x00, 0x00, 0x00}, /* Sample 12 */ {0x35, 0x00, 0x00, 0x00, 0x00}, /* Sample 13 */ {0x35, 0x00, 0x00, 0x00, 0x00}, /* Sample 14 */ {0x2D, 0x00, 0x00, 0x00, 0x00}, /* Sample 15 */ {0x00, 0x00, 0x00, 0x00, 0x00}, /* Sample 16 */ {0x00, 0x00, 0x00, 0x00, 0x00}, /* Sample 17 */ {0x00, 0x00, 0x00, 0x00, 0x00}, /* Sample 18 */ {0x00, 0x00, 0x00, 0x00, 0x00}, /* Sample 19 */ {0x00, 0x00, 0x00, 0x00, 0x00}, /* Sample 20 */ {0x00, 0x00, 0x00, 0x00, 0x00}, /* Sample 21 */ {0x00, 0x00, 0x00, 0x00, 0x00}, /* Sample 22 */ {0x00, 0x00, 0x00, 0x00, 0x00}, /* Sample 23 */ {0x0C} /* PMC's suggested value */ /* { 0x10 } Output Amplitude */ }; u_int8_t TWV_E1_75Ohm[25][5] = /* E1 75 Ohm */ { #ifdef PMCC4_DOES_NOT_SUPPORT {0x00, 0x00, 0x00, 0x00, 0x00}, /* Sample 0 */ {0x00, 0x00, 0x00, 0x00, 0x00}, /* Sample 1 */ {0x0A, 0x00, 0x00, 0x00, 0x00}, /* Sample 2 */ {0x28, 0x00, 0x00, 0x00, 0x00}, /* Sample 3 */ {0x3A, 0x00, 0x00, 0x00, 0x00}, /* Sample 4 */ {0x3A, 0x00, 0x00, 0x00, 0x00}, /* Sample 5 */ {0x3A, 0x00, 0x00, 0x00, 0x00}, /* Sample 6 */ {0x3A, 0x00, 0x00, 0x00, 0x00}, /* Sample 7 */ {0x3A, 0x00, 0x00, 0x00, 0x00}, /* Sample 8 */ {0x3A, 0x00, 0x00, 0x00, 0x00}, /* Sample 9 */ {0x3A, 0x00, 0x00, 0x00, 0x00}, /* Sample 10 */ {0x3A, 0x00, 0x00, 0x00, 0x00}, /* Sample 11 */ {0x3A, 0x00, 0x00, 0x00, 0x00}, /* Sample 12 */ {0x3A, 0x00, 0x00, 0x00, 0x00}, /* Sample 13 */ {0x32, 0x00, 0x00, 0x00, 0x00}, /* Sample 14 */ {0x14, 0x00, 0x00, 0x00, 0x00}, /* Sample 15 */ {0x00, 0x00, 0x00, 0x00, 0x00}, /* Sample 16 */ {0x00, 0x00, 0x00, 0x00, 0x00}, /* Sample 17 */ 
{0x00, 0x00, 0x00, 0x00, 0x00}, /* Sample 18 */ {0x00, 0x00, 0x00, 0x00, 0x00}, /* Sample 19 */ {0x00, 0x00, 0x00, 0x00, 0x00}, /* Sample 20 */ {0x00, 0x00, 0x00, 0x00, 0x00}, /* Sample 21 */ {0x00, 0x00, 0x00, 0x00, 0x00}, /* Sample 22 */ {0x00, 0x00, 0x00, 0x00, 0x00}, /* Sample 23 */ #endif {0x0C} /* Output Amplitude */ }; u_int32_t T1_Equalizer[256] = /* T1 Receiver Equalizer */ { 0x03FE1840, 0x03F61840, 0x03EE1840, 0x03E61840, /* 000 - 003 */ 0x03DE1840, 0x03D61840, 0x03D61840, 0x03D61840, /* 004 - 007 */ 0x03CE1840, 0x03CE1840, 0x03CE1840, 0x03CE1840, /* 008 - 011 */ 0x03C61840, 0x03C61840, 0x03C61840, 0x0BBE1840, /* 012 - 015 */ 0x0BBE1840, 0x0BBE1840, 0x0BBE1840, 0x0BB61840, /* 016 - 019 */ 0x0BB61840, 0x0BB61840, 0x0BB61840, 0x13AE1838, /* 020 - 023 */ 0x13AE183C, 0x13AE1840, 0x13AE1840, 0x13AE1840, /* 024 - 027 */ 0x13AE1840, 0x1BB618B8, 0x1BAE18B8, 0x1BAE18BC, /* 028 - 031 */ 0x1BAE18C0, 0x1BAE18C0, 0x23A618C0, 0x23A618C0, /* 032 - 035 */ 0x23A618C0, 0x23A618C0, 0x23A618C0, 0x239E18C0, /* 036 - 039 */ 0x239E18C0, 0x239E18C0, 0x239E18C0, 0x239E18C0, /* 040 - 043 */ 0x2B9618C0, 0x2B9618C0, 0x2B9618C0, 0x33961940, /* 044 - 047 */ 0x37961940, 0x37961940, 0x37961940, 0x3F9E19C0, /* 048 - 051 */ 0x3F9E19C0, 0x3F9E19C0, 0x3FA61A40, 0x3FA61A40, /* 052 - 055 */ 0x3FA61A40, 0x3FA61A40, 0x3F9619C0, 0x3F9619C0, /* 056 - 059 */ 0x3F9619C0, 0x3F9619C0, 0x479E1A40, 0x479E1A40, /* 060 - 063 */ 0x479E1A40, 0x47961A40, 0x47961A40, 0x47961A40, /* 064 - 067 */ 0x47961A40, 0x4F8E1A40, 0x4F8E1A40, 0x4F8E1A40, /* 068 - 071 */ 0x4F8E1A40, 0x4F8E1A40, 0x57861A40, 0x57861A40, /* 072 - 075 */ 0x57861A40, 0x57861A40, 0x57861A40, 0x5F861AC0, /* 076 - 079 */ 0x5F861AC0, 0x5F861AC0, 0x5F861AC0, 0x5F861AC0, /* 080 - 083 */ 0x5F861AC0, 0x5F7E1AC0, 0x5F7E1AC0, 0x5F7E1AC0, /* 084 - 087 */ 0x5F7E1AC0, 0x5F7E1AC0, 0x677E2AC0, 0x677E2AC0, /* 088 - 091 */ 0x677E2AC0, 0x677E2AC0, 0x67762AC0, 0x67762AC0, /* 092 - 095 */ 0x67762AC0, 0x67762AC0, 0x67762AC0, 0x6F6E2AC0, /* 096 - 099 */ 0x6F6E2AC0, 
0x6F6E2AC0, 0x6F6E2AC0, 0x776E3AC0, /* 100 - 103 */ 0x776E3AC0, 0x776E3AC0, 0x776E3AC0, 0x7F663AC0, /* 104 - 107 */ 0x7F663AC0, 0x7F664AC0, 0x7F664AC0, 0x7F664AC0, /* 108 - 111 */ 0x7F664AC0, 0x87665AC0, 0x87665AC0, 0x87665AC0, /* 112 - 115 */ 0x87665AC0, 0x87665AC0, 0x875E5AC0, 0x875E5AC0, /* 116 - 119 */ 0x875E5AC0, 0x875E5AC0, 0x875E5AC0, 0x8F5E6AC0, /* 120 - 123 */ 0x8F5E6AC0, 0x8F5E6AC0, 0x8F5E6AC0, 0x975E7AC0, /* 124 - 127 */ 0x975E7AC0, 0x975E7AC0, 0x975E7AC0, 0x9F5E8AC0, /* 128 - 131 */ 0x9F5E8AC0, 0x9F5E8AC0, 0x9F5E8AC0, 0x9F5E8AC0, /* 132 - 135 */ 0xA7569AC0, 0xA7569AC0, 0xA7569AC0, 0xA7569AC0, /* 136 - 139 */ 0xA756AAC0, 0xA756AAC0, 0xA756AAC0, 0xAF4EAAC0, /* 140 - 143 */ 0xAF4EAAC0, 0xAF4EAAC0, 0xAF4EAAC0, 0xAF4EAAC0, /* 144 - 147 */ 0xB746AAC0, 0xB746AAC0, 0xB746AAC0, 0xB746AAC0, /* 148 - 151 */ 0xB746AAC0, 0xB746AAC0, 0xB746AAC0, 0xB746BAC0, /* 152 - 155 */ 0xB746BAC0, 0xB746BAC0, 0xBF4EBB40, 0xBF4EBB40, /* 156 - 159 */ 0xBF4EBB40, 0xBF4EBB40, 0xBF4EBB40, 0xBF4EBB40, /* 160 - 163 */ 0xBF4EBB40, 0xBF4EBB40, 0xBF4EBB40, 0xBE46CB40, /* 164 - 167 */ 0xBE46CB40, 0xBE46CB40, 0xBE46CB40, 0xBE46CB40, /* 168 - 171 */ 0xBE46CB40, 0xBE46DB40, 0xBE46DB40, 0xBE46DB40, /* 172 - 175 */ 0xC63ECB40, 0xC63ECB40, 0xC63EDB40, 0xC63EDB40, /* 176 - 179 */ 0xC63EDB40, 0xC644DB40, 0xC644DB40, 0xC644DB40, /* 180 - 183 */ 0xC644DB40, 0xC63CDB40, 0xC63CDB40, 0xC63CDB40, /* 184 - 187 */ 0xC63CDB40, 0xD634DB40, 0xD634DB40, 0xD634DB40, /* 188 - 191 */ 0xD634DB40, 0xD634DB40, 0xDE2CDB3C, 0xDE2CDB3C, /* 192 - 195 */ 0xDE2CDB3C, 0xE62CDB40, 0xE62CDB40, 0xE62CDB40, /* 196 - 199 */ 0xE62CDB40, 0xE62CDB40, 0xE62CEB40, 0xE62CEB40, /* 200 - 203 */ 0xE62CEB40, 0xEE2CFB40, 0xEE2CFB40, 0xEE2CFB40, /* 204 - 207 */ 0xEE2D0B40, 0xEE2D0B40, 0xEE2D0B40, 0xEE2D0B40, /* 208 - 211 */ 0xEE2D0B40, 0xF5250B38, 0xF5250B3C, 0xF5250B40, /* 212 - 215 */ 0xF5251B40, 0xF5251B40, 0xF5251B40, 0xF5251B40, /* 216 - 219 */ 0xF5251B40, 0xFD252B40, 0xFD252B40, 0xFD252B40, /* 220 - 223 */ 0xFD252B40, 0xFD252740, 
0xFD252740, 0xFD252740, /* 224 - 227 */ 0xFD252340, 0xFD252340, 0xFD252340, 0xFD253340, /* 228 - 231 */ 0xFD253340, 0xFD253340, 0xFD253340, 0xFD253340, /* 232 - 235 */ 0xFD253340, 0xFD253340, 0xFD253340, 0xFC254340, /* 236 - 239 */ 0xFD254340, 0xFD254340, 0xFD254344, 0xFC254348, /* 240 - 243 */ 0xFC25434C, 0xFD2543BC, 0xFD2543C0, 0xFC2543C0, /* 244 - 247 */ 0xFC2343C0, 0xFC2343C0, 0xFD2343C0, 0xFC2143C0, /* 248 - 251 */ 0xFC2143C0, 0xFC2153C0, 0xFD2153C0, 0xFC2153C0 /* 252 - 255 */ }; u_int32_t E1_Equalizer[256] = /* E1 Receiver Equalizer */ { 0x07DE182C, 0x07DE182C, 0x07D6182C, 0x07D6182C, /* 000 - 003 */ 0x07D6182C, 0x07CE182C, 0x07CE182C, 0x07CE182C, /* 004 - 007 */ 0x07C6182C, 0x07C6182C, 0x07C6182C, 0x07BE182C, /* 008 - 011 */ 0x07BE182C, 0x07BE182C, 0x07BE182C, 0x07BE182C, /* 012 - 015 */ 0x07B6182C, 0x07B6182C, 0x07B6182C, 0x07B6182C, /* 016 - 019 */ 0x07B6182C, 0x07AE182C, 0x07AE182C, 0x07AE182C, /* 020 - 023 */ 0x07AE182C, 0x07AE182C, 0x07B618AC, 0x07AE18AC, /* 024 - 027 */ 0x07AE18AC, 0x07AE18AC, 0x07AE18AC, 0x07A618AC, /* 028 - 031 */ 0x07A618AC, 0x07A618AC, 0x07A618AC, 0x079E18AC, /* 032 - 035 */ 0x07A6192C, 0x07A6192C, 0x07A6192C, 0x0FA6192C, /* 036 - 039 */ 0x0FA6192C, 0x0F9E192C, 0x0F9E192C, 0x0F9E192C, /* 040 - 043 */ 0x179E192C, 0x17A619AC, 0x179E19AC, 0x179E19AC, /* 044 - 047 */ 0x179619AC, 0x1F9619AC, 0x1F9619AC, 0x1F8E19AC, /* 048 - 051 */ 0x1F8E19AC, 0x1F8E19AC, 0x278E19AC, 0x278E1A2C, /* 052 - 055 */ 0x278E1A2C, 0x278E1A2C, 0x278E1A2C, 0x2F861A2C, /* 056 - 059 */ 0x2F861A2C, 0x2F861A2C, 0x2F7E1A2C, 0x2F7E1A2C, /* 060 - 063 */ 0x2F7E1A2C, 0x377E1A2C, 0x377E1AAC, 0x377E1AAC, /* 064 - 067 */ 0x377E1AAC, 0x377E1AAC, 0x3F7E2AAC, 0x3F7E2AAC, /* 068 - 071 */ 0x3F762AAC, 0x3F862B2C, 0x3F7E2B2C, 0x477E2B2C, /* 072 - 075 */ 0x477E2F2C, 0x477E2F2C, 0x477E2F2C, 0x47762F2C, /* 076 - 079 */ 0x4F762F2C, 0x4F762F2C, 0x4F6E2F2C, 0x4F6E2F2C, /* 080 - 083 */ 0x4F6E2F2C, 0x576E2F2C, 0x576E2F2C, 0x576E3F2C, /* 084 - 087 */ 0x576E3F2C, 0x576E3F2C, 0x5F6E3F2C, 
0x5F6E4F2C, /* 088 - 091 */ 0x5F6E4F2C, 0x5F6E4F2C, 0x5F664F2C, 0x67664F2C, /* 092 - 095 */ 0x67664F2C, 0x675E4F2C, 0x675E4F2C, 0x67664F2C, /* 096 - 099 */ 0x67664F2C, 0x67665F2C, 0x6F6E5F2C, 0x6F6E6F2C, /* 100 - 103 */ 0x6F6E6F2C, 0x6F6E7F2C, 0x6F6E7F2C, 0x6F6E7F2C, /* 104 - 107 */ 0x77667F2C, 0x77667F2C, 0x775E6F2C, 0x775E7F2C, /* 108 - 111 */ 0x775E7F2C, 0x7F5E7F2C, 0x7F5E8F2C, 0x7F5E8F2C, /* 112 - 115 */ 0x7F5E8F2C, 0x87568F2C, 0x87568F2C, 0x87568F2C, /* 116 - 119 */ 0x874E8F2C, 0x874E8F2C, 0x874E8F2C, 0x8F4E9F2C, /* 120 - 123 */ 0x8F4E9F2C, 0x8F4EAF2C, 0x8F4EAF2C, 0x8F4EAF2C, /* 124 - 127 */ 0x974EAF2C, 0x974EAF2C, 0x974EAB2C, 0x974EAB2C, /* 128 - 131 */ 0x974EAB2C, 0x9F4EAB2C, 0x9F4EBB2C, 0x9F4EBB2C, /* 132 - 135 */ 0x9F4EBB2C, 0x9F4ECB2C, 0xA74ECB2C, 0xA74ECB2C, /* 136 - 139 */ 0xA746CB2C, 0xA746CB2C, 0xA746CB2C, 0xA746DB2C, /* 140 - 143 */ 0xAF46DB2C, 0xAF46EB2C, 0xAF46EB2C, 0xAF4EEB2C, /* 144 - 147 */ 0xAE4EEB2C, 0xAE4EEB2C, 0xB546FB2C, 0xB554FB2C, /* 148 - 151 */ 0xB54CEB2C, 0xB554FB2C, 0xB554FB2C, 0xBD54FB2C, /* 152 - 155 */ 0xBD4CFB2C, 0xBD4CFB2C, 0xBD4CFB2C, 0xBD44EB2C, /* 156 - 159 */ 0xC544FB2C, 0xC544FB2C, 0xC544FB2C, 0xC5450B2C, /* 160 - 163 */ 0xC5450B2C, 0xC5450B2C, 0xCD450B2C, 0xCD450B2C, /* 164 - 167 */ 0xCD3D0B2C, 0xCD3D0B2C, 0xCD3D0B2C, 0xD53D0B2C, /* 168 - 171 */ 0xD53D0B2C, 0xD53D1B2C, 0xD53D1B2C, 0xD53D1B2C, /* 172 - 175 */ 0xDD3D1B2C, 0xDD3D1B2C, 0xDD351B2C, 0xDD351B2C, /* 176 - 179 */ 0xDD351B2C, 0xE5351B2C, 0xE5351B2C, 0xE52D1B2C, /* 180 - 183 */ 0xE52D1B2C, 0xE52D3B2C, 0xED2D4B2C, 0xED2D1BA8, /* 184 - 187 */ 0xED2D1BAC, 0xED2D17AC, 0xED2D17AC, 0xED2D27AC, /* 188 - 191 */ 0xF52D27AC, 0xF52D27AC, 0xF52D2BAC, 0xF52D2BAC, /* 192 - 195 */ 0xF52D2BAC, 0xFD2D2BAC, 0xFD2B2BAC, 0xFD2B2BAC, /* 196 - 199 */ 0xFD2B2BAC, 0xFD2B2BAC, 0xFD232BAC, 0xFD232BAC, /* 200 - 203 */ 0xFD232BAC, 0xFD212BAC, 0xFD212BAC, 0xFD292BAC, /* 204 - 207 */ 0xFD292BAC, 0xFD2927AC, 0xFD2937AC, 0xFD2923AC, /* 208 - 211 */ 0xFD2923AC, 0xFD2923AC, 0xFD2923AC, 0xFD2123AC, /* 
212 - 215 */ 0xFD2123AC, 0xFD2123AC, 0xFD2133AC, 0xFD2133AC, /* 216 - 219 */ 0xFD2133AC, 0xFD2143AC, 0xFD2143AC, 0xFD2143AC, /* 220 - 223 */ 0xFC2143AC, 0xFC2143AC, 0xFC1943AC, 0xFC1943AC, /* 224 - 227 */ 0xFC1943AC, 0xFC1943AC, 0xFC1953AC, 0xFC1953AC, /* 228 - 231 */ 0xFC1953AC, 0xFC1953AC, 0xFC1963AC, 0xFC1963AC, /* 232 - 235 */ 0xFC1963AC, 0xFC1973AC, 0xFC1973AC, 0xFC1973AC, /* 236 - 239 */ 0xFC1973AC, 0xFC1973AC, 0xFC1983AC, 0xFC1983AC, /* 240 - 243 */ 0xFC1983AC, 0xFC1983AC, 0xFC1983AC, 0xFC1993AC, /* 244 - 247 */ 0xFC1993AC, 0xFC1993AC, 0xFC19A3AC, 0xFC19A3AC, /* 248 - 251 */ 0xFC19B3AC, 0xFC19B3AC, 0xFC19B3AC, 0xFC19B3AC /* 252 - 255 */ }; /*** End-of-Files ***/
gpl-2.0
zombi-x/grimlock_kernel_asus_tegra3_unified
arch/arm/mach-davinci/gpio-tnetv107x.c
10618
5415
/* * Texas Instruments TNETV107X GPIO Controller * * Copyright (C) 2010 Texas Instruments * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation version 2. * * This program is distributed "as is" WITHOUT ANY WARRANTY of any * kind, whether express or implied; without even the implied warranty * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/gpio.h> #include <mach/common.h> #include <mach/tnetv107x.h> struct tnetv107x_gpio_regs { u32 idver; u32 data_in[3]; u32 data_out[3]; u32 direction[3]; u32 enable[3]; }; #define gpio_reg_index(gpio) ((gpio) >> 5) #define gpio_reg_bit(gpio) BIT((gpio) & 0x1f) #define gpio_reg_rmw(reg, mask, val) \ __raw_writel((__raw_readl(reg) & ~(mask)) | (val), (reg)) #define gpio_reg_set_bit(reg, gpio) \ gpio_reg_rmw((reg) + gpio_reg_index(gpio), 0, gpio_reg_bit(gpio)) #define gpio_reg_clear_bit(reg, gpio) \ gpio_reg_rmw((reg) + gpio_reg_index(gpio), gpio_reg_bit(gpio), 0) #define gpio_reg_get_bit(reg, gpio) \ (__raw_readl((reg) + gpio_reg_index(gpio)) & gpio_reg_bit(gpio)) #define chip2controller(chip) \ container_of(chip, struct davinci_gpio_controller, chip) #define TNETV107X_GPIO_CTLRS DIV_ROUND_UP(TNETV107X_N_GPIO, 32) static struct davinci_gpio_controller chips[TNETV107X_GPIO_CTLRS]; static int tnetv107x_gpio_request(struct gpio_chip *chip, unsigned offset) { struct davinci_gpio_controller *ctlr = chip2controller(chip); struct tnetv107x_gpio_regs __iomem *regs = ctlr->regs; unsigned gpio = chip->base + offset; unsigned long flags; spin_lock_irqsave(&ctlr->lock, flags); gpio_reg_set_bit(regs->enable, gpio); spin_unlock_irqrestore(&ctlr->lock, flags); return 0; } static void tnetv107x_gpio_free(struct gpio_chip *chip, unsigned offset) { struct davinci_gpio_controller *ctlr = 
chip2controller(chip); struct tnetv107x_gpio_regs __iomem *regs = ctlr->regs; unsigned gpio = chip->base + offset; unsigned long flags; spin_lock_irqsave(&ctlr->lock, flags); gpio_reg_clear_bit(regs->enable, gpio); spin_unlock_irqrestore(&ctlr->lock, flags); } static int tnetv107x_gpio_dir_in(struct gpio_chip *chip, unsigned offset) { struct davinci_gpio_controller *ctlr = chip2controller(chip); struct tnetv107x_gpio_regs __iomem *regs = ctlr->regs; unsigned gpio = chip->base + offset; unsigned long flags; spin_lock_irqsave(&ctlr->lock, flags); gpio_reg_set_bit(regs->direction, gpio); spin_unlock_irqrestore(&ctlr->lock, flags); return 0; } static int tnetv107x_gpio_dir_out(struct gpio_chip *chip, unsigned offset, int value) { struct davinci_gpio_controller *ctlr = chip2controller(chip); struct tnetv107x_gpio_regs __iomem *regs = ctlr->regs; unsigned gpio = chip->base + offset; unsigned long flags; spin_lock_irqsave(&ctlr->lock, flags); if (value) gpio_reg_set_bit(regs->data_out, gpio); else gpio_reg_clear_bit(regs->data_out, gpio); gpio_reg_clear_bit(regs->direction, gpio); spin_unlock_irqrestore(&ctlr->lock, flags); return 0; } static int tnetv107x_gpio_get(struct gpio_chip *chip, unsigned offset) { struct davinci_gpio_controller *ctlr = chip2controller(chip); struct tnetv107x_gpio_regs __iomem *regs = ctlr->regs; unsigned gpio = chip->base + offset; int ret; ret = gpio_reg_get_bit(regs->data_in, gpio); return ret ? 
1 : 0; } static void tnetv107x_gpio_set(struct gpio_chip *chip, unsigned offset, int value) { struct davinci_gpio_controller *ctlr = chip2controller(chip); struct tnetv107x_gpio_regs __iomem *regs = ctlr->regs; unsigned gpio = chip->base + offset; unsigned long flags; spin_lock_irqsave(&ctlr->lock, flags); if (value) gpio_reg_set_bit(regs->data_out, gpio); else gpio_reg_clear_bit(regs->data_out, gpio); spin_unlock_irqrestore(&ctlr->lock, flags); } static int __init tnetv107x_gpio_setup(void) { int i, base; unsigned ngpio; struct davinci_soc_info *soc_info = &davinci_soc_info; struct tnetv107x_gpio_regs *regs; struct davinci_gpio_controller *ctlr; if (soc_info->gpio_type != GPIO_TYPE_TNETV107X) return 0; ngpio = soc_info->gpio_num; if (ngpio == 0) { pr_err("GPIO setup: how many GPIOs?\n"); return -EINVAL; } if (WARN_ON(TNETV107X_N_GPIO < ngpio)) ngpio = TNETV107X_N_GPIO; regs = ioremap(soc_info->gpio_base, SZ_4K); if (WARN_ON(!regs)) return -EINVAL; for (i = 0, base = 0; base < ngpio; i++, base += 32) { ctlr = &chips[i]; ctlr->chip.label = "tnetv107x"; ctlr->chip.can_sleep = 0; ctlr->chip.base = base; ctlr->chip.ngpio = ngpio - base; if (ctlr->chip.ngpio > 32) ctlr->chip.ngpio = 32; ctlr->chip.request = tnetv107x_gpio_request; ctlr->chip.free = tnetv107x_gpio_free; ctlr->chip.direction_input = tnetv107x_gpio_dir_in; ctlr->chip.get = tnetv107x_gpio_get; ctlr->chip.direction_output = tnetv107x_gpio_dir_out; ctlr->chip.set = tnetv107x_gpio_set; spin_lock_init(&ctlr->lock); ctlr->regs = regs; ctlr->set_data = &regs->data_out[i]; ctlr->clr_data = &regs->data_out[i]; ctlr->in_data = &regs->data_in[i]; gpiochip_add(&ctlr->chip); } soc_info->gpio_ctlrs = chips; soc_info->gpio_ctlrs_num = DIV_ROUND_UP(ngpio, 32); return 0; } pure_initcall(tnetv107x_gpio_setup);
gpl-2.0
htc-msm8960/android_kernel_htc_m7
tools/perf/util/strfilter.c
10618
4178
#include "util.h" #include "string.h" #include "strfilter.h" /* Operators */ static const char *OP_and = "&"; /* Logical AND */ static const char *OP_or = "|"; /* Logical OR */ static const char *OP_not = "!"; /* Logical NOT */ #define is_operator(c) ((c) == '|' || (c) == '&' || (c) == '!') #define is_separator(c) (is_operator(c) || (c) == '(' || (c) == ')') static void strfilter_node__delete(struct strfilter_node *self) { if (self) { if (self->p && !is_operator(*self->p)) free((char *)self->p); strfilter_node__delete(self->l); strfilter_node__delete(self->r); free(self); } } void strfilter__delete(struct strfilter *self) { if (self) { strfilter_node__delete(self->root); free(self); } } static const char *get_token(const char *s, const char **e) { const char *p; while (isspace(*s)) /* Skip spaces */ s++; if (*s == '\0') { p = s; goto end; } p = s + 1; if (!is_separator(*s)) { /* End search */ retry: while (*p && !is_separator(*p) && !isspace(*p)) p++; /* Escape and special case: '!' is also used in glob pattern */ if (*(p - 1) == '\\' || (*p == '!' 
&& *(p - 1) == '[')) { p++; goto retry; } } end: *e = p; return s; } static struct strfilter_node *strfilter_node__alloc(const char *op, struct strfilter_node *l, struct strfilter_node *r) { struct strfilter_node *ret = zalloc(sizeof(struct strfilter_node)); if (ret) { ret->p = op; ret->l = l; ret->r = r; } return ret; } static struct strfilter_node *strfilter_node__new(const char *s, const char **ep) { struct strfilter_node root, *cur, *last_op; const char *e; if (!s) return NULL; memset(&root, 0, sizeof(root)); last_op = cur = &root; s = get_token(s, &e); while (*s != '\0' && *s != ')') { switch (*s) { case '&': /* Exchg last OP->r with AND */ if (!cur->r || !last_op->r) goto error; cur = strfilter_node__alloc(OP_and, last_op->r, NULL); if (!cur) goto nomem; last_op->r = cur; last_op = cur; break; case '|': /* Exchg the root with OR */ if (!cur->r || !root.r) goto error; cur = strfilter_node__alloc(OP_or, root.r, NULL); if (!cur) goto nomem; root.r = cur; last_op = cur; break; case '!': /* Add NOT as a leaf node */ if (cur->r) goto error; cur->r = strfilter_node__alloc(OP_not, NULL, NULL); if (!cur->r) goto nomem; cur = cur->r; break; case '(': /* Recursively parses inside the parenthesis */ if (cur->r) goto error; cur->r = strfilter_node__new(s + 1, &s); if (!s) goto nomem; if (!cur->r || *s != ')') goto error; e = s + 1; break; default: if (cur->r) goto error; cur->r = strfilter_node__alloc(NULL, NULL, NULL); if (!cur->r) goto nomem; cur->r->p = strndup(s, e - s); if (!cur->r->p) goto nomem; } s = get_token(e, &e); } if (!cur->r) goto error; *ep = s; return root.r; nomem: s = NULL; error: *ep = s; strfilter_node__delete(root.r); return NULL; } /* * Parse filter rule and return new strfilter. * Return NULL if fail, and *ep == NULL if memory allocation failed. 
*/ struct strfilter *strfilter__new(const char *rules, const char **err) { struct strfilter *ret = zalloc(sizeof(struct strfilter)); const char *ep = NULL; if (ret) ret->root = strfilter_node__new(rules, &ep); if (!ret || !ret->root || *ep != '\0') { if (err) *err = ep; strfilter__delete(ret); ret = NULL; } return ret; } static bool strfilter_node__compare(struct strfilter_node *self, const char *str) { if (!self || !self->p) return false; switch (*self->p) { case '|': /* OR */ return strfilter_node__compare(self->l, str) || strfilter_node__compare(self->r, str); case '&': /* AND */ return strfilter_node__compare(self->l, str) && strfilter_node__compare(self->r, str); case '!': /* NOT */ return !strfilter_node__compare(self->r, str); default: return strglobmatch(str, self->p); } } /* Return true if STR matches the filter rules */ bool strfilter__compare(struct strfilter *self, const char *str) { if (!self) return false; return strfilter_node__compare(self->root, str); }
gpl-2.0
invisiblek/android_kernel_samsung_jaspervzw
arch/arm/mach-msm/acpuclock-8x50.c
379
19749
/* Copyright (c) 2008-2011, Code Aurora Forum. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/io.h> #include <linux/delay.h> #include <linux/mutex.h> #include <linux/errno.h> #include <linux/cpufreq.h> #include <linux/clk.h> #include <linux/mfd/tps65023.h> #include <mach/board.h> #include <mach/msm_iomap.h> #include "acpuclock.h" #include "avs.h" #define SHOT_SWITCH 4 #define HOP_SWITCH 5 #define SIMPLE_SLEW 6 #define COMPLEX_SLEW 7 #define SPSS_CLK_CNTL_ADDR (MSM_CSR_BASE + 0x100) #define SPSS_CLK_SEL_ADDR (MSM_CSR_BASE + 0x104) /* Scorpion PLL registers */ #define SCPLL_CTL_ADDR (MSM_SCPLL_BASE + 0x4) #define SCPLL_STATUS_ADDR (MSM_SCPLL_BASE + 0x18) #define SCPLL_FSM_CTL_EXT_ADDR (MSM_SCPLL_BASE + 0x10) #ifdef CONFIG_QSD_SVS #define TPS65023_MAX_DCDC1 1600 #else #define TPS65023_MAX_DCDC1 CONFIG_QSD_PMIC_DEFAULT_DCDC1 #endif enum { ACPU_PLL_TCXO = -1, ACPU_PLL_0 = 0, ACPU_PLL_1, ACPU_PLL_2, ACPU_PLL_3, ACPU_PLL_END, }; struct clkctl_acpu_speed { unsigned int use_for_scaling; unsigned int acpuclk_khz; int pll; unsigned int acpuclk_src_sel; unsigned int acpuclk_src_div; unsigned int ahbclk_khz; unsigned int ahbclk_div; unsigned int axiclk_khz; unsigned int sc_core_src_sel_mask; unsigned int sc_l_value; int vdd; unsigned long lpj; /* loops_per_jiffy */ }; struct clkctl_acpu_speed acpu_freq_tbl_998[] = { { 0, 19200, ACPU_PLL_TCXO, 0, 0, 0, 0, 14000, 0, 0, 1000}, { 0, 128000, ACPU_PLL_1, 1, 5, 0, 0, 14000, 2, 0, 1000}, { 1, 245760, ACPU_PLL_0, 4, 0, 0, 0, 29000, 0, 0, 1000}, /* 
Update AXI_S and PLL0_S macros if above row numbers change. */ { 1, 384000, ACPU_PLL_3, 0, 0, 0, 0, 58000, 1, 0xA, 1000}, { 0, 422400, ACPU_PLL_3, 0, 0, 0, 0, 117000, 1, 0xB, 1000}, { 0, 460800, ACPU_PLL_3, 0, 0, 0, 0, 117000, 1, 0xC, 1000}, { 0, 499200, ACPU_PLL_3, 0, 0, 0, 0, 117000, 1, 0xD, 1050}, { 0, 537600, ACPU_PLL_3, 0, 0, 0, 0, 117000, 1, 0xE, 1050}, { 1, 576000, ACPU_PLL_3, 0, 0, 0, 0, 117000, 1, 0xF, 1050}, { 0, 614400, ACPU_PLL_3, 0, 0, 0, 0, 117000, 1, 0x10, 1075}, { 0, 652800, ACPU_PLL_3, 0, 0, 0, 0, 117000, 1, 0x11, 1100}, { 0, 691200, ACPU_PLL_3, 0, 0, 0, 0, 117000, 1, 0x12, 1125}, { 0, 729600, ACPU_PLL_3, 0, 0, 0, 0, 117000, 1, 0x13, 1150}, { 1, 768000, ACPU_PLL_3, 0, 0, 0, 0, 128000, 1, 0x14, 1150}, { 0, 806400, ACPU_PLL_3, 0, 0, 0, 0, 128000, 1, 0x15, 1175}, { 0, 844800, ACPU_PLL_3, 0, 0, 0, 0, 128000, 1, 0x16, 1225}, { 0, 883200, ACPU_PLL_3, 0, 0, 0, 0, 128000, 1, 0x17, 1250}, { 0, 921600, ACPU_PLL_3, 0, 0, 0, 0, 128000, 1, 0x18, 1300}, { 0, 960000, ACPU_PLL_3, 0, 0, 0, 0, 128000, 1, 0x19, 1300}, { 1, 998400, ACPU_PLL_3, 0, 0, 0, 0, 128000, 1, 0x1A, 1300}, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, }; struct clkctl_acpu_speed acpu_freq_tbl_768[] = { { 0, 19200, ACPU_PLL_TCXO, 0, 0, 0, 0, 14000, 0, 0, 1000}, { 0, 128000, ACPU_PLL_1, 1, 5, 0, 0, 14000, 2, 0, 1000}, { 1, 245760, ACPU_PLL_0, 4, 0, 0, 0, 29000, 0, 0, 1000}, /* Update AXI_S and PLL0_S macros if above row numbers change. 
*/ { 1, 384000, ACPU_PLL_3, 0, 0, 0, 0, 58000, 1, 0xA, 1075}, { 0, 422400, ACPU_PLL_3, 0, 0, 0, 0, 117000, 1, 0xB, 1100}, { 0, 460800, ACPU_PLL_3, 0, 0, 0, 0, 117000, 1, 0xC, 1125}, { 0, 499200, ACPU_PLL_3, 0, 0, 0, 0, 117000, 1, 0xD, 1150}, { 0, 537600, ACPU_PLL_3, 0, 0, 0, 0, 117000, 1, 0xE, 1150}, { 1, 576000, ACPU_PLL_3, 0, 0, 0, 0, 117000, 1, 0xF, 1150}, { 0, 614400, ACPU_PLL_3, 0, 0, 0, 0, 117000, 1, 0x10, 1175}, { 0, 652800, ACPU_PLL_3, 0, 0, 0, 0, 117000, 1, 0x11, 1200}, { 0, 691200, ACPU_PLL_3, 0, 0, 0, 0, 117000, 1, 0x12, 1225}, { 0, 729600, ACPU_PLL_3, 0, 0, 0, 0, 117000, 1, 0x13, 1250}, { 1, 768000, ACPU_PLL_3, 0, 0, 0, 0, 128000, 1, 0x14, 1250}, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, }; static struct clkctl_acpu_speed *acpu_freq_tbl = acpu_freq_tbl_998; #define AXI_S (&acpu_freq_tbl[1]) #define PLL0_S (&acpu_freq_tbl[2]) /* Use 128MHz for PC since ACPU will auto-switch to AXI (128MHz) before * coming back up. This allows detection of return-from-PC, since 128MHz * is only used for power collapse. */ #define POWER_COLLAPSE_KHZ 128000 /* Use 245MHz (not 128MHz) for SWFI to avoid unnecessary steps between * 128MHz<->245MHz. Jumping to high frequencies from 128MHz directly * is not allowed. */ #define WAIT_FOR_IRQ_KHZ 245760 #ifdef CONFIG_CPU_FREQ_MSM static struct cpufreq_frequency_table freq_table[20]; static void __init cpufreq_table_init(void) { unsigned int i; unsigned int freq_cnt = 0; /* Construct the freq_table table from acpu_freq_tbl since the * freq_table values need to match frequencies specified in * acpu_freq_tbl and acpu_freq_tbl needs to be fixed up during init. */ for (i = 0; acpu_freq_tbl[i].acpuclk_khz != 0 && freq_cnt < ARRAY_SIZE(freq_table)-1; i++) { if (acpu_freq_tbl[i].use_for_scaling) { freq_table[freq_cnt].index = freq_cnt; freq_table[freq_cnt].frequency = acpu_freq_tbl[i].acpuclk_khz; freq_cnt++; } } /* freq_table not big enough to store all usable freqs. 
*/ BUG_ON(acpu_freq_tbl[i].acpuclk_khz != 0); freq_table[freq_cnt].index = freq_cnt; freq_table[freq_cnt].frequency = CPUFREQ_TABLE_END; pr_info("%d scaling frequencies supported.\n", freq_cnt); } #endif struct clock_state { struct clkctl_acpu_speed *current_speed; struct mutex lock; struct clk *ebi1_clk; int (*acpu_set_vdd) (int mvolts); }; static struct clock_state drv_state = { 0 }; static void scpll_set_freq(uint32_t lval, unsigned freq_switch) { uint32_t regval; if (lval > 33) lval = 33; if (lval < 10) lval = 10; /* wait for any calibrations or frequency switches to finish */ while (readl(SCPLL_STATUS_ADDR) & 0x3) ; /* write the new L val and switch mode */ regval = readl(SCPLL_FSM_CTL_EXT_ADDR); regval &= ~(0x3f << 3); regval |= (lval << 3); if (freq_switch == SIMPLE_SLEW) regval |= (0x1 << 9); regval &= ~(0x3 << 0); regval |= (freq_switch << 0); writel(regval, SCPLL_FSM_CTL_EXT_ADDR); dmb(); /* put in normal mode */ regval = readl(SCPLL_CTL_ADDR); regval |= 0x7; writel(regval, SCPLL_CTL_ADDR); dmb(); /* wait for frequency switch to finish */ while (readl(SCPLL_STATUS_ADDR) & 0x1) ; /* status bit seems to clear early, using * 100us to handle the worst case. */ udelay(100); } static void scpll_apps_enable(bool state) { uint32_t regval; if (state) pr_debug("Enabling PLL 3\n"); else pr_debug("Disabling PLL 3\n"); /* Wait for any frequency switches to finish. 
*/ while (readl(SCPLL_STATUS_ADDR) & 0x1) ; /* put the pll in standby mode */ regval = readl(SCPLL_CTL_ADDR); regval &= ~(0x7); regval |= (0x2); writel(regval, SCPLL_CTL_ADDR); dmb(); if (state) { /* put the pll in normal mode */ regval = readl(SCPLL_CTL_ADDR); regval |= (0x7); writel(regval, SCPLL_CTL_ADDR); udelay(200); } else { /* put the pll in power down mode */ regval = readl(SCPLL_CTL_ADDR); regval &= ~(0x7); writel(regval, SCPLL_CTL_ADDR); } udelay(62); if (state) pr_debug("PLL 3 Enabled\n"); else pr_debug("PLL 3 Disabled\n"); } static void scpll_init(void) { uint32_t regval; #define L_VAL_384MHZ 0xA #define L_VAL_768MHZ 0x14 pr_debug("Initializing PLL 3\n"); /* power down scpll */ writel(0x0, SCPLL_CTL_ADDR); dmb(); /* set bypassnl, put into standby */ writel(0x00400002, SCPLL_CTL_ADDR); /* set bypassnl, reset_n, full calibration */ writel(0x00600004, SCPLL_CTL_ADDR); /* Ensure register write to initiate calibration has taken effect before reading status flag */ dmb(); /* wait for cal_all_done */ while (readl(SCPLL_STATUS_ADDR) & 0x2) ; /* Start: Set of experimentally derived steps * to work around a h/w bug. */ /* Put the pll in normal mode */ scpll_apps_enable(1); /* SHOT switch to 384 MHz */ regval = readl(SCPLL_FSM_CTL_EXT_ADDR); regval &= ~(0x3f << 3); regval |= (L_VAL_384MHZ << 3); regval &= ~0x7; regval |= SHOT_SWITCH; writel(regval, SCPLL_FSM_CTL_EXT_ADDR); /* Trigger the freq switch by putting pll in normal mode. */ regval = readl(SCPLL_CTL_ADDR); regval |= (0x7); writel(regval, SCPLL_CTL_ADDR); /* Wait for frequency switch to finish */ while (readl(SCPLL_STATUS_ADDR) & 0x1) ; /* Status bit seems to clear early, using * 800 microseconds for the worst case. */ udelay(800); /* HOP switch to 768 MHz. */ regval = readl(SCPLL_FSM_CTL_EXT_ADDR); regval &= ~(0x3f << 3); regval |= (L_VAL_768MHZ << 3); regval &= ~0x7; regval |= HOP_SWITCH; writel(regval, SCPLL_FSM_CTL_EXT_ADDR); /* Trigger the freq switch by putting pll in normal mode. 
*/ regval = readl(SCPLL_CTL_ADDR); regval |= (0x7); writel(regval, SCPLL_CTL_ADDR); /* Wait for frequency switch to finish */ while (readl(SCPLL_STATUS_ADDR) & 0x1) ; /* Status bit seems to clear early, using * 100 microseconds for the worst case. */ udelay(100); /* End: Work around for h/w bug */ /* Power down scpll */ scpll_apps_enable(0); } static void config_pll(struct clkctl_acpu_speed *s) { uint32_t regval; if (s->pll == ACPU_PLL_3) scpll_set_freq(s->sc_l_value, HOP_SWITCH); /* Configure the PLL divider mux if we plan to use it. */ else if (s->sc_core_src_sel_mask == 0) { /* get the current clock source selection */ regval = readl(SPSS_CLK_SEL_ADDR) & 0x1; /* configure the other clock source, then switch to it, * using the glitch free mux */ switch (regval) { case 0x0: regval = readl(SPSS_CLK_CNTL_ADDR); regval &= ~(0x7 << 4 | 0xf); regval |= (s->acpuclk_src_sel << 4); regval |= (s->acpuclk_src_div << 0); writel(regval, SPSS_CLK_CNTL_ADDR); regval = readl(SPSS_CLK_SEL_ADDR); regval |= 0x1; writel(regval, SPSS_CLK_SEL_ADDR); break; case 0x1: regval = readl(SPSS_CLK_CNTL_ADDR); regval &= ~(0x7 << 12 | 0xf << 8); regval |= (s->acpuclk_src_sel << 12); regval |= (s->acpuclk_src_div << 8); writel(regval, SPSS_CLK_CNTL_ADDR); regval = readl(SPSS_CLK_SEL_ADDR); regval &= ~0x1; writel(regval, SPSS_CLK_SEL_ADDR); break; } dmb(); } regval = readl(SPSS_CLK_SEL_ADDR); regval &= ~(0x3 << 1); regval |= (s->sc_core_src_sel_mask << 1); writel(regval, SPSS_CLK_SEL_ADDR); } static int acpuclk_set_vdd_level(int vdd) { if (drv_state.acpu_set_vdd) { pr_debug("Switching VDD to %d mV\n", vdd); return drv_state.acpu_set_vdd(vdd); } else { /* Assume that the PMIC supports scaling the processor * to its maximum frequency at its default voltage. 
*/ return 0; } } static int acpuclk_8x50_set_rate(int cpu, unsigned long rate, enum setrate_reason reason) { struct clkctl_acpu_speed *tgt_s, *strt_s; int res, rc = 0; int freq_index = 0; if (reason == SETRATE_CPUFREQ) mutex_lock(&drv_state.lock); strt_s = drv_state.current_speed; if (rate == strt_s->acpuclk_khz) goto out; for (tgt_s = acpu_freq_tbl; tgt_s->acpuclk_khz != 0; tgt_s++) { if (tgt_s->acpuclk_khz == rate) break; freq_index++; } if (tgt_s->acpuclk_khz == 0) { rc = -EINVAL; goto out; } if (reason == SETRATE_CPUFREQ) { #ifdef CONFIG_MSM_CPU_AVS /* Notify avs before changing frequency */ rc = avs_adjust_freq(freq_index, 1); if (rc) { pr_err("Unable to increase ACPU vdd (%d)\n", rc); goto out; } #endif /* Increase VDD if needed. */ if (tgt_s->vdd > strt_s->vdd) { rc = acpuclk_set_vdd_level(tgt_s->vdd); if (rc) { pr_err("Unable to increase ACPU vdd (%d)\n", rc); goto out; } } } else if (reason == SETRATE_PC && rate != POWER_COLLAPSE_KHZ) { /* Returning from PC. ACPU is running on AXI source. * Step up to PLL0 before ramping up higher. */ config_pll(PLL0_S); } pr_debug("Switching from ACPU rate %u KHz -> %u KHz\n", strt_s->acpuclk_khz, tgt_s->acpuclk_khz); if (strt_s->pll != ACPU_PLL_3 && tgt_s->pll != ACPU_PLL_3) { config_pll(tgt_s); } else if (strt_s->pll != ACPU_PLL_3 && tgt_s->pll == ACPU_PLL_3) { scpll_apps_enable(1); config_pll(tgt_s); } else if (strt_s->pll == ACPU_PLL_3 && tgt_s->pll != ACPU_PLL_3) { config_pll(tgt_s); scpll_apps_enable(0); } else { /* Temporarily switch to PLL0 while reconfiguring PLL3. */ config_pll(PLL0_S); config_pll(tgt_s); } /* Update the driver state with the new clock freq */ drv_state.current_speed = tgt_s; /* Re-adjust lpj for the new clock speed. */ loops_per_jiffy = tgt_s->lpj; /* Nothing else to do for SWFI. 
*/ if (reason == SETRATE_SWFI) goto out; if (strt_s->axiclk_khz != tgt_s->axiclk_khz) { res = clk_set_rate(drv_state.ebi1_clk, tgt_s->axiclk_khz * 1000); if (res < 0) pr_warning("Setting AXI min rate failed (%d)\n", res); } /* Nothing else to do for power collapse */ if (reason == SETRATE_PC) goto out; #ifdef CONFIG_MSM_CPU_AVS /* notify avs after changing frequency */ res = avs_adjust_freq(freq_index, 0); if (res) pr_warning("Unable to drop ACPU vdd (%d)\n", res); #endif /* Drop VDD level if we can. */ if (tgt_s->vdd < strt_s->vdd) { res = acpuclk_set_vdd_level(tgt_s->vdd); if (res) pr_warning("Unable to drop ACPU vdd (%d)\n", res); } pr_debug("ACPU speed change complete\n"); out: if (reason == SETRATE_CPUFREQ) mutex_unlock(&drv_state.lock); return rc; } static void __init acpuclk_hw_init(void) { struct clkctl_acpu_speed *speed; uint32_t div, sel, regval; int res; /* Determine the source of the Scorpion clock. */ regval = readl(SPSS_CLK_SEL_ADDR); switch ((regval & 0x6) >> 1) { case 0: /* raw source clock */ case 3: /* low jitter PLL1 (768Mhz) */ if (regval & 0x1) { sel = ((readl(SPSS_CLK_CNTL_ADDR) >> 4) & 0x7); div = ((readl(SPSS_CLK_CNTL_ADDR) >> 0) & 0xf); } else { sel = ((readl(SPSS_CLK_CNTL_ADDR) >> 12) & 0x7); div = ((readl(SPSS_CLK_CNTL_ADDR) >> 8) & 0xf); } /* Find the matching clock rate. */ for (speed = acpu_freq_tbl; speed->acpuclk_khz != 0; speed++) { if (speed->acpuclk_src_sel == sel && speed->acpuclk_src_div == div) break; } break; case 1: /* unbuffered scorpion pll (384Mhz to 998.4Mhz) */ sel = ((readl(SCPLL_FSM_CTL_EXT_ADDR) >> 3) & 0x3f); /* Find the matching clock rate. */ for (speed = acpu_freq_tbl; speed->acpuclk_khz != 0; speed++) { if (speed->sc_l_value == sel && speed->sc_core_src_sel_mask == 1) break; } break; case 2: /* AXI bus clock (128Mhz) */ speed = AXI_S; break; default: BUG(); } /* Initialize scpll only if it wasn't already initialized by the boot * loader. 
If the CPU is already running on scpll, then the scpll was * initialized by the boot loader. */ if (speed->pll != ACPU_PLL_3) scpll_init(); if (speed->acpuclk_khz == 0) { pr_err("Error - ACPU clock reports invalid speed\n"); return; } drv_state.current_speed = speed; res = clk_set_rate(drv_state.ebi1_clk, speed->axiclk_khz * 1000); if (res < 0) pr_warning("Setting AXI min rate failed (%d)\n", res); res = clk_enable(drv_state.ebi1_clk); if (res < 0) pr_warning("Enabling AXI clock failed (%d)\n", res); pr_info("ACPU running at %d KHz\n", speed->acpuclk_khz); } static unsigned long acpuclk_8x50_get_rate(int cpu) { return drv_state.current_speed->acpuclk_khz; } /* Spare register populated with efuse data on max ACPU freq. */ #define CT_CSR_PHYS 0xA8700000 #define TCSR_SPARE2_ADDR (ct_csr_base + 0x60) #define PLL0_M_VAL_ADDR (MSM_CLK_CTL_BASE + 0x308) static void __init acpu_freq_tbl_fixup(void) { void __iomem *ct_csr_base; uint32_t tcsr_spare2, pll0_m_val; unsigned int max_acpu_khz; unsigned int i; ct_csr_base = ioremap(CT_CSR_PHYS, PAGE_SIZE); BUG_ON(ct_csr_base == NULL); tcsr_spare2 = readl(TCSR_SPARE2_ADDR); /* Check if the register is supported and meaningful. */ if ((tcsr_spare2 & 0xF000) != 0xA000) { pr_info("Efuse data on Max ACPU freq not present.\n"); goto skip_efuse_fixup; } switch (tcsr_spare2 & 0xF0) { case 0x70: acpu_freq_tbl = acpu_freq_tbl_768; max_acpu_khz = 768000; break; case 0x30: case 0x00: max_acpu_khz = 998400; break; case 0x10: max_acpu_khz = 1267200; break; default: pr_warning("Invalid efuse data (%x) on Max ACPU freq!\n", tcsr_spare2); goto skip_efuse_fixup; } pr_info("Max ACPU freq from efuse data is %d KHz\n", max_acpu_khz); for (i = 0; acpu_freq_tbl[i].acpuclk_khz != 0; i++) { if (acpu_freq_tbl[i].acpuclk_khz > max_acpu_khz) { acpu_freq_tbl[i].acpuclk_khz = 0; break; } } skip_efuse_fixup: iounmap(ct_csr_base); /* pll0_m_val will be 36 when PLL0 is run at 235MHz * instead of the usual 245MHz. 
*/ pll0_m_val = readl(PLL0_M_VAL_ADDR) & 0x7FFFF; if (pll0_m_val == 36) PLL0_S->acpuclk_khz = 235930; for (i = 0; acpu_freq_tbl[i].acpuclk_khz != 0; i++) { if (acpu_freq_tbl[i].vdd > TPS65023_MAX_DCDC1) { acpu_freq_tbl[i].acpuclk_khz = 0; break; } } } /* Initalize the lpj field in the acpu_freq_tbl. */ static void __init lpj_init(void) { int i; const struct clkctl_acpu_speed *base_clk = drv_state.current_speed; for (i = 0; acpu_freq_tbl[i].acpuclk_khz; i++) { acpu_freq_tbl[i].lpj = cpufreq_scale(loops_per_jiffy, base_clk->acpuclk_khz, acpu_freq_tbl[i].acpuclk_khz); } } #ifdef CONFIG_MSM_CPU_AVS static int __init acpu_avs_init(int (*set_vdd) (int), int khz) { int i; int freq_count = 0; int freq_index = -1; for (i = 0; acpu_freq_tbl[i].acpuclk_khz; i++) { freq_count++; if (acpu_freq_tbl[i].acpuclk_khz == khz) freq_index = i; } return avs_init(set_vdd, freq_count, freq_index); } #endif static int qsd8x50_tps65023_set_dcdc1(int mVolts) { int rc = 0; #ifdef CONFIG_QSD_SVS rc = tps65023_set_dcdc1_level(mVolts); /* * By default the TPS65023 will be initialized to 1.225V. * So we can safely switch to any frequency within this * voltage even if the device is not probed/ready. */ if (rc == -ENODEV && mVolts <= CONFIG_QSD_PMIC_DEFAULT_DCDC1) rc = 0; #else /* * Disallow frequencies not supported in the default PMIC * output voltage. */ if (mVolts > CONFIG_QSD_PMIC_DEFAULT_DCDC1) rc = -EFAULT; #endif return rc; } static struct acpuclk_data acpuclk_8x50_data = { .set_rate = acpuclk_8x50_set_rate, .get_rate = acpuclk_8x50_get_rate, .power_collapse_khz = POWER_COLLAPSE_KHZ, .wait_for_irq_khz = WAIT_FOR_IRQ_KHZ, .switch_time_us = 20, }; static int __init acpuclk_8x50_init(struct acpuclk_soc_data *soc_data) { mutex_init(&drv_state.lock); drv_state.acpu_set_vdd = qsd8x50_tps65023_set_dcdc1; drv_state.ebi1_clk = clk_get(NULL, "ebi1_acpu_clk"); BUG_ON(IS_ERR(drv_state.ebi1_clk)); acpu_freq_tbl_fixup(); acpuclk_hw_init(); lpj_init(); /* Set a lower bound for ACPU rate for boot. 
This limits the * maximum frequency hop caused by the first CPUFREQ switch. */ if (drv_state.current_speed->acpuclk_khz < PLL0_S->acpuclk_khz) acpuclk_set_rate(0, PLL0_S->acpuclk_khz, SETRATE_CPUFREQ); acpuclk_register(&acpuclk_8x50_data); #ifdef CONFIG_CPU_FREQ_MSM cpufreq_table_init(); cpufreq_frequency_table_get_attr(freq_table, smp_processor_id()); #endif #ifdef CONFIG_MSM_CPU_AVS if (!acpu_avs_init(drv_state.acpu_set_vdd, drv_state.current_speed->acpuclk_khz)) { /* avs init successful. avs will handle voltage changes */ drv_state.acpu_set_vdd = NULL; } #endif return 0; } struct acpuclk_soc_data acpuclk_8x50_soc_data __initdata = { .init = acpuclk_8x50_init, };
gpl-2.0
dabyv/trinity-f
dep/acelite/ace/ETCL/ETCL_Constraint.cpp
379
15421
// -*- C++ -*- // $Id: ETCL_Constraint.cpp 92173 2010-10-07 12:36:17Z olli $ #include "ace/ACE.h" #include "ace/ETCL/ETCL_Constraint.h" #include "ace/ETCL/ETCL_Constraint_Visitor.h" #if ! defined (__ACE_INLINE__) #include "ace/ETCL/ETCL_Constraint.inl" #endif /* __ACE_INLINE__ */ ACE_BEGIN_VERSIONED_NAMESPACE_DECL ETCL_Constraint::ETCL_Constraint (void) { } ETCL_Constraint::~ETCL_Constraint (void) { } int ETCL_Constraint::accept (ETCL_Constraint_Visitor * /* visitor */) { return 0; } // **************************************************************** ETCL_Literal_Constraint::ETCL_Literal_Constraint ( const ETCL_Literal_Constraint & lit ) : ETCL_Constraint(), type_ (ACE_ETCL_UNKNOWN) { this->copy (lit); } ETCL_Literal_Constraint::ETCL_Literal_Constraint ( ACE_CDR::ULong uinteger) : type_ (ACE_ETCL_UNSIGNED) { this->op_.uinteger_ = uinteger; } ETCL_Literal_Constraint::ETCL_Literal_Constraint ( ACE_CDR::Long integer) : type_ (ACE_ETCL_SIGNED) { this->op_.integer_ = integer; } ETCL_Literal_Constraint::ETCL_Literal_Constraint ( ACE_CDR::Boolean boolean ) : type_ (ACE_ETCL_BOOLEAN) { this->op_.bool_ = boolean; } ETCL_Literal_Constraint::ETCL_Literal_Constraint ( ACE_CDR::Double doub) : type_ (ACE_ETCL_DOUBLE) { this->op_.double_ = doub; } ETCL_Literal_Constraint::ETCL_Literal_Constraint ( const char* str) : type_ (ACE_ETCL_STRING) { this->op_.str_ = ACE::strnew (str); } ETCL_Literal_Constraint::~ETCL_Literal_Constraint (void) { if (this->type_ == ACE_ETCL_STRING) { ACE::strdelete (this->op_.str_); } } int ETCL_Literal_Constraint::accept (ETCL_Constraint_Visitor* visitor) { return visitor->visit_literal (this); } Literal_Type ETCL_Literal_Constraint::expr_type (void) const { return this->type_; } void ETCL_Literal_Constraint::operator= (const ETCL_Literal_Constraint& co) { this->copy (co); } ETCL_Literal_Constraint::operator ACE_CDR::Boolean (void) const { return (this->type_ == ACE_ETCL_BOOLEAN) ? 
this->op_.bool_ : false; } ETCL_Literal_Constraint::operator ACE_CDR::ULong (void) const { switch (this->type_) { case ACE_ETCL_UNSIGNED: return this->op_.uinteger_; case ACE_ETCL_SIGNED: case ACE_ETCL_INTEGER: return (this->op_.integer_ > 0) ? (ACE_CDR::ULong) this->op_.integer_ : 0; case ACE_ETCL_DOUBLE: return (this->op_.double_ > 0) ? ((this->op_.double_ > ACE_UINT32_MAX) ? ACE_UINT32_MAX : (ACE_CDR::ULong) this->op_.double_) : 0; default: return 0; } } ETCL_Literal_Constraint::operator ACE_CDR::Long (void) const { switch (this->type_) { case ACE_ETCL_SIGNED: case ACE_ETCL_INTEGER: return this->op_.integer_; case ACE_ETCL_UNSIGNED: return (this->op_.uinteger_ > (ACE_CDR::ULong) ACE_INT32_MAX) ? ACE_INT32_MAX : (ACE_CDR::Long) this->op_.uinteger_; case ACE_ETCL_DOUBLE: return (this->op_.double_ > 0) ? ((this->op_.double_ > ACE_INT32_MAX) ? ACE_INT32_MAX : (ACE_CDR::Long) this->op_.double_) : ((this->op_.double_ < ACE_INT32_MIN) ? ACE_INT32_MIN : (ACE_CDR::Long) this->op_.double_); default: return 0; } } ETCL_Literal_Constraint::operator ACE_CDR::Double (void) const { switch (this->type_) { case ACE_ETCL_DOUBLE: return this->op_.double_; case ACE_ETCL_SIGNED: case ACE_ETCL_INTEGER: return (ACE_CDR::Double) this->op_.integer_; case ACE_ETCL_UNSIGNED: return (ACE_CDR::Double) this->op_.uinteger_; default: return 0.0; } } ETCL_Literal_Constraint::operator const char* (void) const { switch (this->type_) { case ACE_ETCL_STRING: return this->op_.str_; default: return 0; } } bool ETCL_Literal_Constraint::operator== (const ETCL_Literal_Constraint & rhs) { bool return_value = false; Literal_Type widest_type = this->widest_type (rhs); switch (widest_type) { case ACE_ETCL_STRING: return_value = (ACE_OS::strcmp ((const char*) *this, (const char*) rhs) == 0); break; case ACE_ETCL_DOUBLE: return_value = ACE::is_equal ((ACE_CDR::Double) *this, (ACE_CDR::Double) rhs); break; case ACE_ETCL_INTEGER: case ACE_ETCL_SIGNED: return_value = (ACE_CDR::Long) *this == (ACE_CDR::Long) rhs; 
break; case ACE_ETCL_UNSIGNED: return_value = (ACE_CDR::ULong) *this == (ACE_CDR::ULong) rhs; break; case ACE_ETCL_BOOLEAN: return_value = (ACE_CDR::Boolean) *this == (ACE_CDR::Boolean) rhs; break; default: break; } return return_value; } bool ETCL_Literal_Constraint::operator< (const ETCL_Literal_Constraint & rhs) { bool return_value = false; Literal_Type widest_type = this->widest_type (rhs); switch (widest_type) { case ACE_ETCL_STRING: return_value = (ACE_OS::strcmp ((const char*) *this, (const char*) rhs) < 0); break; case ACE_ETCL_DOUBLE: return_value = (ACE_CDR::Double) *this < (ACE_CDR::Double) rhs; break; case ACE_ETCL_INTEGER: case ACE_ETCL_SIGNED: return_value = (ACE_CDR::Long) *this < (ACE_CDR::Long) rhs; break; case ACE_ETCL_UNSIGNED: return_value = (ACE_CDR::ULong) *this < (ACE_CDR::ULong) rhs; break; case ACE_ETCL_BOOLEAN: return_value = (ACE_CDR::Boolean) *this < (ACE_CDR::Boolean) rhs; break; default: break; } return return_value; } bool ETCL_Literal_Constraint::operator> (const ETCL_Literal_Constraint & rhs) { bool return_value = false; Literal_Type widest_type = this->widest_type (rhs); switch (widest_type) { case ACE_ETCL_STRING: return_value = (ACE_OS::strcmp ((const char*) *this, (const char*) rhs) > 0); break; case ACE_ETCL_DOUBLE: return_value = (ACE_CDR::Double) *this > (ACE_CDR::Double) rhs; break; case ACE_ETCL_INTEGER: case ACE_ETCL_SIGNED: return_value = (ACE_CDR::Long) *this > (ACE_CDR::Long) rhs; break; case ACE_ETCL_UNSIGNED: return_value = (ACE_CDR::ULong) *this > (ACE_CDR::ULong) rhs; break; default: break; } return return_value; } ETCL_Literal_Constraint ETCL_Literal_Constraint::operator+ (const ETCL_Literal_Constraint & rhs) { Literal_Type widest_type = this->widest_type (rhs); switch (widest_type) { case ACE_ETCL_DOUBLE: { ACE_CDR::Double result = (ACE_CDR::Double) *this + (ACE_CDR::Double) rhs; return ETCL_Literal_Constraint ((ACE_CDR::Double) result); } case ACE_ETCL_INTEGER: case ACE_ETCL_SIGNED: { ACE_CDR::Long result = 
(ACE_CDR::Long) *this + (ACE_CDR::Long) rhs; return ETCL_Literal_Constraint ((ACE_CDR::Long) result); } case ACE_ETCL_UNSIGNED: { ACE_CDR::ULong result = (ACE_CDR::ULong) *this + (ACE_CDR::ULong) rhs; return ETCL_Literal_Constraint ((ACE_CDR::ULong) result); } default: return ETCL_Literal_Constraint ((ACE_CDR::Long) 0); } } ETCL_Literal_Constraint ETCL_Literal_Constraint::operator- (const ETCL_Literal_Constraint & rhs) { Literal_Type widest_type = this->widest_type (rhs); switch (widest_type) { case ACE_ETCL_DOUBLE: { ACE_CDR::Double result = (ACE_CDR::Double) *this - (ACE_CDR::Double) rhs; return ETCL_Literal_Constraint ((ACE_CDR::Double) result); } case ACE_ETCL_INTEGER: case ACE_ETCL_SIGNED: { ACE_CDR::Long result = (ACE_CDR::Long) *this - (ACE_CDR::Long) rhs; return ETCL_Literal_Constraint ((ACE_CDR::Long) result); } case ACE_ETCL_UNSIGNED: { ACE_CDR::ULong result = (ACE_CDR::ULong) *this - (ACE_CDR::ULong) rhs; return ETCL_Literal_Constraint ((ACE_CDR::ULong) result); } default: return ETCL_Literal_Constraint ((ACE_CDR::Long) 0); } } ETCL_Literal_Constraint ETCL_Literal_Constraint::operator* (const ETCL_Literal_Constraint & rhs) { Literal_Type widest_type = this->widest_type (rhs); switch (widest_type) { case ACE_ETCL_DOUBLE: { ACE_CDR::Double result = (ACE_CDR::Double) *this * (ACE_CDR::Double) rhs; return ETCL_Literal_Constraint ((ACE_CDR::Double) result); } case ACE_ETCL_INTEGER: case ACE_ETCL_SIGNED: { ACE_CDR::Long result = (ACE_CDR::Long) *this * (ACE_CDR::Long) rhs; return ETCL_Literal_Constraint ((ACE_CDR::Long) result); } case ACE_ETCL_UNSIGNED: { ACE_CDR::ULong result = (ACE_CDR::ULong) *this * (ACE_CDR::ULong) rhs; return ETCL_Literal_Constraint ((ACE_CDR::ULong) result); } default: return ETCL_Literal_Constraint ((ACE_CDR::Long) 0); } } ETCL_Literal_Constraint ETCL_Literal_Constraint::operator/ (const ETCL_Literal_Constraint & rhs) { Literal_Type widest_type = this->widest_type (rhs); switch (widest_type) { case ACE_ETCL_DOUBLE: { if (ACE::is_equal 
((ACE_CDR::Double) rhs, 0.0)) return ETCL_Literal_Constraint ((ACE_CDR::Double) 0.0); ACE_CDR::Double result = (ACE_CDR::Double) *this / (ACE_CDR::Double) rhs; return ETCL_Literal_Constraint ((ACE_CDR::Double) result); } case ACE_ETCL_INTEGER: case ACE_ETCL_SIGNED: { if ((ACE_CDR::Long) rhs == 0) return ETCL_Literal_Constraint ((ACE_CDR::Long) 0); ACE_CDR::Long result = (ACE_CDR::Long) *this / (ACE_CDR::Long) rhs; return ETCL_Literal_Constraint ((ACE_CDR::Long) result); } case ACE_ETCL_UNSIGNED: { if ((ACE_CDR::ULong) rhs == 0) return ETCL_Literal_Constraint ((ACE_CDR::ULong) 0); ACE_CDR::ULong result = (ACE_CDR::ULong) *this / (ACE_CDR::ULong) rhs; return ETCL_Literal_Constraint ((ACE_CDR::ULong) result); } default: return ETCL_Literal_Constraint ((ACE_CDR::Long) 0); } } ETCL_Literal_Constraint ETCL_Literal_Constraint::operator- (void) { switch (this->type_) { case ACE_ETCL_DOUBLE: return ETCL_Literal_Constraint (- this->op_.double_); case ACE_ETCL_INTEGER: case ACE_ETCL_SIGNED: return ETCL_Literal_Constraint (- this->op_.integer_); case ACE_ETCL_UNSIGNED: return ETCL_Literal_Constraint (- (ACE_CDR::Long) this->op_.uinteger_); default: return ETCL_Literal_Constraint ((ACE_CDR::Long) 0); } } Literal_Type ETCL_Literal_Constraint::widest_type (const ETCL_Literal_Constraint & rhs) { Literal_Type rhs_type = rhs.expr_type (); Literal_Type return_value = rhs_type; if (rhs_type != this->type_) { if (rhs_type > this->type_) { return_value = rhs_type; } else { return_value = this->type_; } } return return_value; } void ETCL_Literal_Constraint::copy (const ETCL_Literal_Constraint &lit) { if (this->type_ == ACE_ETCL_STRING) { ACE::strdelete (this->op_.str_); } this->type_ = lit.type_; switch (this->type_) { case ACE_ETCL_STRING: this->op_.str_ = ACE::strnew (lit.op_.str_); break; case ACE_ETCL_DOUBLE: this->op_.double_ = lit.op_.double_; break; case ACE_ETCL_UNSIGNED: this->op_.uinteger_ = lit.op_.uinteger_; break; case ACE_ETCL_INTEGER: case ACE_ETCL_SIGNED: 
this->op_.integer_ = lit.op_.integer_; break; case ACE_ETCL_BOOLEAN: this->op_.bool_ = lit.op_.bool_; break; default: this->type_ = ACE_ETCL_UNKNOWN; break; } } // **************************************************************** int ETCL_Identifier::accept (ETCL_Constraint_Visitor *visitor) { return visitor->visit_identifier (this); } // **************************************************************** ETCL_Union_Value::~ETCL_Union_Value (void) { delete this->string_; delete this->integer_; } int ETCL_Union_Value::accept (ETCL_Constraint_Visitor *visitor) { return visitor->visit_union_value (this); } // **************************************************************** ETCL_Union_Pos::~ETCL_Union_Pos (void) { delete this->component_; delete this->union_value_; } int ETCL_Union_Pos::accept (ETCL_Constraint_Visitor *visitor) { return visitor->visit_union_pos (this); } // **************************************************************** ETCL_Component_Pos::~ETCL_Component_Pos (void) { delete this->component_; delete this->integer_; } int ETCL_Component_Pos::accept (ETCL_Constraint_Visitor *visitor) { return visitor->visit_component_pos (this); } // **************************************************************** ETCL_Component_Assoc::~ETCL_Component_Assoc (void) { delete this->component_; delete this->identifier_; } int ETCL_Component_Assoc::accept (ETCL_Constraint_Visitor *visitor) { return visitor->visit_component_assoc (this); } // **************************************************************** ETCL_Component_Array::~ETCL_Component_Array (void) { delete this->component_; delete this->integer_; } int ETCL_Component_Array::accept (ETCL_Constraint_Visitor *visitor) { return visitor->visit_component_array (this); } // **************************************************************** ETCL_Special::~ETCL_Special (void) {} int ETCL_Special::accept (ETCL_Constraint_Visitor *visitor) { return visitor->visit_special (this); } // 
**************************************************************** ETCL_Component::~ETCL_Component (void) { delete this->component_; delete this->identifier_; } int ETCL_Component::accept (ETCL_Constraint_Visitor *visitor) { return visitor->visit_component (this); } // **************************************************************** ETCL_Dot::~ETCL_Dot (void) { delete this->component_; } int ETCL_Dot::accept (ETCL_Constraint_Visitor *visitor) { return visitor->visit_dot (this); } // **************************************************************** ETCL_Eval::~ETCL_Eval (void) { delete this->component_; } int ETCL_Eval::accept (ETCL_Constraint_Visitor *visitor) { return visitor->visit_eval (this); } // **************************************************************** ETCL_Default::~ETCL_Default (void) { delete this->component_; } int ETCL_Default::accept (ETCL_Constraint_Visitor *visitor) { return visitor->visit_default (this); } // **************************************************************** ETCL_Exist::~ETCL_Exist (void) { delete this->component_; } int ETCL_Exist::accept (ETCL_Constraint_Visitor *visitor) { return visitor->visit_exist (this); } // **************************************************************** ETCL_Unary_Expr::~ETCL_Unary_Expr (void) { delete this->subexpr_; } int ETCL_Unary_Expr::accept (ETCL_Constraint_Visitor *visitor) { return visitor->visit_unary_expr (this); } // **************************************************************** ETCL_Binary_Expr::~ETCL_Binary_Expr (void) { delete this->lhs_; delete this->rhs_; } int ETCL_Binary_Expr::accept (ETCL_Constraint_Visitor *visitor) { return visitor->visit_binary_expr (this); } // **************************************************************** ETCL_Preference::~ETCL_Preference (void) { delete this->subexpr_; } int ETCL_Preference::accept (ETCL_Constraint_Visitor *visitor) { return visitor->visit_preference (this); } ACE_END_VERSIONED_NAMESPACE_DECL
gpl-2.0
kerneldevs/caf-kernel
net/ipv4/netfilter/nf_nat_ftp.c
891
3637
/* FTP extension for TCP NAT alteration. */

/* (C) 1999-2001 Paul `Rusty' Russell
 * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/netfilter_ipv4.h>
#include <net/netfilter/nf_nat.h>
#include <net/netfilter/nf_nat_helper.h>
#include <net/netfilter/nf_nat_rule.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_expect.h>
#include <linux/netfilter/nf_conntrack_ftp.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Rusty Russell <rusty@rustcorp.com.au>");
MODULE_DESCRIPTION("ftp NAT helper");
MODULE_ALIAS("ip_nat_ftp");

/* FIXME: Time out? --RR */

/* Format the rewritten address/port argument for an FTP command into
 * @buffer (at most @buflen bytes) according to the command @type:
 *
 *   PORT/PASV: dotted-quad plus high,low port bytes ("a,b,c,d,ph,pl")
 *   EPRT:      extended syntax "|1|a.b.c.d|port|" (1 = AF_INET)
 *   EPSV:      extended passive reply "|||port|"
 *
 * Returns the number of characters written (snprintf semantics), or 0
 * for an unrecognized command type.  @addr is taken in network byte
 * order and decomposed byte-by-byte; @port is host byte order here.
 */
static int nf_nat_ftp_fmt_cmd(enum nf_ct_ftp_type type,
			      char *buffer, size_t buflen,
			      __be32 addr, u16 port)
{
	switch (type) {
	case NF_CT_FTP_PORT:
	case NF_CT_FTP_PASV:
		return snprintf(buffer, buflen, "%u,%u,%u,%u,%u,%u",
				((unsigned char *)&addr)[0],
				((unsigned char *)&addr)[1],
				((unsigned char *)&addr)[2],
				((unsigned char *)&addr)[3],
				port >> 8,
				port & 0xFF);
	case NF_CT_FTP_EPRT:
		return snprintf(buffer, buflen, "|1|%pI4|%u|", &addr, port);
	case NF_CT_FTP_EPSV:
		return snprintf(buffer, buflen, "|||%u|", port);
	}

	return 0;
}

/* So, this packet has hit the connection tracking matching code.
   Mangle it, and change the expectation to match the new version.
*/
/* NAT a matched FTP address/port command inside @skb and register the
 * rewritten expectation @exp for the coming data connection.
 *
 * Called from the conntrack FTP helper via the nf_nat_ftp_hook.
 * @matchoff/@matchlen delimit the original address argument inside the
 * TCP payload.  Returns NF_ACCEPT on success, NF_DROP if no free port
 * could be reserved or the packet could not be mangled.
 */
static unsigned int nf_nat_ftp(struct sk_buff *skb,
			       enum ip_conntrack_info ctinfo,
			       enum nf_ct_ftp_type type,
			       unsigned int matchoff,
			       unsigned int matchlen,
			       struct nf_conntrack_expect *exp)
{
	__be32 newip;
	u_int16_t port;
	int dir = CTINFO2DIR(ctinfo);
	struct nf_conn *ct = exp->master;
	/* Sized for the longest possible replacement ("|1|a.b.c.d|p|"). */
	char buffer[sizeof("|1|255.255.255.255|65535|")];
	unsigned int buflen;

	pr_debug("FTP_NAT: type %i, off %u len %u\n", type, matchoff, matchlen);

	/* Connection will come from wherever this packet goes, hence !dir */
	newip = ct->tuplehash[!dir].tuple.dst.u3.ip;
	exp->saved_proto.tcp.port = exp->tuple.dst.u.tcp.port;
	exp->dir = !dir;

	/* When you see the packet, we need to NAT it the same as
	 * this one. */
	exp->expectfn = nf_nat_follow_master;

	/* Try to get same port: if not, try to change it.
	 * port is u16, so incrementing past 65535 wraps to 0 and
	 * terminates the loop once the whole range has been tried. */
	for (port = ntohs(exp->saved_proto.tcp.port); port != 0; port++) {
		exp->tuple.dst.u.tcp.port = htons(port);
		if (nf_ct_expect_related(exp) == 0)
			break;
	}

	if (port == 0)
		return NF_DROP;

	buflen = nf_nat_ftp_fmt_cmd(type, buffer, sizeof(buffer), newip, port);
	if (!buflen)
		goto out;

	pr_debug("calling nf_nat_mangle_tcp_packet\n");

	/* Splice the rewritten argument over the original one; this also
	 * fixes up sequence numbers if the length changed. */
	if (!nf_nat_mangle_tcp_packet(skb, ct, ctinfo, matchoff, matchlen,
				      buffer, buflen))
		goto out;

	return NF_ACCEPT;

out:
	/* Undo the expectation registered above on any failure path. */
	nf_ct_unexpect_related(exp);
	return NF_DROP;
}

static void __exit nf_nat_ftp_fini(void)
{
	/* Unhook, then wait for in-flight RCU readers before the
	 * module text can go away. */
	rcu_assign_pointer(nf_nat_ftp_hook, NULL);
	synchronize_rcu();
}

static int __init nf_nat_ftp_init(void)
{
	/* Only one FTP NAT helper may be registered at a time. */
	BUG_ON(nf_nat_ftp_hook != NULL);
	rcu_assign_pointer(nf_nat_ftp_hook, nf_nat_ftp);
	return 0;
}

/* Prior to 2.6.11, we had a ports param.  No longer, but don't break users. */
static int warn_set(const char *val, struct kernel_param *kp)
{
	printk(KERN_INFO KBUILD_MODNAME
	       ": kernel >= 2.6.10 only uses 'ports' for conntrack modules\n");
	return 0;
}
module_param_call(ports, warn_set, NULL, NULL, 0);

module_init(nf_nat_ftp_init);
module_exit(nf_nat_ftp_fini);
gpl-2.0
rock12/ALPS.L1.MP6.V2.19_CENON6580_WE_1_L_KERNEL
drivers/usb/gadget/net2280.c
2171
76816
/* * Driver for the PLX NET2280 USB device controller. * Specs and errata are available from <http://www.plxtech.com>. * * PLX Technology Inc. (formerly NetChip Technology) supported the * development of this driver. * * * CODE STATUS HIGHLIGHTS * * This driver should work well with most "gadget" drivers, including * the Mass Storage, Serial, and Ethernet/RNDIS gadget drivers * as well as Gadget Zero and Gadgetfs. * * DMA is enabled by default. Drivers using transfer queues might use * DMA chaining to remove IRQ latencies between transfers. (Except when * short OUT transfers happen.) Drivers can use the req->no_interrupt * hint to completely eliminate some IRQs, if a later IRQ is guaranteed * and DMA chaining is enabled. * * Note that almost all the errata workarounds here are only needed for * rev1 chips. Rev1a silicon (0110) fixes almost all of them. */ /* * Copyright (C) 2003 David Brownell * Copyright (C) 2003-2005 PLX Technology, Inc. * * Modified Seth Levy 2005 PLX Technology, Inc. to provide compatibility * with 2282 chip * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. 
*/ #undef DEBUG /* messages on error and most fault paths */ #undef VERBOSE /* extra debug messages (success too) */ #include <linux/module.h> #include <linux/pci.h> #include <linux/dma-mapping.h> #include <linux/kernel.h> #include <linux/delay.h> #include <linux/ioport.h> #include <linux/slab.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/timer.h> #include <linux/list.h> #include <linux/interrupt.h> #include <linux/moduleparam.h> #include <linux/device.h> #include <linux/usb/ch9.h> #include <linux/usb/gadget.h> #include <linux/prefetch.h> #include <asm/byteorder.h> #include <asm/io.h> #include <asm/irq.h> #include <asm/unaligned.h> #define DRIVER_DESC "PLX NET228x USB Peripheral Controller" #define DRIVER_VERSION "2005 Sept 27" #define EP_DONTUSE 13 /* nonzero */ #define USE_RDK_LEDS /* GPIO pins control three LEDs */ static const char driver_name [] = "net2280"; static const char driver_desc [] = DRIVER_DESC; static const char ep0name [] = "ep0"; static const char *const ep_name [] = { ep0name, "ep-a", "ep-b", "ep-c", "ep-d", "ep-e", "ep-f", }; /* use_dma -- general goodness, fewer interrupts, less cpu load (vs PIO) * use_dma_chaining -- dma descriptor queueing gives even more irq reduction * * The net2280 DMA engines are not tightly integrated with their FIFOs; * not all cases are (yet) handled well in this driver or the silicon. * Some gadget drivers work better with the dma support here than others. * These two parameters let you use PIO or more aggressive DMA. 
*/ static bool use_dma = 1; static bool use_dma_chaining = 0; /* "modprobe net2280 use_dma=n" etc */ module_param (use_dma, bool, S_IRUGO); module_param (use_dma_chaining, bool, S_IRUGO); /* mode 0 == ep-{a,b,c,d} 1K fifo each * mode 1 == ep-{a,b} 2K fifo each, ep-{c,d} unavailable * mode 2 == ep-a 2K fifo, ep-{b,c} 1K each, ep-d unavailable */ static ushort fifo_mode = 0; /* "modprobe net2280 fifo_mode=1" etc */ module_param (fifo_mode, ushort, 0644); /* enable_suspend -- When enabled, the driver will respond to * USB suspend requests by powering down the NET2280. Otherwise, * USB suspend requests will be ignored. This is acceptable for * self-powered devices */ static bool enable_suspend = 0; /* "modprobe net2280 enable_suspend=1" etc */ module_param (enable_suspend, bool, S_IRUGO); /* force full-speed operation */ static bool full_speed; module_param(full_speed, bool, 0444); MODULE_PARM_DESC(full_speed, "force full-speed mode -- for testing only!"); #define DIR_STRING(bAddress) (((bAddress) & USB_DIR_IN) ? 
"in" : "out") #if defined(CONFIG_USB_GADGET_DEBUG_FILES) || defined (DEBUG) static char *type_string (u8 bmAttributes) { switch ((bmAttributes) & USB_ENDPOINT_XFERTYPE_MASK) { case USB_ENDPOINT_XFER_BULK: return "bulk"; case USB_ENDPOINT_XFER_ISOC: return "iso"; case USB_ENDPOINT_XFER_INT: return "intr"; }; return "control"; } #endif #include "net2280.h" #define valid_bit cpu_to_le32 (1 << VALID_BIT) #define dma_done_ie cpu_to_le32 (1 << DMA_DONE_INTERRUPT_ENABLE) /*-------------------------------------------------------------------------*/ static int net2280_enable (struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc) { struct net2280 *dev; struct net2280_ep *ep; u32 max, tmp; unsigned long flags; ep = container_of (_ep, struct net2280_ep, ep); if (!_ep || !desc || ep->desc || _ep->name == ep0name || desc->bDescriptorType != USB_DT_ENDPOINT) return -EINVAL; dev = ep->dev; if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN) return -ESHUTDOWN; /* erratum 0119 workaround ties up an endpoint number */ if ((desc->bEndpointAddress & 0x0f) == EP_DONTUSE) return -EDOM; /* sanity check ep-e/ep-f since their fifos are small */ max = usb_endpoint_maxp (desc) & 0x1fff; if (ep->num > 4 && max > 64) return -ERANGE; spin_lock_irqsave (&dev->lock, flags); _ep->maxpacket = max & 0x7ff; ep->desc = desc; /* ep_reset() has already been called */ ep->stopped = 0; ep->wedged = 0; ep->out_overflow = 0; /* set speed-dependent max packet; may kick in high bandwidth */ set_idx_reg (dev->regs, REG_EP_MAXPKT (dev, ep->num), max); /* FIFO lines can't go to different packets. PIO is ok, so * use it instead of troublesome (non-bulk) multi-packet DMA. 
*/ if (ep->dma && (max % 4) != 0 && use_dma_chaining) { DEBUG (ep->dev, "%s, no dma for maxpacket %d\n", ep->ep.name, ep->ep.maxpacket); ep->dma = NULL; } /* set type, direction, address; reset fifo counters */ writel ((1 << FIFO_FLUSH), &ep->regs->ep_stat); tmp = (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK); if (tmp == USB_ENDPOINT_XFER_INT) { /* erratum 0105 workaround prevents hs NYET */ if (dev->chiprev == 0100 && dev->gadget.speed == USB_SPEED_HIGH && !(desc->bEndpointAddress & USB_DIR_IN)) writel ((1 << CLEAR_NAK_OUT_PACKETS_MODE), &ep->regs->ep_rsp); } else if (tmp == USB_ENDPOINT_XFER_BULK) { /* catch some particularly blatant driver bugs */ if ((dev->gadget.speed == USB_SPEED_HIGH && max != 512) || (dev->gadget.speed == USB_SPEED_FULL && max > 64)) { spin_unlock_irqrestore (&dev->lock, flags); return -ERANGE; } } ep->is_iso = (tmp == USB_ENDPOINT_XFER_ISOC) ? 1 : 0; tmp <<= ENDPOINT_TYPE; tmp |= desc->bEndpointAddress; tmp |= (4 << ENDPOINT_BYTE_COUNT); /* default full fifo lines */ tmp |= 1 << ENDPOINT_ENABLE; wmb (); /* for OUT transfers, block the rx fifo until a read is posted */ ep->is_in = (tmp & USB_DIR_IN) != 0; if (!ep->is_in) writel ((1 << SET_NAK_OUT_PACKETS), &ep->regs->ep_rsp); else if (dev->pdev->device != 0x2280) { /* Added for 2282, Don't use nak packets on an in endpoint, * this was ignored on 2280 */ writel ((1 << CLEAR_NAK_OUT_PACKETS) | (1 << CLEAR_NAK_OUT_PACKETS_MODE), &ep->regs->ep_rsp); } writel (tmp, &ep->regs->ep_cfg); /* enable irqs */ if (!ep->dma) { /* pio, per-packet */ tmp = (1 << ep->num) | readl (&dev->regs->pciirqenb0); writel (tmp, &dev->regs->pciirqenb0); tmp = (1 << DATA_PACKET_RECEIVED_INTERRUPT_ENABLE) | (1 << DATA_PACKET_TRANSMITTED_INTERRUPT_ENABLE); if (dev->pdev->device == 0x2280) tmp |= readl (&ep->regs->ep_irqenb); writel (tmp, &ep->regs->ep_irqenb); } else { /* dma, per-request */ tmp = (1 << (8 + ep->num)); /* completion */ tmp |= readl (&dev->regs->pciirqenb1); writel (tmp, &dev->regs->pciirqenb1); /* 
for short OUT transfers, dma completions can't * advance the queue; do it pio-style, by hand. * NOTE erratum 0112 workaround #2 */ if ((desc->bEndpointAddress & USB_DIR_IN) == 0) { tmp = (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT_ENABLE); writel (tmp, &ep->regs->ep_irqenb); tmp = (1 << ep->num) | readl (&dev->regs->pciirqenb0); writel (tmp, &dev->regs->pciirqenb0); } } tmp = desc->bEndpointAddress; DEBUG (dev, "enabled %s (ep%d%s-%s) %s max %04x\n", _ep->name, tmp & 0x0f, DIR_STRING (tmp), type_string (desc->bmAttributes), ep->dma ? "dma" : "pio", max); /* pci writes may still be posted */ spin_unlock_irqrestore (&dev->lock, flags); return 0; } static int handshake (u32 __iomem *ptr, u32 mask, u32 done, int usec) { u32 result; do { result = readl (ptr); if (result == ~(u32)0) /* "device unplugged" */ return -ENODEV; result &= mask; if (result == done) return 0; udelay (1); usec--; } while (usec > 0); return -ETIMEDOUT; } static const struct usb_ep_ops net2280_ep_ops; static void ep_reset (struct net2280_regs __iomem *regs, struct net2280_ep *ep) { u32 tmp; ep->desc = NULL; INIT_LIST_HEAD (&ep->queue); ep->ep.maxpacket = ~0; ep->ep.ops = &net2280_ep_ops; /* disable the dma, irqs, endpoint... 
*/ if (ep->dma) { writel (0, &ep->dma->dmactl); writel ( (1 << DMA_SCATTER_GATHER_DONE_INTERRUPT) | (1 << DMA_TRANSACTION_DONE_INTERRUPT) | (1 << DMA_ABORT) , &ep->dma->dmastat); tmp = readl (&regs->pciirqenb0); tmp &= ~(1 << ep->num); writel (tmp, &regs->pciirqenb0); } else { tmp = readl (&regs->pciirqenb1); tmp &= ~(1 << (8 + ep->num)); /* completion */ writel (tmp, &regs->pciirqenb1); } writel (0, &ep->regs->ep_irqenb); /* init to our chosen defaults, notably so that we NAK OUT * packets until the driver queues a read (+note erratum 0112) */ if (!ep->is_in || ep->dev->pdev->device == 0x2280) { tmp = (1 << SET_NAK_OUT_PACKETS_MODE) | (1 << SET_NAK_OUT_PACKETS) | (1 << CLEAR_EP_HIDE_STATUS_PHASE) | (1 << CLEAR_INTERRUPT_MODE); } else { /* added for 2282 */ tmp = (1 << CLEAR_NAK_OUT_PACKETS_MODE) | (1 << CLEAR_NAK_OUT_PACKETS) | (1 << CLEAR_EP_HIDE_STATUS_PHASE) | (1 << CLEAR_INTERRUPT_MODE); } if (ep->num != 0) { tmp |= (1 << CLEAR_ENDPOINT_TOGGLE) | (1 << CLEAR_ENDPOINT_HALT); } writel (tmp, &ep->regs->ep_rsp); /* scrub most status bits, and flush any fifo state */ if (ep->dev->pdev->device == 0x2280) tmp = (1 << FIFO_OVERFLOW) | (1 << FIFO_UNDERFLOW); else tmp = 0; writel (tmp | (1 << TIMEOUT) | (1 << USB_STALL_SENT) | (1 << USB_IN_NAK_SENT) | (1 << USB_IN_ACK_RCVD) | (1 << USB_OUT_PING_NAK_SENT) | (1 << USB_OUT_ACK_SENT) | (1 << FIFO_FLUSH) | (1 << SHORT_PACKET_OUT_DONE_INTERRUPT) | (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT) | (1 << DATA_PACKET_RECEIVED_INTERRUPT) | (1 << DATA_PACKET_TRANSMITTED_INTERRUPT) | (1 << DATA_OUT_PING_TOKEN_INTERRUPT) | (1 << DATA_IN_TOKEN_INTERRUPT) , &ep->regs->ep_stat); /* fifo size is handled separately */ } static void nuke (struct net2280_ep *); static int net2280_disable (struct usb_ep *_ep) { struct net2280_ep *ep; unsigned long flags; ep = container_of (_ep, struct net2280_ep, ep); if (!_ep || !ep->desc || _ep->name == ep0name) return -EINVAL; spin_lock_irqsave (&ep->dev->lock, flags); nuke (ep); ep_reset (ep->dev->regs, ep); 
VDEBUG (ep->dev, "disabled %s %s\n", ep->dma ? "dma" : "pio", _ep->name); /* synch memory views with the device */ (void) readl (&ep->regs->ep_cfg); if (use_dma && !ep->dma && ep->num >= 1 && ep->num <= 4) ep->dma = &ep->dev->dma [ep->num - 1]; spin_unlock_irqrestore (&ep->dev->lock, flags); return 0; } /*-------------------------------------------------------------------------*/ static struct usb_request * net2280_alloc_request (struct usb_ep *_ep, gfp_t gfp_flags) { struct net2280_ep *ep; struct net2280_request *req; if (!_ep) return NULL; ep = container_of (_ep, struct net2280_ep, ep); req = kzalloc(sizeof(*req), gfp_flags); if (!req) return NULL; INIT_LIST_HEAD (&req->queue); /* this dma descriptor may be swapped with the previous dummy */ if (ep->dma) { struct net2280_dma *td; td = pci_pool_alloc (ep->dev->requests, gfp_flags, &req->td_dma); if (!td) { kfree (req); return NULL; } td->dmacount = 0; /* not VALID */ td->dmadesc = td->dmaaddr; req->td = td; } return &req->req; } static void net2280_free_request (struct usb_ep *_ep, struct usb_request *_req) { struct net2280_ep *ep; struct net2280_request *req; ep = container_of (_ep, struct net2280_ep, ep); if (!_ep || !_req) return; req = container_of (_req, struct net2280_request, req); WARN_ON (!list_empty (&req->queue)); if (req->td) pci_pool_free (ep->dev->requests, req->td, req->td_dma); kfree (req); } /*-------------------------------------------------------------------------*/ /* load a packet into the fifo we use for usb IN transfers. * works for all endpoints. * * NOTE: pio with ep-a..ep-d could stuff multiple packets into the fifo * at a time, but this code is simpler because it knows it only writes * one packet. ep-a..ep-d should use dma instead. */ static void write_fifo (struct net2280_ep *ep, struct usb_request *req) { struct net2280_ep_regs __iomem *regs = ep->regs; u8 *buf; u32 tmp; unsigned count, total; /* INVARIANT: fifo is currently empty. 
(testable) */ if (req) { buf = req->buf + req->actual; prefetch (buf); total = req->length - req->actual; } else { total = 0; buf = NULL; } /* write just one packet at a time */ count = ep->ep.maxpacket; if (count > total) /* min() cannot be used on a bitfield */ count = total; VDEBUG (ep->dev, "write %s fifo (IN) %d bytes%s req %p\n", ep->ep.name, count, (count != ep->ep.maxpacket) ? " (short)" : "", req); while (count >= 4) { /* NOTE be careful if you try to align these. fifo lines * should normally be full (4 bytes) and successive partial * lines are ok only in certain cases. */ tmp = get_unaligned ((u32 *)buf); cpu_to_le32s (&tmp); writel (tmp, &regs->ep_data); buf += 4; count -= 4; } /* last fifo entry is "short" unless we wrote a full packet. * also explicitly validate last word in (periodic) transfers * when maxpacket is not a multiple of 4 bytes. */ if (count || total < ep->ep.maxpacket) { tmp = count ? get_unaligned ((u32 *)buf) : count; cpu_to_le32s (&tmp); set_fifo_bytecount (ep, count & 0x03); writel (tmp, &regs->ep_data); } /* pci writes may still be posted */ } /* work around erratum 0106: PCI and USB race over the OUT fifo. * caller guarantees chiprev 0100, out endpoint is NAKing, and * there's no real data in the fifo. * * NOTE: also used in cases where that erratum doesn't apply: * where the host wrote "too much" data to us. 
 */
/* Force the OUT fifo empty by acking pending OUT-token irqs and issuing a
 * FIFO_FLUSH; must be entered with the endpoint NAKing OUT packets.
 */
static void out_flush (struct net2280_ep *ep)
{
	u32	__iomem *statp;
	u32	tmp;

	ASSERT_OUT_NAKING (ep);

	statp = &ep->regs->ep_stat;
	/* ack stale token/packet irqs before flushing */
	writel (  (1 << DATA_OUT_PING_TOKEN_INTERRUPT)
		| (1 << DATA_PACKET_RECEIVED_INTERRUPT)
		, statp);
	writel ((1 << FIFO_FLUSH), statp);
	mb ();
	tmp = readl (statp);
	if (tmp & (1 << DATA_OUT_PING_TOKEN_INTERRUPT)
			/* high speed did bulk NYET; fifo isn't filling */
			&& ep->dev->gadget.speed == USB_SPEED_FULL) {
		unsigned	usec;

		usec = 50;		/* 64 byte bulk/interrupt */
		handshake (statp, (1 << USB_OUT_PING_NAK_SENT),
				(1 << USB_OUT_PING_NAK_SENT), usec);
		/* NAK done; now CLEAR_NAK_OUT_PACKETS is safe */
	}
}

/* unload packet(s) from the fifo we use for usb OUT transfers.
 * returns true iff the request completed, because of short packet
 * or the request buffer having filled with full packets.
 *
 * for ep-a..ep-d this will read multiple packets out when they
 * have been accepted.
 */
static int
read_fifo (struct net2280_ep *ep, struct net2280_request *req)
{
	struct net2280_ep_regs	__iomem *regs = ep->regs;
	u8			*buf = req->req.buf + req->req.actual;
	unsigned		count, tmp, is_short;
	unsigned		cleanup = 0, prevent = 0;

	/* erratum 0106 ... packets coming in during fifo reads might
	 * be incompletely rejected.  not all cases have workarounds.
	 */
	if (ep->dev->chiprev == 0x0100
			&& ep->dev->gadget.speed == USB_SPEED_FULL) {
		udelay (1);
		tmp = readl (&ep->regs->ep_stat);
		if ((tmp & (1 << NAK_OUT_PACKETS)))
			cleanup = 1;		/* flush fifo when done */
		else if ((tmp & (1 << FIFO_FULL))) {
			start_out_naking (ep);
			prevent = 1;		/* un-NAK when done */
		}
		/* else: hope we don't see the problem */
	}

	/* never overflow the rx buffer. the fifo reads packets until
	 * it sees a short one; we might not be ready for them all.
	 */
	prefetchw (buf);
	count = readl (&regs->ep_avail);
	if (unlikely (count == 0)) {
		/* re-read once; avail may lag the status register */
		udelay (1);
		tmp = readl (&ep->regs->ep_stat);
		count = readl (&regs->ep_avail);
		/* handled that data already?
 */
		if (count == 0 && (tmp & (1 << NAK_OUT_PACKETS)) == 0)
			return 0;
	}

	tmp = req->req.length - req->req.actual;
	if (count > tmp) {
		/* as with DMA, data overflow gets flushed */
		if ((tmp % ep->ep.maxpacket) != 0) {
			ERROR (ep->dev,
				"%s out fifo %d bytes, expected %d\n",
				ep->ep.name, count, tmp);
			req->req.status = -EOVERFLOW;
			cleanup = 1;
			/* NAK_OUT_PACKETS will be set, so flushing is safe;
			 * the next read will start with the next packet
			 */
		} /* else it's a ZLP, no worries */
		count = tmp;
	}
	req->req.actual += count;

	is_short = (count == 0) || ((count % ep->ep.maxpacket) != 0);

	VDEBUG (ep->dev, "read %s fifo (OUT) %d bytes%s%s%s req %p %d/%d\n",
			ep->ep.name, count, is_short ? " (short)" : "",
			cleanup ? " flush" : "", prevent ? " nak" : "",
			req, req->req.actual, req->req.length);

	/* drain whole 32-bit fifo words first ... */
	while (count >= 4) {
		tmp = readl (&regs->ep_data);
		cpu_to_le32s (&tmp);
		put_unaligned (tmp, (u32 *)buf);
		buf += 4;
		count -= 4;
	}
	/* ... then unpack the 1-3 trailing bytes of a short line */
	if (count) {
		tmp = readl (&regs->ep_data);
		/* LE conversion is implicit here: */
		do {
			*buf++ = (u8) tmp;
			tmp >>= 8;
		} while (--count);
	}
	if (cleanup)
		out_flush (ep);
	if (prevent) {
		writel ((1 << CLEAR_NAK_OUT_PACKETS), &ep->regs->ep_rsp);
		(void) readl (&ep->regs->ep_rsp);
	}

	return is_short || ((req->req.actual == req->req.length)
				&& !req->req.zero);
}

/* fill out dma descriptor to match a given request */
static void
fill_dma_desc (struct net2280_ep *ep, struct net2280_request *req, int valid)
{
	struct net2280_dma	*td = req->td;
	u32			dmacount = req->req.length;

	/* don't let DMA continue after a short OUT packet,
	 * so overruns can't affect the next transfer.
	 * in case of overruns on max-size packets, we can't
	 * stop the fifo from filling but we can flush it.
 */
	if (ep->is_in)
		dmacount |= (1 << DMA_DIRECTION);
	if ((!ep->is_in && (dmacount % ep->ep.maxpacket) != 0)
			|| ep->dev->pdev->device != 0x2280)
		dmacount |= (1 << END_OF_CHAIN);

	req->valid = valid;
	if (valid)
		dmacount |= (1 << VALID_BIT);
	if (likely(!req->req.no_interrupt || !use_dma_chaining))
		dmacount |= (1 << DMA_DONE_INTERRUPT_ENABLE);

	/* td->dmadesc = previously set by caller */
	td->dmaaddr = cpu_to_le32 (req->req.dma);

	/* 2280 may be polling VALID_BIT through ep->dma->dmadesc */
	wmb ();
	td->dmacount = cpu_to_le32(dmacount);
}

/* default dmactl: scatter-gather with descriptor polling, per erratum 0116 */
static const u32 dmactl_default =
		  (1 << DMA_SCATTER_GATHER_DONE_INTERRUPT)
		| (1 << DMA_CLEAR_COUNT_ENABLE)
		/* erratum 0116 workaround part 1 (use POLLING) */
		| (POLL_100_USEC << DESCRIPTOR_POLLING_RATE)
		| (1 << DMA_VALID_BIT_POLLING_ENABLE)
		| (1 << DMA_VALID_BIT_ENABLE)
		| (1 << DMA_SCATTER_GATHER_ENABLE)
		/* erratum 0116 workaround part 2 (no AUTOSTART) */
		| (1 << DMA_ENABLE);

/* busy-wait (up to 50 usec) for the engine to report DMA_ENABLE clear */
static inline void spin_stop_dma (struct net2280_dma_regs __iomem *dma)
{
	handshake (&dma->dmactl, (1 << DMA_ENABLE), 0, 50);
}

/* clear DMA_ENABLE, then wait for the engine to actually stop */
static inline void stop_dma (struct net2280_dma_regs __iomem *dma)
{
	writel (readl (&dma->dmactl) & ~(1 << DMA_ENABLE), &dma->dmactl);
	spin_stop_dma (dma);
}

/* point the engine at descriptor td_dma and kick it off */
static void start_queue (struct net2280_ep *ep, u32 dmactl, u32 td_dma)
{
	struct net2280_dma_regs	__iomem *dma = ep->dma;
	unsigned int tmp = (1 << VALID_BIT) | (ep->is_in << DMA_DIRECTION);

	if (ep->dev->pdev->device != 0x2280)
		tmp |= (1 << END_OF_CHAIN);

	writel (tmp, &dma->dmacount);
	/* ack any stale dma status (write-back of what we read) */
	writel (readl (&dma->dmastat), &dma->dmastat);

	writel (td_dma, &dma->dmadesc);
	writel (dmactl, &dma->dmactl);

	/* erratum 0116 workaround part 3:  pci arbiter away from net2280 */
	(void) readl (&ep->dev->pci->pcimstctl);

	writel ((1 << DMA_START), &dma->dmastat);

	if (!ep->is_in)
		stop_out_naking (ep);
}

/* begin DMA for a request on an idle engine; short OUT data already in
 * the fifo is drained with a single "faked" dma transfer first.
 */
static void start_dma (struct net2280_ep *ep, struct net2280_request *req)
{
	u32	tmp;
	struct net2280_dma_regs	__iomem *dma = ep->dma;

	/* FIXME can't use DMA for ZLPs */

	/* on this path we "know"  there's no dma active (yet) */
	WARN_ON (readl (&dma->dmactl) & (1 << DMA_ENABLE));
	writel (0, &ep->dma->dmactl);

	/* previous OUT packet might have been short */
	if (!ep->is_in && ((tmp = readl (&ep->regs->ep_stat))
				& (1 << NAK_OUT_PACKETS)) != 0) {
		writel ((1 << SHORT_PACKET_TRANSFERRED_INTERRUPT),
			&ep->regs->ep_stat);

		tmp = readl (&ep->regs->ep_avail);
		if (tmp) {
			writel (readl (&dma->dmastat), &dma->dmastat);

			/* transfer all/some fifo data */
			writel (req->req.dma, &dma->dmaaddr);
			tmp = min (tmp, req->req.length);

			/* dma irq, faking scatterlist status */
			req->td->dmacount = cpu_to_le32 (req->req.length - tmp);
			writel ((1 << DMA_DONE_INTERRUPT_ENABLE)
				| tmp, &dma->dmacount);
			req->td->dmadesc = 0;
			req->valid = 1;

			writel ((1 << DMA_ENABLE), &dma->dmactl);
			writel ((1 << DMA_START), &dma->dmastat);
			return;
		}
	}

	tmp = dmactl_default;

	/* force packet boundaries between dma requests, but prevent the
	 * controller from automagically writing a last "short" packet
	 * (zero length) unless the driver explicitly said to do that.
	 */
	if (ep->is_in) {
		if (likely ((req->req.length % ep->ep.maxpacket) != 0
				|| req->req.zero)) {
			tmp |= (1 << DMA_FIFO_VALIDATE);
			ep->in_fifo_validate = 1;
		} else
			ep->in_fifo_validate = 0;
	}

	/* init req->td, pointing to the current dummy */
	req->td->dmadesc = cpu_to_le32 (ep->td_dma);
	fill_dma_desc (ep, req, 1);

	if (!use_dma_chaining)
		req->td->dmacount |= cpu_to_le32 (1 << END_OF_CHAIN);

	start_queue (ep, tmp, req->td_dma);
}

/* append a request to a running DMA chain by rotating the dummy
 * descriptor: the request's td becomes the new tail dummy.
 */
static inline void
queue_dma (struct net2280_ep *ep, struct net2280_request *req, int valid)
{
	struct net2280_dma	*end;
	dma_addr_t		tmp;

	/* swap new dummy for old, link; fill and maybe activate */
	end = ep->dummy;
	ep->dummy = req->td;
	req->td = end;

	tmp = ep->td_dma;
	ep->td_dma = req->td_dma;
	req->td_dma = tmp;

	end->dmadesc = cpu_to_le32 (ep->td_dma);

	fill_dma_desc (ep, req, valid);
}

/* retire one request: unlink it, unmap any DMA buffer, and invoke the
 * gadget driver's completion callback with dev->lock dropped.
 */
static void
done (struct net2280_ep *ep, struct net2280_request *req, int status)
{
	struct net2280		*dev;
	unsigned		stopped = ep->stopped;

	list_del_init (&req->queue);

	if (req->req.status == -EINPROGRESS)
		req->req.status = status;
	else
		status = req->req.status;

	dev = ep->dev;
	if (ep->dma)
		usb_gadget_unmap_request(&dev->gadget, &req->req, ep->is_in);

	if (status && status != -ESHUTDOWN)
		VDEBUG (dev, "complete %s req %p stat %d len %u/%u\n",
			ep->ep.name, &req->req, status,
			req->req.actual, req->req.length);

	/* don't modify queue heads during completion callback */
	ep->stopped = 1;
	spin_unlock (&dev->lock);
	req->req.complete (&ep->ep, &req->req);
	spin_lock (&dev->lock);
	ep->stopped = stopped;
}

/*-------------------------------------------------------------------------*/

/* usb_ep_ops.queue: validate the request, then start it immediately
 * (DMA or PIO) when the endpoint is idle, else chain it on the queue.
 */
static int
net2280_queue (struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
{
	struct net2280_request	*req;
	struct net2280_ep	*ep;
	struct net2280		*dev;
	unsigned long		flags;

	/* we always require a cpu-view buffer, so that we can
	 * always use pio (as fallback or whatever).
 */
	req = container_of (_req, struct net2280_request, req);
	if (!_req || !_req->complete || !_req->buf
			|| !list_empty (&req->queue))
		return -EINVAL;
	if (_req->length > (~0 & DMA_BYTE_COUNT_MASK))
		return -EDOM;
	ep = container_of (_ep, struct net2280_ep, ep);
	if (!_ep || (!ep->desc && ep->num != 0))
		return -EINVAL;
	dev = ep->dev;
	if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;

	/* FIXME implement PIO fallback for ZLPs with DMA */
	if (ep->dma && _req->length == 0)
		return -EOPNOTSUPP;

	/* set up dma mapping in case the caller didn't */
	if (ep->dma) {
		int ret;

		ret = usb_gadget_map_request(&dev->gadget, _req,
				ep->is_in);
		if (ret)
			return ret;
	}

#if 0
	VDEBUG (dev, "%s queue req %p, len %d buf %p\n",
			_ep->name, _req, _req->length, _req->buf);
#endif

	spin_lock_irqsave (&dev->lock, flags);

	_req->status = -EINPROGRESS;
	_req->actual = 0;

	/* kickstart this i/o queue? */
	if (list_empty (&ep->queue) && !ep->stopped) {
		/* use DMA if the endpoint supports it, else pio */
		if (ep->dma)
			start_dma (ep, req);
		else {
			/* maybe there's no control data, just status ack */
			if (ep->num == 0 && _req->length == 0) {
				allow_status (ep);
				done (ep, req, 0);
				VDEBUG (dev, "%s status ack\n", ep->ep.name);
				goto done;
			}

			/* PIO ... stuff the fifo, or unblock it.
 */
			if (ep->is_in)
				write_fifo (ep, _req);
			else if (list_empty (&ep->queue)) {
				u32	s;

				/* OUT FIFO might have packet(s) buffered */
				s = readl (&ep->regs->ep_stat);
				if ((s & (1 << FIFO_EMPTY)) == 0) {
					/* note:  _req->short_not_ok is
					 * ignored here since PIO _always_
					 * stops queue advance here, and
					 * _req->status doesn't change for
					 * short reads (only _req->actual)
					 */
					if (read_fifo (ep, req)) {
						done (ep, req, 0);
						if (ep->num == 0)
							allow_status (ep);
						/* don't queue it */
						req = NULL;
					} else
						s = readl (&ep->regs->ep_stat);
				}

				/* don't NAK, let the fifo fill */
				if (req && (s & (1 << NAK_OUT_PACKETS)))
					writel ((1 << CLEAR_NAK_OUT_PACKETS),
							&ep->regs->ep_rsp);
			}
		}

	} else if (ep->dma) {
		int	valid = 1;

		if (ep->is_in) {
			int	expect;

			/* preventing magic zlps is per-engine state, not
			 * per-transfer; irq logic must recover hiccups.
			 */
			expect = likely (req->req.zero
				|| (req->req.length % ep->ep.maxpacket) != 0);
			if (expect != ep->in_fifo_validate)
				valid = 0;
		}
		queue_dma (ep, req, valid);

	} /* else the irq handler advances the queue.
 */
	ep->responded = 1;
	if (req)
		list_add_tail (&req->queue, &ep->queue);
done:
	spin_unlock_irqrestore (&dev->lock, flags);

	/* pci writes may still be posted */
	return 0;
}

/* complete a request whose byte count the DMA engine reported;
 * dmacount holds the residue (bytes NOT transferred).
 */
static inline void
dma_done (
	struct net2280_ep *ep,
	struct net2280_request *req,
	u32 dmacount,
	int status
)
{
	req->req.actual = req->req.length - (DMA_BYTE_COUNT_MASK & dmacount);
	done (ep, req, status);
}

static void restart_dma (struct net2280_ep *ep);

/* walk the queue head-first, retiring requests whose descriptors the
 * DMA engine has marked complete (VALID_BIT cleared).
 */
static void scan_dma_completions (struct net2280_ep *ep)
{
	/* only look at descriptors that were "naturally" retired,
	 * so fifo and list head state won't matter
	 */
	while (!list_empty (&ep->queue)) {
		struct net2280_request	*req;
		u32			tmp;

		req = list_entry (ep->queue.next,
				struct net2280_request, queue);
		if (!req->valid)
			break;
		rmb ();
		tmp = le32_to_cpup (&req->td->dmacount);
		if ((tmp & (1 << VALID_BIT)) != 0)
			break;

		/* SHORT_PACKET_TRANSFERRED_INTERRUPT handles "usb-short"
		 * cases where DMA must be aborted; this code handles
		 * all non-abort DMA completions.
		 */
		if (unlikely (req->td->dmadesc == 0)) {
			/* paranoia */
			tmp = readl (&ep->dma->dmacount);
			if (tmp & DMA_BYTE_COUNT_MASK)
				break;
			/* single transfer mode */
			dma_done (ep, req, tmp, 0);
			break;
		} else if (!ep->is_in
				&& (req->req.length % ep->ep.maxpacket) != 0) {
			tmp = readl (&ep->regs->ep_stat);

			/* AVOID TROUBLE HERE by not issuing short reads from
			 * your gadget driver.  That helps avoids errata 0121,
			 * 0122, and 0124; not all cases trigger the warning.
			 */
			if ((tmp & (1 << NAK_OUT_PACKETS)) == 0) {
				WARNING (ep->dev, "%s lost packet sync!\n",
						ep->ep.name);
				req->req.status = -EOVERFLOW;
			} else if ((tmp = readl (&ep->regs->ep_avail)) != 0) {
				/* fifo gets flushed later */
				ep->out_overflow = 1;
				DEBUG (ep->dev, "%s dma, discard %d len %d\n",
						ep->ep.name, tmp,
						req->req.length);
				req->req.status = -EOVERFLOW;
			}
		}
		dma_done (ep, req, tmp, 0);
	}
}

/* resume DMA on a non-empty queue after a completion or hiccup; with
 * chaining enabled, re-validate descriptors whose zlp mode matches the
 * head and force a stop at the first mismatch.
 */
static void restart_dma (struct net2280_ep *ep)
{
	struct net2280_request	*req;
	u32			dmactl = dmactl_default;

	if (ep->stopped)
		return;
	req = list_entry (ep->queue.next, struct net2280_request, queue);

	if (!use_dma_chaining) {
		start_dma (ep, req);
		return;
	}

	/* the 2280 will be processing the queue unless queue hiccups after
	 * the previous transfer:
	 *  IN:   wanted automagic zlp, head doesn't (or vice versa)
	 *        DMA_FIFO_VALIDATE doesn't init from dma descriptors.
	 *  OUT:  was "usb-short", we must restart.
	 */
	if (ep->is_in && !req->valid) {
		struct net2280_request	*entry, *prev = NULL;
		int			reqmode, done = 0;

		DEBUG (ep->dev, "%s dma hiccup td %p\n", ep->ep.name, req->td);
		ep->in_fifo_validate = likely (req->req.zero
			|| (req->req.length % ep->ep.maxpacket) != 0);
		if (ep->in_fifo_validate)
			dmactl |= (1 << DMA_FIFO_VALIDATE);
		list_for_each_entry (entry, &ep->queue, queue) {
			__le32		dmacount;

			if (entry == req)
				continue;
			dmacount = entry->td->dmacount;
			if (!done) {
				reqmode = likely (entry->req.zero
					|| (entry->req.length
						% ep->ep.maxpacket) != 0);
				if (reqmode == ep->in_fifo_validate) {
					entry->valid = 1;
					dmacount |= valid_bit;
					entry->td->dmacount = dmacount;
					prev = entry;
					continue;
				} else {
					/* force a hiccup */
					/* NOTE(review): prev is NULL here if the
					 * first non-head entry already mismatches
					 * - relies on the head matching; verify
					 */
					prev->td->dmacount |= dma_done_ie;
					done = 1;
				}
			}

			/* walk the rest of the queue so unlinks behave */
			entry->valid = 0;
			dmacount &= ~valid_bit;
			entry->td->dmacount = dmacount;
			prev = entry;
		}
	}

	writel (0, &ep->dma->dmactl);
	start_queue (ep, dmactl, req->td_dma);
}

/* stop the engine: DMA_ABORT if a transfer is queued, plain stop
 * otherwise, then reap whatever completed.
 */
static void abort_dma (struct net2280_ep *ep)
{
	/* abort the current transfer */
	if (likely (!list_empty (&ep->queue))) {
		/* FIXME work around errata 0121, 0122, 0124 */
		writel ((1 << DMA_ABORT), &ep->dma->dmastat);
		spin_stop_dma (ep->dma);
	} else
		stop_dma (ep->dma);
	scan_dma_completions (ep);
}

/* dequeue ALL requests */
static void nuke (struct net2280_ep *ep)
{
	struct net2280_request	*req;

	/* called with spinlock held */
	ep->stopped = 1;
	if (ep->dma)
		abort_dma (ep);
	while (!list_empty (&ep->queue)) {
		req = list_entry (ep->queue.next,
				struct net2280_request,
				queue);
		done (ep, req, -ESHUTDOWN);
	}
}

/* dequeue JUST ONE request */
static int net2280_dequeue (struct usb_ep *_ep, struct usb_request *_req)
{
	struct net2280_ep	*ep;
	struct net2280_request	*req;
	unsigned long		flags;
	u32			dmactl;
	int			stopped;

	ep = container_of (_ep, struct net2280_ep, ep);
	if (!_ep || (!ep->desc && ep->num != 0) || !_req)
		return -EINVAL;

	spin_lock_irqsave (&ep->dev->lock, flags);
	stopped = ep->stopped;

	/* quiesce dma while we patch the queue */
	dmactl = 0;
	ep->stopped = 1;
	if (ep->dma) {
		dmactl = readl (&ep->dma->dmactl);
		/* WARNING erratum 0127 may kick in ... */
		stop_dma (ep->dma);
		scan_dma_completions (ep);
	}

	/* make sure it's still queued on this endpoint */
	list_for_each_entry (req, &ep->queue, queue) {
		if (&req->req == _req)
			break;
	}
	if (&req->req != _req) {
		spin_unlock_irqrestore (&ep->dev->lock, flags);
		return -EINVAL;
	}

	/* queue head may be partially complete.
 */
	if (ep->queue.next == &req->queue) {
		if (ep->dma) {
			DEBUG (ep->dev, "unlink (%s) dma\n", _ep->name);
			_req->status = -ECONNRESET;
			abort_dma (ep);
			if (likely (ep->queue.next == &req->queue)) {
				// NOTE: misreports single-transfer mode
				req->td->dmacount = 0;	/* invalidate */
				dma_done (ep, req,
					readl (&ep->dma->dmacount),
					-ECONNRESET);
			}
		} else {
			DEBUG (ep->dev, "unlink (%s) pio\n", _ep->name);
			done (ep, req, -ECONNRESET);
		}
		req = NULL;

	/* patch up hardware chaining data */
	} else if (ep->dma && use_dma_chaining) {
		if (req->queue.prev == ep->queue.next) {
			/* unlinking the second entry: repoint the engine
			 * directly at this request's successor
			 */
			writel (le32_to_cpu (req->td->dmadesc),
				&ep->dma->dmadesc);
			if (req->td->dmacount & dma_done_ie)
				writel (readl (&ep->dma->dmacount)
						| le32_to_cpu(dma_done_ie),
					&ep->dma->dmacount);
		} else {
			struct net2280_request	*prev;

			/* splice the chain around the unlinked request */
			prev = list_entry (req->queue.prev,
				struct net2280_request, queue);
			prev->td->dmadesc = req->td->dmadesc;
			if (req->td->dmacount & dma_done_ie)
				prev->td->dmacount |= dma_done_ie;
		}
	}

	if (req)
		done (ep, req, -ECONNRESET);
	ep->stopped = stopped;

	if (ep->dma) {
		/* turn off dma on inactive queues */
		if (list_empty (&ep->queue))
			stop_dma (ep->dma);
		else if (!ep->stopped) {
			/* resume current request, or start new one */
			if (req)
				writel (dmactl, &ep->dma->dmactl);
			else
				start_dma (ep, list_entry (ep->queue.next,
					struct net2280_request, queue));
		}
	}

	spin_unlock_irqrestore (&ep->dev->lock, flags);
	return 0;
}

/*-------------------------------------------------------------------------*/

static int net2280_fifo_status (struct usb_ep *_ep);

/* set or clear an endpoint halt; "wedged" additionally latches the halt
 * so only a SetConfiguration/SetInterface-level reset clears it.
 */
static int
net2280_set_halt_and_wedge(struct usb_ep *_ep, int value, int wedged)
{
	struct net2280_ep	*ep;
	unsigned long		flags;
	int			retval = 0;

	ep = container_of (_ep, struct net2280_ep, ep);
	if (!_ep || (!ep->desc && ep->num != 0))
		return -EINVAL;
	if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;
	if (ep->desc /* not ep0 */ && (ep->desc->bmAttributes & 0x03)
						== USB_ENDPOINT_XFER_ISOC)
		return -EINVAL;

	spin_lock_irqsave (&ep->dev->lock, flags);
	if (!list_empty (&ep->queue))
		retval = -EAGAIN;
	else if (ep->is_in && value && net2280_fifo_status (_ep) != 0)
		retval = -EAGAIN;
	else {
		VDEBUG (ep->dev, "%s %s %s\n", _ep->name,
				value ? "set" : "clear",
				wedged ? "wedge" : "halt");
		/* set/clear, then synch memory views with the device */
		if (value) {
			if (ep->num == 0)
				ep->dev->protocol_stall = 1;
			else
				set_halt (ep);
			if (wedged)
				ep->wedged = 1;
		} else {
			clear_halt (ep);
			ep->wedged = 0;
		}
		(void) readl (&ep->regs->ep_rsp);
	}
	spin_unlock_irqrestore (&ep->dev->lock, flags);

	return retval;
}

/* usb_ep_ops.set_halt */
static int
net2280_set_halt(struct usb_ep *_ep, int value)
{
	return net2280_set_halt_and_wedge(_ep, value, 0);
}

/* usb_ep_ops.set_wedge (never valid on ep0) */
static int
net2280_set_wedge(struct usb_ep *_ep)
{
	if (!_ep || _ep->name == ep0name)
		return -EINVAL;
	return net2280_set_halt_and_wedge(_ep, 1, 1);
}

/* usb_ep_ops.fifo_status: bytes pending in the fifo (for IN endpoints,
 * the free space is inverted to report bytes queued).
 */
static int
net2280_fifo_status (struct usb_ep *_ep)
{
	struct net2280_ep	*ep;
	u32			avail;

	ep = container_of (_ep, struct net2280_ep, ep);
	if (!_ep || (!ep->desc && ep->num != 0))
		return -ENODEV;
	if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;

	avail = readl (&ep->regs->ep_avail) & ((1 << 12) - 1);
	if (avail > ep->fifo_size)
		return -EOVERFLOW;
	if (ep->is_in)
		avail = ep->fifo_size - avail;
	return avail;
}

/* usb_ep_ops.fifo_flush: discard any buffered fifo data */
static void
net2280_fifo_flush (struct usb_ep *_ep)
{
	struct net2280_ep	*ep;

	ep = container_of (_ep, struct net2280_ep, ep);
	if (!_ep || (!ep->desc && ep->num != 0))
		return;
	if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
		return;

	writel ((1 << FIFO_FLUSH), &ep->regs->ep_stat);
	(void) readl (&ep->regs->ep_rsp);
}

/* endpoint operations exported to the gadget framework */
static const struct usb_ep_ops net2280_ep_ops = {
	.enable		= net2280_enable,
	.disable	= net2280_disable,

	.alloc_request	= net2280_alloc_request,
	.free_request	= net2280_free_request,

	.queue		= net2280_queue,
	.dequeue	= net2280_dequeue,

	.set_halt	= net2280_set_halt,
	.set_wedge	= net2280_set_wedge,
	.fifo_status	= net2280_fifo_status,
	.fifo_flush	= net2280_fifo_flush,
};
/*-------------------------------------------------------------------------*/

/* usb_gadget_ops.get_frame: current USB frame number (11 bits) */
static int net2280_get_frame (struct usb_gadget *_gadget)
{
	struct net2280		*dev;
	unsigned long		flags;
	u16			retval;

	if (!_gadget)
		return -ENODEV;
	dev = container_of (_gadget, struct net2280, gadget);
	spin_lock_irqsave (&dev->lock, flags);
	retval = get_idx_reg (dev->regs, REG_FRAME) & 0x03ff;
	spin_unlock_irqrestore (&dev->lock, flags);
	return retval;
}

/* usb_gadget_ops.wakeup: signal remote wakeup, only if the host
 * enabled it via SET_FEATURE (DEVICE_REMOTE_WAKEUP_ENABLE set).
 */
static int net2280_wakeup (struct usb_gadget *_gadget)
{
	struct net2280		*dev;
	u32			tmp;
	unsigned long		flags;

	if (!_gadget)
		return 0;
	dev = container_of (_gadget, struct net2280, gadget);

	spin_lock_irqsave (&dev->lock, flags);
	tmp = readl (&dev->usb->usbctl);
	if (tmp & (1 << DEVICE_REMOTE_WAKEUP_ENABLE))
		writel (1 << GENERATE_RESUME, &dev->usb->usbstat);
	spin_unlock_irqrestore (&dev->lock, flags);

	/* pci writes may still be posted */
	return 0;
}

/* usb_gadget_ops.set_selfpowered: report self/bus-powered in GET_STATUS */
static int net2280_set_selfpowered (struct usb_gadget *_gadget, int value)
{
	struct net2280		*dev;
	u32			tmp;
	unsigned long		flags;

	if (!_gadget)
		return 0;
	dev = container_of (_gadget, struct net2280, gadget);

	spin_lock_irqsave (&dev->lock, flags);
	tmp = readl (&dev->usb->usbctl);
	if (value)
		tmp |= (1 << SELF_POWERED_STATUS);
	else
		tmp &= ~(1 << SELF_POWERED_STATUS);
	writel (tmp, &dev->usb->usbctl);
	spin_unlock_irqrestore (&dev->lock, flags);

	return 0;
}

/* usb_gadget_ops.pullup: software connect/disconnect from the bus */
static int net2280_pullup(struct usb_gadget *_gadget, int is_on)
{
	struct net2280  *dev;
	u32             tmp;
	unsigned long   flags;

	if (!_gadget)
		return -ENODEV;
	dev = container_of (_gadget, struct net2280, gadget);

	spin_lock_irqsave (&dev->lock, flags);
	tmp = readl (&dev->usb->usbctl);
	dev->softconnect = (is_on != 0);
	if (is_on)
		tmp |= (1 << USB_DETECT_ENABLE);
	else
		tmp &= ~(1 << USB_DETECT_ENABLE);
	writel (tmp, &dev->usb->usbctl);
	spin_unlock_irqrestore (&dev->lock, flags);

	return 0;
}

static int net2280_start(struct usb_gadget *_gadget,
		struct usb_gadget_driver *driver);
static int net2280_stop(struct usb_gadget *_gadget,
		struct usb_gadget_driver *driver);

/* gadget-level operations exported to the UDC core */
static const
struct usb_gadget_ops net2280_ops = {
	.get_frame	= net2280_get_frame,
	.wakeup		= net2280_wakeup,
	.set_selfpowered = net2280_set_selfpowered,
	.pullup		= net2280_pullup,
	.udc_start	= net2280_start,
	.udc_stop	= net2280_stop,
};

/*-------------------------------------------------------------------------*/

#ifdef	CONFIG_USB_GADGET_DEBUG_FILES

/* FIXME move these into procfs, and use seq_file.
 * Sysfs _still_ doesn't behave for arbitrarily sized files,
 * and also doesn't help products using this with 2.4 kernels.
 */

/* "function" sysfs attribute */
static ssize_t
show_function (struct device *_dev, struct device_attribute *attr, char *buf)
{
	struct net2280	*dev = dev_get_drvdata (_dev);

	if (!dev->driver
			|| !dev->driver->function
			|| strlen (dev->driver->function) > PAGE_SIZE)
		return 0;
	return scnprintf (buf, PAGE_SIZE, "%s\n", dev->driver->function);
}
static DEVICE_ATTR (function, S_IRUGO, show_function, NULL);

/* "registers" sysfs attribute: dump controller, usb, endpoint and dma
 * register state for debugging.
 */
static ssize_t net2280_show_registers(struct device *_dev,
				struct device_attribute *attr, char *buf)
{
	struct net2280		*dev;
	char			*next;
	unsigned		size, t;
	unsigned long		flags;
	int			i;
	u32			t1, t2;
	const char		*s;

	dev = dev_get_drvdata (_dev);
	next = buf;
	size = PAGE_SIZE;
	spin_lock_irqsave (&dev->lock, flags);

	if (dev->driver)
		s = dev->driver->driver.name;
	else
		s = "(none)";

	/* Main Control Registers */
	t = scnprintf (next, size, "%s version " DRIVER_VERSION
			", chiprev %04x, dma %s\n\n"
			"devinit %03x fifoctl %08x gadget '%s'\n"
			"pci irqenb0 %02x irqenb1 %08x "
			"irqstat0 %04x irqstat1 %08x\n",
			driver_name, dev->chiprev,
			use_dma
				? (use_dma_chaining ?
					"chaining" : "enabled")
				: "disabled",
			readl (&dev->regs->devinit),
			readl (&dev->regs->fifoctl),
			s,
			readl (&dev->regs->pciirqenb0),
			readl (&dev->regs->pciirqenb1),
			readl (&dev->regs->irqstat0),
			readl (&dev->regs->irqstat1));
	size -= t;
	next += t;

	/* USB Control Registers */
	t1 = readl (&dev->usb->usbctl);
	t2 = readl (&dev->usb->usbstat);
	if (t1 & (1 << VBUS_PIN)) {
		if (t2 & (1 << HIGH_SPEED))
			s = "high speed";
		else if (dev->gadget.speed == USB_SPEED_UNKNOWN)
			s = "powered";
		else
			s = "full speed";
		/* full speed bit (6) not working?? */
	} else
		s = "not attached";
	t = scnprintf (next, size,
			"stdrsp %08x usbctl %08x usbstat %08x "
				"addr 0x%02x (%s)\n",
			readl (&dev->usb->stdrsp), t1, t2,
			readl (&dev->usb->ouraddr), s);
	size -= t;
	next += t;

	/* PCI Master Control Registers */

	/* DMA Control Registers */

	/* Configurable EP Control Registers */
	for (i = 0; i < 7; i++) {
		struct net2280_ep	*ep;

		ep = &dev->ep [i];
		if (i && !ep->desc)
			continue;

		t1 = readl (&ep->regs->ep_cfg);
		t2 = readl (&ep->regs->ep_rsp) & 0xff;
		t = scnprintf (next, size,
				"\n%s\tcfg %05x rsp (%02x) %s%s%s%s%s%s%s%s"
					"irqenb %02x\n",
				ep->ep.name, t1, t2,
				(t2 & (1 << CLEAR_NAK_OUT_PACKETS))
					? "NAK " : "",
				(t2 & (1 << CLEAR_EP_HIDE_STATUS_PHASE))
					? "hide " : "",
				(t2 & (1 << CLEAR_EP_FORCE_CRC_ERROR))
					? "CRC " : "",
				(t2 & (1 << CLEAR_INTERRUPT_MODE))
					? "interrupt " : "",
				(t2 & (1<<CLEAR_CONTROL_STATUS_PHASE_HANDSHAKE))
					? "status " : "",
				(t2 & (1 << CLEAR_NAK_OUT_PACKETS_MODE))
					? "NAKmode " : "",
				(t2 & (1 << CLEAR_ENDPOINT_TOGGLE))
					? "DATA1 " : "DATA0 ",
				(t2 & (1 << CLEAR_ENDPOINT_HALT))
					? "HALT " : "",
				readl (&ep->regs->ep_irqenb));
		size -= t;
		next += t;

		t = scnprintf (next, size,
				"\tstat %08x avail %04x "
				"(ep%d%s-%s)%s\n",
				readl (&ep->regs->ep_stat),
				readl (&ep->regs->ep_avail),
				t1 & 0x0f, DIR_STRING (t1),
				type_string (t1 >> 8),
				ep->stopped ? "*" : "");
		size -= t;
		next += t;

		if (!ep->dma)
			continue;

		t = scnprintf (next, size,
				"  dma\tctl %08x stat %08x count %08x\n"
				"\taddr %08x desc %08x\n",
				readl (&ep->dma->dmactl),
				readl (&ep->dma->dmastat),
				readl (&ep->dma->dmacount),
				readl (&ep->dma->dmaaddr),
				readl (&ep->dma->dmadesc));
		size -= t;
		next += t;
	}

	/* Indexed Registers */
		// none yet

	/* Statistics */
	t = scnprintf (next, size, "\nirqs:  ");
	size -= t;
	next += t;
	for (i = 0; i < 7; i++) {
		struct net2280_ep	*ep;

		ep = &dev->ep [i];
		if (i && !ep->irqs)
			continue;
		t = scnprintf (next, size, " %s/%lu", ep->ep.name, ep->irqs);
		size -= t;
		next += t;

	}
	t = scnprintf (next, size, "\n");
	size -= t;
	next += t;

	spin_unlock_irqrestore (&dev->lock, flags);

	return PAGE_SIZE - size;
}
static DEVICE_ATTR(registers, S_IRUGO, net2280_show_registers, NULL);

/* "queues" sysfs attribute: dump every endpoint's request queue,
 * including descriptor state for dma-capable endpoints.
 */
static ssize_t
show_queues (struct device *_dev, struct device_attribute *attr, char *buf)
{
	struct net2280		*dev;
	char			*next;
	unsigned		size;
	unsigned long		flags;
	int			i;

	dev = dev_get_drvdata (_dev);
	next = buf;
	size = PAGE_SIZE;
	spin_lock_irqsave (&dev->lock, flags);

	for (i = 0; i < 7; i++) {
		struct net2280_ep		*ep = &dev->ep [i];
		struct net2280_request		*req;
		int				t;

		if (i != 0) {
			const struct usb_endpoint_descriptor	*d;

			d = ep->desc;
			if (!d)
				continue;
			t = d->bEndpointAddress;
			t = scnprintf (next, size,
				"\n%s (ep%d%s-%s) max %04x %s fifo %d\n",
				ep->ep.name, t & USB_ENDPOINT_NUMBER_MASK,
				(t & USB_DIR_IN) ? "in" : "out",
				({ char *val;
				 switch (d->bmAttributes & 0x03) {
				 case USB_ENDPOINT_XFER_BULK:
					val = "bulk"; break;
				 case USB_ENDPOINT_XFER_INT:
					val = "intr"; break;
				 default:
					val = "iso"; break;
				 }; val; }),
				usb_endpoint_maxp (d) & 0x1fff,
				ep->dma ? "dma" : "pio", ep->fifo_size
				);
		} else /* ep0 should only have one transfer queued */
			t = scnprintf (next, size, "ep0 max 64 pio %s\n",
					ep->is_in ? "in" : "out");
		if (t <= 0 || t > size)
			goto done;
		size -= t;
		next += t;

		if (list_empty (&ep->queue)) {
			t = scnprintf (next, size, "\t(nothing queued)\n");
			if (t <= 0 || t > size)
				goto done;
			size -= t;
			next += t;
			continue;
		}
		list_for_each_entry (req, &ep->queue, queue) {
			if (ep->dma && req->td_dma == readl (&ep->dma->dmadesc))
				t = scnprintf (next, size,
					"\treq %p len %d/%d "
					"buf %p (dmacount %08x)\n",
					&req->req, req->req.actual,
					req->req.length, req->req.buf,
					readl (&ep->dma->dmacount));
			else
				t = scnprintf (next, size,
					"\treq %p len %d/%d buf %p\n",
					&req->req, req->req.actual,
					req->req.length, req->req.buf);
			if (t <= 0 || t > size)
				goto done;
			size -= t;
			next += t;

			if (ep->dma) {
				struct net2280_dma	*td;

				td = req->td;
				t = scnprintf (next, size, "\t    td %08x "
					" count %08x buf %08x desc %08x\n",
					(u32) req->td_dma,
					le32_to_cpu (td->dmacount),
					le32_to_cpu (td->dmaaddr),
					le32_to_cpu (td->dmadesc));
				if (t <= 0 || t > size)
					goto done;
				size -= t;
				next += t;
			}
		}
	}

done:
	spin_unlock_irqrestore (&dev->lock, flags);
	return PAGE_SIZE - size;
}
static DEVICE_ATTR (queues, S_IRUGO, show_queues, NULL);


#else

#define device_create_file(a,b)	(0)
#define device_remove_file(a,b)	do { } while (0)

#endif

/*-------------------------------------------------------------------------*/

/* another driver-specific mode might be a request type doing dma
 * to/from another device fifo instead of to/from memory.
 */

/* partition the configurable fifo memory and publish the matching
 * endpoint list; mode selects how ep-a..ep-d share it.
 */
static void set_fifo_mode (struct net2280 *dev, int mode)
{
	/* keeping high bits preserves BAR2 */
	writel ((0xffff << PCI_BASE2_RANGE) | mode, &dev->regs->fifoctl);

	/* always ep-{a,b,e,f} ...
	   maybe not ep-c or ep-d */
	INIT_LIST_HEAD (&dev->gadget.ep_list);
	list_add_tail (&dev->ep [1].ep.ep_list, &dev->gadget.ep_list);
	list_add_tail (&dev->ep [2].ep.ep_list, &dev->gadget.ep_list);
	switch (mode) {
	case 0:
		list_add_tail (&dev->ep [3].ep.ep_list, &dev->gadget.ep_list);
		list_add_tail (&dev->ep [4].ep.ep_list, &dev->gadget.ep_list);
		dev->ep [1].fifo_size = dev->ep [2].fifo_size = 1024;
		break;
	case 1:
		dev->ep [1].fifo_size = dev->ep [2].fifo_size = 2048;
		break;
	case 2:
		list_add_tail (&dev->ep [3].ep.ep_list, &dev->gadget.ep_list);
		dev->ep [1].fifo_size = 2048;
		dev->ep [2].fifo_size = 1024;
		break;
	}
	/* fifo sizes for ep0, ep-c, ep-d, ep-e, and ep-f never change */
	list_add_tail (&dev->ep [5].ep.ep_list, &dev->gadget.ep_list);
	list_add_tail (&dev->ep [6].ep.ep_list, &dev->gadget.ep_list);
}

/* keeping it simple:
 * - one bus driver, initted first;
 * - one function driver, initted second
 *
 * most of the work to support multiple net2280 controllers would
 * be to associate this gadget driver (yes?) with all of them, or
 * perhaps to bind specific drivers to specific devices.
 */

/* bring the chip to a known quiet state: irqs masked, dma aborted,
 * soft resets issued, fifo layout restored from the fifo_mode param.
 */
static void usb_reset (struct net2280 *dev)
{
	u32	tmp;

	dev->gadget.speed = USB_SPEED_UNKNOWN;
	(void) readl (&dev->usb->usbctl);

	net2280_led_init (dev);

	/* disable automatic responses, and irqs */
	writel (0, &dev->usb->stdrsp);
	writel (0, &dev->regs->pciirqenb0);
	writel (0, &dev->regs->pciirqenb1);

	/* clear old dma and irq state */
	for (tmp = 0; tmp < 4; tmp++) {
		struct net2280_ep	*ep = &dev->ep [tmp + 1];

		if (ep->dma)
			abort_dma (ep);
	}
	writel (~0, &dev->regs->irqstat0),
	writel (~(1 << SUSPEND_REQUEST_INTERRUPT), &dev->regs->irqstat1),

	/* reset, and enable pci */
	tmp = readl (&dev->regs->devinit)
		| (1 << PCI_ENABLE)
		| (1 << FIFO_SOFT_RESET)
		| (1 << USB_SOFT_RESET)
		| (1 << M8051_RESET);
	writel (tmp, &dev->regs->devinit);

	/* standard fifo and endpoint allocations */
	set_fifo_mode (dev, (fifo_mode <= 2) ? fifo_mode : 0);
}

/* rebuild all per-endpoint software state after usb_reset() */
static void usb_reinit (struct net2280 *dev)
{
	u32	tmp;
	int	init_dma;

	/* use_dma changes are ignored till next device re-init */
	init_dma = use_dma;

	/* basic endpoint init */
	for (tmp = 0; tmp < 7; tmp++) {
		struct net2280_ep	*ep = &dev->ep [tmp];

		ep->ep.name = ep_name [tmp];
		ep->dev = dev;
		ep->num = tmp;

		if (tmp > 0 && tmp <= 4) {
			/* ep-a..ep-d: big fifos, dma capable */
			ep->fifo_size = 1024;
			if (init_dma)
				ep->dma = &dev->dma [tmp - 1];
		} else
			ep->fifo_size = 64;
		ep->regs = &dev->epregs [tmp];
		ep_reset (dev->regs, ep);
	}
	dev->ep [0].ep.maxpacket = 64;
	dev->ep [5].ep.maxpacket = 64;
	dev->ep [6].ep.maxpacket = 64;

	dev->gadget.ep0 = &dev->ep [0].ep;
	dev->ep [0].stopped = 0;
	INIT_LIST_HEAD (&dev->gadget.ep0->ep_list);

	/* we want to prevent lowlevel/insecure access from the USB host,
	 * but erratum 0119 means this enable bit is ignored
	 */
	for (tmp = 0; tmp < 5; tmp++)
		writel (EP_DONTUSE, &dev->dep [tmp].dep_cfg);
}

/* program ep0's response policy, the hardware-handled standard requests,
 * and the irq enables needed for control traffic.
 */
static void ep0_start (struct net2280 *dev)
{
	writel (  (1 << CLEAR_EP_HIDE_STATUS_PHASE)
		| (1 << CLEAR_NAK_OUT_PACKETS)
		| (1 << CLEAR_CONTROL_STATUS_PHASE_HANDSHAKE)
		, &dev->epregs [0].ep_rsp);

	/*
	 * hardware optionally handles a bunch of standard requests
	 * that the API hides from drivers anyway.  have it do so.
	 * endpoint status/features are handled in software, to
	 * help pass tests for some dubious behavior.
	 */
	writel (  (1 << SET_TEST_MODE)
		| (1 << SET_ADDRESS)
		| (1 << DEVICE_SET_CLEAR_DEVICE_REMOTE_WAKEUP)
		| (1 << GET_DEVICE_STATUS)
		| (1 << GET_INTERFACE_STATUS)
		, &dev->usb->stdrsp);
	writel (  (1 << USB_ROOT_PORT_WAKEUP_ENABLE)
		| (1 << SELF_POWERED_USB_DEVICE)
		| (1 << REMOTE_WAKEUP_SUPPORT)
		| (dev->softconnect << USB_DETECT_ENABLE)
		| (1 << SELF_POWERED_STATUS)
		, &dev->usb->usbctl);

	/* enable irqs so we can see ep0 and general operation */
	writel (  (1 << SETUP_PACKET_INTERRUPT_ENABLE)
		| (1 << ENDPOINT_0_INTERRUPT_ENABLE)
		, &dev->regs->pciirqenb0);
	writel (  (1 << PCI_INTERRUPT_ENABLE)
		| (1 << PCI_MASTER_ABORT_RECEIVED_INTERRUPT_ENABLE)
		| (1 << PCI_TARGET_ABORT_RECEIVED_INTERRUPT_ENABLE)
		| (1 << PCI_RETRY_ABORT_INTERRUPT_ENABLE)
		| (1 << VBUS_INTERRUPT_ENABLE)
		| (1 << ROOT_PORT_RESET_INTERRUPT_ENABLE)
		| (1 << SUSPEND_REQUEST_CHANGE_INTERRUPT_ENABLE)
		, &dev->regs->pciirqenb1);

	/* don't leave any writes posted */
	(void) readl (&dev->usb->usbctl);
}

/* when a driver is successfully registered, it will receive
 * control requests including set_configuration(), which enables
 * non-control requests.  then usb traffic follows until a
 * disconnect is reported.  then a host may connect again, or
 * the driver might get unbound.
 */
static int net2280_start(struct usb_gadget *_gadget,
		struct usb_gadget_driver *driver)
{
	struct net2280		*dev;
	int			retval;
	unsigned		i;

	/* insist on high speed support from the driver, since
	 * (dev->usb->xcvrdiag & FORCE_FULL_SPEED_MODE)
	 * "must not be used in normal operation"
	 */
	if (!driver || driver->max_speed < USB_SPEED_HIGH
			|| !driver->setup)
		return -EINVAL;

	dev = container_of (_gadget, struct net2280, gadget);

	for (i = 0; i < 7; i++)
		dev->ep [i].irqs = 0;

	/* hook up the driver ...
*/ dev->softconnect = 1; driver->driver.bus = NULL; dev->driver = driver; retval = device_create_file (&dev->pdev->dev, &dev_attr_function); if (retval) goto err_unbind; retval = device_create_file (&dev->pdev->dev, &dev_attr_queues); if (retval) goto err_func; /* Enable force-full-speed testing mode, if desired */ if (full_speed) writel(1 << FORCE_FULL_SPEED_MODE, &dev->usb->xcvrdiag); /* ... then enable host detection and ep0; and we're ready * for set_configuration as well as eventual disconnect. */ net2280_led_active (dev, 1); ep0_start (dev); DEBUG (dev, "%s ready, usbctl %08x stdrsp %08x\n", driver->driver.name, readl (&dev->usb->usbctl), readl (&dev->usb->stdrsp)); /* pci writes may still be posted */ return 0; err_func: device_remove_file (&dev->pdev->dev, &dev_attr_function); err_unbind: dev->driver = NULL; return retval; } static void stop_activity (struct net2280 *dev, struct usb_gadget_driver *driver) { int i; /* don't disconnect if it's not connected */ if (dev->gadget.speed == USB_SPEED_UNKNOWN) driver = NULL; /* stop hardware; prevent new request submissions; * and kill any outstanding requests. 
*/ usb_reset (dev); for (i = 0; i < 7; i++) nuke (&dev->ep [i]); /* report disconnect; the driver is already quiesced */ if (driver) { spin_unlock(&dev->lock); driver->disconnect(&dev->gadget); spin_lock(&dev->lock); } usb_reinit (dev); } static int net2280_stop(struct usb_gadget *_gadget, struct usb_gadget_driver *driver) { struct net2280 *dev; unsigned long flags; dev = container_of (_gadget, struct net2280, gadget); spin_lock_irqsave (&dev->lock, flags); stop_activity (dev, driver); spin_unlock_irqrestore (&dev->lock, flags); dev->driver = NULL; net2280_led_active (dev, 0); /* Disable full-speed test mode */ writel(0, &dev->usb->xcvrdiag); device_remove_file (&dev->pdev->dev, &dev_attr_function); device_remove_file (&dev->pdev->dev, &dev_attr_queues); DEBUG (dev, "unregistered driver '%s'\n", driver->driver.name); return 0; } /*-------------------------------------------------------------------------*/ /* handle ep0, ep-e, ep-f with 64 byte packets: packet per irq. * also works for dma-capable endpoints, in pio mode or just * to manually advance the queue after short OUT transfers. */ static void handle_ep_small (struct net2280_ep *ep) { struct net2280_request *req; u32 t; /* 0 error, 1 mid-data, 2 done */ int mode = 1; if (!list_empty (&ep->queue)) req = list_entry (ep->queue.next, struct net2280_request, queue); else req = NULL; /* ack all, and handle what we care about */ t = readl (&ep->regs->ep_stat); ep->irqs++; #if 0 VDEBUG (ep->dev, "%s ack ep_stat %08x, req %p\n", ep->ep.name, t, req ? &req->req : 0); #endif if (!ep->is_in || ep->dev->pdev->device == 0x2280) writel (t & ~(1 << NAK_OUT_PACKETS), &ep->regs->ep_stat); else /* Added for 2282 */ writel (t, &ep->regs->ep_stat); /* for ep0, monitor token irqs to catch data stage length errors * and to synchronize on status. * * also, to defer reporting of protocol stalls ... here's where * data or status first appears, handling stalls here should never * cause trouble on the host side.. 
* * control requests could be slightly faster without token synch for * status, but status can jam up that way. */ if (unlikely (ep->num == 0)) { if (ep->is_in) { /* status; stop NAKing */ if (t & (1 << DATA_OUT_PING_TOKEN_INTERRUPT)) { if (ep->dev->protocol_stall) { ep->stopped = 1; set_halt (ep); } if (!req) allow_status (ep); mode = 2; /* reply to extra IN data tokens with a zlp */ } else if (t & (1 << DATA_IN_TOKEN_INTERRUPT)) { if (ep->dev->protocol_stall) { ep->stopped = 1; set_halt (ep); mode = 2; } else if (ep->responded && !req && !ep->stopped) write_fifo (ep, NULL); } } else { /* status; stop NAKing */ if (t & (1 << DATA_IN_TOKEN_INTERRUPT)) { if (ep->dev->protocol_stall) { ep->stopped = 1; set_halt (ep); } mode = 2; /* an extra OUT token is an error */ } else if (((t & (1 << DATA_OUT_PING_TOKEN_INTERRUPT)) && req && req->req.actual == req->req.length) || (ep->responded && !req)) { ep->dev->protocol_stall = 1; set_halt (ep); ep->stopped = 1; if (req) done (ep, req, -EOVERFLOW); req = NULL; } } } if (unlikely (!req)) return; /* manual DMA queue advance after short OUT */ if (likely (ep->dma)) { if (t & (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT)) { u32 count; int stopped = ep->stopped; /* TRANSFERRED works around OUT_DONE erratum 0112. * we expect (N <= maxpacket) bytes; host wrote M. * iff (M < N) we won't ever see a DMA interrupt. */ ep->stopped = 1; for (count = 0; ; t = readl (&ep->regs->ep_stat)) { /* any preceding dma transfers must finish. 
* dma handles (M >= N), may empty the queue */ scan_dma_completions (ep); if (unlikely (list_empty (&ep->queue) || ep->out_overflow)) { req = NULL; break; } req = list_entry (ep->queue.next, struct net2280_request, queue); /* here either (M < N), a "real" short rx; * or (M == N) and the queue didn't empty */ if (likely (t & (1 << FIFO_EMPTY))) { count = readl (&ep->dma->dmacount); count &= DMA_BYTE_COUNT_MASK; if (readl (&ep->dma->dmadesc) != req->td_dma) req = NULL; break; } udelay(1); } /* stop DMA, leave ep NAKing */ writel ((1 << DMA_ABORT), &ep->dma->dmastat); spin_stop_dma (ep->dma); if (likely (req)) { req->td->dmacount = 0; t = readl (&ep->regs->ep_avail); dma_done (ep, req, count, (ep->out_overflow || t) ? -EOVERFLOW : 0); } /* also flush to prevent erratum 0106 trouble */ if (unlikely (ep->out_overflow || (ep->dev->chiprev == 0x0100 && ep->dev->gadget.speed == USB_SPEED_FULL))) { out_flush (ep); ep->out_overflow = 0; } /* (re)start dma if needed, stop NAKing */ ep->stopped = stopped; if (!list_empty (&ep->queue)) restart_dma (ep); } else DEBUG (ep->dev, "%s dma ep_stat %08x ??\n", ep->ep.name, t); return; /* data packet(s) received (in the fifo, OUT) */ } else if (t & (1 << DATA_PACKET_RECEIVED_INTERRUPT)) { if (read_fifo (ep, req) && ep->num != 0) mode = 2; /* data packet(s) transmitted (IN) */ } else if (t & (1 << DATA_PACKET_TRANSMITTED_INTERRUPT)) { unsigned len; len = req->req.length - req->req.actual; if (len > ep->ep.maxpacket) len = ep->ep.maxpacket; req->req.actual += len; /* if we wrote it all, we're usually done */ if (req->req.actual == req->req.length) { if (ep->num == 0) { /* send zlps until the status stage */ } else if (!req->req.zero || len != ep->ep.maxpacket) mode = 2; } /* there was nothing to do ... 
*/ } else if (mode == 1) return; /* done */ if (mode == 2) { /* stream endpoints often resubmit/unlink in completion */ done (ep, req, 0); /* maybe advance queue to next request */ if (ep->num == 0) { /* NOTE: net2280 could let gadget driver start the * status stage later. since not all controllers let * them control that, the api doesn't (yet) allow it. */ if (!ep->stopped) allow_status (ep); req = NULL; } else { if (!list_empty (&ep->queue) && !ep->stopped) req = list_entry (ep->queue.next, struct net2280_request, queue); else req = NULL; if (req && !ep->is_in) stop_out_naking (ep); } } /* is there a buffer for the next packet? * for best streaming performance, make sure there is one. */ if (req && !ep->stopped) { /* load IN fifo with next packet (may be zlp) */ if (t & (1 << DATA_PACKET_TRANSMITTED_INTERRUPT)) write_fifo (ep, &req->req); } } static struct net2280_ep * get_ep_by_addr (struct net2280 *dev, u16 wIndex) { struct net2280_ep *ep; if ((wIndex & USB_ENDPOINT_NUMBER_MASK) == 0) return &dev->ep [0]; list_for_each_entry (ep, &dev->gadget.ep_list, ep.ep_list) { u8 bEndpointAddress; if (!ep->desc) continue; bEndpointAddress = ep->desc->bEndpointAddress; if ((wIndex ^ bEndpointAddress) & USB_DIR_IN) continue; if ((wIndex & 0x0f) == (bEndpointAddress & 0x0f)) return ep; } return NULL; } static void handle_stat0_irqs (struct net2280 *dev, u32 stat) { struct net2280_ep *ep; u32 num, scratch; /* most of these don't need individual acks */ stat &= ~(1 << INTA_ASSERTED); if (!stat) return; // DEBUG (dev, "irqstat0 %04x\n", stat); /* starting a control request? 
*/ if (unlikely (stat & (1 << SETUP_PACKET_INTERRUPT))) { union { u32 raw [2]; struct usb_ctrlrequest r; } u; int tmp; struct net2280_request *req; if (dev->gadget.speed == USB_SPEED_UNKNOWN) { if (readl (&dev->usb->usbstat) & (1 << HIGH_SPEED)) dev->gadget.speed = USB_SPEED_HIGH; else dev->gadget.speed = USB_SPEED_FULL; net2280_led_speed (dev, dev->gadget.speed); DEBUG(dev, "%s\n", usb_speed_string(dev->gadget.speed)); } ep = &dev->ep [0]; ep->irqs++; /* make sure any leftover request state is cleared */ stat &= ~(1 << ENDPOINT_0_INTERRUPT); while (!list_empty (&ep->queue)) { req = list_entry (ep->queue.next, struct net2280_request, queue); done (ep, req, (req->req.actual == req->req.length) ? 0 : -EPROTO); } ep->stopped = 0; dev->protocol_stall = 0; if (ep->dev->pdev->device == 0x2280) tmp = (1 << FIFO_OVERFLOW) | (1 << FIFO_UNDERFLOW); else tmp = 0; writel (tmp | (1 << TIMEOUT) | (1 << USB_STALL_SENT) | (1 << USB_IN_NAK_SENT) | (1 << USB_IN_ACK_RCVD) | (1 << USB_OUT_PING_NAK_SENT) | (1 << USB_OUT_ACK_SENT) | (1 << SHORT_PACKET_OUT_DONE_INTERRUPT) | (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT) | (1 << DATA_PACKET_RECEIVED_INTERRUPT) | (1 << DATA_PACKET_TRANSMITTED_INTERRUPT) | (1 << DATA_OUT_PING_TOKEN_INTERRUPT) | (1 << DATA_IN_TOKEN_INTERRUPT) , &ep->regs->ep_stat); u.raw [0] = readl (&dev->usb->setup0123); u.raw [1] = readl (&dev->usb->setup4567); cpu_to_le32s (&u.raw [0]); cpu_to_le32s (&u.raw [1]); tmp = 0; #define w_value le16_to_cpu(u.r.wValue) #define w_index le16_to_cpu(u.r.wIndex) #define w_length le16_to_cpu(u.r.wLength) /* ack the irq */ writel (1 << SETUP_PACKET_INTERRUPT, &dev->regs->irqstat0); stat ^= (1 << SETUP_PACKET_INTERRUPT); /* watch control traffic at the token level, and force * synchronization before letting the status stage happen. * FIXME ignore tokens we'll NAK, until driver responds. * that'll mean a lot less irqs for some drivers. 
*/ ep->is_in = (u.r.bRequestType & USB_DIR_IN) != 0; if (ep->is_in) { scratch = (1 << DATA_PACKET_TRANSMITTED_INTERRUPT) | (1 << DATA_OUT_PING_TOKEN_INTERRUPT) | (1 << DATA_IN_TOKEN_INTERRUPT); stop_out_naking (ep); } else scratch = (1 << DATA_PACKET_RECEIVED_INTERRUPT) | (1 << DATA_OUT_PING_TOKEN_INTERRUPT) | (1 << DATA_IN_TOKEN_INTERRUPT); writel (scratch, &dev->epregs [0].ep_irqenb); /* we made the hardware handle most lowlevel requests; * everything else goes uplevel to the gadget code. */ ep->responded = 1; switch (u.r.bRequest) { case USB_REQ_GET_STATUS: { struct net2280_ep *e; __le32 status; /* hw handles device and interface status */ if (u.r.bRequestType != (USB_DIR_IN|USB_RECIP_ENDPOINT)) goto delegate; if ((e = get_ep_by_addr (dev, w_index)) == NULL || w_length > 2) goto do_stall; if (readl (&e->regs->ep_rsp) & (1 << SET_ENDPOINT_HALT)) status = cpu_to_le32 (1); else status = cpu_to_le32 (0); /* don't bother with a request object! */ writel (0, &dev->epregs [0].ep_irqenb); set_fifo_bytecount (ep, w_length); writel ((__force u32)status, &dev->epregs [0].ep_data); allow_status (ep); VDEBUG (dev, "%s stat %02x\n", ep->ep.name, status); goto next_endpoints; } break; case USB_REQ_CLEAR_FEATURE: { struct net2280_ep *e; /* hw handles device features */ if (u.r.bRequestType != USB_RECIP_ENDPOINT) goto delegate; if (w_value != USB_ENDPOINT_HALT || w_length != 0) goto do_stall; if ((e = get_ep_by_addr (dev, w_index)) == NULL) goto do_stall; if (e->wedged) { VDEBUG(dev, "%s wedged, halt not cleared\n", ep->ep.name); } else { VDEBUG(dev, "%s clear halt\n", ep->ep.name); clear_halt(e); } allow_status (ep); goto next_endpoints; } break; case USB_REQ_SET_FEATURE: { struct net2280_ep *e; /* hw handles device features */ if (u.r.bRequestType != USB_RECIP_ENDPOINT) goto delegate; if (w_value != USB_ENDPOINT_HALT || w_length != 0) goto do_stall; if ((e = get_ep_by_addr (dev, w_index)) == NULL) goto do_stall; if (e->ep.name == ep0name) goto do_stall; set_halt (e); 
allow_status (ep); VDEBUG (dev, "%s set halt\n", ep->ep.name); goto next_endpoints; } break; default: delegate: VDEBUG (dev, "setup %02x.%02x v%04x i%04x l%04x " "ep_cfg %08x\n", u.r.bRequestType, u.r.bRequest, w_value, w_index, w_length, readl (&ep->regs->ep_cfg)); ep->responded = 0; spin_unlock (&dev->lock); tmp = dev->driver->setup (&dev->gadget, &u.r); spin_lock (&dev->lock); } /* stall ep0 on error */ if (tmp < 0) { do_stall: VDEBUG (dev, "req %02x.%02x protocol STALL; stat %d\n", u.r.bRequestType, u.r.bRequest, tmp); dev->protocol_stall = 1; } /* some in/out token irq should follow; maybe stall then. * driver must queue a request (even zlp) or halt ep0 * before the host times out. */ } #undef w_value #undef w_index #undef w_length next_endpoints: /* endpoint data irq ? */ scratch = stat & 0x7f; stat &= ~0x7f; for (num = 0; scratch; num++) { u32 t; /* do this endpoint's FIFO and queue need tending? */ t = 1 << num; if ((scratch & t) == 0) continue; scratch ^= t; ep = &dev->ep [num]; handle_ep_small (ep); } if (stat) DEBUG (dev, "unhandled irqstat0 %08x\n", stat); } #define DMA_INTERRUPTS ( \ (1 << DMA_D_INTERRUPT) \ | (1 << DMA_C_INTERRUPT) \ | (1 << DMA_B_INTERRUPT) \ | (1 << DMA_A_INTERRUPT)) #define PCI_ERROR_INTERRUPTS ( \ (1 << PCI_MASTER_ABORT_RECEIVED_INTERRUPT) \ | (1 << PCI_TARGET_ABORT_RECEIVED_INTERRUPT) \ | (1 << PCI_RETRY_ABORT_INTERRUPT)) static void handle_stat1_irqs (struct net2280 *dev, u32 stat) { struct net2280_ep *ep; u32 tmp, num, mask, scratch; /* after disconnect there's nothing else to do! */ tmp = (1 << VBUS_INTERRUPT) | (1 << ROOT_PORT_RESET_INTERRUPT); mask = (1 << HIGH_SPEED) | (1 << FULL_SPEED); /* VBUS disconnect is indicated by VBUS_PIN and VBUS_INTERRUPT set. * Root Port Reset is indicated by ROOT_PORT_RESET_INTERRUPT set and * both HIGH_SPEED and FULL_SPEED clear (as ROOT_PORT_RESET_INTERRUPT * only indicates a change in the reset state). 
*/ if (stat & tmp) { writel (tmp, &dev->regs->irqstat1); if ((((stat & (1 << ROOT_PORT_RESET_INTERRUPT)) && ((readl (&dev->usb->usbstat) & mask) == 0)) || ((readl (&dev->usb->usbctl) & (1 << VBUS_PIN)) == 0) ) && ( dev->gadget.speed != USB_SPEED_UNKNOWN)) { DEBUG (dev, "disconnect %s\n", dev->driver->driver.name); stop_activity (dev, dev->driver); ep0_start (dev); return; } stat &= ~tmp; /* vBUS can bounce ... one of many reasons to ignore the * notion of hotplug events on bus connect/disconnect! */ if (!stat) return; } /* NOTE: chip stays in PCI D0 state for now, but it could * enter D1 to save more power */ tmp = (1 << SUSPEND_REQUEST_CHANGE_INTERRUPT); if (stat & tmp) { writel (tmp, &dev->regs->irqstat1); if (stat & (1 << SUSPEND_REQUEST_INTERRUPT)) { if (dev->driver->suspend) dev->driver->suspend (&dev->gadget); if (!enable_suspend) stat &= ~(1 << SUSPEND_REQUEST_INTERRUPT); } else { if (dev->driver->resume) dev->driver->resume (&dev->gadget); /* at high speed, note erratum 0133 */ } stat &= ~tmp; } /* clear any other status/irqs */ if (stat) writel (stat, &dev->regs->irqstat1); /* some status we can just ignore */ if (dev->pdev->device == 0x2280) stat &= ~((1 << CONTROL_STATUS_INTERRUPT) | (1 << SUSPEND_REQUEST_INTERRUPT) | (1 << RESUME_INTERRUPT) | (1 << SOF_INTERRUPT)); else stat &= ~((1 << CONTROL_STATUS_INTERRUPT) | (1 << RESUME_INTERRUPT) | (1 << SOF_DOWN_INTERRUPT) | (1 << SOF_INTERRUPT)); if (!stat) return; // DEBUG (dev, "irqstat1 %08x\n", stat); /* DMA status, for ep-{a,b,c,d} */ scratch = stat & DMA_INTERRUPTS; stat &= ~DMA_INTERRUPTS; scratch >>= 9; for (num = 0; scratch; num++) { struct net2280_dma_regs __iomem *dma; tmp = 1 << num; if ((tmp & scratch) == 0) continue; scratch ^= tmp; ep = &dev->ep [num + 1]; dma = ep->dma; if (!dma) continue; /* clear ep's dma status */ tmp = readl (&dma->dmastat); writel (tmp, &dma->dmastat); /* chaining should stop on abort, short OUT from fifo, * or (stat0 codepath) short OUT transfer. 
*/ if (!use_dma_chaining) { if ((tmp & (1 << DMA_TRANSACTION_DONE_INTERRUPT)) == 0) { DEBUG (ep->dev, "%s no xact done? %08x\n", ep->ep.name, tmp); continue; } stop_dma (ep->dma); } /* OUT transfers terminate when the data from the * host is in our memory. Process whatever's done. * On this path, we know transfer's last packet wasn't * less than req->length. NAK_OUT_PACKETS may be set, * or the FIFO may already be holding new packets. * * IN transfers can linger in the FIFO for a very * long time ... we ignore that for now, accounting * precisely (like PIO does) needs per-packet irqs */ scan_dma_completions (ep); /* disable dma on inactive queues; else maybe restart */ if (list_empty (&ep->queue)) { if (use_dma_chaining) stop_dma (ep->dma); } else { tmp = readl (&dma->dmactl); if (!use_dma_chaining || (tmp & (1 << DMA_ENABLE)) == 0) restart_dma (ep); else if (ep->is_in && use_dma_chaining) { struct net2280_request *req; __le32 dmacount; /* the descriptor at the head of the chain * may still have VALID_BIT clear; that's * used to trigger changing DMA_FIFO_VALIDATE * (affects automagic zlp writes). */ req = list_entry (ep->queue.next, struct net2280_request, queue); dmacount = req->td->dmacount; dmacount &= cpu_to_le32 ( (1 << VALID_BIT) | DMA_BYTE_COUNT_MASK); if (dmacount && (dmacount & valid_bit) == 0) restart_dma (ep); } } ep->irqs++; } /* NOTE: there are other PCI errors we might usefully notice. * if they appear very often, here's where to try recovering. */ if (stat & PCI_ERROR_INTERRUPTS) { ERROR (dev, "pci dma error; stat %08x\n", stat); stat &= ~PCI_ERROR_INTERRUPTS; /* these are fatal errors, but "maybe" they won't * happen again ... 
*/ stop_activity (dev, dev->driver); ep0_start (dev); stat = 0; } if (stat) DEBUG (dev, "unhandled irqstat1 %08x\n", stat); } static irqreturn_t net2280_irq (int irq, void *_dev) { struct net2280 *dev = _dev; /* shared interrupt, not ours */ if (!(readl(&dev->regs->irqstat0) & (1 << INTA_ASSERTED))) return IRQ_NONE; spin_lock (&dev->lock); /* handle disconnect, dma, and more */ handle_stat1_irqs (dev, readl (&dev->regs->irqstat1)); /* control requests and PIO */ handle_stat0_irqs (dev, readl (&dev->regs->irqstat0)); spin_unlock (&dev->lock); return IRQ_HANDLED; } /*-------------------------------------------------------------------------*/ static void gadget_release (struct device *_dev) { struct net2280 *dev = dev_get_drvdata (_dev); kfree (dev); } /* tear down the binding between this driver and the pci device */ static void net2280_remove (struct pci_dev *pdev) { struct net2280 *dev = pci_get_drvdata (pdev); usb_del_gadget_udc(&dev->gadget); BUG_ON(dev->driver); /* then clean up the resources we allocated during probe() */ net2280_led_shutdown (dev); if (dev->requests) { int i; for (i = 1; i < 5; i++) { if (!dev->ep [i].dummy) continue; pci_pool_free (dev->requests, dev->ep [i].dummy, dev->ep [i].td_dma); } pci_pool_destroy (dev->requests); } if (dev->got_irq) free_irq (pdev->irq, dev); if (dev->regs) iounmap (dev->regs); if (dev->region) release_mem_region (pci_resource_start (pdev, 0), pci_resource_len (pdev, 0)); if (dev->enabled) pci_disable_device (pdev); device_remove_file (&pdev->dev, &dev_attr_registers); pci_set_drvdata (pdev, NULL); INFO (dev, "unbind\n"); } /* wrap this driver around the specified device, but * don't respond over USB until a gadget driver binds to us. 
*/ static int net2280_probe (struct pci_dev *pdev, const struct pci_device_id *id) { struct net2280 *dev; unsigned long resource, len; void __iomem *base = NULL; int retval, i; /* alloc, and start init */ dev = kzalloc (sizeof *dev, GFP_KERNEL); if (dev == NULL){ retval = -ENOMEM; goto done; } pci_set_drvdata (pdev, dev); spin_lock_init (&dev->lock); dev->pdev = pdev; dev->gadget.ops = &net2280_ops; dev->gadget.max_speed = USB_SPEED_HIGH; /* the "gadget" abstracts/virtualizes the controller */ dev->gadget.name = driver_name; /* now all the pci goodies ... */ if (pci_enable_device (pdev) < 0) { retval = -ENODEV; goto done; } dev->enabled = 1; /* BAR 0 holds all the registers * BAR 1 is 8051 memory; unused here (note erratum 0103) * BAR 2 is fifo memory; unused here */ resource = pci_resource_start (pdev, 0); len = pci_resource_len (pdev, 0); if (!request_mem_region (resource, len, driver_name)) { DEBUG (dev, "controller already in use\n"); retval = -EBUSY; goto done; } dev->region = 1; /* FIXME provide firmware download interface to put * 8051 code into the chip, e.g. to turn on PCI PM. */ base = ioremap_nocache (resource, len); if (base == NULL) { DEBUG (dev, "can't map memory\n"); retval = -EFAULT; goto done; } dev->regs = (struct net2280_regs __iomem *) base; dev->usb = (struct net2280_usb_regs __iomem *) (base + 0x0080); dev->pci = (struct net2280_pci_regs __iomem *) (base + 0x0100); dev->dma = (struct net2280_dma_regs __iomem *) (base + 0x0180); dev->dep = (struct net2280_dep_regs __iomem *) (base + 0x0200); dev->epregs = (struct net2280_ep_regs __iomem *) (base + 0x0300); /* put into initial config, link up all endpoints */ writel (0, &dev->usb->usbctl); usb_reset (dev); usb_reinit (dev); /* irq setup after old hardware is cleaned up */ if (!pdev->irq) { ERROR (dev, "No IRQ. 
Check PCI setup!\n"); retval = -ENODEV; goto done; } if (request_irq (pdev->irq, net2280_irq, IRQF_SHARED, driver_name, dev) != 0) { ERROR (dev, "request interrupt %d failed\n", pdev->irq); retval = -EBUSY; goto done; } dev->got_irq = 1; /* DMA setup */ /* NOTE: we know only the 32 LSBs of dma addresses may be nonzero */ dev->requests = pci_pool_create ("requests", pdev, sizeof (struct net2280_dma), 0 /* no alignment requirements */, 0 /* or page-crossing issues */); if (!dev->requests) { DEBUG (dev, "can't get request pool\n"); retval = -ENOMEM; goto done; } for (i = 1; i < 5; i++) { struct net2280_dma *td; td = pci_pool_alloc (dev->requests, GFP_KERNEL, &dev->ep [i].td_dma); if (!td) { DEBUG (dev, "can't get dummy %d\n", i); retval = -ENOMEM; goto done; } td->dmacount = 0; /* not VALID */ td->dmadesc = td->dmaaddr; dev->ep [i].dummy = td; } /* enable lower-overhead pci memory bursts during DMA */ writel ( (1 << DMA_MEMORY_WRITE_AND_INVALIDATE_ENABLE) // 256 write retries may not be enough... // | (1 << PCI_RETRY_ABORT_ENABLE) | (1 << DMA_READ_MULTIPLE_ENABLE) | (1 << DMA_READ_LINE_ENABLE) , &dev->pci->pcimstctl); /* erratum 0115 shouldn't appear: Linux inits PCI_LATENCY_TIMER */ pci_set_master (pdev); pci_try_set_mwi (pdev); /* ... also flushes any posted pci writes */ dev->chiprev = get_idx_reg (dev->regs, REG_CHIPREV) & 0xffff; /* done */ INFO (dev, "%s\n", driver_desc); INFO (dev, "irq %d, pci mem %p, chip rev %04x\n", pdev->irq, base, dev->chiprev); INFO (dev, "version: " DRIVER_VERSION "; dma %s\n", use_dma ? (use_dma_chaining ? "chaining" : "enabled") : "disabled"); retval = device_create_file (&pdev->dev, &dev_attr_registers); if (retval) goto done; retval = usb_add_gadget_udc_release(&pdev->dev, &dev->gadget, gadget_release); if (retval) goto done; return 0; done: if (dev) net2280_remove (pdev); return retval; } /* make sure the board is quiescent; otherwise it will continue * generating IRQs across the upcoming reboot. 
*/ static void net2280_shutdown (struct pci_dev *pdev) { struct net2280 *dev = pci_get_drvdata (pdev); /* disable IRQs */ writel (0, &dev->regs->pciirqenb0); writel (0, &dev->regs->pciirqenb1); /* disable the pullup so the host will think we're gone */ writel (0, &dev->usb->usbctl); /* Disable full-speed test mode */ writel(0, &dev->usb->xcvrdiag); } /*-------------------------------------------------------------------------*/ static const struct pci_device_id pci_ids [] = { { .class = ((PCI_CLASS_SERIAL_USB << 8) | 0xfe), .class_mask = ~0, .vendor = 0x17cc, .device = 0x2280, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, }, { .class = ((PCI_CLASS_SERIAL_USB << 8) | 0xfe), .class_mask = ~0, .vendor = 0x17cc, .device = 0x2282, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, }, { /* end: all zeroes */ } }; MODULE_DEVICE_TABLE (pci, pci_ids); /* pci driver glue; this is a "new style" PCI driver module */ static struct pci_driver net2280_pci_driver = { .name = (char *) driver_name, .id_table = pci_ids, .probe = net2280_probe, .remove = net2280_remove, .shutdown = net2280_shutdown, /* FIXME add power management support */ }; MODULE_DESCRIPTION (DRIVER_DESC); MODULE_AUTHOR ("David Brownell"); MODULE_LICENSE ("GPL"); static int __init init (void) { if (!use_dma) use_dma_chaining = 0; return pci_register_driver (&net2280_pci_driver); } module_init (init); static void __exit cleanup (void) { pci_unregister_driver (&net2280_pci_driver); } module_exit (cleanup);
gpl-2.0
drewx2/android_kernel_htc_dlx
fs/ufs/inode.c
4731
25705
/* * linux/fs/ufs/inode.c * * Copyright (C) 1998 * Daniel Pirkl <daniel.pirkl@email.cz> * Charles University, Faculty of Mathematics and Physics * * from * * linux/fs/ext2/inode.c * * Copyright (C) 1992, 1993, 1994, 1995 * Remy Card (card@masi.ibp.fr) * Laboratoire MASI - Institut Blaise Pascal * Universite Pierre et Marie Curie (Paris VI) * * from * * linux/fs/minix/inode.c * * Copyright (C) 1991, 1992 Linus Torvalds * * Goal-directed block allocation by Stephen Tweedie (sct@dcs.ed.ac.uk), 1993 * Big-endian to little-endian byte-swapping/bitmaps by * David S. Miller (davem@caip.rutgers.edu), 1995 */ #include <asm/uaccess.h> #include <linux/errno.h> #include <linux/fs.h> #include <linux/time.h> #include <linux/stat.h> #include <linux/string.h> #include <linux/mm.h> #include <linux/buffer_head.h> #include <linux/writeback.h> #include "ufs_fs.h" #include "ufs.h" #include "swab.h" #include "util.h" static u64 ufs_frag_map(struct inode *inode, sector_t frag, bool needs_lock); static int ufs_block_to_path(struct inode *inode, sector_t i_block, sector_t offsets[4]) { struct ufs_sb_private_info *uspi = UFS_SB(inode->i_sb)->s_uspi; int ptrs = uspi->s_apb; int ptrs_bits = uspi->s_apbshift; const long direct_blocks = UFS_NDADDR, indirect_blocks = ptrs, double_blocks = (1 << (ptrs_bits * 2)); int n = 0; UFSD("ptrs=uspi->s_apb = %d,double_blocks=%ld \n",ptrs,double_blocks); if (i_block < direct_blocks) { offsets[n++] = i_block; } else if ((i_block -= direct_blocks) < indirect_blocks) { offsets[n++] = UFS_IND_BLOCK; offsets[n++] = i_block; } else if ((i_block -= indirect_blocks) < double_blocks) { offsets[n++] = UFS_DIND_BLOCK; offsets[n++] = i_block >> ptrs_bits; offsets[n++] = i_block & (ptrs - 1); } else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) { offsets[n++] = UFS_TIND_BLOCK; offsets[n++] = i_block >> (ptrs_bits * 2); offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1); offsets[n++] = i_block & (ptrs - 1); } else { ufs_warning(inode->i_sb, 
"ufs_block_to_path", "block > big"); } return n; } /* * Returns the location of the fragment from * the beginning of the filesystem. */ static u64 ufs_frag_map(struct inode *inode, sector_t frag, bool needs_lock) { struct ufs_inode_info *ufsi = UFS_I(inode); struct super_block *sb = inode->i_sb; struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi; u64 mask = (u64) uspi->s_apbmask>>uspi->s_fpbshift; int shift = uspi->s_apbshift-uspi->s_fpbshift; sector_t offsets[4], *p; int depth = ufs_block_to_path(inode, frag >> uspi->s_fpbshift, offsets); u64 ret = 0L; __fs32 block; __fs64 u2_block = 0L; unsigned flags = UFS_SB(sb)->s_flags; u64 temp = 0L; UFSD(": frag = %llu depth = %d\n", (unsigned long long)frag, depth); UFSD(": uspi->s_fpbshift = %d ,uspi->s_apbmask = %x, mask=%llx\n", uspi->s_fpbshift, uspi->s_apbmask, (unsigned long long)mask); if (depth == 0) return 0; p = offsets; if (needs_lock) lock_ufs(sb); if ((flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2) goto ufs2; block = ufsi->i_u1.i_data[*p++]; if (!block) goto out; while (--depth) { struct buffer_head *bh; sector_t n = *p++; bh = sb_bread(sb, uspi->s_sbbase + fs32_to_cpu(sb, block)+(n>>shift)); if (!bh) goto out; block = ((__fs32 *) bh->b_data)[n & mask]; brelse (bh); if (!block) goto out; } ret = (u64) (uspi->s_sbbase + fs32_to_cpu(sb, block) + (frag & uspi->s_fpbmask)); goto out; ufs2: u2_block = ufsi->i_u1.u2_i_data[*p++]; if (!u2_block) goto out; while (--depth) { struct buffer_head *bh; sector_t n = *p++; temp = (u64)(uspi->s_sbbase) + fs64_to_cpu(sb, u2_block); bh = sb_bread(sb, temp +(u64) (n>>shift)); if (!bh) goto out; u2_block = ((__fs64 *)bh->b_data)[n & mask]; brelse(bh); if (!u2_block) goto out; } temp = (u64)uspi->s_sbbase + fs64_to_cpu(sb, u2_block); ret = temp + (u64) (frag & uspi->s_fpbmask); out: if (needs_lock) unlock_ufs(sb); return ret; } /** * ufs_inode_getfrag() - allocate new fragment(s) * @inode - pointer to inode * @fragment - number of `fragment' which hold pointer * to new allocated 
fragment(s) * @new_fragment - number of new allocated fragment(s) * @required - how many fragment(s) we require * @err - we set it if something wrong * @phys - pointer to where we save physical number of new allocated fragments, * NULL if we allocate not data(indirect blocks for example). * @new - we set it if we allocate new block * @locked_page - for ufs_new_fragments() */ static struct buffer_head * ufs_inode_getfrag(struct inode *inode, u64 fragment, sector_t new_fragment, unsigned int required, int *err, long *phys, int *new, struct page *locked_page) { struct ufs_inode_info *ufsi = UFS_I(inode); struct super_block *sb = inode->i_sb; struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi; struct buffer_head * result; unsigned blockoff, lastblockoff; u64 tmp, goal, lastfrag, block, lastblock; void *p, *p2; UFSD("ENTER, ino %lu, fragment %llu, new_fragment %llu, required %u, " "metadata %d\n", inode->i_ino, (unsigned long long)fragment, (unsigned long long)new_fragment, required, !phys); /* TODO : to be done for write support if ( (flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2) goto ufs2; */ block = ufs_fragstoblks (fragment); blockoff = ufs_fragnum (fragment); p = ufs_get_direct_data_ptr(uspi, ufsi, block); goal = 0; repeat: tmp = ufs_data_ptr_to_cpu(sb, p); lastfrag = ufsi->i_lastfrag; if (tmp && fragment < lastfrag) { if (!phys) { result = sb_getblk(sb, uspi->s_sbbase + tmp + blockoff); if (tmp == ufs_data_ptr_to_cpu(sb, p)) { UFSD("EXIT, result %llu\n", (unsigned long long)tmp + blockoff); return result; } brelse (result); goto repeat; } else { *phys = uspi->s_sbbase + tmp + blockoff; return NULL; } } lastblock = ufs_fragstoblks (lastfrag); lastblockoff = ufs_fragnum (lastfrag); /* * We will extend file into new block beyond last allocated block */ if (lastblock < block) { /* * We must reallocate last allocated block */ if (lastblockoff) { p2 = ufs_get_direct_data_ptr(uspi, ufsi, lastblock); tmp = ufs_new_fragments(inode, p2, lastfrag, ufs_data_ptr_to_cpu(sb, p2), 
uspi->s_fpb - lastblockoff, err, locked_page); if (!tmp) { if (lastfrag != ufsi->i_lastfrag) goto repeat; else return NULL; } lastfrag = ufsi->i_lastfrag; } tmp = ufs_data_ptr_to_cpu(sb, ufs_get_direct_data_ptr(uspi, ufsi, lastblock)); if (tmp) goal = tmp + uspi->s_fpb; tmp = ufs_new_fragments (inode, p, fragment - blockoff, goal, required + blockoff, err, phys != NULL ? locked_page : NULL); } else if (lastblock == block) { /* * We will extend last allocated block */ tmp = ufs_new_fragments(inode, p, fragment - (blockoff - lastblockoff), ufs_data_ptr_to_cpu(sb, p), required + (blockoff - lastblockoff), err, phys != NULL ? locked_page : NULL); } else /* (lastblock > block) */ { /* * We will allocate new block before last allocated block */ if (block) { tmp = ufs_data_ptr_to_cpu(sb, ufs_get_direct_data_ptr(uspi, ufsi, block - 1)); if (tmp) goal = tmp + uspi->s_fpb; } tmp = ufs_new_fragments(inode, p, fragment - blockoff, goal, uspi->s_fpb, err, phys != NULL ? locked_page : NULL); } if (!tmp) { if ((!blockoff && ufs_data_ptr_to_cpu(sb, p)) || (blockoff && lastfrag != ufsi->i_lastfrag)) goto repeat; *err = -ENOSPC; return NULL; } if (!phys) { result = sb_getblk(sb, uspi->s_sbbase + tmp + blockoff); } else { *phys = uspi->s_sbbase + tmp + blockoff; result = NULL; *err = 0; *new = 1; } inode->i_ctime = CURRENT_TIME_SEC; if (IS_SYNC(inode)) ufs_sync_inode (inode); mark_inode_dirty(inode); UFSD("EXIT, result %llu\n", (unsigned long long)tmp + blockoff); return result; /* This part : To be implemented .... Required only for writing, not required for READ-ONLY. 
ufs2: u2_block = ufs_fragstoblks(fragment); u2_blockoff = ufs_fragnum(fragment); p = ufsi->i_u1.u2_i_data + block; goal = 0; repeat2: tmp = fs32_to_cpu(sb, *p); lastfrag = ufsi->i_lastfrag; */ } /** * ufs_inode_getblock() - allocate new block * @inode - pointer to inode * @bh - pointer to block which hold "pointer" to new allocated block * @fragment - number of `fragment' which hold pointer * to new allocated block * @new_fragment - number of new allocated fragment * (block will hold this fragment and also uspi->s_fpb-1) * @err - see ufs_inode_getfrag() * @phys - see ufs_inode_getfrag() * @new - see ufs_inode_getfrag() * @locked_page - see ufs_inode_getfrag() */ static struct buffer_head * ufs_inode_getblock(struct inode *inode, struct buffer_head *bh, u64 fragment, sector_t new_fragment, int *err, long *phys, int *new, struct page *locked_page) { struct super_block *sb = inode->i_sb; struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi; struct buffer_head * result; unsigned blockoff; u64 tmp, goal, block; void *p; block = ufs_fragstoblks (fragment); blockoff = ufs_fragnum (fragment); UFSD("ENTER, ino %lu, fragment %llu, new_fragment %llu, metadata %d\n", inode->i_ino, (unsigned long long)fragment, (unsigned long long)new_fragment, !phys); result = NULL; if (!bh) goto out; if (!buffer_uptodate(bh)) { ll_rw_block (READ, 1, &bh); wait_on_buffer (bh); if (!buffer_uptodate(bh)) goto out; } if (uspi->fs_magic == UFS2_MAGIC) p = (__fs64 *)bh->b_data + block; else p = (__fs32 *)bh->b_data + block; repeat: tmp = ufs_data_ptr_to_cpu(sb, p); if (tmp) { if (!phys) { result = sb_getblk(sb, uspi->s_sbbase + tmp + blockoff); if (tmp == ufs_data_ptr_to_cpu(sb, p)) goto out; brelse (result); goto repeat; } else { *phys = uspi->s_sbbase + tmp + blockoff; goto out; } } if (block && (uspi->fs_magic == UFS2_MAGIC ? 
(tmp = fs64_to_cpu(sb, ((__fs64 *)bh->b_data)[block-1])) : (tmp = fs32_to_cpu(sb, ((__fs32 *)bh->b_data)[block-1])))) goal = tmp + uspi->s_fpb; else goal = bh->b_blocknr + uspi->s_fpb; tmp = ufs_new_fragments(inode, p, ufs_blknum(new_fragment), goal, uspi->s_fpb, err, locked_page); if (!tmp) { if (ufs_data_ptr_to_cpu(sb, p)) goto repeat; goto out; } if (!phys) { result = sb_getblk(sb, uspi->s_sbbase + tmp + blockoff); } else { *phys = uspi->s_sbbase + tmp + blockoff; *new = 1; } mark_buffer_dirty(bh); if (IS_SYNC(inode)) sync_dirty_buffer(bh); inode->i_ctime = CURRENT_TIME_SEC; mark_inode_dirty(inode); UFSD("result %llu\n", (unsigned long long)tmp + blockoff); out: brelse (bh); UFSD("EXIT\n"); return result; } /** * ufs_getfrag_block() - `get_block_t' function, interface between UFS and * readpage, writepage and so on */ int ufs_getfrag_block(struct inode *inode, sector_t fragment, struct buffer_head *bh_result, int create) { struct super_block * sb = inode->i_sb; struct ufs_sb_info * sbi = UFS_SB(sb); struct ufs_sb_private_info * uspi = sbi->s_uspi; struct buffer_head * bh; int ret, err, new; unsigned long ptr,phys; u64 phys64 = 0; bool needs_lock = (sbi->mutex_owner != current); if (!create) { phys64 = ufs_frag_map(inode, fragment, needs_lock); UFSD("phys64 = %llu\n", (unsigned long long)phys64); if (phys64) map_bh(bh_result, sb, phys64); return 0; } /* This code entered only while writing ....? 
*/ err = -EIO; new = 0; ret = 0; bh = NULL; if (needs_lock) lock_ufs(sb); UFSD("ENTER, ino %lu, fragment %llu\n", inode->i_ino, (unsigned long long)fragment); if (fragment > ((UFS_NDADDR + uspi->s_apb + uspi->s_2apb + uspi->s_3apb) << uspi->s_fpbshift)) goto abort_too_big; err = 0; ptr = fragment; /* * ok, these macros clean the logic up a bit and make * it much more readable: */ #define GET_INODE_DATABLOCK(x) \ ufs_inode_getfrag(inode, x, fragment, 1, &err, &phys, &new,\ bh_result->b_page) #define GET_INODE_PTR(x) \ ufs_inode_getfrag(inode, x, fragment, uspi->s_fpb, &err, NULL, NULL,\ bh_result->b_page) #define GET_INDIRECT_DATABLOCK(x) \ ufs_inode_getblock(inode, bh, x, fragment, \ &err, &phys, &new, bh_result->b_page) #define GET_INDIRECT_PTR(x) \ ufs_inode_getblock(inode, bh, x, fragment, \ &err, NULL, NULL, NULL) if (ptr < UFS_NDIR_FRAGMENT) { bh = GET_INODE_DATABLOCK(ptr); goto out; } ptr -= UFS_NDIR_FRAGMENT; if (ptr < (1 << (uspi->s_apbshift + uspi->s_fpbshift))) { bh = GET_INODE_PTR(UFS_IND_FRAGMENT + (ptr >> uspi->s_apbshift)); goto get_indirect; } ptr -= 1 << (uspi->s_apbshift + uspi->s_fpbshift); if (ptr < (1 << (uspi->s_2apbshift + uspi->s_fpbshift))) { bh = GET_INODE_PTR(UFS_DIND_FRAGMENT + (ptr >> uspi->s_2apbshift)); goto get_double; } ptr -= 1 << (uspi->s_2apbshift + uspi->s_fpbshift); bh = GET_INODE_PTR(UFS_TIND_FRAGMENT + (ptr >> uspi->s_3apbshift)); bh = GET_INDIRECT_PTR((ptr >> uspi->s_2apbshift) & uspi->s_apbmask); get_double: bh = GET_INDIRECT_PTR((ptr >> uspi->s_apbshift) & uspi->s_apbmask); get_indirect: bh = GET_INDIRECT_DATABLOCK(ptr & uspi->s_apbmask); #undef GET_INODE_DATABLOCK #undef GET_INODE_PTR #undef GET_INDIRECT_DATABLOCK #undef GET_INDIRECT_PTR out: if (err) goto abort; if (new) set_buffer_new(bh_result); map_bh(bh_result, sb, phys); abort: if (needs_lock) unlock_ufs(sb); return err; abort_too_big: ufs_warning(sb, "ufs_get_block", "block > big"); goto abort; } static int ufs_writepage(struct page *page, struct writeback_control 
*wbc) { return block_write_full_page(page,ufs_getfrag_block,wbc); } static int ufs_readpage(struct file *file, struct page *page) { return block_read_full_page(page,ufs_getfrag_block); } int ufs_prepare_chunk(struct page *page, loff_t pos, unsigned len) { return __block_write_begin(page, pos, len, ufs_getfrag_block); } static int ufs_write_begin(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned flags, struct page **pagep, void **fsdata) { int ret; ret = block_write_begin(mapping, pos, len, flags, pagep, ufs_getfrag_block); if (unlikely(ret)) { loff_t isize = mapping->host->i_size; if (pos + len > isize) vmtruncate(mapping->host, isize); } return ret; } static sector_t ufs_bmap(struct address_space *mapping, sector_t block) { return generic_block_bmap(mapping,block,ufs_getfrag_block); } const struct address_space_operations ufs_aops = { .readpage = ufs_readpage, .writepage = ufs_writepage, .write_begin = ufs_write_begin, .write_end = generic_write_end, .bmap = ufs_bmap }; static void ufs_set_inode_ops(struct inode *inode) { if (S_ISREG(inode->i_mode)) { inode->i_op = &ufs_file_inode_operations; inode->i_fop = &ufs_file_operations; inode->i_mapping->a_ops = &ufs_aops; } else if (S_ISDIR(inode->i_mode)) { inode->i_op = &ufs_dir_inode_operations; inode->i_fop = &ufs_dir_operations; inode->i_mapping->a_ops = &ufs_aops; } else if (S_ISLNK(inode->i_mode)) { if (!inode->i_blocks) inode->i_op = &ufs_fast_symlink_inode_operations; else { inode->i_op = &ufs_symlink_inode_operations; inode->i_mapping->a_ops = &ufs_aops; } } else init_special_inode(inode, inode->i_mode, ufs_get_inode_dev(inode->i_sb, UFS_I(inode))); } static int ufs1_read_inode(struct inode *inode, struct ufs_inode *ufs_inode) { struct ufs_inode_info *ufsi = UFS_I(inode); struct super_block *sb = inode->i_sb; umode_t mode; /* * Copy data to the in-core inode. 
*/ inode->i_mode = mode = fs16_to_cpu(sb, ufs_inode->ui_mode); set_nlink(inode, fs16_to_cpu(sb, ufs_inode->ui_nlink)); if (inode->i_nlink == 0) { ufs_error (sb, "ufs_read_inode", "inode %lu has zero nlink\n", inode->i_ino); return -1; } /* * Linux now has 32-bit uid and gid, so we can support EFT. */ inode->i_uid = ufs_get_inode_uid(sb, ufs_inode); inode->i_gid = ufs_get_inode_gid(sb, ufs_inode); inode->i_size = fs64_to_cpu(sb, ufs_inode->ui_size); inode->i_atime.tv_sec = fs32_to_cpu(sb, ufs_inode->ui_atime.tv_sec); inode->i_ctime.tv_sec = fs32_to_cpu(sb, ufs_inode->ui_ctime.tv_sec); inode->i_mtime.tv_sec = fs32_to_cpu(sb, ufs_inode->ui_mtime.tv_sec); inode->i_mtime.tv_nsec = 0; inode->i_atime.tv_nsec = 0; inode->i_ctime.tv_nsec = 0; inode->i_blocks = fs32_to_cpu(sb, ufs_inode->ui_blocks); inode->i_generation = fs32_to_cpu(sb, ufs_inode->ui_gen); ufsi->i_flags = fs32_to_cpu(sb, ufs_inode->ui_flags); ufsi->i_shadow = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_shadow); ufsi->i_oeftflag = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_oeftflag); if (S_ISCHR(mode) || S_ISBLK(mode) || inode->i_blocks) { memcpy(ufsi->i_u1.i_data, &ufs_inode->ui_u2.ui_addr, sizeof(ufs_inode->ui_u2.ui_addr)); } else { memcpy(ufsi->i_u1.i_symlink, ufs_inode->ui_u2.ui_symlink, sizeof(ufs_inode->ui_u2.ui_symlink) - 1); ufsi->i_u1.i_symlink[sizeof(ufs_inode->ui_u2.ui_symlink) - 1] = 0; } return 0; } static int ufs2_read_inode(struct inode *inode, struct ufs2_inode *ufs2_inode) { struct ufs_inode_info *ufsi = UFS_I(inode); struct super_block *sb = inode->i_sb; umode_t mode; UFSD("Reading ufs2 inode, ino %lu\n", inode->i_ino); /* * Copy data to the in-core inode. */ inode->i_mode = mode = fs16_to_cpu(sb, ufs2_inode->ui_mode); set_nlink(inode, fs16_to_cpu(sb, ufs2_inode->ui_nlink)); if (inode->i_nlink == 0) { ufs_error (sb, "ufs_read_inode", "inode %lu has zero nlink\n", inode->i_ino); return -1; } /* * Linux now has 32-bit uid and gid, so we can support EFT. 
*/ inode->i_uid = fs32_to_cpu(sb, ufs2_inode->ui_uid); inode->i_gid = fs32_to_cpu(sb, ufs2_inode->ui_gid); inode->i_size = fs64_to_cpu(sb, ufs2_inode->ui_size); inode->i_atime.tv_sec = fs64_to_cpu(sb, ufs2_inode->ui_atime); inode->i_ctime.tv_sec = fs64_to_cpu(sb, ufs2_inode->ui_ctime); inode->i_mtime.tv_sec = fs64_to_cpu(sb, ufs2_inode->ui_mtime); inode->i_atime.tv_nsec = fs32_to_cpu(sb, ufs2_inode->ui_atimensec); inode->i_ctime.tv_nsec = fs32_to_cpu(sb, ufs2_inode->ui_ctimensec); inode->i_mtime.tv_nsec = fs32_to_cpu(sb, ufs2_inode->ui_mtimensec); inode->i_blocks = fs64_to_cpu(sb, ufs2_inode->ui_blocks); inode->i_generation = fs32_to_cpu(sb, ufs2_inode->ui_gen); ufsi->i_flags = fs32_to_cpu(sb, ufs2_inode->ui_flags); /* ufsi->i_shadow = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_shadow); ufsi->i_oeftflag = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_oeftflag); */ if (S_ISCHR(mode) || S_ISBLK(mode) || inode->i_blocks) { memcpy(ufsi->i_u1.u2_i_data, &ufs2_inode->ui_u2.ui_addr, sizeof(ufs2_inode->ui_u2.ui_addr)); } else { memcpy(ufsi->i_u1.i_symlink, ufs2_inode->ui_u2.ui_symlink, sizeof(ufs2_inode->ui_u2.ui_symlink) - 1); ufsi->i_u1.i_symlink[sizeof(ufs2_inode->ui_u2.ui_symlink) - 1] = 0; } return 0; } struct inode *ufs_iget(struct super_block *sb, unsigned long ino) { struct ufs_inode_info *ufsi; struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi; struct buffer_head * bh; struct inode *inode; int err; UFSD("ENTER, ino %lu\n", ino); if (ino < UFS_ROOTINO || ino > (uspi->s_ncg * uspi->s_ipg)) { ufs_warning(sb, "ufs_read_inode", "bad inode number (%lu)\n", ino); return ERR_PTR(-EIO); } inode = iget_locked(sb, ino); if (!inode) return ERR_PTR(-ENOMEM); if (!(inode->i_state & I_NEW)) return inode; ufsi = UFS_I(inode); bh = sb_bread(sb, uspi->s_sbbase + ufs_inotofsba(inode->i_ino)); if (!bh) { ufs_warning(sb, "ufs_read_inode", "unable to read inode %lu\n", inode->i_ino); goto bad_inode; } if ((UFS_SB(sb)->s_flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2) { struct ufs2_inode 
*ufs2_inode = (struct ufs2_inode *)bh->b_data; err = ufs2_read_inode(inode, ufs2_inode + ufs_inotofsbo(inode->i_ino)); } else { struct ufs_inode *ufs_inode = (struct ufs_inode *)bh->b_data; err = ufs1_read_inode(inode, ufs_inode + ufs_inotofsbo(inode->i_ino)); } if (err) goto bad_inode; inode->i_version++; ufsi->i_lastfrag = (inode->i_size + uspi->s_fsize - 1) >> uspi->s_fshift; ufsi->i_dir_start_lookup = 0; ufsi->i_osync = 0; ufs_set_inode_ops(inode); brelse(bh); UFSD("EXIT\n"); unlock_new_inode(inode); return inode; bad_inode: iget_failed(inode); return ERR_PTR(-EIO); } static void ufs1_update_inode(struct inode *inode, struct ufs_inode *ufs_inode) { struct super_block *sb = inode->i_sb; struct ufs_inode_info *ufsi = UFS_I(inode); ufs_inode->ui_mode = cpu_to_fs16(sb, inode->i_mode); ufs_inode->ui_nlink = cpu_to_fs16(sb, inode->i_nlink); ufs_set_inode_uid(sb, ufs_inode, inode->i_uid); ufs_set_inode_gid(sb, ufs_inode, inode->i_gid); ufs_inode->ui_size = cpu_to_fs64(sb, inode->i_size); ufs_inode->ui_atime.tv_sec = cpu_to_fs32(sb, inode->i_atime.tv_sec); ufs_inode->ui_atime.tv_usec = 0; ufs_inode->ui_ctime.tv_sec = cpu_to_fs32(sb, inode->i_ctime.tv_sec); ufs_inode->ui_ctime.tv_usec = 0; ufs_inode->ui_mtime.tv_sec = cpu_to_fs32(sb, inode->i_mtime.tv_sec); ufs_inode->ui_mtime.tv_usec = 0; ufs_inode->ui_blocks = cpu_to_fs32(sb, inode->i_blocks); ufs_inode->ui_flags = cpu_to_fs32(sb, ufsi->i_flags); ufs_inode->ui_gen = cpu_to_fs32(sb, inode->i_generation); if ((UFS_SB(sb)->s_flags & UFS_UID_MASK) == UFS_UID_EFT) { ufs_inode->ui_u3.ui_sun.ui_shadow = cpu_to_fs32(sb, ufsi->i_shadow); ufs_inode->ui_u3.ui_sun.ui_oeftflag = cpu_to_fs32(sb, ufsi->i_oeftflag); } if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) { /* ufs_inode->ui_u2.ui_addr.ui_db[0] = cpu_to_fs32(sb, inode->i_rdev); */ ufs_inode->ui_u2.ui_addr.ui_db[0] = ufsi->i_u1.i_data[0]; } else if (inode->i_blocks) { memcpy(&ufs_inode->ui_u2.ui_addr, ufsi->i_u1.i_data, sizeof(ufs_inode->ui_u2.ui_addr)); } else { 
memcpy(&ufs_inode->ui_u2.ui_symlink, ufsi->i_u1.i_symlink, sizeof(ufs_inode->ui_u2.ui_symlink)); } if (!inode->i_nlink) memset (ufs_inode, 0, sizeof(struct ufs_inode)); } static void ufs2_update_inode(struct inode *inode, struct ufs2_inode *ufs_inode) { struct super_block *sb = inode->i_sb; struct ufs_inode_info *ufsi = UFS_I(inode); UFSD("ENTER\n"); ufs_inode->ui_mode = cpu_to_fs16(sb, inode->i_mode); ufs_inode->ui_nlink = cpu_to_fs16(sb, inode->i_nlink); ufs_inode->ui_uid = cpu_to_fs32(sb, inode->i_uid); ufs_inode->ui_gid = cpu_to_fs32(sb, inode->i_gid); ufs_inode->ui_size = cpu_to_fs64(sb, inode->i_size); ufs_inode->ui_atime = cpu_to_fs64(sb, inode->i_atime.tv_sec); ufs_inode->ui_atimensec = cpu_to_fs32(sb, inode->i_atime.tv_nsec); ufs_inode->ui_ctime = cpu_to_fs64(sb, inode->i_ctime.tv_sec); ufs_inode->ui_ctimensec = cpu_to_fs32(sb, inode->i_ctime.tv_nsec); ufs_inode->ui_mtime = cpu_to_fs64(sb, inode->i_mtime.tv_sec); ufs_inode->ui_mtimensec = cpu_to_fs32(sb, inode->i_mtime.tv_nsec); ufs_inode->ui_blocks = cpu_to_fs64(sb, inode->i_blocks); ufs_inode->ui_flags = cpu_to_fs32(sb, ufsi->i_flags); ufs_inode->ui_gen = cpu_to_fs32(sb, inode->i_generation); if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) { /* ufs_inode->ui_u2.ui_addr.ui_db[0] = cpu_to_fs32(sb, inode->i_rdev); */ ufs_inode->ui_u2.ui_addr.ui_db[0] = ufsi->i_u1.u2_i_data[0]; } else if (inode->i_blocks) { memcpy(&ufs_inode->ui_u2.ui_addr, ufsi->i_u1.u2_i_data, sizeof(ufs_inode->ui_u2.ui_addr)); } else { memcpy(&ufs_inode->ui_u2.ui_symlink, ufsi->i_u1.i_symlink, sizeof(ufs_inode->ui_u2.ui_symlink)); } if (!inode->i_nlink) memset (ufs_inode, 0, sizeof(struct ufs2_inode)); UFSD("EXIT\n"); } static int ufs_update_inode(struct inode * inode, int do_sync) { struct super_block *sb = inode->i_sb; struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi; struct buffer_head * bh; UFSD("ENTER, ino %lu\n", inode->i_ino); if (inode->i_ino < UFS_ROOTINO || inode->i_ino > (uspi->s_ncg * uspi->s_ipg)) { ufs_warning 
(sb, "ufs_read_inode", "bad inode number (%lu)\n", inode->i_ino); return -1; } bh = sb_bread(sb, ufs_inotofsba(inode->i_ino)); if (!bh) { ufs_warning (sb, "ufs_read_inode", "unable to read inode %lu\n", inode->i_ino); return -1; } if (uspi->fs_magic == UFS2_MAGIC) { struct ufs2_inode *ufs2_inode = (struct ufs2_inode *)bh->b_data; ufs2_update_inode(inode, ufs2_inode + ufs_inotofsbo(inode->i_ino)); } else { struct ufs_inode *ufs_inode = (struct ufs_inode *) bh->b_data; ufs1_update_inode(inode, ufs_inode + ufs_inotofsbo(inode->i_ino)); } mark_buffer_dirty(bh); if (do_sync) sync_dirty_buffer(bh); brelse (bh); UFSD("EXIT\n"); return 0; } int ufs_write_inode(struct inode *inode, struct writeback_control *wbc) { int ret; lock_ufs(inode->i_sb); ret = ufs_update_inode(inode, wbc->sync_mode == WB_SYNC_ALL); unlock_ufs(inode->i_sb); return ret; } int ufs_sync_inode (struct inode *inode) { return ufs_update_inode (inode, 1); } void ufs_evict_inode(struct inode * inode) { int want_delete = 0; if (!inode->i_nlink && !is_bad_inode(inode)) want_delete = 1; truncate_inode_pages(&inode->i_data, 0); if (want_delete) { loff_t old_i_size; /*UFS_I(inode)->i_dtime = CURRENT_TIME;*/ lock_ufs(inode->i_sb); mark_inode_dirty(inode); ufs_update_inode(inode, IS_SYNC(inode)); old_i_size = inode->i_size; inode->i_size = 0; if (inode->i_blocks && ufs_truncate(inode, old_i_size)) ufs_warning(inode->i_sb, __func__, "ufs_truncate failed\n"); unlock_ufs(inode->i_sb); } invalidate_inode_buffers(inode); end_writeback(inode); if (want_delete) { lock_ufs(inode->i_sb); ufs_free_inode (inode); unlock_ufs(inode->i_sb); } }
gpl-2.0
GalaxyTab4/android_kernel_samsung_matissewifi
drivers/scsi/sym53c416.c
4987
26175
/* * sym53c416.c * Low-level SCSI driver for sym53c416 chip. * Copyright (C) 1998 Lieven Willems (lw_linux@hotmail.com) * * Changes : * * Marcelo Tosatti <marcelo@conectiva.com.br> : Added io_request_lock locking * Alan Cox <alan@lxorguk.ukuu.org.uk> : Cleaned up code formatting * Fixed an irq locking bug * Added ISAPnP support * Bjoern A. Zeeb <bzeeb@zabbadoz.net> : Initial irq locking updates * Added another card with ISAPnP support * * LILO command line usage: sym53c416=<PORTBASE>[,<IRQ>] * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2, or (at your option) any * later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/init.h> #include <linux/string.h> #include <linux/ioport.h> #include <linux/interrupt.h> #include <linux/delay.h> #include <linux/proc_fs.h> #include <linux/spinlock.h> #include <asm/dma.h> #include <asm/io.h> #include <linux/blkdev.h> #include <linux/isapnp.h> #include "scsi.h" #include <scsi/scsi_host.h> #include "sym53c416.h" #define VERSION_STRING "Version 1.0.0-ac" #define TC_LOW 0x00 /* Transfer counter low */ #define TC_MID 0x01 /* Transfer counter mid */ #define SCSI_FIFO 0x02 /* SCSI FIFO register */ #define COMMAND_REG 0x03 /* Command Register */ #define STATUS_REG 0x04 /* Status Register (READ) */ #define DEST_BUS_ID 0x04 /* Destination Bus ID (WRITE) */ #define INT_REG 0x05 /* Interrupt Register (READ) */ #define TOM 0x05 /* Time out multiplier (WRITE) */ #define STP 0x06 /* Synchronous Transfer period */ #define SYNC_OFFSET 0x07 /* Synchronous Offset */ #define CONF_REG_1 0x08 /* Configuration 
register 1 */ #define CONF_REG_2 0x0B /* Configuration register 2 */ #define CONF_REG_3 0x0C /* Configuration register 3 */ #define CONF_REG_4 0x0D /* Configuration register 4 */ #define TC_HIGH 0x0E /* Transfer counter high */ #define PIO_FIFO_1 0x10 /* PIO FIFO register 1 */ #define PIO_FIFO_2 0x11 /* PIO FIFO register 2 */ #define PIO_FIFO_3 0x12 /* PIO FIFO register 3 */ #define PIO_FIFO_4 0x13 /* PIO FIFO register 4 */ #define PIO_FIFO_CNT 0x14 /* PIO FIFO count */ #define PIO_INT_REG 0x15 /* PIO interrupt register */ #define CONF_REG_5 0x16 /* Configuration register 5 */ #define FEATURE_EN 0x1D /* Feature Enable register */ /* Configuration register 1 entries: */ /* Bits 2-0: SCSI ID of host adapter */ #define SCM 0x80 /* Slow Cable Mode */ #define SRID 0x40 /* SCSI Reset Interrupt Disable */ #define PTM 0x20 /* Parity Test Mode */ #define EPC 0x10 /* Enable Parity Checking */ #define CTME 0x08 /* Special Test Mode */ /* Configuration register 2 entries: */ #define FE 0x40 /* Features Enable */ #define SCSI2 0x08 /* SCSI 2 Enable */ #define TBPA 0x04 /* Target Bad Parity Abort */ /* Configuration register 3 entries: */ #define IDMRC 0x80 /* ID Message Reserved Check */ #define QTE 0x40 /* Queue Tag Enable */ #define CDB10 0x20 /* Command Descriptor Block 10 */ #define FSCSI 0x10 /* FastSCSI */ #define FCLK 0x08 /* FastClock */ /* Configuration register 4 entries: */ #define RBS 0x08 /* Register bank select */ #define EAN 0x04 /* Enable Active Negotiation */ /* Configuration register 5 entries: */ #define LPSR 0x80 /* Lower Power SCSI Reset */ #define IE 0x20 /* Interrupt Enable */ #define LPM 0x02 /* Low Power Mode */ #define WSE0 0x01 /* 0WS Enable */ /* Interrupt register entries: */ #define SRST 0x80 /* SCSI Reset */ #define ILCMD 0x40 /* Illegal Command */ #define DIS 0x20 /* Disconnect */ #define BS 0x10 /* Bus Service */ #define FC 0x08 /* Function Complete */ #define RESEL 0x04 /* Reselected */ #define SI 0x03 /* Selection Interrupt */ /* Status 
Register Entries: */ #define SCI 0x80 /* SCSI Core Int */ #define GE 0x40 /* Gross Error */ #define PE 0x20 /* Parity Error */ #define TC 0x10 /* Terminal Count */ #define VGC 0x08 /* Valid Group Code */ #define PHBITS 0x07 /* Phase bits */ /* PIO Interrupt Register Entries: */ #define SCI 0x80 /* SCSI Core Int */ #define PFI 0x40 /* PIO FIFO Interrupt */ #define FULL 0x20 /* PIO FIFO Full */ #define EMPTY 0x10 /* PIO FIFO Empty */ #define CE 0x08 /* Collision Error */ #define OUE 0x04 /* Overflow / Underflow error */ #define FIE 0x02 /* Full Interrupt Enable */ #define EIE 0x01 /* Empty Interrupt Enable */ /* SYM53C416 SCSI phases (lower 3 bits of SYM53C416_STATUS_REG) */ #define PHASE_DATA_OUT 0x00 #define PHASE_DATA_IN 0x01 #define PHASE_COMMAND 0x02 #define PHASE_STATUS 0x03 #define PHASE_RESERVED_1 0x04 #define PHASE_RESERVED_2 0x05 #define PHASE_MESSAGE_OUT 0x06 #define PHASE_MESSAGE_IN 0x07 /* SYM53C416 core commands */ #define NOOP 0x00 #define FLUSH_FIFO 0x01 #define RESET_CHIP 0x02 #define RESET_SCSI_BUS 0x03 #define DISABLE_SEL_RESEL 0x45 #define RESEL_SEQ 0x40 #define SEL_WITHOUT_ATN_SEQ 0x41 #define SEL_WITH_ATN_SEQ 0x42 #define SEL_WITH_ATN_AND_STOP_SEQ 0x43 #define ENABLE_SEL_RESEL 0x44 #define SEL_WITH_ATN3_SEQ 0x46 #define RESEL3_SEQ 0x47 #define SND_MSG 0x20 #define SND_STAT 0x21 #define SND_DATA 0x22 #define DISCONNECT_SEQ 0x23 #define TERMINATE_SEQ 0x24 #define TARGET_COMM_COMPLETE_SEQ 0x25 #define DISCONN 0x27 #define RECV_MSG_SEQ 0x28 #define RECV_CMD 0x29 #define RECV_DATA 0x2A #define RECV_CMD_SEQ 0x2B #define TARGET_ABORT_PIO 0x04 #define TRANSFER_INFORMATION 0x10 #define INIT_COMM_COMPLETE_SEQ 0x11 #define MSG_ACCEPTED 0x12 #define TRANSFER_PAD 0x18 #define SET_ATN 0x1A #define RESET_ATN 0x1B #define ILLEGAL 0xFF #define PIO_MODE 0x80 #define IO_RANGE 0x20 /* 0x00 - 0x1F */ #define ID "sym53c416" /* Attention: copied to the sym53c416.h */ #define PIO_SIZE 128 /* Size of PIO fifo is 128 bytes */ #define READ_TIMEOUT 150 #define 
WRITE_TIMEOUT 150 #ifdef MODULE #define sym53c416_base sym53c416 #define sym53c416_base_1 sym53c416_1 #define sym53c416_base_2 sym53c416_2 #define sym53c416_base_3 sym53c416_3 static unsigned int sym53c416_base[2]; static unsigned int sym53c416_base_1[2]; static unsigned int sym53c416_base_2[2]; static unsigned int sym53c416_base_3[2]; #endif #define MAXHOSTS 4 #define SG_ADDRESS(buffer) ((char *) sg_virt((buffer))) enum phases { idle, data_out, data_in, command_ph, status_ph, message_out, message_in }; typedef struct { int base; int irq; int scsi_id; } host; static host hosts[MAXHOSTS] = { {0, 0, SYM53C416_SCSI_ID}, {0, 0, SYM53C416_SCSI_ID}, {0, 0, SYM53C416_SCSI_ID}, {0, 0, SYM53C416_SCSI_ID} }; static int host_index = 0; static char info[120]; static Scsi_Cmnd *current_command = NULL; static int fastpio = 1; static int probeaddrs[] = {0x200, 0x220, 0x240, 0}; static void sym53c416_set_transfer_counter(int base, unsigned int len) { /* Program Transfer Counter */ outb(len & 0x0000FF, base + TC_LOW); outb((len & 0x00FF00) >> 8, base + TC_MID); outb((len & 0xFF0000) >> 16, base + TC_HIGH); } static DEFINE_SPINLOCK(sym53c416_lock); /* Returns the number of bytes read */ static __inline__ unsigned int sym53c416_read(int base, unsigned char *buffer, unsigned int len) { unsigned int orig_len = len; unsigned long flags = 0; unsigned int bytes_left; unsigned long i; int timeout = READ_TIMEOUT; /* Do transfer */ spin_lock_irqsave(&sym53c416_lock, flags); while(len && timeout) { bytes_left = inb(base + PIO_FIFO_CNT); /* Number of bytes in the PIO FIFO */ if(fastpio && bytes_left > 3) { insl(base + PIO_FIFO_1, buffer, bytes_left >> 2); buffer += bytes_left & 0xFC; len -= bytes_left & 0xFC; } else if(bytes_left > 0) { len -= bytes_left; for(; bytes_left > 0; bytes_left--) *(buffer++) = inb(base + PIO_FIFO_1); } else { i = jiffies + timeout; spin_unlock_irqrestore(&sym53c416_lock, flags); while(time_before(jiffies, i) && (inb(base + PIO_INT_REG) & EMPTY) && timeout) 
if(inb(base + PIO_INT_REG) & SCI) timeout = 0; spin_lock_irqsave(&sym53c416_lock, flags); if(inb(base + PIO_INT_REG) & EMPTY) timeout = 0; } } spin_unlock_irqrestore(&sym53c416_lock, flags); return orig_len - len; } /* Returns the number of bytes written */ static __inline__ unsigned int sym53c416_write(int base, unsigned char *buffer, unsigned int len) { unsigned int orig_len = len; unsigned long flags = 0; unsigned int bufferfree; unsigned long i; unsigned int timeout = WRITE_TIMEOUT; /* Do transfer */ spin_lock_irqsave(&sym53c416_lock, flags); while(len && timeout) { bufferfree = PIO_SIZE - inb(base + PIO_FIFO_CNT); if(bufferfree > len) bufferfree = len; if(fastpio && bufferfree > 3) { outsl(base + PIO_FIFO_1, buffer, bufferfree >> 2); buffer += bufferfree & 0xFC; len -= bufferfree & 0xFC; } else if(bufferfree > 0) { len -= bufferfree; for(; bufferfree > 0; bufferfree--) outb(*(buffer++), base + PIO_FIFO_1); } else { i = jiffies + timeout; spin_unlock_irqrestore(&sym53c416_lock, flags); while(time_before(jiffies, i) && (inb(base + PIO_INT_REG) & FULL) && timeout) ; spin_lock_irqsave(&sym53c416_lock, flags); if(inb(base + PIO_INT_REG) & FULL) timeout = 0; } } spin_unlock_irqrestore(&sym53c416_lock, flags); return orig_len - len; } static irqreturn_t sym53c416_intr_handle(int irq, void *dev_id) { struct Scsi_Host *dev = dev_id; int base = dev->io_port; int i; unsigned long flags = 0; unsigned char status_reg, pio_int_reg, int_reg; struct scatterlist *sg; unsigned int tot_trans = 0; spin_lock_irqsave(dev->host_lock,flags); status_reg = inb(base + STATUS_REG); pio_int_reg = inb(base + PIO_INT_REG); int_reg = inb(base + INT_REG); spin_unlock_irqrestore(dev->host_lock, flags); /* First, we handle error conditions */ if(int_reg & SCI) /* SCSI Reset */ { printk(KERN_DEBUG "sym53c416: Reset received\n"); current_command->SCp.phase = idle; current_command->result = DID_RESET << 16; spin_lock_irqsave(dev->host_lock, flags); current_command->scsi_done(current_command); 
spin_unlock_irqrestore(dev->host_lock, flags); goto out; } if(int_reg & ILCMD) /* Illegal Command */ { printk(KERN_WARNING "sym53c416: Illegal Command: 0x%02x.\n", inb(base + COMMAND_REG)); current_command->SCp.phase = idle; current_command->result = DID_ERROR << 16; spin_lock_irqsave(dev->host_lock, flags); current_command->scsi_done(current_command); spin_unlock_irqrestore(dev->host_lock, flags); goto out; } if(status_reg & GE) /* Gross Error */ { printk(KERN_WARNING "sym53c416: Controller reports gross error.\n"); current_command->SCp.phase = idle; current_command->result = DID_ERROR << 16; spin_lock_irqsave(dev->host_lock, flags); current_command->scsi_done(current_command); spin_unlock_irqrestore(dev->host_lock, flags); goto out; } if(status_reg & PE) /* Parity Error */ { printk(KERN_WARNING "sym53c416:SCSI parity error.\n"); current_command->SCp.phase = idle; current_command->result = DID_PARITY << 16; spin_lock_irqsave(dev->host_lock, flags); current_command->scsi_done(current_command); spin_unlock_irqrestore(dev->host_lock, flags); goto out; } if(pio_int_reg & (CE | OUE)) { printk(KERN_WARNING "sym53c416: PIO interrupt error.\n"); current_command->SCp.phase = idle; current_command->result = DID_ERROR << 16; spin_lock_irqsave(dev->host_lock, flags); current_command->scsi_done(current_command); spin_unlock_irqrestore(dev->host_lock, flags); goto out; } if(int_reg & DIS) /* Disconnect */ { if(current_command->SCp.phase != message_in) current_command->result = DID_NO_CONNECT << 16; else current_command->result = (current_command->SCp.Status & 0xFF) | ((current_command->SCp.Message & 0xFF) << 8) | (DID_OK << 16); current_command->SCp.phase = idle; spin_lock_irqsave(dev->host_lock, flags); current_command->scsi_done(current_command); spin_unlock_irqrestore(dev->host_lock, flags); goto out; } /* Now we handle SCSI phases */ switch(status_reg & PHBITS) /* Filter SCSI phase out of status reg */ { case PHASE_DATA_OUT: { if(int_reg & BS) { current_command->SCp.phase = 
data_out; outb(FLUSH_FIFO, base + COMMAND_REG); sym53c416_set_transfer_counter(base, scsi_bufflen(current_command)); outb(TRANSFER_INFORMATION | PIO_MODE, base + COMMAND_REG); scsi_for_each_sg(current_command, sg, scsi_sg_count(current_command), i) { tot_trans += sym53c416_write(base, SG_ADDRESS(sg), sg->length); } if(tot_trans < current_command->underflow) printk(KERN_WARNING "sym53c416: Underflow, wrote %d bytes, request for %d bytes.\n", tot_trans, current_command->underflow); } break; } case PHASE_DATA_IN: { if(int_reg & BS) { current_command->SCp.phase = data_in; outb(FLUSH_FIFO, base + COMMAND_REG); sym53c416_set_transfer_counter(base, scsi_bufflen(current_command)); outb(TRANSFER_INFORMATION | PIO_MODE, base + COMMAND_REG); scsi_for_each_sg(current_command, sg, scsi_sg_count(current_command), i) { tot_trans += sym53c416_read(base, SG_ADDRESS(sg), sg->length); } if(tot_trans < current_command->underflow) printk(KERN_WARNING "sym53c416: Underflow, read %d bytes, request for %d bytes.\n", tot_trans, current_command->underflow); } break; } case PHASE_COMMAND: { current_command->SCp.phase = command_ph; printk(KERN_ERR "sym53c416: Unknown interrupt in command phase.\n"); break; } case PHASE_STATUS: { current_command->SCp.phase = status_ph; outb(FLUSH_FIFO, base + COMMAND_REG); outb(INIT_COMM_COMPLETE_SEQ, base + COMMAND_REG); break; } case PHASE_RESERVED_1: case PHASE_RESERVED_2: { printk(KERN_ERR "sym53c416: Reserved phase occurred.\n"); break; } case PHASE_MESSAGE_OUT: { current_command->SCp.phase = message_out; outb(SET_ATN, base + COMMAND_REG); outb(MSG_ACCEPTED, base + COMMAND_REG); break; } case PHASE_MESSAGE_IN: { current_command->SCp.phase = message_in; current_command->SCp.Status = inb(base + SCSI_FIFO); current_command->SCp.Message = inb(base + SCSI_FIFO); if(current_command->SCp.Message == SAVE_POINTERS || current_command->SCp.Message == DISCONNECT) outb(SET_ATN, base + COMMAND_REG); outb(MSG_ACCEPTED, base + COMMAND_REG); break; } } out: return 
IRQ_HANDLED; } static void sym53c416_init(int base, int scsi_id) { outb(RESET_CHIP, base + COMMAND_REG); outb(NOOP, base + COMMAND_REG); outb(0x99, base + TOM); /* Time out of 250 ms */ outb(0x05, base + STP); outb(0x00, base + SYNC_OFFSET); outb(EPC | scsi_id, base + CONF_REG_1); outb(FE | SCSI2 | TBPA, base + CONF_REG_2); outb(IDMRC | QTE | CDB10 | FSCSI | FCLK, base + CONF_REG_3); outb(0x83 | EAN, base + CONF_REG_4); outb(IE | WSE0, base + CONF_REG_5); outb(0, base + FEATURE_EN); } static int sym53c416_probeirq(int base, int scsi_id) { int irq, irqs; unsigned long i; /* Clear interrupt register */ inb(base + INT_REG); /* Start probing for irq's */ irqs = probe_irq_on(); /* Reinit chip */ sym53c416_init(base, scsi_id); /* Cause interrupt */ outb(NOOP, base + COMMAND_REG); outb(ILLEGAL, base + COMMAND_REG); outb(0x07, base + DEST_BUS_ID); outb(0x00, base + DEST_BUS_ID); /* Wait for interrupt to occur */ i = jiffies + 20; while(time_before(jiffies, i) && !(inb(base + STATUS_REG) & SCI)) barrier(); if(time_before_eq(i, jiffies)) /* timed out */ return 0; /* Get occurred irq */ irq = probe_irq_off(irqs); sym53c416_init(base, scsi_id); return irq; } /* Setup: sym53c416=base,irq */ void sym53c416_setup(char *str, int *ints) { int i; if(host_index >= MAXHOSTS) { printk(KERN_WARNING "sym53c416: Too many hosts defined\n"); return; } if(ints[0] < 1 || ints[0] > 2) { printk(KERN_ERR "sym53c416: Wrong number of parameters:\n"); printk(KERN_ERR "sym53c416: usage: sym53c416=<base>[,<irq>]\n"); return; } for(i = 0; i < host_index && i >= 0; i++) if(hosts[i].base == ints[1]) i = -2; if(i >= 0) { hosts[host_index].base = ints[1]; hosts[host_index].irq = (ints[0] == 2)? 
ints[2] : 0; host_index++; } } static int sym53c416_test(int base) { outb(RESET_CHIP, base + COMMAND_REG); outb(NOOP, base + COMMAND_REG); if(inb(base + COMMAND_REG) != NOOP) return 0; if(!inb(base + TC_HIGH) || inb(base + TC_HIGH) == 0xFF) return 0; if((inb(base + PIO_INT_REG) & (FULL | EMPTY | CE | OUE | FIE | EIE)) != EMPTY) return 0; return 1; } static struct isapnp_device_id id_table[] __devinitdata = { { ISAPNP_ANY_ID, ISAPNP_ANY_ID, ISAPNP_VENDOR('S','L','I'), ISAPNP_FUNCTION(0x4161), 0 }, { ISAPNP_ANY_ID, ISAPNP_ANY_ID, ISAPNP_VENDOR('S','L','I'), ISAPNP_FUNCTION(0x4163), 0 }, { ISAPNP_DEVICE_SINGLE_END } }; MODULE_DEVICE_TABLE(isapnp, id_table); static void sym53c416_probe(void) { int *base = probeaddrs; int ints[2]; ints[0] = 1; for(; *base; base++) { if (request_region(*base, IO_RANGE, ID)) { if (sym53c416_test(*base)) { ints[1] = *base; sym53c416_setup(NULL, ints); } release_region(*base, IO_RANGE); } } } int __init sym53c416_detect(struct scsi_host_template *tpnt) { unsigned long flags; struct Scsi_Host * shpnt = NULL; int i; int count; struct pnp_dev *idev = NULL; #ifdef MODULE int ints[3]; ints[0] = 2; if(sym53c416_base[0]) { ints[1] = sym53c416_base[0]; ints[2] = sym53c416_base[1]; sym53c416_setup(NULL, ints); } if(sym53c416_base_1[0]) { ints[1] = sym53c416_base_1[0]; ints[2] = sym53c416_base_1[1]; sym53c416_setup(NULL, ints); } if(sym53c416_base_2[0]) { ints[1] = sym53c416_base_2[0]; ints[2] = sym53c416_base_2[1]; sym53c416_setup(NULL, ints); } if(sym53c416_base_3[0]) { ints[1] = sym53c416_base_3[0]; ints[2] = sym53c416_base_3[1]; sym53c416_setup(NULL, ints); } #endif printk(KERN_INFO "sym53c416.c: %s\n", VERSION_STRING); for (i=0; id_table[i].vendor != 0; i++) { while((idev=pnp_find_dev(NULL, id_table[i].vendor, id_table[i].function, idev))!=NULL) { int i[3]; if(pnp_device_attach(idev)<0) { printk(KERN_WARNING "sym53c416: unable to attach PnP device.\n"); continue; } if(pnp_activate_dev(idev) < 0) { printk(KERN_WARNING "sym53c416: unable to 
activate PnP device.\n"); pnp_device_detach(idev); continue; } i[0] = 2; i[1] = pnp_port_start(idev, 0); i[2] = pnp_irq(idev, 0); printk(KERN_INFO "sym53c416: ISAPnP card found and configured at 0x%X, IRQ %d.\n", i[1], i[2]); sym53c416_setup(NULL, i); } } sym53c416_probe(); /* Now we register and set up each host adapter found... */ for(count = 0, i = 0; i < host_index; i++) { if (!request_region(hosts[i].base, IO_RANGE, ID)) continue; if (!sym53c416_test(hosts[i].base)) { printk(KERN_WARNING "No sym53c416 found at address 0x%03x\n", hosts[i].base); goto fail_release_region; } /* We don't have an irq yet, so we should probe for one */ if (!hosts[i].irq) hosts[i].irq = sym53c416_probeirq(hosts[i].base, hosts[i].scsi_id); if (!hosts[i].irq) goto fail_release_region; shpnt = scsi_register(tpnt, 0); if (!shpnt) goto fail_release_region; /* Request for specified IRQ */ if (request_irq(hosts[i].irq, sym53c416_intr_handle, 0, ID, shpnt)) goto fail_free_host; spin_lock_irqsave(&sym53c416_lock, flags); shpnt->unique_id = hosts[i].base; shpnt->io_port = hosts[i].base; shpnt->n_io_port = IO_RANGE; shpnt->irq = hosts[i].irq; shpnt->this_id = hosts[i].scsi_id; sym53c416_init(hosts[i].base, hosts[i].scsi_id); count++; spin_unlock_irqrestore(&sym53c416_lock, flags); continue; fail_free_host: scsi_unregister(shpnt); fail_release_region: release_region(hosts[i].base, IO_RANGE); } return count; } const char *sym53c416_info(struct Scsi_Host *SChost) { int i; int base = SChost->io_port; int irq = SChost->irq; int scsi_id = 0; int rev = inb(base + TC_HIGH); for(i = 0; i < host_index; i++) if(hosts[i].base == base) scsi_id = hosts[i].scsi_id; sprintf(info, "Symbios Logic 53c416 (rev. %d) at 0x%03x, irq %d, SCSI-ID %d, %s pio", rev, base, irq, scsi_id, (fastpio)? 
"fast" : "slow"); return info; } static int sym53c416_queuecommand_lck(Scsi_Cmnd *SCpnt, void (*done)(Scsi_Cmnd *)) { int base; unsigned long flags = 0; int i; /* Store base register as we can have more than one controller in the system */ base = SCpnt->device->host->io_port; current_command = SCpnt; /* set current command */ current_command->scsi_done = done; /* set ptr to done function */ current_command->SCp.phase = command_ph; /* currect phase is the command phase */ current_command->SCp.Status = 0; current_command->SCp.Message = 0; spin_lock_irqsave(&sym53c416_lock, flags); outb(scmd_id(SCpnt), base + DEST_BUS_ID); /* Set scsi id target */ outb(FLUSH_FIFO, base + COMMAND_REG); /* Flush SCSI and PIO FIFO's */ /* Write SCSI command into the SCSI fifo */ for(i = 0; i < SCpnt->cmd_len; i++) outb(SCpnt->cmnd[i], base + SCSI_FIFO); /* Start selection sequence */ outb(SEL_WITHOUT_ATN_SEQ, base + COMMAND_REG); /* Now an interrupt will be generated which we will catch in out interrupt routine */ spin_unlock_irqrestore(&sym53c416_lock, flags); return 0; } DEF_SCSI_QCMD(sym53c416_queuecommand) static int sym53c416_host_reset(Scsi_Cmnd *SCpnt) { int base; int scsi_id = -1; int i; unsigned long flags; spin_lock_irqsave(&sym53c416_lock, flags); /* printk("sym53c416_reset\n"); */ base = SCpnt->device->host->io_port; /* search scsi_id - fixme, we shouldn't need to iterate for this! 
*/ for(i = 0; i < host_index && scsi_id == -1; i++) if(hosts[i].base == base) scsi_id = hosts[i].scsi_id; outb(RESET_CHIP, base + COMMAND_REG); outb(NOOP | PIO_MODE, base + COMMAND_REG); outb(RESET_SCSI_BUS, base + COMMAND_REG); sym53c416_init(base, scsi_id); spin_unlock_irqrestore(&sym53c416_lock, flags); return SUCCESS; } static int sym53c416_release(struct Scsi_Host *shost) { if (shost->irq) free_irq(shost->irq, shost); if (shost->io_port && shost->n_io_port) release_region(shost->io_port, shost->n_io_port); return 0; } static int sym53c416_bios_param(struct scsi_device *sdev, struct block_device *dev, sector_t capacity, int *ip) { int size; size = capacity; ip[0] = 64; /* heads */ ip[1] = 32; /* sectors */ if((ip[2] = size >> 11) > 1024) /* cylinders, test for big disk */ { ip[0] = 255; /* heads */ ip[1] = 63; /* sectors */ ip[2] = size / (255 * 63); /* cylinders */ } return 0; } /* Loadable module support */ #ifdef MODULE MODULE_AUTHOR("Lieven Willems"); MODULE_LICENSE("GPL"); module_param_array(sym53c416, uint, NULL, 0); module_param_array(sym53c416_1, uint, NULL, 0); module_param_array(sym53c416_2, uint, NULL, 0); module_param_array(sym53c416_3, uint, NULL, 0); #endif static struct scsi_host_template driver_template = { .proc_name = "sym53c416", .name = "Symbios Logic 53c416", .detect = sym53c416_detect, .info = sym53c416_info, .queuecommand = sym53c416_queuecommand, .eh_host_reset_handler =sym53c416_host_reset, .release = sym53c416_release, .bios_param = sym53c416_bios_param, .can_queue = 1, .this_id = SYM53C416_SCSI_ID, .sg_tablesize = 32, .cmd_per_lun = 1, .unchecked_isa_dma = 1, .use_clustering = ENABLE_CLUSTERING, }; #include "scsi_module.c"
gpl-2.0
MoKee/android_kernel_samsung_hlte
drivers/net/ethernet/qlogic/qlge/qlge_dbg.c
4987
62846
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/slab.h> #include "qlge.h" /* Read a NIC register from the alternate function. */ static u32 ql_read_other_func_reg(struct ql_adapter *qdev, u32 reg) { u32 register_to_read; u32 reg_val; unsigned int status = 0; register_to_read = MPI_NIC_REG_BLOCK | MPI_NIC_READ | (qdev->alt_func << MPI_NIC_FUNCTION_SHIFT) | reg; status = ql_read_mpi_reg(qdev, register_to_read, &reg_val); if (status != 0) return 0xffffffff; return reg_val; } /* Write a NIC register from the alternate function. */ static int ql_write_other_func_reg(struct ql_adapter *qdev, u32 reg, u32 reg_val) { u32 register_to_read; int status = 0; register_to_read = MPI_NIC_REG_BLOCK | MPI_NIC_READ | (qdev->alt_func << MPI_NIC_FUNCTION_SHIFT) | reg; status = ql_write_mpi_reg(qdev, register_to_read, reg_val); return status; } static int ql_wait_other_func_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit) { u32 temp; int count = 10; while (count) { temp = ql_read_other_func_reg(qdev, reg); /* check for errors */ if (temp & err_bit) return -1; else if (temp & bit) return 0; mdelay(10); count--; } return -1; } static int ql_read_other_func_serdes_reg(struct ql_adapter *qdev, u32 reg, u32 *data) { int status; /* wait for reg to come ready */ status = ql_wait_other_func_reg_rdy(qdev, XG_SERDES_ADDR / 4, XG_SERDES_ADDR_RDY, 0); if (status) goto exit; /* set up for reg read */ ql_write_other_func_reg(qdev, XG_SERDES_ADDR/4, reg | PROC_ADDR_R); /* wait for reg to come ready */ status = ql_wait_other_func_reg_rdy(qdev, XG_SERDES_ADDR / 4, XG_SERDES_ADDR_RDY, 0); if (status) goto exit; /* get the data */ *data = ql_read_other_func_reg(qdev, (XG_SERDES_DATA / 4)); exit: return status; } /* Read out the SERDES registers */ static int ql_read_serdes_reg(struct ql_adapter *qdev, u32 reg, u32 * data) { int status; /* wait for reg to come ready */ status = ql_wait_reg_rdy(qdev, XG_SERDES_ADDR, XG_SERDES_ADDR_RDY, 0); if (status) goto exit; /* set up for reg 
read */ ql_write32(qdev, XG_SERDES_ADDR, reg | PROC_ADDR_R); /* wait for reg to come ready */ status = ql_wait_reg_rdy(qdev, XG_SERDES_ADDR, XG_SERDES_ADDR_RDY, 0); if (status) goto exit; /* get the data */ *data = ql_read32(qdev, XG_SERDES_DATA); exit: return status; } static void ql_get_both_serdes(struct ql_adapter *qdev, u32 addr, u32 *direct_ptr, u32 *indirect_ptr, unsigned int direct_valid, unsigned int indirect_valid) { unsigned int status; status = 1; if (direct_valid) status = ql_read_serdes_reg(qdev, addr, direct_ptr); /* Dead fill any failures or invalids. */ if (status) *direct_ptr = 0xDEADBEEF; status = 1; if (indirect_valid) status = ql_read_other_func_serdes_reg( qdev, addr, indirect_ptr); /* Dead fill any failures or invalids. */ if (status) *indirect_ptr = 0xDEADBEEF; } static int ql_get_serdes_regs(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump) { int status; unsigned int xfi_direct_valid, xfi_indirect_valid, xaui_direct_valid; unsigned int xaui_indirect_valid, i; u32 *direct_ptr, temp; u32 *indirect_ptr; xfi_direct_valid = xfi_indirect_valid = 0; xaui_direct_valid = xaui_indirect_valid = 1; /* The XAUI needs to be read out per port */ if (qdev->func & 1) { /* We are NIC 2 */ status = ql_read_other_func_serdes_reg(qdev, XG_SERDES_XAUI_HSS_PCS_START, &temp); if (status) temp = XG_SERDES_ADDR_XAUI_PWR_DOWN; if ((temp & XG_SERDES_ADDR_XAUI_PWR_DOWN) == XG_SERDES_ADDR_XAUI_PWR_DOWN) xaui_indirect_valid = 0; status = ql_read_serdes_reg(qdev, XG_SERDES_XAUI_HSS_PCS_START, &temp); if (status) temp = XG_SERDES_ADDR_XAUI_PWR_DOWN; if ((temp & XG_SERDES_ADDR_XAUI_PWR_DOWN) == XG_SERDES_ADDR_XAUI_PWR_DOWN) xaui_direct_valid = 0; } else { /* We are NIC 1 */ status = ql_read_other_func_serdes_reg(qdev, XG_SERDES_XAUI_HSS_PCS_START, &temp); if (status) temp = XG_SERDES_ADDR_XAUI_PWR_DOWN; if ((temp & XG_SERDES_ADDR_XAUI_PWR_DOWN) == XG_SERDES_ADDR_XAUI_PWR_DOWN) xaui_indirect_valid = 0; status = ql_read_serdes_reg(qdev, 
XG_SERDES_XAUI_HSS_PCS_START, &temp); if (status) temp = XG_SERDES_ADDR_XAUI_PWR_DOWN; if ((temp & XG_SERDES_ADDR_XAUI_PWR_DOWN) == XG_SERDES_ADDR_XAUI_PWR_DOWN) xaui_direct_valid = 0; } /* * XFI register is shared so only need to read one * functions and then check the bits. */ status = ql_read_serdes_reg(qdev, XG_SERDES_ADDR_STS, &temp); if (status) temp = 0; if ((temp & XG_SERDES_ADDR_XFI1_PWR_UP) == XG_SERDES_ADDR_XFI1_PWR_UP) { /* now see if i'm NIC 1 or NIC 2 */ if (qdev->func & 1) /* I'm NIC 2, so the indirect (NIC1) xfi is up. */ xfi_indirect_valid = 1; else xfi_direct_valid = 1; } if ((temp & XG_SERDES_ADDR_XFI2_PWR_UP) == XG_SERDES_ADDR_XFI2_PWR_UP) { /* now see if i'm NIC 1 or NIC 2 */ if (qdev->func & 1) /* I'm NIC 2, so the indirect (NIC1) xfi is up. */ xfi_direct_valid = 1; else xfi_indirect_valid = 1; } /* Get XAUI_AN register block. */ if (qdev->func & 1) { /* Function 2 is direct */ direct_ptr = mpi_coredump->serdes2_xaui_an; indirect_ptr = mpi_coredump->serdes_xaui_an; } else { /* Function 1 is direct */ direct_ptr = mpi_coredump->serdes_xaui_an; indirect_ptr = mpi_coredump->serdes2_xaui_an; } for (i = 0; i <= 0x000000034; i += 4, direct_ptr++, indirect_ptr++) ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr, xaui_direct_valid, xaui_indirect_valid); /* Get XAUI_HSS_PCS register block. */ if (qdev->func & 1) { direct_ptr = mpi_coredump->serdes2_xaui_hss_pcs; indirect_ptr = mpi_coredump->serdes_xaui_hss_pcs; } else { direct_ptr = mpi_coredump->serdes_xaui_hss_pcs; indirect_ptr = mpi_coredump->serdes2_xaui_hss_pcs; } for (i = 0x800; i <= 0x880; i += 4, direct_ptr++, indirect_ptr++) ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr, xaui_direct_valid, xaui_indirect_valid); /* Get XAUI_XFI_AN register block. 
*/ if (qdev->func & 1) { direct_ptr = mpi_coredump->serdes2_xfi_an; indirect_ptr = mpi_coredump->serdes_xfi_an; } else { direct_ptr = mpi_coredump->serdes_xfi_an; indirect_ptr = mpi_coredump->serdes2_xfi_an; } for (i = 0x1000; i <= 0x1034; i += 4, direct_ptr++, indirect_ptr++) ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr, xfi_direct_valid, xfi_indirect_valid); /* Get XAUI_XFI_TRAIN register block. */ if (qdev->func & 1) { direct_ptr = mpi_coredump->serdes2_xfi_train; indirect_ptr = mpi_coredump->serdes_xfi_train; } else { direct_ptr = mpi_coredump->serdes_xfi_train; indirect_ptr = mpi_coredump->serdes2_xfi_train; } for (i = 0x1050; i <= 0x107c; i += 4, direct_ptr++, indirect_ptr++) ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr, xfi_direct_valid, xfi_indirect_valid); /* Get XAUI_XFI_HSS_PCS register block. */ if (qdev->func & 1) { direct_ptr = mpi_coredump->serdes2_xfi_hss_pcs; indirect_ptr = mpi_coredump->serdes_xfi_hss_pcs; } else { direct_ptr = mpi_coredump->serdes_xfi_hss_pcs; indirect_ptr = mpi_coredump->serdes2_xfi_hss_pcs; } for (i = 0x1800; i <= 0x1838; i += 4, direct_ptr++, indirect_ptr++) ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr, xfi_direct_valid, xfi_indirect_valid); /* Get XAUI_XFI_HSS_TX register block. */ if (qdev->func & 1) { direct_ptr = mpi_coredump->serdes2_xfi_hss_tx; indirect_ptr = mpi_coredump->serdes_xfi_hss_tx; } else { direct_ptr = mpi_coredump->serdes_xfi_hss_tx; indirect_ptr = mpi_coredump->serdes2_xfi_hss_tx; } for (i = 0x1c00; i <= 0x1c1f; i++, direct_ptr++, indirect_ptr++) ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr, xfi_direct_valid, xfi_indirect_valid); /* Get XAUI_XFI_HSS_RX register block. 
*/ if (qdev->func & 1) { direct_ptr = mpi_coredump->serdes2_xfi_hss_rx; indirect_ptr = mpi_coredump->serdes_xfi_hss_rx; } else { direct_ptr = mpi_coredump->serdes_xfi_hss_rx; indirect_ptr = mpi_coredump->serdes2_xfi_hss_rx; } for (i = 0x1c40; i <= 0x1c5f; i++, direct_ptr++, indirect_ptr++) ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr, xfi_direct_valid, xfi_indirect_valid); /* Get XAUI_XFI_HSS_PLL register block. */ if (qdev->func & 1) { direct_ptr = mpi_coredump->serdes2_xfi_hss_pll; indirect_ptr = mpi_coredump->serdes_xfi_hss_pll; } else { direct_ptr = mpi_coredump->serdes_xfi_hss_pll; indirect_ptr = mpi_coredump->serdes2_xfi_hss_pll; } for (i = 0x1e00; i <= 0x1e1f; i++, direct_ptr++, indirect_ptr++) ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr, xfi_direct_valid, xfi_indirect_valid); return 0; } static int ql_read_other_func_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data) { int status = 0; /* wait for reg to come ready */ status = ql_wait_other_func_reg_rdy(qdev, XGMAC_ADDR / 4, XGMAC_ADDR_RDY, XGMAC_ADDR_XME); if (status) goto exit; /* set up for reg read */ ql_write_other_func_reg(qdev, XGMAC_ADDR / 4, reg | XGMAC_ADDR_R); /* wait for reg to come ready */ status = ql_wait_other_func_reg_rdy(qdev, XGMAC_ADDR / 4, XGMAC_ADDR_RDY, XGMAC_ADDR_XME); if (status) goto exit; /* get the data */ *data = ql_read_other_func_reg(qdev, XGMAC_DATA / 4); exit: return status; } /* Read the 400 xgmac control/statistics registers * skipping unused locations. */ static int ql_get_xgmac_regs(struct ql_adapter *qdev, u32 * buf, unsigned int other_function) { int status = 0; int i; for (i = PAUSE_SRC_LO; i < XGMAC_REGISTER_END; i += 4, buf++) { /* We're reading 400 xgmac registers, but we filter out * serveral locations that are non-responsive to reads. 
*/ if ((i == 0x00000114) || (i == 0x00000118) || (i == 0x0000013c) || (i == 0x00000140) || (i > 0x00000150 && i < 0x000001fc) || (i > 0x00000278 && i < 0x000002a0) || (i > 0x000002c0 && i < 0x000002cf) || (i > 0x000002dc && i < 0x000002f0) || (i > 0x000003c8 && i < 0x00000400) || (i > 0x00000400 && i < 0x00000410) || (i > 0x00000410 && i < 0x00000420) || (i > 0x00000420 && i < 0x00000430) || (i > 0x00000430 && i < 0x00000440) || (i > 0x00000440 && i < 0x00000450) || (i > 0x00000450 && i < 0x00000500) || (i > 0x0000054c && i < 0x00000568) || (i > 0x000005c8 && i < 0x00000600)) { if (other_function) status = ql_read_other_func_xgmac_reg(qdev, i, buf); else status = ql_read_xgmac_reg(qdev, i, buf); if (status) *buf = 0xdeadbeef; break; } } return status; } static int ql_get_ets_regs(struct ql_adapter *qdev, u32 * buf) { int status = 0; int i; for (i = 0; i < 8; i++, buf++) { ql_write32(qdev, NIC_ETS, i << 29 | 0x08000000); *buf = ql_read32(qdev, NIC_ETS); } for (i = 0; i < 2; i++, buf++) { ql_write32(qdev, CNA_ETS, i << 29 | 0x08000000); *buf = ql_read32(qdev, CNA_ETS); } return status; } static void ql_get_intr_states(struct ql_adapter *qdev, u32 * buf) { int i; for (i = 0; i < qdev->rx_ring_count; i++, buf++) { ql_write32(qdev, INTR_EN, qdev->intr_context[i].intr_read_mask); *buf = ql_read32(qdev, INTR_EN); } } static int ql_get_cam_entries(struct ql_adapter *qdev, u32 * buf) { int i, status; u32 value[3]; status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK); if (status) return status; for (i = 0; i < 16; i++) { status = ql_get_mac_addr_reg(qdev, MAC_ADDR_TYPE_CAM_MAC, i, value); if (status) { netif_err(qdev, drv, qdev->ndev, "Failed read of mac index register\n"); goto err; } *buf++ = value[0]; /* lower MAC address */ *buf++ = value[1]; /* upper MAC address */ *buf++ = value[2]; /* output */ } for (i = 0; i < 32; i++) { status = ql_get_mac_addr_reg(qdev, MAC_ADDR_TYPE_MULTI_MAC, i, value); if (status) { netif_err(qdev, drv, qdev->ndev, "Failed read of mac index 
register\n"); goto err; } *buf++ = value[0]; /* lower Mcast address */ *buf++ = value[1]; /* upper Mcast address */ } err: ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK); return status; } static int ql_get_routing_entries(struct ql_adapter *qdev, u32 * buf) { int status; u32 value, i; status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK); if (status) return status; for (i = 0; i < 16; i++) { status = ql_get_routing_reg(qdev, i, &value); if (status) { netif_err(qdev, drv, qdev->ndev, "Failed read of routing index register\n"); goto err; } else { *buf++ = value; } } err: ql_sem_unlock(qdev, SEM_RT_IDX_MASK); return status; } /* Read the MPI Processor shadow registers */ static int ql_get_mpi_shadow_regs(struct ql_adapter *qdev, u32 * buf) { u32 i; int status; for (i = 0; i < MPI_CORE_SH_REGS_CNT; i++, buf++) { status = ql_write_mpi_reg(qdev, RISC_124, (SHADOW_OFFSET | i << SHADOW_REG_SHIFT)); if (status) goto end; status = ql_read_mpi_reg(qdev, RISC_127, buf); if (status) goto end; } end: return status; } /* Read the MPI Processor core registers */ static int ql_get_mpi_regs(struct ql_adapter *qdev, u32 * buf, u32 offset, u32 count) { int i, status = 0; for (i = 0; i < count; i++, buf++) { status = ql_read_mpi_reg(qdev, offset + i, buf); if (status) return status; } return status; } /* Read the ASIC probe dump */ static unsigned int *ql_get_probe(struct ql_adapter *qdev, u32 clock, u32 valid, u32 *buf) { u32 module, mux_sel, probe, lo_val, hi_val; for (module = 0; module < PRB_MX_ADDR_MAX_MODS; module++) { if (!((valid >> module) & 1)) continue; for (mux_sel = 0; mux_sel < PRB_MX_ADDR_MAX_MUX; mux_sel++) { probe = clock | PRB_MX_ADDR_ARE | mux_sel | (module << PRB_MX_ADDR_MOD_SEL_SHIFT); ql_write32(qdev, PRB_MX_ADDR, probe); lo_val = ql_read32(qdev, PRB_MX_DATA); if (mux_sel == 0) { *buf = probe; buf++; } probe |= PRB_MX_ADDR_UP; ql_write32(qdev, PRB_MX_ADDR, probe); hi_val = ql_read32(qdev, PRB_MX_DATA); *buf = lo_val; buf++; *buf = hi_val; buf++; } } return buf; } static int 
ql_get_probe_dump(struct ql_adapter *qdev, unsigned int *buf) { /* First we have to enable the probe mux */ ql_write_mpi_reg(qdev, MPI_TEST_FUNC_PRB_CTL, MPI_TEST_FUNC_PRB_EN); buf = ql_get_probe(qdev, PRB_MX_ADDR_SYS_CLOCK, PRB_MX_ADDR_VALID_SYS_MOD, buf); buf = ql_get_probe(qdev, PRB_MX_ADDR_PCI_CLOCK, PRB_MX_ADDR_VALID_PCI_MOD, buf); buf = ql_get_probe(qdev, PRB_MX_ADDR_XGM_CLOCK, PRB_MX_ADDR_VALID_XGM_MOD, buf); buf = ql_get_probe(qdev, PRB_MX_ADDR_FC_CLOCK, PRB_MX_ADDR_VALID_FC_MOD, buf); return 0; } /* Read out the routing index registers */ static int ql_get_routing_index_registers(struct ql_adapter *qdev, u32 *buf) { int status; u32 type, index, index_max; u32 result_index; u32 result_data; u32 val; status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK); if (status) return status; for (type = 0; type < 4; type++) { if (type < 2) index_max = 8; else index_max = 16; for (index = 0; index < index_max; index++) { val = RT_IDX_RS | (type << RT_IDX_TYPE_SHIFT) | (index << RT_IDX_IDX_SHIFT); ql_write32(qdev, RT_IDX, val); result_index = 0; while ((result_index & RT_IDX_MR) == 0) result_index = ql_read32(qdev, RT_IDX); result_data = ql_read32(qdev, RT_DATA); *buf = type; buf++; *buf = index; buf++; *buf = result_index; buf++; *buf = result_data; buf++; } } ql_sem_unlock(qdev, SEM_RT_IDX_MASK); return status; } /* Read out the MAC protocol registers */ static void ql_get_mac_protocol_registers(struct ql_adapter *qdev, u32 *buf) { u32 result_index, result_data; u32 type; u32 index; u32 offset; u32 val; u32 initial_val = MAC_ADDR_RS; u32 max_index; u32 max_offset; for (type = 0; type < MAC_ADDR_TYPE_COUNT; type++) { switch (type) { case 0: /* CAM */ initial_val |= MAC_ADDR_ADR; max_index = MAC_ADDR_MAX_CAM_ENTRIES; max_offset = MAC_ADDR_MAX_CAM_WCOUNT; break; case 1: /* Multicast MAC Address */ max_index = MAC_ADDR_MAX_CAM_WCOUNT; max_offset = MAC_ADDR_MAX_CAM_WCOUNT; break; case 2: /* VLAN filter mask */ case 3: /* MC filter mask */ max_index = MAC_ADDR_MAX_CAM_WCOUNT; 
max_offset = MAC_ADDR_MAX_CAM_WCOUNT; break; case 4: /* FC MAC addresses */ max_index = MAC_ADDR_MAX_FC_MAC_ENTRIES; max_offset = MAC_ADDR_MAX_FC_MAC_WCOUNT; break; case 5: /* Mgmt MAC addresses */ max_index = MAC_ADDR_MAX_MGMT_MAC_ENTRIES; max_offset = MAC_ADDR_MAX_MGMT_MAC_WCOUNT; break; case 6: /* Mgmt VLAN addresses */ max_index = MAC_ADDR_MAX_MGMT_VLAN_ENTRIES; max_offset = MAC_ADDR_MAX_MGMT_VLAN_WCOUNT; break; case 7: /* Mgmt IPv4 address */ max_index = MAC_ADDR_MAX_MGMT_V4_ENTRIES; max_offset = MAC_ADDR_MAX_MGMT_V4_WCOUNT; break; case 8: /* Mgmt IPv6 address */ max_index = MAC_ADDR_MAX_MGMT_V6_ENTRIES; max_offset = MAC_ADDR_MAX_MGMT_V6_WCOUNT; break; case 9: /* Mgmt TCP/UDP Dest port */ max_index = MAC_ADDR_MAX_MGMT_TU_DP_ENTRIES; max_offset = MAC_ADDR_MAX_MGMT_TU_DP_WCOUNT; break; default: pr_err("Bad type!!! 0x%08x\n", type); max_index = 0; max_offset = 0; break; } for (index = 0; index < max_index; index++) { for (offset = 0; offset < max_offset; offset++) { val = initial_val | (type << MAC_ADDR_TYPE_SHIFT) | (index << MAC_ADDR_IDX_SHIFT) | (offset); ql_write32(qdev, MAC_ADDR_IDX, val); result_index = 0; while ((result_index & MAC_ADDR_MR) == 0) { result_index = ql_read32(qdev, MAC_ADDR_IDX); } result_data = ql_read32(qdev, MAC_ADDR_DATA); *buf = result_index; buf++; *buf = result_data; buf++; } } } } static void ql_get_sem_registers(struct ql_adapter *qdev, u32 *buf) { u32 func_num, reg, reg_val; int status; for (func_num = 0; func_num < MAX_SEMAPHORE_FUNCTIONS ; func_num++) { reg = MPI_NIC_REG_BLOCK | (func_num << MPI_NIC_FUNCTION_SHIFT) | (SEM / 4); status = ql_read_mpi_reg(qdev, reg, &reg_val); *buf = reg_val; /* if the read failed then dead fill the element. 
*/ if (!status) *buf = 0xdeadbeef; buf++; } } /* Create a coredump segment header */ static void ql_build_coredump_seg_header( struct mpi_coredump_segment_header *seg_hdr, u32 seg_number, u32 seg_size, u8 *desc) { memset(seg_hdr, 0, sizeof(struct mpi_coredump_segment_header)); seg_hdr->cookie = MPI_COREDUMP_COOKIE; seg_hdr->segNum = seg_number; seg_hdr->segSize = seg_size; memcpy(seg_hdr->description, desc, (sizeof(seg_hdr->description)) - 1); } /* * This function should be called when a coredump / probedump * is to be extracted from the HBA. It is assumed there is a * qdev structure that contains the base address of the register * space for this function as well as a coredump structure that * will contain the dump. */ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump) { int status; int i; if (!mpi_coredump) { netif_err(qdev, drv, qdev->ndev, "No memory available\n"); return -ENOMEM; } /* Try to get the spinlock, but dont worry if * it isn't available. If the firmware died it * might be holding the sem. */ ql_sem_spinlock(qdev, SEM_PROC_REG_MASK); status = ql_pause_mpi_risc(qdev); if (status) { netif_err(qdev, drv, qdev->ndev, "Failed RISC pause. 
Status = 0x%.08x\n", status); goto err; } /* Insert the global header */ memset(&(mpi_coredump->mpi_global_header), 0, sizeof(struct mpi_coredump_global_header)); mpi_coredump->mpi_global_header.cookie = MPI_COREDUMP_COOKIE; mpi_coredump->mpi_global_header.headerSize = sizeof(struct mpi_coredump_global_header); mpi_coredump->mpi_global_header.imageSize = sizeof(struct ql_mpi_coredump); memcpy(mpi_coredump->mpi_global_header.idString, "MPI Coredump", sizeof(mpi_coredump->mpi_global_header.idString)); /* Get generic NIC reg dump */ ql_build_coredump_seg_header(&mpi_coredump->nic_regs_seg_hdr, NIC1_CONTROL_SEG_NUM, sizeof(struct mpi_coredump_segment_header) + sizeof(mpi_coredump->nic_regs), "NIC1 Registers"); ql_build_coredump_seg_header(&mpi_coredump->nic2_regs_seg_hdr, NIC2_CONTROL_SEG_NUM, sizeof(struct mpi_coredump_segment_header) + sizeof(mpi_coredump->nic2_regs), "NIC2 Registers"); /* Get XGMac registers. (Segment 18, Rev C. step 21) */ ql_build_coredump_seg_header(&mpi_coredump->xgmac1_seg_hdr, NIC1_XGMAC_SEG_NUM, sizeof(struct mpi_coredump_segment_header) + sizeof(mpi_coredump->xgmac1), "NIC1 XGMac Registers"); ql_build_coredump_seg_header(&mpi_coredump->xgmac2_seg_hdr, NIC2_XGMAC_SEG_NUM, sizeof(struct mpi_coredump_segment_header) + sizeof(mpi_coredump->xgmac2), "NIC2 XGMac Registers"); if (qdev->func & 1) { /* Odd means our function is NIC 2 */ for (i = 0; i < NIC_REGS_DUMP_WORD_COUNT; i++) mpi_coredump->nic2_regs[i] = ql_read32(qdev, i * sizeof(u32)); for (i = 0; i < NIC_REGS_DUMP_WORD_COUNT; i++) mpi_coredump->nic_regs[i] = ql_read_other_func_reg(qdev, (i * sizeof(u32)) / 4); ql_get_xgmac_regs(qdev, &mpi_coredump->xgmac2[0], 0); ql_get_xgmac_regs(qdev, &mpi_coredump->xgmac1[0], 1); } else { /* Even means our function is NIC 1 */ for (i = 0; i < NIC_REGS_DUMP_WORD_COUNT; i++) mpi_coredump->nic_regs[i] = ql_read32(qdev, i * sizeof(u32)); for (i = 0; i < NIC_REGS_DUMP_WORD_COUNT; i++) mpi_coredump->nic2_regs[i] = ql_read_other_func_reg(qdev, (i * sizeof(u32)) 
/ 4); ql_get_xgmac_regs(qdev, &mpi_coredump->xgmac1[0], 0); ql_get_xgmac_regs(qdev, &mpi_coredump->xgmac2[0], 1); } /* Rev C. Step 20a */ ql_build_coredump_seg_header(&mpi_coredump->xaui_an_hdr, XAUI_AN_SEG_NUM, sizeof(struct mpi_coredump_segment_header) + sizeof(mpi_coredump->serdes_xaui_an), "XAUI AN Registers"); /* Rev C. Step 20b */ ql_build_coredump_seg_header(&mpi_coredump->xaui_hss_pcs_hdr, XAUI_HSS_PCS_SEG_NUM, sizeof(struct mpi_coredump_segment_header) + sizeof(mpi_coredump->serdes_xaui_hss_pcs), "XAUI HSS PCS Registers"); ql_build_coredump_seg_header(&mpi_coredump->xfi_an_hdr, XFI_AN_SEG_NUM, sizeof(struct mpi_coredump_segment_header) + sizeof(mpi_coredump->serdes_xfi_an), "XFI AN Registers"); ql_build_coredump_seg_header(&mpi_coredump->xfi_train_hdr, XFI_TRAIN_SEG_NUM, sizeof(struct mpi_coredump_segment_header) + sizeof(mpi_coredump->serdes_xfi_train), "XFI TRAIN Registers"); ql_build_coredump_seg_header(&mpi_coredump->xfi_hss_pcs_hdr, XFI_HSS_PCS_SEG_NUM, sizeof(struct mpi_coredump_segment_header) + sizeof(mpi_coredump->serdes_xfi_hss_pcs), "XFI HSS PCS Registers"); ql_build_coredump_seg_header(&mpi_coredump->xfi_hss_tx_hdr, XFI_HSS_TX_SEG_NUM, sizeof(struct mpi_coredump_segment_header) + sizeof(mpi_coredump->serdes_xfi_hss_tx), "XFI HSS TX Registers"); ql_build_coredump_seg_header(&mpi_coredump->xfi_hss_rx_hdr, XFI_HSS_RX_SEG_NUM, sizeof(struct mpi_coredump_segment_header) + sizeof(mpi_coredump->serdes_xfi_hss_rx), "XFI HSS RX Registers"); ql_build_coredump_seg_header(&mpi_coredump->xfi_hss_pll_hdr, XFI_HSS_PLL_SEG_NUM, sizeof(struct mpi_coredump_segment_header) + sizeof(mpi_coredump->serdes_xfi_hss_pll), "XFI HSS PLL Registers"); ql_build_coredump_seg_header(&mpi_coredump->xaui2_an_hdr, XAUI2_AN_SEG_NUM, sizeof(struct mpi_coredump_segment_header) + sizeof(mpi_coredump->serdes2_xaui_an), "XAUI2 AN Registers"); ql_build_coredump_seg_header(&mpi_coredump->xaui2_hss_pcs_hdr, XAUI2_HSS_PCS_SEG_NUM, sizeof(struct mpi_coredump_segment_header) + 
sizeof(mpi_coredump->serdes2_xaui_hss_pcs), "XAUI2 HSS PCS Registers"); ql_build_coredump_seg_header(&mpi_coredump->xfi2_an_hdr, XFI2_AN_SEG_NUM, sizeof(struct mpi_coredump_segment_header) + sizeof(mpi_coredump->serdes2_xfi_an), "XFI2 AN Registers"); ql_build_coredump_seg_header(&mpi_coredump->xfi2_train_hdr, XFI2_TRAIN_SEG_NUM, sizeof(struct mpi_coredump_segment_header) + sizeof(mpi_coredump->serdes2_xfi_train), "XFI2 TRAIN Registers"); ql_build_coredump_seg_header(&mpi_coredump->xfi2_hss_pcs_hdr, XFI2_HSS_PCS_SEG_NUM, sizeof(struct mpi_coredump_segment_header) + sizeof(mpi_coredump->serdes2_xfi_hss_pcs), "XFI2 HSS PCS Registers"); ql_build_coredump_seg_header(&mpi_coredump->xfi2_hss_tx_hdr, XFI2_HSS_TX_SEG_NUM, sizeof(struct mpi_coredump_segment_header) + sizeof(mpi_coredump->serdes2_xfi_hss_tx), "XFI2 HSS TX Registers"); ql_build_coredump_seg_header(&mpi_coredump->xfi2_hss_rx_hdr, XFI2_HSS_RX_SEG_NUM, sizeof(struct mpi_coredump_segment_header) + sizeof(mpi_coredump->serdes2_xfi_hss_rx), "XFI2 HSS RX Registers"); ql_build_coredump_seg_header(&mpi_coredump->xfi2_hss_pll_hdr, XFI2_HSS_PLL_SEG_NUM, sizeof(struct mpi_coredump_segment_header) + sizeof(mpi_coredump->serdes2_xfi_hss_pll), "XFI2 HSS PLL Registers"); status = ql_get_serdes_regs(qdev, mpi_coredump); if (status) { netif_err(qdev, drv, qdev->ndev, "Failed Dump of Serdes Registers. 
Status = 0x%.08x\n", status); goto err; } ql_build_coredump_seg_header(&mpi_coredump->core_regs_seg_hdr, CORE_SEG_NUM, sizeof(mpi_coredump->core_regs_seg_hdr) + sizeof(mpi_coredump->mpi_core_regs) + sizeof(mpi_coredump->mpi_core_sh_regs), "Core Registers"); /* Get the MPI Core Registers */ status = ql_get_mpi_regs(qdev, &mpi_coredump->mpi_core_regs[0], MPI_CORE_REGS_ADDR, MPI_CORE_REGS_CNT); if (status) goto err; /* Get the 16 MPI shadow registers */ status = ql_get_mpi_shadow_regs(qdev, &mpi_coredump->mpi_core_sh_regs[0]); if (status) goto err; /* Get the Test Logic Registers */ ql_build_coredump_seg_header(&mpi_coredump->test_logic_regs_seg_hdr, TEST_LOGIC_SEG_NUM, sizeof(struct mpi_coredump_segment_header) + sizeof(mpi_coredump->test_logic_regs), "Test Logic Regs"); status = ql_get_mpi_regs(qdev, &mpi_coredump->test_logic_regs[0], TEST_REGS_ADDR, TEST_REGS_CNT); if (status) goto err; /* Get the RMII Registers */ ql_build_coredump_seg_header(&mpi_coredump->rmii_regs_seg_hdr, RMII_SEG_NUM, sizeof(struct mpi_coredump_segment_header) + sizeof(mpi_coredump->rmii_regs), "RMII Registers"); status = ql_get_mpi_regs(qdev, &mpi_coredump->rmii_regs[0], RMII_REGS_ADDR, RMII_REGS_CNT); if (status) goto err; /* Get the FCMAC1 Registers */ ql_build_coredump_seg_header(&mpi_coredump->fcmac1_regs_seg_hdr, FCMAC1_SEG_NUM, sizeof(struct mpi_coredump_segment_header) + sizeof(mpi_coredump->fcmac1_regs), "FCMAC1 Registers"); status = ql_get_mpi_regs(qdev, &mpi_coredump->fcmac1_regs[0], FCMAC1_REGS_ADDR, FCMAC_REGS_CNT); if (status) goto err; /* Get the FCMAC2 Registers */ ql_build_coredump_seg_header(&mpi_coredump->fcmac2_regs_seg_hdr, FCMAC2_SEG_NUM, sizeof(struct mpi_coredump_segment_header) + sizeof(mpi_coredump->fcmac2_regs), "FCMAC2 Registers"); status = ql_get_mpi_regs(qdev, &mpi_coredump->fcmac2_regs[0], FCMAC2_REGS_ADDR, FCMAC_REGS_CNT); if (status) goto err; /* Get the FC1 MBX Registers */ ql_build_coredump_seg_header(&mpi_coredump->fc1_mbx_regs_seg_hdr, FC1_MBOX_SEG_NUM, 
sizeof(struct mpi_coredump_segment_header) + sizeof(mpi_coredump->fc1_mbx_regs), "FC1 MBox Regs"); status = ql_get_mpi_regs(qdev, &mpi_coredump->fc1_mbx_regs[0], FC1_MBX_REGS_ADDR, FC_MBX_REGS_CNT); if (status) goto err; /* Get the IDE Registers */ ql_build_coredump_seg_header(&mpi_coredump->ide_regs_seg_hdr, IDE_SEG_NUM, sizeof(struct mpi_coredump_segment_header) + sizeof(mpi_coredump->ide_regs), "IDE Registers"); status = ql_get_mpi_regs(qdev, &mpi_coredump->ide_regs[0], IDE_REGS_ADDR, IDE_REGS_CNT); if (status) goto err; /* Get the NIC1 MBX Registers */ ql_build_coredump_seg_header(&mpi_coredump->nic1_mbx_regs_seg_hdr, NIC1_MBOX_SEG_NUM, sizeof(struct mpi_coredump_segment_header) + sizeof(mpi_coredump->nic1_mbx_regs), "NIC1 MBox Regs"); status = ql_get_mpi_regs(qdev, &mpi_coredump->nic1_mbx_regs[0], NIC1_MBX_REGS_ADDR, NIC_MBX_REGS_CNT); if (status) goto err; /* Get the SMBus Registers */ ql_build_coredump_seg_header(&mpi_coredump->smbus_regs_seg_hdr, SMBUS_SEG_NUM, sizeof(struct mpi_coredump_segment_header) + sizeof(mpi_coredump->smbus_regs), "SMBus Registers"); status = ql_get_mpi_regs(qdev, &mpi_coredump->smbus_regs[0], SMBUS_REGS_ADDR, SMBUS_REGS_CNT); if (status) goto err; /* Get the FC2 MBX Registers */ ql_build_coredump_seg_header(&mpi_coredump->fc2_mbx_regs_seg_hdr, FC2_MBOX_SEG_NUM, sizeof(struct mpi_coredump_segment_header) + sizeof(mpi_coredump->fc2_mbx_regs), "FC2 MBox Regs"); status = ql_get_mpi_regs(qdev, &mpi_coredump->fc2_mbx_regs[0], FC2_MBX_REGS_ADDR, FC_MBX_REGS_CNT); if (status) goto err; /* Get the NIC2 MBX Registers */ ql_build_coredump_seg_header(&mpi_coredump->nic2_mbx_regs_seg_hdr, NIC2_MBOX_SEG_NUM, sizeof(struct mpi_coredump_segment_header) + sizeof(mpi_coredump->nic2_mbx_regs), "NIC2 MBox Regs"); status = ql_get_mpi_regs(qdev, &mpi_coredump->nic2_mbx_regs[0], NIC2_MBX_REGS_ADDR, NIC_MBX_REGS_CNT); if (status) goto err; /* Get the I2C Registers */ ql_build_coredump_seg_header(&mpi_coredump->i2c_regs_seg_hdr, I2C_SEG_NUM, sizeof(struct 
mpi_coredump_segment_header) + sizeof(mpi_coredump->i2c_regs), "I2C Registers"); status = ql_get_mpi_regs(qdev, &mpi_coredump->i2c_regs[0], I2C_REGS_ADDR, I2C_REGS_CNT); if (status) goto err; /* Get the MEMC Registers */ ql_build_coredump_seg_header(&mpi_coredump->memc_regs_seg_hdr, MEMC_SEG_NUM, sizeof(struct mpi_coredump_segment_header) + sizeof(mpi_coredump->memc_regs), "MEMC Registers"); status = ql_get_mpi_regs(qdev, &mpi_coredump->memc_regs[0], MEMC_REGS_ADDR, MEMC_REGS_CNT); if (status) goto err; /* Get the PBus Registers */ ql_build_coredump_seg_header(&mpi_coredump->pbus_regs_seg_hdr, PBUS_SEG_NUM, sizeof(struct mpi_coredump_segment_header) + sizeof(mpi_coredump->pbus_regs), "PBUS Registers"); status = ql_get_mpi_regs(qdev, &mpi_coredump->pbus_regs[0], PBUS_REGS_ADDR, PBUS_REGS_CNT); if (status) goto err; /* Get the MDE Registers */ ql_build_coredump_seg_header(&mpi_coredump->mde_regs_seg_hdr, MDE_SEG_NUM, sizeof(struct mpi_coredump_segment_header) + sizeof(mpi_coredump->mde_regs), "MDE Registers"); status = ql_get_mpi_regs(qdev, &mpi_coredump->mde_regs[0], MDE_REGS_ADDR, MDE_REGS_CNT); if (status) goto err; ql_build_coredump_seg_header(&mpi_coredump->misc_nic_seg_hdr, MISC_NIC_INFO_SEG_NUM, sizeof(struct mpi_coredump_segment_header) + sizeof(mpi_coredump->misc_nic_info), "MISC NIC INFO"); mpi_coredump->misc_nic_info.rx_ring_count = qdev->rx_ring_count; mpi_coredump->misc_nic_info.tx_ring_count = qdev->tx_ring_count; mpi_coredump->misc_nic_info.intr_count = qdev->intr_count; mpi_coredump->misc_nic_info.function = qdev->func; /* Segment 31 */ /* Get indexed register values. 
*/ ql_build_coredump_seg_header(&mpi_coredump->intr_states_seg_hdr, INTR_STATES_SEG_NUM, sizeof(struct mpi_coredump_segment_header) + sizeof(mpi_coredump->intr_states), "INTR States"); ql_get_intr_states(qdev, &mpi_coredump->intr_states[0]); ql_build_coredump_seg_header(&mpi_coredump->cam_entries_seg_hdr, CAM_ENTRIES_SEG_NUM, sizeof(struct mpi_coredump_segment_header) + sizeof(mpi_coredump->cam_entries), "CAM Entries"); status = ql_get_cam_entries(qdev, &mpi_coredump->cam_entries[0]); if (status) goto err; ql_build_coredump_seg_header(&mpi_coredump->nic_routing_words_seg_hdr, ROUTING_WORDS_SEG_NUM, sizeof(struct mpi_coredump_segment_header) + sizeof(mpi_coredump->nic_routing_words), "Routing Words"); status = ql_get_routing_entries(qdev, &mpi_coredump->nic_routing_words[0]); if (status) goto err; /* Segment 34 (Rev C. step 23) */ ql_build_coredump_seg_header(&mpi_coredump->ets_seg_hdr, ETS_SEG_NUM, sizeof(struct mpi_coredump_segment_header) + sizeof(mpi_coredump->ets), "ETS Registers"); status = ql_get_ets_regs(qdev, &mpi_coredump->ets[0]); if (status) goto err; ql_build_coredump_seg_header(&mpi_coredump->probe_dump_seg_hdr, PROBE_DUMP_SEG_NUM, sizeof(struct mpi_coredump_segment_header) + sizeof(mpi_coredump->probe_dump), "Probe Dump"); ql_get_probe_dump(qdev, &mpi_coredump->probe_dump[0]); ql_build_coredump_seg_header(&mpi_coredump->routing_reg_seg_hdr, ROUTING_INDEX_SEG_NUM, sizeof(struct mpi_coredump_segment_header) + sizeof(mpi_coredump->routing_regs), "Routing Regs"); status = ql_get_routing_index_registers(qdev, &mpi_coredump->routing_regs[0]); if (status) goto err; ql_build_coredump_seg_header(&mpi_coredump->mac_prot_reg_seg_hdr, MAC_PROTOCOL_SEG_NUM, sizeof(struct mpi_coredump_segment_header) + sizeof(mpi_coredump->mac_prot_regs), "MAC Prot Regs"); ql_get_mac_protocol_registers(qdev, &mpi_coredump->mac_prot_regs[0]); /* Get the semaphore registers for all 5 functions */ ql_build_coredump_seg_header(&mpi_coredump->sem_regs_seg_hdr, SEM_REGS_SEG_NUM, 
sizeof(struct mpi_coredump_segment_header) + sizeof(mpi_coredump->sem_regs), "Sem Registers"); ql_get_sem_registers(qdev, &mpi_coredump->sem_regs[0]); /* Prevent the mpi restarting while we dump the memory.*/ ql_write_mpi_reg(qdev, MPI_TEST_FUNC_RST_STS, MPI_TEST_FUNC_RST_FRC); /* clear the pause */ status = ql_unpause_mpi_risc(qdev); if (status) { netif_err(qdev, drv, qdev->ndev, "Failed RISC unpause. Status = 0x%.08x\n", status); goto err; } /* Reset the RISC so we can dump RAM */ status = ql_hard_reset_mpi_risc(qdev); if (status) { netif_err(qdev, drv, qdev->ndev, "Failed RISC reset. Status = 0x%.08x\n", status); goto err; } ql_build_coredump_seg_header(&mpi_coredump->code_ram_seg_hdr, WCS_RAM_SEG_NUM, sizeof(struct mpi_coredump_segment_header) + sizeof(mpi_coredump->code_ram), "WCS RAM"); status = ql_dump_risc_ram_area(qdev, &mpi_coredump->code_ram[0], CODE_RAM_ADDR, CODE_RAM_CNT); if (status) { netif_err(qdev, drv, qdev->ndev, "Failed Dump of CODE RAM. Status = 0x%.08x\n", status); goto err; } /* Insert the segment header */ ql_build_coredump_seg_header(&mpi_coredump->memc_ram_seg_hdr, MEMC_RAM_SEG_NUM, sizeof(struct mpi_coredump_segment_header) + sizeof(mpi_coredump->memc_ram), "MEMC RAM"); status = ql_dump_risc_ram_area(qdev, &mpi_coredump->memc_ram[0], MEMC_RAM_ADDR, MEMC_RAM_CNT); if (status) { netif_err(qdev, drv, qdev->ndev, "Failed Dump of MEMC RAM. 
Status = 0x%.08x\n", status); goto err; } err: ql_sem_unlock(qdev, SEM_PROC_REG_MASK); /* does flush too */ return status; } static void ql_get_core_dump(struct ql_adapter *qdev) { if (!ql_own_firmware(qdev)) { netif_err(qdev, drv, qdev->ndev, "Don't own firmware!\n"); return; } if (!netif_running(qdev->ndev)) { netif_err(qdev, ifup, qdev->ndev, "Force Coredump can only be done from interface that is up\n"); return; } ql_queue_fw_error(qdev); } void ql_gen_reg_dump(struct ql_adapter *qdev, struct ql_reg_dump *mpi_coredump) { int i, status; memset(&(mpi_coredump->mpi_global_header), 0, sizeof(struct mpi_coredump_global_header)); mpi_coredump->mpi_global_header.cookie = MPI_COREDUMP_COOKIE; mpi_coredump->mpi_global_header.headerSize = sizeof(struct mpi_coredump_global_header); mpi_coredump->mpi_global_header.imageSize = sizeof(struct ql_reg_dump); memcpy(mpi_coredump->mpi_global_header.idString, "MPI Coredump", sizeof(mpi_coredump->mpi_global_header.idString)); /* segment 16 */ ql_build_coredump_seg_header(&mpi_coredump->misc_nic_seg_hdr, MISC_NIC_INFO_SEG_NUM, sizeof(struct mpi_coredump_segment_header) + sizeof(mpi_coredump->misc_nic_info), "MISC NIC INFO"); mpi_coredump->misc_nic_info.rx_ring_count = qdev->rx_ring_count; mpi_coredump->misc_nic_info.tx_ring_count = qdev->tx_ring_count; mpi_coredump->misc_nic_info.intr_count = qdev->intr_count; mpi_coredump->misc_nic_info.function = qdev->func; /* Segment 16, Rev C. Step 18 */ ql_build_coredump_seg_header(&mpi_coredump->nic_regs_seg_hdr, NIC1_CONTROL_SEG_NUM, sizeof(struct mpi_coredump_segment_header) + sizeof(mpi_coredump->nic_regs), "NIC Registers"); /* Get generic reg dump */ for (i = 0; i < 64; i++) mpi_coredump->nic_regs[i] = ql_read32(qdev, i * sizeof(u32)); /* Segment 31 */ /* Get indexed register values. 
*/ ql_build_coredump_seg_header(&mpi_coredump->intr_states_seg_hdr, INTR_STATES_SEG_NUM, sizeof(struct mpi_coredump_segment_header) + sizeof(mpi_coredump->intr_states), "INTR States"); ql_get_intr_states(qdev, &mpi_coredump->intr_states[0]); ql_build_coredump_seg_header(&mpi_coredump->cam_entries_seg_hdr, CAM_ENTRIES_SEG_NUM, sizeof(struct mpi_coredump_segment_header) + sizeof(mpi_coredump->cam_entries), "CAM Entries"); status = ql_get_cam_entries(qdev, &mpi_coredump->cam_entries[0]); if (status) return; ql_build_coredump_seg_header(&mpi_coredump->nic_routing_words_seg_hdr, ROUTING_WORDS_SEG_NUM, sizeof(struct mpi_coredump_segment_header) + sizeof(mpi_coredump->nic_routing_words), "Routing Words"); status = ql_get_routing_entries(qdev, &mpi_coredump->nic_routing_words[0]); if (status) return; /* Segment 34 (Rev C. step 23) */ ql_build_coredump_seg_header(&mpi_coredump->ets_seg_hdr, ETS_SEG_NUM, sizeof(struct mpi_coredump_segment_header) + sizeof(mpi_coredump->ets), "ETS Registers"); status = ql_get_ets_regs(qdev, &mpi_coredump->ets[0]); if (status) return; } void ql_get_dump(struct ql_adapter *qdev, void *buff) { /* * If the dump has already been taken and is stored * in our internal buffer and if force dump is set then * just start the spool to dump it to the log file * and also, take a snapshot of the general regs to * to the user's buffer or else take complete dump * to the user's buffer if force is not set. 
*/ if (!test_bit(QL_FRC_COREDUMP, &qdev->flags)) { if (!ql_core_dump(qdev, buff)) ql_soft_reset_mpi_risc(qdev); else netif_err(qdev, drv, qdev->ndev, "coredump failed!\n"); } else { ql_gen_reg_dump(qdev, buff); ql_get_core_dump(qdev); } } /* Coredump to messages log file using separate worker thread */ void ql_mpi_core_to_log(struct work_struct *work) { struct ql_adapter *qdev = container_of(work, struct ql_adapter, mpi_core_to_log.work); u32 *tmp, count; int i; count = sizeof(struct ql_mpi_coredump) / sizeof(u32); tmp = (u32 *)qdev->mpi_coredump; netif_printk(qdev, drv, KERN_DEBUG, qdev->ndev, "Core is dumping to log file!\n"); for (i = 0; i < count; i += 8) { pr_err("%.08x: %.08x %.08x %.08x %.08x %.08x " "%.08x %.08x %.08x\n", i, tmp[i + 0], tmp[i + 1], tmp[i + 2], tmp[i + 3], tmp[i + 4], tmp[i + 5], tmp[i + 6], tmp[i + 7]); msleep(5); } } #ifdef QL_REG_DUMP static void ql_dump_intr_states(struct ql_adapter *qdev) { int i; u32 value; for (i = 0; i < qdev->intr_count; i++) { ql_write32(qdev, INTR_EN, qdev->intr_context[i].intr_read_mask); value = ql_read32(qdev, INTR_EN); pr_err("%s: Interrupt %d is %s\n", qdev->ndev->name, i, (value & INTR_EN_EN ? 
"enabled" : "disabled")); } } #define DUMP_XGMAC(qdev, reg) \ do { \ u32 data; \ ql_read_xgmac_reg(qdev, reg, &data); \ pr_err("%s: %s = 0x%.08x\n", qdev->ndev->name, #reg, data); \ } while (0) void ql_dump_xgmac_control_regs(struct ql_adapter *qdev) { if (ql_sem_spinlock(qdev, qdev->xg_sem_mask)) { pr_err("%s: Couldn't get xgmac sem\n", __func__); return; } DUMP_XGMAC(qdev, PAUSE_SRC_LO); DUMP_XGMAC(qdev, PAUSE_SRC_HI); DUMP_XGMAC(qdev, GLOBAL_CFG); DUMP_XGMAC(qdev, TX_CFG); DUMP_XGMAC(qdev, RX_CFG); DUMP_XGMAC(qdev, FLOW_CTL); DUMP_XGMAC(qdev, PAUSE_OPCODE); DUMP_XGMAC(qdev, PAUSE_TIMER); DUMP_XGMAC(qdev, PAUSE_FRM_DEST_LO); DUMP_XGMAC(qdev, PAUSE_FRM_DEST_HI); DUMP_XGMAC(qdev, MAC_TX_PARAMS); DUMP_XGMAC(qdev, MAC_RX_PARAMS); DUMP_XGMAC(qdev, MAC_SYS_INT); DUMP_XGMAC(qdev, MAC_SYS_INT_MASK); DUMP_XGMAC(qdev, MAC_MGMT_INT); DUMP_XGMAC(qdev, MAC_MGMT_IN_MASK); DUMP_XGMAC(qdev, EXT_ARB_MODE); ql_sem_unlock(qdev, qdev->xg_sem_mask); } static void ql_dump_ets_regs(struct ql_adapter *qdev) { } static void ql_dump_cam_entries(struct ql_adapter *qdev) { int i; u32 value[3]; i = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK); if (i) return; for (i = 0; i < 4; i++) { if (ql_get_mac_addr_reg(qdev, MAC_ADDR_TYPE_CAM_MAC, i, value)) { pr_err("%s: Failed read of mac index register\n", __func__); return; } else { if (value[0]) pr_err("%s: CAM index %d CAM Lookup Lower = 0x%.08x:%.08x, Output = 0x%.08x\n", qdev->ndev->name, i, value[1], value[0], value[2]); } } for (i = 0; i < 32; i++) { if (ql_get_mac_addr_reg (qdev, MAC_ADDR_TYPE_MULTI_MAC, i, value)) { pr_err("%s: Failed read of mac index register\n", __func__); return; } else { if (value[0]) pr_err("%s: MCAST index %d CAM Lookup Lower = 0x%.08x:%.08x\n", qdev->ndev->name, i, value[1], value[0]); } } ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK); } void ql_dump_routing_entries(struct ql_adapter *qdev) { int i; u32 value; i = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK); if (i) return; for (i = 0; i < 16; i++) { value = 0; if 
(ql_get_routing_reg(qdev, i, &value)) { pr_err("%s: Failed read of routing index register\n", __func__); return; } else { if (value) pr_err("%s: Routing Mask %d = 0x%.08x\n", qdev->ndev->name, i, value); } } ql_sem_unlock(qdev, SEM_RT_IDX_MASK); } #define DUMP_REG(qdev, reg) \ pr_err("%-32s= 0x%x\n", #reg, ql_read32(qdev, reg)) void ql_dump_regs(struct ql_adapter *qdev) { pr_err("reg dump for function #%d\n", qdev->func); DUMP_REG(qdev, SYS); DUMP_REG(qdev, RST_FO); DUMP_REG(qdev, FSC); DUMP_REG(qdev, CSR); DUMP_REG(qdev, ICB_RID); DUMP_REG(qdev, ICB_L); DUMP_REG(qdev, ICB_H); DUMP_REG(qdev, CFG); DUMP_REG(qdev, BIOS_ADDR); DUMP_REG(qdev, STS); DUMP_REG(qdev, INTR_EN); DUMP_REG(qdev, INTR_MASK); DUMP_REG(qdev, ISR1); DUMP_REG(qdev, ISR2); DUMP_REG(qdev, ISR3); DUMP_REG(qdev, ISR4); DUMP_REG(qdev, REV_ID); DUMP_REG(qdev, FRC_ECC_ERR); DUMP_REG(qdev, ERR_STS); DUMP_REG(qdev, RAM_DBG_ADDR); DUMP_REG(qdev, RAM_DBG_DATA); DUMP_REG(qdev, ECC_ERR_CNT); DUMP_REG(qdev, SEM); DUMP_REG(qdev, GPIO_1); DUMP_REG(qdev, GPIO_2); DUMP_REG(qdev, GPIO_3); DUMP_REG(qdev, XGMAC_ADDR); DUMP_REG(qdev, XGMAC_DATA); DUMP_REG(qdev, NIC_ETS); DUMP_REG(qdev, CNA_ETS); DUMP_REG(qdev, FLASH_ADDR); DUMP_REG(qdev, FLASH_DATA); DUMP_REG(qdev, CQ_STOP); DUMP_REG(qdev, PAGE_TBL_RID); DUMP_REG(qdev, WQ_PAGE_TBL_LO); DUMP_REG(qdev, WQ_PAGE_TBL_HI); DUMP_REG(qdev, CQ_PAGE_TBL_LO); DUMP_REG(qdev, CQ_PAGE_TBL_HI); DUMP_REG(qdev, COS_DFLT_CQ1); DUMP_REG(qdev, COS_DFLT_CQ2); DUMP_REG(qdev, SPLT_HDR); DUMP_REG(qdev, FC_PAUSE_THRES); DUMP_REG(qdev, NIC_PAUSE_THRES); DUMP_REG(qdev, FC_ETHERTYPE); DUMP_REG(qdev, FC_RCV_CFG); DUMP_REG(qdev, NIC_RCV_CFG); DUMP_REG(qdev, FC_COS_TAGS); DUMP_REG(qdev, NIC_COS_TAGS); DUMP_REG(qdev, MGMT_RCV_CFG); DUMP_REG(qdev, XG_SERDES_ADDR); DUMP_REG(qdev, XG_SERDES_DATA); DUMP_REG(qdev, PRB_MX_ADDR); DUMP_REG(qdev, PRB_MX_DATA); ql_dump_intr_states(qdev); ql_dump_xgmac_control_regs(qdev); ql_dump_ets_regs(qdev); ql_dump_cam_entries(qdev); ql_dump_routing_entries(qdev); } #endif 
#ifdef QL_STAT_DUMP

/* Print one member of qdev->nic_stats together with its field name. */
#define DUMP_STAT(qdev, stat) \
	pr_err("%s = %ld\n", #stat, (unsigned long)qdev->nic_stats.stat)

/* Dump every cached MAC statistics counter (tx/rx packet and error stats). */
void ql_dump_stat(struct ql_adapter *qdev)
{
	pr_err("%s: Enter\n", __func__);
	DUMP_STAT(qdev, tx_pkts);
	DUMP_STAT(qdev, tx_bytes);
	DUMP_STAT(qdev, tx_mcast_pkts);
	DUMP_STAT(qdev, tx_bcast_pkts);
	DUMP_STAT(qdev, tx_ucast_pkts);
	DUMP_STAT(qdev, tx_ctl_pkts);
	DUMP_STAT(qdev, tx_pause_pkts);
	DUMP_STAT(qdev, tx_64_pkt);
	DUMP_STAT(qdev, tx_65_to_127_pkt);
	DUMP_STAT(qdev, tx_128_to_255_pkt);
	DUMP_STAT(qdev, tx_256_511_pkt);
	DUMP_STAT(qdev, tx_512_to_1023_pkt);
	DUMP_STAT(qdev, tx_1024_to_1518_pkt);
	DUMP_STAT(qdev, tx_1519_to_max_pkt);
	DUMP_STAT(qdev, tx_undersize_pkt);
	DUMP_STAT(qdev, tx_oversize_pkt);
	DUMP_STAT(qdev, rx_bytes);
	DUMP_STAT(qdev, rx_bytes_ok);
	DUMP_STAT(qdev, rx_pkts);
	DUMP_STAT(qdev, rx_pkts_ok);
	DUMP_STAT(qdev, rx_bcast_pkts);
	DUMP_STAT(qdev, rx_mcast_pkts);
	DUMP_STAT(qdev, rx_ucast_pkts);
	DUMP_STAT(qdev, rx_undersize_pkts);
	DUMP_STAT(qdev, rx_oversize_pkts);
	DUMP_STAT(qdev, rx_jabber_pkts);
	DUMP_STAT(qdev, rx_undersize_fcerr_pkts);
	DUMP_STAT(qdev, rx_drop_events);
	DUMP_STAT(qdev, rx_fcerr_pkts);
	DUMP_STAT(qdev, rx_align_err);
	DUMP_STAT(qdev, rx_symbol_err);
	DUMP_STAT(qdev, rx_mac_err);
	DUMP_STAT(qdev, rx_ctl_pkts);
	DUMP_STAT(qdev, rx_pause_pkts);
	DUMP_STAT(qdev, rx_64_pkts);
	DUMP_STAT(qdev, rx_65_to_127_pkts);
	DUMP_STAT(qdev, rx_128_255_pkts);
	DUMP_STAT(qdev, rx_256_511_pkts);
	DUMP_STAT(qdev, rx_512_to_1023_pkts);
	DUMP_STAT(qdev, rx_1024_to_1518_pkts);
	DUMP_STAT(qdev, rx_1519_to_max_pkts);
	DUMP_STAT(qdev, rx_len_err_pkts);
}	/* fixed: was "};" - stray semicolon after the function body */
#endif

#ifdef QL_DEV_DUMP

/* Print a scalar qdev field with the given printf conversion. */
#define DUMP_QDEV_FIELD(qdev, type, field) \
	pr_err("qdev->%-24s = " type "\n", #field, qdev->field)

/* Print a DMA address field; cast keeps %llx correct on 32-bit builds. */
#define DUMP_QDEV_DMA_FIELD(qdev, field) \
	pr_err("qdev->%-24s = %llx\n", #field, (unsigned long long)qdev->field)

/*
 * Print one element field of a qdev-embedded array.
 * Fixed: dropped the trailing ';' from the expansion so the macro
 * behaves like a normal statement at its call sites.
 */
#define DUMP_QDEV_ARRAY(qdev, type, array, index, field) \
	pr_err("%s[%d].%s = " type "\n", \
	       #array, index, #field, qdev->array[index].field)

/* Dump the software state of the adapter structure itself. */
void ql_dump_qdev(struct ql_adapter *qdev)
{
	int i;

	DUMP_QDEV_FIELD(qdev, "%lx", flags);
	DUMP_QDEV_FIELD(qdev, "%p", vlgrp);
	DUMP_QDEV_FIELD(qdev, "%p", pdev);
	DUMP_QDEV_FIELD(qdev, "%p", ndev);
	DUMP_QDEV_FIELD(qdev, "%d", chip_rev_id);
	DUMP_QDEV_FIELD(qdev, "%p", reg_base);
	DUMP_QDEV_FIELD(qdev, "%p", doorbell_area);
	DUMP_QDEV_FIELD(qdev, "%d", doorbell_area_size);
	DUMP_QDEV_FIELD(qdev, "%x", msg_enable);
	DUMP_QDEV_FIELD(qdev, "%p", rx_ring_shadow_reg_area);
	DUMP_QDEV_DMA_FIELD(qdev, rx_ring_shadow_reg_dma);
	DUMP_QDEV_FIELD(qdev, "%p", tx_ring_shadow_reg_area);
	DUMP_QDEV_DMA_FIELD(qdev, tx_ring_shadow_reg_dma);
	DUMP_QDEV_FIELD(qdev, "%d", intr_count);
	if (qdev->msi_x_entry)
		for (i = 0; i < qdev->intr_count; i++) {
			DUMP_QDEV_ARRAY(qdev, "%d", msi_x_entry, i, vector);
			DUMP_QDEV_ARRAY(qdev, "%d", msi_x_entry, i, entry);
		}
	for (i = 0; i < qdev->intr_count; i++) {
		DUMP_QDEV_ARRAY(qdev, "%p", intr_context, i, qdev);
		DUMP_QDEV_ARRAY(qdev, "%d", intr_context, i, intr);
		DUMP_QDEV_ARRAY(qdev, "%d", intr_context, i, hooked);
		DUMP_QDEV_ARRAY(qdev, "0x%08x", intr_context, i, intr_en_mask);
		DUMP_QDEV_ARRAY(qdev, "0x%08x", intr_context, i, intr_dis_mask);
		DUMP_QDEV_ARRAY(qdev, "0x%08x", intr_context, i, intr_read_mask);
	}
	DUMP_QDEV_FIELD(qdev, "%d", tx_ring_count);
	DUMP_QDEV_FIELD(qdev, "%d", rx_ring_count);
	DUMP_QDEV_FIELD(qdev, "%d", ring_mem_size);
	DUMP_QDEV_FIELD(qdev, "%p", ring_mem);
	/* intr_count was already printed above; duplicate kept so the
	 * dump's output format stays identical to earlier releases. */
	DUMP_QDEV_FIELD(qdev, "%d", intr_count);
	DUMP_QDEV_FIELD(qdev, "%p", tx_ring);
	DUMP_QDEV_FIELD(qdev, "%d", rss_ring_count);
	DUMP_QDEV_FIELD(qdev, "%p", rx_ring);
	DUMP_QDEV_FIELD(qdev, "%d", default_rx_queue);
	DUMP_QDEV_FIELD(qdev, "0x%08x", xg_sem_mask);
	DUMP_QDEV_FIELD(qdev, "0x%08x", port_link_up);
	DUMP_QDEV_FIELD(qdev, "0x%08x", port_init);
}
#endif

#ifdef QL_CB_DUMP

/* Dump a work queue initialization control block (fields are __le). */
void ql_dump_wqicb(struct wqicb *wqicb)
{
	pr_err("Dumping wqicb stuff...\n");
	pr_err("wqicb->len = 0x%x\n", le16_to_cpu(wqicb->len));
	pr_err("wqicb->flags = %x\n", le16_to_cpu(wqicb->flags));
	pr_err("wqicb->cq_id_rss = %d\n", le16_to_cpu(wqicb->cq_id_rss));
	pr_err("wqicb->rid = 0x%x\n", le16_to_cpu(wqicb->rid));
	pr_err("wqicb->wq_addr = 0x%llx\n",
	       (unsigned long long)le64_to_cpu(wqicb->addr));
	pr_err("wqicb->wq_cnsmr_idx_addr = 0x%llx\n",
	       (unsigned long long)le64_to_cpu(wqicb->cnsmr_idx_addr));
}

/* Dump the software state of one TX (work queue) ring. */
void ql_dump_tx_ring(struct tx_ring *tx_ring)
{
	if (tx_ring == NULL)
		return;
	pr_err("===================== Dumping tx_ring %d ===============\n",
	       tx_ring->wq_id);
	pr_err("tx_ring->base = %p\n", tx_ring->wq_base);
	pr_err("tx_ring->base_dma = 0x%llx\n",
	       (unsigned long long)tx_ring->wq_base_dma);
	pr_err("tx_ring->cnsmr_idx_sh_reg, addr = 0x%p, value = %d\n",
	       tx_ring->cnsmr_idx_sh_reg,
	       tx_ring->cnsmr_idx_sh_reg
	       ? ql_read_sh_reg(tx_ring->cnsmr_idx_sh_reg) : 0);
	pr_err("tx_ring->size = %d\n", tx_ring->wq_size);
	pr_err("tx_ring->len = %d\n", tx_ring->wq_len);
	pr_err("tx_ring->prod_idx_db_reg = %p\n", tx_ring->prod_idx_db_reg);
	pr_err("tx_ring->valid_db_reg = %p\n", tx_ring->valid_db_reg);
	pr_err("tx_ring->prod_idx = %d\n", tx_ring->prod_idx);
	pr_err("tx_ring->cq_id = %d\n", tx_ring->cq_id);
	pr_err("tx_ring->wq_id = %d\n", tx_ring->wq_id);
	pr_err("tx_ring->q = %p\n", tx_ring->q);
	pr_err("tx_ring->tx_count = %d\n", atomic_read(&tx_ring->tx_count));
}

/* Dump an RSS initialization control block. */
void ql_dump_ricb(struct ricb *ricb)
{
	int i;

	pr_err("===================== Dumping ricb ===============\n");
	pr_err("Dumping ricb stuff...\n");

	pr_err("ricb->base_cq = %d\n", ricb->base_cq & 0x1f);
	/* NOTE(review): RSS_L4K is tested against base_cq while the other
	 * bits come from flags; this matches the original code — confirm
	 * against the struct ricb bit layout in the driver header. */
	pr_err("ricb->flags = %s%s%s%s%s%s%s%s%s\n",
	       ricb->base_cq & RSS_L4K ? "RSS_L4K " : "",
	       ricb->flags & RSS_L6K ? "RSS_L6K " : "",
	       ricb->flags & RSS_LI ? "RSS_LI " : "",
	       ricb->flags & RSS_LB ? "RSS_LB " : "",
	       ricb->flags & RSS_LM ? "RSS_LM " : "",
	       ricb->flags & RSS_RI4 ? "RSS_RI4 " : "",
	       ricb->flags & RSS_RT4 ? "RSS_RT4 " : "",
	       ricb->flags & RSS_RI6 ? "RSS_RI6 " : "",
	       ricb->flags & RSS_RT6 ? "RSS_RT6 " : "");
	pr_err("ricb->mask = 0x%.04x\n", le16_to_cpu(ricb->mask));
	for (i = 0; i < 16; i++)
		pr_err("ricb->hash_cq_id[%d] = 0x%.08x\n", i,
		       le32_to_cpu(ricb->hash_cq_id[i]));
	for (i = 0; i < 10; i++)
		pr_err("ricb->ipv6_hash_key[%d] = 0x%.08x\n", i,
		       le32_to_cpu(ricb->ipv6_hash_key[i]));
	for (i = 0; i < 4; i++)
		pr_err("ricb->ipv4_hash_key[%d] = 0x%.08x\n", i,
		       le32_to_cpu(ricb->ipv4_hash_key[i]));
}

/* Dump a completion queue initialization control block. */
void ql_dump_cqicb(struct cqicb *cqicb)
{
	pr_err("Dumping cqicb stuff...\n");

	pr_err("cqicb->msix_vect = %d\n", cqicb->msix_vect);
	pr_err("cqicb->flags = %x\n", cqicb->flags);
	pr_err("cqicb->len = %d\n", le16_to_cpu(cqicb->len));
	pr_err("cqicb->addr = 0x%llx\n",
	       (unsigned long long)le64_to_cpu(cqicb->addr));
	pr_err("cqicb->prod_idx_addr = 0x%llx\n",
	       (unsigned long long)le64_to_cpu(cqicb->prod_idx_addr));
	pr_err("cqicb->pkt_delay = 0x%.04x\n",
	       le16_to_cpu(cqicb->pkt_delay));
	pr_err("cqicb->irq_delay = 0x%.04x\n",
	       le16_to_cpu(cqicb->irq_delay));
	pr_err("cqicb->lbq_addr = 0x%llx\n",
	       (unsigned long long)le64_to_cpu(cqicb->lbq_addr));
	pr_err("cqicb->lbq_buf_size = 0x%.04x\n",
	       le16_to_cpu(cqicb->lbq_buf_size));
	pr_err("cqicb->lbq_len = 0x%.04x\n",
	       le16_to_cpu(cqicb->lbq_len));
	pr_err("cqicb->sbq_addr = 0x%llx\n",
	       (unsigned long long)le64_to_cpu(cqicb->sbq_addr));
	pr_err("cqicb->sbq_buf_size = 0x%.04x\n",
	       le16_to_cpu(cqicb->sbq_buf_size));
	pr_err("cqicb->sbq_len = 0x%.04x\n",
	       le16_to_cpu(cqicb->sbq_len));
}

/* Dump the software state of one RX (completion queue) ring. */
void ql_dump_rx_ring(struct rx_ring *rx_ring)
{
	if (rx_ring == NULL)
		return;
	pr_err("===================== Dumping rx_ring %d ===============\n",
	       rx_ring->cq_id);
	pr_err("Dumping rx_ring %d, type = %s%s%s\n",
	       rx_ring->cq_id,
	       rx_ring->type == DEFAULT_Q ? "DEFAULT" : "",
	       rx_ring->type == TX_Q ? "OUTBOUND COMPLETIONS" : "",
	       rx_ring->type == RX_Q ? "INBOUND_COMPLETIONS" : "");
	pr_err("rx_ring->cqicb = %p\n", &rx_ring->cqicb);
	pr_err("rx_ring->cq_base = %p\n", rx_ring->cq_base);
	pr_err("rx_ring->cq_base_dma = %llx\n",
	       (unsigned long long)rx_ring->cq_base_dma);
	pr_err("rx_ring->cq_size = %d\n", rx_ring->cq_size);
	pr_err("rx_ring->cq_len = %d\n", rx_ring->cq_len);
	pr_err("rx_ring->prod_idx_sh_reg, addr = 0x%p, value = %d\n",
	       rx_ring->prod_idx_sh_reg,
	       rx_ring->prod_idx_sh_reg
	       ? ql_read_sh_reg(rx_ring->prod_idx_sh_reg) : 0);
	pr_err("rx_ring->prod_idx_sh_reg_dma = %llx\n",
	       (unsigned long long)rx_ring->prod_idx_sh_reg_dma);
	pr_err("rx_ring->cnsmr_idx_db_reg = %p\n",
	       rx_ring->cnsmr_idx_db_reg);
	pr_err("rx_ring->cnsmr_idx = %d\n", rx_ring->cnsmr_idx);
	pr_err("rx_ring->curr_entry = %p\n", rx_ring->curr_entry);
	pr_err("rx_ring->valid_db_reg = %p\n", rx_ring->valid_db_reg);

	pr_err("rx_ring->lbq_base = %p\n", rx_ring->lbq_base);
	pr_err("rx_ring->lbq_base_dma = %llx\n",
	       (unsigned long long)rx_ring->lbq_base_dma);
	pr_err("rx_ring->lbq_base_indirect = %p\n",
	       rx_ring->lbq_base_indirect);
	pr_err("rx_ring->lbq_base_indirect_dma = %llx\n",
	       (unsigned long long)rx_ring->lbq_base_indirect_dma);
	pr_err("rx_ring->lbq = %p\n", rx_ring->lbq);
	pr_err("rx_ring->lbq_len = %d\n", rx_ring->lbq_len);
	pr_err("rx_ring->lbq_size = %d\n", rx_ring->lbq_size);
	pr_err("rx_ring->lbq_prod_idx_db_reg = %p\n",
	       rx_ring->lbq_prod_idx_db_reg);
	pr_err("rx_ring->lbq_prod_idx = %d\n", rx_ring->lbq_prod_idx);
	pr_err("rx_ring->lbq_curr_idx = %d\n", rx_ring->lbq_curr_idx);
	pr_err("rx_ring->lbq_clean_idx = %d\n", rx_ring->lbq_clean_idx);
	pr_err("rx_ring->lbq_free_cnt = %d\n", rx_ring->lbq_free_cnt);
	pr_err("rx_ring->lbq_buf_size = %d\n", rx_ring->lbq_buf_size);

	pr_err("rx_ring->sbq_base = %p\n", rx_ring->sbq_base);
	pr_err("rx_ring->sbq_base_dma = %llx\n",
	       (unsigned long long)rx_ring->sbq_base_dma);
	pr_err("rx_ring->sbq_base_indirect = %p\n",
	       rx_ring->sbq_base_indirect);
	pr_err("rx_ring->sbq_base_indirect_dma = %llx\n",
	       (unsigned long long)rx_ring->sbq_base_indirect_dma);
	pr_err("rx_ring->sbq = %p\n", rx_ring->sbq);
	pr_err("rx_ring->sbq_len = %d\n", rx_ring->sbq_len);
	pr_err("rx_ring->sbq_size = %d\n", rx_ring->sbq_size);
	pr_err("rx_ring->sbq_prod_idx_db_reg addr = %p\n",
	       rx_ring->sbq_prod_idx_db_reg);
	pr_err("rx_ring->sbq_prod_idx = %d\n", rx_ring->sbq_prod_idx);
	pr_err("rx_ring->sbq_curr_idx = %d\n", rx_ring->sbq_curr_idx);
	pr_err("rx_ring->sbq_clean_idx = %d\n", rx_ring->sbq_clean_idx);
	pr_err("rx_ring->sbq_free_cnt = %d\n", rx_ring->sbq_free_cnt);
	pr_err("rx_ring->sbq_buf_size = %d\n", rx_ring->sbq_buf_size);

	pr_err("rx_ring->cq_id = %d\n", rx_ring->cq_id);
	pr_err("rx_ring->irq = %d\n", rx_ring->irq);
	pr_err("rx_ring->cpu = %d\n", rx_ring->cpu);
	pr_err("rx_ring->qdev = %p\n", rx_ring->qdev);
}

/*
 * Read a control block of the given kind back from the hardware and
 * dump it with the matching helper.  May be called in atomic context,
 * hence GFP_ATOMIC; the scratch buffer is freed on all paths.
 */
void ql_dump_hw_cb(struct ql_adapter *qdev, int size, u32 bit, u16 q_id)
{
	void *ptr;

	pr_err("%s: Enter\n", __func__);

	ptr = kmalloc(size, GFP_ATOMIC);
	if (ptr == NULL)
		return;

	if (ql_write_cfg(qdev, ptr, size, bit, q_id)) {
		pr_err("%s: Failed to upload control block!\n", __func__);
		goto fail_it;
	}
	switch (bit) {
	case CFG_DRQ:
		ql_dump_wqicb((struct wqicb *)ptr);
		break;
	case CFG_DCQ:
		ql_dump_cqicb((struct cqicb *)ptr);
		break;
	case CFG_DR:
		ql_dump_ricb((struct ricb *)ptr);
		break;
	default:
		pr_err("%s: Invalid bit value = %x\n", __func__, bit);
		break;
	}
fail_it:
	kfree(ptr);
}
#endif

#ifdef QL_OB_DUMP

/*
 * Dump the three buffer descriptors of an outbound IOCB.
 * tbd->len is little-endian and carries the C (continuation) and
 * E (end) control bits alongside the length, so it must be converted
 * to CPU order *before* masking; the old code masked the raw __le32
 * value first, which gave wrong results on big-endian hosts.
 */
void ql_dump_tx_desc(struct tx_buf_desc *tbd)
{
	int i;

	for (i = 0; i < 3; i++, tbd++) {
		u32 len = le32_to_cpu(tbd->len);

		pr_err("tbd->addr = 0x%llx\n",
		       (unsigned long long)le64_to_cpu(tbd->addr));
		pr_err("tbd->len = %d\n", len & TX_DESC_LEN_MASK);
		pr_err("tbd->flags = %s %s\n",
		       len & TX_DESC_C ? "C" : ".",
		       len & TX_DESC_E ? "E" : ".");
	}
}

/* Dump an outbound MAC/TSO transmit request IOCB and its descriptors. */
void ql_dump_ob_mac_iocb(struct ob_mac_iocb_req *ob_mac_iocb)
{
	struct ob_mac_tso_iocb_req *ob_mac_tso_iocb =
	    (struct ob_mac_tso_iocb_req *)ob_mac_iocb;
	struct tx_buf_desc *tbd;

	pr_err("%s\n", __func__);
	pr_err("opcode = %s\n",
	       (ob_mac_iocb->opcode == OPCODE_OB_MAC_IOCB) ? "MAC" : "TSO");
	pr_err("flags1 = %s %s %s %s %s\n",
	       ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_OI ? "OI" : "",
	       ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_I ? "I" : "",
	       ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_D ? "D" : "",
	       ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_IP4 ? "IP4" : "",
	       ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_IP6 ? "IP6" : "");
	pr_err("flags2 = %s %s %s\n",
	       ob_mac_tso_iocb->flags2 & OB_MAC_TSO_IOCB_LSO ? "LSO" : "",
	       ob_mac_tso_iocb->flags2 & OB_MAC_TSO_IOCB_UC ? "UC" : "",
	       ob_mac_tso_iocb->flags2 & OB_MAC_TSO_IOCB_TC ? "TC" : "");
	pr_err("flags3 = %s %s %s\n",
	       ob_mac_tso_iocb->flags3 & OB_MAC_TSO_IOCB_IC ? "IC" : "",
	       ob_mac_tso_iocb->flags3 & OB_MAC_TSO_IOCB_DFP ? "DFP" : "",
	       ob_mac_tso_iocb->flags3 & OB_MAC_TSO_IOCB_V ? "V" : "");
	pr_err("tid = %x\n", ob_mac_iocb->tid);
	pr_err("txq_idx = %d\n", ob_mac_iocb->txq_idx);
	/* NOTE(review): vlan_tci is printed without byte-swapping here,
	 * as in the original — confirm the field's endianness. */
	pr_err("vlan_tci = %x\n", ob_mac_tso_iocb->vlan_tci);
	if (ob_mac_iocb->opcode == OPCODE_OB_MAC_TSO_IOCB) {
		pr_err("frame_len = %d\n",
		       le32_to_cpu(ob_mac_tso_iocb->frame_len));
		pr_err("mss = %d\n",
		       le16_to_cpu(ob_mac_tso_iocb->mss));
		pr_err("prot_hdr_len = %d\n",
		       le16_to_cpu(ob_mac_tso_iocb->total_hdrs_len));
		pr_err("hdr_offset = 0x%.04x\n",
		       le16_to_cpu(ob_mac_tso_iocb->net_trans_offset));
	} else {
		pr_err("frame_len = %d\n",
		       le16_to_cpu(ob_mac_iocb->frame_len));
	}
	/* Removed the old "u16 frame_len" local: it was assigned in both
	 * branches above but never read (and silently truncated the TSO
	 * frame length from 32 to 16 bits). */
	tbd = &ob_mac_iocb->tbd[0];
	ql_dump_tx_desc(tbd);
}

/* Dump an outbound MAC transmit completion IOCB. */
void ql_dump_ob_mac_rsp(struct ob_mac_iocb_rsp *ob_mac_rsp)
{
	pr_err("%s\n", __func__);
	pr_err("opcode = %d\n", ob_mac_rsp->opcode);
	pr_err("flags = %s %s %s %s %s %s %s\n",
	       ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_OI ? "OI" : ".",
	       ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_I ? "I" : ".",
	       ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_E ? "E" : ".",
	       ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_S ? "S" : ".",
	       ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_L ? "L" : ".",
	       ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_P ? "P" : ".",
	       ob_mac_rsp->flags2 & OB_MAC_IOCB_RSP_B ? "B" : ".");
	pr_err("tid = %x\n", ob_mac_rsp->tid);
}
#endif

#ifdef QL_IB_DUMP

/* Dump an inbound MAC receive completion IOCB, decoding all flag bytes. */
void ql_dump_ib_mac_rsp(struct ib_mac_iocb_rsp *ib_mac_rsp)
{
	pr_err("%s\n", __func__);
	pr_err("opcode = 0x%x\n", ib_mac_rsp->opcode);
	pr_err("flags1 = %s%s%s%s%s%s\n",
	       ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_OI ? "OI " : "",
	       ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_I ? "I " : "",
	       ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_TE ? "TE " : "",
	       ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_NU ? "NU " : "",
	       ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_IE ? "IE " : "",
	       ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_B ? "B " : "");

	if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK)
		pr_err("%s%s%s Multicast\n",
		       (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
		       IB_MAC_IOCB_RSP_M_HASH ? "Hash" : "",
		       (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
		       IB_MAC_IOCB_RSP_M_REG ? "Registered" : "",
		       (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
		       IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");

	pr_err("flags2 = %s%s%s%s%s\n",
	       (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) ? "P " : "",
	       (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ? "V " : "",
	       (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) ? "U " : "",
	       (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) ? "T " : "",
	       (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_FO) ? "FO " : "");

	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK)
		pr_err("%s%s%s%s%s error\n",
		       (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) ==
		       IB_MAC_IOCB_RSP_ERR_OVERSIZE ? "oversize" : "",
		       (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) ==
		       IB_MAC_IOCB_RSP_ERR_UNDERSIZE ? "undersize" : "",
		       (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) ==
		       IB_MAC_IOCB_RSP_ERR_PREAMBLE ? "preamble" : "",
		       (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) ==
		       IB_MAC_IOCB_RSP_ERR_FRAME_LEN ? "frame length" : "",
		       (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) ==
		       IB_MAC_IOCB_RSP_ERR_CRC ? "CRC" : "");

	pr_err("flags3 = %s%s\n",
	       ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS ? "DS " : "",
	       ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL ? "DL " : "");

	if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK)
		pr_err("RSS flags = %s%s%s%s\n",
		       ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) ==
			IB_MAC_IOCB_RSP_M_IPV4) ? "IPv4 RSS" : "",
		       ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) ==
			IB_MAC_IOCB_RSP_M_IPV6) ? "IPv6 RSS " : "",
		       ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) ==
			IB_MAC_IOCB_RSP_M_TCP_V4) ? "TCP/IPv4 RSS" : "",
		       ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) ==
			IB_MAC_IOCB_RSP_M_TCP_V6) ? "TCP/IPv6 RSS" : "");

	pr_err("data_len = %d\n", le32_to_cpu(ib_mac_rsp->data_len));
	pr_err("data_addr = 0x%llx\n",
	       (unsigned long long)le64_to_cpu(ib_mac_rsp->data_addr));
	if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK)
		pr_err("rss = %x\n", le32_to_cpu(ib_mac_rsp->rss));
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V)
		pr_err("vlan_id = %x\n", le16_to_cpu(ib_mac_rsp->vlan_id));

	pr_err("flags4 = %s%s%s\n",
	       ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV ? "HV " : "",
	       ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS ? "HS " : "",
	       ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HL ? "HL " : "");

	if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
		pr_err("hdr length = %d\n",
		       le32_to_cpu(ib_mac_rsp->hdr_len));
		pr_err("hdr addr = 0x%llx\n",
		       (unsigned long long)le64_to_cpu(ib_mac_rsp->hdr_addr));
	}
}
#endif

#ifdef QL_ALL_DUMP

/* Dump everything: registers, adapter state, and every TX/RX ring with
 * its hardware control block. */
void ql_dump_all(struct ql_adapter *qdev)
{
	int i;

	QL_DUMP_REGS(qdev);
	QL_DUMP_QDEV(qdev);
	for (i = 0; i < qdev->tx_ring_count; i++) {
		QL_DUMP_TX_RING(&qdev->tx_ring[i]);
		QL_DUMP_WQICB((struct wqicb *)&qdev->tx_ring[i]);
	}
	for (i = 0; i < qdev->rx_ring_count; i++) {
		QL_DUMP_RX_RING(&qdev->rx_ring[i]);
		QL_DUMP_CQICB((struct cqicb *)&qdev->rx_ring[i]);
	}
}
#endif
gpl-2.0
Bogdacutu/android_kernel_nvidia_shieldtablet
drivers/ide/ide-probe.c
10619
38318
/*
 * Copyright (C) 1994-1998  Linus Torvalds & authors (see below)
 * Copyright (C) 2005, 2007 Bartlomiej Zolnierkiewicz
 */

/*
 * Mostly written by Mark Lord <mlord@pobox.com>
 *              and Gadi Oxman <gadio@netvision.net.il>
 *              and Andre Hedrick <andre@linux-ide.org>
 *
 * See linux/MAINTAINERS for address of current maintainer.
 *
 * This is the IDE probe module, as evolved from hd.c and ide.c.
 *
 * -- increase WAIT_PIDENTIFY to avoid CD-ROM locking at boot
 *    by Andrea Arcangeli
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/major.h>
#include <linux/errno.h>
#include <linux/genhd.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/ide.h>
#include <linux/spinlock.h>
#include <linux/kmod.h>
#include <linux/pci.h>
#include <linux/scatterlist.h>

#include <asm/byteorder.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
#include <asm/io.h>

/**
 *	generic_id		-	add a generic drive id
 *	@drive:	drive to make an ID block for
 *
 *	Add a fake id field to the drive we are passed.  This allows
 *	us to skip a ton of NULL checks (which people always miss)
 *	and make drive properties unconditional outside of this file.
 */
static void generic_id(ide_drive_t *drive)
{
	u16 *id = drive->id;

	/* mirror the user/BIOS supplied geometry into the fake id block */
	id[ATA_ID_CUR_CYLS]	= id[ATA_ID_CYLS]	= drive->cyl;
	id[ATA_ID_CUR_HEADS]	= id[ATA_ID_HEADS]	= drive->head;
	id[ATA_ID_CUR_SECTORS]	= id[ATA_ID_SECTORS]	= drive->sect;
}

/*
 * Derive the CHS geometry of the drive from its IDENTIFY data, unless a
 * geometry was already supplied (e.g. from kernel parameters).
 */
static void ide_disk_init_chs(ide_drive_t *drive)
{
	u16 *id = drive->id;

	/* Extract geometry if we did not already have one for the drive */
	if (!drive->cyl || !drive->head || !drive->sect) {
		drive->cyl  = drive->bios_cyl  = id[ATA_ID_CYLS];
		drive->head = drive->bios_head = id[ATA_ID_HEADS];
		drive->sect = drive->bios_sect = id[ATA_ID_SECTORS];
	}

	/* Handle logical geometry translation by the drive */
	if (ata_id_current_chs_valid(id)) {
		drive->cyl  = id[ATA_ID_CUR_CYLS];
		drive->head = id[ATA_ID_CUR_HEADS];
		drive->sect = id[ATA_ID_CUR_SECTORS];
	}

	/* Use physical geometry if what we have still makes no sense */
	if (drive->head > 16 && id[ATA_ID_HEADS] && id[ATA_ID_HEADS] <= 16) {
		drive->cyl  = id[ATA_ID_CYLS];
		drive->head = id[ATA_ID_HEADS];
		drive->sect = id[ATA_ID_SECTORS];
	}
}

/*
 * Set up the multi-sector (READ/WRITE MULTIPLE) transfer count from the
 * drive's advertised maximum, and flag the drive so SET MULTIPLE MODE is
 * issued later.
 */
static void ide_disk_init_mult_count(ide_drive_t *drive)
{
	u16 *id = drive->id;
	u8 max_multsect = id[ATA_ID_MAX_MULTSECT] & 0xff;

	if (max_multsect) {
		if ((max_multsect / 2) > 1)
			id[ATA_ID_MULTSECT] = max_multsect | 0x100;
		else
			id[ATA_ID_MULTSECT] &= ~0x1ff;
		drive->mult_req = id[ATA_ID_MULTSECT] & 0xff;
		if (drive->mult_req)
			drive->special_flags |= IDE_SFLAG_SET_MULTMODE;
	}
}

/*
 * Classify an ATA device from its IDENTIFY data: set media type, the
 * removable and no-head-unload flags, and announce the drive.
 */
static void ide_classify_ata_dev(ide_drive_t *drive)
{
	u16 *id = drive->id;
	char *m = (char *)&id[ATA_ID_PROD];
	int is_cfa = ata_id_is_cfa(id);

	/* CF devices are *not* removable in Linux definition of the term */
	if (is_cfa == 0 && (id[ATA_ID_CONFIG] & (1 << 7)))
		drive->dev_flags |= IDE_DFLAG_REMOVABLE;

	drive->media = ide_disk;

	if (!ata_id_has_unload(drive->id))
		drive->dev_flags |= IDE_DFLAG_NO_UNLOAD;

	printk(KERN_INFO "%s: %s, %s DISK drive\n", drive->name, m,
		is_cfa ? "CFA" : "ATA");
}

/*
 * Classify an ATAPI device from its IDENTIFY PACKET data.  The device
 * type field is sanity-checked against the model string because some
 * old drives report a bogus type.
 */
static void ide_classify_atapi_dev(ide_drive_t *drive)
{
	u16 *id = drive->id;
	char *m = (char *)&id[ATA_ID_PROD];
	u8 type = (id[ATA_ID_CONFIG] >> 8) & 0x1f;

	printk(KERN_INFO "%s: %s, ATAPI ", drive->name, m);
	switch (type) {
	case ide_floppy:
		if (!strstr(m, "CD-ROM")) {
			if (!strstr(m, "oppy") &&
			    !strstr(m, "poyp") &&
			    !strstr(m, "ZIP"))
				printk(KERN_CONT "cdrom or floppy?, assuming ");
			if (drive->media != ide_cdrom) {
				printk(KERN_CONT "FLOPPY");
				drive->dev_flags |= IDE_DFLAG_REMOVABLE;
				break;
			}
		}
		/* Early cdrom models used zero */
		type = ide_cdrom;
		/* fall through */
	case ide_cdrom:
		drive->dev_flags |= IDE_DFLAG_REMOVABLE;
#ifdef CONFIG_PPC
		/* kludge for Apple PowerBook internal zip */
		if (!strstr(m, "CD-ROM") && strstr(m, "ZIP")) {
			printk(KERN_CONT "FLOPPY");
			type = ide_floppy;
			break;
		}
#endif
		printk(KERN_CONT "CD/DVD-ROM");
		break;
	case ide_tape:
		printk(KERN_CONT "TAPE");
		break;
	case ide_optical:
		printk(KERN_CONT "OPTICAL");
		drive->dev_flags |= IDE_DFLAG_REMOVABLE;
		break;
	default:
		printk(KERN_CONT "UNKNOWN (type %d)", type);
		break;
	}

	printk(KERN_CONT " drive\n");

	drive->media = type;
	/* an ATAPI device ignores DRDY */
	drive->ready_stat = 0;
	if (ata_id_cdb_intr(id))
		drive->atapi_flags |= IDE_AFLAG_DRQ_INTERRUPT;
	drive->dev_flags |= IDE_DFLAG_DOORLOCKING;
	/* we don't do head unloading on ATAPI devices */
	drive->dev_flags |= IDE_DFLAG_NO_UNLOAD;
}

/**
 *	do_identify	-	identify a drive
 *	@drive: drive to identify
 *	@cmd: command used
 *	@id: buffer for IDENTIFY data
 *
 *	Called when we have issued a drive identify command to
 *	read and parse the results.  This function is run with
 *	interrupts disabled.
 */
static void do_identify(ide_drive_t *drive, u8 cmd, u16 *id)
{
	ide_hwif_t *hwif = drive->hwif;
	char *m = (char *)&id[ATA_ID_PROD];
	unsigned long flags;
	int bswap = 1;

	/* local CPU only; some systems need this */
	local_irq_save(flags);
	/* read 512 bytes of id info */
	hwif->tp_ops->input_data(drive, NULL, id, SECTOR_SIZE);
	local_irq_restore(flags);

	drive->dev_flags |= IDE_DFLAG_ID_READ;
#ifdef DEBUG
	printk(KERN_INFO "%s: dumping identify data\n", drive->name);
	ide_dump_identify((u8 *)id);
#endif
	ide_fix_driveid(id);

	/*
	 *  ATA_CMD_ID_ATA returns little-endian info,
	 *  ATA_CMD_ID_ATAPI *usually* returns little-endian info.
	 */
	if (cmd == ATA_CMD_ID_ATAPI) {
		if ((m[0] == 'N' && m[1] == 'E') ||	/* NEC */
		    (m[0] == 'F' && m[1] == 'X') ||	/* Mitsumi */
		    (m[0] == 'P' && m[1] == 'i'))	/* Pioneer */
			/* Vertos drives may still be weird */
			bswap ^= 1;
	}

	/* fix byte order of the ASCII identification strings */
	ide_fixstring(m, ATA_ID_PROD_LEN, bswap);
	ide_fixstring((char *)&id[ATA_ID_FW_REV], ATA_ID_FW_REV_LEN, bswap);
	ide_fixstring((char *)&id[ATA_ID_SERNO], ATA_ID_SERNO_LEN, bswap);

	/* we depend on this a lot! */
	m[ATA_ID_PROD_LEN - 1] = '\0';

	if (strstr(m, "E X A B Y T E N E S T"))
		drive->dev_flags &= ~IDE_DFLAG_PRESENT;
	else
		drive->dev_flags |= IDE_DFLAG_PRESENT;
}

/**
 *	ide_dev_read_id	-	send ATA/ATAPI IDENTIFY command
 *	@drive: drive to identify
 *	@cmd: command to use
 *	@id: buffer for IDENTIFY data
 *	@irq_ctx: flag set when called from the IRQ context
 *
 *	Sends an ATA(PI) IDENTIFY request to a drive and waits for a response.
 *
 *	Returns:	0  device was identified
 *			1  device timed-out (no response to identify request)
 *			2  device aborted the command (refused to identify itself)
 */
int ide_dev_read_id(ide_drive_t *drive, u8 cmd, u16 *id, int irq_ctx)
{
	ide_hwif_t *hwif = drive->hwif;
	struct ide_io_ports *io_ports = &hwif->io_ports;
	const struct ide_tp_ops *tp_ops = hwif->tp_ops;
	int use_altstatus = 0, rc;
	unsigned long timeout;
	u8 s = 0, a = 0;

	/*
	 * Disable device IRQ.  Otherwise we'll get spurious interrupts
	 * during the identify phase that the IRQ handler isn't expecting.
	 */
	if (io_ports->ctl_addr)
		tp_ops->write_devctl(hwif, ATA_NIEN | ATA_DEVCTL_OBS);

	/* take a deep breath */
	if (irq_ctx)
		mdelay(50);	/* cannot sleep in IRQ context */
	else
		msleep(50);

	if (io_ports->ctl_addr &&
	    (hwif->host_flags & IDE_HFLAG_BROKEN_ALTSTATUS) == 0) {
		a = tp_ops->read_altstatus(hwif);
		s = tp_ops->read_status(hwif);
		if ((a ^ s) & ~ATA_IDX)
			/* ancient Seagate drives, broken interfaces */
			printk(KERN_INFO "%s: probing with STATUS(0x%02x) "
					 "instead of ALTSTATUS(0x%02x)\n",
					 drive->name, s, a);
		else
			/* use non-intrusive polling */
			use_altstatus = 1;
	}

	/* set features register for atapi
	 * identify command to be sure of reply
	 */
	if (cmd == ATA_CMD_ID_ATAPI) {
		struct ide_taskfile tf;

		memset(&tf, 0, sizeof(tf));
		/* disable DMA & overlap */
		tp_ops->tf_load(drive, &tf, IDE_VALID_FEATURE);
	}

	/* ask drive for ID */
	tp_ops->exec_command(hwif, cmd);

	timeout = ((cmd == ATA_CMD_ID_ATA) ? WAIT_WORSTCASE : WAIT_PIDENTIFY) / 2;

	/* wait for IRQ and ATA_DRQ */
	if (irq_ctx) {
		rc = __ide_wait_stat(drive, ATA_DRQ, BAD_R_STAT, timeout, &s);
		if (rc)
			return 1;
	} else {
		rc = ide_busy_sleep(drive, timeout, use_altstatus);
		if (rc)
			return 1;

		msleep(50);
		s = tp_ops->read_status(hwif);
	}

	if (OK_STAT(s, ATA_DRQ, BAD_R_STAT)) {
		/* drive returned ID */
		do_identify(drive, cmd, id);
		/* drive responded with ID */
		rc = 0;
		/* clear drive IRQ */
		(void)tp_ops->read_status(hwif);
	} else {
		/* drive refused ID */
		rc = 2;
	}
	return rc;
}

/*
 * Poll the (alt)status register until BSY clears or @timeout (in jiffies,
 * relative) expires.  Returns 0 when the drive became non-busy, 1 on
 * timeout.  Sleeps, so must not be called from IRQ context.
 */
int ide_busy_sleep(ide_drive_t *drive, unsigned long timeout, int altstatus)
{
	ide_hwif_t *hwif = drive->hwif;
	u8 stat;

	timeout += jiffies;

	do {
		msleep(50);	/* give drive a breather */
		stat = altstatus ? hwif->tp_ops->read_altstatus(hwif)
				 : hwif->tp_ops->read_status(hwif);
		if ((stat & ATA_BUSY) == 0)
			return 0;
	} while (time_before(jiffies, timeout));

	printk(KERN_ERR "%s: timeout in %s\n", drive->name, __func__);

	return 1;	/* drive timed-out */
}

/* Read back the device register via the taskfile ops. */
static u8 ide_read_device(ide_drive_t *drive)
{
	struct ide_taskfile tf;

	drive->hwif->tp_ops->tf_read(drive, &tf, IDE_VALID_DEVICE);

	return tf.device;
}

/**
 *	do_probe		-	probe an IDE device
 *	@drive: drive to probe
 *	@cmd: command to use
 *
 *	do_probe() has the difficult job of finding a drive if it exists,
 *	without getting hung up if it doesn't exist, without trampling on
 *	ethernet cards, and without leaving any IRQs dangling to haunt us later.
 *
 *	If a drive is "known" to exist (from CMOS or kernel parameters),
 *	but does not respond right away, the probe will "hang in there"
 *	for the maximum wait time (about 30 seconds), otherwise it will
 *	exit much more quickly.
 *
 *	Returns:	0  device was identified
 *			1  device timed-out (no response to identify request)
 *			2  device aborted the command (refused to identify itself)
 *			3  bad status from device (possible for ATAPI drives)
 *			4  probe was not attempted because failure was obvious
 */
static int do_probe (ide_drive_t *drive, u8 cmd)
{
	ide_hwif_t *hwif = drive->hwif;
	const struct ide_tp_ops *tp_ops = hwif->tp_ops;
	u16 *id = drive->id;
	int rc;
	u8 present = !!(drive->dev_flags & IDE_DFLAG_PRESENT), stat;

	/* avoid waiting for inappropriate probes */
	if (present && drive->media != ide_disk && cmd == ATA_CMD_ID_ATA)
		return 4;
#ifdef DEBUG
	printk(KERN_INFO "probing for %s: present=%d, media=%d, probetype=%s\n",
		drive->name, present, drive->media,
		(cmd == ATA_CMD_ID_ATA) ? "ATA" : "ATAPI");
#endif

	/* needed for some systems
	 * (e.g. crw9624 as drive0 with disk as slave)
	 */
	msleep(50);
	tp_ops->dev_select(drive);
	msleep(50);

	if (ide_read_device(drive) != drive->select && present == 0) {
		if (drive->dn & 1) {
			/* exit with drive0 selected */
			tp_ops->dev_select(hwif->devices[0]);
			/* allow ATA_BUSY to assert & clear */
			msleep(50);
		}
		/* no i/f present: mmm.. this should be a 4 -ml */
		return 3;
	}

	stat = tp_ops->read_status(hwif);

	if (OK_STAT(stat, ATA_DRDY, ATA_BUSY) ||
	    present || cmd == ATA_CMD_ID_ATAPI) {
		rc = ide_dev_read_id(drive, cmd, id, 0);
		if (rc)
			/* failed: try again */
			rc = ide_dev_read_id(drive, cmd, id, 0);

		stat = tp_ops->read_status(hwif);

		if (stat == (ATA_BUSY | ATA_DRDY))
			return 4;

		if (rc == 1 && cmd == ATA_CMD_ID_ATAPI) {
			printk(KERN_ERR "%s: no response (status = 0x%02x), "
					"resetting drive\n", drive->name, stat);
			msleep(50);
			tp_ops->dev_select(drive);
			msleep(50);
			tp_ops->exec_command(hwif, ATA_CMD_DEV_RESET);
			(void)ide_busy_sleep(drive, WAIT_WORSTCASE, 0);
			rc = ide_dev_read_id(drive, cmd, id, 0);
		}

		/* ensure drive IRQ is clear */
		stat = tp_ops->read_status(hwif);

		if (rc == 1)
			printk(KERN_ERR "%s: no response (status = 0x%02x)\n",
					drive->name, stat);
	} else {
		/* not present or maybe ATAPI */
		rc = 3;
	}
	if (drive->dn & 1) {
		/* exit with drive0 selected */
		tp_ops->dev_select(hwif->devices[0]);
		msleep(50);
		/* ensure drive irq is clear */
		(void)tp_ops->read_status(hwif);
	}
	return rc;
}

/**
 *	probe_for_drive		-	upper level drive probe
 *	@drive: drive to probe for
 *
 *	probe_for_drive() tests for existence of a given drive using do_probe()
 *	and presents things to the user as needed.
 *
 *	Returns:	0  no device was found
 *			1  device was found
 *			   (note: IDE_DFLAG_PRESENT might still be not set)
 */
static u8 probe_for_drive(ide_drive_t *drive)
{
	char *m;
	int rc;
	u8 cmd;

	drive->dev_flags &= ~IDE_DFLAG_ID_READ;

	m = (char *)&drive->id[ATA_ID_PROD];
	strcpy(m, "UNKNOWN");

	/* skip probing? */
	if ((drive->dev_flags & IDE_DFLAG_NOPROBE) == 0) {
		/* if !(success||timed-out) */
		cmd = ATA_CMD_ID_ATA;
		rc = do_probe(drive, cmd);
		if (rc >= 2) {
			/* look for ATAPI device */
			cmd = ATA_CMD_ID_ATAPI;
			rc = do_probe(drive, cmd);
		}

		if ((drive->dev_flags & IDE_DFLAG_PRESENT) == 0)
			return 0;

		/* identification failed? */
		if ((drive->dev_flags & IDE_DFLAG_ID_READ) == 0) {
			if (drive->media == ide_disk) {
				printk(KERN_INFO "%s: non-IDE drive, CHS=%d/%d/%d\n",
					drive->name, drive->cyl,
					drive->head, drive->sect);
			} else if (drive->media == ide_cdrom) {
				printk(KERN_INFO "%s: ATAPI cdrom (?)\n",
					drive->name);
			} else {
				/* nuke it */
				printk(KERN_WARNING "%s: Unknown device on bus refused identification. Ignoring.\n",
					drive->name);
				drive->dev_flags &= ~IDE_DFLAG_PRESENT;
			}
		} else {
			if (cmd == ATA_CMD_ID_ATAPI)
				ide_classify_atapi_dev(drive);
			else
				ide_classify_ata_dev(drive);
		}
	}

	if ((drive->dev_flags & IDE_DFLAG_PRESENT) == 0)
		return 0;

	/* The drive wasn't being helpful. Add generic info only */
	if ((drive->dev_flags & IDE_DFLAG_ID_READ) == 0) {
		generic_id(drive);
		return 1;
	}

	if (drive->media == ide_disk) {
		ide_disk_init_chs(drive);
		ide_disk_init_mult_count(drive);
	}

	return 1;
}

/* Device-model release hook: wake up whoever waits in ide_unregister(). */
static void hwif_release_dev(struct device *dev)
{
	ide_hwif_t *hwif = container_of(dev, ide_hwif_t, gendev);

	complete(&hwif->gendev_rel_comp);
}

/* Register the port with the driver core and create its class device. */
static int ide_register_port(ide_hwif_t *hwif)
{
	int ret;

	/* register with global device tree */
	dev_set_name(&hwif->gendev, hwif->name);
	dev_set_drvdata(&hwif->gendev, hwif);
	if (hwif->gendev.parent == NULL)
		hwif->gendev.parent = hwif->dev;
	hwif->gendev.release = hwif_release_dev;

	ret = device_register(&hwif->gendev);
	if (ret < 0) {
		printk(KERN_WARNING "IDE: %s: device_register error: %d\n",
			__func__, ret);
		goto out;
	}

	hwif->portdev = device_create(ide_port_class, &hwif->gendev,
				      MKDEV(0, 0), hwif, hwif->name);
	if (IS_ERR(hwif->portdev)) {
		ret = PTR_ERR(hwif->portdev);
		device_unregister(&hwif->gendev);
	}
out:
	return ret;
}

/**
 *	ide_port_wait_ready	-	wait for port to become ready
 *	@hwif: IDE port
 *
 *	This is needed on some PPCs and a bunch of BIOS-less embedded
 *	platforms.  Typical cases are:
 *
 *	- The firmware hard reset the disk before booting the kernel,
 *	  the drive is still doing it's poweron-reset sequence, that
 *	  can take up to 30 seconds.
 *
 *	- The firmware does nothing (or no firmware), the device is
 *	  still in POST state (same as above actually).
 *
 *	- Some CD/DVD/Writer combo drives tend to drive the bus during
 *	  their reset sequence even when they are non-selected slave
 *	  devices, thus preventing discovery of the main HD.
 *
 *	Doing this wait-for-non-busy should not harm any existing
 *	configuration and fix some issues like the above.
 *
 *	BenH.
 *
 *	Returns 0 on success, error code (< 0) otherwise.
 */
static int ide_port_wait_ready(ide_hwif_t *hwif)
{
	const struct ide_tp_ops *tp_ops = hwif->tp_ops;
	ide_drive_t *drive;
	int i, rc;

	printk(KERN_DEBUG "Probing IDE interface %s...\n", hwif->name);

	/* Let HW settle down a bit from whatever init state we
	 * come from */
	mdelay(2);

	/* Wait for BSY bit to go away, spec timeout is 30 seconds,
	 * I know of at least one disk who takes 31 seconds, I use 35
	 * here to be safe
	 */
	rc = ide_wait_not_busy(hwif, 35000);
	if (rc)
		return rc;

	/* Now make sure both master & slave are ready */
	ide_port_for_each_dev(i, drive, hwif) {
		/* Ignore disks that we will not probe for later. */
		if ((drive->dev_flags & IDE_DFLAG_NOPROBE) == 0 ||
		    (drive->dev_flags & IDE_DFLAG_PRESENT)) {
			tp_ops->dev_select(drive);
			tp_ops->write_devctl(hwif, ATA_DEVCTL_OBS);
			mdelay(2);
			rc = ide_wait_not_busy(hwif, 35000);
			if (rc)
				goto out;
		} else
			printk(KERN_DEBUG "%s: ide_wait_not_busy() skipped\n",
					  drive->name);
	}
out:
	/* Exit function with master reselected (let's be sane) */
	if (i)
		tp_ops->dev_select(hwif->devices[0]);

	return rc;
}

/**
 *	ide_undecoded_slave	-	look for bad CF adapters
 *	@dev1: slave device
 *
 *	Analyse the drives on the interface and attempt to decide if we
 *	have the same drive viewed twice.  This occurs with crap CF adapters
 *	and PCMCIA sometimes.
 */
void ide_undecoded_slave(ide_drive_t *dev1)
{
	ide_drive_t *dev0 = dev1->hwif->devices[0];

	if ((dev1->dn & 1) == 0 || (dev0->dev_flags & IDE_DFLAG_PRESENT) == 0)
		return;

	/* If the models don't match they are not the same product */
	if (strcmp((char *)&dev0->id[ATA_ID_PROD],
		   (char *)&dev1->id[ATA_ID_PROD]))
		return;

	/* Serial numbers do not match */
	if (strncmp((char *)&dev0->id[ATA_ID_SERNO],
		    (char *)&dev1->id[ATA_ID_SERNO], ATA_ID_SERNO_LEN))
		return;

	/* No serial number, thankfully very rare for CF */
	if (*(char *)&dev0->id[ATA_ID_SERNO] == 0)
		return;

	/* Appears to be an IDE flash adapter with decode bugs */
	printk(KERN_WARNING "ide-probe: ignoring undecoded slave\n");

	dev1->dev_flags &= ~IDE_DFLAG_PRESENT;
}

EXPORT_SYMBOL_GPL(ide_undecoded_slave);

/*
 * Probe both units on a port with the port IRQ masked.  Returns 0 if at
 * least one device was found, -ENODEV if none, -EACCES if probing is
 * disabled for both units.
 */
static int ide_probe_port(ide_hwif_t *hwif)
{
	ide_drive_t *drive;
	unsigned int irqd;
	int i, rc = -ENODEV;

	BUG_ON(hwif->present);

	if ((hwif->devices[0]->dev_flags & IDE_DFLAG_NOPROBE) &&
	    (hwif->devices[1]->dev_flags & IDE_DFLAG_NOPROBE))
		return -EACCES;

	/*
	 * We must always disable IRQ, as probe_for_drive will assert IRQ, but
	 * we'll install our IRQ driver much later...
	 */
	irqd = hwif->irq;
	if (irqd)
		disable_irq(hwif->irq);

	if (ide_port_wait_ready(hwif) == -EBUSY)
		printk(KERN_DEBUG "%s: Wait for ready failed before probe !\n",
			hwif->name);

	/*
	 * Second drive should only exist if first drive was found,
	 * but a lot of cdrom drives are configured as single slaves.
	 */
	ide_port_for_each_dev(i, drive, hwif) {
		(void) probe_for_drive(drive);
		if (drive->dev_flags & IDE_DFLAG_PRESENT)
			rc = 0;
	}

	/*
	 * Use cached IRQ number. It might be (and is...) changed by probe
	 * code above
	 */
	if (irqd)
		enable_irq(irqd);

	return rc;
}

/*
 * Apply per-drive quirks and tune PIO/DMA modes for every present device
 * on the port.
 */
static void ide_port_tune_devices(ide_hwif_t *hwif)
{
	const struct ide_port_ops *port_ops = hwif->port_ops;
	ide_drive_t *drive;
	int i;

	ide_port_for_each_present_dev(i, drive, hwif) {
		ide_check_nien_quirk_list(drive);

		if (port_ops && port_ops->quirkproc)
			port_ops->quirkproc(drive);
	}

	ide_port_for_each_present_dev(i, drive, hwif) {
		ide_set_max_pio(drive);

		drive->dev_flags |= IDE_DFLAG_NICE1;

		if (hwif->dma_ops)
			ide_set_dma(drive);
	}
}

/*
 * init request queue
 *
 * Returns 0 on success, 1 if the queue could not be allocated.
 */
static int ide_init_queue(ide_drive_t *drive)
{
	struct request_queue *q;
	ide_hwif_t *hwif = drive->hwif;
	int max_sectors = 256;
	int max_sg_entries = PRD_ENTRIES;

	/*
	 *	Our default set up assumes the normal IDE case,
	 *	that is 64K segmenting, standard PRD setup
	 *	and LBA28. Some drivers then impose their own
	 *	limits and LBA48 we could raise it but as yet
	 *	do not.
	 */

	q = blk_init_queue_node(do_ide_request, NULL, hwif_to_node(hwif));
	if (!q)
		return 1;

	q->queuedata = drive;
	blk_queue_segment_boundary(q, 0xffff);

	if (hwif->rqsize < max_sectors)
		max_sectors = hwif->rqsize;
	blk_queue_max_hw_sectors(q, max_sectors);

#ifdef CONFIG_PCI
	/* When we have an IOMMU, we may have a problem where pci_map_sg()
	 * creates segments that don't completely match our boundary
	 * requirements and thus need to be broken up again. Because it
	 * doesn't align properly either, we may actually have to break up
	 * to more segments than what was we got in the first place, a max
	 * worst case is twice as many.
	 * This will be fixed once we teach pci_map_sg() about our boundary
	 * requirements, hopefully soon. *FIXME*
	 */
	if (!PCI_DMA_BUS_IS_PHYS)
		max_sg_entries >>= 1;
#endif /* CONFIG_PCI */

	blk_queue_max_segments(q, max_sg_entries);

	/* assign drive queue */
	drive->queue = q;

	/* needs drive->queue to be set */
	ide_toggle_bounce(drive, 1);

	return 0;
}

/* Serializes port/drive configuration changes across the IDE core. */
static DEFINE_MUTEX(ide_cfg_mtx);

/*
 * For any present drive:
 * - allocate the block device queue
 *
 * Returns the number of drives whose queue was set up successfully.
 */
static int ide_port_setup_devices(ide_hwif_t *hwif)
{
	ide_drive_t *drive;
	int i, j = 0;

	mutex_lock(&ide_cfg_mtx);
	ide_port_for_each_present_dev(i, drive, hwif) {
		if (ide_init_queue(drive)) {
			printk(KERN_ERR "ide: failed to init %s\n",
					drive->name);
			drive->dev_flags &= ~IDE_DFLAG_PRESENT;
			continue;
		}

		j++;
	}
	mutex_unlock(&ide_cfg_mtx);

	return j;
}

/* Clear pending IRQs and unmask device interrupts on every host port. */
static void ide_host_enable_irqs(struct ide_host *host)
{
	ide_hwif_t *hwif;
	int i;

	ide_host_for_each_port(i, hwif, host) {
		if (hwif == NULL)
			continue;

		/* clear any pending IRQs */
		hwif->tp_ops->read_status(hwif);

		/* unmask IRQs */
		if (hwif->io_ports.ctl_addr)
			hwif->tp_ops->write_devctl(hwif, ATA_DEVCTL_OBS);
	}
}

/*
 * This routine sets up the IRQ for an IDE interface.
 * Returns 0 on success, 1 if the IRQ could not be requested.
 */
static int init_irq (ide_hwif_t *hwif)
{
	struct ide_io_ports *io_ports = &hwif->io_ports;
	struct ide_host *host = hwif->host;
	irq_handler_t irq_handler = host->irq_handler;
	int sa = host->irq_flags;

	if (irq_handler == NULL)
		irq_handler = ide_intr;

	if (request_irq(hwif->irq, irq_handler, sa, hwif->name, hwif))
		goto out_up;

#if !defined(__mc68000__)
	printk(KERN_INFO "%s at 0x%03lx-0x%03lx,0x%03lx on irq %d", hwif->name,
		io_ports->data_addr, io_ports->status_addr,
		io_ports->ctl_addr, hwif->irq);
#else
	printk(KERN_INFO "%s at 0x%08lx on irq %d", hwif->name,
		io_ports->data_addr, hwif->irq);
#endif /* __mc68000__ */

	if (hwif->host->host_flags & IDE_HFLAG_SERIALIZE)
		printk(KERN_CONT " (serialized)");
	printk(KERN_CONT "\n");

	return 0;
out_up:
	return 1;
}

/* blk_register_region() lock callback for not-yet-claimed minors. */
static int ata_lock(dev_t dev, void *data)
{
	/* FIXME: we want to pin hwif down */
	return 0;
}

/*
 * blk_register_region() probe callback: request the media-specific driver
 * module for the unit addressed by @dev.  Always returns NULL; the module
 * load (if any) makes the device appear through the normal path.
 */
static struct kobject *ata_probe(dev_t dev, int *part, void *data)
{
	ide_hwif_t *hwif = data;
	int unit = *part >> PARTN_BITS;
	ide_drive_t *drive = hwif->devices[unit];

	if ((drive->dev_flags & IDE_DFLAG_PRESENT) == 0)
		return NULL;

	if (drive->media == ide_disk)
		request_module("ide-disk");
	if (drive->media == ide_cdrom || drive->media == ide_optical)
		request_module("ide-cd");
	if (drive->media == ide_tape)
		request_module("ide-tape");
	if (drive->media == ide_floppy)
		request_module("ide-floppy");

	return NULL;
}

/* Region probe callback for a registered gendisk: map minor to its kobj. */
static struct kobject *exact_match(dev_t dev, int *part, void *data)
{
	struct gendisk *p = data;
	*part &= (1 << PARTN_BITS) - 1;
	return &disk_to_dev(p)->kobj;
}

/* Region lock callback: take a reference on the gendisk. */
static int exact_lock(dev_t dev, void *data)
{
	struct gendisk *p = data;

	if (!get_disk(p))
		return -1;
	return 0;
}

/* Claim the minor range of @disk in the block layer's region map. */
void ide_register_region(struct gendisk *disk)
{
	blk_register_region(MKDEV(disk->major, disk->first_minor),
			    disk->minors, NULL, exact_match, exact_lock, disk);
}

EXPORT_SYMBOL_GPL(ide_register_region);

/* Release the minor range previously claimed by ide_register_region(). */
void ide_unregister_region(struct gendisk *disk)
{
	blk_unregister_region(MKDEV(disk->major, disk->first_minor),
			      disk->minors);
}
EXPORT_SYMBOL_GPL(ide_unregister_region);

/*
 * Initialize the gendisk for @drive: derive major/minor numbers and the
 * "hdX" name from the port index and unit number, and attach the drive's
 * request queue.
 */
void ide_init_disk(struct gendisk *disk, ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	unsigned int unit = drive->dn & 1;

	disk->major = hwif->major;
	disk->first_minor = unit << PARTN_BITS;
	sprintf(disk->disk_name, "hd%c", 'a' + hwif->index * MAX_DRIVES + unit);
	disk->queue = drive->queue;
}

EXPORT_SYMBOL_GPL(ide_init_disk);

/*
 * Device-model release hook for a drive: tear down its proc entries and
 * request queue, mark it absent and wake up the unregister path.
 */
static void drive_release_dev (struct device *dev)
{
	ide_drive_t *drive = container_of(dev, ide_drive_t, gendev);

	ide_proc_unregister_device(drive);

	blk_cleanup_queue(drive->queue);
	drive->queue = NULL;

	drive->dev_flags &= ~IDE_DFLAG_PRESENT;

	complete(&drive->gendev_rel_comp);
}

/*
 * Final per-port init: block-device major, scatter/gather table, IRQ and
 * the block region for module autoloading.  Returns 1 on success, 0 on
 * failure (the major is released again on the error path).
 */
static int hwif_init(ide_hwif_t *hwif)
{
	if (!hwif->irq) {
		printk(KERN_ERR "%s: disabled, no IRQ\n", hwif->name);
		return 0;
	}

	if (register_blkdev(hwif->major, hwif->name))
		return 0;

	if (!hwif->sg_max_nents)
		hwif->sg_max_nents = PRD_ENTRIES;

	hwif->sg_table = kmalloc(sizeof(struct scatterlist)*hwif->sg_max_nents,
				 GFP_KERNEL);
	if (!hwif->sg_table) {
		printk(KERN_ERR "%s: unable to allocate SG table.\n",
			hwif->name);
		goto out;
	}

	sg_init_table(hwif->sg_table, hwif->sg_max_nents);

	if (init_irq(hwif)) {
		printk(KERN_ERR "%s: disabled, unable to get IRQ %d\n",
			hwif->name, hwif->irq);
		goto out;
	}

	blk_register_region(MKDEV(hwif->major, 0), MAX_DRIVES << PARTN_BITS,
			    THIS_MODULE, ata_probe, ata_lock, hwif);
	return 1;

out:
	unregister_blkdev(hwif->major, hwif->name);
	return 0;
}

/* Register every present drive on the port with the driver core. */
static void hwif_register_devices(ide_hwif_t *hwif)
{
	ide_drive_t *drive;
	unsigned int i;

	ide_port_for_each_present_dev(i, drive, hwif) {
		struct device *dev = &drive->gendev;
		int ret;

		dev_set_name(dev, "%u.%u", hwif->index, i);
		dev_set_drvdata(dev, drive);
		dev->parent = &hwif->gendev;
		dev->bus = &ide_bus_type;
		dev->release = drive_release_dev;

		ret = device_register(dev);
		if (ret < 0)
			printk(KERN_WARNING "IDE: %s: device_register error: "
					    "%d\n", __func__, ret);
	}
}

/*
 * Propagate host flags into per-drive flags and give the port driver a
 * chance to do per-drive setup via ->init_dev.
 */
static void ide_port_init_devices(ide_hwif_t *hwif)
{
	const struct ide_port_ops *port_ops = hwif->port_ops;
	ide_drive_t *drive;
	int i;

	ide_port_for_each_dev(i, drive, hwif) {
		drive->dn = i + hwif->channel * 2;

		if (hwif->host_flags & IDE_HFLAG_IO_32BIT)
			drive->io_32bit = 1;
		if (hwif->host_flags & IDE_HFLAG_NO_IO_32BIT)
			drive->dev_flags |= IDE_DFLAG_NO_IO_32BIT;
		if (hwif->host_flags & IDE_HFLAG_UNMASK_IRQS)
			drive->dev_flags |= IDE_DFLAG_UNMASK;
		if (hwif->host_flags & IDE_HFLAG_NO_UNMASK_IRQS)
			drive->dev_flags |= IDE_DFLAG_NO_UNMASK;

		drive->pio_mode = XFER_PIO_0;

		if (port_ops && port_ops->init_dev)
			port_ops->init_dev(drive);
	}
}

/*
 * Copy the chipset description @d into the port: ops tables, transfer-mode
 * masks, DMA setup and request-size limits, then run the chipset's
 * ->init_hwif hook.
 */
static void ide_init_port(ide_hwif_t *hwif, unsigned int port,
			  const struct ide_port_info *d)
{
	hwif->channel = port;

	hwif->chipset = d->chipset ? d->chipset : ide_pci;

	if (d->init_iops)
		d->init_iops(hwif);

	/* ->host_flags may be set by ->init_iops (or even earlier...) */
	hwif->host_flags |= d->host_flags;
	hwif->pio_mask = d->pio_mask;

	if (d->tp_ops)
		hwif->tp_ops = d->tp_ops;

	/* ->set_pio_mode for DTC2278 is currently limited to port 0 */
	if ((hwif->host_flags & IDE_HFLAG_DTC2278) == 0 || hwif->channel == 0)
		hwif->port_ops = d->port_ops;

	hwif->swdma_mask = d->swdma_mask;
	hwif->mwdma_mask = d->mwdma_mask;
	hwif->ultra_mask = d->udma_mask;

	if ((d->host_flags & IDE_HFLAG_NO_DMA) == 0) {
		int rc;

		hwif->dma_ops = d->dma_ops;

		if (d->init_dma)
			rc = d->init_dma(hwif, d);
		else
			rc = ide_hwif_setup_dma(hwif, d);

		if (rc < 0) {
			printk(KERN_INFO "%s: DMA disabled\n", hwif->name);

			/* fall back to PIO only */
			hwif->dma_ops = NULL;
			hwif->dma_base = 0;
			hwif->swdma_mask = 0;
			hwif->mwdma_mask = 0;
			hwif->ultra_mask = 0;
		}
	}

	if ((d->host_flags & IDE_HFLAG_SERIALIZE) ||
	    ((d->host_flags & IDE_HFLAG_SERIALIZE_DMA) && hwif->dma_base))
		hwif->host->host_flags |= IDE_HFLAG_SERIALIZE;

	if (d->max_sectors)
		hwif->rqsize = d->max_sectors;
	else {
		if ((hwif->host_flags & IDE_HFLAG_NO_LBA48) ||
		    (hwif->host_flags & IDE_HFLAG_NO_LBA48_DMA))
			hwif->rqsize = 256;
		else
			hwif->rqsize = 65536;
	}

	/* call chipset specific routine for each enabled port */
	if (d->init_hwif)
		d->init_hwif(hwif);
}

/*
 * Run the port driver's cable detection if the port can do UDMA modes
 * that need an 80-wire cable (mask 0x78 = UDMA2+) and the cable type was
 * not forced.
 */
static void ide_port_cable_detect(ide_hwif_t *hwif)
{
	const struct ide_port_ops *port_ops = hwif->port_ops;

	if (port_ops && port_ops->cable_detect && (hwif->ultra_mask & 0x78)) {
		if (hwif->cbl != ATA_CBL_PATA40_SHORT)
			hwif->cbl = port_ops->cable_detect(hwif);
	}
}

/* Port-slot index to block-device major number. */
static const u8 ide_hwif_to_major[] =
	{ IDE0_MAJOR, IDE1_MAJOR, IDE2_MAJOR, IDE3_MAJOR, IDE4_MAJOR,
	  IDE5_MAJOR, IDE6_MAJOR, IDE7_MAJOR, IDE8_MAJOR, IDE9_MAJOR };

/*
 * Reset every drive object on the port to a pristine state, keeping only
 * the (zeroed) id buffer allocation, and set the non-zero defaults.
 */
static void ide_port_init_devices_data(ide_hwif_t *hwif)
{
	ide_drive_t *drive;
	int i;

	ide_port_for_each_dev(i, drive, hwif) {
		u8 j = (hwif->index * MAX_DRIVES) + i;
		u16 *saved_id = drive->id;

		/* wipe the drive but keep its id buffer */
		memset(drive, 0, sizeof(*drive));
		memset(saved_id, 0, SECTOR_SIZE);
		drive->id = saved_id;

		drive->media			= ide_disk;
		drive->select			= (i << 4) | ATA_DEVICE_OBS;
		drive->hwif			= hwif;
		drive->ready_stat		= ATA_DRDY;
		drive->bad_wstat		= BAD_W_STAT;
		drive->special_flags		= IDE_SFLAG_RECALIBRATE |
						  IDE_SFLAG_SET_GEOMETRY;
		drive->name[0]			= 'h';
		drive->name[1]			= 'd';
		drive->name[2]			= 'a' + j;
		drive->max_failures		= IDE_DEFAULT_MAX_FAILURES;

		INIT_LIST_HEAD(&drive->list);
		init_completion(&drive->gendev_rel_comp);
	}
}

/* Initialize software state of a freshly allocated port at slot @index. */
static void ide_init_port_data(ide_hwif_t *hwif, unsigned int index)
{
	/* fill in any non-zero initial values */
	hwif->index = index;
	hwif->major = ide_hwif_to_major[index];

	hwif->name[0] = 'i';
	hwif->name[1] = 'd';
	hwif->name[2] = 'e';
	hwif->name[3] = '0' + index;

	spin_lock_init(&hwif->lock);

	init_timer(&hwif->timer);
	hwif->timer.function = &ide_timer_expiry;
	hwif->timer.data = (unsigned long)hwif;

	init_completion(&hwif->gendev_rel_comp);

	hwif->tp_ops = &default_tp_ops;

	ide_port_init_devices_data(hwif);
}

/* Copy the hardware description @hw (ports, IRQ, parent device) into the port. */
static void ide_init_port_hw(ide_hwif_t *hwif, struct ide_hw *hw)
{
	memcpy(&hwif->io_ports, &hw->io_ports, sizeof(hwif->io_ports));
	hwif->irq = hw->irq;
	hwif->dev = hw->dev;
	hwif->gendev.parent = hw->parent ? hw->parent : hw->dev;
	hwif->config_data = hw->config;
}

/* Bitmap of claimed port slots, protected by ide_cfg_mtx. */
static unsigned int ide_indexes;

/**
 *	ide_find_port_slot	-	find free port slot
 *	@d: IDE port info
 *
 *	Return the new port slot index or -ENOENT if we are out of free slots.
 */
static int ide_find_port_slot(const struct ide_port_info *d)
{
	int idx = -ENOENT;
	u8 bootable = (d && (d->host_flags & IDE_HFLAG_NON_BOOTABLE)) ? 0 : 1;
	u8 i = (d && (d->host_flags & IDE_HFLAG_QD_2ND_PORT)) ? 1 : 0;

	/*
	 * Claim an unassigned slot.
	 *
	 * Give preference to claiming other slots before claiming ide0/ide1,
	 * just in case there's another interface yet-to-be-scanned
	 * which uses ports 0x1f0/0x170 (the ide0/ide1 defaults).
	 *
	 * Unless there is a bootable card that does not use the standard
	 * ports 0x1f0/0x170 (the ide0/ide1 defaults).
	 */
	mutex_lock(&ide_cfg_mtx);
	if (bootable) {
		if ((ide_indexes | i) != (1 << MAX_HWIFS) - 1)
			idx = ffz(ide_indexes | i);
	} else {
		if ((ide_indexes | 3) != (1 << MAX_HWIFS) - 1)
			idx = ffz(ide_indexes | 3);
		else if ((ide_indexes & 3) != 3)
			idx = ffz(ide_indexes);
	}
	if (idx >= 0)
		ide_indexes |= (1 << idx);
	mutex_unlock(&ide_cfg_mtx);

	return idx;
}

/* Return slot @idx to the free pool. */
static void ide_free_port_slot(int idx)
{
	mutex_lock(&ide_cfg_mtx);
	ide_indexes &= ~(1 << idx);
	mutex_unlock(&ide_cfg_mtx);
}

/* Free the drive objects and their id buffers for a port. */
static void ide_port_free_devices(ide_hwif_t *hwif)
{
	ide_drive_t *drive;
	int i;

	ide_port_for_each_dev(i, drive, hwif) {
		kfree(drive->id);
		kfree(drive);
	}
}

/*
 * Allocate the drive objects (and their always-present id buffers) for a
 * port, NUMA-local to @node.  Returns 0 or -ENOMEM (all partial
 * allocations are freed on failure).
 */
static int ide_port_alloc_devices(ide_hwif_t *hwif, int node)
{
	int i;

	for (i = 0; i < MAX_DRIVES; i++) {
		ide_drive_t *drive;

		drive = kzalloc_node(sizeof(*drive), GFP_KERNEL, node);
		if (drive == NULL)
			goto out_nomem;

		/*
		 * In order to keep things simple we have an id
		 * block for all drives at all times. If the device
		 * is pre ATA or refuses ATA/ATAPI identify we
		 * will add faked data to this.
		 *
		 * Also note that 0 everywhere means "can't do X"
		 */
		drive->id = kzalloc_node(SECTOR_SIZE, GFP_KERNEL, node);
		if (drive->id == NULL)
			goto out_nomem;

		hwif->devices[i] = drive;
	}
	return 0;

out_nomem:
	ide_port_free_devices(hwif);
	return -ENOMEM;
}

/*
 * Allocate an ide_host with up to @n_ports ports described by @hws,
 * claiming a slot for each.  Ports that cannot be set up are silently
 * skipped; NULL is returned only if no port could be allocated.
 */
struct ide_host *ide_host_alloc(const struct ide_port_info *d,
				struct ide_hw **hws, unsigned int n_ports)
{
	struct ide_host *host;
	struct device *dev = hws[0] ? hws[0]->dev : NULL;
	int node = dev ? dev_to_node(dev) : -1;
	int i;

	host = kzalloc_node(sizeof(*host), GFP_KERNEL, node);
	if (host == NULL)
		return NULL;

	for (i = 0; i < n_ports; i++) {
		ide_hwif_t *hwif;
		int idx;

		if (hws[i] == NULL)
			continue;

		hwif = kzalloc_node(sizeof(*hwif), GFP_KERNEL, node);
		if (hwif == NULL)
			continue;

		if (ide_port_alloc_devices(hwif, node) < 0) {
			kfree(hwif);
			continue;
		}

		idx = ide_find_port_slot(d);
		if (idx < 0) {
			printk(KERN_ERR "%s: no free slot for interface\n",
					d ? d->name : "ide");
			ide_port_free_devices(hwif);
			kfree(hwif);
			continue;
		}

		ide_init_port_data(hwif, idx);

		hwif->host = host;

		host->ports[i] = hwif;
		host->n_ports++;
	}

	if (host->n_ports == 0) {
		kfree(host);
		return NULL;
	}

	host->dev[0] = dev;

	if (d) {
		host->init_chipset = d->init_chipset;
		host->get_lock     = d->get_lock;
		host->release_lock = d->release_lock;
		host->host_flags = d->host_flags;
		host->irq_flags = d->irq_flags;
	}

	return host;
}
EXPORT_SYMBOL_GPL(ide_host_alloc);

/* Release everything ide_host_alloc() set up for one port. */
static void ide_port_free(ide_hwif_t *hwif)
{
	ide_port_free_devices(hwif);
	ide_free_port_slot(hwif->index);
	kfree(hwif);
}

/* Detach @hwif from its host and free it. */
static void ide_disable_port(ide_hwif_t *hwif)
{
	struct ide_host *host = hwif->host;
	int i;

	printk(KERN_INFO "%s: disabling port\n", hwif->name);

	for (i = 0; i < MAX_HOST_PORTS; i++) {
		if (host->ports[i] == hwif) {
			host->ports[i] = NULL;
			host->n_ports--;
		}
	}

	ide_port_free(hwif);
}

/*
 * Bring up every port of @host: hardware/chipset init, device probe,
 * IRQ setup, queue allocation and registration with the driver core,
 * sysfs and procfs.  Returns 0 if at least one port initialized, -1
 * otherwise.
 */
int ide_host_register(struct ide_host *host, const struct ide_port_info *d,
		      struct ide_hw **hws)
{
	ide_hwif_t *hwif, *mate = NULL;
	int i, j = 0;

	/* pass 1: hardware description, chipset init and drive probing prep */
	ide_host_for_each_port(i, hwif, host) {
		if (hwif == NULL) {
			mate = NULL;
			continue;
		}

		ide_init_port_hw(hwif, hws[i]);
		ide_port_apply_params(hwif);

		/* pair odd ports with the preceding even port */
		if ((i & 1) && mate) {
			hwif->mate = mate;
			mate->mate = hwif;
		}

		mate = (i & 1) ? NULL : hwif;

		ide_init_port(hwif, i & 1, d);
		ide_port_cable_detect(hwif);

		hwif->port_flags |= IDE_PFLAG_PROBING;

		ide_port_init_devices(hwif);
	}

	/* pass 2: probe for drives and register the port devices */
	ide_host_for_each_port(i, hwif, host) {
		if (hwif == NULL)
			continue;

		if (ide_probe_port(hwif) == 0)
			hwif->present = 1;

		hwif->port_flags &= ~IDE_PFLAG_PROBING;

		if ((hwif->host_flags & IDE_HFLAG_4DRIVES) == 0 ||
		    hwif->mate == NULL || hwif->mate->present == 0) {
			if (ide_register_port(hwif)) {
				ide_disable_port(hwif);
				continue;
			}
		}

		if (hwif->present)
			ide_port_tune_devices(hwif);
	}

	ide_host_enable_irqs(host);

	/* pass 3: IRQs, block queues and ACPI */
	ide_host_for_each_port(i, hwif, host) {
		if (hwif == NULL)
			continue;

		if (hwif_init(hwif) == 0) {
			printk(KERN_INFO "%s: failed to initialize IDE "
					 "interface\n", hwif->name);
			device_unregister(&hwif->gendev);
			ide_disable_port(hwif);
			continue;
		}

		if (hwif->present)
			if (ide_port_setup_devices(hwif) == 0) {
				hwif->present = 0;
				continue;
			}

		j++;

		ide_acpi_init_port(hwif);

		if (hwif->present)
			ide_acpi_port_init_devices(hwif);
	}

	/* pass 4: sysfs/procfs and per-drive device registration */
	ide_host_for_each_port(i, hwif, host) {
		if (hwif == NULL)
			continue;

		ide_sysfs_register_port(hwif);
		ide_proc_register_port(hwif);

		if (hwif->present) {
			ide_proc_port_register_devices(hwif);
			hwif_register_devices(hwif);
		}
	}

	return j ? 0 : -1;
}
EXPORT_SYMBOL_GPL(ide_host_register);

/*
 * Convenience wrapper: allocate and register a host in one step.
 * On success optionally stores the host through @hostp; on failure the
 * host is freed again.
 */
int ide_host_add(const struct ide_port_info *d, struct ide_hw **hws,
		 unsigned int n_ports, struct ide_host **hostp)
{
	struct ide_host *host;
	int rc;

	host = ide_host_alloc(d, hws, n_ports);
	if (host == NULL)
		return -ENOMEM;

	rc = ide_host_register(host, d, hws);
	if (rc) {
		ide_host_free(host);
		return rc;
	}

	if (hostp)
		*hostp = host;

	return 0;
}
EXPORT_SYMBOL_GPL(ide_host_add);

/*
 * Unregister every present drive on the port and wait for each release
 * callback to complete.  Caller holds ide_cfg_mtx.
 */
static void __ide_port_unregister_devices(ide_hwif_t *hwif)
{
	ide_drive_t *drive;
	int i;

	ide_port_for_each_present_dev(i, drive, hwif) {
		device_unregister(&drive->gendev);
		wait_for_completion(&drive->gendev_rel_comp);
	}
}

/* Locked wrapper: drop all devices and reset the port's drive data. */
void ide_port_unregister_devices(ide_hwif_t *hwif)
{
	mutex_lock(&ide_cfg_mtx);
	__ide_port_unregister_devices(hwif);
	hwif->present = 0;
	ide_port_init_devices_data(hwif);
	mutex_unlock(&ide_cfg_mtx);
}
EXPORT_SYMBOL_GPL(ide_port_unregister_devices);

/**
 *	ide_unregister		-	free an IDE interface
 *	@hwif: IDE interface
 *
 *	Perform the final unregister of an IDE interface.
 *
 *	Locking:
 *	The caller must not hold the IDE locks.
 *
 *	It is up to the caller to be sure there is no pending I/O here,
 *	and that the interface will not be reopened (present/vanishing
 *	locking isn't yet done BTW).
 */
static void ide_unregister(ide_hwif_t *hwif)
{
	BUG_ON(in_interrupt());
	BUG_ON(irqs_disabled());

	mutex_lock(&ide_cfg_mtx);

	if (hwif->present) {
		__ide_port_unregister_devices(hwif);
		hwif->present = 0;
	}

	ide_proc_unregister_port(hwif);

	free_irq(hwif->irq, hwif);

	device_unregister(hwif->portdev);
	device_unregister(&hwif->gendev);
	wait_for_completion(&hwif->gendev_rel_comp);

	/*
	 * Remove us from the kernel's knowledge
	 */
	blk_unregister_region(MKDEV(hwif->major, 0), MAX_DRIVES<<PARTN_BITS);
	kfree(hwif->sg_table);
	unregister_blkdev(hwif->major, hwif->name);

	ide_release_dma_engine(hwif);

	mutex_unlock(&ide_cfg_mtx);
}

/* Free all ports of @host and the host itself (no unregistering here). */
void ide_host_free(struct ide_host *host)
{
	ide_hwif_t *hwif;
	int i;

	ide_host_for_each_port(i, hwif, host) {
		if (hwif)
			ide_port_free(hwif);
	}

	kfree(host);
}
EXPORT_SYMBOL_GPL(ide_host_free);

/* Full teardown: unregister every port, then free the host. */
void ide_host_remove(struct ide_host *host)
{
	ide_hwif_t *hwif;
	int i;

	ide_host_for_each_port(i, hwif, host) {
		if (hwif)
			ide_unregister(hwif);
	}

	ide_host_free(host);
}
EXPORT_SYMBOL_GPL(ide_host_remove);

/*
 * Re-scan a port at runtime (e.g. after media/adapter insertion): probe
 * for drives and, if any respond, bring them fully online.
 */
void ide_port_scan(ide_hwif_t *hwif)
{
	int rc;

	ide_port_apply_params(hwif);
	ide_port_cable_detect(hwif);

	hwif->port_flags |= IDE_PFLAG_PROBING;

	ide_port_init_devices(hwif);

	rc = ide_probe_port(hwif);

	hwif->port_flags &= ~IDE_PFLAG_PROBING;

	if (rc < 0)
		return;

	hwif->present = 1;

	ide_port_tune_devices(hwif);
	ide_port_setup_devices(hwif);
	ide_acpi_port_init_devices(hwif);
	hwif_register_devices(hwif);
	ide_proc_port_register_devices(hwif);
}
EXPORT_SYMBOL_GPL(ide_port_scan);
gpl-2.0
Zenfone2-development/android_kernel_asus_moorefield
arch/tile/lib/delay.c
12155
1184
/* * Copyright 2010 Tilera Corporation. All Rights Reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation, version 2. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or * NON INFRINGEMENT. See the GNU General Public License for * more details. */ #include <linux/module.h> #include <linux/delay.h> #include <linux/thread_info.h> #include <asm/timex.h> void __udelay(unsigned long usecs) { if (usecs > ULONG_MAX / 1000) { WARN_ON_ONCE(usecs > ULONG_MAX / 1000); usecs = ULONG_MAX / 1000; } __ndelay(usecs * 1000); } EXPORT_SYMBOL(__udelay); void __ndelay(unsigned long nsecs) { cycles_t target = get_cycles(); target += ns2cycles(nsecs); while (get_cycles() < target) cpu_relax(); } EXPORT_SYMBOL(__ndelay); void __delay(unsigned long cycles) { cycles_t target = get_cycles() + cycles; while (get_cycles() < target) cpu_relax(); } EXPORT_SYMBOL(__delay);
gpl-2.0
meimz/linux
arch/tile/lib/delay.c
12155
1184
/* * Copyright 2010 Tilera Corporation. All Rights Reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation, version 2. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or * NON INFRINGEMENT. See the GNU General Public License for * more details. */ #include <linux/module.h> #include <linux/delay.h> #include <linux/thread_info.h> #include <asm/timex.h> void __udelay(unsigned long usecs) { if (usecs > ULONG_MAX / 1000) { WARN_ON_ONCE(usecs > ULONG_MAX / 1000); usecs = ULONG_MAX / 1000; } __ndelay(usecs * 1000); } EXPORT_SYMBOL(__udelay); void __ndelay(unsigned long nsecs) { cycles_t target = get_cycles(); target += ns2cycles(nsecs); while (get_cycles() < target) cpu_relax(); } EXPORT_SYMBOL(__ndelay); void __delay(unsigned long cycles) { cycles_t target = get_cycles() + cycles; while (get_cycles() < target) cpu_relax(); } EXPORT_SYMBOL(__delay);
gpl-2.0
Jovy23/N900TUVUDNF9_Kernel
arch/powerpc/boot/mv64x60_i2c.c
13947
5704
/* * Bootloader version of the i2c driver for the MV64x60. * * Author: Dale Farnsworth <dfarnsworth@mvista.com> * Maintained by: Mark A. Greer <mgreer@mvista.com> * * 2003, 2007 (c) MontaVista, Software, Inc. This file is licensed under * the terms of the GNU General Public License version 2. This program is * licensed "as is" without any warranty of any kind, whether express or * implied. */ #include <stdarg.h> #include <stddef.h> #include "types.h" #include "elf.h" #include "page.h" #include "string.h" #include "stdio.h" #include "io.h" #include "ops.h" #include "mv64x60.h" /* Register defines */ #define MV64x60_I2C_REG_SLAVE_ADDR 0x00 #define MV64x60_I2C_REG_DATA 0x04 #define MV64x60_I2C_REG_CONTROL 0x08 #define MV64x60_I2C_REG_STATUS 0x0c #define MV64x60_I2C_REG_BAUD 0x0c #define MV64x60_I2C_REG_EXT_SLAVE_ADDR 0x10 #define MV64x60_I2C_REG_SOFT_RESET 0x1c #define MV64x60_I2C_CONTROL_ACK 0x04 #define MV64x60_I2C_CONTROL_IFLG 0x08 #define MV64x60_I2C_CONTROL_STOP 0x10 #define MV64x60_I2C_CONTROL_START 0x20 #define MV64x60_I2C_CONTROL_TWSIEN 0x40 #define MV64x60_I2C_CONTROL_INTEN 0x80 #define MV64x60_I2C_STATUS_BUS_ERR 0x00 #define MV64x60_I2C_STATUS_MAST_START 0x08 #define MV64x60_I2C_STATUS_MAST_REPEAT_START 0x10 #define MV64x60_I2C_STATUS_MAST_WR_ADDR_ACK 0x18 #define MV64x60_I2C_STATUS_MAST_WR_ADDR_NO_ACK 0x20 #define MV64x60_I2C_STATUS_MAST_WR_ACK 0x28 #define MV64x60_I2C_STATUS_MAST_WR_NO_ACK 0x30 #define MV64x60_I2C_STATUS_MAST_LOST_ARB 0x38 #define MV64x60_I2C_STATUS_MAST_RD_ADDR_ACK 0x40 #define MV64x60_I2C_STATUS_MAST_RD_ADDR_NO_ACK 0x48 #define MV64x60_I2C_STATUS_MAST_RD_DATA_ACK 0x50 #define MV64x60_I2C_STATUS_MAST_RD_DATA_NO_ACK 0x58 #define MV64x60_I2C_STATUS_MAST_WR_ADDR_2_ACK 0xd0 #define MV64x60_I2C_STATUS_MAST_WR_ADDR_2_NO_ACK 0xd8 #define MV64x60_I2C_STATUS_MAST_RD_ADDR_2_ACK 0xe0 #define MV64x60_I2C_STATUS_MAST_RD_ADDR_2_NO_ACK 0xe8 #define MV64x60_I2C_STATUS_NO_STATUS 0xf8 static u8 *ctlr_base; static int mv64x60_i2c_wait_for_status(int wanted) 
{ int i; int status; for (i=0; i<1000; i++) { udelay(10); status = in_le32((u32 *)(ctlr_base + MV64x60_I2C_REG_STATUS)) & 0xff; if (status == wanted) return status; } return -status; } static int mv64x60_i2c_control(int control, int status) { out_le32((u32 *)(ctlr_base + MV64x60_I2C_REG_CONTROL), control & 0xff); return mv64x60_i2c_wait_for_status(status); } static int mv64x60_i2c_read_byte(int control, int status) { out_le32((u32 *)(ctlr_base + MV64x60_I2C_REG_CONTROL), control & 0xff); if (mv64x60_i2c_wait_for_status(status) < 0) return -1; return in_le32((u32 *)(ctlr_base + MV64x60_I2C_REG_DATA)) & 0xff; } static int mv64x60_i2c_write_byte(int data, int control, int status) { out_le32((u32 *)(ctlr_base + MV64x60_I2C_REG_DATA), data & 0xff); out_le32((u32 *)(ctlr_base + MV64x60_I2C_REG_CONTROL), control & 0xff); return mv64x60_i2c_wait_for_status(status); } int mv64x60_i2c_read(u32 devaddr, u8 *buf, u32 offset, u32 offset_size, u32 count) { int i; int data; int control; int status; if (ctlr_base == NULL) return -1; /* send reset */ out_le32((u32 *)(ctlr_base + MV64x60_I2C_REG_SOFT_RESET), 0); out_le32((u32 *)(ctlr_base + MV64x60_I2C_REG_SLAVE_ADDR), 0); out_le32((u32 *)(ctlr_base + MV64x60_I2C_REG_EXT_SLAVE_ADDR), 0); out_le32((u32 *)(ctlr_base + MV64x60_I2C_REG_BAUD), (4 << 3) | 0x4); if (mv64x60_i2c_control(MV64x60_I2C_CONTROL_TWSIEN, MV64x60_I2C_STATUS_NO_STATUS) < 0) return -1; /* send start */ control = MV64x60_I2C_CONTROL_START | MV64x60_I2C_CONTROL_TWSIEN; status = MV64x60_I2C_STATUS_MAST_START; if (mv64x60_i2c_control(control, status) < 0) return -1; /* select device for writing */ data = devaddr & ~0x1; control = MV64x60_I2C_CONTROL_TWSIEN; status = MV64x60_I2C_STATUS_MAST_WR_ADDR_ACK; if (mv64x60_i2c_write_byte(data, control, status) < 0) return -1; /* send offset of data */ control = MV64x60_I2C_CONTROL_TWSIEN; status = MV64x60_I2C_STATUS_MAST_WR_ACK; if (offset_size > 1) { if (mv64x60_i2c_write_byte(offset >> 8, control, status) < 0) return -1; } if 
(mv64x60_i2c_write_byte(offset, control, status) < 0) return -1; /* resend start */ control = MV64x60_I2C_CONTROL_START | MV64x60_I2C_CONTROL_TWSIEN; status = MV64x60_I2C_STATUS_MAST_REPEAT_START; if (mv64x60_i2c_control(control, status) < 0) return -1; /* select device for reading */ data = devaddr | 0x1; control = MV64x60_I2C_CONTROL_TWSIEN; status = MV64x60_I2C_STATUS_MAST_RD_ADDR_ACK; if (mv64x60_i2c_write_byte(data, control, status) < 0) return -1; /* read all but last byte of data */ control = MV64x60_I2C_CONTROL_ACK | MV64x60_I2C_CONTROL_TWSIEN; status = MV64x60_I2C_STATUS_MAST_RD_DATA_ACK; for (i=1; i<count; i++) { data = mv64x60_i2c_read_byte(control, status); if (data < 0) { printf("errors on iteration %d\n", i); return -1; } *buf++ = data; } /* read last byte of data */ control = MV64x60_I2C_CONTROL_TWSIEN; status = MV64x60_I2C_STATUS_MAST_RD_DATA_NO_ACK; data = mv64x60_i2c_read_byte(control, status); if (data < 0) return -1; *buf++ = data; /* send stop */ control = MV64x60_I2C_CONTROL_STOP | MV64x60_I2C_CONTROL_TWSIEN; status = MV64x60_I2C_STATUS_NO_STATUS; if (mv64x60_i2c_control(control, status) < 0) return -1; return count; } int mv64x60_i2c_open(void) { u32 v; void *devp; devp = find_node_by_compatible(NULL, "marvell,mv64360-i2c"); if (devp == NULL) goto err_out; if (getprop(devp, "virtual-reg", &v, sizeof(v)) != sizeof(v)) goto err_out; ctlr_base = (u8 *)v; return 0; err_out: return -1; } void mv64x60_i2c_close(void) { ctlr_base = NULL; }
gpl-2.0
filippz/kernel-adaptation-n950-n9
drivers/mtd/ubi/debug.c
124
14309
/* * Copyright (c) International Business Machines Corp., 2006 * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See * the GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * Author: Artem Bityutskiy (Битюцкий Артём) */ #include "ubi.h" #include <linux/debugfs.h> #include <linux/uaccess.h> #include <linux/module.h> /** * ubi_dump_flash - dump a region of flash. * @ubi: UBI device description object * @pnum: the physical eraseblock number to dump * @offset: the starting offset within the physical eraseblock to dump * @len: the length of the region to dump */ void ubi_dump_flash(struct ubi_device *ubi, int pnum, int offset, int len) { int err; size_t read; void *buf; loff_t addr = (loff_t)pnum * ubi->peb_size + offset; buf = vmalloc(len); if (!buf) return; err = mtd_read(ubi->mtd, addr, len, &read, buf); if (err && err != -EUCLEAN) { ubi_err("error %d while reading %d bytes from PEB %d:%d, " "read %zd bytes", err, len, pnum, offset, read); goto out; } ubi_msg("dumping %d bytes of data from PEB %d, offset %d", len, pnum, offset); print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1, buf, len, 1); out: vfree(buf); return; } /** * ubi_dump_ec_hdr - dump an erase counter header. 
* @ec_hdr: the erase counter header to dump */ void ubi_dump_ec_hdr(const struct ubi_ec_hdr *ec_hdr) { printk(KERN_DEBUG "Erase counter header dump:\n"); printk(KERN_DEBUG "\tmagic %#08x\n", be32_to_cpu(ec_hdr->magic)); printk(KERN_DEBUG "\tversion %d\n", (int)ec_hdr->version); printk(KERN_DEBUG "\tec %llu\n", (long long)be64_to_cpu(ec_hdr->ec)); printk(KERN_DEBUG "\tvid_hdr_offset %d\n", be32_to_cpu(ec_hdr->vid_hdr_offset)); printk(KERN_DEBUG "\tdata_offset %d\n", be32_to_cpu(ec_hdr->data_offset)); printk(KERN_DEBUG "\timage_seq %d\n", be32_to_cpu(ec_hdr->image_seq)); printk(KERN_DEBUG "\thdr_crc %#08x\n", be32_to_cpu(ec_hdr->hdr_crc)); printk(KERN_DEBUG "erase counter header hexdump:\n"); print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1, ec_hdr, UBI_EC_HDR_SIZE, 1); } /** * ubi_dump_vid_hdr - dump a volume identifier header. * @vid_hdr: the volume identifier header to dump */ void ubi_dump_vid_hdr(const struct ubi_vid_hdr *vid_hdr) { printk(KERN_DEBUG "Volume identifier header dump:\n"); printk(KERN_DEBUG "\tmagic %08x\n", be32_to_cpu(vid_hdr->magic)); printk(KERN_DEBUG "\tversion %d\n", (int)vid_hdr->version); printk(KERN_DEBUG "\tvol_type %d\n", (int)vid_hdr->vol_type); printk(KERN_DEBUG "\tcopy_flag %d\n", (int)vid_hdr->copy_flag); printk(KERN_DEBUG "\tcompat %d\n", (int)vid_hdr->compat); printk(KERN_DEBUG "\tvol_id %d\n", be32_to_cpu(vid_hdr->vol_id)); printk(KERN_DEBUG "\tlnum %d\n", be32_to_cpu(vid_hdr->lnum)); printk(KERN_DEBUG "\tdata_size %d\n", be32_to_cpu(vid_hdr->data_size)); printk(KERN_DEBUG "\tused_ebs %d\n", be32_to_cpu(vid_hdr->used_ebs)); printk(KERN_DEBUG "\tdata_pad %d\n", be32_to_cpu(vid_hdr->data_pad)); printk(KERN_DEBUG "\tsqnum %llu\n", (unsigned long long)be64_to_cpu(vid_hdr->sqnum)); printk(KERN_DEBUG "\thdr_crc %08x\n", be32_to_cpu(vid_hdr->hdr_crc)); printk(KERN_DEBUG "Volume identifier header hexdump:\n"); print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1, vid_hdr, UBI_VID_HDR_SIZE, 1); } /** * ubi_dump_vol_info - dump 
volume information. * @vol: UBI volume description object */ void ubi_dump_vol_info(const struct ubi_volume *vol) { printk(KERN_DEBUG "Volume information dump:\n"); printk(KERN_DEBUG "\tvol_id %d\n", vol->vol_id); printk(KERN_DEBUG "\treserved_pebs %d\n", vol->reserved_pebs); printk(KERN_DEBUG "\talignment %d\n", vol->alignment); printk(KERN_DEBUG "\tdata_pad %d\n", vol->data_pad); printk(KERN_DEBUG "\tvol_type %d\n", vol->vol_type); printk(KERN_DEBUG "\tname_len %d\n", vol->name_len); printk(KERN_DEBUG "\tusable_leb_size %d\n", vol->usable_leb_size); printk(KERN_DEBUG "\tused_ebs %d\n", vol->used_ebs); printk(KERN_DEBUG "\tused_bytes %lld\n", vol->used_bytes); printk(KERN_DEBUG "\tlast_eb_bytes %d\n", vol->last_eb_bytes); printk(KERN_DEBUG "\tcorrupted %d\n", vol->corrupted); printk(KERN_DEBUG "\tupd_marker %d\n", vol->upd_marker); if (vol->name_len <= UBI_VOL_NAME_MAX && strnlen(vol->name, vol->name_len + 1) == vol->name_len) { printk(KERN_DEBUG "\tname %s\n", vol->name); } else { printk(KERN_DEBUG "\t1st 5 characters of name: %c%c%c%c%c\n", vol->name[0], vol->name[1], vol->name[2], vol->name[3], vol->name[4]); } } /** * ubi_dump_vtbl_record - dump a &struct ubi_vtbl_record object. 
* @r: the object to dump * @idx: volume table index */ void ubi_dump_vtbl_record(const struct ubi_vtbl_record *r, int idx) { int name_len = be16_to_cpu(r->name_len); printk(KERN_DEBUG "Volume table record %d dump:\n", idx); printk(KERN_DEBUG "\treserved_pebs %d\n", be32_to_cpu(r->reserved_pebs)); printk(KERN_DEBUG "\talignment %d\n", be32_to_cpu(r->alignment)); printk(KERN_DEBUG "\tdata_pad %d\n", be32_to_cpu(r->data_pad)); printk(KERN_DEBUG "\tvol_type %d\n", (int)r->vol_type); printk(KERN_DEBUG "\tupd_marker %d\n", (int)r->upd_marker); printk(KERN_DEBUG "\tname_len %d\n", name_len); if (r->name[0] == '\0') { printk(KERN_DEBUG "\tname NULL\n"); return; } if (name_len <= UBI_VOL_NAME_MAX && strnlen(&r->name[0], name_len + 1) == name_len) { printk(KERN_DEBUG "\tname %s\n", &r->name[0]); } else { printk(KERN_DEBUG "\t1st 5 characters of name: %c%c%c%c%c\n", r->name[0], r->name[1], r->name[2], r->name[3], r->name[4]); } printk(KERN_DEBUG "\tcrc %#08x\n", be32_to_cpu(r->crc)); } /** * ubi_dump_av - dump a &struct ubi_ainf_volume object. * @av: the object to dump */ void ubi_dump_av(const struct ubi_ainf_volume *av) { printk(KERN_DEBUG "Volume attaching information dump:\n"); printk(KERN_DEBUG "\tvol_id %d\n", av->vol_id); printk(KERN_DEBUG "\thighest_lnum %d\n", av->highest_lnum); printk(KERN_DEBUG "\tleb_count %d\n", av->leb_count); printk(KERN_DEBUG "\tcompat %d\n", av->compat); printk(KERN_DEBUG "\tvol_type %d\n", av->vol_type); printk(KERN_DEBUG "\tused_ebs %d\n", av->used_ebs); printk(KERN_DEBUG "\tlast_data_size %d\n", av->last_data_size); printk(KERN_DEBUG "\tdata_pad %d\n", av->data_pad); } /** * ubi_dump_aeb - dump a &struct ubi_ainf_peb object. 
* @aeb: the object to dump * @type: object type: 0 - not corrupted, 1 - corrupted */ void ubi_dump_aeb(const struct ubi_ainf_peb *aeb, int type) { printk(KERN_DEBUG "eraseblock attaching information dump:\n"); printk(KERN_DEBUG "\tec %d\n", aeb->ec); printk(KERN_DEBUG "\tpnum %d\n", aeb->pnum); if (type == 0) { printk(KERN_DEBUG "\tlnum %d\n", aeb->lnum); printk(KERN_DEBUG "\tscrub %d\n", aeb->scrub); printk(KERN_DEBUG "\tsqnum %llu\n", aeb->sqnum); } } /** * ubi_dump_mkvol_req - dump a &struct ubi_mkvol_req object. * @req: the object to dump */ void ubi_dump_mkvol_req(const struct ubi_mkvol_req *req) { char nm[17]; printk(KERN_DEBUG "Volume creation request dump:\n"); printk(KERN_DEBUG "\tvol_id %d\n", req->vol_id); printk(KERN_DEBUG "\talignment %d\n", req->alignment); printk(KERN_DEBUG "\tbytes %lld\n", (long long)req->bytes); printk(KERN_DEBUG "\tvol_type %d\n", req->vol_type); printk(KERN_DEBUG "\tname_len %d\n", req->name_len); memcpy(nm, req->name, 16); nm[16] = 0; printk(KERN_DEBUG "\t1st 16 characters of name: %s\n", nm); } /** * ubi_debugging_init_dev - initialize debugging for an UBI device. * @ubi: UBI device description object * * This function initializes debugging-related data for UBI device @ubi. * Returns zero in case of success and a negative error code in case of * failure. */ int ubi_debugging_init_dev(struct ubi_device *ubi) { ubi->dbg = kzalloc(sizeof(struct ubi_debug_info), GFP_KERNEL); if (!ubi->dbg) return -ENOMEM; return 0; } /** * ubi_debugging_exit_dev - free debugging data for an UBI device. * @ubi: UBI device description object */ void ubi_debugging_exit_dev(struct ubi_device *ubi) { kfree(ubi->dbg); } /* * Root directory for UBI stuff in debugfs. Contains sub-directories which * contain the stuff specific to particular UBI devices. */ static struct dentry *dfs_rootdir; /** * ubi_debugfs_init - create UBI debugfs directory. * * Create UBI debugfs directory. Returns zero in case of success and a negative * error code in case of failure. 
*/ int ubi_debugfs_init(void) { if (!IS_ENABLED(CONFIG_DEBUG_FS)) return 0; dfs_rootdir = debugfs_create_dir("ubi", NULL); if (IS_ERR_OR_NULL(dfs_rootdir)) { int err = dfs_rootdir ? -ENODEV : PTR_ERR(dfs_rootdir); ubi_err("cannot create \"ubi\" debugfs directory, error %d\n", err); return err; } return 0; } /** * ubi_debugfs_exit - remove UBI debugfs directory. */ void ubi_debugfs_exit(void) { if (IS_ENABLED(CONFIG_DEBUG_FS)) debugfs_remove(dfs_rootdir); } /* Read an UBI debugfs file */ static ssize_t dfs_file_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { unsigned long ubi_num = (unsigned long)file->private_data; struct dentry *dent = file->f_path.dentry; struct ubi_device *ubi; struct ubi_debug_info *d; char buf[3]; int val; ubi = ubi_get_device(ubi_num); if (!ubi) return -ENODEV; d = ubi->dbg; if (dent == d->dfs_chk_gen) val = d->chk_gen; else if (dent == d->dfs_chk_io) val = d->chk_io; else if (dent == d->dfs_disable_bgt) val = d->disable_bgt; else if (dent == d->dfs_emulate_bitflips) val = d->emulate_bitflips; else if (dent == d->dfs_emulate_io_failures) val = d->emulate_io_failures; else { count = -EINVAL; goto out; } if (val) buf[0] = '1'; else buf[0] = '0'; buf[1] = '\n'; buf[2] = 0x00; count = simple_read_from_buffer(user_buf, count, ppos, buf, 2); out: ubi_put_device(ubi); return count; } /* Write an UBI debugfs file */ static ssize_t dfs_file_write(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) { unsigned long ubi_num = (unsigned long)file->private_data; struct dentry *dent = file->f_path.dentry; struct ubi_device *ubi; struct ubi_debug_info *d; size_t buf_size; char buf[8]; int val; ubi = ubi_get_device(ubi_num); if (!ubi) return -ENODEV; d = ubi->dbg; buf_size = min_t(size_t, count, (sizeof(buf) - 1)); if (copy_from_user(buf, user_buf, buf_size)) { count = -EFAULT; goto out; } if (buf[0] == '1') val = 1; else if (buf[0] == '0') val = 0; else { count = -EINVAL; goto out; } if (dent == 
d->dfs_chk_gen) d->chk_gen = val; else if (dent == d->dfs_chk_io) d->chk_io = val; else if (dent == d->dfs_disable_bgt) d->disable_bgt = val; else if (dent == d->dfs_emulate_bitflips) d->emulate_bitflips = val; else if (dent == d->dfs_emulate_io_failures) d->emulate_io_failures = val; else count = -EINVAL; out: ubi_put_device(ubi); return count; } /* File operations for all UBI debugfs files */ static const struct file_operations dfs_fops = { .read = dfs_file_read, .write = dfs_file_write, .open = simple_open, .llseek = no_llseek, .owner = THIS_MODULE, }; /** * ubi_debugfs_init_dev - initialize debugfs for an UBI device. * @ubi: UBI device description object * * This function creates all debugfs files for UBI device @ubi. Returns zero in * case of success and a negative error code in case of failure. */ int ubi_debugfs_init_dev(struct ubi_device *ubi) { int err, n; unsigned long ubi_num = ubi->ubi_num; const char *fname; struct dentry *dent; struct ubi_debug_info *d = ubi->dbg; if (!IS_ENABLED(CONFIG_DEBUG_FS)) return 0; n = snprintf(d->dfs_dir_name, UBI_DFS_DIR_LEN + 1, UBI_DFS_DIR_NAME, ubi->ubi_num); if (n == UBI_DFS_DIR_LEN) { /* The array size is too small */ fname = UBI_DFS_DIR_NAME; dent = ERR_PTR(-EINVAL); goto out; } fname = d->dfs_dir_name; dent = debugfs_create_dir(fname, dfs_rootdir); if (IS_ERR_OR_NULL(dent)) goto out; d->dfs_dir = dent; fname = "chk_gen"; dent = debugfs_create_file(fname, S_IWUSR, d->dfs_dir, (void *)ubi_num, &dfs_fops); if (IS_ERR_OR_NULL(dent)) goto out_remove; d->dfs_chk_gen = dent; fname = "chk_io"; dent = debugfs_create_file(fname, S_IWUSR, d->dfs_dir, (void *)ubi_num, &dfs_fops); if (IS_ERR_OR_NULL(dent)) goto out_remove; d->dfs_chk_io = dent; fname = "tst_disable_bgt"; dent = debugfs_create_file(fname, S_IWUSR, d->dfs_dir, (void *)ubi_num, &dfs_fops); if (IS_ERR_OR_NULL(dent)) goto out_remove; d->dfs_disable_bgt = dent; fname = "tst_emulate_bitflips"; dent = debugfs_create_file(fname, S_IWUSR, d->dfs_dir, (void *)ubi_num, 
&dfs_fops); if (IS_ERR_OR_NULL(dent)) goto out_remove; d->dfs_emulate_bitflips = dent; fname = "tst_emulate_io_failures"; dent = debugfs_create_file(fname, S_IWUSR, d->dfs_dir, (void *)ubi_num, &dfs_fops); if (IS_ERR_OR_NULL(dent)) goto out_remove; d->dfs_emulate_io_failures = dent; return 0; out_remove: debugfs_remove_recursive(d->dfs_dir); out: err = dent ? PTR_ERR(dent) : -ENODEV; ubi_err("cannot create \"%s\" debugfs file or directory, error %d\n", fname, err); return err; } /** * dbg_debug_exit_dev - free all debugfs files corresponding to device @ubi * @ubi: UBI device description object */ void ubi_debugfs_exit_dev(struct ubi_device *ubi) { if (IS_ENABLED(CONFIG_DEBUG_FS)) debugfs_remove_recursive(ubi->dbg->dfs_dir); }
gpl-2.0
virtuous/kernel-7x30-gingerbread-v4
drivers/net/stmmac/stmmac_mdio.c
892
6174
/******************************************************************************* STMMAC Ethernet Driver -- MDIO bus implementation Provides Bus interface for MII registers Copyright (C) 2007-2009 STMicroelectronics Ltd This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, version 2, as published by the Free Software Foundation. This program is distributed in the hope it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. The full GNU General Public License is included in this distribution in the file called "COPYING". Author: Carl Shaw <carl.shaw@st.com> Maintainer: Giuseppe Cavallaro <peppe.cavallaro@st.com> *******************************************************************************/ #include <linux/mii.h> #include <linux/phy.h> #include <linux/slab.h> #include "stmmac.h" #define MII_BUSY 0x00000001 #define MII_WRITE 0x00000002 /** * stmmac_mdio_read * @bus: points to the mii_bus structure * @phyaddr: MII addr reg bits 15-11 * @phyreg: MII addr reg bits 10-6 * Description: it reads data from the MII register from within the phy device. * For the 7111 GMAC, we must set the bit 0 in the MII address register while * accessing the PHY registers. * Fortunately, it seems this has no drawback for the 7109 MAC. 
*/ static int stmmac_mdio_read(struct mii_bus *bus, int phyaddr, int phyreg) { struct net_device *ndev = bus->priv; struct stmmac_priv *priv = netdev_priv(ndev); unsigned long ioaddr = ndev->base_addr; unsigned int mii_address = priv->hw->mii.addr; unsigned int mii_data = priv->hw->mii.data; int data; u16 regValue = (((phyaddr << 11) & (0x0000F800)) | ((phyreg << 6) & (0x000007C0))); regValue |= MII_BUSY; /* in case of GMAC */ do {} while (((readl(ioaddr + mii_address)) & MII_BUSY) == 1); writel(regValue, ioaddr + mii_address); do {} while (((readl(ioaddr + mii_address)) & MII_BUSY) == 1); /* Read the data from the MII data register */ data = (int)readl(ioaddr + mii_data); return data; } /** * stmmac_mdio_write * @bus: points to the mii_bus structure * @phyaddr: MII addr reg bits 15-11 * @phyreg: MII addr reg bits 10-6 * @phydata: phy data * Description: it writes the data into the MII register from within the device. */ static int stmmac_mdio_write(struct mii_bus *bus, int phyaddr, int phyreg, u16 phydata) { struct net_device *ndev = bus->priv; struct stmmac_priv *priv = netdev_priv(ndev); unsigned long ioaddr = ndev->base_addr; unsigned int mii_address = priv->hw->mii.addr; unsigned int mii_data = priv->hw->mii.data; u16 value = (((phyaddr << 11) & (0x0000F800)) | ((phyreg << 6) & (0x000007C0))) | MII_WRITE; value |= MII_BUSY; /* Wait until any existing MII operation is complete */ do {} while (((readl(ioaddr + mii_address)) & MII_BUSY) == 1); /* Set the MII address register to write */ writel(phydata, ioaddr + mii_data); writel(value, ioaddr + mii_address); /* Wait until any existing MII operation is complete */ do {} while (((readl(ioaddr + mii_address)) & MII_BUSY) == 1); return 0; } /** * stmmac_mdio_reset * @bus: points to the mii_bus structure * Description: reset the MII bus */ static int stmmac_mdio_reset(struct mii_bus *bus) { struct net_device *ndev = bus->priv; struct stmmac_priv *priv = netdev_priv(ndev); unsigned long ioaddr = ndev->base_addr; 
unsigned int mii_address = priv->hw->mii.addr; if (priv->phy_reset) { pr_debug("stmmac_mdio_reset: calling phy_reset\n"); priv->phy_reset(priv->bsp_priv); } /* This is a workaround for problems with the STE101P PHY. * It doesn't complete its reset until at least one clock cycle * on MDC, so perform a dummy mdio read. */ writel(0, ioaddr + mii_address); return 0; } /** * stmmac_mdio_register * @ndev: net device structure * Description: it registers the MII bus */ int stmmac_mdio_register(struct net_device *ndev) { int err = 0; struct mii_bus *new_bus; int *irqlist; struct stmmac_priv *priv = netdev_priv(ndev); int addr, found; new_bus = mdiobus_alloc(); if (new_bus == NULL) return -ENOMEM; irqlist = kzalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL); if (irqlist == NULL) { err = -ENOMEM; goto irqlist_alloc_fail; } /* Assign IRQ to phy at address phy_addr */ if (priv->phy_addr != -1) irqlist[priv->phy_addr] = priv->phy_irq; new_bus->name = "STMMAC MII Bus"; new_bus->read = &stmmac_mdio_read; new_bus->write = &stmmac_mdio_write; new_bus->reset = &stmmac_mdio_reset; snprintf(new_bus->id, MII_BUS_ID_SIZE, "%x", priv->bus_id); new_bus->priv = ndev; new_bus->irq = irqlist; new_bus->phy_mask = priv->phy_mask; new_bus->parent = priv->device; err = mdiobus_register(new_bus); if (err != 0) { pr_err("%s: Cannot register as MDIO bus\n", new_bus->name); goto bus_register_fail; } priv->mii = new_bus; found = 0; for (addr = 0; addr < 32; addr++) { struct phy_device *phydev = new_bus->phy_map[addr]; if (phydev) { if (priv->phy_addr == -1) { priv->phy_addr = addr; phydev->irq = priv->phy_irq; irqlist[addr] = priv->phy_irq; } pr_info("%s: PHY ID %08x at %d IRQ %d (%s)%s\n", ndev->name, phydev->phy_id, addr, phydev->irq, dev_name(&phydev->dev), (addr == priv->phy_addr) ? 
" active" : ""); found = 1; } } if (!found) pr_warning("%s: No PHY found\n", ndev->name); return 0; bus_register_fail: kfree(irqlist); irqlist_alloc_fail: kfree(new_bus); return err; } /** * stmmac_mdio_unregister * @ndev: net device structure * Description: it unregisters the MII bus */ int stmmac_mdio_unregister(struct net_device *ndev) { struct stmmac_priv *priv = netdev_priv(ndev); mdiobus_unregister(priv->mii); priv->mii->priv = NULL; kfree(priv->mii); return 0; }
gpl-2.0
ilikenwf/android_kernel_ms_surfacepro3
arch/s390/appldata/appldata_base.c
1148
13886
/* * Base infrastructure for Linux-z/VM Monitor Stream, Stage 1. * Exports appldata_register_ops() and appldata_unregister_ops() for the * data gathering modules. * * Copyright IBM Corp. 2003, 2009 * * Author: Gerald Schaefer <gerald.schaefer@de.ibm.com> */ #define KMSG_COMPONENT "appldata" #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/errno.h> #include <linux/interrupt.h> #include <linux/proc_fs.h> #include <linux/mm.h> #include <linux/swap.h> #include <linux/pagemap.h> #include <linux/sysctl.h> #include <linux/notifier.h> #include <linux/cpu.h> #include <linux/workqueue.h> #include <linux/suspend.h> #include <linux/platform_device.h> #include <asm/appldata.h> #include <asm/vtimer.h> #include <asm/uaccess.h> #include <asm/io.h> #include <asm/smp.h> #include "appldata.h" #define APPLDATA_CPU_INTERVAL 10000 /* default (CPU) time for sampling interval in milliseconds */ #define TOD_MICRO 0x01000 /* nr. 
of TOD clock units for 1 microsecond */ static struct platform_device *appldata_pdev; /* * /proc entries (sysctl) */ static const char appldata_proc_name[APPLDATA_PROC_NAME_LENGTH] = "appldata"; static int appldata_timer_handler(struct ctl_table *ctl, int write, void __user *buffer, size_t *lenp, loff_t *ppos); static int appldata_interval_handler(struct ctl_table *ctl, int write, void __user *buffer, size_t *lenp, loff_t *ppos); static struct ctl_table_header *appldata_sysctl_header; static struct ctl_table appldata_table[] = { { .procname = "timer", .mode = S_IRUGO | S_IWUSR, .proc_handler = appldata_timer_handler, }, { .procname = "interval", .mode = S_IRUGO | S_IWUSR, .proc_handler = appldata_interval_handler, }, { }, }; static struct ctl_table appldata_dir_table[] = { { .procname = appldata_proc_name, .maxlen = 0, .mode = S_IRUGO | S_IXUGO, .child = appldata_table, }, { }, }; /* * Timer */ static struct vtimer_list appldata_timer; static DEFINE_SPINLOCK(appldata_timer_lock); static int appldata_interval = APPLDATA_CPU_INTERVAL; static int appldata_timer_active; static int appldata_timer_suspended = 0; /* * Work queue */ static struct workqueue_struct *appldata_wq; static void appldata_work_fn(struct work_struct *work); static DECLARE_WORK(appldata_work, appldata_work_fn); /* * Ops list */ static DEFINE_MUTEX(appldata_ops_mutex); static LIST_HEAD(appldata_ops_list); /*************************** timer, work, DIAG *******************************/ /* * appldata_timer_function() * * schedule work and reschedule timer */ static void appldata_timer_function(unsigned long data) { queue_work(appldata_wq, (struct work_struct *) data); } /* * appldata_work_fn() * * call data gathering function for each (active) module */ static void appldata_work_fn(struct work_struct *work) { struct list_head *lh; struct appldata_ops *ops; mutex_lock(&appldata_ops_mutex); list_for_each(lh, &appldata_ops_list) { ops = list_entry(lh, struct appldata_ops, list); if (ops->active == 1) { 
ops->callback(ops->data); } } mutex_unlock(&appldata_ops_mutex); } /* * appldata_diag() * * prepare parameter list, issue DIAG 0xDC */ int appldata_diag(char record_nr, u16 function, unsigned long buffer, u16 length, char *mod_lvl) { struct appldata_product_id id = { .prod_nr = {0xD3, 0xC9, 0xD5, 0xE4, 0xE7, 0xD2, 0xD9}, /* "LINUXKR" */ .prod_fn = 0xD5D3, /* "NL" */ .version_nr = 0xF2F6, /* "26" */ .release_nr = 0xF0F1, /* "01" */ }; id.record_nr = record_nr; id.mod_lvl = (mod_lvl[0]) << 8 | mod_lvl[1]; return appldata_asm(&id, function, (void *) buffer, length); } /************************ timer, work, DIAG <END> ****************************/ /****************************** /proc stuff **********************************/ #define APPLDATA_ADD_TIMER 0 #define APPLDATA_DEL_TIMER 1 #define APPLDATA_MOD_TIMER 2 /* * __appldata_vtimer_setup() * * Add, delete or modify virtual timers on all online cpus. * The caller needs to get the appldata_timer_lock spinlock. */ static void __appldata_vtimer_setup(int cmd) { u64 timer_interval = (u64) appldata_interval * 1000 * TOD_MICRO; switch (cmd) { case APPLDATA_ADD_TIMER: if (appldata_timer_active) break; appldata_timer.expires = timer_interval; add_virt_timer_periodic(&appldata_timer); appldata_timer_active = 1; break; case APPLDATA_DEL_TIMER: del_virt_timer(&appldata_timer); if (!appldata_timer_active) break; appldata_timer_active = 0; break; case APPLDATA_MOD_TIMER: if (!appldata_timer_active) break; mod_virt_timer_periodic(&appldata_timer, timer_interval); } } /* * appldata_timer_handler() * * Start/Stop timer, show status of timer (0 = not active, 1 = active) */ static int appldata_timer_handler(struct ctl_table *ctl, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { unsigned int len; char buf[2]; if (!*lenp || *ppos) { *lenp = 0; return 0; } if (!write) { strncpy(buf, appldata_timer_active ? 
"1\n" : "0\n", ARRAY_SIZE(buf)); len = strnlen(buf, ARRAY_SIZE(buf)); if (len > *lenp) len = *lenp; if (copy_to_user(buffer, buf, len)) return -EFAULT; goto out; } len = *lenp; if (copy_from_user(buf, buffer, len > sizeof(buf) ? sizeof(buf) : len)) return -EFAULT; spin_lock(&appldata_timer_lock); if (buf[0] == '1') __appldata_vtimer_setup(APPLDATA_ADD_TIMER); else if (buf[0] == '0') __appldata_vtimer_setup(APPLDATA_DEL_TIMER); spin_unlock(&appldata_timer_lock); out: *lenp = len; *ppos += len; return 0; } /* * appldata_interval_handler() * * Set (CPU) timer interval for collection of data (in milliseconds), show * current timer interval. */ static int appldata_interval_handler(struct ctl_table *ctl, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { unsigned int len; int interval; char buf[16]; if (!*lenp || *ppos) { *lenp = 0; return 0; } if (!write) { len = sprintf(buf, "%i\n", appldata_interval); if (len > *lenp) len = *lenp; if (copy_to_user(buffer, buf, len)) return -EFAULT; goto out; } len = *lenp; if (copy_from_user(buf, buffer, len > sizeof(buf) ? 
sizeof(buf) : len)) return -EFAULT; interval = 0; sscanf(buf, "%i", &interval); if (interval <= 0) return -EINVAL; spin_lock(&appldata_timer_lock); appldata_interval = interval; __appldata_vtimer_setup(APPLDATA_MOD_TIMER); spin_unlock(&appldata_timer_lock); out: *lenp = len; *ppos += len; return 0; } /* * appldata_generic_handler() * * Generic start/stop monitoring and DIAG, show status of * monitoring (0 = not in process, 1 = in process) */ static int appldata_generic_handler(struct ctl_table *ctl, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { struct appldata_ops *ops = NULL, *tmp_ops; unsigned int len; int rc, found; char buf[2]; struct list_head *lh; found = 0; mutex_lock(&appldata_ops_mutex); list_for_each(lh, &appldata_ops_list) { tmp_ops = list_entry(lh, struct appldata_ops, list); if (&tmp_ops->ctl_table[2] == ctl) { found = 1; } } if (!found) { mutex_unlock(&appldata_ops_mutex); return -ENODEV; } ops = ctl->data; if (!try_module_get(ops->owner)) { // protect this function mutex_unlock(&appldata_ops_mutex); return -ENODEV; } mutex_unlock(&appldata_ops_mutex); if (!*lenp || *ppos) { *lenp = 0; module_put(ops->owner); return 0; } if (!write) { strncpy(buf, ops->active ? "1\n" : "0\n", ARRAY_SIZE(buf)); len = strnlen(buf, ARRAY_SIZE(buf)); if (len > *lenp) len = *lenp; if (copy_to_user(buffer, buf, len)) { module_put(ops->owner); return -EFAULT; } goto out; } len = *lenp; if (copy_from_user(buf, buffer, len > sizeof(buf) ? 
sizeof(buf) : len)) { module_put(ops->owner); return -EFAULT; } mutex_lock(&appldata_ops_mutex); if ((buf[0] == '1') && (ops->active == 0)) { // protect work queue callback if (!try_module_get(ops->owner)) { mutex_unlock(&appldata_ops_mutex); module_put(ops->owner); return -ENODEV; } ops->callback(ops->data); // init record rc = appldata_diag(ops->record_nr, APPLDATA_START_INTERVAL_REC, (unsigned long) ops->data, ops->size, ops->mod_lvl); if (rc != 0) { pr_err("Starting the data collection for %s " "failed with rc=%d\n", ops->name, rc); module_put(ops->owner); } else ops->active = 1; } else if ((buf[0] == '0') && (ops->active == 1)) { ops->active = 0; rc = appldata_diag(ops->record_nr, APPLDATA_STOP_REC, (unsigned long) ops->data, ops->size, ops->mod_lvl); if (rc != 0) pr_err("Stopping the data collection for %s " "failed with rc=%d\n", ops->name, rc); module_put(ops->owner); } mutex_unlock(&appldata_ops_mutex); out: *lenp = len; *ppos += len; module_put(ops->owner); return 0; } /*************************** /proc stuff <END> *******************************/ /************************* module-ops management *****************************/ /* * appldata_register_ops() * * update ops list, register /proc/sys entries */ int appldata_register_ops(struct appldata_ops *ops) { if (ops->size > APPLDATA_MAX_REC_SIZE) return -EINVAL; ops->ctl_table = kzalloc(4 * sizeof(struct ctl_table), GFP_KERNEL); if (!ops->ctl_table) return -ENOMEM; mutex_lock(&appldata_ops_mutex); list_add(&ops->list, &appldata_ops_list); mutex_unlock(&appldata_ops_mutex); ops->ctl_table[0].procname = appldata_proc_name; ops->ctl_table[0].maxlen = 0; ops->ctl_table[0].mode = S_IRUGO | S_IXUGO; ops->ctl_table[0].child = &ops->ctl_table[2]; ops->ctl_table[2].procname = ops->name; ops->ctl_table[2].mode = S_IRUGO | S_IWUSR; ops->ctl_table[2].proc_handler = appldata_generic_handler; ops->ctl_table[2].data = ops; ops->sysctl_header = register_sysctl_table(ops->ctl_table); if (!ops->sysctl_header) goto out; 
return 0; out: mutex_lock(&appldata_ops_mutex); list_del(&ops->list); mutex_unlock(&appldata_ops_mutex); kfree(ops->ctl_table); return -ENOMEM; } /* * appldata_unregister_ops() * * update ops list, unregister /proc entries, stop DIAG if necessary */ void appldata_unregister_ops(struct appldata_ops *ops) { mutex_lock(&appldata_ops_mutex); list_del(&ops->list); mutex_unlock(&appldata_ops_mutex); unregister_sysctl_table(ops->sysctl_header); kfree(ops->ctl_table); } /********************** module-ops management <END> **************************/ /**************************** suspend / resume *******************************/ static int appldata_freeze(struct device *dev) { struct appldata_ops *ops; int rc; struct list_head *lh; spin_lock(&appldata_timer_lock); if (appldata_timer_active) { __appldata_vtimer_setup(APPLDATA_DEL_TIMER); appldata_timer_suspended = 1; } spin_unlock(&appldata_timer_lock); mutex_lock(&appldata_ops_mutex); list_for_each(lh, &appldata_ops_list) { ops = list_entry(lh, struct appldata_ops, list); if (ops->active == 1) { rc = appldata_diag(ops->record_nr, APPLDATA_STOP_REC, (unsigned long) ops->data, ops->size, ops->mod_lvl); if (rc != 0) pr_err("Stopping the data collection for %s " "failed with rc=%d\n", ops->name, rc); } } mutex_unlock(&appldata_ops_mutex); return 0; } static int appldata_restore(struct device *dev) { struct appldata_ops *ops; int rc; struct list_head *lh; spin_lock(&appldata_timer_lock); if (appldata_timer_suspended) { __appldata_vtimer_setup(APPLDATA_ADD_TIMER); appldata_timer_suspended = 0; } spin_unlock(&appldata_timer_lock); mutex_lock(&appldata_ops_mutex); list_for_each(lh, &appldata_ops_list) { ops = list_entry(lh, struct appldata_ops, list); if (ops->active == 1) { ops->callback(ops->data); // init record rc = appldata_diag(ops->record_nr, APPLDATA_START_INTERVAL_REC, (unsigned long) ops->data, ops->size, ops->mod_lvl); if (rc != 0) { pr_err("Starting the data collection for %s " "failed with rc=%d\n", ops->name, rc); } } 
} mutex_unlock(&appldata_ops_mutex); return 0; } static int appldata_thaw(struct device *dev) { return appldata_restore(dev); } static const struct dev_pm_ops appldata_pm_ops = { .freeze = appldata_freeze, .thaw = appldata_thaw, .restore = appldata_restore, }; static struct platform_driver appldata_pdrv = { .driver = { .name = "appldata", .pm = &appldata_pm_ops, }, }; /************************* suspend / resume <END> ****************************/ /******************************* init / exit *********************************/ /* * appldata_init() * * init timer, register /proc entries */ static int __init appldata_init(void) { int rc; init_virt_timer(&appldata_timer); appldata_timer.function = appldata_timer_function; appldata_timer.data = (unsigned long) &appldata_work; rc = platform_driver_register(&appldata_pdrv); if (rc) return rc; appldata_pdev = platform_device_register_simple("appldata", -1, NULL, 0); if (IS_ERR(appldata_pdev)) { rc = PTR_ERR(appldata_pdev); goto out_driver; } appldata_wq = create_singlethread_workqueue("appldata"); if (!appldata_wq) { rc = -ENOMEM; goto out_device; } appldata_sysctl_header = register_sysctl_table(appldata_dir_table); return 0; out_device: platform_device_unregister(appldata_pdev); out_driver: platform_driver_unregister(&appldata_pdrv); return rc; } __initcall(appldata_init); /**************************** init / exit <END> ******************************/ EXPORT_SYMBOL_GPL(appldata_register_ops); EXPORT_SYMBOL_GPL(appldata_unregister_ops); EXPORT_SYMBOL_GPL(appldata_diag); #ifdef CONFIG_SWAP EXPORT_SYMBOL_GPL(si_swapinfo); #endif EXPORT_SYMBOL_GPL(nr_threads); EXPORT_SYMBOL_GPL(nr_running); EXPORT_SYMBOL_GPL(nr_iowait);
gpl-2.0
motley-git/Kernel-GT-P73xx-v2
net/netfilter/ipvs/ip_vs_rr.c
1660
2997
/* * IPVS: Round-Robin Scheduling module * * Authors: Wensong Zhang <wensong@linuxvirtualserver.org> * Peter Kese <peter.kese@ijs.si> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * Fixes/Changes: * Wensong Zhang : changed the ip_vs_rr_schedule to return dest * Julian Anastasov : fixed the NULL pointer access bug in debugging * Wensong Zhang : changed some comestics things for debugging * Wensong Zhang : changed for the d-linked destination list * Wensong Zhang : added the ip_vs_rr_update_svc * Wensong Zhang : added any dest with weight=0 is quiesced * */ #define KMSG_COMPONENT "IPVS" #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt #include <linux/module.h> #include <linux/kernel.h> #include <net/ip_vs.h> static int ip_vs_rr_init_svc(struct ip_vs_service *svc) { svc->sched_data = &svc->destinations; return 0; } static int ip_vs_rr_update_svc(struct ip_vs_service *svc) { svc->sched_data = &svc->destinations; return 0; } /* * Round-Robin Scheduling */ static struct ip_vs_dest * ip_vs_rr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb) { struct list_head *p, *q; struct ip_vs_dest *dest; IP_VS_DBG(6, "%s(): Scheduling...\n", __func__); write_lock(&svc->sched_lock); p = (struct list_head *)svc->sched_data; p = p->next; q = p; do { /* skip list head */ if (q == &svc->destinations) { q = q->next; continue; } dest = list_entry(q, struct ip_vs_dest, n_list); if (!(dest->flags & IP_VS_DEST_F_OVERLOAD) && atomic_read(&dest->weight) > 0) /* HIT */ goto out; q = q->next; } while (q != p); write_unlock(&svc->sched_lock); IP_VS_ERR_RL("RR: no destination available\n"); return NULL; out: svc->sched_data = q; write_unlock(&svc->sched_lock); IP_VS_DBG_BUF(6, "RR: server %s:%u " "activeconns %d refcnt %d weight %d\n", IP_VS_DBG_ADDR(svc->af, &dest->addr), 
ntohs(dest->port), atomic_read(&dest->activeconns), atomic_read(&dest->refcnt), atomic_read(&dest->weight)); return dest; } static struct ip_vs_scheduler ip_vs_rr_scheduler = { .name = "rr", /* name */ .refcnt = ATOMIC_INIT(0), .module = THIS_MODULE, .n_list = LIST_HEAD_INIT(ip_vs_rr_scheduler.n_list), .init_service = ip_vs_rr_init_svc, .update_service = ip_vs_rr_update_svc, .schedule = ip_vs_rr_schedule, }; static int __init ip_vs_rr_init(void) { return register_ip_vs_scheduler(&ip_vs_rr_scheduler); } static void __exit ip_vs_rr_cleanup(void) { unregister_ip_vs_scheduler(&ip_vs_rr_scheduler); } module_init(ip_vs_rr_init); module_exit(ip_vs_rr_cleanup); MODULE_LICENSE("GPL");
gpl-2.0
abeobk/sam9x35
drivers/video/fbdev/sbuslib.c
2172
7000
/* sbuslib.c: Helper library for SBUS framebuffer drivers. * * Copyright (C) 2003 David S. Miller (davem@redhat.com) */ #include <linux/compat.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/string.h> #include <linux/fb.h> #include <linux/mm.h> #include <linux/uaccess.h> #include <linux/of_device.h> #include <asm/fbio.h> #include "sbuslib.h" void sbusfb_fill_var(struct fb_var_screeninfo *var, struct device_node *dp, int bpp) { memset(var, 0, sizeof(*var)); var->xres = of_getintprop_default(dp, "width", 1152); var->yres = of_getintprop_default(dp, "height", 900); var->xres_virtual = var->xres; var->yres_virtual = var->yres; var->bits_per_pixel = bpp; } EXPORT_SYMBOL(sbusfb_fill_var); static unsigned long sbusfb_mmapsize(long size, unsigned long fbsize) { if (size == SBUS_MMAP_EMPTY) return 0; if (size >= 0) return size; return fbsize * (-size); } int sbusfb_mmap_helper(struct sbus_mmap_map *map, unsigned long physbase, unsigned long fbsize, unsigned long iospace, struct vm_area_struct *vma) { unsigned int size, page, r, map_size; unsigned long map_offset = 0; unsigned long off; int i; if (!(vma->vm_flags & (VM_SHARED | VM_MAYSHARE))) return -EINVAL; size = vma->vm_end - vma->vm_start; if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT)) return -EINVAL; off = vma->vm_pgoff << PAGE_SHIFT; /* VM_IO | VM_DONTEXPAND | VM_DONTDUMP are set by remap_pfn_range() */ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); /* Each page, see which map applies */ for (page = 0; page < size; ){ map_size = 0; for (i = 0; map[i].size; i++) if (map[i].voff == off+page) { map_size = sbusfb_mmapsize(map[i].size, fbsize); #ifdef __sparc_v9__ #define POFF_MASK (PAGE_MASK|0x1UL) #else #define POFF_MASK (PAGE_MASK) #endif map_offset = (physbase + map[i].poff) & POFF_MASK; break; } if (!map_size) { page += PAGE_SIZE; continue; } if (page + map_size > size) map_size = size - page; r = io_remap_pfn_range(vma, vma->vm_start + page, MK_IOSPACE_PFN(iospace, map_offset >> 
PAGE_SHIFT), map_size, vma->vm_page_prot); if (r) return -EAGAIN; page += map_size; } return 0; } EXPORT_SYMBOL(sbusfb_mmap_helper); int sbusfb_ioctl_helper(unsigned long cmd, unsigned long arg, struct fb_info *info, int type, int fb_depth, unsigned long fb_size) { switch(cmd) { case FBIOGTYPE: { struct fbtype __user *f = (struct fbtype __user *) arg; if (put_user(type, &f->fb_type) || __put_user(info->var.yres, &f->fb_height) || __put_user(info->var.xres, &f->fb_width) || __put_user(fb_depth, &f->fb_depth) || __put_user(0, &f->fb_cmsize) || __put_user(fb_size, &f->fb_cmsize)) return -EFAULT; return 0; } case FBIOPUTCMAP_SPARC: { struct fbcmap __user *c = (struct fbcmap __user *) arg; struct fb_cmap cmap; u16 red, green, blue; u8 red8, green8, blue8; unsigned char __user *ured; unsigned char __user *ugreen; unsigned char __user *ublue; int index, count, i; if (get_user(index, &c->index) || __get_user(count, &c->count) || __get_user(ured, &c->red) || __get_user(ugreen, &c->green) || __get_user(ublue, &c->blue)) return -EFAULT; cmap.len = 1; cmap.red = &red; cmap.green = &green; cmap.blue = &blue; cmap.transp = NULL; for (i = 0; i < count; i++) { int err; if (get_user(red8, &ured[i]) || get_user(green8, &ugreen[i]) || get_user(blue8, &ublue[i])) return -EFAULT; red = red8 << 8; green = green8 << 8; blue = blue8 << 8; cmap.start = index + i; err = fb_set_cmap(&cmap, info); if (err) return err; } return 0; } case FBIOGETCMAP_SPARC: { struct fbcmap __user *c = (struct fbcmap __user *) arg; unsigned char __user *ured; unsigned char __user *ugreen; unsigned char __user *ublue; struct fb_cmap *cmap = &info->cmap; int index, count, i; u8 red, green, blue; if (get_user(index, &c->index) || __get_user(count, &c->count) || __get_user(ured, &c->red) || __get_user(ugreen, &c->green) || __get_user(ublue, &c->blue)) return -EFAULT; if (index + count > cmap->len) return -EINVAL; for (i = 0; i < count; i++) { red = cmap->red[index + i] >> 8; green = cmap->green[index + i] >> 8; blue 
= cmap->blue[index + i] >> 8; if (put_user(red, &ured[i]) || put_user(green, &ugreen[i]) || put_user(blue, &ublue[i])) return -EFAULT; } return 0; } default: return -EINVAL; } } EXPORT_SYMBOL(sbusfb_ioctl_helper); #ifdef CONFIG_COMPAT static int fbiogetputcmap(struct fb_info *info, unsigned int cmd, unsigned long arg) { struct fbcmap32 __user *argp = (void __user *)arg; struct fbcmap __user *p = compat_alloc_user_space(sizeof(*p)); u32 addr; int ret; ret = copy_in_user(p, argp, 2 * sizeof(int)); ret |= get_user(addr, &argp->red); ret |= put_user(compat_ptr(addr), &p->red); ret |= get_user(addr, &argp->green); ret |= put_user(compat_ptr(addr), &p->green); ret |= get_user(addr, &argp->blue); ret |= put_user(compat_ptr(addr), &p->blue); if (ret) return -EFAULT; return info->fbops->fb_ioctl(info, (cmd == FBIOPUTCMAP32) ? FBIOPUTCMAP_SPARC : FBIOGETCMAP_SPARC, (unsigned long)p); } static int fbiogscursor(struct fb_info *info, unsigned long arg) { struct fbcursor __user *p = compat_alloc_user_space(sizeof(*p)); struct fbcursor32 __user *argp = (void __user *)arg; compat_uptr_t addr; int ret; ret = copy_in_user(p, argp, 2 * sizeof (short) + 2 * sizeof(struct fbcurpos)); ret |= copy_in_user(&p->size, &argp->size, sizeof(struct fbcurpos)); ret |= copy_in_user(&p->cmap, &argp->cmap, 2 * sizeof(int)); ret |= get_user(addr, &argp->cmap.red); ret |= put_user(compat_ptr(addr), &p->cmap.red); ret |= get_user(addr, &argp->cmap.green); ret |= put_user(compat_ptr(addr), &p->cmap.green); ret |= get_user(addr, &argp->cmap.blue); ret |= put_user(compat_ptr(addr), &p->cmap.blue); ret |= get_user(addr, &argp->mask); ret |= put_user(compat_ptr(addr), &p->mask); ret |= get_user(addr, &argp->image); ret |= put_user(compat_ptr(addr), &p->image); if (ret) return -EFAULT; return info->fbops->fb_ioctl(info, FBIOSCURSOR, (unsigned long)p); } int sbusfb_compat_ioctl(struct fb_info *info, unsigned int cmd, unsigned long arg) { switch (cmd) { case FBIOGTYPE: case FBIOSATTR: case FBIOGATTR: case 
FBIOSVIDEO: case FBIOGVIDEO: case FBIOGCURSOR32: /* This is not implemented yet. Later it should be converted... */ case FBIOSCURPOS: case FBIOGCURPOS: case FBIOGCURMAX: return info->fbops->fb_ioctl(info, cmd, arg); case FBIOPUTCMAP32: return fbiogetputcmap(info, cmd, arg); case FBIOGETCMAP32: return fbiogetputcmap(info, cmd, arg); case FBIOSCURSOR32: return fbiogscursor(info, arg); default: return -ENOIOCTLCMD; } } EXPORT_SYMBOL(sbusfb_compat_ioctl); #endif
gpl-2.0
Quallenauge/kernel-archos
drivers/staging/brcm80211/brcmfmac/bcmsdh_linux.c
2428
9516
/* * Copyright (c) 2010 Broadcom Corporation * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ /** * @file bcmsdh_linux.c */ #define __UNDEF_NO_VERSION__ #include <linux/netdevice.h> #include <linux/pci.h> #include <linux/completion.h> #include <pcicfg.h> #include <bcmdefs.h> #include <bcmdevs.h> #include <bcmutils.h> #if defined(OOB_INTR_ONLY) #include <linux/irq.h> extern void dhdsdio_isr(void *args); #include <dngl_stats.h> #include <dhd.h> #endif /* defined(OOB_INTR_ONLY) */ #if defined(CONFIG_MACH_SANDGATE2G) || defined(CONFIG_MACH_LOGICPD_PXA270) #if !defined(BCMPLATFORM_BUS) #define BCMPLATFORM_BUS #endif /* !defined(BCMPLATFORM_BUS) */ #include <linux/platform_device.h> #endif /* CONFIG_MACH_SANDGATE2G */ #include "dngl_stats.h" #include "dhd.h" /** * SDIO Host Controller info */ typedef struct bcmsdh_hc bcmsdh_hc_t; struct bcmsdh_hc { bcmsdh_hc_t *next; #ifdef BCMPLATFORM_BUS struct device *dev; /* platform device handle */ #else struct pci_dev *dev; /* pci device handle */ #endif /* BCMPLATFORM_BUS */ void *regs; /* SDIO Host Controller address */ bcmsdh_info_t *sdh; /* SDIO Host Controller handle */ void *ch; unsigned int oob_irq; unsigned long oob_flags; /* OOB Host specifiction as edge and etc */ bool oob_irq_registered; #if defined(OOB_INTR_ONLY) spinlock_t irq_lock; #endif }; static 
bcmsdh_hc_t *sdhcinfo; /* driver info, initialized when bcmsdh_register is called */ static bcmsdh_driver_t drvinfo = { NULL, NULL }; /* debugging macros */ #define SDLX_MSG(x) /** * Checks to see if vendor and device IDs match a supported SDIO Host Controller. */ bool bcmsdh_chipmatch(u16 vendor, u16 device) { /* Add other vendors and devices as required */ #ifdef BCMSDIOH_STD /* Check for Arasan host controller */ if (vendor == VENDOR_SI_IMAGE) return true; /* Check for BRCM 27XX Standard host controller */ if (device == BCM27XX_SDIOH_ID && vendor == PCI_VENDOR_ID_BROADCOM) return true; /* Check for BRCM Standard host controller */ if (device == SDIOH_FPGA_ID && vendor == PCI_VENDOR_ID_BROADCOM) return true; /* Check for TI PCIxx21 Standard host controller */ if (device == PCIXX21_SDIOH_ID && vendor == VENDOR_TI) return true; if (device == PCIXX21_SDIOH0_ID && vendor == VENDOR_TI) return true; /* Ricoh R5C822 Standard SDIO Host */ if (device == R5C822_SDIOH_ID && vendor == VENDOR_RICOH) return true; /* JMicron Standard SDIO Host */ if (device == JMICRON_SDIOH_ID && vendor == VENDOR_JMICRON) return true; #endif /* BCMSDIOH_STD */ #ifdef BCMSDIOH_SPI /* This is the PciSpiHost. 
*/ if (device == SPIH_FPGA_ID && vendor == PCI_VENDOR_ID_BROADCOM) { return true; } #endif /* BCMSDIOH_SPI */ return false; } #if defined(BCMPLATFORM_BUS) #if defined(BCMLXSDMMC) /* forward declarations */ int bcmsdh_probe(struct device *dev); EXPORT_SYMBOL(bcmsdh_probe); int bcmsdh_remove(struct device *dev); EXPORT_SYMBOL(bcmsdh_remove); #else /* forward declarations */ static int __devinit bcmsdh_probe(struct device *dev); static int __devexit bcmsdh_remove(struct device *dev); #endif /* BCMLXSDMMC */ #ifndef BCMLXSDMMC static #endif /* BCMLXSDMMC */ int bcmsdh_probe(struct device *dev) { bcmsdh_hc_t *sdhc = NULL; unsigned long regs = 0; bcmsdh_info_t *sdh = NULL; #if !defined(BCMLXSDMMC) && defined(BCMPLATFORM_BUS) struct platform_device *pdev; struct resource *r; #endif /* BCMLXSDMMC */ int irq = 0; u32 vendevid; unsigned long irq_flags = 0; #if !defined(BCMLXSDMMC) && defined(BCMPLATFORM_BUS) pdev = to_platform_device(dev); r = platform_get_resource(pdev, IORESOURCE_MEM, 0); irq = platform_get_irq(pdev, 0); if (!r || irq == NO_IRQ) return -ENXIO; #endif /* BCMLXSDMMC */ #if defined(OOB_INTR_ONLY) #ifdef HW_OOB irq_flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL | IORESOURCE_IRQ_SHAREABLE; #else irq_flags = IRQF_TRIGGER_FALLING; #endif /* HW_OOB */ irq = dhd_customer_oob_irq_map(&irq_flags); if (irq < 0) { SDLX_MSG(("%s: Host irq is not defined\n", __func__)); return 1; } #endif /* defined(OOB_INTR_ONLY) */ /* allocate SDIO Host Controller state info */ sdhc = kzalloc(sizeof(bcmsdh_hc_t), GFP_ATOMIC); if (!sdhc) { SDLX_MSG(("%s: out of memory\n", __func__)); goto err; } sdhc->dev = (void *)dev; #ifdef BCMLXSDMMC sdh = bcmsdh_attach((void *)0, (void **)&regs, irq); if (!sdh) { SDLX_MSG(("%s: bcmsdh_attach failed\n", __func__)); goto err; } #else sdh = bcmsdh_attach((void *)r->start, (void **)&regs, irq); if (!sdh) { SDLX_MSG(("%s: bcmsdh_attach failed\n", __func__)); goto err; } #endif /* BCMLXSDMMC */ sdhc->sdh = sdh; sdhc->oob_irq = irq; sdhc->oob_flags = 
irq_flags; sdhc->oob_irq_registered = false; /* to make sure.. */ #if defined(OOB_INTR_ONLY) spin_lock_init(&sdhc->irq_lock); #endif /* chain SDIO Host Controller info together */ sdhc->next = sdhcinfo; sdhcinfo = sdhc; /* Read the vendor/device ID from the CIS */ vendevid = bcmsdh_query_device(sdh); /* try to attach to the target device */ sdhc->ch = drvinfo.attach((vendevid >> 16), (vendevid & 0xFFFF), 0, 0, 0, 0, (void *)regs, sdh); if (!sdhc->ch) { SDLX_MSG(("%s: device attach failed\n", __func__)); goto err; } return 0; /* error handling */ err: if (sdhc) { if (sdhc->sdh) bcmsdh_detach(sdhc->sdh); kfree(sdhc); } return -ENODEV; } #ifndef BCMLXSDMMC static #endif /* BCMLXSDMMC */ int bcmsdh_remove(struct device *dev) { bcmsdh_hc_t *sdhc, *prev; sdhc = sdhcinfo; drvinfo.detach(sdhc->ch); bcmsdh_detach(sdhc->sdh); /* find the SDIO Host Controller state for this pdev and take it out from the list */ for (sdhc = sdhcinfo, prev = NULL; sdhc; sdhc = sdhc->next) { if (sdhc->dev == (void *)dev) { if (prev) prev->next = sdhc->next; else sdhcinfo = NULL; break; } prev = sdhc; } if (!sdhc) { SDLX_MSG(("%s: failed\n", __func__)); return 0; } /* release SDIO Host Controller info */ kfree(sdhc); #if !defined(BCMLXSDMMC) dev_set_drvdata(dev, NULL); #endif /* !defined(BCMLXSDMMC) */ return 0; } #endif /* BCMPLATFORM_BUS */ extern int sdio_function_init(void); int bcmsdh_register(bcmsdh_driver_t *driver) { drvinfo = *driver; SDLX_MSG(("Linux Kernel SDIO/MMC Driver\n")); return sdio_function_init(); } extern void sdio_function_cleanup(void); void bcmsdh_unregister(void) { sdio_function_cleanup(); } #if defined(OOB_INTR_ONLY) void bcmsdh_oob_intr_set(bool enable) { static bool curstate = 1; unsigned long flags; spin_lock_irqsave(&sdhcinfo->irq_lock, flags); if (curstate != enable) { if (enable) enable_irq(sdhcinfo->oob_irq); else disable_irq_nosync(sdhcinfo->oob_irq); curstate = enable; } spin_unlock_irqrestore(&sdhcinfo->irq_lock, flags); } static irqreturn_t wlan_oob_irq(int 
irq, void *dev_id) { dhd_pub_t *dhdp; dhdp = (dhd_pub_t *) dev_get_drvdata(sdhcinfo->dev); bcmsdh_oob_intr_set(0); if (dhdp == NULL) { SDLX_MSG(("Out of band GPIO interrupt fired way too early\n")); return IRQ_HANDLED; } dhdsdio_isr((void *)dhdp->bus); return IRQ_HANDLED; } int bcmsdh_register_oob_intr(void *dhdp) { int error = 0; SDLX_MSG(("%s Enter\n", __func__)); sdhcinfo->oob_flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL | IORESOURCE_IRQ_SHAREABLE; dev_set_drvdata(sdhcinfo->dev, dhdp); if (!sdhcinfo->oob_irq_registered) { SDLX_MSG(("%s IRQ=%d Type=%X\n", __func__, (int)sdhcinfo->oob_irq, (int)sdhcinfo->oob_flags)); /* Refer to customer Host IRQ docs about proper irqflags definition */ error = request_irq(sdhcinfo->oob_irq, wlan_oob_irq, sdhcinfo->oob_flags, "bcmsdh_sdmmc", NULL); if (error) return -ENODEV; irq_set_irq_wake(sdhcinfo->oob_irq, 1); sdhcinfo->oob_irq_registered = true; } return 0; } void bcmsdh_unregister_oob_intr(void) { SDLX_MSG(("%s: Enter\n", __func__)); irq_set_irq_wake(sdhcinfo->oob_irq, 0); disable_irq(sdhcinfo->oob_irq); /* just in case.. */ free_irq(sdhcinfo->oob_irq, NULL); sdhcinfo->oob_irq_registered = false; } #endif /* defined(OOB_INTR_ONLY) */ /* Module parameters specific to each host-controller driver */ extern uint sd_msglevel; /* Debug message level */ module_param(sd_msglevel, uint, 0); extern uint sd_power; /* 0 = SD Power OFF, 1 = SD Power ON. */ module_param(sd_power, uint, 0); extern uint sd_clock; /* SD Clock Control, 0 = SD Clock OFF, 1 = SD Clock ON */ module_param(sd_clock, uint, 0); extern uint sd_divisor; /* Divisor (-1 means external clock) */ module_param(sd_divisor, uint, 0); extern uint sd_sdmode; /* Default is SD4, 0=SPI, 1=SD1, 2=SD4 */ module_param(sd_sdmode, uint, 0); extern uint sd_hiok; /* Ok to use hi-speed mode */ module_param(sd_hiok, uint, 0); extern uint sd_f2_blocksize; module_param(sd_f2_blocksize, int, 0);
gpl-2.0
chongzi865458/android4.04_kernel
arch/arm/mach-omap1/board-nokia770.c
2428
6695
/* * linux/arch/arm/mach-omap1/board-nokia770.c * * Modified from board-generic.c * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/mutex.h> #include <linux/platform_device.h> #include <linux/input.h> #include <linux/clk.h> #include <linux/omapfb.h> #include <linux/spi/spi.h> #include <linux/spi/ads7846.h> #include <linux/workqueue.h> #include <linux/delay.h> #include <mach/hardware.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> #include <mach/gpio.h> #include <plat/mux.h> #include <plat/usb.h> #include <plat/board.h> #include <plat/keypad.h> #include <plat/common.h> #include <plat/hwa742.h> #include <plat/lcd_mipid.h> #include <plat/mmc.h> #include <plat/clock.h> #define ADS7846_PENDOWN_GPIO 15 static void __init omap_nokia770_init_irq(void) { /* On Nokia 770, the SleepX signal is masked with an * MPUIO line by default. 
It has to be unmasked for it * to become functional */ /* SleepX mask direction */ omap_writew((omap_readw(0xfffb5008) & ~2), 0xfffb5008); /* Unmask SleepX signal */ omap_writew((omap_readw(0xfffb5004) & ~2), 0xfffb5004); omap1_init_common_hw(); omap_init_irq(); } static const unsigned int nokia770_keymap[] = { KEY(1, 0, GROUP_0 | KEY_UP), KEY(2, 0, GROUP_1 | KEY_F5), KEY(0, 1, GROUP_0 | KEY_LEFT), KEY(1, 1, GROUP_0 | KEY_ENTER), KEY(2, 1, GROUP_0 | KEY_RIGHT), KEY(0, 2, GROUP_1 | KEY_ESC), KEY(1, 2, GROUP_0 | KEY_DOWN), KEY(2, 2, GROUP_1 | KEY_F4), KEY(0, 3, GROUP_2 | KEY_F7), KEY(1, 3, GROUP_2 | KEY_F8), KEY(2, 3, GROUP_2 | KEY_F6), }; static struct resource nokia770_kp_resources[] = { [0] = { .start = INT_KEYBOARD, .end = INT_KEYBOARD, .flags = IORESOURCE_IRQ, }, }; static const struct matrix_keymap_data nokia770_keymap_data = { .keymap = nokia770_keymap, .keymap_size = ARRAY_SIZE(nokia770_keymap), }; static struct omap_kp_platform_data nokia770_kp_data = { .rows = 8, .cols = 8, .keymap_data = &nokia770_keymap_data, .delay = 4, }; static struct platform_device nokia770_kp_device = { .name = "omap-keypad", .id = -1, .dev = { .platform_data = &nokia770_kp_data, }, .num_resources = ARRAY_SIZE(nokia770_kp_resources), .resource = nokia770_kp_resources, }; static struct platform_device *nokia770_devices[] __initdata = { &nokia770_kp_device, }; static void mipid_shutdown(struct mipid_platform_data *pdata) { if (pdata->nreset_gpio != -1) { printk(KERN_INFO "shutdown LCD\n"); gpio_set_value(pdata->nreset_gpio, 0); msleep(120); } } static struct mipid_platform_data nokia770_mipid_platform_data = { .shutdown = mipid_shutdown, }; static void __init mipid_dev_init(void) { const struct omap_lcd_config *conf; conf = omap_get_config(OMAP_TAG_LCD, struct omap_lcd_config); if (conf != NULL) { nokia770_mipid_platform_data.nreset_gpio = conf->nreset_gpio; nokia770_mipid_platform_data.data_lines = conf->data_lines; } } static void __init ads7846_dev_init(void) { if 
(gpio_request(ADS7846_PENDOWN_GPIO, "ADS7846 pendown") < 0) printk(KERN_ERR "can't get ads7846 pen down GPIO\n"); } static int ads7846_get_pendown_state(void) { return !gpio_get_value(ADS7846_PENDOWN_GPIO); } static struct ads7846_platform_data nokia770_ads7846_platform_data __initdata = { .x_max = 0x0fff, .y_max = 0x0fff, .x_plate_ohms = 180, .pressure_max = 255, .debounce_max = 10, .debounce_tol = 3, .debounce_rep = 1, .get_pendown_state = ads7846_get_pendown_state, }; static struct spi_board_info nokia770_spi_board_info[] __initdata = { [0] = { .modalias = "lcd_mipid", .bus_num = 2, .chip_select = 3, .max_speed_hz = 12000000, .platform_data = &nokia770_mipid_platform_data, }, [1] = { .modalias = "ads7846", .bus_num = 2, .chip_select = 0, .max_speed_hz = 2500000, .irq = OMAP_GPIO_IRQ(15), .platform_data = &nokia770_ads7846_platform_data, }, }; static struct hwa742_platform_data nokia770_hwa742_platform_data = { .te_connected = 1, }; static void __init hwa742_dev_init(void) { clk_add_alias("hwa_sys_ck", NULL, "bclk", NULL); omapfb_set_ctrl_platform_data(&nokia770_hwa742_platform_data); } /* assume no Mini-AB port */ static struct omap_usb_config nokia770_usb_config __initdata = { .otg = 1, .register_host = 1, .register_dev = 1, .hmc_mode = 16, .pins[0] = 6, }; #if defined(CONFIG_MMC_OMAP) || defined(CONFIG_MMC_OMAP_MODULE) #define NOKIA770_GPIO_MMC_POWER 41 #define NOKIA770_GPIO_MMC_SWITCH 23 static int nokia770_mmc_set_power(struct device *dev, int slot, int power_on, int vdd) { gpio_set_value(NOKIA770_GPIO_MMC_POWER, power_on); return 0; } static int nokia770_mmc_get_cover_state(struct device *dev, int slot) { return gpio_get_value(NOKIA770_GPIO_MMC_SWITCH); } static struct omap_mmc_platform_data nokia770_mmc2_data = { .nr_slots = 1, .dma_mask = 0xffffffff, .max_freq = 12000000, .slots[0] = { .set_power = nokia770_mmc_set_power, .get_cover_state = nokia770_mmc_get_cover_state, .ocr_mask = MMC_VDD_32_33|MMC_VDD_33_34, .name = "mmcblk", }, }; static struct 
omap_mmc_platform_data *nokia770_mmc_data[OMAP16XX_NR_MMC]; static void __init nokia770_mmc_init(void) { int ret; ret = gpio_request(NOKIA770_GPIO_MMC_POWER, "MMC power"); if (ret < 0) return; gpio_direction_output(NOKIA770_GPIO_MMC_POWER, 0); ret = gpio_request(NOKIA770_GPIO_MMC_SWITCH, "MMC cover"); if (ret < 0) { gpio_free(NOKIA770_GPIO_MMC_POWER); return; } gpio_direction_input(NOKIA770_GPIO_MMC_SWITCH); /* Only the second MMC controller is used */ nokia770_mmc_data[1] = &nokia770_mmc2_data; omap1_init_mmc(nokia770_mmc_data, OMAP16XX_NR_MMC); } #else static inline void nokia770_mmc_init(void) { } #endif static void __init omap_nokia770_init(void) { platform_add_devices(nokia770_devices, ARRAY_SIZE(nokia770_devices)); spi_register_board_info(nokia770_spi_board_info, ARRAY_SIZE(nokia770_spi_board_info)); omap_serial_init(); omap_register_i2c_bus(1, 100, NULL, 0); hwa742_dev_init(); ads7846_dev_init(); mipid_dev_init(); omap1_usb_init(&nokia770_usb_config); nokia770_mmc_init(); } static void __init omap_nokia770_map_io(void) { omap1_map_common_io(); } MACHINE_START(NOKIA770, "Nokia 770") .boot_params = 0x10000100, .map_io = omap_nokia770_map_io, .reserve = omap_reserve, .init_irq = omap_nokia770_init_irq, .init_machine = omap_nokia770_init, .timer = &omap_timer, MACHINE_END
gpl-2.0
SlimRoms/kernel_lge_msm7x27a-common
arch/powerpc/kernel/dbell.c
2940
1153
/* * Author: Kumar Gala <galak@kernel.crashing.org> * * Copyright 2009 Freescale Semiconductor Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ #include <linux/stddef.h> #include <linux/kernel.h> #include <linux/smp.h> #include <linux/threads.h> #include <linux/hardirq.h> #include <asm/dbell.h> #include <asm/irq_regs.h> #ifdef CONFIG_SMP void doorbell_setup_this_cpu(void) { unsigned long tag = mfspr(SPRN_PIR) & 0x3fff; smp_muxed_ipi_set_data(smp_processor_id(), tag); } void doorbell_cause_ipi(int cpu, unsigned long data) { ppc_msgsnd(PPC_DBELL, 0, data); } void doorbell_exception(struct pt_regs *regs) { struct pt_regs *old_regs = set_irq_regs(regs); irq_enter(); may_hard_irq_enable(); smp_ipi_demux(); irq_exit(); set_irq_regs(old_regs); } #else /* CONFIG_SMP */ void doorbell_exception(struct pt_regs *regs) { printk(KERN_WARNING "Received doorbell on non-smp system\n"); } #endif /* CONFIG_SMP */
gpl-2.0
XPerience-AOSP-Lollipop/android_kernel_asus_grouper
drivers/target/target_core_rd.c
3452
13903
/*******************************************************************************
 * Filename:  target_core_rd.c
 *
 * This file contains the Storage Engine <-> Ramdisk transport
 * specific functions.
 *
 * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc.
 * Copyright (c) 2005, 2006, 2007 SBE, Inc.
 * Copyright (c) 2007-2010 Rising Tide Systems
 * Copyright (c) 2008-2010 Linux-iSCSI.org
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/string.h>
#include <linux/parser.h>
#include <linux/timer.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>

#include "target_core_rd.h"

/* Forward declaration; the template itself is filled in near the bottom. */
static struct se_subsystem_api rd_mcp_template;

/* rd_attach_hba(): (Part of se_subsystem_api_t template)
 *
 * Allocate a struct rd_host for this HBA and hang it off hba->hba_ptr.
 * Returns 0 on success or -ENOMEM.
 */
static int rd_attach_hba(struct se_hba *hba, u32 host_id)
{
	struct rd_host *rd_host;

	rd_host = kzalloc(sizeof(struct rd_host), GFP_KERNEL);
	if (!rd_host) {
		pr_err("Unable to allocate memory for struct rd_host\n");
		return -ENOMEM;
	}

	rd_host->rd_host_id = host_id;

	hba->hba_ptr = rd_host;

	pr_debug("CORE_HBA[%d] - TCM Ramdisk HBA Driver %s on"
		" Generic Target Core Stack %s\n", hba->hba_id,
		RD_HBA_VERSION, TARGET_CORE_MOD_VERSION);
	pr_debug("CORE_HBA[%d] - Attached Ramdisk HBA: %u to Generic"
		" MaxSectors: %u\n", hba->hba_id,
		rd_host->rd_host_id, RD_MAX_SECTORS);

	return 0;
}

/* Undo rd_attach_hba(): free the rd_host and clear hba->hba_ptr. */
static void rd_detach_hba(struct se_hba *hba)
{
	struct rd_host *rd_host = hba->hba_ptr;

	pr_debug("CORE_HBA[%d] - Detached Ramdisk HBA: %u from"
		" Generic Target Core\n", hba->hba_id, rd_host->rd_host_id);

	kfree(rd_host);
	hba->hba_ptr = NULL;
}

/* rd_release_device_space():
 *
 * Free every page referenced by every sg table of the ramdisk device,
 * then the per-table sg arrays and the table array itself.  Safe to call
 * on a partially built device (pages not yet allocated are NULL and are
 * skipped), which is how rd_create_virtdevice()'s failure path uses it.
 */
static void rd_release_device_space(struct rd_dev *rd_dev)
{
	u32 i, j, page_count = 0, sg_per_table;
	struct rd_dev_sg_table *sg_table;
	struct page *pg;
	struct scatterlist *sg;

	if (!rd_dev->sg_table_array || !rd_dev->sg_table_count)
		return;

	sg_table = rd_dev->sg_table_array;

	for (i = 0; i < rd_dev->sg_table_count; i++) {
		sg = sg_table[i].sg_table;
		sg_per_table = sg_table[i].rd_sg_count;

		for (j = 0; j < sg_per_table; j++) {
			pg = sg_page(&sg[j]);
			if (pg) {
				__free_page(pg);
				page_count++;
			}
		}

		kfree(sg);
	}

	pr_debug("CORE_RD[%u] - Released device space for Ramdisk"
		" Device ID: %u, pages %u in %u tables total bytes %lu\n",
		rd_dev->rd_host->rd_host_id, rd_dev->rd_dev_id, page_count,
		rd_dev->sg_table_count, (unsigned long)page_count * PAGE_SIZE);

	kfree(sg_table);
	rd_dev->sg_table_array = NULL;
	rd_dev->sg_table_count = 0;
}

/* rd_build_device_space():
 *
 * Allocate rd_page_count pages for the ramdisk, organised as an array of
 * sg tables with at most max_sg_per_table entries each.  Each table also
 * records the [page_start_offset, page_end_offset] range it covers so
 * rd_get_sg_table() can locate a page later.  On allocation failure the
 * partially built structures are left in place for the caller to tear
 * down via rd_release_device_space().
 */
static int rd_build_device_space(struct rd_dev *rd_dev)
{
	u32 i = 0, j, page_offset = 0, sg_per_table, sg_tables, total_sg_needed;
	u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE /
				sizeof(struct scatterlist));
	struct rd_dev_sg_table *sg_table;
	struct page *pg;
	struct scatterlist *sg;

	if (rd_dev->rd_page_count <= 0) {
		pr_err("Illegal page count: %u for Ramdisk device\n",
			rd_dev->rd_page_count);
		return -EINVAL;
	}
	total_sg_needed = rd_dev->rd_page_count;

	sg_tables = (total_sg_needed / max_sg_per_table) + 1;

	sg_table = kzalloc(sg_tables * sizeof(struct rd_dev_sg_table), GFP_KERNEL);
	if (!sg_table) {
		pr_err("Unable to allocate memory for Ramdisk"
			" scatterlist tables\n");
		return -ENOMEM;
	}

	rd_dev->sg_table_array = sg_table;
	rd_dev->sg_table_count = sg_tables;

	while (total_sg_needed) {
		/* Size of this table: a full table or the remainder. */
		sg_per_table = (total_sg_needed > max_sg_per_table) ?
			max_sg_per_table : total_sg_needed;

		sg = kzalloc(sg_per_table * sizeof(struct scatterlist),
				GFP_KERNEL);
		if (!sg) {
			pr_err("Unable to allocate scatterlist array"
				" for struct rd_dev\n");
			return -ENOMEM;
		}

		sg_init_table(sg, sg_per_table);

		sg_table[i].sg_table = sg;
		sg_table[i].rd_sg_count = sg_per_table;
		sg_table[i].page_start_offset = page_offset;
		sg_table[i++].page_end_offset = (page_offset + sg_per_table)
						- 1;

		for (j = 0; j < sg_per_table; j++) {
			pg = alloc_pages(GFP_KERNEL, 0);
			if (!pg) {
				pr_err("Unable to allocate scatterlist"
					" pages for struct rd_dev_sg_table\n");
				return -ENOMEM;
			}
			sg_assign_page(&sg[j], pg);
			sg[j].length = PAGE_SIZE;
		}

		page_offset += sg_per_table;
		total_sg_needed -= sg_per_table;
	}

	pr_debug("CORE_RD[%u] - Built Ramdisk Device ID: %u space of"
		" %u pages in %u tables\n", rd_dev->rd_host->rd_host_id,
		rd_dev->rd_dev_id, rd_dev->rd_page_count,
		rd_dev->sg_table_count);

	return 0;
}

/*
 * Allocate the per-device struct rd_dev shell (no page space yet) and
 * record the owning rd_host and the direct/MEMCPY mode flag.
 */
static void *rd_allocate_virtdevice(
	struct se_hba *hba,
	const char *name,
	int rd_direct)
{
	struct rd_dev *rd_dev;
	struct rd_host *rd_host = hba->hba_ptr;

	rd_dev = kzalloc(sizeof(struct rd_dev), GFP_KERNEL);
	if (!rd_dev) {
		pr_err("Unable to allocate memory for struct rd_dev\n");
		return NULL;
	}

	rd_dev->rd_host = rd_host;
	rd_dev->rd_direct = rd_direct;

	return rd_dev;
}

/* MEMCPY-mode wrapper for rd_allocate_virtdevice() (rd_direct == 0). */
static void *rd_MEMCPY_allocate_virtdevice(struct se_hba *hba, const char *name)
{
	return rd_allocate_virtdevice(hba, name, 0);
}

/* rd_create_virtdevice():
 *
 * Build the ramdisk page space, fill in the device limits, and register
 * the device with the core HBA.  On any failure the page space is torn
 * down and an ERR_PTR is returned.
 */
static struct se_device *rd_create_virtdevice(
	struct se_hba *hba,
	struct se_subsystem_dev *se_dev,
	void *p,
	int rd_direct)
{
	struct se_device *dev;
	struct se_dev_limits dev_limits;
	struct rd_dev *rd_dev = p;
	struct rd_host *rd_host = hba->hba_ptr;
	int dev_flags = 0, ret;
	char prod[16], rev[4];

	memset(&dev_limits, 0, sizeof(struct se_dev_limits));

	ret = rd_build_device_space(rd_dev);
	if (ret < 0)
		goto fail;

	snprintf(prod, 16, "RAMDISK-%s", (rd_dev->rd_direct) ? "DR" : "MCP");
	snprintf(rev, 4, "%s", (rd_dev->rd_direct) ? RD_DR_VERSION :
						RD_MCP_VERSION);

	dev_limits.limits.logical_block_size = RD_BLOCKSIZE;
	dev_limits.limits.max_hw_sectors = RD_MAX_SECTORS;
	dev_limits.limits.max_sectors = RD_MAX_SECTORS;
	dev_limits.hw_queue_depth = RD_MAX_DEVICE_QUEUE_DEPTH;
	dev_limits.queue_depth = RD_DEVICE_QUEUE_DEPTH;

	dev = transport_add_device_to_core_hba(hba,
			&rd_mcp_template, se_dev, dev_flags, rd_dev,
			&dev_limits, prod, rev);
	if (!dev)
		goto fail;

	rd_dev->rd_dev_id = rd_host->rd_host_dev_id_count++;
	rd_dev->rd_queue_depth = dev->queue_depth;

	pr_debug("CORE_RD[%u] - Added TCM %s Ramdisk Device ID: %u of"
		" %u pages in %u tables, %lu total bytes\n",
		rd_host->rd_host_id, (!rd_dev->rd_direct) ? "MEMCPY" :
		"DIRECT", rd_dev->rd_dev_id, rd_dev->rd_page_count,
		rd_dev->sg_table_count,
		(unsigned long)(rd_dev->rd_page_count * PAGE_SIZE));

	return dev;

fail:
	rd_release_device_space(rd_dev);
	return ERR_PTR(ret);
	/* NOTE(review): if transport_add_device_to_core_hba() fails, ret is
	 * still 0 here, so ERR_PTR(0) == NULL is returned — verify callers
	 * handle a NULL return as well as an ERR_PTR. */
}

/* MEMCPY-mode wrapper for rd_create_virtdevice() (rd_direct == 0). */
static struct se_device *rd_MEMCPY_create_virtdevice(
	struct se_hba *hba,
	struct se_subsystem_dev *se_dev,
	void *p)
{
	return rd_create_virtdevice(hba, se_dev, p, 0);
}

/* rd_free_device(): (Part of se_subsystem_api_t template)
 *
 * Release the ramdisk page space and the struct rd_dev itself.
 */
static void rd_free_device(void *p)
{
	struct rd_dev *rd_dev = p;

	rd_release_device_space(rd_dev);
	kfree(rd_dev);
}

/* Map a struct se_task back to its containing struct rd_request. */
static inline struct rd_request *RD_REQ(struct se_task *task)
{
	return container_of(task, struct rd_request, rd_task);
}

/*
 * Allocate an rd_request and hand back its embedded se_task.  The cdb
 * argument is unused here; freed again via rd_free_task().
 */
static struct se_task *
rd_alloc_task(unsigned char *cdb)
{
	struct rd_request *rd_req;

	rd_req = kzalloc(sizeof(struct rd_request), GFP_KERNEL);
	if (!rd_req) {
		pr_err("Unable to allocate struct rd_request\n");
		return NULL;
	}

	return &rd_req->rd_task;
}

/* rd_get_sg_table():
 *
 * Linear search for the sg table whose page range covers @page.
 * Returns NULL (after logging) if no table contains that page.
 */
static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page)
{
	u32 i;
	struct rd_dev_sg_table *sg_table;

	for (i = 0; i < rd_dev->sg_table_count; i++) {
		sg_table = &rd_dev->sg_table_array[i];
		if ((sg_table->page_start_offset <= page) &&
		    (sg_table->page_end_offset >= page))
			return sg_table;
	}

	pr_err("Unable to locate struct rd_dev_sg_table for page: %u\n",
			page);

	return NULL;
}

/*
 * Copy req->rd_size bytes between the task's sg list and the ramdisk
 * pages, starting at req->rd_page/req->rd_offset.  Direction is chosen
 * by @read_rd (non-zero: ramdisk -> task buffers).  The sg_mapping_iter
 * walks the task side; rd_sg/rd_offset walk the ramdisk side, crossing
 * into the next sg table via rd_get_sg_table() when a table's page
 * range is exhausted.
 */
static int rd_MEMCPY(struct rd_request *req, u32 read_rd)
{
	struct se_task *task = &req->rd_task;
	struct rd_dev *dev = req->rd_task.task_se_cmd->se_dev->dev_ptr;
	struct rd_dev_sg_table *table;
	struct scatterlist *rd_sg;
	struct sg_mapping_iter m;
	u32 rd_offset = req->rd_offset;
	u32 src_len;

	table = rd_get_sg_table(dev, req->rd_page);
	if (!table)
		return -EINVAL;

	rd_sg = &table->sg_table[req->rd_page - table->page_start_offset];

	pr_debug("RD[%u]: %s LBA: %llu, Size: %u Page: %u, Offset: %u\n",
			dev->rd_dev_id, read_rd ? "Read" : "Write",
			task->task_lba, req->rd_size, req->rd_page,
			rd_offset);

	src_len = PAGE_SIZE - rd_offset;
	sg_miter_start(&m, task->task_sg, task->task_sg_nents,
			read_rd ? SG_MITER_TO_SG : SG_MITER_FROM_SG);
	while (req->rd_size) {
		u32 len;
		void *rd_addr;

		sg_miter_next(&m);
		/* Copy no more than remains in the current rd page. */
		len = min((u32)m.length, src_len);
		m.consumed = len;

		rd_addr = sg_virt(rd_sg) + rd_offset;

		if (read_rd)
			memcpy(m.addr, rd_addr, len);
		else
			memcpy(rd_addr, m.addr, len);

		req->rd_size -= len;
		if (!req->rd_size)
			continue;

		src_len -= len;
		if (src_len) {
			rd_offset += len;
			continue;
		}

		/* rd page completed, next one please */
		req->rd_page++;
		rd_offset = 0;
		src_len = PAGE_SIZE;
		if (req->rd_page <= table->page_end_offset) {
			rd_sg++;
			continue;
		}

		table = rd_get_sg_table(dev, req->rd_page);
		if (!table) {
			sg_miter_stop(&m);
			return -EINVAL;
		}

		/* since we increment, the first sg entry is correct */
		rd_sg = table->sg_table;
	}
	sg_miter_stop(&m);
	return 0;
}

/* rd_MEMCPY_do_task(): (Part of se_subsystem_api_t template)
 *
 * Translate the task's LBA into a ramdisk page/offset pair, perform the
 * copy in the direction given by task_data_direction, and complete the
 * task with GOOD status on success.
 */
static int rd_MEMCPY_do_task(struct se_task *task)
{
	struct se_device *dev = task->task_se_cmd->se_dev;
	struct rd_request *req = RD_REQ(task);
	u64 tmp;
	int ret;

	/* byte offset = LBA * block_size; split into page + in-page offset */
	tmp = task->task_lba * dev->se_sub_dev->se_dev_attrib.block_size;
	req->rd_offset = do_div(tmp, PAGE_SIZE);
	req->rd_page = tmp;
	req->rd_size = task->task_size;

	ret = rd_MEMCPY(req, task->task_data_direction == DMA_FROM_DEVICE);
	if (ret != 0)
		return ret;

	task->task_scsi_status = GOOD;
	transport_complete_task(task, 1);
	return 0;
}

/* rd_free_task(): (Part of se_subsystem_api_t template)
 *
 * Free the rd_request allocated by rd_alloc_task().
 */
static void rd_free_task(struct se_task *task)
{
	kfree(RD_REQ(task));
}

/* configfs option tokens recognised by rd_set_configfs_dev_params(). */
enum {
	Opt_rd_pages, Opt_err
};

static match_table_t tokens = {
	{Opt_rd_pages, "rd_pages=%d"},
	{Opt_err, NULL}
};

/*
 * Parse comma/newline-separated configfs parameters.  Only "rd_pages=N"
 * is recognised; it sets rd_page_count and marks RDF_HAS_PAGE_COUNT.
 * Unknown tokens are silently ignored.  Returns @count on success.
 */
static ssize_t rd_set_configfs_dev_params(
	struct se_hba *hba,
	struct se_subsystem_dev *se_dev,
	const char *page,
	ssize_t count)
{
	struct rd_dev *rd_dev = se_dev->se_dev_su_ptr;
	char *orig, *ptr, *opts;
	substring_t args[MAX_OPT_ARGS];
	int ret = 0, arg, token;

	opts = kstrdup(page, GFP_KERNEL);
	if (!opts)
		return -ENOMEM;

	orig = opts;

	while ((ptr = strsep(&opts, ",\n")) != NULL) {
		if (!*ptr)
			continue;

		token = match_token(ptr, tokens, args);
		switch (token) {
		case Opt_rd_pages:
			/* NOTE(review): match_int()'s return value is not
			 * checked, so a malformed integer leaves arg
			 * undefined — verify input is trusted here. */
			match_int(args, &arg);
			rd_dev->rd_page_count = arg;
			pr_debug("RAMDISK: Referencing Page"
				" Count: %u\n", rd_dev->rd_page_count);
			rd_dev->rd_flags |= RDF_HAS_PAGE_COUNT;
			break;
		default:
			break;
		}
	}

	kfree(orig);
	return (!ret) ? count : ret;
}

/* Require that rd_pages= was supplied before device creation. */
static ssize_t rd_check_configfs_dev_params(struct se_hba *hba, struct se_subsystem_dev *se_dev)
{
	struct rd_dev *rd_dev = se_dev->se_dev_su_ptr;

	if (!(rd_dev->rd_flags & RDF_HAS_PAGE_COUNT)) {
		pr_debug("Missing rd_pages= parameter\n");
		return -EINVAL;
	}

	return 0;
}

/* Format the device's id, mode, page count and table count into @b. */
static ssize_t rd_show_configfs_dev_params(
	struct se_hba *hba,
	struct se_subsystem_dev *se_dev,
	char *b)
{
	struct rd_dev *rd_dev = se_dev->se_dev_su_ptr;
	ssize_t bl = sprintf(b, "TCM RamDisk ID: %u  RamDisk Makeup: %s\n",
			rd_dev->rd_dev_id, (rd_dev->rd_direct) ?
			"rd_direct" : "rd_mcp");
	bl += sprintf(b + bl, "        PAGES/PAGE_SIZE: %u*%lu"
			"  SG_table_count: %u\n", rd_dev->rd_page_count,
			PAGE_SIZE, rd_dev->sg_table_count);
	return bl;
}

static u32 rd_get_device_rev(struct se_device *dev)
{
	return SCSI_SPC_2; /* Returns SPC-3 in Initiator Data */
}

static u32 rd_get_device_type(struct se_device *dev)
{
	return TYPE_DISK;
}

/* Last addressable LBA: total bytes / block_size, minus one. */
static sector_t rd_get_blocks(struct se_device *dev)
{
	struct rd_dev *rd_dev = dev->dev_ptr;
	unsigned long long blocks_long = ((rd_dev->rd_page_count * PAGE_SIZE) /
			dev->se_sub_dev->se_dev_attrib.block_size) - 1;

	return blocks_long;
}

/* The MEMCPY-mode subsystem API vector registered with the TCM core. */
static struct se_subsystem_api rd_mcp_template = {
	.name			= "rd_mcp",
	.transport_type		= TRANSPORT_PLUGIN_VHBA_VDEV,
	.attach_hba		= rd_attach_hba,
	.detach_hba		= rd_detach_hba,
	.allocate_virtdevice	= rd_MEMCPY_allocate_virtdevice,
	.create_virtdevice	= rd_MEMCPY_create_virtdevice,
	.free_device		= rd_free_device,
	.alloc_task		= rd_alloc_task,
	.do_task		= rd_MEMCPY_do_task,
	.free_task		= rd_free_task,
	.check_configfs_dev_params = rd_check_configfs_dev_params,
	.set_configfs_dev_params = rd_set_configfs_dev_params,
	.show_configfs_dev_params = rd_show_configfs_dev_params,
	.get_device_rev		= rd_get_device_rev,
	.get_device_type	= rd_get_device_type,
	.get_blocks		= rd_get_blocks,
};

/* Register the rd_mcp backend with the transport subsystem. */
int __init rd_module_init(void)
{
	int ret;

	ret = transport_subsystem_register(&rd_mcp_template);
	if (ret < 0) {
		return ret;
	}

	return 0;
}

/* Unregister the rd_mcp backend. */
void rd_module_exit(void)
{
	transport_subsystem_release(&rd_mcp_template);
}
gpl-2.0
SteveLinCH/linux
drivers/infiniband/hw/nes/nes_mgt.c
4476
36764
/* * Copyright (c) 2006 - 2011 Intel-NE, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
* */ #include <linux/skbuff.h> #include <linux/etherdevice.h> #include <linux/kthread.h> #include <linux/ip.h> #include <linux/tcp.h> #include <net/tcp.h> #include "nes.h" #include "nes_mgt.h" atomic_t pau_qps_created; atomic_t pau_qps_destroyed; static void nes_replenish_mgt_rq(struct nes_vnic_mgt *mgtvnic) { unsigned long flags; dma_addr_t bus_address; struct sk_buff *skb; struct nes_hw_nic_rq_wqe *nic_rqe; struct nes_hw_mgt *nesmgt; struct nes_device *nesdev; struct nes_rskb_cb *cb; u32 rx_wqes_posted = 0; nesmgt = &mgtvnic->mgt; nesdev = mgtvnic->nesvnic->nesdev; spin_lock_irqsave(&nesmgt->rq_lock, flags); if (nesmgt->replenishing_rq != 0) { if (((nesmgt->rq_size - 1) == atomic_read(&mgtvnic->rx_skbs_needed)) && (atomic_read(&mgtvnic->rx_skb_timer_running) == 0)) { atomic_set(&mgtvnic->rx_skb_timer_running, 1); spin_unlock_irqrestore(&nesmgt->rq_lock, flags); mgtvnic->rq_wqes_timer.expires = jiffies + (HZ / 2); /* 1/2 second */ add_timer(&mgtvnic->rq_wqes_timer); } else { spin_unlock_irqrestore(&nesmgt->rq_lock, flags); } return; } nesmgt->replenishing_rq = 1; spin_unlock_irqrestore(&nesmgt->rq_lock, flags); do { skb = dev_alloc_skb(mgtvnic->nesvnic->max_frame_size); if (skb) { skb->dev = mgtvnic->nesvnic->netdev; bus_address = pci_map_single(nesdev->pcidev, skb->data, mgtvnic->nesvnic->max_frame_size, PCI_DMA_FROMDEVICE); cb = (struct nes_rskb_cb *)&skb->cb[0]; cb->busaddr = bus_address; cb->maplen = mgtvnic->nesvnic->max_frame_size; nic_rqe = &nesmgt->rq_vbase[mgtvnic->mgt.rq_head]; nic_rqe->wqe_words[NES_NIC_RQ_WQE_LENGTH_1_0_IDX] = cpu_to_le32(mgtvnic->nesvnic->max_frame_size); nic_rqe->wqe_words[NES_NIC_RQ_WQE_LENGTH_3_2_IDX] = 0; nic_rqe->wqe_words[NES_NIC_RQ_WQE_FRAG0_LOW_IDX] = cpu_to_le32((u32)bus_address); nic_rqe->wqe_words[NES_NIC_RQ_WQE_FRAG0_HIGH_IDX] = cpu_to_le32((u32)((u64)bus_address >> 32)); nesmgt->rx_skb[nesmgt->rq_head] = skb; nesmgt->rq_head++; nesmgt->rq_head &= nesmgt->rq_size - 1; atomic_dec(&mgtvnic->rx_skbs_needed); barrier(); if 
(++rx_wqes_posted == 255) { nes_write32(nesdev->regs + NES_WQE_ALLOC, (rx_wqes_posted << 24) | nesmgt->qp_id); rx_wqes_posted = 0; } } else { spin_lock_irqsave(&nesmgt->rq_lock, flags); if (((nesmgt->rq_size - 1) == atomic_read(&mgtvnic->rx_skbs_needed)) && (atomic_read(&mgtvnic->rx_skb_timer_running) == 0)) { atomic_set(&mgtvnic->rx_skb_timer_running, 1); spin_unlock_irqrestore(&nesmgt->rq_lock, flags); mgtvnic->rq_wqes_timer.expires = jiffies + (HZ / 2); /* 1/2 second */ add_timer(&mgtvnic->rq_wqes_timer); } else { spin_unlock_irqrestore(&nesmgt->rq_lock, flags); } break; } } while (atomic_read(&mgtvnic->rx_skbs_needed)); barrier(); if (rx_wqes_posted) nes_write32(nesdev->regs + NES_WQE_ALLOC, (rx_wqes_posted << 24) | nesmgt->qp_id); nesmgt->replenishing_rq = 0; } /** * nes_mgt_rq_wqes_timeout */ static void nes_mgt_rq_wqes_timeout(unsigned long parm) { struct nes_vnic_mgt *mgtvnic = (struct nes_vnic_mgt *)parm; atomic_set(&mgtvnic->rx_skb_timer_running, 0); if (atomic_read(&mgtvnic->rx_skbs_needed)) nes_replenish_mgt_rq(mgtvnic); } /** * nes_mgt_free_skb - unmap and free skb */ static void nes_mgt_free_skb(struct nes_device *nesdev, struct sk_buff *skb, u32 dir) { struct nes_rskb_cb *cb; cb = (struct nes_rskb_cb *)&skb->cb[0]; pci_unmap_single(nesdev->pcidev, cb->busaddr, cb->maplen, dir); cb->busaddr = 0; dev_kfree_skb_any(skb); } /** * nes_download_callback - handle download completions */ static void nes_download_callback(struct nes_device *nesdev, struct nes_cqp_request *cqp_request) { struct pau_fpdu_info *fpdu_info = cqp_request->cqp_callback_pointer; struct nes_qp *nesqp = fpdu_info->nesqp; struct sk_buff *skb; int i; for (i = 0; i < fpdu_info->frag_cnt; i++) { skb = fpdu_info->frags[i].skb; if (fpdu_info->frags[i].cmplt) { nes_mgt_free_skb(nesdev, skb, PCI_DMA_TODEVICE); nes_rem_ref_cm_node(nesqp->cm_node); } } if (fpdu_info->hdr_vbase) pci_free_consistent(nesdev->pcidev, fpdu_info->hdr_len, fpdu_info->hdr_vbase, fpdu_info->hdr_pbase); kfree(fpdu_info); 
} /** * nes_get_seq - Get the seq, ack_seq and window from the packet */ static u32 nes_get_seq(struct sk_buff *skb, u32 *ack, u16 *wnd, u32 *fin_rcvd, u32 *rst_rcvd) { struct nes_rskb_cb *cb = (struct nes_rskb_cb *)&skb->cb[0]; struct iphdr *iph = (struct iphdr *)(cb->data_start + ETH_HLEN); struct tcphdr *tcph = (struct tcphdr *)(((char *)iph) + (4 * iph->ihl)); *ack = be32_to_cpu(tcph->ack_seq); *wnd = be16_to_cpu(tcph->window); *fin_rcvd = tcph->fin; *rst_rcvd = tcph->rst; return be32_to_cpu(tcph->seq); } /** * nes_get_next_skb - Get the next skb based on where current skb is in the queue */ static struct sk_buff *nes_get_next_skb(struct nes_device *nesdev, struct nes_qp *nesqp, struct sk_buff *skb, u32 nextseq, u32 *ack, u16 *wnd, u32 *fin_rcvd, u32 *rst_rcvd) { u32 seq; bool processacks; struct sk_buff *old_skb; if (skb) { /* Continue processing fpdu */ if (skb->next == (struct sk_buff *)&nesqp->pau_list) goto out; skb = skb->next; processacks = false; } else { /* Starting a new one */ if (skb_queue_empty(&nesqp->pau_list)) goto out; skb = skb_peek(&nesqp->pau_list); processacks = true; } while (1) { if (skb_queue_empty(&nesqp->pau_list)) goto out; seq = nes_get_seq(skb, ack, wnd, fin_rcvd, rst_rcvd); if (seq == nextseq) { if (skb->len || processacks) break; } else if (after(seq, nextseq)) { goto out; } old_skb = skb; skb = skb->next; skb_unlink(old_skb, &nesqp->pau_list); nes_mgt_free_skb(nesdev, old_skb, PCI_DMA_TODEVICE); nes_rem_ref_cm_node(nesqp->cm_node); if (skb == (struct sk_buff *)&nesqp->pau_list) goto out; } return skb; out: return NULL; } /** * get_fpdu_info - Find the next complete fpdu and return its fragments. 
*/ static int get_fpdu_info(struct nes_device *nesdev, struct nes_qp *nesqp, struct pau_fpdu_info **pau_fpdu_info) { struct sk_buff *skb; struct iphdr *iph; struct tcphdr *tcph; struct nes_rskb_cb *cb; struct pau_fpdu_info *fpdu_info = NULL; struct pau_fpdu_frag frags[MAX_FPDU_FRAGS]; u32 fpdu_len = 0; u32 tmp_len; int frag_cnt = 0; u32 tot_len; u32 frag_tot; u32 ack; u32 fin_rcvd; u32 rst_rcvd; u16 wnd; int i; int rc = 0; *pau_fpdu_info = NULL; skb = nes_get_next_skb(nesdev, nesqp, NULL, nesqp->pau_rcv_nxt, &ack, &wnd, &fin_rcvd, &rst_rcvd); if (!skb) goto out; cb = (struct nes_rskb_cb *)&skb->cb[0]; if (skb->len) { fpdu_len = be16_to_cpu(*(__be16 *) skb->data) + MPA_FRAMING; fpdu_len = (fpdu_len + 3) & 0xfffffffc; tmp_len = fpdu_len; /* See if we have all of the fpdu */ frag_tot = 0; memset(&frags, 0, sizeof frags); for (i = 0; i < MAX_FPDU_FRAGS; i++) { frags[i].physaddr = cb->busaddr; frags[i].physaddr += skb->data - cb->data_start; frags[i].frag_len = min(tmp_len, skb->len); frags[i].skb = skb; frags[i].cmplt = (skb->len == frags[i].frag_len); frag_tot += frags[i].frag_len; frag_cnt++; tmp_len -= frags[i].frag_len; if (tmp_len == 0) break; skb = nes_get_next_skb(nesdev, nesqp, skb, nesqp->pau_rcv_nxt + frag_tot, &ack, &wnd, &fin_rcvd, &rst_rcvd); if (!skb) goto out; if (rst_rcvd) { /* rst received in the middle of fpdu */ for (; i >= 0; i--) { skb_unlink(frags[i].skb, &nesqp->pau_list); nes_mgt_free_skb(nesdev, frags[i].skb, PCI_DMA_TODEVICE); } cb = (struct nes_rskb_cb *)&skb->cb[0]; frags[0].physaddr = cb->busaddr; frags[0].physaddr += skb->data - cb->data_start; frags[0].frag_len = skb->len; frags[0].skb = skb; frags[0].cmplt = true; frag_cnt = 1; break; } cb = (struct nes_rskb_cb *)&skb->cb[0]; } } else { /* no data */ frags[0].physaddr = cb->busaddr; frags[0].frag_len = 0; frags[0].skb = skb; frags[0].cmplt = true; frag_cnt = 1; } /* Found one */ fpdu_info = kzalloc(sizeof(*fpdu_info), GFP_ATOMIC); if (fpdu_info == NULL) { nes_debug(NES_DBG_PAU, "Failed 
to alloc a fpdu_info.\n"); rc = -ENOMEM; goto out; } fpdu_info->cqp_request = nes_get_cqp_request(nesdev); if (fpdu_info->cqp_request == NULL) { nes_debug(NES_DBG_PAU, "Failed to get a cqp_request.\n"); rc = -ENOMEM; goto out; } cb = (struct nes_rskb_cb *)&frags[0].skb->cb[0]; iph = (struct iphdr *)(cb->data_start + ETH_HLEN); tcph = (struct tcphdr *)(((char *)iph) + (4 * iph->ihl)); fpdu_info->hdr_len = (((unsigned char *)tcph) + 4 * (tcph->doff)) - cb->data_start; fpdu_info->data_len = fpdu_len; tot_len = fpdu_info->hdr_len + fpdu_len - ETH_HLEN; if (frags[0].cmplt) { fpdu_info->hdr_pbase = cb->busaddr; fpdu_info->hdr_vbase = NULL; } else { fpdu_info->hdr_vbase = pci_alloc_consistent(nesdev->pcidev, fpdu_info->hdr_len, &fpdu_info->hdr_pbase); if (!fpdu_info->hdr_vbase) { nes_debug(NES_DBG_PAU, "Unable to allocate memory for pau first frag\n"); rc = -ENOMEM; goto out; } /* Copy hdrs, adjusting len and seqnum */ memcpy(fpdu_info->hdr_vbase, cb->data_start, fpdu_info->hdr_len); iph = (struct iphdr *)(fpdu_info->hdr_vbase + ETH_HLEN); tcph = (struct tcphdr *)(((char *)iph) + (4 * iph->ihl)); } iph->tot_len = cpu_to_be16(tot_len); iph->saddr = cpu_to_be32(0x7f000001); tcph->seq = cpu_to_be32(nesqp->pau_rcv_nxt); tcph->ack_seq = cpu_to_be32(ack); tcph->window = cpu_to_be16(wnd); nesqp->pau_rcv_nxt += fpdu_len + fin_rcvd; memcpy(fpdu_info->frags, frags, sizeof(fpdu_info->frags)); fpdu_info->frag_cnt = frag_cnt; fpdu_info->nesqp = nesqp; *pau_fpdu_info = fpdu_info; /* Update skb's for next pass */ for (i = 0; i < frag_cnt; i++) { cb = (struct nes_rskb_cb *)&frags[i].skb->cb[0]; skb_pull(frags[i].skb, frags[i].frag_len); if (frags[i].skb->len == 0) { /* Pull skb off the list - it will be freed in the callback */ if (!skb_queue_empty(&nesqp->pau_list)) skb_unlink(frags[i].skb, &nesqp->pau_list); } else { /* Last skb still has data so update the seq */ iph = (struct iphdr *)(cb->data_start + ETH_HLEN); tcph = (struct tcphdr *)(((char *)iph) + (4 * iph->ihl)); tcph->seq = 
cpu_to_be32(nesqp->pau_rcv_nxt); } } out: if (rc) { if (fpdu_info) { if (fpdu_info->cqp_request) nes_put_cqp_request(nesdev, fpdu_info->cqp_request); kfree(fpdu_info); } } return rc; } /** * forward_fpdu - send complete fpdus, one at a time */ static int forward_fpdus(struct nes_vnic *nesvnic, struct nes_qp *nesqp) { struct nes_device *nesdev = nesvnic->nesdev; struct pau_fpdu_info *fpdu_info; struct nes_hw_cqp_wqe *cqp_wqe; struct nes_cqp_request *cqp_request; unsigned long flags; u64 u64tmp; u32 u32tmp; int rc; while (1) { spin_lock_irqsave(&nesqp->pau_lock, flags); rc = get_fpdu_info(nesdev, nesqp, &fpdu_info); if (rc || (fpdu_info == NULL)) { spin_unlock_irqrestore(&nesqp->pau_lock, flags); return rc; } cqp_request = fpdu_info->cqp_request; cqp_wqe = &cqp_request->cqp_wqe; nes_fill_init_cqp_wqe(cqp_wqe, nesdev); set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_DL_OPCODE_IDX, NES_CQP_DOWNLOAD_SEGMENT | (((u32)nesvnic->logical_port) << NES_CQP_OP_LOGICAL_PORT_SHIFT)); u32tmp = fpdu_info->hdr_len << 16; u32tmp |= fpdu_info->hdr_len + (u32)fpdu_info->data_len; set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_DL_LENGTH_0_TOTAL_IDX, u32tmp); u32tmp = (fpdu_info->frags[1].frag_len << 16) | fpdu_info->frags[0].frag_len; set_wqe_32bit_value(cqp_wqe->wqe_words, NES_NIC_SQ_WQE_LENGTH_2_1_IDX, u32tmp); u32tmp = (fpdu_info->frags[3].frag_len << 16) | fpdu_info->frags[2].frag_len; set_wqe_32bit_value(cqp_wqe->wqe_words, NES_NIC_SQ_WQE_LENGTH_4_3_IDX, u32tmp); u64tmp = (u64)fpdu_info->hdr_pbase; set_wqe_32bit_value(cqp_wqe->wqe_words, NES_NIC_SQ_WQE_FRAG0_LOW_IDX, lower_32_bits(u64tmp)); set_wqe_32bit_value(cqp_wqe->wqe_words, NES_NIC_SQ_WQE_FRAG0_HIGH_IDX, upper_32_bits(u64tmp)); set_wqe_32bit_value(cqp_wqe->wqe_words, NES_NIC_SQ_WQE_FRAG1_LOW_IDX, lower_32_bits(fpdu_info->frags[0].physaddr)); set_wqe_32bit_value(cqp_wqe->wqe_words, NES_NIC_SQ_WQE_FRAG1_HIGH_IDX, upper_32_bits(fpdu_info->frags[0].physaddr)); set_wqe_32bit_value(cqp_wqe->wqe_words, 
NES_NIC_SQ_WQE_FRAG2_LOW_IDX, lower_32_bits(fpdu_info->frags[1].physaddr)); set_wqe_32bit_value(cqp_wqe->wqe_words, NES_NIC_SQ_WQE_FRAG2_HIGH_IDX, upper_32_bits(fpdu_info->frags[1].physaddr)); set_wqe_32bit_value(cqp_wqe->wqe_words, NES_NIC_SQ_WQE_FRAG3_LOW_IDX, lower_32_bits(fpdu_info->frags[2].physaddr)); set_wqe_32bit_value(cqp_wqe->wqe_words, NES_NIC_SQ_WQE_FRAG3_HIGH_IDX, upper_32_bits(fpdu_info->frags[2].physaddr)); set_wqe_32bit_value(cqp_wqe->wqe_words, NES_NIC_SQ_WQE_FRAG4_LOW_IDX, lower_32_bits(fpdu_info->frags[3].physaddr)); set_wqe_32bit_value(cqp_wqe->wqe_words, NES_NIC_SQ_WQE_FRAG4_HIGH_IDX, upper_32_bits(fpdu_info->frags[3].physaddr)); cqp_request->cqp_callback_pointer = fpdu_info; cqp_request->callback = 1; cqp_request->cqp_callback = nes_download_callback; atomic_set(&cqp_request->refcount, 1); nes_post_cqp_request(nesdev, cqp_request); spin_unlock_irqrestore(&nesqp->pau_lock, flags); } return 0; } static void process_fpdus(struct nes_vnic *nesvnic, struct nes_qp *nesqp) { int again = 1; unsigned long flags; do { /* Ignore rc - if it failed, tcp retries will cause it to try again */ forward_fpdus(nesvnic, nesqp); spin_lock_irqsave(&nesqp->pau_lock, flags); if (nesqp->pau_pending) { nesqp->pau_pending = 0; } else { nesqp->pau_busy = 0; again = 0; } spin_unlock_irqrestore(&nesqp->pau_lock, flags); } while (again); } /** * queue_fpdus - Handle fpdu's that hw passed up to sw */ static void queue_fpdus(struct sk_buff *skb, struct nes_vnic *nesvnic, struct nes_qp *nesqp) { struct sk_buff *tmpskb; struct nes_rskb_cb *cb; struct iphdr *iph; struct tcphdr *tcph; unsigned char *tcph_end; u32 rcv_nxt; u32 rcv_wnd; u32 seqnum; u32 len; bool process_it = false; unsigned long flags; /* Move data ptr to after tcp header */ iph = (struct iphdr *)skb->data; tcph = (struct tcphdr *)(((char *)iph) + (4 * iph->ihl)); seqnum = be32_to_cpu(tcph->seq); tcph_end = (((char *)tcph) + (4 * tcph->doff)); len = be16_to_cpu(iph->tot_len); if (skb->len > len) skb_trim(skb, len); 
skb_pull(skb, tcph_end - skb->data); /* Initialize tracking values */ cb = (struct nes_rskb_cb *)&skb->cb[0]; cb->seqnum = seqnum; /* Make sure data is in the receive window */ rcv_nxt = nesqp->pau_rcv_nxt; rcv_wnd = le32_to_cpu(nesqp->nesqp_context->rcv_wnd); if (!between(seqnum, rcv_nxt, (rcv_nxt + rcv_wnd))) { nes_mgt_free_skb(nesvnic->nesdev, skb, PCI_DMA_TODEVICE); nes_rem_ref_cm_node(nesqp->cm_node); return; } spin_lock_irqsave(&nesqp->pau_lock, flags); if (nesqp->pau_busy) nesqp->pau_pending = 1; else nesqp->pau_busy = 1; /* Queue skb by sequence number */ if (skb_queue_len(&nesqp->pau_list) == 0) { skb_queue_head(&nesqp->pau_list, skb); } else { tmpskb = nesqp->pau_list.next; while (tmpskb != (struct sk_buff *)&nesqp->pau_list) { cb = (struct nes_rskb_cb *)&tmpskb->cb[0]; if (before(seqnum, cb->seqnum)) break; tmpskb = tmpskb->next; } skb_insert(tmpskb, skb, &nesqp->pau_list); } if (nesqp->pau_state == PAU_READY) process_it = true; spin_unlock_irqrestore(&nesqp->pau_lock, flags); if (process_it) process_fpdus(nesvnic, nesqp); return; } /** * mgt_thread - Handle mgt skbs in a safe context */ static int mgt_thread(void *context) { struct nes_vnic *nesvnic = context; struct sk_buff *skb; struct nes_rskb_cb *cb; while (!kthread_should_stop()) { wait_event_interruptible(nesvnic->mgt_wait_queue, skb_queue_len(&nesvnic->mgt_skb_list) || kthread_should_stop()); while ((skb_queue_len(&nesvnic->mgt_skb_list)) && !kthread_should_stop()) { skb = skb_dequeue(&nesvnic->mgt_skb_list); cb = (struct nes_rskb_cb *)&skb->cb[0]; cb->data_start = skb->data - ETH_HLEN; cb->busaddr = pci_map_single(nesvnic->nesdev->pcidev, cb->data_start, nesvnic->max_frame_size, PCI_DMA_TODEVICE); queue_fpdus(skb, nesvnic, cb->nesqp); } } /* Closing down so delete any entries on the queue */ while (skb_queue_len(&nesvnic->mgt_skb_list)) { skb = skb_dequeue(&nesvnic->mgt_skb_list); cb = (struct nes_rskb_cb *)&skb->cb[0]; nes_rem_ref_cm_node(cb->nesqp->cm_node); dev_kfree_skb_any(skb); } return 0; 
} /** * nes_queue_skbs - Queue skb so it can be handled in a thread context */ void nes_queue_mgt_skbs(struct sk_buff *skb, struct nes_vnic *nesvnic, struct nes_qp *nesqp) { struct nes_rskb_cb *cb; cb = (struct nes_rskb_cb *)&skb->cb[0]; cb->nesqp = nesqp; skb_queue_tail(&nesvnic->mgt_skb_list, skb); wake_up_interruptible(&nesvnic->mgt_wait_queue); } void nes_destroy_pau_qp(struct nes_device *nesdev, struct nes_qp *nesqp) { struct sk_buff *skb; unsigned long flags; atomic_inc(&pau_qps_destroyed); /* Free packets that have not yet been forwarded */ /* Lock is acquired by skb_dequeue when removing the skb */ spin_lock_irqsave(&nesqp->pau_lock, flags); while (skb_queue_len(&nesqp->pau_list)) { skb = skb_dequeue(&nesqp->pau_list); nes_mgt_free_skb(nesdev, skb, PCI_DMA_TODEVICE); nes_rem_ref_cm_node(nesqp->cm_node); } spin_unlock_irqrestore(&nesqp->pau_lock, flags); } static void nes_chg_qh_handler(struct nes_device *nesdev, struct nes_cqp_request *cqp_request) { struct pau_qh_chg *qh_chg = cqp_request->cqp_callback_pointer; struct nes_cqp_request *new_request; struct nes_hw_cqp_wqe *cqp_wqe; struct nes_adapter *nesadapter; struct nes_qp *nesqp; struct nes_v4_quad nes_quad; u32 crc_value; u64 u64temp; nesadapter = nesdev->nesadapter; nesqp = qh_chg->nesqp; /* Should we handle the bad completion */ if (cqp_request->major_code) WARN(1, PFX "Invalid cqp_request major_code=0x%x\n", cqp_request->major_code); switch (nesqp->pau_state) { case PAU_DEL_QH: /* Old hash code deleted, now set the new one */ nesqp->pau_state = PAU_ADD_LB_QH; new_request = nes_get_cqp_request(nesdev); if (new_request == NULL) { nes_debug(NES_DBG_PAU, "Failed to get a new_request.\n"); WARN_ON(1); return; } memset(&nes_quad, 0, sizeof(nes_quad)); nes_quad.DstIpAdrIndex = cpu_to_le32((u32)PCI_FUNC(nesdev->pcidev->devfn) << 24); nes_quad.SrcIpadr = cpu_to_be32(0x7f000001); nes_quad.TcpPorts[0] = swab16(nesqp->nesqp_context->tcpPorts[1]); nes_quad.TcpPorts[1] = swab16(nesqp->nesqp_context->tcpPorts[0]); 
/* Produce hash key */ crc_value = get_crc_value(&nes_quad); nesqp->hte_index = cpu_to_be32(crc_value ^ 0xffffffff); nes_debug(NES_DBG_PAU, "new HTE Index = 0x%08X, CRC = 0x%08X\n", nesqp->hte_index, nesqp->hte_index & nesadapter->hte_index_mask); nesqp->hte_index &= nesadapter->hte_index_mask; nesqp->nesqp_context->hte_index = cpu_to_le32(nesqp->hte_index); nesqp->nesqp_context->ip0 = cpu_to_le32(0x7f000001); nesqp->nesqp_context->rcv_nxt = cpu_to_le32(nesqp->pau_rcv_nxt); cqp_wqe = &new_request->cqp_wqe; nes_fill_init_cqp_wqe(cqp_wqe, nesdev); set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_OPCODE_IDX, NES_CQP_MANAGE_QUAD_HASH | NES_CQP_QP_TYPE_IWARP | NES_CQP_QP_CONTEXT_VALID | NES_CQP_QP_IWARP_STATE_RTS); set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_ID_IDX, nesqp->hwqp.qp_id); u64temp = (u64)nesqp->nesqp_context_pbase; set_wqe_64bit_value(cqp_wqe->wqe_words, NES_CQP_QP_WQE_CONTEXT_LOW_IDX, u64temp); nes_debug(NES_DBG_PAU, "Waiting for CQP completion for adding the quad hash.\n"); new_request->cqp_callback_pointer = qh_chg; new_request->callback = 1; new_request->cqp_callback = nes_chg_qh_handler; atomic_set(&new_request->refcount, 1); nes_post_cqp_request(nesdev, new_request); break; case PAU_ADD_LB_QH: /* Start processing the queued fpdu's */ nesqp->pau_state = PAU_READY; process_fpdus(qh_chg->nesvnic, qh_chg->nesqp); kfree(qh_chg); break; } } /** * nes_change_quad_hash */ static int nes_change_quad_hash(struct nes_device *nesdev, struct nes_vnic *nesvnic, struct nes_qp *nesqp) { struct nes_cqp_request *cqp_request = NULL; struct pau_qh_chg *qh_chg = NULL; u64 u64temp; struct nes_hw_cqp_wqe *cqp_wqe; int ret = 0; cqp_request = nes_get_cqp_request(nesdev); if (cqp_request == NULL) { nes_debug(NES_DBG_PAU, "Failed to get a cqp_request.\n"); ret = -ENOMEM; goto chg_qh_err; } qh_chg = kmalloc(sizeof *qh_chg, GFP_ATOMIC); if (qh_chg == NULL) { nes_debug(NES_DBG_PAU, "Failed to get a cqp_request.\n"); ret = -ENOMEM; goto chg_qh_err; } qh_chg->nesdev = 
nesdev; qh_chg->nesvnic = nesvnic; qh_chg->nesqp = nesqp; nesqp->pau_state = PAU_DEL_QH; cqp_wqe = &cqp_request->cqp_wqe; nes_fill_init_cqp_wqe(cqp_wqe, nesdev); set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_OPCODE_IDX, NES_CQP_MANAGE_QUAD_HASH | NES_CQP_QP_DEL_HTE | NES_CQP_QP_TYPE_IWARP | NES_CQP_QP_CONTEXT_VALID | NES_CQP_QP_IWARP_STATE_RTS); set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_ID_IDX, nesqp->hwqp.qp_id); u64temp = (u64)nesqp->nesqp_context_pbase; set_wqe_64bit_value(cqp_wqe->wqe_words, NES_CQP_QP_WQE_CONTEXT_LOW_IDX, u64temp); nes_debug(NES_DBG_PAU, "Waiting for CQP completion for deleting the quad hash.\n"); cqp_request->cqp_callback_pointer = qh_chg; cqp_request->callback = 1; cqp_request->cqp_callback = nes_chg_qh_handler; atomic_set(&cqp_request->refcount, 1); nes_post_cqp_request(nesdev, cqp_request); return ret; chg_qh_err: kfree(qh_chg); if (cqp_request) nes_put_cqp_request(nesdev, cqp_request); return ret; } /** * nes_mgt_ce_handler * This management code deals with any packed and unaligned (pau) fpdu's * that the hardware cannot handle. 
*/ static void nes_mgt_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *cq) { struct nes_vnic_mgt *mgtvnic = container_of(cq, struct nes_vnic_mgt, mgt_cq); struct nes_adapter *nesadapter = nesdev->nesadapter; u32 head; u32 cq_size; u32 cqe_count = 0; u32 cqe_misc; u32 qp_id = 0; u32 skbs_needed; unsigned long context; struct nes_qp *nesqp; struct sk_buff *rx_skb; struct nes_rskb_cb *cb; head = cq->cq_head; cq_size = cq->cq_size; while (1) { cqe_misc = le32_to_cpu(cq->cq_vbase[head].cqe_words[NES_NIC_CQE_MISC_IDX]); if (!(cqe_misc & NES_NIC_CQE_VALID)) break; nesqp = NULL; if (cqe_misc & NES_NIC_CQE_ACCQP_VALID) { qp_id = le32_to_cpu(cq->cq_vbase[head].cqe_words[NES_NIC_CQE_ACCQP_ID_IDX]); qp_id &= 0x001fffff; if (qp_id < nesadapter->max_qp) { context = (unsigned long)nesadapter->qp_table[qp_id - NES_FIRST_QPN]; nesqp = (struct nes_qp *)context; } } if (nesqp) { if (nesqp->pau_mode == false) { nesqp->pau_mode = true; /* First time for this qp */ nesqp->pau_rcv_nxt = le32_to_cpu( cq->cq_vbase[head].cqe_words[NES_NIC_CQE_HASH_RCVNXT]); skb_queue_head_init(&nesqp->pau_list); spin_lock_init(&nesqp->pau_lock); atomic_inc(&pau_qps_created); nes_change_quad_hash(nesdev, mgtvnic->nesvnic, nesqp); } rx_skb = mgtvnic->mgt.rx_skb[mgtvnic->mgt.rq_tail]; rx_skb->len = 0; skb_put(rx_skb, cqe_misc & 0x0000ffff); rx_skb->protocol = eth_type_trans(rx_skb, mgtvnic->nesvnic->netdev); cb = (struct nes_rskb_cb *)&rx_skb->cb[0]; pci_unmap_single(nesdev->pcidev, cb->busaddr, cb->maplen, PCI_DMA_FROMDEVICE); cb->busaddr = 0; mgtvnic->mgt.rq_tail++; mgtvnic->mgt.rq_tail &= mgtvnic->mgt.rq_size - 1; nes_add_ref_cm_node(nesqp->cm_node); nes_queue_mgt_skbs(rx_skb, mgtvnic->nesvnic, nesqp); } else { printk(KERN_ERR PFX "Invalid QP %d for packed/unaligned handling\n", qp_id); } cq->cq_vbase[head].cqe_words[NES_NIC_CQE_MISC_IDX] = 0; cqe_count++; if (++head >= cq_size) head = 0; if (cqe_count == 255) { /* Replenish mgt CQ */ nes_write32(nesdev->regs + NES_CQE_ALLOC, cq->cq_number | 
(cqe_count << 16)); nesdev->currcq_count += cqe_count; cqe_count = 0; } skbs_needed = atomic_inc_return(&mgtvnic->rx_skbs_needed); if (skbs_needed > (mgtvnic->mgt.rq_size >> 1)) nes_replenish_mgt_rq(mgtvnic); } cq->cq_head = head; nes_write32(nesdev->regs + NES_CQE_ALLOC, NES_CQE_ALLOC_NOTIFY_NEXT | cq->cq_number | (cqe_count << 16)); nes_read32(nesdev->regs + NES_CQE_ALLOC); nesdev->currcq_count += cqe_count; } /** * nes_init_mgt_qp */ int nes_init_mgt_qp(struct nes_device *nesdev, struct net_device *netdev, struct nes_vnic *nesvnic) { struct nes_vnic_mgt *mgtvnic; u32 counter; void *vmem; dma_addr_t pmem; struct nes_hw_cqp_wqe *cqp_wqe; u32 cqp_head; unsigned long flags; struct nes_hw_nic_qp_context *mgt_context; u64 u64temp; struct nes_hw_nic_rq_wqe *mgt_rqe; struct sk_buff *skb; u32 wqe_count; struct nes_rskb_cb *cb; u32 mgt_mem_size; void *mgt_vbase; dma_addr_t mgt_pbase; int i; int ret; /* Allocate space the all mgt QPs once */ mgtvnic = kzalloc(NES_MGT_QP_COUNT * sizeof(struct nes_vnic_mgt), GFP_KERNEL); if (mgtvnic == NULL) { nes_debug(NES_DBG_INIT, "Unable to allocate memory for mgt structure\n"); return -ENOMEM; } /* Allocate fragment, RQ, and CQ; Reuse CEQ based on the PCI function */ /* We are not sending from this NIC so sq is not allocated */ mgt_mem_size = 256 + (NES_MGT_WQ_COUNT * sizeof(struct nes_hw_nic_rq_wqe)) + (NES_MGT_WQ_COUNT * sizeof(struct nes_hw_nic_cqe)) + sizeof(struct nes_hw_nic_qp_context); mgt_mem_size = (mgt_mem_size + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1); mgt_vbase = pci_alloc_consistent(nesdev->pcidev, NES_MGT_QP_COUNT * mgt_mem_size, &mgt_pbase); if (!mgt_vbase) { kfree(mgtvnic); nes_debug(NES_DBG_INIT, "Unable to allocate memory for mgt host descriptor rings\n"); return -ENOMEM; } nesvnic->mgt_mem_size = NES_MGT_QP_COUNT * mgt_mem_size; nesvnic->mgt_vbase = mgt_vbase; nesvnic->mgt_pbase = mgt_pbase; skb_queue_head_init(&nesvnic->mgt_skb_list); init_waitqueue_head(&nesvnic->mgt_wait_queue); nesvnic->mgt_thread = 
kthread_run(mgt_thread, nesvnic, "nes_mgt_thread"); for (i = 0; i < NES_MGT_QP_COUNT; i++) { mgtvnic->nesvnic = nesvnic; mgtvnic->mgt.qp_id = nesdev->mac_index + NES_MGT_QP_OFFSET + i; memset(mgt_vbase, 0, mgt_mem_size); nes_debug(NES_DBG_INIT, "Allocated mgt QP structures at %p (phys = %016lX), size = %u.\n", mgt_vbase, (unsigned long)mgt_pbase, mgt_mem_size); vmem = (void *)(((unsigned long)mgt_vbase + (256 - 1)) & ~(unsigned long)(256 - 1)); pmem = (dma_addr_t)(((unsigned long long)mgt_pbase + (256 - 1)) & ~(unsigned long long)(256 - 1)); spin_lock_init(&mgtvnic->mgt.rq_lock); /* setup the RQ */ mgtvnic->mgt.rq_vbase = vmem; mgtvnic->mgt.rq_pbase = pmem; mgtvnic->mgt.rq_head = 0; mgtvnic->mgt.rq_tail = 0; mgtvnic->mgt.rq_size = NES_MGT_WQ_COUNT; /* setup the CQ */ vmem += (NES_MGT_WQ_COUNT * sizeof(struct nes_hw_nic_rq_wqe)); pmem += (NES_MGT_WQ_COUNT * sizeof(struct nes_hw_nic_rq_wqe)); mgtvnic->mgt_cq.cq_number = mgtvnic->mgt.qp_id; mgtvnic->mgt_cq.cq_vbase = vmem; mgtvnic->mgt_cq.cq_pbase = pmem; mgtvnic->mgt_cq.cq_head = 0; mgtvnic->mgt_cq.cq_size = NES_MGT_WQ_COUNT; mgtvnic->mgt_cq.ce_handler = nes_mgt_ce_handler; /* Send CreateCQ request to CQP */ spin_lock_irqsave(&nesdev->cqp.lock, flags); cqp_head = nesdev->cqp.sq_head; cqp_wqe = &nesdev->cqp.sq_vbase[cqp_head]; nes_fill_init_cqp_wqe(cqp_wqe, nesdev); cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX] = cpu_to_le32( NES_CQP_CREATE_CQ | NES_CQP_CQ_CEQ_VALID | ((u32)mgtvnic->mgt_cq.cq_size << 16)); cqp_wqe->wqe_words[NES_CQP_WQE_ID_IDX] = cpu_to_le32( mgtvnic->mgt_cq.cq_number | ((u32)nesdev->ceq_index << 16)); u64temp = (u64)mgtvnic->mgt_cq.cq_pbase; set_wqe_64bit_value(cqp_wqe->wqe_words, NES_CQP_CQ_WQE_PBL_LOW_IDX, u64temp); cqp_wqe->wqe_words[NES_CQP_CQ_WQE_CQ_CONTEXT_HIGH_IDX] = 0; u64temp = (unsigned long)&mgtvnic->mgt_cq; cqp_wqe->wqe_words[NES_CQP_CQ_WQE_CQ_CONTEXT_LOW_IDX] = cpu_to_le32((u32)(u64temp >> 1)); cqp_wqe->wqe_words[NES_CQP_CQ_WQE_CQ_CONTEXT_HIGH_IDX] = cpu_to_le32(((u32)((u64temp) >> 33)) & 
0x7FFFFFFF); cqp_wqe->wqe_words[NES_CQP_CQ_WQE_DOORBELL_INDEX_HIGH_IDX] = 0; if (++cqp_head >= nesdev->cqp.sq_size) cqp_head = 0; cqp_wqe = &nesdev->cqp.sq_vbase[cqp_head]; nes_fill_init_cqp_wqe(cqp_wqe, nesdev); /* Send CreateQP request to CQP */ mgt_context = (void *)(&mgtvnic->mgt_cq.cq_vbase[mgtvnic->mgt_cq.cq_size]); mgt_context->context_words[NES_NIC_CTX_MISC_IDX] = cpu_to_le32((u32)NES_MGT_CTX_SIZE | ((u32)PCI_FUNC(nesdev->pcidev->devfn) << 12)); nes_debug(NES_DBG_INIT, "RX_WINDOW_BUFFER_PAGE_TABLE_SIZE = 0x%08X, RX_WINDOW_BUFFER_SIZE = 0x%08X\n", nes_read_indexed(nesdev, NES_IDX_RX_WINDOW_BUFFER_PAGE_TABLE_SIZE), nes_read_indexed(nesdev, NES_IDX_RX_WINDOW_BUFFER_SIZE)); if (nes_read_indexed(nesdev, NES_IDX_RX_WINDOW_BUFFER_SIZE) != 0) mgt_context->context_words[NES_NIC_CTX_MISC_IDX] |= cpu_to_le32(NES_NIC_BACK_STORE); u64temp = (u64)mgtvnic->mgt.rq_pbase; mgt_context->context_words[NES_NIC_CTX_SQ_LOW_IDX] = cpu_to_le32((u32)u64temp); mgt_context->context_words[NES_NIC_CTX_SQ_HIGH_IDX] = cpu_to_le32((u32)(u64temp >> 32)); u64temp = (u64)mgtvnic->mgt.rq_pbase; mgt_context->context_words[NES_NIC_CTX_RQ_LOW_IDX] = cpu_to_le32((u32)u64temp); mgt_context->context_words[NES_NIC_CTX_RQ_HIGH_IDX] = cpu_to_le32((u32)(u64temp >> 32)); cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX] = cpu_to_le32(NES_CQP_CREATE_QP | NES_CQP_QP_TYPE_NIC); cqp_wqe->wqe_words[NES_CQP_WQE_ID_IDX] = cpu_to_le32(mgtvnic->mgt.qp_id); u64temp = (u64)mgtvnic->mgt_cq.cq_pbase + (mgtvnic->mgt_cq.cq_size * sizeof(struct nes_hw_nic_cqe)); set_wqe_64bit_value(cqp_wqe->wqe_words, NES_CQP_QP_WQE_CONTEXT_LOW_IDX, u64temp); if (++cqp_head >= nesdev->cqp.sq_size) cqp_head = 0; nesdev->cqp.sq_head = cqp_head; barrier(); /* Ring doorbell (2 WQEs) */ nes_write32(nesdev->regs + NES_WQE_ALLOC, 0x02800000 | nesdev->cqp.qp_id); spin_unlock_irqrestore(&nesdev->cqp.lock, flags); nes_debug(NES_DBG_INIT, "Waiting for create MGT QP%u to complete.\n", mgtvnic->mgt.qp_id); ret = wait_event_timeout(nesdev->cqp.waitq, 
(nesdev->cqp.sq_tail == cqp_head), NES_EVENT_TIMEOUT); nes_debug(NES_DBG_INIT, "Create MGT QP%u completed, wait_event_timeout ret = %u.\n", mgtvnic->mgt.qp_id, ret); if (!ret) { nes_debug(NES_DBG_INIT, "MGT QP%u create timeout expired\n", mgtvnic->mgt.qp_id); if (i == 0) { pci_free_consistent(nesdev->pcidev, nesvnic->mgt_mem_size, nesvnic->mgt_vbase, nesvnic->mgt_pbase); kfree(mgtvnic); } else { nes_destroy_mgt(nesvnic); } return -EIO; } /* Populate the RQ */ for (counter = 0; counter < (NES_MGT_WQ_COUNT - 1); counter++) { skb = dev_alloc_skb(nesvnic->max_frame_size); if (!skb) { nes_debug(NES_DBG_INIT, "%s: out of memory for receive skb\n", netdev->name); return -ENOMEM; } skb->dev = netdev; pmem = pci_map_single(nesdev->pcidev, skb->data, nesvnic->max_frame_size, PCI_DMA_FROMDEVICE); cb = (struct nes_rskb_cb *)&skb->cb[0]; cb->busaddr = pmem; cb->maplen = nesvnic->max_frame_size; mgt_rqe = &mgtvnic->mgt.rq_vbase[counter]; mgt_rqe->wqe_words[NES_NIC_RQ_WQE_LENGTH_1_0_IDX] = cpu_to_le32((u32)nesvnic->max_frame_size); mgt_rqe->wqe_words[NES_NIC_RQ_WQE_LENGTH_3_2_IDX] = 0; mgt_rqe->wqe_words[NES_NIC_RQ_WQE_FRAG0_LOW_IDX] = cpu_to_le32((u32)pmem); mgt_rqe->wqe_words[NES_NIC_RQ_WQE_FRAG0_HIGH_IDX] = cpu_to_le32((u32)((u64)pmem >> 32)); mgtvnic->mgt.rx_skb[counter] = skb; } init_timer(&mgtvnic->rq_wqes_timer); mgtvnic->rq_wqes_timer.function = nes_mgt_rq_wqes_timeout; mgtvnic->rq_wqes_timer.data = (unsigned long)mgtvnic; wqe_count = NES_MGT_WQ_COUNT - 1; mgtvnic->mgt.rq_head = wqe_count; barrier(); do { counter = min(wqe_count, ((u32)255)); wqe_count -= counter; nes_write32(nesdev->regs + NES_WQE_ALLOC, (counter << 24) | mgtvnic->mgt.qp_id); } while (wqe_count); nes_write32(nesdev->regs + NES_CQE_ALLOC, NES_CQE_ALLOC_NOTIFY_NEXT | mgtvnic->mgt_cq.cq_number); nes_read32(nesdev->regs + NES_CQE_ALLOC); mgt_vbase += mgt_mem_size; mgt_pbase += mgt_mem_size; nesvnic->mgtvnic[i] = mgtvnic++; } return 0; } void nes_destroy_mgt(struct nes_vnic *nesvnic) { struct nes_device 
*nesdev = nesvnic->nesdev; struct nes_vnic_mgt *mgtvnic; struct nes_vnic_mgt *first_mgtvnic; unsigned long flags; struct nes_hw_cqp_wqe *cqp_wqe; u32 cqp_head; struct sk_buff *rx_skb; int i; int ret; kthread_stop(nesvnic->mgt_thread); /* Free remaining NIC receive buffers */ first_mgtvnic = nesvnic->mgtvnic[0]; for (i = 0; i < NES_MGT_QP_COUNT; i++) { mgtvnic = nesvnic->mgtvnic[i]; if (mgtvnic == NULL) continue; while (mgtvnic->mgt.rq_head != mgtvnic->mgt.rq_tail) { rx_skb = mgtvnic->mgt.rx_skb[mgtvnic->mgt.rq_tail]; nes_mgt_free_skb(nesdev, rx_skb, PCI_DMA_FROMDEVICE); mgtvnic->mgt.rq_tail++; mgtvnic->mgt.rq_tail &= (mgtvnic->mgt.rq_size - 1); } spin_lock_irqsave(&nesdev->cqp.lock, flags); /* Destroy NIC QP */ cqp_head = nesdev->cqp.sq_head; cqp_wqe = &nesdev->cqp.sq_vbase[cqp_head]; nes_fill_init_cqp_wqe(cqp_wqe, nesdev); set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_OPCODE_IDX, (NES_CQP_DESTROY_QP | NES_CQP_QP_TYPE_NIC)); set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_ID_IDX, mgtvnic->mgt.qp_id); if (++cqp_head >= nesdev->cqp.sq_size) cqp_head = 0; cqp_wqe = &nesdev->cqp.sq_vbase[cqp_head]; /* Destroy NIC CQ */ nes_fill_init_cqp_wqe(cqp_wqe, nesdev); set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_OPCODE_IDX, (NES_CQP_DESTROY_CQ | ((u32)mgtvnic->mgt_cq.cq_size << 16))); set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_ID_IDX, (mgtvnic->mgt_cq.cq_number | ((u32)nesdev->ceq_index << 16))); if (++cqp_head >= nesdev->cqp.sq_size) cqp_head = 0; nesdev->cqp.sq_head = cqp_head; barrier(); /* Ring doorbell (2 WQEs) */ nes_write32(nesdev->regs + NES_WQE_ALLOC, 0x02800000 | nesdev->cqp.qp_id); spin_unlock_irqrestore(&nesdev->cqp.lock, flags); nes_debug(NES_DBG_SHUTDOWN, "Waiting for CQP, cqp_head=%u, cqp.sq_head=%u," " cqp.sq_tail=%u, cqp.sq_size=%u\n", cqp_head, nesdev->cqp.sq_head, nesdev->cqp.sq_tail, nesdev->cqp.sq_size); ret = wait_event_timeout(nesdev->cqp.waitq, (nesdev->cqp.sq_tail == cqp_head), NES_EVENT_TIMEOUT); nes_debug(NES_DBG_SHUTDOWN, 
"Destroy MGT QP returned, wait_event_timeout ret = %u, cqp_head=%u," " cqp.sq_head=%u, cqp.sq_tail=%u\n", ret, cqp_head, nesdev->cqp.sq_head, nesdev->cqp.sq_tail); if (!ret) nes_debug(NES_DBG_SHUTDOWN, "MGT QP%u destroy timeout expired\n", mgtvnic->mgt.qp_id); nesvnic->mgtvnic[i] = NULL; } if (nesvnic->mgt_vbase) { pci_free_consistent(nesdev->pcidev, nesvnic->mgt_mem_size, nesvnic->mgt_vbase, nesvnic->mgt_pbase); nesvnic->mgt_vbase = NULL; nesvnic->mgt_pbase = 0; } kfree(first_mgtvnic); }
gpl-2.0
lollipop-og/hellsgod-kernel
arch/arm/mach-davinci/dm365.c
4732
32604
/* * TI DaVinci DM365 chip specific setup * * Copyright (C) 2009 Texas Instruments * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation version 2. * * This program is distributed "as is" WITHOUT ANY WARRANTY of any * kind, whether express or implied; without even the implied warranty * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/init.h> #include <linux/clk.h> #include <linux/serial_8250.h> #include <linux/platform_device.h> #include <linux/dma-mapping.h> #include <linux/spi/spi.h> #include <asm/mach/map.h> #include <mach/cputype.h> #include <mach/edma.h> #include <mach/psc.h> #include <mach/mux.h> #include <mach/irqs.h> #include <mach/time.h> #include <mach/serial.h> #include <mach/common.h> #include <mach/asp.h> #include <mach/keyscan.h> #include <mach/spi.h> #include <mach/gpio-davinci.h> #include "davinci.h" #include "clock.h" #include "mux.h" #define DM365_REF_FREQ 24000000 /* 24 MHz on the DM365 EVM */ /* Base of key scan register bank */ #define DM365_KEYSCAN_BASE 0x01c69400 #define DM365_RTC_BASE 0x01c69000 #define DAVINCI_DM365_VC_BASE 0x01d0c000 #define DAVINCI_DMA_VC_TX 2 #define DAVINCI_DMA_VC_RX 3 #define DM365_EMAC_BASE 0x01d07000 #define DM365_EMAC_MDIO_BASE (DM365_EMAC_BASE + 0x4000) #define DM365_EMAC_CNTRL_OFFSET 0x0000 #define DM365_EMAC_CNTRL_MOD_OFFSET 0x3000 #define DM365_EMAC_CNTRL_RAM_OFFSET 0x1000 #define DM365_EMAC_CNTRL_RAM_SIZE 0x2000 static struct pll_data pll1_data = { .num = 1, .phys_base = DAVINCI_PLL1_BASE, .flags = PLL_HAS_POSTDIV | PLL_HAS_PREDIV, }; static struct pll_data pll2_data = { .num = 2, .phys_base = DAVINCI_PLL2_BASE, .flags = PLL_HAS_POSTDIV | PLL_HAS_PREDIV, }; static struct clk ref_clk = { .name = "ref_clk", .rate = DM365_REF_FREQ, }; static struct clk pll1_clk = { .name = "pll1", .parent = &ref_clk, .flags = 
CLK_PLL, .pll_data = &pll1_data, }; static struct clk pll1_aux_clk = { .name = "pll1_aux_clk", .parent = &pll1_clk, .flags = CLK_PLL | PRE_PLL, }; static struct clk pll1_sysclkbp = { .name = "pll1_sysclkbp", .parent = &pll1_clk, .flags = CLK_PLL | PRE_PLL, .div_reg = BPDIV }; static struct clk clkout0_clk = { .name = "clkout0", .parent = &pll1_clk, .flags = CLK_PLL | PRE_PLL, }; static struct clk pll1_sysclk1 = { .name = "pll1_sysclk1", .parent = &pll1_clk, .flags = CLK_PLL, .div_reg = PLLDIV1, }; static struct clk pll1_sysclk2 = { .name = "pll1_sysclk2", .parent = &pll1_clk, .flags = CLK_PLL, .div_reg = PLLDIV2, }; static struct clk pll1_sysclk3 = { .name = "pll1_sysclk3", .parent = &pll1_clk, .flags = CLK_PLL, .div_reg = PLLDIV3, }; static struct clk pll1_sysclk4 = { .name = "pll1_sysclk4", .parent = &pll1_clk, .flags = CLK_PLL, .div_reg = PLLDIV4, }; static struct clk pll1_sysclk5 = { .name = "pll1_sysclk5", .parent = &pll1_clk, .flags = CLK_PLL, .div_reg = PLLDIV5, }; static struct clk pll1_sysclk6 = { .name = "pll1_sysclk6", .parent = &pll1_clk, .flags = CLK_PLL, .div_reg = PLLDIV6, }; static struct clk pll1_sysclk7 = { .name = "pll1_sysclk7", .parent = &pll1_clk, .flags = CLK_PLL, .div_reg = PLLDIV7, }; static struct clk pll1_sysclk8 = { .name = "pll1_sysclk8", .parent = &pll1_clk, .flags = CLK_PLL, .div_reg = PLLDIV8, }; static struct clk pll1_sysclk9 = { .name = "pll1_sysclk9", .parent = &pll1_clk, .flags = CLK_PLL, .div_reg = PLLDIV9, }; static struct clk pll2_clk = { .name = "pll2", .parent = &ref_clk, .flags = CLK_PLL, .pll_data = &pll2_data, }; static struct clk pll2_aux_clk = { .name = "pll2_aux_clk", .parent = &pll2_clk, .flags = CLK_PLL | PRE_PLL, }; static struct clk clkout1_clk = { .name = "clkout1", .parent = &pll2_clk, .flags = CLK_PLL | PRE_PLL, }; static struct clk pll2_sysclk1 = { .name = "pll2_sysclk1", .parent = &pll2_clk, .flags = CLK_PLL, .div_reg = PLLDIV1, }; static struct clk pll2_sysclk2 = { .name = "pll2_sysclk2", .parent = &pll2_clk, 
.flags = CLK_PLL, .div_reg = PLLDIV2, }; static struct clk pll2_sysclk3 = { .name = "pll2_sysclk3", .parent = &pll2_clk, .flags = CLK_PLL, .div_reg = PLLDIV3, }; static struct clk pll2_sysclk4 = { .name = "pll2_sysclk4", .parent = &pll2_clk, .flags = CLK_PLL, .div_reg = PLLDIV4, }; static struct clk pll2_sysclk5 = { .name = "pll2_sysclk5", .parent = &pll2_clk, .flags = CLK_PLL, .div_reg = PLLDIV5, }; static struct clk pll2_sysclk6 = { .name = "pll2_sysclk6", .parent = &pll2_clk, .flags = CLK_PLL, .div_reg = PLLDIV6, }; static struct clk pll2_sysclk7 = { .name = "pll2_sysclk7", .parent = &pll2_clk, .flags = CLK_PLL, .div_reg = PLLDIV7, }; static struct clk pll2_sysclk8 = { .name = "pll2_sysclk8", .parent = &pll2_clk, .flags = CLK_PLL, .div_reg = PLLDIV8, }; static struct clk pll2_sysclk9 = { .name = "pll2_sysclk9", .parent = &pll2_clk, .flags = CLK_PLL, .div_reg = PLLDIV9, }; static struct clk vpss_dac_clk = { .name = "vpss_dac", .parent = &pll1_sysclk3, .lpsc = DM365_LPSC_DAC_CLK, }; static struct clk vpss_master_clk = { .name = "vpss_master", .parent = &pll1_sysclk5, .lpsc = DM365_LPSC_VPSSMSTR, .flags = CLK_PSC, }; static struct clk arm_clk = { .name = "arm_clk", .parent = &pll2_sysclk2, .lpsc = DAVINCI_LPSC_ARM, .flags = ALWAYS_ENABLED, }; static struct clk uart0_clk = { .name = "uart0", .parent = &pll1_aux_clk, .lpsc = DAVINCI_LPSC_UART0, }; static struct clk uart1_clk = { .name = "uart1", .parent = &pll1_sysclk4, .lpsc = DAVINCI_LPSC_UART1, }; static struct clk i2c_clk = { .name = "i2c", .parent = &pll1_aux_clk, .lpsc = DAVINCI_LPSC_I2C, }; static struct clk mmcsd0_clk = { .name = "mmcsd0", .parent = &pll1_sysclk8, .lpsc = DAVINCI_LPSC_MMC_SD, }; static struct clk mmcsd1_clk = { .name = "mmcsd1", .parent = &pll1_sysclk4, .lpsc = DM365_LPSC_MMC_SD1, }; static struct clk spi0_clk = { .name = "spi0", .parent = &pll1_sysclk4, .lpsc = DAVINCI_LPSC_SPI, }; static struct clk spi1_clk = { .name = "spi1", .parent = &pll1_sysclk4, .lpsc = DM365_LPSC_SPI1, }; static 
struct clk spi2_clk = { .name = "spi2", .parent = &pll1_sysclk4, .lpsc = DM365_LPSC_SPI2, }; static struct clk spi3_clk = { .name = "spi3", .parent = &pll1_sysclk4, .lpsc = DM365_LPSC_SPI3, }; static struct clk spi4_clk = { .name = "spi4", .parent = &pll1_aux_clk, .lpsc = DM365_LPSC_SPI4, }; static struct clk gpio_clk = { .name = "gpio", .parent = &pll1_sysclk4, .lpsc = DAVINCI_LPSC_GPIO, }; static struct clk aemif_clk = { .name = "aemif", .parent = &pll1_sysclk4, .lpsc = DAVINCI_LPSC_AEMIF, }; static struct clk pwm0_clk = { .name = "pwm0", .parent = &pll1_aux_clk, .lpsc = DAVINCI_LPSC_PWM0, }; static struct clk pwm1_clk = { .name = "pwm1", .parent = &pll1_aux_clk, .lpsc = DAVINCI_LPSC_PWM1, }; static struct clk pwm2_clk = { .name = "pwm2", .parent = &pll1_aux_clk, .lpsc = DAVINCI_LPSC_PWM2, }; static struct clk pwm3_clk = { .name = "pwm3", .parent = &ref_clk, .lpsc = DM365_LPSC_PWM3, }; static struct clk timer0_clk = { .name = "timer0", .parent = &pll1_aux_clk, .lpsc = DAVINCI_LPSC_TIMER0, }; static struct clk timer1_clk = { .name = "timer1", .parent = &pll1_aux_clk, .lpsc = DAVINCI_LPSC_TIMER1, }; static struct clk timer2_clk = { .name = "timer2", .parent = &pll1_aux_clk, .lpsc = DAVINCI_LPSC_TIMER2, .usecount = 1, }; static struct clk timer3_clk = { .name = "timer3", .parent = &pll1_aux_clk, .lpsc = DM365_LPSC_TIMER3, }; static struct clk usb_clk = { .name = "usb", .parent = &pll1_aux_clk, .lpsc = DAVINCI_LPSC_USB, }; static struct clk emac_clk = { .name = "emac", .parent = &pll1_sysclk4, .lpsc = DM365_LPSC_EMAC, }; static struct clk voicecodec_clk = { .name = "voice_codec", .parent = &pll2_sysclk4, .lpsc = DM365_LPSC_VOICE_CODEC, }; static struct clk asp0_clk = { .name = "asp0", .parent = &pll1_sysclk4, .lpsc = DM365_LPSC_McBSP1, }; static struct clk rto_clk = { .name = "rto", .parent = &pll1_sysclk4, .lpsc = DM365_LPSC_RTO, }; static struct clk mjcp_clk = { .name = "mjcp", .parent = &pll1_sysclk3, .lpsc = DM365_LPSC_MJCP, }; static struct clk_lookup 
dm365_clks[] = { CLK(NULL, "ref", &ref_clk), CLK(NULL, "pll1", &pll1_clk), CLK(NULL, "pll1_aux", &pll1_aux_clk), CLK(NULL, "pll1_sysclkbp", &pll1_sysclkbp), CLK(NULL, "clkout0", &clkout0_clk), CLK(NULL, "pll1_sysclk1", &pll1_sysclk1), CLK(NULL, "pll1_sysclk2", &pll1_sysclk2), CLK(NULL, "pll1_sysclk3", &pll1_sysclk3), CLK(NULL, "pll1_sysclk4", &pll1_sysclk4), CLK(NULL, "pll1_sysclk5", &pll1_sysclk5), CLK(NULL, "pll1_sysclk6", &pll1_sysclk6), CLK(NULL, "pll1_sysclk7", &pll1_sysclk7), CLK(NULL, "pll1_sysclk8", &pll1_sysclk8), CLK(NULL, "pll1_sysclk9", &pll1_sysclk9), CLK(NULL, "pll2", &pll2_clk), CLK(NULL, "pll2_aux", &pll2_aux_clk), CLK(NULL, "clkout1", &clkout1_clk), CLK(NULL, "pll2_sysclk1", &pll2_sysclk1), CLK(NULL, "pll2_sysclk2", &pll2_sysclk2), CLK(NULL, "pll2_sysclk3", &pll2_sysclk3), CLK(NULL, "pll2_sysclk4", &pll2_sysclk4), CLK(NULL, "pll2_sysclk5", &pll2_sysclk5), CLK(NULL, "pll2_sysclk6", &pll2_sysclk6), CLK(NULL, "pll2_sysclk7", &pll2_sysclk7), CLK(NULL, "pll2_sysclk8", &pll2_sysclk8), CLK(NULL, "pll2_sysclk9", &pll2_sysclk9), CLK(NULL, "vpss_dac", &vpss_dac_clk), CLK(NULL, "vpss_master", &vpss_master_clk), CLK(NULL, "arm", &arm_clk), CLK(NULL, "uart0", &uart0_clk), CLK(NULL, "uart1", &uart1_clk), CLK("i2c_davinci.1", NULL, &i2c_clk), CLK("davinci_mmc.0", NULL, &mmcsd0_clk), CLK("davinci_mmc.1", NULL, &mmcsd1_clk), CLK("spi_davinci.0", NULL, &spi0_clk), CLK("spi_davinci.1", NULL, &spi1_clk), CLK("spi_davinci.2", NULL, &spi2_clk), CLK("spi_davinci.3", NULL, &spi3_clk), CLK("spi_davinci.4", NULL, &spi4_clk), CLK(NULL, "gpio", &gpio_clk), CLK(NULL, "aemif", &aemif_clk), CLK(NULL, "pwm0", &pwm0_clk), CLK(NULL, "pwm1", &pwm1_clk), CLK(NULL, "pwm2", &pwm2_clk), CLK(NULL, "pwm3", &pwm3_clk), CLK(NULL, "timer0", &timer0_clk), CLK(NULL, "timer1", &timer1_clk), CLK("watchdog", NULL, &timer2_clk), CLK(NULL, "timer3", &timer3_clk), CLK(NULL, "usb", &usb_clk), CLK("davinci_emac.1", NULL, &emac_clk), CLK("davinci_voicecodec", NULL, &voicecodec_clk), 
CLK("davinci-mcbsp", NULL, &asp0_clk), CLK(NULL, "rto", &rto_clk), CLK(NULL, "mjcp", &mjcp_clk), CLK(NULL, NULL, NULL), }; /*----------------------------------------------------------------------*/ #define INTMUX 0x18 #define EVTMUX 0x1c static const struct mux_config dm365_pins[] = { #ifdef CONFIG_DAVINCI_MUX MUX_CFG(DM365, MMCSD0, 0, 24, 1, 0, false) MUX_CFG(DM365, SD1_CLK, 0, 16, 3, 1, false) MUX_CFG(DM365, SD1_CMD, 4, 30, 3, 1, false) MUX_CFG(DM365, SD1_DATA3, 4, 28, 3, 1, false) MUX_CFG(DM365, SD1_DATA2, 4, 26, 3, 1, false) MUX_CFG(DM365, SD1_DATA1, 4, 24, 3, 1, false) MUX_CFG(DM365, SD1_DATA0, 4, 22, 3, 1, false) MUX_CFG(DM365, I2C_SDA, 3, 23, 3, 2, false) MUX_CFG(DM365, I2C_SCL, 3, 21, 3, 2, false) MUX_CFG(DM365, AEMIF_AR_A14, 2, 0, 3, 1, false) MUX_CFG(DM365, AEMIF_AR_BA0, 2, 0, 3, 2, false) MUX_CFG(DM365, AEMIF_A3, 2, 2, 3, 1, false) MUX_CFG(DM365, AEMIF_A7, 2, 4, 3, 1, false) MUX_CFG(DM365, AEMIF_D15_8, 2, 6, 1, 1, false) MUX_CFG(DM365, AEMIF_CE0, 2, 7, 1, 0, false) MUX_CFG(DM365, AEMIF_CE1, 2, 8, 1, 0, false) MUX_CFG(DM365, AEMIF_WE_OE, 2, 9, 1, 0, false) MUX_CFG(DM365, MCBSP0_BDX, 0, 23, 1, 1, false) MUX_CFG(DM365, MCBSP0_X, 0, 22, 1, 1, false) MUX_CFG(DM365, MCBSP0_BFSX, 0, 21, 1, 1, false) MUX_CFG(DM365, MCBSP0_BDR, 0, 20, 1, 1, false) MUX_CFG(DM365, MCBSP0_R, 0, 19, 1, 1, false) MUX_CFG(DM365, MCBSP0_BFSR, 0, 18, 1, 1, false) MUX_CFG(DM365, SPI0_SCLK, 3, 28, 1, 1, false) MUX_CFG(DM365, SPI0_SDI, 3, 26, 3, 1, false) MUX_CFG(DM365, SPI0_SDO, 3, 25, 1, 1, false) MUX_CFG(DM365, SPI0_SDENA0, 3, 29, 3, 1, false) MUX_CFG(DM365, SPI0_SDENA1, 3, 26, 3, 2, false) MUX_CFG(DM365, UART0_RXD, 3, 20, 1, 1, false) MUX_CFG(DM365, UART0_TXD, 3, 19, 1, 1, false) MUX_CFG(DM365, UART1_RXD, 3, 17, 3, 2, false) MUX_CFG(DM365, UART1_TXD, 3, 15, 3, 2, false) MUX_CFG(DM365, UART1_RTS, 3, 23, 3, 1, false) MUX_CFG(DM365, UART1_CTS, 3, 21, 3, 1, false) MUX_CFG(DM365, EMAC_TX_EN, 3, 17, 3, 1, false) MUX_CFG(DM365, EMAC_TX_CLK, 3, 15, 3, 1, false) MUX_CFG(DM365, EMAC_COL, 3, 14, 
1, 1, false) MUX_CFG(DM365, EMAC_TXD3, 3, 13, 1, 1, false) MUX_CFG(DM365, EMAC_TXD2, 3, 12, 1, 1, false) MUX_CFG(DM365, EMAC_TXD1, 3, 11, 1, 1, false) MUX_CFG(DM365, EMAC_TXD0, 3, 10, 1, 1, false) MUX_CFG(DM365, EMAC_RXD3, 3, 9, 1, 1, false) MUX_CFG(DM365, EMAC_RXD2, 3, 8, 1, 1, false) MUX_CFG(DM365, EMAC_RXD1, 3, 7, 1, 1, false) MUX_CFG(DM365, EMAC_RXD0, 3, 6, 1, 1, false) MUX_CFG(DM365, EMAC_RX_CLK, 3, 5, 1, 1, false) MUX_CFG(DM365, EMAC_RX_DV, 3, 4, 1, 1, false) MUX_CFG(DM365, EMAC_RX_ER, 3, 3, 1, 1, false) MUX_CFG(DM365, EMAC_CRS, 3, 2, 1, 1, false) MUX_CFG(DM365, EMAC_MDIO, 3, 1, 1, 1, false) MUX_CFG(DM365, EMAC_MDCLK, 3, 0, 1, 1, false) MUX_CFG(DM365, KEYSCAN, 2, 0, 0x3f, 0x3f, false) MUX_CFG(DM365, PWM0, 1, 0, 3, 2, false) MUX_CFG(DM365, PWM0_G23, 3, 26, 3, 3, false) MUX_CFG(DM365, PWM1, 1, 2, 3, 2, false) MUX_CFG(DM365, PWM1_G25, 3, 29, 3, 2, false) MUX_CFG(DM365, PWM2_G87, 1, 10, 3, 2, false) MUX_CFG(DM365, PWM2_G88, 1, 8, 3, 2, false) MUX_CFG(DM365, PWM2_G89, 1, 6, 3, 2, false) MUX_CFG(DM365, PWM2_G90, 1, 4, 3, 2, false) MUX_CFG(DM365, PWM3_G80, 1, 20, 3, 3, false) MUX_CFG(DM365, PWM3_G81, 1, 18, 3, 3, false) MUX_CFG(DM365, PWM3_G85, 1, 14, 3, 2, false) MUX_CFG(DM365, PWM3_G86, 1, 12, 3, 2, false) MUX_CFG(DM365, SPI1_SCLK, 4, 2, 3, 1, false) MUX_CFG(DM365, SPI1_SDI, 3, 31, 1, 1, false) MUX_CFG(DM365, SPI1_SDO, 4, 0, 3, 1, false) MUX_CFG(DM365, SPI1_SDENA0, 4, 4, 3, 1, false) MUX_CFG(DM365, SPI1_SDENA1, 4, 0, 3, 2, false) MUX_CFG(DM365, SPI2_SCLK, 4, 10, 3, 1, false) MUX_CFG(DM365, SPI2_SDI, 4, 6, 3, 1, false) MUX_CFG(DM365, SPI2_SDO, 4, 8, 3, 1, false) MUX_CFG(DM365, SPI2_SDENA0, 4, 12, 3, 1, false) MUX_CFG(DM365, SPI2_SDENA1, 4, 8, 3, 2, false) MUX_CFG(DM365, SPI3_SCLK, 0, 0, 3, 2, false) MUX_CFG(DM365, SPI3_SDI, 0, 2, 3, 2, false) MUX_CFG(DM365, SPI3_SDO, 0, 6, 3, 2, false) MUX_CFG(DM365, SPI3_SDENA0, 0, 4, 3, 2, false) MUX_CFG(DM365, SPI3_SDENA1, 0, 6, 3, 3, false) MUX_CFG(DM365, SPI4_SCLK, 4, 18, 3, 1, false) MUX_CFG(DM365, SPI4_SDI, 4, 14, 3, 1, 
false) MUX_CFG(DM365, SPI4_SDO, 4, 16, 3, 1, false) MUX_CFG(DM365, SPI4_SDENA0, 4, 20, 3, 1, false) MUX_CFG(DM365, SPI4_SDENA1, 4, 16, 3, 2, false) MUX_CFG(DM365, CLKOUT0, 4, 20, 3, 3, false) MUX_CFG(DM365, CLKOUT1, 4, 16, 3, 3, false) MUX_CFG(DM365, CLKOUT2, 4, 8, 3, 3, false) MUX_CFG(DM365, GPIO20, 3, 21, 3, 0, false) MUX_CFG(DM365, GPIO30, 4, 6, 3, 0, false) MUX_CFG(DM365, GPIO31, 4, 8, 3, 0, false) MUX_CFG(DM365, GPIO32, 4, 10, 3, 0, false) MUX_CFG(DM365, GPIO33, 4, 12, 3, 0, false) MUX_CFG(DM365, GPIO40, 4, 26, 3, 0, false) MUX_CFG(DM365, GPIO64_57, 2, 6, 1, 0, false) MUX_CFG(DM365, VOUT_FIELD, 1, 18, 3, 1, false) MUX_CFG(DM365, VOUT_FIELD_G81, 1, 18, 3, 0, false) MUX_CFG(DM365, VOUT_HVSYNC, 1, 16, 1, 0, false) MUX_CFG(DM365, VOUT_COUTL_EN, 1, 0, 0xff, 0x55, false) MUX_CFG(DM365, VOUT_COUTH_EN, 1, 8, 0xff, 0x55, false) MUX_CFG(DM365, VIN_CAM_WEN, 0, 14, 3, 0, false) MUX_CFG(DM365, VIN_CAM_VD, 0, 13, 1, 0, false) MUX_CFG(DM365, VIN_CAM_HD, 0, 12, 1, 0, false) MUX_CFG(DM365, VIN_YIN4_7_EN, 0, 0, 0xff, 0, false) MUX_CFG(DM365, VIN_YIN0_3_EN, 0, 8, 0xf, 0, false) INT_CFG(DM365, INT_EDMA_CC, 2, 1, 1, false) INT_CFG(DM365, INT_EDMA_TC0_ERR, 3, 1, 1, false) INT_CFG(DM365, INT_EDMA_TC1_ERR, 4, 1, 1, false) INT_CFG(DM365, INT_EDMA_TC2_ERR, 22, 1, 1, false) INT_CFG(DM365, INT_EDMA_TC3_ERR, 23, 1, 1, false) INT_CFG(DM365, INT_PRTCSS, 10, 1, 1, false) INT_CFG(DM365, INT_EMAC_RXTHRESH, 14, 1, 1, false) INT_CFG(DM365, INT_EMAC_RXPULSE, 15, 1, 1, false) INT_CFG(DM365, INT_EMAC_TXPULSE, 16, 1, 1, false) INT_CFG(DM365, INT_EMAC_MISCPULSE, 17, 1, 1, false) INT_CFG(DM365, INT_IMX0_ENABLE, 0, 1, 0, false) INT_CFG(DM365, INT_IMX0_DISABLE, 0, 1, 1, false) INT_CFG(DM365, INT_HDVICP_ENABLE, 0, 1, 1, false) INT_CFG(DM365, INT_HDVICP_DISABLE, 0, 1, 0, false) INT_CFG(DM365, INT_IMX1_ENABLE, 24, 1, 1, false) INT_CFG(DM365, INT_IMX1_DISABLE, 24, 1, 0, false) INT_CFG(DM365, INT_NSF_ENABLE, 25, 1, 1, false) INT_CFG(DM365, INT_NSF_DISABLE, 25, 1, 0, false) EVT_CFG(DM365, EVT2_ASP_TX, 0, 1, 
0, false) EVT_CFG(DM365, EVT3_ASP_RX, 1, 1, 0, false) EVT_CFG(DM365, EVT2_VC_TX, 0, 1, 1, false) EVT_CFG(DM365, EVT3_VC_RX, 1, 1, 1, false) #endif }; static u64 dm365_spi0_dma_mask = DMA_BIT_MASK(32); static struct davinci_spi_platform_data dm365_spi0_pdata = { .version = SPI_VERSION_1, .num_chipselect = 2, .dma_event_q = EVENTQ_3, }; static struct resource dm365_spi0_resources[] = { { .start = 0x01c66000, .end = 0x01c667ff, .flags = IORESOURCE_MEM, }, { .start = IRQ_DM365_SPIINT0_0, .flags = IORESOURCE_IRQ, }, { .start = 17, .flags = IORESOURCE_DMA, }, { .start = 16, .flags = IORESOURCE_DMA, }, }; static struct platform_device dm365_spi0_device = { .name = "spi_davinci", .id = 0, .dev = { .dma_mask = &dm365_spi0_dma_mask, .coherent_dma_mask = DMA_BIT_MASK(32), .platform_data = &dm365_spi0_pdata, }, .num_resources = ARRAY_SIZE(dm365_spi0_resources), .resource = dm365_spi0_resources, }; void __init dm365_init_spi0(unsigned chipselect_mask, struct spi_board_info *info, unsigned len) { davinci_cfg_reg(DM365_SPI0_SCLK); davinci_cfg_reg(DM365_SPI0_SDI); davinci_cfg_reg(DM365_SPI0_SDO); /* not all slaves will be wired up */ if (chipselect_mask & BIT(0)) davinci_cfg_reg(DM365_SPI0_SDENA0); if (chipselect_mask & BIT(1)) davinci_cfg_reg(DM365_SPI0_SDENA1); spi_register_board_info(info, len); platform_device_register(&dm365_spi0_device); } static struct emac_platform_data dm365_emac_pdata = { .ctrl_reg_offset = DM365_EMAC_CNTRL_OFFSET, .ctrl_mod_reg_offset = DM365_EMAC_CNTRL_MOD_OFFSET, .ctrl_ram_offset = DM365_EMAC_CNTRL_RAM_OFFSET, .ctrl_ram_size = DM365_EMAC_CNTRL_RAM_SIZE, .version = EMAC_VERSION_2, }; static struct resource dm365_emac_resources[] = { { .start = DM365_EMAC_BASE, .end = DM365_EMAC_BASE + SZ_16K - 1, .flags = IORESOURCE_MEM, }, { .start = IRQ_DM365_EMAC_RXTHRESH, .end = IRQ_DM365_EMAC_RXTHRESH, .flags = IORESOURCE_IRQ, }, { .start = IRQ_DM365_EMAC_RXPULSE, .end = IRQ_DM365_EMAC_RXPULSE, .flags = IORESOURCE_IRQ, }, { .start = IRQ_DM365_EMAC_TXPULSE, .end = 
IRQ_DM365_EMAC_TXPULSE, .flags = IORESOURCE_IRQ, }, { .start = IRQ_DM365_EMAC_MISCPULSE, .end = IRQ_DM365_EMAC_MISCPULSE, .flags = IORESOURCE_IRQ, }, }; static struct platform_device dm365_emac_device = { .name = "davinci_emac", .id = 1, .dev = { .platform_data = &dm365_emac_pdata, }, .num_resources = ARRAY_SIZE(dm365_emac_resources), .resource = dm365_emac_resources, }; static struct resource dm365_mdio_resources[] = { { .start = DM365_EMAC_MDIO_BASE, .end = DM365_EMAC_MDIO_BASE + SZ_4K - 1, .flags = IORESOURCE_MEM, }, }; static struct platform_device dm365_mdio_device = { .name = "davinci_mdio", .id = 0, .num_resources = ARRAY_SIZE(dm365_mdio_resources), .resource = dm365_mdio_resources, }; static u8 dm365_default_priorities[DAVINCI_N_AINTC_IRQ] = { [IRQ_VDINT0] = 2, [IRQ_VDINT1] = 6, [IRQ_VDINT2] = 6, [IRQ_HISTINT] = 6, [IRQ_H3AINT] = 6, [IRQ_PRVUINT] = 6, [IRQ_RSZINT] = 6, [IRQ_DM365_INSFINT] = 7, [IRQ_VENCINT] = 6, [IRQ_ASQINT] = 6, [IRQ_IMXINT] = 6, [IRQ_DM365_IMCOPINT] = 4, [IRQ_USBINT] = 4, [IRQ_DM365_RTOINT] = 7, [IRQ_DM365_TINT5] = 7, [IRQ_DM365_TINT6] = 5, [IRQ_CCINT0] = 5, [IRQ_CCERRINT] = 5, [IRQ_TCERRINT0] = 5, [IRQ_TCERRINT] = 7, [IRQ_PSCIN] = 4, [IRQ_DM365_SPINT2_1] = 7, [IRQ_DM365_TINT7] = 7, [IRQ_DM365_SDIOINT0] = 7, [IRQ_MBXINT] = 7, [IRQ_MBRINT] = 7, [IRQ_MMCINT] = 7, [IRQ_DM365_MMCINT1] = 7, [IRQ_DM365_PWMINT3] = 7, [IRQ_AEMIFINT] = 2, [IRQ_DM365_SDIOINT1] = 2, [IRQ_TINT0_TINT12] = 7, [IRQ_TINT0_TINT34] = 7, [IRQ_TINT1_TINT12] = 7, [IRQ_TINT1_TINT34] = 7, [IRQ_PWMINT0] = 7, [IRQ_PWMINT1] = 3, [IRQ_PWMINT2] = 3, [IRQ_I2C] = 3, [IRQ_UARTINT0] = 3, [IRQ_UARTINT1] = 3, [IRQ_DM365_RTCINT] = 3, [IRQ_DM365_SPIINT0_0] = 3, [IRQ_DM365_SPIINT3_0] = 3, [IRQ_DM365_GPIO0] = 3, [IRQ_DM365_GPIO1] = 7, [IRQ_DM365_GPIO2] = 4, [IRQ_DM365_GPIO3] = 4, [IRQ_DM365_GPIO4] = 7, [IRQ_DM365_GPIO5] = 7, [IRQ_DM365_GPIO6] = 7, [IRQ_DM365_GPIO7] = 7, [IRQ_DM365_EMAC_RXTHRESH] = 7, [IRQ_DM365_EMAC_RXPULSE] = 7, [IRQ_DM365_EMAC_TXPULSE] = 7, [IRQ_DM365_EMAC_MISCPULSE] = 7, 
[IRQ_DM365_GPIO12] = 7, [IRQ_DM365_GPIO13] = 7, [IRQ_DM365_GPIO14] = 7, [IRQ_DM365_GPIO15] = 7, [IRQ_DM365_KEYINT] = 7, [IRQ_DM365_TCERRINT2] = 7, [IRQ_DM365_TCERRINT3] = 7, [IRQ_DM365_EMUINT] = 7, }; /* Four Transfer Controllers on DM365 */ static const s8 dm365_queue_tc_mapping[][2] = { /* {event queue no, TC no} */ {0, 0}, {1, 1}, {2, 2}, {3, 3}, {-1, -1}, }; static const s8 dm365_queue_priority_mapping[][2] = { /* {event queue no, Priority} */ {0, 7}, {1, 7}, {2, 7}, {3, 0}, {-1, -1}, }; static struct edma_soc_info edma_cc0_info = { .n_channel = 64, .n_region = 4, .n_slot = 256, .n_tc = 4, .n_cc = 1, .queue_tc_mapping = dm365_queue_tc_mapping, .queue_priority_mapping = dm365_queue_priority_mapping, .default_queue = EVENTQ_3, }; static struct edma_soc_info *dm365_edma_info[EDMA_MAX_CC] = { &edma_cc0_info, }; static struct resource edma_resources[] = { { .name = "edma_cc0", .start = 0x01c00000, .end = 0x01c00000 + SZ_64K - 1, .flags = IORESOURCE_MEM, }, { .name = "edma_tc0", .start = 0x01c10000, .end = 0x01c10000 + SZ_1K - 1, .flags = IORESOURCE_MEM, }, { .name = "edma_tc1", .start = 0x01c10400, .end = 0x01c10400 + SZ_1K - 1, .flags = IORESOURCE_MEM, }, { .name = "edma_tc2", .start = 0x01c10800, .end = 0x01c10800 + SZ_1K - 1, .flags = IORESOURCE_MEM, }, { .name = "edma_tc3", .start = 0x01c10c00, .end = 0x01c10c00 + SZ_1K - 1, .flags = IORESOURCE_MEM, }, { .name = "edma0", .start = IRQ_CCINT0, .flags = IORESOURCE_IRQ, }, { .name = "edma0_err", .start = IRQ_CCERRINT, .flags = IORESOURCE_IRQ, }, /* not using TC*_ERR */ }; static struct platform_device dm365_edma_device = { .name = "edma", .id = 0, .dev.platform_data = dm365_edma_info, .num_resources = ARRAY_SIZE(edma_resources), .resource = edma_resources, }; static struct resource dm365_asp_resources[] = { { .start = DAVINCI_DM365_ASP0_BASE, .end = DAVINCI_DM365_ASP0_BASE + SZ_8K - 1, .flags = IORESOURCE_MEM, }, { .start = DAVINCI_DMA_ASP0_TX, .end = DAVINCI_DMA_ASP0_TX, .flags = IORESOURCE_DMA, }, { .start = 
DAVINCI_DMA_ASP0_RX, .end = DAVINCI_DMA_ASP0_RX, .flags = IORESOURCE_DMA, }, }; static struct platform_device dm365_asp_device = { .name = "davinci-mcbsp", .id = -1, .num_resources = ARRAY_SIZE(dm365_asp_resources), .resource = dm365_asp_resources, }; static struct resource dm365_vc_resources[] = { { .start = DAVINCI_DM365_VC_BASE, .end = DAVINCI_DM365_VC_BASE + SZ_1K - 1, .flags = IORESOURCE_MEM, }, { .start = DAVINCI_DMA_VC_TX, .end = DAVINCI_DMA_VC_TX, .flags = IORESOURCE_DMA, }, { .start = DAVINCI_DMA_VC_RX, .end = DAVINCI_DMA_VC_RX, .flags = IORESOURCE_DMA, }, }; static struct platform_device dm365_vc_device = { .name = "davinci_voicecodec", .id = -1, .num_resources = ARRAY_SIZE(dm365_vc_resources), .resource = dm365_vc_resources, }; static struct resource dm365_rtc_resources[] = { { .start = DM365_RTC_BASE, .end = DM365_RTC_BASE + SZ_1K - 1, .flags = IORESOURCE_MEM, }, { .start = IRQ_DM365_RTCINT, .flags = IORESOURCE_IRQ, }, }; static struct platform_device dm365_rtc_device = { .name = "rtc_davinci", .id = 0, .num_resources = ARRAY_SIZE(dm365_rtc_resources), .resource = dm365_rtc_resources, }; static struct map_desc dm365_io_desc[] = { { .virtual = IO_VIRT, .pfn = __phys_to_pfn(IO_PHYS), .length = IO_SIZE, .type = MT_DEVICE }, { .virtual = SRAM_VIRT, .pfn = __phys_to_pfn(0x00010000), .length = SZ_32K, .type = MT_MEMORY_NONCACHED, }, }; static struct resource dm365_ks_resources[] = { { /* registers */ .start = DM365_KEYSCAN_BASE, .end = DM365_KEYSCAN_BASE + SZ_1K - 1, .flags = IORESOURCE_MEM, }, { /* interrupt */ .start = IRQ_DM365_KEYINT, .end = IRQ_DM365_KEYINT, .flags = IORESOURCE_IRQ, }, }; static struct platform_device dm365_ks_device = { .name = "davinci_keyscan", .id = 0, .num_resources = ARRAY_SIZE(dm365_ks_resources), .resource = dm365_ks_resources, }; /* Contents of JTAG ID register used to identify exact cpu type */ static struct davinci_id dm365_ids[] = { { .variant = 0x0, .part_no = 0xb83e, .manufacturer = 0x017, .cpu_id = DAVINCI_CPU_ID_DM365, 
.name = "dm365_rev1.1", }, { .variant = 0x8, .part_no = 0xb83e, .manufacturer = 0x017, .cpu_id = DAVINCI_CPU_ID_DM365, .name = "dm365_rev1.2", }, }; static u32 dm365_psc_bases[] = { DAVINCI_PWR_SLEEP_CNTRL_BASE }; static struct davinci_timer_info dm365_timer_info = { .timers = davinci_timer_instance, .clockevent_id = T0_BOT, .clocksource_id = T0_TOP, }; #define DM365_UART1_BASE (IO_PHYS + 0x106000) static struct plat_serial8250_port dm365_serial_platform_data[] = { { .mapbase = DAVINCI_UART0_BASE, .irq = IRQ_UARTINT0, .flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST | UPF_IOREMAP, .iotype = UPIO_MEM, .regshift = 2, }, { .mapbase = DM365_UART1_BASE, .irq = IRQ_UARTINT1, .flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST | UPF_IOREMAP, .iotype = UPIO_MEM, .regshift = 2, }, { .flags = 0 }, }; static struct platform_device dm365_serial_device = { .name = "serial8250", .id = PLAT8250_DEV_PLATFORM, .dev = { .platform_data = dm365_serial_platform_data, }, }; static struct davinci_soc_info davinci_soc_info_dm365 = { .io_desc = dm365_io_desc, .io_desc_num = ARRAY_SIZE(dm365_io_desc), .jtag_id_reg = 0x01c40028, .ids = dm365_ids, .ids_num = ARRAY_SIZE(dm365_ids), .cpu_clks = dm365_clks, .psc_bases = dm365_psc_bases, .psc_bases_num = ARRAY_SIZE(dm365_psc_bases), .pinmux_base = DAVINCI_SYSTEM_MODULE_BASE, .pinmux_pins = dm365_pins, .pinmux_pins_num = ARRAY_SIZE(dm365_pins), .intc_base = DAVINCI_ARM_INTC_BASE, .intc_type = DAVINCI_INTC_TYPE_AINTC, .intc_irq_prios = dm365_default_priorities, .intc_irq_num = DAVINCI_N_AINTC_IRQ, .timer_info = &dm365_timer_info, .gpio_type = GPIO_TYPE_DAVINCI, .gpio_base = DAVINCI_GPIO_BASE, .gpio_num = 104, .gpio_irq = IRQ_DM365_GPIO0, .gpio_unbanked = 8, /* really 16 ... 
skip muxed GPIOs */ .serial_dev = &dm365_serial_device, .emac_pdata = &dm365_emac_pdata, .sram_dma = 0x00010000, .sram_len = SZ_32K, }; void __init dm365_init_asp(struct snd_platform_data *pdata) { davinci_cfg_reg(DM365_MCBSP0_BDX); davinci_cfg_reg(DM365_MCBSP0_X); davinci_cfg_reg(DM365_MCBSP0_BFSX); davinci_cfg_reg(DM365_MCBSP0_BDR); davinci_cfg_reg(DM365_MCBSP0_R); davinci_cfg_reg(DM365_MCBSP0_BFSR); davinci_cfg_reg(DM365_EVT2_ASP_TX); davinci_cfg_reg(DM365_EVT3_ASP_RX); dm365_asp_device.dev.platform_data = pdata; platform_device_register(&dm365_asp_device); } void __init dm365_init_vc(struct snd_platform_data *pdata) { davinci_cfg_reg(DM365_EVT2_VC_TX); davinci_cfg_reg(DM365_EVT3_VC_RX); dm365_vc_device.dev.platform_data = pdata; platform_device_register(&dm365_vc_device); } void __init dm365_init_ks(struct davinci_ks_platform_data *pdata) { dm365_ks_device.dev.platform_data = pdata; platform_device_register(&dm365_ks_device); } void __init dm365_init_rtc(void) { davinci_cfg_reg(DM365_INT_PRTCSS); platform_device_register(&dm365_rtc_device); } void __init dm365_init(void) { davinci_common_init(&davinci_soc_info_dm365); davinci_map_sysmod(); } static struct resource dm365_vpss_resources[] = { { /* VPSS ISP5 Base address */ .name = "isp5", .start = 0x01c70000, .end = 0x01c70000 + 0xff, .flags = IORESOURCE_MEM, }, { /* VPSS CLK Base address */ .name = "vpss", .start = 0x01c70200, .end = 0x01c70200 + 0xff, .flags = IORESOURCE_MEM, }, }; static struct platform_device dm365_vpss_device = { .name = "vpss", .id = -1, .dev.platform_data = "dm365_vpss", .num_resources = ARRAY_SIZE(dm365_vpss_resources), .resource = dm365_vpss_resources, }; static struct resource vpfe_resources[] = { { .start = IRQ_VDINT0, .end = IRQ_VDINT0, .flags = IORESOURCE_IRQ, }, { .start = IRQ_VDINT1, .end = IRQ_VDINT1, .flags = IORESOURCE_IRQ, }, }; static u64 vpfe_capture_dma_mask = DMA_BIT_MASK(32); static struct platform_device vpfe_capture_dev = { .name = CAPTURE_DRV_NAME, .id = -1, 
.num_resources = ARRAY_SIZE(vpfe_resources), .resource = vpfe_resources, .dev = { .dma_mask = &vpfe_capture_dma_mask, .coherent_dma_mask = DMA_BIT_MASK(32), }, }; static void dm365_isif_setup_pinmux(void) { davinci_cfg_reg(DM365_VIN_CAM_WEN); davinci_cfg_reg(DM365_VIN_CAM_VD); davinci_cfg_reg(DM365_VIN_CAM_HD); davinci_cfg_reg(DM365_VIN_YIN4_7_EN); davinci_cfg_reg(DM365_VIN_YIN0_3_EN); } static struct resource isif_resource[] = { /* ISIF Base address */ { .start = 0x01c71000, .end = 0x01c71000 + 0x1ff, .flags = IORESOURCE_MEM, }, /* ISIF Linearization table 0 */ { .start = 0x1C7C000, .end = 0x1C7C000 + 0x2ff, .flags = IORESOURCE_MEM, }, /* ISIF Linearization table 1 */ { .start = 0x1C7C400, .end = 0x1C7C400 + 0x2ff, .flags = IORESOURCE_MEM, }, }; static struct platform_device dm365_isif_dev = { .name = "isif", .id = -1, .num_resources = ARRAY_SIZE(isif_resource), .resource = isif_resource, .dev = { .dma_mask = &vpfe_capture_dma_mask, .coherent_dma_mask = DMA_BIT_MASK(32), .platform_data = dm365_isif_setup_pinmux, }, }; static int __init dm365_init_devices(void) { if (!cpu_is_davinci_dm365()) return 0; davinci_cfg_reg(DM365_INT_EDMA_CC); platform_device_register(&dm365_edma_device); platform_device_register(&dm365_mdio_device); platform_device_register(&dm365_emac_device); clk_add_alias(NULL, dev_name(&dm365_mdio_device.dev), NULL, &dm365_emac_device.dev); /* Add isif clock alias */ clk_add_alias("master", dm365_isif_dev.name, "vpss_master", NULL); platform_device_register(&dm365_vpss_device); platform_device_register(&dm365_isif_dev); platform_device_register(&vpfe_capture_dev); return 0; } postcore_initcall(dm365_init_devices); void dm365_set_vpfe_config(struct vpfe_config *cfg) { vpfe_capture_dev.dev.platform_data = cfg; }
gpl-2.0
Vegaviet-DevTeam/android_kernel_pantech_ef63
arch/arm/mach-orion5x/dns323-setup.c
4732
18869
/* * arch/arm/mach-orion5x/dns323-setup.c * * Copyright (C) 2007 Herbert Valerio Riedel <hvr@gnu.org> * * Support for HW Rev C1: * * Copyright (C) 2010 Benjamin Herrenschmidt <benh@kernel.crashing.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as * published by the Free Software Foundation; either version 2 of the * License, or (at your option) any later version. * */ #include <linux/gpio.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/platform_device.h> #include <linux/pci.h> #include <linux/irq.h> #include <linux/mtd/physmap.h> #include <linux/mv643xx_eth.h> #include <linux/leds.h> #include <linux/gpio_keys.h> #include <linux/input.h> #include <linux/i2c.h> #include <linux/ata_platform.h> #include <linux/phy.h> #include <linux/marvell_phy.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> #include <asm/mach/pci.h> #include <asm/system_info.h> #include <mach/orion5x.h> #include "common.h" #include "mpp.h" /* Rev A1 and B1 */ #define DNS323_GPIO_LED_RIGHT_AMBER 1 #define DNS323_GPIO_LED_LEFT_AMBER 2 #define DNS323_GPIO_SYSTEM_UP 3 #define DNS323_GPIO_LED_POWER1 4 #define DNS323_GPIO_LED_POWER2 5 #define DNS323_GPIO_OVERTEMP 6 #define DNS323_GPIO_RTC 7 #define DNS323_GPIO_POWER_OFF 8 #define DNS323_GPIO_KEY_POWER 9 #define DNS323_GPIO_KEY_RESET 10 /* Rev C1 */ #define DNS323C_GPIO_KEY_POWER 1 #define DNS323C_GPIO_POWER_OFF 2 #define DNS323C_GPIO_LED_RIGHT_AMBER 8 #define DNS323C_GPIO_LED_LEFT_AMBER 9 #define DNS323C_GPIO_LED_POWER 17 #define DNS323C_GPIO_FAN_BIT1 18 #define DNS323C_GPIO_FAN_BIT0 19 /* Exposed to userspace, do not change */ enum { DNS323_REV_A1, /* 0 */ DNS323_REV_B1, /* 1 */ DNS323_REV_C1, /* 2 */ }; /**************************************************************************** * PCI setup */ static int __init dns323_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { int irq; /* * Check for devices 
with hard-wired IRQs. */ irq = orion5x_pci_map_irq(dev, slot, pin); if (irq != -1) return irq; return -1; } static struct hw_pci dns323_pci __initdata = { .nr_controllers = 2, .swizzle = pci_std_swizzle, .setup = orion5x_pci_sys_setup, .scan = orion5x_pci_sys_scan_bus, .map_irq = dns323_pci_map_irq, }; static int __init dns323_pci_init(void) { /* Rev B1 and C1 doesn't really use its PCI bus, and initialising PCI * gets in the way of initialising the SATA controller. */ if (machine_is_dns323() && system_rev == DNS323_REV_A1) pci_common_init(&dns323_pci); return 0; } subsys_initcall(dns323_pci_init); /**************************************************************************** * 8MiB NOR flash (Spansion S29GL064M90TFIR4) * * Layout as used by D-Link: * 0x00000000-0x00010000 : "MTD1" * 0x00010000-0x00020000 : "MTD2" * 0x00020000-0x001a0000 : "Linux Kernel" * 0x001a0000-0x007d0000 : "File System" * 0x007d0000-0x00800000 : "u-boot" */ #define DNS323_NOR_BOOT_BASE 0xf4000000 #define DNS323_NOR_BOOT_SIZE SZ_8M static struct mtd_partition dns323_partitions[] = { { .name = "MTD1", .size = 0x00010000, .offset = 0, }, { .name = "MTD2", .size = 0x00010000, .offset = 0x00010000, }, { .name = "Linux Kernel", .size = 0x00180000, .offset = 0x00020000, }, { .name = "File System", .size = 0x00630000, .offset = 0x001A0000, }, { .name = "u-boot", .size = 0x00030000, .offset = 0x007d0000, }, }; static struct physmap_flash_data dns323_nor_flash_data = { .width = 1, .parts = dns323_partitions, .nr_parts = ARRAY_SIZE(dns323_partitions) }; static struct resource dns323_nor_flash_resource = { .flags = IORESOURCE_MEM, .start = DNS323_NOR_BOOT_BASE, .end = DNS323_NOR_BOOT_BASE + DNS323_NOR_BOOT_SIZE - 1, }; static struct platform_device dns323_nor_flash = { .name = "physmap-flash", .id = 0, .dev = { .platform_data = &dns323_nor_flash_data, }, .resource = &dns323_nor_flash_resource, .num_resources = 1, }; /**************************************************************************** * Ethernet 
*/ static struct mv643xx_eth_platform_data dns323_eth_data = { .phy_addr = MV643XX_ETH_PHY_ADDR(8), }; /* dns323_parse_hex_*() taken from tsx09-common.c; should a common copy of these * functions be kept somewhere? */ static int __init dns323_parse_hex_nibble(char n) { if (n >= '0' && n <= '9') return n - '0'; if (n >= 'A' && n <= 'F') return n - 'A' + 10; if (n >= 'a' && n <= 'f') return n - 'a' + 10; return -1; } static int __init dns323_parse_hex_byte(const char *b) { int hi; int lo; hi = dns323_parse_hex_nibble(b[0]); lo = dns323_parse_hex_nibble(b[1]); if (hi < 0 || lo < 0) return -1; return (hi << 4) | lo; } static int __init dns323_read_mac_addr(void) { u_int8_t addr[6]; int i; char *mac_page; /* MAC address is stored as a regular ol' string in /dev/mtdblock4 * (0x007d0000-0x00800000) starting at offset 196480 (0x2ff80). */ mac_page = ioremap(DNS323_NOR_BOOT_BASE + 0x7d0000 + 196480, 1024); if (!mac_page) return -ENOMEM; /* Sanity check the string we're looking at */ for (i = 0; i < 5; i++) { if (*(mac_page + (i * 3) + 2) != ':') { goto error_fail; } } for (i = 0; i < 6; i++) { int byte; byte = dns323_parse_hex_byte(mac_page + (i * 3)); if (byte < 0) { goto error_fail; } addr[i] = byte; } iounmap(mac_page); printk("DNS-323: Found ethernet MAC address: "); for (i = 0; i < 6; i++) printk("%.2x%s", addr[i], (i < 5) ? 
":" : ".\n"); memcpy(dns323_eth_data.mac_addr, addr, 6); return 0; error_fail: iounmap(mac_page); return -EINVAL; } /**************************************************************************** * GPIO LEDs (simple - doesn't use hardware blinking support) */ #define ORION_BLINK_HALF_PERIOD 100 /* ms */ static int dns323_gpio_blink_set(unsigned gpio, int state, unsigned long *delay_on, unsigned long *delay_off) { if (delay_on && delay_off && !*delay_on && !*delay_off) *delay_on = *delay_off = ORION_BLINK_HALF_PERIOD; switch(state) { case GPIO_LED_NO_BLINK_LOW: case GPIO_LED_NO_BLINK_HIGH: orion_gpio_set_blink(gpio, 0); gpio_set_value(gpio, state); break; case GPIO_LED_BLINK: orion_gpio_set_blink(gpio, 1); } return 0; } static struct gpio_led dns323ab_leds[] = { { .name = "power:blue", .gpio = DNS323_GPIO_LED_POWER2, .default_trigger = "default-on", }, { .name = "right:amber", .gpio = DNS323_GPIO_LED_RIGHT_AMBER, .active_low = 1, }, { .name = "left:amber", .gpio = DNS323_GPIO_LED_LEFT_AMBER, .active_low = 1, }, }; static struct gpio_led dns323c_leds[] = { { .name = "power:blue", .gpio = DNS323C_GPIO_LED_POWER, .default_trigger = "timer", .active_low = 1, }, { .name = "right:amber", .gpio = DNS323C_GPIO_LED_RIGHT_AMBER, .active_low = 1, }, { .name = "left:amber", .gpio = DNS323C_GPIO_LED_LEFT_AMBER, .active_low = 1, }, }; static struct gpio_led_platform_data dns323ab_led_data = { .num_leds = ARRAY_SIZE(dns323ab_leds), .leds = dns323ab_leds, .gpio_blink_set = dns323_gpio_blink_set, }; static struct gpio_led_platform_data dns323c_led_data = { .num_leds = ARRAY_SIZE(dns323c_leds), .leds = dns323c_leds, .gpio_blink_set = dns323_gpio_blink_set, }; static struct platform_device dns323_gpio_leds = { .name = "leds-gpio", .id = -1, .dev = { .platform_data = &dns323ab_led_data, }, }; /**************************************************************************** * GPIO Attached Keys */ static struct gpio_keys_button dns323ab_buttons[] = { { .code = KEY_RESTART, .gpio = 
DNS323_GPIO_KEY_RESET, .desc = "Reset Button", .active_low = 1, }, { .code = KEY_POWER, .gpio = DNS323_GPIO_KEY_POWER, .desc = "Power Button", .active_low = 1, }, }; static struct gpio_keys_platform_data dns323ab_button_data = { .buttons = dns323ab_buttons, .nbuttons = ARRAY_SIZE(dns323ab_buttons), }; static struct gpio_keys_button dns323c_buttons[] = { { .code = KEY_POWER, .gpio = DNS323C_GPIO_KEY_POWER, .desc = "Power Button", .active_low = 1, }, }; static struct gpio_keys_platform_data dns323c_button_data = { .buttons = dns323c_buttons, .nbuttons = ARRAY_SIZE(dns323c_buttons), }; static struct platform_device dns323_button_device = { .name = "gpio-keys", .id = -1, .num_resources = 0, .dev = { .platform_data = &dns323ab_button_data, }, }; /***************************************************************************** * SATA */ static struct mv_sata_platform_data dns323_sata_data = { .n_ports = 2, }; /**************************************************************************** * General Setup */ static unsigned int dns323a_mpp_modes[] __initdata = { MPP0_PCIE_RST_OUTn, MPP1_GPIO, /* right amber LED (sata ch0) */ MPP2_GPIO, /* left amber LED (sata ch1) */ MPP3_UNUSED, MPP4_GPIO, /* power button LED */ MPP5_GPIO, /* power button LED */ MPP6_GPIO, /* GMT G751-2f overtemp */ MPP7_GPIO, /* M41T80 nIRQ/OUT/SQW */ MPP8_GPIO, /* triggers power off */ MPP9_GPIO, /* power button switch */ MPP10_GPIO, /* reset button switch */ MPP11_UNUSED, MPP12_UNUSED, MPP13_UNUSED, MPP14_UNUSED, MPP15_UNUSED, MPP16_UNUSED, MPP17_UNUSED, MPP18_UNUSED, MPP19_UNUSED, 0, }; static unsigned int dns323b_mpp_modes[] __initdata = { MPP0_UNUSED, MPP1_GPIO, /* right amber LED (sata ch0) */ MPP2_GPIO, /* left amber LED (sata ch1) */ MPP3_GPIO, /* system up flag */ MPP4_GPIO, /* power button LED */ MPP5_GPIO, /* power button LED */ MPP6_GPIO, /* GMT G751-2f overtemp */ MPP7_GPIO, /* M41T80 nIRQ/OUT/SQW */ MPP8_GPIO, /* triggers power off */ MPP9_GPIO, /* power button switch */ MPP10_GPIO, /* reset 
button switch */ MPP11_UNUSED, MPP12_SATA_LED, MPP13_SATA_LED, MPP14_SATA_LED, MPP15_SATA_LED, MPP16_UNUSED, MPP17_UNUSED, MPP18_UNUSED, MPP19_UNUSED, 0, }; static unsigned int dns323c_mpp_modes[] __initdata = { MPP0_GPIO, /* ? input */ MPP1_GPIO, /* input power switch (0 = pressed) */ MPP2_GPIO, /* output power off */ MPP3_UNUSED, /* ? output */ MPP4_UNUSED, /* ? output */ MPP5_UNUSED, /* ? output */ MPP6_UNUSED, /* ? output */ MPP7_UNUSED, /* ? output */ MPP8_GPIO, /* i/o right amber LED */ MPP9_GPIO, /* i/o left amber LED */ MPP10_GPIO, /* input */ MPP11_UNUSED, MPP12_SATA_LED, MPP13_SATA_LED, MPP14_SATA_LED, MPP15_SATA_LED, MPP16_UNUSED, MPP17_GPIO, /* power button LED */ MPP18_GPIO, /* fan speed bit 0 */ MPP19_GPIO, /* fan speed bit 1 */ 0, }; /* Rev C1 Fan speed notes: * * The fan is controlled by 2 GPIOs on this board. The settings * of the bits is as follow: * * GPIO 18 GPIO 19 Fan * * 0 0 stopped * 0 1 low speed * 1 0 high speed * 1 1 don't do that (*) * * (*) I think the two bits control two feed-in resistors into a fixed * PWN circuit, setting both bits will basically go a 'bit' faster * than high speed, but d-link doesn't do it and you may get out of * HW spec so don't do it. */ /* * On the DNS-323 A1 and B1 the following devices are attached via I2C: * * i2c addr | chip | description * 0x3e | GMT G760Af | fan speed PWM controller * 0x48 | GMT G751-2f | temp. sensor and therm. watchdog (LM75 compatible) * 0x68 | ST M41T80 | RTC w/ alarm */ static struct i2c_board_info __initdata dns323ab_i2c_devices[] = { { I2C_BOARD_INFO("g760a", 0x3e), }, { I2C_BOARD_INFO("lm75", 0x48), }, { I2C_BOARD_INFO("m41t80", 0x68), }, }; /* * On the DNS-323 C1 the following devices are attached via I2C: * * i2c addr | chip | description * 0x48 | GMT G751-2f | temp. sensor and therm. 
watchdog (LM75 compatible) * 0x68 | ST M41T80 | RTC w/ alarm */ static struct i2c_board_info __initdata dns323c_i2c_devices[] = { { I2C_BOARD_INFO("lm75", 0x48), }, { I2C_BOARD_INFO("m41t80", 0x68), }, }; /* DNS-323 rev. A specific power off method */ static void dns323a_power_off(void) { pr_info("DNS-323: Triggering power-off...\n"); gpio_set_value(DNS323_GPIO_POWER_OFF, 1); } /* DNS-323 rev B specific power off method */ static void dns323b_power_off(void) { pr_info("DNS-323: Triggering power-off...\n"); /* Pin has to be changed to 1 and back to 0 to do actual power off. */ gpio_set_value(DNS323_GPIO_POWER_OFF, 1); mdelay(100); gpio_set_value(DNS323_GPIO_POWER_OFF, 0); } /* DNS-323 rev. C specific power off method */ static void dns323c_power_off(void) { pr_info("DNS-323: Triggering power-off...\n"); gpio_set_value(DNS323C_GPIO_POWER_OFF, 1); } static int dns323c_phy_fixup(struct phy_device *phy) { phy->dev_flags |= MARVELL_PHY_M1118_DNS323_LEDS; return 0; } static int __init dns323_identify_rev(void) { u32 dev, rev, i, reg; pr_debug("DNS-323: Identifying board ... \n"); /* Rev A1 has a 5181 */ orion5x_pcie_id(&dev, &rev); if (dev == MV88F5181_DEV_ID) { pr_debug("DNS-323: 5181 found, board is A1\n"); return DNS323_REV_A1; } pr_debug("DNS-323: 5182 found, board is B1 or C1, checking PHY...\n"); /* Rev B1 and C1 both have 5182, let's poke at the eth PHY. This is * a bit gross but we want to do that without links into the eth * driver so let's poke at it directly. 
We default to rev B1 in * case the accesses fail */ #define ETH_SMI_REG (ORION5X_ETH_VIRT_BASE + 0x2000 + 0x004) #define SMI_BUSY 0x10000000 #define SMI_READ_VALID 0x08000000 #define SMI_OPCODE_READ 0x04000000 #define SMI_OPCODE_WRITE 0x00000000 for (i = 0; i < 1000; i++) { reg = readl(ETH_SMI_REG); if (!(reg & SMI_BUSY)) break; } if (i >= 1000) { pr_warning("DNS-323: Timeout accessing PHY, assuming rev B1\n"); return DNS323_REV_B1; } writel((3 << 21) /* phy ID reg */ | (8 << 16) /* phy addr */ | SMI_OPCODE_READ, ETH_SMI_REG); for (i = 0; i < 1000; i++) { reg = readl(ETH_SMI_REG); if (reg & SMI_READ_VALID) break; } if (i >= 1000) { pr_warning("DNS-323: Timeout reading PHY, assuming rev B1\n"); return DNS323_REV_B1; } pr_debug("DNS-323: Ethernet PHY ID 0x%x\n", reg & 0xffff); /* Note: the Marvell tools mask the ID with 0x3f0 before comparison * but I don't see that making a difference here, at least with * any known Marvell PHY ID */ switch(reg & 0xfff0) { case 0x0cc0: /* MV88E1111 */ return DNS323_REV_B1; case 0x0e10: /* MV88E1118 */ return DNS323_REV_C1; default: pr_warning("DNS-323: Unknown PHY ID 0x%04x, assuming rev B1\n", reg & 0xffff); } return DNS323_REV_B1; } static void __init dns323_init(void) { /* Setup basic Orion functions. Need to be called early. */ orion5x_init(); /* Identify revision */ system_rev = dns323_identify_rev(); pr_info("DNS-323: Identified HW revision %c1\n", 'A' + system_rev); /* Just to be tricky, the 5182 has a completely different * set of MPP modes to the 5181. 
*/ switch(system_rev) { case DNS323_REV_A1: orion5x_mpp_conf(dns323a_mpp_modes); writel(0, MPP_DEV_CTRL); /* DEV_D[31:16] */ break; case DNS323_REV_B1: orion5x_mpp_conf(dns323b_mpp_modes); break; case DNS323_REV_C1: orion5x_mpp_conf(dns323c_mpp_modes); break; } /* setup flash mapping * CS3 holds a 8 MB Spansion S29GL064M90TFIR4 */ orion5x_setup_dev_boot_win(DNS323_NOR_BOOT_BASE, DNS323_NOR_BOOT_SIZE); platform_device_register(&dns323_nor_flash); /* Sort out LEDs, Buttons and i2c devices */ switch(system_rev) { case DNS323_REV_A1: /* The 5181 power LED is active low and requires * DNS323_GPIO_LED_POWER1 to also be low. */ dns323ab_leds[0].active_low = 1; gpio_request(DNS323_GPIO_LED_POWER1, "Power Led Enable"); gpio_direction_output(DNS323_GPIO_LED_POWER1, 0); /* Fall through */ case DNS323_REV_B1: i2c_register_board_info(0, dns323ab_i2c_devices, ARRAY_SIZE(dns323ab_i2c_devices)); break; case DNS323_REV_C1: /* Hookup LEDs & Buttons */ dns323_gpio_leds.dev.platform_data = &dns323c_led_data; dns323_button_device.dev.platform_data = &dns323c_button_data; /* Hookup i2c devices and fan driver */ i2c_register_board_info(0, dns323c_i2c_devices, ARRAY_SIZE(dns323c_i2c_devices)); platform_device_register_simple("dns323c-fan", 0, NULL, 0); /* Register fixup for the PHY LEDs */ phy_register_fixup_for_uid(MARVELL_PHY_ID_88E1118, MARVELL_PHY_ID_MASK, dns323c_phy_fixup); } platform_device_register(&dns323_gpio_leds); platform_device_register(&dns323_button_device); /* * Configure peripherals. 
*/ if (dns323_read_mac_addr() < 0) printk("DNS-323: Failed to read MAC address\n"); orion5x_ehci0_init(); orion5x_eth_init(&dns323_eth_data); orion5x_i2c_init(); orion5x_uart0_init(); /* Remaining GPIOs */ switch(system_rev) { case DNS323_REV_A1: /* Poweroff GPIO */ if (gpio_request(DNS323_GPIO_POWER_OFF, "POWEROFF") != 0 || gpio_direction_output(DNS323_GPIO_POWER_OFF, 0) != 0) pr_err("DNS-323: failed to setup power-off GPIO\n"); pm_power_off = dns323a_power_off; break; case DNS323_REV_B1: /* 5182 built-in SATA init */ orion5x_sata_init(&dns323_sata_data); /* The DNS323 rev B1 has flag to indicate the system is up. * Without this flag set, power LED will flash and cannot be * controlled via leds-gpio. */ if (gpio_request(DNS323_GPIO_SYSTEM_UP, "SYS_READY") == 0) gpio_direction_output(DNS323_GPIO_SYSTEM_UP, 1); /* Poweroff GPIO */ if (gpio_request(DNS323_GPIO_POWER_OFF, "POWEROFF") != 0 || gpio_direction_output(DNS323_GPIO_POWER_OFF, 0) != 0) pr_err("DNS-323: failed to setup power-off GPIO\n"); pm_power_off = dns323b_power_off; break; case DNS323_REV_C1: /* 5182 built-in SATA init */ orion5x_sata_init(&dns323_sata_data); /* Poweroff GPIO */ if (gpio_request(DNS323C_GPIO_POWER_OFF, "POWEROFF") != 0 || gpio_direction_output(DNS323C_GPIO_POWER_OFF, 0) != 0) pr_err("DNS-323: failed to setup power-off GPIO\n"); pm_power_off = dns323c_power_off; /* Now, -this- should theorically be done by the sata_mv driver * once I figure out what's going on there. Maybe the behaviour * of the LEDs should be somewhat passed via the platform_data. * for now, just whack the register and make the LEDs happy * * Note: AFAIK, rev B1 needs the same treatement but I'll let * somebody else test it. 
*/ writel(0x5, ORION5X_SATA_VIRT_BASE | 0x2c); break; } } /* Warning: D-Link uses a wrong mach-type (=526) in their bootloader */ MACHINE_START(DNS323, "D-Link DNS-323") /* Maintainer: Herbert Valerio Riedel <hvr@gnu.org> */ .atag_offset = 0x100, .init_machine = dns323_init, .map_io = orion5x_map_io, .init_early = orion5x_init_early, .init_irq = orion5x_init_irq, .timer = &orion5x_timer, .fixup = tag_fixup_mem32, .restart = orion5x_restart, MACHINE_END
gpl-2.0
akw28888/kernel_zte_msm8x25q
drivers/net/ethernet/dec/tulip/media.c
4988
16713
/* drivers/net/ethernet/dec/tulip/media.c Copyright 2000,2001 The Linux Kernel Team Written/copyright 1994-2001 by Donald Becker. This software may be used and distributed according to the terms of the GNU General Public License, incorporated herein by reference. Please submit bugs to http://bugzilla.kernel.org/ . */ #include <linux/kernel.h> #include <linux/mii.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/pci.h> #include "tulip.h" /* The maximum data clock rate is 2.5 Mhz. The minimum timing is usually met by back-to-back PCI I/O cycles, but we insert a delay to avoid "overclocking" issues or future 66Mhz PCI. */ #define mdio_delay() ioread32(mdio_addr) /* Read and write the MII registers using software-generated serial MDIO protocol. It is just different enough from the EEPROM protocol to not share code. The maxium data clock rate is 2.5 Mhz. */ #define MDIO_SHIFT_CLK 0x10000 #define MDIO_DATA_WRITE0 0x00000 #define MDIO_DATA_WRITE1 0x20000 #define MDIO_ENB 0x00000 /* Ignore the 0x02000 databook setting. */ #define MDIO_ENB_IN 0x40000 #define MDIO_DATA_READ 0x80000 static const unsigned char comet_miireg2offset[32] = { 0xB4, 0xB8, 0xBC, 0xC0, 0xC4, 0xC8, 0xCC, 0, 0,0,0,0, 0,0,0,0, 0,0xD0,0,0, 0,0,0,0, 0,0,0,0, 0, 0xD4, 0xD8, 0xDC, }; /* MII transceiver control section. Read and write the MII registers using software-generated serial MDIO protocol. See IEEE 802.3-2002.pdf (Section 2, Chapter "22.2.4 Management functions") or DP83840A data sheet for more details. 
*/ int tulip_mdio_read(struct net_device *dev, int phy_id, int location) { struct tulip_private *tp = netdev_priv(dev); int i; int read_cmd = (0xf6 << 10) | ((phy_id & 0x1f) << 5) | location; int retval = 0; void __iomem *ioaddr = tp->base_addr; void __iomem *mdio_addr = ioaddr + CSR9; unsigned long flags; if (location & ~0x1f) return 0xffff; if (tp->chip_id == COMET && phy_id == 30) { if (comet_miireg2offset[location]) return ioread32(ioaddr + comet_miireg2offset[location]); return 0xffff; } spin_lock_irqsave(&tp->mii_lock, flags); if (tp->chip_id == LC82C168) { iowrite32(0x60020000 + (phy_id<<23) + (location<<18), ioaddr + 0xA0); ioread32(ioaddr + 0xA0); ioread32(ioaddr + 0xA0); for (i = 1000; i >= 0; --i) { barrier(); if ( ! ((retval = ioread32(ioaddr + 0xA0)) & 0x80000000)) break; } spin_unlock_irqrestore(&tp->mii_lock, flags); return retval & 0xffff; } /* Establish sync by sending at least 32 logic ones. */ for (i = 32; i >= 0; i--) { iowrite32(MDIO_ENB | MDIO_DATA_WRITE1, mdio_addr); mdio_delay(); iowrite32(MDIO_ENB | MDIO_DATA_WRITE1 | MDIO_SHIFT_CLK, mdio_addr); mdio_delay(); } /* Shift the read command bits out. */ for (i = 15; i >= 0; i--) { int dataval = (read_cmd & (1 << i)) ? MDIO_DATA_WRITE1 : 0; iowrite32(MDIO_ENB | dataval, mdio_addr); mdio_delay(); iowrite32(MDIO_ENB | dataval | MDIO_SHIFT_CLK, mdio_addr); mdio_delay(); } /* Read the two transition, 16 data, and wire-idle bits. */ for (i = 19; i > 0; i--) { iowrite32(MDIO_ENB_IN, mdio_addr); mdio_delay(); retval = (retval << 1) | ((ioread32(mdio_addr) & MDIO_DATA_READ) ? 
1 : 0); iowrite32(MDIO_ENB_IN | MDIO_SHIFT_CLK, mdio_addr); mdio_delay(); } spin_unlock_irqrestore(&tp->mii_lock, flags); return (retval>>1) & 0xffff; } void tulip_mdio_write(struct net_device *dev, int phy_id, int location, int val) { struct tulip_private *tp = netdev_priv(dev); int i; int cmd = (0x5002 << 16) | ((phy_id & 0x1f) << 23) | (location<<18) | (val & 0xffff); void __iomem *ioaddr = tp->base_addr; void __iomem *mdio_addr = ioaddr + CSR9; unsigned long flags; if (location & ~0x1f) return; if (tp->chip_id == COMET && phy_id == 30) { if (comet_miireg2offset[location]) iowrite32(val, ioaddr + comet_miireg2offset[location]); return; } spin_lock_irqsave(&tp->mii_lock, flags); if (tp->chip_id == LC82C168) { iowrite32(cmd, ioaddr + 0xA0); for (i = 1000; i >= 0; --i) { barrier(); if ( ! (ioread32(ioaddr + 0xA0) & 0x80000000)) break; } spin_unlock_irqrestore(&tp->mii_lock, flags); return; } /* Establish sync by sending 32 logic ones. */ for (i = 32; i >= 0; i--) { iowrite32(MDIO_ENB | MDIO_DATA_WRITE1, mdio_addr); mdio_delay(); iowrite32(MDIO_ENB | MDIO_DATA_WRITE1 | MDIO_SHIFT_CLK, mdio_addr); mdio_delay(); } /* Shift the command bits out. */ for (i = 31; i >= 0; i--) { int dataval = (cmd & (1 << i)) ? MDIO_DATA_WRITE1 : 0; iowrite32(MDIO_ENB | dataval, mdio_addr); mdio_delay(); iowrite32(MDIO_ENB | dataval | MDIO_SHIFT_CLK, mdio_addr); mdio_delay(); } /* Clear out extra bits. */ for (i = 2; i > 0; i--) { iowrite32(MDIO_ENB_IN, mdio_addr); mdio_delay(); iowrite32(MDIO_ENB_IN | MDIO_SHIFT_CLK, mdio_addr); mdio_delay(); } spin_unlock_irqrestore(&tp->mii_lock, flags); } /* Set up the transceiver control registers for the selected media type. 
*/ void tulip_select_media(struct net_device *dev, int startup) { struct tulip_private *tp = netdev_priv(dev); void __iomem *ioaddr = tp->base_addr; struct mediatable *mtable = tp->mtable; u32 new_csr6; int i; if (mtable) { struct medialeaf *mleaf = &mtable->mleaf[tp->cur_index]; unsigned char *p = mleaf->leafdata; switch (mleaf->type) { case 0: /* 21140 non-MII xcvr. */ if (tulip_debug > 1) netdev_dbg(dev, "Using a 21140 non-MII transceiver with control setting %02x\n", p[1]); dev->if_port = p[0]; if (startup) iowrite32(mtable->csr12dir | 0x100, ioaddr + CSR12); iowrite32(p[1], ioaddr + CSR12); new_csr6 = 0x02000000 | ((p[2] & 0x71) << 18); break; case 2: case 4: { u16 setup[5]; u32 csr13val, csr14val, csr15dir, csr15val; for (i = 0; i < 5; i++) setup[i] = get_u16(&p[i*2 + 1]); dev->if_port = p[0] & MEDIA_MASK; if (tulip_media_cap[dev->if_port] & MediaAlwaysFD) tp->full_duplex = 1; if (startup && mtable->has_reset) { struct medialeaf *rleaf = &mtable->mleaf[mtable->has_reset]; unsigned char *rst = rleaf->leafdata; if (tulip_debug > 1) netdev_dbg(dev, "Resetting the transceiver\n"); for (i = 0; i < rst[0]; i++) iowrite32(get_u16(rst + 1 + (i<<1)) << 16, ioaddr + CSR15); } if (tulip_debug > 1) netdev_dbg(dev, "21143 non-MII %s transceiver control %04x/%04x\n", medianame[dev->if_port], setup[0], setup[1]); if (p[0] & 0x40) { /* SIA (CSR13-15) setup values are provided. 
*/ csr13val = setup[0]; csr14val = setup[1]; csr15dir = (setup[3]<<16) | setup[2]; csr15val = (setup[4]<<16) | setup[2]; iowrite32(0, ioaddr + CSR13); iowrite32(csr14val, ioaddr + CSR14); iowrite32(csr15dir, ioaddr + CSR15); /* Direction */ iowrite32(csr15val, ioaddr + CSR15); /* Data */ iowrite32(csr13val, ioaddr + CSR13); } else { csr13val = 1; csr14val = 0; csr15dir = (setup[0]<<16) | 0x0008; csr15val = (setup[1]<<16) | 0x0008; if (dev->if_port <= 4) csr14val = t21142_csr14[dev->if_port]; if (startup) { iowrite32(0, ioaddr + CSR13); iowrite32(csr14val, ioaddr + CSR14); } iowrite32(csr15dir, ioaddr + CSR15); /* Direction */ iowrite32(csr15val, ioaddr + CSR15); /* Data */ if (startup) iowrite32(csr13val, ioaddr + CSR13); } if (tulip_debug > 1) netdev_dbg(dev, "Setting CSR15 to %08x/%08x\n", csr15dir, csr15val); if (mleaf->type == 4) new_csr6 = 0x82020000 | ((setup[2] & 0x71) << 18); else new_csr6 = 0x82420000; break; } case 1: case 3: { int phy_num = p[0]; int init_length = p[1]; u16 *misc_info, tmp_info; dev->if_port = 11; new_csr6 = 0x020E0000; if (mleaf->type == 3) { /* 21142 */ u16 *init_sequence = (u16*)(p+2); u16 *reset_sequence = &((u16*)(p+3))[init_length]; int reset_length = p[2 + init_length*2]; misc_info = reset_sequence + reset_length; if (startup) { int timeout = 10; /* max 1 ms */ for (i = 0; i < reset_length; i++) iowrite32(get_u16(&reset_sequence[i]) << 16, ioaddr + CSR15); /* flush posted writes */ ioread32(ioaddr + CSR15); /* Sect 3.10.3 in DP83840A.pdf (p39) */ udelay(500); /* Section 4.2 in DP83840A.pdf (p43) */ /* and IEEE 802.3 "22.2.4.1.1 Reset" */ while (timeout-- && (tulip_mdio_read (dev, phy_num, MII_BMCR) & BMCR_RESET)) udelay(100); } for (i = 0; i < init_length; i++) iowrite32(get_u16(&init_sequence[i]) << 16, ioaddr + CSR15); ioread32(ioaddr + CSR15); /* flush posted writes */ } else { u8 *init_sequence = p + 2; u8 *reset_sequence = p + 3 + init_length; int reset_length = p[2 + init_length]; misc_info = (u16*)(reset_sequence + 
reset_length); if (startup) { int timeout = 10; /* max 1 ms */ iowrite32(mtable->csr12dir | 0x100, ioaddr + CSR12); for (i = 0; i < reset_length; i++) iowrite32(reset_sequence[i], ioaddr + CSR12); /* flush posted writes */ ioread32(ioaddr + CSR12); /* Sect 3.10.3 in DP83840A.pdf (p39) */ udelay(500); /* Section 4.2 in DP83840A.pdf (p43) */ /* and IEEE 802.3 "22.2.4.1.1 Reset" */ while (timeout-- && (tulip_mdio_read (dev, phy_num, MII_BMCR) & BMCR_RESET)) udelay(100); } for (i = 0; i < init_length; i++) iowrite32(init_sequence[i], ioaddr + CSR12); ioread32(ioaddr + CSR12); /* flush posted writes */ } tmp_info = get_u16(&misc_info[1]); if (tmp_info) tp->advertising[phy_num] = tmp_info | 1; if (tmp_info && startup < 2) { if (tp->mii_advertise == 0) tp->mii_advertise = tp->advertising[phy_num]; if (tulip_debug > 1) netdev_dbg(dev, " Advertising %04x on MII %d\n", tp->mii_advertise, tp->phys[phy_num]); tulip_mdio_write(dev, tp->phys[phy_num], 4, tp->mii_advertise); } break; } case 5: case 6: { u16 setup[5]; new_csr6 = 0; /* FIXME */ for (i = 0; i < 5; i++) setup[i] = get_u16(&p[i*2 + 1]); if (startup && mtable->has_reset) { struct medialeaf *rleaf = &mtable->mleaf[mtable->has_reset]; unsigned char *rst = rleaf->leafdata; if (tulip_debug > 1) netdev_dbg(dev, "Resetting the transceiver\n"); for (i = 0; i < rst[0]; i++) iowrite32(get_u16(rst + 1 + (i<<1)) << 16, ioaddr + CSR15); } break; } default: netdev_dbg(dev, " Invalid media table selection %d\n", mleaf->type); new_csr6 = 0x020E0000; } if (tulip_debug > 1) netdev_dbg(dev, "Using media type %s, CSR12 is %02x\n", medianame[dev->if_port], ioread32(ioaddr + CSR12) & 0xff); } else if (tp->chip_id == LC82C168) { if (startup && ! tp->medialock) dev->if_port = tp->mii_cnt ? 
11 : 0; if (tulip_debug > 1) netdev_dbg(dev, "PNIC PHY status is %3.3x, media %s\n", ioread32(ioaddr + 0xB8), medianame[dev->if_port]); if (tp->mii_cnt) { new_csr6 = 0x810C0000; iowrite32(0x0001, ioaddr + CSR15); iowrite32(0x0201B07A, ioaddr + 0xB8); } else if (startup) { /* Start with 10mbps to do autonegotiation. */ iowrite32(0x32, ioaddr + CSR12); new_csr6 = 0x00420000; iowrite32(0x0001B078, ioaddr + 0xB8); iowrite32(0x0201B078, ioaddr + 0xB8); } else if (dev->if_port == 3 || dev->if_port == 5) { iowrite32(0x33, ioaddr + CSR12); new_csr6 = 0x01860000; /* Trigger autonegotiation. */ iowrite32(startup ? 0x0201F868 : 0x0001F868, ioaddr + 0xB8); } else { iowrite32(0x32, ioaddr + CSR12); new_csr6 = 0x00420000; iowrite32(0x1F078, ioaddr + 0xB8); } } else { /* Unknown chip type with no media table. */ if (tp->default_port == 0) dev->if_port = tp->mii_cnt ? 11 : 3; if (tulip_media_cap[dev->if_port] & MediaIsMII) { new_csr6 = 0x020E0000; } else if (tulip_media_cap[dev->if_port] & MediaIsFx) { new_csr6 = 0x02860000; } else new_csr6 = 0x03860000; if (tulip_debug > 1) netdev_dbg(dev, "No media description table, assuming %s transceiver, CSR12 %02x\n", medianame[dev->if_port], ioread32(ioaddr + CSR12)); } tp->csr6 = new_csr6 | (tp->csr6 & 0xfdff) | (tp->full_duplex ? 0x0200 : 0); mdelay(1); } /* Check the MII negotiated duplex and change the CSR6 setting if required. Return 0 if everything is OK. Return < 0 if the transceiver is missing or has no link beat. 
*/ int tulip_check_duplex(struct net_device *dev) { struct tulip_private *tp = netdev_priv(dev); unsigned int bmsr, lpa, negotiated, new_csr6; bmsr = tulip_mdio_read(dev, tp->phys[0], MII_BMSR); lpa = tulip_mdio_read(dev, tp->phys[0], MII_LPA); if (tulip_debug > 1) dev_info(&dev->dev, "MII status %04x, Link partner report %04x\n", bmsr, lpa); if (bmsr == 0xffff) return -2; if ((bmsr & BMSR_LSTATUS) == 0) { int new_bmsr = tulip_mdio_read(dev, tp->phys[0], MII_BMSR); if ((new_bmsr & BMSR_LSTATUS) == 0) { if (tulip_debug > 1) dev_info(&dev->dev, "No link beat on the MII interface, status %04x\n", new_bmsr); return -1; } } negotiated = lpa & tp->advertising[0]; tp->full_duplex = mii_duplex(tp->full_duplex_lock, negotiated); new_csr6 = tp->csr6; if (negotiated & LPA_100) new_csr6 &= ~TxThreshold; else new_csr6 |= TxThreshold; if (tp->full_duplex) new_csr6 |= FullDuplex; else new_csr6 &= ~FullDuplex; if (new_csr6 != tp->csr6) { tp->csr6 = new_csr6; tulip_restart_rxtx(tp); if (tulip_debug > 0) dev_info(&dev->dev, "Setting %s-duplex based on MII#%d link partner capability of %04x\n", tp->full_duplex ? "full" : "half", tp->phys[0], lpa); return 1; } return 0; } void __devinit tulip_find_mii (struct net_device *dev, int board_idx) { struct tulip_private *tp = netdev_priv(dev); int phyn, phy_idx = 0; int mii_reg0; int mii_advert; unsigned int to_advert, new_bmcr, ane_switch; /* Find the connected MII xcvrs. Doing this in open() would allow detecting external xcvrs later, but takes much time. 
*/ for (phyn = 1; phyn <= 32 && phy_idx < sizeof (tp->phys); phyn++) { int phy = phyn & 0x1f; int mii_status = tulip_mdio_read (dev, phy, MII_BMSR); if ((mii_status & 0x8301) == 0x8001 || ((mii_status & BMSR_100BASE4) == 0 && (mii_status & 0x7800) != 0)) { /* preserve Becker logic, gain indentation level */ } else { continue; } mii_reg0 = tulip_mdio_read (dev, phy, MII_BMCR); mii_advert = tulip_mdio_read (dev, phy, MII_ADVERTISE); ane_switch = 0; /* if not advertising at all, gen an * advertising value from the capability * bits in BMSR */ if ((mii_advert & ADVERTISE_ALL) == 0) { unsigned int tmpadv = tulip_mdio_read (dev, phy, MII_BMSR); mii_advert = ((tmpadv >> 6) & 0x3e0) | 1; } if (tp->mii_advertise) { tp->advertising[phy_idx] = to_advert = tp->mii_advertise; } else if (tp->advertising[phy_idx]) { to_advert = tp->advertising[phy_idx]; } else { tp->advertising[phy_idx] = tp->mii_advertise = to_advert = mii_advert; } tp->phys[phy_idx++] = phy; pr_info("tulip%d: MII transceiver #%d config %04x status %04x advertising %04x\n", board_idx, phy, mii_reg0, mii_status, mii_advert); /* Fixup for DLink with miswired PHY. */ if (mii_advert != to_advert) { pr_debug("tulip%d: Advertising %04x on PHY %d, previously advertising %04x\n", board_idx, to_advert, phy, mii_advert); tulip_mdio_write (dev, phy, 4, to_advert); } /* Enable autonegotiation: some boards default to off. 
*/ if (tp->default_port == 0) { new_bmcr = mii_reg0 | BMCR_ANENABLE; if (new_bmcr != mii_reg0) { new_bmcr |= BMCR_ANRESTART; ane_switch = 1; } } /* ...or disable nway, if forcing media */ else { new_bmcr = mii_reg0 & ~BMCR_ANENABLE; if (new_bmcr != mii_reg0) ane_switch = 1; } /* clear out bits we never want at this point */ new_bmcr &= ~(BMCR_CTST | BMCR_FULLDPLX | BMCR_ISOLATE | BMCR_PDOWN | BMCR_SPEED100 | BMCR_LOOPBACK | BMCR_RESET); if (tp->full_duplex) new_bmcr |= BMCR_FULLDPLX; if (tulip_media_cap[tp->default_port] & MediaIs100) new_bmcr |= BMCR_SPEED100; if (new_bmcr != mii_reg0) { /* some phys need the ANE switch to * happen before forced media settings * will "take." However, we write the * same value twice in order not to * confuse the sane phys. */ if (ane_switch) { tulip_mdio_write (dev, phy, MII_BMCR, new_bmcr); udelay (10); } tulip_mdio_write (dev, phy, MII_BMCR, new_bmcr); } } tp->mii_cnt = phy_idx; if (tp->mtable && tp->mtable->has_mii && phy_idx == 0) { pr_info("tulip%d: ***WARNING***: No MII transceiver found!\n", board_idx); tp->phys[0] = 1; } }
gpl-2.0
zlaja/android_kernel_lge_msm8610
drivers/staging/media/go7007/s2250-board.c
4988
16241
/* * Copyright (C) 2008 Sensoray Company Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License (Version 2) as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. */ #include <linux/module.h> #include <linux/init.h> #include <linux/usb.h> #include <linux/i2c.h> #include <linux/videodev2.h> #include <linux/slab.h> #include <media/v4l2-device.h> #include <media/v4l2-common.h> #include <media/v4l2-subdev.h> #include "go7007-priv.h" MODULE_DESCRIPTION("Sensoray 2250/2251 i2c v4l2 subdev driver"); MODULE_LICENSE("GPL v2"); #define TLV320_ADDRESS 0x34 #define VPX322_ADDR_ANALOGCONTROL1 0x02 #define VPX322_ADDR_BRIGHTNESS0 0x0127 #define VPX322_ADDR_BRIGHTNESS1 0x0131 #define VPX322_ADDR_CONTRAST0 0x0128 #define VPX322_ADDR_CONTRAST1 0x0132 #define VPX322_ADDR_HUE 0x00dc #define VPX322_ADDR_SAT 0x0030 struct go7007_usb_board { unsigned int flags; struct go7007_board_info main_info; }; struct go7007_usb { struct go7007_usb_board *board; struct mutex i2c_lock; struct usb_device *usbdev; struct urb *video_urbs[8]; struct urb *audio_urbs[8]; struct urb *intr_urb; }; static unsigned char aud_regs[] = { 0x1e, 0x00, 0x00, 0x17, 0x02, 0x17, 0x04, 0xf9, 0x06, 0xf9, 0x08, 0x02, 0x0a, 0x00, 0x0c, 0x00, 0x0a, 0x00, 0x0c, 0x00, 0x0e, 0x02, 0x10, 0x00, 0x12, 0x01, 0x00, 0x00, }; static unsigned char vid_regs[] = { 0xF2, 0x0f, 0xAA, 0x00, 0xF8, 0xff, 0x00, 0x00, }; static u16 vid_regs_fp[] = { 0x028, 0x067, 0x120, 0x016, 0x121, 0xcF2, 0x122, 0x0F2, 0x123, 0x00c, 0x124, 
0x2d0, 0x125, 0x2e0, 0x126, 0x004, 0x128, 0x1E0, 0x12A, 0x016, 0x12B, 0x0F2, 0x12C, 0x0F2, 0x12D, 0x00c, 0x12E, 0x2d0, 0x12F, 0x2e0, 0x130, 0x004, 0x132, 0x1E0, 0x140, 0x060, 0x153, 0x00C, 0x154, 0x200, 0x150, 0x801, 0x000, 0x000 }; /* PAL specific values */ static u16 vid_regs_fp_pal[] = { 0x120, 0x017, 0x121, 0xd22, 0x122, 0x122, 0x12A, 0x017, 0x12B, 0x122, 0x12C, 0x122, 0x140, 0x060, 0x000, 0x000, }; struct s2250 { struct v4l2_subdev sd; v4l2_std_id std; int input; int brightness; int contrast; int saturation; int hue; int reg12b_val; int audio_input; struct i2c_client *audio; }; static inline struct s2250 *to_state(struct v4l2_subdev *sd) { return container_of(sd, struct s2250, sd); } /* from go7007-usb.c which is Copyright (C) 2005-2006 Micronas USA Inc.*/ static int go7007_usb_vendor_request(struct go7007 *go, u16 request, u16 value, u16 index, void *transfer_buffer, int length, int in) { struct go7007_usb *usb = go->hpi_context; int timeout = 5000; if (in) { return usb_control_msg(usb->usbdev, usb_rcvctrlpipe(usb->usbdev, 0), request, USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN, value, index, transfer_buffer, length, timeout); } else { return usb_control_msg(usb->usbdev, usb_sndctrlpipe(usb->usbdev, 0), request, USB_TYPE_VENDOR | USB_RECIP_DEVICE, value, index, transfer_buffer, length, timeout); } } /* end from go7007-usb.c which is Copyright (C) 2005-2006 Micronas USA Inc.*/ static int write_reg(struct i2c_client *client, u8 reg, u8 value) { struct go7007 *go = i2c_get_adapdata(client->adapter); struct go7007_usb *usb; int rc; int dev_addr = client->addr << 1; /* firmware wants 8-bit address */ u8 *buf; if (go == NULL) return -ENODEV; if (go->status == STATUS_SHUTDOWN) return -EBUSY; buf = kzalloc(16, GFP_KERNEL); if (buf == NULL) return -ENOMEM; usb = go->hpi_context; if (mutex_lock_interruptible(&usb->i2c_lock) != 0) { printk(KERN_INFO "i2c lock failed\n"); kfree(buf); return -EINTR; } rc = go7007_usb_vendor_request(go, 0x55, dev_addr, (reg<<8 | 
value), buf, 16, 1); mutex_unlock(&usb->i2c_lock); kfree(buf); return rc; } static int write_reg_fp(struct i2c_client *client, u16 addr, u16 val) { struct go7007 *go = i2c_get_adapdata(client->adapter); struct go7007_usb *usb; int rc; u8 *buf; struct s2250 *dec = i2c_get_clientdata(client); if (go == NULL) return -ENODEV; if (go->status == STATUS_SHUTDOWN) return -EBUSY; buf = kzalloc(16, GFP_KERNEL); if (buf == NULL) return -ENOMEM; memset(buf, 0xcd, 6); usb = go->hpi_context; if (mutex_lock_interruptible(&usb->i2c_lock) != 0) { printk(KERN_INFO "i2c lock failed\n"); kfree(buf); return -EINTR; } rc = go7007_usb_vendor_request(go, 0x57, addr, val, buf, 16, 1); mutex_unlock(&usb->i2c_lock); if (rc < 0) { kfree(buf); return rc; } if (buf[0] == 0) { unsigned int subaddr, val_read; subaddr = (buf[4] << 8) + buf[5]; val_read = (buf[2] << 8) + buf[3]; kfree(buf); if (val_read != val) { printk(KERN_INFO "invalid fp write %x %x\n", val_read, val); return -EFAULT; } if (subaddr != addr) { printk(KERN_INFO "invalid fp write addr %x %x\n", subaddr, addr); return -EFAULT; } } else { kfree(buf); return -EFAULT; } /* save last 12b value */ if (addr == 0x12b) dec->reg12b_val = val; return 0; } static int read_reg_fp(struct i2c_client *client, u16 addr, u16 *val) { struct go7007 *go = i2c_get_adapdata(client->adapter); struct go7007_usb *usb; int rc; u8 *buf; if (go == NULL) return -ENODEV; if (go->status == STATUS_SHUTDOWN) return -EBUSY; buf = kzalloc(16, GFP_KERNEL); if (buf == NULL) return -ENOMEM; memset(buf, 0xcd, 6); usb = go->hpi_context; if (mutex_lock_interruptible(&usb->i2c_lock) != 0) { printk(KERN_INFO "i2c lock failed\n"); kfree(buf); return -EINTR; } rc = go7007_usb_vendor_request(go, 0x58, addr, 0, buf, 16, 1); mutex_unlock(&usb->i2c_lock); if (rc < 0) { kfree(buf); return rc; } *val = (buf[0] << 8) | buf[1]; kfree(buf); return 0; } static int write_regs(struct i2c_client *client, u8 *regs) { int i; for (i = 0; !((regs[i] == 0x00) && (regs[i+1] == 0x00)); i += 2) { 
if (write_reg(client, regs[i], regs[i+1]) < 0) { printk(KERN_INFO "s2250: failed\n"); return -1; } } return 0; } static int write_regs_fp(struct i2c_client *client, u16 *regs) { int i; for (i = 0; !((regs[i] == 0x00) && (regs[i+1] == 0x00)); i += 2) { if (write_reg_fp(client, regs[i], regs[i+1]) < 0) { printk(KERN_INFO "s2250: failed fp\n"); return -1; } } return 0; } /* ------------------------------------------------------------------------- */ static int s2250_s_video_routing(struct v4l2_subdev *sd, u32 input, u32 output, u32 config) { struct s2250 *state = to_state(sd); struct i2c_client *client = v4l2_get_subdevdata(sd); int vidsys; vidsys = (state->std == V4L2_STD_NTSC) ? 0x01 : 0x00; if (input == 0) { /* composite */ write_reg_fp(client, 0x20, 0x020 | vidsys); write_reg_fp(client, 0x21, 0x662); write_reg_fp(client, 0x140, 0x060); } else if (input == 1) { /* S-Video */ write_reg_fp(client, 0x20, 0x040 | vidsys); write_reg_fp(client, 0x21, 0x666); write_reg_fp(client, 0x140, 0x060); } else { return -EINVAL; } state->input = input; return 0; } static int s2250_s_std(struct v4l2_subdev *sd, v4l2_std_id norm) { struct s2250 *state = to_state(sd); struct i2c_client *client = v4l2_get_subdevdata(sd); u16 vidsource; vidsource = (state->input == 1) ? 
0x040 : 0x020; switch (norm) { case V4L2_STD_NTSC: write_regs_fp(client, vid_regs_fp); write_reg_fp(client, 0x20, vidsource | 1); break; case V4L2_STD_PAL: write_regs_fp(client, vid_regs_fp); write_regs_fp(client, vid_regs_fp_pal); write_reg_fp(client, 0x20, vidsource); break; default: return -EINVAL; } state->std = norm; return 0; } static int s2250_queryctrl(struct v4l2_subdev *sd, struct v4l2_queryctrl *query) { switch (query->id) { case V4L2_CID_BRIGHTNESS: return v4l2_ctrl_query_fill(query, 0, 100, 1, 50); case V4L2_CID_CONTRAST: return v4l2_ctrl_query_fill(query, 0, 100, 1, 50); case V4L2_CID_SATURATION: return v4l2_ctrl_query_fill(query, 0, 100, 1, 50); case V4L2_CID_HUE: return v4l2_ctrl_query_fill(query, -50, 50, 1, 0); default: return -EINVAL; } return 0; } static int s2250_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl) { struct s2250 *state = to_state(sd); struct i2c_client *client = v4l2_get_subdevdata(sd); int value1; u16 oldvalue; switch (ctrl->id) { case V4L2_CID_BRIGHTNESS: if (ctrl->value > 100) state->brightness = 100; else if (ctrl->value < 0) state->brightness = 0; else state->brightness = ctrl->value; value1 = (state->brightness - 50) * 255 / 100; read_reg_fp(client, VPX322_ADDR_BRIGHTNESS0, &oldvalue); write_reg_fp(client, VPX322_ADDR_BRIGHTNESS0, value1 | (oldvalue & ~0xff)); read_reg_fp(client, VPX322_ADDR_BRIGHTNESS1, &oldvalue); write_reg_fp(client, VPX322_ADDR_BRIGHTNESS1, value1 | (oldvalue & ~0xff)); write_reg_fp(client, 0x140, 0x60); break; case V4L2_CID_CONTRAST: if (ctrl->value > 100) state->contrast = 100; else if (ctrl->value < 0) state->contrast = 0; else state->contrast = ctrl->value; value1 = state->contrast * 0x40 / 100; if (value1 > 0x3f) value1 = 0x3f; /* max */ read_reg_fp(client, VPX322_ADDR_CONTRAST0, &oldvalue); write_reg_fp(client, VPX322_ADDR_CONTRAST0, value1 | (oldvalue & ~0x3f)); read_reg_fp(client, VPX322_ADDR_CONTRAST1, &oldvalue); write_reg_fp(client, VPX322_ADDR_CONTRAST1, value1 | (oldvalue & ~0x3f)); 
write_reg_fp(client, 0x140, 0x60); break; case V4L2_CID_SATURATION: if (ctrl->value > 100) state->saturation = 100; else if (ctrl->value < 0) state->saturation = 0; else state->saturation = ctrl->value; value1 = state->saturation * 4140 / 100; if (value1 > 4094) value1 = 4094; write_reg_fp(client, VPX322_ADDR_SAT, value1); break; case V4L2_CID_HUE: if (ctrl->value > 50) state->hue = 50; else if (ctrl->value < -50) state->hue = -50; else state->hue = ctrl->value; /* clamp the hue range */ value1 = state->hue * 280 / 50; write_reg_fp(client, VPX322_ADDR_HUE, value1); break; default: return -EINVAL; } return 0; } static int s2250_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl) { struct s2250 *state = to_state(sd); switch (ctrl->id) { case V4L2_CID_BRIGHTNESS: ctrl->value = state->brightness; break; case V4L2_CID_CONTRAST: ctrl->value = state->contrast; break; case V4L2_CID_SATURATION: ctrl->value = state->saturation; break; case V4L2_CID_HUE: ctrl->value = state->hue; break; default: return -EINVAL; } return 0; } static int s2250_s_mbus_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *fmt) { struct s2250 *state = to_state(sd); struct i2c_client *client = v4l2_get_subdevdata(sd); if (fmt->height < 640) { write_reg_fp(client, 0x12b, state->reg12b_val | 0x400); write_reg_fp(client, 0x140, 0x060); } else { write_reg_fp(client, 0x12b, state->reg12b_val & ~0x400); write_reg_fp(client, 0x140, 0x060); } return 0; } static int s2250_s_audio_routing(struct v4l2_subdev *sd, u32 input, u32 output, u32 config) { struct s2250 *state = to_state(sd); switch (input) { case 0: write_reg(state->audio, 0x08, 0x02); /* Line In */ break; case 1: write_reg(state->audio, 0x08, 0x04); /* Mic */ break; case 2: write_reg(state->audio, 0x08, 0x05); /* Mic Boost */ break; default: return -EINVAL; } state->audio_input = input; return 0; } static int s2250_log_status(struct v4l2_subdev *sd) { struct s2250 *state = to_state(sd); v4l2_info(sd, "Standard: %s\n", state->std == 
V4L2_STD_NTSC ? "NTSC" : state->std == V4L2_STD_PAL ? "PAL" : state->std == V4L2_STD_SECAM ? "SECAM" : "unknown"); v4l2_info(sd, "Input: %s\n", state->input == 0 ? "Composite" : state->input == 1 ? "S-video" : "error"); v4l2_info(sd, "Brightness: %d\n", state->brightness); v4l2_info(sd, "Contrast: %d\n", state->contrast); v4l2_info(sd, "Saturation: %d\n", state->saturation); v4l2_info(sd, "Hue: %d\n", state->hue); return 0; v4l2_info(sd, "Audio input: %s\n", state->audio_input == 0 ? "Line In" : state->audio_input == 1 ? "Mic" : state->audio_input == 2 ? "Mic Boost" : "error"); return 0; } /* --------------------------------------------------------------------------*/ static const struct v4l2_subdev_core_ops s2250_core_ops = { .log_status = s2250_log_status, .g_ctrl = s2250_g_ctrl, .s_ctrl = s2250_s_ctrl, .queryctrl = s2250_queryctrl, .s_std = s2250_s_std, }; static const struct v4l2_subdev_audio_ops s2250_audio_ops = { .s_routing = s2250_s_audio_routing, }; static const struct v4l2_subdev_video_ops s2250_video_ops = { .s_routing = s2250_s_video_routing, .s_mbus_fmt = s2250_s_mbus_fmt, }; static const struct v4l2_subdev_ops s2250_ops = { .core = &s2250_core_ops, .audio = &s2250_audio_ops, .video = &s2250_video_ops, }; /* --------------------------------------------------------------------------*/ static int s2250_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct i2c_client *audio; struct i2c_adapter *adapter = client->adapter; struct s2250 *state; struct v4l2_subdev *sd; u8 *data; struct go7007 *go = i2c_get_adapdata(adapter); struct go7007_usb *usb = go->hpi_context; audio = i2c_new_dummy(adapter, TLV320_ADDRESS >> 1); if (audio == NULL) return -ENOMEM; state = kmalloc(sizeof(struct s2250), GFP_KERNEL); if (state == NULL) { i2c_unregister_device(audio); return -ENOMEM; } sd = &state->sd; v4l2_i2c_subdev_init(sd, client, &s2250_ops); v4l2_info(sd, "initializing %s at address 0x%x on %s\n", "Sensoray 2250/2251", client->addr, 
client->adapter->name); state->std = V4L2_STD_NTSC; state->brightness = 50; state->contrast = 50; state->saturation = 50; state->hue = 0; state->audio = audio; /* initialize the audio */ if (write_regs(audio, aud_regs) < 0) { printk(KERN_ERR "s2250: error initializing audio\n"); i2c_unregister_device(audio); kfree(state); return 0; } if (write_regs(client, vid_regs) < 0) { printk(KERN_ERR "s2250: error initializing decoder\n"); i2c_unregister_device(audio); kfree(state); return 0; } if (write_regs_fp(client, vid_regs_fp) < 0) { printk(KERN_ERR "s2250: error initializing decoder\n"); i2c_unregister_device(audio); kfree(state); return 0; } /* set default channel */ /* composite */ write_reg_fp(client, 0x20, 0x020 | 1); write_reg_fp(client, 0x21, 0x662); write_reg_fp(client, 0x140, 0x060); /* set default audio input */ state->audio_input = 0; write_reg(client, 0x08, 0x02); /* Line In */ if (mutex_lock_interruptible(&usb->i2c_lock) == 0) { data = kzalloc(16, GFP_KERNEL); if (data != NULL) { int rc; rc = go7007_usb_vendor_request(go, 0x41, 0, 0, data, 16, 1); if (rc > 0) { u8 mask; data[0] = 0; mask = 1<<5; data[0] &= ~mask; data[1] |= mask; go7007_usb_vendor_request(go, 0x40, 0, (data[1]<<8) + data[1], data, 16, 0); } kfree(data); } mutex_unlock(&usb->i2c_lock); } v4l2_info(sd, "initialized successfully\n"); return 0; } static int s2250_remove(struct i2c_client *client) { struct v4l2_subdev *sd = i2c_get_clientdata(client); v4l2_device_unregister_subdev(sd); kfree(to_state(sd)); return 0; } static const struct i2c_device_id s2250_id[] = { { "s2250", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, s2250_id); static struct i2c_driver s2250_driver = { .driver = { .owner = THIS_MODULE, .name = "s2250", }, .probe = s2250_probe, .remove = s2250_remove, .id_table = s2250_id, }; static __init int init_s2250(void) { return i2c_add_driver(&s2250_driver); } static __exit void exit_s2250(void) { i2c_del_driver(&s2250_driver); } module_init(init_s2250); module_exit(exit_s2250);
gpl-2.0
CallMeAldy/AK-Mako
drivers/isdn/hisax/asuscom.c
4988
11621
/* $Id: asuscom.c,v 1.14.2.4 2004/01/13 23:48:39 keil Exp $ * * low level stuff for ASUSCOM NETWORK INC. ISDNLink cards * * Author Karsten Keil * Copyright by Karsten Keil <keil@isdn4linux.de> * * This software may be used and distributed according to the terms * of the GNU General Public License, incorporated herein by reference. * * Thanks to ASUSCOM NETWORK INC. Taiwan and Dynalink NL for information * */ #include <linux/init.h> #include <linux/isapnp.h> #include "hisax.h" #include "isac.h" #include "ipac.h" #include "hscx.h" #include "isdnl1.h" static const char *Asuscom_revision = "$Revision: 1.14.2.4 $"; #define byteout(addr, val) outb(val, addr) #define bytein(addr) inb(addr) #define ASUS_ISAC 0 #define ASUS_HSCX 1 #define ASUS_ADR 2 #define ASUS_CTRL_U7 3 #define ASUS_CTRL_POTS 5 #define ASUS_IPAC_ALE 0 #define ASUS_IPAC_DATA 1 #define ASUS_ISACHSCX 1 #define ASUS_IPAC 2 /* CARD_ADR (Write) */ #define ASUS_RESET 0x80 /* Bit 7 Reset-Leitung */ static inline u_char readreg(unsigned int ale, unsigned int adr, u_char off) { register u_char ret; byteout(ale, off); ret = bytein(adr); return (ret); } static inline void readfifo(unsigned int ale, unsigned int adr, u_char off, u_char *data, int size) { byteout(ale, off); insb(adr, data, size); } static inline void writereg(unsigned int ale, unsigned int adr, u_char off, u_char data) { byteout(ale, off); byteout(adr, data); } static inline void writefifo(unsigned int ale, unsigned int adr, u_char off, u_char *data, int size) { byteout(ale, off); outsb(adr, data, size); } /* Interface functions */ static u_char ReadISAC(struct IsdnCardState *cs, u_char offset) { return (readreg(cs->hw.asus.adr, cs->hw.asus.isac, offset)); } static void WriteISAC(struct IsdnCardState *cs, u_char offset, u_char value) { writereg(cs->hw.asus.adr, cs->hw.asus.isac, offset, value); } static void ReadISACfifo(struct IsdnCardState *cs, u_char *data, int size) { readfifo(cs->hw.asus.adr, cs->hw.asus.isac, 0, data, size); } static void 
WriteISACfifo(struct IsdnCardState *cs, u_char *data, int size) { writefifo(cs->hw.asus.adr, cs->hw.asus.isac, 0, data, size); } static u_char ReadISAC_IPAC(struct IsdnCardState *cs, u_char offset) { return (readreg(cs->hw.asus.adr, cs->hw.asus.isac, offset | 0x80)); } static void WriteISAC_IPAC(struct IsdnCardState *cs, u_char offset, u_char value) { writereg(cs->hw.asus.adr, cs->hw.asus.isac, offset | 0x80, value); } static void ReadISACfifo_IPAC(struct IsdnCardState *cs, u_char *data, int size) { readfifo(cs->hw.asus.adr, cs->hw.asus.isac, 0x80, data, size); } static void WriteISACfifo_IPAC(struct IsdnCardState *cs, u_char *data, int size) { writefifo(cs->hw.asus.adr, cs->hw.asus.isac, 0x80, data, size); } static u_char ReadHSCX(struct IsdnCardState *cs, int hscx, u_char offset) { return (readreg(cs->hw.asus.adr, cs->hw.asus.hscx, offset + (hscx ? 0x40 : 0))); } static void WriteHSCX(struct IsdnCardState *cs, int hscx, u_char offset, u_char value) { writereg(cs->hw.asus.adr, cs->hw.asus.hscx, offset + (hscx ? 0x40 : 0), value); } /* * fast interrupt HSCX stuff goes here */ #define READHSCX(cs, nr, reg) readreg(cs->hw.asus.adr, \ cs->hw.asus.hscx, reg + (nr ? 0x40 : 0)) #define WRITEHSCX(cs, nr, reg, data) writereg(cs->hw.asus.adr, \ cs->hw.asus.hscx, reg + (nr ? 0x40 : 0), data) #define READHSCXFIFO(cs, nr, ptr, cnt) readfifo(cs->hw.asus.adr, \ cs->hw.asus.hscx, (nr ? 0x40 : 0), ptr, cnt) #define WRITEHSCXFIFO(cs, nr, ptr, cnt) writefifo(cs->hw.asus.adr, \ cs->hw.asus.hscx, (nr ? 
0x40 : 0), ptr, cnt) #include "hscx_irq.c" static irqreturn_t asuscom_interrupt(int intno, void *dev_id) { struct IsdnCardState *cs = dev_id; u_char val; u_long flags; spin_lock_irqsave(&cs->lock, flags); val = readreg(cs->hw.asus.adr, cs->hw.asus.hscx, HSCX_ISTA + 0x40); Start_HSCX: if (val) hscx_int_main(cs, val); val = readreg(cs->hw.asus.adr, cs->hw.asus.isac, ISAC_ISTA); Start_ISAC: if (val) isac_interrupt(cs, val); val = readreg(cs->hw.asus.adr, cs->hw.asus.hscx, HSCX_ISTA + 0x40); if (val) { if (cs->debug & L1_DEB_HSCX) debugl1(cs, "HSCX IntStat after IntRoutine"); goto Start_HSCX; } val = readreg(cs->hw.asus.adr, cs->hw.asus.isac, ISAC_ISTA); if (val) { if (cs->debug & L1_DEB_ISAC) debugl1(cs, "ISAC IntStat after IntRoutine"); goto Start_ISAC; } writereg(cs->hw.asus.adr, cs->hw.asus.hscx, HSCX_MASK, 0xFF); writereg(cs->hw.asus.adr, cs->hw.asus.hscx, HSCX_MASK + 0x40, 0xFF); writereg(cs->hw.asus.adr, cs->hw.asus.isac, ISAC_MASK, 0xFF); writereg(cs->hw.asus.adr, cs->hw.asus.isac, ISAC_MASK, 0x0); writereg(cs->hw.asus.adr, cs->hw.asus.hscx, HSCX_MASK, 0x0); writereg(cs->hw.asus.adr, cs->hw.asus.hscx, HSCX_MASK + 0x40, 0x0); spin_unlock_irqrestore(&cs->lock, flags); return IRQ_HANDLED; } static irqreturn_t asuscom_interrupt_ipac(int intno, void *dev_id) { struct IsdnCardState *cs = dev_id; u_char ista, val, icnt = 5; u_long flags; spin_lock_irqsave(&cs->lock, flags); ista = readreg(cs->hw.asus.adr, cs->hw.asus.isac, IPAC_ISTA); Start_IPAC: if (cs->debug & L1_DEB_IPAC) debugl1(cs, "IPAC ISTA %02X", ista); if (ista & 0x0f) { val = readreg(cs->hw.asus.adr, cs->hw.asus.hscx, HSCX_ISTA + 0x40); if (ista & 0x01) val |= 0x01; if (ista & 0x04) val |= 0x02; if (ista & 0x08) val |= 0x04; if (val) hscx_int_main(cs, val); } if (ista & 0x20) { val = 0xfe & readreg(cs->hw.asus.adr, cs->hw.asus.isac, ISAC_ISTA | 0x80); if (val) { isac_interrupt(cs, val); } } if (ista & 0x10) { val = 0x01; isac_interrupt(cs, val); } ista = readreg(cs->hw.asus.adr, cs->hw.asus.isac, IPAC_ISTA); 
if ((ista & 0x3f) && icnt) { icnt--; goto Start_IPAC; } if (!icnt) printk(KERN_WARNING "ASUS IRQ LOOP\n"); writereg(cs->hw.asus.adr, cs->hw.asus.isac, IPAC_MASK, 0xFF); writereg(cs->hw.asus.adr, cs->hw.asus.isac, IPAC_MASK, 0xC0); spin_unlock_irqrestore(&cs->lock, flags); return IRQ_HANDLED; } static void release_io_asuscom(struct IsdnCardState *cs) { int bytecnt = 8; if (cs->hw.asus.cfg_reg) release_region(cs->hw.asus.cfg_reg, bytecnt); } static void reset_asuscom(struct IsdnCardState *cs) { if (cs->subtyp == ASUS_IPAC) writereg(cs->hw.asus.adr, cs->hw.asus.isac, IPAC_POTA2, 0x20); else byteout(cs->hw.asus.adr, ASUS_RESET); /* Reset On */ mdelay(10); if (cs->subtyp == ASUS_IPAC) writereg(cs->hw.asus.adr, cs->hw.asus.isac, IPAC_POTA2, 0x0); else byteout(cs->hw.asus.adr, 0); /* Reset Off */ mdelay(10); if (cs->subtyp == ASUS_IPAC) { writereg(cs->hw.asus.adr, cs->hw.asus.isac, IPAC_CONF, 0x0); writereg(cs->hw.asus.adr, cs->hw.asus.isac, IPAC_ACFG, 0xff); writereg(cs->hw.asus.adr, cs->hw.asus.isac, IPAC_AOE, 0x0); writereg(cs->hw.asus.adr, cs->hw.asus.isac, IPAC_MASK, 0xc0); writereg(cs->hw.asus.adr, cs->hw.asus.isac, IPAC_PCFG, 0x12); } } static int Asus_card_msg(struct IsdnCardState *cs, int mt, void *arg) { u_long flags; switch (mt) { case CARD_RESET: spin_lock_irqsave(&cs->lock, flags); reset_asuscom(cs); spin_unlock_irqrestore(&cs->lock, flags); return (0); case CARD_RELEASE: release_io_asuscom(cs); return (0); case CARD_INIT: spin_lock_irqsave(&cs->lock, flags); cs->debug |= L1_DEB_IPAC; inithscxisac(cs, 3); spin_unlock_irqrestore(&cs->lock, flags); return (0); case CARD_TEST: return (0); } return (0); } #ifdef __ISAPNP__ static struct isapnp_device_id asus_ids[] __devinitdata = { { ISAPNP_VENDOR('A', 'S', 'U'), ISAPNP_FUNCTION(0x1688), ISAPNP_VENDOR('A', 'S', 'U'), ISAPNP_FUNCTION(0x1688), (unsigned long) "Asus1688 PnP" }, { ISAPNP_VENDOR('A', 'S', 'U'), ISAPNP_FUNCTION(0x1690), ISAPNP_VENDOR('A', 'S', 'U'), ISAPNP_FUNCTION(0x1690), (unsigned long) "Asus1690 
PnP" }, { ISAPNP_VENDOR('S', 'I', 'E'), ISAPNP_FUNCTION(0x0020), ISAPNP_VENDOR('S', 'I', 'E'), ISAPNP_FUNCTION(0x0020), (unsigned long) "Isurf2 PnP" }, { ISAPNP_VENDOR('E', 'L', 'F'), ISAPNP_FUNCTION(0x0000), ISAPNP_VENDOR('E', 'L', 'F'), ISAPNP_FUNCTION(0x0000), (unsigned long) "Iscas TE320" }, { 0, } }; static struct isapnp_device_id *ipid __devinitdata = &asus_ids[0]; static struct pnp_card *pnp_c __devinitdata = NULL; #endif int __devinit setup_asuscom(struct IsdnCard *card) { int bytecnt; struct IsdnCardState *cs = card->cs; u_char val; char tmp[64]; strcpy(tmp, Asuscom_revision); printk(KERN_INFO "HiSax: Asuscom ISDNLink driver Rev. %s\n", HiSax_getrev(tmp)); if (cs->typ != ISDN_CTYPE_ASUSCOM) return (0); #ifdef __ISAPNP__ if (!card->para[1] && isapnp_present()) { struct pnp_dev *pnp_d; while (ipid->card_vendor) { if ((pnp_c = pnp_find_card(ipid->card_vendor, ipid->card_device, pnp_c))) { pnp_d = NULL; if ((pnp_d = pnp_find_dev(pnp_c, ipid->vendor, ipid->function, pnp_d))) { int err; printk(KERN_INFO "HiSax: %s detected\n", (char *)ipid->driver_data); pnp_disable_dev(pnp_d); err = pnp_activate_dev(pnp_d); if (err < 0) { printk(KERN_WARNING "%s: pnp_activate_dev ret(%d)\n", __func__, err); return (0); } card->para[1] = pnp_port_start(pnp_d, 0); card->para[0] = pnp_irq(pnp_d, 0); if (!card->para[0] || !card->para[1]) { printk(KERN_ERR "AsusPnP:some resources are missing %ld/%lx\n", card->para[0], card->para[1]); pnp_disable_dev(pnp_d); return (0); } break; } else { printk(KERN_ERR "AsusPnP: PnP error card found, no device\n"); } } ipid++; pnp_c = NULL; } if (!ipid->card_vendor) { printk(KERN_INFO "AsusPnP: no ISAPnP card found\n"); return (0); } } #endif bytecnt = 8; cs->hw.asus.cfg_reg = card->para[1]; cs->irq = card->para[0]; if (!request_region(cs->hw.asus.cfg_reg, bytecnt, "asuscom isdn")) { printk(KERN_WARNING "HiSax: ISDNLink config port %x-%x already in use\n", cs->hw.asus.cfg_reg, cs->hw.asus.cfg_reg + bytecnt); return (0); } printk(KERN_INFO "ISDNLink: 
defined at 0x%x IRQ %d\n", cs->hw.asus.cfg_reg, cs->irq); setup_isac(cs); cs->BC_Read_Reg = &ReadHSCX; cs->BC_Write_Reg = &WriteHSCX; cs->BC_Send_Data = &hscx_fill_fifo; cs->cardmsg = &Asus_card_msg; val = readreg(cs->hw.asus.cfg_reg + ASUS_IPAC_ALE, cs->hw.asus.cfg_reg + ASUS_IPAC_DATA, IPAC_ID); if ((val == 1) || (val == 2)) { cs->subtyp = ASUS_IPAC; cs->hw.asus.adr = cs->hw.asus.cfg_reg + ASUS_IPAC_ALE; cs->hw.asus.isac = cs->hw.asus.cfg_reg + ASUS_IPAC_DATA; cs->hw.asus.hscx = cs->hw.asus.cfg_reg + ASUS_IPAC_DATA; test_and_set_bit(HW_IPAC, &cs->HW_Flags); cs->readisac = &ReadISAC_IPAC; cs->writeisac = &WriteISAC_IPAC; cs->readisacfifo = &ReadISACfifo_IPAC; cs->writeisacfifo = &WriteISACfifo_IPAC; cs->irq_func = &asuscom_interrupt_ipac; printk(KERN_INFO "Asus: IPAC version %x\n", val); } else { cs->subtyp = ASUS_ISACHSCX; cs->hw.asus.adr = cs->hw.asus.cfg_reg + ASUS_ADR; cs->hw.asus.isac = cs->hw.asus.cfg_reg + ASUS_ISAC; cs->hw.asus.hscx = cs->hw.asus.cfg_reg + ASUS_HSCX; cs->hw.asus.u7 = cs->hw.asus.cfg_reg + ASUS_CTRL_U7; cs->hw.asus.pots = cs->hw.asus.cfg_reg + ASUS_CTRL_POTS; cs->readisac = &ReadISAC; cs->writeisac = &WriteISAC; cs->readisacfifo = &ReadISACfifo; cs->writeisacfifo = &WriteISACfifo; cs->irq_func = &asuscom_interrupt; ISACVersion(cs, "ISDNLink:"); if (HscxVersion(cs, "ISDNLink:")) { printk(KERN_WARNING "ISDNLink: wrong HSCX versions check IO address\n"); release_io_asuscom(cs); return (0); } } return (1); }
gpl-2.0
mythos234/AndromedaBacon-CM12.1
arch/x86/crypto/sha1_ssse3_glue.c
4988
5932
/* * Cryptographic API. * * Glue code for the SHA1 Secure Hash Algorithm assembler implementation using * Supplemental SSE3 instructions. * * This file is based on sha1_generic.c * * Copyright (c) Alan Smithee. * Copyright (c) Andrew McDonald <andrew@mcdonald.org.uk> * Copyright (c) Jean-Francois Dive <jef@linuxbe.org> * Copyright (c) Mathias Krause <minipli@googlemail.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 2 of the License, or (at your option) * any later version. * */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <crypto/internal/hash.h> #include <linux/init.h> #include <linux/module.h> #include <linux/mm.h> #include <linux/cryptohash.h> #include <linux/types.h> #include <crypto/sha.h> #include <asm/byteorder.h> #include <asm/i387.h> #include <asm/xcr.h> #include <asm/xsave.h> asmlinkage void sha1_transform_ssse3(u32 *digest, const char *data, unsigned int rounds); #ifdef SHA1_ENABLE_AVX_SUPPORT asmlinkage void sha1_transform_avx(u32 *digest, const char *data, unsigned int rounds); #endif static asmlinkage void (*sha1_transform_asm)(u32 *, const char *, unsigned int); static int sha1_ssse3_init(struct shash_desc *desc) { struct sha1_state *sctx = shash_desc_ctx(desc); *sctx = (struct sha1_state){ .state = { SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4 }, }; return 0; } static int __sha1_ssse3_update(struct shash_desc *desc, const u8 *data, unsigned int len, unsigned int partial) { struct sha1_state *sctx = shash_desc_ctx(desc); unsigned int done = 0; sctx->count += len; if (partial) { done = SHA1_BLOCK_SIZE - partial; memcpy(sctx->buffer + partial, data, done); sha1_transform_asm(sctx->state, sctx->buffer, 1); } if (len - done >= SHA1_BLOCK_SIZE) { const unsigned int rounds = (len - done) / SHA1_BLOCK_SIZE; sha1_transform_asm(sctx->state, data + done, rounds); done += rounds * SHA1_BLOCK_SIZE; } 
memcpy(sctx->buffer, data + done, len - done); return 0; } static int sha1_ssse3_update(struct shash_desc *desc, const u8 *data, unsigned int len) { struct sha1_state *sctx = shash_desc_ctx(desc); unsigned int partial = sctx->count % SHA1_BLOCK_SIZE; int res; /* Handle the fast case right here */ if (partial + len < SHA1_BLOCK_SIZE) { sctx->count += len; memcpy(sctx->buffer + partial, data, len); return 0; } if (!irq_fpu_usable()) { res = crypto_sha1_update(desc, data, len); } else { kernel_fpu_begin(); res = __sha1_ssse3_update(desc, data, len, partial); kernel_fpu_end(); } return res; } /* Add padding and return the message digest. */ static int sha1_ssse3_final(struct shash_desc *desc, u8 *out) { struct sha1_state *sctx = shash_desc_ctx(desc); unsigned int i, index, padlen; __be32 *dst = (__be32 *)out; __be64 bits; static const u8 padding[SHA1_BLOCK_SIZE] = { 0x80, }; bits = cpu_to_be64(sctx->count << 3); /* Pad out to 56 mod 64 and append length */ index = sctx->count % SHA1_BLOCK_SIZE; padlen = (index < 56) ? 
(56 - index) : ((SHA1_BLOCK_SIZE+56) - index); if (!irq_fpu_usable()) { crypto_sha1_update(desc, padding, padlen); crypto_sha1_update(desc, (const u8 *)&bits, sizeof(bits)); } else { kernel_fpu_begin(); /* We need to fill a whole block for __sha1_ssse3_update() */ if (padlen <= 56) { sctx->count += padlen; memcpy(sctx->buffer + index, padding, padlen); } else { __sha1_ssse3_update(desc, padding, padlen, index); } __sha1_ssse3_update(desc, (const u8 *)&bits, sizeof(bits), 56); kernel_fpu_end(); } /* Store state in digest */ for (i = 0; i < 5; i++) dst[i] = cpu_to_be32(sctx->state[i]); /* Wipe context */ memset(sctx, 0, sizeof(*sctx)); return 0; } static int sha1_ssse3_export(struct shash_desc *desc, void *out) { struct sha1_state *sctx = shash_desc_ctx(desc); memcpy(out, sctx, sizeof(*sctx)); return 0; } static int sha1_ssse3_import(struct shash_desc *desc, const void *in) { struct sha1_state *sctx = shash_desc_ctx(desc); memcpy(sctx, in, sizeof(*sctx)); return 0; } static struct shash_alg alg = { .digestsize = SHA1_DIGEST_SIZE, .init = sha1_ssse3_init, .update = sha1_ssse3_update, .final = sha1_ssse3_final, .export = sha1_ssse3_export, .import = sha1_ssse3_import, .descsize = sizeof(struct sha1_state), .statesize = sizeof(struct sha1_state), .base = { .cra_name = "sha1", .cra_driver_name= "sha1-ssse3", .cra_priority = 150, .cra_flags = CRYPTO_ALG_TYPE_SHASH, .cra_blocksize = SHA1_BLOCK_SIZE, .cra_module = THIS_MODULE, } }; #ifdef SHA1_ENABLE_AVX_SUPPORT static bool __init avx_usable(void) { u64 xcr0; if (!cpu_has_avx || !cpu_has_osxsave) return false; xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK); if ((xcr0 & (XSTATE_SSE | XSTATE_YMM)) != (XSTATE_SSE | XSTATE_YMM)) { pr_info("AVX detected but unusable.\n"); return false; } return true; } #endif static int __init sha1_ssse3_mod_init(void) { /* test for SSSE3 first */ if (cpu_has_ssse3) sha1_transform_asm = sha1_transform_ssse3; #ifdef SHA1_ENABLE_AVX_SUPPORT /* allow AVX to override SSSE3, it's a little faster */ if 
(avx_usable()) sha1_transform_asm = sha1_transform_avx; #endif if (sha1_transform_asm) { pr_info("Using %s optimized SHA-1 implementation\n", sha1_transform_asm == sha1_transform_ssse3 ? "SSSE3" : "AVX"); return crypto_register_shash(&alg); } pr_info("Neither AVX nor SSSE3 is available/usable.\n"); return -ENODEV; } static void __exit sha1_ssse3_mod_fini(void) { crypto_unregister_shash(&alg); } module_init(sha1_ssse3_mod_init); module_exit(sha1_ssse3_mod_fini); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm, Supplemental SSE3 accelerated"); MODULE_ALIAS("sha1");
gpl-2.0
kozmikkick/tripndroid-endeavoru-3.5.7
drivers/isdn/hisax/w6692.c
4988
29406
/* $Id: w6692.c,v 1.18.2.4 2004/02/11 13:21:34 keil Exp $ * * Winbond W6692 specific routines * * Author Petr Novak * Copyright by Petr Novak <petr.novak@i.cz> * * This software may be used and distributed according to the terms * of the GNU General Public License, incorporated herein by reference. * */ #include <linux/init.h> #include "hisax.h" #include "w6692.h" #include "isdnl1.h" #include <linux/interrupt.h> #include <linux/pci.h> #include <linux/slab.h> /* table entry in the PCI devices list */ typedef struct { int vendor_id; int device_id; char *vendor_name; char *card_name; } PCI_ENTRY; static const PCI_ENTRY id_list[] = { {PCI_VENDOR_ID_WINBOND2, PCI_DEVICE_ID_WINBOND2_6692, "Winbond", "W6692"}, {PCI_VENDOR_ID_DYNALINK, PCI_DEVICE_ID_DYNALINK_IS64PH, "Dynalink/AsusCom", "IS64PH"}, {0, 0, "U.S.Robotics", "ISDN PCI Card TA"} }; #define W6692_SV_USR 0x16ec #define W6692_SD_USR 0x3409 #define W6692_WINBOND 0 #define W6692_DYNALINK 1 #define W6692_USR 2 static const char *w6692_revision = "$Revision: 1.18.2.4 $"; #define DBUSY_TIMER_VALUE 80 static char *W6692Ver[] = {"W6692 V00", "W6692 V01", "W6692 V10", "W6692 V11"}; static void W6692Version(struct IsdnCardState *cs, char *s) { int val; val = cs->readW6692(cs, W_D_RBCH); printk(KERN_INFO "%s Winbond W6692 version (%x): %s\n", s, val, W6692Ver[(val >> 6) & 3]); } static void ph_command(struct IsdnCardState *cs, unsigned int command) { if (cs->debug & L1_DEB_ISAC) debugl1(cs, "ph_command %x", command); cs->writeisac(cs, W_CIX, command); } static void W6692_new_ph(struct IsdnCardState *cs) { switch (cs->dc.w6692.ph_state) { case (W_L1CMD_RST): ph_command(cs, W_L1CMD_DRC); l1_msg(cs, HW_RESET | INDICATION, NULL); /* fallthru */ case (W_L1IND_CD): l1_msg(cs, HW_DEACTIVATE | CONFIRM, NULL); break; case (W_L1IND_DRD): l1_msg(cs, HW_DEACTIVATE | INDICATION, NULL); break; case (W_L1IND_CE): l1_msg(cs, HW_POWERUP | CONFIRM, NULL); break; case (W_L1IND_LD): l1_msg(cs, HW_RSYNC | INDICATION, NULL); break; case 
(W_L1IND_ARD): l1_msg(cs, HW_INFO2 | INDICATION, NULL); break; case (W_L1IND_AI8): l1_msg(cs, HW_INFO4_P8 | INDICATION, NULL); break; case (W_L1IND_AI10): l1_msg(cs, HW_INFO4_P10 | INDICATION, NULL); break; default: break; } } static void W6692_bh(struct work_struct *work) { struct IsdnCardState *cs = container_of(work, struct IsdnCardState, tqueue); struct PStack *stptr; if (test_and_clear_bit(D_CLEARBUSY, &cs->event)) { if (cs->debug) debugl1(cs, "D-Channel Busy cleared"); stptr = cs->stlist; while (stptr != NULL) { stptr->l1.l1l2(stptr, PH_PAUSE | CONFIRM, NULL); stptr = stptr->next; } } if (test_and_clear_bit(D_L1STATECHANGE, &cs->event)) W6692_new_ph(cs); if (test_and_clear_bit(D_RCVBUFREADY, &cs->event)) DChannel_proc_rcv(cs); if (test_and_clear_bit(D_XMTBUFREADY, &cs->event)) DChannel_proc_xmt(cs); /* if (test_and_clear_bit(D_RX_MON1, &cs->event)) arcofi_fsm(cs, ARCOFI_RX_END, NULL); if (test_and_clear_bit(D_TX_MON1, &cs->event)) arcofi_fsm(cs, ARCOFI_TX_END, NULL); */ } static void W6692_empty_fifo(struct IsdnCardState *cs, int count) { u_char *ptr; if ((cs->debug & L1_DEB_ISAC) && !(cs->debug & L1_DEB_ISAC_FIFO)) debugl1(cs, "W6692_empty_fifo"); if ((cs->rcvidx + count) >= MAX_DFRAME_LEN_L1) { if (cs->debug & L1_DEB_WARN) debugl1(cs, "W6692_empty_fifo overrun %d", cs->rcvidx + count); cs->writeW6692(cs, W_D_CMDR, W_D_CMDR_RACK); cs->rcvidx = 0; return; } ptr = cs->rcvbuf + cs->rcvidx; cs->rcvidx += count; cs->readW6692fifo(cs, ptr, count); cs->writeW6692(cs, W_D_CMDR, W_D_CMDR_RACK); if (cs->debug & L1_DEB_ISAC_FIFO) { char *t = cs->dlog; t += sprintf(t, "W6692_empty_fifo cnt %d", count); QuickHex(t, ptr, count); debugl1(cs, cs->dlog); } } static void W6692_fill_fifo(struct IsdnCardState *cs) { int count, more; u_char *ptr; if ((cs->debug & L1_DEB_ISAC) && !(cs->debug & L1_DEB_ISAC_FIFO)) debugl1(cs, "W6692_fill_fifo"); if (!cs->tx_skb) return; count = cs->tx_skb->len; if (count <= 0) return; more = 0; if (count > W_D_FIFO_THRESH) { more = !0; count = 
W_D_FIFO_THRESH; } ptr = cs->tx_skb->data; skb_pull(cs->tx_skb, count); cs->tx_cnt += count; cs->writeW6692fifo(cs, ptr, count); cs->writeW6692(cs, W_D_CMDR, more ? W_D_CMDR_XMS : (W_D_CMDR_XMS | W_D_CMDR_XME)); if (test_and_set_bit(FLG_DBUSY_TIMER, &cs->HW_Flags)) { debugl1(cs, "W6692_fill_fifo dbusytimer running"); del_timer(&cs->dbusytimer); } init_timer(&cs->dbusytimer); cs->dbusytimer.expires = jiffies + ((DBUSY_TIMER_VALUE * HZ) / 1000); add_timer(&cs->dbusytimer); if (cs->debug & L1_DEB_ISAC_FIFO) { char *t = cs->dlog; t += sprintf(t, "W6692_fill_fifo cnt %d", count); QuickHex(t, ptr, count); debugl1(cs, cs->dlog); } } static void W6692B_empty_fifo(struct BCState *bcs, int count) { u_char *ptr; struct IsdnCardState *cs = bcs->cs; if ((cs->debug & L1_DEB_HSCX) && !(cs->debug & L1_DEB_HSCX_FIFO)) debugl1(cs, "W6692B_empty_fifo"); if (bcs->hw.w6692.rcvidx + count > HSCX_BUFMAX) { if (cs->debug & L1_DEB_WARN) debugl1(cs, "W6692B_empty_fifo: incoming packet too large"); cs->BC_Write_Reg(cs, bcs->channel, W_B_CMDR, W_B_CMDR_RACK | W_B_CMDR_RACT); bcs->hw.w6692.rcvidx = 0; return; } ptr = bcs->hw.w6692.rcvbuf + bcs->hw.w6692.rcvidx; bcs->hw.w6692.rcvidx += count; READW6692BFIFO(cs, bcs->channel, ptr, count); cs->BC_Write_Reg(cs, bcs->channel, W_B_CMDR, W_B_CMDR_RACK | W_B_CMDR_RACT); if (cs->debug & L1_DEB_HSCX_FIFO) { char *t = bcs->blog; t += sprintf(t, "W6692B_empty_fifo %c cnt %d", bcs->channel + '1', count); QuickHex(t, ptr, count); debugl1(cs, bcs->blog); } } static void W6692B_fill_fifo(struct BCState *bcs) { struct IsdnCardState *cs = bcs->cs; int more, count; u_char *ptr; if (!bcs->tx_skb) return; if (bcs->tx_skb->len <= 0) return; more = (bcs->mode == L1_MODE_TRANS) ? 1 : 0; if (bcs->tx_skb->len > W_B_FIFO_THRESH) { more = 1; count = W_B_FIFO_THRESH; } else count = bcs->tx_skb->len; if ((cs->debug & L1_DEB_HSCX) && !(cs->debug & L1_DEB_HSCX_FIFO)) debugl1(cs, "W6692B_fill_fifo%s%d", (more ? 
" " : " last "), count); ptr = bcs->tx_skb->data; skb_pull(bcs->tx_skb, count); bcs->tx_cnt -= count; bcs->hw.w6692.count += count; WRITEW6692BFIFO(cs, bcs->channel, ptr, count); cs->BC_Write_Reg(cs, bcs->channel, W_B_CMDR, W_B_CMDR_RACT | W_B_CMDR_XMS | (more ? 0 : W_B_CMDR_XME)); if (cs->debug & L1_DEB_HSCX_FIFO) { char *t = bcs->blog; t += sprintf(t, "W6692B_fill_fifo %c cnt %d", bcs->channel + '1', count); QuickHex(t, ptr, count); debugl1(cs, bcs->blog); } } static void W6692B_interrupt(struct IsdnCardState *cs, u_char bchan) { u_char val; u_char r; struct BCState *bcs; struct sk_buff *skb; int count; bcs = (cs->bcs->channel == bchan) ? cs->bcs : (cs->bcs + 1); val = cs->BC_Read_Reg(cs, bchan, W_B_EXIR); debugl1(cs, "W6692B chan %d B_EXIR 0x%02X", bchan, val); if (!test_bit(BC_FLG_INIT, &bcs->Flag)) { debugl1(cs, "W6692B not INIT yet"); return; } if (val & W_B_EXI_RME) { /* RME */ r = cs->BC_Read_Reg(cs, bchan, W_B_STAR); if (r & (W_B_STAR_RDOV | W_B_STAR_CRCE | W_B_STAR_RMB)) { if (cs->debug & L1_DEB_WARN) debugl1(cs, "W6692 B STAR %x", r); if ((r & W_B_STAR_RDOV) && bcs->mode) if (cs->debug & L1_DEB_WARN) debugl1(cs, "W6692 B RDOV mode=%d", bcs->mode); if (r & W_B_STAR_CRCE) if (cs->debug & L1_DEB_WARN) debugl1(cs, "W6692 B CRC error"); cs->BC_Write_Reg(cs, bchan, W_B_CMDR, W_B_CMDR_RACK | W_B_CMDR_RRST | W_B_CMDR_RACT); } else { count = cs->BC_Read_Reg(cs, bchan, W_B_RBCL) & (W_B_FIFO_THRESH - 1); if (count == 0) count = W_B_FIFO_THRESH; W6692B_empty_fifo(bcs, count); if ((count = bcs->hw.w6692.rcvidx) > 0) { if (cs->debug & L1_DEB_HSCX_FIFO) debugl1(cs, "W6692 Bchan Frame %d", count); if (!(skb = dev_alloc_skb(count))) printk(KERN_WARNING "W6692: Bchan receive out of memory\n"); else { memcpy(skb_put(skb, count), bcs->hw.w6692.rcvbuf, count); skb_queue_tail(&bcs->rqueue, skb); } } } bcs->hw.w6692.rcvidx = 0; schedule_event(bcs, B_RCVBUFREADY); } if (val & W_B_EXI_RMR) { /* RMR */ W6692B_empty_fifo(bcs, W_B_FIFO_THRESH); r = cs->BC_Read_Reg(cs, bchan, 
W_B_STAR); if (r & W_B_STAR_RDOV) { if (cs->debug & L1_DEB_WARN) debugl1(cs, "W6692 B RDOV(RMR) mode=%d", bcs->mode); cs->BC_Write_Reg(cs, bchan, W_B_CMDR, W_B_CMDR_RACK | W_B_CMDR_RRST | W_B_CMDR_RACT); if (bcs->mode != L1_MODE_TRANS) bcs->hw.w6692.rcvidx = 0; } if (bcs->mode == L1_MODE_TRANS) { /* receive audio data */ if (!(skb = dev_alloc_skb(W_B_FIFO_THRESH))) printk(KERN_WARNING "HiSax: receive out of memory\n"); else { memcpy(skb_put(skb, W_B_FIFO_THRESH), bcs->hw.w6692.rcvbuf, W_B_FIFO_THRESH); skb_queue_tail(&bcs->rqueue, skb); } bcs->hw.w6692.rcvidx = 0; schedule_event(bcs, B_RCVBUFREADY); } } if (val & W_B_EXI_XDUN) { /* XDUN */ cs->BC_Write_Reg(cs, bchan, W_B_CMDR, W_B_CMDR_XRST | W_B_CMDR_RACT); if (cs->debug & L1_DEB_WARN) debugl1(cs, "W6692 B EXIR %x Lost TX", val); if (bcs->mode == 1) W6692B_fill_fifo(bcs); else { /* Here we lost an TX interrupt, so * restart transmitting the whole frame. */ if (bcs->tx_skb) { skb_push(bcs->tx_skb, bcs->hw.w6692.count); bcs->tx_cnt += bcs->hw.w6692.count; bcs->hw.w6692.count = 0; } } return; } if (val & W_B_EXI_XFR) { /* XFR */ r = cs->BC_Read_Reg(cs, bchan, W_B_STAR); if (r & W_B_STAR_XDOW) { if (cs->debug & L1_DEB_WARN) debugl1(cs, "W6692 B STAR %x XDOW", r); cs->BC_Write_Reg(cs, bchan, W_B_CMDR, W_B_CMDR_XRST | W_B_CMDR_RACT); if (bcs->tx_skb && (bcs->mode != 1)) { skb_push(bcs->tx_skb, bcs->hw.w6692.count); bcs->tx_cnt += bcs->hw.w6692.count; bcs->hw.w6692.count = 0; } } if (bcs->tx_skb) { if (bcs->tx_skb->len) { W6692B_fill_fifo(bcs); return; } else { if (test_bit(FLG_LLI_L1WAKEUP, &bcs->st->lli.flag) && (PACKET_NOACK != bcs->tx_skb->pkt_type)) { u_long flags; spin_lock_irqsave(&bcs->aclock, flags); bcs->ackcnt += bcs->hw.w6692.count; spin_unlock_irqrestore(&bcs->aclock, flags); schedule_event(bcs, B_ACKPENDING); } dev_kfree_skb_irq(bcs->tx_skb); bcs->hw.w6692.count = 0; bcs->tx_skb = NULL; } } if ((bcs->tx_skb = skb_dequeue(&bcs->squeue))) { bcs->hw.w6692.count = 0; test_and_set_bit(BC_FLG_BUSY, &bcs->Flag); 
W6692B_fill_fifo(bcs); } else { test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag); schedule_event(bcs, B_XMTBUFREADY); } } } static irqreturn_t W6692_interrupt(int intno, void *dev_id) { struct IsdnCardState *cs = dev_id; u_char val, exval, v1; struct sk_buff *skb; u_int count; u_long flags; int icnt = 5; spin_lock_irqsave(&cs->lock, flags); val = cs->readW6692(cs, W_ISTA); if (!val) { spin_unlock_irqrestore(&cs->lock, flags); return IRQ_NONE; } StartW6692: if (cs->debug & L1_DEB_ISAC) debugl1(cs, "W6692 ISTA %x", val); if (val & W_INT_D_RME) { /* RME */ exval = cs->readW6692(cs, W_D_RSTA); if (exval & (W_D_RSTA_RDOV | W_D_RSTA_CRCE | W_D_RSTA_RMB)) { if (exval & W_D_RSTA_RDOV) if (cs->debug & L1_DEB_WARN) debugl1(cs, "W6692 RDOV"); if (exval & W_D_RSTA_CRCE) if (cs->debug & L1_DEB_WARN) debugl1(cs, "W6692 D-channel CRC error"); if (exval & W_D_RSTA_RMB) if (cs->debug & L1_DEB_WARN) debugl1(cs, "W6692 D-channel ABORT"); cs->writeW6692(cs, W_D_CMDR, W_D_CMDR_RACK | W_D_CMDR_RRST); } else { count = cs->readW6692(cs, W_D_RBCL) & (W_D_FIFO_THRESH - 1); if (count == 0) count = W_D_FIFO_THRESH; W6692_empty_fifo(cs, count); if ((count = cs->rcvidx) > 0) { cs->rcvidx = 0; if (!(skb = alloc_skb(count, GFP_ATOMIC))) printk(KERN_WARNING "HiSax: D receive out of memory\n"); else { memcpy(skb_put(skb, count), cs->rcvbuf, count); skb_queue_tail(&cs->rq, skb); } } } cs->rcvidx = 0; schedule_event(cs, D_RCVBUFREADY); } if (val & W_INT_D_RMR) { /* RMR */ W6692_empty_fifo(cs, W_D_FIFO_THRESH); } if (val & W_INT_D_XFR) { /* XFR */ if (test_and_clear_bit(FLG_DBUSY_TIMER, &cs->HW_Flags)) del_timer(&cs->dbusytimer); if (test_and_clear_bit(FLG_L1_DBUSY, &cs->HW_Flags)) schedule_event(cs, D_CLEARBUSY); if (cs->tx_skb) { if (cs->tx_skb->len) { W6692_fill_fifo(cs); goto afterXFR; } else { dev_kfree_skb_irq(cs->tx_skb); cs->tx_cnt = 0; cs->tx_skb = NULL; } } if ((cs->tx_skb = skb_dequeue(&cs->sq))) { cs->tx_cnt = 0; W6692_fill_fifo(cs); } else schedule_event(cs, D_XMTBUFREADY); } afterXFR: if (val 
& (W_INT_XINT0 | W_INT_XINT1)) { /* XINT0/1 - never */ if (cs->debug & L1_DEB_ISAC) debugl1(cs, "W6692 spurious XINT!"); } if (val & W_INT_D_EXI) { /* EXI */ exval = cs->readW6692(cs, W_D_EXIR); if (cs->debug & L1_DEB_WARN) debugl1(cs, "W6692 D_EXIR %02x", exval); if (exval & (W_D_EXI_XDUN | W_D_EXI_XCOL)) { /* Transmit underrun/collision */ debugl1(cs, "W6692 D-chan underrun/collision"); printk(KERN_WARNING "HiSax: W6692 XDUN/XCOL\n"); if (test_and_clear_bit(FLG_DBUSY_TIMER, &cs->HW_Flags)) del_timer(&cs->dbusytimer); if (test_and_clear_bit(FLG_L1_DBUSY, &cs->HW_Flags)) schedule_event(cs, D_CLEARBUSY); if (cs->tx_skb) { /* Restart frame */ skb_push(cs->tx_skb, cs->tx_cnt); cs->tx_cnt = 0; W6692_fill_fifo(cs); } else { printk(KERN_WARNING "HiSax: W6692 XDUN/XCOL no skb\n"); debugl1(cs, "W6692 XDUN/XCOL no skb"); cs->writeW6692(cs, W_D_CMDR, W_D_CMDR_XRST); } } if (exval & W_D_EXI_RDOV) { /* RDOV */ debugl1(cs, "W6692 D-channel RDOV"); printk(KERN_WARNING "HiSax: W6692 D-RDOV\n"); cs->writeW6692(cs, W_D_CMDR, W_D_CMDR_RRST); } if (exval & W_D_EXI_TIN2) { /* TIN2 - never */ debugl1(cs, "W6692 spurious TIN2 interrupt"); } if (exval & W_D_EXI_MOC) { /* MOC - not supported */ debugl1(cs, "W6692 spurious MOC interrupt"); v1 = cs->readW6692(cs, W_MOSR); debugl1(cs, "W6692 MOSR %02x", v1); } if (exval & W_D_EXI_ISC) { /* ISC - Level1 change */ v1 = cs->readW6692(cs, W_CIR); if (cs->debug & L1_DEB_ISAC) debugl1(cs, "W6692 ISC CIR=0x%02X", v1); if (v1 & W_CIR_ICC) { cs->dc.w6692.ph_state = v1 & W_CIR_COD_MASK; if (cs->debug & L1_DEB_ISAC) debugl1(cs, "ph_state_change %x", cs->dc.w6692.ph_state); schedule_event(cs, D_L1STATECHANGE); } if (v1 & W_CIR_SCC) { v1 = cs->readW6692(cs, W_SQR); debugl1(cs, "W6692 SCC SQR=0x%02X", v1); } } if (exval & W_D_EXI_WEXP) { debugl1(cs, "W6692 spurious WEXP interrupt!"); } if (exval & W_D_EXI_TEXP) { debugl1(cs, "W6692 spurious TEXP interrupt!"); } } if (val & W_INT_B1_EXI) { debugl1(cs, "W6692 B channel 1 interrupt"); W6692B_interrupt(cs, 
0); } if (val & W_INT_B2_EXI) { debugl1(cs, "W6692 B channel 2 interrupt"); W6692B_interrupt(cs, 1); } val = cs->readW6692(cs, W_ISTA); if (val && icnt) { icnt--; goto StartW6692; } if (!icnt) { printk(KERN_WARNING "W6692 IRQ LOOP\n"); cs->writeW6692(cs, W_IMASK, 0xff); } spin_unlock_irqrestore(&cs->lock, flags); return IRQ_HANDLED; } static void W6692_l1hw(struct PStack *st, int pr, void *arg) { struct IsdnCardState *cs = (struct IsdnCardState *) st->l1.hardware; struct sk_buff *skb = arg; u_long flags; int val; switch (pr) { case (PH_DATA | REQUEST): if (cs->debug & DEB_DLOG_HEX) LogFrame(cs, skb->data, skb->len); if (cs->debug & DEB_DLOG_VERBOSE) dlogframe(cs, skb, 0); spin_lock_irqsave(&cs->lock, flags); if (cs->tx_skb) { skb_queue_tail(&cs->sq, skb); #ifdef L2FRAME_DEBUG /* psa */ if (cs->debug & L1_DEB_LAPD) Logl2Frame(cs, skb, "PH_DATA Queued", 0); #endif } else { cs->tx_skb = skb; cs->tx_cnt = 0; #ifdef L2FRAME_DEBUG /* psa */ if (cs->debug & L1_DEB_LAPD) Logl2Frame(cs, skb, "PH_DATA", 0); #endif W6692_fill_fifo(cs); } spin_unlock_irqrestore(&cs->lock, flags); break; case (PH_PULL | INDICATION): spin_lock_irqsave(&cs->lock, flags); if (cs->tx_skb) { if (cs->debug & L1_DEB_WARN) debugl1(cs, " l2l1 tx_skb exist this shouldn't happen"); skb_queue_tail(&cs->sq, skb); spin_unlock_irqrestore(&cs->lock, flags); break; } if (cs->debug & DEB_DLOG_HEX) LogFrame(cs, skb->data, skb->len); if (cs->debug & DEB_DLOG_VERBOSE) dlogframe(cs, skb, 0); cs->tx_skb = skb; cs->tx_cnt = 0; #ifdef L2FRAME_DEBUG /* psa */ if (cs->debug & L1_DEB_LAPD) Logl2Frame(cs, skb, "PH_DATA_PULLED", 0); #endif W6692_fill_fifo(cs); spin_unlock_irqrestore(&cs->lock, flags); break; case (PH_PULL | REQUEST): #ifdef L2FRAME_DEBUG /* psa */ if (cs->debug & L1_DEB_LAPD) debugl1(cs, "-> PH_REQUEST_PULL"); #endif if (!cs->tx_skb) { test_and_clear_bit(FLG_L1_PULL_REQ, &st->l1.Flags); st->l1.l1l2(st, PH_PULL | CONFIRM, NULL); } else test_and_set_bit(FLG_L1_PULL_REQ, &st->l1.Flags); break; case (HW_RESET | 
REQUEST): spin_lock_irqsave(&cs->lock, flags); if ((cs->dc.w6692.ph_state == W_L1IND_DRD)) { ph_command(cs, W_L1CMD_ECK); spin_unlock_irqrestore(&cs->lock, flags); } else { ph_command(cs, W_L1CMD_RST); cs->dc.w6692.ph_state = W_L1CMD_RST; spin_unlock_irqrestore(&cs->lock, flags); W6692_new_ph(cs); } break; case (HW_ENABLE | REQUEST): spin_lock_irqsave(&cs->lock, flags); ph_command(cs, W_L1CMD_ECK); spin_unlock_irqrestore(&cs->lock, flags); break; case (HW_INFO3 | REQUEST): spin_lock_irqsave(&cs->lock, flags); ph_command(cs, W_L1CMD_AR8); spin_unlock_irqrestore(&cs->lock, flags); break; case (HW_TESTLOOP | REQUEST): val = 0; if (1 & (long) arg) val |= 0x0c; if (2 & (long) arg) val |= 0x3; /* !!! not implemented yet */ break; case (HW_DEACTIVATE | RESPONSE): skb_queue_purge(&cs->rq); skb_queue_purge(&cs->sq); if (cs->tx_skb) { dev_kfree_skb_any(cs->tx_skb); cs->tx_skb = NULL; } if (test_and_clear_bit(FLG_DBUSY_TIMER, &cs->HW_Flags)) del_timer(&cs->dbusytimer); if (test_and_clear_bit(FLG_L1_DBUSY, &cs->HW_Flags)) schedule_event(cs, D_CLEARBUSY); break; default: if (cs->debug & L1_DEB_WARN) debugl1(cs, "W6692_l1hw unknown %04x", pr); break; } } static void setstack_W6692(struct PStack *st, struct IsdnCardState *cs) { st->l1.l1hw = W6692_l1hw; } static void DC_Close_W6692(struct IsdnCardState *cs) { } static void dbusy_timer_handler(struct IsdnCardState *cs) { struct PStack *stptr; int rbch, star; u_long flags; spin_lock_irqsave(&cs->lock, flags); if (test_bit(FLG_DBUSY_TIMER, &cs->HW_Flags)) { rbch = cs->readW6692(cs, W_D_RBCH); star = cs->readW6692(cs, W_D_STAR); if (cs->debug) debugl1(cs, "D-Channel Busy D_RBCH %02x D_STAR %02x", rbch, star); if (star & W_D_STAR_XBZ) { /* D-Channel Busy */ test_and_set_bit(FLG_L1_DBUSY, &cs->HW_Flags); stptr = cs->stlist; while (stptr != NULL) { stptr->l1.l1l2(stptr, PH_PAUSE | INDICATION, NULL); stptr = stptr->next; } } else { /* discard frame; reset transceiver */ test_and_clear_bit(FLG_DBUSY_TIMER, &cs->HW_Flags); if (cs->tx_skb) 
{ dev_kfree_skb_any(cs->tx_skb); cs->tx_cnt = 0; cs->tx_skb = NULL; } else { printk(KERN_WARNING "HiSax: W6692 D-Channel Busy no skb\n"); debugl1(cs, "D-Channel Busy no skb"); } cs->writeW6692(cs, W_D_CMDR, W_D_CMDR_XRST); /* Transmitter reset */ spin_unlock_irqrestore(&cs->lock, flags); cs->irq_func(cs->irq, cs); return; } } spin_unlock_irqrestore(&cs->lock, flags); } static void W6692Bmode(struct BCState *bcs, int mode, int bchan) { struct IsdnCardState *cs = bcs->cs; if (cs->debug & L1_DEB_HSCX) debugl1(cs, "w6692 %c mode %d ichan %d", '1' + bchan, mode, bchan); bcs->mode = mode; bcs->channel = bchan; bcs->hw.w6692.bchan = bchan; switch (mode) { case (L1_MODE_NULL): cs->BC_Write_Reg(cs, bchan, W_B_MODE, 0); break; case (L1_MODE_TRANS): cs->BC_Write_Reg(cs, bchan, W_B_MODE, W_B_MODE_MMS); break; case (L1_MODE_HDLC): cs->BC_Write_Reg(cs, bchan, W_B_MODE, W_B_MODE_ITF); cs->BC_Write_Reg(cs, bchan, W_B_ADM1, 0xff); cs->BC_Write_Reg(cs, bchan, W_B_ADM2, 0xff); break; } if (mode) cs->BC_Write_Reg(cs, bchan, W_B_CMDR, W_B_CMDR_RRST | W_B_CMDR_RACT | W_B_CMDR_XRST); cs->BC_Write_Reg(cs, bchan, W_B_EXIM, 0x00); } static void W6692_l2l1(struct PStack *st, int pr, void *arg) { struct sk_buff *skb = arg; struct BCState *bcs = st->l1.bcs; u_long flags; switch (pr) { case (PH_DATA | REQUEST): spin_lock_irqsave(&bcs->cs->lock, flags); if (bcs->tx_skb) { skb_queue_tail(&bcs->squeue, skb); } else { bcs->tx_skb = skb; test_and_set_bit(BC_FLG_BUSY, &bcs->Flag); bcs->hw.w6692.count = 0; bcs->cs->BC_Send_Data(bcs); } spin_unlock_irqrestore(&bcs->cs->lock, flags); break; case (PH_PULL | INDICATION): if (bcs->tx_skb) { printk(KERN_WARNING "W6692_l2l1: this shouldn't happen\n"); break; } spin_lock_irqsave(&bcs->cs->lock, flags); test_and_set_bit(BC_FLG_BUSY, &bcs->Flag); bcs->tx_skb = skb; bcs->hw.w6692.count = 0; bcs->cs->BC_Send_Data(bcs); spin_unlock_irqrestore(&bcs->cs->lock, flags); break; case (PH_PULL | REQUEST): if (!bcs->tx_skb) { test_and_clear_bit(FLG_L1_PULL_REQ, 
&st->l1.Flags); st->l1.l1l2(st, PH_PULL | CONFIRM, NULL); } else test_and_set_bit(FLG_L1_PULL_REQ, &st->l1.Flags); break; case (PH_ACTIVATE | REQUEST): spin_lock_irqsave(&bcs->cs->lock, flags); test_and_set_bit(BC_FLG_ACTIV, &bcs->Flag); W6692Bmode(bcs, st->l1.mode, st->l1.bc); spin_unlock_irqrestore(&bcs->cs->lock, flags); l1_msg_b(st, pr, arg); break; case (PH_DEACTIVATE | REQUEST): l1_msg_b(st, pr, arg); break; case (PH_DEACTIVATE | CONFIRM): spin_lock_irqsave(&bcs->cs->lock, flags); test_and_clear_bit(BC_FLG_ACTIV, &bcs->Flag); test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag); W6692Bmode(bcs, 0, st->l1.bc); spin_unlock_irqrestore(&bcs->cs->lock, flags); st->l1.l1l2(st, PH_DEACTIVATE | CONFIRM, NULL); break; } } static void close_w6692state(struct BCState *bcs) { W6692Bmode(bcs, 0, bcs->channel); if (test_and_clear_bit(BC_FLG_INIT, &bcs->Flag)) { kfree(bcs->hw.w6692.rcvbuf); bcs->hw.w6692.rcvbuf = NULL; kfree(bcs->blog); bcs->blog = NULL; skb_queue_purge(&bcs->rqueue); skb_queue_purge(&bcs->squeue); if (bcs->tx_skb) { dev_kfree_skb_any(bcs->tx_skb); bcs->tx_skb = NULL; test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag); } } } static int open_w6692state(struct IsdnCardState *cs, struct BCState *bcs) { if (!test_and_set_bit(BC_FLG_INIT, &bcs->Flag)) { if (!(bcs->hw.w6692.rcvbuf = kmalloc(HSCX_BUFMAX, GFP_ATOMIC))) { printk(KERN_WARNING "HiSax: No memory for w6692.rcvbuf\n"); test_and_clear_bit(BC_FLG_INIT, &bcs->Flag); return (1); } if (!(bcs->blog = kmalloc(MAX_BLOG_SPACE, GFP_ATOMIC))) { printk(KERN_WARNING "HiSax: No memory for bcs->blog\n"); test_and_clear_bit(BC_FLG_INIT, &bcs->Flag); kfree(bcs->hw.w6692.rcvbuf); bcs->hw.w6692.rcvbuf = NULL; return (2); } skb_queue_head_init(&bcs->rqueue); skb_queue_head_init(&bcs->squeue); } bcs->tx_skb = NULL; test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag); bcs->event = 0; bcs->hw.w6692.rcvidx = 0; bcs->tx_cnt = 0; return (0); } static int setstack_w6692(struct PStack *st, struct BCState *bcs) { bcs->channel = st->l1.bc; if 
(open_w6692state(st->l1.hardware, bcs)) return (-1); st->l1.bcs = bcs; st->l2.l2l1 = W6692_l2l1; setstack_manager(st); bcs->st = st; setstack_l1_B(st); return (0); } static void resetW6692(struct IsdnCardState *cs) { cs->writeW6692(cs, W_D_CTL, W_D_CTL_SRST); mdelay(10); cs->writeW6692(cs, W_D_CTL, 0x00); mdelay(10); cs->writeW6692(cs, W_IMASK, 0xff); cs->writeW6692(cs, W_D_SAM, 0xff); cs->writeW6692(cs, W_D_TAM, 0xff); cs->writeW6692(cs, W_D_EXIM, 0x00); cs->writeW6692(cs, W_D_MODE, W_D_MODE_RACT); cs->writeW6692(cs, W_IMASK, 0x18); if (cs->subtyp == W6692_USR) { /* seems that USR implemented some power control features * Pin 79 is connected to the oscilator circuit so we * have to handle it here */ cs->writeW6692(cs, W_PCTL, 0x80); cs->writeW6692(cs, W_XDATA, 0x00); } } static void initW6692(struct IsdnCardState *cs, int part) { if (part & 1) { cs->setstack_d = setstack_W6692; cs->DC_Close = DC_Close_W6692; cs->dbusytimer.function = (void *) dbusy_timer_handler; cs->dbusytimer.data = (long) cs; init_timer(&cs->dbusytimer); resetW6692(cs); ph_command(cs, W_L1CMD_RST); cs->dc.w6692.ph_state = W_L1CMD_RST; W6692_new_ph(cs); ph_command(cs, W_L1CMD_ECK); cs->bcs[0].BC_SetStack = setstack_w6692; cs->bcs[1].BC_SetStack = setstack_w6692; cs->bcs[0].BC_Close = close_w6692state; cs->bcs[1].BC_Close = close_w6692state; W6692Bmode(cs->bcs, 0, 0); W6692Bmode(cs->bcs + 1, 0, 0); } if (part & 2) { /* Reenable all IRQ */ cs->writeW6692(cs, W_IMASK, 0x18); cs->writeW6692(cs, W_D_EXIM, 0x00); cs->BC_Write_Reg(cs, 0, W_B_EXIM, 0x00); cs->BC_Write_Reg(cs, 1, W_B_EXIM, 0x00); /* Reset D-chan receiver and transmitter */ cs->writeW6692(cs, W_D_CMDR, W_D_CMDR_RRST | W_D_CMDR_XRST); } } /* Interface functions */ static u_char ReadW6692(struct IsdnCardState *cs, u_char offset) { return (inb(cs->hw.w6692.iobase + offset)); } static void WriteW6692(struct IsdnCardState *cs, u_char offset, u_char value) { outb(value, cs->hw.w6692.iobase + offset); } static void ReadISACfifo(struct 
IsdnCardState *cs, u_char *data, int size) { insb(cs->hw.w6692.iobase + W_D_RFIFO, data, size); } static void WriteISACfifo(struct IsdnCardState *cs, u_char *data, int size) { outsb(cs->hw.w6692.iobase + W_D_XFIFO, data, size); } static u_char ReadW6692B(struct IsdnCardState *cs, int bchan, u_char offset) { return (inb(cs->hw.w6692.iobase + (bchan ? 0x40 : 0) + offset)); } static void WriteW6692B(struct IsdnCardState *cs, int bchan, u_char offset, u_char value) { outb(value, cs->hw.w6692.iobase + (bchan ? 0x40 : 0) + offset); } static int w6692_card_msg(struct IsdnCardState *cs, int mt, void *arg) { switch (mt) { case CARD_RESET: resetW6692(cs); return (0); case CARD_RELEASE: cs->writeW6692(cs, W_IMASK, 0xff); release_region(cs->hw.w6692.iobase, 256); if (cs->subtyp == W6692_USR) { cs->writeW6692(cs, W_XDATA, 0x04); } return (0); case CARD_INIT: initW6692(cs, 3); return (0); case CARD_TEST: return (0); } return (0); } static int id_idx; static struct pci_dev *dev_w6692 __devinitdata = NULL; int __devinit setup_w6692(struct IsdnCard *card) { struct IsdnCardState *cs = card->cs; char tmp[64]; u_char found = 0; u_char pci_irq = 0; u_int pci_ioaddr = 0; strcpy(tmp, w6692_revision); printk(KERN_INFO "HiSax: W6692 driver Rev. 
%s\n", HiSax_getrev(tmp)); if (cs->typ != ISDN_CTYPE_W6692) return (0); while (id_list[id_idx].vendor_id) { dev_w6692 = hisax_find_pci_device(id_list[id_idx].vendor_id, id_list[id_idx].device_id, dev_w6692); if (dev_w6692) { if (pci_enable_device(dev_w6692)) continue; cs->subtyp = id_idx; break; } id_idx++; } if (dev_w6692) { found = 1; pci_irq = dev_w6692->irq; /* I think address 0 is allways the configuration area */ /* and address 1 is the real IO space KKe 03.09.99 */ pci_ioaddr = pci_resource_start(dev_w6692, 1); /* USR ISDN PCI card TA need some special handling */ if (cs->subtyp == W6692_WINBOND) { if ((W6692_SV_USR == dev_w6692->subsystem_vendor) && (W6692_SD_USR == dev_w6692->subsystem_device)) { cs->subtyp = W6692_USR; } } } if (!found) { printk(KERN_WARNING "W6692: No PCI card found\n"); return (0); } cs->irq = pci_irq; if (!cs->irq) { printk(KERN_WARNING "W6692: No IRQ for PCI card found\n"); return (0); } if (!pci_ioaddr) { printk(KERN_WARNING "W6692: NO I/O Base Address found\n"); return (0); } cs->hw.w6692.iobase = pci_ioaddr; printk(KERN_INFO "Found: %s %s, I/O base: 0x%x, irq: %d\n", id_list[cs->subtyp].vendor_name, id_list[cs->subtyp].card_name, pci_ioaddr, pci_irq); if (!request_region(cs->hw.w6692.iobase, 256, id_list[cs->subtyp].card_name)) { printk(KERN_WARNING "HiSax: %s I/O ports %x-%x already in use\n", id_list[cs->subtyp].card_name, cs->hw.w6692.iobase, cs->hw.w6692.iobase + 255); return (0); } printk(KERN_INFO "HiSax: %s config irq:%d I/O:%x\n", id_list[cs->subtyp].card_name, cs->irq, cs->hw.w6692.iobase); INIT_WORK(&cs->tqueue, W6692_bh); cs->readW6692 = &ReadW6692; cs->writeW6692 = &WriteW6692; cs->readisacfifo = &ReadISACfifo; cs->writeisacfifo = &WriteISACfifo; cs->BC_Read_Reg = &ReadW6692B; cs->BC_Write_Reg = &WriteW6692B; cs->BC_Send_Data = &W6692B_fill_fifo; cs->cardmsg = &w6692_card_msg; cs->irq_func = &W6692_interrupt; cs->irq_flags |= IRQF_SHARED; W6692Version(cs, "W6692:"); printk(KERN_INFO "W6692 ISTA=0x%X\n", ReadW6692(cs, 
W_ISTA)); printk(KERN_INFO "W6692 IMASK=0x%X\n", ReadW6692(cs, W_IMASK)); printk(KERN_INFO "W6692 D_EXIR=0x%X\n", ReadW6692(cs, W_D_EXIR)); printk(KERN_INFO "W6692 D_EXIM=0x%X\n", ReadW6692(cs, W_D_EXIM)); printk(KERN_INFO "W6692 D_RSTA=0x%X\n", ReadW6692(cs, W_D_RSTA)); return (1); }
gpl-2.0
davidmueller13/aospmmkernel2
drivers/rtc/rtc-ds1511.c
4988
14380
/* * An rtc driver for the Dallas DS1511 * * Copyright (C) 2006 Atsushi Nemoto <anemo@mba.ocn.ne.jp> * Copyright (C) 2007 Andrew Sharp <andy.sharp@lsi.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * Real time clock driver for the Dallas 1511 chip, which also * contains a watchdog timer. There is a tiny amount of code that * platform code could use to mess with the watchdog device a little * bit, but not a full watchdog driver. */ #include <linux/bcd.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/gfp.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/rtc.h> #include <linux/platform_device.h> #include <linux/io.h> #include <linux/module.h> #define DRV_VERSION "0.6" enum ds1511reg { DS1511_SEC = 0x0, DS1511_MIN = 0x1, DS1511_HOUR = 0x2, DS1511_DOW = 0x3, DS1511_DOM = 0x4, DS1511_MONTH = 0x5, DS1511_YEAR = 0x6, DS1511_CENTURY = 0x7, DS1511_AM1_SEC = 0x8, DS1511_AM2_MIN = 0x9, DS1511_AM3_HOUR = 0xa, DS1511_AM4_DATE = 0xb, DS1511_WD_MSEC = 0xc, DS1511_WD_SEC = 0xd, DS1511_CONTROL_A = 0xe, DS1511_CONTROL_B = 0xf, DS1511_RAMADDR_LSB = 0x10, DS1511_RAMDATA = 0x13 }; #define DS1511_BLF1 0x80 #define DS1511_BLF2 0x40 #define DS1511_PRS 0x20 #define DS1511_PAB 0x10 #define DS1511_TDF 0x08 #define DS1511_KSF 0x04 #define DS1511_WDF 0x02 #define DS1511_IRQF 0x01 #define DS1511_TE 0x80 #define DS1511_CS 0x40 #define DS1511_BME 0x20 #define DS1511_TPE 0x10 #define DS1511_TIE 0x08 #define DS1511_KIE 0x04 #define DS1511_WDE 0x02 #define DS1511_WDS 0x01 #define DS1511_RAM_MAX 0xff #define RTC_CMD DS1511_CONTROL_B #define RTC_CMD1 DS1511_CONTROL_A #define RTC_ALARM_SEC DS1511_AM1_SEC #define RTC_ALARM_MIN DS1511_AM2_MIN #define RTC_ALARM_HOUR DS1511_AM3_HOUR #define RTC_ALARM_DATE DS1511_AM4_DATE #define RTC_SEC DS1511_SEC #define RTC_MIN DS1511_MIN #define RTC_HOUR DS1511_HOUR #define RTC_DOW 
DS1511_DOW #define RTC_DOM DS1511_DOM #define RTC_MON DS1511_MONTH #define RTC_YEAR DS1511_YEAR #define RTC_CENTURY DS1511_CENTURY #define RTC_TIE DS1511_TIE #define RTC_TE DS1511_TE struct rtc_plat_data { struct rtc_device *rtc; void __iomem *ioaddr; /* virtual base address */ int size; /* amount of memory mapped */ int irq; unsigned int irqen; int alrm_sec; int alrm_min; int alrm_hour; int alrm_mday; spinlock_t lock; }; static DEFINE_SPINLOCK(ds1511_lock); static __iomem char *ds1511_base; static u32 reg_spacing = 1; static noinline void rtc_write(uint8_t val, uint32_t reg) { writeb(val, ds1511_base + (reg * reg_spacing)); } static inline void rtc_write_alarm(uint8_t val, enum ds1511reg reg) { rtc_write((val | 0x80), reg); } static noinline uint8_t rtc_read(enum ds1511reg reg) { return readb(ds1511_base + (reg * reg_spacing)); } static inline void rtc_disable_update(void) { rtc_write((rtc_read(RTC_CMD) & ~RTC_TE), RTC_CMD); } static void rtc_enable_update(void) { rtc_write((rtc_read(RTC_CMD) | RTC_TE), RTC_CMD); } /* * #define DS1511_WDOG_RESET_SUPPORT * * Uncomment this if you want to use these routines in * some platform code. */ #ifdef DS1511_WDOG_RESET_SUPPORT /* * just enough code to set the watchdog timer so that it * will reboot the system */ void ds1511_wdog_set(unsigned long deciseconds) { /* * the wdog timer can take 99.99 seconds */ deciseconds %= 10000; /* * set the wdog values in the wdog registers */ rtc_write(bin2bcd(deciseconds % 100), DS1511_WD_MSEC); rtc_write(bin2bcd(deciseconds / 100), DS1511_WD_SEC); /* * set wdog enable and wdog 'steering' bit to issue a reset */ rtc_write(DS1511_WDE | DS1511_WDS, RTC_CMD); } void ds1511_wdog_disable(void) { /* * clear wdog enable and wdog 'steering' bits */ rtc_write(rtc_read(RTC_CMD) & ~(DS1511_WDE | DS1511_WDS), RTC_CMD); /* * clear the wdog counter */ rtc_write(0, DS1511_WD_MSEC); rtc_write(0, DS1511_WD_SEC); } #endif /* * set the rtc chip's idea of the time. 
* stupidly, some callers call with year unmolested; * and some call with year = year - 1900. thanks. */ static int ds1511_rtc_set_time(struct device *dev, struct rtc_time *rtc_tm) { u8 mon, day, dow, hrs, min, sec, yrs, cen; unsigned long flags; /* * won't have to change this for a while */ if (rtc_tm->tm_year < 1900) { rtc_tm->tm_year += 1900; } if (rtc_tm->tm_year < 1970) { return -EINVAL; } yrs = rtc_tm->tm_year % 100; cen = rtc_tm->tm_year / 100; mon = rtc_tm->tm_mon + 1; /* tm_mon starts at zero */ day = rtc_tm->tm_mday; dow = rtc_tm->tm_wday & 0x7; /* automatic BCD */ hrs = rtc_tm->tm_hour; min = rtc_tm->tm_min; sec = rtc_tm->tm_sec; if ((mon > 12) || (day == 0)) { return -EINVAL; } if (day > rtc_month_days(rtc_tm->tm_mon, rtc_tm->tm_year)) { return -EINVAL; } if ((hrs >= 24) || (min >= 60) || (sec >= 60)) { return -EINVAL; } /* * each register is a different number of valid bits */ sec = bin2bcd(sec) & 0x7f; min = bin2bcd(min) & 0x7f; hrs = bin2bcd(hrs) & 0x3f; day = bin2bcd(day) & 0x3f; mon = bin2bcd(mon) & 0x1f; yrs = bin2bcd(yrs) & 0xff; cen = bin2bcd(cen) & 0xff; spin_lock_irqsave(&ds1511_lock, flags); rtc_disable_update(); rtc_write(cen, RTC_CENTURY); rtc_write(yrs, RTC_YEAR); rtc_write((rtc_read(RTC_MON) & 0xe0) | mon, RTC_MON); rtc_write(day, RTC_DOM); rtc_write(hrs, RTC_HOUR); rtc_write(min, RTC_MIN); rtc_write(sec, RTC_SEC); rtc_write(dow, RTC_DOW); rtc_enable_update(); spin_unlock_irqrestore(&ds1511_lock, flags); return 0; } static int ds1511_rtc_read_time(struct device *dev, struct rtc_time *rtc_tm) { unsigned int century; unsigned long flags; spin_lock_irqsave(&ds1511_lock, flags); rtc_disable_update(); rtc_tm->tm_sec = rtc_read(RTC_SEC) & 0x7f; rtc_tm->tm_min = rtc_read(RTC_MIN) & 0x7f; rtc_tm->tm_hour = rtc_read(RTC_HOUR) & 0x3f; rtc_tm->tm_mday = rtc_read(RTC_DOM) & 0x3f; rtc_tm->tm_wday = rtc_read(RTC_DOW) & 0x7; rtc_tm->tm_mon = rtc_read(RTC_MON) & 0x1f; rtc_tm->tm_year = rtc_read(RTC_YEAR) & 0x7f; century = rtc_read(RTC_CENTURY); 
rtc_enable_update(); spin_unlock_irqrestore(&ds1511_lock, flags); rtc_tm->tm_sec = bcd2bin(rtc_tm->tm_sec); rtc_tm->tm_min = bcd2bin(rtc_tm->tm_min); rtc_tm->tm_hour = bcd2bin(rtc_tm->tm_hour); rtc_tm->tm_mday = bcd2bin(rtc_tm->tm_mday); rtc_tm->tm_wday = bcd2bin(rtc_tm->tm_wday); rtc_tm->tm_mon = bcd2bin(rtc_tm->tm_mon); rtc_tm->tm_year = bcd2bin(rtc_tm->tm_year); century = bcd2bin(century) * 100; /* * Account for differences between how the RTC uses the values * and how they are defined in a struct rtc_time; */ century += rtc_tm->tm_year; rtc_tm->tm_year = century - 1900; rtc_tm->tm_mon--; if (rtc_valid_tm(rtc_tm) < 0) { dev_err(dev, "retrieved date/time is not valid.\n"); rtc_time_to_tm(0, rtc_tm); } return 0; } /* * write the alarm register settings * * we only have the use to interrupt every second, otherwise * known as the update interrupt, or the interrupt if the whole * date/hours/mins/secs matches. the ds1511 has many more * permutations, but the kernel doesn't. */ static void ds1511_rtc_update_alarm(struct rtc_plat_data *pdata) { unsigned long flags; spin_lock_irqsave(&pdata->lock, flags); rtc_write(pdata->alrm_mday < 0 || (pdata->irqen & RTC_UF) ? 0x80 : bin2bcd(pdata->alrm_mday) & 0x3f, RTC_ALARM_DATE); rtc_write(pdata->alrm_hour < 0 || (pdata->irqen & RTC_UF) ? 0x80 : bin2bcd(pdata->alrm_hour) & 0x3f, RTC_ALARM_HOUR); rtc_write(pdata->alrm_min < 0 || (pdata->irqen & RTC_UF) ? 0x80 : bin2bcd(pdata->alrm_min) & 0x7f, RTC_ALARM_MIN); rtc_write(pdata->alrm_sec < 0 || (pdata->irqen & RTC_UF) ? 0x80 : bin2bcd(pdata->alrm_sec) & 0x7f, RTC_ALARM_SEC); rtc_write(rtc_read(RTC_CMD) | (pdata->irqen ? 
RTC_TIE : 0), RTC_CMD); rtc_read(RTC_CMD1); /* clear interrupts */ spin_unlock_irqrestore(&pdata->lock, flags); } static int ds1511_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm) { struct platform_device *pdev = to_platform_device(dev); struct rtc_plat_data *pdata = platform_get_drvdata(pdev); if (pdata->irq <= 0) return -EINVAL; pdata->alrm_mday = alrm->time.tm_mday; pdata->alrm_hour = alrm->time.tm_hour; pdata->alrm_min = alrm->time.tm_min; pdata->alrm_sec = alrm->time.tm_sec; if (alrm->enabled) { pdata->irqen |= RTC_AF; } ds1511_rtc_update_alarm(pdata); return 0; } static int ds1511_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm) { struct platform_device *pdev = to_platform_device(dev); struct rtc_plat_data *pdata = platform_get_drvdata(pdev); if (pdata->irq <= 0) return -EINVAL; alrm->time.tm_mday = pdata->alrm_mday < 0 ? 0 : pdata->alrm_mday; alrm->time.tm_hour = pdata->alrm_hour < 0 ? 0 : pdata->alrm_hour; alrm->time.tm_min = pdata->alrm_min < 0 ? 0 : pdata->alrm_min; alrm->time.tm_sec = pdata->alrm_sec < 0 ? 0 : pdata->alrm_sec; alrm->enabled = (pdata->irqen & RTC_AF) ? 1 : 0; return 0; } static irqreturn_t ds1511_interrupt(int irq, void *dev_id) { struct platform_device *pdev = dev_id; struct rtc_plat_data *pdata = platform_get_drvdata(pdev); unsigned long events = 0; spin_lock(&pdata->lock); /* * read and clear interrupt */ if (rtc_read(RTC_CMD1) & DS1511_IRQF) { events = RTC_IRQF; if (rtc_read(RTC_ALARM_SEC) & 0x80) events |= RTC_UF; else events |= RTC_AF; if (likely(pdata->rtc)) rtc_update_irq(pdata->rtc, 1, events); } spin_unlock(&pdata->lock); return events ? 
IRQ_HANDLED : IRQ_NONE; } static int ds1511_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled) { struct platform_device *pdev = to_platform_device(dev); struct rtc_plat_data *pdata = platform_get_drvdata(pdev); if (pdata->irq <= 0) return -EINVAL; if (enabled) pdata->irqen |= RTC_AF; else pdata->irqen &= ~RTC_AF; ds1511_rtc_update_alarm(pdata); return 0; } static const struct rtc_class_ops ds1511_rtc_ops = { .read_time = ds1511_rtc_read_time, .set_time = ds1511_rtc_set_time, .read_alarm = ds1511_rtc_read_alarm, .set_alarm = ds1511_rtc_set_alarm, .alarm_irq_enable = ds1511_rtc_alarm_irq_enable, }; static ssize_t ds1511_nvram_read(struct file *filp, struct kobject *kobj, struct bin_attribute *ba, char *buf, loff_t pos, size_t size) { ssize_t count; /* * if count is more than one, turn on "burst" mode * turn it off when you're done */ if (size > 1) { rtc_write((rtc_read(RTC_CMD) | DS1511_BME), RTC_CMD); } if (pos > DS1511_RAM_MAX) { pos = DS1511_RAM_MAX; } if (size + pos > DS1511_RAM_MAX + 1) { size = DS1511_RAM_MAX - pos + 1; } rtc_write(pos, DS1511_RAMADDR_LSB); for (count = 0; size > 0; count++, size--) { *buf++ = rtc_read(DS1511_RAMDATA); } if (count > 1) { rtc_write((rtc_read(RTC_CMD) & ~DS1511_BME), RTC_CMD); } return count; } static ssize_t ds1511_nvram_write(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buf, loff_t pos, size_t size) { ssize_t count; /* * if count is more than one, turn on "burst" mode * turn it off when you're done */ if (size > 1) { rtc_write((rtc_read(RTC_CMD) | DS1511_BME), RTC_CMD); } if (pos > DS1511_RAM_MAX) { pos = DS1511_RAM_MAX; } if (size + pos > DS1511_RAM_MAX + 1) { size = DS1511_RAM_MAX - pos + 1; } rtc_write(pos, DS1511_RAMADDR_LSB); for (count = 0; size > 0; count++, size--) { rtc_write(*buf++, DS1511_RAMDATA); } if (count > 1) { rtc_write((rtc_read(RTC_CMD) & ~DS1511_BME), RTC_CMD); } return count; } static struct bin_attribute ds1511_nvram_attr = { .attr = { .name = "nvram", .mode = 
S_IRUGO | S_IWUSR, }, .size = DS1511_RAM_MAX, .read = ds1511_nvram_read, .write = ds1511_nvram_write, }; static int __devinit ds1511_rtc_probe(struct platform_device *pdev) { struct rtc_device *rtc; struct resource *res; struct rtc_plat_data *pdata; int ret = 0; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { return -ENODEV; } pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); if (!pdata) return -ENOMEM; pdata->size = resource_size(res); if (!devm_request_mem_region(&pdev->dev, res->start, pdata->size, pdev->name)) return -EBUSY; ds1511_base = devm_ioremap(&pdev->dev, res->start, pdata->size); if (!ds1511_base) return -ENOMEM; pdata->ioaddr = ds1511_base; pdata->irq = platform_get_irq(pdev, 0); /* * turn on the clock and the crystal, etc. */ rtc_write(0, RTC_CMD); rtc_write(0, RTC_CMD1); /* * clear the wdog counter */ rtc_write(0, DS1511_WD_MSEC); rtc_write(0, DS1511_WD_SEC); /* * start the clock */ rtc_enable_update(); /* * check for a dying bat-tree */ if (rtc_read(RTC_CMD1) & DS1511_BLF1) { dev_warn(&pdev->dev, "voltage-low detected.\n"); } spin_lock_init(&pdata->lock); platform_set_drvdata(pdev, pdata); /* * if the platform has an interrupt in mind for this device, * then by all means, set it */ if (pdata->irq > 0) { rtc_read(RTC_CMD1); if (devm_request_irq(&pdev->dev, pdata->irq, ds1511_interrupt, IRQF_SHARED, pdev->name, pdev) < 0) { dev_warn(&pdev->dev, "interrupt not available.\n"); pdata->irq = 0; } } rtc = rtc_device_register(pdev->name, &pdev->dev, &ds1511_rtc_ops, THIS_MODULE); if (IS_ERR(rtc)) return PTR_ERR(rtc); pdata->rtc = rtc; ret = sysfs_create_bin_file(&pdev->dev.kobj, &ds1511_nvram_attr); if (ret) rtc_device_unregister(pdata->rtc); return ret; } static int __devexit ds1511_rtc_remove(struct platform_device *pdev) { struct rtc_plat_data *pdata = platform_get_drvdata(pdev); sysfs_remove_bin_file(&pdev->dev.kobj, &ds1511_nvram_attr); rtc_device_unregister(pdata->rtc); if (pdata->irq > 0) { /* * disable the alarm 
interrupt */ rtc_write(rtc_read(RTC_CMD) & ~RTC_TIE, RTC_CMD); rtc_read(RTC_CMD1); } return 0; } /* work with hotplug and coldplug */ MODULE_ALIAS("platform:ds1511"); static struct platform_driver ds1511_rtc_driver = { .probe = ds1511_rtc_probe, .remove = __devexit_p(ds1511_rtc_remove), .driver = { .name = "ds1511", .owner = THIS_MODULE, }, }; module_platform_driver(ds1511_rtc_driver); MODULE_AUTHOR("Andrew Sharp <andy.sharp@lsi.com>"); MODULE_DESCRIPTION("Dallas DS1511 RTC driver"); MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_VERSION);
gpl-2.0
alexpotter1/QuantumKernel_msm8974_d802
drivers/gpu/drm/nouveau/nv04_pm.c
5500
3328
/* * Copyright 2010 Red Hat Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
* * Authors: Ben Skeggs */ #include "drmP.h" #include "nouveau_drv.h" #include "nouveau_hw.h" #include "nouveau_pm.h" int nv04_pm_clocks_get(struct drm_device *dev, struct nouveau_pm_level *perflvl) { int ret; ret = nouveau_hw_get_clock(dev, PLL_CORE); if (ret < 0) return ret; perflvl->core = ret; ret = nouveau_hw_get_clock(dev, PLL_MEMORY); if (ret < 0) return ret; perflvl->memory = ret; return 0; } struct nv04_pm_clock { struct pll_lims pll; struct nouveau_pll_vals calc; }; struct nv04_pm_state { struct nv04_pm_clock core; struct nv04_pm_clock memory; }; static int calc_pll(struct drm_device *dev, u32 id, int khz, struct nv04_pm_clock *clk) { int ret; ret = get_pll_limits(dev, id, &clk->pll); if (ret) return ret; ret = nouveau_calc_pll_mnp(dev, &clk->pll, khz, &clk->calc); if (!ret) return -EINVAL; return 0; } void * nv04_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl) { struct nv04_pm_state *info; int ret; info = kzalloc(sizeof(*info), GFP_KERNEL); if (!info) return ERR_PTR(-ENOMEM); ret = calc_pll(dev, PLL_CORE, perflvl->core, &info->core); if (ret) goto error; if (perflvl->memory) { ret = calc_pll(dev, PLL_MEMORY, perflvl->memory, &info->memory); if (ret) goto error; } return info; error: kfree(info); return ERR_PTR(ret); } static void prog_pll(struct drm_device *dev, struct nv04_pm_clock *clk) { struct drm_nouveau_private *dev_priv = dev->dev_private; u32 reg = clk->pll.reg; /* thank the insane nouveau_hw_setpll() interface for this */ if (dev_priv->card_type >= NV_40) reg += 4; nouveau_hw_setpll(dev, reg, &clk->calc); } int nv04_pm_clocks_set(struct drm_device *dev, void *pre_state) { struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer; struct nv04_pm_state *state = pre_state; prog_pll(dev, &state->core); if (state->memory.pll.reg) { prog_pll(dev, &state->memory); if (dev_priv->card_type < NV_30) { if (dev_priv->card_type == NV_20) nv_mask(dev, 0x1002c4, 0, 1 << 20); 
/* Reset the DLLs */ nv_mask(dev, 0x1002c0, 0, 1 << 8); } } ptimer->init(dev); kfree(state); return 0; }
gpl-2.0
bigzz/shamu_flar2
sound/soc/fsl/efika-audio-fabric.c
9596
2150
/* * Efika driver for the PSC of the Freescale MPC52xx * configured as AC97 interface * * Copyright 2008 Jon Smirl, Digispeaker * Author: Jon Smirl <jonsmirl@gmail.com> * * This file is licensed under the terms of the GNU General Public License * version 2. This program is licensed "as is" without any warranty of any * kind, whether express or implied. */ #include <linux/init.h> #include <linux/module.h> #include <linux/interrupt.h> #include <linux/device.h> #include <linux/delay.h> #include <linux/of_device.h> #include <linux/of_platform.h> #include <linux/dma-mapping.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include <sound/initval.h> #include <sound/soc.h> #include "mpc5200_dma.h" #include "mpc5200_psc_ac97.h" #include "../codecs/stac9766.h" #define DRV_NAME "efika-audio-fabric" static struct snd_soc_dai_link efika_fabric_dai[] = { { .name = "AC97", .stream_name = "AC97 Analog", .codec_dai_name = "stac9766-hifi-analog", .cpu_dai_name = "mpc5200-psc-ac97.0", .platform_name = "mpc5200-pcm-audio", .codec_name = "stac9766-codec", }, { .name = "AC97", .stream_name = "AC97 IEC958", .codec_dai_name = "stac9766-hifi-IEC958", .cpu_dai_name = "mpc5200-psc-ac97.1", .platform_name = "mpc5200-pcm-audio", .codec_name = "stac9766-codec", }, }; static struct snd_soc_card card = { .name = "Efika", .owner = THIS_MODULE, .dai_link = efika_fabric_dai, .num_links = ARRAY_SIZE(efika_fabric_dai), }; static __init int efika_fabric_init(void) { struct platform_device *pdev; int rc; if (!of_machine_is_compatible("bplan,efika")) return -ENODEV; pdev = platform_device_alloc("soc-audio", 1); if (!pdev) { pr_err("efika_fabric_init: platform_device_alloc() failed\n"); return -ENODEV; } platform_set_drvdata(pdev, &card); rc = platform_device_add(pdev); if (rc) { pr_err("efika_fabric_init: platform_device_add() failed\n"); platform_device_put(pdev); return -ENODEV; } return 0; } module_init(efika_fabric_init); MODULE_AUTHOR("Jon Smirl 
<jonsmirl@gmail.com>"); MODULE_DESCRIPTION(DRV_NAME ": mpc5200 Efika fabric driver"); MODULE_LICENSE("GPL");
gpl-2.0
madmack/i747_kernel_ics
net/llc/llc_c_st.c
13436
153432
/* * llc_c_st.c - This module contains state transition of connection component. * * Description of event functions and actions there is in 802.2 LLC standard, * or in "llc_c_ac.c" and "llc_c_ev.c" modules. * * Copyright (c) 1997 by Procom Technology, Inc. * 2001-2003 by Arnaldo Carvalho de Melo <acme@conectiva.com.br> * * This program can be redistributed or modified under the terms of the * GNU General Public License as published by the Free Software Foundation. * This program is distributed without any warranty or implied warranty * of merchantability or fitness for a particular purpose. * * See the GNU General Public License for more details. */ #include <linux/types.h> #include <net/llc_if.h> #include <net/llc_sap.h> #include <net/llc_c_ev.h> #include <net/llc_c_ac.h> #include <net/llc_c_st.h> #define NONE NULL /* COMMON CONNECTION STATE transitions * Common transitions for * LLC_CONN_STATE_NORMAL, * LLC_CONN_STATE_BUSY, * LLC_CONN_STATE_REJ, * LLC_CONN_STATE_AWAIT, * LLC_CONN_STATE_AWAIT_BUSY and * LLC_CONN_STATE_AWAIT_REJ states */ /* State transitions for LLC_CONN_EV_DISC_REQ event */ static llc_conn_action_t llc_common_actions_1[] = { [0] = llc_conn_ac_send_disc_cmd_p_set_x, [1] = llc_conn_ac_start_ack_timer, [2] = llc_conn_ac_stop_other_timers, [3] = llc_conn_ac_set_retry_cnt_0, [4] = llc_conn_ac_set_cause_flag_1, [5] = NULL, }; static struct llc_conn_state_trans llc_common_state_trans_1 = { .ev = llc_conn_ev_disc_req, .next_state = LLC_CONN_STATE_D_CONN, .ev_qualifiers = NONE, .ev_actions = llc_common_actions_1, }; /* State transitions for LLC_CONN_EV_RESET_REQ event */ static llc_conn_action_t llc_common_actions_2[] = { [0] = llc_conn_ac_send_sabme_cmd_p_set_x, [1] = llc_conn_ac_start_ack_timer, [2] = llc_conn_ac_stop_other_timers, [3] = llc_conn_ac_set_retry_cnt_0, [4] = llc_conn_ac_set_cause_flag_1, [5] = NULL, }; static struct llc_conn_state_trans llc_common_state_trans_2 = { .ev = llc_conn_ev_rst_req, .next_state = LLC_CONN_STATE_RESET, 
.ev_qualifiers = NONE, .ev_actions = llc_common_actions_2, }; /* State transitions for LLC_CONN_EV_RX_SABME_CMD_Pbit_SET_X event */ static llc_conn_action_t llc_common_actions_3[] = { [0] = llc_conn_ac_stop_all_timers, [1] = llc_conn_ac_set_vs_0, [2] = llc_conn_ac_set_vr_0, [3] = llc_conn_ac_send_ua_rsp_f_set_p, [4] = llc_conn_ac_rst_ind, [5] = llc_conn_ac_set_p_flag_0, [6] = llc_conn_ac_set_remote_busy_0, [7] = llc_conn_reset, [8] = NULL, }; static struct llc_conn_state_trans llc_common_state_trans_3 = { .ev = llc_conn_ev_rx_sabme_cmd_pbit_set_x, .next_state = LLC_CONN_STATE_NORMAL, .ev_qualifiers = NONE, .ev_actions = llc_common_actions_3, }; /* State transitions for LLC_CONN_EV_RX_DISC_CMD_Pbit_SET_X event */ static llc_conn_action_t llc_common_actions_4[] = { [0] = llc_conn_ac_stop_all_timers, [1] = llc_conn_ac_send_ua_rsp_f_set_p, [2] = llc_conn_ac_disc_ind, [3] = llc_conn_disc, [4] = NULL, }; static struct llc_conn_state_trans llc_common_state_trans_4 = { .ev = llc_conn_ev_rx_disc_cmd_pbit_set_x, .next_state = LLC_CONN_STATE_ADM, .ev_qualifiers = NONE, .ev_actions = llc_common_actions_4, }; /* State transitions for LLC_CONN_EV_RX_FRMR_RSP_Fbit_SET_X event */ static llc_conn_action_t llc_common_actions_5[] = { [0] = llc_conn_ac_send_sabme_cmd_p_set_x, [1] = llc_conn_ac_start_ack_timer, [2] = llc_conn_ac_stop_other_timers, [3] = llc_conn_ac_set_retry_cnt_0, [4] = llc_conn_ac_rst_ind, [5] = llc_conn_ac_set_cause_flag_0, [6] = llc_conn_reset, [7] = NULL, }; static struct llc_conn_state_trans llc_common_state_trans_5 = { .ev = llc_conn_ev_rx_frmr_rsp_fbit_set_x, .next_state = LLC_CONN_STATE_RESET, .ev_qualifiers = NONE, .ev_actions = llc_common_actions_5, }; /* State transitions for LLC_CONN_EV_RX_DM_RSP_Fbit_SET_X event */ static llc_conn_action_t llc_common_actions_6[] = { [0] = llc_conn_ac_disc_ind, [1] = llc_conn_ac_stop_all_timers, [2] = llc_conn_disc, [3] = NULL, }; static struct llc_conn_state_trans llc_common_state_trans_6 = { .ev = 
llc_conn_ev_rx_dm_rsp_fbit_set_x, .next_state = LLC_CONN_STATE_ADM, .ev_qualifiers = NONE, .ev_actions = llc_common_actions_6, }; /* State transitions for LLC_CONN_EV_RX_ZZZ_CMD_Pbit_SET_X_INVAL_Nr event */ static llc_conn_action_t llc_common_actions_7a[] = { [0] = llc_conn_ac_send_frmr_rsp_f_set_x, [1] = llc_conn_ac_start_ack_timer, [2] = llc_conn_ac_stop_other_timers, [3] = llc_conn_ac_set_retry_cnt_0, [4] = NULL, }; static struct llc_conn_state_trans llc_common_state_trans_7a = { .ev = llc_conn_ev_rx_zzz_cmd_pbit_set_x_inval_nr, .next_state = LLC_CONN_STATE_ERROR, .ev_qualifiers = NONE, .ev_actions = llc_common_actions_7a, }; /* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_X_INVAL_Ns event */ static llc_conn_action_t llc_common_actions_7b[] = { [0] = llc_conn_ac_send_frmr_rsp_f_set_x, [1] = llc_conn_ac_start_ack_timer, [2] = llc_conn_ac_stop_other_timers, [3] = llc_conn_ac_set_retry_cnt_0, [4] = NULL, }; static struct llc_conn_state_trans llc_common_state_trans_7b = { .ev = llc_conn_ev_rx_i_cmd_pbit_set_x_inval_ns, .next_state = LLC_CONN_STATE_ERROR, .ev_qualifiers = NONE, .ev_actions = llc_common_actions_7b, }; /* State transitions for LLC_CONN_EV_RX_ZZZ_RSP_Fbit_SET_X_INVAL_Nr event */ static llc_conn_action_t llc_common_actions_8a[] = { [0] = llc_conn_ac_send_frmr_rsp_f_set_x, [1] = llc_conn_ac_start_ack_timer, [2] = llc_conn_ac_stop_other_timers, [3] = llc_conn_ac_set_retry_cnt_0, [4] = NULL, }; static struct llc_conn_state_trans llc_common_state_trans_8a = { .ev = llc_conn_ev_rx_zzz_rsp_fbit_set_x_inval_nr, .next_state = LLC_CONN_STATE_ERROR, .ev_qualifiers = NONE, .ev_actions = llc_common_actions_8a, }; /* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_X_INVAL_Ns event */ static llc_conn_action_t llc_common_actions_8b[] = { [0] = llc_conn_ac_send_frmr_rsp_f_set_x, [1] = llc_conn_ac_start_ack_timer, [2] = llc_conn_ac_stop_other_timers, [3] = llc_conn_ac_set_retry_cnt_0, [4] = NULL, }; static struct llc_conn_state_trans llc_common_state_trans_8b 
= { .ev = llc_conn_ev_rx_i_rsp_fbit_set_x_inval_ns, .next_state = LLC_CONN_STATE_ERROR, .ev_qualifiers = NONE, .ev_actions = llc_common_actions_8b, }; /* State transitions for LLC_CONN_EV_RX_BAD_PDU event */ static llc_conn_action_t llc_common_actions_8c[] = { [0] = llc_conn_ac_send_frmr_rsp_f_set_x, [1] = llc_conn_ac_start_ack_timer, [2] = llc_conn_ac_stop_other_timers, [3] = llc_conn_ac_set_retry_cnt_0, [4] = NULL, }; static struct llc_conn_state_trans llc_common_state_trans_8c = { .ev = llc_conn_ev_rx_bad_pdu, .next_state = LLC_CONN_STATE_ERROR, .ev_qualifiers = NONE, .ev_actions = llc_common_actions_8c, }; /* State transitions for LLC_CONN_EV_RX_UA_RSP_Fbit_SET_X event */ static llc_conn_action_t llc_common_actions_9[] = { [0] = llc_conn_ac_send_frmr_rsp_f_set_x, [1] = llc_conn_ac_start_ack_timer, [2] = llc_conn_ac_stop_other_timers, [3] = llc_conn_ac_set_retry_cnt_0, [4] = NULL, }; static struct llc_conn_state_trans llc_common_state_trans_9 = { .ev = llc_conn_ev_rx_ua_rsp_fbit_set_x, .next_state = LLC_CONN_STATE_ERROR, .ev_qualifiers = NONE, .ev_actions = llc_common_actions_9, }; /* State transitions for LLC_CONN_EV_RX_XXX_RSP_Fbit_SET_1 event */ #if 0 static llc_conn_ev_qfyr_t llc_common_ev_qfyrs_10[] = { [0] = llc_conn_ev_qlfy_p_flag_eq_0, [1] = NULL, }; static llc_conn_action_t llc_common_actions_10[] = { [0] = llc_conn_ac_send_frmr_rsp_f_set_x, [1] = llc_conn_ac_start_ack_timer, [2] = llc_conn_ac_stop_other_timers, [3] = llc_conn_ac_set_retry_cnt_0, [4] = NULL, }; static struct llc_conn_state_trans llc_common_state_trans_10 = { .ev = llc_conn_ev_rx_xxx_rsp_fbit_set_1, .next_state = LLC_CONN_STATE_ERROR, .ev_qualifiers = llc_common_ev_qfyrs_10, .ev_actions = llc_common_actions_10, }; #endif /* State transitions for LLC_CONN_EV_P_TMR_EXP event */ static llc_conn_ev_qfyr_t llc_common_ev_qfyrs_11a[] = { [0] = llc_conn_ev_qlfy_retry_cnt_gte_n2, [1] = NULL, }; static llc_conn_action_t llc_common_actions_11a[] = { [0] = llc_conn_ac_send_sabme_cmd_p_set_x, [1] = 
llc_conn_ac_start_ack_timer, [2] = llc_conn_ac_stop_other_timers, [3] = llc_conn_ac_set_retry_cnt_0, [4] = llc_conn_ac_set_cause_flag_0, [5] = NULL, }; static struct llc_conn_state_trans llc_common_state_trans_11a = { .ev = llc_conn_ev_p_tmr_exp, .next_state = LLC_CONN_STATE_RESET, .ev_qualifiers = llc_common_ev_qfyrs_11a, .ev_actions = llc_common_actions_11a, }; /* State transitions for LLC_CONN_EV_ACK_TMR_EXP event */ static llc_conn_ev_qfyr_t llc_common_ev_qfyrs_11b[] = { [0] = llc_conn_ev_qlfy_retry_cnt_gte_n2, [1] = NULL, }; static llc_conn_action_t llc_common_actions_11b[] = { [0] = llc_conn_ac_send_sabme_cmd_p_set_x, [1] = llc_conn_ac_start_ack_timer, [2] = llc_conn_ac_stop_other_timers, [3] = llc_conn_ac_set_retry_cnt_0, [4] = llc_conn_ac_set_cause_flag_0, [5] = NULL, }; static struct llc_conn_state_trans llc_common_state_trans_11b = { .ev = llc_conn_ev_ack_tmr_exp, .next_state = LLC_CONN_STATE_RESET, .ev_qualifiers = llc_common_ev_qfyrs_11b, .ev_actions = llc_common_actions_11b, }; /* State transitions for LLC_CONN_EV_REJ_TMR_EXP event */ static llc_conn_ev_qfyr_t llc_common_ev_qfyrs_11c[] = { [0] = llc_conn_ev_qlfy_retry_cnt_gte_n2, [1] = NULL, }; static llc_conn_action_t llc_common_actions_11c[] = { [0] = llc_conn_ac_send_sabme_cmd_p_set_x, [1] = llc_conn_ac_start_ack_timer, [2] = llc_conn_ac_stop_other_timers, [3] = llc_conn_ac_set_retry_cnt_0, [4] = llc_conn_ac_set_cause_flag_0, [5] = NULL, }; static struct llc_conn_state_trans llc_common_state_trans_11c = { .ev = llc_conn_ev_rej_tmr_exp, .next_state = LLC_CONN_STATE_RESET, .ev_qualifiers = llc_common_ev_qfyrs_11c, .ev_actions = llc_common_actions_11c, }; /* State transitions for LLC_CONN_EV_BUSY_TMR_EXP event */ static llc_conn_ev_qfyr_t llc_common_ev_qfyrs_11d[] = { [0] = llc_conn_ev_qlfy_retry_cnt_gte_n2, [1] = NULL, }; static llc_conn_action_t llc_common_actions_11d[] = { [0] = llc_conn_ac_send_sabme_cmd_p_set_x, [1] = llc_conn_ac_start_ack_timer, [2] = llc_conn_ac_stop_other_timers, [3] = 
llc_conn_ac_set_retry_cnt_0, [4] = llc_conn_ac_set_cause_flag_0, [5] = NULL, }; static struct llc_conn_state_trans llc_common_state_trans_11d = { .ev = llc_conn_ev_busy_tmr_exp, .next_state = LLC_CONN_STATE_RESET, .ev_qualifiers = llc_common_ev_qfyrs_11d, .ev_actions = llc_common_actions_11d, }; /* * Common dummy state transition; must be last entry for all state * transition groups - it'll be on .bss, so will be zeroed. */ static struct llc_conn_state_trans llc_common_state_trans_end; /* LLC_CONN_STATE_ADM transitions */ /* State transitions for LLC_CONN_EV_CONN_REQ event */ static llc_conn_action_t llc_adm_actions_1[] = { [0] = llc_conn_ac_send_sabme_cmd_p_set_x, [1] = llc_conn_ac_start_ack_timer, [2] = llc_conn_ac_set_retry_cnt_0, [3] = llc_conn_ac_set_s_flag_0, [4] = NULL, }; static struct llc_conn_state_trans llc_adm_state_trans_1 = { .ev = llc_conn_ev_conn_req, .next_state = LLC_CONN_STATE_SETUP, .ev_qualifiers = NONE, .ev_actions = llc_adm_actions_1, }; /* State transitions for LLC_CONN_EV_RX_SABME_CMD_Pbit_SET_X event */ static llc_conn_action_t llc_adm_actions_2[] = { [0] = llc_conn_ac_send_ua_rsp_f_set_p, [1] = llc_conn_ac_set_vs_0, [2] = llc_conn_ac_set_vr_0, [3] = llc_conn_ac_set_retry_cnt_0, [4] = llc_conn_ac_set_p_flag_0, [5] = llc_conn_ac_set_remote_busy_0, [6] = llc_conn_ac_conn_ind, [7] = NULL, }; static struct llc_conn_state_trans llc_adm_state_trans_2 = { .ev = llc_conn_ev_rx_sabme_cmd_pbit_set_x, .next_state = LLC_CONN_STATE_NORMAL, .ev_qualifiers = NONE, .ev_actions = llc_adm_actions_2, }; /* State transitions for LLC_CONN_EV_RX_DISC_CMD_Pbit_SET_X event */ static llc_conn_action_t llc_adm_actions_3[] = { [0] = llc_conn_ac_send_dm_rsp_f_set_p, [1] = llc_conn_disc, [2] = NULL, }; static struct llc_conn_state_trans llc_adm_state_trans_3 = { .ev = llc_conn_ev_rx_disc_cmd_pbit_set_x, .next_state = LLC_CONN_STATE_ADM, .ev_qualifiers = NONE, .ev_actions = llc_adm_actions_3, }; /* State transitions for LLC_CONN_EV_RX_XXX_CMD_Pbit_SET_1 event */ static 
llc_conn_action_t llc_adm_actions_4[] = { [0] = llc_conn_ac_send_dm_rsp_f_set_1, [1] = llc_conn_disc, [2] = NULL, }; static struct llc_conn_state_trans llc_adm_state_trans_4 = { .ev = llc_conn_ev_rx_xxx_cmd_pbit_set_1, .next_state = LLC_CONN_STATE_ADM, .ev_qualifiers = NONE, .ev_actions = llc_adm_actions_4, }; /* State transitions for LLC_CONN_EV_RX_XXX_YYY event */ static llc_conn_action_t llc_adm_actions_5[] = { [0] = llc_conn_disc, [1] = NULL, }; static struct llc_conn_state_trans llc_adm_state_trans_5 = { .ev = llc_conn_ev_rx_any_frame, .next_state = LLC_CONN_OUT_OF_SVC, .ev_qualifiers = NONE, .ev_actions = llc_adm_actions_5, }; /* * Array of pointers; * one to each transition */ static struct llc_conn_state_trans *llc_adm_state_transitions[] = { [0] = &llc_adm_state_trans_1, /* Request */ [1] = &llc_common_state_trans_end, [2] = &llc_common_state_trans_end, /* local_busy */ [3] = &llc_common_state_trans_end, /* init_pf_cycle */ [4] = &llc_common_state_trans_end, /* timer */ [5] = &llc_adm_state_trans_2, /* Receive frame */ [6] = &llc_adm_state_trans_3, [7] = &llc_adm_state_trans_4, [8] = &llc_adm_state_trans_5, [9] = &llc_common_state_trans_end, }; /* LLC_CONN_STATE_SETUP transitions */ /* State transitions for LLC_CONN_EV_RX_SABME_CMD_Pbit_SET_X event */ static llc_conn_action_t llc_setup_actions_1[] = { [0] = llc_conn_ac_send_ua_rsp_f_set_p, [1] = llc_conn_ac_set_vs_0, [2] = llc_conn_ac_set_vr_0, [3] = llc_conn_ac_set_s_flag_1, [4] = NULL, }; static struct llc_conn_state_trans llc_setup_state_trans_1 = { .ev = llc_conn_ev_rx_sabme_cmd_pbit_set_x, .next_state = LLC_CONN_STATE_SETUP, .ev_qualifiers = NONE, .ev_actions = llc_setup_actions_1, }; /* State transitions for LLC_CONN_EV_RX_UA_RSP_Fbit_SET_X event */ static llc_conn_ev_qfyr_t llc_setup_ev_qfyrs_2[] = { [0] = llc_conn_ev_qlfy_p_flag_eq_f, [1] = llc_conn_ev_qlfy_set_status_conn, [2] = NULL, }; static llc_conn_action_t llc_setup_actions_2[] = { [0] = llc_conn_ac_stop_ack_timer, [1] = 
llc_conn_ac_set_vs_0, [2] = llc_conn_ac_set_vr_0, [3] = llc_conn_ac_upd_p_flag, [4] = llc_conn_ac_set_remote_busy_0, [5] = llc_conn_ac_conn_confirm, [6] = NULL, }; static struct llc_conn_state_trans llc_setup_state_trans_2 = { .ev = llc_conn_ev_rx_ua_rsp_fbit_set_x, .next_state = LLC_CONN_STATE_NORMAL, .ev_qualifiers = llc_setup_ev_qfyrs_2, .ev_actions = llc_setup_actions_2, }; /* State transitions for LLC_CONN_EV_ACK_TMR_EXP event */ static llc_conn_ev_qfyr_t llc_setup_ev_qfyrs_3[] = { [0] = llc_conn_ev_qlfy_s_flag_eq_1, [1] = llc_conn_ev_qlfy_set_status_conn, [2] = NULL, }; static llc_conn_action_t llc_setup_actions_3[] = { [0] = llc_conn_ac_set_p_flag_0, [1] = llc_conn_ac_set_remote_busy_0, [2] = llc_conn_ac_conn_confirm, [3] = NULL, }; static struct llc_conn_state_trans llc_setup_state_trans_3 = { .ev = llc_conn_ev_ack_tmr_exp, .next_state = LLC_CONN_STATE_NORMAL, .ev_qualifiers = llc_setup_ev_qfyrs_3, .ev_actions = llc_setup_actions_3, }; /* State transitions for LLC_CONN_EV_RX_DISC_CMD_Pbit_SET_X event */ static llc_conn_ev_qfyr_t llc_setup_ev_qfyrs_4[] = { [0] = llc_conn_ev_qlfy_set_status_disc, [1] = NULL, }; static llc_conn_action_t llc_setup_actions_4[] = { [0] = llc_conn_ac_send_dm_rsp_f_set_p, [1] = llc_conn_ac_stop_ack_timer, [2] = llc_conn_ac_conn_confirm, [3] = llc_conn_disc, [4] = NULL, }; static struct llc_conn_state_trans llc_setup_state_trans_4 = { .ev = llc_conn_ev_rx_disc_cmd_pbit_set_x, .next_state = LLC_CONN_STATE_ADM, .ev_qualifiers = llc_setup_ev_qfyrs_4, .ev_actions = llc_setup_actions_4, }; /* State transitions for LLC_CONN_EV_RX_DM_RSP_Fbit_SET_X event */ static llc_conn_ev_qfyr_t llc_setup_ev_qfyrs_5[] = { [0] = llc_conn_ev_qlfy_set_status_disc, [1] = NULL, }; static llc_conn_action_t llc_setup_actions_5[] = { [0] = llc_conn_ac_stop_ack_timer, [1] = llc_conn_ac_conn_confirm, [2] = llc_conn_disc, [3] = NULL, }; static struct llc_conn_state_trans llc_setup_state_trans_5 = { .ev = llc_conn_ev_rx_dm_rsp_fbit_set_x, .next_state = 
LLC_CONN_STATE_ADM, .ev_qualifiers = llc_setup_ev_qfyrs_5, .ev_actions = llc_setup_actions_5, }; /* State transitions for LLC_CONN_EV_ACK_TMR_EXP event */ static llc_conn_ev_qfyr_t llc_setup_ev_qfyrs_7[] = { [0] = llc_conn_ev_qlfy_retry_cnt_lt_n2, [1] = llc_conn_ev_qlfy_s_flag_eq_0, [2] = NULL, }; static llc_conn_action_t llc_setup_actions_7[] = { [0] = llc_conn_ac_send_sabme_cmd_p_set_x, [1] = llc_conn_ac_start_ack_timer, [2] = llc_conn_ac_inc_retry_cnt_by_1, [3] = NULL, }; static struct llc_conn_state_trans llc_setup_state_trans_7 = { .ev = llc_conn_ev_ack_tmr_exp, .next_state = LLC_CONN_STATE_SETUP, .ev_qualifiers = llc_setup_ev_qfyrs_7, .ev_actions = llc_setup_actions_7, }; /* State transitions for LLC_CONN_EV_ACK_TMR_EXP event */ static llc_conn_ev_qfyr_t llc_setup_ev_qfyrs_8[] = { [0] = llc_conn_ev_qlfy_retry_cnt_gte_n2, [1] = llc_conn_ev_qlfy_s_flag_eq_0, [2] = llc_conn_ev_qlfy_set_status_failed, [3] = NULL, }; static llc_conn_action_t llc_setup_actions_8[] = { [0] = llc_conn_ac_conn_confirm, [1] = llc_conn_disc, [2] = NULL, }; static struct llc_conn_state_trans llc_setup_state_trans_8 = { .ev = llc_conn_ev_ack_tmr_exp, .next_state = LLC_CONN_STATE_ADM, .ev_qualifiers = llc_setup_ev_qfyrs_8, .ev_actions = llc_setup_actions_8, }; /* * Array of pointers; * one to each transition */ static struct llc_conn_state_trans *llc_setup_state_transitions[] = { [0] = &llc_common_state_trans_end, /* Request */ [1] = &llc_common_state_trans_end, /* local busy */ [2] = &llc_common_state_trans_end, /* init_pf_cycle */ [3] = &llc_setup_state_trans_3, /* Timer */ [4] = &llc_setup_state_trans_7, [5] = &llc_setup_state_trans_8, [6] = &llc_common_state_trans_end, [7] = &llc_setup_state_trans_1, /* Receive frame */ [8] = &llc_setup_state_trans_2, [9] = &llc_setup_state_trans_4, [10] = &llc_setup_state_trans_5, [11] = &llc_common_state_trans_end, }; /* LLC_CONN_STATE_NORMAL transitions */ /* State transitions for LLC_CONN_EV_DATA_REQ event */ static llc_conn_ev_qfyr_t 
llc_normal_ev_qfyrs_1[] = { [0] = llc_conn_ev_qlfy_remote_busy_eq_0, [1] = llc_conn_ev_qlfy_p_flag_eq_0, [2] = llc_conn_ev_qlfy_last_frame_eq_0, [3] = NULL, }; static llc_conn_action_t llc_normal_actions_1[] = { [0] = llc_conn_ac_send_i_as_ack, [1] = llc_conn_ac_start_ack_tmr_if_not_running, [2] = NULL, }; static struct llc_conn_state_trans llc_normal_state_trans_1 = { .ev = llc_conn_ev_data_req, .next_state = LLC_CONN_STATE_NORMAL, .ev_qualifiers = llc_normal_ev_qfyrs_1, .ev_actions = llc_normal_actions_1, }; /* State transitions for LLC_CONN_EV_DATA_REQ event */ static llc_conn_ev_qfyr_t llc_normal_ev_qfyrs_2[] = { [0] = llc_conn_ev_qlfy_remote_busy_eq_0, [1] = llc_conn_ev_qlfy_p_flag_eq_0, [2] = llc_conn_ev_qlfy_last_frame_eq_1, [3] = NULL, }; static llc_conn_action_t llc_normal_actions_2[] = { [0] = llc_conn_ac_send_i_cmd_p_set_1, [1] = llc_conn_ac_start_p_timer, [2] = NULL, }; static struct llc_conn_state_trans llc_normal_state_trans_2 = { .ev = llc_conn_ev_data_req, .next_state = LLC_CONN_STATE_NORMAL, .ev_qualifiers = llc_normal_ev_qfyrs_2, .ev_actions = llc_normal_actions_2, }; /* State transitions for LLC_CONN_EV_DATA_REQ event */ static llc_conn_ev_qfyr_t llc_normal_ev_qfyrs_2_1[] = { [0] = llc_conn_ev_qlfy_remote_busy_eq_1, [1] = llc_conn_ev_qlfy_set_status_remote_busy, [2] = NULL, }; /* just one member, NULL, .bss zeroes it */ static llc_conn_action_t llc_normal_actions_2_1[1]; static struct llc_conn_state_trans llc_normal_state_trans_2_1 = { .ev = llc_conn_ev_data_req, .next_state = LLC_CONN_STATE_NORMAL, .ev_qualifiers = llc_normal_ev_qfyrs_2_1, .ev_actions = llc_normal_actions_2_1, }; /* State transitions for LLC_CONN_EV_LOCAL_BUSY_DETECTED event */ static llc_conn_ev_qfyr_t llc_normal_ev_qfyrs_3[] = { [0] = llc_conn_ev_qlfy_p_flag_eq_0, [1] = NULL, }; static llc_conn_action_t llc_normal_actions_3[] = { [0] = llc_conn_ac_rst_sendack_flag, [1] = llc_conn_ac_send_rnr_xxx_x_set_0, [2] = llc_conn_ac_set_data_flag_0, [3] = NULL, }; static struct 
llc_conn_state_trans llc_normal_state_trans_3 = { .ev = llc_conn_ev_local_busy_detected, .next_state = LLC_CONN_STATE_BUSY, .ev_qualifiers = llc_normal_ev_qfyrs_3, .ev_actions = llc_normal_actions_3, }; /* State transitions for LLC_CONN_EV_LOCAL_BUSY_DETECTED event */ static llc_conn_ev_qfyr_t llc_normal_ev_qfyrs_4[] = { [0] = llc_conn_ev_qlfy_p_flag_eq_1, [1] = NULL, }; static llc_conn_action_t llc_normal_actions_4[] = { [0] = llc_conn_ac_rst_sendack_flag, [1] = llc_conn_ac_send_rnr_xxx_x_set_0, [2] = llc_conn_ac_set_data_flag_0, [3] = NULL, }; static struct llc_conn_state_trans llc_normal_state_trans_4 = { .ev = llc_conn_ev_local_busy_detected, .next_state = LLC_CONN_STATE_BUSY, .ev_qualifiers = llc_normal_ev_qfyrs_4, .ev_actions = llc_normal_actions_4, }; /* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_0_UNEXPD_Ns event */ static llc_conn_ev_qfyr_t llc_normal_ev_qfyrs_5a[] = { [0] = llc_conn_ev_qlfy_p_flag_eq_0, [1] = NULL, }; static llc_conn_action_t llc_normal_actions_5a[] = { [0] = llc_conn_ac_rst_sendack_flag, [1] = llc_conn_ac_send_rej_xxx_x_set_0, [2] = llc_conn_ac_upd_nr_received, [3] = llc_conn_ac_upd_p_flag, [4] = llc_conn_ac_start_rej_timer, [5] = llc_conn_ac_clear_remote_busy_if_f_eq_1, [6] = NULL, }; static struct llc_conn_state_trans llc_normal_state_trans_5a = { .ev = llc_conn_ev_rx_i_cmd_pbit_set_0_unexpd_ns, .next_state = LLC_CONN_STATE_REJ, .ev_qualifiers = llc_normal_ev_qfyrs_5a, .ev_actions = llc_normal_actions_5a, }; /* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_0_UNEXPD_Ns event */ static llc_conn_ev_qfyr_t llc_normal_ev_qfyrs_5b[] = { [0] = llc_conn_ev_qlfy_p_flag_eq_0, [1] = NULL, }; static llc_conn_action_t llc_normal_actions_5b[] = { [0] = llc_conn_ac_rst_sendack_flag, [1] = llc_conn_ac_send_rej_xxx_x_set_0, [2] = llc_conn_ac_upd_nr_received, [3] = llc_conn_ac_upd_p_flag, [4] = llc_conn_ac_start_rej_timer, [5] = llc_conn_ac_clear_remote_busy_if_f_eq_1, [6] = NULL, }; static struct llc_conn_state_trans 
llc_normal_state_trans_5b = { .ev = llc_conn_ev_rx_i_rsp_fbit_set_0_unexpd_ns, .next_state = LLC_CONN_STATE_REJ, .ev_qualifiers = llc_normal_ev_qfyrs_5b, .ev_actions = llc_normal_actions_5b, }; /* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_1_UNEXPD_Ns event */ static llc_conn_ev_qfyr_t llc_normal_ev_qfyrs_5c[] = { [0] = llc_conn_ev_qlfy_p_flag_eq_1, [1] = NULL, }; static llc_conn_action_t llc_normal_actions_5c[] = { [0] = llc_conn_ac_rst_sendack_flag, [1] = llc_conn_ac_send_rej_xxx_x_set_0, [2] = llc_conn_ac_upd_nr_received, [3] = llc_conn_ac_upd_p_flag, [4] = llc_conn_ac_start_rej_timer, [5] = llc_conn_ac_clear_remote_busy_if_f_eq_1, [6] = NULL, }; static struct llc_conn_state_trans llc_normal_state_trans_5c = { .ev = llc_conn_ev_rx_i_rsp_fbit_set_1_unexpd_ns, .next_state = LLC_CONN_STATE_REJ, .ev_qualifiers = llc_normal_ev_qfyrs_5c, .ev_actions = llc_normal_actions_5c, }; /* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_0_UNEXPD_Ns event */ static llc_conn_ev_qfyr_t llc_normal_ev_qfyrs_6a[] = { [0] = llc_conn_ev_qlfy_p_flag_eq_1, [1] = NULL, }; static llc_conn_action_t llc_normal_actions_6a[] = { [0] = llc_conn_ac_rst_sendack_flag, [1] = llc_conn_ac_send_rej_xxx_x_set_0, [2] = llc_conn_ac_upd_nr_received, [3] = llc_conn_ac_start_rej_timer, [4] = NULL, }; static struct llc_conn_state_trans llc_normal_state_trans_6a = { .ev = llc_conn_ev_rx_i_cmd_pbit_set_0_unexpd_ns, .next_state = LLC_CONN_STATE_REJ, .ev_qualifiers = llc_normal_ev_qfyrs_6a, .ev_actions = llc_normal_actions_6a, }; /* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_0_UNEXPD_Ns event */ static llc_conn_ev_qfyr_t llc_normal_ev_qfyrs_6b[] = { [0] = llc_conn_ev_qlfy_p_flag_eq_1, [1] = NULL, }; static llc_conn_action_t llc_normal_actions_6b[] = { [0] = llc_conn_ac_rst_sendack_flag, [1] = llc_conn_ac_send_rej_xxx_x_set_0, [2] = llc_conn_ac_upd_nr_received, [3] = llc_conn_ac_start_rej_timer, [4] = NULL, }; static struct llc_conn_state_trans llc_normal_state_trans_6b = { .ev = 
llc_conn_ev_rx_i_rsp_fbit_set_0_unexpd_ns, .next_state = LLC_CONN_STATE_REJ, .ev_qualifiers = llc_normal_ev_qfyrs_6b, .ev_actions = llc_normal_actions_6b, }; /* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_1_UNEXPD_Ns event */ static llc_conn_action_t llc_normal_actions_7[] = { [0] = llc_conn_ac_rst_sendack_flag, [1] = llc_conn_ac_send_rej_rsp_f_set_1, [2] = llc_conn_ac_upd_nr_received, [3] = llc_conn_ac_start_rej_timer, [4] = NULL, }; static struct llc_conn_state_trans llc_normal_state_trans_7 = { .ev = llc_conn_ev_rx_i_cmd_pbit_set_1_unexpd_ns, .next_state = LLC_CONN_STATE_REJ, .ev_qualifiers = NONE, .ev_actions = llc_normal_actions_7, }; /* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_X event */ static llc_conn_ev_qfyr_t llc_normal_ev_qfyrs_8a[] = { [0] = llc_conn_ev_qlfy_p_flag_eq_f, [1] = NULL, }; static llc_conn_action_t llc_normal_actions_8[] = { [0] = llc_conn_ac_inc_vr_by_1, [1] = llc_conn_ac_data_ind, [2] = llc_conn_ac_upd_p_flag, [3] = llc_conn_ac_upd_nr_received, [4] = llc_conn_ac_clear_remote_busy_if_f_eq_1, [5] = llc_conn_ac_send_ack_if_needed, [6] = NULL, }; static struct llc_conn_state_trans llc_normal_state_trans_8a = { .ev = llc_conn_ev_rx_i_rsp_fbit_set_x, .next_state = LLC_CONN_STATE_NORMAL, .ev_qualifiers = llc_normal_ev_qfyrs_8a, .ev_actions = llc_normal_actions_8, }; /* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_0 event */ static llc_conn_ev_qfyr_t llc_normal_ev_qfyrs_8b[] = { [0] = llc_conn_ev_qlfy_p_flag_eq_0, [1] = NULL, }; static struct llc_conn_state_trans llc_normal_state_trans_8b = { .ev = llc_conn_ev_rx_i_cmd_pbit_set_0, .next_state = LLC_CONN_STATE_NORMAL, .ev_qualifiers = llc_normal_ev_qfyrs_8b, .ev_actions = llc_normal_actions_8, }; /* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_0 event */ static llc_conn_ev_qfyr_t llc_normal_ev_qfyrs_9a[] = { [0] = llc_conn_ev_qlfy_p_flag_eq_1, [1] = NULL, }; static llc_conn_action_t llc_normal_actions_9a[] = { [0] = llc_conn_ac_inc_vr_by_1, [1] = 
/*
 * LLC type-2 connection state machine tables, NORMAL state (continued).
 * Each llc_conn_action_t / llc_conn_ev_qfyr_t array below is a
 * NULL-terminated list of handler function pointers (declared elsewhere in
 * net/llc); each struct llc_conn_state_trans binds one event to its
 * qualifier list, its action list and the resulting connection state.
 */
/* Tail of llc_normal_actions_9a -- the array's opening brace and entries
 * [0]..[1] are on an earlier line, outside this view. */
llc_conn_ac_upd_nr_received, [2] = llc_conn_ac_data_ind, [3] = llc_conn_ac_send_ack_if_needed, [4] = NULL, };

static struct llc_conn_state_trans llc_normal_state_trans_9a = {
	.ev		= llc_conn_ev_rx_i_rsp_fbit_set_0,
	.next_state	= LLC_CONN_STATE_NORMAL,
	.ev_qualifiers	= llc_normal_ev_qfyrs_9a,
	.ev_actions	= llc_normal_actions_9a,
};

/* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_0 event */
static llc_conn_ev_qfyr_t llc_normal_ev_qfyrs_9b[] = {
	[0] = llc_conn_ev_qlfy_p_flag_eq_1,
	[1] = NULL,
};

static llc_conn_action_t llc_normal_actions_9b[] = {
	[0] = llc_conn_ac_inc_vr_by_1,
	[1] = llc_conn_ac_upd_nr_received,
	[2] = llc_conn_ac_data_ind,
	[3] = llc_conn_ac_send_ack_if_needed,
	[4] = NULL,
};

static struct llc_conn_state_trans llc_normal_state_trans_9b = {
	.ev		= llc_conn_ev_rx_i_cmd_pbit_set_0,
	.next_state	= LLC_CONN_STATE_NORMAL,
	.ev_qualifiers	= llc_normal_ev_qfyrs_9b,
	.ev_actions	= llc_normal_actions_9b,
};

/* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_1 event */
static llc_conn_action_t llc_normal_actions_10[] = {
	[0] = llc_conn_ac_inc_vr_by_1,
	[1] = llc_conn_ac_send_ack_rsp_f_set_1,
	[2] = llc_conn_ac_rst_sendack_flag,
	[3] = llc_conn_ac_upd_nr_received,
	[4] = llc_conn_ac_data_ind,
	[5] = NULL,
};

static struct llc_conn_state_trans llc_normal_state_trans_10 = {
	.ev		= llc_conn_ev_rx_i_cmd_pbit_set_1,
	.next_state	= LLC_CONN_STATE_NORMAL,
	.ev_qualifiers	= NONE,	/* unconditional: no qualifier checks */
	.ev_actions	= llc_normal_actions_10,
};

/* State transitions for
 * LLC_CONN_EV_RX_RR_CMD_Pbit_SET_0 event
 */
static llc_conn_action_t llc_normal_actions_11a[] = {
	[0] = llc_conn_ac_upd_p_flag,
	[1] = llc_conn_ac_upd_nr_received,
	[2] = llc_conn_ac_clear_remote_busy,
	[3] = NULL,
};

static struct llc_conn_state_trans llc_normal_state_trans_11a = {
	.ev		= llc_conn_ev_rx_rr_cmd_pbit_set_0,
	.next_state	= LLC_CONN_STATE_NORMAL,
	.ev_qualifiers	= NONE,
	.ev_actions	= llc_normal_actions_11a,
};

/* State transitions for LLC_CONN_EV_RX_RR_RSP_Fbit_SET_0 event */
static llc_conn_action_t llc_normal_actions_11b[] = {
	[0] = llc_conn_ac_upd_p_flag,
	[1] = llc_conn_ac_upd_nr_received,
	[2] = llc_conn_ac_clear_remote_busy,
	[3] = NULL,
};

static struct llc_conn_state_trans llc_normal_state_trans_11b = {
	.ev		= llc_conn_ev_rx_rr_rsp_fbit_set_0,
	.next_state	= LLC_CONN_STATE_NORMAL,
	.ev_qualifiers	= NONE,
	.ev_actions	= llc_normal_actions_11b,
};

/* State transitions for LLC_CONN_EV_RX_RR_RSP_Fbit_SET_1 event */
static llc_conn_ev_qfyr_t llc_normal_ev_qfyrs_11c[] = {
	[0] = llc_conn_ev_qlfy_p_flag_eq_1,
	[1] = NULL,
};

static llc_conn_action_t llc_normal_actions_11c[] = {
	[0] = llc_conn_ac_upd_p_flag,
	[1] = llc_conn_ac_upd_nr_received,
	[2] = llc_conn_ac_inc_tx_win_size,
	[3] = llc_conn_ac_clear_remote_busy,
	[4] = NULL,
};

static struct llc_conn_state_trans llc_normal_state_trans_11c = {
	.ev		= llc_conn_ev_rx_rr_rsp_fbit_set_1,
	.next_state	= LLC_CONN_STATE_NORMAL,
	.ev_qualifiers	= llc_normal_ev_qfyrs_11c,
	.ev_actions	= llc_normal_actions_11c,
};

/* State transitions for LLC_CONN_EV_RX_RR_CMD_Pbit_SET_1 event */
static llc_conn_action_t llc_normal_actions_12[] = {
	[0] = llc_conn_ac_send_ack_rsp_f_set_1,
	[1] = llc_conn_ac_upd_nr_received,
	[2] = llc_conn_ac_adjust_npta_by_rr,
	[3] = llc_conn_ac_rst_sendack_flag,
	[4] = llc_conn_ac_clear_remote_busy,
	[5] = NULL,
};

static struct llc_conn_state_trans llc_normal_state_trans_12 = {
	.ev		= llc_conn_ev_rx_rr_cmd_pbit_set_1,
	.next_state	= LLC_CONN_STATE_NORMAL,
	.ev_qualifiers	= NONE,
	.ev_actions	= llc_normal_actions_12,
};

/* State transitions for LLC_CONN_EV_RX_RNR_CMD_Pbit_SET_0 event */
static llc_conn_action_t llc_normal_actions_13a[] = {
	[0] = llc_conn_ac_upd_p_flag,
	[1] = llc_conn_ac_upd_nr_received,
	[2] = llc_conn_ac_set_remote_busy,
	[3] = NULL,
};

static struct llc_conn_state_trans llc_normal_state_trans_13a = {
	.ev		= llc_conn_ev_rx_rnr_cmd_pbit_set_0,
	.next_state	= LLC_CONN_STATE_NORMAL,
	.ev_qualifiers	= NONE,
	.ev_actions	= llc_normal_actions_13a,
};

/* State transitions for LLC_CONN_EV_RX_RNR_RSP_Fbit_SET_0 event */
static llc_conn_action_t llc_normal_actions_13b[] = {
	[0] = llc_conn_ac_upd_p_flag,
	[1] = llc_conn_ac_upd_nr_received,
	[2] = llc_conn_ac_set_remote_busy,
	[3] = NULL,
};

static struct llc_conn_state_trans llc_normal_state_trans_13b = {
	.ev		= llc_conn_ev_rx_rnr_rsp_fbit_set_0,
	.next_state	= LLC_CONN_STATE_NORMAL,
	.ev_qualifiers	= NONE,
	.ev_actions	= llc_normal_actions_13b,
};

/* State transitions for LLC_CONN_EV_RX_RNR_RSP_Fbit_SET_1 event */
static llc_conn_ev_qfyr_t llc_normal_ev_qfyrs_13c[] = {
	[0] = llc_conn_ev_qlfy_p_flag_eq_1,
	[1] = NULL,
};

static llc_conn_action_t llc_normal_actions_13c[] = {
	[0] = llc_conn_ac_upd_p_flag,
	[1] = llc_conn_ac_upd_nr_received,
	[2] = llc_conn_ac_set_remote_busy,
	[3] = NULL,
};

static struct llc_conn_state_trans llc_normal_state_trans_13c = {
	.ev		= llc_conn_ev_rx_rnr_rsp_fbit_set_1,
	.next_state	= LLC_CONN_STATE_NORMAL,
	.ev_qualifiers	= llc_normal_ev_qfyrs_13c,
	.ev_actions	= llc_normal_actions_13c,
};

/* State transitions for LLC_CONN_EV_RX_RNR_CMD_Pbit_SET_1 event */
static llc_conn_action_t llc_normal_actions_14[] = {
	[0] = llc_conn_ac_send_rr_rsp_f_set_1,
	[1] = llc_conn_ac_upd_nr_received,
	[2] = llc_conn_ac_adjust_npta_by_rnr,
	[3] = llc_conn_ac_rst_sendack_flag,
	[4] = llc_conn_ac_set_remote_busy,
	[5] = NULL,
};

static struct llc_conn_state_trans llc_normal_state_trans_14 = {
	.ev		= llc_conn_ev_rx_rnr_cmd_pbit_set_1,
	.next_state	= LLC_CONN_STATE_NORMAL,
	.ev_qualifiers	= NONE,
	.ev_actions	= llc_normal_actions_14,
};

/* State transitions for LLC_CONN_EV_RX_REJ_CMD_Pbit_SET_0 event */
static llc_conn_ev_qfyr_t llc_normal_ev_qfyrs_15a[] = {
	[0] = llc_conn_ev_qlfy_p_flag_eq_0,
	[1] = NULL,
};

static llc_conn_action_t llc_normal_actions_15a[] = {
	[0] = llc_conn_ac_set_vs_nr,
	[1] = llc_conn_ac_upd_nr_received,
	[2] = llc_conn_ac_upd_p_flag,
	[3] = llc_conn_ac_dec_tx_win_size,
	[4] = llc_conn_ac_resend_i_xxx_x_set_0,
	[5] = llc_conn_ac_clear_remote_busy,
	[6] = NULL,
};

/* Initializer continues on the next line of this (flattened) file. */
static struct llc_conn_state_trans llc_normal_state_trans_15a = {
	.ev =
/* Completion of llc_normal_state_trans_15a (".ev =" is on the previous
 * line): REJ command, P bit 0, handled while the P flag equals 0. */
llc_conn_ev_rx_rej_cmd_pbit_set_0,
	.next_state	= LLC_CONN_STATE_NORMAL,
	.ev_qualifiers	= llc_normal_ev_qfyrs_15a,
	.ev_actions	= llc_normal_actions_15a,
};

/* State transitions for LLC_CONN_EV_RX_REJ_RSP_Fbit_SET_X event */
static llc_conn_ev_qfyr_t llc_normal_ev_qfyrs_15b[] = {
	[0] = llc_conn_ev_qlfy_p_flag_eq_f,
	[1] = NULL,
};

static llc_conn_action_t llc_normal_actions_15b[] = {
	[0] = llc_conn_ac_set_vs_nr,
	[1] = llc_conn_ac_upd_nr_received,
	[2] = llc_conn_ac_upd_p_flag,
	[3] = llc_conn_ac_dec_tx_win_size,
	[4] = llc_conn_ac_resend_i_xxx_x_set_0,
	[5] = llc_conn_ac_clear_remote_busy,
	[6] = NULL,
};

static struct llc_conn_state_trans llc_normal_state_trans_15b = {
	.ev		= llc_conn_ev_rx_rej_rsp_fbit_set_x,
	.next_state	= LLC_CONN_STATE_NORMAL,
	.ev_qualifiers	= llc_normal_ev_qfyrs_15b,
	.ev_actions	= llc_normal_actions_15b,
};

/* State transitions for LLC_CONN_EV_RX_REJ_CMD_Pbit_SET_0 event */
static llc_conn_ev_qfyr_t llc_normal_ev_qfyrs_16a[] = {
	[0] = llc_conn_ev_qlfy_p_flag_eq_1,
	[1] = NULL,
};

static llc_conn_action_t llc_normal_actions_16a[] = {
	[0] = llc_conn_ac_set_vs_nr,
	[1] = llc_conn_ac_upd_nr_received,
	[2] = llc_conn_ac_dec_tx_win_size,
	[3] = llc_conn_ac_resend_i_xxx_x_set_0,
	[4] = llc_conn_ac_clear_remote_busy,
	[5] = NULL,
};

static struct llc_conn_state_trans llc_normal_state_trans_16a = {
	.ev		= llc_conn_ev_rx_rej_cmd_pbit_set_0,
	.next_state	= LLC_CONN_STATE_NORMAL,
	.ev_qualifiers	= llc_normal_ev_qfyrs_16a,
	.ev_actions	= llc_normal_actions_16a,
};

/* State transitions for LLC_CONN_EV_RX_REJ_RSP_Fbit_SET_0 event */
static llc_conn_ev_qfyr_t llc_normal_ev_qfyrs_16b[] = {
	[0] = llc_conn_ev_qlfy_p_flag_eq_1,
	[1] = NULL,
};

static llc_conn_action_t llc_normal_actions_16b[] = {
	[0] = llc_conn_ac_set_vs_nr,
	[1] = llc_conn_ac_upd_nr_received,
	[2] = llc_conn_ac_dec_tx_win_size,
	[3] = llc_conn_ac_resend_i_xxx_x_set_0,
	[4] = llc_conn_ac_clear_remote_busy,
	[5] = NULL,
};

/* Initializer continues on the next line of this (flattened) file. */
static struct llc_conn_state_trans llc_normal_state_trans_16b = {
	.ev =
/* Completion of llc_normal_state_trans_16b (".ev =" is on the previous
 * line), then the remaining NORMAL-state table entries and the master
 * transition-pointer array for the NORMAL state. */
llc_conn_ev_rx_rej_rsp_fbit_set_0,
	.next_state	= LLC_CONN_STATE_NORMAL,
	.ev_qualifiers	= llc_normal_ev_qfyrs_16b,
	.ev_actions	= llc_normal_actions_16b,
};

/* State transitions for LLC_CONN_EV_RX_REJ_CMD_Pbit_SET_1 event */
static llc_conn_action_t llc_normal_actions_17[] = {
	[0] = llc_conn_ac_set_vs_nr,
	[1] = llc_conn_ac_upd_nr_received,
	[2] = llc_conn_ac_dec_tx_win_size,
	[3] = llc_conn_ac_resend_i_rsp_f_set_1,
	[4] = llc_conn_ac_clear_remote_busy,
	[5] = NULL,
};

static struct llc_conn_state_trans llc_normal_state_trans_17 = {
	.ev		= llc_conn_ev_rx_rej_cmd_pbit_set_1,
	.next_state	= LLC_CONN_STATE_NORMAL,
	.ev_qualifiers	= NONE,
	.ev_actions	= llc_normal_actions_17,
};

/* State transitions for LLC_CONN_EV_INIT_P_F_CYCLE event */
static llc_conn_ev_qfyr_t llc_normal_ev_qfyrs_18[] = {
	[0] = llc_conn_ev_qlfy_p_flag_eq_0,
	[1] = NULL,
};

static llc_conn_action_t llc_normal_actions_18[] = {
	[0] = llc_conn_ac_send_rr_cmd_p_set_1,
	[1] = llc_conn_ac_start_p_timer,
	[2] = NULL,
};

static struct llc_conn_state_trans llc_normal_state_trans_18 = {
	.ev		= llc_conn_ev_init_p_f_cycle,
	.next_state	= LLC_CONN_STATE_NORMAL,
	.ev_qualifiers	= llc_normal_ev_qfyrs_18,
	.ev_actions	= llc_normal_actions_18,
};

/* State transitions for LLC_CONN_EV_P_TMR_EXP event */
static llc_conn_ev_qfyr_t llc_normal_ev_qfyrs_19[] = {
	[0] = llc_conn_ev_qlfy_retry_cnt_lt_n2,
	[1] = NULL,
};

static llc_conn_action_t llc_normal_actions_19[] = {
	[0] = llc_conn_ac_rst_sendack_flag,
	[1] = llc_conn_ac_send_rr_cmd_p_set_1,
	[2] = llc_conn_ac_rst_vs,
	[3] = llc_conn_ac_start_p_timer,
	[4] = llc_conn_ac_inc_retry_cnt_by_1,
	[5] = NULL,
};

static struct llc_conn_state_trans llc_normal_state_trans_19 = {
	.ev		= llc_conn_ev_p_tmr_exp,
	.next_state	= LLC_CONN_STATE_AWAIT,
	.ev_qualifiers	= llc_normal_ev_qfyrs_19,
	.ev_actions	= llc_normal_actions_19,
};

/* State transitions for LLC_CONN_EV_ACK_TMR_EXP event */
static llc_conn_ev_qfyr_t llc_normal_ev_qfyrs_20a[] = {
	[0] = llc_conn_ev_qlfy_p_flag_eq_0,
	[1] = llc_conn_ev_qlfy_retry_cnt_lt_n2,
	[2] = NULL,
};

static llc_conn_action_t llc_normal_actions_20a[] = {
	[0] = llc_conn_ac_rst_sendack_flag,
	[1] = llc_conn_ac_send_rr_cmd_p_set_1,
	[2] = llc_conn_ac_rst_vs,
	[3] = llc_conn_ac_start_p_timer,
	[4] = llc_conn_ac_inc_retry_cnt_by_1,
	[5] = NULL,
};

static struct llc_conn_state_trans llc_normal_state_trans_20a = {
	.ev		= llc_conn_ev_ack_tmr_exp,
	.next_state	= LLC_CONN_STATE_AWAIT,
	.ev_qualifiers	= llc_normal_ev_qfyrs_20a,
	.ev_actions	= llc_normal_actions_20a,
};

/* State transitions for LLC_CONN_EV_BUSY_TMR_EXP event */
static llc_conn_ev_qfyr_t llc_normal_ev_qfyrs_20b[] = {
	[0] = llc_conn_ev_qlfy_p_flag_eq_0,
	[1] = llc_conn_ev_qlfy_retry_cnt_lt_n2,
	[2] = NULL,
};

static llc_conn_action_t llc_normal_actions_20b[] = {
	[0] = llc_conn_ac_rst_sendack_flag,
	[1] = llc_conn_ac_send_rr_cmd_p_set_1,
	[2] = llc_conn_ac_rst_vs,
	[3] = llc_conn_ac_start_p_timer,
	[4] = llc_conn_ac_inc_retry_cnt_by_1,
	[5] = NULL,
};

static struct llc_conn_state_trans llc_normal_state_trans_20b = {
	.ev		= llc_conn_ev_busy_tmr_exp,
	.next_state	= LLC_CONN_STATE_AWAIT,
	.ev_qualifiers	= llc_normal_ev_qfyrs_20b,
	.ev_actions	= llc_normal_actions_20b,
};

/* State transitions for LLC_CONN_EV_TX_BUFF_FULL event */
static llc_conn_ev_qfyr_t llc_normal_ev_qfyrs_21[] = {
	[0] = llc_conn_ev_qlfy_p_flag_eq_0,
	[1] = NULL,
};

static llc_conn_action_t llc_normal_actions_21[] = {
	[0] = llc_conn_ac_send_rr_cmd_p_set_1,
	[1] = llc_conn_ac_start_p_timer,
	[2] = NULL,
};

static struct llc_conn_state_trans llc_normal_state_trans_21 = {
	.ev		= llc_conn_ev_tx_buffer_full,
	.next_state	= LLC_CONN_STATE_NORMAL,
	.ev_qualifiers	= llc_normal_ev_qfyrs_21,
	.ev_actions	= llc_normal_actions_21,
};

/*
 * Array of pointers;
 * one to each transition
 */
static struct llc_conn_state_trans *llc_normal_state_transitions[] = {
	[0] = &llc_normal_state_trans_1,	/* Requests */
	[1] = &llc_normal_state_trans_2,
	[2] = &llc_normal_state_trans_2_1,
	[3] = &llc_common_state_trans_1,
	[4] = &llc_common_state_trans_2,
	[5] = &llc_common_state_trans_end,
	[6] = &llc_normal_state_trans_21,
	[7] = &llc_normal_state_trans_3,	/* Local busy */
	[8] = &llc_normal_state_trans_4,
	[9] = &llc_common_state_trans_end,
	[10] = &llc_normal_state_trans_18,	/* Init pf cycle */
	[11] = &llc_common_state_trans_end,
	[12] = &llc_common_state_trans_11a,	/* Timers */
	[13] = &llc_common_state_trans_11b,
	[14] = &llc_common_state_trans_11c,
	[15] = &llc_common_state_trans_11d,
	[16] = &llc_normal_state_trans_19,
	[17] = &llc_normal_state_trans_20a,
	[18] = &llc_normal_state_trans_20b,
	[19] = &llc_common_state_trans_end,
	[20] = &llc_normal_state_trans_8b,	/* Receive frames */
	[21] = &llc_normal_state_trans_9b,
	[22] = &llc_normal_state_trans_10,
	[23] = &llc_normal_state_trans_11b,
	[24] = &llc_normal_state_trans_11c,
	[25] = &llc_normal_state_trans_5a,
	[26] = &llc_normal_state_trans_5b,
	[27] = &llc_normal_state_trans_5c,
	[28] = &llc_normal_state_trans_6a,
	[29] = &llc_normal_state_trans_6b,
	[30] = &llc_normal_state_trans_7,
	[31] = &llc_normal_state_trans_8a,
	[32] = &llc_normal_state_trans_9a,
	[33] = &llc_normal_state_trans_11a,
	[34] = &llc_normal_state_trans_12,
	[35] = &llc_normal_state_trans_13a,
	[36] = &llc_normal_state_trans_13b,
	[37] = &llc_normal_state_trans_13c,
	[38] = &llc_normal_state_trans_14,
	[39] = &llc_normal_state_trans_15a,
	[40] = &llc_normal_state_trans_15b,
	[41] = &llc_normal_state_trans_16a,
	[42] = &llc_normal_state_trans_16b,
	[43] = &llc_normal_state_trans_17,
	[44] = &llc_common_state_trans_3,
	[45] = &llc_common_state_trans_4,
	[46] = &llc_common_state_trans_5,
	[47] = &llc_common_state_trans_6,
	[48] = &llc_common_state_trans_7a,
	[49] = &llc_common_state_trans_7b,
	[50] = &llc_common_state_trans_8a,
	[51] = &llc_common_state_trans_8b,
	[52] = &llc_common_state_trans_8c,
	[53] = &llc_common_state_trans_9,
	/* [54] = &llc_common_state_trans_10, */
	[54] = &llc_common_state_trans_end,
};

/* LLC_CONN_STATE_BUSY transitions */
/* State transitions for LLC_CONN_EV_DATA_REQ event */
/* Declarator continues on the next line of this (flattened) file. */
static llc_conn_ev_qfyr_t
/* BUSY-state tables. The "static llc_conn_ev_qfyr_t" declarator for this
 * first array sits at the end of the previous line. */
llc_busy_ev_qfyrs_1[] = {
	[0] = llc_conn_ev_qlfy_remote_busy_eq_0,
	[1] = llc_conn_ev_qlfy_p_flag_eq_0,
	[2] = NULL,
};

static llc_conn_action_t llc_busy_actions_1[] = {
	[0] = llc_conn_ac_send_i_xxx_x_set_0,
	[1] = llc_conn_ac_start_ack_tmr_if_not_running,
	[2] = NULL,
};

static struct llc_conn_state_trans llc_busy_state_trans_1 = {
	.ev		= llc_conn_ev_data_req,
	.next_state	= LLC_CONN_STATE_BUSY,
	.ev_qualifiers	= llc_busy_ev_qfyrs_1,
	.ev_actions	= llc_busy_actions_1,
};

/* State transitions for LLC_CONN_EV_DATA_REQ event */
static llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_2[] = {
	[0] = llc_conn_ev_qlfy_remote_busy_eq_0,
	[1] = llc_conn_ev_qlfy_p_flag_eq_1,
	[2] = NULL,
};

static llc_conn_action_t llc_busy_actions_2[] = {
	[0] = llc_conn_ac_send_i_xxx_x_set_0,
	[1] = llc_conn_ac_start_ack_tmr_if_not_running,
	[2] = NULL,
};

static struct llc_conn_state_trans llc_busy_state_trans_2 = {
	.ev		= llc_conn_ev_data_req,
	.next_state	= LLC_CONN_STATE_BUSY,
	.ev_qualifiers	= llc_busy_ev_qfyrs_2,
	.ev_actions	= llc_busy_actions_2,
};

/* State transitions for LLC_CONN_EV_DATA_REQ event */
static llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_2_1[] = {
	[0] = llc_conn_ev_qlfy_remote_busy_eq_1,
	[1] = llc_conn_ev_qlfy_set_status_remote_busy,
	[2] = NULL,
};

/* just one member, NULL, .bss zeroes it */
static llc_conn_action_t llc_busy_actions_2_1[1];

static struct llc_conn_state_trans llc_busy_state_trans_2_1 = {
	.ev		= llc_conn_ev_data_req,
	.next_state	= LLC_CONN_STATE_BUSY,
	.ev_qualifiers	= llc_busy_ev_qfyrs_2_1,
	.ev_actions	= llc_busy_actions_2_1,
};

/* State transitions for LLC_CONN_EV_LOCAL_BUSY_CLEARED event */
static llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_3[] = {
	[0] = llc_conn_ev_qlfy_data_flag_eq_1,
	[1] = llc_conn_ev_qlfy_p_flag_eq_0,
	[2] = NULL,
};

static llc_conn_action_t llc_busy_actions_3[] = {
	[0] = llc_conn_ac_send_rej_xxx_x_set_0,
	[1] = llc_conn_ac_start_rej_timer,
	[2] = NULL,
};

static struct llc_conn_state_trans llc_busy_state_trans_3 = {
	.ev		= llc_conn_ev_local_busy_cleared,
	.next_state	= LLC_CONN_STATE_REJ,
	.ev_qualifiers	= llc_busy_ev_qfyrs_3,
	.ev_actions	= llc_busy_actions_3,
};

/* State transitions for LLC_CONN_EV_LOCAL_BUSY_CLEARED event */
static llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_4[] = {
	[0] = llc_conn_ev_qlfy_data_flag_eq_1,
	[1] = llc_conn_ev_qlfy_p_flag_eq_1,
	[2] = NULL,
};

static llc_conn_action_t llc_busy_actions_4[] = {
	[0] = llc_conn_ac_send_rej_xxx_x_set_0,
	[1] = llc_conn_ac_start_rej_timer,
	[2] = NULL,
};

static struct llc_conn_state_trans llc_busy_state_trans_4 = {
	.ev		= llc_conn_ev_local_busy_cleared,
	.next_state	= LLC_CONN_STATE_REJ,
	.ev_qualifiers	= llc_busy_ev_qfyrs_4,
	.ev_actions	= llc_busy_actions_4,
};

/* State transitions for LLC_CONN_EV_LOCAL_BUSY_CLEARED event */
static llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_5[] = {
	[0] = llc_conn_ev_qlfy_data_flag_eq_0,
	[1] = llc_conn_ev_qlfy_p_flag_eq_0,
	[2] = NULL,
};

static llc_conn_action_t llc_busy_actions_5[] = {
	[0] = llc_conn_ac_send_rr_xxx_x_set_0,
	[1] = NULL,
};

static struct llc_conn_state_trans llc_busy_state_trans_5 = {
	.ev		= llc_conn_ev_local_busy_cleared,
	.next_state	= LLC_CONN_STATE_NORMAL,
	.ev_qualifiers	= llc_busy_ev_qfyrs_5,
	.ev_actions	= llc_busy_actions_5,
};

/* State transitions for LLC_CONN_EV_LOCAL_BUSY_CLEARED event */
static llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_6[] = {
	[0] = llc_conn_ev_qlfy_data_flag_eq_0,
	[1] = llc_conn_ev_qlfy_p_flag_eq_1,
	[2] = NULL,
};

static llc_conn_action_t llc_busy_actions_6[] = {
	[0] = llc_conn_ac_send_rr_xxx_x_set_0,
	[1] = NULL,
};

static struct llc_conn_state_trans llc_busy_state_trans_6 = {
	.ev		= llc_conn_ev_local_busy_cleared,
	.next_state	= LLC_CONN_STATE_NORMAL,
	.ev_qualifiers	= llc_busy_ev_qfyrs_6,
	.ev_actions	= llc_busy_actions_6,
};

/* State transitions for LLC_CONN_EV_LOCAL_BUSY_CLEARED event */
static llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_7[] = {
	[0] = llc_conn_ev_qlfy_data_flag_eq_2,
	[1] = llc_conn_ev_qlfy_p_flag_eq_0,
	[2] = NULL,
};

static llc_conn_action_t llc_busy_actions_7[] = {
	[0] = llc_conn_ac_send_rr_xxx_x_set_0,
	[1] = NULL,
};

static struct llc_conn_state_trans llc_busy_state_trans_7 = {
	.ev		= llc_conn_ev_local_busy_cleared,
	.next_state	= LLC_CONN_STATE_REJ,
	.ev_qualifiers	= llc_busy_ev_qfyrs_7,
	.ev_actions	= llc_busy_actions_7,
};

/* State transitions for LLC_CONN_EV_LOCAL_BUSY_CLEARED event */
static llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_8[] = {
	[0] = llc_conn_ev_qlfy_data_flag_eq_2,
	[1] = llc_conn_ev_qlfy_p_flag_eq_1,
	[2] = NULL,
};

static llc_conn_action_t llc_busy_actions_8[] = {
	[0] = llc_conn_ac_send_rr_xxx_x_set_0,
	[1] = NULL,
};

static struct llc_conn_state_trans llc_busy_state_trans_8 = {
	.ev		= llc_conn_ev_local_busy_cleared,
	.next_state	= LLC_CONN_STATE_REJ,
	.ev_qualifiers	= llc_busy_ev_qfyrs_8,
	.ev_actions	= llc_busy_actions_8,
};

/* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_X_UNEXPD_Ns event */
static llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_9a[] = {
	[0] = llc_conn_ev_qlfy_p_flag_eq_f,
	[1] = NULL,
};

static llc_conn_action_t llc_busy_actions_9a[] = {
	[0] = llc_conn_ac_opt_send_rnr_xxx_x_set_0,
	[1] = llc_conn_ac_upd_p_flag,
	[2] = llc_conn_ac_upd_nr_received,
	[3] = llc_conn_ac_set_data_flag_1_if_data_flag_eq_0,
	[4] = llc_conn_ac_clear_remote_busy_if_f_eq_1,
	[5] = NULL,
};

static struct llc_conn_state_trans llc_busy_state_trans_9a = {
	.ev		= llc_conn_ev_rx_i_rsp_fbit_set_x_unexpd_ns,
	.next_state	= LLC_CONN_STATE_BUSY,
	.ev_qualifiers	= llc_busy_ev_qfyrs_9a,
	.ev_actions	= llc_busy_actions_9a,
};

/* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_0_UNEXPD_Ns event */
static llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_9b[] = {
	[0] = llc_conn_ev_qlfy_p_flag_eq_0,
	[1] = NULL,
};

static llc_conn_action_t llc_busy_actions_9b[] = {
	[0] = llc_conn_ac_opt_send_rnr_xxx_x_set_0,
	[1] = llc_conn_ac_upd_p_flag,
	[2] = llc_conn_ac_upd_nr_received,
	[3] = llc_conn_ac_set_data_flag_1_if_data_flag_eq_0,
	[4] = llc_conn_ac_clear_remote_busy_if_f_eq_1,
	[5] = NULL,
};

/* Initializer continues on the next line of this (flattened) file. */
static struct llc_conn_state_trans llc_busy_state_trans_9b = {
	.ev		= llc_conn_ev_rx_i_cmd_pbit_set_0_unexpd_ns,
	.next_state =
/* Completion of llc_busy_state_trans_9b (".next_state =" is on the
 * previous line). */
LLC_CONN_STATE_BUSY,
	.ev_qualifiers	= llc_busy_ev_qfyrs_9b,
	.ev_actions	= llc_busy_actions_9b,
};

/* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_0_UNEXPD_Ns event */
static llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_10a[] = {
	[0] = llc_conn_ev_qlfy_p_flag_eq_1,
	[1] = NULL,
};

static llc_conn_action_t llc_busy_actions_10a[] = {
	[0] = llc_conn_ac_opt_send_rnr_xxx_x_set_0,
	[1] = llc_conn_ac_upd_nr_received,
	[2] = llc_conn_ac_set_data_flag_1_if_data_flag_eq_0,
	[3] = NULL,
};

static struct llc_conn_state_trans llc_busy_state_trans_10a = {
	.ev		= llc_conn_ev_rx_i_rsp_fbit_set_0_unexpd_ns,
	.next_state	= LLC_CONN_STATE_BUSY,
	.ev_qualifiers	= llc_busy_ev_qfyrs_10a,
	.ev_actions	= llc_busy_actions_10a,
};

/* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_0_UNEXPD_Ns event */
static llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_10b[] = {
	[0] = llc_conn_ev_qlfy_p_flag_eq_1,
	[1] = NULL,
};

static llc_conn_action_t llc_busy_actions_10b[] = {
	[0] = llc_conn_ac_opt_send_rnr_xxx_x_set_0,
	[1] = llc_conn_ac_upd_nr_received,
	[2] = llc_conn_ac_set_data_flag_1_if_data_flag_eq_0,
	[3] = NULL,
};

static struct llc_conn_state_trans llc_busy_state_trans_10b = {
	.ev		= llc_conn_ev_rx_i_cmd_pbit_set_0_unexpd_ns,
	.next_state	= LLC_CONN_STATE_BUSY,
	.ev_qualifiers	= llc_busy_ev_qfyrs_10b,
	.ev_actions	= llc_busy_actions_10b,
};

/* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_1_UNEXPD_Ns event */
static llc_conn_action_t llc_busy_actions_11[] = {
	[0] = llc_conn_ac_send_rnr_rsp_f_set_1,
	[1] = llc_conn_ac_upd_nr_received,
	[2] = llc_conn_ac_set_data_flag_1_if_data_flag_eq_0,
	[3] = NULL,
};

static struct llc_conn_state_trans llc_busy_state_trans_11 = {
	.ev		= llc_conn_ev_rx_i_cmd_pbit_set_1_unexpd_ns,
	.next_state	= LLC_CONN_STATE_BUSY,
	.ev_qualifiers	= NONE,
	.ev_actions	= llc_busy_actions_11,
};

/* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_1 event */
static llc_conn_action_t llc_busy_actions_12[] = {
	[0] = llc_conn_ac_inc_vr_by_1,
	[1] = llc_conn_ac_data_ind,
	[2] = llc_conn_ac_send_rnr_rsp_f_set_1,
	[3] = llc_conn_ac_upd_nr_received,
	[4] = llc_conn_ac_stop_rej_tmr_if_data_flag_eq_2,
	[5] = llc_conn_ac_set_data_flag_0,
	[6] = NULL,
};

static struct llc_conn_state_trans llc_busy_state_trans_12 = {
	.ev		= llc_conn_ev_rx_i_cmd_pbit_set_1,
	.next_state	= LLC_CONN_STATE_BUSY,
	.ev_qualifiers	= NONE,
	.ev_actions	= llc_busy_actions_12,
};

/* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_X event */
static llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_13a[] = {
	[0] = llc_conn_ev_qlfy_p_flag_eq_f,
	[1] = NULL,
};

static llc_conn_action_t llc_busy_actions_13a[] = {
	[0] = llc_conn_ac_inc_vr_by_1,
	[1] = llc_conn_ac_data_ind,
	[2] = llc_conn_ac_upd_p_flag,
	[3] = llc_conn_ac_opt_send_rnr_xxx_x_set_0,
	[4] = llc_conn_ac_upd_nr_received,
	[5] = llc_conn_ac_stop_rej_tmr_if_data_flag_eq_2,
	[6] = llc_conn_ac_set_data_flag_0,
	[7] = llc_conn_ac_clear_remote_busy_if_f_eq_1,
	[8] = NULL,
};

static struct llc_conn_state_trans llc_busy_state_trans_13a = {
	.ev		= llc_conn_ev_rx_i_rsp_fbit_set_x,
	.next_state	= LLC_CONN_STATE_BUSY,
	.ev_qualifiers	= llc_busy_ev_qfyrs_13a,
	.ev_actions	= llc_busy_actions_13a,
};

/* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_0 event */
static llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_13b[] = {
	[0] = llc_conn_ev_qlfy_p_flag_eq_0,
	[1] = NULL,
};

static llc_conn_action_t llc_busy_actions_13b[] = {
	[0] = llc_conn_ac_inc_vr_by_1,
	[1] = llc_conn_ac_data_ind,
	[2] = llc_conn_ac_upd_p_flag,
	[3] = llc_conn_ac_opt_send_rnr_xxx_x_set_0,
	[4] = llc_conn_ac_upd_nr_received,
	[5] = llc_conn_ac_stop_rej_tmr_if_data_flag_eq_2,
	[6] = llc_conn_ac_set_data_flag_0,
	[7] = llc_conn_ac_clear_remote_busy_if_f_eq_1,
	[8] = NULL,
};

static struct llc_conn_state_trans llc_busy_state_trans_13b = {
	.ev		= llc_conn_ev_rx_i_cmd_pbit_set_0,
	.next_state	= LLC_CONN_STATE_BUSY,
	.ev_qualifiers	= llc_busy_ev_qfyrs_13b,
	.ev_actions	= llc_busy_actions_13b,
};

/* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_0 event */
static llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_14a[] = {
	[0] = llc_conn_ev_qlfy_p_flag_eq_1,
	[1] = NULL,
};

static llc_conn_action_t llc_busy_actions_14a[] = {
	[0] = llc_conn_ac_inc_vr_by_1,
	[1] = llc_conn_ac_data_ind,
	[2] = llc_conn_ac_opt_send_rnr_xxx_x_set_0,
	[3] = llc_conn_ac_upd_nr_received,
	[4] = llc_conn_ac_stop_rej_tmr_if_data_flag_eq_2,
	[5] = llc_conn_ac_set_data_flag_0,
	[6] = NULL,
};

static struct llc_conn_state_trans llc_busy_state_trans_14a = {
	.ev		= llc_conn_ev_rx_i_rsp_fbit_set_0,
	.next_state	= LLC_CONN_STATE_BUSY,
	.ev_qualifiers	= llc_busy_ev_qfyrs_14a,
	.ev_actions	= llc_busy_actions_14a,
};

/* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_0 event */
static llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_14b[] = {
	[0] = llc_conn_ev_qlfy_p_flag_eq_1,
	[1] = NULL,
};

static llc_conn_action_t llc_busy_actions_14b[] = {
	[0] = llc_conn_ac_inc_vr_by_1,
	[1] = llc_conn_ac_data_ind,
	[2] = llc_conn_ac_opt_send_rnr_xxx_x_set_0,
	[3] = llc_conn_ac_upd_nr_received,
	[4] = llc_conn_ac_stop_rej_tmr_if_data_flag_eq_2,
	[5] = llc_conn_ac_set_data_flag_0,
	[6] = NULL,
};

static struct llc_conn_state_trans llc_busy_state_trans_14b = {
	.ev		= llc_conn_ev_rx_i_cmd_pbit_set_0,
	.next_state	= LLC_CONN_STATE_BUSY,
	.ev_qualifiers	= llc_busy_ev_qfyrs_14b,
	.ev_actions	= llc_busy_actions_14b,
};

/* State transitions for LLC_CONN_EV_RX_RR_CMD_Pbit_SET_0 event */
static llc_conn_action_t llc_busy_actions_15a[] = {
	[0] = llc_conn_ac_upd_p_flag,
	[1] = llc_conn_ac_upd_nr_received,
	[2] = llc_conn_ac_clear_remote_busy,
	[3] = NULL,
};

static struct llc_conn_state_trans llc_busy_state_trans_15a = {
	.ev		= llc_conn_ev_rx_rr_cmd_pbit_set_0,
	.next_state	= LLC_CONN_STATE_BUSY,
	.ev_qualifiers	= NONE,
	.ev_actions	= llc_busy_actions_15a,
};

/* State transitions for LLC_CONN_EV_RX_RR_RSP_Fbit_SET_0 event */
static llc_conn_action_t llc_busy_actions_15b[] = {
	[0] = llc_conn_ac_upd_p_flag,
	[1] = llc_conn_ac_upd_nr_received,
	[2] = llc_conn_ac_clear_remote_busy,
	[3] = NULL,
};

/* Initializer continues on the next line of this (flattened) file. */
static struct llc_conn_state_trans llc_busy_state_trans_15b = {
	.ev =
/* Completion of llc_busy_state_trans_15b (".ev =" is on the previous
 * line). */
llc_conn_ev_rx_rr_rsp_fbit_set_0,
	.next_state	= LLC_CONN_STATE_BUSY,
	.ev_qualifiers	= NONE,
	.ev_actions	= llc_busy_actions_15b,
};

/* State transitions for LLC_CONN_EV_RX_RR_RSP_Fbit_SET_1 event */
static llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_15c[] = {
	[0] = llc_conn_ev_qlfy_p_flag_eq_1,
	[1] = NULL,
};

static llc_conn_action_t llc_busy_actions_15c[] = {
	[0] = llc_conn_ac_upd_p_flag,
	[1] = llc_conn_ac_upd_nr_received,
	[2] = llc_conn_ac_clear_remote_busy,
	[3] = NULL,
};

static struct llc_conn_state_trans llc_busy_state_trans_15c = {
	.ev		= llc_conn_ev_rx_rr_rsp_fbit_set_1,
	.next_state	= LLC_CONN_STATE_BUSY,
	.ev_qualifiers	= llc_busy_ev_qfyrs_15c,
	.ev_actions	= llc_busy_actions_15c,
};

/* State transitions for LLC_CONN_EV_RX_RR_CMD_Pbit_SET_1 event */
static llc_conn_action_t llc_busy_actions_16[] = {
	[0] = llc_conn_ac_send_rnr_rsp_f_set_1,
	[1] = llc_conn_ac_upd_nr_received,
	[2] = llc_conn_ac_clear_remote_busy,
	[3] = NULL,
};

static struct llc_conn_state_trans llc_busy_state_trans_16 = {
	.ev		= llc_conn_ev_rx_rr_cmd_pbit_set_1,
	.next_state	= LLC_CONN_STATE_BUSY,
	.ev_qualifiers	= NONE,
	.ev_actions	= llc_busy_actions_16,
};

/* State transitions for LLC_CONN_EV_RX_RNR_CMD_Pbit_SET_0 event */
static llc_conn_action_t llc_busy_actions_17a[] = {
	[0] = llc_conn_ac_upd_p_flag,
	[1] = llc_conn_ac_upd_nr_received,
	[2] = llc_conn_ac_set_remote_busy,
	[3] = NULL,
};

static struct llc_conn_state_trans llc_busy_state_trans_17a = {
	.ev		= llc_conn_ev_rx_rnr_cmd_pbit_set_0,
	.next_state	= LLC_CONN_STATE_BUSY,
	.ev_qualifiers	= NONE,
	.ev_actions	= llc_busy_actions_17a,
};

/* State transitions for LLC_CONN_EV_RX_RNR_RSP_Fbit_SET_0 event */
static llc_conn_action_t llc_busy_actions_17b[] = {
	[0] = llc_conn_ac_upd_p_flag,
	[1] = llc_conn_ac_upd_nr_received,
	[2] = llc_conn_ac_set_remote_busy,
	[3] = NULL,
};

static struct llc_conn_state_trans llc_busy_state_trans_17b = {
	.ev		= llc_conn_ev_rx_rnr_rsp_fbit_set_0,
	.next_state	= LLC_CONN_STATE_BUSY,
	.ev_qualifiers	= NONE,
	.ev_actions	= llc_busy_actions_17b,
};

/* State transitions for LLC_CONN_EV_RX_RNR_RSP_Fbit_SET_1 event */
static llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_17c[] = {
	[0] = llc_conn_ev_qlfy_p_flag_eq_1,
	[1] = NULL,
};

static llc_conn_action_t llc_busy_actions_17c[] = {
	[0] = llc_conn_ac_upd_p_flag,
	[1] = llc_conn_ac_upd_nr_received,
	[2] = llc_conn_ac_set_remote_busy,
	[3] = NULL,
};

static struct llc_conn_state_trans llc_busy_state_trans_17c = {
	.ev		= llc_conn_ev_rx_rnr_rsp_fbit_set_1,
	.next_state	= LLC_CONN_STATE_BUSY,
	.ev_qualifiers	= llc_busy_ev_qfyrs_17c,
	.ev_actions	= llc_busy_actions_17c,
};

/* State transitions for LLC_CONN_EV_RX_RNR_CMD_Pbit_SET_1 event */
static llc_conn_action_t llc_busy_actions_18[] = {
	[0] = llc_conn_ac_send_rnr_rsp_f_set_1,
	[1] = llc_conn_ac_upd_nr_received,
	[2] = llc_conn_ac_set_remote_busy,
	[3] = NULL,
};

static struct llc_conn_state_trans llc_busy_state_trans_18 = {
	.ev		= llc_conn_ev_rx_rnr_cmd_pbit_set_1,
	.next_state	= LLC_CONN_STATE_BUSY,
	.ev_qualifiers	= NONE,
	.ev_actions	= llc_busy_actions_18,
};

/* State transitions for LLC_CONN_EV_RX_REJ_CMD_Pbit_SET_0 event */
static llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_19a[] = {
	[0] = llc_conn_ev_qlfy_p_flag_eq_0,
	[1] = NULL,
};

static llc_conn_action_t llc_busy_actions_19a[] = {
	[0] = llc_conn_ac_set_vs_nr,
	[1] = llc_conn_ac_upd_nr_received,
	[2] = llc_conn_ac_upd_p_flag,
	[3] = llc_conn_ac_resend_i_xxx_x_set_0,
	[4] = llc_conn_ac_clear_remote_busy,
	[5] = NULL,
};

static struct llc_conn_state_trans llc_busy_state_trans_19a = {
	.ev		= llc_conn_ev_rx_rej_cmd_pbit_set_0,
	.next_state	= LLC_CONN_STATE_BUSY,
	.ev_qualifiers	= llc_busy_ev_qfyrs_19a,
	.ev_actions	= llc_busy_actions_19a,
};

/* State transitions for LLC_CONN_EV_RX_REJ_RSP_Fbit_SET_X event */
static llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_19b[] = {
	[0] = llc_conn_ev_qlfy_p_flag_eq_f,
	[1] = NULL,
};

static llc_conn_action_t llc_busy_actions_19b[] = {
	[0] = llc_conn_ac_set_vs_nr,
	[1] = llc_conn_ac_upd_nr_received,
	[2] = llc_conn_ac_upd_p_flag,
	[3] = llc_conn_ac_resend_i_xxx_x_set_0,
	[4] = llc_conn_ac_clear_remote_busy,
	[5] = NULL,
};

static struct llc_conn_state_trans llc_busy_state_trans_19b = {
	.ev		= llc_conn_ev_rx_rej_rsp_fbit_set_x,
	.next_state	= LLC_CONN_STATE_BUSY,
	.ev_qualifiers	= llc_busy_ev_qfyrs_19b,
	.ev_actions	= llc_busy_actions_19b,
};

/* State transitions for LLC_CONN_EV_RX_REJ_CMD_Pbit_SET_0 event */
static llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_20a[] = {
	[0] = llc_conn_ev_qlfy_p_flag_eq_1,
	[1] = NULL,
};

static llc_conn_action_t llc_busy_actions_20a[] = {
	[0] = llc_conn_ac_set_vs_nr,
	[1] = llc_conn_ac_upd_nr_received,
	[2] = llc_conn_ac_resend_i_xxx_x_set_0,
	[3] = llc_conn_ac_clear_remote_busy,
	[4] = NULL,
};

static struct llc_conn_state_trans llc_busy_state_trans_20a = {
	.ev		= llc_conn_ev_rx_rej_cmd_pbit_set_0,
	.next_state	= LLC_CONN_STATE_BUSY,
	.ev_qualifiers	= llc_busy_ev_qfyrs_20a,
	.ev_actions	= llc_busy_actions_20a,
};

/* State transitions for LLC_CONN_EV_RX_REJ_RSP_Fbit_SET_0 event */
static llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_20b[] = {
	[0] = llc_conn_ev_qlfy_p_flag_eq_1,
	[1] = NULL,
};

static llc_conn_action_t llc_busy_actions_20b[] = {
	[0] = llc_conn_ac_set_vs_nr,
	[1] = llc_conn_ac_upd_nr_received,
	[2] = llc_conn_ac_resend_i_xxx_x_set_0,
	[3] = llc_conn_ac_clear_remote_busy,
	[4] = NULL,
};

static struct llc_conn_state_trans llc_busy_state_trans_20b = {
	.ev		= llc_conn_ev_rx_rej_rsp_fbit_set_0,
	.next_state	= LLC_CONN_STATE_BUSY,
	.ev_qualifiers	= llc_busy_ev_qfyrs_20b,
	.ev_actions	= llc_busy_actions_20b,
};

/* State transitions for LLC_CONN_EV_RX_REJ_CMD_Pbit_SET_1 event */
static llc_conn_action_t llc_busy_actions_21[] = {
	[0] = llc_conn_ac_set_vs_nr,
	[1] = llc_conn_ac_upd_nr_received,
	[2] = llc_conn_ac_send_rnr_rsp_f_set_1,
	[3] = llc_conn_ac_resend_i_xxx_x_set_0,
	[4] = llc_conn_ac_clear_remote_busy,
	[5] = NULL,
};

/* Initializer continues on the next line of this (flattened) file. */
static struct llc_conn_state_trans llc_busy_state_trans_21 = {
	.ev		= llc_conn_ev_rx_rej_cmd_pbit_set_1,
	.next_state	= LLC_CONN_STATE_BUSY,
	.ev_qualifiers	= NONE,
	.ev_actions =
/* Completion of llc_busy_state_trans_21 (".ev_actions =" is on the
 * previous line), then the remaining BUSY-state entries and the master
 * transition-pointer array for the BUSY state. */
llc_busy_actions_21,
};

/* State transitions for LLC_CONN_EV_INIT_P_F_CYCLE event */
static llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_22[] = {
	[0] = llc_conn_ev_qlfy_p_flag_eq_0,
	[1] = NULL,
};

static llc_conn_action_t llc_busy_actions_22[] = {
	[0] = llc_conn_ac_send_rnr_cmd_p_set_1,
	[1] = llc_conn_ac_start_p_timer,
	[2] = NULL,
};

static struct llc_conn_state_trans llc_busy_state_trans_22 = {
	.ev		= llc_conn_ev_init_p_f_cycle,
	.next_state	= LLC_CONN_STATE_BUSY,
	.ev_qualifiers	= llc_busy_ev_qfyrs_22,
	.ev_actions	= llc_busy_actions_22,
};

/* State transitions for LLC_CONN_EV_P_TMR_EXP event */
static llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_23[] = {
	[0] = llc_conn_ev_qlfy_retry_cnt_lt_n2,
	[1] = NULL,
};

static llc_conn_action_t llc_busy_actions_23[] = {
	[0] = llc_conn_ac_send_rnr_cmd_p_set_1,
	[1] = llc_conn_ac_rst_vs,
	[2] = llc_conn_ac_start_p_timer,
	[3] = llc_conn_ac_inc_retry_cnt_by_1,
	[4] = NULL,
};

static struct llc_conn_state_trans llc_busy_state_trans_23 = {
	.ev		= llc_conn_ev_p_tmr_exp,
	.next_state	= LLC_CONN_STATE_AWAIT_BUSY,
	.ev_qualifiers	= llc_busy_ev_qfyrs_23,
	.ev_actions	= llc_busy_actions_23,
};

/* State transitions for LLC_CONN_EV_ACK_TMR_EXP event */
static llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_24a[] = {
	[0] = llc_conn_ev_qlfy_p_flag_eq_0,
	[1] = llc_conn_ev_qlfy_retry_cnt_lt_n2,
	[2] = NULL,
};

static llc_conn_action_t llc_busy_actions_24a[] = {
	[0] = llc_conn_ac_send_rnr_cmd_p_set_1,
	[1] = llc_conn_ac_start_p_timer,
	[2] = llc_conn_ac_inc_retry_cnt_by_1,
	[3] = llc_conn_ac_rst_vs,
	[4] = NULL,
};

static struct llc_conn_state_trans llc_busy_state_trans_24a = {
	.ev		= llc_conn_ev_ack_tmr_exp,
	.next_state	= LLC_CONN_STATE_AWAIT_BUSY,
	.ev_qualifiers	= llc_busy_ev_qfyrs_24a,
	.ev_actions	= llc_busy_actions_24a,
};

/* State transitions for LLC_CONN_EV_BUSY_TMR_EXP event */
static llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_24b[] = {
	[0] = llc_conn_ev_qlfy_p_flag_eq_0,
	[1] = llc_conn_ev_qlfy_retry_cnt_lt_n2,
	[2] = NULL,
};

static llc_conn_action_t llc_busy_actions_24b[] = {
	[0] = llc_conn_ac_send_rnr_cmd_p_set_1,
	[1] = llc_conn_ac_start_p_timer,
	[2] = llc_conn_ac_inc_retry_cnt_by_1,
	[3] = llc_conn_ac_rst_vs,
	[4] = NULL,
};

static struct llc_conn_state_trans llc_busy_state_trans_24b = {
	.ev		= llc_conn_ev_busy_tmr_exp,
	.next_state	= LLC_CONN_STATE_AWAIT_BUSY,
	.ev_qualifiers	= llc_busy_ev_qfyrs_24b,
	.ev_actions	= llc_busy_actions_24b,
};

/* State transitions for LLC_CONN_EV_REJ_TMR_EXP event */
static llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_25[] = {
	[0] = llc_conn_ev_qlfy_p_flag_eq_0,
	[1] = llc_conn_ev_qlfy_retry_cnt_lt_n2,
	[2] = NULL,
};

static llc_conn_action_t llc_busy_actions_25[] = {
	[0] = llc_conn_ac_send_rnr_cmd_p_set_1,
	[1] = llc_conn_ac_start_p_timer,
	[2] = llc_conn_ac_inc_retry_cnt_by_1,
	[3] = llc_conn_ac_rst_vs,
	[4] = llc_conn_ac_set_data_flag_1,
	[5] = NULL,
};

static struct llc_conn_state_trans llc_busy_state_trans_25 = {
	.ev		= llc_conn_ev_rej_tmr_exp,
	.next_state	= LLC_CONN_STATE_AWAIT_BUSY,
	.ev_qualifiers	= llc_busy_ev_qfyrs_25,
	.ev_actions	= llc_busy_actions_25,
};

/* State transitions for LLC_CONN_EV_REJ_TMR_EXP event */
static llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_26[] = {
	[0] = llc_conn_ev_qlfy_p_flag_eq_1,
	[1] = llc_conn_ev_qlfy_retry_cnt_lt_n2,
	[2] = NULL,
};

static llc_conn_action_t llc_busy_actions_26[] = {
	[0] = llc_conn_ac_set_data_flag_1,
	[1] = NULL,
};

static struct llc_conn_state_trans llc_busy_state_trans_26 = {
	.ev		= llc_conn_ev_rej_tmr_exp,
	.next_state	= LLC_CONN_STATE_BUSY,
	.ev_qualifiers	= llc_busy_ev_qfyrs_26,
	.ev_actions	= llc_busy_actions_26,
};

/*
 * Array of pointers;
 * one to each transition
 */
static struct llc_conn_state_trans *llc_busy_state_transitions[] = {
	[0] = &llc_common_state_trans_1,	/* Request */
	[1] = &llc_common_state_trans_2,
	[2] = &llc_busy_state_trans_1,
	[3] = &llc_busy_state_trans_2,
	[4] = &llc_busy_state_trans_2_1,
	[5] = &llc_common_state_trans_end,
	[6] = &llc_busy_state_trans_3,		/* Local busy */
	[7] = &llc_busy_state_trans_4,
	[8] = &llc_busy_state_trans_5,
	[9] = &llc_busy_state_trans_6,
	[10] = &llc_busy_state_trans_7,
	[11] = &llc_busy_state_trans_8,
	[12] = &llc_common_state_trans_end,
	[13] = &llc_busy_state_trans_22,	/* Initiate PF cycle */
	[14] = &llc_common_state_trans_end,
	[15] = &llc_common_state_trans_11a,	/* Timer */
	[16] = &llc_common_state_trans_11b,
	[17] = &llc_common_state_trans_11c,
	[18] = &llc_common_state_trans_11d,
	[19] = &llc_busy_state_trans_23,
	[20] = &llc_busy_state_trans_24a,
	[21] = &llc_busy_state_trans_24b,
	[22] = &llc_busy_state_trans_25,
	[23] = &llc_busy_state_trans_26,
	[24] = &llc_common_state_trans_end,
	[25] = &llc_busy_state_trans_9a,	/* Receive frame */
	[26] = &llc_busy_state_trans_9b,
	[27] = &llc_busy_state_trans_10a,
	[28] = &llc_busy_state_trans_10b,
	[29] = &llc_busy_state_trans_11,
	[30] = &llc_busy_state_trans_12,
	[31] = &llc_busy_state_trans_13a,
	[32] = &llc_busy_state_trans_13b,
	[33] = &llc_busy_state_trans_14a,
	[34] = &llc_busy_state_trans_14b,
	[35] = &llc_busy_state_trans_15a,
	[36] = &llc_busy_state_trans_15b,
	[37] = &llc_busy_state_trans_15c,
	[38] = &llc_busy_state_trans_16,
	[39] = &llc_busy_state_trans_17a,
	[40] = &llc_busy_state_trans_17b,
	[41] = &llc_busy_state_trans_17c,
	[42] = &llc_busy_state_trans_18,
	[43] = &llc_busy_state_trans_19a,
	[44] = &llc_busy_state_trans_19b,
	[45] = &llc_busy_state_trans_20a,
	[46] = &llc_busy_state_trans_20b,
	[47] = &llc_busy_state_trans_21,
	[48] = &llc_common_state_trans_3,
	[49] = &llc_common_state_trans_4,
	[50] = &llc_common_state_trans_5,
	[51] = &llc_common_state_trans_6,
	[52] = &llc_common_state_trans_7a,
	[53] = &llc_common_state_trans_7b,
	[54] = &llc_common_state_trans_8a,
	[55] = &llc_common_state_trans_8b,
	[56] = &llc_common_state_trans_8c,
	[57] = &llc_common_state_trans_9,
	/* [58] = &llc_common_state_trans_10, */
	[58] = &llc_common_state_trans_end,
};

/* LLC_CONN_STATE_REJ transitions */

/* State transitions for LLC_CONN_EV_DATA_REQ event */
/* Initializer continues on the next line of this (flattened) file. */
static llc_conn_ev_qfyr_t llc_reject_ev_qfyrs_1[] = {
	[0] = llc_conn_ev_qlfy_remote_busy_eq_0,
	[1] = llc_conn_ev_qlfy_p_flag_eq_0,
	[2] =
NULL,	/* completes llc_reject_ev_qfyrs_1[] begun on the previous line */
};

static llc_conn_action_t llc_reject_actions_1[] = {
	[0] = llc_conn_ac_send_i_xxx_x_set_0,
	[1] = NULL,
};

static struct llc_conn_state_trans llc_reject_state_trans_1 = {
	.ev            = llc_conn_ev_data_req,
	.next_state    = LLC_CONN_STATE_REJ,
	.ev_qualifiers = llc_reject_ev_qfyrs_1,
	.ev_actions    = llc_reject_actions_1,
};

/* State transitions for LLC_CONN_EV_DATA_REQ event */
static llc_conn_ev_qfyr_t llc_reject_ev_qfyrs_2[] = {
	[0] = llc_conn_ev_qlfy_remote_busy_eq_0,
	[1] = llc_conn_ev_qlfy_p_flag_eq_1,
	[2] = NULL,
};

static llc_conn_action_t llc_reject_actions_2[] = {
	[0] = llc_conn_ac_send_i_xxx_x_set_0,
	[1] = NULL,
};

static struct llc_conn_state_trans llc_reject_state_trans_2 = {
	.ev            = llc_conn_ev_data_req,
	.next_state    = LLC_CONN_STATE_REJ,
	.ev_qualifiers = llc_reject_ev_qfyrs_2,
	.ev_actions    = llc_reject_actions_2,
};

/* State transitions for LLC_CONN_EV_DATA_REQ event */
static llc_conn_ev_qfyr_t llc_reject_ev_qfyrs_2_1[] = {
	[0] = llc_conn_ev_qlfy_remote_busy_eq_1,
	[1] = llc_conn_ev_qlfy_set_status_remote_busy,
	[2] = NULL,
};

/* just one member, NULL, .bss zeroes it */
static llc_conn_action_t llc_reject_actions_2_1[1];

static struct llc_conn_state_trans llc_reject_state_trans_2_1 = {
	.ev            = llc_conn_ev_data_req,
	.next_state    = LLC_CONN_STATE_REJ,
	.ev_qualifiers = llc_reject_ev_qfyrs_2_1,
	.ev_actions    = llc_reject_actions_2_1,
};

/* State transitions for LLC_CONN_EV_LOCAL_BUSY_DETECTED event */
static llc_conn_ev_qfyr_t llc_reject_ev_qfyrs_3[] = {
	[0] = llc_conn_ev_qlfy_p_flag_eq_0,
	[1] = NULL,
};

static llc_conn_action_t llc_reject_actions_3[] = {
	[0] = llc_conn_ac_send_rnr_xxx_x_set_0,
	[1] = llc_conn_ac_set_data_flag_2,
	[2] = NULL,
};

static struct llc_conn_state_trans llc_reject_state_trans_3 = {
	.ev            = llc_conn_ev_local_busy_detected,
	.next_state    = LLC_CONN_STATE_BUSY,
	.ev_qualifiers = llc_reject_ev_qfyrs_3,
	.ev_actions    = llc_reject_actions_3,
};

/* State transitions for LLC_CONN_EV_LOCAL_BUSY_DETECTED event */
static llc_conn_ev_qfyr_t llc_reject_ev_qfyrs_4[] =
{
	[0] = llc_conn_ev_qlfy_p_flag_eq_1,
	[1] = NULL,
};

static llc_conn_action_t llc_reject_actions_4[] = {
	[0] = llc_conn_ac_send_rnr_xxx_x_set_0,
	[1] = llc_conn_ac_set_data_flag_2,
	[2] = NULL,
};

static struct llc_conn_state_trans llc_reject_state_trans_4 = {
	.ev            = llc_conn_ev_local_busy_detected,
	.next_state    = LLC_CONN_STATE_BUSY,
	.ev_qualifiers = llc_reject_ev_qfyrs_4,
	.ev_actions    = llc_reject_actions_4,
};

/* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_0_UNEXPD_Ns event */
static llc_conn_action_t llc_reject_actions_5a[] = {
	[0] = llc_conn_ac_upd_nr_received,
	[1] = llc_conn_ac_upd_p_flag,
	[2] = llc_conn_ac_clear_remote_busy_if_f_eq_1,
	[3] = NULL,
};

static struct llc_conn_state_trans llc_reject_state_trans_5a = {
	.ev            = llc_conn_ev_rx_i_cmd_pbit_set_0_unexpd_ns,
	.next_state    = LLC_CONN_STATE_REJ,
	.ev_qualifiers = NONE,
	.ev_actions    = llc_reject_actions_5a,
};

/* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_0_UNEXPD_Ns event */
static llc_conn_action_t llc_reject_actions_5b[] = {
	[0] = llc_conn_ac_upd_nr_received,
	[1] = llc_conn_ac_upd_p_flag,
	[2] = llc_conn_ac_clear_remote_busy_if_f_eq_1,
	[3] = NULL,
};

static struct llc_conn_state_trans llc_reject_state_trans_5b = {
	.ev            = llc_conn_ev_rx_i_rsp_fbit_set_0_unexpd_ns,
	.next_state    = LLC_CONN_STATE_REJ,
	.ev_qualifiers = NONE,
	.ev_actions    = llc_reject_actions_5b,
};

/* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_1_UNEXPD_Ns event */
static llc_conn_ev_qfyr_t llc_reject_ev_qfyrs_5c[] = {
	[0] = llc_conn_ev_qlfy_p_flag_eq_1,
	[1] = NULL,
};

static llc_conn_action_t llc_reject_actions_5c[] = {
	[0] = llc_conn_ac_upd_nr_received,
	[1] = llc_conn_ac_upd_p_flag,
	[2] = llc_conn_ac_clear_remote_busy_if_f_eq_1,
	[3] = NULL,
};

static struct llc_conn_state_trans llc_reject_state_trans_5c = {
	.ev            = llc_conn_ev_rx_i_rsp_fbit_set_1_unexpd_ns,
	.next_state    = LLC_CONN_STATE_REJ,
	.ev_qualifiers = llc_reject_ev_qfyrs_5c,
	.ev_actions    = llc_reject_actions_5c,
};

/* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_1_UNEXPD_Ns event */
static llc_conn_action_t llc_reject_actions_6[] = {
	[0] = llc_conn_ac_send_rr_rsp_f_set_1,
	[1] = llc_conn_ac_upd_nr_received,
	[2] = NULL,
};

static struct llc_conn_state_trans llc_reject_state_trans_6 = {
	.ev            = llc_conn_ev_rx_i_cmd_pbit_set_1_unexpd_ns,
	.next_state    = LLC_CONN_STATE_REJ,
	.ev_qualifiers = NONE,
	.ev_actions    = llc_reject_actions_6,
};

/* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_X event */
static llc_conn_ev_qfyr_t llc_reject_ev_qfyrs_7a[] = {
	[0] = llc_conn_ev_qlfy_p_flag_eq_f,
	[1] = NULL,
};

static llc_conn_action_t llc_reject_actions_7a[] = {
	[0] = llc_conn_ac_inc_vr_by_1,
	[1] = llc_conn_ac_data_ind,
	[2] = llc_conn_ac_upd_p_flag,
	[3] = llc_conn_ac_send_ack_xxx_x_set_0,
	[4] = llc_conn_ac_upd_nr_received,
	[5] = llc_conn_ac_clear_remote_busy_if_f_eq_1,
	[6] = llc_conn_ac_stop_rej_timer,
	[7] = NULL,
};

static struct llc_conn_state_trans llc_reject_state_trans_7a = {
	.ev            = llc_conn_ev_rx_i_rsp_fbit_set_x,
	.next_state    = LLC_CONN_STATE_NORMAL,
	.ev_qualifiers = llc_reject_ev_qfyrs_7a,
	.ev_actions    = llc_reject_actions_7a,
};

/* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_0 event */
static llc_conn_ev_qfyr_t llc_reject_ev_qfyrs_7b[] = {
	[0] = llc_conn_ev_qlfy_p_flag_eq_0,
	[1] = NULL,
};

static llc_conn_action_t llc_reject_actions_7b[] = {
	[0] = llc_conn_ac_inc_vr_by_1,
	[1] = llc_conn_ac_data_ind,
	[2] = llc_conn_ac_upd_p_flag,
	[3] = llc_conn_ac_send_ack_xxx_x_set_0,
	[4] = llc_conn_ac_upd_nr_received,
	[5] = llc_conn_ac_clear_remote_busy_if_f_eq_1,
	[6] = llc_conn_ac_stop_rej_timer,
	[7] = NULL,
};

static struct llc_conn_state_trans llc_reject_state_trans_7b = {
	.ev            = llc_conn_ev_rx_i_cmd_pbit_set_0,
	.next_state    = LLC_CONN_STATE_NORMAL,
	.ev_qualifiers = llc_reject_ev_qfyrs_7b,
	.ev_actions    = llc_reject_actions_7b,
};

/* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_0 event */
static llc_conn_ev_qfyr_t llc_reject_ev_qfyrs_8a[] = {
	[0] = llc_conn_ev_qlfy_p_flag_eq_1,
	[1] = NULL,
};

static llc_conn_action_t llc_reject_actions_8a[] = {
	[0] = llc_conn_ac_inc_vr_by_1,
	[1] = llc_conn_ac_data_ind,
	[2] = llc_conn_ac_send_ack_xxx_x_set_0,
	[3] = llc_conn_ac_upd_nr_received,
	[4] = llc_conn_ac_stop_rej_timer,
	[5] = NULL,
};

static struct llc_conn_state_trans llc_reject_state_trans_8a = {
	.ev            = llc_conn_ev_rx_i_rsp_fbit_set_0,
	.next_state    = LLC_CONN_STATE_NORMAL,
	.ev_qualifiers = llc_reject_ev_qfyrs_8a,
	.ev_actions    = llc_reject_actions_8a,
};

/* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_0 event */
static llc_conn_ev_qfyr_t llc_reject_ev_qfyrs_8b[] = {
	[0] = llc_conn_ev_qlfy_p_flag_eq_1,
	[1] = NULL,
};

static llc_conn_action_t llc_reject_actions_8b[] = {
	[0] = llc_conn_ac_inc_vr_by_1,
	[1] = llc_conn_ac_data_ind,
	[2] = llc_conn_ac_send_ack_xxx_x_set_0,
	[3] = llc_conn_ac_upd_nr_received,
	[4] = llc_conn_ac_stop_rej_timer,
	[5] = NULL,
};

static struct llc_conn_state_trans llc_reject_state_trans_8b = {
	.ev            = llc_conn_ev_rx_i_cmd_pbit_set_0,
	.next_state    = LLC_CONN_STATE_NORMAL,
	.ev_qualifiers = llc_reject_ev_qfyrs_8b,
	.ev_actions    = llc_reject_actions_8b,
};

/* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_1 event */
static llc_conn_action_t llc_reject_actions_9[] = {
	[0] = llc_conn_ac_inc_vr_by_1,
	[1] = llc_conn_ac_data_ind,
	[2] = llc_conn_ac_send_ack_rsp_f_set_1,
	[3] = llc_conn_ac_upd_nr_received,
	[4] = llc_conn_ac_stop_rej_timer,
	[5] = NULL,
};

static struct llc_conn_state_trans llc_reject_state_trans_9 = {
	.ev            = llc_conn_ev_rx_i_cmd_pbit_set_1,
	.next_state    = LLC_CONN_STATE_NORMAL,
	.ev_qualifiers = NONE,
	.ev_actions    = llc_reject_actions_9,
};

/* State transitions for LLC_CONN_EV_RX_RR_CMD_Pbit_SET_0 event */
static llc_conn_action_t llc_reject_actions_10a[] = {
	[0] = llc_conn_ac_upd_p_flag,
	[1] = llc_conn_ac_upd_nr_received,
	[2] = llc_conn_ac_clear_remote_busy,
	[3] = NULL,
};

static struct llc_conn_state_trans llc_reject_state_trans_10a = {
	.ev            = llc_conn_ev_rx_rr_cmd_pbit_set_0,
	.next_state    = LLC_CONN_STATE_REJ,
	.ev_qualifiers = NONE,
	/* initializer completed on the following line */
	.ev_actions    =
llc_reject_actions_10a,	/* completes llc_reject_state_trans_10a begun above */
};

/* State transitions for LLC_CONN_EV_RX_RR_RSP_Fbit_SET_0 event */
static llc_conn_action_t llc_reject_actions_10b[] = {
	[0] = llc_conn_ac_upd_p_flag,
	[1] = llc_conn_ac_upd_nr_received,
	[2] = llc_conn_ac_clear_remote_busy,
	[3] = NULL,
};

static struct llc_conn_state_trans llc_reject_state_trans_10b = {
	.ev            = llc_conn_ev_rx_rr_rsp_fbit_set_0,
	.next_state    = LLC_CONN_STATE_REJ,
	.ev_qualifiers = NONE,
	.ev_actions    = llc_reject_actions_10b,
};

/* State transitions for LLC_CONN_EV_RX_RR_RSP_Fbit_SET_1 event */
static llc_conn_ev_qfyr_t llc_reject_ev_qfyrs_10c[] = {
	[0] = llc_conn_ev_qlfy_p_flag_eq_1,
	[1] = NULL,
};

static llc_conn_action_t llc_reject_actions_10c[] = {
	[0] = llc_conn_ac_upd_p_flag,
	[1] = llc_conn_ac_upd_nr_received,
	[2] = llc_conn_ac_clear_remote_busy,
	[3] = NULL,
};

static struct llc_conn_state_trans llc_reject_state_trans_10c = {
	.ev            = llc_conn_ev_rx_rr_rsp_fbit_set_1,
	.next_state    = LLC_CONN_STATE_REJ,
	.ev_qualifiers = llc_reject_ev_qfyrs_10c,
	.ev_actions    = llc_reject_actions_10c,
};

/* State transitions for LLC_CONN_EV_RX_RR_CMD_Pbit_SET_1 event */
static llc_conn_action_t llc_reject_actions_11[] = {
	[0] = llc_conn_ac_send_ack_rsp_f_set_1,
	[1] = llc_conn_ac_upd_nr_received,
	[2] = llc_conn_ac_clear_remote_busy,
	[3] = NULL,
};

static struct llc_conn_state_trans llc_reject_state_trans_11 = {
	.ev            = llc_conn_ev_rx_rr_cmd_pbit_set_1,
	.next_state    = LLC_CONN_STATE_REJ,
	.ev_qualifiers = NONE,
	.ev_actions    = llc_reject_actions_11,
};

/* State transitions for LLC_CONN_EV_RX_RNR_CMD_Pbit_SET_0 event */
static llc_conn_action_t llc_reject_actions_12a[] = {
	[0] = llc_conn_ac_upd_p_flag,
	[1] = llc_conn_ac_upd_nr_received,
	[2] = llc_conn_ac_set_remote_busy,
	[3] = NULL,
};

static struct llc_conn_state_trans llc_reject_state_trans_12a = {
	.ev            = llc_conn_ev_rx_rnr_cmd_pbit_set_0,
	.next_state    = LLC_CONN_STATE_REJ,
	.ev_qualifiers = NONE,
	.ev_actions    = llc_reject_actions_12a,
};

/* State transitions for LLC_CONN_EV_RX_RNR_RSP_Fbit_SET_0 event */
static llc_conn_action_t llc_reject_actions_12b[] = {
	[0] = llc_conn_ac_upd_p_flag,
	[1] = llc_conn_ac_upd_nr_received,
	[2] = llc_conn_ac_set_remote_busy,
	[3] = NULL,
};

static struct llc_conn_state_trans llc_reject_state_trans_12b = {
	.ev            = llc_conn_ev_rx_rnr_rsp_fbit_set_0,
	.next_state    = LLC_CONN_STATE_REJ,
	.ev_qualifiers = NONE,
	.ev_actions    = llc_reject_actions_12b,
};

/* State transitions for LLC_CONN_EV_RX_RNR_RSP_Fbit_SET_1 event */
static llc_conn_ev_qfyr_t llc_reject_ev_qfyrs_12c[] = {
	[0] = llc_conn_ev_qlfy_p_flag_eq_1,
	[1] = NULL,
};

static llc_conn_action_t llc_reject_actions_12c[] = {
	[0] = llc_conn_ac_upd_p_flag,
	[1] = llc_conn_ac_upd_nr_received,
	[2] = llc_conn_ac_set_remote_busy,
	[3] = NULL,
};

static struct llc_conn_state_trans llc_reject_state_trans_12c = {
	.ev            = llc_conn_ev_rx_rnr_rsp_fbit_set_1,
	.next_state    = LLC_CONN_STATE_REJ,
	.ev_qualifiers = llc_reject_ev_qfyrs_12c,
	.ev_actions    = llc_reject_actions_12c,
};

/* State transitions for LLC_CONN_EV_RX_RNR_CMD_Pbit_SET_1 event */
static llc_conn_action_t llc_reject_actions_13[] = {
	[0] = llc_conn_ac_send_rr_rsp_f_set_1,
	[1] = llc_conn_ac_upd_nr_received,
	[2] = llc_conn_ac_set_remote_busy,
	[3] = NULL,
};

static struct llc_conn_state_trans llc_reject_state_trans_13 = {
	.ev            = llc_conn_ev_rx_rnr_cmd_pbit_set_1,
	.next_state    = LLC_CONN_STATE_REJ,
	.ev_qualifiers = NONE,
	.ev_actions    = llc_reject_actions_13,
};

/* State transitions for LLC_CONN_EV_RX_REJ_CMD_Pbit_SET_0 event */
static llc_conn_ev_qfyr_t llc_reject_ev_qfyrs_14a[] = {
	[0] = llc_conn_ev_qlfy_p_flag_eq_0,
	[1] = NULL,
};

static llc_conn_action_t llc_reject_actions_14a[] = {
	[0] = llc_conn_ac_set_vs_nr,
	[1] = llc_conn_ac_upd_nr_received,
	[2] = llc_conn_ac_upd_p_flag,
	[3] = llc_conn_ac_resend_i_xxx_x_set_0,
	[4] = llc_conn_ac_clear_remote_busy,
	[5] = NULL,
};

static struct llc_conn_state_trans llc_reject_state_trans_14a = {
	.ev            = llc_conn_ev_rx_rej_cmd_pbit_set_0,
	.next_state    = LLC_CONN_STATE_REJ,
	.ev_qualifiers = llc_reject_ev_qfyrs_14a,
	.ev_actions
= llc_reject_actions_14a,
};

/* State transitions for LLC_CONN_EV_RX_REJ_RSP_Fbit_SET_X event */
static llc_conn_ev_qfyr_t llc_reject_ev_qfyrs_14b[] = {
	[0] = llc_conn_ev_qlfy_p_flag_eq_f,
	[1] = NULL,
};

static llc_conn_action_t llc_reject_actions_14b[] = {
	[0] = llc_conn_ac_set_vs_nr,
	[1] = llc_conn_ac_upd_nr_received,
	[2] = llc_conn_ac_upd_p_flag,
	[3] = llc_conn_ac_resend_i_xxx_x_set_0,
	[4] = llc_conn_ac_clear_remote_busy,
	[5] = NULL,
};

static struct llc_conn_state_trans llc_reject_state_trans_14b = {
	.ev            = llc_conn_ev_rx_rej_rsp_fbit_set_x,
	.next_state    = LLC_CONN_STATE_REJ,
	.ev_qualifiers = llc_reject_ev_qfyrs_14b,
	.ev_actions    = llc_reject_actions_14b,
};

/* State transitions for LLC_CONN_EV_RX_REJ_CMD_Pbit_SET_0 event */
static llc_conn_ev_qfyr_t llc_reject_ev_qfyrs_15a[] = {
	[0] = llc_conn_ev_qlfy_p_flag_eq_1,
	[1] = NULL,
};

static llc_conn_action_t llc_reject_actions_15a[] = {
	[0] = llc_conn_ac_set_vs_nr,
	[1] = llc_conn_ac_upd_nr_received,
	[2] = llc_conn_ac_resend_i_xxx_x_set_0,
	[3] = llc_conn_ac_clear_remote_busy,
	[4] = NULL,
};

static struct llc_conn_state_trans llc_reject_state_trans_15a = {
	.ev            = llc_conn_ev_rx_rej_cmd_pbit_set_0,
	.next_state    = LLC_CONN_STATE_REJ,
	.ev_qualifiers = llc_reject_ev_qfyrs_15a,
	.ev_actions    = llc_reject_actions_15a,
};

/* State transitions for LLC_CONN_EV_RX_REJ_RSP_Fbit_SET_0 event */
static llc_conn_ev_qfyr_t llc_reject_ev_qfyrs_15b[] = {
	[0] = llc_conn_ev_qlfy_p_flag_eq_1,
	[1] = NULL,
};

static llc_conn_action_t llc_reject_actions_15b[] = {
	[0] = llc_conn_ac_set_vs_nr,
	[1] = llc_conn_ac_upd_nr_received,
	[2] = llc_conn_ac_resend_i_xxx_x_set_0,
	[3] = llc_conn_ac_clear_remote_busy,
	[4] = NULL,
};

static struct llc_conn_state_trans llc_reject_state_trans_15b = {
	.ev            = llc_conn_ev_rx_rej_rsp_fbit_set_0,
	.next_state    = LLC_CONN_STATE_REJ,
	.ev_qualifiers = llc_reject_ev_qfyrs_15b,
	.ev_actions    = llc_reject_actions_15b,
};

/* State transitions for LLC_CONN_EV_RX_REJ_CMD_Pbit_SET_1 event */
/* declaration completed on the following line */
static llc_conn_action_t
llc_reject_actions_16[] = {	/* declarator begun on the previous line */
	[0] = llc_conn_ac_set_vs_nr,
	[1] = llc_conn_ac_upd_nr_received,
	[2] = llc_conn_ac_resend_i_rsp_f_set_1,
	[3] = llc_conn_ac_clear_remote_busy,
	[4] = NULL,
};

static struct llc_conn_state_trans llc_reject_state_trans_16 = {
	.ev            = llc_conn_ev_rx_rej_cmd_pbit_set_1,
	.next_state    = LLC_CONN_STATE_REJ,
	.ev_qualifiers = NONE,
	.ev_actions    = llc_reject_actions_16,
};

/* State transitions for LLC_CONN_EV_INIT_P_F_CYCLE event */
static llc_conn_ev_qfyr_t llc_reject_ev_qfyrs_17[] = {
	[0] = llc_conn_ev_qlfy_p_flag_eq_0,
	[1] = NULL,
};

static llc_conn_action_t llc_reject_actions_17[] = {
	[0] = llc_conn_ac_send_rr_cmd_p_set_1,
	[1] = llc_conn_ac_start_p_timer,
	[2] = NULL,
};

static struct llc_conn_state_trans llc_reject_state_trans_17 = {
	.ev            = llc_conn_ev_init_p_f_cycle,
	.next_state    = LLC_CONN_STATE_REJ,
	.ev_qualifiers = llc_reject_ev_qfyrs_17,
	.ev_actions    = llc_reject_actions_17,
};

/* State transitions for LLC_CONN_EV_REJ_TMR_EXP event */
static llc_conn_ev_qfyr_t llc_reject_ev_qfyrs_18[] = {
	[0] = llc_conn_ev_qlfy_p_flag_eq_0,
	[1] = llc_conn_ev_qlfy_retry_cnt_lt_n2,
	[2] = NULL,
};

static llc_conn_action_t llc_reject_actions_18[] = {
	[0] = llc_conn_ac_send_rej_cmd_p_set_1,
	[1] = llc_conn_ac_start_p_timer,
	[2] = llc_conn_ac_start_rej_timer,
	[3] = llc_conn_ac_inc_retry_cnt_by_1,
	[4] = NULL,
};

static struct llc_conn_state_trans llc_reject_state_trans_18 = {
	.ev            = llc_conn_ev_rej_tmr_exp,
	.next_state    = LLC_CONN_STATE_REJ,
	.ev_qualifiers = llc_reject_ev_qfyrs_18,
	.ev_actions    = llc_reject_actions_18,
};

/* State transitions for LLC_CONN_EV_P_TMR_EXP event */
static llc_conn_ev_qfyr_t llc_reject_ev_qfyrs_19[] = {
	[0] = llc_conn_ev_qlfy_retry_cnt_lt_n2,
	[1] = NULL,
};

static llc_conn_action_t llc_reject_actions_19[] = {
	[0] = llc_conn_ac_send_rr_cmd_p_set_1,
	[1] = llc_conn_ac_start_p_timer,
	[2] = llc_conn_ac_start_rej_timer,
	[3] = llc_conn_ac_inc_retry_cnt_by_1,
	[4] = llc_conn_ac_rst_vs,
	[5] = NULL,
};

static struct llc_conn_state_trans llc_reject_state_trans_19 = {
	.ev            = llc_conn_ev_p_tmr_exp,
	.next_state    = LLC_CONN_STATE_AWAIT_REJ,
	.ev_qualifiers = llc_reject_ev_qfyrs_19,
	.ev_actions    = llc_reject_actions_19,
};

/* State transitions for LLC_CONN_EV_ACK_TMR_EXP event */
static llc_conn_ev_qfyr_t llc_reject_ev_qfyrs_20a[] = {
	[0] = llc_conn_ev_qlfy_p_flag_eq_0,
	[1] = llc_conn_ev_qlfy_retry_cnt_lt_n2,
	[2] = NULL,
};

static llc_conn_action_t llc_reject_actions_20a[] = {
	[0] = llc_conn_ac_send_rr_cmd_p_set_1,
	[1] = llc_conn_ac_start_p_timer,
	[2] = llc_conn_ac_start_rej_timer,
	[3] = llc_conn_ac_inc_retry_cnt_by_1,
	[4] = llc_conn_ac_rst_vs,
	[5] = NULL,
};

static struct llc_conn_state_trans llc_reject_state_trans_20a = {
	.ev            = llc_conn_ev_ack_tmr_exp,
	.next_state    = LLC_CONN_STATE_AWAIT_REJ,
	.ev_qualifiers = llc_reject_ev_qfyrs_20a,
	.ev_actions    = llc_reject_actions_20a,
};

/* State transitions for LLC_CONN_EV_BUSY_TMR_EXP event */
static llc_conn_ev_qfyr_t llc_reject_ev_qfyrs_20b[] = {
	[0] = llc_conn_ev_qlfy_p_flag_eq_0,
	[1] = llc_conn_ev_qlfy_retry_cnt_lt_n2,
	[2] = NULL,
};

static llc_conn_action_t llc_reject_actions_20b[] = {
	[0] = llc_conn_ac_send_rr_cmd_p_set_1,
	[1] = llc_conn_ac_start_p_timer,
	[2] = llc_conn_ac_start_rej_timer,
	[3] = llc_conn_ac_inc_retry_cnt_by_1,
	[4] = llc_conn_ac_rst_vs,
	[5] = NULL,
};

static struct llc_conn_state_trans llc_reject_state_trans_20b = {
	.ev            = llc_conn_ev_busy_tmr_exp,
	.next_state    = LLC_CONN_STATE_AWAIT_REJ,
	.ev_qualifiers = llc_reject_ev_qfyrs_20b,
	.ev_actions    = llc_reject_actions_20b,
};

/*
 * Array of pointers;
 * one to each transition
 *
 * Dispatch table for LLC_CONN_STATE_REJ, grouped by event class and
 * closed per group by llc_common_state_trans_end.
 */
static struct llc_conn_state_trans *llc_reject_state_transitions[] = {
	 [0] = &llc_common_state_trans_1,	/* Request */
	 [1] = &llc_common_state_trans_2,
	 [2] = &llc_common_state_trans_end,
	 [3] = &llc_reject_state_trans_1,
	 [4] = &llc_reject_state_trans_2,
	 [5] = &llc_reject_state_trans_2_1,
	 [6] = &llc_reject_state_trans_3,	/* Local busy */
	 [7] = &llc_reject_state_trans_4,
	 [8] = &llc_common_state_trans_end,
	 [9] = &llc_reject_state_trans_17,	/* Initiate PF cycle */
	[10] = &llc_common_state_trans_end,
	[11] = &llc_common_state_trans_11a,	/* Timer */
	[12] = &llc_common_state_trans_11b,
	[13] = &llc_common_state_trans_11c,
	[14] = &llc_common_state_trans_11d,
	[15] = &llc_reject_state_trans_18,
	[16] = &llc_reject_state_trans_19,
	[17] = &llc_reject_state_trans_20a,
	[18] = &llc_reject_state_trans_20b,
	[19] = &llc_common_state_trans_end,
	[20] = &llc_common_state_trans_3,	/* Receive frame */
	[21] = &llc_common_state_trans_4,
	[22] = &llc_common_state_trans_5,
	[23] = &llc_common_state_trans_6,
	[24] = &llc_common_state_trans_7a,
	[25] = &llc_common_state_trans_7b,
	[26] = &llc_common_state_trans_8a,
	[27] = &llc_common_state_trans_8b,
	[28] = &llc_common_state_trans_8c,
	[29] = &llc_common_state_trans_9,
	/* [30] = &llc_common_state_trans_10, */
	[30] = &llc_reject_state_trans_5a,
	[31] = &llc_reject_state_trans_5b,
	[32] = &llc_reject_state_trans_5c,
	[33] = &llc_reject_state_trans_6,
	[34] = &llc_reject_state_trans_7a,
	[35] = &llc_reject_state_trans_7b,
	[36] = &llc_reject_state_trans_8a,
	[37] = &llc_reject_state_trans_8b,
	[38] = &llc_reject_state_trans_9,
	[39] = &llc_reject_state_trans_10a,
	[40] = &llc_reject_state_trans_10b,
	[41] = &llc_reject_state_trans_10c,
	[42] = &llc_reject_state_trans_11,
	[43] = &llc_reject_state_trans_12a,
	[44] = &llc_reject_state_trans_12b,
	[45] = &llc_reject_state_trans_12c,
	[46] = &llc_reject_state_trans_13,
	[47] = &llc_reject_state_trans_14a,
	[48] = &llc_reject_state_trans_14b,
	[49] = &llc_reject_state_trans_15a,
	[50] = &llc_reject_state_trans_15b,
	[51] = &llc_reject_state_trans_16,
	[52] = &llc_common_state_trans_end,
};

/* LLC_CONN_STATE_AWAIT transitions */

/* State transitions for LLC_CONN_EV_DATA_REQ event */
static llc_conn_ev_qfyr_t llc_await_ev_qfyrs_1_0[] = {
	[0] = llc_conn_ev_qlfy_set_status_refuse,
	[1] = NULL,
};

/* just one member, NULL, .bss zeroes it */
static llc_conn_action_t llc_await_actions_1_0[1];

/* declarator completed on the following line */
static struct llc_conn_state_trans
llc_await_state_trans_1_0 = {	/* declarator begun on the previous line */
	.ev            = llc_conn_ev_data_req,
	.next_state    = LLC_CONN_STATE_AWAIT,
	.ev_qualifiers = llc_await_ev_qfyrs_1_0,
	.ev_actions    = llc_await_actions_1_0,
};

/* State transitions for LLC_CONN_EV_LOCAL_BUSY_DETECTED event */
static llc_conn_action_t llc_await_actions_1[] = {
	[0] = llc_conn_ac_send_rnr_xxx_x_set_0,
	[1] = llc_conn_ac_set_data_flag_0,
	[2] = NULL,
};

static struct llc_conn_state_trans llc_await_state_trans_1 = {
	.ev            = llc_conn_ev_local_busy_detected,
	.next_state    = LLC_CONN_STATE_AWAIT_BUSY,
	.ev_qualifiers = NONE,
	.ev_actions    = llc_await_actions_1,
};

/* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_1_UNEXPD_Ns event */
static llc_conn_action_t llc_await_actions_2[] = {
	[0] = llc_conn_ac_send_rej_xxx_x_set_0,
	[1] = llc_conn_ac_upd_nr_received,
	[2] = llc_conn_ac_upd_vs,
	[3] = llc_conn_ac_stop_p_timer,
	[4] = llc_conn_ac_resend_i_xxx_x_set_0,
	[5] = llc_conn_ac_start_rej_timer,
	[6] = llc_conn_ac_clear_remote_busy,
	[7] = NULL,
};

static struct llc_conn_state_trans llc_await_state_trans_2 = {
	.ev            = llc_conn_ev_rx_i_rsp_fbit_set_1_unexpd_ns,
	.next_state    = LLC_CONN_STATE_REJ,
	.ev_qualifiers = NONE,
	.ev_actions    = llc_await_actions_2,
};

/* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_0_UNEXPD_Ns event */
static llc_conn_action_t llc_await_actions_3a[] = {
	[0] = llc_conn_ac_send_rej_xxx_x_set_0,
	[1] = llc_conn_ac_upd_nr_received,
	[2] = llc_conn_ac_upd_vs,
	[3] = llc_conn_ac_start_rej_timer,
	[4] = NULL,
};

static struct llc_conn_state_trans llc_await_state_trans_3a = {
	.ev            = llc_conn_ev_rx_i_cmd_pbit_set_0_unexpd_ns,
	.next_state    = LLC_CONN_STATE_AWAIT_REJ,
	.ev_qualifiers = NONE,
	.ev_actions    = llc_await_actions_3a,
};

/* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_0_UNEXPD_Ns event */
static llc_conn_action_t llc_await_actions_3b[] = {
	[0] = llc_conn_ac_send_rej_xxx_x_set_0,
	[1] = llc_conn_ac_upd_nr_received,
	[2] = llc_conn_ac_upd_vs,
	[3] = llc_conn_ac_start_rej_timer,
	[4] = NULL,
};

static struct llc_conn_state_trans llc_await_state_trans_3b = {
	.ev            = llc_conn_ev_rx_i_rsp_fbit_set_0_unexpd_ns,
	.next_state    = LLC_CONN_STATE_AWAIT_REJ,
	.ev_qualifiers = NONE,
	.ev_actions    = llc_await_actions_3b,
};

/* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_1_UNEXPD_Ns event */
static llc_conn_action_t llc_await_actions_4[] = {
	[0] = llc_conn_ac_send_rej_rsp_f_set_1,
	[1] = llc_conn_ac_upd_nr_received,
	[2] = llc_conn_ac_upd_vs,
	[3] = llc_conn_ac_start_rej_timer,
	[4] = llc_conn_ac_start_p_timer,
	[5] = NULL,
};

static struct llc_conn_state_trans llc_await_state_trans_4 = {
	.ev            = llc_conn_ev_rx_i_cmd_pbit_set_1_unexpd_ns,
	.next_state    = LLC_CONN_STATE_AWAIT_REJ,
	.ev_qualifiers = NONE,
	.ev_actions    = llc_await_actions_4,
};

/* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_1 event */
static llc_conn_action_t llc_await_actions_5[] = {
	[0] = llc_conn_ac_inc_vr_by_1,
	[1] = llc_conn_ac_data_ind,
	[2] = llc_conn_ac_stop_p_timer,
	[3] = llc_conn_ac_upd_nr_received,
	[4] = llc_conn_ac_upd_vs,
	[5] = llc_conn_ac_resend_i_xxx_x_set_0_or_send_rr,
	[6] = llc_conn_ac_clear_remote_busy,
	[7] = NULL,
};

static struct llc_conn_state_trans llc_await_state_trans_5 = {
	.ev            = llc_conn_ev_rx_i_rsp_fbit_set_1,
	.next_state    = LLC_CONN_STATE_NORMAL,
	.ev_qualifiers = NONE,
	.ev_actions    = llc_await_actions_5,
};

/* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_0 event */
static llc_conn_action_t llc_await_actions_6a[] = {
	[0] = llc_conn_ac_inc_vr_by_1,
	[1] = llc_conn_ac_data_ind,
	[2] = llc_conn_ac_send_rr_xxx_x_set_0,
	[3] = llc_conn_ac_upd_nr_received,
	[4] = llc_conn_ac_upd_vs,
	[5] = NULL,
};

static struct llc_conn_state_trans llc_await_state_trans_6a = {
	.ev            = llc_conn_ev_rx_i_rsp_fbit_set_0,
	.next_state    = LLC_CONN_STATE_AWAIT,
	.ev_qualifiers = NONE,
	.ev_actions    = llc_await_actions_6a,
};

/* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_0 event */
static llc_conn_action_t llc_await_actions_6b[] = {
	[0] = llc_conn_ac_inc_vr_by_1,
	[1] = llc_conn_ac_data_ind,
	[2] = llc_conn_ac_send_rr_xxx_x_set_0,
	[3] = llc_conn_ac_upd_nr_received,
	[4] = llc_conn_ac_upd_vs,
	[5] = NULL,
};

static struct llc_conn_state_trans llc_await_state_trans_6b = {
	.ev            = llc_conn_ev_rx_i_cmd_pbit_set_0,
	.next_state    = LLC_CONN_STATE_AWAIT,
	.ev_qualifiers = NONE,
	.ev_actions    = llc_await_actions_6b,
};

/* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_1 event */
static llc_conn_action_t llc_await_actions_7[] = {
	[0] = llc_conn_ac_inc_vr_by_1,
	[1] = llc_conn_ac_data_ind,
	[2] = llc_conn_ac_send_rr_rsp_f_set_1,
	[3] = llc_conn_ac_upd_nr_received,
	[4] = llc_conn_ac_upd_vs,
	[5] = NULL,
};

static struct llc_conn_state_trans llc_await_state_trans_7 = {
	.ev            = llc_conn_ev_rx_i_cmd_pbit_set_1,
	.next_state    = LLC_CONN_STATE_AWAIT,
	.ev_qualifiers = NONE,
	.ev_actions    = llc_await_actions_7,
};

/* State transitions for LLC_CONN_EV_RX_RR_RSP_Fbit_SET_1 event */
static llc_conn_action_t llc_await_actions_8a[] = {
	[0] = llc_conn_ac_upd_nr_received,
	[1] = llc_conn_ac_upd_vs,
	[2] = llc_conn_ac_stop_p_timer,
	[3] = llc_conn_ac_resend_i_xxx_x_set_0,
	[4] = llc_conn_ac_clear_remote_busy,
	[5] = NULL,
};

static struct llc_conn_state_trans llc_await_state_trans_8a = {
	.ev            = llc_conn_ev_rx_rr_rsp_fbit_set_1,
	.next_state    = LLC_CONN_STATE_NORMAL,
	.ev_qualifiers = NONE,
	.ev_actions    = llc_await_actions_8a,
};

/* State transitions for LLC_CONN_EV_RX_REJ_RSP_Fbit_SET_1 event */
static llc_conn_action_t llc_await_actions_8b[] = {
	[0] = llc_conn_ac_upd_nr_received,
	[1] = llc_conn_ac_upd_vs,
	[2] = llc_conn_ac_stop_p_timer,
	[3] = llc_conn_ac_resend_i_xxx_x_set_0,
	[4] = llc_conn_ac_clear_remote_busy,
	[5] = NULL,
};

static struct llc_conn_state_trans llc_await_state_trans_8b = {
	.ev            = llc_conn_ev_rx_rej_rsp_fbit_set_1,
	.next_state    = LLC_CONN_STATE_NORMAL,
	.ev_qualifiers = NONE,
	.ev_actions    = llc_await_actions_8b,
};

/* State transitions for LLC_CONN_EV_RX_RR_CMD_Pbit_SET_0 event */
static llc_conn_action_t llc_await_actions_9a[] = {
	[0] = llc_conn_ac_upd_nr_received,
	[1] = llc_conn_ac_upd_vs,
	[2] = llc_conn_ac_clear_remote_busy,
	[3] =
NULL,
};

static struct llc_conn_state_trans llc_await_state_trans_9a = {
	.ev            = llc_conn_ev_rx_rr_cmd_pbit_set_0,
	.next_state    = LLC_CONN_STATE_AWAIT,
	.ev_qualifiers = NONE,
	.ev_actions    = llc_await_actions_9a,
};

/* State transitions for LLC_CONN_EV_RX_RR_RSP_Fbit_SET_0 event */
static llc_conn_action_t llc_await_actions_9b[] = {
	[0] = llc_conn_ac_upd_nr_received,
	[1] = llc_conn_ac_upd_vs,
	[2] = llc_conn_ac_clear_remote_busy,
	[3] = NULL,
};

static struct llc_conn_state_trans llc_await_state_trans_9b = {
	.ev            = llc_conn_ev_rx_rr_rsp_fbit_set_0,
	.next_state    = LLC_CONN_STATE_AWAIT,
	.ev_qualifiers = NONE,
	.ev_actions    = llc_await_actions_9b,
};

/* State transitions for LLC_CONN_EV_RX_REJ_CMD_Pbit_SET_0 event */
static llc_conn_action_t llc_await_actions_9c[] = {
	[0] = llc_conn_ac_upd_nr_received,
	[1] = llc_conn_ac_upd_vs,
	[2] = llc_conn_ac_clear_remote_busy,
	[3] = NULL,
};

static struct llc_conn_state_trans llc_await_state_trans_9c = {
	.ev            = llc_conn_ev_rx_rej_cmd_pbit_set_0,
	.next_state    = LLC_CONN_STATE_AWAIT,
	.ev_qualifiers = NONE,
	.ev_actions    = llc_await_actions_9c,
};

/* State transitions for LLC_CONN_EV_RX_REJ_RSP_Fbit_SET_0 event */
static llc_conn_action_t llc_await_actions_9d[] = {
	[0] = llc_conn_ac_upd_nr_received,
	[1] = llc_conn_ac_upd_vs,
	[2] = llc_conn_ac_clear_remote_busy,
	[3] = NULL,
};

static struct llc_conn_state_trans llc_await_state_trans_9d = {
	.ev            = llc_conn_ev_rx_rej_rsp_fbit_set_0,
	.next_state    = LLC_CONN_STATE_AWAIT,
	.ev_qualifiers = NONE,
	.ev_actions    = llc_await_actions_9d,
};

/* State transitions for LLC_CONN_EV_RX_RR_CMD_Pbit_SET_1 event */
static llc_conn_action_t llc_await_actions_10a[] = {
	[0] = llc_conn_ac_send_rr_rsp_f_set_1,
	[1] = llc_conn_ac_upd_nr_received,
	[2] = llc_conn_ac_upd_vs,
	[3] = llc_conn_ac_clear_remote_busy,
	[4] = NULL,
};

static struct llc_conn_state_trans llc_await_state_trans_10a = {
	.ev            = llc_conn_ev_rx_rr_cmd_pbit_set_1,
	.next_state    = LLC_CONN_STATE_AWAIT,
	.ev_qualifiers = NONE,
	.ev_actions    = llc_await_actions_10a,
};

/*
State transitions for LLC_CONN_EV_RX_REJ_CMD_Pbit_SET_1 event */
static llc_conn_action_t llc_await_actions_10b[] = {
	[0] = llc_conn_ac_send_rr_rsp_f_set_1,
	[1] = llc_conn_ac_upd_nr_received,
	[2] = llc_conn_ac_upd_vs,
	[3] = llc_conn_ac_clear_remote_busy,
	[4] = NULL,
};

static struct llc_conn_state_trans llc_await_state_trans_10b = {
	.ev            = llc_conn_ev_rx_rej_cmd_pbit_set_1,
	.next_state    = LLC_CONN_STATE_AWAIT,
	.ev_qualifiers = NONE,
	.ev_actions    = llc_await_actions_10b,
};

/* State transitions for LLC_CONN_EV_RX_RNR_RSP_Fbit_SET_1 event */
static llc_conn_action_t llc_await_actions_11[] = {
	[0] = llc_conn_ac_upd_nr_received,
	[1] = llc_conn_ac_upd_vs,
	[2] = llc_conn_ac_stop_p_timer,
	[3] = llc_conn_ac_set_remote_busy,
	[4] = NULL,
};

static struct llc_conn_state_trans llc_await_state_trans_11 = {
	.ev            = llc_conn_ev_rx_rnr_rsp_fbit_set_1,
	.next_state    = LLC_CONN_STATE_NORMAL,
	.ev_qualifiers = NONE,
	.ev_actions    = llc_await_actions_11,
};

/* State transitions for LLC_CONN_EV_RX_RNR_CMD_Pbit_SET_0 event */
static llc_conn_action_t llc_await_actions_12a[] = {
	[0] = llc_conn_ac_upd_nr_received,
	[1] = llc_conn_ac_upd_vs,
	[2] = llc_conn_ac_set_remote_busy,
	[3] = NULL,
};

static struct llc_conn_state_trans llc_await_state_trans_12a = {
	.ev            = llc_conn_ev_rx_rnr_cmd_pbit_set_0,
	.next_state    = LLC_CONN_STATE_AWAIT,
	.ev_qualifiers = NONE,
	.ev_actions    = llc_await_actions_12a,
};

/* State transitions for LLC_CONN_EV_RX_RNR_RSP_Fbit_SET_0 event */
static llc_conn_action_t llc_await_actions_12b[] = {
	[0] = llc_conn_ac_upd_nr_received,
	[1] = llc_conn_ac_upd_vs,
	[2] = llc_conn_ac_set_remote_busy,
	[3] = NULL,
};

static struct llc_conn_state_trans llc_await_state_trans_12b = {
	.ev            = llc_conn_ev_rx_rnr_rsp_fbit_set_0,
	.next_state    = LLC_CONN_STATE_AWAIT,
	.ev_qualifiers = NONE,
	.ev_actions    = llc_await_actions_12b,
};

/* State transitions for LLC_CONN_EV_RX_RNR_CMD_Pbit_SET_1 event */
static llc_conn_action_t llc_await_actions_13[] = {
	[0] = llc_conn_ac_send_rr_rsp_f_set_1,
	[1] =
llc_conn_ac_upd_nr_received,
	[2] = llc_conn_ac_upd_vs,
	[3] = llc_conn_ac_set_remote_busy,
	[4] = NULL,
};

static struct llc_conn_state_trans llc_await_state_trans_13 = {
	.ev            = llc_conn_ev_rx_rnr_cmd_pbit_set_1,
	.next_state    = LLC_CONN_STATE_AWAIT,
	.ev_qualifiers = NONE,
	.ev_actions    = llc_await_actions_13,
};

/* State transitions for LLC_CONN_EV_P_TMR_EXP event */
static llc_conn_ev_qfyr_t llc_await_ev_qfyrs_14[] = {
	[0] = llc_conn_ev_qlfy_retry_cnt_lt_n2,
	[1] = NULL,
};

static llc_conn_action_t llc_await_actions_14[] = {
	[0] = llc_conn_ac_send_rr_cmd_p_set_1,
	[1] = llc_conn_ac_start_p_timer,
	[2] = llc_conn_ac_inc_retry_cnt_by_1,
	[3] = NULL,
};

static struct llc_conn_state_trans llc_await_state_trans_14 = {
	.ev            = llc_conn_ev_p_tmr_exp,
	.next_state    = LLC_CONN_STATE_AWAIT,
	.ev_qualifiers = llc_await_ev_qfyrs_14,
	.ev_actions    = llc_await_actions_14,
};

/*
 * Array of pointers;
 * one to each transition
 *
 * Dispatch table for LLC_CONN_STATE_AWAIT, grouped by event class and
 * closed per group by llc_common_state_trans_end.
 */
static struct llc_conn_state_trans *llc_await_state_transitions[] = {
	 [0] = &llc_common_state_trans_1,	/* Request */
	 [1] = &llc_common_state_trans_2,
	 [2] = &llc_await_state_trans_1_0,
	 [3] = &llc_common_state_trans_end,
	 [4] = &llc_await_state_trans_1,	/* Local busy */
	 [5] = &llc_common_state_trans_end,
	 [6] = &llc_common_state_trans_end,	/* Initiate PF Cycle */
	 [7] = &llc_common_state_trans_11a,	/* Timer */
	 [8] = &llc_common_state_trans_11b,
	 [9] = &llc_common_state_trans_11c,
	[10] = &llc_common_state_trans_11d,
	[11] = &llc_await_state_trans_14,
	[12] = &llc_common_state_trans_end,
	[13] = &llc_common_state_trans_3,	/* Receive frame */
	[14] = &llc_common_state_trans_4,
	[15] = &llc_common_state_trans_5,
	[16] = &llc_common_state_trans_6,
	[17] = &llc_common_state_trans_7a,
	[18] = &llc_common_state_trans_7b,
	[19] = &llc_common_state_trans_8a,
	[20] = &llc_common_state_trans_8b,
	[21] = &llc_common_state_trans_8c,
	[22] = &llc_common_state_trans_9,
	/* [23] = &llc_common_state_trans_10, */
	[23] = &llc_await_state_trans_2,
	[24] = &llc_await_state_trans_3a,
	[25] = &llc_await_state_trans_3b,
	[26] = &llc_await_state_trans_4,
	[27] = &llc_await_state_trans_5,
	[28] = &llc_await_state_trans_6a,
	[29] = &llc_await_state_trans_6b,
	[30] = &llc_await_state_trans_7,
	[31] = &llc_await_state_trans_8a,
	[32] = &llc_await_state_trans_8b,
	[33] = &llc_await_state_trans_9a,
	[34] = &llc_await_state_trans_9b,
	[35] = &llc_await_state_trans_9c,
	[36] = &llc_await_state_trans_9d,
	[37] = &llc_await_state_trans_10a,
	[38] = &llc_await_state_trans_10b,
	[39] = &llc_await_state_trans_11,
	[40] = &llc_await_state_trans_12a,
	[41] = &llc_await_state_trans_12b,
	[42] = &llc_await_state_trans_13,
	[43] = &llc_common_state_trans_end,
};

/* LLC_CONN_STATE_AWAIT_BUSY transitions */

/* State transitions for LLC_CONN_EV_DATA_CONN_REQ event */
static llc_conn_ev_qfyr_t llc_await_busy_ev_qfyrs_1_0[] = {
	[0] = llc_conn_ev_qlfy_set_status_refuse,
	[1] = NULL,
};

/* just one member, NULL, .bss zeroes it */
static llc_conn_action_t llc_await_busy_actions_1_0[1];

static struct llc_conn_state_trans llc_await_busy_state_trans_1_0 = {
	.ev            = llc_conn_ev_data_req,
	.next_state    = LLC_CONN_STATE_AWAIT_BUSY,
	.ev_qualifiers = llc_await_busy_ev_qfyrs_1_0,
	.ev_actions    = llc_await_busy_actions_1_0,
};

/* State transitions for LLC_CONN_EV_LOCAL_BUSY_CLEARED event */
static llc_conn_ev_qfyr_t llc_await_busy_ev_qfyrs_1[] = {
	[0] = llc_conn_ev_qlfy_data_flag_eq_1,
	[1] = NULL,
};

static llc_conn_action_t llc_await_busy_actions_1[] = {
	[0] = llc_conn_ac_send_rej_xxx_x_set_0,
	[1] = llc_conn_ac_start_rej_timer,
	[2] = NULL,
};

static struct llc_conn_state_trans llc_await_busy_state_trans_1 = {
	.ev            = llc_conn_ev_local_busy_cleared,
	.next_state    = LLC_CONN_STATE_AWAIT_REJ,
	.ev_qualifiers = llc_await_busy_ev_qfyrs_1,
	.ev_actions    = llc_await_busy_actions_1,
};

/* State transitions for LLC_CONN_EV_LOCAL_BUSY_CLEARED event */
static llc_conn_ev_qfyr_t llc_await_busy_ev_qfyrs_2[] = {
	[0] = llc_conn_ev_qlfy_data_flag_eq_0,
	[1] = NULL,
};

static llc_conn_action_t llc_await_busy_actions_2[] = {
	[0] =
llc_conn_ac_send_rr_xxx_x_set_0,
	[1] = NULL,
};

static struct llc_conn_state_trans llc_await_busy_state_trans_2 = {
	.ev            = llc_conn_ev_local_busy_cleared,
	.next_state    = LLC_CONN_STATE_AWAIT,
	.ev_qualifiers = llc_await_busy_ev_qfyrs_2,
	.ev_actions    = llc_await_busy_actions_2,
};

/* State transitions for LLC_CONN_EV_LOCAL_BUSY_CLEARED event */
static llc_conn_ev_qfyr_t llc_await_busy_ev_qfyrs_3[] = {
	[0] = llc_conn_ev_qlfy_data_flag_eq_2,
	[1] = NULL,
};

static llc_conn_action_t llc_await_busy_actions_3[] = {
	[0] = llc_conn_ac_send_rr_xxx_x_set_0,
	[1] = NULL,
};

static struct llc_conn_state_trans llc_await_busy_state_trans_3 = {
	.ev            = llc_conn_ev_local_busy_cleared,
	.next_state    = LLC_CONN_STATE_AWAIT_REJ,
	.ev_qualifiers = llc_await_busy_ev_qfyrs_3,
	.ev_actions    = llc_await_busy_actions_3,
};

/* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_1_UNEXPD_Ns event */
static llc_conn_action_t llc_await_busy_actions_4[] = {
	[0] = llc_conn_ac_opt_send_rnr_xxx_x_set_0,
	[1] = llc_conn_ac_upd_nr_received,
	[2] = llc_conn_ac_upd_vs,
	[3] = llc_conn_ac_stop_p_timer,
	[4] = llc_conn_ac_set_data_flag_1,
	[5] = llc_conn_ac_clear_remote_busy,
	[6] = llc_conn_ac_resend_i_xxx_x_set_0,
	[7] = NULL,
};

static struct llc_conn_state_trans llc_await_busy_state_trans_4 = {
	.ev            = llc_conn_ev_rx_i_rsp_fbit_set_1_unexpd_ns,
	.next_state    = LLC_CONN_STATE_BUSY,
	.ev_qualifiers = NONE,
	.ev_actions    = llc_await_busy_actions_4,
};

/* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_0_UNEXPD_Ns event */
static llc_conn_action_t llc_await_busy_actions_5a[] = {
	[0] = llc_conn_ac_opt_send_rnr_xxx_x_set_0,
	[1] = llc_conn_ac_upd_nr_received,
	[2] = llc_conn_ac_upd_vs,
	[3] = llc_conn_ac_set_data_flag_1,
	[4] = NULL,
};

static struct llc_conn_state_trans llc_await_busy_state_trans_5a = {
	.ev            = llc_conn_ev_rx_i_cmd_pbit_set_0_unexpd_ns,
	.next_state    = LLC_CONN_STATE_AWAIT_BUSY,
	.ev_qualifiers = NONE,
	.ev_actions    = llc_await_busy_actions_5a,
};

/* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_0_UNEXPD_Ns event
*/ static llc_conn_action_t llc_await_busy_actions_5b[] = { [0] = llc_conn_ac_opt_send_rnr_xxx_x_set_0, [1] = llc_conn_ac_upd_nr_received, [2] = llc_conn_ac_upd_vs, [3] = llc_conn_ac_set_data_flag_1, [4] = NULL, }; static struct llc_conn_state_trans llc_await_busy_state_trans_5b = { .ev = llc_conn_ev_rx_i_rsp_fbit_set_0_unexpd_ns, .next_state = LLC_CONN_STATE_AWAIT_BUSY, .ev_qualifiers = NONE, .ev_actions = llc_await_busy_actions_5b, }; /* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_1_UNEXPD_Ns event */ static llc_conn_action_t llc_await_busy_actions_6[] = { [0] = llc_conn_ac_send_rnr_rsp_f_set_1, [1] = llc_conn_ac_upd_nr_received, [2] = llc_conn_ac_upd_vs, [3] = llc_conn_ac_set_data_flag_1, [4] = NULL, }; static struct llc_conn_state_trans llc_await_busy_state_trans_6 = { .ev = llc_conn_ev_rx_i_cmd_pbit_set_1_unexpd_ns, .next_state = LLC_CONN_STATE_AWAIT_BUSY, .ev_qualifiers = NONE, .ev_actions = llc_await_busy_actions_6, }; /* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_1 event */ static llc_conn_action_t llc_await_busy_actions_7[] = { [0] = llc_conn_ac_opt_send_rnr_xxx_x_set_0, [1] = llc_conn_ac_inc_vr_by_1, [2] = llc_conn_ac_data_ind, [3] = llc_conn_ac_stop_p_timer, [4] = llc_conn_ac_upd_nr_received, [5] = llc_conn_ac_upd_vs, [6] = llc_conn_ac_set_data_flag_0, [7] = llc_conn_ac_clear_remote_busy, [8] = llc_conn_ac_resend_i_xxx_x_set_0, [9] = NULL, }; static struct llc_conn_state_trans llc_await_busy_state_trans_7 = { .ev = llc_conn_ev_rx_i_rsp_fbit_set_1, .next_state = LLC_CONN_STATE_BUSY, .ev_qualifiers = NONE, .ev_actions = llc_await_busy_actions_7, }; /* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_0 event */ static llc_conn_action_t llc_await_busy_actions_8a[] = { [0] = llc_conn_ac_opt_send_rnr_xxx_x_set_0, [1] = llc_conn_ac_inc_vr_by_1, [2] = llc_conn_ac_data_ind, [3] = llc_conn_ac_upd_nr_received, [4] = llc_conn_ac_upd_vs, [5] = llc_conn_ac_set_data_flag_0, [6] = NULL, }; static struct llc_conn_state_trans 
llc_await_busy_state_trans_8a = { .ev = llc_conn_ev_rx_i_rsp_fbit_set_0, .next_state = LLC_CONN_STATE_AWAIT_BUSY, .ev_qualifiers = NONE, .ev_actions = llc_await_busy_actions_8a, }; /* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_0 event */ static llc_conn_action_t llc_await_busy_actions_8b[] = { [0] = llc_conn_ac_opt_send_rnr_xxx_x_set_0, [1] = llc_conn_ac_inc_vr_by_1, [2] = llc_conn_ac_data_ind, [3] = llc_conn_ac_upd_nr_received, [4] = llc_conn_ac_upd_vs, [5] = llc_conn_ac_set_data_flag_0, [6] = NULL, }; static struct llc_conn_state_trans llc_await_busy_state_trans_8b = { .ev = llc_conn_ev_rx_i_cmd_pbit_set_0, .next_state = LLC_CONN_STATE_AWAIT_BUSY, .ev_qualifiers = NONE, .ev_actions = llc_await_busy_actions_8b, }; /* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_1 event */ static llc_conn_action_t llc_await_busy_actions_9[] = { [0] = llc_conn_ac_send_rnr_rsp_f_set_1, [1] = llc_conn_ac_inc_vr_by_1, [2] = llc_conn_ac_data_ind, [3] = llc_conn_ac_upd_nr_received, [4] = llc_conn_ac_upd_vs, [5] = llc_conn_ac_set_data_flag_0, [6] = NULL, }; static struct llc_conn_state_trans llc_await_busy_state_trans_9 = { .ev = llc_conn_ev_rx_i_cmd_pbit_set_1, .next_state = LLC_CONN_STATE_AWAIT_BUSY, .ev_qualifiers = NONE, .ev_actions = llc_await_busy_actions_9, }; /* State transitions for LLC_CONN_EV_RX_RR_RSP_Fbit_SET_1 event */ static llc_conn_action_t llc_await_busy_actions_10a[] = { [0] = llc_conn_ac_upd_nr_received, [1] = llc_conn_ac_upd_vs, [2] = llc_conn_ac_stop_p_timer, [3] = llc_conn_ac_resend_i_xxx_x_set_0, [4] = llc_conn_ac_clear_remote_busy, [5] = NULL, }; static struct llc_conn_state_trans llc_await_busy_state_trans_10a = { .ev = llc_conn_ev_rx_rr_rsp_fbit_set_1, .next_state = LLC_CONN_STATE_BUSY, .ev_qualifiers = NONE, .ev_actions = llc_await_busy_actions_10a, }; /* State transitions for LLC_CONN_EV_RX_REJ_RSP_Fbit_SET_1 event */ static llc_conn_action_t llc_await_busy_actions_10b[] = { [0] = llc_conn_ac_upd_nr_received, [1] = llc_conn_ac_upd_vs, [2] = 
llc_conn_ac_stop_p_timer, [3] = llc_conn_ac_resend_i_xxx_x_set_0, [4] = llc_conn_ac_clear_remote_busy, [5] = NULL, }; static struct llc_conn_state_trans llc_await_busy_state_trans_10b = { .ev = llc_conn_ev_rx_rej_rsp_fbit_set_1, .next_state = LLC_CONN_STATE_BUSY, .ev_qualifiers = NONE, .ev_actions = llc_await_busy_actions_10b, }; /* State transitions for LLC_CONN_EV_RX_RR_CMD_Pbit_SET_0 event */ static llc_conn_action_t llc_await_busy_actions_11a[] = { [0] = llc_conn_ac_upd_nr_received, [1] = llc_conn_ac_upd_vs, [2] = llc_conn_ac_clear_remote_busy, [3] = NULL, }; static struct llc_conn_state_trans llc_await_busy_state_trans_11a = { .ev = llc_conn_ev_rx_rr_cmd_pbit_set_0, .next_state = LLC_CONN_STATE_AWAIT_BUSY, .ev_qualifiers = NONE, .ev_actions = llc_await_busy_actions_11a, }; /* State transitions for LLC_CONN_EV_RX_RR_RSP_Fbit_SET_0 event */ static llc_conn_action_t llc_await_busy_actions_11b[] = { [0] = llc_conn_ac_upd_nr_received, [1] = llc_conn_ac_upd_vs, [2] = llc_conn_ac_clear_remote_busy, [3] = NULL, }; static struct llc_conn_state_trans llc_await_busy_state_trans_11b = { .ev = llc_conn_ev_rx_rr_rsp_fbit_set_0, .next_state = LLC_CONN_STATE_AWAIT_BUSY, .ev_qualifiers = NONE, .ev_actions = llc_await_busy_actions_11b, }; /* State transitions for LLC_CONN_EV_RX_REJ_CMD_Pbit_SET_0 event */ static llc_conn_action_t llc_await_busy_actions_11c[] = { [0] = llc_conn_ac_upd_nr_received, [1] = llc_conn_ac_upd_vs, [2] = llc_conn_ac_clear_remote_busy, [3] = NULL, }; static struct llc_conn_state_trans llc_await_busy_state_trans_11c = { .ev = llc_conn_ev_rx_rej_cmd_pbit_set_0, .next_state = LLC_CONN_STATE_AWAIT_BUSY, .ev_qualifiers = NONE, .ev_actions = llc_await_busy_actions_11c, }; /* State transitions for LLC_CONN_EV_RX_REJ_RSP_Fbit_SET_0 event */ static llc_conn_action_t llc_await_busy_actions_11d[] = { [0] = llc_conn_ac_upd_nr_received, [1] = llc_conn_ac_upd_vs, [2] = llc_conn_ac_clear_remote_busy, [3] = NULL, }; static struct llc_conn_state_trans 
llc_await_busy_state_trans_11d = { .ev = llc_conn_ev_rx_rej_rsp_fbit_set_0, .next_state = LLC_CONN_STATE_AWAIT_BUSY, .ev_qualifiers = NONE, .ev_actions = llc_await_busy_actions_11d, }; /* State transitions for LLC_CONN_EV_RX_RR_CMD_Pbit_SET_1 event */ static llc_conn_action_t llc_await_busy_actions_12a[] = { [0] = llc_conn_ac_send_rnr_rsp_f_set_1, [1] = llc_conn_ac_upd_nr_received, [2] = llc_conn_ac_upd_vs, [3] = llc_conn_ac_clear_remote_busy, [4] = NULL, }; static struct llc_conn_state_trans llc_await_busy_state_trans_12a = { .ev = llc_conn_ev_rx_rr_cmd_pbit_set_1, .next_state = LLC_CONN_STATE_AWAIT_BUSY, .ev_qualifiers = NONE, .ev_actions = llc_await_busy_actions_12a, }; /* State transitions for LLC_CONN_EV_RX_REJ_CMD_Pbit_SET_1 event */ static llc_conn_action_t llc_await_busy_actions_12b[] = { [0] = llc_conn_ac_send_rnr_rsp_f_set_1, [1] = llc_conn_ac_upd_nr_received, [2] = llc_conn_ac_upd_vs, [3] = llc_conn_ac_clear_remote_busy, [4] = NULL, }; static struct llc_conn_state_trans llc_await_busy_state_trans_12b = { .ev = llc_conn_ev_rx_rej_cmd_pbit_set_1, .next_state = LLC_CONN_STATE_AWAIT_BUSY, .ev_qualifiers = NONE, .ev_actions = llc_await_busy_actions_12b, }; /* State transitions for LLC_CONN_EV_RX_RNR_RSP_Fbit_SET_1 event */ static llc_conn_action_t llc_await_busy_actions_13[] = { [0] = llc_conn_ac_upd_nr_received, [1] = llc_conn_ac_upd_vs, [2] = llc_conn_ac_stop_p_timer, [3] = llc_conn_ac_set_remote_busy, [4] = NULL, }; static struct llc_conn_state_trans llc_await_busy_state_trans_13 = { .ev = llc_conn_ev_rx_rnr_rsp_fbit_set_1, .next_state = LLC_CONN_STATE_BUSY, .ev_qualifiers = NONE, .ev_actions = llc_await_busy_actions_13, }; /* State transitions for LLC_CONN_EV_RX_RNR_CMD_Pbit_SET_0 event */ static llc_conn_action_t llc_await_busy_actions_14a[] = { [0] = llc_conn_ac_upd_nr_received, [1] = llc_conn_ac_upd_vs, [2] = llc_conn_ac_set_remote_busy, [3] = NULL, }; static struct llc_conn_state_trans llc_await_busy_state_trans_14a = { .ev = 
llc_conn_ev_rx_rnr_cmd_pbit_set_0, .next_state = LLC_CONN_STATE_AWAIT_BUSY, .ev_qualifiers = NONE, .ev_actions = llc_await_busy_actions_14a, }; /* State transitions for LLC_CONN_EV_RX_RNR_RSP_Fbit_SET_0 event */ static llc_conn_action_t llc_await_busy_actions_14b[] = { [0] = llc_conn_ac_upd_nr_received, [1] = llc_conn_ac_upd_vs, [2] = llc_conn_ac_set_remote_busy, [3] = NULL, }; static struct llc_conn_state_trans llc_await_busy_state_trans_14b = { .ev = llc_conn_ev_rx_rnr_rsp_fbit_set_0, .next_state = LLC_CONN_STATE_AWAIT_BUSY, .ev_qualifiers = NONE, .ev_actions = llc_await_busy_actions_14b, }; /* State transitions for LLC_CONN_EV_RX_RNR_CMD_Pbit_SET_1 event */ static llc_conn_action_t llc_await_busy_actions_15[] = { [0] = llc_conn_ac_send_rnr_rsp_f_set_1, [1] = llc_conn_ac_upd_nr_received, [2] = llc_conn_ac_upd_vs, [3] = llc_conn_ac_set_remote_busy, [4] = NULL, }; static struct llc_conn_state_trans llc_await_busy_state_trans_15 = { .ev = llc_conn_ev_rx_rnr_cmd_pbit_set_1, .next_state = LLC_CONN_STATE_AWAIT_BUSY, .ev_qualifiers = NONE, .ev_actions = llc_await_busy_actions_15, }; /* State transitions for LLC_CONN_EV_P_TMR_EXP event */ static llc_conn_ev_qfyr_t llc_await_busy_ev_qfyrs_16[] = { [0] = llc_conn_ev_qlfy_retry_cnt_lt_n2, [1] = NULL, }; static llc_conn_action_t llc_await_busy_actions_16[] = { [0] = llc_conn_ac_send_rnr_cmd_p_set_1, [1] = llc_conn_ac_start_p_timer, [2] = llc_conn_ac_inc_retry_cnt_by_1, [3] = NULL, }; static struct llc_conn_state_trans llc_await_busy_state_trans_16 = { .ev = llc_conn_ev_p_tmr_exp, .next_state = LLC_CONN_STATE_AWAIT_BUSY, .ev_qualifiers = llc_await_busy_ev_qfyrs_16, .ev_actions = llc_await_busy_actions_16, }; /* * Array of pointers; * one to each transition */ static struct llc_conn_state_trans *llc_await_busy_state_transitions[] = { [0] = &llc_common_state_trans_1, /* Request */ [1] = &llc_common_state_trans_2, [2] = &llc_await_busy_state_trans_1_0, [3] = &llc_common_state_trans_end, [4] = &llc_await_busy_state_trans_1, /* 
Local busy */ [5] = &llc_await_busy_state_trans_2, [6] = &llc_await_busy_state_trans_3, [7] = &llc_common_state_trans_end, [8] = &llc_common_state_trans_end, /* Initiate PF cycle */ [9] = &llc_common_state_trans_11a, /* Timer */ [10] = &llc_common_state_trans_11b, [11] = &llc_common_state_trans_11c, [12] = &llc_common_state_trans_11d, [13] = &llc_await_busy_state_trans_16, [14] = &llc_common_state_trans_end, [15] = &llc_await_busy_state_trans_4, /* Receive frame */ [16] = &llc_await_busy_state_trans_5a, [17] = &llc_await_busy_state_trans_5b, [18] = &llc_await_busy_state_trans_6, [19] = &llc_await_busy_state_trans_7, [20] = &llc_await_busy_state_trans_8a, [21] = &llc_await_busy_state_trans_8b, [22] = &llc_await_busy_state_trans_9, [23] = &llc_await_busy_state_trans_10a, [24] = &llc_await_busy_state_trans_10b, [25] = &llc_await_busy_state_trans_11a, [26] = &llc_await_busy_state_trans_11b, [27] = &llc_await_busy_state_trans_11c, [28] = &llc_await_busy_state_trans_11d, [29] = &llc_await_busy_state_trans_12a, [30] = &llc_await_busy_state_trans_12b, [31] = &llc_await_busy_state_trans_13, [32] = &llc_await_busy_state_trans_14a, [33] = &llc_await_busy_state_trans_14b, [34] = &llc_await_busy_state_trans_15, [35] = &llc_common_state_trans_3, [36] = &llc_common_state_trans_4, [37] = &llc_common_state_trans_5, [38] = &llc_common_state_trans_6, [39] = &llc_common_state_trans_7a, [40] = &llc_common_state_trans_7b, [41] = &llc_common_state_trans_8a, [42] = &llc_common_state_trans_8b, [43] = &llc_common_state_trans_8c, [44] = &llc_common_state_trans_9, /* [45] = &llc_common_state_trans_10, */ [45] = &llc_common_state_trans_end, }; /* ----------------- LLC_CONN_STATE_AWAIT_REJ transitions --------------- */ /* State transitions for LLC_CONN_EV_DATA_CONN_REQ event */ static llc_conn_ev_qfyr_t llc_await_reject_ev_qfyrs_1_0[] = { [0] = llc_conn_ev_qlfy_set_status_refuse, [1] = NULL, }; /* just one member, NULL, .bss zeroes it */ static llc_conn_action_t 
llc_await_reject_actions_1_0[1]; static struct llc_conn_state_trans llc_await_reject_state_trans_1_0 = { .ev = llc_conn_ev_data_req, .next_state = LLC_CONN_STATE_AWAIT_REJ, .ev_qualifiers = llc_await_reject_ev_qfyrs_1_0, .ev_actions = llc_await_reject_actions_1_0, }; /* State transitions for LLC_CONN_EV_LOCAL_BUSY_DETECTED event */ static llc_conn_action_t llc_await_rejct_actions_1[] = { [0] = llc_conn_ac_send_rnr_xxx_x_set_0, [1] = llc_conn_ac_set_data_flag_2, [2] = NULL }; static struct llc_conn_state_trans llc_await_rejct_state_trans_1 = { .ev = llc_conn_ev_local_busy_detected, .next_state = LLC_CONN_STATE_AWAIT_BUSY, .ev_qualifiers = NONE, .ev_actions = llc_await_rejct_actions_1, }; /* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_0_UNEXPD_Ns event */ static llc_conn_action_t llc_await_rejct_actions_2a[] = { [0] = llc_conn_ac_upd_nr_received, [1] = llc_conn_ac_upd_vs, [2] = NULL }; static struct llc_conn_state_trans llc_await_rejct_state_trans_2a = { .ev = llc_conn_ev_rx_i_cmd_pbit_set_0_unexpd_ns, .next_state = LLC_CONN_STATE_AWAIT_REJ, .ev_qualifiers = NONE, .ev_actions = llc_await_rejct_actions_2a, }; /* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_0_UNEXPD_Ns event */ static llc_conn_action_t llc_await_rejct_actions_2b[] = { [0] = llc_conn_ac_upd_nr_received, [1] = llc_conn_ac_upd_vs, [2] = NULL }; static struct llc_conn_state_trans llc_await_rejct_state_trans_2b = { .ev = llc_conn_ev_rx_i_rsp_fbit_set_0_unexpd_ns, .next_state = LLC_CONN_STATE_AWAIT_REJ, .ev_qualifiers = NONE, .ev_actions = llc_await_rejct_actions_2b, }; /* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_1_UNEXPD_Ns event */ static llc_conn_action_t llc_await_rejct_actions_3[] = { [0] = llc_conn_ac_send_rr_rsp_f_set_1, [1] = llc_conn_ac_upd_nr_received, [2] = llc_conn_ac_upd_vs, [3] = NULL }; static struct llc_conn_state_trans llc_await_rejct_state_trans_3 = { .ev = llc_conn_ev_rx_i_cmd_pbit_set_1_unexpd_ns, .next_state = LLC_CONN_STATE_AWAIT_REJ, .ev_qualifiers = NONE, 
.ev_actions = llc_await_rejct_actions_3, }; /* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_1 event */ static llc_conn_action_t llc_await_rejct_actions_4[] = { [0] = llc_conn_ac_inc_vr_by_1, [1] = llc_conn_ac_data_ind, [2] = llc_conn_ac_stop_p_timer, [3] = llc_conn_ac_stop_rej_timer, [4] = llc_conn_ac_upd_nr_received, [5] = llc_conn_ac_upd_vs, [6] = llc_conn_ac_resend_i_xxx_x_set_0_or_send_rr, [7] = llc_conn_ac_clear_remote_busy, [8] = NULL, }; static struct llc_conn_state_trans llc_await_rejct_state_trans_4 = { .ev = llc_conn_ev_rx_i_rsp_fbit_set_1, .next_state = LLC_CONN_STATE_NORMAL, .ev_qualifiers = NONE, .ev_actions = llc_await_rejct_actions_4, }; /* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_0 event */ static llc_conn_action_t llc_await_rejct_actions_5a[] = { [0] = llc_conn_ac_inc_vr_by_1, [1] = llc_conn_ac_data_ind, [2] = llc_conn_ac_send_rr_xxx_x_set_0, [3] = llc_conn_ac_stop_rej_timer, [4] = llc_conn_ac_upd_nr_received, [5] = llc_conn_ac_upd_vs, [6] = NULL, }; static struct llc_conn_state_trans llc_await_rejct_state_trans_5a = { .ev = llc_conn_ev_rx_i_rsp_fbit_set_0, .next_state = LLC_CONN_STATE_AWAIT, .ev_qualifiers = NONE, .ev_actions = llc_await_rejct_actions_5a, }; /* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_0 event */ static llc_conn_action_t llc_await_rejct_actions_5b[] = { [0] = llc_conn_ac_inc_vr_by_1, [1] = llc_conn_ac_data_ind, [2] = llc_conn_ac_send_rr_xxx_x_set_0, [3] = llc_conn_ac_stop_rej_timer, [4] = llc_conn_ac_upd_nr_received, [5] = llc_conn_ac_upd_vs, [6] = NULL, }; static struct llc_conn_state_trans llc_await_rejct_state_trans_5b = { .ev = llc_conn_ev_rx_i_cmd_pbit_set_0, .next_state = LLC_CONN_STATE_AWAIT, .ev_qualifiers = NONE, .ev_actions = llc_await_rejct_actions_5b, }; /* State transitions for LLC_CONN_EV_RX_I_CMD_Pbit_SET_1 event */ static llc_conn_action_t llc_await_rejct_actions_6[] = { [0] = llc_conn_ac_inc_vr_by_1, [1] = llc_conn_ac_data_ind, [2] = llc_conn_ac_send_rr_rsp_f_set_1, [3] = 
llc_conn_ac_stop_rej_timer, [4] = llc_conn_ac_upd_nr_received, [5] = llc_conn_ac_upd_vs, [6] = NULL, }; static struct llc_conn_state_trans llc_await_rejct_state_trans_6 = { .ev = llc_conn_ev_rx_i_cmd_pbit_set_1, .next_state = LLC_CONN_STATE_AWAIT, .ev_qualifiers = NONE, .ev_actions = llc_await_rejct_actions_6, }; /* State transitions for LLC_CONN_EV_RX_RR_RSP_Fbit_SET_1 event */ static llc_conn_action_t llc_await_rejct_actions_7a[] = { [0] = llc_conn_ac_upd_nr_received, [1] = llc_conn_ac_upd_vs, [2] = llc_conn_ac_stop_p_timer, [3] = llc_conn_ac_resend_i_xxx_x_set_0, [4] = llc_conn_ac_clear_remote_busy, [5] = NULL, }; static struct llc_conn_state_trans llc_await_rejct_state_trans_7a = { .ev = llc_conn_ev_rx_rr_rsp_fbit_set_1, .next_state = LLC_CONN_STATE_REJ, .ev_qualifiers = NONE, .ev_actions = llc_await_rejct_actions_7a, }; /* State transitions for LLC_CONN_EV_RX_REJ_RSP_Fbit_SET_1 event */ static llc_conn_action_t llc_await_rejct_actions_7b[] = { [0] = llc_conn_ac_upd_nr_received, [1] = llc_conn_ac_upd_vs, [2] = llc_conn_ac_stop_p_timer, [3] = llc_conn_ac_resend_i_xxx_x_set_0, [4] = llc_conn_ac_clear_remote_busy, [5] = NULL, }; static struct llc_conn_state_trans llc_await_rejct_state_trans_7b = { .ev = llc_conn_ev_rx_rej_rsp_fbit_set_1, .next_state = LLC_CONN_STATE_REJ, .ev_qualifiers = NONE, .ev_actions = llc_await_rejct_actions_7b, }; /* State transitions for LLC_CONN_EV_RX_I_RSP_Fbit_SET_1_UNEXPD_Ns event */ static llc_conn_action_t llc_await_rejct_actions_7c[] = { [0] = llc_conn_ac_upd_nr_received, [1] = llc_conn_ac_upd_vs, [2] = llc_conn_ac_stop_p_timer, [3] = llc_conn_ac_resend_i_xxx_x_set_0, [4] = llc_conn_ac_clear_remote_busy, [5] = NULL, }; static struct llc_conn_state_trans llc_await_rejct_state_trans_7c = { .ev = llc_conn_ev_rx_i_rsp_fbit_set_1_unexpd_ns, .next_state = LLC_CONN_STATE_REJ, .ev_qualifiers = NONE, .ev_actions = llc_await_rejct_actions_7c, }; /* State transitions for LLC_CONN_EV_RX_RR_CMD_Pbit_SET_0 event */ static llc_conn_action_t 
llc_await_rejct_actions_8a[] = { [0] = llc_conn_ac_upd_nr_received, [1] = llc_conn_ac_upd_vs, [2] = llc_conn_ac_clear_remote_busy, [3] = NULL, }; static struct llc_conn_state_trans llc_await_rejct_state_trans_8a = { .ev = llc_conn_ev_rx_rr_cmd_pbit_set_0, .next_state = LLC_CONN_STATE_AWAIT_REJ, .ev_qualifiers = NONE, .ev_actions = llc_await_rejct_actions_8a, }; /* State transitions for LLC_CONN_EV_RX_RR_RSP_Fbit_SET_0 event */ static llc_conn_action_t llc_await_rejct_actions_8b[] = { [0] = llc_conn_ac_upd_nr_received, [1] = llc_conn_ac_upd_vs, [2] = llc_conn_ac_clear_remote_busy, [3] = NULL, }; static struct llc_conn_state_trans llc_await_rejct_state_trans_8b = { .ev = llc_conn_ev_rx_rr_rsp_fbit_set_0, .next_state = LLC_CONN_STATE_AWAIT_REJ, .ev_qualifiers = NONE, .ev_actions = llc_await_rejct_actions_8b, }; /* State transitions for LLC_CONN_EV_RX_REJ_CMD_Pbit_SET_0 event */ static llc_conn_action_t llc_await_rejct_actions_8c[] = { [0] = llc_conn_ac_upd_nr_received, [1] = llc_conn_ac_upd_vs, [2] = llc_conn_ac_clear_remote_busy, [3] = NULL, }; static struct llc_conn_state_trans llc_await_rejct_state_trans_8c = { .ev = llc_conn_ev_rx_rej_cmd_pbit_set_0, .next_state = LLC_CONN_STATE_AWAIT_REJ, .ev_qualifiers = NONE, .ev_actions = llc_await_rejct_actions_8c, }; /* State transitions for LLC_CONN_EV_RX_REJ_RSP_Fbit_SET_0 event */ static llc_conn_action_t llc_await_rejct_actions_8d[] = { [0] = llc_conn_ac_upd_nr_received, [1] = llc_conn_ac_upd_vs, [2] = llc_conn_ac_clear_remote_busy, [3] = NULL, }; static struct llc_conn_state_trans llc_await_rejct_state_trans_8d = { .ev = llc_conn_ev_rx_rej_rsp_fbit_set_0, .next_state = LLC_CONN_STATE_AWAIT_REJ, .ev_qualifiers = NONE, .ev_actions = llc_await_rejct_actions_8d, }; /* State transitions for LLC_CONN_EV_RX_RR_CMD_Pbit_SET_1 event */ static llc_conn_action_t llc_await_rejct_actions_9a[] = { [0] = llc_conn_ac_send_rr_rsp_f_set_1, [1] = llc_conn_ac_upd_nr_received, [2] = llc_conn_ac_upd_vs, [3] = llc_conn_ac_clear_remote_busy, 
[4] = NULL, }; static struct llc_conn_state_trans llc_await_rejct_state_trans_9a = { .ev = llc_conn_ev_rx_rr_cmd_pbit_set_1, .next_state = LLC_CONN_STATE_AWAIT_REJ, .ev_qualifiers = NONE, .ev_actions = llc_await_rejct_actions_9a, }; /* State transitions for LLC_CONN_EV_RX_REJ_CMD_Pbit_SET_1 event */ static llc_conn_action_t llc_await_rejct_actions_9b[] = { [0] = llc_conn_ac_send_rr_rsp_f_set_1, [1] = llc_conn_ac_upd_nr_received, [2] = llc_conn_ac_upd_vs, [3] = llc_conn_ac_clear_remote_busy, [4] = NULL, }; static struct llc_conn_state_trans llc_await_rejct_state_trans_9b = { .ev = llc_conn_ev_rx_rej_cmd_pbit_set_1, .next_state = LLC_CONN_STATE_AWAIT_REJ, .ev_qualifiers = NONE, .ev_actions = llc_await_rejct_actions_9b, }; /* State transitions for LLC_CONN_EV_RX_RNR_RSP_Fbit_SET_1 event */ static llc_conn_action_t llc_await_rejct_actions_10[] = { [0] = llc_conn_ac_upd_nr_received, [1] = llc_conn_ac_upd_vs, [2] = llc_conn_ac_stop_p_timer, [3] = llc_conn_ac_set_remote_busy, [4] = NULL, }; static struct llc_conn_state_trans llc_await_rejct_state_trans_10 = { .ev = llc_conn_ev_rx_rnr_rsp_fbit_set_1, .next_state = LLC_CONN_STATE_REJ, .ev_qualifiers = NONE, .ev_actions = llc_await_rejct_actions_10, }; /* State transitions for LLC_CONN_EV_RX_RNR_CMD_Pbit_SET_0 event */ static llc_conn_action_t llc_await_rejct_actions_11a[] = { [0] = llc_conn_ac_upd_nr_received, [1] = llc_conn_ac_upd_vs, [2] = llc_conn_ac_set_remote_busy, [3] = NULL, }; static struct llc_conn_state_trans llc_await_rejct_state_trans_11a = { .ev = llc_conn_ev_rx_rnr_cmd_pbit_set_0, .next_state = LLC_CONN_STATE_AWAIT_REJ, .ev_qualifiers = NONE, .ev_actions = llc_await_rejct_actions_11a, }; /* State transitions for LLC_CONN_EV_RX_RNR_RSP_Fbit_SET_0 event */ static llc_conn_action_t llc_await_rejct_actions_11b[] = { [0] = llc_conn_ac_upd_nr_received, [1] = llc_conn_ac_upd_vs, [2] = llc_conn_ac_set_remote_busy, [3] = NULL, }; static struct llc_conn_state_trans llc_await_rejct_state_trans_11b = { .ev = 
llc_conn_ev_rx_rnr_rsp_fbit_set_0, .next_state = LLC_CONN_STATE_AWAIT_REJ, .ev_qualifiers = NONE, .ev_actions = llc_await_rejct_actions_11b, }; /* State transitions for LLC_CONN_EV_RX_RNR_CMD_Pbit_SET_1 event */ static llc_conn_action_t llc_await_rejct_actions_12[] = { [0] = llc_conn_ac_send_rr_rsp_f_set_1, [1] = llc_conn_ac_upd_nr_received, [2] = llc_conn_ac_upd_vs, [3] = llc_conn_ac_set_remote_busy, [4] = NULL, }; static struct llc_conn_state_trans llc_await_rejct_state_trans_12 = { .ev = llc_conn_ev_rx_rnr_cmd_pbit_set_1, .next_state = LLC_CONN_STATE_AWAIT_REJ, .ev_qualifiers = NONE, .ev_actions = llc_await_rejct_actions_12, }; /* State transitions for LLC_CONN_EV_P_TMR_EXP event */ static llc_conn_ev_qfyr_t llc_await_rejct_ev_qfyrs_13[] = { [0] = llc_conn_ev_qlfy_retry_cnt_lt_n2, [1] = NULL, }; static llc_conn_action_t llc_await_rejct_actions_13[] = { [0] = llc_conn_ac_send_rej_cmd_p_set_1, [1] = llc_conn_ac_stop_p_timer, [2] = llc_conn_ac_inc_retry_cnt_by_1, [3] = NULL, }; static struct llc_conn_state_trans llc_await_rejct_state_trans_13 = { .ev = llc_conn_ev_p_tmr_exp, .next_state = LLC_CONN_STATE_AWAIT_REJ, .ev_qualifiers = llc_await_rejct_ev_qfyrs_13, .ev_actions = llc_await_rejct_actions_13, }; /* * Array of pointers; * one to each transition */ static struct llc_conn_state_trans *llc_await_rejct_state_transitions[] = { [0] = &llc_await_reject_state_trans_1_0, [1] = &llc_common_state_trans_1, /* requests */ [2] = &llc_common_state_trans_2, [3] = &llc_common_state_trans_end, [4] = &llc_await_rejct_state_trans_1, /* local busy */ [5] = &llc_common_state_trans_end, [6] = &llc_common_state_trans_end, /* Initiate PF cycle */ [7] = &llc_await_rejct_state_trans_13, /* timers */ [8] = &llc_common_state_trans_11a, [9] = &llc_common_state_trans_11b, [10] = &llc_common_state_trans_11c, [11] = &llc_common_state_trans_11d, [12] = &llc_common_state_trans_end, [13] = &llc_await_rejct_state_trans_2a, /* receive frames */ [14] = &llc_await_rejct_state_trans_2b, [15] = 
&llc_await_rejct_state_trans_3, [16] = &llc_await_rejct_state_trans_4, [17] = &llc_await_rejct_state_trans_5a, [18] = &llc_await_rejct_state_trans_5b, [19] = &llc_await_rejct_state_trans_6, [20] = &llc_await_rejct_state_trans_7a, [21] = &llc_await_rejct_state_trans_7b, [22] = &llc_await_rejct_state_trans_7c, [23] = &llc_await_rejct_state_trans_8a, [24] = &llc_await_rejct_state_trans_8b, [25] = &llc_await_rejct_state_trans_8c, [26] = &llc_await_rejct_state_trans_8d, [27] = &llc_await_rejct_state_trans_9a, [28] = &llc_await_rejct_state_trans_9b, [29] = &llc_await_rejct_state_trans_10, [30] = &llc_await_rejct_state_trans_11a, [31] = &llc_await_rejct_state_trans_11b, [32] = &llc_await_rejct_state_trans_12, [33] = &llc_common_state_trans_3, [34] = &llc_common_state_trans_4, [35] = &llc_common_state_trans_5, [36] = &llc_common_state_trans_6, [37] = &llc_common_state_trans_7a, [38] = &llc_common_state_trans_7b, [39] = &llc_common_state_trans_8a, [40] = &llc_common_state_trans_8b, [41] = &llc_common_state_trans_8c, [42] = &llc_common_state_trans_9, /* [43] = &llc_common_state_trans_10, */ [43] = &llc_common_state_trans_end, }; /* LLC_CONN_STATE_D_CONN transitions */ /* State transitions for LLC_CONN_EV_RX_SABME_CMD_Pbit_SET_X event, * cause_flag = 1 */ static llc_conn_ev_qfyr_t llc_d_conn_ev_qfyrs_1[] = { [0] = llc_conn_ev_qlfy_cause_flag_eq_1, [1] = llc_conn_ev_qlfy_set_status_conflict, [2] = NULL, }; static llc_conn_action_t llc_d_conn_actions_1[] = { [0] = llc_conn_ac_send_dm_rsp_f_set_p, [1] = llc_conn_ac_stop_ack_timer, [2] = llc_conn_ac_disc_confirm, [3] = llc_conn_disc, [4] = NULL, }; static struct llc_conn_state_trans llc_d_conn_state_trans_1 = { .ev = llc_conn_ev_rx_sabme_cmd_pbit_set_x, .next_state = LLC_CONN_STATE_ADM, .ev_qualifiers = llc_d_conn_ev_qfyrs_1, .ev_actions = llc_d_conn_actions_1, }; /* State transitions for LLC_CONN_EV_RX_SABME_CMD_Pbit_SET_X event, * cause_flag = 0 */ static llc_conn_ev_qfyr_t llc_d_conn_ev_qfyrs_1_1[] = { [0] = 
llc_conn_ev_qlfy_cause_flag_eq_0, [1] = llc_conn_ev_qlfy_set_status_conflict, [2] = NULL, }; static llc_conn_action_t llc_d_conn_actions_1_1[] = { [0] = llc_conn_ac_send_dm_rsp_f_set_p, [1] = llc_conn_ac_stop_ack_timer, [2] = llc_conn_disc, [3] = NULL, }; static struct llc_conn_state_trans llc_d_conn_state_trans_1_1 = { .ev = llc_conn_ev_rx_sabme_cmd_pbit_set_x, .next_state = LLC_CONN_STATE_ADM, .ev_qualifiers = llc_d_conn_ev_qfyrs_1_1, .ev_actions = llc_d_conn_actions_1_1, }; /* State transitions for LLC_CONN_EV_RX_UA_RSP_Fbit_SET_X event, * cause_flag = 1 */ static llc_conn_ev_qfyr_t llc_d_conn_ev_qfyrs_2[] = { [0] = llc_conn_ev_qlfy_p_flag_eq_f, [1] = llc_conn_ev_qlfy_cause_flag_eq_1, [2] = llc_conn_ev_qlfy_set_status_disc, [3] = NULL, }; static llc_conn_action_t llc_d_conn_actions_2[] = { [0] = llc_conn_ac_stop_ack_timer, [1] = llc_conn_ac_disc_confirm, [2] = llc_conn_disc, [3] = NULL, }; static struct llc_conn_state_trans llc_d_conn_state_trans_2 = { .ev = llc_conn_ev_rx_ua_rsp_fbit_set_x, .next_state = LLC_CONN_STATE_ADM, .ev_qualifiers = llc_d_conn_ev_qfyrs_2, .ev_actions = llc_d_conn_actions_2, }; /* State transitions for LLC_CONN_EV_RX_UA_RSP_Fbit_SET_X event, * cause_flag = 0 */ static llc_conn_ev_qfyr_t llc_d_conn_ev_qfyrs_2_1[] = { [0] = llc_conn_ev_qlfy_p_flag_eq_f, [1] = llc_conn_ev_qlfy_cause_flag_eq_0, [2] = llc_conn_ev_qlfy_set_status_disc, [3] = NULL, }; static llc_conn_action_t llc_d_conn_actions_2_1[] = { [0] = llc_conn_ac_stop_ack_timer, [1] = llc_conn_disc, [2] = NULL, }; static struct llc_conn_state_trans llc_d_conn_state_trans_2_1 = { .ev = llc_conn_ev_rx_ua_rsp_fbit_set_x, .next_state = LLC_CONN_STATE_ADM, .ev_qualifiers = llc_d_conn_ev_qfyrs_2_1, .ev_actions = llc_d_conn_actions_2_1, }; /* State transitions for LLC_CONN_EV_RX_DISC_CMD_Pbit_SET_X event */ static llc_conn_action_t llc_d_conn_actions_3[] = { [0] = llc_conn_ac_send_ua_rsp_f_set_p, [1] = NULL, }; static struct llc_conn_state_trans llc_d_conn_state_trans_3 = { .ev = 
llc_conn_ev_rx_disc_cmd_pbit_set_x, .next_state = LLC_CONN_STATE_D_CONN, .ev_qualifiers = NONE, .ev_actions = llc_d_conn_actions_3, }; /* State transitions for LLC_CONN_EV_RX_DM_RSP_Fbit_SET_X event, * cause_flag = 1 */ static llc_conn_ev_qfyr_t llc_d_conn_ev_qfyrs_4[] = { [0] = llc_conn_ev_qlfy_cause_flag_eq_1, [1] = llc_conn_ev_qlfy_set_status_disc, [2] = NULL, }; static llc_conn_action_t llc_d_conn_actions_4[] = { [0] = llc_conn_ac_stop_ack_timer, [1] = llc_conn_ac_disc_confirm, [2] = llc_conn_disc, [3] = NULL, }; static struct llc_conn_state_trans llc_d_conn_state_trans_4 = { .ev = llc_conn_ev_rx_dm_rsp_fbit_set_x, .next_state = LLC_CONN_STATE_ADM, .ev_qualifiers = llc_d_conn_ev_qfyrs_4, .ev_actions = llc_d_conn_actions_4, }; /* State transitions for LLC_CONN_EV_RX_DM_RSP_Fbit_SET_X event, * cause_flag = 0 */ static llc_conn_ev_qfyr_t llc_d_conn_ev_qfyrs_4_1[] = { [0] = llc_conn_ev_qlfy_cause_flag_eq_0, [1] = llc_conn_ev_qlfy_set_status_disc, [2] = NULL, }; static llc_conn_action_t llc_d_conn_actions_4_1[] = { [0] = llc_conn_ac_stop_ack_timer, [1] = llc_conn_disc, [2] = NULL, }; static struct llc_conn_state_trans llc_d_conn_state_trans_4_1 = { .ev = llc_conn_ev_rx_dm_rsp_fbit_set_x, .next_state = LLC_CONN_STATE_ADM, .ev_qualifiers = llc_d_conn_ev_qfyrs_4_1, .ev_actions = llc_d_conn_actions_4_1, }; /* * State transition for * LLC_CONN_EV_DATA_CONN_REQ event */ static llc_conn_ev_qfyr_t llc_d_conn_ev_qfyrs_5[] = { [0] = llc_conn_ev_qlfy_set_status_refuse, [1] = NULL, }; /* just one member, NULL, .bss zeroes it */ static llc_conn_action_t llc_d_conn_actions_5[1]; static struct llc_conn_state_trans llc_d_conn_state_trans_5 = { .ev = llc_conn_ev_data_req, .next_state = LLC_CONN_STATE_D_CONN, .ev_qualifiers = llc_d_conn_ev_qfyrs_5, .ev_actions = llc_d_conn_actions_5, }; /* State transitions for LLC_CONN_EV_ACK_TMR_EXP event */ static llc_conn_ev_qfyr_t llc_d_conn_ev_qfyrs_6[] = { [0] = llc_conn_ev_qlfy_retry_cnt_lt_n2, [1] = NULL, }; static llc_conn_action_t 
llc_d_conn_actions_6[] = { [0] = llc_conn_ac_send_disc_cmd_p_set_x, [1] = llc_conn_ac_start_ack_timer, [2] = llc_conn_ac_inc_retry_cnt_by_1, [3] = NULL, }; static struct llc_conn_state_trans llc_d_conn_state_trans_6 = { .ev = llc_conn_ev_ack_tmr_exp, .next_state = LLC_CONN_STATE_D_CONN, .ev_qualifiers = llc_d_conn_ev_qfyrs_6, .ev_actions = llc_d_conn_actions_6, }; /* State transitions for LLC_CONN_EV_ACK_TMR_EXP event, cause_flag = 1 */ static llc_conn_ev_qfyr_t llc_d_conn_ev_qfyrs_7[] = { [0] = llc_conn_ev_qlfy_retry_cnt_gte_n2, [1] = llc_conn_ev_qlfy_cause_flag_eq_1, [2] = llc_conn_ev_qlfy_set_status_failed, [3] = NULL, }; static llc_conn_action_t llc_d_conn_actions_7[] = { [0] = llc_conn_ac_disc_confirm, [1] = llc_conn_disc, [2] = NULL, }; static struct llc_conn_state_trans llc_d_conn_state_trans_7 = { .ev = llc_conn_ev_ack_tmr_exp, .next_state = LLC_CONN_STATE_ADM, .ev_qualifiers = llc_d_conn_ev_qfyrs_7, .ev_actions = llc_d_conn_actions_7, }; /* State transitions for LLC_CONN_EV_ACK_TMR_EXP event, cause_flag = 0 */ static llc_conn_ev_qfyr_t llc_d_conn_ev_qfyrs_8[] = { [0] = llc_conn_ev_qlfy_retry_cnt_gte_n2, [1] = llc_conn_ev_qlfy_cause_flag_eq_0, [2] = llc_conn_ev_qlfy_set_status_failed, [3] = NULL, }; static llc_conn_action_t llc_d_conn_actions_8[] = { [0] = llc_conn_disc, [1] = NULL, }; static struct llc_conn_state_trans llc_d_conn_state_trans_8 = { .ev = llc_conn_ev_ack_tmr_exp, .next_state = LLC_CONN_STATE_ADM, .ev_qualifiers = llc_d_conn_ev_qfyrs_8, .ev_actions = llc_d_conn_actions_8, }; /* * Array of pointers; * one to each transition */ static struct llc_conn_state_trans *llc_d_conn_state_transitions[] = { [0] = &llc_d_conn_state_trans_5, /* Request */ [1] = &llc_common_state_trans_end, [2] = &llc_common_state_trans_end, /* Local busy */ [3] = &llc_common_state_trans_end, /* Initiate PF cycle */ [4] = &llc_d_conn_state_trans_6, /* Timer */ [5] = &llc_d_conn_state_trans_7, [6] = &llc_d_conn_state_trans_8, [7] = &llc_common_state_trans_end, [8] = 
&llc_d_conn_state_trans_1, /* Receive frame */ [9] = &llc_d_conn_state_trans_1_1, [10] = &llc_d_conn_state_trans_2, [11] = &llc_d_conn_state_trans_2_1, [12] = &llc_d_conn_state_trans_3, [13] = &llc_d_conn_state_trans_4, [14] = &llc_d_conn_state_trans_4_1, [15] = &llc_common_state_trans_end, }; /* LLC_CONN_STATE_RESET transitions */ /* State transitions for LLC_CONN_EV_RX_SABME_CMD_Pbit_SET_X event */ static llc_conn_action_t llc_rst_actions_1[] = { [0] = llc_conn_ac_set_vs_0, [1] = llc_conn_ac_set_vr_0, [2] = llc_conn_ac_set_s_flag_1, [3] = llc_conn_ac_send_ua_rsp_f_set_p, [4] = NULL, }; static struct llc_conn_state_trans llc_rst_state_trans_1 = { .ev = llc_conn_ev_rx_sabme_cmd_pbit_set_x, .next_state = LLC_CONN_STATE_RESET, .ev_qualifiers = NONE, .ev_actions = llc_rst_actions_1, }; /* State transitions for LLC_CONN_EV_RX_UA_RSP_Fbit_SET_X event, * cause_flag = 1 */ static llc_conn_ev_qfyr_t llc_rst_ev_qfyrs_2[] = { [0] = llc_conn_ev_qlfy_p_flag_eq_f, [1] = llc_conn_ev_qlfy_cause_flag_eq_1, [2] = llc_conn_ev_qlfy_set_status_conn, [3] = NULL, }; static llc_conn_action_t llc_rst_actions_2[] = { [0] = llc_conn_ac_stop_ack_timer, [1] = llc_conn_ac_set_vs_0, [2] = llc_conn_ac_set_vr_0, [3] = llc_conn_ac_upd_p_flag, [4] = llc_conn_ac_rst_confirm, [5] = llc_conn_ac_set_remote_busy_0, [6] = llc_conn_reset, [7] = NULL, }; static struct llc_conn_state_trans llc_rst_state_trans_2 = { .ev = llc_conn_ev_rx_ua_rsp_fbit_set_x, .next_state = LLC_CONN_STATE_NORMAL, .ev_qualifiers = llc_rst_ev_qfyrs_2, .ev_actions = llc_rst_actions_2, }; /* State transitions for LLC_CONN_EV_RX_UA_RSP_Fbit_SET_X event, * cause_flag = 0 */ static llc_conn_ev_qfyr_t llc_rst_ev_qfyrs_2_1[] = { [0] = llc_conn_ev_qlfy_p_flag_eq_f, [1] = llc_conn_ev_qlfy_cause_flag_eq_0, [2] = llc_conn_ev_qlfy_set_status_rst_done, [3] = NULL, }; static llc_conn_action_t llc_rst_actions_2_1[] = { [0] = llc_conn_ac_stop_ack_timer, [1] = llc_conn_ac_set_vs_0, [2] = llc_conn_ac_set_vr_0, [3] = llc_conn_ac_upd_p_flag, [4] = 
llc_conn_ac_rst_confirm, [5] = llc_conn_ac_set_remote_busy_0, [6] = llc_conn_reset, [7] = NULL, }; static struct llc_conn_state_trans llc_rst_state_trans_2_1 = { .ev = llc_conn_ev_rx_ua_rsp_fbit_set_x, .next_state = LLC_CONN_STATE_NORMAL, .ev_qualifiers = llc_rst_ev_qfyrs_2_1, .ev_actions = llc_rst_actions_2_1, }; /* State transitions for LLC_CONN_EV_ACK_TMR_EXP event */ static llc_conn_ev_qfyr_t llc_rst_ev_qfyrs_3[] = { [0] = llc_conn_ev_qlfy_s_flag_eq_1, [1] = llc_conn_ev_qlfy_set_status_rst_done, [2] = NULL, }; static llc_conn_action_t llc_rst_actions_3[] = { [0] = llc_conn_ac_set_p_flag_0, [1] = llc_conn_ac_set_remote_busy_0, [2] = NULL, }; static struct llc_conn_state_trans llc_rst_state_trans_3 = { .ev = llc_conn_ev_ack_tmr_exp, .next_state = LLC_CONN_STATE_NORMAL, .ev_qualifiers = llc_rst_ev_qfyrs_3, .ev_actions = llc_rst_actions_3, }; /* State transitions for LLC_CONN_EV_RX_DISC_CMD_Pbit_SET_X event, * cause_flag = 1 */ static llc_conn_ev_qfyr_t llc_rst_ev_qfyrs_4[] = { [0] = llc_conn_ev_qlfy_cause_flag_eq_1, [1] = llc_conn_ev_qlfy_set_status_disc, [2] = NULL, }; static llc_conn_action_t llc_rst_actions_4[] = { [0] = llc_conn_ac_send_dm_rsp_f_set_p, [1] = llc_conn_ac_disc_ind, [2] = llc_conn_ac_stop_ack_timer, [3] = llc_conn_disc, [4] = NULL, }; static struct llc_conn_state_trans llc_rst_state_trans_4 = { .ev = llc_conn_ev_rx_disc_cmd_pbit_set_x, .next_state = LLC_CONN_STATE_ADM, .ev_qualifiers = llc_rst_ev_qfyrs_4, .ev_actions = llc_rst_actions_4, }; /* State transitions for LLC_CONN_EV_RX_DISC_CMD_Pbit_SET_X event, * cause_flag = 0 */ static llc_conn_ev_qfyr_t llc_rst_ev_qfyrs_4_1[] = { [0] = llc_conn_ev_qlfy_cause_flag_eq_0, [1] = llc_conn_ev_qlfy_set_status_refuse, [2] = NULL, }; static llc_conn_action_t llc_rst_actions_4_1[] = { [0] = llc_conn_ac_send_dm_rsp_f_set_p, [1] = llc_conn_ac_stop_ack_timer, [2] = llc_conn_disc, [3] = NULL, }; static struct llc_conn_state_trans llc_rst_state_trans_4_1 = { .ev = llc_conn_ev_rx_disc_cmd_pbit_set_x, .next_state = 
LLC_CONN_STATE_ADM, .ev_qualifiers = llc_rst_ev_qfyrs_4_1, .ev_actions = llc_rst_actions_4_1, }; /* State transitions for LLC_CONN_EV_RX_DM_RSP_Fbit_SET_X event, * cause_flag = 1 */ static llc_conn_ev_qfyr_t llc_rst_ev_qfyrs_5[] = { [0] = llc_conn_ev_qlfy_cause_flag_eq_1, [1] = llc_conn_ev_qlfy_set_status_disc, [2] = NULL, }; static llc_conn_action_t llc_rst_actions_5[] = { [0] = llc_conn_ac_disc_ind, [1] = llc_conn_ac_stop_ack_timer, [2] = llc_conn_disc, [3] = NULL, }; static struct llc_conn_state_trans llc_rst_state_trans_5 = { .ev = llc_conn_ev_rx_dm_rsp_fbit_set_x, .next_state = LLC_CONN_STATE_ADM, .ev_qualifiers = llc_rst_ev_qfyrs_5, .ev_actions = llc_rst_actions_5, }; /* State transitions for LLC_CONN_EV_RX_DM_RSP_Fbit_SET_X event, * cause_flag = 0 */ static llc_conn_ev_qfyr_t llc_rst_ev_qfyrs_5_1[] = { [0] = llc_conn_ev_qlfy_cause_flag_eq_0, [1] = llc_conn_ev_qlfy_set_status_refuse, [2] = NULL, }; static llc_conn_action_t llc_rst_actions_5_1[] = { [0] = llc_conn_ac_stop_ack_timer, [1] = llc_conn_disc, [2] = NULL, }; static struct llc_conn_state_trans llc_rst_state_trans_5_1 = { .ev = llc_conn_ev_rx_dm_rsp_fbit_set_x, .next_state = LLC_CONN_STATE_ADM, .ev_qualifiers = llc_rst_ev_qfyrs_5_1, .ev_actions = llc_rst_actions_5_1, }; /* State transitions for DATA_CONN_REQ event */ static llc_conn_ev_qfyr_t llc_rst_ev_qfyrs_6[] = { [0] = llc_conn_ev_qlfy_set_status_refuse, [1] = NULL, }; /* just one member, NULL, .bss zeroes it */ static llc_conn_action_t llc_rst_actions_6[1]; static struct llc_conn_state_trans llc_rst_state_trans_6 = { .ev = llc_conn_ev_data_req, .next_state = LLC_CONN_STATE_RESET, .ev_qualifiers = llc_rst_ev_qfyrs_6, .ev_actions = llc_rst_actions_6, }; /* State transitions for LLC_CONN_EV_ACK_TMR_EXP event */ static llc_conn_ev_qfyr_t llc_rst_ev_qfyrs_7[] = { [0] = llc_conn_ev_qlfy_retry_cnt_lt_n2, [1] = llc_conn_ev_qlfy_s_flag_eq_0, [2] = NULL, }; static llc_conn_action_t llc_rst_actions_7[] = { [0] = llc_conn_ac_send_sabme_cmd_p_set_x, [1] = 
llc_conn_ac_start_ack_timer, [2] = llc_conn_ac_inc_retry_cnt_by_1, [3] = NULL, }; static struct llc_conn_state_trans llc_rst_state_trans_7 = { .ev = llc_conn_ev_ack_tmr_exp, .next_state = LLC_CONN_STATE_RESET, .ev_qualifiers = llc_rst_ev_qfyrs_7, .ev_actions = llc_rst_actions_7, }; /* State transitions for LLC_CONN_EV_ACK_TMR_EXP event */ static llc_conn_ev_qfyr_t llc_rst_ev_qfyrs_8[] = { [0] = llc_conn_ev_qlfy_retry_cnt_gte_n2, [1] = llc_conn_ev_qlfy_s_flag_eq_0, [2] = llc_conn_ev_qlfy_cause_flag_eq_1, [3] = llc_conn_ev_qlfy_set_status_failed, [4] = NULL, }; static llc_conn_action_t llc_rst_actions_8[] = { [0] = llc_conn_ac_disc_ind, [1] = llc_conn_disc, [2] = NULL, }; static struct llc_conn_state_trans llc_rst_state_trans_8 = { .ev = llc_conn_ev_ack_tmr_exp, .next_state = LLC_CONN_STATE_ADM, .ev_qualifiers = llc_rst_ev_qfyrs_8, .ev_actions = llc_rst_actions_8, }; /* State transitions for LLC_CONN_EV_ACK_TMR_EXP event */ static llc_conn_ev_qfyr_t llc_rst_ev_qfyrs_8_1[] = { [0] = llc_conn_ev_qlfy_retry_cnt_gte_n2, [1] = llc_conn_ev_qlfy_s_flag_eq_0, [2] = llc_conn_ev_qlfy_cause_flag_eq_0, [3] = llc_conn_ev_qlfy_set_status_failed, [4] = NULL, }; static llc_conn_action_t llc_rst_actions_8_1[] = { [0] = llc_conn_ac_disc_ind, [1] = llc_conn_disc, [2] = NULL, }; static struct llc_conn_state_trans llc_rst_state_trans_8_1 = { .ev = llc_conn_ev_ack_tmr_exp, .next_state = LLC_CONN_STATE_ADM, .ev_qualifiers = llc_rst_ev_qfyrs_8_1, .ev_actions = llc_rst_actions_8_1, }; /* * Array of pointers; * one to each transition */ static struct llc_conn_state_trans *llc_rst_state_transitions[] = { [0] = &llc_rst_state_trans_6, /* Request */ [1] = &llc_common_state_trans_end, [2] = &llc_common_state_trans_end, /* Local busy */ [3] = &llc_common_state_trans_end, /* Initiate PF cycle */ [4] = &llc_rst_state_trans_3, /* Timer */ [5] = &llc_rst_state_trans_7, [6] = &llc_rst_state_trans_8, [7] = &llc_rst_state_trans_8_1, [8] = &llc_common_state_trans_end, [9] = &llc_rst_state_trans_1, /* 
Receive frame */ [10] = &llc_rst_state_trans_2, [11] = &llc_rst_state_trans_2_1, [12] = &llc_rst_state_trans_4, [13] = &llc_rst_state_trans_4_1, [14] = &llc_rst_state_trans_5, [15] = &llc_rst_state_trans_5_1, [16] = &llc_common_state_trans_end, }; /* LLC_CONN_STATE_ERROR transitions */ /* State transitions for LLC_CONN_EV_RX_SABME_CMD_Pbit_SET_X event */ static llc_conn_action_t llc_error_actions_1[] = { [0] = llc_conn_ac_set_vs_0, [1] = llc_conn_ac_set_vr_0, [2] = llc_conn_ac_send_ua_rsp_f_set_p, [3] = llc_conn_ac_rst_ind, [4] = llc_conn_ac_set_p_flag_0, [5] = llc_conn_ac_set_remote_busy_0, [6] = llc_conn_ac_stop_ack_timer, [7] = llc_conn_reset, [8] = NULL, }; static struct llc_conn_state_trans llc_error_state_trans_1 = { .ev = llc_conn_ev_rx_sabme_cmd_pbit_set_x, .next_state = LLC_CONN_STATE_NORMAL, .ev_qualifiers = NONE, .ev_actions = llc_error_actions_1, }; /* State transitions for LLC_CONN_EV_RX_DISC_CMD_Pbit_SET_X event */ static llc_conn_action_t llc_error_actions_2[] = { [0] = llc_conn_ac_send_ua_rsp_f_set_p, [1] = llc_conn_ac_disc_ind, [2] = llc_conn_ac_stop_ack_timer, [3] = llc_conn_disc, [4] = NULL, }; static struct llc_conn_state_trans llc_error_state_trans_2 = { .ev = llc_conn_ev_rx_disc_cmd_pbit_set_x, .next_state = LLC_CONN_STATE_ADM, .ev_qualifiers = NONE, .ev_actions = llc_error_actions_2, }; /* State transitions for LLC_CONN_EV_RX_DM_RSP_Fbit_SET_X event */ static llc_conn_action_t llc_error_actions_3[] = { [0] = llc_conn_ac_disc_ind, [1] = llc_conn_ac_stop_ack_timer, [2] = llc_conn_disc, [3] = NULL, }; static struct llc_conn_state_trans llc_error_state_trans_3 = { .ev = llc_conn_ev_rx_dm_rsp_fbit_set_x, .next_state = LLC_CONN_STATE_ADM, .ev_qualifiers = NONE, .ev_actions = llc_error_actions_3, }; /* State transitions for LLC_CONN_EV_RX_FRMR_RSP_Fbit_SET_X event */ static llc_conn_action_t llc_error_actions_4[] = { [0] = llc_conn_ac_send_sabme_cmd_p_set_x, [1] = llc_conn_ac_start_ack_timer, [2] = llc_conn_ac_set_retry_cnt_0, [3] = 
llc_conn_ac_set_cause_flag_0, [4] = NULL, }; static struct llc_conn_state_trans llc_error_state_trans_4 = { .ev = llc_conn_ev_rx_frmr_rsp_fbit_set_x, .next_state = LLC_CONN_STATE_RESET, .ev_qualifiers = NONE, .ev_actions = llc_error_actions_4, }; /* State transitions for LLC_CONN_EV_RX_XXX_CMD_Pbit_SET_X event */ static llc_conn_action_t llc_error_actions_5[] = { [0] = llc_conn_ac_resend_frmr_rsp_f_set_p, [1] = NULL, }; static struct llc_conn_state_trans llc_error_state_trans_5 = { .ev = llc_conn_ev_rx_xxx_cmd_pbit_set_x, .next_state = LLC_CONN_STATE_ERROR, .ev_qualifiers = NONE, .ev_actions = llc_error_actions_5, }; /* State transitions for LLC_CONN_EV_RX_XXX_RSP_Fbit_SET_X event */ static struct llc_conn_state_trans llc_error_state_trans_6 = { .ev = llc_conn_ev_rx_xxx_rsp_fbit_set_x, .next_state = LLC_CONN_STATE_ERROR, .ev_qualifiers = NONE, .ev_actions = NONE, }; /* State transitions for LLC_CONN_EV_ACK_TMR_EXP event */ static llc_conn_ev_qfyr_t llc_error_ev_qfyrs_7[] = { [0] = llc_conn_ev_qlfy_retry_cnt_lt_n2, [1] = NULL, }; static llc_conn_action_t llc_error_actions_7[] = { [0] = llc_conn_ac_resend_frmr_rsp_f_set_0, [1] = llc_conn_ac_start_ack_timer, [2] = llc_conn_ac_inc_retry_cnt_by_1, [3] = NULL, }; static struct llc_conn_state_trans llc_error_state_trans_7 = { .ev = llc_conn_ev_ack_tmr_exp, .next_state = LLC_CONN_STATE_ERROR, .ev_qualifiers = llc_error_ev_qfyrs_7, .ev_actions = llc_error_actions_7, }; /* State transitions for LLC_CONN_EV_ACK_TMR_EXP event */ static llc_conn_ev_qfyr_t llc_error_ev_qfyrs_8[] = { [0] = llc_conn_ev_qlfy_retry_cnt_gte_n2, [1] = NULL, }; static llc_conn_action_t llc_error_actions_8[] = { [0] = llc_conn_ac_send_sabme_cmd_p_set_x, [1] = llc_conn_ac_set_s_flag_0, [2] = llc_conn_ac_start_ack_timer, [3] = llc_conn_ac_set_retry_cnt_0, [4] = llc_conn_ac_set_cause_flag_0, [5] = NULL, }; static struct llc_conn_state_trans llc_error_state_trans_8 = { .ev = llc_conn_ev_ack_tmr_exp, .next_state = LLC_CONN_STATE_RESET, .ev_qualifiers = 
llc_error_ev_qfyrs_8, .ev_actions = llc_error_actions_8, }; /* State transitions for LLC_CONN_EV_DATA_CONN_REQ event */ static llc_conn_ev_qfyr_t llc_error_ev_qfyrs_9[] = { [0] = llc_conn_ev_qlfy_set_status_refuse, [1] = NULL, }; /* just one member, NULL, .bss zeroes it */ static llc_conn_action_t llc_error_actions_9[1]; static struct llc_conn_state_trans llc_error_state_trans_9 = { .ev = llc_conn_ev_data_req, .next_state = LLC_CONN_STATE_ERROR, .ev_qualifiers = llc_error_ev_qfyrs_9, .ev_actions = llc_error_actions_9, }; /* * Array of pointers; * one to each transition */ static struct llc_conn_state_trans *llc_error_state_transitions[] = { [0] = &llc_error_state_trans_9, /* Request */ [1] = &llc_common_state_trans_end, [2] = &llc_common_state_trans_end, /* Local busy */ [3] = &llc_common_state_trans_end, /* Initiate PF cycle */ [4] = &llc_error_state_trans_7, /* Timer */ [5] = &llc_error_state_trans_8, [6] = &llc_common_state_trans_end, [7] = &llc_error_state_trans_1, /* Receive frame */ [8] = &llc_error_state_trans_2, [9] = &llc_error_state_trans_3, [10] = &llc_error_state_trans_4, [11] = &llc_error_state_trans_5, [12] = &llc_error_state_trans_6, [13] = &llc_common_state_trans_end, }; /* LLC_CONN_STATE_TEMP transitions */ /* State transitions for LLC_CONN_EV_DISC_REQ event */ static llc_conn_action_t llc_temp_actions_1[] = { [0] = llc_conn_ac_stop_all_timers, [1] = llc_conn_ac_send_disc_cmd_p_set_x, [2] = llc_conn_disc, [3] = NULL, }; static struct llc_conn_state_trans llc_temp_state_trans_1 = { .ev = llc_conn_ev_disc_req, .next_state = LLC_CONN_STATE_ADM, .ev_qualifiers = NONE, .ev_actions = llc_temp_actions_1, }; /* * Array of pointers; * one to each transition */ static struct llc_conn_state_trans *llc_temp_state_transitions[] = { [0] = &llc_temp_state_trans_1, /* requests */ [1] = &llc_common_state_trans_end, [2] = &llc_common_state_trans_end, /* local busy */ [3] = &llc_common_state_trans_end, /* init_pf_cycle */ [4] = &llc_common_state_trans_end, /* timer 
*/ [5] = &llc_common_state_trans_end, /* receive */ }; /* Connection State Transition Table */ struct llc_conn_state llc_conn_state_table[NBR_CONN_STATES] = { [LLC_CONN_STATE_ADM - 1] = { .current_state = LLC_CONN_STATE_ADM, .transitions = llc_adm_state_transitions, }, [LLC_CONN_STATE_SETUP - 1] = { .current_state = LLC_CONN_STATE_SETUP, .transitions = llc_setup_state_transitions, }, [LLC_CONN_STATE_NORMAL - 1] = { .current_state = LLC_CONN_STATE_NORMAL, .transitions = llc_normal_state_transitions, }, [LLC_CONN_STATE_BUSY - 1] = { .current_state = LLC_CONN_STATE_BUSY, .transitions = llc_busy_state_transitions, }, [LLC_CONN_STATE_REJ - 1] = { .current_state = LLC_CONN_STATE_REJ, .transitions = llc_reject_state_transitions, }, [LLC_CONN_STATE_AWAIT - 1] = { .current_state = LLC_CONN_STATE_AWAIT, .transitions = llc_await_state_transitions, }, [LLC_CONN_STATE_AWAIT_BUSY - 1] = { .current_state = LLC_CONN_STATE_AWAIT_BUSY, .transitions = llc_await_busy_state_transitions, }, [LLC_CONN_STATE_AWAIT_REJ - 1] = { .current_state = LLC_CONN_STATE_AWAIT_REJ, .transitions = llc_await_rejct_state_transitions, }, [LLC_CONN_STATE_D_CONN - 1] = { .current_state = LLC_CONN_STATE_D_CONN, .transitions = llc_d_conn_state_transitions, }, [LLC_CONN_STATE_RESET - 1] = { .current_state = LLC_CONN_STATE_RESET, .transitions = llc_rst_state_transitions, }, [LLC_CONN_STATE_ERROR - 1] = { .current_state = LLC_CONN_STATE_ERROR, .transitions = llc_error_state_transitions, }, [LLC_CONN_STATE_TEMP - 1] = { .current_state = LLC_CONN_STATE_TEMP, .transitions = llc_temp_state_transitions, }, };
gpl-2.0
AOKP/kernel_samsung_msm8660-common
arch/arm/perfmon/perf-v7.c
893
21762
/* Copyright (c) 2010, Code Aurora Forum. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ /* perf-v7.c DESCRIPTION Manipulation, initialization of the ARMV7 Performance counter register. EXTERNALIZED FUNCTIONS INITIALIZATION AND SEQUENCING REQUIREMENTS */ /* INCLUDE FILES FOR MODULE */ #include <linux/kernel.h> #include <linux/string.h> #include <linux/time.h> #include <linux/device.h> #include <linux/interrupt.h> #include <asm/io.h> #include <asm/irq.h> #include "cp15_registers.h" /* DEFINITIONS AND DECLARATIONS FOR MODULE This section contains definitions for constants, macros, types, variables and other items needed by this module. 
*/ /* Constant / Define Declarations */ #define PM_NUM_COUNTERS 4 #define PM_V7_ERR -1 /*------------------------------------------------------------------------ * Global control bits ------------------------------------------------------------------------*/ #define PM_GLOBAL_ENABLE (1<<0) #define PM_EVENT_RESET (1<<1) #define PM_CYCLE_RESET (1<<2) #define PM_CLKDIV (1<<3) #define PM_GLOBAL_TRACE (1<<4) #define PM_DISABLE_PROHIBIT (1<<5) /*--------------------------------------------------------------------------- * Enable and clear bits for each event/trigger ----------------------------------------------------------------------------*/ #define PM_EV0_ENABLE (1<<0) #define PM_EV1_ENABLE (1<<1) #define PM_EV2_ENABLE (1<<2) #define PM_EV3_ENABLE (1<<3) #define PM_COUNT_ENABLE (1<<31) #define PM_ALL_ENABLE (0x8000000F) /*----------------------------------------------------------------------------- * Overflow actions ------------------------------------------------------------------------------*/ #define PM_OVERFLOW_NOACTION (0) #define PM_OVERFLOW_HALT (1) #define PM_OVERFLOW_STOP (2) #define PM_OVERFLOW_SKIP (3) /* * Shifts for each trigger type */ #define PM_STOP_SHIFT 24 #define PM_RELOAD_SHIFT 22 #define PM_RESUME_SHIFT 20 #define PM_SUSPEND_SHIFT 18 #define PM_START_SHIFT 16 #define PM_STOPALL_SHIFT 15 #define PM_STOPCOND_SHIFT 12 #define PM_RELOADCOND_SHIFT 9 #define PM_RESUMECOND_SHIFT 6 #define PM_SUSPENDCOND_SHIFT 3 #define PM_STARTCOND_SHIFT 0 /*--------------------------------------------------------------------------- External control register. What todo when various events happen. Triggering events, etc. 
----------------------------------------------------------------------------*/ #define PM_EXTTR0 0 #define PM_EXTTR1 1 #define PM_EXTTR2 2 #define PM_EXTTR3 3 #define PM_COND_NO_STOP 0 #define PM_COND_STOP_CNTOVRFLW 1 #define PM_COND_STOP_EXTERNAL 4 #define PM_COND_STOP_TRACE 5 #define PM_COND_STOP_EVOVRFLW 6 #define PM_COND_STOP_EVTYPER 7 /*-------------------------------------------------------------------------- Protect against concurrent access. There is an index register that is used to select the appropriate bank of registers. If multiple processes are writting this at different times we could have a mess... ---------------------------------------------------------------------------*/ #define PM_LOCK() #define PM_UNLOCK() #define PRINT printk /*-------------------------------------------------------------------------- The Event definitions --------------------------------------------------------------------------*/ #define PM_EVT_SW_INCREMENT 0 #define PM_EVT_L1_I_MISS 1 #define PM_EVT_ITLB_MISS 2 #define PM_EVT_L1_D_MISS 3 #define PM_EVT_L1_D_ACCESS 4 #define PM_EVT_DTLB_MISS 5 #define PM_EVT_DATA_READ 6 #define PM_EVT_DATA_WRITE 7 #define PM_EVT_INSTRUCTION 8 #define PM_EVT_EXCEPTIONS 9 #define PM_EVT_EXCEPTION_RET 10 #define PM_EVT_CTX_CHANGE 11 #define PM_EVT_PC_CHANGE 12 #define PM_EVT_BRANCH 13 #define PM_EVT_RETURN 14 #define PM_EVT_UNALIGNED 15 #define PM_EVT_BRANCH_MISS 16 #define PM_EVT_EXTERNAL0 0x40 #define PM_EVT_EXTERNAL1 0x41 #define PM_EVT_EXTERNAL2 0x42 #define PM_EVT_EXTERNAL3 0x43 #define PM_EVT_TRACE0 0x44 #define PM_EVT_TRACE1 0x45 #define PM_EVT_TRACE2 0x46 #define PM_EVT_TRACE3 0x47 #define PM_EVT_PM0 0x48 #define PM_EVT_PM1 0x49 #define PM_EVT_PM2 0x4a #define PM_EVT_PM3 0x4b #define PM_EVT_LPM0_EVT0 0x4c #define PM_EVT_LPM0_EVT1 0x4d #define PM_EVT_LPM0_EVT2 0x4e #define PM_EVT_LPM0_EVT3 0x4f #define PM_EVT_LPM1_EVT0 0x50 #define PM_EVT_LPM1_EVT1 0x51 #define PM_EVT_LPM1_EVT2 0x52 #define PM_EVT_LPM1_EVT3 0x53 #define PM_EVT_LPM2_EVT0 
0x54 #define PM_EVT_LPM2_EVT1 0x55 #define PM_EVT_LPM2_EVT2 0x56 #define PM_EVT_LPM2_EVT3 0x57 #define PM_EVT_L2_EVT0 0x58 #define PM_EVT_L2_EVT1 0x59 #define PM_EVT_L2_EVT2 0x5a #define PM_EVT_L2_EVT3 0x5b #define PM_EVT_VLP_EVT0 0x5c #define PM_EVT_VLP_EVT1 0x5d #define PM_EVT_VLP_EVT2 0x5e #define PM_EVT_VLP_EVT3 0x5f /* Type Declarations */ /*-------------------------------------------------------------------------- A performance monitor trigger setup/initialization structure. Contains all of the fields necessary to setup a complex trigger with the internal performance monitor. ---------------------------------------------------------------------------*/ struct pm_trigger_s { int index; int event_type; bool interrupt; bool overflow_enable; bool event_export; unsigned char overflow_action; unsigned char stop_index; unsigned char reload_index; unsigned char resume_index; unsigned char suspend_index; unsigned char start_index; bool overflow_stop; unsigned char stop_condition; unsigned char reload_condition; unsigned char resume_condition; unsigned char suspend_condition; unsigned char start_condition; }; /* * Name and index place holder so we can display the event */ struct pm_name_s { unsigned long index; char *name; }; /* Local Object Definitions */ unsigned long pm_cycle_overflow_count; unsigned long pm_overflow_count[PM_NUM_COUNTERS]; /*--------------------------------------------------------------------------- Max number of events read from the config registers ---------------------------------------------------------------------------*/ static int pm_max_events; /*-------------------------------------------------------------------------- Storage area for each of the triggers *---------------------------------------------------------------------------*/ static struct pm_trigger_s pm_triggers[4]; /*-------------------------------------------------------------------------- Names and indexes of the events 
--------------------------------------------------------------------------*/ static struct pm_name_s pm_names[] = { { PM_EVT_SW_INCREMENT, "SW Increment"}, { PM_EVT_L1_I_MISS, "L1 I MISS"}, { PM_EVT_ITLB_MISS, "L1 ITLB MISS"}, { PM_EVT_L1_D_MISS, "L1 D MISS"}, { PM_EVT_L1_D_ACCESS, "L1 D ACCESS"}, { PM_EVT_DTLB_MISS, "DTLB MISS"}, { PM_EVT_DATA_READ, "DATA READ"}, { PM_EVT_DATA_WRITE, "DATA WRITE"}, { PM_EVT_INSTRUCTION, "INSTRUCTIONS"}, { PM_EVT_EXCEPTIONS, "EXCEPTIONS"}, { PM_EVT_EXCEPTION_RET, "EXCEPTION RETURN"}, { PM_EVT_CTX_CHANGE, "CTX CHANGE"}, { PM_EVT_PC_CHANGE, "PC CHANGE"}, { PM_EVT_BRANCH, "BRANCH"}, { PM_EVT_RETURN, "RETURN"}, { PM_EVT_UNALIGNED, "UNALIGNED"}, { PM_EVT_BRANCH_MISS, "BRANCH MISS"}, { PM_EVT_EXTERNAL0, "EXTERNAL 0"}, { PM_EVT_EXTERNAL1, "EXTERNAL 1"}, { PM_EVT_EXTERNAL2, "EXTERNAL 2"}, { PM_EVT_EXTERNAL3, "EXTERNAL 3"}, { PM_EVT_TRACE0, "TRACE 0"}, { PM_EVT_TRACE1, "TRACE 1"}, { PM_EVT_TRACE2, "TRACE 2"}, { PM_EVT_TRACE3, "TRACE 3"}, { PM_EVT_PM0, "PM0"}, { PM_EVT_PM1, "PM1"}, { PM_EVT_PM2, "PM2"}, { PM_EVT_PM3, "PM3"}, { PM_EVT_LPM0_EVT0, "LPM0 E0"}, { PM_EVT_LPM0_EVT1, "LPM0 E1"}, { PM_EVT_LPM0_EVT2 , "LPM0 E2"}, { PM_EVT_LPM0_EVT3, "LPM0 E3"}, { PM_EVT_LPM1_EVT0, "LPM1 E0"}, { PM_EVT_LPM1_EVT1, "LPM1 E1"}, { PM_EVT_LPM1_EVT2, "LPM1 E2"}, { PM_EVT_LPM1_EVT3, "LPM1 E3"}, { PM_EVT_LPM2_EVT0, "LPM2 E0"}, { PM_EVT_LPM2_EVT1 , "LPM2 E1"}, { PM_EVT_LPM2_EVT2, "LPM2 E2"}, { PM_EVT_LPM2_EVT3, "LPM2 E3"}, { PM_EVT_L2_EVT0 , "L2 E0"}, { PM_EVT_L2_EVT1, "L2 E1"}, { PM_EVT_L2_EVT2, "L2 E2"}, { PM_EVT_L2_EVT3 , "L2 E3"}, { PM_EVT_VLP_EVT0 , "VLP E0"}, { PM_EVT_VLP_EVT1, "VLP E1"}, { PM_EVT_VLP_EVT2, "VLP E2"}, { PM_EVT_VLP_EVT3, "VLP E3"}, }; static int irqid; /* Function Definitions */ /* FUNCTION pm_find_event_name DESCRIPTION Find the name associated with the event index passed and return the pointer. DEPENDENCIES RETURN VALUE Pointer to text string containing the name of the event or pointer to an error string. 
Either way access to the returned string will not cause an access error. SIDE EFFECTS */ char *pm_find_event_name(unsigned long index) { unsigned long i = 0; while (pm_names[i].index != -1) { if (pm_names[i].index == index) return pm_names[i].name; i++; } return "BAD INDEX"; } /* FUNCTION pm_group_stop DESCRIPTION Stop a group of the performance monitors. Event monitor 0 is bit 0, event monitor 1 bit 1, etc. The cycle count can also be disabled with bit 31. Macros are provided for all of the indexes including an ALL. DEPENDENCIES RETURN VALUE None SIDE EFFECTS Stops the performance monitoring for the index passed. */ void pm_group_stop(unsigned long mask) { WCP15_PMCNTENCLR(mask); } /* FUNCTION pm_group_start DESCRIPTION Start a group of the performance monitors. Event monitor 0 is bit 0, event monitor 1 bit 1, etc. The cycle count can also be enabled with bit 31. Macros are provided for all of the indexes including an ALL. DEPENDENCIES RETURN VALUE None SIDE EFFECTS Starts the performance monitoring for the index passed. */ void pm_group_start(unsigned long mask) { WCP15_PMCNTENSET(mask); } /* FUNCTION pm_cycle_overflow_action DESCRIPTION Action to take for an overflow of the cycle counter. DEPENDENCIES RETURN VALUE None SIDE EFFECTS Modify the state actions for overflow */ void pm_cycle_overflow_action(int action) { unsigned long reg = 0; if ((action > PM_OVERFLOW_SKIP) || (action < 0)) return; RCP15_PMACTLR(reg); reg &= ~(1<<30); /*clear it*/ WCP15_PMACTLR(reg | (action<<30)); } /* FUNCTION pm_get_overflow DESCRIPTION Return the overflow condition for the index passed. DEPENDENCIES RETURN VALUE 0 no overflow !0 (anything else) overflow; SIDE EFFECTS */ unsigned long pm_get_overflow(int index) { unsigned long overflow = 0; /* * Range check */ if (index > pm_max_events) return PM_V7_ERR; RCP15_PMOVSR(overflow); return overflow & (1<<index); } /* FUNCTION pm_get_cycle_overflow DESCRIPTION Returns if the cycle counter has overflowed or not. 
DEPENDENCIES RETURN VALUE 0 no overflow !0 (anything else) overflow; SIDE EFFECTS */ unsigned long pm_get_cycle_overflow(void) { unsigned long overflow = 0; RCP15_PMOVSR(overflow); return overflow & PM_COUNT_ENABLE; } /* FUNCTION pm_reset_overflow DESCRIPTION Reset the cycle counter overflow bit. DEPENDENCIES RETURN VALUE None SIDE EFFECTS */ void pm_reset_overflow(int index) { WCP15_PMOVSR(1<<index); } /* FUNCTION pm_reset_cycle_overflow DESCRIPTION Reset the cycle counter overflow bit. DEPENDENCIES RETURN VALUE None SIDE EFFECTS */ void pm_reset_cycle_overflow(void) { WCP15_PMOVSR(PM_COUNT_ENABLE); } /* FUNCTION pm_get_cycle_count DESCRIPTION return the count in the cycle count register. DEPENDENCIES RETURN VALUE The value in the cycle count register. SIDE EFFECTS */ unsigned long pm_get_cycle_count(void) { unsigned long cnt = 0; RCP15_PMCCNTR(cnt); return cnt; } /* FUNCTION pm_reset_cycle_count DESCRIPTION reset the value in the cycle count register DEPENDENCIES RETURN VALUE NONE SIDE EFFECTS Resets the performance monitor cycle count register. Any interrupts period based on this overflow will be changed */ void pm_reset_cycle_count(void) { WCP15_PMCNTENCLR(PM_COUNT_ENABLE); } /* FUNCTION pm_cycle_div_64 DESCRIPTION Set the cycle counter to count every 64th cycle instead of every cycle when the value passed is 1, otherwise counts every cycle. DEPENDENCIES RETURN VALUE none SIDE EFFECTS Changes the rate at which cycles are counted. Anything that is reading the cycle count (pmGetCyucleCount) may get different results. */ void pm_cycle_div_64(int enable) { unsigned long enables = 0; RCP15_PMCR(enables); if (enable) WCP15_PMCR(enables | PM_CLKDIV); else WCP15_PMCR(enables & ~PM_CLKDIV); } /* FUNCTION pm_enable_cycle_counter DESCRIPTION Enable the cycle counter. Sets the bit in the enable register so the performance monitor counter starts up counting. DEPENDENCIES RETURN VALUE none SIDE EFFECTS */ void pm_enable_cycle_counter(void) { /* * Enable the counter. 
*/ WCP15_PMCNTENSET(PM_COUNT_ENABLE); } /* FUNCTION pm_disable_counter DESCRIPTION Disable a single counter based on the index passed. DEPENDENCIES RETURN VALUE none SIDE EFFECTS Any triggers that are based on the stoped counter may not trigger... */ void pm_disable_counter(int index) { /* * Range check */ if (index > pm_max_events) return; WCP15_PMCNTENCLR(1<<index); } /* FUNCTION pm_enable_counter DESCRIPTION Enable the counter with the index passed. DEPENDENCIES RETURN VALUE none. SIDE EFFECTS */ void pm_enable_counter(int index) { /* * Range check */ if (index > pm_max_events) return; WCP15_PMCNTENSET(1<<index); } /* FUNCTION pm_set_count DESCRIPTION Set the number of events in a register, used for resets passed. DEPENDENCIES RETURN VALUE -1 if the index is out of range SIDE EFFECTS */ int pm_set_count(int index, unsigned long new_value) { unsigned long reg = 0; /* * Range check */ if (index > pm_max_events) return PM_V7_ERR; /* * Lock, select the index and read the count...unlock */ PM_LOCK(); WCP15_PMSELR(index); WCP15_PMXEVCNTR(new_value); PM_UNLOCK(); return reg; } int pm_reset_count(int index) { return pm_set_count(index, 0); } /* FUNCTION pm_get_count DESCRIPTION Return the number of events that have happened for the index passed. DEPENDENCIES RETURN VALUE -1 if the index is out of range The number of events if inrange SIDE EFFECTS */ unsigned long pm_get_count(int index) { unsigned long reg = 0; /* * Range check */ if (index > pm_max_events) return PM_V7_ERR; /* * Lock, select the index and read the count...unlock */ PM_LOCK(); WCP15_PMSELR(index); RCP15_PMXEVCNTR(reg); PM_UNLOCK(); return reg; } /* FUNCTION pm_show_event_info DESCRIPTION Display (print) the information about the event at the index passed. Shows the index, name and count if a valid index is passed. If the index is not valid, then nothing is displayed. 
DEPENDENCIES RETURN VALUE None SIDE EFFECTS */ void pm_show_event_info(unsigned long index) { unsigned long count; unsigned long event_type; if (index > pm_max_events) return; if (pm_triggers[index].index > pm_max_events) return; count = pm_get_count(index); event_type = pm_triggers[index].event_type; PRINT("Event %ld Trigger %s(%ld) count:%ld\n", index, pm_find_event_name(event_type), event_type, count); } /* FUNCTION pm_event_init DESCRIPTION Given the struct pm_trigger_s info passed, configure the event. This can be a complex trigger or a simple trigger. Any old values in the event are lost. DEPENDENCIES RETURN VALUE status SIDE EFFECTS stops and clears the event at the index passed. */ int pm_event_init(struct pm_trigger_s *data) { unsigned long trigger; unsigned long actlr = 0; if (0 == data) return PM_V7_ERR; if (data->index > pm_max_events) return PM_V7_ERR; /* * Setup the trigger based ont he passed values */ trigger = ((data->overflow_enable&1)<<31) | ((data->event_export&1)<<30) | ((data->stop_index&3)<<PM_STOP_SHIFT) | ((data->reload_index&3)<<PM_RELOAD_SHIFT) | ((data->resume_index&3)<<PM_RESUME_SHIFT) | ((data->suspend_index&3)<<PM_SUSPEND_SHIFT) | ((data->start_index&3)<<PM_START_SHIFT) | ((data->overflow_stop&1)<<PM_STOPALL_SHIFT) | ((data->stop_condition&7)<<PM_STOPCOND_SHIFT) | ((data->reload_condition&7)<<PM_RELOADCOND_SHIFT) | ((data->resume_condition&7)<<PM_RESUMECOND_SHIFT) | ((data->suspend_condition&7)<<PM_SUSPENDCOND_SHIFT) | ((data->start_condition&7)<<PM_STARTCOND_SHIFT); /* * Disable this counter while we are updating. */ pm_disable_counter(data->index); /* * Lock, select the bank, set the trigger event and the event type * then unlock. 
*/ PM_LOCK(); RCP15_PMACTLR(actlr); actlr &= ~(3<<(data->index<<1)); WCP15_PMACTLR(actlr | ((data->overflow_action&3) << (data->index<<1))); WCP15_PMSELR(data->index); WCP15_PMXEVTYPER(data->event_type); WCP15_PMXEVCNTCR(trigger); PM_UNLOCK(); /* * Make a copy of the trigger so we know what it is when/if it triggers. */ memcpy(&pm_triggers[data->index], data, sizeof(*data)); /* * We do not re-enable this here so events can be started together with * pm_group_start() that way an accurate measure can be taken... */ return 0; } int pm_set_event(int index, unsigned long event) { unsigned long reg = 0; /* * Range check */ if (index > pm_max_events) return PM_V7_ERR; /* * Lock, select the index and read the count...unlock */ PM_LOCK(); WCP15_PMSELR(index); WCP15_PMXEVTYPER(event); PM_UNLOCK(); return reg; } /* FUNCTION pm_set_local_iu DESCRIPTION Set the local IU triggers. Note that the MSB determines if these are enabled or not. DEPENDENCIES RETURN VALUE NONE SIDE EFFECTS */ void pm_set_local_iu(unsigned long value) { WCP15_LPM0EVTYPER(value); } /* FUNCTION pm_set_local_iu DESCRIPTION Set the local IU triggers. Note that the MSB determines if these are enabled or not. DEPENDENCIES RETURN VALUE NONE SIDE EFFECTS */ void pm_set_local_xu(unsigned long value) { WCP15_LPM1EVTYPER(value); } /* FUNCTION pm_set_local_su DESCRIPTION Set the local SU triggers. Note that the MSB determines if these are enabled or not. DEPENDENCIES RETURN VALUE NONE SIDE EFFECTS */ void pm_set_local_su(unsigned long value) { WCP15_LPM2EVTYPER(value); } /* FUNCTION pm_set_local_l2 DESCRIPTION Set the local L2 triggers. Note that the MSB determines if these are enabled or not. DEPENDENCIES RETURN VALUE NONE SIDE EFFECTS */ void pm_set_local_l2(unsigned long value) { WCP15_L2LPMEVTYPER(value); } /* FUNCTION pm_set_local_vu DESCRIPTION Set the local VU triggers. Note that the MSB determines if these are enabled or not. 
DEPENDENCIES RETURN VALUE NONE SIDE EFFECTS */ void pm_set_local_vu(unsigned long value) { WCP15_VLPMEVTYPER(value); } /* FUNCTION pm_isr DESCRIPTION: Performance Monitor interrupt service routine to capture overflows DEPENDENCIES RETURN VALUE SIDE EFFECTS */ static irqreturn_t pm_isr(int irq, void *d) { int i; for (i = 0; i < PM_NUM_COUNTERS; i++) { if (pm_get_overflow(i)) { pm_overflow_count[i]++; pm_reset_overflow(i); } } if (pm_get_cycle_overflow()) { pm_cycle_overflow_count++; pm_reset_cycle_overflow(); } return IRQ_HANDLED; } void pm_stop_all(void) { WCP15_PMCNTENCLR(0xFFFFFFFF); } void pm_reset_all(void) { WCP15_PMCR(0xF); WCP15_PMOVSR(PM_ALL_ENABLE); /* overflow clear */ } void pm_start_all(void) { WCP15_PMCNTENSET(PM_ALL_ENABLE); } /* FUNCTION pm_initialize DESCRIPTION Initialize the performanca monitoring for the v7 processor. Ensures the cycle count is running and the event counters are enabled. DEPENDENCIES RETURN VALUE NONE SIDE EFFECTS */ void pm_initialize(void) { unsigned long reg = 0; unsigned char imp; unsigned char id; unsigned char num; unsigned long enables = 0; static int initialized; if (initialized) return; initialized = 1; irqid = INT_ARMQC_PERFMON; RCP15_PMCR(reg); imp = (reg>>24) & 0xFF; id = (reg>>16) & 0xFF; pm_max_events = num = (reg>>11) & 0xFF; PRINT("V7Performance Monitor Capabilities\n"); PRINT(" Implementor %c(%d)\n", imp, imp); PRINT(" Id %d %x\n", id, id); PRINT(" Num Events %d %x\n", num, num); PRINT("\nCycle counter enabled by default...\n"); /* * Global enable, ensure the global enable is set so all * subsequent actions take effect. 
Also resets the counts */ RCP15_PMCR(enables); WCP15_PMCR(enables | PM_GLOBAL_ENABLE | PM_EVENT_RESET | PM_CYCLE_RESET | PM_CLKDIV); /* * Enable access from user space */ WCP15_PMUSERENR(1); WCP15_PMACTLR(1); /* * Install interrupt handler and the enable the interrupts */ pm_reset_cycle_overflow(); pm_reset_overflow(0); pm_reset_overflow(1); pm_reset_overflow(2); pm_reset_overflow(3); if (0 != request_irq(irqid, pm_isr, 0, "perfmon", 0)) printk(KERN_ERR "%s:%d request_irq returned error\n", __FILE__, __LINE__); WCP15_PMINTENSET(PM_ALL_ENABLE); /* * Enable the cycle counter. Default, count 1:1 no divisor. */ pm_enable_cycle_counter(); } void pm_free_irq(void) { free_irq(irqid, 0); } void pm_deinitialize(void) { unsigned long enables = 0; RCP15_PMCR(enables); WCP15_PMCR(enables & ~PM_GLOBAL_ENABLE); }
gpl-2.0
MichaelQQ/Linux-PE
drivers/net/wireless/ath/wil6210/debug.c
1149
1687
/*
 * Copyright (c) 2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "wil6210.h"
#include "trace.h"

/*
 * Log an error message against the wil6210 net device and mirror it
 * into the driver's trace stream.
 * Returns whatever netdev_err() returns.
 */
int wil_err(struct wil6210_priv *wil, const char *fmt, ...)
{
	struct net_device *ndev = wil_to_ndev(wil);
	struct va_format vaf = {
		.fmt = fmt,
	};
	va_list ap;
	int rc;

	va_start(ap, fmt);
	vaf.va = &ap;
	rc = netdev_err(ndev, "%pV", &vaf);
	trace_wil6210_log_err(&vaf);
	va_end(ap);

	return rc;
}

/*
 * Log an informational message against the wil6210 net device and
 * mirror it into the driver's trace stream.
 * Returns whatever netdev_info() returns.
 */
int wil_info(struct wil6210_priv *wil, const char *fmt, ...)
{
	struct net_device *ndev = wil_to_ndev(wil);
	struct va_format vaf = {
		.fmt = fmt,
	};
	va_list ap;
	int rc;

	va_start(ap, fmt);
	vaf.va = &ap;
	rc = netdev_info(ndev, "%pV", &vaf);
	trace_wil6210_log_info(&vaf);
	va_end(ap);

	return rc;
}

/*
 * Emit a debug message to the trace stream only (no netdev logging).
 * Always returns 0.
 */
int wil_dbg_trace(struct wil6210_priv *wil, const char *fmt, ...)
{
	struct va_format vaf = {
		.fmt = fmt,
	};
	va_list ap;

	va_start(ap, fmt);
	vaf.va = &ap;
	trace_wil6210_log_dbg(&vaf);
	va_end(ap);

	return 0;
}
gpl-2.0
wkpark/lge-kernel-cx2
drivers/scsi/atari_scsi.c
2685
35496
/* * atari_scsi.c -- Device dependent functions for the Atari generic SCSI port * * Copyright 1994 Roman Hodek <Roman.Hodek@informatik.uni-erlangen.de> * * Loosely based on the work of Robert De Vries' team and added: * - working real DMA * - Falcon support (untested yet!) ++bjoern fixed and now it works * - lots of extensions and bug fixes. * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of this archive * for more details. * */ /**************************************************************************/ /* */ /* Notes for Falcon SCSI: */ /* ---------------------- */ /* */ /* Since the Falcon SCSI uses the ST-DMA chip, that is shared among */ /* several device drivers, locking and unlocking the access to this */ /* chip is required. But locking is not possible from an interrupt, */ /* since it puts the process to sleep if the lock is not available. */ /* This prevents "late" locking of the DMA chip, i.e. locking it just */ /* before using it, since in case of disconnection-reconnection */ /* commands, the DMA is started from the reselection interrupt. */ /* */ /* Two possible schemes for ST-DMA-locking would be: */ /* 1) The lock is taken for each command separately and disconnecting */ /* is forbidden (i.e. can_queue = 1). */ /* 2) The DMA chip is locked when the first command comes in and */ /* released when the last command is finished and all queues are */ /* empty. */ /* The first alternative would result in bad performance, since the */ /* interleaving of commands would not be used. The second is unfair to */ /* other drivers using the ST-DMA, because the queues will seldom be */ /* totally empty if there is a lot of disk traffic. 
*/ /* */ /* For this reasons I decided to employ a more elaborate scheme: */ /* - First, we give up the lock every time we can (for fairness), this */ /* means every time a command finishes and there are no other commands */ /* on the disconnected queue. */ /* - If there are others waiting to lock the DMA chip, we stop */ /* issuing commands, i.e. moving them onto the issue queue. */ /* Because of that, the disconnected queue will run empty in a */ /* while. Instead we go to sleep on a 'fairness_queue'. */ /* - If the lock is released, all processes waiting on the fairness */ /* queue will be woken. The first of them tries to re-lock the DMA, */ /* the others wait for the first to finish this task. After that, */ /* they can all run on and do their commands... */ /* This sounds complicated (and it is it :-(), but it seems to be a */ /* good compromise between fairness and performance: As long as no one */ /* else wants to work with the ST-DMA chip, SCSI can go along as */ /* usual. If now someone else comes, this behaviour is changed to a */ /* "fairness mode": just already initiated commands are finished and */ /* then the lock is released. The other one waiting will probably win */ /* the race for locking the DMA, since it was waiting for longer. And */ /* after it has finished, SCSI can go ahead again. Finally: I hope I */ /* have not produced any deadlock possibilities! */ /* */ /**************************************************************************/ #include <linux/module.h> #define NDEBUG (0) #define NDEBUG_ABORT 0x00100000 #define NDEBUG_TAGS 0x00200000 #define NDEBUG_MERGING 0x00400000 #define AUTOSENSE /* For the Atari version, use only polled IO or REAL_DMA */ #define REAL_DMA /* Support tagged queuing? (on devices that are able to... 
:-) */ #define SUPPORT_TAGS #define MAX_TAGS 32 #include <linux/types.h> #include <linux/stddef.h> #include <linux/ctype.h> #include <linux/delay.h> #include <linux/mm.h> #include <linux/blkdev.h> #include <linux/interrupt.h> #include <linux/init.h> #include <linux/nvram.h> #include <linux/bitops.h> #include <asm/setup.h> #include <asm/atarihw.h> #include <asm/atariints.h> #include <asm/page.h> #include <asm/pgtable.h> #include <asm/irq.h> #include <asm/traps.h> #include "scsi.h" #include <scsi/scsi_host.h> #include "atari_scsi.h" #include "NCR5380.h" #include <asm/atari_stdma.h> #include <asm/atari_stram.h> #include <asm/io.h> #include <linux/stat.h> #define IS_A_TT() ATARIHW_PRESENT(TT_SCSI) #define SCSI_DMA_WRITE_P(elt,val) \ do { \ unsigned long v = val; \ tt_scsi_dma.elt##_lo = v & 0xff; \ v >>= 8; \ tt_scsi_dma.elt##_lmd = v & 0xff; \ v >>= 8; \ tt_scsi_dma.elt##_hmd = v & 0xff; \ v >>= 8; \ tt_scsi_dma.elt##_hi = v & 0xff; \ } while(0) #define SCSI_DMA_READ_P(elt) \ (((((((unsigned long)tt_scsi_dma.elt##_hi << 8) | \ (unsigned long)tt_scsi_dma.elt##_hmd) << 8) | \ (unsigned long)tt_scsi_dma.elt##_lmd) << 8) | \ (unsigned long)tt_scsi_dma.elt##_lo) static inline void SCSI_DMA_SETADR(unsigned long adr) { st_dma.dma_lo = (unsigned char)adr; MFPDELAY(); adr >>= 8; st_dma.dma_md = (unsigned char)adr; MFPDELAY(); adr >>= 8; st_dma.dma_hi = (unsigned char)adr; MFPDELAY(); } static inline unsigned long SCSI_DMA_GETADR(void) { unsigned long adr; adr = st_dma.dma_lo; MFPDELAY(); adr |= (st_dma.dma_md & 0xff) << 8; MFPDELAY(); adr |= (st_dma.dma_hi & 0xff) << 16; MFPDELAY(); return adr; } static inline void ENABLE_IRQ(void) { if (IS_A_TT()) atari_enable_irq(IRQ_TT_MFP_SCSI); else atari_enable_irq(IRQ_MFP_FSCSI); } static inline void DISABLE_IRQ(void) { if (IS_A_TT()) atari_disable_irq(IRQ_TT_MFP_SCSI); else atari_disable_irq(IRQ_MFP_FSCSI); } #define HOSTDATA_DMALEN (((struct NCR5380_hostdata *) \ (atari_scsi_host->hostdata))->dma_len) /* Time (in jiffies) to wait 
after a reset; the SCSI standard calls for 250ms, * we usually do 0.5s to be on the safe side. But Toshiba CD-ROMs once more * need ten times the standard value... */ #ifndef CONFIG_ATARI_SCSI_TOSHIBA_DELAY #define AFTER_RESET_DELAY (HZ/2) #else #define AFTER_RESET_DELAY (5*HZ/2) #endif /***************************** Prototypes *****************************/ #ifdef REAL_DMA static int scsi_dma_is_ignored_buserr(unsigned char dma_stat); static void atari_scsi_fetch_restbytes(void); static long atari_scsi_dma_residual(struct Scsi_Host *instance); static int falcon_classify_cmd(Scsi_Cmnd *cmd); static unsigned long atari_dma_xfer_len(unsigned long wanted_len, Scsi_Cmnd *cmd, int write_flag); #endif static irqreturn_t scsi_tt_intr(int irq, void *dummy); static irqreturn_t scsi_falcon_intr(int irq, void *dummy); static void falcon_release_lock_if_possible(struct NCR5380_hostdata *hostdata); static void falcon_get_lock(void); #ifdef CONFIG_ATARI_SCSI_RESET_BOOT static void atari_scsi_reset_boot(void); #endif static unsigned char atari_scsi_tt_reg_read(unsigned char reg); static void atari_scsi_tt_reg_write(unsigned char reg, unsigned char value); static unsigned char atari_scsi_falcon_reg_read(unsigned char reg); static void atari_scsi_falcon_reg_write(unsigned char reg, unsigned char value); /************************* End of Prototypes **************************/ static struct Scsi_Host *atari_scsi_host; static unsigned char (*atari_scsi_reg_read)(unsigned char reg); static void (*atari_scsi_reg_write)(unsigned char reg, unsigned char value); #ifdef REAL_DMA static unsigned long atari_dma_residual, atari_dma_startaddr; static short atari_dma_active; /* pointer to the dribble buffer */ static char *atari_dma_buffer; /* precalculated physical address of the dribble buffer */ static unsigned long atari_dma_phys_buffer; /* != 0 tells the Falcon int handler to copy data from the dribble buffer */ static char *atari_dma_orig_addr; /* size of the dribble buffer; 4k seems 
enough, since the Falcon cannot use * scatter-gather anyway, so most transfers are 1024 byte only. In the rare * cases where requests to physical contiguous buffers have been merged, this * request is <= 4k (one page). So I don't think we have to split transfers * just due to this buffer size... */ #define STRAM_BUFFER_SIZE (4096) /* mask for address bits that can't be used with the ST-DMA */ static unsigned long atari_dma_stram_mask; #define STRAM_ADDR(a) (((a) & atari_dma_stram_mask) == 0) /* number of bytes to cut from a transfer to handle NCR overruns */ static int atari_read_overruns; #endif static int setup_can_queue = -1; module_param(setup_can_queue, int, 0); static int setup_cmd_per_lun = -1; module_param(setup_cmd_per_lun, int, 0); static int setup_sg_tablesize = -1; module_param(setup_sg_tablesize, int, 0); #ifdef SUPPORT_TAGS static int setup_use_tagged_queuing = -1; module_param(setup_use_tagged_queuing, int, 0); #endif static int setup_hostid = -1; module_param(setup_hostid, int, 0); #if defined(REAL_DMA) static int scsi_dma_is_ignored_buserr(unsigned char dma_stat) { int i; unsigned long addr = SCSI_DMA_READ_P(dma_addr), end_addr; if (dma_stat & 0x01) { /* A bus error happens when DMA-ing from the last page of a * physical memory chunk (DMA prefetch!), but that doesn't hurt. * Check for this case: */ for (i = 0; i < m68k_num_memory; ++i) { end_addr = m68k_memory[i].addr + m68k_memory[i].size; if (end_addr <= addr && addr <= end_addr + 4) return 1; } } return 0; } #if 0 /* Dead code... wasn't called anyway :-) and causes some trouble, because at * end-of-DMA, both SCSI ints are triggered simultaneously, so the NCR int has * to clear the DMA int pending bit before it allows other level 6 interrupts. */ static void scsi_dma_buserr(int irq, void *dummy) { unsigned char dma_stat = tt_scsi_dma.dma_ctrl; /* Don't do anything if a NCR interrupt is pending. Probably it's just * masked... 
*/ if (atari_irq_pending(IRQ_TT_MFP_SCSI)) return; printk("Bad SCSI DMA interrupt! dma_addr=0x%08lx dma_stat=%02x dma_cnt=%08lx\n", SCSI_DMA_READ_P(dma_addr), dma_stat, SCSI_DMA_READ_P(dma_cnt)); if (dma_stat & 0x80) { if (!scsi_dma_is_ignored_buserr(dma_stat)) printk("SCSI DMA bus error -- bad DMA programming!\n"); } else { /* Under normal circumstances we never should get to this point, * since both interrupts are triggered simultaneously and the 5380 * int has higher priority. When this irq is handled, that DMA * interrupt is cleared. So a warning message is printed here. */ printk("SCSI DMA intr ?? -- this shouldn't happen!\n"); } } #endif #endif static irqreturn_t scsi_tt_intr(int irq, void *dummy) { #ifdef REAL_DMA int dma_stat; dma_stat = tt_scsi_dma.dma_ctrl; INT_PRINTK("scsi%d: NCR5380 interrupt, DMA status = %02x\n", atari_scsi_host->host_no, dma_stat & 0xff); /* Look if it was the DMA that has interrupted: First possibility * is that a bus error occurred... */ if (dma_stat & 0x80) { if (!scsi_dma_is_ignored_buserr(dma_stat)) { printk(KERN_ERR "SCSI DMA caused bus error near 0x%08lx\n", SCSI_DMA_READ_P(dma_addr)); printk(KERN_CRIT "SCSI DMA bus error -- bad DMA programming!"); } } /* If the DMA is active but not finished, we have the case * that some other 5380 interrupt occurred within the DMA transfer. * This means we have residual bytes, if the desired end address * is not yet reached. Maybe we have to fetch some bytes from the * rest data register, too. The residual must be calculated from * the address pointer, not the counter register, because only the * addr reg counts bytes not yet written and pending in the rest * data reg! 
*/ if ((dma_stat & 0x02) && !(dma_stat & 0x40)) { atari_dma_residual = HOSTDATA_DMALEN - (SCSI_DMA_READ_P(dma_addr) - atari_dma_startaddr); DMA_PRINTK("SCSI DMA: There are %ld residual bytes.\n", atari_dma_residual); if ((signed int)atari_dma_residual < 0) atari_dma_residual = 0; if ((dma_stat & 1) == 0) { /* * After read operations, we maybe have to * transport some rest bytes */ atari_scsi_fetch_restbytes(); } else { /* * There seems to be a nasty bug in some SCSI-DMA/NCR * combinations: If a target disconnects while a write * operation is going on, the address register of the * DMA may be a few bytes farer than it actually read. * This is probably due to DMA prefetching and a delay * between DMA and NCR. Experiments showed that the * dma_addr is 9 bytes to high, but this could vary. * The problem is, that the residual is thus calculated * wrong and the next transfer will start behind where * it should. So we round up the residual to the next * multiple of a sector size, if it isn't already a * multiple and the originally expected transfer size * was. The latter condition is there to ensure that * the correction is taken only for "real" data * transfers and not for, e.g., the parameters of some * other command. These shouldn't disconnect anyway. 
*/ if (atari_dma_residual & 0x1ff) { DMA_PRINTK("SCSI DMA: DMA bug corrected, " "difference %ld bytes\n", 512 - (atari_dma_residual & 0x1ff)); atari_dma_residual = (atari_dma_residual + 511) & ~0x1ff; } } tt_scsi_dma.dma_ctrl = 0; } /* If the DMA is finished, fetch the rest bytes and turn it off */ if (dma_stat & 0x40) { atari_dma_residual = 0; if ((dma_stat & 1) == 0) atari_scsi_fetch_restbytes(); tt_scsi_dma.dma_ctrl = 0; } #endif /* REAL_DMA */ NCR5380_intr(irq, dummy); #if 0 /* To be sure the int is not masked */ atari_enable_irq(IRQ_TT_MFP_SCSI); #endif return IRQ_HANDLED; } static irqreturn_t scsi_falcon_intr(int irq, void *dummy) { #ifdef REAL_DMA int dma_stat; /* Turn off DMA and select sector counter register before * accessing the status register (Atari recommendation!) */ st_dma.dma_mode_status = 0x90; dma_stat = st_dma.dma_mode_status; /* Bit 0 indicates some error in the DMA process... don't know * what happened exactly (no further docu). */ if (!(dma_stat & 0x01)) { /* DMA error */ printk(KERN_CRIT "SCSI DMA error near 0x%08lx!\n", SCSI_DMA_GETADR()); } /* If the DMA was active, but now bit 1 is not clear, it is some * other 5380 interrupt that finishes the DMA transfer. We have to * calculate the number of residual bytes and give a warning if * bytes are stuck in the ST-DMA fifo (there's no way to reach them!) */ if (atari_dma_active && (dma_stat & 0x02)) { unsigned long transferred; transferred = SCSI_DMA_GETADR() - atari_dma_startaddr; /* The ST-DMA address is incremented in 2-byte steps, but the * data are written only in 16-byte chunks. If the number of * transferred bytes is not divisible by 16, the remainder is * lost somewhere in outer space. 
*/ if (transferred & 15) printk(KERN_ERR "SCSI DMA error: %ld bytes lost in " "ST-DMA fifo\n", transferred & 15); atari_dma_residual = HOSTDATA_DMALEN - transferred; DMA_PRINTK("SCSI DMA: There are %ld residual bytes.\n", atari_dma_residual); } else atari_dma_residual = 0; atari_dma_active = 0; if (atari_dma_orig_addr) { /* If the dribble buffer was used on a read operation, copy the DMA-ed * data to the original destination address. */ memcpy(atari_dma_orig_addr, phys_to_virt(atari_dma_startaddr), HOSTDATA_DMALEN - atari_dma_residual); atari_dma_orig_addr = NULL; } #endif /* REAL_DMA */ NCR5380_intr(irq, dummy); return IRQ_HANDLED; } #ifdef REAL_DMA static void atari_scsi_fetch_restbytes(void) { int nr; char *src, *dst; unsigned long phys_dst; /* fetch rest bytes in the DMA register */ phys_dst = SCSI_DMA_READ_P(dma_addr); nr = phys_dst & 3; if (nr) { /* there are 'nr' bytes left for the last long address before the DMA pointer */ phys_dst ^= nr; DMA_PRINTK("SCSI DMA: there are %d rest bytes for phys addr 0x%08lx", nr, phys_dst); /* The content of the DMA pointer is a physical address! */ dst = phys_to_virt(phys_dst); DMA_PRINTK(" = virt addr %p\n", dst); for (src = (char *)&tt_scsi_dma.dma_restdata; nr != 0; --nr) *dst++ = *src++; } } #endif /* REAL_DMA */ static int falcon_got_lock = 0; static DECLARE_WAIT_QUEUE_HEAD(falcon_fairness_wait); static int falcon_trying_lock = 0; static DECLARE_WAIT_QUEUE_HEAD(falcon_try_wait); static int falcon_dont_release = 0; /* This function releases the lock on the DMA chip if there is no * connected command and the disconnected queue is empty. On * releasing, instances of falcon_get_lock are awoken, that put * themselves to sleep for fairness. They can now try to get the lock * again (but others waiting longer more probably will win). 
*/ static void falcon_release_lock_if_possible(struct NCR5380_hostdata *hostdata) { unsigned long flags; if (IS_A_TT()) return; local_irq_save(flags); if (falcon_got_lock && !hostdata->disconnected_queue && !hostdata->issue_queue && !hostdata->connected) { if (falcon_dont_release) { #if 0 printk("WARNING: Lock release not allowed. Ignored\n"); #endif local_irq_restore(flags); return; } falcon_got_lock = 0; stdma_release(); wake_up(&falcon_fairness_wait); } local_irq_restore(flags); } /* This function manages the locking of the ST-DMA. * If the DMA isn't locked already for SCSI, it tries to lock it by * calling stdma_lock(). But if the DMA is locked by the SCSI code and * there are other drivers waiting for the chip, we do not issue the * command immediately but wait on 'falcon_fairness_queue'. We will be * waked up when the DMA is unlocked by some SCSI interrupt. After that * we try to get the lock again. * But we must be prepared that more than one instance of * falcon_get_lock() is waiting on the fairness queue. They should not * try all at once to call stdma_lock(), one is enough! For that, the * first one sets 'falcon_trying_lock', others that see that variable * set wait on the queue 'falcon_try_wait'. * Complicated, complicated.... Sigh... 
*/ static void falcon_get_lock(void) { unsigned long flags; if (IS_A_TT()) return; local_irq_save(flags); while (!in_irq() && falcon_got_lock && stdma_others_waiting()) sleep_on(&falcon_fairness_wait); while (!falcon_got_lock) { if (in_irq()) panic("Falcon SCSI hasn't ST-DMA lock in interrupt"); if (!falcon_trying_lock) { falcon_trying_lock = 1; stdma_lock(scsi_falcon_intr, NULL); falcon_got_lock = 1; falcon_trying_lock = 0; wake_up(&falcon_try_wait); } else { sleep_on(&falcon_try_wait); } } local_irq_restore(flags); if (!falcon_got_lock) panic("Falcon SCSI: someone stole the lock :-(\n"); } int __init atari_scsi_detect(struct scsi_host_template *host) { static int called = 0; struct Scsi_Host *instance; if (!MACH_IS_ATARI || (!ATARIHW_PRESENT(ST_SCSI) && !ATARIHW_PRESENT(TT_SCSI)) || called) return 0; host->proc_name = "Atari"; atari_scsi_reg_read = IS_A_TT() ? atari_scsi_tt_reg_read : atari_scsi_falcon_reg_read; atari_scsi_reg_write = IS_A_TT() ? atari_scsi_tt_reg_write : atari_scsi_falcon_reg_write; /* setup variables */ host->can_queue = (setup_can_queue > 0) ? setup_can_queue : IS_A_TT() ? ATARI_TT_CAN_QUEUE : ATARI_FALCON_CAN_QUEUE; host->cmd_per_lun = (setup_cmd_per_lun > 0) ? setup_cmd_per_lun : IS_A_TT() ? ATARI_TT_CMD_PER_LUN : ATARI_FALCON_CMD_PER_LUN; /* Force sg_tablesize to 0 on a Falcon! */ host->sg_tablesize = !IS_A_TT() ? ATARI_FALCON_SG_TABLESIZE : (setup_sg_tablesize >= 0) ? setup_sg_tablesize : ATARI_TT_SG_TABLESIZE; if (setup_hostid >= 0) host->this_id = setup_hostid; else { /* use 7 as default */ host->this_id = 7; /* Test if a host id is set in the NVRam */ if (ATARIHW_PRESENT(TT_CLK) && nvram_check_checksum()) { unsigned char b = nvram_read_byte( 14 ); /* Arbitration enabled? 
(for TOS) If yes, use configured host ID */ if (b & 0x80) host->this_id = b & 7; } } #ifdef SUPPORT_TAGS if (setup_use_tagged_queuing < 0) setup_use_tagged_queuing = DEFAULT_USE_TAGGED_QUEUING; #endif #ifdef REAL_DMA /* If running on a Falcon and if there's TT-Ram (i.e., more than one * memory block, since there's always ST-Ram in a Falcon), then allocate a * STRAM_BUFFER_SIZE byte dribble buffer for transfers from/to alternative * Ram. */ if (MACH_IS_ATARI && ATARIHW_PRESENT(ST_SCSI) && !ATARIHW_PRESENT(EXTD_DMA) && m68k_num_memory > 1) { atari_dma_buffer = atari_stram_alloc(STRAM_BUFFER_SIZE, "SCSI"); if (!atari_dma_buffer) { printk(KERN_ERR "atari_scsi_detect: can't allocate ST-RAM " "double buffer\n"); return 0; } atari_dma_phys_buffer = virt_to_phys(atari_dma_buffer); atari_dma_orig_addr = 0; } #endif instance = scsi_register(host, sizeof(struct NCR5380_hostdata)); if (instance == NULL) { atari_stram_free(atari_dma_buffer); atari_dma_buffer = 0; return 0; } atari_scsi_host = instance; /* * Set irq to 0, to avoid that the mid-level code disables our interrupt * during queue_command calls. This is completely unnecessary, and even * worse causes bad problems on the Falcon, where the int is shared with * IDE and floppy! */ instance->irq = 0; #ifdef CONFIG_ATARI_SCSI_RESET_BOOT atari_scsi_reset_boot(); #endif NCR5380_init(instance, 0); if (IS_A_TT()) { /* This int is actually "pseudo-slow", i.e. it acts like a slow * interrupt after having cleared the pending flag for the DMA * interrupt. 
*/ if (request_irq(IRQ_TT_MFP_SCSI, scsi_tt_intr, IRQ_TYPE_SLOW, "SCSI NCR5380", instance)) { printk(KERN_ERR "atari_scsi_detect: cannot allocate irq %d, aborting",IRQ_TT_MFP_SCSI); scsi_unregister(atari_scsi_host); atari_stram_free(atari_dma_buffer); atari_dma_buffer = 0; return 0; } tt_mfp.active_edge |= 0x80; /* SCSI int on L->H */ #ifdef REAL_DMA tt_scsi_dma.dma_ctrl = 0; atari_dma_residual = 0; if (MACH_IS_MEDUSA) { /* While the read overruns (described by Drew Eckhardt in * NCR5380.c) never happened on TTs, they do in fact on the Medusa * (This was the cause why SCSI didn't work right for so long * there.) Since handling the overruns slows down a bit, I turned * the #ifdef's into a runtime condition. * * In principle it should be sufficient to do max. 1 byte with * PIO, but there is another problem on the Medusa with the DMA * rest data register. So 'atari_read_overruns' is currently set * to 4 to avoid having transfers that aren't a multiple of 4. If * the rest data bug is fixed, this can be lowered to 1. */ atari_read_overruns = 4; } #endif /*REAL_DMA*/ } else { /* ! IS_A_TT */ /* Nothing to do for the interrupt: the ST-DMA is initialized * already by atari_init_INTS() */ #ifdef REAL_DMA atari_dma_residual = 0; atari_dma_active = 0; atari_dma_stram_mask = (ATARIHW_PRESENT(EXTD_DMA) ? 0x00000000 : 0xff000000); #endif } printk(KERN_INFO "scsi%d: options CAN_QUEUE=%d CMD_PER_LUN=%d SCAT-GAT=%d " #ifdef SUPPORT_TAGS "TAGGED-QUEUING=%s " #endif "HOSTID=%d", instance->host_no, instance->hostt->can_queue, instance->hostt->cmd_per_lun, instance->hostt->sg_tablesize, #ifdef SUPPORT_TAGS setup_use_tagged_queuing ? 
"yes" : "no", #endif instance->hostt->this_id ); NCR5380_print_options(instance); printk("\n"); called = 1; return 1; } int atari_scsi_release(struct Scsi_Host *sh) { if (IS_A_TT()) free_irq(IRQ_TT_MFP_SCSI, sh); if (atari_dma_buffer) atari_stram_free(atari_dma_buffer); return 1; } void __init atari_scsi_setup(char *str, int *ints) { /* Format of atascsi parameter is: * atascsi=<can_queue>,<cmd_per_lun>,<sg_tablesize>,<hostid>,<use_tags> * Defaults depend on TT or Falcon, hostid determined at run time. * Negative values mean don't change. */ if (ints[0] < 1) { printk("atari_scsi_setup: no arguments!\n"); return; } if (ints[0] >= 1) { if (ints[1] > 0) /* no limits on this, just > 0 */ setup_can_queue = ints[1]; } if (ints[0] >= 2) { if (ints[2] > 0) setup_cmd_per_lun = ints[2]; } if (ints[0] >= 3) { if (ints[3] >= 0) { setup_sg_tablesize = ints[3]; /* Must be <= SG_ALL (255) */ if (setup_sg_tablesize > SG_ALL) setup_sg_tablesize = SG_ALL; } } if (ints[0] >= 4) { /* Must be between 0 and 7 */ if (ints[4] >= 0 && ints[4] <= 7) setup_hostid = ints[4]; else if (ints[4] > 7) printk("atari_scsi_setup: invalid host ID %d !\n", ints[4]); } #ifdef SUPPORT_TAGS if (ints[0] >= 5) { if (ints[5] >= 0) setup_use_tagged_queuing = !!ints[5]; } #endif } int atari_scsi_bus_reset(Scsi_Cmnd *cmd) { int rv; struct NCR5380_hostdata *hostdata = (struct NCR5380_hostdata *)cmd->device->host->hostdata; /* For doing the reset, SCSI interrupts must be disabled first, * since the 5380 raises its IRQ line while _RST is active and we * can't disable interrupts completely, since we need the timer. 
*/ /* And abort a maybe active DMA transfer */ if (IS_A_TT()) { atari_turnoff_irq(IRQ_TT_MFP_SCSI); #ifdef REAL_DMA tt_scsi_dma.dma_ctrl = 0; #endif /* REAL_DMA */ } else { atari_turnoff_irq(IRQ_MFP_FSCSI); #ifdef REAL_DMA st_dma.dma_mode_status = 0x90; atari_dma_active = 0; atari_dma_orig_addr = NULL; #endif /* REAL_DMA */ } rv = NCR5380_bus_reset(cmd); /* Re-enable ints */ if (IS_A_TT()) { atari_turnon_irq(IRQ_TT_MFP_SCSI); } else { atari_turnon_irq(IRQ_MFP_FSCSI); } if ((rv & SCSI_RESET_ACTION) == SCSI_RESET_SUCCESS) falcon_release_lock_if_possible(hostdata); return rv; } #ifdef CONFIG_ATARI_SCSI_RESET_BOOT static void __init atari_scsi_reset_boot(void) { unsigned long end; /* * Do a SCSI reset to clean up the bus during initialization. No messing * with the queues, interrupts, or locks necessary here. */ printk("Atari SCSI: resetting the SCSI bus..."); /* get in phase */ NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(NCR5380_read(STATUS_REG))); /* assert RST */ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_RST); /* The min. reset hold time is 25us, so 40us should be enough */ udelay(50); /* reset RST and interrupt */ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); NCR5380_read(RESET_PARITY_INTERRUPT_REG); end = jiffies + AFTER_RESET_DELAY; while (time_before(jiffies, end)) barrier(); printk(" done\n"); } #endif const char *atari_scsi_info(struct Scsi_Host *host) { /* atari_scsi_detect() is verbose enough... 
*/
	static const char string[] = "Atari native SCSI";
	return string;
}


#if defined(REAL_DMA)

/*
 * Program a DMA transfer of 'count' bytes at 'data' in direction 'dir'
 * (0 = read from target, nonzero = write to target) and return the byte
 * count set up.  On a Falcon, buffers outside ST-RAM are redirected
 * through the dribble buffer (see atari_dma_buffer).
 */
unsigned long atari_scsi_dma_setup(struct Scsi_Host *instance, void *data,
				   unsigned long count, int dir)
{
	unsigned long addr = virt_to_phys(data);

	DMA_PRINTK("scsi%d: setting up dma, data = %p, phys = %lx, count = %ld, "
		   "dir = %d\n", instance->host_no, data, addr, count, dir);

	if (!IS_A_TT() && !STRAM_ADDR(addr)) {
		/* If we have a non-DMAable address on a Falcon, use the dribble
		 * buffer; 'orig_addr' != 0 in the read case tells the interrupt
		 * handler to copy data from the dribble buffer to the originally
		 * wanted address.
		 */
		if (dir)
			memcpy(atari_dma_buffer, data, count);
		else
			atari_dma_orig_addr = data;
		addr = atari_dma_phys_buffer;
	}

	atari_dma_startaddr = addr;	/* Needed for calculating residual later. */

	/* Cache cleanup stuff: On writes, push any dirty cache out before sending
	 * it to the peripheral. (Must be done before DMA setup, since at least
	 * the ST-DMA begins to fill internal buffers right after setup. For
	 * reads, invalidate any cache, may be altered after DMA without CPU
	 * knowledge.
	 *
	 * ++roman: For the Medusa, there's no need at all for that cache stuff,
	 * because the hardware does bus snooping (fine!).
	 */
	dma_cache_maintenance(addr, count, dir);

	if (count == 0)
		printk(KERN_NOTICE "SCSI warning: DMA programmed for 0 bytes !\n");

	if (IS_A_TT()) {
		tt_scsi_dma.dma_ctrl = dir;
		SCSI_DMA_WRITE_P(dma_addr, addr);
		SCSI_DMA_WRITE_P(dma_cnt, count);
		tt_scsi_dma.dma_ctrl = dir | 2;
	} else {	/* ! IS_A_TT */

		/* set address */
		SCSI_DMA_SETADR(addr);

		/* toggle direction bit to clear FIFO and set DMA direction */
		dir <<= 8;
		st_dma.dma_mode_status = 0x90 | dir;
		st_dma.dma_mode_status = 0x90 | (dir ^ 0x100);
		st_dma.dma_mode_status = 0x90 | dir;
		udelay(40);
		/* On writes, round up the transfer length to the next multiple of 512
		 * (see also comment at atari_dma_xfer_len()). */
		st_dma.fdc_acces_seccount = (count + (dir ? 511 : 0)) >> 9;
		udelay(40);
		st_dma.dma_mode_status = 0x10 | dir;
		udelay(40);
		/* need not restore value of dir, only boolean value is tested */
		atari_dma_active = 1;
	}

	return count;
}


/* Residual byte count of the last DMA transfer, as computed by the
 * interrupt handlers. */
static long atari_scsi_dma_residual(struct Scsi_Host *instance)
{
	return atari_dma_residual;
}


/* Classification results for falcon_classify_cmd() below. */
#define	CMD_SURELY_BLOCK_MODE	0
#define	CMD_SURELY_BYTE_MODE	1
#define	CMD_MODE_UNKNOWN	2

/*
 * Decide whether a SCSI command will transfer data in whole blocks, in
 * single bytes, or unpredictably.  Only read-type opcodes are classified;
 * the result is consumed by atari_dma_xfer_len() for Falcon reads.
 */
static int falcon_classify_cmd(Scsi_Cmnd *cmd)
{
	unsigned char opcode = cmd->cmnd[0];

	if (opcode == READ_DEFECT_DATA || opcode == READ_LONG ||
	    opcode == READ_BUFFER)
		return CMD_SURELY_BYTE_MODE;
	else if (opcode == READ_6 || opcode == READ_10 ||
		 opcode == 0xa8 /* READ_12 */ || opcode == READ_REVERSE ||
		 opcode == RECOVER_BUFFERED_DATA) {
		/* In case of a sequential-access target (tape), special care is
		 * needed here: The transfer is block-mode only if the 'fixed' bit is
		 * set! */
		if (cmd->device->type == TYPE_TAPE && !(cmd->cmnd[1] & 1))
			return CMD_SURELY_BYTE_MODE;
		else
			return CMD_SURELY_BLOCK_MODE;
	} else
		return CMD_MODE_UNKNOWN;
}


/* This function calculates the number of bytes that can be transferred via
 * DMA. On the TT, this is arbitrary, but on the Falcon we have to use the
 * ST-DMA chip. There are only multiples of 512 bytes possible and max.
 * 255*512 bytes :-( This means also, that defining READ_OVERRUNS is not
 * possible on the Falcon, since that would require to program the DMA for
 * n*512 - atari_read_overrun bytes. But it seems that the Falcon doesn't have
 * the overrun problem, so this question is academic :-)
 */
static unsigned long atari_dma_xfer_len(unsigned long wanted_len,
					Scsi_Cmnd *cmd, int write_flag)
{
	unsigned long possible_len, limit;

	if (IS_A_TT())
		/* TT SCSI DMA can transfer arbitrary #bytes */
		return wanted_len;

	/* ST DMA chip is stupid -- only multiples of 512 bytes! (and max.
	 * 255*512 bytes, but this should be enough)
	 *
	 * ++roman: Aaargl! Another Falcon-SCSI problem... There are some commands
	 * that return a number of bytes which cannot be known beforehand. In this
	 * case, the given transfer length is an "allocation length". Now it
	 * can happen that this allocation length is a multiple of 512 bytes and
	 * the DMA is used. But if not n*512 bytes really arrive, some input data
	 * will be lost in the ST-DMA's FIFO :-( Thus, we have to distinguish
	 * between commands that do block transfers and those that do byte
	 * transfers. But this isn't easy... there are lots of vendor specific
	 * commands, and the user can issue any command via the
	 * SCSI_IOCTL_SEND_COMMAND.
	 *
	 * The solution: We classify SCSI commands in 1) surely block-mode cmd.s,
	 * 2) surely byte-mode cmd.s and 3) cmd.s with unknown mode. In case 1)
	 * and 3), the thing to do is obvious: allow any number of blocks via DMA
	 * or none. In case 2), we apply some heuristic: Byte mode is assumed if
	 * the transfer (allocation) length is < 1024, hoping that no cmd. not
	 * explicitly known as byte mode have such big allocation lengths...
	 * BTW, all the discussion above applies only to reads. DMA writes are
	 * unproblematic anyways, since the targets aborts the transfer after
	 * receiving a sufficient number of bytes.
	 *
	 * Another point: If the transfer is from/to an non-ST-RAM address, we
	 * use the dribble buffer and thus can do only STRAM_BUFFER_SIZE bytes.
	 */

	if (write_flag) {
		/* Write operation can always use the DMA, but the transfer size must
		 * be rounded up to the next multiple of 512 (atari_dma_setup() does
		 * this).
		 */
		possible_len = wanted_len;
	} else {
		/* Read operations: if the wanted transfer length is not a multiple of
		 * 512, we cannot use DMA, since the ST-DMA cannot split transfers
		 * (no interrupt on DMA finished!)
		 */
		if (wanted_len & 0x1ff)
			possible_len = 0;
		else {
			/* Now classify the command (see above) and decide whether it is
			 * allowed to do DMA at all */
			switch (falcon_classify_cmd(cmd)) {
			case CMD_SURELY_BLOCK_MODE:
				possible_len = wanted_len;
				break;
			case CMD_SURELY_BYTE_MODE:
				possible_len = 0; /* DMA prohibited */
				break;
			case CMD_MODE_UNKNOWN:
			default:
				/* For unknown commands assume block transfers if the transfer
				 * size/allocation length is >= 1024 */
				possible_len = (wanted_len < 1024) ? 0 : wanted_len;
				break;
			}
		}
	}

	/* Last step: apply the hard limit on DMA transfers */
	limit = (atari_dma_buffer && !STRAM_ADDR(virt_to_phys(cmd->SCp.ptr))) ?
		    STRAM_BUFFER_SIZE : 255*512;
	if (possible_len > limit)
		possible_len = limit;

	if (possible_len != wanted_len)
		DMA_PRINTK("Sorry, must cut DMA transfer size to %ld bytes "
			   "instead of %ld\n", possible_len, wanted_len);

	return possible_len;
}
#endif	/* REAL_DMA */


/* NCR5380 register access functions
 *
 * There are separate functions for TT and Falcon, because the access
 * methods are quite different. The calling macros NCR5380_read and
 * NCR5380_write call these functions via function pointers.
*/

/* TT: the 5380 registers are reachable directly through tt_scsi_regp,
 * indexed at reg * 2 (one register on every other byte). */
static unsigned char atari_scsi_tt_reg_read(unsigned char reg)
{
	return tt_scsi_regp[reg * 2];
}

static void atari_scsi_tt_reg_write(unsigned char reg, unsigned char value)
{
	tt_scsi_regp[reg * 2] = value;
}

/* Falcon: a 5380 register is reached indirectly -- first select it by
 * writing 0x88 + reg to the ST-DMA mode/status register, then move the
 * value through the sector-count/data register. */
static unsigned char atari_scsi_falcon_reg_read(unsigned char reg)
{
	dma_wd.dma_mode_status = (u_short)(0x88 + reg);
	return (u_char)dma_wd.fdc_acces_seccount;
}

static void atari_scsi_falcon_reg_write(unsigned char reg, unsigned char value)
{
	dma_wd.dma_mode_status = (u_short)(0x88 + reg);
	dma_wd.fdc_acces_seccount = (u_short)value;
}


#include "atari_NCR5380.c"

static struct scsi_host_template driver_template = {
	.proc_info		= atari_scsi_proc_info,
	.name			= "Atari native SCSI",
	.detect			= atari_scsi_detect,
	.release		= atari_scsi_release,
	.info			= atari_scsi_info,
	.queuecommand		= atari_scsi_queue_command,
	.eh_abort_handler	= atari_scsi_abort,
	.eh_bus_reset_handler	= atari_scsi_bus_reset,
	.can_queue		= 0, /* initialized at run-time */
	.this_id		= 0, /* initialized at run-time */
	.sg_tablesize		= 0, /* initialized at run-time */
	.cmd_per_lun		= 0, /* initialized at run-time */
	.use_clustering		= DISABLE_CLUSTERING
};

#include "scsi_module.c"

MODULE_LICENSE("GPL");
gpl-2.0
android-armv7a-belalang-tempur/Android_SpeedKernel
drivers/scsi/atari_scsi.c
2685
35496
/* * atari_scsi.c -- Device dependent functions for the Atari generic SCSI port * * Copyright 1994 Roman Hodek <Roman.Hodek@informatik.uni-erlangen.de> * * Loosely based on the work of Robert De Vries' team and added: * - working real DMA * - Falcon support (untested yet!) ++bjoern fixed and now it works * - lots of extensions and bug fixes. * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of this archive * for more details. * */ /**************************************************************************/ /* */ /* Notes for Falcon SCSI: */ /* ---------------------- */ /* */ /* Since the Falcon SCSI uses the ST-DMA chip, that is shared among */ /* several device drivers, locking and unlocking the access to this */ /* chip is required. But locking is not possible from an interrupt, */ /* since it puts the process to sleep if the lock is not available. */ /* This prevents "late" locking of the DMA chip, i.e. locking it just */ /* before using it, since in case of disconnection-reconnection */ /* commands, the DMA is started from the reselection interrupt. */ /* */ /* Two possible schemes for ST-DMA-locking would be: */ /* 1) The lock is taken for each command separately and disconnecting */ /* is forbidden (i.e. can_queue = 1). */ /* 2) The DMA chip is locked when the first command comes in and */ /* released when the last command is finished and all queues are */ /* empty. */ /* The first alternative would result in bad performance, since the */ /* interleaving of commands would not be used. The second is unfair to */ /* other drivers using the ST-DMA, because the queues will seldom be */ /* totally empty if there is a lot of disk traffic. 
*/
/*                                                                        */
/* For these reasons I decided to employ a more elaborate scheme:         */
/*  - First, we give up the lock every time we can (for fairness), this   */
/*    means every time a command finishes and there are no other commands */
/*    on the disconnected queue.                                          */
/*  - If there are others waiting to lock the DMA chip, we stop           */
/*    issuing commands, i.e. moving them onto the issue queue.            */
/*    Because of that, the disconnected queue will run empty in a         */
/*    while. Instead we go to sleep on a 'fairness_queue'.                */
/*  - If the lock is released, all processes waiting on the fairness      */
/*    queue will be woken. The first of them tries to re-lock the DMA,    */
/*    the others wait for the first to finish this task. After that,      */
/*    they can all run on and do their commands...                        */
/* This sounds complicated (and it is :-(), but it seems to be a          */
/* good compromise between fairness and performance: As long as no one    */
/* else wants to work with the ST-DMA chip, SCSI can go along as          */
/* usual. If now someone else comes, this behaviour is changed to a       */
/* "fairness mode": just already initiated commands are finished and      */
/* then the lock is released. The other one waiting will probably win     */
/* the race for locking the DMA, since it was waiting for longer. And     */
/* after it has finished, SCSI can go ahead again. Finally: I hope I      */
/* have not produced any deadlock possibilities!                          */
/*                                                                        */
/**************************************************************************/

#include <linux/module.h>

#define NDEBUG (0)

#define NDEBUG_ABORT		0x00100000
#define NDEBUG_TAGS		0x00200000
#define NDEBUG_MERGING		0x00400000

#define AUTOSENSE
/* For the Atari version, use only polled IO or REAL_DMA */
#define REAL_DMA
/* Support tagged queuing? (on devices that are able to...
:-) */ #define SUPPORT_TAGS #define MAX_TAGS 32 #include <linux/types.h> #include <linux/stddef.h> #include <linux/ctype.h> #include <linux/delay.h> #include <linux/mm.h> #include <linux/blkdev.h> #include <linux/interrupt.h> #include <linux/init.h> #include <linux/nvram.h> #include <linux/bitops.h> #include <asm/setup.h> #include <asm/atarihw.h> #include <asm/atariints.h> #include <asm/page.h> #include <asm/pgtable.h> #include <asm/irq.h> #include <asm/traps.h> #include "scsi.h" #include <scsi/scsi_host.h> #include "atari_scsi.h" #include "NCR5380.h" #include <asm/atari_stdma.h> #include <asm/atari_stram.h> #include <asm/io.h> #include <linux/stat.h> #define IS_A_TT() ATARIHW_PRESENT(TT_SCSI) #define SCSI_DMA_WRITE_P(elt,val) \ do { \ unsigned long v = val; \ tt_scsi_dma.elt##_lo = v & 0xff; \ v >>= 8; \ tt_scsi_dma.elt##_lmd = v & 0xff; \ v >>= 8; \ tt_scsi_dma.elt##_hmd = v & 0xff; \ v >>= 8; \ tt_scsi_dma.elt##_hi = v & 0xff; \ } while(0) #define SCSI_DMA_READ_P(elt) \ (((((((unsigned long)tt_scsi_dma.elt##_hi << 8) | \ (unsigned long)tt_scsi_dma.elt##_hmd) << 8) | \ (unsigned long)tt_scsi_dma.elt##_lmd) << 8) | \ (unsigned long)tt_scsi_dma.elt##_lo) static inline void SCSI_DMA_SETADR(unsigned long adr) { st_dma.dma_lo = (unsigned char)adr; MFPDELAY(); adr >>= 8; st_dma.dma_md = (unsigned char)adr; MFPDELAY(); adr >>= 8; st_dma.dma_hi = (unsigned char)adr; MFPDELAY(); } static inline unsigned long SCSI_DMA_GETADR(void) { unsigned long adr; adr = st_dma.dma_lo; MFPDELAY(); adr |= (st_dma.dma_md & 0xff) << 8; MFPDELAY(); adr |= (st_dma.dma_hi & 0xff) << 16; MFPDELAY(); return adr; } static inline void ENABLE_IRQ(void) { if (IS_A_TT()) atari_enable_irq(IRQ_TT_MFP_SCSI); else atari_enable_irq(IRQ_MFP_FSCSI); } static inline void DISABLE_IRQ(void) { if (IS_A_TT()) atari_disable_irq(IRQ_TT_MFP_SCSI); else atari_disable_irq(IRQ_MFP_FSCSI); } #define HOSTDATA_DMALEN (((struct NCR5380_hostdata *) \ (atari_scsi_host->hostdata))->dma_len) /* Time (in jiffies) to wait 
after a reset; the SCSI standard calls for 250ms, * we usually do 0.5s to be on the safe side. But Toshiba CD-ROMs once more * need ten times the standard value... */ #ifndef CONFIG_ATARI_SCSI_TOSHIBA_DELAY #define AFTER_RESET_DELAY (HZ/2) #else #define AFTER_RESET_DELAY (5*HZ/2) #endif /***************************** Prototypes *****************************/ #ifdef REAL_DMA static int scsi_dma_is_ignored_buserr(unsigned char dma_stat); static void atari_scsi_fetch_restbytes(void); static long atari_scsi_dma_residual(struct Scsi_Host *instance); static int falcon_classify_cmd(Scsi_Cmnd *cmd); static unsigned long atari_dma_xfer_len(unsigned long wanted_len, Scsi_Cmnd *cmd, int write_flag); #endif static irqreturn_t scsi_tt_intr(int irq, void *dummy); static irqreturn_t scsi_falcon_intr(int irq, void *dummy); static void falcon_release_lock_if_possible(struct NCR5380_hostdata *hostdata); static void falcon_get_lock(void); #ifdef CONFIG_ATARI_SCSI_RESET_BOOT static void atari_scsi_reset_boot(void); #endif static unsigned char atari_scsi_tt_reg_read(unsigned char reg); static void atari_scsi_tt_reg_write(unsigned char reg, unsigned char value); static unsigned char atari_scsi_falcon_reg_read(unsigned char reg); static void atari_scsi_falcon_reg_write(unsigned char reg, unsigned char value); /************************* End of Prototypes **************************/ static struct Scsi_Host *atari_scsi_host; static unsigned char (*atari_scsi_reg_read)(unsigned char reg); static void (*atari_scsi_reg_write)(unsigned char reg, unsigned char value); #ifdef REAL_DMA static unsigned long atari_dma_residual, atari_dma_startaddr; static short atari_dma_active; /* pointer to the dribble buffer */ static char *atari_dma_buffer; /* precalculated physical address of the dribble buffer */ static unsigned long atari_dma_phys_buffer; /* != 0 tells the Falcon int handler to copy data from the dribble buffer */ static char *atari_dma_orig_addr; /* size of the dribble buffer; 4k seems 
enough, since the Falcon cannot use * scatter-gather anyway, so most transfers are 1024 byte only. In the rare * cases where requests to physical contiguous buffers have been merged, this * request is <= 4k (one page). So I don't think we have to split transfers * just due to this buffer size... */ #define STRAM_BUFFER_SIZE (4096) /* mask for address bits that can't be used with the ST-DMA */ static unsigned long atari_dma_stram_mask; #define STRAM_ADDR(a) (((a) & atari_dma_stram_mask) == 0) /* number of bytes to cut from a transfer to handle NCR overruns */ static int atari_read_overruns; #endif static int setup_can_queue = -1; module_param(setup_can_queue, int, 0); static int setup_cmd_per_lun = -1; module_param(setup_cmd_per_lun, int, 0); static int setup_sg_tablesize = -1; module_param(setup_sg_tablesize, int, 0); #ifdef SUPPORT_TAGS static int setup_use_tagged_queuing = -1; module_param(setup_use_tagged_queuing, int, 0); #endif static int setup_hostid = -1; module_param(setup_hostid, int, 0); #if defined(REAL_DMA) static int scsi_dma_is_ignored_buserr(unsigned char dma_stat) { int i; unsigned long addr = SCSI_DMA_READ_P(dma_addr), end_addr; if (dma_stat & 0x01) { /* A bus error happens when DMA-ing from the last page of a * physical memory chunk (DMA prefetch!), but that doesn't hurt. * Check for this case: */ for (i = 0; i < m68k_num_memory; ++i) { end_addr = m68k_memory[i].addr + m68k_memory[i].size; if (end_addr <= addr && addr <= end_addr + 4) return 1; } } return 0; } #if 0 /* Dead code... wasn't called anyway :-) and causes some trouble, because at * end-of-DMA, both SCSI ints are triggered simultaneously, so the NCR int has * to clear the DMA int pending bit before it allows other level 6 interrupts. */ static void scsi_dma_buserr(int irq, void *dummy) { unsigned char dma_stat = tt_scsi_dma.dma_ctrl; /* Don't do anything if a NCR interrupt is pending. Probably it's just * masked... 
*/ if (atari_irq_pending(IRQ_TT_MFP_SCSI)) return; printk("Bad SCSI DMA interrupt! dma_addr=0x%08lx dma_stat=%02x dma_cnt=%08lx\n", SCSI_DMA_READ_P(dma_addr), dma_stat, SCSI_DMA_READ_P(dma_cnt)); if (dma_stat & 0x80) { if (!scsi_dma_is_ignored_buserr(dma_stat)) printk("SCSI DMA bus error -- bad DMA programming!\n"); } else { /* Under normal circumstances we never should get to this point, * since both interrupts are triggered simultaneously and the 5380 * int has higher priority. When this irq is handled, that DMA * interrupt is cleared. So a warning message is printed here. */ printk("SCSI DMA intr ?? -- this shouldn't happen!\n"); } } #endif #endif static irqreturn_t scsi_tt_intr(int irq, void *dummy) { #ifdef REAL_DMA int dma_stat; dma_stat = tt_scsi_dma.dma_ctrl; INT_PRINTK("scsi%d: NCR5380 interrupt, DMA status = %02x\n", atari_scsi_host->host_no, dma_stat & 0xff); /* Look if it was the DMA that has interrupted: First possibility * is that a bus error occurred... */ if (dma_stat & 0x80) { if (!scsi_dma_is_ignored_buserr(dma_stat)) { printk(KERN_ERR "SCSI DMA caused bus error near 0x%08lx\n", SCSI_DMA_READ_P(dma_addr)); printk(KERN_CRIT "SCSI DMA bus error -- bad DMA programming!"); } } /* If the DMA is active but not finished, we have the case * that some other 5380 interrupt occurred within the DMA transfer. * This means we have residual bytes, if the desired end address * is not yet reached. Maybe we have to fetch some bytes from the * rest data register, too. The residual must be calculated from * the address pointer, not the counter register, because only the * addr reg counts bytes not yet written and pending in the rest * data reg! 
*/ if ((dma_stat & 0x02) && !(dma_stat & 0x40)) { atari_dma_residual = HOSTDATA_DMALEN - (SCSI_DMA_READ_P(dma_addr) - atari_dma_startaddr); DMA_PRINTK("SCSI DMA: There are %ld residual bytes.\n", atari_dma_residual); if ((signed int)atari_dma_residual < 0) atari_dma_residual = 0; if ((dma_stat & 1) == 0) { /* * After read operations, we maybe have to * transport some rest bytes */ atari_scsi_fetch_restbytes(); } else { /* * There seems to be a nasty bug in some SCSI-DMA/NCR * combinations: If a target disconnects while a write * operation is going on, the address register of the * DMA may be a few bytes farer than it actually read. * This is probably due to DMA prefetching and a delay * between DMA and NCR. Experiments showed that the * dma_addr is 9 bytes to high, but this could vary. * The problem is, that the residual is thus calculated * wrong and the next transfer will start behind where * it should. So we round up the residual to the next * multiple of a sector size, if it isn't already a * multiple and the originally expected transfer size * was. The latter condition is there to ensure that * the correction is taken only for "real" data * transfers and not for, e.g., the parameters of some * other command. These shouldn't disconnect anyway. 
*/ if (atari_dma_residual & 0x1ff) { DMA_PRINTK("SCSI DMA: DMA bug corrected, " "difference %ld bytes\n", 512 - (atari_dma_residual & 0x1ff)); atari_dma_residual = (atari_dma_residual + 511) & ~0x1ff; } } tt_scsi_dma.dma_ctrl = 0; } /* If the DMA is finished, fetch the rest bytes and turn it off */ if (dma_stat & 0x40) { atari_dma_residual = 0; if ((dma_stat & 1) == 0) atari_scsi_fetch_restbytes(); tt_scsi_dma.dma_ctrl = 0; } #endif /* REAL_DMA */ NCR5380_intr(irq, dummy); #if 0 /* To be sure the int is not masked */ atari_enable_irq(IRQ_TT_MFP_SCSI); #endif return IRQ_HANDLED; } static irqreturn_t scsi_falcon_intr(int irq, void *dummy) { #ifdef REAL_DMA int dma_stat; /* Turn off DMA and select sector counter register before * accessing the status register (Atari recommendation!) */ st_dma.dma_mode_status = 0x90; dma_stat = st_dma.dma_mode_status; /* Bit 0 indicates some error in the DMA process... don't know * what happened exactly (no further docu). */ if (!(dma_stat & 0x01)) { /* DMA error */ printk(KERN_CRIT "SCSI DMA error near 0x%08lx!\n", SCSI_DMA_GETADR()); } /* If the DMA was active, but now bit 1 is not clear, it is some * other 5380 interrupt that finishes the DMA transfer. We have to * calculate the number of residual bytes and give a warning if * bytes are stuck in the ST-DMA fifo (there's no way to reach them!) */ if (atari_dma_active && (dma_stat & 0x02)) { unsigned long transferred; transferred = SCSI_DMA_GETADR() - atari_dma_startaddr; /* The ST-DMA address is incremented in 2-byte steps, but the * data are written only in 16-byte chunks. If the number of * transferred bytes is not divisible by 16, the remainder is * lost somewhere in outer space. 
*/
		if (transferred & 15)
			printk(KERN_ERR "SCSI DMA error: %ld bytes lost in "
			       "ST-DMA fifo\n", transferred & 15);

		atari_dma_residual = HOSTDATA_DMALEN - transferred;
		DMA_PRINTK("SCSI DMA: There are %ld residual bytes.\n",
			   atari_dma_residual);
	} else
		atari_dma_residual = 0;
	atari_dma_active = 0;

	if (atari_dma_orig_addr) {
		/* If the dribble buffer was used on a read operation, copy the DMA-ed
		 * data to the original destination address.
		 */
		memcpy(atari_dma_orig_addr, phys_to_virt(atari_dma_startaddr),
		       HOSTDATA_DMALEN - atari_dma_residual);
		atari_dma_orig_addr = NULL;
	}

#endif /* REAL_DMA */

	NCR5380_intr(irq, dummy);

	return IRQ_HANDLED;
}


#ifdef REAL_DMA
/*
 * After a read DMA, up to 3 bytes may still sit in the DMA's rest-data
 * register, since the DMA pointer only advances in whole longwords
 * (phys_dst & 3 gives the leftover count).  Copy those bytes to the
 * addresses just below the final DMA pointer.
 */
static void atari_scsi_fetch_restbytes(void)
{
	int nr;
	char *src, *dst;
	unsigned long phys_dst;

	/* fetch rest bytes in the DMA register */
	phys_dst = SCSI_DMA_READ_P(dma_addr);
	nr = phys_dst & 3;
	if (nr) {
		/* there are 'nr' bytes left for the last long address
		   before the DMA pointer */
		phys_dst ^= nr;
		DMA_PRINTK("SCSI DMA: there are %d rest bytes for phys addr 0x%08lx",
			   nr, phys_dst);
		/* The content of the DMA pointer is a physical address! */
		dst = phys_to_virt(phys_dst);
		DMA_PRINTK(" = virt addr %p\n", dst);
		for (src = (char *)&tt_scsi_dma.dma_restdata; nr != 0; --nr)
			*dst++ = *src++;
	}
}
#endif /* REAL_DMA */


/* State for the Falcon ST-DMA lock juggling described at the top of the
 * file.  NOTE(review): plain int flags plus wait queues, protected only by
 * local_irq_save() in the functions below -- presumably fine on UP m68k;
 * confirm before reusing this pattern elsewhere. */
static int falcon_got_lock = 0;
static DECLARE_WAIT_QUEUE_HEAD(falcon_fairness_wait);
static int falcon_trying_lock = 0;
static DECLARE_WAIT_QUEUE_HEAD(falcon_try_wait);
static int falcon_dont_release = 0;

/* This function releases the lock on the DMA chip if there is no
 * connected command and the disconnected queue is empty. On
 * releasing, instances of falcon_get_lock are awoken, that put
 * themselves to sleep for fairness. They can now try to get the lock
 * again (but others waiting longer more probably will win).
 */
static void falcon_release_lock_if_possible(struct NCR5380_hostdata *hostdata)
{
	unsigned long flags;

	/* The lock exists only for the shared ST-DMA; a TT has its own DMA. */
	if (IS_A_TT())
		return;

	local_irq_save(flags);

	if (falcon_got_lock && !hostdata->disconnected_queue &&
	    !hostdata->issue_queue && !hostdata->connected) {
		if (falcon_dont_release) {
#if 0
			printk("WARNING: Lock release not allowed. Ignored\n");
#endif
			local_irq_restore(flags);
			return;
		}
		falcon_got_lock = 0;
		stdma_release();
		wake_up(&falcon_fairness_wait);
	}
	local_irq_restore(flags);
}

/* This function manages the locking of the ST-DMA.
 * If the DMA isn't locked already for SCSI, it tries to lock it by
 * calling stdma_lock(). But if the DMA is locked by the SCSI code and
 * there are other drivers waiting for the chip, we do not issue the
 * command immediately but wait on 'falcon_fairness_queue'. We will be
 * woken up when the DMA is unlocked by some SCSI interrupt. After that
 * we try to get the lock again.
 * But we must be prepared that more than one instance of
 * falcon_get_lock() is waiting on the fairness queue. They should not
 * try all at once to call stdma_lock(), one is enough! For that, the
 * first one sets 'falcon_trying_lock', others that see that variable
 * set wait on the queue 'falcon_try_wait'.
 * Complicated, complicated.... Sigh...
*/ static void falcon_get_lock(void) { unsigned long flags; if (IS_A_TT()) return; local_irq_save(flags); while (!in_irq() && falcon_got_lock && stdma_others_waiting()) sleep_on(&falcon_fairness_wait); while (!falcon_got_lock) { if (in_irq()) panic("Falcon SCSI hasn't ST-DMA lock in interrupt"); if (!falcon_trying_lock) { falcon_trying_lock = 1; stdma_lock(scsi_falcon_intr, NULL); falcon_got_lock = 1; falcon_trying_lock = 0; wake_up(&falcon_try_wait); } else { sleep_on(&falcon_try_wait); } } local_irq_restore(flags); if (!falcon_got_lock) panic("Falcon SCSI: someone stole the lock :-(\n"); } int __init atari_scsi_detect(struct scsi_host_template *host) { static int called = 0; struct Scsi_Host *instance; if (!MACH_IS_ATARI || (!ATARIHW_PRESENT(ST_SCSI) && !ATARIHW_PRESENT(TT_SCSI)) || called) return 0; host->proc_name = "Atari"; atari_scsi_reg_read = IS_A_TT() ? atari_scsi_tt_reg_read : atari_scsi_falcon_reg_read; atari_scsi_reg_write = IS_A_TT() ? atari_scsi_tt_reg_write : atari_scsi_falcon_reg_write; /* setup variables */ host->can_queue = (setup_can_queue > 0) ? setup_can_queue : IS_A_TT() ? ATARI_TT_CAN_QUEUE : ATARI_FALCON_CAN_QUEUE; host->cmd_per_lun = (setup_cmd_per_lun > 0) ? setup_cmd_per_lun : IS_A_TT() ? ATARI_TT_CMD_PER_LUN : ATARI_FALCON_CMD_PER_LUN; /* Force sg_tablesize to 0 on a Falcon! */ host->sg_tablesize = !IS_A_TT() ? ATARI_FALCON_SG_TABLESIZE : (setup_sg_tablesize >= 0) ? setup_sg_tablesize : ATARI_TT_SG_TABLESIZE; if (setup_hostid >= 0) host->this_id = setup_hostid; else { /* use 7 as default */ host->this_id = 7; /* Test if a host id is set in the NVRam */ if (ATARIHW_PRESENT(TT_CLK) && nvram_check_checksum()) { unsigned char b = nvram_read_byte( 14 ); /* Arbitration enabled? 
(for TOS) If yes, use configured host ID */ if (b & 0x80) host->this_id = b & 7; } } #ifdef SUPPORT_TAGS if (setup_use_tagged_queuing < 0) setup_use_tagged_queuing = DEFAULT_USE_TAGGED_QUEUING; #endif #ifdef REAL_DMA /* If running on a Falcon and if there's TT-Ram (i.e., more than one * memory block, since there's always ST-Ram in a Falcon), then allocate a * STRAM_BUFFER_SIZE byte dribble buffer for transfers from/to alternative * Ram. */ if (MACH_IS_ATARI && ATARIHW_PRESENT(ST_SCSI) && !ATARIHW_PRESENT(EXTD_DMA) && m68k_num_memory > 1) { atari_dma_buffer = atari_stram_alloc(STRAM_BUFFER_SIZE, "SCSI"); if (!atari_dma_buffer) { printk(KERN_ERR "atari_scsi_detect: can't allocate ST-RAM " "double buffer\n"); return 0; } atari_dma_phys_buffer = virt_to_phys(atari_dma_buffer); atari_dma_orig_addr = 0; } #endif instance = scsi_register(host, sizeof(struct NCR5380_hostdata)); if (instance == NULL) { atari_stram_free(atari_dma_buffer); atari_dma_buffer = 0; return 0; } atari_scsi_host = instance; /* * Set irq to 0, to avoid that the mid-level code disables our interrupt * during queue_command calls. This is completely unnecessary, and even * worse causes bad problems on the Falcon, where the int is shared with * IDE and floppy! */ instance->irq = 0; #ifdef CONFIG_ATARI_SCSI_RESET_BOOT atari_scsi_reset_boot(); #endif NCR5380_init(instance, 0); if (IS_A_TT()) { /* This int is actually "pseudo-slow", i.e. it acts like a slow * interrupt after having cleared the pending flag for the DMA * interrupt. 
*/ if (request_irq(IRQ_TT_MFP_SCSI, scsi_tt_intr, IRQ_TYPE_SLOW, "SCSI NCR5380", instance)) { printk(KERN_ERR "atari_scsi_detect: cannot allocate irq %d, aborting",IRQ_TT_MFP_SCSI); scsi_unregister(atari_scsi_host); atari_stram_free(atari_dma_buffer); atari_dma_buffer = 0; return 0; } tt_mfp.active_edge |= 0x80; /* SCSI int on L->H */ #ifdef REAL_DMA tt_scsi_dma.dma_ctrl = 0; atari_dma_residual = 0; if (MACH_IS_MEDUSA) { /* While the read overruns (described by Drew Eckhardt in * NCR5380.c) never happened on TTs, they do in fact on the Medusa * (This was the cause why SCSI didn't work right for so long * there.) Since handling the overruns slows down a bit, I turned * the #ifdef's into a runtime condition. * * In principle it should be sufficient to do max. 1 byte with * PIO, but there is another problem on the Medusa with the DMA * rest data register. So 'atari_read_overruns' is currently set * to 4 to avoid having transfers that aren't a multiple of 4. If * the rest data bug is fixed, this can be lowered to 1. */ atari_read_overruns = 4; } #endif /*REAL_DMA*/ } else { /* ! IS_A_TT */ /* Nothing to do for the interrupt: the ST-DMA is initialized * already by atari_init_INTS() */ #ifdef REAL_DMA atari_dma_residual = 0; atari_dma_active = 0; atari_dma_stram_mask = (ATARIHW_PRESENT(EXTD_DMA) ? 0x00000000 : 0xff000000); #endif } printk(KERN_INFO "scsi%d: options CAN_QUEUE=%d CMD_PER_LUN=%d SCAT-GAT=%d " #ifdef SUPPORT_TAGS "TAGGED-QUEUING=%s " #endif "HOSTID=%d", instance->host_no, instance->hostt->can_queue, instance->hostt->cmd_per_lun, instance->hostt->sg_tablesize, #ifdef SUPPORT_TAGS setup_use_tagged_queuing ? 
"yes" : "no", #endif instance->hostt->this_id ); NCR5380_print_options(instance); printk("\n"); called = 1; return 1; } int atari_scsi_release(struct Scsi_Host *sh) { if (IS_A_TT()) free_irq(IRQ_TT_MFP_SCSI, sh); if (atari_dma_buffer) atari_stram_free(atari_dma_buffer); return 1; } void __init atari_scsi_setup(char *str, int *ints) { /* Format of atascsi parameter is: * atascsi=<can_queue>,<cmd_per_lun>,<sg_tablesize>,<hostid>,<use_tags> * Defaults depend on TT or Falcon, hostid determined at run time. * Negative values mean don't change. */ if (ints[0] < 1) { printk("atari_scsi_setup: no arguments!\n"); return; } if (ints[0] >= 1) { if (ints[1] > 0) /* no limits on this, just > 0 */ setup_can_queue = ints[1]; } if (ints[0] >= 2) { if (ints[2] > 0) setup_cmd_per_lun = ints[2]; } if (ints[0] >= 3) { if (ints[3] >= 0) { setup_sg_tablesize = ints[3]; /* Must be <= SG_ALL (255) */ if (setup_sg_tablesize > SG_ALL) setup_sg_tablesize = SG_ALL; } } if (ints[0] >= 4) { /* Must be between 0 and 7 */ if (ints[4] >= 0 && ints[4] <= 7) setup_hostid = ints[4]; else if (ints[4] > 7) printk("atari_scsi_setup: invalid host ID %d !\n", ints[4]); } #ifdef SUPPORT_TAGS if (ints[0] >= 5) { if (ints[5] >= 0) setup_use_tagged_queuing = !!ints[5]; } #endif } int atari_scsi_bus_reset(Scsi_Cmnd *cmd) { int rv; struct NCR5380_hostdata *hostdata = (struct NCR5380_hostdata *)cmd->device->host->hostdata; /* For doing the reset, SCSI interrupts must be disabled first, * since the 5380 raises its IRQ line while _RST is active and we * can't disable interrupts completely, since we need the timer. 
*/ /* And abort a maybe active DMA transfer */ if (IS_A_TT()) { atari_turnoff_irq(IRQ_TT_MFP_SCSI); #ifdef REAL_DMA tt_scsi_dma.dma_ctrl = 0; #endif /* REAL_DMA */ } else { atari_turnoff_irq(IRQ_MFP_FSCSI); #ifdef REAL_DMA st_dma.dma_mode_status = 0x90; atari_dma_active = 0; atari_dma_orig_addr = NULL; #endif /* REAL_DMA */ } rv = NCR5380_bus_reset(cmd); /* Re-enable ints */ if (IS_A_TT()) { atari_turnon_irq(IRQ_TT_MFP_SCSI); } else { atari_turnon_irq(IRQ_MFP_FSCSI); } if ((rv & SCSI_RESET_ACTION) == SCSI_RESET_SUCCESS) falcon_release_lock_if_possible(hostdata); return rv; } #ifdef CONFIG_ATARI_SCSI_RESET_BOOT static void __init atari_scsi_reset_boot(void) { unsigned long end; /* * Do a SCSI reset to clean up the bus during initialization. No messing * with the queues, interrupts, or locks necessary here. */ printk("Atari SCSI: resetting the SCSI bus..."); /* get in phase */ NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(NCR5380_read(STATUS_REG))); /* assert RST */ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_RST); /* The min. reset hold time is 25us, so 40us should be enough */ udelay(50); /* reset RST and interrupt */ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); NCR5380_read(RESET_PARITY_INTERRUPT_REG); end = jiffies + AFTER_RESET_DELAY; while (time_before(jiffies, end)) barrier(); printk(" done\n"); } #endif const char *atari_scsi_info(struct Scsi_Host *host) { /* atari_scsi_detect() is verbose enough... 
*/ static const char string[] = "Atari native SCSI"; return string; } #if defined(REAL_DMA) unsigned long atari_scsi_dma_setup(struct Scsi_Host *instance, void *data, unsigned long count, int dir) { unsigned long addr = virt_to_phys(data); DMA_PRINTK("scsi%d: setting up dma, data = %p, phys = %lx, count = %ld, " "dir = %d\n", instance->host_no, data, addr, count, dir); if (!IS_A_TT() && !STRAM_ADDR(addr)) { /* If we have a non-DMAable address on a Falcon, use the dribble * buffer; 'orig_addr' != 0 in the read case tells the interrupt * handler to copy data from the dribble buffer to the originally * wanted address. */ if (dir) memcpy(atari_dma_buffer, data, count); else atari_dma_orig_addr = data; addr = atari_dma_phys_buffer; } atari_dma_startaddr = addr; /* Needed for calculating residual later. */ /* Cache cleanup stuff: On writes, push any dirty cache out before sending * it to the peripheral. (Must be done before DMA setup, since at least * the ST-DMA begins to fill internal buffers right after setup. For * reads, invalidate any cache, may be altered after DMA without CPU * knowledge. * * ++roman: For the Medusa, there's no need at all for that cache stuff, * because the hardware does bus snooping (fine!). */ dma_cache_maintenance(addr, count, dir); if (count == 0) printk(KERN_NOTICE "SCSI warning: DMA programmed for 0 bytes !\n"); if (IS_A_TT()) { tt_scsi_dma.dma_ctrl = dir; SCSI_DMA_WRITE_P(dma_addr, addr); SCSI_DMA_WRITE_P(dma_cnt, count); tt_scsi_dma.dma_ctrl = dir | 2; } else { /* ! IS_A_TT */ /* set address */ SCSI_DMA_SETADR(addr); /* toggle direction bit to clear FIFO and set DMA direction */ dir <<= 8; st_dma.dma_mode_status = 0x90 | dir; st_dma.dma_mode_status = 0x90 | (dir ^ 0x100); st_dma.dma_mode_status = 0x90 | dir; udelay(40); /* On writes, round up the transfer length to the next multiple of 512 * (see also comment at atari_dma_xfer_len()). */ st_dma.fdc_acces_seccount = (count + (dir ? 
511 : 0)) >> 9; udelay(40); st_dma.dma_mode_status = 0x10 | dir; udelay(40); /* need not restore value of dir, only boolean value is tested */ atari_dma_active = 1; } return count; } static long atari_scsi_dma_residual(struct Scsi_Host *instance) { return atari_dma_residual; } #define CMD_SURELY_BLOCK_MODE 0 #define CMD_SURELY_BYTE_MODE 1 #define CMD_MODE_UNKNOWN 2 static int falcon_classify_cmd(Scsi_Cmnd *cmd) { unsigned char opcode = cmd->cmnd[0]; if (opcode == READ_DEFECT_DATA || opcode == READ_LONG || opcode == READ_BUFFER) return CMD_SURELY_BYTE_MODE; else if (opcode == READ_6 || opcode == READ_10 || opcode == 0xa8 /* READ_12 */ || opcode == READ_REVERSE || opcode == RECOVER_BUFFERED_DATA) { /* In case of a sequential-access target (tape), special care is * needed here: The transfer is block-mode only if the 'fixed' bit is * set! */ if (cmd->device->type == TYPE_TAPE && !(cmd->cmnd[1] & 1)) return CMD_SURELY_BYTE_MODE; else return CMD_SURELY_BLOCK_MODE; } else return CMD_MODE_UNKNOWN; } /* This function calculates the number of bytes that can be transferred via * DMA. On the TT, this is arbitrary, but on the Falcon we have to use the * ST-DMA chip. There are only multiples of 512 bytes possible and max. * 255*512 bytes :-( This means also, that defining READ_OVERRUNS is not * possible on the Falcon, since that would require to program the DMA for * n*512 - atari_read_overrun bytes. But it seems that the Falcon doesn't have * the overrun problem, so this question is academic :-) */ static unsigned long atari_dma_xfer_len(unsigned long wanted_len, Scsi_Cmnd *cmd, int write_flag) { unsigned long possible_len, limit; if (IS_A_TT()) /* TT SCSI DMA can transfer arbitrary #bytes */ return wanted_len; /* ST DMA chip is stupid -- only multiples of 512 bytes! (and max. * 255*512 bytes, but this should be enough) * * ++roman: Aaargl! Another Falcon-SCSI problem... There are some commands * that return a number of bytes which cannot be known beforehand. 
In this * case, the given transfer length is an "allocation length". Now it * can happen that this allocation length is a multiple of 512 bytes and * the DMA is used. But if not n*512 bytes really arrive, some input data * will be lost in the ST-DMA's FIFO :-( Thus, we have to distinguish * between commands that do block transfers and those that do byte * transfers. But this isn't easy... there are lots of vendor specific * commands, and the user can issue any command via the * SCSI_IOCTL_SEND_COMMAND. * * The solution: We classify SCSI commands in 1) surely block-mode cmd.s, * 2) surely byte-mode cmd.s and 3) cmd.s with unknown mode. In case 1) * and 3), the thing to do is obvious: allow any number of blocks via DMA * or none. In case 2), we apply some heuristic: Byte mode is assumed if * the transfer (allocation) length is < 1024, hoping that no cmd. not * explicitly known as byte mode have such big allocation lengths... * BTW, all the discussion above applies only to reads. DMA writes are * unproblematic anyways, since the targets aborts the transfer after * receiving a sufficient number of bytes. * * Another point: If the transfer is from/to an non-ST-RAM address, we * use the dribble buffer and thus can do only STRAM_BUFFER_SIZE bytes. */ if (write_flag) { /* Write operation can always use the DMA, but the transfer size must * be rounded up to the next multiple of 512 (atari_dma_setup() does * this). */ possible_len = wanted_len; } else { /* Read operations: if the wanted transfer length is not a multiple of * 512, we cannot use DMA, since the ST-DMA cannot split transfers * (no interrupt on DMA finished!) 
*/ if (wanted_len & 0x1ff) possible_len = 0; else { /* Now classify the command (see above) and decide whether it is * allowed to do DMA at all */ switch (falcon_classify_cmd(cmd)) { case CMD_SURELY_BLOCK_MODE: possible_len = wanted_len; break; case CMD_SURELY_BYTE_MODE: possible_len = 0; /* DMA prohibited */ break; case CMD_MODE_UNKNOWN: default: /* For unknown commands assume block transfers if the transfer * size/allocation length is >= 1024 */ possible_len = (wanted_len < 1024) ? 0 : wanted_len; break; } } } /* Last step: apply the hard limit on DMA transfers */ limit = (atari_dma_buffer && !STRAM_ADDR(virt_to_phys(cmd->SCp.ptr))) ? STRAM_BUFFER_SIZE : 255*512; if (possible_len > limit) possible_len = limit; if (possible_len != wanted_len) DMA_PRINTK("Sorry, must cut DMA transfer size to %ld bytes " "instead of %ld\n", possible_len, wanted_len); return possible_len; } #endif /* REAL_DMA */ /* NCR5380 register access functions * * There are separate functions for TT and Falcon, because the access * methods are quite different. The calling macros NCR5380_read and * NCR5380_write call these functions via function pointers. 
*/ static unsigned char atari_scsi_tt_reg_read(unsigned char reg) { return tt_scsi_regp[reg * 2]; } static void atari_scsi_tt_reg_write(unsigned char reg, unsigned char value) { tt_scsi_regp[reg * 2] = value; } static unsigned char atari_scsi_falcon_reg_read(unsigned char reg) { dma_wd.dma_mode_status= (u_short)(0x88 + reg); return (u_char)dma_wd.fdc_acces_seccount; } static void atari_scsi_falcon_reg_write(unsigned char reg, unsigned char value) { dma_wd.dma_mode_status = (u_short)(0x88 + reg); dma_wd.fdc_acces_seccount = (u_short)value; } #include "atari_NCR5380.c" static struct scsi_host_template driver_template = { .proc_info = atari_scsi_proc_info, .name = "Atari native SCSI", .detect = atari_scsi_detect, .release = atari_scsi_release, .info = atari_scsi_info, .queuecommand = atari_scsi_queue_command, .eh_abort_handler = atari_scsi_abort, .eh_bus_reset_handler = atari_scsi_bus_reset, .can_queue = 0, /* initialized at run-time */ .this_id = 0, /* initialized at run-time */ .sg_tablesize = 0, /* initialized at run-time */ .cmd_per_lun = 0, /* initialized at run-time */ .use_clustering = DISABLE_CLUSTERING }; #include "scsi_module.c" MODULE_LICENSE("GPL");
gpl-2.0
Ca1ne/Nitrous-Sense-Kernel
arch/arm/mach-integrator/pci.c
4221
3190
/* * linux/arch/arm/mach-integrator/pci-integrator.c * * Copyright (C) 1999 ARM Limited * Copyright (C) 2000 Deep Blue Solutions Ltd * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * * PCI functions for Integrator */ #include <linux/kernel.h> #include <linux/pci.h> #include <linux/interrupt.h> #include <linux/init.h> #include <asm/irq.h> #include <asm/system.h> #include <asm/mach/pci.h> #include <asm/mach-types.h> /* * A small note about bridges and interrupts. The DECchip 21050 (and * later) adheres to the PCI-PCI bridge specification. This says that * the interrupts on the other side of a bridge are swizzled in the * following manner: * * Dev Interrupt Interrupt * Pin on Pin on * Device Connector * * 4 A A * B B * C C * D D * * 5 A B * B C * C D * D A * * 6 A C * B D * C A * D B * * 7 A D * B A * C B * D C * * Where A = pin 1, B = pin 2 and so on and pin=0 = default = A. * Thus, each swizzle is ((pin-1) + (device#-4)) % 4 */ /* * This routine handles multiple bridges. */ static u8 __init integrator_swizzle(struct pci_dev *dev, u8 *pinp) { int pin = *pinp; if (pin == 0) pin = 1; while (dev->bus->self) { pin = pci_swizzle_interrupt_pin(dev, pin); /* * move up the chain of bridges, swizzling as we go. 
*/ dev = dev->bus->self; } *pinp = pin; return PCI_SLOT(dev->devfn); } static int irq_tab[4] __initdata = { IRQ_AP_PCIINT0, IRQ_AP_PCIINT1, IRQ_AP_PCIINT2, IRQ_AP_PCIINT3 }; /* * map the specified device/slot/pin to an IRQ. This works out such * that slot 9 pin 1 is INT0, pin 2 is INT1, and slot 10 pin 1 is INT1. */ static int __init integrator_map_irq(struct pci_dev *dev, u8 slot, u8 pin) { int intnr = ((slot - 9) + (pin - 1)) & 3; return irq_tab[intnr]; } extern void pci_v3_init(void *); static struct hw_pci integrator_pci __initdata = { .swizzle = integrator_swizzle, .map_irq = integrator_map_irq, .setup = pci_v3_setup, .nr_controllers = 1, .scan = pci_v3_scan_bus, .preinit = pci_v3_preinit, .postinit = pci_v3_postinit, }; static int __init integrator_pci_init(void) { if (machine_is_integrator()) pci_common_init(&integrator_pci); return 0; } subsys_initcall(integrator_pci_init);
gpl-2.0
davidftv/CC-A80-kernel-source
arch/arm/mach-omap1/io.c
4733
3838
/* * linux/arch/arm/mach-omap1/io.c * * OMAP1 I/O mapping code * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/io.h> #include <asm/tlb.h> #include <asm/mach/map.h> #include <plat/mux.h> #include <plat/tc.h> #include "iomap.h" #include "common.h" #include "clock.h" extern void omap_check_revision(void); /* * The machine specific code may provide the extra mapping besides the * default mapping provided here. */ static struct map_desc omap_io_desc[] __initdata = { { .virtual = OMAP1_IO_VIRT, .pfn = __phys_to_pfn(OMAP1_IO_PHYS), .length = OMAP1_IO_SIZE, .type = MT_DEVICE } }; #if defined (CONFIG_ARCH_OMAP730) || defined (CONFIG_ARCH_OMAP850) static struct map_desc omap7xx_io_desc[] __initdata = { { .virtual = OMAP7XX_DSP_BASE, .pfn = __phys_to_pfn(OMAP7XX_DSP_START), .length = OMAP7XX_DSP_SIZE, .type = MT_DEVICE }, { .virtual = OMAP7XX_DSPREG_BASE, .pfn = __phys_to_pfn(OMAP7XX_DSPREG_START), .length = OMAP7XX_DSPREG_SIZE, .type = MT_DEVICE } }; #endif #ifdef CONFIG_ARCH_OMAP15XX static struct map_desc omap1510_io_desc[] __initdata = { { .virtual = OMAP1510_DSP_BASE, .pfn = __phys_to_pfn(OMAP1510_DSP_START), .length = OMAP1510_DSP_SIZE, .type = MT_DEVICE }, { .virtual = OMAP1510_DSPREG_BASE, .pfn = __phys_to_pfn(OMAP1510_DSPREG_START), .length = OMAP1510_DSPREG_SIZE, .type = MT_DEVICE } }; #endif #if defined(CONFIG_ARCH_OMAP16XX) static struct map_desc omap16xx_io_desc[] __initdata = { { .virtual = OMAP16XX_DSP_BASE, .pfn = __phys_to_pfn(OMAP16XX_DSP_START), .length = OMAP16XX_DSP_SIZE, .type = MT_DEVICE }, { .virtual = OMAP16XX_DSPREG_BASE, .pfn = __phys_to_pfn(OMAP16XX_DSPREG_START), .length = OMAP16XX_DSPREG_SIZE, .type = MT_DEVICE } }; #endif /* * Maps common IO regions for omap1 */ static void __init omap1_map_common_io(void) { 
iotable_init(omap_io_desc, ARRAY_SIZE(omap_io_desc)); } #if defined (CONFIG_ARCH_OMAP730) || defined (CONFIG_ARCH_OMAP850) void __init omap7xx_map_io(void) { omap1_map_common_io(); iotable_init(omap7xx_io_desc, ARRAY_SIZE(omap7xx_io_desc)); } #endif #ifdef CONFIG_ARCH_OMAP15XX void __init omap15xx_map_io(void) { omap1_map_common_io(); iotable_init(omap1510_io_desc, ARRAY_SIZE(omap1510_io_desc)); } #endif #if defined(CONFIG_ARCH_OMAP16XX) void __init omap16xx_map_io(void) { omap1_map_common_io(); iotable_init(omap16xx_io_desc, ARRAY_SIZE(omap16xx_io_desc)); } #endif /* * Common low-level hardware init for omap1. */ void __init omap1_init_early(void) { omap_check_revision(); /* REVISIT: Refer to OMAP5910 Errata, Advisory SYS_1: "Timeout Abort * on a Posted Write in the TIPB Bridge". */ omap_writew(0x0, MPU_PUBLIC_TIPB_CNTL); omap_writew(0x0, MPU_PRIVATE_TIPB_CNTL); /* Must init clocks early to assure that timer interrupt works */ omap1_clk_init(); omap1_mux_init(); omap_init_consistent_dma_size(); } /* * NOTE: Please use ioremap + __raw_read/write where possible instead of these */ u8 omap_readb(u32 pa) { return __raw_readb(OMAP1_IO_ADDRESS(pa)); } EXPORT_SYMBOL(omap_readb); u16 omap_readw(u32 pa) { return __raw_readw(OMAP1_IO_ADDRESS(pa)); } EXPORT_SYMBOL(omap_readw); u32 omap_readl(u32 pa) { return __raw_readl(OMAP1_IO_ADDRESS(pa)); } EXPORT_SYMBOL(omap_readl); void omap_writeb(u8 v, u32 pa) { __raw_writeb(v, OMAP1_IO_ADDRESS(pa)); } EXPORT_SYMBOL(omap_writeb); void omap_writew(u16 v, u32 pa) { __raw_writew(v, OMAP1_IO_ADDRESS(pa)); } EXPORT_SYMBOL(omap_writew); void omap_writel(u32 v, u32 pa) { __raw_writel(v, OMAP1_IO_ADDRESS(pa)); } EXPORT_SYMBOL(omap_writel);
gpl-2.0
snandlal/samsung_kernel
drivers/misc/ibmasm/heartbeat.c
4989
3205
/* * IBM ASM Service Processor Device Driver * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * * Copyright (C) IBM Corporation, 2004 * * Author: Max Asböck <amax@us.ibm.com> * */ #include <linux/notifier.h> #include "ibmasm.h" #include "dot_command.h" #include "lowlevel.h" static int suspend_heartbeats = 0; /* * Once the driver indicates to the service processor that it is running * - see send_os_state() - the service processor sends periodic heartbeats * to the driver. The driver must respond to the heartbeats or else the OS * will be rebooted. * In the case of a panic the interrupt handler continues to work and thus * continues to respond to heartbeats, making the service processor believe * the OS is still running and thus preventing a reboot. * To prevent this from happening a callback is added the panic_notifier_list. * Before responding to a heartbeat the driver checks if a panic has happened, * if yes it suspends heartbeat, causing the service processor to reboot as * expected. 
*/ static int panic_happened(struct notifier_block *n, unsigned long val, void *v) { suspend_heartbeats = 1; return 0; } static struct notifier_block panic_notifier = { panic_happened, NULL, 1 }; void ibmasm_register_panic_notifier(void) { atomic_notifier_chain_register(&panic_notifier_list, &panic_notifier); } void ibmasm_unregister_panic_notifier(void) { atomic_notifier_chain_unregister(&panic_notifier_list, &panic_notifier); } int ibmasm_heartbeat_init(struct service_processor *sp) { sp->heartbeat = ibmasm_new_command(sp, HEARTBEAT_BUFFER_SIZE); if (sp->heartbeat == NULL) return -ENOMEM; return 0; } void ibmasm_heartbeat_exit(struct service_processor *sp) { char tsbuf[32]; dbg("%s:%d at %s\n", __func__, __LINE__, get_timestamp(tsbuf)); ibmasm_wait_for_response(sp->heartbeat, IBMASM_CMD_TIMEOUT_NORMAL); dbg("%s:%d at %s\n", __func__, __LINE__, get_timestamp(tsbuf)); suspend_heartbeats = 1; command_put(sp->heartbeat); } void ibmasm_receive_heartbeat(struct service_processor *sp, void *message, size_t size) { struct command *cmd = sp->heartbeat; struct dot_command_header *header = (struct dot_command_header *)cmd->buffer; char tsbuf[32]; dbg("%s:%d at %s\n", __func__, __LINE__, get_timestamp(tsbuf)); if (suspend_heartbeats) return; /* return the received dot command to sender */ cmd->status = IBMASM_CMD_PENDING; size = min(size, cmd->buffer_size); memcpy_fromio(cmd->buffer, message, size); header->type = sp_write; ibmasm_exec_command(sp, cmd); }
gpl-2.0
smac0628/htc_gpe_51
arch/mips/bcm63xx/dev-uart.c
6781
1747
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr> */ #include <linux/init.h> #include <linux/kernel.h> #include <linux/platform_device.h> #include <bcm63xx_cpu.h> static struct resource uart0_resources[] = { { /* start & end filled at runtime */ .flags = IORESOURCE_MEM, }, { /* start filled at runtime */ .flags = IORESOURCE_IRQ, }, }; static struct resource uart1_resources[] = { { /* start & end filled at runtime */ .flags = IORESOURCE_MEM, }, { /* start filled at runtime */ .flags = IORESOURCE_IRQ, }, }; static struct platform_device bcm63xx_uart_devices[] = { { .name = "bcm63xx_uart", .id = 0, .num_resources = ARRAY_SIZE(uart0_resources), .resource = uart0_resources, }, { .name = "bcm63xx_uart", .id = 1, .num_resources = ARRAY_SIZE(uart1_resources), .resource = uart1_resources, } }; int __init bcm63xx_uart_register(unsigned int id) { if (id >= ARRAY_SIZE(bcm63xx_uart_devices)) return -ENODEV; if (id == 1 && (!BCMCPU_IS_6358() && !BCMCPU_IS_6368())) return -ENODEV; if (id == 0) { uart0_resources[0].start = bcm63xx_regset_address(RSET_UART0); uart0_resources[0].end = uart0_resources[0].start + RSET_UART_SIZE - 1; uart0_resources[1].start = bcm63xx_get_irq_number(IRQ_UART0); } if (id == 1) { uart1_resources[0].start = bcm63xx_regset_address(RSET_UART1); uart1_resources[0].end = uart1_resources[0].start + RSET_UART_SIZE - 1; uart1_resources[1].start = bcm63xx_get_irq_number(IRQ_UART1); } return platform_device_register(&bcm63xx_uart_devices[id]); }
gpl-2.0
shane87/linux_holiday-ics
drivers/ata/pata_cs5530.c
9085
9692
/* * pata-cs5530.c - CS5530 PATA for new ATA layer * (C) 2005 Red Hat Inc * * based upon cs5530.c by Mark Lord. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * Loosely based on the piix & svwks drivers. * * Documentation: * Available from AMD web site. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/init.h> #include <linux/blkdev.h> #include <linux/delay.h> #include <scsi/scsi_host.h> #include <linux/libata.h> #include <linux/dmi.h> #define DRV_NAME "pata_cs5530" #define DRV_VERSION "0.7.4" static void __iomem *cs5530_port_base(struct ata_port *ap) { unsigned long bmdma = (unsigned long)ap->ioaddr.bmdma_addr; return (void __iomem *)((bmdma & ~0x0F) + 0x20 + 0x10 * ap->port_no); } /** * cs5530_set_piomode - PIO setup * @ap: ATA interface * @adev: device on the interface * * Set our PIO requirements. This is fairly simple on the CS5530 * chips. */ static void cs5530_set_piomode(struct ata_port *ap, struct ata_device *adev) { static const unsigned int cs5530_pio_timings[2][5] = { {0x00009172, 0x00012171, 0x00020080, 0x00032010, 0x00040010}, {0xd1329172, 0x71212171, 0x30200080, 0x20102010, 0x00100010} }; void __iomem *base = cs5530_port_base(ap); u32 tuning; int format; /* Find out which table to use */ tuning = ioread32(base + 0x04); format = (tuning & 0x80000000UL) ? 
1 : 0; /* Now load the right timing register */ if (adev->devno) base += 0x08; iowrite32(cs5530_pio_timings[format][adev->pio_mode - XFER_PIO_0], base); } /** * cs5530_set_dmamode - DMA timing setup * @ap: ATA interface * @adev: Device being configured * * We cannot mix MWDMA and UDMA without reloading timings each switch * master to slave. We track the last DMA setup in order to minimise * reloads. */ static void cs5530_set_dmamode(struct ata_port *ap, struct ata_device *adev) { void __iomem *base = cs5530_port_base(ap); u32 tuning, timing = 0; u8 reg; /* Find out which table to use */ tuning = ioread32(base + 0x04); switch(adev->dma_mode) { case XFER_UDMA_0: timing = 0x00921250;break; case XFER_UDMA_1: timing = 0x00911140;break; case XFER_UDMA_2: timing = 0x00911030;break; case XFER_MW_DMA_0: timing = 0x00077771;break; case XFER_MW_DMA_1: timing = 0x00012121;break; case XFER_MW_DMA_2: timing = 0x00002020;break; default: BUG(); } /* Merge in the PIO format bit */ timing |= (tuning & 0x80000000UL); if (adev->devno == 0) /* Master */ iowrite32(timing, base + 0x04); else { if (timing & 0x00100000) tuning |= 0x00100000; /* UDMA for both */ else tuning &= ~0x00100000; /* MWDMA for both */ iowrite32(tuning, base + 0x04); iowrite32(timing, base + 0x0C); } /* Set the DMA capable bit in the BMDMA area */ reg = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS); reg |= (1 << (5 + adev->devno)); iowrite8(reg, ap->ioaddr.bmdma_addr + ATA_DMA_STATUS); /* Remember the last DMA setup we did */ ap->private_data = adev; } /** * cs5530_qc_issue - command issue * @qc: command pending * * Called when the libata layer is about to issue a command. We wrap * this interface so that we can load the correct ATA timings if * necessary. Specifically we have a problem that there is only * one MWDMA/UDMA bit. 
*/ static unsigned int cs5530_qc_issue(struct ata_queued_cmd *qc) { struct ata_port *ap = qc->ap; struct ata_device *adev = qc->dev; struct ata_device *prev = ap->private_data; /* See if the DMA settings could be wrong */ if (ata_dma_enabled(adev) && adev != prev && prev != NULL) { /* Maybe, but do the channels match MWDMA/UDMA ? */ if ((ata_using_udma(adev) && !ata_using_udma(prev)) || (ata_using_udma(prev) && !ata_using_udma(adev))) /* Switch the mode bits */ cs5530_set_dmamode(ap, adev); } return ata_bmdma_qc_issue(qc); } static struct scsi_host_template cs5530_sht = { ATA_BMDMA_SHT(DRV_NAME), .sg_tablesize = LIBATA_DUMB_MAX_PRD, }; static struct ata_port_operations cs5530_port_ops = { .inherits = &ata_bmdma_port_ops, .qc_prep = ata_bmdma_dumb_qc_prep, .qc_issue = cs5530_qc_issue, .cable_detect = ata_cable_40wire, .set_piomode = cs5530_set_piomode, .set_dmamode = cs5530_set_dmamode, }; static const struct dmi_system_id palmax_dmi_table[] = { { .ident = "Palmax PD1100", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Cyrix"), DMI_MATCH(DMI_PRODUCT_NAME, "Caddis"), }, }, { } }; static int cs5530_is_palmax(void) { if (dmi_check_system(palmax_dmi_table)) { printk(KERN_INFO "Palmax PD1100: Disabling DMA on docking port.\n"); return 1; } return 0; } /** * cs5530_init_chip - Chipset init * * Perform the chip initialisation work that is shared between both * setup and resume paths */ static int cs5530_init_chip(void) { struct pci_dev *master_0 = NULL, *cs5530_0 = NULL, *dev = NULL; while ((dev = pci_get_device(PCI_VENDOR_ID_CYRIX, PCI_ANY_ID, dev)) != NULL) { switch (dev->device) { case PCI_DEVICE_ID_CYRIX_PCI_MASTER: master_0 = pci_dev_get(dev); break; case PCI_DEVICE_ID_CYRIX_5530_LEGACY: cs5530_0 = pci_dev_get(dev); break; } } if (!master_0) { printk(KERN_ERR DRV_NAME ": unable to locate PCI MASTER function\n"); goto fail_put; } if (!cs5530_0) { printk(KERN_ERR DRV_NAME ": unable to locate CS5530 LEGACY function\n"); goto fail_put; } pci_set_master(cs5530_0); 
pci_try_set_mwi(cs5530_0); /* * Set PCI CacheLineSize to 16-bytes: * --> Write 0x04 into 8-bit PCI CACHELINESIZE reg of function 0 of the cs5530 * * Note: This value is constant because the 5530 is only a Geode companion */ pci_write_config_byte(cs5530_0, PCI_CACHE_LINE_SIZE, 0x04); /* * Disable trapping of UDMA register accesses (Win98 hack): * --> Write 0x5006 into 16-bit reg at offset 0xd0 of function 0 of the cs5530 */ pci_write_config_word(cs5530_0, 0xd0, 0x5006); /* * Bit-1 at 0x40 enables MemoryWriteAndInvalidate on internal X-bus: * The other settings are what is necessary to get the register * into a sane state for IDE DMA operation. */ pci_write_config_byte(master_0, 0x40, 0x1e); /* * Set max PCI burst size (16-bytes seems to work best): * 16bytes: set bit-1 at 0x41 (reg value of 0x16) * all others: clear bit-1 at 0x41, and do: * 128bytes: OR 0x00 at 0x41 * 256bytes: OR 0x04 at 0x41 * 512bytes: OR 0x08 at 0x41 * 1024bytes: OR 0x0c at 0x41 */ pci_write_config_byte(master_0, 0x41, 0x14); /* * These settings are necessary to get the chip * into a sane state for IDE DMA operation. */ pci_write_config_byte(master_0, 0x42, 0x00); pci_write_config_byte(master_0, 0x43, 0xc1); pci_dev_put(master_0); pci_dev_put(cs5530_0); return 0; fail_put: if (master_0) pci_dev_put(master_0); if (cs5530_0) pci_dev_put(cs5530_0); return -ENODEV; } /** * cs5530_init_one - Initialise a CS5530 * @dev: PCI device * @id: Entry in match table * * Install a driver for the newly found CS5530 companion chip. Most of * this is just housekeeping. We have to set the chip up correctly and * turn off various bits of emulation magic. 
*/ static int cs5530_init_one(struct pci_dev *pdev, const struct pci_device_id *id) { static const struct ata_port_info info = { .flags = ATA_FLAG_SLAVE_POSS, .pio_mask = ATA_PIO4, .mwdma_mask = ATA_MWDMA2, .udma_mask = ATA_UDMA2, .port_ops = &cs5530_port_ops }; /* The docking connector doesn't do UDMA, and it seems not MWDMA */ static const struct ata_port_info info_palmax_secondary = { .flags = ATA_FLAG_SLAVE_POSS, .pio_mask = ATA_PIO4, .port_ops = &cs5530_port_ops }; const struct ata_port_info *ppi[] = { &info, NULL }; int rc; rc = pcim_enable_device(pdev); if (rc) return rc; /* Chip initialisation */ if (cs5530_init_chip()) return -ENODEV; if (cs5530_is_palmax()) ppi[1] = &info_palmax_secondary; /* Now kick off ATA set up */ return ata_pci_bmdma_init_one(pdev, ppi, &cs5530_sht, NULL, 0); } #ifdef CONFIG_PM static int cs5530_reinit_one(struct pci_dev *pdev) { struct ata_host *host = dev_get_drvdata(&pdev->dev); int rc; rc = ata_pci_device_do_resume(pdev); if (rc) return rc; /* If we fail on resume we are doomed */ if (cs5530_init_chip()) return -EIO; ata_host_resume(host); return 0; } #endif /* CONFIG_PM */ static const struct pci_device_id cs5530[] = { { PCI_VDEVICE(CYRIX, PCI_DEVICE_ID_CYRIX_5530_IDE), }, { }, }; static struct pci_driver cs5530_pci_driver = { .name = DRV_NAME, .id_table = cs5530, .probe = cs5530_init_one, .remove = ata_pci_remove_one, #ifdef CONFIG_PM .suspend = ata_pci_device_suspend, .resume = cs5530_reinit_one, #endif }; static int __init cs5530_init(void) { return pci_register_driver(&cs5530_pci_driver); } static void __exit cs5530_exit(void) { pci_unregister_driver(&cs5530_pci_driver); } MODULE_AUTHOR("Alan Cox"); MODULE_DESCRIPTION("low-level driver for the Cyrix/NS/AMD 5530"); MODULE_LICENSE("GPL"); MODULE_DEVICE_TABLE(pci, cs5530); MODULE_VERSION(DRV_VERSION); module_init(cs5530_init); module_exit(cs5530_exit);
gpl-2.0
SlimForce/kernel_lge_hammerhead
drivers/tty/hvc/hvc_iucv.c
10365
38281
/* * hvc_iucv.c - z/VM IUCV hypervisor console (HVC) device driver * * This HVC device driver provides terminal access using * z/VM IUCV communication paths. * * Copyright IBM Corp. 2008, 2009 * * Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com> */ #define KMSG_COMPONENT "hvc_iucv" #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt #include <linux/types.h> #include <linux/slab.h> #include <asm/ebcdic.h> #include <linux/ctype.h> #include <linux/delay.h> #include <linux/device.h> #include <linux/init.h> #include <linux/mempool.h> #include <linux/moduleparam.h> #include <linux/tty.h> #include <linux/wait.h> #include <net/iucv/iucv.h> #include "hvc_console.h" /* General device driver settings */ #define HVC_IUCV_MAGIC 0xc9e4c3e5 #define MAX_HVC_IUCV_LINES HVC_ALLOC_TTY_ADAPTERS #define MEMPOOL_MIN_NR (PAGE_SIZE / sizeof(struct iucv_tty_buffer)/4) /* IUCV TTY message */ #define MSG_VERSION 0x02 /* Message version */ #define MSG_TYPE_ERROR 0x01 /* Error message */ #define MSG_TYPE_TERMENV 0x02 /* Terminal environment variable */ #define MSG_TYPE_TERMIOS 0x04 /* Terminal IO struct update */ #define MSG_TYPE_WINSIZE 0x08 /* Terminal window size update */ #define MSG_TYPE_DATA 0x10 /* Terminal data */ struct iucv_tty_msg { u8 version; /* Message version */ u8 type; /* Message type */ #define MSG_MAX_DATALEN ((u16)(~0)) u16 datalen; /* Payload length */ u8 data[]; /* Payload buffer */ } __attribute__((packed)); #define MSG_SIZE(s) ((s) + offsetof(struct iucv_tty_msg, data)) enum iucv_state_t { IUCV_DISCONN = 0, IUCV_CONNECTED = 1, IUCV_SEVERED = 2, }; enum tty_state_t { TTY_CLOSED = 0, TTY_OPENED = 1, }; struct hvc_iucv_private { struct hvc_struct *hvc; /* HVC struct reference */ u8 srv_name[8]; /* IUCV service name (ebcdic) */ unsigned char is_console; /* Linux console usage flag */ enum iucv_state_t iucv_state; /* IUCV connection status */ enum tty_state_t tty_state; /* TTY status */ struct iucv_path *path; /* IUCV path pointer */ spinlock_t lock; /* hvc_iucv_private 
lock */ #define SNDBUF_SIZE (PAGE_SIZE) /* must be < MSG_MAX_DATALEN */ void *sndbuf; /* send buffer */ size_t sndbuf_len; /* length of send buffer */ #define QUEUE_SNDBUF_DELAY (HZ / 25) struct delayed_work sndbuf_work; /* work: send iucv msg(s) */ wait_queue_head_t sndbuf_waitq; /* wait for send completion */ struct list_head tty_outqueue; /* outgoing IUCV messages */ struct list_head tty_inqueue; /* incoming IUCV messages */ struct device *dev; /* device structure */ }; struct iucv_tty_buffer { struct list_head list; /* list pointer */ struct iucv_message msg; /* store an IUCV message */ size_t offset; /* data buffer offset */ struct iucv_tty_msg *mbuf; /* buffer to store input/output data */ }; /* IUCV callback handler */ static int hvc_iucv_path_pending(struct iucv_path *, u8[8], u8[16]); static void hvc_iucv_path_severed(struct iucv_path *, u8[16]); static void hvc_iucv_msg_pending(struct iucv_path *, struct iucv_message *); static void hvc_iucv_msg_complete(struct iucv_path *, struct iucv_message *); /* Kernel module parameter: use one terminal device as default */ static unsigned long hvc_iucv_devices = 1; /* Array of allocated hvc iucv tty lines... */ static struct hvc_iucv_private *hvc_iucv_table[MAX_HVC_IUCV_LINES]; #define IUCV_HVC_CON_IDX (0) /* List of z/VM user ID filter entries (struct iucv_vmid_filter) */ #define MAX_VMID_FILTER (500) static size_t hvc_iucv_filter_size; static void *hvc_iucv_filter; static const char *hvc_iucv_filter_string; static DEFINE_RWLOCK(hvc_iucv_filter_lock); /* Kmem cache and mempool for iucv_tty_buffer elements */ static struct kmem_cache *hvc_iucv_buffer_cache; static mempool_t *hvc_iucv_mempool; /* IUCV handler callback functions */ static struct iucv_handler hvc_iucv_handler = { .path_pending = hvc_iucv_path_pending, .path_severed = hvc_iucv_path_severed, .message_complete = hvc_iucv_msg_complete, .message_pending = hvc_iucv_msg_pending, }; /** * hvc_iucv_get_private() - Return a struct hvc_iucv_private instance. 
* @num: The HVC virtual terminal number (vtermno) * * This function returns the struct hvc_iucv_private instance that corresponds * to the HVC virtual terminal number specified as parameter @num. */ struct hvc_iucv_private *hvc_iucv_get_private(uint32_t num) { if ((num < HVC_IUCV_MAGIC) || (num - HVC_IUCV_MAGIC > hvc_iucv_devices)) return NULL; return hvc_iucv_table[num - HVC_IUCV_MAGIC]; } /** * alloc_tty_buffer() - Return a new struct iucv_tty_buffer element. * @size: Size of the internal buffer used to store data. * @flags: Memory allocation flags passed to mempool. * * This function allocates a new struct iucv_tty_buffer element and, optionally, * allocates an internal data buffer with the specified size @size. * The internal data buffer is always allocated with GFP_DMA which is * required for receiving and sending data with IUCV. * Note: The total message size arises from the internal buffer size and the * members of the iucv_tty_msg structure. * The function returns NULL if memory allocation has failed. */ static struct iucv_tty_buffer *alloc_tty_buffer(size_t size, gfp_t flags) { struct iucv_tty_buffer *bufp; bufp = mempool_alloc(hvc_iucv_mempool, flags); if (!bufp) return NULL; memset(bufp, 0, sizeof(*bufp)); if (size > 0) { bufp->msg.length = MSG_SIZE(size); bufp->mbuf = kmalloc(bufp->msg.length, flags | GFP_DMA); if (!bufp->mbuf) { mempool_free(bufp, hvc_iucv_mempool); return NULL; } bufp->mbuf->version = MSG_VERSION; bufp->mbuf->type = MSG_TYPE_DATA; bufp->mbuf->datalen = (u16) size; } return bufp; } /** * destroy_tty_buffer() - destroy struct iucv_tty_buffer element. * @bufp: Pointer to a struct iucv_tty_buffer element, SHALL NOT be NULL. */ static void destroy_tty_buffer(struct iucv_tty_buffer *bufp) { kfree(bufp->mbuf); mempool_free(bufp, hvc_iucv_mempool); } /** * destroy_tty_buffer_list() - call destroy_tty_buffer() for each list element. * @list: List containing struct iucv_tty_buffer elements. 
 */
static void destroy_tty_buffer_list(struct list_head *list)
{
	struct iucv_tty_buffer *ent, *next;

	list_for_each_entry_safe(ent, next, list, list) {
		list_del(&ent->list);
		destroy_tty_buffer(ent);
	}
}

/**
 * hvc_iucv_write() - Receive IUCV message & write data to HVC buffer.
 * @priv:		Pointer to struct hvc_iucv_private
 * @buf:		HVC buffer for writing received terminal data.
 * @count:		HVC buffer size.
 * @has_more_data:	Pointer to an int variable.
 *
 * The function picks up pending messages from the input queue and receives
 * the message data that is then written to the specified buffer @buf.
 * If the buffer size @count is less than the data message size, the
 * message is kept on the input queue and @has_more_data is set to 1.
 * If all message data has been written, the message is removed from
 * the input queue.
 *
 * The function returns the number of bytes written to the terminal, zero if
 * there are no pending data messages available or if there is no established
 * IUCV path.
 * If the IUCV path has been severed, then -EPIPE is returned to cause a
 * hang up (that is issued by the HVC layer).
 *
 * The caller must hold priv->lock.
 */
static int hvc_iucv_write(struct hvc_iucv_private *priv,
			  char *buf, int count, int *has_more_data)
{
	struct iucv_tty_buffer *rb;
	int written;
	int rc;

	/* immediately return if there is no IUCV connection */
	if (priv->iucv_state == IUCV_DISCONN)
		return 0;

	/* if the IUCV path has been severed, return -EPIPE to inform the
	 * HVC layer to hang up the tty device. */
	if (priv->iucv_state == IUCV_SEVERED)
		return -EPIPE;

	/* check if there are pending messages */
	if (list_empty(&priv->tty_inqueue))
		return 0;

	/* receive an iucv message and flip data to the tty (ldisc) */
	rb = list_first_entry(&priv->tty_inqueue, struct iucv_tty_buffer, list);

	written = 0;
	if (!rb->mbuf) { /* message not yet received ... */
		/* allocate mem to store msg data; if no memory is available
		 * then leave the buffer on the list and re-try later */
		rb->mbuf = kmalloc(rb->msg.length, GFP_ATOMIC | GFP_DMA);
		if (!rb->mbuf)
			return -ENOMEM;

		rc = __iucv_message_receive(priv->path, &rb->msg, 0,
					    rb->mbuf, rb->msg.length, NULL);
		switch (rc) {
		case 0: /* Successful	    */
			break;
		case 2:	/* No message found */
		case 9: /* Message purged   */
			break;
		default:
			written = -EIO;
		}
		/* remove buffer if an error has occurred or received data
		 * is not correct */
		if (rc || (rb->mbuf->version != MSG_VERSION) ||
			  (rb->msg.length != MSG_SIZE(rb->mbuf->datalen)))
			goto out_remove_buffer;
	}

	switch (rb->mbuf->type) {
	case MSG_TYPE_DATA:
		/* copy as much as fits; keep the rest for the next call */
		written = min_t(int, rb->mbuf->datalen - rb->offset, count);
		memcpy(buf, rb->mbuf->data + rb->offset, written);
		if (written < (rb->mbuf->datalen - rb->offset)) {
			rb->offset += written;
			*has_more_data = 1;
			goto out_written;
		}
		break;

	case MSG_TYPE_WINSIZE:
		if (rb->mbuf->datalen != sizeof(struct winsize))
			break;
		/* The caller must ensure that the hvc is locked, which
		 * is the case when called from hvc_iucv_get_chars() */
		__hvc_resize(priv->hvc, *((struct winsize *) rb->mbuf->data));
		break;

	case MSG_TYPE_ERROR:	/* ignored ... */
	case MSG_TYPE_TERMENV:	/* ignored ... */
	case MSG_TYPE_TERMIOS:	/* ignored ... */
		break;
	}

out_remove_buffer:
	list_del(&rb->list);
	destroy_tty_buffer(rb);
	*has_more_data = !list_empty(&priv->tty_inqueue);

out_written:
	return written;
}

/**
 * hvc_iucv_get_chars() - HVC get_chars operation.
 * @vtermno:	HVC virtual terminal number.
 * @buf:	Pointer to a buffer to store data
 * @count:	Size of buffer available for writing
 *
 * The HVC thread calls this method to read characters from the back-end.
 * If an IUCV communication path has been established, pending IUCV messages
 * are received and data is copied into buffer @buf up to @count bytes.
* * Locking: The routine gets called under an irqsave() spinlock; and * the routine locks the struct hvc_iucv_private->lock to call * helper functions. */ static int hvc_iucv_get_chars(uint32_t vtermno, char *buf, int count) { struct hvc_iucv_private *priv = hvc_iucv_get_private(vtermno); int written; int has_more_data; if (count <= 0) return 0; if (!priv) return -ENODEV; spin_lock(&priv->lock); has_more_data = 0; written = hvc_iucv_write(priv, buf, count, &has_more_data); spin_unlock(&priv->lock); /* if there are still messages on the queue... schedule another run */ if (has_more_data) hvc_kick(); return written; } /** * hvc_iucv_queue() - Buffer terminal data for sending. * @priv: Pointer to struct hvc_iucv_private instance. * @buf: Buffer containing data to send. * @count: Size of buffer and amount of data to send. * * The function queues data for sending. To actually send the buffered data, * a work queue function is scheduled (with QUEUE_SNDBUF_DELAY). * The function returns the number of data bytes that has been buffered. * * If the device is not connected, data is ignored and the function returns * @count. * If the buffer is full, the function returns 0. * If an existing IUCV communicaton path has been severed, -EPIPE is returned * (that can be passed to HVC layer to cause a tty hangup). */ static int hvc_iucv_queue(struct hvc_iucv_private *priv, const char *buf, int count) { size_t len; if (priv->iucv_state == IUCV_DISCONN) return count; /* ignore data */ if (priv->iucv_state == IUCV_SEVERED) return -EPIPE; len = min_t(size_t, count, SNDBUF_SIZE - priv->sndbuf_len); if (!len) return 0; memcpy(priv->sndbuf + priv->sndbuf_len, buf, len); priv->sndbuf_len += len; if (priv->iucv_state == IUCV_CONNECTED) schedule_delayed_work(&priv->sndbuf_work, QUEUE_SNDBUF_DELAY); return len; } /** * hvc_iucv_send() - Send an IUCV message containing terminal data. * @priv: Pointer to struct hvc_iucv_private instance. 
* * If an IUCV communication path has been established, the buffered output data * is sent via an IUCV message and the number of bytes sent is returned. * Returns 0 if there is no established IUCV communication path or * -EPIPE if an existing IUCV communicaton path has been severed. */ static int hvc_iucv_send(struct hvc_iucv_private *priv) { struct iucv_tty_buffer *sb; int rc, len; if (priv->iucv_state == IUCV_SEVERED) return -EPIPE; if (priv->iucv_state == IUCV_DISCONN) return -EIO; if (!priv->sndbuf_len) return 0; /* allocate internal buffer to store msg data and also compute total * message length */ sb = alloc_tty_buffer(priv->sndbuf_len, GFP_ATOMIC); if (!sb) return -ENOMEM; memcpy(sb->mbuf->data, priv->sndbuf, priv->sndbuf_len); sb->mbuf->datalen = (u16) priv->sndbuf_len; sb->msg.length = MSG_SIZE(sb->mbuf->datalen); list_add_tail(&sb->list, &priv->tty_outqueue); rc = __iucv_message_send(priv->path, &sb->msg, 0, 0, (void *) sb->mbuf, sb->msg.length); if (rc) { /* drop the message here; however we might want to handle * 0x03 (msg limit reached) by trying again... */ list_del(&sb->list); destroy_tty_buffer(sb); } len = priv->sndbuf_len; priv->sndbuf_len = 0; return len; } /** * hvc_iucv_sndbuf_work() - Send buffered data over IUCV * @work: Work structure. * * This work queue function sends buffered output data over IUCV and, * if not all buffered data could be sent, reschedules itself. */ static void hvc_iucv_sndbuf_work(struct work_struct *work) { struct hvc_iucv_private *priv; priv = container_of(work, struct hvc_iucv_private, sndbuf_work.work); if (!priv) return; spin_lock_bh(&priv->lock); hvc_iucv_send(priv); spin_unlock_bh(&priv->lock); } /** * hvc_iucv_put_chars() - HVC put_chars operation. * @vtermno: HVC virtual terminal number. * @buf: Pointer to an buffer to read data from * @count: Size of buffer available for reading * * The HVC thread calls this method to write characters to the back-end. 
* The function calls hvc_iucv_queue() to queue terminal data for sending. * * Locking: The method gets called under an irqsave() spinlock; and * locks struct hvc_iucv_private->lock. */ static int hvc_iucv_put_chars(uint32_t vtermno, const char *buf, int count) { struct hvc_iucv_private *priv = hvc_iucv_get_private(vtermno); int queued; if (count <= 0) return 0; if (!priv) return -ENODEV; spin_lock(&priv->lock); queued = hvc_iucv_queue(priv, buf, count); spin_unlock(&priv->lock); return queued; } /** * hvc_iucv_notifier_add() - HVC notifier for opening a TTY for the first time. * @hp: Pointer to the HVC device (struct hvc_struct) * @id: Additional data (originally passed to hvc_alloc): the index of an struct * hvc_iucv_private instance. * * The function sets the tty state to TTY_OPENED for the struct hvc_iucv_private * instance that is derived from @id. Always returns 0. * * Locking: struct hvc_iucv_private->lock, spin_lock_bh */ static int hvc_iucv_notifier_add(struct hvc_struct *hp, int id) { struct hvc_iucv_private *priv; priv = hvc_iucv_get_private(id); if (!priv) return 0; spin_lock_bh(&priv->lock); priv->tty_state = TTY_OPENED; spin_unlock_bh(&priv->lock); return 0; } /** * hvc_iucv_cleanup() - Clean up and reset a z/VM IUCV HVC instance. * @priv: Pointer to the struct hvc_iucv_private instance. */ static void hvc_iucv_cleanup(struct hvc_iucv_private *priv) { destroy_tty_buffer_list(&priv->tty_outqueue); destroy_tty_buffer_list(&priv->tty_inqueue); priv->tty_state = TTY_CLOSED; priv->iucv_state = IUCV_DISCONN; priv->sndbuf_len = 0; } /** * tty_outqueue_empty() - Test if the tty outq is empty * @priv: Pointer to struct hvc_iucv_private instance. */ static inline int tty_outqueue_empty(struct hvc_iucv_private *priv) { int rc; spin_lock_bh(&priv->lock); rc = list_empty(&priv->tty_outqueue); spin_unlock_bh(&priv->lock); return rc; } /** * flush_sndbuf_sync() - Flush send buffer and wait for completion * @priv: Pointer to struct hvc_iucv_private instance. 
 *
 * The routine cancels a pending sndbuf work, calls hvc_iucv_send()
 * to flush any buffered terminal output data and waits for completion.
 */
static void flush_sndbuf_sync(struct hvc_iucv_private *priv)
{
	int sync_wait;

	cancel_delayed_work_sync(&priv->sndbuf_work);

	spin_lock_bh(&priv->lock);
	hvc_iucv_send(priv);		/* force sending buffered data */
	sync_wait = !list_empty(&priv->tty_outqueue); /* anything queued ? */
	spin_unlock_bh(&priv->lock);

	if (sync_wait)
		wait_event_timeout(priv->sndbuf_waitq,
				   tty_outqueue_empty(priv), HZ/10);
}

/**
 * hvc_iucv_hangup() - Sever IUCV path and schedule hvc tty hang up
 * @priv:	Pointer to hvc_iucv_private structure
 *
 * This routine severs an existing IUCV communication path and hangs
 * up the underlying HVC terminal device.
 * The hang-up occurs only if an IUCV communication path is established;
 * otherwise there is no need to hang up the terminal device.
 *
 * The IUCV HVC hang-up is separated into two steps:
 * 1. After the IUCV path has been severed, the iucv_state is set to
 *    IUCV_SEVERED.
 * 2. Later, when the HVC thread calls hvc_iucv_get_chars(), the
 *    IUCV_SEVERED state causes the tty hang-up in the HVC layer.
 *
 * If the tty has not yet been opened, clean up the hvc_iucv_private
 * structure to allow re-connects.
 * If the tty has been opened, let get_chars() return -EPIPE to signal
 * the HVC layer to hang up the tty and, if so, wake up the HVC thread
 * to call get_chars()...
 *
 * Special notes on hanging up a HVC terminal instantiated as console:
 * Hang-up:	1. do_tty_hangup() replaces file ops (= hung_up_tty_fops)
 *		2. do_tty_hangup() calls tty->ops->close() for console_filp
 *		   => no hangup notifier is called by HVC (default)
 *		2. hvc_close() returns because of tty_hung_up_p(filp)
 *		   => no delete notifier is called!
 * Finally, the back-end is not being notified, thus, the tty session is
 * kept active (TTY_OPENED) to be ready for re-connects.
 *
 * Locking:	spin_lock(&priv->lock) w/o disabling bh
 */
static void hvc_iucv_hangup(struct hvc_iucv_private *priv)
{
	struct iucv_path *path;

	path = NULL;
	spin_lock(&priv->lock);
	if (priv->iucv_state == IUCV_CONNECTED) {
		path = priv->path;
		priv->path = NULL;
		priv->iucv_state = IUCV_SEVERED;
		if (priv->tty_state == TTY_CLOSED)
			hvc_iucv_cleanup(priv);
		else
			/* console is special (see above) */
			if (priv->is_console) {
				hvc_iucv_cleanup(priv);
				priv->tty_state = TTY_OPENED;
			} else
				hvc_kick();
	}
	spin_unlock(&priv->lock);

	/* finally sever path (outside of priv->lock due to lock ordering) */
	if (path) {
		iucv_path_sever(path, NULL);
		iucv_path_free(path);
	}
}

/**
 * hvc_iucv_notifier_hangup() - HVC notifier for TTY hangups.
 * @hp:		Pointer to the HVC device (struct hvc_struct)
 * @id:		Additional data (originally passed to hvc_alloc):
 *		the index of a struct hvc_iucv_private instance.
 *
 * This routine notifies the HVC back-end that a tty hangup (carrier loss,
 * virtual or otherwise) has occurred.
 * The z/VM IUCV HVC device driver ignores virtual hangups (vhangup())
 * to keep an existing IUCV communication path established.
 * (Background: vhangup() is called from user space (by getty or login) to
 * disable writing to the tty by other applications).
 * If the tty has been opened and an established IUCV path has been severed
 * (we caused the tty hangup), the function calls hvc_iucv_cleanup().
 *
 * Locking:	struct hvc_iucv_private->lock
 */
static void hvc_iucv_notifier_hangup(struct hvc_struct *hp, int id)
{
	struct hvc_iucv_private *priv;

	priv = hvc_iucv_get_private(id);
	if (!priv)
		return;

	flush_sndbuf_sync(priv);

	spin_lock_bh(&priv->lock);
	/* NOTE: If the hangup was scheduled by ourself (from the iucv
	 *	 path_severed callback [IUCV_SEVERED]), we have to clean up
	 *	 our structure and to set state to TTY_CLOSED.
	 *	 If the tty was hung up otherwise (e.g. vhangup()), then we
	 *	 ignore this hangup and keep an established IUCV path open...
	 *	 (...the reason is that we are not able to connect back to the
	 *	 client if we disconnect on hang up) */
	priv->tty_state = TTY_CLOSED;

	if (priv->iucv_state == IUCV_SEVERED)
		hvc_iucv_cleanup(priv);
	spin_unlock_bh(&priv->lock);
}

/**
 * hvc_iucv_notifier_del() - HVC notifier for closing a TTY for the last time.
 * @hp:		Pointer to the HVC device (struct hvc_struct)
 * @id:		Additional data (originally passed to hvc_alloc):
 *		the index of a struct hvc_iucv_private instance.
 *
 * This routine notifies the HVC back-end that the last tty device fd has been
 * closed. The function calls hvc_iucv_cleanup() to clean up the struct
 * hvc_iucv_private instance.
 *
 * Locking:	struct hvc_iucv_private->lock
 */
static void hvc_iucv_notifier_del(struct hvc_struct *hp, int id)
{
	struct hvc_iucv_private *priv;
	struct iucv_path	*path;

	priv = hvc_iucv_get_private(id);
	if (!priv)
		return;

	flush_sndbuf_sync(priv);

	spin_lock_bh(&priv->lock);
	path = priv->path;		/* save reference to IUCV path */
	priv->path = NULL;
	hvc_iucv_cleanup(priv);
	spin_unlock_bh(&priv->lock);

	/* sever IUCV path outside of priv->lock due to lock ordering of:
	 * priv->lock <--> iucv_table_lock */
	if (path) {
		iucv_path_sever(path, NULL);
		iucv_path_free(path);
	}
}

/**
 * hvc_iucv_filter_connreq() - Filter connection request based on z/VM user ID
 * @ipvmid:	Originating z/VM user ID (right padded with blanks)
 *
 * Returns 0 if the z/VM user ID @ipvmid is allowed to connect, otherwise
 * non-zero.
 */
static int hvc_iucv_filter_connreq(u8 ipvmid[8])
{
	size_t i;

	/* Note: default policy is ACCEPT if no filter is set */
	if (!hvc_iucv_filter_size)
		return 0;

	for (i = 0; i < hvc_iucv_filter_size; i++)
		if (0 == memcmp(ipvmid, hvc_iucv_filter + (8 * i), 8))
			return 0;
	return 1;
}

/**
 * hvc_iucv_path_pending() - IUCV handler to process a connection request.
 * @path:	Pending path (struct iucv_path)
 * @ipvmid:	z/VM system identifier of originator
 * @ipuser:	User specified data for this path
 *		(AF_IUCV: port/service name and originator port)
 *
 * The function uses the @ipuser data to determine if the pending path belongs
 * to a terminal managed by this device driver.
 * If the path belongs to this driver, ensure that the terminal is not accessed
 * multiple times (only one connection to a terminal is allowed).
 * If the terminal is not yet connected, the pending path is accepted and is
 * associated to the appropriate struct hvc_iucv_private instance.
 *
 * Returns 0 if @path belongs to a terminal managed by this device driver;
 * otherwise returns -ENODEV in order to dispatch this path to other handlers.
 *
 * Locking:	struct hvc_iucv_private->lock
 */
static	int hvc_iucv_path_pending(struct iucv_path *path,
				  u8 ipvmid[8], u8 ipuser[16])
{
	struct hvc_iucv_private *priv;
	u8 nuser_data[16];
	u8 vm_user_id[9];
	int i, rc;

	/* find the line whose (EBCDIC) service name matches the request */
	priv = NULL;
	for (i = 0; i < hvc_iucv_devices; i++)
		if (hvc_iucv_table[i] &&
		    (0 == memcmp(hvc_iucv_table[i]->srv_name, ipuser, 8))) {
			priv = hvc_iucv_table[i];
			break;
		}
	if (!priv)
		return -ENODEV;

	/* Enforce that ipvmid is allowed to connect to us */
	read_lock(&hvc_iucv_filter_lock);
	rc = hvc_iucv_filter_connreq(ipvmid);
	read_unlock(&hvc_iucv_filter_lock);
	if (rc) {
		iucv_path_sever(path, ipuser);
		iucv_path_free(path);
		memcpy(vm_user_id, ipvmid, 8);
		vm_user_id[8] = 0;
		pr_info("A connection request from z/VM user ID %s "
			"was refused\n", vm_user_id);
		return 0;
	}

	spin_lock(&priv->lock);

	/* If the terminal is already connected or being severed, then sever
	 * this path to enforce that there is only ONE established communication
	 * path per terminal. */
	if (priv->iucv_state != IUCV_DISCONN) {
		iucv_path_sever(path, ipuser);
		iucv_path_free(path);
		goto out_path_handled;
	}

	/* accept path */
	memcpy(nuser_data, ipuser + 8, 8);  /* remote service (for af_iucv) */
	memcpy(nuser_data + 8, ipuser, 8);  /* local service  (for af_iucv) */
	path->msglim = 0xffff;		    /* IUCV MSGLIMIT */
	path->flags &= ~IUCV_IPRMDATA;	    /* TODO: use IUCV_IPRMDATA */
	rc = iucv_path_accept(path, &hvc_iucv_handler, nuser_data, priv);
	if (rc) {
		iucv_path_sever(path, ipuser);
		iucv_path_free(path);
		goto out_path_handled;
	}
	priv->path = path;
	priv->iucv_state = IUCV_CONNECTED;

	/* flush buffered output data... */
	schedule_delayed_work(&priv->sndbuf_work, 5);

out_path_handled:
	spin_unlock(&priv->lock);
	return 0;
}

/**
 * hvc_iucv_path_severed() - IUCV handler to process a path sever.
 * @path:	Pending path (struct iucv_path)
 * @ipuser:	User specified data for this path
 *		(AF_IUCV: port/service name and originator port)
 *
 * This function calls the hvc_iucv_hangup() function for the
 * respective IUCV HVC terminal.
 *
 * Locking:	struct hvc_iucv_private->lock
 */
static void hvc_iucv_path_severed(struct iucv_path *path, u8 ipuser[16])
{
	struct hvc_iucv_private *priv = path->private;

	hvc_iucv_hangup(priv);
}

/**
 * hvc_iucv_msg_pending() - IUCV handler to process an incoming IUCV message.
 * @path:	Pending path (struct iucv_path)
 * @msg:	Pointer to the IUCV message
 *
 * The function puts an incoming message on the input queue for later
 * processing (by hvc_iucv_get_chars() / hvc_iucv_write()).
 * If the tty has not yet been opened, the message is rejected.
* * Locking: struct hvc_iucv_private->lock */ static void hvc_iucv_msg_pending(struct iucv_path *path, struct iucv_message *msg) { struct hvc_iucv_private *priv = path->private; struct iucv_tty_buffer *rb; /* reject messages that exceed max size of iucv_tty_msg->datalen */ if (msg->length > MSG_SIZE(MSG_MAX_DATALEN)) { iucv_message_reject(path, msg); return; } spin_lock(&priv->lock); /* reject messages if tty has not yet been opened */ if (priv->tty_state == TTY_CLOSED) { iucv_message_reject(path, msg); goto unlock_return; } /* allocate tty buffer to save iucv msg only */ rb = alloc_tty_buffer(0, GFP_ATOMIC); if (!rb) { iucv_message_reject(path, msg); goto unlock_return; /* -ENOMEM */ } rb->msg = *msg; list_add_tail(&rb->list, &priv->tty_inqueue); hvc_kick(); /* wake up hvc thread */ unlock_return: spin_unlock(&priv->lock); } /** * hvc_iucv_msg_complete() - IUCV handler to process message completion * @path: Pending path (struct iucv_path) * @msg: Pointer to the IUCV message * * The function is called upon completion of message delivery to remove the * message from the outqueue. Additional delivery information can be found * msg->audit: rejected messages (0x040000 (IPADRJCT)), and * purged messages (0x010000 (IPADPGNR)). * * Locking: struct hvc_iucv_private->lock */ static void hvc_iucv_msg_complete(struct iucv_path *path, struct iucv_message *msg) { struct hvc_iucv_private *priv = path->private; struct iucv_tty_buffer *ent, *next; LIST_HEAD(list_remove); spin_lock(&priv->lock); list_for_each_entry_safe(ent, next, &priv->tty_outqueue, list) if (ent->msg.id == msg->id) { list_move(&ent->list, &list_remove); break; } wake_up(&priv->sndbuf_waitq); spin_unlock(&priv->lock); destroy_tty_buffer_list(&list_remove); } /** * hvc_iucv_pm_freeze() - Freeze PM callback * @dev: IUVC HVC terminal device * * Sever an established IUCV communication path and * trigger a hang-up of the underlying HVC terminal. 
*/ static int hvc_iucv_pm_freeze(struct device *dev) { struct hvc_iucv_private *priv = dev_get_drvdata(dev); local_bh_disable(); hvc_iucv_hangup(priv); local_bh_enable(); return 0; } /** * hvc_iucv_pm_restore_thaw() - Thaw and restore PM callback * @dev: IUVC HVC terminal device * * Wake up the HVC thread to trigger hang-up and respective * HVC back-end notifier invocations. */ static int hvc_iucv_pm_restore_thaw(struct device *dev) { hvc_kick(); return 0; } /* HVC operations */ static const struct hv_ops hvc_iucv_ops = { .get_chars = hvc_iucv_get_chars, .put_chars = hvc_iucv_put_chars, .notifier_add = hvc_iucv_notifier_add, .notifier_del = hvc_iucv_notifier_del, .notifier_hangup = hvc_iucv_notifier_hangup, }; /* Suspend / resume device operations */ static const struct dev_pm_ops hvc_iucv_pm_ops = { .freeze = hvc_iucv_pm_freeze, .thaw = hvc_iucv_pm_restore_thaw, .restore = hvc_iucv_pm_restore_thaw, }; /* IUCV HVC device driver */ static struct device_driver hvc_iucv_driver = { .name = KMSG_COMPONENT, .bus = &iucv_bus, .pm = &hvc_iucv_pm_ops, }; /** * hvc_iucv_alloc() - Allocates a new struct hvc_iucv_private instance * @id: hvc_iucv_table index * @is_console: Flag if the instance is used as Linux console * * This function allocates a new hvc_iucv_private structure and stores * the instance in hvc_iucv_table at index @id. * Returns 0 on success; otherwise non-zero. 
*/ static int __init hvc_iucv_alloc(int id, unsigned int is_console) { struct hvc_iucv_private *priv; char name[9]; int rc; priv = kzalloc(sizeof(struct hvc_iucv_private), GFP_KERNEL); if (!priv) return -ENOMEM; spin_lock_init(&priv->lock); INIT_LIST_HEAD(&priv->tty_outqueue); INIT_LIST_HEAD(&priv->tty_inqueue); INIT_DELAYED_WORK(&priv->sndbuf_work, hvc_iucv_sndbuf_work); init_waitqueue_head(&priv->sndbuf_waitq); priv->sndbuf = (void *) get_zeroed_page(GFP_KERNEL); if (!priv->sndbuf) { kfree(priv); return -ENOMEM; } /* set console flag */ priv->is_console = is_console; /* allocate hvc device */ priv->hvc = hvc_alloc(HVC_IUCV_MAGIC + id, /* PAGE_SIZE */ HVC_IUCV_MAGIC + id, &hvc_iucv_ops, 256); if (IS_ERR(priv->hvc)) { rc = PTR_ERR(priv->hvc); goto out_error_hvc; } /* notify HVC thread instead of using polling */ priv->hvc->irq_requested = 1; /* setup iucv related information */ snprintf(name, 9, "lnxhvc%-2d", id); memcpy(priv->srv_name, name, 8); ASCEBC(priv->srv_name, 8); /* create and setup device */ priv->dev = kzalloc(sizeof(*priv->dev), GFP_KERNEL); if (!priv->dev) { rc = -ENOMEM; goto out_error_dev; } dev_set_name(priv->dev, "hvc_iucv%d", id); dev_set_drvdata(priv->dev, priv); priv->dev->bus = &iucv_bus; priv->dev->parent = iucv_root; priv->dev->driver = &hvc_iucv_driver; priv->dev->release = (void (*)(struct device *)) kfree; rc = device_register(priv->dev); if (rc) { put_device(priv->dev); goto out_error_dev; } hvc_iucv_table[id] = priv; return 0; out_error_dev: hvc_remove(priv->hvc); out_error_hvc: free_page((unsigned long) priv->sndbuf); kfree(priv); return rc; } /** * hvc_iucv_destroy() - Destroy and free hvc_iucv_private instances */ static void __init hvc_iucv_destroy(struct hvc_iucv_private *priv) { hvc_remove(priv->hvc); device_unregister(priv->dev); free_page((unsigned long) priv->sndbuf); kfree(priv); } /** * hvc_iucv_parse_filter() - Parse filter for a single z/VM user ID * @filter: String containing a comma-separated list of z/VM user IDs */ 
static const char *hvc_iucv_parse_filter(const char *filter, char *dest) { const char *nextdelim, *residual; size_t len; nextdelim = strchr(filter, ','); if (nextdelim) { len = nextdelim - filter; residual = nextdelim + 1; } else { len = strlen(filter); residual = filter + len; } if (len == 0) return ERR_PTR(-EINVAL); /* check for '\n' (if called from sysfs) */ if (filter[len - 1] == '\n') len--; if (len > 8) return ERR_PTR(-EINVAL); /* pad with blanks and save upper case version of user ID */ memset(dest, ' ', 8); while (len--) dest[len] = toupper(filter[len]); return residual; } /** * hvc_iucv_setup_filter() - Set up z/VM user ID filter * @filter: String consisting of a comma-separated list of z/VM user IDs * * The function parses the @filter string and creates an array containing * the list of z/VM user ID filter entries. * Return code 0 means success, -EINVAL if the filter is syntactically * incorrect, -ENOMEM if there was not enough memory to allocate the * filter list array, or -ENOSPC if too many z/VM user IDs have been specified. 
*/ static int hvc_iucv_setup_filter(const char *val) { const char *residual; int err; size_t size, count; void *array, *old_filter; count = strlen(val); if (count == 0 || (count == 1 && val[0] == '\n')) { size = 0; array = NULL; goto out_replace_filter; /* clear filter */ } /* count user IDs in order to allocate sufficient memory */ size = 1; residual = val; while ((residual = strchr(residual, ',')) != NULL) { residual++; size++; } /* check if the specified list exceeds the filter limit */ if (size > MAX_VMID_FILTER) return -ENOSPC; array = kzalloc(size * 8, GFP_KERNEL); if (!array) return -ENOMEM; count = size; residual = val; while (*residual && count) { residual = hvc_iucv_parse_filter(residual, array + ((size - count) * 8)); if (IS_ERR(residual)) { err = PTR_ERR(residual); kfree(array); goto out_err; } count--; } out_replace_filter: write_lock_bh(&hvc_iucv_filter_lock); old_filter = hvc_iucv_filter; hvc_iucv_filter_size = size; hvc_iucv_filter = array; write_unlock_bh(&hvc_iucv_filter_lock); kfree(old_filter); err = 0; out_err: return err; } /** * param_set_vmidfilter() - Set z/VM user ID filter parameter * @val: String consisting of a comma-separated list of z/VM user IDs * @kp: Kernel parameter pointing to hvc_iucv_filter array * * The function sets up the z/VM user ID filter specified as comma-separated * list of user IDs in @val. * Note: If it is called early in the boot process, @val is stored and * parsed later in hvc_iucv_init(). */ static int param_set_vmidfilter(const char *val, const struct kernel_param *kp) { int rc; if (!MACHINE_IS_VM || !hvc_iucv_devices) return -ENODEV; if (!val) return -EINVAL; rc = 0; if (slab_is_available()) rc = hvc_iucv_setup_filter(val); else hvc_iucv_filter_string = val; /* defer... 
*/ return rc; } /** * param_get_vmidfilter() - Get z/VM user ID filter * @buffer: Buffer to store z/VM user ID filter, * (buffer size assumption PAGE_SIZE) * @kp: Kernel parameter pointing to the hvc_iucv_filter array * * The function stores the filter as a comma-separated list of z/VM user IDs * in @buffer. Typically, sysfs routines call this function for attr show. */ static int param_get_vmidfilter(char *buffer, const struct kernel_param *kp) { int rc; size_t index, len; void *start, *end; if (!MACHINE_IS_VM || !hvc_iucv_devices) return -ENODEV; rc = 0; read_lock_bh(&hvc_iucv_filter_lock); for (index = 0; index < hvc_iucv_filter_size; index++) { start = hvc_iucv_filter + (8 * index); end = memchr(start, ' ', 8); len = (end) ? end - start : 8; memcpy(buffer + rc, start, len); rc += len; buffer[rc++] = ','; } read_unlock_bh(&hvc_iucv_filter_lock); if (rc) buffer[--rc] = '\0'; /* replace last comma and update rc */ return rc; } #define param_check_vmidfilter(name, p) __param_check(name, p, void) static struct kernel_param_ops param_ops_vmidfilter = { .set = param_set_vmidfilter, .get = param_get_vmidfilter, }; /** * hvc_iucv_init() - z/VM IUCV HVC device driver initialization */ static int __init hvc_iucv_init(void) { int rc; unsigned int i; if (!hvc_iucv_devices) return -ENODEV; if (!MACHINE_IS_VM) { pr_notice("The z/VM IUCV HVC device driver cannot " "be used without z/VM\n"); rc = -ENODEV; goto out_error; } if (hvc_iucv_devices > MAX_HVC_IUCV_LINES) { pr_err("%lu is not a valid value for the hvc_iucv= " "kernel parameter\n", hvc_iucv_devices); rc = -EINVAL; goto out_error; } /* register IUCV HVC device driver */ rc = driver_register(&hvc_iucv_driver); if (rc) goto out_error; /* parse hvc_iucv_allow string and create z/VM user ID filter list */ if (hvc_iucv_filter_string) { rc = hvc_iucv_setup_filter(hvc_iucv_filter_string); switch (rc) { case 0: break; case -ENOMEM: pr_err("Allocating memory failed with " "reason code=%d\n", 3); goto out_error; case -EINVAL: 
pr_err("hvc_iucv_allow= does not specify a valid " "z/VM user ID list\n"); goto out_error; case -ENOSPC: pr_err("hvc_iucv_allow= specifies too many " "z/VM user IDs\n"); goto out_error; default: goto out_error; } } hvc_iucv_buffer_cache = kmem_cache_create(KMSG_COMPONENT, sizeof(struct iucv_tty_buffer), 0, 0, NULL); if (!hvc_iucv_buffer_cache) { pr_err("Allocating memory failed with reason code=%d\n", 1); rc = -ENOMEM; goto out_error; } hvc_iucv_mempool = mempool_create_slab_pool(MEMPOOL_MIN_NR, hvc_iucv_buffer_cache); if (!hvc_iucv_mempool) { pr_err("Allocating memory failed with reason code=%d\n", 2); kmem_cache_destroy(hvc_iucv_buffer_cache); rc = -ENOMEM; goto out_error; } /* register the first terminal device as console * (must be done before allocating hvc terminal devices) */ rc = hvc_instantiate(HVC_IUCV_MAGIC, IUCV_HVC_CON_IDX, &hvc_iucv_ops); if (rc) { pr_err("Registering HVC terminal device as " "Linux console failed\n"); goto out_error_memory; } /* allocate hvc_iucv_private structs */ for (i = 0; i < hvc_iucv_devices; i++) { rc = hvc_iucv_alloc(i, (i == IUCV_HVC_CON_IDX) ? 
1 : 0); if (rc) { pr_err("Creating a new HVC terminal device " "failed with error code=%d\n", rc); goto out_error_hvc; } } /* register IUCV callback handler */ rc = iucv_register(&hvc_iucv_handler, 0); if (rc) { pr_err("Registering IUCV handlers failed with error code=%d\n", rc); goto out_error_hvc; } return 0; out_error_hvc: for (i = 0; i < hvc_iucv_devices; i++) if (hvc_iucv_table[i]) hvc_iucv_destroy(hvc_iucv_table[i]); out_error_memory: mempool_destroy(hvc_iucv_mempool); kmem_cache_destroy(hvc_iucv_buffer_cache); out_error: if (hvc_iucv_filter) kfree(hvc_iucv_filter); hvc_iucv_devices = 0; /* ensure that we do not provide any device */ return rc; } /** * hvc_iucv_config() - Parsing of hvc_iucv= kernel command line parameter * @val: Parameter value (numeric) */ static int __init hvc_iucv_config(char *val) { return strict_strtoul(val, 10, &hvc_iucv_devices); } device_initcall(hvc_iucv_init); __setup("hvc_iucv=", hvc_iucv_config); core_param(hvc_iucv_allow, hvc_iucv_filter, vmidfilter, 0640);
gpl-2.0
deafnote/kernel_cross_a28
arch/mips/math-emu/dp_simple.c
10365
1879
/* IEEE754 floating point arithmetic * double precision: common utilities */ /* * MIPS floating point support * Copyright (C) 1994-2000 Algorithmics Ltd. * * ######################################################################## * * This program is free software; you can distribute it and/or modify it * under the terms of the GNU General Public License (Version 2) as * published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. * * ######################################################################## */ #include "ieee754dp.h" int ieee754dp_finite(ieee754dp x) { return DPBEXP(x) != DP_EMAX + 1 + DP_EBIAS; } ieee754dp ieee754dp_copysign(ieee754dp x, ieee754dp y) { CLEARCX; DPSIGN(x) = DPSIGN(y); return x; } ieee754dp ieee754dp_neg(ieee754dp x) { COMPXDP; EXPLODEXDP; CLEARCX; FLUSHXDP; /* * Invert the sign ALWAYS to prevent an endless recursion on * pow() in libc. */ /* quick fix up */ DPSIGN(x) ^= 1; if (xc == IEEE754_CLASS_SNAN) { ieee754dp y = ieee754dp_indef(); SETCX(IEEE754_INVALID_OPERATION); DPSIGN(y) = DPSIGN(x); return ieee754dp_nanxcpt(y, "neg"); } return x; } ieee754dp ieee754dp_abs(ieee754dp x) { COMPXDP; EXPLODEXDP; CLEARCX; FLUSHXDP; /* Clear sign ALWAYS, irrespective of NaN */ DPSIGN(x) = 0; if (xc == IEEE754_CLASS_SNAN) { SETCX(IEEE754_INVALID_OPERATION); return ieee754dp_nanxcpt(ieee754dp_indef(), "abs"); } return x; }
gpl-2.0
ea4862/boeffla43_e210k
drivers/gpu/vithar/ump/src/devicedrv/linux/ump_kernel_linux_mem.c
126
5691
/* * * (C) COPYRIGHT 2008-2012 ARM Limited. All rights reserved. * * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. * * A copy of the licence is included with the program, and can also be obtained from Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. * */ #include <ump/ump_kernel_interface.h> #include <ump/src/ump_ioctl.h> #include <linux/module.h> /* kernel module definitions */ #include <linux/fs.h> /* file system operations */ #include <linux/cdev.h> /* character device definitions */ #include <linux/ioport.h> /* request_mem_region */ #include <linux/mm.h> /* memory mananger definitions */ #include <linux/pfn.h> #include <linux/highmem.h> /*kmap*/ #include <linux/compat.h> /* is_compat_task */ #include <common/ump_kernel_core.h> #include <ump_arch.h> #include <common/ump_kernel_priv.h> static void umpp_vm_close(struct vm_area_struct *vma) { umpp_cpu_mapping * mapping; umpp_session * session; ump_dd_handle handle; mapping = (umpp_cpu_mapping*)vma->vm_private_data; UMP_ASSERT(mapping); session = mapping->session; handle = mapping->handle; umpp_dd_remove_cpu_mapping(mapping->handle, mapping); /* will free the mapping object */ ump_dd_release(handle); } static const struct vm_operations_struct umpp_vm_ops = { .close = umpp_vm_close }; int umpp_phys_commit(umpp_allocation * alloc) { uint64_t i; /* round up to a page boundary */ alloc->size = (alloc->size + PAGE_SIZE - 1) & ~((uint64_t)PAGE_SIZE-1) ; /* calculate number of pages */ alloc->blocksCount = alloc->size >> PAGE_SHIFT; if( (sizeof(ump_dd_physical_block_64) * alloc->blocksCount) > ((size_t)-1)) { printk(KERN_WARNING "UMP: umpp_phys_commit - trying to allocate more than possible\n"); return -ENOMEM; } alloc->block_array = kmalloc(sizeof(ump_dd_physical_block_64) * 
alloc->blocksCount, __GFP_HARDWALL | GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN); if (NULL == alloc->block_array) { return -ENOMEM; } for (i = 0; i < alloc->blocksCount; i++) { void * mp; struct page * page = alloc_page(GFP_HIGHUSER | __GFP_NORETRY | __GFP_NOWARN | __GFP_COLD); if (NULL == page) { break; } alloc->block_array[i].addr = page_to_pfn(page) << PAGE_SHIFT; alloc->block_array[i].size = PAGE_SIZE; mp = kmap(page); if (NULL == mp) { __free_page(page); break; } memset(mp, 0x00, PAGE_SIZE); /* instead of __GFP_ZERO, so we can do cache maintenance */ ump_sync_to_memory(PFN_PHYS(page_to_pfn(page)), mp, PAGE_SIZE); kunmap(page); } if (i == alloc->blocksCount) { return 0; } else { uint64_t j; for (j = 0; j < i; j++) { struct page * page; page = pfn_to_page(alloc->block_array[j].addr >> PAGE_SHIFT); __free_page(page); } kfree(alloc->block_array); return -ENOMEM; } } void umpp_phys_free(umpp_allocation * alloc) { uint64_t i; for (i = 0; i < alloc->blocksCount; i++) { __free_page(pfn_to_page(alloc->block_array[i].addr >> PAGE_SHIFT)); } kfree(alloc->block_array); } int umpp_linux_mmap(struct file * filp, struct vm_area_struct * vma) { ump_secure_id id; ump_dd_handle h; size_t offset; int err = -EINVAL; size_t length = vma->vm_end - vma->vm_start; umpp_cpu_mapping * map = NULL; umpp_session *session = filp->private_data; if ( 0 == length ) { return -EINVAL; } map = kzalloc(sizeof(*map), GFP_KERNEL); if (NULL == map) { WARN_ON(1); err = -ENOMEM; goto out; } /* unpack our arg */ #if defined CONFIG_64BIT && CONFIG_64BIT if (is_compat_task()) { #endif id = vma->vm_pgoff >> UMP_LINUX_OFFSET_BITS_32; offset = vma->vm_pgoff & UMP_LINUX_OFFSET_MASK_32; #if defined CONFIG_64BIT && CONFIG_64BIT } else { id = vma->vm_pgoff >> UMP_LINUX_OFFSET_BITS_64; offset = vma->vm_pgoff & UMP_LINUX_OFFSET_MASK_64; } #endif h = ump_dd_from_secure_id(id); if (UMP_DD_INVALID_MEMORY_HANDLE != h) { uint64_t i; uint64_t block_idx; uint64_t block_offset; uint64_t paddr; umpp_allocation * alloc; 
uint64_t last_byte; vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND | VM_RESERVED | VM_IO | VM_MIXEDMAP; vma->vm_ops = &umpp_vm_ops; vma->vm_private_data = map; alloc = (umpp_allocation*)h; if( (alloc->flags & UMP_CONSTRAINT_UNCACHED) != 0) { /* cache disabled flag set, disable caching for cpu mappings */ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); } last_byte = length + (offset << PAGE_SHIFT) - 1; if (last_byte >= alloc->size || last_byte < (offset << PAGE_SHIFT)) { goto err_out; } if (umpp_dd_find_start_block(alloc, offset << PAGE_SHIFT, &block_idx, &block_offset)) { goto err_out; } paddr = alloc->block_array[block_idx].addr + block_offset; for (i = 0; i < (length >> PAGE_SHIFT); i++) { /* check if we've overrrun the current block, if so move to the next block */ if (paddr >= (alloc->block_array[block_idx].addr + alloc->block_array[block_idx].size)) { block_idx++; UMP_ASSERT(block_idx < alloc->blocksCount); paddr = alloc->block_array[block_idx].addr; } err = vm_insert_mixed(vma, vma->vm_start + (i << PAGE_SHIFT), paddr >> PAGE_SHIFT); paddr += PAGE_SIZE; } map->vaddr_start = (void*)vma->vm_start; map->nr_pages = length >> PAGE_SHIFT; map->page_off = offset; map->handle = h; map->session = session; umpp_dd_add_cpu_mapping(h, map); return 0; err_out: ump_dd_release(h); } kfree(map); out: return err; }
gpl-2.0
KOala888/GB_kernel
linux_kernel_galaxyplayer-master/drivers/scsi/nsp32.c
894
91218
/* * NinjaSCSI-32Bi Cardbus, NinjaSCSI-32UDE PCI/CardBus SCSI driver * Copyright (C) 2001, 2002, 2003 * YOKOTA Hiroshi <yokota@netlab.is.tsukuba.ac.jp> * GOTO Masanori <gotom@debian.or.jp>, <gotom@debian.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * * Revision History: * 1.0: Initial Release. * 1.1: Add /proc SDTR status. * Remove obsolete error handler nsp32_reset. * Some clean up. * 1.2: PowerPC (big endian) support. */ #include <linux/module.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/timer.h> #include <linux/ioport.h> #include <linux/major.h> #include <linux/blkdev.h> #include <linux/interrupt.h> #include <linux/pci.h> #include <linux/delay.h> #include <linux/ctype.h> #include <linux/dma-mapping.h> #include <asm/dma.h> #include <asm/system.h> #include <asm/io.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_device.h> #include <scsi/scsi_host.h> #include <scsi/scsi_ioctl.h> #include "nsp32.h" /*********************************************************************** * Module parameters */ static int trans_mode = 0; /* default: BIOS */ module_param (trans_mode, int, 0); MODULE_PARM_DESC(trans_mode, "transfer mode (0: BIOS(default) 1: Async 2: Ultra20M"); #define ASYNC_MODE 1 #define ULTRA20M_MODE 2 static int auto_param = 0; /* default: ON */ module_param (auto_param, bool, 0); MODULE_PARM_DESC(auto_param, "AutoParameter mode (0: ON(default) 1: OFF)"); static int disc_priv = 1; /* default: OFF */ module_param (disc_priv, bool, 0); MODULE_PARM_DESC(disc_priv, 
"disconnection privilege mode (0: ON 1: OFF(default))"); MODULE_AUTHOR("YOKOTA Hiroshi <yokota@netlab.is.tsukuba.ac.jp>, GOTO Masanori <gotom@debian.or.jp>"); MODULE_DESCRIPTION("Workbit NinjaSCSI-32Bi/UDE CardBus/PCI SCSI host bus adapter module"); MODULE_LICENSE("GPL"); static const char *nsp32_release_version = "1.2"; /**************************************************************************** * Supported hardware */ static struct pci_device_id nsp32_pci_table[] __devinitdata = { { .vendor = PCI_VENDOR_ID_IODATA, .device = PCI_DEVICE_ID_NINJASCSI_32BI_CBSC_II, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, .driver_data = MODEL_IODATA, }, { .vendor = PCI_VENDOR_ID_WORKBIT, .device = PCI_DEVICE_ID_NINJASCSI_32BI_KME, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, .driver_data = MODEL_KME, }, { .vendor = PCI_VENDOR_ID_WORKBIT, .device = PCI_DEVICE_ID_NINJASCSI_32BI_WBT, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, .driver_data = MODEL_WORKBIT, }, { .vendor = PCI_VENDOR_ID_WORKBIT, .device = PCI_DEVICE_ID_WORKBIT_STANDARD, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, .driver_data = MODEL_PCI_WORKBIT, }, { .vendor = PCI_VENDOR_ID_WORKBIT, .device = PCI_DEVICE_ID_NINJASCSI_32BI_LOGITEC, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, .driver_data = MODEL_LOGITEC, }, { .vendor = PCI_VENDOR_ID_WORKBIT, .device = PCI_DEVICE_ID_NINJASCSI_32BIB_LOGITEC, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, .driver_data = MODEL_PCI_LOGITEC, }, { .vendor = PCI_VENDOR_ID_WORKBIT, .device = PCI_DEVICE_ID_NINJASCSI_32UDE_MELCO, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, .driver_data = MODEL_PCI_MELCO, }, { .vendor = PCI_VENDOR_ID_WORKBIT, .device = PCI_DEVICE_ID_NINJASCSI_32UDE_MELCO_II, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, .driver_data = MODEL_PCI_MELCO, }, {0,0,}, }; MODULE_DEVICE_TABLE(pci, nsp32_pci_table); static nsp32_hw_data nsp32_data_base; /* probe <-> detect glue */ /* * Period/AckWidth speed conversion table * * Note: This 
period/ackwidth speed table must be in descending order. */ static nsp32_sync_table nsp32_sync_table_40M[] = { /* {PNo, AW, SP, EP, SREQ smpl} Speed(MB/s) Period AckWidth */ {0x1, 0, 0x0c, 0x0c, SMPL_40M}, /* 20.0 : 50ns, 25ns */ {0x2, 0, 0x0d, 0x18, SMPL_40M}, /* 13.3 : 75ns, 25ns */ {0x3, 1, 0x19, 0x19, SMPL_40M}, /* 10.0 : 100ns, 50ns */ {0x4, 1, 0x1a, 0x1f, SMPL_20M}, /* 8.0 : 125ns, 50ns */ {0x5, 2, 0x20, 0x25, SMPL_20M}, /* 6.7 : 150ns, 75ns */ {0x6, 2, 0x26, 0x31, SMPL_20M}, /* 5.7 : 175ns, 75ns */ {0x7, 3, 0x32, 0x32, SMPL_20M}, /* 5.0 : 200ns, 100ns */ {0x8, 3, 0x33, 0x38, SMPL_10M}, /* 4.4 : 225ns, 100ns */ {0x9, 3, 0x39, 0x3e, SMPL_10M}, /* 4.0 : 250ns, 100ns */ }; static nsp32_sync_table nsp32_sync_table_20M[] = { {0x1, 0, 0x19, 0x19, SMPL_40M}, /* 10.0 : 100ns, 50ns */ {0x2, 0, 0x1a, 0x25, SMPL_20M}, /* 6.7 : 150ns, 50ns */ {0x3, 1, 0x26, 0x32, SMPL_20M}, /* 5.0 : 200ns, 100ns */ {0x4, 1, 0x33, 0x3e, SMPL_10M}, /* 4.0 : 250ns, 100ns */ {0x5, 2, 0x3f, 0x4b, SMPL_10M}, /* 3.3 : 300ns, 150ns */ {0x6, 2, 0x4c, 0x57, SMPL_10M}, /* 2.8 : 350ns, 150ns */ {0x7, 3, 0x58, 0x64, SMPL_10M}, /* 2.5 : 400ns, 200ns */ {0x8, 3, 0x65, 0x70, SMPL_10M}, /* 2.2 : 450ns, 200ns */ {0x9, 3, 0x71, 0x7d, SMPL_10M}, /* 2.0 : 500ns, 200ns */ }; static nsp32_sync_table nsp32_sync_table_pci[] = { {0x1, 0, 0x0c, 0x0f, SMPL_40M}, /* 16.6 : 60ns, 30ns */ {0x2, 0, 0x10, 0x16, SMPL_40M}, /* 11.1 : 90ns, 30ns */ {0x3, 1, 0x17, 0x1e, SMPL_20M}, /* 8.3 : 120ns, 60ns */ {0x4, 1, 0x1f, 0x25, SMPL_20M}, /* 6.7 : 150ns, 60ns */ {0x5, 2, 0x26, 0x2d, SMPL_20M}, /* 5.6 : 180ns, 90ns */ {0x6, 2, 0x2e, 0x34, SMPL_10M}, /* 4.8 : 210ns, 90ns */ {0x7, 3, 0x35, 0x3c, SMPL_10M}, /* 4.2 : 240ns, 120ns */ {0x8, 3, 0x3d, 0x43, SMPL_10M}, /* 3.7 : 270ns, 120ns */ {0x9, 3, 0x44, 0x4b, SMPL_10M}, /* 3.3 : 300ns, 120ns */ }; /* * function declaration */ /* module entry point */ static int __devinit nsp32_probe (struct pci_dev *, const struct pci_device_id *); static void __devexit nsp32_remove(struct pci_dev 
*); static int __init init_nsp32 (void); static void __exit exit_nsp32 (void); /* struct struct scsi_host_template */ static int nsp32_proc_info (struct Scsi_Host *, char *, char **, off_t, int, int); static int nsp32_detect (struct pci_dev *pdev); static int nsp32_queuecommand(struct scsi_cmnd *, void (*done)(struct scsi_cmnd *)); static const char *nsp32_info (struct Scsi_Host *); static int nsp32_release (struct Scsi_Host *); /* SCSI error handler */ static int nsp32_eh_abort (struct scsi_cmnd *); static int nsp32_eh_bus_reset (struct scsi_cmnd *); static int nsp32_eh_host_reset(struct scsi_cmnd *); /* generate SCSI message */ static void nsp32_build_identify(struct scsi_cmnd *); static void nsp32_build_nop (struct scsi_cmnd *); static void nsp32_build_reject (struct scsi_cmnd *); static void nsp32_build_sdtr (struct scsi_cmnd *, unsigned char, unsigned char); /* SCSI message handler */ static int nsp32_busfree_occur(struct scsi_cmnd *, unsigned short); static void nsp32_msgout_occur (struct scsi_cmnd *); static void nsp32_msgin_occur (struct scsi_cmnd *, unsigned long, unsigned short); static int nsp32_setup_sg_table (struct scsi_cmnd *); static int nsp32_selection_autopara(struct scsi_cmnd *); static int nsp32_selection_autoscsi(struct scsi_cmnd *); static void nsp32_scsi_done (struct scsi_cmnd *); static int nsp32_arbitration (struct scsi_cmnd *, unsigned int); static int nsp32_reselection (struct scsi_cmnd *, unsigned char); static void nsp32_adjust_busfree (struct scsi_cmnd *, unsigned int); static void nsp32_restart_autoscsi (struct scsi_cmnd *, unsigned short); /* SCSI SDTR */ static void nsp32_analyze_sdtr (struct scsi_cmnd *); static int nsp32_search_period_entry(nsp32_hw_data *, nsp32_target *, unsigned char); static void nsp32_set_async (nsp32_hw_data *, nsp32_target *); static void nsp32_set_max_sync (nsp32_hw_data *, nsp32_target *, unsigned char *, unsigned char *); static void nsp32_set_sync_entry (nsp32_hw_data *, nsp32_target *, int, unsigned 
char); /* SCSI bus status handler */ static void nsp32_wait_req (nsp32_hw_data *, int); static void nsp32_wait_sack (nsp32_hw_data *, int); static void nsp32_sack_assert (nsp32_hw_data *); static void nsp32_sack_negate (nsp32_hw_data *); static void nsp32_do_bus_reset(nsp32_hw_data *); /* hardware interrupt handler */ static irqreturn_t do_nsp32_isr(int, void *); /* initialize hardware */ static int nsp32hw_init(nsp32_hw_data *); /* EEPROM handler */ static int nsp32_getprom_param (nsp32_hw_data *); static int nsp32_getprom_at24 (nsp32_hw_data *); static int nsp32_getprom_c16 (nsp32_hw_data *); static void nsp32_prom_start (nsp32_hw_data *); static void nsp32_prom_stop (nsp32_hw_data *); static int nsp32_prom_read (nsp32_hw_data *, int); static int nsp32_prom_read_bit (nsp32_hw_data *); static void nsp32_prom_write_bit(nsp32_hw_data *, int); static void nsp32_prom_set (nsp32_hw_data *, int, int); static int nsp32_prom_get (nsp32_hw_data *, int); /* debug/warning/info message */ static void nsp32_message (const char *, int, char *, char *, ...); #ifdef NSP32_DEBUG static void nsp32_dmessage(const char *, int, int, char *, ...); #endif /* * max_sectors is currently limited up to 128. */ static struct scsi_host_template nsp32_template = { .proc_name = "nsp32", .name = "Workbit NinjaSCSI-32Bi/UDE", .proc_info = nsp32_proc_info, .info = nsp32_info, .queuecommand = nsp32_queuecommand, .can_queue = 1, .sg_tablesize = NSP32_SG_SIZE, .max_sectors = 128, .cmd_per_lun = 1, .this_id = NSP32_HOST_SCSIID, .use_clustering = DISABLE_CLUSTERING, .eh_abort_handler = nsp32_eh_abort, .eh_bus_reset_handler = nsp32_eh_bus_reset, .eh_host_reset_handler = nsp32_eh_host_reset, /* .highmem_io = 1, */ }; #include "nsp32_io.h" /*********************************************************************** * debug, error print */ #ifndef NSP32_DEBUG # define NSP32_DEBUG_MASK 0x000000 # define nsp32_msg(type, args...) nsp32_message ("", 0, (type), args) # define nsp32_dbg(mask, args...) 
/* */ #else # define NSP32_DEBUG_MASK 0xffffff # define nsp32_msg(type, args...) \ nsp32_message (__func__, __LINE__, (type), args) # define nsp32_dbg(mask, args...) \ nsp32_dmessage(__func__, __LINE__, (mask), args) #endif #define NSP32_DEBUG_QUEUECOMMAND BIT(0) #define NSP32_DEBUG_REGISTER BIT(1) #define NSP32_DEBUG_AUTOSCSI BIT(2) #define NSP32_DEBUG_INTR BIT(3) #define NSP32_DEBUG_SGLIST BIT(4) #define NSP32_DEBUG_BUSFREE BIT(5) #define NSP32_DEBUG_CDB_CONTENTS BIT(6) #define NSP32_DEBUG_RESELECTION BIT(7) #define NSP32_DEBUG_MSGINOCCUR BIT(8) #define NSP32_DEBUG_EEPROM BIT(9) #define NSP32_DEBUG_MSGOUTOCCUR BIT(10) #define NSP32_DEBUG_BUSRESET BIT(11) #define NSP32_DEBUG_RESTART BIT(12) #define NSP32_DEBUG_SYNC BIT(13) #define NSP32_DEBUG_WAIT BIT(14) #define NSP32_DEBUG_TARGETFLAG BIT(15) #define NSP32_DEBUG_PROC BIT(16) #define NSP32_DEBUG_INIT BIT(17) #define NSP32_SPECIAL_PRINT_REGISTER BIT(20) #define NSP32_DEBUG_BUF_LEN 100 static void nsp32_message(const char *func, int line, char *type, char *fmt, ...) { va_list args; char buf[NSP32_DEBUG_BUF_LEN]; va_start(args, fmt); vsnprintf(buf, sizeof(buf), fmt, args); va_end(args); #ifndef NSP32_DEBUG printk("%snsp32: %s\n", type, buf); #else printk("%snsp32: %s (%d): %s\n", type, func, line, buf); #endif } #ifdef NSP32_DEBUG static void nsp32_dmessage(const char *func, int line, int mask, char *fmt, ...) 
{ va_list args; char buf[NSP32_DEBUG_BUF_LEN]; va_start(args, fmt); vsnprintf(buf, sizeof(buf), fmt, args); va_end(args); if (mask & NSP32_DEBUG_MASK) { printk("nsp32-debug: 0x%x %s (%d): %s\n", mask, func, line, buf); } } #endif #ifdef NSP32_DEBUG # include "nsp32_debug.c" #else # define show_command(arg) /* */ # define show_busphase(arg) /* */ # define show_autophase(arg) /* */ #endif /* * IDENTIFY Message */ static void nsp32_build_identify(struct scsi_cmnd *SCpnt) { nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata; int pos = data->msgout_len; int mode = FALSE; /* XXX: Auto DiscPriv detection is progressing... */ if (disc_priv == 0) { /* mode = TRUE; */ } data->msgoutbuf[pos] = IDENTIFY(mode, SCpnt->device->lun); pos++; data->msgout_len = pos; } /* * SDTR Message Routine */ static void nsp32_build_sdtr(struct scsi_cmnd *SCpnt, unsigned char period, unsigned char offset) { nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata; int pos = data->msgout_len; data->msgoutbuf[pos] = EXTENDED_MESSAGE; pos++; data->msgoutbuf[pos] = EXTENDED_SDTR_LEN; pos++; data->msgoutbuf[pos] = EXTENDED_SDTR; pos++; data->msgoutbuf[pos] = period; pos++; data->msgoutbuf[pos] = offset; pos++; data->msgout_len = pos; } /* * No Operation Message */ static void nsp32_build_nop(struct scsi_cmnd *SCpnt) { nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata; int pos = data->msgout_len; if (pos != 0) { nsp32_msg(KERN_WARNING, "Some messages are already contained!"); return; } data->msgoutbuf[pos] = NOP; pos++; data->msgout_len = pos; } /* * Reject Message */ static void nsp32_build_reject(struct scsi_cmnd *SCpnt) { nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata; int pos = data->msgout_len; data->msgoutbuf[pos] = MESSAGE_REJECT; pos++; data->msgout_len = pos; } /* * timer */ #if 0 static void nsp32_start_timer(struct scsi_cmnd *SCpnt, int time) { unsigned int base = SCpnt->host->io_port; nsp32_dbg(NSP32_DEBUG_INTR, 
"timer=%d", time); if (time & (~TIMER_CNT_MASK)) { nsp32_dbg(NSP32_DEBUG_INTR, "timer set overflow"); } nsp32_write2(base, TIMER_SET, time & TIMER_CNT_MASK); } #endif /* * set SCSI command and other parameter to asic, and start selection phase */ static int nsp32_selection_autopara(struct scsi_cmnd *SCpnt) { nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata; unsigned int base = SCpnt->device->host->io_port; unsigned int host_id = SCpnt->device->host->this_id; unsigned char target = scmd_id(SCpnt); nsp32_autoparam *param = data->autoparam; unsigned char phase; int i, ret; unsigned int msgout; u16_le s; nsp32_dbg(NSP32_DEBUG_AUTOSCSI, "in"); /* * check bus free */ phase = nsp32_read1(base, SCSI_BUS_MONITOR); if (phase != BUSMON_BUS_FREE) { nsp32_msg(KERN_WARNING, "bus busy"); show_busphase(phase & BUSMON_PHASE_MASK); SCpnt->result = DID_BUS_BUSY << 16; return FALSE; } /* * message out * * Note: If the range of msgout_len is 1 - 3, fill scsi_msgout. * over 3 messages needs another routine. 
*/ if (data->msgout_len == 0) { nsp32_msg(KERN_ERR, "SCSI MsgOut without any message!"); SCpnt->result = DID_ERROR << 16; return FALSE; } else if (data->msgout_len > 0 && data->msgout_len <= 3) { msgout = 0; for (i = 0; i < data->msgout_len; i++) { /* * the sending order of the message is: * MCNT 3: MSG#0 -> MSG#1 -> MSG#2 * MCNT 2: MSG#1 -> MSG#2 * MCNT 1: MSG#2 */ msgout >>= 8; msgout |= ((unsigned int)(data->msgoutbuf[i]) << 24); } msgout |= MV_VALID; /* MV valid */ msgout |= (unsigned int)data->msgout_len; /* len */ } else { /* data->msgout_len > 3 */ msgout = 0; } // nsp_dbg(NSP32_DEBUG_AUTOSCSI, "sel time out=0x%x\n", nsp32_read2(base, SEL_TIME_OUT)); // nsp32_write2(base, SEL_TIME_OUT, SEL_TIMEOUT_TIME); /* * setup asic parameter */ memset(param, 0, sizeof(nsp32_autoparam)); /* cdb */ for (i = 0; i < SCpnt->cmd_len; i++) { param->cdb[4 * i] = SCpnt->cmnd[i]; } /* outgoing messages */ param->msgout = cpu_to_le32(msgout); /* syncreg, ackwidth, target id, SREQ sampling rate */ param->syncreg = data->cur_target->syncreg; param->ackwidth = data->cur_target->ackwidth; param->target_id = BIT(host_id) | BIT(target); param->sample_reg = data->cur_target->sample_reg; // nsp32_dbg(NSP32_DEBUG_AUTOSCSI, "sample rate=0x%x\n", data->cur_target->sample_reg); /* command control */ param->command_control = cpu_to_le16(CLEAR_CDB_FIFO_POINTER | AUTOSCSI_START | AUTO_MSGIN_00_OR_04 | AUTO_MSGIN_02 | AUTO_ATN ); /* transfer control */ s = 0; switch (data->trans_method) { case NSP32_TRANSFER_BUSMASTER: s |= BM_START; break; case NSP32_TRANSFER_MMIO: s |= CB_MMIO_MODE; break; case NSP32_TRANSFER_PIO: s |= CB_IO_MODE; break; default: nsp32_msg(KERN_ERR, "unknown trans_method"); break; } /* * OR-ed BLIEND_MODE, FIFO intr is decreased, instead of PCI bus waits. * For bus master transfer, it's taken off. 
*/ s |= (TRANSFER_GO | ALL_COUNTER_CLR); param->transfer_control = cpu_to_le16(s); /* sg table addr */ param->sgt_pointer = cpu_to_le32(data->cur_lunt->sglun_paddr); /* * transfer parameter to ASIC */ nsp32_write4(base, SGT_ADR, data->auto_paddr); nsp32_write2(base, COMMAND_CONTROL, CLEAR_CDB_FIFO_POINTER | AUTO_PARAMETER ); /* * Check arbitration */ ret = nsp32_arbitration(SCpnt, base); return ret; } /* * Selection with AUTO SCSI (without AUTO PARAMETER) */ static int nsp32_selection_autoscsi(struct scsi_cmnd *SCpnt) { nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata; unsigned int base = SCpnt->device->host->io_port; unsigned int host_id = SCpnt->device->host->this_id; unsigned char target = scmd_id(SCpnt); unsigned char phase; int status; unsigned short command = 0; unsigned int msgout = 0; unsigned short execph; int i; nsp32_dbg(NSP32_DEBUG_AUTOSCSI, "in"); /* * IRQ disable */ nsp32_write2(base, IRQ_CONTROL, IRQ_CONTROL_ALL_IRQ_MASK); /* * check bus line */ phase = nsp32_read1(base, SCSI_BUS_MONITOR); if(((phase & BUSMON_BSY) == 1) || (phase & BUSMON_SEL) == 1) { nsp32_msg(KERN_WARNING, "bus busy"); SCpnt->result = DID_BUS_BUSY << 16; status = 1; goto out; } /* * clear execph */ execph = nsp32_read2(base, SCSI_EXECUTE_PHASE); /* * clear FIFO counter to set CDBs */ nsp32_write2(base, COMMAND_CONTROL, CLEAR_CDB_FIFO_POINTER); /* * set CDB0 - CDB15 */ for (i = 0; i < SCpnt->cmd_len; i++) { nsp32_write1(base, COMMAND_DATA, SCpnt->cmnd[i]); } nsp32_dbg(NSP32_DEBUG_CDB_CONTENTS, "CDB[0]=[0x%x]", SCpnt->cmnd[0]); /* * set SCSIOUT LATCH(initiator)/TARGET(target) (OR-ed) ID */ nsp32_write1(base, SCSI_OUT_LATCH_TARGET_ID, BIT(host_id) | BIT(target)); /* * set SCSI MSGOUT REG * * Note: If the range of msgout_len is 1 - 3, fill scsi_msgout. * over 3 messages needs another routine. 
*/ if (data->msgout_len == 0) { nsp32_msg(KERN_ERR, "SCSI MsgOut without any message!"); SCpnt->result = DID_ERROR << 16; status = 1; goto out; } else if (data->msgout_len > 0 && data->msgout_len <= 3) { msgout = 0; for (i = 0; i < data->msgout_len; i++) { /* * the sending order of the message is: * MCNT 3: MSG#0 -> MSG#1 -> MSG#2 * MCNT 2: MSG#1 -> MSG#2 * MCNT 1: MSG#2 */ msgout >>= 8; msgout |= ((unsigned int)(data->msgoutbuf[i]) << 24); } msgout |= MV_VALID; /* MV valid */ msgout |= (unsigned int)data->msgout_len; /* len */ nsp32_write4(base, SCSI_MSG_OUT, msgout); } else { /* data->msgout_len > 3 */ nsp32_write4(base, SCSI_MSG_OUT, 0); } /* * set selection timeout(= 250ms) */ nsp32_write2(base, SEL_TIME_OUT, SEL_TIMEOUT_TIME); /* * set SREQ hazard killer sampling rate * * TODO: sample_rate (BASE+0F) is 0 when internal clock = 40MHz. * check other internal clock! */ nsp32_write1(base, SREQ_SMPL_RATE, data->cur_target->sample_reg); /* * clear Arbit */ nsp32_write1(base, SET_ARBIT, ARBIT_CLEAR); /* * set SYNCREG * Don't set BM_START_ADR before setting this register. 
*/ nsp32_write1(base, SYNC_REG, data->cur_target->syncreg); /* * set ACKWIDTH */ nsp32_write1(base, ACK_WIDTH, data->cur_target->ackwidth); nsp32_dbg(NSP32_DEBUG_AUTOSCSI, "syncreg=0x%x, ackwidth=0x%x, sgtpaddr=0x%x, id=0x%x", nsp32_read1(base, SYNC_REG), nsp32_read1(base, ACK_WIDTH), nsp32_read4(base, SGT_ADR), nsp32_read1(base, SCSI_OUT_LATCH_TARGET_ID)); nsp32_dbg(NSP32_DEBUG_AUTOSCSI, "msgout_len=%d, msgout=0x%x", data->msgout_len, msgout); /* * set SGT ADDR (physical address) */ nsp32_write4(base, SGT_ADR, data->cur_lunt->sglun_paddr); /* * set TRANSFER CONTROL REG */ command = 0; command |= (TRANSFER_GO | ALL_COUNTER_CLR); if (data->trans_method & NSP32_TRANSFER_BUSMASTER) { if (scsi_bufflen(SCpnt) > 0) { command |= BM_START; } } else if (data->trans_method & NSP32_TRANSFER_MMIO) { command |= CB_MMIO_MODE; } else if (data->trans_method & NSP32_TRANSFER_PIO) { command |= CB_IO_MODE; } nsp32_write2(base, TRANSFER_CONTROL, command); /* * start AUTO SCSI, kick off arbitration */ command = (CLEAR_CDB_FIFO_POINTER | AUTOSCSI_START | AUTO_MSGIN_00_OR_04 | AUTO_MSGIN_02 | AUTO_ATN ); nsp32_write2(base, COMMAND_CONTROL, command); /* * Check arbitration */ status = nsp32_arbitration(SCpnt, base); out: /* * IRQ enable */ nsp32_write2(base, IRQ_CONTROL, 0); return status; } /* * Arbitration Status Check * * Note: Arbitration counter is waited during ARBIT_GO is not lifting. * Using udelay(1) consumes CPU time and system time, but * arbitration delay time is defined minimal 2.4us in SCSI * specification, thus udelay works as coarse grained wait timer. 
 */
static int nsp32_arbitration(struct scsi_cmnd *SCpnt, unsigned int base)
{
	unsigned char arbit;
	int           status = TRUE;
	int           time   = 0;

	/* Busy-poll the arbitration status until the chip reports a win
	 * or a loss, bounded by ARBIT_TIMEOUT_TIME iterations. */
	do {
		arbit = nsp32_read1(base, ARBIT_STATUS);
		time++;
	} while ((arbit & (ARBIT_WIN | ARBIT_FAIL)) == 0 &&
		 (time <= ARBIT_TIMEOUT_TIME));

	nsp32_dbg(NSP32_DEBUG_AUTOSCSI,
		  "arbit: 0x%x, delay time: %d", arbit, time);

	if (arbit & ARBIT_WIN) {
		/* Arbitration succeeded */
		SCpnt->result = DID_OK << 16;
		nsp32_index_write1(base, EXT_PORT, LED_ON); /* PCI LED on */
	} else if (arbit & ARBIT_FAIL) {
		/* Arbitration failed */
		SCpnt->result = DID_BUS_BUSY << 16;
		status = FALSE;
	} else {
		/*
		 * unknown error or ARBIT_GO timeout,
		 * something lock up! guess no connection.
		 */
		nsp32_dbg(NSP32_DEBUG_AUTOSCSI, "arbit timeout");
		SCpnt->result = DID_NO_CONNECT << 16;
		status = FALSE;
	}

	/*
	 * clear Arbit
	 */
	nsp32_write1(base, SET_ARBIT, ARBIT_CLEAR);

	return status;
}

/*
 * reselection
 *
 * Note: This reselection routine is called from msgin_occur,
 *	 reselection target id&lun must be already set.
 *	 SCSI-2 says IDENTIFY implies RESTORE_POINTER operation.
 */
static int nsp32_reselection(struct scsi_cmnd *SCpnt, unsigned char newlun)
{
	nsp32_hw_data *data    = (nsp32_hw_data *)SCpnt->device->host->hostdata;
	unsigned int   host_id = SCpnt->device->host->this_id;
	unsigned int   base    = SCpnt->device->host->io_port;
	unsigned char  tmpid, newid;

	nsp32_dbg(NSP32_DEBUG_RESELECTION, "enter");

	/*
	 * calculate reselected SCSI ID: mask off our own ID bit, then the
	 * lowest remaining set bit is the reselecting target's ID.
	 */
	tmpid = nsp32_read1(base, RESELECT_ID);
	tmpid &= (~BIT(host_id));
	newid = 0;
	while (tmpid) {
		if (tmpid & 1) {
			break;
		}
		tmpid >>= 1;
		newid++;
	}

	/*
	 * If reselected New ID:LUN is not existed
	 * or current nexus is not existed, unexpected
	 * reselection is occurred. Send reject message.
	 */
	if (newid >= ARRAY_SIZE(data->lunt) ||
	    newlun >= ARRAY_SIZE(data->lunt[0])) {
		nsp32_msg(KERN_WARNING, "unknown id/lun");
		return FALSE;
	} else if(data->lunt[newid][newlun].SCpnt == NULL) {
		nsp32_msg(KERN_WARNING, "no SCSI command is processing");
		return FALSE;
	}

	data->cur_id     = newid;
	data->cur_lun    = newlun;
	data->cur_target = &(data->target[newid]);
	data->cur_lunt   = &(data->lunt[newid][newlun]);

	/* reset SACK/SavedACK counter (or ALL clear?) */
	nsp32_write4(base, CLR_COUNTER, CLRCOUNTER_ALLMASK);

	return TRUE;
}

/*
 * nsp32_setup_sg_table - build scatter gather list for transfer data
 *			  with bus master.
 *
 * Note: NinjaSCSI-32Bi/UDE bus master can not transfer over 64KB at a time.
 */
static int nsp32_setup_sg_table(struct scsi_cmnd *SCpnt)
{
	nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata;
	struct scatterlist *sg;
	nsp32_sgtable *sgt = data->cur_lunt->sglun->sgt;
	int num, i;
	u32_le l;

	if (sgt == NULL) {
		nsp32_dbg(NSP32_DEBUG_SGLIST, "SGT == null");
		return FALSE;
	}

	num = scsi_dma_map(SCpnt);
	if (!num)
		return TRUE;	/* no data to transfer */
	else if (num < 0)
		return FALSE;	/* mapping failed */
	else {
		scsi_for_each_sg(SCpnt, sg, num, i) {
			/*
			 * Build nsp32_sglist, substitute sg dma addresses.
			 */
			sgt[i].addr = cpu_to_le32(sg_dma_address(sg));
			sgt[i].len  = cpu_to_le32(sg_dma_len(sg));

			if (le32_to_cpu(sgt[i].len) > 0x10000) {
				nsp32_msg(KERN_ERR,
					"can't transfer over 64KB at a time, size=0x%lx", le32_to_cpu(sgt[i].len));
				return FALSE;
			}
			/*
			 * NOTE(review): le32_to_cpu() yields a u32 here, but
			 * "%lx" expects unsigned long — mismatched on 64-bit
			 * builds; consider "%x". Debug-only output.
			 */
			nsp32_dbg(NSP32_DEBUG_SGLIST,
				  "num 0x%x : addr 0x%lx len 0x%lx",
				  i,
				  le32_to_cpu(sgt[i].addr),
				  le32_to_cpu(sgt[i].len ));
		}

		/* set end mark */
		l = le32_to_cpu(sgt[num-1].len);
		sgt[num-1].len = cpu_to_le32(l | SGTEND);
	}

	return TRUE;
}

/*
 * queuecommand entry point: validates the request, records the nexus,
 * builds the SG table and IDENTIFY/SDTR messages, then starts selection
 * via AutoParameter or AutoSCSI depending on the auto_param module option.
 */
static int nsp32_queuecommand(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
{
	nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata;
	nsp32_target *target;
	nsp32_lunt   *cur_lunt;
	int ret;

	nsp32_dbg(NSP32_DEBUG_QUEUECOMMAND,
		  "enter. target: 0x%x LUN: 0x%x cmnd: 0x%x cmndlen: 0x%x "
		  "use_sg: 0x%x reqbuf: 0x%lx reqlen: 0x%x",
		  SCpnt->device->id, SCpnt->device->lun, SCpnt->cmnd[0],
		  SCpnt->cmd_len, scsi_sg_count(SCpnt), scsi_sglist(SCpnt),
		  scsi_bufflen(SCpnt));

	/* this driver handles one command at a time */
	if (data->CurrentSC != NULL) {
		nsp32_msg(KERN_ERR, "Currentsc != NULL. Cancel this command request");
		data->CurrentSC = NULL;
		SCpnt->result   = DID_NO_CONNECT << 16;
		done(SCpnt);
		return 0;
	}

	/* check target ID is not same as this initiator ID */
	if (scmd_id(SCpnt) == SCpnt->device->host->this_id) {
		nsp32_dbg(NSP32_DEBUG_QUEUECOMMAND, "terget==host???");
		SCpnt->result = DID_BAD_TARGET << 16;
		done(SCpnt);
		return 0;
	}

	/* check target LUN is allowable value */
	if (SCpnt->device->lun >= MAX_LUN) {
		nsp32_dbg(NSP32_DEBUG_QUEUECOMMAND, "no more lun");
		SCpnt->result = DID_BAD_TARGET << 16;
		done(SCpnt);
		return 0;
	}

	show_command(SCpnt);

	SCpnt->scsi_done   = done;
	data->CurrentSC    = SCpnt;
	SCpnt->SCp.Status  = CHECK_CONDITION;
	SCpnt->SCp.Message = 0;
	scsi_set_resid(SCpnt, scsi_bufflen(SCpnt));

	SCpnt->SCp.ptr              = (char *)scsi_sglist(SCpnt);
	SCpnt->SCp.this_residual    = scsi_bufflen(SCpnt);
	SCpnt->SCp.buffer           = NULL;
	SCpnt->SCp.buffers_residual = 0;

	/* initialize data */
	data->msgout_len    = 0;
	data->msgin_len     = 0;
	cur_lunt            = &(data->lunt[SCpnt->device->id][SCpnt->device->lun]);
	cur_lunt->SCpnt     = SCpnt;
	cur_lunt->save_datp = 0;
	cur_lunt->msgin03   = FALSE;
	data->cur_lunt      = cur_lunt;
	data->cur_id        = SCpnt->device->id;
	data->cur_lun       = SCpnt->device->lun;

	ret = nsp32_setup_sg_table(SCpnt);
	if (ret == FALSE) {
		nsp32_msg(KERN_ERR, "SGT fail");
		SCpnt->result = DID_ERROR << 16;
		nsp32_scsi_done(SCpnt);
		return 0;
	}

	/* Build IDENTIFY */
	nsp32_build_identify(SCpnt);

	/*
	 * If target is the first time to transfer after the reset
	 * (target don't have SDTR_DONE and SDTR_INITIATOR), sync
	 * message SDTR is needed to do synchronous transfer.
	 */
	target = &data->target[scmd_id(SCpnt)];
	data->cur_target = target;

	if (!(target->sync_flag & (SDTR_DONE | SDTR_INITIATOR | SDTR_TARGET))) {
		unsigned char period, offset;

		if (trans_mode != ASYNC_MODE) {
			nsp32_set_max_sync(data, target, &period, &offset);
			nsp32_build_sdtr(SCpnt, period, offset);
			target->sync_flag |= SDTR_INITIATOR;
		} else {
			nsp32_set_async(data, target);
			target->sync_flag |= SDTR_DONE;
		}

		/*
		 * NOTE(review): in the ASYNC_MODE branch above, period and
		 * offset are never assigned, so this debug print may read
		 * uninitialized locals — confirm / initialize to 0.
		 */
		nsp32_dbg(NSP32_DEBUG_QUEUECOMMAND,
			  "SDTR: entry: %d start_period: 0x%x offset: 0x%x\n",
			  target->limit_entry, period, offset);
	} else if (target->sync_flag & SDTR_INITIATOR) {
		/*
		 * It was negotiating SDTR with target, sending from the
		 * initiator, but there are no chance to remove this flag.
		 * Set async because we don't get proper negotiation.
		 */
		nsp32_set_async(data, target);
		target->sync_flag &= ~SDTR_INITIATOR;
		target->sync_flag |= SDTR_DONE;

		nsp32_dbg(NSP32_DEBUG_QUEUECOMMAND,
			  "SDTR_INITIATOR: fall back to async");
	} else if (target->sync_flag & SDTR_TARGET) {
		/*
		 * It was negotiating SDTR with target, sending from target,
		 * but there are no chance to remove this flag. Set async
		 * because we don't get proper negotiation.
		 */
		nsp32_set_async(data, target);
		target->sync_flag &= ~SDTR_TARGET;
		target->sync_flag |= SDTR_DONE;

		nsp32_dbg(NSP32_DEBUG_QUEUECOMMAND,
			  "Unknown SDTR from target is reached, fall back to async.");
	}

	nsp32_dbg(NSP32_DEBUG_TARGETFLAG,
		  "target: %d sync_flag: 0x%x syncreg: 0x%x ackwidth: 0x%x",
		  SCpnt->device->id, target->sync_flag, target->syncreg,
		  target->ackwidth);

	/* Selection */
	if (auto_param == 0) {
		ret = nsp32_selection_autopara(SCpnt);
	} else {
		ret = nsp32_selection_autoscsi(SCpnt);
	}

	if (ret != TRUE) {
		nsp32_dbg(NSP32_DEBUG_QUEUECOMMAND, "selection fail");
		nsp32_scsi_done(SCpnt);
	}

	return 0;
}

/* initialize asic */
static int nsp32hw_init(nsp32_hw_data *data)
{
	unsigned int   base = data->BaseAddress;
	unsigned short irq_stat;
	unsigned long  lc_reg;
	unsigned char  power;

	/* ensure the latency/cache-line config has a sane latency value */
	lc_reg = nsp32_index_read4(base, CFG_LATE_CACHE);
	if ((lc_reg & 0xff00) == 0) {
		lc_reg |= (0x20 << 8);
		nsp32_index_write2(base, CFG_LATE_CACHE, lc_reg & 0xffff);
	}

	nsp32_write2(base, IRQ_CONTROL,        IRQ_CONTROL_ALL_IRQ_MASK);
	nsp32_write2(base, TRANSFER_CONTROL,   0);
	nsp32_write4(base, BM_CNT,             0);
	nsp32_write2(base, SCSI_EXECUTE_PHASE, 0);

	/* drain any pending interrupt status before unmasking */
	do {
		irq_stat = nsp32_read2(base, IRQ_STATUS);
		nsp32_dbg(NSP32_DEBUG_INIT, "irq_stat 0x%x", irq_stat);
	} while (irq_stat & IRQSTATUS_ANY_IRQ);

	/*
	 * Fill FIFO_FULL_SHLD, FIFO_EMPTY_SHLD. Below parameter is
	 * designated by specification.
 */
	if ((data->trans_method & NSP32_TRANSFER_PIO) ||
	    (data->trans_method & NSP32_TRANSFER_MMIO)) {
		nsp32_index_write1(base, FIFO_FULL_SHLD_COUNT,  0x40);
		nsp32_index_write1(base, FIFO_EMPTY_SHLD_COUNT, 0x40);
	} else if (data->trans_method & NSP32_TRANSFER_BUSMASTER) {
		nsp32_index_write1(base, FIFO_FULL_SHLD_COUNT,  0x10);
		nsp32_index_write1(base, FIFO_EMPTY_SHLD_COUNT, 0x60);
	} else {
		nsp32_dbg(NSP32_DEBUG_INIT, "unknown transfer mode");
	}

	nsp32_dbg(NSP32_DEBUG_INIT, "full 0x%x emp 0x%x",
		  nsp32_index_read1(base, FIFO_FULL_SHLD_COUNT),
		  nsp32_index_read1(base, FIFO_EMPTY_SHLD_COUNT));

	nsp32_index_write1(base, CLOCK_DIV, data->clock);
	nsp32_index_write1(base, BM_CYCLE, MEMRD_CMD1 | SGT_AUTO_PARA_MEMED_CMD);
	nsp32_write1(base, PARITY_CONTROL, 0);	/* parity check is disable */

	/*
	 * initialize MISC_WRRD register
	 *
	 * Note: Designated parameters is obeyed as following:
	 *	MISC_SCSI_DIRECTION_DETECTOR_SELECT: It must be set.
	 *	MISC_MASTER_TERMINATION_SELECT:      It must be set.
	 *	MISC_BMREQ_NEGATE_TIMING_SEL:        It should be set.
	 *	MISC_AUTOSEL_TIMING_SEL:             It should be set.
	 *	MISC_BMSTOP_CHANGE2_NONDATA_PHASE:   It should be set.
	 *	MISC_DELAYED_BMSTART:                It's selected for safety.
	 *
	 * Note: If MISC_BMSTOP_CHANGE2_NONDATA_PHASE is set, then
	 *	we have to set TRANSFERCONTROL_BM_START as 0 and set
	 *	appropriate value before restarting bus master transfer.
	 */
	nsp32_index_write2(base, MISC_WR,
			   (SCSI_DIRECTION_DETECTOR_SELECT |
			    DELAYED_BMSTART                |
			    MASTER_TERMINATION_SELECT      |
			    BMREQ_NEGATE_TIMING_SEL        |
			    AUTOSEL_TIMING_SEL             |
			    BMSTOP_CHANGE2_NONDATA_PHASE));

	/* if termination power is not sensed, turn it on */
	nsp32_index_write1(base, TERM_PWR_CONTROL, 0);
	power = nsp32_index_read1(base, TERM_PWR_CONTROL);
	if (!(power & SENSE)) {
		nsp32_msg(KERN_INFO, "term power on");
		nsp32_index_write1(base, TERM_PWR_CONTROL, BPWR);
	}

	nsp32_write2(base, TIMER_SET, TIMER_STOP);
	nsp32_write2(base, TIMER_SET, TIMER_STOP); /* Required 2 times */

	nsp32_write1(base, SYNC_REG,  0);
	nsp32_write1(base, ACK_WIDTH, 0);
	nsp32_write2(base, SEL_TIME_OUT, SEL_TIMEOUT_TIME);

	/*
	 * enable to select designated IRQ (except for
	 * IRQSELECT_SERR, IRQSELECT_PERR, IRQSELECT_BMCNTERR)
	 */
	nsp32_index_write2(base, IRQ_SELECT, IRQSELECT_TIMER_IRQ         |
					     IRQSELECT_SCSIRESET_IRQ     |
					     IRQSELECT_FIFO_SHLD_IRQ     |
					     IRQSELECT_RESELECT_IRQ      |
					     IRQSELECT_PHASE_CHANGE_IRQ  |
					     IRQSELECT_AUTO_SCSI_SEQ_IRQ |
					  //	IRQSELECT_BMCNTERR_IRQ   |
					     IRQSELECT_TARGET_ABORT_IRQ  |
					     IRQSELECT_MASTER_ABORT_IRQ );
	nsp32_write2(base, IRQ_CONTROL, 0);

	/* PCI LED off */
	nsp32_index_write1(base, EXT_PORT_DDR, LED_OFF);
	nsp32_index_write1(base, EXT_PORT,     LED_OFF);

	return TRUE;
}

/* interrupt routine */
static irqreturn_t do_nsp32_isr(int irq, void *dev_id)
{
	nsp32_hw_data *data = dev_id;
	unsigned int base = data->BaseAddress;
	struct scsi_cmnd *SCpnt = data->CurrentSC;
	unsigned short auto_stat, irq_stat, trans_stat;
	unsigned char busmon, busphase;
	unsigned long flags;
	int ret;
	int handled = 0;
	struct Scsi_Host *host = data->Host;

	spin_lock_irqsave(host->host_lock, flags);

	/*
	 * IRQ check, then enable IRQ mask
	 */
	irq_stat = nsp32_read2(base, IRQ_STATUS);
	nsp32_dbg(NSP32_DEBUG_INTR,
		  "enter IRQ: %d, IRQstatus: 0x%x", irq, irq_stat);
	/* is this interrupt comes from Ninja asic? 
 */
	if ((irq_stat & IRQSTATUS_ANY_IRQ) == 0) {
		/* shared IRQ line: not ours */
		nsp32_dbg(NSP32_DEBUG_INTR, "shared interrupt: irq other 0x%x", irq_stat);
		goto out2;
	}
	handled = 1;
	nsp32_write2(base, IRQ_CONTROL, IRQ_CONTROL_ALL_IRQ_MASK);

	busmon = nsp32_read1(base, SCSI_BUS_MONITOR);
	busphase = busmon & BUSMON_PHASE_MASK;

	trans_stat = nsp32_read2(base, TRANSFER_STATUS);
	/* all-ones on both registers means the card was hot-unplugged */
	if ((irq_stat == 0xffff) && (trans_stat == 0xffff)) {
		nsp32_msg(KERN_INFO, "card disconnect");
		if (data->CurrentSC != NULL) {
			nsp32_msg(KERN_INFO, "clean up current SCSI command");
			SCpnt->result = DID_BAD_TARGET << 16;
			nsp32_scsi_done(SCpnt);
		}
		goto out;
	}

	/* Timer IRQ */
	if (irq_stat & IRQSTATUS_TIMER_IRQ) {
		nsp32_dbg(NSP32_DEBUG_INTR, "timer stop");
		nsp32_write2(base, TIMER_SET, TIMER_STOP);
		goto out;
	}

	/* SCSI reset */
	if (irq_stat & IRQSTATUS_SCSIRESET_IRQ) {
		nsp32_msg(KERN_INFO, "detected someone do bus reset");
		nsp32_do_bus_reset(data);
		if (SCpnt != NULL) {
			SCpnt->result = DID_RESET << 16;
			nsp32_scsi_done(SCpnt);
		}
		goto out;
	}

	if (SCpnt == NULL) {
		nsp32_msg(KERN_WARNING, "SCpnt==NULL this can't be happened");
		nsp32_msg(KERN_WARNING, "irq_stat=0x%x trans_stat=0x%x", irq_stat, trans_stat);
		goto out;
	}

	/*
	 * AutoSCSI Interrupt.
	 * Note: This interrupt is occurred when AutoSCSI is finished. Then
	 * check SCSIEXECUTEPHASE, and do appropriate action. Each phases are
	 * recorded when AutoSCSI sequencer has been processed.
	 */
	if(irq_stat & IRQSTATUS_AUTOSCSI_IRQ) {
		/* getting SCSI executed phase */
		auto_stat = nsp32_read2(base, SCSI_EXECUTE_PHASE);
		nsp32_write2(base, SCSI_EXECUTE_PHASE, 0);

		/* Selection Timeout, go busfree phase. */
		if (auto_stat & SELECTION_TIMEOUT) {
			nsp32_dbg(NSP32_DEBUG_INTR,
				  "selection timeout occurred");
			SCpnt->result = DID_TIME_OUT << 16;
			nsp32_scsi_done(SCpnt);
			goto out;
		}

		if (auto_stat & MSGOUT_PHASE) {
			/*
			 * MsgOut phase was processed.
			 * If MSG_IN_OCCUER is not set, then MsgOut phase is
			 * completed. Thus, msgout_len must reset.  Otherwise,
			 * nothing to do here. If MSG_OUT_OCCUER is occurred,
			 * then we will encounter the condition and check.
			 */
			if (!(auto_stat & MSG_IN_OCCUER) &&
			     (data->msgout_len <= 3)) {
				/*
				 * !MSG_IN_OCCUER && msgout_len <=3
				 *   ---> AutoSCSI with MSGOUTreg is processed.
				 */
				data->msgout_len = 0;
			};

			nsp32_dbg(NSP32_DEBUG_INTR, "MsgOut phase processed");
		}

		if ((auto_stat & DATA_IN_PHASE) &&
		    (scsi_get_resid(SCpnt) > 0) &&
		    ((nsp32_read2(base, FIFO_REST_CNT) & FIFO_REST_MASK) != 0)) {
			printk( "auto+fifo\n");
			//nsp32_pio_read(SCpnt);
		}

		if (auto_stat & (DATA_IN_PHASE | DATA_OUT_PHASE)) {
			/* DATA_IN_PHASE/DATA_OUT_PHASE was processed. */
			nsp32_dbg(NSP32_DEBUG_INTR,
				  "Data in/out phase processed");

			/* read BMCNT, SGT pointer addr */
			nsp32_dbg(NSP32_DEBUG_INTR, "BMCNT=0x%lx",
				    nsp32_read4(base, BM_CNT));
			nsp32_dbg(NSP32_DEBUG_INTR, "addr=0x%lx",
				    nsp32_read4(base, SGT_ADR));
			nsp32_dbg(NSP32_DEBUG_INTR, "SACK=0x%lx",
				    nsp32_read4(base, SACK_CNT));
			nsp32_dbg(NSP32_DEBUG_INTR, "SSACK=0x%lx",
				    nsp32_read4(base, SAVED_SACK_CNT));

			scsi_set_resid(SCpnt, 0); /* all data transfered! */
		}

		/*
		 * MsgIn Occur
		 */
		if (auto_stat & MSG_IN_OCCUER) {
			nsp32_msgin_occur(SCpnt, irq_stat, auto_stat);
		}

		/*
		 * MsgOut Occur
		 */
		if (auto_stat & MSG_OUT_OCCUER) {
			nsp32_msgout_occur(SCpnt);
		}

		/*
		 * Bus Free Occur
		 */
		if (auto_stat & BUS_FREE_OCCUER) {
			ret = nsp32_busfree_occur(SCpnt, auto_stat);
			if (ret == TRUE) {
				goto out;
			}
		}

		if (auto_stat & STATUS_PHASE) {
			/*
			 * Read CSB and substitute CSB for SCpnt->result
			 * to save status phase stutas byte.
			 * scsi error handler checks host_byte (DID_*:
			 * low level driver to indicate status), then checks
			 * status_byte (SCSI status byte).
			 */
			SCpnt->result =	(int)nsp32_read1(base, SCSI_CSB_IN);
		}

		if (auto_stat & ILLEGAL_PHASE) {
			/* Illegal phase is detected. SACK is not back. */
			nsp32_msg(KERN_WARNING,
				  "AUTO SCSI ILLEGAL PHASE OCCUR!!!!");

			/* TODO: currently we don't have any action... bus reset? */

			/*
			 * To send back SACK, assert, wait, and negate.
			 */
			nsp32_sack_assert(data);
			nsp32_wait_req(data, NEGATE);
			nsp32_sack_negate(data);
		}

		if (auto_stat & COMMAND_PHASE) {
			/* nothing to do */
			nsp32_dbg(NSP32_DEBUG_INTR, "Command phase processed");
		}

		if (auto_stat & AUTOSCSI_BUSY) {
			/* AutoSCSI is running */
		}

		show_autophase(auto_stat);
	}

	/* FIFO_SHLD_IRQ */
	if (irq_stat & IRQSTATUS_FIFO_SHLD_IRQ) {
		nsp32_dbg(NSP32_DEBUG_INTR, "FIFO IRQ");

		switch(busphase) {
		case BUSPHASE_DATA_OUT:
			nsp32_dbg(NSP32_DEBUG_INTR, "fifo/write");

			//nsp32_pio_write(SCpnt);

			break;

		case BUSPHASE_DATA_IN:
			nsp32_dbg(NSP32_DEBUG_INTR, "fifo/read");

			//nsp32_pio_read(SCpnt);

			break;

		case BUSPHASE_STATUS:
			nsp32_dbg(NSP32_DEBUG_INTR, "fifo/status");

			SCpnt->SCp.Status = nsp32_read1(base, SCSI_CSB_IN);

			break;
		default:
			nsp32_dbg(NSP32_DEBUG_INTR, "fifo/other phase");
			nsp32_dbg(NSP32_DEBUG_INTR, "irq_stat=0x%x trans_stat=0x%x", irq_stat, trans_stat);
			show_busphase(busphase);
			break;
		}

		goto out;
	}

	/* Phase Change IRQ */
	if (irq_stat & IRQSTATUS_PHASE_CHANGE_IRQ) {
		nsp32_dbg(NSP32_DEBUG_INTR, "phase change IRQ");

		switch(busphase) {
		case BUSPHASE_MESSAGE_IN:
			nsp32_dbg(NSP32_DEBUG_INTR, "phase chg/msg in");
			nsp32_msgin_occur(SCpnt, irq_stat, 0);
			break;
		default:
			nsp32_msg(KERN_WARNING, "phase chg/other phase?");
			nsp32_msg(KERN_WARNING, "irq_stat=0x%x trans_stat=0x%x\n",
				  irq_stat, trans_stat);
			show_busphase(busphase);
			break;
		}
		goto out;
	}

	/* PCI_IRQ */
	if (irq_stat & IRQSTATUS_PCI_IRQ) {
		nsp32_dbg(NSP32_DEBUG_INTR, "PCI IRQ occurred");
		/* Do nothing */
	}

	/* BMCNTERR_IRQ */
	if (irq_stat & IRQSTATUS_BMCNTERR_IRQ) {
		nsp32_msg(KERN_ERR, "Received unexpected BMCNTERR IRQ! ");
		/*
		 * TODO: To be implemented improving bus master
		 * transfer reliability when BMCNTERR is occurred in
		 * AutoSCSI phase described in specification.
		 */
	}

#if 0
	nsp32_dbg(NSP32_DEBUG_INTR,
		  "irq_stat=0x%x trans_stat=0x%x", irq_stat, trans_stat);
	show_busphase(busphase);
#endif

 out:
	/* disable IRQ mask */
	nsp32_write2(base, IRQ_CONTROL, 0);

 out2:
	spin_unlock_irqrestore(host->host_lock, flags);

	nsp32_dbg(NSP32_DEBUG_INTR, "exit");

	return IRQ_RETVAL(handled);
}

#undef SPRINTF
#define SPRINTF(args...) \
	do { \
		if(length > (pos - buffer)) { \
			pos += snprintf(pos, length - (pos - buffer) + 1, ## args); \
			nsp32_dbg(NSP32_DEBUG_PROC, "buffer=0x%p pos=0x%p length=%d %d\n", buffer, pos, length,  length - (pos - buffer));\
		} \
	} while(0)

/*
 * /proc read handler: formats adapter and per-target SDTR status into the
 * caller-supplied buffer. Write (inout == TRUE) is not supported.
 */
static int nsp32_proc_info(struct Scsi_Host *host, char *buffer, char **start,
			   off_t offset, int length, int inout)
{
	char *pos = buffer;
	int thislength;
	unsigned long flags;
	nsp32_hw_data *data;
	int hostno;
	unsigned int base;
	unsigned char mode_reg;
	int id, speed;
	long model;

	/* Write is not supported, just return. */
	if (inout == TRUE) {
		return -EINVAL;
	}

	hostno = host->host_no;
	data = (nsp32_hw_data *)host->hostdata;
	base = host->io_port;

	SPRINTF("NinjaSCSI-32 status\n\n");
	SPRINTF("Driver version: %s, $Revision: 1.33 $\n", nsp32_release_version);
	SPRINTF("SCSI host No.: %d\n", hostno);
	SPRINTF("IRQ: %d\n", host->irq);
	SPRINTF("IO: 0x%lx-0x%lx\n", host->io_port, host->io_port + host->n_io_port - 1);
	SPRINTF("MMIO(virtual address): 0x%lx-0x%lx\n", host->base, host->base + data->MmioLength - 1);
	SPRINTF("sg_tablesize: %d\n", host->sg_tablesize);

	SPRINTF("Chip revision: 0x%x\n", (nsp32_read2(base, INDEX_REG) >> 8) & 0xff);

	mode_reg = nsp32_index_read1(base, CHIP_MODE);
	model    = data->pci_devid->driver_data;

#ifdef CONFIG_PM
	SPRINTF("Power Management: %s\n", (mode_reg & OPTF) ? "yes" : "no");
#endif
	SPRINTF("OEM: %ld, %s\n", (mode_reg & (OEM0|OEM1)), nsp32_model[model]);

	spin_lock_irqsave(&(data->Lock), flags);
	SPRINTF("CurrentSC: 0x%p\n\n", data->CurrentSC);
	spin_unlock_irqrestore(&(data->Lock), flags);

	SPRINTF("SDTR status\n");
	for (id = 0; id < ARRAY_SIZE(data->target); id++) {

		SPRINTF("id %d: ", id);

		if (id == host->this_id) {
			SPRINTF("----- NinjaSCSI-32 host adapter\n");
			continue;
		}

		if (data->target[id].sync_flag == SDTR_DONE) {
			if (data->target[id].period == 0            &&
			    data->target[id].offset == ASYNC_OFFSET ) {
				SPRINTF("async");
			} else {
				SPRINTF(" sync");
			}
		} else {
			SPRINTF(" none");
		}

		if (data->target[id].period != 0) {

			speed = 1000000 / (data->target[id].period * 4);

			SPRINTF(" transfer %d.%dMB/s, offset %d",
				speed / 1000,
				speed % 1000,
				data->target[id].offset
				);
		}
		SPRINTF("\n");
	}

	thislength = pos - (buffer + offset);

	if(thislength < 0) {
		*start = NULL;
		return 0;
	}

	thislength = min(thislength, length);
	*start = buffer + offset;

	return thislength;
}
#undef SPRINTF

/*
 * Reset parameters and call scsi_done for data->cur_lunt.
 * Be careful setting SCpnt->result = DID_* before calling this function.
 */
static void nsp32_scsi_done(struct scsi_cmnd *SCpnt)
{
	nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata;
	unsigned int   base = SCpnt->device->host->io_port;

	scsi_dma_unmap(SCpnt);

	/*
	 * clear TRANSFERCONTROL_BM_START
	 */
	nsp32_write2(base, TRANSFER_CONTROL, 0);
	nsp32_write4(base, BM_CNT,           0);

	/*
	 * call scsi_done
	 */
	(*SCpnt->scsi_done)(SCpnt);

	/*
	 * reset parameters
	 */
	data->cur_lunt->SCpnt = NULL;
	data->cur_lunt        = NULL;
	data->cur_target      = NULL;
	data->CurrentSC       = NULL;
}

/*
 * Bus Free Occur
 *
 * Current Phase is BUSFREE. AutoSCSI is automatically execute BUSFREE phase
 * with ACK reply when below condition is matched:
 *	MsgIn 00: Command Complete.
 *	MsgIn 02: Save Data Pointer.
 *	MsgIn 04: Disconnect.
 * In other case, unexpected BUSFREE is detected.
 */
static int nsp32_busfree_occur(struct scsi_cmnd *SCpnt, unsigned short execph)
{
	nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata;
	unsigned int base   = SCpnt->device->host->io_port;

	nsp32_dbg(NSP32_DEBUG_BUSFREE, "enter execph=0x%x", execph);
	show_autophase(execph);

	nsp32_write4(base, BM_CNT,           0);
	nsp32_write2(base, TRANSFER_CONTROL, 0);

	/*
	 * MsgIn 02: Save Data Pointer
	 *
	 * VALID:
	 *   Save Data Pointer is received. Adjust pointer.
	 *
	 * NO-VALID:
	 *   SCSI-3 says if Save Data Pointer is not received, then we restart
	 *   processing and we can't adjust any SCSI data pointer in next data
	 *   phase.
	 */
	if (execph & MSGIN_02_VALID) {
		nsp32_dbg(NSP32_DEBUG_BUSFREE, "MsgIn02_Valid");

		/*
		 * Check sack_cnt/saved_sack_cnt, then adjust sg table if
		 * needed.
		 */
		if (!(execph & MSGIN_00_VALID) &&
		    ((execph & DATA_IN_PHASE) || (execph & DATA_OUT_PHASE))) {
			unsigned int sacklen, s_sacklen;

			/*
			 * Read SACK count and SAVEDSACK count, then compare.
			 */
			sacklen   = nsp32_read4(base, SACK_CNT      );
			s_sacklen = nsp32_read4(base, SAVED_SACK_CNT);

			/*
			 * If SAVEDSACKCNT == 0, it means SavedDataPointer is
			 * come after data transfering.
			 */
			if (s_sacklen > 0) {
				/*
				 * Comparing between sack and savedsack to
				 * check the condition of AutoMsgIn03.
				 *
				 * If they are same, set msgin03 == TRUE,
				 * COMMANDCONTROL_AUTO_MSGIN_03 is enabled at
				 * reselection.  On the other hand, if they
				 * aren't same, set msgin03 == FALSE, and
				 * COMMANDCONTROL_AUTO_MSGIN_03 is disabled at
				 * reselection.
				 */
				if (sacklen != s_sacklen) {
					data->cur_lunt->msgin03 = FALSE;
				} else {
					data->cur_lunt->msgin03 = TRUE;
				}

				nsp32_adjust_busfree(SCpnt, s_sacklen);
			}
		}

		/* This value has not substitude with valid value yet... */
		//data->cur_lunt->save_datp = data->cur_datp;
	} else {
		/*
		 * no processing.
		 */
	}

	if (execph & MSGIN_03_VALID) {
		/* MsgIn03 was valid to be processed. No need processing. */
	}

	/*
	 * target SDTR check
	 */
	if (data->cur_target->sync_flag & SDTR_INITIATOR) {
		/*
		 * SDTR negotiation pulled by the initiator has not
		 * finished yet. Fall back to ASYNC mode.
		 */
		nsp32_set_async(data, data->cur_target);
		data->cur_target->sync_flag &= ~SDTR_INITIATOR;
		data->cur_target->sync_flag |= SDTR_DONE;
	} else if (data->cur_target->sync_flag & SDTR_TARGET) {
		/*
		 * SDTR negotiation pulled by the target has been
		 * negotiating.
		 */
		if (execph & (MSGIN_00_VALID | MSGIN_04_VALID)) {
			/*
			 * If valid message is received, then
			 * negotiation is succeeded.
			 */
		} else {
			/*
			 * On the contrary, if unexpected bus free is
			 * occurred, then negotiation is failed. Fall
			 * back to ASYNC mode.
			 */
			nsp32_set_async(data, data->cur_target);
		}
		data->cur_target->sync_flag &= ~SDTR_TARGET;
		data->cur_target->sync_flag |= SDTR_DONE;
	}

	/*
	 * It is always ensured by SCSI standard that initiator
	 * switches into Bus Free Phase after
	 * receiving message 00 (Command Complete), 04 (Disconnect).
	 * It's the reason that processing here is valid.
	 */
	if (execph & MSGIN_00_VALID) {
		/* MsgIn 00: Command Complete */
		nsp32_dbg(NSP32_DEBUG_BUSFREE, "command complete");

		SCpnt->SCp.Status  = nsp32_read1(base, SCSI_CSB_IN);
		SCpnt->SCp.Message = 0;
		nsp32_dbg(NSP32_DEBUG_BUSFREE,
			  "normal end stat=0x%x resid=0x%x\n",
			  SCpnt->SCp.Status, scsi_get_resid(SCpnt));
		SCpnt->result = (DID_OK             << 16) |
				(SCpnt->SCp.Message <<  8) |
				(SCpnt->SCp.Status  <<  0);
		nsp32_scsi_done(SCpnt);
		/* All operation is done */
		return TRUE;
	} else if (execph & MSGIN_04_VALID) {
		/* MsgIn 04: Disconnect */
		SCpnt->SCp.Status  = nsp32_read1(base, SCSI_CSB_IN);
		SCpnt->SCp.Message = 4;

		nsp32_dbg(NSP32_DEBUG_BUSFREE, "disconnect");
		return TRUE;
	} else {
		/* Unexpected bus free */
		nsp32_msg(KERN_WARNING, "unexpected bus free occurred");

		/* DID_ERROR? */
		//SCpnt->result   = (DID_OK << 16) | (SCpnt->SCp.Message << 8) | (SCpnt->SCp.Status << 0);
		SCpnt->result = DID_ERROR << 16;
		nsp32_scsi_done(SCpnt);
		return TRUE;
	}
	/* NOTE(review): unreachable — every branch above returns. */
	return FALSE;
}

/*
 * nsp32_adjust_busfree - adjusting SG table
 *
 * Note: This driver adjust the SG table using SCSI ACK
 *       counter instead of BMCNT counter!
 */
static void nsp32_adjust_busfree(struct scsi_cmnd *SCpnt, unsigned int s_sacklen)
{
	nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata;
	int            old_entry = data->cur_entry;
	int            new_entry;
	int            sg_num = data->cur_lunt->sg_num;
	nsp32_sgtable *sgt    = data->cur_lunt->sglun->sgt;
	unsigned int   restlen, sentlen;
	u32_le         len, addr;

	nsp32_dbg(NSP32_DEBUG_SGLIST, "old resid=0x%x", scsi_get_resid(SCpnt));

	/* adjust saved SACK count with 4 byte start address boundary */
	s_sacklen -= le32_to_cpu(sgt[old_entry].addr) & 3;

	/*
	 * calculate new_entry from sack count and each sgt[].len
	 * calculate the byte which is intent to send
	 */
	sentlen = 0;
	for (new_entry = old_entry; new_entry < sg_num; new_entry++) {
		sentlen += (le32_to_cpu(sgt[new_entry].len) & ~SGTEND);
		if (sentlen > s_sacklen) {
			break;
		}
	}

	/* all sgt is processed */
	if (new_entry == sg_num) {
		goto last;
	}

	if (sentlen == s_sacklen) {
		/* XXX: confirm it's ok or not */
		/* In this case, it's ok because we are at
		   the head element of the sg. restlen is correctly
		   calculated. */
	}

	/* calculate the rest length for transfering */
	restlen = sentlen - s_sacklen;

	/* update adjusting current SG table entry: advance the entry's
	 * address past the bytes already acknowledged and shrink its
	 * length to the remainder. */
	len  = le32_to_cpu(sgt[new_entry].len);
	addr = le32_to_cpu(sgt[new_entry].addr);
	addr += (len - restlen);
	sgt[new_entry].addr = cpu_to_le32(addr);
	sgt[new_entry].len  = cpu_to_le32(restlen);

	/* set cur_entry with new_entry */
	data->cur_entry = new_entry;

	return;

 last:
	if (scsi_get_resid(SCpnt) < sentlen) {
		nsp32_msg(KERN_ERR, "resid underflow");
	}

	scsi_set_resid(SCpnt, scsi_get_resid(SCpnt) - sentlen);
	nsp32_dbg(NSP32_DEBUG_SGLIST, "new resid=0x%x", scsi_get_resid(SCpnt));

	/* update hostdata and lun */

	return;
}

/*
 * It's called MsgOut phase occur.
 * NinjaSCSI-32Bi/UDE automatically processes up to 3 messages in
 * message out phase. It, however, has more than 3 messages,
 * HBA creates the interrupt and we have to process by hand.
 */
static void nsp32_msgout_occur(struct scsi_cmnd *SCpnt)
{
	nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata;
	unsigned int base   = SCpnt->device->host->io_port;
	//unsigned short command;
	long new_sgtp;
	int i;

	nsp32_dbg(NSP32_DEBUG_MSGOUTOCCUR,
		  "enter: msgout_len: 0x%x", data->msgout_len);

	/*
	 * If MsgOut phase is occurred without having any
	 * message, then No_Operation is sent (SCSI-2).
	 */
	if (data->msgout_len == 0) {
		nsp32_build_nop(SCpnt);
	}

	/*
	 * Set SGTP ADDR current entry for restarting AUTOSCSI,
	 * because SGTP is incremented next point.
	 * There is few statement in the specification...
	 *
	 * NOTE(review): new_sgtp is computed but never written to the
	 * chip nor read afterwards in this function — confirm whether a
	 * SGT_ADR write was intended here.
	 */
	new_sgtp = data->cur_lunt->sglun_paddr +
		   (data->cur_lunt->cur_entry * sizeof(nsp32_sgtable));

	/*
	 * send messages
	 */
	for (i = 0; i < data->msgout_len; i++) {
		nsp32_dbg(NSP32_DEBUG_MSGOUTOCCUR,
			  "%d : 0x%x", i, data->msgoutbuf[i]);

		/*
		 * Check REQ is asserted.
		 */
		nsp32_wait_req(data, ASSERT);

		if (i == (data->msgout_len - 1)) {
			/*
			 * If the last message, set the AutoSCSI restart
			 * before send back the ack message. AutoSCSI
			 * restart automatically negate ATN signal.
			 */
			//command = (AUTO_MSGIN_00_OR_04 | AUTO_MSGIN_02);
			//nsp32_restart_autoscsi(SCpnt, command);
			nsp32_write2(base, COMMAND_CONTROL,
					 (CLEAR_CDB_FIFO_POINTER |
					  AUTO_COMMAND_PHASE     |
					  AUTOSCSI_RESTART       |
					  AUTO_MSGIN_00_OR_04    |
					  AUTO_MSGIN_02          ));
		}
		/*
		 * Write data with SACK, then wait sack is
		 * automatically negated.
		 */
		nsp32_write1(base, SCSI_DATA_WITH_ACK, data->msgoutbuf[i]);
		nsp32_wait_sack(data, NEGATE);

		nsp32_dbg(NSP32_DEBUG_MSGOUTOCCUR, "bus: 0x%x\n",
			  nsp32_read1(base, SCSI_BUS_MONITOR));
	};

	data->msgout_len = 0;

	nsp32_dbg(NSP32_DEBUG_MSGOUTOCCUR, "exit");
}

/*
 * Restart AutoSCSI
 *
 * Note: Restarting AutoSCSI needs set:
 *	 SYNC_REG, ACK_WIDTH, SGT_ADR, TRANSFER_CONTROL
 */
static void nsp32_restart_autoscsi(struct scsi_cmnd *SCpnt, unsigned short command)
{
	nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata;
	unsigned int   base = data->BaseAddress;
	unsigned short transfer = 0;

	nsp32_dbg(NSP32_DEBUG_RESTART, "enter");

	/*
	 * NOTE(review): this only logs; execution falls through and
	 * dereferences cur_target/cur_lunt below — confirm whether an
	 * early return was intended.
	 */
	if (data->cur_target == NULL || data->cur_lunt == NULL) {
		nsp32_msg(KERN_ERR, "Target or Lun is invalid");
	}

	/*
	 * set SYNC_REG
	 * Don't set BM_START_ADR before setting this register.
	 */
	nsp32_write1(base, SYNC_REG, data->cur_target->syncreg);

	/*
	 * set ACKWIDTH
	 */
	nsp32_write1(base, ACK_WIDTH, data->cur_target->ackwidth);

	/*
	 * set SREQ hazard killer sampling rate
	 */
	nsp32_write1(base, SREQ_SMPL_RATE, data->cur_target->sample_reg);

	/*
	 * set SGT ADDR (physical address)
	 */
	nsp32_write4(base, SGT_ADR, data->cur_lunt->sglun_paddr);

	/*
	 * set TRANSFER CONTROL REG
	 */
	transfer = 0;
	transfer |= (TRANSFER_GO | ALL_COUNTER_CLR);
	if (data->trans_method & NSP32_TRANSFER_BUSMASTER) {
		if (scsi_bufflen(SCpnt) > 0) {
			transfer |= BM_START;
		}
	} else if (data->trans_method & NSP32_TRANSFER_MMIO) {
		transfer |= CB_MMIO_MODE;
	} else if (data->trans_method & NSP32_TRANSFER_PIO) {
		transfer |= CB_IO_MODE;
	}
	nsp32_write2(base, TRANSFER_CONTROL, transfer);

	/*
	 * restart AutoSCSI
	 *
	 * TODO: COMMANDCONTROL_AUTO_COMMAND_PHASE is needed ?
	 */
	command |= (CLEAR_CDB_FIFO_POINTER |
		    AUTO_COMMAND_PHASE     |
		    AUTOSCSI_RESTART       );
	nsp32_write2(base, COMMAND_CONTROL, command);

	nsp32_dbg(NSP32_DEBUG_RESTART, "exit");
}

/*
 * cannot run automatically message in occur
 */
static void nsp32_msgin_occur(struct scsi_cmnd *SCpnt,
			      unsigned long irq_status,
			      unsigned short execph)
{
	nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata;
	unsigned int base = SCpnt->device->host->io_port;
	unsigned char msg;
	unsigned char msgtype;
	unsigned char newlun;
	unsigned short command  = 0;
	int            msgclear = TRUE;
	long           new_sgtp;
	int            ret;

	/*
	 * read first message
	 *    Use SCSIDATA_W_ACK instead of SCSIDATAIN, because the procedure
	 *    of Message-In have to be processed before sending back SCSI ACK.
	 */
	msg = nsp32_read1(base, SCSI_DATA_IN);
	data->msginbuf[(unsigned char)data->msgin_len] = msg;
	msgtype = data->msginbuf[0];
	nsp32_dbg(NSP32_DEBUG_MSGINOCCUR,
		  "enter: msglen: 0x%x msgin: 0x%x msgtype: 0x%x",
		  data->msgin_len, msg, msgtype);

	/*
	 * TODO: We need checking whether bus phase is message in?
	 */

	/*
	 * assert SCSI ACK
	 */
	nsp32_sack_assert(data);

	/*
	 * processing IDENTIFY (bit 7 set means an IDENTIFY message)
	 */
	if (msgtype & 0x80) {
		if (!(irq_status & IRQSTATUS_RESELECT_OCCUER)) {
			/* Invalid (non reselect) phase */
			goto reject;
		}

		newlun = msgtype & 0x1f; /* TODO: SPI-3 compliant? */
		ret = nsp32_reselection(SCpnt, newlun);
		if (ret == TRUE) {
			goto restart;
		} else {
			goto reject;
		}
	}

	/*
	 * processing messages except for IDENTIFY
	 *
	 * TODO: Messages are all SCSI-2 terminology. SCSI-3 compliance is TODO.
	 */
	switch (msgtype) {
	/*
	 * 1-byte message
	 */
	case COMMAND_COMPLETE:
	case DISCONNECT:
		/*
		 * These messages should not be occurred.
		 * They should be processed on AutoSCSI sequencer.
		 */
		nsp32_msg(KERN_WARNING,
			  "unexpected message of AutoSCSI MsgIn: 0x%x", msg);
		break;

	case RESTORE_POINTERS:
		/*
		 * AutoMsgIn03 is disabled, and HBA gets this message.
*/ if ((execph & DATA_IN_PHASE) || (execph & DATA_OUT_PHASE)) { unsigned int s_sacklen; s_sacklen = nsp32_read4(base, SAVED_SACK_CNT); if ((execph & MSGIN_02_VALID) && (s_sacklen > 0)) { nsp32_adjust_busfree(SCpnt, s_sacklen); } else { /* No need to rewrite SGT */ } } data->cur_lunt->msgin03 = FALSE; /* Update with the new value */ /* reset SACK/SavedACK counter (or ALL clear?) */ nsp32_write4(base, CLR_COUNTER, CLRCOUNTER_ALLMASK); /* * set new sg pointer */ new_sgtp = data->cur_lunt->sglun_paddr + (data->cur_lunt->cur_entry * sizeof(nsp32_sgtable)); nsp32_write4(base, SGT_ADR, new_sgtp); break; case SAVE_POINTERS: /* * These messages should not be occurred. * They should be processed on AutoSCSI sequencer. */ nsp32_msg (KERN_WARNING, "unexpected message of AutoSCSI MsgIn: SAVE_POINTERS"); break; case MESSAGE_REJECT: /* If previous message_out is sending SDTR, and get message_reject from target, SDTR negotiation is failed */ if (data->cur_target->sync_flag & (SDTR_INITIATOR | SDTR_TARGET)) { /* * Current target is negotiating SDTR, but it's * failed. Fall back to async transfer mode, and set * SDTR_DONE. */ nsp32_set_async(data, data->cur_target); data->cur_target->sync_flag &= ~SDTR_INITIATOR; data->cur_target->sync_flag |= SDTR_DONE; } break; case LINKED_CMD_COMPLETE: case LINKED_FLG_CMD_COMPLETE: /* queue tag is not supported currently */ nsp32_msg (KERN_WARNING, "unsupported message: 0x%x", msgtype); break; case INITIATE_RECOVERY: /* staring ECA (Extended Contingent Allegiance) state. */ /* This message is declined in SPI2 or later. */ goto reject; /* * 2-byte message */ case SIMPLE_QUEUE_TAG: case 0x23: /* * 0x23: Ignore_Wide_Residue is not declared in scsi.h. * No support is needed. */ if (data->msgin_len >= 1) { goto reject; } /* current position is 1-byte of 2 byte */ msgclear = FALSE; break; /* * extended message */ case EXTENDED_MESSAGE: if (data->msgin_len < 1) { /* * Current position does not reach 2-byte * (2-byte is extended message length). 
*/ msgclear = FALSE; break; } if ((data->msginbuf[1] + 1) > data->msgin_len) { /* * Current extended message has msginbuf[1] + 2 * (msgin_len starts counting from 0, so buf[1] + 1). * If current message position is not finished, * continue receiving message. */ msgclear = FALSE; break; } /* * Reach here means regular length of each type of * extended messages. */ switch (data->msginbuf[2]) { case EXTENDED_MODIFY_DATA_POINTER: /* TODO */ goto reject; /* not implemented yet */ break; case EXTENDED_SDTR: /* * Exchange this message between initiator and target. */ if (data->msgin_len != EXTENDED_SDTR_LEN + 1) { /* * received inappropriate message. */ goto reject; break; } nsp32_analyze_sdtr(SCpnt); break; case EXTENDED_EXTENDED_IDENTIFY: /* SCSI-I only, not supported. */ goto reject; /* not implemented yet */ break; case EXTENDED_WDTR: goto reject; /* not implemented yet */ break; default: goto reject; } break; default: goto reject; } restart: if (msgclear == TRUE) { data->msgin_len = 0; /* * If restarting AutoSCSI, but there are some message to out * (msgout_len > 0), set AutoATN, and set SCSIMSGOUT as 0 * (MV_VALID = 0). When commandcontrol is written with * AutoSCSI restart, at the same time MsgOutOccur should be * happened (however, such situation is really possible...?). */ if (data->msgout_len > 0) { nsp32_write4(base, SCSI_MSG_OUT, 0); command |= AUTO_ATN; } /* * restart AutoSCSI * If it's failed, COMMANDCONTROL_AUTO_COMMAND_PHASE is needed. */ command |= (AUTO_MSGIN_00_OR_04 | AUTO_MSGIN_02); /* * If current msgin03 is TRUE, then flag on. 
*/ if (data->cur_lunt->msgin03 == TRUE) { command |= AUTO_MSGIN_03; } data->cur_lunt->msgin03 = FALSE; } else { data->msgin_len++; } /* * restart AutoSCSI */ nsp32_restart_autoscsi(SCpnt, command); /* * wait SCSI REQ negate for REQ-ACK handshake */ nsp32_wait_req(data, NEGATE); /* * negate SCSI ACK */ nsp32_sack_negate(data); nsp32_dbg(NSP32_DEBUG_MSGINOCCUR, "exit"); return; reject: nsp32_msg(KERN_WARNING, "invalid or unsupported MessageIn, rejected. " "current msg: 0x%x (len: 0x%x), processing msg: 0x%x", msg, data->msgin_len, msgtype); nsp32_build_reject(SCpnt); data->msgin_len = 0; goto restart; } /* * */ static void nsp32_analyze_sdtr(struct scsi_cmnd *SCpnt) { nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata; nsp32_target *target = data->cur_target; nsp32_sync_table *synct; unsigned char get_period = data->msginbuf[3]; unsigned char get_offset = data->msginbuf[4]; int entry; int syncnum; nsp32_dbg(NSP32_DEBUG_MSGINOCCUR, "enter"); synct = data->synct; syncnum = data->syncnum; /* * If this inititor sent the SDTR message, then target responds SDTR, * initiator SYNCREG, ACKWIDTH from SDTR parameter. * Messages are not appropriate, then send back reject message. * If initiator did not send the SDTR, but target sends SDTR, * initiator calculator the appropriate parameter and send back SDTR. */ if (target->sync_flag & SDTR_INITIATOR) { /* * Initiator sent SDTR, the target responds and * send back negotiation SDTR. */ nsp32_dbg(NSP32_DEBUG_MSGINOCCUR, "target responds SDTR"); target->sync_flag &= ~SDTR_INITIATOR; target->sync_flag |= SDTR_DONE; /* * offset: */ if (get_offset > SYNC_OFFSET) { /* * Negotiation is failed, the target send back * unexpected offset value. */ goto reject; } if (get_offset == ASYNC_OFFSET) { /* * Negotiation is succeeded, the target want * to fall back into asynchronous transfer mode. */ goto async; } /* * period: * Check whether sync period is too short. If too short, * fall back to async mode. 
If it's ok, then investigate * the received sync period. If sync period is acceptable * between sync table start_period and end_period, then * set this I_T nexus as sent offset and period. * If it's not acceptable, send back reject and fall back * to async mode. */ if (get_period < data->synct[0].period_num) { /* * Negotiation is failed, the target send back * unexpected period value. */ goto reject; } entry = nsp32_search_period_entry(data, target, get_period); if (entry < 0) { /* * Target want to use long period which is not * acceptable NinjaSCSI-32Bi/UDE. */ goto reject; } /* * Set new sync table and offset in this I_T nexus. */ nsp32_set_sync_entry(data, target, entry, get_offset); } else { /* Target send SDTR to initiator. */ nsp32_dbg(NSP32_DEBUG_MSGINOCCUR, "target send SDTR"); target->sync_flag |= SDTR_INITIATOR; /* offset: */ if (get_offset > SYNC_OFFSET) { /* send back as SYNC_OFFSET */ get_offset = SYNC_OFFSET; } /* period: */ if (get_period < data->synct[0].period_num) { get_period = data->synct[0].period_num; } entry = nsp32_search_period_entry(data, target, get_period); if (get_offset == ASYNC_OFFSET || entry < 0) { nsp32_set_async(data, target); nsp32_build_sdtr(SCpnt, 0, ASYNC_OFFSET); } else { nsp32_set_sync_entry(data, target, entry, get_offset); nsp32_build_sdtr(SCpnt, get_period, get_offset); } } target->period = get_period; nsp32_dbg(NSP32_DEBUG_MSGINOCCUR, "exit"); return; reject: /* * If the current message is unacceptable, send back to the target * with reject message. */ nsp32_build_reject(SCpnt); async: nsp32_set_async(data, target); /* set as ASYNC transfer mode */ target->period = 0; nsp32_dbg(NSP32_DEBUG_MSGINOCCUR, "exit: set async"); return; } /* * Search config entry number matched in sync_table from given * target and speed period value. If failed to search, return negative value. 
*/ static int nsp32_search_period_entry(nsp32_hw_data *data, nsp32_target *target, unsigned char period) { int i; if (target->limit_entry >= data->syncnum) { nsp32_msg(KERN_ERR, "limit_entry exceeds syncnum!"); target->limit_entry = 0; } for (i = target->limit_entry; i < data->syncnum; i++) { if (period >= data->synct[i].start_period && period <= data->synct[i].end_period) { break; } } /* * Check given period value is over the sync_table value. * If so, return max value. */ if (i == data->syncnum) { i = -1; } return i; } /* * target <-> initiator use ASYNC transfer */ static void nsp32_set_async(nsp32_hw_data *data, nsp32_target *target) { unsigned char period = data->synct[target->limit_entry].period_num; target->offset = ASYNC_OFFSET; target->period = 0; target->syncreg = TO_SYNCREG(period, ASYNC_OFFSET); target->ackwidth = 0; target->sample_reg = 0; nsp32_dbg(NSP32_DEBUG_SYNC, "set async"); } /* * target <-> initiator use maximum SYNC transfer */ static void nsp32_set_max_sync(nsp32_hw_data *data, nsp32_target *target, unsigned char *period, unsigned char *offset) { unsigned char period_num, ackwidth; period_num = data->synct[target->limit_entry].period_num; *period = data->synct[target->limit_entry].start_period; ackwidth = data->synct[target->limit_entry].ackwidth; *offset = SYNC_OFFSET; target->syncreg = TO_SYNCREG(period_num, *offset); target->ackwidth = ackwidth; target->offset = *offset; target->sample_reg = 0; /* disable SREQ sampling */ } /* * target <-> initiator use entry number speed */ static void nsp32_set_sync_entry(nsp32_hw_data *data, nsp32_target *target, int entry, unsigned char offset) { unsigned char period, ackwidth, sample_rate; period = data->synct[entry].period_num; ackwidth = data->synct[entry].ackwidth; offset = offset; sample_rate = data->synct[entry].sample_rate; target->syncreg = TO_SYNCREG(period, offset); target->ackwidth = ackwidth; target->offset = offset; target->sample_reg = sample_rate | SAMPLING_ENABLE; 
nsp32_dbg(NSP32_DEBUG_SYNC, "set sync"); } /* * It waits until SCSI REQ becomes assertion or negation state. * * Note: If nsp32_msgin_occur is called, we asserts SCSI ACK. Then * connected target responds SCSI REQ negation. We have to wait * SCSI REQ becomes negation in order to negate SCSI ACK signal for * REQ-ACK handshake. */ static void nsp32_wait_req(nsp32_hw_data *data, int state) { unsigned int base = data->BaseAddress; int wait_time = 0; unsigned char bus, req_bit; if (!((state == ASSERT) || (state == NEGATE))) { nsp32_msg(KERN_ERR, "unknown state designation"); } /* REQ is BIT(5) */ req_bit = (state == ASSERT ? BUSMON_REQ : 0); do { bus = nsp32_read1(base, SCSI_BUS_MONITOR); if ((bus & BUSMON_REQ) == req_bit) { nsp32_dbg(NSP32_DEBUG_WAIT, "wait_time: %d", wait_time); return; } udelay(1); wait_time++; } while (wait_time < REQSACK_TIMEOUT_TIME); nsp32_msg(KERN_WARNING, "wait REQ timeout, req_bit: 0x%x", req_bit); } /* * It waits until SCSI SACK becomes assertion or negation state. */ static void nsp32_wait_sack(nsp32_hw_data *data, int state) { unsigned int base = data->BaseAddress; int wait_time = 0; unsigned char bus, ack_bit; if (!((state == ASSERT) || (state == NEGATE))) { nsp32_msg(KERN_ERR, "unknown state designation"); } /* ACK is BIT(4) */ ack_bit = (state == ASSERT ? BUSMON_ACK : 0); do { bus = nsp32_read1(base, SCSI_BUS_MONITOR); if ((bus & BUSMON_ACK) == ack_bit) { nsp32_dbg(NSP32_DEBUG_WAIT, "wait_time: %d", wait_time); return; } udelay(1); wait_time++; } while (wait_time < REQSACK_TIMEOUT_TIME); nsp32_msg(KERN_WARNING, "wait SACK timeout, ack_bit: 0x%x", ack_bit); } /* * assert SCSI ACK * * Note: SCSI ACK assertion needs with ACKENB=1, AUTODIRECTION=1. 
 */
static void nsp32_sack_assert(nsp32_hw_data *data)
{
	unsigned int  base = data->BaseAddress;
	unsigned char busctrl;

	/* read-modify-write: set ACK while keeping the other bus bits */
	busctrl  = nsp32_read1(base, SCSI_BUS_CONTROL);
	busctrl	|= (BUSCTL_ACK | AUTODIRECTION | ACKENB);
	nsp32_write1(base, SCSI_BUS_CONTROL, busctrl);
}

/*
 * negate SCSI ACK
 */
static void nsp32_sack_negate(nsp32_hw_data *data)
{
	unsigned int  base = data->BaseAddress;
	unsigned char busctrl;

	busctrl  = nsp32_read1(base, SCSI_BUS_CONTROL);
	busctrl	&= ~BUSCTL_ACK;
	nsp32_write1(base, SCSI_BUS_CONTROL, busctrl);
}

/*
 * Detect and set up the HBA: allocate the Scsi_Host, DMA buffers and
 * per-LUN tables, read the EEPROM, reset the SCSI bus, grab IRQ and
 * I/O region, then register with the SCSI midlayer.
 *
 * Note: n_io_port is defined as 0x7f because I/O register port is
 * assigned as:
 *	0x800-0x8ff: memory mapped I/O port
 *	0x900-0xbff: (map same 0x800-0x8ff I/O port image repeatedly)
 *	0xc00-0xfff: CardBus status registers
 *
 * Returns 0 on success, 1 on failure (resources unwound via gotos).
 */
static int nsp32_detect(struct pci_dev *pdev)
{
	struct Scsi_Host *host;	/* registered host structure */
	struct resource  *res;
	nsp32_hw_data    *data;
	int ret;
	int i, j;

	nsp32_dbg(NSP32_DEBUG_REGISTER, "enter");

	/*
	 * register this HBA as SCSI device
	 */
	host = scsi_host_alloc(&nsp32_template, sizeof(nsp32_hw_data));
	if (host == NULL) {
		nsp32_msg (KERN_ERR, "failed to scsi register");
		goto err;
	}

	/*
	 * set nsp32_hw_data
	 * (copy the probe-time template filled in by nsp32_probe)
	 */
	data = (nsp32_hw_data *)host->hostdata;

	memcpy(data, &nsp32_data_base, sizeof(nsp32_hw_data));

	host->irq       = data->IrqNumber;
	host->io_port   = data->BaseAddress;
	host->unique_id = data->BaseAddress;
	host->n_io_port = data->NumAddress;
	host->base      = (unsigned long)data->MmioAddress;

	data->Host      = host;
	spin_lock_init(&(data->Lock));

	data->cur_lunt   = NULL;
	data->cur_target = NULL;

	/*
	 * Bus master transfer mode is supported currently.
	 */
	data->trans_method = NSP32_TRANSFER_BUSMASTER;

	/*
	 * Set clock div, CLOCK_4 (HBA has own external clock, and
	 * dividing * 100ns/4).
	 * Currently CLOCK_4 has only tested, not for CLOCK_2/PCICLK yet.
	 */
	data->clock = CLOCK_4;

	/*
	 * Select appropriate nsp32_sync_table and set I_CLOCKDIV.
	 */
	switch (data->clock) {
	case CLOCK_4:
		/* If data->clock is CLOCK_4, then select 40M sync table. */
		data->synct   = nsp32_sync_table_40M;
		data->syncnum = ARRAY_SIZE(nsp32_sync_table_40M);
		break;
	case CLOCK_2:
		/* If data->clock is CLOCK_2, then select 20M sync table. */
		data->synct   = nsp32_sync_table_20M;
		data->syncnum = ARRAY_SIZE(nsp32_sync_table_20M);
		break;
	case PCICLK:
		/* If data->clock is PCICLK, then select pci sync table. */
		data->synct   = nsp32_sync_table_pci;
		data->syncnum = ARRAY_SIZE(nsp32_sync_table_pci);
		break;
	default:
		nsp32_msg(KERN_WARNING,
			  "Invalid clock div is selected, set CLOCK_4.");
		/* Use default value CLOCK_4 */
		data->clock   = CLOCK_4;
		data->synct   = nsp32_sync_table_40M;
		data->syncnum = ARRAY_SIZE(nsp32_sync_table_40M);
	}

	/*
	 * setup nsp32_lunt
	 */

	/*
	 * setup DMA
	 */
	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
		nsp32_msg (KERN_ERR, "failed to set PCI DMA mask");
		goto scsi_unregister;
	}

	/*
	 * allocate autoparam DMA resource.
	 */
	data->autoparam = pci_alloc_consistent(pdev, sizeof(nsp32_autoparam),
					       &(data->auto_paddr));
	if (data->autoparam == NULL) {
		nsp32_msg(KERN_ERR, "failed to allocate DMA memory");
		goto scsi_unregister;
	}

	/*
	 * allocate scatter-gather DMA resource.
	 */
	data->sg_list = pci_alloc_consistent(pdev, NSP32_SG_TABLE_SIZE,
					     &(data->sg_paddr));
	if (data->sg_list == NULL) {
		nsp32_msg(KERN_ERR, "failed to allocate DMA memory");
		goto free_autoparam;
	}

	/*
	 * Point every per-target/per-LUN slot at its slice of the shared
	 * scatter-gather buffer (both kernel virtual and bus address).
	 */
	for (i = 0; i < ARRAY_SIZE(data->lunt); i++) {
		for (j = 0; j < ARRAY_SIZE(data->lunt[0]); j++) {
			int offset = i * ARRAY_SIZE(data->lunt[0]) + j;
			nsp32_lunt tmp = {
				.SCpnt       = NULL,
				.save_datp   = 0,
				.msgin03     = FALSE,
				.sg_num      = 0,
				.cur_entry   = 0,
				.sglun       = &(data->sg_list[offset]),
				.sglun_paddr = data->sg_paddr + (offset * sizeof(nsp32_sglun)),
			};

			data->lunt[i][j] = tmp;
		}
	}

	/*
	 * setup target: start every target in async mode with no
	 * pending SDTR negotiation.
	 */
	for (i = 0; i < ARRAY_SIZE(data->target); i++) {
		nsp32_target *target = &(data->target[i]);

		target->limit_entry = 0;
		target->sync_flag   = 0;
		nsp32_set_async(data, target);
	}

	/*
	 * EEPROM check
	 */
	ret = nsp32_getprom_param(data);
	if (ret == FALSE) {
		data->resettime = 3;	/* default 3 */
	}

	/*
	 * setup HBA
	 */
	nsp32hw_init(data);

	snprintf(data->info_str, sizeof(data->info_str),
		 "NinjaSCSI-32Bi/UDE: irq %d, io 0x%lx+0x%x",
		 host->irq, host->io_port, host->n_io_port);

	/*
	 * SCSI bus reset
	 *
	 * Note: It's important to reset SCSI bus in initialization phase.
	 * NinjaSCSI-32Bi/UDE HBA EEPROM seems to exchange SDTR when
	 * system is coming up, so SCSI devices connected to HBA is set as
	 * un-asynchronous mode. It brings the merit that this HBA is
	 * ready to start synchronous transfer without any preparation,
	 * but we are difficult to control transfer speed. In addition,
	 * it prevents device transfer speed from effecting EEPROM start-up
	 * SDTR. NinjaSCSI-32Bi/UDE has the feature if EEPROM is set as
	 * Auto Mode, then FAST-10M is selected when SCSI devices are
	 * connected same or more than 4 devices. It should be avoided
	 * depending on this specification. Thus, resetting the SCSI bus
	 * restores all connected SCSI devices to asynchronous mode, then
	 * this driver set SDTR safely later, and we can control all SCSI
	 * device transfer mode.
	 */
	nsp32_do_bus_reset(data);

	ret = request_irq(host->irq, do_nsp32_isr, IRQF_SHARED, "nsp32", data);
	if (ret < 0) {
		nsp32_msg(KERN_ERR, "Unable to allocate IRQ for NinjaSCSI32 "
			  "SCSI PCI controller. Interrupt: %d", host->irq);
		goto free_sg_list;
	}

	/*
	 * PCI IO register
	 */
	res = request_region(host->io_port, host->n_io_port, "nsp32");
	if (res == NULL) {
		nsp32_msg(KERN_ERR,
			  "I/O region 0x%lx+0x%lx is already used",
			  data->BaseAddress, data->NumAddress);
		goto free_irq;
	}

	ret = scsi_add_host(host, &pdev->dev);
	if (ret) {
		nsp32_msg(KERN_ERR, "failed to add scsi host");
		goto free_region;
	}
	scsi_scan_host(host);
	pci_set_drvdata(pdev, host);
	return 0;

	/* error unwind: release in strict reverse order of acquisition */
 free_region:
	release_region(host->io_port, host->n_io_port);

 free_irq:
	free_irq(host->irq, data);

 free_sg_list:
	pci_free_consistent(pdev, NSP32_SG_TABLE_SIZE,
			    data->sg_list, data->sg_paddr);

 free_autoparam:
	pci_free_consistent(pdev, sizeof(nsp32_autoparam),
			    data->autoparam, data->auto_paddr);

 scsi_unregister:
	scsi_host_put(host);

 err:
	return 1;
}

/*
 * Free everything nsp32_detect acquired; each resource is released
 * only if it was actually obtained. Always returns 0.
 */
static int nsp32_release(struct Scsi_Host *host)
{
	nsp32_hw_data *data = (nsp32_hw_data *)host->hostdata;

	if (data->autoparam) {
		pci_free_consistent(data->Pci, sizeof(nsp32_autoparam),
				    data->autoparam, data->auto_paddr);
	}

	if (data->sg_list) {
		pci_free_consistent(data->Pci, NSP32_SG_TABLE_SIZE,
				    data->sg_list, data->sg_paddr);
	}

	if (host->irq) {
		free_irq(host->irq, data);
	}

	if (host->io_port && host->n_io_port) {
		release_region(host->io_port, host->n_io_port);
	}

	if (data->MmioAddress) {
		iounmap(data->MmioAddress);
	}

	return 0;
}

/* Return the human-readable adapter description built in nsp32_detect(). */
static const char *nsp32_info(struct Scsi_Host *shpnt)
{
	nsp32_hw_data *data = (nsp32_hw_data *)shpnt->hostdata;

	return data->info_str;
}

/****************************************************************************
 * error handler
 */

/*
 * Abort the current command: stop bus-master transfer, reset any pending
 * SDTR negotiation for the target, and complete the command as DID_ABORT.
 */
static int nsp32_eh_abort(struct scsi_cmnd *SCpnt)
{
	nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata;
	unsigned int   base = SCpnt->device->host->io_port;

	nsp32_msg(KERN_WARNING, "abort");

	if (data->cur_lunt->SCpnt == NULL) {
		/* nothing in flight on this LUN */
		nsp32_dbg(NSP32_DEBUG_BUSRESET, "abort failed");
		return FAILED;
	}

	if (data->cur_target->sync_flag & (SDTR_INITIATOR | SDTR_TARGET)) {
		/* reset SDTR negotiation */
		data->cur_target->sync_flag = 0;
		nsp32_set_async(data, data->cur_target);
	}

	nsp32_write2(base, TRANSFER_CONTROL, 0);
	nsp32_write2(base, BM_CNT, 0);

	SCpnt->result = DID_ABORT << 16;
	nsp32_scsi_done(SCpnt);

	nsp32_dbg(NSP32_DEBUG_BUSRESET, "abort success");
	return SUCCESS;
}

/*
 * Reset the SCSI bus with interrupts masked for the duration.
 */
static int nsp32_eh_bus_reset(struct scsi_cmnd *SCpnt)
{
	nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata;
	unsigned int   base = SCpnt->device->host->io_port;

	spin_lock_irq(SCpnt->device->host->host_lock);

	nsp32_msg(KERN_INFO, "Bus Reset");
	nsp32_dbg(NSP32_DEBUG_BUSRESET, "SCpnt=0x%x", SCpnt);

	nsp32_write2(base, IRQ_CONTROL, IRQ_CONTROL_ALL_IRQ_MASK);
	nsp32_do_bus_reset(data);
	nsp32_write2(base, IRQ_CONTROL, 0);

	spin_unlock_irq(SCpnt->device->host->host_lock);
	return SUCCESS;	/* SCSI bus reset is succeeded at any time. */
}

/*
 * Stop all transfers, drop every target back to async, pulse SCSI RST,
 * and drain stale interrupt status with dummy reads.
 */
static void nsp32_do_bus_reset(nsp32_hw_data *data)
{
	unsigned int   base = data->BaseAddress;
	unsigned short intrdat;
	int i;

	nsp32_dbg(NSP32_DEBUG_BUSRESET, "in");

	/*
	 * stop all transfer
	 * clear TRANSFERCONTROL_BM_START
	 * clear counter
	 */
	nsp32_write2(base, TRANSFER_CONTROL, 0);
	nsp32_write4(base, BM_CNT, 0);
	nsp32_write4(base, CLR_COUNTER, CLRCOUNTER_ALLMASK);

	/*
	 * fall back to asynchronous transfer mode
	 * initialize SDTR negotiation flag
	 */
	for (i = 0; i < ARRAY_SIZE(data->target); i++) {
		nsp32_target *target = &data->target[i];

		target->sync_flag = 0;
		nsp32_set_async(data, target);
	}

	/*
	 * reset SCSI bus: hold RST asserted for RESET_HOLD_TIME
	 */
	nsp32_write1(base, SCSI_BUS_CONTROL, BUSCTL_RST);
	udelay(RESET_HOLD_TIME);
	nsp32_write1(base, SCSI_BUS_CONTROL, 0);
	for(i = 0; i < 5; i++) {
		intrdat = nsp32_read2(base, IRQ_STATUS); /* dummy read */
		nsp32_dbg(NSP32_DEBUG_BUSRESET, "irq:1: 0x%x", intrdat);
	}

	data->CurrentSC = NULL;
}

/*
 * Reinitialize the HBA hardware and then reset the SCSI bus.
 */
static int nsp32_eh_host_reset(struct scsi_cmnd *SCpnt)
{
	struct Scsi_Host *host = SCpnt->device->host;
	unsigned int      base = SCpnt->device->host->io_port;
	nsp32_hw_data    *data = (nsp32_hw_data *)host->hostdata;

	nsp32_msg(KERN_INFO, "Host Reset");
	nsp32_dbg(NSP32_DEBUG_BUSRESET, "SCpnt=0x%x", SCpnt);

	spin_lock_irq(SCpnt->device->host->host_lock);

	nsp32hw_init(data);
	nsp32_write2(base, IRQ_CONTROL, IRQ_CONTROL_ALL_IRQ_MASK);
	nsp32_do_bus_reset(data);
	nsp32_write2(base, IRQ_CONTROL, 0);

	spin_unlock_irq(SCpnt->device->host->host_lock);
	return SUCCESS;	/* Host reset is succeeded at any time. */
}


/**************************************************************************
 * EEPROM handler
 */

/*
 * getting EEPROM parameter
 *
 * Validates the EEPROM signature bytes (0x7e/0x7f must read 0x55/0xaa),
 * then dispatches to the board-specific reader by PCI vendor/device ID.
 * Returns TRUE on success, FALSE if no/unknown EEPROM.
 */
static int nsp32_getprom_param(nsp32_hw_data *data)
{
	int vendor = data->pci_devid->vendor;
	int device = data->pci_devid->device;
	int ret, val, i;

	/*
	 * EEPROM checking.
	 */
	ret = nsp32_prom_read(data, 0x7e);
	if (ret != 0x55) {
		nsp32_msg(KERN_INFO, "No EEPROM detected: 0x%x", ret);
		return FALSE;
	}
	ret = nsp32_prom_read(data, 0x7f);
	if (ret != 0xaa) {
		nsp32_msg(KERN_INFO, "Invalid number: 0x%x", ret);
		return FALSE;
	}

	/*
	 * check EEPROM type
	 */
	if (vendor == PCI_VENDOR_ID_WORKBIT &&
	    device == PCI_DEVICE_ID_WORKBIT_STANDARD) {
		ret = nsp32_getprom_c16(data);
	} else if (vendor == PCI_VENDOR_ID_WORKBIT &&
		   device == PCI_DEVICE_ID_NINJASCSI_32BIB_LOGITEC) {
		ret = nsp32_getprom_at24(data);
	} else if (vendor == PCI_VENDOR_ID_WORKBIT &&
		   device == PCI_DEVICE_ID_NINJASCSI_32UDE_MELCO ) {
		ret = nsp32_getprom_at24(data);
	} else {
		nsp32_msg(KERN_WARNING, "Unknown EEPROM");
		ret = FALSE;
	}

	/* for debug : SPROM data full checking */
	for (i = 0; i <= 0x1f; i++) {
		val = nsp32_prom_read(data, i);
		nsp32_dbg(NSP32_DEBUG_EEPROM,
			  "rom address 0x%x : 0x%x", i, val);
	}

	return ret;
}


/*
 * AT24C01A (Logitec: LHA-600S), AT24C02 (Melco Buffalo: IFC-USLP) data map:
 *
 *   ROMADDR
 *   0x00 - 0x06 : Device Synchronous Transfer Period (SCSI ID 0 - 6)
 *			Value 0x0: ASYNC, 0x0c: Ultra-20M, 0x19: Fast-10M
 *   0x07        : HBA Synchronous Transfer Period
 *			Value 0: AutoSync, 1: Manual Setting
 *   0x08 - 0x0f : Not Used? (0x0)
 *   0x10        : Bus Termination
 *			Value 0: Auto[ON], 1: ON, 2: OFF
 *   0x11        : Not Used? (0)
 *   0x12        : Bus Reset Delay Time (0x03)
 *   0x13        : Bootable CD Support
 *			Value 0: Disable, 1: Enable
 *   0x14        : Device Scan
 *			Bit   7  6  5  4  3  2  1  0
 *			      |  <----------------->
 *			      |    SCSI ID: Value 0: Skip, 1: YES
 *			      |->  Value 0: ALL scan,  Value 1: Manual
 *   0x15 - 0x1b : Not Used? (0)
 *   0x1c        : Constant? (0x01) (clock div?)
 *   0x1d - 0x7c : Not Used (0xff)
 *   0x7d        : Not Used? (0xff)
 *   0x7e        : Constant (0x55), Validity signature
 *   0x7f        : Constant (0xaa), Validity signature
 */
static int nsp32_getprom_at24(nsp32_hw_data *data)
{
	int           ret, i;
	int           auto_sync;
	nsp32_target *target;
	int           entry;

	/*
	 * Reset time which is designated by EEPROM.
	 *
	 * TODO: Not used yet.
	 */
	data->resettime = nsp32_prom_read(data, 0x12);

	/*
	 * HBA Synchronous Transfer Period
	 *
	 * Note: auto_sync = 0: auto, 1: manual. Ninja SCSI HBA spec says
	 * that if auto_sync is 0 (auto), and connected SCSI devices are
	 * same or lower than 3, then transfer speed is set as ULTRA-20M.
	 * On the contrary if connected SCSI devices are same or higher
	 * than 4, then transfer speed is set as FAST-10M.
	 *
	 * I break this rule. The number of connected SCSI devices are
	 * only ignored. If auto_sync is 0 (auto), then transfer speed is
	 * forced as ULTRA-20M.
	 */
	ret = nsp32_prom_read(data, 0x07);
	switch (ret) {
	case 0:
		auto_sync = TRUE;
		break;
	case 1:
		auto_sync = FALSE;
		break;
	default:
		nsp32_msg(KERN_WARNING,
			  "Unsupported Auto Sync mode. Fall back to manual mode.");
		auto_sync = TRUE;
	}

	if (trans_mode == ULTRA20M_MODE) {
		auto_sync = TRUE;
	}

	/*
	 * each device Synchronous Transfer Period
	 */
	for (i = 0; i < NSP32_HOST_SCSIID; i++) {
		target = &data->target[i];
		if (auto_sync == TRUE) {
			target->limit_entry = 0;   /* set as ULTRA20M */
		} else {
			ret   = nsp32_prom_read(data, i);
			entry = nsp32_search_period_entry(data, target, ret);
			if (entry < 0) {
				/* search failed... set maximum speed */
				entry = 0;
			}
			target->limit_entry = entry;
		}
	}

	return TRUE;
}


/*
 * C16 110 (I-O Data: SC-NBD) data map:
 *
 *   ROMADDR
 *   0x00 - 0x06 : Device Synchronous Transfer Period (SCSI ID 0 - 6)
 *			Value 0x0: 20MB/S, 0x1: 10MB/S, 0x2: 5MB/S, 0x3: ASYNC
 *   0x07        : 0 (HBA Synchronous Transfer Period: Auto Sync)
 *   0x08 - 0x0f : Not Used? (0x0)
 *   0x10        : Transfer Mode
 *			Value 0: PIO, 1: Busmater
 *   0x11        : Bus Reset Delay Time (0x00-0x20)
 *   0x12        : Bus Termination
 *			Value 0: Disable, 1: Enable
 *   0x13 - 0x19 : Disconnection
 *			Value 0: Disable, 1: Enable
 *   0x1a - 0x7c : Not Used? (0)
 *   0x7d        : Not Used?
 *			(0xf8)
 *   0x7e        : Constant (0x55), Validity signature
 *   0x7f        : Constant (0xaa), Validity signature
 */
static int nsp32_getprom_c16(nsp32_hw_data *data)
{
	int           ret, i;
	nsp32_target *target;
	int           entry, val;

	/*
	 * Reset time which is designated by EEPROM.
	 *
	 * TODO: Not used yet.
	 */
	data->resettime = nsp32_prom_read(data, 0x11);

	/*
	 * each device Synchronous Transfer Period
	 * (map the 2-bit EEPROM speed code to a period value, then look up
	 *  the matching sync table entry)
	 */
	for (i = 0; i < NSP32_HOST_SCSIID; i++) {
		target = &data->target[i];
		ret = nsp32_prom_read(data, i);
		switch (ret) {
		case 0:		/* 20MB/s */
			val = 0x0c;
			break;
		case 1:		/* 10MB/s */
			val = 0x19;
			break;
		case 2:		/* 5MB/s */
			val = 0x32;
			break;
		case 3:		/* ASYNC */
			val = 0x00;
			break;
		default:	/* default 20MB/s */
			val = 0x0c;
			break;
		}
		entry = nsp32_search_period_entry(data, target, val);
		if (entry < 0 || trans_mode == ULTRA20M_MODE) {
			/* search failed... set maximum speed */
			entry = 0;
		}
		target->limit_entry = entry;
	}

	return TRUE;
}


/*
 * Atmel AT24C01A (drived in 5V) serial EEPROM routines
 *
 * The EEPROM is reached by bit-banging an I2C-style bus through the
 * SERIAL_ROM_CTL index register (see nsp32_prom_set/get). The statement
 * order below IS the wire protocol - do not reorder.
 */
static int nsp32_prom_read(nsp32_hw_data *data, int romaddr)
{
	int i, val;

	/* start condition */
	nsp32_prom_start(data);

	/* device address */
	nsp32_prom_write_bit(data, 1);	/* 1 */
	nsp32_prom_write_bit(data, 0);	/* 0 */
	nsp32_prom_write_bit(data, 1);	/* 1 */
	nsp32_prom_write_bit(data, 0);	/* 0 */
	nsp32_prom_write_bit(data, 0);	/* A2: 0 (GND) */
	nsp32_prom_write_bit(data, 0);	/* A1: 0 (GND) */
	nsp32_prom_write_bit(data, 0);	/* A0: 0 (GND) */

	/* R/W: W for dummy write */
	nsp32_prom_write_bit(data, 0);

	/* ack */
	nsp32_prom_write_bit(data, 0);

	/* word address */
	for (i = 7; i >= 0; i--) {
		nsp32_prom_write_bit(data, ((romaddr >> i) & 1));
	}

	/* ack */
	nsp32_prom_write_bit(data, 0);

	/* start condition */
	nsp32_prom_start(data);

	/* device address */
	nsp32_prom_write_bit(data, 1);	/* 1 */
	nsp32_prom_write_bit(data, 0);	/* 0 */
	nsp32_prom_write_bit(data, 1);	/* 1 */
	nsp32_prom_write_bit(data, 0);	/* 0 */
	nsp32_prom_write_bit(data, 0);	/* A2: 0 (GND) */
	nsp32_prom_write_bit(data, 0);	/* A1: 0 (GND) */
	nsp32_prom_write_bit(data, 0);	/* A0: 0 (GND) */

	/* R/W: R */
	nsp32_prom_write_bit(data, 1);

	/* ack */
	nsp32_prom_write_bit(data, 0);

	/* data... */
	val = 0;
	for (i = 7; i >= 0; i--) {
		val += (nsp32_prom_read_bit(data) << i);
	}

	/* no ack */
	nsp32_prom_write_bit(data, 1);

	/* stop condition */
	nsp32_prom_stop(data);

	return val;
}

/*
 * Set or clear one control line (SCL/SDA/ENA) in SERIAL_ROM_CTL,
 * preserving the other bits; the udelay paces the bus clock.
 */
static void nsp32_prom_set(nsp32_hw_data *data, int bit, int val)
{
	int base = data->BaseAddress;
	int tmp;

	tmp = nsp32_index_read1(base, SERIAL_ROM_CTL);

	if (val == 0) {
		tmp &= ~bit;
	} else {
		tmp |=  bit;
	}

	nsp32_index_write1(base, SERIAL_ROM_CTL, tmp);

	udelay(10);
}

/*
 * Sample one control line; only SDA is a valid input here.
 */
static int nsp32_prom_get(nsp32_hw_data *data, int bit)
{
	int base = data->BaseAddress;
	int tmp, ret;

	if (bit != SDA) {
		nsp32_msg(KERN_ERR, "return value is not appropriate");
		return 0;
	}


	tmp = nsp32_index_read1(base, SERIAL_ROM_CTL) & bit;

	if (tmp == 0) {
		ret = 0;
	} else {
		ret = 1;
	}

	udelay(10);

	return ret;
}

static void nsp32_prom_start (nsp32_hw_data *data)
{
	/* start condition */
	nsp32_prom_set(data, SCL, 1);
	nsp32_prom_set(data, SDA, 1);
	nsp32_prom_set(data, ENA, 1);	/* output mode */
	nsp32_prom_set(data, SDA, 0);	/* keeping SCL=1 and transiting
					 * SDA 1->0 is start condition */
	nsp32_prom_set(data, SCL, 0);
}

static void nsp32_prom_stop (nsp32_hw_data *data)
{
	/* stop condition */
	nsp32_prom_set(data, SCL, 1);
	nsp32_prom_set(data, SDA, 0);
	nsp32_prom_set(data, ENA, 1);	/* output mode */
	nsp32_prom_set(data, SDA, 1);
	nsp32_prom_set(data, SCL, 0);
}

/* Clock one bit of 'val' out on SDA (data valid while SCL pulses high). */
static void nsp32_prom_write_bit(nsp32_hw_data *data, int val)
{
	/* write */
	nsp32_prom_set(data, SDA, val);
	nsp32_prom_set(data, SCL, 1  );
	nsp32_prom_set(data, SCL, 0  );
}

/* Clock one bit in from SDA (bus switched to input for the sample). */
static int nsp32_prom_read_bit(nsp32_hw_data *data)
{
	int val;

	/* read */
	nsp32_prom_set(data, ENA, 0);	/* input mode */
	nsp32_prom_set(data, SCL, 1);

	val = nsp32_prom_get(data, SDA);

	nsp32_prom_set(data, SCL, 0);
	nsp32_prom_set(data, ENA, 1);	/* output mode */

	return val;
}


/**************************************************************************
 * Power Management
 */
#ifdef CONFIG_PM

/*
Device suspended */ static int nsp32_suspend(struct pci_dev *pdev, pm_message_t state) { struct Scsi_Host *host = pci_get_drvdata(pdev); nsp32_msg(KERN_INFO, "pci-suspend: pdev=0x%p, state=%ld, slot=%s, host=0x%p", pdev, state, pci_name(pdev), host); pci_save_state (pdev); pci_disable_device (pdev); pci_set_power_state(pdev, pci_choose_state(pdev, state)); return 0; } /* Device woken up */ static int nsp32_resume(struct pci_dev *pdev) { struct Scsi_Host *host = pci_get_drvdata(pdev); nsp32_hw_data *data = (nsp32_hw_data *)host->hostdata; unsigned short reg; nsp32_msg(KERN_INFO, "pci-resume: pdev=0x%p, slot=%s, host=0x%p", pdev, pci_name(pdev), host); pci_set_power_state(pdev, PCI_D0); pci_enable_wake (pdev, PCI_D0, 0); pci_restore_state (pdev); reg = nsp32_read2(data->BaseAddress, INDEX_REG); nsp32_msg(KERN_INFO, "io=0x%x reg=0x%x", data->BaseAddress, reg); if (reg == 0xffff) { nsp32_msg(KERN_INFO, "missing device. abort resume."); return 0; } nsp32hw_init (data); nsp32_do_bus_reset(data); nsp32_msg(KERN_INFO, "resume success"); return 0; } #endif /************************************************************************ * PCI/Cardbus probe/remove routine */ static int __devinit nsp32_probe(struct pci_dev *pdev, const struct pci_device_id *id) { int ret; nsp32_hw_data *data = &nsp32_data_base; nsp32_dbg(NSP32_DEBUG_REGISTER, "enter"); ret = pci_enable_device(pdev); if (ret) { nsp32_msg(KERN_ERR, "failed to enable pci device"); return ret; } data->Pci = pdev; data->pci_devid = id; data->IrqNumber = pdev->irq; data->BaseAddress = pci_resource_start(pdev, 0); data->NumAddress = pci_resource_len (pdev, 0); data->MmioAddress = pci_ioremap_bar(pdev, 1); data->MmioLength = pci_resource_len (pdev, 1); pci_set_master(pdev); ret = nsp32_detect(pdev); nsp32_msg(KERN_INFO, "irq: %i mmio: %p+0x%lx slot: %s model: %s", pdev->irq, data->MmioAddress, data->MmioLength, pci_name(pdev), nsp32_model[id->driver_data]); nsp32_dbg(NSP32_DEBUG_REGISTER, "exit %d", ret); return ret; } 
static void __devexit nsp32_remove(struct pci_dev *pdev) { struct Scsi_Host *host = pci_get_drvdata(pdev); nsp32_dbg(NSP32_DEBUG_REGISTER, "enter"); scsi_remove_host(host); nsp32_release(host); scsi_host_put(host); } static struct pci_driver nsp32_driver = { .name = "nsp32", .id_table = nsp32_pci_table, .probe = nsp32_probe, .remove = __devexit_p(nsp32_remove), #ifdef CONFIG_PM .suspend = nsp32_suspend, .resume = nsp32_resume, #endif }; /********************************************************************* * Moule entry point */ static int __init init_nsp32(void) { nsp32_msg(KERN_INFO, "loading..."); return pci_register_driver(&nsp32_driver); } static void __exit exit_nsp32(void) { nsp32_msg(KERN_INFO, "unloading..."); pci_unregister_driver(&nsp32_driver); } module_init(init_nsp32); module_exit(exit_nsp32); /* end */
gpl-2.0
Ninpo/ninphetamine
drivers/staging/comedi/drivers/addi-data/hwdrv_apci2032.c
894
23554
/** @verbatim Copyright (C) 2004,2005 ADDI-DATA GmbH for the source code of this module. ADDI-DATA GmbH Dieselstrasse 3 D-77833 Ottersweier Tel: +19(0)7223/9493-0 Fax: +49(0)7223/9493-92 http://www.addi-data-com info@addi-data.com This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA You should also find the complete GPL in the COPYING file accompanying this source code. @endverbatim */ /* +-----------------------------------------------------------------------+ | (C) ADDI-DATA GmbH Dieselstraße 3 D-77833 Ottersweier | +-----------------------------------------------------------------------+ | Tel : +49 (0) 7223/9493-0 | email : info@addi-data.com | | Fax : +49 (0) 7223/9493-92 | Internet : http://www.addi-data.com | +-------------------------------+---------------------------------------+ | Project : APCI-2032 | Compiler : GCC | | Module name : hwdrv_apci2032.c| Version : 2.96 | +-------------------------------+---------------------------------------+ | Project manager: Eric Stolz | Date : 02/12/2002 | +-------------------------------+---------------------------------------+ | Description : Hardware Layer Acces For APCI-2032 | +-----------------------------------------------------------------------+ | UPDATES | +----------+-----------+------------------------------------------------+ | Date | Author | Description of updates | 
+----------+-----------+------------------------------------------------+ | | | | | | | | | | | | +----------+-----------+------------------------------------------------+ */ /* +----------------------------------------------------------------------------+ | Included files | +----------------------------------------------------------------------------+ */ #include "hwdrv_apci2032.h" static unsigned int ui_InterruptData, ui_Type; /* +----------------------------------------------------------------------------+ | Function Name : int i_APCI2032_ConfigDigitalOutput | | (struct comedi_device *dev,struct comedi_subdevice *s, | | struct comedi_insn *insn,unsigned int *data) | +----------------------------------------------------------------------------+ | Task : Configures The Digital Output Subdevice. | +----------------------------------------------------------------------------+ | Input Parameters : struct comedi_device *dev : Driver handle | | unsigned int *data : Data Pointer contains | | configuration parameters as below | | | | data[1] : 1 Enable VCC Interrupt | | 0 Disable VCC Interrupt | | data[2] : 1 Enable CC Interrupt | | 0 Disable CC Interrupt | | | +----------------------------------------------------------------------------+ | Output Parameters : -- | +----------------------------------------------------------------------------+ | Return Value : TRUE : No error occur | | : FALSE : Error occur. Return the error | | | +----------------------------------------------------------------------------+ */ int i_APCI2032_ConfigDigitalOutput(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { unsigned int ul_Command = 0; devpriv->tsk_Current = current; if ((data[0] != 0) && (data[0] != 1)) { comedi_error(dev, "Not a valid Data !!! 
,Data should be 1 or 0\n"); return -EINVAL; } /* if ( (data[0]!=0) && (data[0]!=1) ) */ if (data[0]) { devpriv->b_OutputMemoryStatus = ADDIDATA_ENABLE; } /* if (data[0]) */ else { devpriv->b_OutputMemoryStatus = ADDIDATA_DISABLE; } /* else if (data[0]) */ if (data[1] == ADDIDATA_ENABLE) { ul_Command = ul_Command | 0x1; } /* if (data[1] == ADDIDATA_ENABLE) */ else { ul_Command = ul_Command & 0xFFFFFFFE; } /* elseif (data[1] == ADDIDATA_ENABLE) */ if (data[2] == ADDIDATA_ENABLE) { ul_Command = ul_Command | 0x2; } /* if (data[2] == ADDIDATA_ENABLE) */ else { ul_Command = ul_Command & 0xFFFFFFFD; } /* elseif (data[2] == ADDIDATA_ENABLE) */ outl(ul_Command, devpriv->iobase + APCI2032_DIGITAL_OP_INTERRUPT); ui_InterruptData = inl(devpriv->iobase + APCI2032_DIGITAL_OP_INTERRUPT); return insn->n; } /* +----------------------------------------------------------------------------+ | Function Name : int i_APCI2032_WriteDigitalOutput | | (struct comedi_device *dev,struct comedi_subdevice *s, | | struct comedi_insn *insn,unsigned int *data) | +----------------------------------------------------------------------------+ | Task : Writes port value To the selected port | +----------------------------------------------------------------------------+ | Input Parameters : struct comedi_device *dev : Driver handle | | unsigned int ui_NoOfChannels : No Of Channels To Write | | unsigned int *data : Data Pointer to read status | +----------------------------------------------------------------------------+ | Output Parameters : -- | +----------------------------------------------------------------------------+ | Return Value : TRUE : No error occur | | : FALSE : Error occur. 
Return the error | | | +----------------------------------------------------------------------------+ */ int i_APCI2032_WriteDigitalOutput(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { unsigned int ui_Temp, ui_Temp1; unsigned int ui_NoOfChannel = CR_CHAN(insn->chanspec); /* get the channel */ if (devpriv->b_OutputMemoryStatus) { ui_Temp = inl(devpriv->iobase + APCI2032_DIGITAL_OP); } /* if(devpriv->b_OutputMemoryStatus ) */ else { ui_Temp = 0; } /* if(devpriv->b_OutputMemoryStatus ) */ if (data[3] == 0) { if (data[1] == 0) { data[0] = (data[0] << ui_NoOfChannel) | ui_Temp; outl(data[0], devpriv->iobase + APCI2032_DIGITAL_OP); } /* if(data[1]==0) */ else { if (data[1] == 1) { switch (ui_NoOfChannel) { case 2: data[0] = (data[0] << (2 * data[2])) | ui_Temp; break; case 4: data[0] = (data[0] << (4 * data[2])) | ui_Temp; break; case 8: data[0] = (data[0] << (8 * data[2])) | ui_Temp; break; case 16: data[0] = (data[0] << (16 * data[2])) | ui_Temp; break; case 31: data[0] = data[0] | ui_Temp; break; default: comedi_error(dev, " chan spec wrong"); return -EINVAL; /* "sorry channel spec wrong " */ } /* switch(ui_NoOfChannels) */ outl(data[0], devpriv->iobase + APCI2032_DIGITAL_OP); } /* if(data[1]==1) */ else { printk("\nSpecified channel not supported\n"); } /* else if(data[1]==1) */ } /* elseif(data[1]==0) */ } /* if(data[3]==0) */ else { if (data[3] == 1) { if (data[1] == 0) { data[0] = ~data[0] & 0x1; ui_Temp1 = 1; ui_Temp1 = ui_Temp1 << ui_NoOfChannel; ui_Temp = ui_Temp | ui_Temp1; data[0] = (data[0] << ui_NoOfChannel) ^ 0xffffffff; data[0] = data[0] & ui_Temp; outl(data[0], devpriv->iobase + APCI2032_DIGITAL_OP); } /* if(data[1]==0) */ else { if (data[1] == 1) { switch (ui_NoOfChannel) { case 2: data[0] = ~data[0] & 0x3; ui_Temp1 = 3; ui_Temp1 = ui_Temp1 << 2 * data[2]; ui_Temp = ui_Temp | ui_Temp1; data[0] = ((data[0] << (2 * data [2])) ^ 0xffffffff) & ui_Temp; break; case 4: data[0] = ~data[0] & 0xf; ui_Temp1 
= 15; ui_Temp1 = ui_Temp1 << 4 * data[2]; ui_Temp = ui_Temp | ui_Temp1; data[0] = ((data[0] << (4 * data [2])) ^ 0xffffffff) & ui_Temp; break; case 8: data[0] = ~data[0] & 0xff; ui_Temp1 = 255; ui_Temp1 = ui_Temp1 << 8 * data[2]; ui_Temp = ui_Temp | ui_Temp1; data[0] = ((data[0] << (8 * data [2])) ^ 0xffffffff) & ui_Temp; break; case 16: data[0] = ~data[0] & 0xffff; ui_Temp1 = 65535; ui_Temp1 = ui_Temp1 << 16 * data[2]; ui_Temp = ui_Temp | ui_Temp1; data[0] = ((data[0] << (16 * data [2])) ^ 0xffffffff) & ui_Temp; break; case 31: break; default: comedi_error(dev, " chan spec wrong"); return -EINVAL; /* "sorry channel spec wrong " */ } /* switch(ui_NoOfChannels) */ outl(data[0], devpriv->iobase + APCI2032_DIGITAL_OP); } /* if(data[1]==1) */ else { printk("\nSpecified channel not supported\n"); } /* else if(data[1]==1) */ } /* elseif(data[1]==0) */ } /* if(data[3]==1); */ else { printk("\nSpecified functionality does not exist\n"); return -EINVAL; } /* if else data[3]==1) */ } /* if else data[3]==0) */ return insn->n; } /* +----------------------------------------------------------------------------+ | Function Name : int i_APCI2032_ReadDigitalOutput | | (struct comedi_device *dev,struct comedi_subdevice *s, | | struct comedi_insn *insn,unsigned int *data) | +----------------------------------------------------------------------------+ | Task : Read value of the selected channel or port | +----------------------------------------------------------------------------+ | Input Parameters : struct comedi_device *dev : Driver handle | | unsigned int ui_NoOfChannels : No Of Channels To read | | unsigned int *data : Data Pointer to read status | +----------------------------------------------------------------------------+ | Output Parameters : -- | +----------------------------------------------------------------------------+ | Return Value : TRUE : No error occur | | : FALSE : Error occur. 
Return the error | | | +----------------------------------------------------------------------------+ */ int i_APCI2032_ReadDigitalOutput(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { unsigned int ui_Temp; unsigned int ui_NoOfChannel; ui_NoOfChannel = CR_CHAN(insn->chanspec); ui_Temp = data[0]; *data = inl(devpriv->iobase + APCI2032_DIGITAL_OP_RW); if (ui_Temp == 0) { *data = (*data >> ui_NoOfChannel) & 0x1; } /* if (ui_Temp==0) */ else { if (ui_Temp == 1) { switch (ui_NoOfChannel) { case 2: *data = (*data >> (2 * data[1])) & 3; break; case 4: *data = (*data >> (4 * data[1])) & 15; break; case 8: *data = (*data >> (8 * data[1])) & 255; break; case 16: *data = (*data >> (16 * data[1])) & 65535; break; case 31: break; default: comedi_error(dev, " chan spec wrong"); return -EINVAL; /* "sorry channel spec wrong " */ } /* switch(ui_NoOfChannels) */ } /* if (ui_Temp==1) */ else { printk("\nSpecified channel not supported \n"); } /* elseif (ui_Temp==1) */ } return insn->n; } /* +----------------------------------------------------------------------------+ | Function Name : int i_APCI2032_ConfigWatchdog(comedi_device | *dev,struct comedi_subdevice *s,struct comedi_insn *insn,unsigned int *data)| | | +----------------------------------------------------------------------------+ | Task : Configures The Watchdog | +----------------------------------------------------------------------------+ | Input Parameters : struct comedi_device *dev : Driver handle | | struct comedi_subdevice *s, :pointer to subdevice structure | struct comedi_insn *insn :pointer to insn structure | | unsigned int *data : Data Pointer to read status | +----------------------------------------------------------------------------+ | Output Parameters : -- | +----------------------------------------------------------------------------+ | Return Value : TRUE : No error occur | | : FALSE : Error occur. 
Return the error | | | +----------------------------------------------------------------------------+ */ int i_APCI2032_ConfigWatchdog(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { if (data[0] == 0) { /* Disable the watchdog */ outl(0x0, devpriv->iobase + APCI2032_DIGITAL_OP_WATCHDOG + APCI2032_TCW_PROG); /* Loading the Reload value */ outl(data[1], devpriv->iobase + APCI2032_DIGITAL_OP_WATCHDOG + APCI2032_TCW_RELOAD_VALUE); } else { printk("\nThe input parameters are wrong\n"); return -EINVAL; } return insn->n; } /* +----------------------------------------------------------------------------+ | Function Name : int i_APCI2032_StartStopWriteWatchdog | | (struct comedi_device *dev,struct comedi_subdevice *s, struct comedi_insn *insn,unsigned int *data); | +----------------------------------------------------------------------------+ | Task : Start / Stop The Watchdog | +----------------------------------------------------------------------------+ | Input Parameters : struct comedi_device *dev : Driver handle | | struct comedi_subdevice *s, :pointer to subdevice structure struct comedi_insn *insn :pointer to insn structure | | unsigned int *data : Data Pointer to read status | +----------------------------------------------------------------------------+ | Output Parameters : -- | +----------------------------------------------------------------------------+ | Return Value : TRUE : No error occur | | : FALSE : Error occur. 
Return the error | | | +----------------------------------------------------------------------------+ */ int i_APCI2032_StartStopWriteWatchdog(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { switch (data[0]) { case 0: /* stop the watchdog */ outl(0x0, devpriv->iobase + APCI2032_DIGITAL_OP_WATCHDOG + APCI2032_TCW_PROG); /* disable the watchdog */ break; case 1: /* start the watchdog */ outl(0x0001, devpriv->iobase + APCI2032_DIGITAL_OP_WATCHDOG + APCI2032_TCW_PROG); break; case 2: /* Software trigger */ outl(0x0201, devpriv->iobase + APCI2032_DIGITAL_OP_WATCHDOG + APCI2032_TCW_PROG); break; default: printk("\nSpecified functionality does not exist\n"); return -EINVAL; } return insn->n; } /* +----------------------------------------------------------------------------+ | Function Name : int i_APCI2032_ReadWatchdog | | (struct comedi_device *dev,struct comedi_subdevice *s,struct comedi_insn *insn, | unsigned int *data); | +----------------------------------------------------------------------------+ | Task : Read The Watchdog | +----------------------------------------------------------------------------+ | Input Parameters : struct comedi_device *dev : Driver handle | | struct comedi_subdevice *s, :pointer to subdevice structure | struct comedi_insn *insn :pointer to insn structure | | unsigned int *data : Data Pointer to read status | +----------------------------------------------------------------------------+ | Output Parameters : -- | +----------------------------------------------------------------------------+ | Return Value : TRUE : No error occur | | : FALSE : Error occur. 
Return the error | | | +----------------------------------------------------------------------------+ */ int i_APCI2032_ReadWatchdog(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { data[0] = inl(devpriv->iobase + APCI2032_DIGITAL_OP_WATCHDOG + APCI2032_TCW_TRIG_STATUS) & 0x1; return insn->n; } /* +----------------------------------------------------------------------------+ | Function Name : void v_APCI2032_Interrupt | | (int irq , void *d) | +----------------------------------------------------------------------------+ | Task : Writes port value To the selected port | +----------------------------------------------------------------------------+ | Input Parameters : int irq : irq number | | void *d : void pointer | +----------------------------------------------------------------------------+ | Output Parameters : -- | +----------------------------------------------------------------------------+ | Return Value : TRUE : No error occur | | : FALSE : Error occur. Return the error | | | +----------------------------------------------------------------------------+ */ void v_APCI2032_Interrupt(int irq, void *d) { struct comedi_device *dev = d; unsigned int ui_DO; ui_DO = inl(devpriv->iobase + APCI2032_DIGITAL_OP_IRQ) & 0x1; /* Check if VCC OR CC interrupt has occured. */ if (ui_DO == 0) { printk("\nInterrupt from unKnown source\n"); } /* if(ui_DO==0) */ if (ui_DO) { /* Check for Digital Output interrupt Type - 1: Vcc interrupt 2: CC interrupt. 
*/ ui_Type = inl(devpriv->iobase + APCI2032_DIGITAL_OP_INTERRUPT_STATUS) & 0x3; outl(0x0, devpriv->iobase + APCI2032_DIGITAL_OP + APCI2032_DIGITAL_OP_INTERRUPT); if (ui_Type == 1) { /* Sends signal to user space */ send_sig(SIGIO, devpriv->tsk_Current, 0); } /* if (ui_Type==1) */ else { if (ui_Type == 2) { /* Sends signal to user space */ send_sig(SIGIO, devpriv->tsk_Current, 0); } /* if (ui_Type==2) */ } /* else if (ui_Type==1) */ } /* if(ui_DO) */ return; } /* +----------------------------------------------------------------------------+ | Function Name : int i_APCI2032_ReadInterruptStatus | | (struct comedi_device *dev,struct comedi_subdevice *s, | | struct comedi_insn *insn,unsigned int *data) | +----------------------------------------------------------------------------+ | Task :Reads the interrupt status register | +----------------------------------------------------------------------------+ | Input Parameters : | +----------------------------------------------------------------------------+ | Output Parameters : -- | +----------------------------------------------------------------------------+ | Return Value : | | | +----------------------------------------------------------------------------+ */ int i_APCI2032_ReadInterruptStatus(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { *data = ui_Type; return insn->n; } /* +----------------------------------------------------------------------------+ | Function Name : int i_APCI2032_Reset(struct comedi_device *dev) | | | +----------------------------------------------------------------------------+ | Task :Resets the registers of the card | +----------------------------------------------------------------------------+ | Input Parameters : | +----------------------------------------------------------------------------+ | Output Parameters : -- | +----------------------------------------------------------------------------+ | Return Value : | | | 
+----------------------------------------------------------------------------+ */ int i_APCI2032_Reset(struct comedi_device *dev) { devpriv->b_DigitalOutputRegister = 0; ui_Type = 0; outl(0x0, devpriv->iobase + APCI2032_DIGITAL_OP); /* Resets the output channels */ outl(0x0, devpriv->iobase + APCI2032_DIGITAL_OP_INTERRUPT); /* Disables the interrupt. */ outl(0x0, devpriv->iobase + APCI2032_DIGITAL_OP_WATCHDOG + APCI2032_TCW_PROG); /* disable the watchdog */ outl(0x0, devpriv->iobase + APCI2032_DIGITAL_OP_WATCHDOG + APCI2032_TCW_RELOAD_VALUE); /* reload=0 */ return 0; }
gpl-2.0
giveen/kernel_dell_streak7
drivers/oprofile/oprofile_files.c
1406
4546
/** * @file oprofile_files.c * * @remark Copyright 2002 OProfile authors * @remark Read the file COPYING * * @author John Levon <levon@movementarian.org> */ #include <linux/fs.h> #include <linux/oprofile.h> #include <linux/jiffies.h> #include "event_buffer.h" #include "oprofile_stats.h" #include "oprof.h" #define BUFFER_SIZE_DEFAULT 131072 #define CPU_BUFFER_SIZE_DEFAULT 8192 #define BUFFER_WATERSHED_DEFAULT 32768 /* FIXME: tune */ #define TIME_SLICE_DEFAULT 1 unsigned long oprofile_buffer_size; unsigned long oprofile_cpu_buffer_size; unsigned long oprofile_buffer_watershed; unsigned long oprofile_time_slice; #ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX static ssize_t timeout_read(struct file *file, char __user *buf, size_t count, loff_t *offset) { return oprofilefs_ulong_to_user(jiffies_to_msecs(oprofile_time_slice), buf, count, offset); } static ssize_t timeout_write(struct file *file, char const __user *buf, size_t count, loff_t *offset) { unsigned long val; int retval; if (*offset) return -EINVAL; retval = oprofilefs_ulong_from_user(&val, buf, count); if (retval) return retval; retval = oprofile_set_timeout(val); if (retval) return retval; return count; } static const struct file_operations timeout_fops = { .read = timeout_read, .write = timeout_write, }; #endif static ssize_t depth_read(struct file *file, char __user *buf, size_t count, loff_t *offset) { return oprofilefs_ulong_to_user(oprofile_backtrace_depth, buf, count, offset); } static ssize_t depth_write(struct file *file, char const __user *buf, size_t count, loff_t *offset) { unsigned long val; int retval; if (*offset) return -EINVAL; retval = oprofilefs_ulong_from_user(&val, buf, count); if (retval) return retval; retval = oprofile_set_backtrace(val); if (retval) return retval; return count; } static const struct file_operations depth_fops = { .read = depth_read, .write = depth_write }; static ssize_t pointer_size_read(struct file *file, char __user *buf, size_t count, loff_t *offset) { return 
oprofilefs_ulong_to_user(sizeof(void *), buf, count, offset); } static const struct file_operations pointer_size_fops = { .read = pointer_size_read, }; static ssize_t cpu_type_read(struct file *file, char __user *buf, size_t count, loff_t *offset) { return oprofilefs_str_to_user(oprofile_ops.cpu_type, buf, count, offset); } static const struct file_operations cpu_type_fops = { .read = cpu_type_read, }; static ssize_t enable_read(struct file *file, char __user *buf, size_t count, loff_t *offset) { return oprofilefs_ulong_to_user(oprofile_started, buf, count, offset); } static ssize_t enable_write(struct file *file, char const __user *buf, size_t count, loff_t *offset) { unsigned long val; int retval; if (*offset) return -EINVAL; retval = oprofilefs_ulong_from_user(&val, buf, count); if (retval) return retval; if (val) retval = oprofile_start(); else oprofile_stop(); if (retval) return retval; return count; } static const struct file_operations enable_fops = { .read = enable_read, .write = enable_write, }; static ssize_t dump_write(struct file *file, char const __user *buf, size_t count, loff_t *offset) { wake_up_buffer_waiter(); return count; } static const struct file_operations dump_fops = { .write = dump_write, }; void oprofile_create_files(struct super_block *sb, struct dentry *root) { /* reinitialize default values */ oprofile_buffer_size = BUFFER_SIZE_DEFAULT; oprofile_cpu_buffer_size = CPU_BUFFER_SIZE_DEFAULT; oprofile_buffer_watershed = BUFFER_WATERSHED_DEFAULT; oprofile_time_slice = msecs_to_jiffies(TIME_SLICE_DEFAULT); oprofilefs_create_file(sb, root, "enable", &enable_fops); oprofilefs_create_file_perm(sb, root, "dump", &dump_fops, 0666); oprofilefs_create_file(sb, root, "buffer", &event_buffer_fops); oprofilefs_create_ulong(sb, root, "buffer_size", &oprofile_buffer_size); oprofilefs_create_ulong(sb, root, "buffer_watershed", &oprofile_buffer_watershed); oprofilefs_create_ulong(sb, root, "cpu_buffer_size", &oprofile_cpu_buffer_size); 
oprofilefs_create_file(sb, root, "cpu_type", &cpu_type_fops); oprofilefs_create_file(sb, root, "backtrace_depth", &depth_fops); oprofilefs_create_file(sb, root, "pointer_size", &pointer_size_fops); #ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX oprofilefs_create_file(sb, root, "time_slice", &timeout_fops); #endif oprofile_create_stats_files(sb, root); if (oprofile_ops.create_files) oprofile_ops.create_files(sb, root); }
gpl-2.0
Martix/shr-fcse
arch/arm/mach-sa1100/pm.c
2430
2663
/* * SA1100 Power Management Routines * * Copyright (c) 2001 Cliff Brake <cbrake@accelent.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License. * * History: * * 2001-02-06: Cliff Brake Initial code * * 2001-02-25: Sukjae Cho <sjcho@east.isi.edu> & * Chester Kuo <chester@linux.org.tw> * Save more value for the resume function! Support * Bitsy/Assabet/Freebird board * * 2001-08-29: Nicolas Pitre <nico@fluxnic.net> * Cleaned up, pushed platform dependent stuff * in the platform specific files. * * 2002-05-27: Nicolas Pitre Killed sleep.h and the kmalloced save array. * Storage is local on the stack now. */ #include <linux/init.h> #include <linux/suspend.h> #include <linux/errno.h> #include <linux/time.h> #include <mach/hardware.h> #include <asm/memory.h> #include <asm/system.h> #include <asm/mach/time.h> extern void sa1100_cpu_suspend(long); #define SAVE(x) sleep_save[SLEEP_SAVE_##x] = x #define RESTORE(x) x = sleep_save[SLEEP_SAVE_##x] /* * List of global SA11x0 peripheral registers to preserve. * More ones like CP and general purpose register values are preserved * on the stack and then the stack pointer is stored last in sleep.S. 
*/ enum { SLEEP_SAVE_GPDR, SLEEP_SAVE_GAFR, SLEEP_SAVE_PPDR, SLEEP_SAVE_PPSR, SLEEP_SAVE_PPAR, SLEEP_SAVE_PSDR, SLEEP_SAVE_Ser1SDCR0, SLEEP_SAVE_COUNT }; static int sa11x0_pm_enter(suspend_state_t state) { unsigned long gpio, sleep_save[SLEEP_SAVE_COUNT]; gpio = GPLR; /* save vital registers */ SAVE(GPDR); SAVE(GAFR); SAVE(PPDR); SAVE(PPSR); SAVE(PPAR); SAVE(PSDR); SAVE(Ser1SDCR0); /* Clear previous reset status */ RCSR = RCSR_HWR | RCSR_SWR | RCSR_WDR | RCSR_SMR; /* set resume return address */ PSPR = virt_to_phys(cpu_resume); /* go zzz */ sa1100_cpu_suspend(PLAT_PHYS_OFFSET - PAGE_OFFSET); cpu_init(); /* * Ensure not to come back here if it wasn't intended */ PSPR = 0; /* * Ensure interrupt sources are disabled; we will re-init * the interrupt subsystem via the device manager. */ ICLR = 0; ICCR = 1; ICMR = 0; /* restore registers */ RESTORE(GPDR); RESTORE(GAFR); RESTORE(PPDR); RESTORE(PPSR); RESTORE(PPAR); RESTORE(PSDR); RESTORE(Ser1SDCR0); GPSR = gpio; GPCR = ~gpio; /* * Clear the peripheral sleep-hold bit. */ PSSR = PSSR_PH; return 0; } static const struct platform_suspend_ops sa11x0_pm_ops = { .enter = sa11x0_pm_enter, .valid = suspend_valid_only_mem, }; static int __init sa11x0_pm_init(void) { suspend_set_ops(&sa11x0_pm_ops); return 0; } late_initcall(sa11x0_pm_init);
gpl-2.0
yank555-lu/SGS3-JB
drivers/net/phy/icplus.c
2686
4311
/* * Driver for ICPlus PHYs * * Copyright (c) 2007 Freescale Semiconductor, Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * */ #include <linux/kernel.h> #include <linux/string.h> #include <linux/errno.h> #include <linux/unistd.h> #include <linux/interrupt.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/skbuff.h> #include <linux/spinlock.h> #include <linux/mm.h> #include <linux/module.h> #include <linux/mii.h> #include <linux/ethtool.h> #include <linux/phy.h> #include <asm/io.h> #include <asm/irq.h> #include <asm/uaccess.h> MODULE_DESCRIPTION("ICPlus IP175C/IC1001 PHY drivers"); MODULE_AUTHOR("Michael Barkowski"); MODULE_LICENSE("GPL"); static int ip175c_config_init(struct phy_device *phydev) { int err, i; static int full_reset_performed = 0; if (full_reset_performed == 0) { /* master reset */ err = phydev->bus->write(phydev->bus, 30, 0, 0x175c); if (err < 0) return err; /* ensure no bus delays overlap reset period */ err = phydev->bus->read(phydev->bus, 30, 0); /* data sheet specifies reset period is 2 msec */ mdelay(2); /* enable IP175C mode */ err = phydev->bus->write(phydev->bus, 29, 31, 0x175c); if (err < 0) return err; /* Set MII0 speed and duplex (in PHY mode) */ err = phydev->bus->write(phydev->bus, 29, 22, 0x420); if (err < 0) return err; /* reset switch ports */ for (i = 0; i < 5; i++) { err = phydev->bus->write(phydev->bus, i, MII_BMCR, BMCR_RESET); if (err < 0) return err; } for (i = 0; i < 5; i++) err = phydev->bus->read(phydev->bus, i, MII_BMCR); mdelay(2); full_reset_performed = 1; } if (phydev->addr != 4) { phydev->state = PHY_RUNNING; phydev->speed = SPEED_100; phydev->duplex = DUPLEX_FULL; phydev->link = 1; netif_carrier_on(phydev->attached_dev); } return 0; } 
static int ip1001_config_init(struct phy_device *phydev) { int err, value; /* Software Reset PHY */ value = phy_read(phydev, MII_BMCR); value |= BMCR_RESET; err = phy_write(phydev, MII_BMCR, value); if (err < 0) return err; do { value = phy_read(phydev, MII_BMCR); } while (value & BMCR_RESET); /* Additional delay (2ns) used to adjust RX clock phase * at GMII/ RGMII interface */ value = phy_read(phydev, 16); value |= 0x3; err = phy_write(phydev, 16, value); if (err < 0) return err; return err; } static int ip175c_read_status(struct phy_device *phydev) { if (phydev->addr == 4) /* WAN port */ genphy_read_status(phydev); else /* Don't need to read status for switch ports */ phydev->irq = PHY_IGNORE_INTERRUPT; return 0; } static int ip175c_config_aneg(struct phy_device *phydev) { if (phydev->addr == 4) /* WAN port */ genphy_config_aneg(phydev); return 0; } static struct phy_driver ip175c_driver = { .phy_id = 0x02430d80, .name = "ICPlus IP175C", .phy_id_mask = 0x0ffffff0, .features = PHY_BASIC_FEATURES, .config_init = &ip175c_config_init, .config_aneg = &ip175c_config_aneg, .read_status = &ip175c_read_status, .suspend = genphy_suspend, .resume = genphy_resume, .driver = { .owner = THIS_MODULE,}, }; static struct phy_driver ip1001_driver = { .phy_id = 0x02430d90, .name = "ICPlus IP1001", .phy_id_mask = 0x0ffffff0, .features = PHY_GBIT_FEATURES | SUPPORTED_Pause | SUPPORTED_Asym_Pause, .config_init = &ip1001_config_init, .config_aneg = &genphy_config_aneg, .read_status = &genphy_read_status, .suspend = genphy_suspend, .resume = genphy_resume, .driver = { .owner = THIS_MODULE,}, }; static int __init icplus_init(void) { int ret = 0; ret = phy_driver_register(&ip1001_driver); if (ret < 0) return -ENODEV; return phy_driver_register(&ip175c_driver); } static void __exit icplus_exit(void) { phy_driver_unregister(&ip1001_driver); phy_driver_unregister(&ip175c_driver); } module_init(icplus_init); module_exit(icplus_exit); static struct mdio_device_id __maybe_unused icplus_tbl[] = 
{ { 0x02430d80, 0x0ffffff0 }, { 0x02430d90, 0x0ffffff0 }, { } }; MODULE_DEVICE_TABLE(mdio, icplus_tbl);
gpl-2.0
erorcun/android_kernel_oneplus_msm8974-3.10
drivers/media/usb/dvb-usb/dib0700_core.c
2686
21683
/* Linux driver for devices based on the DiBcom DiB0700 USB bridge * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation, version 2. * * Copyright (C) 2005-6 DiBcom, SA */ #include "dib0700.h" /* debug */ int dvb_usb_dib0700_debug; module_param_named(debug,dvb_usb_dib0700_debug, int, 0644); MODULE_PARM_DESC(debug, "set debugging level (1=info,2=fw,4=fwdata,8=data (or-able))." DVB_USB_DEBUG_STATUS); static int nb_packet_buffer_size = 21; module_param(nb_packet_buffer_size, int, 0644); MODULE_PARM_DESC(nb_packet_buffer_size, "Set the dib0700 driver data buffer size. This parameter " "corresponds to the number of TS packets. The actual size of " "the data buffer corresponds to this parameter " "multiplied by 188 (default: 21)"); DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr); int dib0700_get_version(struct dvb_usb_device *d, u32 *hwversion, u32 *romversion, u32 *ramversion, u32 *fwtype) { struct dib0700_state *st = d->priv; int ret; if (mutex_lock_interruptible(&d->usb_mutex) < 0) { err("could not acquire lock"); return -EINTR; } ret = usb_control_msg(d->udev, usb_rcvctrlpipe(d->udev, 0), REQUEST_GET_VERSION, USB_TYPE_VENDOR | USB_DIR_IN, 0, 0, st->buf, 16, USB_CTRL_GET_TIMEOUT); if (hwversion != NULL) *hwversion = (st->buf[0] << 24) | (st->buf[1] << 16) | (st->buf[2] << 8) | st->buf[3]; if (romversion != NULL) *romversion = (st->buf[4] << 24) | (st->buf[5] << 16) | (st->buf[6] << 8) | st->buf[7]; if (ramversion != NULL) *ramversion = (st->buf[8] << 24) | (st->buf[9] << 16) | (st->buf[10] << 8) | st->buf[11]; if (fwtype != NULL) *fwtype = (st->buf[12] << 24) | (st->buf[13] << 16) | (st->buf[14] << 8) | st->buf[15]; mutex_unlock(&d->usb_mutex); return ret; } /* expecting rx buffer: request data[0] data[1] ... 
data[2] */ static int dib0700_ctrl_wr(struct dvb_usb_device *d, u8 *tx, u8 txlen) { int status; deb_data(">>> "); debug_dump(tx, txlen, deb_data); status = usb_control_msg(d->udev, usb_sndctrlpipe(d->udev,0), tx[0], USB_TYPE_VENDOR | USB_DIR_OUT, 0, 0, tx, txlen, USB_CTRL_GET_TIMEOUT); if (status != txlen) deb_data("ep 0 write error (status = %d, len: %d)\n",status,txlen); return status < 0 ? status : 0; } /* expecting tx buffer: request data[0] ... data[n] (n <= 4) */ int dib0700_ctrl_rd(struct dvb_usb_device *d, u8 *tx, u8 txlen, u8 *rx, u8 rxlen) { u16 index, value; int status; if (txlen < 2) { err("tx buffer length is smaller than 2. Makes no sense."); return -EINVAL; } if (txlen > 4) { err("tx buffer length is larger than 4. Not supported."); return -EINVAL; } deb_data(">>> "); debug_dump(tx,txlen,deb_data); value = ((txlen - 2) << 8) | tx[1]; index = 0; if (txlen > 2) index |= (tx[2] << 8); if (txlen > 3) index |= tx[3]; status = usb_control_msg(d->udev, usb_rcvctrlpipe(d->udev,0), tx[0], USB_TYPE_VENDOR | USB_DIR_IN, value, index, rx, rxlen, USB_CTRL_GET_TIMEOUT); if (status < 0) deb_info("ep 0 read error (status = %d)\n",status); deb_data("<<< "); debug_dump(rx, rxlen, deb_data); return status; /* length in case of success */ } int dib0700_set_gpio(struct dvb_usb_device *d, enum dib07x0_gpios gpio, u8 gpio_dir, u8 gpio_val) { struct dib0700_state *st = d->priv; int ret; if (mutex_lock_interruptible(&d->usb_mutex) < 0) { err("could not acquire lock"); return -EINTR; } st->buf[0] = REQUEST_SET_GPIO; st->buf[1] = gpio; st->buf[2] = ((gpio_dir & 0x01) << 7) | ((gpio_val & 0x01) << 6); ret = dib0700_ctrl_wr(d, st->buf, 3); mutex_unlock(&d->usb_mutex); return ret; } static int dib0700_set_usb_xfer_len(struct dvb_usb_device *d, u16 nb_ts_packets) { struct dib0700_state *st = d->priv; int ret; if (st->fw_version >= 0x10201) { if (mutex_lock_interruptible(&d->usb_mutex) < 0) { err("could not acquire lock"); return -EINTR; } st->buf[0] = REQUEST_SET_USB_XFER_LEN; 
st->buf[1] = (nb_ts_packets >> 8) & 0xff; st->buf[2] = nb_ts_packets & 0xff; deb_info("set the USB xfer len to %i Ts packet\n", nb_ts_packets); ret = dib0700_ctrl_wr(d, st->buf, 3); mutex_unlock(&d->usb_mutex); } else { deb_info("this firmware does not allow to change the USB xfer len\n"); ret = -EIO; } return ret; } /* * I2C master xfer function (supported in 1.20 firmware) */ static int dib0700_i2c_xfer_new(struct i2c_adapter *adap, struct i2c_msg *msg, int num) { /* The new i2c firmware messages are more reliable and in particular properly support i2c read calls not preceded by a write */ struct dvb_usb_device *d = i2c_get_adapdata(adap); struct dib0700_state *st = d->priv; uint8_t bus_mode = 1; /* 0=eeprom bus, 1=frontend bus */ uint8_t gen_mode = 0; /* 0=master i2c, 1=gpio i2c */ uint8_t en_start = 0; uint8_t en_stop = 0; int result, i; /* Ensure nobody else hits the i2c bus while we're sending our sequence of messages, (such as the remote control thread) */ if (mutex_lock_interruptible(&d->i2c_mutex) < 0) return -EINTR; for (i = 0; i < num; i++) { if (i == 0) { /* First message in the transaction */ en_start = 1; } else if (!(msg[i].flags & I2C_M_NOSTART)) { /* Device supports repeated-start */ en_start = 1; } else { /* Not the first packet and device doesn't support repeated start */ en_start = 0; } if (i == (num - 1)) { /* Last message in the transaction */ en_stop = 1; } if (msg[i].flags & I2C_M_RD) { /* Read request */ u16 index, value; uint8_t i2c_dest; i2c_dest = (msg[i].addr << 1); value = ((en_start << 7) | (en_stop << 6) | (msg[i].len & 0x3F)) << 8 | i2c_dest; /* I2C ctrl + FE bus; */ index = ((gen_mode << 6) & 0xC0) | ((bus_mode << 4) & 0x30); result = usb_control_msg(d->udev, usb_rcvctrlpipe(d->udev, 0), REQUEST_NEW_I2C_READ, USB_TYPE_VENDOR | USB_DIR_IN, value, index, msg[i].buf, msg[i].len, USB_CTRL_GET_TIMEOUT); if (result < 0) { deb_info("i2c read error (status = %d)\n", result); break; } deb_data("<<< "); debug_dump(msg[i].buf, msg[i].len, 
deb_data); } else { /* Write request */ if (mutex_lock_interruptible(&d->usb_mutex) < 0) { err("could not acquire lock"); mutex_unlock(&d->i2c_mutex); return -EINTR; } st->buf[0] = REQUEST_NEW_I2C_WRITE; st->buf[1] = msg[i].addr << 1; st->buf[2] = (en_start << 7) | (en_stop << 6) | (msg[i].len & 0x3F); /* I2C ctrl + FE bus; */ st->buf[3] = ((gen_mode << 6) & 0xC0) | ((bus_mode << 4) & 0x30); /* The Actual i2c payload */ memcpy(&st->buf[4], msg[i].buf, msg[i].len); deb_data(">>> "); debug_dump(st->buf, msg[i].len + 4, deb_data); result = usb_control_msg(d->udev, usb_sndctrlpipe(d->udev, 0), REQUEST_NEW_I2C_WRITE, USB_TYPE_VENDOR | USB_DIR_OUT, 0, 0, st->buf, msg[i].len + 4, USB_CTRL_GET_TIMEOUT); mutex_unlock(&d->usb_mutex); if (result < 0) { deb_info("i2c write error (status = %d)\n", result); break; } } } mutex_unlock(&d->i2c_mutex); return i; } /* * I2C master xfer function (pre-1.20 firmware) */ static int dib0700_i2c_xfer_legacy(struct i2c_adapter *adap, struct i2c_msg *msg, int num) { struct dvb_usb_device *d = i2c_get_adapdata(adap); struct dib0700_state *st = d->priv; int i,len; if (mutex_lock_interruptible(&d->i2c_mutex) < 0) return -EINTR; if (mutex_lock_interruptible(&d->usb_mutex) < 0) { err("could not acquire lock"); mutex_unlock(&d->i2c_mutex); return -EINTR; } for (i = 0; i < num; i++) { /* fill in the address */ st->buf[1] = msg[i].addr << 1; /* fill the buffer */ memcpy(&st->buf[2], msg[i].buf, msg[i].len); /* write/read request */ if (i+1 < num && (msg[i+1].flags & I2C_M_RD)) { st->buf[0] = REQUEST_I2C_READ; st->buf[1] |= 1; /* special thing in the current firmware: when length is zero the read-failed */ len = dib0700_ctrl_rd(d, st->buf, msg[i].len + 2, msg[i+1].buf, msg[i+1].len); if (len <= 0) { deb_info("I2C read failed on address 0x%02x\n", msg[i].addr); break; } msg[i+1].len = len; i++; } else { st->buf[0] = REQUEST_I2C_WRITE; if (dib0700_ctrl_wr(d, st->buf, msg[i].len + 2) < 0) break; } } mutex_unlock(&d->usb_mutex); 
mutex_unlock(&d->i2c_mutex); return i; } static int dib0700_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msg, int num) { struct dvb_usb_device *d = i2c_get_adapdata(adap); struct dib0700_state *st = d->priv; if (st->fw_use_new_i2c_api == 1) { /* User running at least fw 1.20 */ return dib0700_i2c_xfer_new(adap, msg, num); } else { /* Use legacy calls */ return dib0700_i2c_xfer_legacy(adap, msg, num); } } static u32 dib0700_i2c_func(struct i2c_adapter *adapter) { return I2C_FUNC_I2C; } struct i2c_algorithm dib0700_i2c_algo = { .master_xfer = dib0700_i2c_xfer, .functionality = dib0700_i2c_func, }; int dib0700_identify_state(struct usb_device *udev, struct dvb_usb_device_properties *props, struct dvb_usb_device_description **desc, int *cold) { s16 ret; u8 *b; b = kmalloc(16, GFP_KERNEL); if (!b) return -ENOMEM; ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), REQUEST_GET_VERSION, USB_TYPE_VENDOR | USB_DIR_IN, 0, 0, b, 16, USB_CTRL_GET_TIMEOUT); deb_info("FW GET_VERSION length: %d\n",ret); *cold = ret <= 0; deb_info("cold: %d\n", *cold); kfree(b); return 0; } static int dib0700_set_clock(struct dvb_usb_device *d, u8 en_pll, u8 pll_src, u8 pll_range, u8 clock_gpio3, u16 pll_prediv, u16 pll_loopdiv, u16 free_div, u16 dsuScaler) { struct dib0700_state *st = d->priv; int ret; if (mutex_lock_interruptible(&d->usb_mutex) < 0) { err("could not acquire lock"); return -EINTR; } st->buf[0] = REQUEST_SET_CLOCK; st->buf[1] = (en_pll << 7) | (pll_src << 6) | (pll_range << 5) | (clock_gpio3 << 4); st->buf[2] = (pll_prediv >> 8) & 0xff; /* MSB */ st->buf[3] = pll_prediv & 0xff; /* LSB */ st->buf[4] = (pll_loopdiv >> 8) & 0xff; /* MSB */ st->buf[5] = pll_loopdiv & 0xff; /* LSB */ st->buf[6] = (free_div >> 8) & 0xff; /* MSB */ st->buf[7] = free_div & 0xff; /* LSB */ st->buf[8] = (dsuScaler >> 8) & 0xff; /* MSB */ st->buf[9] = dsuScaler & 0xff; /* LSB */ ret = dib0700_ctrl_wr(d, st->buf, 10); mutex_unlock(&d->usb_mutex); return ret; } int dib0700_set_i2c_speed(struct 
dvb_usb_device *d, u16 scl_kHz) { struct dib0700_state *st = d->priv; u16 divider; int ret; if (scl_kHz == 0) return -EINVAL; if (mutex_lock_interruptible(&d->usb_mutex) < 0) { err("could not acquire lock"); return -EINTR; } st->buf[0] = REQUEST_SET_I2C_PARAM; divider = (u16) (30000 / scl_kHz); st->buf[1] = 0; st->buf[2] = (u8) (divider >> 8); st->buf[3] = (u8) (divider & 0xff); divider = (u16) (72000 / scl_kHz); st->buf[4] = (u8) (divider >> 8); st->buf[5] = (u8) (divider & 0xff); divider = (u16) (72000 / scl_kHz); /* clock: 72MHz */ st->buf[6] = (u8) (divider >> 8); st->buf[7] = (u8) (divider & 0xff); deb_info("setting I2C speed: %04x %04x %04x (%d kHz).", (st->buf[2] << 8) | (st->buf[3]), (st->buf[4] << 8) | st->buf[5], (st->buf[6] << 8) | st->buf[7], scl_kHz); ret = dib0700_ctrl_wr(d, st->buf, 8); mutex_unlock(&d->usb_mutex); return ret; } int dib0700_ctrl_clock(struct dvb_usb_device *d, u32 clk_MHz, u8 clock_out_gp3) { switch (clk_MHz) { case 72: dib0700_set_clock(d, 1, 0, 1, clock_out_gp3, 2, 24, 0, 0x4c); break; default: return -EINVAL; } return 0; } static int dib0700_jumpram(struct usb_device *udev, u32 address) { int ret = 0, actlen; u8 *buf; buf = kmalloc(8, GFP_KERNEL); if (!buf) return -ENOMEM; buf[0] = REQUEST_JUMPRAM; buf[1] = 0; buf[2] = 0; buf[3] = 0; buf[4] = (address >> 24) & 0xff; buf[5] = (address >> 16) & 0xff; buf[6] = (address >> 8) & 0xff; buf[7] = address & 0xff; if ((ret = usb_bulk_msg(udev, usb_sndbulkpipe(udev, 0x01),buf,8,&actlen,1000)) < 0) { deb_fw("jumpram to 0x%x failed\n",address); goto out; } if (actlen != 8) { deb_fw("jumpram to 0x%x failed\n",address); ret = -EIO; goto out; } out: kfree(buf); return ret; } int dib0700_download_firmware(struct usb_device *udev, const struct firmware *fw) { struct hexline hx; int pos = 0, ret, act_len, i, adap_num; u8 *buf; u32 fw_version; buf = kmalloc(260, GFP_KERNEL); if (!buf) return -ENOMEM; while ((ret = dvb_usb_get_hexline(fw, &hx, &pos)) > 0) { deb_fwdata("writing to address 0x%08x 
(buffer: 0x%02x %02x)\n", hx.addr, hx.len, hx.chk); buf[0] = hx.len; buf[1] = (hx.addr >> 8) & 0xff; buf[2] = hx.addr & 0xff; buf[3] = hx.type; memcpy(&buf[4],hx.data,hx.len); buf[4+hx.len] = hx.chk; ret = usb_bulk_msg(udev, usb_sndbulkpipe(udev, 0x01), buf, hx.len + 5, &act_len, 1000); if (ret < 0) { err("firmware download failed at %d with %d",pos,ret); goto out; } } if (ret == 0) { /* start the firmware */ if ((ret = dib0700_jumpram(udev, 0x70000000)) == 0) { info("firmware started successfully."); msleep(500); } } else ret = -EIO; /* the number of ts packet has to be at least 1 */ if (nb_packet_buffer_size < 1) nb_packet_buffer_size = 1; /* get the fimware version */ usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), REQUEST_GET_VERSION, USB_TYPE_VENDOR | USB_DIR_IN, 0, 0, buf, 16, USB_CTRL_GET_TIMEOUT); fw_version = (buf[8] << 24) | (buf[9] << 16) | (buf[10] << 8) | buf[11]; /* set the buffer size - DVB-USB is allocating URB buffers * only after the firwmare download was successful */ for (i = 0; i < dib0700_device_count; i++) { for (adap_num = 0; adap_num < dib0700_devices[i].num_adapters; adap_num++) { if (fw_version >= 0x10201) { dib0700_devices[i].adapter[adap_num].fe[0].stream.u.bulk.buffersize = 188*nb_packet_buffer_size; } else { /* for fw version older than 1.20.1, * the buffersize has to be n times 512 */ dib0700_devices[i].adapter[adap_num].fe[0].stream.u.bulk.buffersize = ((188*nb_packet_buffer_size+188/2)/512)*512; if (dib0700_devices[i].adapter[adap_num].fe[0].stream.u.bulk.buffersize < 512) dib0700_devices[i].adapter[adap_num].fe[0].stream.u.bulk.buffersize = 512; } } } out: kfree(buf); return ret; } int dib0700_streaming_ctrl(struct dvb_usb_adapter *adap, int onoff) { struct dib0700_state *st = adap->dev->priv; int ret; if ((onoff != 0) && (st->fw_version >= 0x10201)) { /* for firmware later than 1.20.1, * the USB xfer length can be set */ ret = dib0700_set_usb_xfer_len(adap->dev, st->nb_packet_buffer_size); if (ret < 0) { deb_info("can not set the 
USB xfer len\n"); return ret; } } mutex_lock(&adap->dev->usb_mutex); st->buf[0] = REQUEST_ENABLE_VIDEO; /* this bit gives a kind of command, * rather than enabling something or not */ st->buf[1] = (onoff << 4) | 0x00; if (st->disable_streaming_master_mode == 1) st->buf[2] = 0x00; else st->buf[2] = 0x01 << 4; /* Master mode */ st->buf[3] = 0x00; deb_info("modifying (%d) streaming state for %d\n", onoff, adap->id); st->channel_state &= ~0x3; if ((adap->fe_adap[0].stream.props.endpoint != 2) && (adap->fe_adap[0].stream.props.endpoint != 3)) { deb_info("the endpoint number (%i) is not correct, use the adapter id instead", adap->fe_adap[0].stream.props.endpoint); if (onoff) st->channel_state |= 1 << (adap->id); else st->channel_state |= 1 << ~(adap->id); } else { if (onoff) st->channel_state |= 1 << (adap->fe_adap[0].stream.props.endpoint-2); else st->channel_state |= 1 << (3-adap->fe_adap[0].stream.props.endpoint); } st->buf[2] |= st->channel_state; deb_info("data for streaming: %x %x\n", st->buf[1], st->buf[2]); ret = dib0700_ctrl_wr(adap->dev, st->buf, 4); mutex_unlock(&adap->dev->usb_mutex); return ret; } int dib0700_change_protocol(struct rc_dev *rc, u64 *rc_type) { struct dvb_usb_device *d = rc->priv; struct dib0700_state *st = d->priv; int new_proto, ret; if (mutex_lock_interruptible(&d->usb_mutex) < 0) { err("could not acquire lock"); return -EINTR; } st->buf[0] = REQUEST_SET_RC; st->buf[1] = 0; st->buf[2] = 0; /* Set the IR mode */ if (*rc_type & RC_BIT_RC5) { new_proto = 1; *rc_type = RC_BIT_RC5; } else if (*rc_type & RC_BIT_NEC) { new_proto = 0; *rc_type = RC_BIT_NEC; } else if (*rc_type & RC_BIT_RC6_MCE) { if (st->fw_version < 0x10200) { ret = -EINVAL; goto out; } new_proto = 2; *rc_type = RC_BIT_RC6_MCE; } else { ret = -EINVAL; goto out; } st->buf[1] = new_proto; ret = dib0700_ctrl_wr(d, st->buf, 3); if (ret < 0) { err("ir protocol setup failed"); goto out; } d->props.rc.core.protocol = *rc_type; out: mutex_unlock(&d->usb_mutex); return ret; } /* Number of 
keypresses to ignore before start repeating */ #define RC_REPEAT_DELAY_V1_20 10 /* This is the structure of the RC response packet starting in firmware 1.20 */ struct dib0700_rc_response { u8 report_id; u8 data_state; union { u16 system16; struct { u8 not_system; u8 system; }; }; u8 data; u8 not_data; }; #define RC_MSG_SIZE_V1_20 6 static void dib0700_rc_urb_completion(struct urb *purb) { struct dvb_usb_device *d = purb->context; struct dib0700_rc_response *poll_reply; u32 uninitialized_var(keycode); u8 toggle; deb_info("%s()\n", __func__); if (d->rc_dev == NULL) { /* This will occur if disable_rc_polling=1 */ kfree(purb->transfer_buffer); usb_free_urb(purb); return; } poll_reply = purb->transfer_buffer; if (purb->status < 0) { deb_info("discontinuing polling\n"); kfree(purb->transfer_buffer); usb_free_urb(purb); return; } if (purb->actual_length != RC_MSG_SIZE_V1_20) { deb_info("malformed rc msg size=%d\n", purb->actual_length); goto resubmit; } deb_data("IR ID = %02X state = %02X System = %02X %02X Cmd = %02X %02X (len %d)\n", poll_reply->report_id, poll_reply->data_state, poll_reply->system, poll_reply->not_system, poll_reply->data, poll_reply->not_data, purb->actual_length); switch (d->props.rc.core.protocol) { case RC_BIT_NEC: toggle = 0; /* NEC protocol sends repeat code as 0 0 0 FF */ if ((poll_reply->system == 0x00) && (poll_reply->data == 0x00) && (poll_reply->not_data == 0xff)) { poll_reply->data_state = 2; break; } if ((poll_reply->system ^ poll_reply->not_system) != 0xff) { deb_data("NEC extended protocol\n"); /* NEC extended code - 24 bits */ keycode = be16_to_cpu(poll_reply->system16) << 8 | poll_reply->data; } else { deb_data("NEC normal protocol\n"); /* normal NEC code - 16 bits */ keycode = poll_reply->system << 8 | poll_reply->data; } break; default: deb_data("RC5 protocol\n"); /* RC5 Protocol */ toggle = poll_reply->report_id; keycode = poll_reply->system << 8 | poll_reply->data; break; } if ((poll_reply->data + poll_reply->not_data) != 0xff) { 
/* Key failed integrity check */ err("key failed integrity check: %04x %02x %02x", poll_reply->system, poll_reply->data, poll_reply->not_data); goto resubmit; } rc_keydown(d->rc_dev, keycode, toggle); resubmit: /* Clean the buffer before we requeue */ memset(purb->transfer_buffer, 0, RC_MSG_SIZE_V1_20); /* Requeue URB */ usb_submit_urb(purb, GFP_ATOMIC); } int dib0700_rc_setup(struct dvb_usb_device *d) { struct dib0700_state *st = d->priv; struct urb *purb; int ret; /* Poll-based. Don't initialize bulk mode */ if (st->fw_version < 0x10200) return 0; /* Starting in firmware 1.20, the RC info is provided on a bulk pipe */ purb = usb_alloc_urb(0, GFP_KERNEL); if (purb == NULL) { err("rc usb alloc urb failed"); return -ENOMEM; } purb->transfer_buffer = kzalloc(RC_MSG_SIZE_V1_20, GFP_KERNEL); if (purb->transfer_buffer == NULL) { err("rc kzalloc failed"); usb_free_urb(purb); return -ENOMEM; } purb->status = -EINPROGRESS; usb_fill_bulk_urb(purb, d->udev, usb_rcvbulkpipe(d->udev, 1), purb->transfer_buffer, RC_MSG_SIZE_V1_20, dib0700_rc_urb_completion, d); ret = usb_submit_urb(purb, GFP_ATOMIC); if (ret) { err("rc submit urb failed"); kfree(purb->transfer_buffer); usb_free_urb(purb); } return ret; } static int dib0700_probe(struct usb_interface *intf, const struct usb_device_id *id) { int i; struct dvb_usb_device *dev; for (i = 0; i < dib0700_device_count; i++) if (dvb_usb_device_init(intf, &dib0700_devices[i], THIS_MODULE, &dev, adapter_nr) == 0) { struct dib0700_state *st = dev->priv; u32 hwversion, romversion, fw_version, fwtype; dib0700_get_version(dev, &hwversion, &romversion, &fw_version, &fwtype); deb_info("Firmware version: %x, %d, 0x%x, %d\n", hwversion, romversion, fw_version, fwtype); st->fw_version = fw_version; st->nb_packet_buffer_size = (u32)nb_packet_buffer_size; /* Disable polling mode on newer firmwares */ if (st->fw_version >= 0x10200) dev->props.rc.core.bulk_mode = true; else dev->props.rc.core.bulk_mode = false; dib0700_rc_setup(dev); return 0; } return 
-ENODEV; } static struct usb_driver dib0700_driver = { .name = "dvb_usb_dib0700", .probe = dib0700_probe, .disconnect = dvb_usb_device_exit, .id_table = dib0700_usb_id_table, }; module_usb_driver(dib0700_driver); MODULE_FIRMWARE("dvb-usb-dib0700-1.20.fw"); MODULE_AUTHOR("Patrick Boettcher <pboettcher@dibcom.fr>"); MODULE_DESCRIPTION("Driver for devices based on DiBcom DiB0700 - USB bridge"); MODULE_VERSION("1.0"); MODULE_LICENSE("GPL");
gpl-2.0
ChangYeoun/10.1
drivers/platform/x86/dell-laptop.c
4478
19736
/* * Driver for Dell laptop extras * * Copyright (c) Red Hat <mjg@redhat.com> * * Based on documentation in the libsmbios package, Copyright (C) 2005 Dell * Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/backlight.h> #include <linux/err.h> #include <linux/dmi.h> #include <linux/io.h> #include <linux/rfkill.h> #include <linux/power_supply.h> #include <linux/acpi.h> #include <linux/mm.h> #include <linux/i8042.h> #include <linux/slab.h> #include <linux/debugfs.h> #include <linux/seq_file.h> #include "../../firmware/dcdbas.h" #define BRIGHTNESS_TOKEN 0x7d /* This structure will be modified by the firmware when we enter * system management mode, hence the volatiles */ struct calling_interface_buffer { u16 class; u16 select; volatile u32 input[4]; volatile u32 output[4]; } __packed; struct calling_interface_token { u16 tokenID; u16 location; union { u16 value; u16 stringlength; }; }; struct calling_interface_structure { struct dmi_header header; u16 cmdIOAddress; u8 cmdIOCode; u32 supportedCmds; struct calling_interface_token tokens[]; } __packed; struct quirk_entry { u8 touchpad_led; }; static struct quirk_entry *quirks; static struct quirk_entry quirk_dell_vostro_v130 = { .touchpad_led = 1, }; static int dmi_matched(const struct dmi_system_id *dmi) { quirks = dmi->driver_data; return 1; } static int da_command_address; static int da_command_code; static int da_num_tokens; static struct calling_interface_token *da_tokens; static struct platform_driver platform_driver = { .driver = { .name = "dell-laptop", .owner = THIS_MODULE, } }; static struct platform_device *platform_device; static struct backlight_device *dell_backlight_device; static struct rfkill 
*wifi_rfkill; static struct rfkill *bluetooth_rfkill; static struct rfkill *wwan_rfkill; static const struct dmi_system_id __initdata dell_device_table[] = { { .ident = "Dell laptop", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), DMI_MATCH(DMI_CHASSIS_TYPE, "8"), }, }, { .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), DMI_MATCH(DMI_CHASSIS_TYPE, "9"), /*Laptop*/ }, }, { .ident = "Dell Computer Corporation", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Dell Computer Corporation"), DMI_MATCH(DMI_CHASSIS_TYPE, "8"), }, }, { } }; MODULE_DEVICE_TABLE(dmi, dell_device_table); static struct dmi_system_id __devinitdata dell_blacklist[] = { /* Supported by compal-laptop */ { .ident = "Dell Mini 9", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 910"), }, }, { .ident = "Dell Mini 10", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 1010"), }, }, { .ident = "Dell Mini 10v", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 1011"), }, }, { .ident = "Dell Mini 1012", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 1012"), }, }, { .ident = "Dell Inspiron 11z", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 1110"), }, }, { .ident = "Dell Mini 12", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 1210"), }, }, {} }; static struct dmi_system_id __devinitdata dell_quirks[] = { { .callback = dmi_matched, .ident = "Dell Vostro V130", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), DMI_MATCH(DMI_PRODUCT_NAME, "Vostro V130"), }, .driver_data = &quirk_dell_vostro_v130, }, { .callback = dmi_matched, .ident = "Dell Vostro V131", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), DMI_MATCH(DMI_PRODUCT_NAME, "Vostro V131"), }, .driver_data = &quirk_dell_vostro_v130, }, { .callback = dmi_matched, .ident = "Dell Vostro 3555", 
.matches = { DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 3555"), }, .driver_data = &quirk_dell_vostro_v130, }, { .callback = dmi_matched, .ident = "Dell Inspiron N311z", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron N311z"), }, .driver_data = &quirk_dell_vostro_v130, }, { .callback = dmi_matched, .ident = "Dell Inspiron M5110", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron M5110"), }, .driver_data = &quirk_dell_vostro_v130, }, { } }; static struct calling_interface_buffer *buffer; static struct page *bufferpage; static DEFINE_MUTEX(buffer_mutex); static int hwswitch_state; static void get_buffer(void) { mutex_lock(&buffer_mutex); memset(buffer, 0, sizeof(struct calling_interface_buffer)); } static void release_buffer(void) { mutex_unlock(&buffer_mutex); } static void __init parse_da_table(const struct dmi_header *dm) { /* Final token is a terminator, so we don't want to copy it */ int tokens = (dm->length-11)/sizeof(struct calling_interface_token)-1; struct calling_interface_structure *table = container_of(dm, struct calling_interface_structure, header); /* 4 bytes of table header, plus 7 bytes of Dell header, plus at least 6 bytes of entry */ if (dm->length < 17) return; da_command_address = table->cmdIOAddress; da_command_code = table->cmdIOCode; da_tokens = krealloc(da_tokens, (da_num_tokens + tokens) * sizeof(struct calling_interface_token), GFP_KERNEL); if (!da_tokens) return; memcpy(da_tokens+da_num_tokens, table->tokens, sizeof(struct calling_interface_token) * tokens); da_num_tokens += tokens; } static void __init find_tokens(const struct dmi_header *dm, void *dummy) { switch (dm->type) { case 0xd4: /* Indexed IO */ case 0xd5: /* Protected Area Type 1 */ case 0xd6: /* Protected Area Type 2 */ break; case 0xda: /* Calling interface */ parse_da_table(dm); break; } } static int find_token_location(int tokenid) { int i; for (i = 0; i < 
da_num_tokens; i++) { if (da_tokens[i].tokenID == tokenid) return da_tokens[i].location; } return -1; } static struct calling_interface_buffer * dell_send_request(struct calling_interface_buffer *buffer, int class, int select) { struct smi_cmd command; command.magic = SMI_CMD_MAGIC; command.command_address = da_command_address; command.command_code = da_command_code; command.ebx = virt_to_phys(buffer); command.ecx = 0x42534931; buffer->class = class; buffer->select = select; dcdbas_smi_request(&command); return buffer; } /* Derived from information in DellWirelessCtl.cpp: Class 17, select 11 is radio control. It returns an array of 32-bit values. Input byte 0 = 0: Wireless information result[0]: return code result[1]: Bit 0: Hardware switch supported Bit 1: Wifi locator supported Bit 2: Wifi is supported Bit 3: Bluetooth is supported Bit 4: WWAN is supported Bit 5: Wireless keyboard supported Bits 6-7: Reserved Bit 8: Wifi is installed Bit 9: Bluetooth is installed Bit 10: WWAN is installed Bits 11-15: Reserved Bit 16: Hardware switch is on Bit 17: Wifi is blocked Bit 18: Bluetooth is blocked Bit 19: WWAN is blocked Bits 20-31: Reserved result[2]: NVRAM size in bytes result[3]: NVRAM format version number Input byte 0 = 2: Wireless switch configuration result[0]: return code result[1]: Bit 0: Wifi controlled by switch Bit 1: Bluetooth controlled by switch Bit 2: WWAN controlled by switch Bits 3-6: Reserved Bit 7: Wireless switch config locked Bit 8: Wifi locator enabled Bits 9-14: Reserved Bit 15: Wifi locator setting locked Bits 16-31: Reserved */ static int dell_rfkill_set(void *data, bool blocked) { int disable = blocked ? 
1 : 0; unsigned long radio = (unsigned long)data; int hwswitch_bit = (unsigned long)data - 1; int ret = 0; get_buffer(); dell_send_request(buffer, 17, 11); /* If the hardware switch controls this radio, and the hardware switch is disabled, don't allow changing the software state */ if ((hwswitch_state & BIT(hwswitch_bit)) && !(buffer->output[1] & BIT(16))) { ret = -EINVAL; goto out; } buffer->input[0] = (1 | (radio<<8) | (disable << 16)); dell_send_request(buffer, 17, 11); out: release_buffer(); return ret; } static void dell_rfkill_query(struct rfkill *rfkill, void *data) { int status; int bit = (unsigned long)data + 16; int hwswitch_bit = (unsigned long)data - 1; get_buffer(); dell_send_request(buffer, 17, 11); status = buffer->output[1]; release_buffer(); rfkill_set_sw_state(rfkill, !!(status & BIT(bit))); if (hwswitch_state & (BIT(hwswitch_bit))) rfkill_set_hw_state(rfkill, !(status & BIT(16))); } static const struct rfkill_ops dell_rfkill_ops = { .set_block = dell_rfkill_set, .query = dell_rfkill_query, }; static struct dentry *dell_laptop_dir; static int dell_debugfs_show(struct seq_file *s, void *data) { int status; get_buffer(); dell_send_request(buffer, 17, 11); status = buffer->output[1]; release_buffer(); seq_printf(s, "status:\t0x%X\n", status); seq_printf(s, "Bit 0 : Hardware switch supported: %lu\n", status & BIT(0)); seq_printf(s, "Bit 1 : Wifi locator supported: %lu\n", (status & BIT(1)) >> 1); seq_printf(s, "Bit 2 : Wifi is supported: %lu\n", (status & BIT(2)) >> 2); seq_printf(s, "Bit 3 : Bluetooth is supported: %lu\n", (status & BIT(3)) >> 3); seq_printf(s, "Bit 4 : WWAN is supported: %lu\n", (status & BIT(4)) >> 4); seq_printf(s, "Bit 5 : Wireless keyboard supported: %lu\n", (status & BIT(5)) >> 5); seq_printf(s, "Bit 8 : Wifi is installed: %lu\n", (status & BIT(8)) >> 8); seq_printf(s, "Bit 9 : Bluetooth is installed: %lu\n", (status & BIT(9)) >> 9); seq_printf(s, "Bit 10: WWAN is installed: %lu\n", (status & BIT(10)) >> 10); seq_printf(s, "Bit 
16: Hardware switch is on: %lu\n", (status & BIT(16)) >> 16); seq_printf(s, "Bit 17: Wifi is blocked: %lu\n", (status & BIT(17)) >> 17); seq_printf(s, "Bit 18: Bluetooth is blocked: %lu\n", (status & BIT(18)) >> 18); seq_printf(s, "Bit 19: WWAN is blocked: %lu\n", (status & BIT(19)) >> 19); seq_printf(s, "\nhwswitch_state:\t0x%X\n", hwswitch_state); seq_printf(s, "Bit 0 : Wifi controlled by switch: %lu\n", hwswitch_state & BIT(0)); seq_printf(s, "Bit 1 : Bluetooth controlled by switch: %lu\n", (hwswitch_state & BIT(1)) >> 1); seq_printf(s, "Bit 2 : WWAN controlled by switch: %lu\n", (hwswitch_state & BIT(2)) >> 2); seq_printf(s, "Bit 7 : Wireless switch config locked: %lu\n", (hwswitch_state & BIT(7)) >> 7); seq_printf(s, "Bit 8 : Wifi locator enabled: %lu\n", (hwswitch_state & BIT(8)) >> 8); seq_printf(s, "Bit 15: Wifi locator setting locked: %lu\n", (hwswitch_state & BIT(15)) >> 15); return 0; } static int dell_debugfs_open(struct inode *inode, struct file *file) { return single_open(file, dell_debugfs_show, inode->i_private); } static const struct file_operations dell_debugfs_fops = { .owner = THIS_MODULE, .open = dell_debugfs_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static void dell_update_rfkill(struct work_struct *ignored) { if (wifi_rfkill) dell_rfkill_query(wifi_rfkill, (void *)1); if (bluetooth_rfkill) dell_rfkill_query(bluetooth_rfkill, (void *)2); if (wwan_rfkill) dell_rfkill_query(wwan_rfkill, (void *)3); } static DECLARE_DELAYED_WORK(dell_rfkill_work, dell_update_rfkill); static int __init dell_setup_rfkill(void) { int status; int ret; if (dmi_check_system(dell_blacklist)) { pr_info("Blacklisted hardware detected - not enabling rfkill\n"); return 0; } get_buffer(); dell_send_request(buffer, 17, 11); status = buffer->output[1]; buffer->input[0] = 0x2; dell_send_request(buffer, 17, 11); hwswitch_state = buffer->output[1]; release_buffer(); if ((status & (1<<2|1<<8)) == (1<<2|1<<8)) { wifi_rfkill = 
rfkill_alloc("dell-wifi", &platform_device->dev, RFKILL_TYPE_WLAN, &dell_rfkill_ops, (void *) 1); if (!wifi_rfkill) { ret = -ENOMEM; goto err_wifi; } ret = rfkill_register(wifi_rfkill); if (ret) goto err_wifi; } if ((status & (1<<3|1<<9)) == (1<<3|1<<9)) { bluetooth_rfkill = rfkill_alloc("dell-bluetooth", &platform_device->dev, RFKILL_TYPE_BLUETOOTH, &dell_rfkill_ops, (void *) 2); if (!bluetooth_rfkill) { ret = -ENOMEM; goto err_bluetooth; } ret = rfkill_register(bluetooth_rfkill); if (ret) goto err_bluetooth; } if ((status & (1<<4|1<<10)) == (1<<4|1<<10)) { wwan_rfkill = rfkill_alloc("dell-wwan", &platform_device->dev, RFKILL_TYPE_WWAN, &dell_rfkill_ops, (void *) 3); if (!wwan_rfkill) { ret = -ENOMEM; goto err_wwan; } ret = rfkill_register(wwan_rfkill); if (ret) goto err_wwan; } return 0; err_wwan: rfkill_destroy(wwan_rfkill); if (bluetooth_rfkill) rfkill_unregister(bluetooth_rfkill); err_bluetooth: rfkill_destroy(bluetooth_rfkill); if (wifi_rfkill) rfkill_unregister(wifi_rfkill); err_wifi: rfkill_destroy(wifi_rfkill); return ret; } static void dell_cleanup_rfkill(void) { if (wifi_rfkill) { rfkill_unregister(wifi_rfkill); rfkill_destroy(wifi_rfkill); } if (bluetooth_rfkill) { rfkill_unregister(bluetooth_rfkill); rfkill_destroy(bluetooth_rfkill); } if (wwan_rfkill) { rfkill_unregister(wwan_rfkill); rfkill_destroy(wwan_rfkill); } } static int dell_send_intensity(struct backlight_device *bd) { int ret = 0; get_buffer(); buffer->input[0] = find_token_location(BRIGHTNESS_TOKEN); buffer->input[1] = bd->props.brightness; if (buffer->input[0] == -1) { ret = -ENODEV; goto out; } if (power_supply_is_system_supplied() > 0) dell_send_request(buffer, 1, 2); else dell_send_request(buffer, 1, 1); out: release_buffer(); return 0; } static int dell_get_intensity(struct backlight_device *bd) { int ret = 0; get_buffer(); buffer->input[0] = find_token_location(BRIGHTNESS_TOKEN); if (buffer->input[0] == -1) { ret = -ENODEV; goto out; } if (power_supply_is_system_supplied() > 0) 
dell_send_request(buffer, 0, 2); else dell_send_request(buffer, 0, 1); ret = buffer->output[1]; out: release_buffer(); return ret; } static const struct backlight_ops dell_ops = { .get_brightness = dell_get_intensity, .update_status = dell_send_intensity, }; static void touchpad_led_on(void) { int command = 0x97; char data = 1; i8042_command(&data, command | 1 << 12); } static void touchpad_led_off(void) { int command = 0x97; char data = 2; i8042_command(&data, command | 1 << 12); } static void touchpad_led_set(struct led_classdev *led_cdev, enum led_brightness value) { if (value > 0) touchpad_led_on(); else touchpad_led_off(); } static struct led_classdev touchpad_led = { .name = "dell-laptop::touchpad", .brightness_set = touchpad_led_set, .flags = LED_CORE_SUSPENDRESUME, }; static int __devinit touchpad_led_init(struct device *dev) { return led_classdev_register(dev, &touchpad_led); } static void touchpad_led_exit(void) { led_classdev_unregister(&touchpad_led); } static bool dell_laptop_i8042_filter(unsigned char data, unsigned char str, struct serio *port) { static bool extended; if (str & 0x20) return false; if (unlikely(data == 0xe0)) { extended = true; return false; } else if (unlikely(extended)) { switch (data) { case 0x8: schedule_delayed_work(&dell_rfkill_work, round_jiffies_relative(HZ)); break; } extended = false; } return false; } static int __init dell_init(void) { int max_intensity = 0; int ret; if (!dmi_check_system(dell_device_table)) return -ENODEV; quirks = NULL; /* find if this machine support other functions */ dmi_check_system(dell_quirks); dmi_walk(find_tokens, NULL); if (!da_tokens) { pr_info("Unable to find dmi tokens\n"); return -ENODEV; } ret = platform_driver_register(&platform_driver); if (ret) goto fail_platform_driver; platform_device = platform_device_alloc("dell-laptop", -1); if (!platform_device) { ret = -ENOMEM; goto fail_platform_device1; } ret = platform_device_add(platform_device); if (ret) goto fail_platform_device2; /* * 
Allocate buffer below 4GB for SMI data--only 32-bit physical addr * is passed to SMI handler. */ bufferpage = alloc_page(GFP_KERNEL | GFP_DMA32); if (!bufferpage) goto fail_buffer; buffer = page_address(bufferpage); ret = dell_setup_rfkill(); if (ret) { pr_warn("Unable to setup rfkill\n"); goto fail_rfkill; } ret = i8042_install_filter(dell_laptop_i8042_filter); if (ret) { pr_warn("Unable to install key filter\n"); goto fail_filter; } if (quirks && quirks->touchpad_led) touchpad_led_init(&platform_device->dev); dell_laptop_dir = debugfs_create_dir("dell_laptop", NULL); if (dell_laptop_dir != NULL) debugfs_create_file("rfkill", 0444, dell_laptop_dir, NULL, &dell_debugfs_fops); #ifdef CONFIG_ACPI /* In the event of an ACPI backlight being available, don't * register the platform controller. */ if (acpi_video_backlight_support()) return 0; #endif get_buffer(); buffer->input[0] = find_token_location(BRIGHTNESS_TOKEN); if (buffer->input[0] != -1) { dell_send_request(buffer, 0, 2); max_intensity = buffer->output[3]; } release_buffer(); if (max_intensity) { struct backlight_properties props; memset(&props, 0, sizeof(struct backlight_properties)); props.type = BACKLIGHT_PLATFORM; props.max_brightness = max_intensity; dell_backlight_device = backlight_device_register("dell_backlight", &platform_device->dev, NULL, &dell_ops, &props); if (IS_ERR(dell_backlight_device)) { ret = PTR_ERR(dell_backlight_device); dell_backlight_device = NULL; goto fail_backlight; } dell_backlight_device->props.brightness = dell_get_intensity(dell_backlight_device); backlight_update_status(dell_backlight_device); } return 0; fail_backlight: i8042_remove_filter(dell_laptop_i8042_filter); cancel_delayed_work_sync(&dell_rfkill_work); fail_filter: dell_cleanup_rfkill(); fail_rfkill: free_page((unsigned long)bufferpage); fail_buffer: platform_device_del(platform_device); fail_platform_device2: platform_device_put(platform_device); fail_platform_device1: platform_driver_unregister(&platform_driver); 
fail_platform_driver: kfree(da_tokens); return ret; } static void __exit dell_exit(void) { debugfs_remove_recursive(dell_laptop_dir); if (quirks && quirks->touchpad_led) touchpad_led_exit(); i8042_remove_filter(dell_laptop_i8042_filter); cancel_delayed_work_sync(&dell_rfkill_work); backlight_device_unregister(dell_backlight_device); dell_cleanup_rfkill(); if (platform_device) { platform_device_unregister(platform_device); platform_driver_unregister(&platform_driver); } kfree(da_tokens); free_page((unsigned long)buffer); } module_init(dell_init); module_exit(dell_exit); MODULE_AUTHOR("Matthew Garrett <mjg@redhat.com>"); MODULE_DESCRIPTION("Dell laptop driver"); MODULE_LICENSE("GPL");
gpl-2.0
juston-li/hammerhead
arch/arm/mach-ep93xx/vision_ep9307.c
4734
10316
/*
 * arch/arm/mach-ep93xx/vision_ep9307.c
 * Vision Engraving Systems EP9307 SoM support.
 *
 * Copyright (C) 2008-2011 Vision Engraving Systems
 * H Hartley Sweeten <hsweeten@visionengravers.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/irq.h>
#include <linux/gpio.h>
#include <linux/fb.h>
#include <linux/io.h>
#include <linux/mtd/partitions.h>
#include <linux/i2c.h>
#include <linux/i2c-gpio.h>
#include <linux/i2c/pca953x.h>
#include <linux/spi/spi.h>
#include <linux/spi/flash.h>
#include <linux/spi/mmc_spi.h>
#include <linux/mmc/host.h>

#include <mach/hardware.h>
#include <mach/fb.h>
#include <mach/ep93xx_spi.h>
#include <mach/gpio-ep93xx.h>

#include <asm/hardware/vic.h>
#include <asm/mach-types.h>
#include <asm/mach/map.h>
#include <asm/mach/arch.h>

#include "soc.h"

/*************************************************************************
 * Static I/O mappings for the FPGA
 *************************************************************************/
/* The board FPGA sits on chip select 7; map one 4K page of it. */
#define VISION_PHYS_BASE	EP93XX_CS7_PHYS_BASE
#define VISION_VIRT_BASE	0xfebff000

static struct map_desc vision_io_desc[] __initdata = {
	{
		.virtual	= VISION_VIRT_BASE,
		.pfn		= __phys_to_pfn(VISION_PHYS_BASE),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	},
};

/* Map the standard EP93xx I/O first, then the board-specific FPGA window. */
static void __init vision_map_io(void)
{
	ep93xx_map_io();

	iotable_init(vision_io_desc, ARRAY_SIZE(vision_io_desc));
}

/*************************************************************************
 * Ethernet
 *************************************************************************/
/* On-board PHY answers at MII address 1. */
static struct ep93xx_eth_data vision_eth_data __initdata = {
	.phy_id		= 1,
};

/*************************************************************************
 * Framebuffer
 *************************************************************************/
#define VISION_LCD_ENABLE	EP93XX_GPIO_LINE_EGPIO1

/*
 * Claim the LCD-enable GPIO (driven high = panel on) and route the raster
 * signals out by clearing the relevant DEVCFG muxing bits.
 */
static int vision_lcd_setup(struct platform_device *pdev)
{
	int err;

	err = gpio_request_one(VISION_LCD_ENABLE, GPIOF_INIT_HIGH,
				dev_name(&pdev->dev));
	if (err)
		return err;

	ep93xx_devcfg_clear_bits(EP93XX_SYSCON_DEVCFG_RAS |
				 EP93XX_SYSCON_DEVCFG_RASONP3 |
				 EP93XX_SYSCON_DEVCFG_EXVC);

	return 0;
}

static void vision_lcd_teardown(struct platform_device *pdev)
{
	gpio_free(VISION_LCD_ENABLE);
}

/* Blank/unblank by toggling the panel-enable line (non-zero = blank). */
static void vision_lcd_blank(int blank_mode, struct fb_info *info)
{
	if (blank_mode)
		gpio_set_value(VISION_LCD_ENABLE, 0);
	else
		gpio_set_value(VISION_LCD_ENABLE, 1);
}

static struct ep93xxfb_mach_info ep93xxfb_info __initdata = {
	.num_modes	= EP93XXFB_USE_MODEDB,
	.bpp		= 16,
	.flags		= EP93XXFB_USE_SDCSN0 | EP93XXFB_PCLK_FALLING,
	.setup		= vision_lcd_setup,
	.teardown	= vision_lcd_teardown,
	.blank		= vision_lcd_blank,
};

/*************************************************************************
 * GPIO Expanders
 *************************************************************************/
/*
 * Four PCA9539 16-bit I2C GPIO expanders; their GPIO numbers are stacked
 * directly above the SoC's own lines, 16 per chip.
 */
#define PCA9539_74_GPIO_BASE	(EP93XX_GPIO_LINE_MAX + 1)
#define PCA9539_75_GPIO_BASE	(PCA9539_74_GPIO_BASE + 16)
#define PCA9539_76_GPIO_BASE	(PCA9539_75_GPIO_BASE + 16)
#define PCA9539_77_GPIO_BASE	(PCA9539_76_GPIO_BASE + 16)

/* Only the 0x74 expander has its interrupt line wired up (see init). */
static struct pca953x_platform_data pca953x_74_gpio_data = {
	.gpio_base	= PCA9539_74_GPIO_BASE,
	.irq_base	= EP93XX_BOARD_IRQ(0),
};

static struct pca953x_platform_data pca953x_75_gpio_data = {
	.gpio_base	= PCA9539_75_GPIO_BASE,
	.irq_base	= -1,
};

static struct pca953x_platform_data pca953x_76_gpio_data = {
	.gpio_base	= PCA9539_76_GPIO_BASE,
	.irq_base	= -1,
};

static struct pca953x_platform_data pca953x_77_gpio_data = {
	.gpio_base	= PCA9539_77_GPIO_BASE,
	.irq_base	= -1,
};

/*************************************************************************
 * I2C Bus
 *************************************************************************/
/* Bit-banged I2C over the EEPROM data/clock pins. */
static struct i2c_gpio_platform_data vision_i2c_gpio_data __initdata = {
	.sda_pin	= EP93XX_GPIO_LINE_EEDAT,
	.scl_pin	= EP93XX_GPIO_LINE_EECLK,
};

/* RTC plus the four GPIO expanders; [1].irq is patched in at init time. */
static struct i2c_board_info vision_i2c_info[] __initdata = {
	{
		I2C_BOARD_INFO("isl1208", 0x6f),
		.irq		= IRQ_EP93XX_EXT1,
	}, {
		I2C_BOARD_INFO("pca9539", 0x74),
		.platform_data	= &pca953x_74_gpio_data,
	}, {
		I2C_BOARD_INFO("pca9539", 0x75),
		.platform_data	= &pca953x_75_gpio_data,
	}, {
		I2C_BOARD_INFO("pca9539", 0x76),
		.platform_data	= &pca953x_76_gpio_data,
	}, {
		I2C_BOARD_INFO("pca9539", 0x77),
		.platform_data	= &pca953x_77_gpio_data,
	},
};

/*************************************************************************
 * SPI Flash
 *************************************************************************/
#define VISION_SPI_FLASH_CS	EP93XX_GPIO_LINE_EGPIO7

static struct mtd_partition vision_spi_flash_partitions[] = {
	{
		.name	= "SPI bootstrap",
		.offset	= 0,
		.size	= SZ_4K,
	}, {
		.name	= "Bootstrap config",
		.offset	= MTDPART_OFS_APPEND,
		.size	= SZ_4K,
	}, {
		.name	= "System config",
		.offset	= MTDPART_OFS_APPEND,
		.size	= MTDPART_SIZ_FULL,
	},
};

static struct flash_platform_data vision_spi_flash_data = {
	.name		= "SPI Flash",
	.parts		= vision_spi_flash_partitions,
	.nr_parts	= ARRAY_SIZE(vision_spi_flash_partitions),
};

/* Chip select is a plain GPIO, deasserted (high) by default. */
static int vision_spi_flash_hw_setup(struct spi_device *spi)
{
	return gpio_request_one(VISION_SPI_FLASH_CS, GPIOF_INIT_HIGH,
				spi->modalias);
}

static void vision_spi_flash_hw_cleanup(struct spi_device *spi)
{
	gpio_free(VISION_SPI_FLASH_CS);
}

static void vision_spi_flash_hw_cs_control(struct spi_device *spi, int value)
{
	gpio_set_value(VISION_SPI_FLASH_CS, value);
}

static struct ep93xx_spi_chip_ops vision_spi_flash_hw = {
	.setup		= vision_spi_flash_hw_setup,
	.cleanup	= vision_spi_flash_hw_cleanup,
	.cs_control	= vision_spi_flash_hw_cs_control,
};

/*************************************************************************
 * SPI SD/MMC host
 *************************************************************************/
#define VISION_SPI_MMC_CS	EP93XX_GPIO_LINE_G(2)
#define VISION_SPI_MMC_WP	EP93XX_GPIO_LINE_F(0)
#define VISION_SPI_MMC_CD	EP93XX_GPIO_LINE_EGPIO15

static struct gpio vision_spi_mmc_gpios[] = {
	{ VISION_SPI_MMC_WP, GPIOF_DIR_IN, "mmc_spi:wp" },
	{ VISION_SPI_MMC_CD, GPIOF_DIR_IN, "mmc_spi:cd" },
};

/*
 * Claim the write-protect and card-detect inputs and hook the mmc_spi
 * core's card-detect handler (@func) to the CD line's edge interrupt.
 */
static int vision_spi_mmc_init(struct device *pdev,
			irqreturn_t (*func)(int, void *), void *pdata)
{
	int err;

	err = gpio_request_array(vision_spi_mmc_gpios,
				 ARRAY_SIZE(vision_spi_mmc_gpios));
	if (err)
		return err;

	err = gpio_set_debounce(VISION_SPI_MMC_CD, 1);
	if (err)
		goto exit_err;

	err = request_irq(gpio_to_irq(VISION_SPI_MMC_CD), func,
			IRQ_TYPE_EDGE_BOTH, "mmc_spi:cd", pdata);
	if (err)
		goto exit_err;

	return 0;

exit_err:
	gpio_free_array(vision_spi_mmc_gpios, ARRAY_SIZE(vision_spi_mmc_gpios));
	return err;

}

static void vision_spi_mmc_exit(struct device *pdev, void *pdata)
{
	free_irq(gpio_to_irq(VISION_SPI_MMC_CD), pdata);
	gpio_free_array(vision_spi_mmc_gpios, ARRAY_SIZE(vision_spi_mmc_gpios));
}

/* WP line reads high when the card's write-protect switch is set. */
static int vision_spi_mmc_get_ro(struct device *pdev)
{
	return !!gpio_get_value(VISION_SPI_MMC_WP);
}

/* CD line is active-low: low means a card is present. */
static int vision_spi_mmc_get_cd(struct device *pdev)
{
	return !gpio_get_value(VISION_SPI_MMC_CD);
}

static struct mmc_spi_platform_data vision_spi_mmc_data = {
	.init		= vision_spi_mmc_init,
	.exit		= vision_spi_mmc_exit,
	.get_ro		= vision_spi_mmc_get_ro,
	.get_cd		= vision_spi_mmc_get_cd,
	.detect_delay	= 100,
	.powerup_msecs	= 100,
	.ocr_mask	= MMC_VDD_32_33 | MMC_VDD_33_34,
};

static int vision_spi_mmc_hw_setup(struct spi_device *spi)
{
	return gpio_request_one(VISION_SPI_MMC_CS, GPIOF_INIT_HIGH,
				spi->modalias);
}

static void vision_spi_mmc_hw_cleanup(struct spi_device *spi)
{
	gpio_free(VISION_SPI_MMC_CS);
}

static void vision_spi_mmc_hw_cs_control(struct spi_device *spi, int value)
{
	gpio_set_value(VISION_SPI_MMC_CS, value);
}

static struct ep93xx_spi_chip_ops vision_spi_mmc_hw = {
	.setup		= vision_spi_mmc_hw_setup,
	.cleanup	= vision_spi_mmc_hw_cleanup,
	.cs_control	= vision_spi_mmc_hw_cs_control,
};

/*************************************************************************
 * SPI Bus
 *************************************************************************/
static struct spi_board_info vision_spi_board_info[] __initdata = {
	{
		.modalias		= "sst25l",
		.platform_data		= &vision_spi_flash_data,
		.controller_data	= &vision_spi_flash_hw,
		.max_speed_hz		= 20000000,
		.bus_num		= 0,
		.chip_select		= 0,
		.mode			= SPI_MODE_3,
	}, {
		.modalias		= "mmc_spi",
		.platform_data		= &vision_spi_mmc_data,
		.controller_data	= &vision_spi_mmc_hw,
		.max_speed_hz		= 20000000,
		.bus_num		= 0,
		.chip_select		= 1,
		.mode			= SPI_MODE_3,
	},
};

static struct ep93xx_spi_info vision_spi_master __initdata = {
	.num_chipselect	= ARRAY_SIZE(vision_spi_board_info),
};

/*************************************************************************
 * Machine Initialization
 *************************************************************************/
/*
 * Register all on-board peripherals: core SoC devices, 64 MiB NOR flash
 * on CS6, Ethernet, framebuffer, PWM, the bit-banged I2C bus (with its
 * expander IRQ line claimed up front) and the SPI bus.
 */
static void __init vision_init_machine(void)
{
	ep93xx_init_devices();
	ep93xx_register_flash(2, EP93XX_CS6_PHYS_BASE, SZ_64M);
	ep93xx_register_eth(&vision_eth_data, 1);
	ep93xx_register_fb(&ep93xxfb_info);
	ep93xx_register_pwm(1, 0);

	/*
	 * Request the gpio expander's interrupt gpio line now to prevent
	 * the kernel from doing a WARN in gpiolib:gpio_ensure_requested().
	 */
	if (gpio_request_one(EP93XX_GPIO_LINE_F(7), GPIOF_DIR_IN,
				"pca9539:74"))
		pr_warn("cannot request interrupt gpio for pca9539:74\n");

	vision_i2c_info[1].irq = gpio_to_irq(EP93XX_GPIO_LINE_F(7));

	ep93xx_register_i2c(&vision_i2c_gpio_data, vision_i2c_info,
				ARRAY_SIZE(vision_i2c_info));
	ep93xx_register_spi(&vision_spi_master, vision_spi_board_info,
				ARRAY_SIZE(vision_spi_board_info));
}

MACHINE_START(VISION_EP9307, "Vision Engraving Systems EP9307")
	/* Maintainer: H Hartley Sweeten <hsweeten@visionengravers.com> */
	.atag_offset	= 0x100,
	.map_io		= vision_map_io,
	.init_irq	= ep93xx_init_irq,
	.handle_irq	= vic_handle_irq,
	.timer		= &ep93xx_timer,
	.init_machine	= vision_init_machine,
	.restart	= ep93xx_restart,
MACHINE_END
gpl-2.0
poondog/m7_stock_443
arch/arm/mach-ep93xx/adssphere.c
4734
1196
/* * arch/arm/mach-ep93xx/adssphere.c * ADS Sphere support. * * Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or (at * your option) any later version. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/platform_device.h> #include <mach/hardware.h> #include <asm/hardware/vic.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> #include "soc.h" static struct ep93xx_eth_data __initdata adssphere_eth_data = { .phy_id = 1, }; static void __init adssphere_init_machine(void) { ep93xx_init_devices(); ep93xx_register_flash(4, EP93XX_CS6_PHYS_BASE, SZ_32M); ep93xx_register_eth(&adssphere_eth_data, 1); } MACHINE_START(ADSSPHERE, "ADS Sphere board") /* Maintainer: Lennert Buytenhek <buytenh@wantstofly.org> */ .atag_offset = 0x100, .map_io = ep93xx_map_io, .init_irq = ep93xx_init_irq, .handle_irq = vic_handle_irq, .timer = &ep93xx_timer, .init_machine = adssphere_init_machine, .restart = ep93xx_restart, MACHINE_END
gpl-2.0
WhiteNeo-/NeoKernel
lib/hexdump.c
4734
7158
/* * lib/hexdump.c * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. See README and COPYING for * more details. */ #include <linux/types.h> #include <linux/ctype.h> #include <linux/kernel.h> #include <linux/export.h> const char hex_asc[] = "0123456789abcdef"; EXPORT_SYMBOL(hex_asc); /** * hex_to_bin - convert a hex digit to its real value * @ch: ascii character represents hex digit * * hex_to_bin() converts one hex digit to its actual value or -1 in case of bad * input. */ int hex_to_bin(char ch) { if ((ch >= '0') && (ch <= '9')) return ch - '0'; ch = tolower(ch); if ((ch >= 'a') && (ch <= 'f')) return ch - 'a' + 10; return -1; } EXPORT_SYMBOL(hex_to_bin); /** * hex2bin - convert an ascii hexadecimal string to its binary representation * @dst: binary result * @src: ascii hexadecimal string * @count: result length * * Return 0 on success, -1 in case of bad input. */ int hex2bin(u8 *dst, const char *src, size_t count) { while (count--) { int hi = hex_to_bin(*src++); int lo = hex_to_bin(*src++); if ((hi < 0) || (lo < 0)) return -1; *dst++ = (hi << 4) | lo; } return 0; } EXPORT_SYMBOL(hex2bin); /** * hex_dump_to_buffer - convert a blob of data to "hex ASCII" in memory * @buf: data blob to dump * @len: number of bytes in the @buf * @rowsize: number of bytes to print per line; must be 16 or 32 * @groupsize: number of bytes to print at a time (1, 2, 4, 8; default = 1) * @linebuf: where to put the converted data * @linebuflen: total size of @linebuf, including space for terminating NUL * @ascii: include ASCII after the hex output * * hex_dump_to_buffer() works on one "line" of output at a time, i.e., * 16 or 32 bytes of input data converted to hex + ASCII output. * * Given a buffer of u8 data, hex_dump_to_buffer() converts the input data * to a hex + ASCII dump at the supplied memory location. 
* The converted output is always NUL-terminated. * * E.g.: * hex_dump_to_buffer(frame->data, frame->len, 16, 1, * linebuf, sizeof(linebuf), true); * * example output buffer: * 40 41 42 43 44 45 46 47 48 49 4a 4b 4c 4d 4e 4f @ABCDEFGHIJKLMNO */ void hex_dump_to_buffer(const void *buf, size_t len, int rowsize, int groupsize, char *linebuf, size_t linebuflen, bool ascii) { const u8 *ptr = buf; u8 ch; int j, lx = 0; int ascii_column; if (rowsize != 16 && rowsize != 32) rowsize = 16; if (!len) goto nil; if (len > rowsize) /* limit to one line at a time */ len = rowsize; if ((len % groupsize) != 0) /* no mixed size output */ groupsize = 1; switch (groupsize) { case 8: { const u64 *ptr8 = buf; int ngroups = len / groupsize; for (j = 0; j < ngroups; j++) lx += scnprintf(linebuf + lx, linebuflen - lx, "%s%16.16llx", j ? " " : "", (unsigned long long)*(ptr8 + j)); ascii_column = 17 * ngroups + 2; break; } case 4: { const u32 *ptr4 = buf; int ngroups = len / groupsize; for (j = 0; j < ngroups; j++) lx += scnprintf(linebuf + lx, linebuflen - lx, "%s%8.8x", j ? " " : "", *(ptr4 + j)); ascii_column = 9 * ngroups + 2; break; } case 2: { const u16 *ptr2 = buf; int ngroups = len / groupsize; for (j = 0; j < ngroups; j++) lx += scnprintf(linebuf + lx, linebuflen - lx, "%s%4.4x", j ? " " : "", *(ptr2 + j)); ascii_column = 5 * ngroups + 2; break; } default: for (j = 0; (j < len) && (lx + 3) <= linebuflen; j++) { ch = ptr[j]; linebuf[lx++] = hex_asc_hi(ch); linebuf[lx++] = hex_asc_lo(ch); linebuf[lx++] = ' '; } if (j) lx--; ascii_column = 3 * rowsize + 2; break; } if (!ascii) goto nil; while (lx < (linebuflen - 1) && lx < (ascii_column - 1)) linebuf[lx++] = ' '; for (j = 0; (j < len) && (lx + 2) < linebuflen; j++) { ch = ptr[j]; linebuf[lx++] = (isascii(ch) && isprint(ch)) ? ch : '.'; } nil: linebuf[lx++] = '\0'; } EXPORT_SYMBOL(hex_dump_to_buffer); #ifdef CONFIG_PRINTK /** * print_hex_dump - print a text hex dump to syslog for a binary blob of data * @level: kernel log level (e.g. 
KERN_DEBUG) * @prefix_str: string to prefix each line with; * caller supplies trailing spaces for alignment if desired * @prefix_type: controls whether prefix of an offset, address, or none * is printed (%DUMP_PREFIX_OFFSET, %DUMP_PREFIX_ADDRESS, %DUMP_PREFIX_NONE) * @rowsize: number of bytes to print per line; must be 16 or 32 * @groupsize: number of bytes to print at a time (1, 2, 4, 8; default = 1) * @buf: data blob to dump * @len: number of bytes in the @buf * @ascii: include ASCII after the hex output * * Given a buffer of u8 data, print_hex_dump() prints a hex + ASCII dump * to the kernel log at the specified kernel log level, with an optional * leading prefix. * * print_hex_dump() works on one "line" of output at a time, i.e., * 16 or 32 bytes of input data converted to hex + ASCII output. * print_hex_dump() iterates over the entire input @buf, breaking it into * "line size" chunks to format and print. * * E.g.: * print_hex_dump(KERN_DEBUG, "raw data: ", DUMP_PREFIX_ADDRESS, * 16, 1, frame->data, frame->len, true); * * Example output using %DUMP_PREFIX_OFFSET and 1-byte mode: * 0009ab42: 40 41 42 43 44 45 46 47 48 49 4a 4b 4c 4d 4e 4f @ABCDEFGHIJKLMNO * Example output using %DUMP_PREFIX_ADDRESS and 4-byte mode: * ffffffff88089af0: 73727170 77767574 7b7a7978 7f7e7d7c pqrstuvwxyz{|}~. 
*/ void print_hex_dump(const char *level, const char *prefix_str, int prefix_type, int rowsize, int groupsize, const void *buf, size_t len, bool ascii) { const u8 *ptr = buf; int i, linelen, remaining = len; unsigned char linebuf[32 * 3 + 2 + 32 + 1]; if (rowsize != 16 && rowsize != 32) rowsize = 16; for (i = 0; i < len; i += rowsize) { linelen = min(remaining, rowsize); remaining -= rowsize; hex_dump_to_buffer(ptr + i, linelen, rowsize, groupsize, linebuf, sizeof(linebuf), ascii); switch (prefix_type) { case DUMP_PREFIX_ADDRESS: printk("%s%s%p: %s\n", level, prefix_str, ptr + i, linebuf); break; case DUMP_PREFIX_OFFSET: printk("%s%s%.8x: %s\n", level, prefix_str, i, linebuf); break; default: printk("%s%s%s\n", level, prefix_str, linebuf); break; } } } EXPORT_SYMBOL(print_hex_dump); /** * print_hex_dump_bytes - shorthand form of print_hex_dump() with default params * @prefix_str: string to prefix each line with; * caller supplies trailing spaces for alignment if desired * @prefix_type: controls whether prefix of an offset, address, or none * is printed (%DUMP_PREFIX_OFFSET, %DUMP_PREFIX_ADDRESS, %DUMP_PREFIX_NONE) * @buf: data blob to dump * @len: number of bytes in the @buf * * Calls print_hex_dump(), with log level of KERN_DEBUG, * rowsize of 16, groupsize of 1, and ASCII output included. */ void print_hex_dump_bytes(const char *prefix_str, int prefix_type, const void *buf, size_t len) { print_hex_dump(KERN_DEBUG, prefix_str, prefix_type, 16, 1, buf, len, true); } EXPORT_SYMBOL(print_hex_dump_bytes); #endif
gpl-2.0
ghbhaha/AK-OnePone
arch/arm/mach-ep93xx/edb93xx.c
4734
9856
/*
 * arch/arm/mach-ep93xx/edb93xx.c
 * Cirrus Logic EDB93xx Development Board support.
 *
 * EDB93XX, EDB9301, EDB9307A
 * Copyright (C) 2008-2009 H Hartley Sweeten <hsweeten@visionengravers.com>
 *
 * EDB9302
 * Copyright (C) 2006 George Kashperko <george@chas.com.ua>
 *
 * EDB9302A, EDB9315, EDB9315A
 * Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org>
 *
 * EDB9307
 * Copyright (C) 2007 Herbert Valerio Riedel <hvr@gnu.org>
 *
 * EDB9312
 * Copyright (C) 2006 Infosys Technologies Limited
 * Toufeeq Hussain <toufeeq_hussain@infosys.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/gpio.h>
#include <linux/i2c.h>
#include <linux/i2c-gpio.h>
#include <linux/spi/spi.h>

#include <sound/cs4271.h>

#include <mach/hardware.h>
#include <mach/fb.h>
#include <mach/ep93xx_spi.h>
#include <mach/gpio-ep93xx.h>

#include <asm/hardware/vic.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>

#include "soc.h"

/*
 * One driver serves the whole EDB93xx family; most peripherals are
 * selected at runtime with machine_is_*() checks.
 */

/* The larger boards carry a wider/bigger flash bank on chip select 6. */
static void __init edb93xx_register_flash(void)
{
	if (machine_is_edb9307() || machine_is_edb9312() ||
	    machine_is_edb9315()) {
		ep93xx_register_flash(4, EP93XX_CS6_PHYS_BASE, SZ_32M);
	} else {
		ep93xx_register_flash(2, EP93XX_CS6_PHYS_BASE, SZ_16M);
	}
}

/* On-board PHY answers at MII address 1 on every family member. */
static struct ep93xx_eth_data __initdata edb93xx_eth_data = {
	.phy_id		= 1,
};


/*************************************************************************
 * EDB93xx i2c peripheral handling
 *************************************************************************/
/* Bit-banged I2C over the EEPROM data/clock pins, default timing. */
static struct i2c_gpio_platform_data __initdata edb93xx_i2c_gpio_data = {
	.sda_pin		= EP93XX_GPIO_LINE_EEDAT,
	.sda_is_open_drain	= 0,
	.scl_pin		= EP93XX_GPIO_LINE_EECLK,
	.scl_is_open_drain	= 0,
	.udelay			= 0,	/* default to 100 kHz */
	.timeout		= 0,	/* default to 100 ms */
};

/* "A" revision boards carry an ISL1208 RTC ... */
static struct i2c_board_info __initdata edb93xxa_i2c_board_info[] = {
	{
		I2C_BOARD_INFO("isl1208", 0x6f),
	},
};

/* ... the older ones a DS1337 RTC. */
static struct i2c_board_info __initdata edb93xx_i2c_board_info[] = {
	{
		I2C_BOARD_INFO("ds1337", 0x68),
	},
};

/* Pick the RTC board info matching the actual board revision. */
static void __init edb93xx_register_i2c(void)
{
	if (machine_is_edb9302a() || machine_is_edb9307a() ||
	    machine_is_edb9315a()) {
		ep93xx_register_i2c(&edb93xx_i2c_gpio_data,
				    edb93xxa_i2c_board_info,
				    ARRAY_SIZE(edb93xxa_i2c_board_info));
	} else if (machine_is_edb9307() || machine_is_edb9312() ||
		   machine_is_edb9315()) {
		ep93xx_register_i2c(&edb93xx_i2c_gpio_data,
				    edb93xx_i2c_board_info,
				    ARRAY_SIZE(edb93xx_i2c_board_info));
	}
}


/*************************************************************************
 * EDB93xx SPI peripheral handling
 *************************************************************************/
/* CS4271 audio codec; the reset GPIO differs per board (set below). */
static struct cs4271_platform_data edb93xx_cs4271_data = {
	.gpio_nreset	= -EINVAL,	/* filled in later */
};

/* Codec chip select is a GPIO, deasserted (high) by default. */
static int edb93xx_cs4271_hw_setup(struct spi_device *spi)
{
	return gpio_request_one(EP93XX_GPIO_LINE_EGPIO6,
				GPIOF_OUT_INIT_HIGH, spi->modalias);
}

static void edb93xx_cs4271_hw_cleanup(struct spi_device *spi)
{
	gpio_free(EP93XX_GPIO_LINE_EGPIO6);
}

static void edb93xx_cs4271_hw_cs_control(struct spi_device *spi, int value)
{
	gpio_set_value(EP93XX_GPIO_LINE_EGPIO6, value);
}

static struct ep93xx_spi_chip_ops edb93xx_cs4271_hw = {
	.setup		= edb93xx_cs4271_hw_setup,
	.cleanup	= edb93xx_cs4271_hw_cleanup,
	.cs_control	= edb93xx_cs4271_hw_cs_control,
};

static struct spi_board_info edb93xx_spi_board_info[] __initdata = {
	{
		.modalias		= "cs4271",
		.platform_data		= &edb93xx_cs4271_data,
		.controller_data	= &edb93xx_cs4271_hw,
		.max_speed_hz		= 6000000,
		.bus_num		= 0,
		.chip_select		= 0,
		.mode			= SPI_MODE_3,
	},
};

static struct ep93xx_spi_info edb93xx_spi_info __initdata = {
	.num_chipselect	= ARRAY_SIZE(edb93xx_spi_board_info),
};

/* Patch the per-board codec reset line, then register the SPI bus. */
static void __init edb93xx_register_spi(void)
{
	if (machine_is_edb9301() || machine_is_edb9302())
		edb93xx_cs4271_data.gpio_nreset = EP93XX_GPIO_LINE_EGPIO1;
	else if (machine_is_edb9302a() || machine_is_edb9307a())
		edb93xx_cs4271_data.gpio_nreset = EP93XX_GPIO_LINE_H(2);
	else if (machine_is_edb9315a())
		edb93xx_cs4271_data.gpio_nreset = EP93XX_GPIO_LINE_EGPIO14;

	ep93xx_register_spi(&edb93xx_spi_info, edb93xx_spi_board_info,
			    ARRAY_SIZE(edb93xx_spi_board_info));
}


/*************************************************************************
 * EDB93xx I2S
 *************************************************************************/
static struct platform_device edb93xx_audio_device = {
	.name		= "edb93xx-audio",
	.id		= -1,
};

/* Boards that actually have the codec wired to the I2S controller. */
static int __init edb93xx_has_audio(void)
{
	return (machine_is_edb9301() || machine_is_edb9302() ||
		machine_is_edb9302a() || machine_is_edb9307a() ||
		machine_is_edb9315a());
}

static void __init edb93xx_register_i2s(void)
{
	if (edb93xx_has_audio()) {
		ep93xx_register_i2s();
		platform_device_register(&edb93xx_audio_device);
	}
}


/*************************************************************************
 * EDB93xx pwm
 *************************************************************************/
static void __init edb93xx_register_pwm(void)
{
	if (machine_is_edb9301() ||
	    machine_is_edb9302() || machine_is_edb9302a()) {
		/* EP9301 and EP9302 only have pwm.1 (EGPIO14) */
		ep93xx_register_pwm(0, 1);
	} else if (machine_is_edb9307() || machine_is_edb9307a()) {
		/* EP9307 only has pwm.0 (PWMOUT) */
		ep93xx_register_pwm(1, 0);
	} else {
		/* EP9312 and EP9315 have both */
		ep93xx_register_pwm(1, 1);
	}
}


/*************************************************************************
 * EDB93xx framebuffer
 *************************************************************************/
static struct ep93xxfb_mach_info __initdata edb93xxfb_info = {
	.num_modes	= EP93XXFB_USE_MODEDB,
	.bpp		= 16,
	.flags		= 0,
};

static int __init edb93xx_has_fb(void)
{
	/* These platforms have an ep93xx with video capability */
	return machine_is_edb9307() || machine_is_edb9307a() ||
	       machine_is_edb9312() || machine_is_edb9315() ||
	       machine_is_edb9315a();
}

/* Select the SDRAM chip select used by the raster engine per board. */
static void __init edb93xx_register_fb(void)
{
	if (!edb93xx_has_fb())
		return;

	if (machine_is_edb9307a() || machine_is_edb9315a())
		edb93xxfb_info.flags |= EP93XXFB_USE_SDCSN0;
	else
		edb93xxfb_info.flags |= EP93XXFB_USE_SDCSN3;

	ep93xx_register_fb(&edb93xxfb_info);
}

/* Common board init: every peripheral registration is board-aware. */
static void __init edb93xx_init_machine(void)
{
	ep93xx_init_devices();
	edb93xx_register_flash();
	ep93xx_register_eth(&edb93xx_eth_data, 1);
	edb93xx_register_i2c();
	edb93xx_register_spi();
	edb93xx_register_i2s();
	edb93xx_register_pwm();
	edb93xx_register_fb();
}


#ifdef CONFIG_MACH_EDB9301
MACHINE_START(EDB9301, "Cirrus Logic EDB9301 Evaluation Board")
	/* Maintainer: H Hartley Sweeten <hsweeten@visionengravers.com> */
	.atag_offset	= 0x100,
	.map_io		= ep93xx_map_io,
	.init_irq	= ep93xx_init_irq,
	.handle_irq	= vic_handle_irq,
	.timer		= &ep93xx_timer,
	.init_machine	= edb93xx_init_machine,
	.restart	= ep93xx_restart,
MACHINE_END
#endif

#ifdef CONFIG_MACH_EDB9302
MACHINE_START(EDB9302, "Cirrus Logic EDB9302 Evaluation Board")
	/* Maintainer: George Kashperko <george@chas.com.ua> */
	.atag_offset	= 0x100,
	.map_io		= ep93xx_map_io,
	.init_irq	= ep93xx_init_irq,
	.handle_irq	= vic_handle_irq,
	.timer		= &ep93xx_timer,
	.init_machine	= edb93xx_init_machine,
	.restart	= ep93xx_restart,
MACHINE_END
#endif

#ifdef CONFIG_MACH_EDB9302A
MACHINE_START(EDB9302A, "Cirrus Logic EDB9302A Evaluation Board")
	/* Maintainer: Lennert Buytenhek <buytenh@wantstofly.org> */
	.atag_offset	= 0x100,
	.map_io		= ep93xx_map_io,
	.init_irq	= ep93xx_init_irq,
	.handle_irq	= vic_handle_irq,
	.timer		= &ep93xx_timer,
	.init_machine	= edb93xx_init_machine,
	.restart	= ep93xx_restart,
MACHINE_END
#endif

#ifdef CONFIG_MACH_EDB9307
MACHINE_START(EDB9307, "Cirrus Logic EDB9307 Evaluation Board")
	/* Maintainer: Herbert Valerio Riedel <hvr@gnu.org> */
	.atag_offset	= 0x100,
	.map_io		= ep93xx_map_io,
	.init_irq	= ep93xx_init_irq,
	.handle_irq	= vic_handle_irq,
	.timer		= &ep93xx_timer,
	.init_machine	= edb93xx_init_machine,
	.restart	= ep93xx_restart,
MACHINE_END
#endif

#ifdef CONFIG_MACH_EDB9307A
MACHINE_START(EDB9307A, "Cirrus Logic EDB9307A Evaluation Board")
	/* Maintainer: H Hartley Sweeten <hsweeten@visionengravers.com> */
	.atag_offset	= 0x100,
	.map_io		= ep93xx_map_io,
	.init_irq	= ep93xx_init_irq,
	.handle_irq	= vic_handle_irq,
	.timer		= &ep93xx_timer,
	.init_machine	= edb93xx_init_machine,
	.restart	= ep93xx_restart,
MACHINE_END
#endif

#ifdef CONFIG_MACH_EDB9312
MACHINE_START(EDB9312, "Cirrus Logic EDB9312 Evaluation Board")
	/* Maintainer: Toufeeq Hussain <toufeeq_hussain@infosys.com> */
	.atag_offset	= 0x100,
	.map_io		= ep93xx_map_io,
	.init_irq	= ep93xx_init_irq,
	.handle_irq	= vic_handle_irq,
	.timer		= &ep93xx_timer,
	.init_machine	= edb93xx_init_machine,
	.restart	= ep93xx_restart,
MACHINE_END
#endif

#ifdef CONFIG_MACH_EDB9315
MACHINE_START(EDB9315, "Cirrus Logic EDB9315 Evaluation Board")
	/* Maintainer: Lennert Buytenhek <buytenh@wantstofly.org> */
	.atag_offset	= 0x100,
	.map_io		= ep93xx_map_io,
	.init_irq	= ep93xx_init_irq,
	.handle_irq	= vic_handle_irq,
	.timer		= &ep93xx_timer,
	.init_machine	= edb93xx_init_machine,
	.restart	= ep93xx_restart,
MACHINE_END
#endif

#ifdef CONFIG_MACH_EDB9315A
MACHINE_START(EDB9315A, "Cirrus Logic EDB9315A Evaluation Board")
	/* Maintainer: Lennert Buytenhek <buytenh@wantstofly.org> */
	.atag_offset	= 0x100,
	.map_io		= ep93xx_map_io,
	.init_irq	= ep93xx_init_irq,
	.handle_irq	= vic_handle_irq,
	.timer		= &ep93xx_timer,
	.init_machine	= edb93xx_init_machine,
	.restart	= ep93xx_restart,
MACHINE_END
#endif
gpl-2.0
AOSPA-L/android_kernel_oppo_msm8974
arch/arm/mach-davinci/board-dm355-leopard.c
4734
7158
/*
 * DM355 leopard board support
 *
 * Based on board-dm355-evm.c
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/nand.h>
#include <linux/i2c.h>
#include <linux/gpio.h>
#include <linux/clk.h>
#include <linux/spi/spi.h>
#include <linux/spi/eeprom.h>

#include <asm/mach-types.h>
#include <asm/mach/arch.h>

#include <mach/i2c.h>
#include <mach/serial.h>
#include <mach/nand.h>
#include <mach/mmc.h>
#include <mach/usb.h>

#include "davinci.h"

/* NOTE: this is geared for the standard config, with a socketed
 * 2 GByte Micron NAND (MT29F16G08FAA) using 128KB sectors.  If you
 * swap chips, maybe with a different block size, partitioning may
 * need to be changed.
 */
#define NAND_BLOCK_SIZE		SZ_128K

static struct mtd_partition davinci_nand_partitions[] = {
	{
		/* UBL (a few copies) plus U-Boot */
		.name		= "bootloader",
		.offset		= 0,
		.size		= 15 * NAND_BLOCK_SIZE,
		.mask_flags	= MTD_WRITEABLE, /* force read-only */
	}, {
		/* U-Boot environment */
		.name		= "params",
		.offset		= MTDPART_OFS_APPEND,
		.size		= 1 * NAND_BLOCK_SIZE,
		.mask_flags	= 0,
	}, {
		.name		= "kernel",
		.offset		= MTDPART_OFS_APPEND,
		.size		= SZ_4M,
		.mask_flags	= 0,
	}, {
		.name		= "filesystem1",
		.offset		= MTDPART_OFS_APPEND,
		.size		= SZ_512M,
		.mask_flags	= 0,
	}, {
		.name		= "filesystem2",
		.offset		= MTDPART_OFS_APPEND,
		.size		= MTDPART_SIZ_FULL,
		.mask_flags	= 0,
	}
	/* two blocks with bad block table (and mirror) at the end */
};

static struct davinci_nand_pdata davinci_nand_data = {
	.mask_chipsel		= BIT(14),
	.parts			= davinci_nand_partitions,
	.nr_parts		= ARRAY_SIZE(davinci_nand_partitions),
	.ecc_mode		= NAND_ECC_HW_SYNDROME,
	.bbt_options		= NAND_BBT_USE_FLASH,
};

/* NAND on AEMIF chip select 0: 32M data window plus the control regs. */
static struct resource davinci_nand_resources[] = {
	{
		.start		= DM355_ASYNC_EMIF_DATA_CE0_BASE,
		.end		= DM355_ASYNC_EMIF_DATA_CE0_BASE + SZ_32M - 1,
		.flags		= IORESOURCE_MEM,
	}, {
		.start		= DM355_ASYNC_EMIF_CONTROL_BASE,
		.end		= DM355_ASYNC_EMIF_CONTROL_BASE + SZ_4K - 1,
		.flags		= IORESOURCE_MEM,
	},
};

static struct platform_device davinci_nand_device = {
	.name			= "davinci_nand",
	.id			= 0,

	.num_resources		= ARRAY_SIZE(davinci_nand_resources),
	.resource		= davinci_nand_resources,

	.dev			= {
		.platform_data	= &davinci_nand_data,
	},
};

static struct davinci_i2c_platform_data i2c_pdata = {
	.bus_freq	= 400	/* kHz */,
	.bus_delay	= 0	/* usec */,
};

/* Base GPIO of the MSP-provided mmc WP/CD lines; -EINVAL until probed. */
static int leopard_mmc_gpio = -EINVAL;

/*
 * Callback invoked with the MSP430's GPIO base once its driver probes;
 * claims the four SD-slot status lines (two slots x {WP, CD}).
 */
static void dm355leopard_mmcsd_gpios(unsigned gpio)
{
	gpio_request(gpio + 0, "mmc0_ro");
	gpio_request(gpio + 1, "mmc0_cd");
	gpio_request(gpio + 2, "mmc1_ro");
	gpio_request(gpio + 3, "mmc1_cd");

	/* we "know" these are input-only so we don't
	 * need to call gpio_direction_input()
	 */

	leopard_mmc_gpio = gpio;
}

static struct i2c_board_info dm355leopard_i2c_info[] = {
	{	I2C_BOARD_INFO("dm355leopard_msp", 0x25),
		/*
		 * NOTE(review): platform_data here is a *function pointer*
		 * (the callback above), not a struct -- the msp driver is
		 * expected to call it; verify against that driver.
		 */
		.platform_data = dm355leopard_mmcsd_gpios,
		/* plus irq */ },
	/* { I2C_BOARD_INFO("tlv320aic3x", 0x1b), }, */
	/* { I2C_BOARD_INFO("tvp5146", 0x5d), }, */
};

/* Set up the I2C adapter and the MSP430's interrupt line (GPIO 5). */
static void __init leopard_init_i2c(void)
{
	davinci_init_i2c(&i2c_pdata);

	gpio_request(5, "dm355leopard_msp");
	gpio_direction_input(5);
	dm355leopard_i2c_info[0].irq = gpio_to_irq(5);

	i2c_register_board_info(1, dm355leopard_i2c_info,
			ARRAY_SIZE(dm355leopard_i2c_info));
}

/* DM9000 Ethernet on the async bus; IRQ resource filled in at init. */
static struct resource dm355leopard_dm9000_rsrc[] = {
	{
		/* addr */
		.start	= 0x04000000,
		.end	= 0x04000001,
		.flags	= IORESOURCE_MEM,
	}, {
		/* data */
		.start	= 0x04000016,
		.end	= 0x04000017,
		.flags	= IORESOURCE_MEM,
	}, {
		.flags	= IORESOURCE_IRQ
			| IORESOURCE_IRQ_HIGHEDGE /* rising (active high) */,
	},
};

static struct platform_device dm355leopard_dm9000 = {
	.name		= "dm9000",
	.id		= -1,
	.resource	= dm355leopard_dm9000_rsrc,
	.num_resources	= ARRAY_SIZE(dm355leopard_dm9000_rsrc),
};

static struct platform_device *davinci_leopard_devices[] __initdata = {
	&dm355leopard_dm9000,
	&davinci_nand_device,
};

/* Only UART0 is brought out/used on this board. */
static struct davinci_uart_config uart_config __initdata = {
	.enabled_uarts = (1 << 0),
};

static void __init dm355_leopard_map_io(void)
{
	dm355_init();
}

/* Card detect: MSP GPIO (base + 2*module + 1), active low. */
static int dm355leopard_mmc_get_cd(int module)
{
	if (!gpio_is_valid(leopard_mmc_gpio))
		return -ENXIO;
	/* low == card present */
	return !gpio_get_value_cansleep(leopard_mmc_gpio + 2 * module + 1);
}

/* Write protect: MSP GPIO (base + 2*module), active high. */
static int dm355leopard_mmc_get_ro(int module)
{
	if (!gpio_is_valid(leopard_mmc_gpio))
		return -ENXIO;
	/* high == card's write protect switch active */
	return gpio_get_value_cansleep(leopard_mmc_gpio + 2 * module + 0);
}

static struct davinci_mmc_config dm355leopard_mmc_config = {
	.get_cd		= dm355leopard_mmc_get_cd,
	.get_ro		= dm355leopard_mmc_get_ro,
	.wires		= 4,
	.max_freq	= 50000000,
	.caps		= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED,
};

/* Don't connect anything to J10 unless you're only using USB host
 * mode *and* have to do so with some kind of gender-bender.  If
 * you have proper Mini-B or Mini-A cables (or Mini-A adapters)
 * the ID pin won't need any help.
 */
#ifdef CONFIG_USB_MUSB_PERIPHERAL
#define USB_ID_VALUE	0	/* ID pulled high; *should* float */
#else
#define USB_ID_VALUE	1	/* ID pulled low */
#endif

/* 64 Kbit AT25640A SPI EEPROM (16-bit addressing, 32-byte pages). */
static struct spi_eeprom at25640a = {
	.byte_len	= SZ_64K / 8,
	.name		= "at25640a",
	.page_size	= 32,
	.flags		= EE_ADDR2,
};

static struct spi_board_info dm355_leopard_spi_info[] __initconst = {
	{
		.modalias	= "at25",
		.platform_data	= &at25640a,
		.max_speed_hz	= 10 * 1000 * 1000,	/* at 3v3 */
		.bus_num	= 0,
		.chip_select	= 0,
		.mode		= SPI_MODE_0,
	},
};

/*
 * Board init: wire up the DM9000 IRQ, enable the AEMIF clock it needs,
 * register platform devices, then I2C, serial, USB, both MMC slots and
 * SPI0 with the EEPROM.
 */
static __init void dm355_leopard_init(void)
{
	struct clk *aemif;

	gpio_request(9, "dm9000");
	gpio_direction_input(9);
	dm355leopard_dm9000_rsrc[2].start = gpio_to_irq(9);

	aemif = clk_get(&dm355leopard_dm9000.dev, "aemif");
	if (IS_ERR(aemif))
		/*
		 * NOTE(review): WARN()'s first argument is a *condition*;
		 * passing the format string there means this always fires
		 * and prints __func__ as the format.  Probably meant
		 * WARN(1, "%s: ...", __func__) or pr_warn().
		 */
		WARN("%s: unable to get AEMIF clock\n", __func__);
	else
		clk_enable(aemif);

	platform_add_devices(davinci_leopard_devices,
			     ARRAY_SIZE(davinci_leopard_devices));
	leopard_init_i2c();
	davinci_serial_init(&uart_config);

	/* NOTE:  NAND flash timings set by the UBL are slower than
	 * needed by MT29F16G08FAA chips ... EMIF.A1CR is 0x40400204
	 * but could be 0x0400008c for about 25% faster page reads.
	 */

	gpio_request(2, "usb_id_toggle");
	gpio_direction_output(2, USB_ID_VALUE);
	/* irlml6401 switches over 1A in under 8 msec */
	davinci_setup_usb(1000, 8);

	davinci_setup_mmc(0, &dm355leopard_mmc_config);
	davinci_setup_mmc(1, &dm355leopard_mmc_config);

	dm355_init_spi0(BIT(0), dm355_leopard_spi_info,
			ARRAY_SIZE(dm355_leopard_spi_info));
}

MACHINE_START(DM355_LEOPARD, "DaVinci DM355 leopard")
	.atag_offset  = 0x100,
	.map_io	      = dm355_leopard_map_io,
	.init_irq     = davinci_irq_init,
	.timer	      = &davinci_timer,
	.init_machine = dm355_leopard_init,
	.dma_zone_size	= SZ_128M,
	.restart	= davinci_restart,
MACHINE_END
gpl-2.0
revjunkie/galbi-g2
drivers/media/video/adv7183.c
4990
18224
/* * adv7183.c Analog Devices ADV7183 video decoder driver * * Copyright (c) 2011 Analog Devices Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/delay.h> #include <linux/errno.h> #include <linux/gpio.h> #include <linux/i2c.h> #include <linux/init.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/types.h> #include <linux/videodev2.h> #include <media/adv7183.h> #include <media/v4l2-chip-ident.h> #include <media/v4l2-ctrls.h> #include <media/v4l2-device.h> #include "adv7183_regs.h" struct adv7183 { struct v4l2_subdev sd; struct v4l2_ctrl_handler hdl; v4l2_std_id std; /* Current set standard */ u32 input; u32 output; unsigned reset_pin; unsigned oe_pin; struct v4l2_mbus_framefmt fmt; }; /* EXAMPLES USING 27 MHz CLOCK * Mode 1 CVBS Input (Composite Video on AIN5) * All standards are supported through autodetect, 8-bit, 4:2:2, ITU-R BT.656 output on P15 to P8. 
*/ static const unsigned char adv7183_init_regs[] = { ADV7183_IN_CTRL, 0x04, /* CVBS input on AIN5 */ ADV7183_DIGI_CLAMP_CTRL_1, 0x00, /* Slow down digital clamps */ ADV7183_SHAP_FILT_CTRL, 0x41, /* Set CSFM to SH1 */ ADV7183_ADC_CTRL, 0x16, /* Power down ADC 1 and ADC 2 */ ADV7183_CTI_DNR_CTRL_4, 0x04, /* Set DNR threshold to 4 for flat response */ /* ADI recommended programming sequence */ ADV7183_ADI_CTRL, 0x80, ADV7183_CTI_DNR_CTRL_4, 0x20, 0x52, 0x18, 0x58, 0xED, 0x77, 0xC5, 0x7C, 0x93, 0x7D, 0x00, 0xD0, 0x48, 0xD5, 0xA0, 0xD7, 0xEA, ADV7183_SD_SATURATION_CR, 0x3E, ADV7183_PAL_V_END, 0x3E, ADV7183_PAL_F_TOGGLE, 0x0F, ADV7183_ADI_CTRL, 0x00, }; static inline struct adv7183 *to_adv7183(struct v4l2_subdev *sd) { return container_of(sd, struct adv7183, sd); } static inline struct v4l2_subdev *to_sd(struct v4l2_ctrl *ctrl) { return &container_of(ctrl->handler, struct adv7183, hdl)->sd; } static inline int adv7183_read(struct v4l2_subdev *sd, unsigned char reg) { struct i2c_client *client = v4l2_get_subdevdata(sd); return i2c_smbus_read_byte_data(client, reg); } static inline int adv7183_write(struct v4l2_subdev *sd, unsigned char reg, unsigned char value) { struct i2c_client *client = v4l2_get_subdevdata(sd); return i2c_smbus_write_byte_data(client, reg, value); } static int adv7183_writeregs(struct v4l2_subdev *sd, const unsigned char *regs, unsigned int num) { unsigned char reg, data; unsigned int cnt = 0; if (num & 0x1) { v4l2_err(sd, "invalid regs array\n"); return -1; } while (cnt < num) { reg = *regs++; data = *regs++; cnt += 2; adv7183_write(sd, reg, data); } return 0; } static int adv7183_log_status(struct v4l2_subdev *sd) { struct adv7183 *decoder = to_adv7183(sd); v4l2_info(sd, "adv7183: Input control = 0x%02x\n", adv7183_read(sd, ADV7183_IN_CTRL)); v4l2_info(sd, "adv7183: Video selection = 0x%02x\n", adv7183_read(sd, ADV7183_VD_SEL)); v4l2_info(sd, "adv7183: Output control = 0x%02x\n", adv7183_read(sd, ADV7183_OUT_CTRL)); v4l2_info(sd, "adv7183: Extended 
output control = 0x%02x\n", adv7183_read(sd, ADV7183_EXT_OUT_CTRL)); v4l2_info(sd, "adv7183: Autodetect enable = 0x%02x\n", adv7183_read(sd, ADV7183_AUTO_DET_EN)); v4l2_info(sd, "adv7183: Contrast = 0x%02x\n", adv7183_read(sd, ADV7183_CONTRAST)); v4l2_info(sd, "adv7183: Brightness = 0x%02x\n", adv7183_read(sd, ADV7183_BRIGHTNESS)); v4l2_info(sd, "adv7183: Hue = 0x%02x\n", adv7183_read(sd, ADV7183_HUE)); v4l2_info(sd, "adv7183: Default value Y = 0x%02x\n", adv7183_read(sd, ADV7183_DEF_Y)); v4l2_info(sd, "adv7183: Default value C = 0x%02x\n", adv7183_read(sd, ADV7183_DEF_C)); v4l2_info(sd, "adv7183: ADI control = 0x%02x\n", adv7183_read(sd, ADV7183_ADI_CTRL)); v4l2_info(sd, "adv7183: Power Management = 0x%02x\n", adv7183_read(sd, ADV7183_POW_MANAGE)); v4l2_info(sd, "adv7183: Status 1 2 and 3 = 0x%02x 0x%02x 0x%02x\n", adv7183_read(sd, ADV7183_STATUS_1), adv7183_read(sd, ADV7183_STATUS_2), adv7183_read(sd, ADV7183_STATUS_3)); v4l2_info(sd, "adv7183: Ident = 0x%02x\n", adv7183_read(sd, ADV7183_IDENT)); v4l2_info(sd, "adv7183: Analog clamp control = 0x%02x\n", adv7183_read(sd, ADV7183_ANAL_CLAMP_CTRL)); v4l2_info(sd, "adv7183: Digital clamp control 1 = 0x%02x\n", adv7183_read(sd, ADV7183_DIGI_CLAMP_CTRL_1)); v4l2_info(sd, "adv7183: Shaping filter control 1 and 2 = 0x%02x 0x%02x\n", adv7183_read(sd, ADV7183_SHAP_FILT_CTRL), adv7183_read(sd, ADV7183_SHAP_FILT_CTRL_2)); v4l2_info(sd, "adv7183: Comb filter control = 0x%02x\n", adv7183_read(sd, ADV7183_COMB_FILT_CTRL)); v4l2_info(sd, "adv7183: ADI control 2 = 0x%02x\n", adv7183_read(sd, ADV7183_ADI_CTRL_2)); v4l2_info(sd, "adv7183: Pixel delay control = 0x%02x\n", adv7183_read(sd, ADV7183_PIX_DELAY_CTRL)); v4l2_info(sd, "adv7183: Misc gain control = 0x%02x\n", adv7183_read(sd, ADV7183_MISC_GAIN_CTRL)); v4l2_info(sd, "adv7183: AGC mode control = 0x%02x\n", adv7183_read(sd, ADV7183_AGC_MODE_CTRL)); v4l2_info(sd, "adv7183: Chroma gain control 1 and 2 = 0x%02x 0x%02x\n", adv7183_read(sd, ADV7183_CHRO_GAIN_CTRL_1), 
adv7183_read(sd, ADV7183_CHRO_GAIN_CTRL_2)); v4l2_info(sd, "adv7183: Luma gain control 1 and 2 = 0x%02x 0x%02x\n", adv7183_read(sd, ADV7183_LUMA_GAIN_CTRL_1), adv7183_read(sd, ADV7183_LUMA_GAIN_CTRL_2)); v4l2_info(sd, "adv7183: Vsync field control 1 2 and 3 = 0x%02x 0x%02x 0x%02x\n", adv7183_read(sd, ADV7183_VS_FIELD_CTRL_1), adv7183_read(sd, ADV7183_VS_FIELD_CTRL_2), adv7183_read(sd, ADV7183_VS_FIELD_CTRL_3)); v4l2_info(sd, "adv7183: Hsync positon control 1 2 and 3 = 0x%02x 0x%02x 0x%02x\n", adv7183_read(sd, ADV7183_HS_POS_CTRL_1), adv7183_read(sd, ADV7183_HS_POS_CTRL_2), adv7183_read(sd, ADV7183_HS_POS_CTRL_3)); v4l2_info(sd, "adv7183: Polarity = 0x%02x\n", adv7183_read(sd, ADV7183_POLARITY)); v4l2_info(sd, "adv7183: ADC control = 0x%02x\n", adv7183_read(sd, ADV7183_ADC_CTRL)); v4l2_info(sd, "adv7183: SD offset Cb and Cr = 0x%02x 0x%02x\n", adv7183_read(sd, ADV7183_SD_OFFSET_CB), adv7183_read(sd, ADV7183_SD_OFFSET_CR)); v4l2_info(sd, "adv7183: SD saturation Cb and Cr = 0x%02x 0x%02x\n", adv7183_read(sd, ADV7183_SD_SATURATION_CB), adv7183_read(sd, ADV7183_SD_SATURATION_CR)); v4l2_info(sd, "adv7183: Drive strength = 0x%02x\n", adv7183_read(sd, ADV7183_DRIVE_STR)); v4l2_ctrl_handler_log_status(&decoder->hdl, sd->name); return 0; } static int adv7183_g_std(struct v4l2_subdev *sd, v4l2_std_id *std) { struct adv7183 *decoder = to_adv7183(sd); *std = decoder->std; return 0; } static int adv7183_s_std(struct v4l2_subdev *sd, v4l2_std_id std) { struct adv7183 *decoder = to_adv7183(sd); int reg; reg = adv7183_read(sd, ADV7183_IN_CTRL) & 0xF; if (std == V4L2_STD_PAL_60) reg |= 0x60; else if (std == V4L2_STD_NTSC_443) reg |= 0x70; else if (std == V4L2_STD_PAL_N) reg |= 0x90; else if (std == V4L2_STD_PAL_M) reg |= 0xA0; else if (std == V4L2_STD_PAL_Nc) reg |= 0xC0; else if (std & V4L2_STD_PAL) reg |= 0x80; else if (std & V4L2_STD_NTSC) reg |= 0x50; else if (std & V4L2_STD_SECAM) reg |= 0xE0; else return -EINVAL; adv7183_write(sd, ADV7183_IN_CTRL, reg); decoder->std = std; 
return 0; } static int adv7183_reset(struct v4l2_subdev *sd, u32 val) { int reg; reg = adv7183_read(sd, ADV7183_POW_MANAGE) | 0x80; adv7183_write(sd, ADV7183_POW_MANAGE, reg); /* wait 5ms before any further i2c writes are performed */ usleep_range(5000, 10000); return 0; } static int adv7183_s_routing(struct v4l2_subdev *sd, u32 input, u32 output, u32 config) { struct adv7183 *decoder = to_adv7183(sd); int reg; if ((input > ADV7183_COMPONENT1) || (output > ADV7183_16BIT_OUT)) return -EINVAL; if (input != decoder->input) { decoder->input = input; reg = adv7183_read(sd, ADV7183_IN_CTRL) & 0xF0; switch (input) { case ADV7183_COMPOSITE1: reg |= 0x1; break; case ADV7183_COMPOSITE2: reg |= 0x2; break; case ADV7183_COMPOSITE3: reg |= 0x3; break; case ADV7183_COMPOSITE4: reg |= 0x4; break; case ADV7183_COMPOSITE5: reg |= 0x5; break; case ADV7183_COMPOSITE6: reg |= 0xB; break; case ADV7183_COMPOSITE7: reg |= 0xC; break; case ADV7183_COMPOSITE8: reg |= 0xD; break; case ADV7183_COMPOSITE9: reg |= 0xE; break; case ADV7183_COMPOSITE10: reg |= 0xF; break; case ADV7183_SVIDEO0: reg |= 0x6; break; case ADV7183_SVIDEO1: reg |= 0x7; break; case ADV7183_SVIDEO2: reg |= 0x8; break; case ADV7183_COMPONENT0: reg |= 0x9; break; case ADV7183_COMPONENT1: reg |= 0xA; break; default: break; } adv7183_write(sd, ADV7183_IN_CTRL, reg); } if (output != decoder->output) { decoder->output = output; reg = adv7183_read(sd, ADV7183_OUT_CTRL) & 0xC0; switch (output) { case ADV7183_16BIT_OUT: reg |= 0x9; break; default: reg |= 0xC; break; } adv7183_write(sd, ADV7183_OUT_CTRL, reg); } return 0; } static int adv7183_s_ctrl(struct v4l2_ctrl *ctrl) { struct v4l2_subdev *sd = to_sd(ctrl); int val = ctrl->val; switch (ctrl->id) { case V4L2_CID_BRIGHTNESS: if (val < 0) val = 127 - val; adv7183_write(sd, ADV7183_BRIGHTNESS, val); break; case V4L2_CID_CONTRAST: adv7183_write(sd, ADV7183_CONTRAST, val); break; case V4L2_CID_SATURATION: adv7183_write(sd, ADV7183_SD_SATURATION_CB, val >> 8); adv7183_write(sd, 
ADV7183_SD_SATURATION_CR, (val & 0xFF)); break; case V4L2_CID_HUE: adv7183_write(sd, ADV7183_SD_OFFSET_CB, val >> 8); adv7183_write(sd, ADV7183_SD_OFFSET_CR, (val & 0xFF)); break; default: return -EINVAL; } return 0; } static int adv7183_querystd(struct v4l2_subdev *sd, v4l2_std_id *std) { struct adv7183 *decoder = to_adv7183(sd); int reg; /* enable autodetection block */ reg = adv7183_read(sd, ADV7183_IN_CTRL) & 0xF; adv7183_write(sd, ADV7183_IN_CTRL, reg); /* wait autodetection switch */ mdelay(10); /* get autodetection result */ reg = adv7183_read(sd, ADV7183_STATUS_1); switch ((reg >> 0x4) & 0x7) { case 0: *std = V4L2_STD_NTSC; break; case 1: *std = V4L2_STD_NTSC_443; break; case 2: *std = V4L2_STD_PAL_M; break; case 3: *std = V4L2_STD_PAL_60; break; case 4: *std = V4L2_STD_PAL; break; case 5: *std = V4L2_STD_SECAM; break; case 6: *std = V4L2_STD_PAL_Nc; break; case 7: *std = V4L2_STD_SECAM; break; default: *std = V4L2_STD_UNKNOWN; break; } /* after std detection, write back user set std */ adv7183_s_std(sd, decoder->std); return 0; } static int adv7183_g_input_status(struct v4l2_subdev *sd, u32 *status) { int reg; *status = V4L2_IN_ST_NO_SIGNAL; reg = adv7183_read(sd, ADV7183_STATUS_1); if (reg < 0) return reg; if (reg & 0x1) *status = 0; return 0; } static int adv7183_enum_mbus_fmt(struct v4l2_subdev *sd, unsigned index, enum v4l2_mbus_pixelcode *code) { if (index > 0) return -EINVAL; *code = V4L2_MBUS_FMT_UYVY8_2X8; return 0; } static int adv7183_try_mbus_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *fmt) { struct adv7183 *decoder = to_adv7183(sd); fmt->code = V4L2_MBUS_FMT_UYVY8_2X8; fmt->colorspace = V4L2_COLORSPACE_SMPTE170M; if (decoder->std & V4L2_STD_525_60) { fmt->field = V4L2_FIELD_SEQ_TB; fmt->width = 720; fmt->height = 480; } else { fmt->field = V4L2_FIELD_SEQ_BT; fmt->width = 720; fmt->height = 576; } return 0; } static int adv7183_s_mbus_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *fmt) { struct adv7183 *decoder = 
to_adv7183(sd); adv7183_try_mbus_fmt(sd, fmt); decoder->fmt = *fmt; return 0; } static int adv7183_g_mbus_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *fmt) { struct adv7183 *decoder = to_adv7183(sd); *fmt = decoder->fmt; return 0; } static int adv7183_s_stream(struct v4l2_subdev *sd, int enable) { struct adv7183 *decoder = to_adv7183(sd); if (enable) gpio_direction_output(decoder->oe_pin, 0); else gpio_direction_output(decoder->oe_pin, 1); udelay(1); return 0; } static int adv7183_g_chip_ident(struct v4l2_subdev *sd, struct v4l2_dbg_chip_ident *chip) { int rev; struct i2c_client *client = v4l2_get_subdevdata(sd); /* 0x11 for adv7183, 0x13 for adv7183b */ rev = adv7183_read(sd, ADV7183_IDENT); return v4l2_chip_ident_i2c_client(client, chip, V4L2_IDENT_ADV7183, rev); } #ifdef CONFIG_VIDEO_ADV_DEBUG static int adv7183_g_register(struct v4l2_subdev *sd, struct v4l2_dbg_register *reg) { struct i2c_client *client = v4l2_get_subdevdata(sd); if (!v4l2_chip_match_i2c_client(client, &reg->match)) return -EINVAL; if (!capable(CAP_SYS_ADMIN)) return -EPERM; reg->val = adv7183_read(sd, reg->reg & 0xff); reg->size = 1; return 0; } static int adv7183_s_register(struct v4l2_subdev *sd, struct v4l2_dbg_register *reg) { struct i2c_client *client = v4l2_get_subdevdata(sd); if (!v4l2_chip_match_i2c_client(client, &reg->match)) return -EINVAL; if (!capable(CAP_SYS_ADMIN)) return -EPERM; adv7183_write(sd, reg->reg & 0xff, reg->val & 0xff); return 0; } #endif static const struct v4l2_ctrl_ops adv7183_ctrl_ops = { .s_ctrl = adv7183_s_ctrl, }; static const struct v4l2_subdev_core_ops adv7183_core_ops = { .log_status = adv7183_log_status, .g_std = adv7183_g_std, .s_std = adv7183_s_std, .reset = adv7183_reset, .g_chip_ident = adv7183_g_chip_ident, #ifdef CONFIG_VIDEO_ADV_DEBUG .g_register = adv7183_g_register, .s_register = adv7183_s_register, #endif }; static const struct v4l2_subdev_video_ops adv7183_video_ops = { .s_routing = adv7183_s_routing, .querystd = adv7183_querystd, 
.g_input_status = adv7183_g_input_status, .enum_mbus_fmt = adv7183_enum_mbus_fmt, .try_mbus_fmt = adv7183_try_mbus_fmt, .s_mbus_fmt = adv7183_s_mbus_fmt, .g_mbus_fmt = adv7183_g_mbus_fmt, .s_stream = adv7183_s_stream, }; static const struct v4l2_subdev_ops adv7183_ops = { .core = &adv7183_core_ops, .video = &adv7183_video_ops, }; static int adv7183_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct adv7183 *decoder; struct v4l2_subdev *sd; struct v4l2_ctrl_handler *hdl; int ret; struct v4l2_mbus_framefmt fmt; const unsigned *pin_array; /* Check if the adapter supports the needed features */ if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA)) return -EIO; v4l_info(client, "chip found @ 0x%02x (%s)\n", client->addr << 1, client->adapter->name); pin_array = client->dev.platform_data; if (pin_array == NULL) return -EINVAL; decoder = kzalloc(sizeof(struct adv7183), GFP_KERNEL); if (decoder == NULL) return -ENOMEM; decoder->reset_pin = pin_array[0]; decoder->oe_pin = pin_array[1]; if (gpio_request(decoder->reset_pin, "ADV7183 Reset")) { v4l_err(client, "failed to request GPIO %d\n", decoder->reset_pin); ret = -EBUSY; goto err_free_decoder; } if (gpio_request(decoder->oe_pin, "ADV7183 Output Enable")) { v4l_err(client, "failed to request GPIO %d\n", decoder->oe_pin); ret = -EBUSY; goto err_free_reset; } sd = &decoder->sd; v4l2_i2c_subdev_init(sd, client, &adv7183_ops); hdl = &decoder->hdl; v4l2_ctrl_handler_init(hdl, 4); v4l2_ctrl_new_std(hdl, &adv7183_ctrl_ops, V4L2_CID_BRIGHTNESS, -128, 127, 1, 0); v4l2_ctrl_new_std(hdl, &adv7183_ctrl_ops, V4L2_CID_CONTRAST, 0, 0xFF, 1, 0x80); v4l2_ctrl_new_std(hdl, &adv7183_ctrl_ops, V4L2_CID_SATURATION, 0, 0xFFFF, 1, 0x8080); v4l2_ctrl_new_std(hdl, &adv7183_ctrl_ops, V4L2_CID_HUE, 0, 0xFFFF, 1, 0x8080); /* hook the control handler into the driver */ sd->ctrl_handler = hdl; if (hdl->error) { ret = hdl->error; v4l2_ctrl_handler_free(hdl); goto err_free_oe; } /* v4l2 doesn't support an 
autodetect standard, pick PAL as default */ decoder->std = V4L2_STD_PAL; decoder->input = ADV7183_COMPOSITE4; decoder->output = ADV7183_8BIT_OUT; gpio_direction_output(decoder->oe_pin, 1); /* reset chip */ gpio_direction_output(decoder->reset_pin, 0); /* reset pulse width at least 5ms */ mdelay(10); gpio_direction_output(decoder->reset_pin, 1); /* wait 5ms before any further i2c writes are performed */ mdelay(5); adv7183_writeregs(sd, adv7183_init_regs, ARRAY_SIZE(adv7183_init_regs)); adv7183_s_std(sd, decoder->std); fmt.width = 720; fmt.height = 576; adv7183_s_mbus_fmt(sd, &fmt); /* initialize the hardware to the default control values */ ret = v4l2_ctrl_handler_setup(hdl); if (ret) { v4l2_ctrl_handler_free(hdl); goto err_free_oe; } return 0; err_free_oe: gpio_free(decoder->oe_pin); err_free_reset: gpio_free(decoder->reset_pin); err_free_decoder: kfree(decoder); return ret; } static int adv7183_remove(struct i2c_client *client) { struct v4l2_subdev *sd = i2c_get_clientdata(client); struct adv7183 *decoder = to_adv7183(sd); v4l2_device_unregister_subdev(sd); v4l2_ctrl_handler_free(sd->ctrl_handler); gpio_free(decoder->oe_pin); gpio_free(decoder->reset_pin); kfree(decoder); return 0; } static const struct i2c_device_id adv7183_id[] = { {"adv7183", 0}, {}, }; MODULE_DEVICE_TABLE(i2c, adv7183_id); static struct i2c_driver adv7183_driver = { .driver = { .owner = THIS_MODULE, .name = "adv7183", }, .probe = adv7183_probe, .remove = __devexit_p(adv7183_remove), .id_table = adv7183_id, }; static __init int adv7183_init(void) { return i2c_add_driver(&adv7183_driver); } static __exit void adv7183_exit(void) { i2c_del_driver(&adv7183_driver); } module_init(adv7183_init); module_exit(adv7183_exit); MODULE_DESCRIPTION("Analog Devices ADV7183 video decoder driver"); MODULE_AUTHOR("Scott Jiang <Scott.Jiang.Linux@gmail.com>"); MODULE_LICENSE("GPL v2");
gpl-2.0
xclusive36/android_kernel_lge_fx3
sound/arm/aaci.c
4990
25625
/* * linux/sound/arm/aaci.c - ARM PrimeCell AACI PL041 driver * * Copyright (C) 2003 Deep Blue Solutions Ltd, All Rights Reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * Documentation: ARM DDI 0173B */ #include <linux/module.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/ioport.h> #include <linux/device.h> #include <linux/spinlock.h> #include <linux/interrupt.h> #include <linux/err.h> #include <linux/amba/bus.h> #include <linux/io.h> #include <sound/core.h> #include <sound/initval.h> #include <sound/ac97_codec.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include "aaci.h" #define DRIVER_NAME "aaci-pl041" #define FRAME_PERIOD_US 21 /* * PM support is not complete. Turn it off. */ #undef CONFIG_PM static void aaci_ac97_select_codec(struct aaci *aaci, struct snd_ac97 *ac97) { u32 v, maincr = aaci->maincr | MAINCR_SCRA(ac97->num); /* * Ensure that the slot 1/2 RX registers are empty. */ v = readl(aaci->base + AACI_SLFR); if (v & SLFR_2RXV) readl(aaci->base + AACI_SL2RX); if (v & SLFR_1RXV) readl(aaci->base + AACI_SL1RX); if (maincr != readl(aaci->base + AACI_MAINCR)) { writel(maincr, aaci->base + AACI_MAINCR); readl(aaci->base + AACI_MAINCR); udelay(1); } } /* * P29: * The recommended use of programming the external codec through slot 1 * and slot 2 data is to use the channels during setup routines and the * slot register at any other time. The data written into slot 1, slot 2 * and slot 12 registers is transmitted only when their corresponding * SI1TxEn, SI2TxEn and SI12TxEn bits are set in the AACI_MAINCR * register. 
*/ static void aaci_ac97_write(struct snd_ac97 *ac97, unsigned short reg, unsigned short val) { struct aaci *aaci = ac97->private_data; int timeout; u32 v; if (ac97->num >= 4) return; mutex_lock(&aaci->ac97_sem); aaci_ac97_select_codec(aaci, ac97); /* * P54: You must ensure that AACI_SL2TX is always written * to, if required, before data is written to AACI_SL1TX. */ writel(val << 4, aaci->base + AACI_SL2TX); writel(reg << 12, aaci->base + AACI_SL1TX); /* Initially, wait one frame period */ udelay(FRAME_PERIOD_US); /* And then wait an additional eight frame periods for it to be sent */ timeout = FRAME_PERIOD_US * 8; do { udelay(1); v = readl(aaci->base + AACI_SLFR); } while ((v & (SLFR_1TXB|SLFR_2TXB)) && --timeout); if (v & (SLFR_1TXB|SLFR_2TXB)) dev_err(&aaci->dev->dev, "timeout waiting for write to complete\n"); mutex_unlock(&aaci->ac97_sem); } /* * Read an AC'97 register. */ static unsigned short aaci_ac97_read(struct snd_ac97 *ac97, unsigned short reg) { struct aaci *aaci = ac97->private_data; int timeout, retries = 10; u32 v; if (ac97->num >= 4) return ~0; mutex_lock(&aaci->ac97_sem); aaci_ac97_select_codec(aaci, ac97); /* * Write the register address to slot 1. 
*/ writel((reg << 12) | (1 << 19), aaci->base + AACI_SL1TX); /* Initially, wait one frame period */ udelay(FRAME_PERIOD_US); /* And then wait an additional eight frame periods for it to be sent */ timeout = FRAME_PERIOD_US * 8; do { udelay(1); v = readl(aaci->base + AACI_SLFR); } while ((v & SLFR_1TXB) && --timeout); if (v & SLFR_1TXB) { dev_err(&aaci->dev->dev, "timeout on slot 1 TX busy\n"); v = ~0; goto out; } /* Now wait for the response frame */ udelay(FRAME_PERIOD_US); /* And then wait an additional eight frame periods for data */ timeout = FRAME_PERIOD_US * 8; do { udelay(1); cond_resched(); v = readl(aaci->base + AACI_SLFR) & (SLFR_1RXV|SLFR_2RXV); } while ((v != (SLFR_1RXV|SLFR_2RXV)) && --timeout); if (v != (SLFR_1RXV|SLFR_2RXV)) { dev_err(&aaci->dev->dev, "timeout on RX valid\n"); v = ~0; goto out; } do { v = readl(aaci->base + AACI_SL1RX) >> 12; if (v == reg) { v = readl(aaci->base + AACI_SL2RX) >> 4; break; } else if (--retries) { dev_warn(&aaci->dev->dev, "ac97 read back fail. retry\n"); continue; } else { dev_warn(&aaci->dev->dev, "wrong ac97 register read back (%x != %x)\n", v, reg); v = ~0; } } while (retries); out: mutex_unlock(&aaci->ac97_sem); return v; } static inline void aaci_chan_wait_ready(struct aaci_runtime *aacirun, unsigned long mask) { u32 val; int timeout = 5000; do { udelay(1); val = readl(aacirun->base + AACI_SR); } while (val & mask && timeout--); } /* * Interrupt support. 
*/ static void aaci_fifo_irq(struct aaci *aaci, int channel, u32 mask) { if (mask & ISR_ORINTR) { dev_warn(&aaci->dev->dev, "RX overrun on chan %d\n", channel); writel(ICLR_RXOEC1 << channel, aaci->base + AACI_INTCLR); } if (mask & ISR_RXTOINTR) { dev_warn(&aaci->dev->dev, "RX timeout on chan %d\n", channel); writel(ICLR_RXTOFEC1 << channel, aaci->base + AACI_INTCLR); } if (mask & ISR_RXINTR) { struct aaci_runtime *aacirun = &aaci->capture; bool period_elapsed = false; void *ptr; if (!aacirun->substream || !aacirun->start) { dev_warn(&aaci->dev->dev, "RX interrupt???\n"); writel(0, aacirun->base + AACI_IE); return; } spin_lock(&aacirun->lock); ptr = aacirun->ptr; do { unsigned int len = aacirun->fifo_bytes; u32 val; if (aacirun->bytes <= 0) { aacirun->bytes += aacirun->period; period_elapsed = true; } if (!(aacirun->cr & CR_EN)) break; val = readl(aacirun->base + AACI_SR); if (!(val & SR_RXHF)) break; if (!(val & SR_RXFF)) len >>= 1; aacirun->bytes -= len; /* reading 16 bytes at a time */ for( ; len > 0; len -= 16) { asm( "ldmia %1, {r0, r1, r2, r3}\n\t" "stmia %0!, {r0, r1, r2, r3}" : "+r" (ptr) : "r" (aacirun->fifo) : "r0", "r1", "r2", "r3", "cc"); if (ptr >= aacirun->end) ptr = aacirun->start; } } while(1); aacirun->ptr = ptr; spin_unlock(&aacirun->lock); if (period_elapsed) snd_pcm_period_elapsed(aacirun->substream); } if (mask & ISR_URINTR) { dev_dbg(&aaci->dev->dev, "TX underrun on chan %d\n", channel); writel(ICLR_TXUEC1 << channel, aaci->base + AACI_INTCLR); } if (mask & ISR_TXINTR) { struct aaci_runtime *aacirun = &aaci->playback; bool period_elapsed = false; void *ptr; if (!aacirun->substream || !aacirun->start) { dev_warn(&aaci->dev->dev, "TX interrupt???\n"); writel(0, aacirun->base + AACI_IE); return; } spin_lock(&aacirun->lock); ptr = aacirun->ptr; do { unsigned int len = aacirun->fifo_bytes; u32 val; if (aacirun->bytes <= 0) { aacirun->bytes += aacirun->period; period_elapsed = true; } if (!(aacirun->cr & CR_EN)) break; val = readl(aacirun->base + 
AACI_SR); if (!(val & SR_TXHE)) break; if (!(val & SR_TXFE)) len >>= 1; aacirun->bytes -= len; /* writing 16 bytes at a time */ for ( ; len > 0; len -= 16) { asm( "ldmia %0!, {r0, r1, r2, r3}\n\t" "stmia %1, {r0, r1, r2, r3}" : "+r" (ptr) : "r" (aacirun->fifo) : "r0", "r1", "r2", "r3", "cc"); if (ptr >= aacirun->end) ptr = aacirun->start; } } while (1); aacirun->ptr = ptr; spin_unlock(&aacirun->lock); if (period_elapsed) snd_pcm_period_elapsed(aacirun->substream); } } static irqreturn_t aaci_irq(int irq, void *devid) { struct aaci *aaci = devid; u32 mask; int i; mask = readl(aaci->base + AACI_ALLINTS); if (mask) { u32 m = mask; for (i = 0; i < 4; i++, m >>= 7) { if (m & 0x7f) { aaci_fifo_irq(aaci, i, m); } } } return mask ? IRQ_HANDLED : IRQ_NONE; } /* * ALSA support. */ static struct snd_pcm_hardware aaci_hw_info = { .info = SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_MMAP_VALID | SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER | SNDRV_PCM_INFO_RESUME, /* * ALSA doesn't support 18-bit or 20-bit packed into 32-bit * words. It also doesn't support 12-bit at all. */ .formats = SNDRV_PCM_FMTBIT_S16_LE, /* rates are setup from the AC'97 codec */ .channels_min = 2, .channels_max = 2, .buffer_bytes_max = 64 * 1024, .period_bytes_min = 256, .period_bytes_max = PAGE_SIZE, .periods_min = 4, .periods_max = PAGE_SIZE / 16, }; /* * We can support two and four channel audio. Unfortunately * six channel audio requires a non-standard channel ordering: * 2 -> FL(3), FR(4) * 4 -> FL(3), FR(4), SL(7), SR(8) * 6 -> FL(3), FR(4), SL(7), SR(8), C(6), LFE(9) (required) * FL(3), FR(4), C(6), SL(7), SR(8), LFE(9) (actual) * This requires an ALSA configuration file to correct. */ static int aaci_rule_channels(struct snd_pcm_hw_params *p, struct snd_pcm_hw_rule *rule) { static unsigned int channel_list[] = { 2, 4, 6 }; struct aaci *aaci = rule->private; unsigned int mask = 1 << 0, slots; /* pcms[0] is the our 5.1 PCM instance. 
*/ slots = aaci->ac97_bus->pcms[0].r[0].slots; if (slots & (1 << AC97_SLOT_PCM_SLEFT)) { mask |= 1 << 1; if (slots & (1 << AC97_SLOT_LFE)) mask |= 1 << 2; } return snd_interval_list(hw_param_interval(p, rule->var), ARRAY_SIZE(channel_list), channel_list, mask); } static int aaci_pcm_open(struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime = substream->runtime; struct aaci *aaci = substream->private_data; struct aaci_runtime *aacirun; int ret = 0; if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { aacirun = &aaci->playback; } else { aacirun = &aaci->capture; } aacirun->substream = substream; runtime->private_data = aacirun; runtime->hw = aaci_hw_info; runtime->hw.rates = aacirun->pcm->rates; snd_pcm_limit_hw_rates(runtime); if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { runtime->hw.channels_max = 6; /* Add rule describing channel dependency. */ ret = snd_pcm_hw_rule_add(substream->runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS, aaci_rule_channels, aaci, SNDRV_PCM_HW_PARAM_CHANNELS, -1); if (ret) return ret; if (aacirun->pcm->r[1].slots) snd_ac97_pcm_double_rate_rules(runtime); } /* * ALSA wants the byte-size of the FIFOs. As we only support * 16-bit samples, this is twice the FIFO depth irrespective * of whether it's in compact mode or not. 
*/ runtime->hw.fifo_size = aaci->fifo_depth * 2; mutex_lock(&aaci->irq_lock); if (!aaci->users++) { ret = request_irq(aaci->dev->irq[0], aaci_irq, IRQF_SHARED, DRIVER_NAME, aaci); if (ret != 0) aaci->users--; } mutex_unlock(&aaci->irq_lock); return ret; } /* * Common ALSA stuff */ static int aaci_pcm_close(struct snd_pcm_substream *substream) { struct aaci *aaci = substream->private_data; struct aaci_runtime *aacirun = substream->runtime->private_data; WARN_ON(aacirun->cr & CR_EN); aacirun->substream = NULL; mutex_lock(&aaci->irq_lock); if (!--aaci->users) free_irq(aaci->dev->irq[0], aaci); mutex_unlock(&aaci->irq_lock); return 0; } static int aaci_pcm_hw_free(struct snd_pcm_substream *substream) { struct aaci_runtime *aacirun = substream->runtime->private_data; /* * This must not be called with the device enabled. */ WARN_ON(aacirun->cr & CR_EN); if (aacirun->pcm_open) snd_ac97_pcm_close(aacirun->pcm); aacirun->pcm_open = 0; /* * Clear out the DMA and any allocated buffers. */ snd_pcm_lib_free_pages(substream); return 0; } /* Channel to slot mask */ static const u32 channels_to_slotmask[] = { [2] = CR_SL3 | CR_SL4, [4] = CR_SL3 | CR_SL4 | CR_SL7 | CR_SL8, [6] = CR_SL3 | CR_SL4 | CR_SL7 | CR_SL8 | CR_SL6 | CR_SL9, }; static int aaci_pcm_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params) { struct aaci_runtime *aacirun = substream->runtime->private_data; unsigned int channels = params_channels(params); unsigned int rate = params_rate(params); int dbl = rate > 48000; int err; aaci_pcm_hw_free(substream); if (aacirun->pcm_open) { snd_ac97_pcm_close(aacirun->pcm); aacirun->pcm_open = 0; } /* channels is already limited to 2, 4, or 6 by aaci_rule_channels */ if (dbl && channels != 2) return -EINVAL; err = snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(params)); if (err >= 0) { struct aaci *aaci = substream->private_data; err = snd_ac97_pcm_open(aacirun->pcm, rate, channels, aacirun->pcm->r[dbl].slots); aacirun->pcm_open = err == 0; 
aacirun->cr = CR_FEN | CR_COMPACT | CR_SZ16; aacirun->cr |= channels_to_slotmask[channels + dbl * 2]; /* * fifo_bytes is the number of bytes we transfer to/from * the FIFO, including padding. So that's x4. As we're * in compact mode, the FIFO is half the size. */ aacirun->fifo_bytes = aaci->fifo_depth * 4 / 2; } return err; } static int aaci_pcm_prepare(struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime = substream->runtime; struct aaci_runtime *aacirun = runtime->private_data; aacirun->period = snd_pcm_lib_period_bytes(substream); aacirun->start = runtime->dma_area; aacirun->end = aacirun->start + snd_pcm_lib_buffer_bytes(substream); aacirun->ptr = aacirun->start; aacirun->bytes = aacirun->period; return 0; } static snd_pcm_uframes_t aaci_pcm_pointer(struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime = substream->runtime; struct aaci_runtime *aacirun = runtime->private_data; ssize_t bytes = aacirun->ptr - aacirun->start; return bytes_to_frames(runtime, bytes); } /* * Playback specific ALSA stuff */ static void aaci_pcm_playback_stop(struct aaci_runtime *aacirun) { u32 ie; ie = readl(aacirun->base + AACI_IE); ie &= ~(IE_URIE|IE_TXIE); writel(ie, aacirun->base + AACI_IE); aacirun->cr &= ~CR_EN; aaci_chan_wait_ready(aacirun, SR_TXB); writel(aacirun->cr, aacirun->base + AACI_TXCR); } static void aaci_pcm_playback_start(struct aaci_runtime *aacirun) { u32 ie; aaci_chan_wait_ready(aacirun, SR_TXB); aacirun->cr |= CR_EN; ie = readl(aacirun->base + AACI_IE); ie |= IE_URIE | IE_TXIE; writel(ie, aacirun->base + AACI_IE); writel(aacirun->cr, aacirun->base + AACI_TXCR); } static int aaci_pcm_playback_trigger(struct snd_pcm_substream *substream, int cmd) { struct aaci_runtime *aacirun = substream->runtime->private_data; unsigned long flags; int ret = 0; spin_lock_irqsave(&aacirun->lock, flags); switch (cmd) { case SNDRV_PCM_TRIGGER_START: aaci_pcm_playback_start(aacirun); break; case SNDRV_PCM_TRIGGER_RESUME: 
aaci_pcm_playback_start(aacirun); break; case SNDRV_PCM_TRIGGER_STOP: aaci_pcm_playback_stop(aacirun); break; case SNDRV_PCM_TRIGGER_SUSPEND: aaci_pcm_playback_stop(aacirun); break; case SNDRV_PCM_TRIGGER_PAUSE_PUSH: break; case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: break; default: ret = -EINVAL; } spin_unlock_irqrestore(&aacirun->lock, flags); return ret; } static struct snd_pcm_ops aaci_playback_ops = { .open = aaci_pcm_open, .close = aaci_pcm_close, .ioctl = snd_pcm_lib_ioctl, .hw_params = aaci_pcm_hw_params, .hw_free = aaci_pcm_hw_free, .prepare = aaci_pcm_prepare, .trigger = aaci_pcm_playback_trigger, .pointer = aaci_pcm_pointer, }; static void aaci_pcm_capture_stop(struct aaci_runtime *aacirun) { u32 ie; aaci_chan_wait_ready(aacirun, SR_RXB); ie = readl(aacirun->base + AACI_IE); ie &= ~(IE_ORIE | IE_RXIE); writel(ie, aacirun->base+AACI_IE); aacirun->cr &= ~CR_EN; writel(aacirun->cr, aacirun->base + AACI_RXCR); } static void aaci_pcm_capture_start(struct aaci_runtime *aacirun) { u32 ie; aaci_chan_wait_ready(aacirun, SR_RXB); #ifdef DEBUG /* RX Timeout value: bits 28:17 in RXCR */ aacirun->cr |= 0xf << 17; #endif aacirun->cr |= CR_EN; writel(aacirun->cr, aacirun->base + AACI_RXCR); ie = readl(aacirun->base + AACI_IE); ie |= IE_ORIE |IE_RXIE; // overrun and rx interrupt -- half full writel(ie, aacirun->base + AACI_IE); } static int aaci_pcm_capture_trigger(struct snd_pcm_substream *substream, int cmd) { struct aaci_runtime *aacirun = substream->runtime->private_data; unsigned long flags; int ret = 0; spin_lock_irqsave(&aacirun->lock, flags); switch (cmd) { case SNDRV_PCM_TRIGGER_START: aaci_pcm_capture_start(aacirun); break; case SNDRV_PCM_TRIGGER_RESUME: aaci_pcm_capture_start(aacirun); break; case SNDRV_PCM_TRIGGER_STOP: aaci_pcm_capture_stop(aacirun); break; case SNDRV_PCM_TRIGGER_SUSPEND: aaci_pcm_capture_stop(aacirun); break; case SNDRV_PCM_TRIGGER_PAUSE_PUSH: break; case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: break; default: ret = -EINVAL; } 
spin_unlock_irqrestore(&aacirun->lock, flags); return ret; } static int aaci_pcm_capture_prepare(struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime = substream->runtime; struct aaci *aaci = substream->private_data; aaci_pcm_prepare(substream); /* allow changing of sample rate */ aaci_ac97_write(aaci->ac97, AC97_EXTENDED_STATUS, 0x0001); /* VRA */ aaci_ac97_write(aaci->ac97, AC97_PCM_LR_ADC_RATE, runtime->rate); aaci_ac97_write(aaci->ac97, AC97_PCM_MIC_ADC_RATE, runtime->rate); /* Record select: Mic: 0, Aux: 3, Line: 4 */ aaci_ac97_write(aaci->ac97, AC97_REC_SEL, 0x0404); return 0; } static struct snd_pcm_ops aaci_capture_ops = { .open = aaci_pcm_open, .close = aaci_pcm_close, .ioctl = snd_pcm_lib_ioctl, .hw_params = aaci_pcm_hw_params, .hw_free = aaci_pcm_hw_free, .prepare = aaci_pcm_capture_prepare, .trigger = aaci_pcm_capture_trigger, .pointer = aaci_pcm_pointer, }; /* * Power Management. */ #ifdef CONFIG_PM static int aaci_do_suspend(struct snd_card *card, unsigned int state) { struct aaci *aaci = card->private_data; snd_power_change_state(card, SNDRV_CTL_POWER_D3cold); snd_pcm_suspend_all(aaci->pcm); return 0; } static int aaci_do_resume(struct snd_card *card, unsigned int state) { snd_power_change_state(card, SNDRV_CTL_POWER_D0); return 0; } static int aaci_suspend(struct amba_device *dev, pm_message_t state) { struct snd_card *card = amba_get_drvdata(dev); return card ? aaci_do_suspend(card) : 0; } static int aaci_resume(struct amba_device *dev) { struct snd_card *card = amba_get_drvdata(dev); return card ? 
aaci_do_resume(card) : 0; } #else #define aaci_do_suspend NULL #define aaci_do_resume NULL #define aaci_suspend NULL #define aaci_resume NULL #endif static struct ac97_pcm ac97_defs[] __devinitdata = { [0] = { /* Front PCM */ .exclusive = 1, .r = { [0] = { .slots = (1 << AC97_SLOT_PCM_LEFT) | (1 << AC97_SLOT_PCM_RIGHT) | (1 << AC97_SLOT_PCM_CENTER) | (1 << AC97_SLOT_PCM_SLEFT) | (1 << AC97_SLOT_PCM_SRIGHT) | (1 << AC97_SLOT_LFE), }, [1] = { .slots = (1 << AC97_SLOT_PCM_LEFT) | (1 << AC97_SLOT_PCM_RIGHT) | (1 << AC97_SLOT_PCM_LEFT_0) | (1 << AC97_SLOT_PCM_RIGHT_0), }, }, }, [1] = { /* PCM in */ .stream = 1, .exclusive = 1, .r = { [0] = { .slots = (1 << AC97_SLOT_PCM_LEFT) | (1 << AC97_SLOT_PCM_RIGHT), }, }, }, [2] = { /* Mic in */ .stream = 1, .exclusive = 1, .r = { [0] = { .slots = (1 << AC97_SLOT_MIC), }, }, } }; static struct snd_ac97_bus_ops aaci_bus_ops = { .write = aaci_ac97_write, .read = aaci_ac97_read, }; static int __devinit aaci_probe_ac97(struct aaci *aaci) { struct snd_ac97_template ac97_template; struct snd_ac97_bus *ac97_bus; struct snd_ac97 *ac97; int ret; /* * Assert AACIRESET for 2us */ writel(0, aaci->base + AACI_RESET); udelay(2); writel(RESET_NRST, aaci->base + AACI_RESET); /* * Give the AC'97 codec more than enough time * to wake up. (42us = ~2 frames at 48kHz.) */ udelay(FRAME_PERIOD_US * 2); ret = snd_ac97_bus(aaci->card, 0, &aaci_bus_ops, aaci, &ac97_bus); if (ret) goto out; ac97_bus->clock = 48000; aaci->ac97_bus = ac97_bus; memset(&ac97_template, 0, sizeof(struct snd_ac97_template)); ac97_template.private_data = aaci; ac97_template.num = 0; ac97_template.scaps = AC97_SCAP_SKIP_MODEM; ret = snd_ac97_mixer(ac97_bus, &ac97_template, &ac97); if (ret) goto out; aaci->ac97 = ac97; /* * Disable AC97 PC Beep input on audio codecs. 
*/ if (ac97_is_audio(ac97)) snd_ac97_write_cache(ac97, AC97_PC_BEEP, 0x801e); ret = snd_ac97_pcm_assign(ac97_bus, ARRAY_SIZE(ac97_defs), ac97_defs); if (ret) goto out; aaci->playback.pcm = &ac97_bus->pcms[0]; aaci->capture.pcm = &ac97_bus->pcms[1]; out: return ret; } static void aaci_free_card(struct snd_card *card) { struct aaci *aaci = card->private_data; if (aaci->base) iounmap(aaci->base); } static struct aaci * __devinit aaci_init_card(struct amba_device *dev) { struct aaci *aaci; struct snd_card *card; int err; err = snd_card_create(SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1, THIS_MODULE, sizeof(struct aaci), &card); if (err < 0) return NULL; card->private_free = aaci_free_card; strlcpy(card->driver, DRIVER_NAME, sizeof(card->driver)); strlcpy(card->shortname, "ARM AC'97 Interface", sizeof(card->shortname)); snprintf(card->longname, sizeof(card->longname), "%s PL%03x rev%u at 0x%08llx, irq %d", card->shortname, amba_part(dev), amba_rev(dev), (unsigned long long)dev->res.start, dev->irq[0]); aaci = card->private_data; mutex_init(&aaci->ac97_sem); mutex_init(&aaci->irq_lock); aaci->card = card; aaci->dev = dev; /* Set MAINCR to allow slot 1 and 2 data IO */ aaci->maincr = MAINCR_IE | MAINCR_SL1RXEN | MAINCR_SL1TXEN | MAINCR_SL2RXEN | MAINCR_SL2TXEN; return aaci; } static int __devinit aaci_init_pcm(struct aaci *aaci) { struct snd_pcm *pcm; int ret; ret = snd_pcm_new(aaci->card, "AACI AC'97", 0, 1, 1, &pcm); if (ret == 0) { aaci->pcm = pcm; pcm->private_data = aaci; pcm->info_flags = 0; strlcpy(pcm->name, DRIVER_NAME, sizeof(pcm->name)); snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &aaci_playback_ops); snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &aaci_capture_ops); snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV, NULL, 0, 64 * 1024); } return ret; } static unsigned int __devinit aaci_size_fifo(struct aaci *aaci) { struct aaci_runtime *aacirun = &aaci->playback; int i; /* * Enable the channel, but don't assign it to any slots, so * it won't empty 
onto the AC'97 link. */ writel(CR_FEN | CR_SZ16 | CR_EN, aacirun->base + AACI_TXCR); for (i = 0; !(readl(aacirun->base + AACI_SR) & SR_TXFF) && i < 4096; i++) writel(0, aacirun->fifo); writel(0, aacirun->base + AACI_TXCR); /* * Re-initialise the AACI after the FIFO depth test, to * ensure that the FIFOs are empty. Unfortunately, merely * disabling the channel doesn't clear the FIFO. */ writel(aaci->maincr & ~MAINCR_IE, aaci->base + AACI_MAINCR); readl(aaci->base + AACI_MAINCR); udelay(1); writel(aaci->maincr, aaci->base + AACI_MAINCR); /* * If we hit 4096 entries, we failed. Go back to the specified * fifo depth. */ if (i == 4096) i = 8; return i; } static int __devinit aaci_probe(struct amba_device *dev, const struct amba_id *id) { struct aaci *aaci; int ret, i; ret = amba_request_regions(dev, NULL); if (ret) return ret; aaci = aaci_init_card(dev); if (!aaci) { ret = -ENOMEM; goto out; } aaci->base = ioremap(dev->res.start, resource_size(&dev->res)); if (!aaci->base) { ret = -ENOMEM; goto out; } /* * Playback uses AACI channel 0 */ spin_lock_init(&aaci->playback.lock); aaci->playback.base = aaci->base + AACI_CSCH1; aaci->playback.fifo = aaci->base + AACI_DR1; /* * Capture uses AACI channel 0 */ spin_lock_init(&aaci->capture.lock); aaci->capture.base = aaci->base + AACI_CSCH1; aaci->capture.fifo = aaci->base + AACI_DR1; for (i = 0; i < 4; i++) { void __iomem *base = aaci->base + i * 0x14; writel(0, base + AACI_IE); writel(0, base + AACI_TXCR); writel(0, base + AACI_RXCR); } writel(0x1fff, aaci->base + AACI_INTCLR); writel(aaci->maincr, aaci->base + AACI_MAINCR); /* * Fix: ac97 read back fail errors by reading * from any arbitrary aaci register. */ readl(aaci->base + AACI_CSCH1); ret = aaci_probe_ac97(aaci); if (ret) goto out; /* * Size the FIFOs (must be multiple of 16). * This is the number of entries in the FIFO. 
*/ aaci->fifo_depth = aaci_size_fifo(aaci); if (aaci->fifo_depth & 15) { printk(KERN_WARNING "AACI: FIFO depth %d not supported\n", aaci->fifo_depth); ret = -ENODEV; goto out; } ret = aaci_init_pcm(aaci); if (ret) goto out; snd_card_set_dev(aaci->card, &dev->dev); ret = snd_card_register(aaci->card); if (ret == 0) { dev_info(&dev->dev, "%s\n", aaci->card->longname); dev_info(&dev->dev, "FIFO %u entries\n", aaci->fifo_depth); amba_set_drvdata(dev, aaci->card); return ret; } out: if (aaci) snd_card_free(aaci->card); amba_release_regions(dev); return ret; } static int __devexit aaci_remove(struct amba_device *dev) { struct snd_card *card = amba_get_drvdata(dev); amba_set_drvdata(dev, NULL); if (card) { struct aaci *aaci = card->private_data; writel(0, aaci->base + AACI_MAINCR); snd_card_free(card); amba_release_regions(dev); } return 0; } static struct amba_id aaci_ids[] = { { .id = 0x00041041, .mask = 0x000fffff, }, { 0, 0 }, }; MODULE_DEVICE_TABLE(amba, aaci_ids); static struct amba_driver aaci_driver = { .drv = { .name = DRIVER_NAME, }, .probe = aaci_probe, .remove = __devexit_p(aaci_remove), .suspend = aaci_suspend, .resume = aaci_resume, .id_table = aaci_ids, }; module_amba_driver(aaci_driver); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("ARM PrimeCell PL041 Advanced Audio CODEC Interface driver");
gpl-2.0
Trustonic/kernel-goldfish
drivers/net/ppp/pppopns.c
4990
11343
/* drivers/net/pppopns.c
 *
 * Driver for PPP on PPTP Network Server / PPPoPNS Socket (RFC 2637)
 *
 * Copyright (C) 2009 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

/* This driver handles PPTP data packets between a RAW socket and a PPP channel.
 * The socket is created in the kernel space and connected to the same address
 * of the control socket. Outgoing packets are always sent with sequences but
 * without acknowledgements. Incoming packets with sequences are reordered
 * within a sliding window of one second. Currently reordering only happens when
 * a packet is received. It is done for simplicity since no additional locks or
 * threads are required. This driver should work on both IPv4 and IPv6. */

#include <linux/module.h>
#include <linux/jiffies.h>
#include <linux/workqueue.h>
#include <linux/skbuff.h>
#include <linux/file.h>
#include <linux/netdevice.h>
#include <linux/net.h>
#include <linux/ppp_defs.h>
#include <linux/if.h>
#include <linux/if_ppp.h>
#include <linux/if_pppox.h>
#include <linux/ppp_channel.h>
#include <asm/uaccess.h>

/* Fixed part of the enhanced GRE header (bits/type/length/call). */
#define GRE_HEADER_SIZE	8

/* Expected GRE flag/version pattern and the optional-field bits, all kept in
 * network byte order so they can be compared against the wire header as-is. */
#define PPTP_GRE_BITS	htons(0x2001)
#define PPTP_GRE_BITS_MASK htons(0xEF7F)
#define PPTP_GRE_SEQ_BIT htons(0x1000)
#define PPTP_GRE_ACK_BIT htons(0x0080)
#define PPTP_GRE_TYPE	htons(0x880B)

/* HDLC-style PPP framing bytes that may precede the PPP protocol field. */
#define PPP_ADDR	0xFF
#define PPP_CTRL	0x03

/* Enhanced GRE header as used by PPTP (RFC 2637). The sequence field is
 * only present on the wire when PPTP_GRE_SEQ_BIT is set in ->bits. */
struct header {
	__u16	bits;
	__u16	type;
	__u16	length;
	__u16	call;
	__u32	sequence;
} __attribute__((packed));

/* Per-packet reordering state; lives in skb->cb while the packet sits in
 * the raw socket's receive queue. */
struct meta {
	__u32 sequence;		/* GRE sequence number of this packet */
	__u32 timestamp;	/* jiffies when the packet was queued */
};

static inline struct meta *skb_meta(struct sk_buff *skb)
{
	return (struct meta *)skb->cb;
}

/******************************************************************************/

/* Validate and unwrap one GRE packet from the raw socket, reorder it inside
 * a one-second window when sequencing is enabled, and feed in-order packets
 * to the PPP channel. Installed as sk_raw->sk_backlog_rcv by
 * pppopns_connect(); sk_raw->sk_user_data points back at the PPPoX socket. */
static int pppopns_recv_core(struct sock *sk_raw, struct sk_buff *skb)
{
	struct sock *sk = (struct sock *)sk_raw->sk_user_data;
	struct pppopns_opt *opt = &pppox_sk(sk)->proto.pns;
	struct meta *meta = skb_meta(skb);
	__u32 now = jiffies;
	struct header *hdr;

	/* Skip transport header */
	skb_pull(skb, skb_transport_header(skb) - skb->data);

	/* Drop the packet if GRE header is missing. */
	if (skb->len < GRE_HEADER_SIZE)
		goto drop;
	hdr = (struct header *)skb->data;

	/* Check the header. */
	if (hdr->type != PPTP_GRE_TYPE || hdr->call != opt->local ||
			(hdr->bits & PPTP_GRE_BITS_MASK) != PPTP_GRE_BITS)
		goto drop;

	/* Skip all fields including optional ones. */
	if (!skb_pull(skb, GRE_HEADER_SIZE +
			(hdr->bits & PPTP_GRE_SEQ_BIT ? 4 : 0) +
			(hdr->bits & PPTP_GRE_ACK_BIT ? 4 : 0)))
		goto drop;

	/* Check the length. */
	if (skb->len != ntohs(hdr->length))
		goto drop;

	/* Check the sequence if it is present. Signed difference handles
	 * 32-bit sequence number wrap-around. */
	if (hdr->bits & PPTP_GRE_SEQ_BIT) {
		meta->sequence = ntohl(hdr->sequence);
		if ((__s32)(meta->sequence - opt->recv_sequence) < 0)
			goto drop;
	}

	/* Skip PPP address and control if they are present. */
	if (skb->len >= 2 && skb->data[0] == PPP_ADDR &&
			skb->data[1] == PPP_CTRL)
		skb_pull(skb, 2);

	/* Fix PPP protocol if it is compressed. */
	if (skb->len >= 1 && skb->data[0] & 1)
		skb_push(skb, 1)[0] = 0;

	/* Drop the packet if PPP protocol is missing. */
	if (skb->len < 2)
		goto drop;

	/* Perform reordering if sequencing is enabled. */
	if (hdr->bits & PPTP_GRE_SEQ_BIT) {
		struct sk_buff *skb1;

		/* Insert the packet into receive queue in order. */
		skb_set_owner_r(skb, sk);
		skb_queue_walk(&sk->sk_receive_queue, skb1) {
			struct meta *meta1 = skb_meta(skb1);
			__s32 order = meta->sequence - meta1->sequence;
			if (order == 0)
				goto drop;	/* duplicate sequence */
			if (order < 0) {
				meta->timestamp = meta1->timestamp;
				skb_insert(skb1, skb, &sk->sk_receive_queue);
				skb = NULL;	/* ownership moved to queue */
				break;
			}
		}
		if (skb) {
			meta->timestamp = now;
			skb_queue_tail(&sk->sk_receive_queue, skb);
		}

		/* Remove packets from receive queue as long as
		 * 1. the receive buffer is full,
		 * 2. they are queued longer than one second, or
		 * 3. there are no missing packets before them. */
		skb_queue_walk_safe(&sk->sk_receive_queue, skb, skb1) {
			meta = skb_meta(skb);
			if (atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf &&
					now - meta->timestamp < HZ &&
					meta->sequence != opt->recv_sequence)
				break;
			skb_unlink(skb, &sk->sk_receive_queue);
			opt->recv_sequence = meta->sequence + 1;
			skb_orphan(skb);
			ppp_input(&pppox_sk(sk)->chan, skb);
		}
		return NET_RX_SUCCESS;
	}

	/* Flush receive queue if sequencing is disabled. */
	skb_queue_purge(&sk->sk_receive_queue);
	skb_orphan(skb);
	ppp_input(&pppox_sk(sk)->chan, skb);
	return NET_RX_SUCCESS;
drop:
	kfree_skb(skb);
	return NET_RX_DROP;
}

/* Replacement sk_data_ready callback for the raw socket: drains its receive
 * queue and pushes each packet through sk_receive_skb(), which ends up in
 * pppopns_recv_core() above. NOTE(review): the sock_hold() appears to balance
 * the reference that sk_receive_skb() drops when done — confirm against the
 * sk_receive_skb() contract for this kernel version. */
static void pppopns_recv(struct sock *sk_raw, int length)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&sk_raw->sk_receive_queue))) {
		sock_hold(sk_raw);
		sk_receive_skb(sk_raw, skb, 0);
	}
}

/* Global queue of fully framed packets waiting to be sent on their raw
 * sockets by the delivery work item (shared by all PPPoPNS channels). */
static struct sk_buff_head delivery_queue;

/* Workqueue handler: transmit every queued packet via the owning raw
 * socket's protocol sendmsg. set_fs(KERNEL_DS) lets sendmsg accept the
 * kernel-space iovec built here. */
static void pppopns_xmit_core(struct work_struct *delivery_work)
{
	mm_segment_t old_fs = get_fs();
	struct sk_buff *skb;

	set_fs(KERNEL_DS);
	while ((skb = skb_dequeue(&delivery_queue))) {
		struct sock *sk_raw = skb->sk;
		struct kvec iov = {.iov_base = skb->data, .iov_len = skb->len};
		struct msghdr msg = {
			.msg_iov = (struct iovec *)&iov,
			.msg_iovlen = 1,
			.msg_flags = MSG_NOSIGNAL | MSG_DONTWAIT,
		};
		sk_raw->sk_prot->sendmsg(NULL, sk_raw, &msg, skb->len);
		kfree_skb(skb);
	}
	set_fs(old_fs);
}

static DECLARE_WORK(delivery_work, pppopns_xmit_core);

/* PPP channel start_xmit: prepend PPP address/control and a sequenced PPTP
 * GRE header, then defer actual transmission to the delivery workqueue.
 * Always returns 1 (packet consumed). */
static int pppopns_xmit(struct ppp_channel *chan, struct sk_buff *skb)
{
	struct sock *sk_raw = (struct sock *)chan->private;
	struct pppopns_opt *opt = &pppox_sk(sk_raw->sk_user_data)->proto.pns;
	struct header *hdr;
	__u16 length;

	/* Install PPP address and control. */
	skb_push(skb, 2);
	skb->data[0] = PPP_ADDR;
	skb->data[1] = PPP_CTRL;
	length = skb->len;

	/* Install PPTP GRE header (8 fixed bytes + 4-byte sequence = 12). */
	hdr = (struct header *)skb_push(skb, 12);
	hdr->bits = PPTP_GRE_BITS | PPTP_GRE_SEQ_BIT;
	hdr->type = PPTP_GRE_TYPE;
	hdr->length = htons(length);
	hdr->call = opt->remote;
	hdr->sequence = htonl(opt->xmit_sequence);
	opt->xmit_sequence++;

	/* Now send the packet via the delivery queue. */
	skb_set_owner_w(skb, sk_raw);
	skb_queue_tail(&delivery_queue, skb);
	schedule_work(&delivery_work);
	return 1;
}

/******************************************************************************/

static struct ppp_channel_ops pppopns_channel_ops = {
	.start_xmit = pppopns_xmit,
};

/* connect() handler for a PPPoPNS socket: look up the control TCP socket
 * named in the address, open a kernel raw GRE socket to the same peer (and
 * the same bound device), register a PPP channel on top of it, and hijack
 * the raw socket's data_ready/backlog_rcv callbacks (the originals are
 * saved in proto.pns so release can restore them). */
static int pppopns_connect(struct socket *sock, struct sockaddr *useraddr,
	int addrlen, int flags)
{
	struct sock *sk = sock->sk;
	struct pppox_sock *po = pppox_sk(sk);
	struct sockaddr_pppopns *addr = (struct sockaddr_pppopns *)useraddr;
	struct sockaddr_storage ss;
	struct socket *sock_tcp = NULL;
	struct socket *sock_raw = NULL;
	struct sock *sk_tcp;
	struct sock *sk_raw;
	int error;

	if (addrlen != sizeof(struct sockaddr_pppopns))
		return -EINVAL;

	lock_sock(sk);
	error = -EALREADY;
	if (sk->sk_state != PPPOX_NONE)
		goto out;

	sock_tcp = sockfd_lookup(addr->tcp_socket, &error);
	if (!sock_tcp)
		goto out;
	sk_tcp = sock_tcp->sk;
	error = -EPROTONOSUPPORT;
	if (sk_tcp->sk_protocol != IPPROTO_TCP)
		goto out;
	addrlen = sizeof(struct sockaddr_storage);
	error = kernel_getpeername(sock_tcp, (struct sockaddr *)&ss, &addrlen);
	if (error)
		goto out;
	/* Pin the raw socket to the device the control connection uses. */
	if (!sk_tcp->sk_bound_dev_if) {
		struct dst_entry *dst = sk_dst_get(sk_tcp);
		error = -ENODEV;
		if (!dst)
			goto out;
		sk_tcp->sk_bound_dev_if = dst->dev->ifindex;
		dst_release(dst);
	}

	error = sock_create(ss.ss_family, SOCK_RAW, IPPROTO_GRE, &sock_raw);
	if (error)
		goto out;
	sk_raw = sock_raw->sk;
	sk_raw->sk_bound_dev_if = sk_tcp->sk_bound_dev_if;
	error = kernel_connect(sock_raw, (struct sockaddr *)&ss, addrlen, 0);
	if (error)
		goto out;

	po->chan.hdrlen = 14;	/* 2 bytes PPP framing + 12 bytes GRE */
	po->chan.private = sk_raw;
	po->chan.ops = &pppopns_channel_ops;
	po->chan.mtu = PPP_MRU - 80;
	po->proto.pns.local = addr->local;
	po->proto.pns.remote = addr->remote;
	/* Save original callbacks so pppopns_release() can restore them. */
	po->proto.pns.data_ready = sk_raw->sk_data_ready;
	po->proto.pns.backlog_rcv = sk_raw->sk_backlog_rcv;

	error = ppp_register_channel(&po->chan);
	if (error)
		goto out;

	sk->sk_state = PPPOX_CONNECTED;
	lock_sock(sk_raw);
	sk_raw->sk_data_ready = pppopns_recv;
	sk_raw->sk_backlog_rcv = pppopns_recv_core;
	sk_raw->sk_user_data = sk;
	release_sock(sk_raw);
out:
	if (sock_tcp)
		sockfd_put(sock_tcp);
	if (error && sock_raw)
		sock_release(sock_raw);
	release_sock(sk);
	return error;
}

/* release() handler: if connected, unbind the PPP channel, restore the raw
 * socket's original callbacks and release it, then orphan and drop the
 * PPPoX socket itself. */
static int pppopns_release(struct socket *sock)
{
	struct sock *sk = sock->sk;

	if (!sk)
		return 0;

	lock_sock(sk);
	if (sock_flag(sk, SOCK_DEAD)) {
		release_sock(sk);
		return -EBADF;
	}

	if (sk->sk_state != PPPOX_NONE) {
		struct sock *sk_raw = (struct sock *)pppox_sk(sk)->chan.private;
		lock_sock(sk_raw);
		skb_queue_purge(&sk->sk_receive_queue);
		pppox_unbind_sock(sk);
		sk_raw->sk_data_ready = pppox_sk(sk)->proto.pns.data_ready;
		sk_raw->sk_backlog_rcv = pppox_sk(sk)->proto.pns.backlog_rcv;
		sk_raw->sk_user_data = NULL;
		release_sock(sk_raw);
		sock_release(sk_raw->sk_socket);
	}

	sock_orphan(sk);
	sock->sk = NULL;
	release_sock(sk);
	sock_put(sk);
	return 0;
}

/******************************************************************************/

static struct proto pppopns_proto = {
	.name = "PPPOPNS",
	.owner = THIS_MODULE,
	.obj_size = sizeof(struct pppox_sock),
};

/* Only release/connect/ioctl are meaningful; everything else is stubbed. */
static struct proto_ops pppopns_proto_ops = {
	.family = PF_PPPOX,
	.owner = THIS_MODULE,
	.release = pppopns_release,
	.bind = sock_no_bind,
	.connect = pppopns_connect,
	.socketpair = sock_no_socketpair,
	.accept = sock_no_accept,
	.getname = sock_no_getname,
	.poll = sock_no_poll,
	.ioctl = pppox_ioctl,
	.listen = sock_no_listen,
	.shutdown = sock_no_shutdown,
	.setsockopt = sock_no_setsockopt,
	.getsockopt = sock_no_getsockopt,
	.sendmsg = sock_no_sendmsg,
	.recvmsg = sock_no_recvmsg,
	.mmap = sock_no_mmap,
};

/* PPPoX create hook: allocate and initialize an unconnected PPPoPNS socket. */
static int pppopns_create(struct net *net, struct socket *sock)
{
	struct sock *sk;

	sk = sk_alloc(net, PF_PPPOX, GFP_KERNEL, &pppopns_proto);
	if (!sk)
		return -ENOMEM;

	sock_init_data(sock, sk);
	sock->state = SS_UNCONNECTED;
	sock->ops = &pppopns_proto_ops;
	sk->sk_protocol = PX_PROTO_OPNS;
	sk->sk_state = PPPOX_NONE;
	return 0;
}

/******************************************************************************/

static struct pppox_proto pppopns_pppox_proto = {
	.create = pppopns_create,
	.owner = THIS_MODULE,
};

/* Module init: register the proto and the PPPoX sub-protocol; the shared
 * delivery queue is only initialized once registration succeeds. */
static int __init pppopns_init(void)
{
	int error;

	error = proto_register(&pppopns_proto, 0);
	if (error)
		return error;

	error = register_pppox_proto(PX_PROTO_OPNS, &pppopns_pppox_proto);
	if (error)
		proto_unregister(&pppopns_proto);
	else
		skb_queue_head_init(&delivery_queue);
	return error;
}

static void __exit pppopns_exit(void)
{
	unregister_pppox_proto(PX_PROTO_OPNS);
	proto_unregister(&pppopns_proto);
}

module_init(pppopns_init);
module_exit(pppopns_exit);

MODULE_DESCRIPTION("PPP on PPTP Network Server (PPPoPNS)");
MODULE_AUTHOR("Chia-chi Yeh <chiachi@android.com>");
MODULE_LICENSE("GPL");
gpl-2.0
KryptonOmni/android_kernel_lge_mako
drivers/media/video/cx23885/cx23885-alsa.c
5246
13521
/*
 *
 *  Support for CX23885 analog audio capture
 *
 *    (c) 2008 Mijhail Moreyra <mijhail.moreyra@gmail.com>
 *    Adapted from cx88-alsa.c
 *    (c) 2009 Steven Toth <stoth@kernellabs.com>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/vmalloc.h>
#include <linux/dma-mapping.h>
#include <linux/pci.h>

#include <asm/delay.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/control.h>
#include <sound/initval.h>
#include <sound/tlv.h>

#include "cx23885.h"
#include "cx23885-reg.h"

#define AUDIO_SRAM_CHANNEL	SRAM_CH07

/* Both debug macros rely on a local variable named "chip" being in scope. */
#define dprintk(level, fmt, arg...)	if (audio_debug >= level) \
	printk(KERN_INFO "%s: " fmt, chip->dev->name , ## arg)

#define dprintk_core(level, fmt, arg...)	if (audio_debug >= level) \
	printk(KERN_DEBUG "%s: " fmt, chip->dev->name , ## arg)

/****************************************************************************
	Module global static vars
 ****************************************************************************/

static unsigned int disable_analog_audio;
module_param(disable_analog_audio, int, 0644);
MODULE_PARM_DESC(disable_analog_audio, "disable analog audio ALSA driver");

static unsigned int audio_debug;
module_param(audio_debug, int, 0644);
MODULE_PARM_DESC(audio_debug, "enable debug messages [analog audio]");

/****************************************************************************
			Board specific funtions
 ****************************************************************************/

/* Constants taken from cx88-reg.h */

#define AUD_INT_DN_RISCI1	(1 <<  0)
#define AUD_INT_UP_RISCI1	(1 <<  1)
#define AUD_INT_RDS_DN_RISCI1	(1 <<  2)
#define AUD_INT_DN_RISCI2	(1 <<  4) /* yes, 3 is skipped */
#define AUD_INT_UP_RISCI2	(1 <<  5)
#define AUD_INT_RDS_DN_RISCI2	(1 <<  6)
#define AUD_INT_DN_SYNC		(1 << 12)
#define AUD_INT_UP_SYNC		(1 << 13)
#define AUD_INT_RDS_DN_SYNC	(1 << 14)
#define AUD_INT_OPC_ERR		(1 << 16)
#define AUD_INT_BER_IRQ		(1 << 20)
#define AUD_INT_MCHG_IRQ	(1 << 21)
#define GP_COUNT_CONTROL_RESET	0x3

/*
 * BOARD Specific: Sets audio DMA
 */
static int cx23885_start_audio_dma(struct cx23885_audio_dev *chip)
{
	struct cx23885_audio_buffer *buf = chip->buf;
	struct cx23885_dev *dev = chip->dev;
	struct sram_channel *audio_ch =
		&dev->sram_channels[AUDIO_SRAM_CHANNEL];

	dprintk(1, "%s()\n", __func__);

	/* Make sure RISC/FIFO are off before changing FIFO/RISC settings */
	cx_clear(AUD_INT_DMA_CTL, 0x11);

	/* setup fifo + format - out channel */
	cx23885_sram_channel_setup(chip->dev, audio_ch, buf->bpl,
		buf->risc.dma);

	/* sets bpl size */
	cx_write(AUD_INT_A_LNGTH, buf->bpl);

	/* This is required to get good audio (1 seems to be ok) */
	cx_write(AUD_INT_A_MODE, 1);

	/* reset counter */
	cx_write(AUD_INT_A_GPCNT_CTL, GP_COUNT_CONTROL_RESET);
	atomic_set(&chip->count, 0);

	dprintk(1, "Start audio DMA, %d B/line, %d lines/FIFO, %d periods, %d "
		"byte buffer\n", buf->bpl, cx_read(audio_ch->cmds_start+12)>>1,
		chip->num_periods, buf->bpl * chip->num_periods);

	/* Enables corresponding bits at AUD_INT_STAT */
	cx_write(AUDIO_INT_INT_MSK, AUD_INT_OPC_ERR | AUD_INT_DN_SYNC |
				    AUD_INT_DN_RISCI1);

	/* Clean any pending interrupt bits already set */
	cx_write(AUDIO_INT_INT_STAT, ~0);

	/* enable audio irqs */
	cx_set(PCI_INT_MSK, chip->dev->pci_irqmask | PCI_MSK_AUD_INT);

	/* start dma */
	cx_set(DEV_CNTRL2, (1<<5)); /* Enables Risc Processor */
	cx_set(AUD_INT_DMA_CTL, 0x11); /* audio downstream FIFO and
					  RISC enable */

	if (audio_debug)
		cx23885_sram_channel_dump(chip->dev, audio_ch);

	return 0;
}

/*
 * BOARD Specific: Resets audio DMA
 */
static int cx23885_stop_audio_dma(struct cx23885_audio_dev *chip)
{
	struct cx23885_dev *dev = chip->dev;

	dprintk(1, "Stopping audio DMA\n");

	/* stop dma */
	cx_clear(AUD_INT_DMA_CTL, 0x11);

	/* disable irqs */
	cx_clear(PCI_INT_MSK, PCI_MSK_AUD_INT);
	cx_clear(AUDIO_INT_INT_MSK, AUD_INT_OPC_ERR | AUD_INT_DN_SYNC |
				    AUD_INT_DN_RISCI1);

	if (audio_debug)
		cx23885_sram_channel_dump(chip->dev,
			&dev->sram_channels[AUDIO_SRAM_CHANNEL]);

	return 0;
}

/*
 * BOARD Specific: Handles audio IRQ
 *
 * Returns 0 when none of the masked status bits are ours, 1 when the
 * interrupt was consumed here.
 */
int cx23885_audio_irq(struct cx23885_dev *dev, u32 status, u32 mask)
{
	struct cx23885_audio_dev *chip = dev->audio_dev;

	if (0 == (status & mask))
		return 0;

	cx_write(AUDIO_INT_INT_STAT, status);

	/* risc op code error */
	if (status & AUD_INT_OPC_ERR) {
		printk(KERN_WARNING "%s/1: Audio risc op code error\n",
			dev->name);
		cx_clear(AUD_INT_DMA_CTL, 0x11);
		cx23885_sram_channel_dump(dev,
			&dev->sram_channels[AUDIO_SRAM_CHANNEL]);
	}
	if (status & AUD_INT_DN_SYNC) {
		dprintk(1, "Downstream sync error\n");
		cx_write(AUD_INT_A_GPCNT_CTL, GP_COUNT_CONTROL_RESET);
		return 1;
	}
	/* risc1 downstream */
	if (status & AUD_INT_DN_RISCI1) {
		atomic_set(&chip->count, cx_read(AUD_INT_A_GPCNT));
		snd_pcm_period_elapsed(chip->substream);
	}
	/* FIXME: Any other status should deserve a special handling? */

	return 1;
}

/* Release the videobuf DMA mapping, the RISC program and the buffer
 * descriptor allocated by snd_cx23885_hw_params(). */
static int dsp_buffer_free(struct cx23885_audio_dev *chip)
{
	BUG_ON(!chip->dma_size);

	dprintk(2, "Freeing buffer\n");
	videobuf_dma_unmap(&chip->pci->dev, chip->dma_risc);
	videobuf_dma_free(chip->dma_risc);
	btcx_riscmem_free(chip->pci, &chip->buf->risc);
	kfree(chip->buf);

	chip->dma_risc = NULL;
	chip->dma_size = 0;

	return 0;
}

/****************************************************************************
				ALSA PCM Interface
 ****************************************************************************/

/*
 * Digital hardware definition
 */
#define DEFAULT_FIFO_SIZE	4096

static struct snd_pcm_hardware snd_cx23885_digital_hw = {
	.info = SNDRV_PCM_INFO_MMAP |
		SNDRV_PCM_INFO_INTERLEAVED |
		SNDRV_PCM_INFO_BLOCK_TRANSFER |
		SNDRV_PCM_INFO_MMAP_VALID,
	.formats = SNDRV_PCM_FMTBIT_S16_LE,
	.rates = SNDRV_PCM_RATE_48000,
	.rate_min = 48000,
	.rate_max = 48000,
	.channels_min = 2,
	.channels_max = 2,
	/* Analog audio output will be full of clicks and pops if there
	   are not exactly four lines in the SRAM FIFO buffer.  */
	.period_bytes_min = DEFAULT_FIFO_SIZE/4,
	.period_bytes_max = DEFAULT_FIFO_SIZE/4,
	.periods_min = 1,
	.periods_max = 1024,
	.buffer_bytes_max = (1024*1024),
};

/*
 * audio pcm capture open callback
 */
static int snd_cx23885_pcm_open(struct snd_pcm_substream *substream)
{
	struct cx23885_audio_dev *chip = snd_pcm_substream_chip(substream);
	struct snd_pcm_runtime *runtime = substream->runtime;
	int err;

	if (!chip) {
		printk(KERN_ERR "BUG: cx23885 can't find device struct."
				" Can't proceed with open\n");
		return -ENODEV;
	}

	err = snd_pcm_hw_constraint_pow2(runtime, 0,
		SNDRV_PCM_HW_PARAM_PERIODS);
	if (err < 0)
		goto _error;

	chip->substream = substream;

	runtime->hw = snd_cx23885_digital_hw;

	/* Adjust the fixed period size when the board's SRAM channel uses a
	 * non-default FIFO size (keeping a quarter of the FIFO per period). */
	if (chip->dev->sram_channels[AUDIO_SRAM_CHANNEL].fifo_size !=
		DEFAULT_FIFO_SIZE) {
		unsigned int bpl = chip->dev->
			sram_channels[AUDIO_SRAM_CHANNEL].fifo_size / 4;
		bpl &= ~7; /* must be multiple of 8 */
		runtime->hw.period_bytes_min = bpl;
		runtime->hw.period_bytes_max = bpl;
	}

	return 0;
_error:
	dprintk(1, "Error opening PCM!\n");
	return err;
}

/*
 * audio close callback
 */
static int snd_cx23885_close(struct snd_pcm_substream *substream)
{
	return 0;
}

/*
 * hw_params callback
 *
 * Allocates the kernel DMA buffer, maps it for PCI DMA and builds the RISC
 * program that fills it; the runtime dma_area is pointed at the videobuf
 * vmalloc'ed area (hence the vmalloc_to_page-based .page callback below).
 */
static int snd_cx23885_hw_params(struct snd_pcm_substream *substream,
			      struct snd_pcm_hw_params *hw_params)
{
	struct cx23885_audio_dev *chip = snd_pcm_substream_chip(substream);
	struct videobuf_dmabuf *dma;

	struct cx23885_audio_buffer *buf;
	int ret;

	if (substream->runtime->dma_area) {
		dsp_buffer_free(chip);
		substream->runtime->dma_area = NULL;
	}

	chip->period_size = params_period_bytes(hw_params);
	chip->num_periods = params_periods(hw_params);
	chip->dma_size = chip->period_size * params_periods(hw_params);

	BUG_ON(!chip->dma_size);
	BUG_ON(chip->num_periods & (chip->num_periods-1));

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (NULL == buf)
		return -ENOMEM;

	buf->bpl = chip->period_size;

	dma = &buf->dma;
	videobuf_dma_init(dma);
	ret = videobuf_dma_init_kernel(dma, PCI_DMA_FROMDEVICE,
			(PAGE_ALIGN(chip->dma_size) >> PAGE_SHIFT));
	if (ret < 0)
		goto error;

	ret = videobuf_dma_map(&chip->pci->dev, dma);
	if (ret < 0)
		goto error;

	ret = cx23885_risc_databuffer(chip->pci, &buf->risc, dma->sglist,
				   chip->period_size, chip->num_periods, 1);
	if (ret < 0)
		goto error;

	/* Loop back to start of program */
	buf->risc.jmp[0] = cpu_to_le32(RISC_JUMP|RISC_IRQ1|RISC_CNT_INC);
	buf->risc.jmp[1] = cpu_to_le32(buf->risc.dma);
	buf->risc.jmp[2] = cpu_to_le32(0); /* bits 63-32 */

	chip->buf = buf;
	chip->dma_risc = dma;

	substream->runtime->dma_area = chip->dma_risc->vaddr;
	substream->runtime->dma_bytes = chip->dma_size;
	substream->runtime->dma_addr = 0;

	return 0;

error:
	/* NOTE(review): only buf is freed here; intermediate videobuf/RISC
	 * resources from the partially completed steps above are not —
	 * verify against videobuf_dma_* cleanup requirements. */
	kfree(buf);
	return ret;
}

/*
 * hw free callback
 */
static int snd_cx23885_hw_free(struct snd_pcm_substream *substream)
{

	struct cx23885_audio_dev *chip = snd_pcm_substream_chip(substream);

	if (substream->runtime->dma_area) {
		dsp_buffer_free(chip);
		substream->runtime->dma_area = NULL;
	}

	return 0;
}

/*
 * prepare callback
 */
static int snd_cx23885_prepare(struct snd_pcm_substream *substream)
{
	return 0;
}

/*
 * trigger callback
 */
static int snd_cx23885_card_trigger(struct snd_pcm_substream *substream,
	int cmd)
{
	struct cx23885_audio_dev *chip = snd_pcm_substream_chip(substream);
	int err;

	/* Local interrupts are already disabled by ALSA */
	spin_lock(&chip->lock);

	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
		err = cx23885_start_audio_dma(chip);
		break;
	case SNDRV_PCM_TRIGGER_STOP:
		err = cx23885_stop_audio_dma(chip);
		break;
	default:
		err = -EINVAL;
		break;
	}

	spin_unlock(&chip->lock);

	return err;
}

/*
 * pointer callback
 *
 * chip->count holds the hardware GP counter snapshot taken in the IRQ
 * handler; reduce it modulo the (power-of-two) period count.
 */
static snd_pcm_uframes_t snd_cx23885_pointer(
	struct snd_pcm_substream *substream)
{
	struct cx23885_audio_dev *chip = snd_pcm_substream_chip(substream);
	struct snd_pcm_runtime *runtime = substream->runtime;
	u16 count;

	count = atomic_read(&chip->count);

	return runtime->period_size * (count & (runtime->periods-1));
}

/*
 * page callback (needed for mmap)
 */
static struct page *snd_cx23885_page(struct snd_pcm_substream *substream,
				unsigned long offset)
{
	void *pageptr = substream->runtime->dma_area + offset;

	return vmalloc_to_page(pageptr);
}

/*
 * operators
 */
static struct snd_pcm_ops snd_cx23885_pcm_ops = {
	.open = snd_cx23885_pcm_open,
	.close = snd_cx23885_close,
	.ioctl = snd_pcm_lib_ioctl,
	.hw_params = snd_cx23885_hw_params,
	.hw_free = snd_cx23885_hw_free,
	.prepare = snd_cx23885_prepare,
	.trigger = snd_cx23885_card_trigger,
	.pointer = snd_cx23885_pointer,
	.page = snd_cx23885_page,
};

/*
 * create a PCM device
 */
static int snd_cx23885_pcm(struct cx23885_audio_dev *chip, int device,
	char *name)
{
	int err;
	struct snd_pcm *pcm;

	err = snd_pcm_new(chip->card, name, device, 0, 1, &pcm);
	if (err < 0)
		return err;
	pcm->private_data = chip;
	strcpy(pcm->name, name);
	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &snd_cx23885_pcm_ops);

	return 0;
}

/****************************************************************************
			Basic Flow for Sound Devices
 ****************************************************************************/

/*
 * Alsa Constructor - Component probe
 *
 * Returns the new audio device, or NULL when analog audio is disabled by
 * module parameter, the board lacks an SRAM channel config, or any
 * registration step fails.
 */
struct cx23885_audio_dev *cx23885_audio_register(struct cx23885_dev *dev)
{
	struct snd_card *card;
	struct cx23885_audio_dev *chip;
	int err;

	if (disable_analog_audio)
		return NULL;

	if (dev->sram_channels[AUDIO_SRAM_CHANNEL].cmds_start == 0) {
		printk(KERN_WARNING "%s(): Missing SRAM channel configuration "
			"for analog TV Audio\n", __func__);
		return NULL;
	}

	err = snd_card_create(SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1,
			THIS_MODULE, sizeof(struct cx23885_audio_dev), &card);
	if (err < 0)
		goto error;

	chip = (struct cx23885_audio_dev *) card->private_data;
	chip->dev = dev;
	chip->pci = dev->pci;
	chip->card = card;
	spin_lock_init(&chip->lock);

	snd_card_set_dev(card, &dev->pci->dev);

	err = snd_cx23885_pcm(chip, 0, "CX23885 Digital");
	if (err < 0)
		goto error;

	strcpy(card->driver, "CX23885");
	sprintf(card->shortname, "Conexant CX23885");
	sprintf(card->longname, "%s at %s", card->shortname, dev->name);

	err = snd_card_register(card);
	if (err < 0)
		goto error;

	dprintk(0, "registered ALSA audio device\n");

	return chip;

error:
	snd_card_free(card);
	printk(KERN_ERR "%s(): Failed to register analog "
			"audio adapter\n", __func__);

	return NULL;
}

/*
 * ALSA destructor
 */
void cx23885_audio_unregister(struct cx23885_dev *dev)
{
	struct cx23885_audio_dev *chip = dev->audio_dev;

	snd_card_free(chip->card);
}
gpl-2.0
Jackeagle/android_kernel_htc_dlxub1
drivers/ata/pata_radisys.c
5502
6962
/* * pata_radisys.c - Intel PATA/SATA controllers * * (C) 2006 Red Hat <alan@lxorguk.ukuu.org.uk> * * Some parts based on ata_piix.c by Jeff Garzik and others. * * A PIIX relative, this device has a single ATA channel and no * slave timings, SITRE or PPE. In that sense it is a close relative * of the original PIIX. It does however support UDMA 33/66 per channel * although no other modes/timings. Also lacking is 32bit I/O on the ATA * port. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/init.h> #include <linux/blkdev.h> #include <linux/delay.h> #include <linux/device.h> #include <scsi/scsi_host.h> #include <linux/libata.h> #include <linux/ata.h> #define DRV_NAME "pata_radisys" #define DRV_VERSION "0.4.4" /** * radisys_set_piomode - Initialize host controller PATA PIO timings * @ap: ATA port * @adev: Device whose timings we are configuring * * Set PIO mode for device, in host controller PCI config space. * * LOCKING: * None (inherited from caller). */ static void radisys_set_piomode (struct ata_port *ap, struct ata_device *adev) { unsigned int pio = adev->pio_mode - XFER_PIO_0; struct pci_dev *dev = to_pci_dev(ap->host->dev); u16 idetm_data; int control = 0; /* * See Intel Document 298600-004 for the timing programing rules * for PIIX/ICH. Note that the early PIIX does not have the slave * timing port at 0x44. The Radisys is a relative of the PIIX * but not the same so be careful. */ static const /* ISP RTC */ u8 timings[][2] = { { 0, 0 }, /* Check me */ { 0, 0 }, { 1, 1 }, { 2, 2 }, { 3, 3 }, }; if (pio > 0) control |= 1; /* TIME1 enable */ if (ata_pio_need_iordy(adev)) control |= 2; /* IE IORDY */ pci_read_config_word(dev, 0x40, &idetm_data); /* Enable IE and TIME as appropriate. 
Clear the other drive timing bits */ idetm_data &= 0xCCCC; idetm_data |= (control << (4 * adev->devno)); idetm_data |= (timings[pio][0] << 12) | (timings[pio][1] << 8); pci_write_config_word(dev, 0x40, idetm_data); /* Track which port is configured */ ap->private_data = adev; } /** * radisys_set_dmamode - Initialize host controller PATA DMA timings * @ap: Port whose timings we are configuring * @adev: Device to program * * Set MWDMA mode for device, in host controller PCI config space. * * LOCKING: * None (inherited from caller). */ static void radisys_set_dmamode (struct ata_port *ap, struct ata_device *adev) { struct pci_dev *dev = to_pci_dev(ap->host->dev); u16 idetm_data; u8 udma_enable; static const /* ISP RTC */ u8 timings[][2] = { { 0, 0 }, { 0, 0 }, { 1, 1 }, { 2, 2 }, { 3, 3 }, }; /* * MWDMA is driven by the PIO timings. We must also enable * IORDY unconditionally. */ pci_read_config_word(dev, 0x40, &idetm_data); pci_read_config_byte(dev, 0x48, &udma_enable); if (adev->dma_mode < XFER_UDMA_0) { unsigned int mwdma = adev->dma_mode - XFER_MW_DMA_0; const unsigned int needed_pio[3] = { XFER_PIO_0, XFER_PIO_3, XFER_PIO_4 }; int pio = needed_pio[mwdma] - XFER_PIO_0; int control = 3; /* IORDY|TIME0 */ /* If the drive MWDMA is faster than it can do PIO then we must force PIO0 for PIO cycles. */ if (adev->pio_mode < needed_pio[mwdma]) control = 1; /* Mask out the relevant control and timing bits we will load. 
Also clear the other drive TIME register as a precaution */ idetm_data &= 0xCCCC; idetm_data |= control << (4 * adev->devno); idetm_data |= (timings[pio][0] << 12) | (timings[pio][1] << 8); udma_enable &= ~(1 << adev->devno); } else { u8 udma_mode; /* UDMA66 on: UDMA 33 and 66 are switchable via register 0x4A */ pci_read_config_byte(dev, 0x4A, &udma_mode); if (adev->xfer_mode == XFER_UDMA_2) udma_mode &= ~(2 << (adev->devno * 4)); else /* UDMA 4 */ udma_mode |= (2 << (adev->devno * 4)); pci_write_config_byte(dev, 0x4A, udma_mode); udma_enable |= (1 << adev->devno); } pci_write_config_word(dev, 0x40, idetm_data); pci_write_config_byte(dev, 0x48, udma_enable); /* Track which port is configured */ ap->private_data = adev; } /** * radisys_qc_issue - command issue * @qc: command pending * * Called when the libata layer is about to issue a command. We wrap * this interface so that we can load the correct ATA timings if * necessary. Our logic also clears TIME0/TIME1 for the other device so * that, even if we get this wrong, cycles to the other device will * be made PIO0. */ static unsigned int radisys_qc_issue(struct ata_queued_cmd *qc) { struct ata_port *ap = qc->ap; struct ata_device *adev = qc->dev; if (adev != ap->private_data) { /* UDMA timing is not shared */ if (adev->dma_mode < XFER_UDMA_0) { if (adev->dma_mode) radisys_set_dmamode(ap, adev); else if (adev->pio_mode) radisys_set_piomode(ap, adev); } } return ata_bmdma_qc_issue(qc); } static struct scsi_host_template radisys_sht = { ATA_BMDMA_SHT(DRV_NAME), }; static struct ata_port_operations radisys_pata_ops = { .inherits = &ata_bmdma_port_ops, .qc_issue = radisys_qc_issue, .cable_detect = ata_cable_unknown, .set_piomode = radisys_set_piomode, .set_dmamode = radisys_set_dmamode, }; /** * radisys_init_one - Register PIIX ATA PCI device with kernel services * @pdev: PCI device to register * @ent: Entry in radisys_pci_tbl matching with @pdev * * Called from kernel PCI layer. 
We probe for combined mode (sigh), * and then hand over control to libata, for it to do the rest. * * LOCKING: * Inherited from PCI layer (may sleep). * * RETURNS: * Zero on success, or -ERRNO value. */ static int radisys_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) { static const struct ata_port_info info = { .flags = ATA_FLAG_SLAVE_POSS, .pio_mask = ATA_PIO4, .mwdma_mask = ATA_MWDMA12_ONLY, .udma_mask = ATA_UDMA24_ONLY, .port_ops = &radisys_pata_ops, }; const struct ata_port_info *ppi[] = { &info, NULL }; ata_print_version_once(&pdev->dev, DRV_VERSION); return ata_pci_bmdma_init_one(pdev, ppi, &radisys_sht, NULL, 0); } static const struct pci_device_id radisys_pci_tbl[] = { { PCI_VDEVICE(RADISYS, 0x8201), }, { } /* terminate list */ }; static struct pci_driver radisys_pci_driver = { .name = DRV_NAME, .id_table = radisys_pci_tbl, .probe = radisys_init_one, .remove = ata_pci_remove_one, #ifdef CONFIG_PM .suspend = ata_pci_device_suspend, .resume = ata_pci_device_resume, #endif }; static int __init radisys_init(void) { return pci_register_driver(&radisys_pci_driver); } static void __exit radisys_exit(void) { pci_unregister_driver(&radisys_pci_driver); } module_init(radisys_init); module_exit(radisys_exit); MODULE_AUTHOR("Alan Cox"); MODULE_DESCRIPTION("SCSI low-level driver for Radisys R82600 controllers"); MODULE_LICENSE("GPL"); MODULE_DEVICE_TABLE(pci, radisys_pci_tbl); MODULE_VERSION(DRV_VERSION);
gpl-2.0
ubuntustudio-kernel/ubuntu-raring-lowlatency
drivers/w1/masters/ds2490.c
7806
24010
/* * dscore.c * * Copyright (c) 2004 Evgeniy Polyakov <zbr@ioremap.net> * * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/mod_devicetable.h> #include <linux/usb.h> #include <linux/slab.h> #include "../w1_int.h" #include "../w1.h" /* COMMAND TYPE CODES */ #define CONTROL_CMD 0x00 #define COMM_CMD 0x01 #define MODE_CMD 0x02 /* CONTROL COMMAND CODES */ #define CTL_RESET_DEVICE 0x0000 #define CTL_START_EXE 0x0001 #define CTL_RESUME_EXE 0x0002 #define CTL_HALT_EXE_IDLE 0x0003 #define CTL_HALT_EXE_DONE 0x0004 #define CTL_FLUSH_COMM_CMDS 0x0007 #define CTL_FLUSH_RCV_BUFFER 0x0008 #define CTL_FLUSH_XMT_BUFFER 0x0009 #define CTL_GET_COMM_CMDS 0x000A /* MODE COMMAND CODES */ #define MOD_PULSE_EN 0x0000 #define MOD_SPEED_CHANGE_EN 0x0001 #define MOD_1WIRE_SPEED 0x0002 #define MOD_STRONG_PU_DURATION 0x0003 #define MOD_PULLDOWN_SLEWRATE 0x0004 #define MOD_PROG_PULSE_DURATION 0x0005 #define MOD_WRITE1_LOWTIME 0x0006 #define MOD_DSOW0_TREC 0x0007 /* COMMUNICATION COMMAND CODES */ #define COMM_ERROR_ESCAPE 0x0601 #define COMM_SET_DURATION 0x0012 #define COMM_BIT_IO 0x0020 #define COMM_PULSE 0x0030 #define COMM_1_WIRE_RESET 0x0042 #define COMM_BYTE_IO 0x0052 #define COMM_MATCH_ACCESS 0x0064 #define COMM_BLOCK_IO 0x0074 #define COMM_READ_STRAIGHT 0x0080 #define 
COMM_DO_RELEASE 0x6092 #define COMM_SET_PATH 0x00A2 #define COMM_WRITE_SRAM_PAGE 0x00B2 #define COMM_WRITE_EPROM 0x00C4 #define COMM_READ_CRC_PROT_PAGE 0x00D4 #define COMM_READ_REDIRECT_PAGE_CRC 0x21E4 #define COMM_SEARCH_ACCESS 0x00F4 /* Communication command bits */ #define COMM_TYPE 0x0008 #define COMM_SE 0x0008 #define COMM_D 0x0008 #define COMM_Z 0x0008 #define COMM_CH 0x0008 #define COMM_SM 0x0008 #define COMM_R 0x0008 #define COMM_IM 0x0001 #define COMM_PS 0x4000 #define COMM_PST 0x4000 #define COMM_CIB 0x4000 #define COMM_RTS 0x4000 #define COMM_DT 0x2000 #define COMM_SPU 0x1000 #define COMM_F 0x0800 #define COMM_NTF 0x0400 #define COMM_ICP 0x0200 #define COMM_RST 0x0100 #define PULSE_PROG 0x01 #define PULSE_SPUE 0x02 #define BRANCH_MAIN 0xCC #define BRANCH_AUX 0x33 /* Status flags */ #define ST_SPUA 0x01 /* Strong Pull-up is active */ #define ST_PRGA 0x02 /* 12V programming pulse is being generated */ #define ST_12VP 0x04 /* external 12V programming voltage is present */ #define ST_PMOD 0x08 /* DS2490 powered from USB and external sources */ #define ST_HALT 0x10 /* DS2490 is currently halted */ #define ST_IDLE 0x20 /* DS2490 is currently idle */ #define ST_EPOF 0x80 /* Result Register flags */ #define RR_DETECT 0xA5 /* New device detected */ #define RR_NRS 0x01 /* Reset no presence or ... 
*/ #define RR_SH 0x02 /* short on reset or set path */ #define RR_APP 0x04 /* alarming presence on reset */ #define RR_VPP 0x08 /* 12V expected not seen */ #define RR_CMP 0x10 /* compare error */ #define RR_CRC 0x20 /* CRC error detected */ #define RR_RDP 0x40 /* redirected page */ #define RR_EOS 0x80 /* end of search error */ #define SPEED_NORMAL 0x00 #define SPEED_FLEXIBLE 0x01 #define SPEED_OVERDRIVE 0x02 #define NUM_EP 4 #define EP_CONTROL 0 #define EP_STATUS 1 #define EP_DATA_OUT 2 #define EP_DATA_IN 3 struct ds_device { struct list_head ds_entry; struct usb_device *udev; struct usb_interface *intf; int ep[NUM_EP]; /* Strong PullUp * 0: pullup not active, else duration in milliseconds */ int spu_sleep; /* spu_bit contains COMM_SPU or 0 depending on if the strong pullup * should be active or not for writes. */ u16 spu_bit; struct w1_bus_master master; }; struct ds_status { u8 enable; u8 speed; u8 pullup_dur; u8 ppuls_dur; u8 pulldown_slew; u8 write1_time; u8 write0_time; u8 reserved0; u8 status; u8 command0; u8 command1; u8 command_buffer_status; u8 data_out_buffer_status; u8 data_in_buffer_status; u8 reserved1; u8 reserved2; }; static struct usb_device_id ds_id_table [] = { { USB_DEVICE(0x04fa, 0x2490) }, { }, }; MODULE_DEVICE_TABLE(usb, ds_id_table); static int ds_probe(struct usb_interface *, const struct usb_device_id *); static void ds_disconnect(struct usb_interface *); static int ds_send_control(struct ds_device *, u16, u16); static int ds_send_control_cmd(struct ds_device *, u16, u16); static LIST_HEAD(ds_devices); static DEFINE_MUTEX(ds_mutex); static struct usb_driver ds_driver = { .name = "DS9490R", .probe = ds_probe, .disconnect = ds_disconnect, .id_table = ds_id_table, }; static int ds_send_control_cmd(struct ds_device *dev, u16 value, u16 index) { int err; err = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, dev->ep[EP_CONTROL]), CONTROL_CMD, 0x40, value, index, NULL, 0, 1000); if (err < 0) { printk(KERN_ERR "Failed to send command control 
message %x.%x: err=%d.\n", value, index, err); return err; } return err; } static int ds_send_control_mode(struct ds_device *dev, u16 value, u16 index) { int err; err = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, dev->ep[EP_CONTROL]), MODE_CMD, 0x40, value, index, NULL, 0, 1000); if (err < 0) { printk(KERN_ERR "Failed to send mode control message %x.%x: err=%d.\n", value, index, err); return err; } return err; } static int ds_send_control(struct ds_device *dev, u16 value, u16 index) { int err; err = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, dev->ep[EP_CONTROL]), COMM_CMD, 0x40, value, index, NULL, 0, 1000); if (err < 0) { printk(KERN_ERR "Failed to send control message %x.%x: err=%d.\n", value, index, err); return err; } return err; } static int ds_recv_status_nodump(struct ds_device *dev, struct ds_status *st, unsigned char *buf, int size) { int count, err; memset(st, 0, sizeof(*st)); count = 0; err = usb_bulk_msg(dev->udev, usb_rcvbulkpipe(dev->udev, dev->ep[EP_STATUS]), buf, size, &count, 100); if (err < 0) { printk(KERN_ERR "Failed to read 1-wire data from 0x%x: err=%d.\n", dev->ep[EP_STATUS], err); return err; } if (count >= sizeof(*st)) memcpy(st, buf, sizeof(*st)); return count; } static inline void ds_print_msg(unsigned char *buf, unsigned char *str, int off) { printk(KERN_INFO "%45s: %8x\n", str, buf[off]); } static void ds_dump_status(struct ds_device *dev, unsigned char *buf, int count) { int i; printk(KERN_INFO "0x%x: count=%d, status: ", dev->ep[EP_STATUS], count); for (i=0; i<count; ++i) printk("%02x ", buf[i]); printk(KERN_INFO "\n"); if (count >= 16) { ds_print_msg(buf, "enable flag", 0); ds_print_msg(buf, "1-wire speed", 1); ds_print_msg(buf, "strong pullup duration", 2); ds_print_msg(buf, "programming pulse duration", 3); ds_print_msg(buf, "pulldown slew rate control", 4); ds_print_msg(buf, "write-1 low time", 5); ds_print_msg(buf, "data sample offset/write-0 recovery time", 6); ds_print_msg(buf, "reserved (test register)", 
7); ds_print_msg(buf, "device status flags", 8); ds_print_msg(buf, "communication command byte 1", 9); ds_print_msg(buf, "communication command byte 2", 10); ds_print_msg(buf, "communication command buffer status", 11); ds_print_msg(buf, "1-wire data output buffer status", 12); ds_print_msg(buf, "1-wire data input buffer status", 13); ds_print_msg(buf, "reserved", 14); ds_print_msg(buf, "reserved", 15); } for (i = 16; i < count; ++i) { if (buf[i] == RR_DETECT) { ds_print_msg(buf, "new device detect", i); continue; } ds_print_msg(buf, "Result Register Value: ", i); if (buf[i] & RR_NRS) printk(KERN_INFO "NRS: Reset no presence or ...\n"); if (buf[i] & RR_SH) printk(KERN_INFO "SH: short on reset or set path\n"); if (buf[i] & RR_APP) printk(KERN_INFO "APP: alarming presence on reset\n"); if (buf[i] & RR_VPP) printk(KERN_INFO "VPP: 12V expected not seen\n"); if (buf[i] & RR_CMP) printk(KERN_INFO "CMP: compare error\n"); if (buf[i] & RR_CRC) printk(KERN_INFO "CRC: CRC error detected\n"); if (buf[i] & RR_RDP) printk(KERN_INFO "RDP: redirected page\n"); if (buf[i] & RR_EOS) printk(KERN_INFO "EOS: end of search error\n"); } } static void ds_reset_device(struct ds_device *dev) { ds_send_control_cmd(dev, CTL_RESET_DEVICE, 0); /* Always allow strong pullup which allow individual writes to use * the strong pullup. */ if (ds_send_control_mode(dev, MOD_PULSE_EN, PULSE_SPUE)) printk(KERN_ERR "ds_reset_device: " "Error allowing strong pullup\n"); /* Chip strong pullup time was cleared. */ if (dev->spu_sleep) { /* lower 4 bits are 0, see ds_set_pullup */ u8 del = dev->spu_sleep>>4; if (ds_send_control(dev, COMM_SET_DURATION | COMM_IM, del)) printk(KERN_ERR "ds_reset_device: " "Error setting duration\n"); } } static int ds_recv_data(struct ds_device *dev, unsigned char *buf, int size) { int count, err; struct ds_status st; /* Careful on size. If size is less than what is available in * the input buffer, the device fails the bulk transfer and * clears the input buffer. 
It could read the maximum size of * the data buffer, but then do you return the first, last, or * some set of the middle size bytes? As long as the rest of * the code is correct there will be size bytes waiting. A * call to ds_wait_status will wait until the device is idle * and any data to be received would have been available. */ count = 0; err = usb_bulk_msg(dev->udev, usb_rcvbulkpipe(dev->udev, dev->ep[EP_DATA_IN]), buf, size, &count, 1000); if (err < 0) { u8 buf[0x20]; int count; printk(KERN_INFO "Clearing ep0x%x.\n", dev->ep[EP_DATA_IN]); usb_clear_halt(dev->udev, usb_rcvbulkpipe(dev->udev, dev->ep[EP_DATA_IN])); count = ds_recv_status_nodump(dev, &st, buf, sizeof(buf)); ds_dump_status(dev, buf, count); return err; } #if 0 { int i; printk("%s: count=%d: ", __func__, count); for (i=0; i<count; ++i) printk("%02x ", buf[i]); printk("\n"); } #endif return count; } static int ds_send_data(struct ds_device *dev, unsigned char *buf, int len) { int count, err; count = 0; err = usb_bulk_msg(dev->udev, usb_sndbulkpipe(dev->udev, dev->ep[EP_DATA_OUT]), buf, len, &count, 1000); if (err < 0) { printk(KERN_ERR "Failed to write 1-wire data to ep0x%x: " "err=%d.\n", dev->ep[EP_DATA_OUT], err); return err; } return err; } #if 0 int ds_stop_pulse(struct ds_device *dev, int limit) { struct ds_status st; int count = 0, err = 0; u8 buf[0x20]; do { err = ds_send_control(dev, CTL_HALT_EXE_IDLE, 0); if (err) break; err = ds_send_control(dev, CTL_RESUME_EXE, 0); if (err) break; err = ds_recv_status_nodump(dev, &st, buf, sizeof(buf)); if (err) break; if ((st.status & ST_SPUA) == 0) { err = ds_send_control_mode(dev, MOD_PULSE_EN, 0); if (err) break; } } while(++count < limit); return err; } int ds_detect(struct ds_device *dev, struct ds_status *st) { int err; err = ds_send_control_cmd(dev, CTL_RESET_DEVICE, 0); if (err) return err; err = ds_send_control(dev, COMM_SET_DURATION | COMM_IM, 0); if (err) return err; err = ds_send_control(dev, COMM_SET_DURATION | COMM_IM | COMM_TYPE, 0x40); 
if (err) return err; err = ds_send_control_mode(dev, MOD_PULSE_EN, PULSE_PROG); if (err) return err; err = ds_dump_status(dev, st); return err; } #endif /* 0 */ static int ds_wait_status(struct ds_device *dev, struct ds_status *st) { u8 buf[0x20]; int err, count = 0; do { err = ds_recv_status_nodump(dev, st, buf, sizeof(buf)); #if 0 if (err >= 0) { int i; printk("0x%x: count=%d, status: ", dev->ep[EP_STATUS], err); for (i=0; i<err; ++i) printk("%02x ", buf[i]); printk("\n"); } #endif } while (!(buf[0x08] & ST_IDLE) && !(err < 0) && ++count < 100); if (err >= 16 && st->status & ST_EPOF) { printk(KERN_INFO "Resetting device after ST_EPOF.\n"); ds_reset_device(dev); /* Always dump the device status. */ count = 101; } /* Dump the status for errors or if there is extended return data. * The extended status includes new device detection (maybe someone * can do something with it). */ if (err > 16 || count >= 100 || err < 0) ds_dump_status(dev, buf, err); /* Extended data isn't an error. Well, a short is, but the dump * would have already told the user that and we can't do anything * about it in software anyway. */ if (count >= 100 || err < 0) return -1; else return 0; } static int ds_reset(struct ds_device *dev) { int err; /* Other potentionally interesting flags for reset. * * COMM_NTF: Return result register feedback. This could be used to * detect some conditions such as short, alarming presence, or * detect if a new device was detected. * * COMM_SE which allows SPEED_NORMAL, SPEED_FLEXIBLE, SPEED_OVERDRIVE: * Select the data transfer rate. 
*/ err = ds_send_control(dev, COMM_1_WIRE_RESET | COMM_IM, SPEED_NORMAL); if (err) return err; return 0; } #if 0 static int ds_set_speed(struct ds_device *dev, int speed) { int err; if (speed != SPEED_NORMAL && speed != SPEED_FLEXIBLE && speed != SPEED_OVERDRIVE) return -EINVAL; if (speed != SPEED_OVERDRIVE) speed = SPEED_FLEXIBLE; speed &= 0xff; err = ds_send_control_mode(dev, MOD_1WIRE_SPEED, speed); if (err) return err; return err; } #endif /* 0 */ static int ds_set_pullup(struct ds_device *dev, int delay) { int err = 0; u8 del = 1 + (u8)(delay >> 4); /* Just storing delay would not get the trunication and roundup. */ int ms = del<<4; /* Enable spu_bit if a delay is set. */ dev->spu_bit = delay ? COMM_SPU : 0; /* If delay is zero, it has already been disabled, if the time is * the same as the hardware was last programmed to, there is also * nothing more to do. Compare with the recalculated value ms * rather than del or delay which can have a different value. */ if (delay == 0 || ms == dev->spu_sleep) return err; err = ds_send_control(dev, COMM_SET_DURATION | COMM_IM, del); if (err) return err; dev->spu_sleep = ms; return err; } static int ds_touch_bit(struct ds_device *dev, u8 bit, u8 *tbit) { int err; struct ds_status st; err = ds_send_control(dev, COMM_BIT_IO | COMM_IM | (bit ? COMM_D : 0), 0); if (err) return err; ds_wait_status(dev, &st); err = ds_recv_data(dev, tbit, sizeof(*tbit)); if (err < 0) return err; return 0; } #if 0 static int ds_write_bit(struct ds_device *dev, u8 bit) { int err; struct ds_status st; /* Set COMM_ICP to write without a readback. Note, this will * produce one time slot, a down followed by an up with COMM_D * only determing the timing. */ err = ds_send_control(dev, COMM_BIT_IO | COMM_IM | COMM_ICP | (bit ? 
COMM_D : 0), 0); if (err) return err; ds_wait_status(dev, &st); return 0; } #endif static int ds_write_byte(struct ds_device *dev, u8 byte) { int err; struct ds_status st; u8 rbyte; err = ds_send_control(dev, COMM_BYTE_IO | COMM_IM | dev->spu_bit, byte); if (err) return err; if (dev->spu_bit) msleep(dev->spu_sleep); err = ds_wait_status(dev, &st); if (err) return err; err = ds_recv_data(dev, &rbyte, sizeof(rbyte)); if (err < 0) return err; return !(byte == rbyte); } static int ds_read_byte(struct ds_device *dev, u8 *byte) { int err; struct ds_status st; err = ds_send_control(dev, COMM_BYTE_IO | COMM_IM , 0xff); if (err) return err; ds_wait_status(dev, &st); err = ds_recv_data(dev, byte, sizeof(*byte)); if (err < 0) return err; return 0; } static int ds_read_block(struct ds_device *dev, u8 *buf, int len) { struct ds_status st; int err; if (len > 64*1024) return -E2BIG; memset(buf, 0xFF, len); err = ds_send_data(dev, buf, len); if (err < 0) return err; err = ds_send_control(dev, COMM_BLOCK_IO | COMM_IM, len); if (err) return err; ds_wait_status(dev, &st); memset(buf, 0x00, len); err = ds_recv_data(dev, buf, len); return err; } static int ds_write_block(struct ds_device *dev, u8 *buf, int len) { int err; struct ds_status st; err = ds_send_data(dev, buf, len); if (err < 0) return err; err = ds_send_control(dev, COMM_BLOCK_IO | COMM_IM | dev->spu_bit, len); if (err) return err; if (dev->spu_bit) msleep(dev->spu_sleep); ds_wait_status(dev, &st); err = ds_recv_data(dev, buf, len); if (err < 0) return err; return !(err == len); } #if 0 static int ds_search(struct ds_device *dev, u64 init, u64 *buf, u8 id_number, int conditional_search) { int err; u16 value, index; struct ds_status st; memset(buf, 0, sizeof(buf)); err = ds_send_data(ds_dev, (unsigned char *)&init, 8); if (err) return err; ds_wait_status(ds_dev, &st); value = COMM_SEARCH_ACCESS | COMM_IM | COMM_SM | COMM_F | COMM_RTS; index = (conditional_search ? 
0xEC : 0xF0) | (id_number << 8); err = ds_send_control(ds_dev, value, index); if (err) return err; ds_wait_status(ds_dev, &st); err = ds_recv_data(ds_dev, (unsigned char *)buf, 8*id_number); if (err < 0) return err; return err/8; } static int ds_match_access(struct ds_device *dev, u64 init) { int err; struct ds_status st; err = ds_send_data(dev, (unsigned char *)&init, sizeof(init)); if (err) return err; ds_wait_status(dev, &st); err = ds_send_control(dev, COMM_MATCH_ACCESS | COMM_IM | COMM_RST, 0x0055); if (err) return err; ds_wait_status(dev, &st); return 0; } static int ds_set_path(struct ds_device *dev, u64 init) { int err; struct ds_status st; u8 buf[9]; memcpy(buf, &init, 8); buf[8] = BRANCH_MAIN; err = ds_send_data(dev, buf, sizeof(buf)); if (err) return err; ds_wait_status(dev, &st); err = ds_send_control(dev, COMM_SET_PATH | COMM_IM | COMM_RST, 0); if (err) return err; ds_wait_status(dev, &st); return 0; } #endif /* 0 */ static u8 ds9490r_touch_bit(void *data, u8 bit) { u8 ret; struct ds_device *dev = data; if (ds_touch_bit(dev, bit, &ret)) return 0; return ret; } #if 0 static void ds9490r_write_bit(void *data, u8 bit) { struct ds_device *dev = data; ds_write_bit(dev, bit); } static u8 ds9490r_read_bit(void *data) { struct ds_device *dev = data; int err; u8 bit = 0; err = ds_touch_bit(dev, 1, &bit); if (err) return 0; return bit & 1; } #endif static void ds9490r_write_byte(void *data, u8 byte) { struct ds_device *dev = data; ds_write_byte(dev, byte); } static u8 ds9490r_read_byte(void *data) { struct ds_device *dev = data; int err; u8 byte = 0; err = ds_read_byte(dev, &byte); if (err) return 0; return byte; } static void ds9490r_write_block(void *data, const u8 *buf, int len) { struct ds_device *dev = data; ds_write_block(dev, (u8 *)buf, len); } static u8 ds9490r_read_block(void *data, u8 *buf, int len) { struct ds_device *dev = data; int err; err = ds_read_block(dev, buf, len); if (err < 0) return 0; return len; } static u8 ds9490r_reset(void *data) { 
struct ds_device *dev = data; int err; err = ds_reset(dev); if (err) return 1; return 0; } static u8 ds9490r_set_pullup(void *data, int delay) { struct ds_device *dev = data; if (ds_set_pullup(dev, delay)) return 1; return 0; } static int ds_w1_init(struct ds_device *dev) { memset(&dev->master, 0, sizeof(struct w1_bus_master)); /* Reset the device as it can be in a bad state. * This is necessary because a block write will wait for data * to be placed in the output buffer and block any later * commands which will keep accumulating and the device will * not be idle. Another case is removing the ds2490 module * while a bus search is in progress, somehow a few commands * get through, but the input transfers fail leaving data in * the input buffer. This will cause the next read to fail * see the note in ds_recv_data. */ ds_reset_device(dev); dev->master.data = dev; dev->master.touch_bit = &ds9490r_touch_bit; /* read_bit and write_bit in w1_bus_master are expected to set and * sample the line level. For write_bit that means it is expected to * set it to that value and leave it there. ds2490 only supports an * individual time slot at the lowest level. The requirement from * pulling the bus state down to reading the state is 15us, something * that isn't realistic on the USB bus anyway. 
dev->master.read_bit = &ds9490r_read_bit; dev->master.write_bit = &ds9490r_write_bit; */ dev->master.read_byte = &ds9490r_read_byte; dev->master.write_byte = &ds9490r_write_byte; dev->master.read_block = &ds9490r_read_block; dev->master.write_block = &ds9490r_write_block; dev->master.reset_bus = &ds9490r_reset; dev->master.set_pullup = &ds9490r_set_pullup; return w1_add_master_device(&dev->master); } static void ds_w1_fini(struct ds_device *dev) { w1_remove_master_device(&dev->master); } static int ds_probe(struct usb_interface *intf, const struct usb_device_id *udev_id) { struct usb_device *udev = interface_to_usbdev(intf); struct usb_endpoint_descriptor *endpoint; struct usb_host_interface *iface_desc; struct ds_device *dev; int i, err; dev = kmalloc(sizeof(struct ds_device), GFP_KERNEL); if (!dev) { printk(KERN_INFO "Failed to allocate new DS9490R structure.\n"); return -ENOMEM; } dev->spu_sleep = 0; dev->spu_bit = 0; dev->udev = usb_get_dev(udev); if (!dev->udev) { err = -ENOMEM; goto err_out_free; } memset(dev->ep, 0, sizeof(dev->ep)); usb_set_intfdata(intf, dev); err = usb_set_interface(dev->udev, intf->altsetting[0].desc.bInterfaceNumber, 3); if (err) { printk(KERN_ERR "Failed to set alternative setting 3 for %d interface: err=%d.\n", intf->altsetting[0].desc.bInterfaceNumber, err); goto err_out_clear; } err = usb_reset_configuration(dev->udev); if (err) { printk(KERN_ERR "Failed to reset configuration: err=%d.\n", err); goto err_out_clear; } iface_desc = &intf->altsetting[0]; if (iface_desc->desc.bNumEndpoints != NUM_EP-1) { printk(KERN_INFO "Num endpoints=%d. It is not DS9490R.\n", iface_desc->desc.bNumEndpoints); err = -EINVAL; goto err_out_clear; } /* * This loop doesn'd show control 0 endpoint, * so we will fill only 1-3 endpoints entry. 
*/ for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) { endpoint = &iface_desc->endpoint[i].desc; dev->ep[i+1] = endpoint->bEndpointAddress; #if 0 printk("%d: addr=%x, size=%d, dir=%s, type=%x\n", i, endpoint->bEndpointAddress, le16_to_cpu(endpoint->wMaxPacketSize), (endpoint->bEndpointAddress & USB_DIR_IN)?"IN":"OUT", endpoint->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK); #endif } err = ds_w1_init(dev); if (err) goto err_out_clear; mutex_lock(&ds_mutex); list_add_tail(&dev->ds_entry, &ds_devices); mutex_unlock(&ds_mutex); return 0; err_out_clear: usb_set_intfdata(intf, NULL); usb_put_dev(dev->udev); err_out_free: kfree(dev); return err; } static void ds_disconnect(struct usb_interface *intf) { struct ds_device *dev; dev = usb_get_intfdata(intf); if (!dev) return; mutex_lock(&ds_mutex); list_del(&dev->ds_entry); mutex_unlock(&ds_mutex); ds_w1_fini(dev); usb_set_intfdata(intf, NULL); usb_put_dev(dev->udev); kfree(dev); } module_usb_driver(ds_driver); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Evgeniy Polyakov <zbr@ioremap.net>"); MODULE_DESCRIPTION("DS2490 USB <-> W1 bus master driver (DS9490*)");
gpl-2.0
DragunKorr/dragun-android_kernel_htc_pyramid
drivers/message/fusion/mptfc.c
8318
42837
/* * linux/drivers/message/fusion/mptfc.c * For use with LSI PCI chip/adapter(s) * running LSI Fusion MPT (Message Passing Technology) firmware. * * Copyright (c) 1999-2008 LSI Corporation * (mailto:DL-MPTFusionLinux@lsi.com) * */ /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /* This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. NO WARRANTY THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is solely responsible for determining the appropriateness of using and distributing the Program and assumes all risks associated with its exercise of rights under this Agreement, including but not limited to the risks and costs of program errors, damage to or loss of data, programs or equipment, and unavailability or interruption of operations. 
DISCLAIMER OF LIABILITY NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/errno.h> #include <linux/kdev_t.h> #include <linux/blkdev.h> #include <linux/delay.h> /* for mdelay */ #include <linux/interrupt.h> /* needed for in_interrupt() proto */ #include <linux/reboot.h> /* notifier code */ #include <linux/workqueue.h> #include <linux/sort.h> #include <linux/slab.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_device.h> #include <scsi/scsi_host.h> #include <scsi/scsi_tcq.h> #include <scsi/scsi_transport_fc.h> #include "mptbase.h" #include "mptscsih.h" /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ #define my_NAME "Fusion MPT FC Host driver" #define my_VERSION MPT_LINUX_VERSION_COMMON #define MYNAM "mptfc" MODULE_AUTHOR(MODULEAUTHOR); MODULE_DESCRIPTION(my_NAME); MODULE_LICENSE("GPL"); MODULE_VERSION(my_VERSION); /* Command line args */ #define MPTFC_DEV_LOSS_TMO (60) static int mptfc_dev_loss_tmo = MPTFC_DEV_LOSS_TMO; /* reasonable default */ module_param(mptfc_dev_loss_tmo, int, 0); MODULE_PARM_DESC(mptfc_dev_loss_tmo, " Initial time the driver programs the " " transport to wait for an rport to " " return 
following a device loss event." " Default=60."); /* scsi-mid layer global parmeter is max_report_luns, which is 511 */ #define MPTFC_MAX_LUN (16895) static int max_lun = MPTFC_MAX_LUN; module_param(max_lun, int, 0); MODULE_PARM_DESC(max_lun, " max lun, default=16895 "); static u8 mptfcDoneCtx = MPT_MAX_PROTOCOL_DRIVERS; static u8 mptfcTaskCtx = MPT_MAX_PROTOCOL_DRIVERS; static u8 mptfcInternalCtx = MPT_MAX_PROTOCOL_DRIVERS; static int mptfc_target_alloc(struct scsi_target *starget); static int mptfc_slave_alloc(struct scsi_device *sdev); static int mptfc_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *SCpnt); static void mptfc_target_destroy(struct scsi_target *starget); static void mptfc_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout); static void __devexit mptfc_remove(struct pci_dev *pdev); static int mptfc_abort(struct scsi_cmnd *SCpnt); static int mptfc_dev_reset(struct scsi_cmnd *SCpnt); static int mptfc_bus_reset(struct scsi_cmnd *SCpnt); static int mptfc_host_reset(struct scsi_cmnd *SCpnt); static struct scsi_host_template mptfc_driver_template = { .module = THIS_MODULE, .proc_name = "mptfc", .proc_info = mptscsih_proc_info, .name = "MPT FC Host", .info = mptscsih_info, .queuecommand = mptfc_qcmd, .target_alloc = mptfc_target_alloc, .slave_alloc = mptfc_slave_alloc, .slave_configure = mptscsih_slave_configure, .target_destroy = mptfc_target_destroy, .slave_destroy = mptscsih_slave_destroy, .change_queue_depth = mptscsih_change_queue_depth, .eh_abort_handler = mptfc_abort, .eh_device_reset_handler = mptfc_dev_reset, .eh_bus_reset_handler = mptfc_bus_reset, .eh_host_reset_handler = mptfc_host_reset, .bios_param = mptscsih_bios_param, .can_queue = MPT_FC_CAN_QUEUE, .this_id = -1, .sg_tablesize = MPT_SCSI_SG_DEPTH, .max_sectors = 8192, .cmd_per_lun = 7, .use_clustering = ENABLE_CLUSTERING, .shost_attrs = mptscsih_host_attrs, }; /**************************************************************************** * Supported hardware */ static struct 
pci_device_id mptfc_pci_table[] = { { PCI_VENDOR_ID_LSI_LOGIC, MPI_MANUFACTPAGE_DEVICEID_FC909, PCI_ANY_ID, PCI_ANY_ID }, { PCI_VENDOR_ID_LSI_LOGIC, MPI_MANUFACTPAGE_DEVICEID_FC919, PCI_ANY_ID, PCI_ANY_ID }, { PCI_VENDOR_ID_LSI_LOGIC, MPI_MANUFACTPAGE_DEVICEID_FC929, PCI_ANY_ID, PCI_ANY_ID }, { PCI_VENDOR_ID_LSI_LOGIC, MPI_MANUFACTPAGE_DEVICEID_FC919X, PCI_ANY_ID, PCI_ANY_ID }, { PCI_VENDOR_ID_LSI_LOGIC, MPI_MANUFACTPAGE_DEVICEID_FC929X, PCI_ANY_ID, PCI_ANY_ID }, { PCI_VENDOR_ID_LSI_LOGIC, MPI_MANUFACTPAGE_DEVICEID_FC939X, PCI_ANY_ID, PCI_ANY_ID }, { PCI_VENDOR_ID_LSI_LOGIC, MPI_MANUFACTPAGE_DEVICEID_FC949X, PCI_ANY_ID, PCI_ANY_ID }, { PCI_VENDOR_ID_LSI_LOGIC, MPI_MANUFACTPAGE_DEVICEID_FC949E, PCI_ANY_ID, PCI_ANY_ID }, { PCI_VENDOR_ID_BROCADE, MPI_MANUFACTPAGE_DEVICEID_FC949E, PCI_ANY_ID, PCI_ANY_ID }, {0} /* Terminating entry */ }; MODULE_DEVICE_TABLE(pci, mptfc_pci_table); static struct scsi_transport_template *mptfc_transport_template = NULL; static struct fc_function_template mptfc_transport_functions = { .dd_fcrport_size = 8, .show_host_node_name = 1, .show_host_port_name = 1, .show_host_supported_classes = 1, .show_host_port_id = 1, .show_rport_supported_classes = 1, .show_starget_node_name = 1, .show_starget_port_name = 1, .show_starget_port_id = 1, .set_rport_dev_loss_tmo = mptfc_set_rport_loss_tmo, .show_rport_dev_loss_tmo = 1, .show_host_supported_speeds = 1, .show_host_maxframe_size = 1, .show_host_speed = 1, .show_host_fabric_name = 1, .show_host_port_type = 1, .show_host_port_state = 1, .show_host_symbolic_name = 1, }; static int mptfc_block_error_handler(struct scsi_cmnd *SCpnt, int (*func)(struct scsi_cmnd *SCpnt), const char *caller) { MPT_SCSI_HOST *hd; struct scsi_device *sdev = SCpnt->device; struct Scsi_Host *shost = sdev->host; struct fc_rport *rport = starget_to_rport(scsi_target(sdev)); unsigned long flags; int ready; MPT_ADAPTER *ioc; int loops = 40; /* seconds */ hd = shost_priv(SCpnt->device->host); ioc = hd->ioc; 
spin_lock_irqsave(shost->host_lock, flags); while ((ready = fc_remote_port_chkready(rport) >> 16) == DID_IMM_RETRY || (loops > 0 && ioc->active == 0)) { spin_unlock_irqrestore(shost->host_lock, flags); dfcprintk (ioc, printk(MYIOC_s_DEBUG_FMT "mptfc_block_error_handler.%d: %d:%d, port status is " "%x, active flag %d, deferring %s recovery.\n", ioc->name, ioc->sh->host_no, SCpnt->device->id, SCpnt->device->lun, ready, ioc->active, caller)); msleep(1000); spin_lock_irqsave(shost->host_lock, flags); loops --; } spin_unlock_irqrestore(shost->host_lock, flags); if (ready == DID_NO_CONNECT || !SCpnt->device->hostdata || ioc->active == 0) { dfcprintk (ioc, printk(MYIOC_s_DEBUG_FMT "%s.%d: %d:%d, failing recovery, " "port state %x, active %d, vdevice %p.\n", caller, ioc->name, ioc->sh->host_no, SCpnt->device->id, SCpnt->device->lun, ready, ioc->active, SCpnt->device->hostdata)); return FAILED; } dfcprintk (ioc, printk(MYIOC_s_DEBUG_FMT "%s.%d: %d:%d, executing recovery.\n", caller, ioc->name, ioc->sh->host_no, SCpnt->device->id, SCpnt->device->lun)); return (*func)(SCpnt); } static int mptfc_abort(struct scsi_cmnd *SCpnt) { return mptfc_block_error_handler(SCpnt, mptscsih_abort, __func__); } static int mptfc_dev_reset(struct scsi_cmnd *SCpnt) { return mptfc_block_error_handler(SCpnt, mptscsih_dev_reset, __func__); } static int mptfc_bus_reset(struct scsi_cmnd *SCpnt) { return mptfc_block_error_handler(SCpnt, mptscsih_bus_reset, __func__); } static int mptfc_host_reset(struct scsi_cmnd *SCpnt) { return mptfc_block_error_handler(SCpnt, mptscsih_host_reset, __func__); } static void mptfc_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout) { if (timeout > 0) rport->dev_loss_tmo = timeout; else rport->dev_loss_tmo = mptfc_dev_loss_tmo; } static int mptfc_FcDevPage0_cmp_func(const void *a, const void *b) { FCDevicePage0_t **aa = (FCDevicePage0_t **)a; FCDevicePage0_t **bb = (FCDevicePage0_t **)b; if ((*aa)->CurrentBus == (*bb)->CurrentBus) { if ((*aa)->CurrentTargetID 
== (*bb)->CurrentTargetID) return 0; if ((*aa)->CurrentTargetID < (*bb)->CurrentTargetID) return -1; return 1; } if ((*aa)->CurrentBus < (*bb)->CurrentBus) return -1; return 1; } static int mptfc_GetFcDevPage0(MPT_ADAPTER *ioc, int ioc_port, void(*func)(MPT_ADAPTER *ioc,int channel, FCDevicePage0_t *arg)) { ConfigPageHeader_t hdr; CONFIGPARMS cfg; FCDevicePage0_t *ppage0_alloc, *fc; dma_addr_t page0_dma; int data_sz; int ii; FCDevicePage0_t *p0_array=NULL, *p_p0; FCDevicePage0_t **pp0_array=NULL, **p_pp0; int rc = -ENOMEM; U32 port_id = 0xffffff; int num_targ = 0; int max_bus = ioc->facts.MaxBuses; int max_targ; max_targ = (ioc->facts.MaxDevices == 0) ? 256 : ioc->facts.MaxDevices; data_sz = sizeof(FCDevicePage0_t) * max_bus * max_targ; p_p0 = p0_array = kzalloc(data_sz, GFP_KERNEL); if (!p0_array) goto out; data_sz = sizeof(FCDevicePage0_t *) * max_bus * max_targ; p_pp0 = pp0_array = kzalloc(data_sz, GFP_KERNEL); if (!pp0_array) goto out; do { /* Get FC Device Page 0 header */ hdr.PageVersion = 0; hdr.PageLength = 0; hdr.PageNumber = 0; hdr.PageType = MPI_CONFIG_PAGETYPE_FC_DEVICE; cfg.cfghdr.hdr = &hdr; cfg.physAddr = -1; cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER; cfg.dir = 0; cfg.pageAddr = port_id; cfg.timeout = 0; if ((rc = mpt_config(ioc, &cfg)) != 0) break; if (hdr.PageLength <= 0) break; data_sz = hdr.PageLength * 4; ppage0_alloc = pci_alloc_consistent(ioc->pcidev, data_sz, &page0_dma); rc = -ENOMEM; if (!ppage0_alloc) break; cfg.physAddr = page0_dma; cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT; if ((rc = mpt_config(ioc, &cfg)) == 0) { ppage0_alloc->PortIdentifier = le32_to_cpu(ppage0_alloc->PortIdentifier); ppage0_alloc->WWNN.Low = le32_to_cpu(ppage0_alloc->WWNN.Low); ppage0_alloc->WWNN.High = le32_to_cpu(ppage0_alloc->WWNN.High); ppage0_alloc->WWPN.Low = le32_to_cpu(ppage0_alloc->WWPN.Low); ppage0_alloc->WWPN.High = le32_to_cpu(ppage0_alloc->WWPN.High); ppage0_alloc->BBCredit = le16_to_cpu(ppage0_alloc->BBCredit); ppage0_alloc->MaxRxFrameSize = 
le16_to_cpu(ppage0_alloc->MaxRxFrameSize); port_id = ppage0_alloc->PortIdentifier; num_targ++; *p_p0 = *ppage0_alloc; /* save data */ *p_pp0++ = p_p0++; /* save addr */ } pci_free_consistent(ioc->pcidev, data_sz, (u8 *) ppage0_alloc, page0_dma); if (rc != 0) break; } while (port_id <= 0xff0000); if (num_targ) { /* sort array */ if (num_targ > 1) sort (pp0_array, num_targ, sizeof(FCDevicePage0_t *), mptfc_FcDevPage0_cmp_func, NULL); /* call caller's func for each targ */ for (ii = 0; ii < num_targ; ii++) { fc = *(pp0_array+ii); func(ioc, ioc_port, fc); } } out: kfree(pp0_array); kfree(p0_array); return rc; } static int mptfc_generate_rport_ids(FCDevicePage0_t *pg0, struct fc_rport_identifiers *rid) { /* not currently usable */ if (pg0->Flags & (MPI_FC_DEVICE_PAGE0_FLAGS_PLOGI_INVALID | MPI_FC_DEVICE_PAGE0_FLAGS_PRLI_INVALID)) return -1; if (!(pg0->Flags & MPI_FC_DEVICE_PAGE0_FLAGS_TARGETID_BUS_VALID)) return -1; if (!(pg0->Protocol & MPI_FC_DEVICE_PAGE0_PROT_FCP_TARGET)) return -1; /* * board data structure already normalized to platform endianness * shifted to avoid unaligned access on 64 bit architecture */ rid->node_name = ((u64)pg0->WWNN.High) << 32 | (u64)pg0->WWNN.Low; rid->port_name = ((u64)pg0->WWPN.High) << 32 | (u64)pg0->WWPN.Low; rid->port_id = pg0->PortIdentifier; rid->roles = FC_RPORT_ROLE_UNKNOWN; return 0; } static void mptfc_register_dev(MPT_ADAPTER *ioc, int channel, FCDevicePage0_t *pg0) { struct fc_rport_identifiers rport_ids; struct fc_rport *rport; struct mptfc_rport_info *ri; int new_ri = 1; u64 pn, nn; VirtTarget *vtarget; u32 roles = FC_RPORT_ROLE_UNKNOWN; if (mptfc_generate_rport_ids(pg0, &rport_ids) < 0) return; roles |= FC_RPORT_ROLE_FCP_TARGET; if (pg0->Protocol & MPI_FC_DEVICE_PAGE0_PROT_FCP_INITIATOR) roles |= FC_RPORT_ROLE_FCP_INITIATOR; /* scan list looking for a match */ list_for_each_entry(ri, &ioc->fc_rports, list) { pn = (u64)ri->pg0.WWPN.High << 32 | (u64)ri->pg0.WWPN.Low; if (pn == rport_ids.port_name) { /* match */ 
list_move_tail(&ri->list, &ioc->fc_rports); new_ri = 0; break; } } if (new_ri) { /* allocate one */ ri = kzalloc(sizeof(struct mptfc_rport_info), GFP_KERNEL); if (!ri) return; list_add_tail(&ri->list, &ioc->fc_rports); } ri->pg0 = *pg0; /* add/update pg0 data */ ri->flags &= ~MPT_RPORT_INFO_FLAGS_MISSING; /* MPT_RPORT_INFO_FLAGS_REGISTERED - rport not previously deleted */ if (!(ri->flags & MPT_RPORT_INFO_FLAGS_REGISTERED)) { ri->flags |= MPT_RPORT_INFO_FLAGS_REGISTERED; rport = fc_remote_port_add(ioc->sh, channel, &rport_ids); if (rport) { ri->rport = rport; if (new_ri) /* may have been reset by user */ rport->dev_loss_tmo = mptfc_dev_loss_tmo; /* * if already mapped, remap here. If not mapped, * target_alloc will allocate vtarget and map, * slave_alloc will fill in vdevice from vtarget. */ if (ri->starget) { vtarget = ri->starget->hostdata; if (vtarget) { vtarget->id = pg0->CurrentTargetID; vtarget->channel = pg0->CurrentBus; vtarget->deleted = 0; } } *((struct mptfc_rport_info **)rport->dd_data) = ri; /* scan will be scheduled once rport becomes a target */ fc_remote_port_rolechg(rport,roles); pn = (u64)ri->pg0.WWPN.High << 32 | (u64)ri->pg0.WWPN.Low; nn = (u64)ri->pg0.WWNN.High << 32 | (u64)ri->pg0.WWNN.Low; dfcprintk (ioc, printk(MYIOC_s_DEBUG_FMT "mptfc_reg_dev.%d: %x, %llx / %llx, tid %d, " "rport tid %d, tmo %d\n", ioc->name, ioc->sh->host_no, pg0->PortIdentifier, (unsigned long long)nn, (unsigned long long)pn, pg0->CurrentTargetID, ri->rport->scsi_target_id, ri->rport->dev_loss_tmo)); } else { list_del(&ri->list); kfree(ri); ri = NULL; } } } /* * OS entry point to allow for host driver to free allocated memory * Called if no device present or device being unloaded */ static void mptfc_target_destroy(struct scsi_target *starget) { struct fc_rport *rport; struct mptfc_rport_info *ri; rport = starget_to_rport(starget); if (rport) { ri = *((struct mptfc_rport_info **)rport->dd_data); if (ri) /* better be! 
*/ ri->starget = NULL; } if (starget->hostdata) kfree(starget->hostdata); starget->hostdata = NULL; } /* * OS entry point to allow host driver to alloc memory * for each scsi target. Called once per device the bus scan. * Return non-zero if allocation fails. */ static int mptfc_target_alloc(struct scsi_target *starget) { VirtTarget *vtarget; struct fc_rport *rport; struct mptfc_rport_info *ri; int rc; vtarget = kzalloc(sizeof(VirtTarget), GFP_KERNEL); if (!vtarget) return -ENOMEM; starget->hostdata = vtarget; rc = -ENODEV; rport = starget_to_rport(starget); if (rport) { ri = *((struct mptfc_rport_info **)rport->dd_data); if (ri) { /* better be! */ vtarget->id = ri->pg0.CurrentTargetID; vtarget->channel = ri->pg0.CurrentBus; ri->starget = starget; rc = 0; } } if (rc != 0) { kfree(vtarget); starget->hostdata = NULL; } return rc; } /* * mptfc_dump_lun_info * @ioc * @rport * @sdev * */ static void mptfc_dump_lun_info(MPT_ADAPTER *ioc, struct fc_rport *rport, struct scsi_device *sdev, VirtTarget *vtarget) { u64 nn, pn; struct mptfc_rport_info *ri; ri = *((struct mptfc_rport_info **)rport->dd_data); pn = (u64)ri->pg0.WWPN.High << 32 | (u64)ri->pg0.WWPN.Low; nn = (u64)ri->pg0.WWNN.High << 32 | (u64)ri->pg0.WWNN.Low; dfcprintk (ioc, printk(MYIOC_s_DEBUG_FMT "mptfc_slv_alloc.%d: num_luns %d, sdev.id %d, " "CurrentTargetID %d, %x %llx %llx\n", ioc->name, sdev->host->host_no, vtarget->num_luns, sdev->id, ri->pg0.CurrentTargetID, ri->pg0.PortIdentifier, (unsigned long long)pn, (unsigned long long)nn)); } /* * OS entry point to allow host driver to alloc memory * for each scsi device. Called once per device the bus scan. * Return non-zero if allocation fails. * Init memory once per LUN. 
*/ static int mptfc_slave_alloc(struct scsi_device *sdev) { MPT_SCSI_HOST *hd; VirtTarget *vtarget; VirtDevice *vdevice; struct scsi_target *starget; struct fc_rport *rport; MPT_ADAPTER *ioc; starget = scsi_target(sdev); rport = starget_to_rport(starget); if (!rport || fc_remote_port_chkready(rport)) return -ENXIO; hd = shost_priv(sdev->host); ioc = hd->ioc; vdevice = kzalloc(sizeof(VirtDevice), GFP_KERNEL); if (!vdevice) { printk(MYIOC_s_ERR_FMT "slave_alloc kmalloc(%zd) FAILED!\n", ioc->name, sizeof(VirtDevice)); return -ENOMEM; } sdev->hostdata = vdevice; vtarget = starget->hostdata; if (vtarget->num_luns == 0) { vtarget->ioc_id = ioc->id; vtarget->tflags = MPT_TARGET_FLAGS_Q_YES; } vdevice->vtarget = vtarget; vdevice->lun = sdev->lun; vtarget->num_luns++; mptfc_dump_lun_info(ioc, rport, sdev, vtarget); return 0; } static int mptfc_qcmd_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *)) { struct mptfc_rport_info *ri; struct fc_rport *rport = starget_to_rport(scsi_target(SCpnt->device)); int err; VirtDevice *vdevice = SCpnt->device->hostdata; if (!vdevice || !vdevice->vtarget) { SCpnt->result = DID_NO_CONNECT << 16; done(SCpnt); return 0; } err = fc_remote_port_chkready(rport); if (unlikely(err)) { SCpnt->result = err; done(SCpnt); return 0; } /* dd_data is null until finished adding target */ ri = *((struct mptfc_rport_info **)rport->dd_data); if (unlikely(!ri)) { SCpnt->result = DID_IMM_RETRY << 16; done(SCpnt); return 0; } return mptscsih_qcmd(SCpnt,done); } static DEF_SCSI_QCMD(mptfc_qcmd) /* * mptfc_display_port_link_speed - displaying link speed * @ioc: Pointer to MPT_ADAPTER structure * @portnum: IOC Port number * @pp0dest: port page0 data payload * */ static void mptfc_display_port_link_speed(MPT_ADAPTER *ioc, int portnum, FCPortPage0_t *pp0dest) { u8 old_speed, new_speed, state; char *old, *new; if (portnum >= 2) return; old_speed = ioc->fc_link_speed[portnum]; new_speed = pp0dest->CurrentSpeed; state = pp0dest->PortState; if (state != 
MPI_FCPORTPAGE0_PORTSTATE_OFFLINE && new_speed != MPI_FCPORTPAGE0_CURRENT_SPEED_UKNOWN) { old = old_speed == MPI_FCPORTPAGE0_CURRENT_SPEED_1GBIT ? "1 Gbps" : old_speed == MPI_FCPORTPAGE0_CURRENT_SPEED_2GBIT ? "2 Gbps" : old_speed == MPI_FCPORTPAGE0_CURRENT_SPEED_4GBIT ? "4 Gbps" : "Unknown"; new = new_speed == MPI_FCPORTPAGE0_CURRENT_SPEED_1GBIT ? "1 Gbps" : new_speed == MPI_FCPORTPAGE0_CURRENT_SPEED_2GBIT ? "2 Gbps" : new_speed == MPI_FCPORTPAGE0_CURRENT_SPEED_4GBIT ? "4 Gbps" : "Unknown"; if (old_speed == 0) printk(MYIOC_s_NOTE_FMT "FC Link Established, Speed = %s\n", ioc->name, new); else if (old_speed != new_speed) printk(MYIOC_s_WARN_FMT "FC Link Speed Change, Old Speed = %s, New Speed = %s\n", ioc->name, old, new); ioc->fc_link_speed[portnum] = new_speed; } } /* * mptfc_GetFcPortPage0 - Fetch FCPort config Page0. * @ioc: Pointer to MPT_ADAPTER structure * @portnum: IOC Port number * * Return: 0 for success * -ENOMEM if no memory available * -EPERM if not allowed due to ISR context * -EAGAIN if no msg frames currently available * -EFAULT for non-successful reply or no reply (timeout) * -EINVAL portnum arg out of range (hardwired to two elements) */ static int mptfc_GetFcPortPage0(MPT_ADAPTER *ioc, int portnum) { ConfigPageHeader_t hdr; CONFIGPARMS cfg; FCPortPage0_t *ppage0_alloc; FCPortPage0_t *pp0dest; dma_addr_t page0_dma; int data_sz; int copy_sz; int rc; int count = 400; if (portnum > 1) return -EINVAL; /* Get FCPort Page 0 header */ hdr.PageVersion = 0; hdr.PageLength = 0; hdr.PageNumber = 0; hdr.PageType = MPI_CONFIG_PAGETYPE_FC_PORT; cfg.cfghdr.hdr = &hdr; cfg.physAddr = -1; cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER; cfg.dir = 0; cfg.pageAddr = portnum; cfg.timeout = 0; if ((rc = mpt_config(ioc, &cfg)) != 0) return rc; if (hdr.PageLength == 0) return 0; data_sz = hdr.PageLength * 4; rc = -ENOMEM; ppage0_alloc = (FCPortPage0_t *) pci_alloc_consistent(ioc->pcidev, data_sz, &page0_dma); if (ppage0_alloc) { try_again: memset((u8 *)ppage0_alloc, 0, 
data_sz); cfg.physAddr = page0_dma; cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT; if ((rc = mpt_config(ioc, &cfg)) == 0) { /* save the data */ pp0dest = &ioc->fc_port_page0[portnum]; copy_sz = min_t(int, sizeof(FCPortPage0_t), data_sz); memcpy(pp0dest, ppage0_alloc, copy_sz); /* * Normalize endianness of structure data, * by byte-swapping all > 1 byte fields! */ pp0dest->Flags = le32_to_cpu(pp0dest->Flags); pp0dest->PortIdentifier = le32_to_cpu(pp0dest->PortIdentifier); pp0dest->WWNN.Low = le32_to_cpu(pp0dest->WWNN.Low); pp0dest->WWNN.High = le32_to_cpu(pp0dest->WWNN.High); pp0dest->WWPN.Low = le32_to_cpu(pp0dest->WWPN.Low); pp0dest->WWPN.High = le32_to_cpu(pp0dest->WWPN.High); pp0dest->SupportedServiceClass = le32_to_cpu(pp0dest->SupportedServiceClass); pp0dest->SupportedSpeeds = le32_to_cpu(pp0dest->SupportedSpeeds); pp0dest->CurrentSpeed = le32_to_cpu(pp0dest->CurrentSpeed); pp0dest->MaxFrameSize = le32_to_cpu(pp0dest->MaxFrameSize); pp0dest->FabricWWNN.Low = le32_to_cpu(pp0dest->FabricWWNN.Low); pp0dest->FabricWWNN.High = le32_to_cpu(pp0dest->FabricWWNN.High); pp0dest->FabricWWPN.Low = le32_to_cpu(pp0dest->FabricWWPN.Low); pp0dest->FabricWWPN.High = le32_to_cpu(pp0dest->FabricWWPN.High); pp0dest->DiscoveredPortsCount = le32_to_cpu(pp0dest->DiscoveredPortsCount); pp0dest->MaxInitiators = le32_to_cpu(pp0dest->MaxInitiators); /* * if still doing discovery, * hang loose a while until finished */ if ((pp0dest->PortState == MPI_FCPORTPAGE0_PORTSTATE_UNKNOWN) || (pp0dest->PortState == MPI_FCPORTPAGE0_PORTSTATE_ONLINE && (pp0dest->Flags & MPI_FCPORTPAGE0_FLAGS_ATTACH_TYPE_MASK) == MPI_FCPORTPAGE0_FLAGS_ATTACH_NO_INIT)) { if (count-- > 0) { msleep(100); goto try_again; } printk(MYIOC_s_INFO_FMT "Firmware discovery not" " complete.\n", ioc->name); } mptfc_display_port_link_speed(ioc, portnum, pp0dest); } pci_free_consistent(ioc->pcidev, data_sz, (u8 *) ppage0_alloc, page0_dma); } return rc; } static int mptfc_WriteFcPortPage1(MPT_ADAPTER *ioc, int portnum) { 
ConfigPageHeader_t hdr; CONFIGPARMS cfg; int rc; if (portnum > 1) return -EINVAL; if (!(ioc->fc_data.fc_port_page1[portnum].data)) return -EINVAL; /* get fcport page 1 header */ hdr.PageVersion = 0; hdr.PageLength = 0; hdr.PageNumber = 1; hdr.PageType = MPI_CONFIG_PAGETYPE_FC_PORT; cfg.cfghdr.hdr = &hdr; cfg.physAddr = -1; cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER; cfg.dir = 0; cfg.pageAddr = portnum; cfg.timeout = 0; if ((rc = mpt_config(ioc, &cfg)) != 0) return rc; if (hdr.PageLength == 0) return -ENODEV; if (hdr.PageLength*4 != ioc->fc_data.fc_port_page1[portnum].pg_sz) return -EINVAL; cfg.physAddr = ioc->fc_data.fc_port_page1[portnum].dma; cfg.action = MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT; cfg.dir = 1; rc = mpt_config(ioc, &cfg); return rc; } static int mptfc_GetFcPortPage1(MPT_ADAPTER *ioc, int portnum) { ConfigPageHeader_t hdr; CONFIGPARMS cfg; FCPortPage1_t *page1_alloc; dma_addr_t page1_dma; int data_sz; int rc; if (portnum > 1) return -EINVAL; /* get fcport page 1 header */ hdr.PageVersion = 0; hdr.PageLength = 0; hdr.PageNumber = 1; hdr.PageType = MPI_CONFIG_PAGETYPE_FC_PORT; cfg.cfghdr.hdr = &hdr; cfg.physAddr = -1; cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER; cfg.dir = 0; cfg.pageAddr = portnum; cfg.timeout = 0; if ((rc = mpt_config(ioc, &cfg)) != 0) return rc; if (hdr.PageLength == 0) return -ENODEV; start_over: if (ioc->fc_data.fc_port_page1[portnum].data == NULL) { data_sz = hdr.PageLength * 4; if (data_sz < sizeof(FCPortPage1_t)) data_sz = sizeof(FCPortPage1_t); page1_alloc = (FCPortPage1_t *) pci_alloc_consistent(ioc->pcidev, data_sz, &page1_dma); if (!page1_alloc) return -ENOMEM; } else { page1_alloc = ioc->fc_data.fc_port_page1[portnum].data; page1_dma = ioc->fc_data.fc_port_page1[portnum].dma; data_sz = ioc->fc_data.fc_port_page1[portnum].pg_sz; if (hdr.PageLength * 4 > data_sz) { ioc->fc_data.fc_port_page1[portnum].data = NULL; pci_free_consistent(ioc->pcidev, data_sz, (u8 *) page1_alloc, page1_dma); goto start_over; } } 
memset(page1_alloc,0,data_sz); cfg.physAddr = page1_dma; cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT; if ((rc = mpt_config(ioc, &cfg)) == 0) { ioc->fc_data.fc_port_page1[portnum].data = page1_alloc; ioc->fc_data.fc_port_page1[portnum].pg_sz = data_sz; ioc->fc_data.fc_port_page1[portnum].dma = page1_dma; } else { ioc->fc_data.fc_port_page1[portnum].data = NULL; pci_free_consistent(ioc->pcidev, data_sz, (u8 *) page1_alloc, page1_dma); } return rc; } static void mptfc_SetFcPortPage1_defaults(MPT_ADAPTER *ioc) { int ii; FCPortPage1_t *pp1; #define MPTFC_FW_DEVICE_TIMEOUT (1) #define MPTFC_FW_IO_PEND_TIMEOUT (1) #define ON_FLAGS (MPI_FCPORTPAGE1_FLAGS_IMMEDIATE_ERROR_REPLY) #define OFF_FLAGS (MPI_FCPORTPAGE1_FLAGS_VERBOSE_RESCAN_EVENTS) for (ii=0; ii<ioc->facts.NumberOfPorts; ii++) { if (mptfc_GetFcPortPage1(ioc, ii) != 0) continue; pp1 = ioc->fc_data.fc_port_page1[ii].data; if ((pp1->InitiatorDeviceTimeout == MPTFC_FW_DEVICE_TIMEOUT) && (pp1->InitiatorIoPendTimeout == MPTFC_FW_IO_PEND_TIMEOUT) && ((pp1->Flags & ON_FLAGS) == ON_FLAGS) && ((pp1->Flags & OFF_FLAGS) == 0)) continue; pp1->InitiatorDeviceTimeout = MPTFC_FW_DEVICE_TIMEOUT; pp1->InitiatorIoPendTimeout = MPTFC_FW_IO_PEND_TIMEOUT; pp1->Flags &= ~OFF_FLAGS; pp1->Flags |= ON_FLAGS; mptfc_WriteFcPortPage1(ioc, ii); } } static void mptfc_init_host_attr(MPT_ADAPTER *ioc,int portnum) { unsigned class = 0; unsigned cos = 0; unsigned speed; unsigned port_type; unsigned port_state; FCPortPage0_t *pp0; struct Scsi_Host *sh; char *sn; /* don't know what to do as only one scsi (fc) host was allocated */ if (portnum != 0) return; pp0 = &ioc->fc_port_page0[portnum]; sh = ioc->sh; sn = fc_host_symbolic_name(sh); snprintf(sn, FC_SYMBOLIC_NAME_SIZE, "%s %s%08xh", ioc->prod_name, MPT_FW_REV_MAGIC_ID_STRING, ioc->facts.FWVersion.Word); fc_host_tgtid_bind_type(sh) = FC_TGTID_BIND_BY_WWPN; fc_host_maxframe_size(sh) = pp0->MaxFrameSize; fc_host_node_name(sh) = (u64)pp0->WWNN.High << 32 | (u64)pp0->WWNN.Low; fc_host_port_name(sh) 
= (u64)pp0->WWPN.High << 32 | (u64)pp0->WWPN.Low; fc_host_port_id(sh) = pp0->PortIdentifier; class = pp0->SupportedServiceClass; if (class & MPI_FCPORTPAGE0_SUPPORT_CLASS_1) cos |= FC_COS_CLASS1; if (class & MPI_FCPORTPAGE0_SUPPORT_CLASS_2) cos |= FC_COS_CLASS2; if (class & MPI_FCPORTPAGE0_SUPPORT_CLASS_3) cos |= FC_COS_CLASS3; fc_host_supported_classes(sh) = cos; if (pp0->CurrentSpeed == MPI_FCPORTPAGE0_CURRENT_SPEED_1GBIT) speed = FC_PORTSPEED_1GBIT; else if (pp0->CurrentSpeed == MPI_FCPORTPAGE0_CURRENT_SPEED_2GBIT) speed = FC_PORTSPEED_2GBIT; else if (pp0->CurrentSpeed == MPI_FCPORTPAGE0_CURRENT_SPEED_4GBIT) speed = FC_PORTSPEED_4GBIT; else if (pp0->CurrentSpeed == MPI_FCPORTPAGE0_CURRENT_SPEED_10GBIT) speed = FC_PORTSPEED_10GBIT; else speed = FC_PORTSPEED_UNKNOWN; fc_host_speed(sh) = speed; speed = 0; if (pp0->SupportedSpeeds & MPI_FCPORTPAGE0_SUPPORT_1GBIT_SPEED) speed |= FC_PORTSPEED_1GBIT; if (pp0->SupportedSpeeds & MPI_FCPORTPAGE0_SUPPORT_2GBIT_SPEED) speed |= FC_PORTSPEED_2GBIT; if (pp0->SupportedSpeeds & MPI_FCPORTPAGE0_SUPPORT_4GBIT_SPEED) speed |= FC_PORTSPEED_4GBIT; if (pp0->SupportedSpeeds & MPI_FCPORTPAGE0_SUPPORT_10GBIT_SPEED) speed |= FC_PORTSPEED_10GBIT; fc_host_supported_speeds(sh) = speed; port_state = FC_PORTSTATE_UNKNOWN; if (pp0->PortState == MPI_FCPORTPAGE0_PORTSTATE_ONLINE) port_state = FC_PORTSTATE_ONLINE; else if (pp0->PortState == MPI_FCPORTPAGE0_PORTSTATE_OFFLINE) port_state = FC_PORTSTATE_LINKDOWN; fc_host_port_state(sh) = port_state; port_type = FC_PORTTYPE_UNKNOWN; if (pp0->Flags & MPI_FCPORTPAGE0_FLAGS_ATTACH_POINT_TO_POINT) port_type = FC_PORTTYPE_PTP; else if (pp0->Flags & MPI_FCPORTPAGE0_FLAGS_ATTACH_PRIVATE_LOOP) port_type = FC_PORTTYPE_LPORT; else if (pp0->Flags & MPI_FCPORTPAGE0_FLAGS_ATTACH_PUBLIC_LOOP) port_type = FC_PORTTYPE_NLPORT; else if (pp0->Flags & MPI_FCPORTPAGE0_FLAGS_ATTACH_FABRIC_DIRECT) port_type = FC_PORTTYPE_NPORT; fc_host_port_type(sh) = port_type; fc_host_fabric_name(sh) = (pp0->Flags & 
MPI_FCPORTPAGE0_FLAGS_FABRIC_WWN_VALID) ? (u64) pp0->FabricWWNN.High << 32 | (u64) pp0->FabricWWPN.Low : (u64)pp0->WWNN.High << 32 | (u64)pp0->WWNN.Low; } static void mptfc_link_status_change(struct work_struct *work) { MPT_ADAPTER *ioc = container_of(work, MPT_ADAPTER, fc_rescan_work); int ii; for (ii=0; ii < ioc->facts.NumberOfPorts; ii++) (void) mptfc_GetFcPortPage0(ioc, ii); } static void mptfc_setup_reset(struct work_struct *work) { MPT_ADAPTER *ioc = container_of(work, MPT_ADAPTER, fc_setup_reset_work); u64 pn; struct mptfc_rport_info *ri; struct scsi_target *starget; VirtTarget *vtarget; /* reset about to happen, delete (block) all rports */ list_for_each_entry(ri, &ioc->fc_rports, list) { if (ri->flags & MPT_RPORT_INFO_FLAGS_REGISTERED) { ri->flags &= ~MPT_RPORT_INFO_FLAGS_REGISTERED; fc_remote_port_delete(ri->rport); /* won't sleep */ ri->rport = NULL; starget = ri->starget; if (starget) { vtarget = starget->hostdata; if (vtarget) vtarget->deleted = 1; } pn = (u64)ri->pg0.WWPN.High << 32 | (u64)ri->pg0.WWPN.Low; dfcprintk (ioc, printk(MYIOC_s_DEBUG_FMT "mptfc_setup_reset.%d: %llx deleted\n", ioc->name, ioc->sh->host_no, (unsigned long long)pn)); } } } static void mptfc_rescan_devices(struct work_struct *work) { MPT_ADAPTER *ioc = container_of(work, MPT_ADAPTER, fc_rescan_work); int ii; u64 pn; struct mptfc_rport_info *ri; struct scsi_target *starget; VirtTarget *vtarget; /* start by tagging all ports as missing */ list_for_each_entry(ri, &ioc->fc_rports, list) { if (ri->flags & MPT_RPORT_INFO_FLAGS_REGISTERED) { ri->flags |= MPT_RPORT_INFO_FLAGS_MISSING; } } /* * now rescan devices known to adapter, * will reregister existing rports */ for (ii=0; ii < ioc->facts.NumberOfPorts; ii++) { (void) mptfc_GetFcPortPage0(ioc, ii); mptfc_init_host_attr(ioc, ii); /* refresh */ mptfc_GetFcDevPage0(ioc, ii, mptfc_register_dev); } /* delete devices still missing */ list_for_each_entry(ri, &ioc->fc_rports, list) { /* if newly missing, delete it */ if (ri->flags & 
MPT_RPORT_INFO_FLAGS_MISSING) { ri->flags &= ~(MPT_RPORT_INFO_FLAGS_REGISTERED| MPT_RPORT_INFO_FLAGS_MISSING); fc_remote_port_delete(ri->rport); /* won't sleep */ ri->rport = NULL; starget = ri->starget; if (starget) { vtarget = starget->hostdata; if (vtarget) vtarget->deleted = 1; } pn = (u64)ri->pg0.WWPN.High << 32 | (u64)ri->pg0.WWPN.Low; dfcprintk (ioc, printk(MYIOC_s_DEBUG_FMT "mptfc_rescan.%d: %llx deleted\n", ioc->name, ioc->sh->host_no, (unsigned long long)pn)); } } } static int mptfc_probe(struct pci_dev *pdev, const struct pci_device_id *id) { struct Scsi_Host *sh; MPT_SCSI_HOST *hd; MPT_ADAPTER *ioc; unsigned long flags; int ii; int numSGE = 0; int scale; int ioc_cap; int error=0; int r; if ((r = mpt_attach(pdev,id)) != 0) return r; ioc = pci_get_drvdata(pdev); ioc->DoneCtx = mptfcDoneCtx; ioc->TaskCtx = mptfcTaskCtx; ioc->InternalCtx = mptfcInternalCtx; /* Added sanity check on readiness of the MPT adapter. */ if (ioc->last_state != MPI_IOC_STATE_OPERATIONAL) { printk(MYIOC_s_WARN_FMT "Skipping because it's not operational!\n", ioc->name); error = -ENODEV; goto out_mptfc_probe; } if (!ioc->active) { printk(MYIOC_s_WARN_FMT "Skipping because it's disabled!\n", ioc->name); error = -ENODEV; goto out_mptfc_probe; } /* Sanity check - ensure at least 1 port is INITIATOR capable */ ioc_cap = 0; for (ii=0; ii < ioc->facts.NumberOfPorts; ii++) { if (ioc->pfacts[ii].ProtocolFlags & MPI_PORTFACTS_PROTOCOL_INITIATOR) ioc_cap ++; } if (!ioc_cap) { printk(MYIOC_s_WARN_FMT "Skipping ioc=%p because SCSI Initiator mode is NOT enabled!\n", ioc->name, ioc); return 0; } sh = scsi_host_alloc(&mptfc_driver_template, sizeof(MPT_SCSI_HOST)); if (!sh) { printk(MYIOC_s_WARN_FMT "Unable to register controller with SCSI subsystem\n", ioc->name); error = -1; goto out_mptfc_probe; } spin_lock_init(&ioc->fc_rescan_work_lock); INIT_WORK(&ioc->fc_rescan_work, mptfc_rescan_devices); INIT_WORK(&ioc->fc_setup_reset_work, mptfc_setup_reset); INIT_WORK(&ioc->fc_lsc_work, 
mptfc_link_status_change); spin_lock_irqsave(&ioc->FreeQlock, flags); /* Attach the SCSI Host to the IOC structure */ ioc->sh = sh; sh->io_port = 0; sh->n_io_port = 0; sh->irq = 0; /* set 16 byte cdb's */ sh->max_cmd_len = 16; sh->max_id = ioc->pfacts->MaxDevices; sh->max_lun = max_lun; /* Required entry. */ sh->unique_id = ioc->id; /* Verify that we won't exceed the maximum * number of chain buffers * We can optimize: ZZ = req_sz/sizeof(SGE) * For 32bit SGE's: * numSGE = 1 + (ZZ-1)*(maxChain -1) + ZZ * + (req_sz - 64)/sizeof(SGE) * A slightly different algorithm is required for * 64bit SGEs. */ scale = ioc->req_sz/ioc->SGE_size; if (ioc->sg_addr_size == sizeof(u64)) { numSGE = (scale - 1) * (ioc->facts.MaxChainDepth-1) + scale + (ioc->req_sz - 60) / ioc->SGE_size; } else { numSGE = 1 + (scale - 1) * (ioc->facts.MaxChainDepth-1) + scale + (ioc->req_sz - 64) / ioc->SGE_size; } if (numSGE < sh->sg_tablesize) { /* Reset this value */ dprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Resetting sg_tablesize to %d from %d\n", ioc->name, numSGE, sh->sg_tablesize)); sh->sg_tablesize = numSGE; } spin_unlock_irqrestore(&ioc->FreeQlock, flags); hd = shost_priv(sh); hd->ioc = ioc; /* SCSI needs scsi_cmnd lookup table! * (with size equal to req_depth*PtrSz!) 
*/ ioc->ScsiLookup = kcalloc(ioc->req_depth, sizeof(void *), GFP_ATOMIC); if (!ioc->ScsiLookup) { error = -ENOMEM; goto out_mptfc_probe; } spin_lock_init(&ioc->scsi_lookup_lock); dprintk(ioc, printk(MYIOC_s_DEBUG_FMT "ScsiLookup @ %p\n", ioc->name, ioc->ScsiLookup)); hd->last_queue_full = 0; sh->transportt = mptfc_transport_template; error = scsi_add_host (sh, &ioc->pcidev->dev); if(error) { dprintk(ioc, printk(MYIOC_s_ERR_FMT "scsi_add_host failed\n", ioc->name)); goto out_mptfc_probe; } /* initialize workqueue */ snprintf(ioc->fc_rescan_work_q_name, sizeof(ioc->fc_rescan_work_q_name), "mptfc_wq_%d", sh->host_no); ioc->fc_rescan_work_q = create_singlethread_workqueue(ioc->fc_rescan_work_q_name); if (!ioc->fc_rescan_work_q) goto out_mptfc_probe; /* * Pre-fetch FC port WWN and stuff... * (FCPortPage0_t stuff) */ for (ii=0; ii < ioc->facts.NumberOfPorts; ii++) { (void) mptfc_GetFcPortPage0(ioc, ii); } mptfc_SetFcPortPage1_defaults(ioc); /* * scan for rports - * by doing it via the workqueue, some locking is eliminated */ queue_work(ioc->fc_rescan_work_q, &ioc->fc_rescan_work); flush_workqueue(ioc->fc_rescan_work_q); return 0; out_mptfc_probe: mptscsih_remove(pdev); return error; } static struct pci_driver mptfc_driver = { .name = "mptfc", .id_table = mptfc_pci_table, .probe = mptfc_probe, .remove = __devexit_p(mptfc_remove), .shutdown = mptscsih_shutdown, #ifdef CONFIG_PM .suspend = mptscsih_suspend, .resume = mptscsih_resume, #endif }; static int mptfc_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply) { MPT_SCSI_HOST *hd; u8 event = le32_to_cpu(pEvReply->Event) & 0xFF; unsigned long flags; int rc=1; if (ioc->bus_type != FC) return 0; devtverboseprintk(ioc, printk(MYIOC_s_DEBUG_FMT "MPT event (=%02Xh) routed to SCSI host driver!\n", ioc->name, event)); if (ioc->sh == NULL || ((hd = shost_priv(ioc->sh)) == NULL)) return 1; switch (event) { case MPI_EVENT_RESCAN: spin_lock_irqsave(&ioc->fc_rescan_work_lock, flags); if (ioc->fc_rescan_work_q) { 
			queue_work(ioc->fc_rescan_work_q,
				   &ioc->fc_rescan_work);
		}
		spin_unlock_irqrestore(&ioc->fc_rescan_work_lock, flags);
		break;
	case MPI_EVENT_LINK_STATUS_CHANGE:
		/* same workqueue, separate work item for link changes */
		spin_lock_irqsave(&ioc->fc_rescan_work_lock, flags);
		if (ioc->fc_rescan_work_q) {
			queue_work(ioc->fc_rescan_work_q,
				   &ioc->fc_lsc_work);
		}
		spin_unlock_irqrestore(&ioc->fc_rescan_work_lock, flags);
		break;
	default:
		/* not an FC-routed event; let the generic handler decide */
		rc = mptscsih_event_process(ioc,pEvReply);
		break;
	}
	return rc;
}

/*
 * mptfc_ioc_reset - IOC reset notification hook for FC adapters.
 *
 * Delegates to mptscsih_ioc_reset() first; if that handled it (rc != 0)
 * and this is an FC adapter, additionally schedules FC-specific work:
 * a setup_reset work item before the reset, and port-page-1 defaults
 * plus a full rport rescan after it.  Queuing is done under
 * fc_rescan_work_lock so it cannot race with workqueue teardown in
 * mptfc_remove().  Always returns 1 once the FC path is taken.
 */
static int
mptfc_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
{
	int		rc;
	unsigned long	flags;

	rc = mptscsih_ioc_reset(ioc,reset_phase);
	if ((ioc->bus_type != FC) || (!rc))
		return rc;

	dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
		": IOC %s_reset routed to FC host driver!\n",ioc->name,
		reset_phase==MPT_IOC_SETUP_RESET ? "setup" : (
		reset_phase==MPT_IOC_PRE_RESET ? "pre" : "post")));

	if (reset_phase == MPT_IOC_SETUP_RESET) {
		spin_lock_irqsave(&ioc->fc_rescan_work_lock, flags);
		if (ioc->fc_rescan_work_q) {
			queue_work(ioc->fc_rescan_work_q,
				   &ioc->fc_setup_reset_work);
		}
		spin_unlock_irqrestore(&ioc->fc_rescan_work_lock, flags);
	}

	else if (reset_phase == MPT_IOC_PRE_RESET) {
		/* nothing FC-specific to do before the reset proper */
	}

	else {	/* MPT_IOC_POST_RESET */
		mptfc_SetFcPortPage1_defaults(ioc);
		spin_lock_irqsave(&ioc->fc_rescan_work_lock, flags);
		if (ioc->fc_rescan_work_q) {
			queue_work(ioc->fc_rescan_work_q,
				   &ioc->fc_rescan_work);
		}
		spin_unlock_irqrestore(&ioc->fc_rescan_work_lock, flags);
	}
	return 1;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
 *	mptfc_init - Register MPT adapter(s) as SCSI host(s) with SCSI mid-layer.
 *
 *	Returns 0 for success, non-zero for failure.
*/ static int __init mptfc_init(void) { int error; show_mptmod_ver(my_NAME, my_VERSION); /* sanity check module parameters */ if (mptfc_dev_loss_tmo <= 0) mptfc_dev_loss_tmo = MPTFC_DEV_LOSS_TMO; mptfc_transport_template = fc_attach_transport(&mptfc_transport_functions); if (!mptfc_transport_template) return -ENODEV; mptfcDoneCtx = mpt_register(mptscsih_io_done, MPTFC_DRIVER, "mptscsih_scandv_complete"); mptfcTaskCtx = mpt_register(mptscsih_taskmgmt_complete, MPTFC_DRIVER, "mptscsih_scandv_complete"); mptfcInternalCtx = mpt_register(mptscsih_scandv_complete, MPTFC_DRIVER, "mptscsih_scandv_complete"); mpt_event_register(mptfcDoneCtx, mptfc_event_process); mpt_reset_register(mptfcDoneCtx, mptfc_ioc_reset); error = pci_register_driver(&mptfc_driver); if (error) fc_release_transport(mptfc_transport_template); return error; } /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /** * mptfc_remove - Remove fc infrastructure for devices * @pdev: Pointer to pci_dev structure * */ static void __devexit mptfc_remove(struct pci_dev *pdev) { MPT_ADAPTER *ioc = pci_get_drvdata(pdev); struct mptfc_rport_info *p, *n; struct workqueue_struct *work_q; unsigned long flags; int ii; /* destroy workqueue */ if ((work_q=ioc->fc_rescan_work_q)) { spin_lock_irqsave(&ioc->fc_rescan_work_lock, flags); ioc->fc_rescan_work_q = NULL; spin_unlock_irqrestore(&ioc->fc_rescan_work_lock, flags); destroy_workqueue(work_q); } fc_remove_host(ioc->sh); list_for_each_entry_safe(p, n, &ioc->fc_rports, list) { list_del(&p->list); kfree(p); } for (ii=0; ii<ioc->facts.NumberOfPorts; ii++) { if (ioc->fc_data.fc_port_page1[ii].data) { pci_free_consistent(ioc->pcidev, ioc->fc_data.fc_port_page1[ii].pg_sz, (u8 *) ioc->fc_data.fc_port_page1[ii].data, ioc->fc_data.fc_port_page1[ii].dma); ioc->fc_data.fc_port_page1[ii].data = NULL; } } mptscsih_remove(pdev); } /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
 *	mptfc_exit - Unregisters MPT adapter(s)
 *
 *	Module unload: undoes mptfc_init() in reverse order — PCI driver,
 *	FC transport template, reset/event callbacks, then the three
 *	mpt_register() contexts.
 */
static void __exit
mptfc_exit(void)
{
	pci_unregister_driver(&mptfc_driver);
	fc_release_transport(mptfc_transport_template);

	/* both hooks were registered on mptfcDoneCtx in mptfc_init() */
	mpt_reset_deregister(mptfcDoneCtx);
	mpt_event_deregister(mptfcDoneCtx);

	mpt_deregister(mptfcInternalCtx);
	mpt_deregister(mptfcTaskCtx);
	mpt_deregister(mptfcDoneCtx);
}

module_init(mptfc_init);
module_exit(mptfc_exit);
gpl-2.0